diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/86L4hko5DF4_raw.srt b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/86L4hko5DF4_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..e4e07ef0369ab6dd5b340218888fca7f9cb59718 --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/86L4hko5DF4_raw.srt @@ -0,0 +1,2044 @@ +1 +00:00:21,690 --> 00:00:24,510 +النهار الفاتر كنا بنحكي في ال primary hemostasis + +2 +00:00:24,510 --> 00:00:30,870 +واتفقنا على انه ال primary hemostasis usually + +3 +00:00:30,870 --> 00:00:35,350 +involve two items two components of the hemostatic + +4 +00:00:35,350 --> 00:00:40,110 +mechanism هي ال blood vessels و ال platelet حكينا + +5 +00:00:40,110 --> 00:00:43,830 +عن ال blood vessels بالتفسير و بدنا نحكي عن ال + +6 +00:00:43,830 --> 00:00:47,690 +platelet و اتعرفنا على ال platelet + +7 +00:00:53,420 --> 00:00:58,360 +ثم وصلنا إلى Platelet + +8 +00:00:58,360 --> 00:01:06,440 +Function وقولنا بأن Platelet Function هي عبارة عن + +9 +00:01:06,440 --> 00:01:11,660 +وظائف متعددة سنتعرف عليها وعلى خطوات نشاط هذه + +10 +00:01:11,660 --> 00:01:18,180 +البلاتلت من أهموظائف الـ platelets إنها بتشتغل ك + +11 +00:01:18,180 --> 00:01:21,960 +surveillance أو blood vessel continuity بمعنى إنها + +12 +00:01:21,960 --> 00:01:27,800 +بتساعد على بقاء ال blood vessels intact ومن خلال + +13 +00:01:27,800 --> 00:01:33,040 +إنها بتشيك checks endothelial lining for gaps and + +14 +00:01:33,040 --> 00:01:37,420 +breaks أي قطع أي gaps موجودة في ال blood vessels + +15 +00:01:37,420 --> 00:01:41,920 +هي بتقوم بتعبئتها و بتقوم بتحشيتها fill in small + +16 +00:01:41,920 --> 00:01:45,520 +gapscaused by separation of endothelial cells، أي + +17 +00:01:45,520 --> 00:01:49,940 +جاب صارت بين ال endothelial cell، بتعملها عيش، + +18 +00:01:49,940 --> 00:01:57,000 +بتعملها fillingHemostatic Plague وهذا متفق عليه ثم + +19 +00:01:57,000 --> 00:02:01,660 +بتعتبر surface بتشكل surface for the activation of + +20 +00:02:01,660 --> 00:02:05,800 +coagulation cascade mechanism 
وبتلعب دور كبير في + +21 +00:02:05,800 --> 00:02:10,420 +ال healing process لإنه بيصير فيه إفراز for + +22 +00:02:10,420 --> 00:02:18,840 +certain growth factor or cytokines for healing + +23 +00:02:18,840 --> 00:02:26,290 +نبدأ هذه الخطواتخطوات نشاط البليتلت قولنا البليتلت + +24 +00:02:26,290 --> 00:02:33,170 +في الوضع العادي يا شباب لأ تمشي تمشي normal بمعنى + +25 +00:02:33,170 --> 00:02:40,510 +it's not acting 100% يوم ما تتغير الظروف عليها + +26 +00:02:40,510 --> 00:02:45,270 +normal circumstances بتبدأ تتحول الى functioning + +27 +00:02:45,270 --> 00:02:52,160 +الى activePlatelet و بتمر في عدة مراحل هذه المراحل + +28 +00:02:52,160 --> 00:02:59,540 +تبدأ بال adhesion stage أو step ثم change in shape + +29 +00:02:59,540 --> 00:03:04,960 +ثم degradation or secretion ثم aggregation ثم + +30 +00:03:04,960 --> 00:03:10,080 +stable clot formation ماشي؟ هنجي ان شاء الله لكل + +31 +00:03:10,080 --> 00:03:14,820 +خطوة من هذه الخطوات بالتفصيل و نبدأ بأول مرحلة و + +32 +00:03:14,820 --> 00:03:19,510 +هي ال platelet adhesionطبعا في هذه العملية بتتم + +33 +00:03:19,510 --> 00:03:27,450 +عملية ارتباط او ربط لـ platelet بال subendothelial + +34 +00:03:27,450 --> 00:03:34,530 +layer لـ platelet بال subendothelial layer او ما + +35 +00:03:34,530 --> 00:03:37,790 +يسمى بال non-platelet surface لأنه في الوضع + +36 +00:03:37,790 --> 00:03:43,230 +الطبيعي لـ platelet ماتتحتك نهائي بمين بال + +37 +00:03:43,230 --> 00:03:47,930 +subendothelialطبعا في هذه الحالة بتم المسكة الـ + +38 +00:03:47,930 --> 00:03:52,730 +platelet شباب كل اللي بيصير أنه هو الأول ردد فعلا + +39 +00:03:52,730 --> 00:03:56,290 +ال platelet أنه بتغير شكلها من ال discord shape + +40 +00:03:56,290 --> 00:04:03,690 +إلى الacanthocytes طبعا بتمر في مرحلة وسطة وهي ال + +41 +00:04:03,690 --> 00:04:09,160 +plate-like form shape أنها بتصير من ال discordالى + +42 +00:04:09,160 --> 00:04:13,440 +plate-like يعني بتفرد وبتزود ال surface area + +43 +00:04:13,440 --> 00:04:19,720 +تبعتها ثم إذا استمر ال adhesive process بتتحول 
ل + +44 +00:04:19,720 --> 00:04:24,300 +catharsis لإنه بتبدأ تعمل degranulation تبدأ تعمل + +45 +00:04:24,300 --> 00:04:30,600 +إيه؟ degranulation طبعا هذه أول مرح أول خطوة من + +46 +00:04:30,600 --> 00:04:34,660 +خطواتها اللي هي عملية الأرتباط عملية الأرتباط عشان + +47 +00:04:34,660 --> 00:04:38,780 +تتم عملية الأرتباط تحتاج إلى ظروف معينةتحتاج إلى + +48 +00:04:38,780 --> 00:04:43,640 +ظروف معينة، بدها معطيات معينة، لازم تتوفر شروط + +49 +00:04:43,640 --> 00:04:47,640 +معينة منها، الـrelease of certain activating + +50 +00:04:47,640 --> 00:04:55,220 +factors بيسموهم زي ثرمبين، وثرمبين كلكم بتعرفوا هو + +51 +00:04:55,220 --> 00:04:59,720 +عبارة عن إيش؟ هو عبارة عن active coagulation + +52 +00:04:59,720 --> 00:05:04,950 +factor، صح؟ إيش بيعمل؟100% 100% بيحاول في ال + +53 +00:05:04,950 --> 00:05:07,610 +bridge اللي في ال fibers يعني هذا primary + +54 +00:05:07,610 --> 00:05:10,990 +hemostasis ولا secondary hemostasis secondary + +55 +00:05:10,990 --> 00:05:16,770 +hemostasis 100% which means لما انه مطلوب ان يكون + +56 +00:05:16,770 --> 00:05:20,910 +موجود ان ال primary و ال secondary بيمشوا جنبا إلى + +57 +00:05:20,910 --> 00:05:27,980 +جنب بيتمشطوا مع بعض ماشي لكن مين يسبقالـ Primary، + +58 +00:05:27,980 --> 00:05:31,840 +مين بيسبق الـ Primary؟ فالproblem المطلوب، which + +59 +00:05:31,840 --> 00:05:35,320 +means انه إيش؟ انه ال secondary hemostasis is also + +60 +00:05:35,320 --> 00:05:39,840 +activating at this stage لكن اللي بيسبق هو ال + +61 +00:05:39,840 --> 00:05:43,660 +primary، نمرا اتنين, fibrinicted, and this is a + +62 +00:05:43,660 --> 00:05:47,100 +diger molecule، هذا عبارة عن a diger molecule، إيش + +63 +00:05:47,100 --> 00:05:51,780 +اسم الخطوة؟اسمها platelet adhesion بيتما ادهيجي + +64 +00:05:51,780 --> 00:05:55,740 +موليكيون ولا لا؟ لأ لأ بعنديك لأ بيتليت 100% بربط + +65 +00:05:55,740 --> 00:05:59,800 +لبليتليت برضه بال sub endothelia نمرى تلاتة von + +66 +00:05:59,800 --> 00:06:03,000 +Willebrand and this is the largest اللي هو + +67 +00:06:03,000 
--> 00:06:09,200 +component which is required for platelet adhesion + +68 +00:06:09,200 --> 00:06:14,320 +لمين؟ to sub endothelia او to collagen layer to + +69 +00:06:14,320 --> 00:06:19,090 +collagen layer هو عبارة عن big moleculeبيكون دائما + +70 +00:06:19,090 --> 00:06:26,590 +حامل factor 8 على دهره يا شباب، ماشي؟ وماشي، ماشي؟ + +71 +00:06:26,590 --> 00:06:29,550 +as a complex، volume of the brand factor 8 هي + +72 +00:06:29,550 --> 00:06:34,370 +complex نسبة فاق volume of the brand ل factor 8 + +73 +00:06:34,370 --> 00:06:37,950 +تسعة إلى واحد، قداش حجم volume of the brand، تسعة + +74 +00:06:37,950 --> 00:06:42,610 +أضعافير which means أن فاق volume of the brand هو + +75 +00:06:42,610 --> 00:06:45,250 +برعام big molecule ولا مش big molecule، big + +76 +00:06:45,250 --> 00:06:49,630 +moleculeطب هل المطلوب ان يكون big؟ طبعا لأن انا + +77 +00:06:49,630 --> 00:06:55,990 +بدي ايش؟ هو بدي شغلك ايش؟ كرابط، بدي يربط جسمين مع + +78 +00:06:55,990 --> 00:06:58,730 +بعض، ال platelet مع ال collagen و بالتالي لازم + +79 +00:06:58,730 --> 00:07:06,110 +يكون الوسط ياش؟ الوسط كبير الان هنشوف كيف تتم هذه + +80 +00:07:06,110 --> 00:07:11,610 +العملية شايفين العنوان مش مقول؟ مقول collagen و ال + +81 +00:07:11,610 --> 00:07:15,880 +platelet و في النص ايش فيه؟Vom Librem الـ Vom + +82 +00:07:15,880 --> 00:07:19,120 +Librem هو الوسيط هو الوسطى هو اللي بيش يربط + +83 +00:07:19,120 --> 00:07:23,740 +الكلاجن بمين بالـ platelet أو ال platelet بالـ air + +84 +00:07:23,740 --> 00:07:29,780 +بالكلاجن قبل ان انا اشرحكم هذا ال slide فى شوية + +85 +00:07:29,780 --> 00:07:33,060 +slide شاعتكم معاكوا انتبهوا عليها ماشي اليوم + +86 +00:07:33,060 --> 00:07:36,720 +بنزلكوا على الصفحة بإذن الله ماشي يا شباب انتبهوا + +87 +00:07:36,720 --> 00:07:45,570 +عليها شو اللي بصير اللي بصير انه فى جسمينبيتمثلوا + +88 +00:07:45,570 --> 00:07:48,990 +بالـPlatelet والـCollagen. مين بربطهم؟ مين + +89 +00:07:48,990 --> 00:07:52,230 +الواسطة؟ الـValvular Brand Factor. 
كيف تتم هذه + +90 +00:07:52,230 --> 00:07:56,850 +العملية؟ يعني بقدر أقول، Valvular Brand Act as a + +91 +00:07:56,850 --> 00:08:01,450 +Bridge. Bridge يعني بيقصر، بيجسر، بيجسر المسافة + +92 +00:08:01,450 --> 00:08:04,430 +الـPhysical Distance between Platelet and the + +93 +00:08:04,430 --> 00:08:11,030 +subendothelia. وهي طبعاً وجوده بيعمل Sealing، + +94 +00:08:11,030 --> 00:08:15,940 +Bond. ماشي؟Increase the bond that sealed platelet + +95 +00:08:15,940 --> 00:08:20,460 +to the vessel wall ومن صفات هذه الخطوة انها + +96 +00:08:20,460 --> 00:08:24,400 +reversible شو يعني reversal؟ ممكن تسلق ممكن تفوق و + +97 +00:08:24,400 --> 00:08:29,100 +هذه نعم يا شباب، من نعم الخالق علينا ليش؟ لأنها + +98 +00:08:29,100 --> 00:08:33,860 +بتعني انه مش هذا لا يعني انه كل ما بلاتلت مسكت في + +99 +00:08:33,860 --> 00:08:38,360 +blood vessels، بتعمل جلطة، صح؟ لأنه اذا كان كل + +100 +00:08:38,360 --> 00:08:43,700 +بلاتلتبتلق أو بتمسك في plot visit، بدها تعمل جلطة، + +101 +00:08:43,700 --> 00:08:49,440 +معناه تقول الجلطات على جفن ميشي، لكن مجرد الأرض + +102 +00:08:49,440 --> 00:08:53,880 +بقى بهذه الطريقة، it's reversible، بمعنى أنه ممكن + +103 +00:08:53,880 --> 00:08:56,960 +يفتح، ممكن تغير رأيها الـplaything، ممكن ايش؟ تغير + +104 +00:08:56,960 --> 00:09:01,950 +رأيها الـplaythingكيف تتم هذه العملية؟ بالتبايش ب + +105 +00:09:01,950 --> 00:09:09,850 +.. بتكلمه ب .. اللي هو two mechanism الأولان يشوش + +106 +00:09:09,850 --> 00:09:14,550 +بقى هذا هو .. 
هاي طبعا Endothelia وهي ال + +107 +00:09:14,550 --> 00:09:19,450 +subendothelia وهي ال platelet وهي ال bond بما أنا + +108 +00:09:19,450 --> 00:09:24,050 +فاكر ربط بين مين؟ بين ال platelet و بين مين؟ ال + +109 +00:09:24,050 --> 00:09:28,480 +subendothelia كيف تتم هذه العملية؟ من خلالأضطباط + +110 +00:09:28,480 --> 00:09:32,600 +الـ Von Willebrand بالـ GB1B ريسبتور اللي على سطح + +111 +00:09:32,600 --> 00:09:40,000 +الـ Platelet وفي الكلاجب ماشي فجاء الكلاجب مسك + +112 +00:09:40,000 --> 00:09:43,680 +بالـ Platelet الواسطة Von Willebrand و Von + +113 +00:09:43,680 --> 00:09:47,620 +Willebrand مسك ال Platelet من خلال المنطقة من خلال + +114 +00:09:47,620 --> 00:09:53,800 +ريسبتور اسمه Glucoprotein 1B ماشي؟ طيب، هل هذه + +115 +00:09:53,800 --> 00:09:59,440 +العملية تتم بدون واسطة؟بدون فوم لبران تستطيع أن + +116 +00:09:59,440 --> 00:10:03,980 +تتم نازرات تستطيع أن تتم نازرات تستطيع أن تتم + +117 +00:10:03,980 --> 00:10:09,040 +نازرات تستطيع أن تتم نازرات تستطيع أن تتم نازرات + +118 +00:10:09,040 --> 00:10:12,900 +تستطيع أن تتم نازرات تستطيع أن تتم نازرات تستطيع + +119 +00:10:12,900 --> 00:10:13,180 +أن تتم نازرات تستطيع أن تتم نازرات تستطيع أن تتم + +120 +00:10:13,180 --> 00:10:13,260 +نازرات تستطيع أن تتم نازرات تستطيع أن تتم نازرات + +121 +00:10:13,260 --> 00:10:15,020 +تستطيع أن تتم نازرات تستطيع أن تتم نازرات تستطيع + +122 +00:10:15,020 --> 00:10:15,020 +أن تتم نازرات تستطيع أن تتم نازرات تستطيع أن تتم + +123 +00:10:15,020 --> 00:10:17,000 +نازرات تستطيع أن تتم نازرات تستطيع أن تتم نازرات + +124 +00:10:17,000 --> 00:10:22,880 +تستطيع + +125 +00:10:22,880 --> 00:10:28,410 +أن تتم نماشي، يبقى two mechanism إما من خلال + +126 +00:10:28,410 --> 00:10:31,590 +ارتباط الوغل البراد بال glycoprotein 1B وإما من + +127 +00:10:31,590 --> 00:10:35,130 +خلال ارتباط ال glycoprotein 1A ال receptor اللي + +128 +00:10:35,130 --> 00:10:38,150 +على سطح ال platelet directly with ash و with blood + +129 +00:10:38,150 --> 00:10:42,690 +vessels with ash و with blood vessels هذا هي ال + +130 
+00:10:42,690 --> 00:10:47,450 +first stage هذه هي ال first stage وهي ال adhesion + +131 +00:10:47,450 --> 00:10:50,730 +stage و هذه الصورة زي ما تشايفين يا شباب هي blood + +132 +00:10:50,730 --> 00:10:53,810 +vessels، normal blood vessels في الوضع الطبيعي يا + +133 +00:10:53,810 --> 00:10:55,210 +شباب ال platelet is ash + +134 +00:10:58,950 --> 00:11:02,990 +بتنقمط على سطح مين؟ على سطح الفضلة الذاتية، ليش + +135 +00:11:02,990 --> 00:11:09,270 +بتنقمط؟ بشكل طبيعي، لأن في كونتينيوسكلين، صح؟ من + +136 +00:11:09,270 --> 00:11:11,970 +وين جاء ال negative charge؟ من النساء الذاتية، من + +137 +00:11:11,970 --> 00:11:15,930 +الأولوية، من الطبية، طلعت لنا إيه؟ ناتروكسر، + +138 +00:11:15,930 --> 00:11:18,410 +نيروكسر، نيروكسر، نيروكسر، نيروكسر، نيروكسر، + +139 +00:11:18,410 --> 00:11:18,570 +نيروكسر، نيروكسر، نيروكسر، نيروكسر، نيروكسر، + +140 +00:11:18,570 --> 00:11:21,870 +نيروكسر، نيروكسر، نيروكسر، نيروكسر، نيروكسر، + +141 +00:11:21,870 --> 00:11:27,310 +نيروكسر، نيروكسر، نيروكالاندروسيدين one بيعمل + +142 +00:11:27,310 --> 00:11:33,390 +العكس فشو اللي صار؟ صار فيه مزح في ال blood + +143 +00:11:33,390 --> 00:11:37,250 +vessels طلع منه vom lipan فلاوة من vom lipan شو + +144 +00:11:37,250 --> 00:11:40,390 +عمل vom lipan؟ ربط البليتلت اللي هي ربط البليتلت + +145 +00:11:40,390 --> 00:11:45,210 +بمين؟ بالكلاجة ربط البليتلت بالكلاجة وبالتالي صار + +146 +00:11:45,210 --> 00:11:50,550 +فيه ايش؟ فيه adhesion ماشي؟ it's reversible مرة + +147 +00:11:50,550 --> 00:11:56,870 +تانية بقولكوا ليش؟and there is no ADP production، + +148 +00:11:56,870 --> 00:12:02,550 +لسه ما سط ال ADP، يعني وين موجود ال ADP؟ ال + +149 +00:12:02,550 --> 00:12:07,170 +receptor على سطح البلاتلة، في البلاتلة الموجودة، + +150 +00:12:07,170 --> 00:12:12,900 +وين موجود في البلاتلة؟موجود في ال dense granules، + +151 +00:12:12,900 --> 00:12:18,120 +مظبوط، مش قولنا ATP و ADP و Calcium و Serotin طيب + +152 +00:12:18,120 --> 00:12:21,360 +طلع من البلد اللي بتشي؟ ما طلعش من البلد اللي + +153 +00:12:21,360 --> 
00:12:25,440 +بتشي، يعني في ATP برا، مافيش ADP، هو إيش ال ADP؟ + +154 +00:12:25,440 --> 00:12:32,000 +مش خدناها قبل ذلك احنا، إيش بعمل؟ بحفظ البلد، هو + +155 +00:12:32,000 --> 00:12:36,100 +بعمل، أه بحفظ، إيش يعني بيعملها؟ بطيع طاقة، طاقة + +156 +00:12:36,100 --> 00:12:38,460 +يعني إيش؟ يعني بتشرب شو بالطاقة؟ + +157 +00:12:42,160 --> 00:12:46,100 +طاقة يعني platelet aggregation بتخليها تعمل + +158 +00:12:46,100 --> 00:12:48,900 +platelet aggregation مش يقولنا strong platelet + +159 +00:12:48,900 --> 00:12:53,420 +aggregate هو بقى strong platelet aggregate احنا + +160 +00:12:53,420 --> 00:12:56,340 +لسه مافيش ADD ليعمل aggregation لكن في adhesion + +161 +00:12:56,340 --> 00:13:01,560 +صار اه في adhesion reversal نعم reversal ماشي طيب + +162 +00:13:01,560 --> 00:13:03,960 +صار في initiation + +163 +00:13:07,820 --> 00:13:14,040 +متمثلة بالـ Capturing، إيش يعني Capture؟ إتقاء، ثم + +164 +00:13:14,040 --> 00:13:20,140 +Adhesion، ثم Activation، الخطوة التي تلقى الـ + +165 +00:13:20,140 --> 00:13:24,920 +Capturing هي إيش؟ هي الـ Adhesion، ثم إيش؟ ثم + +166 +00:13:24,920 --> 00:13:33,280 +Activation، هنشوف أي صورة واضحة تماما، لكن مكرمة، + +167 +00:13:33,280 --> 00:13:35,780 +هذا هي نفس الحكاية، الوضع اللي بدنا نطلع ونشيك، + +168 +00:13:35,780 --> 00:13:44,660 +وهو دهمفروض شباب؟ طيب قولنا بس تنمسك بس تصبح الـ + +169 +00:13:44,660 --> 00:13:49,620 +adhesion بيصير فيه change in shape بيصير فيه + +170 +00:13:49,620 --> 00:13:54,100 +change in shape للـ plate تتحول from discord to + +171 +00:13:54,100 --> 00:13:59,660 +plate form shape ثم أكانثوسايت ثم أكانثوسايت + +172 +00:13:59,660 --> 00:14:03,480 +عارفين مصطلح أكانثوسايت؟ أخدته صح؟ خدته في + +173 +00:14:03,480 --> 00:14:08,380 +الhematology؟ صحيحأيوة ال acanthocyte غير معروف + +174 +00:14:08,380 --> 00:14:12,300 +يعني مافي شيطاك محدد لأ شيطاك محدد لا شيطاك محدد + +175 +00:14:12,300 --> 00:14:13,320 +لا شيطاك محدد لا شيطاك محدد لا شيطاك محدد لا شيطاك + +176 +00:14:13,320 --> 00:14:14,240 +محدد لا شيطاك محدد لا شيطاك محدد 
لا شيطاك محدد لا + +177 +00:14:14,240 --> 00:14:16,600 +شيطاك محدد لا شيطاك محدد لا شيطاك محدد لا شيطاك + +178 +00:14:16,600 --> 00:14:19,320 +محدد لا شيطاك محدد لا شيطاك محدد لا شيطاك محدد لا + +179 +00:14:19,320 --> 00:14:23,100 +شيطاك محدد لا شيطاك محدد لا شيطاك محدد لا شيطاك + +180 +00:14:23,100 --> 00:14:35,680 +محدد لا شيطاك محدد لا شيطاك + +181 +00:14:35,680 --> 00:14:42,740 +مSharp حدث، ماشي؟ طيب، تحولت الخلية الـIH إلى .. + +182 +00:14:42,740 --> 00:14:47,180 +إلى Acanthocytes يعني صار فيه change in shape، صار + +183 +00:14:47,180 --> 00:14:49,980 +فيه change in shape طبعا هذا دليل على إيش؟ دليل + +184 +00:14:49,980 --> 00:14:53,040 +على إنه في continuous adhesion طول ما هي ممسوقة، + +185 +00:14:53,040 --> 00:14:56,160 +قلتلكوا، طول ما هو ممسوقة، الـplated بتمر بعد، في + +186 +00:14:56,160 --> 00:15:01,280 +مراحل إيش؟ النشاط، بتمر في مراحل النشاط وقبل مراحل + +187 +00:15:01,280 --> 00:15:04,580 +النشاط، هذه المرحلة أو أول مراحل النشاط، هذه + +188 +00:15:04,580 --> 00:15:08,290 +المرحلةهذه التي تسبق الـ Degranulation لازم تغير + +189 +00:15:08,290 --> 00:15:12,090 +شكلها واحد بيكون بمسيك ال bracelet بالشكل هذا نمسك + +190 +00:15:12,090 --> 00:15:16,730 +كرة بالشكل هذا ماشي، شو بيصير؟ بيصير في ال .. ال + +191 +00:15:16,730 --> 00:15:19,930 +.. ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال .. + +192 +00:15:19,930 --> 00:15:20,690 +ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال + +193 +00:15:20,690 --> 00:15:21,090 +.. ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال .. + +194 +00:15:21,090 --> 00:15:21,270 +ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال + +195 +00:15:21,270 --> 00:15:21,790 +.. ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال .. + +196 +00:15:21,790 --> 00:15:21,810 +ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال + +197 +00:15:21,810 --> 00:15:21,930 +.. ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال .. + +198 +00:15:21,930 --> 00:15:23,490 +ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال .. ال + +199 +00:15:23,490 --> 00:15:31,050 +.. ال .. 
ال .. ال .. ال .. ال .. ال .. ال .. ال .. + +200 +00:15:31,050 --> 00:15:33,390 +ال .. ال .. ال .. + +201 +00:15:35,400 --> 00:15:38,420 +تقص عليها الشكل هذا بتغير شكلها ولا بتغيرش؟ + +202 +00:15:38,420 --> 00:15:41,720 +الادهيجي نعمل هي نفس الحركة، نسكت الـplatelet، + +203 +00:15:41,720 --> 00:15:45,760 +ففعصت الـplatelet حاولت تخلط يعني، ماقدرتش، + +204 +00:15:45,760 --> 00:15:52,880 +وبالتالي صار في change in shape طيب، + +205 +00:15:52,880 --> 00:16:00,260 +محصلة ذلك اللي هو .. محصلة ذلك اللي هو ال .. + +206 +00:16:00,420 --> 00:16:05,400 +secretion or release or degranulation ناشي and + +207 +00:16:05,400 --> 00:16:12,860 +this stage require energy بدها طاقة بدها أياش بدها + +208 +00:16:12,860 --> 00:16:19,520 +طاقة متمثلة بإياش بال ATP it requires ATP وطبعا ال + +209 +00:16:19,520 --> 00:16:29,760 +ATP او ADP ناشي وحصلته degranulation بيصاحب هذه + +210 +00:16:29,760 --> 00:16:34,500 +العملية release of catecholamine أو الابنفرين مرضو + +211 +00:16:34,500 --> 00:16:39,920 +أحد المكونات اللي هي ال platelet هو السراطين + +212 +00:16:39,920 --> 00:16:46,880 +والتانية شو بيعملوا؟ بيعملوا vasoconstriction طبعا + +213 +00:16:46,880 --> 00:16:53,790 +these granules trigger بجانب ال primaryاللي هو + +214 +00:16:53,790 --> 00:16:56,890 +بالجانب الـ adhesion بيعملوا حاجة بيسموها + +215 +00:16:56,890 --> 00:17:01,270 +secondary aggregation وعندنا هنتعرف على ال primary + +216 +00:17:01,270 --> 00:17:04,690 +aggregation الخطوة الأساسية secondary aggregation + +217 +00:17:04,690 --> 00:17:09,190 +لإن هذه المواد ال ADP هو عبارة عن agonist قوي + +218 +00:17:09,190 --> 00:17:16,150 +يستطيع أن يعمل direct aggregation لل platelet even + +219 +00:17:16,150 --> 00:17:21,190 +without اللي هو adhesion automatically ممكن + +220 +00:17:21,190 --> 00:17:26,060 +يعمليهاشوبالتالي وجوده في الوسط مهم جدا strong + +221 +00:17:26,060 --> 00:17:32,140 +aggregate طبعا + +222 +00:17:32,140 --> 00:17:36,020 +عندما أصار في aggregation يا شباب there is no more + +223 +00:17:36,020 --> 00:17:40,760 
+reversibility no more reversibility بمعنى أن + +224 +00:17:40,760 --> 00:17:43,500 +الخطوة بتصير irreversible شو اللي بيصير؟ + +225 +00:17:50,480 --> 00:17:52,260 +degranulation نحكي عن دي دي دي دي دي دي دي دي دي + +226 +00:17:52,260 --> 00:17:54,160 +دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي + +227 +00:17:54,160 --> 00:17:55,520 +دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي + +228 +00:17:55,520 --> 00:18:02,660 +دي دي دي دي دي دي دي دي دي + +229 +00:18:02,660 --> 00:18:02,720 +دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي + +230 +00:18:02,720 --> 00:18:06,640 +دي دي دي + +231 +00:18:06,640 --> 00:18:06,640 +دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي + +232 +00:18:06,640 --> 00:18:06,680 +دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي دي + +233 +00:18:06,680 --> 00:18:10,180 +دي دي + +234 +00:18:18,510 --> 00:18:24,950 +وبالتالي بيعمل acid hydrolysis ثم ال alpha + +235 +00:18:24,950 --> 00:18:28,310 +granules فيها مكونات كثيرة فيها إيش؟ مكونات كثيرة + +236 +00:18:28,310 --> 00:18:33,650 +منها على سبيل المثال اللي هو release of platelet + +237 +00:18:33,650 --> 00:18:40,330 +factor 4 عارفين إيش بيسوي؟ عارفين إيش؟ بيسوي؟ شو + +238 +00:18:40,330 --> 00:18:46,810 +بيسوي؟ اللي هو .. 
شو بيسوي؟ أه؟ + +239 +00:18:52,110 --> 00:18:57,150 +يعمل neutralization للهيبرين نيوتراليزيشن للهيبرين + +240 +00:18:57,150 --> 00:19:01,030 +نعم بالظبط + +241 +00:19:01,030 --> 00:19:05,270 +احنا قاعدين نعمل جلطة بدنا هيبرين بدناش هيبرين + +242 +00:19:05,270 --> 00:19:07,630 +فشوف تعمل ال platelet و تطلع ال platelet على ال + +243 +00:19:07,630 --> 00:19:09,730 +factor 4 وعلى ال platelet factor 4 تعمل + +244 +00:19:09,730 --> 00:19:14,890 +inactivation لمن؟ للهيبرين وبالتالي بيهيئ اللي هو + +245 +00:19:16,780 --> 00:19:19,700 +نمر يعني اتنين بيطلع الـ Beta-thromboglobulin، + +246 +00:19:19,700 --> 00:19:22,460 +another protein which is required for أكتر من + +247 +00:19:22,460 --> 00:19:25,680 +المرحلة، منها الـ adhesion ومنها اللي هو الـ + +248 +00:19:25,680 --> 00:19:30,020 +healing process، as well as platelet derived + +249 +00:19:30,020 --> 00:19:32,800 +growth factor، وقبل عن growth cytokine، growth + +250 +00:19:32,800 --> 00:19:37,780 +cytokine، وهذا مطلوب for healing، and another + +251 +00:19:37,780 --> 00:19:41,060 +coagulation protein، احنا عارفين ان في جلب + +252 +00:19:41,060 --> 00:19:46,250 +الـplatelet فيه coagulation factors،منها factor 1 + +253 +00:19:46,250 --> 00:19:55,050 +و factor 5 و factor 8 و factor 13، ماشي؟ واحد و + +254 +00:19:55,050 --> 00:19:58,770 +خمسة و تمانية، كلهم موجودين في الجلب الـplatelet، + +255 +00:19:58,770 --> 00:20:02,230 +نعم في الـalpha granules، و شوية من هناش، تلاتة + +256 +00:20:02,230 --> 00:20:07,160 +عشرالنوع التاني وهو dense granules وهو ايش؟ dense + +257 +00:20:07,160 --> 00:20:10,300 +granules و ال dense granules عارفين مكوناتها ايه + +258 +00:20:10,300 --> 00:20:15,380 +ATP و ABD و كالسيوم و سراطين كالسيوم و سراطين و + +259 +00:20:15,380 --> 00:20:21,900 +مطلوب جدا الكالسيوم يكون في هذه المراحل لما + +260 +00:20:21,900 --> 00:20:25,100 +يزيد بيعمل vasoconstriction احسن اجير بيعمل احسن + +261 +00:20:25,100 --> 00:20:29,160 +طيب زي ما انتوا شايفين هي الوعائد ده مرة ثانية + +262 +00:20:29,160 --> 00:20:31,340 
+ماشي + +263 +00:20:33,580 --> 00:20:37,180 +المزحة طلع منه فوملي براند تعملها تيجي وشوية + +264 +00:20:37,180 --> 00:20:40,100 +aggregation هي لدى وزي ما انتوا ملاحظين ايش صار + +265 +00:20:40,100 --> 00:20:43,160 +فيه موجود في الوسط حال جيتها انت قاعد البيت ماحق + +266 +00:20:43,160 --> 00:20:46,520 +انك تدفع البيت صار في LED وقولنا هذا عبارة عن + +267 +00:20:46,520 --> 00:20:52,620 +strong air aggregation وفي عندنا كومبوكسين A2 هذا + +268 +00:20:52,620 --> 00:20:55,940 +ايش هو بيعمل aggregation و بيعمل plated + +269 +00:20:55,940 --> 00:21:01,710 +aggregation بيعمل plated aggregationاحنا بدنا نعمل + +270 +00:21:01,710 --> 00:21:04,490 +platelet aggregation، بدنا نصل إلى المرحلة التي + +271 +00:21:04,490 --> 00:21:08,910 +تليها، نصل إلى ايه؟ للمرحلة التي تليها، فبالتالي + +272 +00:21:08,910 --> 00:21:13,610 +بدنا نوفر الظروف لهذه المرحلة، ماشي؟ فهي المسكة + +273 +00:21:13,610 --> 00:21:18,530 +عملت degranulation، وال granules اللي طلعت منها، + +274 +00:21:18,530 --> 00:21:23,540 +ماشي؟ تحتوي على platelet aggregate، فمنها ADPو من + +275 +00:21:23,540 --> 00:21:26,140 +شرط الـ Platelet طلعت الـ Thromboxane A2 وعارفين + +276 +00:21:26,140 --> 00:21:29,660 +الميكانيزم تبعه أخدناه .. ماشي .. قالولي هذا + +277 +00:21:29,660 --> 00:21:32,620 +strong agregant وبالتالي بيعملوا Platelet إيه عشان + +278 +00:21:32,620 --> 00:21:37,300 +.. ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. + +279 +00:21:37,300 --> 00:21:38,060 +ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. + +280 +00:21:38,060 --> 00:21:39,160 +ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. + +281 +00:21:39,160 --> 00:21:39,380 +ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. + +282 +00:21:39,380 --> 00:21:40,280 +ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. + +283 +00:21:40,280 --> 00:21:43,100 +ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. ماجي .. + +284 +00:21:43,100 --> 00:21:47,160 +ماجي .. 
ماجي + +285 +00:21:47,160 --> 00:21:51,590 +..طب إيش بيشكل؟ جاله factor خمسة هذا هو هدف هو + +286 +00:21:51,590 --> 00:21:55,350 +عبارة عن substrate لمين؟ ل factor عشرة يعني بنشر + +287 +00:21:55,350 --> 00:21:57,970 +coagulation cascade mechanism و factor اتنين + +288 +00:21:57,970 --> 00:22:01,930 +factor عشرة بيشتغل على خمسة في وجود خمسة على اتنين + +289 +00:22:01,930 --> 00:22:06,010 +و اتنين بتحول لهول برو ترمبل تحول ل ترمبل و ترمبل + +290 +00:22:06,010 --> 00:22:09,110 +بيشتغل على ال fiber region و هكذا فاهمين عليا؟ + +291 +00:22:09,110 --> 00:22:14,290 +يعني كمان على سطح البلد شو بيصير؟ بيصير فانشغل ال + +292 +00:22:14,290 --> 00:22:19,220 +coagulation cascade mechanismمش سيريال تنشيط لل + +293 +00:22:19,220 --> 00:22:22,360 +coagulation cascade mechanism أيه موجود receptor + +294 +00:22:22,360 --> 00:22:25,020 +في ال extrinsic أم لأ في ال .. لأ في ال intrinsic + +295 +00:22:25,020 --> 00:22:28,420 +فدي في ال common حتى بس المرة الجاية مش هروح + +296 +00:22:28,420 --> 00:22:31,820 +نفسيها Platelet factor أربعة طوّبوا قلتلكوا إياه، + +297 +00:22:31,820 --> 00:22:35,980 +إيش بيطلّقوا؟ إيه بيعمل؟ إيه بيعمل؟ إيه بيعمل؟ إيه + +298 +00:22:35,980 --> 00:22:36,120 +بيعمل؟ إيه بيعمل؟ إيه بيعمل؟ إيه بيعمل؟ إيه بيعمل؟ + +299 +00:22:36,120 --> 00:22:37,320 +إيه بيعمل؟ إيه بيعمل؟ إيه بيعمل؟ إيه بيعمل؟ إيه + +300 +00:22:37,320 --> 00:22:44,400 +بيعمل؟ إيه بيعمل؟ إيه بيعمل؟ إيه بيعمل؟ إيهبعمل + +301 +00:22:44,400 --> 00:22:48,860 +حاجتين كنتي ليه؟ بت recruit، recruit يعني يستمر + +302 +00:22:48,860 --> 00:22:53,260 +استدعاء، كل platelet جديدة بتيصل للمكان بنشطها، كل + +303 +00:22:53,260 --> 00:22:56,960 +platelet جديدة بتيصل للمكان الممزوع يعني، شو + +304 +00:22:56,960 --> 00:23:01,380 +بعملها؟بعملها بنشطها، وفي نفس الوقت لما بتتنشط، + +305 +00:23:01,380 --> 00:23:05,480 +بتطلع من الـplatelet، dense granules، إيش بتطلع؟ + +306 +00:23:05,480 --> 00:23:09,240 +من الـcalcium، عندنا هنشوف إنه لازم لكل عمليات + +307 +00:23:09,240 --> 00:23:12,180 +الحيوية اللي بتتنشط من خلالها 
الـcoagulation + +308 +00:23:12,180 --> 00:23:19,160 +cascade، عارف؟ معدى بدايته ونهايته، بس، شايفين إيش + +309 +00:23:19,160 --> 00:23:24,000 +صورة؟بطلت فيه adhesion طبقة واحدة أكم طبقة صارت + +310 +00:23:24,000 --> 00:23:28,760 +طبقات يعني adhesion و aggregation ادهيجين و ايه؟ و + +311 +00:23:28,760 --> 00:23:35,700 +aggregation ماشي صار فيه perputation لل platelet + +312 +00:23:35,700 --> 00:23:40,560 +blood formation وبالتالي تعظمت و كبرت وصارت ايه + +313 +00:23:40,560 --> 00:23:45,060 +اش؟ وصارت جلبة شوفوا إيش اللي موجودة شباب ADD + +314 +00:23:48,160 --> 00:23:52,460 +و في حاجة اهم كمان موجودة ثرمبين و زي ما قلتلكوا + +315 +00:23:52,460 --> 00:23:55,740 +دليل على ان اتنين ماشيين مع بعض و هي الثرمبين + +316 +00:23:55,740 --> 00:23:59,620 +محسنة و عارفين ايش الكتورب الحمراء هذي؟ عبارة عن + +317 +00:23:59,620 --> 00:24:04,760 +fiber عبارة عن ايه بالظبط عاملة تقوية عاملة ايه؟ + +318 +00:24:04,760 --> 00:24:09,680 +أسباب تشبيك أي بقى حسبت يبقى صار في platelet + +319 +00:24:09,680 --> 00:24:14,400 +aggregation فالplatelet aggregation هذا هي + +320 +00:24:14,400 --> 00:24:16,880 +chemical changes cause platelets to aggregate and + +321 +00:24:16,880 --> 00:24:21,060 +stick to one anotherأكيد صار في وسط مليانة مواد + +322 +00:24:21,060 --> 00:24:26,140 +كيموية كل هذه المواد بتعمل على إيش؟ على إنه يشير + +323 +00:24:26,140 --> 00:24:29,500 +فيه platelet aggregation تحفظ newly arriving + +324 +00:24:29,500 --> 00:24:33,920 +platelets كل platelets بتيجي become activated by + +325 +00:24:33,920 --> 00:24:40,380 +agonist ثم aggregation is triggered by platelet + +326 +00:24:40,380 --> 00:24:50,030 +factor 3 ثم ADEثم طمبق وطمبق سيريتو، شايفين ال + +327 +00:24:50,030 --> 00:24:52,890 +aggregation إيش صار؟ platelet factor تلاتة أشوة + +328 +00:24:52,890 --> 00:24:56,310 +شبه ..لو الصفحة اللي بتوفره لplatelet عشان أشوة + +329 +00:24:56,310 --> 00:24:58,290 +عشان أشوة ..هو اللي أدفش في ليلة ال membrane اللي + +330 +00:24:58,290 --> 00:25:03,050 +موجود وين؟ في ال membrane of 
platelet، وهو برهان + +331 +00:25:03,050 --> 00:25:09,030 +الصفحة، مظبوط بتوفره لplatelet عشان إيش؟ + +332 +00:25:14,070 --> 00:25:16,470 +ولكن بالتامة عملية الـ aggregation جانوا بالتامة + +333 +00:25:16,470 --> 00:25:22,010 +خلال الارتباط بالرسيبتور التاني اسمه 2B3A 2B3A + +334 +00:25:22,010 --> 00:25:26,910 +ماشي برتبط في مين؟ برتبط مع الفيبرانوجين شو بعمل + +335 +00:25:26,910 --> 00:25:31,610 +الفيبرانوجين؟ بربط كل اتاني بليتلت مع بعض وبالتالي + +336 +00:25:31,610 --> 00:25:35,570 +بيصير فيه أيش بليتلت aggregation وقلنا في هذه + +337 +00:25:35,570 --> 00:25:41,070 +الخطوة ال calcium is present لازم يكون موجود وكمان + +338 +00:25:41,070 --> 00:25:44,290 +ثرمبوكسين A2وهو كلها طالعة لإنه بيعمل Platelet + +339 +00:25:44,290 --> 00:25:48,650 +Aggregation هاي Calcium ثرمبوكسين A2 ثرمبوكسين A2 + +340 +00:25:48,650 --> 00:25:52,450 +is a stimulus or another stimulus و بيصير فيه + +341 +00:25:52,450 --> 00:25:55,190 +Platelet Aggregation طبعا ما تنسى إنه في ADP في + +342 +00:25:55,190 --> 00:25:59,370 +Thrombin في ثرمبوكسين A2 لإن ماتما صار في Platelet + +343 +00:25:59,370 --> 00:26:02,150 +Aggregation it's irreversible and no more + +344 +00:26:02,150 --> 00:26:06,030 +reversibility is there بيصير فيه disagregation ما + +345 +00:26:06,030 --> 00:26:08,370 +بيصير في AL disagregation + +346 +00:26:11,420 --> 00:26:15,820 +الـPlatelet شباب هي شد Platelet شد بمعنى .. 
إيش + +347 +00:26:15,820 --> 00:26:22,400 +يعني؟ Shedding تقشير ماشي؟ Shedding بمعنى تتقشر، + +348 +00:26:22,400 --> 00:26:27,000 +تطلع، بتطلع منها إيش؟ Shed Membranes اللي هو + +349 +00:26:27,000 --> 00:26:33,200 +الـMembrane Rich in phospholipids لأن الـMembrane + +350 +00:26:33,200 --> 00:26:38,660 +تبعها درعنية Appearance of Platelet Factor 3 بيظهر + +351 +00:26:38,660 --> 00:26:43,170 +على شكل Factor 3 Platelet Factor 3on the platelet + +352 +00:26:43,170 --> 00:26:47,090 +membrane فبالتالي + +353 +00:26:47,090 --> 00:26:51,690 +صار لها aggregation وصار فيه توفير لسطح جديد وهو + +354 +00:26:51,690 --> 00:26:55,950 +ايه؟ Platelet factor 3 على سطح ميه؟ على سطح البلد + +355 +00:26:55,950 --> 00:26:59,970 +this happens during platelet formation + +356 +00:27:03,140 --> 00:27:07,800 +وانت سيرف as academic sites نفس هي ده ال platelet + +357 +00:27:07,800 --> 00:27:14,580 +factor تلاتة بيمثل سطح بيمشط كون ال coagulation + +358 +00:27:14,580 --> 00:27:18,160 +cascade mechanism كون ال coagulation factor على + +359 +00:27:18,160 --> 00:27:23,430 +سطح على هذا السطح الجديد بتتنشط و بتكون عيشاللي هو + +360 +00:27:23,430 --> 00:27:27,150 +الـ Fibrin Formation الـ Fibrin Formation يبقى هي + +361 +00:27:27,150 --> 00:27:31,930 +وفّرت سطح لمين لنشاط الـ Coagulation Cascade + +362 +00:27:31,930 --> 00:27:35,030 +Mechanism أو نشاط الـ Coagulation Factor معايا يا + +363 +00:27:35,030 --> 00:27:40,810 +أبو إشمعي؟ مع الله، هاي العملية، هاي الـ Plate + +364 +00:27:40,810 --> 00:27:44,590 +اللي Blood غزل صار فيه نزع طلب منه زندريبان، مسيك + +365 +00:27:44,590 --> 00:27:47,710 +الـ Platelet و Platelet ربطت حالها مع Platelet + +366 +00:27:47,710 --> 00:27:55,660 +التانية من خلال الـ Fibrin Formationخلال طبعا + +367 +00:27:55,660 --> 00:27:58,880 +في وجود ال calcium و ال fiber نوجين مظبوط بدنا + +368 +00:27:58,880 --> 00:28:04,300 +calcium و بدنا إيش 100% فلنفتح هذا ال ميكانيزم + +369 +00:28:04,300 --> 00:28:09,280 +خدنا من التفصيل محصلته إيش رومبوكسين A2 formation + +370 +00:28:09,280 
--> 00:28:12,780 +which is إيش aggregate platelet aggregate وهي + +371 +00:28:12,780 --> 00:28:19,000 +بيعمل as و هذه الصورة بتبين إيش اللي هو هذه ال + +372 +00:28:19,000 --> 00:28:24,660 +platelet freeعلى سطح الـ Receptor ممسوك يا باشي؟ + +373 +00:28:24,660 --> 00:28:30,900 +ماشي ممسوك يوم متعارض ل agonist زي ال ADD و ال + +374 +00:28:30,900 --> 00:28:34,580 +thrombocynetol و ال thrombin هى جالك Myprologen + +375 +00:28:34,580 --> 00:28:39,180 +ماشي و ربطه لتل ال platelet مع بعض ربط ايه؟ لإن + +376 +00:28:39,180 --> 00:28:46,140 +هؤلاء دي are strong aggregants ماشي يا شباب و + +377 +00:28:46,140 --> 00:28:49,660 +هالصورة بتبين ليهاش؟ Platelet Aggregation شايفين + +378 +00:28:49,660 --> 00:28:54,840 +كيف؟كل الـ Platelet مربوطة مع بعض انت باين معايا؟ + +379 +00:28:54,840 --> 00:29:03,300 +حد عنده سؤال؟ احسن كتير حد عنده سؤال؟طب انتبهوا + +380 +00:29:03,300 --> 00:29:06,940 +عليا على ال animation ماشي لان هنظرة فيه animation + +381 +00:29:06,940 --> 00:29:11,640 +بده يبين ان العملية كب التن ماشي هاي ال platelet و + +382 +00:29:11,640 --> 00:29:15,140 +زى ما تلاحظى فيه receptor على سطحها بتمثل + +383 +00:29:15,140 --> 00:29:19,200 +different agonist مش ماشى طبعا السهم اللى ايه هذا + +384 +00:29:19,200 --> 00:29:24,560 +هو بتنجر من واحد لتانى كل agonist عنده استعداد + +385 +00:29:24,560 --> 00:29:27,120 +ينشط هذا ال receptor و هذا ال receptor تبع مين + +386 +00:29:27,120 --> 00:29:34,430 +aggregation يعني بيعمل ايه aggregationكل receptor + +387 +00:29:34,430 --> 00:29:39,450 +نشوف اه شايفين؟ طبعا زي اللي انتوا مشايفين فينا + +388 +00:29:39,450 --> 00:29:43,770 +collagen receptor, ADP, Epinephrine او Adrenaline + +389 +00:29:43,770 --> 00:29:49,330 +و Thrombin، Strong، Thrombin، أدوين ماشي، شو اللي + +390 +00:29:49,330 --> 00:29:52,910 +صار؟ ده هو، هاي ال collagen برضه بعمل direct + +391 +00:29:52,910 --> 00:29:58,650 +activation المفروشة بقى طبعا ال .. ده البداية بت + +392 +00:29:58,650 --> 00:30:03,630 +.. 
بتصير كأدهال Indotheliaأى blood vessels النزع + +393 +00:30:03,630 --> 00:30:12,030 +طلع منه ايش؟ فسك فى مين؟ 100% عامل ايش؟ adhesion + +394 +00:30:12,030 --> 00:30:17,550 +وطبعا تلال adhesion تنشيط لإنه ما طبعا تنشط GB1B + +395 +00:30:17,550 --> 00:30:24,570 +بنشطت مين؟ GB2B3A فهذه بتعمل التانية بيصير في ايش؟ + +396 +00:30:24,570 --> 00:30:31,630 +اجل بتعمل تنشط الأولانى وبنشط التانىفبعمل ايه؟ + +397 +00:30:31,630 --> 00:30:36,930 +Platelet Aggregation وان + +398 +00:30:36,930 --> 00:30:41,570 +الصورة ده هي blood vessels طلع منه tissue factor + +399 +00:30:41,570 --> 00:30:45,130 +مزعق فطلع منه ايه؟ tissue factor tissue factor + +400 +00:30:45,130 --> 00:30:48,630 +بنشط ايه؟ Coagulation factor نشط ايه؟ Coagulation + +401 +00:30:48,630 --> 00:30:52,350 +factor يعني هدا كذلك العوامل نشطت ال platelet وهذا + +402 +00:30:52,350 --> 00:30:57,960 +نشط ايه؟ Coagulation factor التنينبيعملوا Primary + +403 +00:30:57,960 --> 00:31:04,140 +وSecondary Hemostatic Plug وهنا ارتباط لبلاتلت هذا + +404 +00:31:04,140 --> 00:31:10,100 +هي تدعى بالمومي البلالت عملية الاتهيجة كيف يتم + +405 +00:31:10,100 --> 00:31:13,540 +تنشيط ال coagulation factor على سطح البلاتلت بتتم + +406 +00:31:13,540 --> 00:31:21,080 +كلئته يا شباب ماشي؟ هذه البلاتلت ال membrane تبعها + +407 +00:31:21,080 --> 00:31:27,020 +فيه phospholipid ماشي؟ متمثلبالـ Phosphatidyl + +408 +00:31:27,020 --> 00:31:31,720 +Serine Substance Phosphatidyl Serine Substance + +409 +00:31:31,720 --> 00:31:34,940 +موجودة في المنبرين؟ أه موجودة في المنبرين يعني و + +410 +00:31:34,940 --> 00:31:41,580 +قبل binding to any agonist و طبعا هنا الـ Thrombin + +411 +00:31:41,580 --> 00:31:45,800 +هنا is an example of strong agonist شو بيصير؟ يعني + +412 +00:31:45,800 --> 00:31:49,940 +بتتنشط لـ Platelet فشو اللي بيصير؟ بتطلع + +413 +00:31:49,940 --> 00:31:53,740 +Phosphatidyl Serine يعني بتتنشط + +414 +00:31:56,040 --> 00:32:00,140 +activation فتطلع الفوسفة الديالسيرين من جوا ال + +415 +00:32:00,140 --> 00:32:06,740 +platelet إلى سطح ال 
platelet ماشي؟ ايه طلعت + +416 +00:32:06,740 --> 00:32:09,920 +الفوسفة الديالسيرين ولما تطلع الفوسفة الديالسيرين + +417 +00:32:09,920 --> 00:32:15,280 +على سطح ال platelet بتشكل negative charge بتشكل + +418 +00:32:15,280 --> 00:32:19,980 +negative charge وهذا المطلوبNegative charge for + +419 +00:32:19,980 --> 00:32:23,460 +activation of coagulation cascade mechanism عشان + +420 +00:32:23,460 --> 00:32:26,440 +هيك بكرا لما نبقى في ال coagulation فال cascade + +421 +00:32:26,440 --> 00:32:30,660 +mechanism هنعرف أنه upon the presence of negative + +422 +00:32:30,660 --> 00:32:33,700 +charge بيصير فيه activation فمين اللي هو أكثر + +423 +00:32:33,700 --> 00:32:38,780 +negative charge؟ اللي هو جاي من وين؟ جاي من ال + +424 +00:32:38,780 --> 00:32:42,400 +platelet، ماشي، platelet، phospholipidبلادلت + +425 +00:32:42,400 --> 00:32:47,100 +فوسفوليبيت ممبريد ماشي طلعت وكونت نيجاتيب اتشارش + +426 +00:32:47,100 --> 00:32:50,900 +شو باش شططت ال coagulation cascade ميكانيزي اللي + +427 +00:32:50,900 --> 00:32:58,080 +محصلته ثرومبين فورميشي والثرومبين بيشتغل على مين؟ + +428 +00:32:58,080 --> 00:33:00,900 +على الفائبرين و بيحاولوا لمين؟ على الفائبرين و + +429 +00:33:00,900 --> 00:33:04,020 +بيكونوا الاداين ثكابرين أستاتيكوبلاجو هي عبارة عن + +430 +00:33:04,020 --> 00:33:09,920 +أن هي stableStable مش work of fiber وهو ربعا solid + +431 +00:33:09,920 --> 00:33:15,220 +لأنه ببدأ في الأول فرجال ثم solid وبالتالي بيقوّل + +432 +00:33:15,220 --> 00:33:18,740 +جلطة اللي صارت و لا ما بيقوّها لأنه صار على سطح + +433 +00:33:18,740 --> 00:33:21,700 +replated ولا لا يبقى في الأول بيصير فيه primary + +434 +00:33:21,700 --> 00:33:25,660 +hemostatic plug ثم secondary hemostatic plug + +435 +00:33:25,660 --> 00:33:29,740 +formation فبناخد السؤال شباب أنه في هذه العملية في + +436 +00:33:29,740 --> 00:33:33,160 +ثرمبوكسين A2 cell facets بيجاية من وين؟ + +437 +00:33:37,860 --> 00:33:41,120 +ميكانيزم من الاراجدونك أسد اللي جاي من ال + +438 +00:33:41,120 --> 00:33:45,120 +phospholipid membrane، 
بتحوله بالدينا ثرمبوكسين + +439 +00:33:45,120 --> 00:33:50,520 +A2، مين بيعمله inhibition هذا الميكانيزم؟ baby + +440 +00:33:50,520 --> 00:33:55,820 +aspirins، إيش بيعمل ال baby aspirin؟ بيعمل + +441 +00:33:55,820 --> 00:33:58,300 +inhibition للcyclooxygenase enzyme اللي بيحول + +442 +00:33:58,300 --> 00:34:04,880 +الاراجدونك أسد اللي عيش إلى ثرمبوكسين A2طبعا هذه + +443 +00:34:04,880 --> 00:34:08,920 +صورة تتمثل الـ aggregation هي two-platelet جمعت في + +444 +00:34:08,920 --> 00:34:15,420 +النص في الـhyperallergic نجنس يا شباب أنه احنا في + +445 +00:34:15,420 --> 00:34:18,620 +الورقة الطبيعي الـheparin موجود على سطح ال blood + +446 +00:34:18,620 --> 00:34:24,700 +vessels بشكل طبيعي as heparan sulfate لكن لما يصير + +447 +00:34:24,700 --> 00:34:31,120 +فيه مزع مقنوض منهاإنه نوقف عمله، نوقف عمله لإنه + +448 +00:34:31,120 --> 00:34:34,880 +يتكون جلطة، لإنه بيشتغل as anticoagulant، مين + +449 +00:34:34,880 --> 00:34:39,000 +بيوقف عمله؟ Platelet factor 4، Platelet factor 4 + +450 +00:34:39,000 --> 00:34:45,340 +بيشتغل as a neutralizer لليش؟ أخر خطوة من خطوات ال + +451 +00:34:45,340 --> 00:34:50,320 +platelet activation هي عبارة عن شغل ال coagulation + +452 +00:34:50,320 --> 00:34:53,450 +factor، ملاقش، ملاقش علاقة لplatelet في الموضوعكل + +453 +00:34:53,450 --> 00:34:57,510 +اللي بيصير إنه بتكون رشة regulation factor على سطح + +454 +00:34:57,510 --> 00:35:01,290 +الـplatelet بتكون ثرومبين، وثرومبين بيكون fibrin، + +455 +00:35:01,290 --> 00:35:06,090 +الـfibrin stabilize the إيش، the clot، وبالتالي + +456 +00:35:06,090 --> 00:35:13,130 +بيصير فيه clot formation.ماشي + +457 +00:35:13,130 --> 00:35:17,850 +يا شباب، هد ندو سؤال، هد هي الوظائف أو القطوات + +458 +00:35:17,850 --> 00:35:24,820 +التي تمر فيها الـplatelet في أثناء عملية نشاطةطيب، + +459 +00:35:24,820 --> 00:35:28,760 +في عوامل أخرى، في أشيطة أخرى تقوم فيها الـ + +460 +00:35:28,760 --> 00:35:32,520 +Platelet، خلّينا نشوف، يعني لو Platelet بالإضافة + +461 +00:35:32,520 --> 00:35:38,740 +إلى ذلك، اتفقنا عليها أنها 
بتوفر سطح، ساحة معركة، + +462 +00:35:38,740 --> 00:35:44,520 +لمين؟للـ Secondary Hemostasis المتمثل بمشروع الـ + +463 +00:35:44,520 --> 00:35:48,320 +Coagulation Casking Mechanism، مظبوط؟ مين اللي + +464 +00:35:48,320 --> 00:35:52,760 +بيشكل هذا الكلام؟ Platelet Factor تلاتة، ماشي؟ + +465 +00:35:52,760 --> 00:35:57,960 +اللي هو مانا عنفش في البنت، او Platelet الحاجة + +466 +00:35:57,960 --> 00:36:05,920 +التانية إنها بتشكل support داعمة، داعمةو محافظة + +467 +00:36:05,920 --> 00:36:11,280 +على الـ endothelial lining cell لأنها تحافظ على + +468 +00:36:11,280 --> 00:36:17,180 +تابعية البطاريات الطبيعية من خلال إدخال الاختلافات + +469 +00:36:17,180 --> 00:36:24,940 +بينها وبين البطاريات طبعا الـ defective يا شباب أي + +470 +00:36:24,940 --> 00:36:31,720 +عملية من هذا النوع ماتصارش بتعني أنه فيه defective + +471 +00:36:31,720 --> 00:36:36,780 +hemostatic mechanismوإذا كانت بتخص ل primary + +472 +00:36:36,780 --> 00:36:41,520 +hemostesis هذا يعني إنه في خلل بيتمثل هذا الخلل + +473 +00:36:41,520 --> 00:36:45,640 +إما quantitatively وإما qualitatively يا مافيشي + +474 +00:36:45,640 --> 00:36:49,760 +platelet يا ما ال platelet بتشتغلش ماشي يا مافيشي + +475 +00:36:49,760 --> 00:36:52,980 +platelet يا ما ياش ال platelet بتشتغلش + +476 +00:36:52,980 --> 00:36:57,040 +quantitatively or qualitatively تعرفوا الحاجات + +477 +00:36:57,040 --> 00:37:00,840 +اللي هي هذه بتمثل الحاجات المنشرة شباب ADP, + +478 +00:37:01,900 --> 00:37:08,390 +thrombin, collagenCerotinin, Thrombocyanate and + +479 +00:37:08,390 --> 00:37:12,310 +Mechanical Stimuli. Mechanical Stimuli شو المعنى؟ + +480 +00:37:12,310 --> 00:37:18,870 +ال injury. او shear forces اذا زادت. 
shear forces + +481 +00:37:18,870 --> 00:37:22,790 +كمان اذا زادت بتعني انك هذه عبارة ممكن تعمل + +482 +00:37:22,790 --> 00:37:27,890 +mechanical injury.ماشي يا شباب؟ شو بتعني هذه + +483 +00:37:27,890 --> 00:37:33,890 +الشغلات؟ كلنا they are platelet activators.ماشي؟ + +484 +00:37:34,350 --> 00:37:38,690 +فبوكتشفت كورنالودان قالوله إذا كان في different + +485 +00:37:38,690 --> 00:37:42,430 +hormonal stimuli، these are stimulus, substances + +486 +00:37:42,430 --> 00:37:47,870 +وموجودهم يعني إنه في several different receptors + +487 +00:37:47,870 --> 00:37:52,610 +على سطح الـplatelet وإلا كيف بتم ال action تبعهم، + +488 +00:37:52,610 --> 00:37:57,380 +كيف بتم تنفيذ العمل تبعهمكلنا بنعرف ما زالها + +489 +00:37:57,380 --> 00:37:59,760 +chemical substance لازم تمسك فى receptor و ال + +490 +00:37:59,760 --> 00:38:02,560 +receptor يعني فى signal transduction و ال signal + +491 +00:38:02,560 --> 00:38:07,000 +transduction يعني فى function صح؟ مش أخدته؟ لازم + +492 +00:38:07,000 --> 00:38:12,700 +تمسك فى receptor و ال receptor ناشي بيتبعه signal + +493 +00:38:12,700 --> 00:38:17,440 +transduction يعني فى signal بده يتصل داخل النيل + +494 +00:38:17,440 --> 00:38:20,880 +الى ال planet او الخلية و تاعه بيصير فيه ايش؟ + +495 +00:38:20,880 --> 00:38:24,340 +بيصير function ففى multiple signaling pathway + +496 +00:38:24,340 --> 00:38:29,870 +مطبوعةالـ Function كمان من المصطلحات اللي مابين + +497 +00:38:29,870 --> 00:38:32,890 +عليها مصطلح الـ Agonist والـ Agonist اتعرفناه بإن + +498 +00:38:32,890 --> 00:38:36,970 +هو عبارة عن أي مادة ترتبط بسطح لـ Platelet و بتعمل + +499 +00:38:36,970 --> 00:38:43,130 +Platelet Activation ترتبط بسطح لـ Platelet و بتعمل + +500 +00:38:43,130 --> 00:38:47,370 +Platelet Activation وهذه أمثلة على Agonist مختلفة + +501 +00:38:47,370 --> 00:38:52,390 +لـ ADD, Collagen, Tropin, Epinephrine, Arachidonic + +502 +00:38:52,390 --> 00:38:52,730 +Acid, + +503 +00:39:04,210 --> 00:39:08,450 +هذا مثال من مصادر الوصول للتعليقات، وهذا مثال من + +504 +00:39:08,450 
--> 00:39:09,810 +مصادر الوصول للتعليقات، وهذا مثال من مصادر الوصول + +505 +00:39:09,810 --> 00:39:11,210 +للتعليقات، وهذا مثال من مصادر الوصول للتعليقات، + +506 +00:39:11,210 --> 00:39:14,410 +وهذا مثال من مصادر الوصول للتعليقات، وهذا مثال من + +507 +00:39:14,410 --> 00:39:14,790 +مصادر الوصول للتعليقات، وهذا مثال من مصادر الوصول + +508 +00:39:14,790 --> 00:39:18,230 +للتعليقات، وهذا مثال من مصادر الوصول للتعليقات، + +509 +00:39:18,230 --> 00:39:21,070 +وهذا مثال من مصادر الوصول للتعليقات، وهذا مثال من + +510 +00:39:21,070 --> 00:39:24,830 +مصادر الوصول للتعليقات، وهذا مثال من مصادر الوصول + +511 +00:39:24,830 --> 00:39:27,730 +للتعليقات، وهذا مثال + diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/FIuCiM3ywWg.srt b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/FIuCiM3ywWg.srt new file mode 100644 index 0000000000000000000000000000000000000000..64c80904657f7da588388b4340e4a73d682bd542 --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/FIuCiM3ywWg.srt @@ -0,0 +1,1811 @@ +1 +00:00:23,560 --> 00:00:27,200 +عايز أنا أتوكّل ولا اجمعين؟ طيب احنا اليوم إن شاء + +2 +00:00:27,200 --> 00:00:29,920 +الله، المرة الفاتت كنا بنحكي في thrombotic disease + +3 +00:00:29,920 --> 00:00:34,460 +بشكل عام وعملنا لها classification وقولنا الشرحة + +4 +00:00:34,460 --> 00:00:38,980 +الـ classification اللي بتقسمها إلى ثلاث أحسن هي + +5 +00:00:38,980 --> 00:00:43,840 +inherited و acquired و mixed inherited و acquired + +6 +00:00:43,840 --> 00:00:47,580 +و mixed ثم بدينا ندرب أمثلة على كل نوع من هذه + +7 +00:00:47,580 --> 00:00:55,430 +الألوان ويبدو مرة ثانية حاجات كلها للذكر يعني و + +8 +00:00:55,430 --> 00:00:59,790 +ليس الحصر، أنا أخدت أمثلة على هذا الموضوع، إنما في + +9 +00:00:59,790 --> 00:01:03,510 +أمثلة كثيرة على ذلك، ماشي؟ طيب، منها factor five + +10 +00:01:03,510 --> 00:01:06,070 +-laden، اللي هي الـ .. الـ .. الـ .. 
الـ inherited, + +11 +00:01:06,470 --> 00:01:10,170 +factor five-laden, OTG و ASO, antithrombin, glated + +12 +00:01:10,170 --> 00:01:13,190 +deficiency, abnormal prothrombin و sticky, + +13 +00:01:13,770 --> 00:01:17,960 +platelet syndrome هنشوف كل واحد منهم على حدى + +14 +00:01:17,960 --> 00:01:25,020 +ماشي ويا ريت نشوف أول تقسيمة ونكملها الـ + +15 +00:01:25,020 --> 00:01:30,860 +acquired لاحظوا بقى ممكن تدرج تحتها عدة أنواع منها + +16 +00:01:30,860 --> 00:01:35,740 +الـ advancing age طبعًا العمر وكل ما الإنسان كبر في + +17 +00:01:35,740 --> 00:01:39,620 +العمر كل ما زادت إمكانية حدوث جلطات + +18 +00:01:41,740 --> 00:01:46,060 +للوريد والجواب، والأخرى pre-orthoposis جلطة سابقة + +19 +00:01:46,060 --> 00:01:50,500 +الجلطة السابقة هي عبارة عن risk factor لجلطة قادمة + +20 +00:01:50,500 --> 00:01:56,060 +لجلطة قادمة ثم immobilization الـ اتحاركة major + +21 +00:01:56,060 --> 00:02:01,180 +surgery طبيعي، خايف الواحد لما بيصير فيه جريح + +22 +00:02:01,180 --> 00:02:06,560 +إمكانية حدوث جلطة، والدّ ماليجنانسي بيصير فيها + +23 +00:02:06,560 --> 00:02:12,150 +sustained release أو إمكانية إنه يتبعتشه factor + +24 +00:02:12,150 --> 00:02:16,490 +عالي جدًا، استروجين ثم الـ antiphospholipid antibody + +25 +00:02:16,490 --> 00:02:19,190 +ماهيله proliferative disorder حتة لإنه يستخدم + +26 +00:02:19,190 --> 00:02:25,490 +بستروبينيا and prolonged air travel طبيعي الناس + +27 +00:02:25,490 --> 00:02:29,790 +اللي بتسافر فترة طويلة في الطيارة اليوم وارد جدًا + +28 +00:02:29,790 --> 00:02:31,210 +لأنه immobilization + +29 +00:02:34,660 --> 00:02:42,240 +بتحجز الدم تحت + +30 +00:02:42,240 --> 00:02:47,100 +وإن كان يتحدث جلطة عالية طبعًا نشوف النوع الثالث + +31 +00:02:47,100 --> 00:02:50,680 +وهو الـ mixed or الـ unknown hyperhomocystinemia + +32 +00:02:50,680 --> 00:02:54,760 +اتعرفت الـ homocysteine؟ ماشي، وين أخذنا؟ + +33 +00:02:54,760 --> 00:02:56,880 +hyperhomocysteinemia + +34 +00:03:00,440 --> 00:03:07,760 +أخذناها في الـ vitamin folic acid and vitamin B12 + +35 +00:03:07,760 
--> 00:03:13,160 +ماشي، لما أخذنا الـ biochemistry of both B12 و + +36 +00:03:13,160 --> 00:03:20,840 +folate عرفنا أنه الـ homocysteine بتتحول إلى مثيونين + +37 +00:03:20,840 --> 00:03:25,140 +فاكرينه؟ 100% folded in هذول طبعًا إذا ما اتحولش + +38 +00:03:25,140 --> 00:03:27,860 +بيصير في accumulation of homocysteine بيصير + +39 +00:03:54,820 --> 00:04:00,220 +نبدأ في أول هذه الأسباب وهي inherited form الأسباب + +40 +00:04:00,220 --> 00:04:07,360 +الوراثية وأشهرها هو factor V lysin factor V خداشة + +41 +00:04:07,360 --> 00:04:11,040 +بقى صح؟ اللي هو دور مهم في الـ coagulation cascade؟ + +42 +00:04:11,040 --> 00:04:16,220 +شو بيعمل؟ بيكوّن بروترومبينيز كومبلكس مهم جدًا + +43 +00:04:16,220 --> 00:04:21,480 +وبالتالي بيعمل clot formation عشان هيك عند مرحلة + +44 +00:04:21,480 --> 00:04:28,030 +معينة في anticoagulant بيوقفوا مين هو؟ protein C + +45 +00:04:28,030 --> 00:04:32,550 +لما بيتنشط وبيتحول لـ activated protein C لما كنا + +46 +00:04:32,550 --> 00:04:39,850 +اتفدناها 100% وبيعمل degradation لـ factor 5 و 8 لو + +47 +00:04:39,850 --> 00:04:45,410 +وجدوا بعض المرات ليدل العالمة ليدل وهي عبارة عن + +48 +00:04:45,410 --> 00:04:49,550 +هولندية وجدت بعض المرات factor 5 + +49 +00:05:01,470 --> 00:05:08,510 +في مين؟ في factor 5 في مواقع ارتباط protein C في + +50 +00:05:08,510 --> 00:05:12,950 +factor 5 واشي المواقع؟ شوفوا هي المواقع شباب هي و + +51 +00:05:12,950 --> 00:05:18,810 +factor 5 النشط قالوا في ثلاث مواقع بيرتبط في الـ + +52 +00:05:18,810 --> 00:05:24,980 +protein C الـ activated protein C في الـ factor + +53 +00:05:24,980 --> 00:05:31,240 +الخامسة النشط وبيقطعوا عند هذه النقاط + +54 +00:05:31,240 --> 00:05:40,080 +إيش يجيبوا نقطة 306 و 506 و 1765 هذه المواقع فيها + +55 +00:05:40,080 --> 00:05:44,600 +arginine لو استبدلت بأي amino acid ثاني ستكون + +56 +00:05:44,600 --> 00:05:49,240 +مميزة أشهر الـ mutation اللي موجودة هو استبدال الـ + +57 +00:05:49,240 --> 00:05:54,320 +arginine بالـ glutamine هو اكتشف البروتين C لا يستطيع + +58 +00:05:54,320 --> 00:05:58,440 
+أن يرتبط ويتعرف على الـ factor 5 النشط وبالتالي + +59 +00:05:58,440 --> 00:06:06,220 +يبقى الـ factor 5 نشط، صح؟ ماذا يعمل؟ جلطة، صح؟ + +60 +00:06:06,220 --> 00:06:10,400 +عشان هيك الناس دولة عندهم إيش؟ عندهم risk + +61 +00:06:10,400 --> 00:06:14,280 +factor for إيش؟ for clot formation بيسموا هذه + +62 +00:06:14,280 --> 00:06:22,490 +الحالة شباب activated protein C resistant إيش + +63 +00:06:22,490 --> 00:06:27,870 +بيسموها activated protein C resistance أو بيسموها + +64 +00:06:27,870 --> 00:06:37,290 +باسم العالم اللي اكتشفتها factor 5-lightening عيد + +65 +00:06:37,290 --> 00:06:39,550 +ثاني فهمتوها؟ + +66 +00:06:49,100 --> 00:06:57,340 +وندلنا هذه الاميراس الطبيعي الاميراس الثالث طبعًا + +67 +00:06:57,340 --> 00:07:02,080 +one of the common cause of thrombophilia وعرفنا + +68 +00:07:02,080 --> 00:07:05,800 +للـ thrombophilia هي عبارة عن حالة بالزبط فيها + +69 +00:07:05,800 --> 00:07:09,340 +increased the tendency of thrombosis ماشي؟ + +70 +00:07:09,340 --> 00:07:15,080 +thrombophilia بيسموها يعتبر بيمثل 20% من الحالات + +71 +00:07:17,360 --> 00:07:21,060 +بالبقابة الـ antithrombin 3 والبروتين C و S بيمثل + +72 +00:07:21,060 --> 00:07:29,620 +5% وجوده زاد risk factor ثاني بيعمل motion ماشي الـ + +73 +00:07:29,620 --> 00:07:32,640 +activated protein C بيعمل inhibition لخمسة أو + +74 +00:07:32,640 --> 00:07:38,200 +ثمانية الناشطين وهي عدم قدرة الـ activated protein + +75 +00:07:38,200 --> 00:07:43,020 +C للارتباط to inhibit factor 5 بيعمل نتيجة لـ + +76 +00:07:43,020 --> 00:07:46,120 +mutation بيزيد من إمكانية الـ clot formation + +77 +00:07:46,120 --> 00:07:53,560 +ويوجد نوعين من الناس عندهم هذه المشكلة ناس + +78 +00:07:53,560 --> 00:07:58,520 +heterozygous وناس homozygous الـ heterozygous + +79 +00:07:58,520 --> 00:08:07,160 +individual للحالة هذه كانوا بزيادة احتمالية تكون + +80 +00:08:07,160 --> 00:08:12,300 +الجلطة عندهم من خمسة إلى عشر أضعاف الناس الطبيعي + +81 +00:08:12,300 --> 00:08:19,560 +بينما الـ homozygous for يعني الناس اللي عندهم + +82 +00:08:19,560 
--> 00:08:21,780 +activated protein C resistance + +83 +00:08:25,080 --> 00:08:29,960 +بتزيد احتمالية حدوث جلطة عندهم من خمسين لـ مية في + +84 +00:08:29,960 --> 00:08:34,240 +المية في زيادة في الـ risk of thrombo embolism + +85 +00:08:34,240 --> 00:08:40,220 +وبالتالي مثلًا عالي ماشي يا شباب هذه عصاية ثانية + +86 +00:08:40,220 --> 00:08:47,360 +بتقول إن هيتيروزايجس بيزيد الـ risk of venar + +87 +00:08:47,360 --> 00:08:52,520 +thrombosis سبع أضعاف، لو واحد هوموزايجس ثمانين ضعف + +88 +00:08:52,520 --> 00:08:53,860 +نفس المسبب الفاتر + +89 +00:08:59,040 --> 00:09:04,820 +الحبوب منع الحمل تزيد ثلاث أضعاف وقرأ + +90 +00:09:04,820 --> 00:09:08,940 +الـ contraceptive factor 5-platin 5-platin بتعرف فوق + +91 +00:09:08,940 --> 00:09:15,140 +إيش بتعمل الحبوب منع الحمل mechanism of action + +92 +00:09:15,140 --> 00:09:20,260 +أو الـ mode of action الـ contraceptive بنزه شباب + +93 +00:09:20,260 --> 00:09:24,820 +تعمل inhibition للـ antithrombin 3 + +94 +00:09:31,230 --> 00:09:35,630 +ماشي، شو يعني؟ يعني inhibition لـ naturally + +95 +00:09:35,630 --> 00:09:39,090 +occurring anticoagulant ولما تقلل الـ + +96 +00:09:39,090 --> 00:09:44,690 +anticoagulant شو يصير؟ تصير clotting، بسبب أنه + +97 +00:09:44,690 --> 00:09:47,230 +بيصير فيه خلل في الـ hemostatic level وبسرعة تصير + +98 +00:09:47,230 --> 00:09:51,910 +إيش الـ hemostatic level؟ protein C deficiency حالة + +99 +00:09:51,910 --> 00:09:55,110 +بتختلف وبدنا نميز بينها وبين الـ activated + +100 +00:09:55,110 --> 00:09:58,530 +protein C resistance أو فارقة الفارقة الأدوية، هذه + +101 +00:09:58,530 --> 00:10:02,050 +كانت حالة لو عرفناها ماذا يفعل البروتين C deficiency؟ + +102 +00:10:09,190 --> 00:10:13,010 +مش protein C إما بيكون على شكل heterozygous or + +103 +00:10:13,010 --> 00:10:16,950 +homozygous صح؟ طبعًا protein C لـ heterozygous + +104 +00:10:16,950 --> 00:10:21,030 +بيكون protein C عنده خمسين في المية الـ homozygous + +105 +00:10:21,030 --> 00:10:24,910 +complete أمسك والـ homozygous عشان الشباب يشخص + +106 
+00:10:24,910 --> 00:10:28,850 +يشخص الناس اللي homozygous عند الولادة منذ الولادة + +107 +00:10:28,850 --> 00:10:32,470 +لأنه بنولد الطفل على طول يكون عنده protein C + +108 +00:10:32,470 --> 00:10:37,650 +deficiency يعني خلف الـ hemostatic level فاكرين + +109 +00:10:37,650 --> 00:10:44,780 +الميزان؟ بتكون بالضبط في الشيء توازن ما بين الـ + +110 +00:10:44,780 --> 00:10:48,580 +procoagulant والـ anticoagulant فبيخلي المريض في + +111 +00:10:48,580 --> 00:10:51,800 +thrombi formation وين؟ في الـ microvasculature + +112 +00:10:51,800 --> 00:10:52,680 +system + +113 +00:10:55,920 --> 00:11:00,260 +أشهرها ملاحظة .. أكثرها ملاحظة هي اللي بتصير في الـ + +114 +00:11:00,260 --> 00:11:05,320 +skin ممكن تصير DIC وإذا صار في DIC يعني جلطة و + +115 +00:11:05,320 --> 00:11:10,020 +نزيف يعني حيصير Necrosis صح؟ وإذا صار Necrosis + +116 +00:11:10,020 --> 00:11:15,820 +بيصير حالة بيسموها purpura fulminans ماشي؟ هذه + +117 +00:11:15,820 --> 00:11:22,480 +ممكن تصير في الـ skin وممكن تصير في Intra + +118 +00:11:22,480 --> 00:11:27,260 +-terranial Hemorrhage في الدمار ماذا يحصل في الطفل؟ + +119 +00:11:27,260 --> 00:11:33,880 +بتتبع أمها وبالتالي الأطفال اللي بنولده بهذه الصفة + +120 +00:11:33,880 --> 00:11:40,540 +بيكون إمكانية الإصابة بالـ formulas عالية جدًا + +121 +00:11:40,540 --> 00:11:46,980 +وبالتالي ممكن يموت خلال ساعات من الولادة الـ + +122 +00:11:46,980 --> 00:11:53,170 +protein أسس شباب تعرفوا دوره صح؟ Cofactor لمين + +123 +00:11:53,170 --> 00:11:58,070 +لبروتين C وقلنا هو اللي بيمسك في بروتين C النشط مع + +124 +00:11:58,070 --> 00:12:02,230 +الـ thrombomodulin مع بروتين S مظبوط في وجود + +125 +00:12:02,230 --> 00:12:07,610 +الـ كالسيوم وبيعمل degradation لمين؟ لخمسة تمام فنفسها + +126 +00:12:07,610 --> 00:12:11,430 +نفسه ما ينطبق على بروتين C ينطبق على بروتين S وكلنا + +127 +00:12:11,430 --> 00:12:16,030 +بنعرف أن بروتين S موجود على شكلتين Free 40% + +128 +00:12:16,030 --> 00:12:21,160 +والباقي bound 60% free والباقي bound + +129 +00:12:21,160 --> 00:12:25,920 +C4A 100 + +130 
+00:12:25,920 --> 00:12:34,740 +% C4A 100% C4A 100% C4A 100% C4A 100% C4A 100% + +131 +00:12:34,740 --> 00:12:35,640 +100% C4A 100% C4A 100% C4A 100% C4A 100% C4A + +132 +00:12:35,640 --> 00:12:36,120 +100% C4A 100% C4A 100% C4A 100% C4A 100% + +133 +00:12:36,120 --> 00:12:36,180 +100% C4A 100% C4A 100% C4A 100% C4A 100% + +134 +00:12:36,180 --> 00:12:37,840 +C4A 100% C4A 100% C4A 100% C4A 100% C4A 100% + +135 +00:12:37,840 --> 00:12:44,800 +100% C4A 100% C4A 100% C4A + +136 +00:12:44,800 --> 00:12:47,870 +100 برضه عرفنا دوره ولا لأ ال incidence واحدة في الألف + +137 +00:12:47,870 --> 00:12:50,770 +لواحدة في الخمسة لأ برضه في منه heterozygous و + +138 +00:12:50,770 --> 00:12:55,790 +homozygous four لكن دوره مهم جدا أن الـ interferon بينقله + +139 +00:12:55,790 --> 00:13:00,110 +مش تلاقي كام factor على كل الـ serine proteins صح؟ + +140 +00:13:00,110 --> 00:13:06,270 +و بتزيد نسبة أو سرعة الـ inactivation of serine + +141 +00:13:06,270 --> 00:13:15,690 +proteins إلى الضعف عند مرضى الهيموفيليا + +142 +00:13:15,690 --> 00:13:20,550 +مظبوط اكسلانهيت + +143 +00:13:20,550 --> 00:13:26,310 +اللي هو الـ action of الهيموفيليا بيقولوا بزيادة + +144 +00:13:26,310 --> 00:13:30,090 +الإصابة بزيادة العمر و بالمناسبة شباب الانتفاب + +145 +00:13:30,090 --> 00:13:34,990 +التلاتة غيابه قوي جدا الـ heterozygous four إذا + +146 +00:13:34,990 --> 00:13:40,330 +شوفوها حتى الـ heterozygous four بدون أي risk + +147 +00:13:40,330 --> 00:13:48,470 +factor إضافي ممكن يصير عنده جرعة عالية يعني في أوامر + +148 +00:13:48,470 --> 00:13:52,150 +كثيرة تحتاج إلى other risk factor وفي الـ + +149 +00:13:52,150 --> 00:13:57,970 +heterozygous form يعني ما نشوفهاش لكن في الـ + +150 +00:13:57,970 --> 00:14:01,850 +antithrombin تلاتة حتى الـ heterozygous form و بدون + +151 +00:14:01,850 --> 00:14:06,150 +أي risk factor الإمكانية أن يصير thromboporesin + +152 +00:14:06,150 --> 00:14:06,750 +عالية + +153 +00:14:15,530 --> 00:14:20,850 +أو خامس من الاسم abnormal Prothrombin ماشي هو + +154 +00:14:20,850 --> 
00:14:23,170 +عبارة عن Prothrombin صار فيه mutation في الموضع + +155 +00:14:23,170 --> 00:14:32,230 +هذا 20210A ماشي شو قد هذا؟ قالوا قد إلى زيادة + +156 +00:14:32,230 --> 00:14:35,710 +مستوى الـ Prothrombin إذا زاد الـ Prothrombin ماذا + +157 +00:14:35,710 --> 00:14:41,270 +تريد أن تحصل على؟ بالضبط لزيادة الثرومبين، جلطة، + +158 +00:14:41,270 --> 00:14:45,310 +حسنا؟ لكي الناس اللي ممكن يفعلوا جلطة على طبعهم من + +159 +00:14:45,310 --> 00:14:54,710 +قياس، من level، من شخص بالـ PCR technique. ستة، + +160 +00:14:54,710 --> 00:15:00,550 +Sticky Platelet Syndrome. Sticky، ماذا يعني؟ لزج + +161 +00:15:02,100 --> 00:15:08,620 +Sticky Platelet Syndrome ناشي؟ ونشوفها بكثرة في + +162 +00:15:08,620 --> 00:15:15,160 +حالات الذبحة الصدرية لعمالي ناشي؟ especially in + +163 +00:15:15,160 --> 00:15:21,840 +arterial thrombosis وكمان + +164 +00:15:21,840 --> 00:15:25,000 +نشوفها في حالات thromboembolism عند ناس قاعدين + +165 +00:15:25,000 --> 00:15:30,520 +بياخدوا warfarin واحد بياخد warfarin يعني بياخد anti + +166 +00:15:30,520 --> 00:15:33,700 +-coagulant، بتتوقع يا سيد عنده جلطة؟ لو صار عنده + +167 +00:15:33,700 --> 00:15:37,920 +جلطة، بدك تشك في إيه إيش؟ في .. 
لا، في حاجة + +168 +00:15:37,920 --> 00:15:41,840 +ثانية، غير الـ coagulation factor، لإن هو بوقف الـ + +169 +00:15:41,840 --> 00:15:46,940 +coagulation factor، بدك تشك في عالم آخر من + +170 +00:15:46,940 --> 00:15:50,000 +العوامل الهمستيزية، primary مثلا، تروح الـ primary، + +171 +00:15:50,000 --> 00:15:51,140 +مانو يفيد للتانية؟ + +172 +00:15:55,880 --> 00:15:59,300 +أو Platelet Defect زي هيك ستيكي بليتليت سندروم + +173 +00:15:59,300 --> 00:16:05,640 +إذا كان بدك تشخصها و بياخد أسبرين و أوقف الأسبرين + +174 +00:16:05,640 --> 00:16:12,140 +at least fourteen days وبعدين ابدا أشتغل هذه + +175 +00:16:12,140 --> 00:16:18,900 +العوامل اللي هو إيش الـ NMD الأكوان ده شباب زي ما + +176 +00:16:18,900 --> 00:16:23,720 +شوفته كثيرة لكن أنا أختارت لكم موضوع مهم جدا و هو + +177 +00:16:23,720 --> 00:16:28,390 +الـ antiphospholipid syndrome أخذته في العمل إيه؟ بس + +178 +00:16:28,390 --> 00:16:34,110 +أنا .. لأ بس كنت .. كنت كاجل آه شرحه؟ بس في البحث + +179 +00:16:34,110 --> 00:16:38,270 +عن الـ BDT أخر ما عرفت اسمه أخر ما عرفت اسمه أخر ما + +180 +00:16:38,270 --> 00:16:38,310 +عرفت اسمه أخر ما عرفت اسمه أخر ما عرفت اسمه + +181 +00:16:38,310 --> 00:16:38,350 +عرفت اسمه أخر ما عرفت اسمه أخر ما عرفت اسمه + +182 +00:16:38,350 --> 00:16:38,790 +عرفت اسمه أخر ما عرفت اسمه أخر ما عرفت اسمه + +183 +00:16:38,790 --> 00:16:40,210 +عرفت اسمه أخر ما عرفت اسمه أخر ما عرفت اسمه + +184 +00:16:40,210 --> 00:16:49,030 +عرفت اسمه أخر ما + +185 +00:16:49,030 --> 00:16:55,230 +عرفت اسمه أخر ما عرفت اسم + +186 +00:16:55,960 --> 00:16:59,960 +على الـ phospholipid membrane لماذا يجب أن نعرف + +187 +00:16:59,960 --> 00:17:02,100 +phospholipid membrane؟ تحفيز الـ coagulation + +188 +00:17:02,100 --> 00:17:05,660 +factor بالضبط كذلك مثل الـ Factor 3 المشغولة هي + +189 +00:17:05,660 --> 00:17:12,020 +عبارة عن ساحة معارك الأمين ساحة معارك الأمين ساحة + +190 +00:17:12,020 --> 00:17:12,560 +معارك الأمين، ساحة معارك الأمين، ساحة معارك + +191 +00:17:12,560 --> 00:17:12,660 +الأمين، ساحة معارك الأمين، ساحة 
معارك الأمين، + +192 +00:17:12,660 --> 00:17:18,420 +معارك الأمين، ساحة معارك الأمين، ساحة معارك + +193 +00:17:18,420 --> 00:17:21,540 +الأمين، + +194 +00:17:21,540 --> 00:17:23,780 +ساحة معارك + +195 +00:17:26,180 --> 00:17:30,820 +بتمنع دي مساحة المعرضة، بتنشغل، إذا إن شغلت الـ + +196 +00:17:30,820 --> 00:17:35,260 +coagulation factor بتشتغل، بتشتغلش، ماشي، وبتاني + +197 +00:17:35,260 --> 00:17:41,420 +شو بيصير؟ bleeding، و هذا اللي بيصير القراءة + +198 +00:17:41,420 --> 00:17:45,520 +الأولية لو واحد عنده antiphospholipid antibody + +199 +00:17:45,520 --> 00:17:52,080 +وعملت له BT و BTT، mainly BTT، هتبقى إيه prolong + +200 +00:17:53,450 --> 00:17:57,850 +هتلاقيهاش prolong واحد عنده الفحص الـ prolong تشوف + +201 +00:17:57,850 --> 00:18:04,230 +تشوف فيهاش بغض النظر تشوف تشوف فيهاش فيه + +202 +00:18:04,230 --> 00:18:11,190 +coagulation factor deficiency في خلل بس اللي بيصير + +203 +00:18:11,190 --> 00:18:16,310 +لأ يعني اللي بيصير و بيصير bleeding واحد عنده + +204 +00:18:16,310 --> 00:18:19,730 +prolong يعني أول ما تشوف فيه bleeding بس اللي + +205 +00:18:19,730 --> 00:18:21,390 +بيصير عند المريض in vivo + +206 +00:18:24,550 --> 00:18:30,150 +جلطة .. بس سببها مش معلوم .. إيه؟ أحكي .. والله + +207 +00:18:30,150 --> 00:18:32,670 +سببها يعني مش معلوم فن .. 
لأ معلوم فن، قاعد + +208 +00:18:32,670 --> 00:18:37,650 +بحكيلك، Antibody، مش كتر Phospholipid، مفهوم عليا؟ + +209 +00:18:37,650 --> 00:18:41,450 +هي Antiphospholipid Antibody، over syndrome اللي + +210 +00:18:41,450 --> 00:18:44,550 +بيصير عند المريض جلطة لأن الناس هدول بيعانوا من + +211 +00:18:44,550 --> 00:18:48,950 +جلطة لكن لو فحصنا لهم in vitro بنلاقي الفحص في + +212 +00:18:48,950 --> 00:18:52,930 +البرنامج مش راكب صح؟ صح مش راكب ليش؟ شو اللي + +213 +00:18:52,930 --> 00:18:57,410 +بيصير؟ اللي بيصير يا شباب أن in vivo this + +214 +00:18:57,410 --> 00:19:01,470 +antibody بتعمل شغل تاني بتعمل إيه؟ شغل تاني الشغل + +215 +00:19:01,470 --> 00:19:09,370 +الأولانية induction أو induce ماشي high level of + +216 +00:19:10,820 --> 00:19:14,280 +الـ Coagulation Factors أكثر Coagulation Factors + +217 +00:19:14,280 --> 00:19:20,820 +يعني بتزامد وبتنشط الـ Coagulation Factors النشطة + +218 +00:19:20,820 --> 00:19:26,700 +بزيادة تركيزها نمرا اتنين طبعا إذا زادت الـ + +219 +00:19:26,700 --> 00:19:28,960 +Coagulation Factors النشطة بتزيد الجانب طبعا لأ + +220 +00:19:28,960 --> 00:19:34,520 +نمرا اتنين بتعمل Inhibition لـ tissue plasminogen + +221 +00:19:34,520 --> 00:19:41,370 +activator tissue plasminogen activator مفهومش يبقى + +222 +00:19:41,370 --> 00:19:47,790 +tissue, plasma reactivator إيش دول؟ يعني إحنا + +223 +00:19:47,790 --> 00:19:52,090 +بنعمل activation of fibrinolysis طب و إذا ما عناه + +224 +00:19:52,090 --> 00:19:56,710 +إيش بيصير؟ بيصير جلد فاهمين عليا؟ يبقى الـ antibody + +225 +00:19:56,710 --> 00:20:01,370 +في الداخل جسم in vivo عمل جلد لكن الـ indication in + +226 +00:20:01,370 --> 00:20:06,970 +vitro لأ معمل جلد عمل bleeding صح؟ فرجت لنا كمان + +227 +00:20:06,970 --> 00:20:09,350 +حد فاصلا، this antibody mostly + +228 +00:20:16,200 --> 00:20:19,700 +وLupus Anticoagula وقسموها لنوعين Primary + +229 +00:20:19,700 --> 00:20:26,260 +وSecondary والـ Primary وSecondary + +230 +00:20:26,260 --> 00:20:33,300 +هي تقسيمة ممكن تتقسم إلى Classification ثانية حسب + 
+231 +00:20:33,300 --> 00:20:36,560 +الـ Function of Antibody أو أصل الـ Antibody + +232 +00:20:44,460 --> 00:20:49,300 +أو Infection-Induced Antibiotic أنتجت نتيجة أنتجت + +233 +00:20:49,300 --> 00:20:49,840 +أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت + +234 +00:20:49,840 --> 00:20:52,900 +أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت + +235 +00:20:52,900 --> 00:20:53,740 +أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت + +236 +00:20:53,740 --> 00:20:54,500 +أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت + +237 +00:20:54,500 --> 00:20:55,480 +أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت أنتجت + +238 +00:20:55,480 --> 00:20:55,740 +أنتجت أنتجت + +239 +00:21:13,520 --> 00:21:17,640 +Antibody which affect Clotestin إيش بتعمل؟ + +240 +00:21:17,640 --> 00:21:23,860 +Clotestin عشان هي كالفياسة شباب للنوعين المختلفة I + +241 +00:21:23,860 --> 00:21:27,920 +tell بقولنا في نوعين Anti-cardio-lipid ولو بس + +242 +00:21:27,920 --> 00:21:32,200 +Anti-coagulant ماشي وممكن يكونوا Primary أو + +243 +00:21:32,200 --> 00:21:37,920 +Secondary وممكن يكونوا Autoimmune أو Infection + +244 +00:21:37,920 --> 00:21:43,070 +induced ماشي بالمناسبة الـ autoimmune are + +245 +00:21:43,070 --> 00:21:47,330 +permanent مش فيش منها الواحد لكن الـ infection + +246 +00:21:47,330 --> 00:21:56,050 +induced are transient ناشي؟ طيب الـ anticarbureting + +247 +00:21:56,050 --> 00:22:03,390 +اللي هو IgG وIgM و IgG هو الأهم لأن IgM بتشوفه في + +248 +00:22:03,390 --> 00:22:08,210 +حالات معينة فقط و النوع الثاني هو Lupus + +249 +00:22:08,210 --> 00:22:11,310 +anticoagulant يبقى فيه Anti-cardiolipin و Lupus + +250 +00:22:11,310 --> 00:22:14,950 +Anticoagulant فيه نوع ثالث حتى Beta-2-glycoprotein + +251 +00:22:14,950 --> 00:22:21,890 +-1 هنجا ان شاء الله حاجة تتطرح إليه ال Lupus + +252 +00:22:21,890 --> 00:22:24,830 +Anticoagulant هي عبارة عن antibody بتأثر على ال + +253 +00:22:24,830 --> 00:22:28,770 +clotting time على ال clotting test ال primary + +254 +00:22:28,770 --> 00:22:32,810 +antiphospholipid syndrome بتعمل thromboembolic + 
+255 +00:22:32,810 --> 00:22:37,230 +disease وممكن تعمل miscarriage إيش يعني + +256 +00:22:37,230 --> 00:22:38,030 +miscarriage؟ + +257 +00:22:42,270 --> 00:22:45,630 +ممكن تعمل intrauterine death + +258 +00:23:08,380 --> 00:23:11,800 +لماذا تتميز هذه الحالة؟ لأنها تتميز من وجود + +259 +00:23:11,800 --> 00:23:16,360 +arterial organothrombosis فيه thrombocytopenia فيه + +260 +00:23:16,360 --> 00:23:21,260 +recurrent fetal loss وغالبا بيكون مصحوبة ب + +261 +00:23:21,260 --> 00:23:26,700 +antiphospholipid antibody مرتبطة ب phospholipid or + +262 +00:23:26,700 --> 00:23:33,060 +protein complex اللي برتبط فيها مفهوم شباب وقلنا + +263 +00:23:33,060 --> 00:23:37,340 +نوعين هيهم النوع الأولاني IgG أو IgM ممكن يكونوا + +264 +00:23:37,340 --> 00:23:49,300 +IgG IgA Directed to a + +265 +00:23:49,300 --> 00:23:53,360 +protein زي الـ Cardiolipin هو عبارة عن بروتينات + +266 +00:23:53,360 --> 00:23:56,640 +موجودة على الفسفوليبيد β2 Glycoprotein + +267 +00:23:56,640 --> 00:24:02,260 +برضه عبارة عن protein أو البروترومبين كلهم مرتبطوا + +268 +00:24:02,260 --> 00:24:05,620 +بالفسفوليبيد membranes فلو واحد جاي يرتبط في + +269 +00:24:05,620 --> 00:24:12,180 +phospholipid membrane وصالب وانبسك ماشي ممسوك لا + +270 +00:24:12,180 --> 00:24:17,140 +أستطيع انه يعيش انه هتنقش النوع التاني هو lupus + +271 +00:24:17,140 --> 00:24:19,200 +anticoagulant وقولتلكوا هالي عبارة عن ال plot + +272 +00:24:19,200 --> 00:24:23,420 +بيستأثير وهي التقسيمة autoimmune وinfection + +273 +00:24:23,420 --> 00:24:25,460 +induced ماشي + +274 +00:24:29,560 --> 00:24:32,840 +هذا التفسير اللي اتوه فاسطرتلكوا يعني هذا in vivo + +275 +00:24:32,840 --> 00:24:37,480 +these antibodies are associated with thrombosis + +276 +00:24:37,480 --> 00:24:45,240 +شرحتلكوا ليش؟ ليش؟ بتزود تركيز ال coagulation + +277 +00:24:45,240 --> 00:24:53,020 +factor نشطة بتعمل inhibition لمين؟ تشوف لازمه + +278 +00:24:53,020 --> 00:25:00,830 +جاهتك، ماشي؟ وبتالي بتعمل إيه؟ جلطة in vitro لأ + +279 +00:25:00,830 --> 00:25:13,130 +بتعمل 
anticoagulant activity لأنه بالظبط لأ in + +280 +00:25:13,130 --> 00:25:17,310 +vivo هذا الكلام بيصير in vitro لأ هي بتعتمد على + +281 +00:25:17,310 --> 00:25:23,470 +عدم عمل ال coagulation factor فحط فبالتالي الفحص + +282 +00:25:23,470 --> 00:25:24,190 +بيطلع prolonged + +283 +00:25:26,870 --> 00:25:32,450 +وبالتالي الفحص إيش بيطلع الـ Prolonged طيب ما هي + +284 +00:25:32,450 --> 00:25:37,750 +الحالات اللي مطلوب منها نعمل فيها Screening للـ + +285 +00:25:37,750 --> 00:25:42,310 +Antiphospholipid-7؟ قالوا أول جلطة تيجي في سن + +286 +00:25:42,310 --> 00:25:46,630 +مبكرة، هي عبارة عن Indication لحاجة من هالنوع، + +287 +00:25:46,630 --> 00:25:50,530 +يعني أقل من أربعين نمرة اتنين Recurrent + +288 +00:25:50,530 --> 00:25:53,930 +thromboembolism، قلنا تكرار الجلطات + +289 +00:26:05,190 --> 00:26:10,750 +أو المريض عنده SLE أو Systemic Lupus Erythematosus + +290 +00:26:10,750 --> 00:26:17,450 +يعني أو Pre-Hormonal Therapy زي الحالات اللي قبل + +291 +00:26:17,450 --> 00:26:22,150 +ما بيبقوا هم هرمونات بيبقوا هم إيش بي .. 
لازم + +292 +00:26:22,150 --> 00:26:23,690 +يفحصوا لهم ال antiphospholipid + +293 +00:26:30,890 --> 00:26:34,050 +طبعاً في حالات أخرى يا شباب استكمالاً للموضوع زي + +294 +00:26:34,050 --> 00:26:40,910 +tissue plasminogen activator يوم ما اتقل يعني عسى وقفنا + +295 +00:26:40,910 --> 00:26:45,930 +ال fibrinolysis يعني جلطة صح؟ plasminogen activator + +296 +00:26:45,930 --> 00:26:54,550 +انهيبته إذا زاد تركيزه شو بدي أعمل؟ انهيبشي لل + +297 +00:26:54,550 --> 00:26:59,210 +activator يعني fibrinolysis انهيبشي صح؟ ثم دي is + +298 +00:26:59,210 --> 00:27:08,820 +fibrinogen فيبرينوجين طبيعي factor 12 deficiency فيبرينوجين + +299 +00:27:08,820 --> 00:27:13,160 +زياده هو factor 8 بلازمينوجين و + +300 +00:27:13,160 --> 00:27:20,520 +الهيبرهموسستيميا عشان شباب ال investigation شباب + +301 +00:27:20,520 --> 00:27:25,400 +خاضعة في هذا الموضوع there is no single simple + +302 +00:27:25,400 --> 00:27:25,920 +test + +303 +00:27:30,740 --> 00:27:35,800 +مجموعة من الفحصات كل ما بتفكر فيه اللي عمل جلطة + +304 +00:27:35,800 --> 00:27:41,280 +فكر افحصه عشان تحط ايدك على التشخيص السليم + +305 +00:27:41,280 --> 00:27:45,100 +فبيعملوا فحص كامل من هذا طبعا بيعملوا أول شيء ال + +306 +00:27:45,100 --> 00:27:50,480 +CBC و ال platelet و ESR بتعرفوا ليش ال CBC ماشي + +307 +00:27:50,480 --> 00:27:56,100 +شوف ال platelet فيه باطي عالي صح هي بتعمل جلطة + +308 +00:27:56,100 --> 00:28:04,580 +بتفهم ده sticky platelet syndrome ESR زيادة ال ESR + +309 +00:28:04,580 --> 00:28:10,040 +بتاع الالتهاب شباب زيادة بعض البروتينات اللي ممكن + +310 +00:28:10,040 --> 00:28:13,620 +تعمل ال low formation يعني sticky platelet + +311 +00:28:13,620 --> 00:28:20,020 +syndrome تقريبا صح؟ ثم PT و PTT و كل + +312 +00:28:20,020 --> 00:28:24,360 +الفحصات thrombin time فيبرينوجين هذه عبارة عن فحصات + +313 +00:28:24,360 --> 00:28:28,460 +بتعطينا indication protein C و protein S thrombin time 3 + +314 +00:28:28,460 --> 00:28:33,230 +deficiency برضه indication activated protein C + +315 +00:28:33,230 --> 00:28:41,110 
+resistance و هكذا كلها تبدأ و اكتشاف تفحص واحد + +316 +00:28:41,110 --> 00:28:43,630 +عنده أقل من خمسين سنة زي ما قلتلكوا صار عنده + +317 +00:28:43,630 --> 00:28:47,830 +recurrent thromboembolism و خصوصا اذا كان بياخد + +318 +00:28:47,830 --> 00:28:59,550 +warfarin و غالبا بتصير في الحالات + +319 +00:28:59,550 --> 00:29:04,090 +اللي هوبصير فيه جلطات في مواقع غير .. غير طبيعية + +320 +00:29:04,090 --> 00:29:08,410 +طبعا في هذه الحالة شباب المهم انه تفحص بظروف + +321 +00:29:08,410 --> 00:29:12,630 +طبيعية إيش الظروف الطبيعية اولا انك تفحص على طول + +322 +00:29:12,630 --> 00:29:16,650 +في ال acute phase أو ال acute emulsion لأ لازم + +323 +00:29:16,650 --> 00:29:20,770 +تستنى على بال ما يرجع الجسم الى ال hemostatic + +324 +00:29:20,770 --> 00:29:26,130 +balance تبعته بده تفحص و غالبا بستنوا 6 أسابيع يعني + +325 +00:29:26,130 --> 00:29:27,310 +مريض اجى مجلس + +326 +00:29:30,530 --> 00:29:34,710 +ليه؟ لأنه ال coagulation بتكون ماشية ببطء، بتأثر على + +327 +00:29:34,710 --> 00:29:39,330 +نتيجتك ولا بتأثر عليك، فبالتالي الأصل إنك تستنى 6 + +328 +00:29:39,330 --> 00:29:44,190 +أسابيع طيب، + +329 +00:29:44,190 --> 00:29:47,330 +كمان + +330 +00:29:47,330 --> 00:29:54,270 +من الخبرة بتقول إنه إذا كان المريض عنده أي مرض من + +331 +00:29:54,270 --> 00:29:57,450 +هذه الأمراض، قبل ما تبدأ بأي علاج معاه، اعمله + +332 +00:29:57,450 --> 00:30:04,220 +فحص يوم ما يوقف الفعل .. 
العلاج عملوا كمان فحصة + +333 +00:30:04,220 --> 00:30:09,520 +ماشي؟ عشان تقارنه Sticky Platelet Syndrome عملها + +334 +00:30:09,520 --> 00:30:14,780 +فحص لـ Platelet Aggregation Test مش خدتوه؟ اه و + +335 +00:30:14,780 --> 00:30:20,040 +قالوا ان كل مرأة بتاخد Contraceptive Pills أو + +336 +00:30:20,040 --> 00:30:25,020 +عندها Heart Disease اللي اصلا انك تفحصها مفهوم يا + +337 +00:30:25,020 --> 00:30:28,820 +شباب؟ هددوا سؤال؟ + +338 +00:30:57,470 --> 00:31:04,070 +عشان هيك بيحصل في جلطة، بيجفش يعنيفهمت عليا؟ احنا + +339 +00:31:04,070 --> 00:31:08,990 +قلنا لما بيقدي دولة ال coagulation factor، بعمل + +340 +00:31:08,990 --> 00:31:14,690 +جلطة ونخلص، بنبدأ نكسر، نرجع عادة بالميكانيزمات، + +341 +00:31:14,690 --> 00:31:19,950 +باتجاه ال anticoagulant effect، فبنبدأ نكسر، + +342 +00:31:19,950 --> 00:31:21,370 +ما بنبدأش نكسرها + +343 +00:31:36,360 --> 00:31:43,460 +هو ليش عندها Vitamin K كافية وكتير وده؟ نعم هدى + +344 +00:31:43,460 --> 00:31:47,540 +حالة من خلالها بيصير في التوازن إذا كان الطفل عنده + +345 +00:31:47,540 --> 00:31:51,460 +protein C deficient اللي أقصد إنك ماتعطيش Vitamin + +346 +00:31:51,460 --> 00:31:55,260 +K لإنه بيصير في sudden shifting في ال hemostatic + +347 +00:31:55,260 --> 00:31:59,500 +balance عند المريض، مظبوط؟ واحد عنده protein C + +348 +00:31:59,500 --> 00:32:05,540 +deficient يعني أنت ناقصه مظبوط؟يعني ال pro + +349 +00:32:05,540 --> 00:32:08,220 +coagulant أعلى من ال anti-coagulant و تديه كمان + +350 +00:32:08,220 --> 00:32:15,960 +pro-coagulant بصي غير مشكلة حد عنده سؤال تاني طيب + +351 +00:32:15,960 --> 00:32:19,880 +هذا الكلام كله بيتعلق بال venous thrombosis نكمل + +352 +00:32:19,880 --> 00:32:23,580 +نشوف ال arterial thrombosis و ال Arterial كما + +353 +00:32:23,580 --> 00:32:28,080 +نفكرين هي الجلطة الشمال في الارضي في نص الأرضي + +354 +00:32:28,080 --> 00:32:31,880 +اللي يبدأ تكوين الجلطة من خلال تنشيط + +355 +00:32:36,550 --> 00:32:42,130 +و تبدأ تبني جلطة تسكر الشريان و إذا اتسكر الشريان + +356 +00:32:42,130 --> 00:32:46,490 +بيصير 
في Ischemia Ischemia تعريفا أنه مش Oxygen + +357 +00:32:46,490 --> 00:32:57,270 +بيصل للعضو بسبب Infarction للعضو فغالبا بتبدأ هذه + +358 +00:32:57,270 --> 00:33:05,550 +الأعراض بغياب الأكسوجين عن ال left ventricle و إيش + +359 +00:33:05,550 --> 00:33:12,040 +اللي بيصير؟ بيرجع الدم على ال heart مجبوط من خلال + +360 +00:33:12,040 --> 00:33:18,060 +الأذين الأيمن ثم البطين الأيمن و يبدأ ضخه ماشي من + +361 +00:33:18,060 --> 00:33:23,120 +هنا من ال .. ال .. ال .. على الشجرة الشمال ثم يضخ + +362 +00:33:23,120 --> 00:33:27,960 +إلى جميع أنحاء الجسم في الشرايين sorry الشرايين + +363 +00:33:27,960 --> 00:33:31,820 +إلى جميع أنحاء الجسم ب .. من خلال ال left + +364 +00:33:31,820 --> 00:33:35,420 +ventricle طيب يبقى مين اللي عليه ضغط الأكبر؟ left + +365 +00:33:35,420 --> 00:33:41,140 +ventricle مافيش Oxygen وصله مش هيهش مش ال Ischemia + +366 +00:33:41,140 --> 00:33:45,100 +وين؟ في ال heart و إذا صارت Ischemia في ال heart + +367 +00:33:45,100 --> 00:33:50,980 +يعني مشكلة تعمل Myocardial Infarction Myocardial + +368 +00:33:50,980 --> 00:33:56,100 +Infarction وممكن تبتدي إلى حالة بيسموها left + +369 +00:33:56,100 --> 00:33:59,760 +ventricular fibrillation left ventricular + +370 +00:33:59,760 --> 00:34:05,390 +fibrillation أو Ischemic left ventricular + +371 +00:34:05,390 --> 00:34:11,950 +fibrillation و sudden death بعد ذلك بموت المرأة + +372 +00:34:11,950 --> 00:34:18,670 +إيش يعني؟ إيش يعني؟ يعني والله القلب بطاير الأيسر + +373 +00:34:18,670 --> 00:34:24,590 +فيه موصلهوش Oxygen مفروض يضخ دم فبصير بنزنج فبيبدأ + +374 +00:34:24,590 --> 00:34:30,730 +إيش؟ بيصير في اللي هي الرفة الناتجة عن نقص Oxygen + +375 +00:34:32,540 --> 00:34:36,800 +بيبقوا بيشتغل بتاكيكارديال لو عسى انه يصلوا + +376 +00:34:36,800 --> 00:34:40,780 +شوية دم فيه أكسجين، بيصلش، فبيصير sudden death، + +377 +00:34:40,780 --> 00:34:46,540 +ايش بصير؟ similar episode نفس المظاهر ممكن نشوفها + +378 +00:34:46,540 --> 00:34:52,940 +في وين؟ في الـ cerebral circulation ونفس الحكاية، + +379 +00:34:52,940 --> 00:34:58,120 
+إذا ماوصلش أكسجين للـ cerebral circulation بيصير في + +380 +00:34:58,120 --> 00:35:00,840 +حالتين، يا اما transient + +381 +00:35:03,540 --> 00:35:13,780 +Transient Ischemic Attack جلطة دماغية بيسموها بسيطة أو + +382 +00:35:13,780 --> 00:35:20,260 +Thrombotic Stroke اللي هو إذا استمرت أكثر من 24 + +383 +00:35:20,260 --> 00:35:28,720 +ساعة ستدخل في Collapse وDeath وممكن تلعب Permanent + +384 +00:35:28,720 --> 00:35:30,280 +Disability + +385 +00:35:34,450 --> 00:35:37,790 +من أسباب القتل على الـ arterial thrombosis هي الـ + +386 +00:35:37,790 --> 00:35:41,970 +myocardial infarction أو الذبح الصدرية، شو بيصير؟ + +387 +00:35:41,970 --> 00:35:45,770 +شوفوا مظاهرها يا شباب، شوفوا مظاهر الذبح الصدرية، + +388 +00:35:45,770 --> 00:35:49,850 +إيش بتقول؟ بيقول crushing tightness، بتوصف + +389 +00:35:49,850 --> 00:35:54,110 +كالقاتل، crushing tightness of the chest عارفين شو + +390 +00:35:54,110 --> 00:36:02,090 +معناته crushing؟ هرش، تحطيم، بتحطن إيه؟ بتحطن غضب + +391 +00:36:02,090 --> 00:36:07,340 +أنت جاي،وبالتالي ألم فظيع جدا وكأن الواحد كان بيكسر + +392 +00:36:07,340 --> 00:36:13,440 +في العظم تكسير ألم فظيع جدا وين؟ في الصدر with + +393 +00:36:13,440 --> 00:36:21,680 +sweating في تعرق and nausea غثيان معدى and + +394 +00:36:21,680 --> 00:36:29,670 +collapse ممكن يغمى عليه ممكن يغمى عليه ماشي؟ لكن + +395 +00:36:29,670 --> 00:36:34,130 +استمرارية الـ chest pain اللي اللي اللي في الصدر + +396 +00:36:34,130 --> 00:36:43,070 +ممكن يمتد إلى الذراعين وإلى throat، إيش يعني؟ + +397 +00:36:43,070 --> 00:36:49,170 +الحلق؟ والـ jaws اللي هو الفكين وبيصير في locked + +398 +00:36:49,170 --> 00:36:57,450 +jaws، ماشي؟ يعني الأمقارات قد تتضاعف، تزيد يعني من + +399 +00:36:57,450 --> 00:37:01,030 +خلال إيش؟ أن القلم اللي في الصدر هذا بيمتد على + +400 +00:37:01,030 --> 00:37:05,990 +الذراع والذراع وأن الاثنين وممكن يصل إلى الحلق و + +401 +00:37:05,990 --> 00:37:12,830 +الحلق بالمناسبة و lock jaws يعني ما فيش نفس، مظبوط؟ + +402 +00:37:12,830 --> 00:37:17,490 +chest pain هذه الأعراض كلها يا 
شباب للأسف الشديد + +403 +00:37:17,490 --> 00:37:27,310 +بتتشابه مع أعراض التلبك المعوي فاهمين عليّ؟ التلبك + +404 +00:37:27,310 --> 00:37:32,110 +المعوي بدون إيش؟ بدون أعراض الامتداد إلى الأيدين + +405 +00:37:32,110 --> 00:37:39,510 +إنما نفس الأعراض واحد قاعد على أكل أكلة دسمة جدا و + +406 +00:37:39,510 --> 00:37:45,290 +أفرط في الأكل ممكن يؤدي إلى تلبك معوي ويتقيئ إيده + +407 +00:37:45,290 --> 00:37:49,650 +نفس الأعراض ليش؟ انتفاخ؟ فما ضغط على الحجاب الحاجز؟ + +408 +00:37:49,740 --> 00:37:52,980 +اللي ممكن يلخط على الصدر بما فيهم إيه؟ عشان القلب، + +409 +00:37:52,980 --> 00:37:56,520 +مظبوط؟ آه، لكن في جميع أحوال أي إنسان عنده هذه + +410 +00:37:56,520 --> 00:38:00,040 +الأعراض لابد من إيه؟ من زيارة الطبيب، من زيارة + +411 +00:38:00,040 --> 00:38:06,500 +الطبيب، على المطرح، مش لازم يشوف طبيبه، ماشي؟ + +412 +00:38:06,500 --> 00:38:11,060 +طيب، نخش على الـ cerebral stroke أو الجلطات + +413 +00:38:11,060 --> 00:38:14,620 +الدماغية، قولنا نوعين هي transient ischemic + +414 +00:38:14,620 --> 00:38:17,720 +attack، و transient يعني إيه؟ + +415 +00:38:22,310 --> 00:38:29,110 +لكنها مرات بتكون مصحوبة بفقدان الحاسة من الحاسة من + +416 +00:38:29,110 --> 00:38:35,910 +الحواس منها dysfunctional of or loss of vision + +417 +00:38:35,910 --> 00:38:41,390 +الإنسان طبعا في ناس بتفقد الرؤية في ناس بتفقد + +418 +00:38:41,390 --> 00:38:47,050 +الكلام في ناس بتفقد اللي هو البلع قليل البلع في + +419 +00:38:47,050 --> 00:38:54,600 +ناس بتفقد الحركة ماشي حسب منطقة الإحساس التي ضربت، + +420 +00:38:54,600 --> 00:38:57,980 +إيش يعني ضربت؟ يعني اللي ما وصلهاش Oxygen، اللي + +421 +00:38:57,980 --> 00:39:03,060 +ما وصلهاش.. آه دي مني، ما وصلهاش Oxygen، مش قولنا + +422 +00:39:03,060 --> 00:39:07,320 +إذا ما فيش Oxygen في سكينة والفرشة، فهي اللي + +423 +00:39:07,320 --> 00:39:07,580 +بيصير + +424 +00:39:19,660 --> 00:39:22,640 +جلطات العين تكون في الوخ ولا في العين؟ لا لا في + +425 +00:39:22,640 --> 00:39:26,860 +العين، هذا موضوع ثاني، ليه مالهاش ولا.. 
آه هذه + +426 +00:39:26,860 --> 00:39:31,360 +جلطات في العين ممكن تكون نتيجة ضربة، نتيجة إرهاق + +427 +00:39:31,360 --> 00:39:37,100 +شديد، نتيجة انفجار أحد الشرايين وطبعا، آه ليه.. + +428 +00:39:37,100 --> 00:39:41,080 +لا مالهاش ولا.. طبعا في complete النوع الثاني هو + +429 +00:39:41,080 --> 00:39:48,000 +complete thrombotic stroke هي جلطة دماغية الكاملة، + +430 +00:39:48,000 --> 00:39:55,200 +وهي عبارة عن.. الجلطة الدماغية المرحلية لكنها إذا + +431 +00:39:55,200 --> 00:40:01,080 +استمرت أكثر من 24 ساعة بيطلع المريض يبعها دائما.. + +432 +00:40:01,080 --> 00:40:08,000 +يبعها دائما وممكن يصل إلى الوفاة طبعا إذا طيب + +433 +00:40:08,000 --> 00:40:17,300 +بيطلع بياج يبعها دائما طبعا هذا جدول يا شباب بيبين لي + +434 +00:40:17,300 --> 00:40:24,230 +الـ risk factor للـ arterial والـ venous thrombosis + +435 +00:40:24,230 --> 00:40:28,830 +شوفنا آليات العمل مختلفة ماشي فبالتالي المفروض + +436 +00:40:28,830 --> 00:40:32,890 +يكون فيه اختلاف في الـ risk factor لكن الصحيح + +437 +00:40:32,890 --> 00:40:36,330 +الاختلافات بسيطة زي ما تشوفوا زيادة العمر الـ + +438 +00:40:36,330 --> 00:40:41,390 +obesity الـ immobility pregnancy يعنوها كذا ماشي + +439 +00:40:47,260 --> 00:40:50,980 +يعني ممكن أقول لك اذكر لي أربع خمس أسباب لإنه risk + +440 +00:40:50,980 --> 00:40:56,680 +factor ماشي واكتب عليه للحفظ يا حمود أيوة تاه + +441 +00:40:56,680 --> 00:41:04,080 +Prophylaxis and Treatment of Thrombosis كيف نتقي شر + +442 +00:41:04,080 --> 00:41:10,180 +الجلطات ونعالجها طبعا شوية بصراحة عشان ما أصرش عند + +443 +00:41:10,180 --> 00:41:15,620 +واحد جلطة بده يبعد عن الـ risk factor صح؟ زي إيش؟ + +444 +00:41:15,620 --> 00:41:23,840 +smoking تدخين زي الـ obesity السمنة زي الـ lack of + +445 +00:41:23,840 --> 00:41:29,420 +exercise عارف؟ زي الـ poor diet كلها عبارة عن risk + +446 +00:41:29,420 --> 00:41:34,540 +factor بتؤدي إلى إيه؟ جنب طب لأ ما جدرناش في ناس كثير + +447 +00:41:34,540 --> 00:41:40,430 +يقول لك أنا لأ ما جدرش أبعد عن مثلا الأكل أو التدخين + +448 +00:41:40,430 --> 00:41:44,030 
+أو أو الأخرى أو ما التزمتش طبعا في هذه الحالة يا شباب + +449 +00:41:44,030 --> 00:41:49,470 +لابد من أخذ العلاجات الكيميائية عشان إيش؟ عشان + +450 +00:41:49,470 --> 00:41:53,770 +يتقي زي إيش يعني؟ بده ياخذ anticoagulant ولا لا؟ + +451 +00:41:53,770 --> 00:41:57,630 +is a land risk of thrombosis بده ياخذ إيش؟ + +452 +00:41:57,630 --> 00:42:03,590 +anticoagulant زي ما.. حتى أنت سؤال، ماشي أعطيكم + +453 +00:42:03,590 --> 00:42:03,950 +العافية diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/HKKaEh5-Hkw.srt b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/HKKaEh5-Hkw.srt new file mode 100644 index 0000000000000000000000000000000000000000..42f0b5d04986478d8462047eb609b31b925ea2e2 --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/HKKaEh5-Hkw.srt @@ -0,0 +1,1579 @@ +1 +00:00:26,880 --> 00:00:32,980 +drugs that affect haemostasis و + +2 +00:00:32,980 --> 00:00:38,120 +drugs اللي بتأثر في ال haemostatic mechanisms كثيرة + +3 +00:00:38,120 --> 00:00:47,060 +قسموها حسب دورها في التحكم بوظائف المختلفة لل + +4 +00:00:47,060 --> 00:00:50,740 +haemostatic mechanism طبعا كلنا بنعرف أن ال + +5 +00:00:50,740 --> 00:00:58,340 +haemostasis أدواته كثيرة لكنها تبدأ غالبا + +6 +00:00:58,340 --> 00:01:03,120 +بالcontraction of the blood vessels وما ال primary + +7 +00:01:03,120 --> 00:01:07,640 +haemostasis وما ال secondary haemostasis وما ال + +8 +00:01:07,640 --> 00:01:12,320 +fibrolytic system وبالتالي في drugs بتؤثر على كل + +9 +00:01:12,320 --> 00:01:17,840 +وظيفة من هذه الوظائف إما يعني في الغالب it + +10 +00:01:17,840 --> 00:01:20,960 +modulates it inhibits + +11 +00:01:25,150 --> 00:01:32,010 +which is directed لنبدأ في هذه الأدوات ونبدأ بالـ + +12 +00:01:32,010 --> 00:01:36,930 +Anticoagulant ذكروا منها ثلاث مجموعات أساسية هي + +13 +00:01:36,930 --> 00:01:43,550 +الواحد هو الهيبارين أو ال heparin derivative الهيبارين + +14 +00:01:43,550 --> 00:01:48,410 +heparin derivative ثم النوع الثاني هو الكومارين + +15 +00:01:48,410 --> 00:01:51,450 +المثال الشهير عليه الwarfarin + +16 
+00:02:04,620 --> 00:02:11,160 +العلاقات بتعرف العلاقات ليتشت بتتحط على الظلومة + +17 +00:02:11,160 --> 00:02:15,100 +منها + +18 +00:02:15,100 --> 00:02:21,560 +الهيرودين والبايفلوردين والارجاتور فكلها بتؤثر على + +19 +00:02:21,560 --> 00:02:25,340 +ال thrombin بالذات و ال thrombin ما بتعرفيه هو + +20 +00:02:25,340 --> 00:02:30,640 +عبارة عن ال coagulation factor الأساسي في كل + +21 +00:02:30,640 --> 00:02:35,090 +ميكانيزم هذه الأدوات بتنعطف فيه حالات مختلفة + +22 +00:02:35,090 --> 00:02:40,210 +معظمها تتعلق بالقلب واختلالاته أو الشرايين + +23 +00:02:40,210 --> 00:02:45,290 +واختلالاته نبدأ بأول واحد وهو ال oral + +24 +00:02:45,290 --> 00:02:49,670 +anticoagulant أو ال warfarin وزي ما اتفقنا هو + +25 +00:02:49,670 --> 00:02:54,390 +عبارة عن molecule بيتبع عائلة سميناها الكومارين + +26 +00:02:54,390 --> 00:02:58,950 +family وبيضمن حوالي خمس وعشرين different molecule + +27 +00:02:58,950 --> 00:03:05,140 +خمس وعشرين different molecule أشهرها warfarin وكلنا + +28 +00:03:05,140 --> 00:03:11,060 +بنعرف إيش بيعمل ال warfarin، ال warfarin بالظبط + +29 +00:03:11,060 --> 00:03:18,500 +بينتفير وبيؤثر على synthesis of vitamin K in the + +30 +00:03:18,500 --> 00:03:23,140 +coagulation factors وكلنا بنعرف إن هم عبارة عن + +31 +00:03:23,140 --> 00:03:25,520 +اثنين وسبعة وتسعة وعشرة وإحدى عشرة + +32 +00:03:32,610 --> 00:03:38,330 +دوره الأساسي بيعمل carboxylation لإيه؟ للvitamin K + +33 +00:03:38,330 --> 00:03:40,910 +بين ال coagulation factors يعني بيضيف carboxyl + +34 +00:03:40,910 --> 00:03:46,270 +group وكنا بنعرف أن إضافة carboxyl group بتمكن ال + +35 +00:03:46,270 --> 00:03:50,270 +coagulation factors من الارتباط بالكالسيوم + +36 +00:03:50,270 --> 00:03:57,690 +والكالسيوم هو اللي بيعمل bridging بيربط ال factors + +37 +00:03:57,690 --> 00:04:01,210 +اللي بيشتغل في الphospholipid + +38 +00:04:04,750 --> 00:04:13,150 +الورفين يعمل blocking لل vitamin K cycle أو يوقف + +39 +00:04:13,150 --> 00:04:18,570 +هذه العملية وبالتالي يمنع reduction أو re + +40 +00:04:18,570 --> 
00:04:23,270 +-duction of vitamin K مرة ثانية وبالتالي يعمل + +41 +00:04:23,270 --> 00:04:28,570 +blocking للvitamin K cycle ال + +42 +00:04:28,570 --> 00:04:35,580 +pharmacokinetics للورفين امتصاصه سريع و complete و + +43 +00:04:35,580 --> 00:04:44,300 +يستخدم Orally و انتشاره في الجسم very small لأن + +44 +00:04:44,300 --> 00:04:52,000 +حوالي 99% منه يكون مرتبط ببروتين وبالتالي ال 1% هو + +45 +00:04:52,000 --> 00:04:57,380 +ال free هو اللي بيشتغل بالنسبة + +46 +00:04:57,380 --> 00:05:05,820 +لل drug هذة يؤخذ من ال construction اللي موجود + +47 +00:05:05,820 --> 00:05:11,200 +عليه أنه ده يُأخذ أثناء الحمل أو الرضاعة الحمل أو + +48 +00:05:11,200 --> 00:05:15,620 +الرضاعة ليش؟ لأنه it can cross the placenta + +49 +00:05:15,620 --> 00:05:22,100 +وبالتالي بيصل إلى ال baby during pregnancy أو + +50 +00:05:22,100 --> 00:05:29,200 +through breast milk feeding وبالتالي بيصل ال baby + +51 +00:05:29,200 --> 00:05:33,340 +سواء كان في حالة رضاعة أو حالة في حمل وبيؤثر عليه + +52 +00:05:33,340 --> 00:05:40,440 +استراتوجينية ال elimination of such drug بيحتاج ل + +53 +00:05:40,440 --> 00:05:44,120 +40 ساعة to be completely removed from the body + +54 +00:05:44,120 --> 00:05:47,060 +يعني يوم ما نوجف المريض اللي بياخد ال warfarin + +55 +00:05:47,060 --> 00:05:51,120 +بيوقف ياخد ال warfarin بعد 40 ساعة بيتهي مفعول ال + +56 +00:05:51,120 --> 00:05:56,900 +warfarin من الجسم بيتهي مفعول ال warfarin من الجسم و + +57 +00:05:56,900 --> 00:06:04,260 +اتفقنا سابقا على انه بيحتاج اليومين إلى ثلاث أيام to + +58 +00:06:04,260 --> 00:06:11,080 +see its effect 100% 100% ليش؟ بس خلصت الدور دلوقتي + +59 +00:06:11,080 --> 00:06:19,220 +بس خلصت إن كل الدم يتعمل طبعا + +60 +00:06:19,220 --> 00:06:24,330 +كل المشاكل يتعمل 40 ساعة لأربعين ساعة عشان نخلص + +61 +00:06:24,330 --> 00:06:27,850 +منها ال elimination لأ مش five years لأ ماني مش + +62 +00:06:27,850 --> 00:06:31,290 +دخل فى ال final license هذا ايه دخل فى حاجة تانية + +63 +00:06:31,290 --> 00:06:36,490 +يا ابني؟ أنا بقيت ابن كايدى بندر لمّا 
بيخلص ال + +64 +00:06:36,490 --> 00:06:41,010 +half-life تبعي للبروترومبين أعلى half-life اتفقنا + +65 +00:06:41,010 --> 00:06:46,470 +هي للبروترومبين صح؟ فبالتالي هو ما بيؤثر على ال + +66 +00:06:46,470 --> 00:06:51,460 +already formed factors وإنما بيؤثر على اللي بيدها + +67 +00:06:51,460 --> 00:06:56,500 +تتصنع فبالتالي يوم ما ياخد المريض بيكون already + +68 +00:06:56,500 --> 00:07:00,580 +فيه مين اتصنع خالص prothrombin بس يخلص هال + +69 +00:07:00,580 --> 00:07:04,820 +prothrombin و أخه و شبته ماشي ال half-life تبعه + +70 +00:07:04,820 --> 00:07:10,640 +تنتهي بيبدأ نورمين يبدأ تأثير ال warfarin وبالتالي + +71 +00:07:10,640 --> 00:07:14,220 +يحتاج إلى يومين إلى ثلاث أيام على بعد ما تخلص ال + +72 +00:07:14,220 --> 00:07:16,800 +half-life تبع ال warfarin ال prothrombin + +73 +00:07:20,590 --> 00:07:28,510 +خمس مرات في الشهور ومن + +74 +00:07:28,510 --> 00:07:32,970 +ال adverse reaction تبعته طبعا احنا بنخاف يوم نعطي + +75 +00:07:32,970 --> 00:07:36,390 +anticoagulant بنخاف من ال bleeding بنخاف من ايش؟ + +76 +00:07:36,390 --> 00:07:41,050 +من ال bleeding ويوم يصير bleeding بمعنى يوم يصل + +77 +00:07:41,050 --> 00:07:45,030 +المريض الى المستشفى عنده bleeding due to overdose + +78 +00:07:45,030 --> 00:07:52,930 +of warfarin بياخد ال antidote تبعه، ايش هو؟ + +79 +00:07:52,930 --> 00:07:57,230 +لأنك تعطي vitamin K من الحاجات اللي بيعطوها و + +80 +00:07:57,230 --> 00:07:59,870 +بيشتغل ضد ال vitamin K، بيعمل Blocking of vitamin + +81 +00:07:59,870 --> 00:08:05,070 +K Cycle يوم ما تعطي vitamin K، أنت بتخلق إن أنت في + +82 +00:08:05,070 --> 00:08:08,030 +الورق أو بتعطي coagulation factors national + +83 +00:08:19,360 --> 00:08:22,240 +يبقى ال adverse reaction احنا بنخاف من أيش؟ من ال + +84 +00:08:22,240 --> 00:08:25,160 +baby الحاجة الثانية اتفقنا على ان هو can cross + +85 +00:08:25,160 --> 00:08:31,640 +placenta و بيصل لل baby ومؤدي جدا في الفترة ما + +86 +00:08:31,640 --> 00:08:37,800 +بين 6 و 12 شهر من الحمل كمان بيعمل alopecia تعرفوا + +87 +00:08:37,800 --> 00:08:44,200 
ايش ال alopecia؟ ثعلبة تعرفوا مرض الثعلبة؟
تأثيره أو تقلل من تأثيره؟ نشوف الحاجات أو + +112 +00:10:55,230 --> 00:10:58,570 +الحالات اللي بتزيد من التأثير اللي هو anticoagulant أكبر + +113 +00:10:58,570 --> 00:11:02,790 +من ال Fat to Warfarin نجرب من أمر واحد، displacement + +114 +00:11:02,790 --> 00:11:09,510 +of protein bound warfarin ايش يعني؟ والله إنك + +115 +00:11:09,510 --> 00:11:13,930 +تقلل، ما احنا اتفقنا إن 99% من ال warfarin يكون + +116 +00:11:13,930 --> 00:11:20,650 +مرتبط ببروتين 1% لو قللت العدد في ال bound عشان قللت + +117 +00:11:20,650 --> 00:11:25,150 +البروتين أطا مش بيزيد ال free و زاد ال free + +118 +00:11:25,150 --> 00:11:29,110 +بيزيد التأثير أو لا؟ هذا واحد لأجل ان ال Aspirin + +119 +00:11:29,110 --> 00:11:35,550 +من المواد التي بتقلل من ايه عشان من ال protein + +120 +00:11:35,550 --> 00:11:40,750 +bounded to warfarin وبالتالي بيزود ال free نمرة + +121 +00:11:40,750 --> 00:11:46,150 +اثنين Inhibition of the liver microzyma enzyme احنا + +122 +00:11:46,150 --> 00:11:49,910 +اتفقنا إن ال elimination خلال الأربعين ساعة لل + +123 +00:11:49,910 --> 00:11:56,770 +drug بيصير وين يعني بيصير في ال liver في إنزيمات + +124 +00:11:56,770 --> 00:12:01,310 +بتكسر ال warfarin وبالتالي بتخلصنا منها و بيطلع + +125 +00:12:01,310 --> 00:12:06,490 +أزمي تابولايد في ال kidney فلو الإنزيمات هذه قلة + +126 +00:12:06,490 --> 00:12:11,490 +أو زيادة مش بتزيد أو بتغير تأثير الـ warfarin نمر + +127 +00:12:11,490 --> 00:12:18,530 +ثلاثة والـ warfarin أنه receptor ووجدوا أن في بعض + +128 +00:12:18,530 --> 00:12:23,510 +العلاجات بتعمل up-regulation و down-regulation + +129 +00:12:23,510 --> 00:12:27,790 +للـ receptor فيه يعني up-regulation يعني + +130 +00:12:27,790 --> 00:12:33,290 +إظهار وإخفاء لمين؟ للـ receptor site of warfarin + +131 +00:12:33,290 --> 00:12:39,530 +على سطح الخلايا وبالتالي بتزود أو بتقلل من تأثير + +132 +00:12:39,530 --> 00:12:45,460 +الـ warfarin تابعين معايا؟ أكتر من أربعة؟ Vitamin K يجب + +133 +00:12:45,460 --> 00:12:48,960 +أن تعمل بلوك له، Vitamin K صحيح، طب لو مريض + +134 +00:12:48,960 
--> 00:12:55,700 +أخذ Vitamin K بيقلل؟ ولا بيزود التأثير الانتقالي + +135 +00:12:55,700 --> 00:13:01,280 +والتفكير؟ بيقلل، طب لو ما أخدش Vitamin K عايز + +136 +00:13:01,280 --> 00:13:05,760 +أراه أنه يزود التأثير الانتقالي؟ نمرة خامسة اللي + +137 +00:13:05,760 --> 00:13:11,060 +هو Inhibition of Platelet و Inhibition of Platelet + +138 +00:13:11,060 --> 00:13:17,380 +Function، النساء الشهيرة والسمرين على ذلك بيزود الـ + +139 +00:13:17,380 --> 00:13:22,000 +anticoagulant effect والنهاية كما هو طبعًا العلاجات + +140 +00:13:22,000 --> 00:13:26,680 +التي تقلل من تأثير الـ anticoagulant effect هي + +141 +00:13:26,680 --> 00:13:35,300 +الحالات العكسية النهاية اللي حدّدهم هيبارين + +142 +00:13:35,300 --> 00:13:37,980 +يا شباب، نوع ثاني من الـ anticoagulant هو + +143 +00:13:37,980 --> 00:13:43,770 +الهيبارين، intravenous or subcutaneous drug هذا + +144 +00:13:43,770 --> 00:13:51,230 +oral هذا منعطق إيه؟ Orally وهو عبارة عن 42 كيلو + +145 +00:13:51,230 --> 00:13:55,790 +دالتون molecular weight موجود naturally في الجسم + +146 +00:13:55,790 --> 00:14:02,130 +and it acts it can act in vivo و in vitro مصموم؟ + +147 +00:14:02,130 --> 00:14:06,710 +يستخدموه لأنه مستخدموش برة؟ يشتغل جوا وبره وهو + +148 +00:14:06,710 --> 00:14:11,530 +غالبا بتشوفه بكثرة في الـ granules of muscle the + +149 +00:14:11,530 --> 00:14:15,790 +granules of ash والـ muscle وبالتالي لما تتنشط الـ + +150 +00:14:15,790 --> 00:14:19,210 +muscle وتعمل degranulation نطبع منها ash وابل + +151 +00:14:19,210 --> 00:14:26,750 +low molecular أو lower molecular polymers بتحتفظ + +152 +00:14:26,750 --> 00:14:35,070 +بالـ most of the biological activity وبيعتبر the + +153 +00:14:35,070 --> 00:14:41,580 +most acidic organic compound في الجسم ولا ينتصر + +154 +00:14:41,580 --> 00:14:47,740 +عليهم، شو هو بيكتب؟ شو بيعمل؟ أسلر أسلر أسلر 100% + +155 +00:14:47,740 --> 00:14:57,540 +بسبب بيسرع الـ inactivation of serine proteases by + +156 +00:14:57,540 --> 00:15:00,940 +the naturally occurring anticoagulant وهو الـ + +157 
+00:15:00,940 --> 00:15:03,820 +antithrombin الثلاثية الشيطان الكريسي ما أخذناها + +158 +00:15:03,820 --> 00:15:09,460 +بالتفسير بيسرع it accelerates the inactivation of + +159 +00:15:10,460 --> 00:15:19,520 +سيرين وبروتيزي زي 2 و10 و9 و11 و12 كلها عبارة عن + +160 +00:15:19,520 --> 00:15:25,520 +عوامل ماشي تجرّط تحتوي على سيرين وهو بيعملها + +161 +00:15:25,520 --> 00:15:30,420 +inactivation يوم ينشطش الـ antithrombin الثلاثية وهو + +162 +00:15:30,420 --> 00:15:37,600 +مارتشارلير كيرينج يعني ماشي طيب هما عن هذه الصورة + +163 +00:15:37,600 --> 00:15:39,860 +بتقولكوا أخيرًا antithrombin الثلاثية مع الهيبارين + +164 +00:15:44,640 --> 00:15:49,820 +بتبطئ مع الهيبارين أولا ثم يسود التفاعل ثم يتبطئ مع + +165 +00:15:49,820 --> 00:15:55,180 +السيرين بروتيز ثم يتبطئ مع الهيبارين ثم يتبطئ مع + +166 +00:15:55,180 --> 00:15:58,820 +السيرين بروتيز ثم يتبطئ مع السيرين بروتيز ثم يتبطئ + +167 +00:15:58,820 --> 00:15:59,080 +ثم يتبطئ مع السيرين بروتيز ثم يتبطئ مع السيرين + +168 +00:15:59,080 --> 00:16:01,640 +بروتيز ثم يتبطئ مع السيرين بروتيز ثم يتبطئ مع + +169 +00:16:01,640 --> 00:16:05,780 +السيرين بروتيز ثم يتبطئ مع السيرين بروتيز ثم يتبطئ + +170 +00:16:05,780 --> 00:16:09,880 +مع السيرين بروتيز ثم يتبطئ مع السيرين بروتيز ثم + +171 +00:16:09,880 --> 00:16:18,490 +يتبطئ طبعًا هذا اللي هو high molecular weight وإنه + +172 +00:16:18,490 --> 00:16:21,110 +بيساوي عشان إيه؟ تطلعه الـ low molecular weight هيبارين + +173 +00:16:21,110 --> 00:16:28,210 +molecular weight طبعًا من 4600 وبيتربط في الغالب against + +174 +00:16:28,210 --> 00:16:34,390 +factor X الهفلانف تبعت ودخلت الـ standard هيبارين + +175 +00:16:34,390 --> 00:16:40,150 +ومايعملش bleeding لسه bleeding كمان ما يأثرش على الـ + +176 +00:16:40,150 --> 00:16:44,690 +platelet مثل الـ standard الهيبارين ولا على فاكته + +177 +00:16:44,690 --> 00:16:49,470 +التبطار والمثال الحقيقي عليه هو النارفين أو + +178 +00:16:49,470 --> 00:16:56,270 +الكلكسان وهو موجود تجاريًا في السورة الانكتران + +179 +00:16:56,270 --> 00:16:59,470 +بالتلادة 
ماسك في الهيبارين وفي فاكته الثانية فهذه + +180 +00:16:59,470 --> 00:17:05,370 +الصورة تبين لي أن الـ standard الهيبارين يحتاج إلى 18 + +181 +00:17:05,370 --> 00:17:13,020 +monosaccharides units to be activated بينما + +182 +00:17:13,020 --> 00:17:23,920 +الـ low molecular يحتاج فقط لخمسة monosaccharides pharmacokinetic + +183 +00:17:23,920 --> 00:17:28,980 +تفّقنا على أنه لا يؤخذ بشكل oral بينما + +184 +00:17:28,980 --> 00:17:36,060 +يؤخذ intravenously أو subcutaneously وdistributionه صغيرة + +185 +00:17:36,060 --> 00:17:41,610 +جدا لأن نسبة الابتلاط عالية بالكبد، نسبة + +186 +00:17:41,610 --> 00:17:47,830 +الارتباط بتؤثر على نسبة الـ active الأنصت + +187 +00:17:47,830 --> 00:17:55,490 +تبعه إذا أخذته IV التأثير immediate لكن إذا + +188 +00:17:55,490 --> 00:18:00,310 +أخذته subcutaneously يحتاج لها ساعة تقريبًا من 20 + +189 +00:18:00,310 --> 00:18:05,250 +دقيقة إلى 60 دقيقة to see the effect of the age of + +190 +00:18:05,250 --> 00:18:05,530 +war + +191 +00:18:11,390 --> 00:18:15,250 +الـ adverse reaction احنا بتخاف برضه مرة ثانية من + +192 +00:18:15,250 --> 00:18:21,210 +الـ anticoagulant من الـ bleeding من إيش؟ الـ + +193 +00:18:21,210 --> 00:18:26,790 +bleeding، الـ antidote تبع الـ heparin هو الـ protamine + +194 +00:18:26,790 --> 00:18:30,550 +sulfate، هو إيش؟ الـ protamine sulfate يعني لو مريض + +195 +00:18:30,550 --> 00:18:34,990 +بيجي المستشفى بيأخذ heparin ومرضوه صار عنده + +196 +00:18:34,990 --> 00:18:40,380 +bleeding بيعطوه إيش؟ شو بيعمل كمان؟ شوفوا عالي + +197 +00:18:40,380 --> 00:18:43,000 +بيعمل ثلاث شغلات ثانية، نمرة واحد بيعمل + +198 +00:18:43,000 --> 00:18:48,820 +thrombocytopenia في أنه أقل من 5% من الناس اللي + +199 +00:18:48,820 --> 00:18:54,680 +بياخدوه بعد أيام لكنه يختفي باختفاء بوقف + +200 +00:18:54,680 --> 00:18:59,060 +العلاج، thrombocytopenia بترجع reversible يعني + +201 +00:18:59,060 --> 00:19:05,460 +بترجع طبيعي، نمرة اثنين بتعمل rapid and profound + +202 +00:19:05,460 --> 00:19:12,430 +thrombocytopenia في أقل من 5% من الناس إذا بعد 8 
+ +203 +00:19:12,430 --> 00:19:17,510 +إلى 10 دقائق من أخذه وهي طبعًا بيصاحب هذه العملية + +204 +00:19:17,510 --> 00:19:23,470 +جلطة، انتبهوا لميكانيزم تبع الجلطة، نعطي + +205 +00:19:23,470 --> 00:19:28,770 +anticoagulant بسبب المريض جلطة ماشي؟ شوفوش + +206 +00:19:28,770 --> 00:19:34,930 +الميكانيزم، heparin بيدخل الجسم بيتكون له antibody + +207 +00:19:35,980 --> 00:19:42,320 +بيصير فيه heparin antibody complex، على + +208 +00:19:42,320 --> 00:19:47,660 +الثاني، heparin بيدخل كـ drug في الجسم بتكون له + +209 +00:19:47,660 --> 00:19:53,640 +antibody وبيكون complex مع الـ heparin، الـ antibody + +210 +00:19:53,640 --> 00:19:57,900 +بيكون complex مع الـ heparin which then bind to + +211 +00:19:57,900 --> 00:20:01,640 +platelet and + +212 +00:20:01,640 --> 00:20:05,710 +induced aggregation، نشطها وبعملها إيش؟ platelet + +213 +00:20:05,710 --> 00:20:08,870 +aggregation، طبعًا إذا صار فيه platelet aggregation + +214 +00:20:08,870 --> 00:20:16,330 +يعني فيه جلطة، يعني إيش؟ يعني فيه جلطة + +215 +00:20:16,330 --> 00:20:21,250 +مفهومة يا شباب؟ + +216 +00:20:21,250 --> 00:20:26,050 +طيب إيش فيه كمان بيعمل الـ .. الـ .. الـ .. 
الهيبارين + +217 +00:20:26,050 --> 00:20:30,770 +قالوا بيعمل reversible osteoporosis، osteoporosis + +218 +00:20:30,770 --> 00:20:35,050 +هشاشة عظم لكنها reversible بعد ست شهور من أخذها + +219 +00:20:35,130 --> 00:20:39,430 +الهيبارين ممكن يعمل reversible osteoporosis + +220 +00:20:39,430 --> 00:20:43,650 +reversible بمعنى لو شيلنا الهيبارين برجع الوضع + +221 +00:20:43,650 --> 00:20:50,630 +يعود طبيعي، آه + +222 +00:20:50,630 --> 00:21:01,070 +أنا أعتبر الهيبارين yes لأ + +223 +00:21:01,070 --> 00:21:04,590 +اللي naturally occurring anticoagulant هو موجود + +224 +00:21:06,380 --> 00:21:10,080 +الـ antibodies اللي ممكن يتكون احنا هنسميهم inhibitors + +225 +00:21:10,080 --> 00:21:13,700 +ممكن يكونوا auto-antibody وممكن يكونوا allo-antibody + +226 +00:21:13,700 --> 00:21:20,440 +أنت عارفها؟ الـ auto مرضش سبب حدوثه بيصير مش محتاجة + +227 +00:21:20,440 --> 00:21:24,140 +مش مشتاجة مش شرط عشان هيك بيصير فيه علم بيصير فيه + +228 +00:21:24,140 --> 00:21:34,660 +مرض بيتعارف؟ آه طيب حد عنده سؤال ثاني؟ طيب النوع + +229 +00:21:34,660 --> 00:21:40,950 +الثالث من الـ drug أو المجموعة الثانية من الـ drug هو + +230 +00:21:40,950 --> 00:21:45,410 +الـ Anti-Platelet ومن اسمه Anti-Platelet يعني ما + +231 +00:21:45,410 --> 00:21:50,290 +بتشتغل ضد الـ platelet في المجموعة في عدة مجموعات منها الصحيح + +232 +00:21:50,290 --> 00:21:53,910 +منها الـ Aspirin ومنها Clopidogrel ومنها 2B3A + +233 +00:21:53,910 --> 00:21:59,670 +Inhibitor ومجموعة أخرى اللي هنظهر عليها ونبدأ + +234 +00:21:59,670 --> 00:22:04,030 +بأول واحد وهو الـ Aspirin وكلكم بتعرفوا إيش بيعمل + +235 +00:22:04,030 --> 00:22:05,690 +الـ baby Aspirin أو الـ Aspirin + +236 +00:22:11,000 --> 00:22:17,320 +عشان تكون دقيقة هو بيعمل آه طبعًا مادام عمل + +237 +00:22:17,320 --> 00:22:21,480 +inhibition للـ platelet للـ cyclooxygenase هيبوس معناته + +238 +00:22:21,480 --> 00:22:25,920 +محشي thromboxane A2 وبالتالي بيعمل inhibition للـ + +239 +00:22:25,920 --> 00:22:31,240 +function أو platelet طب كيف بتتم آلية العمل؟ بيعمل + +240 
+00:22:31,240 --> 00:22:35,320 +الاسبرين هو عبارة عن Acetyl salicylic acid، Acetyl في + +241 +00:22:35,320 --> 00:22:41,590 +Acetyl group، وشو بيعمل؟ بيعمل للسيكلوكسينيز وبالتالي + +242 +00:22:41,590 --> 00:22:48,350 +بيعملوا Inhibition أي ثاني Acetyl Salicylic Acid + +243 +00:22:48,350 --> 00:22:55,370 +يعني فيه Acetyl group، شو بيعمل؟ بيقولوا بيعمل + +244 +00:22:55,370 --> 00:23:00,990 +acetylation للسيكلوكسينيز وبالتالي بيعملوا + +245 +00:23:00,990 --> 00:23:03,430 +Inhibition، يوم بيعملوا Inhibition بيعملوا + +246 +00:23:03,430 --> 00:23:07,070 +Inhibition لكل الـ .. للـ Phospholipids لكل الـ + +247 +00:23:07,070 --> 00:23:16,230 +Platelet طب لو كان استهدف اسمه للـ + +248 +00:23:16,230 --> 00:23:20,090 +Epithelial cells وللـ Hepatocytes فعشان الـ + +249 +00:23:20,090 --> 00:23:24,350 +Cyclooxygenase موجود فيه كميكانيزم في معظم الخلايا + +250 +00:23:24,350 --> 00:23:30,290 +فبالتالي آه أنه جاي من ليبوكسجينيز إيه طول مش + +251 +00:23:30,290 --> 00:23:36,370 +طلع فاشفولايديز مبدلينهم نفسهم بس من ايه يا أفندي؟ + +252 +00:23:36,370 --> 00:23:43,440 +فمش طلع الجهاتطب ليش بيشتغل فقط على ال platelet ال + +253 +00:23:43,440 --> 00:23:47,980 +baby aspirin؟ قالوا لأن ال baby aspirin بيشتغل على + +254 +00:23:47,980 --> 00:23:51,900 +ال platelet Cyclooxygenase قبل ما يسروا ال liver + +255 +00:23:51,900 --> 00:23:55,700 +يعني قبل ما يسروا ال liver يصل ال liver قبل ما يوصل + +256 +00:23:55,700 --> 00:24:00,460 +يصل ال liver أي يعني بيشتغل على ال platelet + +257 +00:24:00,460 --> 00:24:03,260 +Cyclooxygenase before + +258 +00:24:09,220 --> 00:24:11,140 +deacetylation in the liver. + +259 +00:24:19,080 --> 00:24:23,080 +وطبعا احنا قلنا ان ال baby أسبابين بيأثرش على ال + +260 +00:24:23,080 --> 00:24:27,020 +labia fina etc وبالتالي systematically بيأثرش على + +261 +00:24:27,020 --> 00:24:27,740 +الخلايا + +262 +00:24:37,350 --> 00:24:40,930 +طيب ايش ال .. 
ال pharmacokinetics كانوا absorption + +263 +00:24:40,930 --> 00:24:49,590 +سريعة و at low dose معظم ال aspirin is bound + +264 +00:24:49,590 --> 00:24:52,970 +لكن at high dose بيكون قلة ال bound و ال protein + +265 +00:24:52,970 --> 00:25:03,010 +اقل بيستخدموا elimination لل liver ال baby aspirin + +266 +00:25:03,010 --> 00:25:05,870 +بعد اربع ساعات بينما ال standard aspirin بعد خمس + +267 +00:25:05,870 --> 00:25:06,210 +ساعات + +268 +00:25:09,180 --> 00:25:12,400 +تأثيره يظهر بعد تلاتين دقيقة يعني لو واحد أخذ + +269 +00:25:12,400 --> 00:25:16,920 +أسبرين لأحد بعد نصف ساعة بسلاية يصير فيه تأثير و + +270 +00:25:16,920 --> 00:25:21,740 +بتاخد من سابعة لعشر دقيقة ال adversary action أو + +271 +00:25:21,740 --> 00:25:26,780 +side action تبعته نقرأ + +272 +00:25:26,780 --> 00:25:29,920 +واحد بعمل gastrointestinal disturbances بشكل عام + +273 +00:25:29,920 --> 00:25:37,580 +من ضمنها ال epigastric pain و ال heartburn و ال + +274 +00:25:37,580 --> 00:25:42,780 +nausea و ال gastric ulcer و gastric + +275 +00:25:42,780 --> 00:25:47,640 +ulcer اللي كلها بتتعلق بال gastric + +276 +00:25:47,640 --> 00:25:51,100 +problems سجلت ايضا مضاعفات اخرى من + +277 +00:25:51,100 --> 00:25:57,200 +ضمنها rash و tinnitus tinnitus يا شباب اللي هو + +278 +00:25:57,200 --> 00:26:02,320 +طنين الأذن دي بتزن تطبق بتزن الوند ثم نزل polyps + +279 +00:26:02,320 --> 00:26:05,700 +المعدة and gout + +280 +00:26:08,380 --> 00:26:15,700 +و acid-base disturbances Interaction + +281 +00:26:15,700 --> 00:26:20,080 +كانوا بيأثر على معظم علاجات الدماغ Anti + +282 +00:26:20,080 --> 00:26:24,540 +-hypertensive drugs كمان + +283 +00:26:24,540 --> 00:26:31,080 +لو اتخذ مع الwarfarin بزيد تأثيره و بعمل + +284 +00:26:31,080 --> 00:26:36,000 +attenuation يضعف عمل + +285 +00:26:37,280 --> 00:26:41,400 +اللي هو Zyloric Acid اللي هو علاج الـ Gout Disease + +286 +00:26:41,400 --> 00:26:48,080 +علاج ايش؟ الـ Gout Disease نوع + +287 +00:26:48,080 --> 00:26:53,820 +التاني + +288 +00:26:53,820 --> 00:26:58,380 +من ال drugs 
يا شباب اسمه Ticlopidin او Clopidogrel + +289 +00:26:58,380 --> 00:27:04,360 +Clopidogrel وهو عبارة عن P2Y12 + +290 +00:27:06,210 --> 00:27:10,690 +الرسبتور التاجوليس و تبقى عليها طلعة اللوحة طلعة + +291 +00:27:10,690 --> 00:27:14,410 +الشبكة هذا عبارة عن cell membrane cell membrane + +292 +00:27:14,410 --> 00:27:19,570 +هايو او platelet و كلنا عرفنا انه على سطح ال + +293 +00:27:19,570 --> 00:27:23,250 +platelet في رسبتور لل ADP بطلع من ال platelet ADP + +294 +00:27:23,250 --> 00:27:30,510 +و بيدخل الرسبتور على سطح مين؟ شو بيعمل؟ عبارة عن + +295 +00:27:30,510 --> 00:27:32,930 +agonist بيعمل platelet aggregation كيف بتتم + +296 +00:27:32,930 --> 00:27:37,490 +الحكاية؟ شوفوا كيف بتتممكانوا الادى بيومها مربط + +297 +00:27:37,490 --> 00:27:43,850 +برسيبته أيوة بنشط ال adenylate cyclase enzyme + +298 +00:27:43,850 --> 00:27:50,110 +ولمّا بتنشط ال adenylate cyclase بيزود ال calcium + +299 +00:27:50,110 --> 00:27:57,990 +influx بيزود ال intracellular calcium ولمّا + +300 +00:27:57,990 --> 00:28:03,130 +يزود ال intracellular calcium بيعمل a platelet + +301 +00:28:03,130 --> 00:28:03,750 +aggregation + +302 +00:28:06,370 --> 00:28:11,050 +بتقبل البروتينات اللي موجودة في الخلية وبالتالي + +303 +00:28:11,050 --> 00:28:13,870 +بيقللها وبتزوّد من غيري المهم بيعمل plate + +304 +00:28:13,870 --> 00:28:17,810 +let aggregation في الانترنت عيال تاني ADP development + +305 +00:28:17,810 --> 00:28:20,930 +من الريسيبتور بنشط من الريسيكلية بيعمل + +306 +00:28:20,930 --> 00:28:28,250 +intracellular calcium influx بيعمل calcium influx + +307 +00:28:28,250 --> 00:28:29,870 +وبالتالي بيزود ال intracellular calcium + +308 +00:28:29,870 --> 00:28:35,980 +concentration وبالتالي بيعمل platelet aggregation طيب + +309 +00:28:35,980 --> 00:28:44,240 +ايش بيعمل تيكلوبيدين او تيكلوبيدو جرين؟ بيعمل هنا + +310 +00:28:44,240 --> 00:28:53,160 +بيعمل blocking لل ADP receptor و + +311 +00:28:53,160 --> 00:28:57,520 +بالتالي شو بيصير في الادناليز سايكليز؟ بطر، + +312 +00:28:57,520 --> 00:29:03,680 +ادناليز 
سايكليز بطرش مش هيزيد الكرش + +313 +00:29:05,210 --> 00:29:11,410 +100% يبقى عرفنا إيش هو ال Clopidogrel شو بيعمل + +314 +00:29:11,410 --> 00:29:17,150 +طبعا بيتاخد بيقلل ال incidence of stroke و + +315 +00:29:17,150 --> 00:29:25,050 +الميخاردة في الفرشة بيبنعطه في الغالب مقلق أسبرين + +316 +00:29:25,050 --> 00:29:29,150 +كمان بيبنعطيهم مع بعض و بيعتبر اليوم ال drug of + +317 +00:29:29,150 --> 00:29:35,710 +choice لسبك أكيد أسترات خنبوسيو ال post ischemic + +318 +00:29:35,710 --> 00:29:40,550 +stroke treatment ناس اللي بيعملوا قصطرة لازم + +319 +00:29:40,550 --> 00:29:45,810 +يعملوا ياخدوا Plavix او ياخدوا Clopidogrel جابل + +320 +00:29:45,810 --> 00:29:52,430 +العملية مشروع جابل العملية مشروع وبالتالي بعطوا كل + +321 +00:29:52,430 --> 00:29:56,070 +اللي .. ليش؟ لأنه وجدوا من المضاعفات هذه العملية + +322 +00:29:56,070 --> 00:30:02,110 +اللي القصطرة انه ال wire اللي بيخش اللي بيسلكوا + +323 +00:30:02,110 --> 00:30:06,460 +فيه شريان هذا عبارة عن activator عبارة عن .. بيعتبر + +324 +00:30:06,460 --> 00:30:09,240 +ك agonist للي بلايك اللي فتشتها وضع للي بلايك اللي + +325 +00:30:09,240 --> 00:30:13,620 +ضايقه عليه فعشان يعملوا تعطيل للي بلايك اللي فده + +326 +00:30:13,620 --> 00:30:21,840 +كلوبيدو غيرينجة بالاسبوع من إيه؟ من القسترال ودوع + +327 +00:30:21,840 --> 00:30:26,520 +ثاني من الألاجات اللي هو 2B3A inhibitor 2B3A + +328 +00:30:26,520 --> 00:30:32,440 +inhibitor فهو receptor كلنا بنعرف 2B3Aمظبوط هو + +329 +00:30:32,440 --> 00:30:36,840 +ريسبتور على سطح لت لت برتبط بالفيبرانجين وفي + +330 +00:30:36,840 --> 00:30:41,340 +البواب بالبرنامج مظبوط وبيعمل aggregation بعمليات + +331 +00:30:41,340 --> 00:30:47,200 +لت لت aggregation صح؟ زي اي agonist تاني ممكن + +332 +00:30:47,200 --> 00:30:52,200 +يتنشط الريسبتور هذا ويمسك بالإياش ويعمل + +333 +00:30:52,200 --> 00:30:55,180 +aggregation وهذه الميكانيزة مش عارفة، شايفين؟ هذه + +334 +00:30:55,180 --> 00:31:00,380 +لت لت التجاين على سطح ريسبتور، ده هو 2G3لو حطينا + +335 +00:31:00,380 --> 00:31:04,180 +لها agonist شوف بيصير 
لها ايه؟ little degradation + +336 +00:31:04,180 --> 00:31:06,740 +بسكت في ال fibrinogen و اردت ال little + +337 +00:31:06,740 --> 00:31:12,800 +degradation لكن لو حطينا antagonist لل 2b3a شوفوا + +338 +00:31:12,800 --> 00:31:18,880 +ايش صار؟ صار في blocking لل receptor مش بيقدر يمسك + +339 +00:31:18,880 --> 00:31:22,400 +ال fibrinogen ولا ال formula برهن مش بيقدرش يمسك + +340 +00:31:22,400 --> 00:31:29,670 +وبالتالي we block the platelet function or fluidأهم + +341 +00:31:29,670 --> 00:31:36,410 +معظم ال inhibitors هو مصطلح يطلق على monoclonal + +342 +00:31:36,410 --> 00:31:42,590 +antibody بيعمل blocking specifically لمين لرسالتك + +343 +00:31:42,590 --> 00:31:46,250 +monoclonal antibody يعني ايه؟ بيتبط specifically + +344 +00:31:46,250 --> 00:31:51,690 +بمين؟ من الأمثل على ذلك Abciximab + +345 +00:31:58,400 --> 00:32:02,960 +و Tirofiban وفي أمثلة كثيرة على ذلك كل جولة + +346 +00:32:02,960 --> 00:32:09,680 +الهزيمة معينة مافي + +347 +00:32:09,680 --> 00:32:17,700 +فشبان مافي حد او نسوان طيب مجموعة رابعة فيبراليتكس + +348 +00:32:17,700 --> 00:32:21,700 +مذيبات الجلطة مذيبات الجلطة يعني احنا خدنا ال + +349 +00:32:21,700 --> 00:32:27,300 +anticoagulant وخدنا ال antiplatelet صح؟الـ + +350 +00:32:27,300 --> 00:32:29,840 +Anticoagulant اتخذنا عليها الورفين والهبارين + +351 +00:32:29,840 --> 00:32:33,820 +والانتيفليتلي اتخذنا عليها تلاتة أسبرين و + +352 +00:32:33,820 --> 00:32:38,400 +Clopidogrel او التكلوبيدين ثم 2-BPA انهيبوتر + +353 +00:32:38,400 --> 00:32:43,800 +وهالجيتر اننا نحكي في مين؟ في المضيبات الجلطة او + +354 +00:32:43,800 --> 00:32:50,120 +ال fibrinolytics طبعا احنا بندوب الجلطة ليش؟ لان + +355 +00:32:50,120 --> 00:32:54,590 +الجلطة بتعمل obstruction بتعمل occlusionوبالتالي + +356 +00:32:54,590 --> 00:32:59,030 +تمنع وصول الدم الى أعضاء الجسم المختلفة فيوم ما + +357 +00:32:59,030 --> 00:33:04,690 +ندوّبها we restore بنرجع الدم الى مجرى وبالتالي + +358 +00:33:04,690 --> 00:33:10,170 +بنعمل good profusion تروية لمين for our body ومجرى + +359 +00:33:10,170 
--> 00:33:16,650 +فبنعمل إعادة تروية لمين للأعضاء الجسم وبالتالي ده + +360 +00:33:16,650 --> 00:33:22,430 +طبعا كده بنعمل الحفظ الجسمي ومنشطات + +361 +00:33:22,430 --> 00:33:29,000 +لللازمة الموجودة خدناهم قبل ذلك ايه نشوف + +362 +00:33:29,000 --> 00:33:34,660 +لازميرجين اكتبيتر ثم اليورو كاينيز و الستربتو + +363 +00:33:34,660 --> 00:33:40,600 +كاينيز و الanti أو ال streptokinase كلها عبارة عن ايش + +364 +00:33:40,600 --> 00:33:45,320 +منشطات و طبعا هذا اللي كان يجب ان اخدناه بالتفصيل + +365 +00:33:45,320 --> 00:33:46,340 +ده برنامج + +366 +00:33:52,690 --> 00:33:56,410 +و في موزة إصابة نيجاتيف يعني ايش نهيبشن و موزة + +367 +00:33:56,410 --> 00:33:58,790 +إصابة يعني ايش نهيبشن و موزة إصابة يعني ايش نهيبشن + +368 +00:33:58,790 --> 00:34:01,530 +و موزة إصابة يعني ايش نهيبشن و موزة إصابة يعني ايش + +369 +00:34:01,530 --> 00:34:06,490 +نهيبشن و موزة إصابة يعني ايش نهيبشن و موزة إصابة + +370 +00:34:06,490 --> 00:34:09,250 +يعني ايش نهيبشن و موزة إصابة يعني ايش نهيبشن و + +371 +00:34:09,250 --> 00:34:12,870 +موزة إصابة يعني ايش نهيبشن و موزة إصابة يعني ايش + +372 +00:34:12,870 --> 00:34:22,590 +نهيبشن و موزة إصابة يعني ايش reperfusion يعني + +373 +00:34:22,590 --> 00:34:27,290 +ايش؟ إعادة التروية لل infarcted area وبالتالى + +374 +00:34:27,290 --> 00:34:35,650 +بنحافظ على ال tissue normal tissue ماشي فبنقولتك + +375 +00:34:35,650 --> 00:34:40,730 +طبعا success نقطة مهمة شديدة جدا شديدة نجاح العلاج تبع + +376 +00:34:40,730 --> 00:34:47,050 +المدير بجلطة بيعتمد على عمر الجلطة بيعتمد على إيه؟ + +377 +00:34:47,050 --> 00:34:50,850 +على عمر الجلطة يعني الجلطة الحديثة سهلة التكسير + +378 +00:34:51,650 --> 00:34:59,710 +الجلطة القديمة صعب جدا تتكسر صعب جدا تتكسر ده بيضرب + +379 +00:34:59,710 --> 00:35:03,310 +فيها ويضرب فيها الطبيب لغاية ما يفتحها ومرات + +380 +00:35:03,310 --> 00:35:08,050 +يحتاج إلى يومين في مريض طبعا + +381 +00:35:08,050 --> 00:35:15,850 +الخطر قوي جدا جدا جدا في الجلطة القديمة ليه؟ لأنه + +382 +00:35:15,850 --> 00:35:24,720 +مُسلّح مش سهل التكسير الجلطة 
القديمة تكون صلبة والجزء + +383 +00:35:24,720 --> 00:35:31,800 +اللي يكسر فيه جلطة already بيكون يادس فسهل تكسر + +384 +00:35:31,800 --> 00:35:38,680 +فسهل تنفجر وهو قادر فيها سهل ينفجر الشريان أو + +385 +00:35:38,680 --> 00:35:42,840 +الأوعية اللي فيها الدموع عشان هيك يقولوا إنه عشان + +386 +00:35:42,840 --> 00:35:46,280 +يصير فيه نجاح للعمل اللي بعمله لازم يتاخد العلاج قبل + +387 +00:35:46,280 --> 00:35:51,990 +سبع أيام أو في حدود أول سبع أيام حدوث الجلطة و + +388 +00:35:51,990 --> 00:35:55,550 +المرمورة الريموليزم اليومين الأول والأيام ما يبالغ + +389 +00:35:55,550 --> 00:36:01,730 +فرش في أول ساعتين لأربع ساعات و ال stroke أقل من + +390 +00:36:01,730 --> 00:36:07,590 +إيش لثلاث ساعات من خاتم الإيش يا شباب برضه من + +391 +00:36:07,590 --> 00:36:11,670 +مُضادات الجلطة دي تماما ال FDA استعمل ال .. + +392 +00:36:11,670 --> 00:36:15,930 +Anticoagulant برضه Anticoagulant دي وكمان هتروحي ممكن بعتهم + +393 +00:36:15,930 --> 00:36:17,050 +سبعين ساعة دي بيتيو + +394 +00:36:21,390 --> 00:36:27,570 +الجنة هو الخوش المنوية، حد قده شوية، يالا أكل + +395 +00:36:27,570 --> 00:36:29,710 +عشرة، ألعب وأطل بخير diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/VwavrJFmS8o_raw.json b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/VwavrJFmS8o_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..76f0b8d62bd39e3c6594a1aff696a30d0279e564 --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/VwavrJFmS8o_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 5030, "start": 20.7, "end": 50.3, "text": "حاضر من جهة مشتركة بسم الله الرحمن الرحيم يوم ان شاء الله هنستكمل hereditary platelet defense بس هناخدها from laboratory point of view يعني هنتطرق للفعصات اللتي نقوم بيها لكي نشخص platelet disorders طبعا معروف من مظاهر ال platelet disorders أن يكون فيه بتاشي وهذه صورة معجلة لل بتاشي", "tokens": [5016, 46958, 2288, 9154, 10874, 3224, 3660, 37893, 2655, 31747, 3660, 4724, 38251, 21984, 34892, 5016, 27842, 34892, 5016, 32640, 7251, 20498, 16472, 13412, 16606, 21984, 8032, 1863, 
14851, 24793, 1211, 720, 292, 4109, 3403, 15966, 7654, 4724, 3794, 8032, 1863, 47283, 3215, 11296, 490, 16523, 935, 295, 1910, 37495, 22653, 8032, 29399, 9566, 2288, 4587, 24976, 5172, 3615, 9381, 9307, 13672, 31371, 8717, 4587, 20498, 4724, 1829, 11296, 5296, 4117, 1829, 8717, 8592, 9778, 9381, 3403, 15966, 20261, 23032, 3555, 3615, 995, 20449, 32887, 5172, 9154, 3714, 19913, 40294, 2288, 2423, 3403, 15966, 20261, 14739, 7251, 30544, 8978, 3224, 39894, 33599, 1829, 37037, 24192, 20328, 13063, 3660, 20449, 7435, 37977, 24976, 39894, 33599, 1829], "avg_logprob": -0.242456897836307, "compression_ratio": 1.6564885496183206, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 20.7, "end": 21.02, "word": "حاضر", "probability": 0.53515625}, {"start": 21.02, "end": 21.14, "word": " من", "probability": 0.291259765625}, {"start": 21.14, "end": 21.24, "word": " جهة", "probability": 0.59716796875}, {"start": 21.24, "end": 21.56, "word": " مشتركة", "probability": 0.6416015625}, {"start": 21.56, "end": 21.9, "word": " بسم", "probability": 0.3629150390625}, {"start": 21.9, "end": 22.1, "word": " الله", "probability": 0.95556640625}, {"start": 22.1, "end": 22.4, "word": " الرحمن", "probability": 0.9327799479166666}, {"start": 22.4, "end": 22.76, "word": " الرحيم", "probability": 0.9866536458333334}, {"start": 22.76, "end": 24.08, "word": " يوم", "probability": 0.6600341796875}, {"start": 24.08, "end": 24.18, "word": " ان", "probability": 0.54345703125}, {"start": 24.18, "end": 24.3, "word": " شاء", "probability": 0.977783203125}, {"start": 24.3, "end": 24.34, "word": " الله", "probability": 0.96435546875}, {"start": 24.34, "end": 24.94, "word": " هنستكمل", "probability": 0.87080078125}, {"start": 24.94, "end": 25.38, "word": " hereditary", "probability": 0.7060546875}, {"start": 25.38, "end": 25.92, "word": " platelet", "probability": 0.725341796875}, {"start": 25.92, "end": 27.14, "word": " defense", "probability": 0.26611328125}, {"start": 27.14, "end": 28.02, "word": 
" بس", "probability": 0.85107421875}, {"start": 28.02, "end": 28.56, "word": " هناخدها", "probability": 0.96181640625}, {"start": 28.56, "end": 28.96, "word": " from", "probability": 0.88720703125}, {"start": 28.96, "end": 29.7, "word": " laboratory", "probability": 0.86669921875}, {"start": 29.7, "end": 30.06, "word": " point", "probability": 0.9833984375}, {"start": 30.06, "end": 30.24, "word": " of", "probability": 0.974609375}, {"start": 30.24, "end": 30.46, "word": " view", "probability": 0.91943359375}, {"start": 30.46, "end": 31.38, "word": " يعني", "probability": 0.89208984375}, {"start": 31.38, "end": 32.14, "word": " هنتطرق", "probability": 0.8853515625}, {"start": 32.14, "end": 32.96, "word": " للفعصات", "probability": 0.89365234375}, {"start": 32.96, "end": 33.3, "word": " اللتي", "probability": 0.821533203125}, {"start": 33.3, "end": 33.76, "word": " نقوم", "probability": 0.9620768229166666}, {"start": 33.76, "end": 34.36, "word": " بيها", "probability": 0.87939453125}, {"start": 34.36, "end": 35.32, "word": " لكي", "probability": 0.8408203125}, {"start": 35.32, "end": 36.84, "word": " نشخص", "probability": 0.9742431640625}, {"start": 36.84, "end": 38.16, "word": " platelet", "probability": 0.765869140625}, {"start": 38.16, "end": 38.9, "word": " disorders", "probability": 0.966796875}, {"start": 38.9, "end": 41.54, "word": " طبعا", "probability": 0.8297119140625}, {"start": 41.54, "end": 43.22, "word": " معروف", "probability": 0.9597981770833334}, {"start": 43.22, "end": 43.74, "word": " من", "probability": 0.98876953125}, {"start": 43.74, "end": 44.44, "word": " مظاهر", "probability": 0.9901123046875}, {"start": 44.44, "end": 44.62, "word": " ال", "probability": 0.73779296875}, {"start": 44.62, "end": 45.06, "word": " platelet", "probability": 0.7705078125}, {"start": 45.06, "end": 45.8, "word": " disorders", "probability": 0.9208984375}, {"start": 45.8, "end": 46.1, "word": " أن", "probability": 0.43017578125}, {"start": 46.1, "end": 46.32, "word": 
" يكون", "probability": 0.79736328125}, {"start": 46.32, "end": 46.54, "word": " فيه", "probability": 0.759033203125}, {"start": 46.54, "end": 47.14, "word": " بتاشي", "probability": 0.6829427083333334}, {"start": 47.14, "end": 48.58, "word": " وهذه", "probability": 0.699462890625}, {"start": 48.58, "end": 48.84, "word": " صورة", "probability": 0.9676106770833334}, {"start": 48.84, "end": 49.32, "word": " معجلة", "probability": 0.74853515625}, {"start": 49.32, "end": 49.58, "word": " لل", "probability": 0.818359375}, {"start": 49.58, "end": 50.3, "word": " بتاشي", "probability": 0.9384765625}], "temperature": 1.0}, {"id": 2, "seek": 7327, "start": 51.13, "end": 73.27, "text": "هي على مستوى رجلين كاملة في bleeding شايفينها انتوا او لا؟ ما يميزها طبعا في كتير من الناس ممكن اقولك قدر حساسية it's an allergy الفرق بينها و بين ال allergy ان حاجتين الحاجة الأولانية انه does not planch", "tokens": [3224, 1829, 15844, 3714, 14851, 2407, 7578, 12602, 7435, 1211, 9957, 9122, 10943, 37977, 8978, 19312, 13412, 995, 33911, 9957, 11296, 16472, 2655, 14407, 1975, 2407, 20193, 22807, 19446, 7251, 2304, 1829, 11622, 11296, 23032, 3555, 3615, 995, 8978, 9122, 2655, 13546, 9154, 2423, 8315, 3794, 3714, 43020, 1975, 39648, 4117, 12174, 3215, 2288, 11331, 3794, 32277, 10632, 309, 311, 364, 41505, 27188, 2288, 4587, 49374, 11296, 4032, 49374, 2423, 41505, 16472, 11331, 26108, 2655, 9957, 21542, 26108, 3660, 16247, 12610, 7649, 10632, 16472, 3224, 775, 406, 1393, 339], "avg_logprob": -0.19045139137241576, "compression_ratio": 1.5817307692307692, "no_speech_prob": 4.231929779052734e-06, "words": [{"start": 51.13, "end": 51.49, "word": "هي", "probability": 0.72265625}, {"start": 51.49, "end": 51.85, "word": " على", "probability": 0.79443359375}, {"start": 51.85, "end": 52.65, "word": " مستوى", "probability": 0.976806640625}, {"start": 52.65, "end": 53.39, "word": " رجلين", "probability": 0.84228515625}, {"start": 53.39, "end": 54.09, "word": " كاملة", "probability": 0.978515625}, {"start": 
54.09, "end": 54.77, "word": " في", "probability": 0.7685546875}, {"start": 54.77, "end": 55.17, "word": " bleeding", "probability": 0.7958984375}, {"start": 55.17, "end": 56.15, "word": " شايفينها", "probability": 0.872412109375}, {"start": 56.15, "end": 56.31, "word": " انتوا", "probability": 0.7703450520833334}, {"start": 56.31, "end": 56.39, "word": " او", "probability": 0.609375}, {"start": 56.39, "end": 56.77, "word": " لا؟", "probability": 0.624755859375}, {"start": 56.77, "end": 58.67, "word": " ما", "probability": 0.81982421875}, {"start": 58.67, "end": 59.57, "word": " يميزها", "probability": 0.9857421875}, {"start": 59.57, "end": 60.15, "word": " طبعا", "probability": 0.9442138671875}, {"start": 60.15, "end": 60.43, "word": " في", "probability": 0.7607421875}, {"start": 60.43, "end": 60.79, "word": " كتير", "probability": 0.8992513020833334}, {"start": 60.79, "end": 60.97, "word": " من", "probability": 0.97607421875}, {"start": 60.97, "end": 61.31, "word": " الناس", "probability": 0.9939778645833334}, {"start": 61.31, "end": 61.53, "word": " ممكن", "probability": 0.8935546875}, {"start": 61.53, "end": 61.87, "word": " اقولك", "probability": 0.91357421875}, {"start": 61.87, "end": 62.05, "word": " قدر", "probability": 0.7119140625}, {"start": 62.05, "end": 62.69, "word": " حساسية", "probability": 0.9285888671875}, {"start": 62.69, "end": 63.97, "word": " it's", "probability": 0.716796875}, {"start": 63.97, "end": 64.11, "word": " an", "probability": 0.87158203125}, {"start": 64.11, "end": 64.49, "word": " allergy", "probability": 0.431396484375}, {"start": 64.49, "end": 65.79, "word": " الفرق", "probability": 0.739990234375}, {"start": 65.79, "end": 66.05, "word": " بينها", "probability": 0.94775390625}, {"start": 66.05, "end": 66.13, "word": " و", "probability": 0.7890625}, {"start": 66.13, "end": 66.27, "word": " بين", "probability": 0.92822265625}, {"start": 66.27, "end": 66.33, "word": " ال", "probability": 0.88916015625}, {"start": 66.33, "end": 
66.69, "word": " allergy", "probability": 0.38037109375}, {"start": 66.69, "end": 67.09, "word": " ان", "probability": 0.451416015625}, {"start": 67.09, "end": 67.73, "word": " حاجتين", "probability": 0.91796875}, {"start": 67.73, "end": 69.23, "word": " الحاجة", "probability": 0.92578125}, {"start": 69.23, "end": 69.87, "word": " الأولانية", "probability": 0.9537353515625}, {"start": 69.87, "end": 71.55, "word": " انه", "probability": 0.9052734375}, {"start": 71.55, "end": 72.09, "word": " does", "probability": 0.89208984375}, {"start": 72.09, "end": 72.37, "word": " not", "probability": 0.970703125}, {"start": 72.37, "end": 73.27, "word": " planch", "probability": 0.4869384765625}], "temperature": 1.0}, {"id": 3, "seek": 10491, "start": 76.07, "end": 104.91, "text": "بتضغط عليها بتحيش انت في الحساسية بتضغط عليها بتلاقي المنطقة صارت صفره مظبوط لإنها superficial 100% بس هذه لأ already bleeding under the skin فمهما تضغط عليها it will not disappear الحاجة التانية ان it's not palpable فالاليرجي بتلاقي فيه tender and palpable ماشي بتلاقيه محسوسة ومعيش", "tokens": [3555, 2655, 11242, 17082, 9566, 25894, 11296, 39894, 5016, 1829, 8592, 16472, 2655, 8978, 21542, 3794, 32277, 10632, 39894, 11242, 17082, 9566, 25894, 11296, 39894, 15040, 38436, 9673, 1863, 9566, 28671, 20328, 9640, 2655, 20328, 5172, 2288, 3224, 3714, 19913, 3555, 2407, 9566, 5296, 28814, 1863, 11296, 34622, 2319, 4, 4724, 3794, 29538, 5296, 10721, 1217, 19312, 833, 264, 3178, 6156, 2304, 3224, 15042, 6055, 11242, 17082, 9566, 25894, 11296, 309, 486, 406, 11596, 21542, 26108, 3660, 16712, 7649, 10632, 16472, 309, 311, 406, 3984, 30454, 6156, 6027, 6027, 13546, 7435, 1829, 39894, 15040, 38436, 8978, 3224, 15036, 293, 3984, 30454, 3714, 33599, 1829, 39894, 15040, 38436, 3224, 3714, 5016, 3794, 41779, 3660, 4032, 2304, 3615, 1829, 8592], "avg_logprob": -0.19905462585577444, "compression_ratio": 1.633587786259542, "no_speech_prob": 7.748603820800781e-07, "words": [{"start": 76.07, "end": 76.27, "word": "بتضغط", 
"probability": 0.74287109375}, {"start": 76.27, "end": 76.61, "word": " عليها", "probability": 0.98046875}, {"start": 76.61, "end": 77.21, "word": " بتحيش", "probability": 0.6717529296875}, {"start": 77.21, "end": 78.01, "word": " انت", "probability": 0.76806640625}, {"start": 78.01, "end": 78.11, "word": " في", "probability": 0.74609375}, {"start": 78.11, "end": 78.65, "word": " الحساسية", "probability": 0.9505615234375}, {"start": 78.65, "end": 79.07, "word": " بتضغط", "probability": 0.9508056640625}, {"start": 79.07, "end": 79.59, "word": " عليها", "probability": 0.9892578125}, {"start": 79.59, "end": 80.15, "word": " بتلاقي", "probability": 0.8582356770833334}, {"start": 80.15, "end": 80.67, "word": " المنطقة", "probability": 0.9371337890625}, {"start": 80.67, "end": 81.07, "word": " صارت", "probability": 0.8458658854166666}, {"start": 81.07, "end": 81.85, "word": " صفره", "probability": 0.6671142578125}, {"start": 81.85, "end": 82.75, "word": " مظبوط", "probability": 0.81005859375}, {"start": 82.75, "end": 84.17, "word": " لإنها", "probability": 0.78533935546875}, {"start": 84.17, "end": 84.75, "word": " superficial", "probability": 0.615234375}, {"start": 84.75, "end": 85.83, "word": " 100", "probability": 0.31494140625}, {"start": 85.83, "end": 87.25, "word": "%", "probability": 0.904296875}, {"start": 87.25, "end": 87.59, "word": " بس", "probability": 0.78515625}, {"start": 87.59, "end": 87.89, "word": " هذه", "probability": 0.70361328125}, {"start": 87.89, "end": 88.19, "word": " لأ", "probability": 0.955322265625}, {"start": 88.19, "end": 88.81, "word": " already", "probability": 0.94921875}, {"start": 88.81, "end": 89.27, "word": " bleeding", "probability": 0.91455078125}, {"start": 89.27, "end": 89.59, "word": " under", "probability": 0.83837890625}, {"start": 89.59, "end": 89.79, "word": " the", "probability": 0.908203125}, {"start": 89.79, "end": 90.15, "word": " skin", "probability": 0.958984375}, {"start": 90.15, "end": 90.67, "word": " فمهما", 
"probability": 0.9169921875}, {"start": 90.67, "end": 91.11, "word": " تضغط", "probability": 0.990234375}, {"start": 91.11, "end": 91.67, "word": " عليها", "probability": 0.994384765625}, {"start": 91.67, "end": 92.63, "word": " it", "probability": 0.73193359375}, {"start": 92.63, "end": 92.77, "word": " will", "probability": 0.85888671875}, {"start": 92.77, "end": 93.01, "word": " not", "probability": 0.96044921875}, {"start": 93.01, "end": 94.17, "word": " disappear", "probability": 0.77783203125}, {"start": 94.17, "end": 95.15, "word": " الحاجة", "probability": 0.7090657552083334}, {"start": 95.15, "end": 95.65, "word": " التانية", "probability": 0.9723307291666666}, {"start": 95.65, "end": 96.49, "word": " ان", "probability": 0.77685546875}, {"start": 96.49, "end": 96.71, "word": " it's", "probability": 0.59326171875}, {"start": 96.71, "end": 96.97, "word": " not", "probability": 0.95166015625}, {"start": 96.97, "end": 97.57, "word": " palpable", "probability": 0.958984375}, {"start": 97.57, "end": 98.75, "word": " فالاليرجي", "probability": 0.7161458333333334}, {"start": 98.75, "end": 99.63, "word": " بتلاقي", "probability": 0.9547526041666666}, {"start": 99.63, "end": 100.11, "word": " فيه", "probability": 0.946533203125}, {"start": 100.11, "end": 100.55, "word": " tender", "probability": 0.8505859375}, {"start": 100.55, "end": 100.91, "word": " and", "probability": 0.9599609375}, {"start": 100.91, "end": 101.57, "word": " palpable", "probability": 0.937255859375}, {"start": 101.57, "end": 102.57, "word": " ماشي", "probability": 0.8479817708333334}, {"start": 102.57, "end": 102.97, "word": " بتلاقيه", "probability": 0.8118896484375}, {"start": 102.97, "end": 103.57, "word": " محسوسة", "probability": 0.918359375}, {"start": 103.57, "end": 104.91, "word": " ومعيش", "probability": 0.85654296875}], "temperature": 1.0}, {"id": 4, "seek": 13085, "start": 105.35, "end": 130.85, "text": "و محجرة مظبوط تبدر and palpable بس هنا لأ it's not palpable لإنه it's under the 
skin leak over the skin طيب أخر أمراض المراثية اللي بتتعلق بال platelet function قسموها according to platelet function according to the stages of platelet function", "tokens": [2407, 3714, 5016, 7435, 25720, 3714, 19913, 3555, 2407, 9566, 6055, 44510, 2288, 293, 3984, 30454, 4724, 3794, 34105, 5296, 10721, 309, 311, 406, 3984, 30454, 5296, 28814, 1863, 3224, 309, 311, 833, 264, 3178, 17143, 670, 264, 3178, 23032, 1829, 3555, 5551, 34740, 5551, 29973, 46958, 9673, 23557, 12984, 10632, 13672, 1829, 39894, 2655, 30241, 4587, 20666, 3403, 15966, 2445, 12174, 38251, 2407, 11296, 4650, 281, 3403, 15966, 2445, 4650, 281, 264, 10232, 295, 3403, 15966, 2445], "avg_logprob": -0.2668117164056512, "compression_ratio": 1.5427135678391959, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 105.35, "end": 105.53, "word": "و", "probability": 0.360595703125}, {"start": 105.53, "end": 106.13, "word": " محجرة", "probability": 0.873046875}, {"start": 106.13, "end": 106.93, "word": " مظبوط", "probability": 0.87373046875}, {"start": 106.93, "end": 107.61, "word": " تبدر", "probability": 0.4532877604166667}, {"start": 107.61, "end": 107.95, "word": " and", "probability": 0.139404296875}, {"start": 107.95, "end": 108.55, "word": " palpable", "probability": 0.9482421875}, {"start": 108.55, "end": 109.31, "word": " بس", "probability": 0.92529296875}, {"start": 109.31, "end": 109.55, "word": " هنا", "probability": 0.93408203125}, {"start": 109.55, "end": 109.83, "word": " لأ", "probability": 0.89013671875}, {"start": 109.83, "end": 110.05, "word": " it's", "probability": 0.747314453125}, {"start": 110.05, "end": 110.21, "word": " not", "probability": 0.94287109375}, {"start": 110.21, "end": 110.77, "word": " palpable", "probability": 0.938720703125}, {"start": 110.77, "end": 111.35, "word": " لإنه", "probability": 0.81982421875}, {"start": 111.35, "end": 111.95, "word": " it's", "probability": 0.88525390625}, {"start": 111.95, "end": 112.35, "word": " under", "probability": 
0.8740234375}, {"start": 112.35, "end": 112.63, "word": " the", "probability": 0.91845703125}, {"start": 112.63, "end": 112.93, "word": " skin", "probability": 0.9638671875}, {"start": 112.93, "end": 113.33, "word": " leak", "probability": 0.08636474609375}, {"start": 113.33, "end": 113.65, "word": " over", "probability": 0.896484375}, {"start": 113.65, "end": 113.91, "word": " the", "probability": 0.841796875}, {"start": 113.91, "end": 114.19, "word": " skin", "probability": 0.9609375}, {"start": 114.19, "end": 117.37, "word": " طيب", "probability": 0.7925618489583334}, {"start": 117.37, "end": 118.31, "word": " أخر", "probability": 0.743408203125}, {"start": 118.31, "end": 119.51, "word": " أمراض", "probability": 0.90869140625}, {"start": 119.51, "end": 121.11, "word": " المراثية", "probability": 0.629150390625}, {"start": 121.11, "end": 121.21, "word": " اللي", "probability": 0.7119140625}, {"start": 121.21, "end": 121.65, "word": " بتتعلق", "probability": 0.9747314453125}, {"start": 121.65, "end": 121.85, "word": " بال", "probability": 0.935546875}, {"start": 121.85, "end": 122.25, "word": " platelet", "probability": 0.593505859375}, {"start": 122.25, "end": 122.77, "word": " function", "probability": 0.9462890625}, {"start": 122.77, "end": 125.29, "word": " قسموها", "probability": 0.97998046875}, {"start": 125.29, "end": 125.69, "word": " according", "probability": 0.95947265625}, {"start": 125.69, "end": 125.97, "word": " to", "probability": 0.970703125}, {"start": 125.97, "end": 126.41, "word": " platelet", "probability": 0.8310546875}, {"start": 126.41, "end": 126.89, "word": " function", "probability": 0.97119140625}, {"start": 126.89, "end": 127.65, "word": " according", "probability": 0.90234375}, {"start": 127.65, "end": 127.91, "word": " to", "probability": 0.9697265625}, {"start": 127.91, "end": 128.15, "word": " the", "probability": 0.276123046875}, {"start": 128.15, "end": 129.65, "word": " stages", "probability": 0.9130859375}, {"start": 129.65, 
"end": 130.01, "word": " of", "probability": 0.97705078125}, {"start": 130.01, "end": 130.37, "word": " platelet", "probability": 0.90478515625}, {"start": 130.37, "end": 130.85, "word": " function", "probability": 0.96728515625}], "temperature": 1.0}, {"id": 5, "seek": 15768, "start": 131.66, "end": 157.68, "text": "فكنا بنعرف أن الـ stages of platelet function الـ platelet يتمور في ثلاث مراحل أساسية أو خمس مراحل حسب التقسيم الإندكياني مرحلة الأولى أدهيجيا وبالتالي في أمراض تتعلق بالأدهيجيا ثم المرحلة التانية change in shape هي أخدنا بظبط ثم secretion وبالتالي في احتمال في أمراض تتعلق بال secretion", "tokens": [5172, 4117, 8315, 44945, 3615, 28480, 14739, 2423, 39184, 10232, 295, 3403, 15966, 2445, 2423, 39184, 3403, 15966, 7251, 39237, 13063, 8978, 38637, 1211, 5718, 104, 3714, 23557, 5016, 1211, 5551, 3794, 32277, 10632, 34051, 16490, 2304, 3794, 3714, 23557, 5016, 1211, 11331, 35457, 16712, 4587, 3794, 32640, 33688, 41260, 4117, 1829, 7649, 1829, 3714, 2288, 5016, 37977, 16247, 12610, 7578, 5551, 3215, 3224, 1829, 7435, 25528, 46599, 6027, 2655, 6027, 1829, 8978, 5551, 29973, 46958, 6055, 2655, 30241, 4587, 20666, 10721, 3215, 3224, 1829, 7435, 25528, 38637, 2304, 9673, 2288, 5016, 37977, 16712, 7649, 10632, 1319, 294, 3909, 39896, 5551, 9778, 3215, 8315, 4724, 19913, 3555, 9566, 38637, 2304, 4054, 313, 46599, 6027, 2655, 6027, 1829, 8978, 1975, 33753, 2304, 6027, 8978, 5551, 29973, 46958, 6055, 2655, 30241, 4587, 20666, 4054, 313], "avg_logprob": -0.2574626805622186, "compression_ratio": 1.9094650205761317, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 131.66, "end": 132.06, "word": "فكنا", "probability": 0.75439453125}, {"start": 132.06, "end": 132.44, "word": " بنعرف", "probability": 0.9444986979166666}, {"start": 132.44, "end": 132.62, "word": " أن", "probability": 0.40478515625}, {"start": 132.62, "end": 132.76, "word": " الـ", "probability": 0.42919921875}, {"start": 132.76, "end": 133.16, "word": " stages", "probability": 0.36865234375}, 
{"start": 133.16, "end": 133.42, "word": " of", "probability": 0.865234375}, {"start": 133.42, "end": 133.7, "word": " platelet", "probability": 0.734130859375}, {"start": 133.7, "end": 134.18, "word": " function", "probability": 0.91357421875}, {"start": 134.18, "end": 134.9, "word": " الـ", "probability": 0.36962890625}, {"start": 134.9, "end": 135.2, "word": " platelet", "probability": 0.695556640625}, {"start": 135.2, "end": 135.6, "word": " يتمور", "probability": 0.5093587239583334}, {"start": 135.6, "end": 135.76, "word": " في", "probability": 0.9033203125}, {"start": 135.76, "end": 135.98, "word": " ثلاث", "probability": 0.8038330078125}, {"start": 135.98, "end": 136.36, "word": " مراحل", "probability": 0.920654296875}, {"start": 136.36, "end": 136.94, "word": " أساسية", "probability": 0.9754638671875}, {"start": 136.94, "end": 137.68, "word": " أو", "probability": 0.9228515625}, {"start": 137.68, "end": 138.14, "word": " خمس", "probability": 0.9840494791666666}, {"start": 138.14, "end": 138.58, "word": " مراحل", "probability": 0.946533203125}, {"start": 138.58, "end": 138.88, "word": " حسب", "probability": 0.5283203125}, {"start": 138.88, "end": 139.36, "word": " التقسيم", "probability": 0.8570556640625}, {"start": 139.36, "end": 140.26, "word": " الإندكياني", "probability": 0.7057088216145834}, {"start": 140.26, "end": 141.58, "word": " مرحلة", "probability": 0.8533935546875}, {"start": 141.58, "end": 141.82, "word": " الأولى", "probability": 0.8870442708333334}, {"start": 141.82, "end": 142.64, "word": " أدهيجيا", "probability": 0.66314697265625}, {"start": 142.64, "end": 144.3, "word": " وبالتالي", "probability": 0.918359375}, {"start": 144.3, "end": 144.42, "word": " في", "probability": 0.79541015625}, {"start": 144.42, "end": 144.78, "word": " أمراض", "probability": 0.880859375}, {"start": 144.78, "end": 145.26, "word": " تتعلق", "probability": 0.9366455078125}, {"start": 145.26, "end": 145.98, "word": " بالأدهيجيا", "probability": 0.9095982142857143}, 
{"start": 145.98, "end": 147.26, "word": " ثم", "probability": 0.93701171875}, {"start": 147.26, "end": 147.98, "word": " المرحلة", "probability": 0.9732666015625}, {"start": 147.98, "end": 148.56, "word": " التانية", "probability": 0.970703125}, {"start": 148.56, "end": 149.12, "word": " change", "probability": 0.460205078125}, {"start": 149.12, "end": 149.38, "word": " in", "probability": 0.92578125}, {"start": 149.38, "end": 149.7, "word": " shape", "probability": 0.916015625}, {"start": 149.7, "end": 149.84, "word": " هي", "probability": 0.935546875}, {"start": 149.84, "end": 150.24, "word": " أخدنا", "probability": 0.750732421875}, {"start": 150.24, "end": 150.74, "word": " بظبط", "probability": 0.76312255859375}, {"start": 150.74, "end": 152.14, "word": " ثم", "probability": 0.973388671875}, {"start": 152.14, "end": 153.0, "word": " secretion", "probability": 0.627197265625}, {"start": 153.0, "end": 155.3, "word": " وبالتالي", "probability": 0.880810546875}, {"start": 155.3, "end": 155.44, "word": " في", "probability": 0.8916015625}, {"start": 155.44, "end": 155.94, "word": " احتمال", "probability": 0.918701171875}, {"start": 155.94, "end": 156.14, "word": " في", "probability": 0.5927734375}, {"start": 156.14, "end": 156.5, "word": " أمراض", "probability": 0.9464518229166666}, {"start": 156.5, "end": 157.0, "word": " تتعلق", "probability": 0.9779052734375}, {"start": 157.0, "end": 157.16, "word": " بال", "probability": 0.88427734375}, {"start": 157.16, "end": 157.68, "word": " secretion", "probability": 0.7239990234375}], "temperature": 1.0}, {"id": 6, "seek": 19058, "start": 165.4, "end": 190.58, "text": "أمراض تتعلق بالـ aggregation ثم هناك Miscellaneous Defects حسب الميكانيزمات و هناك Miscellaneous مختلفة يعني متشعرة سنبدأ بأول Defect وهو الذي يتعلق بالـ adhesion process وعشان أذكركم الـ adhesion هي عبارة عن عملية", "tokens": [10721, 2304, 2288, 46958, 6055, 2655, 30241, 4587, 20666, 39184, 16743, 399, 38637, 2304, 34105, 4117, 23240, 4164, 15447, 1346, 
1836, 82, 11331, 35457, 9673, 1829, 41361, 1829, 11622, 2304, 9307, 4032, 34105, 4117, 23240, 4164, 15447, 3714, 46456, 46538, 3660, 37495, 22653, 44650, 8592, 3615, 25720, 8608, 1863, 44510, 10721, 4724, 10721, 12610, 1346, 1836, 37037, 2407, 43527, 7251, 2655, 30241, 4587, 20666, 39184, 614, 38571, 1399, 4032, 3615, 8592, 7649, 5551, 8848, 37983, 24793, 2423, 39184, 614, 38571, 39896, 6225, 3555, 9640, 3660, 18871, 6225, 42213, 10632], "avg_logprob": -0.4305555413166682, "compression_ratio": 1.5833333333333333, "no_speech_prob": 1.1205673217773438e-05, "words": [{"start": 165.4, "end": 166.8, "word": "أمراض", "probability": 0.3023834228515625}, {"start": 166.8, "end": 167.4, "word": " تتعلق", "probability": 0.777252197265625}, {"start": 167.4, "end": 167.94, "word": " بالـ", "probability": 0.49810791015625}, {"start": 167.94, "end": 168.66, "word": " aggregation", "probability": 0.781494140625}, {"start": 168.66, "end": 169.36, "word": " ثم", "probability": 0.67578125}, {"start": 169.36, "end": 169.56, "word": " هناك", "probability": 0.472900390625}, {"start": 169.56, "end": 170.32, "word": " Miscellaneous", "probability": 0.6658732096354166}, {"start": 170.32, "end": 171.48, "word": " Defects", "probability": 0.6498209635416666}, {"start": 171.48, "end": 172.12, "word": " حسب", "probability": 0.799560546875}, {"start": 172.12, "end": 173.24, "word": " الميكانيزمات", "probability": 0.86474609375}, {"start": 173.24, "end": 174.44, "word": " و", "probability": 0.218994140625}, {"start": 174.44, "end": 176.3, "word": " هناك", "probability": 0.59686279296875}, {"start": 176.3, "end": 176.86, "word": " Miscellaneous", "probability": 0.6988525390625}, {"start": 176.86, "end": 177.68, "word": " مختلفة", "probability": 0.900146484375}, {"start": 177.68, "end": 177.92, "word": " يعني", "probability": 0.56072998046875}, {"start": 177.92, "end": 178.64, "word": " متشعرة", "probability": 0.79681396484375}, {"start": 178.64, "end": 180.2, "word": " سنبدأ", "probability": 
0.77801513671875}, {"start": 180.2, "end": 180.74, "word": " بأول", "probability": 0.8466796875}, {"start": 180.74, "end": 181.38, "word": " Defect", "probability": 0.7783203125}, {"start": 181.38, "end": 181.78, "word": " وهو", "probability": 0.86962890625}, {"start": 181.78, "end": 182.04, "word": " الذي", "probability": 0.64453125}, {"start": 182.04, "end": 182.7, "word": " يتعلق", "probability": 0.9114990234375}, {"start": 182.7, "end": 184.04, "word": " بالـ", "probability": 0.5379638671875}, {"start": 184.04, "end": 184.68, "word": " adhesion", "probability": 0.6693115234375}, {"start": 184.68, "end": 185.86, "word": " process", "probability": 0.23486328125}, {"start": 185.86, "end": 186.86, "word": " وعشان", "probability": 0.8720703125}, {"start": 186.86, "end": 187.58, "word": " أذكركم", "probability": 0.8729248046875}, {"start": 187.58, "end": 187.8, "word": " الـ", "probability": 0.72705078125}, {"start": 187.8, "end": 188.28, "word": " adhesion", "probability": 0.935302734375}, {"start": 188.28, "end": 189.24, "word": " هي", "probability": 0.8974609375}, {"start": 189.24, "end": 189.66, "word": " عبارة", "probability": 0.99658203125}, {"start": 189.66, "end": 189.86, "word": " عن", "probability": 0.974609375}, {"start": 189.86, "end": 190.58, "word": " عملية", "probability": 0.98779296875}], "temperature": 1.0}, {"id": 7, "seek": 21896, "start": 192.24, "end": 218.96, "text": "بتسير بعد blood vessel injury بعد blood vessel injury بطلع ال vomiblybrand اللى بربط ال platelet بالcollagen of sub endothelium صح؟ ال vomiblybrand بربط ممسك ال platelet من خلال ال receptors سميناه glycoprotein will be 1X every ماشى شباب؟", "tokens": [3555, 2655, 3794, 13546, 39182, 3390, 18098, 10454, 39182, 3390, 18098, 10454, 4724, 9566, 1211, 3615, 2423, 10135, 897, 356, 30476, 13672, 7578, 4724, 25513, 9566, 2423, 3403, 15966, 20666, 33891, 4698, 295, 1422, 917, 900, 338, 2197, 20328, 5016, 22807, 2423, 10135, 897, 356, 30476, 4724, 25513, 9566, 3714, 2304, 3794, 4117, 2423, 
3403, 15966, 9154, 16490, 1211, 6027, 2423, 34102, 8608, 2304, 1829, 8315, 3224, 22633, 13084, 81, 1370, 259, 486, 312, 502, 55, 633, 3714, 33599, 7578, 13412, 3555, 16758, 22807], "avg_logprob": -0.3681985238019158, "compression_ratio": 1.5223880597014925, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 192.24, "end": 193.02, "word": "بتسير", "probability": 0.8267822265625}, {"start": 193.02, "end": 193.56, "word": " بعد", "probability": 0.87158203125}, {"start": 193.56, "end": 194.28, "word": " blood", "probability": 0.86962890625}, {"start": 194.28, "end": 194.66, "word": " vessel", "probability": 0.6533203125}, {"start": 194.66, "end": 195.68, "word": " injury", "probability": 0.869140625}, {"start": 195.68, "end": 196.1, "word": " بعد", "probability": 0.288818359375}, {"start": 196.1, "end": 196.42, "word": " blood", "probability": 0.9609375}, {"start": 196.42, "end": 196.82, "word": " vessel", "probability": 0.77783203125}, {"start": 196.82, "end": 197.42, "word": " injury", "probability": 0.9296875}, {"start": 197.42, "end": 198.32, "word": " بطلع", "probability": 0.73236083984375}, {"start": 198.32, "end": 198.46, "word": " ال", "probability": 0.76220703125}, {"start": 198.46, "end": 199.34, "word": " vomiblybrand", "probability": 0.31842041015625}, {"start": 199.34, "end": 200.1, "word": " اللى", "probability": 0.812744140625}, {"start": 200.1, "end": 200.5, "word": " بربط", "probability": 0.9527994791666666}, {"start": 200.5, "end": 200.72, "word": " ال", "probability": 0.90869140625}, {"start": 200.72, "end": 201.38, "word": " platelet", "probability": 0.68408203125}, {"start": 201.38, "end": 202.44, "word": " بالcollagen", "probability": 0.623046875}, {"start": 202.44, "end": 202.74, "word": " of", "probability": 0.81103515625}, {"start": 202.74, "end": 203.04, "word": " sub", "probability": 0.72412109375}, {"start": 203.04, "end": 203.68, "word": " endothelium", "probability": 0.6966552734375}, {"start": 203.68, "end": 204.12, "word": " 
صح؟", "probability": 0.7727864583333334}, {"start": 204.12, "end": 205.16, "word": " ال", "probability": 0.64111328125}, {"start": 205.16, "end": 206.24, "word": " vomiblybrand", "probability": 0.950927734375}, {"start": 206.24, "end": 206.98, "word": " بربط", "probability": 0.9842122395833334}, {"start": 206.98, "end": 207.5, "word": " ممسك", "probability": 0.82586669921875}, {"start": 207.5, "end": 207.62, "word": " ال", "probability": 0.91015625}, {"start": 207.62, "end": 208.1, "word": " platelet", "probability": 0.908203125}, {"start": 208.1, "end": 208.3, "word": " من", "probability": 0.9970703125}, {"start": 208.3, "end": 208.64, "word": " خلال", "probability": 0.9832356770833334}, {"start": 208.64, "end": 208.78, "word": " ال", "probability": 0.92529296875}, {"start": 208.78, "end": 209.38, "word": " receptors", "probability": 0.82421875}, {"start": 209.38, "end": 210.64, "word": " سميناه", "probability": 0.77568359375}, {"start": 210.64, "end": 211.52, "word": " glycoprotein", "probability": 0.782568359375}, {"start": 211.52, "end": 212.04, "word": " will", "probability": 0.1510009765625}, {"start": 212.04, "end": 212.38, "word": " be", "probability": 0.94482421875}, {"start": 212.38, "end": 214.18, "word": " 1X", "probability": 0.5098876953125}, {"start": 214.18, "end": 214.72, "word": " every", "probability": 0.08258056640625}, {"start": 214.72, "end": 218.32, "word": " ماشى", "probability": 0.64599609375}, {"start": 218.32, "end": 218.96, "word": " شباب؟", "probability": 0.774169921875}], "temperature": 1.0}, {"id": 8, "seek": 24844, "start": 219.36, "end": 248.44, "text": "يبقى الخلل في ال adhesion defect ممكن نحصله بإما غياب ال receptor أو غياب ال vomibrand ماشي؟ اللي هو اللي برضه فغياب ال receptor بيسموه المرض perinatal liver disease وفي حالة غياب الرابط أو vomibrand بيسموه المرض vomibrand", "tokens": [1829, 3555, 4587, 7578, 33962, 1211, 1211, 8978, 2423, 614, 38571, 16445, 3714, 43020, 8717, 5016, 36520, 3224, 4724, 28814, 15042, 32771, 1829, 
16758, 2423, 32264, 34051, 32771, 1829, 16758, 2423, 10135, 897, 3699, 3714, 33599, 1829, 22807, 13672, 1829, 31439, 13672, 1829, 4724, 43042, 3224, 6156, 17082, 1829, 16758, 2423, 32264, 4724, 1829, 38251, 2407, 3224, 9673, 43042, 680, 259, 40478, 15019, 4752, 4032, 41185, 11331, 6027, 3660, 32771, 1829, 16758, 34892, 16758, 9566, 34051, 10135, 897, 3699, 4724, 1829, 38251, 2407, 3224, 9673, 43042, 10135, 897, 3699], "avg_logprob": -0.2776041557391485, "compression_ratio": 1.7225130890052356, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 219.36, "end": 220.46, "word": "يبقى", "probability": 0.9266357421875}, {"start": 220.46, "end": 222.94, "word": " الخلل", "probability": 0.96875}, {"start": 222.94, "end": 223.46, "word": " في", "probability": 0.7783203125}, {"start": 223.46, "end": 223.6, "word": " ال", "probability": 0.9560546875}, {"start": 223.6, "end": 224.16, "word": " adhesion", "probability": 0.631103515625}, {"start": 224.16, "end": 224.92, "word": " defect", "probability": 0.90869140625}, {"start": 224.92, "end": 225.86, "word": " ممكن", "probability": 0.9765625}, {"start": 225.86, "end": 227.12, "word": " نحصله", "probability": 0.975830078125}, {"start": 227.12, "end": 227.76, "word": " بإما", "probability": 0.8909505208333334}, {"start": 227.76, "end": 228.5, "word": " غياب", "probability": 0.9796549479166666}, {"start": 228.5, "end": 229.74, "word": " ال", "probability": 0.84228515625}, {"start": 229.74, "end": 230.32, "word": " receptor", "probability": 0.6220703125}, {"start": 230.32, "end": 231.32, "word": " أو", "probability": 0.79638671875}, {"start": 231.32, "end": 231.64, "word": " غياب", "probability": 0.9796549479166666}, {"start": 231.64, "end": 231.78, "word": " ال", "probability": 0.9423828125}, {"start": 231.78, "end": 232.26, "word": " vomibrand", "probability": 0.2631022135416667}, {"start": 232.26, "end": 234.54, "word": " ماشي؟", "probability": 0.6544647216796875}, {"start": 234.54, "end": 234.82, "word": " اللي", 
"probability": 0.9208984375}, {"start": 234.82, "end": 234.96, "word": " هو", "probability": 0.943359375}, {"start": 234.96, "end": 235.14, "word": " اللي", "probability": 0.950927734375}, {"start": 235.14, "end": 235.8, "word": " برضه", "probability": 0.7144368489583334}, {"start": 235.8, "end": 236.84, "word": " فغياب", "probability": 0.9091796875}, {"start": 236.84, "end": 236.96, "word": " ال", "probability": 0.97802734375}, {"start": 236.96, "end": 237.44, "word": " receptor", "probability": 0.970703125}, {"start": 237.44, "end": 238.92, "word": " بيسموه", "probability": 0.780712890625}, {"start": 238.92, "end": 239.28, "word": " المرض", "probability": 0.78662109375}, {"start": 239.28, "end": 239.84, "word": " perinatal", "probability": 0.328369140625}, {"start": 239.84, "end": 240.2, "word": " liver", "probability": 0.07440185546875}, {"start": 240.2, "end": 240.66, "word": " disease", "probability": 0.908203125}, {"start": 240.66, "end": 242.86, "word": " وفي", "probability": 0.84130859375}, {"start": 242.86, "end": 243.24, "word": " حالة", "probability": 0.99560546875}, {"start": 243.24, "end": 243.88, "word": " غياب", "probability": 0.9842122395833334}, {"start": 243.88, "end": 245.28, "word": " الرابط", "probability": 0.9303385416666666}, {"start": 245.28, "end": 245.46, "word": " أو", "probability": 0.66650390625}, {"start": 245.46, "end": 246.46, "word": " vomibrand", "probability": 0.8776041666666666}, {"start": 246.46, "end": 247.54, "word": " بيسموه", "probability": 0.89267578125}, {"start": 247.54, "end": 247.82, "word": " المرض", "probability": 0.965087890625}, {"start": 247.82, "end": 248.44, "word": " vomibrand", "probability": 0.9407552083333334}], "temperature": 1.0}, {"id": 9, "seek": 27873, "start": 249.71, "end": 278.73, "text": "ماشى هنبدأ بأول واحد وهو البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج 
البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج البرنامج", "tokens": [2304, 33599, 7578, 8032, 1863, 44510, 10721, 4724, 10721, 12610, 36764, 24401, 37037, 2407, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435, 2423, 26890, 8315, 2304, 7435], "avg_logprob": -0.09388888676961263, "compression_ratio": 10.985507246376812, "no_speech_prob": 8.940696716308594e-07, "words": [{"start": 249.71, "end": 250.79, "word": "ماشى", "probability": 0.46429443359375}, {"start": 250.79, "end": 253.29, "word": " هنبدأ", "probability": 0.8310546875}, {"start": 253.29, "end": 253.67, "word": " بأول", "probability": 0.88671875}, {"start": 253.67, "end": 
254.43, "word": " واحد", "probability": 0.9833984375}, {"start": 254.43, "end": 254.67, "word": " وهو", "probability": 0.81884765625}, {"start": 254.67, "end": 256.91, "word": " البرنامج", "probability": 0.7181640625}, {"start": 256.91, "end": 258.19, "word": " البرنامج", "probability": 0.65462646484375}, {"start": 258.19, "end": 261.77, "word": " البرنامج", "probability": 0.86962890625}, {"start": 261.77, "end": 264.17, "word": " البرنامج", "probability": 0.80390625}, {"start": 264.17, "end": 266.37, "word": " البرنامج", "probability": 0.794873046875}, {"start": 266.37, "end": 267.77, "word": " البرنامج", "probability": 0.811181640625}, {"start": 267.77, "end": 268.31, "word": " البرنامج", "probability": 0.841162109375}, {"start": 268.31, "end": 268.37, "word": " البرنامج", "probability": 0.87958984375}, {"start": 268.37, "end": 268.55, "word": " البرنامج", "probability": 0.91513671875}, {"start": 268.55, "end": 269.51, "word": " البرنامج", "probability": 0.9384765625}, {"start": 269.51, "end": 269.51, "word": " البرنامج", "probability": 0.956640625}, {"start": 269.51, "end": 269.51, "word": " البرنامج", "probability": 0.96611328125}, {"start": 269.51, "end": 269.77, "word": " البرنامج", "probability": 0.97041015625}, {"start": 269.77, "end": 269.77, "word": " البرنامج", "probability": 0.97265625}, {"start": 269.77, "end": 269.77, "word": " البرنامج", "probability": 0.97373046875}, {"start": 269.77, "end": 269.77, "word": " البرنامج", "probability": 0.97421875}, {"start": 269.77, "end": 269.79, "word": " البرنامج", "probability": 0.97421875}, {"start": 269.79, "end": 270.73, "word": " البرنامج", "probability": 0.97470703125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97568359375}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97587890625}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97626953125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97685546875}, 
{"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.9767578125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97705078125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.978125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97861328125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97890625}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97890625}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.9796875}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.980078125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.9798828125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.98017578125}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.9802734375}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.9802734375}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.9806640625}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.97978515625}, {"start": 270.73, "end": 270.73, "word": " البرنامج", "probability": 0.98056640625}, {"start": 270.73, "end": 270.75, "word": " البرنامج", "probability": 0.9798828125}, {"start": 270.75, "end": 273.91, "word": " البرنامج", "probability": 0.9798828125}, {"start": 273.91, "end": 275.53, "word": " البرنامج", "probability": 0.98017578125}, {"start": 275.53, "end": 277.79, "word": " البرنامج", "probability": 0.97978515625}, {"start": 277.79, "end": 278.73, "word": " البرنامج", "probability": 0.98154296875}], "temperature": 1.0}, {"id": 10, "seek": 30512, "start": 280.44, "end": 305.12, "text": "disease وبيتميز بالآتي، انتبهوا عليها، بيتميز بالتلت شغلات أشبه، الشغلة الأولانية انه بيكون في moderate or severe absence للplatelet، يعني في thrombocytopenia moderate aware أو severe thrombocytopenia", "tokens": [67, 908, 651, 4032, 
21292, 39237, 1829, 11622, 20666, 148, 95, 31371, 12399, 16472, 2655, 3555, 3224, 14407, 25894, 11296, 12399, 4724, 36081, 2304, 1829, 11622, 20666, 2655, 1211, 2655, 13412, 17082, 1211, 9307, 5551, 8592, 3555, 3224, 12399, 25124, 17082, 37977, 16247, 12610, 7649, 10632, 16472, 3224, 4724, 1829, 30544, 8978, 18174, 420, 8922, 17145, 24976, 39975, 15966, 12399, 37495, 22653, 8978, 739, 3548, 905, 4328, 15752, 654, 18174, 3650, 34051, 8922, 739, 3548, 905, 4328, 15752, 654], "avg_logprob": -0.28847657330334187, "compression_ratio": 1.565934065934066, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 280.44, "end": 281.12, "word": "disease", "probability": 0.7185872395833334}, {"start": 281.12, "end": 283.12, "word": " وبيتميز", "probability": 0.90546875}, {"start": 283.12, "end": 284.12, "word": " بالآتي،", "probability": 0.605322265625}, {"start": 284.12, "end": 284.5, "word": " انتبهوا", "probability": 0.81865234375}, {"start": 284.5, "end": 285.8, "word": " عليها،", "probability": 0.8361002604166666}, {"start": 285.8, "end": 286.58, "word": " بيتميز", "probability": 0.96337890625}, {"start": 286.58, "end": 287.02, "word": " بالتلت", "probability": 0.72119140625}, {"start": 287.02, "end": 287.48, "word": " شغلات", "probability": 0.985107421875}, {"start": 287.48, "end": 288.46, "word": " أشبه،", "probability": 0.593505859375}, {"start": 288.46, "end": 288.94, "word": " الشغلة", "probability": 0.8673502604166666}, {"start": 288.94, "end": 289.64, "word": " الأولانية", "probability": 0.9847412109375}, {"start": 289.64, "end": 291.42, "word": " انه", "probability": 0.63818359375}, {"start": 291.42, "end": 291.8, "word": " بيكون", "probability": 0.95361328125}, {"start": 291.8, "end": 292.18, "word": " في", "probability": 0.849609375}, {"start": 292.18, "end": 293.04, "word": " moderate", "probability": 0.9833984375}, {"start": 293.04, "end": 294.04, "word": " or", "probability": 0.4970703125}, {"start": 294.04, "end": 295.34, "word": " severe", 
"probability": 0.865234375}, {"start": 295.34, "end": 297.1, "word": " absence", "probability": 0.9794921875}, {"start": 297.1, "end": 298.3, "word": " للplatelet،", "probability": 0.572265625}, {"start": 298.3, "end": 298.6, "word": " يعني", "probability": 0.9228515625}, {"start": 298.6, "end": 298.84, "word": " في", "probability": 0.93505859375}, {"start": 298.84, "end": 300.36, "word": " thrombocytopenia", "probability": 0.7908121744791666}, {"start": 300.36, "end": 301.32, "word": " moderate", "probability": 0.6826171875}, {"start": 301.32, "end": 302.32, "word": " aware", "probability": 0.365234375}, {"start": 302.32, "end": 303.36, "word": " أو", "probability": 0.6982421875}, {"start": 303.36, "end": 303.96, "word": " severe", "probability": 0.76123046875}, {"start": 303.96, "end": 305.12, "word": " thrombocytopenia", "probability": 0.9557291666666666}], "temperature": 1.0}, {"id": 11, "seek": 33690, "start": 318.76, "end": 336.9, "text": "العامل التاني اللى موجود وهو giant platelet قدت فعل جسم طبيعى يوم ما تقل platelet شباب المصنع بيحاول يصنع اكتر لكن ما بيحصل سيكتف الواصل selective removal from display فبتقل الاول بوم", "tokens": [6027, 3615, 10943, 1211, 16712, 7649, 1829, 13672, 7578, 3714, 29245, 23328, 37037, 2407, 7410, 3403, 15966, 12174, 3215, 2655, 6156, 30241, 10874, 38251, 23032, 21292, 3615, 7578, 7251, 20498, 19446, 6055, 4587, 1211, 3403, 15966, 13412, 3555, 16758, 9673, 9381, 1863, 3615, 4724, 1829, 5016, 995, 12610, 7251, 9381, 1863, 3615, 1975, 4117, 2655, 2288, 44381, 19446, 4724, 1829, 5016, 36520, 8608, 1829, 4117, 2655, 5172, 2423, 14407, 36520, 33930, 17933, 490, 4674, 6156, 3555, 2655, 4587, 1211, 42963, 12610, 4724, 20498], "avg_logprob": -0.4200148724374317, "compression_ratio": 1.5265957446808511, "no_speech_prob": 1.0073184967041016e-05, "words": [{"start": 318.76, "end": 319.76, "word": "العامل", "probability": 0.5021514892578125}, {"start": 319.76, "end": 320.26, "word": " التاني", "probability": 0.8489583333333334}, {"start": 
320.26, "end": 320.42, "word": " اللى", "probability": 0.70703125}, {"start": 320.42, "end": 320.98, "word": " موجود", "probability": 0.9759114583333334}, {"start": 320.98, "end": 321.68, "word": " وهو", "probability": 0.699951171875}, {"start": 321.68, "end": 321.86, "word": " giant", "probability": 0.287841796875}, {"start": 321.86, "end": 322.38, "word": " platelet", "probability": 0.750244140625}, {"start": 322.38, "end": 323.36, "word": " قدت", "probability": 0.6127115885416666}, {"start": 323.36, "end": 323.78, "word": " فعل", "probability": 0.909423828125}, {"start": 323.78, "end": 324.24, "word": " جسم", "probability": 0.971923828125}, {"start": 324.24, "end": 324.88, "word": " طبيعى", "probability": 0.84637451171875}, {"start": 324.88, "end": 325.26, "word": " يوم", "probability": 0.74072265625}, {"start": 325.26, "end": 325.4, "word": " ما", "probability": 0.420654296875}, {"start": 325.4, "end": 325.7, "word": " تقل", "probability": 0.9415690104166666}, {"start": 325.7, "end": 326.22, "word": " platelet", "probability": 0.5372314453125}, {"start": 326.22, "end": 326.7, "word": " شباب", "probability": 0.84619140625}, {"start": 326.7, "end": 327.2, "word": " المصنع", "probability": 0.9071044921875}, {"start": 327.2, "end": 327.64, "word": " بيحاول", "probability": 0.9283203125}, {"start": 327.64, "end": 328.08, "word": " يصنع", "probability": 0.83807373046875}, {"start": 328.08, "end": 328.8, "word": " اكتر", "probability": 0.8779296875}, {"start": 328.8, "end": 329.24, "word": " لكن", "probability": 0.7841796875}, {"start": 329.24, "end": 329.48, "word": " ما", "probability": 0.63720703125}, {"start": 329.48, "end": 330.76, "word": " بيحصل", "probability": 0.54364013671875}, {"start": 330.76, "end": 331.58, "word": " سيكتف", "probability": 0.4915283203125}, {"start": 331.58, "end": 332.44, "word": " الواصل", "probability": 0.3741861979166667}, {"start": 332.44, "end": 333.5, "word": " selective", "probability": 0.521484375}, {"start": 333.5, "end": 
334.16, "word": " removal", "probability": 0.88427734375}, {"start": 334.16, "end": 334.5, "word": " from", "probability": 0.8798828125}, {"start": 334.5, "end": 335.08, "word": " display", "probability": 0.1705322265625}, {"start": 335.08, "end": 336.3, "word": " فبتقل", "probability": 0.90341796875}, {"start": 336.3, "end": 336.56, "word": " الاول", "probability": 0.6849365234375}, {"start": 336.56, "end": 336.9, "word": " بوم", "probability": 0.596435546875}], "temperature": 1.0}, {"id": 12, "seek": 35331, "start": 337.95, "end": 353.31, "text": "فالسر بمارو مجنون فبصنع بسرعة أكبر فبتطلع لبلاتلت أكبر ففي ال trombocytopenia في جيانت البلاتلت و طبيعي في غياب البلاتلت او", "tokens": [5172, 6027, 3794, 2288, 4724, 2304, 9640, 2407, 3714, 7435, 1863, 11536, 6156, 3555, 9381, 1863, 3615, 4724, 3794, 2288, 27884, 5551, 4117, 26890, 6156, 3555, 2655, 9566, 1211, 3615, 5296, 3555, 15040, 2655, 1211, 2655, 5551, 4117, 26890, 6156, 41185, 2423, 504, 3548, 905, 4328, 15752, 654, 8978, 10874, 1829, 7649, 2655, 29739, 15040, 2655, 1211, 2655, 4032, 23032, 21292, 3615, 1829, 8978, 32771, 1829, 16758, 29739, 15040, 2655, 1211, 2655, 1975, 2407], "avg_logprob": -0.36770832459131875, "compression_ratio": 1.5289855072463767, "no_speech_prob": 3.7550926208496094e-06, "words": [{"start": 337.95, "end": 338.73, "word": "فالسر", "probability": 0.6439208984375}, {"start": 338.73, "end": 340.39, "word": " بمارو", "probability": 0.500823974609375}, {"start": 340.39, "end": 340.93, "word": " مجنون", "probability": 0.837646484375}, {"start": 340.93, "end": 341.71, "word": " فبصنع", "probability": 0.854296875}, {"start": 341.71, "end": 342.67, "word": " بسرعة", "probability": 0.939697265625}, {"start": 342.67, "end": 343.03, "word": " أكبر", "probability": 0.8811848958333334}, {"start": 343.03, "end": 343.57, "word": " فبتطلع", "probability": 0.7791341145833334}, {"start": 343.57, "end": 345.45, "word": " لبلاتلت", "probability": 0.6127522786458334}, {"start": 345.45, "end": 346.41, "word": 
" أكبر", "probability": 0.6360270182291666}, {"start": 346.41, "end": 347.07, "word": " ففي", "probability": 0.8095703125}, {"start": 347.07, "end": 347.25, "word": " ال", "probability": 0.353515625}, {"start": 347.25, "end": 348.61, "word": " trombocytopenia", "probability": 0.7089436848958334}, {"start": 348.61, "end": 349.15, "word": " في", "probability": 0.646484375}, {"start": 349.15, "end": 349.57, "word": " جيانت", "probability": 0.6043701171875}, {"start": 349.57, "end": 350.25, "word": " البلاتلت", "probability": 0.853515625}, {"start": 350.25, "end": 350.81, "word": " و", "probability": 0.890625}, {"start": 350.81, "end": 351.27, "word": " طبيعي", "probability": 0.855712890625}, {"start": 351.27, "end": 351.43, "word": " في", "probability": 0.82763671875}, {"start": 351.43, "end": 351.95, "word": " غياب", "probability": 0.9807942708333334}, {"start": 351.95, "end": 352.87, "word": " البلاتلت", "probability": 0.90087890625}, {"start": 352.87, "end": 353.31, "word": " او", "probability": 0.751708984375}], "temperature": 1.0}, {"id": 13, "seek": 38262, "start": 354.74, "end": 382.62, "text": "وقلة عملها ال bleeding صح فبالتالي فى spontaneous bleeding وهذه ال classification اللى حكيناها سابقا ان يوم ما تقل بليتلت عن عشرين الفي spontaneous bleeding الخلل الأساسى فى الburnout, sorry disease هو غير ال receptor وهو GB 1B 1V and 1X V and 1X وهو برنامج دائما مليكيوم", "tokens": [2407, 4587, 37977, 6225, 42213, 11296, 2423, 19312, 20328, 5016, 6156, 3555, 6027, 2655, 6027, 1829, 6156, 7578, 32744, 19312, 4032, 3224, 24192, 2423, 21538, 13672, 7578, 11331, 4117, 1829, 8315, 11296, 8608, 16758, 4587, 995, 16472, 7251, 20498, 19446, 6055, 4587, 1211, 4724, 20292, 2655, 1211, 2655, 18871, 6225, 46309, 9957, 27188, 1829, 32744, 19312, 33962, 1211, 1211, 16247, 3794, 32277, 7578, 6156, 7578, 2423, 21763, 346, 11, 2597, 4752, 31439, 32771, 13546, 2423, 32264, 4032, 3224, 2407, 26809, 502, 33, 502, 53, 293, 502, 55, 691, 293, 502, 55, 37037, 2407, 4724, 2288, 8315, 2304, 
7435, 11778, 16373, 15042, 3714, 20292, 4117, 1829, 20498], "avg_logprob": -0.40566587336709564, "compression_ratio": 1.5476190476190477, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 354.74, "end": 355.4, "word": "وقلة", "probability": 0.5533854166666666}, {"start": 355.4, "end": 355.96, "word": " عملها", "probability": 0.9285481770833334}, {"start": 355.96, "end": 356.12, "word": " ال", "probability": 0.293212890625}, {"start": 356.12, "end": 356.38, "word": " bleeding", "probability": 0.822265625}, {"start": 356.38, "end": 357.14, "word": " صح", "probability": 0.7254638671875}, {"start": 357.14, "end": 358.1, "word": " فبالتالي", "probability": 0.8447265625}, {"start": 358.1, "end": 358.38, "word": " فى", "probability": 0.6776123046875}, {"start": 358.38, "end": 359.36, "word": " spontaneous", "probability": 0.9267578125}, {"start": 359.36, "end": 359.88, "word": " bleeding", "probability": 0.96484375}, {"start": 359.88, "end": 360.26, "word": " وهذه", "probability": 0.5760091145833334}, {"start": 360.26, "end": 361.0, "word": " ال", "probability": 0.80712890625}, {"start": 361.0, "end": 361.66, "word": " classification", "probability": 0.845703125}, {"start": 361.66, "end": 361.82, "word": " اللى", "probability": 0.973876953125}, {"start": 361.82, "end": 362.46, "word": " حكيناها", "probability": 0.7716796875}, {"start": 362.46, "end": 363.08, "word": " سابقا", "probability": 0.9652099609375}, {"start": 363.08, "end": 363.62, "word": " ان", "probability": 0.397216796875}, {"start": 363.62, "end": 363.9, "word": " يوم", "probability": 0.9541015625}, {"start": 363.9, "end": 364.02, "word": " ما", "probability": 0.74755859375}, {"start": 364.02, "end": 364.3, "word": " تقل", "probability": 0.9420572916666666}, {"start": 364.3, "end": 364.88, "word": " بليتلت", "probability": 0.514404296875}, {"start": 364.88, "end": 365.1, "word": " عن", "probability": 0.70751953125}, {"start": 365.1, "end": 365.86, "word": " عشرين", "probability": 0.90771484375}, 
{"start": 365.86, "end": 366.36, "word": " الفي", "probability": 0.755126953125}, {"start": 366.36, "end": 367.08, "word": " spontaneous", "probability": 0.92041015625}, {"start": 367.08, "end": 368.26, "word": " bleeding", "probability": 0.85009765625}, {"start": 368.26, "end": 370.38, "word": " الخلل", "probability": 0.7779947916666666}, {"start": 370.38, "end": 371.06, "word": " الأساسى", "probability": 0.9420166015625}, {"start": 371.06, "end": 371.22, "word": " فى", "probability": 0.836181640625}, {"start": 371.22, "end": 371.76, "word": " الburnout,", "probability": 0.5428873697916666}, {"start": 371.86, "end": 372.08, "word": " sorry", "probability": 0.06817626953125}, {"start": 372.08, "end": 373.06, "word": " disease", "probability": 0.6162109375}, {"start": 373.06, "end": 373.64, "word": " هو", "probability": 0.876953125}, {"start": 373.64, "end": 373.9, "word": " غير", "probability": 0.660888671875}, {"start": 373.9, "end": 374.08, "word": " ال", "probability": 0.650390625}, {"start": 374.08, "end": 374.46, "word": " receptor", "probability": 0.265380859375}, {"start": 374.46, "end": 375.3, "word": " وهو", "probability": 0.7654622395833334}, {"start": 375.3, "end": 375.56, "word": " GB", "probability": 0.404052734375}, {"start": 375.56, "end": 377.02, "word": " 1B", "probability": 0.631591796875}, {"start": 377.02, "end": 378.82, "word": " 1V", "probability": 0.580322265625}, {"start": 378.82, "end": 379.22, "word": " and", "probability": 0.391357421875}, {"start": 379.22, "end": 379.76, "word": " 1X", "probability": 0.964599609375}, {"start": 379.76, "end": 380.06, "word": " V", "probability": 0.46875}, {"start": 380.06, "end": 380.52, "word": " and", "probability": 0.837890625}, {"start": 380.52, "end": 380.96, "word": " 1X", "probability": 0.904541015625}, {"start": 380.96, "end": 381.2, "word": " وهو", "probability": 0.66796875}, {"start": 381.2, "end": 381.48, "word": " برنامج", "probability": 0.8455078125}, {"start": 381.48, "end": 382.06, "word": 
" دائما", "probability": 0.66748046875}, {"start": 382.06, "end": 382.62, "word": " مليكيوم", "probability": 0.4814208984375}], "temperature": 1.0}, {"id": 14, "seek": 41083, "start": 383.35, "end": 410.83, "text": "بيتكوّن من اكتر من ايهاش من monomer يعني اكتر من ايهاش من موليكيور و جاروا في حوالي عشرين الف نسخة من هذا R ستر اللي موجود على شكل هتيرو دايمل تلاتة موليكيور glycoprotein 1B", "tokens": [3555, 36081, 4117, 2407, 11703, 1863, 9154, 1975, 4117, 2655, 2288, 9154, 1975, 1829, 3224, 33599, 9154, 1108, 14301, 37495, 22653, 1975, 4117, 2655, 2288, 9154, 1975, 1829, 3224, 33599, 9154, 3714, 12610, 1829, 4117, 1829, 13063, 4032, 10874, 9640, 14407, 8978, 11331, 2407, 6027, 1829, 6225, 46309, 9957, 27188, 8717, 3794, 9778, 3660, 9154, 23758, 497, 8608, 2655, 2288, 13672, 1829, 3714, 29245, 23328, 15844, 13412, 28820, 8032, 2655, 13546, 2407, 11778, 995, 32640, 1211, 6055, 1211, 9307, 3660, 3714, 12610, 1829, 4117, 1829, 13063, 22633, 13084, 81, 1370, 259, 502, 33], "avg_logprob": -0.3347739412429485, "compression_ratio": 1.6348314606741574, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 383.35, "end": 383.91, "word": "بيتكوّن", "probability": 0.6704508463541666}, {"start": 383.91, "end": 384.07, "word": " من", "probability": 0.9697265625}, {"start": 384.07, "end": 384.39, "word": " اكتر", "probability": 0.85205078125}, {"start": 384.39, "end": 384.55, "word": " من", "probability": 0.9736328125}, {"start": 384.55, "end": 384.99, "word": " ايهاش", "probability": 0.797607421875}, {"start": 384.99, "end": 386.33, "word": " من", "probability": 0.86181640625}, {"start": 386.33, "end": 387.61, "word": " monomer", "probability": 0.4849853515625}, {"start": 387.61, "end": 388.19, "word": " يعني", "probability": 0.714599609375}, {"start": 388.19, "end": 390.47, "word": " اكتر", "probability": 0.7185516357421875}, {"start": 390.47, "end": 390.61, "word": " من", "probability": 0.984375}, {"start": 390.61, "end": 390.97, "word": " ايهاش", "probability": 
0.9107666015625}, {"start": 390.97, "end": 391.15, "word": " من", "probability": 0.962890625}, {"start": 391.15, "end": 391.77, "word": " موليكيور", "probability": 0.7126057942708334}, {"start": 391.77, "end": 393.25, "word": " و", "probability": 0.53076171875}, {"start": 393.25, "end": 394.63, "word": " جاروا", "probability": 0.7683919270833334}, {"start": 394.63, "end": 394.81, "word": " في", "probability": 0.88623046875}, {"start": 394.81, "end": 395.45, "word": " حوالي", "probability": 0.828125}, {"start": 395.45, "end": 396.49, "word": " عشرين", "probability": 0.8465169270833334}, {"start": 396.49, "end": 396.83, "word": " الف", "probability": 0.79931640625}, {"start": 396.83, "end": 397.41, "word": " نسخة", "probability": 0.98486328125}, {"start": 397.41, "end": 397.61, "word": " من", "probability": 0.97216796875}, {"start": 397.61, "end": 398.19, "word": " هذا", "probability": 0.92919921875}, {"start": 398.19, "end": 399.13, "word": " R", "probability": 0.440673828125}, {"start": 399.13, "end": 399.57, "word": " ستر", "probability": 0.3618977864583333}, {"start": 399.57, "end": 404.57, "word": " اللي", "probability": 0.5511474609375}, {"start": 404.57, "end": 405.03, "word": " موجود", "probability": 0.98876953125}, {"start": 405.03, "end": 405.19, "word": " على", "probability": 0.67724609375}, {"start": 405.19, "end": 405.49, "word": " شكل", "probability": 0.982666015625}, {"start": 405.49, "end": 405.87, "word": " هتيرو", "probability": 0.65081787109375}, {"start": 405.87, "end": 406.27, "word": " دايمل", "probability": 0.8016357421875}, {"start": 406.27, "end": 407.27, "word": " تلاتة", "probability": 0.7728271484375}, {"start": 407.27, "end": 408.87, "word": " موليكيور", "probability": 0.9060872395833334}, {"start": 408.87, "end": 410.43, "word": " glycoprotein", "probability": 0.7455078125}, {"start": 410.43, "end": 410.83, "word": " 1B", "probability": 0.5595703125}], "temperature": 1.0}, {"id": 15, "seek": 43443, "start": 412.63, "end": 434.43, "text": 
"ثم إجلايكوبروتين 1X and V والتلاتة بيصنعوا من أماكن مختلفة ال 1B ال gene تبعه موجود على كرومزوم رقم 17 بينما ال 1X and V الجينات موجودة على كرومزوم رقم 8", "tokens": [12984, 2304, 11933, 7435, 15040, 1829, 4117, 2407, 26890, 35473, 9957, 502, 55, 293, 691, 16070, 2655, 1211, 9307, 3660, 4724, 1829, 9381, 1863, 3615, 14407, 9154, 5551, 15042, 19452, 3714, 46456, 46538, 3660, 2423, 502, 33, 2423, 12186, 6055, 3555, 3615, 3224, 3714, 29245, 23328, 15844, 9122, 2288, 20498, 11622, 20498, 12602, 4587, 2304, 3282, 49374, 15042, 2423, 502, 55, 293, 691, 25724, 1829, 8315, 2655, 3714, 29245, 23328, 3660, 15844, 9122, 2288, 20498, 11622, 20498, 12602, 4587, 2304, 1649], "avg_logprob": -0.18864329431842014, "compression_ratio": 1.6037735849056605, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 412.63, "end": 413.55, "word": "ثم", "probability": 0.880615234375}, {"start": 413.55, "end": 414.47, "word": " إجلايكوبروتين", "probability": 0.7460666232638888}, {"start": 414.47, "end": 416.59, "word": " 1X", "probability": 0.4185791015625}, {"start": 416.59, "end": 417.91, "word": " and", "probability": 0.49560546875}, {"start": 417.91, "end": 418.53, "word": " V", "probability": 0.9189453125}, {"start": 418.53, "end": 421.37, "word": " والتلاتة", "probability": 0.83046875}, {"start": 421.37, "end": 422.09, "word": " بيصنعوا", "probability": 0.93212890625}, {"start": 422.09, "end": 422.25, "word": " من", "probability": 0.994140625}, {"start": 422.25, "end": 422.63, "word": " أماكن", "probability": 0.95654296875}, {"start": 422.63, "end": 423.83, "word": " مختلفة", "probability": 0.9954833984375}, {"start": 423.83, "end": 424.37, "word": " ال", "probability": 0.96875}, {"start": 424.37, "end": 424.83, "word": " 1B", "probability": 0.46630859375}, {"start": 424.83, "end": 425.61, "word": " ال", "probability": 0.52001953125}, {"start": 425.61, "end": 425.81, "word": " gene", "probability": 0.6220703125}, {"start": 425.81, "end": 426.15, "word": " تبعه", "probability": 
0.87890625}, {"start": 426.15, "end": 426.41, "word": " موجود", "probability": 0.9840494791666666}, {"start": 426.41, "end": 426.55, "word": " على", "probability": 0.81982421875}, {"start": 426.55, "end": 427.01, "word": " كرومزوم", "probability": 0.805859375}, {"start": 427.01, "end": 427.29, "word": " رقم", "probability": 0.9637044270833334}, {"start": 427.29, "end": 427.71, "word": " 17", "probability": 0.9296875}, {"start": 427.71, "end": 429.53, "word": " بينما", "probability": 0.93017578125}, {"start": 429.53, "end": 429.97, "word": " ال", "probability": 0.9736328125}, {"start": 429.97, "end": 430.55, "word": " 1X", "probability": 0.940185546875}, {"start": 430.55, "end": 430.95, "word": " and", "probability": 0.8935546875}, {"start": 430.95, "end": 431.31, "word": " V", "probability": 0.98486328125}, {"start": 431.31, "end": 432.73, "word": " الجينات", "probability": 0.8546142578125}, {"start": 432.73, "end": 433.21, "word": " موجودة", "probability": 0.9752197265625}, {"start": 433.21, "end": 433.35, "word": " على", "probability": 0.92333984375}, {"start": 433.35, "end": 433.87, "word": " كرومزوم", "probability": 0.98798828125}, {"start": 433.87, "end": 434.15, "word": " رقم", "probability": 0.9903971354166666}, {"start": 434.15, "end": 434.43, "word": " 8", "probability": 0.59130859375}], "temperature": 1.0}, {"id": 16, "seek": 46570, "start": 436.52, "end": 465.7, "text": "ووجدوا أن معظم الـ mutation موجودة على شكل messiness الـ mutation أو الـ frame shifting وهذا كلكم بتعرفوا أنه بيعمل remature stop coding فالمحصلة انه either quantitative او qualitative مفهوم، ماشي؟ وهذا اللي بنشوفه في مرض منها النوع معظم ال mutation موجودة في GPE 1B", "tokens": [2407, 29245, 3215, 14407, 14739, 20449, 19913, 2304, 2423, 39184, 27960, 3714, 29245, 23328, 3660, 15844, 13412, 28820, 2082, 1324, 2423, 39184, 27960, 34051, 2423, 39184, 3920, 17573, 37037, 15730, 9122, 23275, 2304, 39894, 3615, 28480, 14407, 14739, 3224, 4724, 1829, 25957, 1211, 890, 1503, 1590, 17720, 6156, 
45340, 5016, 36520, 3660, 16472, 3224, 2139, 27778, 1975, 2407, 31312, 3714, 5172, 3224, 20498, 12399, 3714, 33599, 1829, 22807, 4032, 3224, 15730, 13672, 1829, 44945, 8592, 38688, 3224, 8978, 3714, 43042, 9154, 11296, 28239, 45367, 20449, 19913, 2304, 2423, 27960, 3714, 29245, 23328, 3660, 8978, 460, 5208, 502, 33], "avg_logprob": -0.3304924362837666, "compression_ratio": 1.598360655737705, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 436.52, "end": 437.4, "word": "ووجدوا", "probability": 0.8416748046875}, {"start": 437.4, "end": 437.76, "word": " أن", "probability": 0.463623046875}, {"start": 437.76, "end": 438.58, "word": " معظم", "probability": 0.974609375}, {"start": 438.58, "end": 438.86, "word": " الـ", "probability": 0.77490234375}, {"start": 438.86, "end": 439.46, "word": " mutation", "probability": 0.476318359375}, {"start": 439.46, "end": 440.74, "word": " موجودة", "probability": 0.9649658203125}, {"start": 440.74, "end": 441.26, "word": " على", "probability": 0.8720703125}, {"start": 441.26, "end": 441.8, "word": " شكل", "probability": 0.977294921875}, {"start": 441.8, "end": 442.68, "word": " messiness", "probability": 0.57000732421875}, {"start": 442.68, "end": 443.42, "word": " الـ", "probability": 0.511962890625}, {"start": 443.42, "end": 443.94, "word": " mutation", "probability": 0.92626953125}, {"start": 443.94, "end": 444.46, "word": " أو", "probability": 0.6337890625}, {"start": 444.46, "end": 444.56, "word": " الـ", "probability": 0.775390625}, {"start": 444.56, "end": 444.82, "word": " frame", "probability": 0.382568359375}, {"start": 444.82, "end": 445.26, "word": " shifting", "probability": 0.77783203125}, {"start": 445.26, "end": 446.72, "word": " وهذا", "probability": 0.919921875}, {"start": 446.72, "end": 447.06, "word": " كلكم", "probability": 0.79248046875}, {"start": 447.06, "end": 447.5, "word": " بتعرفوا", "probability": 0.80126953125}, {"start": 447.5, "end": 447.64, "word": " أنه", "probability": 0.682861328125}, 
{"start": 447.64, "end": 447.92, "word": " بيعمل", "probability": 0.80120849609375}, {"start": 447.92, "end": 448.86, "word": " remature", "probability": 0.4603271484375}, {"start": 448.86, "end": 449.7, "word": " stop", "probability": 0.7822265625}, {"start": 449.7, "end": 450.18, "word": " coding", "probability": 0.222900390625}, {"start": 450.18, "end": 452.34, "word": " فالمحصلة", "probability": 0.842431640625}, {"start": 452.34, "end": 453.56, "word": " انه", "probability": 0.674560546875}, {"start": 453.56, "end": 453.78, "word": " either", "probability": 0.74365234375}, {"start": 453.78, "end": 454.46, "word": " quantitative", "probability": 0.80810546875}, {"start": 454.46, "end": 454.98, "word": " او", "probability": 0.6888427734375}, {"start": 454.98, "end": 455.6, "word": " qualitative", "probability": 0.98876953125}, {"start": 455.6, "end": 456.9, "word": " مفهوم،", "probability": 0.47628173828125}, {"start": 456.9, "end": 457.66, "word": " ماشي؟", "probability": 0.8001708984375}, {"start": 457.66, "end": 457.98, "word": " وهذا", "probability": 0.6158040364583334}, {"start": 457.98, "end": 458.06, "word": " اللي", "probability": 0.939208984375}, {"start": 458.06, "end": 458.54, "word": " بنشوفه", "probability": 0.8585205078125}, {"start": 458.54, "end": 458.7, "word": " في", "probability": 0.96630859375}, {"start": 458.7, "end": 459.0, "word": " مرض", "probability": 0.98193359375}, {"start": 459.0, "end": 460.0, "word": " منها", "probability": 0.766357421875}, {"start": 460.0, "end": 460.28, "word": " النوع", "probability": 0.913330078125}, {"start": 460.28, "end": 461.82, "word": " معظم", "probability": 0.958984375}, {"start": 461.82, "end": 461.96, "word": " ال", "probability": 0.974609375}, {"start": 461.96, "end": 462.48, "word": " mutation", "probability": 0.87646484375}, {"start": 462.48, "end": 463.58, "word": " موجودة", "probability": 0.9832763671875}, {"start": 463.58, "end": 463.74, "word": " في", "probability": 0.96142578125}, {"start": 
463.74, "end": 464.5, "word": " GPE", "probability": 0.579833984375}, {"start": 464.5, "end": 465.7, "word": " 1B", "probability": 0.507080078125}], "temperature": 1.0}, {"id": 17, "seek": 48967, "start": 467.45, "end": 489.67, "text": "الـ 1X rare والـ V مافيش قبلتهش وهذه صورة معبرة على أنها مواقع الجينات على كل الكروبوتروم فاشي؟ كيف نشخص الشخص؟ مرض دا دي بنحكي عنه أنه مافيش عنده platelet و مافي عنده نزينة، صح؟", "tokens": [6027, 39184, 502, 55, 5892, 16070, 39184, 691, 19446, 41185, 8592, 12174, 36150, 47395, 8592, 37037, 24192, 20328, 13063, 3660, 20449, 26890, 3660, 15844, 14739, 11296, 3714, 14407, 4587, 3615, 25724, 1829, 8315, 2655, 15844, 28242, 33251, 32887, 3555, 35473, 2288, 20498, 6156, 995, 8592, 1829, 22807, 9122, 33911, 8717, 8592, 9778, 9381, 25124, 9778, 9381, 22807, 3714, 43042, 11778, 995, 11778, 1829, 44945, 5016, 4117, 1829, 18871, 3224, 14739, 3224, 19446, 41185, 8592, 43242, 3224, 3403, 15966, 4032, 19446, 41185, 43242, 3224, 8717, 11622, 9957, 3660, 12399, 20328, 5016, 22807], "avg_logprob": -0.44769022387007007, "compression_ratio": 1.5743589743589743, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 467.45, "end": 467.83, "word": "الـ", "probability": 0.80712890625}, {"start": 467.83, "end": 468.57, "word": " 1X", "probability": 0.64013671875}, {"start": 468.57, "end": 469.57, "word": " rare", "probability": 0.1268310546875}, {"start": 469.57, "end": 470.15, "word": " والـ", "probability": 0.785888671875}, {"start": 470.15, "end": 470.41, "word": " V", "probability": 0.89599609375}, {"start": 470.41, "end": 472.19, "word": " مافيش", "probability": 0.8567708333333334}, {"start": 472.19, "end": 472.79, "word": " قبلتهش", "probability": 0.440093994140625}, {"start": 472.79, "end": 473.67, "word": " وهذه", "probability": 0.545166015625}, {"start": 473.67, "end": 473.95, "word": " صورة", "probability": 0.8308919270833334}, {"start": 473.95, "end": 474.39, "word": " معبرة", "probability": 0.6890462239583334}, {"start": 474.39, "end": 
474.51, "word": " على", "probability": 0.2032470703125}, {"start": 474.51, "end": 474.75, "word": " أنها", "probability": 0.540771484375}, {"start": 474.75, "end": 475.43, "word": " مواقع", "probability": 0.9398193359375}, {"start": 475.43, "end": 476.35, "word": " الجينات", "probability": 0.926513671875}, {"start": 476.35, "end": 477.29, "word": " على", "probability": 0.92236328125}, {"start": 477.29, "end": 477.73, "word": " كل", "probability": 0.96630859375}, {"start": 477.73, "end": 479.29, "word": " الكروبوتروم", "probability": 0.4921468098958333}, {"start": 479.29, "end": 481.21, "word": " فاشي؟", "probability": 0.57149658203125}, {"start": 481.21, "end": 482.41, "word": " كيف", "probability": 0.963134765625}, {"start": 482.41, "end": 482.97, "word": " نشخص", "probability": 0.82135009765625}, {"start": 482.97, "end": 484.13, "word": " الشخص؟", "probability": 0.537841796875}, {"start": 484.13, "end": 485.01, "word": " مرض", "probability": 0.52099609375}, {"start": 485.01, "end": 485.35, "word": " دا", "probability": 0.47998046875}, {"start": 485.35, "end": 485.45, "word": " دي", "probability": 0.931396484375}, {"start": 485.45, "end": 485.87, "word": " بنحكي", "probability": 0.8345947265625}, {"start": 485.87, "end": 486.23, "word": " عنه", "probability": 0.98046875}, {"start": 486.23, "end": 486.77, "word": " أنه", "probability": 0.683837890625}, {"start": 486.77, "end": 487.01, "word": " مافيش", "probability": 0.68505859375}, {"start": 487.01, "end": 487.31, "word": " عنده", "probability": 0.9580078125}, {"start": 487.31, "end": 487.75, "word": " platelet", "probability": 0.34185791015625}, {"start": 487.75, "end": 488.07, "word": " و", "probability": 0.90234375}, {"start": 488.07, "end": 488.15, "word": " مافي", "probability": 0.6229248046875}, {"start": 488.15, "end": 488.37, "word": " عنده", "probability": 0.81103515625}, {"start": 488.37, "end": 489.25, "word": " نزينة،", "probability": 0.702099609375}, {"start": 489.25, "end": 489.67, "word": " صح؟", 
"probability": 0.9640299479166666}], "temperature": 1.0}, {"id": 18, "seek": 51420, "start": 490.5, "end": 514.2, "text": "طبعا من الفحصات اللى ممكن نشخصها اللى هو ال bleeding time صح it's not sensitive او it's not specific زى ما بيقولوها لكن ال it's one of the لغاية الان used test فى خلال فى ال plate اللى تبقى فى خلال خلاج فى ال bleeding time طبعا ال bleeding time يكون فى prolonged", "tokens": [9566, 3555, 3615, 995, 9154, 27188, 5016, 9381, 9307, 13672, 7578, 3714, 43020, 8717, 8592, 9778, 9381, 11296, 13672, 7578, 31439, 2423, 19312, 565, 20328, 5016, 309, 311, 406, 9477, 1975, 2407, 309, 311, 406, 2685, 30767, 7578, 19446, 4724, 1829, 39648, 2407, 11296, 44381, 2423, 309, 311, 472, 295, 264, 5296, 17082, 995, 10632, 2423, 7649, 1143, 1500, 6156, 7578, 16490, 1211, 6027, 6156, 7578, 2423, 5924, 13672, 7578, 6055, 3555, 4587, 7578, 6156, 7578, 16490, 1211, 6027, 16490, 1211, 26108, 6156, 7578, 2423, 19312, 565, 23032, 3555, 3615, 995, 2423, 19312, 565, 7251, 30544, 6156, 7578, 41237], "avg_logprob": -0.19281249806284906, "compression_ratio": 1.7417840375586855, "no_speech_prob": 0.0, "words": [{"start": 490.5, "end": 490.84, "word": "طبعا", "probability": 0.9140625}, {"start": 490.84, "end": 490.98, "word": " من", "probability": 0.505859375}, {"start": 490.98, "end": 491.48, "word": " الفحصات", "probability": 0.9691162109375}, {"start": 491.48, "end": 491.62, "word": " اللى", "probability": 0.880615234375}, {"start": 491.62, "end": 491.98, "word": " ممكن", "probability": 0.986328125}, {"start": 491.98, "end": 493.08, "word": " نشخصها", "probability": 0.8853515625}, {"start": 493.08, "end": 494.04, "word": " اللى", "probability": 0.965576171875}, {"start": 494.04, "end": 494.26, "word": " هو", "probability": 0.72314453125}, {"start": 494.26, "end": 494.4, "word": " ال", "probability": 0.6123046875}, {"start": 494.4, "end": 494.64, "word": " bleeding", "probability": 0.83642578125}, {"start": 494.64, "end": 495.06, "word": " time", "probability": 0.84619140625}, 
{"start": 495.06, "end": 495.4, "word": " صح", "probability": 0.957763671875}, {"start": 495.4, "end": 496.3, "word": " it's", "probability": 0.60302734375}, {"start": 496.3, "end": 496.54, "word": " not", "probability": 0.96484375}, {"start": 496.54, "end": 497.06, "word": " sensitive", "probability": 0.83544921875}, {"start": 497.06, "end": 498.2, "word": " او", "probability": 0.7320556640625}, {"start": 498.2, "end": 498.36, "word": " it's", "probability": 0.941650390625}, {"start": 498.36, "end": 498.52, "word": " not", "probability": 0.96337890625}, {"start": 498.52, "end": 499.18, "word": " specific", "probability": 0.970703125}, {"start": 499.18, "end": 499.44, "word": " زى", "probability": 0.9033203125}, {"start": 499.44, "end": 499.54, "word": " ما", "probability": 0.90478515625}, {"start": 499.54, "end": 500.08, "word": " بيقولوها", "probability": 0.847119140625}, {"start": 500.08, "end": 500.86, "word": " لكن", "probability": 0.84765625}, {"start": 500.86, "end": 502.3, "word": " ال", "probability": 0.482421875}, {"start": 502.3, "end": 502.62, "word": " it's", "probability": 0.95556640625}, {"start": 502.62, "end": 502.86, "word": " one", "probability": 0.96533203125}, {"start": 502.86, "end": 503.02, "word": " of", "probability": 0.97998046875}, {"start": 503.02, "end": 503.74, "word": " the", "probability": 0.86572265625}, {"start": 503.74, "end": 504.5, "word": " لغاية", "probability": 0.976318359375}, {"start": 504.5, "end": 504.78, "word": " الان", "probability": 0.58880615234375}, {"start": 504.78, "end": 505.14, "word": " used", "probability": 0.890625}, {"start": 505.14, "end": 505.68, "word": " test", "probability": 0.65185546875}, {"start": 505.68, "end": 507.26, "word": " فى", "probability": 0.6827392578125}, {"start": 507.26, "end": 507.76, "word": " خلال", "probability": 0.8614908854166666}, {"start": 507.76, "end": 508.94, "word": " فى", "probability": 0.904541015625}, {"start": 508.94, "end": 509.0, "word": " ال", "probability": 
0.8798828125}, {"start": 509.0, "end": 509.3, "word": " plate", "probability": 0.89892578125}, {"start": 509.3, "end": 509.48, "word": " اللى", "probability": 0.8896484375}, {"start": 509.48, "end": 509.76, "word": " تبقى", "probability": 0.69036865234375}, {"start": 509.76, "end": 509.9, "word": " فى", "probability": 0.964111328125}, {"start": 509.9, "end": 510.24, "word": " خلال", "probability": 0.9871419270833334}, {"start": 510.24, "end": 510.8, "word": " خلاج", "probability": 0.59814453125}, {"start": 510.8, "end": 511.36, "word": " فى", "probability": 0.87548828125}, {"start": 511.36, "end": 511.46, "word": " ال", "probability": 0.9287109375}, {"start": 511.46, "end": 511.74, "word": " bleeding", "probability": 0.96533203125}, {"start": 511.74, "end": 512.14, "word": " time", "probability": 0.91455078125}, {"start": 512.14, "end": 512.66, "word": " طبعا", "probability": 0.9888916015625}, {"start": 512.66, "end": 512.72, "word": " ال", "probability": 0.97265625}, {"start": 512.72, "end": 512.9, "word": " bleeding", "probability": 0.9775390625}, {"start": 512.9, "end": 513.24, "word": " time", "probability": 0.908203125}, {"start": 513.24, "end": 513.52, "word": " يكون", "probability": 0.766357421875}, {"start": 513.52, "end": 513.7, "word": " فى", "probability": 0.78125}, {"start": 513.7, "end": 514.2, "word": " prolonged", "probability": 0.51513671875}], "temperature": 1.0}, {"id": 19, "seek": 54913, "start": 522.17, "end": 549.13, "text": "ليه؟ لأنه نتطلع عليها بالناس ال diameter تبعها اكتر من تلتة و نص micro اكتر من ايش؟ تلتة و نص micro يا شباب هو normal level لإنه ال platelet بتعرف بإن ال diameter تبعها تلت ال normal RBC's ماشي من تمانية لعشر يعني جديش التلتة تلت العشر", "tokens": [20292, 3224, 22807, 5296, 33456, 3224, 8717, 2655, 9566, 1211, 3615, 25894, 11296, 20666, 8315, 3794, 2423, 14196, 6055, 3555, 3615, 11296, 1975, 4117, 2655, 2288, 9154, 6055, 1211, 2655, 3660, 4032, 8717, 9381, 4532, 1975, 4117, 2655, 2288, 9154, 1975, 1829, 8592, 22807, 6055, 
1211, 2655, 3660, 4032, 8717, 9381, 4532, 35186, 13412, 3555, 16758, 31439, 2710, 1496, 5296, 28814, 1863, 3224, 2423, 3403, 15966, 39894, 3615, 28480, 4724, 28814, 1863, 2423, 14196, 6055, 3555, 3615, 11296, 6055, 1211, 2655, 2423, 2710, 497, 7869, 311, 3714, 33599, 1829, 9154, 46811, 7649, 10632, 5296, 3615, 46309, 37495, 22653, 10874, 16254, 8592, 16712, 1211, 2655, 3660, 6055, 1211, 2655, 18863, 46309], "avg_logprob": -0.22212837327707996, "compression_ratio": 1.7523809523809524, "no_speech_prob": 0.0, "words": [{"start": 522.17, "end": 523.05, "word": "ليه؟", "probability": 0.79541015625}, {"start": 523.05, "end": 523.93, "word": " لأنه", "probability": 0.6438802083333334}, {"start": 523.93, "end": 524.37, "word": " نتطلع", "probability": 0.65166015625}, {"start": 524.37, "end": 524.67, "word": " عليها", "probability": 0.951904296875}, {"start": 524.67, "end": 525.01, "word": " بالناس", "probability": 0.610107421875}, {"start": 525.01, "end": 525.15, "word": " ال", "probability": 0.7373046875}, {"start": 525.15, "end": 525.45, "word": " diameter", "probability": 0.7265625}, {"start": 525.45, "end": 525.93, "word": " تبعها", "probability": 0.901611328125}, {"start": 525.93, "end": 526.17, "word": " اكتر", "probability": 0.85009765625}, {"start": 526.17, "end": 526.27, "word": " من", "probability": 0.9755859375}, {"start": 526.27, "end": 526.89, "word": " تلتة", "probability": 0.71142578125}, {"start": 526.89, "end": 526.97, "word": " و", "probability": 0.99560546875}, {"start": 526.97, "end": 527.15, "word": " نص", "probability": 0.932373046875}, {"start": 527.15, "end": 527.57, "word": " micro", "probability": 0.5771484375}, {"start": 527.57, "end": 527.99, "word": " اكتر", "probability": 0.924072265625}, {"start": 527.99, "end": 528.15, "word": " من", "probability": 0.97900390625}, {"start": 528.15, "end": 529.29, "word": " ايش؟", "probability": 0.8392333984375}, {"start": 529.29, "end": 529.61, "word": " تلتة", "probability": 0.966796875}, {"start": 529.61, 
"end": 529.71, "word": " و", "probability": 0.99853515625}, {"start": 529.71, "end": 529.85, "word": " نص", "probability": 0.9921875}, {"start": 529.85, "end": 530.19, "word": " micro", "probability": 0.92822265625}, {"start": 530.19, "end": 530.35, "word": " يا", "probability": 0.5888671875}, {"start": 530.35, "end": 530.61, "word": " شباب", "probability": 0.9866536458333334}, {"start": 530.61, "end": 530.87, "word": " هو", "probability": 0.9755859375}, {"start": 530.87, "end": 531.79, "word": " normal", "probability": 0.6904296875}, {"start": 531.79, "end": 532.45, "word": " level", "probability": 0.98681640625}, {"start": 532.45, "end": 533.63, "word": " لإنه", "probability": 0.76220703125}, {"start": 533.63, "end": 533.73, "word": " ال", "probability": 0.9296875}, {"start": 533.73, "end": 534.17, "word": " platelet", "probability": 0.54510498046875}, {"start": 534.17, "end": 535.23, "word": " بتعرف", "probability": 0.8839518229166666}, {"start": 535.23, "end": 536.23, "word": " بإن", "probability": 0.8899739583333334}, {"start": 536.23, "end": 536.37, "word": " ال", "probability": 0.73876953125}, {"start": 536.37, "end": 536.81, "word": " diameter", "probability": 0.9814453125}, {"start": 536.81, "end": 537.63, "word": " تبعها", "probability": 0.979248046875}, {"start": 537.63, "end": 539.35, "word": " تلت", "probability": 0.9361979166666666}, {"start": 539.35, "end": 540.01, "word": " ال", "probability": 0.97412109375}, {"start": 540.01, "end": 540.79, "word": " normal", "probability": 0.8935546875}, {"start": 540.79, "end": 542.51, "word": " RBC's", "probability": 0.6363932291666666}, {"start": 542.51, "end": 543.03, "word": " ماشي", "probability": 0.8313802083333334}, {"start": 543.03, "end": 543.45, "word": " من", "probability": 0.51220703125}, {"start": 543.45, "end": 543.75, "word": " تمانية", "probability": 0.8670247395833334}, {"start": 543.75, "end": 544.21, "word": " لعشر", "probability": 0.6600748697916666}, {"start": 544.21, "end": 546.09, "word": " 
يعني", "probability": 0.93798828125}, {"start": 546.09, "end": 546.61, "word": " جديش", "probability": 0.789306640625}, {"start": 546.61, "end": 547.61, "word": " التلتة", "probability": 0.9173583984375}, {"start": 547.61, "end": 548.77, "word": " تلت", "probability": 0.9072265625}, {"start": 548.77, "end": 549.13, "word": " العشر", "probability": 0.92236328125}], "temperature": 1.0}, {"id": 20, "seek": 57934, "start": 552.16, "end": 579.34, "text": "فهي تلت يبقى اكتر من تلت ال normal herbicide له 3.5 ميكروبيل ودى برضه صورة معبرة ده زى ما انتوا شايفين هذه عبارة عن platelet كبرت حد تانى يا شباب كيف نشخص يبقى نمرة واحد شخصنا ب bleeding time نمرة اتنين عملنا blood film وشوفنا فيه giant platelet", "tokens": [5172, 3224, 1829, 6055, 1211, 2655, 7251, 3555, 4587, 7578, 1975, 4117, 2655, 2288, 9154, 6055, 1211, 2655, 2423, 2710, 22662, 9584, 46740, 805, 13, 20, 3714, 1829, 4117, 32887, 21292, 1211, 4032, 3215, 7578, 4724, 43042, 3224, 20328, 13063, 3660, 20449, 3555, 25720, 11778, 3224, 30767, 7578, 19446, 16472, 2655, 14407, 13412, 995, 33911, 9957, 29538, 6225, 3555, 9640, 3660, 18871, 3403, 15966, 9122, 26890, 2655, 11331, 3215, 6055, 7649, 7578, 35186, 13412, 3555, 16758, 9122, 33911, 8717, 8592, 9778, 9381, 7251, 3555, 4587, 7578, 8717, 2304, 25720, 36764, 24401, 13412, 9778, 9381, 8315, 4724, 19312, 565, 8717, 2304, 25720, 1975, 2655, 1863, 9957, 6225, 42213, 8315, 3390, 2007, 4032, 8592, 38688, 8315, 8978, 3224, 7410, 3403, 15966], "avg_logprob": -0.260677095502615, "compression_ratio": 1.602510460251046, "no_speech_prob": 0.0, "words": [{"start": 552.16, "end": 552.88, "word": "فهي", "probability": 0.549072265625}, {"start": 552.88, "end": 553.24, "word": " تلت", "probability": 0.8562825520833334}, {"start": 553.24, "end": 553.6, "word": " يبقى", "probability": 0.82916259765625}, {"start": 553.6, "end": 554.1, "word": " اكتر", "probability": 0.8848876953125}, {"start": 554.1, "end": 554.34, "word": " من", "probability": 0.9814453125}, {"start": 554.34, "end": 
554.8, "word": " تلت", "probability": 0.974609375}, {"start": 554.8, "end": 554.98, "word": " ال", "probability": 0.44677734375}, {"start": 554.98, "end": 555.36, "word": " normal", "probability": 0.53466796875}, {"start": 555.36, "end": 556.06, "word": " herbicide", "probability": 0.439697265625}, {"start": 556.06, "end": 557.84, "word": " له", "probability": 0.2308349609375}, {"start": 557.84, "end": 558.12, "word": " 3", "probability": 0.363525390625}, {"start": 558.12, "end": 558.76, "word": ".5", "probability": 0.931884765625}, {"start": 558.76, "end": 559.26, "word": " ميكروبيل", "probability": 0.6326904296875}, {"start": 559.26, "end": 559.7, "word": " ودى", "probability": 0.5364583333333334}, {"start": 559.7, "end": 559.98, "word": " برضه", "probability": 0.9622395833333334}, {"start": 559.98, "end": 560.24, "word": " صورة", "probability": 0.9591471354166666}, {"start": 560.24, "end": 560.58, "word": " معبرة", "probability": 0.8248697916666666}, {"start": 560.58, "end": 560.8, "word": " ده", "probability": 0.89208984375}, {"start": 560.8, "end": 561.0, "word": " زى", "probability": 0.692138671875}, {"start": 561.0, "end": 561.12, "word": " ما", "probability": 0.8994140625}, {"start": 561.12, "end": 561.28, "word": " انتوا", "probability": 0.8284505208333334}, {"start": 561.28, "end": 561.68, "word": " شايفين", "probability": 0.9659423828125}, {"start": 561.68, "end": 562.52, "word": " هذه", "probability": 0.225341796875}, {"start": 562.52, "end": 562.82, "word": " عبارة", "probability": 0.9273681640625}, {"start": 562.82, "end": 562.96, "word": " عن", "probability": 0.9853515625}, {"start": 562.96, "end": 563.5, "word": " platelet", "probability": 0.49066162109375}, {"start": 563.5, "end": 567.42, "word": " كبرت", "probability": 0.4947916666666667}, {"start": 567.42, "end": 571.06, "word": " حد", "probability": 0.658935546875}, {"start": 571.06, "end": 571.34, "word": " تانى", "probability": 0.953125}, {"start": 571.34, "end": 571.46, "word": " يا", 
"probability": 0.88818359375}, {"start": 571.46, "end": 571.7, "word": " شباب", "probability": 0.9718424479166666}, {"start": 571.7, "end": 572.12, "word": " كيف", "probability": 0.948486328125}, {"start": 572.12, "end": 572.7, "word": " نشخص", "probability": 0.94873046875}, {"start": 572.7, "end": 572.94, "word": " يبقى", "probability": 0.8912353515625}, {"start": 572.94, "end": 573.14, "word": " نمرة", "probability": 0.6927083333333334}, {"start": 573.14, "end": 573.5, "word": " واحد", "probability": 0.972900390625}, {"start": 573.5, "end": 574.32, "word": " شخصنا", "probability": 0.978271484375}, {"start": 574.32, "end": 574.46, "word": " ب", "probability": 0.84326171875}, {"start": 574.46, "end": 574.72, "word": " bleeding", "probability": 0.939453125}, {"start": 574.72, "end": 575.14, "word": " time", "probability": 0.84375}, {"start": 575.14, "end": 575.88, "word": " نمرة", "probability": 0.8282877604166666}, {"start": 575.88, "end": 576.24, "word": " اتنين", "probability": 0.9390869140625}, {"start": 576.24, "end": 577.16, "word": " عملنا", "probability": 0.9386393229166666}, {"start": 577.16, "end": 577.44, "word": " blood", "probability": 0.9619140625}, {"start": 577.44, "end": 577.8, "word": " film", "probability": 0.9716796875}, {"start": 577.8, "end": 578.28, "word": " وشوفنا", "probability": 0.9486083984375}, {"start": 578.28, "end": 578.52, "word": " فيه", "probability": 0.828857421875}, {"start": 578.52, "end": 578.72, "word": " giant", "probability": 0.5751953125}, {"start": 578.72, "end": 579.34, "word": " platelet", "probability": 0.79150390625}], "temperature": 1.0}, {"id": 21, "seek": 60694, "start": 579.76, "end": 606.94, "text": "لمرة تلاتة في حاجة بيسموها aggregation tests aggregation study وهي كالآتي تنتبهوا عليها عشان أشرحكوا إياها بالمجمل و بعدين نمشي على اللي هي ال flow cytometry أو التكنيكا العربي ال aggregation study إنك تدرس ال function of a platelet", "tokens": [19528, 25720, 6055, 1211, 9307, 3660, 8978, 11331, 26108, 3660, 4724, 
1829, 38251, 2407, 11296, 16743, 399, 6921, 16743, 399, 2979, 37037, 1829, 9122, 6027, 148, 95, 31371, 6055, 29399, 3555, 3224, 14407, 25894, 11296, 6225, 8592, 7649, 5551, 46309, 5016, 4117, 14407, 11933, 25528, 11296, 20666, 2304, 7435, 42213, 4032, 39182, 9957, 8717, 2304, 8592, 1829, 15844, 13672, 1829, 39896, 2423, 3095, 40248, 34730, 34051, 16712, 19452, 1829, 4117, 995, 18863, 2288, 21292, 2423, 16743, 399, 2979, 36145, 4117, 6055, 3215, 2288, 3794, 2423, 2445, 295, 257, 3403, 15966], "avg_logprob": -0.2733516431116796, "compression_ratio": 1.5871559633027523, "no_speech_prob": 0.0, "words": [{"start": 579.76, "end": 580.2, "word": "لمرة", "probability": 0.514068603515625}, {"start": 580.2, "end": 580.88, "word": " تلاتة", "probability": 0.883544921875}, {"start": 580.88, "end": 581.54, "word": " في", "probability": 0.62744140625}, {"start": 581.54, "end": 582.1, "word": " حاجة", "probability": 0.9475911458333334}, {"start": 582.1, "end": 582.54, "word": " بيسموها", "probability": 0.88984375}, {"start": 582.54, "end": 583.1, "word": " aggregation", "probability": 0.798095703125}, {"start": 583.1, "end": 583.6, "word": " tests", "probability": 0.50341796875}, {"start": 583.6, "end": 585.2, "word": " aggregation", "probability": 0.84521484375}, {"start": 585.2, "end": 585.88, "word": " study", "probability": 0.95849609375}, {"start": 585.88, "end": 587.24, "word": " وهي", "probability": 0.722412109375}, {"start": 587.24, "end": 587.96, "word": " كالآتي", "probability": 0.739501953125}, {"start": 587.96, "end": 589.12, "word": " تنتبهوا", "probability": 0.75908203125}, {"start": 589.12, "end": 589.68, "word": " عليها", "probability": 0.807861328125}, {"start": 589.68, "end": 590.14, "word": " عشان", "probability": 0.8997395833333334}, {"start": 590.14, "end": 590.62, "word": " أشرحكوا", "probability": 0.6693115234375}, {"start": 590.62, "end": 590.86, "word": " إياها", "probability": 0.6857096354166666}, {"start": 590.86, "end": 591.52, "word": " بالمجمل", 
"probability": 0.9107666015625}, {"start": 591.52, "end": 591.64, "word": " و", "probability": 0.607421875}, {"start": 591.64, "end": 591.9, "word": " بعدين", "probability": 0.892822265625}, {"start": 591.9, "end": 592.22, "word": " نمشي", "probability": 0.900146484375}, {"start": 592.22, "end": 592.5, "word": " على", "probability": 0.463134765625}, {"start": 592.5, "end": 593.32, "word": " اللي", "probability": 0.88818359375}, {"start": 593.32, "end": 593.56, "word": " هي", "probability": 0.76953125}, {"start": 593.56, "end": 594.44, "word": " ال", "probability": 0.76708984375}, {"start": 594.44, "end": 594.86, "word": " flow", "probability": 0.9169921875}, {"start": 594.86, "end": 595.7, "word": " cytometry", "probability": 0.971435546875}, {"start": 595.7, "end": 595.98, "word": " أو", "probability": 0.65283203125}, {"start": 595.98, "end": 596.78, "word": " التكنيكا", "probability": 0.7498046875}, {"start": 596.78, "end": 597.34, "word": " العربي", "probability": 0.6922200520833334}, {"start": 597.34, "end": 598.8, "word": " ال", "probability": 0.7939453125}, {"start": 598.8, "end": 599.26, "word": " aggregation", "probability": 0.962890625}, {"start": 599.26, "end": 599.72, "word": " study", "probability": 0.974609375}, {"start": 599.72, "end": 601.8, "word": " إنك", "probability": 0.773193359375}, {"start": 601.8, "end": 602.32, "word": " تدرس", "probability": 0.9927978515625}, {"start": 602.32, "end": 603.96, "word": " ال", "probability": 0.97802734375}, {"start": 603.96, "end": 604.48, "word": " function", "probability": 0.93994140625}, {"start": 604.48, "end": 605.78, "word": " of", "probability": 0.97314453125}, {"start": 605.78, "end": 606.48, "word": " a", "probability": 0.30322265625}, {"start": 606.48, "end": 606.94, "word": " platelet", "probability": 0.68115234375}], "temperature": 1.0}, {"id": 22, "seek": 63369, "start": 611.57, "end": 633.69, "text": "ماشي و ببساطة بتتم من خلال إضافة certain agonist على البلد كلنا عرفنا ال agonist سابقا أن هو 
المادة اللتي ترتبط من البلد اللي بتنشطها، صح؟ بترتبط من البلد اللي بتنشطها، يومك نشطها بتعمل إيه؟", "tokens": [2304, 33599, 1829, 4032, 4724, 3555, 3794, 41193, 3660, 39894, 39237, 9154, 16490, 1211, 6027, 11933, 11242, 31845, 3660, 1629, 623, 266, 468, 15844, 29739, 1211, 3215, 28242, 8315, 6225, 28480, 8315, 2423, 623, 266, 468, 8608, 16758, 4587, 995, 14739, 31439, 9673, 18513, 3660, 13672, 31371, 6055, 43500, 3555, 9566, 9154, 29739, 1211, 3215, 13672, 1829, 39894, 1863, 8592, 9566, 11296, 12399, 20328, 5016, 22807, 39894, 43500, 3555, 9566, 9154, 29739, 1211, 3215, 13672, 1829, 39894, 1863, 8592, 9566, 11296, 12399, 7251, 20498, 4117, 8717, 8592, 9566, 11296, 39894, 25957, 1211, 11933, 1829, 3224, 22807], "avg_logprob": -0.21278995336945525, "compression_ratio": 1.8055555555555556, "no_speech_prob": 6.556510925292969e-07, "words": [{"start": 611.57, "end": 612.49, "word": "ماشي", "probability": 0.7672526041666666}, {"start": 612.49, "end": 613.41, "word": " و", "probability": 0.3759765625}, {"start": 613.41, "end": 614.43, "word": " ببساطة", "probability": 0.90517578125}, {"start": 614.43, "end": 615.11, "word": " بتتم", "probability": 0.770263671875}, {"start": 615.11, "end": 615.33, "word": " من", "probability": 0.994140625}, {"start": 615.33, "end": 616.37, "word": " خلال", "probability": 0.9943033854166666}, {"start": 616.37, "end": 617.49, "word": " إضافة", "probability": 0.9798583984375}, {"start": 617.49, "end": 618.11, "word": " certain", "probability": 0.791015625}, {"start": 618.11, "end": 618.97, "word": " agonist", "probability": 0.9480794270833334}, {"start": 618.97, "end": 620.09, "word": " على", "probability": 0.83203125}, {"start": 620.09, "end": 620.49, "word": " البلد", "probability": 0.4890950520833333}, {"start": 620.49, "end": 622.35, "word": " كلنا", "probability": 0.5919189453125}, {"start": 622.35, "end": 622.83, "word": " عرفنا", "probability": 0.9593098958333334}, {"start": 622.83, "end": 622.95, "word": " ال", "probability": 
0.92333984375}, {"start": 622.95, "end": 623.41, "word": " agonist", "probability": 0.9212239583333334}, {"start": 623.41, "end": 624.11, "word": " سابقا", "probability": 0.9886474609375}, {"start": 624.11, "end": 624.41, "word": " أن", "probability": 0.479736328125}, {"start": 624.41, "end": 624.71, "word": " هو", "probability": 0.71240234375}, {"start": 624.71, "end": 625.65, "word": " المادة", "probability": 0.9723307291666666}, {"start": 625.65, "end": 625.97, "word": " اللتي", "probability": 0.5074462890625}, {"start": 625.97, "end": 626.55, "word": " ترتبط", "probability": 0.9130859375}, {"start": 626.55, "end": 626.73, "word": " من", "probability": 0.63037109375}, {"start": 626.73, "end": 627.05, "word": " البلد", "probability": 0.9501953125}, {"start": 627.05, "end": 627.23, "word": " اللي", "probability": 0.915283203125}, {"start": 627.23, "end": 628.47, "word": " بتنشطها،", "probability": 0.7199503580729166}, {"start": 628.47, "end": 629.81, "word": " صح؟", "probability": 0.6881103515625}, {"start": 629.81, "end": 630.55, "word": " بترتبط", "probability": 0.857177734375}, {"start": 630.55, "end": 630.71, "word": " من", "probability": 0.9765625}, {"start": 630.71, "end": 631.11, "word": " البلد", "probability": 0.9783528645833334}, {"start": 631.11, "end": 631.27, "word": " اللي", "probability": 0.9794921875}, {"start": 631.27, "end": 632.31, "word": " بتنشطها،", "probability": 0.9273274739583334}, {"start": 632.31, "end": 632.67, "word": " يومك", "probability": 0.82373046875}, {"start": 632.67, "end": 633.05, "word": " نشطها", "probability": 0.9378662109375}, {"start": 633.05, "end": 633.47, "word": " بتعمل", "probability": 0.9173177083333334}, {"start": 633.47, "end": 633.69, "word": " إيه؟", "probability": 0.9263916015625}], "temperature": 1.0}, {"id": 23, "seek": 66152, "start": 635.36, "end": 661.52, "text": "ماشي؟ كيف نقيس هذه الصفة؟ كيف نقيس هذه الصفة؟ ماشي؟ قالوا it .. 
it's usually measured by photometric procedures اتقيس ماشي؟ either absorbance or transmittance بالفوتومتري يبقى اتقيس absorbance او ايش او transmittance", "tokens": [2304, 33599, 1829, 22807, 9122, 33911, 8717, 38436, 3794, 29538, 31767, 5172, 3660, 22807, 9122, 33911, 8717, 38436, 3794, 29538, 31767, 5172, 3660, 22807, 3714, 33599, 1829, 22807, 50239, 14407, 309, 4386, 309, 311, 2673, 12690, 538, 2409, 29470, 13846, 1975, 2655, 38436, 3794, 3714, 33599, 1829, 22807, 2139, 15631, 719, 420, 7715, 593, 719, 20666, 5172, 35473, 20498, 2655, 16572, 7251, 3555, 4587, 7578, 1975, 2655, 38436, 3794, 15631, 719, 1975, 2407, 1975, 1829, 8592, 1975, 2407, 7715, 593, 719], "avg_logprob": -0.2416158494789426, "compression_ratio": 1.675977653631285, "no_speech_prob": 0.0, "words": [{"start": 635.36, "end": 636.02, "word": "ماشي؟", "probability": 0.56805419921875}, {"start": 636.02, "end": 636.86, "word": " كيف", "probability": 0.9462890625}, {"start": 636.86, "end": 637.5, "word": " نقيس", "probability": 0.90478515625}, {"start": 637.5, "end": 637.9, "word": " هذه", "probability": 0.986328125}, {"start": 637.9, "end": 639.12, "word": " الصفة؟", "probability": 0.9058837890625}, {"start": 639.12, "end": 639.48, "word": " كيف", "probability": 0.694091796875}, {"start": 639.48, "end": 640.44, "word": " نقيس", "probability": 0.9879557291666666}, {"start": 640.44, "end": 640.88, "word": " هذه", "probability": 0.98388671875}, {"start": 640.88, "end": 641.86, "word": " الصفة؟", "probability": 0.9864501953125}, {"start": 641.86, "end": 642.62, "word": " ماشي؟", "probability": 0.8179931640625}, {"start": 642.62, "end": 642.94, "word": " قالوا", "probability": 0.88232421875}, {"start": 642.94, "end": 643.2, "word": " it", "probability": 0.8056640625}, {"start": 643.2, "end": 643.9, "word": " ..", "probability": 0.435791015625}, {"start": 643.9, "end": 644.26, "word": " it's", "probability": 0.712158203125}, {"start": 644.26, "end": 644.74, "word": " usually", "probability": 0.912109375}, 
{"start": 644.74, "end": 645.28, "word": " measured", "probability": 0.83203125}, {"start": 645.28, "end": 646.62, "word": " by", "probability": 0.927734375}, {"start": 646.62, "end": 647.46, "word": " photometric", "probability": 0.94775390625}, {"start": 647.46, "end": 649.58, "word": " procedures", "probability": 0.8134765625}, {"start": 649.58, "end": 650.96, "word": " اتقيس", "probability": 0.68145751953125}, {"start": 650.96, "end": 654.64, "word": " ماشي؟", "probability": 0.8270263671875}, {"start": 654.64, "end": 654.9, "word": " either", "probability": 0.9521484375}, {"start": 654.9, "end": 655.58, "word": " absorbance", "probability": 0.79296875}, {"start": 655.58, "end": 655.74, "word": " or", "probability": 0.888671875}, {"start": 655.74, "end": 656.94, "word": " transmittance", "probability": 0.8751627604166666}, {"start": 656.94, "end": 657.94, "word": " بالفوتومتري", "probability": 0.7609049479166666}, {"start": 657.94, "end": 658.54, "word": " يبقى", "probability": 0.65411376953125}, {"start": 658.54, "end": 659.02, "word": " اتقيس", "probability": 0.9378662109375}, {"start": 659.02, "end": 659.88, "word": " absorbance", "probability": 0.841064453125}, {"start": 659.88, "end": 660.1, "word": " او", "probability": 0.7266845703125}, {"start": 660.1, "end": 660.4, "word": " ايش", "probability": 0.6861979166666666}, {"start": 660.4, "end": 660.88, "word": " او", "probability": 0.895263671875}, {"start": 660.88, "end": 661.52, "word": " transmittance", "probability": 0.9488932291666666}], "temperature": 1.0}, {"id": 24, "seek": 68908, "start": 665.92, "end": 689.08, "text": "it's not a spectrophotometer فوتوميتر بنقيس من خلاله إما absorbance وإما transmittance في حالة ال aggregation study احنا بنقيس ال transmittance و العملية ببساطة عشان بقى انتبهوا عليها بتم كالاتي بنجيب platelet", "tokens": [270, 311, 406, 257, 6177, 11741, 310, 13606, 6156, 35473, 20498, 36081, 2288, 44945, 38436, 3794, 9154, 16490, 1211, 6027, 3224, 11933, 15042, 15631, 719, 4032, 
28814, 15042, 7715, 593, 719, 8978, 11331, 6027, 3660, 2423, 16743, 399, 2979, 1975, 5016, 8315, 44945, 38436, 3794, 2423, 7715, 593, 719, 4032, 18863, 42213, 10632, 4724, 3555, 3794, 41193, 3660, 6225, 8592, 7649, 4724, 4587, 7578, 16472, 2655, 3555, 3224, 14407, 25894, 11296, 39894, 2304, 9122, 6027, 9307, 1829, 44945, 7435, 1829, 3555, 5924, 2631], "avg_logprob": -0.30245534472522284, "compression_ratio": 1.444976076555024, "no_speech_prob": 0.0, "words": [{"start": 665.92, "end": 666.24, "word": "it's", "probability": 0.432861328125}, {"start": 666.24, "end": 666.4, "word": " not", "probability": 0.9384765625}, {"start": 666.4, "end": 666.54, "word": " a", "probability": 0.46240234375}, {"start": 666.54, "end": 667.36, "word": " spectrophotometer", "probability": 0.89794921875}, {"start": 667.36, "end": 668.74, "word": " فوتوميتر", "probability": 0.725390625}, {"start": 668.74, "end": 669.62, "word": " بنقيس", "probability": 0.7195638020833334}, {"start": 669.62, "end": 669.84, "word": " من", "probability": 0.99462890625}, {"start": 669.84, "end": 670.7, "word": " خلاله", "probability": 0.991943359375}, {"start": 670.7, "end": 671.44, "word": " إما", "probability": 0.7119140625}, {"start": 671.44, "end": 672.82, "word": " absorbance", "probability": 0.7353515625}, {"start": 672.82, "end": 673.26, "word": " وإما", "probability": 0.8689778645833334}, {"start": 673.26, "end": 675.04, "word": " transmittance", "probability": 0.7814127604166666}, {"start": 675.04, "end": 675.9, "word": " في", "probability": 0.7666015625}, {"start": 675.9, "end": 676.44, "word": " حالة", "probability": 0.9962565104166666}, {"start": 676.44, "end": 676.58, "word": " ال", "probability": 0.94970703125}, {"start": 676.58, "end": 677.34, "word": " aggregation", "probability": 0.822998046875}, {"start": 677.34, "end": 677.86, "word": " study", "probability": 0.97314453125}, {"start": 677.86, "end": 678.64, "word": " احنا", "probability": 0.78955078125}, {"start": 678.64, "end": 679.08, 
"word": " بنقيس", "probability": 0.9500325520833334}, {"start": 679.08, "end": 679.2, "word": " ال", "probability": 0.65283203125}, {"start": 679.2, "end": 680.14, "word": " transmittance", "probability": 0.958984375}, {"start": 680.14, "end": 681.42, "word": " و", "probability": 0.499755859375}, {"start": 681.42, "end": 681.98, "word": " العملية", "probability": 0.9451497395833334}, {"start": 681.98, "end": 682.62, "word": " ببساطة", "probability": 0.9853515625}, {"start": 682.62, "end": 682.86, "word": " عشان", "probability": 0.732666015625}, {"start": 682.86, "end": 683.6, "word": " بقى", "probability": 0.8121744791666666}, {"start": 683.6, "end": 684.1, "word": " انتبهوا", "probability": 0.770458984375}, {"start": 684.1, "end": 685.0, "word": " عليها", "probability": 0.4339599609375}, {"start": 685.0, "end": 685.6, "word": " بتم", "probability": 0.3477783203125}, {"start": 685.6, "end": 686.64, "word": " كالاتي", "probability": 0.668426513671875}, {"start": 686.64, "end": 687.76, "word": " بنجيب", "probability": 0.8897705078125}, {"start": 687.76, "end": 689.08, "word": " platelet", "probability": 0.5006103515625}], "temperature": 1.0}, {"id": 25, "seek": 71424, "start": 689.48, "end": 714.24, "text": "rich plasma، platelet، rich إيش، plasma، بحطها في test tube، يوم ما نجيبه platelet rich plasma، يعني فيه platelet في البلازمة، بتكون ال platelet معلقة في البلازمة ولا بتكون إيش؟ معلقة، بتكون transmit، إبت، شو بتعكس هذا على البلازمة؟ إن البلازمة بتكون turbulent، بتكون إيش؟ تربط، يعني لو قصنا transmittance", "tokens": [1341, 71, 22564, 12399, 3403, 15966, 12399, 4593, 11933, 1829, 8592, 12399, 22564, 12399, 4724, 5016, 9566, 11296, 8978, 1500, 9917, 12399, 7251, 20498, 19446, 8717, 7435, 1829, 3555, 3224, 3403, 15966, 4593, 22564, 12399, 37495, 22653, 8978, 3224, 3403, 15966, 8978, 29739, 1211, 31377, 46007, 12399, 39894, 30544, 2423, 3403, 15966, 20449, 1211, 28671, 8978, 29739, 1211, 31377, 46007, 49429, 39894, 30544, 11933, 1829, 8592, 22807, 20449, 1211, 28671, 
12399, 39894, 30544, 17831, 12399, 11933, 3555, 2655, 12399, 13412, 2407, 39894, 3615, 4117, 3794, 23758, 15844, 29739, 1211, 31377, 46007, 22807, 36145, 29739, 1211, 31377, 46007, 39894, 30544, 41697, 12399, 39894, 30544, 11933, 1829, 8592, 22807, 6055, 25513, 9566, 12399, 37495, 22653, 45164, 12174, 9381, 8315, 7715, 593, 719], "avg_logprob": -0.3354855312788782, "compression_ratio": 2.0765765765765765, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 689.48, "end": 690.22, "word": "rich", "probability": 0.504425048828125}, {"start": 690.22, "end": 691.28, "word": " plasma،", "probability": 0.5211181640625}, {"start": 691.28, "end": 692.14, "word": " platelet،", "probability": 0.5802815755208334}, {"start": 692.14, "end": 692.56, "word": " rich", "probability": 0.513671875}, {"start": 692.56, "end": 693.5, "word": " إيش،", "probability": 0.63330078125}, {"start": 693.5, "end": 694.26, "word": " plasma،", "probability": 0.745361328125}, {"start": 694.26, "end": 695.02, "word": " بحطها", "probability": 0.89111328125}, {"start": 695.02, "end": 695.12, "word": " في", "probability": 0.84716796875}, {"start": 695.12, "end": 695.32, "word": " test", "probability": 0.84130859375}, {"start": 695.32, "end": 696.44, "word": " tube،", "probability": 0.342529296875}, {"start": 696.44, "end": 696.8, "word": " يوم", "probability": 0.970947265625}, {"start": 696.8, "end": 697.02, "word": " ما", "probability": 0.78564453125}, {"start": 697.02, "end": 697.86, "word": " نجيبه", "probability": 0.7255859375}, {"start": 697.86, "end": 698.36, "word": " platelet", "probability": 0.738525390625}, {"start": 698.36, "end": 698.68, "word": " rich", "probability": 0.50146484375}, {"start": 698.68, "end": 699.34, "word": " plasma،", "probability": 0.851806640625}, {"start": 699.34, "end": 699.44, "word": " يعني", "probability": 0.90087890625}, {"start": 699.44, "end": 699.72, "word": " فيه", "probability": 0.71435546875}, {"start": 699.72, "end": 700.12, "word": " platelet", 
"probability": 0.872802734375}, {"start": 700.12, "end": 700.26, "word": " في", "probability": 0.80615234375}, {"start": 700.26, "end": 700.92, "word": " البلازمة،", "probability": 0.78583984375}, {"start": 700.92, "end": 701.2, "word": " بتكون", "probability": 0.91943359375}, {"start": 701.2, "end": 701.34, "word": " ال", "probability": 0.6689453125}, {"start": 701.34, "end": 701.84, "word": " platelet", "probability": 0.822998046875}, {"start": 701.84, "end": 702.78, "word": " معلقة", "probability": 0.9329427083333334}, {"start": 702.78, "end": 703.22, "word": " في", "probability": 0.96044921875}, {"start": 703.22, "end": 703.7, "word": " البلازمة", "probability": 0.944580078125}, {"start": 703.7, "end": 703.84, "word": " ولا", "probability": 0.349609375}, {"start": 703.84, "end": 704.18, "word": " بتكون", "probability": 0.9755859375}, {"start": 704.18, "end": 704.4, "word": " إيش؟", "probability": 0.7213134765625}, {"start": 704.4, "end": 705.08, "word": " معلقة،", "probability": 0.774169921875}, {"start": 705.08, "end": 705.62, "word": " بتكون", "probability": 0.54217529296875}, {"start": 705.62, "end": 705.84, "word": " transmit،", "probability": 0.347900390625}, {"start": 705.84, "end": 706.46, "word": " إبت،", "probability": 0.664794921875}, {"start": 706.46, "end": 706.64, "word": " شو", "probability": 0.8349609375}, {"start": 706.64, "end": 707.06, "word": " بتعكس", "probability": 0.9268798828125}, {"start": 707.06, "end": 707.32, "word": " هذا", "probability": 0.9013671875}, {"start": 707.32, "end": 707.5, "word": " على", "probability": 0.88330078125}, {"start": 707.5, "end": 708.52, "word": " البلازمة؟", "probability": 0.95166015625}, {"start": 708.52, "end": 708.64, "word": " إن", "probability": 0.51806640625}, {"start": 708.64, "end": 709.08, "word": " البلازمة", "probability": 0.9158935546875}, {"start": 709.08, "end": 709.42, "word": " بتكون", "probability": 0.96728515625}, {"start": 709.42, "end": 710.2, "word": " turbulent،", "probability": 
0.53790283203125}, {"start": 710.2, "end": 710.54, "word": " بتكون", "probability": 0.978271484375}, {"start": 710.54, "end": 710.92, "word": " إيش؟", "probability": 0.957275390625}, {"start": 710.92, "end": 712.16, "word": " تربط،", "probability": 0.6292724609375}, {"start": 712.16, "end": 712.48, "word": " يعني", "probability": 0.87255859375}, {"start": 712.48, "end": 712.7, "word": " لو", "probability": 0.984375}, {"start": 712.7, "end": 713.06, "word": " قصنا", "probability": 0.87548828125}, {"start": 713.06, "end": 714.24, "word": " transmittance", "probability": 0.8761393229166666}], "temperature": 1.0}, {"id": 26, "seek": 73452, "start": 715.6, "end": 734.52, "text": "للقنبوبة هذه اللي فيها platelet معلقة في بلازمة ال transmittance بيكون عالي ولا واطي؟ عالي، انت .. انا متخفض، انا في بدأت .. اتعالجت .. اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعالجت، اتعال", "tokens": [1211, 1211, 4587, 1863, 3555, 37746, 3660, 29538, 13672, 1829, 8978, 11296, 5924, 306, 83, 20449, 1211, 28671, 8978, 4724, 1211, 31377, 46007, 2423, 7715, 593, 719, 4724, 1829, 30544, 6225, 6027, 1829, 49429, 4032, 41193, 1829, 22807, 6225, 6027, 1829, 12399, 16472, 2655, 4386, 1975, 8315, 44650, 9778, 5172, 11242, 12399, 1975, 8315, 8978, 47525, 10721, 2655, 4386, 1975, 2655, 3615, 6027, 7435, 2655, 4386, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 
12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027, 7435, 2655, 12399, 1975, 2655, 3615, 6027], "avg_logprob": -0.17527778413560655, "compression_ratio": 3.728395061728395, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 715.6, "end": 716.46, "word": "للقنبوبة", "probability": 0.8024553571428571}, {"start": 716.46, "end": 716.84, "word": " هذه", "probability": 0.77978515625}, {"start": 716.84, "end": 717.22, "word": " اللي", "probability": 0.8369140625}, {"start": 717.22, "end": 717.68, "word": " فيها", "probability": 0.980224609375}, {"start": 717.68, "end": 718.46, "word": " platelet", "probability": 0.5489908854166666}, {"start": 718.46, "end": 719.26, "word": " معلقة", "probability": 0.9208984375}, {"start": 719.26, "end": 719.66, "word": " في", "probability": 0.73974609375}, {"start": 719.66, "end": 720.34, "word": " بلازمة", "probability": 0.78369140625}, {"start": 720.34, "end": 720.6, "word": " ال", "probability": 0.496337890625}, {"start": 720.6, "end": 721.84, "word": " transmittance", "probability": 0.7201334635416666}, {"start": 721.84, "end": 722.16, "word": " بيكون", "probability": 0.9384765625}, {"start": 722.16, "end": 722.44, "word": " عالي", "probability": 0.8977864583333334}, {"start": 722.44, "end": 722.6, "word": " ولا", "probability": 0.876953125}, {"start": 722.6, "end": 723.02, "word": " واطي؟", "probability": 0.8487548828125}, {"start": 723.02, "end": 723.92, "word": " عالي،", "probability": 0.79095458984375}, {"start": 723.92, "end": 724.4, "word": " انت", "probability": 0.6861572265625}, {"start": 724.4, "end": 724.58, "word": " ..", "probability": 0.2325439453125}, {"start": 724.58, "end": 724.74, "word": " 
انا", "probability": 0.52557373046875}, {"start": 724.74, "end": 725.18, "word": " متخفض،", "probability": 0.710595703125}, {"start": 725.18, "end": 725.26, "word": " انا", "probability": 0.858154296875}, {"start": 725.26, "end": 725.38, "word": " في", "probability": 0.72216796875}, {"start": 725.38, "end": 725.92, "word": " بدأت", "probability": 0.4410807291666667}, {"start": 725.92, "end": 725.94, "word": " ..", "probability": 0.33154296875}, {"start": 725.94, "end": 726.62, "word": " اتعالجت", "probability": 0.5132649739583334}, {"start": 726.62, "end": 726.62, "word": " ..", "probability": 0.427978515625}, {"start": 726.62, "end": 727.42, "word": " اتعالجت،", "probability": 0.6599644252232143}, {"start": 727.42, "end": 727.98, "word": " اتعالجت،", "probability": 0.7425188337053571}, {"start": 727.98, "end": 728.36, "word": " اتعالجت،", "probability": 0.8734305245535714}, {"start": 728.36, "end": 728.58, "word": " اتعالجت،", "probability": 0.912109375}, {"start": 728.58, "end": 728.58, "word": " اتعالجت،", "probability": 0.9361746651785714}, {"start": 728.58, "end": 728.58, "word": " اتعالجت،", "probability": 0.9497767857142857}, {"start": 728.58, "end": 728.58, "word": " اتعالجت،", "probability": 0.9577287946428571}, {"start": 728.58, "end": 728.6, "word": " اتعالجت،", "probability": 0.9630998883928571}, {"start": 728.6, "end": 729.12, "word": " اتعالجت،", "probability": 0.9665178571428571}, {"start": 729.12, "end": 729.32, "word": " اتعالجت،", "probability": 0.9686104910714286}, {"start": 729.32, "end": 729.82, "word": " اتعالجت،", "probability": 0.9703543526785714}, {"start": 729.82, "end": 729.82, "word": " اتعالجت،", "probability": 0.9717494419642857}, {"start": 729.82, "end": 729.82, "word": " اتعالجت،", "probability": 0.9732840401785714}, {"start": 729.82, "end": 729.86, "word": " اتعالجت،", "probability": 0.9742606026785714}, {"start": 729.86, "end": 729.9, "word": " اتعالجت،", "probability": 0.9756556919642857}, {"start": 729.9, "end": 729.9, "word": " 
اتعالجت،", "probability": 0.9773297991071429}, {"start": 729.9, "end": 729.96, "word": " اتعالجت،", "probability": 0.9784458705357143}, {"start": 729.96, "end": 729.96, "word": " اتعالجت،", "probability": 0.9793526785714286}, {"start": 729.96, "end": 730.7, "word": " اتعالجت،", "probability": 0.9802594866071429}, {"start": 730.7, "end": 731.36, "word": " اتعالجت،", "probability": 0.9813755580357143}, {"start": 731.36, "end": 731.92, "word": " اتعالجت،", "probability": 0.9824916294642857}, {"start": 731.92, "end": 733.64, "word": " اتعالجت،", "probability": 0.9836774553571429}, {"start": 733.64, "end": 734.52, "word": " اتعال", "probability": 0.979736328125}], "temperature": 1.0}, {"id": 27, "seek": 75003, "start": 739.71, "end": 750.03, "text": "طيب لو أضفنا على هذه الأنبوبة agonist، شو بدي يشير؟ لما يشير فيك مليش؟ شو بيشير في ليش؟ في البلد؟", "tokens": [9566, 1829, 3555, 45164, 5551, 11242, 5172, 8315, 15844, 29538, 16247, 1863, 3555, 37746, 3660, 623, 266, 468, 12399, 13412, 2407, 4724, 16254, 7251, 8592, 13546, 22807, 5296, 15042, 7251, 8592, 13546, 8978, 4117, 3714, 20292, 8592, 22807, 13412, 2407, 4724, 1829, 8592, 13546, 8978, 32239, 8592, 22807, 8978, 29739, 1211, 3215, 22807], "avg_logprob": -0.39554399583074784, "compression_ratio": 1.3884297520661157, "no_speech_prob": 2.205371856689453e-06, "words": [{"start": 739.71, "end": 740.49, "word": "طيب", "probability": 0.743896484375}, {"start": 740.49, "end": 741.03, "word": " لو", "probability": 0.671875}, {"start": 741.03, "end": 741.63, "word": " أضفنا", "probability": 0.794189453125}, {"start": 741.63, "end": 741.85, "word": " على", "probability": 0.69091796875}, {"start": 741.85, "end": 742.21, "word": " هذه", "probability": 0.76025390625}, {"start": 742.21, "end": 742.71, "word": " الأنبوبة", "probability": 0.88623046875}, {"start": 742.71, "end": 743.29, "word": " agonist،", "probability": 0.74658203125}, {"start": 743.29, "end": 743.37, "word": " شو", "probability": 0.809326171875}, {"start": 743.37, 
"end": 743.59, "word": " بدي", "probability": 0.692626953125}, {"start": 743.59, "end": 744.01, "word": " يشير؟", "probability": 0.74652099609375}, {"start": 744.01, "end": 746.05, "word": " لما", "probability": 0.43951416015625}, {"start": 746.05, "end": 746.37, "word": " يشير", "probability": 0.8025716145833334}, {"start": 746.37, "end": 746.57, "word": " فيك", "probability": 0.669189453125}, {"start": 746.57, "end": 747.11, "word": " مليش؟", "probability": 0.46484375}, {"start": 747.11, "end": 748.69, "word": " شو", "probability": 0.68896484375}, {"start": 748.69, "end": 748.97, "word": " بيشير", "probability": 0.9697265625}, {"start": 748.97, "end": 749.07, "word": " في", "probability": 0.861328125}, {"start": 749.07, "end": 749.37, "word": " ليش؟", "probability": 0.6476236979166666}, {"start": 749.37, "end": 749.43, "word": " في", "probability": 0.87109375}, {"start": 749.43, "end": 750.03, "word": " البلد؟", "probability": 0.58465576171875}], "temperature": 1.0}, {"id": 28, "seek": 78059, "start": 753.05, "end": 780.59, "text": "ماشي لإنه بزيد خلاص تصير Plumb وانت ترسّب في جعله مبوبة، ماشي؟ طيب لو قصنا Transmittance في هذا الحالة، شو بزيد؟ 100%، بزيد، فبالتالي الفرق في ال Transmittance ما بين Zero Time و After Addition ال Agonist هو عبارة عن Indication لل Platelet Function", "tokens": [2304, 33599, 1829, 5296, 28814, 1863, 3224, 4724, 11622, 25708, 16490, 1211, 33546, 6055, 9381, 13546, 2149, 2860, 4032, 7649, 2655, 6055, 2288, 3794, 11703, 3555, 8978, 10874, 30241, 3224, 3714, 3555, 37746, 3660, 12399, 3714, 33599, 1829, 22807, 23032, 1829, 3555, 45164, 12174, 9381, 8315, 6531, 15548, 719, 8978, 23758, 21542, 6027, 3660, 12399, 13412, 2407, 4724, 11622, 25708, 22807, 2319, 4, 12399, 4724, 11622, 25708, 12399, 6156, 3555, 6027, 2655, 6027, 1829, 27188, 2288, 4587, 8978, 2423, 6531, 15548, 719, 19446, 49374, 17182, 6161, 4032, 2381, 5349, 849, 2423, 2725, 266, 468, 31439, 6225, 3555, 9640, 3660, 18871, 2333, 8758, 24976, 17461, 15966, 11166, 882], 
"avg_logprob": -0.2799479169426141, "compression_ratio": 1.4330708661417322, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 753.05, "end": 753.41, "word": "ماشي", "probability": 0.6331380208333334}, {"start": 753.41, "end": 753.77, "word": " لإنه", "probability": 0.76531982421875}, {"start": 753.77, "end": 754.05, "word": " بزيد", "probability": 0.8240559895833334}, {"start": 754.05, "end": 754.35, "word": " خلاص", "probability": 0.9420572916666666}, {"start": 754.35, "end": 754.65, "word": " تصير", "probability": 0.8191731770833334}, {"start": 754.65, "end": 755.07, "word": " Plumb", "probability": 0.395263671875}, {"start": 755.07, "end": 756.43, "word": " وانت", "probability": 0.6923828125}, {"start": 756.43, "end": 756.93, "word": " ترسّب", "probability": 0.7087890625}, {"start": 756.93, "end": 757.09, "word": " في", "probability": 0.90380859375}, {"start": 757.09, "end": 757.45, "word": " جعله", "probability": 0.8098958333333334}, {"start": 757.45, "end": 758.07, "word": " مبوبة،", "probability": 0.6501708984375}, {"start": 758.07, "end": 759.13, "word": " ماشي؟", "probability": 0.770263671875}, {"start": 759.13, "end": 759.45, "word": " طيب", "probability": 0.9541015625}, {"start": 759.45, "end": 759.67, "word": " لو", "probability": 0.83203125}, {"start": 759.67, "end": 760.09, "word": " قصنا", "probability": 0.833984375}, {"start": 760.09, "end": 761.05, "word": " Transmittance", "probability": 0.7029622395833334}, {"start": 761.05, "end": 761.21, "word": " في", "probability": 0.94189453125}, {"start": 761.21, "end": 761.39, "word": " هذا", "probability": 0.34814453125}, {"start": 761.39, "end": 761.79, "word": " الحالة،", "probability": 0.87939453125}, {"start": 761.79, "end": 761.91, "word": " شو", "probability": 0.81591796875}, {"start": 761.91, "end": 762.85, "word": " بزيد؟", "probability": 0.8094482421875}, {"start": 762.85, "end": 763.85, "word": " 100", "probability": 0.250732421875}, {"start": 763.85, "end": 764.45, "word": "%،", 
"probability": 0.715576171875}, {"start": 764.45, "end": 765.31, "word": " بزيد،", "probability": 0.847412109375}, {"start": 765.31, "end": 765.73, "word": " فبالتالي", "probability": 0.9618326822916666}, {"start": 765.73, "end": 766.23, "word": " الفرق", "probability": 0.9674479166666666}, {"start": 766.23, "end": 766.71, "word": " في", "probability": 0.94677734375}, {"start": 766.71, "end": 766.83, "word": " ال", "probability": 0.42529296875}, {"start": 766.83, "end": 767.79, "word": " Transmittance", "probability": 0.9099934895833334}, {"start": 767.79, "end": 768.43, "word": " ما", "probability": 0.81103515625}, {"start": 768.43, "end": 768.87, "word": " بين", "probability": 0.98583984375}, {"start": 768.87, "end": 770.57, "word": " Zero", "probability": 0.73974609375}, {"start": 770.57, "end": 771.17, "word": " Time", "probability": 0.4609375}, {"start": 771.17, "end": 772.39, "word": " و", "probability": 0.916015625}, {"start": 772.39, "end": 772.91, "word": " After", "probability": 0.5302734375}, {"start": 772.91, "end": 773.71, "word": " Addition", "probability": 0.7340087890625}, {"start": 773.71, "end": 774.91, "word": " ال", "probability": 0.6474609375}, {"start": 774.91, "end": 775.81, "word": " Agonist", "probability": 0.7490234375}, {"start": 775.81, "end": 776.87, "word": " هو", "probability": 0.89404296875}, {"start": 776.87, "end": 777.19, "word": " عبارة", "probability": 0.9393310546875}, {"start": 777.19, "end": 777.41, "word": " عن", "probability": 0.998046875}, {"start": 777.41, "end": 778.17, "word": " Indication", "probability": 0.906982421875}, {"start": 778.17, "end": 778.59, "word": " لل", "probability": 0.77099609375}, {"start": 778.59, "end": 779.27, "word": " Platelet", "probability": 0.697998046875}, {"start": 779.27, "end": 780.59, "word": " Function", "probability": 0.8603515625}], "temperature": 1.0}, {"id": 29, "seek": 81000, "start": 780.92, "end": 810.0, "text": "يعني بيصار aggregation و لا بيصار اش aggregation quantity و لا 
quantity؟ كوانتيت و لا كوانتيت؟ لأ لأ بتصير quantity بتخيص اقامة او اكترار و بتعكس .. انا جيت حافظجيك صور لهذا الموضوع و بتعكس على شكل chart لماشي قرائتك فتطلع .. قبلكوا شفتوا جهاز لسم القلب؟ مش فيه إبرة ال ECG مش فيه إبرة قاعدة بترسم curves", "tokens": [40228, 22653, 4724, 1829, 9381, 9640, 16743, 399, 4032, 20193, 4724, 1829, 9381, 9640, 1975, 8592, 16743, 399, 11275, 4032, 20193, 11275, 22807, 9122, 2407, 7649, 2655, 36081, 4032, 20193, 9122, 2407, 7649, 2655, 36081, 22807, 5296, 10721, 5296, 10721, 39894, 9381, 13546, 11275, 39894, 9778, 1829, 9381, 1975, 4587, 10943, 3660, 1975, 2407, 1975, 4117, 2655, 2288, 9640, 4032, 39894, 3615, 4117, 3794, 4386, 1975, 8315, 10874, 36081, 11331, 31845, 19913, 7435, 1829, 4117, 20328, 13063, 5296, 3224, 15730, 9673, 2407, 11242, 45367, 4032, 39894, 3615, 4117, 3794, 15844, 13412, 28820, 6927, 5296, 2304, 33599, 1829, 12174, 23557, 19986, 2655, 4117, 6156, 2655, 9566, 1211, 3615, 4386, 12174, 36150, 4117, 14407, 13412, 5172, 2655, 14407, 10874, 3224, 31377, 5296, 38251, 25062, 46152, 22807, 37893, 8978, 3224, 11933, 3555, 25720, 2423, 19081, 38, 37893, 8978, 3224, 11933, 3555, 25720, 12174, 995, 22488, 3660, 39894, 2288, 38251, 19490], "avg_logprob": -0.36423140804509857, "compression_ratio": 1.7773722627737227, "no_speech_prob": 0.0, "words": [{"start": 780.92, "end": 781.24, "word": "يعني", "probability": 0.59490966796875}, {"start": 781.24, "end": 781.64, "word": " بيصار", "probability": 0.65191650390625}, {"start": 781.64, "end": 782.4, "word": " aggregation", "probability": 0.875732421875}, {"start": 782.4, "end": 782.56, "word": " و", "probability": 0.5224609375}, {"start": 782.56, "end": 782.66, "word": " لا", "probability": 0.88232421875}, {"start": 782.66, "end": 782.98, "word": " بيصار", "probability": 0.7930908203125}, {"start": 782.98, "end": 783.1, "word": " اش", "probability": 0.5902099609375}, {"start": 783.1, "end": 783.58, "word": " aggregation", "probability": 0.9609375}, {"start": 783.58, "end": 784.0, 
"word": " quantity", "probability": 0.12158203125}, {"start": 784.0, "end": 784.58, "word": " و", "probability": 0.2083740234375}, {"start": 784.58, "end": 784.78, "word": " لا", "probability": 0.984375}, {"start": 784.78, "end": 785.8, "word": " quantity؟", "probability": 0.37664794921875}, {"start": 785.8, "end": 786.98, "word": " كوانتيت", "probability": 0.5431884765625}, {"start": 786.98, "end": 787.08, "word": " و", "probability": 0.79296875}, {"start": 787.08, "end": 787.16, "word": " لا", "probability": 0.9931640625}, {"start": 787.16, "end": 787.96, "word": " كوانتيت؟", "probability": 0.7727864583333334}, {"start": 787.96, "end": 788.14, "word": " لأ", "probability": 0.70947265625}, {"start": 788.14, "end": 788.24, "word": " لأ", "probability": 0.878662109375}, {"start": 788.24, "end": 788.5, "word": " بتصير", "probability": 0.9142252604166666}, {"start": 788.5, "end": 789.04, "word": " quantity", "probability": 0.82275390625}, {"start": 789.04, "end": 789.84, "word": " بتخيص", "probability": 0.75732421875}, {"start": 789.84, "end": 790.44, "word": " اقامة", "probability": 0.670684814453125}, {"start": 790.44, "end": 790.58, "word": " او", "probability": 0.6640625}, {"start": 790.58, "end": 791.14, "word": " اكترار", "probability": 0.5339111328125}, {"start": 791.14, "end": 791.34, "word": " و", "probability": 0.5546875}, {"start": 791.34, "end": 792.2, "word": " بتعكس", "probability": 0.7547607421875}, {"start": 792.2, "end": 792.58, "word": " ..", "probability": 0.283935546875}, {"start": 792.58, "end": 792.76, "word": " انا", "probability": 0.6151123046875}, {"start": 792.76, "end": 792.92, "word": " جيت", "probability": 0.561767578125}, {"start": 792.92, "end": 793.34, "word": " حافظجيك", "probability": 0.6377360026041666}, {"start": 793.34, "end": 793.6, "word": " صور", "probability": 0.99267578125}, {"start": 793.6, "end": 793.9, "word": " لهذا", "probability": 0.660400390625}, {"start": 793.9, "end": 794.4, "word": " الموضوع", "probability": 
0.9952392578125}, {"start": 794.4, "end": 795.0, "word": " و", "probability": 0.85009765625}, {"start": 795.0, "end": 795.7, "word": " بتعكس", "probability": 0.973388671875}, {"start": 795.7, "end": 795.98, "word": " على", "probability": 0.8876953125}, {"start": 795.98, "end": 796.48, "word": " شكل", "probability": 0.99267578125}, {"start": 796.48, "end": 797.64, "word": " chart", "probability": 0.8056640625}, {"start": 797.64, "end": 800.02, "word": " لماشي", "probability": 0.628173828125}, {"start": 800.02, "end": 800.88, "word": " قرائتك", "probability": 0.77470703125}, {"start": 800.88, "end": 802.18, "word": " فتطلع", "probability": 0.9125}, {"start": 802.18, "end": 803.26, "word": " ..", "probability": 0.48681640625}, {"start": 803.26, "end": 803.54, "word": " قبلكوا", "probability": 0.8779296875}, {"start": 803.54, "end": 804.0, "word": " شفتوا", "probability": 0.8336181640625}, {"start": 804.0, "end": 804.54, "word": " جهاز", "probability": 0.9933268229166666}, {"start": 804.54, "end": 804.82, "word": " لسم", "probability": 0.898681640625}, {"start": 804.82, "end": 806.06, "word": " القلب؟", "probability": 0.8662109375}, {"start": 806.06, "end": 806.72, "word": " مش", "probability": 0.6103515625}, {"start": 806.72, "end": 806.98, "word": " فيه", "probability": 0.9111328125}, {"start": 806.98, "end": 807.24, "word": " إبرة", "probability": 0.7942708333333334}, {"start": 807.24, "end": 807.42, "word": " ال", "probability": 0.5478515625}, {"start": 807.42, "end": 807.98, "word": " ECG", "probability": 0.95361328125}, {"start": 807.98, "end": 808.56, "word": " مش", "probability": 0.413330078125}, {"start": 808.56, "end": 808.72, "word": " فيه", "probability": 0.947998046875}, {"start": 808.72, "end": 808.9, "word": " إبرة", "probability": 0.9611002604166666}, {"start": 808.9, "end": 809.16, "word": " قاعدة", "probability": 0.81060791015625}, {"start": 809.16, "end": 809.52, "word": " بترسم", "probability": 0.9583333333333334}, {"start": 809.52, "end": 810.0, 
"word": " curves", "probability": 0.5029296875}], "temperature": 1.0}, {"id": 30, "seek": 83920, "start": 810.42, "end": 839.2, "text": "ماشى نفس التحكاية هذه بتعطيك اياش بتعطيك اللى هو response in figures بما يشبه اللى هو ال ECG وبالتالي بتقيسلك إذا في response or there is no response يعني إذا كان في response في حركة للإبرة ولا لا وإذا مافيش response عادة الإبرة بتكون أياش يعني في محل مفهوم شباب؟ ولا أنا هتشوفها بالكلام أحد الطرق لقياس", "tokens": [2304, 33599, 7578, 8717, 36178, 16712, 5016, 4117, 995, 10632, 29538, 39894, 3615, 9566, 1829, 4117, 1975, 1829, 33599, 39894, 3615, 9566, 1829, 4117, 13672, 7578, 31439, 4134, 294, 9624, 4724, 15042, 7251, 8592, 3555, 3224, 13672, 7578, 31439, 2423, 19081, 38, 46599, 6027, 2655, 6027, 1829, 39894, 38436, 3794, 23275, 11933, 15730, 8978, 4134, 420, 456, 307, 572, 4134, 37495, 22653, 11933, 15730, 25961, 8978, 4134, 8978, 11331, 31747, 3660, 24976, 28814, 3555, 25720, 49429, 20193, 4032, 28814, 15730, 19446, 41185, 8592, 4134, 6225, 18513, 3660, 33688, 3555, 25720, 39894, 30544, 36632, 33599, 37495, 22653, 8978, 3714, 5016, 1211, 3714, 5172, 3224, 20498, 13412, 3555, 16758, 22807, 49429, 41850, 8032, 2655, 8592, 38688, 11296, 20666, 28820, 10943, 5551, 24401, 41950, 2288, 4587, 5296, 38436, 32277], "avg_logprob": -0.21309055141576633, "compression_ratio": 1.8659003831417624, "no_speech_prob": 0.0, "words": [{"start": 810.42, "end": 810.84, "word": "ماشى", "probability": 0.8037109375}, {"start": 810.84, "end": 811.06, "word": " نفس", "probability": 0.8505859375}, {"start": 811.06, "end": 811.56, "word": " التحكاية", "probability": 0.7265625}, {"start": 811.56, "end": 812.1, "word": " هذه", "probability": 0.43359375}, {"start": 812.1, "end": 812.56, "word": " بتعطيك", "probability": 0.9796875}, {"start": 812.56, "end": 812.88, "word": " اياش", "probability": 0.5907389322916666}, {"start": 812.88, "end": 814.16, "word": " بتعطيك", "probability": 0.884326171875}, {"start": 814.16, "end": 814.78, "word": " اللى", 
"probability": 0.85693359375}, {"start": 814.78, "end": 814.94, "word": " هو", "probability": 0.93017578125}, {"start": 814.94, "end": 815.52, "word": " response", "probability": 0.88525390625}, {"start": 815.52, "end": 815.8, "word": " in", "probability": 0.9150390625}, {"start": 815.8, "end": 816.3, "word": " figures", "probability": 0.896484375}, {"start": 816.3, "end": 817.08, "word": " بما", "probability": 0.955810546875}, {"start": 817.08, "end": 817.62, "word": " يشبه", "probability": 0.982666015625}, {"start": 817.62, "end": 818.26, "word": " اللى", "probability": 0.977294921875}, {"start": 818.26, "end": 818.36, "word": " هو", "probability": 0.888671875}, {"start": 818.36, "end": 818.48, "word": " ال", "probability": 0.89306640625}, {"start": 818.48, "end": 819.22, "word": " ECG", "probability": 0.963134765625}, {"start": 819.22, "end": 820.2, "word": " وبالتالي", "probability": 0.81767578125}, {"start": 820.2, "end": 821.68, "word": " بتقيسلك", "probability": 0.802734375}, {"start": 821.68, "end": 822.36, "word": " إذا", "probability": 0.7298583984375}, {"start": 822.36, "end": 822.54, "word": " في", "probability": 0.71826171875}, {"start": 822.54, "end": 823.3, "word": " response", "probability": 0.73193359375}, {"start": 823.3, "end": 824.14, "word": " or", "probability": 0.65869140625}, {"start": 824.14, "end": 824.42, "word": " there", "probability": 0.814453125}, {"start": 824.42, "end": 824.6, "word": " is", "probability": 0.9423828125}, {"start": 824.6, "end": 824.8, "word": " no", "probability": 0.9453125}, {"start": 824.8, "end": 825.32, "word": " response", "probability": 0.95751953125}, {"start": 825.32, "end": 825.74, "word": " يعني", "probability": 0.824951171875}, {"start": 825.74, "end": 826.36, "word": " إذا", "probability": 0.953125}, {"start": 826.36, "end": 826.62, "word": " كان", "probability": 0.9931640625}, {"start": 826.62, "end": 826.76, "word": " في", "probability": 0.94482421875}, {"start": 826.76, "end": 827.24, "word": " 
response", "probability": 0.93408203125}, {"start": 827.24, "end": 827.4, "word": " في", "probability": 0.4990234375}, {"start": 827.4, "end": 827.78, "word": " حركة", "probability": 0.9869791666666666}, {"start": 827.78, "end": 828.24, "word": " للإبرة", "probability": 0.7723388671875}, {"start": 828.24, "end": 828.56, "word": " ولا", "probability": 0.818359375}, {"start": 828.56, "end": 828.82, "word": " لا", "probability": 0.74169921875}, {"start": 828.82, "end": 829.34, "word": " وإذا", "probability": 0.880859375}, {"start": 829.34, "end": 829.66, "word": " مافيش", "probability": 0.9441731770833334}, {"start": 829.66, "end": 830.2, "word": " response", "probability": 0.95751953125}, {"start": 830.2, "end": 830.84, "word": " عادة", "probability": 0.9012044270833334}, {"start": 830.84, "end": 831.18, "word": " الإبرة", "probability": 0.8401692708333334}, {"start": 831.18, "end": 831.56, "word": " بتكون", "probability": 0.930419921875}, {"start": 831.56, "end": 831.9, "word": " أياش", "probability": 0.77392578125}, {"start": 831.9, "end": 833.24, "word": " يعني", "probability": 0.86865234375}, {"start": 833.24, "end": 833.4, "word": " في", "probability": 0.89404296875}, {"start": 833.4, "end": 833.9, "word": " محل", "probability": 0.9798177083333334}, {"start": 833.9, "end": 834.4, "word": " مفهوم", "probability": 0.793548583984375}, {"start": 834.4, "end": 834.98, "word": " شباب؟", "probability": 0.7366943359375}, {"start": 834.98, "end": 835.14, "word": " ولا", "probability": 0.29736328125}, {"start": 835.14, "end": 835.26, "word": " أنا", "probability": 0.51318359375}, {"start": 835.26, "end": 835.66, "word": " هتشوفها", "probability": 0.853515625}, {"start": 835.66, "end": 836.04, "word": " بالكلام", "probability": 0.5926513671875}, {"start": 836.04, "end": 837.14, "word": " أحد", "probability": 0.920654296875}, {"start": 837.14, "end": 837.86, "word": " الطرق", "probability": 0.9080403645833334}, {"start": 837.86, "end": 839.2, "word": " لقياس", 
"probability": 0.8694661458333334}], "temperature": 1.0}, {"id": 31, "seek": 86021, "start": 839.51, "end": 860.21, "text": "الـ Platelet Function هو هدا التكنيكى، ووجدوا تكملال الموضوع انتبهوا عليها يا شباب، وجدوا انه there are different responses by using different agonists، ماشي؟ يعني حتى", "tokens": [6027, 39184, 17461, 15966, 11166, 882, 31439, 8032, 28259, 16712, 19452, 1829, 4117, 7578, 12399, 4032, 29245, 3215, 14407, 6055, 24793, 1211, 6027, 9673, 2407, 11242, 45367, 16472, 2655, 3555, 3224, 14407, 25894, 11296, 35186, 13412, 3555, 16758, 12399, 49610, 3215, 14407, 16472, 3224, 456, 366, 819, 13019, 538, 1228, 819, 623, 266, 1751, 12399, 3714, 33599, 1829, 22807, 37495, 22653, 11331, 49975], "avg_logprob": -0.3847656240686774, "compression_ratio": 1.3728813559322033, "no_speech_prob": 7.808208465576172e-06, "words": [{"start": 839.51, "end": 839.91, "word": "الـ", "probability": 0.637939453125}, {"start": 839.91, "end": 840.51, "word": " Platelet", "probability": 0.63671875}, {"start": 840.51, "end": 841.13, "word": " Function", "probability": 0.77294921875}, {"start": 841.13, "end": 841.63, "word": " هو", "probability": 0.83984375}, {"start": 841.63, "end": 841.85, "word": " هدا", "probability": 0.3297119140625}, {"start": 841.85, "end": 843.97, "word": " التكنيكى،", "probability": 0.6986083984375}, {"start": 843.97, "end": 844.67, "word": " ووجدوا", "probability": 0.7828369140625}, {"start": 844.67, "end": 845.41, "word": " تكملال", "probability": 0.576751708984375}, {"start": 845.41, "end": 845.77, "word": " الموضوع", "probability": 0.981201171875}, {"start": 845.77, "end": 846.21, "word": " انتبهوا", "probability": 0.83291015625}, {"start": 846.21, "end": 846.45, "word": " عليها", "probability": 0.780517578125}, {"start": 846.45, "end": 846.55, "word": " يا", "probability": 0.71044921875}, {"start": 846.55, "end": 847.03, "word": " شباب،", "probability": 0.90234375}, {"start": 847.03, "end": 847.57, "word": " وجدوا", "probability": 0.9169921875}, 
{"start": 847.57, "end": 848.37, "word": " انه", "probability": 0.49871826171875}, {"start": 848.37, "end": 848.61, "word": " there", "probability": 0.40966796875}, {"start": 848.61, "end": 850.05, "word": " are", "probability": 0.96533203125}, {"start": 850.05, "end": 851.43, "word": " different", "probability": 0.95166015625}, {"start": 851.43, "end": 852.73, "word": " responses", "probability": 0.398193359375}, {"start": 852.73, "end": 853.83, "word": " by", "probability": 0.9208984375}, {"start": 853.83, "end": 854.49, "word": " using", "probability": 0.943359375}, {"start": 854.49, "end": 855.79, "word": " different", "probability": 0.91259765625}, {"start": 855.79, "end": 857.97, "word": " agonists،", "probability": 0.50836181640625}, {"start": 857.97, "end": 858.89, "word": " ماشي؟", "probability": 0.7672119140625}, {"start": 858.89, "end": 859.43, "word": " يعني", "probability": 0.936767578125}, {"start": 859.43, "end": 860.21, "word": " حتى", "probability": 0.6558837890625}], "temperature": 1.0}, {"id": 32, "seek": 89011, "start": 871.79, "end": 890.11, "text": "عشان هيك ال procedure بحد ذاته مش كتير مهم، مش سهل، مش يعني تجيب البوبة و تاخد ال transmitters في البداية و بعد ما ضيف ال agonist عملية صعبة، مش صعبة، لكن المهم في ذلك بعد ذلك", "tokens": [3615, 8592, 7649, 39896, 4117, 2423, 10747, 4724, 24401, 29910, 9307, 3224, 37893, 9122, 2655, 13546, 3714, 16095, 12399, 37893, 8608, 3224, 1211, 12399, 37893, 37495, 22653, 6055, 7435, 1829, 3555, 29739, 37746, 3660, 4032, 6055, 47283, 3215, 2423, 17831, 1559, 8978, 29739, 28259, 10632, 4032, 39182, 19446, 48812, 33911, 2423, 623, 266, 468, 6225, 42213, 10632, 20328, 3615, 49401, 12399, 37893, 20328, 3615, 49401, 12399, 44381, 9673, 16095, 8978, 29910, 23275, 39182, 29910, 23275], "avg_logprob": -0.28536183465468257, "compression_ratio": 1.5888888888888888, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 871.79, "end": 872.55, "word": "عشان", "probability": 0.5231730143229166}, {"start": 872.55, 
"end": 872.71, "word": " هيك", "probability": 0.59521484375}, {"start": 872.71, "end": 872.77, "word": " ال", "probability": 0.89990234375}, {"start": 872.77, "end": 873.15, "word": " procedure", "probability": 0.91552734375}, {"start": 873.15, "end": 873.55, "word": " بحد", "probability": 0.705078125}, {"start": 873.55, "end": 874.01, "word": " ذاته", "probability": 0.9807942708333334}, {"start": 874.01, "end": 874.53, "word": " مش", "probability": 0.962890625}, {"start": 874.53, "end": 874.97, "word": " كتير", "probability": 0.9122721354166666}, {"start": 874.97, "end": 876.15, "word": " مهم،", "probability": 0.7423909505208334}, {"start": 876.15, "end": 876.47, "word": " مش", "probability": 0.5205078125}, {"start": 876.47, "end": 877.01, "word": " سهل،", "probability": 0.905517578125}, {"start": 877.01, "end": 877.25, "word": " مش", "probability": 0.767578125}, {"start": 877.25, "end": 877.53, "word": " يعني", "probability": 0.635009765625}, {"start": 877.53, "end": 877.89, "word": " تجيب", "probability": 0.9033203125}, {"start": 877.89, "end": 878.83, "word": " البوبة", "probability": 0.6759440104166666}, {"start": 878.83, "end": 878.93, "word": " و", "probability": 0.76953125}, {"start": 878.93, "end": 880.91, "word": " تاخد", "probability": 0.5241292317708334}, {"start": 880.91, "end": 881.05, "word": " ال", "probability": 0.51171875}, {"start": 881.05, "end": 882.21, "word": " transmitters", "probability": 0.7154541015625}, {"start": 882.21, "end": 882.33, "word": " في", "probability": 0.7158203125}, {"start": 882.33, "end": 882.87, "word": " البداية", "probability": 0.9866536458333334}, {"start": 882.87, "end": 882.95, "word": " و", "probability": 0.705078125}, {"start": 882.95, "end": 883.15, "word": " بعد", "probability": 0.91796875}, {"start": 883.15, "end": 883.29, "word": " ما", "probability": 0.4775390625}, {"start": 883.29, "end": 883.51, "word": " ضيف", "probability": 0.660888671875}, {"start": 883.51, "end": 883.65, "word": " ال", "probability": 
0.91650390625}, {"start": 883.65, "end": 883.97, "word": " agonist", "probability": 0.8292643229166666}, {"start": 883.97, "end": 884.69, "word": " عملية", "probability": 0.8492838541666666}, {"start": 884.69, "end": 885.39, "word": " صعبة،", "probability": 0.87646484375}, {"start": 885.39, "end": 885.97, "word": " مش", "probability": 0.95556640625}, {"start": 885.97, "end": 886.75, "word": " صعبة،", "probability": 0.9110107421875}, {"start": 886.75, "end": 887.07, "word": " لكن", "probability": 0.95703125}, {"start": 887.07, "end": 888.39, "word": " المهم", "probability": 0.986328125}, {"start": 888.39, "end": 888.69, "word": " في", "probability": 0.921875}, {"start": 888.69, "end": 889.27, "word": " ذلك", "probability": 0.97705078125}, {"start": 889.27, "end": 889.55, "word": " بعد", "probability": 0.64599609375}, {"start": 889.55, "end": 890.11, "word": " ذلك", "probability": 0.993408203125}], "temperature": 1.0}, {"id": 33, "seek": 91916, "start": 890.34, "end": 919.16, "text": "هو ال interpretation of your results انك تعرف تقرأ نتائجك،ماشي؟ لكن لإنه في different behavior للplatinum في كل مرض مختلف،ماشي؟ لكن اليوم سهلو علينا خالص صار في software،ماشي؟ خاص بهذا الموضوع فقط حسب النتيجة بديك تشخيص مباشرة على طول،ماشي؟ في software الآن بيبيع في السوق", "tokens": [3224, 2407, 2423, 14174, 295, 428, 3542, 16472, 4117, 37279, 28480, 6055, 4587, 2288, 10721, 8717, 2655, 16373, 7435, 4117, 12399, 2304, 33599, 1829, 22807, 44381, 5296, 28814, 1863, 3224, 8978, 819, 5223, 24976, 564, 22685, 449, 8978, 28242, 3714, 43042, 3714, 46456, 46538, 12399, 2304, 33599, 1829, 22807, 44381, 45595, 20498, 8608, 3224, 1211, 2407, 25894, 8315, 16490, 6027, 9381, 20328, 9640, 8978, 4722, 12399, 2304, 33599, 1829, 22807, 16490, 33546, 39627, 15730, 9673, 2407, 11242, 45367, 6156, 47432, 11331, 35457, 28239, 31371, 7435, 3660, 4724, 16254, 4117, 6055, 8592, 9778, 1829, 9381, 3714, 3555, 33599, 25720, 15844, 23032, 12610, 12399, 2304, 33599, 1829, 22807, 8978, 4722, 6024, 48506, 4724, 
1829, 21292, 3615, 8978, 21136, 30543], "avg_logprob": -0.16631355679641335, "compression_ratio": 1.6179775280898876, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 890.34, "end": 890.68, "word": "هو", "probability": 0.91845703125}, {"start": 890.68, "end": 890.78, "word": " ال", "probability": 0.7177734375}, {"start": 890.78, "end": 891.3, "word": " interpretation", "probability": 0.9072265625}, {"start": 891.3, "end": 891.88, "word": " of", "probability": 0.97998046875}, {"start": 891.88, "end": 892.14, "word": " your", "probability": 0.91162109375}, {"start": 892.14, "end": 892.68, "word": " results", "probability": 0.845703125}, {"start": 892.68, "end": 893.42, "word": " انك", "probability": 0.8134765625}, {"start": 893.42, "end": 893.66, "word": " تعرف", "probability": 0.961669921875}, {"start": 893.66, "end": 894.04, "word": " تقرأ", "probability": 0.9212646484375}, {"start": 894.04, "end": 895.84, "word": " نتائجك،ماشي؟", "probability": 0.8058349609375}, {"start": 895.84, "end": 896.5, "word": " لكن", "probability": 0.78369140625}, {"start": 896.5, "end": 897.04, "word": " لإنه", "probability": 0.7340087890625}, {"start": 897.04, "end": 897.16, "word": " في", "probability": 0.8740234375}, {"start": 897.16, "end": 897.64, "word": " different", "probability": 0.87841796875}, {"start": 897.64, "end": 898.4, "word": " behavior", "probability": 0.83544921875}, {"start": 898.4, "end": 899.36, "word": " للplatinum", "probability": 0.46978759765625}, {"start": 899.36, "end": 899.5, "word": " في", "probability": 0.84912109375}, {"start": 899.5, "end": 900.0, "word": " كل", "probability": 0.9912109375}, {"start": 900.0, "end": 900.74, "word": " مرض", "probability": 0.931640625}, {"start": 900.74, "end": 902.82, "word": " مختلف،ماشي؟", "probability": 0.96588134765625}, {"start": 902.82, "end": 903.2, "word": " لكن", "probability": 0.9306640625}, {"start": 903.2, "end": 903.62, "word": " اليوم", "probability": 0.97607421875}, {"start": 903.62, "end": 
904.26, "word": " سهلو", "probability": 0.8802490234375}, {"start": 904.26, "end": 904.58, "word": " علينا", "probability": 0.829833984375}, {"start": 904.58, "end": 905.02, "word": " خالص", "probability": 0.99755859375}, {"start": 905.02, "end": 905.9, "word": " صار", "probability": 0.782958984375}, {"start": 905.9, "end": 906.0, "word": " في", "probability": 0.89794921875}, {"start": 906.0, "end": 908.54, "word": " software،ماشي؟", "probability": 0.9366048177083334}, {"start": 908.54, "end": 908.84, "word": " خاص", "probability": 0.9873046875}, {"start": 908.84, "end": 909.22, "word": " بهذا", "probability": 0.644287109375}, {"start": 909.22, "end": 909.68, "word": " الموضوع", "probability": 0.9979248046875}, {"start": 909.68, "end": 910.52, "word": " فقط", "probability": 0.90673828125}, {"start": 910.52, "end": 911.38, "word": " حسب", "probability": 0.980712890625}, {"start": 911.38, "end": 911.98, "word": " النتيجة", "probability": 0.95703125}, {"start": 911.98, "end": 912.32, "word": " بديك", "probability": 0.7999674479166666}, {"start": 912.32, "end": 912.68, "word": " تشخيص", "probability": 0.96611328125}, {"start": 912.68, "end": 913.24, "word": " مباشرة", "probability": 0.9884033203125}, {"start": 913.24, "end": 913.5, "word": " على", "probability": 0.7119140625}, {"start": 913.5, "end": 915.6, "word": " طول،ماشي؟", "probability": 0.9762834821428571}, {"start": 915.6, "end": 915.78, "word": " في", "probability": 0.8076171875}, {"start": 915.78, "end": 916.34, "word": " software", "probability": 0.95263671875}, {"start": 916.34, "end": 917.76, "word": " الآن", "probability": 0.67236328125}, {"start": 917.76, "end": 918.62, "word": " بيبيع", "probability": 0.75750732421875}, {"start": 918.62, "end": 918.82, "word": " في", "probability": 0.9521484375}, {"start": 918.82, "end": 919.16, "word": " السوق", "probability": 0.973388671875}], "temperature": 1.0}, {"id": 34, "seek": 93944, "start": 919.6, "end": 939.44, "text": "بحسب نتيجة التشخيص اللى ممكن يطلع هذه 
تقنية تالية اشخص من خلال الـ platelet function ال procedure الرابع اللى انا ممكن اشخص من خلاله", "tokens": [3555, 5016, 35457, 8717, 31371, 7435, 3660, 16712, 8592, 9778, 1829, 9381, 13672, 7578, 3714, 43020, 7251, 9566, 1211, 3615, 29538, 6055, 4587, 1863, 10632, 6055, 6027, 10632, 1975, 8592, 9778, 9381, 9154, 16490, 1211, 6027, 2423, 39184, 3403, 15966, 2445, 2423, 10747, 34892, 16758, 3615, 13672, 7578, 1975, 8315, 3714, 43020, 1975, 8592, 9778, 9381, 9154, 16490, 1211, 6027, 3224], "avg_logprob": -0.37903224604745067, "compression_ratio": 1.4896551724137932, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 919.6, "end": 920.28, "word": "بحسب", "probability": 0.6869303385416666}, {"start": 920.28, "end": 921.76, "word": " نتيجة", "probability": 0.786865234375}, {"start": 921.76, "end": 923.74, "word": " التشخيص", "probability": 0.7579833984375}, {"start": 923.74, "end": 924.26, "word": " اللى", "probability": 0.60205078125}, {"start": 924.26, "end": 925.02, "word": " ممكن", "probability": 0.62744140625}, {"start": 925.02, "end": 925.34, "word": " يطلع", "probability": 0.69281005859375}, {"start": 925.34, "end": 926.36, "word": " هذه", "probability": 0.13232421875}, {"start": 926.36, "end": 930.98, "word": " تقنية", "probability": 0.7296142578125}, {"start": 930.98, "end": 931.44, "word": " تالية", "probability": 0.77294921875}, {"start": 931.44, "end": 933.1, "word": " اشخص", "probability": 0.761383056640625}, {"start": 933.1, "end": 933.22, "word": " من", "probability": 0.97509765625}, {"start": 933.22, "end": 933.52, "word": " خلال", "probability": 0.9853515625}, {"start": 933.52, "end": 933.64, "word": " الـ", "probability": 0.3966064453125}, {"start": 933.64, "end": 933.92, "word": " platelet", "probability": 0.57476806640625}, {"start": 933.92, "end": 934.4, "word": " function", "probability": 0.92724609375}, {"start": 934.4, "end": 935.34, "word": " ال", "probability": 0.475341796875}, {"start": 935.34, "end": 936.0, "word": " procedure", 
"probability": 0.36865234375}, {"start": 936.0, "end": 936.58, "word": " الرابع", "probability": 0.9651692708333334}, {"start": 936.58, "end": 936.78, "word": " اللى", "probability": 0.9619140625}, {"start": 936.78, "end": 937.0, "word": " انا", "probability": 0.672607421875}, {"start": 937.0, "end": 937.44, "word": " ممكن", "probability": 0.9453125}, {"start": 937.44, "end": 938.58, "word": " اشخص", "probability": 0.9464111328125}, {"start": 938.58, "end": 938.78, "word": " من", "probability": 0.99365234375}, {"start": 938.78, "end": 939.44, "word": " خلاله", "probability": 0.98876953125}], "temperature": 1.0}, {"id": 35, "seek": 97152, "start": 941.98, "end": 971.52, "text": "أو البرنامج solier disease بذأها هو ال flow cytometer و كلنا عارفين ال flow cytometer كيف بتشجع أو كيف بيش ال principle of flow cytometer اللي بيصير شبه انه احنا بستخدم monoclonal antibody for each receptor على سطح البليتين يعني for each CD على سطح البليتين لأن كل receptor أخد رقم خاص فاكرين لما خلعتلكم ال CD اختصار ليش؟", "tokens": [10721, 2407, 2423, 26890, 8315, 2304, 7435, 1404, 811, 4752, 4724, 8848, 10721, 11296, 31439, 2423, 3095, 40248, 13606, 4032, 28242, 8315, 6225, 9640, 5172, 9957, 2423, 3095, 40248, 13606, 9122, 33911, 39894, 8592, 7435, 3615, 34051, 9122, 33911, 4724, 1829, 8592, 2423, 8665, 295, 3095, 40248, 13606, 13672, 1829, 4724, 1829, 9381, 13546, 13412, 3555, 3224, 16472, 3224, 1975, 5016, 8315, 4724, 14851, 9778, 40448, 1108, 905, 14864, 304, 34507, 337, 1184, 32264, 15844, 8608, 9566, 5016, 29739, 20292, 2655, 9957, 37495, 22653, 337, 1184, 6743, 15844, 8608, 9566, 5016, 29739, 20292, 2655, 9957, 5296, 33456, 28242, 32264, 5551, 9778, 3215, 12602, 4587, 2304, 16490, 33546, 6156, 995, 37983, 9957, 5296, 15042, 16490, 1211, 34268, 23275, 2304, 2423, 6743, 1975, 46456, 9381, 9640, 32239, 8592, 22807], "avg_logprob": -0.28295898251235485, "compression_ratio": 1.7252747252747254, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 941.98, "end": 942.3, "word": 
"أو", "probability": 0.77734375}, {"start": 942.3, "end": 942.84, "word": " البرنامج", "probability": 0.6921142578125}, {"start": 942.84, "end": 943.26, "word": " solier", "probability": 0.28521728515625}, {"start": 943.26, "end": 943.78, "word": " disease", "probability": 0.85888671875}, {"start": 943.78, "end": 944.6, "word": " بذأها", "probability": 0.439849853515625}, {"start": 944.6, "end": 945.4, "word": " هو", "probability": 0.9453125}, {"start": 945.4, "end": 945.58, "word": " ال", "probability": 0.70361328125}, {"start": 945.58, "end": 945.72, "word": " flow", "probability": 0.88427734375}, {"start": 945.72, "end": 946.3, "word": " cytometer", "probability": 0.822021484375}, {"start": 946.3, "end": 947.24, "word": " و", "probability": 0.7099609375}, {"start": 947.24, "end": 947.68, "word": " كلنا", "probability": 0.5858154296875}, {"start": 947.68, "end": 948.0, "word": " عارفين", "probability": 0.97705078125}, {"start": 948.0, "end": 948.12, "word": " ال", "probability": 0.74462890625}, {"start": 948.12, "end": 948.28, "word": " flow", "probability": 0.96826171875}, {"start": 948.28, "end": 948.74, "word": " cytometer", "probability": 0.947021484375}, {"start": 948.74, "end": 949.02, "word": " كيف", "probability": 0.959228515625}, {"start": 949.02, "end": 949.58, "word": " بتشجع", "probability": 0.7113037109375}, {"start": 949.58, "end": 950.5, "word": " أو", "probability": 0.701171875}, {"start": 950.5, "end": 950.78, "word": " كيف", "probability": 0.6676025390625}, {"start": 950.78, "end": 950.92, "word": " بيش", "probability": 0.7378743489583334}, {"start": 950.92, "end": 951.04, "word": " ال", "probability": 0.7978515625}, {"start": 951.04, "end": 951.5, "word": " principle", "probability": 0.85791015625}, {"start": 951.5, "end": 951.68, "word": " of", "probability": 0.86865234375}, {"start": 951.68, "end": 951.92, "word": " flow", "probability": 0.9052734375}, {"start": 951.92, "end": 952.4, "word": " cytometer", "probability": 0.956787109375}, 
{"start": 952.4, "end": 953.36, "word": " اللي", "probability": 0.8232421875}, {"start": 953.36, "end": 953.72, "word": " بيصير", "probability": 0.9029541015625}, {"start": 953.72, "end": 954.12, "word": " شبه", "probability": 0.8717447916666666}, {"start": 954.12, "end": 955.36, "word": " انه", "probability": 0.56689453125}, {"start": 955.36, "end": 955.56, "word": " احنا", "probability": 0.8850911458333334}, {"start": 955.56, "end": 956.08, "word": " بستخدم", "probability": 0.83154296875}, {"start": 956.08, "end": 956.68, "word": " monoclonal", "probability": 0.8355712890625}, {"start": 956.68, "end": 957.18, "word": " antibody", "probability": 0.8994140625}, {"start": 957.18, "end": 957.44, "word": " for", "probability": 0.92041015625}, {"start": 957.44, "end": 957.7, "word": " each", "probability": 0.94873046875}, {"start": 957.7, "end": 958.22, "word": " receptor", "probability": 0.9326171875}, {"start": 958.22, "end": 958.44, "word": " على", "probability": 0.88720703125}, {"start": 958.44, "end": 958.76, "word": " سطح", "probability": 0.98388671875}, {"start": 958.76, "end": 960.74, "word": " البليتين", "probability": 0.465728759765625}, {"start": 960.74, "end": 961.72, "word": " يعني", "probability": 0.925537109375}, {"start": 961.72, "end": 961.96, "word": " for", "probability": 0.9580078125}, {"start": 961.96, "end": 962.3, "word": " each", "probability": 0.96044921875}, {"start": 962.3, "end": 962.76, "word": " CD", "probability": 0.81396484375}, {"start": 962.76, "end": 963.06, "word": " على", "probability": 0.88037109375}, {"start": 963.06, "end": 963.4, "word": " سطح", "probability": 0.9978841145833334}, {"start": 963.4, "end": 963.98, "word": " البليتين", "probability": 0.986083984375}, {"start": 963.98, "end": 964.26, "word": " لأن", "probability": 0.745361328125}, {"start": 964.26, "end": 964.52, "word": " كل", "probability": 0.61865234375}, {"start": 964.52, "end": 965.0, "word": " receptor", "probability": 0.95068359375}, {"start": 965.0, "end": 
967.1, "word": " أخد", "probability": 0.8111979166666666}, {"start": 967.1, "end": 967.46, "word": " رقم", "probability": 0.9962565104166666}, {"start": 967.46, "end": 967.82, "word": " خاص", "probability": 0.98486328125}, {"start": 967.82, "end": 968.3, "word": " فاكرين", "probability": 0.806396484375}, {"start": 968.3, "end": 968.52, "word": " لما", "probability": 0.869140625}, {"start": 968.52, "end": 969.26, "word": " خلعتلكم", "probability": 0.64951171875}, {"start": 969.26, "end": 969.8, "word": " ال", "probability": 0.52685546875}, {"start": 969.8, "end": 970.24, "word": " CD", "probability": 0.86865234375}, {"start": 970.24, "end": 971.0, "word": " اختصار", "probability": 0.93017578125}, {"start": 971.0, "end": 971.52, "word": " ليش؟", "probability": 0.8694661458333334}], "temperature": 1.0}, {"id": 36, "seek": 99691, "start": 975.27, "end": 996.91, "text": "عشان اعطوا ال CDs الانتجنات اللى موجودة على سطح الخلايا ارقام مختلفة صح؟ فصار فينا CD واحد و اتنين و تلاتة و كل CD indication for the mean for certain receptor نجوا ان ال receptor اللى على سطح ال platelet اللى و ال glycoprotein 1B", "tokens": [3615, 8592, 7649, 1975, 3615, 9566, 14407, 2423, 45257, 2423, 7649, 2655, 7435, 8315, 2655, 13672, 7578, 3714, 29245, 23328, 3660, 15844, 8608, 9566, 5016, 33962, 15040, 25528, 1975, 2288, 4587, 10943, 3714, 46456, 46538, 3660, 20328, 5016, 22807, 6156, 9381, 9640, 8978, 8315, 6743, 36764, 24401, 4032, 1975, 2655, 1863, 9957, 4032, 6055, 1211, 9307, 3660, 4032, 28242, 6743, 18877, 337, 264, 914, 337, 1629, 32264, 8717, 7435, 14407, 16472, 2423, 32264, 13672, 7578, 15844, 8608, 9566, 5016, 2423, 3403, 15966, 13672, 7578, 4032, 2423, 22633, 13084, 81, 1370, 259, 502, 33], "avg_logprob": -0.3520611587991106, "compression_ratio": 1.6084905660377358, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 975.27, "end": 975.99, "word": "عشان", "probability": 0.564422607421875}, {"start": 975.99, "end": 976.49, "word": " اعطوا", "probability": 0.5378265380859375}, 
{"start": 976.49, "end": 976.57, "word": " ال", "probability": 0.84912109375}, {"start": 976.57, "end": 976.91, "word": " CDs", "probability": 0.2413330078125}, {"start": 976.91, "end": 978.21, "word": " الانتجنات", "probability": 0.7745361328125}, {"start": 978.21, "end": 978.39, "word": " اللى", "probability": 0.7939453125}, {"start": 978.39, "end": 978.87, "word": " موجودة", "probability": 0.9664306640625}, {"start": 978.87, "end": 979.07, "word": " على", "probability": 0.8349609375}, {"start": 979.07, "end": 979.45, "word": " سطح", "probability": 0.9793294270833334}, {"start": 979.45, "end": 980.15, "word": " الخلايا", "probability": 0.94580078125}, {"start": 980.15, "end": 981.07, "word": " ارقام", "probability": 0.7664794921875}, {"start": 981.07, "end": 982.61, "word": " مختلفة", "probability": 0.9615478515625}, {"start": 982.61, "end": 983.21, "word": " صح؟", "probability": 0.646240234375}, {"start": 983.21, "end": 984.23, "word": " فصار", "probability": 0.93798828125}, {"start": 984.23, "end": 984.53, "word": " فينا", "probability": 0.5216064453125}, {"start": 984.53, "end": 984.81, "word": " CD", "probability": 0.888671875}, {"start": 984.81, "end": 985.17, "word": " واحد", "probability": 0.747802734375}, {"start": 985.17, "end": 985.31, "word": " و", "probability": 0.92236328125}, {"start": 985.31, "end": 985.55, "word": " اتنين", "probability": 0.80279541015625}, {"start": 985.55, "end": 985.63, "word": " و", "probability": 0.91552734375}, {"start": 985.63, "end": 986.07, "word": " تلاتة", "probability": 0.8740234375}, {"start": 986.07, "end": 986.37, "word": " و", "probability": 0.97021484375}, {"start": 986.37, "end": 986.55, "word": " كل", "probability": 0.5537109375}, {"start": 986.55, "end": 986.95, "word": " CD", "probability": 0.9462890625}, {"start": 986.95, "end": 988.13, "word": " indication", "probability": 0.6953125}, {"start": 988.13, "end": 989.03, "word": " for", "probability": 0.9228515625}, {"start": 989.03, "end": 989.17, "word": " 
the", "probability": 0.298828125}, {"start": 989.17, "end": 989.43, "word": " mean", "probability": 0.4580078125}, {"start": 989.43, "end": 989.77, "word": " for", "probability": 0.81689453125}, {"start": 989.77, "end": 990.07, "word": " certain", "probability": 0.83349609375}, {"start": 990.07, "end": 990.87, "word": " receptor", "probability": 0.65185546875}, {"start": 990.87, "end": 991.97, "word": " نجوا", "probability": 0.6322428385416666}, {"start": 991.97, "end": 992.13, "word": " ان", "probability": 0.74560546875}, {"start": 992.13, "end": 992.21, "word": " ال", "probability": 0.94189453125}, {"start": 992.21, "end": 992.57, "word": " receptor", "probability": 0.62744140625}, {"start": 992.57, "end": 992.85, "word": " اللى", "probability": 0.98291015625}, {"start": 992.85, "end": 993.27, "word": " على", "probability": 0.72265625}, {"start": 993.27, "end": 993.85, "word": " سطح", "probability": 0.9856770833333334}, {"start": 993.85, "end": 994.45, "word": " ال", "probability": 0.92529296875}, {"start": 994.45, "end": 995.27, "word": " platelet", "probability": 0.6728515625}, {"start": 995.27, "end": 995.55, "word": " اللى", "probability": 0.955810546875}, {"start": 995.55, "end": 995.63, "word": " و", "probability": 0.1707763671875}, {"start": 995.63, "end": 995.67, "word": " ال", "probability": 0.5966796875}, {"start": 995.67, "end": 996.45, "word": " glycoprotein", "probability": 0.8533203125}, {"start": 996.45, "end": 996.91, "word": " 1B", "probability": 0.639892578125}], "temperature": 1.0}, {"id": 37, "seek": 102586, "start": 997.73, "end": 1025.87, "text": "هو رقمه CD42 CDR42 وبما أنه هتيرو دياما موجود على شكل 1B V 1X يبقى فيه عندنا تلاتة CD صح؟ عشان ما يبعدوش ويده قالوا الـCD42 هدل من الـDA وB وC فقلّوا Monoclonal Antibody للتلاتة hetيرو دياما", "tokens": [3224, 2407, 12602, 4587, 2304, 3224, 6743, 15628, 6743, 49, 15628, 4032, 3555, 15042, 14739, 3224, 8032, 2655, 13546, 2407, 11778, 1829, 10943, 995, 3714, 29245, 23328, 15844, 13412, 28820, 502, 
33, 691, 502, 55, 7251, 3555, 4587, 7578, 8978, 3224, 43242, 8315, 6055, 1211, 9307, 3660, 6743, 20328, 5016, 22807, 6225, 8592, 7649, 19446, 7251, 3555, 22488, 2407, 8592, 4032, 25708, 3224, 50239, 14407, 2423, 39184, 16508, 15628, 8032, 3215, 1211, 9154, 2423, 39184, 7509, 4032, 33, 4032, 34, 6156, 4587, 1211, 11703, 14407, 4713, 905, 14864, 304, 5130, 897, 843, 24976, 2655, 1211, 9307, 3660, 3639, 13546, 2407, 11778, 1829, 10943, 995], "avg_logprob": -0.3303571337745303, "compression_ratio": 1.430622009569378, "no_speech_prob": 0.0, "words": [{"start": 997.73, "end": 998.43, "word": "هو", "probability": 0.6900634765625}, {"start": 998.43, "end": 999.49, "word": " رقمه", "probability": 0.916748046875}, {"start": 999.49, "end": 1000.57, "word": " CD42", "probability": 0.5888671875}, {"start": 1000.57, "end": 1002.13, "word": " CDR42", "probability": 0.446044921875}, {"start": 1002.13, "end": 1003.13, "word": " وبما", "probability": 0.6605631510416666}, {"start": 1003.13, "end": 1003.37, "word": " أنه", "probability": 0.6834716796875}, {"start": 1003.37, "end": 1003.71, "word": " هتيرو", "probability": 0.71990966796875}, {"start": 1003.71, "end": 1004.21, "word": " دياما", "probability": 0.6834716796875}, {"start": 1004.21, "end": 1005.87, "word": " موجود", "probability": 0.9602864583333334}, {"start": 1005.87, "end": 1005.99, "word": " على", "probability": 0.9482421875}, {"start": 1005.99, "end": 1006.33, "word": " شكل", "probability": 0.9091796875}, {"start": 1006.33, "end": 1006.85, "word": " 1B", "probability": 0.68359375}, {"start": 1006.85, "end": 1008.67, "word": " V", "probability": 0.634765625}, {"start": 1008.67, "end": 1009.69, "word": " 1X", "probability": 0.634765625}, {"start": 1009.69, "end": 1010.45, "word": " يبقى", "probability": 0.837158203125}, {"start": 1010.45, "end": 1010.61, "word": " فيه", "probability": 0.82666015625}, {"start": 1010.61, "end": 1010.81, "word": " عندنا", "probability": 0.581298828125}, {"start": 1010.81, "end": 1011.13, 
"word": " تلاتة", "probability": 0.822998046875}, {"start": 1011.13, "end": 1011.47, "word": " CD", "probability": 0.9208984375}, {"start": 1011.47, "end": 1013.55, "word": " صح؟", "probability": 0.8250325520833334}, {"start": 1013.55, "end": 1014.33, "word": " عشان", "probability": 0.9651692708333334}, {"start": 1014.33, "end": 1014.49, "word": " ما", "probability": 0.88671875}, {"start": 1014.49, "end": 1015.13, "word": " يبعدوش", "probability": 0.93876953125}, {"start": 1015.13, "end": 1015.61, "word": " ويده", "probability": 0.7294921875}, {"start": 1015.61, "end": 1016.33, "word": " قالوا", "probability": 0.72900390625}, {"start": 1016.33, "end": 1017.27, "word": " الـCD42", "probability": 0.56842041015625}, {"start": 1017.27, "end": 1017.61, "word": " هدل", "probability": 0.58349609375}, {"start": 1017.61, "end": 1017.73, "word": " من", "probability": 0.37255859375}, {"start": 1017.73, "end": 1018.37, "word": " الـDA", "probability": 0.7586263020833334}, {"start": 1018.37, "end": 1018.79, "word": " وB", "probability": 0.818115234375}, {"start": 1018.79, "end": 1019.21, "word": " وC", "probability": 0.98583984375}, {"start": 1019.21, "end": 1021.81, "word": " فقلّوا", "probability": 0.6619140625}, {"start": 1021.81, "end": 1022.49, "word": " Monoclonal", "probability": 0.632232666015625}, {"start": 1022.49, "end": 1023.07, "word": " Antibody", "probability": 0.8863932291666666}, {"start": 1023.07, "end": 1024.75, "word": " للتلاتة", "probability": 0.9115234375}, {"start": 1024.75, "end": 1025.53, "word": " hetيرو", "probability": 0.6682535807291666}, {"start": 1025.53, "end": 1025.87, "word": " دياما", "probability": 0.7430419921875}], "temperature": 1.0}, {"id": 38, "seek": 105440, "start": 1027.92, "end": 1054.4, "text": "ماشي، شو بيصير شباب؟ مدخل الـplatinum على monoclonal antibody إذا ال receptor موجود، نمسح فيه وإذا مش موجود، لكل مسكة، tag تدلنا سجن، يعني بعيد ال concentration ومبعدد، فاهمين عليا؟", "tokens": [2304, 33599, 1829, 12399, 13412, 2407, 4724, 
1829, 9381, 13546, 13412, 3555, 16758, 22807, 3714, 3215, 9778, 1211, 2423, 39184, 564, 22685, 449, 15844, 1108, 905, 14864, 304, 34507, 11933, 15730, 2423, 32264, 3714, 29245, 23328, 12399, 8717, 2304, 3794, 5016, 8978, 3224, 4032, 28814, 15730, 37893, 3714, 29245, 23328, 12399, 5296, 28820, 47524, 4117, 3660, 12399, 6162, 6055, 3215, 1211, 8315, 8608, 7435, 1863, 12399, 37495, 22653, 45030, 25708, 2423, 9856, 4032, 2304, 3555, 22488, 3215, 12399, 6156, 995, 16095, 9957, 11203, 25528, 22807], "avg_logprob": -0.40406975808531737, "compression_ratio": 1.4292929292929293, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1027.92, "end": 1028.98, "word": "ماشي،", "probability": 0.5908203125}, {"start": 1028.98, "end": 1029.16, "word": " شو", "probability": 0.6090087890625}, {"start": 1029.16, "end": 1029.52, "word": " بيصير", "probability": 0.6732177734375}, {"start": 1029.52, "end": 1030.22, "word": " شباب؟", "probability": 0.7340087890625}, {"start": 1030.22, "end": 1030.8, "word": " مدخل", "probability": 0.734375}, {"start": 1030.8, "end": 1032.24, "word": " الـplatinum", "probability": 0.42318115234375}, {"start": 1032.24, "end": 1035.68, "word": " على", "probability": 0.8232421875}, {"start": 1035.68, "end": 1036.34, "word": " monoclonal", "probability": 0.730987548828125}, {"start": 1036.34, "end": 1036.86, "word": " antibody", "probability": 0.86376953125}, {"start": 1036.86, "end": 1038.78, "word": " إذا", "probability": 0.880859375}, {"start": 1038.78, "end": 1040.12, "word": " ال", "probability": 0.98046875}, {"start": 1040.12, "end": 1040.62, "word": " receptor", "probability": 0.428466796875}, {"start": 1040.62, "end": 1041.38, "word": " موجود،", "probability": 0.84912109375}, {"start": 1041.38, "end": 1041.76, "word": " نمسح", "probability": 0.741943359375}, {"start": 1041.76, "end": 1042.18, "word": " فيه", "probability": 0.96240234375}, {"start": 1042.18, "end": 1043.3, "word": " وإذا", "probability": 0.8175455729166666}, {"start": 1043.3, 
"end": 1043.46, "word": " مش", "probability": 0.9541015625}, {"start": 1043.46, "end": 1044.9, "word": " موجود،", "probability": 0.865478515625}, {"start": 1044.9, "end": 1045.7, "word": " لكل", "probability": 0.7822265625}, {"start": 1045.7, "end": 1046.68, "word": " مسكة،", "probability": 0.7462158203125}, {"start": 1046.68, "end": 1046.98, "word": " tag", "probability": 0.73291015625}, {"start": 1046.98, "end": 1049.52, "word": " تدلنا", "probability": 0.44189453125}, {"start": 1049.52, "end": 1050.84, "word": " سجن،", "probability": 0.8192138671875}, {"start": 1050.84, "end": 1051.06, "word": " يعني", "probability": 0.970947265625}, {"start": 1051.06, "end": 1051.32, "word": " بعيد", "probability": 0.623291015625}, {"start": 1051.32, "end": 1051.42, "word": " ال", "probability": 0.65966796875}, {"start": 1051.42, "end": 1051.88, "word": " concentration", "probability": 0.8466796875}, {"start": 1051.88, "end": 1053.38, "word": " ومبعدد،", "probability": 0.6050211588541666}, {"start": 1053.38, "end": 1053.78, "word": " فاهمين", "probability": 0.9512939453125}, {"start": 1053.78, "end": 1054.4, "word": " عليا؟", "probability": 0.7989908854166666}], "temperature": 1.0}, {"id": 39, "seek": 108099, "start": 1054.65, "end": 1080.99, "text": "يبقى هذه طريقة وطريقة sensitive ومحترمة جدا بنقيش من خلالها presence or absence of this receptor even in the heterodimer form تبعه، ماشي؟ يبقى كام طريقة أنا قصد فيها؟ أربع طرق، bleeding time، ثم morphology، ثم aggregation study، ثم flow cytometry", "tokens": [1829, 3555, 4587, 7578, 29538, 23032, 16572, 28671, 4032, 9566, 16572, 28671, 9477, 4032, 2304, 33753, 2288, 46007, 10874, 28259, 44945, 38436, 8592, 9154, 16490, 1211, 6027, 11296, 6814, 420, 17145, 295, 341, 32264, 754, 294, 264, 20789, 378, 9713, 1254, 6055, 3555, 3615, 3224, 12399, 3714, 33599, 1829, 22807, 7251, 3555, 4587, 7578, 9122, 10943, 23032, 16572, 28671, 41850, 12174, 9381, 3215, 8978, 11296, 22807, 5551, 25513, 3615, 23032, 2288, 4587, 12399, 19312, 565, 12399, 
38637, 2304, 25778, 1793, 12399, 38637, 2304, 16743, 399, 2979, 12399, 38637, 2304, 3095, 40248, 34730], "avg_logprob": -0.13860886648137083, "compression_ratio": 1.4525862068965518, "no_speech_prob": 0.0, "words": [{"start": 1054.65, "end": 1055.19, "word": "يبقى", "probability": 0.9459228515625}, {"start": 1055.19, "end": 1055.59, "word": " هذه", "probability": 0.9306640625}, {"start": 1055.59, "end": 1056.11, "word": " طريقة", "probability": 0.9747721354166666}, {"start": 1056.11, "end": 1056.53, "word": " وطريقة", "probability": 0.8760986328125}, {"start": 1056.53, "end": 1057.09, "word": " sensitive", "probability": 0.91162109375}, {"start": 1057.09, "end": 1057.95, "word": " ومحترمة", "probability": 0.973828125}, {"start": 1057.95, "end": 1058.47, "word": " جدا", "probability": 0.992919921875}, {"start": 1058.47, "end": 1059.53, "word": " بنقيش", "probability": 0.6748046875}, {"start": 1059.53, "end": 1059.67, "word": " من", "probability": 0.99658203125}, {"start": 1059.67, "end": 1060.29, "word": " خلالها", "probability": 0.9918212890625}, {"start": 1060.29, "end": 1061.05, "word": " presence", "probability": 0.78173828125}, {"start": 1061.05, "end": 1061.35, "word": " or", "probability": 0.73046875}, {"start": 1061.35, "end": 1061.79, "word": " absence", "probability": 0.97412109375}, {"start": 1061.79, "end": 1062.29, "word": " of", "probability": 0.96435546875}, {"start": 1062.29, "end": 1063.57, "word": " this", "probability": 0.908203125}, {"start": 1063.57, "end": 1064.13, "word": " receptor", "probability": 0.91455078125}, {"start": 1064.13, "end": 1066.27, "word": " even", "probability": 0.84423828125}, {"start": 1066.27, "end": 1067.01, "word": " in", "probability": 0.9599609375}, {"start": 1067.01, "end": 1067.13, "word": " the", "probability": 0.87841796875}, {"start": 1067.13, "end": 1067.73, "word": " heterodimer", "probability": 0.8421223958333334}, {"start": 1067.73, "end": 1068.33, "word": " form", "probability": 0.9482421875}, {"start": 
1068.33, "end": 1070.27, "word": " تبعه،", "probability": 0.694873046875}, {"start": 1070.27, "end": 1072.79, "word": " ماشي؟", "probability": 0.880859375}, {"start": 1072.79, "end": 1073.21, "word": " يبقى", "probability": 0.9752197265625}, {"start": 1073.21, "end": 1073.39, "word": " كام", "probability": 0.740234375}, {"start": 1073.39, "end": 1073.75, "word": " طريقة", "probability": 0.9895833333333334}, {"start": 1073.75, "end": 1073.97, "word": " أنا", "probability": 0.763671875}, {"start": 1073.97, "end": 1074.27, "word": " قصد", "probability": 0.8850911458333334}, {"start": 1074.27, "end": 1074.75, "word": " فيها؟", "probability": 0.8614908854166666}, {"start": 1074.75, "end": 1075.05, "word": " أربع", "probability": 0.8147786458333334}, {"start": 1075.05, "end": 1075.47, "word": " طرق،", "probability": 0.818115234375}, {"start": 1075.47, "end": 1075.65, "word": " bleeding", "probability": 0.830078125}, {"start": 1075.65, "end": 1076.37, "word": " time،", "probability": 0.73046875}, {"start": 1076.37, "end": 1076.53, "word": " ثم", "probability": 0.90673828125}, {"start": 1076.53, "end": 1078.03, "word": " morphology،", "probability": 0.9153645833333334}, {"start": 1078.03, "end": 1078.29, "word": " ثم", "probability": 0.91796875}, {"start": 1078.29, "end": 1078.97, "word": " aggregation", "probability": 0.93115234375}, {"start": 1078.97, "end": 1079.93, "word": " study،", "probability": 0.893310546875}, {"start": 1079.93, "end": 1080.11, "word": " ثم", "probability": 0.986083984375}, {"start": 1080.11, "end": 1080.41, "word": " flow", "probability": 0.63427734375}, {"start": 1080.41, "end": 1080.99, "word": " cytometry", "probability": 0.959228515625}], "temperature": 1.0}, {"id": 40, "seek": 111026, "start": 1081.6, "end": 1110.26, "text": "طبعا this is a photo meter شايفينها يا شباب؟ جهاز عادي زي اللي بتقيسه عليه لكن هي زي ما انتوا شايفين مزود بيهاش ب chart ماشي chart paper اللي بنتوا تسمع عليها هيبقى عن ايهاش رسم بياني عادي برقة رسم بياني بيطلع عليها 
متار وهي ال procedure يا شباب هي long movement وهي ال plate المعلقة", "tokens": [9566, 3555, 3615, 995, 341, 307, 257, 5052, 9255, 13412, 995, 33911, 9957, 11296, 35186, 13412, 3555, 16758, 22807, 10874, 3224, 31377, 6225, 995, 16254, 30767, 1829, 13672, 1829, 39894, 38436, 3794, 3224, 47356, 44381, 39896, 30767, 1829, 19446, 16472, 2655, 14407, 13412, 995, 33911, 9957, 3714, 11622, 23328, 4724, 1829, 3224, 33599, 4724, 6927, 3714, 33599, 1829, 6927, 3035, 13672, 1829, 4724, 29399, 14407, 6055, 38251, 3615, 25894, 11296, 39896, 3555, 4587, 7578, 18871, 1975, 1829, 3224, 33599, 12602, 38251, 4724, 1829, 7649, 1829, 6225, 995, 16254, 4724, 2288, 28671, 12602, 38251, 4724, 1829, 7649, 1829, 4724, 1829, 9566, 1211, 3615, 25894, 11296, 44650, 9640, 37037, 1829, 2423, 10747, 35186, 13412, 3555, 16758, 39896, 938, 3963, 37037, 1829, 2423, 5924, 9673, 30241, 28671], "avg_logprob": -0.32325000953674315, "compression_ratio": 1.8565573770491803, "no_speech_prob": 0.0, "words": [{"start": 1081.6, "end": 1081.94, "word": "طبعا", "probability": 0.899658203125}, {"start": 1081.94, "end": 1082.12, "word": " this", "probability": 0.4736328125}, {"start": 1082.12, "end": 1082.3, "word": " is", "probability": 0.923828125}, {"start": 1082.3, "end": 1082.56, "word": " a", "probability": 0.521484375}, {"start": 1082.56, "end": 1083.18, "word": " photo", "probability": 0.3544921875}, {"start": 1083.18, "end": 1083.8, "word": " meter", "probability": 0.8525390625}, {"start": 1083.8, "end": 1084.66, "word": " شايفينها", "probability": 0.88525390625}, {"start": 1084.66, "end": 1084.76, "word": " يا", "probability": 0.45068359375}, {"start": 1084.76, "end": 1085.12, "word": " شباب؟", "probability": 0.8748779296875}, {"start": 1085.12, "end": 1085.4, "word": " جهاز", "probability": 0.8370768229166666}, {"start": 1085.4, "end": 1085.74, "word": " عادي", "probability": 0.9376627604166666}, {"start": 1085.74, "end": 1085.96, "word": " زي", "probability": 0.72509765625}, {"start": 1085.96, "end": 
1086.36, "word": " اللي", "probability": 0.76708984375}, {"start": 1086.36, "end": 1086.76, "word": " بتقيسه", "probability": 0.64111328125}, {"start": 1086.76, "end": 1086.96, "word": " عليه", "probability": 0.587890625}, {"start": 1086.96, "end": 1088.3, "word": " لكن", "probability": 0.52783203125}, {"start": 1088.3, "end": 1088.66, "word": " هي", "probability": 0.712890625}, {"start": 1088.66, "end": 1088.9, "word": " زي", "probability": 0.951904296875}, {"start": 1088.9, "end": 1089.0, "word": " ما", "probability": 0.9697265625}, {"start": 1089.0, "end": 1089.16, "word": " انتوا", "probability": 0.8138020833333334}, {"start": 1089.16, "end": 1089.5, "word": " شايفين", "probability": 0.9903564453125}, {"start": 1089.5, "end": 1089.94, "word": " مزود", "probability": 0.93896484375}, {"start": 1089.94, "end": 1090.5, "word": " بيهاش", "probability": 0.65130615234375}, {"start": 1090.5, "end": 1091.66, "word": " ب", "probability": 0.83203125}, {"start": 1091.66, "end": 1093.32, "word": " chart", "probability": 0.484375}, {"start": 1093.32, "end": 1094.66, "word": " ماشي", "probability": 0.7110188802083334}, {"start": 1094.66, "end": 1095.88, "word": " chart", "probability": 0.442138671875}, {"start": 1095.88, "end": 1096.34, "word": " paper", "probability": 0.8916015625}, {"start": 1096.34, "end": 1097.18, "word": " اللي", "probability": 0.90283203125}, {"start": 1097.18, "end": 1097.46, "word": " بنتوا", "probability": 0.6617024739583334}, {"start": 1097.46, "end": 1097.64, "word": " تسمع", "probability": 0.9586588541666666}, {"start": 1097.64, "end": 1097.86, "word": " عليها", "probability": 0.5787353515625}, {"start": 1097.86, "end": 1098.08, "word": " هيبقى", "probability": 0.600921630859375}, {"start": 1098.08, "end": 1098.26, "word": " عن", "probability": 0.77294921875}, {"start": 1098.26, "end": 1098.76, "word": " ايهاش", "probability": 0.66876220703125}, {"start": 1098.76, "end": 1100.28, "word": " رسم", "probability": 0.800048828125}, {"start": 1100.28, 
"end": 1100.78, "word": " بياني", "probability": 0.9586181640625}, {"start": 1100.78, "end": 1101.12, "word": " عادي", "probability": 0.9583333333333334}, {"start": 1101.12, "end": 1102.0, "word": " برقة", "probability": 0.744140625}, {"start": 1102.0, "end": 1102.32, "word": " رسم", "probability": 0.98486328125}, {"start": 1102.32, "end": 1102.8, "word": " بياني", "probability": 0.9901123046875}, {"start": 1102.8, "end": 1103.88, "word": " بيطلع", "probability": 0.6224365234375}, {"start": 1103.88, "end": 1104.08, "word": " عليها", "probability": 0.882080078125}, {"start": 1104.08, "end": 1104.52, "word": " متار", "probability": 0.344482421875}, {"start": 1104.52, "end": 1105.94, "word": " وهي", "probability": 0.8212890625}, {"start": 1105.94, "end": 1106.06, "word": " ال", "probability": 0.97900390625}, {"start": 1106.06, "end": 1106.52, "word": " procedure", "probability": 0.6806640625}, {"start": 1106.52, "end": 1106.82, "word": " يا", "probability": 0.3955078125}, {"start": 1106.82, "end": 1106.98, "word": " شباب", "probability": 0.9825846354166666}, {"start": 1106.98, "end": 1107.22, "word": " هي", "probability": 0.404296875}, {"start": 1107.22, "end": 1107.42, "word": " long", "probability": 0.52392578125}, {"start": 1107.42, "end": 1107.82, "word": " movement", "probability": 0.35498046875}, {"start": 1107.82, "end": 1109.26, "word": " وهي", "probability": 0.8369140625}, {"start": 1109.26, "end": 1109.38, "word": " ال", "probability": 0.814453125}, {"start": 1109.38, "end": 1109.56, "word": " plate", "probability": 0.4375}, {"start": 1109.56, "end": 1110.26, "word": " المعلقة", "probability": 0.7529296875}], "temperature": 1.0}, {"id": 41, "seek": 113567, "start": 1110.73, "end": 1135.67, "text": "و هاي الضوء اللى بدنا نقيسه فى البداية بيكون عياش قليل بعد ما يصير فيه cycling down بيصير فيه عياش كتير و بدناش نسهلة ال steric part او ال magnetic part بنحط فى الأنبوبة عارفين ليه؟ بالظبط بيعمل حركة لأن ضرورى ال planet تكون بحركة مستمرة", "tokens": [2407, 8032, 
47302, 6024, 114, 2407, 38207, 13672, 7578, 47525, 8315, 8717, 38436, 3794, 3224, 6156, 7578, 29739, 28259, 10632, 4724, 1829, 30544, 6225, 1829, 33599, 12174, 20292, 1211, 39182, 19446, 7251, 9381, 13546, 8978, 3224, 22425, 760, 4724, 1829, 9381, 13546, 8978, 3224, 6225, 1829, 33599, 9122, 2655, 13546, 4032, 47525, 8315, 8592, 8717, 3794, 3224, 37977, 2423, 18924, 299, 644, 1975, 2407, 2423, 12688, 644, 44945, 5016, 9566, 6156, 7578, 16247, 1863, 3555, 37746, 3660, 6225, 9640, 5172, 9957, 32239, 3224, 22807, 20666, 19913, 3555, 9566, 4724, 1829, 25957, 1211, 11331, 31747, 3660, 5296, 33456, 48812, 2288, 13063, 7578, 2423, 5054, 6055, 30544, 4724, 5016, 31747, 3660, 3714, 14851, 2304, 25720], "avg_logprob": -0.24163924719680818, "compression_ratio": 1.6738197424892705, "no_speech_prob": 0.0, "words": [{"start": 1110.73, "end": 1110.97, "word": "و", "probability": 0.85498046875}, {"start": 1110.97, "end": 1111.41, "word": " هاي", "probability": 0.60009765625}, {"start": 1111.41, "end": 1111.81, "word": " الضوء", "probability": 0.767913818359375}, {"start": 1111.81, "end": 1111.95, "word": " اللى", "probability": 0.9072265625}, {"start": 1111.95, "end": 1112.17, "word": " بدنا", "probability": 0.92822265625}, {"start": 1112.17, "end": 1112.77, "word": " نقيسه", "probability": 0.880859375}, {"start": 1112.77, "end": 1113.39, "word": " فى", "probability": 0.6771240234375}, {"start": 1113.39, "end": 1113.83, "word": " البداية", "probability": 0.9775390625}, {"start": 1113.83, "end": 1114.19, "word": " بيكون", "probability": 0.9422200520833334}, {"start": 1114.19, "end": 1114.65, "word": " عياش", "probability": 0.78564453125}, {"start": 1114.65, "end": 1116.35, "word": " قليل", "probability": 0.9684244791666666}, {"start": 1116.35, "end": 1117.09, "word": " بعد", "probability": 0.767578125}, {"start": 1117.09, "end": 1117.25, "word": " ما", "probability": 0.9697265625}, {"start": 1117.25, "end": 1117.53, "word": " يصير", "probability": 0.7880859375}, {"start": 1117.53, 
"end": 1117.73, "word": " فيه", "probability": 0.556884765625}, {"start": 1117.73, "end": 1117.99, "word": " cycling", "probability": 0.1473388671875}, {"start": 1117.99, "end": 1118.49, "word": " down", "probability": 0.81884765625}, {"start": 1118.49, "end": 1118.89, "word": " بيصير", "probability": 0.904052734375}, {"start": 1118.89, "end": 1119.17, "word": " فيه", "probability": 0.7646484375}, {"start": 1119.17, "end": 1119.49, "word": " عياش", "probability": 0.85546875}, {"start": 1119.49, "end": 1120.17, "word": " كتير", "probability": 0.7073567708333334}, {"start": 1120.17, "end": 1121.63, "word": " و", "probability": 0.88623046875}, {"start": 1121.63, "end": 1122.15, "word": " بدناش", "probability": 0.6226399739583334}, {"start": 1122.15, "end": 1122.93, "word": " نسهلة", "probability": 0.703155517578125}, {"start": 1122.93, "end": 1123.65, "word": " ال", "probability": 0.7958984375}, {"start": 1123.65, "end": 1124.49, "word": " steric", "probability": 0.5762939453125}, {"start": 1124.49, "end": 1125.31, "word": " part", "probability": 0.5078125}, {"start": 1125.31, "end": 1125.79, "word": " او", "probability": 0.800537109375}, {"start": 1125.79, "end": 1126.05, "word": " ال", "probability": 0.8251953125}, {"start": 1126.05, "end": 1126.53, "word": " magnetic", "probability": 0.892578125}, {"start": 1126.53, "end": 1127.11, "word": " part", "probability": 0.970703125}, {"start": 1127.11, "end": 1128.43, "word": " بنحط", "probability": 0.8806966145833334}, {"start": 1128.43, "end": 1128.55, "word": " فى", "probability": 0.937255859375}, {"start": 1128.55, "end": 1129.55, "word": " الأنبوبة", "probability": 0.88017578125}, {"start": 1129.55, "end": 1130.55, "word": " عارفين", "probability": 0.9788818359375}, {"start": 1130.55, "end": 1131.07, "word": " ليه؟", "probability": 0.8780924479166666}, {"start": 1131.07, "end": 1132.15, "word": " بالظبط", "probability": 0.932373046875}, {"start": 1132.15, "end": 1132.55, "word": " بيعمل", "probability": 
0.8865966796875}, {"start": 1132.55, "end": 1132.99, "word": " حركة", "probability": 0.9830729166666666}, {"start": 1132.99, "end": 1133.25, "word": " لأن", "probability": 0.6025390625}, {"start": 1133.25, "end": 1133.63, "word": " ضرورى", "probability": 0.8499755859375}, {"start": 1133.63, "end": 1133.75, "word": " ال", "probability": 0.88037109375}, {"start": 1133.75, "end": 1133.97, "word": " planet", "probability": 0.474609375}, {"start": 1133.97, "end": 1134.25, "word": " تكون", "probability": 0.97705078125}, {"start": 1134.25, "end": 1134.87, "word": " بحركة", "probability": 0.9786376953125}, {"start": 1134.87, "end": 1135.67, "word": " مستمرة", "probability": 0.9359130859375}], "temperature": 1.0}, {"id": 42, "seek": 116161, "start": 1136.16, "end": 1161.62, "text": "وإلا لو مافيش حركة وصلها settling ده أن هذا sort of activity صح؟ فعلا بنقيس الطريقة بشكل هذا وهي ال response يا شباب هاي شايف؟ شايفين؟ في عندنا خطين في عندنا أسود وهو ال control و الأحمر هو ال patient ماشي؟ الأسود هو ال control و الأحمر هو إيش؟", "tokens": [2407, 28814, 15040, 45164, 19446, 41185, 8592, 11331, 31747, 3660, 4032, 36520, 11296, 33841, 11778, 3224, 14739, 23758, 1333, 295, 5191, 20328, 5016, 22807, 6156, 3615, 15040, 44945, 38436, 3794, 41950, 16572, 28671, 4724, 8592, 28820, 23758, 37037, 1829, 2423, 4134, 35186, 13412, 3555, 16758, 8032, 47302, 13412, 995, 33911, 22807, 13412, 995, 33911, 9957, 22807, 8978, 43242, 8315, 16490, 9566, 9957, 8978, 43242, 8315, 5551, 3794, 23328, 37037, 2407, 2423, 1969, 4032, 16247, 5016, 29973, 31439, 2423, 4537, 3714, 33599, 1829, 22807, 16247, 3794, 23328, 31439, 2423, 1969, 4032, 16247, 5016, 29973, 31439, 11933, 1829, 8592, 22807], "avg_logprob": -0.23974116161616163, "compression_ratio": 1.706140350877193, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1136.16, "end": 1136.66, "word": "وإلا", "probability": 0.6993001302083334}, {"start": 1136.66, "end": 1136.92, "word": " لو", "probability": 0.93505859375}, {"start": 1136.92, 
"end": 1137.36, "word": " مافيش", "probability": 0.9189453125}, {"start": 1137.36, "end": 1137.9, "word": " حركة", "probability": 0.9899088541666666}, {"start": 1137.9, "end": 1138.66, "word": " وصلها", "probability": 0.6686197916666666}, {"start": 1138.66, "end": 1138.98, "word": " settling", "probability": 0.286865234375}, {"start": 1138.98, "end": 1139.36, "word": " ده", "probability": 0.7177734375}, {"start": 1139.36, "end": 1139.48, "word": " أن", "probability": 0.2890625}, {"start": 1139.48, "end": 1139.68, "word": " هذا", "probability": 0.4140625}, {"start": 1139.68, "end": 1140.04, "word": " sort", "probability": 0.91943359375}, {"start": 1140.04, "end": 1140.18, "word": " of", "probability": 0.98095703125}, {"start": 1140.18, "end": 1140.76, "word": " activity", "probability": 0.9609375}, {"start": 1140.76, "end": 1142.62, "word": " صح؟", "probability": 0.7099609375}, {"start": 1142.62, "end": 1143.7, "word": " فعلا", "probability": 0.8675130208333334}, {"start": 1143.7, "end": 1144.26, "word": " بنقيس", "probability": 0.738037109375}, {"start": 1144.26, "end": 1145.42, "word": " الطريقة", "probability": 0.9755859375}, {"start": 1145.42, "end": 1145.8, "word": " بشكل", "probability": 0.9597981770833334}, {"start": 1145.8, "end": 1146.04, "word": " هذا", "probability": 0.17919921875}, {"start": 1146.04, "end": 1146.38, "word": " وهي", "probability": 0.641845703125}, {"start": 1146.38, "end": 1146.46, "word": " ال", "probability": 0.60546875}, {"start": 1146.46, "end": 1146.82, "word": " response", "probability": 0.734375}, {"start": 1146.82, "end": 1146.98, "word": " يا", "probability": 0.66748046875}, {"start": 1146.98, "end": 1147.28, "word": " شباب", "probability": 0.99072265625}, {"start": 1147.28, "end": 1147.94, "word": " هاي", "probability": 0.404052734375}, {"start": 1147.94, "end": 1148.64, "word": " شايف؟", "probability": 0.8487548828125}, {"start": 1148.64, "end": 1149.68, "word": " شايفين؟", "probability": 0.9845703125}, {"start": 1149.68, 
"end": 1149.82, "word": " في", "probability": 0.8603515625}, {"start": 1149.82, "end": 1150.04, "word": " عندنا", "probability": 0.79833984375}, {"start": 1150.04, "end": 1150.62, "word": " خطين", "probability": 0.7913411458333334}, {"start": 1150.62, "end": 1151.52, "word": " في", "probability": 0.5615234375}, {"start": 1151.52, "end": 1151.96, "word": " عندنا", "probability": 0.972412109375}, {"start": 1151.96, "end": 1152.64, "word": " أسود", "probability": 0.9446614583333334}, {"start": 1152.64, "end": 1152.98, "word": " وهو", "probability": 0.799072265625}, {"start": 1152.98, "end": 1153.12, "word": " ال", "probability": 0.91552734375}, {"start": 1153.12, "end": 1153.66, "word": " control", "probability": 0.84765625}, {"start": 1153.66, "end": 1154.5, "word": " و", "probability": 0.499755859375}, {"start": 1154.5, "end": 1154.94, "word": " الأحمر", "probability": 0.9427083333333334}, {"start": 1154.94, "end": 1155.16, "word": " هو", "probability": 0.9873046875}, {"start": 1155.16, "end": 1155.32, "word": " ال", "probability": 0.92431640625}, {"start": 1155.32, "end": 1155.78, "word": " patient", "probability": 0.96630859375}, {"start": 1155.78, "end": 1157.9, "word": " ماشي؟", "probability": 0.8797607421875}, {"start": 1157.9, "end": 1158.82, "word": " الأسود", "probability": 0.9856770833333334}, {"start": 1158.82, "end": 1159.04, "word": " هو", "probability": 0.98486328125}, {"start": 1159.04, "end": 1159.2, "word": " ال", "probability": 0.892578125}, {"start": 1159.2, "end": 1159.74, "word": " control", "probability": 0.90771484375}, {"start": 1159.74, "end": 1160.44, "word": " و", "probability": 0.5908203125}, {"start": 1160.44, "end": 1160.92, "word": " الأحمر", "probability": 0.9794921875}, {"start": 1160.92, "end": 1161.16, "word": " هو", "probability": 0.99169921875}, {"start": 1161.16, "end": 1161.62, "word": " إيش؟", "probability": 0.8370361328125}], "temperature": 1.0}, {"id": 43, "seek": 119095, "start": 1164.59, "end": 1190.95, "text": "و إذا إنتوا 
ملاحظين ال response بصير على شكل two wave مش واحدة two wave ماشي طبعا ممكن يكون على one wave او two wave او even three wave ف ال two wave هي indication ل primary و secondary aggregation", "tokens": [2407, 11933, 15730, 11933, 29399, 14407, 3714, 15040, 5016, 19913, 9957, 2423, 4134, 4724, 9381, 13546, 15844, 13412, 28820, 732, 5772, 37893, 36764, 24401, 3660, 732, 5772, 3714, 33599, 1829, 23032, 3555, 3615, 995, 3714, 43020, 7251, 30544, 15844, 472, 5772, 1975, 2407, 732, 5772, 1975, 2407, 754, 1045, 5772, 6156, 2423, 732, 5772, 39896, 18877, 5296, 6194, 4032, 11396, 16743, 399], "avg_logprob": -0.17274304845976451, "compression_ratio": 1.4802259887005649, "no_speech_prob": 0.0, "words": [{"start": 1164.59, "end": 1164.81, "word": "و", "probability": 0.87353515625}, {"start": 1164.81, "end": 1165.21, "word": " إذا", "probability": 0.6771240234375}, {"start": 1165.21, "end": 1166.41, "word": " إنتوا", "probability": 0.76220703125}, {"start": 1166.41, "end": 1166.97, "word": " ملاحظين", "probability": 0.894140625}, {"start": 1166.97, "end": 1167.83, "word": " ال", "probability": 0.87646484375}, {"start": 1167.83, "end": 1168.31, "word": " response", "probability": 0.87060546875}, {"start": 1168.31, "end": 1169.25, "word": " بصير", "probability": 0.8489583333333334}, {"start": 1169.25, "end": 1170.41, "word": " على", "probability": 0.87158203125}, {"start": 1170.41, "end": 1171.19, "word": " شكل", "probability": 0.98681640625}, {"start": 1171.19, "end": 1171.81, "word": " two", "probability": 0.88671875}, {"start": 1171.81, "end": 1172.29, "word": " wave", "probability": 0.7626953125}, {"start": 1172.29, "end": 1173.09, "word": " مش", "probability": 0.8896484375}, {"start": 1173.09, "end": 1173.57, "word": " واحدة", "probability": 0.9308268229166666}, {"start": 1173.57, "end": 1173.93, "word": " two", "probability": 0.35009765625}, {"start": 1173.93, "end": 1174.33, "word": " wave", "probability": 0.94091796875}, {"start": 1174.33, "end": 1176.21, "word": " ماشي", 
"probability": 0.7679850260416666}, {"start": 1176.21, "end": 1177.19, "word": " طبعا", "probability": 0.9100341796875}, {"start": 1177.19, "end": 1177.49, "word": " ممكن", "probability": 0.9853515625}, {"start": 1177.49, "end": 1177.75, "word": " يكون", "probability": 0.975341796875}, {"start": 1177.75, "end": 1177.93, "word": " على", "probability": 0.9091796875}, {"start": 1177.93, "end": 1178.19, "word": " one", "probability": 0.94189453125}, {"start": 1178.19, "end": 1178.63, "word": " wave", "probability": 0.96875}, {"start": 1178.63, "end": 1179.41, "word": " او", "probability": 0.72119140625}, {"start": 1179.41, "end": 1179.71, "word": " two", "probability": 0.92822265625}, {"start": 1179.71, "end": 1180.07, "word": " wave", "probability": 0.96875}, {"start": 1180.07, "end": 1180.35, "word": " او", "probability": 0.820068359375}, {"start": 1180.35, "end": 1180.61, "word": " even", "probability": 0.873046875}, {"start": 1180.61, "end": 1180.95, "word": " three", "probability": 0.87939453125}, {"start": 1180.95, "end": 1181.39, "word": " wave", "probability": 0.93798828125}, {"start": 1181.39, "end": 1183.41, "word": " ف", "probability": 0.41357421875}, {"start": 1183.41, "end": 1183.93, "word": " ال", "probability": 0.61865234375}, {"start": 1183.93, "end": 1184.17, "word": " two", "probability": 0.9140625}, {"start": 1184.17, "end": 1184.71, "word": " wave", "probability": 0.958984375}, {"start": 1184.71, "end": 1185.17, "word": " هي", "probability": 0.853515625}, {"start": 1185.17, "end": 1186.09, "word": " indication", "probability": 0.93505859375}, {"start": 1186.09, "end": 1187.33, "word": " ل", "probability": 0.95263671875}, {"start": 1187.33, "end": 1188.51, "word": " primary", "probability": 0.93798828125}, {"start": 1188.51, "end": 1189.57, "word": " و", "probability": 0.9658203125}, {"start": 1189.57, "end": 1190.17, "word": " secondary", "probability": 0.9638671875}, {"start": 1190.17, "end": 1190.95, "word": " aggregation", "probability": 
0.96533203125}], "temperature": 1.0}, {"id": 44, "seek": 122182, "start": 1192.4, "end": 1221.82, "text": "طب كيف بيصير ال primary و ال secondary aggregation؟ يعني ال aggregation بتصير على خطواتين، ماشي؟ الخطوة الأولى اللي هي عشان بقى هو أول ما تبدأ تتنشط ل platelet بتطلع شوية ADP، من مين؟ من ال platelet، ما هو موجود في ال platelet، dense granules، dense granules، مظبوط؟ لشوية هادى، في البداية بتدينا ال first wave، اللي هي هادى", "tokens": [9566, 3555, 9122, 33911, 4724, 1829, 9381, 13546, 2423, 6194, 4032, 2423, 11396, 16743, 399, 22807, 37495, 22653, 2423, 16743, 399, 39894, 9381, 13546, 15844, 16490, 9566, 2407, 9307, 9957, 12399, 3714, 33599, 1829, 22807, 33962, 9566, 2407, 3660, 16247, 12610, 7578, 13672, 1829, 39896, 6225, 8592, 7649, 4724, 4587, 7578, 31439, 5551, 12610, 19446, 6055, 44510, 10721, 6055, 2655, 1863, 8592, 9566, 5296, 3403, 15966, 39894, 9566, 1211, 3615, 13412, 2407, 10632, 9135, 47, 12399, 9154, 3714, 9957, 22807, 9154, 2423, 3403, 15966, 12399, 19446, 31439, 3714, 29245, 23328, 8978, 2423, 3403, 15966, 12399, 1441, 405, 9370, 3473, 12399, 1441, 405, 9370, 3473, 12399, 3714, 19913, 3555, 2407, 9566, 22807, 5296, 8592, 2407, 10632, 8032, 18513, 7578, 12399, 8978, 29739, 28259, 10632, 39894, 16254, 8315, 2423, 700, 5772, 12399, 13672, 1829, 39896, 8032, 18513, 7578], "avg_logprob": -0.26300183525920784, "compression_ratio": 1.7753623188405796, "no_speech_prob": 0.0, "words": [{"start": 1192.4, "end": 1192.64, "word": "طب", "probability": 0.923583984375}, {"start": 1192.64, "end": 1192.84, "word": " كيف", "probability": 0.852294921875}, {"start": 1192.84, "end": 1193.14, "word": " بيصير", "probability": 0.9046630859375}, {"start": 1193.14, "end": 1193.22, "word": " ال", "probability": 0.6259765625}, {"start": 1193.22, "end": 1193.52, "word": " primary", "probability": 0.55419921875}, {"start": 1193.52, "end": 1193.72, "word": " و", "probability": 0.8603515625}, {"start": 1193.72, "end": 1193.74, "word": " ال", "probability": 0.64794921875}, 
{"start": 1193.74, "end": 1194.08, "word": " secondary", "probability": 0.943359375}, {"start": 1194.08, "end": 1195.58, "word": " aggregation؟", "probability": 0.8058268229166666}, {"start": 1195.58, "end": 1195.96, "word": " يعني", "probability": 0.906005859375}, {"start": 1195.96, "end": 1196.1, "word": " ال", "probability": 0.9775390625}, {"start": 1196.1, "end": 1196.62, "word": " aggregation", "probability": 0.948974609375}, {"start": 1196.62, "end": 1196.98, "word": " بتصير", "probability": 0.9314778645833334}, {"start": 1196.98, "end": 1197.14, "word": " على", "probability": 0.66455078125}, {"start": 1197.14, "end": 1198.44, "word": " خطواتين،", "probability": 0.6549479166666666}, {"start": 1198.44, "end": 1199.7, "word": " ماشي؟", "probability": 0.8209228515625}, {"start": 1199.7, "end": 1200.04, "word": " الخطوة", "probability": 0.93310546875}, {"start": 1200.04, "end": 1200.32, "word": " الأولى", "probability": 0.91650390625}, {"start": 1200.32, "end": 1200.46, "word": " اللي", "probability": 0.4320068359375}, {"start": 1200.46, "end": 1200.52, "word": " هي", "probability": 0.6376953125}, {"start": 1200.52, "end": 1200.76, "word": " عشان", "probability": 0.7294921875}, {"start": 1200.76, "end": 1201.1, "word": " بقى", "probability": 0.9694010416666666}, {"start": 1201.1, "end": 1202.22, "word": " هو", "probability": 0.7021484375}, {"start": 1202.22, "end": 1202.64, "word": " أول", "probability": 0.927734375}, {"start": 1202.64, "end": 1202.82, "word": " ما", "probability": 0.80419921875}, {"start": 1202.82, "end": 1203.32, "word": " تبدأ", "probability": 0.9288736979166666}, {"start": 1203.32, "end": 1203.92, "word": " تتنشط", "probability": 0.83369140625}, {"start": 1203.92, "end": 1204.08, "word": " ل", "probability": 0.8896484375}, {"start": 1204.08, "end": 1204.66, "word": " platelet", "probability": 0.45098876953125}, {"start": 1204.66, "end": 1206.38, "word": " بتطلع", "probability": 0.9512939453125}, {"start": 1206.38, "end": 1207.2, "word": " 
شوية", "probability": 0.8870442708333334}, {"start": 1207.2, "end": 1208.28, "word": " ADP،", "probability": 0.69873046875}, {"start": 1208.28, "end": 1208.44, "word": " من", "probability": 0.98779296875}, {"start": 1208.44, "end": 1211.1, "word": " مين؟", "probability": 0.98486328125}, {"start": 1211.1, "end": 1211.26, "word": " من", "probability": 0.9169921875}, {"start": 1211.26, "end": 1211.4, "word": " ال", "probability": 0.921875}, {"start": 1211.4, "end": 1211.96, "word": " platelet،", "probability": 0.7067057291666666}, {"start": 1211.96, "end": 1212.06, "word": " ما", "probability": 0.273681640625}, {"start": 1212.06, "end": 1212.16, "word": " هو", "probability": 0.94189453125}, {"start": 1212.16, "end": 1212.5, "word": " موجود", "probability": 0.978515625}, {"start": 1212.5, "end": 1212.62, "word": " في", "probability": 0.90185546875}, {"start": 1212.62, "end": 1212.74, "word": " ال", "probability": 0.87841796875}, {"start": 1212.74, "end": 1213.24, "word": " platelet،", "probability": 0.6995442708333334}, {"start": 1213.24, "end": 1213.48, "word": " dense", "probability": 0.52288818359375}, {"start": 1213.48, "end": 1214.66, "word": " granules،", "probability": 0.403076171875}, {"start": 1214.66, "end": 1215.04, "word": " dense", "probability": 0.66015625}, {"start": 1215.04, "end": 1216.0, "word": " granules،", "probability": 0.8982747395833334}, {"start": 1216.0, "end": 1217.4, "word": " مظبوط؟", "probability": 0.8553873697916666}, {"start": 1217.4, "end": 1217.94, "word": " لشوية", "probability": 0.8770751953125}, {"start": 1217.94, "end": 1218.38, "word": " هادى،", "probability": 0.65472412109375}, {"start": 1218.38, "end": 1218.56, "word": " في", "probability": 0.84228515625}, {"start": 1218.56, "end": 1219.04, "word": " البداية", "probability": 0.97509765625}, {"start": 1219.04, "end": 1219.58, "word": " بتدينا", "probability": 0.7010091145833334}, {"start": 1219.58, "end": 1219.92, "word": " ال", "probability": 0.974609375}, {"start": 1219.92, 
"end": 1220.24, "word": " first", "probability": 0.91748046875}, {"start": 1220.24, "end": 1221.18, "word": " wave،", "probability": 0.85791015625}, {"start": 1221.18, "end": 1221.38, "word": " اللي", "probability": 0.93994140625}, {"start": 1221.38, "end": 1221.54, "word": " هي", "probability": 0.87109375}, {"start": 1221.54, "end": 1221.82, "word": " هادى", "probability": 0.89111328125}], "temperature": 1.0}, {"id": 45, "seek": 125136, "start": 1222.63, "end": 1251.37, "text": "هذا ده يعني، شايفين الأولاني هذا؟ and this is reversible في الغالب إيش؟ reversible بعد هيك بيصير في complete degranulation فبتطلع كمية كبيرة من ال 100 فبتعمل irreversible aggregation or agglutination irreversible", "tokens": [3224, 15730, 11778, 3224, 37495, 22653, 12399, 13412, 995, 33911, 9957, 16247, 12610, 7649, 1829, 23758, 22807, 293, 341, 307, 44788, 8978, 6024, 118, 6027, 3555, 11933, 1829, 8592, 22807, 44788, 39182, 39896, 4117, 4724, 1829, 9381, 13546, 8978, 3566, 368, 42381, 2776, 6156, 3555, 2655, 9566, 1211, 3615, 9122, 2304, 10632, 9122, 3555, 48923, 9154, 2423, 2319, 6156, 3555, 2655, 25957, 1211, 16014, 840, 964, 16743, 399, 420, 623, 7191, 325, 2486, 16014, 840, 964], "avg_logprob": -0.24208604205738415, "compression_ratio": 1.5263157894736843, "no_speech_prob": 0.0, "words": [{"start": 1222.63, "end": 1223.03, "word": "هذا", "probability": 0.592529296875}, {"start": 1223.03, "end": 1223.23, "word": " ده", "probability": 0.59521484375}, {"start": 1223.23, "end": 1223.51, "word": " يعني،", "probability": 0.4500325520833333}, {"start": 1223.51, "end": 1223.99, "word": " شايفين", "probability": 0.9454345703125}, {"start": 1223.99, "end": 1224.91, "word": " الأولاني", "probability": 0.9061279296875}, {"start": 1224.91, "end": 1226.79, "word": " هذا؟", "probability": 0.822998046875}, {"start": 1226.79, "end": 1229.03, "word": " and", "probability": 0.525390625}, {"start": 1229.03, "end": 1229.23, "word": " this", "probability": 0.96826171875}, {"start": 1229.23, "end": 1229.41, 
"word": " is", "probability": 0.955078125}, {"start": 1229.41, "end": 1229.95, "word": " reversible", "probability": 0.96826171875}, {"start": 1229.95, "end": 1230.93, "word": " في", "probability": 0.374267578125}, {"start": 1230.93, "end": 1231.33, "word": " الغالب", "probability": 0.9376220703125}, {"start": 1231.33, "end": 1231.83, "word": " إيش؟", "probability": 0.73876953125}, {"start": 1231.83, "end": 1232.41, "word": " reversible", "probability": 0.84130859375}, {"start": 1232.41, "end": 1235.01, "word": " بعد", "probability": 0.89111328125}, {"start": 1235.01, "end": 1235.45, "word": " هيك", "probability": 0.962646484375}, {"start": 1235.45, "end": 1236.71, "word": " بيصير", "probability": 0.8004150390625}, {"start": 1236.71, "end": 1236.97, "word": " في", "probability": 0.91845703125}, {"start": 1236.97, "end": 1238.07, "word": " complete", "probability": 0.447021484375}, {"start": 1238.07, "end": 1239.69, "word": " degranulation", "probability": 0.7496744791666666}, {"start": 1239.69, "end": 1242.05, "word": " فبتطلع", "probability": 0.8884684244791666}, {"start": 1242.05, "end": 1242.55, "word": " كمية", "probability": 0.9973958333333334}, {"start": 1242.55, "end": 1243.19, "word": " كبيرة", "probability": 0.9881184895833334}, {"start": 1243.19, "end": 1243.37, "word": " من", "probability": 0.990234375}, {"start": 1243.37, "end": 1243.67, "word": " ال", "probability": 0.4130859375}, {"start": 1243.67, "end": 1243.79, "word": " 100", "probability": 0.415283203125}, {"start": 1243.79, "end": 1245.59, "word": " فبتعمل", "probability": 0.9048828125}, {"start": 1245.59, "end": 1246.83, "word": " irreversible", "probability": 0.8982747395833334}, {"start": 1246.83, "end": 1248.29, "word": " aggregation", "probability": 0.81103515625}, {"start": 1248.29, "end": 1248.73, "word": " or", "probability": 0.81298828125}, {"start": 1248.73, "end": 1249.85, "word": " agglutination", "probability": 0.9281005859375}, {"start": 1249.85, "end": 1251.37, "word": " 
irreversible", "probability": 0.8831380208333334}], "temperature": 1.0}, {"id": 46, "seek": 128061, "start": 1252.04, "end": 1280.62, "text": "procedures بيكون إيه عشان .. irreversible ما بترجع لـplatelet في كلامها أبدا عشان هيك بيكون ال response عليها مرحلتين primary و secondary response ماشي يا سيباها؟ طيب في الbernerdsonier disease و الvolvalbrand disease اتنين have the same response ماشي؟", "tokens": [4318, 1232, 1303, 4724, 1829, 30544, 11933, 1829, 3224, 6225, 8592, 7649, 4386, 16014, 840, 964, 19446, 39894, 47341, 3615, 5296, 39184, 39975, 15966, 8978, 28242, 10943, 11296, 5551, 3555, 28259, 6225, 8592, 7649, 39896, 4117, 4724, 1829, 30544, 2423, 4134, 25894, 11296, 3714, 2288, 5016, 1211, 2655, 9957, 6194, 4032, 11396, 4134, 3714, 33599, 1829, 35186, 8608, 1829, 3555, 995, 11296, 22807, 23032, 1829, 3555, 8978, 2423, 607, 1193, 67, 3015, 811, 4752, 4032, 2423, 9646, 3337, 30476, 4752, 1975, 2655, 1863, 9957, 362, 264, 912, 4134, 3714, 33599, 1829, 22807], "avg_logprob": -0.3756720455743933, "compression_ratio": 1.513157894736842, "no_speech_prob": 0.0, "words": [{"start": 1252.04, "end": 1252.74, "word": "procedures", "probability": 0.5552164713541666}, {"start": 1252.74, "end": 1253.66, "word": " بيكون", "probability": 0.7372233072916666}, {"start": 1253.66, "end": 1253.96, "word": " إيه", "probability": 0.6396077473958334}, {"start": 1253.96, "end": 1254.16, "word": " عشان", "probability": 0.7615559895833334}, {"start": 1254.16, "end": 1254.36, "word": " ..", "probability": 0.2271728515625}, {"start": 1254.36, "end": 1255.12, "word": " irreversible", "probability": 0.8557942708333334}, {"start": 1255.12, "end": 1255.3, "word": " ما", "probability": 0.5615234375}, {"start": 1255.3, "end": 1255.78, "word": " بترجع", "probability": 0.8595377604166666}, {"start": 1255.78, "end": 1256.78, "word": " لـplatelet", "probability": 0.513092041015625}, {"start": 1256.78, "end": 1256.96, "word": " في", "probability": 0.77001953125}, {"start": 1256.96, "end": 1257.4, 
"word": " كلامها", "probability": 0.9759114583333334}, {"start": 1257.4, "end": 1257.98, "word": " أبدا", "probability": 0.8924153645833334}, {"start": 1257.98, "end": 1258.6, "word": " عشان", "probability": 0.9189453125}, {"start": 1258.6, "end": 1258.74, "word": " هيك", "probability": 0.506591796875}, {"start": 1258.74, "end": 1258.98, "word": " بيكون", "probability": 0.8948567708333334}, {"start": 1258.98, "end": 1259.1, "word": " ال", "probability": 0.8974609375}, {"start": 1259.1, "end": 1259.5, "word": " response", "probability": 0.84765625}, {"start": 1259.5, "end": 1260.16, "word": " عليها", "probability": 0.962646484375}, {"start": 1260.16, "end": 1261.38, "word": " مرحلتين", "probability": 0.9803873697916666}, {"start": 1261.38, "end": 1262.1, "word": " primary", "probability": 0.7236328125}, {"start": 1262.1, "end": 1262.9, "word": " و", "probability": 0.91259765625}, {"start": 1262.9, "end": 1263.48, "word": " secondary", "probability": 0.83447265625}, {"start": 1263.48, "end": 1264.82, "word": " response", "probability": 0.9013671875}, {"start": 1264.82, "end": 1265.22, "word": " ماشي", "probability": 0.8081868489583334}, {"start": 1265.22, "end": 1265.34, "word": " يا", "probability": 0.48388671875}, {"start": 1265.34, "end": 1266.86, "word": " سيباها؟", "probability": 0.613037109375}, {"start": 1266.86, "end": 1267.78, "word": " طيب", "probability": 0.8430989583333334}, {"start": 1267.78, "end": 1268.8, "word": " في", "probability": 0.6044921875}, {"start": 1268.8, "end": 1270.2, "word": " الbernerdsonier", "probability": 0.5219930013020834}, {"start": 1270.2, "end": 1270.86, "word": " disease", "probability": 0.9150390625}, {"start": 1270.86, "end": 1272.14, "word": " و", "probability": 0.8134765625}, {"start": 1272.14, "end": 1273.38, "word": " الvolvalbrand", "probability": 0.508209228515625}, {"start": 1273.38, "end": 1274.22, "word": " disease", "probability": 0.93359375}, {"start": 1274.22, "end": 1275.18, "word": " اتنين", "probability": 
0.79150390625}, {"start": 1275.18, "end": 1276.6, "word": " have", "probability": 0.74072265625}, {"start": 1276.6, "end": 1276.88, "word": " the", "probability": 0.94287109375}, {"start": 1276.88, "end": 1277.4, "word": " same", "probability": 0.923828125}, {"start": 1277.4, "end": 1278.42, "word": " response", "probability": 0.955078125}, {"start": 1278.42, "end": 1280.62, "word": " ماشي؟", "probability": 0.900634765625}], "temperature": 1.0}, {"id": 47, "seek": 131130, "start": 1283.78, "end": 1311.3, "text": "in the aggregation study كيف قالوا دي response تقول agonist except rest وستة except rest وستة او rest وستة حسب في تلفزوش ال D فاهمين يا شباب و لو طلعناها هنلاقي فيه response ل ADP ولا فشي لحظة ال patient ماشي مع ال normal ال control ماشي", "tokens": [259, 264, 16743, 399, 2979, 9122, 33911, 50239, 14407, 11778, 1829, 4134, 6055, 39648, 623, 266, 468, 3993, 1472, 4032, 14851, 3660, 3993, 1472, 4032, 14851, 3660, 1975, 2407, 1472, 4032, 14851, 3660, 11331, 35457, 8978, 6055, 46538, 11622, 2407, 8592, 2423, 413, 6156, 995, 16095, 9957, 35186, 13412, 3555, 16758, 4032, 45164, 23032, 1211, 3615, 8315, 11296, 8032, 1863, 15040, 38436, 8978, 3224, 4134, 5296, 9135, 47, 49429, 6156, 8592, 1829, 5296, 5016, 19913, 3660, 2423, 4537, 3714, 33599, 1829, 20449, 2423, 2710, 2423, 1969, 3714, 33599, 1829], "avg_logprob": -0.30173610548178353, "compression_ratio": 1.5483870967741935, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1283.78, "end": 1284.14, "word": "in", "probability": 0.1522216796875}, {"start": 1284.14, "end": 1284.7, "word": " the", "probability": 0.8359375}, {"start": 1284.7, "end": 1286.28, "word": " aggregation", "probability": 0.861572265625}, {"start": 1286.28, "end": 1286.76, "word": " study", "probability": 0.9345703125}, {"start": 1286.76, "end": 1287.62, "word": " كيف", "probability": 0.81884765625}, {"start": 1287.62, "end": 1288.88, "word": " قالوا", "probability": 0.6480712890625}, {"start": 1288.88, "end": 1289.16, "word": " 
دي", "probability": 0.5242919921875}, {"start": 1289.16, "end": 1290.06, "word": " response", "probability": 0.86669921875}, {"start": 1290.06, "end": 1291.1, "word": " تقول", "probability": 0.938720703125}, {"start": 1291.1, "end": 1292.04, "word": " agonist", "probability": 0.8894856770833334}, {"start": 1292.04, "end": 1293.26, "word": " except", "probability": 0.95263671875}, {"start": 1293.26, "end": 1293.6, "word": " rest", "probability": 0.67919921875}, {"start": 1293.6, "end": 1294.22, "word": " وستة", "probability": 0.3454182942708333}, {"start": 1294.22, "end": 1295.3, "word": " except", "probability": 0.826171875}, {"start": 1295.3, "end": 1296.04, "word": " rest", "probability": 0.39892578125}, {"start": 1296.04, "end": 1297.68, "word": " وستة", "probability": 0.9303385416666666}, {"start": 1297.68, "end": 1298.1, "word": " او", "probability": 0.68701171875}, {"start": 1298.1, "end": 1298.34, "word": " rest", "probability": 0.85205078125}, {"start": 1298.34, "end": 1298.86, "word": " وستة", "probability": 0.9300130208333334}, {"start": 1298.86, "end": 1299.3, "word": " حسب", "probability": 0.772216796875}, {"start": 1299.3, "end": 1299.52, "word": " في", "probability": 0.291259765625}, {"start": 1299.52, "end": 1300.18, "word": " تلفزوش", "probability": 0.8357421875}, {"start": 1300.18, "end": 1300.36, "word": " ال", "probability": 0.54150390625}, {"start": 1300.36, "end": 1300.56, "word": " D", "probability": 0.375}, {"start": 1300.56, "end": 1301.6, "word": " فاهمين", "probability": 0.9290771484375}, {"start": 1301.6, "end": 1301.72, "word": " يا", "probability": 0.943359375}, {"start": 1301.72, "end": 1302.02, "word": " شباب", "probability": 0.9869791666666666}, {"start": 1302.02, "end": 1303.26, "word": " و", "probability": 0.478271484375}, {"start": 1303.26, "end": 1303.48, "word": " لو", "probability": 0.73876953125}, {"start": 1303.48, "end": 1304.2, "word": " طلعناها", "probability": 0.962890625}, {"start": 1304.2, "end": 1304.64, "word": " 
هنلاقي", "probability": 0.74462890625}, {"start": 1304.64, "end": 1304.96, "word": " فيه", "probability": 0.71826171875}, {"start": 1304.96, "end": 1305.5, "word": " response", "probability": 0.9326171875}, {"start": 1305.5, "end": 1305.72, "word": " ل", "probability": 0.62939453125}, {"start": 1305.72, "end": 1306.16, "word": " ADP", "probability": 0.651123046875}, {"start": 1306.16, "end": 1306.32, "word": " ولا", "probability": 0.728515625}, {"start": 1306.32, "end": 1306.94, "word": " فشي", "probability": 0.6783040364583334}, {"start": 1306.94, "end": 1307.62, "word": " لحظة", "probability": 0.89990234375}, {"start": 1307.62, "end": 1308.24, "word": " ال", "probability": 0.397705078125}, {"start": 1308.24, "end": 1308.56, "word": " patient", "probability": 0.9345703125}, {"start": 1308.56, "end": 1308.94, "word": " ماشي", "probability": 0.9259440104166666}, {"start": 1308.94, "end": 1309.06, "word": " مع", "probability": 0.97802734375}, {"start": 1309.06, "end": 1309.18, "word": " ال", "probability": 0.86181640625}, {"start": 1309.18, "end": 1309.54, "word": " normal", "probability": 0.83837890625}, {"start": 1309.54, "end": 1310.28, "word": " ال", "probability": 0.2149658203125}, {"start": 1310.28, "end": 1310.74, "word": " control", "probability": 0.91552734375}, {"start": 1310.74, "end": 1311.3, "word": " ماشي", "probability": 0.9671223958333334}], "temperature": 1.0}, {"id": 48, "seek": 132446, "start": 1311.74, "end": 1324.46, "text": "فى response لل epinephrine ولا لا؟ فى response لل .. 
ما حدثتوا ستة؟ شايفين ال patient ايش؟ ماصلاش، مافهوش بقى، يبقى فى المرضين", "tokens": [5172, 7578, 4134, 24976, 2388, 533, 950, 15140, 49429, 20193, 22807, 6156, 7578, 4134, 24976, 4386, 19446, 11331, 3215, 12984, 2655, 14407, 8608, 2655, 3660, 22807, 13412, 995, 33911, 9957, 2423, 4537, 1975, 1829, 8592, 22807, 19446, 9381, 15040, 8592, 12399, 19446, 5172, 3224, 2407, 8592, 4724, 4587, 7578, 12399, 7251, 3555, 4587, 7578, 6156, 7578, 9673, 43042, 9957], "avg_logprob": -0.43984375645716983, "compression_ratio": 1.39568345323741, "no_speech_prob": 0.0, "words": [{"start": 1311.74, "end": 1311.94, "word": "فى", "probability": 0.57025146484375}, {"start": 1311.94, "end": 1312.4, "word": " response", "probability": 0.80419921875}, {"start": 1312.4, "end": 1312.62, "word": " لل", "probability": 0.67724609375}, {"start": 1312.62, "end": 1313.2, "word": " epinephrine", "probability": 0.7062149047851562}, {"start": 1313.2, "end": 1313.36, "word": " ولا", "probability": 0.7548828125}, {"start": 1313.36, "end": 1313.94, "word": " لا؟", "probability": 0.81640625}, {"start": 1313.94, "end": 1314.06, "word": " فى", "probability": 0.79345703125}, {"start": 1314.06, "end": 1315.32, "word": " response", "probability": 0.89111328125}, {"start": 1315.32, "end": 1315.7, "word": " لل", "probability": 0.51123046875}, {"start": 1315.7, "end": 1316.08, "word": " ..", "probability": 0.345947265625}, {"start": 1316.08, "end": 1316.28, "word": " ما", "probability": 0.54345703125}, {"start": 1316.28, "end": 1316.66, "word": " حدثتوا", "probability": 0.5768798828125}, {"start": 1316.66, "end": 1317.12, "word": " ستة؟", "probability": 0.74945068359375}, {"start": 1317.12, "end": 1317.9, "word": " شايفين", "probability": 0.8470458984375}, {"start": 1317.9, "end": 1318.04, "word": " ال", "probability": 0.96044921875}, {"start": 1318.04, "end": 1318.3, "word": " patient", "probability": 0.94921875}, {"start": 1318.3, "end": 1319.72, "word": " ايش؟", "probability": 0.676300048828125}, {"start": 
1319.72, "end": 1320.72, "word": " ماصلاش،", "probability": 0.429345703125}, {"start": 1320.72, "end": 1322.14, "word": " مافهوش", "probability": 0.800244140625}, {"start": 1322.14, "end": 1323.4, "word": " بقى،", "probability": 0.8768310546875}, {"start": 1323.4, "end": 1323.68, "word": " يبقى", "probability": 0.7562255859375}, {"start": 1323.68, "end": 1323.8, "word": " فى", "probability": 0.73193359375}, {"start": 1323.8, "end": 1324.46, "word": " المرضين", "probability": 0.88037109375}], "temperature": 1.0}, {"id": 49, "seek": 134830, "start": 1325.26, "end": 1348.3, "text": "هناك إجابة لجميع الـ Agonists إلا أن الـ Restocetin هو نوع من أنواع الـ Antibiotic يستخرج من .. هو Antibiotic يستخرج من نوع من أنواع البلاكتيريا، وهو Strong Agonist، وهو Strong Agonist يعني ينشط عنده القدرة أنه ينشط لـ Platelet حتى المثبت بالفرنالين", "tokens": [3224, 8315, 4117, 11933, 7435, 16758, 3660, 5296, 7435, 2304, 40228, 2423, 39184, 2725, 266, 1751, 11933, 15040, 14739, 2423, 39184, 13094, 905, 38645, 31439, 8717, 45367, 9154, 14739, 14407, 3615, 2423, 39184, 5130, 897, 6471, 299, 7251, 14851, 34740, 7435, 9154, 4386, 31439, 5130, 897, 6471, 299, 7251, 14851, 34740, 7435, 9154, 8717, 45367, 9154, 14739, 14407, 3615, 29739, 15040, 4117, 2655, 13546, 25528, 12399, 37037, 2407, 22792, 2725, 266, 468, 12399, 37037, 2407, 22792, 2725, 266, 468, 37495, 22653, 7251, 1863, 8592, 9566, 43242, 3224, 25062, 3215, 25720, 14739, 3224, 7251, 1863, 8592, 9566, 5296, 39184, 17461, 15966, 11331, 49975, 9673, 12984, 3555, 2655, 20666, 5172, 2288, 1863, 6027, 9957], "avg_logprob": -0.3368362831858407, "compression_ratio": 1.7934272300469483, "no_speech_prob": 0.0, "words": [{"start": 1325.26, "end": 1325.8, "word": "هناك", "probability": 0.6282552083333334}, {"start": 1325.8, "end": 1326.64, "word": " إجابة", "probability": 0.6886138916015625}, {"start": 1326.64, "end": 1327.22, "word": " لجميع", "probability": 0.69610595703125}, {"start": 1327.22, "end": 1327.4, "word": " الـ", "probability": 
0.2978515625}, {"start": 1327.4, "end": 1328.04, "word": " Agonists", "probability": 0.6695149739583334}, {"start": 1328.04, "end": 1328.5, "word": " إلا", "probability": 0.44775390625}, {"start": 1328.5, "end": 1329.44, "word": " أن", "probability": 0.1461181640625}, {"start": 1329.44, "end": 1329.74, "word": " الـ", "probability": 0.523681640625}, {"start": 1329.74, "end": 1330.92, "word": " Restocetin", "probability": 0.679443359375}, {"start": 1330.92, "end": 1331.12, "word": " هو", "probability": 0.623046875}, {"start": 1331.12, "end": 1331.78, "word": " نوع", "probability": 0.858642578125}, {"start": 1331.78, "end": 1331.88, "word": " من", "probability": 0.970703125}, {"start": 1331.88, "end": 1332.14, "word": " أنواع", "probability": 0.5859375}, {"start": 1332.14, "end": 1332.2, "word": " الـ", "probability": 0.4259033203125}, {"start": 1332.2, "end": 1333.12, "word": " Antibiotic", "probability": 0.7862548828125}, {"start": 1333.12, "end": 1334.24, "word": " يستخرج", "probability": 0.7991943359375}, {"start": 1334.24, "end": 1334.48, "word": " من", "probability": 0.9765625}, {"start": 1334.48, "end": 1334.54, "word": " ..", "probability": 0.1761474609375}, {"start": 1334.54, "end": 1334.86, "word": " هو", "probability": 0.869140625}, {"start": 1334.86, "end": 1335.56, "word": " Antibiotic", "probability": 0.9105224609375}, {"start": 1335.56, "end": 1336.02, "word": " يستخرج", "probability": 0.984130859375}, {"start": 1336.02, "end": 1336.16, "word": " من", "probability": 0.99462890625}, {"start": 1336.16, "end": 1336.34, "word": " نوع", "probability": 0.950439453125}, {"start": 1336.34, "end": 1336.46, "word": " من", "probability": 0.98095703125}, {"start": 1336.46, "end": 1336.68, "word": " أنواع", "probability": 0.9620768229166666}, {"start": 1336.68, "end": 1337.74, "word": " البلاكتيريا،", "probability": 0.7967006138392857}, {"start": 1337.74, "end": 1338.4, "word": " وهو", "probability": 0.908447265625}, {"start": 1338.4, "end": 1338.88, "word": " 
Strong", "probability": 0.497802734375}, {"start": 1338.88, "end": 1340.34, "word": " Agonist،", "probability": 0.8426513671875}, {"start": 1340.34, "end": 1340.64, "word": " وهو", "probability": 0.8505859375}, {"start": 1340.64, "end": 1341.12, "word": " Strong", "probability": 0.8115234375}, {"start": 1341.12, "end": 1341.78, "word": " Agonist", "probability": 0.9739583333333334}, {"start": 1341.78, "end": 1342.44, "word": " يعني", "probability": 0.893798828125}, {"start": 1342.44, "end": 1343.26, "word": " ينشط", "probability": 0.8310546875}, {"start": 1343.26, "end": 1343.64, "word": " عنده", "probability": 0.723388671875}, {"start": 1343.64, "end": 1344.06, "word": " القدرة", "probability": 0.97412109375}, {"start": 1344.06, "end": 1344.38, "word": " أنه", "probability": 0.72119140625}, {"start": 1344.38, "end": 1344.82, "word": " ينشط", "probability": 0.9849853515625}, {"start": 1344.82, "end": 1345.02, "word": " لـ", "probability": 0.499267578125}, {"start": 1345.02, "end": 1345.46, "word": " Platelet", "probability": 0.7110595703125}, {"start": 1345.46, "end": 1346.5, "word": " حتى", "probability": 0.892822265625}, {"start": 1346.5, "end": 1347.48, "word": " المثبت", "probability": 0.9273681640625}, {"start": 1347.48, "end": 1348.3, "word": " بالفرنالين", "probability": 0.886474609375}], "temperature": 1.0}, {"id": 50, "seek": 136485, "start": 1349.41, "end": 1364.85, "text": "لكن في غياب ال receptor لو مش مثبته ولا مش عارف ايش مستحيل يصير فياه في اللي هو ال aggregation process، المفهوم مش ابقى", "tokens": [1211, 19452, 8978, 32771, 1829, 16758, 2423, 32264, 45164, 37893, 3714, 12984, 3555, 47395, 49429, 37893, 6225, 9640, 5172, 1975, 1829, 8592, 3714, 14851, 5016, 26895, 7251, 9381, 13546, 8978, 40294, 8978, 13672, 1829, 31439, 2423, 16743, 399, 1399, 12399, 9673, 5172, 3224, 20498, 37893, 48127, 4587, 7578], "avg_logprob": -0.26833545918367346, "compression_ratio": 1.3309859154929577, "no_speech_prob": 0.0, "words": [{"start": 1349.41, "end": 1349.89, 
"word": "لكن", "probability": 0.830078125}, {"start": 1349.89, "end": 1350.41, "word": " في", "probability": 0.86767578125}, {"start": 1350.41, "end": 1350.97, "word": " غياب", "probability": 0.853515625}, {"start": 1350.97, "end": 1351.07, "word": " ال", "probability": 0.91357421875}, {"start": 1351.07, "end": 1351.57, "word": " receptor", "probability": 0.55859375}, {"start": 1351.57, "end": 1352.69, "word": " لو", "probability": 0.75}, {"start": 1352.69, "end": 1353.15, "word": " مش", "probability": 0.9306640625}, {"start": 1353.15, "end": 1353.81, "word": " مثبته", "probability": 0.6995849609375}, {"start": 1353.81, "end": 1354.03, "word": " ولا", "probability": 0.72607421875}, {"start": 1354.03, "end": 1354.29, "word": " مش", "probability": 0.9658203125}, {"start": 1354.29, "end": 1354.59, "word": " عارف", "probability": 0.98876953125}, {"start": 1354.59, "end": 1354.91, "word": " ايش", "probability": 0.68798828125}, {"start": 1354.91, "end": 1355.73, "word": " مستحيل", "probability": 0.9146728515625}, {"start": 1355.73, "end": 1356.01, "word": " يصير", "probability": 0.9586588541666666}, {"start": 1356.01, "end": 1356.53, "word": " فياه", "probability": 0.58660888671875}, {"start": 1356.53, "end": 1357.69, "word": " في", "probability": 0.7734375}, {"start": 1357.69, "end": 1358.95, "word": " اللي", "probability": 0.87890625}, {"start": 1358.95, "end": 1359.35, "word": " هو", "probability": 0.9853515625}, {"start": 1359.35, "end": 1360.17, "word": " ال", "probability": 0.89453125}, {"start": 1360.17, "end": 1362.81, "word": " aggregation", "probability": 0.9072265625}, {"start": 1362.81, "end": 1364.21, "word": " process،", "probability": 0.6748046875}, {"start": 1364.21, "end": 1364.51, "word": " المفهوم", "probability": 0.84979248046875}, {"start": 1364.51, "end": 1364.65, "word": " مش", "probability": 0.47900390625}, {"start": 1364.65, "end": 1364.85, "word": " ابقى", "probability": 0.5967203776041666}], "temperature": 1.0}, {"id": 51, "seek": 139522, 
"start": 1366.28, "end": 1395.22, "text": "حد عنده سؤال؟ أحسنت، هذا سؤال قيم جدا، بس قبل ما أجاوبك، أنا جيت بورجي، أنا جيت هذا يا شباب، هذه صورة معبرة برضه لمين؟ للمرادين، هي عبارة عن هذه الـplatelet، ماشي، في جواها وفوق البراند، احنا جولنا الـplatelet وفوق البراند يخزن في موقعين، في الـplatelet وفياش؟ الـplatelet alpha granules", "tokens": [5016, 3215, 43242, 3224, 8608, 33604, 6027, 22807, 5551, 5016, 3794, 29399, 12399, 23758, 8608, 33604, 6027, 12174, 32640, 10874, 28259, 12399, 4724, 3794, 12174, 36150, 19446, 5551, 7435, 995, 37746, 4117, 12399, 41850, 10874, 36081, 4724, 13063, 7435, 1829, 12399, 41850, 10874, 36081, 23758, 35186, 13412, 3555, 16758, 12399, 29538, 20328, 13063, 3660, 20449, 3555, 25720, 4724, 43042, 3224, 32767, 9957, 22807, 5296, 19528, 2288, 18513, 9957, 12399, 39896, 6225, 3555, 9640, 3660, 18871, 29538, 2423, 39184, 39975, 15966, 12399, 3714, 33599, 1829, 12399, 8978, 10874, 14407, 11296, 4032, 5172, 30543, 2423, 26890, 7649, 3215, 12399, 1975, 5016, 8315, 10874, 12610, 8315, 2423, 39184, 39975, 15966, 4032, 5172, 30543, 2423, 26890, 7649, 3215, 7251, 9778, 11622, 1863, 8978, 3714, 30543, 3615, 9957, 12399, 8978, 2423, 39184, 39975, 15966, 4032, 41185, 33599, 22807, 2423, 39184, 39975, 15966, 8961, 9370, 3473], "avg_logprob": -0.3129432641022594, "compression_ratio": 1.927710843373494, "no_speech_prob": 0.0, "words": [{"start": 1366.28, "end": 1366.56, "word": "حد", "probability": 0.6103515625}, {"start": 1366.56, "end": 1366.78, "word": " عنده", "probability": 0.7451171875}, {"start": 1366.78, "end": 1367.6, "word": " سؤال؟", "probability": 0.9136962890625}, {"start": 1367.6, "end": 1370.88, "word": " أحسنت،", "probability": 0.668505859375}, {"start": 1370.88, "end": 1371.16, "word": " هذا", "probability": 0.5859375}, {"start": 1371.16, "end": 1371.44, "word": " سؤال", "probability": 0.990234375}, {"start": 1371.44, "end": 1371.86, "word": " قيم", "probability": 0.550048828125}, {"start": 1371.86, "end": 1372.44, "word": " جدا،", 
"probability": 0.90576171875}, {"start": 1372.44, "end": 1372.82, "word": " بس", "probability": 0.9169921875}, {"start": 1372.82, "end": 1373.02, "word": " قبل", "probability": 0.938232421875}, {"start": 1373.02, "end": 1373.16, "word": " ما", "probability": 0.75244140625}, {"start": 1373.16, "end": 1373.94, "word": " أجاوبك،", "probability": 0.7573649088541666}, {"start": 1373.94, "end": 1374.08, "word": " أنا", "probability": 0.59130859375}, {"start": 1374.08, "end": 1374.22, "word": " جيت", "probability": 0.9140625}, {"start": 1374.22, "end": 1375.06, "word": " بورجي،", "probability": 0.61953125}, {"start": 1375.06, "end": 1375.54, "word": " أنا", "probability": 0.71337890625}, {"start": 1375.54, "end": 1375.72, "word": " جيت", "probability": 0.979248046875}, {"start": 1375.72, "end": 1375.92, "word": " هذا", "probability": 0.828125}, {"start": 1375.92, "end": 1376.08, "word": " يا", "probability": 0.88623046875}, {"start": 1376.08, "end": 1376.5, "word": " شباب،", "probability": 0.813720703125}, {"start": 1376.5, "end": 1377.18, "word": " هذه", "probability": 0.482666015625}, {"start": 1377.18, "end": 1377.42, "word": " صورة", "probability": 0.9625651041666666}, {"start": 1377.42, "end": 1377.82, "word": " معبرة", "probability": 0.837890625}, {"start": 1377.82, "end": 1378.14, "word": " برضه", "probability": 0.8209635416666666}, {"start": 1378.14, "end": 1379.2, "word": " لمين؟", "probability": 0.8214518229166666}, {"start": 1379.2, "end": 1381.52, "word": " للمرادين،", "probability": 0.7803955078125}, {"start": 1381.52, "end": 1381.7, "word": " هي", "probability": 0.7080078125}, {"start": 1381.7, "end": 1381.96, "word": " عبارة", "probability": 0.9815673828125}, {"start": 1381.96, "end": 1382.08, "word": " عن", "probability": 0.837890625}, {"start": 1382.08, "end": 1382.24, "word": " هذه", "probability": 0.32763671875}, {"start": 1382.24, "end": 1383.52, "word": " الـplatelet،", "probability": 0.65537109375}, {"start": 1383.52, "end": 1386.6, "word": " ماشي،", 
"probability": 0.8372802734375}, {"start": 1386.6, "end": 1386.76, "word": " في", "probability": 0.94873046875}, {"start": 1386.76, "end": 1387.22, "word": " جواها", "probability": 0.84228515625}, {"start": 1387.22, "end": 1387.46, "word": " وفوق", "probability": 0.24464925130208334}, {"start": 1387.46, "end": 1387.92, "word": " البراند،", "probability": 0.7380859375}, {"start": 1387.92, "end": 1388.1, "word": " احنا", "probability": 0.7464192708333334}, {"start": 1388.1, "end": 1388.34, "word": " جولنا", "probability": 0.7314453125}, {"start": 1388.34, "end": 1388.84, "word": " الـplatelet", "probability": 0.77056884765625}, {"start": 1388.84, "end": 1389.44, "word": " وفوق", "probability": 0.8595377604166666}, {"start": 1389.44, "end": 1389.84, "word": " البراند", "probability": 0.9881591796875}, {"start": 1389.84, "end": 1390.32, "word": " يخزن", "probability": 0.9537353515625}, {"start": 1390.32, "end": 1390.66, "word": " في", "probability": 0.98974609375}, {"start": 1390.66, "end": 1391.72, "word": " موقعين،", "probability": 0.9294921875}, {"start": 1391.72, "end": 1391.8, "word": " في", "probability": 0.77392578125}, {"start": 1391.8, "end": 1392.3, "word": " الـplatelet", "probability": 0.89013671875}, {"start": 1392.3, "end": 1393.76, "word": " وفياش؟", "probability": 0.6640625}, {"start": 1393.76, "end": 1394.24, "word": " الـplatelet", "probability": 0.7840576171875}, {"start": 1394.24, "end": 1394.52, "word": " alpha", "probability": 0.297607421875}, {"start": 1394.52, "end": 1395.22, "word": " granules", "probability": 0.84814453125}], "temperature": 1.0}, {"id": 52, "seek": 142233, "start": 1398.11, "end": 1422.33, "text": "في الـ endothelial cells في الـ وي بل بلد places فاكرينها؟ مخازن، مخازن، مياه في الماء، فهذه عبارة عن سورمان، هاي ال receptor وهذا الجزيء الأحمر هو عبارة عن الـ von Willebrand يوم يكون فيه pernal sorier disease، هاي ال platelet مافيش عليها ولا receptor، لكن ال von Willebrand موجود ولا لا؟ موجود", "tokens": [41185, 2423, 39184, 917, 
900, 338, 831, 5438, 8978, 2423, 39184, 4032, 1829, 4724, 1211, 4724, 1211, 3215, 3190, 6156, 995, 37983, 9957, 11296, 22807, 3714, 9778, 31377, 1863, 12399, 3714, 9778, 31377, 1863, 12399, 3714, 25528, 3224, 8978, 9673, 16606, 12399, 6156, 3224, 24192, 6225, 3555, 9640, 3660, 18871, 8608, 13063, 2304, 7649, 12399, 8032, 47302, 2423, 32264, 37037, 15730, 25724, 11622, 1829, 38207, 16247, 5016, 29973, 31439, 6225, 3555, 9640, 3660, 18871, 2423, 39184, 2957, 3099, 68, 30476, 7251, 20498, 7251, 30544, 8978, 3224, 680, 4660, 9359, 811, 4752, 12399, 8032, 47302, 2423, 3403, 15966, 19446, 41185, 8592, 25894, 11296, 49429, 32264, 12399, 44381, 2423, 2957, 3099, 68, 30476, 3714, 29245, 23328, 49429, 20193, 22807, 3714, 29245, 23328], "avg_logprob": -0.4075413104916407, "compression_ratio": 1.7808764940239044, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1398.11, "end": 1398.31, "word": "في", "probability": 0.740234375}, {"start": 1398.31, "end": 1398.45, "word": " الـ", "probability": 0.5272216796875}, {"start": 1398.45, "end": 1398.99, "word": " endothelial", "probability": 0.831787109375}, {"start": 1398.99, "end": 1399.39, "word": " cells", "probability": 0.82861328125}, {"start": 1399.39, "end": 1399.85, "word": " في", "probability": 0.578125}, {"start": 1399.85, "end": 1399.97, "word": " الـ", "probability": 0.946044921875}, {"start": 1399.97, "end": 1400.13, "word": " وي", "probability": 0.43463134765625}, {"start": 1400.13, "end": 1400.35, "word": " بل", "probability": 0.5018310546875}, {"start": 1400.35, "end": 1400.71, "word": " بلد", "probability": 0.8359375}, {"start": 1400.71, "end": 1401.25, "word": " places", "probability": 0.52392578125}, {"start": 1401.25, "end": 1402.29, "word": " فاكرينها؟", "probability": 0.850341796875}, {"start": 1402.29, "end": 1402.89, "word": " مخازن،", "probability": 0.6250244140625}, {"start": 1402.89, "end": 1403.37, "word": " مخازن،", "probability": 0.89765625}, {"start": 1403.37, "end": 1403.71, "word": " مياه", 
"probability": 0.5597737630208334}, {"start": 1403.71, "end": 1403.81, "word": " في", "probability": 0.72607421875}, {"start": 1403.81, "end": 1404.53, "word": " الماء،", "probability": 0.4391276041666667}, {"start": 1404.53, "end": 1405.73, "word": " فهذه", "probability": 0.5997721354166666}, {"start": 1405.73, "end": 1405.99, "word": " عبارة", "probability": 0.906494140625}, {"start": 1405.99, "end": 1406.09, "word": " عن", "probability": 0.99267578125}, {"start": 1406.09, "end": 1407.09, "word": " سورمان،", "probability": 0.5888671875}, {"start": 1407.09, "end": 1407.39, "word": " هاي", "probability": 0.6923828125}, {"start": 1407.39, "end": 1407.51, "word": " ال", "probability": 0.94970703125}, {"start": 1407.51, "end": 1407.97, "word": " receptor", "probability": 0.38232421875}, {"start": 1407.97, "end": 1408.91, "word": " وهذا", "probability": 0.6060791015625}, {"start": 1408.91, "end": 1409.33, "word": " الجزيء", "probability": 0.69805908203125}, {"start": 1409.33, "end": 1409.69, "word": " الأحمر", "probability": 0.9661458333333334}, {"start": 1409.69, "end": 1409.87, "word": " هو", "probability": 0.95556640625}, {"start": 1409.87, "end": 1410.09, "word": " عبارة", "probability": 0.9344482421875}, {"start": 1410.09, "end": 1410.19, "word": " عن", "probability": 0.974609375}, {"start": 1410.19, "end": 1410.29, "word": " الـ", "probability": 0.30517578125}, {"start": 1410.29, "end": 1410.39, "word": " von", "probability": 0.1361083984375}, {"start": 1410.39, "end": 1410.77, "word": " Willebrand", "probability": 0.466552734375}, {"start": 1410.77, "end": 1412.47, "word": " يوم", "probability": 0.74462890625}, {"start": 1412.47, "end": 1412.85, "word": " يكون", "probability": 0.89208984375}, {"start": 1412.85, "end": 1413.31, "word": " فيه", "probability": 0.919677734375}, {"start": 1413.31, "end": 1413.91, "word": " pernal", "probability": 0.55206298828125}, {"start": 1413.91, "end": 1414.43, "word": " sorier", "probability": 0.35491943359375}, {"start": 
1414.43, "end": 1415.49, "word": " disease،", "probability": 0.762939453125}, {"start": 1415.49, "end": 1415.69, "word": " هاي", "probability": 0.656005859375}, {"start": 1415.69, "end": 1416.07, "word": " ال", "probability": 0.63232421875}, {"start": 1416.07, "end": 1416.63, "word": " platelet", "probability": 0.5250244140625}, {"start": 1416.63, "end": 1417.13, "word": " مافيش", "probability": 0.9278971354166666}, {"start": 1417.13, "end": 1417.55, "word": " عليها", "probability": 0.98583984375}, {"start": 1417.55, "end": 1417.85, "word": " ولا", "probability": 0.87744140625}, {"start": 1417.85, "end": 1419.37, "word": " receptor،", "probability": 0.6954345703125}, {"start": 1419.37, "end": 1419.53, "word": " لكن", "probability": 0.97607421875}, {"start": 1419.53, "end": 1419.69, "word": " ال", "probability": 0.955078125}, {"start": 1419.69, "end": 1419.85, "word": " von", "probability": 0.83642578125}, {"start": 1419.85, "end": 1420.19, "word": " Willebrand", "probability": 0.837890625}, {"start": 1420.19, "end": 1420.57, "word": " موجود", "probability": 0.98974609375}, {"start": 1420.57, "end": 1420.71, "word": " ولا", "probability": 0.23291015625}, {"start": 1420.71, "end": 1421.63, "word": " لا؟", "probability": 0.796875}, {"start": 1421.63, "end": 1422.33, "word": " موجود", "probability": 0.97607421875}], "temperature": 1.0}, {"id": 53, "seek": 144994, "start": 1423.46, "end": 1449.94, "text": "وهي طبعا لما يكون فيه bowel gland disease ال receptor موجود لكن ال bowel gland غايب كيف بفرق بين المرضين in the laboratory by aggregation study لما زال نتائج واحدة في الجهتين بنفرق كالا آتي انتبه عليها ببساطة بعمل التحس", "tokens": [2407, 3224, 1829, 23032, 3555, 3615, 995, 5296, 15042, 7251, 30544, 8978, 3224, 40094, 43284, 4752, 2423, 32264, 3714, 29245, 23328, 44381, 2423, 40094, 43284, 32771, 47302, 3555, 9122, 33911, 4724, 5172, 2288, 4587, 49374, 9673, 43042, 9957, 294, 264, 16523, 538, 16743, 399, 2979, 5296, 15042, 30767, 6027, 8717, 2655, 16373, 7435, 36764, 
24401, 3660, 8978, 25724, 3224, 2655, 9957, 44945, 5172, 2288, 4587, 9122, 6027, 995, 19753, 31371, 16472, 2655, 3555, 3224, 25894, 11296, 4724, 3555, 3794, 41193, 3660, 4724, 25957, 1211, 16712, 5016, 3794], "avg_logprob": -0.2626065340909091, "compression_ratio": 1.4646017699115044, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1423.46, "end": 1423.84, "word": "وهي", "probability": 0.8055013020833334}, {"start": 1423.84, "end": 1424.14, "word": " طبعا", "probability": 0.984130859375}, {"start": 1424.14, "end": 1424.34, "word": " لما", "probability": 0.747802734375}, {"start": 1424.34, "end": 1424.72, "word": " يكون", "probability": 0.985107421875}, {"start": 1424.72, "end": 1425.8, "word": " فيه", "probability": 0.49981689453125}, {"start": 1425.8, "end": 1425.96, "word": " bowel", "probability": 0.10272216796875}, {"start": 1425.96, "end": 1426.28, "word": " gland", "probability": 0.2802734375}, {"start": 1426.28, "end": 1426.62, "word": " disease", "probability": 0.87744140625}, {"start": 1426.62, "end": 1426.78, "word": " ال", "probability": 0.53857421875}, {"start": 1426.78, "end": 1427.1, "word": " receptor", "probability": 0.2283935546875}, {"start": 1427.1, "end": 1427.54, "word": " موجود", "probability": 0.9371744791666666}, {"start": 1427.54, "end": 1427.84, "word": " لكن", "probability": 0.80322265625}, {"start": 1427.84, "end": 1428.0, "word": " ال", "probability": 0.5107421875}, {"start": 1428.0, "end": 1428.18, "word": " bowel", "probability": 0.7353515625}, {"start": 1428.18, "end": 1428.76, "word": " gland", "probability": 0.94677734375}, {"start": 1428.76, "end": 1430.12, "word": " غايب", "probability": 0.7586263020833334}, {"start": 1430.12, "end": 1431.68, "word": " كيف", "probability": 0.933349609375}, {"start": 1431.68, "end": 1432.26, "word": " بفرق", "probability": 0.9052734375}, {"start": 1432.26, "end": 1432.56, "word": " بين", "probability": 0.98291015625}, {"start": 1432.56, "end": 1433.62, "word": " المرضين", 
"probability": 0.9501953125}, {"start": 1433.62, "end": 1435.84, "word": " in", "probability": 0.9228515625}, {"start": 1435.84, "end": 1436.06, "word": " the", "probability": 0.88330078125}, {"start": 1436.06, "end": 1436.58, "word": " laboratory", "probability": 0.904296875}, {"start": 1436.58, "end": 1437.06, "word": " by", "probability": 0.82470703125}, {"start": 1437.06, "end": 1437.74, "word": " aggregation", "probability": 0.946533203125}, {"start": 1437.74, "end": 1438.3, "word": " study", "probability": 0.91845703125}, {"start": 1438.3, "end": 1440.02, "word": " لما", "probability": 0.45965576171875}, {"start": 1440.02, "end": 1440.32, "word": " زال", "probability": 0.927734375}, {"start": 1440.32, "end": 1440.98, "word": " نتائج", "probability": 0.9886474609375}, {"start": 1440.98, "end": 1441.62, "word": " واحدة", "probability": 0.9713541666666666}, {"start": 1441.62, "end": 1441.74, "word": " في", "probability": 0.94140625}, {"start": 1441.74, "end": 1442.36, "word": " الجهتين", "probability": 0.8963623046875}, {"start": 1442.36, "end": 1443.42, "word": " بنفرق", "probability": 0.8599853515625}, {"start": 1443.42, "end": 1443.82, "word": " كالا", "probability": 0.7069498697916666}, {"start": 1443.82, "end": 1444.3, "word": " آتي", "probability": 0.548828125}, {"start": 1444.3, "end": 1445.46, "word": " انتبه", "probability": 0.823974609375}, {"start": 1445.46, "end": 1445.88, "word": " عليها", "probability": 0.7998046875}, {"start": 1445.88, "end": 1446.64, "word": " ببساطة", "probability": 0.958984375}, {"start": 1446.64, "end": 1449.52, "word": " بعمل", "probability": 0.8092447916666666}, {"start": 1449.52, "end": 1449.94, "word": " التحس", "probability": 0.7138671875}], "temperature": 1.0}, {"id": 54, "seek": 147578, "start": 1458.82, "end": 1475.78, "text": "بعد فحص مرة تانية by addition of normal plasma يعني تحتوي على كل المكونات إذا صار في correction للنتيجة", "tokens": [3555, 22488, 6156, 5016, 9381, 3714, 25720, 6055, 7649, 10632, 538, 4500, 
295, 2710, 22564, 37495, 22653, 6055, 33753, 45865, 15844, 28242, 9673, 4117, 2407, 8315, 2655, 11933, 15730, 20328, 9640, 8978, 19984, 24976, 29399, 1829, 7435, 3660], "avg_logprob": -0.46634614620453274, "compression_ratio": 1.1755725190839694, "no_speech_prob": 8.940696716308594e-07, "words": [{"start": 1458.82, "end": 1460.06, "word": "بعد", "probability": 0.149627685546875}, {"start": 1460.06, "end": 1460.58, "word": " فحص", "probability": 0.7001139322916666}, {"start": 1460.58, "end": 1460.9, "word": " مرة", "probability": 0.68115234375}, {"start": 1460.9, "end": 1461.38, "word": " تانية", "probability": 0.9427083333333334}, {"start": 1461.38, "end": 1462.22, "word": " by", "probability": 0.176513671875}, {"start": 1462.22, "end": 1462.84, "word": " addition", "probability": 0.8818359375}, {"start": 1462.84, "end": 1463.68, "word": " of", "probability": 0.41845703125}, {"start": 1463.68, "end": 1464.18, "word": " normal", "probability": 0.79248046875}, {"start": 1464.18, "end": 1465.04, "word": " plasma", "probability": 0.8955078125}, {"start": 1465.04, "end": 1469.34, "word": " يعني", "probability": 0.52886962890625}, {"start": 1469.34, "end": 1469.8, "word": " تحتوي", "probability": 0.88720703125}, {"start": 1469.8, "end": 1469.96, "word": " على", "probability": 0.8818359375}, {"start": 1469.96, "end": 1470.28, "word": " كل", "probability": 0.97021484375}, {"start": 1470.28, "end": 1471.9, "word": " المكونات", "probability": 0.9322265625}, {"start": 1471.9, "end": 1472.58, "word": " إذا", "probability": 0.7379150390625}, {"start": 1472.58, "end": 1472.94, "word": " صار", "probability": 0.790771484375}, {"start": 1472.94, "end": 1473.08, "word": " في", "probability": 0.81640625}, {"start": 1473.08, "end": 1473.98, "word": " correction", "probability": 0.8876953125}, {"start": 1473.98, "end": 1475.78, "word": " للنتيجة", "probability": 0.861181640625}], "temperature": 1.0}, {"id": 55, "seek": 150062, "start": 1479.92, "end": 1500.62, "text": "as a source of 
normal blood بلازمة اللي أضافناها هي source of mean of normal blood إذا ماصارش فيه connection بيكون في ال .. بيكون في ال نفس الشيء او هي ال differentiation اللي انا حاول احكي حتى اندي سؤال any question يا شباب؟", "tokens": [296, 257, 4009, 295, 2710, 3390, 4724, 1211, 31377, 46007, 13672, 1829, 5551, 11242, 31845, 8315, 11296, 39896, 4009, 295, 914, 295, 2710, 3390, 11933, 15730, 19446, 9381, 9640, 8592, 8978, 3224, 4984, 4724, 1829, 30544, 8978, 2423, 4386, 4724, 1829, 30544, 8978, 2423, 8717, 36178, 25124, 1829, 38207, 1975, 2407, 39896, 2423, 38902, 13672, 1829, 1975, 8315, 11331, 995, 12610, 1975, 5016, 4117, 1829, 11331, 49975, 16472, 16254, 8608, 33604, 6027, 604, 1168, 35186, 13412, 3555, 16758, 22807], "avg_logprob": -0.5394531153142452, "compression_ratio": 1.5784313725490196, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1479.9199999999998, "end": 1480.56, "word": "as", "probability": 0.23046875}, {"start": 1480.56, "end": 1481.2, "word": " a", "probability": 0.8271484375}, {"start": 1481.2, "end": 1481.5, "word": " source", "probability": 0.89892578125}, {"start": 1481.5, "end": 1481.72, "word": " of", "probability": 0.92626953125}, {"start": 1481.72, "end": 1482.0, "word": " normal", "probability": 0.12384033203125}, {"start": 1482.0, "end": 1482.2, "word": " blood", "probability": 0.49609375}, {"start": 1482.2, "end": 1482.78, "word": " بلازمة", "probability": 0.6949462890625}, {"start": 1482.78, "end": 1483.28, "word": " اللي", "probability": 0.77685546875}, {"start": 1483.28, "end": 1484.08, "word": " أضافناها", "probability": 0.797509765625}, {"start": 1484.08, "end": 1484.5, "word": " هي", "probability": 0.78662109375}, {"start": 1484.5, "end": 1484.84, "word": " source", "probability": 0.921875}, {"start": 1484.84, "end": 1485.08, "word": " of", "probability": 0.97119140625}, {"start": 1485.08, "end": 1485.4, "word": " mean", "probability": 0.40625}, {"start": 1485.4, "end": 1486.28, "word": " of", "probability": 0.90673828125}, 
{"start": 1486.28, "end": 1486.6, "word": " normal", "probability": 0.921875}, {"start": 1486.6, "end": 1487.0, "word": " blood", "probability": 0.98095703125}, {"start": 1487.0, "end": 1487.42, "word": " إذا", "probability": 0.821044921875}, {"start": 1487.42, "end": 1487.82, "word": " ماصارش", "probability": 0.75543212890625}, {"start": 1487.82, "end": 1487.96, "word": " فيه", "probability": 0.6484375}, {"start": 1487.96, "end": 1488.36, "word": " connection", "probability": 0.8916015625}, {"start": 1488.36, "end": 1489.38, "word": " بيكون", "probability": 0.5089925130208334}, {"start": 1489.38, "end": 1489.52, "word": " في", "probability": 0.7861328125}, {"start": 1489.52, "end": 1489.76, "word": " ال", "probability": 0.7255859375}, {"start": 1489.76, "end": 1490.0, "word": " ..", "probability": 0.1278076171875}, {"start": 1490.0, "end": 1490.0, "word": " بيكون", "probability": 0.6599324544270834}, {"start": 1490.0, "end": 1490.16, "word": " في", "probability": 0.7744140625}, {"start": 1490.16, "end": 1490.22, "word": " ال", "probability": 0.64404296875}, {"start": 1490.22, "end": 1490.98, "word": " نفس", "probability": 0.54229736328125}, {"start": 1490.98, "end": 1491.12, "word": " الشيء", "probability": 0.538330078125}, {"start": 1491.12, "end": 1493.84, "word": " او", "probability": 0.21307373046875}, {"start": 1493.84, "end": 1493.98, "word": " هي", "probability": 0.57958984375}, {"start": 1493.98, "end": 1494.04, "word": " ال", "probability": 0.873046875}, {"start": 1494.04, "end": 1494.6, "word": " differentiation", "probability": 0.9150390625}, {"start": 1494.6, "end": 1495.8, "word": " اللي", "probability": 0.844970703125}, {"start": 1495.8, "end": 1496.02, "word": " انا", "probability": 0.446044921875}, {"start": 1496.02, "end": 1496.22, "word": " حاول", "probability": 0.7132975260416666}, {"start": 1496.22, "end": 1496.42, "word": " احكي", "probability": 0.7740478515625}, {"start": 1496.42, "end": 1496.98, "word": " حتى", "probability": 0.53466796875}, 
{"start": 1496.98, "end": 1497.18, "word": " اندي", "probability": 0.2890625}, {"start": 1497.18, "end": 1498.42, "word": " سؤال", "probability": 0.98876953125}, {"start": 1498.42, "end": 1499.58, "word": " any", "probability": 0.52734375}, {"start": 1499.58, "end": 1499.94, "word": " question", "probability": 0.83154296875}, {"start": 1499.94, "end": 1500.12, "word": " يا", "probability": 0.14697265625}, {"start": 1500.12, "end": 1500.62, "word": " شباب؟", "probability": 0.884033203125}], "temperature": 1.0}, {"id": 56, "seek": 152901, "start": 1502.75, "end": 1529.01, "text": "ناخد المرض التاني وهو defects in platelet-platelet interaction او in aggregation هداك كان خلل في ال adhesion صح؟ هان الخلل وين؟ في ال aggregation و جانوا في ال aggregation في two important disease واحد اسمه congenital a-fibrinogenemia اش اسمه؟ congenital a-fibrinogenemia يعني", "tokens": [1863, 47283, 3215, 9673, 43042, 16712, 7649, 1829, 37037, 2407, 32655, 294, 3403, 15966, 12, 39975, 15966, 9285, 1975, 2407, 294, 16743, 399, 8032, 28259, 4117, 25961, 16490, 1211, 1211, 8978, 2423, 614, 38571, 20328, 5016, 22807, 8032, 7649, 33962, 1211, 1211, 4032, 9957, 22807, 8978, 2423, 16743, 399, 4032, 10874, 7649, 14407, 8978, 2423, 16743, 399, 8978, 732, 1021, 4752, 36764, 24401, 24525, 2304, 3224, 416, 1766, 1686, 257, 12, 69, 6414, 259, 8799, 14058, 1975, 8592, 24525, 2304, 3224, 22807, 416, 1766, 1686, 257, 12, 69, 6414, 259, 8799, 14058, 37495, 22653], "avg_logprob": -0.2618421109099137, "compression_ratio": 1.6822429906542056, "no_speech_prob": 0.0, "words": [{"start": 1502.75, "end": 1503.15, "word": "ناخد", "probability": 0.666259765625}, {"start": 1503.15, "end": 1504.27, "word": " المرض", "probability": 0.905517578125}, {"start": 1504.27, "end": 1504.73, "word": " التاني", "probability": 0.86083984375}, {"start": 1504.73, "end": 1505.59, "word": " وهو", "probability": 0.7078857421875}, {"start": 1505.59, "end": 1506.23, "word": " defects", "probability": 0.52197265625}, {"start": 1506.23, 
"end": 1506.55, "word": " in", "probability": 0.9091796875}, {"start": 1506.55, "end": 1506.91, "word": " platelet", "probability": 0.7451171875}, {"start": 1506.91, "end": 1507.35, "word": "-platelet", "probability": 0.7098795572916666}, {"start": 1507.35, "end": 1508.03, "word": " interaction", "probability": 0.8603515625}, {"start": 1508.03, "end": 1508.43, "word": " او", "probability": 0.783935546875}, {"start": 1508.43, "end": 1509.33, "word": " in", "probability": 0.65771484375}, {"start": 1509.33, "end": 1510.05, "word": " aggregation", "probability": 0.93896484375}, {"start": 1510.05, "end": 1510.45, "word": " هداك", "probability": 0.37744140625}, {"start": 1510.45, "end": 1510.81, "word": " كان", "probability": 0.9794921875}, {"start": 1510.81, "end": 1511.29, "word": " خلل", "probability": 0.8815104166666666}, {"start": 1511.29, "end": 1511.47, "word": " في", "probability": 0.88720703125}, {"start": 1511.47, "end": 1511.57, "word": " ال", "probability": 0.97900390625}, {"start": 1511.57, "end": 1512.11, "word": " adhesion", "probability": 0.6478271484375}, {"start": 1512.11, "end": 1512.93, "word": " صح؟", "probability": 0.6975911458333334}, {"start": 1512.93, "end": 1513.29, "word": " هان", "probability": 0.51080322265625}, {"start": 1513.29, "end": 1513.73, "word": " الخلل", "probability": 0.9296875}, {"start": 1513.73, "end": 1514.49, "word": " وين؟", "probability": 0.9026692708333334}, {"start": 1514.49, "end": 1514.65, "word": " في", "probability": 0.802734375}, {"start": 1514.65, "end": 1514.75, "word": " ال", "probability": 0.97265625}, {"start": 1514.75, "end": 1515.31, "word": " aggregation", "probability": 0.975341796875}, {"start": 1515.31, "end": 1515.45, "word": " و", "probability": 0.71923828125}, {"start": 1515.45, "end": 1515.89, "word": " جانوا", "probability": 0.62060546875}, {"start": 1515.89, "end": 1516.41, "word": " في", "probability": 0.8701171875}, {"start": 1516.41, "end": 1516.53, "word": " ال", "probability": 0.96875}, {"start": 
1516.53, "end": 1517.05, "word": " aggregation", "probability": 0.9833984375}, {"start": 1517.05, "end": 1517.31, "word": " في", "probability": 0.9375}, {"start": 1517.31, "end": 1517.87, "word": " two", "probability": 0.8515625}, {"start": 1517.87, "end": 1520.17, "word": " important", "probability": 0.943359375}, {"start": 1520.17, "end": 1521.15, "word": " disease", "probability": 0.580078125}, {"start": 1521.15, "end": 1522.01, "word": " واحد", "probability": 0.9443359375}, {"start": 1522.01, "end": 1522.39, "word": " اسمه", "probability": 0.966796875}, {"start": 1522.39, "end": 1523.21, "word": " congenital", "probability": 0.9222005208333334}, {"start": 1523.21, "end": 1523.51, "word": " a", "probability": 0.1961669921875}, {"start": 1523.51, "end": 1524.55, "word": "-fibrinogenemia", "probability": 0.8352864583333334}, {"start": 1524.55, "end": 1526.11, "word": " اش", "probability": 0.649658203125}, {"start": 1526.11, "end": 1526.67, "word": " اسمه؟", "probability": 0.83349609375}, {"start": 1526.67, "end": 1527.37, "word": " congenital", "probability": 0.9456380208333334}, {"start": 1527.37, "end": 1527.73, "word": " a", "probability": 0.93359375}, {"start": 1527.73, "end": 1528.63, "word": "-fibrinogenemia", "probability": 0.927978515625}, {"start": 1528.63, "end": 1529.01, "word": " يعني", "probability": 0.936767578125}], "temperature": 1.0}, {"id": 57, "seek": 155321, "start": 1530.61, "end": 1553.21, "text": "غياب في الـ fibrinogen ايه يعني ابسط صح؟ غياب في الـ fibrinogen و احنا عارفين ان الـ fibrinogen هو اللي .. هو اللي مربط زي اللي في البرهيل في الاليجين صح؟ او بكل .. 
اجلاز من بستينيا بيكون فيه غياب في ال receptor بتاع ال aggregation وهو", "tokens": [17082, 1829, 16758, 8978, 2423, 39184, 283, 6414, 259, 8799, 1975, 1829, 3224, 37495, 22653, 1975, 3555, 3794, 9566, 20328, 5016, 22807, 32771, 1829, 16758, 8978, 2423, 39184, 283, 6414, 259, 8799, 4032, 1975, 5016, 8315, 6225, 9640, 5172, 9957, 16472, 2423, 39184, 283, 6414, 259, 8799, 31439, 13672, 1829, 4386, 31439, 13672, 1829, 3714, 25513, 9566, 30767, 1829, 13672, 1829, 8978, 2423, 26890, 3224, 26895, 8978, 2423, 6027, 1829, 7435, 9957, 20328, 5016, 22807, 1975, 2407, 4724, 28820, 4386, 1975, 7435, 1211, 31377, 9154, 4724, 14851, 9957, 25528, 4724, 1829, 30544, 8978, 3224, 32771, 1829, 16758, 8978, 2423, 32264, 39894, 45761, 2423, 16743, 399, 37037, 2407], "avg_logprob": -0.42476852652099395, "compression_ratio": 1.883248730964467, "no_speech_prob": 0.0, "words": [{"start": 1530.61, "end": 1531.25, "word": "غياب", "probability": 0.8902994791666666}, {"start": 1531.25, "end": 1531.47, "word": " في", "probability": 0.8984375}, {"start": 1531.47, "end": 1531.63, "word": " الـ", "probability": 0.689208984375}, {"start": 1531.63, "end": 1532.33, "word": " fibrinogen", "probability": 0.68914794921875}, {"start": 1532.33, "end": 1533.19, "word": " ايه", "probability": 0.4283447265625}, {"start": 1533.19, "end": 1534.05, "word": " يعني", "probability": 0.7156982421875}, {"start": 1534.05, "end": 1534.67, "word": " ابسط", "probability": 0.692352294921875}, {"start": 1534.67, "end": 1536.13, "word": " صح؟", "probability": 0.6119791666666666}, {"start": 1536.13, "end": 1538.33, "word": " غياب", "probability": 0.96728515625}, {"start": 1538.33, "end": 1538.51, "word": " في", "probability": 0.9580078125}, {"start": 1538.51, "end": 1538.71, "word": " الـ", "probability": 0.65380859375}, {"start": 1538.71, "end": 1539.35, "word": " fibrinogen", "probability": 0.9014892578125}, {"start": 1539.35, "end": 1539.95, "word": " و", "probability": 0.75341796875}, {"start": 1539.95, "end": 1540.15, 
"word": " احنا", "probability": 0.8538411458333334}, {"start": 1540.15, "end": 1540.47, "word": " عارفين", "probability": 0.9720458984375}, {"start": 1540.47, "end": 1540.59, "word": " ان", "probability": 0.841796875}, {"start": 1540.59, "end": 1540.79, "word": " الـ", "probability": 0.78857421875}, {"start": 1540.79, "end": 1541.23, "word": " fibrinogen", "probability": 0.9395751953125}, {"start": 1541.23, "end": 1541.41, "word": " هو", "probability": 0.97900390625}, {"start": 1541.41, "end": 1541.57, "word": " اللي", "probability": 0.718505859375}, {"start": 1541.57, "end": 1541.87, "word": " ..", "probability": 0.199951171875}, {"start": 1541.87, "end": 1543.21, "word": " هو", "probability": 0.76904296875}, {"start": 1543.21, "end": 1543.39, "word": " اللي", "probability": 0.9609375}, {"start": 1543.39, "end": 1543.79, "word": " مربط", "probability": 0.7809244791666666}, {"start": 1543.79, "end": 1543.99, "word": " زي", "probability": 0.90283203125}, {"start": 1543.99, "end": 1544.15, "word": " اللي", "probability": 0.52301025390625}, {"start": 1544.15, "end": 1544.21, "word": " في", "probability": 0.50537109375}, {"start": 1544.21, "end": 1544.65, "word": " البرهيل", "probability": 0.473968505859375}, {"start": 1544.65, "end": 1544.77, "word": " في", "probability": 0.261962890625}, {"start": 1544.77, "end": 1545.39, "word": " الاليجين", "probability": 0.6709228515625}, {"start": 1545.39, "end": 1546.91, "word": " صح؟", "probability": 0.9039713541666666}, {"start": 1546.91, "end": 1547.23, "word": " او", "probability": 0.898681640625}, {"start": 1547.23, "end": 1547.57, "word": " بكل", "probability": 0.549072265625}, {"start": 1547.57, "end": 1547.89, "word": " ..", "probability": 0.2568359375}, {"start": 1547.89, "end": 1548.35, "word": " اجلاز", "probability": 0.5392608642578125}, {"start": 1548.35, "end": 1548.49, "word": " من", "probability": 0.556640625}, {"start": 1548.49, "end": 1549.53, "word": " بستينيا", "probability": 0.603668212890625}, {"start": 
1549.53, "end": 1550.17, "word": " بيكون", "probability": 0.745849609375}, {"start": 1550.17, "end": 1550.41, "word": " فيه", "probability": 0.737548828125}, {"start": 1550.41, "end": 1550.69, "word": " غياب", "probability": 0.98876953125}, {"start": 1550.69, "end": 1550.81, "word": " في", "probability": 0.92041015625}, {"start": 1550.81, "end": 1550.85, "word": " ال", "probability": 0.9677734375}, {"start": 1550.85, "end": 1551.33, "word": " receptor", "probability": 0.34423828125}, {"start": 1551.33, "end": 1552.11, "word": " بتاع", "probability": 0.8388671875}, {"start": 1552.11, "end": 1552.25, "word": " ال", "probability": 0.96533203125}, {"start": 1552.25, "end": 1552.81, "word": " aggregation", "probability": 0.894287109375}, {"start": 1552.81, "end": 1553.21, "word": " وهو", "probability": 0.750244140625}], "temperature": 1.0}, {"id": 58, "seek": 158727, "start": 1559.03, "end": 1587.27, "text": "3A مظبوط يعني زي الفرد الأول يعني يا غياب في ال receptor يا غياب في ال legend يا غياب في ال receptor يا غياب في ال legend و هنشوفه في glansman glansman ده ناسم عالم سويسري أو سويدي عفوا هو طبيب أطفال اكتشف هذا المرض واكتشفه في مرحلة مبكرة جدا", "tokens": [18, 32, 3714, 19913, 3555, 2407, 9566, 37495, 22653, 30767, 1829, 27188, 2288, 3215, 16247, 12610, 37495, 22653, 35186, 32771, 1829, 16758, 8978, 2423, 32264, 35186, 32771, 1829, 16758, 8978, 2423, 9451, 35186, 32771, 1829, 16758, 8978, 2423, 32264, 35186, 32771, 1829, 16758, 8978, 2423, 9451, 4032, 8032, 1863, 8592, 38688, 3224, 8978, 1563, 599, 1601, 1563, 599, 1601, 11778, 3224, 8717, 32277, 2304, 6225, 45340, 8608, 45865, 3794, 16572, 34051, 8608, 2407, 25708, 1829, 6225, 5172, 14407, 31439, 23032, 21292, 3555, 5551, 9566, 5172, 6027, 1975, 4117, 2655, 8592, 5172, 23758, 9673, 43042, 36764, 4117, 2655, 8592, 5172, 3224, 8978, 3714, 2288, 5016, 37977, 3714, 3555, 4117, 25720, 10874, 28259], "avg_logprob": -0.20242745469191245, "compression_ratio": 1.945, "no_speech_prob": 1.1920928955078125e-07, "words": 
[{"start": 1559.03, "end": 1559.61, "word": "3A", "probability": 0.751708984375}, {"start": 1559.61, "end": 1560.31, "word": " مظبوط", "probability": 0.59677734375}, {"start": 1560.31, "end": 1561.47, "word": " يعني", "probability": 0.868896484375}, {"start": 1561.47, "end": 1561.77, "word": " زي", "probability": 0.91943359375}, {"start": 1561.77, "end": 1562.45, "word": " الفرد", "probability": 0.6040852864583334}, {"start": 1562.45, "end": 1562.67, "word": " الأول", "probability": 0.8388671875}, {"start": 1562.67, "end": 1562.91, "word": " يعني", "probability": 0.911376953125}, {"start": 1562.91, "end": 1563.09, "word": " يا", "probability": 0.75439453125}, {"start": 1563.09, "end": 1563.39, "word": " غياب", "probability": 0.9684244791666666}, {"start": 1563.39, "end": 1563.51, "word": " في", "probability": 0.9228515625}, {"start": 1563.51, "end": 1563.55, "word": " ال", "probability": 0.97216796875}, {"start": 1563.55, "end": 1563.89, "word": " receptor", "probability": 0.1849365234375}, {"start": 1563.89, "end": 1564.07, "word": " يا", "probability": 0.8212890625}, {"start": 1564.07, "end": 1564.27, "word": " غياب", "probability": 0.9900716145833334}, {"start": 1564.27, "end": 1564.41, "word": " في", "probability": 0.98974609375}, {"start": 1564.41, "end": 1564.51, "word": " ال", "probability": 0.8544921875}, {"start": 1564.51, "end": 1564.79, "word": " legend", "probability": 0.3271484375}, {"start": 1564.79, "end": 1565.03, "word": " يا", "probability": 0.7265625}, {"start": 1565.03, "end": 1565.25, "word": " غياب", "probability": 0.9864908854166666}, {"start": 1565.25, "end": 1565.37, "word": " في", "probability": 0.9814453125}, {"start": 1565.37, "end": 1565.41, "word": " ال", "probability": 0.97119140625}, {"start": 1565.41, "end": 1565.75, "word": " receptor", "probability": 0.95751953125}, {"start": 1565.75, "end": 1565.95, "word": " يا", "probability": 0.9306640625}, {"start": 1565.95, "end": 1566.23, "word": " غياب", "probability": 0.9886067708333334}, 
{"start": 1566.23, "end": 1566.43, "word": " في", "probability": 0.8671875}, {"start": 1566.43, "end": 1566.55, "word": " ال", "probability": 0.79443359375}, {"start": 1566.55, "end": 1567.53, "word": " legend", "probability": 0.9775390625}, {"start": 1567.53, "end": 1568.53, "word": " و", "probability": 0.76953125}, {"start": 1568.53, "end": 1569.39, "word": " هنشوفه", "probability": 0.720068359375}, {"start": 1569.39, "end": 1570.19, "word": " في", "probability": 0.892578125}, {"start": 1570.19, "end": 1570.85, "word": " glansman", "probability": 0.4934895833333333}, {"start": 1570.85, "end": 1572.89, "word": " glansman", "probability": 0.81689453125}, {"start": 1572.89, "end": 1573.03, "word": " ده", "probability": 0.873779296875}, {"start": 1573.03, "end": 1573.39, "word": " ناسم", "probability": 0.7989908854166666}, {"start": 1573.39, "end": 1573.69, "word": " عالم", "probability": 0.9814453125}, {"start": 1573.69, "end": 1574.29, "word": " سويسري", "probability": 0.892578125}, {"start": 1574.29, "end": 1575.87, "word": " أو", "probability": 0.68896484375}, {"start": 1575.87, "end": 1576.79, "word": " سويدي", "probability": 0.72900390625}, {"start": 1576.79, "end": 1577.17, "word": " عفوا", "probability": 0.8478190104166666}, {"start": 1577.17, "end": 1580.11, "word": " هو", "probability": 0.439697265625}, {"start": 1580.11, "end": 1580.51, "word": " طبيب", "probability": 0.99560546875}, {"start": 1580.51, "end": 1580.93, "word": " أطفال", "probability": 0.9549560546875}, {"start": 1580.93, "end": 1582.19, "word": " اكتشف", "probability": 0.98017578125}, {"start": 1582.19, "end": 1582.43, "word": " هذا", "probability": 0.94140625}, {"start": 1582.43, "end": 1582.83, "word": " المرض", "probability": 0.989013671875}, {"start": 1582.83, "end": 1584.85, "word": " واكتشفه", "probability": 0.9340006510416666}, {"start": 1584.85, "end": 1585.45, "word": " في", "probability": 0.9599609375}, {"start": 1585.45, "end": 1586.45, "word": " مرحلة", "probability": 
0.964111328125}, {"start": 1586.45, "end": 1586.93, "word": " مبكرة", "probability": 0.9403076171875}, {"start": 1586.93, "end": 1587.27, "word": " جدا", "probability": 0.994873046875}], "temperature": 1.0}, {"id": 59, "seek": 161283, "start": 1587.85, "end": 1612.83, "text": "وهو هالي صورته وجال انه غياب في ال glazman disease يكون غياب ل receptor 2b3a ولمّا اجوا يدرسوا ال 2b3a receptors لجوا انه في 80 ألف نسقة من ال receptor هذا موجود على كل platelet ماشي و هو عبارة عن برضه complex molecule", "tokens": [2407, 3224, 2407, 8032, 6027, 1829, 20328, 13063, 47395, 4032, 7435, 6027, 16472, 3224, 32771, 1829, 16758, 8978, 2423, 8771, 89, 1601, 4752, 7251, 30544, 32771, 1829, 16758, 5296, 32264, 568, 65, 18, 64, 4032, 19528, 11703, 995, 1975, 7435, 14407, 7251, 3215, 2288, 3794, 14407, 2423, 568, 65, 18, 64, 34102, 5296, 7435, 14407, 16472, 3224, 8978, 4688, 5551, 46538, 8717, 3794, 28671, 9154, 2423, 32264, 23758, 3714, 29245, 23328, 15844, 28242, 3403, 15966, 3714, 33599, 1829, 4032, 31439, 6225, 3555, 9640, 3660, 18871, 4724, 43042, 3224, 3997, 15582], "avg_logprob": -0.2529189580089443, "compression_ratio": 1.4570135746606334, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1587.85, "end": 1588.19, "word": "وهو", "probability": 0.8562825520833334}, {"start": 1588.19, "end": 1588.69, "word": " هالي", "probability": 0.4737141927083333}, {"start": 1588.69, "end": 1589.33, "word": " صورته", "probability": 0.9640299479166666}, {"start": 1589.33, "end": 1592.79, "word": " وجال", "probability": 0.6741536458333334}, {"start": 1592.79, "end": 1593.05, "word": " انه", "probability": 0.721435546875}, {"start": 1593.05, "end": 1593.57, "word": " غياب", "probability": 0.9733072916666666}, {"start": 1593.57, "end": 1594.65, "word": " في", "probability": 0.78515625}, {"start": 1594.65, "end": 1594.75, "word": " ال", "probability": 0.701171875}, {"start": 1594.75, "end": 1595.33, "word": " glazman", "probability": 0.501708984375}, {"start": 1595.33, "end": 1596.01, 
"word": " disease", "probability": 0.77001953125}, {"start": 1596.01, "end": 1596.69, "word": " يكون", "probability": 0.75048828125}, {"start": 1596.69, "end": 1597.11, "word": " غياب", "probability": 0.9866536458333334}, {"start": 1597.11, "end": 1597.23, "word": " ل", "probability": 0.77294921875}, {"start": 1597.23, "end": 1597.65, "word": " receptor", "probability": 0.14013671875}, {"start": 1597.65, "end": 1598.77, "word": " 2b3a", "probability": 0.82763671875}, {"start": 1598.77, "end": 1600.67, "word": " ولمّا", "probability": 0.7919921875}, {"start": 1600.67, "end": 1600.91, "word": " اجوا", "probability": 0.728759765625}, {"start": 1600.91, "end": 1601.33, "word": " يدرسوا", "probability": 0.98955078125}, {"start": 1601.33, "end": 1601.43, "word": " ال", "probability": 0.94384765625}, {"start": 1601.43, "end": 1602.35, "word": " 2b3a", "probability": 0.9329833984375}, {"start": 1602.35, "end": 1603.03, "word": " receptors", "probability": 0.76220703125}, {"start": 1603.03, "end": 1603.43, "word": " لجوا", "probability": 0.7859700520833334}, {"start": 1603.43, "end": 1603.65, "word": " انه", "probability": 0.6295166015625}, {"start": 1603.65, "end": 1603.77, "word": " في", "probability": 0.95068359375}, {"start": 1603.77, "end": 1604.27, "word": " 80", "probability": 0.57421875}, {"start": 1604.27, "end": 1604.59, "word": " ألف", "probability": 0.851806640625}, {"start": 1604.59, "end": 1605.01, "word": " نسقة", "probability": 0.8720703125}, {"start": 1605.01, "end": 1605.25, "word": " من", "probability": 0.9921875}, {"start": 1605.25, "end": 1605.37, "word": " ال", "probability": 0.97509765625}, {"start": 1605.37, "end": 1605.73, "word": " receptor", "probability": 0.92138671875}, {"start": 1605.73, "end": 1606.01, "word": " هذا", "probability": 0.666015625}, {"start": 1606.01, "end": 1606.39, "word": " موجود", "probability": 0.9884440104166666}, {"start": 1606.39, "end": 1606.57, "word": " على", "probability": 0.755859375}, {"start": 1606.57, "end": 
1607.05, "word": " كل", "probability": 0.99267578125}, {"start": 1607.05, "end": 1607.65, "word": " platelet", "probability": 0.5887451171875}, {"start": 1607.65, "end": 1609.55, "word": " ماشي", "probability": 0.8927408854166666}, {"start": 1609.55, "end": 1609.67, "word": " و", "probability": 0.53515625}, {"start": 1609.67, "end": 1609.93, "word": " هو", "probability": 0.66455078125}, {"start": 1609.93, "end": 1610.29, "word": " عبارة", "probability": 0.9227294921875}, {"start": 1610.29, "end": 1610.47, "word": " عن", "probability": 0.99267578125}, {"start": 1610.47, "end": 1610.95, "word": " برضه", "probability": 0.9611002604166666}, {"start": 1610.95, "end": 1612.09, "word": " complex", "probability": 0.9111328125}, {"start": 1612.09, "end": 1612.83, "word": " molecule", "probability": 0.7197265625}], "temperature": 1.0}, {"id": 60, "seek": 163971, "start": 1614.55, "end": 1639.71, "text": "بيعتمد على الـ Calcium في ربطته، في تجميعته، in its complexity وإنه موجود في الـ gene تبعه على كرموزوم سبعتار وإن كل أرواح ال mutation لجوها في هذا ال receptor، في هذا ال gene", "tokens": [21292, 34268, 2304, 3215, 15844, 2423, 39184, 3511, 19324, 8978, 12602, 3555, 9566, 47395, 12399, 8978, 6055, 7435, 2304, 40228, 47395, 12399, 294, 1080, 14024, 4032, 28814, 1863, 3224, 3714, 29245, 23328, 8978, 2423, 39184, 12186, 6055, 3555, 3615, 3224, 15844, 9122, 2288, 2304, 2407, 11622, 20498, 8608, 3555, 34268, 9640, 4032, 28814, 1863, 28242, 5551, 2288, 14407, 5016, 2423, 27960, 5296, 7435, 2407, 11296, 8978, 23758, 2423, 32264, 12399, 8978, 23758, 2423, 12186], "avg_logprob": -0.19812500596046448, "compression_ratio": 1.4808743169398908, "no_speech_prob": 0.0, "words": [{"start": 1614.55, "end": 1615.19, "word": "بيعتمد", "probability": 0.8829345703125}, {"start": 1615.19, "end": 1615.41, "word": " على", "probability": 0.89111328125}, {"start": 1615.41, "end": 1615.81, "word": " الـ", "probability": 0.5732421875}, {"start": 1615.81, "end": 1616.49, "word": " Calcium", 
"probability": 0.8662109375}, {"start": 1616.49, "end": 1617.57, "word": " في", "probability": 0.91943359375}, {"start": 1617.57, "end": 1619.51, "word": " ربطته،", "probability": 0.84716796875}, {"start": 1619.51, "end": 1619.63, "word": " في", "probability": 0.962890625}, {"start": 1619.63, "end": 1620.61, "word": " تجميعته،", "probability": 0.9085286458333334}, {"start": 1620.61, "end": 1621.23, "word": " in", "probability": 0.796875}, {"start": 1621.23, "end": 1621.43, "word": " its", "probability": 0.88818359375}, {"start": 1621.43, "end": 1622.09, "word": " complexity", "probability": 0.9296875}, {"start": 1622.09, "end": 1624.97, "word": " وإنه", "probability": 0.7589111328125}, {"start": 1624.97, "end": 1625.49, "word": " موجود", "probability": 0.9923502604166666}, {"start": 1625.49, "end": 1625.93, "word": " في", "probability": 0.7578125}, {"start": 1625.93, "end": 1626.05, "word": " الـ", "probability": 0.685791015625}, {"start": 1626.05, "end": 1626.27, "word": " gene", "probability": 0.73046875}, {"start": 1626.27, "end": 1626.75, "word": " تبعه", "probability": 0.889892578125}, {"start": 1626.75, "end": 1626.97, "word": " على", "probability": 0.89599609375}, {"start": 1626.97, "end": 1627.65, "word": " كرموزوم", "probability": 0.8177083333333334}, {"start": 1627.65, "end": 1629.45, "word": " سبعتار", "probability": 0.828369140625}, {"start": 1629.45, "end": 1630.77, "word": " وإن", "probability": 0.8723958333333334}, {"start": 1630.77, "end": 1631.07, "word": " كل", "probability": 0.94970703125}, {"start": 1631.07, "end": 1631.73, "word": " أرواح", "probability": 0.7064208984375}, {"start": 1631.73, "end": 1632.05, "word": " ال", "probability": 0.943359375}, {"start": 1632.05, "end": 1632.69, "word": " mutation", "probability": 0.59912109375}, {"start": 1632.69, "end": 1634.95, "word": " لجوها", "probability": 0.8101806640625}, {"start": 1634.95, "end": 1635.17, "word": " في", "probability": 0.9833984375}, {"start": 1635.17, "end": 1635.59, "word": " 
هذا", "probability": 0.96826171875}, {"start": 1635.59, "end": 1638.01, "word": " ال", "probability": 0.97265625}, {"start": 1638.01, "end": 1639.03, "word": " receptor،", "probability": 0.611083984375}, {"start": 1639.03, "end": 1639.13, "word": " في", "probability": 0.9765625}, {"start": 1639.13, "end": 1639.37, "word": " هذا", "probability": 0.9794921875}, {"start": 1639.37, "end": 1639.51, "word": " ال", "probability": 0.90234375}, {"start": 1639.51, "end": 1639.71, "word": " gene", "probability": 0.7880859375}], "temperature": 1.0}, {"id": 61, "seek": 166980, "start": 1640.52, "end": 1669.8, "text": "وإنه محصل عن هذه الجين الـ mutation إما quantitative وإما qualitative أكبر منها وهذه الكرمزومات التانية طيب الخلل اللي موجود في هذا المرض ال glazman هو غياب في ال aggregation مدام في خلل في ال receptor تبع ال aggregation يبقى في غياب في aggregation", "tokens": [2407, 28814, 1863, 3224, 3714, 5016, 36520, 18871, 29538, 25724, 9957, 2423, 39184, 27960, 11933, 15042, 27778, 4032, 28814, 15042, 31312, 5551, 4117, 26890, 9154, 11296, 4032, 3224, 24192, 33251, 2288, 2304, 11622, 20498, 9307, 16712, 7649, 10632, 23032, 1829, 3555, 33962, 1211, 1211, 13672, 1829, 3714, 29245, 23328, 8978, 23758, 9673, 43042, 2423, 8771, 89, 1601, 31439, 32771, 1829, 16758, 8978, 2423, 16743, 399, 3714, 3215, 10943, 8978, 16490, 1211, 1211, 8978, 2423, 32264, 6055, 3555, 3615, 2423, 16743, 399, 7251, 3555, 4587, 7578, 8978, 32771, 1829, 16758, 8978, 16743, 399], "avg_logprob": -0.3398857462790705, "compression_ratio": 1.7155963302752293, "no_speech_prob": 0.0, "words": [{"start": 1640.52, "end": 1641.32, "word": "وإنه", "probability": 0.68951416015625}, {"start": 1641.32, "end": 1642.0, "word": " محصل", "probability": 0.681396484375}, {"start": 1642.0, "end": 1642.22, "word": " عن", "probability": 0.27392578125}, {"start": 1642.22, "end": 1642.56, "word": " هذه", "probability": 0.830078125}, {"start": 1642.56, "end": 1643.0, "word": " الجين", "probability": 0.809326171875}, {"start": 
1643.0, "end": 1643.22, "word": " الـ", "probability": 0.492919921875}, {"start": 1643.22, "end": 1643.74, "word": " mutation", "probability": 0.73681640625}, {"start": 1643.74, "end": 1644.4, "word": " إما", "probability": 0.861328125}, {"start": 1644.4, "end": 1646.3, "word": " quantitative", "probability": 0.880859375}, {"start": 1646.3, "end": 1647.0, "word": " وإما", "probability": 0.8343098958333334}, {"start": 1647.0, "end": 1647.88, "word": " qualitative", "probability": 0.994140625}, {"start": 1647.88, "end": 1649.4, "word": " أكبر", "probability": 0.74658203125}, {"start": 1649.4, "end": 1649.8, "word": " منها", "probability": 0.33563232421875}, {"start": 1649.8, "end": 1650.58, "word": " وهذه", "probability": 0.5907389322916666}, {"start": 1650.58, "end": 1651.4, "word": " الكرمزومات", "probability": 0.6385498046875}, {"start": 1651.4, "end": 1652.08, "word": " التانية", "probability": 0.6766764322916666}, {"start": 1652.08, "end": 1658.18, "word": " طيب", "probability": 0.7042643229166666}, {"start": 1658.18, "end": 1658.8, "word": " الخلل", "probability": 0.9226888020833334}, {"start": 1658.8, "end": 1659.16, "word": " اللي", "probability": 0.78955078125}, {"start": 1659.16, "end": 1659.54, "word": " موجود", "probability": 0.9884440104166666}, {"start": 1659.54, "end": 1659.66, "word": " في", "probability": 0.955078125}, {"start": 1659.66, "end": 1659.92, "word": " هذا", "probability": 0.64111328125}, {"start": 1659.92, "end": 1660.3, "word": " المرض", "probability": 0.98583984375}, {"start": 1660.3, "end": 1660.6, "word": " ال", "probability": 0.498046875}, {"start": 1660.6, "end": 1661.16, "word": " glazman", "probability": 0.536376953125}, {"start": 1661.16, "end": 1662.44, "word": " هو", "probability": 0.923828125}, {"start": 1662.44, "end": 1663.74, "word": " غياب", "probability": 0.9788411458333334}, {"start": 1663.74, "end": 1663.86, "word": " في", "probability": 0.94775390625}, {"start": 1663.86, "end": 1663.96, "word": " ال", "probability": 
0.9169921875}, {"start": 1663.96, "end": 1664.52, "word": " aggregation", "probability": 0.935546875}, {"start": 1664.52, "end": 1664.98, "word": " مدام", "probability": 0.3694864908854167}, {"start": 1664.98, "end": 1665.24, "word": " في", "probability": 0.734375}, {"start": 1665.24, "end": 1666.08, "word": " خلل", "probability": 0.92919921875}, {"start": 1666.08, "end": 1666.24, "word": " في", "probability": 0.8623046875}, {"start": 1666.24, "end": 1666.26, "word": " ال", "probability": 0.849609375}, {"start": 1666.26, "end": 1666.64, "word": " receptor", "probability": 0.70947265625}, {"start": 1666.64, "end": 1666.98, "word": " تبع", "probability": 0.8075358072916666}, {"start": 1666.98, "end": 1667.08, "word": " ال", "probability": 0.93115234375}, {"start": 1667.08, "end": 1667.58, "word": " aggregation", "probability": 0.97314453125}, {"start": 1667.58, "end": 1667.86, "word": " يبقى", "probability": 0.92724609375}, {"start": 1667.86, "end": 1668.06, "word": " في", "probability": 0.9052734375}, {"start": 1668.06, "end": 1668.54, "word": " غياب", "probability": 0.982421875}, {"start": 1668.54, "end": 1668.8, "word": " في", "probability": 0.96435546875}, {"start": 1668.8, "end": 1669.8, "word": " aggregation", "probability": 0.6973876953125}], "temperature": 1.0}, {"id": 62, "seek": 169713, "start": 1670.27, "end": 1697.13, "text": "طبعاً في other technique عملوها لجوا فيه كمان receptor تانية كمان صفات تانية موجودة منها انه عدم قدرة ال platelet انها ت spread انها تنفرد على سطح subendothelial matrix و دي in vitro technique which means انه في خلل في ال adhesion كمان لإن ال fibrinectin", "tokens": [9566, 3555, 3615, 995, 14111, 8978, 661, 6532, 6225, 42213, 2407, 11296, 5296, 7435, 14407, 8978, 3224, 9122, 2304, 7649, 32264, 6055, 7649, 10632, 9122, 2304, 7649, 20328, 5172, 9307, 6055, 7649, 10632, 3714, 29245, 23328, 3660, 9154, 11296, 16472, 3224, 6225, 40448, 12174, 3215, 25720, 2423, 3403, 15966, 16472, 11296, 6055, 3974, 16472, 11296, 6055, 1863, 5172, 2288, 
3215, 15844, 8608, 9566, 5016, 1422, 521, 900, 338, 831, 8141, 4032, 11778, 1829, 294, 9467, 340, 6532, 597, 1355, 16472, 3224, 8978, 16490, 1211, 1211, 8978, 2423, 614, 38571, 9122, 2304, 7649, 5296, 28814, 1863, 2423, 283, 6414, 259, 557, 259], "avg_logprob": -0.27481618494379756, "compression_ratio": 1.5603448275862069, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1670.27, "end": 1670.67, "word": "طبعاً", "probability": 0.743603515625}, {"start": 1670.67, "end": 1670.81, "word": " في", "probability": 0.765625}, {"start": 1670.81, "end": 1671.17, "word": " other", "probability": 0.2548828125}, {"start": 1671.17, "end": 1672.41, "word": " technique", "probability": 0.8388671875}, {"start": 1672.41, "end": 1674.81, "word": " عملوها", "probability": 0.97802734375}, {"start": 1674.81, "end": 1675.75, "word": " لجوا", "probability": 0.7493489583333334}, {"start": 1675.75, "end": 1675.99, "word": " فيه", "probability": 0.8544921875}, {"start": 1675.99, "end": 1676.31, "word": " كمان", "probability": 0.8849283854166666}, {"start": 1676.31, "end": 1676.73, "word": " receptor", "probability": 0.8759765625}, {"start": 1676.73, "end": 1677.31, "word": " تانية", "probability": 0.9484049479166666}, {"start": 1677.31, "end": 1678.47, "word": " كمان", "probability": 0.7606608072916666}, {"start": 1678.47, "end": 1678.83, "word": " صفات", "probability": 0.9892578125}, {"start": 1678.83, "end": 1679.11, "word": " تانية", "probability": 0.91748046875}, {"start": 1679.11, "end": 1679.61, "word": " موجودة", "probability": 0.98974609375}, {"start": 1679.61, "end": 1680.07, "word": " منها", "probability": 0.93505859375}, {"start": 1680.07, "end": 1680.73, "word": " انه", "probability": 0.64013671875}, {"start": 1680.73, "end": 1680.97, "word": " عدم", "probability": 0.954833984375}, {"start": 1680.97, "end": 1681.33, "word": " قدرة", "probability": 0.9807942708333334}, {"start": 1681.33, "end": 1681.47, "word": " ال", "probability": 0.86669921875}, {"start": 1681.47, 
"end": 1681.93, "word": " platelet", "probability": 0.5374755859375}, {"start": 1681.93, "end": 1682.33, "word": " انها", "probability": 0.87939453125}, {"start": 1682.33, "end": 1683.03, "word": " ت", "probability": 0.8154296875}, {"start": 1683.03, "end": 1683.53, "word": " spread", "probability": 0.79345703125}, {"start": 1683.53, "end": 1684.07, "word": " انها", "probability": 0.865478515625}, {"start": 1684.07, "end": 1684.99, "word": " تنفرد", "probability": 0.724560546875}, {"start": 1684.99, "end": 1685.55, "word": " على", "probability": 0.87060546875}, {"start": 1685.55, "end": 1686.09, "word": " سطح", "probability": 0.9654947916666666}, {"start": 1686.09, "end": 1687.79, "word": " subendothelial", "probability": 0.76357421875}, {"start": 1687.79, "end": 1688.35, "word": " matrix", "probability": 0.955078125}, {"start": 1688.35, "end": 1689.31, "word": " و", "probability": 0.359619140625}, {"start": 1689.31, "end": 1689.53, "word": " دي", "probability": 0.47711181640625}, {"start": 1689.53, "end": 1689.69, "word": " in", "probability": 0.650390625}, {"start": 1689.69, "end": 1690.05, "word": " vitro", "probability": 0.952392578125}, {"start": 1690.05, "end": 1690.65, "word": " technique", "probability": 0.84814453125}, {"start": 1690.65, "end": 1692.43, "word": " which", "probability": 0.6103515625}, {"start": 1692.43, "end": 1692.81, "word": " means", "probability": 0.91015625}, {"start": 1692.81, "end": 1693.11, "word": " انه", "probability": 0.84619140625}, {"start": 1693.11, "end": 1693.21, "word": " في", "probability": 0.95458984375}, {"start": 1693.21, "end": 1693.63, "word": " خلل", "probability": 0.9244791666666666}, {"start": 1693.63, "end": 1693.87, "word": " في", "probability": 0.92919921875}, {"start": 1693.87, "end": 1694.21, "word": " ال", "probability": 0.9501953125}, {"start": 1694.21, "end": 1695.27, "word": " adhesion", "probability": 0.7841796875}, {"start": 1695.27, "end": 1695.85, "word": " كمان", "probability": 0.9752604166666666}, 
{"start": 1695.85, "end": 1696.31, "word": " لإن", "probability": 0.782470703125}, {"start": 1696.31, "end": 1696.47, "word": " ال", "probability": 0.46142578125}, {"start": 1696.47, "end": 1697.13, "word": " fibrinectin", "probability": 0.629736328125}], "temperature": 1.0}, {"id": 63, "seek": 172761, "start": 1703.23, "end": 1727.61, "text": "absent وخلال اخر جاله ال fiber engine اللى موجود فى ال alpha granules تبع تابليتل بيكون قليل او absent طبعا كلكم بتعرف انه في coordination factor موجودة فى ال alpha granules والتابليتل فاكرين هم؟ اه واحد او اتنين او سبعة او تمانية", "tokens": [17243, 317, 4032, 9778, 1211, 6027, 1975, 34740, 10874, 6027, 3224, 2423, 12874, 2848, 13672, 7578, 3714, 29245, 23328, 6156, 7578, 2423, 8961, 9370, 3473, 6055, 3555, 3615, 6055, 16758, 20292, 2655, 1211, 4724, 1829, 30544, 12174, 20292, 1211, 1975, 2407, 25185, 23032, 3555, 3615, 995, 9122, 23275, 2304, 39894, 3615, 28480, 16472, 3224, 8978, 21252, 5952, 3714, 29245, 23328, 3660, 6156, 7578, 2423, 8961, 9370, 3473, 4032, 6027, 2655, 16758, 20292, 2655, 1211, 6156, 995, 37983, 9957, 8032, 2304, 22807, 1975, 3224, 36764, 24401, 1975, 2407, 1975, 2655, 1863, 9957, 1975, 2407, 8608, 3555, 27884, 1975, 2407, 46811, 7649, 10632], "avg_logprob": -0.35784314864990757, "compression_ratio": 1.6296296296296295, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1703.23, "end": 1704.19, "word": "absent", "probability": 0.6988525390625}, {"start": 1704.19, "end": 1705.15, "word": " وخلال", "probability": 0.779541015625}, {"start": 1705.15, "end": 1705.83, "word": " اخر", "probability": 0.684814453125}, {"start": 1705.83, "end": 1707.43, "word": " جاله", "probability": 0.6744791666666666}, {"start": 1707.43, "end": 1707.69, "word": " ال", "probability": 0.85302734375}, {"start": 1707.69, "end": 1707.93, "word": " fiber", "probability": 0.200927734375}, {"start": 1707.93, "end": 1708.33, "word": " engine", "probability": 0.435791015625}, {"start": 1708.33, "end": 1708.53, "word": " اللى", 
"probability": 0.828857421875}, {"start": 1708.53, "end": 1708.81, "word": " موجود", "probability": 0.9876302083333334}, {"start": 1708.81, "end": 1708.91, "word": " فى", "probability": 0.807373046875}, {"start": 1708.91, "end": 1709.01, "word": " ال", "probability": 0.89697265625}, {"start": 1709.01, "end": 1709.21, "word": " alpha", "probability": 0.8232421875}, {"start": 1709.21, "end": 1709.69, "word": " granules", "probability": 0.78466796875}, {"start": 1709.69, "end": 1710.07, "word": " تبع", "probability": 0.6905924479166666}, {"start": 1710.07, "end": 1710.63, "word": " تابليتل", "probability": 0.60205078125}, {"start": 1710.63, "end": 1711.15, "word": " بيكون", "probability": 0.7274576822916666}, {"start": 1711.15, "end": 1712.67, "word": " قليل", "probability": 0.9881184895833334}, {"start": 1712.67, "end": 1712.89, "word": " او", "probability": 0.93212890625}, {"start": 1712.89, "end": 1713.41, "word": " absent", "probability": 0.93701171875}, {"start": 1713.41, "end": 1715.01, "word": " طبعا", "probability": 0.933837890625}, {"start": 1715.01, "end": 1715.27, "word": " كلكم", "probability": 0.7556966145833334}, {"start": 1715.27, "end": 1715.57, "word": " بتعرف", "probability": 0.9586588541666666}, {"start": 1715.57, "end": 1715.79, "word": " انه", "probability": 0.654541015625}, {"start": 1715.79, "end": 1715.85, "word": " في", "probability": 0.751953125}, {"start": 1715.85, "end": 1716.27, "word": " coordination", "probability": 0.2008056640625}, {"start": 1716.27, "end": 1716.85, "word": " factor", "probability": 0.8681640625}, {"start": 1716.85, "end": 1717.35, "word": " موجودة", "probability": 0.80224609375}, {"start": 1717.35, "end": 1717.49, "word": " فى", "probability": 0.84326171875}, {"start": 1717.49, "end": 1717.59, "word": " ال", "probability": 0.79638671875}, {"start": 1717.59, "end": 1717.95, "word": " alpha", "probability": 0.88525390625}, {"start": 1717.95, "end": 1718.43, "word": " granules", "probability": 0.931884765625}, {"start": 
1718.43, "end": 1719.01, "word": " والتابليتل", "probability": 0.7635323660714286}, {"start": 1719.01, "end": 1719.93, "word": " فاكرين", "probability": 0.9482421875}, {"start": 1719.93, "end": 1720.99, "word": " هم؟", "probability": 0.52490234375}, {"start": 1720.99, "end": 1720.99, "word": " اه", "probability": 0.497314453125}, {"start": 1720.99, "end": 1721.75, "word": " واحد", "probability": 0.5992431640625}, {"start": 1721.75, "end": 1722.41, "word": " او", "probability": 0.2474365234375}, {"start": 1722.41, "end": 1722.51, "word": " اتنين", "probability": 0.64581298828125}, {"start": 1722.51, "end": 1724.95, "word": " او", "probability": 0.60595703125}, {"start": 1724.95, "end": 1726.79, "word": " سبعة", "probability": 0.8271484375}, {"start": 1726.79, "end": 1727.07, "word": " او", "probability": 0.859619140625}, {"start": 1727.07, "end": 1727.61, "word": " تمانية", "probability": 0.8362630208333334}], "temperature": 1.0}, {"id": 64, "seek": 175814, "start": 1729.94, "end": 1758.14, "text": "في تسعة و عشرة او تلت عشرة ما دلش كلهم سابت ذا ذاكرهم كلهم في تسعة و تلت عشرة طيب خلاص و ال five origin واحد و اتنين خمس تمين تلت عشرة ماشيين خمس كلهم موجودين في ال alpha granules تبعت من؟ تبعت البلد لكن", "tokens": [41185, 6055, 3794, 27884, 4032, 6225, 8592, 25720, 1975, 2407, 6055, 1211, 2655, 6225, 8592, 25720, 19446, 11778, 1211, 8592, 28242, 16095, 8608, 16758, 2655, 29910, 995, 29910, 995, 37983, 16095, 28242, 16095, 8978, 6055, 3794, 27884, 4032, 6055, 1211, 2655, 6225, 8592, 25720, 23032, 1829, 3555, 16490, 1211, 33546, 4032, 2423, 1732, 4957, 36764, 24401, 4032, 1975, 2655, 1863, 9957, 16490, 2304, 3794, 46811, 9957, 6055, 1211, 2655, 6225, 8592, 25720, 3714, 33599, 1829, 9957, 16490, 2304, 3794, 28242, 16095, 3714, 29245, 23328, 9957, 8978, 2423, 8961, 9370, 3473, 6055, 3555, 34268, 9154, 22807, 6055, 3555, 34268, 29739, 1211, 3215, 44381], "avg_logprob": -0.34496358181666403, "compression_ratio": 1.8225806451612903, "no_speech_prob": 1.7881393432617188e-07, 
"words": [{"start": 1729.94, "end": 1730.1, "word": "في", "probability": 0.619140625}, {"start": 1730.1, "end": 1730.48, "word": " تسعة", "probability": 0.8087565104166666}, {"start": 1730.48, "end": 1730.58, "word": " و", "probability": 0.7666015625}, {"start": 1730.58, "end": 1730.9, "word": " عشرة", "probability": 0.80712890625}, {"start": 1730.9, "end": 1731.38, "word": " او", "probability": 0.565673828125}, {"start": 1731.38, "end": 1731.94, "word": " تلت", "probability": 0.8689778645833334}, {"start": 1731.94, "end": 1732.3, "word": " عشرة", "probability": 0.8038736979166666}, {"start": 1732.3, "end": 1732.46, "word": " ما", "probability": 0.18603515625}, {"start": 1732.46, "end": 1732.86, "word": " دلش", "probability": 0.47509765625}, {"start": 1732.86, "end": 1733.3, "word": " كلهم", "probability": 0.94921875}, {"start": 1733.3, "end": 1733.68, "word": " سابت", "probability": 0.4403076171875}, {"start": 1733.68, "end": 1733.88, "word": " ذا", "probability": 0.4451904296875}, {"start": 1733.88, "end": 1734.24, "word": " ذاكرهم", "probability": 0.8594970703125}, {"start": 1734.24, "end": 1735.78, "word": " كلهم", "probability": 0.833984375}, {"start": 1735.78, "end": 1736.18, "word": " في", "probability": 0.343505859375}, {"start": 1736.18, "end": 1737.0, "word": " تسعة", "probability": 0.9661458333333334}, {"start": 1737.0, "end": 1737.12, "word": " و", "probability": 0.84228515625}, {"start": 1737.12, "end": 1737.4, "word": " تلت", "probability": 0.9884440104166666}, {"start": 1737.4, "end": 1737.72, "word": " عشرة", "probability": 0.8758138020833334}, {"start": 1737.72, "end": 1739.26, "word": " طيب", "probability": 0.8544921875}, {"start": 1739.26, "end": 1740.72, "word": " خلاص", "probability": 0.7709147135416666}, {"start": 1740.72, "end": 1741.14, "word": " و", "probability": 0.5419921875}, {"start": 1741.14, "end": 1741.24, "word": " ال", "probability": 0.49609375}, {"start": 1741.24, "end": 1741.44, "word": " five", "probability": 0.59326171875}, 
{"start": 1741.44, "end": 1741.76, "word": " origin", "probability": 0.22412109375}, {"start": 1741.76, "end": 1742.88, "word": " واحد", "probability": 0.8583984375}, {"start": 1742.88, "end": 1744.64, "word": " و", "probability": 0.71484375}, {"start": 1744.64, "end": 1745.08, "word": " اتنين", "probability": 0.9156494140625}, {"start": 1745.08, "end": 1746.94, "word": " خمس", "probability": 0.9178059895833334}, {"start": 1746.94, "end": 1748.32, "word": " تمين", "probability": 0.445556640625}, {"start": 1748.32, "end": 1749.92, "word": " تلت", "probability": 0.9251302083333334}, {"start": 1749.92, "end": 1750.9, "word": " عشرة", "probability": 0.9114583333333334}, {"start": 1750.9, "end": 1752.18, "word": " ماشيين", "probability": 0.67388916015625}, {"start": 1752.18, "end": 1752.6, "word": " خمس", "probability": 0.8697916666666666}, {"start": 1752.6, "end": 1754.3, "word": " كلهم", "probability": 0.781494140625}, {"start": 1754.3, "end": 1754.74, "word": " موجودين", "probability": 0.989013671875}, {"start": 1754.74, "end": 1754.84, "word": " في", "probability": 0.9140625}, {"start": 1754.84, "end": 1754.92, "word": " ال", "probability": 0.9296875}, {"start": 1754.92, "end": 1755.12, "word": " alpha", "probability": 0.73583984375}, {"start": 1755.12, "end": 1755.64, "word": " granules", "probability": 0.545166015625}, {"start": 1755.64, "end": 1755.94, "word": " تبعت", "probability": 0.8317057291666666}, {"start": 1755.94, "end": 1756.64, "word": " من؟", "probability": 0.6064453125}, {"start": 1756.64, "end": 1757.12, "word": " تبعت", "probability": 0.8888346354166666}, {"start": 1757.12, "end": 1757.48, "word": " البلد", "probability": 0.5914713541666666}, {"start": 1757.48, "end": 1758.14, "word": " لكن", "probability": 0.52734375}], "temperature": 1.0}, {"id": 65, "seek": 178230, "start": 1758.68, "end": 1782.3, "text": "اللي بيهنى في هذا المرض فبروجي صح؟ لإن هو اللي بدى يربط هو ليش؟ اللي بدى يربط هو ال legend في هذا الحالة هو ال bridge فبيكون غايب او 
decreased في ال alpha granules of it inherited as autosome على السيسيو manner و ال patient suffered from a wide spectrum", "tokens": [6027, 20292, 4724, 1829, 3224, 1863, 7578, 8978, 23758, 9673, 43042, 6156, 26890, 29245, 1829, 20328, 5016, 22807, 5296, 28814, 1863, 31439, 13672, 1829, 47525, 7578, 7251, 25513, 9566, 31439, 32239, 8592, 22807, 13672, 1829, 47525, 7578, 7251, 25513, 9566, 31439, 2423, 9451, 8978, 23758, 21542, 6027, 3660, 31439, 2423, 7283, 6156, 3555, 1829, 30544, 32771, 47302, 3555, 1975, 2407, 24436, 8978, 2423, 8961, 9370, 3473, 295, 309, 27091, 382, 1476, 329, 423, 15844, 21136, 1829, 3794, 1829, 2407, 9060, 4032, 2423, 4537, 12770, 490, 257, 4874, 11143], "avg_logprob": -0.3908005537611715, "compression_ratio": 1.6017699115044248, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 1758.68, "end": 1758.96, "word": "اللي", "probability": 0.596435546875}, {"start": 1758.96, "end": 1759.42, "word": " بيهنى", "probability": 0.778955078125}, {"start": 1759.42, "end": 1759.56, "word": " في", "probability": 0.62744140625}, {"start": 1759.56, "end": 1759.8, "word": " هذا", "probability": 0.62939453125}, {"start": 1759.8, "end": 1760.16, "word": " المرض", "probability": 0.980712890625}, {"start": 1760.16, "end": 1760.98, "word": " فبروجي", "probability": 0.450439453125}, {"start": 1760.98, "end": 1762.54, "word": " صح؟", "probability": 0.6153971354166666}, {"start": 1762.54, "end": 1762.72, "word": " لإن", "probability": 0.8015950520833334}, {"start": 1762.72, "end": 1762.82, "word": " هو", "probability": 0.96435546875}, {"start": 1762.82, "end": 1762.92, "word": " اللي", "probability": 0.804443359375}, {"start": 1762.92, "end": 1763.08, "word": " بدى", "probability": 0.678466796875}, {"start": 1763.08, "end": 1763.48, "word": " يربط", "probability": 0.921875}, {"start": 1763.48, "end": 1764.64, "word": " هو", "probability": 0.396240234375}, {"start": 1764.64, "end": 1765.32, "word": " ليش؟", "probability": 0.7561848958333334}, {"start": 
1765.32, "end": 1765.46, "word": " اللي", "probability": 0.6727294921875}, {"start": 1765.46, "end": 1765.64, "word": " بدى", "probability": 0.849609375}, {"start": 1765.64, "end": 1765.84, "word": " يربط", "probability": 0.9905598958333334}, {"start": 1765.84, "end": 1765.96, "word": " هو", "probability": 0.71923828125}, {"start": 1765.96, "end": 1766.08, "word": " ال", "probability": 0.73046875}, {"start": 1766.08, "end": 1766.38, "word": " legend", "probability": 0.64892578125}, {"start": 1766.38, "end": 1766.54, "word": " في", "probability": 0.82275390625}, {"start": 1766.54, "end": 1766.68, "word": " هذا", "probability": 0.399658203125}, {"start": 1766.68, "end": 1767.06, "word": " الحالة", "probability": 0.93115234375}, {"start": 1767.06, "end": 1767.18, "word": " هو", "probability": 0.74853515625}, {"start": 1767.18, "end": 1767.32, "word": " ال", "probability": 0.9873046875}, {"start": 1767.32, "end": 1767.66, "word": " bridge", "probability": 0.82080078125}, {"start": 1767.66, "end": 1768.96, "word": " فبيكون", "probability": 0.78948974609375}, {"start": 1768.96, "end": 1769.62, "word": " غايب", "probability": 0.84033203125}, {"start": 1769.62, "end": 1770.18, "word": " او", "probability": 0.755859375}, {"start": 1770.18, "end": 1771.06, "word": " decreased", "probability": 0.89306640625}, {"start": 1771.06, "end": 1772.0, "word": " في", "probability": 0.90869140625}, {"start": 1772.0, "end": 1772.88, "word": " ال", "probability": 0.9599609375}, {"start": 1772.88, "end": 1773.16, "word": " alpha", "probability": 0.841796875}, {"start": 1773.16, "end": 1773.7, "word": " granules", "probability": 0.739013671875}, {"start": 1773.7, "end": 1773.9, "word": " of", "probability": 0.41943359375}, {"start": 1773.9, "end": 1774.2, "word": " it", "probability": 0.276123046875}, {"start": 1774.2, "end": 1775.6, "word": " inherited", "probability": 0.18310546875}, {"start": 1775.6, "end": 1775.84, "word": " as", "probability": 0.480712890625}, {"start": 1775.84, "end": 
1776.32, "word": " autosome", "probability": 0.6522623697916666}, {"start": 1776.32, "end": 1776.46, "word": " على", "probability": 0.76806640625}, {"start": 1776.46, "end": 1777.4, "word": " السيسيو", "probability": 0.6146484375}, {"start": 1777.4, "end": 1777.88, "word": " manner", "probability": 0.0748291015625}, {"start": 1777.88, "end": 1779.52, "word": " و", "probability": 0.6845703125}, {"start": 1779.52, "end": 1780.3, "word": " ال", "probability": 0.9091796875}, {"start": 1780.3, "end": 1780.68, "word": " patient", "probability": 0.9501953125}, {"start": 1780.68, "end": 1781.2, "word": " suffered", "probability": 0.499755859375}, {"start": 1781.2, "end": 1781.5, "word": " from", "probability": 0.904296875}, {"start": 1781.5, "end": 1781.62, "word": " a", "probability": 0.92822265625}, {"start": 1781.62, "end": 1781.9, "word": " wide", "probability": 0.798828125}, {"start": 1781.9, "end": 1782.3, "word": " spectrum", "probability": 0.8193359375}], "temperature": 1.0}, {"id": 66, "seek": 181536, "start": 1786.66, "end": 1815.36, "text": "بأشكالها المختلفة، under the skin, petasche, chemosis، بيعاني من recurrent epistaxis، بيعاني من gastrointestinal hemorrhage، قد حالة، منوراجيا، and immediate bleeding even after small or minor trauma، فش، منزف بيبطل يعش؟ دقيقة، لحظة، intracranial hemorrhage", "tokens": [3555, 10721, 8592, 4117, 6027, 11296, 9673, 46456, 46538, 3660, 12399, 833, 264, 3178, 11, 3817, 296, 1876, 11, 417, 4485, 271, 12399, 4724, 40228, 7649, 1829, 9154, 18680, 1753, 2388, 468, 24633, 12399, 4724, 40228, 7649, 1829, 9154, 17898, 340, 686, 377, 2071, 8636, 24362, 71, 609, 12399, 12174, 3215, 11331, 6027, 3660, 12399, 9154, 13063, 26108, 25528, 12399, 293, 11629, 19312, 754, 934, 1359, 420, 6696, 11407, 12399, 6156, 8592, 12399, 9154, 11622, 5172, 4724, 1829, 3555, 9566, 1211, 37495, 8592, 22807, 11778, 38436, 28671, 12399, 5296, 5016, 19913, 3660, 12399, 560, 12080, 4257, 831, 8636, 24362, 71, 609], "avg_logprob": -0.37867648315195945, 
"compression_ratio": 1.446351931330472, "no_speech_prob": 0.0, "words": [{"start": 1786.66, "end": 1787.54, "word": "بأشكالها", "probability": 0.8939615885416666}, {"start": 1787.54, "end": 1788.34, "word": " المختلفة،", "probability": 0.8082275390625}, {"start": 1788.34, "end": 1788.52, "word": " under", "probability": 0.74951171875}, {"start": 1788.52, "end": 1788.76, "word": " the", "probability": 0.8779296875}, {"start": 1788.76, "end": 1789.0, "word": " skin,", "probability": 0.95166015625}, {"start": 1789.2, "end": 1789.6, "word": " petasche,", "probability": 0.4167887369791667}, {"start": 1789.76, "end": 1791.04, "word": " chemosis،", "probability": 0.517486572265625}, {"start": 1791.04, "end": 1791.5, "word": " بيعاني", "probability": 0.7802734375}, {"start": 1791.5, "end": 1791.64, "word": " من", "probability": 0.98828125}, {"start": 1791.64, "end": 1792.38, "word": " recurrent", "probability": 0.6485595703125}, {"start": 1792.38, "end": 1793.98, "word": " epistaxis،", "probability": 0.651153564453125}, {"start": 1793.98, "end": 1794.4, "word": " بيعاني", "probability": 0.9769287109375}, {"start": 1794.4, "end": 1794.54, "word": " من", "probability": 0.99462890625}, {"start": 1794.54, "end": 1795.58, "word": " gastrointestinal", "probability": 0.92763671875}, {"start": 1795.58, "end": 1796.62, "word": " hemorrhage،", "probability": 0.82431640625}, {"start": 1796.62, "end": 1796.76, "word": " قد", "probability": 0.7049560546875}, {"start": 1796.76, "end": 1797.94, "word": " حالة،", "probability": 0.59576416015625}, {"start": 1797.94, "end": 1800.82, "word": " منوراجيا،", "probability": 0.612646484375}, {"start": 1800.82, "end": 1801.44, "word": " and", "probability": 0.7177734375}, {"start": 1801.44, "end": 1802.48, "word": " immediate", "probability": 0.86279296875}, {"start": 1802.48, "end": 1803.64, "word": " bleeding", "probability": 0.92626953125}, {"start": 1803.64, "end": 1804.02, "word": " even", "probability": 0.763671875}, {"start": 1804.02, 
"end": 1804.44, "word": " after", "probability": 0.86279296875}, {"start": 1804.44, "end": 1804.92, "word": " small", "probability": 0.724609375}, {"start": 1804.92, "end": 1805.2, "word": " or", "probability": 0.9521484375}, {"start": 1805.2, "end": 1805.58, "word": " minor", "probability": 0.98779296875}, {"start": 1805.58, "end": 1806.48, "word": " trauma،", "probability": 0.6988525390625}, {"start": 1806.48, "end": 1807.54, "word": " فش،", "probability": 0.3661702473958333}, {"start": 1807.54, "end": 1808.04, "word": " منزف", "probability": 0.6407063802083334}, {"start": 1808.04, "end": 1808.46, "word": " بيبطل", "probability": 0.77734375}, {"start": 1808.46, "end": 1809.46, "word": " يعش؟", "probability": 0.5535481770833334}, {"start": 1809.46, "end": 1811.54, "word": " دقيقة،", "probability": 0.74810791015625}, {"start": 1811.54, "end": 1813.02, "word": " لحظة،", "probability": 0.75322265625}, {"start": 1813.02, "end": 1814.7, "word": " intracranial", "probability": 0.8922119140625}, {"start": 1814.7, "end": 1815.36, "word": " hemorrhage", "probability": 0.8887939453125}], "temperature": 1.0}, {"id": 67, "seek": 183959, "start": 1816.27, "end": 1839.59, "text": "ICH اختصار ماهيش؟ Intracranial Hemorrhage joined and muscle bleeding are uncommon ودول شباب احفظوهم من هالجيه هدول عبارة عن deep tissue و ال deep tissue ال bleeding لما يصل ال deep tissue بكون ناتج عن", "tokens": [2532, 39, 1975, 46456, 9381, 9640, 19446, 3224, 1829, 8592, 22807, 5681, 12080, 4257, 831, 18568, 24362, 71, 609, 6869, 293, 8679, 19312, 366, 29289, 4032, 3215, 12610, 13412, 3555, 16758, 1975, 5016, 5172, 19913, 2407, 16095, 9154, 8032, 6027, 7435, 1829, 3224, 8032, 3215, 12610, 6225, 3555, 9640, 3660, 18871, 2452, 12404, 4032, 2423, 2452, 12404, 2423, 19312, 5296, 15042, 7251, 36520, 2423, 2452, 12404, 4724, 30544, 8717, 9307, 7435, 18871], "avg_logprob": -0.31720891390761285, "compression_ratio": 1.4083769633507854, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1816.27, 
"end": 1817.01, "word": "ICH", "probability": 0.653564453125}, {"start": 1817.01, "end": 1817.55, "word": " اختصار", "probability": 0.8695068359375}, {"start": 1817.55, "end": 1818.67, "word": " ماهيش؟", "probability": 0.475}, {"start": 1818.67, "end": 1819.73, "word": " Intracranial", "probability": 0.8572998046875}, {"start": 1819.73, "end": 1820.59, "word": " Hemorrhage", "probability": 0.750732421875}, {"start": 1820.59, "end": 1821.61, "word": " joined", "probability": 0.049224853515625}, {"start": 1821.61, "end": 1823.03, "word": " and", "probability": 0.162841796875}, {"start": 1823.03, "end": 1823.57, "word": " muscle", "probability": 0.9208984375}, {"start": 1823.57, "end": 1824.81, "word": " bleeding", "probability": 0.5732421875}, {"start": 1824.81, "end": 1825.61, "word": " are", "probability": 0.75634765625}, {"start": 1825.61, "end": 1826.17, "word": " uncommon", "probability": 0.93359375}, {"start": 1826.17, "end": 1827.53, "word": " ودول", "probability": 0.53955078125}, {"start": 1827.53, "end": 1828.09, "word": " شباب", "probability": 0.79345703125}, {"start": 1828.09, "end": 1828.75, "word": " احفظوهم", "probability": 0.8557942708333334}, {"start": 1828.75, "end": 1828.91, "word": " من", "probability": 0.9716796875}, {"start": 1828.91, "end": 1829.47, "word": " هالجيه", "probability": 0.730712890625}, {"start": 1829.47, "end": 1830.37, "word": " هدول", "probability": 0.8839518229166666}, {"start": 1830.37, "end": 1830.75, "word": " عبارة", "probability": 0.95654296875}, {"start": 1830.75, "end": 1830.95, "word": " عن", "probability": 0.98974609375}, {"start": 1830.95, "end": 1831.25, "word": " deep", "probability": 0.830078125}, {"start": 1831.25, "end": 1831.95, "word": " tissue", "probability": 0.93408203125}, {"start": 1831.95, "end": 1833.81, "word": " و", "probability": 0.9052734375}, {"start": 1833.81, "end": 1833.95, "word": " ال", "probability": 0.79296875}, {"start": 1833.95, "end": 1834.17, "word": " deep", "probability": 0.98095703125}, 
{"start": 1834.17, "end": 1834.73, "word": " tissue", "probability": 0.96484375}, {"start": 1834.73, "end": 1836.27, "word": " ال", "probability": 0.6435546875}, {"start": 1836.27, "end": 1836.97, "word": " bleeding", "probability": 0.9775390625}, {"start": 1836.97, "end": 1837.39, "word": " لما", "probability": 0.885009765625}, {"start": 1837.39, "end": 1837.73, "word": " يصل", "probability": 0.959716796875}, {"start": 1837.73, "end": 1837.93, "word": " ال", "probability": 0.892578125}, {"start": 1837.93, "end": 1838.15, "word": " deep", "probability": 0.9951171875}, {"start": 1838.15, "end": 1838.49, "word": " tissue", "probability": 0.9775390625}, {"start": 1838.49, "end": 1838.91, "word": " بكون", "probability": 0.6431884765625}, {"start": 1838.91, "end": 1839.25, "word": " ناتج", "probability": 0.9754231770833334}, {"start": 1839.25, "end": 1839.59, "word": " عن", "probability": 0.9658203125}], "temperature": 1.0}, {"id": 68, "seek": 187177, "start": 1843.15, "end": 1871.77, "text": "يعني عند .. 
اللي بيقوم في هذا الموضوع ال secondary hemostasis و ليش هذه primary؟ و أنا قاعد بحكي على platelet ال platelet يعني ايه؟ 100% عشان هيك مانشوفش هذه المظاهر المرضية هذه المظاهر المرضية مابتشوفها ليش؟ لأنها deep ماشي مش superficial و هذه ال deep اللي بيصليلها أو اللي بيكون بعمل", "tokens": [40228, 22653, 43242, 4386, 13672, 1829, 4724, 1829, 4587, 20498, 8978, 23758, 9673, 2407, 11242, 45367, 2423, 11396, 415, 1761, 26632, 4032, 32239, 8592, 29538, 6194, 22807, 4032, 41850, 12174, 995, 22488, 4724, 5016, 4117, 1829, 15844, 3403, 15966, 2423, 3403, 15966, 37495, 22653, 1975, 1829, 3224, 22807, 2319, 4, 6225, 8592, 7649, 39896, 4117, 3714, 7649, 8592, 38688, 8592, 29538, 9673, 19913, 40294, 2288, 9673, 43042, 10632, 29538, 9673, 19913, 40294, 2288, 9673, 43042, 10632, 3714, 16758, 2655, 8592, 38688, 11296, 32239, 8592, 22807, 5296, 33456, 11296, 2452, 3714, 33599, 1829, 37893, 34622, 4032, 29538, 2423, 2452, 13672, 1829, 4724, 1829, 9381, 20292, 1211, 11296, 34051, 13672, 1829, 4724, 1829, 30544, 4724, 25957, 1211], "avg_logprob": -0.23949353795113235, "compression_ratio": 1.780392156862745, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 1843.15, "end": 1843.51, "word": "يعني", "probability": 0.90673828125}, {"start": 1843.51, "end": 1843.83, "word": " عند", "probability": 0.609375}, {"start": 1843.83, "end": 1844.09, "word": " ..", "probability": 0.31884765625}, {"start": 1844.09, "end": 1844.27, "word": " اللي", "probability": 0.81591796875}, {"start": 1844.27, "end": 1844.81, "word": " بيقوم", "probability": 0.981201171875}, {"start": 1844.81, "end": 1845.09, "word": " في", "probability": 0.8837890625}, {"start": 1845.09, "end": 1845.53, "word": " هذا", "probability": 0.91259765625}, {"start": 1845.53, "end": 1846.07, "word": " الموضوع", "probability": 0.9976806640625}, {"start": 1846.07, "end": 1846.25, "word": " ال", "probability": 0.58984375}, {"start": 1846.25, "end": 1846.67, "word": " secondary", "probability": 0.89990234375}, {"start": 1846.67, 
"end": 1847.61, "word": " hemostasis", "probability": 0.6869303385416666}, {"start": 1847.61, "end": 1847.75, "word": " و", "probability": 0.78857421875}, {"start": 1847.75, "end": 1847.95, "word": " ليش", "probability": 0.7490234375}, {"start": 1847.95, "end": 1848.07, "word": " هذه", "probability": 0.310546875}, {"start": 1848.07, "end": 1849.15, "word": " primary؟", "probability": 0.6429443359375}, {"start": 1849.15, "end": 1849.69, "word": " و", "probability": 0.72802734375}, {"start": 1849.69, "end": 1849.81, "word": " أنا", "probability": 0.791015625}, {"start": 1849.81, "end": 1850.03, "word": " قاعد", "probability": 0.8004557291666666}, {"start": 1850.03, "end": 1850.35, "word": " بحكي", "probability": 0.966796875}, {"start": 1850.35, "end": 1850.51, "word": " على", "probability": 0.7734375}, {"start": 1850.51, "end": 1850.99, "word": " platelet", "probability": 0.5433349609375}, {"start": 1850.99, "end": 1851.11, "word": " ال", "probability": 0.2186279296875}, {"start": 1851.11, "end": 1851.53, "word": " platelet", "probability": 0.839111328125}, {"start": 1851.53, "end": 1851.77, "word": " يعني", "probability": 0.951416015625}, {"start": 1851.77, "end": 1852.17, "word": " ايه؟", "probability": 0.8583984375}, {"start": 1852.17, "end": 1852.43, "word": " 100", "probability": 0.161376953125}, {"start": 1852.43, "end": 1853.35, "word": "%", "probability": 0.88671875}, {"start": 1853.35, "end": 1853.59, "word": " عشان", "probability": 0.8671875}, {"start": 1853.59, "end": 1853.73, "word": " هيك", "probability": 0.556396484375}, {"start": 1853.73, "end": 1854.11, "word": " مانشوفش", "probability": 0.719384765625}, {"start": 1854.11, "end": 1854.31, "word": " هذه", "probability": 0.84326171875}, {"start": 1854.31, "end": 1854.83, "word": " المظاهر", "probability": 0.9908447265625}, {"start": 1854.83, "end": 1855.35, "word": " المرضية", "probability": 0.9768880208333334}, {"start": 1855.35, "end": 1856.59, "word": " هذه", "probability": 0.865234375}, {"start": 
1856.59, "end": 1857.09, "word": " المظاهر", "probability": 0.992919921875}, {"start": 1857.09, "end": 1857.57, "word": " المرضية", "probability": 0.9943033854166666}, {"start": 1857.57, "end": 1858.33, "word": " مابتشوفها", "probability": 0.8897705078125}, {"start": 1858.33, "end": 1859.45, "word": " ليش؟", "probability": 0.71630859375}, {"start": 1859.45, "end": 1859.79, "word": " لأنها", "probability": 0.7945963541666666}, {"start": 1859.79, "end": 1860.19, "word": " deep", "probability": 0.65478515625}, {"start": 1860.19, "end": 1862.25, "word": " ماشي", "probability": 0.8551432291666666}, {"start": 1862.25, "end": 1863.35, "word": " مش", "probability": 0.8955078125}, {"start": 1863.35, "end": 1866.19, "word": " superficial", "probability": 0.83251953125}, {"start": 1866.19, "end": 1868.35, "word": " و", "probability": 0.69921875}, {"start": 1868.35, "end": 1868.75, "word": " هذه", "probability": 0.845703125}, {"start": 1868.75, "end": 1868.95, "word": " ال", "probability": 0.97509765625}, {"start": 1868.95, "end": 1869.37, "word": " deep", "probability": 0.97607421875}, {"start": 1869.37, "end": 1869.83, "word": " اللي", "probability": 0.962158203125}, {"start": 1869.83, "end": 1870.67, "word": " بيصليلها", "probability": 0.8284912109375}, {"start": 1870.67, "end": 1870.85, "word": " أو", "probability": 0.61376953125}, {"start": 1870.85, "end": 1870.97, "word": " اللي", "probability": 0.978271484375}, {"start": 1870.97, "end": 1871.29, "word": " بيكون", "probability": 0.8470052083333334}, {"start": 1871.29, "end": 1871.77, "word": " بعمل", "probability": 0.8668619791666666}], "temperature": 1.0}, {"id": 69, "seek": 190045, "start": 1872.23, "end": 1900.45, "text": "الـ Hemostasis فيها هو الـ secondary يعني الـ coagulation factor وبالتالي أي خلال فيها بتشوف في ال coagulation factor بتشوف خلال فيها طيب قسموا وصنفوا ال Lanzmann disease إلى تلت أنواع و جالوا type 1 و type 2 و type 3 و التصنيفة كانت according to the title", "tokens": [6027, 39184, 18568, 555, 
26632, 8978, 11296, 31439, 2423, 39184, 11396, 37495, 22653, 2423, 39184, 598, 559, 2776, 5952, 46599, 6027, 2655, 6027, 1829, 36632, 16490, 1211, 6027, 8978, 11296, 39894, 8592, 38688, 8978, 2423, 598, 559, 2776, 5952, 39894, 8592, 38688, 16490, 1211, 6027, 8978, 11296, 23032, 1829, 3555, 12174, 38251, 14407, 4032, 9381, 1863, 5172, 14407, 2423, 441, 3910, 14912, 4752, 30731, 6055, 1211, 2655, 14739, 14407, 3615, 4032, 10874, 6027, 14407, 2010, 502, 4032, 2010, 568, 4032, 2010, 805, 4032, 16712, 9381, 22653, 5172, 3660, 25961, 2655, 4650, 281, 264, 4876], "avg_logprob": -0.258881572045778, "compression_ratio": 1.5991189427312775, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1872.23, "end": 1872.75, "word": "الـ", "probability": 0.660400390625}, {"start": 1872.75, "end": 1873.31, "word": " Hemostasis", "probability": 0.7420247395833334}, {"start": 1873.31, "end": 1873.61, "word": " فيها", "probability": 0.615966796875}, {"start": 1873.61, "end": 1873.71, "word": " هو", "probability": 0.79541015625}, {"start": 1873.71, "end": 1873.87, "word": " الـ", "probability": 0.833251953125}, {"start": 1873.87, "end": 1874.21, "word": " secondary", "probability": 0.56201171875}, {"start": 1874.21, "end": 1874.47, "word": " يعني", "probability": 0.722412109375}, {"start": 1874.47, "end": 1874.59, "word": " الـ", "probability": 0.688720703125}, {"start": 1874.59, "end": 1875.03, "word": " coagulation", "probability": 0.7705891927083334}, {"start": 1875.03, "end": 1875.51, "word": " factor", "probability": 0.89501953125}, {"start": 1875.51, "end": 1876.27, "word": " وبالتالي", "probability": 0.81962890625}, {"start": 1876.27, "end": 1876.47, "word": " أي", "probability": 0.693359375}, {"start": 1876.47, "end": 1876.77, "word": " خلال", "probability": 0.90966796875}, {"start": 1876.77, "end": 1877.15, "word": " فيها", "probability": 0.979248046875}, {"start": 1877.15, "end": 1878.29, "word": " بتشوف", "probability": 0.7347819010416666}, {"start": 1878.29, "end": 
1879.81, "word": " في", "probability": 0.69287109375}, {"start": 1879.81, "end": 1879.89, "word": " ال", "probability": 0.9794921875}, {"start": 1879.89, "end": 1880.33, "word": " coagulation", "probability": 0.8479817708333334}, {"start": 1880.33, "end": 1880.69, "word": " factor", "probability": 0.92138671875}, {"start": 1880.69, "end": 1881.05, "word": " بتشوف", "probability": 0.8795572916666666}, {"start": 1881.05, "end": 1881.37, "word": " خلال", "probability": 0.97705078125}, {"start": 1881.37, "end": 1882.97, "word": " فيها", "probability": 0.97314453125}, {"start": 1882.97, "end": 1886.09, "word": " طيب", "probability": 0.7474772135416666}, {"start": 1886.09, "end": 1886.75, "word": " قسموا", "probability": 0.9013671875}, {"start": 1886.75, "end": 1888.33, "word": " وصنفوا", "probability": 0.85361328125}, {"start": 1888.33, "end": 1888.89, "word": " ال", "probability": 0.90185546875}, {"start": 1888.89, "end": 1891.91, "word": " Lanzmann", "probability": 0.3950602213541667}, {"start": 1891.91, "end": 1892.75, "word": " disease", "probability": 0.55712890625}, {"start": 1892.75, "end": 1893.01, "word": " إلى", "probability": 0.71728515625}, {"start": 1893.01, "end": 1893.37, "word": " تلت", "probability": 0.8782552083333334}, {"start": 1893.37, "end": 1893.83, "word": " أنواع", "probability": 0.9484049479166666}, {"start": 1893.83, "end": 1893.99, "word": " و", "probability": 0.54736328125}, {"start": 1893.99, "end": 1894.39, "word": " جالوا", "probability": 0.70361328125}, {"start": 1894.39, "end": 1894.77, "word": " type", "probability": 0.71044921875}, {"start": 1894.77, "end": 1895.09, "word": " 1", "probability": 0.39501953125}, {"start": 1895.09, "end": 1895.25, "word": " و", "probability": 0.970703125}, {"start": 1895.25, "end": 1895.41, "word": " type", "probability": 0.59130859375}, {"start": 1895.41, "end": 1895.67, "word": " 2", "probability": 0.92138671875}, {"start": 1895.67, "end": 1895.81, "word": " و", "probability": 0.99462890625}, {"start": 
1895.81, "end": 1895.95, "word": " type", "probability": 0.74560546875}, {"start": 1895.95, "end": 1896.33, "word": " 3", "probability": 0.94677734375}, {"start": 1896.33, "end": 1897.17, "word": " و", "probability": 0.6083984375}, {"start": 1897.17, "end": 1898.03, "word": " التصنيفة", "probability": 0.947265625}, {"start": 1898.03, "end": 1898.31, "word": " كانت", "probability": 0.983154296875}, {"start": 1898.31, "end": 1898.67, "word": " according", "probability": 0.91552734375}, {"start": 1898.67, "end": 1898.97, "word": " to", "probability": 0.9814453125}, {"start": 1898.97, "end": 1899.29, "word": " the", "probability": 0.8916015625}, {"start": 1899.29, "end": 1900.45, "word": " title", "probability": 0.37353515625}], "temperature": 1.0}, {"id": 70, "seek": 192999, "start": 1901.39, "end": 1929.99, "text": "of glycoprotein 2b3a receptors على سطح البليتلت ماشي بشكل عادي وجود ال receptor نمره اتنين او receptor نمره اتنين اذا كان في غياب لل alpha granules فيبرينوجين ولا لا تشوف قسموهم ل تلت اقسام قالوا type 1 هو ال severe هو بيكون", "tokens": [2670, 22633, 13084, 81, 1370, 259, 568, 65, 18, 64, 34102, 15844, 8608, 9566, 5016, 29739, 20292, 2655, 1211, 2655, 3714, 33599, 1829, 4724, 8592, 28820, 6225, 995, 16254, 49610, 23328, 2423, 32264, 8717, 29973, 3224, 1975, 2655, 1863, 9957, 1975, 2407, 32264, 8717, 29973, 3224, 1975, 2655, 1863, 9957, 1975, 15730, 25961, 8978, 32771, 1829, 16758, 24976, 8961, 9370, 3473, 8978, 26890, 9957, 29245, 9957, 49429, 20193, 6055, 8592, 38688, 12174, 38251, 2407, 16095, 5296, 6055, 1211, 2655, 1975, 4587, 3794, 10943, 50239, 14407, 2010, 502, 31439, 2423, 8922, 31439, 4724, 1829, 30544], "avg_logprob": -0.34572368170085704, "compression_ratio": 1.5454545454545454, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 1901.39, "end": 1901.77, "word": "of", "probability": 0.1295166015625}, {"start": 1901.77, "end": 1902.77, "word": " glycoprotein", "probability": 0.84970703125}, {"start": 1902.77, "end": 1903.71, "word": " 
2b3a", "probability": 0.78057861328125}, {"start": 1903.71, "end": 1904.77, "word": " receptors", "probability": 0.1114501953125}, {"start": 1904.77, "end": 1905.27, "word": " على", "probability": 0.73388671875}, {"start": 1905.27, "end": 1906.43, "word": " سطح", "probability": 0.9811197916666666}, {"start": 1906.43, "end": 1907.23, "word": " البليتلت", "probability": 0.508251953125}, {"start": 1907.23, "end": 1907.87, "word": " ماشي", "probability": 0.76220703125}, {"start": 1907.87, "end": 1908.81, "word": " بشكل", "probability": 0.8772786458333334}, {"start": 1908.81, "end": 1909.23, "word": " عادي", "probability": 0.77392578125}, {"start": 1909.23, "end": 1911.39, "word": " وجود", "probability": 0.947998046875}, {"start": 1911.39, "end": 1912.11, "word": " ال", "probability": 0.8427734375}, {"start": 1912.11, "end": 1912.57, "word": " receptor", "probability": 0.6142578125}, {"start": 1912.57, "end": 1913.53, "word": " نمره", "probability": 0.4678548177083333}, {"start": 1913.53, "end": 1913.73, "word": " اتنين", "probability": 0.83441162109375}, {"start": 1913.73, "end": 1913.85, "word": " او", "probability": 0.7142333984375}, {"start": 1913.85, "end": 1914.23, "word": " receptor", "probability": 0.2496337890625}, {"start": 1914.23, "end": 1915.57, "word": " نمره", "probability": 0.9353841145833334}, {"start": 1915.57, "end": 1915.93, "word": " اتنين", "probability": 0.981201171875}, {"start": 1915.93, "end": 1916.63, "word": " اذا", "probability": 0.87548828125}, {"start": 1916.63, "end": 1916.91, "word": " كان", "probability": 0.9931640625}, {"start": 1916.91, "end": 1917.21, "word": " في", "probability": 0.90380859375}, {"start": 1917.21, "end": 1918.25, "word": " غياب", "probability": 0.908203125}, {"start": 1918.25, "end": 1918.49, "word": " لل", "probability": 0.8447265625}, {"start": 1918.49, "end": 1918.97, "word": " alpha", "probability": 0.66162109375}, {"start": 1918.97, "end": 1919.85, "word": " granules", "probability": 0.690185546875}, {"start": 
1919.85, "end": 1920.67, "word": " فيبرينوجين", "probability": 0.6423095703125}, {"start": 1920.67, "end": 1921.31, "word": " ولا", "probability": 0.833984375}, {"start": 1921.31, "end": 1921.61, "word": " لا", "probability": 0.69091796875}, {"start": 1921.61, "end": 1922.87, "word": " تشوف", "probability": 0.8050130208333334}, {"start": 1922.87, "end": 1923.97, "word": " قسموهم", "probability": 0.838623046875}, {"start": 1923.97, "end": 1924.09, "word": " ل", "probability": 0.96484375}, {"start": 1924.09, "end": 1924.35, "word": " تلت", "probability": 0.7057291666666666}, {"start": 1924.35, "end": 1924.93, "word": " اقسام", "probability": 0.74462890625}, {"start": 1924.93, "end": 1925.85, "word": " قالوا", "probability": 0.8017578125}, {"start": 1925.85, "end": 1926.07, "word": " type", "probability": 0.58154296875}, {"start": 1926.07, "end": 1926.49, "word": " 1", "probability": 0.4619140625}, {"start": 1926.49, "end": 1927.65, "word": " هو", "probability": 0.9677734375}, {"start": 1927.65, "end": 1928.13, "word": " ال", "probability": 0.70556640625}, {"start": 1928.13, "end": 1928.61, "word": " severe", "probability": 0.759765625}, {"start": 1928.61, "end": 1929.61, "word": " هو", "probability": 0.759765625}, {"start": 1929.61, "end": 1929.99, "word": " بيكون", "probability": 0.786376953125}], "temperature": 1.0}, {"id": 71, "seek": 195907, "start": 1930.63, "end": 1959.07, "text": "الـ expression of receptors أقل من 5% ويكون في غياب للفيبرينوجين في الـ alpha granules of beta كامة ماشي يبقى ال title أقل من 5% ومافيش فيبرينوجين نمرا اتنين ال title أقل من 20% لكن الفيبرينوجين موجود ماشي", "tokens": [6027, 39184, 6114, 295, 34102, 5551, 4587, 1211, 9154, 1025, 4, 4032, 1829, 30544, 8978, 32771, 1829, 16758, 24976, 5172, 1829, 26890, 9957, 29245, 9957, 8978, 2423, 39184, 8961, 9370, 3473, 295, 9861, 9122, 10943, 3660, 3714, 33599, 1829, 7251, 3555, 4587, 7578, 2423, 4876, 5551, 4587, 1211, 9154, 1025, 4, 4032, 15042, 41185, 8592, 8978, 26890, 9957, 29245, 9957, 
8717, 2304, 23557, 1975, 2655, 1863, 9957, 2423, 4876, 5551, 4587, 1211, 9154, 945, 4, 44381, 27188, 1829, 26890, 9957, 29245, 9957, 3714, 29245, 23328, 3714, 33599, 1829], "avg_logprob": -0.2507022364755695, "compression_ratio": 1.7173913043478262, "no_speech_prob": 4.172325134277344e-07, "words": [{"start": 1930.63, "end": 1930.99, "word": "الـ", "probability": 0.441650390625}, {"start": 1930.99, "end": 1931.55, "word": " expression", "probability": 0.456298828125}, {"start": 1931.55, "end": 1931.87, "word": " of", "probability": 0.96240234375}, {"start": 1931.87, "end": 1932.49, "word": " receptors", "probability": 0.81005859375}, {"start": 1932.49, "end": 1932.91, "word": " أقل", "probability": 0.9026692708333334}, {"start": 1932.91, "end": 1933.05, "word": " من", "probability": 0.99365234375}, {"start": 1933.05, "end": 1933.33, "word": " 5", "probability": 0.87939453125}, {"start": 1933.33, "end": 1933.87, "word": "%", "probability": 0.91259765625}, {"start": 1933.87, "end": 1935.81, "word": " ويكون", "probability": 0.6318359375}, {"start": 1935.81, "end": 1936.01, "word": " في", "probability": 0.7509765625}, {"start": 1936.01, "end": 1936.79, "word": " غياب", "probability": 0.90380859375}, {"start": 1936.79, "end": 1938.51, "word": " للفيبرينوجين", "probability": 0.7087751116071429}, {"start": 1938.51, "end": 1938.87, "word": " في", "probability": 0.91064453125}, {"start": 1938.87, "end": 1939.11, "word": " الـ", "probability": 0.612548828125}, {"start": 1939.11, "end": 1939.33, "word": " alpha", "probability": 0.321533203125}, {"start": 1939.33, "end": 1940.03, "word": " granules", "probability": 0.891845703125}, {"start": 1940.03, "end": 1940.21, "word": " of", "probability": 0.90869140625}, {"start": 1940.21, "end": 1940.61, "word": " beta", "probability": 0.2958984375}, {"start": 1940.61, "end": 1942.17, "word": " كامة", "probability": 0.74267578125}, {"start": 1942.17, "end": 1943.57, "word": " ماشي", "probability": 0.8287760416666666}, {"start": 
1943.57, "end": 1944.59, "word": " يبقى", "probability": 0.74420166015625}, {"start": 1944.59, "end": 1944.71, "word": " ال", "probability": 0.955078125}, {"start": 1944.71, "end": 1944.95, "word": " title", "probability": 0.68310546875}, {"start": 1944.95, "end": 1945.33, "word": " أقل", "probability": 0.9627278645833334}, {"start": 1945.33, "end": 1945.47, "word": " من", "probability": 0.9951171875}, {"start": 1945.47, "end": 1945.73, "word": " 5", "probability": 0.9833984375}, {"start": 1945.73, "end": 1946.33, "word": "%", "probability": 0.982421875}, {"start": 1946.33, "end": 1947.49, "word": " ومافيش", "probability": 0.9005126953125}, {"start": 1947.49, "end": 1948.93, "word": " فيبرينوجين", "probability": 0.816015625}, {"start": 1948.93, "end": 1949.63, "word": " نمرا", "probability": 0.63427734375}, {"start": 1949.63, "end": 1950.03, "word": " اتنين", "probability": 0.79248046875}, {"start": 1950.03, "end": 1951.43, "word": " ال", "probability": 0.94384765625}, {"start": 1951.43, "end": 1951.87, "word": " title", "probability": 0.96533203125}, {"start": 1951.87, "end": 1952.51, "word": " أقل", "probability": 0.9606119791666666}, {"start": 1952.51, "end": 1952.69, "word": " من", "probability": 0.99560546875}, {"start": 1952.69, "end": 1953.15, "word": " 20", "probability": 0.91552734375}, {"start": 1953.15, "end": 1953.69, "word": "%", "probability": 0.98779296875}, {"start": 1953.69, "end": 1955.39, "word": " لكن", "probability": 0.93212890625}, {"start": 1955.39, "end": 1956.43, "word": " الفيبرينوجين", "probability": 0.880859375}, {"start": 1956.43, "end": 1956.97, "word": " موجود", "probability": 0.96875}, {"start": 1956.97, "end": 1959.07, "word": " ماشي", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 72, "seek": 198205, "start": 1959.73, "end": 1982.05, "text": "نمرة تلاتة التايتر أكتر من خمسين في المية ماشي و لكن في qualitative disorders بمعنى حتى لو كان موجود بالشراب يعني عالية qualitative يعني dysfunction طبعا زي ما تشايفي طيب واحد", 
"tokens": [1863, 2304, 25720, 6055, 1211, 9307, 3660, 16712, 995, 36081, 2288, 5551, 4117, 2655, 2288, 9154, 16490, 2304, 3794, 9957, 8978, 9673, 10632, 3714, 33599, 1829, 4032, 44381, 8978, 31312, 20261, 4724, 2304, 3615, 1863, 7578, 11331, 49975, 45164, 25961, 3714, 29245, 23328, 20666, 46309, 16758, 37495, 22653, 6225, 6027, 10632, 31312, 37495, 22653, 32002, 23032, 3555, 3615, 995, 30767, 1829, 19446, 6055, 8592, 995, 33911, 1829, 23032, 1829, 3555, 36764, 24401], "avg_logprob": -0.3424657403606258, "compression_ratio": 1.4787234042553192, "no_speech_prob": 6.556510925292969e-07, "words": [{"start": 1959.73, "end": 1960.13, "word": "نمرة", "probability": 0.573974609375}, {"start": 1960.13, "end": 1960.77, "word": " تلاتة", "probability": 0.8759765625}, {"start": 1960.77, "end": 1962.95, "word": " التايتر", "probability": 0.6282958984375}, {"start": 1962.95, "end": 1963.89, "word": " أكتر", "probability": 0.865478515625}, {"start": 1963.89, "end": 1964.07, "word": " من", "probability": 0.9873046875}, {"start": 1964.07, "end": 1964.53, "word": " خمسين", "probability": 0.84698486328125}, {"start": 1964.53, "end": 1964.69, "word": " في", "probability": 0.4658203125}, {"start": 1964.69, "end": 1965.11, "word": " المية", "probability": 0.857421875}, {"start": 1965.11, "end": 1966.07, "word": " ماشي", "probability": 0.6637369791666666}, {"start": 1966.07, "end": 1966.85, "word": " و", "probability": 0.3544921875}, {"start": 1966.85, "end": 1967.69, "word": " لكن", "probability": 0.59228515625}, {"start": 1967.69, "end": 1967.99, "word": " في", "probability": 0.91015625}, {"start": 1967.99, "end": 1968.73, "word": " qualitative", "probability": 0.9189453125}, {"start": 1968.73, "end": 1969.77, "word": " disorders", "probability": 0.90771484375}, {"start": 1969.77, "end": 1970.45, "word": " بمعنى", "probability": 0.9083984375}, {"start": 1970.45, "end": 1970.85, "word": " حتى", "probability": 0.876953125}, {"start": 1970.85, "end": 1971.03, "word": " لو", "probability": 
0.99462890625}, {"start": 1971.03, "end": 1971.23, "word": " كان", "probability": 0.9951171875}, {"start": 1971.23, "end": 1971.77, "word": " موجود", "probability": 0.978515625}, {"start": 1971.77, "end": 1973.33, "word": " بالشراب", "probability": 0.481201171875}, {"start": 1973.33, "end": 1974.83, "word": " يعني", "probability": 0.6685791015625}, {"start": 1974.83, "end": 1975.27, "word": " عالية", "probability": 0.4952799479166667}, {"start": 1975.27, "end": 1977.09, "word": " qualitative", "probability": 0.7607421875}, {"start": 1977.09, "end": 1977.65, "word": " يعني", "probability": 0.96728515625}, {"start": 1977.65, "end": 1978.13, "word": " dysfunction", "probability": 0.265380859375}, {"start": 1978.13, "end": 1980.69, "word": " طبعا", "probability": 0.920654296875}, {"start": 1980.69, "end": 1980.85, "word": " زي", "probability": 0.879638671875}, {"start": 1980.85, "end": 1980.97, "word": " ما", "probability": 0.9306640625}, {"start": 1980.97, "end": 1981.45, "word": " تشايفي", "probability": 0.7396484375}, {"start": 1981.45, "end": 1981.89, "word": " طيب", "probability": 0.6964518229166666}, {"start": 1981.89, "end": 1982.05, "word": " واحد", "probability": 0.990234375}], "temperature": 1.0}, {"id": 73, "seek": 201008, "start": 1983.06, "end": 2010.08, "text": "أو تلاتة الـ severe هو واحد و mostly ال severe بيكون ناتج عن خلل في ال gene اللي مسؤول عن ال 2b مش عن ال 3e اللي مسؤول عن مين ال 2b مش ال 3e كيف نشخص بنفس الطرق الأولانية نمرواها نعمل platelet counter morphology imagine and these are normal", "tokens": [10721, 2407, 6055, 1211, 9307, 3660, 2423, 39184, 8922, 31439, 36764, 24401, 4032, 5240, 2423, 8922, 4724, 1829, 30544, 8717, 9307, 7435, 18871, 16490, 1211, 1211, 8978, 2423, 12186, 13672, 1829, 47524, 33604, 12610, 18871, 2423, 568, 65, 37893, 18871, 2423, 805, 68, 13672, 1829, 47524, 33604, 12610, 18871, 3714, 9957, 2423, 568, 65, 37893, 2423, 805, 68, 9122, 33911, 8717, 8592, 9778, 9381, 44945, 36178, 41950, 2288, 4587, 16247, 12610, 7649, 
10632, 8717, 29973, 14407, 11296, 8717, 25957, 1211, 3403, 15966, 5682, 25778, 1793, 3811, 293, 613, 366, 2710], "avg_logprob": -0.2429601664726551, "compression_ratio": 1.5714285714285714, "no_speech_prob": 7.748603820800781e-07, "words": [{"start": 1983.06, "end": 1983.24, "word": "أو", "probability": 0.666259765625}, {"start": 1983.24, "end": 1984.04, "word": " تلاتة", "probability": 0.86328125}, {"start": 1984.04, "end": 1984.42, "word": " الـ", "probability": 0.4300537109375}, {"start": 1984.42, "end": 1984.62, "word": " severe", "probability": 0.414794921875}, {"start": 1984.62, "end": 1984.82, "word": " هو", "probability": 0.93994140625}, {"start": 1984.82, "end": 1985.18, "word": " واحد", "probability": 0.9814453125}, {"start": 1985.18, "end": 1985.92, "word": " و", "probability": 0.6904296875}, {"start": 1985.92, "end": 1986.34, "word": " mostly", "probability": 0.5703125}, {"start": 1986.34, "end": 1986.54, "word": " ال", "probability": 0.8837890625}, {"start": 1986.54, "end": 1986.86, "word": " severe", "probability": 0.52587890625}, {"start": 1986.86, "end": 1987.26, "word": " بيكون", "probability": 0.69873046875}, {"start": 1987.26, "end": 1987.64, "word": " ناتج", "probability": 0.97412109375}, {"start": 1987.64, "end": 1987.84, "word": " عن", "probability": 0.9306640625}, {"start": 1987.84, "end": 1988.44, "word": " خلل", "probability": 0.9853515625}, {"start": 1988.44, "end": 1989.12, "word": " في", "probability": 0.8447265625}, {"start": 1989.12, "end": 1989.36, "word": " ال", "probability": 0.56005859375}, {"start": 1989.36, "end": 1989.8, "word": " gene", "probability": 0.80029296875}, {"start": 1989.8, "end": 1990.14, "word": " اللي", "probability": 0.84814453125}, {"start": 1990.14, "end": 1990.56, "word": " مسؤول", "probability": 0.8513997395833334}, {"start": 1990.56, "end": 1990.72, "word": " عن", "probability": 0.98486328125}, {"start": 1990.72, "end": 1990.96, "word": " ال", "probability": 0.65185546875}, {"start": 1990.96, "end": 1991.2, 
"word": " 2b", "probability": 0.521484375}, {"start": 1991.2, "end": 1991.52, "word": " مش", "probability": 0.8359375}, {"start": 1991.52, "end": 1991.66, "word": " عن", "probability": 0.7392578125}, {"start": 1991.66, "end": 1991.78, "word": " ال", "probability": 0.955078125}, {"start": 1991.78, "end": 1992.28, "word": " 3e", "probability": 0.82666015625}, {"start": 1992.28, "end": 1993.24, "word": " اللي", "probability": 0.916259765625}, {"start": 1993.24, "end": 1993.58, "word": " مسؤول", "probability": 0.90673828125}, {"start": 1993.58, "end": 1993.76, "word": " عن", "probability": 0.98681640625}, {"start": 1993.76, "end": 1994.18, "word": " مين", "probability": 0.923828125}, {"start": 1994.18, "end": 1994.78, "word": " ال", "probability": 0.51611328125}, {"start": 1994.78, "end": 1995.26, "word": " 2b", "probability": 0.904541015625}, {"start": 1995.26, "end": 1995.96, "word": " مش", "probability": 0.87646484375}, {"start": 1995.96, "end": 1996.7, "word": " ال", "probability": 0.9423828125}, {"start": 1996.7, "end": 1997.36, "word": " 3e", "probability": 0.753173828125}, {"start": 1997.36, "end": 1998.08, "word": " كيف", "probability": 0.972900390625}, {"start": 1998.08, "end": 1998.86, "word": " نشخص", "probability": 0.949462890625}, {"start": 1998.86, "end": 2000.7, "word": " بنفس", "probability": 0.90673828125}, {"start": 2000.7, "end": 2001.06, "word": " الطرق", "probability": 0.962890625}, {"start": 2001.06, "end": 2001.8, "word": " الأولانية", "probability": 0.82757568359375}, {"start": 2001.8, "end": 2003.06, "word": " نمرواها", "probability": 0.702880859375}, {"start": 2003.06, "end": 2003.46, "word": " نعمل", "probability": 0.935546875}, {"start": 2003.46, "end": 2003.84, "word": " platelet", "probability": 0.56268310546875}, {"start": 2003.84, "end": 2004.22, "word": " counter", "probability": 0.433349609375}, {"start": 2004.22, "end": 2005.04, "word": " morphology", "probability": 0.935302734375}, {"start": 2005.04, "end": 2006.92, "word": " 
imagine", "probability": 0.484619140625}, {"start": 2006.92, "end": 2008.06, "word": " and", "probability": 0.798828125}, {"start": 2008.06, "end": 2008.8, "word": " these", "probability": 0.83056640625}, {"start": 2008.8, "end": 2009.46, "word": " are", "probability": 0.95654296875}, {"start": 2009.46, "end": 2010.08, "word": " normal", "probability": 0.8779296875}], "temperature": 1.0}, {"id": 74, "seek": 203976, "start": 2011.16, "end": 2039.76, "text": "في نجلةها طبيعية نمرة اتنين bleeding time and it's usually prolonged نمرة تلاتة aggregation study ماشي ووجدوا ان ال aggregation study نتيجتها ان تبقوا علي عكس اللي شفناه في ال adhesion defect", "tokens": [41185, 8717, 7435, 37977, 11296, 23032, 21292, 3615, 10632, 8717, 2304, 25720, 1975, 2655, 1863, 9957, 19312, 565, 293, 309, 311, 2673, 41237, 8717, 2304, 25720, 6055, 1211, 9307, 3660, 16743, 399, 2979, 3714, 33599, 1829, 4032, 29245, 3215, 14407, 16472, 2423, 16743, 399, 2979, 8717, 31371, 7435, 2655, 11296, 16472, 6055, 3555, 4587, 14407, 25894, 6225, 4117, 3794, 13672, 1829, 13412, 5172, 8315, 3224, 8978, 2423, 614, 38571, 16445], "avg_logprob": -0.2568221847775956, "compression_ratio": 1.4438502673796791, "no_speech_prob": 0.0, "words": [{"start": 2011.16, "end": 2011.34, "word": "في", "probability": 0.463134765625}, {"start": 2011.34, "end": 2011.94, "word": " نجلةها", "probability": 0.598907470703125}, {"start": 2011.94, "end": 2013.26, "word": " طبيعية", "probability": 0.76873779296875}, {"start": 2013.26, "end": 2015.16, "word": " نمرة", "probability": 0.4197184244791667}, {"start": 2015.16, "end": 2015.58, "word": " اتنين", "probability": 0.8739013671875}, {"start": 2015.58, "end": 2016.66, "word": " bleeding", "probability": 0.60400390625}, {"start": 2016.66, "end": 2017.5, "word": " time", "probability": 0.84619140625}, {"start": 2017.5, "end": 2018.96, "word": " and", "probability": 0.71728515625}, {"start": 2018.96, "end": 2019.3, "word": " it's", "probability": 0.7236328125}, {"start": 2019.3, 
"end": 2019.88, "word": " usually", "probability": 0.89990234375}, {"start": 2019.88, "end": 2020.52, "word": " prolonged", "probability": 0.86083984375}, {"start": 2020.52, "end": 2022.96, "word": " نمرة", "probability": 0.91015625}, {"start": 2022.96, "end": 2023.6, "word": " تلاتة", "probability": 0.9241943359375}, {"start": 2023.6, "end": 2025.36, "word": " aggregation", "probability": 0.911376953125}, {"start": 2025.36, "end": 2025.92, "word": " study", "probability": 0.9462890625}, {"start": 2025.92, "end": 2028.12, "word": " ماشي", "probability": 0.7716471354166666}, {"start": 2028.12, "end": 2030.02, "word": " ووجدوا", "probability": 0.7733154296875}, {"start": 2030.02, "end": 2031.06, "word": " ان", "probability": 0.81591796875}, {"start": 2031.06, "end": 2031.28, "word": " ال", "probability": 0.8759765625}, {"start": 2031.28, "end": 2032.08, "word": " aggregation", "probability": 0.94384765625}, {"start": 2032.08, "end": 2032.52, "word": " study", "probability": 0.98583984375}, {"start": 2032.52, "end": 2033.64, "word": " نتيجتها", "probability": 0.9650390625}, {"start": 2033.64, "end": 2034.1, "word": " ان", "probability": 0.67138671875}, {"start": 2034.1, "end": 2034.34, "word": " تبقوا", "probability": 0.72576904296875}, {"start": 2034.34, "end": 2034.58, "word": " علي", "probability": 0.53857421875}, {"start": 2034.58, "end": 2035.5, "word": " عكس", "probability": 0.8429361979166666}, {"start": 2035.5, "end": 2037.06, "word": " اللي", "probability": 0.836181640625}, {"start": 2037.06, "end": 2038.14, "word": " شفناه", "probability": 0.955322265625}, {"start": 2038.14, "end": 2038.8, "word": " في", "probability": 0.837890625}, {"start": 2038.8, "end": 2038.88, "word": " ال", "probability": 0.7470703125}, {"start": 2038.88, "end": 2039.32, "word": " adhesion", "probability": 0.770751953125}, {"start": 2039.32, "end": 2039.76, "word": " defect", "probability": 0.83447265625}], "temperature": 1.0}, {"id": 75, "seek": 206996, "start": 2041.65, "end": 
2069.97, "text": "البرنامج سوليير وياش و ال bomb of lebron هناك كان في response لكل agonist ما عدا مين هنا مافيش response لكل ال agonist ما عدا ما عدا ال assassin فاهمين عليها سهل حفظها هناك مافيش في response لكل ما عدا ال assassin هنا مافيش ما عدا ال assassin طبعا التقنيك الرابع اللي بقى انا بشخص من خلاله هو ال flow cytometry", "tokens": [6027, 26890, 8315, 2304, 7435, 8608, 12610, 1829, 13546, 4032, 1829, 33599, 4032, 2423, 7851, 295, 476, 1443, 266, 34105, 4117, 25961, 8978, 4134, 5296, 28820, 623, 266, 468, 19446, 6225, 28259, 3714, 9957, 34105, 19446, 41185, 8592, 4134, 5296, 28820, 2423, 623, 266, 468, 19446, 6225, 28259, 19446, 6225, 28259, 2423, 36294, 6156, 995, 16095, 9957, 25894, 11296, 8608, 3224, 1211, 11331, 5172, 19913, 11296, 34105, 4117, 19446, 41185, 8592, 8978, 4134, 5296, 28820, 19446, 6225, 28259, 2423, 36294, 34105, 19446, 41185, 8592, 19446, 6225, 28259, 2423, 36294, 23032, 3555, 3615, 995, 16712, 4587, 22653, 4117, 34892, 16758, 3615, 13672, 1829, 4724, 4587, 7578, 1975, 8315, 4724, 8592, 9778, 9381, 9154, 16490, 1211, 6027, 3224, 31439, 2423, 3095, 40248, 34730], "avg_logprob": -0.34707991754422424, "compression_ratio": 2.074561403508772, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2041.65, "end": 2042.23, "word": "البرنامج", "probability": 0.617431640625}, {"start": 2042.23, "end": 2042.73, "word": " سوليير", "probability": 0.60833740234375}, {"start": 2042.73, "end": 2043.31, "word": " وياش", "probability": 0.5533650716145834}, {"start": 2043.31, "end": 2044.59, "word": " و", "probability": 0.62890625}, {"start": 2044.59, "end": 2044.85, "word": " ال", "probability": 0.2137451171875}, {"start": 2044.85, "end": 2045.09, "word": " bomb", "probability": 0.1541748046875}, {"start": 2045.09, "end": 2045.23, "word": " of", "probability": 0.151611328125}, {"start": 2045.23, "end": 2045.65, "word": " lebron", "probability": 0.6138509114583334}, {"start": 2045.65, "end": 2046.91, "word": " هناك", "probability": 0.93505859375}, 
{"start": 2046.91, "end": 2047.15, "word": " كان", "probability": 0.978515625}, {"start": 2047.15, "end": 2047.33, "word": " في", "probability": 0.84326171875}, {"start": 2047.33, "end": 2048.01, "word": " response", "probability": 0.828125}, {"start": 2048.01, "end": 2048.67, "word": " لكل", "probability": 0.515625}, {"start": 2048.67, "end": 2049.55, "word": " agonist", "probability": 0.8318684895833334}, {"start": 2049.55, "end": 2050.29, "word": " ما", "probability": 0.7626953125}, {"start": 2050.29, "end": 2050.45, "word": " عدا", "probability": 0.86572265625}, {"start": 2050.45, "end": 2050.89, "word": " مين", "probability": 0.8017578125}, {"start": 2050.89, "end": 2052.53, "word": " هنا", "probability": 0.2978515625}, {"start": 2052.53, "end": 2053.01, "word": " مافيش", "probability": 0.8660481770833334}, {"start": 2053.01, "end": 2053.73, "word": " response", "probability": 0.9462890625}, {"start": 2053.73, "end": 2054.33, "word": " لكل", "probability": 0.95654296875}, {"start": 2054.33, "end": 2054.47, "word": " ال", "probability": 0.3173828125}, {"start": 2054.47, "end": 2054.99, "word": " agonist", "probability": 0.8782552083333334}, {"start": 2054.99, "end": 2055.25, "word": " ما", "probability": 0.91943359375}, {"start": 2055.25, "end": 2055.69, "word": " عدا", "probability": 0.9208984375}, {"start": 2055.69, "end": 2056.69, "word": " ما", "probability": 0.30712890625}, {"start": 2056.69, "end": 2056.87, "word": " عدا", "probability": 0.822509765625}, {"start": 2056.87, "end": 2056.99, "word": " ال", "probability": 0.245361328125}, {"start": 2056.99, "end": 2057.21, "word": " assassin", "probability": 0.163818359375}, {"start": 2057.21, "end": 2057.85, "word": " فاهمين", "probability": 0.941650390625}, {"start": 2057.85, "end": 2058.27, "word": " عليها", "probability": 0.753662109375}, {"start": 2058.27, "end": 2058.73, "word": " سهل", "probability": 0.89501953125}, {"start": 2058.73, "end": 2059.73, "word": " حفظها", "probability": 0.9085693359375}, 
{"start": 2059.73, "end": 2060.13, "word": " هناك", "probability": 0.84521484375}, {"start": 2060.13, "end": 2060.91, "word": " مافيش", "probability": 0.802490234375}, {"start": 2060.91, "end": 2061.43, "word": " في", "probability": 0.5556640625}, {"start": 2061.43, "end": 2062.27, "word": " response", "probability": 0.87744140625}, {"start": 2062.27, "end": 2062.73, "word": " لكل", "probability": 0.60552978515625}, {"start": 2062.73, "end": 2062.91, "word": " ما", "probability": 0.473876953125}, {"start": 2062.91, "end": 2063.05, "word": " عدا", "probability": 0.8095703125}, {"start": 2063.05, "end": 2063.67, "word": " ال", "probability": 0.916015625}, {"start": 2063.67, "end": 2063.67, "word": " assassin", "probability": 0.9677734375}, {"start": 2063.67, "end": 2063.95, "word": " هنا", "probability": 0.849609375}, {"start": 2063.95, "end": 2064.57, "word": " مافيش", "probability": 0.9713541666666666}, {"start": 2064.57, "end": 2065.05, "word": " ما", "probability": 0.916015625}, {"start": 2065.05, "end": 2065.25, "word": " عدا", "probability": 0.8525390625}, {"start": 2065.25, "end": 2065.37, "word": " ال", "probability": 0.966796875}, {"start": 2065.37, "end": 2065.71, "word": " assassin", "probability": 0.970703125}, {"start": 2065.71, "end": 2066.67, "word": " طبعا", "probability": 0.9615478515625}, {"start": 2066.67, "end": 2067.15, "word": " التقنيك", "probability": 0.53106689453125}, {"start": 2067.15, "end": 2067.47, "word": " الرابع", "probability": 0.9095052083333334}, {"start": 2067.47, "end": 2067.73, "word": " اللي", "probability": 0.815185546875}, {"start": 2067.73, "end": 2067.89, "word": " بقى", "probability": 0.8689778645833334}, {"start": 2067.89, "end": 2068.01, "word": " انا", "probability": 0.5369873046875}, {"start": 2068.01, "end": 2068.43, "word": " بشخص", "probability": 0.8665771484375}, {"start": 2068.43, "end": 2068.57, "word": " من", "probability": 0.99609375}, {"start": 2068.57, "end": 2068.91, "word": " خلاله", "probability": 
0.9814453125}, {"start": 2068.91, "end": 2069.11, "word": " هو", "probability": 0.970703125}, {"start": 2069.11, "end": 2069.27, "word": " ال", "probability": 0.61083984375}, {"start": 2069.27, "end": 2069.45, "word": " flow", "probability": 0.9033203125}, {"start": 2069.45, "end": 2069.97, "word": " cytometry", "probability": 0.968017578125}], "temperature": 1.0}, {"id": 76, "seek": 209919, "start": 2071.94, "end": 2099.2, "text": "و من خلال استخدام انتبادي مونوكلون لصورة CD 41 و 61 و دول إذا فاكرين هم أخدناهم في بريماتولوجي و هي عبارة عن رسبتورز على سطح مين على سطح بليتلت واحد 2B و التاني 3E و هذه الصورة معبرة جدا لتو بليتلت مسكين في بعض مين مسيكهم في النصف؟ الفابرنر و المرض", "tokens": [2407, 9154, 16490, 1211, 6027, 44713, 9778, 3215, 10943, 16472, 2655, 3555, 995, 16254, 3714, 11536, 2407, 4117, 1211, 11536, 5296, 9381, 13063, 3660, 6743, 18173, 4032, 28294, 4032, 11778, 12610, 11933, 15730, 6156, 995, 37983, 9957, 8032, 2304, 5551, 9778, 3215, 8315, 16095, 8978, 4724, 16572, 2304, 9307, 12610, 29245, 1829, 4032, 39896, 6225, 3555, 9640, 3660, 18871, 12602, 3794, 3555, 2655, 13063, 11622, 15844, 8608, 9566, 5016, 3714, 9957, 15844, 8608, 9566, 5016, 4724, 20292, 2655, 1211, 2655, 36764, 24401, 568, 33, 4032, 16712, 7649, 1829, 805, 36, 4032, 29538, 31767, 13063, 3660, 20449, 3555, 25720, 10874, 28259, 5296, 2655, 2407, 4724, 20292, 2655, 1211, 2655, 47524, 4117, 9957, 8978, 45030, 11242, 3714, 9957, 47524, 1829, 4117, 16095, 8978, 28239, 9381, 5172, 22807, 27188, 16758, 2288, 1863, 2288, 4032, 9673, 43042], "avg_logprob": -0.5013992341596689, "compression_ratio": 1.8125, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 2071.94, "end": 2072.16, "word": "و", "probability": 0.5361328125}, {"start": 2072.16, "end": 2072.6, "word": " من", "probability": 0.09991455078125}, {"start": 2072.6, "end": 2072.62, "word": " خلال", "probability": 0.8660481770833334}, {"start": 2072.62, "end": 2072.98, "word": " استخدام", "probability": 0.755615234375}, {"start": 
2072.98, "end": 2074.02, "word": " انتبادي", "probability": 0.49130859375}, {"start": 2074.02, "end": 2074.02, "word": " مونوكلون", "probability": 0.6261189778645834}, {"start": 2074.02, "end": 2075.3, "word": " لصورة", "probability": 0.403900146484375}, {"start": 2075.3, "end": 2075.82, "word": " CD", "probability": 0.57763671875}, {"start": 2075.82, "end": 2076.78, "word": " 41", "probability": 0.3583984375}, {"start": 2076.78, "end": 2077.06, "word": " و", "probability": 0.70751953125}, {"start": 2077.06, "end": 2077.64, "word": " 61", "probability": 0.78759765625}, {"start": 2077.64, "end": 2078.38, "word": " و", "probability": 0.293212890625}, {"start": 2078.38, "end": 2078.52, "word": " دول", "probability": 0.5472412109375}, {"start": 2078.52, "end": 2078.64, "word": " إذا", "probability": 0.68505859375}, {"start": 2078.64, "end": 2079.08, "word": " فاكرين", "probability": 0.91064453125}, {"start": 2079.08, "end": 2079.24, "word": " هم", "probability": 0.49658203125}, {"start": 2079.24, "end": 2079.68, "word": " أخدناهم", "probability": 0.8876953125}, {"start": 2079.68, "end": 2079.8, "word": " في", "probability": 0.876953125}, {"start": 2079.8, "end": 2080.48, "word": " بريماتولوجي", "probability": 0.5876552036830357}, {"start": 2080.48, "end": 2081.0, "word": " و", "probability": 0.452880859375}, {"start": 2081.0, "end": 2081.12, "word": " هي", "probability": 0.134521484375}, {"start": 2081.12, "end": 2081.32, "word": " عبارة", "probability": 0.8973388671875}, {"start": 2081.32, "end": 2081.5, "word": " عن", "probability": 0.97900390625}, {"start": 2081.5, "end": 2082.14, "word": " رسبتورز", "probability": 0.5708516438802084}, {"start": 2082.14, "end": 2082.28, "word": " على", "probability": 0.845703125}, {"start": 2082.28, "end": 2082.66, "word": " سطح", "probability": 0.93896484375}, {"start": 2082.66, "end": 2083.08, "word": " مين", "probability": 0.460693359375}, {"start": 2083.08, "end": 2083.82, "word": " على", "probability": 0.3583984375}, {"start": 
2083.82, "end": 2084.3, "word": " سطح", "probability": 0.99169921875}, {"start": 2084.3, "end": 2085.22, "word": " بليتلت", "probability": 0.63636474609375}, {"start": 2085.22, "end": 2086.14, "word": " واحد", "probability": 0.8603515625}, {"start": 2086.14, "end": 2086.62, "word": " 2B", "probability": 0.40997314453125}, {"start": 2086.62, "end": 2086.82, "word": " و", "probability": 0.63623046875}, {"start": 2086.82, "end": 2087.18, "word": " التاني", "probability": 0.772216796875}, {"start": 2087.18, "end": 2088.52, "word": " 3E", "probability": 0.789794921875}, {"start": 2088.52, "end": 2090.2, "word": " و", "probability": 0.837890625}, {"start": 2090.2, "end": 2090.32, "word": " هذه", "probability": 0.32080078125}, {"start": 2090.32, "end": 2090.56, "word": " الصورة", "probability": 0.9088541666666666}, {"start": 2090.56, "end": 2091.0, "word": " معبرة", "probability": 0.7275390625}, {"start": 2091.0, "end": 2091.3, "word": " جدا", "probability": 0.960693359375}, {"start": 2091.3, "end": 2092.96, "word": " لتو", "probability": 0.4764811197916667}, {"start": 2092.96, "end": 2093.46, "word": " بليتلت", "probability": 0.97080078125}, {"start": 2093.46, "end": 2093.8, "word": " مسكين", "probability": 0.91064453125}, {"start": 2093.8, "end": 2093.94, "word": " في", "probability": 0.93017578125}, {"start": 2093.94, "end": 2094.28, "word": " بعض", "probability": 0.9794921875}, {"start": 2094.28, "end": 2095.0, "word": " مين", "probability": 0.841064453125}, {"start": 2095.0, "end": 2095.4, "word": " مسيكهم", "probability": 0.78057861328125}, {"start": 2095.4, "end": 2095.56, "word": " في", "probability": 0.94921875}, {"start": 2095.56, "end": 2096.44, "word": " النصف؟", "probability": 0.73809814453125}, {"start": 2096.44, "end": 2097.74, "word": " الفابرنر", "probability": 0.4936767578125}, {"start": 2097.74, "end": 2098.72, "word": " و", "probability": 0.29443359375}, {"start": 2098.72, "end": 2099.2, "word": " المرض", "probability": 0.97265625}], "temperature": 
1.0}, {"id": 77, "seek": 212266, "start": 2100.72, "end": 2122.66, "text": "الخلل اللي بيصير إما غياب ال receptor و أيه ال receptor؟ إذا غياب إيش بيعمل؟ Thrombostimia أو غياب ال fibrinogen، أيوة، إيش بيعمل؟ A-fibrinogen يعني، A-fibrinogen، ماشي؟ و ال receptors اللي احنا شفناها طبعا سببها برا signal transduction", "tokens": [6027, 9778, 1211, 1211, 13672, 1829, 4724, 1829, 9381, 13546, 11933, 15042, 32771, 1829, 16758, 2423, 32264, 4032, 36632, 3224, 2423, 32264, 22807, 11933, 15730, 32771, 1829, 16758, 11933, 1829, 8592, 4724, 1829, 25957, 1211, 22807, 41645, 3548, 555, 332, 654, 34051, 32771, 1829, 16758, 2423, 283, 6414, 259, 8799, 12399, 36632, 2407, 3660, 12399, 11933, 1829, 8592, 4724, 1829, 25957, 1211, 22807, 316, 12, 69, 6414, 259, 8799, 37495, 22653, 12399, 316, 12, 69, 6414, 259, 8799, 12399, 3714, 33599, 1829, 22807, 4032, 2423, 34102, 13672, 1829, 1975, 5016, 8315, 13412, 5172, 8315, 11296, 23032, 3555, 3615, 995, 8608, 3555, 3555, 11296, 4724, 23557, 6358, 1145, 40335], "avg_logprob": -0.31278668521741115, "compression_ratio": 1.6911764705882353, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2100.72, "end": 2101.36, "word": "الخلل", "probability": 0.91259765625}, {"start": 2101.36, "end": 2101.52, "word": " اللي", "probability": 0.741943359375}, {"start": 2101.52, "end": 2101.88, "word": " بيصير", "probability": 0.9354248046875}, {"start": 2101.88, "end": 2102.1, "word": " إما", "probability": 0.61041259765625}, {"start": 2102.1, "end": 2102.44, "word": " غياب", "probability": 0.93017578125}, {"start": 2102.44, "end": 2102.62, "word": " ال", "probability": 0.82958984375}, {"start": 2102.62, "end": 2103.04, "word": " receptor", "probability": 0.43212890625}, {"start": 2103.04, "end": 2103.28, "word": " و", "probability": 0.72412109375}, {"start": 2103.28, "end": 2103.58, "word": " أيه", "probability": 0.839111328125}, {"start": 2103.58, "end": 2103.64, "word": " ال", "probability": 0.966796875}, {"start": 2103.64, "end": 2104.2, "word": 
" receptor؟", "probability": 0.6234130859375}, {"start": 2104.2, "end": 2104.3, "word": " إذا", "probability": 0.917236328125}, {"start": 2104.3, "end": 2104.58, "word": " غياب", "probability": 0.9786783854166666}, {"start": 2104.58, "end": 2104.74, "word": " إيش", "probability": 0.6546223958333334}, {"start": 2104.74, "end": 2105.5, "word": " بيعمل؟", "probability": 0.94560546875}, {"start": 2105.5, "end": 2107.4, "word": " Thrombostimia", "probability": 0.4473388671875}, {"start": 2107.4, "end": 2108.58, "word": " أو", "probability": 0.68896484375}, {"start": 2108.58, "end": 2108.92, "word": " غياب", "probability": 0.97607421875}, {"start": 2108.92, "end": 2109.1, "word": " ال", "probability": 0.66357421875}, {"start": 2109.1, "end": 2110.4, "word": " fibrinogen،", "probability": 0.60869140625}, {"start": 2110.4, "end": 2110.92, "word": " أيوة،", "probability": 0.6300048828125}, {"start": 2110.92, "end": 2111.14, "word": " إيش", "probability": 0.9425455729166666}, {"start": 2111.14, "end": 2112.14, "word": " بيعمل؟", "probability": 0.99033203125}, {"start": 2112.14, "end": 2112.42, "word": " A", "probability": 0.638671875}, {"start": 2112.42, "end": 2113.22, "word": "-fibrinogen", "probability": 0.877734375}, {"start": 2113.22, "end": 2114.16, "word": " يعني،", "probability": 0.6959635416666666}, {"start": 2114.16, "end": 2114.5, "word": " A", "probability": 0.80322265625}, {"start": 2114.5, "end": 2116.28, "word": "-fibrinogen،", "probability": 0.8494059244791666}, {"start": 2116.28, "end": 2117.34, "word": " ماشي؟", "probability": 0.851806640625}, {"start": 2117.34, "end": 2117.58, "word": " و", "probability": 0.6796875}, {"start": 2117.58, "end": 2117.72, "word": " ال", "probability": 0.291259765625}, {"start": 2117.72, "end": 2118.14, "word": " receptors", "probability": 0.47216796875}, {"start": 2118.14, "end": 2118.5, "word": " اللي", "probability": 0.984619140625}, {"start": 2118.5, "end": 2118.84, "word": " احنا", "probability": 0.8478190104166666}, 
{"start": 2118.84, "end": 2120.14, "word": " شفناها", "probability": 0.9368896484375}, {"start": 2120.14, "end": 2120.44, "word": " طبعا", "probability": 0.8902587890625}, {"start": 2120.44, "end": 2120.96, "word": " سببها", "probability": 0.58880615234375}, {"start": 2120.96, "end": 2121.54, "word": " برا", "probability": 0.381591796875}, {"start": 2121.54, "end": 2121.9, "word": " signal", "probability": 0.40380859375}, {"start": 2121.9, "end": 2122.66, "word": " transduction", "probability": 0.806884765625}], "temperature": 1.0}, {"id": 78, "seek": 215067, "start": 2123.16, "end": 2150.68, "text": "البروسيجيرز أخدتهوا في ال .. مع الدكتور فضل أيه هو؟ وبعدين أضيف agorist بيصير فيه تنشيط لفسفاتيديل انوسيطول ديفوسفيت أخدتهوا الفي الميكانيزم هذا، صح؟ طبعا هذا بتفنع، قدينا two results، diacetylglyceride وانوسيطول triphosphate ال EG هو عامل بالـD", "tokens": [6027, 26890, 2407, 3794, 1829, 7435, 13546, 11622, 5551, 9778, 3215, 47395, 14407, 8978, 2423, 4386, 20449, 32748, 4117, 2655, 13063, 6156, 11242, 1211, 36632, 3224, 31439, 22807, 46599, 22488, 9957, 5551, 11242, 33911, 623, 284, 468, 4724, 1829, 9381, 13546, 8978, 3224, 6055, 1863, 8592, 1829, 9566, 5296, 5172, 3794, 5172, 9307, 1829, 16254, 1211, 16472, 2407, 3794, 1829, 9566, 12610, 11778, 33911, 41779, 5172, 36081, 5551, 9778, 3215, 47395, 14407, 27188, 1829, 9673, 1829, 41361, 1829, 11622, 2304, 23758, 12399, 20328, 5016, 22807, 23032, 3555, 3615, 995, 23758, 39894, 5172, 1863, 3615, 12399, 12174, 3215, 9957, 995, 732, 3542, 12399, 1026, 326, 2210, 75, 70, 356, 1776, 482, 4032, 7649, 2407, 3794, 1829, 9566, 12610, 1376, 950, 16378, 473, 2423, 462, 38, 31439, 6225, 10943, 1211, 20666, 39184, 35], "avg_logprob": -0.539299235199437, "compression_ratio": 1.5521235521235521, "no_speech_prob": 0.0, "words": [{"start": 2123.16, "end": 2124.04, "word": "البروسيجيرز", "probability": 0.5070343017578125}, {"start": 2124.04, "end": 2124.44, "word": " أخدتهوا", "probability": 0.6083984375}, {"start": 2124.44, "end": 
2124.5, "word": " في", "probability": 0.77294921875}, {"start": 2124.5, "end": 2124.72, "word": " ال", "probability": 0.3828125}, {"start": 2124.72, "end": 2124.88, "word": " ..", "probability": 0.32763671875}, {"start": 2124.88, "end": 2125.12, "word": " مع", "probability": 0.52685546875}, {"start": 2125.12, "end": 2125.36, "word": " الدكتور", "probability": 0.8721923828125}, {"start": 2125.36, "end": 2125.8, "word": " فضل", "probability": 0.6846516927083334}, {"start": 2125.8, "end": 2126.66, "word": " أيه", "probability": 0.44793701171875}, {"start": 2126.66, "end": 2126.92, "word": " هو؟", "probability": 0.3721923828125}, {"start": 2126.92, "end": 2127.2, "word": " وبعدين", "probability": 0.5576985677083334}, {"start": 2127.2, "end": 2127.4, "word": " أضيف", "probability": 0.6816813151041666}, {"start": 2127.4, "end": 2128.0, "word": " agorist", "probability": 0.627685546875}, {"start": 2128.0, "end": 2129.38, "word": " بيصير", "probability": 0.73907470703125}, {"start": 2129.38, "end": 2129.54, "word": " فيه", "probability": 0.9296875}, {"start": 2129.54, "end": 2130.04, "word": " تنشيط", "probability": 0.86259765625}, {"start": 2130.04, "end": 2131.26, "word": " لفسفاتيديل", "probability": 0.59503173828125}, {"start": 2131.26, "end": 2132.6, "word": " انوسيطول", "probability": 0.53125}, {"start": 2132.6, "end": 2133.38, "word": " ديفوسفيت", "probability": 0.485205078125}, {"start": 2133.38, "end": 2134.26, "word": " أخدتهوا", "probability": 0.767724609375}, {"start": 2134.26, "end": 2134.42, "word": " الفي", "probability": 0.33984375}, {"start": 2134.42, "end": 2135.18, "word": " الميكانيزم", "probability": 0.8495686848958334}, {"start": 2135.18, "end": 2135.54, "word": " هذا،", "probability": 0.4305419921875}, {"start": 2135.54, "end": 2136.44, "word": " صح؟", "probability": 0.9820963541666666}, {"start": 2136.44, "end": 2137.6, "word": " طبعا", "probability": 0.9447021484375}, {"start": 2137.6, "end": 2137.74, "word": " هذا", "probability": 0.169189453125}, 
{"start": 2137.74, "end": 2138.36, "word": " بتفنع،", "probability": 0.58037109375}, {"start": 2138.36, "end": 2138.88, "word": " قدينا", "probability": 0.61248779296875}, {"start": 2138.88, "end": 2139.26, "word": " two", "probability": 0.7958984375}, {"start": 2139.26, "end": 2140.38, "word": " results،", "probability": 0.6260986328125}, {"start": 2140.38, "end": 2142.66, "word": " diacetylglyceride", "probability": 0.6262054443359375}, {"start": 2142.66, "end": 2145.1, "word": " وانوسيطول", "probability": 0.8378208705357143}, {"start": 2145.1, "end": 2145.98, "word": " triphosphate", "probability": 0.744354248046875}, {"start": 2145.98, "end": 2147.24, "word": " ال", "probability": 0.66455078125}, {"start": 2147.24, "end": 2147.78, "word": " EG", "probability": 0.49395751953125}, {"start": 2147.78, "end": 2148.34, "word": " هو", "probability": 0.56591796875}, {"start": 2148.34, "end": 2149.16, "word": " عامل", "probability": 0.8816731770833334}, {"start": 2149.16, "end": 2150.68, "word": " بالـD", "probability": 0.489501953125}], "temperature": 1.0}, {"id": 79, "seek": 217867, "start": 2151.21, "end": 2178.67, "text": "بيوروبيت كاينيز كومبليكس هذا ممكن يدى طاقة للريسبتور يعسى از يفتح ويمسك فى مين ويمسك فى ال fiber انا بتلت ال degradation study يا شباب شرحناها لكم وهى الصورة تبعتها بالظبط هى اللى بلتت هى معلقة ولا مش معلقة بعد ما صار فى إضاءة ولا لأ وناس اتخفت ولا لأ", "tokens": [21292, 13063, 2407, 21292, 2655, 9122, 995, 9957, 1829, 11622, 9122, 20498, 3555, 20292, 4117, 3794, 23758, 3714, 43020, 7251, 3215, 7578, 23032, 995, 28671, 24976, 16572, 35457, 2655, 13063, 37495, 3794, 7578, 1975, 11622, 7251, 5172, 2655, 5016, 4032, 32640, 3794, 4117, 6156, 7578, 3714, 9957, 4032, 32640, 3794, 4117, 6156, 7578, 2423, 12874, 1975, 8315, 39894, 1211, 2655, 2423, 40519, 2979, 35186, 13412, 3555, 16758, 13412, 2288, 5016, 8315, 11296, 5296, 24793, 37037, 7578, 31767, 13063, 3660, 6055, 3555, 34268, 11296, 20666, 19913, 3555, 9566, 8032, 7578, 13672, 7578, 4724, 1211, 
2655, 2655, 8032, 7578, 3714, 30241, 28671, 49429, 37893, 3714, 30241, 28671, 39182, 19446, 20328, 9640, 6156, 7578, 11933, 11242, 16606, 3660, 49429, 5296, 10721, 4032, 8315, 3794, 1975, 2655, 9778, 5172, 2655, 49429, 5296, 10721], "avg_logprob": -0.3326923184669935, "compression_ratio": 1.728, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2151.21, "end": 2151.79, "word": "بيوروبيت", "probability": 0.4662841796875}, {"start": 2151.79, "end": 2152.31, "word": " كاينيز", "probability": 0.658349609375}, {"start": 2152.31, "end": 2153.11, "word": " كومبليكس", "probability": 0.7457275390625}, {"start": 2153.11, "end": 2154.09, "word": " هذا", "probability": 0.142578125}, {"start": 2154.09, "end": 2154.83, "word": " ممكن", "probability": 0.968505859375}, {"start": 2154.83, "end": 2155.15, "word": " يدى", "probability": 0.7317708333333334}, {"start": 2155.15, "end": 2155.49, "word": " طاقة", "probability": 0.9923502604166666}, {"start": 2155.49, "end": 2156.65, "word": " للريسبتور", "probability": 0.69228515625}, {"start": 2156.65, "end": 2157.23, "word": " يعسى", "probability": 0.7521158854166666}, {"start": 2157.23, "end": 2157.45, "word": " از", "probability": 0.34765625}, {"start": 2157.45, "end": 2157.85, "word": " يفتح", "probability": 0.8817138671875}, {"start": 2157.85, "end": 2158.27, "word": " ويمسك", "probability": 0.945068359375}, {"start": 2158.27, "end": 2158.45, "word": " فى", "probability": 0.774169921875}, {"start": 2158.45, "end": 2158.75, "word": " مين", "probability": 0.953857421875}, {"start": 2158.75, "end": 2160.01, "word": " ويمسك", "probability": 0.9754638671875}, {"start": 2160.01, "end": 2160.29, "word": " فى", "probability": 0.974853515625}, {"start": 2160.29, "end": 2160.49, "word": " ال", "probability": 0.90966796875}, {"start": 2160.49, "end": 2161.19, "word": " fiber", "probability": 0.335205078125}, {"start": 2161.19, "end": 2161.47, "word": " انا", "probability": 0.3807373046875}, {"start": 2161.47, "end": 2165.53, "word": 
" بتلت", "probability": 0.4654947916666667}, {"start": 2165.53, "end": 2165.63, "word": " ال", "probability": 0.759765625}, {"start": 2165.63, "end": 2165.93, "word": " degradation", "probability": 0.488037109375}, {"start": 2165.93, "end": 2166.45, "word": " study", "probability": 0.48583984375}, {"start": 2166.45, "end": 2166.59, "word": " يا", "probability": 0.77978515625}, {"start": 2166.59, "end": 2166.97, "word": " شباب", "probability": 0.98876953125}, {"start": 2166.97, "end": 2167.71, "word": " شرحناها", "probability": 0.8623046875}, {"start": 2167.71, "end": 2168.19, "word": " لكم", "probability": 0.46820068359375}, {"start": 2168.19, "end": 2168.89, "word": " وهى", "probability": 0.700927734375}, {"start": 2168.89, "end": 2169.29, "word": " الصورة", "probability": 0.96728515625}, {"start": 2169.29, "end": 2169.73, "word": " تبعتها", "probability": 0.8765869140625}, {"start": 2169.73, "end": 2170.49, "word": " بالظبط", "probability": 0.9647216796875}, {"start": 2170.49, "end": 2170.91, "word": " هى", "probability": 0.5072021484375}, {"start": 2170.91, "end": 2171.17, "word": " اللى", "probability": 0.908447265625}, {"start": 2171.17, "end": 2172.21, "word": " بلتت", "probability": 0.6146240234375}, {"start": 2172.21, "end": 2172.55, "word": " هى", "probability": 0.6640625}, {"start": 2172.55, "end": 2173.13, "word": " معلقة", "probability": 0.814453125}, {"start": 2173.13, "end": 2173.27, "word": " ولا", "probability": 0.6708984375}, {"start": 2173.27, "end": 2173.43, "word": " مش", "probability": 0.97119140625}, {"start": 2173.43, "end": 2173.97, "word": " معلقة", "probability": 0.880859375}, {"start": 2173.97, "end": 2175.67, "word": " بعد", "probability": 0.83447265625}, {"start": 2175.67, "end": 2176.03, "word": " ما", "probability": 0.97607421875}, {"start": 2176.03, "end": 2176.47, "word": " صار", "probability": 0.970458984375}, {"start": 2176.47, "end": 2176.71, "word": " فى", "probability": 0.88818359375}, {"start": 2176.71, "end": 2177.11, "word": 
" إضاءة", "probability": 0.79443359375}, {"start": 2177.11, "end": 2177.21, "word": " ولا", "probability": 0.8203125}, {"start": 2177.21, "end": 2177.43, "word": " لأ", "probability": 0.7210693359375}, {"start": 2177.43, "end": 2177.69, "word": " وناس", "probability": 0.6200358072916666}, {"start": 2177.69, "end": 2178.17, "word": " اتخفت", "probability": 0.90947265625}, {"start": 2178.17, "end": 2178.35, "word": " ولا", "probability": 0.86767578125}, {"start": 2178.35, "end": 2178.67, "word": " لأ", "probability": 0.9208984375}], "temperature": 1.0}, {"id": 80, "seek": 220529, "start": 2182.95, "end": 2205.29, "text": "الطبيعي هو عند إضافة ال agonist وعند إيش هو اسمها aggregation هو different concentration of each agonist are used فعلى سبيل المثال ADP بدينا biphasic pattern و first wave و second wave مش راح تركوا إيها تماما", "tokens": [6027, 9566, 21292, 3615, 1829, 31439, 43242, 11933, 11242, 31845, 3660, 2423, 623, 266, 468, 4032, 3615, 41260, 11933, 1829, 8592, 31439, 24525, 2304, 11296, 16743, 399, 31439, 819, 9856, 295, 1184, 623, 266, 468, 366, 1143, 6156, 3615, 23942, 8608, 21292, 1211, 9673, 12984, 6027, 9135, 47, 4724, 16254, 8315, 3228, 7485, 299, 5102, 4032, 700, 5772, 4032, 1150, 5772, 37893, 12602, 39319, 6055, 31747, 14407, 11933, 1829, 11296, 46811, 10943, 995], "avg_logprob": -0.3703547281187934, "compression_ratio": 1.3940886699507389, "no_speech_prob": 0.0, "words": [{"start": 2182.95, "end": 2183.73, "word": "الطبيعي", "probability": 0.655029296875}, {"start": 2183.73, "end": 2184.43, "word": " هو", "probability": 0.326171875}, {"start": 2184.43, "end": 2184.89, "word": " عند", "probability": 0.767578125}, {"start": 2184.89, "end": 2185.29, "word": " إضافة", "probability": 0.904296875}, {"start": 2185.29, "end": 2185.41, "word": " ال", "probability": 0.771484375}, {"start": 2185.41, "end": 2185.83, "word": " agonist", "probability": 0.7770182291666666}, {"start": 2185.83, "end": 2186.19, "word": " وعند", "probability": 0.7308756510416666}, 
{"start": 2186.19, "end": 2186.57, "word": " إيش", "probability": 0.535400390625}, {"start": 2186.57, "end": 2187.97, "word": " هو", "probability": 0.1263427734375}, {"start": 2187.97, "end": 2188.17, "word": " اسمها", "probability": 0.7430013020833334}, {"start": 2188.17, "end": 2189.11, "word": " aggregation", "probability": 0.612060546875}, {"start": 2189.11, "end": 2191.71, "word": " هو", "probability": 0.2366943359375}, {"start": 2191.71, "end": 2192.59, "word": " different", "probability": 0.269775390625}, {"start": 2192.59, "end": 2193.83, "word": " concentration", "probability": 0.6337890625}, {"start": 2193.83, "end": 2194.35, "word": " of", "probability": 0.95703125}, {"start": 2194.35, "end": 2194.81, "word": " each", "probability": 0.9296875}, {"start": 2194.81, "end": 2195.47, "word": " agonist", "probability": 0.96142578125}, {"start": 2195.47, "end": 2195.71, "word": " are", "probability": 0.90478515625}, {"start": 2195.71, "end": 2196.09, "word": " used", "probability": 0.9365234375}, {"start": 2196.09, "end": 2198.19, "word": " فعلى", "probability": 0.78271484375}, {"start": 2198.19, "end": 2198.47, "word": " سبيل", "probability": 0.990234375}, {"start": 2198.47, "end": 2198.81, "word": " المثال", "probability": 0.8271484375}, {"start": 2198.81, "end": 2199.33, "word": " ADP", "probability": 0.8330078125}, {"start": 2199.33, "end": 2199.69, "word": " بدينا", "probability": 0.6827799479166666}, {"start": 2199.69, "end": 2200.29, "word": " biphasic", "probability": 0.6863606770833334}, {"start": 2200.29, "end": 2200.73, "word": " pattern", "probability": 0.931640625}, {"start": 2200.73, "end": 2202.27, "word": " و", "probability": 0.58935546875}, {"start": 2202.27, "end": 2202.73, "word": " first", "probability": 0.64990234375}, {"start": 2202.73, "end": 2203.09, "word": " wave", "probability": 0.91943359375}, {"start": 2203.09, "end": 2203.25, "word": " و", "probability": 0.98876953125}, {"start": 2203.25, "end": 2203.55, "word": " second", 
"probability": 0.91455078125}, {"start": 2203.55, "end": 2203.97, "word": " wave", "probability": 0.955078125}, {"start": 2203.97, "end": 2204.41, "word": " مش", "probability": 0.299072265625}, {"start": 2204.41, "end": 2204.55, "word": " راح", "probability": 0.637939453125}, {"start": 2204.55, "end": 2204.77, "word": " تركوا", "probability": 0.7262369791666666}, {"start": 2204.77, "end": 2204.89, "word": " إيها", "probability": 0.7755533854166666}, {"start": 2204.89, "end": 2205.29, "word": " تماما", "probability": 0.9444986979166666}], "temperature": 1.0}, {"id": 81, "seek": 222701, "start": 2205.51, "end": 2227.01, "text": "هي ال first wave لها و إيش ال second wave؟ لتحكي، ماشي؟ 100% هي low concentration of ADP في البداية and it's reversible and high concentration بيدينا second wave في حالة ال high concentration", "tokens": [3224, 1829, 2423, 700, 5772, 5296, 11296, 4032, 11933, 1829, 8592, 2423, 1150, 5772, 22807, 5296, 2655, 5016, 4117, 1829, 12399, 3714, 33599, 1829, 22807, 2319, 4, 39896, 2295, 9856, 295, 9135, 47, 8978, 29739, 28259, 10632, 293, 309, 311, 44788, 293, 1090, 9856, 4724, 1829, 16254, 8315, 1150, 5772, 8978, 11331, 6027, 3660, 2423, 1090, 9856], "avg_logprob": -0.35048492406976633, "compression_ratio": 1.4011627906976745, "no_speech_prob": 0.0, "words": [{"start": 2205.51, "end": 2205.79, "word": "هي", "probability": 0.572021484375}, {"start": 2205.79, "end": 2205.89, "word": " ال", "probability": 0.9736328125}, {"start": 2205.89, "end": 2206.19, "word": " first", "probability": 0.81298828125}, {"start": 2206.19, "end": 2206.47, "word": " wave", "probability": 0.94921875}, {"start": 2206.47, "end": 2206.99, "word": " لها", "probability": 0.945556640625}, {"start": 2206.99, "end": 2207.31, "word": " و", "probability": 0.88623046875}, {"start": 2207.31, "end": 2207.53, "word": " إيش", "probability": 0.7218424479166666}, {"start": 2207.53, "end": 2207.67, "word": " ال", "probability": 0.85498046875}, {"start": 2207.67, "end": 2207.95, "word": " 
second", "probability": 0.95947265625}, {"start": 2207.95, "end": 2209.09, "word": " wave؟", "probability": 0.6114501953125}, {"start": 2209.09, "end": 2210.21, "word": " لتحكي،", "probability": 0.6320393880208334}, {"start": 2210.21, "end": 2210.73, "word": " ماشي؟", "probability": 0.7359619140625}, {"start": 2210.73, "end": 2211.25, "word": " 100", "probability": 0.0633544921875}, {"start": 2211.25, "end": 2213.91, "word": "%", "probability": 0.7646484375}, {"start": 2213.91, "end": 2214.59, "word": " هي", "probability": 0.203857421875}, {"start": 2214.59, "end": 2214.87, "word": " low", "probability": 0.92578125}, {"start": 2214.87, "end": 2215.71, "word": " concentration", "probability": 0.86376953125}, {"start": 2215.71, "end": 2216.21, "word": " of", "probability": 0.96337890625}, {"start": 2216.21, "end": 2217.15, "word": " ADP", "probability": 0.707275390625}, {"start": 2217.15, "end": 2217.53, "word": " في", "probability": 0.775390625}, {"start": 2217.53, "end": 2218.41, "word": " البداية", "probability": 0.9944661458333334}, {"start": 2218.41, "end": 2218.95, "word": " and", "probability": 0.54296875}, {"start": 2218.95, "end": 2219.15, "word": " it's", "probability": 0.6875}, {"start": 2219.15, "end": 2219.67, "word": " reversible", "probability": 0.93310546875}, {"start": 2219.67, "end": 2222.45, "word": " and", "probability": 0.8203125}, {"start": 2222.45, "end": 2222.75, "word": " high", "probability": 0.923828125}, {"start": 2222.75, "end": 2223.55, "word": " concentration", "probability": 0.8837890625}, {"start": 2223.55, "end": 2223.91, "word": " بيدينا", "probability": 0.80810546875}, {"start": 2223.91, "end": 2224.29, "word": " second", "probability": 0.96875}, {"start": 2224.29, "end": 2224.79, "word": " wave", "probability": 0.955078125}, {"start": 2224.79, "end": 2225.67, "word": " في", "probability": 0.89599609375}, {"start": 2225.67, "end": 2226.05, "word": " حالة", "probability": 0.9892578125}, {"start": 2226.05, "end": 2226.15, "word": " 
ال", "probability": 0.67578125}, {"start": 2226.15, "end": 2226.31, "word": " high", "probability": 0.8525390625}, {"start": 2226.31, "end": 2227.01, "word": " concentration", "probability": 0.8994140625}], "temperature": 1.0}, {"id": 82, "seek": 224981, "start": 2229.19, "end": 2249.81, "text": "ماشي وشوفوا النتيجة في حالة ال glands methrobalstemia، ماشي هى هنا مستخدمين hand ADD هى ال normal control و ال patient، الأحمق هو patient، في response؟ مصلش طيب و هذا عبارة عن إيه؟ Adrenalin، في response؟", "tokens": [2304, 33599, 1829, 4032, 8592, 38688, 14407, 2423, 29399, 1829, 7435, 3660, 8978, 11331, 6027, 3660, 2423, 49533, 1131, 1703, 996, 304, 1099, 654, 12399, 3714, 33599, 1829, 8032, 7578, 34105, 3714, 14851, 9778, 40448, 9957, 1011, 9135, 35, 8032, 7578, 2423, 2710, 1969, 4032, 2423, 4537, 12399, 16247, 35571, 4587, 31439, 4537, 12399, 8978, 4134, 22807, 3714, 36520, 8592, 23032, 1829, 3555, 4032, 23758, 6225, 3555, 9640, 3660, 18871, 11933, 1829, 3224, 22807, 1999, 23658, 259, 12399, 8978, 4134, 22807], "avg_logprob": -0.4237805001619386, "compression_ratio": 1.4433497536945812, "no_speech_prob": 4.76837158203125e-07, "words": [{"start": 2229.19, "end": 2229.67, "word": "ماشي", "probability": 0.65478515625}, {"start": 2229.67, "end": 2230.91, "word": " وشوفوا", "probability": 0.8568115234375}, {"start": 2230.91, "end": 2232.85, "word": " النتيجة", "probability": 0.82529296875}, {"start": 2232.85, "end": 2233.03, "word": " في", "probability": 0.79638671875}, {"start": 2233.03, "end": 2233.51, "word": " حالة", "probability": 0.9947916666666666}, {"start": 2233.51, "end": 2233.65, "word": " ال", "probability": 0.72998046875}, {"start": 2233.65, "end": 2234.01, "word": " glands", "probability": 0.451904296875}, {"start": 2234.01, "end": 2235.93, "word": " methrobalstemia،", "probability": 0.4089442661830357}, {"start": 2235.93, "end": 2236.35, "word": " ماشي", "probability": 0.7647298177083334}, {"start": 2236.35, "end": 2236.61, "word": " هى", "probability": 
0.457763671875}, {"start": 2236.61, "end": 2236.97, "word": " هنا", "probability": 0.2469482421875}, {"start": 2236.97, "end": 2237.93, "word": " مستخدمين", "probability": 0.9529296875}, {"start": 2237.93, "end": 2238.13, "word": " hand", "probability": 0.1793212890625}, {"start": 2238.13, "end": 2238.83, "word": " ADD", "probability": 0.540283203125}, {"start": 2238.83, "end": 2239.77, "word": " هى", "probability": 0.6383056640625}, {"start": 2239.77, "end": 2239.93, "word": " ال", "probability": 0.9306640625}, {"start": 2239.93, "end": 2240.21, "word": " normal", "probability": 0.81591796875}, {"start": 2240.21, "end": 2240.85, "word": " control", "probability": 0.8955078125}, {"start": 2240.85, "end": 2241.43, "word": " و", "probability": 0.69140625}, {"start": 2241.43, "end": 2241.51, "word": " ال", "probability": 0.93701171875}, {"start": 2241.51, "end": 2242.01, "word": " patient،", "probability": 0.776123046875}, {"start": 2242.01, "end": 2242.43, "word": " الأحمق", "probability": 0.7626953125}, {"start": 2242.43, "end": 2242.61, "word": " هو", "probability": 0.974609375}, {"start": 2242.61, "end": 2243.41, "word": " patient،", "probability": 0.656005859375}, {"start": 2243.41, "end": 2243.51, "word": " في", "probability": 0.85546875}, {"start": 2243.51, "end": 2244.17, "word": " response؟", "probability": 0.854248046875}, {"start": 2244.17, "end": 2245.05, "word": " مصلش", "probability": 0.4303792317708333}, {"start": 2245.05, "end": 2245.53, "word": " طيب", "probability": 0.89794921875}, {"start": 2245.53, "end": 2245.69, "word": " و", "probability": 0.48486328125}, {"start": 2245.69, "end": 2245.97, "word": " هذا", "probability": 0.421875}, {"start": 2245.97, "end": 2246.39, "word": " عبارة", "probability": 0.9837646484375}, {"start": 2246.39, "end": 2246.59, "word": " عن", "probability": 0.97607421875}, {"start": 2246.59, "end": 2247.23, "word": " إيه؟", "probability": 0.58203125}, {"start": 2247.23, "end": 2249.13, "word": " Adrenalin،", "probability": 
0.64031982421875}, {"start": 2249.13, "end": 2249.21, "word": " في", "probability": 0.9384765625}, {"start": 2249.21, "end": 2249.81, "word": " response؟", "probability": 0.975830078125}], "temperature": 1.0}, {"id": 83, "seek": 227923, "start": 2250.84, "end": 2279.24, "text": "و هذا كله جين فيه response لكن لما استخدمنا ال ressositin صار فيه response صار فيه ايش؟ response في ألوان من ال agonist بتدينا try phasing pattern يعني حتى during resting platelet ناخد wave ثم primary wave و secondary wave", "tokens": [2407, 23758, 28242, 3224, 10874, 9957, 8978, 3224, 4134, 44381, 5296, 15042, 44713, 9778, 40448, 8315, 2423, 24689, 329, 270, 259, 20328, 9640, 8978, 3224, 4134, 20328, 9640, 8978, 3224, 1975, 1829, 8592, 22807, 4134, 8978, 5551, 1211, 2407, 7649, 9154, 2423, 623, 266, 468, 39894, 16254, 8315, 853, 903, 3349, 5102, 37495, 22653, 11331, 49975, 1830, 21221, 3403, 15966, 8717, 47283, 3215, 5772, 38637, 2304, 6194, 5772, 4032, 11396, 5772], "avg_logprob": -0.3563368043137921, "compression_ratio": 1.5126903553299493, "no_speech_prob": 0.0, "words": [{"start": 2250.84, "end": 2251.16, "word": "و", "probability": 0.89306640625}, {"start": 2251.16, "end": 2251.44, "word": " هذا", "probability": 0.400390625}, {"start": 2251.44, "end": 2251.88, "word": " كله", "probability": 0.779052734375}, {"start": 2251.88, "end": 2252.06, "word": " جين", "probability": 0.3800048828125}, {"start": 2252.06, "end": 2252.28, "word": " فيه", "probability": 0.5909423828125}, {"start": 2252.28, "end": 2252.7, "word": " response", "probability": 0.904296875}, {"start": 2252.7, "end": 2253.7, "word": " لكن", "probability": 0.54736328125}, {"start": 2253.7, "end": 2253.96, "word": " لما", "probability": 0.931640625}, {"start": 2253.96, "end": 2254.74, "word": " استخدمنا", "probability": 0.951416015625}, {"start": 2254.74, "end": 2254.88, "word": " ال", "probability": 0.7783203125}, {"start": 2254.88, "end": 2255.64, "word": " ressositin", "probability": 0.3799896240234375}, {"start": 
2255.64, "end": 2256.46, "word": " صار", "probability": 0.905029296875}, {"start": 2256.46, "end": 2256.66, "word": " فيه", "probability": 0.853271484375}, {"start": 2256.66, "end": 2257.04, "word": " response", "probability": 0.79248046875}, {"start": 2257.04, "end": 2258.0, "word": " صار", "probability": 0.60809326171875}, {"start": 2258.0, "end": 2258.46, "word": " فيه", "probability": 0.91357421875}, {"start": 2258.46, "end": 2262.3, "word": " ايش؟", "probability": 0.683349609375}, {"start": 2262.3, "end": 2262.3, "word": " response", "probability": 0.494873046875}, {"start": 2262.3, "end": 2263.72, "word": " في", "probability": 0.68701171875}, {"start": 2263.72, "end": 2264.1, "word": " ألوان", "probability": 0.6082763671875}, {"start": 2264.1, "end": 2264.3, "word": " من", "probability": 0.98828125}, {"start": 2264.3, "end": 2264.6, "word": " ال", "probability": 0.97314453125}, {"start": 2264.6, "end": 2265.2, "word": " agonist", "probability": 0.81201171875}, {"start": 2265.2, "end": 2265.82, "word": " بتدينا", "probability": 0.69873046875}, {"start": 2265.82, "end": 2266.16, "word": " try", "probability": 0.271240234375}, {"start": 2266.16, "end": 2266.76, "word": " phasing", "probability": 0.618896484375}, {"start": 2266.76, "end": 2269.9, "word": " pattern", "probability": 0.8671875}, {"start": 2269.9, "end": 2270.8, "word": " يعني", "probability": 0.915283203125}, {"start": 2270.8, "end": 2271.38, "word": " حتى", "probability": 0.93701171875}, {"start": 2271.38, "end": 2272.84, "word": " during", "probability": 0.935546875}, {"start": 2272.84, "end": 2273.5, "word": " resting", "probability": 0.90185546875}, {"start": 2273.5, "end": 2274.44, "word": " platelet", "probability": 0.769287109375}, {"start": 2274.44, "end": 2275.38, "word": " ناخد", "probability": 0.9352213541666666}, {"start": 2275.38, "end": 2275.84, "word": " wave", "probability": 0.95654296875}, {"start": 2275.84, "end": 2277.6, "word": " ثم", "probability": 0.984130859375}, {"start": 
2277.6, "end": 2278.1, "word": " primary", "probability": 0.939453125}, {"start": 2278.1, "end": 2278.42, "word": " wave", "probability": 0.9580078125}, {"start": 2278.42, "end": 2278.56, "word": " و", "probability": 0.85986328125}, {"start": 2278.56, "end": 2278.92, "word": " secondary", "probability": 0.84814453125}, {"start": 2278.92, "end": 2279.24, "word": " wave", "probability": 0.95703125}], "temperature": 1.0}, {"id": 84, "seek": 229369, "start": 2280.05, "end": 2293.69, "text": "ومنها البنفرين هي اعطانا تلاتة wave، واي واحدة، تنتين، هاي التلاتة، ماشي، حتى في ال resting بليتلي، ثم change in shape، هى أدتنى primary، ثم ياش", "tokens": [20498, 1863, 11296, 29739, 1863, 5172, 2288, 9957, 39896, 1975, 3615, 9566, 7649, 995, 6055, 1211, 9307, 3660, 5772, 12399, 36764, 1829, 36764, 24401, 3660, 12399, 6055, 29399, 9957, 12399, 8032, 47302, 16712, 1211, 9307, 3660, 12399, 3714, 33599, 1829, 12399, 11331, 49975, 8978, 2423, 21221, 4724, 20292, 2655, 20292, 12399, 38637, 2304, 1319, 294, 3909, 12399, 8032, 7578, 5551, 3215, 2655, 1863, 7578, 6194, 12399, 38637, 2304, 7251, 33599], "avg_logprob": -0.3675176140288232, "compression_ratio": 1.4146341463414633, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 2280.05, "end": 2280.57, "word": "ومنها", "probability": 0.81005859375}, {"start": 2280.57, "end": 2281.39, "word": " البنفرين", "probability": 0.6317626953125}, {"start": 2281.39, "end": 2281.57, "word": " هي", "probability": 0.685546875}, {"start": 2281.57, "end": 2282.33, "word": " اعطانا", "probability": 0.8259765625}, {"start": 2282.33, "end": 2282.77, "word": " تلاتة", "probability": 0.926513671875}, {"start": 2282.77, "end": 2283.09, "word": " wave،", "probability": 0.40960693359375}, {"start": 2283.09, "end": 2283.21, "word": " واي", "probability": 0.57421875}, {"start": 2283.21, "end": 2284.53, "word": " واحدة،", "probability": 0.92236328125}, {"start": 2284.53, "end": 2285.99, "word": " تنتين،", "probability": 0.653656005859375}, {"start": 
2285.99, "end": 2286.31, "word": " هاي", "probability": 0.5325927734375}, {"start": 2286.31, "end": 2287.63, "word": " التلاتة،", "probability": 0.9248046875}, {"start": 2287.63, "end": 2288.63, "word": " ماشي،", "probability": 0.69415283203125}, {"start": 2288.63, "end": 2288.95, "word": " حتى", "probability": 0.934814453125}, {"start": 2288.95, "end": 2289.15, "word": " في", "probability": 0.92626953125}, {"start": 2289.15, "end": 2289.19, "word": " ال", "probability": 0.9892578125}, {"start": 2289.19, "end": 2289.45, "word": " resting", "probability": 0.86572265625}, {"start": 2289.45, "end": 2290.13, "word": " بليتلي،", "probability": 0.53828125}, {"start": 2290.13, "end": 2290.33, "word": " ثم", "probability": 0.989990234375}, {"start": 2290.33, "end": 2290.69, "word": " change", "probability": 0.6162109375}, {"start": 2290.69, "end": 2290.89, "word": " in", "probability": 0.85009765625}, {"start": 2290.89, "end": 2291.23, "word": " shape،", "probability": 0.804443359375}, {"start": 2291.23, "end": 2291.37, "word": " هى", "probability": 0.6448974609375}, {"start": 2291.37, "end": 2291.81, "word": " أدتنى", "probability": 0.7794921875}, {"start": 2291.81, "end": 2292.93, "word": " primary،", "probability": 0.885009765625}, {"start": 2292.93, "end": 2293.19, "word": " ثم", "probability": 0.995849609375}, {"start": 2293.19, "end": 2293.69, "word": " ياش", "probability": 0.557037353515625}], "temperature": 1.0}, {"id": 85, "seek": 232719, "start": 2299.49, "end": 2327.19, "text": "طبعا يا شباب في agonists تانية زي الكلاجن, arachidinic acid, الجالسيون, plate activating factor، كلهم induce a single wave of irreversible aggregation، قوين جدا، يعني بنشته مباشرة، يعني مافيش داعي يدينا first wave و second wave، مباشرة بيدينا هي 100% single wave، ال rest الست هو عبارة عن antibiotic", "tokens": [9566, 3555, 3615, 995, 35186, 13412, 3555, 16758, 8978, 623, 266, 1751, 6055, 7649, 10632, 30767, 1829, 2423, 28820, 26108, 1863, 11, 594, 608, 327, 259, 299, 8258, 11, 25724, 
6027, 3794, 1829, 11536, 11, 5924, 42481, 5952, 12399, 28242, 16095, 41263, 257, 2167, 5772, 295, 16014, 840, 964, 16743, 399, 12399, 12174, 2407, 9957, 10874, 28259, 12399, 37495, 22653, 44945, 8592, 47395, 3714, 3555, 33599, 25720, 12399, 37495, 22653, 19446, 41185, 8592, 11778, 45761, 1829, 7251, 16254, 8315, 700, 5772, 4032, 1150, 5772, 12399, 3714, 3555, 33599, 25720, 4724, 1829, 16254, 8315, 39896, 2319, 4, 2167, 5772, 12399, 2423, 1472, 2423, 14851, 31439, 6225, 3555, 9640, 3660, 18871, 37828], "avg_logprob": -0.3139076683972333, "compression_ratio": 1.5390334572490707, "no_speech_prob": 0.0, "words": [{"start": 2299.49, "end": 2299.79, "word": "طبعا", "probability": 0.93603515625}, {"start": 2299.79, "end": 2299.99, "word": " يا", "probability": 0.26904296875}, {"start": 2299.99, "end": 2300.05, "word": " شباب", "probability": 0.7864583333333334}, {"start": 2300.05, "end": 2300.51, "word": " في", "probability": 0.67919921875}, {"start": 2300.51, "end": 2301.19, "word": " agonists", "probability": 0.5966796875}, {"start": 2301.19, "end": 2301.59, "word": " تانية", "probability": 0.97705078125}, {"start": 2301.59, "end": 2301.81, "word": " زي", "probability": 0.80078125}, {"start": 2301.81, "end": 2302.63, "word": " الكلاجن,", "probability": 0.638824462890625}, {"start": 2302.71, "end": 2303.21, "word": " arachidinic", "probability": 0.626220703125}, {"start": 2303.21, "end": 2303.65, "word": " acid,", "probability": 0.96484375}, {"start": 2303.85, "end": 2304.47, "word": " الجالسيون,", "probability": 0.731689453125}, {"start": 2304.57, "end": 2304.77, "word": " plate", "probability": 0.1495361328125}, {"start": 2304.77, "end": 2305.23, "word": " activating", "probability": 0.53271484375}, {"start": 2305.23, "end": 2306.17, "word": " factor،", "probability": 0.4783935546875}, {"start": 2306.17, "end": 2306.71, "word": " كلهم", "probability": 0.95947265625}, {"start": 2306.71, "end": 2307.81, "word": " induce", "probability": 0.88134765625}, {"start": 2307.81, 
"end": 2308.37, "word": " a", "probability": 0.9384765625}, {"start": 2308.37, "end": 2309.05, "word": " single", "probability": 0.94873046875}, {"start": 2309.05, "end": 2309.77, "word": " wave", "probability": 0.939453125}, {"start": 2309.77, "end": 2310.69, "word": " of", "probability": 0.9716796875}, {"start": 2310.69, "end": 2312.29, "word": " irreversible", "probability": 0.9554036458333334}, {"start": 2312.29, "end": 2313.31, "word": " aggregation،", "probability": 0.7493489583333334}, {"start": 2313.31, "end": 2313.71, "word": " قوين", "probability": 0.8151041666666666}, {"start": 2313.71, "end": 2315.35, "word": " جدا،", "probability": 0.9637044270833334}, {"start": 2315.35, "end": 2315.59, "word": " يعني", "probability": 0.967529296875}, {"start": 2315.59, "end": 2315.99, "word": " بنشته", "probability": 0.9485677083333334}, {"start": 2315.99, "end": 2317.65, "word": " مباشرة،", "probability": 0.9560546875}, {"start": 2317.65, "end": 2317.83, "word": " يعني", "probability": 0.846435546875}, {"start": 2317.83, "end": 2318.11, "word": " مافيش", "probability": 0.8416341145833334}, {"start": 2318.11, "end": 2318.41, "word": " داعي", "probability": 0.8868815104166666}, {"start": 2318.41, "end": 2318.69, "word": " يدينا", "probability": 0.6905110677083334}, {"start": 2318.69, "end": 2319.03, "word": " first", "probability": 0.890625}, {"start": 2319.03, "end": 2319.31, "word": " wave", "probability": 0.9443359375}, {"start": 2319.31, "end": 2319.43, "word": " و", "probability": 0.94921875}, {"start": 2319.43, "end": 2319.71, "word": " second", "probability": 0.84521484375}, {"start": 2319.71, "end": 2320.17, "word": " wave،", "probability": 0.91552734375}, {"start": 2320.17, "end": 2320.87, "word": " مباشرة", "probability": 0.991455078125}, {"start": 2320.87, "end": 2321.33, "word": " بيدينا", "probability": 0.7928466796875}, {"start": 2321.33, "end": 2321.55, "word": " هي", "probability": 0.615234375}, {"start": 2321.55, "end": 2321.79, "word": " 100", 
"probability": 0.330322265625}, {"start": 2321.79, "end": 2323.41, "word": "%", "probability": 0.93017578125}, {"start": 2323.41, "end": 2324.23, "word": " single", "probability": 0.90478515625}, {"start": 2324.23, "end": 2325.23, "word": " wave،", "probability": 0.818115234375}, {"start": 2325.23, "end": 2325.31, "word": " ال", "probability": 0.6962890625}, {"start": 2325.31, "end": 2325.51, "word": " rest", "probability": 0.8623046875}, {"start": 2325.51, "end": 2325.87, "word": " الست", "probability": 0.36065673828125}, {"start": 2325.87, "end": 2326.27, "word": " هو", "probability": 0.89306640625}, {"start": 2326.27, "end": 2326.51, "word": " عبارة", "probability": 0.79986572265625}, {"start": 2326.51, "end": 2326.63, "word": " عن", "probability": 0.99853515625}, {"start": 2326.63, "end": 2327.19, "word": " antibiotic", "probability": 0.47900390625}], "temperature": 1.0}, {"id": 86, "seek": 235498, "start": 2328.19, "end": 2354.99, "text": "و ال aggregation can be شوفوا تخيل قوته reproduced with metabolically inert metabolically inert ماشي formally fixed يعني حتى اللي مسلطينها بلي formally ماشي و أوقفنا نشاطها صارت inert خاملة ممكن اللي .. 
اللي هو ال 36 ناشطة", "tokens": [2407, 2423, 16743, 399, 393, 312, 13412, 38688, 14407, 6055, 9778, 26895, 12174, 35473, 3224, 11408, 1232, 365, 19110, 984, 25832, 19110, 984, 25832, 3714, 33599, 1829, 25983, 6806, 37495, 22653, 11331, 49975, 13672, 1829, 47524, 1211, 9566, 9957, 11296, 4724, 20292, 25983, 3714, 33599, 1829, 4032, 5551, 30543, 5172, 8315, 8717, 8592, 41193, 11296, 20328, 9640, 2655, 25832, 16490, 10943, 37977, 3714, 43020, 13672, 1829, 4386, 13672, 1829, 31439, 2423, 8652, 8717, 33599, 9566, 3660], "avg_logprob": -0.28043830085110355, "compression_ratio": 1.5097087378640777, "no_speech_prob": 0.0, "words": [{"start": 2328.19, "end": 2328.39, "word": "و", "probability": 0.85546875}, {"start": 2328.39, "end": 2328.47, "word": " ال", "probability": 0.74853515625}, {"start": 2328.47, "end": 2329.07, "word": " aggregation", "probability": 0.85546875}, {"start": 2329.07, "end": 2329.39, "word": " can", "probability": 0.84716796875}, {"start": 2329.39, "end": 2329.71, "word": " be", "probability": 0.9638671875}, {"start": 2329.71, "end": 2330.09, "word": " شوفوا", "probability": 0.7364095052083334}, {"start": 2330.09, "end": 2330.49, "word": " تخيل", "probability": 0.94921875}, {"start": 2330.49, "end": 2331.29, "word": " قوته", "probability": 0.8857421875}, {"start": 2331.29, "end": 2333.03, "word": " reproduced", "probability": 0.86572265625}, {"start": 2333.03, "end": 2333.91, "word": " with", "probability": 0.92724609375}, {"start": 2333.91, "end": 2335.27, "word": " metabolically", "probability": 0.705322265625}, {"start": 2335.27, "end": 2336.13, "word": " inert", "probability": 0.95703125}, {"start": 2336.13, "end": 2337.27, "word": " metabolically", "probability": 0.7236328125}, {"start": 2337.27, "end": 2337.87, "word": " inert", "probability": 0.99267578125}, {"start": 2337.87, "end": 2339.11, "word": " ماشي", "probability": 0.7000325520833334}, {"start": 2339.11, "end": 2339.99, "word": " formally", "probability": 0.8515625}, {"start": 2339.99, 
"end": 2340.59, "word": " fixed", "probability": 0.72998046875}, {"start": 2340.59, "end": 2341.45, "word": " يعني", "probability": 0.54254150390625}, {"start": 2341.45, "end": 2341.91, "word": " حتى", "probability": 0.93896484375}, {"start": 2341.91, "end": 2342.11, "word": " اللي", "probability": 0.901611328125}, {"start": 2342.11, "end": 2342.89, "word": " مسلطينها", "probability": 0.735400390625}, {"start": 2342.89, "end": 2343.11, "word": " بلي", "probability": 0.44085693359375}, {"start": 2343.11, "end": 2343.61, "word": " formally", "probability": 0.81494140625}, {"start": 2343.61, "end": 2345.89, "word": " ماشي", "probability": 0.8445638020833334}, {"start": 2345.89, "end": 2346.33, "word": " و", "probability": 0.82177734375}, {"start": 2346.33, "end": 2347.39, "word": " أوقفنا", "probability": 0.8890380859375}, {"start": 2347.39, "end": 2348.39, "word": " نشاطها", "probability": 0.9912109375}, {"start": 2348.39, "end": 2349.15, "word": " صارت", "probability": 0.9191080729166666}, {"start": 2349.15, "end": 2349.43, "word": " inert", "probability": 0.96630859375}, {"start": 2349.43, "end": 2350.29, "word": " خاملة", "probability": 0.8873697916666666}, {"start": 2350.29, "end": 2351.91, "word": " ممكن", "probability": 0.961181640625}, {"start": 2351.91, "end": 2352.39, "word": " اللي", "probability": 0.960205078125}, {"start": 2352.39, "end": 2352.41, "word": " ..", "probability": 0.2685546875}, {"start": 2352.41, "end": 2352.57, "word": " اللي", "probability": 0.967529296875}, {"start": 2352.57, "end": 2352.77, "word": " هو", "probability": 0.55517578125}, {"start": 2352.77, "end": 2352.89, "word": " ال", "probability": 0.299560546875}, {"start": 2352.89, "end": 2353.13, "word": " 36", "probability": 0.33447265625}, {"start": 2353.13, "end": 2354.99, "word": " ناشطة", "probability": 0.62738037109375}], "temperature": 1.0}, {"id": 87, "seek": 238263, "start": 2357.69, "end": 2382.63, "text": "فبيستعملوه كعناد للي عنده نقص فيه ولا ..؟ اه ده ممنوع استخدام ال 
vivo .. ال video .. اه من ذاته بس .. اه ده .. لو اتصنع اشي مشابه له فاستخدام ممكن .. من خارج .. من خارج .. احنا ممكن .. عشان مايعملش ل .. احنا ممكن نفوق جلد .. بس في الجسم مش ممكن نعمل جلد إلا في حالات مزيفة حد يعني", "tokens": [5172, 21292, 14851, 25957, 1211, 2407, 3224, 9122, 3615, 8315, 3215, 5296, 20292, 43242, 3224, 8717, 4587, 9381, 8978, 3224, 49429, 4386, 22807, 1975, 3224, 11778, 3224, 3714, 27842, 45367, 44713, 9778, 3215, 10943, 2423, 30689, 4386, 2423, 960, 4386, 1975, 3224, 9154, 29910, 9307, 3224, 4724, 3794, 4386, 1975, 3224, 11778, 3224, 4386, 45164, 1975, 2655, 9381, 1863, 3615, 1975, 8592, 1829, 37893, 16758, 3224, 46740, 6156, 995, 14851, 9778, 3215, 10943, 3714, 43020, 4386, 9154, 16490, 9640, 7435, 4386, 9154, 16490, 9640, 7435, 4386, 1975, 5016, 8315, 3714, 43020, 4386, 6225, 8592, 7649, 19446, 40228, 42213, 8592, 5296, 4386, 1975, 5016, 8315, 3714, 43020, 8717, 5172, 30543, 10874, 1211, 3215, 4386, 4724, 3794, 8978, 25724, 38251, 37893, 3714, 43020, 8717, 25957, 1211, 10874, 1211, 3215, 11933, 15040, 8978, 11331, 6027, 9307, 3714, 11622, 33911, 3660, 11331, 3215, 37495, 22653], "avg_logprob": -0.2887323796749115, "compression_ratio": 1.9787234042553192, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2357.69, "end": 2358.33, "word": "فبيستعملوه", "probability": 0.7675083705357143}, {"start": 2358.33, "end": 2358.97, "word": " كعناد", "probability": 0.791259765625}, {"start": 2358.97, "end": 2359.33, "word": " للي", "probability": 0.893798828125}, {"start": 2359.33, "end": 2359.73, "word": " عنده", "probability": 0.96337890625}, {"start": 2359.73, "end": 2359.99, "word": " نقص", "probability": 0.8312174479166666}, {"start": 2359.99, "end": 2360.31, "word": " فيه", "probability": 0.978271484375}, {"start": 2360.31, "end": 2360.43, "word": " ولا", "probability": 0.67041015625}, {"start": 2360.43, "end": 2361.89, "word": " ..؟", "probability": 0.5694580078125}, {"start": 2361.89, "end": 2362.39, "word": " اه", "probability": 
0.46820068359375}, {"start": 2362.39, "end": 2362.49, "word": " ده", "probability": 0.561767578125}, {"start": 2362.49, "end": 2362.83, "word": " ممنوع", "probability": 0.8671875}, {"start": 2362.83, "end": 2363.25, "word": " استخدام", "probability": 0.9410400390625}, {"start": 2363.25, "end": 2363.41, "word": " ال", "probability": 0.4677734375}, {"start": 2363.41, "end": 2363.77, "word": " vivo", "probability": 0.85400390625}, {"start": 2363.77, "end": 2364.63, "word": " ..", "probability": 0.2425537109375}, {"start": 2364.63, "end": 2364.81, "word": " ال", "probability": 0.60546875}, {"start": 2364.81, "end": 2365.19, "word": " video", "probability": 0.6494140625}, {"start": 2365.19, "end": 2365.21, "word": " ..", "probability": 0.345458984375}, {"start": 2365.21, "end": 2365.33, "word": " اه", "probability": 0.539306640625}, {"start": 2365.33, "end": 2365.47, "word": " من", "probability": 0.1971435546875}, {"start": 2365.47, "end": 2365.89, "word": " ذاته", "probability": 0.5767415364583334}, {"start": 2365.89, "end": 2366.21, "word": " بس", "probability": 0.7587890625}, {"start": 2366.21, "end": 2366.39, "word": " ..", "probability": 0.49658203125}, {"start": 2366.39, "end": 2366.59, "word": " اه", "probability": 0.815673828125}, {"start": 2366.59, "end": 2366.67, "word": " ده", "probability": 0.627685546875}, {"start": 2366.67, "end": 2366.73, "word": " ..", "probability": 0.458740234375}, {"start": 2366.73, "end": 2366.95, "word": " لو", "probability": 0.9306640625}, {"start": 2366.95, "end": 2367.45, "word": " اتصنع", "probability": 0.89013671875}, {"start": 2367.45, "end": 2367.71, "word": " اشي", "probability": 0.8927408854166666}, {"start": 2367.71, "end": 2368.19, "word": " مشابه", "probability": 0.8974609375}, {"start": 2368.19, "end": 2368.47, "word": " له", "probability": 0.48388671875}, {"start": 2368.47, "end": 2369.49, "word": " فاستخدام", "probability": 0.8787434895833334}, {"start": 2369.49, "end": 2369.83, "word": " ممكن", "probability": 
0.647216796875}, {"start": 2369.83, "end": 2369.91, "word": " ..", "probability": 0.59130859375}, {"start": 2369.91, "end": 2370.01, "word": " من", "probability": 0.380859375}, {"start": 2370.01, "end": 2371.15, "word": " خارج", "probability": 0.9620768229166666}, {"start": 2371.15, "end": 2371.61, "word": " ..", "probability": 0.94482421875}, {"start": 2371.61, "end": 2371.61, "word": " من", "probability": 0.94775390625}, {"start": 2371.61, "end": 2371.61, "word": " خارج", "probability": 0.99267578125}, {"start": 2371.61, "end": 2373.13, "word": " ..", "probability": 0.68359375}, {"start": 2373.13, "end": 2373.41, "word": " احنا", "probability": 0.9078776041666666}, {"start": 2373.41, "end": 2373.51, "word": " ممكن", "probability": 0.988525390625}, {"start": 2373.51, "end": 2373.51, "word": " ..", "probability": 0.91064453125}, {"start": 2373.51, "end": 2373.79, "word": " عشان", "probability": 0.9833984375}, {"start": 2373.79, "end": 2374.31, "word": " مايعملش", "probability": 0.8272705078125}, {"start": 2374.31, "end": 2374.43, "word": " ل", "probability": 0.5478515625}, {"start": 2374.43, "end": 2374.53, "word": " ..", "probability": 0.505859375}, {"start": 2374.53, "end": 2374.73, "word": " احنا", "probability": 0.7459309895833334}, {"start": 2374.73, "end": 2375.05, "word": " ممكن", "probability": 0.98974609375}, {"start": 2375.05, "end": 2375.57, "word": " نفوق", "probability": 0.6925455729166666}, {"start": 2375.57, "end": 2376.01, "word": " جلد", "probability": 0.8556315104166666}, {"start": 2376.01, "end": 2376.15, "word": " ..", "probability": 0.32275390625}, {"start": 2376.15, "end": 2377.35, "word": " بس", "probability": 0.994384765625}, {"start": 2377.35, "end": 2377.47, "word": " في", "probability": 0.7490234375}, {"start": 2377.47, "end": 2377.87, "word": " الجسم", "probability": 0.97900390625}, {"start": 2377.87, "end": 2378.39, "word": " مش", "probability": 0.98681640625}, {"start": 2378.39, "end": 2378.97, "word": " ممكن", "probability": 
0.991943359375}, {"start": 2378.97, "end": 2380.41, "word": " نعمل", "probability": 0.9886067708333334}, {"start": 2380.41, "end": 2380.83, "word": " جلد", "probability": 0.99462890625}, {"start": 2380.83, "end": 2381.19, "word": " إلا", "probability": 0.6767578125}, {"start": 2381.19, "end": 2381.35, "word": " في", "probability": 0.98388671875}, {"start": 2381.35, "end": 2381.65, "word": " حالات", "probability": 0.9029947916666666}, {"start": 2381.65, "end": 2382.09, "word": " مزيفة", "probability": 0.7264404296875}, {"start": 2382.09, "end": 2382.37, "word": " حد", "probability": 0.848876953125}, {"start": 2382.37, "end": 2382.63, "word": " يعني", "probability": 0.83056640625}], "temperature": 1.0}, {"id": 88, "seek": 241064, "start": 2383.22, "end": 2410.64, "text": "لو استعملها في الجسم ممكن يغسل في أماكن مش ضالونين طبعا إلا في الحالات نازيفة الحد طيب هد عنده مشاكل يا شباب هد عنده سؤال من المشاكل الأخرى اللي بتصير في هذه اللي هو ال aggregation study يبقى في مشاكل أخرى منها نمر واحد ال anticoagulant used", "tokens": [1211, 2407, 44713, 25957, 1211, 11296, 8978, 25724, 38251, 3714, 43020, 7251, 17082, 3794, 1211, 8978, 5551, 15042, 19452, 37893, 48812, 6027, 11536, 9957, 23032, 3555, 3615, 995, 11933, 15040, 8978, 21542, 6027, 9307, 8717, 31377, 33911, 3660, 21542, 3215, 23032, 1829, 3555, 8032, 3215, 43242, 3224, 37893, 995, 28820, 35186, 13412, 3555, 16758, 8032, 3215, 43242, 3224, 8608, 33604, 6027, 9154, 9673, 8592, 995, 28820, 16247, 34740, 7578, 13672, 1829, 39894, 9381, 13546, 8978, 29538, 13672, 1829, 31439, 2423, 16743, 399, 2979, 7251, 3555, 4587, 7578, 8978, 37893, 995, 28820, 5551, 34740, 7578, 9154, 11296, 8717, 29973, 36764, 24401, 2423, 2511, 2789, 559, 425, 394, 1143], "avg_logprob": -0.23538773051566547, "compression_ratio": 1.7076271186440677, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2383.22, "end": 2383.48, "word": "لو", "probability": 0.31982421875}, {"start": 2383.48, "end": 2384.0, "word": " استعملها", "probability": 
0.791259765625}, {"start": 2384.0, "end": 2384.1, "word": " في", "probability": 0.802734375}, {"start": 2384.1, "end": 2384.38, "word": " الجسم", "probability": 0.925048828125}, {"start": 2384.38, "end": 2384.68, "word": " ممكن", "probability": 0.870361328125}, {"start": 2384.68, "end": 2385.06, "word": " يغسل", "probability": 0.666748046875}, {"start": 2385.06, "end": 2385.2, "word": " في", "probability": 0.90283203125}, {"start": 2385.2, "end": 2385.54, "word": " أماكن", "probability": 0.8274739583333334}, {"start": 2385.54, "end": 2385.82, "word": " مش", "probability": 0.9833984375}, {"start": 2385.82, "end": 2387.02, "word": " ضالونين", "probability": 0.60882568359375}, {"start": 2387.02, "end": 2387.46, "word": " طبعا", "probability": 0.8074951171875}, {"start": 2387.46, "end": 2388.44, "word": " إلا", "probability": 0.6656494140625}, {"start": 2388.44, "end": 2388.56, "word": " في", "probability": 0.97265625}, {"start": 2388.56, "end": 2388.96, "word": " الحالات", "probability": 0.9552408854166666}, {"start": 2388.96, "end": 2389.44, "word": " نازيفة", "probability": 0.8218994140625}, {"start": 2389.44, "end": 2389.84, "word": " الحد", "probability": 0.782470703125}, {"start": 2389.84, "end": 2391.42, "word": " طيب", "probability": 0.9259440104166666}, {"start": 2391.42, "end": 2391.86, "word": " هد", "probability": 0.628662109375}, {"start": 2391.86, "end": 2392.04, "word": " عنده", "probability": 0.7080078125}, {"start": 2392.04, "end": 2392.6, "word": " مشاكل", "probability": 0.9832356770833334}, {"start": 2392.6, "end": 2392.74, "word": " يا", "probability": 0.97021484375}, {"start": 2392.74, "end": 2393.06, "word": " شباب", "probability": 0.9933268229166666}, {"start": 2393.06, "end": 2393.6, "word": " هد", "probability": 0.729736328125}, {"start": 2393.6, "end": 2393.9, "word": " عنده", "probability": 0.973876953125}, {"start": 2393.9, "end": 2394.58, "word": " سؤال", "probability": 0.9773763020833334}, {"start": 2394.58, "end": 2396.94, "word": " من", 
"probability": 0.580078125}, {"start": 2396.94, "end": 2397.54, "word": " المشاكل", "probability": 0.99365234375}, {"start": 2397.54, "end": 2398.18, "word": " الأخرى", "probability": 0.98681640625}, {"start": 2398.18, "end": 2398.42, "word": " اللي", "probability": 0.77978515625}, {"start": 2398.42, "end": 2398.88, "word": " بتصير", "probability": 0.7090657552083334}, {"start": 2398.88, "end": 2399.08, "word": " في", "probability": 0.96923828125}, {"start": 2399.08, "end": 2399.6, "word": " هذه", "probability": 0.93994140625}, {"start": 2399.6, "end": 2400.78, "word": " اللي", "probability": 0.942138671875}, {"start": 2400.78, "end": 2401.22, "word": " هو", "probability": 0.97412109375}, {"start": 2401.22, "end": 2401.82, "word": " ال", "probability": 0.89453125}, {"start": 2401.82, "end": 2403.34, "word": " aggregation", "probability": 0.796630859375}, {"start": 2403.34, "end": 2403.86, "word": " study", "probability": 0.9677734375}, {"start": 2403.86, "end": 2404.32, "word": " يبقى", "probability": 0.63262939453125}, {"start": 2404.32, "end": 2404.46, "word": " في", "probability": 0.73388671875}, {"start": 2404.46, "end": 2404.82, "word": " مشاكل", "probability": 0.8753255208333334}, {"start": 2404.82, "end": 2405.64, "word": " أخرى", "probability": 0.9847005208333334}, {"start": 2405.64, "end": 2406.4, "word": " منها", "probability": 0.959716796875}, {"start": 2406.4, "end": 2407.64, "word": " نمر", "probability": 0.5631103515625}, {"start": 2407.64, "end": 2408.08, "word": " واحد", "probability": 0.9140625}, {"start": 2408.08, "end": 2409.12, "word": " ال", "probability": 0.94482421875}, {"start": 2409.12, "end": 2410.14, "word": " anticoagulant", "probability": 0.89892578125}, {"start": 2410.14, "end": 2410.64, "word": " used", "probability": 0.89111328125}], "temperature": 1.0}, {"id": 89, "seek": 243589, "start": 2411.89, "end": 2435.89, "text": "نبقى واحد نوع ال anticoagulant وافضل anticoagulant هو 100% نمشي نبقى اتنين platelet count بمعنى كده فيه عدد 
platelet في ال platelet rich بلازم اللى انت استخدمت انه لما بنضيف ال agonist بعمل aggregation ب certain activity", "tokens": [1863, 3555, 4587, 7578, 36764, 24401, 8717, 45367, 2423, 2511, 2789, 559, 425, 394, 4032, 31845, 11242, 1211, 2511, 2789, 559, 425, 394, 31439, 2319, 4, 8717, 2304, 8592, 1829, 8717, 3555, 4587, 7578, 1975, 2655, 1863, 9957, 3403, 15966, 1207, 4724, 2304, 3615, 1863, 7578, 9122, 3215, 3224, 8978, 3224, 6225, 3215, 3215, 3403, 15966, 8978, 2423, 3403, 15966, 4593, 4724, 1211, 31377, 2304, 13672, 7578, 16472, 2655, 44713, 9778, 40448, 2655, 16472, 3224, 5296, 15042, 44945, 11242, 33911, 2423, 623, 266, 468, 4724, 25957, 1211, 16743, 399, 4724, 1629, 5191], "avg_logprob": -0.31653224524631296, "compression_ratio": 1.5247524752475248, "no_speech_prob": 0.0, "words": [{"start": 2411.89, "end": 2412.19, "word": "نبقى", "probability": 0.6787109375}, {"start": 2412.19, "end": 2412.63, "word": " واحد", "probability": 0.886474609375}, {"start": 2412.63, "end": 2412.93, "word": " نوع", "probability": 0.89208984375}, {"start": 2412.93, "end": 2413.03, "word": " ال", "probability": 0.90087890625}, {"start": 2413.03, "end": 2413.77, "word": " anticoagulant", "probability": 0.79580078125}, {"start": 2413.77, "end": 2414.79, "word": " وافضل", "probability": 0.7322998046875}, {"start": 2414.79, "end": 2415.51, "word": " anticoagulant", "probability": 0.90068359375}, {"start": 2415.51, "end": 2415.67, "word": " هو", "probability": 0.433837890625}, {"start": 2415.67, "end": 2415.97, "word": " 100", "probability": 0.063720703125}, {"start": 2415.97, "end": 2416.45, "word": "%", "probability": 0.8193359375}, {"start": 2416.45, "end": 2417.35, "word": " نمشي", "probability": 0.60296630859375}, {"start": 2417.35, "end": 2418.51, "word": " نبقى", "probability": 0.8035888671875}, {"start": 2418.51, "end": 2419.01, "word": " اتنين", "probability": 0.918212890625}, {"start": 2419.01, "end": 2419.89, "word": " platelet", "probability": 0.57568359375}, {"start": 
2419.89, "end": 2420.79, "word": " count", "probability": 0.93896484375}, {"start": 2420.79, "end": 2422.19, "word": " بمعنى", "probability": 0.98642578125}, {"start": 2422.19, "end": 2423.47, "word": " كده", "probability": 0.765625}, {"start": 2423.47, "end": 2423.75, "word": " فيه", "probability": 0.4759521484375}, {"start": 2423.75, "end": 2424.01, "word": " عدد", "probability": 0.9524739583333334}, {"start": 2424.01, "end": 2424.55, "word": " platelet", "probability": 0.717529296875}, {"start": 2424.55, "end": 2424.75, "word": " في", "probability": 0.8408203125}, {"start": 2424.75, "end": 2424.95, "word": " ال", "probability": 0.65234375}, {"start": 2424.95, "end": 2425.43, "word": " platelet", "probability": 0.8818359375}, {"start": 2425.43, "end": 2425.75, "word": " rich", "probability": 0.417724609375}, {"start": 2425.75, "end": 2426.21, "word": " بلازم", "probability": 0.6771240234375}, {"start": 2426.21, "end": 2426.39, "word": " اللى", "probability": 0.714599609375}, {"start": 2426.39, "end": 2426.53, "word": " انت", "probability": 0.737060546875}, {"start": 2426.53, "end": 2428.13, "word": " استخدمت", "probability": 0.8817138671875}, {"start": 2428.13, "end": 2429.11, "word": " انه", "probability": 0.574462890625}, {"start": 2429.11, "end": 2431.47, "word": " لما", "probability": 0.76220703125}, {"start": 2431.47, "end": 2431.97, "word": " بنضيف", "probability": 0.7840983072916666}, {"start": 2431.97, "end": 2432.13, "word": " ال", "probability": 0.849609375}, {"start": 2432.13, "end": 2432.69, "word": " agonist", "probability": 0.93310546875}, {"start": 2432.69, "end": 2433.27, "word": " بعمل", "probability": 0.9021809895833334}, {"start": 2433.27, "end": 2434.03, "word": " aggregation", "probability": 0.916748046875}, {"start": 2434.03, "end": 2434.27, "word": " ب", "probability": 0.256591796875}, {"start": 2434.27, "end": 2434.69, "word": " certain", "probability": 0.66552734375}, {"start": 2434.69, "end": 2435.89, "word": " activity", "probability": 
0.7001953125}], "temperature": 1.0}, {"id": 90, "seek": 247346, "start": 2443.58, "end": 2473.46, "text": "مش هتلاقي فيه فرق في ال transmittance reading طب وإذا ال platelet كتيرة كتير نفس الأكتران فبالتالي في هذه الحالة إذا كتيرة كتير بدنا نخففها ونشتغل مرة تانية بقى platelet size distribution حجمها قال كمان ال platelet اللي حجمها كبير مهما ترسم فيها هتاخد حجم", "tokens": [2304, 8592, 8032, 2655, 15040, 38436, 8978, 3224, 6156, 2288, 4587, 8978, 2423, 7715, 593, 719, 3760, 23032, 3555, 4032, 28814, 15730, 2423, 3403, 15966, 9122, 2655, 48923, 9122, 2655, 13546, 8717, 36178, 16247, 4117, 2655, 2288, 7649, 6156, 3555, 6027, 2655, 6027, 1829, 8978, 29538, 21542, 6027, 3660, 11933, 15730, 9122, 2655, 48923, 9122, 2655, 13546, 47525, 8315, 8717, 9778, 5172, 5172, 11296, 4032, 1863, 8592, 2655, 17082, 1211, 3714, 25720, 6055, 7649, 10632, 4724, 4587, 7578, 3403, 15966, 2744, 7316, 11331, 7435, 2304, 11296, 50239, 9122, 2304, 7649, 2423, 3403, 15966, 13672, 1829, 11331, 7435, 2304, 11296, 9122, 3555, 13546, 3714, 3224, 15042, 6055, 2288, 38251, 8978, 11296, 8032, 2655, 47283, 3215, 11331, 7435, 2304], "avg_logprob": -0.20497881242279278, "compression_ratio": 1.735042735042735, "no_speech_prob": 0.0, "words": [{"start": 2443.58, "end": 2443.84, "word": "مش", "probability": 0.65576171875}, {"start": 2443.84, "end": 2444.32, "word": " هتلاقي", "probability": 0.9376220703125}, {"start": 2444.32, "end": 2445.0, "word": " فيه", "probability": 0.720458984375}, {"start": 2445.0, "end": 2445.4, "word": " فرق", "probability": 0.9934895833333334}, {"start": 2445.4, "end": 2445.68, "word": " في", "probability": 0.83984375}, {"start": 2445.68, "end": 2445.9, "word": " ال", "probability": 0.83349609375}, {"start": 2445.9, "end": 2446.82, "word": " transmittance", "probability": 0.77099609375}, {"start": 2446.82, "end": 2447.22, "word": " reading", "probability": 0.79443359375}, {"start": 2447.22, "end": 2448.62, "word": " طب", "probability": 0.858154296875}, {"start": 2448.62, "end": 
2448.84, "word": " وإذا", "probability": 0.7627766927083334}, {"start": 2448.84, "end": 2449.0, "word": " ال", "probability": 0.84765625}, {"start": 2449.0, "end": 2449.4, "word": " platelet", "probability": 0.61474609375}, {"start": 2449.4, "end": 2449.82, "word": " كتيرة", "probability": 0.86962890625}, {"start": 2449.82, "end": 2450.44, "word": " كتير", "probability": 0.9720052083333334}, {"start": 2450.44, "end": 2453.14, "word": " نفس", "probability": 0.547088623046875}, {"start": 2453.14, "end": 2453.76, "word": " الأكتران", "probability": 0.5955322265625}, {"start": 2453.76, "end": 2454.72, "word": " فبالتالي", "probability": 0.9435221354166666}, {"start": 2454.72, "end": 2455.58, "word": " في", "probability": 0.91650390625}, {"start": 2455.58, "end": 2455.8, "word": " هذه", "probability": 0.6396484375}, {"start": 2455.8, "end": 2456.32, "word": " الحالة", "probability": 0.9833984375}, {"start": 2456.32, "end": 2456.52, "word": " إذا", "probability": 0.9619140625}, {"start": 2456.52, "end": 2456.82, "word": " كتيرة", "probability": 0.912109375}, {"start": 2456.82, "end": 2457.38, "word": " كتير", "probability": 0.9873046875}, {"start": 2457.38, "end": 2458.46, "word": " بدنا", "probability": 0.88134765625}, {"start": 2458.46, "end": 2459.74, "word": " نخففها", "probability": 0.98623046875}, {"start": 2459.74, "end": 2462.48, "word": " ونشتغل", "probability": 0.8338216145833334}, {"start": 2462.48, "end": 2463.0, "word": " مرة", "probability": 0.77099609375}, {"start": 2463.0, "end": 2463.4, "word": " تانية", "probability": 0.9830729166666666}, {"start": 2463.4, "end": 2463.76, "word": " بقى", "probability": 0.6393229166666666}, {"start": 2463.76, "end": 2464.64, "word": " platelet", "probability": 0.60888671875}, {"start": 2464.64, "end": 2465.02, "word": " size", "probability": 0.77490234375}, {"start": 2465.02, "end": 2465.92, "word": " distribution", "probability": 0.91796875}, {"start": 2465.92, "end": 2466.74, "word": " حجمها", "probability": 
0.9769287109375}, {"start": 2466.74, "end": 2468.0, "word": " قال", "probability": 0.10211181640625}, {"start": 2468.0, "end": 2468.52, "word": " كمان", "probability": 0.8603515625}, {"start": 2468.52, "end": 2468.92, "word": " ال", "probability": 0.755859375}, {"start": 2468.92, "end": 2469.42, "word": " platelet", "probability": 0.899658203125}, {"start": 2469.42, "end": 2469.62, "word": " اللي", "probability": 0.921875}, {"start": 2469.62, "end": 2470.06, "word": " حجمها", "probability": 0.99658203125}, {"start": 2470.06, "end": 2471.34, "word": " كبير", "probability": 0.9910481770833334}, {"start": 2471.34, "end": 2472.0, "word": " مهما", "probability": 0.8968098958333334}, {"start": 2472.0, "end": 2472.4, "word": " ترسم", "probability": 0.87548828125}, {"start": 2472.4, "end": 2472.76, "word": " فيها", "probability": 0.934814453125}, {"start": 2472.76, "end": 2473.12, "word": " هتاخد", "probability": 0.8897705078125}, {"start": 2473.12, "end": 2473.46, "word": " حجم", "probability": 0.9931640625}], "temperature": 1.0}, {"id": 91, "seek": 250339, "start": 2475.27, "end": 2503.39, "text": "بتحجم بيهاش؟ Transmitters فبالتالي برضه هتأثر على النتيجة من الحاجات الغريبة شباب لجوا حاجتين كمان بيأثروا على نتائجنا الحاجة الأولانية وقت صحب العينة وجدوا انه في اختلاف في اليوم الواحد ماشي في ال platelet distribution شو يعني؟ يعني ال platelet الصبح غير عن ال platelet", "tokens": [3555, 2655, 5016, 7435, 2304, 4724, 1829, 3224, 33599, 22807, 6531, 3508, 1559, 6156, 3555, 6027, 2655, 6027, 1829, 4724, 43042, 3224, 8032, 2655, 10721, 49115, 15844, 28239, 31371, 7435, 3660, 9154, 21542, 26108, 9307, 6024, 118, 16572, 49401, 13412, 3555, 16758, 5296, 7435, 14407, 11331, 26108, 2655, 9957, 9122, 2304, 7649, 4724, 1829, 10721, 49115, 14407, 15844, 8717, 2655, 16373, 7435, 8315, 21542, 26108, 3660, 16247, 12610, 7649, 10632, 4032, 38149, 20328, 5016, 3555, 18863, 9957, 3660, 49610, 3215, 14407, 16472, 3224, 8978, 1975, 46456, 15040, 5172, 8978, 45595, 20498, 2423, 14407, 24401, 
3714, 33599, 1829, 8978, 2423, 3403, 15966, 7316, 13412, 2407, 37495, 22653, 22807, 37495, 22653, 2423, 3403, 15966, 31767, 49628, 32771, 13546, 18871, 2423, 3403, 15966], "avg_logprob": -0.19008264068729622, "compression_ratio": 1.7431906614785992, "no_speech_prob": 4.76837158203125e-07, "words": [{"start": 2475.27, "end": 2475.71, "word": "بتحجم", "probability": 0.64637451171875}, {"start": 2475.71, "end": 2476.53, "word": " بيهاش؟", "probability": 0.610400390625}, {"start": 2476.53, "end": 2478.21, "word": " Transmitters", "probability": 0.7711588541666666}, {"start": 2478.21, "end": 2479.15, "word": " فبالتالي", "probability": 0.8576253255208334}, {"start": 2479.15, "end": 2479.39, "word": " برضه", "probability": 0.9207356770833334}, {"start": 2479.39, "end": 2479.87, "word": " هتأثر", "probability": 0.823974609375}, {"start": 2479.87, "end": 2479.97, "word": " على", "probability": 0.81689453125}, {"start": 2479.97, "end": 2480.45, "word": " النتيجة", "probability": 0.9263916015625}, {"start": 2480.45, "end": 2481.11, "word": " من", "probability": 0.88037109375}, {"start": 2481.11, "end": 2481.45, "word": " الحاجات", "probability": 0.9905598958333334}, {"start": 2481.45, "end": 2481.91, "word": " الغريبة", "probability": 0.9720458984375}, {"start": 2481.91, "end": 2482.31, "word": " شباب", "probability": 0.8780924479166666}, {"start": 2482.31, "end": 2482.87, "word": " لجوا", "probability": 0.74267578125}, {"start": 2482.87, "end": 2484.19, "word": " حاجتين", "probability": 0.967041015625}, {"start": 2484.19, "end": 2484.59, "word": " كمان", "probability": 0.8624674479166666}, {"start": 2484.59, "end": 2485.07, "word": " بيأثروا", "probability": 0.9751953125}, {"start": 2485.07, "end": 2485.19, "word": " على", "probability": 0.81494140625}, {"start": 2485.19, "end": 2485.73, "word": " نتائجنا", "probability": 0.98642578125}, {"start": 2485.73, "end": 2486.05, "word": " الحاجة", "probability": 0.9353841145833334}, {"start": 2486.05, "end": 2486.71, "word": " 
الأولانية", "probability": 0.8857421875}, {"start": 2486.71, "end": 2488.07, "word": " وقت", "probability": 0.956787109375}, {"start": 2488.07, "end": 2488.43, "word": " صحب", "probability": 0.8224283854166666}, {"start": 2488.43, "end": 2489.35, "word": " العينة", "probability": 0.8531901041666666}, {"start": 2489.35, "end": 2490.23, "word": " وجدوا", "probability": 0.84814453125}, {"start": 2490.23, "end": 2490.41, "word": " انه", "probability": 0.577392578125}, {"start": 2490.41, "end": 2490.51, "word": " في", "probability": 0.92431640625}, {"start": 2490.51, "end": 2491.35, "word": " اختلاف", "probability": 0.9168701171875}, {"start": 2491.35, "end": 2492.17, "word": " في", "probability": 0.98486328125}, {"start": 2492.17, "end": 2492.81, "word": " اليوم", "probability": 0.988037109375}, {"start": 2492.81, "end": 2494.07, "word": " الواحد", "probability": 0.9866536458333334}, {"start": 2494.07, "end": 2495.39, "word": " ماشي", "probability": 0.8553059895833334}, {"start": 2495.39, "end": 2495.89, "word": " في", "probability": 0.82080078125}, {"start": 2495.89, "end": 2496.13, "word": " ال", "probability": 0.88330078125}, {"start": 2496.13, "end": 2496.67, "word": " platelet", "probability": 0.64599609375}, {"start": 2496.67, "end": 2497.57, "word": " distribution", "probability": 0.90673828125}, {"start": 2497.57, "end": 2498.59, "word": " شو", "probability": 0.736083984375}, {"start": 2498.59, "end": 2499.03, "word": " يعني؟", "probability": 0.776611328125}, {"start": 2499.03, "end": 2499.73, "word": " يعني", "probability": 0.93994140625}, {"start": 2499.73, "end": 2499.87, "word": " ال", "probability": 0.84912109375}, {"start": 2499.87, "end": 2500.35, "word": " platelet", "probability": 0.861572265625}, {"start": 2500.35, "end": 2500.97, "word": " الصبح", "probability": 0.989990234375}, {"start": 2500.97, "end": 2501.95, "word": " غير", "probability": 0.97509765625}, {"start": 2501.95, "end": 2502.79, "word": " عن", "probability": 0.94287109375}, {"start": 
2502.79, "end": 2502.95, "word": " ال", "probability": 0.85302734375}, {"start": 2502.95, "end": 2503.39, "word": " platelet", "probability": 0.897705078125}], "temperature": 1.0}, {"id": 92, "seek": 253719, "start": 2508.19, "end": 2537.19, "text": "distribution مختلفة و هدى بيسموها diurnal variation اختلافات في عدد ال platelets even within the same day زى ما قلتلكوا انا في ال hematology على سبيل المثال ال white pieces الصبح اقل من بعد الظهر ليش ببساطة لإنه لسه الإنسان ماتردش ل antigenet عشان تفور لإن هى الشغلها", "tokens": [42649, 30783, 3714, 46456, 46538, 3660, 4032, 8032, 3215, 7578, 4724, 1829, 38251, 2407, 11296, 1026, 925, 304, 12990, 1975, 46456, 15040, 5172, 9307, 8978, 6225, 3215, 3215, 2423, 3403, 37220, 754, 1951, 264, 912, 786, 30767, 7578, 19446, 12174, 1211, 2655, 23275, 14407, 1975, 8315, 8978, 2423, 8636, 267, 1793, 15844, 8608, 21292, 1211, 9673, 12984, 6027, 2423, 2418, 3755, 31767, 49628, 1975, 4587, 1211, 9154, 39182, 6024, 116, 3224, 2288, 32239, 8592, 4724, 3555, 3794, 41193, 3660, 5296, 28814, 1863, 3224, 5296, 3794, 3224, 33688, 1863, 3794, 7649, 3714, 9307, 2288, 3215, 8592, 5296, 2511, 3213, 302, 6225, 8592, 7649, 6055, 5172, 13063, 5296, 28814, 1863, 8032, 7578, 25124, 17082, 1211, 11296], "avg_logprob": -0.25991849121840105, "compression_ratio": 1.4981273408239701, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 2508.19, "end": 2509.03, "word": "distribution", "probability": 0.778076171875}, {"start": 2509.03, "end": 2509.85, "word": " مختلفة", "probability": 0.98681640625}, {"start": 2509.85, "end": 2510.17, "word": " و", "probability": 0.391357421875}, {"start": 2510.17, "end": 2510.33, "word": " هدى", "probability": 0.531494140625}, {"start": 2510.33, "end": 2510.79, "word": " بيسموها", "probability": 0.97646484375}, {"start": 2510.79, "end": 2511.91, "word": " diurnal", "probability": 0.8211263020833334}, {"start": 2511.91, "end": 2517.43, "word": " variation", "probability": 0.89453125}, {"start": 2517.43, "end": 
2518.77, "word": " اختلافات", "probability": 0.7388671875}, {"start": 2518.77, "end": 2519.59, "word": " في", "probability": 0.90185546875}, {"start": 2519.59, "end": 2521.07, "word": " عدد", "probability": 0.9514973958333334}, {"start": 2521.07, "end": 2521.19, "word": " ال", "probability": 0.61572265625}, {"start": 2521.19, "end": 2521.63, "word": " platelets", "probability": 0.476318359375}, {"start": 2521.63, "end": 2521.95, "word": " even", "probability": 0.76953125}, {"start": 2521.95, "end": 2522.25, "word": " within", "probability": 0.8740234375}, {"start": 2522.25, "end": 2522.59, "word": " the", "probability": 0.88671875}, {"start": 2522.59, "end": 2523.47, "word": " same", "probability": 0.912109375}, {"start": 2523.47, "end": 2523.89, "word": " day", "probability": 0.9404296875}, {"start": 2523.89, "end": 2524.53, "word": " زى", "probability": 0.8896484375}, {"start": 2524.53, "end": 2524.59, "word": " ما", "probability": 0.93017578125}, {"start": 2524.59, "end": 2524.97, "word": " قلتلكوا", "probability": 0.91064453125}, {"start": 2524.97, "end": 2525.13, "word": " انا", "probability": 0.791748046875}, {"start": 2525.13, "end": 2525.25, "word": " في", "probability": 0.54443359375}, {"start": 2525.25, "end": 2525.31, "word": " ال", "probability": 0.6923828125}, {"start": 2525.31, "end": 2525.77, "word": " hematology", "probability": 0.7085774739583334}, {"start": 2525.77, "end": 2525.91, "word": " على", "probability": 0.81640625}, {"start": 2525.91, "end": 2526.15, "word": " سبيل", "probability": 0.9327799479166666}, {"start": 2526.15, "end": 2526.57, "word": " المثال", "probability": 0.8562825520833334}, {"start": 2526.57, "end": 2527.29, "word": " ال", "probability": 0.955078125}, {"start": 2527.29, "end": 2527.55, "word": " white", "probability": 0.7998046875}, {"start": 2527.55, "end": 2527.93, "word": " pieces", "probability": 0.52001953125}, {"start": 2527.93, "end": 2528.53, "word": " الصبح", "probability": 0.9599609375}, {"start": 2528.53, 
"end": 2529.03, "word": " اقل", "probability": 0.94482421875}, {"start": 2529.03, "end": 2529.23, "word": " من", "probability": 0.9921875}, {"start": 2529.23, "end": 2529.45, "word": " بعد", "probability": 0.931640625}, {"start": 2529.45, "end": 2531.19, "word": " الظهر", "probability": 0.825439453125}, {"start": 2531.19, "end": 2531.83, "word": " ليش", "probability": 0.840087890625}, {"start": 2531.83, "end": 2532.53, "word": " ببساطة", "probability": 0.94140625}, {"start": 2532.53, "end": 2533.33, "word": " لإنه", "probability": 0.73114013671875}, {"start": 2533.33, "end": 2533.51, "word": " لسه", "probability": 0.9358723958333334}, {"start": 2533.51, "end": 2533.83, "word": " الإنسان", "probability": 0.8834228515625}, {"start": 2533.83, "end": 2534.41, "word": " ماتردش", "probability": 0.695654296875}, {"start": 2534.41, "end": 2534.53, "word": " ل", "probability": 0.435791015625}, {"start": 2534.53, "end": 2535.29, "word": " antigenet", "probability": 0.6298828125}, {"start": 2535.29, "end": 2535.67, "word": " عشان", "probability": 0.7530110677083334}, {"start": 2535.67, "end": 2536.03, "word": " تفور", "probability": 0.7386881510416666}, {"start": 2536.03, "end": 2536.47, "word": " لإن", "probability": 0.919921875}, {"start": 2536.47, "end": 2536.65, "word": " هى", "probability": 0.6456298828125}, {"start": 2536.65, "end": 2537.19, "word": " الشغلها", "probability": 0.903564453125}], "temperature": 1.0}, {"id": 93, "seek": 256732, "start": 2538.08, "end": 2567.32, "text": "مش defence؟ اه defence، platelet نفس الحكاية، ماشي؟ وبالتالي تتأثر بوقت صح بالعينة، الاشي التاني له في علاقة ما بين الواجبة، الأكل اللي بتاكله والنتيجة وهذا وضع طبيعي جدا يا شباب لإنه برضه يعني عكس سلبا على ال platelet distribution يعني واحد ما أكل وراه يفحص", "tokens": [2304, 8592, 25913, 22807, 1975, 3224, 25913, 12399, 3403, 15966, 8717, 36178, 21542, 4117, 995, 10632, 12399, 3714, 33599, 1829, 22807, 46599, 6027, 2655, 6027, 1829, 6055, 2655, 10721, 49115, 4724, 30543, 2655, 20328, 5016, 
20666, 3615, 9957, 3660, 12399, 2423, 33599, 1829, 16712, 7649, 1829, 46740, 8978, 11203, 995, 28671, 19446, 49374, 2423, 2407, 26108, 49401, 12399, 16247, 28820, 13672, 1829, 39894, 995, 28820, 3224, 16070, 29399, 1829, 7435, 3660, 37037, 15730, 4032, 11242, 3615, 23032, 21292, 3615, 1829, 10874, 28259, 35186, 13412, 3555, 16758, 5296, 28814, 1863, 3224, 4724, 43042, 3224, 37495, 22653, 6225, 4117, 3794, 8608, 46152, 995, 15844, 2423, 3403, 15966, 7316, 37495, 22653, 36764, 24401, 19446, 5551, 28820, 4032, 23557, 3224, 7251, 5172, 5016, 9381], "avg_logprob": -0.22908058540880188, "compression_ratio": 1.7023809523809523, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 2538.08, "end": 2538.32, "word": "مش", "probability": 0.5404052734375}, {"start": 2538.32, "end": 2539.34, "word": " defence؟", "probability": 0.384521484375}, {"start": 2539.34, "end": 2539.54, "word": " اه", "probability": 0.6795654296875}, {"start": 2539.54, "end": 2540.46, "word": " defence،", "probability": 0.5875244140625}, {"start": 2540.46, "end": 2540.9, "word": " platelet", "probability": 0.5684814453125}, {"start": 2540.9, "end": 2541.18, "word": " نفس", "probability": 0.976318359375}, {"start": 2541.18, "end": 2542.5, "word": " الحكاية،", "probability": 0.848779296875}, {"start": 2542.5, "end": 2543.6, "word": " ماشي؟", "probability": 0.8265380859375}, {"start": 2543.6, "end": 2544.22, "word": " وبالتالي", "probability": 0.90126953125}, {"start": 2544.22, "end": 2545.16, "word": " تتأثر", "probability": 0.970458984375}, {"start": 2545.16, "end": 2546.04, "word": " بوقت", "probability": 0.9837239583333334}, {"start": 2546.04, "end": 2546.22, "word": " صح", "probability": 0.917724609375}, {"start": 2546.22, "end": 2547.14, "word": " بالعينة،", "probability": 0.80205078125}, {"start": 2547.14, "end": 2547.48, "word": " الاشي", "probability": 0.7953287760416666}, {"start": 2547.48, "end": 2547.9, "word": " التاني", "probability": 0.9163411458333334}, {"start": 2547.9, "end": 
2548.12, "word": " له", "probability": 0.86083984375}, {"start": 2548.12, "end": 2548.34, "word": " في", "probability": 0.92626953125}, {"start": 2548.34, "end": 2548.96, "word": " علاقة", "probability": 0.8634440104166666}, {"start": 2548.96, "end": 2549.7, "word": " ما", "probability": 0.94775390625}, {"start": 2549.7, "end": 2550.08, "word": " بين", "probability": 0.9931640625}, {"start": 2550.08, "end": 2552.88, "word": " الواجبة،", "probability": 0.758837890625}, {"start": 2552.88, "end": 2553.3, "word": " الأكل", "probability": 0.859375}, {"start": 2553.3, "end": 2553.6, "word": " اللي", "probability": 0.813232421875}, {"start": 2553.6, "end": 2554.54, "word": " بتاكله", "probability": 0.93603515625}, {"start": 2554.54, "end": 2557.6, "word": " والنتيجة", "probability": 0.84462890625}, {"start": 2557.6, "end": 2558.54, "word": " وهذا", "probability": 0.696044921875}, {"start": 2558.54, "end": 2558.8, "word": " وضع", "probability": 0.48828125}, {"start": 2558.8, "end": 2559.14, "word": " طبيعي", "probability": 0.974365234375}, {"start": 2559.14, "end": 2559.44, "word": " جدا", "probability": 0.986083984375}, {"start": 2559.44, "end": 2559.66, "word": " يا", "probability": 0.79736328125}, {"start": 2559.66, "end": 2559.9, "word": " شباب", "probability": 0.9910481770833334}, {"start": 2559.9, "end": 2560.3, "word": " لإنه", "probability": 0.7772216796875}, {"start": 2560.3, "end": 2560.62, "word": " برضه", "probability": 0.8943684895833334}, {"start": 2560.62, "end": 2560.98, "word": " يعني", "probability": 0.74755859375}, {"start": 2560.98, "end": 2561.52, "word": " عكس", "probability": 0.9386393229166666}, {"start": 2561.52, "end": 2562.28, "word": " سلبا", "probability": 0.9749348958333334}, {"start": 2562.28, "end": 2563.14, "word": " على", "probability": 0.89501953125}, {"start": 2563.14, "end": 2563.46, "word": " ال", "probability": 0.818359375}, {"start": 2563.46, "end": 2564.62, "word": " platelet", "probability": 0.6307373046875}, {"start": 2564.62, 
"end": 2565.28, "word": " distribution", "probability": 0.93701171875}, {"start": 2565.28, "end": 2565.96, "word": " يعني", "probability": 0.781494140625}, {"start": 2565.96, "end": 2566.24, "word": " واحد", "probability": 0.987548828125}, {"start": 2566.24, "end": 2566.42, "word": " ما", "probability": 0.85791015625}, {"start": 2566.42, "end": 2566.6, "word": " أكل", "probability": 0.751220703125}, {"start": 2566.6, "end": 2566.86, "word": " وراه", "probability": 0.7556966145833334}, {"start": 2566.86, "end": 2567.32, "word": " يفحص", "probability": 0.9720458984375}], "temperature": 1.0}, {"id": 94, "seek": 259367, "start": 2568.0, "end": 2593.68, "text": "هتلاقي كل الدم وين وصل من ال .. عند المعدة، في normal distribution، مش normal نفس الأشي لواحد بالعكس، بالجهة المقابلة، واحد جاينا من شهود رياضة، كرة قدم، كان يجري له ساعتين تلاتة، كيف؟ فبالتالي الجهتين لازم يكونوا عايش يأخذوا بعين الاعتبار، حد عندك سؤال؟", "tokens": [3224, 2655, 15040, 38436, 28242, 32748, 2304, 4032, 9957, 4032, 36520, 9154, 2423, 4386, 43242, 9673, 22488, 3660, 12399, 8978, 2710, 7316, 12399, 37893, 2710, 8717, 36178, 16247, 8592, 1829, 5296, 14407, 24401, 20666, 3615, 4117, 3794, 12399, 20666, 7435, 3224, 3660, 9673, 4587, 16758, 37977, 12399, 36764, 24401, 10874, 47302, 8315, 9154, 13412, 3224, 23328, 12602, 1829, 46958, 3660, 12399, 9122, 25720, 12174, 40448, 12399, 25961, 7251, 7435, 16572, 46740, 8608, 995, 34268, 9957, 6055, 1211, 9307, 3660, 12399, 9122, 33911, 22807, 6156, 3555, 6027, 2655, 6027, 1829, 25724, 3224, 2655, 9957, 5296, 31377, 2304, 7251, 30544, 14407, 6225, 47302, 8592, 7251, 10721, 9778, 8848, 14407, 45030, 9957, 42963, 34268, 3555, 9640, 12399, 11331, 3215, 43242, 4117, 8608, 33604, 6027, 22807], "avg_logprob": -0.18699186870722267, "compression_ratio": 1.7312252964426877, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2568.0, "end": 2568.46, "word": "هتلاقي", "probability": 0.941162109375}, {"start": 2568.46, "end": 2568.72, "word": " كل", "probability": 
0.9482421875}, {"start": 2568.72, "end": 2569.08, "word": " الدم", "probability": 0.9501953125}, {"start": 2569.08, "end": 2569.28, "word": " وين", "probability": 0.919921875}, {"start": 2569.28, "end": 2569.64, "word": " وصل", "probability": 0.9873046875}, {"start": 2569.64, "end": 2570.4, "word": " من", "probability": 0.292236328125}, {"start": 2570.4, "end": 2570.6, "word": " ال", "probability": 0.38818359375}, {"start": 2570.6, "end": 2570.7, "word": " ..", "probability": 0.5703125}, {"start": 2570.7, "end": 2570.86, "word": " عند", "probability": 0.71826171875}, {"start": 2570.86, "end": 2571.78, "word": " المعدة،", "probability": 0.63409423828125}, {"start": 2571.78, "end": 2571.9, "word": " في", "probability": 0.79248046875}, {"start": 2571.9, "end": 2572.24, "word": " normal", "probability": 0.62646484375}, {"start": 2572.24, "end": 2573.32, "word": " distribution،", "probability": 0.632568359375}, {"start": 2573.32, "end": 2573.62, "word": " مش", "probability": 0.77490234375}, {"start": 2573.62, "end": 2574.02, "word": " normal", "probability": 0.88818359375}, {"start": 2574.02, "end": 2574.76, "word": " نفس", "probability": 0.835205078125}, {"start": 2574.76, "end": 2575.06, "word": " الأشي", "probability": 0.80126953125}, {"start": 2575.06, "end": 2575.62, "word": " لواحد", "probability": 0.8125813802083334}, {"start": 2575.62, "end": 2576.5, "word": " بالعكس،", "probability": 0.97509765625}, {"start": 2576.5, "end": 2576.84, "word": " بالجهة", "probability": 0.9525146484375}, {"start": 2576.84, "end": 2577.82, "word": " المقابلة،", "probability": 0.97431640625}, {"start": 2577.82, "end": 2578.1, "word": " واحد", "probability": 0.896240234375}, {"start": 2578.1, "end": 2578.82, "word": " جاينا", "probability": 0.8040364583333334}, {"start": 2578.82, "end": 2578.98, "word": " من", "probability": 0.98388671875}, {"start": 2578.98, "end": 2579.32, "word": " شهود", "probability": 0.9108072916666666}, {"start": 2579.32, "end": 2581.26, "word": " رياضة،", 
"probability": 0.95869140625}, {"start": 2581.26, "end": 2581.44, "word": " كرة", "probability": 0.8525390625}, {"start": 2581.44, "end": 2582.6, "word": " قدم،", "probability": 0.9220377604166666}, {"start": 2582.6, "end": 2582.82, "word": " كان", "probability": 0.96923828125}, {"start": 2582.82, "end": 2583.2, "word": " يجري", "probability": 0.9853515625}, {"start": 2583.2, "end": 2583.44, "word": " له", "probability": 0.376708984375}, {"start": 2583.44, "end": 2583.88, "word": " ساعتين", "probability": 0.959228515625}, {"start": 2583.88, "end": 2584.9, "word": " تلاتة،", "probability": 0.72109375}, {"start": 2584.9, "end": 2586.3, "word": " كيف؟", "probability": 0.7899576822916666}, {"start": 2586.3, "end": 2586.86, "word": " فبالتالي", "probability": 0.9740397135416666}, {"start": 2586.86, "end": 2587.96, "word": " الجهتين", "probability": 0.9576416015625}, {"start": 2587.96, "end": 2588.68, "word": " لازم", "probability": 0.9835611979166666}, {"start": 2588.68, "end": 2589.08, "word": " يكونوا", "probability": 0.9661458333333334}, {"start": 2589.08, "end": 2589.48, "word": " عايش", "probability": 0.640625}, {"start": 2589.48, "end": 2591.24, "word": " يأخذوا", "probability": 0.80185546875}, {"start": 2591.24, "end": 2591.6, "word": " بعين", "probability": 0.941650390625}, {"start": 2591.6, "end": 2592.76, "word": " الاعتبار،", "probability": 0.8939453125}, {"start": 2592.76, "end": 2593.02, "word": " حد", "probability": 0.6903076171875}, {"start": 2593.02, "end": 2593.24, "word": " عندك", "probability": 0.552978515625}, {"start": 2593.24, "end": 2593.68, "word": " سؤال؟", "probability": 0.9871826171875}], "temperature": 1.0}, {"id": 95, "seek": 262711, "start": 2597.87, "end": 2627.11, "text": "أه لازم يكون واجبة خفيفة قبل ساعتين من صحب العينة و يكون مرتاح من أي مجهود عضني هتدني سؤال؟ طيب من الأمراض التانية يا شباب storage pool defect storage pool defect هو خلل في ال granules خلل في ايش؟ في ال granules و كنا بنعرف أن فيانا two types و تانية درس و ال .. و ال .. 
و ال ..", "tokens": [10721, 3224, 5296, 31377, 2304, 7251, 30544, 4032, 26108, 49401, 16490, 5172, 33911, 3660, 12174, 36150, 8608, 995, 34268, 9957, 9154, 20328, 5016, 3555, 18863, 9957, 3660, 4032, 7251, 30544, 3714, 43500, 39319, 9154, 36632, 3714, 7435, 3224, 23328, 6225, 11242, 22653, 8032, 2655, 3215, 22653, 8608, 33604, 6027, 22807, 23032, 1829, 3555, 9154, 16247, 29973, 46958, 16712, 7649, 10632, 35186, 13412, 3555, 16758, 6725, 7005, 16445, 6725, 7005, 16445, 31439, 16490, 1211, 1211, 8978, 2423, 9370, 3473, 16490, 1211, 1211, 8978, 1975, 1829, 8592, 22807, 8978, 2423, 9370, 3473, 4032, 9122, 8315, 44945, 3615, 28480, 14739, 8978, 7649, 995, 732, 3467, 4032, 6055, 7649, 10632, 11778, 2288, 3794, 4032, 2423, 4386, 4032, 2423, 4386, 4032, 2423, 4386], "avg_logprob": -0.2557773074182142, "compression_ratio": 1.815126050420168, "no_speech_prob": 0.0, "words": [{"start": 2597.87, "end": 2598.19, "word": "أه", "probability": 0.67919921875}, {"start": 2598.19, "end": 2598.49, "word": " لازم", "probability": 0.9422200520833334}, {"start": 2598.49, "end": 2598.77, "word": " يكون", "probability": 0.989013671875}, {"start": 2598.77, "end": 2599.11, "word": " واجبة", "probability": 0.85693359375}, {"start": 2599.11, "end": 2599.73, "word": " خفيفة", "probability": 0.9808349609375}, {"start": 2599.73, "end": 2600.27, "word": " قبل", "probability": 0.982177734375}, {"start": 2600.27, "end": 2600.91, "word": " ساعتين", "probability": 0.9453125}, {"start": 2600.91, "end": 2601.89, "word": " من", "probability": 0.95068359375}, {"start": 2601.89, "end": 2602.25, "word": " صحب", "probability": 0.7862955729166666}, {"start": 2602.25, "end": 2602.97, "word": " العينة", "probability": 0.8834635416666666}, {"start": 2602.97, "end": 2603.31, "word": " و", "probability": 0.841796875}, {"start": 2603.31, "end": 2603.69, "word": " يكون", "probability": 0.71142578125}, {"start": 2603.69, "end": 2604.29, "word": " مرتاح", "probability": 0.98828125}, {"start": 2604.29, "end": 2605.77, 
"word": " من", "probability": 0.97998046875}, {"start": 2605.77, "end": 2606.01, "word": " أي", "probability": 0.79150390625}, {"start": 2606.01, "end": 2606.43, "word": " مجهود", "probability": 0.9317626953125}, {"start": 2606.43, "end": 2606.83, "word": " عضني", "probability": 0.74609375}, {"start": 2606.83, "end": 2608.51, "word": " هتدني", "probability": 0.679443359375}, {"start": 2608.51, "end": 2611.95, "word": " سؤال؟", "probability": 0.872802734375}, {"start": 2611.95, "end": 2612.47, "word": " طيب", "probability": 0.8533528645833334}, {"start": 2612.47, "end": 2613.43, "word": " من", "probability": 0.7451171875}, {"start": 2613.43, "end": 2613.91, "word": " الأمراض", "probability": 0.7884928385416666}, {"start": 2613.91, "end": 2614.25, "word": " التانية", "probability": 0.9825846354166666}, {"start": 2614.25, "end": 2614.43, "word": " يا", "probability": 0.6435546875}, {"start": 2614.43, "end": 2614.57, "word": " شباب", "probability": 0.9912109375}, {"start": 2614.57, "end": 2614.93, "word": " storage", "probability": 0.45849609375}, {"start": 2614.93, "end": 2615.19, "word": " pool", "probability": 0.62548828125}, {"start": 2615.19, "end": 2615.97, "word": " defect", "probability": 0.927734375}, {"start": 2615.97, "end": 2616.51, "word": " storage", "probability": 0.283935546875}, {"start": 2616.51, "end": 2616.89, "word": " pool", "probability": 0.9833984375}, {"start": 2616.89, "end": 2617.43, "word": " defect", "probability": 0.9853515625}, {"start": 2617.43, "end": 2618.15, "word": " هو", "probability": 0.91162109375}, {"start": 2618.15, "end": 2618.49, "word": " خلل", "probability": 0.9817708333333334}, {"start": 2618.49, "end": 2618.71, "word": " في", "probability": 0.93115234375}, {"start": 2618.71, "end": 2618.89, "word": " ال", "probability": 0.85546875}, {"start": 2618.89, "end": 2619.73, "word": " granules", "probability": 0.79443359375}, {"start": 2619.73, "end": 2620.97, "word": " خلل", "probability": 0.9775390625}, {"start": 2620.97, "end": 
2621.15, "word": " في", "probability": 0.98046875}, {"start": 2621.15, "end": 2621.49, "word": " ايش؟", "probability": 0.72186279296875}, {"start": 2621.49, "end": 2621.57, "word": " في", "probability": 0.95751953125}, {"start": 2621.57, "end": 2621.67, "word": " ال", "probability": 0.8994140625}, {"start": 2621.67, "end": 2622.13, "word": " granules", "probability": 0.935791015625}, {"start": 2622.13, "end": 2622.25, "word": " و", "probability": 0.57080078125}, {"start": 2622.25, "end": 2622.43, "word": " كنا", "probability": 0.6783447265625}, {"start": 2622.43, "end": 2622.75, "word": " بنعرف", "probability": 0.8655598958333334}, {"start": 2622.75, "end": 2622.89, "word": " أن", "probability": 0.56787109375}, {"start": 2622.89, "end": 2623.23, "word": " فيانا", "probability": 0.6195475260416666}, {"start": 2623.23, "end": 2623.33, "word": " two", "probability": 0.6015625}, {"start": 2623.33, "end": 2623.65, "word": " types", "probability": 0.908203125}, {"start": 2623.65, "end": 2623.81, "word": " و", "probability": 0.77783203125}, {"start": 2623.81, "end": 2624.19, "word": " تانية", "probability": 0.5557861328125}, {"start": 2624.19, "end": 2625.01, "word": " درس", "probability": 0.6234537760416666}, {"start": 2625.01, "end": 2626.17, "word": " و", "probability": 0.85693359375}, {"start": 2626.17, "end": 2626.27, "word": " ال", "probability": 0.3408203125}, {"start": 2626.27, "end": 2626.47, "word": " ..", "probability": 0.35693359375}, {"start": 2626.47, "end": 2626.65, "word": " و", "probability": 0.6259765625}, {"start": 2626.65, "end": 2626.79, "word": " ال", "probability": 0.7001953125}, {"start": 2626.79, "end": 2626.85, "word": " ..", "probability": 0.91015625}, {"start": 2626.85, "end": 2626.95, "word": " و", "probability": 0.775390625}, {"start": 2626.95, "end": 2627.07, "word": " ال", "probability": 0.880859375}, {"start": 2627.07, "end": 2627.11, "word": " ..", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 96, "seek": 265694, "start": 
2627.74, "end": 2656.94, "text": "classified by type of granule deficiency or secretion defect النوع الأولاني هو dense body deficiency أو alpha granule deficiency حسب نوعها إما dense أو غير في ال alpha بيسموها كمان gray platelet syndrome ليش جاروا gray platelet syndrome؟ كان لإنه بتاعتهم اللون اللي هو ال gray color under the microscope يوم تسبقها الجمزا أو الرايت", "tokens": [11665, 2587, 538, 2010, 295, 9370, 2271, 37500, 420, 4054, 313, 16445, 28239, 45367, 16247, 12610, 7649, 1829, 31439, 18011, 1772, 37500, 34051, 8961, 9370, 2271, 37500, 11331, 35457, 8717, 45367, 11296, 11933, 15042, 18011, 34051, 32771, 13546, 8978, 2423, 8961, 4724, 1829, 38251, 2407, 11296, 9122, 2304, 7649, 10855, 3403, 15966, 19371, 32239, 8592, 10874, 9640, 14407, 10855, 3403, 15966, 19371, 22807, 25961, 5296, 28814, 1863, 3224, 39894, 995, 34268, 16095, 13672, 11536, 13672, 1829, 31439, 2423, 10855, 2017, 833, 264, 29753, 7251, 20498, 6055, 35457, 4587, 11296, 25724, 2304, 11622, 995, 34051, 34892, 995, 36081], "avg_logprob": -0.29544004189724826, "compression_ratio": 1.6466165413533835, "no_speech_prob": 0.0, "words": [{"start": 2627.74, "end": 2628.54, "word": "classified", "probability": 0.6268310546875}, {"start": 2628.54, "end": 2628.8, "word": " by", "probability": 0.95849609375}, {"start": 2628.8, "end": 2629.18, "word": " type", "probability": 0.8486328125}, {"start": 2629.18, "end": 2629.34, "word": " of", "probability": 0.95849609375}, {"start": 2629.34, "end": 2629.7, "word": " granule", "probability": 0.579345703125}, {"start": 2629.7, "end": 2630.36, "word": " deficiency", "probability": 0.90185546875}, {"start": 2630.36, "end": 2630.86, "word": " or", "probability": 0.80908203125}, {"start": 2630.86, "end": 2631.92, "word": " secretion", "probability": 0.93896484375}, {"start": 2631.92, "end": 2632.36, "word": " defect", "probability": 0.44873046875}, {"start": 2632.36, "end": 2633.78, "word": " النوع", "probability": 0.723388671875}, {"start": 2633.78, "end": 2634.28, 
"word": " الأولاني", "probability": 0.87353515625}, {"start": 2634.28, "end": 2634.46, "word": " هو", "probability": 0.9501953125}, {"start": 2634.46, "end": 2634.82, "word": " dense", "probability": 0.814453125}, {"start": 2634.82, "end": 2635.36, "word": " body", "probability": 0.8212890625}, {"start": 2635.36, "end": 2636.2, "word": " deficiency", "probability": 0.8701171875}, {"start": 2636.2, "end": 2637.74, "word": " أو", "probability": 0.77001953125}, {"start": 2637.74, "end": 2638.42, "word": " alpha", "probability": 0.6845703125}, {"start": 2638.42, "end": 2639.08, "word": " granule", "probability": 0.88330078125}, {"start": 2639.08, "end": 2639.94, "word": " deficiency", "probability": 0.9384765625}, {"start": 2639.94, "end": 2640.34, "word": " حسب", "probability": 0.91943359375}, {"start": 2640.34, "end": 2640.76, "word": " نوعها", "probability": 0.98291015625}, {"start": 2640.76, "end": 2641.06, "word": " إما", "probability": 0.61669921875}, {"start": 2641.06, "end": 2641.38, "word": " dense", "probability": 0.87841796875}, {"start": 2641.38, "end": 2641.54, "word": " أو", "probability": 0.74169921875}, {"start": 2641.54, "end": 2641.86, "word": " غير", "probability": 0.6868896484375}, {"start": 2641.86, "end": 2642.76, "word": " في", "probability": 0.53076171875}, {"start": 2642.76, "end": 2642.88, "word": " ال", "probability": 0.71142578125}, {"start": 2642.88, "end": 2643.12, "word": " alpha", "probability": 0.471435546875}, {"start": 2643.12, "end": 2643.6, "word": " بيسموها", "probability": 0.9111328125}, {"start": 2643.6, "end": 2643.96, "word": " كمان", "probability": 0.8268229166666666}, {"start": 2643.96, "end": 2644.32, "word": " gray", "probability": 0.415771484375}, {"start": 2644.32, "end": 2645.08, "word": " platelet", "probability": 0.724609375}, {"start": 2645.08, "end": 2645.74, "word": " syndrome", "probability": 0.84814453125}, {"start": 2645.74, "end": 2646.7, "word": " ليش", "probability": 0.8193359375}, {"start": 2646.7, "end": 
2647.0, "word": " جاروا", "probability": 0.8046875}, {"start": 2647.0, "end": 2647.36, "word": " gray", "probability": 0.76708984375}, {"start": 2647.36, "end": 2648.28, "word": " platelet", "probability": 0.913818359375}, {"start": 2648.28, "end": 2649.02, "word": " syndrome؟", "probability": 0.6712646484375}, {"start": 2649.02, "end": 2649.02, "word": " كان", "probability": 0.239013671875}, {"start": 2649.02, "end": 2649.34, "word": " لإنه", "probability": 0.76348876953125}, {"start": 2649.34, "end": 2649.7, "word": " بتاعتهم", "probability": 0.5133056640625}, {"start": 2649.7, "end": 2650.5, "word": " اللون", "probability": 0.900146484375}, {"start": 2650.5, "end": 2650.82, "word": " اللي", "probability": 0.934326171875}, {"start": 2650.82, "end": 2651.08, "word": " هو", "probability": 0.95703125}, {"start": 2651.08, "end": 2651.28, "word": " ال", "probability": 0.76513671875}, {"start": 2651.28, "end": 2651.52, "word": " gray", "probability": 0.62255859375}, {"start": 2651.52, "end": 2651.94, "word": " color", "probability": 0.9169921875}, {"start": 2651.94, "end": 2652.38, "word": " under", "probability": 0.8662109375}, {"start": 2652.38, "end": 2653.14, "word": " the", "probability": 0.833984375}, {"start": 2653.14, "end": 2654.32, "word": " microscope", "probability": 0.95556640625}, {"start": 2654.32, "end": 2654.98, "word": " يوم", "probability": 0.841064453125}, {"start": 2654.98, "end": 2655.74, "word": " تسبقها", "probability": 0.7486572265625}, {"start": 2655.74, "end": 2656.4, "word": " الجمزا", "probability": 0.7613525390625}, {"start": 2656.4, "end": 2656.6, "word": " أو", "probability": 0.86328125}, {"start": 2656.6, "end": 2656.94, "word": " الرايت", "probability": 0.7159830729166666}], "temperature": 1.0}, {"id": 97, "seek": 268938, "start": 2662.74, "end": 2689.38, "text": "طبعاً في mixed deficiency وفي factor five cubic هذا factor خمسة ما هو موجود في فلتلة Alpha granules غيابه نعمل هذه الحاجة ناخد أمثل على ذلك storage pool defect defect in 
secondary aggregation deficiency of content of one of the granules يكون في غياب لأحد عوامل", "tokens": [9566, 3555, 3615, 995, 14111, 8978, 7467, 37500, 4032, 41185, 5952, 1732, 28733, 23758, 5952, 16490, 2304, 3794, 3660, 19446, 31439, 3714, 29245, 23328, 8978, 6156, 1211, 2655, 37977, 20588, 9370, 3473, 32771, 1829, 16758, 3224, 8717, 25957, 1211, 29538, 21542, 26108, 3660, 8717, 47283, 3215, 5551, 2304, 12984, 1211, 15844, 29910, 23275, 6725, 7005, 16445, 16445, 294, 11396, 16743, 399, 37500, 295, 2701, 295, 472, 295, 264, 9370, 3473, 7251, 30544, 8978, 32771, 1829, 16758, 5296, 10721, 24401, 6225, 2407, 10943, 1211], "avg_logprob": -0.3275669639309247, "compression_ratio": 1.4453781512605042, "no_speech_prob": 0.0, "words": [{"start": 2662.74, "end": 2663.1, "word": "طبعاً", "probability": 0.66690673828125}, {"start": 2663.1, "end": 2663.26, "word": " في", "probability": 0.72412109375}, {"start": 2663.26, "end": 2663.7, "word": " mixed", "probability": 0.410400390625}, {"start": 2663.7, "end": 2664.44, "word": " deficiency", "probability": 0.69482421875}, {"start": 2664.44, "end": 2665.72, "word": " وفي", "probability": 0.734375}, {"start": 2665.72, "end": 2666.0, "word": " factor", "probability": 0.8505859375}, {"start": 2666.0, "end": 2666.36, "word": " five", "probability": 0.306640625}, {"start": 2666.36, "end": 2666.84, "word": " cubic", "probability": 0.6142578125}, {"start": 2666.84, "end": 2667.16, "word": " هذا", "probability": 0.65380859375}, {"start": 2667.16, "end": 2668.18, "word": " factor", "probability": 0.564453125}, {"start": 2668.18, "end": 2668.54, "word": " خمسة", "probability": 0.9176025390625}, {"start": 2668.54, "end": 2668.68, "word": " ما", "probability": 0.291015625}, {"start": 2668.68, "end": 2668.74, "word": " هو", "probability": 0.7978515625}, {"start": 2668.74, "end": 2669.0, "word": " موجود", "probability": 0.9791666666666666}, {"start": 2669.0, "end": 2669.14, "word": " في", "probability": 0.873046875}, {"start": 2669.14, "end": 
2669.52, "word": " فلتلة", "probability": 0.529510498046875}, {"start": 2669.52, "end": 2669.78, "word": " Alpha", "probability": 0.368896484375}, {"start": 2669.78, "end": 2670.38, "word": " granules", "probability": 0.627197265625}, {"start": 2670.38, "end": 2671.64, "word": " غيابه", "probability": 0.937255859375}, {"start": 2671.64, "end": 2672.46, "word": " نعمل", "probability": 0.6689860026041666}, {"start": 2672.46, "end": 2673.36, "word": " هذه", "probability": 0.96826171875}, {"start": 2673.36, "end": 2674.14, "word": " الحاجة", "probability": 0.9080403645833334}, {"start": 2674.14, "end": 2676.0, "word": " ناخد", "probability": 0.9033203125}, {"start": 2676.0, "end": 2676.44, "word": " أمثل", "probability": 0.78778076171875}, {"start": 2676.44, "end": 2676.64, "word": " على", "probability": 0.88232421875}, {"start": 2676.64, "end": 2677.06, "word": " ذلك", "probability": 0.974853515625}, {"start": 2677.06, "end": 2677.54, "word": " storage", "probability": 0.68994140625}, {"start": 2677.54, "end": 2677.86, "word": " pool", "probability": 0.39013671875}, {"start": 2677.86, "end": 2678.42, "word": " defect", "probability": 0.953125}, {"start": 2678.42, "end": 2679.14, "word": " defect", "probability": 0.76318359375}, {"start": 2679.14, "end": 2679.44, "word": " in", "probability": 0.91845703125}, {"start": 2679.44, "end": 2680.0, "word": " secondary", "probability": 0.92431640625}, {"start": 2680.0, "end": 2681.52, "word": " aggregation", "probability": 0.93505859375}, {"start": 2681.52, "end": 2682.7, "word": " deficiency", "probability": 0.9111328125}, {"start": 2682.7, "end": 2683.34, "word": " of", "probability": 0.970703125}, {"start": 2683.34, "end": 2683.92, "word": " content", "probability": 0.3984375}, {"start": 2683.92, "end": 2684.6, "word": " of", "probability": 0.8955078125}, {"start": 2684.6, "end": 2685.08, "word": " one", "probability": 0.92431640625}, {"start": 2685.08, "end": 2686.14, "word": " of", "probability": 0.95458984375}, {"start": 
2686.14, "end": 2686.28, "word": " the", "probability": 0.455078125}, {"start": 2686.28, "end": 2686.84, "word": " granules", "probability": 0.96630859375}, {"start": 2686.84, "end": 2687.2, "word": " يكون", "probability": 0.6561279296875}, {"start": 2687.2, "end": 2687.4, "word": " في", "probability": 0.9228515625}, {"start": 2687.4, "end": 2687.98, "word": " غياب", "probability": 0.8821614583333334}, {"start": 2687.98, "end": 2688.6, "word": " لأحد", "probability": 0.94189453125}, {"start": 2688.6, "end": 2689.38, "word": " عوامل", "probability": 0.98828125}], "temperature": 1.0}, {"id": 98, "seek": 271851, "start": 2690.77, "end": 2718.51, "text": "alpha granules على سبيل المثال في الموجودة واحد من the granules contents بيكون غايب لإن كان alpha ولا dense بس هنا heterogeneous group و ال bleeding usually is mild to moderate لكن ممكن يزيد إذا المريض بياخد aspirin baby aspirin فاكرين إيش وظيفة ال baby aspirin", "tokens": [304, 7211, 9370, 3473, 15844, 8608, 21292, 1211, 9673, 12984, 6027, 8978, 9673, 29245, 23328, 3660, 36764, 24401, 9154, 264, 9370, 3473, 15768, 4724, 1829, 30544, 32771, 47302, 3555, 5296, 28814, 1863, 25961, 8961, 49429, 18011, 4724, 3794, 34105, 20789, 31112, 1594, 4032, 2423, 19312, 2673, 307, 15154, 281, 18174, 44381, 3714, 43020, 7251, 11622, 25708, 11933, 15730, 9673, 16572, 11242, 4724, 1829, 47283, 3215, 20003, 259, 3186, 20003, 259, 6156, 995, 37983, 9957, 11933, 1829, 8592, 4032, 19913, 33911, 3660, 2423, 3186, 20003, 259], "avg_logprob": -0.20112644794375398, "compression_ratio": 1.4875, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2690.77, "end": 2691.33, "word": "alpha", "probability": 0.5611572265625}, {"start": 2691.33, "end": 2691.93, "word": " granules", "probability": 0.951904296875}, {"start": 2691.93, "end": 2692.09, "word": " على", "probability": 0.84033203125}, {"start": 2692.09, "end": 2692.45, "word": " سبيل", "probability": 0.9646809895833334}, {"start": 2692.45, "end": 2692.99, "word": " المثال", 
"probability": 0.9951171875}, {"start": 2692.99, "end": 2693.95, "word": " في", "probability": 0.7001953125}, {"start": 2693.95, "end": 2695.53, "word": " الموجودة", "probability": 0.81378173828125}, {"start": 2695.53, "end": 2696.33, "word": " واحد", "probability": 0.86669921875}, {"start": 2696.33, "end": 2696.67, "word": " من", "probability": 0.9912109375}, {"start": 2696.67, "end": 2696.97, "word": " the", "probability": 0.423828125}, {"start": 2696.97, "end": 2697.55, "word": " granules", "probability": 0.941650390625}, {"start": 2697.55, "end": 2698.81, "word": " contents", "probability": 0.90087890625}, {"start": 2698.81, "end": 2699.17, "word": " بيكون", "probability": 0.9446614583333334}, {"start": 2699.17, "end": 2699.65, "word": " غايب", "probability": 0.8279622395833334}, {"start": 2699.65, "end": 2700.11, "word": " لإن", "probability": 0.5438639322916666}, {"start": 2700.11, "end": 2700.27, "word": " كان", "probability": 0.98486328125}, {"start": 2700.27, "end": 2700.55, "word": " alpha", "probability": 0.5673828125}, {"start": 2700.55, "end": 2700.77, "word": " ولا", "probability": 0.85791015625}, {"start": 2700.77, "end": 2701.09, "word": " dense", "probability": 0.62158203125}, {"start": 2701.09, "end": 2702.99, "word": " بس", "probability": 0.8828125}, {"start": 2702.99, "end": 2703.19, "word": " هنا", "probability": 0.32763671875}, {"start": 2703.19, "end": 2703.73, "word": " heterogeneous", "probability": 0.65771484375}, {"start": 2703.73, "end": 2704.29, "word": " group", "probability": 0.955078125}, {"start": 2704.29, "end": 2705.17, "word": " و", "probability": 0.7314453125}, {"start": 2705.17, "end": 2705.25, "word": " ال", "probability": 0.84423828125}, {"start": 2705.25, "end": 2705.51, "word": " bleeding", "probability": 0.978515625}, {"start": 2705.51, "end": 2706.09, "word": " usually", "probability": 0.890625}, {"start": 2706.09, "end": 2706.37, "word": " is", "probability": 0.92138671875}, {"start": 2706.37, "end": 2706.97, "word": " 
mild", "probability": 0.8466796875}, {"start": 2706.97, "end": 2708.41, "word": " to", "probability": 0.91748046875}, {"start": 2708.41, "end": 2708.95, "word": " moderate", "probability": 0.9248046875}, {"start": 2708.95, "end": 2710.35, "word": " لكن", "probability": 0.89892578125}, {"start": 2710.35, "end": 2710.85, "word": " ممكن", "probability": 0.990478515625}, {"start": 2710.85, "end": 2711.45, "word": " يزيد", "probability": 0.9952799479166666}, {"start": 2711.45, "end": 2712.69, "word": " إذا", "probability": 0.927734375}, {"start": 2712.69, "end": 2713.29, "word": " المريض", "probability": 0.9324544270833334}, {"start": 2713.29, "end": 2713.93, "word": " بياخد", "probability": 0.9635009765625}, {"start": 2713.93, "end": 2714.63, "word": " aspirin", "probability": 0.8583984375}, {"start": 2714.63, "end": 2714.99, "word": " baby", "probability": 0.73779296875}, {"start": 2714.99, "end": 2716.19, "word": " aspirin", "probability": 0.95361328125}, {"start": 2716.19, "end": 2716.85, "word": " فاكرين", "probability": 0.83380126953125}, {"start": 2716.85, "end": 2716.99, "word": " إيش", "probability": 0.90478515625}, {"start": 2716.99, "end": 2717.41, "word": " وظيفة", "probability": 0.9786376953125}, {"start": 2717.41, "end": 2717.53, "word": " ال", "probability": 0.921875}, {"start": 2717.53, "end": 2717.69, "word": " baby", "probability": 0.951171875}, {"start": 2717.69, "end": 2718.51, "word": " aspirin", "probability": 0.931640625}], "temperature": 1.0}, {"id": 99, "seek": 274672, "start": 2723.64, "end": 2746.72, "text": "اللي بيكتبلي في انتحار السيورة الدم حطله صفر و بيشتغل عندنا قعدت .. 
هذه السيورة الدم بيقولنا هذه معلومة ممكن تاخدها جابل ما تاخدها دي المادة اللي جيه تاخدنا ميكانيزم احنا، اظبط؟ ايش ميكانيزم بيقول؟ بيقول بيصير في انهيبشن لل ..", "tokens": [6027, 20292, 4724, 1829, 4117, 2655, 3555, 20292, 8978, 16472, 2655, 5016, 9640, 21136, 1829, 13063, 3660, 32748, 2304, 11331, 9566, 43761, 20328, 5172, 2288, 4032, 4724, 1829, 8592, 2655, 17082, 1211, 43242, 8315, 12174, 22488, 2655, 4386, 29538, 21136, 1829, 13063, 3660, 32748, 2304, 4724, 1829, 39648, 8315, 29538, 20449, 1211, 20498, 3660, 3714, 43020, 6055, 47283, 3215, 11296, 10874, 16758, 1211, 19446, 6055, 47283, 3215, 11296, 11778, 1829, 9673, 18513, 3660, 13672, 1829, 10874, 1829, 3224, 6055, 47283, 3215, 8315, 3714, 1829, 41361, 1829, 11622, 2304, 1975, 5016, 8315, 12399, 1975, 19913, 3555, 9566, 22807, 1975, 1829, 8592, 3714, 1829, 41361, 1829, 11622, 2304, 4724, 1829, 39648, 22807, 4724, 1829, 39648, 4724, 1829, 9381, 13546, 8978, 16472, 3224, 1829, 3555, 8592, 1863, 24976, 4386], "avg_logprob": -0.3270177109034981, "compression_ratio": 1.9710144927536233, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2723.6400000000003, "end": 2724.28, "word": "اللي", "probability": 0.382568359375}, {"start": 2724.28, "end": 2724.92, "word": " بيكتبلي", "probability": 0.642333984375}, {"start": 2724.92, "end": 2725.02, "word": " في", "probability": 0.80615234375}, {"start": 2725.02, "end": 2725.34, "word": " انتحار", "probability": 0.802001953125}, {"start": 2725.34, "end": 2725.58, "word": " السيورة", "probability": 0.831787109375}, {"start": 2725.58, "end": 2725.86, "word": " الدم", "probability": 0.903076171875}, {"start": 2725.86, "end": 2726.32, "word": " حطله", "probability": 0.6293131510416666}, {"start": 2726.32, "end": 2726.64, "word": " صفر", "probability": 0.8190104166666666}, {"start": 2726.64, "end": 2726.72, "word": " و", "probability": 0.1514892578125}, {"start": 2726.72, "end": 2727.16, "word": " بيشتغل", "probability": 0.7672119140625}, {"start": 2727.16, "end": 
2727.48, "word": " عندنا", "probability": 0.644287109375}, {"start": 2727.48, "end": 2727.78, "word": " قعدت", "probability": 0.5137125651041666}, {"start": 2727.78, "end": 2727.8, "word": " ..", "probability": 0.413818359375}, {"start": 2727.8, "end": 2728.02, "word": " هذه", "probability": 0.16845703125}, {"start": 2728.02, "end": 2728.34, "word": " السيورة", "probability": 0.966552734375}, {"start": 2728.34, "end": 2728.66, "word": " الدم", "probability": 0.9892578125}, {"start": 2728.66, "end": 2729.08, "word": " بيقولنا", "probability": 0.7174072265625}, {"start": 2729.08, "end": 2729.28, "word": " هذه", "probability": 0.8193359375}, {"start": 2729.28, "end": 2730.04, "word": " معلومة", "probability": 0.9700927734375}, {"start": 2730.04, "end": 2730.92, "word": " ممكن", "probability": 0.98388671875}, {"start": 2730.92, "end": 2731.8, "word": " تاخدها", "probability": 0.9854736328125}, {"start": 2731.8, "end": 2732.14, "word": " جابل", "probability": 0.5799967447916666}, {"start": 2732.14, "end": 2732.22, "word": " ما", "probability": 0.9462890625}, {"start": 2732.22, "end": 2732.6, "word": " تاخدها", "probability": 0.9219970703125}, {"start": 2732.6, "end": 2732.68, "word": " دي", "probability": 0.688720703125}, {"start": 2732.68, "end": 2733.1, "word": " المادة", "probability": 0.9347330729166666}, {"start": 2733.1, "end": 2733.28, "word": " اللي", "probability": 0.56524658203125}, {"start": 2733.28, "end": 2733.4, "word": " جيه", "probability": 0.724609375}, {"start": 2733.4, "end": 2733.72, "word": " تاخدنا", "probability": 0.8909912109375}, {"start": 2733.72, "end": 2734.22, "word": " ميكانيزم", "probability": 0.854736328125}, {"start": 2734.22, "end": 2735.32, "word": " احنا،", "probability": 0.80621337890625}, {"start": 2735.32, "end": 2735.98, "word": " اظبط؟", "probability": 0.69443359375}, {"start": 2735.98, "end": 2736.12, "word": " ايش", "probability": 0.7355143229166666}, {"start": 2736.12, "end": 2736.62, "word": " ميكانيزم", "probability": 
0.9835611979166666}, {"start": 2736.62, "end": 2738.84, "word": " بيقول؟", "probability": 0.8619384765625}, {"start": 2738.84, "end": 2742.4, "word": " بيقول", "probability": 0.9031575520833334}, {"start": 2742.4, "end": 2742.7, "word": " بيصير", "probability": 0.93798828125}, {"start": 2742.7, "end": 2742.96, "word": " في", "probability": 0.93115234375}, {"start": 2742.96, "end": 2744.04, "word": " انهيبشن", "probability": 0.7806396484375}, {"start": 2744.04, "end": 2745.74, "word": " لل", "probability": 0.59716796875}, {"start": 2745.74, "end": 2746.72, "word": " ..", "probability": 0.366943359375}], "temperature": 1.0}, {"id": 100, "seek": 277621, "start": 2748.83, "end": 2776.21, "text": "cyclooxygenase انزالت صح؟ وبالتالي ال .. بيصير فيه تعطيل لايجت و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بيبقى عالية العيال دياشترا و بي", "tokens": [1344, 66, 752, 5230, 8647, 651, 16472, 11622, 6027, 2655, 20328, 5016, 22807, 4032, 3555, 6027, 2655, 6027, 1829, 2423, 4386, 4724, 1829, 9381, 13546, 8978, 3224, 37279, 9566, 26895, 20193, 1829, 7435, 2655, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 
10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829, 3555, 4587, 7578, 6225, 6027, 10632, 18863, 1829, 6027, 11778, 1829, 33599, 2655, 23557, 4032, 4724, 1829], "avg_logprob": -0.225277771419949, "compression_ratio": 5.18796992481203, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2748.83, "end": 2749.75, "word": "cyclooxygenase", "probability": 0.7998046875}, {"start": 2749.75, "end": 2750.41, "word": " انزالت", "probability": 0.55181884765625}, {"start": 2750.41, "end": 2752.27, "word": " صح؟", "probability": 0.5120442708333334}, {"start": 2752.27, "end": 2754.31, "word": " وبالتالي", "probability": 0.7884114583333334}, {"start": 2754.31, "end": 2754.81, "word": " ال", "probability": 0.517578125}, {"start": 2754.81, "end": 2755.49, "word": " ..", "probability": 0.32958984375}, {"start": 2755.49, "end": 2757.01, "word": " بيصير", "probability": 0.747650146484375}, {"start": 2757.01, "end": 2757.19, "word": " فيه", "probability": 0.67041015625}, {"start": 2757.19, "end": 2757.55, "word": " تعطيل", "probability": 0.9365234375}, {"start": 2757.55, "end": 2758.65, "word": " لايجت", "probability": 0.413970947265625}, {"start": 2758.65, "end": 2758.81, "word": " و", "probability": 0.109375}, {"start": 2758.81, "end": 2759.15, "word": " بيبقى", "probability": 0.6596435546875}, {"start": 2759.15, "end": 2759.45, "word": " عالية", "probability": 0.3687744140625}, {"start": 2759.45, "end": 2759.79, "word": " العيال", "probability": 0.20391845703125}, {"start": 2759.79, "end": 2760.05, "word": " دياشترا", "probability": 0.47978515625}, {"start": 2760.05, "end": 2760.07, "word": " و", "probability": 0.06976318359375}, 
{"start": 2760.07, "end": 2760.19, "word": " بيبقى", "probability": 0.747216796875}, {"start": 2760.19, "end": 2760.55, "word": " عالية", "probability": 0.8932291666666666}, {"start": 2760.55, "end": 2760.55, "word": " العيال", "probability": 0.935546875}, {"start": 2760.55, "end": 2760.55, "word": " دياشترا", "probability": 0.98984375}, {"start": 2760.55, "end": 2760.55, "word": " و", "probability": 0.135009765625}, {"start": 2760.55, "end": 2760.55, "word": " بيبقى", "probability": 0.906640625}, {"start": 2760.55, "end": 2760.55, "word": " عالية", "probability": 0.97998046875}, {"start": 2760.55, "end": 2760.55, "word": " العيال", "probability": 0.94140625}, {"start": 2760.55, "end": 2760.55, "word": " دياشترا", "probability": 0.9921875}, {"start": 2760.55, "end": 2760.55, "word": " و", "probability": 0.2239990234375}, {"start": 2760.55, "end": 2760.55, "word": " بيبقى", "probability": 0.91533203125}, {"start": 2760.55, "end": 2760.55, "word": " عالية", "probability": 0.9807942708333334}, {"start": 2760.55, "end": 2760.55, "word": " العيال", "probability": 0.9462890625}, {"start": 2760.55, "end": 2760.55, "word": " دياشترا", "probability": 0.9912109375}, {"start": 2760.55, "end": 2760.55, "word": " و", "probability": 0.375}, {"start": 2760.55, "end": 2760.55, "word": " بيبقى", "probability": 0.9169921875}, {"start": 2760.55, "end": 2760.55, "word": " عالية", "probability": 0.9812825520833334}, {"start": 2760.55, "end": 2760.55, "word": " العيال", "probability": 0.94482421875}, {"start": 2760.55, "end": 2760.55, "word": " دياشترا", "probability": 0.990234375}, {"start": 2760.55, "end": 2760.55, "word": " و", "probability": 0.5498046875}, {"start": 2760.55, "end": 2760.55, "word": " بيبقى", "probability": 0.92529296875}, {"start": 2760.55, "end": 2760.55, "word": " عالية", "probability": 0.9840494791666666}, {"start": 2760.55, "end": 2760.65, "word": " العيال", "probability": 0.9524739583333334}, {"start": 2760.65, "end": 2761.31, "word": " دياشترا", "probability": 
0.99033203125}, {"start": 2761.31, "end": 2761.31, "word": " و", "probability": 0.7041015625}, {"start": 2761.31, "end": 2761.31, "word": " بيبقى", "probability": 0.9431640625}, {"start": 2761.31, "end": 2761.31, "word": " عالية", "probability": 0.9874674479166666}, {"start": 2761.31, "end": 2761.31, "word": " العيال", "probability": 0.9593098958333334}, {"start": 2761.31, "end": 2761.31, "word": " دياشترا", "probability": 0.990625}, {"start": 2761.31, "end": 2761.93, "word": " و", "probability": 0.83984375}, {"start": 2761.93, "end": 2762.07, "word": " بيبقى", "probability": 0.96708984375}, {"start": 2762.07, "end": 2762.07, "word": " عالية", "probability": 0.9894205729166666}, {"start": 2762.07, "end": 2762.07, "word": " العيال", "probability": 0.96337890625}, {"start": 2762.07, "end": 2762.53, "word": " دياشترا", "probability": 0.99091796875}, {"start": 2762.53, "end": 2762.53, "word": " و", "probability": 0.91357421875}, {"start": 2762.53, "end": 2762.53, "word": " بيبقى", "probability": 0.98388671875}, {"start": 2762.53, "end": 2762.53, "word": " عالية", "probability": 0.9905598958333334}, {"start": 2762.53, "end": 2762.53, "word": " العيال", "probability": 0.9694010416666666}, {"start": 2762.53, "end": 2762.53, "word": " دياشترا", "probability": 0.99111328125}, {"start": 2762.53, "end": 2762.91, "word": " و", "probability": 0.93505859375}, {"start": 2762.91, "end": 2763.89, "word": " بيبقى", "probability": 0.98876953125}, {"start": 2763.89, "end": 2764.11, "word": " عالية", "probability": 0.9912109375}, {"start": 2764.11, "end": 2764.41, "word": " العيال", "probability": 0.9734700520833334}, {"start": 2764.41, "end": 2764.99, "word": " دياشترا", "probability": 0.99111328125}, {"start": 2764.99, "end": 2766.79, "word": " و", "probability": 0.9326171875}, {"start": 2766.79, "end": 2767.01, "word": " بيبقى", "probability": 0.98896484375}, {"start": 2767.01, "end": 2767.47, "word": " عالية", "probability": 0.9913736979166666}, {"start": 2767.47, "end": 2768.13, 
"word": " العيال", "probability": 0.9778645833333334}, {"start": 2768.13, "end": 2770.23, "word": " دياشترا", "probability": 0.99052734375}, {"start": 2770.23, "end": 2774.61, "word": " و", "probability": 0.921875}, {"start": 2774.61, "end": 2776.21, "word": " بي", "probability": 0.973388671875}], "temperature": 1.0}, {"id": 101, "seek": 279999, "start": 2777.79, "end": 2799.99, "text": "مين اللي بيعمل inhibition اللي انا قلت في تاج ريجليشن؟ الاسفل رجح المعلومات، مش هو اللي بيعمل، المهم هو بيعمل inhibition لثرانبوكسين A2", "tokens": [2304, 9957, 13672, 1829, 4724, 1829, 25957, 1211, 47707, 897, 849, 13672, 1829, 1975, 8315, 12174, 1211, 2655, 8978, 6055, 26108, 12602, 1829, 7435, 20292, 8592, 1863, 22807, 2423, 32277, 5172, 1211, 12602, 7435, 5016, 9673, 30241, 20498, 9307, 12399, 37893, 31439, 13672, 1829, 4724, 1829, 25957, 1211, 12399, 9673, 16095, 31439, 4724, 1829, 25957, 1211, 20406, 849, 5296, 12984, 2288, 7649, 3555, 2407, 4117, 3794, 9957, 316, 17], "avg_logprob": -0.5120535595076424, "compression_ratio": 1.5804195804195804, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2777.79, "end": 2778.65, "word": "مين", "probability": 0.3848876953125}, {"start": 2778.65, "end": 2778.65, "word": " اللي", "probability": 0.758544921875}, {"start": 2778.65, "end": 2778.75, "word": " بيعمل", "probability": 0.862060546875}, {"start": 2778.75, "end": 2779.03, "word": " inhibition", "probability": 0.702392578125}, {"start": 2779.03, "end": 2779.17, "word": " اللي", "probability": 0.71630859375}, {"start": 2779.17, "end": 2779.25, "word": " انا", "probability": 0.525146484375}, {"start": 2779.25, "end": 2779.43, "word": " قلت", "probability": 0.4863688151041667}, {"start": 2779.43, "end": 2779.53, "word": " في", "probability": 0.5703125}, {"start": 2779.53, "end": 2779.67, "word": " تاج", "probability": 0.5032958984375}, {"start": 2779.67, "end": 2781.99, "word": " ريجليشن؟", "probability": 0.5817609514508929}, {"start": 2781.99, "end": 2782.63, "word": " 
الاسفل", "probability": 0.71466064453125}, {"start": 2782.63, "end": 2792.23, "word": " رجح", "probability": 0.4698486328125}, {"start": 2792.23, "end": 2794.23, "word": " المعلومات،", "probability": 0.5444580078125}, {"start": 2794.23, "end": 2795.15, "word": " مش", "probability": 0.76513671875}, {"start": 2795.15, "end": 2795.29, "word": " هو", "probability": 0.982421875}, {"start": 2795.29, "end": 2795.43, "word": " اللي", "probability": 0.9873046875}, {"start": 2795.43, "end": 2795.99, "word": " بيعمل،", "probability": 0.86904296875}, {"start": 2795.99, "end": 2796.37, "word": " المهم", "probability": 0.9873046875}, {"start": 2796.37, "end": 2797.51, "word": " هو", "probability": 0.9462890625}, {"start": 2797.51, "end": 2797.93, "word": " بيعمل", "probability": 0.9705810546875}, {"start": 2797.93, "end": 2798.67, "word": " inhibition", "probability": 0.770751953125}, {"start": 2798.67, "end": 2799.55, "word": " لثرانبوكسين", "probability": 0.6851942274305556}, {"start": 2799.55, "end": 2799.99, "word": " A2", "probability": 0.84765625}], "temperature": 1.0}, {"id": 102, "seek": 282554, "start": 2822.18, "end": 2825.54, "text": "بعد الولادة بيصير فيه عيش نظيف", "tokens": [3555, 22488, 2423, 12610, 18513, 3660, 4724, 1829, 9381, 13546, 8978, 3224, 6225, 1829, 8592, 8717, 19913, 33911], "avg_logprob": -0.5378289285459017, "compression_ratio": 0.9322033898305084, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2822.18, "end": 2823.14, "word": "بعد", "probability": 0.3747406005859375}, {"start": 2823.14, "end": 2824.1, "word": " الولادة", "probability": 0.7069854736328125}, {"start": 2824.1, "end": 2824.58, "word": " بيصير", "probability": 0.6689453125}, {"start": 2824.58, "end": 2824.92, "word": " فيه", "probability": 0.7421875}, {"start": 2824.92, "end": 2825.06, "word": " عيش", "probability": 0.7804361979166666}, {"start": 2825.06, "end": 2825.54, "word": " نظيف", "probability": 0.9441731770833334}], "temperature": 1.0}, {"id": 103, "seek": 285295, 
"start": 2827.05, "end": 2852.95, "text": "النوعين زى ما قلتلكوا ال alpha و ال yash و ال dense ال dense بيسموها ده هي ال dense body deficiency شايفين ال platelet هنشوفها الجدران decreased in dense body يعني decreased في ال ADP و ال ATP و ال calcium و ال yash و السباطنة اللى هو أبوه جاي من وين five hydroxy tryptophan five HT يعني اختصار ده هي", "tokens": [6027, 1863, 45367, 9957, 30767, 7578, 19446, 12174, 1211, 2655, 23275, 14407, 2423, 8961, 4032, 2423, 288, 1299, 4032, 2423, 18011, 2423, 18011, 4724, 1829, 38251, 2407, 11296, 11778, 3224, 39896, 2423, 18011, 1772, 37500, 13412, 995, 33911, 9957, 2423, 3403, 15966, 8032, 1863, 8592, 38688, 11296, 25724, 3215, 2288, 7649, 24436, 294, 18011, 1772, 37495, 22653, 24436, 8978, 2423, 9135, 47, 4032, 2423, 39202, 4032, 2423, 20918, 4032, 2423, 288, 1299, 4032, 21136, 3555, 41193, 1863, 3660, 13672, 7578, 31439, 5551, 3555, 2407, 3224, 10874, 47302, 9154, 4032, 9957, 1732, 15435, 12876, 853, 662, 404, 3451, 1732, 11751, 37495, 22653, 1975, 46456, 9381, 9640, 11778, 3224, 39896], "avg_logprob": -0.3408830264292726, "compression_ratio": 1.6574803149606299, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2827.05, "end": 2827.51, "word": "النوعين", "probability": 0.7415771484375}, {"start": 2827.51, "end": 2827.63, "word": " زى", "probability": 0.638671875}, {"start": 2827.63, "end": 2827.69, "word": " ما", "probability": 0.9794921875}, {"start": 2827.69, "end": 2828.13, "word": " قلتلكوا", "probability": 0.7294921875}, {"start": 2828.13, "end": 2828.21, "word": " ال", "probability": 0.875}, {"start": 2828.21, "end": 2828.43, "word": " alpha", "probability": 0.55126953125}, {"start": 2828.43, "end": 2828.63, "word": " و", "probability": 0.818359375}, {"start": 2828.63, "end": 2828.69, "word": " ال", "probability": 0.56396484375}, {"start": 2828.69, "end": 2829.09, "word": " yash", "probability": 0.30767822265625}, {"start": 2829.09, "end": 2829.43, "word": " و", "probability": 0.7998046875}, {"start": 2829.43, 
"end": 2829.65, "word": " ال", "probability": 0.9619140625}, {"start": 2829.65, "end": 2829.97, "word": " dense", "probability": 0.9091796875}, {"start": 2829.97, "end": 2830.27, "word": " ال", "probability": 0.56494140625}, {"start": 2830.27, "end": 2830.63, "word": " dense", "probability": 0.93017578125}, {"start": 2830.63, "end": 2831.29, "word": " بيسموها", "probability": 0.8787109375}, {"start": 2831.29, "end": 2831.47, "word": " ده", "probability": 0.624755859375}, {"start": 2831.47, "end": 2831.59, "word": " هي", "probability": 0.21240234375}, {"start": 2831.59, "end": 2831.71, "word": " ال", "probability": 0.9345703125}, {"start": 2831.71, "end": 2831.91, "word": " dense", "probability": 0.67138671875}, {"start": 2831.91, "end": 2832.29, "word": " body", "probability": 0.9287109375}, {"start": 2832.29, "end": 2833.59, "word": " deficiency", "probability": 0.6826171875}, {"start": 2833.59, "end": 2834.07, "word": " شايفين", "probability": 0.8697509765625}, {"start": 2834.07, "end": 2834.17, "word": " ال", "probability": 0.9462890625}, {"start": 2834.17, "end": 2834.61, "word": " platelet", "probability": 0.6875}, {"start": 2834.61, "end": 2836.57, "word": " هنشوفها", "probability": 0.796337890625}, {"start": 2836.57, "end": 2836.89, "word": " الجدران", "probability": 0.500091552734375}, {"start": 2836.89, "end": 2837.41, "word": " decreased", "probability": 0.54248046875}, {"start": 2837.41, "end": 2837.77, "word": " in", "probability": 0.93310546875}, {"start": 2837.77, "end": 2838.19, "word": " dense", "probability": 0.8388671875}, {"start": 2838.19, "end": 2838.59, "word": " body", "probability": 0.91015625}, {"start": 2838.59, "end": 2838.89, "word": " يعني", "probability": 0.8935546875}, {"start": 2838.89, "end": 2839.31, "word": " decreased", "probability": 0.5673828125}, {"start": 2839.31, "end": 2839.45, "word": " في", "probability": 0.78759765625}, {"start": 2839.45, "end": 2839.53, "word": " ال", "probability": 0.9765625}, {"start": 2839.53, "end": 
2839.89, "word": " ADP", "probability": 0.737060546875}, {"start": 2839.89, "end": 2840.01, "word": " و", "probability": 0.70751953125}, {"start": 2840.01, "end": 2840.03, "word": " ال", "probability": 0.75927734375}, {"start": 2840.03, "end": 2840.27, "word": " ATP", "probability": 0.86572265625}, {"start": 2840.27, "end": 2840.57, "word": " و", "probability": 0.65771484375}, {"start": 2840.57, "end": 2841.01, "word": " ال", "probability": 0.91650390625}, {"start": 2841.01, "end": 2841.47, "word": " calcium", "probability": 0.9609375}, {"start": 2841.47, "end": 2841.75, "word": " و", "probability": 0.99169921875}, {"start": 2841.75, "end": 2841.83, "word": " ال", "probability": 0.72265625}, {"start": 2841.83, "end": 2842.11, "word": " yash", "probability": 0.5748291015625}, {"start": 2842.11, "end": 2842.71, "word": " و", "probability": 0.7734375}, {"start": 2842.71, "end": 2844.69, "word": " السباطنة", "probability": 0.78154296875}, {"start": 2844.69, "end": 2845.13, "word": " اللى", "probability": 0.9033203125}, {"start": 2845.13, "end": 2845.35, "word": " هو", "probability": 0.90869140625}, {"start": 2845.35, "end": 2845.95, "word": " أبوه", "probability": 0.921142578125}, {"start": 2845.95, "end": 2846.17, "word": " جاي", "probability": 0.79150390625}, {"start": 2846.17, "end": 2846.31, "word": " من", "probability": 0.9912109375}, {"start": 2846.31, "end": 2846.67, "word": " وين", "probability": 0.87890625}, {"start": 2846.67, "end": 2848.03, "word": " five", "probability": 0.366943359375}, {"start": 2848.03, "end": 2848.99, "word": " hydroxy", "probability": 0.3551025390625}, {"start": 2848.99, "end": 2849.75, "word": " tryptophan", "probability": 0.84130859375}, {"start": 2849.75, "end": 2851.33, "word": " five", "probability": 0.5283203125}, {"start": 2851.33, "end": 2851.93, "word": " HT", "probability": 0.609375}, {"start": 2851.93, "end": 2852.19, "word": " يعني", "probability": 0.927978515625}, {"start": 2852.19, "end": 2852.67, "word": " اختصار", 
"probability": 0.9571533203125}, {"start": 2852.67, "end": 2852.87, "word": " ده", "probability": 0.639404296875}, {"start": 2852.87, "end": 2852.95, "word": " هي", "probability": 0.347412109375}], "temperature": 1.0}, {"id": 104, "seek": 288255, "start": 2854.17, "end": 2882.55, "text": "5-hydroxy-treptofan طيب، على الـNormal Platelet جاله بتحتوي على 3 إلى 6 ماشي dense body كل واحد فيهم حجم 300 ميجرام ماشي 3 إلى 6", "tokens": [20, 12, 21591, 340, 12876, 12, 3599, 662, 2670, 282, 23032, 1829, 3555, 12399, 15844, 2423, 39184, 45, 24440, 17461, 15966, 10874, 6027, 3224, 39894, 33753, 45865, 15844, 805, 30731, 1386, 3714, 33599, 1829, 18011, 1772, 28242, 36764, 24401, 8978, 16095, 11331, 7435, 2304, 6641, 3714, 1829, 7435, 2288, 10943, 3714, 33599, 1829, 805, 30731, 1386], "avg_logprob": -0.3569078947368421, "compression_ratio": 1.2026143790849673, "no_speech_prob": 0.0, "words": [{"start": 2854.17, "end": 2854.61, "word": "5", "probability": 0.264892578125}, {"start": 2854.61, "end": 2855.31, "word": "-hydroxy", "probability": 0.8323974609375}, {"start": 2855.31, "end": 2857.07, "word": "-treptofan", "probability": 0.711474609375}, {"start": 2857.07, "end": 2867.15, "word": " طيب،", "probability": 0.597869873046875}, {"start": 2867.15, "end": 2867.33, "word": " على", "probability": 0.61181640625}, {"start": 2867.33, "end": 2867.75, "word": " الـNormal", "probability": 0.5791015625}, {"start": 2867.75, "end": 2868.33, "word": " Platelet", "probability": 0.6019287109375}, {"start": 2868.33, "end": 2868.65, "word": " جاله", "probability": 0.70947265625}, {"start": 2868.65, "end": 2869.21, "word": " بتحتوي", "probability": 0.9181315104166666}, {"start": 2869.21, "end": 2869.45, "word": " على", "probability": 0.8779296875}, {"start": 2869.45, "end": 2869.93, "word": " 3", "probability": 0.80908203125}, {"start": 2869.93, "end": 2870.23, "word": " إلى", "probability": 0.85595703125}, {"start": 2870.23, "end": 2870.83, "word": " 6", "probability": 0.99462890625}, {"start": 
2870.83, "end": 2873.25, "word": " ماشي", "probability": 0.6954752604166666}, {"start": 2873.25, "end": 2876.23, "word": " dense", "probability": 0.3046875}, {"start": 2876.23, "end": 2876.65, "word": " body", "probability": 0.8310546875}, {"start": 2876.65, "end": 2877.75, "word": " كل", "probability": 0.8125}, {"start": 2877.75, "end": 2878.03, "word": " واحد", "probability": 0.994140625}, {"start": 2878.03, "end": 2878.35, "word": " فيهم", "probability": 0.983642578125}, {"start": 2878.35, "end": 2878.73, "word": " حجم", "probability": 0.9933268229166666}, {"start": 2878.73, "end": 2879.15, "word": " 300", "probability": 0.703125}, {"start": 2879.15, "end": 2879.93, "word": " ميجرام", "probability": 0.759765625}, {"start": 2879.93, "end": 2881.25, "word": " ماشي", "probability": 0.7963053385416666}, {"start": 2881.25, "end": 2881.51, "word": " 3", "probability": 0.71533203125}, {"start": 2881.51, "end": 2882.09, "word": " إلى", "probability": 0.9619140625}, {"start": 2882.09, "end": 2882.55, "word": " 6", "probability": 0.99658203125}], "temperature": 1.0}, {"id": 105, "seek": 290977, "start": 2883.35, "end": 2909.77, "text": "Dense body حجمهم 300 ميجرو And described in inherited disease لاجو هذا المرض is associated with inherited disease other inherited disease منها الهارموسكايب بودلاند سيندروم واسكوت ألدرخ سيندروم ماشي تشيديا كيجاشي سيندروم انترومبسايتروبينا واد أبسن ريدياس", "tokens": [35, 1288, 1772, 11331, 7435, 2304, 16095, 6641, 3714, 1829, 7435, 32887, 400, 7619, 294, 27091, 4752, 5296, 26108, 2407, 23758, 9673, 43042, 307, 6615, 365, 27091, 4752, 661, 27091, 4752, 9154, 11296, 2423, 3224, 9640, 2304, 41779, 4117, 995, 1829, 3555, 4724, 23328, 1211, 7649, 3215, 8608, 9957, 3215, 2288, 20498, 4032, 32277, 4117, 35473, 5551, 1211, 3215, 2288, 9778, 8608, 9957, 3215, 2288, 20498, 3714, 33599, 1829, 6055, 8592, 1829, 16254, 995, 9122, 1829, 7435, 33599, 1829, 8608, 9957, 3215, 2288, 20498, 16472, 2655, 2288, 20498, 3555, 3794, 995, 36081, 2288, 2407, 21292, 
8315, 4032, 18513, 5551, 3555, 3794, 1863, 12602, 1829, 16254, 32277], "avg_logprob": -0.5292055996778969, "compression_ratio": 1.6936936936936937, "no_speech_prob": 0.0, "words": [{"start": 2883.35, "end": 2883.87, "word": "Dense", "probability": 0.48162841796875}, {"start": 2883.87, "end": 2884.31, "word": " body", "probability": 0.427490234375}, {"start": 2884.31, "end": 2886.81, "word": " حجمهم", "probability": 0.816650390625}, {"start": 2886.81, "end": 2888.09, "word": " 300", "probability": 0.884765625}, {"start": 2888.09, "end": 2889.17, "word": " ميجرو", "probability": 0.31390380859375}, {"start": 2889.17, "end": 2890.67, "word": " And", "probability": 0.1514892578125}, {"start": 2890.67, "end": 2891.13, "word": " described", "probability": 0.78759765625}, {"start": 2891.13, "end": 2891.35, "word": " in", "probability": 0.1510009765625}, {"start": 2891.35, "end": 2891.77, "word": " inherited", "probability": 0.9619140625}, {"start": 2891.77, "end": 2892.29, "word": " disease", "probability": 0.69140625}, {"start": 2892.29, "end": 2892.61, "word": " لاجو", "probability": 0.4050699869791667}, {"start": 2892.61, "end": 2892.79, "word": " هذا", "probability": 0.7861328125}, {"start": 2892.79, "end": 2893.11, "word": " المرض", "probability": 0.987060546875}, {"start": 2893.11, "end": 2893.29, "word": " is", "probability": 0.58935546875}, {"start": 2893.29, "end": 2893.89, "word": " associated", "probability": 0.9189453125}, {"start": 2893.89, "end": 2894.23, "word": " with", "probability": 0.884765625}, {"start": 2894.23, "end": 2894.95, "word": " inherited", "probability": 0.9111328125}, {"start": 2894.95, "end": 2895.47, "word": " disease", "probability": 0.76318359375}, {"start": 2895.47, "end": 2895.75, "word": " other", "probability": 0.193603515625}, {"start": 2895.75, "end": 2896.27, "word": " inherited", "probability": 0.97900390625}, {"start": 2896.27, "end": 2896.71, "word": " disease", "probability": 0.62646484375}, {"start": 2896.71, "end": 2898.11, 
"word": " منها", "probability": 0.879150390625}, {"start": 2898.11, "end": 2899.51, "word": " الهارموسكايب", "probability": 0.5818820529513888}, {"start": 2899.51, "end": 2900.81, "word": " بودلاند", "probability": 0.5531494140625}, {"start": 2900.81, "end": 2901.51, "word": " سيندروم", "probability": 0.67099609375}, {"start": 2901.51, "end": 2902.45, "word": " واسكوت", "probability": 0.6884765625}, {"start": 2902.45, "end": 2903.01, "word": " ألدرخ", "probability": 0.65576171875}, {"start": 2903.01, "end": 2903.97, "word": " سيندروم", "probability": 0.881396484375}, {"start": 2903.97, "end": 2904.61, "word": " ماشي", "probability": 0.682373046875}, {"start": 2904.61, "end": 2905.11, "word": " تشيديا", "probability": 0.775341796875}, {"start": 2905.11, "end": 2905.75, "word": " كيجاشي", "probability": 0.9158203125}, {"start": 2905.75, "end": 2906.43, "word": " سيندروم", "probability": 0.92763671875}, {"start": 2906.43, "end": 2907.99, "word": " انترومبسايتروبينا", "probability": 0.5543416341145834}, {"start": 2907.99, "end": 2908.21, "word": " واد", "probability": 0.19647216796875}, {"start": 2908.21, "end": 2908.61, "word": " أبسن", "probability": 0.8800048828125}, {"start": 2908.61, "end": 2909.77, "word": " ريدياس", "probability": 0.674560546875}], "temperature": 1.0}, {"id": 106, "seek": 293712, "start": 2911.48, "end": 2937.12, "text": "ماشي كلها عبارة عن syndrome يعني هو عبارة عن مرض موجود أزس صايم ماشي من خلال من خلال syndrome ونمر عليهم على السريع الويسكوت ألدريخ سندروم ماشي هو عبارة عن defect genetic defect X-linked genetic defect", "tokens": [2304, 33599, 1829, 28242, 11296, 6225, 3555, 9640, 3660, 18871, 19371, 37495, 22653, 31439, 6225, 3555, 9640, 3660, 18871, 3714, 43042, 3714, 29245, 23328, 5551, 11622, 3794, 20328, 995, 32640, 3714, 33599, 1829, 9154, 16490, 1211, 6027, 9154, 16490, 1211, 6027, 19371, 4032, 1863, 29973, 25894, 16095, 15844, 21136, 16572, 3615, 2423, 2407, 1829, 3794, 4117, 35473, 5551, 1211, 3215, 16572, 9778, 8608, 41260, 2288, 
20498, 3714, 33599, 1829, 31439, 6225, 3555, 9640, 3660, 18871, 16445, 12462, 16445, 1783, 12, 22473, 292, 12462, 16445], "avg_logprob": -0.2617647086872774, "compression_ratio": 1.747191011235955, "no_speech_prob": 1.0728836059570312e-06, "words": [{"start": 2911.48, "end": 2911.98, "word": "ماشي", "probability": 0.6979166666666666}, {"start": 2911.98, "end": 2912.32, "word": " كلها", "probability": 0.7890625}, {"start": 2912.32, "end": 2912.52, "word": " عبارة", "probability": 0.9727783203125}, {"start": 2912.52, "end": 2912.64, "word": " عن", "probability": 0.998046875}, {"start": 2912.64, "end": 2913.1, "word": " syndrome", "probability": 0.5185546875}, {"start": 2913.1, "end": 2913.56, "word": " يعني", "probability": 0.86669921875}, {"start": 2913.56, "end": 2913.74, "word": " هو", "probability": 0.9453125}, {"start": 2913.74, "end": 2913.94, "word": " عبارة", "probability": 0.9688720703125}, {"start": 2913.94, "end": 2914.08, "word": " عن", "probability": 0.99365234375}, {"start": 2914.08, "end": 2914.3, "word": " مرض", "probability": 0.93701171875}, {"start": 2914.3, "end": 2915.0, "word": " موجود", "probability": 0.9586588541666666}, {"start": 2915.0, "end": 2915.88, "word": " أزس", "probability": 0.849609375}, {"start": 2915.88, "end": 2917.44, "word": " صايم", "probability": 0.76904296875}, {"start": 2917.44, "end": 2919.06, "word": " ماشي", "probability": 0.7686360677083334}, {"start": 2919.06, "end": 2919.92, "word": " من", "probability": 0.8427734375}, {"start": 2919.92, "end": 2920.42, "word": " خلال", "probability": 0.9886067708333334}, {"start": 2920.42, "end": 2920.62, "word": " من", "probability": 0.6943359375}, {"start": 2920.62, "end": 2921.02, "word": " خلال", "probability": 0.99169921875}, {"start": 2921.02, "end": 2921.5, "word": " syndrome", "probability": 0.72705078125}, {"start": 2921.5, "end": 2925.3, "word": " ونمر", "probability": 0.7548014322916666}, {"start": 2925.3, "end": 2925.6, "word": " عليهم", "probability": 0.6729736328125}, 
{"start": 2925.6, "end": 2925.72, "word": " على", "probability": 0.81689453125}, {"start": 2925.72, "end": 2926.0, "word": " السريع", "probability": 0.4662272135416667}, {"start": 2926.0, "end": 2926.48, "word": " الويسكوت", "probability": 0.6776936848958334}, {"start": 2926.48, "end": 2926.86, "word": " ألدريخ", "probability": 0.79794921875}, {"start": 2926.86, "end": 2927.46, "word": " سندروم", "probability": 0.554107666015625}, {"start": 2927.46, "end": 2930.22, "word": " ماشي", "probability": 0.7902018229166666}, {"start": 2930.22, "end": 2932.0, "word": " هو", "probability": 0.96337890625}, {"start": 2932.0, "end": 2932.66, "word": " عبارة", "probability": 0.9976806640625}, {"start": 2932.66, "end": 2933.02, "word": " عن", "probability": 0.9892578125}, {"start": 2933.02, "end": 2933.96, "word": " defect", "probability": 0.9736328125}, {"start": 2933.96, "end": 2934.72, "word": " genetic", "probability": 0.68896484375}, {"start": 2934.72, "end": 2935.5, "word": " defect", "probability": 0.98193359375}, {"start": 2935.5, "end": 2936.0, "word": " X", "probability": 0.57958984375}, {"start": 2936.0, "end": 2936.28, "word": "-linked", "probability": 0.6943359375}, {"start": 2936.28, "end": 2936.62, "word": " genetic", "probability": 0.85498046875}, {"start": 2936.62, "end": 2937.12, "word": " defect", "probability": 0.9833984375}], "temperature": 1.0}, {"id": 107, "seek": 297050, "start": 2942.9, "end": 2970.5, "text": "اللي بيصير فيه خلال في الويسكوت أدرخ syndrome protein وهو مسؤول عن ال acting cytoskeleton، formation in hemopoietic cell بيتميز بترمبو سايتوبينيا ووجود ال storage pool disease وفي إكزيمة وفي recurrent infection، يعني بحكي على سندروم، ماشي يعني أكتر من ظاهر، أكتر من مرض", "tokens": [6027, 20292, 4724, 1829, 9381, 13546, 8978, 3224, 16490, 1211, 6027, 8978, 2423, 2407, 1829, 3794, 4117, 35473, 5551, 3215, 2288, 9778, 19371, 7944, 37037, 2407, 47524, 33604, 12610, 18871, 2423, 6577, 40248, 329, 330, 14806, 12399, 11723, 294, 8636, 404, 78, 1684, 299, 
2815, 4724, 36081, 2304, 1829, 11622, 39894, 2288, 2304, 3555, 2407, 8608, 995, 36081, 2407, 21292, 22653, 995, 4032, 29245, 23328, 2423, 6725, 7005, 4752, 4032, 41185, 11933, 4117, 11622, 32640, 3660, 4032, 41185, 18680, 1753, 11764, 12399, 37495, 22653, 4724, 5016, 4117, 1829, 15844, 8608, 41260, 2288, 20498, 12399, 3714, 33599, 1829, 37495, 22653, 5551, 4117, 2655, 2288, 9154, 1357, 116, 40294, 2288, 12399, 5551, 4117, 2655, 2288, 9154, 3714, 43042], "avg_logprob": -0.31704059523394984, "compression_ratio": 1.492537313432836, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 2942.9, "end": 2943.26, "word": "اللي", "probability": 0.919677734375}, {"start": 2943.26, "end": 2943.52, "word": " بيصير", "probability": 0.9659423828125}, {"start": 2943.52, "end": 2943.82, "word": " فيه", "probability": 0.955078125}, {"start": 2943.82, "end": 2944.16, "word": " خلال", "probability": 0.9617513020833334}, {"start": 2944.16, "end": 2944.4, "word": " في", "probability": 0.91845703125}, {"start": 2944.4, "end": 2944.94, "word": " الويسكوت", "probability": 0.684326171875}, {"start": 2944.94, "end": 2945.46, "word": " أدرخ", "probability": 0.80810546875}, {"start": 2945.46, "end": 2947.28, "word": " syndrome", "probability": 0.396728515625}, {"start": 2947.28, "end": 2948.22, "word": " protein", "probability": 0.6904296875}, {"start": 2948.22, "end": 2950.42, "word": " وهو", "probability": 0.86669921875}, {"start": 2950.42, "end": 2950.78, "word": " مسؤول", "probability": 0.8292643229166666}, {"start": 2950.78, "end": 2951.14, "word": " عن", "probability": 0.98974609375}, {"start": 2951.14, "end": 2951.46, "word": " ال", "probability": 0.94873046875}, {"start": 2951.46, "end": 2952.08, "word": " acting", "probability": 0.278564453125}, {"start": 2952.08, "end": 2954.12, "word": " cytoskeleton،", "probability": 0.74580078125}, {"start": 2954.12, "end": 2954.58, "word": " formation", "probability": 0.552734375}, {"start": 2954.58, "end": 2954.88, "word": " in", 
"probability": 0.94873046875}, {"start": 2954.88, "end": 2955.56, "word": " hemopoietic", "probability": 0.6616455078125}, {"start": 2955.56, "end": 2955.86, "word": " cell", "probability": 0.50634765625}, {"start": 2955.86, "end": 2958.82, "word": " بيتميز", "probability": 0.8841796875}, {"start": 2958.82, "end": 2959.44, "word": " بترمبو", "probability": 0.5651123046875}, {"start": 2959.44, "end": 2960.52, "word": " سايتوبينيا", "probability": 0.7482212611607143}, {"start": 2960.52, "end": 2961.28, "word": " ووجود", "probability": 0.85693359375}, {"start": 2961.28, "end": 2961.48, "word": " ال", "probability": 0.373291015625}, {"start": 2961.48, "end": 2962.3, "word": " storage", "probability": 0.865234375}, {"start": 2962.3, "end": 2962.6, "word": " pool", "probability": 0.81689453125}, {"start": 2962.6, "end": 2963.3, "word": " disease", "probability": 0.92333984375}, {"start": 2963.3, "end": 2964.02, "word": " وفي", "probability": 0.767822265625}, {"start": 2964.02, "end": 2964.7, "word": " إكزيمة", "probability": 0.616650390625}, {"start": 2964.7, "end": 2965.0, "word": " وفي", "probability": 0.86572265625}, {"start": 2965.0, "end": 2965.46, "word": " recurrent", "probability": 0.876220703125}, {"start": 2965.46, "end": 2966.26, "word": " infection،", "probability": 0.620849609375}, {"start": 2966.26, "end": 2966.4, "word": " يعني", "probability": 0.770751953125}, {"start": 2966.4, "end": 2966.7, "word": " بحكي", "probability": 0.9356689453125}, {"start": 2966.7, "end": 2966.84, "word": " على", "probability": 0.8037109375}, {"start": 2966.84, "end": 2968.14, "word": " سندروم،", "probability": 0.5412109375}, {"start": 2968.14, "end": 2968.56, "word": " ماشي", "probability": 0.7196451822916666}, {"start": 2968.56, "end": 2968.74, "word": " يعني", "probability": 0.7666015625}, {"start": 2968.74, "end": 2969.12, "word": " أكتر", "probability": 0.966552734375}, {"start": 2969.12, "end": 2969.28, "word": " من", "probability": 0.99462890625}, {"start": 2969.28, 
"end": 2969.8, "word": " ظاهر،", "probability": 0.84453125}, {"start": 2969.8, "end": 2970.1, "word": " أكتر", "probability": 0.9766845703125}, {"start": 2970.1, "end": 2970.26, "word": " من", "probability": 0.9951171875}, {"start": 2970.26, "end": 2970.5, "word": " مرض", "probability": 0.75830078125}], "temperature": 1.0}, {"id": 108, "seek": 300056, "start": 2971.5, "end": 3000.56, "text": "ماشى thrombocytopenia ثم اللى هو platelet storage disease ثم eczema ثم infection عارفينهم كلها ال eczema اللى عبارة عن ايه الصدافية خلف الصدافية ماشى عيبا شايفين يا شباب شايفين ال eczema اللى موجودة", "tokens": [2304, 33599, 7578, 739, 3548, 905, 4328, 15752, 654, 38637, 2304, 13672, 7578, 31439, 3403, 15966, 6725, 4752, 38637, 2304, 308, 3689, 5619, 38637, 2304, 11764, 6225, 9640, 5172, 9957, 16095, 28242, 11296, 2423, 308, 3689, 5619, 13672, 7578, 6225, 3555, 9640, 3660, 18871, 1975, 1829, 3224, 31767, 28259, 5172, 10632, 16490, 46538, 31767, 28259, 5172, 10632, 3714, 33599, 7578, 6225, 1829, 3555, 995, 13412, 995, 33911, 9957, 35186, 13412, 3555, 16758, 13412, 995, 33911, 9957, 2423, 308, 3689, 5619, 13672, 7578, 3714, 29245, 23328, 3660], "avg_logprob": -0.23886494595428992, "compression_ratio": 1.6797752808988764, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2971.5, "end": 2971.98, "word": "ماشى", "probability": 0.6959635416666666}, {"start": 2971.98, "end": 2973.28, "word": " thrombocytopenia", "probability": 0.8198445638020834}, {"start": 2973.28, "end": 2974.46, "word": " ثم", "probability": 0.92431640625}, {"start": 2974.46, "end": 2975.08, "word": " اللى", "probability": 0.941650390625}, {"start": 2975.08, "end": 2975.38, "word": " هو", "probability": 0.87060546875}, {"start": 2975.38, "end": 2976.24, "word": " platelet", "probability": 0.53704833984375}, {"start": 2976.24, "end": 2976.94, "word": " storage", "probability": 0.826171875}, {"start": 2976.94, "end": 2978.46, "word": " disease", "probability": 0.654296875}, {"start": 2978.46, "end": 
2979.52, "word": " ثم", "probability": 0.91357421875}, {"start": 2979.52, "end": 2980.2, "word": " eczema", "probability": 0.7151692708333334}, {"start": 2980.2, "end": 2981.9, "word": " ثم", "probability": 0.986572265625}, {"start": 2981.9, "end": 2982.58, "word": " infection", "probability": 0.81689453125}, {"start": 2982.58, "end": 2983.74, "word": " عارفينهم", "probability": 0.93857421875}, {"start": 2983.74, "end": 2984.22, "word": " كلها", "probability": 0.6473388671875}, {"start": 2984.22, "end": 2985.38, "word": " ال", "probability": 0.43505859375}, {"start": 2985.38, "end": 2985.86, "word": " eczema", "probability": 0.8621419270833334}, {"start": 2985.86, "end": 2986.62, "word": " اللى", "probability": 0.83837890625}, {"start": 2986.62, "end": 2986.88, "word": " عبارة", "probability": 0.9556884765625}, {"start": 2986.88, "end": 2987.04, "word": " عن", "probability": 0.990234375}, {"start": 2987.04, "end": 2987.28, "word": " ايه", "probability": 0.8499348958333334}, {"start": 2987.28, "end": 2990.36, "word": " الصدافية", "probability": 0.69146728515625}, {"start": 2990.36, "end": 2991.64, "word": " خلف", "probability": 0.51495361328125}, {"start": 2991.64, "end": 2992.8, "word": " الصدافية", "probability": 0.98583984375}, {"start": 2992.8, "end": 2993.9, "word": " ماشى", "probability": 0.7692057291666666}, {"start": 2993.9, "end": 2995.5, "word": " عيبا", "probability": 0.67962646484375}, {"start": 2995.5, "end": 2998.96, "word": " شايفين", "probability": 0.8541259765625}, {"start": 2998.96, "end": 2999.08, "word": " يا", "probability": 0.72119140625}, {"start": 2999.08, "end": 2999.28, "word": " شباب", "probability": 0.9931640625}, {"start": 2999.28, "end": 2999.56, "word": " شايفين", "probability": 0.961181640625}, {"start": 2999.56, "end": 2999.66, "word": " ال", "probability": 0.9697265625}, {"start": 2999.66, "end": 2999.92, "word": " eczema", "probability": 0.9493815104166666}, {"start": 2999.92, "end": 3000.06, "word": " اللى", "probability": 
0.98876953125}, {"start": 3000.06, "end": 3000.56, "word": " موجودة", "probability": 0.98583984375}], "temperature": 1.0}, {"id": 109, "seek": 302692, "start": 3001.96, "end": 3026.92, "text": "انتوا واضحة الصورة تماما ولا ده؟ باقى واضح .. باقى شايفين ال eczema اللى جاى للطفل هذا مسكين في رجليه في أجهزة رجليه؟ ومرد جلدي، ماشي؟ ال harness-kye-bottling syndrome برضه حكوا عنه في التسعة و خمسين، الفتس عمان وتسعة و خمسين وواصل تطب العلماء التانين هدول", "tokens": [7649, 2655, 14407, 4032, 46958, 5016, 3660, 31767, 13063, 3660, 46811, 10943, 995, 49429, 11778, 3224, 22807, 4724, 995, 4587, 7578, 4032, 46958, 5016, 4386, 4724, 995, 4587, 7578, 13412, 995, 33911, 9957, 2423, 308, 3689, 5619, 13672, 7578, 10874, 995, 7578, 24976, 9566, 5172, 1211, 23758, 47524, 4117, 9957, 8978, 12602, 7435, 20292, 3224, 8978, 5551, 7435, 3224, 11622, 3660, 12602, 7435, 20292, 3224, 22807, 4032, 29973, 3215, 10874, 1211, 16254, 12399, 3714, 33599, 1829, 22807, 2423, 19700, 12, 74, 1200, 12, 65, 1521, 1688, 19371, 4724, 43042, 3224, 11331, 4117, 14407, 18871, 3224, 8978, 16712, 3794, 27884, 4032, 16490, 2304, 3794, 9957, 12399, 27188, 2655, 3794, 6225, 2304, 7649, 34683, 3794, 27884, 4032, 16490, 2304, 3794, 9957, 4032, 2407, 33546, 1211, 6055, 9566, 3555, 18863, 19528, 16606, 16712, 7649, 9957, 8032, 3215, 12610], "avg_logprob": -0.3692555142676129, "compression_ratio": 1.664092664092664, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 3001.96, "end": 3002.4, "word": "انتوا", "probability": 0.7395833333333334}, {"start": 3002.4, "end": 3002.86, "word": " واضحة", "probability": 0.61944580078125}, {"start": 3002.86, "end": 3003.22, "word": " الصورة", "probability": 0.98046875}, {"start": 3003.22, "end": 3003.66, "word": " تماما", "probability": 0.99072265625}, {"start": 3003.66, "end": 3003.86, "word": " ولا", "probability": 0.20947265625}, {"start": 3003.86, "end": 3005.08, "word": " ده؟", "probability": 0.6576334635416666}, {"start": 3005.08, "end": 3005.36, "word": " باقى", 
"probability": 0.57489013671875}, {"start": 3005.36, "end": 3005.7, "word": " واضح", "probability": 0.7706705729166666}, {"start": 3005.7, "end": 3005.74, "word": " ..", "probability": 0.174560546875}, {"start": 3005.74, "end": 3006.16, "word": " باقى", "probability": 0.908203125}, {"start": 3006.16, "end": 3007.06, "word": " شايفين", "probability": 0.92431640625}, {"start": 3007.06, "end": 3007.16, "word": " ال", "probability": 0.86083984375}, {"start": 3007.16, "end": 3007.38, "word": " eczema", "probability": 0.7020263671875}, {"start": 3007.38, "end": 3007.54, "word": " اللى", "probability": 0.842041015625}, {"start": 3007.54, "end": 3007.76, "word": " جاى", "probability": 0.7552083333333334}, {"start": 3007.76, "end": 3008.18, "word": " للطفل", "probability": 0.9727783203125}, {"start": 3008.18, "end": 3008.38, "word": " هذا", "probability": 0.6640625}, {"start": 3008.38, "end": 3008.7, "word": " مسكين", "probability": 0.9619140625}, {"start": 3008.7, "end": 3008.84, "word": " في", "probability": 0.7470703125}, {"start": 3008.84, "end": 3009.26, "word": " رجليه", "probability": 0.8927001953125}, {"start": 3009.26, "end": 3009.36, "word": " في", "probability": 0.41650390625}, {"start": 3009.36, "end": 3009.66, "word": " أجهزة", "probability": 0.52265625}, {"start": 3009.66, "end": 3011.18, "word": " رجليه؟", "probability": 0.79189453125}, {"start": 3011.18, "end": 3011.48, "word": " ومرد", "probability": 0.7229817708333334}, {"start": 3011.48, "end": 3012.5, "word": " جلدي،", "probability": 0.760284423828125}, {"start": 3012.5, "end": 3017.0, "word": " ماشي؟", "probability": 0.659210205078125}, {"start": 3017.0, "end": 3017.24, "word": " ال", "probability": 0.9111328125}, {"start": 3017.24, "end": 3017.58, "word": " harness", "probability": 0.156005859375}, {"start": 3017.58, "end": 3017.96, "word": "-kye", "probability": 0.386474609375}, {"start": 3017.96, "end": 3018.48, "word": "-bottling", "probability": 0.64398193359375}, {"start": 3018.48, "end": 3019.22, 
"word": " syndrome", "probability": 0.82421875}, {"start": 3019.22, "end": 3020.62, "word": " برضه", "probability": 0.9593098958333334}, {"start": 3020.62, "end": 3021.44, "word": " حكوا", "probability": 0.8190104166666666}, {"start": 3021.44, "end": 3021.8, "word": " عنه", "probability": 0.99462890625}, {"start": 3021.8, "end": 3022.0, "word": " في", "probability": 0.83447265625}, {"start": 3022.0, "end": 3022.48, "word": " التسعة", "probability": 0.6354166666666666}, {"start": 3022.48, "end": 3022.58, "word": " و", "probability": 0.982421875}, {"start": 3022.58, "end": 3023.26, "word": " خمسين،", "probability": 0.78408203125}, {"start": 3023.26, "end": 3023.48, "word": " الفتس", "probability": 0.59326171875}, {"start": 3023.48, "end": 3023.66, "word": " عمان", "probability": 0.6215006510416666}, {"start": 3023.66, "end": 3023.9, "word": " وتسعة", "probability": 0.7431640625}, {"start": 3023.9, "end": 3023.96, "word": " و", "probability": 0.990234375}, {"start": 3023.96, "end": 3024.46, "word": " خمسين", "probability": 0.97412109375}, {"start": 3024.46, "end": 3025.64, "word": " وواصل", "probability": 0.50885009765625}, {"start": 3025.64, "end": 3025.86, "word": " تطب", "probability": 0.6599934895833334}, {"start": 3025.86, "end": 3026.22, "word": " العلماء", "probability": 0.9674479166666666}, {"start": 3026.22, "end": 3026.56, "word": " التانين", "probability": 0.91259765625}, {"start": 3026.56, "end": 3026.92, "word": " هدول", "probability": 0.9407552083333334}], "temperature": 1.0}, {"id": 110, "seek": 305692, "start": 3028.34, "end": 3056.92, "text": "هو عبارة عن تيروزينيز positive oculocutaneous disease Albinism oculocutaneous albinism ماعرفينش يعني albinism تعرفوا مرض العشاء الليلي انت راجعيهاش دول الناس اللي .. 
اللي ..", "tokens": [3224, 2407, 6225, 3555, 9640, 3660, 18871, 6055, 13546, 2407, 11622, 9957, 1829, 11622, 3353, 277, 2444, 905, 325, 15447, 4752, 967, 13496, 1434, 277, 2444, 905, 325, 15447, 419, 13496, 1434, 19446, 3615, 28480, 9957, 8592, 37495, 22653, 419, 13496, 1434, 37279, 28480, 14407, 3714, 43042, 18863, 8592, 16606, 13672, 1829, 20292, 16472, 2655, 12602, 26108, 3615, 1829, 3224, 33599, 11778, 12610, 2423, 8315, 3794, 13672, 1829, 4386, 13672, 1829, 4386], "avg_logprob": -0.3341181474189236, "compression_ratio": 1.5029940119760479, "no_speech_prob": 0.0, "words": [{"start": 3028.34, "end": 3028.6, "word": "هو", "probability": 0.891845703125}, {"start": 3028.6, "end": 3028.86, "word": " عبارة", "probability": 0.871826171875}, {"start": 3028.86, "end": 3028.98, "word": " عن", "probability": 0.99658203125}, {"start": 3028.98, "end": 3030.98, "word": " تيروزينيز", "probability": 0.7506975446428571}, {"start": 3030.98, "end": 3032.56, "word": " positive", "probability": 0.5419921875}, {"start": 3032.56, "end": 3035.28, "word": " oculocutaneous", "probability": 0.571875}, {"start": 3035.28, "end": 3036.38, "word": " disease", "probability": 0.931640625}, {"start": 3036.38, "end": 3038.06, "word": " Albinism", "probability": 0.6420491536458334}, {"start": 3038.06, "end": 3039.28, "word": " oculocutaneous", "probability": 0.7974609375}, {"start": 3039.28, "end": 3040.08, "word": " albinism", "probability": 0.9202473958333334}, {"start": 3040.08, "end": 3040.68, "word": " ماعرفينش", "probability": 0.7778076171875}, {"start": 3040.68, "end": 3040.88, "word": " يعني", "probability": 0.899169921875}, {"start": 3040.88, "end": 3041.6, "word": " albinism", "probability": 0.89794921875}, {"start": 3041.6, "end": 3044.46, "word": " تعرفوا", "probability": 0.735107421875}, {"start": 3044.46, "end": 3044.88, "word": " مرض", "probability": 0.83642578125}, {"start": 3044.88, "end": 3048.04, "word": " العشاء", "probability": 0.8704427083333334}, {"start": 3048.04, "end": 
3048.62, "word": " الليلي", "probability": 0.9140625}, {"start": 3048.62, "end": 3049.98, "word": " انت", "probability": 0.579833984375}, {"start": 3049.98, "end": 3050.7, "word": " راجعيهاش", "probability": 0.657470703125}, {"start": 3050.7, "end": 3053.5, "word": " دول", "probability": 0.6446533203125}, {"start": 3053.5, "end": 3053.82, "word": " الناس", "probability": 0.9905598958333334}, {"start": 3053.82, "end": 3054.86, "word": " اللي", "probability": 0.93408203125}, {"start": 3054.86, "end": 3054.86, "word": " ..", "probability": 0.5263671875}, {"start": 3054.86, "end": 3055.62, "word": " اللي", "probability": 0.9267578125}, {"start": 3055.62, "end": 3056.92, "word": " ..", "probability": 0.85693359375}], "temperature": 1.0}, {"id": 111, "seek": 308439, "start": 3058.39, "end": 3084.39, "text": "الأبرز بالعربي بروس تلاقي أشجار كاملة و بيكون في الليل بيمشي في صورة .. في صورة هالجيت بولجي كتير مشهور في Puerto Rico ماشي و it's associated with pulmonary fibrosis and carotid infection", "tokens": [6027, 10721, 26890, 11622, 20666, 3615, 2288, 21292, 4724, 32887, 3794, 6055, 15040, 38436, 5551, 8592, 7435, 9640, 9122, 10943, 37977, 4032, 4724, 1829, 30544, 8978, 13672, 26895, 4724, 32640, 8592, 1829, 8978, 20328, 13063, 3660, 4386, 8978, 20328, 13063, 3660, 8032, 6027, 7435, 36081, 4724, 12610, 7435, 1829, 9122, 2655, 13546, 37893, 3224, 13063, 8978, 21472, 22643, 3714, 33599, 1829, 4032, 309, 311, 6615, 365, 8331, 46386, 13116, 2635, 271, 293, 1032, 310, 327, 11764], "avg_logprob": -0.32163148731380314, "compression_ratio": 1.4450261780104712, "no_speech_prob": 0.0, "words": [{"start": 3058.39, "end": 3059.11, "word": "الأبرز", "probability": 0.8822021484375}, {"start": 3059.11, "end": 3060.29, "word": " بالعربي", "probability": 0.68878173828125}, {"start": 3060.29, "end": 3062.21, "word": " بروس", "probability": 0.672119140625}, {"start": 3062.21, "end": 3063.41, "word": " تلاقي", "probability": 0.7233072916666666}, {"start": 3063.41, "end": 3063.99, "word": " 
أشجار", "probability": 0.79693603515625}, {"start": 3063.99, "end": 3064.57, "word": " كاملة", "probability": 0.9772135416666666}, {"start": 3064.57, "end": 3065.69, "word": " و", "probability": 0.69775390625}, {"start": 3065.69, "end": 3066.01, "word": " بيكون", "probability": 0.8238932291666666}, {"start": 3066.01, "end": 3066.17, "word": " في", "probability": 0.93408203125}, {"start": 3066.17, "end": 3066.45, "word": " الليل", "probability": 0.960205078125}, {"start": 3066.45, "end": 3066.93, "word": " بيمشي", "probability": 0.9581298828125}, {"start": 3066.93, "end": 3067.07, "word": " في", "probability": 0.6796875}, {"start": 3067.07, "end": 3067.49, "word": " صورة", "probability": 0.9482421875}, {"start": 3067.49, "end": 3067.55, "word": " ..", "probability": 0.2435302734375}, {"start": 3067.55, "end": 3067.71, "word": " في", "probability": 0.78759765625}, {"start": 3067.71, "end": 3067.97, "word": " صورة", "probability": 0.9733072916666666}, {"start": 3067.97, "end": 3068.17, "word": " هالجيت", "probability": 0.42181396484375}, {"start": 3068.17, "end": 3068.47, "word": " بولجي", "probability": 0.67657470703125}, {"start": 3068.47, "end": 3069.63, "word": " كتير", "probability": 0.5049641927083334}, {"start": 3069.63, "end": 3069.95, "word": " مشهور", "probability": 0.9401041666666666}, {"start": 3069.95, "end": 3070.11, "word": " في", "probability": 0.98291015625}, {"start": 3070.11, "end": 3070.43, "word": " Puerto", "probability": 0.88232421875}, {"start": 3070.43, "end": 3070.85, "word": " Rico", "probability": 0.83447265625}, {"start": 3070.85, "end": 3072.91, "word": " ماشي", "probability": 0.6904703776041666}, {"start": 3072.91, "end": 3074.83, "word": " و", "probability": 0.52294921875}, {"start": 3074.83, "end": 3079.93, "word": " it's", "probability": 0.744140625}, {"start": 3079.93, "end": 3080.43, "word": " associated", "probability": 0.908203125}, {"start": 3080.43, "end": 3080.73, "word": " with", "probability": 0.912109375}, {"start": 3080.73, 
"end": 3081.27, "word": " pulmonary", "probability": 0.890380859375}, {"start": 3081.27, "end": 3082.61, "word": " fibrosis", "probability": 0.9503580729166666}, {"start": 3082.61, "end": 3083.45, "word": " and", "probability": 0.85791015625}, {"start": 3083.45, "end": 3083.97, "word": " carotid", "probability": 0.7126057942708334}, {"start": 3083.97, "end": 3084.39, "word": " infection", "probability": 0.73779296875}], "temperature": 1.0}, {"id": 112, "seek": 311672, "start": 3091.7, "end": 3116.72, "text": "و في نزيف شايفين الصورة؟ واضحة اه؟ واضحة ولا لا؟ هذه نفس الحكاية شايفين ال alpinism واضح تماما تخيلوا هذا .. هذا الصورة طبعا في ال net تخيلوا هذا المريض المسكين هايل عينتين تبعونه كل واحدة بلون", "tokens": [2407, 8978, 8717, 11622, 33911, 13412, 995, 33911, 9957, 31767, 13063, 3660, 22807, 4032, 46958, 5016, 3660, 1975, 3224, 22807, 4032, 46958, 5016, 3660, 49429, 20193, 22807, 29538, 8717, 36178, 21542, 4117, 995, 10632, 13412, 995, 33911, 9957, 2423, 419, 17836, 1434, 4032, 46958, 5016, 46811, 10943, 995, 6055, 9778, 26895, 14407, 23758, 4386, 23758, 31767, 13063, 3660, 23032, 3555, 3615, 995, 8978, 2423, 2533, 6055, 9778, 26895, 14407, 23758, 9673, 16572, 11242, 9673, 3794, 4117, 9957, 8032, 995, 26895, 6225, 9957, 2655, 9957, 6055, 3555, 3615, 11536, 3224, 28242, 36764, 24401, 3660, 4724, 1211, 11536], "avg_logprob": -0.32989691704818885, "compression_ratio": 1.8118279569892473, "no_speech_prob": 4.172325134277344e-07, "words": [{"start": 3091.7, "end": 3092.62, "word": "و", "probability": 0.053192138671875}, {"start": 3092.62, "end": 3093.54, "word": " في", "probability": 0.2587890625}, {"start": 3093.54, "end": 3094.18, "word": " نزيف", "probability": 0.8790690104166666}, {"start": 3094.18, "end": 3094.68, "word": " شايفين", "probability": 0.80780029296875}, {"start": 3094.68, "end": 3096.48, "word": " الصورة؟", "probability": 0.8287353515625}, {"start": 3096.48, "end": 3097.22, "word": " واضحة", "probability": 0.8875732421875}, {"start": 3097.22, "end": 
3098.64, "word": " اه؟", "probability": 0.65673828125}, {"start": 3098.64, "end": 3099.6, "word": " واضحة", "probability": 0.97802734375}, {"start": 3099.6, "end": 3099.72, "word": " ولا", "probability": 0.86083984375}, {"start": 3099.72, "end": 3101.06, "word": " لا؟", "probability": 0.737548828125}, {"start": 3101.06, "end": 3101.46, "word": " هذه", "probability": 0.343505859375}, {"start": 3101.46, "end": 3101.7, "word": " نفس", "probability": 0.9970703125}, {"start": 3101.7, "end": 3102.2, "word": " الحكاية", "probability": 0.8834228515625}, {"start": 3102.2, "end": 3104.06, "word": " شايفين", "probability": 0.8883056640625}, {"start": 3104.06, "end": 3104.18, "word": " ال", "probability": 0.2425537109375}, {"start": 3104.18, "end": 3104.72, "word": " alpinism", "probability": 0.7067057291666666}, {"start": 3104.72, "end": 3105.12, "word": " واضح", "probability": 0.8544921875}, {"start": 3105.12, "end": 3107.16, "word": " تماما", "probability": 0.99560546875}, {"start": 3107.16, "end": 3108.8, "word": " تخيلوا", "probability": 0.8687744140625}, {"start": 3108.8, "end": 3109.06, "word": " هذا", "probability": 0.71875}, {"start": 3109.06, "end": 3109.36, "word": " ..", "probability": 0.174560546875}, {"start": 3109.36, "end": 3109.78, "word": " هذا", "probability": 0.419189453125}, {"start": 3109.78, "end": 3110.12, "word": " الصورة", "probability": 0.91064453125}, {"start": 3110.12, "end": 3110.3, "word": " طبعا", "probability": 0.9486083984375}, {"start": 3110.3, "end": 3110.42, "word": " في", "probability": 0.86279296875}, {"start": 3110.42, "end": 3110.5, "word": " ال", "probability": 0.9287109375}, {"start": 3110.5, "end": 3110.66, "word": " net", "probability": 0.296142578125}, {"start": 3110.66, "end": 3112.38, "word": " تخيلوا", "probability": 0.80712890625}, {"start": 3112.38, "end": 3112.56, "word": " هذا", "probability": 0.8623046875}, {"start": 3112.56, "end": 3113.98, "word": " المريض", "probability": 0.9226888020833334}, {"start": 3113.98, "end": 
3114.44, "word": " المسكين", "probability": 0.8465576171875}, {"start": 3114.44, "end": 3114.86, "word": " هايل", "probability": 0.2930094401041667}, {"start": 3114.86, "end": 3115.52, "word": " عينتين", "probability": 0.814697265625}, {"start": 3115.52, "end": 3116.08, "word": " تبعونه", "probability": 0.78193359375}, {"start": 3116.08, "end": 3116.2, "word": " كل", "probability": 0.5908203125}, {"start": 3116.2, "end": 3116.4, "word": " واحدة", "probability": 0.9754231770833334}, {"start": 3116.4, "end": 3116.72, "word": " بلون", "probability": 0.6514485677083334}], "temperature": 1.0}, {"id": 113, "seek": 314232, "start": 3117.74, "end": 3142.32, "text": "صح؟ اتظهرت؟ طبعا هذه انسة الموضوع يشوف فيها، هي alpinism مواضع، في الليل مشوفش فيها، في الليل مشوفش فيها، بيصير عشه الليلي عنده، ودويش، و بعدين العشه الليلي مش بس مشوفش لإن هو مشوفش لإن هو كمان مابقدرش يتحمل دخول كمية كبيرة من الضوء إلى العيلة، بتلاقي دايما وقام مدانيه بترجعه", "tokens": [9381, 5016, 22807, 1975, 2655, 19913, 3224, 43500, 22807, 23032, 3555, 3615, 995, 29538, 16472, 3794, 3660, 9673, 2407, 11242, 45367, 7251, 8592, 38688, 8978, 11296, 12399, 39896, 419, 17836, 1434, 3714, 2407, 46958, 3615, 12399, 8978, 13672, 26895, 37893, 38688, 8592, 8978, 11296, 12399, 8978, 13672, 26895, 37893, 38688, 8592, 8978, 11296, 12399, 4724, 1829, 9381, 13546, 6225, 8592, 3224, 13672, 1829, 20292, 43242, 3224, 12399, 4032, 3215, 45865, 8592, 12399, 4032, 45030, 16254, 1863, 18863, 8592, 3224, 13672, 1829, 20292, 37893, 4724, 3794, 37893, 38688, 8592, 5296, 28814, 1863, 31439, 37893, 38688, 8592, 5296, 28814, 1863, 31439, 9122, 2304, 7649, 3714, 16758, 28543, 2288, 8592, 7251, 2655, 35571, 1211, 11778, 9778, 12610, 9122, 2304, 10632, 9122, 3555, 48923, 9154, 6024, 114, 2407, 38207, 30731, 18863, 26895, 3660, 12399, 39894, 15040, 38436, 11778, 47302, 15042, 4032, 4587, 10943, 3714, 3215, 7649, 1829, 3224, 39894, 47341, 3615, 3224], "avg_logprob": -0.30306208213703745, "compression_ratio": 1.9839357429718876, 
"no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 3117.74, "end": 3118.5, "word": "صح؟", "probability": 0.72119140625}, {"start": 3118.5, "end": 3119.2, "word": " اتظهرت؟", "probability": 0.601837158203125}, {"start": 3119.2, "end": 3120.78, "word": " طبعا", "probability": 0.8807373046875}, {"start": 3120.78, "end": 3121.0, "word": " هذه", "probability": 0.403564453125}, {"start": 3121.0, "end": 3121.48, "word": " انسة", "probability": 0.5769856770833334}, {"start": 3121.48, "end": 3121.78, "word": " الموضوع", "probability": 0.984619140625}, {"start": 3121.78, "end": 3122.1, "word": " يشوف", "probability": 0.9591471354166666}, {"start": 3122.1, "end": 3122.68, "word": " فيها،", "probability": 0.8751627604166666}, {"start": 3122.68, "end": 3122.84, "word": " هي", "probability": 0.642578125}, {"start": 3122.84, "end": 3123.3, "word": " alpinism", "probability": 0.5923665364583334}, {"start": 3123.3, "end": 3124.84, "word": " مواضع،", "probability": 0.642578125}, {"start": 3124.84, "end": 3125.12, "word": " في", "probability": 0.81787109375}, {"start": 3125.12, "end": 3125.44, "word": " الليل", "probability": 0.974609375}, {"start": 3125.44, "end": 3126.22, "word": " مشوفش", "probability": 0.6720377604166666}, {"start": 3126.22, "end": 3127.06, "word": " فيها،", "probability": 0.9202473958333334}, {"start": 3127.06, "end": 3127.16, "word": " في", "probability": 0.93505859375}, {"start": 3127.16, "end": 3127.38, "word": " الليل", "probability": 0.958984375}, {"start": 3127.38, "end": 3127.82, "word": " مشوفش", "probability": 0.890625}, {"start": 3127.82, "end": 3128.94, "word": " فيها،", "probability": 0.9140625}, {"start": 3128.94, "end": 3129.16, "word": " بيصير", "probability": 0.80322265625}, {"start": 3129.16, "end": 3129.46, "word": " عشه", "probability": 0.8419596354166666}, {"start": 3129.46, "end": 3129.78, "word": " الليلي", "probability": 0.6971842447916666}, {"start": 3129.78, "end": 3130.46, "word": " عنده،", "probability": 0.7013346354166666}, 
{"start": 3130.46, "end": 3131.8, "word": " ودويش،", "probability": 0.4984375}, {"start": 3131.8, "end": 3131.9, "word": " و", "probability": 0.88330078125}, {"start": 3131.9, "end": 3132.24, "word": " بعدين", "probability": 0.80810546875}, {"start": 3132.24, "end": 3132.62, "word": " العشه", "probability": 0.8772786458333334}, {"start": 3132.62, "end": 3132.92, "word": " الليلي", "probability": 0.96240234375}, {"start": 3132.92, "end": 3133.14, "word": " مش", "probability": 0.9541015625}, {"start": 3133.14, "end": 3133.36, "word": " بس", "probability": 0.7412109375}, {"start": 3133.36, "end": 3133.86, "word": " مشوفش", "probability": 0.8640950520833334}, {"start": 3133.86, "end": 3134.24, "word": " لإن", "probability": 0.8400065104166666}, {"start": 3134.24, "end": 3134.32, "word": " هو", "probability": 0.6708984375}, {"start": 3134.32, "end": 3134.86, "word": " مشوفش", "probability": 0.9230143229166666}, {"start": 3134.86, "end": 3135.42, "word": " لإن", "probability": 0.8582356770833334}, {"start": 3135.42, "end": 3135.5, "word": " هو", "probability": 0.69482421875}, {"start": 3135.5, "end": 3135.74, "word": " كمان", "probability": 0.9685872395833334}, {"start": 3135.74, "end": 3136.14, "word": " مابقدرش", "probability": 0.852880859375}, {"start": 3136.14, "end": 3136.9, "word": " يتحمل", "probability": 0.99267578125}, {"start": 3136.9, "end": 3137.44, "word": " دخول", "probability": 0.99365234375}, {"start": 3137.44, "end": 3137.8, "word": " كمية", "probability": 0.9152018229166666}, {"start": 3137.8, "end": 3138.2, "word": " كبيرة", "probability": 0.9794921875}, {"start": 3138.2, "end": 3138.4, "word": " من", "probability": 0.99169921875}, {"start": 3138.4, "end": 3138.78, "word": " الضوء", "probability": 0.8253173828125}, {"start": 3138.78, "end": 3139.1, "word": " إلى", "probability": 0.87255859375}, {"start": 3139.1, "end": 3140.3, "word": " العيلة،", "probability": 0.75146484375}, {"start": 3140.3, "end": 3140.76, "word": " بتلاقي", "probability": 
0.6798909505208334}, {"start": 3140.76, "end": 3141.16, "word": " دايما", "probability": 0.78515625}, {"start": 3141.16, "end": 3141.42, "word": " وقام", "probability": 0.4106038411458333}, {"start": 3141.42, "end": 3141.82, "word": " مدانيه", "probability": 0.52861328125}, {"start": 3141.82, "end": 3142.32, "word": " بترجعه", "probability": 0.6407470703125}], "temperature": 1.0}, {"id": 114, "seek": 317321, "start": 3143.73, "end": 3173.21, "text": "Chidiak-Higashi syndrome برضه عبارة عن Chidiak وHigashi حكوا عن هذا الموضوع في ال 54 وعبارة عن abnormal microtupule formation and giant lysozymal granules are present in phagocytes and melanocytes خلل في ال granules تبع النيوتروفيل غالبا و الميكروفاج بشكل عال there is no degranulation", "tokens": [6546, 12716, 514, 12, 39, 328, 15612, 19371, 4724, 43042, 3224, 6225, 3555, 9640, 3660, 18871, 761, 12716, 514, 4032, 39, 328, 15612, 11331, 4117, 14407, 18871, 23758, 9673, 2407, 11242, 45367, 8978, 2423, 20793, 4032, 3615, 3555, 9640, 3660, 18871, 32847, 3123, 10536, 1010, 2271, 11723, 293, 7410, 17293, 539, 1229, 5579, 9370, 3473, 366, 1974, 294, 903, 559, 905, 43673, 293, 47969, 905, 43673, 16490, 1211, 1211, 8978, 2423, 9370, 3473, 6055, 3555, 3615, 2423, 22653, 35473, 32887, 5172, 26895, 32771, 6027, 3555, 995, 4032, 9673, 1829, 4117, 32887, 5172, 26108, 4724, 8592, 28820, 6225, 6027, 456, 307, 572, 368, 42381, 2776], "avg_logprob": -0.2782738231477283, "compression_ratio": 1.5, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3143.73, "end": 3144.23, "word": "Chidiak", "probability": 0.451416015625}, {"start": 3144.23, "end": 3144.67, "word": "-Higashi", "probability": 0.8173828125}, {"start": 3144.67, "end": 3145.13, "word": " syndrome", "probability": 0.08587646484375}, {"start": 3145.13, "end": 3146.73, "word": " برضه", "probability": 0.7189127604166666}, {"start": 3146.73, "end": 3147.15, "word": " عبارة", "probability": 0.9808349609375}, {"start": 3147.15, "end": 3147.41, "word": " عن", "probability": 
0.98681640625}, {"start": 3147.41, "end": 3148.03, "word": " Chidiak", "probability": 0.7989095052083334}, {"start": 3148.03, "end": 3148.77, "word": " وHigashi", "probability": 0.856201171875}, {"start": 3148.77, "end": 3149.37, "word": " حكوا", "probability": 0.9324544270833334}, {"start": 3149.37, "end": 3149.49, "word": " عن", "probability": 0.984375}, {"start": 3149.49, "end": 3149.73, "word": " هذا", "probability": 0.9208984375}, {"start": 3149.73, "end": 3150.25, "word": " الموضوع", "probability": 0.9921875}, {"start": 3150.25, "end": 3150.97, "word": " في", "probability": 0.65478515625}, {"start": 3150.97, "end": 3151.03, "word": " ال", "probability": 0.4755859375}, {"start": 3151.03, "end": 3151.45, "word": " 54", "probability": 0.375}, {"start": 3151.45, "end": 3153.15, "word": " وعبارة", "probability": 0.827099609375}, {"start": 3153.15, "end": 3153.29, "word": " عن", "probability": 0.98388671875}, {"start": 3153.29, "end": 3153.73, "word": " abnormal", "probability": 0.63232421875}, {"start": 3153.73, "end": 3155.31, "word": " microtupule", "probability": 0.814697265625}, {"start": 3155.31, "end": 3157.29, "word": " formation", "probability": 0.80517578125}, {"start": 3157.29, "end": 3158.19, "word": " and", "probability": 0.81591796875}, {"start": 3158.19, "end": 3158.63, "word": " giant", "probability": 0.77392578125}, {"start": 3158.63, "end": 3159.57, "word": " lysozymal", "probability": 0.8291015625}, {"start": 3159.57, "end": 3160.39, "word": " granules", "probability": 0.954345703125}, {"start": 3160.39, "end": 3161.41, "word": " are", "probability": 0.91796875}, {"start": 3161.41, "end": 3161.79, "word": " present", "probability": 0.80224609375}, {"start": 3161.79, "end": 3162.07, "word": " in", "probability": 0.92822265625}, {"start": 3162.07, "end": 3162.73, "word": " phagocytes", "probability": 0.80908203125}, {"start": 3162.73, "end": 3162.99, "word": " and", "probability": 0.82861328125}, {"start": 3162.99, "end": 3163.79, "word": " 
melanocytes", "probability": 0.9508463541666666}, {"start": 3163.79, "end": 3164.91, "word": " خلل", "probability": 0.83056640625}, {"start": 3164.91, "end": 3165.07, "word": " في", "probability": 0.93603515625}, {"start": 3165.07, "end": 3165.15, "word": " ال", "probability": 0.67333984375}, {"start": 3165.15, "end": 3165.69, "word": " granules", "probability": 0.754638671875}, {"start": 3165.69, "end": 3166.01, "word": " تبع", "probability": 0.8697916666666666}, {"start": 3166.01, "end": 3166.95, "word": " النيوتروفيل", "probability": 0.5703938802083334}, {"start": 3166.95, "end": 3168.19, "word": " غالبا", "probability": 0.9677734375}, {"start": 3168.19, "end": 3168.59, "word": " و", "probability": 0.69677734375}, {"start": 3168.59, "end": 3169.65, "word": " الميكروفاج", "probability": 0.6822509765625}, {"start": 3169.65, "end": 3170.05, "word": " بشكل", "probability": 0.8888346354166666}, {"start": 3170.05, "end": 3170.41, "word": " عال", "probability": 0.87158203125}, {"start": 3170.41, "end": 3171.67, "word": " there", "probability": 0.432861328125}, {"start": 3171.67, "end": 3171.87, "word": " is", "probability": 0.9521484375}, {"start": 3171.87, "end": 3172.15, "word": " no", "probability": 0.94677734375}, {"start": 3172.15, "end": 3173.21, "word": " degranulation", "probability": 0.7825520833333334}], "temperature": 1.0}, {"id": 115, "seek": 320380, "start": 3174.3, "end": 3203.8, "text": "no chemotaxis، إيش ال chemotaxis؟ بس chemotaxis حركة، حركة الخلية بيسموها chemo من .. من .. من .. 
من الكمية يعني بالظبط هي حركة موجهة chemical chemotaxis حركة موجهة chemokinases يا شباب هي عبارة عن random movement", "tokens": [1771, 4771, 310, 24633, 12399, 11933, 1829, 8592, 2423, 4771, 310, 24633, 22807, 4724, 3794, 4771, 310, 24633, 11331, 31747, 3660, 12399, 11331, 31747, 3660, 33962, 1211, 10632, 4724, 1829, 38251, 2407, 11296, 4771, 78, 9154, 4386, 9154, 4386, 9154, 4386, 9154, 2423, 24793, 10632, 37495, 22653, 20666, 19913, 3555, 9566, 39896, 11331, 31747, 3660, 3714, 29245, 3224, 3660, 7313, 4771, 310, 24633, 11331, 31747, 3660, 3714, 29245, 3224, 3660, 4771, 453, 259, 1957, 35186, 13412, 3555, 16758, 39896, 6225, 3555, 9640, 3660, 18871, 4974, 3963], "avg_logprob": -0.3365660830475818, "compression_ratio": 1.7314285714285715, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3174.3, "end": 3174.66, "word": "no", "probability": 0.176513671875}, {"start": 3174.66, "end": 3175.76, "word": " chemotaxis،", "probability": 0.743743896484375}, {"start": 3175.76, "end": 3175.9, "word": " إيش", "probability": 0.768310546875}, {"start": 3175.9, "end": 3176.04, "word": " ال", "probability": 0.8681640625}, {"start": 3176.04, "end": 3176.82, "word": " chemotaxis؟", "probability": 0.76605224609375}, {"start": 3176.82, "end": 3178.86, "word": " بس", "probability": 0.43798828125}, {"start": 3178.86, "end": 3179.56, "word": " chemotaxis", "probability": 0.8505859375}, {"start": 3179.56, "end": 3180.34, "word": " حركة،", "probability": 0.9302978515625}, {"start": 3180.34, "end": 3180.68, "word": " حركة", "probability": 0.9881184895833334}, {"start": 3180.68, "end": 3181.08, "word": " الخلية", "probability": 0.82958984375}, {"start": 3181.08, "end": 3181.64, "word": " بيسموها", "probability": 0.8966796875}, {"start": 3181.64, "end": 3183.08, "word": " chemo", "probability": 0.6634521484375}, {"start": 3183.08, "end": 3183.62, "word": " من", "probability": 0.82861328125}, {"start": 3183.62, "end": 3183.8, "word": " ..", "probability": 0.380126953125}, {"start": 
3183.8, "end": 3183.96, "word": " من", "probability": 0.86865234375}, {"start": 3183.96, "end": 3184.16, "word": " ..", "probability": 0.77001953125}, {"start": 3184.16, "end": 3184.64, "word": " من", "probability": 0.8896484375}, {"start": 3184.64, "end": 3184.86, "word": " ..", "probability": 0.341064453125}, {"start": 3184.86, "end": 3184.86, "word": " من", "probability": 0.45947265625}, {"start": 3184.86, "end": 3185.12, "word": " الكمية", "probability": 0.35791015625}, {"start": 3185.12, "end": 3187.84, "word": " يعني", "probability": 0.802734375}, {"start": 3187.84, "end": 3192.12, "word": " بالظبط", "probability": 0.736328125}, {"start": 3192.12, "end": 3192.28, "word": " هي", "probability": 0.86767578125}, {"start": 3192.28, "end": 3192.72, "word": " حركة", "probability": 0.99658203125}, {"start": 3192.72, "end": 3193.52, "word": " موجهة", "probability": 0.9393310546875}, {"start": 3193.52, "end": 3195.02, "word": " chemical", "probability": 0.035247802734375}, {"start": 3195.02, "end": 3196.04, "word": " chemotaxis", "probability": 0.9353841145833334}, {"start": 3196.04, "end": 3196.48, "word": " حركة", "probability": 0.951171875}, {"start": 3196.48, "end": 3197.68, "word": " موجهة", "probability": 0.9549560546875}, {"start": 3197.68, "end": 3200.44, "word": " chemokinases", "probability": 0.6907958984375}, {"start": 3200.44, "end": 3200.66, "word": " يا", "probability": 0.68115234375}, {"start": 3200.66, "end": 3201.02, "word": " شباب", "probability": 0.9850260416666666}, {"start": 3201.02, "end": 3201.96, "word": " هي", "probability": 0.6904296875}, {"start": 3201.96, "end": 3202.16, "word": " عبارة", "probability": 0.80755615234375}, {"start": 3202.16, "end": 3202.28, "word": " عن", "probability": 0.9990234375}, {"start": 3202.28, "end": 3202.7, "word": " random", "probability": 0.81396484375}, {"start": 3202.7, "end": 3203.8, "word": " movement", "probability": 0.9169921875}], "temperature": 1.0}, {"id": 116, "seek": 322766, "start": 3204.92, "end": 
3227.66, "text": "ماشي هذه أربعان directed طبعا بيصير في واحدة .. واحدة قرية تقدرش تعمل la degranulation ولا بتتحرر بيصير .. can't defense .. can't defense our body؟ it cannot وبالتالي في .. بيصير في recurrent infection", "tokens": [2304, 33599, 1829, 29538, 5551, 25513, 3615, 7649, 12898, 23032, 3555, 3615, 995, 4724, 1829, 9381, 13546, 8978, 36764, 24401, 3660, 4386, 36764, 24401, 3660, 12174, 2288, 10632, 6055, 28543, 2288, 8592, 6055, 25957, 1211, 635, 368, 42381, 2776, 49429, 39894, 2655, 5016, 2288, 2288, 4724, 1829, 9381, 13546, 4386, 393, 380, 7654, 4386, 393, 380, 7654, 527, 1772, 22807, 309, 2644, 46599, 6027, 2655, 6027, 1829, 8978, 4386, 4724, 1829, 9381, 13546, 8978, 18680, 1753, 11764], "avg_logprob": -0.30288462073375017, "compression_ratio": 1.4814814814814814, "no_speech_prob": 0.0, "words": [{"start": 3204.92, "end": 3205.34, "word": "ماشي", "probability": 0.8194986979166666}, {"start": 3205.34, "end": 3205.56, "word": " هذه", "probability": 0.238525390625}, {"start": 3205.56, "end": 3205.94, "word": " أربعان", "probability": 0.436492919921875}, {"start": 3205.94, "end": 3206.5, "word": " directed", "probability": 0.76513671875}, {"start": 3206.5, "end": 3209.38, "word": " طبعا", "probability": 0.934814453125}, {"start": 3209.38, "end": 3210.24, "word": " بيصير", "probability": 0.9393310546875}, {"start": 3210.24, "end": 3210.5, "word": " في", "probability": 0.80029296875}, {"start": 3210.5, "end": 3211.42, "word": " واحدة", "probability": 0.9285481770833334}, {"start": 3211.42, "end": 3211.52, "word": " ..", "probability": 0.5625}, {"start": 3211.52, "end": 3213.4, "word": " واحدة", "probability": 0.9617513020833334}, {"start": 3213.4, "end": 3214.22, "word": " قرية", "probability": 0.608642578125}, {"start": 3214.22, "end": 3215.42, "word": " تقدرش", "probability": 0.78045654296875}, {"start": 3215.42, "end": 3216.48, "word": " تعمل", "probability": 0.98486328125}, {"start": 3216.48, "end": 3216.84, "word": " la", "probability": 0.247802734375}, 
{"start": 3216.84, "end": 3217.7, "word": " degranulation", "probability": 0.764892578125}, {"start": 3217.7, "end": 3218.06, "word": " ولا", "probability": 0.81005859375}, {"start": 3218.06, "end": 3218.8, "word": " بتتحرر", "probability": 0.94013671875}, {"start": 3218.8, "end": 3219.58, "word": " بيصير", "probability": 0.8275146484375}, {"start": 3219.58, "end": 3219.94, "word": " ..", "probability": 0.7119140625}, {"start": 3219.94, "end": 3221.4, "word": " can't", "probability": 0.5771484375}, {"start": 3221.4, "end": 3221.74, "word": " defense", "probability": 0.5224609375}, {"start": 3221.74, "end": 3221.92, "word": " ..", "probability": 0.767578125}, {"start": 3221.92, "end": 3222.28, "word": " can't", "probability": 0.59375}, {"start": 3222.28, "end": 3222.9, "word": " defense", "probability": 0.3720703125}, {"start": 3222.9, "end": 3223.14, "word": " our", "probability": 0.8095703125}, {"start": 3223.14, "end": 3223.8, "word": " body؟", "probability": 0.610107421875}, {"start": 3223.8, "end": 3224.12, "word": " it", "probability": 0.90380859375}, {"start": 3224.12, "end": 3224.58, "word": " cannot", "probability": 0.83349609375}, {"start": 3224.58, "end": 3225.76, "word": " وبالتالي", "probability": 0.87841796875}, {"start": 3225.76, "end": 3225.96, "word": " في", "probability": 0.43994140625}, {"start": 3225.96, "end": 3226.16, "word": " ..", "probability": 0.7763671875}, {"start": 3226.16, "end": 3226.54, "word": " بيصير", "probability": 0.9833984375}, {"start": 3226.54, "end": 3226.68, "word": " في", "probability": 0.80419921875}, {"start": 3226.68, "end": 3227.18, "word": " recurrent", "probability": 0.923828125}, {"start": 3227.18, "end": 3227.66, "word": " infection", "probability": 0.91259765625}], "temperature": 1.0}, {"id": 117, "seek": 325808, "start": 3229.18, "end": 3258.08, "text": "طبعا احنا بنحكي على سندروم بالاضافة لـ Recurrent Infection فيه Partial Coquinocotonous Albinism برضه فيه أيش؟ Albinism و Edens Body Granules are Decreased or 
Absent شايفين الصورة هذه معبرة جدا، ماشي؟ وهي هذه نفس الحكاية وهي Neutrophil لكن لسه شديد، بالعة لكن مش جادرة أيش؟", "tokens": [9566, 3555, 3615, 995, 1975, 5016, 8315, 44945, 5016, 4117, 1829, 15844, 8608, 41260, 2288, 20498, 20666, 46958, 31845, 3660, 5296, 39184, 9647, 374, 1753, 682, 1836, 313, 8978, 3224, 4100, 831, 3066, 29360, 905, 27794, 563, 967, 13496, 1434, 4724, 43042, 3224, 8978, 3224, 36632, 8592, 22807, 967, 13496, 1434, 4032, 3977, 694, 21329, 23554, 3473, 366, 12427, 265, 1937, 420, 5813, 317, 13412, 995, 33911, 9957, 31767, 13063, 3660, 29538, 20449, 3555, 25720, 10874, 28259, 12399, 3714, 33599, 1829, 22807, 37037, 1829, 29538, 8717, 36178, 21542, 4117, 995, 10632, 37037, 1829, 1734, 325, 11741, 388, 44381, 5296, 3794, 3224, 13412, 16254, 3215, 12399, 4724, 6027, 27884, 44381, 37893, 10874, 18513, 25720, 36632, 8592, 22807], "avg_logprob": -0.36698718152494514, "compression_ratio": 1.4229390681003584, "no_speech_prob": 0.0, "words": [{"start": 3229.18, "end": 3229.6, "word": "طبعا", "probability": 0.84368896484375}, {"start": 3229.6, "end": 3230.14, "word": " احنا", "probability": 0.9075520833333334}, {"start": 3230.14, "end": 3230.54, "word": " بنحكي", "probability": 0.8988037109375}, {"start": 3230.54, "end": 3230.66, "word": " على", "probability": 0.8330078125}, {"start": 3230.66, "end": 3231.26, "word": " سندروم", "probability": 0.639739990234375}, {"start": 3231.26, "end": 3232.0, "word": " بالاضافة", "probability": 0.93701171875}, {"start": 3232.0, "end": 3232.26, "word": " لـ", "probability": 0.36468505859375}, {"start": 3232.26, "end": 3232.6, "word": " Recurrent", "probability": 0.7715657552083334}, {"start": 3232.6, "end": 3233.08, "word": " Infection", "probability": 0.79638671875}, {"start": 3233.08, "end": 3233.72, "word": " فيه", "probability": 0.5550537109375}, {"start": 3233.72, "end": 3234.24, "word": " Partial", "probability": 0.62158203125}, {"start": 3234.24, "end": 3235.7, "word": " Coquinocotonous", "probability": 
0.44736328125}, {"start": 3235.7, "end": 3236.36, "word": " Albinism", "probability": 0.79345703125}, {"start": 3236.36, "end": 3237.04, "word": " برضه", "probability": 0.9064127604166666}, {"start": 3237.04, "end": 3237.3, "word": " فيه", "probability": 0.950927734375}, {"start": 3237.3, "end": 3238.04, "word": " أيش؟", "probability": 0.4756673177083333}, {"start": 3238.04, "end": 3238.7, "word": " Albinism", "probability": 0.8292643229166666}, {"start": 3238.7, "end": 3239.34, "word": " و", "probability": 0.87353515625}, {"start": 3239.34, "end": 3239.6, "word": " Edens", "probability": 0.436859130859375}, {"start": 3239.6, "end": 3239.9, "word": " Body", "probability": 0.300537109375}, {"start": 3239.9, "end": 3240.6, "word": " Granules", "probability": 0.591796875}, {"start": 3240.6, "end": 3241.34, "word": " are", "probability": 0.75830078125}, {"start": 3241.34, "end": 3241.94, "word": " Decreased", "probability": 0.73095703125}, {"start": 3241.94, "end": 3242.16, "word": " or", "probability": 0.70263671875}, {"start": 3242.16, "end": 3242.62, "word": " Absent", "probability": 0.94677734375}, {"start": 3242.62, "end": 3243.5, "word": " شايفين", "probability": 0.88623046875}, {"start": 3243.5, "end": 3244.52, "word": " الصورة", "probability": 0.9588216145833334}, {"start": 3244.52, "end": 3244.7, "word": " هذه", "probability": 0.49169921875}, {"start": 3244.7, "end": 3245.12, "word": " معبرة", "probability": 0.78515625}, {"start": 3245.12, "end": 3245.84, "word": " جدا،", "probability": 0.6258544921875}, {"start": 3245.84, "end": 3246.74, "word": " ماشي؟", "probability": 0.790771484375}, {"start": 3246.74, "end": 3247.02, "word": " وهي", "probability": 0.730712890625}, {"start": 3247.02, "end": 3247.24, "word": " هذه", "probability": 0.81005859375}, {"start": 3247.24, "end": 3247.46, "word": " نفس", "probability": 0.996337890625}, {"start": 3247.46, "end": 3248.24, "word": " الحكاية", "probability": 0.73870849609375}, {"start": 3248.24, "end": 3254.08, "word": 
" وهي", "probability": 0.8359375}, {"start": 3254.08, "end": 3254.78, "word": " Neutrophil", "probability": 0.75848388671875}, {"start": 3254.78, "end": 3255.64, "word": " لكن", "probability": 0.63427734375}, {"start": 3255.64, "end": 3255.98, "word": " لسه", "probability": 0.8466796875}, {"start": 3255.98, "end": 3256.46, "word": " شديد،", "probability": 0.68084716796875}, {"start": 3256.46, "end": 3256.82, "word": " بالعة", "probability": 0.5552571614583334}, {"start": 3256.82, "end": 3257.08, "word": " لكن", "probability": 0.85498046875}, {"start": 3257.08, "end": 3257.26, "word": " مش", "probability": 0.9873046875}, {"start": 3257.26, "end": 3257.58, "word": " جادرة", "probability": 0.9669596354166666}, {"start": 3257.58, "end": 3258.08, "word": " أيش؟", "probability": 0.7893880208333334}], "temperature": 1.0}, {"id": 118, "seek": 328522, "start": 3262.02, "end": 3285.22, "text": "Trombocytopenia with absent radius syndrome برضه في ال 51 وصفوه بيتميز بغياب ال radius او ال radii تعرف ال radius وين في الجسم؟ اه في ال ايد اضمة من اضمة", "tokens": [14252, 3548, 905, 4328, 15752, 654, 365, 25185, 15845, 19371, 4724, 43042, 3224, 8978, 2423, 18485, 4032, 9381, 5172, 2407, 3224, 4724, 36081, 2304, 1829, 11622, 4724, 17082, 1829, 16758, 2423, 15845, 1975, 2407, 2423, 2843, 5597, 6055, 3615, 28480, 2423, 15845, 4032, 9957, 8978, 25724, 38251, 22807, 1975, 3224, 8978, 2423, 1975, 25708, 1975, 11242, 46007, 9154, 1975, 11242, 46007], "avg_logprob": -0.42363911001913007, "compression_ratio": 1.4038461538461537, "no_speech_prob": 0.0, "words": [{"start": 3262.02, "end": 3262.78, "word": "Trombocytopenia", "probability": 0.7522379557291666}, {"start": 3262.78, "end": 3262.88, "word": " with", "probability": 0.802734375}, {"start": 3262.88, "end": 3263.18, "word": " absent", "probability": 0.497314453125}, {"start": 3263.18, "end": 3263.84, "word": " radius", "probability": 0.60546875}, {"start": 3263.84, "end": 3265.14, "word": " syndrome", "probability": 0.323974609375}, 
{"start": 3265.14, "end": 3267.76, "word": " برضه", "probability": 0.6820882161458334}, {"start": 3267.76, "end": 3267.94, "word": " في", "probability": 0.69482421875}, {"start": 3267.94, "end": 3268.38, "word": " ال", "probability": 0.6826171875}, {"start": 3268.38, "end": 3268.66, "word": " 51", "probability": 0.57421875}, {"start": 3268.66, "end": 3270.4, "word": " وصفوه", "probability": 0.83076171875}, {"start": 3270.4, "end": 3274.32, "word": " بيتميز", "probability": 0.7342529296875}, {"start": 3274.32, "end": 3275.04, "word": " بغياب", "probability": 0.9619140625}, {"start": 3275.04, "end": 3275.22, "word": " ال", "probability": 0.9306640625}, {"start": 3275.22, "end": 3275.72, "word": " radius", "probability": 0.60888671875}, {"start": 3275.72, "end": 3276.14, "word": " او", "probability": 0.78466796875}, {"start": 3276.14, "end": 3276.24, "word": " ال", "probability": 0.89990234375}, {"start": 3276.24, "end": 3276.86, "word": " radii", "probability": 0.629638671875}, {"start": 3276.86, "end": 3277.64, "word": " تعرف", "probability": 0.68365478515625}, {"start": 3277.64, "end": 3277.78, "word": " ال", "probability": 0.9248046875}, {"start": 3277.78, "end": 3278.08, "word": " radius", "probability": 0.81689453125}, {"start": 3278.08, "end": 3278.36, "word": " وين", "probability": 0.8173828125}, {"start": 3278.36, "end": 3278.5, "word": " في", "probability": 0.51513671875}, {"start": 3278.5, "end": 3280.24, "word": " الجسم؟", "probability": 0.7450358072916666}, {"start": 3280.24, "end": 3280.56, "word": " اه", "probability": 0.3599853515625}, {"start": 3280.56, "end": 3280.56, "word": " في", "probability": 0.8525390625}, {"start": 3280.56, "end": 3280.94, "word": " ال", "probability": 0.65869140625}, {"start": 3280.94, "end": 3281.16, "word": " ايد", "probability": 0.60968017578125}, {"start": 3281.16, "end": 3284.88, "word": " اضمة", "probability": 0.5365804036458334}, {"start": 3284.88, "end": 3285.0, "word": " من", "probability": 0.986328125}, {"start": 
3285.0, "end": 3285.22, "word": " اضمة", "probability": 0.94970703125}], "temperature": 1.0}, {"id": 119, "seek": 331906, "start": 3291.08, "end": 3319.06, "text": "فبتصير الإيد مشبوكة مباشرة بالفرقة، كفة الإيد تصير إيه؟ مشبوكة مباشرة في إيه؟ طبعا من إضافة لها ده بحكي على السندرومشفع، لجيت حاجبكوا صغر، فيه thrombocytopenia، فيه storage pool disease، فيه إيش؟ GI and cardiovascular system abnormality ال etiology is unknown، unclear لغاية الآن", "tokens": [5172, 3555, 2655, 9381, 13546, 33688, 25708, 37893, 3555, 2407, 4117, 3660, 3714, 3555, 33599, 25720, 20666, 5172, 2288, 28671, 12399, 9122, 5172, 3660, 33688, 25708, 6055, 9381, 13546, 11933, 1829, 3224, 22807, 37893, 3555, 2407, 4117, 3660, 3714, 3555, 33599, 25720, 8978, 11933, 1829, 3224, 22807, 23032, 3555, 3615, 995, 9154, 11933, 11242, 31845, 3660, 5296, 11296, 11778, 3224, 4724, 5016, 4117, 1829, 15844, 21136, 41260, 2288, 20498, 8592, 5172, 3615, 12399, 5296, 7435, 36081, 11331, 26108, 3555, 4117, 14407, 20328, 17082, 2288, 12399, 8978, 3224, 739, 3548, 905, 4328, 15752, 654, 12399, 8978, 3224, 6725, 7005, 4752, 12399, 8978, 3224, 11933, 1829, 8592, 22807, 26634, 293, 31786, 1185, 47104, 1860, 2423, 1030, 46457, 307, 9841, 12399, 25636, 5296, 17082, 995, 10632, 6024, 48506], "avg_logprob": -0.2929067446125878, "compression_ratio": 1.5592592592592593, "no_speech_prob": 0.0, "words": [{"start": 3291.08, "end": 3291.58, "word": "فبتصير", "probability": 0.9291015625}, {"start": 3291.58, "end": 3292.08, "word": " الإيد", "probability": 0.71142578125}, {"start": 3292.08, "end": 3293.38, "word": " مشبوكة", "probability": 0.9455078125}, {"start": 3293.38, "end": 3293.88, "word": " مباشرة", "probability": 0.9923095703125}, {"start": 3293.88, "end": 3295.22, "word": " بالفرقة،", "probability": 0.75849609375}, {"start": 3295.22, "end": 3295.9, "word": " كفة", "probability": 0.95947265625}, {"start": 3295.9, "end": 3296.38, "word": " الإيد", "probability": 0.973388671875}, {"start": 3296.38, "end": 3296.94, "word": " 
تصير", "probability": 0.7010904947916666}, {"start": 3296.94, "end": 3297.4, "word": " إيه؟", "probability": 0.70947265625}, {"start": 3297.4, "end": 3298.3, "word": " مشبوكة", "probability": 0.98525390625}, {"start": 3298.3, "end": 3298.94, "word": " مباشرة", "probability": 0.9927978515625}, {"start": 3298.94, "end": 3299.28, "word": " في", "probability": 0.65380859375}, {"start": 3299.28, "end": 3300.06, "word": " إيه؟", "probability": 0.88916015625}, {"start": 3300.06, "end": 3301.98, "word": " طبعا", "probability": 0.875244140625}, {"start": 3301.98, "end": 3302.1, "word": " من", "probability": 0.53271484375}, {"start": 3302.1, "end": 3302.52, "word": " إضافة", "probability": 0.937744140625}, {"start": 3302.52, "end": 3302.74, "word": " لها", "probability": 0.814697265625}, {"start": 3302.74, "end": 3302.94, "word": " ده", "probability": 0.671875}, {"start": 3302.94, "end": 3303.34, "word": " بحكي", "probability": 0.693115234375}, {"start": 3303.34, "end": 3303.44, "word": " على", "probability": 0.75}, {"start": 3303.44, "end": 3304.14, "word": " السندرومشفع،", "probability": 0.48846435546875}, {"start": 3304.14, "end": 3304.32, "word": " لجيت", "probability": 0.6525065104166666}, {"start": 3304.32, "end": 3304.72, "word": " حاجبكوا", "probability": 0.677734375}, {"start": 3304.72, "end": 3305.9, "word": " صغر،", "probability": 0.83251953125}, {"start": 3305.9, "end": 3306.1, "word": " فيه", "probability": 0.565673828125}, {"start": 3306.1, "end": 3307.68, "word": " thrombocytopenia،", "probability": 0.8253871372767857}, {"start": 3307.68, "end": 3307.84, "word": " فيه", "probability": 0.938232421875}, {"start": 3307.84, "end": 3308.18, "word": " storage", "probability": 0.76025390625}, {"start": 3308.18, "end": 3308.42, "word": " pool", "probability": 0.25341796875}, {"start": 3308.42, "end": 3309.64, "word": " disease،", "probability": 0.9072265625}, {"start": 3309.64, "end": 3310.06, "word": " فيه", "probability": 0.96044921875}, {"start": 3310.06, "end": 
3311.3, "word": " إيش؟", "probability": 0.74090576171875}, {"start": 3311.3, "end": 3311.72, "word": " GI", "probability": 0.666015625}, {"start": 3311.72, "end": 3312.36, "word": " and", "probability": 0.6533203125}, {"start": 3312.36, "end": 3312.88, "word": " cardiovascular", "probability": 0.80810546875}, {"start": 3312.88, "end": 3313.6, "word": " system", "probability": 0.89697265625}, {"start": 3313.6, "end": 3314.34, "word": " abnormality", "probability": 0.728515625}, {"start": 3314.34, "end": 3315.74, "word": " ال", "probability": 0.8486328125}, {"start": 3315.74, "end": 3316.4, "word": " etiology", "probability": 0.653076171875}, {"start": 3316.4, "end": 3317.32, "word": " is", "probability": 0.95751953125}, {"start": 3317.32, "end": 3317.92, "word": " unknown،", "probability": 0.7900390625}, {"start": 3317.92, "end": 3318.26, "word": " unclear", "probability": 0.81201171875}, {"start": 3318.26, "end": 3318.76, "word": " لغاية", "probability": 0.978515625}, {"start": 3318.76, "end": 3319.06, "word": " الآن", "probability": 0.78076171875}], "temperature": 1.0}, {"id": 120, "seek": 333957, "start": 3319.83, "end": 3339.57, "text": "لكن في hemorrhage هو اللي بيأدي للوفاة في أغلب الأحيان and the prognosis is good إذا كان عاش لمدة سنتين فمفوق شايفين الشباب؟ في غياب لمين؟ لل four arms تقريبا لل radius", "tokens": [1211, 19452, 8978, 8636, 24362, 71, 609, 31439, 13672, 1829, 4724, 1829, 10721, 16254, 24976, 38688, 995, 3660, 8978, 5551, 17082, 46152, 16247, 5016, 1829, 7649, 293, 264, 447, 4568, 8211, 307, 665, 11933, 15730, 25961, 6225, 33599, 32767, 41891, 8608, 29399, 9957, 6156, 2304, 5172, 30543, 13412, 995, 33911, 9957, 25124, 3555, 16758, 22807, 8978, 32771, 1829, 16758, 32767, 9957, 22807, 24976, 1451, 5812, 6055, 4587, 16572, 3555, 995, 24976, 15845], "avg_logprob": -0.28574485321567483, "compression_ratio": 1.4054054054054055, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3319.83, "end": 3320.15, "word": "لكن", "probability": 
0.806884765625}, {"start": 3320.15, "end": 3320.31, "word": " في", "probability": 0.7646484375}, {"start": 3320.31, "end": 3320.91, "word": " hemorrhage", "probability": 0.717041015625}, {"start": 3320.91, "end": 3321.51, "word": " هو", "probability": 0.74658203125}, {"start": 3321.51, "end": 3321.71, "word": " اللي", "probability": 0.71826171875}, {"start": 3321.71, "end": 3322.05, "word": " بيأدي", "probability": 0.514801025390625}, {"start": 3322.05, "end": 3322.71, "word": " للوفاة", "probability": 0.7471923828125}, {"start": 3322.71, "end": 3322.79, "word": " في", "probability": 0.89794921875}, {"start": 3322.79, "end": 3323.03, "word": " أغلب", "probability": 0.9713541666666666}, {"start": 3323.03, "end": 3323.65, "word": " الأحيان", "probability": 0.954833984375}, {"start": 3323.65, "end": 3324.93, "word": " and", "probability": 0.650390625}, {"start": 3324.93, "end": 3325.13, "word": " the", "probability": 0.31298828125}, {"start": 3325.13, "end": 3325.83, "word": " prognosis", "probability": 0.8806966145833334}, {"start": 3325.83, "end": 3326.17, "word": " is", "probability": 0.9501953125}, {"start": 3326.17, "end": 3326.65, "word": " good", "probability": 0.9130859375}, {"start": 3326.65, "end": 3327.85, "word": " إذا", "probability": 0.870361328125}, {"start": 3327.85, "end": 3328.13, "word": " كان", "probability": 0.92041015625}, {"start": 3328.13, "end": 3328.51, "word": " عاش", "probability": 0.979248046875}, {"start": 3328.51, "end": 3329.03, "word": " لمدة", "probability": 0.916015625}, {"start": 3329.03, "end": 3329.57, "word": " سنتين", "probability": 0.9646809895833334}, {"start": 3329.57, "end": 3330.29, "word": " فمفوق", "probability": 0.7093505859375}, {"start": 3330.29, "end": 3331.37, "word": " شايفين", "probability": 0.8538818359375}, {"start": 3331.37, "end": 3332.95, "word": " الشباب؟", "probability": 0.73406982421875}, {"start": 3332.95, "end": 3334.53, "word": " في", "probability": 0.339111328125}, {"start": 3334.53, "end": 3335.11, 
"word": " غياب", "probability": 0.86474609375}, {"start": 3335.11, "end": 3336.37, "word": " لمين؟", "probability": 0.7354329427083334}, {"start": 3336.37, "end": 3337.67, "word": " لل", "probability": 0.52294921875}, {"start": 3337.67, "end": 3337.93, "word": " four", "probability": 0.7373046875}, {"start": 3337.93, "end": 3338.25, "word": " arms", "probability": 0.6953125}, {"start": 3338.25, "end": 3338.83, "word": " تقريبا", "probability": 0.98837890625}, {"start": 3338.83, "end": 3339.11, "word": " لل", "probability": 0.64453125}, {"start": 3339.11, "end": 3339.57, "word": " radius", "probability": 0.8359375}], "temperature": 1.0}, {"id": 121, "seek": 337551, "start": 3351.69, "end": 3375.51, "text": "طيب storage pool disease or disorders، typical laboratory finding ماذا نرى، ماذا نجد from laboratory point of view، نبرى واحد usually normal platelet count، platelet موضوع طبيعي أو طبيعي، احنا مابنحكيش على platelet، مابنحكيش على جدراني او بالفكرة، morphology is variable، لأنه في خنال فيليجرامي", "tokens": [9566, 1829, 3555, 6725, 7005, 4752, 420, 20261, 12399, 7476, 16523, 5006, 3714, 45636, 995, 8717, 2288, 7578, 12399, 3714, 45636, 995, 8717, 7435, 3215, 490, 16523, 935, 295, 1910, 12399, 8717, 26890, 7578, 36764, 24401, 2673, 2710, 3403, 15966, 1207, 12399, 3403, 15966, 3714, 2407, 11242, 45367, 23032, 21292, 3615, 1829, 34051, 23032, 21292, 3615, 1829, 12399, 1975, 5016, 8315, 3714, 16758, 1863, 5016, 4117, 1829, 8592, 15844, 3403, 15966, 12399, 3714, 16758, 1863, 5016, 4117, 1829, 8592, 15844, 10874, 3215, 2288, 7649, 1829, 1975, 2407, 20666, 5172, 4117, 25720, 12399, 25778, 1793, 307, 7006, 12399, 5296, 33456, 3224, 8978, 16490, 1863, 6027, 8978, 20292, 7435, 2288, 10943, 1829], "avg_logprob": -0.3268581064971718, "compression_ratio": 1.578125, "no_speech_prob": 0.0, "words": [{"start": 3351.69, "end": 3352.03, "word": "طيب", "probability": 0.595489501953125}, {"start": 3352.03, "end": 3352.49, "word": " storage", "probability": 0.4375}, {"start": 3352.49, 
"end": 3352.75, "word": " pool", "probability": 0.6884765625}, {"start": 3352.75, "end": 3353.21, "word": " disease", "probability": 0.8154296875}, {"start": 3353.21, "end": 3353.61, "word": " or", "probability": 0.57763671875}, {"start": 3353.61, "end": 3355.61, "word": " disorders،", "probability": 0.58648681640625}, {"start": 3355.61, "end": 3356.13, "word": " typical", "probability": 0.6787109375}, {"start": 3356.13, "end": 3356.57, "word": " laboratory", "probability": 0.8564453125}, {"start": 3356.57, "end": 3357.13, "word": " finding", "probability": 0.630859375}, {"start": 3357.13, "end": 3357.57, "word": " ماذا", "probability": 0.8248697916666666}, {"start": 3357.57, "end": 3358.17, "word": " نرى،", "probability": 0.7674560546875}, {"start": 3358.17, "end": 3358.55, "word": " ماذا", "probability": 0.96484375}, {"start": 3358.55, "end": 3359.03, "word": " نجد", "probability": 0.9578450520833334}, {"start": 3359.03, "end": 3359.79, "word": " from", "probability": 0.356201171875}, {"start": 3359.79, "end": 3360.27, "word": " laboratory", "probability": 0.888671875}, {"start": 3360.27, "end": 3360.65, "word": " point", "probability": 0.9794921875}, {"start": 3360.65, "end": 3360.83, "word": " of", "probability": 0.92578125}, {"start": 3360.83, "end": 3362.27, "word": " view،", "probability": 0.636474609375}, {"start": 3362.27, "end": 3362.51, "word": " نبرى", "probability": 0.7154134114583334}, {"start": 3362.51, "end": 3362.75, "word": " واحد", "probability": 0.988037109375}, {"start": 3362.75, "end": 3363.05, "word": " usually", "probability": 0.7568359375}, {"start": 3363.05, "end": 3363.65, "word": " normal", "probability": 0.87939453125}, {"start": 3363.65, "end": 3364.09, "word": " platelet", "probability": 0.851318359375}, {"start": 3364.09, "end": 3365.69, "word": " count،", "probability": 0.8876953125}, {"start": 3365.69, "end": 3366.45, "word": " platelet", "probability": 0.5731201171875}, {"start": 3366.45, "end": 3366.85, "word": " موضوع", 
"probability": 0.74560546875}, {"start": 3366.85, "end": 3367.35, "word": " طبيعي", "probability": 0.822021484375}, {"start": 3367.35, "end": 3367.47, "word": " أو", "probability": 0.560546875}, {"start": 3367.47, "end": 3367.95, "word": " طبيعي،", "probability": 0.87392578125}, {"start": 3367.95, "end": 3368.07, "word": " احنا", "probability": 0.9039713541666666}, {"start": 3368.07, "end": 3368.43, "word": " مابنحكيش", "probability": 0.8680943080357143}, {"start": 3368.43, "end": 3368.57, "word": " على", "probability": 0.83203125}, {"start": 3368.57, "end": 3369.09, "word": " platelet،", "probability": 0.731201171875}, {"start": 3369.09, "end": 3369.33, "word": " مابنحكيش", "probability": 0.8741978236607143}, {"start": 3369.33, "end": 3369.35, "word": " على", "probability": 0.92041015625}, {"start": 3369.35, "end": 3369.91, "word": " جدراني", "probability": 0.7685546875}, {"start": 3369.91, "end": 3370.01, "word": " او", "probability": 0.55059814453125}, {"start": 3370.01, "end": 3371.59, "word": " بالفكرة،", "probability": 0.71595458984375}, {"start": 3371.59, "end": 3372.21, "word": " morphology", "probability": 0.9189453125}, {"start": 3372.21, "end": 3372.51, "word": " is", "probability": 0.966796875}, {"start": 3372.51, "end": 3373.99, "word": " variable،", "probability": 0.69970703125}, {"start": 3373.99, "end": 3374.39, "word": " لأنه", "probability": 0.7916666666666666}, {"start": 3374.39, "end": 3374.53, "word": " في", "probability": 0.85498046875}, {"start": 3374.53, "end": 3374.87, "word": " خنال", "probability": 0.613525390625}, {"start": 3374.87, "end": 3375.51, "word": " فيليجرامي", "probability": 0.70941162109375}], "temperature": 1.0}, {"id": 122, "seek": 340153, "start": 3376.57, "end": 3401.53, "text": "Platelet aggregation shows primary wave لكن في غياب للsecondary wave طبيعي قليلة ما زادتش ما زادتش يا شباب احنا قولنا ال primary بتكون كمية ال agonist قليل وبالتالي بتعمل primary wave", "tokens": [47, 14087, 15966, 16743, 399, 3110, 6194, 5772, 
44381, 8978, 32771, 1829, 16758, 24976, 27375, 822, 5772, 23032, 21292, 3615, 1829, 12174, 1211, 26895, 3660, 19446, 30767, 18513, 2655, 8592, 19446, 30767, 18513, 2655, 8592, 35186, 13412, 3555, 16758, 1975, 5016, 8315, 12174, 12610, 8315, 2423, 6194, 39894, 30544, 9122, 2304, 10632, 2423, 623, 266, 468, 12174, 20292, 1211, 46599, 6027, 2655, 6027, 1829, 39894, 25957, 1211, 6194, 5772], "avg_logprob": -0.2886160650423595, "compression_ratio": 1.502857142857143, "no_speech_prob": 0.0, "words": [{"start": 3376.57, "end": 3377.05, "word": "Platelet", "probability": 0.6558430989583334}, {"start": 3377.05, "end": 3377.85, "word": " aggregation", "probability": 0.6932373046875}, {"start": 3377.85, "end": 3378.41, "word": " shows", "probability": 0.496826171875}, {"start": 3378.41, "end": 3379.19, "word": " primary", "probability": 0.79931640625}, {"start": 3379.19, "end": 3380.11, "word": " wave", "probability": 0.87841796875}, {"start": 3380.11, "end": 3380.99, "word": " لكن", "probability": 0.8544921875}, {"start": 3380.99, "end": 3381.21, "word": " في", "probability": 0.8681640625}, {"start": 3381.21, "end": 3381.55, "word": " غياب", "probability": 0.93310546875}, {"start": 3381.55, "end": 3382.27, "word": " للsecondary", "probability": 0.5953776041666666}, {"start": 3382.27, "end": 3382.53, "word": " wave", "probability": 0.94140625}, {"start": 3382.53, "end": 3383.07, "word": " طبيعي", "probability": 0.8682861328125}, {"start": 3383.07, "end": 3383.75, "word": " قليلة", "probability": 0.53509521484375}, {"start": 3383.75, "end": 3385.57, "word": " ما", "probability": 0.07769775390625}, {"start": 3385.57, "end": 3391.31, "word": " زادتش", "probability": 0.895751953125}, {"start": 3391.31, "end": 3391.85, "word": " ما", "probability": 0.3896484375}, {"start": 3391.85, "end": 3392.41, "word": " زادتش", "probability": 0.88916015625}, {"start": 3392.41, "end": 3392.53, "word": " يا", "probability": 0.70068359375}, {"start": 3392.53, "end": 3392.85, "word": " شباب", 
"probability": 0.9871419270833334}, {"start": 3392.85, "end": 3393.51, "word": " احنا", "probability": 0.88037109375}, {"start": 3393.51, "end": 3393.89, "word": " قولنا", "probability": 0.8515625}, {"start": 3393.89, "end": 3394.43, "word": " ال", "probability": 0.427490234375}, {"start": 3394.43, "end": 3394.87, "word": " primary", "probability": 0.8837890625}, {"start": 3394.87, "end": 3395.29, "word": " بتكون", "probability": 0.94677734375}, {"start": 3395.29, "end": 3395.69, "word": " كمية", "probability": 0.982421875}, {"start": 3395.69, "end": 3395.93, "word": " ال", "probability": 0.87841796875}, {"start": 3395.93, "end": 3396.47, "word": " agonist", "probability": 0.7482096354166666}, {"start": 3396.47, "end": 3397.81, "word": " قليل", "probability": 0.8069661458333334}, {"start": 3397.81, "end": 3399.09, "word": " وبالتالي", "probability": 0.9185546875}, {"start": 3399.09, "end": 3400.01, "word": " بتعمل", "probability": 0.8186848958333334}, {"start": 3400.01, "end": 3401.03, "word": " primary", "probability": 0.92236328125}, {"start": 3401.03, "end": 3401.53, "word": " wave", "probability": 0.962890625}], "temperature": 1.0}, {"id": 123, "seek": 343126, "start": 3402.34, "end": 3431.26, "text": "لكن يوم ما يصير complete دي granulation بيصير secondary wave طب مافيشش granules، فى secondary wave؟ مافيش secondary wave and it's طبعا ماهو stimulated by ADP, Epinephrine, and Arachidonic acid platelet aggregation with thrombin is usually normal and resucitin agglutination is also normal كيف نشخص؟ قالوا we measure whole platelet count", "tokens": [1211, 19452, 7251, 20498, 19446, 7251, 9381, 13546, 3566, 11778, 1829, 9370, 2776, 4724, 1829, 9381, 13546, 11396, 5772, 23032, 3555, 19446, 41185, 8592, 8592, 9370, 3473, 12399, 6156, 7578, 11396, 5772, 22807, 19446, 41185, 8592, 11396, 5772, 293, 309, 311, 23032, 3555, 3615, 995, 19446, 3224, 2407, 8983, 6987, 538, 9135, 47, 11, 9970, 533, 950, 15140, 11, 293, 1587, 608, 327, 11630, 8258, 3403, 15966, 16743, 399, 365, 
739, 3548, 259, 307, 2673, 2710, 293, 725, 1311, 270, 259, 623, 7191, 325, 2486, 307, 611, 2710, 9122, 33911, 8717, 8592, 9778, 9381, 22807, 50239, 14407, 321, 3481, 1379, 3403, 15966, 1207], "avg_logprob": -0.41436298134235233, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 3402.34, "end": 3402.8, "word": "لكن", "probability": 0.6846923828125}, {"start": 3402.8, "end": 3403.12, "word": " يوم", "probability": 0.739013671875}, {"start": 3403.12, "end": 3403.24, "word": " ما", "probability": 0.30810546875}, {"start": 3403.24, "end": 3403.44, "word": " يصير", "probability": 0.869140625}, {"start": 3403.44, "end": 3403.9, "word": " complete", "probability": 0.264892578125}, {"start": 3403.9, "end": 3404.12, "word": " دي", "probability": 0.49053955078125}, {"start": 3404.12, "end": 3404.9, "word": " granulation", "probability": 0.73681640625}, {"start": 3404.9, "end": 3405.3, "word": " بيصير", "probability": 0.73760986328125}, {"start": 3405.3, "end": 3405.76, "word": " secondary", "probability": 0.556640625}, {"start": 3405.76, "end": 3406.12, "word": " wave", "probability": 0.146484375}, {"start": 3406.12, "end": 3406.38, "word": " طب", "probability": 0.947509765625}, {"start": 3406.38, "end": 3406.86, "word": " مافيشش", "probability": 0.74688720703125}, {"start": 3406.86, "end": 3408.68, "word": " granules،", "probability": 0.48828125}, {"start": 3408.68, "end": 3408.82, "word": " فى", "probability": 0.5364990234375}, {"start": 3408.82, "end": 3409.2, "word": " secondary", "probability": 0.92822265625}, {"start": 3409.2, "end": 3409.8, "word": " wave؟", "probability": 0.659912109375}, {"start": 3409.8, "end": 3410.2, "word": " مافيش", "probability": 0.732666015625}, {"start": 3410.2, "end": 3410.56, "word": " secondary", "probability": 0.91796875}, {"start": 3410.56, "end": 3410.86, "word": " wave", "probability": 0.9267578125}, {"start": 3410.86, "end": 3412.2, "word": " and", "probability": 0.388916015625}, {"start": 3412.2, "end": 3413.68, 
"word": " it's", "probability": 0.880126953125}, {"start": 3413.68, "end": 3414.44, "word": " طبعا", "probability": 0.91845703125}, {"start": 3414.44, "end": 3414.6, "word": " ماهو", "probability": 0.4060465494791667}, {"start": 3414.6, "end": 3415.04, "word": " stimulated", "probability": 0.662841796875}, {"start": 3415.04, "end": 3415.3, "word": " by", "probability": 0.892578125}, {"start": 3415.3, "end": 3415.76, "word": " ADP,", "probability": 0.856689453125}, {"start": 3415.88, "end": 3416.22, "word": " Epinephrine,", "probability": 0.736419677734375}, {"start": 3416.3, "end": 3416.36, "word": " and", "probability": 0.31103515625}, {"start": 3416.36, "end": 3416.7, "word": " Arachidonic", "probability": 0.7027587890625}, {"start": 3416.7, "end": 3417.12, "word": " acid", "probability": 0.54150390625}, {"start": 3417.12, "end": 3418.56, "word": " platelet", "probability": 0.5887451171875}, {"start": 3418.56, "end": 3419.18, "word": " aggregation", "probability": 0.920166015625}, {"start": 3419.18, "end": 3419.4, "word": " with", "probability": 0.76708984375}, {"start": 3419.4, "end": 3419.88, "word": " thrombin", "probability": 0.7677408854166666}, {"start": 3419.88, "end": 3420.1, "word": " is", "probability": 0.90087890625}, {"start": 3420.1, "end": 3420.5, "word": " usually", "probability": 0.88818359375}, {"start": 3420.5, "end": 3421.12, "word": " normal", "probability": 0.87744140625}, {"start": 3421.12, "end": 3423.24, "word": " and", "probability": 0.79833984375}, {"start": 3423.24, "end": 3423.92, "word": " resucitin", "probability": 0.5521240234375}, {"start": 3423.92, "end": 3424.78, "word": " agglutination", "probability": 0.9761962890625}, {"start": 3424.78, "end": 3425.26, "word": " is", "probability": 0.96728515625}, {"start": 3425.26, "end": 3425.8, "word": " also", "probability": 0.87548828125}, {"start": 3425.8, "end": 3426.62, "word": " normal", "probability": 0.8662109375}, {"start": 3426.62, "end": 3427.82, "word": " كيف", "probability": 
0.972412109375}, {"start": 3427.82, "end": 3428.8, "word": " نشخص؟", "probability": 0.84423828125}, {"start": 3428.8, "end": 3429.22, "word": " قالوا", "probability": 0.71923828125}, {"start": 3429.22, "end": 3429.76, "word": " we", "probability": 0.5615234375}, {"start": 3429.76, "end": 3430.08, "word": " measure", "probability": 0.95361328125}, {"start": 3430.08, "end": 3430.34, "word": " whole", "probability": 0.771484375}, {"start": 3430.34, "end": 3430.78, "word": " platelet", "probability": 0.884765625}, {"start": 3430.78, "end": 3431.26, "word": " count", "probability": 0.87255859375}], "temperature": 1.0}, {"id": 124, "seek": 345913, "start": 3432.91, "end": 3459.13, "text": "ونقوم بتقرير المقارنة بين ATP وADP المفروض تكون 2.5 ل 1 لكن في الـ storage pool disease تكون أكتر من 3 إلى 1 في الغالب هذا بيكون موجود مع اخر مخاطر زي ما قلنا اللي هي شيرديا كيجاشي سندروم او harm sky bottling syndrome", "tokens": [2407, 1863, 4587, 20498, 39894, 4587, 16572, 2288, 9673, 4587, 9640, 1863, 3660, 49374, 39202, 4032, 6112, 47, 9673, 5172, 32887, 11242, 6055, 30544, 568, 13, 20, 5296, 502, 44381, 8978, 2423, 39184, 6725, 7005, 4752, 6055, 30544, 5551, 4117, 2655, 2288, 9154, 805, 30731, 502, 8978, 6024, 118, 6027, 3555, 23758, 4724, 1829, 30544, 3714, 29245, 23328, 20449, 1975, 34740, 3714, 9778, 41193, 2288, 30767, 1829, 19446, 12174, 1211, 8315, 13672, 1829, 39896, 13412, 13546, 3215, 25528, 9122, 1829, 7435, 33599, 1829, 8608, 41260, 2288, 20498, 1975, 2407, 6491, 5443, 2274, 1688, 19371], "avg_logprob": -0.5200657919833535, "compression_ratio": 1.396694214876033, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 3432.91, "end": 3433.75, "word": "ونقوم", "probability": 0.394744873046875}, {"start": 3433.75, "end": 3433.85, "word": " بتقرير", "probability": 0.29339599609375}, {"start": 3433.85, "end": 3434.97, "word": " المقارنة", "probability": 0.5401611328125}, {"start": 3434.97, "end": 3435.71, "word": " بين", "probability": 0.496337890625}, {"start": 
3435.71, "end": 3436.25, "word": " ATP", "probability": 0.58544921875}, {"start": 3436.25, "end": 3437.43, "word": " وADP", "probability": 0.668701171875}, {"start": 3437.43, "end": 3438.91, "word": " المفروض", "probability": 0.9537353515625}, {"start": 3438.91, "end": 3439.83, "word": " تكون", "probability": 0.6328125}, {"start": 3439.83, "end": 3439.97, "word": " 2", "probability": 0.50537109375}, {"start": 3439.97, "end": 3440.25, "word": ".5", "probability": 0.906005859375}, {"start": 3440.25, "end": 3440.39, "word": " ل", "probability": 0.298828125}, {"start": 3440.39, "end": 3440.79, "word": " 1", "probability": 0.7099609375}, {"start": 3440.79, "end": 3441.57, "word": " لكن", "probability": 0.59423828125}, {"start": 3441.57, "end": 3441.83, "word": " في", "probability": 0.8828125}, {"start": 3441.83, "end": 3441.93, "word": " الـ", "probability": 0.331298828125}, {"start": 3441.93, "end": 3442.29, "word": " storage", "probability": 0.304931640625}, {"start": 3442.29, "end": 3442.51, "word": " pool", "probability": 0.425048828125}, {"start": 3442.51, "end": 3442.89, "word": " disease", "probability": 0.8857421875}, {"start": 3442.89, "end": 3443.43, "word": " تكون", "probability": 0.656494140625}, {"start": 3443.43, "end": 3443.95, "word": " أكتر", "probability": 0.866455078125}, {"start": 3443.95, "end": 3444.15, "word": " من", "probability": 0.9912109375}, {"start": 3444.15, "end": 3444.61, "word": " 3", "probability": 0.78955078125}, {"start": 3444.61, "end": 3445.87, "word": " إلى", "probability": 0.63427734375}, {"start": 3445.87, "end": 3446.39, "word": " 1", "probability": 0.98193359375}, {"start": 3446.39, "end": 3448.17, "word": " في", "probability": 0.475341796875}, {"start": 3448.17, "end": 3448.49, "word": " الغالب", "probability": 0.929931640625}, {"start": 3448.49, "end": 3448.69, "word": " هذا", "probability": 0.72119140625}, {"start": 3448.69, "end": 3449.05, "word": " بيكون", "probability": 0.7343343098958334}, {"start": 3449.05, "end": 
3449.63, "word": " موجود", "probability": 0.9754231770833334}, {"start": 3449.63, "end": 3451.29, "word": " مع", "probability": 0.86181640625}, {"start": 3451.29, "end": 3451.79, "word": " اخر", "probability": 0.44268798828125}, {"start": 3451.79, "end": 3452.17, "word": " مخاطر", "probability": 0.4606170654296875}, {"start": 3452.17, "end": 3452.91, "word": " زي", "probability": 0.66162109375}, {"start": 3452.91, "end": 3453.01, "word": " ما", "probability": 0.93701171875}, {"start": 3453.01, "end": 3453.39, "word": " قلنا", "probability": 0.9261067708333334}, {"start": 3453.39, "end": 3454.19, "word": " اللي", "probability": 0.7178955078125}, {"start": 3454.19, "end": 3454.41, "word": " هي", "probability": 0.9072265625}, {"start": 3454.41, "end": 3455.09, "word": " شيرديا", "probability": 0.48187255859375}, {"start": 3455.09, "end": 3455.71, "word": " كيجاشي", "probability": 0.88818359375}, {"start": 3455.71, "end": 3456.35, "word": " سندروم", "probability": 0.6304931640625}, {"start": 3456.35, "end": 3457.25, "word": " او", "probability": 0.817626953125}, {"start": 3457.25, "end": 3457.67, "word": " harm", "probability": 0.322265625}, {"start": 3457.67, "end": 3458.01, "word": " sky", "probability": 0.68017578125}, {"start": 3458.01, "end": 3458.51, "word": " bottling", "probability": 0.85986328125}, {"start": 3458.51, "end": 3459.13, "word": " syndrome", "probability": 0.78125}], "temperature": 1.0}, {"id": 125, "seek": 348810, "start": 3459.64, "end": 3488.1, "text": "او اللى هو ويسكوت ادريخسون راشى؟ diagnoses طيب قالوا انه فى ال storage port disease بيكون فى low concentration low response to low concentration of collagen كل ما جللنا ال collagen concentration كل ما كان response", "tokens": [995, 2407, 13672, 7578, 31439, 4032, 1829, 3794, 4117, 35473, 1975, 3215, 16572, 9778, 3794, 11536, 12602, 33599, 7578, 22807, 7234, 4201, 23032, 1829, 3555, 50239, 14407, 16472, 3224, 6156, 7578, 2423, 6725, 2436, 4752, 4724, 1829, 30544, 6156, 7578, 2295, 9856, 2295, 
4134, 281, 2295, 9856, 295, 40444, 28242, 19446, 10874, 1211, 1211, 8315, 2423, 40444, 9856, 28242, 19446, 25961, 4134], "avg_logprob": -0.356646822558509, "compression_ratio": 1.5819209039548023, "no_speech_prob": 0.0, "words": [{"start": 3459.64, "end": 3460.18, "word": "او", "probability": 0.57000732421875}, {"start": 3460.18, "end": 3461.46, "word": " اللى", "probability": 0.636962890625}, {"start": 3461.46, "end": 3461.84, "word": " هو", "probability": 0.92919921875}, {"start": 3461.84, "end": 3463.04, "word": " ويسكوت", "probability": 0.65628662109375}, {"start": 3463.04, "end": 3463.82, "word": " ادريخسون", "probability": 0.61090087890625}, {"start": 3463.82, "end": 3467.36, "word": " راشى؟", "probability": 0.5497283935546875}, {"start": 3467.36, "end": 3468.4, "word": " diagnoses", "probability": 0.5928955078125}, {"start": 3468.4, "end": 3473.26, "word": " طيب", "probability": 0.7198893229166666}, {"start": 3473.26, "end": 3474.08, "word": " قالوا", "probability": 0.73583984375}, {"start": 3474.08, "end": 3474.32, "word": " انه", "probability": 0.631591796875}, {"start": 3474.32, "end": 3474.42, "word": " فى", "probability": 0.71044921875}, {"start": 3474.42, "end": 3474.5, "word": " ال", "probability": 0.90625}, {"start": 3474.5, "end": 3474.82, "word": " storage", "probability": 0.7431640625}, {"start": 3474.82, "end": 3475.04, "word": " port", "probability": 0.311767578125}, {"start": 3475.04, "end": 3475.4, "word": " disease", "probability": 0.82421875}, {"start": 3475.4, "end": 3475.7, "word": " بيكون", "probability": 0.9358723958333334}, {"start": 3475.7, "end": 3475.96, "word": " فى", "probability": 0.923828125}, {"start": 3475.96, "end": 3476.22, "word": " low", "probability": 0.97705078125}, {"start": 3476.22, "end": 3477.1, "word": " concentration", "probability": 0.88916015625}, {"start": 3477.1, "end": 3477.38, "word": " low", "probability": 0.8828125}, {"start": 3477.38, "end": 3478.54, "word": " response", "probability": 0.9150390625}, 
{"start": 3478.54, "end": 3479.4, "word": " to", "probability": 0.86328125}, {"start": 3479.4, "end": 3479.64, "word": " low", "probability": 0.98095703125}, {"start": 3479.64, "end": 3480.3, "word": " concentration", "probability": 0.9208984375}, {"start": 3480.3, "end": 3480.5, "word": " of", "probability": 0.93310546875}, {"start": 3480.5, "end": 3481.04, "word": " collagen", "probability": 0.95361328125}, {"start": 3481.04, "end": 3482.24, "word": " كل", "probability": 0.97119140625}, {"start": 3482.24, "end": 3482.34, "word": " ما", "probability": 0.7802734375}, {"start": 3482.34, "end": 3483.1, "word": " جللنا", "probability": 0.94140625}, {"start": 3483.1, "end": 3484.48, "word": " ال", "probability": 0.95361328125}, {"start": 3484.48, "end": 3485.26, "word": " collagen", "probability": 0.927734375}, {"start": 3485.26, "end": 3486.48, "word": " concentration", "probability": 0.83544921875}, {"start": 3486.48, "end": 3486.9, "word": " كل", "probability": 0.98291015625}, {"start": 3486.9, "end": 3487.1, "word": " ما", "probability": 0.72900390625}, {"start": 3487.1, "end": 3487.32, "word": " كان", "probability": 0.9814453125}, {"start": 3487.32, "end": 3488.1, "word": " response", "probability": 0.493408203125}], "temperature": 1.0}, {"id": 126, "seek": 351807, "start": 3489.01, "end": 3518.07, "text": "دعيف جدا نمر اتنين ADP و الابنفرن شوهو dimension second wave agglutination هو ال response مافي شي second wave في حالة ليش الابنفرن و ال ADP Restocetin بيعطي normal agglutination or aggregation electron microscope لو طلعنا على ال .. 
على ال platelet تحت ال electron microscope نلاقيها normal sorry lack of dense body", "tokens": [3215, 3615, 33911, 10874, 28259, 8717, 29973, 1975, 2655, 1863, 9957, 9135, 47, 4032, 2423, 16758, 1863, 5172, 2288, 1863, 13412, 2407, 3224, 2407, 10139, 1150, 5772, 623, 7191, 325, 2486, 31439, 2423, 4134, 19446, 41185, 44049, 1150, 5772, 8978, 11331, 6027, 3660, 32239, 8592, 2423, 16758, 1863, 5172, 2288, 1863, 4032, 2423, 9135, 47, 13094, 905, 38645, 4724, 40228, 9566, 1829, 2710, 623, 7191, 325, 2486, 420, 16743, 399, 6084, 29753, 45164, 23032, 1211, 3615, 8315, 15844, 2423, 4386, 15844, 2423, 3403, 15966, 6055, 33753, 2423, 6084, 29753, 8717, 15040, 38436, 11296, 2710, 2597, 5011, 295, 18011, 1772], "avg_logprob": -0.35125000923871996, "compression_ratio": 1.624, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3489.01, "end": 3489.45, "word": "دعيف", "probability": 0.6274007161458334}, {"start": 3489.45, "end": 3489.83, "word": " جدا", "probability": 0.987060546875}, {"start": 3489.83, "end": 3491.85, "word": " نمر", "probability": 0.5135498046875}, {"start": 3491.85, "end": 3492.13, "word": " اتنين", "probability": 0.741363525390625}, {"start": 3492.13, "end": 3492.59, "word": " ADP", "probability": 0.735595703125}, {"start": 3492.59, "end": 3492.69, "word": " و", "probability": 0.69287109375}, {"start": 3492.69, "end": 3493.29, "word": " الابنفرن", "probability": 0.54901123046875}, {"start": 3493.29, "end": 3493.65, "word": " شوهو", "probability": 0.6890869140625}, {"start": 3493.65, "end": 3494.15, "word": " dimension", "probability": 0.418212890625}, {"start": 3494.15, "end": 3494.87, "word": " second", "probability": 0.48974609375}, {"start": 3494.87, "end": 3495.85, "word": " wave", "probability": 0.86767578125}, {"start": 3495.85, "end": 3497.39, "word": " agglutination", "probability": 0.8790283203125}, {"start": 3497.39, "end": 3497.57, "word": " هو", "probability": 0.76220703125}, {"start": 3497.57, "end": 3497.67, "word": " ال", 
"probability": 0.6416015625}, {"start": 3497.67, "end": 3498.11, "word": " response", "probability": 0.78076171875}, {"start": 3498.11, "end": 3498.73, "word": " مافي", "probability": 0.6400146484375}, {"start": 3498.73, "end": 3498.95, "word": " شي", "probability": 0.51416015625}, {"start": 3498.95, "end": 3499.93, "word": " second", "probability": 0.78125}, {"start": 3499.93, "end": 3500.35, "word": " wave", "probability": 0.96923828125}, {"start": 3500.35, "end": 3500.53, "word": " في", "probability": 0.71533203125}, {"start": 3500.53, "end": 3500.85, "word": " حالة", "probability": 0.9786783854166666}, {"start": 3500.85, "end": 3501.15, "word": " ليش", "probability": 0.72998046875}, {"start": 3501.15, "end": 3501.85, "word": " الابنفرن", "probability": 0.9047037760416666}, {"start": 3501.85, "end": 3502.51, "word": " و", "probability": 0.5751953125}, {"start": 3502.51, "end": 3502.63, "word": " ال", "probability": 0.84130859375}, {"start": 3502.63, "end": 3503.17, "word": " ADP", "probability": 0.88818359375}, {"start": 3503.17, "end": 3504.85, "word": " Restocetin", "probability": 0.4375}, {"start": 3504.85, "end": 3505.99, "word": " بيعطي", "probability": 0.90283203125}, {"start": 3505.99, "end": 3506.41, "word": " normal", "probability": 0.81689453125}, {"start": 3506.41, "end": 3507.61, "word": " agglutination", "probability": 0.9776611328125}, {"start": 3507.61, "end": 3507.87, "word": " or", "probability": 0.39794921875}, {"start": 3507.87, "end": 3509.13, "word": " aggregation", "probability": 0.871826171875}, {"start": 3509.13, "end": 3510.43, "word": " electron", "probability": 0.40771484375}, {"start": 3510.43, "end": 3511.07, "word": " microscope", "probability": 0.88134765625}, {"start": 3511.07, "end": 3511.41, "word": " لو", "probability": 0.96484375}, {"start": 3511.41, "end": 3511.91, "word": " طلعنا", "probability": 0.9805908203125}, {"start": 3511.91, "end": 3512.13, "word": " على", "probability": 0.86328125}, {"start": 3512.13, "end": 
3512.29, "word": " ال", "probability": 0.6806640625}, {"start": 3512.29, "end": 3512.41, "word": " ..", "probability": 0.1295166015625}, {"start": 3512.41, "end": 3512.71, "word": " على", "probability": 0.74072265625}, {"start": 3512.71, "end": 3512.87, "word": " ال", "probability": 0.98828125}, {"start": 3512.87, "end": 3513.43, "word": " platelet", "probability": 0.5924072265625}, {"start": 3513.43, "end": 3513.93, "word": " تحت", "probability": 0.93994140625}, {"start": 3513.93, "end": 3514.09, "word": " ال", "probability": 0.9560546875}, {"start": 3514.09, "end": 3514.33, "word": " electron", "probability": 0.92333984375}, {"start": 3514.33, "end": 3514.77, "word": " microscope", "probability": 0.96630859375}, {"start": 3514.77, "end": 3515.19, "word": " نلاقيها", "probability": 0.62628173828125}, {"start": 3515.19, "end": 3515.57, "word": " normal", "probability": 0.85595703125}, {"start": 3515.57, "end": 3516.91, "word": " sorry", "probability": 0.5029296875}, {"start": 3516.91, "end": 3517.41, "word": " lack", "probability": 0.86328125}, {"start": 3517.41, "end": 3517.61, "word": " of", "probability": 0.98095703125}, {"start": 3517.61, "end": 3517.81, "word": " dense", "probability": 0.94580078125}, {"start": 3517.81, "end": 3518.07, "word": " body", "probability": 0.87109375}], "temperature": 1.0}, {"id": 127, "seek": 354820, "start": 3519.06, "end": 3548.2, "text": "و هى طبعا فى زيادة فى ال ATP-ADP ratio و ده هى شباب هى ال Bernard Sawyer's syndrome هى نورمان ماشى هى نورمان فى نورمان فى primary wave وفى secondary wave صح primary aggregation و secondary aggregation فى ال storage pool disease شو اللى بيصير يا شباب هى نزلت", "tokens": [2407, 8032, 7578, 23032, 3555, 3615, 995, 6156, 7578, 30767, 1829, 18513, 3660, 6156, 7578, 2423, 8872, 47, 12, 6112, 47, 8509, 4032, 11778, 3224, 8032, 7578, 13412, 3555, 16758, 8032, 7578, 2423, 30116, 6299, 86, 7224, 311, 19371, 8032, 7578, 8717, 13063, 2304, 7649, 3714, 33599, 7578, 8032, 7578, 8717, 13063, 2304, 7649, 6156, 
7578, 8717, 13063, 2304, 7649, 6156, 7578, 6194, 5772, 4032, 5172, 7578, 11396, 5772, 20328, 5016, 6194, 16743, 399, 4032, 11396, 16743, 399, 6156, 7578, 2423, 6725, 7005, 4752, 13412, 2407, 13672, 7578, 4724, 1829, 9381, 13546, 35186, 13412, 3555, 16758, 8032, 7578, 8717, 11622, 1211, 2655], "avg_logprob": -0.33677184118807896, "compression_ratio": 1.6714285714285715, "no_speech_prob": 0.0, "words": [{"start": 3519.06, "end": 3519.28, "word": "و", "probability": 0.83544921875}, {"start": 3519.28, "end": 3519.4, "word": " هى", "probability": 0.450775146484375}, {"start": 3519.4, "end": 3519.62, "word": " طبعا", "probability": 0.9727783203125}, {"start": 3519.62, "end": 3519.8, "word": " فى", "probability": 0.77734375}, {"start": 3519.8, "end": 3520.16, "word": " زيادة", "probability": 0.9703369140625}, {"start": 3520.16, "end": 3520.3, "word": " فى", "probability": 0.788818359375}, {"start": 3520.3, "end": 3520.4, "word": " ال", "probability": 0.8828125}, {"start": 3520.4, "end": 3520.72, "word": " ATP", "probability": 0.337158203125}, {"start": 3520.72, "end": 3521.24, "word": "-ADP", "probability": 0.6823323567708334}, {"start": 3521.24, "end": 3522.18, "word": " ratio", "probability": 0.7275390625}, {"start": 3522.18, "end": 3524.0, "word": " و", "probability": 0.2978515625}, {"start": 3524.0, "end": 3525.54, "word": " ده", "probability": 0.76513671875}, {"start": 3525.54, "end": 3525.74, "word": " هى", "probability": 0.75732421875}, {"start": 3525.74, "end": 3526.16, "word": " شباب", "probability": 0.7607421875}, {"start": 3526.16, "end": 3527.68, "word": " هى", "probability": 0.64501953125}, {"start": 3527.68, "end": 3527.84, "word": " ال", "probability": 0.9658203125}, {"start": 3527.84, "end": 3528.16, "word": " Bernard", "probability": 0.09100341796875}, {"start": 3528.16, "end": 3529.38, "word": " Sawyer's", "probability": 0.4849853515625}, {"start": 3529.38, "end": 3530.88, "word": " syndrome", "probability": 0.58544921875}, {"start": 3530.88, "end": 
3531.6, "word": " هى", "probability": 0.839599609375}, {"start": 3531.6, "end": 3532.04, "word": " نورمان", "probability": 0.5753173828125}, {"start": 3532.04, "end": 3534.76, "word": " ماشى", "probability": 0.734375}, {"start": 3534.76, "end": 3535.78, "word": " هى", "probability": 0.82568359375}, {"start": 3535.78, "end": 3536.28, "word": " نورمان", "probability": 0.9141845703125}, {"start": 3536.28, "end": 3536.98, "word": " فى", "probability": 0.851318359375}, {"start": 3536.98, "end": 3537.34, "word": " نورمان", "probability": 0.90576171875}, {"start": 3537.34, "end": 3537.56, "word": " فى", "probability": 0.736572265625}, {"start": 3537.56, "end": 3538.02, "word": " primary", "probability": 0.7919921875}, {"start": 3538.02, "end": 3538.44, "word": " wave", "probability": 0.8154296875}, {"start": 3538.44, "end": 3538.72, "word": " وفى", "probability": 0.783203125}, {"start": 3538.72, "end": 3539.2, "word": " secondary", "probability": 0.9541015625}, {"start": 3539.2, "end": 3539.54, "word": " wave", "probability": 0.95654296875}, {"start": 3539.54, "end": 3539.8, "word": " صح", "probability": 0.6595458984375}, {"start": 3539.8, "end": 3540.64, "word": " primary", "probability": 0.40185546875}, {"start": 3540.64, "end": 3541.38, "word": " aggregation", "probability": 0.792724609375}, {"start": 3541.38, "end": 3541.5, "word": " و", "probability": 0.97509765625}, {"start": 3541.5, "end": 3541.94, "word": " secondary", "probability": 0.62548828125}, {"start": 3541.94, "end": 3543.42, "word": " aggregation", "probability": 0.747314453125}, {"start": 3543.42, "end": 3544.08, "word": " فى", "probability": 0.95166015625}, {"start": 3544.08, "end": 3544.18, "word": " ال", "probability": 0.82666015625}, {"start": 3544.18, "end": 3544.6, "word": " storage", "probability": 0.9208984375}, {"start": 3544.6, "end": 3544.9, "word": " pool", "probability": 0.9169921875}, {"start": 3544.9, "end": 3545.44, "word": " disease", "probability": 0.89404296875}, {"start": 3545.44, 
"end": 3545.64, "word": " شو", "probability": 0.930419921875}, {"start": 3545.64, "end": 3545.72, "word": " اللى", "probability": 0.955078125}, {"start": 3545.72, "end": 3546.06, "word": " بيصير", "probability": 0.9078369140625}, {"start": 3546.06, "end": 3546.22, "word": " يا", "probability": 0.451171875}, {"start": 3546.22, "end": 3547.06, "word": " شباب", "probability": 0.9825846354166666}, {"start": 3547.06, "end": 3547.38, "word": " هى", "probability": 0.75341796875}, {"start": 3547.38, "end": 3548.2, "word": " نزلت", "probability": 0.9761962890625}], "temperature": 1.0}, {"id": 128, "seek": 357509, "start": 3549.55, "end": 3575.09, "text": "فى response؟ لأ مش response فى first wave وعند ال second wave نزلت ماشي حد عنده سؤال تعالى ناخد اللى هو ال alpha granules طب انا بحكي على alpha granules زى ما حكيت عن dense granules alpha granules pool", "tokens": [5172, 7578, 4134, 22807, 5296, 10721, 37893, 4134, 6156, 7578, 700, 5772, 4032, 3615, 41260, 2423, 1150, 5772, 8717, 11622, 1211, 2655, 3714, 33599, 1829, 11331, 3215, 43242, 3224, 8608, 33604, 6027, 37279, 6027, 7578, 8717, 47283, 3215, 13672, 7578, 31439, 2423, 8961, 9370, 3473, 23032, 3555, 1975, 8315, 4724, 5016, 4117, 1829, 15844, 8961, 9370, 3473, 30767, 7578, 19446, 11331, 4117, 36081, 18871, 1441, 405, 9370, 3473, 8961, 9370, 3473, 7005], "avg_logprob": -0.33946919114622354, "compression_ratio": 1.5872093023255813, "no_speech_prob": 0.0, "words": [{"start": 3549.55, "end": 3549.73, "word": "فى", "probability": 0.5560302734375}, {"start": 3549.73, "end": 3550.75, "word": " response؟", "probability": 0.55615234375}, {"start": 3550.75, "end": 3551.05, "word": " لأ", "probability": 0.57647705078125}, {"start": 3551.05, "end": 3551.37, "word": " مش", "probability": 0.314208984375}, {"start": 3551.37, "end": 3551.75, "word": " response", "probability": 0.869140625}, {"start": 3551.75, "end": 3552.71, "word": " فى", "probability": 0.63037109375}, {"start": 3552.71, "end": 3553.25, "word": " first", 
"probability": 0.74609375}, {"start": 3553.25, "end": 3553.83, "word": " wave", "probability": 0.95751953125}, {"start": 3553.83, "end": 3554.85, "word": " وعند", "probability": 0.7039388020833334}, {"start": 3554.85, "end": 3555.03, "word": " ال", "probability": 0.41015625}, {"start": 3555.03, "end": 3555.35, "word": " second", "probability": 0.92236328125}, {"start": 3555.35, "end": 3555.67, "word": " wave", "probability": 0.982421875}, {"start": 3555.67, "end": 3557.03, "word": " نزلت", "probability": 0.989990234375}, {"start": 3557.03, "end": 3559.05, "word": " ماشي", "probability": 0.589599609375}, {"start": 3559.05, "end": 3563.27, "word": " حد", "probability": 0.74169921875}, {"start": 3563.27, "end": 3563.51, "word": " عنده", "probability": 0.54296875}, {"start": 3563.51, "end": 3563.89, "word": " سؤال", "probability": 0.9801432291666666}, {"start": 3563.89, "end": 3566.61, "word": " تعالى", "probability": 0.72998046875}, {"start": 3566.61, "end": 3567.09, "word": " ناخد", "probability": 0.9119466145833334}, {"start": 3567.09, "end": 3567.29, "word": " اللى", "probability": 0.948974609375}, {"start": 3567.29, "end": 3567.81, "word": " هو", "probability": 0.96044921875}, {"start": 3567.81, "end": 3569.17, "word": " ال", "probability": 0.95263671875}, {"start": 3569.17, "end": 3569.93, "word": " alpha", "probability": 0.7509765625}, {"start": 3569.93, "end": 3570.43, "word": " granules", "probability": 0.6416015625}, {"start": 3570.43, "end": 3570.63, "word": " طب", "probability": 0.855224609375}, {"start": 3570.63, "end": 3570.73, "word": " انا", "probability": 0.7216796875}, {"start": 3570.73, "end": 3571.01, "word": " بحكي", "probability": 0.914794921875}, {"start": 3571.01, "end": 3571.13, "word": " على", "probability": 0.65283203125}, {"start": 3571.13, "end": 3571.41, "word": " alpha", "probability": 0.71435546875}, {"start": 3571.41, "end": 3571.87, "word": " granules", "probability": 0.9501953125}, {"start": 3571.87, "end": 3572.03, "word": " زى", 
"probability": 0.813720703125}, {"start": 3572.03, "end": 3572.07, "word": " ما", "probability": 0.9619140625}, {"start": 3572.07, "end": 3572.31, "word": " حكيت", "probability": 0.97216796875}, {"start": 3572.31, "end": 3572.43, "word": " عن", "probability": 0.5205078125}, {"start": 3572.43, "end": 3572.67, "word": " dense", "probability": 0.3297119140625}, {"start": 3572.67, "end": 3573.21, "word": " granules", "probability": 0.957763671875}, {"start": 3573.21, "end": 3573.75, "word": " alpha", "probability": 0.53369140625}, {"start": 3573.75, "end": 3574.39, "word": " granules", "probability": 0.939208984375}, {"start": 3574.39, "end": 3575.09, "word": " pool", "probability": 0.83349609375}], "temperature": 1.0}, {"id": 129, "seek": 360636, "start": 3579.96, "end": 3606.36, "text": "أو زى ما قلتلكم بيسموه gray platelet syndrome أول ما بدوا يحكوا عنه في ال 71، Platelet بتحتوي normally على خمسين granules Alpha granules وكنا عارفين مكونات ال alpha granules لكن المرضى اللى عندهم alpha granule deficiency بيكون عندهم lack of these granules present with life long", "tokens": [10721, 2407, 30767, 7578, 19446, 12174, 1211, 2655, 23275, 2304, 4724, 1829, 38251, 2407, 3224, 10855, 3403, 15966, 19371, 5551, 12610, 19446, 47525, 14407, 7251, 5016, 4117, 14407, 18871, 3224, 8978, 2423, 30942, 12399, 17461, 15966, 39894, 33753, 45865, 5646, 15844, 16490, 2304, 3794, 9957, 9370, 3473, 20588, 9370, 3473, 4032, 4117, 8315, 6225, 9640, 5172, 9957, 3714, 4117, 2407, 8315, 2655, 2423, 8961, 9370, 3473, 44381, 9673, 43042, 7578, 13672, 7578, 43242, 16095, 8961, 9370, 2271, 37500, 4724, 1829, 30544, 43242, 16095, 5011, 295, 613, 9370, 3473, 1974, 365, 993, 938], "avg_logprob": -0.26461694702025385, "compression_ratio": 1.5365853658536586, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 3579.96, "end": 3580.3, "word": "أو", "probability": 0.86279296875}, {"start": 3580.3, "end": 3580.5, "word": " زى", "probability": 0.3909912109375}, {"start": 3580.5, "end": 3580.56, 
"word": " ما", "probability": 0.96142578125}, {"start": 3580.56, "end": 3581.02, "word": " قلتلكم", "probability": 0.81455078125}, {"start": 3581.02, "end": 3581.44, "word": " بيسموه", "probability": 0.91123046875}, {"start": 3581.44, "end": 3581.68, "word": " gray", "probability": 0.17333984375}, {"start": 3581.68, "end": 3582.14, "word": " platelet", "probability": 0.760498046875}, {"start": 3582.14, "end": 3582.88, "word": " syndrome", "probability": 0.8828125}, {"start": 3582.88, "end": 3585.22, "word": " أول", "probability": 0.76318359375}, {"start": 3585.22, "end": 3585.36, "word": " ما", "probability": 0.85400390625}, {"start": 3585.36, "end": 3585.56, "word": " بدوا", "probability": 0.8818359375}, {"start": 3585.56, "end": 3585.88, "word": " يحكوا", "probability": 0.974853515625}, {"start": 3585.88, "end": 3586.14, "word": " عنه", "probability": 0.9833984375}, {"start": 3586.14, "end": 3586.24, "word": " في", "probability": 0.763671875}, {"start": 3586.24, "end": 3586.32, "word": " ال", "probability": 0.77880859375}, {"start": 3586.32, "end": 3587.5, "word": " 71،", "probability": 0.3355712890625}, {"start": 3587.5, "end": 3588.06, "word": " Platelet", "probability": 0.548095703125}, {"start": 3588.06, "end": 3588.66, "word": " بتحتوي", "probability": 0.9471028645833334}, {"start": 3588.66, "end": 3589.4, "word": " normally", "probability": 0.51611328125}, {"start": 3589.4, "end": 3589.68, "word": " على", "probability": 0.830078125}, {"start": 3589.68, "end": 3590.38, "word": " خمسين", "probability": 0.8671875}, {"start": 3590.38, "end": 3591.5, "word": " granules", "probability": 0.686767578125}, {"start": 3591.5, "end": 3593.72, "word": " Alpha", "probability": 0.376953125}, {"start": 3593.72, "end": 3594.42, "word": " granules", "probability": 0.871337890625}, {"start": 3594.42, "end": 3594.78, "word": " وكنا", "probability": 0.69384765625}, {"start": 3594.78, "end": 3595.02, "word": " عارفين", "probability": 0.95068359375}, {"start": 3595.02, "end": 
3595.62, "word": " مكونات", "probability": 0.83798828125}, {"start": 3595.62, "end": 3595.72, "word": " ال", "probability": 0.91064453125}, {"start": 3595.72, "end": 3595.96, "word": " alpha", "probability": 0.63525390625}, {"start": 3595.96, "end": 3596.5, "word": " granules", "probability": 0.945068359375}, {"start": 3596.5, "end": 3598.04, "word": " لكن", "probability": 0.759765625}, {"start": 3598.04, "end": 3598.86, "word": " المرضى", "probability": 0.9065755208333334}, {"start": 3598.86, "end": 3599.08, "word": " اللى", "probability": 0.927978515625}, {"start": 3599.08, "end": 3599.4, "word": " عندهم", "probability": 0.988037109375}, {"start": 3599.4, "end": 3599.72, "word": " alpha", "probability": 0.75634765625}, {"start": 3599.72, "end": 3600.16, "word": " granule", "probability": 0.70458984375}, {"start": 3600.16, "end": 3600.86, "word": " deficiency", "probability": 0.79248046875}, {"start": 3600.86, "end": 3601.3, "word": " بيكون", "probability": 0.8543294270833334}, {"start": 3601.3, "end": 3601.68, "word": " عندهم", "probability": 0.991943359375}, {"start": 3601.68, "end": 3602.16, "word": " lack", "probability": 0.18701171875}, {"start": 3602.16, "end": 3602.6, "word": " of", "probability": 0.97802734375}, {"start": 3602.6, "end": 3602.86, "word": " these", "probability": 0.6484375}, {"start": 3602.86, "end": 3603.54, "word": " granules", "probability": 0.9716796875}, {"start": 3603.54, "end": 3604.96, "word": " present", "probability": 0.73779296875}, {"start": 3604.96, "end": 3605.52, "word": " with", "probability": 0.90283203125}, {"start": 3605.52, "end": 3605.98, "word": " life", "probability": 0.5908203125}, {"start": 3605.98, "end": 3606.36, "word": " long", "probability": 0.87255859375}], "temperature": 1.0}, {"id": 130, "seek": 363640, "start": 3607.5, "end": 3636.4, "text": "بتقدر تطلع مرالة of mild to moderate myocotinous bleeding وبيعاني من عمر كله من bleeding بهذه الطريقة تشخيص بيعمل bleeding time بيلاقي عالي صح؟ بيعمل thrombocyte count 
بيلاقي هواطية، مظبوط طب و بندوي على platelet granules بيلاقي A granular هي في ال platelet و زي ما قلتلكوا في ال peripheral blood", "tokens": [3555, 2655, 28543, 2288, 6055, 9566, 1211, 3615, 3714, 2288, 6027, 3660, 295, 15154, 281, 18174, 452, 905, 310, 259, 563, 19312, 4032, 21292, 3615, 7649, 1829, 9154, 6225, 29973, 28242, 3224, 9154, 19312, 4724, 3224, 24192, 41950, 16572, 28671, 6055, 8592, 9778, 1829, 9381, 4724, 1829, 25957, 1211, 19312, 565, 4724, 1829, 15040, 38436, 6225, 6027, 1829, 20328, 5016, 22807, 4724, 1829, 25957, 1211, 739, 3548, 31078, 975, 1207, 4724, 1829, 15040, 38436, 8032, 2407, 41193, 10632, 12399, 3714, 19913, 3555, 2407, 9566, 23032, 3555, 4032, 4724, 41260, 45865, 15844, 3403, 15966, 9370, 3473, 4724, 1829, 15040, 38436, 316, 39962, 39896, 8978, 2423, 3403, 15966, 4032, 30767, 1829, 19446, 12174, 1211, 2655, 23275, 14407, 8978, 2423, 40235, 3390], "avg_logprob": -0.3174479069809119, "compression_ratio": 1.6206896551724137, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3607.5, "end": 3608.32, "word": "بتقدر", "probability": 0.544952392578125}, {"start": 3608.32, "end": 3608.62, "word": " تطلع", "probability": 0.77520751953125}, {"start": 3608.62, "end": 3609.26, "word": " مرالة", "probability": 0.403167724609375}, {"start": 3609.26, "end": 3609.38, "word": " of", "probability": 0.283935546875}, {"start": 3609.38, "end": 3610.62, "word": " mild", "probability": 0.2138671875}, {"start": 3610.62, "end": 3610.84, "word": " to", "probability": 0.9228515625}, {"start": 3610.84, "end": 3611.12, "word": " moderate", "probability": 0.91796875}, {"start": 3611.12, "end": 3612.32, "word": " myocotinous", "probability": 0.61611328125}, {"start": 3612.32, "end": 3612.86, "word": " bleeding", "probability": 0.91455078125}, {"start": 3612.86, "end": 3614.06, "word": " وبيعاني", "probability": 0.70869140625}, {"start": 3614.06, "end": 3614.28, "word": " من", "probability": 0.814453125}, {"start": 3614.28, "end": 3614.68, "word": " عمر", 
"probability": 0.895751953125}, {"start": 3614.68, "end": 3615.32, "word": " كله", "probability": 0.93994140625}, {"start": 3615.32, "end": 3615.52, "word": " من", "probability": 0.78125}, {"start": 3615.52, "end": 3615.9, "word": " bleeding", "probability": 0.78125}, {"start": 3615.9, "end": 3616.42, "word": " بهذه", "probability": 0.563232421875}, {"start": 3616.42, "end": 3616.9, "word": " الطريقة", "probability": 0.9920247395833334}, {"start": 3616.9, "end": 3618.56, "word": " تشخيص", "probability": 0.96494140625}, {"start": 3618.56, "end": 3619.18, "word": " بيعمل", "probability": 0.9619140625}, {"start": 3619.18, "end": 3619.46, "word": " bleeding", "probability": 0.9248046875}, {"start": 3619.46, "end": 3619.76, "word": " time", "probability": 0.666015625}, {"start": 3619.76, "end": 3620.02, "word": " بيلاقي", "probability": 0.722412109375}, {"start": 3620.02, "end": 3620.48, "word": " عالي", "probability": 0.798828125}, {"start": 3620.48, "end": 3621.04, "word": " صح؟", "probability": 0.6647135416666666}, {"start": 3621.04, "end": 3622.0, "word": " بيعمل", "probability": 0.950927734375}, {"start": 3622.0, "end": 3622.86, "word": " thrombocyte", "probability": 0.8638916015625}, {"start": 3622.86, "end": 3623.5, "word": " count", "probability": 0.94873046875}, {"start": 3623.5, "end": 3624.0, "word": " بيلاقي", "probability": 0.943359375}, {"start": 3624.0, "end": 3624.96, "word": " هواطية،", "probability": 0.663232421875}, {"start": 3624.96, "end": 3626.56, "word": " مظبوط", "probability": 0.8953125}, {"start": 3626.56, "end": 3627.0, "word": " طب", "probability": 0.76416015625}, {"start": 3627.0, "end": 3627.04, "word": " و", "probability": 0.5927734375}, {"start": 3627.04, "end": 3627.36, "word": " بندوي", "probability": 0.7473958333333334}, {"start": 3627.36, "end": 3627.78, "word": " على", "probability": 0.84912109375}, {"start": 3627.78, "end": 3629.12, "word": " platelet", "probability": 0.47686767578125}, {"start": 3629.12, "end": 3629.78, "word": " 
granules", "probability": 0.852294921875}, {"start": 3629.78, "end": 3630.24, "word": " بيلاقي", "probability": 0.873291015625}, {"start": 3630.24, "end": 3630.48, "word": " A", "probability": 0.440185546875}, {"start": 3630.48, "end": 3631.0, "word": " granular", "probability": 0.8291015625}, {"start": 3631.0, "end": 3631.6, "word": " هي", "probability": 0.310302734375}, {"start": 3631.6, "end": 3632.54, "word": " في", "probability": 0.83837890625}, {"start": 3632.54, "end": 3632.68, "word": " ال", "probability": 0.8603515625}, {"start": 3632.68, "end": 3633.76, "word": " platelet", "probability": 0.764404296875}, {"start": 3633.76, "end": 3633.94, "word": " و", "probability": 0.92724609375}, {"start": 3633.94, "end": 3634.26, "word": " زي", "probability": 0.645263671875}, {"start": 3634.26, "end": 3634.34, "word": " ما", "probability": 0.98193359375}, {"start": 3634.34, "end": 3635.06, "word": " قلتلكوا", "probability": 0.8974609375}, {"start": 3635.06, "end": 3635.38, "word": " في", "probability": 0.88623046875}, {"start": 3635.38, "end": 3635.5, "word": " ال", "probability": 0.9228515625}, {"start": 3635.5, "end": 3635.86, "word": " peripheral", "probability": 0.73486328125}, {"start": 3635.86, "end": 3636.4, "word": " blood", "probability": 0.96728515625}], "temperature": 1.0}, {"id": 131, "seek": 366578, "start": 3637.32, "end": 3665.78, "text": "لو سبقناها normal stain تأخد لونش Aggregation study of decreased or absence response to collision وده شكل .. 
مش واضح اهتمامها بس يعني هدا هي شايفين ال grey؟ شايفين شكل ال platelet يعني؟ واحدة، اتنين، تلك مي روى ده", "tokens": [1211, 2407, 8608, 3555, 4587, 8315, 11296, 2710, 16441, 6055, 10721, 9778, 3215, 5296, 11536, 8592, 41512, 20167, 2979, 295, 24436, 420, 17145, 4134, 281, 24644, 4032, 3215, 3224, 13412, 28820, 4386, 37893, 4032, 46958, 5016, 1975, 3224, 39237, 10943, 11296, 4724, 3794, 37495, 22653, 8032, 28259, 39896, 13412, 995, 33911, 9957, 2423, 16578, 22807, 13412, 995, 33911, 9957, 13412, 28820, 2423, 3403, 15966, 37495, 22653, 22807, 36764, 24401, 3660, 12399, 1975, 2655, 1863, 9957, 12399, 6055, 1211, 4117, 3714, 1829, 12602, 2407, 7578, 11778, 3224], "avg_logprob": -0.4924568883303938, "compression_ratio": 1.3811659192825112, "no_speech_prob": 0.0, "words": [{"start": 3637.32, "end": 3637.62, "word": "لو", "probability": 0.869873046875}, {"start": 3637.62, "end": 3638.28, "word": " سبقناها", "probability": 0.85458984375}, {"start": 3638.28, "end": 3638.84, "word": " normal", "probability": 0.41943359375}, {"start": 3638.84, "end": 3639.74, "word": " stain", "probability": 0.80224609375}, {"start": 3639.74, "end": 3640.6, "word": " تأخد", "probability": 0.564453125}, {"start": 3640.6, "end": 3641.48, "word": " لونش", "probability": 0.6182047526041666}, {"start": 3641.48, "end": 3643.52, "word": " Aggregation", "probability": 0.46710205078125}, {"start": 3643.52, "end": 3644.0, "word": " study", "probability": 0.495849609375}, {"start": 3644.0, "end": 3644.28, "word": " of", "probability": 0.25927734375}, {"start": 3644.28, "end": 3644.84, "word": " decreased", "probability": 0.7373046875}, {"start": 3644.84, "end": 3645.12, "word": " or", "probability": 0.92431640625}, {"start": 3645.12, "end": 3645.74, "word": " absence", "probability": 0.54345703125}, {"start": 3645.74, "end": 3646.9, "word": " response", "probability": 0.76123046875}, {"start": 3646.9, "end": 3647.12, "word": " to", "probability": 0.962890625}, {"start": 3647.12, "end": 3647.68, "word": 
" collision", "probability": 0.2147216796875}, {"start": 3647.68, "end": 3648.9, "word": " وده", "probability": 0.5638020833333334}, {"start": 3648.9, "end": 3649.34, "word": " شكل", "probability": 0.9677734375}, {"start": 3649.34, "end": 3651.76, "word": " ..", "probability": 0.188720703125}, {"start": 3651.76, "end": 3653.5, "word": " مش", "probability": 0.859375}, {"start": 3653.5, "end": 3653.74, "word": " واضح", "probability": 0.96533203125}, {"start": 3653.74, "end": 3654.24, "word": " اهتمامها", "probability": 0.56201171875}, {"start": 3654.24, "end": 3654.44, "word": " بس", "probability": 0.852783203125}, {"start": 3654.44, "end": 3654.64, "word": " يعني", "probability": 0.757568359375}, {"start": 3654.64, "end": 3655.1, "word": " هدا", "probability": 0.34619140625}, {"start": 3655.1, "end": 3655.36, "word": " هي", "probability": 0.337890625}, {"start": 3655.36, "end": 3656.06, "word": " شايفين", "probability": 0.91748046875}, {"start": 3656.06, "end": 3656.18, "word": " ال", "probability": 0.8681640625}, {"start": 3656.18, "end": 3657.04, "word": " grey؟", "probability": 0.4298095703125}, {"start": 3657.04, "end": 3657.38, "word": " شايفين", "probability": 0.9781494140625}, {"start": 3657.38, "end": 3657.68, "word": " شكل", "probability": 0.95751953125}, {"start": 3657.68, "end": 3657.82, "word": " ال", "probability": 0.9375}, {"start": 3657.82, "end": 3658.24, "word": " platelet", "probability": 0.48565673828125}, {"start": 3658.24, "end": 3660.36, "word": " يعني؟", "probability": 0.6962076822916666}, {"start": 3660.36, "end": 3661.12, "word": " واحدة،", "probability": 0.68548583984375}, {"start": 3661.12, "end": 3661.76, "word": " اتنين،", "probability": 0.79853515625}, {"start": 3661.76, "end": 3662.26, "word": " تلك", "probability": 0.7054443359375}, {"start": 3662.26, "end": 3665.2, "word": " مي", "probability": 0.4747314453125}, {"start": 3665.2, "end": 3665.54, "word": " روى", "probability": 0.392578125}, {"start": 3665.54, "end": 3665.78, "word": " 
ده", "probability": 0.919921875}], "temperature": 1.0}, {"id": 132, "seek": 369518, "start": 3667.1, "end": 3695.18, "text": "حد انت سؤال ايش بقى؟ no question؟ نلخص morphology and role of platelet in primary hemostasis شوفناه؟ شوفناه ولا ده؟ اه حكينا ولا ده؟ حكينا ليله منيه؟ شوفنا دور ال glycoprotein 1B in adhesion وغياب وعمل بيرنر سوريا disease او vomliebrand", "tokens": [5016, 3215, 16472, 2655, 8608, 33604, 6027, 1975, 1829, 8592, 4724, 4587, 7578, 22807, 572, 1168, 22807, 8717, 1211, 9778, 9381, 25778, 1793, 293, 3090, 295, 3403, 15966, 294, 6194, 8636, 555, 26632, 13412, 38688, 8315, 3224, 22807, 13412, 38688, 8315, 3224, 49429, 11778, 3224, 22807, 1975, 3224, 11331, 4117, 1829, 8315, 49429, 11778, 3224, 22807, 11331, 4117, 1829, 8315, 5296, 26895, 3224, 9154, 1829, 3224, 22807, 13412, 38688, 8315, 11778, 13063, 2423, 22633, 13084, 81, 1370, 259, 502, 33, 294, 614, 38571, 4032, 17082, 1829, 16758, 4032, 25957, 1211, 4724, 13546, 1863, 2288, 8608, 13063, 25528, 4752, 1975, 2407, 10135, 6302, 30476], "avg_logprob": -0.421875008310263, "compression_ratio": 1.4778761061946903, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3667.1, "end": 3667.36, "word": "حد", "probability": 0.3604736328125}, {"start": 3667.36, "end": 3667.52, "word": " انت", "probability": 0.451416015625}, {"start": 3667.52, "end": 3667.74, "word": " سؤال", "probability": 0.8797200520833334}, {"start": 3667.74, "end": 3667.9, "word": " ايش", "probability": 0.524658203125}, {"start": 3667.9, "end": 3670.34, "word": " بقى؟", "probability": 0.8096923828125}, {"start": 3670.34, "end": 3670.5, "word": " no", "probability": 0.202392578125}, {"start": 3670.5, "end": 3672.38, "word": " question؟", "probability": 0.58648681640625}, {"start": 3672.38, "end": 3673.02, "word": " نلخص", "probability": 0.71673583984375}, {"start": 3673.02, "end": 3675.68, "word": " morphology", "probability": 0.6278076171875}, {"start": 3675.68, "end": 3675.86, "word": " and", "probability": 0.90234375}, 
{"start": 3675.86, "end": 3676.04, "word": " role", "probability": 0.96435546875}, {"start": 3676.04, "end": 3676.18, "word": " of", "probability": 0.96728515625}, {"start": 3676.18, "end": 3676.56, "word": " platelet", "probability": 0.6328125}, {"start": 3676.56, "end": 3676.74, "word": " in", "probability": 0.8193359375}, {"start": 3676.74, "end": 3677.0, "word": " primary", "probability": 0.384521484375}, {"start": 3677.0, "end": 3677.54, "word": " hemostasis", "probability": 0.4261881510416667}, {"start": 3677.54, "end": 3678.94, "word": " شوفناه؟", "probability": 0.76220703125}, {"start": 3678.94, "end": 3680.66, "word": " شوفناه", "probability": 0.94775390625}, {"start": 3680.66, "end": 3680.74, "word": " ولا", "probability": 0.9560546875}, {"start": 3680.74, "end": 3682.84, "word": " ده؟", "probability": 0.6964111328125}, {"start": 3682.84, "end": 3683.58, "word": " اه", "probability": 0.692626953125}, {"start": 3683.58, "end": 3683.94, "word": " حكينا", "probability": 0.8768310546875}, {"start": 3683.94, "end": 3684.1, "word": " ولا", "probability": 0.8974609375}, {"start": 3684.1, "end": 3684.44, "word": " ده؟", "probability": 0.9498697916666666}, {"start": 3684.44, "end": 3684.88, "word": " حكينا", "probability": 0.81005859375}, {"start": 3684.88, "end": 3685.3, "word": " ليله", "probability": 0.3292643229166667}, {"start": 3685.3, "end": 3686.1, "word": " منيه؟", "probability": 0.6898193359375}, {"start": 3686.1, "end": 3687.18, "word": " شوفنا", "probability": 0.9182942708333334}, {"start": 3687.18, "end": 3687.4, "word": " دور", "probability": 0.914306640625}, {"start": 3687.4, "end": 3687.56, "word": " ال", "probability": 0.814453125}, {"start": 3687.56, "end": 3688.32, "word": " glycoprotein", "probability": 0.900390625}, {"start": 3688.32, "end": 3688.86, "word": " 1B", "probability": 0.645263671875}, {"start": 3688.86, "end": 3689.34, "word": " in", "probability": 0.841796875}, {"start": 3689.34, "end": 3690.0, "word": " adhesion", "probability": 
0.941162109375}, {"start": 3690.0, "end": 3691.36, "word": " وغياب", "probability": 0.8502197265625}, {"start": 3691.36, "end": 3691.7, "word": " وعمل", "probability": 0.9085286458333334}, {"start": 3691.7, "end": 3692.2, "word": " بيرنر", "probability": 0.5684814453125}, {"start": 3692.2, "end": 3692.56, "word": " سوريا", "probability": 0.57958984375}, {"start": 3692.56, "end": 3693.14, "word": " disease", "probability": 0.33056640625}, {"start": 3693.14, "end": 3694.26, "word": " او", "probability": 0.72705078125}, {"start": 3694.26, "end": 3695.18, "word": " vomliebrand", "probability": 0.430908203125}], "temperature": 1.0}, {"id": 133, "seek": 372152, "start": 3696.06, "end": 3721.52, "text": "وقولنا نتيجته انه في response لكل ال agonist ما عدا 100% activation or secretion dense body deficiency usually associated with syndromes وكم سندروم حكينا عنه؟ 100% ال alpha granule deficiency حكينا عنها ثم aggregation بيتعلق بال 2b3a وعمل glazman", "tokens": [2407, 39648, 8315, 8717, 31371, 7435, 47395, 16472, 3224, 8978, 4134, 5296, 28820, 2423, 623, 266, 468, 19446, 6225, 28259, 2319, 4, 24433, 420, 4054, 313, 18011, 1772, 37500, 2673, 6615, 365, 15198, 4397, 279, 4032, 24793, 8608, 41260, 2288, 20498, 11331, 4117, 1829, 8315, 18871, 3224, 22807, 2319, 4, 2423, 8961, 9370, 2271, 37500, 11331, 4117, 1829, 8315, 18871, 11296, 38637, 2304, 623, 70, 20167, 4724, 36081, 30241, 4587, 20666, 568, 65, 18, 64, 4032, 25957, 1211, 8771, 89, 1601], "avg_logprob": -0.28525152548057275, "compression_ratio": 1.4026548672566372, "no_speech_prob": 0.0, "words": [{"start": 3696.06, "end": 3696.5, "word": "وقولنا", "probability": 0.7843424479166666}, {"start": 3696.5, "end": 3697.18, "word": " نتيجته", "probability": 0.978759765625}, {"start": 3697.18, "end": 3697.54, "word": " انه", "probability": 0.630859375}, {"start": 3697.54, "end": 3697.64, "word": " في", "probability": 0.765625}, {"start": 3697.64, "end": 3698.34, "word": " response", "probability": 0.521484375}, {"start": 3698.34, 
"end": 3699.66, "word": " لكل", "probability": 0.951171875}, {"start": 3699.66, "end": 3699.84, "word": " ال", "probability": 0.9345703125}, {"start": 3699.84, "end": 3700.32, "word": " agonist", "probability": 0.85498046875}, {"start": 3700.32, "end": 3700.52, "word": " ما", "probability": 0.84765625}, {"start": 3700.52, "end": 3700.88, "word": " عدا", "probability": 0.68359375}, {"start": 3700.88, "end": 3702.18, "word": " 100", "probability": 0.313232421875}, {"start": 3702.18, "end": 3702.62, "word": "%", "probability": 0.900390625}, {"start": 3702.62, "end": 3704.14, "word": " activation", "probability": 0.94189453125}, {"start": 3704.14, "end": 3704.64, "word": " or", "probability": 0.638671875}, {"start": 3704.64, "end": 3705.4, "word": " secretion", "probability": 0.949462890625}, {"start": 3705.4, "end": 3705.88, "word": " dense", "probability": 0.16357421875}, {"start": 3705.88, "end": 3706.16, "word": " body", "probability": 0.8359375}, {"start": 3706.16, "end": 3706.9, "word": " deficiency", "probability": 0.92333984375}, {"start": 3706.9, "end": 3708.3, "word": " usually", "probability": 0.9345703125}, {"start": 3708.3, "end": 3709.46, "word": " associated", "probability": 0.92529296875}, {"start": 3709.46, "end": 3709.92, "word": " with", "probability": 0.93359375}, {"start": 3709.92, "end": 3711.16, "word": " syndromes", "probability": 0.9261067708333334}, {"start": 3711.16, "end": 3711.98, "word": " وكم", "probability": 0.8125}, {"start": 3711.98, "end": 3712.44, "word": " سندروم", "probability": 0.7255859375}, {"start": 3712.44, "end": 3712.84, "word": " حكينا", "probability": 0.977294921875}, {"start": 3712.84, "end": 3713.24, "word": " عنه؟", "probability": 0.8056640625}, {"start": 3713.24, "end": 3713.96, "word": " 100", "probability": 0.7109375}, {"start": 3713.96, "end": 3714.5, "word": "%", "probability": 0.84912109375}, {"start": 3714.5, "end": 3714.8, "word": " ال", "probability": 0.5576171875}, {"start": 3714.8, "end": 3715.02, "word": " 
alpha", "probability": 0.78759765625}, {"start": 3715.02, "end": 3715.38, "word": " granule", "probability": 0.4300537109375}, {"start": 3715.38, "end": 3715.88, "word": " deficiency", "probability": 0.5634765625}, {"start": 3715.88, "end": 3716.2, "word": " حكينا", "probability": 0.9659423828125}, {"start": 3716.2, "end": 3716.54, "word": " عنها", "probability": 0.9443359375}, {"start": 3716.54, "end": 3716.96, "word": " ثم", "probability": 0.843505859375}, {"start": 3716.96, "end": 3717.64, "word": " aggregation", "probability": 0.7294921875}, {"start": 3717.64, "end": 3718.58, "word": " بيتعلق", "probability": 0.775909423828125}, {"start": 3718.58, "end": 3718.72, "word": " بال", "probability": 0.77978515625}, {"start": 3718.72, "end": 3719.72, "word": " 2b3a", "probability": 0.6563720703125}, {"start": 3719.72, "end": 3720.74, "word": " وعمل", "probability": 0.90478515625}, {"start": 3720.74, "end": 3721.52, "word": " glazman", "probability": 0.556884765625}], "temperature": 1.0}, {"id": 134, "seek": 374609, "start": 3721.95, "end": 3746.09, "text": "واللي قلنا فيه fish aggregation لكل الأجنس مع ناس وده الجدول اللي بيبين ليه اللي هو differential diagnosis تبع الأمراض المختلفة اتبعوا يا شباب الجدول لإنه مهم جدا لحظوا ال بتاشي هي عبارة عن characteristics feature موجودة في ال", "tokens": [2407, 6027, 20292, 12174, 1211, 8315, 8978, 3224, 3506, 16743, 399, 5296, 28820, 16247, 7435, 1863, 3794, 20449, 8717, 32277, 4032, 3215, 3224, 2423, 7435, 3215, 12610, 13672, 1829, 4724, 1829, 3555, 9957, 32239, 3224, 13672, 1829, 31439, 15756, 15217, 6055, 3555, 3615, 16247, 29973, 46958, 9673, 46456, 46538, 3660, 1975, 2655, 3555, 3615, 14407, 35186, 13412, 3555, 16758, 25724, 3215, 12610, 5296, 28814, 1863, 3224, 3714, 16095, 10874, 28259, 5296, 5016, 19913, 14407, 2423, 39894, 33599, 1829, 39896, 6225, 3555, 9640, 3660, 18871, 10891, 4111, 3714, 29245, 23328, 3660, 8978, 2423], "avg_logprob": -0.2750335983050767, "compression_ratio": 1.5682819383259912, "no_speech_prob": 
0.0, "words": [{"start": 3721.95, "end": 3722.65, "word": "واللي", "probability": 0.7312825520833334}, {"start": 3722.65, "end": 3722.95, "word": " قلنا", "probability": 0.8640950520833334}, {"start": 3722.95, "end": 3723.45, "word": " فيه", "probability": 0.9248046875}, {"start": 3723.45, "end": 3724.03, "word": " fish", "probability": 0.451416015625}, {"start": 3724.03, "end": 3724.69, "word": " aggregation", "probability": 0.9365234375}, {"start": 3724.69, "end": 3725.31, "word": " لكل", "probability": 0.952880859375}, {"start": 3725.31, "end": 3725.73, "word": " الأجنس", "probability": 0.556121826171875}, {"start": 3725.73, "end": 3725.95, "word": " مع", "probability": 0.71240234375}, {"start": 3725.95, "end": 3726.23, "word": " ناس", "probability": 0.457763671875}, {"start": 3726.23, "end": 3728.15, "word": " وده", "probability": 0.5447591145833334}, {"start": 3728.15, "end": 3728.45, "word": " الجدول", "probability": 0.70245361328125}, {"start": 3728.45, "end": 3728.61, "word": " اللي", "probability": 0.59326171875}, {"start": 3728.61, "end": 3729.05, "word": " بيبين", "probability": 0.76068115234375}, {"start": 3729.05, "end": 3729.45, "word": " ليه", "probability": 0.911865234375}, {"start": 3729.45, "end": 3730.23, "word": " اللي", "probability": 0.781005859375}, {"start": 3730.23, "end": 3730.37, "word": " هو", "probability": 0.9365234375}, {"start": 3730.37, "end": 3730.87, "word": " differential", "probability": 0.748046875}, {"start": 3730.87, "end": 3731.77, "word": " diagnosis", "probability": 0.85546875}, {"start": 3731.77, "end": 3732.81, "word": " تبع", "probability": 0.9422200520833334}, {"start": 3732.81, "end": 3734.03, "word": " الأمراض", "probability": 0.9864908854166666}, {"start": 3734.03, "end": 3735.23, "word": " المختلفة", "probability": 0.996826171875}, {"start": 3735.23, "end": 3735.83, "word": " اتبعوا", "probability": 0.76728515625}, {"start": 3735.83, "end": 3735.93, "word": " يا", "probability": 0.8544921875}, {"start": 3735.93, 
"end": 3736.21, "word": " شباب", "probability": 0.98876953125}, {"start": 3736.21, "end": 3736.85, "word": " الجدول", "probability": 0.927734375}, {"start": 3736.85, "end": 3737.13, "word": " لإنه", "probability": 0.8033447265625}, {"start": 3737.13, "end": 3737.33, "word": " مهم", "probability": 0.9921875}, {"start": 3737.33, "end": 3737.73, "word": " جدا", "probability": 0.9892578125}, {"start": 3737.73, "end": 3739.79, "word": " لحظوا", "probability": 0.6710205078125}, {"start": 3739.79, "end": 3740.03, "word": " ال", "probability": 0.87890625}, {"start": 3740.03, "end": 3740.63, "word": " بتاشي", "probability": 0.651611328125}, {"start": 3740.63, "end": 3741.69, "word": " هي", "probability": 0.7216796875}, {"start": 3741.69, "end": 3741.93, "word": " عبارة", "probability": 0.9566650390625}, {"start": 3741.93, "end": 3742.17, "word": " عن", "probability": 0.99755859375}, {"start": 3742.17, "end": 3743.21, "word": " characteristics", "probability": 0.89013671875}, {"start": 3743.21, "end": 3744.45, "word": " feature", "probability": 0.8642578125}, {"start": 3744.45, "end": 3745.33, "word": " موجودة", "probability": 0.98828125}, {"start": 3745.33, "end": 3745.97, "word": " في", "probability": 0.96875}, {"start": 3745.97, "end": 3746.09, "word": " ال", "probability": 0.83203125}], "temperature": 1.0}, {"id": 135, "seek": 377939, "start": 3751.29, "end": 3779.39, "text": "و هدول عبارة عن primary hemostasis و لا نهت؟ تبعين معايا؟ لكن هناك في disorders of coagulation و هذه معروفة واحدة تركيها، صح؟ نمرا اتنين، deep موجود بالظبط بتكون characteristics فيه ال coagulation بشكل كام Superficial Okomosis بنشوفها في ال platelet", "tokens": [2407, 8032, 3215, 12610, 6225, 3555, 9640, 3660, 18871, 6194, 415, 1761, 26632, 4032, 20193, 8717, 3224, 2655, 22807, 6055, 3555, 3615, 9957, 20449, 995, 25528, 22807, 44381, 34105, 4117, 8978, 20261, 295, 598, 559, 2776, 4032, 29538, 20449, 32887, 5172, 3660, 36764, 24401, 3660, 6055, 31747, 1829, 11296, 12399, 20328, 5016, 22807, 8717, 
2304, 23557, 1975, 2655, 1863, 9957, 12399, 2452, 3714, 29245, 23328, 20666, 19913, 3555, 9566, 39894, 30544, 10891, 8978, 3224, 2423, 598, 559, 2776, 4724, 8592, 28820, 9122, 10943, 4548, 1786, 831, 3477, 298, 8211, 44945, 8592, 38688, 11296, 8978, 2423, 3403, 15966], "avg_logprob": -0.38010204872306513, "compression_ratio": 1.4308300395256917, "no_speech_prob": 0.0, "words": [{"start": 3751.29, "end": 3751.47, "word": "و", "probability": 0.720703125}, {"start": 3751.47, "end": 3751.67, "word": " هدول", "probability": 0.8575846354166666}, {"start": 3751.67, "end": 3751.97, "word": " عبارة", "probability": 0.94189453125}, {"start": 3751.97, "end": 3752.09, "word": " عن", "probability": 0.99267578125}, {"start": 3752.09, "end": 3752.37, "word": " primary", "probability": 0.64794921875}, {"start": 3752.37, "end": 3753.03, "word": " hemostasis", "probability": 0.7782389322916666}, {"start": 3753.03, "end": 3753.17, "word": " و", "probability": 0.5283203125}, {"start": 3753.17, "end": 3753.21, "word": " لا", "probability": 0.91015625}, {"start": 3753.21, "end": 3753.65, "word": " نهت؟", "probability": 0.447265625}, {"start": 3753.65, "end": 3754.91, "word": " تبعين", "probability": 0.8623046875}, {"start": 3754.91, "end": 3755.77, "word": " معايا؟", "probability": 0.7020263671875}, {"start": 3755.77, "end": 3756.91, "word": " لكن", "probability": 0.92626953125}, {"start": 3756.91, "end": 3757.73, "word": " هناك", "probability": 0.732177734375}, {"start": 3757.73, "end": 3759.37, "word": " في", "probability": 0.8095703125}, {"start": 3759.37, "end": 3760.01, "word": " disorders", "probability": 0.85693359375}, {"start": 3760.01, "end": 3760.63, "word": " of", "probability": 0.9794921875}, {"start": 3760.63, "end": 3761.37, "word": " coagulation", "probability": 0.9876302083333334}, {"start": 3761.37, "end": 3762.15, "word": " و", "probability": 0.4130859375}, {"start": 3762.15, "end": 3762.25, "word": " هذه", "probability": 0.480712890625}, {"start": 3762.25, "end": 
3762.63, "word": " معروفة", "probability": 0.8787841796875}, {"start": 3762.63, "end": 3762.99, "word": " واحدة", "probability": 0.9384765625}, {"start": 3762.99, "end": 3763.77, "word": " تركيها،", "probability": 0.498876953125}, {"start": 3763.77, "end": 3765.09, "word": " صح؟", "probability": 0.9737955729166666}, {"start": 3765.09, "end": 3766.43, "word": " نمرا", "probability": 0.64697265625}, {"start": 3766.43, "end": 3767.81, "word": " اتنين،", "probability": 0.872265625}, {"start": 3767.81, "end": 3768.27, "word": " deep", "probability": 0.34521484375}, {"start": 3768.27, "end": 3770.79, "word": " موجود", "probability": 0.84912109375}, {"start": 3770.79, "end": 3771.47, "word": " بالظبط", "probability": 0.9842529296875}, {"start": 3771.47, "end": 3772.47, "word": " بتكون", "probability": 0.781005859375}, {"start": 3772.47, "end": 3773.31, "word": " characteristics", "probability": 0.875}, {"start": 3773.31, "end": 3773.73, "word": " فيه", "probability": 0.713134765625}, {"start": 3773.73, "end": 3773.75, "word": " ال", "probability": 0.11199951171875}, {"start": 3773.75, "end": 3774.25, "word": " coagulation", "probability": 0.8299153645833334}, {"start": 3774.25, "end": 3774.79, "word": " بشكل", "probability": 0.4175618489583333}, {"start": 3774.79, "end": 3775.67, "word": " كام", "probability": 0.533447265625}, {"start": 3775.67, "end": 3776.91, "word": " Superficial", "probability": 0.708740234375}, {"start": 3776.91, "end": 3777.65, "word": " Okomosis", "probability": 0.4991861979166667}, {"start": 3777.65, "end": 3778.67, "word": " بنشوفها", "probability": 0.8892822265625}, {"start": 3778.67, "end": 3778.81, "word": " في", "probability": 0.90673828125}, {"start": 3778.81, "end": 3778.93, "word": " ال", "probability": 0.94970703125}, {"start": 3778.93, "end": 3779.39, "word": " platelet", "probability": 0.5860595703125}], "temperature": 1.0}, {"id": 136, "seek": 380990, "start": 3780.2, "end": 3809.9, "text": "موجودة كمان في الموضوع بحكي على ايكموزر مش 
بتاشي hemarthrosis هي عبارة عن هل هي عبارة عن ديب soft tissue بنلاقيها في ال secondary hemostasis فهي characteristics of ايش؟ 100% delayed bleeding تأخر نشوفه في ال secondary hemostasis bleeding from superficial or scratches اللي هي الخدوش", "tokens": [2304, 29245, 23328, 3660, 9122, 2304, 7649, 8978, 9673, 2407, 11242, 45367, 4724, 5016, 4117, 1829, 15844, 1975, 1829, 24793, 2407, 11622, 2288, 37893, 39894, 33599, 1829, 8636, 18352, 2635, 271, 39896, 6225, 3555, 9640, 3660, 18871, 8032, 1211, 39896, 6225, 3555, 9640, 3660, 18871, 11778, 1829, 3555, 2787, 12404, 44945, 15040, 38436, 11296, 8978, 2423, 11396, 415, 1761, 26632, 6156, 3224, 1829, 10891, 295, 1975, 1829, 8592, 22807, 2319, 4, 20268, 19312, 6055, 10721, 34740, 8717, 8592, 38688, 3224, 8978, 2423, 11396, 415, 1761, 26632, 19312, 490, 34622, 420, 33695, 13672, 1829, 39896, 33962, 3215, 2407, 8592], "avg_logprob": -0.3087121067625104, "compression_ratio": 1.5685483870967742, "no_speech_prob": 0.0, "words": [{"start": 3780.2, "end": 3780.88, "word": "موجودة", "probability": 0.95654296875}, {"start": 3780.88, "end": 3781.18, "word": " كمان", "probability": 0.947265625}, {"start": 3781.18, "end": 3781.38, "word": " في", "probability": 0.86962890625}, {"start": 3781.38, "end": 3781.86, "word": " الموضوع", "probability": 0.638427734375}, {"start": 3781.86, "end": 3782.92, "word": " بحكي", "probability": 0.7073974609375}, {"start": 3782.92, "end": 3783.08, "word": " على", "probability": 0.74267578125}, {"start": 3783.08, "end": 3783.74, "word": " ايكموزر", "probability": 0.617279052734375}, {"start": 3783.74, "end": 3784.0, "word": " مش", "probability": 0.84326171875}, {"start": 3784.0, "end": 3785.2, "word": " بتاشي", "probability": 0.6663411458333334}, {"start": 3785.2, "end": 3786.62, "word": " hemarthrosis", "probability": 0.7161865234375}, {"start": 3786.62, "end": 3787.54, "word": " هي", "probability": 0.603515625}, {"start": 3787.54, "end": 3788.02, "word": " عبارة", "probability": 0.979248046875}, 
{"start": 3788.02, "end": 3788.52, "word": " عن", "probability": 0.99609375}, {"start": 3788.52, "end": 3788.82, "word": " هل", "probability": 0.36785888671875}, {"start": 3788.82, "end": 3788.9, "word": " هي", "probability": 0.70556640625}, {"start": 3788.9, "end": 3789.08, "word": " عبارة", "probability": 0.9915771484375}, {"start": 3789.08, "end": 3789.28, "word": " عن", "probability": 0.97509765625}, {"start": 3789.28, "end": 3789.76, "word": " ديب", "probability": 0.9099934895833334}, {"start": 3789.76, "end": 3790.24, "word": " soft", "probability": 0.6162109375}, {"start": 3790.24, "end": 3790.68, "word": " tissue", "probability": 0.93359375}, {"start": 3790.68, "end": 3792.02, "word": " بنلاقيها", "probability": 0.6646728515625}, {"start": 3792.02, "end": 3792.18, "word": " في", "probability": 0.9404296875}, {"start": 3792.18, "end": 3792.32, "word": " ال", "probability": 0.55615234375}, {"start": 3792.32, "end": 3792.74, "word": " secondary", "probability": 0.78125}, {"start": 3792.74, "end": 3793.3, "word": " hemostasis", "probability": 0.736572265625}, {"start": 3793.3, "end": 3793.64, "word": " فهي", "probability": 0.7511393229166666}, {"start": 3793.64, "end": 3794.22, "word": " characteristics", "probability": 0.7529296875}, {"start": 3794.22, "end": 3794.62, "word": " of", "probability": 0.951171875}, {"start": 3794.62, "end": 3795.96, "word": " ايش؟", "probability": 0.73486328125}, {"start": 3795.96, "end": 3796.82, "word": " 100", "probability": 0.32763671875}, {"start": 3796.82, "end": 3797.56, "word": "%", "probability": 0.958984375}, {"start": 3797.56, "end": 3798.28, "word": " delayed", "probability": 0.69384765625}, {"start": 3798.28, "end": 3798.98, "word": " bleeding", "probability": 0.94775390625}, {"start": 3798.98, "end": 3800.88, "word": " تأخر", "probability": 0.5694986979166666}, {"start": 3800.88, "end": 3802.42, "word": " نشوفه", "probability": 0.8187255859375}, {"start": 3802.42, "end": 3803.24, "word": " في", "probability": 
0.9619140625}, {"start": 3803.24, "end": 3803.38, "word": " ال", "probability": 0.78076171875}, {"start": 3803.38, "end": 3803.86, "word": " secondary", "probability": 0.8515625}, {"start": 3803.86, "end": 3804.7, "word": " hemostasis", "probability": 0.8460286458333334}, {"start": 3804.7, "end": 3805.92, "word": " bleeding", "probability": 0.904296875}, {"start": 3805.92, "end": 3806.22, "word": " from", "probability": 0.9130859375}, {"start": 3806.22, "end": 3806.94, "word": " superficial", "probability": 0.91064453125}, {"start": 3806.94, "end": 3807.52, "word": " or", "probability": 0.9072265625}, {"start": 3807.52, "end": 3808.3, "word": " scratches", "probability": 0.8857421875}, {"start": 3808.3, "end": 3808.94, "word": " اللي", "probability": 0.884765625}, {"start": 3808.94, "end": 3809.14, "word": " هي", "probability": 0.91748046875}, {"start": 3809.14, "end": 3809.9, "word": " الخدوش", "probability": 0.96533203125}], "temperature": 1.0}, {"id": 137, "seek": 383791, "start": 3811.17, "end": 3837.91, "text": "اللي بتقوم فيها شباب ال primary مظبوط resistant and often profuse ليش؟ لأنه إذا مافيش خلل إذا في خلل في ال platelet اللي بتقوم بهذه العملية مش مبروك فتنتشر إنما في ال secondary minimal ال sex of patient", "tokens": [6027, 20292, 39894, 4587, 20498, 8978, 11296, 13412, 3555, 16758, 2423, 6194, 3714, 19913, 3555, 2407, 9566, 20383, 293, 2049, 1740, 438, 32239, 8592, 22807, 5296, 33456, 3224, 11933, 15730, 19446, 41185, 8592, 16490, 1211, 1211, 11933, 15730, 8978, 16490, 1211, 1211, 8978, 2423, 3403, 15966, 13672, 1829, 39894, 4587, 20498, 39627, 24192, 18863, 42213, 10632, 37893, 3714, 26890, 2407, 4117, 6156, 2655, 29399, 46309, 36145, 15042, 8978, 2423, 11396, 13206, 2423, 3260, 295, 4537], "avg_logprob": -0.25102797111398295, "compression_ratio": 1.5075376884422111, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 3811.17, "end": 3811.55, "word": "اللي", "probability": 0.897705078125}, {"start": 3811.55, "end": 3811.91, "word": " بتقوم", 
"probability": 0.896484375}, {"start": 3811.91, "end": 3812.23, "word": " فيها", "probability": 0.95703125}, {"start": 3812.23, "end": 3812.69, "word": " شباب", "probability": 0.690185546875}, {"start": 3812.69, "end": 3813.55, "word": " ال", "probability": 0.67578125}, {"start": 3813.55, "end": 3815.33, "word": " primary", "probability": 0.75634765625}, {"start": 3815.33, "end": 3816.15, "word": " مظبوط", "probability": 0.87568359375}, {"start": 3816.15, "end": 3816.77, "word": " resistant", "probability": 0.08770751953125}, {"start": 3816.77, "end": 3818.51, "word": " and", "probability": 0.755859375}, {"start": 3818.51, "end": 3820.51, "word": " often", "probability": 0.71826171875}, {"start": 3820.51, "end": 3821.89, "word": " profuse", "probability": 0.902099609375}, {"start": 3821.89, "end": 3825.07, "word": " ليش؟", "probability": 0.8175455729166666}, {"start": 3825.07, "end": 3825.41, "word": " لأنه", "probability": 0.7604166666666666}, {"start": 3825.41, "end": 3825.51, "word": " إذا", "probability": 0.8203125}, {"start": 3825.51, "end": 3825.95, "word": " مافيش", "probability": 0.9500325520833334}, {"start": 3825.95, "end": 3826.55, "word": " خلل", "probability": 0.7867838541666666}, {"start": 3826.55, "end": 3826.75, "word": " إذا", "probability": 0.785888671875}, {"start": 3826.75, "end": 3826.87, "word": " في", "probability": 0.88330078125}, {"start": 3826.87, "end": 3827.15, "word": " خلل", "probability": 0.9544270833333334}, {"start": 3827.15, "end": 3827.31, "word": " في", "probability": 0.91748046875}, {"start": 3827.31, "end": 3827.39, "word": " ال", "probability": 0.927734375}, {"start": 3827.39, "end": 3827.81, "word": " platelet", "probability": 0.54638671875}, {"start": 3827.81, "end": 3828.43, "word": " اللي", "probability": 0.88427734375}, {"start": 3828.43, "end": 3828.87, "word": " بتقوم", "probability": 0.9519856770833334}, {"start": 3828.87, "end": 3829.41, "word": " بهذه", "probability": 0.875}, {"start": 3829.41, "end": 3830.85, 
"word": " العملية", "probability": 0.9747721354166666}, {"start": 3830.85, "end": 3831.11, "word": " مش", "probability": 0.982421875}, {"start": 3831.11, "end": 3831.61, "word": " مبروك", "probability": 0.50531005859375}, {"start": 3831.61, "end": 3833.19, "word": " فتنتشر", "probability": 0.7740478515625}, {"start": 3833.19, "end": 3833.67, "word": " إنما", "probability": 0.6978759765625}, {"start": 3833.67, "end": 3833.81, "word": " في", "probability": 0.95947265625}, {"start": 3833.81, "end": 3833.87, "word": " ال", "probability": 0.8056640625}, {"start": 3833.87, "end": 3834.29, "word": " secondary", "probability": 0.94091796875}, {"start": 3834.29, "end": 3834.75, "word": " minimal", "probability": 0.71044921875}, {"start": 3834.75, "end": 3836.99, "word": " ال", "probability": 0.9228515625}, {"start": 3836.99, "end": 3837.35, "word": " sex", "probability": 0.71875}, {"start": 3837.35, "end": 3837.53, "word": " of", "probability": 0.955078125}, {"start": 3837.53, "end": 3837.91, "word": " patient", "probability": 0.6494140625}], "temperature": 1.0}, {"id": 138, "seek": 386967, "start": 3840.61, "end": 3869.67, "text": "المشاكل أو معظم الأمراض اللي بتتعلق بال primary بنشوفها في ال female أكتر من ال male لكن في ال secondary تسعين فمية من الأمراض في ال male موجودة و مش موجودة في ال female عشان هال X أغلبها X يعني ممكن و بال .. 
يبقى في اتعادل ماشي why clinical cases يا شباب شايفين هال nail بت hematoma ماشي", "tokens": [45340, 8592, 995, 28820, 34051, 20449, 19913, 2304, 16247, 29973, 46958, 13672, 1829, 39894, 2655, 30241, 4587, 20666, 6194, 44945, 8592, 38688, 11296, 8978, 2423, 6556, 5551, 4117, 2655, 2288, 9154, 2423, 7133, 44381, 8978, 2423, 11396, 6055, 3794, 3615, 9957, 6156, 2304, 10632, 9154, 16247, 29973, 46958, 8978, 2423, 7133, 3714, 29245, 23328, 3660, 4032, 37893, 3714, 29245, 23328, 3660, 8978, 2423, 6556, 6225, 8592, 7649, 8032, 6027, 1783, 5551, 17082, 46152, 11296, 1783, 37495, 22653, 3714, 43020, 4032, 20666, 4386, 7251, 3555, 4587, 7578, 8978, 1975, 2655, 3615, 18513, 1211, 3714, 33599, 1829, 983, 9115, 3331, 35186, 13412, 3555, 16758, 13412, 995, 33911, 9957, 8032, 6027, 10173, 39894, 8636, 267, 6440, 3714, 33599, 1829], "avg_logprob": -0.24131944393500304, "compression_ratio": 1.7716535433070866, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 3840.6099999999997, "end": 3841.37, "word": "المشاكل", "probability": 0.8680419921875}, {"start": 3841.37, "end": 3841.71, "word": " أو", "probability": 0.8583984375}, {"start": 3841.71, "end": 3842.09, "word": " معظم", "probability": 0.9915364583333334}, {"start": 3842.09, "end": 3842.49, "word": " الأمراض", "probability": 0.9807942708333334}, {"start": 3842.49, "end": 3842.61, "word": " اللي", "probability": 0.6868896484375}, {"start": 3842.61, "end": 3843.07, "word": " بتتعلق", "probability": 0.9696044921875}, {"start": 3843.07, "end": 3843.21, "word": " بال", "probability": 0.97900390625}, {"start": 3843.21, "end": 3843.65, "word": " primary", "probability": 0.82275390625}, {"start": 3843.65, "end": 3844.37, "word": " بنشوفها", "probability": 0.72491455078125}, {"start": 3844.37, "end": 3844.49, "word": " في", "probability": 0.80224609375}, {"start": 3844.49, "end": 3844.55, "word": " ال", "probability": 0.92041015625}, {"start": 3844.55, "end": 3844.85, "word": " female", "probability": 0.9775390625}, 
{"start": 3844.85, "end": 3845.17, "word": " أكتر", "probability": 0.956787109375}, {"start": 3845.17, "end": 3845.35, "word": " من", "probability": 0.994140625}, {"start": 3845.35, "end": 3845.49, "word": " ال", "probability": 0.90283203125}, {"start": 3845.49, "end": 3845.85, "word": " male", "probability": 0.97216796875}, {"start": 3845.85, "end": 3847.09, "word": " لكن", "probability": 0.8720703125}, {"start": 3847.09, "end": 3847.47, "word": " في", "probability": 0.9580078125}, {"start": 3847.47, "end": 3848.61, "word": " ال", "probability": 0.9384765625}, {"start": 3848.61, "end": 3849.35, "word": " secondary", "probability": 0.94189453125}, {"start": 3849.35, "end": 3850.31, "word": " تسعين", "probability": 0.8046875}, {"start": 3850.31, "end": 3850.69, "word": " فمية", "probability": 0.6520182291666666}, {"start": 3850.69, "end": 3850.83, "word": " من", "probability": 0.98193359375}, {"start": 3850.83, "end": 3851.19, "word": " الأمراض", "probability": 0.9837239583333334}, {"start": 3851.19, "end": 3851.37, "word": " في", "probability": 0.91259765625}, {"start": 3851.37, "end": 3851.47, "word": " ال", "probability": 0.95556640625}, {"start": 3851.47, "end": 3851.65, "word": " male", "probability": 0.9677734375}, {"start": 3851.65, "end": 3852.33, "word": " موجودة", "probability": 0.9913330078125}, {"start": 3852.33, "end": 3853.05, "word": " و", "probability": 0.900390625}, {"start": 3853.05, "end": 3853.15, "word": " مش", "probability": 0.77587890625}, {"start": 3853.15, "end": 3853.55, "word": " موجودة", "probability": 0.99365234375}, {"start": 3853.55, "end": 3853.71, "word": " في", "probability": 0.9111328125}, {"start": 3853.71, "end": 3853.75, "word": " ال", "probability": 0.896484375}, {"start": 3853.75, "end": 3854.13, "word": " female", "probability": 0.861328125}, {"start": 3854.13, "end": 3854.81, "word": " عشان", "probability": 0.6555582682291666}, {"start": 3854.81, "end": 3854.97, "word": " هال", "probability": 0.737548828125}, {"start": 
3854.97, "end": 3855.23, "word": " X", "probability": 0.381591796875}, {"start": 3855.23, "end": 3856.55, "word": " أغلبها", "probability": 0.95068359375}, {"start": 3856.55, "end": 3856.95, "word": " X", "probability": 0.7705078125}, {"start": 3856.95, "end": 3858.21, "word": " يعني", "probability": 0.51763916015625}, {"start": 3858.21, "end": 3858.67, "word": " ممكن", "probability": 0.6318359375}, {"start": 3858.67, "end": 3858.67, "word": " و", "probability": 0.266357421875}, {"start": 3858.67, "end": 3859.05, "word": " بال", "probability": 0.4404296875}, {"start": 3859.05, "end": 3859.35, "word": " ..", "probability": 0.330078125}, {"start": 3859.35, "end": 3859.63, "word": " يبقى", "probability": 0.6944580078125}, {"start": 3859.63, "end": 3859.77, "word": " في", "probability": 0.90771484375}, {"start": 3859.77, "end": 3860.25, "word": " اتعادل", "probability": 0.722314453125}, {"start": 3860.25, "end": 3862.31, "word": " ماشي", "probability": 0.7831217447916666}, {"start": 3862.31, "end": 3862.61, "word": " why", "probability": 0.411865234375}, {"start": 3862.61, "end": 3862.99, "word": " clinical", "probability": 0.9169921875}, {"start": 3862.99, "end": 3863.45, "word": " cases", "probability": 0.921875}, {"start": 3863.45, "end": 3863.63, "word": " يا", "probability": 0.91455078125}, {"start": 3863.63, "end": 3863.95, "word": " شباب", "probability": 0.9913736979166666}, {"start": 3863.95, "end": 3865.03, "word": " شايفين", "probability": 0.816650390625}, {"start": 3865.03, "end": 3865.29, "word": " هال", "probability": 0.828369140625}, {"start": 3865.29, "end": 3866.05, "word": " nail", "probability": 0.830078125}, {"start": 3866.05, "end": 3866.97, "word": " بت", "probability": 0.7880859375}, {"start": 3866.97, "end": 3868.03, "word": " hematoma", "probability": 0.8821614583333334}, {"start": 3868.03, "end": 3869.67, "word": " ماشي", "probability": 0.82861328125}], "temperature": 1.0}, {"id": 139, "seek": 389992, "start": 3870.06, "end": 3899.92, "text": 
"هذه زى واحد وجع على رجله على أصمره بصير red و blue و brown مر في مراحل ألوان مختلفة صح؟ وهذا contusion، إيش يعني contusion؟ نار ممسكة؟ لأ لأ لأ، contusion جرسة بتعرف لو واحد مسك تاني بذردية، ماشي؟ هي عبارة عن contusion، دغّة بالعربي، ماشي؟ أكلت إيه؟", "tokens": [3224, 24192, 30767, 7578, 36764, 24401, 4032, 7435, 3615, 15844, 12602, 7435, 43761, 15844, 5551, 9381, 29973, 3224, 4724, 9381, 13546, 2182, 4032, 3344, 4032, 6292, 3714, 2288, 8978, 3714, 23557, 5016, 1211, 5551, 1211, 2407, 7649, 3714, 46456, 46538, 3660, 20328, 5016, 22807, 37037, 15730, 660, 5704, 12399, 11933, 1829, 8592, 37495, 22653, 660, 5704, 22807, 8717, 9640, 3714, 2304, 3794, 4117, 3660, 22807, 5296, 10721, 5296, 10721, 5296, 10721, 12399, 660, 5704, 10874, 2288, 3794, 3660, 39894, 3615, 28480, 45164, 36764, 24401, 47524, 4117, 6055, 7649, 1829, 4724, 8848, 2288, 3215, 10632, 12399, 3714, 33599, 1829, 22807, 39896, 6225, 3555, 9640, 3660, 18871, 660, 5704, 12399, 11778, 17082, 11703, 3660, 20666, 3615, 2288, 21292, 12399, 3714, 33599, 1829, 22807, 5551, 28820, 2655, 11933, 1829, 3224, 22807], "avg_logprob": -0.3078972868217054, "compression_ratio": 1.6932773109243697, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3870.06, "end": 3870.54, "word": "هذه", "probability": 0.6380615234375}, {"start": 3870.54, "end": 3870.74, "word": " زى", "probability": 0.60595703125}, {"start": 3870.74, "end": 3871.16, "word": " واحد", "probability": 0.968994140625}, {"start": 3871.16, "end": 3872.0, "word": " وجع", "probability": 0.690185546875}, {"start": 3872.0, "end": 3872.18, "word": " على", "probability": 0.8681640625}, {"start": 3872.18, "end": 3872.64, "word": " رجله", "probability": 0.97119140625}, {"start": 3872.64, "end": 3873.1, "word": " على", "probability": 0.56982421875}, {"start": 3873.1, "end": 3873.78, "word": " أصمره", "probability": 0.82666015625}, {"start": 3873.78, "end": 3875.56, "word": " بصير", "probability": 0.4681193033854167}, {"start": 3875.56, "end": 3876.18, "word": " 
red", "probability": 0.35205078125}, {"start": 3876.18, "end": 3876.48, "word": " و", "probability": 0.9287109375}, {"start": 3876.48, "end": 3876.82, "word": " blue", "probability": 0.73291015625}, {"start": 3876.82, "end": 3877.22, "word": " و", "probability": 0.9873046875}, {"start": 3877.22, "end": 3877.82, "word": " brown", "probability": 0.880859375}, {"start": 3877.82, "end": 3878.98, "word": " مر", "probability": 0.393798828125}, {"start": 3878.98, "end": 3879.12, "word": " في", "probability": 0.89892578125}, {"start": 3879.12, "end": 3879.62, "word": " مراحل", "probability": 0.9739990234375}, {"start": 3879.62, "end": 3879.98, "word": " ألوان", "probability": 0.9638671875}, {"start": 3879.98, "end": 3880.64, "word": " مختلفة", "probability": 0.990966796875}, {"start": 3880.64, "end": 3881.42, "word": " صح؟", "probability": 0.7732747395833334}, {"start": 3881.42, "end": 3883.36, "word": " وهذا", "probability": 0.556640625}, {"start": 3883.36, "end": 3884.18, "word": " contusion،", "probability": 0.625244140625}, {"start": 3884.18, "end": 3884.36, "word": " إيش", "probability": 0.7625325520833334}, {"start": 3884.36, "end": 3884.52, "word": " يعني", "probability": 0.8388671875}, {"start": 3884.52, "end": 3886.66, "word": " contusion؟", "probability": 0.8727213541666666}, {"start": 3886.66, "end": 3888.18, "word": " نار", "probability": 0.3917236328125}, {"start": 3888.18, "end": 3889.22, "word": " ممسكة؟", "probability": 0.6765950520833334}, {"start": 3889.22, "end": 3889.74, "word": " لأ", "probability": 0.70751953125}, {"start": 3889.74, "end": 3889.92, "word": " لأ", "probability": 0.62841796875}, {"start": 3889.92, "end": 3890.08, "word": " لأ،", "probability": 0.5338541666666666}, {"start": 3890.08, "end": 3890.58, "word": " contusion", "probability": 0.868896484375}, {"start": 3890.58, "end": 3891.22, "word": " جرسة", "probability": 0.65826416015625}, {"start": 3891.22, "end": 3891.88, "word": " بتعرف", "probability": 0.7664388020833334}, {"start": 
3891.88, "end": 3892.06, "word": " لو", "probability": 0.9443359375}, {"start": 3892.06, "end": 3892.26, "word": " واحد", "probability": 0.99169921875}, {"start": 3892.26, "end": 3892.56, "word": " مسك", "probability": 0.695068359375}, {"start": 3892.56, "end": 3892.82, "word": " تاني", "probability": 0.7652994791666666}, {"start": 3892.82, "end": 3894.42, "word": " بذردية،", "probability": 0.6946207682291666}, {"start": 3894.42, "end": 3895.62, "word": " ماشي؟", "probability": 0.7320556640625}, {"start": 3895.62, "end": 3895.92, "word": " هي", "probability": 0.86376953125}, {"start": 3895.92, "end": 3896.16, "word": " عبارة", "probability": 0.9505615234375}, {"start": 3896.16, "end": 3896.28, "word": " عن", "probability": 0.98193359375}, {"start": 3896.28, "end": 3897.0, "word": " contusion،", "probability": 0.8701171875}, {"start": 3897.0, "end": 3897.52, "word": " دغّة", "probability": 0.773681640625}, {"start": 3897.52, "end": 3898.44, "word": " بالعربي،", "probability": 0.94208984375}, {"start": 3898.44, "end": 3899.06, "word": " ماشي؟", "probability": 0.8677978515625}, {"start": 3899.06, "end": 3899.52, "word": " أكلت", "probability": 0.9021809895833334}, {"start": 3899.52, "end": 3899.92, "word": " إيه؟", "probability": 0.8919677734375}], "temperature": 1.0}, {"id": 140, "seek": 392954, "start": 3901.14, "end": 3929.54, "text": "الجلد ده .. فهي بتعمل hematoma ولا بتعمل ايه؟ بتعمل .. 
بتعمل hematoma فهذه عبارة عن ال contusion اللي فيه contusion with laceration ماشي؟ او trauma with laceration اللي بيصير فيها مزعج شايفين اللي بتتعاشى اللي حوالينا؟ كشف دي انا نفسي الكشف؟ انا صار فيه مزعج طبعا هذه big hematoma مانشوفاش في ال primary anesthesia مانشوفاش السكدة", "tokens": [6027, 7435, 1211, 3215, 11778, 3224, 4386, 6156, 3224, 1829, 39894, 25957, 1211, 8636, 267, 6440, 49429, 39894, 25957, 1211, 1975, 1829, 3224, 22807, 39894, 25957, 1211, 4386, 39894, 25957, 1211, 8636, 267, 6440, 6156, 3224, 24192, 6225, 3555, 9640, 3660, 18871, 2423, 660, 5704, 13672, 1829, 8978, 3224, 660, 5704, 365, 28027, 5053, 3714, 33599, 1829, 22807, 1975, 2407, 11407, 365, 28027, 5053, 13672, 1829, 4724, 1829, 9381, 13546, 8978, 11296, 3714, 11622, 3615, 7435, 13412, 995, 33911, 9957, 13672, 1829, 39894, 2655, 3615, 33599, 7578, 13672, 1829, 11331, 2407, 6027, 1829, 8315, 22807, 9122, 8592, 5172, 11778, 1829, 1975, 8315, 8717, 36178, 1829, 33251, 8592, 5172, 22807, 1975, 8315, 20328, 9640, 8978, 3224, 3714, 11622, 3615, 7435, 23032, 3555, 3615, 995, 29538, 955, 8636, 267, 6440, 19446, 1863, 8592, 38688, 33599, 8978, 2423, 6194, 31750, 42464, 19446, 1863, 8592, 38688, 33599, 21136, 4117, 41891], "avg_logprob": -0.3392857223951898, "compression_ratio": 1.9090909090909092, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 3901.1400000000003, "end": 3901.78, "word": "الجلد", "probability": 0.7430419921875}, {"start": 3901.78, "end": 3902.0, "word": " ده", "probability": 0.866455078125}, {"start": 3902.0, "end": 3902.34, "word": " ..", "probability": 0.2015380859375}, {"start": 3902.34, "end": 3902.56, "word": " فهي", "probability": 0.6869303385416666}, {"start": 3902.56, "end": 3902.86, "word": " بتعمل", "probability": 0.982421875}, {"start": 3902.86, "end": 3903.2, "word": " hematoma", "probability": 0.6663411458333334}, {"start": 3903.2, "end": 3903.32, "word": " ولا", "probability": 0.92822265625}, {"start": 3903.32, "end": 3903.62, "word": " بتعمل", 
"probability": 0.8863932291666666}, {"start": 3903.62, "end": 3903.94, "word": " ايه؟", "probability": 0.77020263671875}, {"start": 3903.94, "end": 3904.26, "word": " بتعمل", "probability": 0.93994140625}, {"start": 3904.26, "end": 3904.3, "word": " ..", "probability": 0.315185546875}, {"start": 3904.3, "end": 3904.78, "word": " بتعمل", "probability": 0.9248046875}, {"start": 3904.78, "end": 3905.1, "word": " hematoma", "probability": 0.9012044270833334}, {"start": 3905.1, "end": 3905.54, "word": " فهذه", "probability": 0.7389322916666666}, {"start": 3905.54, "end": 3905.8, "word": " عبارة", "probability": 0.9451904296875}, {"start": 3905.8, "end": 3905.92, "word": " عن", "probability": 0.9814453125}, {"start": 3905.92, "end": 3906.06, "word": " ال", "probability": 0.53173828125}, {"start": 3906.06, "end": 3906.28, "word": " contusion", "probability": 0.759033203125}, {"start": 3906.28, "end": 3906.84, "word": " اللي", "probability": 0.668212890625}, {"start": 3906.84, "end": 3907.04, "word": " فيه", "probability": 0.7939453125}, {"start": 3907.04, "end": 3907.6, "word": " contusion", "probability": 0.920654296875}, {"start": 3907.6, "end": 3908.5, "word": " with", "probability": 0.921875}, {"start": 3908.5, "end": 3909.68, "word": " laceration", "probability": 0.831298828125}, {"start": 3909.68, "end": 3911.08, "word": " ماشي؟", "probability": 0.68408203125}, {"start": 3911.08, "end": 3911.32, "word": " او", "probability": 0.823486328125}, {"start": 3911.32, "end": 3911.76, "word": " trauma", "probability": 0.99072265625}, {"start": 3911.76, "end": 3912.06, "word": " with", "probability": 0.93212890625}, {"start": 3912.06, "end": 3912.8, "word": " laceration", "probability": 0.98583984375}, {"start": 3912.8, "end": 3913.36, "word": " اللي", "probability": 0.9619140625}, {"start": 3913.36, "end": 3913.64, "word": " بيصير", "probability": 0.960205078125}, {"start": 3913.64, "end": 3913.86, "word": " فيها", "probability": 0.9912109375}, {"start": 3913.86, "end": 
3914.3, "word": " مزعج", "probability": 0.8134765625}, {"start": 3914.3, "end": 3915.7, "word": " شايفين", "probability": 0.78326416015625}, {"start": 3915.7, "end": 3915.86, "word": " اللي", "probability": 0.934814453125}, {"start": 3915.86, "end": 3916.36, "word": " بتتعاشى", "probability": 0.6271484375}, {"start": 3916.36, "end": 3916.48, "word": " اللي", "probability": 0.883544921875}, {"start": 3916.48, "end": 3917.42, "word": " حوالينا؟", "probability": 0.8267415364583334}, {"start": 3917.42, "end": 3917.72, "word": " كشف", "probability": 0.3585611979166667}, {"start": 3917.72, "end": 3917.84, "word": " دي", "probability": 0.68505859375}, {"start": 3917.84, "end": 3917.96, "word": " انا", "probability": 0.20269775390625}, {"start": 3917.96, "end": 3918.2, "word": " نفسي", "probability": 0.702880859375}, {"start": 3918.2, "end": 3918.92, "word": " الكشف؟", "probability": 0.74505615234375}, {"start": 3918.92, "end": 3919.22, "word": " انا", "probability": 0.447021484375}, {"start": 3919.22, "end": 3919.44, "word": " صار", "probability": 0.79736328125}, {"start": 3919.44, "end": 3919.6, "word": " فيه", "probability": 0.723388671875}, {"start": 3919.6, "end": 3920.52, "word": " مزعج", "probability": 0.974365234375}, {"start": 3920.52, "end": 3923.9, "word": " طبعا", "probability": 0.986083984375}, {"start": 3923.9, "end": 3924.1, "word": " هذه", "probability": 0.505859375}, {"start": 3924.1, "end": 3924.48, "word": " big", "probability": 0.787109375}, {"start": 3924.48, "end": 3925.34, "word": " hematoma", "probability": 0.9630533854166666}, {"start": 3925.34, "end": 3926.5, "word": " مانشوفاش", "probability": 0.6869140625}, {"start": 3926.5, "end": 3926.62, "word": " في", "probability": 0.8955078125}, {"start": 3926.62, "end": 3926.68, "word": " ال", "probability": 0.9697265625}, {"start": 3926.68, "end": 3926.96, "word": " primary", "probability": 0.177490234375}, {"start": 3926.96, "end": 3927.62, "word": " anesthesia", "probability": 0.525177001953125}, 
{"start": 3927.62, "end": 3929.02, "word": " مانشوفاش", "probability": 0.83134765625}, {"start": 3929.02, "end": 3929.54, "word": " السكدة", "probability": 0.4619954427083333}], "temperature": 1.0}, {"id": 141, "seek": 396168, "start": 3932.36, "end": 3961.68, "text": "هذه عبارة عن بتاشي و Echomoses هذي بتاشي و هذي Echomoses شايفين كده ايش؟ صارت patch طبعا فى ترمبوسيتوكينى نفس الحكاية شباب شايفين؟ طبعا هذى شوفوا ايش .. هتصوروا الماء القاتى شايفين شباب؟", "tokens": [3224, 24192, 6225, 3555, 9640, 3660, 18871, 39894, 33599, 1829, 4032, 462, 339, 298, 4201, 8032, 8848, 1829, 39894, 33599, 1829, 4032, 8032, 8848, 1829, 462, 339, 298, 4201, 13412, 995, 33911, 9957, 9122, 3215, 3224, 1975, 1829, 8592, 22807, 20328, 9640, 2655, 9972, 23032, 3555, 3615, 995, 6156, 7578, 6055, 2288, 2304, 3555, 41779, 36081, 2407, 4117, 9957, 7578, 8717, 36178, 21542, 4117, 995, 10632, 13412, 3555, 16758, 13412, 995, 33911, 9957, 22807, 23032, 3555, 3615, 995, 8032, 8848, 7578, 13412, 38688, 14407, 1975, 1829, 8592, 4386, 8032, 2655, 9381, 13063, 14407, 9673, 16606, 25062, 9307, 7578, 13412, 995, 33911, 9957, 13412, 3555, 16758, 22807], "avg_logprob": -0.4483060881356213, "compression_ratio": 1.6844919786096257, "no_speech_prob": 0.0, "words": [{"start": 3932.36, "end": 3932.78, "word": "هذه", "probability": 0.841552734375}, {"start": 3932.78, "end": 3933.16, "word": " عبارة", "probability": 0.8533935546875}, {"start": 3933.16, "end": 3933.4, "word": " عن", "probability": 0.99365234375}, {"start": 3933.4, "end": 3934.06, "word": " بتاشي", "probability": 0.6785074869791666}, {"start": 3934.06, "end": 3934.36, "word": " و", "probability": 0.8681640625}, {"start": 3934.36, "end": 3935.24, "word": " Echomoses", "probability": 0.341461181640625}, {"start": 3935.24, "end": 3937.46, "word": " هذي", "probability": 0.3389485677083333}, {"start": 3937.46, "end": 3937.96, "word": " بتاشي", "probability": 0.9296875}, {"start": 3937.96, "end": 3939.26, "word": " و", "probability": 0.059173583984375}, 
{"start": 3939.26, "end": 3940.7, "word": " هذي", "probability": 0.7728678385416666}, {"start": 3940.7, "end": 3941.2, "word": " Echomoses", "probability": 0.7825927734375}, {"start": 3941.2, "end": 3941.52, "word": " شايفين", "probability": 0.69781494140625}, {"start": 3941.52, "end": 3941.88, "word": " كده", "probability": 0.781494140625}, {"start": 3941.88, "end": 3942.34, "word": " ايش؟", "probability": 0.61163330078125}, {"start": 3942.34, "end": 3942.62, "word": " صارت", "probability": 0.6378580729166666}, {"start": 3942.62, "end": 3942.92, "word": " patch", "probability": 0.458740234375}, {"start": 3942.92, "end": 3946.18, "word": " طبعا", "probability": 0.8897705078125}, {"start": 3946.18, "end": 3946.34, "word": " فى", "probability": 0.610595703125}, {"start": 3946.34, "end": 3947.5, "word": " ترمبوسيتوكينى", "probability": 0.47811279296875}, {"start": 3947.5, "end": 3947.98, "word": " نفس", "probability": 0.889892578125}, {"start": 3947.98, "end": 3948.36, "word": " الحكاية", "probability": 0.969482421875}, {"start": 3948.36, "end": 3948.76, "word": " شباب", "probability": 0.93359375}, {"start": 3948.76, "end": 3953.28, "word": " شايفين؟", "probability": 0.8060546875}, {"start": 3953.28, "end": 3955.74, "word": " طبعا", "probability": 0.85760498046875}, {"start": 3955.74, "end": 3956.04, "word": " هذى", "probability": 0.7115071614583334}, {"start": 3956.04, "end": 3956.8, "word": " شوفوا", "probability": 0.7410481770833334}, {"start": 3956.8, "end": 3956.94, "word": " ايش", "probability": 0.5547688802083334}, {"start": 3956.94, "end": 3957.5, "word": " ..", "probability": 0.1541748046875}, {"start": 3957.5, "end": 3958.28, "word": " هتصوروا", "probability": 0.90859375}, {"start": 3958.28, "end": 3958.72, "word": " الماء", "probability": 0.350830078125}, {"start": 3958.72, "end": 3960.48, "word": " القاتى", "probability": 0.709228515625}, {"start": 3960.48, "end": 3961.06, "word": " شايفين", "probability": 0.9542236328125}, {"start": 3961.06, "end": 
3961.68, "word": " شباب؟", "probability": 0.9188232421875}], "temperature": 1.0}, {"id": 142, "seek": 399120, "start": 3962.08, "end": 3991.2, "text": "عبارة عن bleeding في الرقبة و الرقبة ديب ولا superficial؟ ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. ديب .. دي", "tokens": [3615, 3555, 9640, 3660, 18871, 19312, 8978, 34892, 4587, 49401, 4032, 34892, 4587, 49401, 11778, 1829, 3555, 49429, 1687, 69, 14730, 22807, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829, 3555, 4386, 11778, 1829], "avg_logprob": 
-0.06416666666666666, "compression_ratio": 6.566666666666666, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 3962.08, "end": 3962.52, "word": "عبارة", "probability": 0.912353515625}, {"start": 3962.52, "end": 3962.86, "word": " عن", "probability": 0.978515625}, {"start": 3962.86, "end": 3963.28, "word": " bleeding", "probability": 0.9189453125}, {"start": 3963.28, "end": 3964.56, "word": " في", "probability": 0.8486328125}, {"start": 3964.56, "end": 3965.04, "word": " الرقبة", "probability": 0.9000651041666666}, {"start": 3965.04, "end": 3967.36, "word": " و", "probability": 0.37646484375}, {"start": 3967.36, "end": 3967.66, "word": " الرقبة", "probability": 0.8916015625}, {"start": 3967.66, "end": 3967.9, "word": " ديب", "probability": 0.7515462239583334}, {"start": 3967.9, "end": 3968.06, "word": " ولا", "probability": 0.7490234375}, {"start": 3968.06, "end": 3969.08, "word": " superficial؟", "probability": 0.56622314453125}, {"start": 3969.08, "end": 3969.5, "word": " ديب", "probability": 0.9239908854166666}, {"start": 3969.5, "end": 3969.52, "word": " ..", "probability": 0.1356201171875}, {"start": 3969.52, "end": 3969.94, "word": " ديب", "probability": 0.8771158854166666}, {"start": 3969.94, "end": 3970.02, "word": " ..", "probability": 0.398681640625}, {"start": 3970.02, "end": 3970.16, "word": " ديب", "probability": 0.83154296875}, {"start": 3970.16, "end": 3970.34, "word": " ..", "probability": 0.3837890625}, {"start": 3970.34, "end": 3970.62, "word": " ديب", "probability": 0.8956705729166666}, {"start": 3970.62, "end": 3970.62, "word": " ..", "probability": 0.5986328125}, {"start": 3970.62, "end": 3970.66, "word": " ديب", "probability": 0.94873046875}, {"start": 3970.66, "end": 3971.06, "word": " ..", "probability": 0.828125}, {"start": 3971.06, "end": 3971.06, "word": " ديب", "probability": 0.9729817708333334}, {"start": 3971.06, "end": 3971.1, "word": " ..", "probability": 0.91796875}, {"start": 3971.1, "end": 3971.1, "word": " ديب", 
"probability": 0.9812825520833334}, {"start": 3971.1, "end": 3971.36, "word": " ..", "probability": 0.94677734375}, {"start": 3971.36, "end": 3971.36, "word": " ديب", "probability": 0.9840494791666666}, {"start": 3971.36, "end": 3971.56, "word": " ..", "probability": 0.95947265625}, {"start": 3971.56, "end": 3971.78, "word": " ديب", "probability": 0.9855143229166666}, {"start": 3971.78, "end": 3971.78, "word": " ..", "probability": 0.9658203125}, {"start": 3971.78, "end": 3971.88, "word": " ديب", "probability": 0.9869791666666666}, {"start": 3971.88, "end": 3971.9, "word": " ..", "probability": 0.96923828125}, {"start": 3971.9, "end": 3973.22, "word": " ديب", "probability": 0.9873046875}, {"start": 3973.22, "end": 3973.86, "word": " ..", "probability": 0.97216796875}, {"start": 3973.86, "end": 3973.86, "word": " ديب", "probability": 0.98779296875}, {"start": 3973.86, "end": 3973.92, "word": " ..", "probability": 0.9736328125}, {"start": 3973.92, "end": 3973.94, "word": " ديب", "probability": 0.9884440104166666}, {"start": 3973.94, "end": 3973.94, "word": " ..", "probability": 0.97509765625}, {"start": 3973.94, "end": 3974.0, "word": " ديب", "probability": 0.9890950520833334}, {"start": 3974.0, "end": 3974.08, "word": " ..", "probability": 0.9765625}, {"start": 3974.08, "end": 3974.08, "word": " ديب", "probability": 0.9895833333333334}, {"start": 3974.08, "end": 3974.1, "word": " ..", "probability": 0.97802734375}, {"start": 3974.1, "end": 3974.1, "word": " ديب", "probability": 0.9903971354166666}, {"start": 3974.1, "end": 3974.1, "word": " ..", "probability": 0.97900390625}, {"start": 3974.1, "end": 3974.1, "word": " ديب", "probability": 0.99072265625}, {"start": 3974.1, "end": 3974.1, "word": " ..", "probability": 0.98046875}, {"start": 3974.1, "end": 3974.1, "word": " ديب", "probability": 0.9912109375}, {"start": 3974.1, "end": 3974.1, "word": " ..", "probability": 0.9814453125}, {"start": 3974.1, "end": 3974.1, "word": " ديب", "probability": 0.99169921875}, 
{"start": 3974.1, "end": 3974.1, "word": " ..", "probability": 0.982421875}, {"start": 3974.1, "end": 3974.1, "word": " ديب", "probability": 0.9921875}, {"start": 3974.1, "end": 3974.1, "word": " ..", "probability": 0.98291015625}, {"start": 3974.1, "end": 3974.1, "word": " ديب", "probability": 0.9925130208333334}, {"start": 3974.1, "end": 3974.5, "word": " ..", "probability": 0.98388671875}, {"start": 3974.5, "end": 3975.42, "word": " ديب", "probability": 0.9928385416666666}, {"start": 3975.42, "end": 3975.42, "word": " ..", "probability": 0.9853515625}, {"start": 3975.42, "end": 3975.88, "word": " ديب", "probability": 0.9931640625}, {"start": 3975.88, "end": 3979.18, "word": " ..", "probability": 0.986328125}, {"start": 3979.18, "end": 3979.64, "word": " ديب", "probability": 0.9934895833333334}, {"start": 3979.64, "end": 3979.82, "word": " ..", "probability": 0.98681640625}, {"start": 3979.82, "end": 3981.66, "word": " ديب", "probability": 0.9939778645833334}, {"start": 3981.66, "end": 3981.68, "word": " ..", "probability": 0.98779296875}, {"start": 3981.68, "end": 3982.08, "word": " ديب", "probability": 0.9939778645833334}, {"start": 3982.08, "end": 3982.3, "word": " ..", "probability": 0.98876953125}, {"start": 3982.3, "end": 3983.06, "word": " ديب", "probability": 0.994140625}, {"start": 3983.06, "end": 3983.06, "word": " ..", "probability": 0.9892578125}, {"start": 3983.06, "end": 3983.06, "word": " ديب", "probability": 0.9944661458333334}, {"start": 3983.06, "end": 3983.06, "word": " ..", "probability": 0.98974609375}, {"start": 3983.06, "end": 3983.06, "word": " ديب", "probability": 0.9944661458333334}, {"start": 3983.06, "end": 3983.06, "word": " ..", "probability": 0.990234375}, {"start": 3983.06, "end": 3983.06, "word": " ديب", "probability": 0.99462890625}, {"start": 3983.06, "end": 3983.06, "word": " ..", "probability": 0.99072265625}, {"start": 3983.06, "end": 3983.06, "word": " ديب", "probability": 0.9947916666666666}, {"start": 3983.06, "end": 
3983.06, "word": " ..", "probability": 0.9912109375}, {"start": 3983.06, "end": 3983.14, "word": " ديب", "probability": 0.9949544270833334}, {"start": 3983.14, "end": 3983.14, "word": " ..", "probability": 0.9912109375}, {"start": 3983.14, "end": 3985.7, "word": " ديب", "probability": 0.9949544270833334}, {"start": 3985.7, "end": 3985.98, "word": " ..", "probability": 0.99169921875}, {"start": 3985.98, "end": 3986.48, "word": " ديب", "probability": 0.9951171875}, {"start": 3986.48, "end": 3988.18, "word": " ..", "probability": 0.99169921875}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9952799479166666}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.9921875}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9951171875}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.9921875}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9951171875}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.9921875}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9952799479166666}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.99267578125}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9954427083333334}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.99267578125}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.99560546875}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.9931640625}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9954427083333334}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.9931640625}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.99560546875}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.9931640625}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9952799479166666}, {"start": 3988.18, "end": 3988.18, "word": " ..", 
"probability": 0.9931640625}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9954427083333334}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.99365234375}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9954427083333334}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.99365234375}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9954427083333334}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.99365234375}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9952799479166666}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.99365234375}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9954427083333334}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.99365234375}, {"start": 3988.18, "end": 3988.18, "word": " ديب", "probability": 0.9954427083333334}, {"start": 3988.18, "end": 3988.18, "word": " ..", "probability": 0.99365234375}, {"start": 3988.18, "end": 3988.28, "word": " ديب", "probability": 0.9952799479166666}, {"start": 3988.28, "end": 3988.28, "word": " ..", "probability": 0.99365234375}, {"start": 3988.28, "end": 3991.2, "word": " دي", "probability": 0.993408203125}], "temperature": 1.0}, {"id": 143, "seek": 400707, "start": 3991.95, "end": 4007.07, "text": "اللي subcontinent طيبها وهي هيمونيكي، لذيك، plated white، هدود شواشر، هتفكر كلش؟ طيب، اكتفي، مرة جاية", "tokens": [6027, 20292, 1422, 9000, 11058, 23032, 1829, 3555, 11296, 37037, 1829, 8032, 32640, 11536, 1829, 4117, 1829, 12399, 5296, 8848, 1829, 4117, 12399, 5924, 67, 2418, 12399, 8032, 3215, 2407, 3215, 13412, 2407, 33599, 2288, 12399, 8032, 2655, 5172, 37983, 28242, 8592, 22807, 23032, 1829, 3555, 12399, 1975, 4117, 2655, 41185, 12399, 3714, 25720, 10874, 995, 10632], "avg_logprob": -0.685344849167199, "compression_ratio": 1.251908396946565, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 
3991.95, "end": 3992.33, "word": "اللي", "probability": 0.200714111328125}, {"start": 3992.33, "end": 3993.11, "word": " subcontinent", "probability": 0.4551595052083333}, {"start": 3993.11, "end": 3993.71, "word": " طيبها", "probability": 0.616943359375}, {"start": 3993.71, "end": 3995.05, "word": " وهي", "probability": 0.68603515625}, {"start": 3995.05, "end": 3996.25, "word": " هيمونيكي،", "probability": 0.41312081473214285}, {"start": 3996.25, "end": 3997.77, "word": " لذيك،", "probability": 0.62607421875}, {"start": 3997.77, "end": 3998.99, "word": " plated", "probability": 0.5772705078125}, {"start": 3998.99, "end": 3999.67, "word": " white،", "probability": 0.5103759765625}, {"start": 3999.67, "end": 4000.09, "word": " هدود", "probability": 0.380157470703125}, {"start": 4000.09, "end": 4001.43, "word": " شواشر،", "probability": 0.47080078125}, {"start": 4001.43, "end": 4002.15, "word": " هتفكر", "probability": 0.4940185546875}, {"start": 4002.15, "end": 4003.15, "word": " كلش؟", "probability": 0.8028971354166666}, {"start": 4003.15, "end": 4004.85, "word": " طيب،", "probability": 0.8963623046875}, {"start": 4004.85, "end": 4005.79, "word": " اكتفي،", "probability": 0.724853515625}, {"start": 4005.79, "end": 4006.81, "word": " مرة", "probability": 0.853271484375}, {"start": 4006.81, "end": 4007.07, "word": " جاية", "probability": 0.7544759114583334}], "temperature": 1.0}], "language": "ar", "language_probability": 1.0, "duration": 4008.34475, "duration_after_vad": 3783.726249999978} \ No newline at end of file diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/XfuCeEmG_MY_raw.json b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/XfuCeEmG_MY_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..80d5b8dadabe651c82afce8754557606e9e79c46 --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/XfuCeEmG_MY_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 4751, "start": 20.89, "end": 47.51, "text": "عظم الله مش طازج باسم الله الرحمن الرحيم طيب اليوم 
ان شاء الله هحكي في thrombotic disease or disorders و من الإسم يا شباب by definition they are disease that are inherited or acquired inherited or acquired بيتميزوا بوجود جلطة بتتكون في الوعاء قدامهم", "tokens": [3615, 19913, 2304, 21984, 37893, 23032, 31377, 7435, 4724, 32277, 2304, 21984, 34892, 5016, 27842, 34892, 5016, 32640, 23032, 1829, 3555, 45595, 20498, 16472, 13412, 16606, 21984, 8032, 5016, 4117, 1829, 8978, 739, 3548, 9411, 4752, 420, 20261, 4032, 9154, 33688, 38251, 35186, 13412, 3555, 16758, 538, 7123, 436, 366, 4752, 300, 366, 27091, 420, 17554, 27091, 420, 17554, 4724, 36081, 2304, 1829, 11622, 14407, 4724, 29245, 23328, 10874, 1211, 9566, 3660, 39894, 2655, 30544, 8978, 2423, 45367, 16606, 12174, 3215, 10943, 16095], "avg_logprob": -0.21447172636787096, "compression_ratio": 1.6181818181818182, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 20.89, "end": 21.29, "word": "عظم", "probability": 0.6631673177083334}, {"start": 21.29, "end": 21.39, "word": " الله", "probability": 0.92822265625}, {"start": 21.39, "end": 21.57, "word": " مش", "probability": 0.59619140625}, {"start": 21.57, "end": 21.85, "word": " طازج", "probability": 0.7190755208333334}, {"start": 21.85, "end": 22.19, "word": " باسم", "probability": 0.6056315104166666}, {"start": 22.19, "end": 22.37, "word": " الله", "probability": 0.95947265625}, {"start": 22.37, "end": 22.63, "word": " الرحمن", "probability": 0.9464518229166666}, {"start": 22.63, "end": 23.05, "word": " الرحيم", "probability": 0.98681640625}, {"start": 23.05, "end": 24.21, "word": " طيب", "probability": 0.7945963541666666}, {"start": 24.21, "end": 24.47, "word": " اليوم", "probability": 0.9580078125}, {"start": 24.47, "end": 24.61, "word": " ان", "probability": 0.677734375}, {"start": 24.61, "end": 24.75, "word": " شاء", "probability": 0.932373046875}, {"start": 24.75, "end": 24.77, "word": " الله", "probability": 0.94482421875}, {"start": 24.77, "end": 25.19, "word": " هحكي", "probability": 0.746826171875}, 
{"start": 25.19, "end": 25.33, "word": " في", "probability": 0.9130859375}, {"start": 25.33, "end": 25.95, "word": " thrombotic", "probability": 0.7516276041666666}, {"start": 25.95, "end": 26.49, "word": " disease", "probability": 0.71630859375}, {"start": 26.49, "end": 27.07, "word": " or", "probability": 0.34130859375}, {"start": 27.07, "end": 27.93, "word": " disorders", "probability": 0.96142578125}, {"start": 27.93, "end": 29.85, "word": " و", "probability": 0.94970703125}, {"start": 29.85, "end": 30.81, "word": " من", "probability": 0.60205078125}, {"start": 30.81, "end": 31.23, "word": " الإسم", "probability": 0.61669921875}, {"start": 31.23, "end": 31.49, "word": " يا", "probability": 0.449951171875}, {"start": 31.49, "end": 31.93, "word": " شباب", "probability": 0.9884440104166666}, {"start": 31.93, "end": 33.19, "word": " by", "probability": 0.77587890625}, {"start": 33.19, "end": 33.81, "word": " definition", "probability": 0.91845703125}, {"start": 33.81, "end": 35.45, "word": " they", "probability": 0.88134765625}, {"start": 35.45, "end": 35.77, "word": " are", "probability": 0.9638671875}, {"start": 35.77, "end": 36.59, "word": " disease", "probability": 0.493896484375}, {"start": 36.59, "end": 37.95, "word": " that", "probability": 0.93212890625}, {"start": 37.95, "end": 38.25, "word": " are", "probability": 0.7490234375}, {"start": 38.25, "end": 38.85, "word": " inherited", "probability": 0.98046875}, {"start": 38.85, "end": 39.49, "word": " or", "probability": 0.97412109375}, {"start": 39.49, "end": 40.37, "word": " acquired", "probability": 0.89404296875}, {"start": 40.37, "end": 42.65, "word": " inherited", "probability": 0.99169921875}, {"start": 42.65, "end": 43.17, "word": " or", "probability": 0.97412109375}, {"start": 43.17, "end": 43.75, "word": " acquired", "probability": 0.88818359375}, {"start": 43.75, "end": 44.63, "word": " بيتميزوا", "probability": 0.940673828125}, {"start": 44.63, "end": 45.05, "word": " بوجود", "probability": 
0.9479166666666666}, {"start": 45.05, "end": 45.59, "word": " جلطة", "probability": 0.97119140625}, {"start": 45.59, "end": 46.49, "word": " بتتكون", "probability": 0.87158203125}, {"start": 46.49, "end": 46.67, "word": " في", "probability": 0.900390625}, {"start": 46.67, "end": 47.03, "word": " الوعاء", "probability": 0.7928059895833334}, {"start": 47.03, "end": 47.51, "word": " قدامهم", "probability": 0.8619384765625}], "temperature": 1.0}, {"id": 2, "seek": 7992, "start": 57.7, "end": 79.92, "text": "أو partially obstructed يعني إغلاق جزهي وقد تنفصل جزء من هذه الجلطة وينزل الدولة الدموية إلى مكان آخر ويسبب جلطة في مكان آخر ويسبب جلطة في مكان آخر يسمون هذه الظاهرة", "tokens": [10721, 2407, 18886, 45579, 292, 37495, 22653, 11933, 17082, 15040, 4587, 10874, 11622, 3224, 1829, 4032, 28543, 6055, 1863, 5172, 36520, 10874, 11622, 38207, 9154, 29538, 25724, 1211, 9566, 3660, 4032, 9957, 11622, 1211, 32748, 12610, 3660, 32748, 2304, 2407, 10632, 30731, 3714, 41361, 19753, 34740, 4032, 1829, 35457, 3555, 10874, 1211, 9566, 3660, 8978, 3714, 41361, 19753, 34740, 4032, 1829, 35457, 3555, 10874, 1211, 9566, 3660, 8978, 3714, 41361, 19753, 34740, 7251, 38251, 11536, 29538, 6024, 116, 40294, 25720], "avg_logprob": -0.15972222810910072, "compression_ratio": 1.7345679012345678, "no_speech_prob": 2.2649765014648438e-06, "words": [{"start": 57.7, "end": 58.74, "word": "أو", "probability": 0.52374267578125}, {"start": 58.74, "end": 59.78, "word": " partially", "probability": 0.68505859375}, {"start": 59.78, "end": 61.62, "word": " obstructed", "probability": 0.8427734375}, {"start": 61.62, "end": 62.5, "word": " يعني", "probability": 0.7646484375}, {"start": 62.5, "end": 63.58, "word": " إغلاق", "probability": 0.86480712890625}, {"start": 63.58, "end": 64.56, "word": " جزهي", "probability": 0.904296875}, {"start": 64.56, "end": 65.76, "word": " وقد", "probability": 0.768310546875}, {"start": 65.76, "end": 66.44, "word": " تنفصل", "probability": 0.940185546875}, {"start": 66.44, 
"end": 67.14, "word": " جزء", "probability": 0.9912109375}, {"start": 67.14, "end": 67.38, "word": " من", "probability": 0.99267578125}, {"start": 67.38, "end": 67.82, "word": " هذه", "probability": 0.9404296875}, {"start": 67.82, "end": 68.62, "word": " الجلطة", "probability": 0.9832763671875}, {"start": 68.62, "end": 71.58, "word": " وينزل", "probability": 0.94189453125}, {"start": 71.58, "end": 72.82, "word": " الدولة", "probability": 0.758544921875}, {"start": 72.82, "end": 73.28, "word": " الدموية", "probability": 0.7830810546875}, {"start": 73.28, "end": 73.58, "word": " إلى", "probability": 0.9345703125}, {"start": 73.58, "end": 73.98, "word": " مكان", "probability": 0.9853515625}, {"start": 73.98, "end": 74.56, "word": " آخر", "probability": 0.916015625}, {"start": 74.56, "end": 75.08, "word": " ويسبب", "probability": 0.965576171875}, {"start": 75.08, "end": 75.42, "word": " جلطة", "probability": 0.9930419921875}, {"start": 75.42, "end": 75.58, "word": " في", "probability": 0.9853515625}, {"start": 75.58, "end": 75.94, "word": " مكان", "probability": 0.9873046875}, {"start": 75.94, "end": 76.4, "word": " آخر", "probability": 0.971435546875}, {"start": 76.4, "end": 76.92, "word": " ويسبب", "probability": 0.9281005859375}, {"start": 76.92, "end": 77.44, "word": " جلطة", "probability": 0.9915771484375}, {"start": 77.44, "end": 77.96, "word": " في", "probability": 0.986328125}, {"start": 77.96, "end": 78.28, "word": " مكان", "probability": 0.989990234375}, {"start": 78.28, "end": 78.64, "word": " آخر", "probability": 0.989990234375}, {"start": 78.64, "end": 79.06, "word": " يسمون", "probability": 0.638916015625}, {"start": 79.06, "end": 79.36, "word": " هذه", "probability": 0.970703125}, {"start": 79.36, "end": 79.92, "word": " الظاهرة", "probability": 0.922607421875}], "temperature": 1.0}, {"id": 3, "seek": 10982, "start": 83.82, "end": 109.82, "text": "impolism و .. 
يعني by definition thromboimpolism هي عبارة عن أمراض بتتكون من خلالها أو فيها جلطة في مكان ما من الأواياء الدموية thrombus, clot is a thrombus اسم الآخر للجلطة لclot هي عبارة عن ايه؟ thrombus", "tokens": [332, 12892, 1434, 4032, 4386, 37495, 22653, 538, 7123, 739, 3548, 78, 332, 12892, 1434, 39896, 6225, 3555, 9640, 3660, 18871, 5551, 29973, 46958, 39894, 2655, 30544, 9154, 16490, 1211, 6027, 11296, 34051, 8978, 11296, 10874, 1211, 9566, 3660, 8978, 3714, 41361, 19446, 9154, 16247, 14407, 1829, 16606, 32748, 2304, 2407, 10632, 739, 3548, 301, 11, 48587, 307, 257, 739, 3548, 301, 24525, 2304, 6024, 95, 34740, 24976, 7435, 1211, 9566, 3660, 5296, 3474, 310, 39896, 6225, 3555, 9640, 3660, 18871, 1975, 1829, 3224, 22807, 739, 3548, 301], "avg_logprob": -0.2591292241985878, "compression_ratio": 1.5759162303664922, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 83.82, "end": 84.82, "word": "impolism", "probability": 0.4169514973958333}, {"start": 84.82, "end": 85.82, "word": " و", "probability": 0.353759765625}, {"start": 85.82, "end": 87.3, "word": " ..", "probability": 0.350830078125}, {"start": 87.3, "end": 91.6, "word": " يعني", "probability": 0.87646484375}, {"start": 91.6, "end": 91.8, "word": " by", "probability": 0.40869140625}, {"start": 91.8, "end": 92.2, "word": " definition", "probability": 0.69384765625}, {"start": 92.2, "end": 93.54, "word": " thromboimpolism", "probability": 0.649017333984375}, {"start": 93.54, "end": 93.76, "word": " هي", "probability": 0.89111328125}, {"start": 93.76, "end": 93.98, "word": " عبارة", "probability": 0.9610595703125}, {"start": 93.98, "end": 94.16, "word": " عن", "probability": 0.99609375}, {"start": 94.16, "end": 94.54, "word": " أمراض", "probability": 0.8522135416666666}, {"start": 94.54, "end": 95.08, "word": " بتتكون", "probability": 0.8846028645833334}, {"start": 95.08, "end": 95.32, "word": " من", "probability": 0.751953125}, {"start": 95.32, "end": 95.76, "word": " خلالها", "probability": 
0.9952392578125}, {"start": 95.76, "end": 95.92, "word": " أو", "probability": 0.8798828125}, {"start": 95.92, "end": 96.2, "word": " فيها", "probability": 0.99072265625}, {"start": 96.2, "end": 96.86, "word": " جلطة", "probability": 0.9859619140625}, {"start": 96.86, "end": 97.62, "word": " في", "probability": 0.9619140625}, {"start": 97.62, "end": 98.56, "word": " مكان", "probability": 0.992431640625}, {"start": 98.56, "end": 98.88, "word": " ما", "probability": 0.8515625}, {"start": 98.88, "end": 99.34, "word": " من", "probability": 0.99365234375}, {"start": 99.34, "end": 99.88, "word": " الأواياء", "probability": 0.64727783203125}, {"start": 99.88, "end": 100.52, "word": " الدموية", "probability": 0.943115234375}, {"start": 100.52, "end": 101.64, "word": " thrombus,", "probability": 0.7455240885416666}, {"start": 101.78, "end": 102.04, "word": " clot", "probability": 0.6708984375}, {"start": 102.04, "end": 102.38, "word": " is", "probability": 0.86767578125}, {"start": 102.38, "end": 102.56, "word": " a", "probability": 0.96728515625}, {"start": 102.56, "end": 103.28, "word": " thrombus", "probability": 0.9705403645833334}, {"start": 103.28, "end": 104.32, "word": " اسم", "probability": 0.77490234375}, {"start": 104.32, "end": 105.18, "word": " الآخر", "probability": 0.9454752604166666}, {"start": 105.18, "end": 106.96, "word": " للجلطة", "probability": 0.9568359375}, {"start": 106.96, "end": 107.58, "word": " لclot", "probability": 0.685546875}, {"start": 107.58, "end": 108.22, "word": " هي", "probability": 0.9443359375}, {"start": 108.22, "end": 108.48, "word": " عبارة", "probability": 0.9921875}, {"start": 108.48, "end": 108.7, "word": " عن", "probability": 0.97119140625}, {"start": 108.7, "end": 109.16, "word": " ايه؟", "probability": 0.75396728515625}, {"start": 109.16, "end": 109.82, "word": " thrombus", "probability": 0.9246419270833334}], "temperature": 1.0}, {"id": 4, "seek": 13811, "start": 111.15, "end": 138.11, "text": "الذي يمكن أن يتكوّن في الوعاء 
الدموطبان كان artery or vein مصطلح الثرومبوس ريبايتس هو عبارة عن جلطة مصحوبة بinflammation وبعض الجلطات زي ما قلت لكم ممكن تكون superficial وطراب العين المجردة أشهرها هو ال DVT deep venous thrombosis اللي قد يؤدي إلى", "tokens": [6027, 8848, 1829, 7251, 43020, 14739, 7251, 2655, 4117, 2407, 11703, 1863, 8978, 2423, 45367, 16606, 32748, 2304, 2407, 9566, 3555, 7649, 25961, 38520, 420, 30669, 3714, 9381, 9566, 1211, 5016, 6024, 104, 2288, 20498, 3555, 41779, 12602, 1829, 3555, 995, 36081, 3794, 31439, 6225, 3555, 9640, 3660, 18871, 10874, 1211, 9566, 3660, 3714, 9381, 5016, 37746, 3660, 4724, 45684, 399, 46599, 3615, 11242, 25724, 1211, 9566, 9307, 30767, 1829, 19446, 12174, 1211, 2655, 5296, 24793, 3714, 43020, 6055, 30544, 34622, 4032, 9566, 2288, 16758, 18863, 9957, 9673, 7435, 2288, 41891, 5551, 8592, 3224, 2288, 11296, 31439, 2423, 17021, 51, 2452, 6138, 563, 739, 3548, 8211, 13672, 1829, 12174, 3215, 7251, 33604, 16254, 30731], "avg_logprob": -0.2932065258855405, "compression_ratio": 1.4848484848484849, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 111.15, "end": 111.39, "word": "الذي", "probability": 0.552490234375}, {"start": 111.39, "end": 111.59, "word": " يمكن", "probability": 0.7646484375}, {"start": 111.59, "end": 111.69, "word": " أن", "probability": 0.6640625}, {"start": 111.69, "end": 112.05, "word": " يتكوّن", "probability": 0.7746175130208334}, {"start": 112.05, "end": 112.17, "word": " في", "probability": 0.94091796875}, {"start": 112.17, "end": 112.61, "word": " الوعاء", "probability": 0.8087565104166666}, {"start": 112.61, "end": 113.27, "word": " الدموطبان", "probability": 0.49530029296875}, {"start": 113.27, "end": 113.49, "word": " كان", "probability": 0.483642578125}, {"start": 113.49, "end": 113.83, "word": " artery", "probability": 0.06689453125}, {"start": 113.83, "end": 115.89, "word": " or", "probability": 0.58642578125}, {"start": 115.89, "end": 116.51, "word": " vein", "probability": 0.88427734375}, {"start": 116.51, 
"end": 118.11, "word": " مصطلح", "probability": 0.836474609375}, {"start": 118.11, "end": 118.61, "word": " الثرومبوس", "probability": 0.6938832600911459}, {"start": 118.61, "end": 119.49, "word": " ريبايتس", "probability": 0.7565714518229166}, {"start": 119.49, "end": 119.71, "word": " هو", "probability": 0.79638671875}, {"start": 119.71, "end": 120.07, "word": " عبارة", "probability": 0.977783203125}, {"start": 120.07, "end": 120.85, "word": " عن", "probability": 0.99267578125}, {"start": 120.85, "end": 121.53, "word": " جلطة", "probability": 0.969482421875}, {"start": 121.53, "end": 122.45, "word": " مصحوبة", "probability": 0.756201171875}, {"start": 122.45, "end": 123.25, "word": " بinflammation", "probability": 0.7132975260416666}, {"start": 123.25, "end": 125.25, "word": " وبعض", "probability": 0.8292643229166666}, {"start": 125.25, "end": 125.67, "word": " الجلطات", "probability": 0.977783203125}, {"start": 125.67, "end": 125.85, "word": " زي", "probability": 0.666259765625}, {"start": 125.85, "end": 125.89, "word": " ما", "probability": 0.9521484375}, {"start": 125.89, "end": 126.09, "word": " قلت", "probability": 0.9314778645833334}, {"start": 126.09, "end": 126.27, "word": " لكم", "probability": 0.78076171875}, {"start": 126.27, "end": 126.53, "word": " ممكن", "probability": 0.927490234375}, {"start": 126.53, "end": 126.89, "word": " تكون", "probability": 0.98583984375}, {"start": 126.89, "end": 128.45, "word": " superficial", "probability": 0.720703125}, {"start": 128.45, "end": 130.51, "word": " وطراب", "probability": 0.6949462890625}, {"start": 130.51, "end": 130.89, "word": " العين", "probability": 0.986572265625}, {"start": 130.89, "end": 131.81, "word": " المجردة", "probability": 0.980224609375}, {"start": 131.81, "end": 133.09, "word": " أشهرها", "probability": 0.97431640625}, {"start": 133.09, "end": 133.33, "word": " هو", "probability": 0.880859375}, {"start": 133.33, "end": 133.47, "word": " ال", "probability": 0.7275390625}, {"start": 133.47, 
"end": 134.07, "word": " DVT", "probability": 0.690673828125}, {"start": 134.07, "end": 134.55, "word": " deep", "probability": 0.4248046875}, {"start": 134.55, "end": 135.03, "word": " venous", "probability": 0.881103515625}, {"start": 135.03, "end": 135.85, "word": " thrombosis", "probability": 0.8937174479166666}, {"start": 135.85, "end": 136.99, "word": " اللي", "probability": 0.77197265625}, {"start": 136.99, "end": 137.27, "word": " قد", "probability": 0.9658203125}, {"start": 137.27, "end": 137.71, "word": " يؤدي", "probability": 0.9622395833333334}, {"start": 137.71, "end": 138.11, "word": " إلى", "probability": 0.83984375}], "temperature": 1.0}, {"id": 5, "seek": 16873, "start": 139.47, "end": 168.73, "text": "embolism أو جلطة في الـ lung جلطة في الـ lung deep venous thrombosis deep venous thrombosis ماشي قسموا هذه الأمراض إلى قسمين كانوا إما familial أو non-familial أو تقسيمة تانية هي physiological أو", "tokens": [443, 17460, 1434, 34051, 10874, 1211, 9566, 3660, 8978, 2423, 39184, 16730, 10874, 1211, 9566, 3660, 8978, 2423, 39184, 16730, 2452, 6138, 563, 739, 3548, 8211, 2452, 6138, 563, 739, 3548, 8211, 3714, 33599, 1829, 12174, 38251, 14407, 29538, 16247, 29973, 46958, 30731, 12174, 38251, 9957, 25961, 14407, 11933, 15042, 4085, 831, 34051, 2107, 12, 69, 335, 388, 831, 34051, 6055, 4587, 3794, 32640, 3660, 6055, 7649, 10632, 39896, 41234, 34051], "avg_logprob": -0.213758681797319, "compression_ratio": 1.5730994152046784, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 139.47, "end": 140.39, "word": "embolism", "probability": 0.719970703125}, {"start": 140.39, "end": 140.69, "word": " أو", "probability": 0.671875}, {"start": 140.69, "end": 141.15, "word": " جلطة", "probability": 0.9754638671875}, {"start": 141.15, "end": 141.31, "word": " في", "probability": 0.9453125}, {"start": 141.31, "end": 141.81, "word": " الـ", "probability": 0.564453125}, {"start": 141.81, "end": 142.47, "word": " lung", "probability": 0.64111328125}, {"start": 
142.47, "end": 143.41, "word": " جلطة", "probability": 0.9112548828125}, {"start": 143.41, "end": 143.97, "word": " في", "probability": 0.96484375}, {"start": 143.97, "end": 144.13, "word": " الـ", "probability": 0.674072265625}, {"start": 144.13, "end": 144.41, "word": " lung", "probability": 0.90283203125}, {"start": 144.41, "end": 146.49, "word": " deep", "probability": 0.259765625}, {"start": 146.49, "end": 147.59, "word": " venous", "probability": 0.913818359375}, {"start": 147.59, "end": 149.09, "word": " thrombosis", "probability": 0.9773763020833334}, {"start": 149.09, "end": 151.27, "word": " deep", "probability": 0.371826171875}, {"start": 151.27, "end": 151.91, "word": " venous", "probability": 0.962890625}, {"start": 151.91, "end": 152.89, "word": " thrombosis", "probability": 0.98095703125}, {"start": 152.89, "end": 156.99, "word": " ماشي", "probability": 0.8673502604166666}, {"start": 156.99, "end": 157.51, "word": " قسموا", "probability": 0.9344075520833334}, {"start": 157.51, "end": 157.73, "word": " هذه", "probability": 0.958984375}, {"start": 157.73, "end": 158.25, "word": " الأمراض", "probability": 0.9908854166666666}, {"start": 158.25, "end": 158.59, "word": " إلى", "probability": 0.93212890625}, {"start": 158.59, "end": 159.59, "word": " قسمين", "probability": 0.9856770833333334}, {"start": 159.59, "end": 160.05, "word": " كانوا", "probability": 0.56085205078125}, {"start": 160.05, "end": 160.25, "word": " إما", "probability": 0.72802734375}, {"start": 160.25, "end": 161.07, "word": " familial", "probability": 0.860107421875}, {"start": 161.07, "end": 161.67, "word": " أو", "probability": 0.8837890625}, {"start": 161.67, "end": 162.01, "word": " non", "probability": 0.90771484375}, {"start": 162.01, "end": 162.79, "word": "-familial", "probability": 0.75634765625}, {"start": 162.79, "end": 164.49, "word": " أو", "probability": 0.93115234375}, {"start": 164.49, "end": 165.17, "word": " تقسيمة", "probability": 0.7541015625}, {"start": 165.17, 
"end": 165.45, "word": " تانية", "probability": 0.9874674479166666}, {"start": 165.45, "end": 165.67, "word": " هي", "probability": 0.83154296875}, {"start": 165.67, "end": 166.29, "word": " physiological", "probability": 0.9833984375}, {"start": 166.29, "end": 168.73, "word": " أو", "probability": 0.83837890625}], "temperature": 1.0}, {"id": 6, "seek": 19938, "start": 170.42, "end": 199.38, "text": "الـ non-familial ممكن تكون physiological أو pathological physiological أو pathological و الآن هنشوف هذه التقسيمة طبعا يعني هذه من أحلى التقسيمات اللي موجودة زى ما انتوا شايفين هي الجزء الفوغانى هو عبارة عن inherited disorders و بيندلش تحتى أمثلة كثيرة منها factor V Leiden سمعتوا هى؟ لأ شرحته أنا بس هنشتغل التفاصيل تجلجوش ثم Prothrombin", "tokens": [6027, 39184, 2107, 12, 69, 335, 388, 831, 3714, 43020, 6055, 30544, 41234, 34051, 3100, 4383, 41234, 34051, 3100, 4383, 4032, 6024, 48506, 8032, 1863, 8592, 38688, 29538, 16712, 4587, 3794, 32640, 3660, 23032, 3555, 3615, 995, 37495, 22653, 29538, 9154, 5551, 5016, 23942, 16712, 4587, 3794, 32640, 9307, 13672, 1829, 3714, 29245, 23328, 3660, 30767, 7578, 19446, 16472, 2655, 14407, 13412, 995, 33911, 9957, 39896, 25724, 11622, 38207, 27188, 2407, 17082, 7649, 7578, 31439, 6225, 3555, 9640, 3660, 18871, 27091, 20261, 4032, 49374, 3215, 1211, 8592, 6055, 33753, 7578, 5551, 2304, 12984, 37977, 9122, 12984, 48923, 9154, 11296, 5952, 691, 1456, 4380, 8608, 2304, 34268, 14407, 8032, 7578, 22807, 5296, 10721, 13412, 2288, 33753, 3224, 41850, 4724, 3794, 8032, 1863, 8592, 2655, 17082, 1211, 16712, 5172, 33546, 26895, 6055, 7435, 1211, 7435, 2407, 8592, 38637, 2304, 2114, 900, 81, 3548, 259], "avg_logprob": -0.26333040999365853, "compression_ratio": 1.6277602523659307, "no_speech_prob": 2.7418136596679688e-06, "words": [{"start": 170.42, "end": 171.0, "word": "الـ", "probability": 0.75732421875}, {"start": 171.0, "end": 171.3, "word": " non", "probability": 0.312255859375}, {"start": 171.3, "end": 171.9, "word": "-familial", 
"probability": 0.6576171875}, {"start": 171.9, "end": 172.2, "word": " ممكن", "probability": 0.898193359375}, {"start": 172.2, "end": 172.44, "word": " تكون", "probability": 0.94482421875}, {"start": 172.44, "end": 172.94, "word": " physiological", "probability": 0.974609375}, {"start": 172.94, "end": 173.6, "word": " أو", "probability": 0.79052734375}, {"start": 173.6, "end": 174.48, "word": " pathological", "probability": 0.98046875}, {"start": 174.48, "end": 174.96, "word": " physiological", "probability": 0.83251953125}, {"start": 174.96, "end": 176.48, "word": " أو", "probability": 0.72119140625}, {"start": 176.48, "end": 177.32, "word": " pathological", "probability": 0.978515625}, {"start": 177.32, "end": 177.5, "word": " و", "probability": 0.291015625}, {"start": 177.5, "end": 177.68, "word": " الآن", "probability": 0.728515625}, {"start": 177.68, "end": 178.58, "word": " هنشوف", "probability": 0.9503173828125}, {"start": 178.58, "end": 178.84, "word": " هذه", "probability": 0.92626953125}, {"start": 178.84, "end": 179.42, "word": " التقسيمة", "probability": 0.920703125}, {"start": 179.42, "end": 179.7, "word": " طبعا", "probability": 0.975341796875}, {"start": 179.7, "end": 180.66, "word": " يعني", "probability": 0.763671875}, {"start": 180.66, "end": 180.94, "word": " هذه", "probability": 0.6875}, {"start": 180.94, "end": 181.14, "word": " من", "probability": 0.9853515625}, {"start": 181.14, "end": 181.42, "word": " أحلى", "probability": 0.94580078125}, {"start": 181.42, "end": 182.02, "word": " التقسيمات", "probability": 0.982421875}, {"start": 182.02, "end": 182.18, "word": " اللي", "probability": 0.8642578125}, {"start": 182.18, "end": 182.74, "word": " موجودة", "probability": 0.9691162109375}, {"start": 182.74, "end": 183.2, "word": " زى", "probability": 0.73828125}, {"start": 183.2, "end": 183.26, "word": " ما", "probability": 0.88720703125}, {"start": 183.26, "end": 183.42, "word": " انتوا", "probability": 0.7672526041666666}, {"start": 183.42, 
"end": 183.84, "word": " شايفين", "probability": 0.9876708984375}, {"start": 183.84, "end": 184.02, "word": " هي", "probability": 0.4873046875}, {"start": 184.02, "end": 184.34, "word": " الجزء", "probability": 0.9728190104166666}, {"start": 184.34, "end": 185.02, "word": " الفوغانى", "probability": 0.74326171875}, {"start": 185.02, "end": 185.22, "word": " هو", "probability": 0.8486328125}, {"start": 185.22, "end": 185.48, "word": " عبارة", "probability": 0.9940185546875}, {"start": 185.48, "end": 185.66, "word": " عن", "probability": 0.9921875}, {"start": 185.66, "end": 186.08, "word": " inherited", "probability": 0.98974609375}, {"start": 186.08, "end": 186.92, "word": " disorders", "probability": 0.98486328125}, {"start": 186.92, "end": 187.76, "word": " و", "probability": 0.95654296875}, {"start": 187.76, "end": 188.74, "word": " بيندلش", "probability": 0.712890625}, {"start": 188.74, "end": 189.48, "word": " تحتى", "probability": 0.8600260416666666}, {"start": 189.48, "end": 189.98, "word": " أمثلة", "probability": 0.96337890625}, {"start": 189.98, "end": 190.54, "word": " كثيرة", "probability": 0.9658203125}, {"start": 190.54, "end": 191.28, "word": " منها", "probability": 0.965576171875}, {"start": 191.28, "end": 191.58, "word": " factor", "probability": 0.806640625}, {"start": 191.58, "end": 191.94, "word": " V", "probability": 0.257568359375}, {"start": 191.94, "end": 192.48, "word": " Leiden", "probability": 0.6624755859375}, {"start": 192.48, "end": 193.38, "word": " سمعتوا", "probability": 0.814697265625}, {"start": 193.38, "end": 194.36, "word": " هى؟", "probability": 0.5875651041666666}, {"start": 194.36, "end": 195.56, "word": " لأ", "probability": 0.770751953125}, {"start": 195.56, "end": 196.02, "word": " شرحته", "probability": 0.9571533203125}, {"start": 196.02, "end": 196.2, "word": " أنا", "probability": 0.63671875}, {"start": 196.2, "end": 196.38, "word": " بس", "probability": 0.946044921875}, {"start": 196.38, "end": 196.8, "word": " هنشتغل", 
"probability": 0.7633056640625}, {"start": 196.8, "end": 197.34, "word": " التفاصيل", "probability": 0.61865234375}, {"start": 197.34, "end": 198.14, "word": " تجلجوش", "probability": 0.5296834309895834}, {"start": 198.14, "end": 198.6, "word": " ثم", "probability": 0.9697265625}, {"start": 198.6, "end": 199.38, "word": " Prothrombin", "probability": 0.52734375}], "temperature": 1.0}, {"id": 7, "seek": 22644, "start": 202.48, "end": 226.44, "text": "20210A هو رقم الـ mutation اللي حدث فيها ثم protein C deficiency هي protein C protein S deficiency antithrombin deficiency and hyperhomocystinemia وكلها inherited disorders inherited disorders طبعا كلها بتؤدي لthroposis", "tokens": [2009, 17, 3279, 32, 31439, 12602, 4587, 2304, 2423, 39184, 27960, 13672, 1829, 11331, 3215, 12984, 8978, 11296, 38637, 2304, 7944, 383, 37500, 39896, 7944, 383, 7944, 318, 37500, 2511, 355, 81, 3548, 259, 37500, 293, 9848, 71, 298, 31078, 372, 259, 14058, 4032, 28820, 11296, 27091, 20261, 27091, 20261, 23032, 3555, 3615, 995, 28242, 11296, 39894, 33604, 16254, 5296, 14222, 8211], "avg_logprob": -0.34126983843152486, "compression_ratio": 1.458100558659218, "no_speech_prob": 0.0, "words": [{"start": 202.48, "end": 203.64, "word": "20210A", "probability": 0.5611572265625}, {"start": 203.64, "end": 204.22, "word": " هو", "probability": 0.646484375}, {"start": 204.22, "end": 204.8, "word": " رقم", "probability": 0.9602864583333334}, {"start": 204.8, "end": 204.92, "word": " الـ", "probability": 0.5296630859375}, {"start": 204.92, "end": 205.46, "word": " mutation", "probability": 0.7900390625}, {"start": 205.46, "end": 206.44, "word": " اللي", "probability": 0.688720703125}, {"start": 206.44, "end": 206.8, "word": " حدث", "probability": 0.9895833333333334}, {"start": 206.8, "end": 207.3, "word": " فيها", "probability": 0.972412109375}, {"start": 207.3, "end": 207.8, "word": " ثم", "probability": 0.599853515625}, {"start": 207.8, "end": 208.14, "word": " protein", "probability": 0.6650390625}, 
{"start": 208.14, "end": 208.4, "word": " C", "probability": 0.81103515625}, {"start": 208.4, "end": 209.18, "word": " deficiency", "probability": 0.92822265625}, {"start": 209.18, "end": 210.46, "word": " هي", "probability": 0.609375}, {"start": 210.46, "end": 210.82, "word": " protein", "probability": 0.81884765625}, {"start": 210.82, "end": 211.28, "word": " C", "probability": 0.9404296875}, {"start": 211.28, "end": 211.72, "word": " protein", "probability": 0.5791015625}, {"start": 211.72, "end": 211.98, "word": " S", "probability": 0.5830078125}, {"start": 211.98, "end": 212.8, "word": " deficiency", "probability": 0.966796875}, {"start": 212.8, "end": 214.16, "word": " antithrombin", "probability": 0.6890869140625}, {"start": 214.16, "end": 215.4, "word": " deficiency", "probability": 0.9580078125}, {"start": 215.4, "end": 216.3, "word": " and", "probability": 0.662109375}, {"start": 216.3, "end": 217.58, "word": " hyperhomocystinemia", "probability": 0.6548200334821429}, {"start": 217.58, "end": 219.02, "word": " وكلها", "probability": 0.8216145833333334}, {"start": 219.02, "end": 219.6, "word": " inherited", "probability": 0.96435546875}, {"start": 219.6, "end": 220.5, "word": " disorders", "probability": 0.95849609375}, {"start": 220.5, "end": 220.96, "word": " inherited", "probability": 0.3046875}, {"start": 220.96, "end": 221.98, "word": " disorders", "probability": 0.97119140625}, {"start": 221.98, "end": 223.7, "word": " طبعا", "probability": 0.920166015625}, {"start": 223.7, "end": 224.2, "word": " كلها", "probability": 0.96435546875}, {"start": 224.2, "end": 224.92, "word": " بتؤدي", "probability": 0.93798828125}, {"start": 224.92, "end": 226.44, "word": " لthroposis", "probability": 0.6787923177083334}], "temperature": 1.0}, {"id": 8, "seek": 24917, "start": 228.47, "end": 249.17, "text": "ثم ال acquired diseases او protobiotic stimulus منها ال antiphospholipid syndrome or antibodies أكيد سمعتوا فيه، antiphospholipid antibody، طيب ماشي مش أفضل، 
ايوة، ثم مالك نانسي", "tokens": [12984, 2304, 2423, 17554, 11044, 1975, 2407, 1742, 996, 6471, 299, 21366, 9154, 11296, 2423, 2511, 24595, 16378, 401, 647, 327, 19371, 420, 28356, 5551, 4117, 25708, 8608, 2304, 34268, 14407, 8978, 3224, 12399, 2511, 24595, 16378, 401, 647, 327, 34507, 12399, 23032, 1829, 3555, 3714, 33599, 1829, 37893, 5551, 5172, 11242, 1211, 12399, 1975, 1829, 2407, 3660, 12399, 38637, 2304, 3714, 6027, 4117, 8717, 7649, 3794, 1829], "avg_logprob": -0.34420290373373724, "compression_ratio": 1.3604651162790697, "no_speech_prob": 0.0, "words": [{"start": 228.47, "end": 228.75, "word": "ثم", "probability": 0.60919189453125}, {"start": 228.75, "end": 228.87, "word": " ال", "probability": 0.54833984375}, {"start": 228.87, "end": 229.29, "word": " acquired", "probability": 0.43994140625}, {"start": 229.29, "end": 230.51, "word": " diseases", "probability": 0.94091796875}, {"start": 230.51, "end": 231.33, "word": " او", "probability": 0.71044921875}, {"start": 231.33, "end": 232.19, "word": " protobiotic", "probability": 0.580413818359375}, {"start": 232.19, "end": 232.91, "word": " stimulus", "probability": 0.80517578125}, {"start": 232.91, "end": 233.99, "word": " منها", "probability": 0.9072265625}, {"start": 233.99, "end": 234.45, "word": " ال", "probability": 0.85546875}, {"start": 234.45, "end": 235.63, "word": " antiphospholipid", "probability": 0.8824869791666666}, {"start": 235.63, "end": 236.31, "word": " syndrome", "probability": 0.697265625}, {"start": 236.31, "end": 236.85, "word": " or", "probability": 0.464599609375}, {"start": 236.85, "end": 237.95, "word": " antibodies", "probability": 0.8701171875}, {"start": 237.95, "end": 239.23, "word": " أكيد", "probability": 0.8216145833333334}, {"start": 239.23, "end": 239.67, "word": " سمعتوا", "probability": 0.8001708984375}, {"start": 239.67, "end": 241.49, "word": " فيه،", "probability": 0.69140625}, {"start": 241.49, "end": 242.69, "word": " antiphospholipid", "probability": 
0.9388020833333334}, {"start": 242.69, "end": 244.17, "word": " antibody،", "probability": 0.63134765625}, {"start": 244.17, "end": 244.57, "word": " طيب", "probability": 0.8372395833333334}, {"start": 244.57, "end": 244.99, "word": " ماشي", "probability": 0.90966796875}, {"start": 244.99, "end": 245.57, "word": " مش", "probability": 0.806640625}, {"start": 245.57, "end": 247.01, "word": " أفضل،", "probability": 0.5344970703125}, {"start": 247.01, "end": 248.05, "word": " ايوة،", "probability": 0.78916015625}, {"start": 248.05, "end": 248.25, "word": " ثم", "probability": 0.97119140625}, {"start": 248.25, "end": 248.59, "word": " مالك", "probability": 0.5592447916666666}, {"start": 248.59, "end": 249.17, "word": " نانسي", "probability": 0.8905029296875}], "temperature": 1.0}, {"id": 9, "seek": 27858, "start": 250.98, "end": 278.58, "text": "و Immobilization و Surgery و Pregnancy و Estrogen from a hepalin-induced thrombocytopenia كلهم عبارة عن inducer or stimulus للثروبوسس كلهم يودّي إلى ال thrombosis ان شاء الله هنتطرخ لكل لأنثى كثيرة من كلا النوعين كلا النوعين", "tokens": [2407, 17322, 21725, 2144, 4032, 6732, 7337, 4032, 430, 3375, 77, 6717, 4032, 4410, 7747, 490, 257, 415, 79, 24861, 12, 471, 41209, 739, 3548, 905, 4328, 15752, 654, 28242, 16095, 6225, 3555, 9640, 3660, 18871, 13716, 1776, 420, 21366, 24976, 12984, 32887, 3555, 41779, 3794, 28242, 16095, 7251, 23328, 11703, 1829, 30731, 2423, 739, 3548, 8211, 16472, 13412, 16606, 21984, 8032, 29399, 9566, 2288, 9778, 5296, 28820, 5296, 33456, 12984, 7578, 9122, 12984, 48923, 9154, 9122, 15040, 28239, 45367, 9957, 9122, 15040, 28239, 45367, 9957], "avg_logprob": -0.30387932267682305, "compression_ratio": 1.3866666666666667, "no_speech_prob": 0.0, "words": [{"start": 250.98, "end": 251.34, "word": "و", "probability": 0.8359375}, {"start": 251.34, "end": 252.7, "word": " Immobilization", "probability": 0.6726888020833334}, {"start": 252.7, "end": 253.02, "word": " و", "probability": 0.96142578125}, {"start": 
253.02, "end": 253.74, "word": " Surgery", "probability": 0.8798828125}, {"start": 253.74, "end": 254.1, "word": " و", "probability": 0.99462890625}, {"start": 254.1, "end": 254.98, "word": " Pregnancy", "probability": 0.898681640625}, {"start": 254.98, "end": 255.3, "word": " و", "probability": 0.97998046875}, {"start": 255.3, "end": 256.08, "word": " Estrogen", "probability": 0.8486328125}, {"start": 256.08, "end": 256.92, "word": " from", "probability": 0.151123046875}, {"start": 256.92, "end": 257.04, "word": " a", "probability": 0.83837890625}, {"start": 257.04, "end": 257.46, "word": " hepalin", "probability": 0.41357421875}, {"start": 257.46, "end": 257.9, "word": "-induced", "probability": 0.7035319010416666}, {"start": 257.9, "end": 258.84, "word": " thrombocytopenia", "probability": 0.8821207682291666}, {"start": 258.84, "end": 259.32, "word": " كلهم", "probability": 0.8984375}, {"start": 259.32, "end": 259.74, "word": " عبارة", "probability": 0.99609375}, {"start": 259.74, "end": 260.22, "word": " عن", "probability": 0.99462890625}, {"start": 260.22, "end": 262.9, "word": " inducer", "probability": 0.817626953125}, {"start": 262.9, "end": 263.34, "word": " or", "probability": 0.57666015625}, {"start": 263.34, "end": 265.6, "word": " stimulus", "probability": 0.9365234375}, {"start": 265.6, "end": 267.26, "word": " للثروبوسس", "probability": 0.6583658854166666}, {"start": 267.26, "end": 268.14, "word": " كلهم", "probability": 0.822265625}, {"start": 268.14, "end": 269.56, "word": " يودّي", "probability": 0.57305908203125}, {"start": 269.56, "end": 270.56, "word": " إلى", "probability": 0.52734375}, {"start": 270.56, "end": 270.8, "word": " ال", "probability": 0.68359375}, {"start": 270.8, "end": 271.4, "word": " thrombosis", "probability": 0.708251953125}, {"start": 271.4, "end": 271.9, "word": " ان", "probability": 0.7568359375}, {"start": 271.9, "end": 272.1, "word": " شاء", "probability": 0.9560546875}, {"start": 272.1, "end": 272.16, "word": " الله", 
"probability": 0.95556640625}, {"start": 272.16, "end": 272.92, "word": " هنتطرخ", "probability": 0.7890625}, {"start": 272.92, "end": 273.4, "word": " لكل", "probability": 0.896240234375}, {"start": 273.4, "end": 274.74, "word": " لأنثى", "probability": 0.69488525390625}, {"start": 274.74, "end": 275.28, "word": " كثيرة", "probability": 0.7361653645833334}, {"start": 275.28, "end": 275.52, "word": " من", "probability": 0.77685546875}, {"start": 275.52, "end": 276.1, "word": " كلا", "probability": 0.81689453125}, {"start": 276.1, "end": 277.24, "word": " النوعين", "probability": 0.9609375}, {"start": 277.24, "end": 277.88, "word": " كلا", "probability": 0.857666015625}, {"start": 277.88, "end": 278.58, "word": " النوعين", "probability": 0.98291015625}], "temperature": 1.0}, {"id": 10, "seek": 30843, "start": 279.53, "end": 308.43, "text": "طبعا بدي أذكركم في الاخ virtue سمعناها اظن بالتفصيل الممل في محاضرة .. المحاضرة الأولى، مظبوط؟ لما بدينا نشرح ال private and external and exogenous، مظبوط؟ طبعا هذا بيشرح أسباب تكوين الجلطة، مظبوط؟ أسباب تكوين الجلطة، طبعا أي خلل فيه", "tokens": [9566, 3555, 3615, 995, 4724, 16254, 5551, 8848, 37983, 24793, 8978, 2423, 47283, 20816, 8608, 2304, 3615, 8315, 11296, 1975, 19913, 1863, 20666, 2655, 5172, 9381, 26895, 9673, 42213, 8978, 3714, 5016, 46958, 25720, 4386, 9673, 5016, 46958, 25720, 16247, 12610, 7578, 12399, 3714, 19913, 3555, 2407, 9566, 22807, 5296, 15042, 47525, 1829, 8315, 8717, 46309, 5016, 2423, 4551, 293, 1279, 1248, 304, 293, 454, 45519, 12399, 3714, 19913, 3555, 2407, 9566, 22807, 23032, 3555, 3615, 995, 23758, 4724, 1829, 46309, 5016, 5551, 35457, 16758, 6055, 4117, 2407, 9957, 25724, 1211, 9566, 3660, 12399, 3714, 19913, 3555, 2407, 9566, 22807, 5551, 35457, 16758, 6055, 4117, 2407, 9957, 25724, 1211, 9566, 3660, 12399, 23032, 3555, 3615, 995, 36632, 16490, 1211, 1211, 8978, 3224], "avg_logprob": -0.19893292246795283, "compression_ratio": 1.760180995475113, "no_speech_prob": 5.960464477539063e-08, "words": 
[{"start": 279.53, "end": 279.85, "word": "طبعا", "probability": 0.974365234375}, {"start": 279.85, "end": 279.99, "word": " بدي", "probability": 0.3963623046875}, {"start": 279.99, "end": 280.55, "word": " أذكركم", "probability": 0.8499755859375}, {"start": 280.55, "end": 280.69, "word": " في", "probability": 0.947265625}, {"start": 280.69, "end": 282.15, "word": " الاخ", "probability": 0.4556884765625}, {"start": 282.15, "end": 284.03, "word": " virtue", "probability": 0.40673828125}, {"start": 284.03, "end": 284.87, "word": " سمعناها", "probability": 0.94560546875}, {"start": 284.87, "end": 285.09, "word": " اظن", "probability": 0.7494303385416666}, {"start": 285.09, "end": 285.67, "word": " بالتفصيل", "probability": 0.94775390625}, {"start": 285.67, "end": 286.11, "word": " الممل", "probability": 0.874267578125}, {"start": 286.11, "end": 286.81, "word": " في", "probability": 0.6806640625}, {"start": 286.81, "end": 287.41, "word": " محاضرة", "probability": 0.78887939453125}, {"start": 287.41, "end": 287.67, "word": " ..", "probability": 0.5576171875}, {"start": 287.67, "end": 288.39, "word": " المحاضرة", "probability": 0.919189453125}, {"start": 288.39, "end": 289.71, "word": " الأولى،", "probability": 0.8531494140625}, {"start": 289.71, "end": 290.81, "word": " مظبوط؟", "probability": 0.86669921875}, {"start": 290.81, "end": 291.05, "word": " لما", "probability": 0.878173828125}, {"start": 291.05, "end": 291.49, "word": " بدينا", "probability": 0.857421875}, {"start": 291.49, "end": 291.85, "word": " نشرح", "probability": 0.9801432291666666}, {"start": 291.85, "end": 291.99, "word": " ال", "probability": 0.9814453125}, {"start": 291.99, "end": 292.29, "word": " private", "probability": 0.1534423828125}, {"start": 292.29, "end": 292.49, "word": " and", "probability": 0.5283203125}, {"start": 292.49, "end": 295.69, "word": " external", "probability": 0.5777994791666666}, {"start": 295.69, "end": 295.69, "word": " and", "probability": 0.183837890625}, {"start": 
295.69, "end": 297.49, "word": " exogenous،", "probability": 0.6742350260416666}, {"start": 297.49, "end": 299.19, "word": " مظبوط؟", "probability": 0.9436848958333334}, {"start": 299.19, "end": 300.51, "word": " طبعا", "probability": 0.92333984375}, {"start": 300.51, "end": 300.75, "word": " هذا", "probability": 0.86083984375}, {"start": 300.75, "end": 302.77, "word": " بيشرح", "probability": 0.873291015625}, {"start": 302.77, "end": 303.27, "word": " أسباب", "probability": 0.92724609375}, {"start": 303.27, "end": 303.69, "word": " تكوين", "probability": 0.9632568359375}, {"start": 303.69, "end": 304.43, "word": " الجلطة،", "probability": 0.923046875}, {"start": 304.43, "end": 305.51, "word": " مظبوط؟", "probability": 0.968994140625}, {"start": 305.51, "end": 306.11, "word": " أسباب", "probability": 0.9763997395833334}, {"start": 306.11, "end": 306.59, "word": " تكوين", "probability": 0.983642578125}, {"start": 306.59, "end": 307.25, "word": " الجلطة،", "probability": 0.9283203125}, {"start": 307.25, "end": 307.39, "word": " طبعا", "probability": 0.958740234375}, {"start": 307.39, "end": 307.57, "word": " أي", "probability": 0.82177734375}, {"start": 307.57, "end": 307.91, "word": " خلل", "probability": 0.9930013020833334}, {"start": 307.91, "end": 308.43, "word": " فيه", "probability": 0.916015625}], "temperature": 1.0}, {"id": 11, "seek": 32947, "start": 308.57, "end": 329.47, "text": "الأواعي الدموية أو في مكوناتها أو في محتوها الخارجي أو الداخلي بيودي لجلده من ال clinical condition which is associated with thrombotic disease ال immobility جلة الحركة الشباب دي كلها عبارة عن risk factor", "tokens": [6027, 10721, 14407, 3615, 1829, 32748, 2304, 2407, 10632, 34051, 8978, 3714, 4117, 2407, 8315, 2655, 11296, 34051, 8978, 3714, 33753, 2407, 11296, 33962, 9640, 7435, 1829, 34051, 32748, 47283, 20292, 4724, 1829, 2407, 16254, 5296, 7435, 1211, 3215, 3224, 9154, 2423, 9115, 4188, 597, 307, 6615, 365, 739, 3548, 9411, 4752, 2423, 3397, 996, 1140, 10874, 37977, 21542, 
31747, 3660, 25124, 3555, 16758, 11778, 1829, 28242, 11296, 6225, 3555, 9640, 3660, 18871, 3148, 5952], "avg_logprob": -0.2504111806813039, "compression_ratio": 1.5, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 308.57, "end": 309.17, "word": "الأواعي", "probability": 0.5609619140625}, {"start": 309.17, "end": 309.79, "word": " الدموية", "probability": 0.8885498046875}, {"start": 309.79, "end": 310.49, "word": " أو", "probability": 0.86328125}, {"start": 310.49, "end": 310.69, "word": " في", "probability": 0.91943359375}, {"start": 310.69, "end": 311.97, "word": " مكوناتها", "probability": 0.8984375}, {"start": 311.97, "end": 312.63, "word": " أو", "probability": 0.87451171875}, {"start": 312.63, "end": 312.93, "word": " في", "probability": 0.953125}, {"start": 312.93, "end": 313.63, "word": " محتوها", "probability": 0.873779296875}, {"start": 313.63, "end": 314.37, "word": " الخارجي", "probability": 0.9295654296875}, {"start": 314.37, "end": 314.49, "word": " أو", "probability": 0.814453125}, {"start": 314.49, "end": 314.95, "word": " الداخلي", "probability": 0.9127604166666666}, {"start": 314.95, "end": 315.35, "word": " بيودي", "probability": 0.6876220703125}, {"start": 315.35, "end": 316.91, "word": " لجلده", "probability": 0.527001953125}, {"start": 316.91, "end": 320.17, "word": " من", "probability": 0.77978515625}, {"start": 320.17, "end": 320.79, "word": " ال", "probability": 0.97314453125}, {"start": 320.79, "end": 321.31, "word": " clinical", "probability": 0.7685546875}, {"start": 321.31, "end": 323.07, "word": " condition", "probability": 0.83837890625}, {"start": 323.07, "end": 323.35, "word": " which", "probability": 0.8291015625}, {"start": 323.35, "end": 323.67, "word": " is", "probability": 0.92138671875}, {"start": 323.67, "end": 324.29, "word": " associated", "probability": 0.921875}, {"start": 324.29, "end": 324.53, "word": " with", "probability": 0.779296875}, {"start": 324.53, "end": 324.97, "word": " thrombotic", 
"probability": 0.7916666666666666}, {"start": 324.97, "end": 325.47, "word": " disease", "probability": 0.94091796875}, {"start": 325.47, "end": 326.31, "word": " ال", "probability": 0.548828125}, {"start": 326.31, "end": 327.07, "word": " immobility", "probability": 0.8603515625}, {"start": 327.07, "end": 327.37, "word": " جلة", "probability": 0.475341796875}, {"start": 327.37, "end": 327.83, "word": " الحركة", "probability": 0.9700520833333334}, {"start": 327.83, "end": 328.15, "word": " الشباب", "probability": 0.9554036458333334}, {"start": 328.15, "end": 328.33, "word": " دي", "probability": 0.565185546875}, {"start": 328.33, "end": 328.53, "word": " كلها", "probability": 0.88916015625}, {"start": 328.53, "end": 328.69, "word": " عبارة", "probability": 0.9705810546875}, {"start": 328.69, "end": 328.77, "word": " عن", "probability": 0.97509765625}, {"start": 328.77, "end": 329.01, "word": " risk", "probability": 0.94873046875}, {"start": 329.01, "end": 329.47, "word": " factor", "probability": 0.87548828125}], "temperature": 1.0}, {"id": 12, "seek": 36374, "start": 338.12, "end": 363.74, "text": "smoking ثم cancer ماليجنانسي ماليجنانسي and estrogen therapy كلها عبارة عن stimulus لا إيش لا thrombus formation thrombus formation كانوا أنواع جلطات هي أنواعين إما بتحدث في ال artery و إما في ال vein يسموها arterial thrombosis و venous thrombosis", "tokens": [10817, 5953, 38637, 2304, 5592, 3714, 6027, 1829, 7435, 1863, 7649, 3794, 1829, 3714, 6027, 1829, 7435, 1863, 7649, 3794, 1829, 293, 44754, 9492, 28242, 11296, 6225, 3555, 9640, 3660, 18871, 21366, 20193, 11933, 1829, 8592, 20193, 739, 3548, 301, 11723, 739, 3548, 301, 11723, 25961, 14407, 14739, 14407, 3615, 10874, 1211, 9566, 9307, 39896, 14739, 14407, 3615, 9957, 11933, 15042, 39894, 24401, 12984, 8978, 2423, 38520, 4032, 11933, 15042, 8978, 2423, 30669, 7251, 38251, 2407, 11296, 30455, 831, 739, 3548, 8211, 4032, 6138, 563, 739, 3548, 8211], "avg_logprob": -0.3304073087285074, "compression_ratio": 
1.6407766990291262, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 338.12000000000006, "end": 339.14000000000004, "word": "smoking", "probability": 0.5623779296875}, {"start": 339.14000000000004, "end": 340.16, "word": " ثم", "probability": 0.91259765625}, {"start": 340.16, "end": 340.84, "word": " cancer", "probability": 0.79638671875}, {"start": 340.84, "end": 341.9, "word": " ماليجنانسي", "probability": 0.6243438720703125}, {"start": 341.9, "end": 343.02, "word": " ماليجنانسي", "probability": 0.916778564453125}, {"start": 343.02, "end": 343.14, "word": " and", "probability": 0.2364501953125}, {"start": 343.14, "end": 343.46, "word": " estrogen", "probability": 0.6318359375}, {"start": 343.46, "end": 343.82, "word": " therapy", "probability": 0.341064453125}, {"start": 343.82, "end": 344.22, "word": " كلها", "probability": 0.8662109375}, {"start": 344.22, "end": 344.5, "word": " عبارة", "probability": 0.82379150390625}, {"start": 344.5, "end": 344.76, "word": " عن", "probability": 0.99267578125}, {"start": 344.76, "end": 345.66, "word": " stimulus", "probability": 0.87353515625}, {"start": 345.66, "end": 346.52, "word": " لا", "probability": 0.427734375}, {"start": 346.52, "end": 346.9, "word": " إيش", "probability": 0.6737060546875}, {"start": 346.9, "end": 347.12, "word": " لا", "probability": 0.08758544921875}, {"start": 347.12, "end": 347.68, "word": " thrombus", "probability": 0.7861328125}, {"start": 347.68, "end": 348.52, "word": " formation", "probability": 0.91064453125}, {"start": 348.52, "end": 349.96, "word": " thrombus", "probability": 0.72314453125}, {"start": 349.96, "end": 350.7, "word": " formation", "probability": 0.97314453125}, {"start": 350.7, "end": 353.86, "word": " كانوا", "probability": 0.6650390625}, {"start": 353.86, "end": 354.16, "word": " أنواع", "probability": 0.9440104166666666}, {"start": 354.16, "end": 354.72, "word": " جلطات", "probability": 0.9166259765625}, {"start": 354.72, "end": 354.88, "word": " هي", 
"probability": 0.4599609375}, {"start": 354.88, "end": 355.34, "word": " أنواعين", "probability": 0.78369140625}, {"start": 355.34, "end": 355.64, "word": " إما", "probability": 0.79931640625}, {"start": 355.64, "end": 356.46, "word": " بتحدث", "probability": 0.9060872395833334}, {"start": 356.46, "end": 356.58, "word": " في", "probability": 0.96435546875}, {"start": 356.58, "end": 356.68, "word": " ال", "probability": 0.75732421875}, {"start": 356.68, "end": 357.06, "word": " artery", "probability": 0.21728515625}, {"start": 357.06, "end": 357.34, "word": " و", "probability": 0.916015625}, {"start": 357.34, "end": 357.62, "word": " إما", "probability": 0.70654296875}, {"start": 357.62, "end": 357.94, "word": " في", "probability": 0.96826171875}, {"start": 357.94, "end": 358.06, "word": " ال", "probability": 0.90625}, {"start": 358.06, "end": 358.44, "word": " vein", "probability": 0.8466796875}, {"start": 358.44, "end": 359.36, "word": " يسموها", "probability": 0.888427734375}, {"start": 359.36, "end": 360.06, "word": " arterial", "probability": 0.8515625}, {"start": 360.06, "end": 360.9, "word": " thrombosis", "probability": 0.9723307291666666}, {"start": 360.9, "end": 362.0, "word": " و", "probability": 0.98876953125}, {"start": 362.0, "end": 362.78, "word": " venous", "probability": 0.730224609375}, {"start": 362.78, "end": 363.74, "word": " thrombosis", "probability": 0.9755859375}], "temperature": 1.0}, {"id": 13, "seek": 39216, "start": 365.98, "end": 392.16, "text": "و جالوا ان الـ arterial thrombosis جلطة اللى بتتكون فى الأرض هي برا white thrombus والاسم جاي لإن مكوناتها بيضاء اللون تتكون من ال platelet تتكون من أيهاش؟ من ال platelet ماشى؟ و طبعا ال platelet مرتبط بإيهاش؟", "tokens": [2407, 10874, 6027, 14407, 16472, 2423, 39184, 30455, 831, 739, 3548, 8211, 10874, 1211, 9566, 3660, 13672, 7578, 39894, 2655, 30544, 6156, 7578, 16247, 43042, 39896, 4724, 23557, 2418, 739, 3548, 301, 16070, 32277, 2304, 10874, 47302, 5296, 28814, 1863, 3714, 4117, 2407, 
8315, 2655, 11296, 4724, 1829, 11242, 16606, 13672, 11536, 6055, 2655, 30544, 9154, 2423, 3403, 15966, 6055, 2655, 30544, 9154, 36632, 3224, 33599, 22807, 9154, 2423, 3403, 15966, 3714, 33599, 7578, 22807, 4032, 23032, 3555, 3615, 995, 2423, 3403, 15966, 3714, 43500, 3555, 9566, 4724, 28814, 1829, 3224, 33599, 22807], "avg_logprob": -0.2709441559111818, "compression_ratio": 1.7248677248677249, "no_speech_prob": 0.0, "words": [{"start": 365.98, "end": 366.28, "word": "و", "probability": 0.7236328125}, {"start": 366.28, "end": 366.92, "word": " جالوا", "probability": 0.5126546223958334}, {"start": 366.92, "end": 367.08, "word": " ان", "probability": 0.452392578125}, {"start": 367.08, "end": 367.34, "word": " الـ", "probability": 0.62744140625}, {"start": 367.34, "end": 368.04, "word": " arterial", "probability": 0.7161865234375}, {"start": 368.04, "end": 369.02, "word": " thrombosis", "probability": 0.9239908854166666}, {"start": 369.02, "end": 370.08, "word": " جلطة", "probability": 0.907470703125}, {"start": 370.08, "end": 370.18, "word": " اللى", "probability": 0.718505859375}, {"start": 370.18, "end": 370.64, "word": " بتتكون", "probability": 0.7890625}, {"start": 370.64, "end": 370.86, "word": " فى", "probability": 0.852294921875}, {"start": 370.86, "end": 371.22, "word": " الأرض", "probability": 0.928955078125}, {"start": 371.22, "end": 371.48, "word": " هي", "probability": 0.62109375}, {"start": 371.48, "end": 371.68, "word": " برا", "probability": 0.4683837890625}, {"start": 371.68, "end": 372.1, "word": " white", "probability": 0.205810546875}, {"start": 372.1, "end": 372.76, "word": " thrombus", "probability": 0.6929524739583334}, {"start": 372.76, "end": 374.14, "word": " والاسم", "probability": 0.7689615885416666}, {"start": 374.14, "end": 374.72, "word": " جاي", "probability": 0.756103515625}, {"start": 374.72, "end": 375.78, "word": " لإن", "probability": 0.9347330729166666}, {"start": 375.78, "end": 377.22, "word": " مكوناتها", "probability": 
0.894775390625}, {"start": 377.22, "end": 378.52, "word": " بيضاء", "probability": 0.955078125}, {"start": 378.52, "end": 379.02, "word": " اللون", "probability": 0.827392578125}, {"start": 379.02, "end": 380.7, "word": " تتكون", "probability": 0.8719075520833334}, {"start": 380.7, "end": 380.92, "word": " من", "probability": 0.98193359375}, {"start": 380.92, "end": 381.02, "word": " ال", "probability": 0.869140625}, {"start": 381.02, "end": 381.6, "word": " platelet", "probability": 0.5416259765625}, {"start": 381.6, "end": 382.42, "word": " تتكون", "probability": 0.8063151041666666}, {"start": 382.42, "end": 382.64, "word": " من", "probability": 0.97802734375}, {"start": 382.64, "end": 383.62, "word": " أيهاش؟", "probability": 0.6651611328125}, {"start": 383.62, "end": 383.9, "word": " من", "probability": 0.97998046875}, {"start": 383.9, "end": 384.04, "word": " ال", "probability": 0.9482421875}, {"start": 384.04, "end": 384.68, "word": " platelet", "probability": 0.905029296875}, {"start": 384.68, "end": 387.34, "word": " ماشى؟", "probability": 0.7086181640625}, {"start": 387.34, "end": 387.46, "word": " و", "probability": 0.90869140625}, {"start": 387.46, "end": 387.84, "word": " طبعا", "probability": 0.9520263671875}, {"start": 387.84, "end": 387.94, "word": " ال", "probability": 0.875}, {"start": 387.94, "end": 388.48, "word": " platelet", "probability": 0.913330078125}, {"start": 388.48, "end": 390.56, "word": " مرتبط", "probability": 0.9876708984375}, {"start": 390.56, "end": 392.16, "word": " بإيهاش؟", "probability": 0.8510335286458334}], "temperature": 1.0}, {"id": 14, "seek": 42162, "start": 392.8, "end": 421.62, "text": "بالـ Collagen تتنشط و تخرجت، هتشوفوا الـ Mechanism الـ Venus Thrombosis بيعتبروا الجلطة Fibrin Based يعني أساس الجلطة الـ Coagulation Factor اتنشطت و كونت Fibrin و الـ Fibrin كون جلطة ماشي و الجلطة في هذا الحالة بيدخل في تركيب الـ RBCs مما يؤتي إلى", "tokens": [3555, 6027, 39184, 4586, 4698, 6055, 2655, 1863, 8592, 9566, 4032, 6055, 
34740, 7435, 2655, 12399, 8032, 2655, 8592, 38688, 14407, 2423, 39184, 30175, 1434, 2423, 39184, 23994, 41645, 3548, 8211, 4724, 1829, 34268, 26890, 14407, 25724, 1211, 9566, 3660, 479, 6414, 259, 18785, 37495, 22653, 5551, 3794, 32277, 25724, 1211, 9566, 3660, 2423, 39184, 3066, 559, 2776, 479, 15104, 1975, 2655, 1863, 8592, 9566, 2655, 4032, 9122, 11536, 2655, 479, 6414, 259, 4032, 2423, 39184, 479, 6414, 259, 9122, 11536, 10874, 1211, 9566, 3660, 3714, 33599, 1829, 4032, 25724, 1211, 9566, 3660, 8978, 23758, 21542, 6027, 3660, 4724, 25708, 9778, 1211, 8978, 6055, 31747, 1829, 3555, 2423, 39184, 497, 7869, 82, 3714, 15042, 7251, 33604, 31371, 30731], "avg_logprob": -0.2799369762925541, "compression_ratio": 1.597457627118644, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 392.8, "end": 393.3, "word": "بالـ", "probability": 0.697265625}, {"start": 393.3, "end": 393.96, "word": " Collagen", "probability": 0.700927734375}, {"start": 393.96, "end": 395.06, "word": " تتنشط", "probability": 0.883203125}, {"start": 395.06, "end": 395.2, "word": " و", "probability": 0.65771484375}, {"start": 395.2, "end": 395.84, "word": " تخرجت،", "probability": 0.48984375}, {"start": 395.84, "end": 396.18, "word": " هتشوفوا", "probability": 0.84873046875}, {"start": 396.18, "end": 396.28, "word": " الـ", "probability": 0.719482421875}, {"start": 396.28, "end": 396.68, "word": " Mechanism", "probability": 0.5904541015625}, {"start": 396.68, "end": 398.52, "word": " الـ", "probability": 0.82373046875}, {"start": 398.52, "end": 398.86, "word": " Venus", "probability": 0.50390625}, {"start": 398.86, "end": 399.9, "word": " Thrombosis", "probability": 0.7212727864583334}, {"start": 399.9, "end": 403.4, "word": " بيعتبروا", "probability": 0.82265625}, {"start": 403.4, "end": 403.82, "word": " الجلطة", "probability": 0.845703125}, {"start": 403.82, "end": 404.26, "word": " Fibrin", "probability": 0.6542154947916666}, {"start": 404.26, "end": 404.6, "word": " Based", 
"probability": 0.302001953125}, {"start": 404.6, "end": 404.98, "word": " يعني", "probability": 0.837646484375}, {"start": 404.98, "end": 405.74, "word": " أساس", "probability": 0.8865559895833334}, {"start": 405.74, "end": 407.62, "word": " الجلطة", "probability": 0.84320068359375}, {"start": 407.62, "end": 407.76, "word": " الـ", "probability": 0.408447265625}, {"start": 407.76, "end": 408.28, "word": " Coagulation", "probability": 0.8972981770833334}, {"start": 408.28, "end": 408.76, "word": " Factor", "probability": 0.72119140625}, {"start": 408.76, "end": 409.6, "word": " اتنشطت", "probability": 0.9312337239583334}, {"start": 409.6, "end": 410.12, "word": " و", "probability": 0.947265625}, {"start": 410.12, "end": 410.58, "word": " كونت", "probability": 0.7469075520833334}, {"start": 410.58, "end": 411.28, "word": " Fibrin", "probability": 0.8509114583333334}, {"start": 411.28, "end": 411.84, "word": " و", "probability": 0.740234375}, {"start": 411.84, "end": 412.0, "word": " الـ", "probability": 0.681396484375}, {"start": 412.0, "end": 412.66, "word": " Fibrin", "probability": 0.8929036458333334}, {"start": 412.66, "end": 413.72, "word": " كون", "probability": 0.913330078125}, {"start": 413.72, "end": 414.86, "word": " جلطة", "probability": 0.9205322265625}, {"start": 414.86, "end": 415.38, "word": " ماشي", "probability": 0.6728108723958334}, {"start": 415.38, "end": 415.98, "word": " و", "probability": 0.5654296875}, {"start": 415.98, "end": 416.44, "word": " الجلطة", "probability": 0.986328125}, {"start": 416.44, "end": 416.58, "word": " في", "probability": 0.970703125}, {"start": 416.58, "end": 416.72, "word": " هذا", "probability": 0.63330078125}, {"start": 416.72, "end": 417.3, "word": " الحالة", "probability": 0.9807942708333334}, {"start": 417.3, "end": 417.94, "word": " بيدخل", "probability": 0.9681396484375}, {"start": 417.94, "end": 418.1, "word": " في", "probability": 0.91650390625}, {"start": 418.1, "end": 418.56, "word": " تركيب", "probability": 
0.9656982421875}, {"start": 418.56, "end": 418.86, "word": " الـ", "probability": 0.6240234375}, {"start": 418.86, "end": 420.08, "word": " RBCs", "probability": 0.65966796875}, {"start": 420.08, "end": 420.6, "word": " مما", "probability": 0.904296875}, {"start": 420.6, "end": 421.28, "word": " يؤتي", "probability": 0.85302734375}, {"start": 421.28, "end": 421.62, "word": " إلى", "probability": 0.83935546875}], "temperature": 1.0}, {"id": 15, "seek": 44711, "start": 422.35, "end": 447.11, "text": "إن سير لونها Red و بيسموها Red Thrombus إيش بيسموها؟ Red Thrombus يبقى ال artery هي عبارة عن White Thrombus و ال vein هي عبارة عن Red Thrombus طيب نشوف كيف تتم القالية في كلا الحالتين نبدا من ال arterial formation", "tokens": [28814, 1863, 8608, 13546, 5296, 11536, 11296, 4477, 4032, 4724, 1829, 38251, 2407, 11296, 4477, 41645, 3548, 301, 11933, 1829, 8592, 4724, 1829, 38251, 2407, 11296, 22807, 4477, 41645, 3548, 301, 7251, 3555, 4587, 7578, 2423, 38520, 39896, 6225, 3555, 9640, 3660, 18871, 5552, 41645, 3548, 301, 4032, 2423, 30669, 39896, 6225, 3555, 9640, 3660, 18871, 4477, 41645, 3548, 301, 23032, 1829, 3555, 8717, 8592, 38688, 9122, 33911, 6055, 39237, 25062, 6027, 10632, 8978, 9122, 15040, 21542, 6027, 2655, 9957, 8717, 3555, 28259, 9154, 2423, 30455, 831, 11723], "avg_logprob": -0.23507724585158102, "compression_ratio": 1.6666666666666667, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 422.35, "end": 422.69, "word": "إن", "probability": 0.5567626953125}, {"start": 422.69, "end": 423.11, "word": " سير", "probability": 0.70703125}, {"start": 423.11, "end": 423.69, "word": " لونها", "probability": 0.9031575520833334}, {"start": 423.69, "end": 424.41, "word": " Red", "probability": 0.381103515625}, {"start": 424.41, "end": 424.95, "word": " و", "probability": 0.5888671875}, {"start": 424.95, "end": 425.49, "word": " بيسموها", "probability": 0.852978515625}, {"start": 425.49, "end": 425.79, "word": " Red", "probability": 0.8369140625}, {"start": 425.79, 
"end": 426.43, "word": " Thrombus", "probability": 0.8307291666666666}, {"start": 426.43, "end": 426.71, "word": " إيش", "probability": 0.7745768229166666}, {"start": 426.71, "end": 427.93, "word": " بيسموها؟", "probability": 0.929931640625}, {"start": 427.93, "end": 428.27, "word": " Red", "probability": 0.89892578125}, {"start": 428.27, "end": 428.95, "word": " Thrombus", "probability": 0.9423828125}, {"start": 428.95, "end": 429.55, "word": " يبقى", "probability": 0.82061767578125}, {"start": 429.55, "end": 429.73, "word": " ال", "probability": 0.92138671875}, {"start": 429.73, "end": 430.15, "word": " artery", "probability": 0.350341796875}, {"start": 430.15, "end": 430.43, "word": " هي", "probability": 0.90087890625}, {"start": 430.43, "end": 430.67, "word": " عبارة", "probability": 0.9769287109375}, {"start": 430.67, "end": 430.83, "word": " عن", "probability": 0.93017578125}, {"start": 430.83, "end": 431.11, "word": " White", "probability": 0.50341796875}, {"start": 431.11, "end": 431.73, "word": " Thrombus", "probability": 0.94482421875}, {"start": 431.73, "end": 431.89, "word": " و", "probability": 0.73193359375}, {"start": 431.89, "end": 432.03, "word": " ال", "probability": 0.8125}, {"start": 432.03, "end": 432.47, "word": " vein", "probability": 0.8681640625}, {"start": 432.47, "end": 433.15, "word": " هي", "probability": 0.87060546875}, {"start": 433.15, "end": 433.41, "word": " عبارة", "probability": 0.9921875}, {"start": 433.41, "end": 433.57, "word": " عن", "probability": 0.95166015625}, {"start": 433.57, "end": 434.05, "word": " Red", "probability": 0.24560546875}, {"start": 434.05, "end": 435.55, "word": " Thrombus", "probability": 0.95654296875}, {"start": 435.55, "end": 437.37, "word": " طيب", "probability": 0.8865559895833334}, {"start": 437.37, "end": 437.87, "word": " نشوف", "probability": 0.8963216145833334}, {"start": 437.87, "end": 438.19, "word": " كيف", "probability": 0.98583984375}, {"start": 438.19, "end": 438.67, "word": " تتم", 
"probability": 0.953857421875}, {"start": 438.67, "end": 439.37, "word": " القالية", "probability": 0.76220703125}, {"start": 439.37, "end": 440.53, "word": " في", "probability": 0.90478515625}, {"start": 440.53, "end": 441.05, "word": " كلا", "probability": 0.79248046875}, {"start": 441.05, "end": 443.49, "word": " الحالتين", "probability": 0.982421875}, {"start": 443.49, "end": 445.17, "word": " نبدا", "probability": 0.4750162760416667}, {"start": 445.17, "end": 445.29, "word": " من", "probability": 0.459716796875}, {"start": 445.29, "end": 445.43, "word": " ال", "probability": 0.6484375}, {"start": 445.43, "end": 446.17, "word": " arterial", "probability": 0.84716796875}, {"start": 446.17, "end": 447.11, "word": " formation", "probability": 0.91357421875}], "temperature": 1.0}, {"id": 16, "seek": 47570, "start": 450.16, "end": 475.7, "text": "وانتقوا عليها شباب، شو اللي بيصير؟ كنا خدنا تركيبة الوعاء الدموي، وكنا الوعاء الدموي يتكوّم الخلايا ابثيلية مبطنة لجدار الوعاء الدموي، و هذه الخلايا بتكون طبقة خاصة منها عرفناها منها smooth، smooth، ال non-thrombogenic، non-thrombogenic، وإن الإبلات لما تيجي تقعد عليها، شو بيصلها؟", "tokens": [2407, 7649, 2655, 4587, 14407, 25894, 11296, 13412, 3555, 16758, 12399, 13412, 2407, 13672, 1829, 4724, 1829, 9381, 13546, 22807, 9122, 8315, 16490, 3215, 8315, 6055, 31747, 1829, 49401, 2423, 45367, 16606, 32748, 2304, 45865, 12399, 4032, 4117, 8315, 2423, 45367, 16606, 32748, 2304, 45865, 7251, 2655, 4117, 2407, 11703, 2304, 33962, 15040, 25528, 48127, 12984, 26895, 10632, 3714, 3555, 9566, 1863, 3660, 5296, 7435, 3215, 9640, 2423, 45367, 16606, 32748, 2304, 45865, 12399, 4032, 29538, 33962, 15040, 25528, 39894, 30544, 23032, 3555, 28671, 16490, 33546, 3660, 9154, 11296, 6225, 28480, 8315, 11296, 9154, 11296, 5508, 12399, 5508, 12399, 2423, 2107, 12, 392, 81, 3548, 25473, 12399, 2107, 12, 392, 81, 3548, 25473, 12399, 4032, 28814, 1863, 33688, 36150, 9307, 5296, 15042, 6055, 1829, 7435, 1829, 6055, 4587, 22488, 25894, 11296, 12399, 
13412, 2407, 4724, 1829, 36520, 11296, 22807], "avg_logprob": -0.21183035991021565, "compression_ratio": 1.9116465863453815, "no_speech_prob": 4.76837158203125e-07, "words": [{"start": 450.16, "end": 450.6, "word": "وانتقوا", "probability": 0.631591796875}, {"start": 450.6, "end": 450.8, "word": " عليها", "probability": 0.940185546875}, {"start": 450.8, "end": 451.5, "word": " شباب،", "probability": 0.76513671875}, {"start": 451.5, "end": 451.78, "word": " شو", "probability": 0.695556640625}, {"start": 451.78, "end": 451.86, "word": " اللي", "probability": 0.8154296875}, {"start": 451.86, "end": 452.52, "word": " بيصير؟", "probability": 0.8849609375}, {"start": 452.52, "end": 452.76, "word": " كنا", "probability": 0.923583984375}, {"start": 452.76, "end": 453.3, "word": " خدنا", "probability": 0.81591796875}, {"start": 453.3, "end": 454.1, "word": " تركيبة", "probability": 0.9547119140625}, {"start": 454.1, "end": 454.46, "word": " الوعاء", "probability": 0.8548177083333334}, {"start": 454.46, "end": 455.46, "word": " الدموي،", "probability": 0.8594970703125}, {"start": 455.46, "end": 455.72, "word": " وكنا", "probability": 0.8961588541666666}, {"start": 455.72, "end": 456.06, "word": " الوعاء", "probability": 0.9537760416666666}, {"start": 456.06, "end": 456.62, "word": " الدموي", "probability": 0.9884440104166666}, {"start": 456.62, "end": 457.26, "word": " يتكوّم", "probability": 0.870361328125}, {"start": 457.26, "end": 457.88, "word": " الخلايا", "probability": 0.7623697916666666}, {"start": 457.88, "end": 459.0, "word": " ابثيلية", "probability": 0.706787109375}, {"start": 459.0, "end": 459.68, "word": " مبطنة", "probability": 0.95029296875}, {"start": 459.68, "end": 460.14, "word": " لجدار", "probability": 0.7972412109375}, {"start": 460.14, "end": 460.46, "word": " الوعاء", "probability": 0.9244791666666666}, {"start": 460.46, "end": 461.2, "word": " الدموي،", "probability": 0.8782958984375}, {"start": 461.2, "end": 461.46, "word": " و", "probability": 
0.94287109375}, {"start": 461.46, "end": 461.64, "word": " هذه", "probability": 0.28173828125}, {"start": 461.64, "end": 462.22, "word": " الخلايا", "probability": 0.9724934895833334}, {"start": 462.22, "end": 462.64, "word": " بتكون", "probability": 0.84375}, {"start": 462.64, "end": 463.08, "word": " طبقة", "probability": 0.8199869791666666}, {"start": 463.08, "end": 463.46, "word": " خاصة", "probability": 0.9658203125}, {"start": 463.46, "end": 463.86, "word": " منها", "probability": 0.839111328125}, {"start": 463.86, "end": 464.98, "word": " عرفناها", "probability": 0.9107666015625}, {"start": 464.98, "end": 465.2, "word": " منها", "probability": 0.8935546875}, {"start": 465.2, "end": 466.52, "word": " smooth،", "probability": 0.63427734375}, {"start": 466.52, "end": 467.56, "word": " smooth،", "probability": 0.4615478515625}, {"start": 467.56, "end": 467.78, "word": " ال", "probability": 0.56982421875}, {"start": 467.78, "end": 468.14, "word": " non", "probability": 0.6259765625}, {"start": 468.14, "end": 469.4, "word": "-thrombogenic،", "probability": 0.81494140625}, {"start": 469.4, "end": 469.8, "word": " non", "probability": 0.95556640625}, {"start": 469.8, "end": 471.32, "word": "-thrombogenic،", "probability": 0.8583170572916666}, {"start": 471.32, "end": 472.22, "word": " وإن", "probability": 0.8458658854166666}, {"start": 472.22, "end": 472.92, "word": " الإبلات", "probability": 0.485595703125}, {"start": 472.92, "end": 473.3, "word": " لما", "probability": 0.81884765625}, {"start": 473.3, "end": 473.66, "word": " تيجي", "probability": 0.96044921875}, {"start": 473.66, "end": 474.2, "word": " تقعد", "probability": 0.8561197916666666}, {"start": 474.2, "end": 475.04, "word": " عليها،", "probability": 0.8876953125}, {"start": 475.04, "end": 475.14, "word": " شو", "probability": 0.984130859375}, {"start": 475.14, "end": 475.7, "word": " بيصلها؟", "probability": 0.8794921875}], "temperature": 1.0}, {"id": 17, "seek": 49117, "start": 480.93, "end": 491.17, 
"text": "مواد قولنا إما Anti-coagulant أو Pro-coagulant 100% المعظم Anti-coagulant في ال condition عادية", "tokens": [2304, 14407, 3215, 12174, 12610, 8315, 11933, 15042, 27757, 12, 1291, 559, 425, 394, 34051, 1705, 12, 1291, 559, 425, 394, 2319, 4, 9673, 3615, 19913, 2304, 27757, 12, 1291, 559, 425, 394, 8978, 2423, 4188, 6225, 18513, 10632], "avg_logprob": -0.48906252533197403, "compression_ratio": 1.180952380952381, "no_speech_prob": 8.58306884765625e-06, "words": [{"start": 480.93, "end": 482.01, "word": "مواد", "probability": 0.5023600260416666}, {"start": 482.01, "end": 483.09, "word": " قولنا", "probability": 0.4667154947916667}, {"start": 483.09, "end": 483.47, "word": " إما", "probability": 0.7069091796875}, {"start": 483.47, "end": 483.99, "word": " Anti", "probability": 0.25146484375}, {"start": 483.99, "end": 484.59, "word": "-coagulant", "probability": 0.73212890625}, {"start": 484.59, "end": 485.49, "word": " أو", "probability": 0.77197265625}, {"start": 485.49, "end": 485.79, "word": " Pro", "probability": 0.77392578125}, {"start": 485.79, "end": 486.43, "word": "-coagulant", "probability": 0.93740234375}, {"start": 486.43, "end": 487.17, "word": " 100", "probability": 0.13330078125}, {"start": 487.17, "end": 488.83, "word": "%", "probability": 0.81982421875}, {"start": 488.83, "end": 489.47, "word": " المعظم", "probability": 0.9427490234375}, {"start": 489.47, "end": 489.75, "word": " Anti", "probability": 0.6923828125}, {"start": 489.75, "end": 490.27, "word": "-coagulant", "probability": 0.95771484375}, {"start": 490.27, "end": 490.41, "word": " في", "probability": 0.7470703125}, {"start": 490.41, "end": 490.47, "word": " ال", "probability": 0.5224609375}, {"start": 490.47, "end": 491.01, "word": " condition", "probability": 0.2022705078125}, {"start": 491.01, "end": 491.17, "word": " عادية", "probability": 0.6446533203125}], "temperature": 1.0}, {"id": 18, "seek": 52313, "start": 500.27, "end": 523.13, "text": "و يوم ما اتبطل تعمل ال platelets 
بما تيجي تجه الى الاسوسيا هذه صورة بيناراها جالية في ال atherosclerosis تصلب الشرايين ال atherosclerosis ال epithelial cells بتبطل non-thrombogenic بل بتتحول الى thrombogenic layer", "tokens": [2407, 7251, 20498, 19446, 1975, 2655, 3555, 9566, 1211, 6055, 25957, 1211, 2423, 3403, 37220, 4724, 15042, 6055, 1829, 7435, 1829, 6055, 7435, 3224, 2423, 7578, 2423, 32277, 41779, 25528, 29538, 20328, 13063, 3660, 4724, 1829, 8315, 23557, 11296, 10874, 6027, 10632, 8978, 2423, 257, 616, 10466, 1918, 8211, 6055, 9381, 46152, 25124, 23557, 1829, 9957, 2423, 257, 616, 10466, 1918, 8211, 2423, 2388, 355, 338, 831, 5438, 39894, 3555, 9566, 1211, 2107, 12, 392, 4397, 65, 25473, 4724, 1211, 39894, 2655, 5016, 12610, 2423, 7578, 739, 3548, 25473, 4583], "avg_logprob": -0.3705357097007416, "compression_ratio": 1.5906735751295338, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 500.27, "end": 501.09, "word": "و", "probability": 0.260009765625}, {"start": 501.09, "end": 501.43, "word": " يوم", "probability": 0.55438232421875}, {"start": 501.43, "end": 501.69, "word": " ما", "probability": 0.83544921875}, {"start": 501.69, "end": 502.33, "word": " اتبطل", "probability": 0.7712646484375}, {"start": 502.33, "end": 502.73, "word": " تعمل", "probability": 0.9527994791666666}, {"start": 502.73, "end": 502.91, "word": " ال", "probability": 0.6083984375}, {"start": 502.91, "end": 503.39, "word": " platelets", "probability": 0.41656494140625}, {"start": 503.39, "end": 503.65, "word": " بما", "probability": 0.2728271484375}, {"start": 503.65, "end": 504.07, "word": " تيجي", "probability": 0.70989990234375}, {"start": 504.07, "end": 505.07, "word": " تجه", "probability": 0.6422526041666666}, {"start": 505.07, "end": 505.25, "word": " الى", "probability": 0.4974365234375}, {"start": 505.25, "end": 506.09, "word": " الاسوسيا", "probability": 0.6754150390625}, {"start": 506.09, "end": 507.25, "word": " هذه", "probability": 0.182373046875}, {"start": 507.25, "end": 507.97, "word": " 
صورة", "probability": 0.8932291666666666}, {"start": 507.97, "end": 508.71, "word": " بيناراها", "probability": 0.733154296875}, {"start": 508.71, "end": 509.31, "word": " جالية", "probability": 0.9580078125}, {"start": 509.31, "end": 509.93, "word": " في", "probability": 0.74462890625}, {"start": 509.93, "end": 510.07, "word": " ال", "probability": 0.96533203125}, {"start": 510.07, "end": 511.47, "word": " atherosclerosis", "probability": 0.816552734375}, {"start": 511.47, "end": 512.73, "word": " تصلب", "probability": 0.8048502604166666}, {"start": 512.73, "end": 513.67, "word": " الشرايين", "probability": 0.6575927734375}, {"start": 513.67, "end": 514.03, "word": " ال", "probability": 0.55322265625}, {"start": 514.03, "end": 515.43, "word": " atherosclerosis", "probability": 0.794580078125}, {"start": 515.43, "end": 516.11, "word": " ال", "probability": 0.9208984375}, {"start": 516.11, "end": 516.89, "word": " epithelial", "probability": 0.8681640625}, {"start": 516.89, "end": 517.29, "word": " cells", "probability": 0.85888671875}, {"start": 517.29, "end": 518.35, "word": " بتبطل", "probability": 0.9681396484375}, {"start": 518.35, "end": 518.65, "word": " non", "probability": 0.394775390625}, {"start": 518.65, "end": 519.51, "word": "-thrombogenic", "probability": 0.7923828125}, {"start": 519.51, "end": 520.29, "word": " بل", "probability": 0.9521484375}, {"start": 520.29, "end": 521.79, "word": " بتتحول", "probability": 0.963134765625}, {"start": 521.79, "end": 522.09, "word": " الى", "probability": 0.625}, {"start": 522.09, "end": 522.79, "word": " thrombogenic", "probability": 0.8391927083333334}, {"start": 522.79, "end": 523.13, "word": " layer", "probability": 0.36328125}], "temperature": 1.0}, {"id": 19, "seek": 54754, "start": 523.9, "end": 547.54, "text": "ماشى، ليش نتيجة تكون كميات كبيرة من المخلفات اسمها bleak، bleak بمعنى طعوم، بطل إيش الوعاء الدموي healthy، بطل إيش الوعاء الدموي healthy وبالتالي صار thrombogenic ليه؟ وبالتالي صارتيش تمسك في 
البلاد", "tokens": [2304, 33599, 7578, 12399, 32239, 8592, 8717, 31371, 7435, 3660, 6055, 30544, 9122, 2304, 1829, 9307, 9122, 3555, 48923, 9154, 9673, 9778, 46538, 9307, 24525, 2304, 11296, 5408, 514, 12399, 5408, 514, 4724, 2304, 3615, 1863, 7578, 23032, 3615, 20498, 12399, 4724, 9566, 1211, 11933, 1829, 8592, 2423, 45367, 16606, 32748, 2304, 45865, 4627, 12399, 4724, 9566, 1211, 11933, 1829, 8592, 2423, 45367, 16606, 32748, 2304, 45865, 4627, 46599, 6027, 2655, 6027, 1829, 20328, 9640, 739, 3548, 25473, 32239, 3224, 22807, 46599, 6027, 2655, 6027, 1829, 20328, 9640, 31371, 8592, 46811, 3794, 4117, 8978, 29739, 15040, 3215], "avg_logprob": -0.21316964726667015, "compression_ratio": 1.7593582887700534, "no_speech_prob": 0.0, "words": [{"start": 523.9, "end": 525.0, "word": "ماشى،", "probability": 0.7662353515625}, {"start": 525.0, "end": 525.42, "word": " ليش", "probability": 0.987060546875}, {"start": 525.42, "end": 526.0, "word": " نتيجة", "probability": 0.846435546875}, {"start": 526.0, "end": 526.5, "word": " تكون", "probability": 0.93017578125}, {"start": 526.5, "end": 527.14, "word": " كميات", "probability": 0.985107421875}, {"start": 527.14, "end": 527.54, "word": " كبيرة", "probability": 0.9739583333333334}, {"start": 527.54, "end": 527.96, "word": " من", "probability": 0.98828125}, {"start": 527.96, "end": 530.06, "word": " المخلفات", "probability": 0.962646484375}, {"start": 530.06, "end": 530.52, "word": " اسمها", "probability": 0.8738606770833334}, {"start": 530.52, "end": 531.46, "word": " bleak،", "probability": 0.40234375}, {"start": 531.46, "end": 531.86, "word": " bleak", "probability": 0.904541015625}, {"start": 531.86, "end": 532.3, "word": " بمعنى", "probability": 0.96572265625}, {"start": 532.3, "end": 533.3, "word": " طعوم،", "probability": 0.8184814453125}, {"start": 533.3, "end": 533.7, "word": " بطل", "probability": 0.89208984375}, {"start": 533.7, "end": 534.06, "word": " إيش", "probability": 0.5776774088541666}, {"start": 534.06, "end": 
535.94, "word": " الوعاء", "probability": 0.81396484375}, {"start": 535.94, "end": 536.44, "word": " الدموي", "probability": 0.95751953125}, {"start": 536.44, "end": 537.26, "word": " healthy،", "probability": 0.6195068359375}, {"start": 537.26, "end": 537.94, "word": " بطل", "probability": 0.9332682291666666}, {"start": 537.94, "end": 538.28, "word": " إيش", "probability": 0.9837239583333334}, {"start": 538.28, "end": 538.76, "word": " الوعاء", "probability": 0.98876953125}, {"start": 538.76, "end": 539.34, "word": " الدموي", "probability": 0.9900716145833334}, {"start": 539.34, "end": 540.08, "word": " healthy", "probability": 0.95166015625}, {"start": 540.08, "end": 540.8, "word": " وبالتالي", "probability": 0.852490234375}, {"start": 540.8, "end": 542.2, "word": " صار", "probability": 0.987060546875}, {"start": 542.2, "end": 543.78, "word": " thrombogenic", "probability": 0.8689778645833334}, {"start": 543.78, "end": 545.12, "word": " ليه؟", "probability": 0.7132161458333334}, {"start": 545.12, "end": 545.88, "word": " وبالتالي", "probability": 0.89931640625}, {"start": 545.88, "end": 546.54, "word": " صارتيش", "probability": 0.692657470703125}, {"start": 546.54, "end": 547.06, "word": " تمسك", "probability": 0.9658203125}, {"start": 547.06, "end": 547.2, "word": " في", "probability": 0.77734375}, {"start": 547.2, "end": 547.54, "word": " البلاد", "probability": 0.6788736979166666}], "temperature": 1.0}, {"id": 20, "seek": 57504, "start": 547.89, "end": 575.05, "text": "يبقى بداية تكوين الجلطة في الوعاء الدمو في ال arterial .. في ال arterial هو .. 
هو غالبا ايه اش؟ ان هو ال platelet تنشطت نتيجة ايه اش؟ انه صار في atherosclerotic condition اش يعني تنشطت؟ تمسكت و بداية نشاطها adhesion و هي adhesion", "tokens": [1829, 3555, 4587, 7578, 4724, 28259, 10632, 6055, 4117, 2407, 9957, 25724, 1211, 9566, 3660, 8978, 2423, 45367, 16606, 32748, 2304, 2407, 8978, 2423, 30455, 831, 4386, 8978, 2423, 30455, 831, 31439, 4386, 31439, 32771, 6027, 3555, 995, 1975, 1829, 3224, 1975, 8592, 22807, 16472, 31439, 2423, 3403, 15966, 6055, 1863, 8592, 9566, 2655, 8717, 31371, 7435, 3660, 1975, 1829, 3224, 1975, 8592, 22807, 16472, 3224, 20328, 9640, 8978, 257, 616, 10466, 1918, 9411, 4188, 1975, 8592, 37495, 22653, 6055, 1863, 8592, 9566, 2655, 22807, 46811, 3794, 4117, 2655, 4032, 4724, 28259, 10632, 8717, 8592, 41193, 11296, 614, 38571, 4032, 39896, 614, 38571], "avg_logprob": -0.23046874770751366, "compression_ratio": 1.6926829268292682, "no_speech_prob": 0.0, "words": [{"start": 547.89, "end": 548.39, "word": "يبقى", "probability": 0.9293212890625}, {"start": 548.39, "end": 548.95, "word": " بداية", "probability": 0.9544270833333334}, {"start": 548.95, "end": 549.47, "word": " تكوين", "probability": 0.9735107421875}, {"start": 549.47, "end": 550.03, "word": " الجلطة", "probability": 0.9114990234375}, {"start": 550.03, "end": 550.13, "word": " في", "probability": 0.892578125}, {"start": 550.13, "end": 550.49, "word": " الوعاء", "probability": 0.8328450520833334}, {"start": 550.49, "end": 550.87, "word": " الدمو", "probability": 0.8053385416666666}, {"start": 550.87, "end": 550.97, "word": " في", "probability": 0.4912109375}, {"start": 550.97, "end": 551.09, "word": " ال", "probability": 0.96728515625}, {"start": 551.09, "end": 551.75, "word": " arterial", "probability": 0.63330078125}, {"start": 551.75, "end": 551.75, "word": " ..", "probability": 0.26904296875}, {"start": 551.75, "end": 552.51, "word": " في", "probability": 0.6767578125}, {"start": 552.51, "end": 552.63, "word": " ال", "probability": 0.95166015625}, {"start": 
552.63, "end": 553.81, "word": " arterial", "probability": 0.98046875}, {"start": 553.81, "end": 557.67, "word": " هو", "probability": 0.39453125}, {"start": 557.67, "end": 558.03, "word": " ..", "probability": 0.68896484375}, {"start": 558.03, "end": 558.55, "word": " هو", "probability": 0.9765625}, {"start": 558.55, "end": 559.11, "word": " غالبا", "probability": 0.98779296875}, {"start": 559.11, "end": 559.35, "word": " ايه", "probability": 0.6250813802083334}, {"start": 559.35, "end": 559.89, "word": " اش؟", "probability": 0.5330403645833334}, {"start": 559.89, "end": 560.09, "word": " ان", "probability": 0.90576171875}, {"start": 560.09, "end": 560.25, "word": " هو", "probability": 0.587890625}, {"start": 560.25, "end": 560.39, "word": " ال", "probability": 0.47314453125}, {"start": 560.39, "end": 561.05, "word": " platelet", "probability": 0.462127685546875}, {"start": 561.05, "end": 562.21, "word": " تنشطت", "probability": 0.882421875}, {"start": 562.21, "end": 563.41, "word": " نتيجة", "probability": 0.986572265625}, {"start": 563.41, "end": 563.69, "word": " ايه", "probability": 0.8816731770833334}, {"start": 563.69, "end": 564.39, "word": " اش؟", "probability": 0.939453125}, {"start": 564.39, "end": 565.03, "word": " انه", "probability": 0.976806640625}, {"start": 565.03, "end": 565.79, "word": " صار", "probability": 0.9912109375}, {"start": 565.79, "end": 566.13, "word": " في", "probability": 0.94677734375}, {"start": 566.13, "end": 567.77, "word": " atherosclerotic", "probability": 0.8857421875}, {"start": 567.77, "end": 568.71, "word": " condition", "probability": 0.9853515625}, {"start": 568.71, "end": 569.49, "word": " اش", "probability": 0.77099609375}, {"start": 569.49, "end": 569.67, "word": " يعني", "probability": 0.928466796875}, {"start": 569.67, "end": 570.49, "word": " تنشطت؟", "probability": 0.8530680338541666}, {"start": 570.49, "end": 571.19, "word": " تمسكت", "probability": 0.81927490234375}, {"start": 571.19, "end": 571.35, "word": " و", 
"probability": 0.402099609375}, {"start": 571.35, "end": 571.81, "word": " بداية", "probability": 0.8790690104166666}, {"start": 571.81, "end": 572.49, "word": " نشاطها", "probability": 0.9510498046875}, {"start": 572.49, "end": 573.07, "word": " adhesion", "probability": 0.773193359375}, {"start": 573.07, "end": 574.35, "word": " و", "probability": 0.529296875}, {"start": 574.35, "end": 574.49, "word": " هي", "probability": 0.91455078125}, {"start": 574.49, "end": 575.05, "word": " adhesion", "probability": 0.9482421875}], "temperature": 1.0}, {"id": 21, "seek": 60433, "start": 575.67, "end": 604.33, "text": "بياليها .. بظبط secretion ففي ADP release وال ADP بنشر ال platelet اللي انمسكت و اللي بدأ تدفع، مظبوط؟ فالمحاصلة بيناق جلطة من ال platelet في منطقة الاتصاق في منطقة الاتصاق هذا بيودي اليش إلى blood formation و obstruction للوعاء الدموي", "tokens": [3555, 1829, 6027, 1829, 11296, 4386, 4724, 19913, 3555, 9566, 4054, 313, 6156, 41185, 9135, 47, 4374, 16070, 9135, 47, 44945, 46309, 2423, 3403, 15966, 13672, 1829, 16472, 2304, 3794, 4117, 2655, 4032, 13672, 1829, 47525, 10721, 6055, 3215, 5172, 3615, 12399, 3714, 19913, 3555, 2407, 9566, 22807, 6156, 45340, 5016, 33546, 37977, 4724, 1829, 8315, 4587, 10874, 1211, 9566, 3660, 9154, 2423, 3403, 15966, 8978, 9154, 9566, 28671, 2423, 9307, 9381, 995, 4587, 8978, 9154, 9566, 28671, 2423, 9307, 9381, 995, 4587, 23758, 4724, 1829, 2407, 16254, 45595, 8592, 30731, 3390, 11723, 4032, 49711, 24976, 45367, 16606, 32748, 2304, 45865], "avg_logprob": -0.3102022102650474, "compression_ratio": 1.5732758620689655, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 575.67, "end": 576.55, "word": "بياليها", "probability": 0.672216796875}, {"start": 576.55, "end": 576.99, "word": " ..", "probability": 0.2286376953125}, {"start": 576.99, "end": 578.87, "word": " بظبط", "probability": 0.59136962890625}, {"start": 578.87, "end": 579.67, "word": " secretion", "probability": 0.7020263671875}, {"start": 579.67, "end": 580.07, 
"word": " ففي", "probability": 0.842041015625}, {"start": 580.07, "end": 580.65, "word": " ADP", "probability": 0.7022705078125}, {"start": 580.65, "end": 581.47, "word": " release", "probability": 0.52001953125}, {"start": 581.47, "end": 582.19, "word": " وال", "probability": 0.63330078125}, {"start": 582.19, "end": 582.69, "word": " ADP", "probability": 0.861083984375}, {"start": 582.69, "end": 583.17, "word": " بنشر", "probability": 0.73095703125}, {"start": 583.17, "end": 583.41, "word": " ال", "probability": 0.470703125}, {"start": 583.41, "end": 584.25, "word": " platelet", "probability": 0.611328125}, {"start": 584.25, "end": 585.03, "word": " اللي", "probability": 0.909423828125}, {"start": 585.03, "end": 585.99, "word": " انمسكت", "probability": 0.857275390625}, {"start": 585.99, "end": 586.31, "word": " و", "probability": 0.60107421875}, {"start": 586.31, "end": 586.77, "word": " اللي", "probability": 0.93408203125}, {"start": 586.77, "end": 587.93, "word": " بدأ", "probability": 0.81494140625}, {"start": 587.93, "end": 588.41, "word": " تدفع،", "probability": 0.45625}, {"start": 588.41, "end": 589.51, "word": " مظبوط؟", "probability": 0.89794921875}, {"start": 589.51, "end": 590.57, "word": " فالمحاصلة", "probability": 0.89677734375}, {"start": 590.57, "end": 591.83, "word": " بيناق", "probability": 0.77880859375}, {"start": 591.83, "end": 593.11, "word": " جلطة", "probability": 0.9930419921875}, {"start": 593.11, "end": 593.27, "word": " من", "probability": 0.990234375}, {"start": 593.27, "end": 593.43, "word": " ال", "probability": 0.95361328125}, {"start": 593.43, "end": 594.09, "word": " platelet", "probability": 0.822021484375}, {"start": 594.09, "end": 594.69, "word": " في", "probability": 0.90869140625}, {"start": 594.69, "end": 595.19, "word": " منطقة", "probability": 0.9412434895833334}, {"start": 595.19, "end": 596.65, "word": " الاتصاق", "probability": 0.7958984375}, {"start": 596.65, "end": 597.15, "word": " في", "probability": 
0.62158203125}, {"start": 597.15, "end": 597.69, "word": " منطقة", "probability": 0.9459635416666666}, {"start": 597.69, "end": 598.47, "word": " الاتصاق", "probability": 0.98603515625}, {"start": 598.47, "end": 598.65, "word": " هذا", "probability": 0.5126953125}, {"start": 598.65, "end": 599.07, "word": " بيودي", "probability": 0.77532958984375}, {"start": 599.07, "end": 599.59, "word": " اليش", "probability": 0.3953857421875}, {"start": 599.59, "end": 600.51, "word": " إلى", "probability": 0.34033203125}, {"start": 600.51, "end": 601.11, "word": " blood", "probability": 0.54833984375}, {"start": 601.11, "end": 601.79, "word": " formation", "probability": 0.9892578125}, {"start": 601.79, "end": 602.37, "word": " و", "probability": 0.91259765625}, {"start": 602.37, "end": 603.05, "word": " obstruction", "probability": 0.85888671875}, {"start": 603.05, "end": 603.75, "word": " للوعاء", "probability": 0.6884765625}, {"start": 603.75, "end": 604.33, "word": " الدموي", "probability": 0.9676106770833334}], "temperature": 1.0}, {"id": 22, "seek": 63221, "start": 604.75, "end": 632.21, "text": "و ischemia و إذا صار فيه ischemia طبعا بيصير فيه organ failure بيصير فيه organ failure طبعا البحثة شباب localized tissue injury والسبب في قلة في ال perfusion ال perfusion يعني إيه إيش؟", "tokens": [2407, 307, 339, 14058, 4032, 11933, 15730, 20328, 9640, 8978, 3224, 307, 339, 14058, 23032, 3555, 3615, 995, 4724, 1829, 9381, 13546, 8978, 3224, 1798, 7763, 4724, 1829, 9381, 13546, 8978, 3224, 1798, 7763, 23032, 3555, 3615, 995, 29739, 5016, 12984, 3660, 13412, 3555, 16758, 44574, 12404, 10454, 16070, 35457, 3555, 8978, 12174, 37977, 8978, 2423, 13826, 5704, 2423, 13826, 5704, 37495, 22653, 11933, 1829, 3224, 11933, 1829, 8592, 22807], "avg_logprob": -0.2557218368624298, "compression_ratio": 1.5864197530864197, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 604.75, "end": 604.99, "word": "و", "probability": 0.9404296875}, {"start": 604.99, "end": 605.73, "word": " 
ischemia", "probability": 0.6979166666666666}, {"start": 605.73, "end": 606.33, "word": " و", "probability": 0.9345703125}, {"start": 606.33, "end": 606.41, "word": " إذا", "probability": 0.6998291015625}, {"start": 606.41, "end": 606.69, "word": " صار", "probability": 0.931640625}, {"start": 606.69, "end": 606.87, "word": " فيه", "probability": 0.7275390625}, {"start": 606.87, "end": 607.33, "word": " ischemia", "probability": 0.8743489583333334}, {"start": 607.33, "end": 607.85, "word": " طبعا", "probability": 0.960205078125}, {"start": 607.85, "end": 608.87, "word": " بيصير", "probability": 0.8941650390625}, {"start": 608.87, "end": 609.39, "word": " فيه", "probability": 0.6915283203125}, {"start": 609.39, "end": 610.35, "word": " organ", "probability": 0.91259765625}, {"start": 610.35, "end": 610.87, "word": " failure", "probability": 0.86376953125}, {"start": 610.87, "end": 611.51, "word": " بيصير", "probability": 0.87744140625}, {"start": 611.51, "end": 611.83, "word": " فيه", "probability": 0.931640625}, {"start": 611.83, "end": 612.25, "word": " organ", "probability": 0.9541015625}, {"start": 612.25, "end": 615.47, "word": " failure", "probability": 0.385986328125}, {"start": 615.47, "end": 616.91, "word": " طبعا", "probability": 0.9737548828125}, {"start": 616.91, "end": 622.65, "word": " البحثة", "probability": 0.6990966796875}, {"start": 622.65, "end": 623.05, "word": " شباب", "probability": 0.692138671875}, {"start": 623.05, "end": 623.61, "word": " localized", "probability": 0.8818359375}, {"start": 623.61, "end": 624.45, "word": " tissue", "probability": 0.91064453125}, {"start": 624.45, "end": 625.71, "word": " injury", "probability": 0.90234375}, {"start": 625.71, "end": 627.79, "word": " والسبب", "probability": 0.800048828125}, {"start": 627.79, "end": 628.17, "word": " في", "probability": 0.94287109375}, {"start": 628.17, "end": 628.59, "word": " قلة", "probability": 0.960693359375}, {"start": 628.59, "end": 628.81, "word": " في", "probability": 
0.83740234375}, {"start": 628.81, "end": 628.93, "word": " ال", "probability": 0.95068359375}, {"start": 628.93, "end": 629.53, "word": " perfusion", "probability": 0.726806640625}, {"start": 629.53, "end": 629.71, "word": " ال", "probability": 0.91796875}, {"start": 629.71, "end": 630.23, "word": " perfusion", "probability": 0.90625}, {"start": 630.23, "end": 630.47, "word": " يعني", "probability": 0.96826171875}, {"start": 630.47, "end": 630.61, "word": " إيه", "probability": 0.7183024088541666}, {"start": 630.61, "end": 632.21, "word": " إيش؟", "probability": 0.7052001953125}], "temperature": 1.0}, {"id": 23, "seek": 65395, "start": 635.95, "end": 653.95, "text": "من Perfusion ارتواء او ان الوعاء الدموي او ال organ بيرتوى بال Oxygen مظبوط لما بيسأل الدم للوعاء الدم هو شو بيسير؟ بيسأل Oxygen فش يو دم فش Oxygen عارف؟ لذا بيسموها", "tokens": [27842, 3026, 69, 5704, 1975, 43500, 2407, 16606, 1975, 2407, 16472, 2423, 45367, 16606, 32748, 2304, 45865, 1975, 2407, 2423, 1798, 4724, 13546, 2655, 2407, 7578, 20666, 16489, 8647, 3714, 19913, 3555, 2407, 9566, 5296, 15042, 4724, 1829, 3794, 10721, 1211, 32748, 2304, 24976, 45367, 16606, 32748, 2304, 31439, 13412, 2407, 4724, 1829, 3794, 13546, 22807, 4724, 1829, 3794, 10721, 1211, 16489, 8647, 6156, 8592, 7251, 2407, 11778, 2304, 6156, 8592, 16489, 8647, 6225, 9640, 5172, 22807, 5296, 15730, 4724, 1829, 38251, 2407, 11296], "avg_logprob": -0.33492645936853743, "compression_ratio": 1.679245283018868, "no_speech_prob": 1.3113021850585938e-06, "words": [{"start": 635.95, "end": 636.23, "word": "من", "probability": 0.72119140625}, {"start": 636.23, "end": 637.39, "word": " Perfusion", "probability": 0.7196451822916666}, {"start": 637.39, "end": 639.07, "word": " ارتواء", "probability": 0.8212890625}, {"start": 639.07, "end": 639.59, "word": " او", "probability": 0.8271484375}, {"start": 639.59, "end": 639.97, "word": " ان", "probability": 0.89208984375}, {"start": 639.97, "end": 640.39, "word": " الوعاء", "probability": 
0.8948567708333334}, {"start": 640.39, "end": 640.91, "word": " الدموي", "probability": 0.9440104166666666}, {"start": 640.91, "end": 641.15, "word": " او", "probability": 0.924072265625}, {"start": 641.15, "end": 641.61, "word": " ال", "probability": 0.9384765625}, {"start": 641.61, "end": 642.05, "word": " organ", "probability": 0.66552734375}, {"start": 642.05, "end": 642.77, "word": " بيرتوى", "probability": 0.80341796875}, {"start": 642.77, "end": 642.91, "word": " بال", "probability": 0.95361328125}, {"start": 642.91, "end": 643.51, "word": " Oxygen", "probability": 0.6259765625}, {"start": 643.51, "end": 645.63, "word": " مظبوط", "probability": 0.77216796875}, {"start": 645.63, "end": 645.81, "word": " لما", "probability": 0.838623046875}, {"start": 645.81, "end": 646.09, "word": " بيسأل", "probability": 0.679296875}, {"start": 646.09, "end": 646.39, "word": " الدم", "probability": 0.978271484375}, {"start": 646.39, "end": 646.79, "word": " للوعاء", "probability": 0.8507486979166666}, {"start": 646.79, "end": 647.01, "word": " الدم", "probability": 0.98046875}, {"start": 647.01, "end": 647.13, "word": " هو", "probability": 0.3173828125}, {"start": 647.13, "end": 647.33, "word": " شو", "probability": 0.663330078125}, {"start": 647.33, "end": 648.23, "word": " بيسير؟", "probability": 0.5072265625}, {"start": 648.23, "end": 648.97, "word": " بيسأل", "probability": 0.9775390625}, {"start": 648.97, "end": 649.55, "word": " Oxygen", "probability": 0.78662109375}, {"start": 649.55, "end": 650.11, "word": " فش", "probability": 0.77099609375}, {"start": 650.11, "end": 650.35, "word": " يو", "probability": 0.4228515625}, {"start": 650.35, "end": 650.65, "word": " دم", "probability": 0.959228515625}, {"start": 650.65, "end": 651.25, "word": " فش", "probability": 0.89599609375}, {"start": 651.25, "end": 652.13, "word": " Oxygen", "probability": 0.93212890625}, {"start": 652.13, "end": 652.99, "word": " عارف؟", "probability": 0.677581787109375}, {"start": 652.99, "end": 
653.35, "word": " لذا", "probability": 0.21270751953125}, {"start": 653.35, "end": 653.95, "word": " بيسموها", "probability": 0.881640625}], "temperature": 1.0}, {"id": 24, "seek": 67830, "start": 658.04, "end": 678.3, "text": "المفهوم يا شباب يبقى آلية عمل أو تكوين جلطة في الارطري هي عبارة عن atherosclerosis وقدمه تنشط لبليط تنشط لبليط التواشق وعمل الجلطة ففي ال V الموضوع مختلف", "tokens": [45340, 5172, 3224, 20498, 35186, 13412, 3555, 16758, 7251, 3555, 4587, 7578, 19753, 1211, 10632, 6225, 42213, 34051, 6055, 4117, 2407, 9957, 10874, 1211, 9566, 3660, 8978, 2423, 9640, 9566, 16572, 39896, 6225, 3555, 9640, 3660, 18871, 257, 616, 10466, 1918, 8211, 4032, 28543, 2304, 3224, 6055, 1863, 8592, 9566, 5296, 3555, 20292, 9566, 6055, 1863, 8592, 9566, 5296, 3555, 20292, 9566, 16712, 2407, 33599, 4587, 4032, 25957, 1211, 25724, 1211, 9566, 3660, 6156, 41185, 2423, 691, 2423, 2304, 2407, 11242, 45367, 3714, 46456, 46538], "avg_logprob": -0.31704214423201804, "compression_ratio": 1.5654761904761905, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 658.04, "end": 659.14, "word": "المفهوم", "probability": 0.80755615234375}, {"start": 659.14, "end": 659.28, "word": " يا", "probability": 0.374267578125}, {"start": 659.28, "end": 659.52, "word": " شباب", "probability": 0.9837239583333334}, {"start": 659.52, "end": 659.94, "word": " يبقى", "probability": 0.7803955078125}, {"start": 659.94, "end": 660.42, "word": " آلية", "probability": 0.5833333333333334}, {"start": 660.42, "end": 660.88, "word": " عمل", "probability": 0.974609375}, {"start": 660.88, "end": 661.12, "word": " أو", "probability": 0.814453125}, {"start": 661.12, "end": 661.68, "word": " تكوين", "probability": 0.73431396484375}, {"start": 661.68, "end": 662.24, "word": " جلطة", "probability": 0.982177734375}, {"start": 662.24, "end": 662.34, "word": " في", "probability": 0.92626953125}, {"start": 662.34, "end": 662.94, "word": " الارطري", "probability": 0.5560302734375}, {"start": 662.94, "end": 663.58, 
"word": " هي", "probability": 0.89111328125}, {"start": 663.58, "end": 663.88, "word": " عبارة", "probability": 0.982421875}, {"start": 663.88, "end": 664.02, "word": " عن", "probability": 0.99365234375}, {"start": 664.02, "end": 665.14, "word": " atherosclerosis", "probability": 0.8201171875}, {"start": 665.14, "end": 666.32, "word": " وقدمه", "probability": 0.6710205078125}, {"start": 666.32, "end": 667.46, "word": " تنشط", "probability": 0.8228759765625}, {"start": 667.46, "end": 667.94, "word": " لبليط", "probability": 0.521240234375}, {"start": 667.94, "end": 668.34, "word": " تنشط", "probability": 0.73724365234375}, {"start": 668.34, "end": 668.92, "word": " لبليط", "probability": 0.8050537109375}, {"start": 668.92, "end": 670.2, "word": " التواشق", "probability": 0.46319580078125}, {"start": 670.2, "end": 671.82, "word": " وعمل", "probability": 0.8082682291666666}, {"start": 671.82, "end": 672.46, "word": " الجلطة", "probability": 0.866455078125}, {"start": 672.46, "end": 674.02, "word": " ففي", "probability": 0.802734375}, {"start": 674.02, "end": 674.18, "word": " ال", "probability": 0.81982421875}, {"start": 674.18, "end": 674.46, "word": " V", "probability": 0.81005859375}, {"start": 674.46, "end": 677.76, "word": " الموضوع", "probability": 0.815283203125}, {"start": 677.76, "end": 678.3, "word": " مختلف", "probability": 0.9853515625}], "temperature": 1.0}, {"id": 25, "seek": 70815, "start": 680.53, "end": 708.15, "text": "الفين بدناش ننساه شباب انه الفين هو بيرجع الدم الى القلب مظبوط يعني الدم الراجع مين بيصير في الفين الدم الراجع على القلب بيصير في الفين غالبا شباب إذا تكونت وتهيأت ظروف لإن هالدم هذا يمشي ببطء او ينحجز في وعاء الدم وفي الفين يعني هذه مشكلة", "tokens": [6027, 5172, 9957, 47525, 8315, 8592, 8717, 1863, 3794, 40294, 13412, 3555, 16758, 16472, 3224, 27188, 9957, 31439, 4724, 13546, 7435, 3615, 32748, 2304, 2423, 7578, 25062, 46152, 3714, 19913, 3555, 2407, 9566, 37495, 22653, 32748, 2304, 34892, 26108, 3615, 3714, 9957, 4724, 1829, 9381, 
13546, 8978, 27188, 9957, 32748, 2304, 34892, 26108, 3615, 15844, 25062, 46152, 4724, 1829, 9381, 13546, 8978, 27188, 9957, 32771, 6027, 3555, 995, 13412, 3555, 16758, 11933, 15730, 6055, 30544, 2655, 34683, 3224, 1829, 10721, 2655, 1357, 116, 32887, 5172, 5296, 28814, 1863, 8032, 6027, 40448, 23758, 7251, 2304, 8592, 1829, 4724, 3555, 9566, 38207, 1975, 2407, 7251, 1863, 5016, 7435, 11622, 8978, 4032, 3615, 16606, 32748, 2304, 4032, 41185, 27188, 9957, 37495, 22653, 29538, 37893, 28820, 3660], "avg_logprob": -0.212953632877719, "compression_ratio": 2.1928934010152283, "no_speech_prob": 8.940696716308594e-07, "words": [{"start": 680.53, "end": 680.99, "word": "الفين", "probability": 0.67919921875}, {"start": 680.99, "end": 681.41, "word": " بدناش", "probability": 0.4630533854166667}, {"start": 681.41, "end": 681.79, "word": " ننساه", "probability": 0.906005859375}, {"start": 681.79, "end": 682.07, "word": " شباب", "probability": 0.8455403645833334}, {"start": 682.07, "end": 682.29, "word": " انه", "probability": 0.5263671875}, {"start": 682.29, "end": 682.63, "word": " الفين", "probability": 0.87158203125}, {"start": 682.63, "end": 683.05, "word": " هو", "probability": 0.9736328125}, {"start": 683.05, "end": 683.77, "word": " بيرجع", "probability": 0.961669921875}, {"start": 683.77, "end": 684.23, "word": " الدم", "probability": 0.981201171875}, {"start": 684.23, "end": 684.63, "word": " الى", "probability": 0.84423828125}, {"start": 684.63, "end": 684.99, "word": " القلب", "probability": 0.976806640625}, {"start": 684.99, "end": 685.73, "word": " مظبوط", "probability": 0.865625}, {"start": 685.73, "end": 686.09, "word": " يعني", "probability": 0.6201171875}, {"start": 686.09, "end": 686.37, "word": " الدم", "probability": 0.982177734375}, {"start": 686.37, "end": 686.81, "word": " الراجع", "probability": 0.7978515625}, {"start": 686.81, "end": 687.59, "word": " مين", "probability": 0.683349609375}, {"start": 687.59, "end": 688.91, "word": " بيصير", "probability": 
0.736572265625}, {"start": 688.91, "end": 689.07, "word": " في", "probability": 0.498046875}, {"start": 689.07, "end": 689.51, "word": " الفين", "probability": 0.97119140625}, {"start": 689.51, "end": 689.87, "word": " الدم", "probability": 0.8779296875}, {"start": 689.87, "end": 690.37, "word": " الراجع", "probability": 0.9158528645833334}, {"start": 690.37, "end": 690.53, "word": " على", "probability": 0.556640625}, {"start": 690.53, "end": 690.89, "word": " القلب", "probability": 0.9951171875}, {"start": 690.89, "end": 691.19, "word": " بيصير", "probability": 0.9678955078125}, {"start": 691.19, "end": 691.37, "word": " في", "probability": 0.80908203125}, {"start": 691.37, "end": 692.39, "word": " الفين", "probability": 0.965087890625}, {"start": 692.39, "end": 693.75, "word": " غالبا", "probability": 0.73236083984375}, {"start": 693.75, "end": 694.15, "word": " شباب", "probability": 0.97412109375}, {"start": 694.15, "end": 695.13, "word": " إذا", "probability": 0.78076171875}, {"start": 695.13, "end": 696.27, "word": " تكونت", "probability": 0.78271484375}, {"start": 696.27, "end": 697.31, "word": " وتهيأت", "probability": 0.85869140625}, {"start": 697.31, "end": 698.21, "word": " ظروف", "probability": 0.898193359375}, {"start": 698.21, "end": 698.55, "word": " لإن", "probability": 0.9070638020833334}, {"start": 698.55, "end": 699.05, "word": " هالدم", "probability": 0.7064615885416666}, {"start": 699.05, "end": 699.45, "word": " هذا", "probability": 0.63232421875}, {"start": 699.45, "end": 700.53, "word": " يمشي", "probability": 0.9869384765625}, {"start": 700.53, "end": 701.13, "word": " ببطء", "probability": 0.9498291015625}, {"start": 701.13, "end": 701.57, "word": " او", "probability": 0.7457275390625}, {"start": 701.57, "end": 702.57, "word": " ينحجز", "probability": 0.9357421875}, {"start": 702.57, "end": 703.43, "word": " في", "probability": 0.9072265625}, {"start": 703.43, "end": 704.03, "word": " وعاء", "probability": 0.8719075520833334}, {"start": 
704.03, "end": 704.29, "word": " الدم", "probability": 0.90869140625}, {"start": 704.29, "end": 704.57, "word": " وفي", "probability": 0.79443359375}, {"start": 704.57, "end": 704.95, "word": " الفين", "probability": 0.98828125}, {"start": 704.95, "end": 705.37, "word": " يعني", "probability": 0.8349609375}, {"start": 705.37, "end": 707.43, "word": " هذه", "probability": 0.1689453125}, {"start": 707.43, "end": 708.15, "word": " مشكلة", "probability": 0.9737955729166666}], "temperature": 1.0}, {"id": 26, "seek": 73742, "start": 708.66, "end": 737.42, "text": "هذه مشكلة ليش؟ قالوا لأنه stagnation of blood حجز الدم في الوعاء الدموي في الvein بيؤدي لتنشيط ال coagulation cascade mechanism بيتنشط ال coagulation cascade وإذا تنشط ال coagulation cascade ليش محصل اتنشاطه؟ fibrin مظبوط؟ و ال fibrin يعني جلطة و بتبدأ هذه الجلطة بال", "tokens": [3224, 24192, 37893, 28820, 3660, 32239, 8592, 22807, 50239, 14407, 5296, 33456, 3224, 32853, 399, 295, 3390, 11331, 7435, 11622, 32748, 2304, 8978, 2423, 45367, 16606, 32748, 2304, 45865, 8978, 2423, 303, 259, 4724, 1829, 33604, 16254, 5296, 2655, 1863, 8592, 1829, 9566, 2423, 598, 559, 2776, 50080, 7513, 4724, 36081, 1863, 8592, 9566, 2423, 598, 559, 2776, 50080, 4032, 28814, 15730, 6055, 1863, 8592, 9566, 2423, 598, 559, 2776, 50080, 32239, 8592, 3714, 5016, 36520, 1975, 2655, 1863, 8592, 41193, 3224, 22807, 283, 6414, 259, 3714, 19913, 3555, 2407, 9566, 22807, 4032, 2423, 283, 6414, 259, 37495, 22653, 10874, 1211, 9566, 3660, 4032, 39894, 44510, 10721, 29538, 25724, 1211, 9566, 3660, 20666], "avg_logprob": -0.21422697721343292, "compression_ratio": 1.7668161434977578, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 708.66, "end": 709.14, "word": "هذه", "probability": 0.859130859375}, {"start": 709.14, "end": 709.94, "word": " مشكلة", "probability": 0.9581705729166666}, {"start": 709.94, "end": 710.88, "word": " ليش؟", "probability": 0.6058349609375}, {"start": 710.88, "end": 711.16, "word": " قالوا", "probability": 
0.59918212890625}, {"start": 711.16, "end": 711.7, "word": " لأنه", "probability": 0.6710611979166666}, {"start": 711.7, "end": 712.44, "word": " stagnation", "probability": 0.756103515625}, {"start": 712.44, "end": 712.64, "word": " of", "probability": 0.80615234375}, {"start": 712.64, "end": 713.08, "word": " blood", "probability": 0.97412109375}, {"start": 713.08, "end": 714.06, "word": " حجز", "probability": 0.9075520833333334}, {"start": 714.06, "end": 714.56, "word": " الدم", "probability": 0.969482421875}, {"start": 714.56, "end": 715.3, "word": " في", "probability": 0.92333984375}, {"start": 715.3, "end": 715.86, "word": " الوعاء", "probability": 0.9306640625}, {"start": 715.86, "end": 716.46, "word": " الدموي", "probability": 0.9744466145833334}, {"start": 716.46, "end": 716.66, "word": " في", "probability": 0.72412109375}, {"start": 716.66, "end": 717.22, "word": " الvein", "probability": 0.56622314453125}, {"start": 717.22, "end": 718.14, "word": " بيؤدي", "probability": 0.814208984375}, {"start": 718.14, "end": 718.98, "word": " لتنشيط", "probability": 0.9775390625}, {"start": 718.98, "end": 719.28, "word": " ال", "probability": 0.89453125}, {"start": 719.28, "end": 719.9, "word": " coagulation", "probability": 0.8748372395833334}, {"start": 719.9, "end": 720.4, "word": " cascade", "probability": 0.94287109375}, {"start": 720.4, "end": 721.06, "word": " mechanism", "probability": 0.340087890625}, {"start": 721.06, "end": 722.64, "word": " بيتنشط", "probability": 0.925}, {"start": 722.64, "end": 722.8, "word": " ال", "probability": 0.9677734375}, {"start": 722.8, "end": 723.5, "word": " coagulation", "probability": 0.9646809895833334}, {"start": 723.5, "end": 724.24, "word": " cascade", "probability": 0.98828125}, {"start": 724.24, "end": 725.4, "word": " وإذا", "probability": 0.849609375}, {"start": 725.4, "end": 725.84, "word": " تنشط", "probability": 0.8880615234375}, {"start": 725.84, "end": 725.96, "word": " ال", "probability": 0.8515625}, {"start": 
725.96, "end": 726.46, "word": " coagulation", "probability": 0.97265625}, {"start": 726.46, "end": 726.8, "word": " cascade", "probability": 0.9912109375}, {"start": 726.8, "end": 727.08, "word": " ليش", "probability": 0.88623046875}, {"start": 727.08, "end": 727.48, "word": " محصل", "probability": 0.9443359375}, {"start": 727.48, "end": 728.32, "word": " اتنشاطه؟", "probability": 0.8420758928571429}, {"start": 728.32, "end": 729.94, "word": " fibrin", "probability": 0.7266031901041666}, {"start": 729.94, "end": 731.3, "word": " مظبوط؟", "probability": 0.77734375}, {"start": 731.3, "end": 731.34, "word": " و", "probability": 0.67822265625}, {"start": 731.34, "end": 731.48, "word": " ال", "probability": 0.386474609375}, {"start": 731.48, "end": 732.14, "word": " fibrin", "probability": 0.8649088541666666}, {"start": 732.14, "end": 732.98, "word": " يعني", "probability": 0.923583984375}, {"start": 732.98, "end": 733.62, "word": " جلطة", "probability": 0.9898681640625}, {"start": 733.62, "end": 734.92, "word": " و", "probability": 0.72900390625}, {"start": 734.92, "end": 735.94, "word": " بتبدأ", "probability": 0.79638671875}, {"start": 735.94, "end": 736.5, "word": " هذه", "probability": 0.96728515625}, {"start": 736.5, "end": 737.2, "word": " الجلطة", "probability": 0.99365234375}, {"start": 737.2, "end": 737.42, "word": " بال", "probability": 0.791015625}], "temperature": 1.0}, {"id": 27, "seek": 76004, "start": 738.7, "end": 760.04, "text": "بالإيش؟ بالتكون وبالبناء وتبنى الجلطة ومشكلتها إنها بتنبني وبتكون جلطة كبيرة وممكن تكون الجلطة في ال vein في مساحة كبيرة من ال vein فبيكون الهراس ديل اللى ممكن ينفصل جزء منها الديل", "tokens": [3555, 6027, 28814, 1829, 8592, 22807, 20666, 2655, 30544, 46599, 6027, 3555, 1863, 16606, 34683, 3555, 1863, 7578, 25724, 1211, 9566, 3660, 4032, 2304, 8592, 28820, 2655, 11296, 36145, 11296, 39894, 1863, 3555, 22653, 46599, 2655, 30544, 10874, 1211, 9566, 3660, 9122, 3555, 48923, 4032, 2304, 43020, 6055, 30544, 25724, 1211, 9566, 
3660, 8978, 2423, 30669, 8978, 47524, 39319, 3660, 9122, 3555, 48923, 9154, 2423, 30669, 6156, 21292, 30544, 2423, 3224, 23557, 3794, 11778, 26895, 13672, 7578, 3714, 43020, 7251, 1863, 5172, 36520, 10874, 11622, 38207, 9154, 11296, 32748, 26895], "avg_logprob": -0.16380495094990993, "compression_ratio": 1.9337349397590362, "no_speech_prob": 1.2516975402832031e-06, "words": [{"start": 738.7, "end": 739.28, "word": "بالإيش؟", "probability": 0.7847493489583334}, {"start": 739.28, "end": 739.98, "word": " بالتكون", "probability": 0.7265625}, {"start": 739.98, "end": 741.42, "word": " وبالبناء", "probability": 0.871875}, {"start": 741.42, "end": 742.52, "word": " وتبنى", "probability": 0.8284912109375}, {"start": 742.52, "end": 744.02, "word": " الجلطة", "probability": 0.9456787109375}, {"start": 744.02, "end": 745.4, "word": " ومشكلتها", "probability": 0.9689127604166666}, {"start": 745.4, "end": 745.66, "word": " إنها", "probability": 0.66357421875}, {"start": 745.66, "end": 746.32, "word": " بتنبني", "probability": 0.8272705078125}, {"start": 746.32, "end": 748.0, "word": " وبتكون", "probability": 0.8033040364583334}, {"start": 748.0, "end": 748.4, "word": " جلطة", "probability": 0.9898681640625}, {"start": 748.4, "end": 748.76, "word": " كبيرة", "probability": 0.98583984375}, {"start": 748.76, "end": 749.5, "word": " وممكن", "probability": 0.90771484375}, {"start": 749.5, "end": 749.84, "word": " تكون", "probability": 0.975341796875}, {"start": 749.84, "end": 750.38, "word": " الجلطة", "probability": 0.9910888671875}, {"start": 750.38, "end": 750.52, "word": " في", "probability": 0.93115234375}, {"start": 750.52, "end": 750.64, "word": " ال", "probability": 0.81591796875}, {"start": 750.64, "end": 750.98, "word": " vein", "probability": 0.324462890625}, {"start": 750.98, "end": 751.7, "word": " في", "probability": 0.86279296875}, {"start": 751.7, "end": 752.26, "word": " مساحة", "probability": 0.9856770833333334}, {"start": 752.26, "end": 752.76, "word": " كبيرة", 
"probability": 0.9842122395833334}, {"start": 752.76, "end": 752.94, "word": " من", "probability": 0.95849609375}, {"start": 752.94, "end": 753.1, "word": " ال", "probability": 0.9228515625}, {"start": 753.1, "end": 753.36, "word": " vein", "probability": 0.880859375}, {"start": 753.36, "end": 755.46, "word": " فبيكون", "probability": 0.5755208333333334}, {"start": 755.46, "end": 755.94, "word": " الهراس", "probability": 0.8370361328125}, {"start": 755.94, "end": 756.34, "word": " ديل", "probability": 0.5931396484375}, {"start": 756.34, "end": 757.48, "word": " اللى", "probability": 0.87109375}, {"start": 757.48, "end": 757.82, "word": " ممكن", "probability": 0.9814453125}, {"start": 757.82, "end": 758.4, "word": " ينفصل", "probability": 0.8997802734375}, {"start": 758.4, "end": 758.88, "word": " جزء", "probability": 0.9949544270833334}, {"start": 758.88, "end": 759.36, "word": " منها", "probability": 0.984619140625}, {"start": 759.36, "end": 760.04, "word": " الديل", "probability": 0.82763671875}], "temperature": 1.0}, {"id": 28, "seek": 78823, "start": 760.79, "end": 788.23, "text": "ويروح ع مكان تاني ويروح وين؟ ع مكان تاني ممكن و تجهد في مكان تاني و تعمل إيش؟ تعمل pulmonary embolism أو cerebral embolism or whatever أو تروح على ال lung و تعمل مشاكل in adequate oxygen و cellucarbon and CO2 exchange وبالتالي المحاصلة اللي هو pulmonary embolism", "tokens": [2407, 13546, 2407, 5016, 6225, 3714, 41361, 6055, 7649, 1829, 4032, 13546, 2407, 5016, 4032, 9957, 22807, 6225, 3714, 41361, 6055, 7649, 1829, 3714, 43020, 4032, 6055, 7435, 3224, 3215, 8978, 3714, 41361, 6055, 7649, 1829, 4032, 6055, 25957, 1211, 11933, 1829, 8592, 22807, 6055, 25957, 1211, 8331, 46386, 4605, 401, 1434, 34051, 43561, 4605, 401, 1434, 420, 437, 68, 331, 34051, 6055, 32887, 5016, 15844, 2423, 16730, 4032, 6055, 25957, 1211, 37893, 995, 28820, 294, 20927, 9169, 4032, 2815, 1311, 289, 4351, 293, 3002, 17, 7742, 4032, 3555, 6027, 2655, 6027, 1829, 9673, 5016, 33546, 37977, 13672, 1829, 31439, 8331, 
46386, 4605, 401, 1434], "avg_logprob": -0.2236143928372635, "compression_ratio": 1.6681614349775784, "no_speech_prob": 0.0, "words": [{"start": 760.79, "end": 761.15, "word": "ويروح", "probability": 0.8221435546875}, {"start": 761.15, "end": 761.23, "word": " ع", "probability": 0.11395263671875}, {"start": 761.23, "end": 761.43, "word": " مكان", "probability": 0.9775390625}, {"start": 761.43, "end": 761.89, "word": " تاني", "probability": 0.8740234375}, {"start": 761.89, "end": 762.43, "word": " ويروح", "probability": 0.8709716796875}, {"start": 762.43, "end": 763.11, "word": " وين؟", "probability": 0.7584635416666666}, {"start": 763.11, "end": 763.25, "word": " ع", "probability": 0.900390625}, {"start": 763.25, "end": 763.51, "word": " مكان", "probability": 0.990234375}, {"start": 763.51, "end": 764.01, "word": " تاني", "probability": 0.9801432291666666}, {"start": 764.01, "end": 764.91, "word": " ممكن", "probability": 0.969970703125}, {"start": 764.91, "end": 765.41, "word": " و", "probability": 0.57177734375}, {"start": 765.41, "end": 765.71, "word": " تجهد", "probability": 0.7728271484375}, {"start": 765.71, "end": 765.83, "word": " في", "probability": 0.79443359375}, {"start": 765.83, "end": 766.09, "word": " مكان", "probability": 0.993408203125}, {"start": 766.09, "end": 766.45, "word": " تاني", "probability": 0.9899088541666666}, {"start": 766.45, "end": 766.57, "word": " و", "probability": 0.448974609375}, {"start": 766.57, "end": 766.83, "word": " تعمل", "probability": 0.9720052083333334}, {"start": 766.83, "end": 767.55, "word": " إيش؟", "probability": 0.7589111328125}, {"start": 767.55, "end": 768.23, "word": " تعمل", "probability": 0.9840494791666666}, {"start": 768.23, "end": 769.15, "word": " pulmonary", "probability": 0.72021484375}, {"start": 769.15, "end": 770.03, "word": " embolism", "probability": 0.97119140625}, {"start": 770.03, "end": 770.29, "word": " أو", "probability": 0.7431640625}, {"start": 770.29, "end": 770.73, "word": " cerebral", 
"probability": 0.91015625}, {"start": 770.73, "end": 771.33, "word": " embolism", "probability": 0.9807942708333334}, {"start": 771.33, "end": 771.53, "word": " or", "probability": 0.66650390625}, {"start": 771.53, "end": 772.01, "word": " whatever", "probability": 0.6143391927083334}, {"start": 772.01, "end": 772.63, "word": " أو", "probability": 0.72900390625}, {"start": 772.63, "end": 772.89, "word": " تروح", "probability": 0.99462890625}, {"start": 772.89, "end": 773.05, "word": " على", "probability": 0.84765625}, {"start": 773.05, "end": 773.19, "word": " ال", "probability": 0.9599609375}, {"start": 773.19, "end": 773.57, "word": " lung", "probability": 0.85595703125}, {"start": 773.57, "end": 774.19, "word": " و", "probability": 0.56005859375}, {"start": 774.19, "end": 774.89, "word": " تعمل", "probability": 0.9783528645833334}, {"start": 774.89, "end": 776.05, "word": " مشاكل", "probability": 0.9899088541666666}, {"start": 776.05, "end": 776.25, "word": " in", "probability": 0.7001953125}, {"start": 776.25, "end": 776.49, "word": " adequate", "probability": 0.53857421875}, {"start": 776.49, "end": 777.15, "word": " oxygen", "probability": 0.79443359375}, {"start": 777.15, "end": 777.33, "word": " و", "probability": 0.775390625}, {"start": 777.33, "end": 778.25, "word": " cellucarbon", "probability": 0.488067626953125}, {"start": 778.25, "end": 778.91, "word": " and", "probability": 0.779296875}, {"start": 778.91, "end": 779.35, "word": " CO2", "probability": 0.76953125}, {"start": 779.35, "end": 779.99, "word": " exchange", "probability": 0.9765625}, {"start": 779.99, "end": 782.23, "word": " وبالتالي", "probability": 0.8568522135416666}, {"start": 782.23, "end": 782.91, "word": " المحاصلة", "probability": 0.75933837890625}, {"start": 782.91, "end": 783.89, "word": " اللي", "probability": 0.8642578125}, {"start": 783.89, "end": 784.41, "word": " هو", "probability": 0.9814453125}, {"start": 784.41, "end": 787.61, "word": " pulmonary", "probability": 
0.94921875}, {"start": 787.61, "end": 788.23, "word": " embolism", "probability": 0.9791666666666666}], "temperature": 1.0}, {"id": 29, "seek": 81349, "start": 794.37, "end": 813.49, "text": "فهموه و فاهمين عليها شبه عرفنا ان الارتري قالية تكون الجلطة في الارتري ثم قالية تكونها في ال vein ماشي و عرفنا ان الأسباب مختلفة الأسباب مختلفة لكن بتتكون الجلطة طبعا من", "tokens": [5172, 16095, 2407, 3224, 4032, 6156, 995, 16095, 9957, 25894, 11296, 13412, 3555, 3224, 6225, 28480, 8315, 16472, 2423, 9640, 2655, 16572, 12174, 6027, 10632, 6055, 30544, 25724, 1211, 9566, 3660, 8978, 2423, 9640, 2655, 16572, 38637, 2304, 12174, 6027, 10632, 6055, 30544, 11296, 8978, 2423, 30669, 3714, 33599, 1829, 4032, 6225, 28480, 8315, 16472, 16247, 35457, 16758, 3714, 46456, 46538, 3660, 16247, 35457, 16758, 3714, 46456, 46538, 3660, 44381, 39894, 2655, 30544, 25724, 1211, 9566, 3660, 23032, 3555, 3615, 995, 9154], "avg_logprob": -0.29687498922807626, "compression_ratio": 1.993421052631579, "no_speech_prob": 6.133317947387695e-05, "words": [{"start": 794.37, "end": 795.55, "word": "فهموه", "probability": 0.4601593017578125}, {"start": 795.55, "end": 795.61, "word": " و", "probability": 0.60791015625}, {"start": 795.61, "end": 795.83, "word": " فاهمين", "probability": 0.7777099609375}, {"start": 795.83, "end": 796.09, "word": " عليها", "probability": 0.865966796875}, {"start": 796.09, "end": 796.43, "word": " شبه", "probability": 0.9274088541666666}, {"start": 796.43, "end": 798.15, "word": " عرفنا", "probability": 0.772705078125}, {"start": 798.15, "end": 798.27, "word": " ان", "probability": 0.27734375}, {"start": 798.27, "end": 799.29, "word": " الارتري", "probability": 0.650970458984375}, {"start": 799.29, "end": 800.09, "word": " قالية", "probability": 0.5230305989583334}, {"start": 800.09, "end": 800.83, "word": " تكون", "probability": 0.74560546875}, {"start": 800.83, "end": 801.33, "word": " الجلطة", "probability": 0.9261474609375}, {"start": 801.33, "end": 801.41, "word": " في", 
"probability": 0.76708984375}, {"start": 801.41, "end": 801.93, "word": " الارتري", "probability": 0.9539794921875}, {"start": 801.93, "end": 802.53, "word": " ثم", "probability": 0.6314697265625}, {"start": 802.53, "end": 802.95, "word": " قالية", "probability": 0.9632161458333334}, {"start": 802.95, "end": 803.65, "word": " تكونها", "probability": 0.9666341145833334}, {"start": 803.65, "end": 803.87, "word": " في", "probability": 0.966796875}, {"start": 803.87, "end": 803.99, "word": " ال", "probability": 0.85400390625}, {"start": 803.99, "end": 804.57, "word": " vein", "probability": 0.415771484375}, {"start": 804.57, "end": 805.89, "word": " ماشي", "probability": 0.7674153645833334}, {"start": 805.89, "end": 806.49, "word": " و", "probability": 0.94091796875}, {"start": 806.49, "end": 806.79, "word": " عرفنا", "probability": 0.9358723958333334}, {"start": 806.79, "end": 806.93, "word": " ان", "probability": 0.7568359375}, {"start": 806.93, "end": 807.69, "word": " الأسباب", "probability": 0.9762369791666666}, {"start": 807.69, "end": 808.61, "word": " مختلفة", "probability": 0.99072265625}, {"start": 808.61, "end": 809.37, "word": " الأسباب", "probability": 0.8187662760416666}, {"start": 809.37, "end": 810.37, "word": " مختلفة", "probability": 0.984619140625}, {"start": 810.37, "end": 810.87, "word": " لكن", "probability": 0.90234375}, {"start": 810.87, "end": 811.67, "word": " بتتكون", "probability": 0.4794108072916667}, {"start": 811.67, "end": 812.55, "word": " الجلطة", "probability": 0.83868408203125}, {"start": 812.55, "end": 813.17, "word": " طبعا", "probability": 0.934326171875}, {"start": 813.17, "end": 813.49, "word": " من", "probability": 0.95068359375}], "temperature": 1.0}, {"id": 30, "seek": 83873, "start": 814.55, "end": 838.73, "text": "ملاحظاتنا للـ mechanism of thrombus formation نلاحظ أنه في الأرض في Platelet صح؟ Platelet غالبا الناس اللي بتصير عندهم arterial thrombosis بعلجوهم prophylactically بال Anti-Platelet Aggregate Anti-Platelet 
Aggregate بمنعوا ال Platelet Aggregation", "tokens": [2304, 15040, 5016, 19913, 9307, 8315, 24976, 39184, 7513, 295, 739, 3548, 301, 11723, 8717, 15040, 5016, 19913, 14739, 3224, 8978, 16247, 43042, 8978, 17461, 15966, 20328, 5016, 22807, 17461, 15966, 32771, 6027, 3555, 995, 2423, 8315, 3794, 13672, 1829, 39894, 9381, 13546, 43242, 16095, 30455, 831, 739, 3548, 8211, 4724, 30241, 7435, 2407, 16095, 17051, 5088, 578, 984, 20666, 27757, 12, 47, 14087, 15966, 41512, 3375, 473, 27757, 12, 47, 14087, 15966, 41512, 3375, 473, 4724, 27842, 3615, 14407, 2423, 17461, 15966, 41512, 20167], "avg_logprob": -0.2830668712078139, "compression_ratio": 1.5238095238095237, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 814.55, "end": 815.73, "word": "ملاحظاتنا", "probability": 0.8663330078125}, {"start": 815.73, "end": 816.57, "word": " للـ", "probability": 0.2958984375}, {"start": 816.57, "end": 817.55, "word": " mechanism", "probability": 0.51318359375}, {"start": 817.55, "end": 818.65, "word": " of", "probability": 0.9248046875}, {"start": 818.65, "end": 819.59, "word": " thrombus", "probability": 0.8741861979166666}, {"start": 819.59, "end": 820.47, "word": " formation", "probability": 0.96044921875}, {"start": 820.47, "end": 821.49, "word": " نلاحظ", "probability": 0.81341552734375}, {"start": 821.49, "end": 821.89, "word": " أنه", "probability": 0.4853515625}, {"start": 821.89, "end": 822.17, "word": " في", "probability": 0.9228515625}, {"start": 822.17, "end": 822.55, "word": " الأرض", "probability": 0.91748046875}, {"start": 822.55, "end": 822.79, "word": " في", "probability": 0.4345703125}, {"start": 822.79, "end": 823.41, "word": " Platelet", "probability": 0.615234375}, {"start": 823.41, "end": 824.47, "word": " صح؟", "probability": 0.7552083333333334}, {"start": 824.47, "end": 825.21, "word": " Platelet", "probability": 0.714111328125}, {"start": 825.21, "end": 826.31, "word": " غالبا", "probability": 0.9337158203125}, {"start": 826.31, "end": 827.09, 
"word": " الناس", "probability": 0.94384765625}, {"start": 827.09, "end": 827.23, "word": " اللي", "probability": 0.89892578125}, {"start": 827.23, "end": 827.49, "word": " بتصير", "probability": 0.7510579427083334}, {"start": 827.49, "end": 827.95, "word": " عندهم", "probability": 0.9853515625}, {"start": 827.95, "end": 828.83, "word": " arterial", "probability": 0.884765625}, {"start": 828.83, "end": 829.63, "word": " thrombosis", "probability": 0.9542643229166666}, {"start": 829.63, "end": 830.75, "word": " بعلجوهم", "probability": 0.78203125}, {"start": 830.75, "end": 831.93, "word": " prophylactically", "probability": 0.9051513671875}, {"start": 831.93, "end": 832.69, "word": " بال", "probability": 0.90234375}, {"start": 832.69, "end": 833.85, "word": " Anti", "probability": 0.1878662109375}, {"start": 833.85, "end": 834.37, "word": "-Platelet", "probability": 0.72833251953125}, {"start": 834.37, "end": 835.17, "word": " Aggregate", "probability": 0.78076171875}, {"start": 835.17, "end": 835.53, "word": " Anti", "probability": 0.310302734375}, {"start": 835.53, "end": 836.09, "word": "-Platelet", "probability": 0.93212890625}, {"start": 836.09, "end": 836.73, "word": " Aggregate", "probability": 0.9313151041666666}, {"start": 836.73, "end": 837.49, "word": " بمنعوا", "probability": 0.8878173828125}, {"start": 837.49, "end": 837.61, "word": " ال", "probability": 0.72705078125}, {"start": 837.61, "end": 838.05, "word": " Platelet", "probability": 0.664306640625}, {"start": 838.05, "end": 838.73, "word": " Aggregation", "probability": 0.962158203125}], "temperature": 1.0}, {"id": 31, "seek": 87388, "start": 845.94, "end": 873.88, "text": "Low dose Low dose بيبقى يعني زي اقل اه ماشي و الناس اللي عندهم venous thrombosis بعندهم مش بال anticoagulant زي الwarfarin و زي الheparin وبالتالي بمنعوا حدوث ال venous thrombosis عندهم ماشي من الحاجات الشباب اللي كمان", "tokens": [43, 305, 14041, 17078, 14041, 4724, 1829, 3555, 4587, 7578, 37495, 22653, 30767, 1829, 1975, 4587, 
1211, 1975, 3224, 3714, 33599, 1829, 4032, 2423, 8315, 3794, 13672, 1829, 43242, 16095, 6138, 563, 739, 3548, 8211, 4724, 3615, 41260, 16095, 37893, 20666, 2511, 2789, 559, 425, 394, 30767, 1829, 2423, 6925, 69, 19829, 4032, 30767, 1829, 2423, 71, 595, 19829, 46599, 6027, 2655, 6027, 1829, 4724, 27842, 3615, 14407, 11331, 3215, 2407, 12984, 2423, 6138, 563, 739, 3548, 8211, 43242, 16095, 3714, 33599, 1829, 9154, 21542, 26108, 9307, 25124, 3555, 16758, 13672, 1829, 9122, 2304, 7649], "avg_logprob": -0.29687501179675263, "compression_ratio": 1.7195767195767195, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 845.9399999999999, "end": 847.16, "word": "Low", "probability": 0.3466796875}, {"start": 847.16, "end": 847.4, "word": " dose", "probability": 0.61279296875}, {"start": 847.4, "end": 847.8, "word": " Low", "probability": 0.1326904296875}, {"start": 847.8, "end": 848.92, "word": " dose", "probability": 0.8955078125}, {"start": 848.92, "end": 849.42, "word": " بيبقى", "probability": 0.85}, {"start": 849.42, "end": 849.62, "word": " يعني", "probability": 0.650634765625}, {"start": 849.62, "end": 849.8, "word": " زي", "probability": 0.59814453125}, {"start": 849.8, "end": 850.2, "word": " اقل", "probability": 0.7437337239583334}, {"start": 850.2, "end": 851.52, "word": " اه", "probability": 0.4703369140625}, {"start": 851.52, "end": 852.14, "word": " ماشي", "probability": 0.8797200520833334}, {"start": 852.14, "end": 853.36, "word": " و", "probability": 0.392333984375}, {"start": 853.36, "end": 854.74, "word": " الناس", "probability": 0.9817708333333334}, {"start": 854.74, "end": 855.06, "word": " اللي", "probability": 0.913330078125}, {"start": 855.06, "end": 855.92, "word": " عندهم", "probability": 0.9892578125}, {"start": 855.92, "end": 856.42, "word": " venous", "probability": 0.46728515625}, {"start": 856.42, "end": 857.1, "word": " thrombosis", "probability": 0.9293619791666666}, {"start": 857.1, "end": 857.58, "word": " بعندهم", "probability": 
0.5936279296875}, {"start": 857.58, "end": 858.14, "word": " مش", "probability": 0.240478515625}, {"start": 858.14, "end": 859.4, "word": " بال", "probability": 0.7158203125}, {"start": 859.4, "end": 860.92, "word": " anticoagulant", "probability": 0.842333984375}, {"start": 860.92, "end": 861.24, "word": " زي", "probability": 0.962158203125}, {"start": 861.24, "end": 861.96, "word": " الwarfarin", "probability": 0.7215576171875}, {"start": 861.96, "end": 862.12, "word": " و", "probability": 0.93115234375}, {"start": 862.12, "end": 862.24, "word": " زي", "probability": 0.804443359375}, {"start": 862.24, "end": 862.84, "word": " الheparin", "probability": 0.784912109375}, {"start": 862.84, "end": 864.24, "word": " وبالتالي", "probability": 0.8587890625}, {"start": 864.24, "end": 864.92, "word": " بمنعوا", "probability": 0.69622802734375}, {"start": 864.92, "end": 865.8, "word": " حدوث", "probability": 0.9447021484375}, {"start": 865.8, "end": 866.42, "word": " ال", "probability": 0.96484375}, {"start": 866.42, "end": 866.84, "word": " venous", "probability": 0.845458984375}, {"start": 866.84, "end": 867.54, "word": " thrombosis", "probability": 0.97314453125}, {"start": 867.54, "end": 868.04, "word": " عندهم", "probability": 0.973388671875}, {"start": 868.04, "end": 870.86, "word": " ماشي", "probability": 0.9410807291666666}, {"start": 870.86, "end": 871.84, "word": " من", "probability": 0.66064453125}, {"start": 871.84, "end": 872.16, "word": " الحاجات", "probability": 0.9641927083333334}, {"start": 872.16, "end": 872.58, "word": " الشباب", "probability": 0.9806315104166666}, {"start": 872.58, "end": 873.44, "word": " اللي", "probability": 0.953857421875}, {"start": 873.44, "end": 873.88, "word": " كمان", "probability": 0.95458984375}], "temperature": 1.0}, {"id": 32, "seek": 90377, "start": 874.63, "end": 903.77, "text": "لازم نعرفها أنواع الجلطات جالو أنواع الجلطات قُسمت حسب ال function ففي جلطة بتسكر الوعاء الدمو تماما و بيسموها inclusive thrombus وفي جلطة 
بتسكره جزئي و بيسموها neural thrombus شو بيسموها؟ neural thrombus يعني هي بتكون ملزقة في جدار الوعاء الدمو فبتسكرش الوعاء الدمو كاملا بيسموها neural", "tokens": [1211, 31377, 2304, 8717, 3615, 28480, 11296, 14739, 14407, 3615, 25724, 1211, 9566, 9307, 10874, 6027, 2407, 14739, 14407, 3615, 25724, 1211, 9566, 9307, 12174, 10859, 38251, 2655, 11331, 35457, 2423, 2445, 6156, 41185, 10874, 1211, 9566, 3660, 39894, 3794, 37983, 2423, 45367, 16606, 32748, 2304, 2407, 46811, 10943, 995, 4032, 4724, 1829, 38251, 2407, 11296, 13429, 739, 3548, 301, 4032, 41185, 10874, 1211, 9566, 3660, 39894, 3794, 37983, 3224, 10874, 11622, 19986, 1829, 4032, 4724, 1829, 38251, 2407, 11296, 18161, 739, 3548, 301, 13412, 2407, 4724, 1829, 38251, 2407, 11296, 22807, 18161, 739, 3548, 301, 37495, 22653, 39896, 39894, 30544, 3714, 1211, 11622, 28671, 8978, 10874, 3215, 9640, 2423, 45367, 16606, 32748, 2304, 2407, 6156, 3555, 2655, 3794, 37983, 8592, 2423, 45367, 16606, 32748, 2304, 2407, 9122, 10943, 15040, 4724, 1829, 38251, 2407, 11296, 18161], "avg_logprob": -0.1427919664522157, "compression_ratio": 2.1126126126126126, "no_speech_prob": 9.655952453613281e-06, "words": [{"start": 874.63, "end": 875.61, "word": "لازم", "probability": 0.8170572916666666}, {"start": 875.61, "end": 876.03, "word": " نعرفها", "probability": 0.985107421875}, {"start": 876.03, "end": 876.41, "word": " أنواع", "probability": 0.8382161458333334}, {"start": 876.41, "end": 877.17, "word": " الجلطات", "probability": 0.91796875}, {"start": 877.17, "end": 878.03, "word": " جالو", "probability": 0.64453125}, {"start": 878.03, "end": 878.37, "word": " أنواع", "probability": 0.9166666666666666}, {"start": 878.37, "end": 878.79, "word": " الجلطات", "probability": 0.98779296875}, {"start": 878.79, "end": 879.31, "word": " قُسمت", "probability": 0.853271484375}, {"start": 879.31, "end": 879.81, "word": " حسب", "probability": 0.984130859375}, {"start": 879.81, "end": 880.09, "word": " ال", "probability": 0.98193359375}, 
{"start": 880.09, "end": 880.59, "word": " function", "probability": 0.60595703125}, {"start": 880.59, "end": 881.39, "word": " ففي", "probability": 0.877197265625}, {"start": 881.39, "end": 881.83, "word": " جلطة", "probability": 0.96875}, {"start": 881.83, "end": 882.27, "word": " بتسكر", "probability": 0.82958984375}, {"start": 882.27, "end": 882.57, "word": " الوعاء", "probability": 0.6908365885416666}, {"start": 882.57, "end": 882.85, "word": " الدمو", "probability": 0.81884765625}, {"start": 882.85, "end": 883.97, "word": " تماما", "probability": 0.8785807291666666}, {"start": 883.97, "end": 884.07, "word": " و", "probability": 0.66748046875}, {"start": 884.07, "end": 884.59, "word": " بيسموها", "probability": 0.8623046875}, {"start": 884.59, "end": 885.43, "word": " inclusive", "probability": 0.258544921875}, {"start": 885.43, "end": 886.93, "word": " thrombus", "probability": 0.9401041666666666}, {"start": 886.93, "end": 888.03, "word": " وفي", "probability": 0.79443359375}, {"start": 888.03, "end": 888.49, "word": " جلطة", "probability": 0.995361328125}, {"start": 888.49, "end": 889.57, "word": " بتسكره", "probability": 0.8402099609375}, {"start": 889.57, "end": 890.31, "word": " جزئي", "probability": 0.9237060546875}, {"start": 890.31, "end": 890.91, "word": " و", "probability": 0.88232421875}, {"start": 890.91, "end": 891.35, "word": " بيسموها", "probability": 0.90849609375}, {"start": 891.35, "end": 891.87, "word": " neural", "probability": 0.38623046875}, {"start": 891.87, "end": 893.41, "word": " thrombus", "probability": 0.9781901041666666}, {"start": 893.41, "end": 893.65, "word": " شو", "probability": 0.803466796875}, {"start": 893.65, "end": 894.81, "word": " بيسموها؟", "probability": 0.9149576822916666}, {"start": 894.81, "end": 895.43, "word": " neural", "probability": 0.5673828125}, {"start": 895.43, "end": 896.75, "word": " thrombus", "probability": 0.9817708333333334}, {"start": 896.75, "end": 897.03, "word": " يعني", "probability": 
0.9287109375}, {"start": 897.03, "end": 897.13, "word": " هي", "probability": 0.60546875}, {"start": 897.13, "end": 897.47, "word": " بتكون", "probability": 0.89501953125}, {"start": 897.47, "end": 897.93, "word": " ملزقة", "probability": 0.8955078125}, {"start": 897.93, "end": 898.35, "word": " في", "probability": 0.92919921875}, {"start": 898.35, "end": 899.15, "word": " جدار", "probability": 0.9903971354166666}, {"start": 899.15, "end": 899.49, "word": " الوعاء", "probability": 0.8800455729166666}, {"start": 899.49, "end": 899.91, "word": " الدمو", "probability": 0.9959309895833334}, {"start": 899.91, "end": 900.81, "word": " فبتسكرش", "probability": 0.908935546875}, {"start": 900.81, "end": 901.55, "word": " الوعاء", "probability": 0.9650065104166666}, {"start": 901.55, "end": 901.85, "word": " الدمو", "probability": 0.9918619791666666}, {"start": 901.85, "end": 902.43, "word": " كاملا", "probability": 0.9847005208333334}, {"start": 902.43, "end": 903.37, "word": " بيسموها", "probability": 0.93681640625}, {"start": 903.37, "end": 903.77, "word": " neural", "probability": 0.89306640625}], "temperature": 1.0}, {"id": 33, "seek": 92758, "start": 904.46, "end": 927.58, "text": "أثرومبس طبعا هذه هي العوامل risk factors for thrombosis منها زي ما انتوا شايفين atherosclerosis, acquired thrombophilia, ciagary, estrogen, malignancy, inflammation, immobility, hereditary, thrombophilia كلها عبارة عن عوامل ان شاء الله هنتطرخ لمعظمها", "tokens": [10721, 12984, 2288, 20498, 3555, 3794, 23032, 3555, 3615, 995, 29538, 39896, 18863, 2407, 10943, 1211, 3148, 6771, 337, 739, 3548, 8211, 9154, 11296, 30767, 1829, 19446, 16472, 2655, 14407, 13412, 995, 33911, 9957, 257, 616, 10466, 1918, 8211, 11, 17554, 739, 3548, 5317, 24169, 11, 6983, 559, 822, 11, 44754, 11, 2806, 788, 6717, 11, 21613, 11, 3397, 996, 1140, 11, 720, 292, 4109, 11, 739, 3548, 5317, 24169, 28242, 11296, 6225, 3555, 9640, 3660, 18871, 6225, 2407, 10943, 1211, 16472, 13412, 16606, 21984, 8032, 1863, 2655, 9566, 2288, 
9778, 32767, 3615, 19913, 2304, 11296], "avg_logprob": -0.28672679429201736, "compression_ratio": 1.4163090128755365, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 904.46, "end": 905.24, "word": "أثرومبس", "probability": 0.5033111572265625}, {"start": 905.24, "end": 906.36, "word": " طبعا", "probability": 0.800537109375}, {"start": 906.36, "end": 906.64, "word": " هذه", "probability": 0.654296875}, {"start": 906.64, "end": 906.96, "word": " هي", "probability": 0.61767578125}, {"start": 906.96, "end": 907.68, "word": " العوامل", "probability": 0.970947265625}, {"start": 907.68, "end": 909.26, "word": " risk", "probability": 0.218017578125}, {"start": 909.26, "end": 909.92, "word": " factors", "probability": 0.896484375}, {"start": 909.92, "end": 910.24, "word": " for", "probability": 0.58251953125}, {"start": 910.24, "end": 910.94, "word": " thrombosis", "probability": 0.8177897135416666}, {"start": 910.94, "end": 914.34, "word": " منها", "probability": 0.964599609375}, {"start": 914.34, "end": 914.6, "word": " زي", "probability": 0.4112548828125}, {"start": 914.6, "end": 914.68, "word": " ما", "probability": 0.95654296875}, {"start": 914.68, "end": 914.84, "word": " انتوا", "probability": 0.82861328125}, {"start": 914.84, "end": 915.46, "word": " شايفين", "probability": 0.9862060546875}, {"start": 915.46, "end": 916.68, "word": " atherosclerosis,", "probability": 0.836328125}, {"start": 916.86, "end": 917.16, "word": " acquired", "probability": 0.6201171875}, {"start": 917.16, "end": 917.96, "word": " thrombophilia,", "probability": 0.95703125}, {"start": 918.16, "end": 918.72, "word": " ciagary,", "probability": 0.4607747395833333}, {"start": 918.86, "end": 919.22, "word": " estrogen,", "probability": 0.662109375}, {"start": 919.88, "end": 920.74, "word": " malignancy,", "probability": 0.9166666666666666}, {"start": 921.34, "end": 921.88, "word": " inflammation,", "probability": 0.8505859375}, {"start": 922.16, "end": 922.76, "word": " 
immobility,", "probability": 0.9638671875}, {"start": 922.86, "end": 923.44, "word": " hereditary,", "probability": 0.8225911458333334}, {"start": 923.9, "end": 924.64, "word": " thrombophilia", "probability": 0.769134521484375}, {"start": 924.64, "end": 924.92, "word": " كلها", "probability": 0.589111328125}, {"start": 924.92, "end": 925.14, "word": " عبارة", "probability": 0.96337890625}, {"start": 925.14, "end": 925.28, "word": " عن", "probability": 0.97705078125}, {"start": 925.28, "end": 925.76, "word": " عوامل", "probability": 0.9783935546875}, {"start": 925.76, "end": 926.08, "word": " ان", "probability": 0.85302734375}, {"start": 926.08, "end": 926.18, "word": " شاء", "probability": 0.899169921875}, {"start": 926.18, "end": 926.2, "word": " الله", "probability": 0.95263671875}, {"start": 926.2, "end": 926.86, "word": " هنتطرخ", "probability": 0.7765299479166666}, {"start": 926.86, "end": 927.58, "word": " لمعظمها", "probability": 0.8615234375}], "temperature": 1.0}, {"id": 34, "seek": 95797, "start": 928.43, "end": 957.97, "text": "Venous thrombosis طبعا أنواع الـ thrombotic disorders زي ما اتفقنا Venus thrombosis و Articular thrombosis و نبدأ بالـ Venus thrombosis و قسموها الـ acquired و inherited و النوع التالت هو mixed or unknown origin و زي ما انتوا شايفين قليل عمل الـ Venus الجلطة في الـ Venus thrombosis انه والله في low في ال blood flow and pressure طبعا صار في high potential", "tokens": [53, 268, 563, 739, 3548, 8211, 23032, 3555, 3615, 995, 14739, 14407, 3615, 2423, 39184, 739, 3548, 9411, 20261, 30767, 1829, 19446, 1975, 2655, 5172, 4587, 8315, 23994, 739, 3548, 8211, 4032, 5735, 14646, 739, 3548, 8211, 4032, 8717, 44510, 10721, 20666, 39184, 23994, 739, 3548, 8211, 4032, 12174, 38251, 2407, 11296, 2423, 39184, 17554, 4032, 27091, 4032, 28239, 45367, 16712, 6027, 2655, 31439, 7467, 420, 9841, 4957, 4032, 30767, 1829, 19446, 16472, 2655, 14407, 13412, 995, 33911, 9957, 12174, 20292, 1211, 6225, 42213, 2423, 39184, 23994, 25724, 1211, 9566, 3660, 
8978, 2423, 39184, 23994, 739, 3548, 8211, 16472, 3224, 16070, 43761, 8978, 2295, 8978, 2423, 3390, 3095, 293, 3321, 23032, 3555, 3615, 995, 20328, 9640, 8978, 1090, 3995], "avg_logprob": -0.26796873956918715, "compression_ratio": 1.7518518518518518, "no_speech_prob": 0.0, "words": [{"start": 928.43, "end": 928.83, "word": "Venous", "probability": 0.6839192708333334}, {"start": 928.83, "end": 929.37, "word": " thrombosis", "probability": 0.813232421875}, {"start": 929.37, "end": 929.69, "word": " طبعا", "probability": 0.8250732421875}, {"start": 929.69, "end": 930.09, "word": " أنواع", "probability": 0.6827392578125}, {"start": 930.09, "end": 930.21, "word": " الـ", "probability": 0.31036376953125}, {"start": 930.21, "end": 930.65, "word": " thrombotic", "probability": 0.6970621744791666}, {"start": 930.65, "end": 931.11, "word": " disorders", "probability": 0.90478515625}, {"start": 931.11, "end": 931.35, "word": " زي", "probability": 0.739990234375}, {"start": 931.35, "end": 931.43, "word": " ما", "probability": 0.88720703125}, {"start": 931.43, "end": 931.99, "word": " اتفقنا", "probability": 0.90693359375}, {"start": 931.99, "end": 932.57, "word": " Venus", "probability": 0.1485595703125}, {"start": 932.57, "end": 933.15, "word": " thrombosis", "probability": 0.9527994791666666}, {"start": 933.15, "end": 933.29, "word": " و", "probability": 0.97265625}, {"start": 933.29, "end": 933.63, "word": " Articular", "probability": 0.4173583984375}, {"start": 933.63, "end": 934.29, "word": " thrombosis", "probability": 0.9599609375}, {"start": 934.29, "end": 934.81, "word": " و", "probability": 0.88330078125}, {"start": 934.81, "end": 935.23, "word": " نبدأ", "probability": 0.7753092447916666}, {"start": 935.23, "end": 935.55, "word": " بالـ", "probability": 0.6143798828125}, {"start": 935.55, "end": 935.87, "word": " Venus", "probability": 0.57421875}, {"start": 935.87, "end": 936.75, "word": " thrombosis", "probability": 0.95068359375}, {"start": 936.75, "end": 937.27, 
"word": " و", "probability": 0.9619140625}, {"start": 937.27, "end": 937.73, "word": " قسموها", "probability": 0.9063720703125}, {"start": 937.73, "end": 937.89, "word": " الـ", "probability": 0.60595703125}, {"start": 937.89, "end": 938.27, "word": " acquired", "probability": 0.60400390625}, {"start": 938.27, "end": 939.05, "word": " و", "probability": 0.984375}, {"start": 939.05, "end": 939.83, "word": " inherited", "probability": 0.9599609375}, {"start": 939.83, "end": 940.91, "word": " و", "probability": 0.93115234375}, {"start": 940.91, "end": 941.33, "word": " النوع", "probability": 0.935546875}, {"start": 941.33, "end": 941.79, "word": " التالت", "probability": 0.93994140625}, {"start": 941.79, "end": 942.03, "word": " هو", "probability": 0.90673828125}, {"start": 942.03, "end": 942.57, "word": " mixed", "probability": 0.80615234375}, {"start": 942.57, "end": 942.87, "word": " or", "probability": 0.791015625}, {"start": 942.87, "end": 943.43, "word": " unknown", "probability": 0.92333984375}, {"start": 943.43, "end": 944.41, "word": " origin", "probability": 0.93994140625}, {"start": 944.41, "end": 946.25, "word": " و", "probability": 0.305419921875}, {"start": 946.25, "end": 946.39, "word": " زي", "probability": 0.885986328125}, {"start": 946.39, "end": 946.47, "word": " ما", "probability": 0.85693359375}, {"start": 946.47, "end": 946.87, "word": " انتوا", "probability": 0.8092447916666666}, {"start": 946.87, "end": 948.13, "word": " شايفين", "probability": 0.955322265625}, {"start": 948.13, "end": 948.93, "word": " قليل", "probability": 0.47509765625}, {"start": 948.93, "end": 949.17, "word": " عمل", "probability": 0.988037109375}, {"start": 949.17, "end": 949.43, "word": " الـ", "probability": 0.807861328125}, {"start": 949.43, "end": 949.63, "word": " Venus", "probability": 0.88720703125}, {"start": 949.63, "end": 950.35, "word": " الجلطة", "probability": 0.8968505859375}, {"start": 950.35, "end": 950.47, "word": " في", "probability": 0.94873046875}, 
{"start": 950.47, "end": 950.61, "word": " الـ", "probability": 0.9140625}, {"start": 950.61, "end": 950.77, "word": " Venus", "probability": 0.81005859375}, {"start": 950.77, "end": 951.49, "word": " thrombosis", "probability": 0.98193359375}, {"start": 951.49, "end": 952.65, "word": " انه", "probability": 0.4864501953125}, {"start": 952.65, "end": 952.83, "word": " والله", "probability": 0.6163330078125}, {"start": 952.83, "end": 953.01, "word": " في", "probability": 0.91357421875}, {"start": 953.01, "end": 953.29, "word": " low", "probability": 0.53515625}, {"start": 953.29, "end": 953.47, "word": " في", "probability": 0.86328125}, {"start": 953.47, "end": 953.53, "word": " ال", "probability": 0.8125}, {"start": 953.53, "end": 953.79, "word": " blood", "probability": 0.6728515625}, {"start": 953.79, "end": 954.25, "word": " flow", "probability": 0.90966796875}, {"start": 954.25, "end": 956.15, "word": " and", "probability": 0.79248046875}, {"start": 956.15, "end": 956.59, "word": " pressure", "probability": 0.93212890625}, {"start": 956.59, "end": 956.97, "word": " طبعا", "probability": 0.9857177734375}, {"start": 956.97, "end": 957.21, "word": " صار", "probability": 0.985595703125}, {"start": 957.21, "end": 957.35, "word": " في", "probability": 0.94287109375}, {"start": 957.35, "end": 957.55, "word": " high", "probability": 0.86962890625}, {"start": 957.55, "end": 957.97, "word": " potential", "probability": 0.7265625}], "temperature": 1.0}, {"id": 35, "seek": 98436, "start": 958.62, "end": 984.36, "text": "والمحصلة أنه بتنشط ال coagulation cascade ويتكون ال fibrin-rich thrombus طبعا هذا يعني عوام الكثيرة بتدخل على الخط مسبب لهذه الظاهرة منها الموضوع function of age يعني الموضوع الأمر اللي داخل biological condition", "tokens": [2407, 45340, 5016, 9381, 37977, 14739, 3224, 39894, 1863, 8592, 9566, 2423, 598, 559, 2776, 50080, 4032, 36081, 30544, 2423, 13116, 12629, 12, 10794, 739, 3548, 301, 23032, 3555, 3615, 995, 23758, 37495, 22653, 6225, 2407, 10943, 33251, 
12984, 48923, 39894, 3215, 9778, 1211, 15844, 33962, 9566, 47524, 3555, 3555, 46740, 24192, 6024, 116, 40294, 25720, 9154, 11296, 2423, 2304, 2407, 11242, 45367, 2445, 295, 3205, 37495, 22653, 9673, 2407, 11242, 45367, 16247, 29973, 13672, 1829, 11778, 47283, 1211, 13910, 4188], "avg_logprob": -0.23513719112407871, "compression_ratio": 1.4792626728110598, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 958.62, "end": 959.48, "word": "والمحصلة", "probability": 0.76943359375}, {"start": 959.48, "end": 960.86, "word": " أنه", "probability": 0.7071533203125}, {"start": 960.86, "end": 961.62, "word": " بتنشط", "probability": 0.8743896484375}, {"start": 961.62, "end": 961.78, "word": " ال", "probability": 0.93994140625}, {"start": 961.78, "end": 962.36, "word": " coagulation", "probability": 0.80322265625}, {"start": 962.36, "end": 962.94, "word": " cascade", "probability": 0.8955078125}, {"start": 962.94, "end": 963.5, "word": " ويتكون", "probability": 0.6612955729166666}, {"start": 963.5, "end": 963.94, "word": " ال", "probability": 0.82958984375}, {"start": 963.94, "end": 964.52, "word": " fibrin", "probability": 0.4202880859375}, {"start": 964.52, "end": 968.14, "word": "-rich", "probability": 0.67919921875}, {"start": 968.14, "end": 968.88, "word": " thrombus", "probability": 0.8792317708333334}, {"start": 968.88, "end": 971.22, "word": " طبعا", "probability": 0.945556640625}, {"start": 971.22, "end": 971.44, "word": " هذا", "probability": 0.68408203125}, {"start": 971.44, "end": 972.52, "word": " يعني", "probability": 0.93310546875}, {"start": 972.52, "end": 975.4, "word": " عوام", "probability": 0.8507486979166666}, {"start": 975.4, "end": 975.82, "word": " الكثيرة", "probability": 0.6803385416666666}, {"start": 975.82, "end": 976.28, "word": " بتدخل", "probability": 0.9703369140625}, {"start": 976.28, "end": 976.46, "word": " على", "probability": 0.921875}, {"start": 976.46, "end": 976.8, "word": " الخط", "probability": 0.988037109375}, {"start": 
976.8, "end": 977.38, "word": " مسبب", "probability": 0.8284505208333334}, {"start": 977.38, "end": 977.84, "word": " لهذه", "probability": 0.910400390625}, {"start": 977.84, "end": 978.56, "word": " الظاهرة", "probability": 0.9248046875}, {"start": 978.56, "end": 979.26, "word": " منها", "probability": 0.87744140625}, {"start": 979.26, "end": 979.94, "word": " الموضوع", "probability": 0.79228515625}, {"start": 979.94, "end": 980.56, "word": " function", "probability": 0.390869140625}, {"start": 980.56, "end": 980.78, "word": " of", "probability": 0.9765625}, {"start": 980.78, "end": 981.08, "word": " age", "probability": 0.89697265625}, {"start": 981.08, "end": 981.36, "word": " يعني", "probability": 0.917236328125}, {"start": 981.36, "end": 981.82, "word": " الموضوع", "probability": 0.8828125}, {"start": 981.82, "end": 982.24, "word": " الأمر", "probability": 0.646240234375}, {"start": 982.24, "end": 982.52, "word": " اللي", "probability": 0.785400390625}, {"start": 982.52, "end": 982.96, "word": " داخل", "probability": 0.737060546875}, {"start": 982.96, "end": 983.6, "word": " biological", "probability": 0.849609375}, {"start": 983.6, "end": 984.36, "word": " condition", "probability": 0.96484375}], "temperature": 1.0}, {"id": 36, "seek": 101272, "start": 985.62, "end": 1012.72, "text": "genetic and environmental factors وارتباط هذه العوامل كلها ببعض ممكن يؤدي إلى تكوين الجلد طبعا من الاسم Venus thrombosis أو Venus thromboembolism قسموها إلى تلت أنواع أساسية أول نوع سموه deep venous thrombosis أو DVT والتاني palmonary embolism", "tokens": [1766, 3532, 293, 8303, 6771, 4032, 9640, 2655, 3555, 41193, 29538, 18863, 2407, 10943, 1211, 28242, 11296, 4724, 3555, 3615, 11242, 3714, 43020, 7251, 33604, 16254, 30731, 6055, 4117, 2407, 9957, 25724, 1211, 3215, 23032, 3555, 3615, 995, 9154, 2423, 32277, 2304, 23994, 739, 3548, 8211, 34051, 23994, 739, 298, 1763, 443, 17460, 1434, 12174, 38251, 2407, 11296, 30731, 6055, 1211, 2655, 14739, 14407, 3615, 5551, 3794, 32277, 
10632, 5551, 12610, 8717, 45367, 8608, 2304, 2407, 3224, 2452, 6138, 563, 739, 3548, 8211, 34051, 17021, 51, 16070, 2655, 7649, 1829, 3984, 46386, 4605, 401, 1434], "avg_logprob": -0.2283528664459785, "compression_ratio": 1.5221238938053097, "no_speech_prob": 4.172325134277344e-07, "words": [{"start": 985.62, "end": 986.74, "word": "genetic", "probability": 0.6378173828125}, {"start": 986.74, "end": 987.14, "word": " and", "probability": 0.921875}, {"start": 987.14, "end": 987.72, "word": " environmental", "probability": 0.7412109375}, {"start": 987.72, "end": 988.46, "word": " factors", "probability": 0.9423828125}, {"start": 988.46, "end": 990.66, "word": " وارتباط", "probability": 0.84482421875}, {"start": 990.66, "end": 990.9, "word": " هذه", "probability": 0.89208984375}, {"start": 990.9, "end": 991.28, "word": " العوامل", "probability": 0.954833984375}, {"start": 991.28, "end": 991.62, "word": " كلها", "probability": 0.91552734375}, {"start": 991.62, "end": 992.56, "word": " ببعض", "probability": 0.822509765625}, {"start": 992.56, "end": 992.88, "word": " ممكن", "probability": 0.93603515625}, {"start": 992.88, "end": 993.24, "word": " يؤدي", "probability": 0.7052408854166666}, {"start": 993.24, "end": 993.64, "word": " إلى", "probability": 0.464111328125}, {"start": 993.64, "end": 994.8, "word": " تكوين", "probability": 0.905517578125}, {"start": 994.8, "end": 995.28, "word": " الجلد", "probability": 0.639404296875}, {"start": 995.28, "end": 996.6, "word": " طبعا", "probability": 0.918212890625}, {"start": 996.6, "end": 997.9, "word": " من", "probability": 0.73681640625}, {"start": 997.9, "end": 998.44, "word": " الاسم", "probability": 0.794677734375}, {"start": 998.44, "end": 999.6, "word": " Venus", "probability": 0.473876953125}, {"start": 999.6, "end": 1000.52, "word": " thrombosis", "probability": 0.7849934895833334}, {"start": 1000.52, "end": 1000.74, "word": " أو", "probability": 0.70263671875}, {"start": 1000.74, "end": 1001.12, "word": " Venus", 
"probability": 0.56201171875}, {"start": 1001.12, "end": 1002.26, "word": " thromboembolism", "probability": 0.8243815104166666}, {"start": 1002.26, "end": 1003.16, "word": " قسموها", "probability": 0.9464111328125}, {"start": 1003.16, "end": 1003.3, "word": " إلى", "probability": 0.89111328125}, {"start": 1003.3, "end": 1003.58, "word": " تلت", "probability": 0.8636067708333334}, {"start": 1003.58, "end": 1003.88, "word": " أنواع", "probability": 0.9373372395833334}, {"start": 1003.88, "end": 1004.54, "word": " أساسية", "probability": 0.9833984375}, {"start": 1004.54, "end": 1005.26, "word": " أول", "probability": 0.914794921875}, {"start": 1005.26, "end": 1005.52, "word": " نوع", "probability": 0.908447265625}, {"start": 1005.52, "end": 1005.96, "word": " سموه", "probability": 0.8980712890625}, {"start": 1005.96, "end": 1006.2, "word": " deep", "probability": 0.54736328125}, {"start": 1006.2, "end": 1006.68, "word": " venous", "probability": 0.6298828125}, {"start": 1006.68, "end": 1007.4, "word": " thrombosis", "probability": 0.97021484375}, {"start": 1007.4, "end": 1007.66, "word": " أو", "probability": 0.6923828125}, {"start": 1007.66, "end": 1008.6, "word": " DVT", "probability": 0.7271728515625}, {"start": 1008.6, "end": 1010.32, "word": " والتاني", "probability": 0.886962890625}, {"start": 1010.32, "end": 1011.08, "word": " palmonary", "probability": 0.5528564453125}, {"start": 1011.08, "end": 1012.72, "word": " embolism", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 37, "seek": 104278, "start": 1016.58, "end": 1042.78, "text": "والتالت Superficial أو Portal أو Cerebral أو Retinal Vein Thrombosis أنواع مختلفة جدا حسب الموقع صاروا يسموها إيه حسب وين صارت الجنة طيب من الأسباب التي تستدعينا إلى عمل فحوصات Hemostatic Test أو Coagulation Test", "tokens": [2407, 6027, 2655, 6027, 2655, 4548, 1786, 831, 34051, 38281, 34051, 383, 323, 32728, 34051, 11495, 2071, 9706, 259, 334, 4397, 65, 8211, 14739, 14407, 3615, 3714, 46456, 46538, 3660, 10874, 
28259, 11331, 35457, 9673, 30543, 3615, 20328, 9640, 14407, 7251, 38251, 2407, 11296, 11933, 1829, 3224, 11331, 35457, 4032, 9957, 20328, 9640, 2655, 25724, 1863, 3660, 23032, 1829, 3555, 9154, 16247, 35457, 16758, 38392, 6055, 14851, 3215, 3615, 1829, 8315, 30731, 6225, 42213, 6156, 5016, 2407, 9381, 9307, 18568, 555, 2399, 9279, 34051, 3066, 559, 2776, 9279], "avg_logprob": -0.1892556179775281, "compression_ratio": 1.4383561643835616, "no_speech_prob": 0.0, "words": [{"start": 1016.58, "end": 1017.24, "word": "والتالت", "probability": 0.81162109375}, {"start": 1017.24, "end": 1018.12, "word": " Superficial", "probability": 0.7362467447916666}, {"start": 1018.12, "end": 1019.32, "word": " أو", "probability": 0.80859375}, {"start": 1019.32, "end": 1019.9, "word": " Portal", "probability": 0.513671875}, {"start": 1019.9, "end": 1020.96, "word": " أو", "probability": 0.9091796875}, {"start": 1020.96, "end": 1021.62, "word": " Cerebral", "probability": 0.8408203125}, {"start": 1021.62, "end": 1022.72, "word": " أو", "probability": 0.92431640625}, {"start": 1022.72, "end": 1023.52, "word": " Retinal", "probability": 0.967529296875}, {"start": 1023.52, "end": 1024.94, "word": " Vein", "probability": 0.7239990234375}, {"start": 1024.94, "end": 1025.58, "word": " Thrombosis", "probability": 0.79010009765625}, {"start": 1025.58, "end": 1025.9, "word": " أنواع", "probability": 0.7989908854166666}, {"start": 1025.9, "end": 1026.54, "word": " مختلفة", "probability": 0.9947509765625}, {"start": 1026.54, "end": 1026.78, "word": " جدا", "probability": 0.876953125}, {"start": 1026.78, "end": 1027.14, "word": " حسب", "probability": 0.9609375}, {"start": 1027.14, "end": 1027.82, "word": " الموقع", "probability": 0.9939778645833334}, {"start": 1027.82, "end": 1028.68, "word": " صاروا", "probability": 0.8271484375}, {"start": 1028.68, "end": 1029.1, "word": " يسموها", "probability": 0.951904296875}, {"start": 1029.1, "end": 1029.36, "word": " إيه", "probability": 0.806884765625}, 
{"start": 1029.36, "end": 1030.38, "word": " حسب", "probability": 0.7327880859375}, {"start": 1030.38, "end": 1030.94, "word": " وين", "probability": 0.88037109375}, {"start": 1030.94, "end": 1031.24, "word": " صارت", "probability": 0.8567708333333334}, {"start": 1031.24, "end": 1031.66, "word": " الجنة", "probability": 0.7017415364583334}, {"start": 1031.66, "end": 1033.54, "word": " طيب", "probability": 0.9129231770833334}, {"start": 1033.54, "end": 1033.76, "word": " من", "probability": 0.876953125}, {"start": 1033.76, "end": 1034.4, "word": " الأسباب", "probability": 0.9912109375}, {"start": 1034.4, "end": 1034.78, "word": " التي", "probability": 0.77734375}, {"start": 1034.78, "end": 1036.0, "word": " تستدعينا", "probability": 0.826416015625}, {"start": 1036.0, "end": 1036.58, "word": " إلى", "probability": 0.90771484375}, {"start": 1036.58, "end": 1037.48, "word": " عمل", "probability": 0.994140625}, {"start": 1037.48, "end": 1038.54, "word": " فحوصات", "probability": 0.9525390625}, {"start": 1038.54, "end": 1041.34, "word": " Hemostatic", "probability": 0.86865234375}, {"start": 1041.34, "end": 1041.64, "word": " Test", "probability": 0.63623046875}, {"start": 1041.64, "end": 1041.82, "word": " أو", "probability": 0.61376953125}, {"start": 1041.82, "end": 1042.46, "word": " Coagulation", "probability": 0.7801106770833334}, {"start": 1042.46, "end": 1042.78, "word": " Test", "probability": 0.88525390625}], "temperature": 1.0}, {"id": 38, "seek": 107157, "start": 1044.41, "end": 1071.57, "text": "إنه من الأسباب، وأسباب كثيرة، بس أكتر حاجة ممكن واحد يشوق فيها، إنه لو واحد إجته جلطة سابقة و تكررت، بدك تعمله فحوصات و إتابة ولا لأ؟ لأ دي و لا أكيد، نمرة اتنين بتعمل فحوصات عشان تحد الإيلاش ولا لأ؟ نمرة تلاتة بتعمل فحوصات for genetic counseling واحد في العيلة عنده، اتنين انصابه بالجلطة، بتعملش فحوصات لباقي العيلة؟", "tokens": [28814, 1863, 3224, 9154, 16247, 35457, 16758, 12399, 36725, 35457, 16758, 9122, 12984, 48923, 12399, 4724, 3794, 5551, 4117, 2655, 2288, 
11331, 26108, 3660, 3714, 43020, 36764, 24401, 7251, 8592, 30543, 8978, 11296, 12399, 36145, 3224, 45164, 36764, 24401, 11933, 7435, 47395, 10874, 1211, 9566, 3660, 8608, 16758, 28671, 4032, 6055, 37983, 43500, 12399, 47525, 4117, 6055, 25957, 43761, 6156, 5016, 2407, 9381, 9307, 4032, 11933, 2655, 16758, 3660, 49429, 5296, 10721, 22807, 5296, 10721, 11778, 1829, 4032, 20193, 5551, 4117, 25708, 12399, 8717, 2304, 25720, 1975, 2655, 1863, 9957, 39894, 25957, 1211, 6156, 5016, 2407, 9381, 9307, 6225, 8592, 7649, 6055, 24401, 2423, 28814, 26895, 33599, 49429, 5296, 10721, 22807, 8717, 2304, 25720, 6055, 1211, 9307, 3660, 39894, 25957, 1211, 6156, 5016, 2407, 9381, 9307, 337, 12462, 23889, 36764, 24401, 8978, 18863, 26895, 3660, 43242, 3224, 12399, 1975, 2655, 1863, 9957, 16472, 9381, 16758, 3224, 20666, 7435, 1211, 9566, 3660, 12399, 39894, 25957, 1211, 8592, 6156, 5016, 2407, 9381, 9307, 5296, 3555, 995, 38436, 18863, 26895, 3660, 22807], "avg_logprob": -0.1972426496884402, "compression_ratio": 1.9716312056737588, "no_speech_prob": 6.556510925292969e-07, "words": [{"start": 1044.41, "end": 1044.83, "word": "إنه", "probability": 0.7957356770833334}, {"start": 1044.83, "end": 1045.21, "word": " من", "probability": 0.9072265625}, {"start": 1045.21, "end": 1045.75, "word": " الأسباب،", "probability": 0.798309326171875}, {"start": 1045.75, "end": 1046.05, "word": " وأسباب", "probability": 0.8157552083333334}, {"start": 1046.05, "end": 1046.47, "word": " كثيرة،", "probability": 0.8603515625}, {"start": 1046.47, "end": 1046.59, "word": " بس", "probability": 0.96337890625}, {"start": 1046.59, "end": 1046.95, "word": " أكتر", "probability": 0.9752197265625}, {"start": 1046.95, "end": 1047.49, "word": " حاجة", "probability": 0.93701171875}, {"start": 1047.49, "end": 1047.75, "word": " ممكن", "probability": 0.974853515625}, {"start": 1047.75, "end": 1047.99, "word": " واحد", "probability": 0.92822265625}, {"start": 1047.99, "end": 1048.25, "word": " يشوق", "probability": 
0.79345703125}, {"start": 1048.25, "end": 1049.11, "word": " فيها،", "probability": 0.7900390625}, {"start": 1049.11, "end": 1049.33, "word": " إنه", "probability": 0.83056640625}, {"start": 1049.33, "end": 1049.43, "word": " لو", "probability": 0.9462890625}, {"start": 1049.43, "end": 1050.25, "word": " واحد", "probability": 0.983642578125}, {"start": 1050.25, "end": 1050.75, "word": " إجته", "probability": 0.6455078125}, {"start": 1050.75, "end": 1051.13, "word": " جلطة", "probability": 0.990966796875}, {"start": 1051.13, "end": 1051.65, "word": " سابقة", "probability": 0.96826171875}, {"start": 1051.65, "end": 1051.83, "word": " و", "probability": 0.64892578125}, {"start": 1051.83, "end": 1052.91, "word": " تكررت،", "probability": 0.88623046875}, {"start": 1052.91, "end": 1053.09, "word": " بدك", "probability": 0.79541015625}, {"start": 1053.09, "end": 1053.45, "word": " تعمله", "probability": 0.9503580729166666}, {"start": 1053.45, "end": 1053.89, "word": " فحوصات", "probability": 0.821728515625}, {"start": 1053.89, "end": 1054.01, "word": " و", "probability": 0.79248046875}, {"start": 1054.01, "end": 1054.29, "word": " إتابة", "probability": 0.76580810546875}, {"start": 1054.29, "end": 1054.41, "word": " ولا", "probability": 0.64990234375}, {"start": 1054.41, "end": 1054.89, "word": " لأ؟", "probability": 0.82275390625}, {"start": 1054.89, "end": 1055.71, "word": " لأ", "probability": 0.6282958984375}, {"start": 1055.71, "end": 1055.79, "word": " دي", "probability": 0.931884765625}, {"start": 1055.79, "end": 1055.87, "word": " و", "probability": 0.34521484375}, {"start": 1055.87, "end": 1055.95, "word": " لا", "probability": 0.2174072265625}, {"start": 1055.95, "end": 1056.75, "word": " أكيد،", "probability": 0.856201171875}, {"start": 1056.75, "end": 1056.93, "word": " نمرة", "probability": 0.7047526041666666}, {"start": 1056.93, "end": 1057.33, "word": " اتنين", "probability": 0.9034423828125}, {"start": 1057.33, "end": 1058.13, "word": " بتعمل", 
"probability": 0.87158203125}, {"start": 1058.13, "end": 1058.61, "word": " فحوصات", "probability": 0.99560546875}, {"start": 1058.61, "end": 1058.97, "word": " عشان", "probability": 0.97802734375}, {"start": 1058.97, "end": 1059.31, "word": " تحد", "probability": 0.952880859375}, {"start": 1059.31, "end": 1059.87, "word": " الإيلاش", "probability": 0.36688232421875}, {"start": 1059.87, "end": 1060.59, "word": " ولا", "probability": 0.446533203125}, {"start": 1060.59, "end": 1061.49, "word": " لأ؟", "probability": 0.8077799479166666}, {"start": 1061.49, "end": 1061.85, "word": " نمرة", "probability": 0.9645182291666666}, {"start": 1061.85, "end": 1062.83, "word": " تلاتة", "probability": 0.9725341796875}, {"start": 1062.83, "end": 1063.33, "word": " بتعمل", "probability": 0.91015625}, {"start": 1063.33, "end": 1063.99, "word": " فحوصات", "probability": 0.9978515625}, {"start": 1063.99, "end": 1064.19, "word": " for", "probability": 0.955078125}, {"start": 1064.19, "end": 1064.61, "word": " genetic", "probability": 0.7978515625}, {"start": 1064.61, "end": 1065.25, "word": " counseling", "probability": 0.72802734375}, {"start": 1065.25, "end": 1066.47, "word": " واحد", "probability": 0.828125}, {"start": 1066.47, "end": 1066.65, "word": " في", "probability": 0.974609375}, {"start": 1066.65, "end": 1066.99, "word": " العيلة", "probability": 0.9554036458333334}, {"start": 1066.99, "end": 1067.51, "word": " عنده،", "probability": 0.9212239583333334}, {"start": 1067.51, "end": 1067.81, "word": " اتنين", "probability": 0.942626953125}, {"start": 1067.81, "end": 1068.49, "word": " انصابه", "probability": 0.7291259765625}, {"start": 1068.49, "end": 1069.63, "word": " بالجلطة،", "probability": 0.9358723958333334}, {"start": 1069.63, "end": 1070.05, "word": " بتعملش", "probability": 0.79638671875}, {"start": 1070.05, "end": 1070.45, "word": " فحوصات", "probability": 0.997265625}, {"start": 1070.45, "end": 1070.73, "word": " لباقي", "probability": 0.81396484375}, {"start": 
1070.73, "end": 1071.57, "word": " العيلة؟", "probability": 0.9527587890625}], "temperature": 1.0}, {"id": 39, "seek": 109939, "start": 1082.15, "end": 1099.39, "text": "هو عبارة عن Screening ممكن تشوفه بشكل عام ك most common disease ال most common is DBT أشهر أنواع الشباب هو ال DBT، DB-Nephroposis يحصل في ال lower limb", "tokens": [3224, 2407, 6225, 3555, 9640, 3660, 18871, 25823, 278, 3714, 43020, 6055, 8592, 38688, 3224, 4724, 8592, 28820, 6225, 10943, 9122, 881, 2689, 4752, 2423, 881, 2689, 307, 26754, 51, 5551, 8592, 3224, 2288, 14739, 14407, 3615, 25124, 3555, 16758, 31439, 2423, 26754, 51, 12399, 26754, 12, 45, 595, 71, 1513, 8211, 7251, 5016, 36520, 8978, 2423, 3126, 30390], "avg_logprob": -0.49661456843217217, "compression_ratio": 1.2469879518072289, "no_speech_prob": 5.0067901611328125e-06, "words": [{"start": 1082.15, "end": 1082.99, "word": "هو", "probability": 0.296051025390625}, {"start": 1082.99, "end": 1083.83, "word": " عبارة", "probability": 0.7273406982421875}, {"start": 1083.83, "end": 1083.95, "word": " عن", "probability": 0.95947265625}, {"start": 1083.95, "end": 1084.45, "word": " Screening", "probability": 0.54119873046875}, {"start": 1084.45, "end": 1084.75, "word": " ممكن", "probability": 0.750732421875}, {"start": 1084.75, "end": 1085.73, "word": " تشوفه", "probability": 0.966796875}, {"start": 1085.73, "end": 1086.27, "word": " بشكل", "probability": 0.9386393229166666}, {"start": 1086.27, "end": 1087.07, "word": " عام", "probability": 0.987548828125}, {"start": 1087.07, "end": 1087.73, "word": " ك", "probability": 0.9462890625}, {"start": 1087.73, "end": 1087.97, "word": " most", "probability": 0.314697265625}, {"start": 1087.97, "end": 1088.41, "word": " common", "probability": 0.93994140625}, {"start": 1088.41, "end": 1089.57, "word": " disease", "probability": 0.849609375}, {"start": 1089.57, "end": 1091.29, "word": " ال", "probability": 0.24267578125}, {"start": 1091.29, "end": 1091.51, "word": " most", "probability": 0.59375}, 
{"start": 1091.51, "end": 1091.89, "word": " common", "probability": 0.9560546875}, {"start": 1091.89, "end": 1092.25, "word": " is", "probability": 0.7080078125}, {"start": 1092.25, "end": 1092.93, "word": " DBT", "probability": 0.875}, {"start": 1092.93, "end": 1094.63, "word": " أشهر", "probability": 0.881103515625}, {"start": 1094.63, "end": 1094.97, "word": " أنواع", "probability": 0.7444661458333334}, {"start": 1094.97, "end": 1095.37, "word": " الشباب", "probability": 0.87548828125}, {"start": 1095.37, "end": 1095.65, "word": " هو", "probability": 0.8701171875}, {"start": 1095.65, "end": 1095.79, "word": " ال", "probability": 0.52734375}, {"start": 1095.79, "end": 1096.35, "word": " DBT،", "probability": 0.5927734375}, {"start": 1096.35, "end": 1096.49, "word": " DB", "probability": 0.390625}, {"start": 1096.49, "end": 1097.35, "word": "-Nephroposis", "probability": 0.4105631510416667}, {"start": 1097.35, "end": 1098.59, "word": " يحصل", "probability": 0.6686197916666666}, {"start": 1098.59, "end": 1098.71, "word": " في", "probability": 0.96142578125}, {"start": 1098.71, "end": 1098.83, "word": " ال", "probability": 0.83642578125}, {"start": 1098.83, "end": 1098.99, "word": " lower", "probability": 0.70849609375}, {"start": 1098.99, "end": 1099.39, "word": " limb", "probability": 0.91015625}], "temperature": 1.0}, {"id": 40, "seek": 113077, "start": 1102.71, "end": 1130.77, "text": "ال .. ال .. ال .. 
يعني بشكل أساسي و غالبا في ال lower limb بيكون فيه stasis أو low في ال blood flow، صح؟ خصوص بيكون فيه ركود في الدم، ركود في الدم، خصوص الواحد ما بتحركش، واحد قاعد فترة طويلة، واحد نايم بشكل دائم، واحد عامل عملية وقعد فترة طويلة في الفرار", "tokens": [6027, 4386, 2423, 4386, 2423, 4386, 37495, 22653, 4724, 8592, 28820, 5551, 3794, 32277, 1829, 4032, 32771, 6027, 3555, 995, 8978, 2423, 3126, 30390, 4724, 1829, 30544, 8978, 3224, 342, 26632, 34051, 2295, 8978, 2423, 3390, 3095, 12399, 20328, 5016, 22807, 16490, 9381, 2407, 9381, 4724, 1829, 30544, 8978, 3224, 12602, 4117, 23328, 8978, 32748, 2304, 12399, 12602, 4117, 23328, 8978, 32748, 2304, 12399, 16490, 9381, 2407, 9381, 2423, 14407, 24401, 19446, 39894, 5016, 31747, 8592, 12399, 36764, 24401, 12174, 995, 22488, 6156, 2655, 25720, 23032, 2407, 26895, 3660, 12399, 36764, 24401, 8717, 995, 32640, 4724, 8592, 28820, 11778, 16373, 2304, 12399, 36764, 24401, 6225, 10943, 1211, 6225, 42213, 10632, 4032, 4587, 22488, 6156, 2655, 25720, 23032, 2407, 26895, 3660, 8978, 27188, 2288, 9640], "avg_logprob": -0.14112499475479126, "compression_ratio": 1.9813084112149533, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 1102.71, "end": 1103.11, "word": "ال", "probability": 0.37548828125}, {"start": 1103.11, "end": 1103.33, "word": " ..", "probability": 0.701171875}, {"start": 1103.33, "end": 1103.53, "word": " ال", "probability": 0.55908203125}, {"start": 1103.53, "end": 1103.57, "word": " ..", "probability": 0.984375}, {"start": 1103.57, "end": 1103.69, "word": " ال", "probability": 0.82958984375}, {"start": 1103.69, "end": 1103.85, "word": " ..", "probability": 0.92529296875}, {"start": 1103.85, "end": 1104.61, "word": " يعني", "probability": 0.871826171875}, {"start": 1104.61, "end": 1104.97, "word": " بشكل", "probability": 0.9890950520833334}, {"start": 1104.97, "end": 1105.55, "word": " أساسي", "probability": 0.9454345703125}, {"start": 1105.55, "end": 1106.41, "word": " و", "probability": 0.93505859375}, 
{"start": 1106.41, "end": 1107.07, "word": " غالبا", "probability": 0.95361328125}, {"start": 1107.07, "end": 1107.21, "word": " في", "probability": 0.81982421875}, {"start": 1107.21, "end": 1107.33, "word": " ال", "probability": 0.845703125}, {"start": 1107.33, "end": 1107.53, "word": " lower", "probability": 0.76318359375}, {"start": 1107.53, "end": 1107.85, "word": " limb", "probability": 0.9384765625}, {"start": 1107.85, "end": 1108.23, "word": " بيكون", "probability": 0.87158203125}, {"start": 1108.23, "end": 1108.51, "word": " فيه", "probability": 0.88427734375}, {"start": 1108.51, "end": 1109.29, "word": " stasis", "probability": 0.7998046875}, {"start": 1109.29, "end": 1110.33, "word": " أو", "probability": 0.72216796875}, {"start": 1110.33, "end": 1110.75, "word": " low", "probability": 0.9755859375}, {"start": 1110.75, "end": 1111.01, "word": " في", "probability": 0.94970703125}, {"start": 1111.01, "end": 1111.09, "word": " ال", "probability": 0.7919921875}, {"start": 1111.09, "end": 1111.39, "word": " blood", "probability": 0.98095703125}, {"start": 1111.39, "end": 1112.29, "word": " flow،", "probability": 0.58203125}, {"start": 1112.29, "end": 1113.47, "word": " صح؟", "probability": 0.96044921875}, {"start": 1113.47, "end": 1113.71, "word": " خصوص", "probability": 0.785491943359375}, {"start": 1113.71, "end": 1114.01, "word": " بيكون", "probability": 0.89208984375}, {"start": 1114.01, "end": 1114.27, "word": " فيه", "probability": 0.919921875}, {"start": 1114.27, "end": 1114.79, "word": " ركود", "probability": 0.86181640625}, {"start": 1114.79, "end": 1115.03, "word": " في", "probability": 0.97900390625}, {"start": 1115.03, "end": 1115.97, "word": " الدم،", "probability": 0.95361328125}, {"start": 1115.97, "end": 1116.49, "word": " ركود", "probability": 0.9866536458333334}, {"start": 1116.49, "end": 1116.67, "word": " في", "probability": 0.9921875}, {"start": 1116.67, "end": 1117.63, "word": " الدم،", "probability": 0.9186197916666666}, {"start": 
1117.63, "end": 1117.85, "word": " خصوص", "probability": 0.984130859375}, {"start": 1117.85, "end": 1118.25, "word": " الواحد", "probability": 0.7337239583333334}, {"start": 1118.25, "end": 1118.49, "word": " ما", "probability": 0.841796875}, {"start": 1118.49, "end": 1119.37, "word": " بتحركش،", "probability": 0.84658203125}, {"start": 1119.37, "end": 1119.55, "word": " واحد", "probability": 0.922119140625}, {"start": 1119.55, "end": 1119.87, "word": " قاعد", "probability": 0.85595703125}, {"start": 1119.87, "end": 1120.21, "word": " فترة", "probability": 0.994140625}, {"start": 1120.21, "end": 1121.31, "word": " طويلة،", "probability": 0.92998046875}, {"start": 1121.31, "end": 1121.91, "word": " واحد", "probability": 0.946533203125}, {"start": 1121.91, "end": 1126.73, "word": " نايم", "probability": 0.8180338541666666}, {"start": 1126.73, "end": 1127.17, "word": " بشكل", "probability": 0.9977213541666666}, {"start": 1127.17, "end": 1128.45, "word": " دائم،", "probability": 0.9874267578125}, {"start": 1128.45, "end": 1128.63, "word": " واحد", "probability": 0.96923828125}, {"start": 1128.63, "end": 1128.91, "word": " عامل", "probability": 0.88818359375}, {"start": 1128.91, "end": 1129.29, "word": " عملية", "probability": 0.9718424479166666}, {"start": 1129.29, "end": 1129.59, "word": " وقعد", "probability": 0.7584635416666666}, {"start": 1129.59, "end": 1129.87, "word": " فترة", "probability": 0.99169921875}, {"start": 1129.87, "end": 1130.21, "word": " طويلة", "probability": 0.9334716796875}, {"start": 1130.21, "end": 1130.33, "word": " في", "probability": 0.91455078125}, {"start": 1130.33, "end": 1130.77, "word": " الفرار", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 41, "seek": 115110, "start": 1131.35, "end": 1151.11, "text": "هذه كلها بتؤدي لإنه يصير فيه low في ال blood flow و بيصير فيه stasis و خصوصا فيه في الأطرا خصوصا فيه في الأطرا طبعا يا جماعة بتبدأ مازال فيه stasis معناته فيه stagnation", "tokens": [3224, 24192, 28242, 11296, 39894, 
33604, 16254, 5296, 28814, 1863, 3224, 7251, 9381, 13546, 8978, 3224, 2295, 8978, 2423, 3390, 3095, 4032, 4724, 1829, 9381, 13546, 8978, 3224, 342, 26632, 4032, 16490, 9381, 2407, 9381, 995, 8978, 3224, 8978, 16247, 9566, 23557, 16490, 9381, 2407, 9381, 995, 8978, 3224, 8978, 16247, 9566, 23557, 23032, 3555, 3615, 995, 35186, 10874, 15042, 27884, 39894, 44510, 10721, 3714, 31377, 6027, 8978, 3224, 342, 26632, 20449, 8315, 47395, 8978, 3224, 32853, 399], "avg_logprob": -0.2658227983909317, "compression_ratio": 1.7371794871794872, "no_speech_prob": 0.0, "words": [{"start": 1131.35, "end": 1131.67, "word": "هذه", "probability": 0.68408203125}, {"start": 1131.67, "end": 1131.99, "word": " كلها", "probability": 0.7802734375}, {"start": 1131.99, "end": 1132.49, "word": " بتؤدي", "probability": 0.5736083984375}, {"start": 1132.49, "end": 1133.79, "word": " لإنه", "probability": 0.688385009765625}, {"start": 1133.79, "end": 1134.55, "word": " يصير", "probability": 0.7609049479166666}, {"start": 1134.55, "end": 1134.89, "word": " فيه", "probability": 0.837158203125}, {"start": 1134.89, "end": 1135.19, "word": " low", "probability": 0.57568359375}, {"start": 1135.19, "end": 1135.79, "word": " في", "probability": 0.765625}, {"start": 1135.79, "end": 1135.87, "word": " ال", "probability": 0.443115234375}, {"start": 1135.87, "end": 1136.17, "word": " blood", "probability": 0.953125}, {"start": 1136.17, "end": 1136.67, "word": " flow", "probability": 0.8720703125}, {"start": 1136.67, "end": 1137.49, "word": " و", "probability": 0.5}, {"start": 1137.49, "end": 1137.79, "word": " بيصير", "probability": 0.7625732421875}, {"start": 1137.79, "end": 1138.07, "word": " فيه", "probability": 0.949951171875}, {"start": 1138.07, "end": 1138.67, "word": " stasis", "probability": 0.74658203125}, {"start": 1138.67, "end": 1138.87, "word": " و", "probability": 0.9404296875}, {"start": 1138.87, "end": 1139.35, "word": " خصوصا", "probability": 0.9607421875}, {"start": 1139.35, "end": 1139.61, 
"word": " فيه", "probability": 0.3924560546875}, {"start": 1139.61, "end": 1140.09, "word": " في", "probability": 0.45751953125}, {"start": 1140.09, "end": 1140.65, "word": " الأطرا", "probability": 0.88671875}, {"start": 1140.65, "end": 1141.33, "word": " خصوصا", "probability": 0.887939453125}, {"start": 1141.33, "end": 1141.77, "word": " فيه", "probability": 0.905029296875}, {"start": 1141.77, "end": 1142.33, "word": " في", "probability": 0.9111328125}, {"start": 1142.33, "end": 1142.77, "word": " الأطرا", "probability": 0.9820963541666666}, {"start": 1142.77, "end": 1144.61, "word": " طبعا", "probability": 0.9697265625}, {"start": 1144.61, "end": 1144.75, "word": " يا", "probability": 0.8310546875}, {"start": 1144.75, "end": 1145.01, "word": " جماعة", "probability": 0.7965494791666666}, {"start": 1145.01, "end": 1147.03, "word": " بتبدأ", "probability": 0.833984375}, {"start": 1147.03, "end": 1147.87, "word": " مازال", "probability": 0.6609700520833334}, {"start": 1147.87, "end": 1148.09, "word": " فيه", "probability": 0.956787109375}, {"start": 1148.09, "end": 1148.67, "word": " stasis", "probability": 0.880126953125}, {"start": 1148.67, "end": 1150.05, "word": " معناته", "probability": 0.76220703125}, {"start": 1150.05, "end": 1150.31, "word": " فيه", "probability": 0.860107421875}, {"start": 1150.31, "end": 1151.11, "word": " stagnation", "probability": 0.97802734375}], "temperature": 1.0}, {"id": 42, "seek": 117845, "start": 1152.53, "end": 1178.45, "text": "معناته في activation لل coagulation cascade mechanism معناته في رد thrombus formation لأنه بنحكي على VEN وزي ما قلتلكوا بتبنى بناء وممكن تكون كبيرة جزء منها يصير ب dislodging ويروح إلى مكان أخر ويعمل pulmonary embolism ماشي من أعراض ال pulmonary embolism", "tokens": [2304, 3615, 8315, 47395, 8978, 24433, 24976, 598, 559, 2776, 50080, 7513, 20449, 8315, 47395, 8978, 12602, 3215, 739, 3548, 301, 11723, 5296, 33456, 3224, 44945, 5016, 4117, 1829, 15844, 691, 2195, 4032, 11622, 1829, 19446, 12174, 1211, 
2655, 23275, 14407, 39894, 3555, 1863, 7578, 44945, 16606, 4032, 2304, 43020, 6055, 30544, 9122, 3555, 48923, 10874, 11622, 38207, 9154, 11296, 7251, 9381, 13546, 4724, 43186, 378, 3249, 4032, 13546, 2407, 5016, 30731, 3714, 41361, 5551, 34740, 4032, 1829, 25957, 1211, 8331, 46386, 4605, 401, 1434, 3714, 33599, 1829, 9154, 5551, 3615, 2288, 46958, 2423, 8331, 46386, 4605, 401, 1434], "avg_logprob": -0.2514062511920929, "compression_ratio": 1.525, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1152.53, "end": 1153.01, "word": "معناته", "probability": 0.85009765625}, {"start": 1153.01, "end": 1153.31, "word": " في", "probability": 0.9443359375}, {"start": 1153.31, "end": 1154.19, "word": " activation", "probability": 0.9228515625}, {"start": 1154.19, "end": 1154.47, "word": " لل", "probability": 0.50927734375}, {"start": 1154.47, "end": 1155.05, "word": " coagulation", "probability": 0.7191975911458334}, {"start": 1155.05, "end": 1155.53, "word": " cascade", "probability": 0.81201171875}, {"start": 1155.53, "end": 1156.23, "word": " mechanism", "probability": 0.697265625}, {"start": 1156.23, "end": 1157.19, "word": " معناته", "probability": 0.7776692708333334}, {"start": 1157.19, "end": 1157.43, "word": " في", "probability": 0.95654296875}, {"start": 1157.43, "end": 1158.55, "word": " رد", "probability": 0.750244140625}, {"start": 1158.55, "end": 1159.11, "word": " thrombus", "probability": 0.8463541666666666}, {"start": 1159.11, "end": 1159.65, "word": " formation", "probability": 0.966796875}, {"start": 1159.65, "end": 1160.27, "word": " لأنه", "probability": 0.483154296875}, {"start": 1160.27, "end": 1160.57, "word": " بنحكي", "probability": 0.860107421875}, {"start": 1160.57, "end": 1160.73, "word": " على", "probability": 0.78125}, {"start": 1160.73, "end": 1161.33, "word": " VEN", "probability": 0.2047119140625}, {"start": 1161.33, "end": 1162.37, "word": " وزي", "probability": 0.8098958333333334}, {"start": 1162.37, "end": 1162.49, "word": " ما", 
"probability": 0.986328125}, {"start": 1162.49, "end": 1162.87, "word": " قلتلكوا", "probability": 0.87529296875}, {"start": 1162.87, "end": 1163.53, "word": " بتبنى", "probability": 0.8248291015625}, {"start": 1163.53, "end": 1164.21, "word": " بناء", "probability": 0.899169921875}, {"start": 1164.21, "end": 1165.07, "word": " وممكن", "probability": 0.8870442708333334}, {"start": 1165.07, "end": 1165.35, "word": " تكون", "probability": 0.99072265625}, {"start": 1165.35, "end": 1165.77, "word": " كبيرة", "probability": 0.92822265625}, {"start": 1165.77, "end": 1166.11, "word": " جزء", "probability": 0.87060546875}, {"start": 1166.11, "end": 1166.49, "word": " منها", "probability": 0.989990234375}, {"start": 1166.49, "end": 1166.77, "word": " يصير", "probability": 0.7517903645833334}, {"start": 1166.77, "end": 1166.87, "word": " ب", "probability": 0.1146240234375}, {"start": 1166.87, "end": 1167.81, "word": " dislodging", "probability": 0.771484375}, {"start": 1167.81, "end": 1168.49, "word": " ويروح", "probability": 0.9478759765625}, {"start": 1168.49, "end": 1168.67, "word": " إلى", "probability": 0.67822265625}, {"start": 1168.67, "end": 1169.07, "word": " مكان", "probability": 0.988525390625}, {"start": 1169.07, "end": 1169.55, "word": " أخر", "probability": 0.71435546875}, {"start": 1169.55, "end": 1170.57, "word": " ويعمل", "probability": 0.9212646484375}, {"start": 1170.57, "end": 1171.05, "word": " pulmonary", "probability": 0.68994140625}, {"start": 1171.05, "end": 1171.93, "word": " embolism", "probability": 0.94775390625}, {"start": 1171.93, "end": 1174.35, "word": " ماشي", "probability": 0.84033203125}, {"start": 1174.35, "end": 1175.67, "word": " من", "probability": 0.488525390625}, {"start": 1175.67, "end": 1177.17, "word": " أعراض", "probability": 0.9637451171875}, {"start": 1177.17, "end": 1177.33, "word": " ال", "probability": 0.787109375}, {"start": 1177.33, "end": 1177.79, "word": " pulmonary", "probability": 0.84033203125}, {"start": 1177.79, 
"end": 1178.45, "word": " embolism", "probability": 0.9763997395833334}], "temperature": 1.0}, {"id": 43, "seek": 120517, "start": 1179.51, "end": 1205.17, "text": "انه يبدأ with acute chest pain ثم breathlessness with shock فبطل يقدر يتنفس ممكن يخش فيه في shock cough and hemopsis يعني ممكن يكون سعر فظيع جدا مصحوب الدم", "tokens": [7649, 3224, 7251, 44510, 10721, 365, 24390, 7443, 1822, 38637, 2304, 6045, 26663, 365, 5588, 6156, 3555, 9566, 1211, 7251, 28543, 2288, 7251, 2655, 1863, 36178, 3714, 43020, 7251, 9778, 8592, 8978, 3224, 8978, 5588, 22777, 293, 8636, 3370, 271, 37495, 22653, 3714, 43020, 7251, 30544, 8608, 3615, 2288, 6156, 19913, 40228, 10874, 28259, 3714, 9381, 5016, 37746, 32748, 2304], "avg_logprob": -0.2415471243076637, "compression_ratio": 1.3253012048192772, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1179.51, "end": 1179.81, "word": "انه", "probability": 0.5140380859375}, {"start": 1179.81, "end": 1180.39, "word": " يبدأ", "probability": 0.9900716145833334}, {"start": 1180.39, "end": 1180.87, "word": " with", "probability": 0.7685546875}, {"start": 1180.87, "end": 1181.73, "word": " acute", "probability": 0.9873046875}, {"start": 1181.73, "end": 1182.75, "word": " chest", "probability": 0.83935546875}, {"start": 1182.75, "end": 1184.09, "word": " pain", "probability": 0.8984375}, {"start": 1184.09, "end": 1186.75, "word": " ثم", "probability": 0.889892578125}, {"start": 1186.75, "end": 1187.59, "word": " breathlessness", "probability": 0.68505859375}, {"start": 1187.59, "end": 1189.07, "word": " with", "probability": 0.89306640625}, {"start": 1189.07, "end": 1189.63, "word": " shock", "probability": 0.8095703125}, {"start": 1189.63, "end": 1190.85, "word": " فبطل", "probability": 0.654296875}, {"start": 1190.85, "end": 1191.09, "word": " يقدر", "probability": 0.9869791666666666}, {"start": 1191.09, "end": 1191.67, "word": " يتنفس", "probability": 0.9775390625}, {"start": 1191.67, "end": 1192.63, "word": " ممكن", "probability": 
0.84814453125}, {"start": 1192.63, "end": 1193.01, "word": " يخش", "probability": 0.8942057291666666}, {"start": 1193.01, "end": 1193.47, "word": " فيه", "probability": 0.795166015625}, {"start": 1193.47, "end": 1194.47, "word": " في", "probability": 0.445556640625}, {"start": 1194.47, "end": 1194.89, "word": " shock", "probability": 0.79443359375}, {"start": 1194.89, "end": 1196.41, "word": " cough", "probability": 0.365234375}, {"start": 1196.41, "end": 1196.97, "word": " and", "probability": 0.93701171875}, {"start": 1196.97, "end": 1198.21, "word": " hemopsis", "probability": 0.7119140625}, {"start": 1198.21, "end": 1199.71, "word": " يعني", "probability": 0.901123046875}, {"start": 1199.71, "end": 1200.35, "word": " ممكن", "probability": 0.888671875}, {"start": 1200.35, "end": 1201.57, "word": " يكون", "probability": 0.985107421875}, {"start": 1201.57, "end": 1202.89, "word": " سعر", "probability": 0.6544596354166666}, {"start": 1202.89, "end": 1203.27, "word": " فظيع", "probability": 0.8131510416666666}, {"start": 1203.27, "end": 1203.67, "word": " جدا", "probability": 0.984619140625}, {"start": 1203.67, "end": 1204.73, "word": " مصحوب", "probability": 0.849365234375}, {"start": 1204.73, "end": 1205.17, "word": " الدم", "probability": 0.783447265625}], "temperature": 1.0}, {"id": 44, "seek": 123417, "start": 1207.37, "end": 1234.17, "text": "and it could be or maybe fata maybe fata وإزاي ما أنتوا شايفين برضه من مظاهرها بشكل عام في pain في swelling في discoloration في warmth in the affected area لكن هذه مظاهر غير خاص يعني عامة لإن هو مشترك مع كل ال", "tokens": [474, 309, 727, 312, 420, 1310, 283, 3274, 1310, 283, 3274, 4032, 28814, 11622, 47302, 19446, 14739, 2655, 14407, 13412, 995, 33911, 9957, 4724, 43042, 3224, 9154, 3714, 19913, 40294, 2288, 11296, 4724, 8592, 28820, 6225, 10943, 8978, 1822, 8978, 33127, 8978, 2983, 401, 9357, 8978, 24737, 294, 264, 8028, 1859, 44381, 29538, 3714, 19913, 40294, 2288, 32771, 13546, 16490, 33546, 37495, 22653, 6225, 10943, 
3660, 5296, 28814, 1863, 31439, 37893, 2655, 31747, 20449, 28242, 2423], "avg_logprob": -0.27597403216671634, "compression_ratio": 1.4439024390243902, "no_speech_prob": 2.384185791015625e-06, "words": [{"start": 1207.37, "end": 1207.93, "word": "and", "probability": 0.181396484375}, {"start": 1207.93, "end": 1208.21, "word": " it", "probability": 0.89404296875}, {"start": 1208.21, "end": 1208.53, "word": " could", "probability": 0.88916015625}, {"start": 1208.53, "end": 1209.03, "word": " be", "probability": 0.947265625}, {"start": 1209.03, "end": 1209.57, "word": " or", "probability": 0.802734375}, {"start": 1209.57, "end": 1210.01, "word": " maybe", "probability": 0.8603515625}, {"start": 1210.01, "end": 1210.59, "word": " fata", "probability": 0.49444580078125}, {"start": 1210.59, "end": 1210.99, "word": " maybe", "probability": 0.1287841796875}, {"start": 1210.99, "end": 1212.13, "word": " fata", "probability": 0.679443359375}, {"start": 1212.13, "end": 1213.37, "word": " وإزاي", "probability": 0.636932373046875}, {"start": 1213.37, "end": 1213.47, "word": " ما", "probability": 0.6865234375}, {"start": 1213.47, "end": 1213.59, "word": " أنتوا", "probability": 0.6630045572916666}, {"start": 1213.59, "end": 1214.15, "word": " شايفين", "probability": 0.9794921875}, {"start": 1214.15, "end": 1216.41, "word": " برضه", "probability": 0.9207356770833334}, {"start": 1216.41, "end": 1216.73, "word": " من", "probability": 0.96826171875}, {"start": 1216.73, "end": 1217.51, "word": " مظاهرها", "probability": 0.98759765625}, {"start": 1217.51, "end": 1218.11, "word": " بشكل", "probability": 0.9417317708333334}, {"start": 1218.11, "end": 1218.53, "word": " عام", "probability": 0.9912109375}, {"start": 1218.53, "end": 1219.09, "word": " في", "probability": 0.81884765625}, {"start": 1219.09, "end": 1219.61, "word": " pain", "probability": 0.93896484375}, {"start": 1219.61, "end": 1220.61, "word": " في", "probability": 0.84521484375}, {"start": 1220.61, "end": 1221.27, "word": 
" swelling", "probability": 0.9638671875}, {"start": 1221.27, "end": 1222.09, "word": " في", "probability": 0.88623046875}, {"start": 1222.09, "end": 1223.21, "word": " discoloration", "probability": 0.7830403645833334}, {"start": 1223.21, "end": 1224.03, "word": " في", "probability": 0.95849609375}, {"start": 1224.03, "end": 1224.95, "word": " warmth", "probability": 0.990234375}, {"start": 1224.95, "end": 1225.29, "word": " in", "probability": 0.93408203125}, {"start": 1225.29, "end": 1225.41, "word": " the", "probability": 0.86962890625}, {"start": 1225.41, "end": 1225.83, "word": " affected", "probability": 0.83837890625}, {"start": 1225.83, "end": 1226.39, "word": " area", "probability": 0.87060546875}, {"start": 1226.39, "end": 1227.31, "word": " لكن", "probability": 0.9130859375}, {"start": 1227.31, "end": 1228.25, "word": " هذه", "probability": 0.89306640625}, {"start": 1228.25, "end": 1229.21, "word": " مظاهر", "probability": 0.954833984375}, {"start": 1229.21, "end": 1229.93, "word": " غير", "probability": 0.9912109375}, {"start": 1229.93, "end": 1230.47, "word": " خاص", "probability": 0.980712890625}, {"start": 1230.47, "end": 1231.43, "word": " يعني", "probability": 0.838623046875}, {"start": 1231.43, "end": 1232.55, "word": " عامة", "probability": 0.70458984375}, {"start": 1232.55, "end": 1232.89, "word": " لإن", "probability": 0.63134765625}, {"start": 1232.89, "end": 1232.95, "word": " هو", "probability": 0.65771484375}, {"start": 1232.95, "end": 1233.31, "word": " مشترك", "probability": 0.9388020833333334}, {"start": 1233.31, "end": 1233.55, "word": " مع", "probability": 0.568359375}, {"start": 1233.55, "end": 1234.01, "word": " كل", "probability": 0.61279296875}, {"start": 1234.01, "end": 1234.17, "word": " ال", "probability": 0.90234375}], "temperature": 1.0}, {"id": 45, "seek": 124931, "start": 1236.73, "end": 1249.31, "text": "مش هيك ال sign of inflammation؟ اه وبالتالي لابد من تحديتها شايفين اللي قالوها يا شباب؟ شايفين كيف المظهر العام لرجل هذه 
فيها DP وهذه فيها جلطة؟", "tokens": [2304, 8592, 39896, 4117, 2423, 1465, 295, 21613, 22807, 1975, 3224, 46599, 6027, 2655, 6027, 1829, 5296, 16758, 3215, 9154, 6055, 5016, 16254, 2655, 11296, 13412, 995, 33911, 9957, 13672, 1829, 50239, 2407, 11296, 35186, 13412, 3555, 16758, 22807, 13412, 995, 33911, 9957, 9122, 33911, 9673, 19913, 3224, 2288, 18863, 10943, 5296, 47341, 1211, 29538, 8978, 11296, 42796, 37037, 24192, 8978, 11296, 10874, 1211, 9566, 3660, 22807], "avg_logprob": -0.25045954436063766, "compression_ratio": 1.5220125786163523, "no_speech_prob": 5.960464477539062e-07, "words": [{"start": 1236.73, "end": 1236.95, "word": "مش", "probability": 0.511962890625}, {"start": 1236.95, "end": 1237.11, "word": " هيك", "probability": 0.92041015625}, {"start": 1237.11, "end": 1237.21, "word": " ال", "probability": 0.352294921875}, {"start": 1237.21, "end": 1237.41, "word": " sign", "probability": 0.72509765625}, {"start": 1237.41, "end": 1237.57, "word": " of", "probability": 0.97705078125}, {"start": 1237.57, "end": 1238.37, "word": " inflammation؟", "probability": 0.6551513671875}, {"start": 1238.37, "end": 1239.33, "word": " اه", "probability": 0.6328125}, {"start": 1239.33, "end": 1240.41, "word": " وبالتالي", "probability": 0.89990234375}, {"start": 1240.41, "end": 1241.45, "word": " لابد", "probability": 0.5855305989583334}, {"start": 1241.45, "end": 1241.63, "word": " من", "probability": 0.947265625}, {"start": 1241.63, "end": 1242.43, "word": " تحديتها", "probability": 0.86083984375}, {"start": 1242.43, "end": 1243.19, "word": " شايفين", "probability": 0.8583984375}, {"start": 1243.19, "end": 1243.33, "word": " اللي", "probability": 0.952392578125}, {"start": 1243.33, "end": 1243.65, "word": " قالوها", "probability": 0.8683268229166666}, {"start": 1243.65, "end": 1243.81, "word": " يا", "probability": 0.607421875}, {"start": 1243.81, "end": 1245.17, "word": " شباب؟", "probability": 0.871826171875}, {"start": 1245.17, "end": 1245.51, "word": " شايفين", 
"probability": 0.98095703125}, {"start": 1245.51, "end": 1245.77, "word": " كيف", "probability": 0.974365234375}, {"start": 1245.77, "end": 1246.29, "word": " المظهر", "probability": 0.92431640625}, {"start": 1246.29, "end": 1246.65, "word": " العام", "probability": 0.988037109375}, {"start": 1246.65, "end": 1247.53, "word": " لرجل", "probability": 0.8606770833333334}, {"start": 1247.53, "end": 1247.81, "word": " هذه", "probability": 0.1278076171875}, {"start": 1247.81, "end": 1248.11, "word": " فيها", "probability": 0.972900390625}, {"start": 1248.11, "end": 1248.37, "word": " DP", "probability": 0.60791015625}, {"start": 1248.37, "end": 1248.63, "word": " وهذه", "probability": 0.67724609375}, {"start": 1248.63, "end": 1248.81, "word": " فيها", "probability": 0.939208984375}, {"start": 1248.81, "end": 1249.31, "word": " جلطة؟", "probability": 0.96103515625}], "temperature": 1.0}, {"id": 46, "seek": 127811, "start": 1250.9, "end": 1278.12, "text": "هيصار فيه discoloration ولا لأ؟ لأ انها تتغير للأسوأ طبعا هذه قبل ما يصير فيه discoloration يصير فيه pain و swallowing و discoloration و warmth اللي هي ال sign of inflammation طبعا هذه ممكن يؤدي في نهاية لإنه يصير فيه inflammation و ممكن", "tokens": [3224, 1829, 9381, 9640, 8978, 3224, 2983, 401, 9357, 49429, 5296, 10721, 22807, 5296, 10721, 16472, 11296, 6055, 2655, 17082, 13546, 24976, 10721, 3794, 2407, 10721, 23032, 3555, 3615, 995, 29538, 12174, 36150, 19446, 7251, 9381, 13546, 8978, 3224, 2983, 401, 9357, 7251, 9381, 13546, 8978, 3224, 1822, 4032, 1693, 336, 9637, 4032, 2983, 401, 9357, 4032, 24737, 13672, 1829, 39896, 2423, 1465, 295, 21613, 23032, 3555, 3615, 995, 29538, 3714, 43020, 7251, 33604, 16254, 8978, 8717, 11296, 10632, 5296, 28814, 1863, 3224, 7251, 9381, 13546, 8978, 3224, 21613, 4032, 3714, 43020], "avg_logprob": -0.2399193529159792, "compression_ratio": 1.805263157894737, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 1250.9, "end": 1251.74, "word": "هيصار", "probability": 
0.746337890625}, {"start": 1251.74, "end": 1251.94, "word": " فيه", "probability": 0.8330078125}, {"start": 1251.94, "end": 1252.58, "word": " discoloration", "probability": 0.7620442708333334}, {"start": 1252.58, "end": 1252.82, "word": " ولا", "probability": 0.81494140625}, {"start": 1252.82, "end": 1253.64, "word": " لأ؟", "probability": 0.693115234375}, {"start": 1253.64, "end": 1253.94, "word": " لأ", "probability": 0.579833984375}, {"start": 1253.94, "end": 1254.1, "word": " انها", "probability": 0.763916015625}, {"start": 1254.1, "end": 1254.38, "word": " تتغير", "probability": 0.775634765625}, {"start": 1254.38, "end": 1254.98, "word": " للأسوأ", "probability": 0.812890625}, {"start": 1254.98, "end": 1255.8, "word": " طبعا", "probability": 0.911376953125}, {"start": 1255.8, "end": 1256.12, "word": " هذه", "probability": 0.324951171875}, {"start": 1256.12, "end": 1256.48, "word": " قبل", "probability": 0.70166015625}, {"start": 1256.48, "end": 1256.62, "word": " ما", "probability": 0.95263671875}, {"start": 1256.62, "end": 1256.94, "word": " يصير", "probability": 0.9259440104166666}, {"start": 1256.94, "end": 1257.14, "word": " فيه", "probability": 0.91943359375}, {"start": 1257.14, "end": 1257.98, "word": " discoloration", "probability": 0.90673828125}, {"start": 1257.98, "end": 1259.04, "word": " يصير", "probability": 0.795166015625}, {"start": 1259.04, "end": 1259.3, "word": " فيه", "probability": 0.8515625}, {"start": 1259.3, "end": 1259.82, "word": " pain", "probability": 0.97021484375}, {"start": 1259.82, "end": 1260.78, "word": " و", "probability": 0.9267578125}, {"start": 1260.78, "end": 1261.54, "word": " swallowing", "probability": 0.690185546875}, {"start": 1261.54, "end": 1262.22, "word": " و", "probability": 0.96923828125}, {"start": 1262.22, "end": 1263.28, "word": " discoloration", "probability": 0.84326171875}, {"start": 1263.28, "end": 1264.18, "word": " و", "probability": 0.9931640625}, {"start": 1264.18, "end": 1264.96, "word": " warmth", 
"probability": 0.95263671875}, {"start": 1264.96, "end": 1265.26, "word": " اللي", "probability": 0.5164794921875}, {"start": 1265.26, "end": 1265.38, "word": " هي", "probability": 0.85302734375}, {"start": 1265.38, "end": 1265.52, "word": " ال", "probability": 0.48583984375}, {"start": 1265.52, "end": 1265.72, "word": " sign", "probability": 0.80517578125}, {"start": 1265.72, "end": 1265.9, "word": " of", "probability": 0.9609375}, {"start": 1265.9, "end": 1267.16, "word": " inflammation", "probability": 0.78271484375}, {"start": 1267.16, "end": 1269.52, "word": " طبعا", "probability": 0.9482421875}, {"start": 1269.52, "end": 1269.78, "word": " هذه", "probability": 0.81787109375}, {"start": 1269.78, "end": 1270.18, "word": " ممكن", "probability": 0.9560546875}, {"start": 1270.18, "end": 1270.62, "word": " يؤدي", "probability": 0.6931966145833334}, {"start": 1270.62, "end": 1270.76, "word": " في", "probability": 0.822265625}, {"start": 1270.76, "end": 1271.18, "word": " نهاية", "probability": 0.83056640625}, {"start": 1271.18, "end": 1271.58, "word": " لإنه", "probability": 0.7572021484375}, {"start": 1271.58, "end": 1271.88, "word": " يصير", "probability": 0.94970703125}, {"start": 1271.88, "end": 1272.32, "word": " فيه", "probability": 0.896484375}, {"start": 1272.32, "end": 1275.08, "word": " inflammation", "probability": 0.8828125}, {"start": 1275.08, "end": 1277.36, "word": " و", "probability": 0.92724609375}, {"start": 1277.36, "end": 1278.12, "word": " ممكن", "probability": 0.843017578125}], "temperature": 1.0}, {"id": 47, "seek": 130595, "start": 1278.75, "end": 1305.95, "text": "تؤدي إلى better الرجل إذا صارت necrotic لأنه .. إيش بيصير؟ بغض النظر عن البكتيريا، بيصير فيه ischemia في هذه المنطقة و .. ممكن .. 
اه طبعا يصير necrosis و طبعا هذه الصورة تفصيلية سبب، هي الدم المفروض راجع، يرجع بشكل طبيعي، ماشي؟ ايوة هنا sorry، نبدأ من هنا، الدم راجع بشكل طبيعي", "tokens": [2655, 33604, 16254, 30731, 1101, 34892, 7435, 1211, 11933, 15730, 20328, 9640, 2655, 408, 10757, 9411, 5296, 33456, 3224, 4386, 11933, 1829, 8592, 4724, 1829, 9381, 13546, 22807, 4724, 17082, 11242, 28239, 19913, 2288, 18871, 29739, 4117, 2655, 13546, 25528, 12399, 4724, 1829, 9381, 13546, 8978, 3224, 307, 339, 14058, 8978, 29538, 9673, 1863, 9566, 28671, 4032, 4386, 3714, 43020, 4386, 1975, 3224, 23032, 3555, 3615, 995, 7251, 9381, 13546, 408, 42471, 271, 4032, 23032, 3555, 3615, 995, 29538, 31767, 13063, 3660, 6055, 5172, 9381, 26895, 10632, 8608, 3555, 3555, 12399, 39896, 32748, 2304, 9673, 5172, 32887, 11242, 12602, 26108, 3615, 12399, 7251, 47341, 3615, 4724, 8592, 28820, 23032, 21292, 3615, 1829, 12399, 3714, 33599, 1829, 22807, 1975, 1829, 2407, 3660, 34105, 2597, 12399, 8717, 44510, 10721, 9154, 34105, 12399, 32748, 2304, 12602, 26108, 3615, 4724, 8592, 28820, 23032, 21292, 3615, 1829], "avg_logprob": -0.18509615947316577, "compression_ratio": 1.7074074074074075, "no_speech_prob": 1.0728836059570312e-06, "words": [{"start": 1278.75, "end": 1279.27, "word": "تؤدي", "probability": 0.8741861979166666}, {"start": 1279.27, "end": 1279.71, "word": " إلى", "probability": 0.306640625}, {"start": 1279.71, "end": 1280.55, "word": " better", "probability": 0.5556640625}, {"start": 1280.55, "end": 1281.55, "word": " الرجل", "probability": 0.9143880208333334}, {"start": 1281.55, "end": 1281.83, "word": " إذا", "probability": 0.845947265625}, {"start": 1281.83, "end": 1282.21, "word": " صارت", "probability": 0.91845703125}, {"start": 1282.21, "end": 1282.71, "word": " necrotic", "probability": 0.8595377604166666}, {"start": 1282.71, "end": 1283.25, "word": " لأنه", "probability": 0.8522135416666666}, {"start": 1283.25, "end": 1283.47, "word": " ..", "probability": 0.1851806640625}, {"start": 1283.47, "end": 
1284.25, "word": " إيش", "probability": 0.8805338541666666}, {"start": 1284.25, "end": 1285.05, "word": " بيصير؟", "probability": 0.9177734375}, {"start": 1285.05, "end": 1286.59, "word": " بغض", "probability": 0.7100423177083334}, {"start": 1286.59, "end": 1286.91, "word": " النظر", "probability": 0.9132486979166666}, {"start": 1286.91, "end": 1287.01, "word": " عن", "probability": 0.7470703125}, {"start": 1287.01, "end": 1287.61, "word": " البكتيريا،", "probability": 0.8466389973958334}, {"start": 1287.61, "end": 1287.91, "word": " بيصير", "probability": 0.974609375}, {"start": 1287.91, "end": 1288.69, "word": " فيه", "probability": 0.920166015625}, {"start": 1288.69, "end": 1289.49, "word": " ischemia", "probability": 0.7610677083333334}, {"start": 1289.49, "end": 1289.71, "word": " في", "probability": 0.9345703125}, {"start": 1289.71, "end": 1289.97, "word": " هذه", "probability": 0.95556640625}, {"start": 1289.97, "end": 1290.71, "word": " المنطقة", "probability": 0.9906005859375}, {"start": 1290.71, "end": 1291.77, "word": " و", "probability": 0.8037109375}, {"start": 1291.77, "end": 1292.81, "word": " ..", "probability": 0.4501953125}, {"start": 1292.81, "end": 1293.49, "word": " ممكن", "probability": 0.88720703125}, {"start": 1293.49, "end": 1293.87, "word": " ..", "probability": 0.5322265625}, {"start": 1293.87, "end": 1294.17, "word": " اه", "probability": 0.669189453125}, {"start": 1294.17, "end": 1294.45, "word": " طبعا", "probability": 0.9508056640625}, {"start": 1294.45, "end": 1294.69, "word": " يصير", "probability": 0.7980143229166666}, {"start": 1294.69, "end": 1295.27, "word": " necrosis", "probability": 0.6125895182291666}, {"start": 1295.27, "end": 1296.19, "word": " و", "probability": 0.7109375}, {"start": 1296.19, "end": 1296.51, "word": " طبعا", "probability": 0.96728515625}, {"start": 1296.51, "end": 1296.67, "word": " هذه", "probability": 0.8974609375}, {"start": 1296.67, "end": 1296.93, "word": " الصورة", "probability": 0.8828125}, 
{"start": 1296.93, "end": 1297.43, "word": " تفصيلية", "probability": 0.9728515625}, {"start": 1297.43, "end": 1297.87, "word": " سبب،", "probability": 0.830322265625}, {"start": 1297.87, "end": 1297.99, "word": " هي", "probability": 0.9013671875}, {"start": 1297.99, "end": 1298.27, "word": " الدم", "probability": 0.984375}, {"start": 1298.27, "end": 1298.65, "word": " المفروض", "probability": 0.978759765625}, {"start": 1298.65, "end": 1299.81, "word": " راجع،", "probability": 0.84649658203125}, {"start": 1299.81, "end": 1300.11, "word": " يرجع", "probability": 0.7351888020833334}, {"start": 1300.11, "end": 1300.47, "word": " بشكل", "probability": 0.9845377604166666}, {"start": 1300.47, "end": 1301.63, "word": " طبيعي،", "probability": 0.90390625}, {"start": 1301.63, "end": 1302.37, "word": " ماشي؟", "probability": 0.90283203125}, {"start": 1302.37, "end": 1302.65, "word": " ايوة", "probability": 0.7952880859375}, {"start": 1302.65, "end": 1302.95, "word": " هنا", "probability": 0.43408203125}, {"start": 1302.95, "end": 1303.65, "word": " sorry،", "probability": 0.51416015625}, {"start": 1303.65, "end": 1303.99, "word": " نبدأ", "probability": 0.9259440104166666}, {"start": 1303.99, "end": 1304.11, "word": " من", "probability": 0.9775390625}, {"start": 1304.11, "end": 1304.43, "word": " هنا،", "probability": 0.908935546875}, {"start": 1304.43, "end": 1304.75, "word": " الدم", "probability": 0.981201171875}, {"start": 1304.75, "end": 1305.13, "word": " راجع", "probability": 0.99169921875}, {"start": 1305.13, "end": 1305.43, "word": " بشكل", "probability": 0.99462890625}, {"start": 1305.43, "end": 1305.95, "word": " طبيعي", "probability": 0.986572265625}], "temperature": 1.0}, {"id": 48, "seek": 133599, "start": 1307.57, "end": 1335.99, "text": "من الوعاية ده من ال vein ماشي لكن مرات شو اللي بيصير بيبدأ بيصير فيه حجز لدم في هذه المنطقة ممكن يكون جلطة جلطة هذه ممكن هذه جلطة هذه عارفة عن جلطة شباب this lodge يعني ايش يعني sorry lodge ملزقة في جدار الوعاية ده و هذا 
الباقي اللي هو اللي مش متجلط هيه قاعد طالب", "tokens": [27842, 2423, 45367, 995, 10632, 11778, 3224, 9154, 2423, 30669, 3714, 33599, 1829, 44381, 3714, 2288, 9307, 13412, 2407, 13672, 1829, 4724, 1829, 9381, 13546, 4724, 1829, 44510, 10721, 4724, 1829, 9381, 13546, 8978, 3224, 11331, 7435, 11622, 5296, 40448, 8978, 29538, 9673, 1863, 9566, 28671, 3714, 43020, 7251, 30544, 10874, 1211, 9566, 3660, 10874, 1211, 9566, 3660, 29538, 3714, 43020, 29538, 10874, 1211, 9566, 3660, 29538, 6225, 9640, 5172, 3660, 18871, 10874, 1211, 9566, 3660, 13412, 3555, 16758, 341, 47706, 37495, 22653, 1975, 1829, 8592, 37495, 22653, 2597, 47706, 3714, 1211, 11622, 28671, 8978, 10874, 3215, 9640, 2423, 45367, 995, 10632, 11778, 3224, 4032, 23758, 29739, 995, 38436, 13672, 1829, 31439, 13672, 1829, 37893, 44650, 7435, 1211, 9566, 39896, 3224, 12174, 995, 22488, 23032, 6027, 3555], "avg_logprob": -0.23437500651925802, "compression_ratio": 1.973568281938326, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1307.57, "end": 1307.99, "word": "من", "probability": 0.76416015625}, {"start": 1307.99, "end": 1308.51, "word": " الوعاية", "probability": 0.738525390625}, {"start": 1308.51, "end": 1308.61, "word": " ده", "probability": 0.927490234375}, {"start": 1308.61, "end": 1308.75, "word": " من", "probability": 0.67529296875}, {"start": 1308.75, "end": 1308.91, "word": " ال", "probability": 0.95556640625}, {"start": 1308.91, "end": 1309.13, "word": " vein", "probability": 0.765625}, {"start": 1309.13, "end": 1309.97, "word": " ماشي", "probability": 0.736083984375}, {"start": 1309.97, "end": 1311.51, "word": " لكن", "probability": 0.62646484375}, {"start": 1311.51, "end": 1312.57, "word": " مرات", "probability": 0.9166666666666666}, {"start": 1312.57, "end": 1312.75, "word": " شو", "probability": 0.7841796875}, {"start": 1312.75, "end": 1312.85, "word": " اللي", "probability": 0.900390625}, {"start": 1312.85, "end": 1313.35, "word": " بيصير", "probability": 0.962890625}, {"start": 
1313.35, "end": 1315.09, "word": " بيبدأ", "probability": 0.9437255859375}, {"start": 1315.09, "end": 1316.01, "word": " بيصير", "probability": 0.80194091796875}, {"start": 1316.01, "end": 1316.27, "word": " فيه", "probability": 0.647705078125}, {"start": 1316.27, "end": 1316.69, "word": " حجز", "probability": 0.98681640625}, {"start": 1316.69, "end": 1317.45, "word": " لدم", "probability": 0.769287109375}, {"start": 1317.45, "end": 1317.65, "word": " في", "probability": 0.8935546875}, {"start": 1317.65, "end": 1317.85, "word": " هذه", "probability": 0.9072265625}, {"start": 1317.85, "end": 1318.35, "word": " المنطقة", "probability": 0.9844970703125}, {"start": 1318.35, "end": 1319.03, "word": " ممكن", "probability": 0.949462890625}, {"start": 1319.03, "end": 1319.29, "word": " يكون", "probability": 0.92431640625}, {"start": 1319.29, "end": 1319.71, "word": " جلطة", "probability": 0.959716796875}, {"start": 1319.71, "end": 1320.05, "word": " جلطة", "probability": 0.83599853515625}, {"start": 1320.05, "end": 1320.21, "word": " هذه", "probability": 0.420166015625}, {"start": 1320.21, "end": 1320.67, "word": " ممكن", "probability": 0.982666015625}, {"start": 1320.67, "end": 1321.47, "word": " هذه", "probability": 0.12164306640625}, {"start": 1321.47, "end": 1321.89, "word": " جلطة", "probability": 0.8955078125}, {"start": 1321.89, "end": 1322.07, "word": " هذه", "probability": 0.64013671875}, {"start": 1322.07, "end": 1322.33, "word": " عارفة", "probability": 0.600555419921875}, {"start": 1322.33, "end": 1322.45, "word": " عن", "probability": 0.982421875}, {"start": 1322.45, "end": 1322.73, "word": " جلطة", "probability": 0.9925537109375}, {"start": 1322.73, "end": 1323.45, "word": " شباب", "probability": 0.8580729166666666}, {"start": 1323.45, "end": 1324.99, "word": " this", "probability": 0.06304931640625}, {"start": 1324.99, "end": 1325.37, "word": " lodge", "probability": 0.70068359375}, {"start": 1325.37, "end": 1325.63, "word": " يعني", "probability": 
0.9560546875}, {"start": 1325.63, "end": 1325.87, "word": " ايش", "probability": 0.80908203125}, {"start": 1325.87, "end": 1326.55, "word": " يعني", "probability": 0.765869140625}, {"start": 1326.55, "end": 1327.05, "word": " sorry", "probability": 0.65185546875}, {"start": 1327.05, "end": 1327.61, "word": " lodge", "probability": 0.869140625}, {"start": 1327.61, "end": 1328.83, "word": " ملزقة", "probability": 0.829833984375}, {"start": 1328.83, "end": 1328.99, "word": " في", "probability": 0.9345703125}, {"start": 1328.99, "end": 1329.31, "word": " جدار", "probability": 0.9698893229166666}, {"start": 1329.31, "end": 1329.67, "word": " الوعاية", "probability": 0.905029296875}, {"start": 1329.67, "end": 1329.85, "word": " ده", "probability": 0.982666015625}, {"start": 1329.85, "end": 1330.83, "word": " و", "probability": 0.42626953125}, {"start": 1330.83, "end": 1332.23, "word": " هذا", "probability": 0.6875}, {"start": 1332.23, "end": 1332.85, "word": " الباقي", "probability": 0.6338704427083334}, {"start": 1332.85, "end": 1333.35, "word": " اللي", "probability": 0.94677734375}, {"start": 1333.35, "end": 1333.65, "word": " هو", "probability": 0.93505859375}, {"start": 1333.65, "end": 1334.19, "word": " اللي", "probability": 0.910400390625}, {"start": 1334.19, "end": 1334.37, "word": " مش", "probability": 0.9853515625}, {"start": 1334.37, "end": 1334.99, "word": " متجلط", "probability": 0.967041015625}, {"start": 1334.99, "end": 1335.29, "word": " هيه", "probability": 0.44427490234375}, {"start": 1335.29, "end": 1335.55, "word": " قاعد", "probability": 0.9226888020833334}, {"start": 1335.55, "end": 1335.99, "word": " طالب", "probability": 0.8450520833333334}], "temperature": 1.0}, {"id": 49, "seek": 136610, "start": 1337.36, "end": 1366.1, "text": "بس خفيف الجزء من الجلطة هذه ممكن يسموه dislodging و يطلع ينفسه ويبقى ممشي طبعا بجزء صغير طبعا بيبقى يمشي يمشي يمشي لغاية مثلا لو أدم هو أصغر منه فبسكره صح؟ ناحظوا كمان يا شباب شايفين الاختلاف بين الرجلتين هدول ماشي واحدة 
normal مش فيها جلطة والتانية فيها جلطة شايفين ال ..", "tokens": [3555, 3794, 16490, 5172, 33911, 25724, 11622, 38207, 9154, 25724, 1211, 9566, 3660, 29538, 3714, 43020, 7251, 38251, 2407, 3224, 43186, 378, 3249, 4032, 7251, 9566, 1211, 3615, 7251, 1863, 36178, 3224, 4032, 1829, 3555, 4587, 7578, 3714, 2304, 8592, 1829, 23032, 3555, 3615, 995, 4724, 7435, 11622, 38207, 20328, 17082, 13546, 23032, 3555, 3615, 995, 4724, 1829, 3555, 4587, 7578, 7251, 2304, 8592, 1829, 7251, 2304, 8592, 1829, 7251, 2304, 8592, 1829, 5296, 17082, 995, 10632, 50113, 15040, 45164, 5551, 40448, 31439, 5551, 9381, 17082, 2288, 9154, 3224, 6156, 3555, 3794, 4117, 2288, 3224, 20328, 5016, 22807, 8717, 995, 5016, 19913, 14407, 9122, 2304, 7649, 35186, 13412, 3555, 16758, 13412, 995, 33911, 9957, 2423, 47283, 2655, 15040, 5172, 49374, 34892, 7435, 1211, 2655, 9957, 8032, 3215, 12610, 3714, 33599, 1829, 36764, 24401, 3660, 2710, 37893, 8978, 11296, 10874, 1211, 9566, 3660, 16070, 2655, 7649, 10632, 8978, 11296, 10874, 1211, 9566, 3660, 13412, 995, 33911, 9957, 2423, 4386], "avg_logprob": -0.17865566693761814, "compression_ratio": 1.833976833976834, "no_speech_prob": 1.4901161193847656e-06, "words": [{"start": 1337.36, "end": 1337.68, "word": "بس", "probability": 0.881103515625}, {"start": 1337.68, "end": 1338.16, "word": " خفيف", "probability": 0.8294270833333334}, {"start": 1338.16, "end": 1339.4, "word": " الجزء", "probability": 0.6001180013020834}, {"start": 1339.4, "end": 1339.56, "word": " من", "probability": 0.96142578125}, {"start": 1339.56, "end": 1340.02, "word": " الجلطة", "probability": 0.921630859375}, {"start": 1340.02, "end": 1340.26, "word": " هذه", "probability": 0.50439453125}, {"start": 1340.26, "end": 1340.58, "word": " ممكن", "probability": 0.868408203125}, {"start": 1340.58, "end": 1341.02, "word": " يسموه", "probability": 0.72515869140625}, {"start": 1341.02, "end": 1341.52, "word": " dislodging", "probability": 0.66552734375}, {"start": 1341.52, "end": 1341.68, "word": " 
و", "probability": 0.95751953125}, {"start": 1341.68, "end": 1342.68, "word": " يطلع", "probability": 0.9066162109375}, {"start": 1342.68, "end": 1344.16, "word": " ينفسه", "probability": 0.76806640625}, {"start": 1344.16, "end": 1345.26, "word": " ويبقى", "probability": 0.7142578125}, {"start": 1345.26, "end": 1345.84, "word": " ممشي", "probability": 0.8031005859375}, {"start": 1345.84, "end": 1346.32, "word": " طبعا", "probability": 0.9427490234375}, {"start": 1346.32, "end": 1347.74, "word": " بجزء", "probability": 0.88671875}, {"start": 1347.74, "end": 1348.1, "word": " صغير", "probability": 0.9772135416666666}, {"start": 1348.1, "end": 1348.38, "word": " طبعا", "probability": 0.9644775390625}, {"start": 1348.38, "end": 1348.68, "word": " بيبقى", "probability": 0.84296875}, {"start": 1348.68, "end": 1349.04, "word": " يمشي", "probability": 0.9559326171875}, {"start": 1349.04, "end": 1349.36, "word": " يمشي", "probability": 0.9581298828125}, {"start": 1349.36, "end": 1349.6, "word": " يمشي", "probability": 0.9468994140625}, {"start": 1349.6, "end": 1349.92, "word": " لغاية", "probability": 0.9713134765625}, {"start": 1349.92, "end": 1350.22, "word": " مثلا", "probability": 0.926025390625}, {"start": 1350.22, "end": 1350.42, "word": " لو", "probability": 0.91162109375}, {"start": 1350.42, "end": 1350.7, "word": " أدم", "probability": 0.4720458984375}, {"start": 1350.7, "end": 1350.88, "word": " هو", "probability": 0.52392578125}, {"start": 1350.88, "end": 1351.56, "word": " أصغر", "probability": 0.958984375}, {"start": 1351.56, "end": 1352.08, "word": " منه", "probability": 0.98095703125}, {"start": 1352.08, "end": 1353.6, "word": " فبسكره", "probability": 0.8167317708333334}, {"start": 1353.6, "end": 1354.28, "word": " صح؟", "probability": 0.7750651041666666}, {"start": 1354.28, "end": 1356.24, "word": " ناحظوا", "probability": 0.74951171875}, {"start": 1356.24, "end": 1356.56, "word": " كمان", "probability": 0.9734700520833334}, {"start": 1356.56, "end": 
1356.68, "word": " يا", "probability": 0.82958984375}, {"start": 1356.68, "end": 1356.96, "word": " شباب", "probability": 0.9908854166666666}, {"start": 1356.96, "end": 1357.38, "word": " شايفين", "probability": 0.92138671875}, {"start": 1357.38, "end": 1357.98, "word": " الاختلاف", "probability": 0.89169921875}, {"start": 1357.98, "end": 1358.16, "word": " بين", "probability": 0.93798828125}, {"start": 1358.16, "end": 1358.78, "word": " الرجلتين", "probability": 0.9033203125}, {"start": 1358.78, "end": 1359.18, "word": " هدول", "probability": 0.8338216145833334}, {"start": 1359.18, "end": 1361.1, "word": " ماشي", "probability": 0.7803548177083334}, {"start": 1361.1, "end": 1361.9, "word": " واحدة", "probability": 0.947265625}, {"start": 1361.9, "end": 1362.5, "word": " normal", "probability": 0.6796875}, {"start": 1362.5, "end": 1362.8, "word": " مش", "probability": 0.74365234375}, {"start": 1362.8, "end": 1363.04, "word": " فيها", "probability": 0.98388671875}, {"start": 1363.04, "end": 1363.36, "word": " جلطة", "probability": 0.98876953125}, {"start": 1363.36, "end": 1363.68, "word": " والتانية", "probability": 0.8140869140625}, {"start": 1363.68, "end": 1363.94, "word": " فيها", "probability": 0.98291015625}, {"start": 1363.94, "end": 1364.82, "word": " جلطة", "probability": 0.9903564453125}, {"start": 1364.82, "end": 1365.46, "word": " شايفين", "probability": 0.990234375}, {"start": 1365.46, "end": 1365.92, "word": " ال", "probability": 0.9248046875}, {"start": 1365.92, "end": 1366.1, "word": " ..", "probability": 0.912109375}], "temperature": 1.0}, {"id": 50, "seek": 139397, "start": 1366.57, "end": 1393.97, "text": "في قلب طبعا فظيع جدا لإن مافيش راجع جدا راجع في accumulation of blood في stagnation of blood ماشي ثم في pain و إيش قلنا swallow و changing color ده هو شايفين اللون ده أحمر لكن في النهاية صار ميكروسيزش بصير and طبعا", "tokens": [41185, 12174, 46152, 23032, 3555, 3615, 995, 6156, 19913, 40228, 10874, 28259, 5296, 28814, 1863, 19446, 41185, 8592, 
12602, 26108, 3615, 10874, 28259, 12602, 26108, 3615, 8978, 35647, 295, 3390, 8978, 32853, 399, 295, 3390, 3714, 33599, 1829, 38637, 2304, 8978, 1822, 4032, 11933, 1829, 8592, 12174, 1211, 8315, 20099, 4032, 4473, 2017, 11778, 3224, 31439, 13412, 995, 33911, 9957, 13672, 11536, 11778, 3224, 5551, 5016, 29973, 44381, 8978, 28239, 11296, 10632, 20328, 9640, 3714, 1829, 4117, 32887, 3794, 1829, 11622, 8592, 4724, 9381, 13546, 293, 23032, 3555, 3615, 995], "avg_logprob": -0.27094779433784905, "compression_ratio": 1.5308056872037914, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1366.57, "end": 1366.81, "word": "في", "probability": 0.75537109375}, {"start": 1366.81, "end": 1367.05, "word": " قلب", "probability": 0.880859375}, {"start": 1367.05, "end": 1367.33, "word": " طبعا", "probability": 0.986083984375}, {"start": 1367.33, "end": 1367.63, "word": " فظيع", "probability": 0.8411458333333334}, {"start": 1367.63, "end": 1367.91, "word": " جدا", "probability": 0.996826171875}, {"start": 1367.91, "end": 1368.17, "word": " لإن", "probability": 0.85888671875}, {"start": 1368.17, "end": 1368.59, "word": " مافيش", "probability": 0.7970377604166666}, {"start": 1368.59, "end": 1369.33, "word": " راجع", "probability": 0.9684244791666666}, {"start": 1369.33, "end": 1370.09, "word": " جدا", "probability": 0.7119140625}, {"start": 1370.09, "end": 1370.61, "word": " راجع", "probability": 0.9474283854166666}, {"start": 1370.61, "end": 1371.01, "word": " في", "probability": 0.79833984375}, {"start": 1371.01, "end": 1371.53, "word": " accumulation", "probability": 0.96875}, {"start": 1371.53, "end": 1371.87, "word": " of", "probability": 0.9541015625}, {"start": 1371.87, "end": 1372.11, "word": " blood", "probability": 0.95166015625}, {"start": 1372.11, "end": 1372.87, "word": " في", "probability": 0.5048828125}, {"start": 1372.87, "end": 1373.63, "word": " stagnation", "probability": 0.983154296875}, {"start": 1373.63, "end": 1373.87, "word": " of", "probability": 
0.97900390625}, {"start": 1373.87, "end": 1374.19, "word": " blood", "probability": 0.96337890625}, {"start": 1374.19, "end": 1374.99, "word": " ماشي", "probability": 0.6581217447916666}, {"start": 1374.99, "end": 1376.07, "word": " ثم", "probability": 0.946044921875}, {"start": 1376.07, "end": 1377.15, "word": " في", "probability": 0.7373046875}, {"start": 1377.15, "end": 1378.89, "word": " pain", "probability": 0.97412109375}, {"start": 1378.89, "end": 1380.13, "word": " و", "probability": 0.9453125}, {"start": 1380.13, "end": 1380.37, "word": " إيش", "probability": 0.8483072916666666}, {"start": 1380.37, "end": 1380.75, "word": " قلنا", "probability": 0.88330078125}, {"start": 1380.75, "end": 1381.65, "word": " swallow", "probability": 0.1644287109375}, {"start": 1381.65, "end": 1384.09, "word": " و", "probability": 0.362060546875}, {"start": 1384.09, "end": 1385.81, "word": " changing", "probability": 0.76123046875}, {"start": 1385.81, "end": 1386.31, "word": " color", "probability": 0.76611328125}, {"start": 1386.31, "end": 1386.59, "word": " ده", "probability": 0.55316162109375}, {"start": 1386.59, "end": 1386.71, "word": " هو", "probability": 0.97314453125}, {"start": 1386.71, "end": 1387.13, "word": " شايفين", "probability": 0.9449462890625}, {"start": 1387.13, "end": 1387.39, "word": " اللون", "probability": 0.976806640625}, {"start": 1387.39, "end": 1387.73, "word": " ده", "probability": 0.60040283203125}, {"start": 1387.73, "end": 1388.13, "word": " أحمر", "probability": 0.9246419270833334}, {"start": 1388.13, "end": 1388.75, "word": " لكن", "probability": 0.818359375}, {"start": 1388.75, "end": 1388.91, "word": " في", "probability": 0.755859375}, {"start": 1388.91, "end": 1389.33, "word": " النهاية", "probability": 0.8885091145833334}, {"start": 1389.33, "end": 1389.79, "word": " صار", "probability": 0.5780029296875}, {"start": 1389.79, "end": 1390.43, "word": " ميكروسيزش", "probability": 0.645721435546875}, {"start": 1390.43, "end": 1390.97, "word": " 
بصير", "probability": 0.7747395833333334}, {"start": 1390.97, "end": 1392.25, "word": " and", "probability": 0.35986328125}, {"start": 1392.25, "end": 1393.97, "word": " طبعا", "probability": 0.99609375}], "temperature": 1.0}, {"id": 51, "seek": 142377, "start": 1394.19, "end": 1423.77, "text": "hot فى المنطقة دى بيكون فى عرض علارة عالية جدا وطبعا زى هدى عبارة عن hematological مريضانية برضه جبطار لكنها صارت بعدها نزيف ماشى صار bleeding أكثر أسكن في مراكز مختلف وipotensia manifestation شايفين الرجل لما صار فى", "tokens": [12194, 6156, 7578, 9673, 1863, 9566, 28671, 11778, 7578, 4724, 1829, 30544, 6156, 7578, 6225, 43042, 11203, 9640, 3660, 6225, 6027, 10632, 10874, 28259, 4032, 9566, 3555, 3615, 995, 30767, 7578, 8032, 3215, 7578, 6225, 3555, 9640, 3660, 18871, 8636, 267, 4383, 3714, 16572, 11242, 7649, 10632, 4724, 43042, 3224, 10874, 3555, 9566, 9640, 44381, 11296, 20328, 9640, 2655, 39182, 11296, 8717, 11622, 33911, 3714, 33599, 7578, 20328, 9640, 19312, 5551, 4117, 49115, 5551, 3794, 19452, 8978, 3714, 23557, 4117, 11622, 3714, 46456, 46538, 4032, 647, 310, 694, 654, 29550, 13412, 995, 33911, 9957, 34892, 7435, 1211, 5296, 15042, 20328, 9640, 6156, 7578], "avg_logprob": -0.3431490341631266, "compression_ratio": 1.5374449339207048, "no_speech_prob": 1.3113021850585938e-06, "words": [{"start": 1394.19, "end": 1394.65, "word": "hot", "probability": 0.280029296875}, {"start": 1394.65, "end": 1394.89, "word": " فى", "probability": 0.5634765625}, {"start": 1394.89, "end": 1395.27, "word": " المنطقة", "probability": 0.958984375}, {"start": 1395.27, "end": 1395.49, "word": " دى", "probability": 0.77978515625}, {"start": 1395.49, "end": 1395.83, "word": " بيكون", "probability": 0.9498697916666666}, {"start": 1395.83, "end": 1396.01, "word": " فى", "probability": 0.8623046875}, {"start": 1396.01, "end": 1396.47, "word": " عرض", "probability": 0.52008056640625}, {"start": 1396.47, "end": 1397.19, "word": " علارة", "probability": 0.7127278645833334}, {"start": 1397.19, 
"end": 1397.97, "word": " عالية", "probability": 0.96630859375}, {"start": 1397.97, "end": 1398.31, "word": " جدا", "probability": 0.988037109375}, {"start": 1398.31, "end": 1400.65, "word": " وطبعا", "probability": 0.908203125}, {"start": 1400.65, "end": 1400.87, "word": " زى", "probability": 0.930419921875}, {"start": 1400.87, "end": 1401.75, "word": " هدى", "probability": 0.6771647135416666}, {"start": 1401.75, "end": 1402.19, "word": " عبارة", "probability": 0.958740234375}, {"start": 1402.19, "end": 1402.33, "word": " عن", "probability": 0.96533203125}, {"start": 1402.33, "end": 1402.97, "word": " hematological", "probability": 0.8369140625}, {"start": 1402.97, "end": 1403.49, "word": " مريضانية", "probability": 0.480126953125}, {"start": 1403.49, "end": 1404.13, "word": " برضه", "probability": 0.9659830729166666}, {"start": 1404.13, "end": 1404.71, "word": " جبطار", "probability": 0.779296875}, {"start": 1404.71, "end": 1406.23, "word": " لكنها", "probability": 0.933349609375}, {"start": 1406.23, "end": 1407.65, "word": " صارت", "probability": 0.9912109375}, {"start": 1407.65, "end": 1410.23, "word": " بعدها", "probability": 0.933349609375}, {"start": 1410.23, "end": 1411.17, "word": " نزيف", "probability": 0.7699381510416666}, {"start": 1411.17, "end": 1413.91, "word": " ماشى", "probability": 0.6032307942708334}, {"start": 1413.91, "end": 1415.25, "word": " صار", "probability": 0.6346435546875}, {"start": 1415.25, "end": 1415.59, "word": " bleeding", "probability": 0.30615234375}, {"start": 1415.59, "end": 1416.03, "word": " أكثر", "probability": 0.5664876302083334}, {"start": 1416.03, "end": 1416.59, "word": " أسكن", "probability": 0.7146809895833334}, {"start": 1416.59, "end": 1417.91, "word": " في", "probability": 0.372314453125}, {"start": 1417.91, "end": 1418.27, "word": " مراكز", "probability": 0.657470703125}, {"start": 1418.27, "end": 1418.87, "word": " مختلف", "probability": 0.994140625}, {"start": 1418.87, "end": 1421.39, "word": " وipotensia", 
"probability": 0.437841796875}, {"start": 1421.39, "end": 1422.23, "word": " manifestation", "probability": 0.56640625}, {"start": 1422.23, "end": 1422.67, "word": " شايفين", "probability": 0.9755859375}, {"start": 1422.67, "end": 1423.09, "word": " الرجل", "probability": 0.8377278645833334}, {"start": 1423.09, "end": 1423.33, "word": " لما", "probability": 0.92236328125}, {"start": 1423.33, "end": 1423.55, "word": " صار", "probability": 0.97314453125}, {"start": 1423.55, "end": 1423.77, "word": " فى", "probability": 0.77978515625}], "temperature": 1.0}, {"id": 52, "seek": 145538, "start": 1426.32, "end": 1455.38, "text": "هى اللى بدهم قصه ولا لأ نيكروسيز وهى هدى بدهم قصه هى الأصابع هدى كلها بدها قصه ماشى البداية بتبدأ this color تدين حمرا لكن لو انثبت يعنى ممكن يصير فى infarction نجفان انه جيت وصلنا لل familial لل genetic disease ماشى او ال genetic risk factor فالمرة الجاية ان شاء الله بنكمل", "tokens": [3224, 7578, 13672, 7578, 47525, 16095, 12174, 9381, 3224, 49429, 5296, 10721, 8717, 1829, 4117, 32887, 3794, 1829, 11622, 37037, 7578, 8032, 3215, 7578, 47525, 16095, 12174, 9381, 3224, 8032, 7578, 16247, 9381, 16758, 3615, 8032, 3215, 7578, 28242, 11296, 47525, 11296, 12174, 9381, 3224, 3714, 33599, 7578, 29739, 28259, 10632, 39894, 44510, 10721, 341, 2017, 6055, 3215, 9957, 11331, 2304, 23557, 44381, 45164, 16472, 12984, 3555, 2655, 37495, 1863, 7578, 3714, 43020, 7251, 9381, 13546, 6156, 7578, 1536, 289, 882, 8717, 7435, 5172, 7649, 16472, 3224, 10874, 36081, 4032, 36520, 8315, 24976, 4085, 831, 24976, 12462, 4752, 3714, 33599, 7578, 1975, 2407, 2423, 12462, 3148, 5952, 6156, 45340, 25720, 25724, 995, 10632, 16472, 13412, 16606, 21984, 44945, 24793, 1211], "avg_logprob": -0.3385847166550061, "compression_ratio": 1.7312252964426877, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1426.32, "end": 1426.48, "word": "هى", "probability": 0.159332275390625}, {"start": 1426.48, "end": 1426.58, "word": " اللى", "probability": 0.799072265625}, {"start": 
1426.58, "end": 1426.84, "word": " بدهم", "probability": 0.859375}, {"start": 1426.84, "end": 1427.24, "word": " قصه", "probability": 0.8507486979166666}, {"start": 1427.24, "end": 1427.34, "word": " ولا", "probability": 0.6865234375}, {"start": 1427.34, "end": 1427.7, "word": " لأ", "probability": 0.6810302734375}, {"start": 1427.7, "end": 1429.24, "word": " نيكروسيز", "probability": 0.6176409040178571}, {"start": 1429.24, "end": 1430.1, "word": " وهى", "probability": 0.557861328125}, {"start": 1430.1, "end": 1430.32, "word": " هدى", "probability": 0.6158040364583334}, {"start": 1430.32, "end": 1430.56, "word": " بدهم", "probability": 0.8623046875}, {"start": 1430.56, "end": 1431.3, "word": " قصه", "probability": 0.8331705729166666}, {"start": 1431.3, "end": 1431.54, "word": " هى", "probability": 0.6363525390625}, {"start": 1431.54, "end": 1431.96, "word": " الأصابع", "probability": 0.8875732421875}, {"start": 1431.96, "end": 1432.26, "word": " هدى", "probability": 0.7923177083333334}, {"start": 1432.26, "end": 1432.7, "word": " كلها", "probability": 0.961669921875}, {"start": 1432.7, "end": 1432.96, "word": " بدها", "probability": 0.94091796875}, {"start": 1432.96, "end": 1434.64, "word": " قصه", "probability": 0.9348958333333334}, {"start": 1434.64, "end": 1435.02, "word": " ماشى", "probability": 0.6629231770833334}, {"start": 1435.02, "end": 1436.72, "word": " البداية", "probability": 0.8131510416666666}, {"start": 1436.72, "end": 1437.14, "word": " بتبدأ", "probability": 0.8963216145833334}, {"start": 1437.14, "end": 1437.38, "word": " this", "probability": 0.57861328125}, {"start": 1437.38, "end": 1437.7, "word": " color", "probability": 0.84765625}, {"start": 1437.7, "end": 1438.26, "word": " تدين", "probability": 0.6177571614583334}, {"start": 1438.26, "end": 1439.22, "word": " حمرا", "probability": 0.7701822916666666}, {"start": 1439.22, "end": 1439.94, "word": " لكن", "probability": 0.677734375}, {"start": 1439.94, "end": 1440.18, "word": " لو", 
"probability": 0.9755859375}, {"start": 1440.18, "end": 1440.68, "word": " انثبت", "probability": 0.74310302734375}, {"start": 1440.68, "end": 1441.04, "word": " يعنى", "probability": 0.6767171223958334}, {"start": 1441.04, "end": 1442.18, "word": " ممكن", "probability": 0.864013671875}, {"start": 1442.18, "end": 1443.16, "word": " يصير", "probability": 0.8684895833333334}, {"start": 1443.16, "end": 1443.44, "word": " فى", "probability": 0.6900634765625}, {"start": 1443.44, "end": 1444.58, "word": " infarction", "probability": 0.865234375}, {"start": 1444.58, "end": 1445.28, "word": " نجفان", "probability": 0.814208984375}, {"start": 1445.28, "end": 1446.04, "word": " انه", "probability": 0.4736328125}, {"start": 1446.04, "end": 1446.24, "word": " جيت", "probability": 0.63525390625}, {"start": 1446.24, "end": 1446.62, "word": " وصلنا", "probability": 0.85791015625}, {"start": 1446.62, "end": 1446.84, "word": " لل", "probability": 0.53759765625}, {"start": 1446.84, "end": 1447.44, "word": " familial", "probability": 0.5098876953125}, {"start": 1447.44, "end": 1447.72, "word": " لل", "probability": 0.173583984375}, {"start": 1447.72, "end": 1448.26, "word": " genetic", "probability": 0.77685546875}, {"start": 1448.26, "end": 1448.86, "word": " disease", "probability": 0.8798828125}, {"start": 1448.86, "end": 1450.08, "word": " ماشى", "probability": 0.7716471354166666}, {"start": 1450.08, "end": 1450.26, "word": " او", "probability": 0.7841796875}, {"start": 1450.26, "end": 1450.34, "word": " ال", "probability": 0.9404296875}, {"start": 1450.34, "end": 1450.66, "word": " genetic", "probability": 0.91162109375}, {"start": 1450.66, "end": 1450.98, "word": " risk", "probability": 0.95849609375}, {"start": 1450.98, "end": 1451.5, "word": " factor", "probability": 0.869140625}, {"start": 1451.5, "end": 1453.12, "word": " فالمرة", "probability": 0.9016927083333334}, {"start": 1453.12, "end": 1453.4, "word": " الجاية", "probability": 0.8370768229166666}, {"start": 1453.4, 
"end": 1453.56, "word": " ان", "probability": 0.892578125}, {"start": 1453.56, "end": 1453.74, "word": " شاء", "probability": 0.902099609375}, {"start": 1453.74, "end": 1453.9, "word": " الله", "probability": 0.9658203125}, {"start": 1453.9, "end": 1455.38, "word": " بنكمل", "probability": 0.8543294270833334}], "temperature": 1.0}], "language": "ar", "language_probability": 1.0, "duration": 1456.402, "duration_after_vad": 1358.8331249999985} \ No newline at end of file diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/cYsH46grQoA_postprocess.srt b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/cYsH46grQoA_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..1f5411e897824b16bbb7cba5e6e5529a8a7f90a4 --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/cYsH46grQoA_postprocess.srt @@ -0,0 +1,2392 @@ +1 +00:00:20,840 --> 00:00:28,260 +بسم الله الرحمن الرحيم اليوم ان شاء الله هنكمل في + +2 +00:00:28,260 --> 00:00:32,520 +ال hereditary bleeding disorders و هنحكي في ال vom + +3 +00:00:32,520 --> 00:00:37,520 +libra disease و هنتطرق لل vom libra disease من عدة + +4 +00:00:37,520 --> 00:00:42,200 +زوايا بداية هنمسك ال factor و هو vom libra factor + +5 +00:00:42,200 --> 00:00:46,340 +من ناحية ال structure و location و function ثم + +6 +00:00:46,340 --> 00:00:47,880 +هنمسك ال disease + +7 +00:00:53,480 --> 00:00:59,380 +and diagnosis و treatment بداية + +8 +00:00:59,380 --> 00:01:04,320 +vomolibran disease هو عبارة عن مرض يعتبر family + +9 +00:01:04,320 --> 00:01:08,260 +bleeding disorders ومن اسمه vomolibran disease + +10 +00:01:08,260 --> 00:01:12,360 +بيعني ان vomolibran factor اما absent او + +11 +00:01:12,360 --> 00:01:17,860 +dysfunctional اما absent او dysfunctional بروتين + +12 +00:01:17,860 --> 00:01:26,770 +زي اي بروتين لصنعفي الجسم من خلال ال gene ال gene + +13 +00:01:26,770 --> 00:01:33,870 +تبعه موجود على شفر تقرم و في كرموزوم 12 و بيصير له + +14 +00:01:33,870 --> 00:01:37,730 +expression لهذا ال gene في مكانين ال endothelial + +15 
+00:01:37,730 --> 00:01:40,450 +cell of ال megakaryocyte و احنا عارفين ان ال + +16 +00:01:40,450 --> 00:01:44,830 +endothelial cell يتصنع بعض ال gram factor و تتخزن + +17 +00:01:45,280 --> 00:01:51,880 +ثم ال mega-karyocytes برضه بتصنع اللي هو إيش ال + +18 +00:01:51,880 --> 00:01:57,020 +vomiting brain factor بيصنع الفاكتور as + +19 +00:01:57,020 --> 00:02:02,200 +preliminary structures ثم بيتطور إلى modified + +20 +00:02:02,200 --> 00:02:07,440 +structure من خلال عمليات كيميائية تتضمن بلمرة + +21 +00:02:08,210 --> 00:02:11,830 +بيتحول فيها الموليكيول من Monomer إلى Polymer + +22 +00:02:11,830 --> 00:02:16,890 +وMonomer وDiamer وTrimer وPolymer وبالتالي بنشوف + +23 +00:02:16,890 --> 00:02:21,550 +عدة أشكال من مين؟ من ال Multimer او من ال Homo + +24 +00:02:21,550 --> 00:02:26,370 +-Germanic يتصدق + +25 +00:02:26,370 --> 00:02:33,110 +في النهاية الموليكيول وعلى سطحه او يحتوي على مواقع + +26 +00:02:33,110 --> 00:02:40,670 +حامة منهاارتباطه موقع ارتباطه بال factor 80 ثم two + +27 +00:02:40,670 --> 00:02:43,350 +receptors بيرتبطوا + +28 +00:02:49,070 --> 00:02:54,310 +ثم one receptors على سطح ال collagen اللى هو ال + +29 +00:02:54,310 --> 00:03:00,170 +sub endothelial layer فبكون + +30 +00:03:00,170 --> 00:03:04,910 +على سطحه binding sites مهمة تتضمن هذه ال binding + +31 +00:03:04,910 --> 00:03:05,270 +sites + +32 +00:03:09,350 --> 00:03:11,990 +هنا ان ال production of homo-endothelial factor + +33 +00:03:11,990 --> 00:03:15,790 +يتم في الاندوثيليا السبب في ال mega-chemicides و + +34 +00:03:15,790 --> 00:03:21,350 +most of the factor is usually synthesized in a + +35 +00:03:21,350 --> 00:03:27,010 +secreted form بمعنى الاجيفير في البلازمة and + +36 +00:03:27,010 --> 00:03:34,210 +stored in endothelial في الويبال بلدي placesوفي + +37 +00:03:34,210 --> 00:03:38,270 +الـ Alpha Granules of Platelets يبقى مواقع تخزينه + +38 +00:03:38,270 --> 00:03:42,230 +عارفينها هي الـ Whipper Palatinate والـ Alpha + +39 +00:03:42,230 --> 00:03:46,130 +Granules of Platelets ومواقع إنتاجه 
هي الـ + +40 +00:03:46,130 --> 00:03:52,290 +Endothelial Cellular Megakaryocyte بيستلزم + +41 +00:03:52,290 --> 00:03:59,350 +خروج الـVolvule Brand من مخازنه Inducer يستلزم + +42 +00:03:59,350 --> 00:04:04,260 +مخرج أو خروج الـVolvule Brandfactor من مخازنه + +43 +00:04:04,260 --> 00:04:12,540 +certain inducer طبعا أولا ب stimulus فإذا + +44 +00:04:12,540 --> 00:04:16,480 +بدنا نطلعه من ال endothelial cells mostly ال + +45 +00:04:16,480 --> 00:04:20,120 +stimulus اللى بيستخدم هو ال thrombin و الهستمين و + +46 +00:04:20,120 --> 00:04:25,540 +ال fibrin و ال complement من ال C5 ل اللى هم + +47 +00:04:25,540 --> 00:04:30,930 +بيسمونه membrane attack complex ل C9وإذا كان بدنا + +48 +00:04:30,930 --> 00:04:36,190 +نطلعه من الـ platelet نستخدم ثرومبين و ADD وكلاجي + +49 +00:04:36,190 --> 00:04:40,330 +طبعا + +50 +00:04:40,330 --> 00:04:44,270 +ال adhesion كل كواضعي في ال family brand حكينا + +51 +00:04:44,270 --> 00:04:48,230 +فيها أكتر من مرة وقولنا بيلعب دور كبير it has a + +52 +00:04:48,230 --> 00:04:51,550 +dual role بيلعب دور كبير في ال primary hemostasis + +53 +00:04:51,550 --> 00:04:54,930 +وفي ال secondary hemostasis في ال primary بيعمل + +54 +00:04:54,930 --> 00:05:00,110 +adhesion و secondary aggregationو في ال secondary + +55 +00:05:00,110 --> 00:05:09,050 +hemostasis بيعمل 100% بس ال factor .. 
اه انا في ال + +56 +00:05:09,050 --> 00:05:13,990 +primary ماشي aggregation primary مافهوش بقى هو + +57 +00:05:13,990 --> 00:05:17,030 +اللي دور في ال primary و اللي دور فيهاش في ال + +58 +00:05:17,030 --> 00:05:20,310 +secondary و عارفين ال function او ال mechanism of + +59 +00:05:20,310 --> 00:05:23,250 +adhesion و ال mechanism + +60 +00:05:26,970 --> 00:05:31,870 +هو ال factor تمانية بقعد vom Lebrun بقعد factor + +61 +00:05:31,870 --> 00:05:37,970 +تمانية طيب نيجي للمرض نيجي لإيش للمرض وال vom + +62 +00:05:37,970 --> 00:05:43,130 +Lebrun disease أول من حكى فيه هو Eric vom Lebrun + +63 +00:05:43,130 --> 00:05:47,530 +في 1930 وسمى + +64 +00:05:47,530 --> 00:05:52,410 +فيه حينهhereditary pseudohemophilia hereditary + +65 +00:05:52,410 --> 00:05:58,230 +pseudohemophilia وكانت التسمية ناتجة عن الأسباب + +66 +00:05:58,230 --> 00:06:02,490 +الآتية أنه أول finding لجها لجى فيه prolongation + +67 +00:06:02,490 --> 00:06:05,970 +في ال bleeding time ماشي لكن ال platelet count + +68 +00:06:05,970 --> 00:06:12,110 +normal ال platelet count normal مش راكبة صح فلمرة + +69 +00:06:12,110 --> 00:06:18,270 +اتنين لجى فيه myocosal bleedingنبقى تلاتة لجأة أن + +70 +00:06:18,270 --> 00:06:21,250 +المرض مصير بالجهتين، male و female، both sexes are + +71 +00:06:21,250 --> 00:06:26,250 +affected في الهموفيليا، نعرف أن ال male هو ال + +72 +00:06:26,250 --> 00:06:31,110 +affected و ال female هي كارية، وبالتالي هذا لا هو + +73 +00:06:31,110 --> 00:06:36,550 +هموفيليا ولا هو أياش bleeding disorder عادي تأثرت + +74 +00:06:36,550 --> 00:06:40,740 +من خلال ال treatmentطيب، شو اللي صار؟ قالوا في + +75 +00:06:40,740 --> 00:06:44,780 +1950، طبعا هذا الكلام تطلع more investigation، ففي + +76 +00:06:44,780 --> 00:06:51,160 +1950 leading time اكتشفه is associated with factor + +77 +00:06:51,160 --> 00:06:56,780 +8 deficiency، ماشي؟ طبعا في ال homely brand هذا + +78 +00:06:56,780 --> 00:07:00,480 +الكلام موجود؟ أه موجود، قالوا factor 8 يبقى قل .. 
+ +79 +00:07:00,480 --> 00:07:05,390 +100% يبقى قل إذا كان فيش homely brandنمرأة في الـ + +80 +00:07:05,390 --> 00:07:10,890 +1977 اكتشفوا الـ vomliebrand factor في التمانين + +81 +00:07:10,890 --> 00:07:20,570 +عملوا gene cloning للـ vomliebrand factor ال + +82 +00:07:20,570 --> 00:07:25,150 +incidents و ال frequency كانوا نسبته بشكل عام هي 1 + +83 +00:07:25,150 --> 00:07:31,710 +% من ال population 1% من ال population لكن المرضى + +84 +00:07:31,710 --> 00:07:39,190 +ب ..يعني they manifest different or variable + +85 +00:07:39,190 --> 00:07:45,450 +degree of symptoms بنلاقي في مرضى كتير بتعانيش من + +86 +00:07:45,450 --> 00:07:50,590 +إشي و مرضى كتير بتعاني ماشي ففي variation فيه في + +87 +00:07:50,590 --> 00:07:56,450 +ال symptoms between disease between مرضىلأن هو + +88 +00:07:56,450 --> 00:08:01,950 +patient وبالتالي هذا يعني أن المرض بيحمل أكتر من + +89 +00:08:01,950 --> 00:08:06,030 +نوعه ماشي؟ مادام في variation في نفس في ال + +90 +00:08:06,030 --> 00:08:09,370 +symptoms لنفس المرض مانتوا فيه أكتر من إياش من + +91 +00:08:09,370 --> 00:08:12,970 +type من هذا المرض يعني ال clinically significant + +92 +00:08:12,970 --> 00:08:18,030 +form of Lebron disease نسبة الإصابة فيه 125 شخص في + +93 +00:08:18,030 --> 00:08:25,380 +المليون في كل مليونوالـ severe نص الى خمسة في + +94 +00:08:25,380 --> 00:08:33,080 +المية من لكل مليون مصامير طبعا بقتل زومة ال + +95 +00:08:33,080 --> 00:08:37,880 +inheritance pattern بتلاقي ال male و ال female are + +96 +00:08:37,880 --> 00:08:42,420 +affected equally ماشي؟ + +97 +00:08:44,440 --> 00:08:48,280 +لأن إجوا يعملوا classification للمرض و جالوا ال + +98 +00:08:48,280 --> 00:08:51,980 +classification بدناها تعتمد على either the disease + +99 +00:08:51,980 --> 00:08:56,200 +is quantitative or qualitative هل هو quantitative + +100 +00:08:56,200 --> 00:09:05,380 +ولا إياش ولا qualitative طبعا هذا بشكل أساسيوطبعا + +101 +00:09:05,380 --> 00:09:09,280 +كلنا بنعرف دور volvomibrain factor بالنسبة الى + +102 +00:09:09,280 --> 00:09:14,280 
+factor 8 وبالتالي كمان أخدوا هذا الكلام بعين + +103 +00:09:14,280 --> 00:09:19,800 +الاعتبار مما أدى الى انه يعملوا classification لل + +104 +00:09:19,800 --> 00:09:25,420 +disease الى تلت أنواع أساسية تلت أنواع أساسية + +105 +00:09:25,420 --> 00:09:30,100 +وببساطة عشان تحفظوها الشباب هدا هي أكتر حفظوها كده + +106 +00:09:30,100 --> 00:09:32,460 +اتى لمرة واحد وتلاتة + +107 +00:09:35,960 --> 00:09:44,960 +نمرا واحد و تلاتة are quantitative نمرا اتنين is + +108 +00:09:44,960 --> 00:09:49,800 +qualitative نمرا + +109 +00:09:49,800 --> 00:09:59,460 +واحد is mild to moderate and + +110 +00:09:59,460 --> 00:10:08,090 +most common70% من المرضى من type 1 بينما type 3 is + +111 +00:10:08,090 --> 00:10:13,510 +severe and + +112 +00:10:13,510 --> 00:10:17,330 +real + +113 +00:10:17,330 --> 00:10:23,050 +لأنه بشكل حوالي 5% type + +114 +00:10:23,050 --> 00:10:31,530 +2 في منه أنواع مختلفة حسب ال mutationو بيشكل ربع + +115 +00:10:31,530 --> 00:10:37,030 +الحالات بيشكل ربع الحالات لحوالي خمس أنواع مختلفة + +116 +00:10:37,030 --> 00:10:44,150 +هذه التقسيمة الأساسية لكن قالوا انه في حاجة جديدة + +117 +00:10:44,150 --> 00:10:48,170 +اكتشفوها حوالي 300 الحالات سجلة حاجة بيسموها + +118 +00:10:48,170 --> 00:10:54,430 +Acquired Bomb Lebron Factory و ان شاء الله هنحكي + +119 +00:10:54,430 --> 00:10:58,590 +فيه بالتفصيل يعني النوع الرابع هو عبارة عن أياش + +120 +00:10:59,200 --> 00:11:06,260 +Acquired Vulnerable Diseases الـ + +121 +00:11:06,260 --> 00:11:09,540 +Clinical Manifestation شباب زي ما قلتلكوا ال + +122 +00:11:09,540 --> 00:11:15,320 +symptoms تختلف from mild and manageable symptoms + +123 +00:11:15,320 --> 00:11:19,100 +لسevere وunmanageable symptoms ال symptoms + +124 +00:11:28,730 --> 00:11:34,550 +هي مختلفة من الـ mild and manageable bleeding + +125 +00:11:34,550 --> 00:11:40,050 +disorders إلى الـ very severe and unmanageable + +126 +00:11:40,050 --> 00:11:49,270 +hemorrhage ومن التالي حسب نوع المرض طبعا طيب اتنين + +127 +00:11:49,270 --> 00:11:54,730 +و تلاتة ال bleeding 
episodes فيهم are life + +128 +00:11:54,730 --> 00:11:55,190 +threatening + +129 +00:11:58,700 --> 00:12:02,820 +كمان ال .. المرض ممكن تشوفه في ال .. في ال female + +130 +00:12:02,820 --> 00:12:10,320 +أكثر ضروة لإن ال female بتتعرض ل risk factor شهري + +131 +00:12:10,320 --> 00:12:17,020 +صح؟ هو ال bleeding أو ال menorrhea كمان + +132 +00:12:17,020 --> 00:12:20,860 +ال bleeding ممكن يزيد إذا كان المريض بياخد aspirin + +133 +00:12:20,860 --> 00:12:24,320 +لإنه في هذا الحالة بنعطل .. احنا البلد اللي تمعطلة + +134 +00:12:24,320 --> 00:12:33,230 +و ..بنزيد تعطيلها الا انهم من محاسن الصداف ان هذا + +135 +00:12:33,230 --> 00:12:38,270 +المرض severity of symptoms are modified are + +136 +00:12:38,270 --> 00:12:44,710 +decreased by age ويجالوا + +137 +00:12:44,710 --> 00:12:50,590 +انه بتبدأ مع العمر تزيد تركيز ال vomilarin factor + +138 +00:12:50,590 --> 00:12:54,820 +which is نعمة ويعني هذه نعمة من نعم الخالق انههذه + +139 +00:12:54,820 --> 00:12:58,560 +ال-vomelogram factor حتى عند المرضى تبدأ تزيد شوية + +140 +00:12:58,560 --> 00:13:06,540 +شوية كل ما الإنسان كبر كل ما الإنسان كبر كمان + +141 +00:13:06,540 --> 00:13:11,880 +هذه كلها symptoms للمرض في حوالي 60% من ال + +142 +00:13:11,880 --> 00:13:16,760 +symptoms بيعاني منها المريض الـpromptivist axis و + +143 +00:13:16,760 --> 00:13:27,800 +40% easy bruising و hematoma 35%منوراجيا ماشي يعني + +144 +00:13:27,800 --> 00:13:36,680 +هذه هي ال symptoms أخدت نسب مختلفة عند المرضى الـ + +145 +00:13:36,680 --> 00:13:40,660 +GI bleeding و ال dental extraction و ال trauma و + +146 +00:13:40,660 --> 00:13:43,940 +ال postpartum و postoperative تعرفوا المصطلحات هذه + +147 +00:13:43,940 --> 00:13:50,600 +كلها مظبوط postpartum نزيف ما بعد الولادة + +148 +00:13:50,600 --> 00:13:53,100 +postoperative + +149 +00:13:59,160 --> 00:14:05,820 +ماشي يا شباب؟ طيب، خلّيني أشوف الأنواع المختلفة + +150 +00:14:05,820 --> 00:14:08,080 +للـVulnerable Heart Disease وبنبدأ بالنوع الأول + +151 +00:14:08,080 --> 00:14:13,360 +وهو الـType I واتفقنا 
على أنه mild to moderate and + +152 +00:14:13,360 --> 00:14:21,460 +it's a quantitative disorder وبالتالي + +153 +00:14:21,460 --> 00:14:23,100 +الـVulnerable Heart Disorder بقى فاكتور could + +154 +00:14:23,100 --> 00:14:28,410 +functioning normally عند المرضى هدولusually + +155 +00:14:28,410 --> 00:14:35,230 +inherited in autosomal dominant manner و + +156 +00:14:35,230 --> 00:14:40,370 +الإصابة في العائلة الواحدة الواحدة قد تختلف + +157 +00:14:40,370 --> 00:14:45,570 +dramatically يعني + +158 +00:14:45,570 --> 00:14:51,910 +ممكن تلاقي واحد إصابته خفيفة و ال symptoms عنده + +159 +00:14:51,910 --> 00:14:57,970 +خفيفة و التاني ممكن يكون ال symptoms عندهم عيشبنفس + +160 +00:14:57,970 --> 00:15:04,850 +العيلة يعني type 2 قسموه إلى خمس أنواع حسب ال + +161 +00:15:04,850 --> 00:15:12,030 +mutation و تأثيرها على ال factor ال volume + +162 +00:15:12,030 --> 00:15:20,010 +equilibrium factor type 2 قسموه + +163 +00:15:20,010 --> 00:15:26,260 +إلى خمس أنواع و حسب تأثيرها على ال factorفيقسموه + +164 +00:15:26,260 --> 00:15:32,760 +إلى type 2A و B و C و M و N خمس أنواع مختلفة + +165 +00:15:32,760 --> 00:15:37,780 +معظمها بيتأثر + +166 +00:15:37,780 --> 00:15:43,840 +بيختلف فيها صفات ال multimars formed from one to + +167 +00:15:43,840 --> 00:15:49,680 +one يعني على سبيل المثال في type 2A بنلاقي فيه نقص + +168 +00:15:49,680 --> 00:15:53,500 +في ال high and intermediate multimars إيش ال + +169 +00:15:53,500 --> 00:15:57,800 +multimars ده شباب؟مش قولنا ال volume of the brand + +170 +00:15:57,800 --> 00:16:06,440 +factor لما بتصنع بياخد عدة أشكال ماشي بيبدأ بتصنع + +171 +00:16:06,440 --> 00:16:10,000 +ب .. ب .. ب .. ب .. 
preliminary structure ثم + +172 +00:16:10,000 --> 00:16:15,240 +بيصلوا modification فبنشوف عدة أشكال من مين من ال + +173 +00:16:15,240 --> 00:16:18,300 +volume of the brand factor منها ال monomer و منها + +174 +00:16:18,300 --> 00:16:21,880 +ال diameter و منها ال trimer ماشي و منها ال + +175 +00:16:21,880 --> 00:16:27,300 +multimers multi يعني ايه؟ عدة مية فينالنوع هذا، + +176 +00:16:27,300 --> 00:16:30,560 +بنلاقي نوع الـ Multimers غايب، نوع إياش الـ + +177 +00:16:30,560 --> 00:16:35,040 +Multimers الـ High or Intermediate Multimers is + +178 +00:16:35,040 --> 00:16:43,120 +absent الـ Type 2B، اللي هو فيه عنده صفة مختلفة، + +179 +00:16:43,120 --> 00:16:46,780 +إنه والله ال Multimers بترتبط في الـ Platelet + +180 +00:16:46,780 --> 00:16:51,220 +Excessively، يعني عندها High Affinity لمين؟ + +181 +00:17:12,240 --> 00:17:19,220 +Type IIc ريسسف و الهي مالتيمار فورم is reduced لكن + +182 +00:17:19,220 --> 00:17:24,000 +ال multimars are qualitatively abnormal + +183 +00:17:27,060 --> 00:17:35,880 +2M تقل قدرته على الارتباط بـ Factor 8 2M تقل قدرته + +184 +00:17:35,880 --> 00:17:36,800 +على الارتباط بـ Factor 8 2M تقل قدرته على الارتباط + +185 +00:17:36,800 --> 00:17:36,820 +2M تقل قدرته على الارتباط بـ Factor 8 2M تقل قدرته + +186 +00:17:36,820 --> 00:17:36,940 +على الارتباط بـ Factor 8 2M تقل قدرته على الارتباط + +187 +00:17:36,940 --> 00:17:37,760 +بـ Factor 8 2M تقل قدرته على الارتباط بـ Factor 8 + +188 +00:17:37,760 --> 00:17:42,200 +2M تقل قدرته على الارتباط بـ Factor 8 2M تقل قدرته + +189 +00:17:42,200 --> 00:17:44,400 +على الارتباط بـ Factor 8 2M تقل قدرته على الارتباط + +190 +00:17:44,400 --> 00:17:49,480 +بـ Factor + +191 +00:17:49,480 --> 00:17:56,430 +8 2M تقهو عبارة عن severe وبالتالي ما زال severe + +192 +00:17:56,430 --> 00:18:00,530 +يعني ملاقيش factor اللي هو vomlibrand factor علشان + +193 +00:18:00,530 --> 00:18:07,210 +هيك القياس بيكون undetectable Undetectable وغياب + +194 +00:18:07,210 --> 00:18:11,490 +vomlibrand factor بينعكس سلبا على ال factor 8 + 
+195 +00:18:11,490 --> 00:18:17,070 +فبنلاقي المريض هذا عنده هموفيليا بالاضافة إلى + +196 +00:18:17,070 --> 00:18:23,660 +vomlibrand disease ماشي شباب؟هذه التلاتة أنواع + +197 +00:18:23,660 --> 00:18:27,440 +المختلفة قلنا فيه نوع رابع سميناه Acquired Womb + +198 +00:18:27,440 --> 00:18:32,680 +Rebrand Disease وزي ما قلتلكوا اكتشفوه في 1970 + +199 +00:18:32,680 --> 00:18:38,520 +وسجلت حوالي 300 حالة منه وغالبا + +200 +00:18:38,520 --> 00:18:44,260 +بنشوفه في الإكبار with + +201 +00:18:44,260 --> 00:18:46,760 +no personal or family bleeding history + +202 +00:18:49,780 --> 00:18:56,320 +ولمّا بنشتغل عليه بيجي الشخص و بنلاقي تقريبا بيشبه + +203 +00:18:56,320 --> 00:19:04,040 +type 2 of wolverine disease و جال ال mechanisms + +204 +00:19:04,040 --> 00:19:13,540 +بيتضمن اكتر منها منها نمرا واحد auto antibody + +205 +00:19:13,540 --> 00:19:19,560 +formation لمين؟ 100%عارفين إيش ال auto-antibody؟ + +206 +00:19:19,560 --> 00:19:23,580 +فجأة و بدون سبق انزعب تبدأ تتكوّن antibody لل + +207 +00:19:23,580 --> 00:19:28,300 +-vomellibrium factor.نبرة اتنين، ميكانيزم تاني + +208 +00:19:28,300 --> 00:19:32,220 +يؤدي ل acquired form، ان والله ال certain + +209 +00:19:32,220 --> 00:19:36,560 +vomellibrium multimers بيزيد ارتباطها ب tumor، + +210 +00:19:36,560 --> 00:19:43,460 +بإيه؟ بال tumor masses، وبالتالي وكأننا بنعمل + +211 +00:19:43,460 --> 00:19:47,710 +selective removal لمين؟ لل-vomellibrium factor.وهو + +212 +00:19:47,710 --> 00:19:53,350 +بقى acquired deficiency of vomilibrium factor صح؟ + +213 +00:19:53,350 --> 00:19:57,150 +لما ال vomilibrium factor يتطبق بال tumor cell or + +214 +00:19:57,150 --> 00:20:03,210 +reactive cell بيجيل من اللي بلازم و لا بيجيل .. + +215 +00:20:03,210 --> 00:20:05,770 +بيجيل من اللي بلازم وبالتالي هو كأنها indirectly + +216 +00:20:05,770 --> 00:20:10,870 +بنعمل Increased proteolysis of vomilibrium factor + +217 +00:20:10,870 --> 00:20:16,050 +ببدأ يتكسر بزيادة برضه من الميكانيزمات و .. 
+ +218 +00:20:17,410 --> 00:20:19,730 +Defective synthesis and release of vulnerable + +219 +00:20:19,730 --> 00:20:23,110 +factors from cellular compartment والله بيبدأ + +220 +00:20:23,110 --> 00:20:33,710 +يتصنع تصنيع خاطق أو قليل في الخلايا المصنعة من ما + +221 +00:20:33,710 --> 00:20:36,590 +يميز ال acquired disease يا شباب acquired + +222 +00:20:36,590 --> 00:20:39,590 +vulnerable disease انه لجوء associated with + +223 +00:20:39,590 --> 00:20:42,810 +certain with other diseases زي المييلو + +224 +00:20:42,810 --> 00:20:46,520 +proliferative و الينفو proliferativeو ال + +225 +00:20:46,520 --> 00:20:54,920 +cardiovascular disease و هكذا مفهوم + +226 +00:20:54,920 --> 00:21:00,140 +يا شباب طيب كيف نشخص ال homolebral disease جالو + +227 +00:21:00,140 --> 00:21:05,540 +أدوات التشخيص هي فحصات ال hemostasis وهي ال BT و + +228 +00:21:05,540 --> 00:21:13,640 +ال VTT و ال breeding time ماشي نبدأ بال BT و ال + +229 +00:21:13,640 --> 00:21:20,080 +VTTانا انتبه عليا في التشخيص انتبه عليا بي تي و بي + +230 +00:21:20,080 --> 00:21:23,240 +تي تي ال بي تي و ال بي تي تي كل كوا أخدتوها في + +231 +00:21:23,240 --> 00:21:28,800 +المعمل بي تي بيشخص مين؟ Extrinsic pathway و ال بي + +232 +00:21:28,800 --> 00:21:34,920 +تي تي Intrinsic pathway لأ ال بي تي في ال volume + +233 +00:21:34,920 --> 00:21:40,040 +of gram factor بيكون normal ليش؟ انه انترنزق + +234 +00:21:45,680 --> 00:21:48,740 +أيوة بالظبط هو حامل فاكتور تمانية و فاكتور تمانية + +235 +00:21:48,740 --> 00:21:51,940 +اللي نحتاجه في ال intrinsic و بالتالي ال + +236 +00:21:51,940 --> 00:21:55,840 +prolongation بيصير في ايش؟ في ال BTT بينما ال Bt + +237 +00:21:55,840 --> 00:22:01,660 +بيكون normal طبعا الشباب مهم جدا ان في كثير من + +238 +00:22:01,660 --> 00:22:08,160 +الأحيان بيطلعلك BTT normal ان بعض المرضى لكن هذا + +239 +00:22:08,160 --> 00:22:15,160 +لا يستثني ال bowel membrane disease ليش؟لأن قلنا + +240 +00:22:15,160 --> 00:22:18,280 +عن بعض الأمراض اللي بيكون ال factor فيها normal + +241 +00:22:18,280 --> 00:22:24,500 
+تقريبا زي type I type I اياش تقريبا ما بيعاني + +242 +00:22:24,500 --> 00:22:27,200 +المريض من أي حاجة و ال vomliebrand factor + +243 +00:22:27,200 --> 00:22:31,420 +functioning normally وبالتالي لا يستثني تشخيص من + +244 +00:22:31,420 --> 00:22:41,420 +ال vomliebrand factor طب ال .. في بعض الأحيانboth + +245 +00:22:41,420 --> 00:22:47,700 +tests are prolonged في هذا الحالة يا شباب ده + +246 +00:22:47,700 --> 00:22:51,980 +تستدنى شبوعين ثم تعيد الفحص مرة تانية علشان ايش؟ + +247 +00:22:51,980 --> 00:22:57,060 +علشان تتحقق من هذه النتيجة إذا كان شاكك في ال + +248 +00:22:57,060 --> 00:23:05,540 +disease ماشي إذا فحصين طلقة prolongedإذا الفحصين + +249 +00:23:05,540 --> 00:23:09,360 +طلعوا normal قلنا ال BTT ال normal لا يستثني + +250 +00:23:09,360 --> 00:23:13,300 +تشخيصيا ال vomilibrand إذا التانين طلعوا abnormal + +251 +00:23:13,300 --> 00:23:17,960 +دفتريد الفحص بعد شبعين إذا كنت شاكم في vomilibrand + +252 +00:23:17,960 --> 00:23:23,080 +disease ماشي؟ الأصل يطلع ال BT normal و ال BTT + +253 +00:23:23,080 --> 00:23:25,700 +abnormal ماشي؟ + +254 +00:23:31,170 --> 00:23:34,630 +طب انا نفسي طلع يعني للإثنين و بره ال video اللي + +255 +00:23:34,630 --> 00:23:38,230 +.. 
انت تقعد الفحش بقعدش معايا بقعدش معايا زاد طبعا + +256 +00:23:38,230 --> 00:23:43,870 +لأن المرض مش مهم بالبرادر و بدك تدور على حاجة + +257 +00:23:43,870 --> 00:23:50,270 +تانية لإن واحتمالي كبير تكون خلطان في العينة في + +258 +00:23:50,270 --> 00:23:55,090 +الفحش الأولاني و احنا بنعيد عشان نتحقق من النتيجة + +259 +00:23:55,090 --> 00:23:56,630 +باستخدام عينة + +260 +00:24:04,220 --> 00:24:07,720 +بالنسبة لل building time يا شباب لغاية الآن ماحدش + +261 +00:24:07,720 --> 00:24:15,320 +بيطلبوا لأنه non specific و non sensitive not + +262 +00:24:15,320 --> 00:24:21,080 +specific و not sensitive و the test by itself + +263 +00:24:21,080 --> 00:24:25,540 +subject to varying wild variation يعني varying + +264 +00:24:25,540 --> 00:24:27,480 +results + +265 +00:24:32,210 --> 00:24:37,710 +ماشي؟ ما هي المشاكل التي تواجهنا في تشخيص ال vom + +266 +00:24:37,710 --> 00:24:42,390 +librand disease تبقى على هذه النقاط المهمة مشاكل + +267 +00:24:42,390 --> 00:24:46,990 +التي تواجهنا في تشخيص مين ال vom librand disease + +268 +00:24:46,990 --> 00:24:51,610 +نمرا + +269 +00:24:51,610 --> 00:24:56,630 +واحد ال vom librand level itself قالو it vary + +270 +00:24:56,630 --> 00:24:57,430 +greatly + +271 +00:25:01,970 --> 00:25:05,650 +وبتاخد ال flash لأنه اعتبره acute phase protein + +272 +00:25:05,650 --> 00:25:11,590 +ايش ال acute phase protein يا شباب؟ acute phase + +273 +00:25:11,590 --> 00:25:15,830 +protein هو عبارة عن protein بتشتمع في ال liver في + +274 +00:25:15,830 --> 00:25:20,610 +ظروف معينة بكميات كبيرة من هذه الظروف + +275 +00:25:20,610 --> 00:25:24,730 +psychological stresses زي مثلًا اللي أخدته مثلًا + +276 +00:25:24,730 --> 00:25:29,120 +في ال hormone في ال endocrineإن الواحد لما يكون + +277 +00:25:29,120 --> 00:25:32,000 +خشرا امتحان وخايف منه يقول اللي حقا بكميات كبيرة + +278 +00:25:32,000 --> 00:25:36,140 +وظبط psychological factor بيسموها adrenergic + +279 +00:25:36,140 --> 00:25:41,860 +stimuli او + +280 +00:25:41,860 --> 00:25:45,980 +مثال اخر كمان اخدته اكيد في 
الفحصان سمعتوا بفحص + +281 +00:25:45,980 --> 00:25:51,640 +CRP؟ CR reactive protein سمعتوا فيه؟ اي اكيد + +282 +00:25:51,640 --> 00:25:56,580 +سمعتوا فيه اخدته في السواقل CR reactive protein هو + +283 +00:25:56,580 --> 00:25:57,200 +برا الفحص + +284 +00:25:59,960 --> 00:26:06,940 +ماشي هو ال specific test اللى بنعمله في حالات ال + +285 +00:26:06,940 --> 00:26:11,940 +inflammation غير ال ESR ال ESR بنعتبرها non + +286 +00:26:11,940 --> 00:26:16,580 +specific مشي ليه؟ ياله لإنه في ال inflammatory + +287 +00:26:16,580 --> 00:26:20,320 +reaction CRP C reactive protein هو برعى ال protein + +288 +00:26:20,320 --> 00:26:23,820 +is an acute phase بيصنع بجنونيات كبيرة طول ما في + +289 +00:26:23,820 --> 00:26:28,970 +inflammation و يوم ما روح ال inflammationبروحة + +290 +00:26:28,970 --> 00:26:32,570 +بضبط اللي انتج فإذا كان فيه inflammation هيدينا + +291 +00:26:32,570 --> 00:26:35,690 +positive وإذا فيش inflammation هيدينا negative + +292 +00:26:35,690 --> 00:26:42,390 +ماشي؟ ال vomiting brain factor هو واحد من هذه ال + +293 +00:26:42,390 --> 00:26:45,110 +factor acute phase protein يصنع من كميات كبيرة ان + +294 +00:26:45,110 --> 00:26:51,310 +في ظروف معينة ماشي؟ هو و الferritin seriactive + +295 +00:26:51,310 --> 00:26:58,630 +protein ومجموعة من ايش؟ منالبروتينات بتعتبر C + +296 +00:26:58,630 --> 00:27:05,570 +-reactive بتعتبر acute physiological فبيختلف + +297 +00:27:05,570 --> 00:27:11,050 +تركيزها باختلاف الوضع على سبيل مثال في ال + +298 +00:27:11,050 --> 00:27:17,110 +psychological .. 
physiological stresses انه اه تحت + +299 +00:27:17,110 --> 00:27:22,850 +ضغط نفس الانسان او مريض اجاك طفل صغير اجاك بعيط + +300 +00:27:22,850 --> 00:27:28,640 +على سبيل المثالوهو بعيط طول ما هو بعيط هو النسبة + +301 +00:27:28,640 --> 00:27:34,240 +احتمالي كبير النتيجة تكون خاطئة لأنه هيختلفهاش ال + +302 +00:27:34,240 --> 00:27:39,520 +crying هو عبارة عن physiological stress للطفل واذا + +303 +00:27:39,520 --> 00:27:43,100 +كان صار في physiological stress يعني صار في زيادة + +304 +00:27:43,100 --> 00:27:46,640 +فيهاش في ال vomiting factor يعني لو كان مريض بديه + +305 +00:27:46,640 --> 00:27:50,780 +يطلع normal صح؟ لو كان مريض بديه يطلع عياش normal + +306 +00:27:52,240 --> 00:27:55,340 +في موجود الاستروجين و ال vasopressin و ال growth + +307 +00:27:55,340 --> 00:28:00,620 +hormone و ال adrenergic stimuli كلهم بيعني ال + +308 +00:28:00,620 --> 00:28:06,700 +adrenaline release كلهم بيأدوا إلى زيادة ال + +309 +00:28:06,700 --> 00:28:12,400 +vomwillebrane factor هو بالتالي ال vomwillebrane + +310 +00:28:12,400 --> 00:28:16,460 +factor ممكن يطلع normal ماشي + +311 +00:28:20,140 --> 00:28:25,700 +شو لازم أعمل في هذه الحالة؟ الأصل أني أعيد الفحص + +312 +00:28:25,700 --> 00:28:32,760 +to confirm the results فبينصحوا دائما بإنه نعيد + +313 +00:28:32,760 --> 00:28:38,360 +الفحص عدة مرات in a two weeks interval بين كل فحص + +314 +00:28:38,360 --> 00:28:47,000 +و التاني أسبوعين ماشي عشان إيه؟ عشان أصل إلى تشخيص + +315 +00:28:47,000 --> 00:28:48,340 +صحيح + +316 +00:28:50,550 --> 00:28:53,970 +طبعا في .. 
ينصح أثناء أخذ العينة أن المريض يكون + +317 +00:28:53,970 --> 00:28:59,610 +بعيد عن أي stresses من ضمنها أي hemorrhage، أي + +318 +00:28:59,610 --> 00:29:05,290 +اللي هو infection أو extreme exercise، كل هذه ممكن + +319 +00:29:05,290 --> 00:29:10,390 +تعمل pregnancy، لازم تكون بعيد عن إيه هذه الشغلات + +320 +00:29:10,390 --> 00:29:16,010 +عشان تصل إلى نتيجة كبيرةيبقى هذه المشكلة التانية + +321 +00:29:16,010 --> 00:29:18,750 +اللي بتواجهنا المشكلة التالتة قالوا ان في بعض + +322 +00:29:18,750 --> 00:29:24,430 +الناس بتتأثر نسبة ال factor حسب فصيلة الدم وهذه + +323 +00:29:24,430 --> 00:29:29,110 +عبارة عن statistical data ماشي؟ قالوا حسب نوع الدم + +324 +00:29:29,110 --> 00:29:38,030 +بنلاقي ال factor يعني بيختلف من شخص إلى آخر طيب + +325 +00:29:38,030 --> 00:29:44,610 +كيف نشخص يا شباب؟ كيف نشخص؟شوفوا ميهر في حوصاتنا + +326 +00:29:44,610 --> 00:29:48,030 +اللي بنعملها قالوا بنعمل نمر واحد restو الست مع ال + +327 +00:29:48,030 --> 00:29:51,470 +C اشهر + +328 +00:29:51,470 --> 00:29:51,950 +restو الست + +329 +00:30:00,930 --> 00:30:04,590 +في aggregation في ايش بيجيس ال aggregation؟ في + +330 +00:30:04,590 --> 00:30:07,910 +agregometer، بصبوط؟ عشان انا اخدناها كله؟ الجهاز + +331 +00:30:07,910 --> 00:30:13,130 +ال agregometer؟ اه كل ال agonists دي طيب ايش بيعمل + +332 +00:30:13,130 --> 00:30:15,630 +في ال resource system؟ جاله resource system، اذا + +333 +00:30:15,630 --> 00:30:24,450 +فكرينش بقى، يستطيع ان ينشط حتى البلاتلي المحفوظة + +334 +00:30:24,450 --> 00:30:30,590 +في الforma geneمجبوط، فهو strong agonist، strong + +335 +00:30:30,590 --> 00:30:36,770 +agonist، ماشي؟ فبنشوف تأثيره على الـplatelet + +336 +00:30:36,770 --> 00:30:43,810 +function، ماشي؟ في وجود الفرق اللي بناه، ماشي + +337 +00:30:43,810 --> 00:30:49,860 +بنعمل، بروح و احنا جايبينVomit Repellent Factor + +338 +00:30:49,860 --> 00:30:53,560 +أيها induce الـ Vomit Repellent Factor to bind + +339 +00:30:53,560 --> 00:30:59,780 +with the platelet receptor برتفت في مين؟ GP1B + +340 +00:30:59,780 --> 
00:31:04,820 +مظبوط طب إيش بده يعمل؟ بده يعمل adhesion، هذي + +341 +00:31:04,820 --> 00:31:12,380 +جابلي ال aggregation، مظبوط؟ طيب، ثم ب .. الرسو .. + +342 +00:31:12,380 --> 00:31:16,420 +بتقيس الرسو في certain cofactor activity من خلال + +343 +00:31:17,540 --> 00:31:21,980 +الـ Agglutination او Metabolically Inactive + +344 +00:31:21,980 --> 00:31:32,160 +Platelet تبدأ تترسل ثم + +345 +00:31:32,160 --> 00:31:37,960 +بتعمل Ressosectin Induced Platelet Activation RIPA + +346 +00:31:37,960 --> 00:31:44,240 +اختصار لمين؟ Ressosectin Induced + +347 +00:31:44,240 --> 00:31:46,420 +Platelet + +348 +00:31:55,420 --> 00:31:59,420 +activation طبعا هذا عبارة عن أياش بتقيس ال + +349 +00:31:59,420 --> 00:32:02,500 +metabolically active platelet الكلام هذا كله + +350 +00:32:02,500 --> 00:32:05,720 +بتقيسه بوسطة ال agregometer which is used to + +351 +00:32:05,720 --> 00:32:08,440 +measure the rate of aggregation + +352 +00:32:24,350 --> 00:32:30,570 +خلصته؟ ماشي؟ طيب شغلت تانية immunological + +353 +00:32:30,570 --> 00:32:36,190 +test بتقيس ال vowel brand antigen عشان تقيس vowel + +354 +00:32:36,190 --> 00:32:39,250 +brand antigen بدك تعمله antibody فبتصير التفاعل + +355 +00:32:39,250 --> 00:32:43,070 +antigen-antibody reaction بتقيسه كيف؟ quantitative + +356 +00:32:43,070 --> 00:32:48,120 +يا شباب بالEise Techniqueمش أخدتوها ده الـ ELISA؟ + +357 +00:32:48,120 --> 00:32:53,320 +100% بتعتبره بروتين، بتكونله antibody وcast by + +358 +00:32:53,320 --> 00:33:02,100 +ELISA quantitatively الأصل يا شباب أنه النتائج بين + +359 +00:33:02,100 --> 00:33:07,200 +الفحوصات المختلفة تتوافق بين الفحوصات المختلفة + +360 +00:33:07,200 --> 00:33:12,460 +تتوافق وعدم توافقها بيعني انه بيخلينا in shock في + +361 +00:33:12,460 --> 00:33:17,730 +انه ال Voglebrand اللي احنا بندور عليه هو منالنوع + +362 +00:33:17,730 --> 00:33:21,610 +ال qualitative type مش ال quantitative يعني type + +363 +00:33:21,610 --> 00:33:26,610 +two مش type واحد او ايش او اتنين و هي type two + +364 +00:33:26,610 --> 
00:33:31,070 +عشان نشخص بالظبط اي نوع منه بدنا نعمل further + +365 +00:33:31,070 --> 00:33:35,610 +investigation صح؟ لتالي بدنا نتابع و نشوف ايش ال + +366 +00:33:35,610 --> 00:33:40,050 +investigation فبالقيم ندرس ال multiverse forms + +367 +00:33:40,050 --> 00:33:45,270 +المختلفة في ال of whom ال brandو بندرسها بواسطة + +368 +00:33:45,270 --> 00:33:50,030 +بفحص، إيش؟ + +369 +00:33:50,030 --> 00:33:53,390 +كيف بدك تعرف الأنواع المختلفة من ال multi-merge في + +370 +00:33:53,390 --> 00:34:04,030 +الدنيا؟ جيل ال Electrophoresis؟ + +371 +00:34:05,010 --> 00:34:07,150 +وانتش هاك عليها هي Electrophoresis فانا أقول اللي + +372 +00:34:07,150 --> 00:34:11,070 +هو paper هي Electrophoresis ال Electrophoresis + +373 +00:34:11,070 --> 00:34:15,850 +بتعرفوا ال principle تبعه بيصير عملية فصل between + +374 +00:34:15,850 --> 00:34:22,050 +different components بناء على ال charge where ال + +375 +00:34:22,050 --> 00:34:27,610 +size فهذا هو الفحص شايفين الفحص يا شباب؟ شايفين + +376 +00:34:27,610 --> 00:34:32,390 +الصورة؟ هذا normal هي عبارة عن multimers كل واحد + +377 +00:34:32,390 --> 00:34:38,380 +.. 
كل ..كل form أخد عياش band شايفين ال patient + +378 +00:34:38,380 --> 00:34:44,680 +قداش نص العياش ال normal نص العياش ال normal + +379 +00:34:44,680 --> 00:34:54,040 +بالتالي ممكن نشخص منه بسهولة treatment بيعاجوا + +380 +00:34:54,040 --> 00:35:00,720 +المرض هذا بال DDAVB خدناه المرض هذيك وقولنا هيش + +381 +00:35:00,720 --> 00:35:08,140 +بعملخدناك علاج لمرضى الهموفيليا مرضى الهموفيليا + +382 +00:35:08,140 --> 00:35:13,080 +عندهم نقص في factor 8 وقولنا الناس اللي عندهم mild + +383 +00:35:13,080 --> 00:35:19,860 +hemophilia نديهم DDABB عشان بالظبط بتطلع الفوم + +384 +00:35:19,860 --> 00:35:23,580 +الابرهن من مخالبه فبيزود الفوم الابرهن اللي بده + +385 +00:35:23,580 --> 00:35:27,980 +يحمي الكمية القليلة من factor 8 طيب يعني هو بيزود + +386 +00:35:27,980 --> 00:35:30,500 +الفوم الابرهن طب الأولى أنه نعالج مريض الفوم + +387 +00:35:30,500 --> 00:35:36,720 +الابرهن يا بابا، مجبوط؟فبالتالي هو علاج لل Vomit + +388 +00:35:36,720 --> 00:35:40,940 +Bread Cryoprecipitate هو بره ان احد ال blood + +389 +00:35:40,940 --> 00:35:46,140 +component بيحتوي على كمية كبيرة من ال Vomit Bread + +390 +00:35:46,140 --> 00:35:49,740 +فاكتور تمانية concentrate برضه احد ال plasma + +391 +00:35:49,740 --> 00:35:57,740 +component اللى بيحضر التحضير بالمصانع و بيحتوي + +392 +00:35:57,740 --> 00:36:02,830 +ايضا على كمية كبيرة من ايش؟ من ال Vomit Breadطبعا + +393 +00:36:02,830 --> 00:36:09,410 +شباب هذا ال treatment اول صفحتين و التالتين هذا و + +394 +00:36:09,410 --> 00:36:15,210 +هذا يعني كيفية العلاج + +395 +00:36:15,210 --> 00:36:20,510 +و ال protocol العلاج و كدهش لازم نعطي يعني مش كتير + +396 +00:36:20,510 --> 00:36:26,390 +احنا معانين فيه هنا بقى كمان من ال treatment هدا + +397 +00:36:26,390 --> 00:36:30,190 +هي ال platelet transfusion انك تعطي plateletمظبوط + +398 +00:36:30,190 --> 00:36:33,990 +واحد عنده كمية ال vom lipaland قليلة طب زود ال + +399 +00:36:33,990 --> 00:36:42,030 +platelet ماشي عشان ايه؟ عشان يتزود اللي هو ال vom + +400 +00:36:42,030 --> 00:36:45,450 +lipaland لإنه كمان ال 
platelet تحتوي على vom + +401 +00:36:45,450 --> 00:36:49,110 +lipaland طب ما cryoprecipitate قلنا عبارة عن هو + +402 +00:36:49,110 --> 00:36:52,930 +جزء من ال human plasma بيحتوي على كمية كبيرة من ال + +403 +00:36:52,930 --> 00:36:57,750 +factor 8 و vom lipaland و ال fresh frozen plasma + +404 +00:36:57,750 --> 00:37:02,820 +أيضابيحتوي على كمية كبيرة من ال phone و من الأبناء + +405 +00:37:02,820 --> 00:37:09,300 +احد عنده سؤال؟ + +406 +00:37:09,300 --> 00:37:14,520 +حد عنده سؤال يا شباب؟ + +407 +00:37:14,520 --> 00:37:20,740 +طيب نكمل في ال DIC DIC اختصار ل disseminate + +408 +00:37:20,740 --> 00:37:27,680 +Intravascular Chiropractic ماشي disseminated + +409 +00:37:27,680 --> 00:37:28,400 +Intravascular + +410 +00:37:34,790 --> 00:37:40,590 +Syndrome بيتميز بـ Systemic، إيش يعني Systemic؟ + +411 +00:37:40,590 --> 00:37:45,410 +يعني كل الجسم ده، ماشي، ينتشر Intravascular + +412 +00:37:45,410 --> 00:37:52,090 +Coagulopathy جلطات منتشرة في جميع أنحاء الجسم، + +413 +00:37:52,090 --> 00:37:59,420 +يعني بدايته إيش؟ جلطة، ماشي؟ and clotis the + +414 +00:37:59,420 --> 00:38:04,060 +initial event يعني الحدث الأولى هو إيش؟ هو cloth + +415 +00:38:04,060 --> 00:38:09,640 +formation ومعظم الوفيات بتعتمد على ال extent of + +416 +00:38:09,640 --> 00:38:13,460 +intramuscular thrombosis، جداش صار فيه جلطات + +417 +00:38:13,460 --> 00:38:21,440 +الوفيات بتعتمد على إيش؟ جداش صار فيه جلطات ومن + +418 +00:38:21,440 --> 00:38:26,100 +أسبابه .. 
أسبابه متعددة أسبابه متعددة عند حد أنا + +419 +00:38:26,100 --> 00:38:26,960 +تطرق إليها + +420 +00:38:31,540 --> 00:38:35,940 +بتنتشر بشكل سيستيميك في جميع حالة الجسم وحسب + +421 +00:38:35,940 --> 00:38:42,020 +انتشارها بتعتمد حالات الوفاية على انتشار هذه + +422 +00:38:42,020 --> 00:38:48,920 +الجبطات واسبابهم متعددة طيب acquired syndrome + +423 +00:38:48,920 --> 00:38:52,580 +بتتميز بـ Intravascular Coagulability وهذا اللي + +424 +00:38:52,580 --> 00:38:56,600 +تاوم حاكيناه كله مهم شوفوا كيف بتتم الحكاية شبكة + +425 +00:38:56,600 --> 00:39:00,180 +تنتبه عليها هي Systemic Activation of Coagulation + +426 +00:39:01,270 --> 00:39:07,450 +Cascade Systemic يعني إيش؟ في كلها، بالظبط، مش + +427 +00:39:07,450 --> 00:39:11,830 +localized هذا بيؤدي إلى Intravascular بعد فترة + +428 +00:39:11,830 --> 00:39:17,050 +بتضل تعمل جلطة، تعمل جلطة، تعمل جلطة المواد الخام + +429 +00:39:17,050 --> 00:39:24,510 +بتضل موجودة، بتخلص، صح؟ فبدأ جبل ما تخلص يصير فيه + +430 +00:39:24,510 --> 00:39:28,830 +Intravascular Deposition أو Fiber بتكون جلطات من + +431 +00:39:28,830 --> 00:39:34,400 +ال fiber في أماكن متعددةTroposis of a small and + +432 +00:39:34,400 --> 00:39:41,320 +mid-sized vessel وهو يؤدي إلى ischemia و organ + +433 +00:39:41,320 --> 00:39:47,360 +failure، مظبوط؟ لكن زي ما قلتلكوا، استمرار هذه + +434 +00:39:47,360 --> 00:39:54,840 +الحالة بيتطلب أو بيتبعوا أن المواد الخامة تخلص من + +435 +00:39:54,840 --> 00:39:58,520 +الـplatelet و ال coagulation factor وإذا خلصها، + +436 +00:39:58,520 --> 00:40:02,920 +يخش المريض في إيه؟ في bleedingوالاتنين بيعدوا + +437 +00:40:02,920 --> 00:40:06,640 +للوفاة + +438 +00:40:06,640 --> 00:40:13,700 +عيد تاني عيد تاني Systemic احنا بنقول + +439 +00:40:13,700 --> 00:40:18,440 +Indravascular Coagulopathy يعني في activation + +440 +00:40:18,440 --> 00:40:21,620 +الكواجوليشن كاسكيت ميكاليزم في مكان واحد في الجسم + +441 +00:40:21,620 --> 00:40:27,120 +لأ في أماكن مختلفة فمحصلة نشاط الكواجوليشن كاسكيت + +442 +00:40:27,120 --> 00:40:32,080 +تكوين 
fibrin يعني في جلطات وإذا صار فيه fibrin
+
+443
+00:40:32,080 --> 00:40:38,780
+formation شو بيعمل؟ جلطات في ال small و ال mid
+
+444
+00:40:38,780 --> 00:40:45,740
+-sized vessel هذا بيؤدي ليش؟ 100% وإذا صار فيه
+
+445
+00:40:45,740 --> 00:40:50,520
+ischemia فيه organ failure استمرت الحكاية هيؤدي
+
+446
+00:40:50,520 --> 00:40:54,000
+إلى depletion of coagulation factor and platelet
+
+447
+00:40:54,000 --> 00:40:59,780
+وهذا بيؤدي إلى bleeding وقلت ليه؟ طبعا سواء كانت
+
+448
+00:40:59,780 --> 00:41:06,100
+جلطة أو اسكيميا و الـ bleeding بسبب الوفاة
+
+449
+00:41:06,100 --> 00:41:09,820
+pathophysiology
+
+450
+00:41:09,820 --> 00:41:15,400
+of DIC كيف يتم ترتيب الميكانيزم كيف تم ترتيبه
+
+451
+00:41:15,400 --> 00:41:19,360
+قاليته activation of blood coagulation ما اتفقنا
+
+452
+00:41:19,360 --> 00:41:23,680
+ثم suppression of physiological anticoagulant كل
+
+453
+00:41:23,680 --> 00:41:27,760
+الـ anticoagulant في البداية مثل الـ unblocking
+
+454
+00:41:27,760 --> 00:41:33,020
+شغال الـ Coagulant الـ Pro-Coagulant يشغّل الـ Anti
+
+455
+00:41:33,020 --> 00:41:37,000
+-Coagulant و أقفل وهي بيصير فيه impairment للـ
+
+456
+00:41:37,000 --> 00:41:40,500
+Fibrinolysis يعني مافيش إذهاب للجلطة أحنا قلنا الـ
+
+457
+00:41:40,500 --> 00:41:43,420
+Fibrinolysis ممكن يعمل ال bleeding مافيش إذهاب
+
+458
+00:41:43,420 --> 00:41:48,700
+للجلطة لكن بيصير كمان cytokine release ونشوف واحد
+
+459
+00:41:48,700 --> 00:41:52,620
+واحد هذه العوامل شو بتأثر activation و blood
+
+460
+00:41:52,620 --> 00:41:56,280
+coagulation يا شباب كيف ممكن يصير blood
+
+461
+00:41:56,280 --> 00:41:56,920
+coagulation
+
+462
+00:41:59,650 --> 00:42:05,770
+Cascade Activation Tissue Factor صح؟ وهذه من أخطر
+
+463
+00:42:05,770 --> 00:42:11,790
+الحاجات اللي بتصير يا شباب انه مريض لسبب ما يصير
+
+464
+00:42:11,790 --> 00:42:16,250
+blood vessel injury و ب release of tissue factor و
+
+465
+00:42:16,250 --> 00:42:20,210
+إذا وصل ال tissue factor للدم ممكن يسبح في الدم و
+
+466
+00:42:20,210 --> 
00:42:25,210 +يصير يكون جلطات في مناطق مختلفة من الجسم صح؟ فهي + +467 +00:42:25,210 --> 00:42:31,330 +tissue factor factor 7AThrombin generation يتم + +468 +00:42:31,330 --> 00:42:39,490 +تنشيطه من خلال الـ Extrinsic pathway و بذكركم ان + +469 +00:42:39,490 --> 00:42:46,150 +ال Extrinsic is very short يعني لتنشيطه بسرعة طبعا + +470 +00:42:46,150 --> 00:42:50,330 +complex activate factor 9 و factor 10 و تشوف + +471 +00:42:54,770 --> 00:43:00,010 +of multiple places ماشي يعني موجود في أناق مختلفة + +472 +00:43:00,010 --> 00:43:04,330 +من الجسم في ال endothelial cells في ال monocytes و + +473 +00:43:04,330 --> 00:43:07,410 +extravasculary موجود في ال lung و ال kidney و ال + +474 +00:43:07,410 --> 00:43:10,230 +brain و ال endothelial cell ال epithelial cell + +475 +00:43:10,230 --> 00:43:15,870 +ماشي هذه مواقع حساسة و ال uterus مواقع حساسة + +476 +00:43:15,870 --> 00:43:20,730 +مليانة بالاش من ال tissue factorيبقى نمرا واحد + +477 +00:43:20,730 --> 00:43:23,590 +activation of blood coagulation through tissue + +478 +00:43:23,590 --> 00:43:27,430 +factor release اللي بيكون extra vascular و بتقوم + +479 +00:43:27,430 --> 00:43:30,890 +يصلل ال vascular يعني بيصلل الدم و هناك بيكوّنش + +480 +00:43:30,890 --> 00:43:36,690 +ثرمبين formation و fibrin formation نمرا اتنين + +481 +00:43:36,690 --> 00:43:40,050 +عشان بقى بيصير فيه suppression لل anticoagulant + +482 +00:43:40,050 --> 00:43:46,690 +ARM و بيتمثل ب reduction of antithrombin 13 + +483 +00:43:49,380 --> 00:43:52,540 +كمان بيصير فيه impairment ل protein C و protein S + +484 +00:43:52,540 --> 00:43:57,880 +و هدولة كلنا بنعرفهم صح؟ و برضه مش تلك + +485 +00:43:57,880 --> 00:44:02,620 +anticoagulant طبعا، ثم بيصير فيه insufficient of + +486 +00:44:02,620 --> 00:44:04,640 +regulation of tissue factor pathway inhibitor + +487 +00:44:04,640 --> 00:44:12,420 +وبالتالي بيصير فيه inhibition لالـ 107 يعني، + +488 +00:44:12,420 --> 00:44:12,840 +عارفين؟ + +489 +00:44:15,550 --> 00:44:19,450 +التانية Impermental Fibrinolysis، مين 
بيعمل + +490 +00:44:19,450 --> 00:44:27,250 +Fibrinolysis؟ Plasminogen activator بيعمل + +491 +00:44:27,250 --> 00:44:32,730 +Fibrinolysis طيب لو نشطنا ال inhibitor بتاع ال + +492 +00:44:32,730 --> 00:44:36,830 +activator، شو بنعمل؟ 100%، 100%، هذا اللي بيصير، + +493 +00:44:36,830 --> 00:44:40,970 +Impermental FibrinolysisCytokine release شباب برضه + +494 +00:44:40,970 --> 00:44:45,750 +ميكانيزم رابع نجوا انه في حالات ال DIC بيصير فيه + +495 +00:44:45,750 --> 00:44:50,410 +secretion ل certain cytokines منها ال I L6 و I L1 + +496 +00:44:50,410 --> 00:44:57,430 +و tumor necrosis factor و هذا بيعمل dysregulation + +497 +00:44:57,430 --> 00:45:03,150 +لل anticoagulant pathway و ي modulate I L6 I L10 + +498 +00:45:03,150 --> 00:45:07,610 +كمان modulated activation coagulation factorأو + +499 +00:45:07,610 --> 00:45:10,630 +coagulation cascade وبالتالي فى علاقة مباشرة جدا + +500 +00:45:10,630 --> 00:45:14,310 +ما بين ال coagulation والinflammation أو ال + +501 +00:45:14,310 --> 00:45:20,950 +inflammatory process طيب كيف نشخص مريض سرير عنده + +502 +00:45:20,950 --> 00:45:26,450 +DIC مريض سرير عنده DIC ال brothers of disease + +503 +00:45:26,450 --> 00:45:34,530 +associated with DIC ده نشوف ان الطلاع على مظاهر + +504 +00:45:34,530 --> 00:45:41,000 +مرضية بتكونموجودة appropriate clinical setting + +505 +00:45:41,000 --> 00:45:46,040 +لنشوف إذا كان في أي evidence of thrombosis أو + +506 +00:45:46,040 --> 00:45:50,280 +hemorrhage لإنه مرض من هالنوع يبدأ سال بجلطة + +507 +00:45:50,280 --> 00:45:55,960 +وينتهي فيه ب bleeding laboratory studies no single + +508 +00:45:55,960 --> 00:46:00,020 +test is accurate serial tests are more helpful + +509 +00:46:00,020 --> 00:46:06,140 +than single test يجب أن تعمل مجموعة من الفعصاتومن + +510 +00:46:06,140 --> 00:46:11,440 +أسبابه يا شباب شوفوا أسبابه إيش مالك نانسي release + +511 +00:46:11,440 --> 00:46:16,380 +of tissue factor cardiovascular hypothermia or + +512 +00:46:16,380 --> 00:46:19,760 +hyperthermia الناس اللي متعرضة لدرجة حرارة 
واطية + +513 +00:46:19,760 --> 00:46:24,300 +أو عالية المنري acute respiratory distress + +514 +00:46:24,300 --> 00:46:30,780 +syndrome أسدوسيز وانكسيا collagen vascular disease + +515 +00:46:30,780 --> 00:46:32,260 +وأنفلاك + +516 +00:46:39,800 --> 00:46:43,720 +كمان من الأسباب اللى بتؤدي الى DIC infectious + +517 +00:46:43,720 --> 00:46:47,180 +agent زى الـSepticemia سواء كان بكتيريا أو الفيلر + +518 +00:46:47,180 --> 00:46:52,400 +أو الفنجر ثم Intravascular Hemolysis لإنه بطلع + +519 +00:46:52,400 --> 00:47:00,940 +تشوفاكتر ثم acute liver disease و تشوفاكتر و + +520 +00:47:00,940 --> 00:47:05,540 +Obstetric من المحطات المهمة جدا يا شباب هي محطة + +521 +00:47:05,540 --> 00:47:11,140 +الولادة لإنه في الولادة بإمكانية انهيصير فيه + +522 +00:47:11,140 --> 00:47:19,320 +tissue factor release عالية جدا جدا جدا طبعا سيبقى + +523 +00:47:19,320 --> 00:47:26,080 +بهذه ال symptoms المختلفة الجسم بالإنسان يعانى من + +524 +00:47:26,080 --> 00:47:29,500 +different symptoms ممكن نشوفها في ال skin في ال + +525 +00:47:29,500 --> 00:47:33,380 +central nervous system في الكني في ال heart في + +526 +00:47:33,380 --> 00:47:40,020 +البلمونريفي كل الحالات المرضية طبعا محصلتها في + +527 +00:47:40,020 --> 00:47:45,880 +البداية ال كيميا لإنه فيه thrombosis ثم فيه + +528 +00:47:45,880 --> 00:47:51,280 +bleeding عشان فعلى سبيل المثال ال skin أول شي + +529 +00:47:51,280 --> 00:47:56,900 +بيصير فيها peritrophalminas و جانجرين لإنه فيه + +530 +00:47:56,900 --> 00:48:03,140 +جلطان ثم بعد ذلك بنشوف في بتاشي و ecomosis لإنه + +531 +00:48:03,140 --> 00:48:08,250 +صار فيه bleedingفاهمين عليها؟ فمريض ال DIC بيمر في + +532 +00:48:08,250 --> 00:48:14,250 +عدة مراحل حسب المرحلة بيصير فيه symptoms وعندنا + +533 +00:48:14,250 --> 00:48:20,610 +حسب المرحلة بتشوف نتيجة فحص لإن في كل مرحلة في فحص + +534 +00:48:20,610 --> 00:48:27,310 +مختلف في نتيجة مختلفة وحسب المرحلة بيتعالج ماشي؟ + +535 +00:48:27,310 --> 00:48:30,410 +يعني مريض مثلا قاعد بيعمل جلطة منه بيقول انت .. 
+ +536 +00:48:30,410 --> 00:48:33,550 +انت .. كواجولة لكن مريض خاش في bleeding + +537 +00:48:37,810 --> 00:48:44,090 +بالتالي حسب المرحلة بتتأثر تطلع نتيجة الفحص و إيش + +538 +00:48:44,090 --> 00:48:51,410 +و أولاشي ال symptoms ثم نتيجة الفحص ثم العلاج و + +539 +00:48:51,410 --> 00:48:54,750 +هذه مظاهر مرضية شايفين الصور؟ برنامج دي بصور لاحقش + +540 +00:48:54,750 --> 00:48:58,950 +أنا في bleeding و بتاشي بتاشي و أكموزة شايفين + +541 +00:48:58,950 --> 00:49:03,740 +بتاشي و أكموزة؟ هذه مرحلة متقدمةهى gangrenous + +542 +00:49:03,740 --> 00:49:10,640 +شايفين ان صار فى microvessels from pie وبالتالى + +543 +00:49:10,640 --> 00:49:17,780 +قدت إلى ان بيصير فى عنده gangrenous طبعا + +544 +00:49:17,780 --> 00:49:21,920 +كمان من الحاجات اللى بيصير ان ال blood film لو + +545 +00:49:21,920 --> 00:49:24,820 +عملت blood film يا شباب بتلاقي microscopic finding + +546 +00:49:24,820 --> 00:49:26,560 +schistocytes + +547 +00:49:29,720 --> 00:49:33,060 +fragments،شيستيو سايز، pausty أو platelet، إيش + +548 +00:49:33,060 --> 00:49:41,440 +يعني pausty؟ قلة في ال platelet، وفي حاجات قولنا + +549 +00:49:41,440 --> 00:49:45,170 +ال single test is not enoughمنها D-Diamar, + +550 +00:49:45,290 --> 00:49:49,870 +Antithrombin 3, Fibrin monoxide A وB, FTB, + +551 +00:49:50,030 --> 00:49:53,570 +Platelet count, Protamine test، كل عبارة عن فحوصات + +552 +00:49:53,570 --> 00:50:05,490 +مختلفة بتطلب وبالتالي بتشخص حسب نتيجة الفحوصة، + +553 +00:50:05,490 --> 00:50:10,310 +بنعرف في أي مرحلة المريض وصل، في أي مرحلة المريض + +554 +00:50:10,310 --> 00:50:16,790 +وصلفممكن يحصل عنده thrombocytopenia ممكن يحصل فيه + +555 +00:50:16,790 --> 00:50:20,650 +prolongation في الـBT وBTT طبعا هو كله استهلك + +556 +00:50:20,650 --> 00:50:27,630 +مظبوط ممكن نشوف FDB أو D-Diamond نشوف low level of + +557 +00:50:27,630 --> 00:50:31,030 +anticoagulant زي الـ Antithrombin 3 أو البروتين C + +558 +00:50:31,030 --> 00:50:35,970 +ولو فحصنا العوامل المختلفة خمسة أو تمانية أو تسعة + +559 +00:50:35,970 --> 00:50:42,040 +كلها 
ملاجئة واطية والـ Fibrinogen الوحيد اللي هي لا
+
+560
+00:50:42,040 --> 00:50:46,300
+يفيد في التشخيص لأن كميته كبيرة واستهلاكه بيطول
+
+561
+00:50:46,300 --> 00:50:49,540
+وبالتالي
+
+562
+00:50:49,540 --> 00:50:55,460
+مفدش في التشخيص differential diagnosis ضروري جدا
+
+563
+00:50:55,460 --> 00:51:00,240
+عشان تشخص DIC تفرّق بين ال DIC و الأمراض الأخرى زي
+
+564
+00:51:00,240 --> 00:51:05,060
+ال severe liver failure شو الصفة المشتركة بينه و
+
+565
+00:51:05,060 --> 00:51:06,100
+بين ال DIC؟
+
+566
+00:51:09,640 --> 00:51:13,380
+بالظبط ال liver اللى بيصنع coagulation فى ال liver
+
+567
+00:51:13,380 --> 00:51:15,640
+الفيريالى يعني مافيش coagulation فاكترواها مافيش
+
+568
+00:51:15,640 --> 00:51:20,680
+coagulation وفيتامين K deficiency تدينا نفس الصورة
+
+569
+00:51:20,680 --> 00:51:27,040
+liver disease TTP و اللى بيصير فيها جلطات متكررة
+
+570
+00:51:27,040 --> 00:51:32,080
+ثم congenital abnormalities of fibrin and HELLP
+
+571
+00:51:32,080 --> 00:51:37,290
+syndrome اللى هو hereditary elevated liver test
+
+572
+00:51:37,290 --> 00:51:43,690
+with low platelet HELLP اختصارنا ايه يا شباب
+
+573
+00:51:43,690 --> 00:51:48,770
+hereditary elevation
+
+574
+00:51:48,770 --> 00:51:58,130
+of liver test with
+
+575
+00:51:58,130 --> 00:52:03,630
+low platelet علاج
+
+576
+00:52:03,630 --> 00:52:04,150
+طبعا
+
+577
+00:52:06,870 --> 00:52:10,930
+أول شي stop the triggering factor طبعا ال
+
+578
+00:52:10,930 --> 00:52:14,870
+triggering factor بدك توقفه في كتير من الأحيان
+
+579
+00:52:14,870 --> 00:52:21,730
+عملية التوقيف تكون صعبة مالا تكون اقوى the only
+
+580
+00:52:21,730 --> 00:52:26,610
+proven treatment أنك توقف مين تسبب ال DIC
+
+581
+00:52:26,610 --> 00:52:31,950
+بعدين تدي support in the treatment طبعا support in
+
+582
+00:52:31,950 --> 00:52:32,450
+the treatment
+
+583
+00:52:38,010 --> 00:52:43,530
+ليش؟ في بداية المرحلة؟ في البداية اه في بداية اللي
+
+584
+00:52:43,530 --> 00:52:46,950
+داكه اه ممكن ال non specific treatment is
+
+585
+00:52:46,950 --> 00:52:51,230 +available حسب المرحلة اللي انا بقوله ممكن تعطي + +586 +00:52:51,230 --> 00:52:55,890 +plasma و platelet substitution ممكن تعطي + +587 +00:52:55,890 --> 00:53:00,050 +anticoagulant إذا كان في البداية ممكن تعطي + +588 +00:53:00,050 --> 00:53:06,110 +inhibitors إذا كان في البداية وكذاطبعا هذه ال + +589 +00:53:06,110 --> 00:53:15,130 +plasma therapy نديها وقتيش إذا في bleeding ماشي + +590 +00:53:15,130 --> 00:53:19,710 +fresh frozen plasma برضه بتنعطف في حالات نقص بعض + +591 +00:53:19,710 --> 00:53:24,010 +العوامل platelet therapy إذا ال platelet كانت وطيط + +592 +00:53:24,010 --> 00:53:29,990 +ماشي و blood إذا صار عند المريض أنهيميا or disease + +593 +00:53:29,990 --> 00:53:34,840 +destructionQuagination Inhibitors، Antithrombin 3 + +594 +00:53:34,840 --> 00:53:37,240 +و Protein C و Tissue Factor Pathway Inhibitor و + +595 +00:53:37,240 --> 00:53:43,080 +Heparin طبعا لأنه بيستهلكوا كمان بيعطوه و هذا + +596 +00:53:43,080 --> 00:53:48,180 +العلاج أنا مش راح أخد فيه بالتفصيل Anti + +597 +00:53:48,180 --> 00:53:55,880 +Fibrillatic زي ما قال برضه بتنعطى في دي دايات الحد + +598 +00:53:55,880 --> 00:53:59,200 +اللي مش عارف الشغل، طيب no question يعني؟ + diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/cYsH46grQoA_raw.srt b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/cYsH46grQoA_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..a0ef7618e2c4e53d3f7a123b81b6d87b855a78ee --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/cYsH46grQoA_raw.srt @@ -0,0 +1,2396 @@ +1 +00:00:20,840 --> 00:00:28,260 +بسم الله الرحمن الرحيم اليوم ان شاء الله هنكمل في + +2 +00:00:28,260 --> 00:00:32,520 +ال hereditary bleeding disorders و هنحكي في ال vom + +3 +00:00:32,520 --> 00:00:37,520 +libra disease و هنتطرق لل vom libra disease من عدة + +4 +00:00:37,520 --> 00:00:42,200 +زوايا بداية هنمسك ال factor و هو vom libra factor + +5 +00:00:42,200 --> 00:00:46,340 +من ناحية ال structure و location و function ثم + +6 +00:00:46,340 --> 00:00:47,880 +هنمسك 
ال disease + +7 +00:00:53,480 --> 00:00:59,380 +and diagnosis و treatment بداية + +8 +00:00:59,380 --> 00:01:04,320 +vomolibran disease هو عبارة عن مرض يعتبر family + +9 +00:01:04,320 --> 00:01:08,260 +bleeding disorders ومن اسمه vomolibran disease + +10 +00:01:08,260 --> 00:01:12,360 +بيعني ان vomolibran factor اما absent او + +11 +00:01:12,360 --> 00:01:17,860 +dysfunctional اما absent او dysfunctional بروتين + +12 +00:01:17,860 --> 00:01:26,770 +زي اي بروتين لصنعفي الجسم من خلال ال gene ال gene + +13 +00:01:26,770 --> 00:01:33,870 +تبعه موجود على شفر تقرم و في كرموزوم 12 و بيصير له + +14 +00:01:33,870 --> 00:01:37,730 +expression لهذا ال gene في مكانين ال endothelial + +15 +00:01:37,730 --> 00:01:40,450 +cell of ال megakaryocyte و احنا عارفين ان ال + +16 +00:01:40,450 --> 00:01:44,830 +endothelial cell يتصنع بعض ال gram factor و تتخزن + +17 +00:01:45,280 --> 00:01:51,880 +ثم ال mega-karyocytes برضه بتصنع اللي هو إيش ال + +18 +00:01:51,880 --> 00:01:57,020 +vomiting brain factor بيصنع الفاكتور as + +19 +00:01:57,020 --> 00:02:02,200 +preliminary structures ثم بيتطور إلى modified + +20 +00:02:02,200 --> 00:02:07,440 +structure من خلال عمليات كيميائية تتضمن بلمرة + +21 +00:02:08,210 --> 00:02:11,830 +بيتحول فيها الموليكيول من Monomer إلى Polymer + +22 +00:02:11,830 --> 00:02:16,890 +وMonomer وDiamer وTrimer وPolymer وبالتالي بنشوف + +23 +00:02:16,890 --> 00:02:21,550 +عدة أشكال من مين؟ من ال Multimer او من ال Homo + +24 +00:02:21,550 --> 00:02:26,370 +-Germanic يتصدق + +25 +00:02:26,370 --> 00:02:33,110 +في النهاية الموليكيول وعلى سطحه او يحتوي على مواقع + +26 +00:02:33,110 --> 00:02:40,670 +حامة منهاارتباطه موقع ارتباطه بال factor 80 ثم two + +27 +00:02:40,670 --> 00:02:43,350 +receptors بيرتبطوا + +28 +00:02:49,070 --> 00:02:54,310 +ثم one receptors على سطح ال collagen اللى هو ال + +29 +00:02:54,310 --> 00:03:00,170 +sub endothelial layer فبكون + +30 +00:03:00,170 --> 00:03:04,910 +على سطحه binding sites مهمة تتضمن هذه ال binding + +31 +00:03:04,910 --> 
00:03:05,270 +sites + +32 +00:03:09,350 --> 00:03:11,990 +هنا ان ال production of homo-endothelial factor + +33 +00:03:11,990 --> 00:03:15,790 +يتم في الاندوثيليا السبب في ال mega-chemicides و + +34 +00:03:15,790 --> 00:03:21,350 +most of the factor is usually synthesized in a + +35 +00:03:21,350 --> 00:03:27,010 +secreted form بمعنى الاجيفير في البلازمة and + +36 +00:03:27,010 --> 00:03:34,210 +stored in endothelial في الويبال بلدي placesوفي + +37 +00:03:34,210 --> 00:03:38,270 +الـ Alpha Granules of Platelets يبقى مواقع تخزينه + +38 +00:03:38,270 --> 00:03:42,230 +عارفينها هي الـ Whipper Palatinate والـ Alpha + +39 +00:03:42,230 --> 00:03:46,130 +Granules of Platelets ومواقع إنتاجه هي الـ + +40 +00:03:46,130 --> 00:03:52,290 +Endothelial Cellular Megakaryocyte بيستلزم + +41 +00:03:52,290 --> 00:03:59,350 +خروج الـVolvule Brand من مخازنه Inducer يستلزم + +42 +00:03:59,350 --> 00:04:04,260 +مخرج أو خروج الـVolvule Brandfactor من مخازنه + +43 +00:04:04,260 --> 00:04:12,540 +certain inducer طبعا أولا ب stimulus فإذا + +44 +00:04:12,540 --> 00:04:16,480 +بدنا نطلعه من ال endothelial cells mostly ال + +45 +00:04:16,480 --> 00:04:20,120 +stimulus اللى بيستخدم هو ال thrombin و الهستمين و + +46 +00:04:20,120 --> 00:04:25,540 +ال fibrin و ال complement من ال C5 ل اللى هم + +47 +00:04:25,540 --> 00:04:30,930 +بيسمونه membrane attack complex ل C9وإذا كان بدنا + +48 +00:04:30,930 --> 00:04:36,190 +نطلعه من الـ platelet نستخدم ثرومبين و ADD وكلاجي + +49 +00:04:36,190 --> 00:04:40,330 +طبعا + +50 +00:04:40,330 --> 00:04:44,270 +ال adhesion كل كواضعي في ال family brand حكينا + +51 +00:04:44,270 --> 00:04:48,230 +فيها أكتر من مرة وقولنا بيلعب دور كبير it has a + +52 +00:04:48,230 --> 00:04:51,550 +dual role بيلعب دور كبير في ال primary hemostasis + +53 +00:04:51,550 --> 00:04:54,930 +وفي ال secondary hemostasis في ال primary بيعمل + +54 +00:04:54,930 --> 00:05:00,110 +adhesion و secondary aggregationو في ال secondary + +55 +00:05:00,110 --> 00:05:09,050 +hemostasis بيعمل 100% بس 
ال factor .. اه انا في ال + +56 +00:05:09,050 --> 00:05:13,990 +primary ماشي aggregation primary مافهوش بقى هو + +57 +00:05:13,990 --> 00:05:17,030 +اللي دور في ال primary و اللي دور فيهاش في ال + +58 +00:05:17,030 --> 00:05:20,310 +secondary و عارفين ال function او ال mechanism of + +59 +00:05:20,310 --> 00:05:23,250 +adhesion و ال mechanism + +60 +00:05:26,970 --> 00:05:31,870 +هو ال factor تمانية بقعد vom Lebrun بقعد factor + +61 +00:05:31,870 --> 00:05:37,970 +تمانية طيب نيجي للمرض نيجي لإيش للمرض وال vom + +62 +00:05:37,970 --> 00:05:43,130 +Lebrun disease أول من حكى فيه هو Eric vom Lebrun + +63 +00:05:43,130 --> 00:05:47,530 +في 1930 وسمى + +64 +00:05:47,530 --> 00:05:52,410 +فيه حينهhereditary pseudohemophilia hereditary + +65 +00:05:52,410 --> 00:05:58,230 +pseudohemophilia وكانت التسمية ناتجة عن الأسباب + +66 +00:05:58,230 --> 00:06:02,490 +الآتية أنه أول finding لجها لجى فيه prolongation + +67 +00:06:02,490 --> 00:06:05,970 +في ال bleeding time ماشي لكن ال platelet count + +68 +00:06:05,970 --> 00:06:12,110 +normal ال platelet count normal مش راكبة صح فلمرة + +69 +00:06:12,110 --> 00:06:18,270 +اتنين لجى فيه myocosal bleedingنبقى تلاتة لجأة أن + +70 +00:06:18,270 --> 00:06:21,250 +المرض مصير بالجهتين، male و female، both sexes are + +71 +00:06:21,250 --> 00:06:26,250 +affected في الهموفيليا، نعرف أن ال male هو ال + +72 +00:06:26,250 --> 00:06:31,110 +affected و ال female هي كارية، وبالتالي هذا لا هو + +73 +00:06:31,110 --> 00:06:36,550 +هموفيليا ولا هو أياش bleeding disorder عادي تأثرت + +74 +00:06:36,550 --> 00:06:40,740 +من خلال ال treatmentطيب، شو اللي صار؟ قالوا في + +75 +00:06:40,740 --> 00:06:44,780 +1950، طبعا هذا الكلام تطلع more investigation، ففي + +76 +00:06:44,780 --> 00:06:51,160 +1950 leading time اكتشفه is associated with factor + +77 +00:06:51,160 --> 00:06:56,780 +8 deficiency، ماشي؟ طبعا في ال homely brand هذا + +78 +00:06:56,780 --> 00:07:00,480 +الكلام موجود؟ أه موجود، قالوا factor 8 يبقى قل .. 
+ +79 +00:07:00,480 --> 00:07:05,390 +100% يبقى قل إذا كان فيش homely brandنمرأة في الـ + +80 +00:07:05,390 --> 00:07:10,890 +1977 اكتشفوا الـ vomliebrand factor في التمانين + +81 +00:07:10,890 --> 00:07:20,570 +عملوا gene cloning للـ vomliebrand factor ال + +82 +00:07:20,570 --> 00:07:25,150 +incidents و ال frequency كانوا نسبته بشكل عام هي 1 + +83 +00:07:25,150 --> 00:07:31,710 +% من ال population 1% من ال population لكن المرضى + +84 +00:07:31,710 --> 00:07:39,190 +ب ..يعني they manifest different or variable + +85 +00:07:39,190 --> 00:07:45,450 +degree of symptoms بنلاقي في مرضى كتير بتعانيش من + +86 +00:07:45,450 --> 00:07:50,590 +إشي و مرضى كتير بتعاني ماشي ففي variation فيه في + +87 +00:07:50,590 --> 00:07:56,450 +ال symptoms between disease between مرضىلأن هو + +88 +00:07:56,450 --> 00:08:01,950 +patient وبالتالي هذا يعني أن المرض بيحمل أكتر من + +89 +00:08:01,950 --> 00:08:06,030 +نوعه ماشي؟ مادام في variation في نفس في ال + +90 +00:08:06,030 --> 00:08:09,370 +symptoms لنفس المرض مانتوا فيه أكتر من إياش من + +91 +00:08:09,370 --> 00:08:12,970 +type من هذا المرض يعني ال clinically significant + +92 +00:08:12,970 --> 00:08:18,030 +form of Lebron disease نسبة الإصابة فيه 125 شخص في + +93 +00:08:18,030 --> 00:08:25,380 +المليون في كل مليونوالـ severe نص الى خمسة في + +94 +00:08:25,380 --> 00:08:33,080 +المية من لكل مليون مصامير طبعا بقتل زومة ال + +95 +00:08:33,080 --> 00:08:37,880 +inheritance pattern بتلاقي ال male و ال female are + +96 +00:08:37,880 --> 00:08:42,420 +affected equally ماشي؟ + +97 +00:08:44,440 --> 00:08:48,280 +لأن إجوا يعملوا classification للمرض و جالوا ال + +98 +00:08:48,280 --> 00:08:51,980 +classification بدناها تعتمد على either the disease + +99 +00:08:51,980 --> 00:08:56,200 +is quantitative or qualitative هل هو quantitative + +100 +00:08:56,200 --> 00:09:05,380 +ولا إياش ولا qualitative طبعا هذا بشكل أساسيوطبعا + +101 +00:09:05,380 --> 00:09:09,280 +كلنا بنعرف دور volvomibrain factor بالنسبة الى + +102 +00:09:09,280 --> 00:09:14,280 
+factor 8 وبالتالي كمان أخدوا هذا الكلام بعين + +103 +00:09:14,280 --> 00:09:19,800 +الاعتبار مما أدى الى انه يعملوا classification لل + +104 +00:09:19,800 --> 00:09:25,420 +disease الى تلت أنواع أساسية تلت أنواع أساسية + +105 +00:09:25,420 --> 00:09:30,100 +وببساطة عشان تحفظوها الشباب هدا هي أكتر حفظوها كده + +106 +00:09:30,100 --> 00:09:32,460 +اتى لمرة واحد وتلاتة + +107 +00:09:35,960 --> 00:09:44,960 +نمرا واحد و تلاتة are quantitative نمرا اتنين is + +108 +00:09:44,960 --> 00:09:49,800 +qualitative نمرا + +109 +00:09:49,800 --> 00:09:59,460 +واحد is mild to moderate and + +110 +00:09:59,460 --> 00:10:08,090 +most common70% من المرضى من type 1 بينما type 3 is + +111 +00:10:08,090 --> 00:10:13,510 +severe and + +112 +00:10:13,510 --> 00:10:17,330 +real + +113 +00:10:17,330 --> 00:10:23,050 +لأنه بشكل حوالي 5% type + +114 +00:10:23,050 --> 00:10:31,530 +2 في منه أنواع مختلفة حسب ال mutationو بيشكل ربع + +115 +00:10:31,530 --> 00:10:37,030 +الحالات بيشكل ربع الحالات لحوالي خمس أنواع مختلفة + +116 +00:10:37,030 --> 00:10:44,150 +هذه التقسيمة الأساسية لكن قالوا انه في حاجة جديدة + +117 +00:10:44,150 --> 00:10:48,170 +اكتشفوها حوالي 300 الحالات سجلة حاجة بيسموها + +118 +00:10:48,170 --> 00:10:54,430 +Acquired Bomb Lebron Factory و ان شاء الله هنحكي + +119 +00:10:54,430 --> 00:10:58,590 +فيه بالتفصيل يعني النوع الرابع هو عبارة عن أياش + +120 +00:10:59,200 --> 00:11:06,260 +Acquired Vulnerable Diseases الـ + +121 +00:11:06,260 --> 00:11:09,540 +Clinical Manifestation شباب زي ما قلتلكوا ال + +122 +00:11:09,540 --> 00:11:15,320 +symptoms تختلف from mild and manageable symptoms + +123 +00:11:15,320 --> 00:11:19,100 +لسevere وunmanageable symptoms ال symptoms + +124 +00:11:28,730 --> 00:11:34,550 +هي مختلفة من الـ mild and manageable bleeding + +125 +00:11:34,550 --> 00:11:40,050 +disorders إلى الـ very severe and unmanageable + +126 +00:11:40,050 --> 00:11:49,270 +hemorrhage ومن التالي حسب نوع المرض طبعا طيب اتنين + +127 +00:11:49,270 --> 00:11:54,730 +و تلاتة ال bleeding 
episodes فيهم are life + +128 +00:11:54,730 --> 00:11:55,190 +threatening + +129 +00:11:58,700 --> 00:12:02,820 +كمان ال .. المرض ممكن تشوفه في ال .. في ال female + +130 +00:12:02,820 --> 00:12:10,320 +أكثر ضروة لإن ال female بتتعرض ل risk factor شهري + +131 +00:12:10,320 --> 00:12:17,020 +صح؟ هو ال bleeding أو ال menorrhea كمان + +132 +00:12:17,020 --> 00:12:20,860 +ال bleeding ممكن يزيد إذا كان المريض بياخد aspirin + +133 +00:12:20,860 --> 00:12:24,320 +لإنه في هذا الحالة بنعطل .. احنا البلد اللي تمعطلة + +134 +00:12:24,320 --> 00:12:33,230 +و ..بنزيد تعطيلها الا انهم من محاسن الصداف ان هذا + +135 +00:12:33,230 --> 00:12:38,270 +المرض severity of symptoms are modified are + +136 +00:12:38,270 --> 00:12:44,710 +decreased by age ويجالوا + +137 +00:12:44,710 --> 00:12:50,590 +انه بتبدأ مع العمر تزيد تركيز ال vomilarin factor + +138 +00:12:50,590 --> 00:12:54,820 +which is نعمة ويعني هذه نعمة من نعم الخالق انههذه + +139 +00:12:54,820 --> 00:12:58,560 +ال-vomelogram factor حتى عند المرضى تبدأ تزيد شوية + +140 +00:12:58,560 --> 00:13:06,540 +شوية كل ما الإنسان كبر كل ما الإنسان كبر كمان + +141 +00:13:06,540 --> 00:13:11,880 +هذه كلها symptoms للمرض في حوالي 60% من ال + +142 +00:13:11,880 --> 00:13:16,760 +symptoms بيعاني منها المريض الـpromptivist axis و + +143 +00:13:16,760 --> 00:13:27,800 +40% easy bruising و hematoma 35%منوراجيا ماشي يعني + +144 +00:13:27,800 --> 00:13:36,680 +هذه هي ال symptoms أخدت نسب مختلفة عند المرضى الـ + +145 +00:13:36,680 --> 00:13:40,660 +GI bleeding و ال dental extraction و ال trauma و + +146 +00:13:40,660 --> 00:13:43,940 +ال postpartum و postoperative تعرفوا المصطلحات هذه + +147 +00:13:43,940 --> 00:13:50,600 +كلها مظبوط postpartum نزيف ما بعد الولادة + +148 +00:13:50,600 --> 00:13:53,100 +postoperative + +149 +00:13:59,160 --> 00:14:05,820 +ماشي يا شباب؟ طيب، خلّيني أشوف الأنواع المختلفة + +150 +00:14:05,820 --> 00:14:08,080 +للـVulnerable Heart Disease وبنبدأ بالنوع الأول + +151 +00:14:08,080 --> 00:14:13,360 +وهو الـType I واتفقنا 
على أنه mild to moderate and + +152 +00:14:13,360 --> 00:14:21,460 +it's a quantitative disorder وبالتالي + +153 +00:14:21,460 --> 00:14:23,100 +الـVulnerable Heart Disorder بقى فاكتور could + +154 +00:14:23,100 --> 00:14:28,410 +functioning normally عند المرضى هدولusually + +155 +00:14:28,410 --> 00:14:35,230 +inherited in autosomal dominant manner و + +156 +00:14:35,230 --> 00:14:40,370 +الإصابة في العائلة الواحدة الواحدة قد تختلف + +157 +00:14:40,370 --> 00:14:45,570 +dramatically يعني + +158 +00:14:45,570 --> 00:14:51,910 +ممكن تلاقي واحد إصابته خفيفة و ال symptoms عنده + +159 +00:14:51,910 --> 00:14:57,970 +خفيفة و التاني ممكن يكون ال symptoms عندهم عيشبنفس + +160 +00:14:57,970 --> 00:15:04,850 +العيلة يعني type 2 قسموه إلى خمس أنواع حسب ال + +161 +00:15:04,850 --> 00:15:12,030 +mutation و تأثيرها على ال factor ال volume + +162 +00:15:12,030 --> 00:15:20,010 +equilibrium factor type 2 قسموه + +163 +00:15:20,010 --> 00:15:26,260 +إلى خمس أنواع و حسب تأثيرها على ال factorفيقسموه + +164 +00:15:26,260 --> 00:15:32,760 +إلى type 2A و B و C و M و N خمس أنواع مختلفة + +165 +00:15:32,760 --> 00:15:37,780 +معظمها بيتأثر + +166 +00:15:37,780 --> 00:15:43,840 +بيختلف فيها صفات ال multimars formed from one to + +167 +00:15:43,840 --> 00:15:49,680 +one يعني على سبيل المثال في type 2A بنلاقي فيه نقص + +168 +00:15:49,680 --> 00:15:53,500 +في ال high and intermediate multimars إيش ال + +169 +00:15:53,500 --> 00:15:57,800 +multimars ده شباب؟مش قولنا ال volume of the brand + +170 +00:15:57,800 --> 00:16:06,440 +factor لما بتصنع بياخد عدة أشكال ماشي بيبدأ بتصنع + +171 +00:16:06,440 --> 00:16:10,000 +ب .. ب .. ب .. ب .. 
preliminary structure ثم + +172 +00:16:10,000 --> 00:16:15,240 +بيصلوا modification فبنشوف عدة أشكال من مين من ال + +173 +00:16:15,240 --> 00:16:18,300 +volume of the brand factor منها ال monomer و منها + +174 +00:16:18,300 --> 00:16:21,880 +ال diameter و منها ال trimer ماشي و منها ال + +175 +00:16:21,880 --> 00:16:27,300 +multimers multi يعني ايه؟ عدة مية فينالنوع هذا، + +176 +00:16:27,300 --> 00:16:30,560 +بنلاقي نوع الـ Multimers غايب، نوع إياش الـ + +177 +00:16:30,560 --> 00:16:35,040 +Multimers الـ High or Intermediate Multimers is + +178 +00:16:35,040 --> 00:16:43,120 +absent الـ Type 2B، اللي هو فيه عنده صفة مختلفة، + +179 +00:16:43,120 --> 00:16:46,780 +إنه والله ال Multimers بترتبط في الـ Platelet + +180 +00:16:46,780 --> 00:16:51,220 +Excessively، يعني عندها High Affinity لمين؟ + +181 +00:17:12,240 --> 00:17:19,220 +Type IIc ريسسف و الهي مالتيمار فورم is reduced لكن + +182 +00:17:19,220 --> 00:17:24,000 +ال multimars are qualitatively abnormal + +183 +00:17:27,060 --> 00:17:35,880 +2M تقل قدرته على الارتباط بـ Factor 8 2M تقل قدرته + +184 +00:17:35,880 --> 00:17:36,800 +على الارتباط بـ Factor 8 2M تقل قدرته على الارتباط + +185 +00:17:36,800 --> 00:17:36,800 +بـ Factor 8 2M تقل قدرته على الارتباط بـ Factor 8 + +186 +00:17:36,800 --> 00:17:36,820 +2M تقل قدرته على الارتباط بـ Factor 8 2M تقل قدرته + +187 +00:17:36,820 --> 00:17:36,940 +على الارتباط بـ Factor 8 2M تقل قدرته على الارتباط + +188 +00:17:36,940 --> 00:17:37,760 +بـ Factor 8 2M تقل قدرته على الارتباط بـ Factor 8 + +189 +00:17:37,760 --> 00:17:42,200 +2M تقل قدرته على الارتباط بـ Factor 8 2M تقل قدرته + +190 +00:17:42,200 --> 00:17:44,400 +على الارتباط بـ Factor 8 2M تقل قدرته على الارتباط + +191 +00:17:44,400 --> 00:17:49,480 +بـ Factor + +192 +00:17:49,480 --> 00:17:56,430 +8 2M تقهو عبارة عن severe وبالتالي ما زال severe + +193 +00:17:56,430 --> 00:18:00,530 +يعني ملاقيش factor اللي هو vomlibrand factor علشان + +194 +00:18:00,530 --> 00:18:07,210 +هيك القياس بيكون undetectable Undetectable 
وغياب + +195 +00:18:07,210 --> 00:18:11,490 +vomlibrand factor بينعكس سلبا على ال factor 8 + +196 +00:18:11,490 --> 00:18:17,070 +فبنلاقي المريض هذا عنده هموفيليا بالاضافة إلى + +197 +00:18:17,070 --> 00:18:23,660 +vomlibrand disease ماشي شباب؟هذه التلاتة أنواع + +198 +00:18:23,660 --> 00:18:27,440 +المختلفة قلنا فيه نوع رابع سميناه Acquired Womb + +199 +00:18:27,440 --> 00:18:32,680 +Rebrand Disease وزي ما قلتلكوا اكتشفوه في 1970 + +200 +00:18:32,680 --> 00:18:38,520 +وسجلت حوالي 300 حالة منه وغالبا + +201 +00:18:38,520 --> 00:18:44,260 +بنشوفه في الإكبار with + +202 +00:18:44,260 --> 00:18:46,760 +no personal or family bleeding history + +203 +00:18:49,780 --> 00:18:56,320 +ولمّا بنشتغل عليه بيجي الشخص و بنلاقي تقريبا بيشبه + +204 +00:18:56,320 --> 00:19:04,040 +type 2 of wolverine disease و جال ال mechanisms + +205 +00:19:04,040 --> 00:19:13,540 +بيتضمن اكتر منها منها نمرا واحد auto antibody + +206 +00:19:13,540 --> 00:19:19,560 +formation لمين؟ 100%عارفين إيش ال auto-antibody؟ + +207 +00:19:19,560 --> 00:19:23,580 +فجأة و بدون سبق انزعب تبدأ تتكوّن antibody لل + +208 +00:19:23,580 --> 00:19:28,300 +-vomellibrium factor.نبرة اتنين، ميكانيزم تاني + +209 +00:19:28,300 --> 00:19:32,220 +يؤدي ل acquired form، ان والله ال certain + +210 +00:19:32,220 --> 00:19:36,560 +vomellibrium multimers بيزيد ارتباطها ب tumor، + +211 +00:19:36,560 --> 00:19:43,460 +بإيه؟ بال tumor masses، وبالتالي وكأننا بنعمل + +212 +00:19:43,460 --> 00:19:47,710 +selective removal لمين؟ لل-vomellibrium factor.وهو + +213 +00:19:47,710 --> 00:19:53,350 +بقى acquired deficiency of vomilibrium factor صح؟ + +214 +00:19:53,350 --> 00:19:57,150 +لما ال vomilibrium factor يتطبق بال tumor cell or + +215 +00:19:57,150 --> 00:20:03,210 +reactive cell بيجيل من اللي بلازم و لا بيجيل .. 
+ +216 +00:20:03,210 --> 00:20:05,770 +بيجيل من اللي بلازم وبالتالي هو كأنها indirectly + +217 +00:20:05,770 --> 00:20:10,870 +بنعمل Increased proteolysis of vomilibrium factor + +218 +00:20:10,870 --> 00:20:16,050 +ببدأ يتكسر بزيادة برضه من الميكانيزمات و .. + +219 +00:20:17,410 --> 00:20:19,730 +Defective synthesis and release of vulnerable + +220 +00:20:19,730 --> 00:20:23,110 +factors from cellular compartment والله بيبدأ + +221 +00:20:23,110 --> 00:20:33,710 +يتصنع تصنيع خاطق أو قليل في الخلايا المصنعة من ما + +222 +00:20:33,710 --> 00:20:36,590 +يميز ال acquired disease يا شباب acquired + +223 +00:20:36,590 --> 00:20:39,590 +vulnerable disease انه لجوء associated with + +224 +00:20:39,590 --> 00:20:42,810 +certain with other diseases زي المييلو + +225 +00:20:42,810 --> 00:20:46,520 +proliferative و الينفو proliferativeو ال + +226 +00:20:46,520 --> 00:20:54,920 +cardiovascular disease و هكذا مفهوم + +227 +00:20:54,920 --> 00:21:00,140 +يا شباب طيب كيف نشخص ال homolebral disease جالو + +228 +00:21:00,140 --> 00:21:05,540 +أدوات التشخيص هي فحصات ال hemostasis وهي ال BT و + +229 +00:21:05,540 --> 00:21:13,640 +ال VTT و ال breeding time ماشي نبدأ بال BT و ال + +230 +00:21:13,640 --> 00:21:20,080 +VTTانا انتبه عليا في التشخيص انتبه عليا بي تي و بي + +231 +00:21:20,080 --> 00:21:23,240 +تي تي ال بي تي و ال بي تي تي كل كوا أخدتوها في + +232 +00:21:23,240 --> 00:21:28,800 +المعمل بي تي بيشخص مين؟ Extrinsic pathway و ال بي + +233 +00:21:28,800 --> 00:21:34,920 +تي تي Intrinsic pathway لأ ال بي تي في ال volume + +234 +00:21:34,920 --> 00:21:40,040 +of gram factor بيكون normal ليش؟ انه انترنزق + +235 +00:21:45,680 --> 00:21:48,740 +أيوة بالظبط هو حامل فاكتور تمانية و فاكتور تمانية + +236 +00:21:48,740 --> 00:21:51,940 +اللي نحتاجه في ال intrinsic و بالتالي ال + +237 +00:21:51,940 --> 00:21:55,840 +prolongation بيصير في ايش؟ في ال BTT بينما ال Bt + +238 +00:21:55,840 --> 00:22:01,660 +بيكون normal طبعا الشباب مهم جدا ان في كثير من + +239 +00:22:01,660 --> 00:22:08,160 
+الأحيان بيطلعلك BTT normal ان بعض المرضى لكن هذا + +240 +00:22:08,160 --> 00:22:15,160 +لا يستثني ال bowel membrane disease ليش؟لأن قلنا + +241 +00:22:15,160 --> 00:22:18,280 +عن بعض الأمراض اللي بيكون ال factor فيها normal + +242 +00:22:18,280 --> 00:22:24,500 +تقريبا زي type I type I اياش تقريبا ما بيعاني + +243 +00:22:24,500 --> 00:22:27,200 +المريض من أي حاجة و ال vomliebrand factor + +244 +00:22:27,200 --> 00:22:31,420 +functioning normally وبالتالي لا يستثني تشخيص من + +245 +00:22:31,420 --> 00:22:41,420 +ال vomliebrand factor طب ال .. في بعض الأحيانboth + +246 +00:22:41,420 --> 00:22:47,700 +tests are prolonged في هذا الحالة يا شباب ده + +247 +00:22:47,700 --> 00:22:51,980 +تستدنى شبوعين ثم تعيد الفحص مرة تانية علشان ايش؟ + +248 +00:22:51,980 --> 00:22:57,060 +علشان تتحقق من هذه النتيجة إذا كان شاكك في ال + +249 +00:22:57,060 --> 00:23:05,540 +disease ماشي إذا فحصين طلقة prolongedإذا الفحصين + +250 +00:23:05,540 --> 00:23:09,360 +طلعوا normal قلنا ال BTT ال normal لا يستثني + +251 +00:23:09,360 --> 00:23:13,300 +تشخيصيا ال vomilibrand إذا التانين طلعوا abnormal + +252 +00:23:13,300 --> 00:23:17,960 +دفتريد الفحص بعد شبعين إذا كنت شاكم في vomilibrand + +253 +00:23:17,960 --> 00:23:23,080 +disease ماشي؟ الأصل يطلع ال BT normal و ال BTT + +254 +00:23:23,080 --> 00:23:25,700 +abnormal ماشي؟ + +255 +00:23:31,170 --> 00:23:34,630 +طب انا نفسي طلع يعني للإثنين و بره ال video اللي + +256 +00:23:34,630 --> 00:23:38,230 +.. 
انت تقعد الفحش بقعدش معايا بقعدش معايا زاد طبعا + +257 +00:23:38,230 --> 00:23:43,870 +لأن المرض مش مهم بالبرادر و بدك تدور على حاجة + +258 +00:23:43,870 --> 00:23:50,270 +تانية لإن واحتمالي كبير تكون خلطان في العينة في + +259 +00:23:50,270 --> 00:23:55,090 +الفحش الأولاني و احنا بنعيد عشان نتحقق من النتيجة + +260 +00:23:55,090 --> 00:23:56,630 +باستخدام عينة + +261 +00:24:04,220 --> 00:24:07,720 +بالنسبة لل building time يا شباب لغاية الآن ماحدش + +262 +00:24:07,720 --> 00:24:15,320 +بيطلبوا لأنه non specific و non sensitive not + +263 +00:24:15,320 --> 00:24:21,080 +specific و not sensitive و the test by itself + +264 +00:24:21,080 --> 00:24:25,540 +subject to varying wild variation يعني varying + +265 +00:24:25,540 --> 00:24:27,480 +results + +266 +00:24:32,210 --> 00:24:37,710 +ماشي؟ ما هي المشاكل التي تواجهنا في تشخيص ال vom + +267 +00:24:37,710 --> 00:24:42,390 +librand disease تبقى على هذه النقاط المهمة مشاكل + +268 +00:24:42,390 --> 00:24:46,990 +التي تواجهنا في تشخيص مين ال vom librand disease + +269 +00:24:46,990 --> 00:24:51,610 +نمرا + +270 +00:24:51,610 --> 00:24:56,630 +واحد ال vom librand level itself قالو it vary + +271 +00:24:56,630 --> 00:24:57,430 +greatly + +272 +00:25:01,970 --> 00:25:05,650 +وبتاخد ال flash لأنه اعتبره acute phase protein + +273 +00:25:05,650 --> 00:25:11,590 +ايش ال acute phase protein يا شباب؟ acute phase + +274 +00:25:11,590 --> 00:25:15,830 +protein هو عبارة عن protein بتشتمع في ال liver في + +275 +00:25:15,830 --> 00:25:20,610 +ظروف معينة بكميات كبيرة من هذه الظروف + +276 +00:25:20,610 --> 00:25:24,730 +psychological stresses زي مثلًا اللي أخدته مثلًا + +277 +00:25:24,730 --> 00:25:29,120 +في ال hormone في ال endocrineإن الواحد لما يكون + +278 +00:25:29,120 --> 00:25:32,000 +خشرا امتحان وخايف منه يقول اللي حقا بكميات كبيرة + +279 +00:25:32,000 --> 00:25:36,140 +وظبط psychological factor بيسموها adrenergic + +280 +00:25:36,140 --> 00:25:41,860 +stimuli او + +281 +00:25:41,860 --> 00:25:45,980 +مثال اخر كمان اخدته اكيد في 
الفحصان سمعتوا بفحص + +282 +00:25:45,980 --> 00:25:51,640 +CRP؟ CR reactive protein سمعتوا فيه؟ اي اكيد + +283 +00:25:51,640 --> 00:25:56,580 +سمعتوا فيه اخدته في السواقل CR reactive protein هو + +284 +00:25:56,580 --> 00:25:57,200 +برا الفحص + +285 +00:25:59,960 --> 00:26:06,940 +ماشي هو ال specific test اللى بنعمله في حالات ال + +286 +00:26:06,940 --> 00:26:11,940 +inflammation غير ال ESR ال ESR بنعتبرها non + +287 +00:26:11,940 --> 00:26:16,580 +specific مشي ليه؟ ياله لإنه في ال inflammatory + +288 +00:26:16,580 --> 00:26:20,320 +reaction CRP C reactive protein هو برعى ال protein + +289 +00:26:20,320 --> 00:26:23,820 +is an acute phase بيصنع بجنونيات كبيرة طول ما في + +290 +00:26:23,820 --> 00:26:28,970 +inflammation و يوم ما روح ال inflammationبروحة + +291 +00:26:28,970 --> 00:26:32,570 +بضبط اللي انتج فإذا كان فيه inflammation هيدينا + +292 +00:26:32,570 --> 00:26:35,690 +positive وإذا فيش inflammation هيدينا negative + +293 +00:26:35,690 --> 00:26:42,390 +ماشي؟ ال vomiting brain factor هو واحد من هذه ال + +294 +00:26:42,390 --> 00:26:45,110 +factor acute phase protein يصنع من كميات كبيرة ان + +295 +00:26:45,110 --> 00:26:51,310 +في ظروف معينة ماشي؟ هو و الferritin seriactive + +296 +00:26:51,310 --> 00:26:58,630 +protein ومجموعة من ايش؟ منالبروتينات بتعتبر C + +297 +00:26:58,630 --> 00:27:05,570 +-reactive بتعتبر acute physiological فبيختلف + +298 +00:27:05,570 --> 00:27:11,050 +تركيزها باختلاف الوضع على سبيل مثال في ال + +299 +00:27:11,050 --> 00:27:17,110 +psychological .. 
physiological stresses انه اه تحت + +300 +00:27:17,110 --> 00:27:22,850 +ضغط نفس الانسان او مريض اجاك طفل صغير اجاك بعيط + +301 +00:27:22,850 --> 00:27:28,640 +على سبيل المثالوهو بعيط طول ما هو بعيط هو النسبة + +302 +00:27:28,640 --> 00:27:34,240 +احتمالي كبير النتيجة تكون خاطئة لأنه هيختلفهاش ال + +303 +00:27:34,240 --> 00:27:39,520 +crying هو عبارة عن physiological stress للطفل واذا + +304 +00:27:39,520 --> 00:27:43,100 +كان صار في physiological stress يعني صار في زيادة + +305 +00:27:43,100 --> 00:27:46,640 +فيهاش في ال vomiting factor يعني لو كان مريض بديه + +306 +00:27:46,640 --> 00:27:50,780 +يطلع normal صح؟ لو كان مريض بديه يطلع عياش normal + +307 +00:27:52,240 --> 00:27:55,340 +في موجود الاستروجين و ال vasopressin و ال growth + +308 +00:27:55,340 --> 00:28:00,620 +hormone و ال adrenergic stimuli كلهم بيعني ال + +309 +00:28:00,620 --> 00:28:06,700 +adrenaline release كلهم بيأدوا إلى زيادة ال + +310 +00:28:06,700 --> 00:28:12,400 +vomwillebrane factor هو بالتالي ال vomwillebrane + +311 +00:28:12,400 --> 00:28:16,460 +factor ممكن يطلع normal ماشي + +312 +00:28:20,140 --> 00:28:25,700 +شو لازم أعمل في هذه الحالة؟ الأصل أني أعيد الفحص + +313 +00:28:25,700 --> 00:28:32,760 +to confirm the results فبينصحوا دائما بإنه نعيد + +314 +00:28:32,760 --> 00:28:38,360 +الفحص عدة مرات in a two weeks interval بين كل فحص + +315 +00:28:38,360 --> 00:28:47,000 +و التاني أسبوعين ماشي عشان إيه؟ عشان أصل إلى تشخيص + +316 +00:28:47,000 --> 00:28:48,340 +صحيح + +317 +00:28:50,550 --> 00:28:53,970 +طبعا في .. 
ينصح أثناء أخذ العينة أن المريض يكون + +318 +00:28:53,970 --> 00:28:59,610 +بعيد عن أي stresses من ضمنها أي hemorrhage، أي + +319 +00:28:59,610 --> 00:29:05,290 +اللي هو infection أو extreme exercise، كل هذه ممكن + +320 +00:29:05,290 --> 00:29:10,390 +تعمل pregnancy، لازم تكون بعيد عن إيه هذه الشغلات + +321 +00:29:10,390 --> 00:29:16,010 +عشان تصل إلى نتيجة كبيرةيبقى هذه المشكلة التانية + +322 +00:29:16,010 --> 00:29:18,750 +اللي بتواجهنا المشكلة التالتة قالوا ان في بعض + +323 +00:29:18,750 --> 00:29:24,430 +الناس بتتأثر نسبة ال factor حسب فصيلة الدم وهذه + +324 +00:29:24,430 --> 00:29:29,110 +عبارة عن statistical data ماشي؟ قالوا حسب نوع الدم + +325 +00:29:29,110 --> 00:29:38,030 +بنلاقي ال factor يعني بيختلف من شخص إلى آخر طيب + +326 +00:29:38,030 --> 00:29:44,610 +كيف نشخص يا شباب؟ كيف نشخص؟شوفوا ميهر في حوصاتنا + +327 +00:29:44,610 --> 00:29:48,030 +اللي بنعملها قالوا بنعمل نمر واحد restو الست مع ال + +328 +00:29:48,030 --> 00:29:51,470 +C اشهر + +329 +00:29:51,470 --> 00:29:51,950 +restو الست + +330 +00:30:00,930 --> 00:30:04,590 +في aggregation في ايش بيجيس ال aggregation؟ في + +331 +00:30:04,590 --> 00:30:07,910 +agregometer، بصبوط؟ عشان انا اخدناها كله؟ الجهاز + +332 +00:30:07,910 --> 00:30:13,130 +ال agregometer؟ اه كل ال agonists دي طيب ايش بيعمل + +333 +00:30:13,130 --> 00:30:15,630 +في ال resource system؟ جاله resource system، اذا + +334 +00:30:15,630 --> 00:30:24,450 +فكرينش بقى، يستطيع ان ينشط حتى البلاتلي المحفوظة + +335 +00:30:24,450 --> 00:30:30,590 +في الforma geneمجبوط، فهو strong agonist، strong + +336 +00:30:30,590 --> 00:30:36,770 +agonist، ماشي؟ فبنشوف تأثيره على الـplatelet + +337 +00:30:36,770 --> 00:30:43,810 +function، ماشي؟ في وجود الفرق اللي بناه، ماشي + +338 +00:30:43,810 --> 00:30:49,860 +بنعمل، بروح و احنا جايبينVomit Repellent Factor + +339 +00:30:49,860 --> 00:30:53,560 +أيها induce الـ Vomit Repellent Factor to bind + +340 +00:30:53,560 --> 00:30:59,780 +with the platelet receptor برتفت في مين؟ GP1B + +341 +00:30:59,780 --> 
00:31:04,820 +مظبوط طب إيش بده يعمل؟ بده يعمل adhesion، هذي + +342 +00:31:04,820 --> 00:31:12,380 +جابلي ال aggregation، مظبوط؟ طيب، ثم ب .. الرسو .. + +343 +00:31:12,380 --> 00:31:16,420 +بتقيس الرسو في certain cofactor activity من خلال + +344 +00:31:17,540 --> 00:31:21,980 +الـ Agglutination او Metabolically Inactive + +345 +00:31:21,980 --> 00:31:32,160 +Platelet تبدأ تترسل ثم + +346 +00:31:32,160 --> 00:31:37,960 +بتعمل Ressosectin Induced Platelet Activation RIPA + +347 +00:31:37,960 --> 00:31:44,240 +اختصار لمين؟ Ressosectin Induced + +348 +00:31:44,240 --> 00:31:46,420 +Platelet + +349 +00:31:55,420 --> 00:31:59,420 +activation طبعا هذا عبارة عن أياش بتقيس ال + +350 +00:31:59,420 --> 00:32:02,500 +metabolically active platelet الكلام هذا كله + +351 +00:32:02,500 --> 00:32:05,720 +بتقيسه بوسطة ال agregometer which is used to + +352 +00:32:05,720 --> 00:32:08,440 +measure the rate of aggregation + +353 +00:32:24,350 --> 00:32:30,570 +خلصته؟ ماشي؟ طيب شغلت تانية immunological + +354 +00:32:30,570 --> 00:32:36,190 +test بتقيس ال vowel brand antigen عشان تقيس vowel + +355 +00:32:36,190 --> 00:32:39,250 +brand antigen بدك تعمله antibody فبتصير التفاعل + +356 +00:32:39,250 --> 00:32:43,070 +antigen-antibody reaction بتقيسه كيف؟ quantitative + +357 +00:32:43,070 --> 00:32:48,120 +يا شباب بالEise Techniqueمش أخدتوها ده الـ ELISA؟ + +358 +00:32:48,120 --> 00:32:53,320 +100% بتعتبره بروتين، بتكونله antibody وcast by + +359 +00:32:53,320 --> 00:33:02,100 +ELISA quantitatively الأصل يا شباب أنه النتائج بين + +360 +00:33:02,100 --> 00:33:07,200 +الفحوصات المختلفة تتوافق بين الفحوصات المختلفة + +361 +00:33:07,200 --> 00:33:12,460 +تتوافق وعدم توافقها بيعني انه بيخلينا in shock في + +362 +00:33:12,460 --> 00:33:17,730 +انه ال Voglebrand اللي احنا بندور عليه هو منالنوع + +363 +00:33:17,730 --> 00:33:21,610 +ال qualitative type مش ال quantitative يعني type + +364 +00:33:21,610 --> 00:33:26,610 +two مش type واحد او ايش او اتنين و هي type two + +365 +00:33:26,610 --> 
00:33:31,070 +عشان نشخص بالظبط اي نوع منه بدنا نعمل further + +366 +00:33:31,070 --> 00:33:35,610 +investigation صح؟ لتالي بدنا نتابع و نشوف ايش ال + +367 +00:33:35,610 --> 00:33:40,050 +investigation فبالقيم ندرس ال multiverse forms + +368 +00:33:40,050 --> 00:33:45,270 +المختلفة في ال of whom ال brandو بندرسها بواسطة + +369 +00:33:45,270 --> 00:33:50,030 +بفحص، إيش؟ + +370 +00:33:50,030 --> 00:33:53,390 +كيف بدك تعرف الأنواع المختلفة من ال multi-merge في + +371 +00:33:53,390 --> 00:34:04,030 +الدنيا؟ جيل ال Electrophoresis؟ + +372 +00:34:05,010 --> 00:34:07,150 +وانتش هاك عليها هي Electrophoresis فانا أقول اللي + +373 +00:34:07,150 --> 00:34:11,070 +هو paper هي Electrophoresis ال Electrophoresis + +374 +00:34:11,070 --> 00:34:15,850 +بتعرفوا ال principle تبعه بيصير عملية فصل between + +375 +00:34:15,850 --> 00:34:22,050 +different components بناء على ال charge where ال + +376 +00:34:22,050 --> 00:34:27,610 +size فهذا هو الفحص شايفين الفحص يا شباب؟ شايفين + +377 +00:34:27,610 --> 00:34:32,390 +الصورة؟ هذا normal هي عبارة عن multimers كل واحد + +378 +00:34:32,390 --> 00:34:38,380 +.. 
كل ..كل form أخد عياش band شايفين ال patient + +379 +00:34:38,380 --> 00:34:44,680 +قداش نص العياش ال normal نص العياش ال normal + +380 +00:34:44,680 --> 00:34:54,040 +بالتالي ممكن نشخص منه بسهولة treatment بيعاجوا + +381 +00:34:54,040 --> 00:35:00,720 +المرض هذا بال DDAVB خدناه المرض هذيك وقولنا هيش + +382 +00:35:00,720 --> 00:35:08,140 +بعملخدناك علاج لمرضى الهموفيليا مرضى الهموفيليا + +383 +00:35:08,140 --> 00:35:13,080 +عندهم نقص في factor 8 وقولنا الناس اللي عندهم mild + +384 +00:35:13,080 --> 00:35:19,860 +hemophilia نديهم DDABB عشان بالظبط بتطلع الفوم + +385 +00:35:19,860 --> 00:35:23,580 +الابرهن من مخالبه فبيزود الفوم الابرهن اللي بده + +386 +00:35:23,580 --> 00:35:27,980 +يحمي الكمية القليلة من factor 8 طيب يعني هو بيزود + +387 +00:35:27,980 --> 00:35:30,500 +الفوم الابرهن طب الأولى أنه نعالج مريض الفوم + +388 +00:35:30,500 --> 00:35:36,720 +الابرهن يا بابا، مجبوط؟فبالتالي هو علاج لل Vomit + +389 +00:35:36,720 --> 00:35:40,940 +Bread Cryoprecipitate هو بره ان احد ال blood + +390 +00:35:40,940 --> 00:35:46,140 +component بيحتوي على كمية كبيرة من ال Vomit Bread + +391 +00:35:46,140 --> 00:35:49,740 +فاكتور تمانية concentrate برضه احد ال plasma + +392 +00:35:49,740 --> 00:35:57,740 +component اللى بيحضر التحضير بالمصانع و بيحتوي + +393 +00:35:57,740 --> 00:36:02,830 +ايضا على كمية كبيرة من ايش؟ من ال Vomit Breadطبعا + +394 +00:36:02,830 --> 00:36:09,410 +شباب هذا ال treatment اول صفحتين و التالتين هذا و + +395 +00:36:09,410 --> 00:36:15,210 +هذا يعني كيفية العلاج + +396 +00:36:15,210 --> 00:36:20,510 +و ال protocol العلاج و كدهش لازم نعطي يعني مش كتير + +397 +00:36:20,510 --> 00:36:26,390 +احنا معانين فيه هنا بقى كمان من ال treatment هدا + +398 +00:36:26,390 --> 00:36:30,190 +هي ال platelet transfusion انك تعطي plateletمظبوط + +399 +00:36:30,190 --> 00:36:33,990 +واحد عنده كمية ال vom lipaland قليلة طب زود ال + +400 +00:36:33,990 --> 00:36:42,030 +platelet ماشي عشان ايه؟ عشان يتزود اللي هو ال vom + +401 +00:36:42,030 --> 00:36:45,450 +lipaland لإنه كمان ال 
platelet تحتوي على vom + +402 +00:36:45,450 --> 00:36:49,110 +lipaland طب ما cryoprecipitate قلنا عبارة عن هو + +403 +00:36:49,110 --> 00:36:52,930 +جزء من ال human plasma بيحتوي على كمية كبيرة من ال + +404 +00:36:52,930 --> 00:36:57,750 +factor 8 و vom lipaland و ال fresh frozen plasma + +405 +00:36:57,750 --> 00:37:02,820 +أيضابيحتوي على كمية كبيرة من ال phone و من الأبناء + +406 +00:37:02,820 --> 00:37:09,300 +احد عنده سؤال؟ + +407 +00:37:09,300 --> 00:37:14,520 +حد عنده سؤال يا شباب؟ + +408 +00:37:14,520 --> 00:37:20,740 +طيب نكمل في ال DIC DIC اختصار ل disseminate + +409 +00:37:20,740 --> 00:37:27,680 +Intravascular Chiropractic ماشي disseminated + +410 +00:37:27,680 --> 00:37:28,400 +Intravascular + +411 +00:37:34,790 --> 00:37:40,590 +Syndrome بيتميز بـ Systemic، إيش يعني Systemic؟ + +412 +00:37:40,590 --> 00:37:45,410 +يعني كل الجسم ده، ماشي، ينتشر Intravascular + +413 +00:37:45,410 --> 00:37:52,090 +Coagulopathy جلطات منتشرة في جميع أنحاء الجسم، + +414 +00:37:52,090 --> 00:37:59,420 +يعني بدايته إيش؟ جلطة، ماشي؟ and clotis the + +415 +00:37:59,420 --> 00:38:04,060 +initial event يعني الحدث الأولى هو إيش؟ هو cloth + +416 +00:38:04,060 --> 00:38:09,640 +formation ومعظم الوفيات بتعتمد على ال extent of + +417 +00:38:09,640 --> 00:38:13,460 +intramuscular thrombosis، جداش صار فيه جلطات + +418 +00:38:13,460 --> 00:38:21,440 +الوفيات بتعتمد على إيش؟ جداش صار فيه جلطات ومن + +419 +00:38:21,440 --> 00:38:26,100 +أسبابه .. 
أسبابه متعددة أسبابه متعددة عند حد أنا + +420 +00:38:26,100 --> 00:38:26,960 +تطرق إليها + +421 +00:38:31,540 --> 00:38:35,940 +بتنتشر بشكل سيستيميك في جميع حالة الجسم وحسب + +422 +00:38:35,940 --> 00:38:42,020 +انتشارها بتعتمد حالات الوفاية على انتشار هذه + +423 +00:38:42,020 --> 00:38:48,920 +الجبطات واسبابهم متعددة طيب acquired syndrome + +424 +00:38:48,920 --> 00:38:52,580 +بتتميز بـ Intravascular Coagulability وهذا اللي + +425 +00:38:52,580 --> 00:38:56,600 +تاوم حاكيناه كله مهم شوفوا كيف بتتم الحكاية شبكة + +426 +00:38:56,600 --> 00:39:00,180 +تنتبه عليها هي Systemic Activation of Coagulation + +427 +00:39:01,270 --> 00:39:07,450 +Cascade Systemic يعني إيش؟ في كلها، بالظبط، مش + +428 +00:39:07,450 --> 00:39:11,830 +localized هذا بيؤدي إلى Intravascular بعد فترة + +429 +00:39:11,830 --> 00:39:17,050 +بتضل تعمل جلطة، تعمل جلطة، تعمل جلطة المواد الخام + +430 +00:39:17,050 --> 00:39:24,510 +بتضل موجودة، بتخلص، صح؟ فبدأ جبل ما تخلص يصير فيه + +431 +00:39:24,510 --> 00:39:28,830 +Intravascular Deposition أو Fiber بتكون جلطات من + +432 +00:39:28,830 --> 00:39:34,400 +ال fiber في أماكن متعددةTroposis of a small and + +433 +00:39:34,400 --> 00:39:41,320 +mid-sized vessel وهو يؤدي إلى ischemia و organ + +434 +00:39:41,320 --> 00:39:47,360 +failure، مظبوط؟ لكن زي ما قلتلكوا، استمرار هذه + +435 +00:39:47,360 --> 00:39:54,840 +الحالة بيتطلب أو بيتبعوا أن المواد الخامة تخلص من + +436 +00:39:54,840 --> 00:39:58,520 +الـplatelet و ال coagulation factor وإذا خلصها، + +437 +00:39:58,520 --> 00:40:02,920 +يخش المريض في إيه؟ في bleedingوالاتنين بيعدوا + +438 +00:40:02,920 --> 00:40:06,640 +للوفاة + +439 +00:40:06,640 --> 00:40:13,700 +عيد تاني عيد تاني Systemic احنا بنقول + +440 +00:40:13,700 --> 00:40:18,440 +Indravascular Coagulopathy يعني في activation + +441 +00:40:18,440 --> 00:40:21,620 +الكواجوليشن كاسكيت ميكاليزم في مكان واحد في الجسم + +442 +00:40:21,620 --> 00:40:27,120 +لأ في أماكن مختلفة فمحصلة نشاط الكواجوليشن كاسكيت + +443 +00:40:27,120 --> 00:40:32,080 +تكوين 
vibrant يعني في جلطاتوإذا صار فيه fiber + +444 +00:40:32,080 --> 00:40:38,780 +information شو بيعمل؟ جلطات في ال small و ال mid + +445 +00:40:38,780 --> 00:40:45,740 +-sized vessel هذا بيؤدي ليش؟ 100% وإذا صار فيه + +446 +00:40:45,740 --> 00:40:50,520 +ischemia فيه war gall failure استمرت الحكاية هيؤدي + +447 +00:40:50,520 --> 00:40:54,000 +إلى depletion of coagulation factor and platelet + +448 +00:40:54,000 --> 00:40:59,780 +وهذا بيؤدي إلى bleeding وقلت ليه؟ طبعا سواء كانت + +449 +00:40:59,780 --> 00:41:06,100 +جلطةأو اسكيميا و الـ bleeding بسبب الوفاة + +450 +00:41:06,100 --> 00:41:09,820 +pathophysiology + +451 +00:41:09,820 --> 00:41:15,400 +of DIC كيف يتم ترتيب الميكانيزم كيف تم ترتيبه + +452 +00:41:15,400 --> 00:41:19,360 +قاليته activation of blood coagulation ما اتفقنا + +453 +00:41:19,360 --> 00:41:23,680 +ثم suppression of physiological anticoagulant كل + +454 +00:41:23,680 --> 00:41:27,760 +الـ anticoagulant في البداية مثل الـ unblocking + +455 +00:41:27,760 --> 00:41:33,020 +شغالالـ Coagulant الـ Pro-Coagulant يشغّل الـ Anti + +456 +00:41:33,020 --> 00:41:37,000 +-Coagulant و أقفل وهي بيصير فيه impairment للـ + +457 +00:41:37,000 --> 00:41:40,500 +Fibrolysis يعني مافيش إذهاب للجلطة أحنا قلنا الـ + +458 +00:41:40,500 --> 00:41:43,420 +Fibrolysis ممكن يعمل ال bleeding مافيش إذهاب + +459 +00:41:43,420 --> 00:41:48,700 +للجلطة لكن بيصير كمان cytokine release ونشوف واحد + +460 +00:41:48,700 --> 00:41:52,620 +واحد هذه العوامل شو بتأثر activation و blood + +461 +00:41:52,620 --> 00:41:56,280 +coagulation يا شباب كيف ممكن يصير blood + +462 +00:41:56,280 --> 00:41:56,920 +coagulation + +463 +00:41:59,650 --> 00:42:05,770 +Cascade Activation Tissue Factor صح؟ وهذه من أخطر + +464 +00:42:05,770 --> 00:42:11,790 +الحاجات اللي بتصير يا شباب انه مريض لسبب ما يصير + +465 +00:42:11,790 --> 00:42:16,250 +blood vessel injury و ب release of tissue factor و + +466 +00:42:16,250 --> 00:42:20,210 +إذا سوصل ال tissue factor للدم ممكن يسبح في الدم و + +467 +00:42:20,210 --> 
00:42:25,210 +يصير يكون جلطات في مناطق مختلفة من الجسم صح؟ فهي + +468 +00:42:25,210 --> 00:42:31,330 +tissue factor factor 7AThrombin generation يتم + +469 +00:42:31,330 --> 00:42:39,490 +تنشيطه من خلال الـ Extrinsic pathway و بذكركم ان + +470 +00:42:39,490 --> 00:42:46,150 +ال Extrinsic is very short يعني لتنشيطه بسرعة طبعا + +471 +00:42:46,150 --> 00:42:50,330 +complex activate factor 9 و factor 10 و تشوف + +472 +00:42:54,770 --> 00:43:00,010 +of multiple places ماشي يعني موجود في أناق مختلفة + +473 +00:43:00,010 --> 00:43:04,330 +من الجسم في ال endothelial cells في ال monocytes و + +474 +00:43:04,330 --> 00:43:07,410 +extravasculary موجود في ال lung و ال kidney و ال + +475 +00:43:07,410 --> 00:43:10,230 +brain و ال endothelial cell ال epithelial cell + +476 +00:43:10,230 --> 00:43:15,870 +ماشي هذه مواقع حساسة و ال uterus مواقع حساسة + +477 +00:43:15,870 --> 00:43:20,730 +مليانة بالاش من ال tissue factorيبقى نمرا واحد + +478 +00:43:20,730 --> 00:43:23,590 +activation of blood coagulation through tissue + +479 +00:43:23,590 --> 00:43:27,430 +factor release اللي بيكون extra vascular و بتقوم + +480 +00:43:27,430 --> 00:43:30,890 +يصلل ال vascular يعني بيصلل الدم و هناك بيكوّنش + +481 +00:43:30,890 --> 00:43:36,690 +ثرمبين formation و fibrin formation نمرا اتنين + +482 +00:43:36,690 --> 00:43:40,050 +عشان بقى بيصير فيه suppression لل anticoagulant + +483 +00:43:40,050 --> 00:43:46,690 +ARM و بيتمثل ب reduction of antithrombin 13 + +484 +00:43:49,380 --> 00:43:52,540 +كمان بيصير فيه impairment ل protein C و protein S + +485 +00:43:52,540 --> 00:43:57,880 +و هدولة كلنا بنعرفهم صح؟ و برضه مش تلك + +486 +00:43:57,880 --> 00:44:02,620 +anticoagulant طبعا، ثم بيصير فيه insufficient of + +487 +00:44:02,620 --> 00:44:04,640 +regulation of tissue factor pathway inhibitor + +488 +00:44:04,640 --> 00:44:12,420 +وبالتالي بيصير فيه inhibition لالـ 107 يعني، + +489 +00:44:12,420 --> 00:44:12,840 +عارفين؟ + +490 +00:44:15,550 --> 00:44:19,450 +التانية Impermental Fibrinolysis، مين 
بيعمل + +491 +00:44:19,450 --> 00:44:27,250 +Fibrinolysis؟ Plasminogen activator بيعمل + +492 +00:44:27,250 --> 00:44:32,730 +Fibrinolysis طيب لو نشطنا ال inhibitor بتاع ال + +493 +00:44:32,730 --> 00:44:36,830 +activator، شو بنعمل؟ 100%، 100%، هذا اللي بيصير، + +494 +00:44:36,830 --> 00:44:40,970 +Impermental FibrinolysisCytokine release شباب برضه + +495 +00:44:40,970 --> 00:44:45,750 +ميكانيزم رابع نجوا انه في حالات ال DIC بيصير فيه + +496 +00:44:45,750 --> 00:44:50,410 +secretion ل certain cytokines منها ال I L6 و I L1 + +497 +00:44:50,410 --> 00:44:57,430 +و tumor necrosis factor و هذا بيعمل dysregulation + +498 +00:44:57,430 --> 00:45:03,150 +لل anticoagulant pathway و ي modulate I L6 I L10 + +499 +00:45:03,150 --> 00:45:07,610 +كمان modulated activation coagulation factorأو + +500 +00:45:07,610 --> 00:45:10,630 +coagulation cascade وبالتالي فى علاقة مباشرة جدا + +501 +00:45:10,630 --> 00:45:14,310 +ما بين ال coagulation والinflammation أو ال + +502 +00:45:14,310 --> 00:45:20,950 +inflammatory process طيب كيف نشخص مريض سرير عنده + +503 +00:45:20,950 --> 00:45:26,450 +DIC مريض سرير عنده DIC ال brothers of disease + +504 +00:45:26,450 --> 00:45:34,530 +associated with DIC ده نشوف ان الطلاع على مظاهر + +505 +00:45:34,530 --> 00:45:41,000 +مرضية بتكونموجودة appropriate clinical setting + +506 +00:45:41,000 --> 00:45:46,040 +لنشوف إذا كان في أي evidence of thrombosis أو + +507 +00:45:46,040 --> 00:45:50,280 +hemorrhage لإنه مرض من هالنوع يبدأ سال بجلطة + +508 +00:45:50,280 --> 00:45:55,960 +وينتهي فيه ب bleeding laboratory studies no single + +509 +00:45:55,960 --> 00:46:00,020 +test is accurate serial tests are more helpful + +510 +00:46:00,020 --> 00:46:06,140 +than single test يجب أن تعمل مجموعة من الفعصاتومن + +511 +00:46:06,140 --> 00:46:11,440 +أسبابه يا شباب شوفوا أسبابه إيش مالك نانسي release + +512 +00:46:11,440 --> 00:46:16,380 +of tissue factor cardiovascular hypothermia or + +513 +00:46:16,380 --> 00:46:19,760 +hyperthermia الناس اللي متعرضة لدرجة حرارة 
واطية + +514 +00:46:19,760 --> 00:46:24,300 +أو عالية المنري acute respiratory distress + +515 +00:46:24,300 --> 00:46:30,780 +syndrome أسدوسيز وانكسيا collagen vascular disease + +516 +00:46:30,780 --> 00:46:32,260 +وأنفلاك + +517 +00:46:39,800 --> 00:46:43,720 +كمان من الأسباب اللى بتؤدي الى DIC infectious + +518 +00:46:43,720 --> 00:46:47,180 +agent زى الـSepticemia سواء كان بكتيريا أو الفيلر + +519 +00:46:47,180 --> 00:46:52,400 +أو الفنجر ثم Intravascular Hemolysis لإنه بطلع + +520 +00:46:52,400 --> 00:47:00,940 +تشوفاكتر ثم acute liver disease و تشوفاكتر و + +521 +00:47:00,940 --> 00:47:05,540 +Obstetric من المحطات المهمة جدا يا شباب هي محطة + +522 +00:47:05,540 --> 00:47:11,140 +الولادة لإنه في الولادة بإمكانية انهيصير فيه + +523 +00:47:11,140 --> 00:47:19,320 +tissue factor release عالية جدا جدا جدا طبعا سيبقى + +524 +00:47:19,320 --> 00:47:26,080 +بهذه ال symptoms المختلفة الجسم بالإنسان يعانى من + +525 +00:47:26,080 --> 00:47:29,500 +different symptoms ممكن نشوفها في ال skin في ال + +526 +00:47:29,500 --> 00:47:33,380 +central nervous system في الكني في ال heart في + +527 +00:47:33,380 --> 00:47:40,020 +البلمونريفي كل الحالات المرضية طبعا محصلتها في + +528 +00:47:40,020 --> 00:47:45,880 +البداية ال كيميا لإنه فيه thrombosis ثم فيه + +529 +00:47:45,880 --> 00:47:51,280 +bleeding عشان فعلى سبيل المثال ال skin أول شي + +530 +00:47:51,280 --> 00:47:56,900 +بيصير فيها peritrophalminas و جانجرين لإنه فيه + +531 +00:47:56,900 --> 00:48:03,140 +جلطان ثم بعد ذلك بنشوف في بتاشي و ecomosis لإنه + +532 +00:48:03,140 --> 00:48:08,250 +صار فيه bleedingفاهمين عليها؟ فمريض ال DIC بيمر في + +533 +00:48:08,250 --> 00:48:14,250 +عدة مراحل حسب المرحلة بيصير فيه symptoms وعندنا + +534 +00:48:14,250 --> 00:48:20,610 +حسب المرحلة بتشوف نتيجة فحص لإن في كل مرحلة في فحص + +535 +00:48:20,610 --> 00:48:27,310 +مختلف في نتيجة مختلفة وحسب المرحلة بيتعالج ماشي؟ + +536 +00:48:27,310 --> 00:48:30,410 +يعني مريض مثلا قاعد بيعمل جلطة منه بيقول انت .. 
+ +537 +00:48:30,410 --> 00:48:33,550 +انت .. كواجولة لكن مريض خاش في bleeding + +538 +00:48:37,810 --> 00:48:44,090 +بالتالي حسب المرحلة بتتأثر تطلع نتيجة الفحص و إيش + +539 +00:48:44,090 --> 00:48:51,410 +و أولاشي ال symptoms ثم نتيجة الفحص ثم العلاج و + +540 +00:48:51,410 --> 00:48:54,750 +هذه مظاهر مرضية شايفين الصور؟ برنامج دي بصور لاحقش + +541 +00:48:54,750 --> 00:48:58,950 +أنا في bleeding و بتاشي بتاشي و أكموزة شايفين + +542 +00:48:58,950 --> 00:49:03,740 +بتاشي و أكموزة؟ هذه مرحلة متقدمةهى gangrenous + +543 +00:49:03,740 --> 00:49:10,640 +شايفين ان صار فى microvessels from pie وبالتالى + +544 +00:49:10,640 --> 00:49:17,780 +قدت إلى ان بيصير فى عنده gangrenous طبعا + +545 +00:49:17,780 --> 00:49:21,920 +كمان من الحاجات اللى بيصير ان ال blood film لو + +546 +00:49:21,920 --> 00:49:24,820 +عملت blood film يا شباب بتلاقي microscopic finding + +547 +00:49:24,820 --> 00:49:26,560 +schistocytes + +548 +00:49:29,720 --> 00:49:33,060 +fragments،شيستيو سايز، pausty أو platelet، إيش + +549 +00:49:33,060 --> 00:49:41,440 +يعني pausty؟ قلة في ال platelet، وفي حاجات قولنا + +550 +00:49:41,440 --> 00:49:45,170 +ال single test is not enoughمنها D-Diamar, + +551 +00:49:45,290 --> 00:49:49,870 +Antithrombin 3, Fibrin monoxide A وB, FTB, + +552 +00:49:50,030 --> 00:49:53,570 +Platelet count, Protamine test، كل عبارة عن فحوصات + +553 +00:49:53,570 --> 00:50:05,490 +مختلفة بتطلب وبالتالي بتشخص حسب نتيجة الفحوصة، + +554 +00:50:05,490 --> 00:50:10,310 +بنعرف في أي مرحلة المريض وصل، في أي مرحلة المريض + +555 +00:50:10,310 --> 00:50:16,790 +وصلفممكن يحصل عنده thrombocytopenia ممكن يحصل فيه + +556 +00:50:16,790 --> 00:50:20,650 +prolongation في الـBT وBTT طبعا هو كله استهلك + +557 +00:50:20,650 --> 00:50:27,630 +مظبوط ممكن نشوف FDB أو D-Diamond نشوف low level of + +558 +00:50:27,630 --> 00:50:31,030 +anticoagulant زي الـ Antithrombin 3 أو البروتين C + +559 +00:50:31,030 --> 00:50:35,970 +ولو فحصنا العوامل المختلفة خمسة أو تمانية أو تسعة + +560 +00:50:35,970 --> 00:50:42,040 +كلها 
ملاجئة واطيةوالـ Fibrinogen الوحيد اللي هي لا + +561 +00:50:42,040 --> 00:50:46,300 +يفيد في التشخيص لأن كميته كبيرة واستهلاكه بيطول + +562 +00:50:46,300 --> 00:50:49,540 +وبالتالي + +563 +00:50:49,540 --> 00:50:55,460 +مفدش في التشخيص differential dialysis ضروري جدا + +564 +00:50:55,460 --> 00:51:00,240 +عشان تشخص DIC تتفرج بين ال DIC و العمرض الأهلي زي + +565 +00:51:00,240 --> 00:51:05,060 +ال severe liver failure شو الصفة المشتركة بينه و + +566 +00:51:05,060 --> 00:51:06,100 +بين ال DIC؟ + +567 +00:51:09,640 --> 00:51:13,380 +بالظبط ال liver اللى بيصنع coagulation فى ال liver + +568 +00:51:13,380 --> 00:51:15,640 +الفيريالى يعني مافيش coagulation فاكترواها مافيش + +569 +00:51:15,640 --> 00:51:20,680 +coagulation وقالت من K deficiency تدينا نفس الصورة + +570 +00:51:20,680 --> 00:51:27,040 +liver disease TTP و اللى بيصير فيها جلطات متكررة + +571 +00:51:27,040 --> 00:51:32,080 +ثم congenital abnormalities of fibrin and help + +572 +00:51:32,080 --> 00:51:37,290 +syndrome اللى هو hereditary elevated livertest + +573 +00:51:37,290 --> 00:51:43,690 +with low platelet help اختصارنا ايه يا شباب + +574 +00:51:43,690 --> 00:51:48,770 +hereditary elevation + +575 +00:51:48,770 --> 00:51:58,130 +of liver test with + +576 +00:51:58,130 --> 00:52:03,630 +low platelet علاج + +577 +00:52:03,630 --> 00:52:04,150 +طبعا + +578 +00:52:06,870 --> 00:52:10,930 +أول شي stop the triggering factor طبعا ال + +579 +00:52:10,930 --> 00:52:14,870 +triggering factor بدك توقفه في كتير من الأحيان + +580 +00:52:14,870 --> 00:52:21,730 +عملية التوقيف تكون صعبة مالا تكون اقوى the only + +581 +00:52:21,730 --> 00:52:26,610 +proof in the treatment أنك توقف مين تسبب ال VIC + +582 +00:52:26,610 --> 00:52:31,950 +بعدين تدي support in the treatment طبعا support in + +583 +00:52:31,950 --> 00:52:32,450 +the treatment + +584 +00:52:38,010 --> 00:52:43,530 +ليش؟ في بداية المرحلة؟ في البداية اه في بداية اللي + +585 +00:52:43,530 --> 00:52:46,950 +داكه اه ممكن ال non specific treatment is + +586 
+00:52:46,950 --> 00:52:51,230 +available حسب المرحلة اللي انا بقوله ممكن تعطي + +587 +00:52:51,230 --> 00:52:55,890 +plasma و platelet substitution ممكن تعطي + +588 +00:52:55,890 --> 00:53:00,050 +anticoagulant إذا كان في البداية ممكن تعطي + +589 +00:53:00,050 --> 00:53:06,110 +inhibitors إذا كان في البداية وكذاطبعا هذه ال + +590 +00:53:06,110 --> 00:53:15,130 +plasma therapy نديها وقتيش إذا في bleeding ماشي + +591 +00:53:15,130 --> 00:53:19,710 +fresh frozen plasma برضه بتنعطف في حالات نقص بعض + +592 +00:53:19,710 --> 00:53:24,010 +العوامل platelet therapy إذا ال platelet كانت وطيط + +593 +00:53:24,010 --> 00:53:29,990 +ماشي و blood إذا صار عند المريض أنهيميا or disease + +594 +00:53:29,990 --> 00:53:34,840 +destructionQuagination Inhibitors، Antithrombin 3 + +595 +00:53:34,840 --> 00:53:37,240 +و Protein C و Tissue Factor Pathway Inhibitor و + +596 +00:53:37,240 --> 00:53:43,080 +Heparin طبعا لأنه بيستهلكوا كمان بيعطوه و هذا + +597 +00:53:43,080 --> 00:53:48,180 +العلاج أنا مش راح أخد فيه بالتفصيل Anti + +598 +00:53:48,180 --> 00:53:55,880 +Fibrillatic زي ما قال برضه بتنعطى في دي دايات الحد + +599 +00:53:55,880 --> 00:53:59,200 +اللي مش عارف الشغل، طيب no question يعني؟ + diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/s9StxN7sbug.srt b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/s9StxN7sbug.srt new file mode 100644 index 0000000000000000000000000000000000000000..1abd20c351e4b0916bae9f9daf870d0126dedd54 --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/s9StxN7sbug.srt @@ -0,0 +1,1547 @@ +1 +00:00:20,790 --> 00:00:26,270 +بسم الله الرحمن الرحيم طيب اليوم هنستكمل إن شاء + +2 +00:00:26,270 --> 00:00:29,050 +الله محاضرة vitamin K deficiency أو vitamin K + +3 +00:00:29,050 --> 00:00:33,110 +deficiency and coagulation وصلنا لهذه الـ slide وهي vitamin K + +4 +00:00:33,110 --> 00:00:35,910 +deficiency وإن vitamin K deficiency ممكن + +5 +00:00:35,910 --> 00:00:43,270 +ينتج نتيجة عدة حالات في عدة حالات منها أنه نقص الـ + +6 +00:00:43,270 --> 00:00:45,770 +vitamin في الأكل 
اللي بتاكله، بتاكل أكل مافيش فيه + +7 +00:00:45,770 --> 00:00:54,720 +vitamin أو إنه في disorders بتقلل من امتصاص الـ + +8 +00:00:54,720 --> 00:01:00,520 +vitamin أو في drugs بتتاخد بتعمل interaction مع الـ + +9 +00:01:00,520 --> 00:01:06,580 +vitamin K وبالتالي بتمنع امتصاصه بتزود أو بتقلل + +10 +00:01:06,580 --> 00:01:09,800 +من الـ vitamin K يعني في بعض الأدوية بتعمل + +11 +00:01:09,800 --> 00:01:13,360 +potentiation وفي بعض الأدوية بتعمل inhibition للـ + +12 +00:01:13,360 --> 00:01:16,590 +vitamin K action منها على سبيل المثال الـ + +13 +00:01:16,590 --> 00:01:22,810 +Anticomposants و الـ Antibiotics طبعا سبب آخر و + +14 +00:01:22,810 --> 00:01:26,910 +آخر هو الـ use of Coumarin Warfarin كلنا عرفنا من + +15 +00:01:26,910 --> 00:01:30,990 +المحاضرة السابقة أن الـ Warfarin هو أبرع الـ Vitamin + +16 +00:01:30,990 --> 00:01:35,650 +K Antagonist Vitamin K Antagonist وبالتالي بعمل + +17 +00:01:35,650 --> 00:01:42,370 +Inhibition or Vitamin K Deficiency states ما هي + +18 +00:01:42,370 --> 00:01:46,490 +المظاهر المرضية لهذا النقص؟ طبعا كلنا بنعرف أنه + +19 +00:01:46,490 --> 00:01:49,990 +ممكن يصير فيه bleeding، ليش؟ Vitamin K، عارفين + +20 +00:01:49,990 --> 00:01:55,710 +دوره، بيساعد في activation of certain coagulation + +21 +00:01:55,710 --> 00:02:00,710 +factors أو Vitamin K، مظبوط؟ 100% وبالتالي مافيش + +22 +00:02:00,710 --> 00:02:05,770 +coagulation factors عندها القدرة أن تتنشر، يعني في + +23 +00:02:05,770 --> 00:02:10,070 +bleeding صح؟ 100% في nose bleeding، في gum + +24 +00:02:10,070 --> 00:02:15,150 +bleeding، في stomach bleeding، في urine، في blood + +25 +00:02:15,150 --> 00:02:19,650 +أو stool، في blood، في tarry black stool، tarry + +26 +00:02:19,650 --> 00:02:23,250 +black stool هو برعن حالة مرضية تصيب غالبا المناطق + +27 +00:02:23,250 --> 00:02:27,290 +الاستوائية ويصير فيها إسهال شديد، مصحوب بدم شديد، + +28 +00:02:27,290 --> 00:02:36,430 +إسهال شديد، مصحوب بـ .. 
بدم شديد طبعا في كمان يصير + +29 +00:02:36,430 --> 00:02:42,710 +فيه extremely heavy menstrual bleeding يعني الـ X + +30 +00:02:42,710 --> 00:02:47,610 +بتطول على الـ 6 بتقعد فترة طويلة and in infants في + +31 +00:02:47,610 --> 00:02:54,050 +خوف أنه يصير فيه intra cranial hemorrhage + +32 +00:02:54,050 --> 00:03:01,880 +in infants الأطفال newborn غالبا are brought to + +33 +00:03:01,880 --> 00:03:06,920 +vitamin K deficiency يعني غالبا أطفال بيولدوا و + +34 +00:03:06,920 --> 00:03:13,180 +عندهم إيش عندهم vitamin K deficiency أو بعد ولادة + +35 +00:03:13,180 --> 00:03:19,500 +من الأسباب لذلك هي القاتل نمر واحد أن الـ vitamin K + +36 +00:03:19,500 --> 00:03:22,720 +هو أبرعان و الـ limit و أبرعان fat-soluble sorry بالـ + +37 +00:03:22,720 --> 00:03:26,620 +substance مظبوط they cannot cross the placenta + +38 +00:03:26,620 --> 00:03:32,450 +وبالتالي بيسروا من الأم للطفل نقطة number اثنين + +39 +00:03:32,450 --> 00:03:38,770 +coagulation factor وبالذات Pro-thrombin Synthesis + +40 +00:03:38,770 --> 00:03:42,370 +في الـ liver هي عبارة عن عملية غير نادجة premature + +41 +00:03:42,370 --> 00:03:49,110 +process عشان هيك معظم الأطفال الحديثي الولادة الـ + +42 +00:03:49,110 --> 00:03:54,650 +normal value بتاعة الـ coagulation factors are less + +43 +00:03:54,650 --> 00:03:57,850 +than others are less than others بتوصل لغاية fifty + +44 +00:03:57,850 --> 00:04:03,560 +percent النسبة، ماشي؟ يبقى في عندنا قلة في إنتاج الـ + +45 +00:04:03,560 --> 00:04:07,780 +coagulation factors عند إيش؟ عند الأطفال، يبقى فيش + +46 +00:04:07,780 --> 00:04:12,020 +vitamin K وفيش coagulation factors، ماشي؟ هذه كلها + +47 +00:04:12,020 --> 00:04:16,660 +عوامل عبارة عن إيش؟ عبارة عن عوامل بتؤدي إلى + +48 +00:04:16,660 --> 00:04:20,400 +vitamin K deficiency نمرة تلاتة الطفل بنولد و الـ + +49 +00:04:20,400 --> 00:04:24,680 +gut تبقى أستراليكا الـ gut الأرمعاد تبقى أستراليكا + +50 +00:04:24,680 --> 00:04:29,860 +يعني في Menaquinone بتصنع Menaquinone هو يبقى اللي + +51 +00:04:29,860 --> 00:04:38,440 
+بدأت مظهور يعني كمان هذا سبب آخر أنه يصير فيه سبب + +52 +00:04:38,440 --> 00:04:41,760 +خامس أو رابع الـ breast milk + +53 +00:04:46,090 --> 00:04:49,910 +ماشي؟ مافيش فيه vitamin K عشان هيك الناس اللي + +54 +00:04:49,910 --> 00:04:54,510 +بتطول و هي بتعتمد فقط على حليب صدرها للأم اللي طول + +55 +00:04:54,510 --> 00:04:58,030 +هي أمر عن .. هي خاطئة دول أن تدري لإن مافيش + +56 +00:04:58,030 --> 00:05:01,630 +vitamin .. بتمنع طفلها أو بتحرم طفلها من إيش؟ من + +57 +00:05:01,630 --> 00:05:05,770 +الـ vitamin K اللي هو أصلا محتاجه .. اللي هو أصلا + +58 +00:05:05,770 --> 00:05:13,770 +محتاجه طبعا كنتيجة لهذه الأسباب كلها Hemorrhage can + +59 +00:05:13,770 --> 00:05:19,450 +be seen in infants و بيسمو هذه الحالة vitamin K + +60 +00:05:19,450 --> 00:05:26,790 +deficiency bleeding disease و هذا الكلام بنشوفه + +61 +00:05:26,790 --> 00:05:29,650 +غالبا أما في الـ breast feeding of children الناس + +62 +00:05:29,650 --> 00:05:35,410 +اللي بيعتمدوا على اللي هو حليب الأم أو في المهالة + +63 +00:05:35,410 --> 00:05:39,550 +absorption أو في الـ liver disorders يكون الطفل + +64 +00:05:39,550 --> 00:05:45,020 +عنده هذه الشيء طبعا كلكوا عشان تكونوا بس جديش + +65 +00:05:45,020 --> 00:05:50,380 +تكونوا عندكوا فكرة على كمية الـ vitamin K المطلوبة + +66 +00:05:50,380 --> 00:05:56,680 +للإنسان في اليوم ماشي؟ يعني لو في كل يوم تخيلوا + +67 +00:05:56,680 --> 00:06:02,340 +الأضلتة دي الأضلتة على سبيل مثال الشباب اللي زيكم + +68 +00:06:02,340 --> 00:06:08,640 +بيحتاج إلى مائة وعشرين مايكرو جرام في اليوم 120 + +69 +00:06:08,640 --> 00:06:14,980 +microgram عرفنا من أين ممكن نحصله صح في females بتحتاج + +70 +00:06:14,980 --> 00:06:23,600 +أقل في females بتحتاج أقل طيب + +71 +00:06:23,600 --> 00:06:27,800 +prevention and treatment الـ vitamin K بنعطيه + +72 +00:06:27,800 --> 00:06:32,380 +orally ماشي لإنه قلنا intramuscular و vitamin K + +73 +00:06:32,380 --> 00:06:39,970 +الـ warfarin is a contradictory الـ vitamin K بنحطها + +74 +00:06:39,970 --> 00:06:45,170 +orally في بعض الأحيان مافيش good 
absorption + +75 +00:06:45,170 --> 00:06:52,050 +فبيعطوه by injection و الأمبولات بتاعة الـ vitamin + +76 +00:06:52,050 --> 00:06:56,090 +K في الـ emergency room كثيرة لإن أي إنسان بيخش بـ + +77 +00:06:56,090 --> 00:07:03,850 +bleeding بيفكروا يعطوه إيه Vitamin K. The dose is + +78 +00:07:03,850 --> 00:07:11,250 +altered or extra vitamin K is given في بعض الحالات + +79 +00:07:11,250 --> 00:07:16,890 +اللي يكون عندها mal absorption خلل في امتصاص الـ + +80 +00:07:16,890 --> 00:07:22,190 +vitamin شو بيعملوا؟ بيعطوا injection و بيزودوا + +81 +00:07:22,190 --> 00:07:27,530 +الجرعة عشان يعوضوا النقص الـ layer اللي موجود في بعض + +82 +00:07:27,530 --> 00:07:35,890 +الأحيان بعض المرضى تعاني من liver disease وبالتالي + +83 +00:07:35,890 --> 00:07:43,830 +في vitamin K deficiency و vitamin K injection لحاله + +84 +00:07:43,830 --> 00:07:48,970 +لا يكفي في هذه الحال الـ vitamin K لحاله لا يكفي + +85 +00:07:48,970 --> 00:07:53,790 +ففي إيش بيفكروا بالضبط بيعملوله blood transfusion + +86 +00:07:53,790 --> 00:07:57,910 +على أساس يعيشوا أو يعطوا coagulation factors + +87 +00:07:57,910 --> 00:08:00,890 +already يعني جاهزة for activity + +88 +00:08:04,560 --> 00:08:09,260 +اليوم منظمة الصحة العالمية أوصت بأن كل طفل بيولد + +89 +00:08:09,260 --> 00:08:13,960 +بما أنه عنده هذه الظروف لازم ياخد vitamin K عند + +90 +00:08:13,960 --> 00:08:16,980 +الولادة عشان هيك معظم الأطفال عند الولادة بياخدوا + +91 +00:08:16,980 --> 00:08:20,100 +إبرة وهذه الإبرة هي عبارة عن vitamin K عبارة عن + +92 +00:08:20,100 --> 00:08:23,880 +حقن vitamin K لإن زي ما قلتلكم بنخاف من intra + +93 +00:08:23,880 --> 00:08:27,800 +cranial bleeding عند حقن الأطفال الحاجة التانية + +94 +00:08:27,800 --> 00:08:32,340 +اللي بيوصوا فيها لإن معظم الحليب اليوم صار مدعم + +95 +00:08:32,340 --> 00:08:41,750 +بـ حقن vitamin K، حد عنده سؤال؟ طبعا الآن نقدر + +96 +00:08:41,750 --> 00:08:52,110 +نجاوب أن وين ممكن نحصل على الـ vitamin، هنا two + +97 +00:08:52,110 --> 00:08:57,930 +ways we get vitamin K نباتات وعن طريق الـ bacterium 
+ +98 +00:08:57,930 --> 00:09:01,610 +اللي بتكون في الأمعاء 100% يعني نباتي و حيواني، خليك + +99 +00:09:01,610 --> 00:09:08,630 +منه ومنه بدنا good source for vitamin K بروكسلز + +100 +00:09:08,630 --> 00:09:16,110 +بروكسلز ماشي بروكلي بروكلي القرنبيط الكرنب الطفل ماشي + +101 +00:09:16,110 --> 00:09:21,070 +what types of chemical reaction does vitamin K 100 + +102 +00:09:21,070 --> 00:09:24,890 +% بيعمل carboxylation طبعا بيعمل 100% على vitamin + +103 +00:09:24,890 --> 00:09:30,190 +K cycle صح طبعا anticoagulant بيعمل inhibition + +104 +00:09:30,190 --> 00:09:35,300 +لـ vitamin K 100% name signs of vitamin K deficiency + +105 +00:09:35,300 --> 00:09:41,500 +really صح summer يا شباب طبعا احنا ممكن نلخص أن الـ + +106 +00:09:41,500 --> 00:09:45,440 +vitamin K is a fat-soluble نفس المحاضرة كلها عدينا + +107 +00:09:45,440 --> 00:09:50,920 +عليها حد عنده سؤال حد عنده سؤال طيب استكمال + +108 +00:09:50,920 --> 00:09:55,760 +الموضوع اليوم هنحكي في الجزء الثاني من المادة + +109 +00:09:55,760 --> 00:09:59,800 +اليوم هنبدأ نحكي في الجزء الثاني من المادة وهو + +110 +00:09:59,800 --> 00:10:04,170 +bleeding disorders وهو disorder طبعا الـ disorder يا + +111 +00:10:04,170 --> 00:10:09,830 +شباب بتقسم إلى جزئين هنحكي في bleeding disorders و + +112 +00:10:09,830 --> 00:10:13,010 +هنحكي في thrombotic disorders يعني هنحكي في + +113 +00:10:13,010 --> 00:10:14,370 +bleeding و thrombosis + +114 +00:10:19,020 --> 00:10:24,660 +هي مقسمة إلى أمراضها، مقسم إلى جزء إيه؟ إما + +115 +00:10:24,660 --> 00:10:29,440 +hereditary وإما acquired disease، إما hereditary + +116 +00:10:29,440 --> 00:10:33,060 +وإما acquired disease هندرس different disease + +117 +00:10:33,060 --> 00:10:39,080 +associated with the defective mechanisms اللي + +118 +00:10:39,080 --> 00:10:42,960 +اتعلمناها، ميكانيزمات اللي اتعلمناها، يوم ما يصير + +119 +00:10:42,960 --> 00:10:47,550 +فيها defect، معناته إيش؟ معناته حالة مرضية طبعا نشوف + +120 +00:10:47,550 --> 00:10:50,870 +إيش الموضوع و نبدأ بالـ Vascular و Platelet + +121 
+00:10:50,870 --> 00:10:55,590 +Disorder و + +122 +00:10:55,590 --> 00:10:59,690 +قبل ما نبدأ في الموضوع خلينا نتعرف على بعض + +123 +00:10:59,690 --> 00:11:04,430 +المصطلحات المتعلقة بالأمراض، لما بحكي على + +124 +00:11:04,430 --> 00:11:08,650 +symptoms، بدي أحكي على مصطلح، أكون عارف هذا + +125 +00:11:08,650 --> 00:11:16,670 +المصطلح، نبدأ ورا حد بتاشي، مصطلح بتاشي ماشي أو في + +126 +00:11:16,670 --> 00:11:22,730 +بعض الدول + +127 +00:11:22,730 --> 00:11:29,330 +بتنفذوا بتاشياي بتاشياي وأن كنت أنا مش كتير بفضل + +128 +00:11:29,330 --> 00:11:35,110 +هذا المصطلح هو عبارة عن بتاشي ماشي دول ببساطة + +129 +00:11:35,110 --> 00:11:41,510 +بتاشي هو عبارة عن شباب مدموة pinpoint size or + +130 +00:11:41,510 --> 00:11:46,940 +pinhead size bleeding under the skin ماشي، إيش + +131 +00:11:46,940 --> 00:11:52,940 +يعني؟ يعني بيصير نقطة حمراء صغيرة نازمة برا .. + +132 +00:11:52,940 --> 00:11:58,140 +بحاجة براس إيه؟ الدبوس تحت الجلد ويمكن كتير + +133 +00:11:58,140 --> 00:12:03,480 +تلاحظوها، تشوفوها ولا بتشوفوهاش؟ كتير تلاحظوا أنه + +134 +00:12:03,480 --> 00:12:10,560 +مرات بيكون فياش pinpoint ندبة يعني، ندبة إشي أكبر + +135 +00:12:11,120 --> 00:12:16,080 +Petechiae مصطلح آخر وهو hemorrhage under the skin + +136 +00:12:16,080 --> 00:12:24,660 +لكن بيختلف varying in color and duration بيختلف + +137 +00:12:24,660 --> 00:12:31,920 +ماشي من حالة إلى حالة بوقت الـ .. بالوقت و بالعياش + +138 +00:12:31,920 --> 00:12:38,460 +و باللون، ماشي؟ يبدأ هو عبارة عن نزيف تحت الجلد لكن + +139 +00:12:38,460 --> 00:12:46,440 +بـ .. it's a varying picture نمرا تلاتة إيكوموزز + +140 +00:12:46,440 --> 00:12:52,740 +إيكوموزز هو عبارة عن purple .. 
purple patch patch + +141 +00:12:52,740 --> 00:12:56,500 +معناته إيش؟ هي الندبة اللي هو حكى عليها لطا + +142 +00:12:56,500 --> 00:13:05,100 +ماشي؟ patch كبيرة يعني نزيف وصار له diffusion وين؟ + +143 +00:13:05,100 --> 00:13:09,800 +under the skin بيصير فيه extravasation of blood + +144 +00:13:09,800 --> 00:13:17,540 +into the skin يعني بيمد تحت الجلد larger than + +145 +00:13:17,540 --> 00:13:20,760 +بتاشي أكتر من إيش الـ pinpoint اللي طبعاك هنا + +146 +00:13:20,760 --> 00:13:26,080 +عليها؟ Ecchymosis هو عبارة عن bleeding وفشل leading + +147 +00:13:26,080 --> 00:13:30,040 +و صار له diffusion ماشي طبعا الـ diffusion بيختلف + +148 +00:13:30,040 --> 00:13:34,620 +حجمها من منطقة إلى منطقة هذه كتير بتشوفوها شباب + +149 +00:13:34,620 --> 00:13:39,200 +يوم بتعمل تـ .. تسحب عينة الدم من إيه؟ من زميلك إيش + +150 +00:13:39,200 --> 00:13:44,020 +بتعمله؟ امتو امتو بالظبط بيكون فيه صار ecchymosis + +151 +00:13:44,020 --> 00:13:48,920 +وطلع دم برا الـ blood vessels وصار له diffusion + +152 +00:13:48,920 --> 00:13:50,580 +extravascular + +153 +00:13:54,430 --> 00:14:00,010 +هو عبارة عن nose bleeding بيسموها إيه؟ Epistaxis + +154 +00:14:00,010 --> 00:14:06,950 +ماشي Menorrhagia معناها excessive menses + +155 +00:14:06,950 --> 00:14:12,830 +Menorrhagia معناها إيه؟ Excessive menses ثم + +156 +00:14:12,830 --> 00:14:19,670 +Hematuria من الاسم blood in urine Hemarthrosis + +157 +00:14:21,210 --> 00:14:26,610 +الدم لكن في الـ joints ماشي and hematemesis + +158 +00:14:26,610 --> 00:14:37,690 +spitting الدم يعني يبصق أو ياش آه بالظبط ثم + +159 +00:14:37,690 --> 00:14:42,450 +sorry vomiting hematemesis hemoptysis هو الـ + +160 +00:14:42,450 --> 00:14:47,430 +spitting أنه لما ييجي يبزق بتلاقي ياش خلى دم ثم + +161 +00:14:47,430 --> 00:14:52,470 +Melena والـ Melena هي عبارة عن blood in a .. 
in a + +162 +00:14:52,470 --> 00:14:56,270 +stool يعني فيه gastrointestinal bleeding نزل فيهاش + +163 +00:14:56,270 --> 00:15:01,710 +في الـ stool وعمل ما يسمى بالـ occult blood خاتون ماشي + +164 +00:15:01,710 --> 00:15:06,690 +اللي هو الدم المخفي في البراز الدم المخفي فيهاش + +165 +00:15:06,690 --> 00:15:13,350 +في الـ stool ماشي دي مصطلحات هنفترق إليها أثناء شرحنا + +166 +00:15:13,350 --> 00:15:17,240 +كثيرا طبعا قبل ما نبدأ كلنا بنعرف أنه في hemostatic + +167 +00:15:17,240 --> 00:15:21,800 +balance between coagulation and fibrinolysis أو + +168 +00:15:21,800 --> 00:15:31,250 +coagulation factors بأنواع and inhibitors مظبوط في + +169 +00:15:31,250 --> 00:15:35,810 +balance يوم ما يصير فيه bleeding مين اللي رجح + +170 +00:15:35,810 --> 00:15:42,190 +كفته أو الـ inhibitors ماشي fibrinolysis أو إياش أو + +171 +00:15:42,190 --> 00:15:45,450 +الـ inhibitors وبالتالي presence of inhibitors زي + +172 +00:15:45,450 --> 00:15:49,750 +الـ warfarin أو الـ heparin ماشي أو في بعض الحالات + +173 +00:15:49,750 --> 00:15:54,270 +المرضية زي القلو أو الـ auto antibody to certain + +174 +00:15:54,270 --> 00:15:58,950 +factors بيسموهم inhibitors، شو بيعملوا؟ بيعملوا + +175 +00:15:58,950 --> 00:16:03,830 +blocking للـ inhibitors فبيصير فيه bleeding، ماشي؟ + +176 +00:16:03,830 --> 00:16:10,490 +أو الـ auto antibody دخل لـ certain coagulation + +177 +00:16:10,490 --> 00:16:15,290 +factor، ماشي؟ وعمل بيسموهم inhibitors الـ antibody + +178 +00:16:15,290 --> 00:16:20,220 +هدول عملوا inhibit لـ الـ factors و بالتالي صار فيه + +179 +00:16:20,220 --> 00:16:24,700 +bleeding و أشهرها طبعا مرض الـ hemophilia بيسموه + +180 +00:16:24,700 --> 00:16:29,800 +acquired hemophilia ليش؟ صار فيه antibody لمين؟ لـ الـ + +181 +00:16:29,800 --> 00:16:34,140 +coagulation factor قسموا الـ bleeding disorders + +182 +00:16:34,140 --> 00:16:42,120 +إلى أربع different area طبعا الشباب احنا ممكن نقسم + +183 +00:16:42,120 --> 00:16:50,630 +الـ bleeding disorders إلى أكثر من هناماشي، ليش؟ + +184 +00:16:50,630 --> 
00:16:56,410 +احنا من بداية الفصل قولنا الـ hemostatic components + +185 +00:16:56,410 --> 00:17:01,570 +كم واحد، جبل بليتليتش فيه + +186 +00:17:12,280 --> 00:17:15,840 +وشيبونا الكوينم الكمني ما تضلك وخشوا على الخط أجيت + +187 +00:17:15,840 --> 00:17:22,860 +هدول العوامل المختلفة كل خلل في كل عامل من العوامل + +188 +00:17:22,860 --> 00:17:27,980 +ممكن يأدي إما إلى thrombosis أو إلى إيه؟ إلى + +189 +00:17:27,980 --> 00:17:33,780 +bleeding وبالتالي التقسيمة سهلة ماشي؟ لكن أنا شفت + +190 +00:17:33,780 --> 00:17:39,180 +أن هذه التقسيمة هي أفضل تقسيمة لتقسيم هذه الأمراض + +191 +00:17:39,180 --> 00:17:45,180 +و .. اتقسموا إلى الـ major component vascular + +192 +00:17:45,180 --> 00:17:51,840 +abnormality ثم platelet disorders ثم coagulation + +193 +00:17:51,840 --> 00:17:55,940 +factor disorders ولما بقول bleeding يعني إيش صار + +194 +00:17:55,940 --> 00:18:05,080 +صار قلة مظبوط ثم الرابع تقدروا + +195 +00:18:05,080 --> 00:18:10,500 +تسموه miscellaneous الرابع تعرفين إيش يعني + +196 +00:18:10,500 --> 00:18:15,980 +miscellaneous؟ آخر أو من آخر و تفريقات ماشي + +197 +00:18:15,980 --> 00:18:20,220 +miscellaneous فحطيت لكم مثال عليه اللي هو الـ DIC + +198 +00:18:20,220 --> 00:18:25,000 +لأنه لا بقدر أحطه تحت hereditary ولا acquired ماشي + +199 +00:18:25,000 --> 00:18:33,260 +فحطيته يعني مختلف مفهوم يا شباب ونبدأ بأول هذه الـ + +200 +00:18:33,260 --> 00:18:37,930 +component وهي الـ blood vessels or vascular defect + +201 +00:18:37,930 --> 00:18:45,490 +بتؤدي إلى bleeding بتؤدي إلى bleeding يعني it + +202 +00:18:45,490 --> 00:18:51,850 +begins with bleeding episodes in the presence of + +203 +00:18:51,850 --> 00:19:00,990 +laboratory normal laboratory coagulation test وهذا + +204 +00:19:00,990 --> 00:19:04,610 +وضع طبيعي جدا يا شباب في الـ blood vessels الـ + +205 +00:19:04,610 --> 00:19:07,990 +hemostatic tests are normal، إيش الـ hemostatic + +206 +00:19:07,990 --> 00:19:12,450 +tests؟ الـ BT طب ولما ما أخذتهم الـ BT و الـ PTT، + +207 +00:19:12,450 --> 00:19:20,730 +إيش 
بندرس من خلالهم؟ برضه + +208 +00:19:20,730 --> 00:19:24,890 +ما فهمتش، ندرس من خلالهم الـ coagulation cascade، + +209 +00:19:24,890 --> 00:19:29,540 +صح؟ يعني تدرس من خلال الـ coagulation cascade و + +210 +00:19:29,540 --> 00:19:31,420 +بالتالي الـ blood vessels اللي دخلت في الموضوع و + +211 +00:19:31,420 --> 00:19:36,780 +بالتالي Hemostatic tests are normal ما هو مش دخلت + +212 +00:19:36,780 --> 00:19:40,480 +يعني حتى لو خرجت الـ blood vessels الـ Hemostatic + +213 +00:19:40,480 --> 00:19:48,300 +laboratory tests of coagulation are normal قسمهم + +214 +00:19:48,300 --> 00:19:55,220 +إلى Hereditary و Acquired و في الغرب الـ symptoms + +215 +00:19:55,220 --> 00:19:59,250 +اللي بتتعلق بالـ blood vessels are superficial بحكي + +216 +00:19:59,250 --> 00:20:06,230 +على الـ blood vessels and usually these are diagnosed + +217 +00:20:06,230 --> 00:20:15,210 +by exclusion بتروح وأنت مدور على الفعصار وعاملها + +218 +00:20:15,210 --> 00:20:18,470 +واحد ورا التاني، ده كلهم normal، معناته ميدنال + +219 +00:20:18,470 --> 00:20:24,470 +بيجي by exclusion after you ruling out the + +220 +00:20:24,470 --> 00:20:29,410 +platelet ثم coagulation هما fibrinolytic + +221 +00:20:29,410 --> 00:20:34,670 +components ماشي بيضل الـ air بيضل الـ blood + +222 +00:20:34,670 --> 00:20:44,210 +vessels و الـ defect ماشي vascular disease اتفقنا + +223 +00:20:44,210 --> 00:20:48,310 +على أنه الـ platelet و الـ hemostatic tests are + +224 +00:20:48,310 --> 00:20:53,110 +normal و الـ platelet function test such as + +225 +00:20:53,110 --> 00:20:57,330 +bleeding time وبعض الفحوصات الأخرى اللي بتتعلق ب + +226 +00:20:58,030 --> 00:21:02,990 +هذا الموضوع + +227 +00:21:02,990 --> 00:21:07,470 +بالذات bleeding time بيصير لها prolongation، بيصير + +228 +00:21:07,470 --> 00:21:10,470 +لها إيش؟ prolongation، ليه يا شباب الـ bleeding + +229 +00:21:10,470 --> 00:21:14,670 +time؟ لأنه الـ platelet لما بدها تيجي تشتغل، بتشتغل + +230 +00:21:14,670 --> 00:21:17,950 +على مين؟ على الـ blood vessels؟ على الـ blood 
vessels لما + +231 +00:21:17,950 --> 00:21:20,830 +يكون فيه خلل في الـ blood vessels تأثر البليتلت + +232 +00:21:20,830 --> 00:21:25,090 +تتأثر، تلاقيش إشي تايم تمسك فيه، مظبوط؟ وبالتالي + +233 +00:21:25,090 --> 00:21:27,670 +الخلل مش في البليتلت، الخلل في المكان اللي بدها + +234 +00:21:27,670 --> 00:21:32,650 +تمسك فيه البليتلت، ماشي؟ إلا إنه تتأثر، إلا إنه + +235 +00:21:32,650 --> 00:21:36,990 +البليتلت، فالفحص بيصير له prolongation، لكن أنتم + +236 +00:21:36,990 --> 00:21:40,030 +اشتغلتوا الـ bleeding طبعا، وحنافته إن شاء الله هو + +237 +00:21:40,030 --> 00:21:43,710 +طبعا insensitive، non-sensitive، مظبوط؟ + +238 +00:21:49,860 --> 00:21:55,320 +Inherited نبدأ من inherited وجاله فيه مجموعة من + +239 +00:21:55,320 --> 00:22:00,340 +الأمراض الوراثية لكنها rare لكنها rare disease + +240 +00:22:00,340 --> 00:22:06,480 +بينما الـ bleeding is a common symptoms فيها + +241 +00:22:06,480 --> 00:22:11,860 +Hemostasis tests are not necessary for diagnosis + +242 +00:22:11,860 --> 00:22:15,200 +يبقى هي hereditary they are rare + +243 +00:22:18,570 --> 00:22:22,770 +الوحيد اللي بنشوفه هو الـ bleeding لو عملنا فحصات + +244 +00:22:22,770 --> 00:22:28,690 +hemostatic test بتكون إيه؟ بتكون أول هذه الأمراض + +245 +00:22:28,690 --> 00:22:34,530 +شبهها حاجة بتتعلق بالـ connective tissue disease + +246 +00:22:34,530 --> 00:22:36,390 +defect في الـ connective tissue + +247 +00:22:42,450 --> 00:22:46,910 +بنكتب blood vessels احنا بنحكي على الـ blood vessels + +248 +00:22:46,910 --> 00:22:54,350 +فأي طبقة من الـ blood vessels في الانتما + +249 +00:22:54,350 --> 00:22:59,630 +طبقة + +250 +00:22:59,630 --> 00:23:05,330 +الثالثة طبقة + +251 +00:23:05,330 --> 00:23:08,450 +التالتة + +252 +00:23:08,450 --> 00:23:15,220 +ماشي طبقة تالتة تانية فيها كولاجين وسموك مصر وخفيف من + +253 +00:23:15,220 --> 00:23:18,880 +الـ connective tissue لكن الـ connective tissue اللي + +254 +00:23:18,880 --> 00:23:22,780 +بنشوفها في الطبقة التالتة وهي طبقة داعمة وهي طبقة + +255 +00:23:22,780 --> 00:23:26,880 
+إيه؟ داعمة للوعاء الدموي وبالتالي الوعاء الدموي + +256 +00:23:26,880 --> 00:23:30,560 +دون إيه؟ this connective tissue شو بيصير فيه؟ + +257 +00:23:30,560 --> 00:23:31,160 +fragile + +258 +00:23:38,310 --> 00:23:42,610 +عشان هي بيسموه defects, affects, ability to + +259 +00:23:42,610 --> 00:23:48,230 +support, to support visceral wall، ماشي؟ هي عبارة + +260 +00:23:48,230 --> 00:23:52,930 +عن خلل في الطبقة الداعمة لمين؟ بالوعي الدماغي، من + +261 +00:23:52,930 --> 00:23:56,770 +أشهر الأمراض هو Ehlers-Danlos Syndrome، بتنين هدول + +262 +00:23:56,770 --> 00:24:03,020 +علماء، Ehlers-Danlos Syndrome قالوا هذا المرض بيصير + +263 +00:24:03,020 --> 00:24:09,500 +فيه lack في الـ structural tissue which support the + +264 +00:24:09,500 --> 00:24:16,260 +blood vessels وهي الـ collagen substance فبيصير في + +265 +00:24:16,260 --> 00:24:21,800 +skin elasticity and fragility ماشي؟ ويمكن شفته في + +266 +00:24:21,800 --> 00:24:28,860 +كثير من المسلسلات أو في التليفزيون بعض الصور لبعض + +267 +00:24:28,860 --> 00:24:33,000 +الـ ...الـ ...الـ ...الـ ...الـ human beings اللي عندهم + +268 +00:24:33,000 --> 00:24:37,000 +قوة خارقة بتقول لي، ماشي؟ هي عبارة عن abnormality يا + +269 +00:24:37,000 --> 00:24:40,820 +شباب، كانوا يجيبوا بعض الصور لبعض الناس اللي ممكن + +270 +00:24:40,820 --> 00:24:46,000 +تشبك skin تلتين وأربعين سنتي عن إيدها، صح؟ أو + +271 +00:24:46,000 --> 00:24:52,330 +يلف إيدها لتر بألفات ويتركها ترجع مرة تانية هذا + +272 +00:24:52,330 --> 00:24:56,890 +عبارة عن حالات مرضية ناتجة عن خلل أو نقص في + +273 +00:24:56,890 --> 00:25:01,370 +الكولاجين، ناتجة عن إيش؟ نقص في الكولاجين، فبيصير + +274 +00:25:01,370 --> 00:25:08,430 +elastic، بالضبط الجلد عنده، الـ skin بيصير إيش؟ + +275 +00:25:08,430 --> 00:25:14,990 +elastic، لكنها fragile، سهل جدا أن هي تنبزج، بيصير + +276 +00:25:14,990 --> 00:25:20,230 +في hyper mobility of joints hyper mobility of + +277 +00:25:20,230 --> 00:25:24,270 +joints بمعنى ... 
زي ما قلت قبله باشي أو بيجيب رجله + +278 +00:25:24,270 --> 00:25:28,130 +من ورا وبيخشش أقل ده من الحركات البهلوانية الغريبة + +279 +00:25:28,130 --> 00:25:31,690 +جدا اللي ممكن تشوفها ولن تصدق أنه إنسان طبيعي + +280 +00:25:31,690 --> 00:25:36,630 +ممكن يعملها بالضبط أو بيصير فيه طبعا evidence of + +281 +00:25:36,630 --> 00:25:46,710 +bleeding and bruising بمعنى في scarring في علامات + +282 +00:25:49,630 --> 00:25:54,150 +في الـ skin طبعا الـ skin يوم تنمزج يا شباب وتيجي + +283 +00:25:54,150 --> 00:26:01,190 +تلحم تاني بيضال معلم صح تلاقي الجلد مشطط كثير ماشي + +284 +00:26:01,190 --> 00:26:07,330 +صار في scars multiple scars ليش لإنه لإنه بالضبط + +285 +00:26:07,330 --> 00:26:13,010 +زي اللي في دك بالضبط ماشي ليش لإنه أصلا المزعق و + +286 +00:26:13,010 --> 00:26:19,010 +لما إجا عمله healing مرة تانية صار في علامة The + +287 +00:26:19,010 --> 00:26:25,270 +most serious is deficient of type 3 collagen وهذا + +288 +00:26:25,270 --> 00:26:29,230 +موجود في blood vessels هذا النوع من الـ collagen + +289 +00:26:29,230 --> 00:26:35,440 +موجود في blood vessels بيؤدي إلى acute and severe + +290 +00:26:35,440 --> 00:26:41,500 +internal bleeding and sudden death في كثير من + +291 +00:26:41,500 --> 00:26:47,900 +الأحيان بتلاقي صار نزيف داخلي عند المريض وتوفى + +292 +00:26:47,900 --> 00:26:55,240 +فجأة وتوفى إيش؟ فجأة طيب استقبالنا للموضوع قولنا + +293 +00:26:55,240 --> 00:26:59,180 +الناس هدول بيكونوا عندهم نقص في الـ collagen رقم 3 + +294 +00:27:01,390 --> 00:27:04,470 +وهذا النوع من الكولاجين بيكون موجود في blood + +295 +00:27:04,470 --> 00:27:09,190 +vessels وبالتالي سهل جدا الناس دول اللي ما عندهمش + +296 +00:27:09,190 --> 00:27:14,310 +كولاجين بيصير في عندهم internal bleeding and sudden + +297 +00:27:14,310 --> 00:27:18,470 +death Internal bleeding occurs and sudden death + +298 +00:27:18,470 --> 00:27:23,650 +نوع ثاني من الأمراض اللي بتتعلق بالـ blood vessels + +299 +00:27:23,650 --> 00:27:26,450 +حاجة بيسموها pseudoxanthoma elastica + +300 +00:27:26,450 --> 00:27:33,620 
+pseudoxanthoma elastica وهذا المرض عكس التاني شو + +301 +00:27:33,620 --> 00:27:36,120 +بيصير؟ هو عبارة عن autosomal hereditary طبعا + +302 +00:27:36,120 --> 00:27:39,860 +hereditary disease autosomal recessive trait بيصير + +303 +00:27:39,860 --> 00:27:44,640 +في lack of skin elasticity عكس اللي قبله عكس اللي + +304 +00:27:44,640 --> 00:27:49,140 +قبله وإيش صار فيها؟ صار في calcification للـ skin + +305 +00:27:49,140 --> 00:27:54,280 +calcification يعني ضم elastic يعني تيبشت يعني صارت + +306 +00:27:54,280 --> 00:27:59,520 +سهلة الكسر صارت air سهلة الكسر some connective + +307 +00:27:59,520 --> 00:28:05,810 +tissue calcified وبالتالي bruising and bleeding + +308 +00:28:05,810 --> 00:28:12,970 +evidence can ensue مرض آخر اسمه hereditary + +309 +00:28:12,970 --> 00:28:19,450 +hemorrhagic telangiectasia hereditary hemorrhagic + +310 +00:28:19,450 --> 00:28:23,190 +telangiectasia وهو عبارة عن مرض بيصير فيه خلل في + +311 +00:28:23,190 --> 00:28:27,650 +الـ angiogenesis process إيش الـ angiogenesis + +312 +00:28:27,650 --> 00:28:33,510 +process هي عبارة عن عملية بناء الوعاء الدموي عملية + +313 +00:28:33,510 --> 00:28:39,550 +بناء الوعاء الدموي وهذه العملية يحصل فيها خلل + +314 +00:28:39,550 --> 00:28:43,770 +بتؤدي إلى ما يسمى بالـ Telangiectasias + +315 +00:28:43,770 --> 00:28:49,150 +Telangiectasias وهي عبارة عن فستولة عبارة عن فتحات + +316 +00:28:49,150 --> 00:28:52,970 +عبارة عن إيش؟ فتحات في الوعاء الدموي Vessels + +317 +00:28:52,970 --> 00:29:00,680 +involved cannot contract normally and collapse + +318 +00:29:00,680 --> 00:29:06,700 +easily and collapse easily طبعا مرضى من هذا النوع + +319 +00:29:06,700 --> 00:29:11,360 +بيصير في عندهم tiny area و bleeding pinpoint + +320 +00:29:11,360 --> 00:29:16,500 +lesions و tiny area و bleeding وممكن نشوف الـ + +321 +00:29:16,500 --> 00:29:22,000 +lesion في الـ face في الـ hands في الـ feet في مناطق + +322 +00:29:22,000 --> 00:29:27,600 +مختلفة لكنه هذا مرض ممكن يصير في أي عمر من الأعمار + +323 +00:29:28,430 --> 00:29:31,790 
+أي عملية خلل في الـ angiogenesis بتؤدي لإيش؟ + +324 +00:29:31,790 --> 00:29:36,650 +تلانجيبتازياز وفي نفس الوقت بنشوفها بوضع + +325 +00:29:36,650 --> 00:29:41,250 +physically ماشي كان بي diagnose by physical + +326 +00:29:41,250 --> 00:29:48,530 +appearance بتلاقي الواحد بيكون في عنده scars كثيرة + +327 +00:29:48,530 --> 00:29:52,730 +نوع + +328 +00:29:52,730 --> 00:29:55,890 +ثاني من الأمراض اسمه Kasabach-Merritt + +329 +00:29:57,860 --> 00:30:03,100 +أو بيسموه Hemangioma وهذا الـ Hemangioma بالعربي + +330 +00:30:03,100 --> 00:30:08,460 +بيسموه الـ وحمة عشان سمعته بالوحمة؟ أيوة تطبع في + +331 +00:30:08,460 --> 00:30:12,320 +أماكن مختلفة من الجسم الـ وحمة دي عبارة عن benign + +332 +00:30:12,320 --> 00:30:18,340 +tumor عبارة عن إيه؟ benign tumor of vascular + +333 +00:30:18,340 --> 00:30:23,280 +tissue وين جا في blood vessels، vascular tissue + +334 +00:30:24,410 --> 00:30:29,370 +grow rapidly to a giant proportion وبيبدأ يكبر، + +335 +00:30:29,370 --> 00:30:36,550 +يكبر، يكبر، بيبدأ صغير، بنولد الطفل غالبا بالوحمة + +336 +00:30:36,550 --> 00:30:40,510 +اللي إحنا بنحكي عليها، فبيكون حجمها صغير، لكن + +337 +00:30:40,510 --> 00:30:47,660 +ممكن هذا الحجم يكبر طبعا هذه الـ وحمة بتكون full of + +338 +00:30:47,660 --> 00:30:53,440 +small blood vessels أو capillary ماشي full of + +339 +00:30:53,440 --> 00:30:58,200 +وبالتالي أقل ضربة فيها شو بتعمل؟ بتعمل bleeding + +340 +00:30:58,200 --> 00:31:02,500 +بتعمل إيه؟ bleeding it should be removed if + +341 +00:31:02,500 --> 00:31:08,330 +applicable إذا كانت موجودة في منطقة ممكن الجراح + +342 +00:31:08,330 --> 00:31:12,250 +يصل لها، لازم يشيلها، لكن في الأسف الشديد في بعض + +343 +00:31:12,250 --> 00:31:17,150 +المناطق، بتكون حساسة يستطيع أن يصل لها الجراح، + +344 +00:31:17,150 --> 00:31:21,030 +فبالتالي بتعمل مشاكل كثيرة محمور عادية، عادية من + +345 +00:31:21,030 --> 00:31:25,950 +تشوفها ... 
إيه إحنا اتعودنا أن نشوفها في الإيد، في + +346 +00:31:25,950 --> 00:31:29,930 +الرقبة، فوق + +347 +00:31:29,930 --> 00:31:35,000 +العين يعني في الـ حاجب ممكن المهم هذه وحمد لونها + +348 +00:31:35,000 --> 00:31:38,800 +أحمر يا شباب عشان تتميزوا بينها وبين الشامة العادية + +349 +00:31:38,800 --> 00:31:42,900 +الاثنين benign tumor لكن هذه لونها أحمر الـ can + +350 +00:31:42,900 --> 00:31:51,860 +grow هذه أسود الشامة العادية طبعا ببقى أني بحكي + +351 +00:31:51,860 --> 00:31:59,620 +على benign tumor أشوف تكون أحمر منين؟ لأ لأ مش هي + +352 +00:31:59,620 --> 00:32:00,500 +... مش هي اللي بحكي عنها + +353 +00:32:09,540 --> 00:32:12,380 +لأ لأ أنا هنجت بشوفها بس مش ... لأ مش اللي اللي + +354 +00:32:12,380 --> 00:32:17,040 +بحكي عليها دي بتكون لونها حمراء لإنه full of blood + +355 +00:32:17,040 --> 00:32:20,520 +vessels full of blood vessels اللي بدها surgeon + +356 +00:32:20,520 --> 00:32:26,360 +بدها vascular surgeon يشيلها يعني جراح أو عيادة + +357 +00:32:26,360 --> 00:32:30,580 +موية ماشي؟ ماشي وبالتالي صعب جدا لإنه mechanical + +358 +00:32:30,580 --> 00:32:34,660 +injury أي لعب فيها هتؤدي لـ bleeding وممكن يكون + +359 +00:32:34,660 --> 00:32:37,180 +uncontrollable ممكن ما تحكمش فيه + +360 +00:32:41,090 --> 00:32:45,050 +طبعا هذه ببساطة يا شباب بتؤثر على الـ neighboring + +361 +00:32:45,050 --> 00:32:50,990 +tissue وممكن تؤدي إلى localized DIC نعرف الـ DIC + +362 +00:32:50,990 --> 00:32:59,670 +ديسميتر وطبعا بيؤدي إلى thrombocytopenia وبعض + +363 +00:32:59,670 --> 00:33:05,190 +العوامل التجلط بتستهلك عند المريض وهذا كله بيؤدي + +364 +00:33:05,190 --> 00:33:08,950 +إلى bleeding بيؤدي إلى bleeding + +365 +00:33:14,120 --> 00:33:19,660 +Tumor composed of many blood vessels أو بيسموه + +366 +00:33:19,660 --> 00:33:28,040 +blood filled tumor مليانة بالدم استكملنا للموضوع + +367 +00:33:28,040 --> 00:33:33,000 +الـ hemangioma بيسموها بعض المرات الـ cavernous + +368 +00:33:33,000 --> 00:33:39,770 +hemangioma هو عبارة عن lesion may swell and bleed + +369 +00:33:39,770 --> 
00:33:44,530 +.شناد الأطفال اللي عندهم هذه الحاجات لما قلت لكم + +370 +00:33:44,530 --> 00:33:48,570 +should be removed if applicable لكن إذا ما قدرناش + +371 +00:33:48,570 --> 00:33:55,670 +نفعلها لازم يحذر الطفل ومن يعامله أن أي حركة أي لعب + +372 +00:33:55,670 --> 00:34:02,470 +دفش للطفل ممكن يؤدي إلى نزيف لتحمض عقبها. Tumor + +373 +00:34:02,470 --> 00:34:08,340 +sites may form clots Pneumolized RBCs and Vessel + +374 +00:34:08,340 --> 00:34:12,000 +Obstruction ممكن تؤدي إلى جلطة متسكّر الوعاء + +375 +00:34:12,000 --> 00:34:17,900 +الدموية المجاورة للموضوع Present since birth تيجي + +376 +00:34:17,900 --> 00:34:23,940 +مع عياش مع الولادة وال treatment هو surgical removal + +377 +00:34:23,940 --> 00:34:29,000 +تشيل عياش من جراحها if possible زي ما قلتلكوا إذا + +378 +00:34:29,000 --> 00:34:34,680 +كان الجراح يستطيع أن يصل إلى هذا المكان أو ما جدرش + +379 +00:34:34,680 --> 00:34:39,780 +localized radiotherapy تعرف العلاج من إشعاع ماشي + +380 +00:34:39,780 --> 00:34:46,900 +لمنطقة العياش الهيمانجيومة with injection of anti + +381 +00:34:46,900 --> 00:34:50,200 +.. 
of fibrinolytic inhibitors fibrinolytic يعني + +382 +00:34:50,200 --> 00:34:55,440 +antifibrinolysis ماشي عشان ما يصارش عياش ما يصارش + +383 +00:34:55,440 --> 00:34:59,000 +عياش بفوش + +384 +00:34:59,000 --> 00:34:59,300 +بقى + +385 +00:35:03,770 --> 00:35:07,490 +مرة جاى إن شاء الله بنكمل عشان نمشي مع + +386 +00:35:07,490 --> 00:35:10,850 +التعليبات خلصنا ال hereditary type مرة جاى بنحكي + +387 +00:35:10,850 --> 00:35:13,010 +في ال acquired type أو blood vessels diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/yGBR5WvaSIM_postprocess.srt b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/yGBR5WvaSIM_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..8c99e2bd4d4c5e4054a78d31c61207d7327e9bd0 --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/yGBR5WvaSIM_postprocess.srt @@ -0,0 +1,2072 @@ +1 +00:00:21,070 --> 00:00:26,050 +بسم الله الرحمن الرحيم اليوم ان شاء الله هنحكي + +2 +00:00:26,050 --> 00:00:30,790 +فينا cherry occurring inhibitors inhibitors بيعنى + +3 +00:00:30,790 --> 00:00:35,010 +anticoagulant anticoagulant في الجسم موجودة بكميات + +4 +00:00:35,010 --> 00:00:41,290 +كبيرة موجودة بتعمل جنبا إلى جنب مع liberal + +5 +00:00:41,290 --> 00:00:47,770 +coagulant وضروري جدا وجود هذه ال anticoagulant in + +6 +00:00:47,770 --> 00:00:53,720 +order to attenuateThe Coagulant or the Coagulation + +7 +00:00:53,720 --> 00:00:56,640 +Cascade Mechanism بمعنى إن الـ Coagulation Cascade + +8 +00:00:56,640 --> 00:01:01,720 +Mechanism يظل شغال على طول هيعمل Clot Formation + +9 +00:01:01,720 --> 00:01:06,880 +كبيرة وبالتالي يعمل Complete Occlusion للـBlood + +10 +00:01:06,880 --> 00:01:09,780 +Vessels وهذا ما أحنا بدنا نشيله لأنه إذا كان صار + +11 +00:01:09,780 --> 00:01:13,680 +فيه Complete Occlusion بمعنى صار فيه .. 
صار فيه + +12 +00:01:13,680 --> 00:01:21,540 +Schemia صح؟ وSchemia يعني أنهبالتالي الجسم في عنده + +13 +00:01:21,540 --> 00:01:28,360 +من يحارب هذا الـmechanism اللي هو الـfibrinetic + +14 +00:01:28,360 --> 00:01:32,720 +system اللي هنحكي فيه لكن بالاضافة إلى ذلك في + +15 +00:01:32,720 --> 00:01:35,560 +naturally occurring anticoagulant موجودة في الجسم + +16 +00:01:35,560 --> 00:01:43,660 +تعمل كبح لكل عمليات الـcoagulation الغير مرهوب + +17 +00:01:43,660 --> 00:01:46,260 +فيها قسمه + +18 +00:01:47,640 --> 00:01:52,260 +الـ Anticoagulant أو الـ Inhibitors إلى تلت + +19 +00:01:52,260 --> 00:01:58,080 +مجموعات أساسية تلت مجموعات أساسية مجموعة الأولانية + +20 +00:01:59,090 --> 00:02:04,470 +جالو هي عبارة عن Serine Protease Inhibitors Serine + +21 +00:02:04,470 --> 00:02:08,250 +Protease Inhibitors و احنا عارفين ال Serine + +22 +00:02:08,250 --> 00:02:12,210 +Protease يعني Enzyme، مظبوط؟ يعني ال Enzyme + +23 +00:02:12,210 --> 00:02:17,130 +Inhibitors لمين؟ لل Coagulation Factors لبروتين C + +24 +00:02:17,130 --> 00:02:21,800 +و لأس؟ لأ سبب ما يعني، لسه خلّينا واحدة واحدةالـ + +25 +00:02:21,800 --> 00:02:26,160 +Serine Protease Inhibitor جاله بتضم مجموعة أبوها + +26 +00:02:26,160 --> 00:02:33,040 +لها المجموعة هو الـ Antithrombin 3 وبيضم + +27 +00:02:33,040 --> 00:02:38,570 +بالإضافة للـ Antithrombin 3مجموعة من الـinhibitors + +28 +00:02:38,570 --> 00:02:41,610 +زي الـC1-esterase inhibitor زي الـAlpha-2 + +29 +00:02:41,610 --> 00:02:46,750 +-macrobipyrin، Alpha-2-antiplasmin، Alpha-1 + +30 +00:02:46,750 --> 00:02:49,950 +-antitrypsin، and heparin cofactor 2 يبدأ في + +31 +00:02:49,950 --> 00:02:56,090 +مجموعة كبيرة بتندرج تحت مين؟ تحت هذه العائلة وهي + +32 +00:02:56,090 --> 00:03:00,010 +Serine Protease Inhibitors، يعني الـEnzymatic + +33 +00:03:00,010 --> 00:03:05,310 +Inhibitors، ماشي؟مجموعة التانية هي عبارة عن + +34 +00:03:05,310 --> 00:03:09,810 +cofactor in inhibitor بطلنا نحكي على coagulation + +35 +00:03:09,810 --> 00:03:13,850 +factor بطلنا نحكي على coagulation 
cofactor ال + +36 +00:03:13,850 --> 00:03:17,570 +cofactor احنا عارفين هم في ال coagulation case كده + +37 +00:03:17,570 --> 00:03:22,510 +كم هو حد ال cofactor + +38 +00:03:22,510 --> 00:03:30,090 +هم اتنين ال cofactor بشكل أساس ال cofactor هم + +39 +00:03:30,090 --> 00:03:35,950 +factor خمسة و factor تمانىهؤلاء هم كو فاكتور و + +40 +00:03:35,950 --> 00:03:42,270 +فاكتور يعني تمانين كلاهما تشكل كومبليكسيا واحد + +41 +00:03:42,270 --> 00:03:46,910 +تلنيز واحد بروترومبنيز و احنا نعرف ان اذا كان + +42 +00:03:46,910 --> 00:03:50,850 +التلنيز و بروترومبنيز لم يتكونوا مش فشكوا اجلاشي + +43 +00:03:50,850 --> 00:03:54,750 +صح؟ فبالتالي احنا بنعمل انهيبشين لكومبليكسيا + +44 +00:03:54,750 --> 00:04:01,350 +بطريقة غير مباشرة من خلال الانهيبشين of the + +45 +00:04:01,350 --> 00:04:04,780 +cofactorcoagulation and cofactor هم خمسة و تمانية + +46 +00:04:04,780 --> 00:04:11,860 +نشاطين و هذه عبارة عن عائلة بتضممجموعة كبيرة + +47 +00:04:11,860 --> 00:04:17,460 +بيسموها protein-c system protein-c system وهي بتضم + +48 +00:04:17,460 --> 00:04:24,380 +protein-c و protein-s ثم ال trombomodulin و ال + +49 +00:04:24,380 --> 00:04:29,020 +cofactors اللي برتبت معاهم ماشي؟ و ال cofactor + +50 +00:04:29,020 --> 00:04:33,540 +اللي برتبت معاهم يبجي هيفرها مجموعة كبيرة تعمل + +51 +00:04:33,540 --> 00:04:38,580 +انهيبش لل activated form of cofactor 5 and + +52 +00:04:38,580 --> 00:04:45,010 +cofactor 8النشاطين مجموعة التالتة وهي tissue + +53 +00:04:45,010 --> 00:04:48,190 +factor pathway inhibitor وهي مجموعة مفاصلة لحالنا + +54 +00:04:48,190 --> 00:04:51,570 +وشوفنا دور ال tissue factor مرة فاترة tissue + +55 +00:04:51,570 --> 00:04:54,770 +factor pathway inhibitor وحنتعرف على ال mechanism + +56 +00:04:54,770 --> 00:05:01,150 +هو بيعمل inhibition لمين؟ ال tissue factor سبعة + +57 +00:05:01,150 --> 00:05:06,410 +ايه؟ complex و factor عشرة و factor عشرة النشاط + +58 +00:05:06,800 --> 00:05:11,420 +بتبقى معايا انت؟ تسعة بقى؟ لأ تسعة مش بيخش .. 
+ +59 +00:05:11,420 --> 00:05:14,240 +بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش + +60 +00:05:14,240 --> 00:05:14,300 +نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. + +61 +00:05:14,300 --> 00:05:14,680 +بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش + +62 +00:05:14,680 --> 00:05:16,680 +بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش + +63 +00:05:16,680 --> 00:05:21,540 +نمشي من .. بيصير + +64 +00:05:21,540 --> 00:05:33,800 +وش نمشي من .. بيصير وش نمشي من .. بيصير وش + +65 +00:05:33,800 --> 00:05:42,030 +نمشي من .. بيصبيرتبط الـ 9 و 10 أو 11 و 12 بيرتبط + +66 +00:05:42,030 --> 00:05:48,090 +لأنه اسمه Antithrombin بشكل أساسي بيمسك في مين؟ + +67 +00:05:48,090 --> 00:05:50,230 +بالـ Thrombin، وThrombin من أين جهد؟ + +68 +00:05:53,650 --> 00:05:57,970 +وهو factor two لما بتنشط، بتحول إلى إيه؟ إلى + +69 +00:05:57,970 --> 00:06:01,350 +thrombin، فده بنمشيك ال thrombin، سموه anti + +70 +00:06:01,350 --> 00:06:05,270 +-thrombin، لإنه بيمشك في كل هدول الـCerebrum، لكن + +71 +00:06:05,270 --> 00:06:10,120 +ال major، ال potentأنتي اللي براكو أجلد هو عبارة + +72 +00:06:10,120 --> 00:06:14,700 +عن مين؟ لثرمبل، أبوهم مين؟ لثرمبل، المرة الفاتة + +73 +00:06:14,700 --> 00:06:21,880 +تشوفناله أكم وظيفة؟ ستة، آه ستة، ماشي؟ وفي وظيفة + +74 +00:06:21,880 --> 00:06:26,240 +سابعة كمان حنقدر نكلم فيها، ماشي يا شباب؟في الجهة + +75 +00:06:26,240 --> 00:06:31,860 +الانتيترومبال موجود مرتبط بالإضافة لتنين برتبط + +76 +00:06:31,860 --> 00:06:35,560 +التسعة و عشرة و أحداشر و أتناشر لكن ببطء + +77 +00:06:38,700 --> 00:06:45,180 +بيحتاج الى accelerator الى مسرع والمسرع هو + +78 +00:06:45,180 --> 00:06:51,560 +الهيبرين هو الهيبرين هيبرين by itself has no + +79 +00:06:51,560 --> 00:06:54,380 +anticoagulant effect مش هو اللي بيعمل + +80 +00:06:54,380 --> 00:06:58,080 +anticoagulation هو بيسرع التفاعل اللي بيعمل + +81 +00:06:58,080 --> 00:07:03,080 +anticoagulation هو ال anticoagulantالانهيبتر + +82 +00:07:03,080 --> 00:07:07,460 +الانهيبتر اللي برتبط فيه هو الانتيترومب التلاتة و + +83 
+00:07:07,460 --> 00:07:10,820 +برتبط فيه الهيبارين كوفاكتور 2 برضه بيسمو + +84 +00:07:10,820 --> 00:07:14,060 +الانتيترومب التلاتة الهيبارين كوفاكتور 1 هيبارين + +85 +00:07:14,060 --> 00:07:18,260 +كوفاكتور 1 عشان يميزوا بينه بين هيبارين كوفاكتور 2 + +86 +00:07:18,260 --> 00:07:24,700 +ماشي يبقى بيرتبط ب أكسلاراتر و ال أكسلاراتر هو + +87 +00:07:24,700 --> 00:07:28,260 +عبارة عن الهيبارين و الهيبارين has no + +88 +00:07:28,260 --> 00:07:31,040 +anticoagulant effect و أنما ال effect + +89 +00:07:34,040 --> 00:07:38,140 +من ال inhibitor الطبيعي أو ال anticoagulant + +90 +00:07:38,140 --> 00:07:46,720 +الطبيعي كيف يعمل؟ كيف يعمل؟ انتبهوا عليا اللوح + +91 +00:07:46,720 --> 00:07:50,260 +اللوح زى ما انتوا شايفين في عندي antithrombin + +92 +00:07:50,260 --> 00:07:55,400 +تلاتة ده هو عليه two binding sites عليه ايه ايه؟ + +93 +00:07:55,400 --> 00:08:02,590 +two binding sites واحدة ليسم ترتبط بالالتانية + +94 +00:08:02,590 --> 00:08:05,230 +بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين + +95 +00:08:05,230 --> 00:08:06,550 +والثانية بيرتبط بالهيبرين والثانية بيرتبط + +96 +00:08:06,550 --> 00:08:08,070 +بالهيبرين والثانية بيرتبط بالهيبرين والثانية + +97 +00:08:08,070 --> 00:08:10,210 +بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين + +98 +00:08:10,210 --> 00:08:14,430 +والثانية بيرتبط بالهيبرين والثانية بيرتبط + +99 +00:08:14,430 --> 00:08:17,630 +بالهيبرين والثانية بيرتبط بالهيبرين والثانية + +100 +00:08:17,630 --> 00:08:17,990 +بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين + +101 +00:08:17,990 --> 00:08:19,990 +والثانية بيرتبط بالهيبرين والثانية بيرتبط + +102 +00:08:19,990 --> 00:08:21,650 +بالهيبرين والثانية بيرتبط بالهيبرين والثانية + +103 +00:08:21,650 --> 00:08:23,990 +بيرتبط بالهيبرين + +104 +00:08:30,280 --> 00:08:34,240 +الـ Antithrombin سوف يقوم بـBind أولاً مع + +105 +00:08:34,240 --> 00:08:40,160 +الهيبرين، سوف يقوم بـBind أولاً مع الهيبرين، ماشي؟ + +106 +00:08:40,160 --> 00:08:46,220 +ليش؟ قولنا لتسرع الاتجاه، الـ Antithrombin برتبط + +107 +00:08:46,220 --> 00:08:50,440 
+بالسيريني بروتييز بدون الهيبرين، نعم برتبط، لكن + +108 +00:08:50,440 --> 00:08:54,120 +بيعمل ببطء شديد، it takes + +109 +00:08:57,080 --> 00:09:01,780 +it takes minutes instead of years second بيقولوا + +110 +00:09:01,780 --> 00:09:06,420 +ان الهيبارين بيسرع can increase the rate of + +111 +00:09:06,420 --> 00:09:11,980 +reaction الى أكتر من الف وبعض المرات بتقول الفين + +112 +00:09:11,980 --> 00:09:18,160 +ضعف الفين ضعف وطبيعي الفين ضعف معناته السرعة + +113 +00:09:18,160 --> 00:09:25,680 +سارتير زادت الفين مرةالتفاعل بيكون عالي جدا وسريع + +114 +00:09:25,680 --> 00:09:31,960 +جدا وبيعمل inhibition بصراحة يبت .. يبتالي نعيد من + +115 +00:09:31,960 --> 00:09:36,540 +أول ال Hiparin ال antithrombin تلاتة و ال first + +116 +00:09:36,540 --> 00:09:41,860 +bind with Hiparin و بعد ما يرتبط بال Hiparin + +117 +00:09:41,860 --> 00:09:46,920 +بيرتبط بال Serine Protease بيرتبط بمين؟ بال Serine + +118 +00:09:46,920 --> 00:09:51,400 +Proteaseبمجرد ما يرتبط بالـ Serine Protease + +119 +00:09:51,400 --> 00:09:57,180 +الهيبارين بيفك الهيبارين بيغلق ماشي؟ بمجرد ما + +120 +00:09:57,180 --> 00:10:01,480 +يرتبط بالـ Serine Protease الهيبارين قد دوره + +121 +00:10:01,480 --> 00:10:07,140 +بينسحب بيسيب الاش ال Antithrombin و الأن بيضل + +122 +00:10:07,140 --> 00:10:13,620 +عندنا Complex بيتكوّن من مين؟ 100% ال Antithrombin + +123 +00:10:13,620 --> 00:10:17,720 +و Serine Protease ال Serine Protease هو ال enzyme + +124 +00:10:21,130 --> 00:10:26,690 +ماذا يفعل؟ يعمل ع الـ antithrombin ويقطعه إلى two + +125 +00:10:26,690 --> 00:10:34,470 +fragments واحدة منها تعمل blocking لمين؟ للـ + +126 +00:10:34,470 --> 00:10:37,770 +thrombin function أو للسيرين البروتييز + +127 +00:10:43,740 --> 00:10:47,560 +الـ Antithrombin ثم بيصير فيه two fragments واحدة + +128 +00:10:47,560 --> 00:10:52,040 +منهم تمسك الشيبيولايت واحدة منهم بتمسك اللي هو + +129 +00:10:52,040 --> 00:10:57,940 +بتعمل blocking لمين لل active sites of the serine + +130 +00:10:57,940 --> 00:11:03,920 +protease وبالتالي بتعمله 
inhibition مفهوم؟ + +131 +00:11:03,920 --> 00:11:15,400 +حد عنده سؤال؟ حد عنده سؤال؟ ماشي؟ بتحبه قاعد؟بس ال + +132 +00:11:15,400 --> 00:11:18,480 +fragment اللى بتعمله ال blocking لل active site + +133 +00:11:18,480 --> 00:11:23,160 +لمين؟ لل serine protein طبعا طبعا طب كيف عبتدان يا + +134 +00:11:23,160 --> 00:11:26,040 +دكتور؟ كيه؟ كيه كيف عبتدان يا دكتور؟ واحدة heavy + +135 +00:11:26,040 --> 00:11:31,220 +واحدة light تمسك ال ash ال heavy بال ash بال + +136 +00:11:31,220 --> 00:11:35,400 +serine protein و من عند ال binding site تبعته + +137 +00:11:35,400 --> 00:11:40,120 +فتعمله inhibition بتعمله ايش? inhibition المفهوم + +138 +00:11:40,120 --> 00:11:50,660 +شباب؟ناخد المثال التاني على العائلة التانية وهي + +139 +00:11:50,660 --> 00:11:58,960 +protein C system تباين معايا protein C system + +140 +00:11:58,960 --> 00:12:03,140 +الميكانزم + +141 +00:12:03,140 --> 00:12:13,590 +عشان يشتغل يحتاج إلى شرط مشروط يحتاج إلىإنه يكون + +142 +00:12:13,590 --> 00:12:21,130 +الـ thrombin at high level يبقى شرط عمل هذا الـ + +143 +00:12:21,130 --> 00:12:25,630 +mechanism إنه يكون الـ thrombin at high level + +144 +00:12:25,630 --> 00:12:31,310 +وماكنش مصير at high level لما يكون وصل إلى نهاية + +145 +00:12:31,310 --> 00:12:36,470 +عمله لأن ال coagulation process شباب تبدأ in a + +146 +00:12:36,470 --> 00:12:41,490 +propagation manner بحالة .. 
بحالة إيش؟ بحالة + +147 +00:12:41,490 --> 00:12:47,730 +العكوميةكل ما اشتغلت + +148 +00:12:47,730 --> 00:12:51,670 +بيكمل بعض وبيزود بعض وبالتالي بيزيد ال + +149 +00:12:51,670 --> 00:12:55,850 +concentration of serine proteases كل ما اشتغلت + +150 +00:12:55,850 --> 00:12:58,910 +اكتر وكل ما اشتغلت اكتر وكل ما عندنا جلطة اكتر + +151 +00:12:58,910 --> 00:13:06,330 +اسرع، بسبب عند حد معين بيكون تركيزها عالي، ماشي؟ + +152 +00:13:06,330 --> 00:13:09,150 +تركيز الثرومبين فيها عالي، والثرومبين هو اللي + +153 +00:13:09,150 --> 00:13:14,120 +بيشتغل على مين؟و بيحولوا ل مين؟ مية في المين، + +154 +00:13:14,120 --> 00:13:18,340 +ماشي؟ هو اللي بيعمل cloth، at high level of + +155 +00:13:18,340 --> 00:13:22,300 +thrombin بيكون .. بيكون فاكرين في ال function + +156 +00:13:22,300 --> 00:13:26,660 +بتاعة ال thrombin، قولنا فيه ستة functions، مظبوط، + +157 +00:13:26,660 --> 00:13:35,550 +at low level بنشط مين؟الـ 8 و 5 و الـ 8 و 5 و 5 و + +158 +00:13:35,550 --> 00:13:41,030 +8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 + +159 +00:13:41,030 --> 00:13:43,650 +و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و + +160 +00:13:43,650 --> 00:13:44,330 +5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 + +161 +00:13:44,330 --> 00:13:52,170 +8 و 8 و 8 و + +162 +00:13:52,170 --> 00:13:57,870 +8 و 8 وبالـ thrombomodulin، من أين جاء الـ + +163 +00:13:57,870 --> 00:14:02,130 +thrombomodulin؟ أولاً، هو عبارة عن مكوّن أساسي من + +164 +00:14:02,130 --> 00:14:08,170 +مكونات الـ Platelet Alpha Granule، ماشي؟ و احنا + +165 +00:14:08,170 --> 00:14:11,310 +قاعدين بنشتغل على سطح الـ Platelet، قلنا لما + +166 +00:14:11,310 --> 00:14:15,830 +اتنشطت، اعمل الـ Degranulation، طلع منها مين؟ بالـ + +167 +00:14:15,830 --> 00:14:22,350 +Thrombin، Thrombin will bind to thrombomodulin، + +168 +00:14:22,350 --> 00:14:31,180 +ماشي؟ andفي موضوع الكالسيوم صار في عنا ثرمبل و + +169 +00:14:31,180 --> 00:14:37,060 +ثرمبل موديورين و كالسيوم هذا الكمبليكس سيحول إلى + +170 +00:14:37,060 --> 00:14:43,760 +بروتين C بالظبط و 
بيحولوا ليه؟ to active form + +171 +00:14:43,760 --> 00:14:50,300 +فبتحول إلى activated بروتين C ماشي؟ شو اللي صار في + +172 +00:14:50,300 --> 00:14:56,920 +هذا العالم؟ كولنا شباب كولنا Quaternary Complexوفي + +173 +00:14:56,920 --> 00:15:00,280 +الهيوستاتيك ميكانيزم شرط أساسي للتنشيط يكون + +174 +00:15:00,280 --> 00:15:04,960 +كواتيرناري كومبليكس شفنا خمسة التنازل + +175 +00:15:04,960 --> 00:15:09,940 +بروثرومبينيزل يكونوا كواتيرناري ملعق وهنا كوّننا + +176 +00:15:09,940 --> 00:15:12,660 +كواتيرناري كومبليكس يتكوّن من ثرومبل + +177 +00:15:12,660 --> 00:15:18,960 +ثرومبوموديوليد كالسيوم وبروتينسيوم محصلة اكتبيت + +178 +00:15:18,960 --> 00:15:23,710 +البروتينسيوممتابعين معاها وقولنا هذا الكملة اسمها + +179 +00:15:23,710 --> 00:15:28,530 +ما بيتكون إلا at high level of its thrombin صار في + +180 +00:15:28,530 --> 00:15:35,190 +عندنا activated thrombin البروتين C this molecule + +181 +00:15:35,190 --> 00:15:40,970 +will bind to its cofactor في ال cofactor تبعه مين + +182 +00:15:40,970 --> 00:15:46,690 +ال cofactor تبعه البروتين S مين ال cofactor + +183 +00:15:46,690 --> 00:15:55,820 +البروتين S ماشياكتفت البروتين C في وجود البروتين S + +184 +00:15:55,820 --> 00:15:59,520 +يعني it will bind to protein S in the presence of + +185 +00:15:59,520 --> 00:16:03,540 +كالسيوم في وجود الكالسيوم شو بيعمل؟ يعني هو قاعد + +186 +00:16:03,540 --> 00:16:07,960 +بيكون quaternary complex تاني ماشي برتبط + +187 +00:16:07,960 --> 00:16:13,920 +بالactivated form of factor خمسة وتمانية وبيحولهم + +188 +00:16:13,920 --> 00:16:18,060 +بيعملهم degradation بيحولهم إلى inactivated form + +189 +00:16:18,060 --> 00:16:25,360 +عيد تانيهذه التانية اه at high level of thrombin + +190 +00:16:25,360 --> 00:16:30,320 +ايوه ماشي وزي ما انتوا شايفين ال thrombin من وين + +191 +00:16:30,320 --> 00:16:36,220 +جاي ماشي at high level of thrombin it will bind to + +192 +00:16:36,220 --> 00:16:40,300 +thrombomodulin لبريزاجشونكاليوشيوم و ال bind و ال + +193 +00:16:40,300 --> 00:16:43,220 +protein C و بيحاولوا 
ال activated form of protein + +194 +00:16:43,220 --> 00:16:49,720 +C هذا باطمط في protein S اللي هو cofactor له ماشي + +195 +00:16:50,390 --> 00:16:55,090 +في وجود الكالسيو ماشي، شو بيعمل؟ it will + +196 +00:16:55,090 --> 00:17:01,410 +inactivate factor 5A و factor 8A، 5A جاية من وين؟ + +197 +00:17:01,410 --> 00:17:08,590 +من 10A، صح؟ و 8 من 9A، فاكرين؟ ال coagulation + +198 +00:17:08,590 --> 00:17:13,910 +cascade، اه، ماشي، مش بيكونوا complexes، واحد + +199 +00:17:13,910 --> 00:17:20,630 +تنهز، واحد بيبقى ترقب الناس، مظبوط، طيب،أرتبط في + +200 +00:17:20,630 --> 00:17:25,850 +خمسة أيه؟ تمانية أو عملهم degradation عملهم ايه؟ + +201 +00:17:25,850 --> 00:17:29,890 +degradation مين اللي عمل degradation؟ protein C + +202 +00:17:29,890 --> 00:17:33,590 +النشط فوق ال .. ال .. ال .. ال .. ال .. الوضع + +203 +00:17:33,590 --> 00:17:37,750 +النشط منهم، ماشي؟ ال activated form منهم، ال + +204 +00:17:37,750 --> 00:17:43,850 +activated form منهم، مافوش بقى، ماشي؟عرفنا + +205 +00:17:43,850 --> 00:17:48,250 +الميكانيزم التاني بس في نقطة هنا بدنا نحكي فيها و + +206 +00:17:48,250 --> 00:17:54,190 +هي ال protein S ال protein S شباب وجدوا انه بيكون + +207 +00:17:54,190 --> 00:18:02,130 +في الجسم على شاكلتين على شاكلتين ال protein S + +208 +00:18:02,130 --> 00:18:06,150 +بيكون + +209 +00:18:06,150 --> 00:18:09,730 +free و + +210 +00:18:09,730 --> 00:18:10,390 +bound + +211 +00:18:14,070 --> 00:18:20,030 +الـ free form بيشكل حوالي 40% بينما ال bound form + +212 +00:18:20,030 --> 00:18:27,330 +بيشكل 60% ماشي؟ + +213 +00:18:27,330 --> 00:18:30,810 +والنسبة + +214 +00:18:30,810 --> 00:18:36,010 +هذه مهمة جدا ليش مهمة جدا؟ لأن أي لعب في هذه + +215 +00:18:36,010 --> 00:18:42,500 +النسبة هيؤثر على ال free form of protein Sوهو الـ + +216 +00:18:42,500 --> 00:18:45,040 +active form هو اللي بيشتغل هو اللي بيتبط ال + +217 +00:18:45,040 --> 00:18:50,100 +protein C لإن اللي ممسوك مش راح يتبط بال activated + +218 +00:18:50,100 --> 00:18:53,860 +protein C ماشي؟ يعني لما ييجي يشتغل ال protein C + 
+219 +00:18:53,860 --> 00:18:56,760 +ال activated form و ال protein C إذا ما لجاش ال + +220 +00:18:56,760 --> 00:19:01,580 +protein أسس بيشتغل؟ لأ، في لأ، فهمين عليا بيشتغل + +221 +00:19:01,580 --> 00:19:08,520 +لكن بغضب مفهوم شباب النسبة هذه مهمة جدا اللي قاعد + +222 +00:19:08,520 --> 00:19:10,900 +فيها بيؤثر indirect + +223 +00:19:13,400 --> 00:19:23,020 +البروتين C النشط طب ال bound 4 مرتبط بحاجة بيسموها + +224 +00:19:23,020 --> 00:19:31,360 +C4B binding protein سمعتوا + +225 +00:19:31,360 --> 00:19:42,260 +بال C4B C اختصار لكمplement ماشي complement رقم 4 + +226 +00:19:43,630 --> 00:19:49,370 +النشطة ماشي C4B binding protein complement كلكم + +227 +00:19:49,370 --> 00:19:53,950 +بتعرفون و هي عبارة عن سلسلة بروتيني ماشي بتعتبر + +228 +00:19:53,950 --> 00:19:59,070 +أداة من أدوات ال immune system وبالتالي تنشط في + +229 +00:19:59,070 --> 00:20:04,270 +حالات ال inflammation يوم ما تنشط تكتر فتمسك في + +230 +00:20:04,270 --> 00:20:09,770 +بروتين C أكتر مظبوططب إذا مشكلة في protein S أكتر، + +231 +00:20:09,770 --> 00:20:13,910 +بتقل ال protein في ال .. في ال E protein فوق؟ بتقل + +232 +00:20:13,910 --> 00:20:17,750 +النسبة هذه، و لما تقل النسبة هذه، بتأثر على + +233 +00:20:17,750 --> 00:20:21,510 +protein C، طب و لما تأثر على protein C، بتقل + +234 +00:20:21,510 --> 00:20:27,290 +نشاطه، شو بيصير؟ بيصير انتقال يعني إيه؟ في .. 
في + +235 +00:20:27,290 --> 00:20:33,310 +Clotting Clotting؟ مش Anti-coagulation يعني إيه؟ + +236 +00:20:33,310 --> 00:20:36,380 +عشان هيك في ال inflammatory reactionبصير فيه + +237 +00:20:36,380 --> 00:20:40,680 +إمكانية لتكون جلطة أكتر من إياش من إنه نزيد، مفهوم + +238 +00:20:40,680 --> 00:20:44,840 +عليا؟ ليش؟ لأنه لعبنا في الميزان، إيش هو الميزان؟ + +239 +00:20:44,840 --> 00:20:48,240 +هد بيشتغل بالميزان، ال anti-coagulant و ال pro + +240 +00:20:48,240 --> 00:20:52,900 +-coagulant، صح؟ مظبوط؟ لعبنا في إياش في كفة ميزان + +241 +00:20:52,900 --> 00:20:59,480 +واحدة منهم، قللنا الكفة، ماشي، خففناها يعني، + +242 +00:20:59,480 --> 00:21:05,000 +فالكفة التانية هتزيد، مفهوم عليا؟وبالتالي يصير فيه + +243 +00:21:05,000 --> 00:21:08,100 +إيش؟ إمكانية إنه يصير فيه جلطة أكتر من ما إمكانية + +244 +00:21:08,100 --> 00:21:13,280 +إنه يصير فيه نزيل أفهم عليها الولاقة المباشرة بين + +245 +00:21:13,280 --> 00:21:19,180 +إيش؟ بين البروتين C وبروتين S وولاقتهم بيه اللي هو + +246 +00:21:19,180 --> 00:21:26,140 +تكوين الجلطة اللي فوق الشباب، حد عنده سؤال؟ طبعا، + +247 +00:21:26,140 --> 00:21:32,360 +زي ما انتم ملاحظين ال + +248 +00:21:32,360 --> 00:21:32,540 +.. + +249 +00:21:36,440 --> 00:21:45,000 +الـ .. الـ .. 
الـ mechanism هذا مرات بيكون وراثيًا + +250 +00:21:45,000 --> 00:21:52,500 +genetically defected genetically defected بمعنى + +251 +00:21:52,500 --> 00:21:55,340 +الـ proteins هي يا شباب اللي مالي جيش على ال + +252 +00:21:55,340 --> 00:21:59,260 +factor 5 ماشي بيجي بمسك في ال active site تبعته + +253 +00:21:59,260 --> 00:22:05,660 +بعملها inactivation بقطعه و بعمل inactivationيعني + +254 +00:22:05,660 --> 00:22:12,620 +بيجي برتبط بمواقع معينة على factor خمسة النشط طب + +255 +00:22:12,620 --> 00:22:16,020 +هو factor خمسة عبارة عن أيش؟ عبارة عن amino acid، + +256 +00:22:16,020 --> 00:22:21,580 +protein مجبوط، amino acid فلو صار فيه تبديل في + +257 +00:22:21,580 --> 00:22:23,780 +مواقع ال active sites + +258 +00:22:29,240 --> 00:22:34,060 +فاكتور خمسة نشط ولا بيظل نشط؟ بيظل نشط، ماشي؟ + +259 +00:22:34,060 --> 00:22:38,560 +وجدوا هذه الظاهرة موجودة بعض مرات بإن ال protein + +260 +00:22:38,560 --> 00:22:44,540 +خمسة بيكون ورثيا، ماشي، defected، بمعنى صار في + +261 +00:22:44,540 --> 00:22:50,310 +singleNucleotide polymorphism SMPs وبالتالي أثرت + +262 +00:22:50,310 --> 00:22:56,110 +هذه حولت اللي هو factor 5 المواقع الناشطة فيها + +263 +00:22:56,110 --> 00:22:59,750 +اتغير ال amino acid اللي موجود وبالتالي بقرر يتعرف + +264 +00:22:59,750 --> 00:23:04,310 +عليه نعم ماشي بيسموا هذا المرض factor 5 lyden + +265 +00:23:04,310 --> 00:23:10,850 +factor 5 lyden باسم العالمة اللي هضد ديه ليش؟ + +266 +00:23:15,160 --> 00:23:18,980 +اللي اكتشفت هذا الموضوع طبعا الناس هدول في هذه + +267 +00:23:18,980 --> 00:23:23,180 +الحالة شو بيصيروا يا شباب؟ بيصيروا عرضة لإيش؟ + +268 +00:23:23,180 --> 00:23:29,880 +لجلطة بالظبط عرضة لclot formation طيب + +269 +00:23:29,880 --> 00:23:35,520 +ننتقل لميكانيزم التالت وهو إيش؟ وهو tissue factor + +270 +00:23:35,520 --> 00:23:39,000 +pathway inhibitor والمرة اللي فاتة كنا حكينا إنه + +271 +00:23:39,000 --> 00:23:41,900 +tissue factor pathway inhibitor ماهياشي هو عبارة + +272 +00:23:41,900 --> 00:23:48,860 +عن inhibitorموجود بشكل أساسي على طول على التشو 
+ +273 +00:23:48,860 --> 00:23:55,140 +ماشي شو بعمله بعمله انهيبش إلى ال complex اللي هو + +274 +00:23:55,140 --> 00:23:58,060 +تشو فاكتور سبعة ايه complex و بعمله انهيبش إلى + +275 +00:23:58,060 --> 00:24:01,580 +فاكتور عشر و بعمله انهيبش إلى فاكتور عشر و زي ما + +276 +00:24:01,580 --> 00:24:06,060 +انتوا شايفين هذا ال endothelium طبعا مزعج + +277 +00:24:12,080 --> 00:24:18,980 +وعمل Complex هذا بنشط عشر أو تسعة مظبوط و + +278 +00:24:18,980 --> 00:24:23,080 +automatically بمجرد ما يصير في activation لهذا ال + +279 +00:24:23,080 --> 00:24:27,220 +ميكانيزم كنت اما هو بخوف ربنا اعطانا له حماية + +280 +00:24:27,220 --> 00:24:32,140 +عالمطح فورية بيجي tissue factor pathway inhibitor + +281 +00:24:32,140 --> 00:24:36,640 +وبيعملاش blocking لهذا ال ميكانيزم وشوفنا كيف بتتم + +282 +00:24:36,640 --> 00:24:43,330 +التحويلة ماشي مفهومبعمل blocking ليش لهذا ال + +283 +00:24:43,330 --> 00:24:46,190 +mechanism اللي هو tissue factor plus one inhibitor + +284 +00:24:46,190 --> 00:24:50,310 +بعمل inactivation لفكتور عشرة النشط وفكتور سبعة + +285 +00:24:50,310 --> 00:24:54,490 +tissue factor complex أو سبعين المفهوم مش هيبقى + +286 +00:24:54,490 --> 00:25:00,370 +هذا ميكانيزم موجود شبهناها المرة اللي فاتر هذا ال + +287 +00:25:00,370 --> 00:25:04,450 +mechanismوكأننا خشينا على غرفة مظلمة وضوينا الضوء + +288 +00:25:04,450 --> 00:25:10,390 +وطفنا صح؟ يعني automatically بس يبدأ التفاعل بنطفي + +289 +00:25:10,390 --> 00:25:14,610 +بس يبدأ التفاعل بنطفي لكن إنت لما بتضوي الضوء و + +290 +00:25:14,610 --> 00:25:18,070 +بتطفي بتشوف إيش فيه في الغرفة و بتشوف إيش مية في + +291 +00:25:18,070 --> 00:25:22,710 +المقادة اللي بيصير، بتكون كمية قليلة من factor 10A + +292 +00:25:22,710 --> 00:25:29,410 +النشط اللي بيتكمل ال reaction حد عنده سؤال يا شباب + +293 +00:25:32,380 --> 00:25:37,940 +حد مش فاهم، حد بيحب أعيد طيب هذه طبعا slide بتبين + +294 +00:25:37,940 --> 00:25:42,020 +ليه سلسلة مناشر، سلسلة ال anticoagulant اللي + +295 +00:25:42,020 --> 00:25:46,560 +موجودة وهذا عبارة عن ال coagulation 
cascade + +296 +00:25:46,560 --> 00:25:52,330 +mechanism بيبين فيه دورالـ Inheritor بشكل أساسي، + +297 +00:25:52,330 --> 00:25:56,510 +زي ما بتلاحظوا فيه دوائر صفراء ماشي، هذه الدوائر + +298 +00:25:56,510 --> 00:26:03,190 +مكتوب في جلبها plus و minus، plus يعني active و + +299 +00:26:03,190 --> 00:26:07,610 +minus الميكروفينية، فلو تتبعت اللي شرحناه كله في + +300 +00:26:07,610 --> 00:26:11,550 +الفترة اللي فاتت، هتلاقي انه المواقع الـ Pro + +301 +00:26:11,550 --> 00:26:15,930 +-coagulation ومواقع الـ Anti-coagulation الاسعار، + +302 +00:26:15,930 --> 00:26:24,760 +مفهوم يا شباب؟ حد عنده شوية؟حد عنده سؤال نبدأ + +303 +00:26:24,760 --> 00:26:37,820 +بالمحاضرة التانية المحاضرة + +304 +00:26:37,820 --> 00:26:43,500 +التانية تحكي عن الـ Fiber Analytics System واتبعوا + +305 +00:26:43,500 --> 00:26:45,360 +عليها يا شباب بداية + +306 +00:26:47,410 --> 00:26:50,850 +لما بدأنا نحكي في ال hemostasis عرفناه وقلنا له + +307 +00:26:50,850 --> 00:26:57,830 +different components قلنا له ال blood vessel as a + +308 +00:26:57,830 --> 00:27:02,170 +component أساسي ثم ال platelet ثم ال coordination + +309 +00:27:02,170 --> 00:27:09,130 +factor ماشي ثم ال fibrolytic system و بعدين قلنا + +310 +00:27:09,130 --> 00:27:14,280 +في inhibitors لكل هذه ال systemماشي؟ انا قدمت ال + +311 +00:27:14,280 --> 00:27:17,640 +inhibitors عن ال fibrotic system لإن هو ال final + +312 +00:27:17,640 --> 00:27:23,140 +هو النهائي اللى بيصير حملية healing، بيصير شفاء + +313 +00:27:23,140 --> 00:27:28,720 +للجريحة، صار فيه مزج، كولنا جلطة، بنينا الجلطة + +314 +00:27:29,040 --> 00:27:33,840 +بضلنا نبني بحيث انه ما سكرناش ال blood vessels + +315 +00:27:33,840 --> 00:27:38,080 +مظبوط و اول ما بدى يصير فيه تسكير في ال blood + +316 +00:27:38,080 --> 00:27:43,160 +vessels بدى ال fibrolytic system يعمل إذابة لكل + +317 +00:27:43,160 --> 00:27:49,280 +الجلطات اللى زوائد الجلطة اللى تكون مفهومش بقى هذا + +318 +00:27:49,280 --> 00:27:52,940 +اللى هندرسه في ال fibrolytic system هذا اللى + +319 +00:27:52,940 --> 
00:27:56,200 +هندرسه في ال fibrolysis او ال fibrolytic system + +320 +00:28:05,620 --> 00:28:09,140 +بالنسبة للتاريخ، بدأت الحكاية، بدأنا نحكي عن هذا + +321 +00:28:09,140 --> 00:28:12,460 +الـsystem في الـ1937، لما مكفارلانس، scientist + +322 +00:28:12,460 --> 00:28:18,340 +اسمه مكفارلانس، إيش جال؟ جال إن الـdamage in + +323 +00:28:18,340 --> 00:28:23,120 +tissue، بيطلع منها substance، هذه ال substance + +324 +00:28:23,120 --> 00:28:28,390 +فيها عبارة عن activatorماشي باضعافة لل activators + +325 +00:28:28,390 --> 00:28:32,970 +اللي بتطلع فيه activator طلع ماشي نشط substrate + +326 +00:28:32,970 --> 00:28:38,330 +اسمه بلازمينوجين و حوله الى انزايم سموه Blaser + +327 +00:28:39,340 --> 00:28:42,220 +بداية الحكية عن الموضوع كانت في سبعة و تلاتين، + +328 +00:28:42,220 --> 00:28:45,300 +تلفة وتسعمائه و سبعة و تلاتين، MacFarlane's جاله + +329 +00:28:45,300 --> 00:28:48,340 +انه بيطلع، اليوم مايصير فيه مزعج، في tissue + +330 +00:28:48,340 --> 00:28:52,000 +plasminogen activator، a plasminogen activator، + +331 +00:28:52,000 --> 00:28:55,180 +ماشي؟ وسموها tissue لإنها طالع من ال damage + +332 +00:28:55,180 --> 00:28:59,860 +tissue، مظبوط، tissue plasminogen activator، which + +333 +00:28:59,860 --> 00:29:04,860 +convert plasminogen as a substrate إلى إيش؟ إلى + +334 +00:29:04,860 --> 00:29:09,390 +plasma، مفهوم عن أيه؟ هذا الكلام؟انحكى فيه في + +335 +00:29:09,390 --> 00:29:15,270 +السابع و تلاتين تعريفاً الـ Fibrotic process هي + +336 +00:29:15,270 --> 00:29:18,610 +عبارة عن عملية بنتور فيها دا process of removing + +337 +00:29:18,610 --> 00:29:26,790 +fibrin ماشي from .. 
from ماشي from the vasculature + +338 +00:29:26,790 --> 00:29:30,890 +vasculature يعني ايش الوسط الوسط الدمو اللي صار + +339 +00:29:30,890 --> 00:29:36,740 +فيه مزاق صار فيه fibrin اكلت و fibrinتكوّن في وسط + +340 +00:29:36,740 --> 00:29:41,360 +مين؟ لما المزعج بنيرال جالتر ده البقايا مين؟ + +341 +00:29:41,360 --> 00:29:44,060 +البقايا الفايربرين احنا مانينا ندوّن هذا + +342 +00:29:44,060 --> 00:29:48,860 +الفايربرين ونرجع ال blood vessels إلى وضعه 100 في + +343 +00:29:48,860 --> 00:29:56,900 +النيل ف system هدواتهماشي اللاعيبين الأساسيين فيه + +344 +00:29:56,900 --> 00:30:00,340 +نمر واحد بلازمينوجين بدنا substrate نمر واحد + +345 +00:30:00,340 --> 00:30:03,740 +بلازمينوجين وهو ال substrate ثم activator لل + +346 +00:30:03,740 --> 00:30:07,960 +substrate وهو بلازمينوجين activator ماشي which + +347 +00:30:07,960 --> 00:30:12,280 +activate البلازمينوجين إلى ال enzyme إلى ال enzyme + +348 +00:30:12,280 --> 00:30:16,940 +form وهو البلازمين ماشي شو بعمل البلازمين؟ بشتغل + +349 +00:30:16,940 --> 00:30:21,960 +على ال fibre and fibrinogen لأنه بشتغل على الجهتين + +350 +00:30:22,850 --> 00:30:27,250 +وبيحولهم إلى degradation product شو بيحولهم؟ + +351 +00:30:27,250 --> 00:30:31,950 +degradation product أو بيسموهم splitting product + +352 +00:30:31,950 --> 00:30:37,370 +بيسموهم إيش؟ splitting split يعني إيش؟ التقطيع، + +353 +00:30:37,370 --> 00:30:42,910 +split، التشجيف، التشجيف بالبلدي، ماشي؟ يبقى + +354 +00:30:42,910 --> 00:30:51,110 +المختار الاختصار هو FDB أو FSP أو إيش؟ FSP، FSP + +355 +00:30:51,110 --> 00:30:52,050 +اختصار لإيش؟ + +356 +00:30:55,050 --> 00:31:01,630 +فايرن فايرن فايرن فايرن فايرن فايرن فايرن فايرن + +357 +00:31:01,630 --> 00:31:06,470 +فايرن فايرن + +358 +00:31:06,470 --> 00:31:13,860 +فايرن فايرنطبعا هذا ال system كاملا، هذا ال system + +359 +00:31:13,860 --> 00:31:20,440 +يحتاج إلى inhibitors، يحتاج إلى controller، مظبوط؟ + +360 +00:31:20,440 --> 00:31:22,660 +فبدنا نحكي كمان على ال inhibitors أو الـ + +361 +00:31:22,660 --> 00:31:27,360 
+Plasminogenic Activator and Plasminogen مافهومش + +362 +00:31:27,360 --> 00:31:30,640 +بقى؟ + +363 +00:31:30,640 --> 00:31:37,890 +ماشي؟وظيفة عرفنا تعريفا ووظيفة الـ Fibrotic System + +364 +00:31:37,890 --> 00:31:41,850 +أو الـ Fibrolysis is a system whereby the + +365 +00:31:41,850 --> 00:31:47,470 +temporary fibrin clot is systematically and + +366 +00:31:47,470 --> 00:31:51,450 +gradually dissolved يعني احنا بندوب الجلطة اللتي + +367 +00:31:51,450 --> 00:31:58,770 +تكون موقتا عشان تسد الجريح كل الجلطة اللي بدوبها + +368 +00:31:59,630 --> 00:32:03,050 +اللي ممكن تعمل إنها إنسداد في ال blood vessels + +369 +00:32:03,050 --> 00:32:08,400 +تمشيه لما بتدوبهابصير، بنرجع ال blood vessels إلى + +370 +00:32:08,400 --> 00:32:10,860 +وضع الطبيعي، لأن اليوم اللي بنجرطه في الوعاء + +371 +00:32:10,860 --> 00:32:15,560 +الدموي، بنعمل occlusion، بنعمل إيش؟ انسداد، لكن + +372 +00:32:15,560 --> 00:32:19,660 +الانسداد مش كامل، ماشي، انسداد، هذا الانسداد بيعمل + +373 +00:32:19,660 --> 00:32:22,920 +ضيق في هذه المنطقة، بيأثر على ال blood flow ولا ما + +374 +00:32:22,920 --> 00:32:27,140 +بيأثر؟ بيأثر على ال blood flow، فاحنا بنعملين رجع + +375 +00:32:27,140 --> 00:32:31,890 +ال blood vessels بما يسمح normal blood flowبما + +376 +00:32:31,890 --> 00:32:38,230 +يسمح أن الـ blood flow يرجع إلى وضعه الطبيعي طيب، + +377 +00:32:38,230 --> 00:32:43,230 +هي عبارة عن Defense Mechanism أداة، هذه الأداة، + +378 +00:32:43,230 --> 00:32:46,310 +هذا الـ mechanism هو عبارة عن Defense Mechanism + +379 +00:32:46,310 --> 00:32:50,450 +against what؟ against occlusion أو blood vessels + +380 +00:32:50,450 --> 00:32:55,290 +يبقى هو عبارة عن Defense خط دفاع أساسي ردت فعل + +381 +00:32:55,290 --> 00:33:01,190 +الجسم الطبيعية لبناء جلطة إنها تهدمهاصح؟ عشان يصير + +382 +00:33:01,190 --> 00:33:06,570 +فيه blood flow ان هي ايش تهدمها يبقى رد فعل الجسم + +383 +00:33:06,570 --> 00:33:13,130 +الطبيعي أحد وسائل الدفاع في الجسم لتكوين + +384 +00:33:13,130 --> 00:33:16,090 +الجلطة هو عبارة عن ايش؟ عبارة عن وجود 
system + +385 +00:33:16,090 --> 00:33:20,050 +يهدمها يا ايش؟ system يهدمها وبالتالي بيصير فيه + +386 +00:33:20,050 --> 00:33:24,350 +restoring for blood flow إلى الوضع الطبيعي إلى + +387 +00:33:24,350 --> 00:33:25,770 +ايش؟ الوضع الطبيعي + +388 +00:33:30,770 --> 00:33:36,110 +من صفات هذا ال system انه it's sensitive to + +389 +00:33:36,110 --> 00:33:42,210 +imbalances شو يعني sensitive to imbalances؟ حساس + +390 +00:33:42,210 --> 00:33:46,470 +لعدم .. الانه انه imbalance؟ ال hemostatic + +391 +00:33:46,470 --> 00:33:51,330 +balances مظبوط يوم ما يصير فيه اي تغير في الميزان + +392 +00:33:51,330 --> 00:33:56,130 +اللي احنا متفاقين عليه و يصير فيه imbalancing ببدأ + +393 +00:33:56,130 --> 00:34:00,130 +يشتغل هذا، ماشي؟ببدأ يشتغل هذا الـ mechanism + +394 +00:34:00,130 --> 00:34:06,190 +وبالتالي عند أي تغير في ال hemostatic balance ال + +395 +00:34:06,190 --> 00:34:11,790 +fiber optic system can be done من صفاته أيضا انه + +396 +00:34:11,790 --> 00:34:17,780 +لما بيشتغل بيدمن لنا ان ال thrombus formationis + +397 +00:34:17,780 --> 00:34:21,880 +localized بمعنى بيصير لها restriction في منطقة + +398 +00:34:21,880 --> 00:34:25,040 +المزعج، منطقة ال damage، ال blood vessel is + +399 +00:34:25,040 --> 00:34:31,120 +damaged، تفهم عليا؟ لإن لو صار في تكون في أماكن + +400 +00:34:31,120 --> 00:34:35,500 +أخرى ل-fibrinic load، ال-fibrinetic system بيسمح + +401 +00:34:35,500 --> 00:34:39,960 +لهاش إنها تنتقل إلى منطقة أخرى، لإنه أول بأول + +402 +00:34:39,960 --> 00:34:42,480 +بيكسرها + +403 +00:34:43,880 --> 00:34:52,440 +كمان من صفاته انه it's initiated with يعني جنبا + +404 +00:34:52,440 --> 00:34:57,180 +إلى جنب مع ال cooperation كاس كلمة كذا وعشان + +405 +00:34:57,180 --> 00:35:00,880 +تتخيلوا الموضوع عشان ما يعني مش معقول التلين يبدو + +406 +00:35:00,880 --> 00:35:06,600 +مع بعض، مظبوط عشان تتخيلوا الموضوع كيف يتم يبدأ + +407 +00:35:06,600 --> 00:35:11,620 +يصير في مزعفي blood vessel injury يبدأ ال + +408 +00:35:11,620 --> 00:35:16,660 +coagulation cascade بالنشاط زي ما 
قلتلكوا جنبا إلى + +409 +00:35:16,660 --> 00:35:21,740 +جنب مع ال fibrolytic system شو الفرق بين اتنين ان + +410 +00:35:21,740 --> 00:35:26,960 +واحد بمشي بسرعة طيارة و واحد بمشي بسرعة سيارة + +411 +00:35:26,960 --> 00:35:31,620 +فاهمين عليها؟ واحد بمشي سريع جدا و التاني اللي هو + +412 +00:35:31,620 --> 00:35:36,340 +ال coagulation cascade و التاني بمشي ببطءماشي، لو + +413 +00:35:36,340 --> 00:35:41,980 +ابتديش هذا الكلام بدل ما بيبقى ماشي، لإله أن تتكون + +414 +00:35:41,980 --> 00:35:46,140 +الجلطة، ويصير فيه imbalance في الhemostatic + +415 +00:35:47,110 --> 00:35:51,730 +Balance، ماشي؟ بعدها شو بيصير؟ تلعكس الآية + +416 +00:35:51,730 --> 00:35:57,590 +الفيبراليتك سيستم بيصير سريع جدا و مين بيصير يعمل + +417 +00:35:57,590 --> 00:36:01,510 +degradation ال coagulation cascade بيتراجع + +418 +00:36:01,510 --> 00:36:06,450 +وبالتالي بيصير سرعة الهدم أكتر من سرعة البناء في + +419 +00:36:06,450 --> 00:36:10,730 +هذه الحكاية يبقى في البداية سرعة البناء بتكون + +420 +00:36:10,730 --> 00:36:17,860 +عاليةأسرع من سرعة الهدم ثم بعد ذلك بتنعكس الآية و + +421 +00:36:17,860 --> 00:36:21,400 +بيصير سرعة الهدم أسرع مناش من سرعة البناء مفهوم + +422 +00:36:21,400 --> 00:36:25,000 +عليها بشبه بقى عشان هيك التنين بيبدو مع بعض صحيح + +423 +00:36:25,000 --> 00:36:32,360 +لكن بسرعات مختلفة بسرعات مختلفة واحد ممشي بسرعة + +424 +00:36:32,360 --> 00:36:37,660 +أعلى من التاني لغاية ما ينهي دوره ثم بيبدأ التاني + +425 +00:36:37,660 --> 00:36:42,350 +في العملطبعا محصلة من أدواره من صفات الـ Fibrotic + +426 +00:36:42,350 --> 00:36:48,190 +System انه it dissolves the clot by digestion of + +427 +00:36:48,190 --> 00:36:53,110 +fiber يعني شو بيعمل؟ بيكسر .. بيدوّب الجلدة من + +428 +00:36:53,110 --> 00:36:59,230 +خلال digestion اللي هو عبارة عن عملية هدم ال .. 
ال + +429 +00:36:59,230 --> 00:37:09,540 +fiber و الموضوع ببساطة يا شباب بشكل عامبيتم + +430 +00:37:09,540 --> 00:37:15,730 +كالقادةكلنا بنعرف إنه بداية زي ما الحاجات متفقين + +431 +00:37:15,730 --> 00:37:19,350 +إحنا إنه التنين بيبدوا مع بعض واحد بيبدأ سريع واحد + +432 +00:37:19,350 --> 00:37:22,790 +بيبدأ بطيئي، مظبوط؟ يبقى في البداية بيبدأ الـ + +433 +00:37:22,790 --> 00:37:25,970 +Coagulation Cascade Mechanism ويبدأ تكوين كميات + +434 +00:37:25,970 --> 00:37:29,730 +كبيرة من الـ Thrombin الـ Thrombin هيشتغل على الـ + +435 +00:37:29,730 --> 00:37:34,970 +Fibrinogen ويحوله إلى Fibrin Polymer ثم Fibrin + +436 +00:37:34,970 --> 00:37:39,310 +Polymer ثم X-Linked Fibrin، صح؟ مش هيك متفقين؟ + +437 +00:37:39,310 --> 00:37:43,760 +وتتكون الجلبةشو بيعمل ال .. ال .. ال .. + +438 +00:37:43,760 --> 00:37:47,780 +البلازمينوجين أو البلازمين؟ بيروح وشغال على الـ + +439 +00:37:47,780 --> 00:37:52,100 +Fibril Monomer وعمله degradation، بيشتغل على ال + +440 +00:37:52,100 --> 00:37:55,200 +Fibril Gen و Fibril Monomer وواش بيعمل؟ بيعمل + +441 +00:37:55,200 --> 00:38:01,580 +degradation، ماشي؟عيد تاني يبقى في البداية بيبدأ + +442 +00:38:01,580 --> 00:38:07,100 +تحت تأثير الترابين الـfibrin gel is cleaved + +443 +00:38:07,100 --> 00:38:11,500 +وبيتحول إلى fibrin monomer الـfibrin monomer + +444 +00:38:11,500 --> 00:38:19,100 +بيشتغل عليها اللي هو البلازمن و بيعمل degradation + +445 +00:38:19,100 --> 00:38:22,600 +و بيكون فيه fibrin degradation product أو fibrin + +446 +00:38:22,600 --> 00:38:28,490 +process product المفهوم يا شبابهذا بشكل عام بشكل + +447 +00:38:28,490 --> 00:38:35,250 +تفصيلي نبدأ بشكل تفصيلي نبدأ ونقول ان once clot + +448 +00:38:35,250 --> 00:38:39,330 +once clotting begins the fibrotic system comes to + +449 +00:38:39,330 --> 00:38:43,070 +life يعني بيبدأ ياش كان ميت؟ لأ ماكنش ميت بس كان + +450 +00:38:43,070 --> 00:38:48,930 +يزحف كسلحفاة كان ياش كان بطيئا بيحيط و لما بيحيط + +451 +00:38:48,930 --> 00:38:53,770 +شو بيعمل؟ plasmalogen شو بيعمل؟ binds to fibrin + 
+452 +00:39:18,840 --> 00:39:26,720 +الجلطة تتكوّن ومفجّرها في مكان ..في داخلها ومفجرها + +453 +00:39:26,720 --> 00:39:31,280 +موجود وين؟ في داخلها إيش اللي يصير؟ وإحنا بنعمل + +454 +00:39:31,280 --> 00:39:36,940 +Formation بيدخل الـ Plasminogen فيه في تركيب الجسم + +455 +00:39:36,940 --> 00:39:41,180 +طبعا هذا الكلام بيصير وين؟ في منطقة المزيع، منطقة + +456 +00:39:41,180 --> 00:39:47,140 +الحدث بعد ذلك في منطقة المزيع أو الحدث بيطلع من ال + +457 +00:39:47,140 --> 00:39:53,400 +tissue المنزوع tissue plasminogen activatorمنشط من + +458 +00:39:53,400 --> 00:40:01,340 +البلازمينوجين اللي هو دخل في بناء الجلطة فال + +459 +00:40:01,340 --> 00:40:05,300 +tissue plasminogen activator بيشترع بلازمينوجين و + +460 +00:40:05,300 --> 00:40:09,820 +بيحوله إلى بلازمين يعني بيحوله من زيوموجين إلى + +461 +00:40:09,820 --> 00:40:15,280 +أنزاين من زيوموجين إلى أنزاين فبيحوله إلى أنزاين + +462 +00:40:15,280 --> 00:40:18,920 +اللي هو ال complex formation of tissue plasminogen + +463 +00:40:18,920 --> 00:40:24,110 +activator مع البلازمينوجينمشي المخصصة اللى تبعته + +464 +00:40:24,110 --> 00:40:27,210 +عبارة عن بلازمن البلازمن بقبط مع ال fiber انه بعمل + +465 +00:40:27,210 --> 00:40:30,790 +fiber splitting او degradation بشته العلانيل + +466 +00:40:30,790 --> 00:40:34,010 +البلازمن على ال fiber اللى بعمله degradation + +467 +00:40:34,010 --> 00:40:41,870 +مافهومش هبقى ببساطة البلازمن بيدخل في تركيب الجلطة + +468 +00:40:43,210 --> 00:40:48,690 +في داخل الجلطة ماشي في نفس الوقت التشو بلازميرج + +469 +00:40:48,690 --> 00:40:51,970 +اكتيفيتر هو بنى اكتيفيتر بنشط بطلع من التشو + +470 +00:40:51,970 --> 00:40:55,790 +الممجوة من ال damage تشو وإيش بيعمل بروح و بنشط + +471 +00:40:55,790 --> 00:40:58,190 +البلازميرج و بيحول لبلازمير اللى بلازمير بيشتغل + +472 +00:40:58,190 --> 00:41:01,490 +على ال fibrin و بيحوله أو ال fibrin ال monomer و + +473 +00:41:01,490 --> 00:41:08,510 +بيحوله إلى أيش؟ ال fibrin degradation ماشي حدده + +474 +00:41:08,510 --> 00:41:11,770 +شوية؟ما هو الـ Plasminogen؟ عندنا 
هنجل الميكانيزمة + +475 +00:41:11,770 --> 00:41:16,010 +شغالة، تلاقوش ما هو الـ Plasminogen؟ هو عبارة عن + +476 +00:41:16,010 --> 00:41:21,510 +protein أمينواسد يصنع في الـ liver نكلارويتر 94 + +477 +00:41:21,510 --> 00:41:27,050 +ألف دالتر، ماشي، اللايكوبرتين موجود في البلازمة + +478 +00:41:27,050 --> 00:41:33,670 +normally هو عبارة عن زيموجين يعني inactive، يعني + +479 +00:41:33,670 --> 00:41:38,470 +inert، ماشي، موجود normally في البلازمة البلازمين + +480 +00:41:39,590 --> 00:41:46,870 +مش موجود، ماشي؟ لإنما الـ Plasminogen هو موجود لإن + +481 +00:41:46,870 --> 00:41:52,310 +هو عبارة عن substrate، ماشي؟ طبعاً بيتحول زي ما + +482 +00:41:52,310 --> 00:41:55,250 +اتفقنا إلى Trypsin-like Serine Protease اللي هو + +483 +00:41:55,250 --> 00:42:01,430 +الـ Plasmin وهو اللي مشتغل على الغضب، ماشي، + +484 +00:42:01,430 --> 00:42:05,910 +following injury يعني بعد المزر، شو اللي بيصير؟ it + +485 +00:42:05,910 --> 00:42:11,440 +binds، مين اللي binds؟البلايزمينوجين تتبع إلى + +486 +00:42:11,440 --> 00:42:17,760 +الفيبرين خلال تطبيق التجارب ترتبط مع الفيبرين + +487 +00:42:17,760 --> 00:42:20,960 +المونيكول + +488 +00:42:20,960 --> 00:42:25,000 +أثناء تكوين الجلطة مع بلايزمينوجين اكتيفيتر هو من + +489 +00:42:25,000 --> 00:42:29,880 +المنشط طبعه هو وميه؟ كلهم بيخشوا أين؟ في تكوين + +490 +00:42:29,880 --> 00:42:36,320 +الجلطةوبالتالي Plasminogen once needed يوم ما فضيع + +491 +00:42:36,320 --> 00:42:41,640 +it converts Plasminogen to mean من الداخل، من داخل + +492 +00:42:41,640 --> 00:42:45,400 +الجمطة عشان هيك سهل جدا تفجيرها يا شباب، ماشي؟ + +493 +00:42:45,400 --> 00:42:53,150 +لإنه زي ما كتبتكوا تدمير ذاتي أحسنالتركيز والتركيز + +494 +00:42:53,150 --> 00:42:57,190 +يتم تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله + +495 +00:42:57,190 --> 00:42:59,670 +بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله + +496 +00:42:59,670 --> 00:43:03,810 +بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله + +497 +00:43:03,810 --> 00:43:05,910 +بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله + +498 +00:43:05,910 --> 
00:43:08,130 +بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله + +499 +00:43:08,130 --> 00:43:12,370 +بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله + +500 +00:43:12,370 --> 00:43:21,510 +بالإنفلاميشن تشكيله بالإنفلاميشن تشكمع تزيادة + +501 +00:43:21,510 --> 00:43:23,750 +التركيز على البلازمينوجين بشكل كبير. ماذا يحصل يا + +502 +00:43:23,750 --> 00:43:28,670 +شباب؟ جالوا once activated البلازمينوجين، كل اللي + +503 +00:43:28,670 --> 00:43:34,270 +بيصير عشان يتحول للـ enzymatic form، انه بيفرد، + +504 +00:43:34,270 --> 00:43:39,450 +بيصيرله folding.يعني على خلاف الـ Coagulation + +505 +00:43:39,450 --> 00:43:43,310 +Factors المتعرف عليه يا شباب أن الـ Coagulation + +506 +00:43:43,310 --> 00:43:47,830 +Factors يصبحوا كليفجي صح؟ وين عند ال active site؟ + +507 +00:43:47,830 --> 00:43:53,790 +فبتبين ال active site هذا بيكون كامش و بفرد بيكون + +508 +00:43:53,790 --> 00:43:58,750 +كامش و بفتح يوم يصبح له opening بتبين ال active + +509 +00:43:58,750 --> 00:44:01,610 +site ولا بتبينش؟ بتبين ال active site وبالتالي + +510 +00:44:01,610 --> 00:44:07,900 +بيشتغل وبتحول إلى potent enzymatic domainحد عنده + +511 +00:44:07,900 --> 00:44:14,300 +سؤال؟ ماشي؟ حد عنده سؤال اشي بقى، مرة جاية ان شاء + +512 +00:44:14,300 --> 00:44:19,020 +الله بنبدأ في ال mechanism و بالذات و بنبدأ في + +513 +00:44:19,020 --> 00:44:22,140 +واحدة واحدة بنشوف ال activator، بنشوف ال plasma + +514 +00:44:22,140 --> 00:44:27,040 +energy، كيف هي اتحول ال plasmaوالـ Plasmin بنتعرف + +515 +00:44:27,040 --> 00:44:30,880 +على ال inhibitors تبعهم، ثم بنتعرف على عملية + +516 +00:44:30,880 --> 00:44:36,740 +التقطيع لل-fibrin كيف يقطع الـ Plasmin ليهش ال + +517 +00:44:36,740 --> 00:44:40,720 +-fibrin وبيحولوا إلى FDBs أو Fibrin Degradation + +518 +00:44:40,720 --> 00:44:45,360 +Product متفق عليه؟ حد عنده سؤال؟ + diff --git a/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/yGBR5WvaSIM_raw.json b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/yGBR5WvaSIM_raw.json new file mode 100644 index 
0000000000000000000000000000000000000000..1150c8b6ec09dc6786451b224c726b051381755e --- /dev/null +++ b/PL9fwy3NUQKwZk82_1AeatKVgXG0nDxlHE/yGBR5WvaSIM_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 5015, "start": 21.07, "end": 50.15, "text": "بسم الله الرحمن الرحيم اليوم ان شاء الله هنحكي فينا cherry occurring inhibitors inhibitors بيعنى anticoagulant anticoagulant في الجسم موجودة بكميات كبيرة موجودة بتعمل جنبا إلى جنب مع liberal coagulant وضروري جدا وجود هذه ال anticoagulant in order to attenuate", "tokens": [3555, 38251, 21984, 34892, 5016, 27842, 34892, 5016, 32640, 45595, 20498, 16472, 13412, 16606, 21984, 8032, 1863, 5016, 4117, 1829, 8978, 8315, 20164, 18386, 20406, 9862, 20406, 9862, 4724, 40228, 1863, 7578, 2511, 2789, 559, 425, 394, 2511, 2789, 559, 425, 394, 8978, 25724, 38251, 3714, 29245, 23328, 3660, 4724, 24793, 1829, 9307, 9122, 3555, 48923, 3714, 29245, 23328, 3660, 39894, 25957, 1211, 10874, 1863, 3555, 995, 30731, 10874, 1863, 3555, 20449, 13767, 598, 559, 425, 394, 4032, 11242, 2288, 13063, 1829, 10874, 28259, 49610, 23328, 29538, 2423, 2511, 2789, 559, 425, 394, 294, 1668, 281, 951, 268, 10107], "avg_logprob": -0.18781249299645425, "compression_ratio": 1.701834862385321, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 21.07, "end": 22.03, "word": "بسم", "probability": 0.47015380859375}, {"start": 22.03, "end": 22.71, "word": " الله", "probability": 0.9580078125}, {"start": 22.71, "end": 23.01, "word": " الرحمن", "probability": 0.9752604166666666}, {"start": 23.01, "end": 23.49, "word": " الرحيم", "probability": 0.9944661458333334}, {"start": 23.49, "end": 25.33, "word": " اليوم", "probability": 0.60638427734375}, {"start": 25.33, "end": 25.45, "word": " ان", "probability": 0.5068359375}, {"start": 25.45, "end": 25.57, "word": " شاء", "probability": 0.87060546875}, {"start": 25.57, "end": 25.61, "word": " الله", "probability": 0.94287109375}, {"start": 25.61, "end": 26.05, "word": " هنحكي", "probability": 0.77333984375}, 
{"start": 26.05, "end": 26.35, "word": " فينا", "probability": 0.8271484375}, {"start": 26.35, "end": 26.69, "word": " cherry", "probability": 0.1142578125}, {"start": 26.69, "end": 27.43, "word": " occurring", "probability": 0.75390625}, {"start": 27.43, "end": 28.37, "word": " inhibitors", "probability": 0.822998046875}, {"start": 28.37, "end": 30.13, "word": " inhibitors", "probability": 0.7159423828125}, {"start": 30.13, "end": 30.79, "word": " بيعنى", "probability": 0.71563720703125}, {"start": 30.79, "end": 31.89, "word": " anticoagulant", "probability": 0.847119140625}, {"start": 31.89, "end": 33.67, "word": " anticoagulant", "probability": 0.874462890625}, {"start": 33.67, "end": 33.79, "word": " في", "probability": 0.8544921875}, {"start": 33.79, "end": 34.11, "word": " الجسم", "probability": 0.98974609375}, {"start": 34.11, "end": 34.53, "word": " موجودة", "probability": 0.9765625}, {"start": 34.53, "end": 35.01, "word": " بكميات", "probability": 0.9019775390625}, {"start": 35.01, "end": 35.47, "word": " كبيرة", "probability": 0.9744466145833334}, {"start": 35.47, "end": 37.35, "word": " موجودة", "probability": 0.9638671875}, {"start": 37.35, "end": 39.31, "word": " بتعمل", "probability": 0.97607421875}, {"start": 39.31, "end": 40.11, "word": " جنبا", "probability": 0.9251708984375}, {"start": 40.11, "end": 40.29, "word": " إلى", "probability": 0.8671875}, {"start": 40.29, "end": 40.77, "word": " جنب", "probability": 0.9806315104166666}, {"start": 40.77, "end": 40.99, "word": " مع", "probability": 0.98828125}, {"start": 40.99, "end": 41.29, "word": " liberal", "probability": 0.25830078125}, {"start": 41.29, "end": 42.31, "word": " coagulant", "probability": 0.96484375}, {"start": 42.31, "end": 44.11, "word": " وضروري", "probability": 0.86474609375}, {"start": 44.11, "end": 44.43, "word": " جدا", "probability": 0.9833984375}, {"start": 44.43, "end": 44.93, "word": " وجود", "probability": 0.973388671875}, {"start": 44.93, "end": 45.29, "word": " هذه", 
"probability": 0.9248046875}, {"start": 45.29, "end": 45.45, "word": " ال", "probability": 0.8935546875}, {"start": 45.45, "end": 46.39, "word": " anticoagulant", "probability": 0.9392578125}, {"start": 46.39, "end": 47.77, "word": " in", "probability": 0.95751953125}, {"start": 47.77, "end": 48.05, "word": " order", "probability": 0.916015625}, {"start": 48.05, "end": 49.15, "word": " to", "probability": 0.98193359375}, {"start": 49.15, "end": 50.15, "word": " attenuate", "probability": 0.8917643229166666}], "temperature": 1.0}, {"id": 2, "seek": 7586, "start": 51.18, "end": 75.86, "text": "The Coagulant or the Coagulation Cascade Mechanism بمعنى إن الـ Coagulation Cascade Mechanism يظل شغال على طول هيعمل Clot Formation كبيرة وبالتالي يعمل Complete Occlusion للـBlood Vessels وهذا ما أحنا بدنا نشيله لأنه إذا كان صار فيه Complete Occlusion بمعنى صار فيه .. صار فيه Schemia صح؟ وSchemia يعني أنه", "tokens": [2278, 3066, 559, 425, 394, 420, 264, 3066, 559, 2776, 383, 4806, 762, 30175, 1434, 4724, 2304, 3615, 1863, 7578, 36145, 2423, 39184, 3066, 559, 2776, 383, 4806, 762, 30175, 1434, 7251, 19913, 1211, 13412, 17082, 6027, 15844, 23032, 12610, 39896, 25957, 1211, 2033, 310, 10126, 399, 9122, 3555, 48923, 46599, 6027, 2655, 6027, 1829, 7251, 25957, 1211, 34687, 26191, 6485, 24976, 39184, 33, 752, 378, 691, 442, 1625, 37037, 15730, 19446, 5551, 5016, 8315, 47525, 8315, 8717, 8592, 26895, 3224, 5296, 33456, 3224, 11933, 15730, 25961, 20328, 9640, 8978, 3224, 34687, 26191, 6485, 4724, 2304, 3615, 1863, 7578, 20328, 9640, 8978, 3224, 4386, 20328, 9640, 8978, 3224, 2065, 14058, 20328, 5016, 22807, 4032, 31560, 14058, 37495, 22653, 14739, 3224], "avg_logprob": -0.31482439395809964, "compression_ratio": 1.6613545816733069, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 51.18, "end": 51.36, "word": "The", "probability": 0.176025390625}, {"start": 51.36, "end": 52.56, "word": " Coagulant", "probability": 0.8594970703125}, {"start": 52.56, "end": 52.98, "word": " 
or", "probability": 0.4931640625}, {"start": 52.98, "end": 53.18, "word": " the", "probability": 0.254638671875}, {"start": 53.18, "end": 53.72, "word": " Coagulation", "probability": 0.9231770833333334}, {"start": 53.72, "end": 54.16, "word": " Cascade", "probability": 0.8400065104166666}, {"start": 54.16, "end": 54.6, "word": " Mechanism", "probability": 0.873779296875}, {"start": 54.6, "end": 55.14, "word": " بمعنى", "probability": 0.91279296875}, {"start": 55.14, "end": 55.6, "word": " إن", "probability": 0.22021484375}, {"start": 55.6, "end": 55.78, "word": " الـ", "probability": 0.513427734375}, {"start": 55.78, "end": 56.22, "word": " Coagulation", "probability": 0.9422200520833334}, {"start": 56.22, "end": 56.64, "word": " Cascade", "probability": 0.94775390625}, {"start": 56.64, "end": 57.04, "word": " Mechanism", "probability": 0.968505859375}, {"start": 57.04, "end": 57.38, "word": " يظل", "probability": 0.5633951822916666}, {"start": 57.38, "end": 57.76, "word": " شغال", "probability": 0.98828125}, {"start": 57.76, "end": 57.9, "word": " على", "probability": 0.7783203125}, {"start": 57.9, "end": 58.24, "word": " طول", "probability": 0.989990234375}, {"start": 58.24, "end": 59.7, "word": " هيعمل", "probability": 0.821044921875}, {"start": 59.7, "end": 60.98, "word": " Clot", "probability": 0.625732421875}, {"start": 60.98, "end": 61.72, "word": " Formation", "probability": 0.94189453125}, {"start": 61.72, "end": 62.24, "word": " كبيرة", "probability": 0.9768880208333334}, {"start": 62.24, "end": 63.92, "word": " وبالتالي", "probability": 0.88203125}, {"start": 63.92, "end": 64.34, "word": " يعمل", "probability": 0.9392903645833334}, {"start": 64.34, "end": 65.0, "word": " Complete", "probability": 0.86962890625}, {"start": 65.0, "end": 65.96, "word": " Occlusion", "probability": 0.946044921875}, {"start": 65.96, "end": 66.88, "word": " للـBlood", "probability": 0.6852783203125}, {"start": 66.88, "end": 67.32, "word": " Vessels", "probability": 
0.7989908854166666}, {"start": 67.32, "end": 67.66, "word": " وهذا", "probability": 0.625244140625}, {"start": 67.66, "end": 67.8, "word": " ما", "probability": 0.3330078125}, {"start": 67.8, "end": 68.04, "word": " أحنا", "probability": 0.73583984375}, {"start": 68.04, "end": 68.24, "word": " بدنا", "probability": 0.28546142578125}, {"start": 68.24, "end": 68.76, "word": " نشيله", "probability": 0.8338623046875}, {"start": 68.76, "end": 69.0, "word": " لأنه", "probability": 0.449951171875}, {"start": 69.0, "end": 69.32, "word": " إذا", "probability": 0.799560546875}, {"start": 69.32, "end": 69.54, "word": " كان", "probability": 0.92529296875}, {"start": 69.54, "end": 69.78, "word": " صار", "probability": 0.710693359375}, {"start": 69.78, "end": 69.94, "word": " فيه", "probability": 0.674560546875}, {"start": 69.94, "end": 70.22, "word": " Complete", "probability": 0.89501953125}, {"start": 70.22, "end": 70.78, "word": " Occlusion", "probability": 0.9814453125}, {"start": 70.78, "end": 71.54, "word": " بمعنى", "probability": 0.95986328125}, {"start": 71.54, "end": 71.84, "word": " صار", "probability": 0.9755859375}, {"start": 71.84, "end": 72.62, "word": " فيه", "probability": 0.91845703125}, {"start": 72.62, "end": 72.62, "word": " ..", "probability": 0.201904296875}, {"start": 72.62, "end": 73.44, "word": " صار", "probability": 0.81396484375}, {"start": 73.44, "end": 73.68, "word": " فيه", "probability": 0.969970703125}, {"start": 73.68, "end": 74.14, "word": " Schemia", "probability": 0.63720703125}, {"start": 74.14, "end": 74.98, "word": " صح؟", "probability": 0.82861328125}, {"start": 74.98, "end": 75.48, "word": " وSchemia", "probability": 0.7989908854166666}, {"start": 75.48, "end": 75.74, "word": " يعني", "probability": 0.97314453125}, {"start": 75.74, "end": 75.86, "word": " أنه", "probability": 0.513671875}], "temperature": 1.0}, {"id": 3, "seek": 10626, "start": 78.36, "end": 106.26, "text": "بالتالي الجسم في عنده من يحارب هذا الـmechanism اللي هو 
الـfibrinetic system اللي هنحكي فيه لكن بالاضافة إلى ذلك في naturally occurring anticoagulant موجودة في الجسم تعمل كبح لكل عمليات الـcoagulation الغير مرهوب فيها قسمه", "tokens": [3555, 6027, 2655, 6027, 1829, 25724, 38251, 8978, 43242, 3224, 9154, 7251, 5016, 9640, 3555, 23758, 2423, 39184, 1398, 3484, 1434, 13672, 1829, 31439, 2423, 39184, 69, 6414, 259, 3532, 1185, 13672, 1829, 8032, 1863, 5016, 4117, 1829, 8978, 3224, 44381, 20666, 46958, 31845, 3660, 30731, 29910, 23275, 8978, 8195, 18386, 2511, 2789, 559, 425, 394, 3714, 29245, 23328, 3660, 8978, 25724, 38251, 6055, 25957, 1211, 9122, 49628, 5296, 28820, 6225, 2304, 20292, 9307, 2423, 39184, 1291, 559, 2776, 6024, 118, 13546, 3714, 2288, 3224, 37746, 8978, 11296, 12174, 38251, 3224], "avg_logprob": -0.2992527050816495, "compression_ratio": 1.559090909090909, "no_speech_prob": 3.039836883544922e-06, "words": [{"start": 78.36000000000001, "end": 79.52000000000001, "word": "بالتالي", "probability": 0.700439453125}, {"start": 79.52000000000001, "end": 80.68, "word": " الجسم", "probability": 0.74755859375}, {"start": 80.68, "end": 81.02, "word": " في", "probability": 0.2227783203125}, {"start": 81.02, "end": 81.54, "word": " عنده", "probability": 0.931884765625}, {"start": 81.54, "end": 81.84, "word": " من", "probability": 0.80224609375}, {"start": 81.84, "end": 82.66, "word": " يحارب", "probability": 0.982666015625}, {"start": 82.66, "end": 83.06, "word": " هذا", "probability": 0.859375}, {"start": 83.06, "end": 85.34, "word": " الـmechanism", "probability": 0.6539306640625}, {"start": 85.34, "end": 86.56, "word": " اللي", "probability": 0.5479736328125}, {"start": 86.56, "end": 86.94, "word": " هو", "probability": 0.95556640625}, {"start": 86.94, "end": 88.36, "word": " الـfibrinetic", "probability": 0.4590657552083333}, {"start": 88.36, "end": 88.7, "word": " system", "probability": 0.94287109375}, {"start": 88.7, "end": 88.84, "word": " اللي", "probability": 0.80029296875}, {"start": 88.84, "end": 89.38, 
"word": " هنحكي", "probability": 0.7116455078125}, {"start": 89.38, "end": 89.84, "word": " فيه", "probability": 0.911865234375}, {"start": 89.84, "end": 90.5, "word": " لكن", "probability": 0.59130859375}, {"start": 90.5, "end": 91.98, "word": " بالاضافة", "probability": 0.911865234375}, {"start": 91.98, "end": 92.2, "word": " إلى", "probability": 0.8173828125}, {"start": 92.2, "end": 92.58, "word": " ذلك", "probability": 0.9931640625}, {"start": 92.58, "end": 92.72, "word": " في", "probability": 0.484619140625}, {"start": 92.72, "end": 93.2, "word": " naturally", "probability": 0.294921875}, {"start": 93.2, "end": 93.84, "word": " occurring", "probability": 0.87841796875}, {"start": 93.84, "end": 94.76, "word": " anticoagulant", "probability": 0.90537109375}, {"start": 94.76, "end": 95.12, "word": " موجودة", "probability": 0.94921875}, {"start": 95.12, "end": 95.22, "word": " في", "probability": 0.96923828125}, {"start": 95.22, "end": 95.56, "word": " الجسم", "probability": 0.9892578125}, {"start": 95.56, "end": 97.04, "word": " تعمل", "probability": 0.89453125}, {"start": 97.04, "end": 97.58, "word": " كبح", "probability": 0.93505859375}, {"start": 97.58, "end": 98.72, "word": " لكل", "probability": 0.97216796875}, {"start": 98.72, "end": 99.92, "word": " عمليات", "probability": 0.897216796875}, {"start": 99.92, "end": 101.76, "word": " الـcoagulation", "probability": 0.801220703125}, {"start": 101.76, "end": 103.08, "word": " الغير", "probability": 0.8575846354166666}, {"start": 103.08, "end": 103.66, "word": " مرهوب", "probability": 0.91064453125}, {"start": 103.66, "end": 104.44, "word": " فيها", "probability": 0.793212890625}, {"start": 104.44, "end": 106.26, "word": " قسمه", "probability": 0.7732747395833334}], "temperature": 1.0}, {"id": 4, "seek": 11808, "start": 107.64, "end": 118.08, "text": "الـ Anticoagulant أو الـ Inhibitors إلى تلت مجموعات أساسية تلت مجموعات أساسية مجموعة الأولانية", "tokens": [6027, 39184, 5130, 2789, 559, 425, 394, 34051, 2423, 
39184, 682, 5455, 9862, 30731, 6055, 1211, 2655, 3714, 7435, 2304, 45367, 9307, 5551, 3794, 32277, 10632, 6055, 1211, 2655, 3714, 7435, 2304, 45367, 9307, 5551, 3794, 32277, 10632, 3714, 7435, 2304, 2407, 27884, 16247, 12610, 7649, 10632], "avg_logprob": -0.13948568142950535, "compression_ratio": 1.4476190476190476, "no_speech_prob": 1.2516975402832031e-06, "words": [{"start": 107.64, "end": 108.2, "word": "الـ", "probability": 0.479736328125}, {"start": 108.2, "end": 109.32, "word": " Anticoagulant", "probability": 0.82333984375}, {"start": 109.32, "end": 109.6, "word": " أو", "probability": 0.80029296875}, {"start": 109.6, "end": 109.8, "word": " الـ", "probability": 0.810302734375}, {"start": 109.8, "end": 110.7, "word": " Inhibitors", "probability": 0.8902994791666666}, {"start": 110.7, "end": 111.88, "word": " إلى", "probability": 0.74658203125}, {"start": 111.88, "end": 112.26, "word": " تلت", "probability": 0.9026692708333334}, {"start": 112.26, "end": 113.02, "word": " مجموعات", "probability": 0.9876953125}, {"start": 113.02, "end": 113.72, "word": " أساسية", "probability": 0.965576171875}, {"start": 113.72, "end": 114.76, "word": " تلت", "probability": 0.7650553385416666}, {"start": 114.76, "end": 115.66, "word": " مجموعات", "probability": 0.99033203125}, {"start": 115.66, "end": 116.38, "word": " أساسية", "probability": 0.986083984375}, {"start": 116.38, "end": 117.34, "word": " مجموعة", "probability": 0.87275390625}, {"start": 117.34, "end": 118.08, "word": " الأولانية", "probability": 0.9827880859375}], "temperature": 1.0}, {"id": 5, "seek": 14071, "start": 119.09, "end": 140.71, "text": "جالو هي عبارة عن Serine Protease Inhibitors Serine Protease Inhibitors و احنا عارفين ال Serine Protease يعني Enzyme، مظبوط؟ يعني ال Enzyme Inhibitors لمين؟ لل Coagulation Factors لبروتين C و لأس؟ لأ سبب ما يعني، لسه خلّينا واحدة واحدة", "tokens": [7435, 6027, 2407, 39896, 6225, 3555, 9640, 3660, 18871, 4210, 533, 43371, 651, 682, 5455, 9862, 4210, 533, 43371, 651, 682, 
5455, 9862, 4032, 1975, 5016, 8315, 6225, 9640, 5172, 9957, 2423, 4210, 533, 43371, 651, 37495, 22653, 2193, 1229, 1398, 12399, 3714, 19913, 3555, 2407, 9566, 22807, 37495, 22653, 2423, 2193, 1229, 1398, 682, 5455, 9862, 5296, 2304, 9957, 22807, 24976, 3066, 559, 2776, 33375, 830, 5296, 26890, 35473, 9957, 383, 4032, 5296, 10721, 3794, 22807, 5296, 10721, 8608, 3555, 3555, 19446, 37495, 22653, 12399, 5296, 3794, 3224, 16490, 1211, 11703, 1829, 8315, 36764, 24401, 3660, 36764, 24401, 3660], "avg_logprob": -0.29254331447110316, "compression_ratio": 1.618811881188119, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 119.09, "end": 119.55, "word": "جالو", "probability": 0.68115234375}, {"start": 119.55, "end": 119.73, "word": " هي", "probability": 0.6611328125}, {"start": 119.73, "end": 120.19, "word": " عبارة", "probability": 0.98828125}, {"start": 120.19, "end": 120.43, "word": " عن", "probability": 0.998046875}, {"start": 120.43, "end": 121.15, "word": " Serine", "probability": 0.72705078125}, {"start": 121.15, "end": 122.13, "word": " Protease", "probability": 0.818115234375}, {"start": 122.13, "end": 122.99, "word": " Inhibitors", "probability": 0.9435221354166666}, {"start": 122.99, "end": 124.47, "word": " Serine", "probability": 0.6654052734375}, {"start": 124.47, "end": 125.59, "word": " Protease", "probability": 0.939208984375}, {"start": 125.59, "end": 126.55, "word": " Inhibitors", "probability": 0.9484049479166666}, {"start": 126.55, "end": 127.27, "word": " و", "probability": 0.60546875}, {"start": 127.27, "end": 127.41, "word": " احنا", "probability": 0.773681640625}, {"start": 127.41, "end": 127.71, "word": " عارفين", "probability": 0.9896240234375}, {"start": 127.71, "end": 127.85, "word": " ال", "probability": 0.50537109375}, {"start": 127.85, "end": 128.25, "word": " Serine", "probability": 0.828857421875}, {"start": 128.25, "end": 128.91, "word": " Protease", "probability": 0.941162109375}, {"start": 128.91, "end": 129.17, "word": " 
يعني", "probability": 0.9619140625}, {"start": 129.17, "end": 129.81, "word": " Enzyme،", "probability": 0.687744140625}, {"start": 129.81, "end": 130.65, "word": " مظبوط؟", "probability": 0.8883463541666666}, {"start": 130.65, "end": 131.27, "word": " يعني", "probability": 0.785888671875}, {"start": 131.27, "end": 131.41, "word": " ال", "probability": 0.9287109375}, {"start": 131.41, "end": 132.21, "word": " Enzyme", "probability": 0.7299397786458334}, {"start": 132.21, "end": 133.05, "word": " Inhibitors", "probability": 0.9324544270833334}, {"start": 133.05, "end": 134.53, "word": " لمين؟", "probability": 0.80279541015625}, {"start": 134.53, "end": 134.69, "word": " لل", "probability": 0.4326171875}, {"start": 134.69, "end": 135.43, "word": " Coagulation", "probability": 0.7684733072916666}, {"start": 135.43, "end": 136.15, "word": " Factors", "probability": 0.867431640625}, {"start": 136.15, "end": 136.89, "word": " لبروتين", "probability": 0.6126708984375}, {"start": 136.89, "end": 137.13, "word": " C", "probability": 0.6591796875}, {"start": 137.13, "end": 137.31, "word": " و", "probability": 0.59716796875}, {"start": 137.31, "end": 137.89, "word": " لأس؟", "probability": 0.416107177734375}, {"start": 137.89, "end": 138.31, "word": " لأ", "probability": 0.959716796875}, {"start": 138.31, "end": 138.63, "word": " سبب", "probability": 0.5909016927083334}, {"start": 138.63, "end": 138.79, "word": " ما", "probability": 0.5986328125}, {"start": 138.79, "end": 139.43, "word": " يعني،", "probability": 0.5181477864583334}, {"start": 139.43, "end": 139.63, "word": " لسه", "probability": 0.9226888020833334}, {"start": 139.63, "end": 140.03, "word": " خلّينا", "probability": 0.749267578125}, {"start": 140.03, "end": 140.35, "word": " واحدة", "probability": 0.9773763020833334}, {"start": 140.35, "end": 140.71, "word": " واحدة", "probability": 0.9892578125}], "temperature": 1.0}, {"id": 6, "seek": 15536, "start": 141.38, "end": 155.36, "text": "الـ Serine Protease 
Inhibitor جاله بتضم مجموعة أبوها لها المجموعة هو الـ Antithrombin 3 وبيضم بالإضافة للـ Antithrombin 3", "tokens": [6027, 39184, 4210, 533, 43371, 651, 682, 5455, 3029, 10874, 6027, 3224, 39894, 11242, 2304, 3714, 7435, 2304, 2407, 27884, 5551, 3555, 2407, 11296, 5296, 11296, 9673, 7435, 2304, 2407, 27884, 31439, 2423, 39184, 5130, 355, 81, 3548, 259, 805, 4032, 21292, 11242, 2304, 20666, 28814, 11242, 31845, 3660, 24976, 39184, 5130, 355, 81, 3548, 259, 805], "avg_logprob": -0.22710129079119912, "compression_ratio": 1.3059701492537314, "no_speech_prob": 5.960464477539062e-07, "words": [{"start": 141.38, "end": 141.8, "word": "الـ", "probability": 0.387451171875}, {"start": 141.8, "end": 142.18, "word": " Serine", "probability": 0.695068359375}, {"start": 142.18, "end": 142.9, "word": " Protease", "probability": 0.8623046875}, {"start": 142.9, "end": 143.56, "word": " Inhibitor", "probability": 0.94189453125}, {"start": 143.56, "end": 143.86, "word": " جاله", "probability": 0.7477213541666666}, {"start": 143.86, "end": 144.46, "word": " بتضم", "probability": 0.8650716145833334}, {"start": 144.46, "end": 145.2, "word": " مجموعة", "probability": 0.94248046875}, {"start": 145.2, "end": 146.16, "word": " أبوها", "probability": 0.882568359375}, {"start": 146.16, "end": 146.88, "word": " لها", "probability": 0.6400146484375}, {"start": 146.88, "end": 147.64, "word": " المجموعة", "probability": 0.9318359375}, {"start": 147.64, "end": 148.14, "word": " هو", "probability": 0.8828125}, {"start": 148.14, "end": 148.3, "word": " الـ", "probability": 0.854736328125}, {"start": 148.3, "end": 148.92, "word": " Antithrombin", "probability": 0.80966796875}, {"start": 148.92, "end": 149.32, "word": " 3", "probability": 0.62939453125}, {"start": 149.32, "end": 153.04, "word": " وبيضم", "probability": 0.650726318359375}, {"start": 153.04, "end": 153.94, "word": " بالإضافة", "probability": 0.897265625}, {"start": 153.94, "end": 154.22, "word": " للـ", "probability": 0.75048828125}, 
{"start": 154.22, "end": 154.94, "word": " Antithrombin", "probability": 0.9404296875}, {"start": 154.94, "end": 155.36, "word": " 3", "probability": 0.970703125}], "temperature": 1.0}, {"id": 7, "seek": 18163, "start": 156.53, "end": 181.63, "text": "مجموعة من الـinhibitors زي الـC1-esterase inhibitor زي الـAlpha-2-macrobipyrin، Alpha-2-antiplasmin، Alpha-1-antitrypsin، and heparin cofactor 2 يبدأ في مجموعة كبيرة بتندرج تحت مين؟ تحت هذه العائلة وهي Serine Protease Inhibitors، يعني الـEnzymatic Inhibitors، ماشي؟", "tokens": [2304, 7435, 2304, 2407, 27884, 9154, 2423, 39184, 259, 5455, 9862, 30767, 1829, 2423, 39184, 34, 16, 12, 3011, 651, 20406, 3029, 30767, 1829, 2423, 39184, 9171, 7211, 12, 17, 12, 37065, 16614, 647, 6016, 259, 12399, 20588, 12, 17, 12, 394, 34442, 296, 2367, 12399, 20588, 12, 16, 12, 394, 270, 627, 1878, 259, 12399, 293, 415, 2181, 259, 598, 69, 15104, 568, 7251, 44510, 10721, 8978, 3714, 7435, 2304, 2407, 27884, 9122, 3555, 48923, 39894, 41260, 47341, 6055, 33753, 3714, 9957, 22807, 6055, 33753, 29538, 18863, 16373, 37977, 37037, 1829, 4210, 533, 2114, 1370, 651, 682, 5455, 9862, 12399, 37495, 22653, 2423, 39184, 16257, 1229, 25915, 682, 5455, 9862, 12399, 3714, 33599, 1829, 22807], "avg_logprob": -0.29033118843013406, "compression_ratio": 1.4851063829787234, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 156.53, "end": 157.25, "word": "مجموعة", "probability": 0.87412109375}, {"start": 157.25, "end": 157.49, "word": " من", "probability": 0.98193359375}, {"start": 157.49, "end": 158.57, "word": " الـinhibitors", "probability": 0.6840576171875}, {"start": 158.57, "end": 158.81, "word": " زي", "probability": 0.767333984375}, {"start": 158.81, "end": 159.29, "word": " الـC1", "probability": 0.7960205078125}, {"start": 159.29, "end": 159.81, "word": "-esterase", "probability": 0.5068766276041666}, {"start": 159.81, "end": 160.45, "word": " inhibitor", "probability": 0.767333984375}, {"start": 160.45, "end": 161.07, "word": " زي", 
"probability": 0.869384765625}, {"start": 161.07, "end": 161.43, "word": " الـAlpha", "probability": 0.83447265625}, {"start": 161.43, "end": 161.61, "word": "-2", "probability": 0.7008056640625}, {"start": 161.61, "end": 162.63, "word": "-macrobipyrin،", "probability": 0.5749162946428571}, {"start": 162.63, "end": 162.83, "word": " Alpha", "probability": 0.875}, {"start": 162.83, "end": 163.15, "word": "-2", "probability": 0.972412109375}, {"start": 163.15, "end": 165.05, "word": "-antiplasmin،", "probability": 0.7874348958333334}, {"start": 165.05, "end": 165.43, "word": " Alpha", "probability": 0.943359375}, {"start": 165.43, "end": 166.75, "word": "-1", "probability": 0.9736328125}, {"start": 166.75, "end": 167.95, "word": "-antitrypsin،", "probability": 0.9010881696428571}, {"start": 167.95, "end": 168.05, "word": " and", "probability": 0.72998046875}, {"start": 168.05, "end": 168.49, "word": " heparin", "probability": 0.690185546875}, {"start": 168.49, "end": 168.99, "word": " cofactor", "probability": 0.737060546875}, {"start": 168.99, "end": 169.31, "word": " 2", "probability": 0.486328125}, {"start": 169.31, "end": 169.85, "word": " يبدأ", "probability": 0.8330078125}, {"start": 169.85, "end": 169.95, "word": " في", "probability": 0.95263671875}, {"start": 169.95, "end": 170.51, "word": " مجموعة", "probability": 0.9037109375}, {"start": 170.51, "end": 171.23, "word": " كبيرة", "probability": 0.9861653645833334}, {"start": 171.23, "end": 171.89, "word": " بتندرج", "probability": 0.9235026041666666}, {"start": 171.89, "end": 172.25, "word": " تحت", "probability": 0.99609375}, {"start": 172.25, "end": 173.01, "word": " مين؟", "probability": 0.7892252604166666}, {"start": 173.01, "end": 173.33, "word": " تحت", "probability": 0.991455078125}, {"start": 173.33, "end": 173.73, "word": " هذه", "probability": 0.890625}, {"start": 173.73, "end": 175.13, "word": " العائلة", "probability": 0.9899088541666666}, {"start": 175.13, "end": 176.09, "word": " وهي", 
"probability": 0.77294921875}, {"start": 176.09, "end": 177.15, "word": " Serine", "probability": 0.739013671875}, {"start": 177.15, "end": 178.11, "word": " Protease", "probability": 0.6862589518229166}, {"start": 178.11, "end": 178.85, "word": " Inhibitors،", "probability": 0.6727294921875}, {"start": 178.85, "end": 179.05, "word": " يعني", "probability": 0.869140625}, {"start": 179.05, "end": 180.01, "word": " الـEnzymatic", "probability": 0.63388671875}, {"start": 180.01, "end": 181.17, "word": " Inhibitors،", "probability": 0.862060546875}, {"start": 181.17, "end": 181.63, "word": " ماشي؟", "probability": 0.8819580078125}], "temperature": 1.0}, {"id": 8, "seek": 21181, "start": 182.75, "end": 211.81, "text": "مجموعة التانية هي عبارة عن cofactor in inhibitor بطلنا نحكي على coagulation factor بطلنا نحكي على coagulation cofactor ال cofactor احنا عارفين هم في ال coagulation case كده كم هو حد ال cofactor هم اتنين ال cofactor بشكل أساس ال cofactor هم factor خمسة و factor تمانى", "tokens": [2304, 7435, 2304, 2407, 27884, 16712, 7649, 10632, 39896, 6225, 3555, 9640, 3660, 18871, 598, 69, 15104, 294, 20406, 3029, 4724, 9566, 1211, 8315, 8717, 5016, 4117, 1829, 15844, 598, 559, 2776, 5952, 4724, 9566, 1211, 8315, 8717, 5016, 4117, 1829, 15844, 598, 559, 2776, 598, 69, 15104, 2423, 598, 69, 15104, 1975, 5016, 8315, 6225, 9640, 5172, 9957, 8032, 2304, 8978, 2423, 598, 559, 2776, 1389, 9122, 3215, 3224, 9122, 2304, 31439, 11331, 3215, 2423, 598, 69, 15104, 8032, 2304, 1975, 2655, 1863, 9957, 2423, 598, 69, 15104, 4724, 8592, 28820, 5551, 3794, 32277, 2423, 598, 69, 15104, 8032, 2304, 5952, 16490, 2304, 3794, 3660, 4032, 5952, 46811, 7649, 7578], "avg_logprob": -0.19419643096625805, "compression_ratio": 1.9578947368421054, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 182.75, "end": 183.25, "word": "مجموعة", "probability": 0.777099609375}, {"start": 183.25, "end": 183.81, "word": " التانية", "probability": 0.85986328125}, {"start": 183.81, "end": 184.69, 
"word": " هي", "probability": 0.904296875}, {"start": 184.69, "end": 185.07, "word": " عبارة", "probability": 0.83184814453125}, {"start": 185.07, "end": 185.31, "word": " عن", "probability": 0.99609375}, {"start": 185.31, "end": 186.37, "word": " cofactor", "probability": 0.6410319010416666}, {"start": 186.37, "end": 186.81, "word": " in", "probability": 0.4296875}, {"start": 186.81, "end": 187.73, "word": " inhibitor", "probability": 0.5863037109375}, {"start": 187.73, "end": 188.75, "word": " بطلنا", "probability": 0.81640625}, {"start": 188.75, "end": 189.07, "word": " نحكي", "probability": 0.98779296875}, {"start": 189.07, "end": 189.21, "word": " على", "probability": 0.52392578125}, {"start": 189.21, "end": 189.81, "word": " coagulation", "probability": 0.8658854166666666}, {"start": 189.81, "end": 190.27, "word": " factor", "probability": 0.63623046875}, {"start": 190.27, "end": 190.57, "word": " بطلنا", "probability": 0.9215087890625}, {"start": 190.57, "end": 190.91, "word": " نحكي", "probability": 0.9945068359375}, {"start": 190.91, "end": 191.17, "word": " على", "probability": 0.83447265625}, {"start": 191.17, "end": 192.59, "word": " coagulation", "probability": 0.9573567708333334}, {"start": 192.59, "end": 193.49, "word": " cofactor", "probability": 0.8899739583333334}, {"start": 193.49, "end": 193.85, "word": " ال", "probability": 0.52685546875}, {"start": 193.85, "end": 194.47, "word": " cofactor", "probability": 0.8489583333333334}, {"start": 194.47, "end": 195.11, "word": " احنا", "probability": 0.865234375}, {"start": 195.11, "end": 195.43, "word": " عارفين", "probability": 0.976806640625}, {"start": 195.43, "end": 195.65, "word": " هم", "probability": 0.934814453125}, {"start": 195.65, "end": 195.79, "word": " في", "probability": 0.810546875}, {"start": 195.79, "end": 196.05, "word": " ال", "probability": 0.91748046875}, {"start": 196.05, "end": 197.05, "word": " coagulation", "probability": 0.8180338541666666}, {"start": 197.05, "end": 197.33, 
"word": " case", "probability": 0.5087890625}, {"start": 197.33, "end": 197.57, "word": " كده", "probability": 0.8639322916666666}, {"start": 197.57, "end": 197.79, "word": " كم", "probability": 0.9541015625}, {"start": 197.79, "end": 197.93, "word": " هو", "probability": 0.78173828125}, {"start": 197.93, "end": 198.29, "word": " حد", "probability": 0.677734375}, {"start": 198.29, "end": 199.29, "word": " ال", "probability": 0.46240234375}, {"start": 199.29, "end": 202.51, "word": " cofactor", "probability": 0.8982747395833334}, {"start": 202.51, "end": 204.37, "word": " هم", "probability": 0.947265625}, {"start": 204.37, "end": 204.89, "word": " اتنين", "probability": 0.9569091796875}, {"start": 204.89, "end": 205.79, "word": " ال", "probability": 0.70751953125}, {"start": 205.79, "end": 206.47, "word": " cofactor", "probability": 0.9456380208333334}, {"start": 206.47, "end": 206.87, "word": " بشكل", "probability": 0.8898111979166666}, {"start": 206.87, "end": 207.37, "word": " أساس", "probability": 0.8513997395833334}, {"start": 207.37, "end": 207.49, "word": " ال", "probability": 0.499755859375}, {"start": 207.49, "end": 209.83, "word": " cofactor", "probability": 0.9197591145833334}, {"start": 209.83, "end": 210.09, "word": " هم", "probability": 0.920166015625}, {"start": 210.09, "end": 210.43, "word": " factor", "probability": 0.85302734375}, {"start": 210.43, "end": 210.89, "word": " خمسة", "probability": 0.8983154296875}, {"start": 210.89, "end": 211.01, "word": " و", "probability": 0.93359375}, {"start": 211.01, "end": 211.27, "word": " factor", "probability": 0.875}, {"start": 211.27, "end": 211.81, "word": " تمانى", "probability": 0.7657063802083334}], "temperature": 1.0}, {"id": 9, "seek": 24221, "start": 212.83, "end": 242.21, "text": "هؤلاء هم كو فاكتور و فاكتور يعني تمانين كلاهما تشكل كومبليكسيا واحد تلنيز واحد بروترومبنيز و احنا نعرف ان اذا كان التلنيز و بروترومبنيز لم يتكونوا مش فشكوا اجلاشي صح؟ فبالتالي احنا بنعمل انهيبشين لكومبليكسيا بطريقة غير 
مباشرة من خلال الانهيبشين of the cofactor", "tokens": [3224, 33604, 1211, 16606, 8032, 2304, 9122, 2407, 6156, 995, 4117, 2655, 13063, 4032, 6156, 995, 4117, 2655, 13063, 37495, 22653, 46811, 7649, 9957, 9122, 15040, 3224, 15042, 6055, 8592, 28820, 9122, 20498, 3555, 20292, 4117, 3794, 25528, 36764, 24401, 6055, 1211, 22653, 11622, 36764, 24401, 4724, 32887, 2655, 2288, 20498, 3555, 22653, 11622, 4032, 1975, 5016, 8315, 8717, 3615, 28480, 16472, 1975, 15730, 25961, 16712, 1211, 22653, 11622, 4032, 4724, 32887, 2655, 2288, 20498, 3555, 22653, 11622, 32767, 7251, 2655, 30544, 14407, 37893, 6156, 8592, 4117, 14407, 1975, 7435, 1211, 33599, 1829, 20328, 5016, 22807, 6156, 3555, 6027, 2655, 6027, 1829, 1975, 5016, 8315, 44945, 25957, 1211, 16472, 3224, 1829, 3555, 8592, 9957, 5296, 4117, 20498, 3555, 20292, 4117, 3794, 25528, 4724, 9566, 16572, 28671, 32771, 13546, 3714, 3555, 33599, 25720, 9154, 16490, 1211, 6027, 2423, 7649, 3224, 1829, 3555, 8592, 9957, 295, 264, 598, 69, 15104], "avg_logprob": -0.4811241610738255, "compression_ratio": 1.974468085106383, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 212.83, "end": 213.03, "word": "هؤلاء", "probability": 0.5299530029296875}, {"start": 213.03, "end": 213.31, "word": " هم", "probability": 0.492889404296875}, {"start": 213.31, "end": 213.49, "word": " كو", "probability": 0.194366455078125}, {"start": 213.49, "end": 214.13, "word": " فاكتور", "probability": 0.82392578125}, {"start": 214.13, "end": 215.95, "word": " و", "probability": 0.2237548828125}, {"start": 215.95, "end": 216.69, "word": " فاكتور", "probability": 0.8046142578125}, {"start": 216.69, "end": 217.09, "word": " يعني", "probability": 0.554931640625}, {"start": 217.09, "end": 217.79, "word": " تمانين", "probability": 0.7034505208333334}, {"start": 217.79, "end": 219.33, "word": " كلاهما", "probability": 0.49053955078125}, {"start": 219.33, "end": 220.09, "word": " تشكل", "probability": 0.313232421875}, {"start": 220.09, "end": 221.55, "word": 
" كومبليكسيا", "probability": 0.6211111886160714}, {"start": 221.55, "end": 222.27, "word": " واحد", "probability": 0.7261962890625}, {"start": 222.27, "end": 223.13, "word": " تلنيز", "probability": 0.638763427734375}, {"start": 223.13, "end": 224.03, "word": " واحد", "probability": 0.794677734375}, {"start": 224.03, "end": 225.07, "word": " بروترومبنيز", "probability": 0.6970672607421875}, {"start": 225.07, "end": 225.81, "word": " و", "probability": 0.5888671875}, {"start": 225.81, "end": 225.91, "word": " احنا", "probability": 0.6798095703125}, {"start": 225.91, "end": 226.13, "word": " نعرف", "probability": 0.8289388020833334}, {"start": 226.13, "end": 226.35, "word": " ان", "probability": 0.5048828125}, {"start": 226.35, "end": 226.49, "word": " اذا", "probability": 0.7958984375}, {"start": 226.49, "end": 226.91, "word": " كان", "probability": 0.94775390625}, {"start": 226.91, "end": 228.31, "word": " التلنيز", "probability": 0.85455322265625}, {"start": 228.31, "end": 228.43, "word": " و", "probability": 0.7392578125}, {"start": 228.43, "end": 229.17, "word": " بروترومبنيز", "probability": 0.89691162109375}, {"start": 229.17, "end": 229.29, "word": " لم", "probability": 0.3115234375}, {"start": 229.29, "end": 229.73, "word": " يتكونوا", "probability": 0.784912109375}, {"start": 229.73, "end": 229.81, "word": " مش", "probability": 0.1583251953125}, {"start": 229.81, "end": 230.17, "word": " فشكوا", "probability": 0.396240234375}, {"start": 230.17, "end": 230.85, "word": " اجلاشي", "probability": 0.4546875}, {"start": 230.85, "end": 231.45, "word": " صح؟", "probability": 0.69677734375}, {"start": 231.45, "end": 232.53, "word": " فبالتالي", "probability": 0.8958740234375}, {"start": 232.53, "end": 232.69, "word": " احنا", "probability": 0.7434895833333334}, {"start": 232.69, "end": 233.07, "word": " بنعمل", "probability": 0.9676106770833334}, {"start": 233.07, "end": 233.67, "word": " انهيبشين", "probability": 0.62939453125}, {"start": 233.67, "end": 234.75, 
"word": " لكومبليكسيا", "probability": 0.815887451171875}, {"start": 234.75, "end": 235.57, "word": " بطريقة", "probability": 0.9716796875}, {"start": 235.57, "end": 236.01, "word": " غير", "probability": 0.99169921875}, {"start": 236.01, "end": 236.71, "word": " مباشرة", "probability": 0.9957275390625}, {"start": 236.71, "end": 237.25, "word": " من", "probability": 0.9892578125}, {"start": 237.25, "end": 237.97, "word": " خلال", "probability": 0.9905598958333334}, {"start": 237.97, "end": 239.91, "word": " الانهيبشين", "probability": 0.7743791852678571}, {"start": 239.91, "end": 240.11, "word": " of", "probability": 0.5341796875}, {"start": 240.11, "end": 241.35, "word": " the", "probability": 0.3876953125}, {"start": 241.35, "end": 242.21, "word": " cofactor", "probability": 0.5335286458333334}], "temperature": 1.0}, {"id": 10, "seek": 24954, "start": 242.24, "end": 249.54, "text": "coagulation and cofactor هم خمسة و تمانية نشاطين و هذه عبارة عن عائلة بتضم", "tokens": [1291, 559, 2776, 293, 598, 69, 15104, 8032, 2304, 16490, 2304, 3794, 3660, 4032, 46811, 7649, 10632, 8717, 8592, 41193, 9957, 4032, 29538, 6225, 3555, 9640, 3660, 18871, 6225, 16373, 37977, 39894, 11242, 2304], "avg_logprob": -0.23928571087973458, "compression_ratio": 1.0865384615384615, "no_speech_prob": 2.9802322387695312e-06, "words": [{"start": 242.24, "end": 243.14, "word": "coagulation", "probability": 0.8623046875}, {"start": 243.14, "end": 243.28, "word": " and", "probability": 0.2257080078125}, {"start": 243.28, "end": 243.8, "word": " cofactor", "probability": 0.7244466145833334}, {"start": 243.8, "end": 244.06, "word": " هم", "probability": 0.6468505859375}, {"start": 244.06, "end": 244.4, "word": " خمسة", "probability": 0.855712890625}, {"start": 244.4, "end": 244.44, "word": " و", "probability": 0.6904296875}, {"start": 244.44, "end": 244.78, "word": " تمانية", "probability": 0.9581705729166666}, {"start": 244.78, "end": 245.44, "word": " نشاطين", "probability": 0.9342041015625}, 
{"start": 245.44, "end": 246.3, "word": " و", "probability": 0.40380859375}, {"start": 246.3, "end": 247.82, "word": " هذه", "probability": 0.468505859375}, {"start": 247.82, "end": 248.22, "word": " عبارة", "probability": 0.9859619140625}, {"start": 248.22, "end": 248.4, "word": " عن", "probability": 0.98291015625}, {"start": 248.4, "end": 248.9, "word": " عائلة", "probability": 0.984375}, {"start": 248.9, "end": 249.54, "word": " بتضم", "probability": 0.8836263020833334}], "temperature": 1.0}, {"id": 11, "seek": 28039, "start": 250.88, "end": 280.4, "text": "مجموعة كبيرة بيسموها protein-c system protein-c system وهي بتضم protein-c و protein-s ثم ال trombomodulin و ال cofactors اللي برتبت معاهم ماشي؟ و ال cofactor اللي برتبت معاهم يبجي هيفرها مجموعة كبيرة تعمل انهيبش لل activated form of cofactor 5 and cofactor 8", "tokens": [2304, 7435, 2304, 2407, 27884, 9122, 3555, 48923, 4724, 1829, 38251, 2407, 11296, 7944, 12, 66, 1185, 7944, 12, 66, 1185, 37037, 1829, 39894, 11242, 2304, 7944, 12, 66, 4032, 7944, 12, 82, 38637, 2304, 2423, 504, 3548, 298, 378, 17701, 4032, 2423, 598, 44919, 830, 13672, 1829, 4724, 43500, 3555, 2655, 20449, 995, 16095, 3714, 33599, 1829, 22807, 4032, 2423, 598, 69, 15104, 13672, 1829, 4724, 43500, 3555, 2655, 20449, 995, 16095, 7251, 3555, 7435, 1829, 39896, 5172, 2288, 11296, 3714, 7435, 2304, 2407, 27884, 9122, 3555, 48923, 6055, 25957, 1211, 16472, 3224, 1829, 3555, 8592, 24976, 18157, 1254, 295, 598, 69, 15104, 1025, 293, 598, 69, 15104, 1649], "avg_logprob": -0.2854729665292276, "compression_ratio": 1.836734693877551, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 250.88, "end": 251.44, "word": "مجموعة", "probability": 0.86396484375}, {"start": 251.44, "end": 251.86, "word": " كبيرة", "probability": 0.96923828125}, {"start": 251.86, "end": 252.5, "word": " بيسموها", "probability": 0.85517578125}, {"start": 252.5, "end": 252.94, "word": " protein", "probability": 0.505859375}, {"start": 252.94, "end": 253.6, "word": "-c", 
"probability": 0.373291015625}, {"start": 253.6, "end": 254.24, "word": " system", "probability": 0.595703125}, {"start": 254.24, "end": 254.66, "word": " protein", "probability": 0.5087890625}, {"start": 254.66, "end": 255.22, "word": "-c", "probability": 0.950439453125}, {"start": 255.22, "end": 256.08, "word": " system", "probability": 0.89697265625}, {"start": 256.08, "end": 256.96, "word": " وهي", "probability": 0.6153564453125}, {"start": 256.96, "end": 257.46, "word": " بتضم", "probability": 0.73486328125}, {"start": 257.46, "end": 257.88, "word": " protein", "probability": 0.93798828125}, {"start": 257.88, "end": 258.32, "word": "-c", "probability": 0.927490234375}, {"start": 258.32, "end": 259.34, "word": " و", "probability": 0.91455078125}, {"start": 259.34, "end": 259.78, "word": " protein", "probability": 0.8876953125}, {"start": 259.78, "end": 260.34, "word": "-s", "probability": 0.939697265625}, {"start": 260.34, "end": 261.22, "word": " ثم", "probability": 0.845947265625}, {"start": 261.22, "end": 261.48, "word": " ال", "probability": 0.2215576171875}, {"start": 261.48, "end": 262.42, "word": " trombomodulin", "probability": 0.73408203125}, {"start": 262.42, "end": 263.52, "word": " و", "probability": 0.84912109375}, {"start": 263.52, "end": 264.38, "word": " ال", "probability": 0.92431640625}, {"start": 264.38, "end": 266.0, "word": " cofactors", "probability": 0.7498372395833334}, {"start": 266.0, "end": 266.2, "word": " اللي", "probability": 0.92919921875}, {"start": 266.2, "end": 266.64, "word": " برتبت", "probability": 0.7930908203125}, {"start": 266.64, "end": 267.14, "word": " معاهم", "probability": 0.9798177083333334}, {"start": 267.14, "end": 267.86, "word": " ماشي؟", "probability": 0.72674560546875}, {"start": 267.86, "end": 268.18, "word": " و", "probability": 0.8173828125}, {"start": 268.18, "end": 268.3, "word": " ال", "probability": 0.9443359375}, {"start": 268.3, "end": 269.02, "word": " cofactor", "probability": 0.778564453125}, 
{"start": 269.02, "end": 269.8, "word": " اللي", "probability": 0.986083984375}, {"start": 269.8, "end": 270.28, "word": " برتبت", "probability": 0.9169921875}, {"start": 270.28, "end": 270.72, "word": " معاهم", "probability": 0.9615885416666666}, {"start": 270.72, "end": 271.42, "word": " يبجي", "probability": 0.5264892578125}, {"start": 271.42, "end": 271.74, "word": " هيفرها", "probability": 0.404571533203125}, {"start": 271.74, "end": 272.2, "word": " مجموعة", "probability": 0.949609375}, {"start": 272.2, "end": 272.68, "word": " كبيرة", "probability": 0.9747721354166666}, {"start": 272.68, "end": 273.54, "word": " تعمل", "probability": 0.6671549479166666}, {"start": 273.54, "end": 273.92, "word": " انهيبش", "probability": 0.91435546875}, {"start": 273.92, "end": 274.12, "word": " لل", "probability": 0.345703125}, {"start": 274.12, "end": 274.6, "word": " activated", "probability": 0.599609375}, {"start": 274.6, "end": 275.5, "word": " form", "probability": 0.9384765625}, {"start": 275.5, "end": 275.86, "word": " of", "probability": 0.9521484375}, {"start": 275.86, "end": 276.8, "word": " cofactor", "probability": 0.8857421875}, {"start": 276.8, "end": 277.42, "word": " 5", "probability": 0.57470703125}, {"start": 277.42, "end": 278.58, "word": " and", "probability": 0.50341796875}, {"start": 278.58, "end": 279.34, "word": " cofactor", "probability": 0.9314778645833334}, {"start": 279.34, "end": 280.4, "word": " 8", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 12, "seek": 30641, "start": 281.17, "end": 306.41, "text": "النشاطين مجموعة التالتة وهي tissue factor pathway inhibitor وهي مجموعة مفاصلة لحالنا وشوفنا دور ال tissue factor مرة فاترة tissue factor pathway inhibitor وحنتعرف على ال mechanism هو بيعمل inhibition لمين؟ ال tissue factor سبعة ايه؟ complex و factor عشرة و factor عشرة النشاط", "tokens": [6027, 1863, 8592, 41193, 9957, 3714, 7435, 2304, 2407, 27884, 16712, 6027, 2655, 3660, 37037, 1829, 12404, 5952, 18590, 20406, 3029, 37037, 1829, 
3714, 7435, 2304, 2407, 27884, 3714, 5172, 33546, 37977, 5296, 5016, 6027, 8315, 4032, 8592, 38688, 8315, 11778, 13063, 2423, 12404, 5952, 3714, 25720, 6156, 9307, 25720, 12404, 5952, 18590, 20406, 3029, 4032, 5016, 29399, 3615, 28480, 15844, 2423, 7513, 31439, 4724, 1829, 25957, 1211, 20406, 849, 5296, 2304, 9957, 22807, 2423, 12404, 5952, 8608, 3555, 27884, 1975, 1829, 3224, 22807, 3997, 4032, 5952, 6225, 8592, 25720, 4032, 5952, 6225, 8592, 25720, 28239, 8592, 41193], "avg_logprob": -0.31960227152313847, "compression_ratio": 1.9068627450980393, "no_speech_prob": 1.4901161193847656e-06, "words": [{"start": 281.17, "end": 282.07, "word": "النشاطين", "probability": 0.82216796875}, {"start": 282.07, "end": 283.95, "word": " مجموعة", "probability": 0.78505859375}, {"start": 283.95, "end": 284.47, "word": " التالتة", "probability": 0.8826904296875}, {"start": 284.47, "end": 284.79, "word": " وهي", "probability": 0.6231689453125}, {"start": 284.79, "end": 285.01, "word": " tissue", "probability": 0.8994140625}, {"start": 285.01, "end": 285.47, "word": " factor", "probability": 0.62939453125}, {"start": 285.47, "end": 285.87, "word": " pathway", "probability": 0.94140625}, {"start": 285.87, "end": 286.47, "word": " inhibitor", "probability": 0.895751953125}, {"start": 286.47, "end": 286.77, "word": " وهي", "probability": 0.745361328125}, {"start": 286.77, "end": 287.07, "word": " مجموعة", "probability": 0.9615234375}, {"start": 287.07, "end": 287.53, "word": " مفاصلة", "probability": 0.9058837890625}, {"start": 287.53, "end": 288.19, "word": " لحالنا", "probability": 0.8165283203125}, {"start": 288.19, "end": 289.01, "word": " وشوفنا", "probability": 0.8701171875}, {"start": 289.01, "end": 289.23, "word": " دور", "probability": 0.982666015625}, {"start": 289.23, "end": 289.37, "word": " ال", "probability": 0.830078125}, {"start": 289.37, "end": 289.53, "word": " tissue", "probability": 0.87890625}, {"start": 289.53, "end": 290.03, "word": " factor", "probability": 
0.853515625}, {"start": 290.03, "end": 290.45, "word": " مرة", "probability": 0.3214111328125}, {"start": 290.45, "end": 291.27, "word": " فاترة", "probability": 0.5984700520833334}, {"start": 291.27, "end": 291.57, "word": " tissue", "probability": 0.4404296875}, {"start": 291.57, "end": 292.07, "word": " factor", "probability": 0.86572265625}, {"start": 292.07, "end": 292.45, "word": " pathway", "probability": 0.94287109375}, {"start": 292.45, "end": 293.11, "word": " inhibitor", "probability": 0.868408203125}, {"start": 293.11, "end": 293.95, "word": " وحنتعرف", "probability": 0.7720703125}, {"start": 293.95, "end": 294.13, "word": " على", "probability": 0.8291015625}, {"start": 294.13, "end": 294.27, "word": " ال", "probability": 0.6513671875}, {"start": 294.27, "end": 294.77, "word": " mechanism", "probability": 0.6259765625}, {"start": 294.77, "end": 295.45, "word": " هو", "probability": 0.90771484375}, {"start": 295.45, "end": 295.99, "word": " بيعمل", "probability": 0.84661865234375}, {"start": 295.99, "end": 296.77, "word": " inhibition", "probability": 0.77880859375}, {"start": 296.77, "end": 298.67, "word": " لمين؟", "probability": 0.585845947265625}, {"start": 298.67, "end": 298.67, "word": " ال", "probability": 0.12139892578125}, {"start": 298.67, "end": 299.57, "word": " tissue", "probability": 0.92529296875}, {"start": 299.57, "end": 300.09, "word": " factor", "probability": 0.8740234375}, {"start": 300.09, "end": 301.15, "word": " سبعة", "probability": 0.7069905598958334}, {"start": 301.15, "end": 301.43, "word": " ايه؟", "probability": 0.77801513671875}, {"start": 301.43, "end": 302.07, "word": " complex", "probability": 0.49658203125}, {"start": 302.07, "end": 302.95, "word": " و", "probability": 0.72119140625}, {"start": 302.95, "end": 303.95, "word": " factor", "probability": 0.3369140625}, {"start": 303.95, "end": 304.47, "word": " عشرة", "probability": 0.9334309895833334}, {"start": 304.47, "end": 304.69, "word": " و", "probability": 
0.79296875}, {"start": 304.69, "end": 304.97, "word": " factor", "probability": 0.689453125}, {"start": 304.97, "end": 305.65, "word": " عشرة", "probability": 0.7259928385416666}, {"start": 305.65, "end": 306.41, "word": " النشاط", "probability": 0.657470703125}], "temperature": 1.0}, {"id": 13, "seek": 33552, "start": 306.8, "end": 335.52, "text": "بتبقى معايا انت؟ تسعة بقى؟ لأ تسعة مش بيخش .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيصير وش نمشي من .. بيص", "tokens": [3555, 2655, 3555, 4587, 7578, 20449, 995, 25528, 16472, 2655, 22807, 6055, 3794, 27884, 4724, 4587, 7578, 22807, 5296, 10721, 6055, 3794, 27884, 37893, 4724, 1829, 9778, 8592, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 
9154, 4386, 4724, 1829, 9381, 13546, 4032, 8592, 8717, 2304, 8592, 1829, 9154, 4386, 4724, 1829, 9381], "avg_logprob": -0.1537499957614475, "compression_ratio": 6.673913043478261, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 306.8, "end": 307.24, "word": "بتبقى", "probability": 0.7135498046875}, {"start": 307.24, "end": 307.56, "word": " معايا", "probability": 0.7908528645833334}, {"start": 307.56, "end": 308.18, "word": " انت؟", "probability": 0.5225016276041666}, {"start": 308.18, "end": 309.02, "word": " تسعة", "probability": 0.548095703125}, {"start": 309.02, "end": 309.64, "word": " بقى؟", "probability": 0.38153076171875}, {"start": 309.64, "end": 309.98, "word": " لأ", "probability": 0.7071533203125}, {"start": 309.98, "end": 310.34, "word": " تسعة", "probability": 0.9326171875}, {"start": 310.34, "end": 310.54, "word": " مش", "probability": 0.266845703125}, {"start": 310.54, "end": 311.1, "word": " بيخش", "probability": 0.689453125}, {"start": 311.1, "end": 311.42, "word": " ..", "probability": 0.1298828125}, {"start": 311.42, "end": 312.04, "word": " بيصير", "probability": 0.56158447265625}, {"start": 312.04, "end": 312.22, "word": " وش", "probability": 0.560546875}, {"start": 312.22, "end": 312.56, "word": " نمشي", "probability": 0.6922607421875}, {"start": 312.56, "end": 312.78, "word": " من", "probability": 0.5546875}, {"start": 312.78, "end": 313.1, "word": " ..", "probability": 0.1483154296875}, {"start": 313.1, "end": 314.24, "word": " بيصير", "probability": 0.759765625}, {"start": 314.24, "end": 314.24, "word": " وش", "probability": 0.814453125}, {"start": 314.24, "end": 314.24, "word": " نمشي", "probability": 0.992431640625}, {"start": 314.24, "end": 314.24, "word": " من", "probability": 0.94287109375}, {"start": 314.24, "end": 314.24, "word": " ..", "probability": 0.288818359375}, {"start": 314.24, "end": 314.24, "word": " بيصير", "probability": 0.93310546875}, {"start": 314.24, "end": 314.24, "word": " وش", "probability": 
0.991455078125}, {"start": 314.24, "end": 314.24, "word": " نمشي", "probability": 0.9976806640625}, {"start": 314.24, "end": 314.24, "word": " من", "probability": 0.97998046875}, {"start": 314.24, "end": 314.24, "word": " ..", "probability": 0.56396484375}, {"start": 314.24, "end": 314.24, "word": " بيصير", "probability": 0.9656982421875}, {"start": 314.24, "end": 314.24, "word": " وش", "probability": 0.989013671875}, {"start": 314.24, "end": 314.24, "word": " نمشي", "probability": 0.9974365234375}, {"start": 314.24, "end": 314.24, "word": " من", "probability": 0.9853515625}, {"start": 314.24, "end": 314.24, "word": " ..", "probability": 0.69482421875}, {"start": 314.24, "end": 314.24, "word": " بيصير", "probability": 0.9697265625}, {"start": 314.24, "end": 314.24, "word": " وش", "probability": 0.986083984375}, {"start": 314.24, "end": 314.3, "word": " نمشي", "probability": 0.997314453125}, {"start": 314.3, "end": 314.3, "word": " من", "probability": 0.98388671875}, {"start": 314.3, "end": 314.3, "word": " ..", "probability": 0.724609375}, {"start": 314.3, "end": 314.3, "word": " بيصير", "probability": 0.968994140625}, {"start": 314.3, "end": 314.3, "word": " وش", "probability": 0.9873046875}, {"start": 314.3, "end": 314.4, "word": " نمشي", "probability": 0.997314453125}, {"start": 314.4, "end": 314.4, "word": " من", "probability": 0.98046875}, {"start": 314.4, "end": 314.4, "word": " ..", "probability": 0.751953125}, {"start": 314.4, "end": 314.4, "word": " بيصير", "probability": 0.96923828125}, {"start": 314.4, "end": 314.4, "word": " وش", "probability": 0.989501953125}, {"start": 314.4, "end": 314.46, "word": " نمشي", "probability": 0.9974365234375}, {"start": 314.46, "end": 314.5, "word": " من", "probability": 0.97998046875}, {"start": 314.5, "end": 314.66, "word": " ..", "probability": 0.83935546875}, {"start": 314.66, "end": 314.68, "word": " بيصير", "probability": 0.9705810546875}, {"start": 314.68, "end": 314.68, "word": " وش", "probability": 
0.991455078125}, {"start": 314.68, "end": 314.68, "word": " نمشي", "probability": 0.99755859375}, {"start": 314.68, "end": 314.68, "word": " من", "probability": 0.98046875}, {"start": 314.68, "end": 314.68, "word": " ..", "probability": 0.9033203125}, {"start": 314.68, "end": 314.68, "word": " بيصير", "probability": 0.9710693359375}, {"start": 314.68, "end": 314.68, "word": " وش", "probability": 0.991943359375}, {"start": 314.68, "end": 314.68, "word": " نمشي", "probability": 0.99755859375}, {"start": 314.68, "end": 314.68, "word": " من", "probability": 0.98193359375}, {"start": 314.68, "end": 314.68, "word": " ..", "probability": 0.943359375}, {"start": 314.68, "end": 314.68, "word": " بيصير", "probability": 0.9722900390625}, {"start": 314.68, "end": 314.68, "word": " وش", "probability": 0.992431640625}, {"start": 314.68, "end": 314.68, "word": " نمشي", "probability": 0.997802734375}, {"start": 314.68, "end": 314.68, "word": " من", "probability": 0.9833984375}, {"start": 314.68, "end": 314.68, "word": " ..", "probability": 0.96728515625}, {"start": 314.68, "end": 314.68, "word": " بيصير", "probability": 0.973876953125}, {"start": 314.68, "end": 314.68, "word": " وش", "probability": 0.99267578125}, {"start": 314.68, "end": 314.72, "word": " نمشي", "probability": 0.997802734375}, {"start": 314.72, "end": 314.74, "word": " من", "probability": 0.9853515625}, {"start": 314.74, "end": 314.84, "word": " ..", "probability": 0.97900390625}, {"start": 314.84, "end": 314.88, "word": " بيصير", "probability": 0.9755859375}, {"start": 314.88, "end": 314.88, "word": " وش", "probability": 0.992431640625}, {"start": 314.88, "end": 314.88, "word": " نمشي", "probability": 0.997802734375}, {"start": 314.88, "end": 314.9, "word": " من", "probability": 0.98486328125}, {"start": 314.9, "end": 315.3, "word": " ..", "probability": 0.98291015625}, {"start": 315.3, "end": 316.52, "word": " بيصير", "probability": 0.9749755859375}, {"start": 316.52, "end": 316.68, "word": " وش", 
"probability": 0.991943359375}, {"start": 316.68, "end": 317.14, "word": " نمشي", "probability": 0.997802734375}, {"start": 317.14, "end": 317.2, "word": " من", "probability": 0.98486328125}, {"start": 317.2, "end": 317.32, "word": " ..", "probability": 0.986328125}, {"start": 317.32, "end": 321.54, "word": " بيصير", "probability": 0.9749755859375}, {"start": 321.54, "end": 321.9, "word": " وش", "probability": 0.990966796875}, {"start": 321.9, "end": 322.22, "word": " نمشي", "probability": 0.997802734375}, {"start": 322.22, "end": 322.32, "word": " من", "probability": 0.98486328125}, {"start": 322.32, "end": 324.22, "word": " ..", "probability": 0.9873046875}, {"start": 324.22, "end": 325.52, "word": " بيصير", "probability": 0.973388671875}, {"start": 325.52, "end": 325.58, "word": " وش", "probability": 0.99072265625}, {"start": 325.58, "end": 327.08, "word": " نمشي", "probability": 0.9976806640625}, {"start": 327.08, "end": 327.08, "word": " من", "probability": 0.98583984375}, {"start": 327.08, "end": 327.16, "word": " ..", "probability": 0.98779296875}, {"start": 327.16, "end": 328.0, "word": " بيصير", "probability": 0.9722900390625}, {"start": 328.0, "end": 333.8, "word": " وش", "probability": 0.989990234375}, {"start": 333.8, "end": 334.9, "word": " نمشي", "probability": 0.99755859375}, {"start": 334.9, "end": 334.9, "word": " من", "probability": 0.98779296875}, {"start": 334.9, "end": 335.48, "word": " ..", "probability": 0.98828125}, {"start": 335.48, "end": 335.52, "word": " بيص", "probability": 0.9635416666666666}], "temperature": 1.0}, {"id": 14, "seek": 35023, "start": 336.73, "end": 350.23, "text": "بيرتبط الـ 9 و 10 أو 11 و 12 بيرتبط لأنه اسمه Antithrombin بشكل أساسي بيمسك في مين؟ بالـ Thrombin، وThrombin من أين جهد؟", "tokens": [3555, 13546, 2655, 3555, 9566, 2423, 39184, 1722, 4032, 1266, 34051, 2975, 4032, 2272, 4724, 13546, 2655, 3555, 9566, 5296, 33456, 3224, 24525, 2304, 3224, 5130, 355, 81, 3548, 259, 4724, 8592, 28820, 5551, 3794, 32277, 1829, 
4724, 32640, 3794, 4117, 8978, 3714, 9957, 22807, 20666, 39184, 41645, 3548, 259, 12399, 4032, 47256, 3548, 259, 9154, 5551, 9957, 10874, 3224, 3215, 22807], "avg_logprob": -0.28571427719933645, "compression_ratio": 1.3093525179856116, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 336.72999999999996, "end": 337.53, "word": "بيرتبط", "probability": 0.834423828125}, {"start": 337.53, "end": 337.75, "word": " الـ", "probability": 0.767578125}, {"start": 337.75, "end": 337.89, "word": " 9", "probability": 0.45068359375}, {"start": 337.89, "end": 338.07, "word": " و", "probability": 0.351318359375}, {"start": 338.07, "end": 338.31, "word": " 10", "probability": 0.6123046875}, {"start": 338.31, "end": 338.49, "word": " أو", "probability": 0.640625}, {"start": 338.49, "end": 338.79, "word": " 11", "probability": 0.88330078125}, {"start": 338.79, "end": 338.97, "word": " و", "probability": 0.98046875}, {"start": 338.97, "end": 339.31, "word": " 12", "probability": 0.91748046875}, {"start": 339.31, "end": 342.03, "word": " بيرتبط", "probability": 0.8857421875}, {"start": 342.03, "end": 342.95, "word": " لأنه", "probability": 0.6134440104166666}, {"start": 342.95, "end": 343.41, "word": " اسمه", "probability": 0.9344075520833334}, {"start": 343.41, "end": 344.55, "word": " Antithrombin", "probability": 0.78818359375}, {"start": 344.55, "end": 345.19, "word": " بشكل", "probability": 0.9619140625}, {"start": 345.19, "end": 346.11, "word": " أساسي", "probability": 0.9696044921875}, {"start": 346.11, "end": 346.83, "word": " بيمسك", "probability": 0.8055419921875}, {"start": 346.83, "end": 346.99, "word": " في", "probability": 0.84375}, {"start": 346.99, "end": 348.09, "word": " مين؟", "probability": 0.664306640625}, {"start": 348.09, "end": 348.29, "word": " بالـ", "probability": 0.75439453125}, {"start": 348.29, "end": 348.91, "word": " Thrombin،", "probability": 0.6705322265625}, {"start": 348.91, "end": 349.43, "word": " وThrombin", "probability": 
0.76080322265625}, {"start": 349.43, "end": 349.61, "word": " من", "probability": 0.8076171875}, {"start": 349.61, "end": 349.73, "word": " أين", "probability": 0.678466796875}, {"start": 349.73, "end": 350.23, "word": " جهد؟", "probability": 0.8221435546875}], "temperature": 1.0}, {"id": 15, "seek": 36683, "start": 353.65, "end": 366.83, "text": "وهو factor two لما بتنشط، بتحول إلى إيه؟ إلى thrombin، فده بنمشيك ال thrombin، سموه anti-thrombin، لإنه بيمشك في كل هدول الـCerebrum، لكن ال major، ال potent", "tokens": [2407, 3224, 2407, 5952, 732, 5296, 15042, 39894, 1863, 8592, 9566, 12399, 39894, 5016, 12610, 30731, 11933, 1829, 3224, 22807, 30731, 739, 3548, 259, 12399, 6156, 3215, 3224, 44945, 2304, 8592, 1829, 4117, 2423, 739, 3548, 259, 12399, 8608, 2304, 2407, 3224, 6061, 12, 392, 81, 3548, 259, 12399, 5296, 28814, 1863, 3224, 4724, 32640, 8592, 4117, 8978, 28242, 8032, 3215, 12610, 2423, 39184, 34, 323, 1443, 449, 12399, 44381, 2423, 2563, 12399, 2423, 27073], "avg_logprob": -0.43297697250780304, "compression_ratio": 1.4085365853658536, "no_speech_prob": 1.5616416931152344e-05, "words": [{"start": 353.65, "end": 354.39, "word": "وهو", "probability": 0.5903727213541666}, {"start": 354.39, "end": 354.69, "word": " factor", "probability": 0.2861328125}, {"start": 354.69, "end": 355.11, "word": " two", "probability": 0.5302734375}, {"start": 355.11, "end": 355.77, "word": " لما", "probability": 0.76513671875}, {"start": 355.77, "end": 356.37, "word": " بتنشط،", "probability": 0.6368896484375}, {"start": 356.37, "end": 356.79, "word": " بتحول", "probability": 0.8370768229166666}, {"start": 356.79, "end": 357.03, "word": " إلى", "probability": 0.7080078125}, {"start": 357.03, "end": 357.41, "word": " إيه؟", "probability": 0.674346923828125}, {"start": 357.41, "end": 357.97, "word": " إلى", "probability": 0.75}, {"start": 357.97, "end": 358.81, "word": " thrombin،", "probability": 0.76165771484375}, {"start": 358.81, "end": 359.03, "word": " فده", "probability": 
0.6339518229166666}, {"start": 359.03, "end": 359.43, "word": " بنمشيك", "probability": 0.74541015625}, {"start": 359.43, "end": 359.53, "word": " ال", "probability": 0.845703125}, {"start": 359.53, "end": 360.05, "word": " thrombin،", "probability": 0.83819580078125}, {"start": 360.05, "end": 360.95, "word": " سموه", "probability": 0.876220703125}, {"start": 360.95, "end": 361.35, "word": " anti", "probability": 0.228759765625}, {"start": 361.35, "end": 362.01, "word": "-thrombin،", "probability": 0.7844645182291666}, {"start": 362.01, "end": 362.29, "word": " لإنه", "probability": 0.84130859375}, {"start": 362.29, "end": 362.65, "word": " بيمشك", "probability": 0.7734375}, {"start": 362.65, "end": 362.79, "word": " في", "probability": 0.900390625}, {"start": 362.79, "end": 363.01, "word": " كل", "probability": 0.984375}, {"start": 363.01, "end": 363.41, "word": " هدول", "probability": 0.9143880208333334}, {"start": 363.41, "end": 364.37, "word": " الـCerebrum،", "probability": 0.36411830357142855}, {"start": 364.37, "end": 365.27, "word": " لكن", "probability": 0.72998046875}, {"start": 365.27, "end": 365.57, "word": " ال", "probability": 0.91064453125}, {"start": 365.57, "end": 366.23, "word": " major،", "probability": 0.7822265625}, {"start": 366.23, "end": 366.41, "word": " ال", "probability": 0.94287109375}, {"start": 366.41, "end": 366.83, "word": " potent", "probability": 0.919921875}], "temperature": 1.0}, {"id": 16, "seek": 38496, "start": 367.72, "end": 384.96, "text": "أنتي اللي براكو أجلد هو عبارة عن مين؟ لثرمبل، أبوهم مين؟ لثرمبل، المرة الفاتة تشوفناله أكم وظيفة؟ ستة، آه ستة، ماشي؟ وفي وظيفة سابعة كمان حنقدر نكلم فيها، ماشي يا شباب؟", "tokens": [10721, 29399, 1829, 13672, 1829, 4724, 23557, 4117, 2407, 5551, 7435, 1211, 3215, 31439, 6225, 3555, 9640, 3660, 18871, 3714, 9957, 22807, 5296, 12984, 2288, 2304, 36150, 12399, 5551, 3555, 2407, 16095, 3714, 9957, 22807, 5296, 12984, 2288, 2304, 36150, 12399, 9673, 25720, 27188, 9307, 3660, 6055, 8592, 38688, 
8315, 43761, 5551, 24793, 4032, 19913, 33911, 3660, 22807, 8608, 2655, 3660, 12399, 19753, 3224, 8608, 2655, 3660, 12399, 3714, 33599, 1829, 22807, 4032, 41185, 4032, 19913, 33911, 3660, 8608, 16758, 27884, 9122, 2304, 7649, 11331, 1863, 28543, 2288, 8717, 4117, 19528, 8978, 11296, 12399, 3714, 33599, 1829, 35186, 13412, 3555, 16758, 22807], "avg_logprob": -0.28140168456197945, "compression_ratio": 1.8, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 367.72, "end": 368.48, "word": "أنتي", "probability": 0.536376953125}, {"start": 368.48, "end": 368.88, "word": " اللي", "probability": 0.6160888671875}, {"start": 368.88, "end": 369.32, "word": " براكو", "probability": 0.544525146484375}, {"start": 369.32, "end": 369.76, "word": " أجلد", "probability": 0.6273193359375}, {"start": 369.76, "end": 369.88, "word": " هو", "probability": 0.96044921875}, {"start": 369.88, "end": 370.12, "word": " عبارة", "probability": 0.9737548828125}, {"start": 370.12, "end": 370.28, "word": " عن", "probability": 0.9892578125}, {"start": 370.28, "end": 371.02, "word": " مين؟", "probability": 0.9345703125}, {"start": 371.02, "end": 371.52, "word": " لثرمبل،", "probability": 0.5362345377604166}, {"start": 371.52, "end": 372.18, "word": " أبوهم", "probability": 0.9598388671875}, {"start": 372.18, "end": 373.34, "word": " مين؟", "probability": 0.9806315104166666}, {"start": 373.34, "end": 374.22, "word": " لثرمبل،", "probability": 0.8966471354166666}, {"start": 374.22, "end": 374.46, "word": " المرة", "probability": 0.813232421875}, {"start": 374.46, "end": 374.7, "word": " الفاتة", "probability": 0.6239420572916666}, {"start": 374.7, "end": 375.18, "word": " تشوفناله", "probability": 0.718798828125}, {"start": 375.18, "end": 375.36, "word": " أكم", "probability": 0.583740234375}, {"start": 375.36, "end": 375.98, "word": " وظيفة؟", "probability": 0.955859375}, {"start": 375.98, "end": 378.54, "word": " ستة،", "probability": 0.8292236328125}, {"start": 378.54, "end": 378.92, 
"word": " آه", "probability": 0.586669921875}, {"start": 378.92, "end": 379.94, "word": " ستة،", "probability": 0.861572265625}, {"start": 379.94, "end": 380.96, "word": " ماشي؟", "probability": 0.8746337890625}, {"start": 380.96, "end": 381.4, "word": " وفي", "probability": 0.77783203125}, {"start": 381.4, "end": 381.88, "word": " وظيفة", "probability": 0.983154296875}, {"start": 381.88, "end": 382.26, "word": " سابعة", "probability": 0.8025716145833334}, {"start": 382.26, "end": 382.62, "word": " كمان", "probability": 0.9860026041666666}, {"start": 382.62, "end": 382.98, "word": " حنقدر", "probability": 0.70428466796875}, {"start": 382.98, "end": 383.24, "word": " نكلم", "probability": 0.6702067057291666}, {"start": 383.24, "end": 384.08, "word": " فيها،", "probability": 0.9814453125}, {"start": 384.08, "end": 384.44, "word": " ماشي", "probability": 0.93505859375}, {"start": 384.44, "end": 384.56, "word": " يا", "probability": 0.638671875}, {"start": 384.56, "end": 384.96, "word": " شباب؟", "probability": 0.9783935546875}], "temperature": 1.0}, {"id": 17, "seek": 39556, "start": 385.66, "end": 395.56, "text": "في الجهة الانتيترومبال موجود مرتبط بالإضافة لتنين برتبط التسعة و عشرة و أحداشر و أتناشر لكن ببطء", "tokens": [41185, 25724, 3224, 3660, 2423, 7649, 31371, 2655, 2288, 20498, 3555, 6027, 3714, 29245, 23328, 3714, 43500, 3555, 9566, 20666, 28814, 11242, 31845, 3660, 5296, 2655, 1863, 9957, 4724, 43500, 3555, 9566, 16712, 3794, 27884, 4032, 6225, 8592, 25720, 4032, 5551, 24401, 33599, 2288, 4032, 5551, 2655, 1863, 33599, 2288, 44381, 4724, 3555, 9566, 38207], "avg_logprob": -0.5396205186843872, "compression_ratio": 1.5855855855855856, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 385.66, "end": 385.88, "word": "في", "probability": 0.080322265625}, {"start": 385.88, "end": 386.24, "word": " الجهة", "probability": 0.8800455729166666}, {"start": 386.24, "end": 387.72, "word": " الانتيترومبال", "probability": 0.508697509765625}, {"start": 387.72, 
"end": 389.7, "word": " موجود", "probability": 0.66455078125}, {"start": 389.7, "end": 390.14, "word": " مرتبط", "probability": 0.7764892578125}, {"start": 390.14, "end": 390.62, "word": " بالإضافة", "probability": 0.64404296875}, {"start": 390.62, "end": 391.08, "word": " لتنين", "probability": 0.68780517578125}, {"start": 391.08, "end": 391.86, "word": " برتبط", "probability": 0.66900634765625}, {"start": 391.86, "end": 392.34, "word": " التسعة", "probability": 0.78662109375}, {"start": 392.34, "end": 392.48, "word": " و", "probability": 0.7021484375}, {"start": 392.48, "end": 392.84, "word": " عشرة", "probability": 0.77001953125}, {"start": 392.84, "end": 392.96, "word": " و", "probability": 0.91357421875}, {"start": 392.96, "end": 393.4, "word": " أحداشر", "probability": 0.76727294921875}, {"start": 393.4, "end": 393.5, "word": " و", "probability": 0.912109375}, {"start": 393.5, "end": 394.14, "word": " أتناشر", "probability": 0.8244140625}, {"start": 394.14, "end": 394.84, "word": " لكن", "probability": 0.58935546875}, {"start": 394.84, "end": 395.56, "word": " ببطء", "probability": 0.51849365234375}], "temperature": 1.0}, {"id": 18, "seek": 42128, "start": 398.7, "end": 421.28, "text": "بيحتاج الى accelerator الى مسرع والمسرع هو الهيبرين هو الهيبرين هيبرين by itself has no anticoagulant effect مش هو اللي بيعمل anticoagulation هو بيسرع التفاعل اللي بيعمل anticoagulation هو ال anticoagulant", "tokens": [21292, 33753, 26108, 2423, 7578, 39889, 2423, 7578, 47524, 2288, 3615, 16070, 2304, 3794, 2288, 3615, 31439, 2423, 3224, 1829, 26890, 9957, 31439, 2423, 3224, 1829, 26890, 9957, 8032, 1829, 26890, 9957, 538, 2564, 575, 572, 2511, 2789, 559, 425, 394, 1802, 37893, 31439, 13672, 1829, 4724, 1829, 25957, 1211, 2511, 2789, 559, 2776, 31439, 4724, 1829, 3794, 2288, 3615, 16712, 5172, 995, 30241, 13672, 1829, 4724, 1829, 25957, 1211, 2511, 2789, 559, 2776, 31439, 2423, 2511, 2789, 559, 425, 394], "avg_logprob": -0.21474847088499768, "compression_ratio": 
1.9090909090909092, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 398.7, "end": 399.3, "word": "بيحتاج", "probability": 0.7862141927083334}, {"start": 399.3, "end": 399.52, "word": " الى", "probability": 0.6693115234375}, {"start": 399.52, "end": 400.32, "word": " accelerator", "probability": 0.9384765625}, {"start": 400.32, "end": 401.92, "word": " الى", "probability": 0.78564453125}, {"start": 401.92, "end": 403.4, "word": " مسرع", "probability": 0.9129231770833334}, {"start": 403.4, "end": 404.92, "word": " والمسرع", "probability": 0.90859375}, {"start": 404.92, "end": 405.18, "word": " هو", "probability": 0.94091796875}, {"start": 405.18, "end": 405.94, "word": " الهيبرين", "probability": 0.73974609375}, {"start": 405.94, "end": 406.62, "word": " هو", "probability": 0.82763671875}, {"start": 406.62, "end": 408.12, "word": " الهيبرين", "probability": 0.77626953125}, {"start": 408.12, "end": 409.4, "word": " هيبرين", "probability": 0.686279296875}, {"start": 409.4, "end": 409.66, "word": " by", "probability": 0.69091796875}, {"start": 409.66, "end": 410.34, "word": " itself", "probability": 0.814453125}, {"start": 410.34, "end": 411.18, "word": " has", "probability": 0.8095703125}, {"start": 411.18, "end": 411.56, "word": " no", "probability": 0.94384765625}, {"start": 411.56, "end": 412.64, "word": " anticoagulant", "probability": 0.93818359375}, {"start": 412.64, "end": 413.16, "word": " effect", "probability": 0.9326171875}, {"start": 413.16, "end": 413.92, "word": " مش", "probability": 0.93603515625}, {"start": 413.92, "end": 414.06, "word": " هو", "probability": 0.990234375}, {"start": 414.06, "end": 414.16, "word": " اللي", "probability": 0.763427734375}, {"start": 414.16, "end": 414.38, "word": " بيعمل", "probability": 0.8150634765625}, {"start": 414.38, "end": 415.24, "word": " anticoagulation", "probability": 0.932861328125}, {"start": 415.24, "end": 415.96, "word": " هو", "probability": 0.81494140625}, {"start": 415.96, "end": 416.48, 
"word": " بيسرع", "probability": 0.96337890625}, {"start": 416.48, "end": 417.02, "word": " التفاعل", "probability": 0.76953125}, {"start": 417.02, "end": 417.8, "word": " اللي", "probability": 0.9384765625}, {"start": 417.8, "end": 418.08, "word": " بيعمل", "probability": 0.987060546875}, {"start": 418.08, "end": 418.92, "word": " anticoagulation", "probability": 0.960693359375}, {"start": 418.92, "end": 419.28, "word": " هو", "probability": 0.9892578125}, {"start": 419.28, "end": 420.2, "word": " ال", "probability": 0.92529296875}, {"start": 420.2, "end": 421.28, "word": " anticoagulant", "probability": 0.890234375}], "temperature": 1.0}, {"id": 19, "seek": 45104, "start": 422.24, "end": 451.04, "text": "الانهيبتر الانهيبتر اللي برتبط فيه هو الانتيترومب التلاتة و برتبط فيه الهيبارين كوفاكتور 2 برضه بيسمو الانتيترومب التلاتة الهيبارين كوفاكتور 1 هيبارين كوفاكتور 1 عشان يميزوا بينه بين هيبارين كوفاكتور 2 ماشي يبقى بيرتبط ب أكسلاراتر و ال أكسلاراتر هو عبارة عن الهيبارين و الهيبارين has no anticoagulant effect و أنما ال effect", "tokens": [6027, 7649, 3224, 1829, 3555, 2655, 2288, 2423, 7649, 3224, 1829, 3555, 2655, 2288, 13672, 1829, 4724, 43500, 3555, 9566, 8978, 3224, 31439, 2423, 7649, 31371, 2655, 2288, 20498, 3555, 16712, 1211, 9307, 3660, 4032, 4724, 43500, 3555, 9566, 8978, 3224, 2423, 3224, 1829, 3555, 9640, 9957, 9122, 38688, 995, 4117, 2655, 13063, 568, 4724, 43042, 3224, 4724, 1829, 38251, 2407, 2423, 7649, 31371, 2655, 2288, 20498, 3555, 16712, 1211, 9307, 3660, 2423, 3224, 1829, 3555, 9640, 9957, 9122, 38688, 995, 4117, 2655, 13063, 502, 8032, 1829, 3555, 9640, 9957, 9122, 38688, 995, 4117, 2655, 13063, 502, 6225, 8592, 7649, 7251, 2304, 1829, 11622, 14407, 49374, 3224, 49374, 8032, 1829, 3555, 9640, 9957, 9122, 38688, 995, 4117, 2655, 13063, 568, 3714, 33599, 1829, 7251, 3555, 4587, 7578, 4724, 13546, 2655, 3555, 9566, 4724, 5551, 4117, 3794, 1211, 9640, 9307, 2288, 4032, 2423, 5551, 4117, 3794, 1211, 9640, 9307, 2288, 31439, 6225, 3555, 9640, 3660, 
18871, 2423, 3224, 1829, 3555, 9640, 9957, 4032, 2423, 3224, 1829, 3555, 9640, 9957, 575, 572, 2511, 2789, 559, 425, 394, 1802, 4032, 14739, 15042, 2423, 1802], "avg_logprob": -0.2592719860456802, "compression_ratio": 2.5545454545454547, "no_speech_prob": 1.0728836059570312e-06, "words": [{"start": 422.24, "end": 423.08, "word": "الانهيبتر", "probability": 0.6817278180803571}, {"start": 423.08, "end": 424.42, "word": " الانهيبتر", "probability": 0.7445417131696429}, {"start": 424.42, "end": 424.58, "word": " اللي", "probability": 0.70947265625}, {"start": 424.58, "end": 424.94, "word": " برتبط", "probability": 0.6947021484375}, {"start": 424.94, "end": 425.32, "word": " فيه", "probability": 0.929443359375}, {"start": 425.32, "end": 425.72, "word": " هو", "probability": 0.78076171875}, {"start": 425.72, "end": 426.42, "word": " الانتيترومب", "probability": 0.6305803571428571}, {"start": 426.42, "end": 427.0, "word": " التلاتة", "probability": 0.8651123046875}, {"start": 427.0, "end": 427.46, "word": " و", "probability": 0.27490234375}, {"start": 427.46, "end": 427.8, "word": " برتبط", "probability": 0.84844970703125}, {"start": 427.8, "end": 427.98, "word": " فيه", "probability": 0.94873046875}, {"start": 427.98, "end": 428.22, "word": " الهيبارين", "probability": 0.6337483723958334}, {"start": 428.22, "end": 428.78, "word": " كوفاكتور", "probability": 0.8001708984375}, {"start": 428.78, "end": 429.06, "word": " 2", "probability": 0.318603515625}, {"start": 429.06, "end": 430.08, "word": " برضه", "probability": 0.95947265625}, {"start": 430.08, "end": 430.82, "word": " بيسمو", "probability": 0.790771484375}, {"start": 430.82, "end": 431.44, "word": " الانتيترومب", "probability": 0.9091796875}, {"start": 431.44, "end": 431.9, "word": " التلاتة", "probability": 0.969482421875}, {"start": 431.9, "end": 432.28, "word": " الهيبارين", "probability": 0.8306172688802084}, {"start": 432.28, "end": 432.86, "word": " كوفاكتور", "probability": 0.9829915364583334}, {"start": 
432.86, "end": 433.18, "word": " 1", "probability": 0.63330078125}, {"start": 433.18, "end": 434.06, "word": " هيبارين", "probability": 0.7616943359375}, {"start": 434.06, "end": 435.12, "word": " كوفاكتور", "probability": 0.9852701822916666}, {"start": 435.12, "end": 435.36, "word": " 1", "probability": 0.75537109375}, {"start": 435.36, "end": 435.7, "word": " عشان", "probability": 0.9425455729166666}, {"start": 435.7, "end": 436.16, "word": " يميزوا", "probability": 0.96259765625}, {"start": 436.16, "end": 436.4, "word": " بينه", "probability": 0.7880859375}, {"start": 436.4, "end": 436.68, "word": " بين", "probability": 0.74267578125}, {"start": 436.68, "end": 437.42, "word": " هيبارين", "probability": 0.82236328125}, {"start": 437.42, "end": 437.96, "word": " كوفاكتور", "probability": 0.9931640625}, {"start": 437.96, "end": 438.26, "word": " 2", "probability": 0.88525390625}, {"start": 438.26, "end": 439.82, "word": " ماشي", "probability": 0.880859375}, {"start": 439.82, "end": 440.42, "word": " يبقى", "probability": 0.8212890625}, {"start": 440.42, "end": 441.02, "word": " بيرتبط", "probability": 0.8826171875}, {"start": 441.02, "end": 442.08, "word": " ب", "probability": 0.93359375}, {"start": 442.08, "end": 443.14, "word": " أكسلاراتر", "probability": 0.6880841936383929}, {"start": 443.14, "end": 443.72, "word": " و", "probability": 0.354736328125}, {"start": 443.72, "end": 443.86, "word": " ال", "probability": 0.39013671875}, {"start": 443.86, "end": 444.52, "word": " أكسلاراتر", "probability": 0.9136788504464286}, {"start": 444.52, "end": 444.7, "word": " هو", "probability": 0.96630859375}, {"start": 444.7, "end": 445.04, "word": " عبارة", "probability": 0.8802490234375}, {"start": 445.04, "end": 445.36, "word": " عن", "probability": 0.9970703125}, {"start": 445.36, "end": 446.68, "word": " الهيبارين", "probability": 0.9610188802083334}, {"start": 446.68, "end": 447.3, "word": " و", "probability": 0.60302734375}, {"start": 447.3, "end": 447.76, "word": " 
الهيبارين", "probability": 0.975341796875}, {"start": 447.76, "end": 448.02, "word": " has", "probability": 0.103759765625}, {"start": 448.02, "end": 448.26, "word": " no", "probability": 0.92626953125}, {"start": 448.26, "end": 449.22, "word": " anticoagulant", "probability": 0.94931640625}, {"start": 449.22, "end": 449.78, "word": " effect", "probability": 0.931640625}, {"start": 449.78, "end": 450.3, "word": " و", "probability": 0.466064453125}, {"start": 450.3, "end": 450.54, "word": " أنما", "probability": 0.6246337890625}, {"start": 450.54, "end": 450.66, "word": " ال", "probability": 0.9248046875}, {"start": 450.66, "end": 451.04, "word": " effect", "probability": 0.91552734375}], "temperature": 1.0}, {"id": 20, "seek": 48076, "start": 454.04, "end": 480.76, "text": "من ال inhibitor الطبيعي أو ال anticoagulant الطبيعي كيف يعمل؟ كيف يعمل؟ انتبهوا عليا اللوح اللوح زى ما انتوا شايفين في عندي antithrombin تلاتة ده هو عليه two binding sites عليه ايه ايه؟ two binding sites واحدة ليسم ترتبط بال", "tokens": [27842, 2423, 20406, 3029, 41950, 21292, 3615, 1829, 34051, 2423, 2511, 2789, 559, 425, 394, 41950, 21292, 3615, 1829, 9122, 33911, 7251, 25957, 1211, 22807, 9122, 33911, 7251, 25957, 1211, 22807, 16472, 2655, 3555, 3224, 14407, 11203, 25528, 13672, 2407, 5016, 13672, 2407, 5016, 30767, 7578, 19446, 16472, 2655, 14407, 13412, 995, 33911, 9957, 8978, 18871, 16254, 2511, 355, 81, 3548, 259, 6055, 1211, 9307, 3660, 11778, 3224, 31439, 47356, 732, 17359, 7533, 47356, 1975, 1829, 3224, 1975, 1829, 3224, 22807, 732, 17359, 7533, 36764, 24401, 3660, 32239, 38251, 6055, 43500, 3555, 9566, 20666], "avg_logprob": -0.25789473558727066, "compression_ratio": 1.760204081632653, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 454.04, "end": 454.6, "word": "من", "probability": 0.5693359375}, {"start": 454.6, "end": 455.14, "word": " ال", "probability": 0.88818359375}, {"start": 455.14, "end": 456.42, "word": " inhibitor", "probability": 0.704345703125}, {"start": 
456.42, "end": 456.92, "word": " الطبيعي", "probability": 0.9208984375}, {"start": 456.92, "end": 457.02, "word": " أو", "probability": 0.6552734375}, {"start": 457.02, "end": 457.28, "word": " ال", "probability": 0.82958984375}, {"start": 457.28, "end": 458.14, "word": " anticoagulant", "probability": 0.866748046875}, {"start": 458.14, "end": 458.72, "word": " الطبيعي", "probability": 0.970458984375}, {"start": 458.72, "end": 461.12, "word": " كيف", "probability": 0.934814453125}, {"start": 461.12, "end": 462.84, "word": " يعمل؟", "probability": 0.7542724609375}, {"start": 462.84, "end": 463.26, "word": " كيف", "probability": 0.897705078125}, {"start": 463.26, "end": 465.54, "word": " يعمل؟", "probability": 0.9412841796875}, {"start": 465.54, "end": 466.04, "word": " انتبهوا", "probability": 0.849072265625}, {"start": 466.04, "end": 466.24, "word": " عليا", "probability": 0.59454345703125}, {"start": 466.24, "end": 466.72, "word": " اللوح", "probability": 0.8352864583333334}, {"start": 466.72, "end": 467.8, "word": " اللوح", "probability": 0.8994140625}, {"start": 467.8, "end": 468.18, "word": " زى", "probability": 0.853271484375}, {"start": 468.18, "end": 468.24, "word": " ما", "probability": 0.97021484375}, {"start": 468.24, "end": 468.4, "word": " انتوا", "probability": 0.7923177083333334}, {"start": 468.4, "end": 468.82, "word": " شايفين", "probability": 0.991943359375}, {"start": 468.82, "end": 469.08, "word": " في", "probability": 0.521484375}, {"start": 469.08, "end": 469.4, "word": " عندي", "probability": 0.89599609375}, {"start": 469.4, "end": 470.26, "word": " antithrombin", "probability": 0.769482421875}, {"start": 470.26, "end": 470.86, "word": " تلاتة", "probability": 0.8397216796875}, {"start": 470.86, "end": 471.16, "word": " ده", "probability": 0.865478515625}, {"start": 471.16, "end": 471.44, "word": " هو", "probability": 0.258056640625}, {"start": 471.44, "end": 472.88, "word": " عليه", "probability": 0.60986328125}, {"start": 472.88, "end": 
473.16, "word": " two", "probability": 0.81787109375}, {"start": 473.16, "end": 473.68, "word": " binding", "probability": 0.88134765625}, {"start": 473.68, "end": 474.26, "word": " sites", "probability": 0.6376953125}, {"start": 474.26, "end": 474.76, "word": " عليه", "probability": 0.85546875}, {"start": 474.76, "end": 475.08, "word": " ايه", "probability": 0.7980143229166666}, {"start": 475.08, "end": 475.4, "word": " ايه؟", "probability": 0.63507080078125}, {"start": 475.4, "end": 476.18, "word": " two", "probability": 0.734375}, {"start": 476.18, "end": 476.58, "word": " binding", "probability": 0.89306640625}, {"start": 476.58, "end": 477.06, "word": " sites", "probability": 0.927734375}, {"start": 477.06, "end": 477.96, "word": " واحدة", "probability": 0.9205729166666666}, {"start": 477.96, "end": 479.92, "word": " ليسم", "probability": 0.35418701171875}, {"start": 479.92, "end": 480.54, "word": " ترتبط", "probability": 0.970703125}, {"start": 480.54, "end": 480.76, "word": " بال", "probability": 0.9658203125}], "temperature": 1.0}, {"id": 21, "seek": 50399, "start": 481.97, "end": 503.99, "text": "التانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين والثانية بيرتبط بالهيبرين", "tokens": [6027, 2655, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 
16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957, 16070, 12984, 7649, 10632, 4724, 1829, 43500, 3555, 9566, 20666, 3224, 1829, 26890, 9957], "avg_logprob": -0.11743055449591742, "compression_ratio": 12.203125, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 481.97, "end": 482.59, "word": "التانية", "probability": 0.513397216796875}, {"start": 482.59, "end": 482.59, "word": " بيرتبط", "probability": 0.60296630859375}, {"start": 482.59, "end": 483.49, "word": " بالهيبرين", "probability": 0.6921630859375}, {"start": 483.49, "end": 484.95, "word": " والثانية", "probability": 0.72845458984375}, {"start": 484.95, "end": 485.23, "word": " بيرتبط", "probability": 0.8181640625}, {"start": 485.23, "end": 485.23, "word": " بالهيبرين", "probability": 0.8215576171875}, {"start": 485.23, "end": 485.23, "word": " والثانية", "probability": 0.76470947265625}, {"start": 485.23, "end": 485.23, "word": " بيرتبط", "probability": 0.91240234375}, {"start": 485.23, "end": 485.79, "word": " بالهيبرين", "probability": 0.93427734375}, {"start": 485.79, "end": 486.55, "word": " والثانية", "probability": 0.912109375}, {"start": 486.55, "end": 
486.55, "word": " بيرتبط", "probability": 0.91123046875}, {"start": 486.55, "end": 486.55, "word": " بالهيبرين", "probability": 0.97529296875}, {"start": 486.55, "end": 487.09, "word": " والثانية", "probability": 0.9390869140625}, {"start": 487.09, "end": 487.57, "word": " بيرتبط", "probability": 0.9052734375}, {"start": 487.57, "end": 488.07, "word": " بالهيبرين", "probability": 0.98349609375}, {"start": 488.07, "end": 488.07, "word": " والثانية", "probability": 0.951904296875}, {"start": 488.07, "end": 488.07, "word": " بيرتبط", "probability": 0.92158203125}, {"start": 488.07, "end": 488.79, "word": " بالهيبرين", "probability": 0.98935546875}, {"start": 488.79, "end": 488.99, "word": " والثانية", "probability": 0.955810546875}, {"start": 488.99, "end": 489.41, "word": " بيرتبط", "probability": 0.94765625}, {"start": 489.41, "end": 490.21, "word": " بالهيبرين", "probability": 0.99326171875}, {"start": 490.21, "end": 490.61, "word": " والثانية", "probability": 0.9549560546875}, {"start": 490.61, "end": 490.61, "word": " بيرتبط", "probability": 0.95234375}, {"start": 490.61, "end": 491.89, "word": " بالهيبرين", "probability": 0.994921875}, {"start": 491.89, "end": 493.39, "word": " والثانية", "probability": 0.9503173828125}, {"start": 493.39, "end": 494.43, "word": " بيرتبط", "probability": 0.94111328125}, {"start": 494.43, "end": 495.17, "word": " بالهيبرين", "probability": 0.9958984375}, {"start": 495.17, "end": 495.31, "word": " والثانية", "probability": 0.9471435546875}, {"start": 495.31, "end": 497.33, "word": " بيرتبط", "probability": 0.9244140625}, {"start": 497.33, "end": 497.63, "word": " بالهيبرين", "probability": 0.9962890625}, {"start": 497.63, "end": 497.63, "word": " والثانية", "probability": 0.94189453125}, {"start": 497.63, "end": 497.73, "word": " بيرتبط", "probability": 0.92021484375}, {"start": 497.73, "end": 497.73, "word": " بالهيبرين", "probability": 0.9962890625}, {"start": 497.73, "end": 497.73, "word": " والثانية", "probability": 
0.9373779296875}, {"start": 497.73, "end": 497.99, "word": " بيرتبط", "probability": 0.9189453125}, {"start": 497.99, "end": 497.99, "word": " بالهيبرين", "probability": 0.99619140625}, {"start": 497.99, "end": 497.99, "word": " والثانية", "probability": 0.9384765625}, {"start": 497.99, "end": 498.17, "word": " بيرتبط", "probability": 0.92822265625}, {"start": 498.17, "end": 498.69, "word": " بالهيبرين", "probability": 0.99541015625}, {"start": 498.69, "end": 499.59, "word": " والثانية", "probability": 0.938720703125}, {"start": 499.59, "end": 499.99, "word": " بيرتبط", "probability": 0.94677734375}, {"start": 499.99, "end": 500.71, "word": " بالهيبرين", "probability": 0.9943359375}, {"start": 500.71, "end": 501.11, "word": " والثانية", "probability": 0.93310546875}, {"start": 501.11, "end": 501.11, "word": " بيرتبط", "probability": 0.9638671875}, {"start": 501.11, "end": 501.61, "word": " بالهيبرين", "probability": 0.99267578125}, {"start": 501.61, "end": 501.65, "word": " والثانية", "probability": 0.9320068359375}, {"start": 501.65, "end": 501.99, "word": " بيرتبط", "probability": 0.9755859375}, {"start": 501.99, "end": 503.99, "word": " بالهيبرين", "probability": 0.98984375}], "temperature": 1.0}, {"id": 22, "seek": 53412, "start": 510.28, "end": 534.12, "text": "الـ Antithrombin سوف يقوم بـBind أولاً مع الهيبرين، سوف يقوم بـBind أولاً مع الهيبرين، ماشي؟ ليش؟ قولنا لتسرع الاتجاه، الـ Antithrombin برتبط بالسيريني بروتييز بدون الهيبرين، نعم برتبط، لكن بيعمل ببطء شديد، it takes", "tokens": [6027, 39184, 5130, 355, 81, 3548, 259, 8608, 38688, 7251, 4587, 20498, 4724, 39184, 33, 471, 5551, 12610, 995, 14111, 20449, 2423, 3224, 1829, 26890, 9957, 12399, 8608, 38688, 7251, 4587, 20498, 4724, 39184, 33, 471, 5551, 12610, 995, 14111, 20449, 2423, 3224, 1829, 26890, 9957, 12399, 3714, 33599, 1829, 22807, 32239, 8592, 22807, 12174, 12610, 8315, 5296, 2655, 3794, 2288, 3615, 2423, 9307, 7435, 40294, 12399, 2423, 39184, 5130, 355, 81, 3548, 259, 4724, 43500, 3555, 9566, 
20666, 3794, 13546, 9957, 1829, 4724, 32887, 31371, 1829, 11622, 47525, 11536, 2423, 3224, 1829, 26890, 9957, 12399, 8717, 25957, 4724, 43500, 3555, 9566, 12399, 44381, 4724, 1829, 25957, 1211, 4724, 3555, 9566, 38207, 13412, 16254, 3215, 12399, 309, 2516], "avg_logprob": -0.3658088370531547, "compression_ratio": 1.914438502673797, "no_speech_prob": 2.0265579223632812e-06, "words": [{"start": 510.28, "end": 510.62, "word": "الـ", "probability": 0.4178466796875}, {"start": 510.62, "end": 511.44, "word": " Antithrombin", "probability": 0.7935546875}, {"start": 511.44, "end": 512.34, "word": " سوف", "probability": 0.211090087890625}, {"start": 512.34, "end": 512.42, "word": " يقوم", "probability": 0.5500895182291666}, {"start": 512.42, "end": 513.0, "word": " بـBind", "probability": 0.4237518310546875}, {"start": 513.0, "end": 513.96, "word": " أولاً", "probability": 0.634857177734375}, {"start": 513.96, "end": 514.24, "word": " مع", "probability": 0.5732421875}, {"start": 514.24, "end": 515.1, "word": " الهيبرين،", "probability": 0.6440327962239584}, {"start": 515.1, "end": 515.92, "word": " سوف", "probability": 0.80126953125}, {"start": 515.92, "end": 515.94, "word": " يقوم", "probability": 0.966796875}, {"start": 515.94, "end": 516.38, "word": " بـBind", "probability": 0.8990478515625}, {"start": 516.38, "end": 517.42, "word": " أولاً", "probability": 0.990966796875}, {"start": 517.42, "end": 517.54, "word": " مع", "probability": 0.9921875}, {"start": 517.54, "end": 518.6, "word": " الهيبرين،", "probability": 0.9417317708333334}, {"start": 518.6, "end": 520.16, "word": " ماشي؟", "probability": 0.755523681640625}, {"start": 520.16, "end": 521.12, "word": " ليش؟", "probability": 0.8935546875}, {"start": 521.12, "end": 521.48, "word": " قولنا", "probability": 0.7357584635416666}, {"start": 521.48, "end": 522.74, "word": " لتسرع", "probability": 0.4421630859375}, {"start": 522.74, "end": 524.56, "word": " الاتجاه،", "probability": 0.3866943359375}, {"start": 524.56, 
"end": 525.02, "word": " الـ", "probability": 0.810302734375}, {"start": 525.02, "end": 525.7, "word": " Antithrombin", "probability": 0.9201171875}, {"start": 525.7, "end": 526.22, "word": " برتبط", "probability": 0.93212890625}, {"start": 526.22, "end": 527.22, "word": " بالسيريني", "probability": 0.713671875}, {"start": 527.22, "end": 527.76, "word": " بروتييز", "probability": 0.77744140625}, {"start": 527.76, "end": 528.06, "word": " بدون", "probability": 0.959716796875}, {"start": 528.06, "end": 528.72, "word": " الهيبرين،", "probability": 0.9234212239583334}, {"start": 528.72, "end": 528.86, "word": " نعم", "probability": 0.9775390625}, {"start": 528.86, "end": 529.92, "word": " برتبط،", "probability": 0.8501953125}, {"start": 529.92, "end": 530.44, "word": " لكن", "probability": 0.982421875}, {"start": 530.44, "end": 531.72, "word": " بيعمل", "probability": 0.94580078125}, {"start": 531.72, "end": 532.34, "word": " ببطء", "probability": 0.981689453125}, {"start": 532.34, "end": 533.6, "word": " شديد،", "probability": 0.9422607421875}, {"start": 533.6, "end": 533.7, "word": " it", "probability": 0.634765625}, {"start": 533.7, "end": 534.12, "word": " takes", "probability": 0.75146484375}], "temperature": 1.0}, {"id": 23, "seek": 56094, "start": 537.08, "end": 560.94, "text": "it takes minutes instead of years second بيقولوا ان الهيبارين بيسرع can increase the rate of reaction الى أكتر من الف وبعض المرات بتقول الفين ضعف الفين ضعف وطبيعي الفين ضعف معناته السرعة سارتير زادت الفين مرة", "tokens": [270, 2516, 2077, 2602, 295, 924, 1150, 4724, 1829, 39648, 14407, 16472, 2423, 3224, 1829, 3555, 9640, 9957, 4724, 1829, 3794, 2288, 3615, 393, 3488, 264, 3314, 295, 5480, 2423, 7578, 5551, 4117, 2655, 2288, 9154, 27188, 46599, 3615, 11242, 9673, 2288, 9307, 39894, 39648, 27188, 9957, 48812, 3615, 5172, 27188, 9957, 48812, 3615, 5172, 4032, 9566, 21292, 3615, 1829, 27188, 9957, 48812, 3615, 5172, 20449, 8315, 47395, 21136, 2288, 27884, 8608, 9640, 2655, 13546, 30767, 
18513, 2655, 27188, 9957, 3714, 25720], "avg_logprob": -0.25301204244774506, "compression_ratio": 1.6307692307692307, "no_speech_prob": 2.0265579223632812e-06, "words": [{"start": 537.08, "end": 537.72, "word": "it", "probability": 0.183837890625}, {"start": 537.72, "end": 538.26, "word": " takes", "probability": 0.7470703125}, {"start": 538.26, "end": 538.7, "word": " minutes", "probability": 0.8662109375}, {"start": 538.7, "end": 539.08, "word": " instead", "probability": 0.509765625}, {"start": 539.08, "end": 539.36, "word": " of", "probability": 0.97509765625}, {"start": 539.36, "end": 539.72, "word": " years", "probability": 0.332763671875}, {"start": 539.72, "end": 540.34, "word": " second", "probability": 0.280517578125}, {"start": 540.34, "end": 541.78, "word": " بيقولوا", "probability": 0.81256103515625}, {"start": 541.78, "end": 541.98, "word": " ان", "probability": 0.427490234375}, {"start": 541.98, "end": 542.82, "word": " الهيبارين", "probability": 0.7474772135416666}, {"start": 542.82, "end": 544.4, "word": " بيسرع", "probability": 0.975}, {"start": 544.4, "end": 545.18, "word": " can", "probability": 0.51123046875}, {"start": 545.18, "end": 545.86, "word": " increase", "probability": 0.8974609375}, {"start": 545.86, "end": 546.06, "word": " the", "probability": 0.8115234375}, {"start": 546.06, "end": 546.28, "word": " rate", "probability": 0.96826171875}, {"start": 546.28, "end": 546.42, "word": " of", "probability": 0.966796875}, {"start": 546.42, "end": 546.96, "word": " reaction", "probability": 0.90380859375}, {"start": 546.96, "end": 548.14, "word": " الى", "probability": 0.68701171875}, {"start": 548.14, "end": 549.36, "word": " أكتر", "probability": 0.918212890625}, {"start": 549.36, "end": 549.6, "word": " من", "probability": 0.99169921875}, {"start": 549.6, "end": 550.16, "word": " الف", "probability": 0.7744140625}, {"start": 550.16, "end": 550.82, "word": " وبعض", "probability": 0.684326171875}, {"start": 550.82, "end": 551.08, "word": " 
المرات", "probability": 0.86865234375}, {"start": 551.08, "end": 551.38, "word": " بتقول", "probability": 0.529541015625}, {"start": 551.38, "end": 551.98, "word": " الفين", "probability": 0.745361328125}, {"start": 551.98, "end": 552.54, "word": " ضعف", "probability": 0.9156901041666666}, {"start": 552.54, "end": 554.14, "word": " الفين", "probability": 0.802001953125}, {"start": 554.14, "end": 554.82, "word": " ضعف", "probability": 0.99560546875}, {"start": 554.82, "end": 555.92, "word": " وطبيعي", "probability": 0.82900390625}, {"start": 555.92, "end": 556.62, "word": " الفين", "probability": 0.9873046875}, {"start": 556.62, "end": 557.12, "word": " ضعف", "probability": 0.9954427083333334}, {"start": 557.12, "end": 557.8, "word": " معناته", "probability": 0.9049479166666666}, {"start": 557.8, "end": 558.16, "word": " السرعة", "probability": 0.77880859375}, {"start": 558.16, "end": 558.8, "word": " سارتير", "probability": 0.653076171875}, {"start": 558.8, "end": 560.12, "word": " زادت", "probability": 0.96630859375}, {"start": 560.12, "end": 560.58, "word": " الفين", "probability": 0.95166015625}, {"start": 560.58, "end": 560.94, "word": " مرة", "probability": 0.94970703125}], "temperature": 1.0}, {"id": 24, "seek": 58767, "start": 562.54, "end": 587.68, "text": "التفاعل بيكون عالي جدا وسريع جدا وبيعمل inhibition بصراحة يبت .. 
يبتالي نعيد من أول ال Hiparin ال antithrombin تلاتة و ال first bind with Hiparin و بعد ما يرتبط بال Hiparin بيرتبط بال Serine Protease بيرتبط بمين؟ بال Serine Protease", "tokens": [6027, 2655, 5172, 995, 30241, 4724, 1829, 30544, 6225, 6027, 1829, 10874, 28259, 46952, 16572, 3615, 10874, 28259, 4032, 21292, 25957, 1211, 20406, 849, 4724, 9381, 23557, 5016, 3660, 7251, 3555, 2655, 4386, 7251, 3555, 2655, 6027, 1829, 8717, 3615, 25708, 9154, 5551, 12610, 2423, 2421, 2181, 259, 2423, 2511, 355, 81, 3548, 259, 6055, 1211, 9307, 3660, 4032, 2423, 700, 14786, 365, 2421, 2181, 259, 4032, 39182, 19446, 7251, 43500, 3555, 9566, 20666, 2421, 2181, 259, 4724, 13546, 2655, 3555, 9566, 20666, 4210, 533, 43371, 651, 4724, 13546, 2655, 3555, 9566, 4724, 2304, 9957, 22807, 20666, 4210, 533, 43371, 651], "avg_logprob": -0.27619483979309306, "compression_ratio": 1.715736040609137, "no_speech_prob": 5.781650543212891e-06, "words": [{"start": 562.54, "end": 563.0, "word": "التفاعل", "probability": 0.765625}, {"start": 563.0, "end": 563.38, "word": " بيكون", "probability": 0.8095703125}, {"start": 563.38, "end": 564.8, "word": " عالي", "probability": 0.9091796875}, {"start": 564.8, "end": 565.12, "word": " جدا", "probability": 0.97509765625}, {"start": 565.12, "end": 565.68, "word": " وسريع", "probability": 0.7815755208333334}, {"start": 565.68, "end": 566.14, "word": " جدا", "probability": 0.994873046875}, {"start": 566.14, "end": 567.56, "word": " وبيعمل", "probability": 0.8341064453125}, {"start": 567.56, "end": 568.42, "word": " inhibition", "probability": 0.7337646484375}, {"start": 568.42, "end": 570.18, "word": " بصراحة", "probability": 0.87275390625}, {"start": 570.18, "end": 570.52, "word": " يبت", "probability": 0.5367838541666666}, {"start": 570.52, "end": 570.52, "word": " ..", "probability": 0.2303466796875}, {"start": 570.52, "end": 571.36, "word": " يبتالي", "probability": 0.719189453125}, {"start": 571.36, "end": 571.78, "word": " نعيد", "probability": 
0.8352864583333334}, {"start": 571.78, "end": 571.96, "word": " من", "probability": 0.978515625}, {"start": 571.96, "end": 572.34, "word": " أول", "probability": 0.897705078125}, {"start": 572.34, "end": 573.22, "word": " ال", "probability": 0.958984375}, {"start": 573.22, "end": 574.16, "word": " Hiparin", "probability": 0.4989827473958333}, {"start": 574.16, "end": 574.78, "word": " ال", "probability": 0.85498046875}, {"start": 574.78, "end": 575.58, "word": " antithrombin", "probability": 0.683935546875}, {"start": 575.58, "end": 575.96, "word": " تلاتة", "probability": 0.79998779296875}, {"start": 575.96, "end": 576.12, "word": " و", "probability": 0.4052734375}, {"start": 576.12, "end": 576.26, "word": " ال", "probability": 0.71240234375}, {"start": 576.26, "end": 576.54, "word": " first", "probability": 0.4375}, {"start": 576.54, "end": 576.92, "word": " bind", "probability": 0.708984375}, {"start": 576.92, "end": 577.14, "word": " with", "probability": 0.857421875}, {"start": 577.14, "end": 577.92, "word": " Hiparin", "probability": 0.8307291666666666}, {"start": 577.92, "end": 579.14, "word": " و", "probability": 0.69482421875}, {"start": 579.14, "end": 579.62, "word": " بعد", "probability": 0.89111328125}, {"start": 579.62, "end": 580.34, "word": " ما", "probability": 0.9775390625}, {"start": 580.34, "end": 581.02, "word": " يرتبط", "probability": 0.9803466796875}, {"start": 581.02, "end": 581.2, "word": " بال", "probability": 0.97119140625}, {"start": 581.2, "end": 581.86, "word": " Hiparin", "probability": 0.7991536458333334}, {"start": 581.86, "end": 582.76, "word": " بيرتبط", "probability": 0.93916015625}, {"start": 582.76, "end": 583.04, "word": " بال", "probability": 0.90869140625}, {"start": 583.04, "end": 583.58, "word": " Serine", "probability": 0.4324951171875}, {"start": 583.58, "end": 584.3, "word": " Protease", "probability": 0.75244140625}, {"start": 584.3, "end": 584.96, "word": " بيرتبط", "probability": 0.9763671875}, {"start": 584.96, 
"end": 585.78, "word": " بمين؟", "probability": 0.904296875}, {"start": 585.78, "end": 586.36, "word": " بال", "probability": 0.720703125}, {"start": 586.36, "end": 586.92, "word": " Serine", "probability": 0.97265625}, {"start": 586.92, "end": 587.68, "word": " Protease", "probability": 0.926025390625}], "temperature": 1.0}, {"id": 25, "seek": 61772, "start": 588.36, "end": 617.72, "text": "بمجرد ما يرتبط بالـ Serine Protease الهيبارين بيفك الهيبارين بيغلق ماشي؟ بمجرد ما يرتبط بالـ Serine Protease الهيبارين قد دوره بينسحب بيسيب الاش ال Antithrombin و الأن بيضل عندنا Complex بيتكوّن من مين؟ 100% ال Antithrombin و Serine Protease ال Serine Protease هو ال enzyme", "tokens": [3555, 2304, 7435, 2288, 3215, 19446, 7251, 43500, 3555, 9566, 20666, 39184, 4210, 533, 43371, 651, 2423, 3224, 1829, 3555, 9640, 9957, 4724, 33911, 4117, 2423, 3224, 1829, 3555, 9640, 9957, 4724, 1829, 17082, 1211, 4587, 3714, 33599, 1829, 22807, 4724, 2304, 7435, 2288, 3215, 19446, 7251, 43500, 3555, 9566, 20666, 39184, 4210, 533, 43371, 651, 2423, 3224, 1829, 3555, 9640, 9957, 12174, 3215, 11778, 13063, 3224, 4724, 1829, 1863, 3794, 5016, 3555, 4724, 1829, 3794, 1829, 3555, 2423, 33599, 2423, 5130, 355, 81, 3548, 259, 4032, 16247, 1863, 4724, 1829, 11242, 1211, 43242, 8315, 41184, 4724, 36081, 4117, 2407, 11703, 1863, 9154, 3714, 9957, 22807, 2319, 4, 2423, 5130, 355, 81, 3548, 259, 4032, 4210, 533, 43371, 651, 2423, 4210, 533, 43371, 651, 31439, 2423, 24521], "avg_logprob": -0.2200927793746814, "compression_ratio": 1.975609756097561, "no_speech_prob": 0.0, "words": [{"start": 588.36, "end": 589.3, "word": "بمجرد", "probability": 0.94482421875}, {"start": 589.3, "end": 589.42, "word": " ما", "probability": 0.9404296875}, {"start": 589.42, "end": 589.98, "word": " يرتبط", "probability": 0.959228515625}, {"start": 589.98, "end": 590.26, "word": " بالـ", "probability": 0.6405029296875}, {"start": 590.26, "end": 590.6, "word": " Serine", "probability": 0.745849609375}, {"start": 590.6, "end": 
591.4, "word": " Protease", "probability": 0.6435546875}, {"start": 591.4, "end": 591.94, "word": " الهيبارين", "probability": 0.7454020182291666}, {"start": 591.94, "end": 592.46, "word": " بيفك", "probability": 0.518798828125}, {"start": 592.46, "end": 593.76, "word": " الهيبارين", "probability": 0.9193522135416666}, {"start": 593.76, "end": 594.38, "word": " بيغلق", "probability": 0.9}, {"start": 594.38, "end": 596.32, "word": " ماشي؟", "probability": 0.71197509765625}, {"start": 596.32, "end": 597.04, "word": " بمجرد", "probability": 0.97958984375}, {"start": 597.04, "end": 597.18, "word": " ما", "probability": 0.98046875}, {"start": 597.18, "end": 597.76, "word": " يرتبط", "probability": 0.9617919921875}, {"start": 597.76, "end": 598.08, "word": " بالـ", "probability": 0.87841796875}, {"start": 598.08, "end": 598.52, "word": " Serine", "probability": 0.949951171875}, {"start": 598.52, "end": 599.56, "word": " Protease", "probability": 0.940673828125}, {"start": 599.56, "end": 600.64, "word": " الهيبارين", "probability": 0.9861653645833334}, {"start": 600.64, "end": 600.88, "word": " قد", "probability": 0.89111328125}, {"start": 600.88, "end": 601.48, "word": " دوره", "probability": 0.81201171875}, {"start": 601.48, "end": 602.98, "word": " بينسحب", "probability": 0.750244140625}, {"start": 602.98, "end": 604.04, "word": " بيسيب", "probability": 0.9654296875}, {"start": 604.04, "end": 604.62, "word": " الاش", "probability": 0.5897216796875}, {"start": 604.62, "end": 604.82, "word": " ال", "probability": 0.90380859375}, {"start": 604.82, "end": 605.62, "word": " Antithrombin", "probability": 0.697998046875}, {"start": 605.62, "end": 606.48, "word": " و", "probability": 0.49560546875}, {"start": 606.48, "end": 606.8, "word": " الأن", "probability": 0.6458740234375}, {"start": 606.8, "end": 607.14, "word": " بيضل", "probability": 0.7576904296875}, {"start": 607.14, "end": 607.54, "word": " عندنا", "probability": 0.88720703125}, {"start": 607.54, "end": 608.24, 
"word": " Complex", "probability": 0.53857421875}, {"start": 608.24, "end": 609.4, "word": " بيتكوّن", "probability": 0.7812093098958334}, {"start": 609.4, "end": 609.56, "word": " من", "probability": 0.99267578125}, {"start": 609.56, "end": 610.1, "word": " مين؟", "probability": 0.8844401041666666}, {"start": 610.1, "end": 611.8, "word": " 100", "probability": 0.62255859375}, {"start": 611.8, "end": 612.26, "word": "%", "probability": 0.94873046875}, {"start": 612.26, "end": 612.42, "word": " ال", "probability": 0.63818359375}, {"start": 612.42, "end": 613.62, "word": " Antithrombin", "probability": 0.8619140625}, {"start": 613.62, "end": 613.74, "word": " و", "probability": 0.7939453125}, {"start": 613.74, "end": 614.16, "word": " Serine", "probability": 0.79296875}, {"start": 614.16, "end": 614.66, "word": " Protease", "probability": 0.948486328125}, {"start": 614.66, "end": 615.04, "word": " ال", "probability": 0.381591796875}, {"start": 615.04, "end": 615.46, "word": " Serine", "probability": 0.9150390625}, {"start": 615.46, "end": 616.18, "word": " Protease", "probability": 0.945068359375}, {"start": 616.18, "end": 616.96, "word": " هو", "probability": 0.9755859375}, {"start": 616.96, "end": 617.14, "word": " ال", "probability": 0.9150390625}, {"start": 617.14, "end": 617.72, "word": " enzyme", "probability": 0.80908203125}], "temperature": 1.0}, {"id": 26, "seek": 63777, "start": 621.13, "end": 637.77, "text": "ماذا يفعل؟ يعمل ع الـ antithrombin ويقطعه إلى two fragments واحدة منها تعمل blocking لمين؟ للـ thrombin function أو للسيرين البروتييز", "tokens": [2304, 45636, 995, 7251, 5172, 30241, 22807, 7251, 25957, 1211, 6225, 2423, 39184, 2511, 355, 81, 3548, 259, 4032, 1829, 47432, 3615, 3224, 30731, 732, 29197, 36764, 24401, 3660, 9154, 11296, 6055, 25957, 1211, 17776, 32767, 9957, 22807, 24976, 39184, 739, 3548, 259, 2445, 34051, 24976, 3794, 13546, 9957, 2423, 26890, 2407, 31371, 1829, 11622], "avg_logprob": -0.3864397294819355, "compression_ratio": 
1.2611464968152866, "no_speech_prob": 0.0, "words": [{"start": 621.13, "end": 621.31, "word": "ماذا", "probability": 0.6309000651041666}, {"start": 621.31, "end": 622.35, "word": " يفعل؟", "probability": 0.878662109375}, {"start": 622.35, "end": 623.33, "word": " يعمل", "probability": 0.8982747395833334}, {"start": 623.33, "end": 623.45, "word": " ع", "probability": 0.1141357421875}, {"start": 623.45, "end": 623.63, "word": " الـ", "probability": 0.5816650390625}, {"start": 623.63, "end": 624.45, "word": " antithrombin", "probability": 0.706201171875}, {"start": 624.45, "end": 625.29, "word": " ويقطعه", "probability": 0.7814453125}, {"start": 625.29, "end": 626.37, "word": " إلى", "probability": 0.73681640625}, {"start": 626.37, "end": 626.69, "word": " two", "probability": 0.21826171875}, {"start": 626.69, "end": 627.45, "word": " fragments", "probability": 0.919921875}, {"start": 627.45, "end": 629.85, "word": " واحدة", "probability": 0.7270100911458334}, {"start": 629.85, "end": 630.23, "word": " منها", "probability": 0.8984375}, {"start": 630.23, "end": 631.75, "word": " تعمل", "probability": 0.8870442708333334}, {"start": 631.75, "end": 632.23, "word": " blocking", "probability": 0.384033203125}, {"start": 632.23, "end": 633.77, "word": " لمين؟", "probability": 0.6202799479166666}, {"start": 633.77, "end": 634.47, "word": " للـ", "probability": 0.7783203125}, {"start": 634.47, "end": 635.17, "word": " thrombin", "probability": 0.7623697916666666}, {"start": 635.17, "end": 635.75, "word": " function", "probability": 0.91748046875}, {"start": 635.75, "end": 636.23, "word": " أو", "probability": 0.85595703125}, {"start": 636.23, "end": 636.99, "word": " للسيرين", "probability": 0.8526611328125}, {"start": 636.99, "end": 637.77, "word": " البروتييز", "probability": 0.7925618489583334}], "temperature": 1.0}, {"id": 27, "seek": 67262, "start": 643.74, "end": 672.62, "text": "الـ Antithrombin ثم بيصير فيه two fragments واحدة منهم تمسك الشيبيولايت واحدة منهم بتمسك 
اللي هو بتعمل blocking لمين لل active sites of the serine protease وبالتالي بتعمله inhibition مفهوم؟ حد عنده سؤال؟ حد عنده سؤال؟ ماشي؟ بتحبه قاعد؟", "tokens": [6027, 39184, 5130, 355, 81, 3548, 259, 38637, 2304, 4724, 1829, 9381, 13546, 8978, 3224, 732, 29197, 36764, 24401, 3660, 9154, 16095, 46811, 3794, 4117, 25124, 1829, 21292, 12610, 995, 36081, 36764, 24401, 3660, 9154, 16095, 39894, 2304, 3794, 4117, 13672, 1829, 31439, 39894, 25957, 1211, 17776, 32767, 9957, 24976, 4967, 7533, 295, 264, 816, 533, 5631, 651, 46599, 6027, 2655, 6027, 1829, 39894, 25957, 43761, 20406, 849, 3714, 5172, 3224, 20498, 22807, 11331, 3215, 43242, 3224, 8608, 33604, 6027, 22807, 11331, 3215, 43242, 3224, 8608, 33604, 6027, 22807, 3714, 33599, 1829, 22807, 39894, 5016, 3555, 3224, 12174, 995, 22488, 22807], "avg_logprob": -0.24463847849298925, "compression_ratio": 1.6, "no_speech_prob": 4.172325134277344e-07, "words": [{"start": 643.7400000000001, "end": 644.6600000000001, "word": "الـ", "probability": 0.734375}, {"start": 644.6600000000001, "end": 645.58, "word": " Antithrombin", "probability": 0.74892578125}, {"start": 645.58, "end": 645.78, "word": " ثم", "probability": 0.6121826171875}, {"start": 645.78, "end": 646.24, "word": " بيصير", "probability": 0.8438720703125}, {"start": 646.24, "end": 646.58, "word": " فيه", "probability": 0.5526123046875}, {"start": 646.58, "end": 646.72, "word": " two", "probability": 0.491943359375}, {"start": 646.72, "end": 647.22, "word": " fragments", "probability": 0.6982421875}, {"start": 647.22, "end": 647.56, "word": " واحدة", "probability": 0.9208984375}, {"start": 647.56, "end": 647.78, "word": " منهم", "probability": 0.983154296875}, {"start": 647.78, "end": 648.24, "word": " تمسك", "probability": 0.8211263020833334}, {"start": 648.24, "end": 649.22, "word": " الشيبيولايت", "probability": 0.6488138834635416}, {"start": 649.22, "end": 650.04, "word": " واحدة", "probability": 0.916015625}, {"start": 650.04, "end": 650.28, "word": " منهم", 
"probability": 0.962158203125}, {"start": 650.28, "end": 651.36, "word": " بتمسك", "probability": 0.8458251953125}, {"start": 651.36, "end": 651.76, "word": " اللي", "probability": 0.825439453125}, {"start": 651.76, "end": 652.04, "word": " هو", "probability": 0.7880859375}, {"start": 652.04, "end": 652.46, "word": " بتعمل", "probability": 0.9718424479166666}, {"start": 652.46, "end": 652.86, "word": " blocking", "probability": 0.4345703125}, {"start": 652.86, "end": 653.48, "word": " لمين", "probability": 0.74658203125}, {"start": 653.48, "end": 654.16, "word": " لل", "probability": 0.41650390625}, {"start": 654.16, "end": 655.02, "word": " active", "probability": 0.74267578125}, {"start": 655.02, "end": 655.66, "word": " sites", "probability": 0.7568359375}, {"start": 655.66, "end": 656.52, "word": " of", "probability": 0.97021484375}, {"start": 656.52, "end": 657.5, "word": " the", "probability": 0.845703125}, {"start": 657.5, "end": 657.94, "word": " serine", "probability": 0.8388671875}, {"start": 657.94, "end": 658.68, "word": " protease", "probability": 0.799072265625}, {"start": 658.68, "end": 659.06, "word": " وبالتالي", "probability": 0.794140625}, {"start": 659.06, "end": 659.5, "word": " بتعمله", "probability": 0.8893229166666666}, {"start": 659.5, "end": 660.16, "word": " inhibition", "probability": 0.672119140625}, {"start": 660.16, "end": 663.92, "word": " مفهوم؟", "probability": 0.86513671875}, {"start": 663.92, "end": 664.18, "word": " حد", "probability": 0.935791015625}, {"start": 664.18, "end": 664.44, "word": " عنده", "probability": 0.951416015625}, {"start": 664.44, "end": 667.16, "word": " سؤال؟", "probability": 0.953857421875}, {"start": 667.16, "end": 667.52, "word": " حد", "probability": 0.93798828125}, {"start": 667.52, "end": 667.76, "word": " عنده", "probability": 0.97265625}, {"start": 667.76, "end": 669.68, "word": " سؤال؟", "probability": 0.9833984375}, {"start": 669.68, "end": 670.36, "word": " ماشي؟", "probability": 
0.8936767578125}, {"start": 670.36, "end": 672.18, "word": " بتحبه", "probability": 0.9036865234375}, {"start": 672.18, "end": 672.62, "word": " قاعد؟", "probability": 0.705078125}], "temperature": 1.0}, {"id": 28, "seek": 70084, "start": 674.48, "end": 700.84, "text": "بس ال fragment اللى بتعمله ال blocking لل active site لمين؟ لل serine protein طبعا طبعا طب كيف عبتدان يا دكتور؟ كيه؟ كيه كيف عبتدان يا دكتور؟ واحدة heavy واحدة light تمسك ال ash ال heavy بال ash بال serine protein و من عند ال binding site تبعته فتعمله inhibition بتعمله ايش? inhibition المفهوم شباب؟", "tokens": [3555, 3794, 2423, 26424, 13672, 7578, 39894, 25957, 43761, 2423, 17776, 24976, 4967, 3621, 32767, 9957, 22807, 24976, 816, 533, 7944, 23032, 3555, 3615, 995, 23032, 3555, 3615, 995, 23032, 3555, 9122, 33911, 6225, 3555, 2655, 3215, 7649, 35186, 11778, 4117, 2655, 13063, 22807, 9122, 1829, 3224, 22807, 9122, 1829, 3224, 9122, 33911, 6225, 3555, 2655, 3215, 7649, 35186, 11778, 4117, 2655, 13063, 22807, 36764, 24401, 3660, 4676, 36764, 24401, 3660, 1442, 46811, 3794, 4117, 2423, 12588, 2423, 4676, 20666, 12588, 20666, 816, 533, 7944, 4032, 9154, 43242, 2423, 17359, 3621, 6055, 3555, 34268, 3224, 6156, 2655, 25957, 43761, 20406, 849, 39894, 25957, 43761, 1975, 1829, 8592, 30, 20406, 849, 9673, 5172, 3224, 20498, 13412, 3555, 16758, 22807], "avg_logprob": -0.299632360454367, "compression_ratio": 1.9004329004329004, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 674.48, "end": 675.2, "word": "بس", "probability": 0.8740234375}, {"start": 675.2, "end": 675.4, "word": " ال", "probability": 0.94921875}, {"start": 675.4, "end": 675.92, "word": " fragment", "probability": 0.9189453125}, {"start": 675.92, "end": 676.14, "word": " اللى", "probability": 0.81396484375}, {"start": 676.14, "end": 676.4, "word": " بتعمله", "probability": 0.8971354166666666}, {"start": 676.4, "end": 676.54, "word": " ال", "probability": 0.346435546875}, {"start": 676.54, "end": 676.88, "word": " blocking", 
"probability": 0.65087890625}, {"start": 676.88, "end": 677.24, "word": " لل", "probability": 0.72509765625}, {"start": 677.24, "end": 678.22, "word": " active", "probability": 0.7666015625}, {"start": 678.22, "end": 678.48, "word": " site", "probability": 0.84326171875}, {"start": 678.48, "end": 679.7, "word": " لمين؟", "probability": 0.79833984375}, {"start": 679.7, "end": 679.88, "word": " لل", "probability": 0.48095703125}, {"start": 679.88, "end": 680.28, "word": " serine", "probability": 0.609375}, {"start": 680.28, "end": 681.36, "word": " protein", "probability": 0.07281494140625}, {"start": 681.36, "end": 682.18, "word": " طبعا", "probability": 0.94775390625}, {"start": 682.18, "end": 682.32, "word": " طبعا", "probability": 0.897705078125}, {"start": 682.32, "end": 682.56, "word": " طب", "probability": 0.777587890625}, {"start": 682.56, "end": 682.76, "word": " كيف", "probability": 0.8212890625}, {"start": 682.76, "end": 683.06, "word": " عبتدان", "probability": 0.554541015625}, {"start": 683.06, "end": 683.16, "word": " يا", "probability": 0.50732421875}, {"start": 683.16, "end": 684.0, "word": " دكتور؟", "probability": 0.894921875}, {"start": 684.0, "end": 684.58, "word": " كيه؟", "probability": 0.72564697265625}, {"start": 684.58, "end": 684.72, "word": " كيه", "probability": 0.7872721354166666}, {"start": 684.72, "end": 684.86, "word": " كيف", "probability": 0.649658203125}, {"start": 684.86, "end": 685.22, "word": " عبتدان", "probability": 0.9814453125}, {"start": 685.22, "end": 685.36, "word": " يا", "probability": 0.308837890625}, {"start": 685.36, "end": 685.5, "word": " دكتور؟", "probability": 0.96396484375}, {"start": 685.5, "end": 685.88, "word": " واحدة", "probability": 0.8875325520833334}, {"start": 685.88, "end": 686.04, "word": " heavy", "probability": 0.7734375}, {"start": 686.04, "end": 686.46, "word": " واحدة", "probability": 0.8587239583333334}, {"start": 686.46, "end": 686.84, "word": " light", "probability": 0.9365234375}, {"start": 
686.84, "end": 688.16, "word": " تمسك", "probability": 0.8107096354166666}, {"start": 688.16, "end": 688.32, "word": " ال", "probability": 0.9072265625}, {"start": 688.32, "end": 688.76, "word": " ash", "probability": 0.431640625}, {"start": 688.76, "end": 689.48, "word": " ال", "probability": 0.78271484375}, {"start": 689.48, "end": 689.78, "word": " heavy", "probability": 0.80615234375}, {"start": 689.78, "end": 690.02, "word": " بال", "probability": 0.94384765625}, {"start": 690.02, "end": 690.46, "word": " ash", "probability": 0.95263671875}, {"start": 690.46, "end": 691.22, "word": " بال", "probability": 0.88427734375}, {"start": 691.22, "end": 691.84, "word": " serine", "probability": 0.8505859375}, {"start": 691.84, "end": 692.48, "word": " protein", "probability": 0.444091796875}, {"start": 692.48, "end": 693.78, "word": " و", "probability": 0.8330078125}, {"start": 693.78, "end": 694.1, "word": " من", "probability": 0.80029296875}, {"start": 694.1, "end": 694.32, "word": " عند", "probability": 0.99169921875}, {"start": 694.32, "end": 694.44, "word": " ال", "probability": 0.9423828125}, {"start": 694.44, "end": 694.7, "word": " binding", "probability": 0.84619140625}, {"start": 694.7, "end": 694.98, "word": " site", "probability": 0.8125}, {"start": 694.98, "end": 695.4, "word": " تبعته", "probability": 0.7572021484375}, {"start": 695.4, "end": 696.24, "word": " فتعمله", "probability": 0.804443359375}, {"start": 696.24, "end": 697.2, "word": " inhibition", "probability": 0.82568359375}, {"start": 697.2, "end": 697.64, "word": " بتعمله", "probability": 0.5924479166666666}, {"start": 697.64, "end": 697.96, "word": " ايش?", "probability": 0.6459147135416666}, {"start": 698.32, "end": 698.88, "word": " inhibition", "probability": 0.7666015625}, {"start": 698.88, "end": 700.12, "word": " المفهوم", "probability": 0.8992919921875}, {"start": 700.12, "end": 700.84, "word": " شباب؟", "probability": 0.77001953125}], "temperature": 1.0}, {"id": 29, "seek": 73064, 
"start": 702.88, "end": 730.64, "text": "ناخد المثال التاني على العائلة التانية وهي protein C system تباين معايا protein C system الميكانزم عشان يشتغل يحتاج إلى شرط مشروط يحتاج إلى", "tokens": [1863, 47283, 3215, 9673, 12984, 6027, 16712, 7649, 1829, 15844, 18863, 16373, 37977, 16712, 7649, 10632, 37037, 1829, 7944, 383, 1185, 6055, 3555, 995, 9957, 20449, 995, 25528, 7944, 383, 1185, 9673, 1829, 41361, 11622, 2304, 6225, 8592, 7649, 7251, 8592, 2655, 17082, 1211, 7251, 33753, 26108, 30731, 13412, 2288, 9566, 37893, 32887, 9566, 7251, 33753, 26108, 30731], "avg_logprob": -0.16220868442018152, "compression_ratio": 1.5985915492957747, "no_speech_prob": 8.940696716308594e-07, "words": [{"start": 702.88, "end": 703.32, "word": "ناخد", "probability": 0.76806640625}, {"start": 703.32, "end": 704.58, "word": " المثال", "probability": 0.811279296875}, {"start": 704.58, "end": 705.22, "word": " التاني", "probability": 0.9275716145833334}, {"start": 705.22, "end": 706.8, "word": " على", "probability": 0.8115234375}, {"start": 706.8, "end": 708.72, "word": " العائلة", "probability": 0.984375}, {"start": 708.72, "end": 709.52, "word": " التانية", "probability": 0.9825846354166666}, {"start": 709.52, "end": 710.66, "word": " وهي", "probability": 0.79833984375}, {"start": 710.66, "end": 711.06, "word": " protein", "probability": 0.6279296875}, {"start": 711.06, "end": 711.44, "word": " C", "probability": 0.43212890625}, {"start": 711.44, "end": 713.88, "word": " system", "probability": 0.6923828125}, {"start": 713.88, "end": 715.1, "word": " تباين", "probability": 0.6549072265625}, {"start": 715.1, "end": 715.56, "word": " معايا", "probability": 0.9607747395833334}, {"start": 715.56, "end": 717.82, "word": " protein", "probability": 0.626953125}, {"start": 717.82, "end": 718.28, "word": " C", "probability": 0.85302734375}, {"start": 718.28, "end": 718.96, "word": " system", "probability": 0.958984375}, {"start": 718.96, "end": 723.14, "word": " الميكانزم", "probability": 
0.8107421875}, {"start": 723.14, "end": 725.22, "word": " عشان", "probability": 0.9659830729166666}, {"start": 725.22, "end": 726.0, "word": " يشتغل", "probability": 0.9958984375}, {"start": 726.0, "end": 727.08, "word": " يحتاج", "probability": 0.966796875}, {"start": 727.08, "end": 727.46, "word": " إلى", "probability": 0.89306640625}, {"start": 727.46, "end": 728.12, "word": " شرط", "probability": 0.9781901041666666}, {"start": 728.12, "end": 729.08, "word": " مشروط", "probability": 0.8844401041666666}, {"start": 729.08, "end": 730.3, "word": " يحتاج", "probability": 0.96728515625}, {"start": 730.3, "end": 730.64, "word": " إلى", "probability": 0.93603515625}], "temperature": 1.0}, {"id": 30, "seek": 76237, "start": 732.61, "end": 762.37, "text": "إنه يكون الـ thrombin at high level يبقى شرط عمل هذا الـ mechanism إنه يكون الـ thrombin at high level وماكنش مصير at high level لما يكون وصل إلى نهاية عمله لأن ال coagulation process شباب تبدأ in a propagation manner بحالة .. بحالة إيش؟ بحالة العكومية", "tokens": [28814, 1863, 3224, 7251, 30544, 2423, 39184, 739, 3548, 259, 412, 1090, 1496, 7251, 3555, 4587, 7578, 13412, 2288, 9566, 6225, 42213, 23758, 2423, 39184, 7513, 36145, 3224, 7251, 30544, 2423, 39184, 739, 3548, 259, 412, 1090, 1496, 4032, 15042, 19452, 8592, 3714, 9381, 13546, 412, 1090, 1496, 5296, 15042, 7251, 30544, 4032, 36520, 30731, 8717, 11296, 10632, 6225, 42213, 3224, 5296, 33456, 2423, 598, 559, 2776, 1399, 13412, 3555, 16758, 6055, 44510, 10721, 294, 257, 38377, 9060, 4724, 5016, 6027, 3660, 4386, 4724, 5016, 6027, 3660, 11933, 1829, 8592, 22807, 4724, 5016, 6027, 3660, 18863, 4117, 20498, 10632], "avg_logprob": -0.23468750730156898, "compression_ratio": 1.7142857142857142, "no_speech_prob": 7.152557373046875e-07, "words": [{"start": 732.61, "end": 733.01, "word": "إنه", "probability": 0.5682779947916666}, {"start": 733.01, "end": 733.59, "word": " يكون", "probability": 0.9892578125}, {"start": 733.59, "end": 735.27, "word": " الـ", "probability": 
0.646484375}, {"start": 735.27, "end": 736.09, "word": " thrombin", "probability": 0.7638346354166666}, {"start": 736.09, "end": 736.61, "word": " at", "probability": 0.82080078125}, {"start": 736.61, "end": 736.99, "word": " high", "probability": 0.86328125}, {"start": 736.99, "end": 737.45, "word": " level", "probability": 0.95556640625}, {"start": 737.45, "end": 738.49, "word": " يبقى", "probability": 0.71881103515625}, {"start": 738.49, "end": 739.17, "word": " شرط", "probability": 0.9767252604166666}, {"start": 739.17, "end": 740.27, "word": " عمل", "probability": 0.975830078125}, {"start": 740.27, "end": 740.93, "word": " هذا", "probability": 0.97314453125}, {"start": 740.93, "end": 741.13, "word": " الـ", "probability": 0.5914306640625}, {"start": 741.13, "end": 741.65, "word": " mechanism", "probability": 0.81591796875}, {"start": 741.65, "end": 742.71, "word": " إنه", "probability": 0.7156982421875}, {"start": 742.71, "end": 742.99, "word": " يكون", "probability": 0.99462890625}, {"start": 742.99, "end": 743.27, "word": " الـ", "probability": 0.6497802734375}, {"start": 743.27, "end": 743.87, "word": " thrombin", "probability": 0.9576822916666666}, {"start": 743.87, "end": 744.77, "word": " at", "probability": 0.9365234375}, {"start": 744.77, "end": 745.19, "word": " high", "probability": 0.923828125}, {"start": 745.19, "end": 745.63, "word": " level", "probability": 0.97607421875}, {"start": 745.63, "end": 746.23, "word": " وماكنش", "probability": 0.58984375}, {"start": 746.23, "end": 746.53, "word": " مصير", "probability": 0.7921549479166666}, {"start": 746.53, "end": 746.69, "word": " at", "probability": 0.77490234375}, {"start": 746.69, "end": 746.93, "word": " high", "probability": 0.9365234375}, {"start": 746.93, "end": 747.27, "word": " level", "probability": 0.97900390625}, {"start": 747.27, "end": 749.49, "word": " لما", "probability": 0.5494384765625}, {"start": 749.49, "end": 749.89, "word": " يكون", "probability": 0.984619140625}, {"start": 
749.89, "end": 750.59, "word": " وصل", "probability": 0.926513671875}, {"start": 750.59, "end": 750.87, "word": " إلى", "probability": 0.7783203125}, {"start": 750.87, "end": 751.31, "word": " نهاية", "probability": 0.9567057291666666}, {"start": 751.31, "end": 752.13, "word": " عمله", "probability": 0.9913736979166666}, {"start": 752.13, "end": 753.03, "word": " لأن", "probability": 0.81298828125}, {"start": 753.03, "end": 753.17, "word": " ال", "probability": 0.8232421875}, {"start": 753.17, "end": 753.87, "word": " coagulation", "probability": 0.78369140625}, {"start": 753.87, "end": 754.37, "word": " process", "probability": 0.90380859375}, {"start": 754.37, "end": 754.91, "word": " شباب", "probability": 0.7919108072916666}, {"start": 754.91, "end": 755.95, "word": " تبدأ", "probability": 0.9493815104166666}, {"start": 755.95, "end": 756.29, "word": " in", "probability": 0.91064453125}, {"start": 756.29, "end": 756.47, "word": " a", "probability": 0.69580078125}, {"start": 756.47, "end": 757.11, "word": " propagation", "probability": 0.9814453125}, {"start": 757.11, "end": 757.65, "word": " manner", "probability": 0.869140625}, {"start": 757.65, "end": 758.79, "word": " بحالة", "probability": 0.95556640625}, {"start": 758.79, "end": 758.91, "word": " ..", "probability": 0.47509765625}, {"start": 758.91, "end": 759.59, "word": " بحالة", "probability": 0.9827880859375}, {"start": 759.59, "end": 760.73, "word": " إيش؟", "probability": 0.75067138671875}, {"start": 760.73, "end": 761.49, "word": " بحالة", "probability": 0.96923828125}, {"start": 761.49, "end": 762.37, "word": " العكومية", "probability": 0.87060546875}], "temperature": 1.0}, {"id": 31, "seek": 78993, "start": 763.09, "end": 789.93, "text": "كل ما اشتغلت بيكمل بعض وبيزود بعض وبالتالي بيزيد ال concentration of serine proteases كل ما اشتغلت اكتر وكل ما اشتغلت اكتر وكل ما عندنا جلطة اكتر اسرع، بسبب عند حد معين بيكون تركيزها عالي، ماشي؟ تركيز الثرومبين فيها عالي، والثرومبين هو اللي بيشتغل على مين؟", 
"tokens": [28820, 19446, 1975, 8592, 2655, 17082, 1211, 2655, 4724, 1829, 24793, 1211, 45030, 11242, 46599, 1829, 11622, 23328, 45030, 11242, 46599, 6027, 2655, 6027, 1829, 4724, 1829, 11622, 25708, 2423, 9856, 295, 816, 533, 5631, 1957, 28242, 19446, 1975, 8592, 2655, 17082, 1211, 2655, 1975, 4117, 2655, 2288, 4032, 28820, 19446, 1975, 8592, 2655, 17082, 1211, 2655, 1975, 4117, 2655, 2288, 4032, 28820, 19446, 43242, 8315, 10874, 1211, 9566, 3660, 1975, 4117, 2655, 2288, 1975, 3794, 2288, 3615, 12399, 4724, 3794, 3555, 3555, 43242, 11331, 3215, 20449, 9957, 4724, 1829, 30544, 6055, 31747, 1829, 11622, 11296, 6225, 6027, 1829, 12399, 3714, 33599, 1829, 22807, 6055, 31747, 1829, 11622, 6024, 104, 2288, 20498, 3555, 9957, 8978, 11296, 6225, 6027, 1829, 12399, 4032, 6027, 12984, 2288, 20498, 3555, 9957, 31439, 13672, 1829, 4724, 1829, 8592, 2655, 17082, 1211, 15844, 3714, 9957, 22807], "avg_logprob": -0.19791666920303452, "compression_ratio": 2.051643192488263, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 763.09, "end": 763.45, "word": "كل", "probability": 0.51025390625}, {"start": 763.45, "end": 763.87, "word": " ما", "probability": 0.609375}, {"start": 763.87, "end": 767.73, "word": " اشتغلت", "probability": 0.9112141927083334}, {"start": 767.73, "end": 768.49, "word": " بيكمل", "probability": 0.579345703125}, {"start": 768.49, "end": 768.81, "word": " بعض", "probability": 0.79150390625}, {"start": 768.81, "end": 770.57, "word": " وبيزود", "probability": 0.68499755859375}, {"start": 770.57, "end": 770.87, "word": " بعض", "probability": 0.9443359375}, {"start": 770.87, "end": 771.25, "word": " وبالتالي", "probability": 0.92060546875}, {"start": 771.25, "end": 771.55, "word": " بيزيد", "probability": 0.91015625}, {"start": 771.55, "end": 771.67, "word": " ال", "probability": 0.8564453125}, {"start": 771.67, "end": 772.25, "word": " concentration", "probability": 0.79638671875}, {"start": 772.25, "end": 772.65, "word": " of", "probability": 
0.92822265625}, {"start": 772.65, "end": 773.05, "word": " serine", "probability": 0.812744140625}, {"start": 773.05, "end": 773.67, "word": " proteases", "probability": 0.6844482421875}, {"start": 773.67, "end": 774.47, "word": " كل", "probability": 0.9423828125}, {"start": 774.47, "end": 774.83, "word": " ما", "probability": 0.94189453125}, {"start": 774.83, "end": 775.85, "word": " اشتغلت", "probability": 0.9694010416666666}, {"start": 775.85, "end": 776.31, "word": " اكتر", "probability": 0.8941650390625}, {"start": 776.31, "end": 776.93, "word": " وكل", "probability": 0.870849609375}, {"start": 776.93, "end": 777.01, "word": " ما", "probability": 0.89453125}, {"start": 777.01, "end": 777.41, "word": " اشتغلت", "probability": 0.9480794270833334}, {"start": 777.41, "end": 777.65, "word": " اكتر", "probability": 0.9879150390625}, {"start": 777.65, "end": 777.83, "word": " وكل", "probability": 0.828369140625}, {"start": 777.83, "end": 777.91, "word": " ما", "probability": 0.9404296875}, {"start": 777.91, "end": 778.17, "word": " عندنا", "probability": 0.87353515625}, {"start": 778.17, "end": 778.55, "word": " جلطة", "probability": 0.7349853515625}, {"start": 778.55, "end": 778.91, "word": " اكتر", "probability": 0.958984375}, {"start": 778.91, "end": 779.43, "word": " اسرع،", "probability": 0.7150390625}, {"start": 779.43, "end": 779.87, "word": " بسبب", "probability": 0.55609130859375}, {"start": 779.87, "end": 780.97, "word": " عند", "probability": 0.982421875}, {"start": 780.97, "end": 781.47, "word": " حد", "probability": 0.904052734375}, {"start": 781.47, "end": 782.11, "word": " معين", "probability": 0.991943359375}, {"start": 782.11, "end": 782.99, "word": " بيكون", "probability": 0.8805338541666666}, {"start": 782.99, "end": 783.63, "word": " تركيزها", "probability": 0.98671875}, {"start": 783.63, "end": 785.01, "word": " عالي،", "probability": 0.81610107421875}, {"start": 785.01, "end": 786.33, "word": " ماشي؟", "probability": 0.7685546875}, {"start": 
786.33, "end": 786.79, "word": " تركيز", "probability": 0.9520263671875}, {"start": 786.79, "end": 787.39, "word": " الثرومبين", "probability": 0.7728983561197916}, {"start": 787.39, "end": 787.75, "word": " فيها", "probability": 0.987060546875}, {"start": 787.75, "end": 788.37, "word": " عالي،", "probability": 0.8741455078125}, {"start": 788.37, "end": 788.89, "word": " والثرومبين", "probability": 0.8577008928571429}, {"start": 788.89, "end": 789.03, "word": " هو", "probability": 0.99169921875}, {"start": 789.03, "end": 789.15, "word": " اللي", "probability": 0.93408203125}, {"start": 789.15, "end": 789.43, "word": " بيشتغل", "probability": 0.9225260416666666}, {"start": 789.43, "end": 789.63, "word": " على", "probability": 0.53125}, {"start": 789.63, "end": 789.93, "word": " مين؟", "probability": 0.9309895833333334}], "temperature": 1.0}, {"id": 32, "seek": 80884, "start": 790.52, "end": 808.84, "text": "و بيحولوا ل مين؟ مية في المين، ماشي؟ هو اللي بيعمل cloth، at high level of thrombin بيكون .. 
بيكون فاكرين في ال function بتاعة ال thrombin، قولنا فيه ستة functions، مظبوط، at low level بنشط مين؟", "tokens": [2407, 4724, 1829, 5016, 12610, 14407, 5296, 3714, 9957, 22807, 3714, 10632, 8978, 9673, 9957, 12399, 3714, 33599, 1829, 22807, 31439, 13672, 1829, 4724, 1829, 25957, 1211, 13619, 12399, 412, 1090, 1496, 295, 739, 3548, 259, 4724, 1829, 30544, 4386, 4724, 1829, 30544, 6156, 995, 37983, 9957, 8978, 2423, 2445, 39894, 995, 27884, 2423, 739, 3548, 259, 12399, 12174, 12610, 8315, 8978, 3224, 8608, 2655, 3660, 6828, 12399, 3714, 19913, 3555, 2407, 9566, 12399, 412, 2295, 1496, 44945, 8592, 9566, 3714, 9957, 22807], "avg_logprob": -0.31194195577076506, "compression_ratio": 1.5372340425531914, "no_speech_prob": 1.9073486328125e-06, "words": [{"start": 790.52, "end": 791.32, "word": "و", "probability": 0.400390625}, {"start": 791.32, "end": 791.8, "word": " بيحولوا", "probability": 0.816943359375}, {"start": 791.8, "end": 791.92, "word": " ل", "probability": 0.5}, {"start": 791.92, "end": 792.76, "word": " مين؟", "probability": 0.7596028645833334}, {"start": 792.76, "end": 793.12, "word": " مية", "probability": 0.6026611328125}, {"start": 793.12, "end": 793.26, "word": " في", "probability": 0.5185546875}, {"start": 793.26, "end": 794.12, "word": " المين،", "probability": 0.6294759114583334}, {"start": 794.12, "end": 795.14, "word": " ماشي؟", "probability": 0.7806396484375}, {"start": 795.14, "end": 795.5, "word": " هو", "probability": 0.1182861328125}, {"start": 795.5, "end": 795.6, "word": " اللي", "probability": 0.9521484375}, {"start": 795.6, "end": 795.9, "word": " بيعمل", "probability": 0.97509765625}, {"start": 795.9, "end": 796.9, "word": " cloth،", "probability": 0.173980712890625}, {"start": 796.9, "end": 797.16, "word": " at", "probability": 0.921875}, {"start": 797.16, "end": 797.66, "word": " high", "probability": 0.88623046875}, {"start": 797.66, "end": 798.08, "word": " level", "probability": 0.97314453125}, {"start": 798.08, "end": 798.34, 
"word": " of", "probability": 0.974609375}, {"start": 798.34, "end": 799.04, "word": " thrombin", "probability": 0.7177734375}, {"start": 799.04, "end": 800.22, "word": " بيكون", "probability": 0.85791015625}, {"start": 800.22, "end": 800.72, "word": " ..", "probability": 0.31884765625}, {"start": 800.72, "end": 801.34, "word": " بيكون", "probability": 0.9427083333333334}, {"start": 801.34, "end": 801.76, "word": " فاكرين", "probability": 0.93896484375}, {"start": 801.76, "end": 801.88, "word": " في", "probability": 0.7412109375}, {"start": 801.88, "end": 801.98, "word": " ال", "probability": 0.8974609375}, {"start": 801.98, "end": 802.3, "word": " function", "probability": 0.95458984375}, {"start": 802.3, "end": 802.6, "word": " بتاعة", "probability": 0.6915690104166666}, {"start": 802.6, "end": 802.76, "word": " ال", "probability": 0.62060546875}, {"start": 802.76, "end": 803.3, "word": " thrombin،", "probability": 0.802490234375}, {"start": 803.3, "end": 803.4, "word": " قولنا", "probability": 0.80908203125}, {"start": 803.4, "end": 803.62, "word": " فيه", "probability": 0.9306640625}, {"start": 803.62, "end": 803.92, "word": " ستة", "probability": 0.9524739583333334}, {"start": 803.92, "end": 805.74, "word": " functions،", "probability": 0.61572265625}, {"start": 805.74, "end": 806.66, "word": " مظبوط،", "probability": 0.8433430989583334}, {"start": 806.66, "end": 806.88, "word": " at", "probability": 0.97607421875}, {"start": 806.88, "end": 807.2, "word": " low", "probability": 0.97265625}, {"start": 807.2, "end": 807.5, "word": " level", "probability": 0.96728515625}, {"start": 807.5, "end": 808.14, "word": " بنشط", "probability": 0.7112223307291666}, {"start": 808.14, "end": 808.84, "word": " مين؟", "probability": 0.9833984375}], "temperature": 1.0}, {"id": 33, "seek": 83515, "start": 809.05, "end": 835.15, "text": "الـ 8 و 5 و الـ 8 و 5 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 
5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 5 و 8 و 8 و 5 و 8 و 8 و 5 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و 8 و", "tokens": [6027, 39184, 1649, 4032, 1025, 4032, 2423, 39184, 1649, 4032, 1025, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1649, 4032, 1025, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032, 1649, 4032], "avg_logprob": -0.2579166581895616, "compression_ratio": 15.216216216216216, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 809.05, "end": 809.23, "word": "الـ", "probability": 0.16668701171875}, {"start": 809.23, "end": 809.43, "word": " 8", "probability": 0.41650390625}, {"start": 809.43, "end": 809.61, "word": " و", 
"probability": 0.71240234375}, {"start": 809.61, "end": 809.87, "word": " 5", "probability": 0.69775390625}, {"start": 809.87, "end": 810.27, "word": " و", "probability": 0.4404296875}, {"start": 810.27, "end": 810.53, "word": " الـ", "probability": 0.3602294921875}, {"start": 810.53, "end": 811.39, "word": " 8", "probability": 0.43310546875}, {"start": 811.39, "end": 813.37, "word": " و", "probability": 0.6875}, {"start": 813.37, "end": 814.05, "word": " 5", "probability": 0.89892578125}, {"start": 814.05, "end": 815.55, "word": " و", "probability": 0.2491455078125}, {"start": 815.55, "end": 815.55, "word": " 5", "probability": 0.65625}, {"start": 815.55, "end": 815.55, "word": " و", "probability": 0.5185546875}, {"start": 815.55, "end": 815.81, "word": " 8", "probability": 0.447265625}, {"start": 815.81, "end": 815.95, "word": " و", "probability": 0.64697265625}, {"start": 815.95, "end": 816.03, "word": " 5", "probability": 0.8818359375}, {"start": 816.03, "end": 816.29, "word": " و", "probability": 0.88916015625}, {"start": 816.29, "end": 816.53, "word": " 8", "probability": 0.443115234375}, {"start": 816.53, "end": 816.71, "word": " و", "probability": 0.9169921875}, {"start": 816.71, "end": 816.91, "word": " 5", "probability": 0.88330078125}, {"start": 816.91, "end": 818.15, "word": " و", "probability": 0.93212890625}, {"start": 818.15, "end": 818.15, "word": " 8", "probability": 0.396728515625}, {"start": 818.15, "end": 818.77, "word": " و", "probability": 0.9521484375}, {"start": 818.77, "end": 818.77, "word": " 5", "probability": 0.857421875}, {"start": 818.77, "end": 820.25, "word": " و", "probability": 0.94921875}, {"start": 820.25, "end": 820.25, "word": " 8", "probability": 0.466796875}, {"start": 820.25, "end": 821.03, "word": " و", "probability": 0.9619140625}, {"start": 821.03, "end": 821.03, "word": " 5", "probability": 0.81005859375}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.95556640625}, {"start": 821.03, "end": 821.03, 
"word": " 8", "probability": 0.52734375}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.966796875}, {"start": 821.03, "end": 821.03, "word": " 5", "probability": 0.751953125}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.9599609375}, {"start": 821.03, "end": 821.03, "word": " 8", "probability": 0.56982421875}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.96923828125}, {"start": 821.03, "end": 821.03, "word": " 5", "probability": 0.716796875}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.96142578125}, {"start": 821.03, "end": 821.03, "word": " 8", "probability": 0.609375}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.97119140625}, {"start": 821.03, "end": 821.03, "word": " 5", "probability": 0.6884765625}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.96435546875}, {"start": 821.03, "end": 821.03, "word": " 8", "probability": 0.63037109375}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.97265625}, {"start": 821.03, "end": 821.03, "word": " 5", "probability": 0.66845703125}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.9658203125}, {"start": 821.03, "end": 821.03, "word": " 8", "probability": 0.64111328125}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.97314453125}, {"start": 821.03, "end": 821.03, "word": " 5", "probability": 0.6572265625}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.96728515625}, {"start": 821.03, "end": 821.03, "word": " 8", "probability": 0.65966796875}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.97314453125}, {"start": 821.03, "end": 821.03, "word": " 5", "probability": 0.65087890625}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.9677734375}, {"start": 821.03, "end": 821.03, "word": " 8", "probability": 0.66015625}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.97412109375}, {"start": 821.03, "end": 
821.03, "word": " 5", "probability": 0.646484375}, {"start": 821.03, "end": 821.03, "word": " و", "probability": 0.96875}, {"start": 821.03, "end": 821.03, "word": " 8", "probability": 0.6748046875}, {"start": 821.03, "end": 821.19, "word": " و", "probability": 0.97412109375}, {"start": 821.19, "end": 821.19, "word": " 5", "probability": 0.64208984375}, {"start": 821.19, "end": 821.19, "word": " و", "probability": 0.96875}, {"start": 821.19, "end": 821.19, "word": " 8", "probability": 0.68408203125}, {"start": 821.19, "end": 823.65, "word": " و", "probability": 0.974609375}, {"start": 823.65, "end": 823.67, "word": " 5", "probability": 0.6298828125}, {"start": 823.67, "end": 823.71, "word": " و", "probability": 0.96923828125}, {"start": 823.71, "end": 823.83, "word": " 8", "probability": 0.69873046875}, {"start": 823.83, "end": 823.83, "word": " و", "probability": 0.97412109375}, {"start": 823.83, "end": 823.83, "word": " 5", "probability": 0.63134765625}, {"start": 823.83, "end": 823.83, "word": " و", "probability": 0.97021484375}, {"start": 823.83, "end": 823.83, "word": " 8", "probability": 0.7138671875}, {"start": 823.83, "end": 823.83, "word": " و", "probability": 0.97412109375}, {"start": 823.83, "end": 823.83, "word": " 5", "probability": 0.6240234375}, {"start": 823.83, "end": 823.83, "word": " و", "probability": 0.970703125}, {"start": 823.83, "end": 823.83, "word": " 8", "probability": 0.71875}, {"start": 823.83, "end": 823.83, "word": " و", "probability": 0.97509765625}, {"start": 823.83, "end": 823.83, "word": " 5", "probability": 0.607421875}, {"start": 823.83, "end": 823.83, "word": " و", "probability": 0.970703125}, {"start": 823.83, "end": 823.83, "word": " 8", "probability": 0.72998046875}, {"start": 823.83, "end": 824.33, "word": " و", "probability": 0.974609375}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.6083984375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97119140625}, {"start": 824.33, "end": 
824.33, "word": " 8", "probability": 0.73974609375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97509765625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.5947265625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9716796875}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.74267578125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97607421875}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.59423828125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97216796875}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.74560546875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9765625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.583984375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97216796875}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.75}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97705078125}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.58740234375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97265625}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.75732421875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97802734375}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.58740234375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97314453125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.75146484375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97900390625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.58349609375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.974609375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.75634765625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98046875}, {"start": 824.33, 
"end": 824.33, "word": " 5", "probability": 0.5830078125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9755859375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.76025390625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98095703125}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.57861328125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97607421875}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.75927734375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98193359375}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.5810546875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9775390625}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.755859375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.982421875}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.57275390625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97802734375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7548828125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98388671875}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.5615234375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97900390625}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7705078125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.984375}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.56103515625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97998046875}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7587890625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98486328125}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.54443359375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98046875}, {"start": 
824.33, "end": 824.33, "word": " 8", "probability": 0.7646484375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9853515625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.53515625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98095703125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.767578125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9853515625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.53662109375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98193359375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7578125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98583984375}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.525390625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98291015625}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.748046875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.986328125}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.52294921875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98291015625}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7529296875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.986328125}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.498291015625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9833984375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7529296875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98681640625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.492431640625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9833984375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.75830078125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98681640625}, 
{"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.4833984375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9833984375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7578125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98681640625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.463134765625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9833984375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7578125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98681640625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.442626953125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9833984375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.7607421875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.986328125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.451171875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.986328125}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.56787109375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98291015625}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.71923828125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.986328125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.4619140625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9853515625}, {"start": 824.33, "end": 824.33, "word": " 5", "probability": 0.486328125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.982421875}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.76513671875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98486328125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.56396484375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.984375}, 
{"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.5029296875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9833984375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.53076171875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98193359375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.61328125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.98095703125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.69140625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9794921875}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.78369140625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97802734375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.84912109375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97705078125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.89306640625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9755859375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.9169921875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.974609375}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.93310546875}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.97314453125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.94140625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.9716796875}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.947265625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.970703125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.95166015625}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.970703125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.95361328125}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 
0.96923828125}, {"start": 824.33, "end": 824.33, "word": " 8", "probability": 0.95458984375}, {"start": 824.33, "end": 824.33, "word": " و", "probability": 0.96923828125}, {"start": 824.33, "end": 824.41, "word": " 8", "probability": 0.95556640625}, {"start": 824.41, "end": 826.59, "word": " و", "probability": 0.96728515625}, {"start": 826.59, "end": 827.17, "word": " 8", "probability": 0.9560546875}, {"start": 827.17, "end": 827.39, "word": " و", "probability": 0.96826171875}, {"start": 827.39, "end": 827.99, "word": " 8", "probability": 0.95654296875}, {"start": 827.99, "end": 832.17, "word": " و", "probability": 0.9697265625}, {"start": 832.17, "end": 832.17, "word": " 8", "probability": 0.95751953125}, {"start": 832.17, "end": 835.15, "word": " و", "probability": 0.96630859375}, {"start": 835.15, "end": 835.15, "word": " 8", "probability": 0.9482421875}, {"start": 835.15, "end": 835.15, "word": " و", "probability": 0.96484375}], "temperature": 1.0}, {"id": 34, "seek": 86507, "start": 835.97, "end": 865.07, "text": "بالـ thrombomodulin، من أين جاء الـ thrombomodulin؟ أولاً، هو عبارة عن مكوّن أساسي من مكونات الـ Platelet Alpha Granule، ماشي؟ و احنا قاعدين بنشتغل على سطح الـ Platelet، قلنا لما اتنشطت، اعمل الـ Degranulation، طلع منها مين؟ بالـ Thrombin، Thrombin will bind to thrombomodulin، ماشي؟ and", "tokens": [3555, 6027, 39184, 739, 3548, 298, 378, 17701, 12399, 9154, 5551, 9957, 10874, 16606, 2423, 39184, 739, 3548, 298, 378, 17701, 22807, 5551, 12610, 995, 14111, 12399, 31439, 6225, 3555, 9640, 3660, 18871, 3714, 4117, 2407, 11703, 1863, 5551, 3794, 32277, 1829, 9154, 3714, 4117, 2407, 8315, 2655, 2423, 39184, 17461, 15966, 20588, 23554, 2271, 12399, 3714, 33599, 1829, 22807, 4032, 1975, 5016, 8315, 12174, 995, 22488, 9957, 44945, 8592, 2655, 17082, 1211, 15844, 8608, 9566, 5016, 2423, 39184, 17461, 15966, 12399, 12174, 1211, 8315, 5296, 15042, 1975, 2655, 1863, 8592, 9566, 2655, 12399, 1975, 25957, 1211, 2423, 39184, 1346, 42381, 2776, 12399, 23032, 1211, 
3615, 9154, 11296, 3714, 9957, 22807, 20666, 39184, 41645, 3548, 259, 12399, 41645, 3548, 259, 486, 14786, 281, 739, 3548, 298, 378, 17701, 12399, 3714, 33599, 1829, 22807, 293], "avg_logprob": -0.2717592654404817, "compression_ratio": 1.6746987951807228, "no_speech_prob": 3.0994415283203125e-06, "words": [{"start": 835.97, "end": 836.39, "word": "بالـ", "probability": 0.625732421875}, {"start": 836.39, "end": 837.31, "word": " thrombomodulin،", "probability": 0.7308553059895834}, {"start": 837.31, "end": 837.41, "word": " من", "probability": 0.8583984375}, {"start": 837.41, "end": 837.59, "word": " أين", "probability": 0.910400390625}, {"start": 837.59, "end": 837.77, "word": " جاء", "probability": 0.8984375}, {"start": 837.77, "end": 837.87, "word": " الـ", "probability": 0.641357421875}, {"start": 837.87, "end": 839.73, "word": " thrombomodulin؟", "probability": 0.9453938802083334}, {"start": 839.73, "end": 840.15, "word": " أولاً،", "probability": 0.695947265625}, {"start": 840.15, "end": 840.23, "word": " هو", "probability": 0.93701171875}, {"start": 840.23, "end": 840.51, "word": " عبارة", "probability": 0.8829345703125}, {"start": 840.51, "end": 840.69, "word": " عن", "probability": 0.994140625}, {"start": 840.69, "end": 841.17, "word": " مكوّن", "probability": 0.784375}, {"start": 841.17, "end": 842.01, "word": " أساسي", "probability": 0.96240234375}, {"start": 842.01, "end": 842.13, "word": " من", "probability": 0.93408203125}, {"start": 842.13, "end": 842.83, "word": " مكونات", "probability": 0.9162109375}, {"start": 842.83, "end": 843.09, "word": " الـ", "probability": 0.84423828125}, {"start": 843.09, "end": 843.45, "word": " Platelet", "probability": 0.5999755859375}, {"start": 843.45, "end": 843.75, "word": " Alpha", "probability": 0.63232421875}, {"start": 843.75, "end": 845.23, "word": " Granule،", "probability": 0.4203287760416667}, {"start": 845.23, "end": 846.23, "word": " ماشي؟", "probability": 0.739013671875}, {"start": 846.23, "end": 847.63, 
"word": " و", "probability": 0.7265625}, {"start": 847.63, "end": 848.17, "word": " احنا", "probability": 0.768310546875}, {"start": 848.17, "end": 848.49, "word": " قاعدين", "probability": 0.8681640625}, {"start": 848.49, "end": 848.93, "word": " بنشتغل", "probability": 0.89453125}, {"start": 848.93, "end": 849.13, "word": " على", "probability": 0.966796875}, {"start": 849.13, "end": 849.49, "word": " سطح", "probability": 0.9866536458333334}, {"start": 849.49, "end": 849.77, "word": " الـ", "probability": 0.5162353515625}, {"start": 849.77, "end": 850.71, "word": " Platelet،", "probability": 0.7045084635416666}, {"start": 850.71, "end": 851.07, "word": " قلنا", "probability": 0.764404296875}, {"start": 851.07, "end": 851.31, "word": " لما", "probability": 0.893310546875}, {"start": 851.31, "end": 852.01, "word": " اتنشطت،", "probability": 0.8974609375}, {"start": 852.01, "end": 852.25, "word": " اعمل", "probability": 0.612060546875}, {"start": 852.25, "end": 852.45, "word": " الـ", "probability": 0.721923828125}, {"start": 852.45, "end": 853.23, "word": " Degranulation،", "probability": 0.754150390625}, {"start": 853.23, "end": 853.43, "word": " طلع", "probability": 0.9580078125}, {"start": 853.43, "end": 853.71, "word": " منها", "probability": 0.986572265625}, {"start": 853.71, "end": 855.03, "word": " مين؟", "probability": 0.9498697916666666}, {"start": 855.03, "end": 855.83, "word": " بالـ", "probability": 0.39202880859375}, {"start": 855.83, "end": 857.27, "word": " Thrombin،", "probability": 0.8192138671875}, {"start": 857.27, "end": 858.11, "word": " Thrombin", "probability": 0.7482096354166666}, {"start": 858.11, "end": 858.91, "word": " will", "probability": 0.41259765625}, {"start": 858.91, "end": 859.45, "word": " bind", "probability": 0.94677734375}, {"start": 859.45, "end": 860.65, "word": " to", "probability": 0.88720703125}, {"start": 860.65, "end": 862.35, "word": " thrombomodulin،", "probability": 0.9142252604166666}, {"start": 862.35, "end": 
864.05, "word": " ماشي؟", "probability": 0.929443359375}, {"start": 864.05, "end": 865.07, "word": " and", "probability": 0.79833984375}], "temperature": 1.0}, {"id": 35, "seek": 89478, "start": 865.82, "end": 894.78, "text": "في موضوع الكالسيوم صار في عنا ثرمبل و ثرمبل موديورين و كالسيوم هذا الكمبليكس سيحول إلى بروتين C بالظبط و بيحولوا ليه؟ to active form فبتحول إلى activated بروتين C ماشي؟ شو اللي صار في هذا العالم؟ كولنا شباب كولنا Quaternary Complex", "tokens": [41185, 3714, 2407, 11242, 45367, 33251, 6027, 3794, 1829, 20498, 20328, 9640, 8978, 6225, 8315, 38637, 2288, 2304, 36150, 4032, 38637, 2288, 2304, 36150, 3714, 2407, 16254, 13063, 9957, 4032, 9122, 6027, 3794, 1829, 20498, 23758, 2423, 24793, 3555, 20292, 4117, 3794, 8608, 1829, 5016, 12610, 30731, 4724, 32887, 2655, 9957, 383, 20666, 19913, 3555, 9566, 4032, 4724, 1829, 5016, 12610, 14407, 32239, 3224, 22807, 281, 4967, 1254, 6156, 3555, 2655, 5016, 12610, 30731, 18157, 4724, 32887, 2655, 9957, 383, 3714, 33599, 1829, 22807, 13412, 2407, 13672, 1829, 20328, 9640, 8978, 23758, 18863, 45340, 22807, 9122, 12610, 8315, 13412, 3555, 16758, 9122, 12610, 8315, 2326, 771, 77, 822, 41184], "avg_logprob": -0.4230113530700857, "compression_ratio": 1.7557603686635945, "no_speech_prob": 0.0, "words": [{"start": 865.82, "end": 866.22, "word": "في", "probability": 0.438720703125}, {"start": 866.22, "end": 866.84, "word": " موضوع", "probability": 0.55450439453125}, {"start": 866.84, "end": 868.14, "word": " الكالسيوم", "probability": 0.77841796875}, {"start": 868.14, "end": 869.4, "word": " صار", "probability": 0.5117950439453125}, {"start": 869.4, "end": 869.5, "word": " في", "probability": 0.67822265625}, {"start": 869.5, "end": 869.8, "word": " عنا", "probability": 0.610107421875}, {"start": 869.8, "end": 870.42, "word": " ثرمبل", "probability": 0.67138671875}, {"start": 870.42, "end": 871.18, "word": " و", "probability": 0.724609375}, {"start": 871.18, "end": 871.68, "word": " ثرمبل", "probability": 
0.67071533203125}, {"start": 871.68, "end": 872.24, "word": " موديورين", "probability": 0.696484375}, {"start": 872.24, "end": 872.86, "word": " و", "probability": 0.93115234375}, {"start": 872.86, "end": 873.64, "word": " كالسيوم", "probability": 0.9041015625}, {"start": 873.64, "end": 874.4, "word": " هذا", "probability": 0.3388671875}, {"start": 874.4, "end": 875.14, "word": " الكمبليكس", "probability": 0.649169921875}, {"start": 875.14, "end": 875.92, "word": " سيحول", "probability": 0.3916015625}, {"start": 875.92, "end": 877.06, "word": " إلى", "probability": 0.427734375}, {"start": 877.06, "end": 877.72, "word": " بروتين", "probability": 0.7783203125}, {"start": 877.72, "end": 878.12, "word": " C", "probability": 0.671875}, {"start": 878.12, "end": 881.04, "word": " بالظبط", "probability": 0.75970458984375}, {"start": 881.04, "end": 881.2, "word": " و", "probability": 0.67578125}, {"start": 881.2, "end": 881.74, "word": " بيحولوا", "probability": 0.777001953125}, {"start": 881.74, "end": 882.3, "word": " ليه؟", "probability": 0.6266276041666666}, {"start": 882.3, "end": 882.94, "word": " to", "probability": 0.26953125}, {"start": 882.94, "end": 883.34, "word": " active", "probability": 0.84130859375}, {"start": 883.34, "end": 883.76, "word": " form", "probability": 0.91552734375}, {"start": 883.76, "end": 884.36, "word": " فبتحول", "probability": 0.89814453125}, {"start": 884.36, "end": 884.6, "word": " إلى", "probability": 0.4599609375}, {"start": 884.6, "end": 885.48, "word": " activated", "probability": 0.6689453125}, {"start": 885.48, "end": 886.78, "word": " بروتين", "probability": 0.7958984375}, {"start": 886.78, "end": 887.54, "word": " C", "probability": 0.9697265625}, {"start": 887.54, "end": 889.14, "word": " ماشي؟", "probability": 0.7447509765625}, {"start": 889.14, "end": 889.86, "word": " شو", "probability": 0.70361328125}, {"start": 889.86, "end": 889.98, "word": " اللي", "probability": 0.9833984375}, {"start": 889.98, "end": 890.22, "word": " 
صار", "probability": 0.99267578125}, {"start": 890.22, "end": 890.3, "word": " في", "probability": 0.9580078125}, {"start": 890.3, "end": 890.5, "word": " هذا", "probability": 0.6337890625}, {"start": 890.5, "end": 891.44, "word": " العالم؟", "probability": 0.3876139322916667}, {"start": 891.44, "end": 892.08, "word": " كولنا", "probability": 0.8826497395833334}, {"start": 892.08, "end": 892.76, "word": " شباب", "probability": 0.9729817708333334}, {"start": 892.76, "end": 893.38, "word": " كولنا", "probability": 0.8972981770833334}, {"start": 893.38, "end": 894.28, "word": " Quaternary", "probability": 0.75054931640625}, {"start": 894.28, "end": 894.78, "word": " Complex", "probability": 0.7431640625}], "temperature": 1.0}, {"id": 36, "seek": 92000, "start": 896.16, "end": 920.0, "text": "وفي الهيوستاتيك ميكانيزم شرط أساسي للتنشيط يكون كواتيرناري كومبليكس شفنا خمسة التنازل بروثرومبينيزل يكونوا كواتيرناري ملعق وهنا كوّننا كواتيرناري كومبليكس يتكوّن من ثرومبل ثرومبوموديوليد كالسيوم وبروتينسيوم محصلة اكتبيت البروتينسيوم", "tokens": [38688, 1829, 2423, 3224, 1829, 2407, 14851, 9307, 1829, 4117, 3714, 1829, 41361, 1829, 11622, 2304, 13412, 2288, 9566, 5551, 3794, 32277, 1829, 24976, 2655, 1863, 8592, 1829, 9566, 7251, 30544, 9122, 2407, 9307, 13546, 1863, 9640, 1829, 9122, 20498, 3555, 20292, 4117, 3794, 13412, 5172, 8315, 16490, 2304, 3794, 3660, 16712, 1863, 31377, 1211, 4724, 32887, 12984, 2288, 20498, 3555, 9957, 1829, 11622, 1211, 7251, 30544, 14407, 9122, 2407, 9307, 13546, 1863, 9640, 1829, 3714, 1211, 3615, 4587, 37037, 8315, 9122, 2407, 11703, 1863, 8315, 9122, 2407, 9307, 13546, 1863, 9640, 1829, 9122, 20498, 3555, 20292, 4117, 3794, 7251, 2655, 4117, 2407, 11703, 1863, 9154, 38637, 2288, 20498, 3555, 1211, 38637, 2288, 20498, 3555, 20498, 2407, 16254, 12610, 25708, 9122, 6027, 3794, 1829, 20498, 4032, 26890, 35473, 9957, 3794, 1829, 20498, 3714, 5016, 36520, 3660, 1975, 4117, 2655, 21292, 2655, 2423, 26890, 35473, 9957, 3794, 1829, 20498], "avg_logprob": 
-0.397441281568284, "compression_ratio": 2.1691542288557213, "no_speech_prob": 0.0, "words": [{"start": 896.16, "end": 896.92, "word": "وفي", "probability": 0.4947967529296875}, {"start": 896.92, "end": 897.48, "word": " الهيوستاتيك", "probability": 0.736541748046875}, {"start": 897.48, "end": 897.98, "word": " ميكانيزم", "probability": 0.7784423828125}, {"start": 897.98, "end": 898.28, "word": " شرط", "probability": 0.751708984375}, {"start": 898.28, "end": 898.88, "word": " أساسي", "probability": 0.935546875}, {"start": 898.88, "end": 899.94, "word": " للتنشيط", "probability": 0.9204915364583334}, {"start": 899.94, "end": 900.28, "word": " يكون", "probability": 0.5126953125}, {"start": 900.28, "end": 901.14, "word": " كواتيرناري", "probability": 0.5612095424107143}, {"start": 901.14, "end": 901.7, "word": " كومبليكس", "probability": 0.7273356119791666}, {"start": 901.7, "end": 902.64, "word": " شفنا", "probability": 0.5894368489583334}, {"start": 902.64, "end": 904.34, "word": " خمسة", "probability": 0.8931884765625}, {"start": 904.34, "end": 904.96, "word": " التنازل", "probability": 0.421173095703125}, {"start": 904.96, "end": 905.84, "word": " بروثرومبينيزل", "probability": 0.6965087890625}, {"start": 905.84, "end": 906.34, "word": " يكونوا", "probability": 0.5858154296875}, {"start": 906.34, "end": 907.32, "word": " كواتيرناري", "probability": 0.8191615513392857}, {"start": 907.32, "end": 907.66, "word": " ملعق", "probability": 0.589630126953125}, {"start": 907.66, "end": 908.18, "word": " وهنا", "probability": 0.4195556640625}, {"start": 908.18, "end": 909.94, "word": " كوّننا", "probability": 0.528857421875}, {"start": 909.94, "end": 910.74, "word": " كواتيرناري", "probability": 0.9365931919642857}, {"start": 910.74, "end": 911.28, "word": " كومبليكس", "probability": 0.9674479166666666}, {"start": 911.28, "end": 912.04, "word": " يتكوّن", "probability": 0.6197713216145834}, {"start": 912.04, "end": 912.16, "word": " من", "probability": 0.96337890625}, 
{"start": 912.16, "end": 912.66, "word": " ثرومبل", "probability": 0.649267578125}, {"start": 912.66, "end": 914.1, "word": " ثرومبوموديوليد", "probability": 0.7449408637152778}, {"start": 914.1, "end": 915.26, "word": " كالسيوم", "probability": 0.84501953125}, {"start": 915.26, "end": 916.52, "word": " وبروتينسيوم", "probability": 0.7660435267857143}, {"start": 916.52, "end": 917.84, "word": " محصلة", "probability": 0.7919921875}, {"start": 917.84, "end": 918.96, "word": " اكتبيت", "probability": 0.651611328125}, {"start": 918.96, "end": 920.0, "word": " البروتينسيوم", "probability": 0.9043317522321429}], "temperature": 1.0}, {"id": 37, "seek": 94915, "start": 921.09, "end": 949.15, "text": "متابعين معاها وقولنا هذا الكملة اسمها ما بيتكون إلا at high level of its thrombin صار في عندنا activated thrombin البروتين C this molecule will bind to its cofactor في ال cofactor تبعه مين ال cofactor تبعه البروتين S مين ال cofactor البروتين S ماشي", "tokens": [2304, 2655, 16758, 3615, 9957, 20449, 995, 11296, 4032, 39648, 8315, 23758, 2423, 24793, 37977, 24525, 2304, 11296, 19446, 4724, 1829, 2655, 4117, 2407, 1863, 11933, 15040, 412, 1090, 1496, 295, 1080, 739, 3548, 259, 20328, 9640, 8978, 43242, 8315, 18157, 739, 3548, 259, 2423, 26890, 35473, 9957, 383, 341, 15582, 486, 14786, 281, 1080, 598, 69, 15104, 8978, 2423, 598, 69, 15104, 6055, 3555, 3615, 3224, 3714, 9957, 2423, 598, 69, 15104, 6055, 3555, 3615, 3224, 2423, 26890, 35473, 9957, 318, 3714, 9957, 2423, 598, 69, 15104, 2423, 26890, 35473, 9957, 318, 3714, 33599, 1829], "avg_logprob": -0.27835051546391754, "compression_ratio": 1.755, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 921.09, "end": 921.55, "word": "متابعين", "probability": 0.768212890625}, {"start": 921.55, "end": 921.97, "word": " معاها", "probability": 0.931640625}, {"start": 921.97, "end": 922.89, "word": " وقولنا", "probability": 0.6309407552083334}, {"start": 922.89, "end": 923.09, "word": " هذا", "probability": 0.84521484375}, 
{"start": 923.09, "end": 923.49, "word": " الكملة", "probability": 0.7428385416666666}, {"start": 923.49, "end": 923.71, "word": " اسمها", "probability": 0.9140625}, {"start": 923.71, "end": 923.75, "word": " ما", "probability": 0.57861328125}, {"start": 923.75, "end": 924.23, "word": " بيتكون", "probability": 0.65234375}, {"start": 924.23, "end": 924.57, "word": " إلا", "probability": 0.68408203125}, {"start": 924.57, "end": 925.21, "word": " at", "probability": 0.64208984375}, {"start": 925.21, "end": 925.57, "word": " high", "probability": 0.83154296875}, {"start": 925.57, "end": 926.01, "word": " level", "probability": 0.9599609375}, {"start": 926.01, "end": 926.39, "word": " of", "probability": 0.96337890625}, {"start": 926.39, "end": 927.05, "word": " its", "probability": 0.0960693359375}, {"start": 927.05, "end": 927.87, "word": " thrombin", "probability": 0.4854329427083333}, {"start": 927.87, "end": 928.43, "word": " صار", "probability": 0.451904296875}, {"start": 928.43, "end": 928.53, "word": " في", "probability": 0.791015625}, {"start": 928.53, "end": 928.73, "word": " عندنا", "probability": 0.887939453125}, {"start": 928.73, "end": 929.19, "word": " activated", "probability": 0.7978515625}, {"start": 929.19, "end": 929.81, "word": " thrombin", "probability": 0.8673502604166666}, {"start": 929.81, "end": 930.21, "word": " البروتين", "probability": 0.77972412109375}, {"start": 930.21, "end": 930.47, "word": " C", "probability": 0.36083984375}, {"start": 930.47, "end": 932.47, "word": " this", "probability": 0.46240234375}, {"start": 932.47, "end": 935.19, "word": " molecule", "probability": 0.810546875}, {"start": 935.19, "end": 935.63, "word": " will", "probability": 0.9111328125}, {"start": 935.63, "end": 936.43, "word": " bind", "probability": 0.90673828125}, {"start": 936.43, "end": 937.09, "word": " to", "probability": 0.9580078125}, {"start": 937.09, "end": 937.43, "word": " its", "probability": 0.8623046875}, {"start": 937.43, "end": 938.29, 
"word": " cofactor", "probability": 0.8453776041666666}, {"start": 938.29, "end": 939.05, "word": " في", "probability": 0.84326171875}, {"start": 939.05, "end": 939.21, "word": " ال", "probability": 0.95654296875}, {"start": 939.21, "end": 939.87, "word": " cofactor", "probability": 0.82373046875}, {"start": 939.87, "end": 940.47, "word": " تبعه", "probability": 0.9686279296875}, {"start": 940.47, "end": 940.97, "word": " مين", "probability": 0.858154296875}, {"start": 940.97, "end": 941.11, "word": " ال", "probability": 0.97021484375}, {"start": 941.11, "end": 941.59, "word": " cofactor", "probability": 0.9283854166666666}, {"start": 941.59, "end": 942.29, "word": " تبعه", "probability": 0.9815673828125}, {"start": 942.29, "end": 943.71, "word": " البروتين", "probability": 0.78155517578125}, {"start": 943.71, "end": 944.29, "word": " S", "probability": 0.9189453125}, {"start": 944.29, "end": 945.31, "word": " مين", "probability": 0.9482421875}, {"start": 945.31, "end": 945.65, "word": " ال", "probability": 0.97216796875}, {"start": 945.65, "end": 946.69, "word": " cofactor", "probability": 0.92578125}, {"start": 946.69, "end": 947.83, "word": " البروتين", "probability": 0.96630859375}, {"start": 947.83, "end": 948.29, "word": " S", "probability": 0.96923828125}, {"start": 948.29, "end": 949.15, "word": " ماشي", "probability": 0.861328125}], "temperature": 1.0}, {"id": 38, "seek": 98006, "start": 950.9, "end": 980.06, "text": "اكتفت البروتين C في وجود البروتين S يعني it will bind to protein S in the presence of كالسيوم في وجود الكالسيوم شو بيعمل؟ يعني هو قاعد بيكون quaternary complex تاني ماشي برتبط بالactivated form of factor خمسة وتمانية وبيحولهم بيعملهم degradation بيحولهم إلى inactivated form عيد تاني", "tokens": [995, 4117, 2655, 5172, 2655, 2423, 26890, 35473, 9957, 383, 8978, 49610, 23328, 2423, 26890, 35473, 9957, 318, 37495, 22653, 309, 486, 14786, 281, 7944, 318, 294, 264, 6814, 295, 9122, 6027, 3794, 1829, 20498, 8978, 49610, 23328, 33251, 6027, 3794, 
1829, 20498, 13412, 2407, 4724, 1829, 25957, 1211, 22807, 37495, 22653, 31439, 12174, 995, 22488, 4724, 1829, 30544, 421, 771, 77, 822, 3997, 6055, 7649, 1829, 3714, 33599, 1829, 4724, 43500, 3555, 9566, 20666, 23397, 770, 1254, 295, 5952, 16490, 2304, 3794, 3660, 34683, 2304, 7649, 10632, 4032, 21292, 5016, 12610, 16095, 4724, 1829, 25957, 1211, 16095, 40519, 4724, 1829, 5016, 12610, 16095, 30731, 294, 23397, 770, 1254, 6225, 25708, 6055, 7649, 1829], "avg_logprob": -0.2801630382952483, "compression_ratio": 1.6991869918699187, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 950.9, "end": 951.7, "word": "اكتفت", "probability": 0.604736328125}, {"start": 951.7, "end": 952.22, "word": " البروتين", "probability": 0.74786376953125}, {"start": 952.22, "end": 952.52, "word": " C", "probability": 0.35302734375}, {"start": 952.52, "end": 954.22, "word": " في", "probability": 0.52490234375}, {"start": 954.22, "end": 954.78, "word": " وجود", "probability": 0.883056640625}, {"start": 954.78, "end": 955.56, "word": " البروتين", "probability": 0.9478759765625}, {"start": 955.56, "end": 955.82, "word": " S", "probability": 0.78125}, {"start": 955.82, "end": 956.1, "word": " يعني", "probability": 0.5665283203125}, {"start": 956.1, "end": 956.24, "word": " it", "probability": 0.1123046875}, {"start": 956.24, "end": 956.44, "word": " will", "probability": 0.7958984375}, {"start": 956.44, "end": 956.72, "word": " bind", "probability": 0.921875}, {"start": 956.72, "end": 956.9, "word": " to", "probability": 0.8759765625}, {"start": 956.9, "end": 957.24, "word": " protein", "probability": 0.67822265625}, {"start": 957.24, "end": 957.6, "word": " S", "probability": 0.88525390625}, {"start": 957.6, "end": 958.66, "word": " in", "probability": 0.345947265625}, {"start": 958.66, "end": 958.88, "word": " the", "probability": 0.9013671875}, {"start": 958.88, "end": 959.24, "word": " presence", "probability": 0.95849609375}, {"start": 959.24, "end": 959.52, "word": " of", 
"probability": 0.95556640625}, {"start": 959.52, "end": 960.04, "word": " كالسيوم", "probability": 0.816015625}, {"start": 960.04, "end": 960.24, "word": " في", "probability": 0.75537109375}, {"start": 960.24, "end": 960.58, "word": " وجود", "probability": 0.968505859375}, {"start": 960.58, "end": 961.32, "word": " الكالسيوم", "probability": 0.879541015625}, {"start": 961.32, "end": 961.78, "word": " شو", "probability": 0.6578369140625}, {"start": 961.78, "end": 962.76, "word": " بيعمل؟", "probability": 0.89775390625}, {"start": 962.76, "end": 963.24, "word": " يعني", "probability": 0.8125}, {"start": 963.24, "end": 963.32, "word": " هو", "probability": 0.87841796875}, {"start": 963.32, "end": 963.54, "word": " قاعد", "probability": 0.7445475260416666}, {"start": 963.54, "end": 963.96, "word": " بيكون", "probability": 0.7744140625}, {"start": 963.96, "end": 964.78, "word": " quaternary", "probability": 0.7330322265625}, {"start": 964.78, "end": 965.26, "word": " complex", "probability": 0.93115234375}, {"start": 965.26, "end": 965.76, "word": " تاني", "probability": 0.9705403645833334}, {"start": 965.76, "end": 966.88, "word": " ماشي", "probability": 0.767578125}, {"start": 966.88, "end": 967.96, "word": " برتبط", "probability": 0.6990966796875}, {"start": 967.96, "end": 969.38, "word": " بالactivated", "probability": 0.7958984375}, {"start": 969.38, "end": 969.94, "word": " form", "probability": 0.92919921875}, {"start": 969.94, "end": 970.4, "word": " of", "probability": 0.95263671875}, {"start": 970.4, "end": 970.84, "word": " factor", "probability": 0.84375}, {"start": 970.84, "end": 971.56, "word": " خمسة", "probability": 0.9376220703125}, {"start": 971.56, "end": 972.44, "word": " وتمانية", "probability": 0.867919921875}, {"start": 972.44, "end": 973.92, "word": " وبيحولهم", "probability": 0.864453125}, {"start": 973.92, "end": 974.4, "word": " بيعملهم", "probability": 0.855078125}, {"start": 974.4, "end": 974.94, "word": " degradation", "probability": 
0.888671875}, {"start": 974.94, "end": 976.34, "word": " بيحولهم", "probability": 0.94697265625}, {"start": 976.34, "end": 976.68, "word": " إلى", "probability": 0.703125}, {"start": 976.68, "end": 977.74, "word": " inactivated", "probability": 0.95947265625}, {"start": 977.74, "end": 978.06, "word": " form", "probability": 0.9091796875}, {"start": 978.06, "end": 979.76, "word": " عيد", "probability": 0.4444580078125}, {"start": 979.76, "end": 980.06, "word": " تاني", "probability": 0.943359375}], "temperature": 1.0}, {"id": 39, "seek": 100972, "start": 980.66, "end": 1009.72, "text": "هذه التانية اه at high level of thrombin ايوه ماشي وزي ما انتوا شايفين ال thrombin من وين جاي ماشي at high level of thrombin it will bind to thrombomodulin لبريزاجشونكاليوشيوم و ال bind و ال protein C و بيحاولوا ال activated form of protein C هذا باطمط في protein S اللي هو cofactor له ماشي", "tokens": [3224, 24192, 16712, 7649, 10632, 1975, 3224, 412, 1090, 1496, 295, 739, 3548, 259, 1975, 1829, 2407, 3224, 3714, 33599, 1829, 4032, 11622, 1829, 19446, 16472, 2655, 14407, 13412, 995, 33911, 9957, 2423, 739, 3548, 259, 9154, 4032, 9957, 10874, 47302, 3714, 33599, 1829, 412, 1090, 1496, 295, 739, 3548, 259, 309, 486, 14786, 281, 739, 3548, 298, 378, 17701, 5296, 3555, 16572, 11622, 26108, 8592, 11536, 4117, 6027, 1829, 2407, 8592, 1829, 20498, 4032, 2423, 14786, 4032, 2423, 7944, 383, 4032, 4724, 1829, 5016, 995, 12610, 14407, 2423, 18157, 1254, 295, 7944, 383, 23758, 4724, 41193, 2304, 9566, 8978, 7944, 318, 13672, 1829, 31439, 598, 69, 15104, 46740, 3714, 33599, 1829], "avg_logprob": -0.3797013189940326, "compression_ratio": 1.7155172413793103, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 980.66, "end": 981.02, "word": "هذه", "probability": 0.4466552734375}, {"start": 981.02, "end": 981.44, "word": " التانية", "probability": 0.8385416666666666}, {"start": 981.44, "end": 981.76, "word": " اه", "probability": 0.17218017578125}, {"start": 981.76, "end": 983.4, "word": " 
at", "probability": 0.45166015625}, {"start": 983.4, "end": 983.68, "word": " high", "probability": 0.85302734375}, {"start": 983.68, "end": 983.98, "word": " level", "probability": 0.9560546875}, {"start": 983.98, "end": 984.3, "word": " of", "probability": 0.97998046875}, {"start": 984.3, "end": 985.36, "word": " thrombin", "probability": 0.8444010416666666}, {"start": 985.36, "end": 987.16, "word": " ايوه", "probability": 0.6201171875}, {"start": 987.16, "end": 987.64, "word": " ماشي", "probability": 0.7706705729166666}, {"start": 987.64, "end": 988.84, "word": " وزي", "probability": 0.7683919270833334}, {"start": 988.84, "end": 988.94, "word": " ما", "probability": 0.9384765625}, {"start": 988.94, "end": 989.1, "word": " انتوا", "probability": 0.8310546875}, {"start": 989.1, "end": 989.4, "word": " شايفين", "probability": 0.9710693359375}, {"start": 989.4, "end": 989.54, "word": " ال", "probability": 0.63720703125}, {"start": 989.54, "end": 989.96, "word": " thrombin", "probability": 0.8855794270833334}, {"start": 989.96, "end": 990.12, "word": " من", "probability": 0.96826171875}, {"start": 990.12, "end": 990.32, "word": " وين", "probability": 0.799072265625}, {"start": 990.32, "end": 990.68, "word": " جاي", "probability": 0.80322265625}, {"start": 990.68, "end": 992.66, "word": " ماشي", "probability": 0.8723958333333334}, {"start": 992.66, "end": 993.04, "word": " at", "probability": 0.93408203125}, {"start": 993.04, "end": 993.28, "word": " high", "probability": 0.9287109375}, {"start": 993.28, "end": 993.56, "word": " level", "probability": 0.970703125}, {"start": 993.56, "end": 993.76, "word": " of", "probability": 0.9853515625}, {"start": 993.76, "end": 994.4, "word": " thrombin", "probability": 0.96923828125}, {"start": 994.4, "end": 995.54, "word": " it", "probability": 0.89501953125}, {"start": 995.54, "end": 995.74, "word": " will", "probability": 0.89306640625}, {"start": 995.74, "end": 996.04, "word": " bind", "probability": 0.908203125}, {"start": 
996.04, "end": 996.22, "word": " to", "probability": 0.931640625}, {"start": 996.22, "end": 997.12, "word": " thrombomodulin", "probability": 0.925390625}, {"start": 997.12, "end": 998.58, "word": " لبريزاجشونكاليوشيوم", "probability": 0.5389229910714286}, {"start": 998.58, "end": 999.56, "word": " و", "probability": 0.41748046875}, {"start": 999.56, "end": 999.7, "word": " ال", "probability": 0.06317138671875}, {"start": 999.7, "end": 999.98, "word": " bind", "probability": 0.3154296875}, {"start": 999.98, "end": 1000.2, "word": " و", "probability": 0.39599609375}, {"start": 1000.2, "end": 1000.3, "word": " ال", "probability": 0.9228515625}, {"start": 1000.3, "end": 1000.72, "word": " protein", "probability": 0.87548828125}, {"start": 1000.72, "end": 1001.0, "word": " C", "probability": 0.76611328125}, {"start": 1001.0, "end": 1001.18, "word": " و", "probability": 0.84130859375}, {"start": 1001.18, "end": 1001.62, "word": " بيحاولوا", "probability": 0.8526204427083334}, {"start": 1001.62, "end": 1001.74, "word": " ال", "probability": 0.576171875}, {"start": 1001.74, "end": 1002.16, "word": " activated", "probability": 0.446044921875}, {"start": 1002.16, "end": 1002.7, "word": " form", "probability": 0.90185546875}, {"start": 1002.7, "end": 1002.86, "word": " of", "probability": 0.95947265625}, {"start": 1002.86, "end": 1003.22, "word": " protein", "probability": 0.93115234375}, {"start": 1003.22, "end": 1003.58, "word": " C", "probability": 0.9521484375}, {"start": 1003.58, "end": 1004.64, "word": " هذا", "probability": 0.837890625}, {"start": 1004.64, "end": 1005.48, "word": " باطمط", "probability": 0.4891357421875}, {"start": 1005.48, "end": 1005.8, "word": " في", "probability": 0.94970703125}, {"start": 1005.8, "end": 1006.48, "word": " protein", "probability": 0.84521484375}, {"start": 1006.48, "end": 1006.98, "word": " S", "probability": 0.8798828125}, {"start": 1006.98, "end": 1007.8, "word": " اللي", "probability": 0.818603515625}, {"start": 1007.8, "end": 
1007.94, "word": " هو", "probability": 0.9482421875}, {"start": 1007.94, "end": 1008.5, "word": " cofactor", "probability": 0.6502278645833334}, {"start": 1008.5, "end": 1008.8, "word": " له", "probability": 0.287841796875}, {"start": 1008.8, "end": 1009.72, "word": " ماشي", "probability": 0.9283854166666666}], "temperature": 1.0}, {"id": 40, "seek": 103873, "start": 1010.39, "end": 1038.73, "text": "في وجود الكالسيو ماشي، شو بيعمل؟ it will inactivate factor 5A و factor 8A، 5A جاية من وين؟ من 10A، صح؟ و 8 من 9A، فاكرين؟ ال coagulation cascade، اه، ماشي، مش بيكونوا complexes، واحد تنهز، واحد بيبقى ترقب الناس، مظبوط، طيب،", "tokens": [41185, 49610, 23328, 33251, 6027, 3794, 1829, 2407, 3714, 33599, 1829, 12399, 13412, 2407, 4724, 1829, 25957, 1211, 22807, 309, 486, 294, 23397, 473, 5952, 1025, 32, 4032, 5952, 1649, 32, 12399, 1025, 32, 10874, 995, 10632, 9154, 4032, 9957, 22807, 9154, 1266, 32, 12399, 20328, 5016, 22807, 4032, 1649, 9154, 1722, 32, 12399, 6156, 995, 37983, 9957, 22807, 2423, 598, 559, 2776, 50080, 12399, 1975, 3224, 12399, 3714, 33599, 1829, 12399, 37893, 4724, 1829, 30544, 14407, 43676, 12399, 36764, 24401, 6055, 1863, 3224, 11622, 12399, 36764, 24401, 4724, 1829, 3555, 4587, 7578, 6055, 2288, 4587, 3555, 2423, 8315, 3794, 12399, 3714, 19913, 3555, 2407, 9566, 12399, 23032, 1829, 3555, 12399], "avg_logprob": -0.29827008502823965, "compression_ratio": 1.4889867841409692, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 1010.39, "end": 1010.67, "word": "في", "probability": 0.6796875}, {"start": 1010.67, "end": 1011.13, "word": " وجود", "probability": 0.962158203125}, {"start": 1011.13, "end": 1011.97, "word": " الكالسيو", "probability": 0.81484375}, {"start": 1011.97, "end": 1013.49, "word": " ماشي،", "probability": 0.547332763671875}, {"start": 1013.49, "end": 1013.63, "word": " شو", "probability": 0.73828125}, {"start": 1013.63, "end": 1014.19, "word": " بيعمل؟", "probability": 0.91435546875}, {"start": 1014.19, "end": 1014.79, "word": 
" it", "probability": 0.7255859375}, {"start": 1014.79, "end": 1015.09, "word": " will", "probability": 0.90673828125}, {"start": 1015.09, "end": 1016.19, "word": " inactivate", "probability": 0.92529296875}, {"start": 1016.19, "end": 1016.61, "word": " factor", "probability": 0.78955078125}, {"start": 1016.61, "end": 1017.49, "word": " 5A", "probability": 0.725341796875}, {"start": 1017.49, "end": 1018.41, "word": " و", "probability": 0.96630859375}, {"start": 1018.41, "end": 1018.71, "word": " factor", "probability": 0.67822265625}, {"start": 1018.71, "end": 1019.39, "word": " 8A،", "probability": 0.7534993489583334}, {"start": 1019.39, "end": 1019.91, "word": " 5A", "probability": 0.848876953125}, {"start": 1019.91, "end": 1020.19, "word": " جاية", "probability": 0.90380859375}, {"start": 1020.19, "end": 1020.31, "word": " من", "probability": 0.990234375}, {"start": 1020.31, "end": 1021.41, "word": " وين؟", "probability": 0.9324544270833334}, {"start": 1021.41, "end": 1021.57, "word": " من", "probability": 0.947265625}, {"start": 1021.57, "end": 1022.57, "word": " 10A،", "probability": 0.78369140625}, {"start": 1022.57, "end": 1023.83, "word": " صح؟", "probability": 0.9611002604166666}, {"start": 1023.83, "end": 1024.17, "word": " و", "probability": 0.71630859375}, {"start": 1024.17, "end": 1024.51, "word": " 8", "probability": 0.7607421875}, {"start": 1024.51, "end": 1025.49, "word": " من", "probability": 0.9365234375}, {"start": 1025.49, "end": 1026.87, "word": " 9A،", "probability": 0.8419596354166666}, {"start": 1026.87, "end": 1027.95, "word": " فاكرين؟", "probability": 0.90068359375}, {"start": 1027.95, "end": 1028.07, "word": " ال", "probability": 0.7900390625}, {"start": 1028.07, "end": 1028.59, "word": " coagulation", "probability": 0.7119547526041666}, {"start": 1028.59, "end": 1029.63, "word": " cascade،", "probability": 0.6363525390625}, {"start": 1029.63, "end": 1030.75, "word": " اه،", "probability": 0.6359049479166666}, {"start": 1030.75, "end": 
1031.65, "word": " ماشي،", "probability": 0.84222412109375}, {"start": 1031.65, "end": 1032.53, "word": " مش", "probability": 0.8837890625}, {"start": 1032.53, "end": 1032.83, "word": " بيكونوا", "probability": 0.6728515625}, {"start": 1032.83, "end": 1033.65, "word": " complexes،", "probability": 0.4813232421875}, {"start": 1033.65, "end": 1033.91, "word": " واحد", "probability": 0.9794921875}, {"start": 1033.91, "end": 1034.39, "word": " تنهز،", "probability": 0.649951171875}, {"start": 1034.39, "end": 1034.57, "word": " واحد", "probability": 0.908203125}, {"start": 1034.57, "end": 1034.79, "word": " بيبقى", "probability": 0.714697265625}, {"start": 1034.79, "end": 1035.03, "word": " ترقب", "probability": 0.6318359375}, {"start": 1035.03, "end": 1035.47, "word": " الناس،", "probability": 0.8712158203125}, {"start": 1035.47, "end": 1037.29, "word": " مظبوط،", "probability": 0.81298828125}, {"start": 1037.29, "end": 1038.73, "word": " طيب،", "probability": 0.910400390625}], "temperature": 1.0}, {"id": 41, "seek": 106295, "start": 1039.79, "end": 1062.95, "text": "أرتبط في خمسة أيه؟ تمانية أو عملهم degradation عملهم ايه؟ degradation مين اللي عمل degradation؟ protein C النشط فوق ال .. ال .. ال .. ال .. ال .. 
الوضع النشط منهم، ماشي؟ ال activated form منهم، ال activated form منهم، مافوش بقى، ماشي؟", "tokens": [10721, 43500, 3555, 9566, 8978, 16490, 2304, 3794, 3660, 36632, 3224, 22807, 46811, 7649, 10632, 34051, 6225, 42213, 16095, 40519, 6225, 42213, 16095, 1975, 1829, 3224, 22807, 40519, 3714, 9957, 13672, 1829, 6225, 42213, 40519, 22807, 7944, 383, 28239, 8592, 9566, 6156, 30543, 2423, 4386, 2423, 4386, 2423, 4386, 2423, 4386, 2423, 4386, 2423, 2407, 11242, 3615, 28239, 8592, 9566, 9154, 16095, 12399, 3714, 33599, 1829, 22807, 2423, 18157, 1254, 9154, 16095, 12399, 2423, 18157, 1254, 9154, 16095, 12399, 19446, 5172, 2407, 8592, 4724, 4587, 7578, 12399, 3714, 33599, 1829, 22807], "avg_logprob": -0.2814198450549789, "compression_ratio": 1.9550561797752808, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 1039.79, "end": 1040.37, "word": "أرتبط", "probability": 0.644744873046875}, {"start": 1040.37, "end": 1040.63, "word": " في", "probability": 0.9091796875}, {"start": 1040.63, "end": 1042.95, "word": " خمسة", "probability": 0.8004150390625}, {"start": 1042.95, "end": 1043.15, "word": " أيه؟", "probability": 0.5786946614583334}, {"start": 1043.15, "end": 1043.47, "word": " تمانية", "probability": 0.6271158854166666}, {"start": 1043.47, "end": 1043.59, "word": " أو", "probability": 0.350341796875}, {"start": 1043.59, "end": 1043.87, "word": " عملهم", "probability": 0.84228515625}, {"start": 1043.87, "end": 1044.33, "word": " degradation", "probability": 0.6884765625}, {"start": 1044.33, "end": 1045.51, "word": " عملهم", "probability": 0.762939453125}, {"start": 1045.51, "end": 1045.85, "word": " ايه؟", "probability": 0.80792236328125}, {"start": 1045.85, "end": 1046.41, "word": " degradation", "probability": 0.58154296875}, {"start": 1046.41, "end": 1046.95, "word": " مين", "probability": 0.749755859375}, {"start": 1046.95, "end": 1047.09, "word": " اللي", "probability": 0.949462890625}, {"start": 1047.09, "end": 1047.29, "word": " عمل", "probability": 
0.986328125}, {"start": 1047.29, "end": 1048.45, "word": " degradation؟", "probability": 0.81787109375}, {"start": 1048.45, "end": 1049.01, "word": " protein", "probability": 0.48046875}, {"start": 1049.01, "end": 1049.89, "word": " C", "probability": 0.71533203125}, {"start": 1049.89, "end": 1050.79, "word": " النشط", "probability": 0.7813313802083334}, {"start": 1050.79, "end": 1051.39, "word": " فوق", "probability": 0.733642578125}, {"start": 1051.39, "end": 1051.73, "word": " ال", "probability": 0.76416015625}, {"start": 1051.73, "end": 1051.73, "word": " ..", "probability": 0.427490234375}, {"start": 1051.73, "end": 1052.07, "word": " ال", "probability": 0.94775390625}, {"start": 1052.07, "end": 1052.07, "word": " ..", "probability": 0.57421875}, {"start": 1052.07, "end": 1052.37, "word": " ال", "probability": 0.97705078125}, {"start": 1052.37, "end": 1052.37, "word": " ..", "probability": 0.8291015625}, {"start": 1052.37, "end": 1052.65, "word": " ال", "probability": 0.96826171875}, {"start": 1052.65, "end": 1052.65, "word": " ..", "probability": 0.69873046875}, {"start": 1052.65, "end": 1052.79, "word": " ال", "probability": 0.95654296875}, {"start": 1052.79, "end": 1052.79, "word": " ..", "probability": 0.48095703125}, {"start": 1052.79, "end": 1053.59, "word": " الوضع", "probability": 0.8668212890625}, {"start": 1053.59, "end": 1053.99, "word": " النشط", "probability": 0.9676106770833334}, {"start": 1053.99, "end": 1054.41, "word": " منهم،", "probability": 0.7643229166666666}, {"start": 1054.41, "end": 1055.33, "word": " ماشي؟", "probability": 0.9298095703125}, {"start": 1055.33, "end": 1055.47, "word": " ال", "probability": 0.962890625}, {"start": 1055.47, "end": 1055.93, "word": " activated", "probability": 0.77734375}, {"start": 1055.93, "end": 1056.37, "word": " form", "probability": 0.8935546875}, {"start": 1056.37, "end": 1057.63, "word": " منهم،", "probability": 0.790283203125}, {"start": 1057.63, "end": 1057.75, "word": " ال", "probability": 
0.9755859375}, {"start": 1057.75, "end": 1058.29, "word": " activated", "probability": 0.9228515625}, {"start": 1058.29, "end": 1058.85, "word": " form", "probability": 0.94775390625}, {"start": 1058.85, "end": 1059.73, "word": " منهم،", "probability": 0.9615885416666666}, {"start": 1059.73, "end": 1060.11, "word": " مافوش", "probability": 0.62139892578125}, {"start": 1060.11, "end": 1061.71, "word": " بقى،", "probability": 0.851806640625}, {"start": 1061.71, "end": 1062.95, "word": " ماشي؟", "probability": 0.9561767578125}], "temperature": 1.0}, {"id": 42, "seek": 109039, "start": 1063.43, "end": 1090.39, "text": "عرفنا الميكانيزم التاني بس في نقطة هنا بدنا نحكي فيها و هي ال protein S ال protein S شباب وجدوا انه بيكون في الجسم على شاكلتين على شاكلتين ال protein S بيكون free و bound", "tokens": [3615, 28480, 8315, 9673, 1829, 41361, 1829, 11622, 2304, 16712, 7649, 1829, 4724, 3794, 8978, 8717, 47432, 3660, 34105, 47525, 8315, 8717, 5016, 4117, 1829, 8978, 11296, 4032, 39896, 2423, 7944, 318, 2423, 7944, 318, 13412, 3555, 16758, 49610, 3215, 14407, 16472, 3224, 4724, 1829, 30544, 8978, 25724, 38251, 15844, 13412, 995, 28820, 2655, 9957, 15844, 13412, 995, 28820, 2655, 9957, 2423, 7944, 318, 4724, 1829, 30544, 1737, 4032, 5472], "avg_logprob": -0.1553697191493612, "compression_ratio": 1.7169811320754718, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 1063.43, "end": 1063.85, "word": "عرفنا", "probability": 0.8258463541666666}, {"start": 1063.85, "end": 1064.31, "word": " الميكانيزم", "probability": 0.841552734375}, {"start": 1064.31, "end": 1064.81, "word": " التاني", "probability": 0.8932291666666666}, {"start": 1064.81, "end": 1065.61, "word": " بس", "probability": 0.66455078125}, {"start": 1065.61, "end": 1065.73, "word": " في", "probability": 0.72021484375}, {"start": 1065.73, "end": 1066.05, "word": " نقطة", "probability": 0.9680989583333334}, {"start": 1066.05, "end": 1066.35, "word": " هنا", "probability": 0.95849609375}, {"start": 1066.35, 
"end": 1067.29, "word": " بدنا", "probability": 0.91064453125}, {"start": 1067.29, "end": 1067.75, "word": " نحكي", "probability": 0.98486328125}, {"start": 1067.75, "end": 1068.09, "word": " فيها", "probability": 0.9921875}, {"start": 1068.09, "end": 1068.25, "word": " و", "probability": 0.398681640625}, {"start": 1068.25, "end": 1068.43, "word": " هي", "probability": 0.93408203125}, {"start": 1068.43, "end": 1068.71, "word": " ال", "probability": 0.97607421875}, {"start": 1068.71, "end": 1069.07, "word": " protein", "probability": 0.66455078125}, {"start": 1069.07, "end": 1069.51, "word": " S", "probability": 0.7255859375}, {"start": 1069.51, "end": 1070.13, "word": " ال", "probability": 0.63818359375}, {"start": 1070.13, "end": 1070.41, "word": " protein", "probability": 0.931640625}, {"start": 1070.41, "end": 1070.65, "word": " S", "probability": 0.9013671875}, {"start": 1070.65, "end": 1071.21, "word": " شباب", "probability": 0.8388671875}, {"start": 1071.21, "end": 1072.17, "word": " وجدوا", "probability": 0.9270833333333334}, {"start": 1072.17, "end": 1073.13, "word": " انه", "probability": 0.805908203125}, {"start": 1073.13, "end": 1074.19, "word": " بيكون", "probability": 0.9532877604166666}, {"start": 1074.19, "end": 1074.35, "word": " في", "probability": 0.89453125}, {"start": 1074.35, "end": 1074.87, "word": " الجسم", "probability": 0.98779296875}, {"start": 1074.87, "end": 1076.09, "word": " على", "probability": 0.86865234375}, {"start": 1076.09, "end": 1077.25, "word": " شاكلتين", "probability": 0.89384765625}, {"start": 1077.25, "end": 1078.61, "word": " على", "probability": 0.462158203125}, {"start": 1078.61, "end": 1080.37, "word": " شاكلتين", "probability": 0.9900390625}, {"start": 1080.37, "end": 1081.11, "word": " ال", "probability": 0.63427734375}, {"start": 1081.11, "end": 1081.51, "word": " protein", "probability": 0.962890625}, {"start": 1081.51, "end": 1082.13, "word": " S", "probability": 0.9150390625}, {"start": 1082.13, "end": 1086.15, 
"word": " بيكون", "probability": 0.9132486979166666}, {"start": 1086.15, "end": 1086.69, "word": " free", "probability": 0.68505859375}, {"start": 1086.69, "end": 1089.73, "word": " و", "probability": 0.82958984375}, {"start": 1089.73, "end": 1090.39, "word": " bound", "probability": 0.91748046875}], "temperature": 1.0}, {"id": 43, "seek": 112155, "start": 1094.07, "end": 1121.55, "text": "الـ free form بيشكل حوالي 40% بينما ال bound form بيشكل 60% ماشي؟ والنسبة هذه مهمة جدا ليش مهمة جدا؟ لأن أي لعب في هذه النسبة هيؤثر على ال free form of protein S", "tokens": [6027, 39184, 1737, 1254, 4724, 1829, 8592, 28820, 11331, 2407, 6027, 1829, 3356, 4, 49374, 15042, 2423, 5472, 1254, 4724, 1829, 8592, 28820, 4060, 4, 3714, 33599, 1829, 22807, 16070, 1863, 35457, 3660, 29538, 3714, 16095, 3660, 10874, 28259, 32239, 8592, 3714, 16095, 3660, 10874, 28259, 22807, 5296, 33456, 36632, 5296, 3615, 3555, 8978, 29538, 28239, 35457, 3660, 39896, 33604, 49115, 15844, 2423, 1737, 1254, 295, 7944, 318], "avg_logprob": -0.1559103260869565, "compression_ratio": 1.521472392638037, "no_speech_prob": 0.0, "words": [{"start": 1094.07, "end": 1094.39, "word": "الـ", "probability": 0.806884765625}, {"start": 1094.39, "end": 1094.57, "word": " free", "probability": 0.265380859375}, {"start": 1094.57, "end": 1094.99, "word": " form", "probability": 0.78076171875}, {"start": 1094.99, "end": 1095.51, "word": " بيشكل", "probability": 0.875}, {"start": 1095.51, "end": 1095.87, "word": " حوالي", "probability": 0.8360595703125}, {"start": 1095.87, "end": 1096.27, "word": " 40", "probability": 0.8173828125}, {"start": 1096.27, "end": 1096.81, "word": "%", "probability": 0.93505859375}, {"start": 1096.81, "end": 1098.77, "word": " بينما", "probability": 0.959716796875}, {"start": 1098.77, "end": 1098.95, "word": " ال", "probability": 0.91015625}, {"start": 1098.95, "end": 1099.33, "word": " bound", "probability": 0.6474609375}, {"start": 1099.33, "end": 1100.03, "word": " form", "probability": 
0.9521484375}, {"start": 1100.03, "end": 1101.53, "word": " بيشكل", "probability": 0.9852294921875}, {"start": 1101.53, "end": 1102.17, "word": " 60", "probability": 0.94189453125}, {"start": 1102.17, "end": 1103.09, "word": "%", "probability": 0.97900390625}, {"start": 1103.09, "end": 1107.33, "word": " ماشي؟", "probability": 0.7183837890625}, {"start": 1107.33, "end": 1110.81, "word": " والنسبة", "probability": 0.8753662109375}, {"start": 1110.81, "end": 1111.05, "word": " هذه", "probability": 0.7431640625}, {"start": 1111.05, "end": 1111.51, "word": " مهمة", "probability": 0.9825846354166666}, {"start": 1111.51, "end": 1111.83, "word": " جدا", "probability": 0.99267578125}, {"start": 1111.83, "end": 1113.87, "word": " ليش", "probability": 0.799072265625}, {"start": 1113.87, "end": 1114.29, "word": " مهمة", "probability": 0.9860026041666666}, {"start": 1114.29, "end": 1114.67, "word": " جدا؟", "probability": 0.9287109375}, {"start": 1114.67, "end": 1114.85, "word": " لأن", "probability": 0.849365234375}, {"start": 1114.85, "end": 1115.11, "word": " أي", "probability": 0.6416015625}, {"start": 1115.11, "end": 1115.47, "word": " لعب", "probability": 0.8474934895833334}, {"start": 1115.47, "end": 1115.63, "word": " في", "probability": 0.9169921875}, {"start": 1115.63, "end": 1116.01, "word": " هذه", "probability": 0.98779296875}, {"start": 1116.01, "end": 1116.71, "word": " النسبة", "probability": 0.9847005208333334}, {"start": 1116.71, "end": 1117.73, "word": " هيؤثر", "probability": 0.94091796875}, {"start": 1117.73, "end": 1118.01, "word": " على", "probability": 0.9013671875}, {"start": 1118.01, "end": 1118.59, "word": " ال", "probability": 0.90673828125}, {"start": 1118.59, "end": 1118.91, "word": " free", "probability": 0.5869140625}, {"start": 1118.91, "end": 1119.73, "word": " form", "probability": 0.92138671875}, {"start": 1119.73, "end": 1120.61, "word": " of", "probability": 0.96875}, {"start": 1120.61, "end": 1121.19, "word": " protein", "probability": 
0.81640625}, {"start": 1121.19, "end": 1121.55, "word": " S", "probability": 0.8251953125}], "temperature": 1.0}, {"id": 44, "seek": 115090, "start": 1121.88, "end": 1150.9, "text": "وهو الـ active form هو اللي بيشتغل هو اللي بيتبط ال protein C لإن اللي ممسوك مش راح يتبط بال activated protein C ماشي؟ يعني لما ييجي يشتغل ال protein C ال activated form و ال protein C إذا ما لجاش ال protein أسس بيشتغل؟ لأ، في لأ، فهمين عليا بيشتغل لكن بغضب مفهوم شباب النسبة هذه مهمة جدا اللي قاعد فيها بيؤثر indirect", "tokens": [2407, 3224, 2407, 2423, 39184, 4967, 1254, 31439, 13672, 1829, 4724, 1829, 8592, 2655, 17082, 1211, 31439, 13672, 1829, 4724, 36081, 3555, 9566, 2423, 7944, 383, 5296, 28814, 1863, 13672, 1829, 3714, 2304, 3794, 2407, 4117, 37893, 12602, 39319, 7251, 2655, 3555, 9566, 20666, 18157, 7944, 383, 3714, 33599, 1829, 22807, 37495, 22653, 5296, 15042, 7251, 1829, 7435, 1829, 7251, 8592, 2655, 17082, 1211, 2423, 7944, 383, 2423, 18157, 1254, 4032, 2423, 7944, 383, 11933, 15730, 19446, 5296, 7435, 33599, 2423, 7944, 5551, 3794, 3794, 4724, 1829, 8592, 2655, 17082, 1211, 22807, 5296, 10721, 12399, 8978, 5296, 10721, 12399, 6156, 16095, 9957, 11203, 25528, 4724, 1829, 8592, 2655, 17082, 1211, 44381, 4724, 17082, 11242, 3555, 3714, 5172, 3224, 20498, 13412, 3555, 16758, 28239, 35457, 3660, 29538, 3714, 16095, 3660, 10874, 28259, 13672, 1829, 12174, 995, 22488, 8978, 11296, 4724, 1829, 33604, 49115, 19523], "avg_logprob": -0.3576388756434123, "compression_ratio": 1.9603174603174602, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1121.88, "end": 1122.26, "word": "وهو", "probability": 0.5635579427083334}, {"start": 1122.26, "end": 1122.5, "word": " الـ", "probability": 0.3253173828125}, {"start": 1122.5, "end": 1122.86, "word": " active", "probability": 0.2386474609375}, {"start": 1122.86, "end": 1123.28, "word": " form", "probability": 0.8369140625}, {"start": 1123.28, "end": 1123.82, "word": " هو", "probability": 0.5966796875}, {"start": 1123.82, "end": 
1123.96, "word": " اللي", "probability": 0.868408203125}, {"start": 1123.96, "end": 1124.3, "word": " بيشتغل", "probability": 0.887939453125}, {"start": 1124.3, "end": 1124.54, "word": " هو", "probability": 0.8251953125}, {"start": 1124.54, "end": 1124.62, "word": " اللي", "probability": 0.974365234375}, {"start": 1124.62, "end": 1124.94, "word": " بيتبط", "probability": 0.5035400390625}, {"start": 1124.94, "end": 1125.04, "word": " ال", "probability": 0.64794921875}, {"start": 1125.04, "end": 1125.32, "word": " protein", "probability": 0.67431640625}, {"start": 1125.32, "end": 1125.66, "word": " C", "probability": 0.64306640625}, {"start": 1125.66, "end": 1126.28, "word": " لإن", "probability": 0.7490234375}, {"start": 1126.28, "end": 1126.44, "word": " اللي", "probability": 0.76220703125}, {"start": 1126.44, "end": 1127.9, "word": " ممسوك", "probability": 0.6796875}, {"start": 1127.9, "end": 1128.3, "word": " مش", "probability": 0.89111328125}, {"start": 1128.3, "end": 1128.46, "word": " راح", "probability": 0.942626953125}, {"start": 1128.46, "end": 1128.84, "word": " يتبط", "probability": 0.9644775390625}, {"start": 1128.84, "end": 1129.64, "word": " بال", "probability": 0.9287109375}, {"start": 1129.64, "end": 1130.1, "word": " activated", "probability": 0.28759765625}, {"start": 1130.1, "end": 1130.5, "word": " protein", "probability": 0.75}, {"start": 1130.5, "end": 1130.84, "word": " C", "probability": 0.75927734375}, {"start": 1130.84, "end": 1131.96, "word": " ماشي؟", "probability": 0.74505615234375}, {"start": 1131.96, "end": 1132.52, "word": " يعني", "probability": 0.896240234375}, {"start": 1132.52, "end": 1132.68, "word": " لما", "probability": 0.89404296875}, {"start": 1132.68, "end": 1132.9, "word": " ييجي", "probability": 0.8404541015625}, {"start": 1132.9, "end": 1133.16, "word": " يشتغل", "probability": 0.99697265625}, {"start": 1133.16, "end": 1133.24, "word": " ال", "probability": 0.40185546875}, {"start": 1133.24, "end": 1133.5, "word": " 
protein", "probability": 0.97509765625}, {"start": 1133.5, "end": 1133.86, "word": " C", "probability": 0.974609375}, {"start": 1133.86, "end": 1134.02, "word": " ال", "probability": 0.3994140625}, {"start": 1134.02, "end": 1134.32, "word": " activated", "probability": 0.79296875}, {"start": 1134.32, "end": 1134.86, "word": " form", "probability": 0.9072265625}, {"start": 1134.86, "end": 1134.98, "word": " و", "probability": 0.53173828125}, {"start": 1134.98, "end": 1135.06, "word": " ال", "probability": 0.732421875}, {"start": 1135.06, "end": 1135.26, "word": " protein", "probability": 0.97900390625}, {"start": 1135.26, "end": 1135.66, "word": " C", "probability": 0.97998046875}, {"start": 1135.66, "end": 1136.22, "word": " إذا", "probability": 0.7470703125}, {"start": 1136.22, "end": 1136.34, "word": " ما", "probability": 0.75439453125}, {"start": 1136.34, "end": 1136.64, "word": " لجاش", "probability": 0.4110514322916667}, {"start": 1136.64, "end": 1136.76, "word": " ال", "probability": 0.9677734375}, {"start": 1136.76, "end": 1137.02, "word": " protein", "probability": 0.97119140625}, {"start": 1137.02, "end": 1137.38, "word": " أسس", "probability": 0.5475260416666666}, {"start": 1137.38, "end": 1138.76, "word": " بيشتغل؟", "probability": 0.8232247488839286}, {"start": 1138.76, "end": 1139.18, "word": " لأ،", "probability": 0.511962890625}, {"start": 1139.18, "end": 1139.34, "word": " في", "probability": 0.29931640625}, {"start": 1139.34, "end": 1140.12, "word": " لأ،", "probability": 0.4254557291666667}, {"start": 1140.12, "end": 1141.0, "word": " فهمين", "probability": 0.56591796875}, {"start": 1141.0, "end": 1141.22, "word": " عليا", "probability": 0.7060546875}, {"start": 1141.22, "end": 1141.58, "word": " بيشتغل", "probability": 0.947509765625}, {"start": 1141.58, "end": 1141.78, "word": " لكن", "probability": 0.65576171875}, {"start": 1141.78, "end": 1142.26, "word": " بغضب", "probability": 0.626708984375}, {"start": 1142.26, "end": 1144.48, "word": " 
مفهوم", "probability": 0.7265167236328125}, {"start": 1144.48, "end": 1144.96, "word": " شباب", "probability": 0.6923828125}, {"start": 1144.96, "end": 1146.84, "word": " النسبة", "probability": 0.74853515625}, {"start": 1146.84, "end": 1147.2, "word": " هذه", "probability": 0.92919921875}, {"start": 1147.2, "end": 1147.78, "word": " مهمة", "probability": 0.9890950520833334}, {"start": 1147.78, "end": 1148.08, "word": " جدا", "probability": 0.984619140625}, {"start": 1148.08, "end": 1148.32, "word": " اللي", "probability": 0.9736328125}, {"start": 1148.32, "end": 1148.52, "word": " قاعد", "probability": 0.62841796875}, {"start": 1148.52, "end": 1149.02, "word": " فيها", "probability": 0.98046875}, {"start": 1149.02, "end": 1150.16, "word": " بيؤثر", "probability": 0.9765625}, {"start": 1150.16, "end": 1150.9, "word": " indirect", "probability": 0.818359375}], "temperature": 1.0}, {"id": 45, "seek": 118226, "start": 1153.4, "end": 1182.26, "text": "البروتين C النشط طب ال bound 4 مرتبط بحاجة بيسموها C4B binding protein سمعتوا بال C4B C اختصار لكمplement ماشي complement رقم 4", "tokens": [6027, 26890, 35473, 9957, 383, 28239, 8592, 9566, 23032, 3555, 2423, 5472, 1017, 3714, 43500, 3555, 9566, 4724, 5016, 26108, 3660, 4724, 1829, 38251, 2407, 11296, 383, 19, 33, 17359, 7944, 8608, 2304, 34268, 14407, 20666, 383, 19, 33, 383, 1975, 46456, 9381, 9640, 5296, 24793, 43704, 3714, 33599, 1829, 17103, 12602, 4587, 2304, 1017], "avg_logprob": -0.280412957072258, "compression_ratio": 1.2567567567567568, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1153.4, "end": 1154.34, "word": "البروتين", "probability": 0.7943115234375}, {"start": 1154.34, "end": 1154.66, "word": " C", "probability": 0.5810546875}, {"start": 1154.66, "end": 1155.32, "word": " النشط", "probability": 0.8143717447916666}, {"start": 1155.32, "end": 1157.74, "word": " طب", "probability": 0.39202880859375}, {"start": 1157.74, "end": 1158.88, "word": " ال", "probability": 0.8681640625}, {"start": 
1158.88, "end": 1159.28, "word": " bound", "probability": 0.39990234375}, {"start": 1159.28, "end": 1159.76, "word": " 4", "probability": 0.3046875}, {"start": 1159.76, "end": 1160.86, "word": " مرتبط", "probability": 0.968017578125}, {"start": 1160.86, "end": 1162.3, "word": " بحاجة", "probability": 0.9464111328125}, {"start": 1162.3, "end": 1163.02, "word": " بيسموها", "probability": 0.93154296875}, {"start": 1163.02, "end": 1164.68, "word": " C4B", "probability": 0.7516276041666666}, {"start": 1164.68, "end": 1166.06, "word": " binding", "probability": 0.611328125}, {"start": 1166.06, "end": 1167.02, "word": " protein", "probability": 0.9013671875}, {"start": 1167.02, "end": 1171.36, "word": " سمعتوا", "probability": 0.894287109375}, {"start": 1171.36, "end": 1173.5, "word": " بال", "probability": 0.71533203125}, {"start": 1173.5, "end": 1175.6, "word": " C4B", "probability": 0.8439127604166666}, {"start": 1175.6, "end": 1176.76, "word": " C", "probability": 0.36865234375}, {"start": 1176.76, "end": 1177.96, "word": " اختصار", "probability": 0.9571533203125}, {"start": 1177.96, "end": 1178.7, "word": " لكمplement", "probability": 0.6400553385416666}, {"start": 1178.7, "end": 1180.06, "word": " ماشي", "probability": 0.7529296875}, {"start": 1180.06, "end": 1181.36, "word": " complement", "probability": 0.281982421875}, {"start": 1181.36, "end": 1181.9, "word": " رقم", "probability": 0.9747721354166666}, {"start": 1181.9, "end": 1182.26, "word": " 4", "probability": 0.79443359375}], "temperature": 1.0}, {"id": 46, "seek": 120715, "start": 1183.63, "end": 1207.15, "text": "النشطة ماشي C4B binding protein complement كلكم بتعرفون و هي عبارة عن سلسلة بروتيني ماشي بتعتبر أداة من أدوات ال immune system وبالتالي تنشط في حالات ال inflammation يوم ما تنشط تكتر فتمسك في بروتين C أكتر مظبوط", "tokens": [6027, 1863, 8592, 9566, 3660, 3714, 33599, 1829, 383, 19, 33, 17359, 7944, 17103, 9122, 23275, 2304, 39894, 3615, 28480, 11536, 4032, 39896, 6225, 3555, 9640, 3660, 18871, 
8608, 1211, 3794, 37977, 4724, 32887, 2655, 9957, 1829, 3714, 33599, 1829, 39894, 34268, 26890, 5551, 28259, 3660, 9154, 5551, 3215, 2407, 9307, 2423, 11992, 1185, 46599, 6027, 2655, 6027, 1829, 6055, 1863, 8592, 9566, 8978, 11331, 6027, 9307, 2423, 21613, 7251, 20498, 19446, 6055, 1863, 8592, 9566, 6055, 4117, 2655, 2288, 6156, 39237, 3794, 4117, 8978, 4724, 32887, 2655, 9957, 383, 5551, 4117, 2655, 2288, 3714, 19913, 3555, 2407, 9566], "avg_logprob": -0.22484375193715095, "compression_ratio": 1.5488372093023255, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 1183.63, "end": 1184.47, "word": "النشطة", "probability": 0.729541015625}, {"start": 1184.47, "end": 1185.07, "word": " ماشي", "probability": 0.64349365234375}, {"start": 1185.07, "end": 1186.29, "word": " C4B", "probability": 0.4750162760416667}, {"start": 1186.29, "end": 1187.37, "word": " binding", "probability": 0.2998046875}, {"start": 1187.37, "end": 1188.07, "word": " protein", "probability": 0.86962890625}, {"start": 1188.07, "end": 1188.95, "word": " complement", "probability": 0.1673583984375}, {"start": 1188.95, "end": 1189.37, "word": " كلكم", "probability": 0.8250325520833334}, {"start": 1189.37, "end": 1189.93, "word": " بتعرفون", "probability": 0.9439697265625}, {"start": 1189.93, "end": 1190.03, "word": " و", "probability": 0.40283203125}, {"start": 1190.03, "end": 1190.15, "word": " هي", "probability": 0.77392578125}, {"start": 1190.15, "end": 1190.39, "word": " عبارة", "probability": 0.9696044921875}, {"start": 1190.39, "end": 1190.57, "word": " عن", "probability": 0.9951171875}, {"start": 1190.57, "end": 1190.99, "word": " سلسلة", "probability": 0.9869384765625}, {"start": 1190.99, "end": 1191.71, "word": " بروتيني", "probability": 0.762451171875}, {"start": 1191.71, "end": 1192.39, "word": " ماشي", "probability": 0.8424479166666666}, {"start": 1192.39, "end": 1193.95, "word": " بتعتبر", "probability": 0.8878580729166666}, {"start": 1193.95, "end": 1194.55, "word": " أداة", 
"probability": 0.70556640625}, {"start": 1194.55, "end": 1194.65, "word": " من", "probability": 0.9931640625}, {"start": 1194.65, "end": 1195.11, "word": " أدوات", "probability": 0.9183349609375}, {"start": 1195.11, "end": 1195.21, "word": " ال", "probability": 0.98046875}, {"start": 1195.21, "end": 1195.57, "word": " immune", "probability": 0.6884765625}, {"start": 1195.57, "end": 1196.61, "word": " system", "probability": 0.9482421875}, {"start": 1196.61, "end": 1197.87, "word": " وبالتالي", "probability": 0.9150390625}, {"start": 1197.87, "end": 1198.55, "word": " تنشط", "probability": 0.9764404296875}, {"start": 1198.55, "end": 1199.07, "word": " في", "probability": 0.970703125}, {"start": 1199.07, "end": 1199.45, "word": " حالات", "probability": 0.94189453125}, {"start": 1199.45, "end": 1199.55, "word": " ال", "probability": 0.98388671875}, {"start": 1199.55, "end": 1200.13, "word": " inflammation", "probability": 0.8505859375}, {"start": 1200.13, "end": 1201.41, "word": " يوم", "probability": 0.8798828125}, {"start": 1201.41, "end": 1201.57, "word": " ما", "probability": 0.55712890625}, {"start": 1201.57, "end": 1202.09, "word": " تنشط", "probability": 0.994140625}, {"start": 1202.09, "end": 1202.97, "word": " تكتر", "probability": 0.8671875}, {"start": 1202.97, "end": 1204.17, "word": " فتمسك", "probability": 0.841064453125}, {"start": 1204.17, "end": 1204.27, "word": " في", "probability": 0.84228515625}, {"start": 1204.27, "end": 1204.67, "word": " بروتين", "probability": 0.793212890625}, {"start": 1204.67, "end": 1204.99, "word": " C", "probability": 0.876953125}, {"start": 1204.99, "end": 1206.53, "word": " أكتر", "probability": 0.9188232421875}, {"start": 1206.53, "end": 1207.15, "word": " مظبوط", "probability": 0.92978515625}], "temperature": 1.0}, {"id": 47, "seek": 123508, "start": 1207.69, "end": 1235.09, "text": "طب إذا مشكلة في protein S أكتر، بتقل ال protein في ال .. 
في ال E protein فوق؟ بتقل النسبة هذه، و لما تقل النسبة هذه، بتأثر على protein C، طب و لما تأثر على protein C، بتقل نشاطه، شو بيصير؟ بيصير انتقال يعني إيه؟ في .. في Clotting Clotting؟ مش Anti-coagulation يعني إيه؟ عشان هيك في ال inflammatory reaction", "tokens": [9566, 3555, 11933, 15730, 37893, 28820, 3660, 8978, 7944, 318, 5551, 4117, 2655, 2288, 12399, 39894, 4587, 1211, 2423, 7944, 8978, 2423, 4386, 8978, 2423, 462, 7944, 6156, 30543, 22807, 39894, 4587, 1211, 28239, 35457, 3660, 29538, 12399, 4032, 5296, 15042, 6055, 4587, 1211, 28239, 35457, 3660, 29538, 12399, 39894, 10721, 49115, 15844, 7944, 383, 12399, 23032, 3555, 4032, 5296, 15042, 6055, 10721, 49115, 15844, 7944, 383, 12399, 39894, 4587, 1211, 8717, 8592, 41193, 3224, 12399, 13412, 2407, 4724, 1829, 9381, 13546, 22807, 4724, 1829, 9381, 13546, 16472, 2655, 4587, 6027, 37495, 22653, 11933, 1829, 3224, 22807, 8978, 4386, 8978, 2033, 310, 783, 2033, 310, 783, 22807, 37893, 27757, 12, 1291, 559, 2776, 37495, 22653, 11933, 1829, 3224, 22807, 6225, 8592, 7649, 39896, 4117, 8978, 2423, 38199, 5480], "avg_logprob": -0.25339147286821706, "compression_ratio": 1.8847736625514404, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1207.69, "end": 1207.93, "word": "طب", "probability": 0.892578125}, {"start": 1207.93, "end": 1208.05, "word": " إذا", "probability": 0.84423828125}, {"start": 1208.05, "end": 1208.45, "word": " مشكلة", "probability": 0.85498046875}, {"start": 1208.45, "end": 1208.57, "word": " في", "probability": 0.95263671875}, {"start": 1208.57, "end": 1208.97, "word": " protein", "probability": 0.80859375}, {"start": 1208.97, "end": 1209.23, "word": " S", "probability": 0.69482421875}, {"start": 1209.23, "end": 1209.77, "word": " أكتر،", "probability": 0.8388671875}, {"start": 1209.77, "end": 1210.39, "word": " بتقل", "probability": 0.7278645833333334}, {"start": 1210.39, "end": 1210.49, "word": " ال", "probability": 0.55322265625}, {"start": 1210.49, "end": 1210.85, "word": " protein", 
"probability": 0.84765625}, {"start": 1210.85, "end": 1211.25, "word": " في", "probability": 0.6533203125}, {"start": 1211.25, "end": 1211.39, "word": " ال", "probability": 0.309814453125}, {"start": 1211.39, "end": 1211.53, "word": " ..", "probability": 0.466064453125}, {"start": 1211.53, "end": 1211.89, "word": " في", "probability": 0.83154296875}, {"start": 1211.89, "end": 1211.97, "word": " ال", "probability": 0.8037109375}, {"start": 1211.97, "end": 1212.03, "word": " E", "probability": 0.4521484375}, {"start": 1212.03, "end": 1212.41, "word": " protein", "probability": 0.38525390625}, {"start": 1212.41, "end": 1212.87, "word": " فوق؟", "probability": 0.8642578125}, {"start": 1212.87, "end": 1213.91, "word": " بتقل", "probability": 0.8662109375}, {"start": 1213.91, "end": 1214.37, "word": " النسبة", "probability": 0.9404296875}, {"start": 1214.37, "end": 1215.11, "word": " هذه،", "probability": 0.535888671875}, {"start": 1215.11, "end": 1215.19, "word": " و", "probability": 0.52734375}, {"start": 1215.19, "end": 1215.35, "word": " لما", "probability": 0.84326171875}, {"start": 1215.35, "end": 1215.67, "word": " تقل", "probability": 0.9700520833333334}, {"start": 1215.67, "end": 1216.11, "word": " النسبة", "probability": 0.982421875}, {"start": 1216.11, "end": 1217.17, "word": " هذه،", "probability": 0.760498046875}, {"start": 1217.17, "end": 1217.65, "word": " بتأثر", "probability": 0.9153645833333334}, {"start": 1217.65, "end": 1217.75, "word": " على", "probability": 0.759765625}, {"start": 1217.75, "end": 1218.07, "word": " protein", "probability": 0.87744140625}, {"start": 1218.07, "end": 1218.93, "word": " C،", "probability": 0.718017578125}, {"start": 1218.93, "end": 1219.09, "word": " طب", "probability": 0.922607421875}, {"start": 1219.09, "end": 1219.17, "word": " و", "probability": 0.90576171875}, {"start": 1219.17, "end": 1219.31, "word": " لما", "probability": 0.94921875}, {"start": 1219.31, "end": 1219.65, "word": " تأثر", "probability": 
0.9811197916666666}, {"start": 1219.65, "end": 1219.75, "word": " على", "probability": 0.8876953125}, {"start": 1219.75, "end": 1219.99, "word": " protein", "probability": 0.9609375}, {"start": 1219.99, "end": 1220.89, "word": " C،", "probability": 0.9384765625}, {"start": 1220.89, "end": 1221.51, "word": " بتقل", "probability": 0.8326822916666666}, {"start": 1221.51, "end": 1222.69, "word": " نشاطه،", "probability": 0.93818359375}, {"start": 1222.69, "end": 1222.87, "word": " شو", "probability": 0.97119140625}, {"start": 1222.87, "end": 1223.81, "word": " بيصير؟", "probability": 0.9349609375}, {"start": 1223.81, "end": 1224.19, "word": " بيصير", "probability": 0.886962890625}, {"start": 1224.19, "end": 1224.55, "word": " انتقال", "probability": 0.5694580078125}, {"start": 1224.55, "end": 1225.53, "word": " يعني", "probability": 0.57696533203125}, {"start": 1225.53, "end": 1225.89, "word": " إيه؟", "probability": 0.81884765625}, {"start": 1225.89, "end": 1226.37, "word": " في", "probability": 0.89111328125}, {"start": 1226.37, "end": 1226.51, "word": " ..", "probability": 0.348876953125}, {"start": 1226.51, "end": 1227.29, "word": " في", "probability": 0.9609375}, {"start": 1227.29, "end": 1227.81, "word": " Clotting", "probability": 0.4629720052083333}, {"start": 1227.81, "end": 1229.09, "word": " Clotting؟", "probability": 0.694305419921875}, {"start": 1229.09, "end": 1230.81, "word": " مش", "probability": 0.5302734375}, {"start": 1230.81, "end": 1231.15, "word": " Anti", "probability": 0.583984375}, {"start": 1231.15, "end": 1231.83, "word": "-coagulation", "probability": 0.866455078125}, {"start": 1231.83, "end": 1232.35, "word": " يعني", "probability": 0.80126953125}, {"start": 1232.35, "end": 1233.31, "word": " إيه؟", "probability": 0.8560791015625}, {"start": 1233.31, "end": 1233.61, "word": " عشان", "probability": 0.9596354166666666}, {"start": 1233.61, "end": 1233.81, "word": " هيك", "probability": 0.968017578125}, {"start": 1233.81, "end": 1233.91, 
"word": " في", "probability": 0.96240234375}, {"start": 1233.91, "end": 1233.93, "word": " ال", "probability": 0.90185546875}, {"start": 1233.93, "end": 1234.33, "word": " inflammatory", "probability": 0.7919921875}, {"start": 1234.33, "end": 1235.09, "word": " reaction", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 48, "seek": 126374, "start": 1235.64, "end": 1263.74, "text": "بصير فيه إمكانية لتكون جلطة أكتر من إياش من إنه نزيد، مفهوم عليا؟ ليش؟ لأنه لعبنا في الميزان، إيش هو الميزان؟ هد بيشتغل بالميزان، ال anti-coagulant و ال pro-coagulant، صح؟ مظبوط؟ لعبنا في إياش في كفة ميزان واحدة منهم، قللنا الكفة، ماشي، خففناها يعني، فالكفة التانية هتزيد، مفهوم عليا؟", "tokens": [3555, 9381, 13546, 8978, 3224, 11933, 2304, 41361, 10632, 5296, 2655, 30544, 10874, 1211, 9566, 3660, 5551, 4117, 2655, 2288, 9154, 11933, 1829, 33599, 9154, 36145, 3224, 8717, 11622, 25708, 12399, 3714, 5172, 3224, 20498, 11203, 25528, 22807, 32239, 8592, 22807, 5296, 33456, 3224, 5296, 3615, 3555, 8315, 8978, 9673, 1829, 11622, 7649, 12399, 11933, 1829, 8592, 31439, 9673, 1829, 11622, 7649, 22807, 8032, 3215, 4724, 1829, 8592, 2655, 17082, 1211, 20666, 2304, 1829, 11622, 7649, 12399, 2423, 6061, 12, 1291, 559, 425, 394, 4032, 2423, 447, 12, 1291, 559, 425, 394, 12399, 20328, 5016, 22807, 3714, 19913, 3555, 2407, 9566, 22807, 5296, 3615, 3555, 8315, 8978, 11933, 1829, 33599, 8978, 9122, 5172, 3660, 3714, 1829, 11622, 7649, 36764, 24401, 3660, 9154, 16095, 12399, 12174, 1211, 1211, 8315, 33251, 5172, 3660, 12399, 3714, 33599, 1829, 12399, 16490, 5172, 5172, 8315, 11296, 37495, 22653, 12399, 6156, 6027, 4117, 5172, 3660, 16712, 7649, 10632, 8032, 2655, 11622, 25708, 12399, 3714, 5172, 3224, 20498, 11203, 25528, 22807], "avg_logprob": -0.16448863130627256, "compression_ratio": 1.972, "no_speech_prob": 7.152557373046875e-07, "words": [{"start": 1235.64, "end": 1236.08, "word": "بصير", "probability": 0.647705078125}, {"start": 1236.08, "end": 1236.38, "word": " فيه", "probability": 
0.745849609375}, {"start": 1236.38, "end": 1236.86, "word": " إمكانية", "probability": 0.9051513671875}, {"start": 1236.86, "end": 1237.34, "word": " لتكون", "probability": 0.74951171875}, {"start": 1237.34, "end": 1237.74, "word": " جلطة", "probability": 0.799560546875}, {"start": 1237.74, "end": 1238.08, "word": " أكتر", "probability": 0.9344482421875}, {"start": 1238.08, "end": 1238.24, "word": " من", "probability": 0.97705078125}, {"start": 1238.24, "end": 1238.62, "word": " إياش", "probability": 0.64208984375}, {"start": 1238.62, "end": 1239.24, "word": " من", "probability": 0.74072265625}, {"start": 1239.24, "end": 1239.46, "word": " إنه", "probability": 0.72998046875}, {"start": 1239.46, "end": 1240.38, "word": " نزيد،", "probability": 0.7998046875}, {"start": 1240.38, "end": 1240.68, "word": " مفهوم", "probability": 0.7913818359375}, {"start": 1240.68, "end": 1241.18, "word": " عليا؟", "probability": 0.74755859375}, {"start": 1241.18, "end": 1241.96, "word": " ليش؟", "probability": 0.94140625}, {"start": 1241.96, "end": 1242.24, "word": " لأنه", "probability": 0.884765625}, {"start": 1242.24, "end": 1242.58, "word": " لعبنا", "probability": 0.932373046875}, {"start": 1242.58, "end": 1242.7, "word": " في", "probability": 0.978515625}, {"start": 1242.7, "end": 1244.0, "word": " الميزان،", "probability": 0.95068359375}, {"start": 1244.0, "end": 1244.26, "word": " إيش", "probability": 0.8533528645833334}, {"start": 1244.26, "end": 1244.4, "word": " هو", "probability": 0.93212890625}, {"start": 1244.4, "end": 1244.84, "word": " الميزان؟", "probability": 0.95927734375}, {"start": 1244.84, "end": 1244.98, "word": " هد", "probability": 0.4410400390625}, {"start": 1244.98, "end": 1245.64, "word": " بيشتغل", "probability": 0.8568115234375}, {"start": 1245.64, "end": 1246.66, "word": " بالميزان،", "probability": 0.8432210286458334}, {"start": 1246.66, "end": 1246.78, "word": " ال", "probability": 0.96826171875}, {"start": 1246.78, "end": 1247.06, "word": " anti", 
"probability": 0.1422119140625}, {"start": 1247.06, "end": 1247.76, "word": "-coagulant", "probability": 0.8923828125}, {"start": 1247.76, "end": 1247.9, "word": " و", "probability": 0.55517578125}, {"start": 1247.9, "end": 1247.98, "word": " ال", "probability": 0.90673828125}, {"start": 1247.98, "end": 1248.24, "word": " pro", "probability": 0.84228515625}, {"start": 1248.24, "end": 1248.88, "word": "-coagulant،", "probability": 0.9475911458333334}, {"start": 1248.88, "end": 1250.0, "word": " صح؟", "probability": 0.9322916666666666}, {"start": 1250.0, "end": 1251.12, "word": " مظبوط؟", "probability": 0.9117024739583334}, {"start": 1251.12, "end": 1251.46, "word": " لعبنا", "probability": 0.9783935546875}, {"start": 1251.46, "end": 1251.58, "word": " في", "probability": 0.978515625}, {"start": 1251.58, "end": 1251.86, "word": " إياش", "probability": 0.9544270833333334}, {"start": 1251.86, "end": 1252.04, "word": " في", "probability": 0.499755859375}, {"start": 1252.04, "end": 1252.44, "word": " كفة", "probability": 0.9658203125}, {"start": 1252.44, "end": 1252.9, "word": " ميزان", "probability": 0.9959716796875}, {"start": 1252.9, "end": 1253.26, "word": " واحدة", "probability": 0.9736328125}, {"start": 1253.26, "end": 1254.24, "word": " منهم،", "probability": 0.9480794270833334}, {"start": 1254.24, "end": 1254.96, "word": " قللنا", "probability": 0.988037109375}, {"start": 1254.96, "end": 1257.06, "word": " الكفة،", "probability": 0.95068359375}, {"start": 1257.06, "end": 1257.66, "word": " ماشي،", "probability": 0.8841552734375}, {"start": 1257.66, "end": 1258.58, "word": " خففناها", "probability": 0.98427734375}, {"start": 1258.58, "end": 1259.48, "word": " يعني،", "probability": 0.8544921875}, {"start": 1259.48, "end": 1260.28, "word": " فالكفة", "probability": 0.977734375}, {"start": 1260.28, "end": 1260.58, "word": " التانية", "probability": 0.9895833333333334}, {"start": 1260.58, "end": 1262.86, "word": " هتزيد،", "probability": 0.79541015625}, {"start": 
1262.86, "end": 1263.22, "word": " مفهوم", "probability": 0.9732666015625}, {"start": 1263.22, "end": 1263.74, "word": " عليا؟", "probability": 0.8899739583333334}], "temperature": 1.0}, {"id": 49, "seek": 129254, "start": 1263.78, "end": 1292.54, "text": "وبالتالي يصير فيه إيش؟ إمكانية إنه يصير فيه جلطة أكتر من ما إمكانية إنه يصير فيه نزيل أفهم عليها الولاقة المباشرة بين إيش؟ بين البروتين C وبروتين S وولاقتهم بيه اللي هو تكوين الجلطة اللي فوق الشباب، حد عنده سؤال؟ طبعا، زي ما انتم ملاحظين ال ..", "tokens": [37746, 6027, 2655, 6027, 1829, 7251, 9381, 13546, 8978, 3224, 11933, 1829, 8592, 22807, 11933, 2304, 41361, 10632, 36145, 3224, 7251, 9381, 13546, 8978, 3224, 10874, 1211, 9566, 3660, 5551, 4117, 2655, 2288, 9154, 19446, 11933, 2304, 41361, 10632, 36145, 3224, 7251, 9381, 13546, 8978, 3224, 8717, 11622, 26895, 5551, 5172, 16095, 25894, 11296, 2423, 12610, 995, 28671, 9673, 3555, 33599, 25720, 49374, 11933, 1829, 8592, 22807, 49374, 2423, 26890, 35473, 9957, 383, 4032, 26890, 35473, 9957, 318, 4032, 12610, 995, 38149, 16095, 4724, 1829, 3224, 13672, 1829, 31439, 6055, 4117, 2407, 9957, 25724, 1211, 9566, 3660, 13672, 1829, 6156, 30543, 25124, 3555, 16758, 12399, 11331, 3215, 43242, 3224, 8608, 33604, 6027, 22807, 23032, 3555, 3615, 995, 12399, 30767, 1829, 19446, 16472, 39237, 3714, 15040, 5016, 19913, 9957, 2423, 4386], "avg_logprob": -0.255248089328067, "compression_ratio": 2.0373831775700935, "no_speech_prob": 5.364418029785156e-07, "words": [{"start": 1263.78, "end": 1264.26, "word": "وبالتالي", "probability": 0.88427734375}, {"start": 1264.26, "end": 1264.66, "word": " يصير", "probability": 0.4154459635416667}, {"start": 1264.66, "end": 1265.0, "word": " فيه", "probability": 0.8681640625}, {"start": 1265.0, "end": 1265.22, "word": " إيش؟", "probability": 0.695068359375}, {"start": 1265.22, "end": 1265.6, "word": " إمكانية", "probability": 0.7923583984375}, {"start": 1265.6, "end": 1265.78, "word": " إنه", "probability": 0.6845703125}, {"start": 1265.78, 
"end": 1266.0, "word": " يصير", "probability": 0.9305013020833334}, {"start": 1266.0, "end": 1266.2, "word": " فيه", "probability": 0.960205078125}, {"start": 1266.2, "end": 1266.68, "word": " جلطة", "probability": 0.8165283203125}, {"start": 1266.68, "end": 1267.26, "word": " أكتر", "probability": 0.9288330078125}, {"start": 1267.26, "end": 1267.44, "word": " من", "probability": 0.8974609375}, {"start": 1267.44, "end": 1267.58, "word": " ما", "probability": 0.6787109375}, {"start": 1267.58, "end": 1268.1, "word": " إمكانية", "probability": 0.959228515625}, {"start": 1268.1, "end": 1268.3, "word": " إنه", "probability": 0.863525390625}, {"start": 1268.3, "end": 1268.52, "word": " يصير", "probability": 0.9606119791666666}, {"start": 1268.52, "end": 1269.34, "word": " فيه", "probability": 0.9384765625}, {"start": 1269.34, "end": 1269.78, "word": " نزيل", "probability": 0.8723958333333334}, {"start": 1269.78, "end": 1271.16, "word": " أفهم", "probability": 0.9016927083333334}, {"start": 1271.16, "end": 1271.7, "word": " عليها", "probability": 0.628662109375}, {"start": 1271.7, "end": 1272.54, "word": " الولاقة", "probability": 0.7149658203125}, {"start": 1272.54, "end": 1273.08, "word": " المباشرة", "probability": 0.9957275390625}, {"start": 1273.08, "end": 1273.28, "word": " بين", "probability": 0.7958984375}, {"start": 1273.28, "end": 1273.96, "word": " إيش؟", "probability": 0.7596435546875}, {"start": 1273.96, "end": 1274.18, "word": " بين", "probability": 0.9306640625}, {"start": 1274.18, "end": 1274.6, "word": " البروتين", "probability": 0.7830810546875}, {"start": 1274.6, "end": 1274.96, "word": " C", "probability": 0.66259765625}, {"start": 1274.96, "end": 1276.08, "word": " وبروتين", "probability": 0.8426513671875}, {"start": 1276.08, "end": 1276.48, "word": " S", "probability": 0.97607421875}, {"start": 1276.48, "end": 1277.7, "word": " وولاقتهم", "probability": 0.9638671875}, {"start": 1277.7, "end": 1278.5, "word": " بيه", "probability": 
0.5990397135416666}, {"start": 1278.5, "end": 1278.82, "word": " اللي", "probability": 0.876708984375}, {"start": 1278.82, "end": 1279.18, "word": " هو", "probability": 0.9375}, {"start": 1279.18, "end": 1279.8, "word": " تكوين", "probability": 0.9566650390625}, {"start": 1279.8, "end": 1280.98, "word": " الجلطة", "probability": 0.9205322265625}, {"start": 1280.98, "end": 1282.38, "word": " اللي", "probability": 0.5129547119140625}, {"start": 1282.38, "end": 1282.5, "word": " فوق", "probability": 0.959228515625}, {"start": 1282.5, "end": 1283.2, "word": " الشباب،", "probability": 0.53662109375}, {"start": 1283.2, "end": 1283.34, "word": " حد", "probability": 0.6982421875}, {"start": 1283.34, "end": 1283.56, "word": " عنده", "probability": 0.6429443359375}, {"start": 1283.56, "end": 1285.26, "word": " سؤال؟", "probability": 0.9139404296875}, {"start": 1285.26, "end": 1286.14, "word": " طبعا،", "probability": 0.8595703125}, {"start": 1286.14, "end": 1286.44, "word": " زي", "probability": 0.952392578125}, {"start": 1286.44, "end": 1286.48, "word": " ما", "probability": 0.9453125}, {"start": 1286.48, "end": 1286.66, "word": " انتم", "probability": 0.667724609375}, {"start": 1286.66, "end": 1287.22, "word": " ملاحظين", "probability": 0.91220703125}, {"start": 1287.22, "end": 1292.36, "word": " ال", "probability": 0.94140625}, {"start": 1292.36, "end": 1292.54, "word": " ..", "probability": 0.9208984375}], "temperature": 1.0}, {"id": 50, "seek": 132390, "start": 1296.44, "end": 1323.9, "text": "الـ .. الـ .. 
الـ mechanism هذا مرات بيكون وراثيًا genetically defected genetically defected بمعنى الـ proteins هي يا شباب اللي مالي جيش على ال factor 5 ماشي بيجي بمسك في ال active site تبعته بعملها inactivation بقطعه و بعمل inactivation", "tokens": [6027, 39184, 4386, 2423, 39184, 4386, 2423, 39184, 7513, 23758, 3714, 2288, 9307, 4724, 1829, 30544, 4032, 2288, 5718, 104, 1829, 14111, 995, 37582, 16445, 292, 37582, 16445, 292, 4724, 2304, 3615, 1863, 7578, 2423, 39184, 15577, 39896, 35186, 13412, 3555, 16758, 13672, 1829, 3714, 6027, 1829, 10874, 1829, 8592, 15844, 2423, 5952, 1025, 3714, 33599, 1829, 4724, 1829, 7435, 1829, 4724, 2304, 3794, 4117, 8978, 2423, 4967, 3621, 6055, 3555, 34268, 3224, 4724, 25957, 1211, 11296, 294, 23397, 399, 4724, 47432, 3615, 3224, 4032, 4724, 25957, 1211, 294, 23397, 399], "avg_logprob": -0.26341713057911914, "compression_ratio": 1.6213592233009708, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1296.44, "end": 1297.06, "word": "الـ", "probability": 0.772216796875}, {"start": 1297.06, "end": 1297.24, "word": " ..", "probability": 0.322998046875}, {"start": 1297.24, "end": 1297.92, "word": " الـ", "probability": 0.881103515625}, {"start": 1297.92, "end": 1297.92, "word": " ..", "probability": 0.57421875}, {"start": 1297.92, "end": 1298.22, "word": " الـ", "probability": 0.824462890625}, {"start": 1298.22, "end": 1298.44, "word": " mechanism", "probability": 0.09820556640625}, {"start": 1298.44, "end": 1298.82, "word": " هذا", "probability": 0.86865234375}, {"start": 1298.82, "end": 1299.26, "word": " مرات", "probability": 0.9461263020833334}, {"start": 1299.26, "end": 1300.7, "word": " بيكون", "probability": 0.9606119791666666}, {"start": 1300.7, "end": 1305.0, "word": " وراثيًا", "probability": 0.7994210379464286}, {"start": 1305.0, "end": 1307.18, "word": " genetically", "probability": 0.61083984375}, {"start": 1307.18, "end": 1308.92, "word": " defected", "probability": 0.938720703125}, {"start": 1308.92, "end": 1310.22, "word": 
" genetically", "probability": 0.415771484375}, {"start": 1310.22, "end": 1311.3, "word": " defected", "probability": 0.972412109375}, {"start": 1311.3, "end": 1312.5, "word": " بمعنى", "probability": 0.95625}, {"start": 1312.5, "end": 1313.36, "word": " الـ", "probability": 0.535888671875}, {"start": 1313.36, "end": 1313.6, "word": " proteins", "probability": 0.50439453125}, {"start": 1313.6, "end": 1313.76, "word": " هي", "probability": 0.435302734375}, {"start": 1313.76, "end": 1313.92, "word": " يا", "probability": 0.662109375}, {"start": 1313.92, "end": 1314.08, "word": " شباب", "probability": 0.9881184895833334}, {"start": 1314.08, "end": 1314.22, "word": " اللي", "probability": 0.53753662109375}, {"start": 1314.22, "end": 1314.36, "word": " مالي", "probability": 0.7569986979166666}, {"start": 1314.36, "end": 1314.52, "word": " جيش", "probability": 0.7987467447916666}, {"start": 1314.52, "end": 1314.76, "word": " على", "probability": 0.52197265625}, {"start": 1314.76, "end": 1315.34, "word": " ال", "probability": 0.496337890625}, {"start": 1315.34, "end": 1315.34, "word": " factor", "probability": 0.65185546875}, {"start": 1315.34, "end": 1315.58, "word": " 5", "probability": 0.61328125}, {"start": 1315.58, "end": 1317.18, "word": " ماشي", "probability": 0.7970377604166666}, {"start": 1317.18, "end": 1317.46, "word": " بيجي", "probability": 0.9586181640625}, {"start": 1317.46, "end": 1317.92, "word": " بمسك", "probability": 0.83447265625}, {"start": 1317.92, "end": 1318.1, "word": " في", "probability": 0.9443359375}, {"start": 1318.1, "end": 1318.2, "word": " ال", "probability": 0.98828125}, {"start": 1318.2, "end": 1318.54, "word": " active", "probability": 0.8720703125}, {"start": 1318.54, "end": 1318.86, "word": " site", "probability": 0.82080078125}, {"start": 1318.86, "end": 1319.26, "word": " تبعته", "probability": 0.8194580078125}, {"start": 1319.26, "end": 1319.8, "word": " بعملها", "probability": 0.801025390625}, {"start": 1319.8, "end": 1322.22, 
"word": " inactivation", "probability": 0.9192708333333334}, {"start": 1322.22, "end": 1322.9, "word": " بقطعه", "probability": 0.8284912109375}, {"start": 1322.9, "end": 1323.0, "word": " و", "probability": 0.487548828125}, {"start": 1323.0, "end": 1323.2, "word": " بعمل", "probability": 0.9710286458333334}, {"start": 1323.2, "end": 1323.9, "word": " inactivation", "probability": 0.96728515625}], "temperature": 1.0}, {"id": 51, "seek": 134378, "start": 1325.34, "end": 1343.78, "text": "يعني بيجي برتبط بمواقع معينة على factor خمسة النشط طب هو factor خمسة عبارة عن أيش؟ عبارة عن amino acid، protein مجبوط، amino acid فلو صار فيه تبديل في مواقع ال active sites", "tokens": [40228, 22653, 4724, 1829, 7435, 1829, 4724, 43500, 3555, 9566, 4724, 2304, 14407, 4587, 3615, 20449, 9957, 3660, 15844, 5952, 16490, 2304, 3794, 3660, 28239, 8592, 9566, 23032, 3555, 31439, 5952, 16490, 2304, 3794, 3660, 6225, 3555, 9640, 3660, 18871, 36632, 8592, 22807, 6225, 3555, 9640, 3660, 18871, 24674, 8258, 12399, 7944, 3714, 7435, 3555, 2407, 9566, 12399, 24674, 8258, 6156, 1211, 2407, 20328, 9640, 8978, 3224, 6055, 3555, 16254, 1211, 8978, 3714, 14407, 4587, 3615, 2423, 4967, 7533], "avg_logprob": -0.22089843973517417, "compression_ratio": 1.543859649122807, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1325.34, "end": 1325.66, "word": "يعني", "probability": 0.955078125}, {"start": 1325.66, "end": 1325.98, "word": " بيجي", "probability": 0.81024169921875}, {"start": 1325.98, "end": 1326.54, "word": " برتبط", "probability": 0.826904296875}, {"start": 1326.54, "end": 1327.32, "word": " بمواقع", "probability": 0.98056640625}, {"start": 1327.32, "end": 1328.06, "word": " معينة", "probability": 0.9851888020833334}, {"start": 1328.06, "end": 1328.28, "word": " على", "probability": 0.84521484375}, {"start": 1328.28, "end": 1328.64, "word": " factor", "probability": 0.6552734375}, {"start": 1328.64, "end": 1329.08, "word": " خمسة", "probability": 0.76446533203125}, {"start": 1329.08, 
"end": 1329.54, "word": " النشط", "probability": 0.7252604166666666}, {"start": 1329.54, "end": 1332.62, "word": " طب", "probability": 0.5865478515625}, {"start": 1332.62, "end": 1332.72, "word": " هو", "probability": 0.896484375}, {"start": 1332.72, "end": 1332.96, "word": " factor", "probability": 0.81103515625}, {"start": 1332.96, "end": 1333.36, "word": " خمسة", "probability": 0.9661865234375}, {"start": 1333.36, "end": 1333.58, "word": " عبارة", "probability": 0.757080078125}, {"start": 1333.58, "end": 1333.74, "word": " عن", "probability": 0.9921875}, {"start": 1333.74, "end": 1334.92, "word": " أيش؟", "probability": 0.587890625}, {"start": 1334.92, "end": 1335.24, "word": " عبارة", "probability": 0.96484375}, {"start": 1335.24, "end": 1335.36, "word": " عن", "probability": 0.97265625}, {"start": 1335.36, "end": 1335.58, "word": " amino", "probability": 0.465576171875}, {"start": 1335.58, "end": 1336.02, "word": " acid،", "probability": 0.742431640625}, {"start": 1336.02, "end": 1336.48, "word": " protein", "probability": 0.857421875}, {"start": 1336.48, "end": 1338.04, "word": " مجبوط،", "probability": 0.6607869466145834}, {"start": 1338.04, "end": 1338.52, "word": " amino", "probability": 0.71533203125}, {"start": 1338.52, "end": 1338.88, "word": " acid", "probability": 0.9853515625}, {"start": 1338.88, "end": 1339.82, "word": " فلو", "probability": 0.8566080729166666}, {"start": 1339.82, "end": 1340.08, "word": " صار", "probability": 0.974609375}, {"start": 1340.08, "end": 1340.26, "word": " فيه", "probability": 0.900390625}, {"start": 1340.26, "end": 1340.84, "word": " تبديل", "probability": 0.870361328125}, {"start": 1340.84, "end": 1341.58, "word": " في", "probability": 0.951171875}, {"start": 1341.58, "end": 1342.42, "word": " مواقع", "probability": 0.9888916015625}, {"start": 1342.42, "end": 1342.98, "word": " ال", "probability": 0.98095703125}, {"start": 1342.98, "end": 1343.3, "word": " active", "probability": 0.75390625}, {"start": 1343.3, "end": 
1343.78, "word": " sites", "probability": 0.91748046875}], "temperature": 1.0}, {"id": 52, "seek": 136496, "start": 1349.24, "end": 1364.96, "text": "فاكتور خمسة نشط ولا بيظل نشط؟ بيظل نشط، ماشي؟ وجدوا هذه الظاهرة موجودة بعض مرات بإن ال protein خمسة بيكون ورثيا، ماشي، defected، بمعنى صار في single", "tokens": [5172, 995, 4117, 2655, 13063, 16490, 2304, 3794, 3660, 8717, 8592, 9566, 49429, 4724, 1829, 19913, 1211, 8717, 8592, 9566, 22807, 4724, 1829, 19913, 1211, 8717, 8592, 9566, 12399, 3714, 33599, 1829, 22807, 49610, 3215, 14407, 29538, 6024, 116, 40294, 25720, 3714, 29245, 23328, 3660, 45030, 11242, 3714, 2288, 9307, 4724, 28814, 1863, 2423, 7944, 16490, 2304, 3794, 3660, 4724, 1829, 30544, 4032, 2288, 12984, 25528, 12399, 3714, 33599, 1829, 12399, 16445, 292, 12399, 4724, 2304, 3615, 1863, 7578, 20328, 9640, 8978, 2167], "avg_logprob": -0.2829241213344392, "compression_ratio": 1.4733727810650887, "no_speech_prob": 0.00025773048400878906, "words": [{"start": 1349.24, "end": 1349.66, "word": "فاكتور", "probability": 0.58057861328125}, {"start": 1349.66, "end": 1350.02, "word": " خمسة", "probability": 0.7174072265625}, {"start": 1350.02, "end": 1350.34, "word": " نشط", "probability": 0.6153971354166666}, {"start": 1350.34, "end": 1350.5, "word": " ولا", "probability": 0.58544921875}, {"start": 1350.5, "end": 1350.84, "word": " بيظل", "probability": 0.58612060546875}, {"start": 1350.84, "end": 1351.5, "word": " نشط؟", "probability": 0.74072265625}, {"start": 1351.5, "end": 1352.3, "word": " بيظل", "probability": 0.8936767578125}, {"start": 1352.3, "end": 1352.82, "word": " نشط،", "probability": 0.76751708984375}, {"start": 1352.82, "end": 1354.06, "word": " ماشي؟", "probability": 0.7723388671875}, {"start": 1354.06, "end": 1354.48, "word": " وجدوا", "probability": 0.9267578125}, {"start": 1354.48, "end": 1354.74, "word": " هذه", "probability": 0.9814453125}, {"start": 1354.74, "end": 1355.24, "word": " الظاهرة", "probability": 0.9586181640625}, {"start": 1355.24, 
"end": 1356.12, "word": " موجودة", "probability": 0.981201171875}, {"start": 1356.12, "end": 1356.72, "word": " بعض", "probability": 0.7144775390625}, {"start": 1356.72, "end": 1357.24, "word": " مرات", "probability": 0.89208984375}, {"start": 1357.24, "end": 1358.0, "word": " بإن", "probability": 0.7255045572916666}, {"start": 1358.0, "end": 1358.18, "word": " ال", "probability": 0.55029296875}, {"start": 1358.18, "end": 1358.56, "word": " protein", "probability": 0.6572265625}, {"start": 1358.56, "end": 1359.0, "word": " خمسة", "probability": 0.9351806640625}, {"start": 1359.0, "end": 1359.5, "word": " بيكون", "probability": 0.96533203125}, {"start": 1359.5, "end": 1361.44, "word": " ورثيا،", "probability": 0.809033203125}, {"start": 1361.44, "end": 1362.36, "word": " ماشي،", "probability": 0.7666015625}, {"start": 1362.36, "end": 1363.28, "word": " defected،", "probability": 0.8038736979166666}, {"start": 1363.28, "end": 1363.88, "word": " بمعنى", "probability": 0.967578125}, {"start": 1363.88, "end": 1364.4, "word": " صار", "probability": 0.952392578125}, {"start": 1364.4, "end": 1364.54, "word": " في", "probability": 0.912109375}, {"start": 1364.54, "end": 1364.96, "word": " single", "probability": 0.916015625}], "temperature": 1.0}, {"id": 53, "seek": 139085, "start": 1366.65, "end": 1390.85, "text": "Nucleotide polymorphism SMPs وبالتالي أثرت هذه حولت اللي هو factor 5 المواقع الناشطة فيها اتغير ال amino acid اللي موجود وبالتالي بقرر يتعرف عليه نعم ماشي بيسموا هذا المرض factor 5 lyden factor 5 lyden باسم العالمة اللي هضد ديه ليش؟", "tokens": [45, 1311, 306, 310, 482, 6754, 76, 18191, 1434, 318, 12224, 82, 4032, 3555, 6027, 2655, 6027, 1829, 5551, 12984, 43500, 29538, 11331, 12610, 2655, 13672, 1829, 31439, 5952, 1025, 9673, 14407, 4587, 3615, 28239, 33599, 9566, 3660, 8978, 11296, 1975, 2655, 17082, 13546, 2423, 24674, 8258, 13672, 1829, 3714, 29245, 23328, 46599, 6027, 2655, 6027, 1829, 4724, 4587, 2288, 2288, 7251, 2655, 3615, 28480, 47356, 8717, 25957, 
3714, 33599, 1829, 4724, 1829, 38251, 14407, 23758, 9673, 43042, 5952, 1025, 17293, 1556, 5952, 1025, 17293, 1556, 4724, 32277, 2304, 18863, 45340, 3660, 13672, 1829, 8032, 11242, 3215, 11778, 1829, 3224, 32239, 8592, 22807], "avg_logprob": -0.29627404963740933, "compression_ratio": 1.6199095022624435, "no_speech_prob": 1.537799835205078e-05, "words": [{"start": 1366.6499999999999, "end": 1367.57, "word": "Nucleotide", "probability": 0.6910888671875}, {"start": 1367.57, "end": 1368.23, "word": " polymorphism", "probability": 0.7940673828125}, {"start": 1368.23, "end": 1369.33, "word": " SMPs", "probability": 0.52392578125}, {"start": 1369.33, "end": 1369.77, "word": " وبالتالي", "probability": 0.7997233072916666}, {"start": 1369.77, "end": 1370.31, "word": " أثرت", "probability": 0.7747395833333334}, {"start": 1370.31, "end": 1370.67, "word": " هذه", "probability": 0.5009765625}, {"start": 1370.67, "end": 1371.33, "word": " حولت", "probability": 0.9718424479166666}, {"start": 1371.33, "end": 1371.93, "word": " اللي", "probability": 0.801513671875}, {"start": 1371.93, "end": 1372.09, "word": " هو", "probability": 0.97705078125}, {"start": 1372.09, "end": 1372.43, "word": " factor", "probability": 0.666015625}, {"start": 1372.43, "end": 1372.87, "word": " 5", "probability": 0.53076171875}, {"start": 1372.87, "end": 1374.87, "word": " المواقع", "probability": 0.9775390625}, {"start": 1374.87, "end": 1375.71, "word": " الناشطة", "probability": 0.8162841796875}, {"start": 1375.71, "end": 1376.11, "word": " فيها", "probability": 0.982177734375}, {"start": 1376.11, "end": 1376.89, "word": " اتغير", "probability": 0.9342041015625}, {"start": 1376.89, "end": 1377.47, "word": " ال", "probability": 0.8408203125}, {"start": 1377.47, "end": 1377.75, "word": " amino", "probability": 0.9267578125}, {"start": 1377.75, "end": 1378.13, "word": " acid", "probability": 0.94580078125}, {"start": 1378.13, "end": 1378.31, "word": " اللي", "probability": 0.9501953125}, {"start": 1378.31, 
"end": 1378.69, "word": " موجود", "probability": 0.9830729166666666}, {"start": 1378.69, "end": 1379.03, "word": " وبالتالي", "probability": 0.92685546875}, {"start": 1379.03, "end": 1379.43, "word": " بقرر", "probability": 0.6270751953125}, {"start": 1379.43, "end": 1379.75, "word": " يتعرف", "probability": 0.850341796875}, {"start": 1379.75, "end": 1380.07, "word": " عليه", "probability": 0.61767578125}, {"start": 1380.07, "end": 1380.89, "word": " نعم", "probability": 0.4642333984375}, {"start": 1380.89, "end": 1381.35, "word": " ماشي", "probability": 0.8846028645833334}, {"start": 1381.35, "end": 1381.89, "word": " بيسموا", "probability": 0.775634765625}, {"start": 1381.89, "end": 1382.07, "word": " هذا", "probability": 0.91552734375}, {"start": 1382.07, "end": 1382.41, "word": " المرض", "probability": 0.975341796875}, {"start": 1382.41, "end": 1383.31, "word": " factor", "probability": 0.8798828125}, {"start": 1383.31, "end": 1383.73, "word": " 5", "probability": 0.70654296875}, {"start": 1383.73, "end": 1384.31, "word": " lyden", "probability": 0.5169677734375}, {"start": 1384.31, "end": 1384.97, "word": " factor", "probability": 0.72216796875}, {"start": 1384.97, "end": 1385.75, "word": " 5", "probability": 0.9130859375}, {"start": 1385.75, "end": 1386.77, "word": " lyden", "probability": 0.944580078125}, {"start": 1386.77, "end": 1387.23, "word": " باسم", "probability": 0.9650065104166666}, {"start": 1387.23, "end": 1388.53, "word": " العالمة", "probability": 0.7583821614583334}, {"start": 1388.53, "end": 1388.71, "word": " اللي", "probability": 0.803466796875}, {"start": 1388.71, "end": 1389.05, "word": " هضد", "probability": 0.732421875}, {"start": 1389.05, "end": 1389.53, "word": " ديه", "probability": 0.4641520182291667}, {"start": 1389.53, "end": 1390.85, "word": " ليش؟", "probability": 0.65771484375}], "temperature": 1.0}, {"id": 54, "seek": 142302, "start": 1395.16, "end": 1423.02, "text": "اللي اكتشفت هذا الموضوع طبعا الناس هدول في هذه الحالة شو 
بيصيروا يا شباب؟ بيصيروا عرضة لإيش؟ لجلطة بالظبط عرضة لclot formation طيب ننتقل لميكانيزم التالت وهو إيش؟ وهو tissue factor pathway inhibitor والمرة اللي فاتة كنا حكينا إنه tissue factor pathway inhibitor ماهياشي هو عبارة عن inhibitor", "tokens": [6027, 20292, 1975, 4117, 2655, 8592, 5172, 2655, 23758, 9673, 2407, 11242, 45367, 23032, 3555, 3615, 995, 2423, 8315, 3794, 8032, 3215, 12610, 8978, 29538, 21542, 6027, 3660, 13412, 2407, 4724, 1829, 9381, 13546, 14407, 35186, 13412, 3555, 16758, 22807, 4724, 1829, 9381, 13546, 14407, 6225, 43042, 3660, 5296, 28814, 1829, 8592, 22807, 5296, 7435, 1211, 9566, 3660, 20666, 19913, 3555, 9566, 6225, 43042, 3660, 5296, 3474, 310, 11723, 23032, 1829, 3555, 8717, 29399, 4587, 1211, 5296, 2304, 1829, 41361, 1829, 11622, 2304, 16712, 6027, 2655, 37037, 2407, 11933, 1829, 8592, 22807, 4032, 3224, 2407, 12404, 5952, 18590, 20406, 3029, 4032, 45340, 25720, 13672, 1829, 6156, 9307, 3660, 9122, 8315, 11331, 4117, 1829, 8315, 36145, 3224, 12404, 5952, 18590, 20406, 3029, 19446, 3224, 1829, 33599, 1829, 31439, 6225, 3555, 9640, 3660, 18871, 20406, 3029], "avg_logprob": -0.23587962322764927, "compression_ratio": 1.7509578544061302, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 1395.16, "end": 1395.66, "word": "اللي", "probability": 0.668212890625}, {"start": 1395.66, "end": 1396.74, "word": " اكتشفت", "probability": 0.9794108072916666}, {"start": 1396.74, "end": 1396.98, "word": " هذا", "probability": 0.88720703125}, {"start": 1396.98, "end": 1397.48, "word": " الموضوع", "probability": 0.99462890625}, {"start": 1397.48, "end": 1398.0, "word": " طبعا", "probability": 0.900634765625}, {"start": 1398.0, "end": 1398.3, "word": " الناس", "probability": 0.982421875}, {"start": 1398.3, "end": 1398.58, "word": " هدول", "probability": 0.7369791666666666}, {"start": 1398.58, "end": 1398.78, "word": " في", "probability": 0.77734375}, {"start": 1398.78, "end": 1398.98, "word": " هذه", "probability": 0.7001953125}, {"start": 1398.98, 
"end": 1399.5, "word": " الحالة", "probability": 0.9757486979166666}, {"start": 1399.5, "end": 1399.82, "word": " شو", "probability": 0.819091796875}, {"start": 1399.82, "end": 1400.18, "word": " بيصيروا", "probability": 0.75634765625}, {"start": 1400.18, "end": 1400.26, "word": " يا", "probability": 0.9150390625}, {"start": 1400.26, "end": 1400.94, "word": " شباب؟", "probability": 0.923828125}, {"start": 1400.94, "end": 1401.32, "word": " بيصيروا", "probability": 0.87802734375}, {"start": 1401.32, "end": 1401.88, "word": " عرضة", "probability": 0.5028483072916666}, {"start": 1401.88, "end": 1403.18, "word": " لإيش؟", "probability": 0.71533203125}, {"start": 1403.18, "end": 1403.84, "word": " لجلطة", "probability": 0.7638671875}, {"start": 1403.84, "end": 1404.48, "word": " بالظبط", "probability": 0.89111328125}, {"start": 1404.48, "end": 1405.1, "word": " عرضة", "probability": 0.9646809895833334}, {"start": 1405.1, "end": 1406.06, "word": " لclot", "probability": 0.6861979166666666}, {"start": 1406.06, "end": 1406.72, "word": " formation", "probability": 0.97412109375}, {"start": 1406.72, "end": 1409.88, "word": " طيب", "probability": 0.95751953125}, {"start": 1409.88, "end": 1410.9, "word": " ننتقل", "probability": 0.8553466796875}, {"start": 1410.9, "end": 1411.62, "word": " لميكانيزم", "probability": 0.82275390625}, {"start": 1411.62, "end": 1412.3, "word": " التالت", "probability": 0.9386393229166666}, {"start": 1412.3, "end": 1413.28, "word": " وهو", "probability": 0.72607421875}, {"start": 1413.28, "end": 1414.66, "word": " إيش؟", "probability": 0.7598876953125}, {"start": 1414.66, "end": 1414.92, "word": " وهو", "probability": 0.73193359375}, {"start": 1414.92, "end": 1415.1, "word": " tissue", "probability": 0.96337890625}, {"start": 1415.1, "end": 1415.52, "word": " factor", "probability": 0.6767578125}, {"start": 1415.52, "end": 1415.9, "word": " pathway", "probability": 0.99169921875}, {"start": 1415.9, "end": 1416.5, "word": " inhibitor", 
"probability": 0.919921875}, {"start": 1416.5, "end": 1417.16, "word": " والمرة", "probability": 0.7506510416666666}, {"start": 1417.16, "end": 1417.3, "word": " اللي", "probability": 0.748046875}, {"start": 1417.3, "end": 1417.6, "word": " فاتة", "probability": 0.8605143229166666}, {"start": 1417.6, "end": 1417.8, "word": " كنا", "probability": 0.9287109375}, {"start": 1417.8, "end": 1418.38, "word": " حكينا", "probability": 0.9295654296875}, {"start": 1418.38, "end": 1419.0, "word": " إنه", "probability": 0.399169921875}, {"start": 1419.0, "end": 1419.2, "word": " tissue", "probability": 0.93896484375}, {"start": 1419.2, "end": 1419.66, "word": " factor", "probability": 0.875}, {"start": 1419.66, "end": 1420.04, "word": " pathway", "probability": 0.96728515625}, {"start": 1420.04, "end": 1420.66, "word": " inhibitor", "probability": 0.8720703125}, {"start": 1420.66, "end": 1421.56, "word": " ماهياشي", "probability": 0.609326171875}, {"start": 1421.56, "end": 1421.66, "word": " هو", "probability": 0.97998046875}, {"start": 1421.66, "end": 1421.9, "word": " عبارة", "probability": 0.792388916015625}, {"start": 1421.9, "end": 1422.14, "word": " عن", "probability": 0.99755859375}, {"start": 1422.14, "end": 1423.02, "word": " inhibitor", "probability": 0.809326171875}], "temperature": 1.0}, {"id": 55, "seek": 144606, "start": 1423.64, "end": 1446.06, "text": "موجود بشكل أساسي على طول على التشو ماشي شو بعمله بعمله انهيبش إلى ال complex اللي هو تشو فاكتور سبعة ايه complex و بعمله انهيبش إلى فاكتور عشر و بعمله انهيبش إلى فاكتور عشر و زي ما انتوا شايفين هذا ال endothelium طبعا مزعج", "tokens": [2304, 29245, 23328, 4724, 8592, 28820, 5551, 3794, 32277, 1829, 15844, 23032, 12610, 15844, 16712, 8592, 2407, 3714, 33599, 1829, 13412, 2407, 4724, 25957, 43761, 4724, 25957, 43761, 16472, 3224, 1829, 3555, 8592, 30731, 2423, 3997, 13672, 1829, 31439, 6055, 8592, 2407, 6156, 995, 4117, 2655, 13063, 8608, 3555, 27884, 1975, 1829, 3224, 3997, 4032, 4724, 25957, 43761, 16472, 3224, 
1829, 3555, 8592, 30731, 6156, 995, 4117, 2655, 13063, 6225, 46309, 4032, 4724, 25957, 43761, 16472, 3224, 1829, 3555, 8592, 30731, 6156, 995, 4117, 2655, 13063, 6225, 46309, 4032, 30767, 1829, 19446, 16472, 2655, 14407, 13412, 995, 33911, 9957, 23758, 2423, 917, 900, 338, 2197, 23032, 3555, 3615, 995, 3714, 11622, 3615, 7435], "avg_logprob": -0.2597313489307437, "compression_ratio": 2.021505376344086, "no_speech_prob": 7.987022399902344e-06, "words": [{"start": 1423.64, "end": 1424.2, "word": "موجود", "probability": 0.730224609375}, {"start": 1424.2, "end": 1424.66, "word": " بشكل", "probability": 0.9664713541666666}, {"start": 1424.66, "end": 1425.9, "word": " أساسي", "probability": 0.92041015625}, {"start": 1425.9, "end": 1427.32, "word": " على", "probability": 0.89794921875}, {"start": 1427.32, "end": 1427.74, "word": " طول", "probability": 0.92626953125}, {"start": 1427.74, "end": 1428.34, "word": " على", "probability": 0.66748046875}, {"start": 1428.34, "end": 1428.86, "word": " التشو", "probability": 0.6171061197916666}, {"start": 1428.86, "end": 1429.82, "word": " ماشي", "probability": 0.7024332682291666}, {"start": 1429.82, "end": 1431.32, "word": " شو", "probability": 0.782470703125}, {"start": 1431.32, "end": 1431.7, "word": " بعمله", "probability": 0.690673828125}, {"start": 1431.7, "end": 1432.24, "word": " بعمله", "probability": 0.7874348958333334}, {"start": 1432.24, "end": 1432.7, "word": " انهيبش", "probability": 0.8904296875}, {"start": 1432.7, "end": 1432.96, "word": " إلى", "probability": 0.207275390625}, {"start": 1432.96, "end": 1433.48, "word": " ال", "probability": 0.91748046875}, {"start": 1433.48, "end": 1434.18, "word": " complex", "probability": 0.81396484375}, {"start": 1434.18, "end": 1434.96, "word": " اللي", "probability": 0.688720703125}, {"start": 1434.96, "end": 1435.14, "word": " هو", "probability": 0.90673828125}, {"start": 1435.14, "end": 1435.42, "word": " تشو", "probability": 0.6539713541666666}, {"start": 1435.42, "end": 
1435.74, "word": " فاكتور", "probability": 0.84833984375}, {"start": 1435.74, "end": 1436.1, "word": " سبعة", "probability": 0.82763671875}, {"start": 1436.1, "end": 1436.34, "word": " ايه", "probability": 0.6686197916666666}, {"start": 1436.34, "end": 1436.8, "word": " complex", "probability": 0.35400390625}, {"start": 1436.8, "end": 1437.3, "word": " و", "probability": 0.6337890625}, {"start": 1437.3, "end": 1437.58, "word": " بعمله", "probability": 0.9449869791666666}, {"start": 1437.58, "end": 1437.92, "word": " انهيبش", "probability": 0.99130859375}, {"start": 1437.92, "end": 1438.06, "word": " إلى", "probability": 0.91064453125}, {"start": 1438.06, "end": 1438.44, "word": " فاكتور", "probability": 0.86171875}, {"start": 1438.44, "end": 1438.78, "word": " عشر", "probability": 0.847900390625}, {"start": 1438.78, "end": 1439.0, "word": " و", "probability": 0.7626953125}, {"start": 1439.0, "end": 1439.22, "word": " بعمله", "probability": 0.8761393229166666}, {"start": 1439.22, "end": 1439.56, "word": " انهيبش", "probability": 0.99462890625}, {"start": 1439.56, "end": 1439.68, "word": " إلى", "probability": 0.9677734375}, {"start": 1439.68, "end": 1439.96, "word": " فاكتور", "probability": 0.9625}, {"start": 1439.96, "end": 1440.18, "word": " عشر", "probability": 0.9189453125}, {"start": 1440.18, "end": 1441.16, "word": " و", "probability": 0.91552734375}, {"start": 1441.16, "end": 1441.48, "word": " زي", "probability": 0.8544921875}, {"start": 1441.48, "end": 1441.58, "word": " ما", "probability": 0.966796875}, {"start": 1441.58, "end": 1441.78, "word": " انتوا", "probability": 0.8172200520833334}, {"start": 1441.78, "end": 1442.2, "word": " شايفين", "probability": 0.981201171875}, {"start": 1442.2, "end": 1443.1, "word": " هذا", "probability": 0.11041259765625}, {"start": 1443.1, "end": 1443.22, "word": " ال", "probability": 0.96435546875}, {"start": 1443.22, "end": 1444.9, "word": " endothelium", "probability": 0.6116943359375}, {"start": 1444.9, "end": 1445.6, 
"word": " طبعا", "probability": 0.8609619140625}, {"start": 1445.6, "end": 1446.06, "word": " مزعج", "probability": 0.741455078125}], "temperature": 1.0}, {"id": 56, "seek": 148014, "start": 1452.08, "end": 1480.14, "text": "وعمل Complex هذا بنشط عشر أو تسعة مظبوط و automatically بمجرد ما يصير في activation لهذا ال ميكانيزم كنت اما هو بخوف ربنا اعطانا له حماية عالمطح فورية بيجي tissue factor pathway inhibitor وبيعملاش blocking لهذا ال ميكانيزم وشوفنا كيف بتتم التحويلة ماشي مفهوم", "tokens": [2407, 25957, 1211, 41184, 23758, 44945, 8592, 9566, 6225, 46309, 34051, 6055, 3794, 27884, 3714, 19913, 3555, 2407, 9566, 4032, 6772, 4724, 2304, 7435, 2288, 3215, 19446, 7251, 9381, 13546, 8978, 24433, 46740, 15730, 2423, 3714, 1829, 41361, 1829, 11622, 2304, 9122, 29399, 1975, 15042, 31439, 4724, 9778, 38688, 12602, 3555, 8315, 1975, 3615, 9566, 7649, 995, 46740, 11331, 15042, 10632, 6225, 45340, 9566, 5016, 6156, 13063, 10632, 4724, 1829, 7435, 1829, 12404, 5952, 18590, 20406, 3029, 4032, 21292, 25957, 1211, 33599, 17776, 46740, 15730, 2423, 3714, 1829, 41361, 1829, 11622, 2304, 4032, 8592, 38688, 8315, 9122, 33911, 39894, 39237, 16712, 5016, 2407, 26895, 3660, 3714, 33599, 1829, 3714, 5172, 3224, 20498], "avg_logprob": -0.30088495997201026, "compression_ratio": 1.5775193798449612, "no_speech_prob": 1.6093254089355469e-06, "words": [{"start": 1452.08, "end": 1453.06, "word": "وعمل", "probability": 0.4456583658854167}, {"start": 1453.06, "end": 1453.56, "word": " Complex", "probability": 0.7783203125}, {"start": 1453.56, "end": 1455.22, "word": " هذا", "probability": 0.1177978515625}, {"start": 1455.22, "end": 1455.88, "word": " بنشط", "probability": 0.7706705729166666}, {"start": 1455.88, "end": 1456.18, "word": " عشر", "probability": 0.6982421875}, {"start": 1456.18, "end": 1456.34, "word": " أو", "probability": 0.84326171875}, {"start": 1456.34, "end": 1456.8, "word": " تسعة", "probability": 0.9386393229166666}, {"start": 1456.8, "end": 1458.3, "word": " مظبوط", 
"probability": 0.808837890625}, {"start": 1458.3, "end": 1458.98, "word": " و", "probability": 0.8642578125}, {"start": 1458.98, "end": 1459.62, "word": " automatically", "probability": 0.25439453125}, {"start": 1459.62, "end": 1461.38, "word": " بمجرد", "probability": 0.941015625}, {"start": 1461.38, "end": 1461.52, "word": " ما", "probability": 0.93408203125}, {"start": 1461.52, "end": 1461.8, "word": " يصير", "probability": 0.7976888020833334}, {"start": 1461.8, "end": 1462.0, "word": " في", "probability": 0.560546875}, {"start": 1462.0, "end": 1462.58, "word": " activation", "probability": 0.7236328125}, {"start": 1462.58, "end": 1462.94, "word": " لهذا", "probability": 0.934814453125}, {"start": 1462.94, "end": 1463.08, "word": " ال", "probability": 0.82763671875}, {"start": 1463.08, "end": 1463.56, "word": " ميكانيزم", "probability": 0.75286865234375}, {"start": 1463.56, "end": 1463.78, "word": " كنت", "probability": 0.7724609375}, {"start": 1463.78, "end": 1463.9, "word": " اما", "probability": 0.3685302734375}, {"start": 1463.9, "end": 1464.0, "word": " هو", "probability": 0.970703125}, {"start": 1464.0, "end": 1464.46, "word": " بخوف", "probability": 0.9153645833333334}, {"start": 1464.46, "end": 1464.86, "word": " ربنا", "probability": 0.9441731770833334}, {"start": 1464.86, "end": 1465.54, "word": " اعطانا", "probability": 0.8673828125}, {"start": 1465.54, "end": 1466.38, "word": " له", "probability": 0.467529296875}, {"start": 1466.38, "end": 1467.22, "word": " حماية", "probability": 0.9892578125}, {"start": 1467.22, "end": 1468.72, "word": " عالمطح", "probability": 0.68194580078125}, {"start": 1468.72, "end": 1469.86, "word": " فورية", "probability": 0.95068359375}, {"start": 1469.86, "end": 1470.66, "word": " بيجي", "probability": 0.8897705078125}, {"start": 1470.66, "end": 1470.96, "word": " tissue", "probability": 0.389892578125}, {"start": 1470.96, "end": 1471.32, "word": " factor", "probability": 0.7646484375}, {"start": 1471.32, "end": 1471.68, 
"word": " pathway", "probability": 0.5087890625}, {"start": 1471.68, "end": 1472.14, "word": " inhibitor", "probability": 0.862548828125}, {"start": 1472.14, "end": 1472.74, "word": " وبيعملاش", "probability": 0.6896484375}, {"start": 1472.74, "end": 1473.18, "word": " blocking", "probability": 0.76171875}, {"start": 1473.18, "end": 1474.5, "word": " لهذا", "probability": 0.965087890625}, {"start": 1474.5, "end": 1474.84, "word": " ال", "probability": 0.75048828125}, {"start": 1474.84, "end": 1475.34, "word": " ميكانيزم", "probability": 0.840087890625}, {"start": 1475.34, "end": 1476.0, "word": " وشوفنا", "probability": 0.8565673828125}, {"start": 1476.0, "end": 1476.22, "word": " كيف", "probability": 0.973876953125}, {"start": 1476.22, "end": 1476.64, "word": " بتتم", "probability": 0.753662109375}, {"start": 1476.64, "end": 1477.62, "word": " التحويلة", "probability": 0.88642578125}, {"start": 1477.62, "end": 1478.22, "word": " ماشي", "probability": 0.90966796875}, {"start": 1478.22, "end": 1480.14, "word": " مفهوم", "probability": 0.843994140625}], "temperature": 1.0}, {"id": 57, "seek": 150077, "start": 1480.49, "end": 1500.77, "text": "بعمل blocking ليش لهذا ال mechanism اللي هو tissue factor plus one inhibitor بعمل inactivation لفكتور عشرة النشط وفكتور سبعة tissue factor complex أو سبعين المفهوم مش هيبقى هذا ميكانيزم موجود شبهناها المرة اللي فاتر هذا ال mechanism", "tokens": [3555, 25957, 1211, 17776, 32239, 8592, 46740, 15730, 2423, 7513, 13672, 1829, 31439, 12404, 5952, 1804, 472, 20406, 3029, 4724, 25957, 1211, 294, 23397, 399, 5296, 5172, 4117, 2655, 13063, 6225, 8592, 25720, 28239, 8592, 9566, 4032, 5172, 4117, 2655, 13063, 8608, 3555, 27884, 12404, 5952, 3997, 34051, 8608, 3555, 3615, 9957, 9673, 5172, 3224, 20498, 37893, 8032, 1829, 3555, 4587, 7578, 23758, 3714, 1829, 41361, 1829, 11622, 2304, 3714, 29245, 23328, 13412, 3555, 3224, 8315, 11296, 9673, 25720, 13672, 1829, 6156, 9307, 2288, 23758, 2423, 7513], "avg_logprob": -0.3149857907132669, 
"compression_ratio": 1.5981308411214954, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1480.49, "end": 1480.89, "word": "بعمل", "probability": 0.8077799479166666}, {"start": 1480.89, "end": 1481.43, "word": " blocking", "probability": 0.57275390625}, {"start": 1481.43, "end": 1482.13, "word": " ليش", "probability": 0.5533447265625}, {"start": 1482.13, "end": 1482.79, "word": " لهذا", "probability": 0.92236328125}, {"start": 1482.79, "end": 1483.33, "word": " ال", "probability": 0.91259765625}, {"start": 1483.33, "end": 1483.93, "word": " mechanism", "probability": 0.489990234375}, {"start": 1483.93, "end": 1484.73, "word": " اللي", "probability": 0.5865478515625}, {"start": 1484.73, "end": 1484.87, "word": " هو", "probability": 0.96875}, {"start": 1484.87, "end": 1485.01, "word": " tissue", "probability": 0.96044921875}, {"start": 1485.01, "end": 1485.35, "word": " factor", "probability": 0.7744140625}, {"start": 1485.35, "end": 1485.59, "word": " plus", "probability": 0.1884765625}, {"start": 1485.59, "end": 1485.71, "word": " one", "probability": 0.32958984375}, {"start": 1485.71, "end": 1486.19, "word": " inhibitor", "probability": 0.96484375}, {"start": 1486.19, "end": 1486.75, "word": " بعمل", "probability": 0.8660481770833334}, {"start": 1486.75, "end": 1487.31, "word": " inactivation", "probability": 0.7244466145833334}, {"start": 1487.31, "end": 1487.67, "word": " لفكتور", "probability": 0.6775390625}, {"start": 1487.67, "end": 1488.07, "word": " عشرة", "probability": 0.8141276041666666}, {"start": 1488.07, "end": 1488.93, "word": " النشط", "probability": 0.7750651041666666}, {"start": 1488.93, "end": 1489.49, "word": " وفكتور", "probability": 0.7593505859375}, {"start": 1489.49, "end": 1490.31, "word": " سبعة", "probability": 0.9580078125}, {"start": 1490.31, "end": 1491.07, "word": " tissue", "probability": 0.52783203125}, {"start": 1491.07, "end": 1491.61, "word": " factor", "probability": 0.8330078125}, {"start": 1491.61, "end": 1492.05, 
"word": " complex", "probability": 0.775390625}, {"start": 1492.05, "end": 1492.25, "word": " أو", "probability": 0.763671875}, {"start": 1492.25, "end": 1492.83, "word": " سبعين", "probability": 0.6363525390625}, {"start": 1492.83, "end": 1493.91, "word": " المفهوم", "probability": 0.83642578125}, {"start": 1493.91, "end": 1494.05, "word": " مش", "probability": 0.6005859375}, {"start": 1494.05, "end": 1494.49, "word": " هيبقى", "probability": 0.8107421875}, {"start": 1494.49, "end": 1495.57, "word": " هذا", "probability": 0.75927734375}, {"start": 1495.57, "end": 1496.37, "word": " ميكانيزم", "probability": 0.8245442708333334}, {"start": 1496.37, "end": 1498.45, "word": " موجود", "probability": 0.9685872395833334}, {"start": 1498.45, "end": 1498.99, "word": " شبهناها", "probability": 0.83935546875}, {"start": 1498.99, "end": 1499.19, "word": " المرة", "probability": 0.88232421875}, {"start": 1499.19, "end": 1499.37, "word": " اللي", "probability": 0.87255859375}, {"start": 1499.37, "end": 1499.83, "word": " فاتر", "probability": 0.758544921875}, {"start": 1499.83, "end": 1500.23, "word": " هذا", "probability": 0.8935546875}, {"start": 1500.23, "end": 1500.37, "word": " ال", "probability": 0.6884765625}, {"start": 1500.37, "end": 1500.77, "word": " mechanism", "probability": 0.73876953125}], "temperature": 1.0}, {"id": 58, "seek": 152941, "start": 1501.23, "end": 1529.41, "text": "وكأننا خشينا على غرفة مظلمة وضوينا الضوء وطفنا صح؟ يعني automatically بس يبدأ التفاعل بنطفي بس يبدأ التفاعل بنطفي لكن إنت لما بتضوي الضوء و بتطفي بتشوف إيش فيه في الغرفة و بتشوف إيش مية في المقادة اللي بيصير، بتكون كمية قليلة من factor 10A النشط اللي بيتكمل ال reaction حد عنده سؤال يا شباب", "tokens": [2407, 4117, 33456, 8315, 16490, 8592, 1829, 8315, 15844, 32771, 28480, 3660, 3714, 19913, 19528, 3660, 4032, 11242, 2407, 9957, 995, 6024, 114, 2407, 38207, 4032, 9566, 5172, 8315, 20328, 5016, 22807, 37495, 22653, 6772, 4724, 3794, 7251, 44510, 10721, 16712, 5172, 995, 30241, 44945, 9566, 
41185, 4724, 3794, 7251, 44510, 10721, 16712, 5172, 995, 30241, 44945, 9566, 41185, 44381, 11933, 29399, 5296, 15042, 39894, 11242, 45865, 6024, 114, 2407, 38207, 4032, 39894, 9566, 41185, 39894, 8592, 38688, 11933, 1829, 8592, 8978, 3224, 8978, 6024, 118, 28480, 3660, 4032, 39894, 8592, 38688, 11933, 1829, 8592, 3714, 10632, 8978, 9673, 4587, 18513, 3660, 13672, 1829, 4724, 1829, 9381, 13546, 12399, 39894, 30544, 9122, 2304, 10632, 12174, 20292, 37977, 9154, 5952, 1266, 32, 28239, 8592, 9566, 13672, 1829, 4724, 36081, 24793, 1211, 2423, 5480, 11331, 3215, 43242, 3224, 8608, 33604, 6027, 35186, 13412, 3555, 16758], "avg_logprob": -0.2526041670805878, "compression_ratio": 1.8716981132075472, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 1501.23, "end": 1502.13, "word": "وكأننا", "probability": 0.77783203125}, {"start": 1502.13, "end": 1502.71, "word": " خشينا", "probability": 0.8519287109375}, {"start": 1502.71, "end": 1502.89, "word": " على", "probability": 0.90478515625}, {"start": 1502.89, "end": 1503.29, "word": " غرفة", "probability": 0.9666341145833334}, {"start": 1503.29, "end": 1503.63, "word": " مظلمة", "probability": 0.872802734375}, {"start": 1503.63, "end": 1504.15, "word": " وضوينا", "probability": 0.667919921875}, {"start": 1504.15, "end": 1504.45, "word": " الضوء", "probability": 0.8665771484375}, {"start": 1504.45, "end": 1505.01, "word": " وطفنا", "probability": 0.7957763671875}, {"start": 1505.01, "end": 1505.63, "word": " صح؟", "probability": 0.674072265625}, {"start": 1505.63, "end": 1506.09, "word": " يعني", "probability": 0.905517578125}, {"start": 1506.09, "end": 1506.63, "word": " automatically", "probability": 0.324951171875}, {"start": 1506.63, "end": 1507.17, "word": " بس", "probability": 0.766357421875}, {"start": 1507.17, "end": 1508.17, "word": " يبدأ", "probability": 0.9700520833333334}, {"start": 1508.17, "end": 1509.07, "word": " التفاعل", "probability": 0.8887939453125}, {"start": 1509.07, "end": 1510.39, "word": " 
بنطفي", "probability": 0.6143798828125}, {"start": 1510.39, "end": 1511.39, "word": " بس", "probability": 0.77685546875}, {"start": 1511.39, "end": 1511.73, "word": " يبدأ", "probability": 0.9851888020833334}, {"start": 1511.73, "end": 1512.19, "word": " التفاعل", "probability": 0.934326171875}, {"start": 1512.19, "end": 1512.51, "word": " بنطفي", "probability": 0.9192708333333334}, {"start": 1512.51, "end": 1512.75, "word": " لكن", "probability": 0.7001953125}, {"start": 1512.75, "end": 1513.13, "word": " إنت", "probability": 0.6483154296875}, {"start": 1513.13, "end": 1513.81, "word": " لما", "probability": 0.94775390625}, {"start": 1513.81, "end": 1514.17, "word": " بتضوي", "probability": 0.7548828125}, {"start": 1514.17, "end": 1514.53, "word": " الضوء", "probability": 0.9361572265625}, {"start": 1514.53, "end": 1514.61, "word": " و", "probability": 0.52294921875}, {"start": 1514.61, "end": 1514.87, "word": " بتطفي", "probability": 0.799560546875}, {"start": 1514.87, "end": 1515.31, "word": " بتشوف", "probability": 0.9401041666666666}, {"start": 1515.31, "end": 1516.11, "word": " إيش", "probability": 0.5966389973958334}, {"start": 1516.11, "end": 1516.35, "word": " فيه", "probability": 0.6715087890625}, {"start": 1516.35, "end": 1516.35, "word": " في", "probability": 0.92041015625}, {"start": 1516.35, "end": 1516.65, "word": " الغرفة", "probability": 0.9720458984375}, {"start": 1516.65, "end": 1516.71, "word": " و", "probability": 0.78369140625}, {"start": 1516.71, "end": 1516.99, "word": " بتشوف", "probability": 0.9318033854166666}, {"start": 1516.99, "end": 1517.17, "word": " إيش", "probability": 0.81103515625}, {"start": 1517.17, "end": 1517.95, "word": " مية", "probability": 0.4219970703125}, {"start": 1517.95, "end": 1518.07, "word": " في", "probability": 0.78857421875}, {"start": 1518.07, "end": 1518.33, "word": " المقادة", "probability": 0.635650634765625}, {"start": 1518.33, "end": 1518.45, "word": " اللي", "probability": 0.952880859375}, {"start": 
1518.45, "end": 1518.91, "word": " بيصير،", "probability": 0.73349609375}, {"start": 1518.91, "end": 1519.55, "word": " بتكون", "probability": 0.77294921875}, {"start": 1519.55, "end": 1520.13, "word": " كمية", "probability": 0.9934895833333334}, {"start": 1520.13, "end": 1520.63, "word": " قليلة", "probability": 0.876953125}, {"start": 1520.63, "end": 1520.91, "word": " من", "probability": 0.990234375}, {"start": 1520.91, "end": 1521.55, "word": " factor", "probability": 0.7724609375}, {"start": 1521.55, "end": 1522.71, "word": " 10A", "probability": 0.59765625}, {"start": 1522.71, "end": 1523.71, "word": " النشط", "probability": 0.8629557291666666}, {"start": 1523.71, "end": 1524.57, "word": " اللي", "probability": 0.969482421875}, {"start": 1524.57, "end": 1526.01, "word": " بيتكمل", "probability": 0.826416015625}, {"start": 1526.01, "end": 1526.73, "word": " ال", "probability": 0.9609375}, {"start": 1526.73, "end": 1527.11, "word": " reaction", "probability": 0.93701171875}, {"start": 1527.11, "end": 1528.61, "word": " حد", "probability": 0.68701171875}, {"start": 1528.61, "end": 1528.81, "word": " عنده", "probability": 0.947021484375}, {"start": 1528.81, "end": 1529.05, "word": " سؤال", "probability": 0.98828125}, {"start": 1529.05, "end": 1529.23, "word": " يا", "probability": 0.352294921875}, {"start": 1529.23, "end": 1529.41, "word": " شباب", "probability": 0.9680989583333334}], "temperature": 1.0}, {"id": 59, "seek": 154976, "start": 1532.38, "end": 1549.76, "text": "حد مش فاهم، حد بيحب أعيد طيب هذه طبعا slide بتبين ليه سلسلة مناشر، سلسلة ال anticoagulant اللي موجودة وهذا عبارة عن ال coagulation cascade mechanism بيبين فيه دور", "tokens": [5016, 3215, 37893, 6156, 995, 16095, 12399, 11331, 3215, 4724, 1829, 5016, 3555, 5551, 3615, 25708, 23032, 1829, 3555, 29538, 23032, 3555, 3615, 995, 4137, 39894, 3555, 9957, 32239, 3224, 8608, 1211, 3794, 37977, 9154, 33599, 2288, 12399, 8608, 1211, 3794, 37977, 2423, 2511, 2789, 559, 425, 394, 13672, 1829, 3714, 29245, 
23328, 3660, 37037, 15730, 6225, 3555, 9640, 3660, 18871, 2423, 598, 559, 2776, 50080, 7513, 4724, 1829, 3555, 9957, 8978, 3224, 11778, 13063], "avg_logprob": -0.21607730243551104, "compression_ratio": 1.4678362573099415, "no_speech_prob": 3.0219554901123047e-05, "words": [{"start": 1532.38, "end": 1532.64, "word": "حد", "probability": 0.542327880859375}, {"start": 1532.64, "end": 1532.8, "word": " مش", "probability": 0.9697265625}, {"start": 1532.8, "end": 1533.32, "word": " فاهم،", "probability": 0.8934326171875}, {"start": 1533.32, "end": 1533.52, "word": " حد", "probability": 0.950439453125}, {"start": 1533.52, "end": 1533.8, "word": " بيحب", "probability": 0.9161376953125}, {"start": 1533.8, "end": 1534.22, "word": " أعيد", "probability": 0.752197265625}, {"start": 1534.22, "end": 1536.18, "word": " طيب", "probability": 0.79345703125}, {"start": 1536.18, "end": 1536.9, "word": " هذه", "probability": 0.278076171875}, {"start": 1536.9, "end": 1537.18, "word": " طبعا", "probability": 0.9254150390625}, {"start": 1537.18, "end": 1537.5, "word": " slide", "probability": 0.362060546875}, {"start": 1537.5, "end": 1537.94, "word": " بتبين", "probability": 0.9661458333333334}, {"start": 1537.94, "end": 1538.28, "word": " ليه", "probability": 0.70751953125}, {"start": 1538.28, "end": 1539.02, "word": " سلسلة", "probability": 0.9659423828125}, {"start": 1539.02, "end": 1540.12, "word": " مناشر،", "probability": 0.60357666015625}, {"start": 1540.12, "end": 1540.76, "word": " سلسلة", "probability": 0.99169921875}, {"start": 1540.76, "end": 1540.98, "word": " ال", "probability": 0.8828125}, {"start": 1540.98, "end": 1541.88, "word": " anticoagulant", "probability": 0.77529296875}, {"start": 1541.88, "end": 1542.02, "word": " اللي", "probability": 0.86279296875}, {"start": 1542.02, "end": 1542.82, "word": " موجودة", "probability": 0.982177734375}, {"start": 1542.82, "end": 1543.36, "word": " وهذا", "probability": 0.789794921875}, {"start": 1543.36, "end": 1543.7, "word": " 
عبارة", "probability": 0.9942626953125}, {"start": 1543.7, "end": 1544.32, "word": " عن", "probability": 0.99755859375}, {"start": 1544.32, "end": 1545.32, "word": " ال", "probability": 0.9130859375}, {"start": 1545.32, "end": 1546.02, "word": " coagulation", "probability": 0.8109537760416666}, {"start": 1546.02, "end": 1546.56, "word": " cascade", "probability": 0.95654296875}, {"start": 1546.56, "end": 1547.24, "word": " mechanism", "probability": 0.79833984375}, {"start": 1547.24, "end": 1548.78, "word": " بيبين", "probability": 0.8995361328125}, {"start": 1548.78, "end": 1549.24, "word": " فيه", "probability": 0.83056640625}, {"start": 1549.24, "end": 1549.76, "word": " دور", "probability": 0.995361328125}], "temperature": 1.0}, {"id": 60, "seek": 157953, "start": 1550.37, "end": 1579.53, "text": "الـ Inheritor بشكل أساسي، زي ما بتلاحظوا فيه دوائر صفراء ماشي، هذه الدوائر مكتوب في جلبها plus و minus، plus يعني active و minus الميكروفينية، فلو تتبعت اللي شرحناه كله في الفترة اللي فاتت، هتلاقي انه المواقع الـ Pro-coagulation ومواقع الـ Anti-coagulation الاسعار، مفهوم يا شباب؟ حد عنده شوية؟", "tokens": [6027, 39184, 682, 511, 3029, 4724, 8592, 28820, 5551, 3794, 32277, 1829, 12399, 30767, 1829, 19446, 39894, 15040, 5016, 19913, 14407, 8978, 3224, 11778, 2407, 16373, 2288, 20328, 5172, 2288, 16606, 3714, 33599, 1829, 12399, 29538, 32748, 2407, 16373, 2288, 3714, 4117, 2655, 37746, 8978, 10874, 46152, 11296, 1804, 4032, 3175, 12399, 1804, 37495, 22653, 4967, 4032, 3175, 9673, 1829, 4117, 32887, 5172, 9957, 10632, 12399, 6156, 1211, 2407, 6055, 2655, 3555, 34268, 13672, 1829, 13412, 2288, 5016, 8315, 3224, 28242, 3224, 8978, 27188, 2655, 25720, 13672, 1829, 6156, 9307, 2655, 12399, 8032, 2655, 15040, 38436, 16472, 3224, 9673, 14407, 4587, 3615, 2423, 39184, 1705, 12, 1291, 559, 2776, 4032, 2304, 14407, 4587, 3615, 2423, 39184, 27757, 12, 1291, 559, 2776, 2423, 32277, 3615, 9640, 12399, 3714, 5172, 3224, 20498, 35186, 13412, 3555, 16758, 22807, 11331, 3215, 43242, 
3224, 13412, 2407, 10632, 22807], "avg_logprob": -0.2569444452722867, "compression_ratio": 1.6527777777777777, "no_speech_prob": 4.76837158203125e-07, "words": [{"start": 1550.37, "end": 1550.81, "word": "الـ", "probability": 0.47723388671875}, {"start": 1550.81, "end": 1551.41, "word": " Inheritor", "probability": 0.6128743489583334}, {"start": 1551.41, "end": 1551.79, "word": " بشكل", "probability": 0.97314453125}, {"start": 1551.79, "end": 1552.33, "word": " أساسي،", "probability": 0.831591796875}, {"start": 1552.33, "end": 1552.39, "word": " زي", "probability": 0.658203125}, {"start": 1552.39, "end": 1552.49, "word": " ما", "probability": 0.6259765625}, {"start": 1552.49, "end": 1552.95, "word": " بتلاحظوا", "probability": 0.9126953125}, {"start": 1552.95, "end": 1553.13, "word": " فيه", "probability": 0.83837890625}, {"start": 1553.13, "end": 1553.47, "word": " دوائر", "probability": 0.8841552734375}, {"start": 1553.47, "end": 1554.09, "word": " صفراء", "probability": 0.8543701171875}, {"start": 1554.09, "end": 1555.53, "word": " ماشي،", "probability": 0.6673583984375}, {"start": 1555.53, "end": 1555.85, "word": " هذه", "probability": 0.91845703125}, {"start": 1555.85, "end": 1556.51, "word": " الدوائر", "probability": 0.966796875}, {"start": 1556.51, "end": 1557.55, "word": " مكتوب", "probability": 0.972412109375}, {"start": 1557.55, "end": 1557.83, "word": " في", "probability": 0.88916015625}, {"start": 1557.83, "end": 1558.59, "word": " جلبها", "probability": 0.9466145833333334}, {"start": 1558.59, "end": 1559.51, "word": " plus", "probability": 0.56494140625}, {"start": 1559.51, "end": 1559.85, "word": " و", "probability": 0.9580078125}, {"start": 1559.85, "end": 1560.93, "word": " minus،", "probability": 0.6893310546875}, {"start": 1560.93, "end": 1561.37, "word": " plus", "probability": 0.92236328125}, {"start": 1561.37, "end": 1561.87, "word": " يعني", "probability": 0.978515625}, {"start": 1561.87, "end": 1562.87, "word": " active", "probability": 
0.62451171875}, {"start": 1562.87, "end": 1563.19, "word": " و", "probability": 0.84716796875}, {"start": 1563.19, "end": 1563.75, "word": " minus", "probability": 0.99072265625}, {"start": 1563.75, "end": 1564.97, "word": " الميكروفينية،", "probability": 0.47760009765625}, {"start": 1564.97, "end": 1565.19, "word": " فلو", "probability": 0.9278971354166666}, {"start": 1565.19, "end": 1565.81, "word": " تتبعت", "probability": 0.97216796875}, {"start": 1565.81, "end": 1566.47, "word": " اللي", "probability": 0.77392578125}, {"start": 1566.47, "end": 1567.17, "word": " شرحناه", "probability": 0.981640625}, {"start": 1567.17, "end": 1567.49, "word": " كله", "probability": 0.970458984375}, {"start": 1567.49, "end": 1567.61, "word": " في", "probability": 0.76806640625}, {"start": 1567.61, "end": 1567.91, "word": " الفترة", "probability": 0.9903971354166666}, {"start": 1567.91, "end": 1568.03, "word": " اللي", "probability": 0.6046142578125}, {"start": 1568.03, "end": 1568.47, "word": " فاتت،", "probability": 0.8052978515625}, {"start": 1568.47, "end": 1568.65, "word": " هتلاقي", "probability": 0.853515625}, {"start": 1568.65, "end": 1569.07, "word": " انه", "probability": 0.6907958984375}, {"start": 1569.07, "end": 1570.47, "word": " المواقع", "probability": 0.979736328125}, {"start": 1570.47, "end": 1571.29, "word": " الـ", "probability": 0.7205810546875}, {"start": 1571.29, "end": 1571.55, "word": " Pro", "probability": 0.6015625}, {"start": 1571.55, "end": 1572.19, "word": "-coagulation", "probability": 0.78448486328125}, {"start": 1572.19, "end": 1572.87, "word": " ومواقع", "probability": 0.94609375}, {"start": 1572.87, "end": 1573.05, "word": " الـ", "probability": 0.704833984375}, {"start": 1573.05, "end": 1574.07, "word": " Anti", "probability": 0.2156982421875}, {"start": 1574.07, "end": 1574.89, "word": "-coagulation", "probability": 0.9776611328125}, {"start": 1574.89, "end": 1575.93, "word": " الاسعار،", "probability": 0.52646484375}, {"start": 1575.93, 
"end": 1576.99, "word": " مفهوم", "probability": 0.785919189453125}, {"start": 1576.99, "end": 1577.13, "word": " يا", "probability": 0.89111328125}, {"start": 1577.13, "end": 1578.49, "word": " شباب؟", "probability": 0.9451904296875}, {"start": 1578.49, "end": 1578.75, "word": " حد", "probability": 0.90380859375}, {"start": 1578.75, "end": 1579.01, "word": " عنده", "probability": 0.94775390625}, {"start": 1579.01, "end": 1579.53, "word": " شوية؟", "probability": 0.8839111328125}], "temperature": 1.0}, {"id": 61, "seek": 160536, "start": 1580.68, "end": 1605.36, "text": "حد عنده سؤال نبدأ بالمحاضرة التانية المحاضرة التانية تحكي عن الـ Fiber Analytics System واتبعوا عليها يا شباب بداية", "tokens": [5016, 3215, 43242, 3224, 8608, 33604, 6027, 8717, 44510, 10721, 20666, 2304, 5016, 46958, 25720, 16712, 7649, 10632, 9673, 5016, 46958, 25720, 16712, 7649, 10632, 6055, 5016, 4117, 1829, 18871, 2423, 39184, 479, 5331, 25944, 8910, 4032, 9307, 3555, 3615, 14407, 25894, 11296, 35186, 13412, 3555, 16758, 4724, 28259, 10632], "avg_logprob": -0.2473958274897407, "compression_ratio": 1.381294964028777, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 1580.68, "end": 1581.1, "word": "حد", "probability": 0.5230712890625}, {"start": 1581.1, "end": 1581.96, "word": " عنده", "probability": 0.9912109375}, {"start": 1581.96, "end": 1582.48, "word": " سؤال", "probability": 0.8308919270833334}, {"start": 1582.48, "end": 1584.76, "word": " نبدأ", "probability": 0.912109375}, {"start": 1584.76, "end": 1585.42, "word": " بالمحاضرة", "probability": 0.97470703125}, {"start": 1585.42, "end": 1586.04, "word": " التانية", "probability": 0.984375}, {"start": 1586.04, "end": 1597.82, "word": " المحاضرة", "probability": 0.9744873046875}, {"start": 1597.82, "end": 1598.3, "word": " التانية", "probability": 0.98876953125}, {"start": 1598.3, "end": 1598.78, "word": " تحكي", "probability": 0.86962890625}, {"start": 1598.78, "end": 1598.92, "word": " عن", "probability": 0.74755859375}, 
{"start": 1598.92, "end": 1599.06, "word": " الـ", "probability": 0.502197265625}, {"start": 1599.06, "end": 1599.26, "word": " Fiber", "probability": 0.5018310546875}, {"start": 1599.26, "end": 1599.64, "word": " Analytics", "probability": 0.32177734375}, {"start": 1599.64, "end": 1600.4, "word": " System", "probability": 0.892578125}, {"start": 1600.4, "end": 1603.5, "word": " واتبعوا", "probability": 0.6656494140625}, {"start": 1603.5, "end": 1603.66, "word": " عليها", "probability": 0.836669921875}, {"start": 1603.66, "end": 1603.78, "word": " يا", "probability": 0.61279296875}, {"start": 1603.78, "end": 1604.0, "word": " شباب", "probability": 0.9913736979166666}, {"start": 1604.0, "end": 1605.36, "word": " بداية", "probability": 0.8387044270833334}], "temperature": 1.0}, {"id": 62, "seek": 163139, "start": 1607.41, "end": 1631.39, "text": "لما بدأنا نحكي في ال hemostasis عرفناه وقلنا له different components قلنا له ال blood vessel as a component أساسي ثم ال platelet ثم ال coordination factor ماشي ثم ال fibrolytic system و بعدين قلنا في inhibitors لكل هذه ال system", "tokens": [1211, 15042, 47525, 10721, 8315, 8717, 5016, 4117, 1829, 8978, 2423, 8636, 555, 26632, 6225, 28480, 8315, 3224, 4032, 4587, 1211, 8315, 46740, 819, 6677, 12174, 1211, 8315, 46740, 2423, 3390, 18098, 382, 257, 6542, 5551, 3794, 32277, 1829, 38637, 2304, 2423, 3403, 15966, 38637, 2304, 2423, 21252, 5952, 3714, 33599, 1829, 38637, 2304, 2423, 13116, 340, 356, 40907, 1185, 4032, 39182, 9957, 12174, 1211, 8315, 8978, 20406, 9862, 5296, 28820, 29538, 2423, 1185], "avg_logprob": -0.23125, "compression_ratio": 1.5532994923857868, "no_speech_prob": 0.0, "words": [{"start": 1607.41, "end": 1607.77, "word": "لما", "probability": 0.724609375}, {"start": 1607.77, "end": 1608.13, "word": " بدأنا", "probability": 0.8409830729166666}, {"start": 1608.13, "end": 1608.45, "word": " نحكي", "probability": 0.9754638671875}, {"start": 1608.45, "end": 1608.59, "word": " في", "probability": 0.8974609375}, 
{"start": 1608.59, "end": 1608.67, "word": " ال", "probability": 0.962890625}, {"start": 1608.67, "end": 1609.37, "word": " hemostasis", "probability": 0.6641438802083334}, {"start": 1609.37, "end": 1610.27, "word": " عرفناه", "probability": 0.932861328125}, {"start": 1610.27, "end": 1610.61, "word": " وقلنا", "probability": 0.8206787109375}, {"start": 1610.61, "end": 1610.85, "word": " له", "probability": 0.38330078125}, {"start": 1610.85, "end": 1611.35, "word": " different", "probability": 0.876953125}, {"start": 1611.35, "end": 1612.57, "word": " components", "probability": 0.9169921875}, {"start": 1612.57, "end": 1615.45, "word": " قلنا", "probability": 0.9290364583333334}, {"start": 1615.45, "end": 1615.81, "word": " له", "probability": 0.83984375}, {"start": 1615.81, "end": 1616.91, "word": " ال", "probability": 0.7080078125}, {"start": 1616.91, "end": 1617.11, "word": " blood", "probability": 0.92529296875}, {"start": 1617.11, "end": 1617.45, "word": " vessel", "probability": 0.6162109375}, {"start": 1617.45, "end": 1617.71, "word": " as", "probability": 0.82958984375}, {"start": 1617.71, "end": 1617.83, "word": " a", "probability": 0.951171875}, {"start": 1617.83, "end": 1618.35, "word": " component", "probability": 0.79736328125}, {"start": 1618.35, "end": 1619.35, "word": " أساسي", "probability": 0.884033203125}, {"start": 1619.35, "end": 1620.05, "word": " ثم", "probability": 0.900146484375}, {"start": 1620.05, "end": 1620.19, "word": " ال", "probability": 0.8154296875}, {"start": 1620.19, "end": 1620.79, "word": " platelet", "probability": 0.63720703125}, {"start": 1620.79, "end": 1621.65, "word": " ثم", "probability": 0.776611328125}, {"start": 1621.65, "end": 1621.79, "word": " ال", "probability": 0.4814453125}, {"start": 1621.79, "end": 1622.17, "word": " coordination", "probability": 0.54052734375}, {"start": 1622.17, "end": 1622.85, "word": " factor", "probability": 0.91552734375}, {"start": 1622.85, "end": 1623.71, "word": " ماشي", "probability": 
0.7482096354166666}, {"start": 1623.71, "end": 1625.89, "word": " ثم", "probability": 0.9677734375}, {"start": 1625.89, "end": 1626.65, "word": " ال", "probability": 0.78515625}, {"start": 1626.65, "end": 1627.75, "word": " fibrolytic", "probability": 0.7337646484375}, {"start": 1627.75, "end": 1628.29, "word": " system", "probability": 0.96435546875}, {"start": 1628.29, "end": 1628.61, "word": " و", "probability": 0.72900390625}, {"start": 1628.61, "end": 1628.91, "word": " بعدين", "probability": 0.836181640625}, {"start": 1628.91, "end": 1629.13, "word": " قلنا", "probability": 0.9482421875}, {"start": 1629.13, "end": 1629.27, "word": " في", "probability": 0.7255859375}, {"start": 1629.27, "end": 1629.93, "word": " inhibitors", "probability": 0.74609375}, {"start": 1629.93, "end": 1630.61, "word": " لكل", "probability": 0.86279296875}, {"start": 1630.61, "end": 1630.93, "word": " هذه", "probability": 0.8115234375}, {"start": 1630.93, "end": 1631.11, "word": " ال", "probability": 0.96240234375}, {"start": 1631.11, "end": 1631.39, "word": " system", "probability": 0.97900390625}], "temperature": 1.0}, {"id": 63, "seek": 164872, "start": 1632.44, "end": 1648.72, "text": "ماشي؟ انا قدمت ال inhibitors عن ال fibrotic system لإن هو ال final هو النهائي اللى بيصير حملية healing، بيصير شفاء للجريحة، صار فيه مزج، كولنا جلطة، بنينا الجلطة", "tokens": [2304, 33599, 1829, 22807, 1975, 8315, 12174, 40448, 2655, 2423, 20406, 9862, 18871, 2423, 283, 6414, 9411, 1185, 5296, 28814, 1863, 31439, 2423, 2572, 31439, 28239, 3224, 16373, 1829, 13672, 7578, 4724, 1829, 9381, 13546, 11331, 42213, 10632, 9745, 12399, 4724, 1829, 9381, 13546, 13412, 5172, 16606, 24976, 7435, 16572, 5016, 3660, 12399, 20328, 9640, 8978, 3224, 3714, 11622, 7435, 12399, 9122, 12610, 8315, 10874, 1211, 9566, 3660, 12399, 44945, 1829, 8315, 25724, 1211, 9566, 3660], "avg_logprob": -0.20261769403110852, "compression_ratio": 1.449438202247191, "no_speech_prob": 2.4139881134033203e-05, "words": [{"start": 1632.44, 
"end": 1633.44, "word": "ماشي؟", "probability": 0.72607421875}, {"start": 1633.44, "end": 1633.66, "word": " انا", "probability": 0.7100830078125}, {"start": 1633.66, "end": 1634.18, "word": " قدمت", "probability": 0.85595703125}, {"start": 1634.18, "end": 1634.28, "word": " ال", "probability": 0.9873046875}, {"start": 1634.28, "end": 1634.82, "word": " inhibitors", "probability": 0.900390625}, {"start": 1634.82, "end": 1635.02, "word": " عن", "probability": 0.984375}, {"start": 1635.02, "end": 1635.12, "word": " ال", "probability": 0.896484375}, {"start": 1635.12, "end": 1635.54, "word": " fibrotic", "probability": 0.5548502604166666}, {"start": 1635.54, "end": 1635.92, "word": " system", "probability": 0.9638671875}, {"start": 1635.92, "end": 1636.22, "word": " لإن", "probability": 0.90283203125}, {"start": 1636.22, "end": 1636.46, "word": " هو", "probability": 0.94482421875}, {"start": 1636.46, "end": 1637.1, "word": " ال", "probability": 0.984375}, {"start": 1637.1, "end": 1637.64, "word": " final", "probability": 0.865234375}, {"start": 1637.64, "end": 1638.56, "word": " هو", "probability": 0.58544921875}, {"start": 1638.56, "end": 1640.1, "word": " النهائي", "probability": 0.9500732421875}, {"start": 1640.1, "end": 1640.22, "word": " اللى", "probability": 0.723876953125}, {"start": 1640.22, "end": 1640.5, "word": " بيصير", "probability": 0.956298828125}, {"start": 1640.5, "end": 1640.88, "word": " حملية", "probability": 0.7222493489583334}, {"start": 1640.88, "end": 1642.02, "word": " healing،", "probability": 0.6937255859375}, {"start": 1642.02, "end": 1642.58, "word": " بيصير", "probability": 0.9681396484375}, {"start": 1642.58, "end": 1643.14, "word": " شفاء", "probability": 0.9833984375}, {"start": 1643.14, "end": 1644.24, "word": " للجريحة،", "probability": 0.82373046875}, {"start": 1644.24, "end": 1644.48, "word": " صار", "probability": 0.991943359375}, {"start": 1644.48, "end": 1644.7, "word": " فيه", "probability": 0.73974609375}, {"start": 1644.7, 
"end": 1645.76, "word": " مزج،", "probability": 0.73956298828125}, {"start": 1645.76, "end": 1646.26, "word": " كولنا", "probability": 0.6993001302083334}, {"start": 1646.26, "end": 1647.54, "word": " جلطة،", "probability": 0.97412109375}, {"start": 1647.54, "end": 1647.94, "word": " بنينا", "probability": 0.8528645833333334}, {"start": 1647.94, "end": 1648.72, "word": " الجلطة", "probability": 0.9942626953125}], "temperature": 1.0}, {"id": 64, "seek": 167620, "start": 1649.04, "end": 1676.2, "text": "بضلنا نبني بحيث انه ما سكرناش ال blood vessels مظبوط و اول ما بدى يصير فيه تسكير في ال blood vessels بدى ال fibrolytic system يعمل إذابة لكل الجلطات اللى زوائد الجلطة اللى تكون مفهومش بقى هذا اللى هندرسه في ال fibrolytic system هذا اللى هندرسه في ال fibrolysis او ال fibrolytic system", "tokens": [3555, 11242, 1211, 8315, 8717, 3555, 22653, 4724, 5016, 1829, 12984, 16472, 3224, 19446, 8608, 37983, 1863, 33599, 2423, 3390, 20117, 3714, 19913, 3555, 2407, 9566, 4032, 1975, 12610, 19446, 47525, 7578, 7251, 9381, 13546, 8978, 3224, 6055, 3794, 4117, 13546, 8978, 2423, 3390, 20117, 47525, 7578, 2423, 13116, 340, 356, 40907, 1185, 7251, 25957, 1211, 11933, 15730, 49401, 5296, 28820, 25724, 1211, 9566, 9307, 13672, 7578, 30767, 14407, 19986, 3215, 25724, 1211, 9566, 3660, 13672, 7578, 6055, 30544, 3714, 5172, 3224, 20498, 8592, 4724, 4587, 7578, 23758, 13672, 7578, 8032, 41260, 2288, 3794, 3224, 8978, 2423, 13116, 340, 356, 40907, 1185, 23758, 13672, 7578, 8032, 41260, 2288, 3794, 3224, 8978, 2423, 13116, 340, 356, 17122, 1975, 2407, 2423, 13116, 340, 356, 40907, 1185], "avg_logprob": -0.19312500429153442, "compression_ratio": 1.9908256880733946, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1649.04, "end": 1649.68, "word": "بضلنا", "probability": 0.798828125}, {"start": 1649.68, "end": 1650.18, "word": " نبني", "probability": 0.9558919270833334}, {"start": 1650.18, "end": 1651.04, "word": " بحيث", "probability": 0.9901123046875}, {"start": 1651.04, "end": 
1652.02, "word": " انه", "probability": 0.77001953125}, {"start": 1652.02, "end": 1652.24, "word": " ما", "probability": 0.8125}, {"start": 1652.24, "end": 1653.0, "word": " سكرناش", "probability": 0.8331298828125}, {"start": 1653.0, "end": 1653.18, "word": " ال", "probability": 0.4189453125}, {"start": 1653.18, "end": 1653.34, "word": " blood", "probability": 0.9189453125}, {"start": 1653.34, "end": 1653.84, "word": " vessels", "probability": 0.91015625}, {"start": 1653.84, "end": 1655.1, "word": " مظبوط", "probability": 0.871240234375}, {"start": 1655.1, "end": 1655.66, "word": " و", "probability": 0.59521484375}, {"start": 1655.66, "end": 1655.94, "word": " اول", "probability": 0.7158203125}, {"start": 1655.94, "end": 1656.06, "word": " ما", "probability": 0.9384765625}, {"start": 1656.06, "end": 1656.38, "word": " بدى", "probability": 0.853515625}, {"start": 1656.38, "end": 1656.74, "word": " يصير", "probability": 0.8479817708333334}, {"start": 1656.74, "end": 1657.02, "word": " فيه", "probability": 0.7724609375}, {"start": 1657.02, "end": 1657.5, "word": " تسكير", "probability": 0.9853515625}, {"start": 1657.5, "end": 1657.68, "word": " في", "probability": 0.34814453125}, {"start": 1657.68, "end": 1657.8, "word": " ال", "probability": 0.74560546875}, {"start": 1657.8, "end": 1658.08, "word": " blood", "probability": 0.9794921875}, {"start": 1658.08, "end": 1658.52, "word": " vessels", "probability": 0.88330078125}, {"start": 1658.52, "end": 1659.5, "word": " بدى", "probability": 0.84912109375}, {"start": 1659.5, "end": 1659.6, "word": " ال", "probability": 0.94482421875}, {"start": 1659.6, "end": 1660.14, "word": " fibrolytic", "probability": 0.7117919921875}, {"start": 1660.14, "end": 1660.54, "word": " system", "probability": 0.96044921875}, {"start": 1660.54, "end": 1661.4, "word": " يعمل", "probability": 0.9803059895833334}, {"start": 1661.4, "end": 1662.16, "word": " إذابة", "probability": 0.8505859375}, {"start": 1662.16, "end": 1663.16, "word": " لكل", 
"probability": 0.974853515625}, {"start": 1663.16, "end": 1664.46, "word": " الجلطات", "probability": 0.949951171875}, {"start": 1664.46, "end": 1664.84, "word": " اللى", "probability": 0.6669921875}, {"start": 1664.84, "end": 1665.64, "word": " زوائد", "probability": 0.708251953125}, {"start": 1665.64, "end": 1666.56, "word": " الجلطة", "probability": 0.9794921875}, {"start": 1666.56, "end": 1666.72, "word": " اللى", "probability": 0.99462890625}, {"start": 1666.72, "end": 1667.2, "word": " تكون", "probability": 0.700439453125}, {"start": 1667.2, "end": 1668.18, "word": " مفهومش", "probability": 0.732421875}, {"start": 1668.18, "end": 1668.34, "word": " بقى", "probability": 0.6988932291666666}, {"start": 1668.34, "end": 1669.28, "word": " هذا", "probability": 0.260009765625}, {"start": 1669.28, "end": 1669.54, "word": " اللى", "probability": 0.98388671875}, {"start": 1669.54, "end": 1670.12, "word": " هندرسه", "probability": 0.95244140625}, {"start": 1670.12, "end": 1670.24, "word": " في", "probability": 0.6845703125}, {"start": 1670.24, "end": 1670.36, "word": " ال", "probability": 0.87744140625}, {"start": 1670.36, "end": 1670.9, "word": " fibrolytic", "probability": 0.8607177734375}, {"start": 1670.9, "end": 1671.32, "word": " system", "probability": 0.9755859375}, {"start": 1671.32, "end": 1672.44, "word": " هذا", "probability": 0.81201171875}, {"start": 1672.44, "end": 1672.94, "word": " اللى", "probability": 0.985595703125}, {"start": 1672.94, "end": 1673.68, "word": " هندرسه", "probability": 0.99091796875}, {"start": 1673.68, "end": 1674.14, "word": " في", "probability": 0.935546875}, {"start": 1674.14, "end": 1674.28, "word": " ال", "probability": 0.80810546875}, {"start": 1674.28, "end": 1675.1, "word": " fibrolysis", "probability": 0.909423828125}, {"start": 1675.1, "end": 1675.34, "word": " او", "probability": 0.8291015625}, {"start": 1675.34, "end": 1675.42, "word": " ال", "probability": 0.73681640625}, {"start": 1675.42, "end": 1675.94, "word": " 
fibrolytic", "probability": 0.89306640625}, {"start": 1675.94, "end": 1676.2, "word": " system", "probability": 0.96533203125}], "temperature": 1.0}, {"id": 65, "seek": 170456, "start": 1685.62, "end": 1704.56, "text": "بالنسبة للتاريخ، بدأت الحكاية، بدأنا نحكي عن هذا الـsystem في الـ1937، لما مكفارلانس، scientist اسمه مكفارلانس، إيش جال؟ جال إن الـdamage in tissue، بيطلع منها substance، هذه ال substance فيها عبارة عن activator", "tokens": [3555, 6027, 1863, 35457, 3660, 24976, 2655, 9640, 1829, 9778, 12399, 47525, 10721, 2655, 21542, 4117, 995, 10632, 12399, 47525, 10721, 8315, 8717, 5016, 4117, 1829, 18871, 23758, 2423, 39184, 28215, 8978, 2423, 39184, 3405, 12851, 12399, 5296, 15042, 3714, 4117, 5172, 9640, 1211, 7649, 3794, 12399, 12662, 24525, 2304, 3224, 3714, 4117, 5172, 9640, 1211, 7649, 3794, 12399, 11933, 1829, 8592, 10874, 6027, 22807, 10874, 6027, 36145, 2423, 39184, 10170, 609, 294, 12404, 12399, 4724, 1829, 9566, 1211, 3615, 9154, 11296, 12961, 12399, 29538, 2423, 12961, 8978, 11296, 6225, 3555, 9640, 3660, 18871, 2430, 1639], "avg_logprob": -0.3479381443298969, "compression_ratio": 1.5714285714285714, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1685.6200000000001, "end": 1686.42, "word": "بالنسبة", "probability": 0.603173828125}, {"start": 1686.42, "end": 1687.22, "word": " للتاريخ،", "probability": 0.6790364583333334}, {"start": 1687.22, "end": 1687.76, "word": " بدأت", "probability": 0.740234375}, {"start": 1687.76, "end": 1688.22, "word": " الحكاية،", "probability": 0.7697265625}, {"start": 1688.22, "end": 1688.46, "word": " بدأنا", "probability": 0.8888346354166666}, {"start": 1688.46, "end": 1688.78, "word": " نحكي", "probability": 0.932861328125}, {"start": 1688.78, "end": 1688.9, "word": " عن", "probability": 0.490966796875}, {"start": 1688.9, "end": 1689.14, "word": " هذا", "probability": 0.96044921875}, {"start": 1689.14, "end": 1689.54, "word": " الـsystem", "probability": 0.4942220052083333}, {"start": 1689.54, "end": 
1689.68, "word": " في", "probability": 0.84130859375}, {"start": 1689.68, "end": 1690.94, "word": " الـ1937،", "probability": 0.69052734375}, {"start": 1690.94, "end": 1691.08, "word": " لما", "probability": 0.806396484375}, {"start": 1691.08, "end": 1692.06, "word": " مكفارلانس،", "probability": 0.646697998046875}, {"start": 1692.06, "end": 1692.46, "word": " scientist", "probability": 0.0638427734375}, {"start": 1692.46, "end": 1693.58, "word": " اسمه", "probability": 0.8719075520833334}, {"start": 1693.58, "end": 1695.08, "word": " مكفارلانس،", "probability": 0.951904296875}, {"start": 1695.08, "end": 1695.32, "word": " إيش", "probability": 0.7586263020833334}, {"start": 1695.32, "end": 1696.06, "word": " جال؟", "probability": 0.7848307291666666}, {"start": 1696.06, "end": 1696.44, "word": " جال", "probability": 0.921142578125}, {"start": 1696.44, "end": 1696.62, "word": " إن", "probability": 0.84228515625}, {"start": 1696.62, "end": 1698.02, "word": " الـdamage", "probability": 0.6531982421875}, {"start": 1698.02, "end": 1698.34, "word": " in", "probability": 0.939453125}, {"start": 1698.34, "end": 1699.4, "word": " tissue،", "probability": 0.712646484375}, {"start": 1699.4, "end": 1700.2, "word": " بيطلع", "probability": 0.821142578125}, {"start": 1700.2, "end": 1700.56, "word": " منها", "probability": 0.99658203125}, {"start": 1700.56, "end": 1702.32, "word": " substance،", "probability": 0.781005859375}, {"start": 1702.32, "end": 1702.56, "word": " هذه", "probability": 0.93896484375}, {"start": 1702.56, "end": 1702.74, "word": " ال", "probability": 0.95751953125}, {"start": 1702.74, "end": 1703.12, "word": " substance", "probability": 0.6796875}, {"start": 1703.12, "end": 1703.52, "word": " فيها", "probability": 0.940673828125}, {"start": 1703.52, "end": 1703.7, "word": " عبارة", "probability": 0.8525390625}, {"start": 1703.7, "end": 1703.84, "word": " عن", "probability": 0.99658203125}, {"start": 1703.84, "end": 1704.56, "word": " activator", "probability": 
0.848876953125}], "temperature": 1.0}, {"id": 66, "seek": 171833, "start": 1705.61, "end": 1718.33, "text": "ماشي باضعافة لل activators اللي بتطلع فيه activator طلع ماشي نشط substrate اسمه بلازمينوجين و حوله الى انزايم سموه Blaser", "tokens": [2304, 33599, 1829, 4724, 46958, 3615, 31845, 3660, 24976, 2430, 3391, 13672, 1829, 39894, 9566, 1211, 3615, 8978, 3224, 2430, 1639, 23032, 1211, 3615, 3714, 33599, 1829, 8717, 8592, 9566, 27585, 24525, 2304, 3224, 4724, 1211, 31377, 2304, 9957, 29245, 9957, 4032, 11331, 12610, 3224, 2423, 7578, 16472, 11622, 995, 32640, 8608, 2304, 2407, 3224, 2177, 17756], "avg_logprob": -0.3127694042592213, "compression_ratio": 1.3597122302158273, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1705.61, "end": 1706.13, "word": "ماشي", "probability": 0.72265625}, {"start": 1706.13, "end": 1707.47, "word": " باضعافة", "probability": 0.7802734375}, {"start": 1707.47, "end": 1707.59, "word": " لل", "probability": 0.61474609375}, {"start": 1707.59, "end": 1708.39, "word": " activators", "probability": 0.5877685546875}, {"start": 1708.39, "end": 1708.63, "word": " اللي", "probability": 0.794921875}, {"start": 1708.63, "end": 1709.07, "word": " بتطلع", "probability": 0.865966796875}, {"start": 1709.07, "end": 1709.29, "word": " فيه", "probability": 0.42333984375}, {"start": 1709.29, "end": 1709.93, "word": " activator", "probability": 0.794921875}, {"start": 1709.93, "end": 1710.67, "word": " طلع", "probability": 0.9375}, {"start": 1710.67, "end": 1711.33, "word": " ماشي", "probability": 0.8798828125}, {"start": 1711.33, "end": 1711.93, "word": " نشط", "probability": 0.9627278645833334}, {"start": 1711.93, "end": 1712.97, "word": " substrate", "probability": 0.91796875}, {"start": 1712.97, "end": 1713.59, "word": " اسمه", "probability": 0.9514973958333334}, {"start": 1713.59, "end": 1714.39, "word": " بلازمينوجين", "probability": 0.7069440569196429}, {"start": 1714.39, "end": 1715.21, "word": " و", "probability": 0.9033203125}, 
{"start": 1715.21, "end": 1715.75, "word": " حوله", "probability": 0.8020833333333334}, {"start": 1715.75, "end": 1716.39, "word": " الى", "probability": 0.771728515625}, {"start": 1716.39, "end": 1717.53, "word": " انزايم", "probability": 0.70501708984375}, {"start": 1717.53, "end": 1717.99, "word": " سموه", "probability": 0.7872314453125}, {"start": 1717.99, "end": 1718.33, "word": " Blaser", "probability": 0.2783203125}], "temperature": 1.0}, {"id": 67, "seek": 174790, "start": 1719.34, "end": 1747.9, "text": "بداية الحكية عن الموضوع كانت في سبعة و تلاتين، تلفة وتسعمائه و سبعة و تلاتين، MacFarlane's جاله انه بيطلع، اليوم مايصير فيه مزعج، في tissue plasminogen activator، a plasminogen activator، ماشي؟ وسموها tissue لإنها طالع من ال damage tissue، مظبوط، tissue plasminogen activator، which convert plasminogen as a substrate إلى إيش؟ إلى plasma، مفهوم عن أيه؟ هذا الكلام؟", "tokens": [3555, 28259, 10632, 21542, 4117, 10632, 18871, 9673, 2407, 11242, 45367, 25961, 2655, 8978, 8608, 3555, 27884, 4032, 6055, 1211, 9307, 9957, 12399, 6055, 1211, 5172, 3660, 34683, 3794, 25957, 16373, 3224, 4032, 8608, 3555, 27884, 4032, 6055, 1211, 9307, 9957, 12399, 5707, 37, 6843, 1929, 311, 10874, 6027, 3224, 16472, 3224, 4724, 1829, 9566, 1211, 3615, 12399, 45595, 20498, 19446, 1829, 9381, 13546, 8978, 3224, 3714, 11622, 3615, 7435, 12399, 8978, 12404, 499, 296, 2367, 8799, 2430, 1639, 12399, 257, 499, 296, 2367, 8799, 2430, 1639, 12399, 3714, 33599, 1829, 22807, 4032, 38251, 2407, 11296, 12404, 5296, 28814, 1863, 11296, 23032, 6027, 3615, 9154, 2423, 4344, 12404, 12399, 3714, 19913, 3555, 2407, 9566, 12399, 12404, 499, 296, 2367, 8799, 2430, 1639, 12399, 597, 7620, 499, 296, 2367, 8799, 382, 257, 27585, 30731, 11933, 1829, 8592, 22807, 30731, 22564, 12399, 3714, 5172, 3224, 20498, 18871, 36632, 3224, 22807, 23758, 2423, 28820, 10943, 22807], "avg_logprob": -0.2694805330270297, "compression_ratio": 1.7925170068027212, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 
1719.34, "end": 1720.02, "word": "بداية", "probability": 0.630615234375}, {"start": 1720.02, "end": 1720.38, "word": " الحكية", "probability": 0.7760416666666666}, {"start": 1720.38, "end": 1720.52, "word": " عن", "probability": 0.498779296875}, {"start": 1720.52, "end": 1720.86, "word": " الموضوع", "probability": 0.994140625}, {"start": 1720.86, "end": 1721.14, "word": " كانت", "probability": 0.94873046875}, {"start": 1721.14, "end": 1721.22, "word": " في", "probability": 0.9296875}, {"start": 1721.22, "end": 1721.5, "word": " سبعة", "probability": 0.6314290364583334}, {"start": 1721.5, "end": 1721.58, "word": " و", "probability": 0.6123046875}, {"start": 1721.58, "end": 1722.22, "word": " تلاتين،", "probability": 0.830419921875}, {"start": 1722.22, "end": 1722.56, "word": " تلفة", "probability": 0.4415283203125}, {"start": 1722.56, "end": 1722.86, "word": " وتسعمائه", "probability": 0.7296875}, {"start": 1722.86, "end": 1723.02, "word": " و", "probability": 0.420654296875}, {"start": 1723.02, "end": 1723.12, "word": " سبعة", "probability": 0.8916015625}, {"start": 1723.12, "end": 1723.24, "word": " و", "probability": 0.958984375}, {"start": 1723.24, "end": 1724.02, "word": " تلاتين،", "probability": 0.841259765625}, {"start": 1724.02, "end": 1724.98, "word": " MacFarlane's", "probability": 0.56885986328125}, {"start": 1724.98, "end": 1725.3, "word": " جاله", "probability": 0.5330403645833334}, {"start": 1725.3, "end": 1725.5, "word": " انه", "probability": 0.726806640625}, {"start": 1725.5, "end": 1726.02, "word": " بيطلع،", "probability": 0.74658203125}, {"start": 1726.02, "end": 1726.32, "word": " اليوم", "probability": 0.9697265625}, {"start": 1726.32, "end": 1726.64, "word": " مايصير", "probability": 0.8909912109375}, {"start": 1726.64, "end": 1726.82, "word": " فيه", "probability": 0.91748046875}, {"start": 1726.82, "end": 1727.72, "word": " مزعج،", "probability": 0.82744140625}, {"start": 1727.72, "end": 1727.9, "word": " في", "probability": 0.89990234375}, 
{"start": 1727.9, "end": 1728.34, "word": " tissue", "probability": 0.69384765625}, {"start": 1728.34, "end": 1729.06, "word": " plasminogen", "probability": 0.835693359375}, {"start": 1729.06, "end": 1730.08, "word": " activator،", "probability": 0.9352213541666666}, {"start": 1730.08, "end": 1730.36, "word": " a", "probability": 0.238037109375}, {"start": 1730.36, "end": 1731.18, "word": " plasminogen", "probability": 0.938720703125}, {"start": 1731.18, "end": 1732.0, "word": " activator،", "probability": 0.8388671875}, {"start": 1732.0, "end": 1732.8, "word": " ماشي؟", "probability": 0.77783203125}, {"start": 1732.8, "end": 1733.26, "word": " وسموها", "probability": 0.89208984375}, {"start": 1733.26, "end": 1733.52, "word": " tissue", "probability": 0.9658203125}, {"start": 1733.52, "end": 1733.84, "word": " لإنها", "probability": 0.83447265625}, {"start": 1733.84, "end": 1734.06, "word": " طالع", "probability": 0.8649088541666666}, {"start": 1734.06, "end": 1734.18, "word": " من", "probability": 0.96484375}, {"start": 1734.18, "end": 1734.5, "word": " ال", "probability": 0.95361328125}, {"start": 1734.5, "end": 1735.18, "word": " damage", "probability": 0.71533203125}, {"start": 1735.18, "end": 1736.24, "word": " tissue،", "probability": 0.938720703125}, {"start": 1736.24, "end": 1737.26, "word": " مظبوط،", "probability": 0.765625}, {"start": 1737.26, "end": 1737.56, "word": " tissue", "probability": 0.9755859375}, {"start": 1737.56, "end": 1738.24, "word": " plasminogen", "probability": 0.945068359375}, {"start": 1738.24, "end": 1739.6, "word": " activator،", "probability": 0.8125}, {"start": 1739.6, "end": 1739.86, "word": " which", "probability": 0.962890625}, {"start": 1739.86, "end": 1740.66, "word": " convert", "probability": 0.53515625}, {"start": 1740.66, "end": 1742.06, "word": " plasminogen", "probability": 0.938720703125}, {"start": 1742.06, "end": 1742.4, "word": " as", "probability": 0.947265625}, {"start": 1742.4, "end": 1742.54, "word": " a", 
"probability": 0.9521484375}, {"start": 1742.54, "end": 1742.96, "word": " substrate", "probability": 0.9736328125}, {"start": 1742.96, "end": 1743.74, "word": " إلى", "probability": 0.73046875}, {"start": 1743.74, "end": 1744.32, "word": " إيش؟", "probability": 0.901611328125}, {"start": 1744.32, "end": 1744.86, "word": " إلى", "probability": 0.69677734375}, {"start": 1744.86, "end": 1745.62, "word": " plasma،", "probability": 0.596435546875}, {"start": 1745.62, "end": 1745.92, "word": " مفهوم", "probability": 0.810791015625}, {"start": 1745.92, "end": 1746.06, "word": " عن", "probability": 0.70556640625}, {"start": 1746.06, "end": 1747.02, "word": " أيه؟", "probability": 0.7921549479166666}, {"start": 1747.02, "end": 1747.22, "word": " هذا", "probability": 0.6337890625}, {"start": 1747.22, "end": 1747.9, "word": " الكلام؟", "probability": 0.9398193359375}], "temperature": 1.0}, {"id": 68, "seek": 177497, "start": 1748.47, "end": 1774.97, "text": "انحكى فيه في السابع و تلاتين تعريفاً الـ Fibrotic process هي عبارة عن عملية بنتور فيها دا process of removing fibrin ماشي from .. 
from ماشي from the vasculature vasculature يعني ايش الوسط الوسط الدمو اللي صار فيه مزاق صار فيه fibrin اكلت و fibrin", "tokens": [7649, 5016, 4117, 7578, 8978, 3224, 8978, 21136, 16758, 3615, 4032, 6055, 1211, 9307, 9957, 37279, 16572, 5172, 995, 14111, 2423, 39184, 479, 6414, 9411, 1399, 39896, 6225, 3555, 9640, 3660, 18871, 6225, 42213, 10632, 4724, 29399, 13063, 8978, 11296, 11778, 995, 1399, 295, 12720, 283, 6414, 259, 3714, 33599, 1829, 490, 4386, 490, 3714, 33599, 1829, 490, 264, 11481, 2444, 1503, 11481, 2444, 1503, 37495, 22653, 1975, 1829, 8592, 2423, 41779, 9566, 2423, 41779, 9566, 32748, 2304, 2407, 13672, 1829, 20328, 9640, 8978, 3224, 3714, 11622, 995, 4587, 20328, 9640, 8978, 3224, 283, 6414, 259, 1975, 28820, 2655, 4032, 283, 6414, 259], "avg_logprob": -0.3458533567877916, "compression_ratio": 1.6543778801843319, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 1748.47, "end": 1749.09, "word": "انحكى", "probability": 0.7108154296875}, {"start": 1749.09, "end": 1749.29, "word": " فيه", "probability": 0.92431640625}, {"start": 1749.29, "end": 1749.39, "word": " في", "probability": 0.849609375}, {"start": 1749.39, "end": 1749.75, "word": " السابع", "probability": 0.5327962239583334}, {"start": 1749.75, "end": 1749.81, "word": " و", "probability": 0.38037109375}, {"start": 1749.81, "end": 1750.27, "word": " تلاتين", "probability": 0.9166259765625}, {"start": 1750.27, "end": 1753.03, "word": " تعريفاً", "probability": 0.800244140625}, {"start": 1753.03, "end": 1753.59, "word": " الـ", "probability": 0.53594970703125}, {"start": 1753.59, "end": 1754.01, "word": " Fibrotic", "probability": 0.656982421875}, {"start": 1754.01, "end": 1754.61, "word": " process", "probability": 0.5908203125}, {"start": 1754.61, "end": 1755.27, "word": " هي", "probability": 0.890625}, {"start": 1755.27, "end": 1755.47, "word": " عبارة", "probability": 0.824462890625}, {"start": 1755.47, "end": 1755.59, "word": " عن", "probability": 0.99365234375}, {"start": 1755.59, 
"end": 1756.03, "word": " عملية", "probability": 0.9803059895833334}, {"start": 1756.03, "end": 1756.51, "word": " بنتور", "probability": 0.6029459635416666}, {"start": 1756.51, "end": 1756.77, "word": " فيها", "probability": 0.96630859375}, {"start": 1756.77, "end": 1757.07, "word": " دا", "probability": 0.65478515625}, {"start": 1757.07, "end": 1757.67, "word": " process", "probability": 0.5986328125}, {"start": 1757.67, "end": 1758.19, "word": " of", "probability": 0.9521484375}, {"start": 1758.19, "end": 1758.61, "word": " removing", "probability": 0.63818359375}, {"start": 1758.61, "end": 1759.49, "word": " fibrin", "probability": 0.8067220052083334}, {"start": 1759.49, "end": 1760.37, "word": " ماشي", "probability": 0.6642862955729166}, {"start": 1760.37, "end": 1762.07, "word": " from", "probability": 0.303955078125}, {"start": 1762.07, "end": 1762.55, "word": " ..", "probability": 0.341064453125}, {"start": 1762.55, "end": 1763.21, "word": " from", "probability": 0.77734375}, {"start": 1763.21, "end": 1764.15, "word": " ماشي", "probability": 0.8291015625}, {"start": 1764.15, "end": 1764.59, "word": " from", "probability": 0.654296875}, {"start": 1764.59, "end": 1765.19, "word": " the", "probability": 0.85009765625}, {"start": 1765.19, "end": 1766.79, "word": " vasculature", "probability": 0.9479166666666666}, {"start": 1766.79, "end": 1767.63, "word": " vasculature", "probability": 0.7281494140625}, {"start": 1767.63, "end": 1767.91, "word": " يعني", "probability": 0.9736328125}, {"start": 1767.91, "end": 1768.05, "word": " ايش", "probability": 0.7548014322916666}, {"start": 1768.05, "end": 1768.51, "word": " الوسط", "probability": 0.8509114583333334}, {"start": 1768.51, "end": 1769.53, "word": " الوسط", "probability": 0.87841796875}, {"start": 1769.53, "end": 1770.31, "word": " الدمو", "probability": 0.6643880208333334}, {"start": 1770.31, "end": 1770.51, "word": " اللي", "probability": 0.593505859375}, {"start": 1770.51, "end": 1770.89, "word": " صار", 
"probability": 0.67626953125}, {"start": 1770.89, "end": 1771.05, "word": " فيه", "probability": 0.9560546875}, {"start": 1771.05, "end": 1771.49, "word": " مزاق", "probability": 0.6668701171875}, {"start": 1771.49, "end": 1772.07, "word": " صار", "probability": 0.9482421875}, {"start": 1772.07, "end": 1772.23, "word": " فيه", "probability": 0.9677734375}, {"start": 1772.23, "end": 1772.89, "word": " fibrin", "probability": 0.9132486979166666}, {"start": 1772.89, "end": 1773.83, "word": " اكلت", "probability": 0.4347330729166667}, {"start": 1773.83, "end": 1774.27, "word": " و", "probability": 0.91455078125}, {"start": 1774.27, "end": 1774.97, "word": " fibrin", "probability": 0.8256022135416666}], "temperature": 1.0}, {"id": 69, "seek": 179280, "start": 1775.42, "end": 1792.8, "text": "تكوّن في وسط مين؟ لما المزعج بنيرال جالتر ده البقايا مين؟ البقايا الفايربرين احنا مانينا ندوّن هذا الفايربرين ونرجع ال blood vessels إلى وضعه 100 في النيل ف system هدواته", "tokens": [2655, 4117, 2407, 11703, 1863, 8978, 46952, 9566, 3714, 9957, 22807, 5296, 15042, 9673, 11622, 3615, 7435, 44945, 13546, 6027, 10874, 6027, 2655, 2288, 11778, 3224, 29739, 4587, 995, 25528, 3714, 9957, 22807, 29739, 4587, 995, 25528, 27188, 995, 13546, 26890, 9957, 1975, 5016, 8315, 3714, 7649, 1829, 8315, 8717, 3215, 2407, 11703, 1863, 23758, 27188, 995, 13546, 26890, 9957, 4032, 1863, 47341, 3615, 2423, 3390, 20117, 30731, 4032, 11242, 3615, 3224, 2319, 8978, 2423, 22653, 1211, 6156, 1185, 8032, 3215, 2407, 9307, 3224], "avg_logprob": -0.36801471569958855, "compression_ratio": 1.6111111111111112, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1775.42, "end": 1776.16, "word": "تكوّن", "probability": 0.5966796875}, {"start": 1776.16, "end": 1776.34, "word": " في", "probability": 0.892578125}, {"start": 1776.34, "end": 1776.74, "word": " وسط", "probability": 0.988525390625}, {"start": 1776.74, "end": 1777.66, "word": " مين؟", "probability": 0.50048828125}, {"start": 1777.66, "end": 
1778.28, "word": " لما", "probability": 0.744873046875}, {"start": 1778.28, "end": 1778.9, "word": " المزعج", "probability": 0.72412109375}, {"start": 1778.9, "end": 1779.36, "word": " بنيرال", "probability": 0.472412109375}, {"start": 1779.36, "end": 1779.8, "word": " جالتر", "probability": 0.73748779296875}, {"start": 1779.8, "end": 1779.98, "word": " ده", "probability": 0.651123046875}, {"start": 1779.98, "end": 1780.6, "word": " البقايا", "probability": 0.9205322265625}, {"start": 1780.6, "end": 1781.36, "word": " مين؟", "probability": 0.82763671875}, {"start": 1781.36, "end": 1781.72, "word": " البقايا", "probability": 0.9266357421875}, {"start": 1781.72, "end": 1782.28, "word": " الفايربرين", "probability": 0.60361328125}, {"start": 1782.28, "end": 1782.84, "word": " احنا", "probability": 0.794189453125}, {"start": 1782.84, "end": 1783.22, "word": " مانينا", "probability": 0.637237548828125}, {"start": 1783.22, "end": 1783.8, "word": " ندوّن", "probability": 0.812890625}, {"start": 1783.8, "end": 1784.06, "word": " هذا", "probability": 0.9375}, {"start": 1784.06, "end": 1784.88, "word": " الفايربرين", "probability": 0.9025390625}, {"start": 1784.88, "end": 1785.86, "word": " ونرجع", "probability": 0.7767333984375}, {"start": 1785.86, "end": 1785.96, "word": " ال", "probability": 0.68017578125}, {"start": 1785.96, "end": 1786.16, "word": " blood", "probability": 0.9111328125}, {"start": 1786.16, "end": 1786.56, "word": " vessels", "probability": 0.90966796875}, {"start": 1786.56, "end": 1786.76, "word": " إلى", "probability": 0.6640625}, {"start": 1786.76, "end": 1787.38, "word": " وضعه", "probability": 0.985595703125}, {"start": 1787.38, "end": 1788.64, "word": " 100", "probability": 0.30615234375}, {"start": 1788.64, "end": 1788.86, "word": " في", "probability": 0.5888671875}, {"start": 1788.86, "end": 1789.18, "word": " النيل", "probability": 0.4689534505208333}, {"start": 1789.18, "end": 1789.66, "word": " ف", "probability": 0.82470703125}, {"start": 
1789.66, "end": 1791.26, "word": " system", "probability": 0.199462890625}, {"start": 1791.26, "end": 1792.8, "word": " هدواته", "probability": 0.77353515625}], "temperature": 1.0}, {"id": 70, "seek": 182196, "start": 1793.64, "end": 1821.96, "text": "ماشي اللاعيبين الأساسيين فيه نمر واحد بلازمينوجين بدنا substrate نمر واحد بلازمينوجين وهو ال substrate ثم activator لل substrate وهو بلازمينوجين activator ماشي which activate البلازمينوجين إلى ال enzyme إلى ال enzyme form وهو البلازمين ماشي شو بعمل البلازمين؟ بشتغل على ال fibre and fibrinogen لأنه بشتغل على الجهتين", "tokens": [2304, 33599, 1829, 13672, 45761, 1829, 3555, 9957, 16247, 3794, 32277, 1829, 9957, 8978, 3224, 8717, 29973, 36764, 24401, 4724, 1211, 31377, 2304, 9957, 29245, 9957, 47525, 8315, 27585, 8717, 29973, 36764, 24401, 4724, 1211, 31377, 2304, 9957, 29245, 9957, 37037, 2407, 2423, 27585, 38637, 2304, 2430, 1639, 24976, 27585, 37037, 2407, 4724, 1211, 31377, 2304, 9957, 29245, 9957, 2430, 1639, 3714, 33599, 1829, 597, 13615, 29739, 1211, 31377, 2304, 9957, 29245, 9957, 30731, 2423, 24521, 30731, 2423, 24521, 1254, 37037, 2407, 29739, 1211, 31377, 2304, 9957, 3714, 33599, 1829, 13412, 2407, 4724, 25957, 1211, 29739, 1211, 31377, 2304, 9957, 22807, 4724, 8592, 2655, 17082, 1211, 15844, 2423, 36738, 293, 283, 6414, 259, 8799, 5296, 33456, 3224, 4724, 8592, 2655, 17082, 1211, 15844, 25724, 3224, 2655, 9957], "avg_logprob": -0.23205566429533064, "compression_ratio": 2.2363636363636363, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1793.64, "end": 1794.22, "word": "ماشي", "probability": 0.52972412109375}, {"start": 1794.22, "end": 1795.44, "word": " اللاعيبين", "probability": 0.732861328125}, {"start": 1795.44, "end": 1796.36, "word": " الأساسيين", "probability": 0.96337890625}, {"start": 1796.36, "end": 1796.9, "word": " فيه", "probability": 0.90283203125}, {"start": 1796.9, "end": 1797.28, "word": " نمر", "probability": 0.5089111328125}, {"start": 1797.28, "end": 1797.52, "word": " واحد", 
"probability": 0.831298828125}, {"start": 1797.52, "end": 1798.2, "word": " بلازمينوجين", "probability": 0.7486746651785714}, {"start": 1798.2, "end": 1798.5, "word": " بدنا", "probability": 0.551239013671875}, {"start": 1798.5, "end": 1799.08, "word": " substrate", "probability": 0.7548828125}, {"start": 1799.08, "end": 1800.06, "word": " نمر", "probability": 0.88720703125}, {"start": 1800.06, "end": 1800.34, "word": " واحد", "probability": 0.988525390625}, {"start": 1800.34, "end": 1801.02, "word": " بلازمينوجين", "probability": 0.9442661830357143}, {"start": 1801.02, "end": 1801.24, "word": " وهو", "probability": 0.7091064453125}, {"start": 1801.24, "end": 1801.36, "word": " ال", "probability": 0.9296875}, {"start": 1801.36, "end": 1801.8, "word": " substrate", "probability": 0.8896484375}, {"start": 1801.8, "end": 1802.7, "word": " ثم", "probability": 0.961669921875}, {"start": 1802.7, "end": 1803.46, "word": " activator", "probability": 0.830810546875}, {"start": 1803.46, "end": 1803.74, "word": " لل", "probability": 0.68505859375}, {"start": 1803.74, "end": 1804.6, "word": " substrate", "probability": 0.90771484375}, {"start": 1804.6, "end": 1804.9, "word": " وهو", "probability": 0.895263671875}, {"start": 1804.9, "end": 1805.42, "word": " بلازمينوجين", "probability": 0.9336635044642857}, {"start": 1805.42, "end": 1806.26, "word": " activator", "probability": 0.87255859375}, {"start": 1806.26, "end": 1807.28, "word": " ماشي", "probability": 0.8209635416666666}, {"start": 1807.28, "end": 1807.96, "word": " which", "probability": 0.8525390625}, {"start": 1807.96, "end": 1808.68, "word": " activate", "probability": 0.64208984375}, {"start": 1808.68, "end": 1809.82, "word": " البلازمينوجين", "probability": 0.82861328125}, {"start": 1809.82, "end": 1810.08, "word": " إلى", "probability": 0.64794921875}, {"start": 1810.08, "end": 1810.26, "word": " ال", "probability": 0.34619140625}, {"start": 1810.26, "end": 1810.78, "word": " enzyme", "probability": 
0.78955078125}, {"start": 1810.78, "end": 1811.58, "word": " إلى", "probability": 0.685546875}, {"start": 1811.58, "end": 1811.78, "word": " ال", "probability": 0.97802734375}, {"start": 1811.78, "end": 1812.28, "word": " enzyme", "probability": 0.984375}, {"start": 1812.28, "end": 1812.82, "word": " form", "probability": 0.74658203125}, {"start": 1812.82, "end": 1813.4, "word": " وهو", "probability": 0.921875}, {"start": 1813.4, "end": 1814.14, "word": " البلازمين", "probability": 0.8935546875}, {"start": 1814.14, "end": 1814.94, "word": " ماشي", "probability": 0.7814127604166666}, {"start": 1814.94, "end": 1815.12, "word": " شو", "probability": 0.8837890625}, {"start": 1815.12, "end": 1815.38, "word": " بعمل", "probability": 0.9464518229166666}, {"start": 1815.38, "end": 1816.6, "word": " البلازمين؟", "probability": 0.8602294921875}, {"start": 1816.6, "end": 1816.94, "word": " بشتغل", "probability": 0.9197265625}, {"start": 1816.94, "end": 1817.08, "word": " على", "probability": 0.79296875}, {"start": 1817.08, "end": 1817.2, "word": " ال", "probability": 0.65869140625}, {"start": 1817.2, "end": 1817.56, "word": " fibre", "probability": 0.390869140625}, {"start": 1817.56, "end": 1820.0, "word": " and", "probability": 0.876953125}, {"start": 1820.0, "end": 1820.82, "word": " fibrinogen", "probability": 0.765380859375}, {"start": 1820.82, "end": 1821.12, "word": " لأنه", "probability": 0.7784830729166666}, {"start": 1821.12, "end": 1821.36, "word": " بشتغل", "probability": 0.975390625}, {"start": 1821.36, "end": 1821.46, "word": " على", "probability": 0.833984375}, {"start": 1821.46, "end": 1821.96, "word": " الجهتين", "probability": 0.9312744140625}], "temperature": 1.0}, {"id": 71, "seek": 185205, "start": 1822.85, "end": 1852.05, "text": "وبيحولهم إلى degradation product شو بيحولهم؟ degradation product أو بيسموهم splitting product بيسموهم إيش؟ splitting split يعني إيش؟ التقطيع، split، التشجيف، التشجيف بالبلدي، ماشي؟ يبقى المختار الاختصار هو FDB أو FSP أو إيش؟ 
FSP، FSP اختصار لإيش؟", "tokens": [2407, 21292, 5016, 12610, 16095, 30731, 40519, 1674, 13412, 2407, 4724, 1829, 5016, 12610, 16095, 22807, 40519, 1674, 34051, 4724, 1829, 38251, 2407, 16095, 30348, 1674, 4724, 1829, 38251, 2407, 16095, 11933, 1829, 8592, 22807, 30348, 7472, 37495, 22653, 11933, 1829, 8592, 22807, 16712, 47432, 40228, 12399, 7472, 12399, 16712, 8592, 7435, 33911, 12399, 16712, 8592, 7435, 33911, 20666, 3555, 1211, 16254, 12399, 3714, 33599, 1829, 22807, 7251, 3555, 4587, 7578, 9673, 46456, 9640, 2423, 47283, 2655, 9381, 9640, 31439, 479, 35, 33, 34051, 479, 27921, 34051, 11933, 1829, 8592, 22807, 479, 27921, 12399, 479, 27921, 1975, 46456, 9381, 9640, 5296, 28814, 1829, 8592, 22807], "avg_logprob": -0.28375590606680456, "compression_ratio": 1.865, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1822.85, "end": 1823.79, "word": "وبيحولهم", "probability": 0.8564453125}, {"start": 1823.79, "end": 1824.23, "word": " إلى", "probability": 0.79638671875}, {"start": 1824.23, "end": 1825.03, "word": " degradation", "probability": 0.64306640625}, {"start": 1825.03, "end": 1825.87, "word": " product", "probability": 0.95703125}, {"start": 1825.87, "end": 1826.53, "word": " شو", "probability": 0.53668212890625}, {"start": 1826.53, "end": 1827.25, "word": " بيحولهم؟", "probability": 0.9134114583333334}, {"start": 1827.25, "end": 1828.11, "word": " degradation", "probability": 0.78125}, {"start": 1828.11, "end": 1828.81, "word": " product", "probability": 0.974609375}, {"start": 1828.81, "end": 1828.95, "word": " أو", "probability": 0.77001953125}, {"start": 1828.95, "end": 1829.53, "word": " بيسموهم", "probability": 0.8767578125}, {"start": 1829.53, "end": 1831.03, "word": " splitting", "probability": 0.85498046875}, {"start": 1831.03, "end": 1831.95, "word": " product", "probability": 0.96728515625}, {"start": 1831.95, "end": 1833.21, "word": " بيسموهم", "probability": 0.94462890625}, {"start": 1833.21, "end": 1833.65, "word": " إيش؟", 
"probability": 0.764892578125}, {"start": 1833.65, "end": 1834.59, "word": " splitting", "probability": 0.70703125}, {"start": 1834.59, "end": 1835.01, "word": " split", "probability": 0.4482421875}, {"start": 1835.01, "end": 1835.37, "word": " يعني", "probability": 0.9638671875}, {"start": 1835.37, "end": 1836.61, "word": " إيش؟", "probability": 0.949462890625}, {"start": 1836.61, "end": 1837.37, "word": " التقطيع،", "probability": 0.789215087890625}, {"start": 1837.37, "end": 1838.61, "word": " split،", "probability": 0.42333984375}, {"start": 1838.61, "end": 1839.67, "word": " التشجيف،", "probability": 0.675244140625}, {"start": 1839.67, "end": 1840.63, "word": " التشجيف", "probability": 0.9791259765625}, {"start": 1840.63, "end": 1841.17, "word": " بالبلدي،", "probability": 0.586376953125}, {"start": 1841.17, "end": 1842.53, "word": " ماشي؟", "probability": 0.7037353515625}, {"start": 1842.53, "end": 1842.91, "word": " يبقى", "probability": 0.75244140625}, {"start": 1842.91, "end": 1843.95, "word": " المختار", "probability": 0.8289388020833334}, {"start": 1843.95, "end": 1844.65, "word": " الاختصار", "probability": 0.8260986328125}, {"start": 1844.65, "end": 1844.81, "word": " هو", "probability": 0.900390625}, {"start": 1844.81, "end": 1845.77, "word": " FDB", "probability": 0.4589436848958333}, {"start": 1845.77, "end": 1846.17, "word": " أو", "probability": 0.642578125}, {"start": 1846.17, "end": 1847.93, "word": " FSP", "probability": 0.89501953125}, {"start": 1847.93, "end": 1849.09, "word": " أو", "probability": 0.54736328125}, {"start": 1849.09, "end": 1850.01, "word": " إيش؟", "probability": 0.89697265625}, {"start": 1850.01, "end": 1850.69, "word": " FSP،", "probability": 0.7010091145833334}, {"start": 1850.69, "end": 1851.11, "word": " FSP", "probability": 0.824951171875}, {"start": 1851.11, "end": 1851.57, "word": " اختصار", "probability": 0.9149169921875}, {"start": 1851.57, "end": 1852.05, "word": " لإيش؟", "probability": 0.887109375}], 
"temperature": 1.0}, {"id": 72, "seek": 187043, "start": 1855.05, "end": 1870.43, "text": "فايرن فايرن فايرن فايرن فايرن فايرن فايرن فايرن فايرن فايرن فايرن فايرن", "tokens": [5172, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863, 6156, 995, 13546, 1863], "avg_logprob": -0.40848214285714285, "compression_ratio": 5.695652173913044, "no_speech_prob": 1.8477439880371094e-06, "words": [{"start": 1855.05, "end": 1856.45, "word": "فايرن", "probability": 0.567047119140625}, {"start": 1856.45, "end": 1856.91, "word": " فايرن", "probability": 0.65093994140625}, {"start": 1856.91, "end": 1856.97, "word": " فايرن", "probability": 0.620758056640625}, {"start": 1856.97, "end": 1857.07, "word": " فايرن", "probability": 0.730316162109375}, {"start": 1857.07, "end": 1857.37, "word": " فايرن", "probability": 0.7919921875}, {"start": 1857.37, "end": 1858.91, "word": " فايرن", "probability": 0.826904296875}, {"start": 1858.91, "end": 1860.95, "word": " فايرن", "probability": 0.8553466796875}, {"start": 1860.95, "end": 1861.63, "word": " فايرن", "probability": 0.8819580078125}, {"start": 1861.63, "end": 1862.09, "word": " فايرن", "probability": 0.9051513671875}, {"start": 1862.09, "end": 1866.47, "word": " فايرن", "probability": 0.923828125}, {"start": 1866.47, "end": 1869.27, "word": " فايرن", "probability": 0.93798828125}, {"start": 1869.27, "end": 1870.43, "word": " فايرن", "probability": 0.9488525390625}], "temperature": 1.0}, {"id": 73, "seek": 189118, "start": 1870.58, "end": 1891.18, "text": "طبعا هذا ال system كاملا، هذا ال system يحتاج إلى inhibitors، يحتاج إلى controller، مظبوط؟ فبدنا نحكي كمان على ال inhibitors أو الـ Plasminogenic Activator and Plasminogen مافهومش بقى؟ ماشي؟", "tokens": [9566, 3555, 3615, 995, 23758, 2423, 1185, 9122, 10943, 15040, 12399, 23758, 2423, 
1185, 7251, 33753, 26108, 30731, 20406, 9862, 12399, 7251, 33753, 26108, 30731, 10561, 12399, 3714, 19913, 3555, 2407, 9566, 22807, 6156, 44510, 8315, 8717, 5016, 4117, 1829, 9122, 2304, 7649, 15844, 2423, 20406, 9862, 34051, 2423, 39184, 2149, 296, 2367, 8799, 299, 28550, 1639, 293, 2149, 296, 2367, 8799, 19446, 5172, 3224, 20498, 8592, 4724, 4587, 7578, 22807, 3714, 33599, 1829, 22807], "avg_logprob": -0.26274672464320536, "compression_ratio": 1.467741935483871, "no_speech_prob": 0.0, "words": [{"start": 1870.58, "end": 1870.9, "word": "طبعا", "probability": 0.9193115234375}, {"start": 1870.9, "end": 1871.06, "word": " هذا", "probability": 0.7578125}, {"start": 1871.06, "end": 1871.22, "word": " ال", "probability": 0.93701171875}, {"start": 1871.22, "end": 1871.52, "word": " system", "probability": 0.65478515625}, {"start": 1871.52, "end": 1872.58, "word": " كاملا،", "probability": 0.69464111328125}, {"start": 1872.58, "end": 1873.36, "word": " هذا", "probability": 0.92724609375}, {"start": 1873.36, "end": 1873.56, "word": " ال", "probability": 0.99267578125}, {"start": 1873.56, "end": 1873.86, "word": " system", "probability": 0.98193359375}, {"start": 1873.86, "end": 1874.88, "word": " يحتاج", "probability": 0.9890950520833334}, {"start": 1874.88, "end": 1875.48, "word": " إلى", "probability": 0.74072265625}, {"start": 1875.48, "end": 1876.48, "word": " inhibitors،", "probability": 0.8141276041666666}, {"start": 1876.48, "end": 1877.42, "word": " يحتاج", "probability": 0.9900716145833334}, {"start": 1877.42, "end": 1878.16, "word": " إلى", "probability": 0.92138671875}, {"start": 1878.16, "end": 1879.52, "word": " controller،", "probability": 0.6729736328125}, {"start": 1879.52, "end": 1880.44, "word": " مظبوط؟", "probability": 0.8871256510416666}, {"start": 1880.44, "end": 1880.82, "word": " فبدنا", "probability": 0.7455240885416666}, {"start": 1880.82, "end": 1881.08, "word": " نحكي", "probability": 0.9906005859375}, {"start": 1881.08, "end": 1881.38, "word": 
" كمان", "probability": 0.9744466145833334}, {"start": 1881.38, "end": 1881.58, "word": " على", "probability": 0.61083984375}, {"start": 1881.58, "end": 1881.74, "word": " ال", "probability": 0.62646484375}, {"start": 1881.74, "end": 1882.3, "word": " inhibitors", "probability": 0.906982421875}, {"start": 1882.3, "end": 1882.52, "word": " أو", "probability": 0.61767578125}, {"start": 1882.52, "end": 1882.66, "word": " الـ", "probability": 0.2689208984375}, {"start": 1882.66, "end": 1883.6, "word": " Plasminogenic", "probability": 0.709619140625}, {"start": 1883.6, "end": 1884.7, "word": " Activator", "probability": 0.67578125}, {"start": 1884.7, "end": 1885.82, "word": " and", "probability": 0.70361328125}, {"start": 1885.82, "end": 1886.62, "word": " Plasminogen", "probability": 0.8427734375}, {"start": 1886.62, "end": 1887.36, "word": " مافهومش", "probability": 0.68466796875}, {"start": 1887.36, "end": 1890.64, "word": " بقى؟", "probability": 0.80029296875}, {"start": 1890.64, "end": 1891.18, "word": " ماشي؟", "probability": 0.9063720703125}], "temperature": 1.0}, {"id": 74, "seek": 191877, "start": 1892.83, "end": 1918.77, "text": "وظيفة عرفنا تعريفا ووظيفة الـ Fibrotic System أو الـ Fibrolysis is a system whereby the temporary fibrin clot is systematically and gradually dissolved يعني احنا بندوب الجلطة اللتي تكون موقتا عشان تسد الجريح كل الجلطة اللي بدوبها", "tokens": [2407, 19913, 33911, 3660, 6225, 28480, 8315, 37279, 16572, 5172, 995, 4032, 2407, 19913, 33911, 3660, 2423, 39184, 479, 6414, 9411, 8910, 34051, 2423, 39184, 479, 6414, 401, 4642, 307, 257, 1185, 36998, 264, 13413, 283, 6414, 259, 48587, 307, 39531, 293, 13145, 30651, 37495, 22653, 1975, 5016, 8315, 44945, 3215, 37746, 25724, 1211, 9566, 3660, 13672, 31371, 6055, 30544, 3714, 2407, 38149, 995, 6225, 8592, 7649, 6055, 3794, 3215, 25724, 16572, 5016, 28242, 25724, 1211, 9566, 3660, 13672, 1829, 47525, 37746, 11296], "avg_logprob": -0.27659970415490015, "compression_ratio": 1.4953703703703705, 
"no_speech_prob": 2.384185791015625e-07, "words": [{"start": 1892.83, "end": 1893.55, "word": "وظيفة", "probability": 0.903076171875}, {"start": 1893.55, "end": 1894.61, "word": " عرفنا", "probability": 0.789794921875}, {"start": 1894.61, "end": 1895.35, "word": " تعريفا", "probability": 0.9471435546875}, {"start": 1895.35, "end": 1896.67, "word": " ووظيفة", "probability": 0.9076171875}, {"start": 1896.67, "end": 1896.91, "word": " الـ", "probability": 0.554931640625}, {"start": 1896.91, "end": 1897.31, "word": " Fibrotic", "probability": 0.420166015625}, {"start": 1897.31, "end": 1897.89, "word": " System", "probability": 0.4296875}, {"start": 1897.89, "end": 1898.65, "word": " أو", "probability": 0.497314453125}, {"start": 1898.65, "end": 1898.85, "word": " الـ", "probability": 0.859375}, {"start": 1898.85, "end": 1899.49, "word": " Fibrolysis", "probability": 0.65985107421875}, {"start": 1899.49, "end": 1899.77, "word": " is", "probability": 0.4541015625}, {"start": 1899.77, "end": 1899.89, "word": " a", "probability": 0.91455078125}, {"start": 1899.89, "end": 1900.27, "word": " system", "probability": 0.95947265625}, {"start": 1900.27, "end": 1900.77, "word": " whereby", "probability": 0.841796875}, {"start": 1900.77, "end": 1901.85, "word": " the", "probability": 0.6552734375}, {"start": 1901.85, "end": 1902.81, "word": " temporary", "probability": 0.74072265625}, {"start": 1902.81, "end": 1904.33, "word": " fibrin", "probability": 0.7937825520833334}, {"start": 1904.33, "end": 1904.75, "word": " clot", "probability": 0.75341796875}, {"start": 1904.75, "end": 1905.17, "word": " is", "probability": 0.94287109375}, {"start": 1905.17, "end": 1906.29, "word": " systematically", "probability": 0.87109375}, {"start": 1906.29, "end": 1907.47, "word": " and", "probability": 0.9375}, {"start": 1907.47, "end": 1908.01, "word": " gradually", "probability": 0.90478515625}, {"start": 1908.01, "end": 1908.53, "word": " dissolved", "probability": 0.70654296875}, {"start": 
1908.53, "end": 1908.91, "word": " يعني", "probability": 0.8505859375}, {"start": 1908.91, "end": 1909.09, "word": " احنا", "probability": 0.8333333333333334}, {"start": 1909.09, "end": 1909.63, "word": " بندوب", "probability": 0.6830240885416666}, {"start": 1909.63, "end": 1911.19, "word": " الجلطة", "probability": 0.96728515625}, {"start": 1911.19, "end": 1911.45, "word": " اللتي", "probability": 0.646240234375}, {"start": 1911.45, "end": 1912.07, "word": " تكون", "probability": 0.78466796875}, {"start": 1912.07, "end": 1914.27, "word": " موقتا", "probability": 0.69586181640625}, {"start": 1914.27, "end": 1914.53, "word": " عشان", "probability": 0.9378255208333334}, {"start": 1914.53, "end": 1914.77, "word": " تسد", "probability": 0.8601888020833334}, {"start": 1914.77, "end": 1915.47, "word": " الجريح", "probability": 0.8064778645833334}, {"start": 1915.47, "end": 1916.13, "word": " كل", "probability": 0.69091796875}, {"start": 1916.13, "end": 1917.33, "word": " الجلطة", "probability": 0.81591796875}, {"start": 1917.33, "end": 1917.49, "word": " اللي", "probability": 0.705810546875}, {"start": 1917.49, "end": 1918.77, "word": " بدوبها", "probability": 0.8924153645833334}], "temperature": 1.0}, {"id": 75, "seek": 192477, "start": 1919.63, "end": 1924.77, "text": "اللي ممكن تعمل إنها إنسداد في ال blood vessels تمشيه لما بتدوبها", "tokens": [6027, 20292, 3714, 43020, 6055, 25957, 1211, 36145, 11296, 11933, 1863, 3794, 3215, 18513, 8978, 2423, 3390, 20117, 46811, 8592, 1829, 3224, 5296, 15042, 39894, 3215, 37746, 11296], "avg_logprob": -0.4385775820962314, "compression_ratio": 1.1170212765957446, "no_speech_prob": 0.0, "words": [{"start": 1919.63, "end": 1919.97, "word": "اللي", "probability": 0.3753662109375}, {"start": 1919.97, "end": 1920.33, "word": " ممكن", "probability": 0.967041015625}, {"start": 1920.33, "end": 1920.77, "word": " تعمل", "probability": 0.9703776041666666}, {"start": 1920.77, "end": 1921.05, "word": " إنها", "probability": 0.28509521484375}, 
{"start": 1921.05, "end": 1921.85, "word": " إنسداد", "probability": 0.7654296875}, {"start": 1921.85, "end": 1922.23, "word": " في", "probability": 0.8603515625}, {"start": 1922.23, "end": 1922.31, "word": " ال", "probability": 0.477783203125}, {"start": 1922.31, "end": 1922.55, "word": " blood", "probability": 0.6728515625}, {"start": 1922.55, "end": 1923.05, "word": " vessels", "probability": 0.921875}, {"start": 1923.05, "end": 1923.93, "word": " تمشيه", "probability": 0.56854248046875}, {"start": 1923.93, "end": 1924.11, "word": " لما", "probability": 0.80322265625}, {"start": 1924.11, "end": 1924.77, "word": " بتدوبها", "probability": 0.73779296875}], "temperature": 1.0}, {"id": 76, "seek": 195130, "start": 1926.1, "end": 1951.3, "text": "بصير، بنرجع ال blood vessels إلى وضع الطبيعي، لأن اليوم اللي بنجرطه في الوعاء الدموي، بنعمل occlusion، بنعمل إيش؟ انسداد، لكن الانسداد مش كامل، ماشي، انسداد، هذا الانسداد بيعمل ضيق في هذه المنطقة، بيأثر على ال blood flow ولا ما بيأثر؟ بيأثر على ال blood flow، فاحنا بنعملين رجع ال blood vessels بما يسمح normal blood flow", "tokens": [3555, 9381, 13546, 12399, 44945, 47341, 3615, 2423, 3390, 20117, 30731, 4032, 11242, 3615, 41950, 21292, 3615, 1829, 12399, 5296, 33456, 45595, 20498, 13672, 1829, 44945, 7435, 2288, 9566, 3224, 8978, 2423, 45367, 16606, 32748, 2304, 45865, 12399, 44945, 25957, 1211, 2678, 6485, 12399, 44945, 25957, 1211, 11933, 1829, 8592, 22807, 16472, 3794, 3215, 18513, 12399, 44381, 2423, 7649, 3794, 3215, 18513, 37893, 9122, 10943, 1211, 12399, 3714, 33599, 1829, 12399, 16472, 3794, 3215, 18513, 12399, 23758, 2423, 7649, 3794, 3215, 18513, 4724, 1829, 25957, 1211, 48812, 1829, 4587, 8978, 29538, 9673, 1863, 9566, 28671, 12399, 4724, 1829, 10721, 49115, 15844, 2423, 3390, 3095, 49429, 19446, 4724, 1829, 10721, 49115, 22807, 4724, 1829, 10721, 49115, 15844, 2423, 3390, 3095, 12399, 6156, 39319, 8315, 4724, 1863, 25957, 1211, 9957, 12602, 7435, 3615, 2423, 3390, 20117, 4724, 15042, 7251, 38251, 5016, 2710, 
3390, 3095], "avg_logprob": -0.20159528472206809, "compression_ratio": 2.0155038759689923, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1926.1, "end": 1926.82, "word": "بصير،", "probability": 0.73388671875}, {"start": 1926.82, "end": 1927.5, "word": " بنرجع", "probability": 0.73486328125}, {"start": 1927.5, "end": 1927.58, "word": " ال", "probability": 0.8291015625}, {"start": 1927.58, "end": 1927.76, "word": " blood", "probability": 0.705078125}, {"start": 1927.76, "end": 1928.12, "word": " vessels", "probability": 0.9404296875}, {"start": 1928.12, "end": 1928.4, "word": " إلى", "probability": 0.521484375}, {"start": 1928.4, "end": 1928.64, "word": " وضع", "probability": 0.9654947916666666}, {"start": 1928.64, "end": 1929.16, "word": " الطبيعي،", "probability": 0.723583984375}, {"start": 1929.16, "end": 1929.36, "word": " لأن", "probability": 0.583251953125}, {"start": 1929.36, "end": 1929.58, "word": " اليوم", "probability": 0.7880859375}, {"start": 1929.58, "end": 1929.72, "word": " اللي", "probability": 0.608642578125}, {"start": 1929.72, "end": 1930.44, "word": " بنجرطه", "probability": 0.8046875}, {"start": 1930.44, "end": 1930.52, "word": " في", "probability": 0.9208984375}, {"start": 1930.52, "end": 1930.86, "word": " الوعاء", "probability": 0.7685546875}, {"start": 1930.86, "end": 1931.84, "word": " الدموي،", "probability": 0.64532470703125}, {"start": 1931.84, "end": 1932.32, "word": " بنعمل", "probability": 0.9383138020833334}, {"start": 1932.32, "end": 1933.04, "word": " occlusion،", "probability": 0.79345703125}, {"start": 1933.04, "end": 1933.44, "word": " بنعمل", "probability": 0.95654296875}, {"start": 1933.44, "end": 1934.24, "word": " إيش؟", "probability": 0.844970703125}, {"start": 1934.24, "end": 1935.32, "word": " انسداد،", "probability": 0.7392578125}, {"start": 1935.32, "end": 1935.56, "word": " لكن", "probability": 0.966796875}, {"start": 1935.56, "end": 1936.12, "word": " الانسداد", "probability": 0.927734375}, {"start": 
1936.12, "end": 1936.32, "word": " مش", "probability": 0.99462890625}, {"start": 1936.32, "end": 1936.84, "word": " كامل،", "probability": 0.9573974609375}, {"start": 1936.84, "end": 1937.36, "word": " ماشي،", "probability": 0.8460693359375}, {"start": 1937.36, "end": 1938.44, "word": " انسداد،", "probability": 0.9560546875}, {"start": 1938.44, "end": 1938.7, "word": " هذا", "probability": 0.9169921875}, {"start": 1938.7, "end": 1939.28, "word": " الانسداد", "probability": 0.9947265625}, {"start": 1939.28, "end": 1939.66, "word": " بيعمل", "probability": 0.8995361328125}, {"start": 1939.66, "end": 1940.04, "word": " ضيق", "probability": 0.9627278645833334}, {"start": 1940.04, "end": 1940.2, "word": " في", "probability": 0.98095703125}, {"start": 1940.2, "end": 1940.44, "word": " هذه", "probability": 0.94921875}, {"start": 1940.44, "end": 1941.48, "word": " المنطقة،", "probability": 0.9783203125}, {"start": 1941.48, "end": 1941.82, "word": " بيأثر", "probability": 0.9547119140625}, {"start": 1941.82, "end": 1941.98, "word": " على", "probability": 0.90185546875}, {"start": 1941.98, "end": 1942.06, "word": " ال", "probability": 0.6416015625}, {"start": 1942.06, "end": 1942.22, "word": " blood", "probability": 0.970703125}, {"start": 1942.22, "end": 1942.56, "word": " flow", "probability": 0.896484375}, {"start": 1942.56, "end": 1942.76, "word": " ولا", "probability": 0.72265625}, {"start": 1942.76, "end": 1942.92, "word": " ما", "probability": 0.79541015625}, {"start": 1942.92, "end": 1943.62, "word": " بيأثر؟", "probability": 0.82958984375}, {"start": 1943.62, "end": 1944.26, "word": " بيأثر", "probability": 0.9588623046875}, {"start": 1944.26, "end": 1944.4, "word": " على", "probability": 0.9306640625}, {"start": 1944.4, "end": 1944.44, "word": " ال", "probability": 0.634765625}, {"start": 1944.44, "end": 1944.6, "word": " blood", "probability": 0.97314453125}, {"start": 1944.6, "end": 1945.24, "word": " flow،", "probability": 0.821044921875}, {"start": 1945.24, 
"end": 1945.48, "word": " فاحنا", "probability": 0.93115234375}, {"start": 1945.48, "end": 1946.2, "word": " بنعملين", "probability": 0.646484375}, {"start": 1946.2, "end": 1947.14, "word": " رجع", "probability": 0.7816569010416666}, {"start": 1947.14, "end": 1947.22, "word": " ال", "probability": 0.95947265625}, {"start": 1947.22, "end": 1947.44, "word": " blood", "probability": 0.9736328125}, {"start": 1947.44, "end": 1947.82, "word": " vessels", "probability": 0.9169921875}, {"start": 1947.82, "end": 1948.44, "word": " بما", "probability": 0.969482421875}, {"start": 1948.44, "end": 1949.6, "word": " يسمح", "probability": 0.9915364583333334}, {"start": 1949.6, "end": 1950.64, "word": " normal", "probability": 0.395263671875}, {"start": 1950.64, "end": 1950.94, "word": " blood", "probability": 0.98291015625}, {"start": 1950.94, "end": 1951.3, "word": " flow", "probability": 0.92236328125}], "temperature": 1.0}, {"id": 77, "seek": 197957, "start": 1951.41, "end": 1979.57, "text": "بما يسمح أن الـ blood flow يرجع إلى وضعه الطبيعي طيب، هي عبارة عن Defense Mechanism أداة، هذه الأداة، هذا الـ mechanism هو عبارة عن Defense Mechanism against what؟ against occlusion أو blood vessels يبقى هو عبارة عن Defense خط دفاع أساسي ردت فعل الجسم الطبيعية لبناء جلطة إنها تهدمها", "tokens": [3555, 15042, 7251, 38251, 5016, 14739, 2423, 39184, 3390, 3095, 7251, 47341, 3615, 30731, 4032, 11242, 3615, 3224, 41950, 21292, 3615, 1829, 23032, 1829, 3555, 12399, 39896, 6225, 3555, 9640, 3660, 18871, 17410, 30175, 1434, 5551, 28259, 3660, 12399, 29538, 16247, 28259, 3660, 12399, 23758, 2423, 39184, 7513, 31439, 6225, 3555, 9640, 3660, 18871, 17410, 30175, 1434, 1970, 437, 22807, 1970, 2678, 6485, 34051, 3390, 20117, 7251, 3555, 4587, 7578, 31439, 6225, 3555, 9640, 3660, 18871, 17410, 16490, 9566, 11778, 5172, 45761, 5551, 3794, 32277, 1829, 12602, 3215, 2655, 6156, 30241, 25724, 38251, 41950, 21292, 3615, 10632, 5296, 3555, 1863, 16606, 10874, 1211, 9566, 3660, 36145, 11296, 6055, 3224, 
40448, 11296], "avg_logprob": -0.1756417416036129, "compression_ratio": 1.7325102880658436, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1951.41, "end": 1951.89, "word": "بما", "probability": 0.968017578125}, {"start": 1951.89, "end": 1952.73, "word": " يسمح", "probability": 0.9928385416666666}, {"start": 1952.73, "end": 1952.89, "word": " أن", "probability": 0.439697265625}, {"start": 1952.89, "end": 1953.03, "word": " الـ", "probability": 0.73486328125}, {"start": 1953.03, "end": 1953.19, "word": " blood", "probability": 0.375}, {"start": 1953.19, "end": 1953.51, "word": " flow", "probability": 0.908203125}, {"start": 1953.51, "end": 1953.83, "word": " يرجع", "probability": 0.96533203125}, {"start": 1953.83, "end": 1953.95, "word": " إلى", "probability": 0.86083984375}, {"start": 1953.95, "end": 1954.39, "word": " وضعه", "probability": 0.938232421875}, {"start": 1954.39, "end": 1955.59, "word": " الطبيعي", "probability": 0.760009765625}, {"start": 1955.59, "end": 1958.23, "word": " طيب،", "probability": 0.7064208984375}, {"start": 1958.23, "end": 1958.43, "word": " هي", "probability": 0.6474609375}, {"start": 1958.43, "end": 1958.67, "word": " عبارة", "probability": 0.982421875}, {"start": 1958.67, "end": 1958.95, "word": " عن", "probability": 0.9970703125}, {"start": 1958.95, "end": 1959.81, "word": " Defense", "probability": 0.25537109375}, {"start": 1959.81, "end": 1960.49, "word": " Mechanism", "probability": 0.892578125}, {"start": 1960.49, "end": 1961.79, "word": " أداة،", "probability": 0.56500244140625}, {"start": 1961.79, "end": 1962.13, "word": " هذه", "probability": 0.9345703125}, {"start": 1962.13, "end": 1963.23, "word": " الأداة،", "probability": 0.915283203125}, {"start": 1963.23, "end": 1963.47, "word": " هذا", "probability": 0.85595703125}, {"start": 1963.47, "end": 1963.67, "word": " الـ", "probability": 0.8779296875}, {"start": 1963.67, "end": 1964.11, "word": " mechanism", "probability": 0.55615234375}, {"start": 1964.11, 
"end": 1964.79, "word": " هو", "probability": 0.80322265625}, {"start": 1964.79, "end": 1964.99, "word": " عبارة", "probability": 0.99365234375}, {"start": 1964.99, "end": 1965.13, "word": " عن", "probability": 0.9970703125}, {"start": 1965.13, "end": 1965.63, "word": " Defense", "probability": 0.814453125}, {"start": 1965.63, "end": 1966.31, "word": " Mechanism", "probability": 0.981689453125}, {"start": 1966.31, "end": 1967.07, "word": " against", "probability": 0.5009765625}, {"start": 1967.07, "end": 1967.87, "word": " what؟", "probability": 0.902587890625}, {"start": 1967.87, "end": 1968.29, "word": " against", "probability": 0.6982421875}, {"start": 1968.29, "end": 1969.25, "word": " occlusion", "probability": 0.847900390625}, {"start": 1969.25, "end": 1969.85, "word": " أو", "probability": 0.5205078125}, {"start": 1969.85, "end": 1970.03, "word": " blood", "probability": 0.9033203125}, {"start": 1970.03, "end": 1970.45, "word": " vessels", "probability": 0.95068359375}, {"start": 1970.45, "end": 1971.17, "word": " يبقى", "probability": 0.7767333984375}, {"start": 1971.17, "end": 1971.33, "word": " هو", "probability": 0.814453125}, {"start": 1971.33, "end": 1971.57, "word": " عبارة", "probability": 0.994384765625}, {"start": 1971.57, "end": 1971.73, "word": " عن", "probability": 0.9921875}, {"start": 1971.73, "end": 1972.23, "word": " Defense", "probability": 0.70458984375}, {"start": 1972.23, "end": 1972.65, "word": " خط", "probability": 0.8935546875}, {"start": 1972.65, "end": 1973.33, "word": " دفاع", "probability": 0.9703776041666666}, {"start": 1973.33, "end": 1974.15, "word": " أساسي", "probability": 0.961669921875}, {"start": 1974.15, "end": 1974.97, "word": " ردت", "probability": 0.7081705729166666}, {"start": 1974.97, "end": 1975.29, "word": " فعل", "probability": 0.97900390625}, {"start": 1975.29, "end": 1975.77, "word": " الجسم", "probability": 0.994140625}, {"start": 1975.77, "end": 1976.57, "word": " الطبيعية", "probability": 0.9215087890625}, 
{"start": 1976.57, "end": 1977.59, "word": " لبناء", "probability": 0.9539794921875}, {"start": 1977.59, "end": 1978.43, "word": " جلطة", "probability": 0.950927734375}, {"start": 1978.43, "end": 1979.05, "word": " إنها", "probability": 0.8876953125}, {"start": 1979.05, "end": 1979.57, "word": " تهدمها", "probability": 0.956298828125}], "temperature": 1.0}, {"id": 78, "seek": 200577, "start": 1980.23, "end": 2005.77, "text": "صح؟ عشان يصير فيه blood flow ان هي ايش تهدمها يبقى رد فعل الجسم الطبيعي أحد وسائل الدفاع في الجسم لتكوين الجلطة هو عبارة عن ايش؟ عبارة عن وجود system يهدمها يا ايش؟ system يهدمها وبالتالي بيصير فيه restoring for blood flow إلى الوضع الطبيعي إلى ايش؟ الوضع الطبيعي", "tokens": [9381, 5016, 22807, 6225, 8592, 7649, 7251, 9381, 13546, 8978, 3224, 3390, 3095, 16472, 39896, 1975, 1829, 8592, 6055, 3224, 40448, 11296, 7251, 3555, 4587, 7578, 12602, 3215, 6156, 30241, 25724, 38251, 41950, 21292, 3615, 1829, 5551, 24401, 46952, 16373, 1211, 32748, 5172, 45761, 8978, 25724, 38251, 5296, 2655, 4117, 2407, 9957, 25724, 1211, 9566, 3660, 31439, 6225, 3555, 9640, 3660, 18871, 1975, 1829, 8592, 22807, 6225, 3555, 9640, 3660, 18871, 49610, 23328, 1185, 7251, 3224, 40448, 11296, 35186, 1975, 1829, 8592, 22807, 1185, 7251, 3224, 40448, 11296, 46599, 6027, 2655, 6027, 1829, 4724, 1829, 9381, 13546, 8978, 3224, 36349, 337, 3390, 3095, 30731, 2423, 2407, 11242, 3615, 41950, 21292, 3615, 1829, 30731, 1975, 1829, 8592, 22807, 2423, 2407, 11242, 3615, 41950, 21292, 3615, 1829], "avg_logprob": -0.19146825763441266, "compression_ratio": 1.9464285714285714, "no_speech_prob": 5.364418029785156e-07, "words": [{"start": 1980.23, "end": 1980.79, "word": "صح؟", "probability": 0.4239298502604167}, {"start": 1980.79, "end": 1980.93, "word": " عشان", "probability": 0.96484375}, {"start": 1980.93, "end": 1981.19, "word": " يصير", "probability": 0.93505859375}, {"start": 1981.19, "end": 1981.35, "word": " فيه", "probability": 0.72802734375}, {"start": 1981.35, "end": 1981.55, 
"word": " blood", "probability": 0.78173828125}, {"start": 1981.55, "end": 1981.95, "word": " flow", "probability": 0.88525390625}, {"start": 1981.95, "end": 1983.29, "word": " ان", "probability": 0.435546875}, {"start": 1983.29, "end": 1983.51, "word": " هي", "probability": 0.6044921875}, {"start": 1983.51, "end": 1983.83, "word": " ايش", "probability": 0.6534016927083334}, {"start": 1983.83, "end": 1984.53, "word": " تهدمها", "probability": 0.8641357421875}, {"start": 1984.53, "end": 1985.33, "word": " يبقى", "probability": 0.8326416015625}, {"start": 1985.33, "end": 1985.65, "word": " رد", "probability": 0.838134765625}, {"start": 1985.65, "end": 1986.11, "word": " فعل", "probability": 0.841796875}, {"start": 1986.11, "end": 1986.57, "word": " الجسم", "probability": 0.987060546875}, {"start": 1986.57, "end": 1987.19, "word": " الطبيعي", "probability": 0.8441162109375}, {"start": 1987.19, "end": 1987.99, "word": " أحد", "probability": 0.752685546875}, {"start": 1987.99, "end": 1988.43, "word": " وسائل", "probability": 0.962890625}, {"start": 1988.43, "end": 1989.21, "word": " الدفاع", "probability": 0.9786783854166666}, {"start": 1989.21, "end": 1989.39, "word": " في", "probability": 0.958984375}, {"start": 1989.39, "end": 1989.97, "word": " الجسم", "probability": 0.9716796875}, {"start": 1989.97, "end": 1993.13, "word": " لتكوين", "probability": 0.936328125}, {"start": 1993.13, "end": 1993.83, "word": " الجلطة", "probability": 0.89794921875}, {"start": 1993.83, "end": 1994.01, "word": " هو", "probability": 0.92041015625}, {"start": 1994.01, "end": 1994.27, "word": " عبارة", "probability": 0.984130859375}, {"start": 1994.27, "end": 1994.43, "word": " عن", "probability": 0.9931640625}, {"start": 1994.43, "end": 1995.05, "word": " ايش؟", "probability": 0.854248046875}, {"start": 1995.05, "end": 1995.29, "word": " عبارة", "probability": 0.9906005859375}, {"start": 1995.29, "end": 1995.41, "word": " عن", "probability": 0.99462890625}, {"start": 1995.41, "end": 
1995.69, "word": " وجود", "probability": 0.966552734375}, {"start": 1995.69, "end": 1996.09, "word": " system", "probability": 0.919921875}, {"start": 1996.09, "end": 1996.77, "word": " يهدمها", "probability": 0.9830322265625}, {"start": 1996.77, "end": 1997.33, "word": " يا", "probability": 0.301513671875}, {"start": 1997.33, "end": 1997.71, "word": " ايش؟", "probability": 0.674072265625}, {"start": 1997.71, "end": 1998.15, "word": " system", "probability": 0.76611328125}, {"start": 1998.15, "end": 1998.77, "word": " يهدمها", "probability": 0.956787109375}, {"start": 1998.77, "end": 1999.15, "word": " وبالتالي", "probability": 0.838720703125}, {"start": 1999.15, "end": 1999.55, "word": " بيصير", "probability": 0.902099609375}, {"start": 1999.55, "end": 2000.05, "word": " فيه", "probability": 0.793701171875}, {"start": 2000.05, "end": 2000.81, "word": " restoring", "probability": 0.6630859375}, {"start": 2000.81, "end": 2001.43, "word": " for", "probability": 0.8662109375}, {"start": 2001.43, "end": 2001.73, "word": " blood", "probability": 0.90576171875}, {"start": 2001.73, "end": 2002.23, "word": " flow", "probability": 0.9111328125}, {"start": 2002.23, "end": 2002.69, "word": " إلى", "probability": 0.453125}, {"start": 2002.69, "end": 2003.15, "word": " الوضع", "probability": 0.992919921875}, {"start": 2003.15, "end": 2004.11, "word": " الطبيعي", "probability": 0.9212646484375}, {"start": 2004.11, "end": 2004.35, "word": " إلى", "probability": 0.7861328125}, {"start": 2004.35, "end": 2005.11, "word": " ايش؟", "probability": 0.92529296875}, {"start": 2005.11, "end": 2005.63, "word": " الوضع", "probability": 0.947509765625}, {"start": 2005.63, "end": 2005.77, "word": " الطبيعي", "probability": 0.90869140625}], "temperature": 1.0}, {"id": 79, "seek": 203747, "start": 2010.77, "end": 2037.47, "text": "من صفات هذا ال system انه it's sensitive to imbalances شو يعني sensitive to imbalances؟ حساس لعدم .. 
الانه انه imbalance؟ ال hemostatic balances مظبوط يوم ما يصير فيه اي تغير في الميزان اللي احنا متفاقين عليه و يصير فيه imbalancing ببدأ يشتغل هذا، ماشي؟", "tokens": [27842, 20328, 5172, 9307, 23758, 2423, 1185, 16472, 3224, 309, 311, 9477, 281, 566, 2645, 2676, 13412, 2407, 37495, 22653, 9477, 281, 566, 2645, 2676, 22807, 11331, 3794, 32277, 5296, 22488, 2304, 4386, 2423, 7649, 3224, 16472, 3224, 43007, 22807, 2423, 415, 1761, 2399, 33993, 3714, 19913, 3555, 2407, 9566, 7251, 20498, 19446, 7251, 9381, 13546, 8978, 3224, 1975, 1829, 6055, 17082, 13546, 8978, 9673, 1829, 11622, 7649, 13672, 1829, 1975, 5016, 8315, 44650, 5172, 995, 4587, 9957, 47356, 4032, 7251, 9381, 13546, 8978, 3224, 566, 2645, 8779, 4724, 44510, 10721, 7251, 8592, 2655, 17082, 1211, 23758, 12399, 3714, 33599, 1829, 22807], "avg_logprob": -0.25834345933303093, "compression_ratio": 1.6228070175438596, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2010.77, "end": 2011.05, "word": "من", "probability": 0.84912109375}, {"start": 2011.05, "end": 2011.61, "word": " صفات", "probability": 0.99169921875}, {"start": 2011.61, "end": 2012.51, "word": " هذا", "probability": 0.9541015625}, {"start": 2012.51, "end": 2012.83, "word": " ال", "probability": 0.947265625}, {"start": 2012.83, "end": 2013.33, "word": " system", "probability": 0.55615234375}, {"start": 2013.33, "end": 2014.73, "word": " انه", "probability": 0.6331787109375}, {"start": 2014.73, "end": 2015.07, "word": " it's", "probability": 0.3575439453125}, {"start": 2015.07, "end": 2015.61, "word": " sensitive", "probability": 0.931640625}, {"start": 2015.61, "end": 2016.11, "word": " to", "probability": 0.9736328125}, {"start": 2016.11, "end": 2017.37, "word": " imbalances", "probability": 0.908203125}, {"start": 2017.37, "end": 2018.63, "word": " شو", "probability": 0.6070556640625}, {"start": 2018.63, "end": 2018.81, "word": " يعني", "probability": 0.96435546875}, {"start": 2018.81, "end": 2019.27, "word": " sensitive", "probability": 
0.9169921875}, {"start": 2019.27, "end": 2019.51, "word": " to", "probability": 0.9765625}, {"start": 2019.51, "end": 2020.61, "word": " imbalances؟", "probability": 0.8909912109375}, {"start": 2020.61, "end": 2022.21, "word": " حساس", "probability": 0.8470052083333334}, {"start": 2022.21, "end": 2022.79, "word": " لعدم", "probability": 0.7389322916666666}, {"start": 2022.79, "end": 2023.17, "word": " ..", "probability": 0.1982421875}, {"start": 2023.17, "end": 2023.63, "word": " الانه", "probability": 0.6088053385416666}, {"start": 2023.63, "end": 2023.87, "word": " انه", "probability": 0.6673583984375}, {"start": 2023.87, "end": 2025.77, "word": " imbalance؟", "probability": 0.5821533203125}, {"start": 2025.77, "end": 2025.89, "word": " ال", "probability": 0.294921875}, {"start": 2025.89, "end": 2026.47, "word": " hemostatic", "probability": 0.6851399739583334}, {"start": 2026.47, "end": 2027.03, "word": " balances", "probability": 0.708984375}, {"start": 2027.03, "end": 2027.85, "word": " مظبوط", "probability": 0.840625}, {"start": 2027.85, "end": 2028.13, "word": " يوم", "probability": 0.9052734375}, {"start": 2028.13, "end": 2028.25, "word": " ما", "probability": 0.60107421875}, {"start": 2028.25, "end": 2028.53, "word": " يصير", "probability": 0.8128255208333334}, {"start": 2028.53, "end": 2028.75, "word": " فيه", "probability": 0.959228515625}, {"start": 2028.75, "end": 2029.07, "word": " اي", "probability": 0.68310546875}, {"start": 2029.07, "end": 2030.49, "word": " تغير", "probability": 0.9645182291666666}, {"start": 2030.49, "end": 2030.71, "word": " في", "probability": 0.943359375}, {"start": 2030.71, "end": 2031.33, "word": " الميزان", "probability": 0.9617919921875}, {"start": 2031.33, "end": 2031.49, "word": " اللي", "probability": 0.81005859375}, {"start": 2031.49, "end": 2031.67, "word": " احنا", "probability": 0.9480794270833334}, {"start": 2031.67, "end": 2032.39, "word": " متفاقين", "probability": 0.84462890625}, {"start": 2032.39, "end": 
2032.91, "word": " عليه", "probability": 0.98828125}, {"start": 2032.91, "end": 2033.75, "word": " و", "probability": 0.9228515625}, {"start": 2033.75, "end": 2034.11, "word": " يصير", "probability": 0.91455078125}, {"start": 2034.11, "end": 2034.41, "word": " فيه", "probability": 0.9638671875}, {"start": 2034.41, "end": 2035.27, "word": " imbalancing", "probability": 0.7779947916666666}, {"start": 2035.27, "end": 2036.13, "word": " ببدأ", "probability": 0.7459309895833334}, {"start": 2036.13, "end": 2036.51, "word": " يشتغل", "probability": 0.99541015625}, {"start": 2036.51, "end": 2036.97, "word": " هذا،", "probability": 0.6971435546875}, {"start": 2036.97, "end": 2037.47, "word": " ماشي؟", "probability": 0.899169921875}], "temperature": 1.0}, {"id": 80, "seek": 205694, "start": 2037.81, "end": 2056.95, "text": "ببدأ يشتغل هذا الـ mechanism وبالتالي عند أي تغير في ال hemostatic balance ال fiber optic system can be done من صفاته أيضا انه لما بيشتغل بيدمن لنا ان ال thrombus formation", "tokens": [3555, 44510, 10721, 7251, 8592, 2655, 17082, 1211, 23758, 2423, 39184, 7513, 46599, 6027, 2655, 6027, 1829, 43242, 36632, 6055, 17082, 13546, 8978, 2423, 8636, 555, 2399, 4772, 2423, 12874, 48269, 1185, 393, 312, 1096, 9154, 20328, 5172, 9307, 3224, 36632, 11242, 995, 16472, 3224, 5296, 15042, 4724, 1829, 8592, 2655, 17082, 1211, 4724, 25708, 27842, 5296, 8315, 16472, 2423, 739, 3548, 301, 11723], "avg_logprob": -0.27379807692307695, "compression_ratio": 1.4080459770114941, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 2037.81, "end": 2038.43, "word": "ببدأ", "probability": 0.5533040364583334}, {"start": 2038.43, "end": 2039.23, "word": " يشتغل", "probability": 0.97734375}, {"start": 2039.23, "end": 2039.53, "word": " هذا", "probability": 0.95654296875}, {"start": 2039.53, "end": 2039.71, "word": " الـ", "probability": 0.6885986328125}, {"start": 2039.71, "end": 2040.13, "word": " mechanism", "probability": 0.66357421875}, {"start": 2040.13, "end": 2041.33, 
"word": " وبالتالي", "probability": 0.88154296875}, {"start": 2041.33, "end": 2041.87, "word": " عند", "probability": 0.9501953125}, {"start": 2041.87, "end": 2042.07, "word": " أي", "probability": 0.77099609375}, {"start": 2042.07, "end": 2042.65, "word": " تغير", "probability": 0.95703125}, {"start": 2042.65, "end": 2043.33, "word": " في", "probability": 0.95166015625}, {"start": 2043.33, "end": 2043.65, "word": " ال", "probability": 0.955078125}, {"start": 2043.65, "end": 2044.79, "word": " hemostatic", "probability": 0.6589152018229166}, {"start": 2044.79, "end": 2045.33, "word": " balance", "probability": 0.92431640625}, {"start": 2045.33, "end": 2046.19, "word": " ال", "probability": 0.82373046875}, {"start": 2046.19, "end": 2046.55, "word": " fiber", "probability": 0.2108154296875}, {"start": 2046.55, "end": 2046.87, "word": " optic", "probability": 0.322021484375}, {"start": 2046.87, "end": 2047.29, "word": " system", "probability": 0.93505859375}, {"start": 2047.29, "end": 2047.55, "word": " can", "probability": 0.79248046875}, {"start": 2047.55, "end": 2047.85, "word": " be", "probability": 0.9580078125}, {"start": 2047.85, "end": 2048.49, "word": " done", "probability": 0.182861328125}, {"start": 2048.49, "end": 2049.29, "word": " من", "probability": 0.845703125}, {"start": 2049.29, "end": 2050.03, "word": " صفاته", "probability": 0.9842529296875}, {"start": 2050.03, "end": 2050.65, "word": " أيضا", "probability": 0.7841796875}, {"start": 2050.65, "end": 2051.79, "word": " انه", "probability": 0.699462890625}, {"start": 2051.79, "end": 2052.05, "word": " لما", "probability": 0.968017578125}, {"start": 2052.05, "end": 2052.83, "word": " بيشتغل", "probability": 0.9293619791666666}, {"start": 2052.83, "end": 2054.01, "word": " بيدمن", "probability": 0.8544921875}, {"start": 2054.01, "end": 2054.59, "word": " لنا", "probability": 0.98486328125}, {"start": 2054.59, "end": 2055.35, "word": " ان", "probability": 0.81494140625}, {"start": 2055.35, "end": 
2055.57, "word": " ال", "probability": 0.7373046875}, {"start": 2055.57, "end": 2056.17, "word": " thrombus", "probability": 0.6990559895833334}, {"start": 2056.17, "end": 2056.95, "word": " formation", "probability": 0.97900390625}], "temperature": 1.0}, {"id": 81, "seek": 208248, "start": 2057.4, "end": 2082.48, "text": "is localized بمعنى بيصير لها restriction في منطقة المزعج، منطقة ال damage، ال blood vessel is damaged، تفهم عليا؟ لإن لو صار في تكون في أماكن أخرى ل-fibrinic load، ال-fibrinetic system بيسمح لهاش إنها تنتقل إلى منطقة أخرى، لإنه أول بأول بيكسرها", "tokens": [271, 44574, 4724, 2304, 3615, 1863, 7578, 4724, 1829, 9381, 13546, 5296, 11296, 29529, 8978, 9154, 9566, 28671, 9673, 11622, 3615, 7435, 12399, 9154, 9566, 28671, 2423, 4344, 12399, 2423, 3390, 18098, 307, 14080, 12399, 6055, 5172, 16095, 11203, 25528, 22807, 5296, 28814, 1863, 45164, 20328, 9640, 8978, 6055, 30544, 8978, 5551, 15042, 19452, 5551, 34740, 7578, 5296, 12, 69, 6414, 259, 299, 3677, 12399, 2423, 12, 69, 6414, 259, 3532, 1185, 4724, 1829, 38251, 5016, 5296, 11296, 8592, 36145, 11296, 6055, 29399, 4587, 1211, 30731, 9154, 9566, 28671, 5551, 34740, 7578, 12399, 5296, 28814, 1863, 3224, 5551, 12610, 4724, 10721, 12610, 4724, 1829, 4117, 3794, 2288, 11296], "avg_logprob": -0.27580276323021, "compression_ratio": 1.5635593220338984, "no_speech_prob": 3.4570693969726562e-06, "words": [{"start": 2057.4, "end": 2057.78, "word": "is", "probability": 0.16943359375}, {"start": 2057.78, "end": 2058.58, "word": " localized", "probability": 0.91455078125}, {"start": 2058.58, "end": 2059.4, "word": " بمعنى", "probability": 0.9125}, {"start": 2059.4, "end": 2060.06, "word": " بيصير", "probability": 0.8436279296875}, {"start": 2060.06, "end": 2060.22, "word": " لها", "probability": 0.86083984375}, {"start": 2060.22, "end": 2060.84, "word": " restriction", "probability": 0.83056640625}, {"start": 2060.84, "end": 2061.34, "word": " في", "probability": 0.884765625}, {"start": 2061.34, "end": 2061.88, 
"word": " منطقة", "probability": 0.9832356770833334}, {"start": 2061.88, "end": 2062.94, "word": " المزعج،", "probability": 0.631396484375}, {"start": 2062.94, "end": 2063.46, "word": " منطقة", "probability": 0.984375}, {"start": 2063.46, "end": 2063.62, "word": " ال", "probability": 0.58349609375}, {"start": 2063.62, "end": 2064.06, "word": " damage،", "probability": 0.5059814453125}, {"start": 2064.06, "end": 2064.12, "word": " ال", "probability": 0.58740234375}, {"start": 2064.12, "end": 2064.34, "word": " blood", "probability": 0.87890625}, {"start": 2064.34, "end": 2064.72, "word": " vessel", "probability": 0.302490234375}, {"start": 2064.72, "end": 2065.04, "word": " is", "probability": 0.329345703125}, {"start": 2065.04, "end": 2065.76, "word": " damaged،", "probability": 0.5810546875}, {"start": 2065.76, "end": 2066.46, "word": " تفهم", "probability": 0.777099609375}, {"start": 2066.46, "end": 2067.44, "word": " عليا؟", "probability": 0.7745768229166666}, {"start": 2067.44, "end": 2067.82, "word": " لإن", "probability": 0.85498046875}, {"start": 2067.82, "end": 2068.12, "word": " لو", "probability": 0.81005859375}, {"start": 2068.12, "end": 2068.4, "word": " صار", "probability": 0.98291015625}, {"start": 2068.4, "end": 2068.6, "word": " في", "probability": 0.919921875}, {"start": 2068.6, "end": 2069.36, "word": " تكون", "probability": 0.6290283203125}, {"start": 2069.36, "end": 2070.54, "word": " في", "probability": 0.9697265625}, {"start": 2070.54, "end": 2071.12, "word": " أماكن", "probability": 0.9383138020833334}, {"start": 2071.12, "end": 2071.64, "word": " أخرى", "probability": 0.9814453125}, {"start": 2071.64, "end": 2071.82, "word": " ل", "probability": 0.85009765625}, {"start": 2071.82, "end": 2072.46, "word": "-fibrinic", "probability": 0.5968994140625}, {"start": 2072.46, "end": 2073.26, "word": " load،", "probability": 0.5948486328125}, {"start": 2073.26, "end": 2073.44, "word": " ال", "probability": 0.93798828125}, {"start": 2073.44, "end": 
2074.1, "word": "-fibrinetic", "probability": 0.687841796875}, {"start": 2074.1, "end": 2074.62, "word": " system", "probability": 0.96728515625}, {"start": 2074.62, "end": 2075.5, "word": " بيسمح", "probability": 0.929443359375}, {"start": 2075.5, "end": 2076.24, "word": " لهاش", "probability": 0.4785970052083333}, {"start": 2076.24, "end": 2076.82, "word": " إنها", "probability": 0.7802734375}, {"start": 2076.82, "end": 2077.54, "word": " تنتقل", "probability": 0.989013671875}, {"start": 2077.54, "end": 2077.8, "word": " إلى", "probability": 0.9208984375}, {"start": 2077.8, "end": 2078.34, "word": " منطقة", "probability": 0.9881184895833334}, {"start": 2078.34, "end": 2079.16, "word": " أخرى،", "probability": 0.8868408203125}, {"start": 2079.16, "end": 2079.42, "word": " لإنه", "probability": 0.7774658203125}, {"start": 2079.42, "end": 2079.64, "word": " أول", "probability": 0.924072265625}, {"start": 2079.64, "end": 2079.96, "word": " بأول", "probability": 0.9544270833333334}, {"start": 2079.96, "end": 2082.48, "word": " بيكسرها", "probability": 0.89794921875}], "temperature": 1.0}, {"id": 82, "seek": 210760, "start": 2083.88, "end": 2107.6, "text": "كمان من صفاته انه it's initiated with يعني جنبا إلى جنب مع ال cooperation كاس كلمة كذا وعشان تتخيلوا الموضوع عشان ما يعني مش معقول التلين يبدو مع بعض، مظبوط عشان تتخيلوا الموضوع كيف يتم يبدأ يصير في مزع", "tokens": [4117, 2304, 7649, 9154, 20328, 5172, 9307, 3224, 16472, 3224, 309, 311, 28578, 365, 37495, 22653, 10874, 1863, 3555, 995, 30731, 10874, 1863, 3555, 20449, 2423, 14968, 9122, 32277, 9122, 19528, 3660, 9122, 15730, 4032, 3615, 8592, 7649, 6055, 2655, 9778, 26895, 14407, 9673, 2407, 11242, 45367, 6225, 8592, 7649, 19446, 37495, 22653, 37893, 20449, 39648, 16712, 1211, 9957, 7251, 44510, 2407, 20449, 45030, 11242, 12399, 3714, 19913, 3555, 2407, 9566, 6225, 8592, 7649, 6055, 2655, 9778, 26895, 14407, 9673, 2407, 11242, 45367, 9122, 33911, 7251, 39237, 7251, 44510, 10721, 7251, 9381, 13546, 8978, 3714, 11622, 
3615], "avg_logprob": -0.25318877216504543, "compression_ratio": 1.648780487804878, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2083.88, "end": 2084.4, "word": "كمان", "probability": 0.5962320963541666}, {"start": 2084.4, "end": 2084.7, "word": " من", "probability": 0.9677734375}, {"start": 2084.7, "end": 2085.42, "word": " صفاته", "probability": 0.87451171875}, {"start": 2085.42, "end": 2086.22, "word": " انه", "probability": 0.457763671875}, {"start": 2086.22, "end": 2086.54, "word": " it's", "probability": 0.4754638671875}, {"start": 2086.54, "end": 2087.62, "word": " initiated", "probability": 0.92724609375}, {"start": 2087.62, "end": 2089.18, "word": " with", "probability": 0.8935546875}, {"start": 2089.18, "end": 2090.48, "word": " يعني", "probability": 0.6405029296875}, {"start": 2090.48, "end": 2092.44, "word": " جنبا", "probability": 0.9296875}, {"start": 2092.44, "end": 2092.64, "word": " إلى", "probability": 0.83154296875}, {"start": 2092.64, "end": 2093.2, "word": " جنب", "probability": 0.9833984375}, {"start": 2093.2, "end": 2093.4, "word": " مع", "probability": 0.90380859375}, {"start": 2093.4, "end": 2093.54, "word": " ال", "probability": 0.9453125}, {"start": 2093.54, "end": 2093.96, "word": " cooperation", "probability": 0.1783447265625}, {"start": 2093.96, "end": 2094.36, "word": " كاس", "probability": 0.27337646484375}, {"start": 2094.36, "end": 2094.66, "word": " كلمة", "probability": 0.54541015625}, {"start": 2094.66, "end": 2094.92, "word": " كذا", "probability": 0.55999755859375}, {"start": 2094.92, "end": 2097.18, "word": " وعشان", "probability": 0.8387451171875}, {"start": 2097.18, "end": 2097.86, "word": " تتخيلوا", "probability": 0.93173828125}, {"start": 2097.86, "end": 2098.26, "word": " الموضوع", "probability": 0.9981689453125}, {"start": 2098.26, "end": 2098.54, "word": " عشان", "probability": 0.7779134114583334}, {"start": 2098.54, "end": 2098.9, "word": " ما", "probability": 0.56787109375}, {"start": 2098.9, "end": 
2099.76, "word": " يعني", "probability": 0.76025390625}, {"start": 2099.76, "end": 2099.92, "word": " مش", "probability": 0.98681640625}, {"start": 2099.92, "end": 2100.28, "word": " معقول", "probability": 0.945068359375}, {"start": 2100.28, "end": 2100.64, "word": " التلين", "probability": 0.7008463541666666}, {"start": 2100.64, "end": 2100.88, "word": " يبدو", "probability": 0.8938802083333334}, {"start": 2100.88, "end": 2101.04, "word": " مع", "probability": 0.98974609375}, {"start": 2101.04, "end": 2101.88, "word": " بعض،", "probability": 0.8728841145833334}, {"start": 2101.88, "end": 2102.6, "word": " مظبوط", "probability": 0.890625}, {"start": 2102.6, "end": 2103.64, "word": " عشان", "probability": 0.9544270833333334}, {"start": 2103.64, "end": 2104.22, "word": " تتخيلوا", "probability": 0.9974609375}, {"start": 2104.22, "end": 2104.58, "word": " الموضوع", "probability": 0.998046875}, {"start": 2104.58, "end": 2104.96, "word": " كيف", "probability": 0.984619140625}, {"start": 2104.96, "end": 2105.48, "word": " يتم", "probability": 0.938720703125}, {"start": 2105.48, "end": 2106.6, "word": " يبدأ", "probability": 0.9296875}, {"start": 2106.6, "end": 2107.08, "word": " يصير", "probability": 0.7359212239583334}, {"start": 2107.08, "end": 2107.2, "word": " في", "probability": 0.8984375}, {"start": 2107.2, "end": 2107.6, "word": " مزع", "probability": 0.9075520833333334}], "temperature": 1.0}, {"id": 83, "seek": 213416, "start": 2108.48, "end": 2134.16, "text": "في blood vessel injury يبدأ ال coagulation cascade بالنشاط زي ما قلتلكوا جنبا إلى جنب مع ال fibrolytic system شو الفرق بين اتنين ان واحد بمشي بسرعة طيارة و واحد بمشي بسرعة سيارة فاهمين عليها؟ واحد بمشي سريع جدا و التاني اللي هو ال coagulation cascade و التاني بمشي ببطء", "tokens": [41185, 3390, 18098, 10454, 7251, 44510, 10721, 2423, 598, 559, 2776, 50080, 20666, 1863, 8592, 41193, 30767, 1829, 19446, 12174, 1211, 2655, 23275, 14407, 10874, 1863, 3555, 995, 30731, 10874, 1863, 3555, 20449, 2423, 13116, 
340, 356, 40907, 1185, 13412, 2407, 27188, 2288, 4587, 49374, 1975, 2655, 1863, 9957, 16472, 36764, 24401, 4724, 2304, 8592, 1829, 4724, 3794, 2288, 27884, 23032, 1829, 9640, 3660, 4032, 36764, 24401, 4724, 2304, 8592, 1829, 4724, 3794, 2288, 27884, 8608, 1829, 9640, 3660, 6156, 995, 16095, 9957, 25894, 11296, 22807, 36764, 24401, 4724, 2304, 8592, 1829, 8608, 16572, 3615, 10874, 28259, 4032, 16712, 7649, 1829, 13672, 1829, 31439, 2423, 598, 559, 2776, 50080, 4032, 16712, 7649, 1829, 4724, 2304, 8592, 1829, 4724, 3555, 9566, 38207], "avg_logprob": -0.15740265758311162, "compression_ratio": 1.7872340425531914, "no_speech_prob": 1.0728836059570312e-06, "words": [{"start": 2108.48, "end": 2108.72, "word": "في", "probability": 0.68994140625}, {"start": 2108.72, "end": 2109.0, "word": " blood", "probability": 0.54052734375}, {"start": 2109.0, "end": 2109.3, "word": " vessel", "probability": 0.72509765625}, {"start": 2109.3, "end": 2109.92, "word": " injury", "probability": 0.8876953125}, {"start": 2109.92, "end": 2111.38, "word": " يبدأ", "probability": 0.9513346354166666}, {"start": 2111.38, "end": 2111.62, "word": " ال", "probability": 0.89892578125}, {"start": 2111.62, "end": 2112.46, "word": " coagulation", "probability": 0.77978515625}, {"start": 2112.46, "end": 2113.18, "word": " cascade", "probability": 0.94775390625}, {"start": 2113.18, "end": 2114.46, "word": " بالنشاط", "probability": 0.991455078125}, {"start": 2114.46, "end": 2115.3, "word": " زي", "probability": 0.638427734375}, {"start": 2115.3, "end": 2115.38, "word": " ما", "probability": 0.95166015625}, {"start": 2115.38, "end": 2115.82, "word": " قلتلكوا", "probability": 0.835302734375}, {"start": 2115.82, "end": 2116.4, "word": " جنبا", "probability": 0.9088134765625}, {"start": 2116.4, "end": 2116.66, "word": " إلى", "probability": 0.77685546875}, {"start": 2116.66, "end": 2117.2, "word": " جنب", "probability": 0.9890950520833334}, {"start": 2117.2, "end": 2117.5, "word": " مع", "probability": 
0.93408203125}, {"start": 2117.5, "end": 2117.64, "word": " ال", "probability": 0.9248046875}, {"start": 2117.64, "end": 2118.14, "word": " fibrolytic", "probability": 0.612884521484375}, {"start": 2118.14, "end": 2118.64, "word": " system", "probability": 0.9501953125}, {"start": 2118.64, "end": 2119.82, "word": " شو", "probability": 0.824951171875}, {"start": 2119.82, "end": 2120.14, "word": " الفرق", "probability": 0.9713541666666666}, {"start": 2120.14, "end": 2120.32, "word": " بين", "probability": 0.95556640625}, {"start": 2120.32, "end": 2120.92, "word": " اتنين", "probability": 0.77728271484375}, {"start": 2120.92, "end": 2121.74, "word": " ان", "probability": 0.458984375}, {"start": 2121.74, "end": 2122.18, "word": " واحد", "probability": 0.993408203125}, {"start": 2122.18, "end": 2122.66, "word": " بمشي", "probability": 0.90576171875}, {"start": 2122.66, "end": 2123.02, "word": " بسرعة", "probability": 0.9703369140625}, {"start": 2123.02, "end": 2123.7, "word": " طيارة", "probability": 0.98388671875}, {"start": 2123.7, "end": 2124.34, "word": " و", "probability": 0.69580078125}, {"start": 2124.34, "end": 2124.62, "word": " واحد", "probability": 0.912841796875}, {"start": 2124.62, "end": 2124.98, "word": " بمشي", "probability": 0.9742431640625}, {"start": 2124.98, "end": 2125.32, "word": " بسرعة", "probability": 0.9739990234375}, {"start": 2125.32, "end": 2126.96, "word": " سيارة", "probability": 0.9913330078125}, {"start": 2126.96, "end": 2127.56, "word": " فاهمين", "probability": 0.7916259765625}, {"start": 2127.56, "end": 2128.16, "word": " عليها؟", "probability": 0.5989583333333334}, {"start": 2128.16, "end": 2128.74, "word": " واحد", "probability": 0.919189453125}, {"start": 2128.74, "end": 2129.06, "word": " بمشي", "probability": 0.986083984375}, {"start": 2129.06, "end": 2129.42, "word": " سريع", "probability": 0.96875}, {"start": 2129.42, "end": 2130.0, "word": " جدا", "probability": 0.994873046875}, {"start": 2130.0, "end": 2130.72, "word": " و", 
"probability": 0.444580078125}, {"start": 2130.72, "end": 2131.28, "word": " التاني", "probability": 0.8486328125}, {"start": 2131.28, "end": 2131.44, "word": " اللي", "probability": 0.8525390625}, {"start": 2131.44, "end": 2131.62, "word": " هو", "probability": 0.98486328125}, {"start": 2131.62, "end": 2131.72, "word": " ال", "probability": 0.65576171875}, {"start": 2131.72, "end": 2132.3, "word": " coagulation", "probability": 0.9490559895833334}, {"start": 2132.3, "end": 2132.78, "word": " cascade", "probability": 0.94921875}, {"start": 2132.78, "end": 2132.96, "word": " و", "probability": 0.76904296875}, {"start": 2132.96, "end": 2133.24, "word": " التاني", "probability": 0.9619140625}, {"start": 2133.24, "end": 2133.62, "word": " بمشي", "probability": 0.966796875}, {"start": 2133.62, "end": 2134.16, "word": " ببطء", "probability": 0.9375}], "temperature": 1.0}, {"id": 84, "seek": 214614, "start": 2134.74, "end": 2146.14, "text": "ماشي، لو ابتديش هذا الكلام بدل ما بيبقى ماشي، لإله أن تتكون الجلطة، ويصير فيه imbalance في الhemostatic", "tokens": [2304, 33599, 1829, 12399, 45164, 48127, 2655, 16254, 8592, 23758, 2423, 28820, 10943, 47525, 1211, 19446, 4724, 1829, 3555, 4587, 7578, 3714, 33599, 1829, 12399, 5296, 28814, 1211, 3224, 14739, 6055, 2655, 30544, 25724, 1211, 9566, 3660, 12399, 4032, 1829, 9381, 13546, 8978, 3224, 43007, 8978, 2423, 28005, 555, 2399], "avg_logprob": -0.40808822594436944, "compression_ratio": 1.3076923076923077, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2134.74, "end": 2135.9, "word": "ماشي،", "probability": 0.4410247802734375}, {"start": 2135.9, "end": 2136.34, "word": " لو", "probability": 0.81005859375}, {"start": 2136.34, "end": 2136.84, "word": " ابتديش", "probability": 0.619384765625}, {"start": 2136.84, "end": 2137.0, "word": " هذا", "probability": 0.62255859375}, {"start": 2137.0, "end": 2137.36, "word": " الكلام", "probability": 0.8001302083333334}, {"start": 2137.36, "end": 2138.0, "word": " بدل", 
"probability": 0.8037109375}, {"start": 2138.0, "end": 2138.16, "word": " ما", "probability": 0.89599609375}, {"start": 2138.16, "end": 2138.54, "word": " بيبقى", "probability": 0.729736328125}, {"start": 2138.54, "end": 2139.54, "word": " ماشي،", "probability": 0.7406005859375}, {"start": 2139.54, "end": 2140.96, "word": " لإله", "probability": 0.511749267578125}, {"start": 2140.96, "end": 2141.24, "word": " أن", "probability": 0.86376953125}, {"start": 2141.24, "end": 2141.98, "word": " تتكون", "probability": 0.8748372395833334}, {"start": 2141.98, "end": 2143.5, "word": " الجلطة،", "probability": 0.76904296875}, {"start": 2143.5, "end": 2143.98, "word": " ويصير", "probability": 0.8447265625}, {"start": 2143.98, "end": 2144.32, "word": " فيه", "probability": 0.75830078125}, {"start": 2144.32, "end": 2144.92, "word": " imbalance", "probability": 0.9462890625}, {"start": 2144.92, "end": 2145.28, "word": " في", "probability": 0.96630859375}, {"start": 2145.28, "end": 2146.14, "word": " الhemostatic", "probability": 0.7452239990234375}], "temperature": 1.0}, {"id": 85, "seek": 217129, "start": 2147.11, "end": 2171.29, "text": "Balance، ماشي؟ بعدها شو بيصير؟ تلعكس الآية الفيبراليتك سيستم بيصير سريع جدا و مين بيصير يعمل degradation ال coagulation cascade بيتراجع وبالتالي بيصير سرعة الهدم أكتر من سرعة البناء في هذه الحكاية يبقى في البداية سرعة البناء بتكون عالية", "tokens": [33, 304, 719, 12399, 3714, 33599, 1829, 22807, 39182, 11296, 13412, 2407, 4724, 1829, 9381, 13546, 22807, 6055, 1211, 3615, 4117, 3794, 6024, 95, 10632, 27188, 1829, 26890, 6027, 36081, 4117, 8608, 1829, 14851, 2304, 4724, 1829, 9381, 13546, 8608, 16572, 3615, 10874, 28259, 4032, 3714, 9957, 4724, 1829, 9381, 13546, 7251, 25957, 1211, 40519, 2423, 598, 559, 2776, 50080, 4724, 36081, 2288, 26108, 3615, 46599, 6027, 2655, 6027, 1829, 4724, 1829, 9381, 13546, 8608, 2288, 27884, 2423, 3224, 40448, 5551, 4117, 2655, 2288, 9154, 8608, 2288, 27884, 29739, 1863, 16606, 8978, 29538, 21542, 4117, 995, 10632, 
7251, 3555, 4587, 7578, 8978, 29739, 28259, 10632, 8608, 2288, 27884, 29739, 1863, 16606, 39894, 30544, 6225, 6027, 10632], "avg_logprob": -0.21047007833790576, "compression_ratio": 1.7767857142857142, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 2147.11, "end": 2147.77, "word": "Balance،", "probability": 0.5401840209960938}, {"start": 2147.77, "end": 2149.13, "word": " ماشي؟", "probability": 0.70880126953125}, {"start": 2149.13, "end": 2149.51, "word": " بعدها", "probability": 0.6876220703125}, {"start": 2149.51, "end": 2149.67, "word": " شو", "probability": 0.94580078125}, {"start": 2149.67, "end": 2150.51, "word": " بيصير؟", "probability": 0.89072265625}, {"start": 2150.51, "end": 2151.17, "word": " تلعكس", "probability": 0.651416015625}, {"start": 2151.17, "end": 2151.73, "word": " الآية", "probability": 0.9173177083333334}, {"start": 2151.73, "end": 2153.61, "word": " الفيبراليتك", "probability": 0.64404296875}, {"start": 2153.61, "end": 2154.11, "word": " سيستم", "probability": 0.78515625}, {"start": 2154.11, "end": 2154.77, "word": " بيصير", "probability": 0.9691162109375}, {"start": 2154.77, "end": 2155.37, "word": " سريع", "probability": 0.98291015625}, {"start": 2155.37, "end": 2155.83, "word": " جدا", "probability": 0.994140625}, {"start": 2155.83, "end": 2156.05, "word": " و", "probability": 0.8095703125}, {"start": 2156.05, "end": 2156.81, "word": " مين", "probability": 0.77685546875}, {"start": 2156.81, "end": 2157.21, "word": " بيصير", "probability": 0.961181640625}, {"start": 2157.21, "end": 2157.59, "word": " يعمل", "probability": 0.9752604166666666}, {"start": 2157.59, "end": 2158.01, "word": " degradation", "probability": 0.496826171875}, {"start": 2158.01, "end": 2159.61, "word": " ال", "probability": 0.59228515625}, {"start": 2159.61, "end": 2160.19, "word": " coagulation", "probability": 0.6568196614583334}, {"start": 2160.19, "end": 2160.59, "word": " cascade", "probability": 0.859375}, {"start": 2160.59, "end": 2161.51, 
"word": " بيتراجع", "probability": 0.88330078125}, {"start": 2161.51, "end": 2162.39, "word": " وبالتالي", "probability": 0.89755859375}, {"start": 2162.39, "end": 2163.05, "word": " بيصير", "probability": 0.9659423828125}, {"start": 2163.05, "end": 2163.95, "word": " سرعة", "probability": 0.9441731770833334}, {"start": 2163.95, "end": 2164.41, "word": " الهدم", "probability": 0.9490559895833334}, {"start": 2164.41, "end": 2164.83, "word": " أكتر", "probability": 0.9443359375}, {"start": 2164.83, "end": 2164.99, "word": " من", "probability": 0.9931640625}, {"start": 2164.99, "end": 2165.33, "word": " سرعة", "probability": 0.9744466145833334}, {"start": 2165.33, "end": 2165.97, "word": " البناء", "probability": 0.9640299479166666}, {"start": 2165.97, "end": 2166.45, "word": " في", "probability": 0.87353515625}, {"start": 2166.45, "end": 2166.67, "word": " هذه", "probability": 0.912109375}, {"start": 2166.67, "end": 2167.15, "word": " الحكاية", "probability": 0.8953857421875}, {"start": 2167.15, "end": 2167.69, "word": " يبقى", "probability": 0.8826904296875}, {"start": 2167.69, "end": 2167.79, "word": " في", "probability": 0.8212890625}, {"start": 2167.79, "end": 2168.45, "word": " البداية", "probability": 0.9905598958333334}, {"start": 2168.45, "end": 2169.15, "word": " سرعة", "probability": 0.9474283854166666}, {"start": 2169.15, "end": 2169.93, "word": " البناء", "probability": 0.9664713541666666}, {"start": 2169.93, "end": 2170.73, "word": " بتكون", "probability": 0.91015625}, {"start": 2170.73, "end": 2171.29, "word": " عالية", "probability": 0.994140625}], "temperature": 1.0}, {"id": 86, "seek": 219817, "start": 2172.62, "end": 2198.18, "text": "أسرع من سرعة الهدم ثم بعد ذلك بتنعكس الآية و بيصير سرعة الهدم أسرع مناش من سرعة البناء مفهوم عليها بشبه بقى عشان هيك التنين بيبدو مع بعض صحيح لكن بسرعات مختلفة بسرعات مختلفة واحد ممشي بسرعة أعلى من التاني لغاية ما ينهي دوره ثم بيبدأ التاني في العمل", "tokens": [10721, 3794, 2288, 3615, 9154, 8608, 2288, 27884, 2423, 
3224, 40448, 38637, 2304, 39182, 29910, 23275, 39894, 1863, 3615, 4117, 3794, 6024, 95, 10632, 4032, 4724, 1829, 9381, 13546, 8608, 2288, 27884, 2423, 3224, 40448, 5551, 3794, 2288, 3615, 9154, 33599, 9154, 8608, 2288, 27884, 29739, 1863, 16606, 3714, 5172, 3224, 20498, 25894, 11296, 4724, 8592, 3555, 3224, 4724, 4587, 7578, 6225, 8592, 7649, 39896, 4117, 16712, 1863, 9957, 4724, 1829, 44510, 2407, 20449, 45030, 11242, 20328, 5016, 1829, 5016, 44381, 4724, 3794, 2288, 3615, 9307, 3714, 46456, 46538, 3660, 4724, 3794, 2288, 3615, 9307, 3714, 46456, 46538, 3660, 36764, 24401, 3714, 2304, 8592, 1829, 4724, 3794, 2288, 27884, 5551, 3615, 23942, 9154, 16712, 7649, 1829, 5296, 17082, 995, 10632, 19446, 7251, 1863, 3224, 1829, 11778, 13063, 3224, 38637, 2304, 4724, 1829, 44510, 10721, 16712, 7649, 1829, 8978, 18863, 42213], "avg_logprob": -0.12621897543575747, "compression_ratio": 2.083720930232558, "no_speech_prob": 4.172325134277344e-07, "words": [{"start": 2172.62, "end": 2173.32, "word": "أسرع", "probability": 0.9033203125}, {"start": 2173.32, "end": 2173.52, "word": " من", "probability": 0.9873046875}, {"start": 2173.52, "end": 2173.86, "word": " سرعة", "probability": 0.9503580729166666}, {"start": 2173.86, "end": 2174.36, "word": " الهدم", "probability": 0.9479166666666666}, {"start": 2174.36, "end": 2175.64, "word": " ثم", "probability": 0.846435546875}, {"start": 2175.64, "end": 2175.88, "word": " بعد", "probability": 0.90380859375}, {"start": 2175.88, "end": 2176.38, "word": " ذلك", "probability": 0.993896484375}, {"start": 2176.38, "end": 2177.4, "word": " بتنعكس", "probability": 0.82587890625}, {"start": 2177.4, "end": 2177.74, "word": " الآية", "probability": 0.8419596354166666}, {"start": 2177.74, "end": 2177.86, "word": " و", "probability": 0.51904296875}, {"start": 2177.86, "end": 2178.06, "word": " بيصير", "probability": 0.78369140625}, {"start": 2178.06, "end": 2178.36, "word": " سرعة", "probability": 0.9524739583333334}, {"start": 2178.36, "end": 
2178.78, "word": " الهدم", "probability": 0.9803059895833334}, {"start": 2178.78, "end": 2179.5, "word": " أسرع", "probability": 0.98681640625}, {"start": 2179.5, "end": 2179.96, "word": " مناش", "probability": 0.759521484375}, {"start": 2179.96, "end": 2180.28, "word": " من", "probability": 0.95361328125}, {"start": 2180.28, "end": 2180.6, "word": " سرعة", "probability": 0.97021484375}, {"start": 2180.6, "end": 2181.06, "word": " البناء", "probability": 0.8037109375}, {"start": 2181.06, "end": 2181.4, "word": " مفهوم", "probability": 0.850341796875}, {"start": 2181.4, "end": 2181.66, "word": " عليها", "probability": 0.938232421875}, {"start": 2181.66, "end": 2181.96, "word": " بشبه", "probability": 0.6483154296875}, {"start": 2181.96, "end": 2182.42, "word": " بقى", "probability": 0.749755859375}, {"start": 2182.42, "end": 2182.78, "word": " عشان", "probability": 0.8260091145833334}, {"start": 2182.78, "end": 2182.96, "word": " هيك", "probability": 0.775634765625}, {"start": 2182.96, "end": 2183.28, "word": " التنين", "probability": 0.7047526041666666}, {"start": 2183.28, "end": 2183.56, "word": " بيبدو", "probability": 0.8319091796875}, {"start": 2183.56, "end": 2183.72, "word": " مع", "probability": 0.9599609375}, {"start": 2183.72, "end": 2184.0, "word": " بعض", "probability": 0.99169921875}, {"start": 2184.0, "end": 2185.0, "word": " صحيح", "probability": 0.9822998046875}, {"start": 2185.0, "end": 2186.14, "word": " لكن", "probability": 0.91015625}, {"start": 2186.14, "end": 2187.72, "word": " بسرعات", "probability": 0.98037109375}, {"start": 2187.72, "end": 2188.52, "word": " مختلفة", "probability": 0.995361328125}, {"start": 2188.52, "end": 2189.44, "word": " بسرعات", "probability": 0.93486328125}, {"start": 2189.44, "end": 2190.54, "word": " مختلفة", "probability": 0.9925537109375}, {"start": 2190.54, "end": 2191.02, "word": " واحد", "probability": 0.949951171875}, {"start": 2191.02, "end": 2191.58, "word": " ممشي", "probability": 0.83551025390625}, 
{"start": 2191.58, "end": 2192.36, "word": " بسرعة", "probability": 0.96875}, {"start": 2192.36, "end": 2192.82, "word": " أعلى", "probability": 0.94873046875}, {"start": 2192.82, "end": 2193.0, "word": " من", "probability": 0.99267578125}, {"start": 2193.0, "end": 2193.54, "word": " التاني", "probability": 0.8992513020833334}, {"start": 2193.54, "end": 2194.12, "word": " لغاية", "probability": 0.9910888671875}, {"start": 2194.12, "end": 2194.22, "word": " ما", "probability": 0.95263671875}, {"start": 2194.22, "end": 2194.58, "word": " ينهي", "probability": 0.841552734375}, {"start": 2194.58, "end": 2195.0, "word": " دوره", "probability": 0.9879557291666666}, {"start": 2195.0, "end": 2195.7, "word": " ثم", "probability": 0.974365234375}, {"start": 2195.7, "end": 2197.02, "word": " بيبدأ", "probability": 0.963134765625}, {"start": 2197.02, "end": 2197.66, "word": " التاني", "probability": 0.982421875}, {"start": 2197.66, "end": 2197.82, "word": " في", "probability": 0.9501953125}, {"start": 2197.82, "end": 2198.18, "word": " العمل", "probability": 0.98193359375}], "temperature": 1.0}, {"id": 87, "seek": 222644, "start": 2199.79, "end": 2226.45, "text": "طبعا محصلة من أدواره من صفات الـ Fibrotic System انه it dissolves the clot by digestion of fiber يعني شو بيعمل؟ بيكسر .. بيدوّب الجلدة من خلال digestion اللي هو عبارة عن عملية هدم ال .. 
ال fiber و الموضوع ببساطة يا شباب بشكل عام", "tokens": [9566, 3555, 3615, 995, 3714, 5016, 36520, 3660, 9154, 5551, 3215, 2407, 9640, 3224, 9154, 20328, 5172, 9307, 2423, 39184, 479, 6414, 9411, 8910, 16472, 3224, 309, 15840, 977, 264, 48587, 538, 40560, 295, 12874, 37495, 22653, 13412, 2407, 4724, 1829, 25957, 1211, 22807, 4724, 1829, 4117, 3794, 2288, 4386, 4724, 25708, 2407, 11703, 3555, 25724, 1211, 41891, 9154, 16490, 1211, 6027, 40560, 13672, 1829, 31439, 6225, 3555, 9640, 3660, 18871, 6225, 42213, 10632, 8032, 40448, 2423, 4386, 2423, 12874, 4032, 9673, 2407, 11242, 45367, 4724, 3555, 3794, 41193, 3660, 35186, 13412, 3555, 16758, 4724, 8592, 28820, 6225, 10943], "avg_logprob": -0.2920312517881393, "compression_ratio": 1.4638297872340424, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2199.79, "end": 2200.15, "word": "طبعا", "probability": 0.8145751953125}, {"start": 2200.15, "end": 2200.83, "word": " محصلة", "probability": 0.66455078125}, {"start": 2200.83, "end": 2200.95, "word": " من", "probability": 0.90771484375}, {"start": 2200.95, "end": 2201.39, "word": " أدواره", "probability": 0.559130859375}, {"start": 2201.39, "end": 2201.53, "word": " من", "probability": 0.8740234375}, {"start": 2201.53, "end": 2201.85, "word": " صفات", "probability": 0.9814453125}, {"start": 2201.85, "end": 2202.01, "word": " الـ", "probability": 0.789794921875}, {"start": 2202.01, "end": 2202.35, "word": " Fibrotic", "probability": 0.4178059895833333}, {"start": 2202.35, "end": 2202.75, "word": " System", "probability": 0.56103515625}, {"start": 2202.75, "end": 2203.07, "word": " انه", "probability": 0.579345703125}, {"start": 2203.07, "end": 2203.41, "word": " it", "probability": 0.2103271484375}, {"start": 2203.41, "end": 2204.09, "word": " dissolves", "probability": 0.6517333984375}, {"start": 2204.09, "end": 2205.29, "word": " the", "probability": 0.8037109375}, {"start": 2205.29, "end": 2205.71, "word": " clot", "probability": 0.8857421875}, {"start": 
2205.71, "end": 2207.17, "word": " by", "probability": 0.73681640625}, {"start": 2207.17, "end": 2207.71, "word": " digestion", "probability": 0.9208984375}, {"start": 2207.71, "end": 2208.19, "word": " of", "probability": 0.97509765625}, {"start": 2208.19, "end": 2208.87, "word": " fiber", "probability": 0.296630859375}, {"start": 2208.87, "end": 2209.11, "word": " يعني", "probability": 0.5899658203125}, {"start": 2209.11, "end": 2209.37, "word": " شو", "probability": 0.75927734375}, {"start": 2209.37, "end": 2210.25, "word": " بيعمل؟", "probability": 0.91025390625}, {"start": 2210.25, "end": 2210.97, "word": " بيكسر", "probability": 0.9107421875}, {"start": 2210.97, "end": 2211.03, "word": " ..", "probability": 0.3662109375}, {"start": 2211.03, "end": 2211.75, "word": " بيدوّب", "probability": 0.8046875}, {"start": 2211.75, "end": 2212.55, "word": " الجلدة", "probability": 0.76416015625}, {"start": 2212.55, "end": 2213.11, "word": " من", "probability": 0.98974609375}, {"start": 2213.11, "end": 2213.91, "word": " خلال", "probability": 0.9894205729166666}, {"start": 2213.91, "end": 2215.77, "word": " digestion", "probability": 0.59228515625}, {"start": 2215.77, "end": 2216.17, "word": " اللي", "probability": 0.896240234375}, {"start": 2216.17, "end": 2216.41, "word": " هو", "probability": 0.9736328125}, {"start": 2216.41, "end": 2216.65, "word": " عبارة", "probability": 0.9842529296875}, {"start": 2216.65, "end": 2216.81, "word": " عن", "probability": 0.99169921875}, {"start": 2216.81, "end": 2217.21, "word": " عملية", "probability": 0.9778645833333334}, {"start": 2217.21, "end": 2217.65, "word": " هدم", "probability": 0.97607421875}, {"start": 2217.65, "end": 2218.41, "word": " ال", "probability": 0.410400390625}, {"start": 2218.41, "end": 2218.61, "word": " ..", "probability": 0.27734375}, {"start": 2218.61, "end": 2219.23, "word": " ال", "probability": 0.8623046875}, {"start": 2219.23, "end": 2219.65, "word": " fiber", "probability": 0.6201171875}, {"start": 
2219.65, "end": 2221.49, "word": " و", "probability": 0.58056640625}, {"start": 2221.49, "end": 2222.77, "word": " الموضوع", "probability": 0.94775390625}, {"start": 2222.77, "end": 2223.41, "word": " ببساطة", "probability": 0.9921875}, {"start": 2223.41, "end": 2223.61, "word": " يا", "probability": 0.93408203125}, {"start": 2223.61, "end": 2223.97, "word": " شباب", "probability": 0.98876953125}, {"start": 2223.97, "end": 2225.93, "word": " بشكل", "probability": 0.8318684895833334}, {"start": 2225.93, "end": 2226.45, "word": " عام", "probability": 0.997314453125}], "temperature": 1.0}, {"id": 88, "seek": 223102, "start": 2228.9, "end": 2231.02, "text": "بيتم كالقادة", "tokens": [21292, 39237, 9122, 6027, 4587, 18513, 3660], "avg_logprob": -0.3291015587747097, "compression_ratio": 0.6764705882352942, "no_speech_prob": 0.0, "words": [{"start": 2228.9, "end": 2229.54, "word": "بيتم", "probability": 0.933349609375}, {"start": 2229.54, "end": 2231.02, "word": " كالقادة", "probability": 0.695166015625}], "temperature": 1.0}, {"id": 89, "seek": 226087, "start": 2232.39, "end": 2260.87, "text": "كلنا بنعرف إنه بداية زي ما الحاجات متفقين إحنا إنه التنين بيبدوا مع بعض واحد بيبدأ سريع واحد بيبدأ بطيئي، مظبوط؟ يبقى في البداية بيبدأ الـ Coagulation Cascade Mechanism ويبدأ تكوين كميات كبيرة من الـ Thrombin الـ Thrombin هيشتغل على الـ Fibrinogen ويحوله إلى Fibrin Polymer ثم Fibrin Polymer ثم X-Linked Fibrin، صح؟ مش هيك متفقين؟ وتتكون الجلبة", "tokens": [28820, 8315, 44945, 3615, 28480, 36145, 3224, 4724, 28259, 10632, 30767, 1829, 19446, 21542, 26108, 9307, 44650, 5172, 4587, 9957, 11933, 5016, 8315, 36145, 3224, 16712, 1863, 9957, 4724, 1829, 44510, 14407, 20449, 45030, 11242, 36764, 24401, 4724, 1829, 44510, 10721, 8608, 16572, 3615, 36764, 24401, 4724, 1829, 44510, 10721, 4724, 9566, 1829, 19986, 1829, 12399, 3714, 19913, 3555, 2407, 9566, 22807, 7251, 3555, 4587, 7578, 8978, 29739, 28259, 10632, 4724, 1829, 44510, 10721, 2423, 39184, 3066, 559, 2776, 383, 4806, 762, 30175, 
1434, 4032, 1829, 44510, 10721, 6055, 4117, 2407, 9957, 9122, 2304, 1829, 9307, 9122, 3555, 48923, 9154, 2423, 39184, 41645, 3548, 259, 2423, 39184, 41645, 3548, 259, 39896, 8592, 2655, 17082, 1211, 15844, 2423, 39184, 479, 6414, 259, 8799, 4032, 1829, 5016, 12610, 3224, 30731, 479, 6414, 259, 18553, 936, 38637, 2304, 479, 6414, 259, 18553, 936, 38637, 2304, 1783, 12, 26822, 292, 479, 6414, 259, 12399, 20328, 5016, 22807, 37893, 39896, 4117, 44650, 5172, 4587, 9957, 22807, 34683, 2655, 30544, 25724, 46152, 3660], "avg_logprob": -0.21651785687676497, "compression_ratio": 1.7467532467532467, "no_speech_prob": 2.562999725341797e-06, "words": [{"start": 2232.39, "end": 2232.71, "word": "كلنا", "probability": 0.718017578125}, {"start": 2232.71, "end": 2233.17, "word": " بنعرف", "probability": 0.9337565104166666}, {"start": 2233.17, "end": 2233.97, "word": " إنه", "probability": 0.586181640625}, {"start": 2233.97, "end": 2234.37, "word": " بداية", "probability": 0.9396158854166666}, {"start": 2234.37, "end": 2234.79, "word": " زي", "probability": 0.7685546875}, {"start": 2234.79, "end": 2234.87, "word": " ما", "probability": 0.67919921875}, {"start": 2234.87, "end": 2235.15, "word": " الحاجات", "probability": 0.75048828125}, {"start": 2235.15, "end": 2235.73, "word": " متفقين", "probability": 0.782958984375}, {"start": 2235.73, "end": 2235.99, "word": " إحنا", "probability": 0.8169759114583334}, {"start": 2235.99, "end": 2236.79, "word": " إنه", "probability": 0.62646484375}, {"start": 2236.79, "end": 2237.05, "word": " التنين", "probability": 0.7644856770833334}, {"start": 2237.05, "end": 2237.29, "word": " بيبدوا", "probability": 0.8221435546875}, {"start": 2237.29, "end": 2237.45, "word": " مع", "probability": 0.98046875}, {"start": 2237.45, "end": 2238.05, "word": " بعض", "probability": 0.974853515625}, {"start": 2238.05, "end": 2238.67, "word": " واحد", "probability": 0.893798828125}, {"start": 2238.67, "end": 2238.93, "word": " بيبدأ", "probability": 
0.924072265625}, {"start": 2238.93, "end": 2239.19, "word": " سريع", "probability": 0.98095703125}, {"start": 2239.19, "end": 2239.35, "word": " واحد", "probability": 0.7178955078125}, {"start": 2239.35, "end": 2239.63, "word": " بيبدأ", "probability": 0.947265625}, {"start": 2239.63, "end": 2240.19, "word": " بطيئي،", "probability": 0.7119547526041666}, {"start": 2240.19, "end": 2240.87, "word": " مظبوط؟", "probability": 0.6946614583333334}, {"start": 2240.87, "end": 2241.07, "word": " يبقى", "probability": 0.9180908203125}, {"start": 2241.07, "end": 2241.19, "word": " في", "probability": 0.92578125}, {"start": 2241.19, "end": 2241.81, "word": " البداية", "probability": 0.9827473958333334}, {"start": 2241.81, "end": 2242.61, "word": " بيبدأ", "probability": 0.9591064453125}, {"start": 2242.61, "end": 2242.79, "word": " الـ", "probability": 0.55059814453125}, {"start": 2242.79, "end": 2243.25, "word": " Coagulation", "probability": 0.7687174479166666}, {"start": 2243.25, "end": 2243.73, "word": " Cascade", "probability": 0.7062174479166666}, {"start": 2243.73, "end": 2244.27, "word": " Mechanism", "probability": 0.953125}, {"start": 2244.27, "end": 2245.19, "word": " ويبدأ", "probability": 0.946044921875}, {"start": 2245.19, "end": 2245.57, "word": " تكوين", "probability": 0.958740234375}, {"start": 2245.57, "end": 2245.97, "word": " كميات", "probability": 0.9903564453125}, {"start": 2245.97, "end": 2246.37, "word": " كبيرة", "probability": 0.9866536458333334}, {"start": 2246.37, "end": 2246.61, "word": " من", "probability": 0.99560546875}, {"start": 2246.61, "end": 2246.79, "word": " الـ", "probability": 0.46484375}, {"start": 2246.79, "end": 2247.33, "word": " Thrombin", "probability": 0.7552897135416666}, {"start": 2247.33, "end": 2248.13, "word": " الـ", "probability": 0.780029296875}, {"start": 2248.13, "end": 2248.59, "word": " Thrombin", "probability": 0.8411458333333334}, {"start": 2248.59, "end": 2249.15, "word": " هيشتغل", "probability": 0.846142578125}, 
{"start": 2249.15, "end": 2249.41, "word": " على", "probability": 0.96533203125}, {"start": 2249.41, "end": 2249.73, "word": " الـ", "probability": 0.846923828125}, {"start": 2249.73, "end": 2250.63, "word": " Fibrinogen", "probability": 0.851318359375}, {"start": 2250.63, "end": 2251.75, "word": " ويحوله", "probability": 0.91123046875}, {"start": 2251.75, "end": 2251.95, "word": " إلى", "probability": 0.9580078125}, {"start": 2251.95, "end": 2252.71, "word": " Fibrin", "probability": 0.939453125}, {"start": 2252.71, "end": 2253.73, "word": " Polymer", "probability": 0.61474609375}, {"start": 2253.73, "end": 2254.33, "word": " ثم", "probability": 0.96240234375}, {"start": 2254.33, "end": 2254.97, "word": " Fibrin", "probability": 0.9150390625}, {"start": 2254.97, "end": 2256.05, "word": " Polymer", "probability": 0.916015625}, {"start": 2256.05, "end": 2256.71, "word": " ثم", "probability": 0.955322265625}, {"start": 2256.71, "end": 2256.99, "word": " X", "probability": 0.92529296875}, {"start": 2256.99, "end": 2257.35, "word": "-Linked", "probability": 0.6514485677083334}, {"start": 2257.35, "end": 2257.83, "word": " Fibrin،", "probability": 0.7900390625}, {"start": 2257.83, "end": 2258.27, "word": " صح؟", "probability": 0.9417317708333334}, {"start": 2258.27, "end": 2258.41, "word": " مش", "probability": 0.66455078125}, {"start": 2258.41, "end": 2258.55, "word": " هيك", "probability": 0.7255859375}, {"start": 2258.55, "end": 2259.31, "word": " متفقين؟", "probability": 0.838232421875}, {"start": 2259.31, "end": 2260.15, "word": " وتتكون", "probability": 0.8359375}, {"start": 2260.15, "end": 2260.87, "word": " الجلبة", "probability": 0.9140625}], "temperature": 1.0}, {"id": 90, "seek": 227682, "start": 2261.1, "end": 2276.82, "text": "شو بيعمل ال .. ال .. ال .. 
البلازمينوجين أو البلازمين؟ بيروح وشغال على الـ Fibril Monomer وعمله degradation، بيشتغل على ال Fibril Gen و Fibril Monomer وواش بيعمل؟ بيعمل degradation، ماشي؟", "tokens": [8592, 2407, 4724, 1829, 25957, 1211, 2423, 4386, 2423, 4386, 2423, 4386, 29739, 1211, 31377, 2304, 9957, 29245, 9957, 34051, 29739, 1211, 31377, 2304, 9957, 22807, 4724, 1829, 32887, 5016, 4032, 8592, 17082, 6027, 15844, 2423, 39184, 479, 6414, 388, 4713, 14301, 4032, 25957, 43761, 40519, 12399, 4724, 1829, 8592, 2655, 17082, 1211, 15844, 2423, 479, 6414, 388, 3632, 4032, 479, 6414, 388, 4713, 14301, 4032, 2407, 33599, 4724, 1829, 25957, 1211, 22807, 4724, 1829, 25957, 1211, 40519, 12399, 3714, 33599, 1829, 22807], "avg_logprob": -0.30059523241860525, "compression_ratio": 1.7391304347826086, "no_speech_prob": 0.0, "words": [{"start": 2261.1, "end": 2261.38, "word": "شو", "probability": 0.6024169921875}, {"start": 2261.38, "end": 2261.72, "word": " بيعمل", "probability": 0.980712890625}, {"start": 2261.72, "end": 2261.98, "word": " ال", "probability": 0.921875}, {"start": 2261.98, "end": 2262.5, "word": " ..", "probability": 0.6552734375}, {"start": 2262.5, "end": 2262.88, "word": " ال", "probability": 0.904296875}, {"start": 2262.88, "end": 2262.98, "word": " ..", "probability": 0.9541015625}, {"start": 2262.98, "end": 2263.56, "word": " ال", "probability": 0.9423828125}, {"start": 2263.56, "end": 2263.76, "word": " ..", "probability": 0.69677734375}, {"start": 2263.76, "end": 2264.86, "word": " البلازمينوجين", "probability": 0.7216884068080357}, {"start": 2264.86, "end": 2265.14, "word": " أو", "probability": 0.61083984375}, {"start": 2265.14, "end": 2266.3, "word": " البلازمين؟", "probability": 0.7520345052083334}, {"start": 2266.3, "end": 2266.62, "word": " بيروح", "probability": 0.683624267578125}, {"start": 2266.62, "end": 2267.34, "word": " وشغال", "probability": 0.8616943359375}, {"start": 2267.34, "end": 2267.58, "word": " على", "probability": 0.7431640625}, {"start": 2267.58, "end": 
2267.78, "word": " الـ", "probability": 0.5438232421875}, {"start": 2267.78, "end": 2268.28, "word": " Fibril", "probability": 0.6029459635416666}, {"start": 2268.28, "end": 2268.9, "word": " Monomer", "probability": 0.6185302734375}, {"start": 2268.9, "end": 2269.82, "word": " وعمله", "probability": 0.9069010416666666}, {"start": 2269.82, "end": 2270.84, "word": " degradation،", "probability": 0.596923828125}, {"start": 2270.84, "end": 2271.9, "word": " بيشتغل", "probability": 0.8894856770833334}, {"start": 2271.9, "end": 2272.02, "word": " على", "probability": 0.95751953125}, {"start": 2272.02, "end": 2272.1, "word": " ال", "probability": 0.931640625}, {"start": 2272.1, "end": 2272.46, "word": " Fibril", "probability": 0.7691243489583334}, {"start": 2272.46, "end": 2272.64, "word": " Gen", "probability": 0.1488037109375}, {"start": 2272.64, "end": 2272.74, "word": " و", "probability": 0.98291015625}, {"start": 2272.74, "end": 2273.12, "word": " Fibril", "probability": 0.8287760416666666}, {"start": 2273.12, "end": 2273.44, "word": " Monomer", "probability": 0.94775390625}, {"start": 2273.44, "end": 2273.72, "word": " وواش", "probability": 0.5011393229166666}, {"start": 2273.72, "end": 2274.88, "word": " بيعمل؟", "probability": 0.98193359375}, {"start": 2274.88, "end": 2275.2, "word": " بيعمل", "probability": 0.97900390625}, {"start": 2275.2, "end": 2276.26, "word": " degradation،", "probability": 0.8046875}, {"start": 2276.26, "end": 2276.82, "word": " ماشي؟", "probability": 0.923828125}], "temperature": 1.0}, {"id": 91, "seek": 230521, "start": 2277.84, "end": 2305.22, "text": "عيد تاني يبقى في البداية بيبدأ تحت تأثير الترابين الـfibrin gel is cleaved وبيتحول إلى fibrin monomer الـfibrin monomer بيشتغل عليها اللي هو البلازمن و بيعمل degradation و بيكون فيه fibrin degradation product أو fibrin process product المفهوم يا شباب", "tokens": [3615, 25708, 6055, 7649, 1829, 7251, 3555, 4587, 7578, 8978, 29739, 28259, 10632, 4724, 1829, 44510, 10721, 6055, 33753, 6055, 
10721, 12984, 13546, 16712, 2288, 16758, 9957, 2423, 39184, 69, 6414, 259, 4087, 307, 1233, 12865, 4032, 21292, 2655, 5016, 12610, 30731, 283, 6414, 259, 1108, 14301, 2423, 39184, 69, 6414, 259, 1108, 14301, 4724, 1829, 8592, 2655, 17082, 1211, 25894, 11296, 13672, 1829, 31439, 29739, 1211, 31377, 27842, 4032, 4724, 1829, 25957, 1211, 40519, 4032, 4724, 1829, 30544, 8978, 3224, 283, 6414, 259, 40519, 1674, 34051, 283, 6414, 259, 1399, 1674, 9673, 5172, 3224, 20498, 35186, 13412, 3555, 16758], "avg_logprob": -0.30724010845222094, "compression_ratio": 1.6454545454545455, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2277.84, "end": 2278.22, "word": "عيد", "probability": 0.4862060546875}, {"start": 2278.22, "end": 2278.62, "word": " تاني", "probability": 0.8883463541666666}, {"start": 2278.62, "end": 2279.5, "word": " يبقى", "probability": 0.8248291015625}, {"start": 2279.5, "end": 2279.6, "word": " في", "probability": 0.86572265625}, {"start": 2279.6, "end": 2280.26, "word": " البداية", "probability": 0.9617513020833334}, {"start": 2280.26, "end": 2281.58, "word": " بيبدأ", "probability": 0.7841796875}, {"start": 2281.58, "end": 2282.52, "word": " تحت", "probability": 0.990234375}, {"start": 2282.52, "end": 2283.0, "word": " تأثير", "probability": 0.86419677734375}, {"start": 2283.0, "end": 2284.3, "word": " الترابين", "probability": 0.43714141845703125}, {"start": 2284.3, "end": 2285.58, "word": " الـfibrin", "probability": 0.527587890625}, {"start": 2285.58, "end": 2285.9, "word": " gel", "probability": 0.42724609375}, {"start": 2285.9, "end": 2286.22, "word": " is", "probability": 0.8115234375}, {"start": 2286.22, "end": 2287.1, "word": " cleaved", "probability": 0.879150390625}, {"start": 2287.1, "end": 2288.4, "word": " وبيتحول", "probability": 0.93779296875}, {"start": 2288.4, "end": 2288.62, "word": " إلى", "probability": 0.50732421875}, {"start": 2288.62, "end": 2289.2, "word": " fibrin", "probability": 0.7609049479166666}, {"start": 2289.2, 
"end": 2289.86, "word": " monomer", "probability": 0.89111328125}, {"start": 2289.86, "end": 2291.06, "word": " الـfibrin", "probability": 0.77548828125}, {"start": 2291.06, "end": 2291.5, "word": " monomer", "probability": 0.931884765625}, {"start": 2291.5, "end": 2292.3, "word": " بيشتغل", "probability": 0.970947265625}, {"start": 2292.3, "end": 2292.86, "word": " عليها", "probability": 0.984619140625}, {"start": 2292.86, "end": 2293.58, "word": " اللي", "probability": 0.921142578125}, {"start": 2293.58, "end": 2294.0, "word": " هو", "probability": 0.9921875}, {"start": 2294.0, "end": 2295.92, "word": " البلازمن", "probability": 0.69720458984375}, {"start": 2295.92, "end": 2296.5, "word": " و", "probability": 0.62255859375}, {"start": 2296.5, "end": 2298.52, "word": " بيعمل", "probability": 0.8377685546875}, {"start": 2298.52, "end": 2299.1, "word": " degradation", "probability": 0.90869140625}, {"start": 2299.1, "end": 2299.3, "word": " و", "probability": 0.73095703125}, {"start": 2299.3, "end": 2299.64, "word": " بيكون", "probability": 0.7216796875}, {"start": 2299.64, "end": 2300.34, "word": " فيه", "probability": 0.4427490234375}, {"start": 2300.34, "end": 2300.94, "word": " fibrin", "probability": 0.7831217447916666}, {"start": 2300.94, "end": 2301.4, "word": " degradation", "probability": 0.951171875}, {"start": 2301.4, "end": 2301.94, "word": " product", "probability": 0.9755859375}, {"start": 2301.94, "end": 2302.12, "word": " أو", "probability": 0.6611328125}, {"start": 2302.12, "end": 2302.6, "word": " fibrin", "probability": 0.7767740885416666}, {"start": 2302.6, "end": 2303.1, "word": " process", "probability": 0.9287109375}, {"start": 2303.1, "end": 2303.96, "word": " product", "probability": 0.79345703125}, {"start": 2303.96, "end": 2304.8, "word": " المفهوم", "probability": 0.80718994140625}, {"start": 2304.8, "end": 2305.0, "word": " يا", "probability": 0.460693359375}, {"start": 2305.0, "end": 2305.22, "word": " شباب", "probability": 
0.9840494791666666}], "temperature": 1.0}, {"id": 92, "seek": 233377, "start": 2306.69, "end": 2333.77, "text": "هذا بشكل عام بشكل تفصيلي نبدأ بشكل تفصيلي نبدأ ونقول ان once clot once clotting begins the fibrotic system comes to life يعني بيبدأ ياش كان ميت؟ لأ ماكنش ميت بس كان يزحف كسلحفاة كان ياش كان بطيئا بيحيط و لما بيحيط شو بيعمل؟ plasmalogen شو بيعمل؟ binds to fibrin", "tokens": [3224, 15730, 4724, 8592, 28820, 6225, 10943, 4724, 8592, 28820, 6055, 5172, 9381, 1829, 20292, 8717, 44510, 10721, 4724, 8592, 28820, 6055, 5172, 9381, 1829, 20292, 8717, 44510, 10721, 4032, 1863, 39648, 16472, 1564, 48587, 1564, 48587, 783, 7338, 264, 283, 6414, 9411, 1185, 1487, 281, 993, 37495, 22653, 4724, 1829, 44510, 10721, 7251, 33599, 25961, 3714, 36081, 22807, 5296, 10721, 19446, 19452, 8592, 3714, 36081, 4724, 3794, 25961, 7251, 11622, 5016, 5172, 9122, 3794, 1211, 5016, 5172, 995, 3660, 25961, 7251, 33599, 25961, 4724, 9566, 1829, 19986, 995, 4724, 1829, 5016, 1829, 9566, 4032, 5296, 15042, 4724, 1829, 5016, 1829, 9566, 13412, 2407, 4724, 1829, 25957, 1211, 22807, 499, 296, 5579, 8799, 13412, 2407, 4724, 1829, 25957, 1211, 22807, 41515, 281, 283, 6414, 259], "avg_logprob": -0.24119544301241164, "compression_ratio": 1.747787610619469, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2306.69, "end": 2306.93, "word": "هذا", "probability": 0.697021484375}, {"start": 2306.93, "end": 2307.21, "word": " بشكل", "probability": 0.9541015625}, {"start": 2307.21, "end": 2307.53, "word": " عام", "probability": 0.93310546875}, {"start": 2307.53, "end": 2308.49, "word": " بشكل", "probability": 0.7782389322916666}, {"start": 2308.49, "end": 2309.05, "word": " تفصيلي", "probability": 0.926953125}, {"start": 2309.05, "end": 2310.15, "word": " نبدأ", "probability": 0.953125}, {"start": 2310.15, "end": 2312.45, "word": " بشكل", "probability": 0.7548828125}, {"start": 2312.45, "end": 2313.05, "word": " تفصيلي", "probability": 0.9646484375}, {"start": 2313.05, "end": 2313.63, 
"word": " نبدأ", "probability": 0.9659830729166666}, {"start": 2313.63, "end": 2314.37, "word": " ونقول", "probability": 0.8077799479166666}, {"start": 2314.37, "end": 2314.53, "word": " ان", "probability": 0.38330078125}, {"start": 2314.53, "end": 2314.75, "word": " once", "probability": 0.63818359375}, {"start": 2314.75, "end": 2315.25, "word": " clot", "probability": 0.36962890625}, {"start": 2315.25, "end": 2316.73, "word": " once", "probability": 0.28857421875}, {"start": 2316.73, "end": 2317.31, "word": " clotting", "probability": 0.771240234375}, {"start": 2317.31, "end": 2317.79, "word": " begins", "probability": 0.783203125}, {"start": 2317.79, "end": 2318.01, "word": " the", "probability": 0.69091796875}, {"start": 2318.01, "end": 2318.41, "word": " fibrotic", "probability": 0.6881510416666666}, {"start": 2318.41, "end": 2318.79, "word": " system", "probability": 0.958984375}, {"start": 2318.79, "end": 2319.11, "word": " comes", "probability": 0.81884765625}, {"start": 2319.11, "end": 2319.33, "word": " to", "probability": 0.98193359375}, {"start": 2319.33, "end": 2319.59, "word": " life", "probability": 0.9580078125}, {"start": 2319.59, "end": 2319.83, "word": " يعني", "probability": 0.852294921875}, {"start": 2319.83, "end": 2320.19, "word": " بيبدأ", "probability": 0.8961181640625}, {"start": 2320.19, "end": 2320.39, "word": " ياش", "probability": 0.464599609375}, {"start": 2320.39, "end": 2320.75, "word": " كان", "probability": 0.6591796875}, {"start": 2320.75, "end": 2321.49, "word": " ميت؟", "probability": 0.7735188802083334}, {"start": 2321.49, "end": 2321.97, "word": " لأ", "probability": 0.84228515625}, {"start": 2321.97, "end": 2322.31, "word": " ماكنش", "probability": 0.7021484375}, {"start": 2322.31, "end": 2322.61, "word": " ميت", "probability": 0.99462890625}, {"start": 2322.61, "end": 2322.89, "word": " بس", "probability": 0.93798828125}, {"start": 2322.89, "end": 2323.07, "word": " كان", "probability": 0.978515625}, {"start": 2323.07, 
"end": 2323.53, "word": " يزحف", "probability": 0.9617919921875}, {"start": 2323.53, "end": 2324.53, "word": " كسلحفاة", "probability": 0.5969587053571429}, {"start": 2324.53, "end": 2325.07, "word": " كان", "probability": 0.79443359375}, {"start": 2325.07, "end": 2325.41, "word": " ياش", "probability": 0.923095703125}, {"start": 2325.41, "end": 2325.61, "word": " كان", "probability": 0.8271484375}, {"start": 2325.61, "end": 2326.61, "word": " بطيئا", "probability": 0.892578125}, {"start": 2326.61, "end": 2327.89, "word": " بيحيط", "probability": 0.631494140625}, {"start": 2327.89, "end": 2328.27, "word": " و", "probability": 0.83544921875}, {"start": 2328.27, "end": 2328.47, "word": " لما", "probability": 0.6795654296875}, {"start": 2328.47, "end": 2328.93, "word": " بيحيط", "probability": 0.95419921875}, {"start": 2328.93, "end": 2329.13, "word": " شو", "probability": 0.9638671875}, {"start": 2329.13, "end": 2329.99, "word": " بيعمل؟", "probability": 0.94951171875}, {"start": 2329.99, "end": 2330.67, "word": " plasmalogen", "probability": 0.49017333984375}, {"start": 2330.67, "end": 2331.75, "word": " شو", "probability": 0.96044921875}, {"start": 2331.75, "end": 2332.21, "word": " بيعمل؟", "probability": 0.9890625}, {"start": 2332.21, "end": 2332.65, "word": " binds", "probability": 0.77880859375}, {"start": 2332.65, "end": 2332.97, "word": " to", "probability": 0.97900390625}, {"start": 2332.97, "end": 2333.77, "word": " fibrin", "probability": 0.8487955729166666}], "temperature": 1.0}, {"id": 93, "seek": 236404, "start": 2358.84, "end": 2364.04, "text": "الجلطة تتكوّن ومفجّرها في مكان ..", "tokens": [6027, 7435, 1211, 9566, 3660, 6055, 2655, 4117, 2407, 11703, 1863, 4032, 2304, 5172, 7435, 11703, 2288, 11296, 8978, 3714, 41361, 4386], "avg_logprob": -0.49388589029726776, "compression_ratio": 0.921875, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 2358.8399999999997, "end": 2360.24, "word": "الجلطة", "probability": 0.7067626953125}, {"start": 
2360.24, "end": 2361.2, "word": " تتكوّن", "probability": 0.7460123697916666}, {"start": 2361.2, "end": 2362.44, "word": " ومفجّرها", "probability": 0.7196916852678571}, {"start": 2362.44, "end": 2363.12, "word": " في", "probability": 0.55908203125}, {"start": 2363.12, "end": 2363.32, "word": " مكان", "probability": 0.4873046875}, {"start": 2363.32, "end": 2364.04, "word": " ..", "probability": 0.260498046875}], "temperature": 1.0}, {"id": 94, "seek": 239162, "start": 2364.4, "end": 2391.62, "text": "في داخلها ومفجرها موجود وين؟ في داخلها إيش اللي يصير؟ وإحنا بنعمل Formation بيدخل الـ Plasminogen فيه في تركيب الجسم طبعا هذا الكلام بيصير وين؟ في منطقة المزيع، منطقة الحدث بعد ذلك في منطقة المزيع أو الحدث بيطلع من ال tissue المنزوع tissue plasminogen activator", "tokens": [41185, 11778, 47283, 1211, 11296, 4032, 2304, 5172, 7435, 2288, 11296, 3714, 29245, 23328, 4032, 9957, 22807, 8978, 11778, 47283, 1211, 11296, 11933, 1829, 8592, 13672, 1829, 7251, 9381, 13546, 22807, 4032, 28814, 5016, 8315, 44945, 25957, 1211, 10126, 399, 4724, 1829, 3215, 9778, 1211, 2423, 39184, 2149, 296, 2367, 8799, 8978, 3224, 8978, 6055, 31747, 1829, 3555, 25724, 38251, 23032, 3555, 3615, 995, 23758, 2423, 28820, 10943, 4724, 1829, 9381, 13546, 4032, 9957, 22807, 8978, 9154, 9566, 28671, 9673, 11622, 40228, 12399, 9154, 9566, 28671, 21542, 3215, 12984, 39182, 29910, 23275, 8978, 9154, 9566, 28671, 9673, 11622, 40228, 34051, 21542, 3215, 12984, 4724, 1829, 9566, 1211, 3615, 9154, 2423, 12404, 9673, 1863, 11622, 45367, 12404, 499, 296, 2367, 8799, 2430, 1639], "avg_logprob": -0.1793699191837776, "compression_ratio": 1.805084745762712, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2364.4, "end": 2364.76, "word": "في", "probability": 0.58447265625}, {"start": 2364.76, "end": 2365.58, "word": " داخلها", "probability": 0.9827880859375}, {"start": 2365.58, "end": 2366.72, "word": " ومفجرها", "probability": 0.8151041666666666}, {"start": 2366.72, "end": 2367.16, "word": " موجود", 
"probability": 0.9729817708333334}, {"start": 2367.16, "end": 2367.72, "word": " وين؟", "probability": 0.7041015625}, {"start": 2367.72, "end": 2368.06, "word": " في", "probability": 0.8525390625}, {"start": 2368.06, "end": 2368.76, "word": " داخلها", "probability": 0.9881591796875}, {"start": 2368.76, "end": 2369.34, "word": " إيش", "probability": 0.6829630533854166}, {"start": 2369.34, "end": 2369.48, "word": " اللي", "probability": 0.96435546875}, {"start": 2369.48, "end": 2370.42, "word": " يصير؟", "probability": 0.826416015625}, {"start": 2370.42, "end": 2370.72, "word": " وإحنا", "probability": 0.7611083984375}, {"start": 2370.72, "end": 2371.28, "word": " بنعمل", "probability": 0.9640299479166666}, {"start": 2371.28, "end": 2373.1, "word": " Formation", "probability": 0.4403076171875}, {"start": 2373.1, "end": 2374.0, "word": " بيدخل", "probability": 0.8189453125}, {"start": 2374.0, "end": 2374.14, "word": " الـ", "probability": 0.5264892578125}, {"start": 2374.14, "end": 2374.72, "word": " Plasminogen", "probability": 0.8321533203125}, {"start": 2374.72, "end": 2375.06, "word": " فيه", "probability": 0.825439453125}, {"start": 2375.06, "end": 2375.6, "word": " في", "probability": 0.3349609375}, {"start": 2375.6, "end": 2376.36, "word": " تركيب", "probability": 0.986328125}, {"start": 2376.36, "end": 2376.94, "word": " الجسم", "probability": 0.6585693359375}, {"start": 2376.94, "end": 2377.7, "word": " طبعا", "probability": 0.8846435546875}, {"start": 2377.7, "end": 2378.76, "word": " هذا", "probability": 0.496337890625}, {"start": 2378.76, "end": 2379.0, "word": " الكلام", "probability": 0.9122721354166666}, {"start": 2379.0, "end": 2379.28, "word": " بيصير", "probability": 0.85223388671875}, {"start": 2379.28, "end": 2379.84, "word": " وين؟", "probability": 0.9609375}, {"start": 2379.84, "end": 2379.98, "word": " في", "probability": 0.91552734375}, {"start": 2379.98, "end": 2380.34, "word": " منطقة", "probability": 0.9835611979166666}, {"start": 2380.34, 
"end": 2380.88, "word": " المزيع،", "probability": 0.817626953125}, {"start": 2380.88, "end": 2381.18, "word": " منطقة", "probability": 0.9638671875}, {"start": 2381.18, "end": 2381.72, "word": " الحدث", "probability": 0.9694010416666666}, {"start": 2381.72, "end": 2382.68, "word": " بعد", "probability": 0.82470703125}, {"start": 2382.68, "end": 2383.16, "word": " ذلك", "probability": 0.9951171875}, {"start": 2383.16, "end": 2383.74, "word": " في", "probability": 0.81298828125}, {"start": 2383.74, "end": 2384.18, "word": " منطقة", "probability": 0.9938151041666666}, {"start": 2384.18, "end": 2384.7, "word": " المزيع", "probability": 0.9757486979166666}, {"start": 2384.7, "end": 2384.9, "word": " أو", "probability": 0.9033203125}, {"start": 2384.9, "end": 2385.5, "word": " الحدث", "probability": 0.97705078125}, {"start": 2385.5, "end": 2386.66, "word": " بيطلع", "probability": 0.9552734375}, {"start": 2386.66, "end": 2386.86, "word": " من", "probability": 0.99609375}, {"start": 2386.86, "end": 2387.14, "word": " ال", "probability": 0.9521484375}, {"start": 2387.14, "end": 2387.52, "word": " tissue", "probability": 0.8486328125}, {"start": 2387.52, "end": 2388.62, "word": " المنزوع", "probability": 0.959716796875}, {"start": 2388.62, "end": 2389.76, "word": " tissue", "probability": 0.343505859375}, {"start": 2389.76, "end": 2390.8, "word": " plasminogen", "probability": 0.80804443359375}, {"start": 2390.8, "end": 2391.62, "word": " activator", "probability": 0.97900390625}], "temperature": 1.0}, {"id": 95, "seek": 242067, "start": 2392.38, "end": 2420.68, "text": "منشط من البلازمينوجين اللي هو دخل في بناء الجلطة فال tissue plasminogen activator بيشترع بلازمينوجين و بيحوله إلى بلازمين يعني بيحوله من زيوموجين إلى أنزاين من زيوموجين إلى أنزاين فبيحوله إلى أنزاين اللي هو ال complex formation of tissue plasminogen activator مع البلازمينوجين", "tokens": [27842, 8592, 9566, 9154, 29739, 1211, 31377, 2304, 9957, 29245, 9957, 13672, 1829, 31439, 11778, 9778, 1211, 8978, 
44945, 16606, 25724, 1211, 9566, 3660, 6156, 6027, 12404, 499, 296, 2367, 8799, 2430, 1639, 4724, 1829, 8592, 2655, 2288, 3615, 4724, 1211, 31377, 2304, 9957, 29245, 9957, 4032, 4724, 1829, 5016, 12610, 3224, 30731, 4724, 1211, 31377, 2304, 9957, 37495, 22653, 4724, 1829, 5016, 12610, 3224, 9154, 30767, 1829, 20498, 29245, 9957, 30731, 14739, 11622, 995, 9957, 9154, 30767, 1829, 20498, 29245, 9957, 30731, 14739, 11622, 995, 9957, 6156, 21292, 5016, 12610, 3224, 30731, 14739, 11622, 995, 9957, 13672, 1829, 31439, 2423, 3997, 11723, 295, 12404, 499, 296, 2367, 8799, 2430, 1639, 20449, 29739, 1211, 31377, 2304, 9957, 29245, 9957], "avg_logprob": -0.2278645789871613, "compression_ratio": 2.189054726368159, "no_speech_prob": 0.0, "words": [{"start": 2392.38, "end": 2393.1, "word": "منشط", "probability": 0.6529947916666666}, {"start": 2393.1, "end": 2393.4, "word": " من", "probability": 0.81982421875}, {"start": 2393.4, "end": 2395.22, "word": " البلازمينوجين", "probability": 0.7128557477678571}, {"start": 2395.22, "end": 2395.96, "word": " اللي", "probability": 0.491943359375}, {"start": 2395.96, "end": 2396.1, "word": " هو", "probability": 0.485595703125}, {"start": 2396.1, "end": 2396.54, "word": " دخل", "probability": 0.8720703125}, {"start": 2396.54, "end": 2397.38, "word": " في", "probability": 0.5712890625}, {"start": 2397.38, "end": 2399.14, "word": " بناء", "probability": 0.871337890625}, {"start": 2399.14, "end": 2400.12, "word": " الجلطة", "probability": 0.90576171875}, {"start": 2400.12, "end": 2401.34, "word": " فال", "probability": 0.55908203125}, {"start": 2401.34, "end": 2401.58, "word": " tissue", "probability": 0.63427734375}, {"start": 2401.58, "end": 2402.16, "word": " plasminogen", "probability": 0.69586181640625}, {"start": 2402.16, "end": 2402.74, "word": " activator", "probability": 0.95947265625}, {"start": 2402.74, "end": 2403.92, "word": " بيشترع", "probability": 0.7429606119791666}, {"start": 2403.92, "end": 2404.58, "word": " بلازمينوجين", 
"probability": 0.91650390625}, {"start": 2404.58, "end": 2405.3, "word": " و", "probability": 0.59423828125}, {"start": 2405.3, "end": 2405.74, "word": " بيحوله", "probability": 0.79130859375}, {"start": 2405.74, "end": 2405.96, "word": " إلى", "probability": 0.7080078125}, {"start": 2405.96, "end": 2406.72, "word": " بلازمين", "probability": 0.8509765625}, {"start": 2406.72, "end": 2407.42, "word": " يعني", "probability": 0.86328125}, {"start": 2407.42, "end": 2407.94, "word": " بيحوله", "probability": 0.91064453125}, {"start": 2407.94, "end": 2408.12, "word": " من", "probability": 0.9921875}, {"start": 2408.12, "end": 2408.84, "word": " زيوموجين", "probability": 0.80771484375}, {"start": 2408.84, "end": 2409.82, "word": " إلى", "probability": 0.9267578125}, {"start": 2409.82, "end": 2410.48, "word": " أنزاين", "probability": 0.78497314453125}, {"start": 2410.48, "end": 2411.24, "word": " من", "probability": 0.85498046875}, {"start": 2411.24, "end": 2412.0, "word": " زيوموجين", "probability": 0.96640625}, {"start": 2412.0, "end": 2412.54, "word": " إلى", "probability": 0.9501953125}, {"start": 2412.54, "end": 2414.02, "word": " أنزاين", "probability": 0.8634033203125}, {"start": 2414.02, "end": 2414.54, "word": " فبيحوله", "probability": 0.79697265625}, {"start": 2414.54, "end": 2414.68, "word": " إلى", "probability": 0.95263671875}, {"start": 2414.68, "end": 2415.28, "word": " أنزاين", "probability": 0.946044921875}, {"start": 2415.28, "end": 2416.0, "word": " اللي", "probability": 0.892578125}, {"start": 2416.0, "end": 2416.38, "word": " هو", "probability": 0.98681640625}, {"start": 2416.38, "end": 2416.98, "word": " ال", "probability": 0.47216796875}, {"start": 2416.98, "end": 2417.54, "word": " complex", "probability": 0.7216796875}, {"start": 2417.54, "end": 2418.08, "word": " formation", "probability": 0.93798828125}, {"start": 2418.08, "end": 2418.24, "word": " of", "probability": 0.859375}, {"start": 2418.24, "end": 2418.42, "word": " tissue", 
"probability": 0.8935546875}, {"start": 2418.42, "end": 2418.92, "word": " plasminogen", "probability": 0.889892578125}, {"start": 2418.92, "end": 2419.56, "word": " activator", "probability": 0.964111328125}, {"start": 2419.56, "end": 2419.76, "word": " مع", "probability": 0.91552734375}, {"start": 2419.76, "end": 2420.68, "word": " البلازمينوجين", "probability": 0.8739188058035714}], "temperature": 1.0}, {"id": 96, "seek": 244187, "start": 2422.39, "end": 2441.87, "text": "مشي المخصصة اللى تبعته عبارة عن بلازمن البلازمن بقبط مع ال fiber انه بعمل fiber splitting او degradation بشته العلانيل البلازمن على ال fiber اللى بعمله degradation مافهومش هبقى ببساطة البلازمن بيدخل في تركيب الجلطة", "tokens": [2304, 8592, 1829, 9673, 9778, 9381, 9381, 3660, 13672, 7578, 6055, 3555, 34268, 3224, 6225, 3555, 9640, 3660, 18871, 4724, 1211, 31377, 27842, 29739, 1211, 31377, 27842, 4724, 4587, 3555, 9566, 20449, 2423, 12874, 16472, 3224, 4724, 25957, 1211, 12874, 30348, 1975, 2407, 40519, 4724, 8592, 47395, 18863, 1211, 7649, 26895, 29739, 1211, 31377, 27842, 15844, 2423, 12874, 13672, 7578, 4724, 25957, 43761, 40519, 19446, 5172, 3224, 20498, 8592, 8032, 3555, 4587, 7578, 4724, 3555, 3794, 41193, 3660, 29739, 1211, 31377, 27842, 4724, 25708, 9778, 1211, 8978, 6055, 31747, 1829, 3555, 25724, 1211, 9566, 3660], "avg_logprob": -0.34635416076829034, "compression_ratio": 1.8219895287958114, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 2422.39, "end": 2422.91, "word": "مشي", "probability": 0.3853759765625}, {"start": 2422.91, "end": 2423.69, "word": " المخصصة", "probability": 0.791796875}, {"start": 2423.69, "end": 2423.71, "word": " اللى", "probability": 0.673828125}, {"start": 2423.71, "end": 2424.11, "word": " تبعته", "probability": 0.8035888671875}, {"start": 2424.11, "end": 2424.45, "word": " عبارة", "probability": 0.8026123046875}, {"start": 2424.45, "end": 2424.55, "word": " عن", "probability": 0.9892578125}, {"start": 2424.55, "end": 2424.99, "word": " بلازمن", 
"probability": 0.646240234375}, {"start": 2424.99, "end": 2425.63, "word": " البلازمن", "probability": 0.7664794921875}, {"start": 2425.63, "end": 2426.05, "word": " بقبط", "probability": 0.794921875}, {"start": 2426.05, "end": 2426.21, "word": " مع", "probability": 0.9775390625}, {"start": 2426.21, "end": 2426.37, "word": " ال", "probability": 0.703125}, {"start": 2426.37, "end": 2426.67, "word": " fiber", "probability": 0.513671875}, {"start": 2426.67, "end": 2426.89, "word": " انه", "probability": 0.60791015625}, {"start": 2426.89, "end": 2427.21, "word": " بعمل", "probability": 0.9728190104166666}, {"start": 2427.21, "end": 2427.57, "word": " fiber", "probability": 0.80078125}, {"start": 2427.57, "end": 2428.13, "word": " splitting", "probability": 0.89501953125}, {"start": 2428.13, "end": 2428.37, "word": " او", "probability": 0.662109375}, {"start": 2428.37, "end": 2428.95, "word": " degradation", "probability": 0.95361328125}, {"start": 2428.95, "end": 2430.33, "word": " بشته", "probability": 0.3722330729166667}, {"start": 2430.33, "end": 2430.79, "word": " العلانيل", "probability": 0.62542724609375}, {"start": 2430.79, "end": 2431.41, "word": " البلازمن", "probability": 0.8741455078125}, {"start": 2431.41, "end": 2432.41, "word": " على", "probability": 0.393310546875}, {"start": 2432.41, "end": 2432.55, "word": " ال", "probability": 0.7724609375}, {"start": 2432.55, "end": 2432.83, "word": " fiber", "probability": 0.92236328125}, {"start": 2432.83, "end": 2432.95, "word": " اللى", "probability": 0.853759765625}, {"start": 2432.95, "end": 2433.33, "word": " بعمله", "probability": 0.9674479166666666}, {"start": 2433.33, "end": 2434.01, "word": " degradation", "probability": 0.908203125}, {"start": 2434.01, "end": 2436.03, "word": " مافهومش", "probability": 0.7152099609375}, {"start": 2436.03, "end": 2436.33, "word": " هبقى", "probability": 0.6553955078125}, {"start": 2436.33, "end": 2438.15, "word": " ببساطة", "probability": 0.675732421875}, {"start": 
2438.15, "end": 2439.21, "word": " البلازمن", "probability": 0.8680419921875}, {"start": 2439.21, "end": 2440.39, "word": " بيدخل", "probability": 0.80572509765625}, {"start": 2440.39, "end": 2440.57, "word": " في", "probability": 0.8037109375}, {"start": 2440.57, "end": 2441.09, "word": " تركيب", "probability": 0.975341796875}, {"start": 2441.09, "end": 2441.87, "word": " الجلطة", "probability": 0.9141845703125}], "temperature": 1.0}, {"id": 97, "seek": 246891, "start": 2443.21, "end": 2468.91, "text": "في داخل الجلطة ماشي في نفس الوقت التشو بلازميرج اكتيفيتر هو بنى اكتيفيتر بنشط بطلع من التشو الممجوة من ال damage تشو وإيش بيعمل بروح و بنشط البلازميرج و بيحول لبلازمير اللى بلازمير بيشتغل على ال fibrin و بيحوله أو ال fibrin ال monomer و بيحوله إلى أيش؟ ال fibrin degradation ماشي حدده شوية؟", "tokens": [41185, 11778, 47283, 1211, 25724, 1211, 9566, 3660, 3714, 33599, 1829, 8978, 8717, 36178, 2423, 30543, 2655, 16712, 8592, 2407, 4724, 1211, 31377, 2304, 13546, 7435, 1975, 4117, 31371, 5172, 36081, 2288, 31439, 44945, 7578, 1975, 4117, 31371, 5172, 36081, 2288, 44945, 8592, 9566, 4724, 9566, 1211, 3615, 9154, 16712, 8592, 2407, 9673, 2304, 7435, 2407, 3660, 9154, 2423, 4344, 6055, 8592, 2407, 4032, 28814, 1829, 8592, 4724, 1829, 25957, 1211, 4724, 32887, 5016, 4032, 44945, 8592, 9566, 29739, 1211, 31377, 2304, 13546, 7435, 4032, 4724, 1829, 5016, 12610, 5296, 3555, 15040, 11622, 2304, 13546, 13672, 7578, 4724, 15040, 11622, 2304, 13546, 4724, 1829, 8592, 2655, 17082, 1211, 15844, 2423, 283, 6414, 259, 4032, 4724, 1829, 5016, 12610, 3224, 34051, 2423, 283, 6414, 259, 2423, 1108, 14301, 4032, 4724, 1829, 5016, 12610, 3224, 30731, 36632, 8592, 22807, 2423, 283, 6414, 259, 40519, 3714, 33599, 1829, 11331, 3215, 3215, 3224, 13412, 2407, 10632, 22807], "avg_logprob": -0.2987013105061147, "compression_ratio": 2.0041322314049586, "no_speech_prob": 1.7285346984863281e-06, "words": [{"start": 2443.21, "end": 2443.53, "word": "في", "probability": 0.560546875}, {"start": 
2443.53, "end": 2444.47, "word": " داخل", "probability": 0.9593098958333334}, {"start": 2444.47, "end": 2445.91, "word": " الجلطة", "probability": 0.9801025390625}, {"start": 2445.91, "end": 2446.59, "word": " ماشي", "probability": 0.7632649739583334}, {"start": 2446.59, "end": 2447.31, "word": " في", "probability": 0.58203125}, {"start": 2447.31, "end": 2447.47, "word": " نفس", "probability": 0.99560546875}, {"start": 2447.47, "end": 2447.77, "word": " الوقت", "probability": 0.9567057291666666}, {"start": 2447.77, "end": 2448.21, "word": " التشو", "probability": 0.695556640625}, {"start": 2448.21, "end": 2448.69, "word": " بلازميرج", "probability": 0.7022298177083334}, {"start": 2448.69, "end": 2449.21, "word": " اكتيفيتر", "probability": 0.691162109375}, {"start": 2449.21, "end": 2449.35, "word": " هو", "probability": 0.91259765625}, {"start": 2449.35, "end": 2449.53, "word": " بنى", "probability": 0.40771484375}, {"start": 2449.53, "end": 2450.19, "word": " اكتيفيتر", "probability": 0.8349202473958334}, {"start": 2450.19, "end": 2450.79, "word": " بنشط", "probability": 0.7596028645833334}, {"start": 2450.79, "end": 2451.41, "word": " بطلع", "probability": 0.696258544921875}, {"start": 2451.41, "end": 2451.57, "word": " من", "probability": 0.98779296875}, {"start": 2451.57, "end": 2451.97, "word": " التشو", "probability": 0.8673502604166666}, {"start": 2451.97, "end": 2452.43, "word": " الممجوة", "probability": 0.600146484375}, {"start": 2452.43, "end": 2452.57, "word": " من", "probability": 0.84423828125}, {"start": 2452.57, "end": 2452.69, "word": " ال", "probability": 0.85205078125}, {"start": 2452.69, "end": 2452.95, "word": " damage", "probability": 0.80224609375}, {"start": 2452.95, "end": 2453.39, "word": " تشو", "probability": 0.7969563802083334}, {"start": 2453.39, "end": 2454.07, "word": " وإيش", "probability": 0.80389404296875}, {"start": 2454.07, "end": 2454.57, "word": " بيعمل", "probability": 0.93359375}, {"start": 2454.57, "end": 2455.19, "word": " 
بروح", "probability": 0.66064453125}, {"start": 2455.19, "end": 2455.33, "word": " و", "probability": 0.5712890625}, {"start": 2455.33, "end": 2455.79, "word": " بنشط", "probability": 0.728271484375}, {"start": 2455.79, "end": 2456.31, "word": " البلازميرج", "probability": 0.8731282552083334}, {"start": 2456.31, "end": 2456.43, "word": " و", "probability": 0.78271484375}, {"start": 2456.43, "end": 2456.69, "word": " بيحول", "probability": 0.8236083984375}, {"start": 2456.69, "end": 2457.27, "word": " لبلازمير", "probability": 0.712158203125}, {"start": 2457.27, "end": 2457.39, "word": " اللى", "probability": 0.48748779296875}, {"start": 2457.39, "end": 2457.81, "word": " بلازمير", "probability": 0.9099609375}, {"start": 2457.81, "end": 2458.19, "word": " بيشتغل", "probability": 0.937255859375}, {"start": 2458.19, "end": 2458.31, "word": " على", "probability": 0.75341796875}, {"start": 2458.31, "end": 2458.43, "word": " ال", "probability": 0.69580078125}, {"start": 2458.43, "end": 2458.89, "word": " fibrin", "probability": 0.7147623697916666}, {"start": 2458.89, "end": 2459.33, "word": " و", "probability": 0.83642578125}, {"start": 2459.33, "end": 2459.89, "word": " بيحوله", "probability": 0.92451171875}, {"start": 2459.89, "end": 2460.51, "word": " أو", "probability": 0.52734375}, {"start": 2460.51, "end": 2460.67, "word": " ال", "probability": 0.9189453125}, {"start": 2460.67, "end": 2461.01, "word": " fibrin", "probability": 0.7974446614583334}, {"start": 2461.01, "end": 2461.07, "word": " ال", "probability": 0.1912841796875}, {"start": 2461.07, "end": 2461.39, "word": " monomer", "probability": 0.7626953125}, {"start": 2461.39, "end": 2461.49, "word": " و", "probability": 0.89599609375}, {"start": 2461.49, "end": 2461.87, "word": " بيحوله", "probability": 0.94384765625}, {"start": 2461.87, "end": 2462.07, "word": " إلى", "probability": 0.6611328125}, {"start": 2462.07, "end": 2462.57, "word": " أيش؟", "probability": 0.5415852864583334}, {"start": 2462.57, "end": 
2462.69, "word": " ال", "probability": 0.43603515625}, {"start": 2462.69, "end": 2463.71, "word": " fibrin", "probability": 0.779052734375}, {"start": 2463.71, "end": 2464.07, "word": " degradation", "probability": 0.88720703125}, {"start": 2464.07, "end": 2466.25, "word": " ماشي", "probability": 0.8291015625}, {"start": 2466.25, "end": 2468.51, "word": " حدده", "probability": 0.74005126953125}, {"start": 2468.51, "end": 2468.91, "word": " شوية؟", "probability": 0.954345703125}], "temperature": 1.0}, {"id": 98, "seek": 249846, "start": 2469.67, "end": 2498.47, "text": "ما هو الـ Plasminogen؟ عندنا هنجل الميكانيزمة شغالة، تلاقوش ما هو الـ Plasminogen؟ هو عبارة عن protein أمينواسد يصنع في الـ liver نكلارويتر 94 ألف دالتر، ماشي، اللايكوبرتين موجود في البلازمة normally هو عبارة عن زيموجين يعني inactive، يعني inert، ماشي، موجود normally في البلازمة البلازمين", "tokens": [15042, 31439, 2423, 39184, 2149, 296, 2367, 8799, 22807, 6225, 41260, 8315, 8032, 1863, 7435, 1211, 9673, 1829, 41361, 1829, 11622, 46007, 13412, 17082, 6027, 3660, 12399, 6055, 15040, 4587, 2407, 8592, 19446, 31439, 2423, 39184, 2149, 296, 2367, 8799, 22807, 31439, 6225, 3555, 9640, 3660, 18871, 7944, 5551, 2304, 9957, 2407, 32277, 3215, 7251, 9381, 1863, 3615, 8978, 2423, 39184, 15019, 8717, 4117, 1211, 9640, 2407, 36081, 2288, 30849, 5551, 46538, 11778, 6027, 2655, 2288, 12399, 3714, 33599, 1829, 12399, 13672, 47302, 4117, 2407, 26890, 2655, 9957, 3714, 29245, 23328, 8978, 29739, 1211, 31377, 46007, 5646, 31439, 6225, 3555, 9640, 3660, 18871, 30767, 1829, 2304, 29245, 9957, 37495, 22653, 294, 12596, 12399, 37495, 22653, 25832, 12399, 3714, 33599, 1829, 12399, 3714, 29245, 23328, 5646, 8978, 29739, 1211, 31377, 46007, 29739, 1211, 31377, 2304, 9957], "avg_logprob": -0.3508731722831726, "compression_ratio": 1.8870967741935485, "no_speech_prob": 0.0, "words": [{"start": 2469.67, "end": 2469.89, "word": "ما", "probability": 0.8994140625}, {"start": 2469.89, "end": 2470.03, "word": " هو", "probability": 
0.958984375}, {"start": 2470.03, "end": 2470.21, "word": " الـ", "probability": 0.793701171875}, {"start": 2470.21, "end": 2470.61, "word": " Plasminogen؟", "probability": 0.76357421875}, {"start": 2470.61, "end": 2470.81, "word": " عندنا", "probability": 0.4775187174479167}, {"start": 2470.81, "end": 2471.17, "word": " هنجل", "probability": 0.7252197265625}, {"start": 2471.17, "end": 2471.77, "word": " الميكانيزمة", "probability": 0.7204996744791666}, {"start": 2471.77, "end": 2472.23, "word": " شغالة،", "probability": 0.5885498046875}, {"start": 2472.23, "end": 2472.77, "word": " تلاقوش", "probability": 0.83955078125}, {"start": 2472.77, "end": 2473.53, "word": " ما", "probability": 0.68212890625}, {"start": 2473.53, "end": 2473.73, "word": " هو", "probability": 0.99609375}, {"start": 2473.73, "end": 2473.93, "word": " الـ", "probability": 0.88671875}, {"start": 2473.93, "end": 2475.51, "word": " Plasminogen؟", "probability": 0.9212890625}, {"start": 2475.51, "end": 2475.75, "word": " هو", "probability": 0.97021484375}, {"start": 2475.75, "end": 2475.91, "word": " عبارة", "probability": 0.82720947265625}, {"start": 2475.91, "end": 2476.01, "word": " عن", "probability": 0.99853515625}, {"start": 2476.01, "end": 2476.45, "word": " protein", "probability": 0.501953125}, {"start": 2476.45, "end": 2477.95, "word": " أمينواسد", "probability": 0.7780354817708334}, {"start": 2477.95, "end": 2479.07, "word": " يصنع", "probability": 0.919677734375}, {"start": 2479.07, "end": 2479.25, "word": " في", "probability": 0.97314453125}, {"start": 2479.25, "end": 2479.33, "word": " الـ", "probability": 0.4468994140625}, {"start": 2479.33, "end": 2479.59, "word": " liver", "probability": 0.8330078125}, {"start": 2479.59, "end": 2481.01, "word": " نكلارويتر", "probability": 0.3975917271205357}, {"start": 2481.01, "end": 2481.51, "word": " 94", "probability": 0.57861328125}, {"start": 2481.51, "end": 2481.77, "word": " ألف", "probability": 0.56573486328125}, {"start": 2481.77, "end": 
2482.55, "word": " دالتر،", "probability": 0.739208984375}, {"start": 2482.55, "end": 2483.21, "word": " ماشي،", "probability": 0.68426513671875}, {"start": 2483.21, "end": 2484.17, "word": " اللايكوبرتين", "probability": 0.6631905691964286}, {"start": 2484.17, "end": 2486.25, "word": " موجود", "probability": 0.8605143229166666}, {"start": 2486.25, "end": 2486.41, "word": " في", "probability": 0.96826171875}, {"start": 2486.41, "end": 2487.05, "word": " البلازمة", "probability": 0.7476806640625}, {"start": 2487.05, "end": 2488.21, "word": " normally", "probability": 0.458740234375}, {"start": 2488.21, "end": 2489.29, "word": " هو", "probability": 0.9150390625}, {"start": 2489.29, "end": 2489.49, "word": " عبارة", "probability": 0.9947509765625}, {"start": 2489.49, "end": 2489.81, "word": " عن", "probability": 0.9970703125}, {"start": 2489.81, "end": 2491.23, "word": " زيموجين", "probability": 0.68740234375}, {"start": 2491.23, "end": 2492.21, "word": " يعني", "probability": 0.872314453125}, {"start": 2492.21, "end": 2493.37, "word": " inactive،", "probability": 0.7205403645833334}, {"start": 2493.37, "end": 2493.67, "word": " يعني", "probability": 0.980224609375}, {"start": 2493.67, "end": 2494.43, "word": " inert،", "probability": 0.897705078125}, {"start": 2494.43, "end": 2495.43, "word": " ماشي،", "probability": 0.8138427734375}, {"start": 2495.43, "end": 2495.79, "word": " موجود", "probability": 0.9847005208333334}, {"start": 2495.79, "end": 2496.29, "word": " normally", "probability": 0.9072265625}, {"start": 2496.29, "end": 2496.51, "word": " في", "probability": 0.98193359375}, {"start": 2496.51, "end": 2497.09, "word": " البلازمة", "probability": 0.9608154296875}, {"start": 2497.09, "end": 2498.47, "word": " البلازمين", "probability": 0.794482421875}], "temperature": 1.0}, {"id": 99, "seek": 252787, "start": 2499.59, "end": 2527.87, "text": "مش موجود، ماشي؟ لإنما الـ Plasminogen هو موجود لإن هو عبارة عن substrate، ماشي؟ طبعاً بيتحول زي ما اتفقنا إلى 
Trypsin-like Serine Protease اللي هو الـ Plasmin وهو اللي مشتغل على الغضب، ماشي، following injury يعني بعد المزر، شو اللي بيصير؟ it binds، مين اللي binds؟", "tokens": [2304, 8592, 3714, 29245, 23328, 12399, 3714, 33599, 1829, 22807, 5296, 28814, 1863, 15042, 2423, 39184, 2149, 296, 2367, 8799, 31439, 3714, 29245, 23328, 5296, 28814, 1863, 31439, 6225, 3555, 9640, 3660, 18871, 27585, 12399, 3714, 33599, 1829, 22807, 23032, 3555, 3615, 995, 14111, 4724, 36081, 5016, 12610, 30767, 1829, 19446, 1975, 2655, 5172, 4587, 8315, 30731, 6526, 1878, 259, 12, 4092, 4210, 533, 43371, 651, 13672, 1829, 31439, 2423, 39184, 2149, 296, 2367, 37037, 2407, 13672, 1829, 37893, 2655, 17082, 1211, 15844, 6024, 118, 11242, 3555, 12399, 3714, 33599, 1829, 12399, 3480, 10454, 37495, 22653, 39182, 9673, 11622, 2288, 12399, 13412, 2407, 13672, 1829, 4724, 1829, 9381, 13546, 22807, 309, 41515, 12399, 3714, 9957, 13672, 1829, 41515, 22807], "avg_logprob": -0.2591145771245162, "compression_ratio": 1.5992063492063493, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 2499.59, "end": 2499.89, "word": "مش", "probability": 0.6324462890625}, {"start": 2499.89, "end": 2501.91, "word": " موجود،", "probability": 0.7614593505859375}, {"start": 2501.91, "end": 2502.77, "word": " ماشي؟", "probability": 0.73468017578125}, {"start": 2502.77, "end": 2503.27, "word": " لإنما", "probability": 0.59429931640625}, {"start": 2503.27, "end": 2503.47, "word": " الـ", "probability": 0.767822265625}, {"start": 2503.47, "end": 2504.19, "word": " Plasminogen", "probability": 0.730712890625}, {"start": 2504.19, "end": 2505.33, "word": " هو", "probability": 0.78759765625}, {"start": 2505.33, "end": 2505.85, "word": " موجود", "probability": 0.9903971354166666}, {"start": 2505.85, "end": 2506.87, "word": " لإن", "probability": 0.7156575520833334}, {"start": 2506.87, "end": 2507.17, "word": " هو", "probability": 0.85107421875}, {"start": 2507.17, "end": 2507.45, "word": " عبارة", "probability": 0.9517822265625}, 
{"start": 2507.45, "end": 2507.63, "word": " عن", "probability": 0.99755859375}, {"start": 2507.63, "end": 2509.11, "word": " substrate،", "probability": 0.73779296875}, {"start": 2509.11, "end": 2510.79, "word": " ماشي؟", "probability": 0.8878173828125}, {"start": 2510.79, "end": 2511.55, "word": " طبعاً", "probability": 0.863037109375}, {"start": 2511.55, "end": 2512.05, "word": " بيتحول", "probability": 0.9669189453125}, {"start": 2512.05, "end": 2512.23, "word": " زي", "probability": 0.89990234375}, {"start": 2512.23, "end": 2512.31, "word": " ما", "probability": 0.98779296875}, {"start": 2512.31, "end": 2512.79, "word": " اتفقنا", "probability": 0.97626953125}, {"start": 2512.79, "end": 2512.99, "word": " إلى", "probability": 0.8466796875}, {"start": 2512.99, "end": 2513.43, "word": " Trypsin", "probability": 0.7425130208333334}, {"start": 2513.43, "end": 2513.61, "word": "-like", "probability": 0.756591796875}, {"start": 2513.61, "end": 2513.99, "word": " Serine", "probability": 0.690185546875}, {"start": 2513.99, "end": 2514.55, "word": " Protease", "probability": 0.865966796875}, {"start": 2514.55, "end": 2515.13, "word": " اللي", "probability": 0.87646484375}, {"start": 2515.13, "end": 2515.25, "word": " هو", "probability": 0.97265625}, {"start": 2515.25, "end": 2515.39, "word": " الـ", "probability": 0.5706787109375}, {"start": 2515.39, "end": 2515.89, "word": " Plasmin", "probability": 0.74951171875}, {"start": 2515.89, "end": 2516.55, "word": " وهو", "probability": 0.8408203125}, {"start": 2516.55, "end": 2516.67, "word": " اللي", "probability": 0.97607421875}, {"start": 2516.67, "end": 2517.59, "word": " مشتغل", "probability": 0.86865234375}, {"start": 2517.59, "end": 2518.01, "word": " على", "probability": 0.9365234375}, {"start": 2518.01, "end": 2519.41, "word": " الغضب،", "probability": 0.5950439453125}, {"start": 2519.41, "end": 2521.43, "word": " ماشي،", "probability": 0.735748291015625}, {"start": 2521.43, "end": 2522.41, "word": " following", 
"probability": 0.53271484375}, {"start": 2522.41, "end": 2522.99, "word": " injury", "probability": 0.91552734375}, {"start": 2522.99, "end": 2523.29, "word": " يعني", "probability": 0.865966796875}, {"start": 2523.29, "end": 2523.51, "word": " بعد", "probability": 0.97314453125}, {"start": 2523.51, "end": 2524.53, "word": " المزر،", "probability": 0.65594482421875}, {"start": 2524.53, "end": 2524.71, "word": " شو", "probability": 0.907470703125}, {"start": 2524.71, "end": 2524.81, "word": " اللي", "probability": 0.98486328125}, {"start": 2524.81, "end": 2525.71, "word": " بيصير؟", "probability": 0.95810546875}, {"start": 2525.71, "end": 2525.91, "word": " it", "probability": 0.63525390625}, {"start": 2525.91, "end": 2526.59, "word": " binds،", "probability": 0.741455078125}, {"start": 2526.59, "end": 2527.05, "word": " مين", "probability": 0.70751953125}, {"start": 2527.05, "end": 2527.17, "word": " اللي", "probability": 0.989501953125}, {"start": 2527.17, "end": 2527.87, "word": " binds؟", "probability": 0.894775390625}], "temperature": 1.0}, {"id": 100, "seek": 255046, "start": 2529.46, "end": 2550.46, "text": "البلايزمينوجين تتبع إلى الفيبرين خلال تطبيق التجارب ترتبط مع الفيبرين المونيكول أثناء تكوين الجلطة مع بلايزمينوجين اكتيفيتر هو من المنشط طبعه هو وميه؟ كلهم بيخشوا أين؟ في تكوين الجلطة", "tokens": [6027, 3555, 15040, 1829, 11622, 2304, 9957, 29245, 9957, 6055, 2655, 3555, 3615, 30731, 27188, 1829, 26890, 9957, 16490, 1211, 6027, 6055, 9566, 21292, 4587, 16712, 7435, 9640, 3555, 6055, 43500, 3555, 9566, 20449, 27188, 1829, 26890, 9957, 9673, 11536, 1829, 4117, 12610, 5551, 12984, 1863, 16606, 6055, 4117, 2407, 9957, 25724, 1211, 9566, 3660, 20449, 4724, 15040, 1829, 11622, 2304, 9957, 29245, 9957, 1975, 4117, 31371, 5172, 36081, 2288, 31439, 9154, 9673, 1863, 8592, 9566, 23032, 3555, 3615, 3224, 31439, 4032, 2304, 1829, 3224, 22807, 28242, 16095, 4724, 1829, 9778, 8592, 14407, 5551, 9957, 22807, 8978, 6055, 4117, 2407, 9957, 25724, 1211, 9566, 3660], 
"avg_logprob": -0.5229953071981106, "compression_ratio": 1.988235294117647, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2529.46, "end": 2530.34, "word": "البلايزمينوجين", "probability": 0.6604953342013888}, {"start": 2530.34, "end": 2531.16, "word": " تتبع", "probability": 0.2203826904296875}, {"start": 2531.16, "end": 2531.44, "word": " إلى", "probability": 0.52685546875}, {"start": 2531.44, "end": 2532.14, "word": " الفيبرين", "probability": 0.562042236328125}, {"start": 2532.14, "end": 2533.44, "word": " خلال", "probability": 0.694580078125}, {"start": 2533.44, "end": 2534.4, "word": " تطبيق", "probability": 0.50286865234375}, {"start": 2534.4, "end": 2534.42, "word": " التجارب", "probability": 0.3602752685546875}, {"start": 2534.42, "end": 2535.58, "word": " ترتبط", "probability": 0.6472930908203125}, {"start": 2535.58, "end": 2535.8, "word": " مع", "probability": 0.74365234375}, {"start": 2535.8, "end": 2537.76, "word": " الفيبرين", "probability": 0.8731689453125}, {"start": 2537.76, "end": 2540.96, "word": " المونيكول", "probability": 0.560302734375}, {"start": 2540.96, "end": 2541.54, "word": " أثناء", "probability": 0.819580078125}, {"start": 2541.54, "end": 2541.92, "word": " تكوين", "probability": 0.9693603515625}, {"start": 2541.92, "end": 2542.58, "word": " الجلطة", "probability": 0.891357421875}, {"start": 2542.58, "end": 2543.06, "word": " مع", "probability": 0.1253662109375}, {"start": 2543.06, "end": 2543.98, "word": " بلايزمينوجين", "probability": 0.8701171875}, {"start": 2543.98, "end": 2544.58, "word": " اكتيفيتر", "probability": 0.626068115234375}, {"start": 2544.58, "end": 2544.8, "word": " هو", "probability": 0.55126953125}, {"start": 2544.8, "end": 2545.0, "word": " من", "probability": 0.2132568359375}, {"start": 2545.0, "end": 2545.76, "word": " المنشط", "probability": 0.658447265625}, {"start": 2545.76, "end": 2546.16, "word": " طبعه", "probability": 0.798583984375}, {"start": 2546.16, "end": 2546.36, "word": " هو", 
"probability": 0.72802734375}, {"start": 2546.36, "end": 2547.48, "word": " وميه؟", "probability": 0.579248046875}, {"start": 2547.48, "end": 2548.12, "word": " كلهم", "probability": 0.714599609375}, {"start": 2548.12, "end": 2548.56, "word": " بيخشوا", "probability": 0.8259765625}, {"start": 2548.56, "end": 2549.2, "word": " أين؟", "probability": 0.7589518229166666}, {"start": 2549.2, "end": 2549.5, "word": " في", "probability": 0.88330078125}, {"start": 2549.5, "end": 2549.88, "word": " تكوين", "probability": 0.9857177734375}, {"start": 2549.88, "end": 2550.46, "word": " الجلطة", "probability": 0.9876708984375}], "temperature": 1.0}, {"id": 101, "seek": 257073, "start": 2552.16, "end": 2570.74, "text": "وبالتالي Plasminogen once needed يوم ما فضيع it converts Plasminogen to mean من الداخل، من داخل الجمطة عشان هيك سهل جدا تفجيرها يا شباب، ماشي؟ لإنه زي ما كتبتكوا تدمير ذاتي أحسن", "tokens": [37746, 6027, 2655, 6027, 1829, 2149, 296, 2367, 8799, 1564, 2978, 7251, 20498, 19446, 6156, 11242, 40228, 309, 38874, 2149, 296, 2367, 8799, 281, 914, 9154, 32748, 47283, 1211, 12399, 9154, 11778, 47283, 1211, 25724, 2304, 9566, 3660, 6225, 8592, 7649, 39896, 4117, 8608, 3224, 1211, 10874, 28259, 6055, 5172, 7435, 13546, 11296, 35186, 13412, 3555, 16758, 12399, 3714, 33599, 1829, 22807, 5296, 28814, 1863, 3224, 30767, 1829, 19446, 9122, 2655, 3555, 2655, 4117, 14407, 6055, 3215, 2304, 13546, 29910, 9307, 1829, 5551, 5016, 3794, 1863], "avg_logprob": -0.340158053513231, "compression_ratio": 1.4248704663212435, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2552.16, "end": 2552.74, "word": "وبالتالي", "probability": 0.9001953125}, {"start": 2552.74, "end": 2553.42, "word": " Plasminogen", "probability": 0.73553466796875}, {"start": 2553.42, "end": 2554.64, "word": " once", "probability": 0.391357421875}, {"start": 2554.64, "end": 2555.16, "word": " needed", "probability": 0.791015625}, {"start": 2555.16, "end": 2555.8, "word": " يوم", "probability": 
0.835693359375}, {"start": 2555.8, "end": 2555.88, "word": " ما", "probability": 0.72216796875}, {"start": 2555.88, "end": 2556.32, "word": " فضيع", "probability": 0.447509765625}, {"start": 2556.32, "end": 2556.88, "word": " it", "probability": 0.475830078125}, {"start": 2556.88, "end": 2557.36, "word": " converts", "probability": 0.6220703125}, {"start": 2557.36, "end": 2558.02, "word": " Plasminogen", "probability": 0.924072265625}, {"start": 2558.02, "end": 2558.24, "word": " to", "probability": 0.7041015625}, {"start": 2558.24, "end": 2558.56, "word": " mean", "probability": 0.314697265625}, {"start": 2558.56, "end": 2560.22, "word": " من", "probability": 0.75537109375}, {"start": 2560.22, "end": 2561.04, "word": " الداخل،", "probability": 0.80963134765625}, {"start": 2561.04, "end": 2561.18, "word": " من", "probability": 0.962890625}, {"start": 2561.18, "end": 2561.64, "word": " داخل", "probability": 0.8860677083333334}, {"start": 2561.64, "end": 2562.3, "word": " الجمطة", "probability": 0.6964111328125}, {"start": 2562.3, "end": 2562.74, "word": " عشان", "probability": 0.892578125}, {"start": 2562.74, "end": 2562.92, "word": " هيك", "probability": 0.735107421875}, {"start": 2562.92, "end": 2563.12, "word": " سهل", "probability": 0.98876953125}, {"start": 2563.12, "end": 2563.38, "word": " جدا", "probability": 0.954833984375}, {"start": 2563.38, "end": 2563.94, "word": " تفجيرها", "probability": 0.8966796875}, {"start": 2563.94, "end": 2564.06, "word": " يا", "probability": 0.2607421875}, {"start": 2564.06, "end": 2564.66, "word": " شباب،", "probability": 0.837646484375}, {"start": 2564.66, "end": 2565.4, "word": " ماشي؟", "probability": 0.77716064453125}, {"start": 2565.4, "end": 2565.86, "word": " لإنه", "probability": 0.71234130859375}, {"start": 2565.86, "end": 2566.16, "word": " زي", "probability": 0.86474609375}, {"start": 2566.16, "end": 2566.24, "word": " ما", "probability": 0.9677734375}, {"start": 2566.24, "end": 2566.74, "word": " كتبتكوا", 
"probability": 0.5997721354166666}, {"start": 2566.74, "end": 2569.0, "word": " تدمير", "probability": 0.56964111328125}, {"start": 2569.0, "end": 2570.24, "word": " ذاتي", "probability": 0.9054361979166666}, {"start": 2570.24, "end": 2570.74, "word": " أحسن", "probability": 0.90283203125}], "temperature": 1.0}, {"id": 102, "seek": 259929, "start": 2571.61, "end": 2599.29, "text": "التركيز والتركيز يتم تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشكيله بالإنفلاميشن تشك", "tokens": [6027, 2655, 31747, 1829, 11622, 16070, 2655, 31747, 1829, 11622, 7251, 39237, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 1211, 10943, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 
3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117, 26895, 3224, 20666, 28814, 1863, 5172, 37440, 1829, 8592, 1863, 6055, 8592, 4117], "avg_logprob": -0.159722228580051, "compression_ratio": 9.19718309859155, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2571.61, "end": 2572.41, "word": "التركيز", "probability": 0.574072265625}, {"start": 2572.41, "end": 2573.15, "word": " والتركيز", "probability": 0.64810791015625}, {"start": 2573.15, "end": 2573.49, "word": " يتم", "probability": 0.362548828125}, {"start": 2573.49, "end": 2574.61, "word": " تشكيله", "probability": 0.5263427734375}, {"start": 2574.61, "end": 2575.33, "word": " بالإنفلاميشن", "probability": 0.6361490885416666}, {"start": 2575.33, "end": 2576.65, "word": " تشكيله", "probability": 0.44822998046875}, {"start": 2576.65, "end": 2576.91, "word": " بالإنفلاميشن", "probability": 0.93145751953125}, {"start": 2576.91, "end": 2577.19, "word": " تشكيله", "probability": 0.80546875}, {"start": 2577.19, "end": 2577.19, "word": " بالإنفلاميشن", "probability": 0.95330810546875}, {"start": 2577.19, "end": 2577.19, "word": " تشكيله", "probability": 0.81708984375}, {"start": 2577.19, "end": 2577.19, "word": " بالإنفلاميشن", "probability": 0.959716796875}, {"start": 2577.19, "end": 2577.19, "word": " تشكيله", "probability": 0.84560546875}, {"start": 2577.19, "end": 2577.99, "word": " بالإنفلاميشن", "probability": 0.95989990234375}, {"start": 2577.99, "end": 2578.71, "word": " تشكيله", "probability": 0.89443359375}, {"start": 2578.71, "end": 2579.33, "word": " بالإنفلاميشن", "probability": 0.95880126953125}, {"start": 2579.33, "end": 2579.67, "word": " تشكيله", "probability": 0.93779296875}, {"start": 2579.67, "end": 2580.99, "word": " بالإنفلاميشن", "probability": 0.96197509765625}, {"start": 2580.99, "end": 2581.09, "word": " تشكيله", "probability": 0.961328125}, {"start": 2581.09, "end": 2583.21, "word": " بالإنفلاميشن", "probability": 0.9644775390625}, {"start": 2583.21, "end": 
2583.81, "word": " تشكيله", "probability": 0.9728515625}, {"start": 2583.81, "end": 2584.55, "word": " بالإنفلاميشن", "probability": 0.9676513671875}, {"start": 2584.55, "end": 2584.87, "word": " تشكيله", "probability": 0.9775390625}, {"start": 2584.87, "end": 2585.55, "word": " بالإنفلاميشن", "probability": 0.970703125}, {"start": 2585.55, "end": 2585.91, "word": " تشكيله", "probability": 0.981640625}, {"start": 2585.91, "end": 2586.63, "word": " بالإنفلاميشن", "probability": 0.9737548828125}, {"start": 2586.63, "end": 2586.81, "word": " تشكيله", "probability": 0.9841796875}, {"start": 2586.81, "end": 2587.83, "word": " بالإنفلاميشن", "probability": 0.975830078125}, {"start": 2587.83, "end": 2588.13, "word": " تشكيله", "probability": 0.985546875}, {"start": 2588.13, "end": 2589.63, "word": " بالإنفلاميشن", "probability": 0.9781494140625}, {"start": 2589.63, "end": 2589.99, "word": " تشكيله", "probability": 0.9875}, {"start": 2589.99, "end": 2591.83, "word": " بالإنفلاميشن", "probability": 0.9801025390625}, {"start": 2591.83, "end": 2592.37, "word": " تشكيله", "probability": 0.98857421875}, {"start": 2592.37, "end": 2594.43, "word": " بالإنفلاميشن", "probability": 0.98114013671875}, {"start": 2594.43, "end": 2595.67, "word": " تشكيله", "probability": 0.9890625}, {"start": 2595.67, "end": 2597.89, "word": " بالإنفلاميشن", "probability": 0.98248291015625}, {"start": 2597.89, "end": 2599.29, "word": " تشك", "probability": 0.9861653645833334}], "temperature": 1.0}, {"id": 103, "seek": 261515, "start": 2600.27, "end": 2615.15, "text": "مع تزيادة التركيز على البلازمينوجين بشكل كبير. 
ماذا يحصل يا شباب؟ جالوا once activated البلازمينوجين، كل اللي بيصير عشان يتحول للـ enzymatic form، انه بيفرد، بيصيرله folding.", "tokens": [2304, 3615, 6055, 11622, 1829, 18513, 3660, 16712, 31747, 1829, 11622, 15844, 29739, 1211, 31377, 2304, 9957, 2407, 7435, 9957, 4724, 8592, 28820, 9122, 3555, 13546, 13, 3714, 45636, 995, 7251, 5016, 36520, 35186, 13412, 3555, 16758, 22807, 10874, 6027, 14407, 1564, 18157, 29739, 1211, 31377, 2304, 9957, 2407, 7435, 9957, 12399, 28242, 13672, 1829, 4724, 1829, 9381, 13546, 6225, 8592, 7649, 7251, 2655, 5016, 12610, 24976, 39184, 16272, 25915, 1254, 12399, 16472, 3224, 4724, 33911, 2288, 3215, 12399, 4724, 1829, 9381, 13546, 43761, 25335, 13], "avg_logprob": -0.5657327558802462, "compression_ratio": 1.5026178010471205, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 2600.27, "end": 2600.67, "word": "مع", "probability": 0.48590087890625}, {"start": 2600.67, "end": 2601.51, "word": " تزيادة", "probability": 0.41947021484375}, {"start": 2601.51, "end": 2602.11, "word": " التركيز", "probability": 0.676116943359375}, {"start": 2602.11, "end": 2602.23, "word": " على", "probability": 0.1463623046875}, {"start": 2602.23, "end": 2602.77, "word": " البلازمينوجين", "probability": 0.5853271484375}, {"start": 2602.77, "end": 2603.05, "word": " بشكل", "probability": 0.3479817708333333}, {"start": 2603.05, "end": 2603.05, "word": " كبير.", "probability": 0.5504557291666666}, {"start": 2603.05, "end": 2603.25, "word": " ماذا", "probability": 0.60784912109375}, {"start": 2603.25, "end": 2603.63, "word": " يحصل", "probability": 0.6456705729166666}, {"start": 2603.63, "end": 2603.75, "word": " يا", "probability": 0.3818359375}, {"start": 2603.75, "end": 2604.91, "word": " شباب؟", "probability": 0.8902587890625}, {"start": 2604.91, "end": 2605.33, "word": " جالوا", "probability": 0.41455078125}, {"start": 2605.33, "end": 2605.97, "word": " once", "probability": 0.193359375}, {"start": 2605.97, "end": 2606.75, "word": " activated", 
"probability": 0.6484375}, {"start": 2606.75, "end": 2608.27, "word": " البلازمينوجين،", "probability": 0.7948676215277778}, {"start": 2608.27, "end": 2608.51, "word": " كل", "probability": 0.865234375}, {"start": 2608.51, "end": 2608.67, "word": " اللي", "probability": 0.9404296875}, {"start": 2608.67, "end": 2609.23, "word": " بيصير", "probability": 0.8648681640625}, {"start": 2609.23, "end": 2609.91, "word": " عشان", "probability": 0.8787434895833334}, {"start": 2609.91, "end": 2610.49, "word": " يتحول", "probability": 0.9903564453125}, {"start": 2610.49, "end": 2610.95, "word": " للـ", "probability": 0.331298828125}, {"start": 2610.95, "end": 2611.99, "word": " enzymatic", "probability": 0.7103271484375}, {"start": 2611.99, "end": 2613.25, "word": " form،", "probability": 0.667236328125}, {"start": 2613.25, "end": 2613.55, "word": " انه", "probability": 0.760986328125}, {"start": 2613.55, "end": 2614.27, "word": " بيفرد،", "probability": 0.826953125}, {"start": 2614.27, "end": 2614.69, "word": " بيصيرله", "probability": 0.85693359375}, {"start": 2614.69, "end": 2615.15, "word": " folding.", "probability": 0.90380859375}], "temperature": 1.0}, {"id": 104, "seek": 264509, "start": 2616.37, "end": 2645.09, "text": "يعني على خلاف الـ Coagulation Factors المتعرف عليه يا شباب أن الـ Coagulation Factors يصبحوا كليفجي صح؟ وين عند ال active site؟ فبتبين ال active site هذا بيكون كامش و بفرد بيكون كامش و بفتح يوم يصبح له opening بتبين ال active site ولا بتبينش؟ بتبين ال active site وبالتالي بيشتغل وبتحول إلى potent enzymatic domain", "tokens": [40228, 22653, 15844, 16490, 15040, 5172, 2423, 39184, 3066, 559, 2776, 33375, 830, 9673, 2655, 3615, 28480, 47356, 35186, 13412, 3555, 16758, 14739, 2423, 39184, 3066, 559, 2776, 33375, 830, 7251, 9381, 49628, 14407, 9122, 20292, 5172, 7435, 1829, 20328, 5016, 22807, 4032, 9957, 43242, 2423, 4967, 3621, 22807, 6156, 3555, 2655, 3555, 9957, 2423, 4967, 3621, 23758, 4724, 1829, 30544, 9122, 10943, 8592, 4032, 4724, 5172, 2288, 3215, 
4724, 1829, 30544, 9122, 10943, 8592, 4032, 4724, 5172, 2655, 5016, 7251, 20498, 7251, 9381, 49628, 46740, 5193, 39894, 3555, 9957, 2423, 4967, 3621, 49429, 39894, 3555, 9957, 8592, 22807, 39894, 3555, 9957, 2423, 4967, 3621, 46599, 6027, 2655, 6027, 1829, 4724, 1829, 8592, 2655, 17082, 1211, 46599, 2655, 5016, 12610, 30731, 27073, 16272, 25915, 9274], "avg_logprob": -0.2104414715653374, "compression_ratio": 1.9065040650406504, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2616.37, "end": 2616.77, "word": "يعني", "probability": 0.9130859375}, {"start": 2616.77, "end": 2617.05, "word": " على", "probability": 0.828125}, {"start": 2617.05, "end": 2617.91, "word": " خلاف", "probability": 0.9402669270833334}, {"start": 2617.91, "end": 2618.69, "word": " الـ", "probability": 0.72607421875}, {"start": 2618.69, "end": 2619.45, "word": " Coagulation", "probability": 0.8048502604166666}, {"start": 2619.45, "end": 2620.15, "word": " Factors", "probability": 0.793212890625}, {"start": 2620.15, "end": 2621.05, "word": " المتعرف", "probability": 0.74993896484375}, {"start": 2621.05, "end": 2621.35, "word": " عليه", "probability": 0.451416015625}, {"start": 2621.35, "end": 2621.53, "word": " يا", "probability": 0.96044921875}, {"start": 2621.53, "end": 2621.99, "word": " شباب", "probability": 0.9913736979166666}, {"start": 2621.99, "end": 2622.63, "word": " أن", "probability": 0.443359375}, {"start": 2622.63, "end": 2622.81, "word": " الـ", "probability": 0.812744140625}, {"start": 2622.81, "end": 2623.31, "word": " Coagulation", "probability": 0.9715169270833334}, {"start": 2623.31, "end": 2623.69, "word": " Factors", "probability": 0.87744140625}, {"start": 2623.69, "end": 2623.99, "word": " يصبحوا", "probability": 0.65924072265625}, {"start": 2623.99, "end": 2624.55, "word": " كليفجي", "probability": 0.70587158203125}, {"start": 2624.55, "end": 2625.69, "word": " صح؟", "probability": 0.7425130208333334}, {"start": 2625.69, "end": 2626.79, "word": " وين", 
"probability": 0.691650390625}, {"start": 2626.79, "end": 2627.01, "word": " عند", "probability": 0.87109375}, {"start": 2627.01, "end": 2627.15, "word": " ال", "probability": 0.98681640625}, {"start": 2627.15, "end": 2627.39, "word": " active", "probability": 0.4345703125}, {"start": 2627.39, "end": 2627.83, "word": " site؟", "probability": 0.614501953125}, {"start": 2627.83, "end": 2628.31, "word": " فبتبين", "probability": 0.9552734375}, {"start": 2628.31, "end": 2628.45, "word": " ال", "probability": 0.94775390625}, {"start": 2628.45, "end": 2628.67, "word": " active", "probability": 0.93505859375}, {"start": 2628.67, "end": 2628.97, "word": " site", "probability": 0.89501953125}, {"start": 2628.97, "end": 2629.85, "word": " هذا", "probability": 0.734375}, {"start": 2629.85, "end": 2630.91, "word": " بيكون", "probability": 0.9173177083333334}, {"start": 2630.91, "end": 2631.55, "word": " كامش", "probability": 0.9645182291666666}, {"start": 2631.55, "end": 2632.19, "word": " و", "probability": 0.7666015625}, {"start": 2632.19, "end": 2632.73, "word": " بفرد", "probability": 0.7872314453125}, {"start": 2632.73, "end": 2633.79, "word": " بيكون", "probability": 0.794921875}, {"start": 2633.79, "end": 2634.31, "word": " كامش", "probability": 0.9972330729166666}, {"start": 2634.31, "end": 2634.45, "word": " و", "probability": 0.94580078125}, {"start": 2634.45, "end": 2634.99, "word": " بفتح", "probability": 0.9805908203125}, {"start": 2634.99, "end": 2636.05, "word": " يوم", "probability": 0.955322265625}, {"start": 2636.05, "end": 2636.37, "word": " يصبح", "probability": 0.8489583333333334}, {"start": 2636.37, "end": 2636.53, "word": " له", "probability": 0.22021484375}, {"start": 2636.53, "end": 2637.07, "word": " opening", "probability": 0.849609375}, {"start": 2637.07, "end": 2638.31, "word": " بتبين", "probability": 0.84521484375}, {"start": 2638.31, "end": 2638.45, "word": " ال", "probability": 0.98388671875}, {"start": 2638.45, "end": 2638.75, "word": " 
active", "probability": 0.96875}, {"start": 2638.75, "end": 2639.03, "word": " site", "probability": 0.88037109375}, {"start": 2639.03, "end": 2639.19, "word": " ولا", "probability": 0.76611328125}, {"start": 2639.19, "end": 2640.25, "word": " بتبينش؟", "probability": 0.81748046875}, {"start": 2640.25, "end": 2640.47, "word": " بتبين", "probability": 0.9186197916666666}, {"start": 2640.47, "end": 2640.59, "word": " ال", "probability": 0.99072265625}, {"start": 2640.59, "end": 2640.85, "word": " active", "probability": 0.9775390625}, {"start": 2640.85, "end": 2641.17, "word": " site", "probability": 0.8798828125}, {"start": 2641.17, "end": 2641.61, "word": " وبالتالي", "probability": 0.8923828125}, {"start": 2641.61, "end": 2642.21, "word": " بيشتغل", "probability": 0.8765462239583334}, {"start": 2642.21, "end": 2643.43, "word": " وبتحول", "probability": 0.8013916015625}, {"start": 2643.43, "end": 2643.63, "word": " إلى", "probability": 0.90625}, {"start": 2643.63, "end": 2643.89, "word": " potent", "probability": 0.78271484375}, {"start": 2643.89, "end": 2644.71, "word": " enzymatic", "probability": 0.949462890625}, {"start": 2644.71, "end": 2645.09, "word": " domain", "probability": 0.970703125}], "temperature": 1.0}, {"id": 105, "seek": 266448, "start": 2647.42, "end": 2664.48, "text": "حد عنده سؤال؟ ماشي؟ حد عنده سؤال اشي بقى، مرة جاية ان شاء الله بنبدأ في ال mechanism و بالذات و بنبدأ في واحدة واحدة بنشوف ال activator، بنشوف ال plasma energy، كيف هي اتحول ال plasma", "tokens": [5016, 3215, 43242, 3224, 8608, 33604, 6027, 22807, 3714, 33599, 1829, 22807, 11331, 3215, 43242, 3224, 8608, 33604, 6027, 1975, 8592, 1829, 4724, 4587, 7578, 12399, 3714, 25720, 10874, 995, 10632, 16472, 13412, 16606, 21984, 44945, 44510, 10721, 8978, 2423, 7513, 4032, 20666, 8848, 9307, 4032, 44945, 44510, 10721, 8978, 36764, 24401, 3660, 36764, 24401, 3660, 44945, 8592, 38688, 2423, 2430, 1639, 12399, 44945, 8592, 38688, 2423, 22564, 2281, 12399, 9122, 33911, 39896, 1975, 2655, 5016, 
12610, 2423, 22564], "avg_logprob": -0.23339843545109035, "compression_ratio": 1.68, "no_speech_prob": 1.0728836059570312e-06, "words": [{"start": 2647.42, "end": 2647.68, "word": "حد", "probability": 0.586181640625}, {"start": 2647.68, "end": 2647.9, "word": " عنده", "probability": 0.987060546875}, {"start": 2647.9, "end": 2649.36, "word": " سؤال؟", "probability": 0.9124755859375}, {"start": 2649.36, "end": 2651.12, "word": " ماشي؟", "probability": 0.8687744140625}, {"start": 2651.12, "end": 2652.6, "word": " حد", "probability": 0.924560546875}, {"start": 2652.6, "end": 2652.78, "word": " عنده", "probability": 0.977783203125}, {"start": 2652.78, "end": 2653.0, "word": " سؤال", "probability": 0.9957682291666666}, {"start": 2653.0, "end": 2653.2, "word": " اشي", "probability": 0.55615234375}, {"start": 2653.2, "end": 2653.46, "word": " بقى،", "probability": 0.84136962890625}, {"start": 2653.46, "end": 2653.74, "word": " مرة", "probability": 0.93798828125}, {"start": 2653.74, "end": 2654.0, "word": " جاية", "probability": 0.6966145833333334}, {"start": 2654.0, "end": 2654.1, "word": " ان", "probability": 0.7626953125}, {"start": 2654.1, "end": 2654.3, "word": " شاء", "probability": 0.9873046875}, {"start": 2654.3, "end": 2654.56, "word": " الله", "probability": 0.951171875}, {"start": 2654.56, "end": 2655.56, "word": " بنبدأ", "probability": 0.8780924479166666}, {"start": 2655.56, "end": 2655.68, "word": " في", "probability": 0.890625}, {"start": 2655.68, "end": 2655.82, "word": " ال", "probability": 0.93798828125}, {"start": 2655.82, "end": 2656.32, "word": " mechanism", "probability": 0.82177734375}, {"start": 2656.32, "end": 2657.36, "word": " و", "probability": 0.5546875}, {"start": 2657.36, "end": 2657.86, "word": " بالذات", "probability": 0.5387369791666666}, {"start": 2657.86, "end": 2657.98, "word": " و", "probability": 0.52978515625}, {"start": 2657.98, "end": 2658.9, "word": " بنبدأ", "probability": 0.8553059895833334}, {"start": 2658.9, "end": 2659.02, 
"word": " في", "probability": 0.68798828125}, {"start": 2659.02, "end": 2659.36, "word": " واحدة", "probability": 0.9576822916666666}, {"start": 2659.36, "end": 2659.66, "word": " واحدة", "probability": 0.9793294270833334}, {"start": 2659.66, "end": 2660.0, "word": " بنشوف", "probability": 0.8175455729166666}, {"start": 2660.0, "end": 2660.1, "word": " ال", "probability": 0.98046875}, {"start": 2660.1, "end": 2661.56, "word": " activator،", "probability": 0.8455403645833334}, {"start": 2661.56, "end": 2661.76, "word": " بنشوف", "probability": 0.8343098958333334}, {"start": 2661.76, "end": 2661.88, "word": " ال", "probability": 0.95068359375}, {"start": 2661.88, "end": 2662.14, "word": " plasma", "probability": 0.27490234375}, {"start": 2662.14, "end": 2663.3, "word": " energy،", "probability": 0.849609375}, {"start": 2663.3, "end": 2663.5, "word": " كيف", "probability": 0.984619140625}, {"start": 2663.5, "end": 2663.62, "word": " هي", "probability": 0.86083984375}, {"start": 2663.62, "end": 2663.98, "word": " اتحول", "probability": 0.80474853515625}, {"start": 2663.98, "end": 2664.08, "word": " ال", "probability": 0.61865234375}, {"start": 2664.08, "end": 2664.48, "word": " plasma", "probability": 0.99072265625}], "temperature": 1.0}, {"id": 106, "seek": 268536, "start": 2665.08, "end": 2685.36, "text": "والـ Plasmin بنتعرف على ال inhibitors تبعهم، ثم بنتعرف على عملية التقطيع لل-fibrin كيف يقطع الـ Plasmin ليهش ال-fibrin وبيحولوا إلى FDBs أو Fibrin Degradation Product متفق عليه؟ حد عنده سؤال؟", "tokens": [2407, 6027, 39184, 2149, 296, 2367, 44945, 2655, 3615, 28480, 15844, 2423, 20406, 9862, 6055, 3555, 3615, 16095, 12399, 38637, 2304, 44945, 2655, 3615, 28480, 15844, 6225, 42213, 10632, 16712, 47432, 40228, 24976, 12, 69, 6414, 259, 9122, 33911, 7251, 47432, 3615, 2423, 39184, 2149, 296, 2367, 32239, 3224, 8592, 2423, 12, 69, 6414, 259, 4032, 21292, 5016, 12610, 14407, 30731, 479, 27735, 82, 34051, 479, 6414, 259, 1346, 7165, 399, 22005, 44650, 5172, 4587, 47356, 
22807, 11331, 3215, 43242, 3224, 8608, 33604, 6027, 22807], "avg_logprob": -0.28488373340562334, "compression_ratio": 1.4444444444444444, "no_speech_prob": 8.52346420288086e-06, "words": [{"start": 2665.08, "end": 2665.44, "word": "والـ", "probability": 0.626953125}, {"start": 2665.44, "end": 2665.96, "word": " Plasmin", "probability": 0.576904296875}, {"start": 2665.96, "end": 2667.04, "word": " بنتعرف", "probability": 0.801025390625}, {"start": 2667.04, "end": 2667.26, "word": " على", "probability": 0.73974609375}, {"start": 2667.26, "end": 2667.38, "word": " ال", "probability": 0.87548828125}, {"start": 2667.38, "end": 2667.98, "word": " inhibitors", "probability": 0.837646484375}, {"start": 2667.98, "end": 2669.12, "word": " تبعهم،", "probability": 0.8013671875}, {"start": 2669.12, "end": 2669.34, "word": " ثم", "probability": 0.978271484375}, {"start": 2669.34, "end": 2669.92, "word": " بنتعرف", "probability": 0.950439453125}, {"start": 2669.92, "end": 2670.16, "word": " على", "probability": 0.91162109375}, {"start": 2670.16, "end": 2670.88, "word": " عملية", "probability": 0.9890950520833334}, {"start": 2670.88, "end": 2671.9, "word": " التقطيع", "probability": 0.8357747395833334}, {"start": 2671.9, "end": 2673.0, "word": " لل", "probability": 0.94921875}, {"start": 2673.0, "end": 2673.72, "word": "-fibrin", "probability": 0.7164306640625}, {"start": 2673.72, "end": 2674.42, "word": " كيف", "probability": 0.781982421875}, {"start": 2674.42, "end": 2675.12, "word": " يقطع", "probability": 0.94921875}, {"start": 2675.12, "end": 2675.66, "word": " الـ", "probability": 0.6611328125}, {"start": 2675.66, "end": 2676.16, "word": " Plasmin", "probability": 0.8123372395833334}, {"start": 2676.16, "end": 2676.62, "word": " ليهش", "probability": 0.4982096354166667}, {"start": 2676.62, "end": 2676.74, "word": " ال", "probability": 0.93408203125}, {"start": 2676.74, "end": 2677.14, "word": "-fibrin", "probability": 0.79339599609375}, {"start": 2677.14, "end": 2677.76, 
"word": " وبيحولوا", "probability": 0.81298828125}, {"start": 2677.76, "end": 2678.02, "word": " إلى", "probability": 0.63134765625}, {"start": 2678.02, "end": 2679.22, "word": " FDBs", "probability": 0.6636555989583334}, {"start": 2679.22, "end": 2679.48, "word": " أو", "probability": 0.86669921875}, {"start": 2679.48, "end": 2679.98, "word": " Fibrin", "probability": 0.7732747395833334}, {"start": 2679.98, "end": 2680.72, "word": " Degradation", "probability": 0.87060546875}, {"start": 2680.72, "end": 2681.22, "word": " Product", "probability": 0.12451171875}, {"start": 2681.22, "end": 2682.76, "word": " متفق", "probability": 0.81494140625}, {"start": 2682.76, "end": 2683.48, "word": " عليه؟", "probability": 0.845458984375}, {"start": 2683.48, "end": 2684.32, "word": " حد", "probability": 0.9443359375}, {"start": 2684.32, "end": 2684.56, "word": " عنده", "probability": 0.744873046875}, {"start": 2684.56, "end": 2685.36, "word": " سؤال؟", "probability": 0.920166015625}], "temperature": 1.0}], "language": "ar", "language_probability": 1.0, "duration": 2686.8275, "duration_after_vad": 2554.978124999988} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0.srt new file mode 100644 index 0000000000000000000000000000000000000000..c02bf3b8627534a81e701b72f947162a7417707b --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0.srt @@ -0,0 +1,2431 @@ + +1 +00:00:09,320 --> 00:00:15,760 +Last time we discussed hypothesis test for + +2 +00:00:15,760 --> 00:00:19,440 +two population proportions. And we mentioned that + +3 +00:00:19,440 --> 00:00:25,750 +the assumptions are for the first sample. n times + +4 +00:00:25,750 --> 00:00:28,910 +pi should be at least 5, and also n times 1 minus + +5 +00:00:28,910 --> 00:00:33,050 +pi is also at least 5. 
The same for the second + +6 +00:00:33,050 --> 00:00:37,570 +sample, n 2 times pi 2 is at least 5, as well as n + +7 +00:00:37,570 --> 00:00:42,860 +times 1 minus pi 2 is also at least 5. Also, we + +8 +00:00:42,860 --> 00:00:46,000 +discussed that the point estimate for the + +9 +00:00:46,000 --> 00:00:51,700 +difference of Pi 1 minus Pi 2 is given by P1 minus + +10 +00:00:51,700 --> 00:00:57,160 +P2. That means this difference is unbiased point + +11 +00:00:57,160 --> 00:01:03,160 +estimate of Pi 1 minus Pi 2. Similarly, P2 minus + +12 +00:01:03,160 --> 00:01:06,700 +P1 is the point estimate of the difference Pi 2 + +13 +00:01:06,700 --> 00:01:08,160 +minus Pi 1. + +14 +00:01:11,260 --> 00:01:16,140 +We also discussed that the bold estimate for the + +15 +00:01:16,140 --> 00:01:20,900 +overall proportion is given by this equation. So B + +16 +00:01:20,900 --> 00:01:25,980 +dash is called the bold estimate for the overall + +17 +00:01:25,980 --> 00:01:31,740 +proportion. X1 and X2 are the number of items of + +18 +00:01:31,740 --> 00:01:35,170 +interest. And the two samples that you have in one + +19 +00:01:35,170 --> 00:01:39,150 +and two, where in one and two are the sample sizes + +20 +00:01:39,150 --> 00:01:42,110 +for the first and the second sample respectively. + +21 +00:01:43,470 --> 00:01:46,830 +The appropriate statistic in this course is given + +22 +00:01:46,830 --> 00:01:52,160 +by this equation. 
Z-score or Z-statistic is the + +23 +00:01:52,160 --> 00:01:56,340 +point estimate of the difference pi 1 minus pi 2 + +24 +00:01:56,340 --> 00:02:00,620 +minus the hypothesized value under if 0, I mean if + +25 +00:02:00,620 --> 00:02:05,200 +0 is true, most of the time this term equals 0, + +26 +00:02:05,320 --> 00:02:10,480 +divided by this quantity is called the standard + +27 +00:02:10,480 --> 00:02:14,100 +error of the estimate, which is square root of B + +28 +00:02:14,100 --> 00:02:17,660 +dash 1 minus B dash times 1 over N1 plus 1 over + +29 +00:02:17,660 --> 00:02:22,160 +N2. So this is your Z statistic. The critical + +30 +00:02:22,160 --> 00:02:27,980 +regions. I'm sorry, first, the appropriate null + +31 +00:02:27,980 --> 00:02:32,200 +and alternative hypothesis are given by three + +32 +00:02:32,200 --> 00:02:38,280 +cases we have. Either two-tailed test or one + +33 +00:02:38,280 --> 00:02:42,540 +-tailed and it has either upper or lower tail. So + +34 +00:02:42,540 --> 00:02:46,140 +for example, for lower-tailed test, We are going + +35 +00:02:46,140 --> 00:02:51,500 +to test to see if a proportion 1 is smaller than a + +36 +00:02:51,500 --> 00:02:54,560 +proportion 2. This one can be written as pi 1 + +37 +00:02:54,560 --> 00:02:59,080 +smaller than pi 2 under H1, or the difference + +38 +00:02:59,080 --> 00:03:01,160 +between these two population proportions is + +39 +00:03:01,160 --> 00:03:04,940 +negative, is smaller than 0. So either you may + +40 +00:03:04,940 --> 00:03:08,660 +write the alternative as pi 1 smaller than pi 2, + +41 +00:03:09,180 --> 00:03:11,860 +or the difference, which is pi 1 minus pi 2 + +42 +00:03:11,860 --> 00:03:15,730 +smaller than 0. For sure, the null hypothesis is + +43 +00:03:15,730 --> 00:03:18,830 +the opposite of the alternative hypothesis. 
So if + +44 +00:03:18,830 --> 00:03:22,310 +this is one by one smaller than by two, so the + +45 +00:03:22,310 --> 00:03:24,710 +opposite by one is greater than or equal to two. + +46 +00:03:25,090 --> 00:03:27,670 +Similarly, but the opposite side here, we are + +47 +00:03:27,670 --> 00:03:31,530 +talking about the upper tail of probability. So + +48 +00:03:31,530 --> 00:03:33,910 +under the alternative hypothesis, by one is + +49 +00:03:33,910 --> 00:03:37,870 +greater than by two. Or it could be written as by + +50 +00:03:37,870 --> 00:03:40,150 +one minus by two is positive, that means greater + +51 +00:03:40,150 --> 00:03:45,970 +than zero. While for the two-tailed test, for the + +52 +00:03:45,970 --> 00:03:49,310 +alternative hypothesis, we have Y1 does not equal + +53 +00:03:49,310 --> 00:03:51,870 +Y2. In this case, we are saying there is no + +54 +00:03:51,870 --> 00:03:55,950 +difference under H0, and there is a difference. + +55 +00:03:56,920 --> 00:03:59,680 +should be under each one. Difference means either + +56 +00:03:59,680 --> 00:04:03,220 +greater than or smaller than. So we have this not + +57 +00:04:03,220 --> 00:04:06,800 +equal sign. So by one does not equal by two. Or it + +58 +00:04:06,800 --> 00:04:08,980 +could be written as by one minus by two is not + +59 +00:04:08,980 --> 00:04:12,320 +equal to zero. It's the same as the one we have + +60 +00:04:12,320 --> 00:04:15,100 +discussed when we are talking about comparison of + +61 +00:04:15,100 --> 00:04:19,500 +two population means. We just replaced these by's + +62 +00:04:19,500 --> 00:04:24,960 +by mus. Finally, the rejection regions are given + +63 +00:04:24,960 --> 00:04:30,000 +by three different charts here for the lower tail + +64 +00:04:30,000 --> 00:04:35,500 +test. We reject the null hypothesis if the value + +65 +00:04:35,500 --> 00:04:37,500 +of the test statistic fall in the rejection + +66 +00:04:37,500 --> 00:04:40,940 +region, which is in the left side. 
So that means + +67 +00:04:40,940 --> 00:04:44,040 +we reject zero if this statistic is smaller than + +68 +00:04:44,040 --> 00:04:49,440 +negative zero. That's for lower tail test. On the + +69 +00:04:49,440 --> 00:04:51,620 +other hand, for other tailed tests, your rejection + +70 +00:04:51,620 --> 00:04:54,800 +region is the right side, so you reject the null + +71 +00:04:54,800 --> 00:04:57,160 +hypothesis if this statistic is greater than Z + +72 +00:04:57,160 --> 00:05:01,700 +alpha. In addition, for two-tailed tests, there + +73 +00:05:01,700 --> 00:05:04,300 +are two rejection regions. One is on the right + +74 +00:05:04,300 --> 00:05:07,000 +side, the other on the left side. Here, alpha is + +75 +00:05:07,000 --> 00:05:10,960 +split into two halves, alpha over two to the + +76 +00:05:10,960 --> 00:05:14,060 +right, similarly alpha over two to the left side. + +77 +00:05:14,640 --> 00:05:16,900 +Here, we reject the null hypothesis if your Z + +78 +00:05:16,900 --> 00:05:20,900 +statistic falls in the rejection region here, that + +79 +00:05:20,900 --> 00:05:24,820 +means z is smaller than negative z alpha over 2 or + +80 +00:05:24,820 --> 00:05:30,360 +z is greater than z alpha over 2. Now this one, I + +81 +00:05:30,360 --> 00:05:33,980 +mean the rejection regions are the same for either + +82 +00:05:33,980 --> 00:05:38,540 +one sample t-test or two sample t-test, either for + +83 +00:05:38,540 --> 00:05:41,560 +the population proportion or the population mean. + +84 +00:05:42,180 --> 00:05:46,120 +We have the same rejection regions. Sometimes we + +85 +00:05:46,120 --> 00:05:49,800 +replace z by t. It depends if we are talking about + +86 +00:05:49,800 --> 00:05:54,760 +small samples and sigmas unknown. So that's the + +87 +00:05:54,760 --> 00:05:58,160 +basic concepts about testing or hypothesis testing + +88 +00:05:58,160 --> 00:06:01,200 +for the comparison between two population + +89 +00:06:01,200 --> 00:06:05,140 +proportions. 
And we stopped at this point. I will + +90 +00:06:05,140 --> 00:06:08,780 +give three examples, three examples for testing + +91 +00:06:08,780 --> 00:06:11,660 +about two population proportions. The first one is + +92 +00:06:11,660 --> 00:06:17,050 +given here. It says that, is there a significant + +93 +00:06:17,050 --> 00:06:20,490 +difference between the proportion of men and the + +94 +00:06:20,490 --> 00:06:24,170 +proportion of women who will vote yes on a + +95 +00:06:24,170 --> 00:06:24,630 +proposition? + +96 +00:06:28,220 --> 00:06:30,480 +In this case, we are talking about a proportion. + +97 +00:06:30,840 --> 00:06:34,520 +So this problem tests for a proportion. We have + +98 +00:06:34,520 --> 00:06:38,980 +two proportions here because we have two samples + +99 +00:06:38,980 --> 00:06:43,800 +for two population spheres, men and women. So + +100 +00:06:43,800 --> 00:06:46,600 +there are two populations. So we are talking about + +101 +00:06:46,600 --> 00:06:50,620 +two population proportions. Now, we have to state + +102 +00:06:50,620 --> 00:06:53,440 +carefully now an alternative hypothesis. So for + +103 +00:06:53,440 --> 00:06:57,640 +example, let's say that phi 1 is the population + +104 +00:06:57,640 --> 00:07:07,140 +proportion, proportion of men who will vote for a + +105 +00:07:07,140 --> 00:07:11,740 +proposition A for example, for vote yes, for vote + +106 +00:07:11,740 --> 00:07:13,300 +yes for proposition A. + +107 +00:07:30,860 --> 00:07:36,460 +is the same but of men, of women, I'm sorry. So + +108 +00:07:36,460 --> 00:07:42,160 +the first one for men and the other of + +109 +00:07:42,160 --> 00:07:48,400 +women. Now, in a random, so in this case, we are + +110 +00:07:48,400 --> 00:07:51,020 +talking about difference between two population + +111 +00:07:51,020 --> 00:07:52,940 +proportions, so by one equals by two. 
+ +112 +00:07:56,920 --> 00:08:00,820 +Your alternate hypothesis should be, since the + +113 +00:08:00,820 --> 00:08:03,220 +problem talks about, is there a significant + +114 +00:08:03,220 --> 00:08:07,140 +difference? Difference means two tails. So it + +115 +00:08:07,140 --> 00:08:12,740 +should be pi 1 does not equal pi 2. Pi 1 does not + +116 +00:08:12,740 --> 00:08:17,400 +equal pi 2. So there's still one state null and + +117 +00:08:17,400 --> 00:08:20,680 +alternate hypothesis. Now, in a random sample of + +118 +00:08:20,680 --> 00:08:28,880 +36 out of 72 men, And 31 of 50 women indicated + +119 +00:08:28,880 --> 00:08:33,380 +they would vote yes. So for example, if X1 + +120 +00:08:33,380 --> 00:08:39,000 +represents number of men who would vote yes, that + +121 +00:08:39,000 --> 00:08:45,720 +means X1 equals 36 in + +122 +00:08:45,720 --> 00:08:54,950 +172. So that's for men. Now for women. 31 out of + +123 +00:08:54,950 --> 00:08:59,370 +50. So 50 is the sample size for the second + +124 +00:08:59,370 --> 00:09:05,890 +sample. Now it's ask about this test about the + +125 +00:09:05,890 --> 00:09:08,230 +difference between the two population proportion + +126 +00:09:08,230 --> 00:09:13,890 +at 5% level of significance. So alpha is given to + +127 +00:09:13,890 --> 00:09:19,390 +be 5%. So that's all the information you have in + +128 +00:09:19,390 --> 00:09:23,740 +order to answer this question. So based on this + +129 +00:09:23,740 --> 00:09:27,220 +statement, we state null and alternative + +130 +00:09:27,220 --> 00:09:30,160 +hypothesis. Now based on this information, we can + +131 +00:09:30,160 --> 00:09:32,220 +solve the problem by using three different + +132 +00:09:32,220 --> 00:09:39,220 +approaches. Critical value approach, B value, and + +133 +00:09:39,220 --> 00:09:42,320 +confidence interval approach. 
Because we can use
+
+134
+00:09:42,320 --> 00:09:44,220
+confidence interval approach because we are
+
+135
+00:09:44,220 --> 00:09:47,380
+talking about two-tailed test. So let's start with
+
+136
+00:09:47,380 --> 00:09:50,240
+the basic one, critical value approach. So
+
+137
+00:09:50,240 --> 00:09:50,980
+approach A.
+
+138
+00:10:01,140 --> 00:10:03,400
+Now since we are talking about two-tailed test,
+
+139
+00:10:04,340 --> 00:10:08,120
+your critical value should be plus or minus z
+
+140
+00:10:08,120 --> 00:10:12,780
+alpha over 2. And since alpha is 5% so the
+
+141
+00:10:12,780 --> 00:10:18,420
+critical values are z
+
+142
+00:10:18,420 --> 00:10:26,650
+plus or minus 0.025 which is 1.96. Or you may use
+
+143
+00:10:26,650 --> 00:10:30,050
+the standard normal table in order to find the
+
+144
+00:10:30,050 --> 00:10:33,330
+critical values. Or just if you remember that
+
+145
+00:10:33,330 --> 00:10:37,150
+values from previous time. So the critical regions
+
+146
+00:10:37,150 --> 00:10:47,030
+are above 1.96 or smaller than negative 1.96. I have
+
+147
+00:10:47,030 --> 00:10:51,090
+to compute the Z statistic. Now Z statistic is
+
+148
+00:10:51,090 --> 00:10:55,290
+given by this equation. Z stat equals B1 minus B2.
+
+149
+00:10:55,730 --> 00:11:03,010
+minus Pi 1 minus Pi 2. This quantity divided by P
+
+150
+00:11:03,010 --> 00:11:09,690
+dash 1 minus P dash multiplied by 1 over N1 plus 1
+
+151
+00:11:09,690 --> 00:11:17,950
+over N2. Here we have to find B1, B2. So B1 equals
+
+152
+00:11:17,950 --> 00:11:21,910
+X1 over N1. X1 is given.
+
+153
+00:11:27,180 --> 00:11:32,160
+36 over 72, that means 50%. Similarly,
+
+154
+00:11:32,920 --> 00:11:39,840
+B2 equals X2 over N2, that is 31
+
+155
+00:11:39,840 --> 00:11:48,380
+over 50, so that's 62%. 
Also, we have to compute
+
+156
+00:11:48,380 --> 00:11:55,500
+the pooled estimate of the overall proportion of B
+
+157
+00:11:55,500 --> 00:11:55,860
+dash
+
+158
+00:12:01,890 --> 00:12:07,130
+What are the sample sizes we have? X1 and X2. 36
+
+159
+00:12:07,130 --> 00:12:14,550
+plus 31. Over 72 plus 50. 72 plus 50. So that means
+
+160
+00:12:14,550 --> 00:12:22,310
+67 over 122, 0.549.
+
+161
+00:12:24,690 --> 00:12:25,610
+122.
+
+162
+00:12:30,400 --> 00:12:34,620
+So simple calculations give B1 and B2, as well as
+
+163
+00:12:34,620 --> 00:12:39,340
+B dash. Now, plug these values on the Z-stat
+
+164
+00:12:39,340 --> 00:12:43,540
+formula, we get the value that is this. So first,
+
+165
+00:12:44,600 --> 00:12:47,560
+state null and alternative hypothesis, pi 1 minus
+
+166
+00:12:47,560 --> 00:12:50,080
+pi 2 equals 0. That means the two populations are
+
+167
+00:12:50,080 --> 00:12:55,290
+equal. We are going to test this one against Pi 1
+
+168
+00:12:55,290 --> 00:12:58,570
+minus Pi 2 is not zero. That means there is a
+
+169
+00:12:58,570 --> 00:13:02,430
+significant difference between proportions. Now
+
+170
+00:13:02,430 --> 00:13:06,290
+for men, we got proportion of 50%. That's for the
+
+171
+00:13:06,290 --> 00:13:09,370
+sample proportion. And sample proportion for
+
+172
+00:13:09,370 --> 00:13:15,390
+women who will vote yes for proposition A is 62%. The
+
+173
+00:13:15,390 --> 00:13:19,530
+pooled estimate for the overall proportion equals
+
+174
+00:13:19,530 --> 00:13:24,530
+0.549. Now, based on this information, we can
+
+175
+00:13:24,530 --> 00:13:27,610
+calculate the Z statistic. Straightforward
+
+176
+00:13:27,610 --> 00:13:33,470
+calculation, you will end with this result. So, Z
+
+177
+00:13:33,470 --> 00:13:39,350
+stat negative 1.31.
+
+178
+00:13:41,790 --> 00:13:44,950
+So, we have to compute this one before either
+
+179
+00:13:44,950 --> 00:13:47,650
+before using any of the approaches we have. 
+ +180 +00:13:50,940 --> 00:13:52,960 +If we are going to use their critical value + +181 +00:13:52,960 --> 00:13:55,140 +approach, we have to find Z alpha over 2 which is + +182 +00:13:55,140 --> 00:13:59,320 +1 more than 6. Now the question is, is this value + +183 +00:13:59,320 --> 00:14:05,140 +falling the rejection regions right or left? it's + +184 +00:14:05,140 --> 00:14:10,660 +clear that this value, negative 1.31, lies in the + +185 +00:14:10,660 --> 00:14:12,960 +non-rejection region, so we don't reject a null + +186 +00:14:12,960 --> 00:14:17,900 +hypothesis. So my decision is don't reject H0. My + +187 +00:14:17,900 --> 00:14:22,580 +conclusion is there is not significant evidence of + +188 +00:14:22,580 --> 00:14:25,160 +a difference in proportions who will vote yes + +189 +00:14:25,160 --> 00:14:31,300 +between men and women. Even it seems to me that + +190 +00:14:31,300 --> 00:14:34,550 +there is a difference between Similar proportions, + +191 +00:14:34,790 --> 00:14:38,290 +50% and 62%. Still, this difference is not + +192 +00:14:38,290 --> 00:14:41,670 +significant in order to say that there is + +193 +00:14:41,670 --> 00:14:44,730 +significant difference between the proportions of + +194 +00:14:44 + +223 +00:16:48,920 --> 00:16:53,940 +for pi 1 minus pi 2 is given by this equation. Now + +224 +00:16:53,940 --> 00:16:58,250 +let's see how can we use the other two approaches + +225 +00:16:58,250 --> 00:17:01,570 +in order to test if there is a significant + +226 +00:17:01,570 --> 00:17:04,230 +difference between the proportions of men and + +227 +00:17:04,230 --> 00:17:07,910 +women. I'm sure you don't have this slide for + +228 +00:17:07,910 --> 00:17:12,730 +computing B value and confidence interval. 
+ +229 +00:17:30,230 --> 00:17:35,050 +Now since we are talking about two-tails, your B + +230 +00:17:35,050 --> 00:17:37,670 +value should be the probability of Z greater than + +231 +00:17:37,670 --> 00:17:45,430 +1.31 and smaller than negative 1.31. So my B value + +232 +00:17:45,430 --> 00:17:53,330 +in this case equals Z greater than 1.31 plus Z + +233 +00:17:55,430 --> 00:17:59,570 +smaller than negative 1.31. Since we are talking + +234 +00:17:59,570 --> 00:18:03,810 +about two-tailed tests, so there are two rejection + +235 +00:18:03,810 --> 00:18:08,910 +regions. My Z statistic is 1.31, so it should be + +236 +00:18:08,910 --> 00:18:14,990 +here 1.31 to the right, and negative 1.31 to the left. Now, what's + +237 +00:18:14,990 --> 00:18:20,150 +the probability that the Z statistic will fall in + +238 +00:18:20,150 --> 00:18:23,330 +the rejection regions, right or left? So we have + +239 +00:18:23,330 --> 00:18:27,650 +to add. B of Z greater than 1.31 and B of Z + +240 +00:18:27,650 --> 00:18:30,750 +smaller than negative 1.31. Now the two areas to the + +241 +00:18:30,750 --> 00:18:34,790 +right of 1.31 and to the left of negative 1.31 are + +242 +00:18:34,790 --> 00:18:38,110 +equal because of symmetry. So just compute one and + +243 +00:18:38,110 --> 00:18:43,030 +multiply that by two, you will get the B value. So + +244 +00:18:43,030 --> 00:18:47,110 +two times. Now by using the concept in chapter + +245 +00:18:47,110 --> 00:18:50,550 +six, easily you can compute either this one or the + +246 +00:18:50,550 --> 00:18:53,030 +other one. The other one directly from the + +247 +00:18:53,030 --> 00:18:55,870 +negative z-score table. The other one you should + +248 +00:18:55,870 --> 00:18:58,710 +have the complement 1 minus, because it's smaller + +249 +00:18:58,710 --> 00:19:02,170 +than 1.1. And either way you will get this result. + +250 +00:19:05,110 --> 00:19:11,750 +Now my p-value is around 19%. 
Always we reject the + +251 +00:19:11,750 --> 00:19:14,930 +null hypothesis. If your B value is smaller than + +252 +00:19:14,930 --> 00:19:20,410 +alpha, that always we reject null hypothesis, if my + +253 +00:19:20,410 --> 00:19:25,950 +B value is smaller than alpha, alpha is given 5% + +254 +00:19:25,950 --> 00:19:31,830 +since B value equals + +255 +00:19:31,830 --> 00:19:36,910 +19%, which is much bigger than 5%, so we don't reject our analysis. So my + +256 +00:19:36,910 --> 00:19:41,170 +decision is we don't reject at zero. So the same + +257 +00:19:41,170 --> 00:19:48,390 +conclusion as we reached by using critical + +258 +00:19:48,390 --> 00:19:52,690 +value approach. So again, by using B value, we have to + +259 +00:19:52,690 --> 00:19:57,850 +compute the probability that your Z statistic + +260 +00:19:57,850 --> 00:20:00,770 +falls in the rejection regions. I end with this + +261 +00:20:00,770 --> 00:20:05,320 +result, my B value is around 19%. As we mentioned + +262 +00:20:05,320 --> 00:20:10,600 +before, we reject null hypothesis if my B value is + +263 +00:20:10,600 --> 00:20:14,180 +smaller than alpha. Now, my B value in this case + +264 +00:20:14,180 --> 00:20:17,920 +is much, much bigger than 5%, so my decision is + +265 +00:20:17,920 --> 00:20:22,740 +don't reject null hypothesis. Any questions? + +267 +00:20:36,140 --> 00:20:41,160 +The other approach, the third one, confidence + +268 +00:20:41,160 --> 00:20:42,520 +interval approach. + +269 +00:20:46,260 --> 00:20:48,980 +Now, for the confidence interval approach, we have + +270 +00:20:48,980 --> 00:20:53,960 +this equation, b1 minus b2. Again, the point + +271 +00:20:53,960 --> 00:21:03,760 +estimate, plus or minus z square root b1 times 1 + +272 +00:21:03,760 --> 00:21:09,810 +minus b1 divided by a1. B2 times 1 minus B2 + +273 +00:21:09,810 --> 00:21:11,650 +divided by N2. + +274 +00:21:13,850 --> 00:21:20,730 +Now we have B1 and B2, so 0.5 minus 0.62. 
That's + +275 +00:21:20,730 --> 00:21:25,170 +your calculations from previous information we + +276 +00:21:25,170 --> 00:21:28,470 +have. Plus or minus Z alpha over 2, the critical + +277 +00:21:28,470 --> 00:21:35,030 +value again is 1.96 times Square root of P1, 0.5 + +278 +00:21:35,030 --> 00:21:41,090 +times 1 minus 0.5 divided by N1 plus P2, 62 percent + +279 +00:21:41,090 --> 00:21:46,550 +times 1 minus P2 divided by N2. 0.5 minus 62 + +280 +00:21:46,550 --> 00:21:50,650 +percent is negative 12 percent plus or minus the + +281 +00:21:50,650 --> 00:21:53,090 +margin of error. This amount is again as we + +282 +00:21:53,090 --> 00:21:56,730 +mentioned before, is the margin of error, 0.177. + +283 +00:21:57,530 --> 00:21:59,830 +Now simple calculation will end with this result + +284 +00:21:59,830 --> 00:22:03,300 +that is the difference between the two proportions + +285 +00:22:03,300 --> 00:22:09,820 +lie between negative 0.296 and 0.057. That means + +286 +00:22:09,820 --> 00:22:14,580 +we are 95% confident that the difference between + +287 +00:22:14,580 --> 00:22:19,100 +the proportions of men who will vote yes for a + +288 +00:22:19,100 --> 00:22:27,640 +position A and women equals negative 0.297 up to 0 + +289 +00:22:27,640 --> 00:22:31,680 +.057. Now the question is, since we are testing + +290 +00:22:31,680 --> 00:22:37,380 +if the difference between p1 and p2 equals zero, the + +291 +00:22:37,380 --> 00:22:41,700 +question is does this interval contain zero or + +292 +00:22:41,700 --> 00:22:47,680 +capture zero? Now since we start here from + +293 +00:22:47,680 --> 00:22:51,230 +negative and end with positive, I mean the lower + +294 +00:22:51,230 --> 00:22:55,330 +bound is negative 0.297 and the upper bound is 0 + +295 +00:22:55,330 --> 00:23:00,610 +.057. 
So zero is inside the interval, I mean the + +296 +00:23:00,610 --> 00:23:03,870 +confidence interval contains zero in this case, so + +297 +00:23:03,870 --> 00:23:06,650 +we don't reject the null hypothesis because maybe + +298 +00:23:06,650 --> 00:23:11,780 +the difference equals zero. So since this interval + +299 +00:23:11,780 --> 00:23:16,300 +does contain the hypothesized difference of zero, so we + +300 +00:23:16,300 --> 00:23:21,100 +don't reject null hypothesis at 5% level. So the + +301 +00:23:21,100 --> 00:23:24,880 +same conclusion as we got before by using critical + +302 +00:23:24,880 --> 00:23:27,460 +value approach and p-value approach. So either + +303 +00:23:27,460 --> 00:23:32,100 +one will end with the same decision. Either reject + +304 +00:23:32,100 --> 00:23:37,020 +or fail to reject, it depends on the test itself. + +305 +00:23:38,760 --> 00:23:43,820 +That's all. Do you have any question? Any + +306 +00:23:43,820 --> 00:23:47,540 +question? So again, there are three different + +307 +00:23:47,540 --> 00:23:51,600 +approaches in order to solve this problem. One is + +308 +00:23:51,600 --> 00:23:55,680 +critical value approach, the standard one. The + +309 +00:23:55,680 --> 00:23:58,900 +other two are the p-value approach and confidence + +310 +00:23:58,900 --> 00:24:02,140 +interval. One more time, confidence interval is + +311 +00:24:02,140 --> 00:24:07,080 +only valid for + +312 +00:24:08,770 --> 00:24:13,110 +two-tailed tests. Because the confidence interval + +313 +00:24:13,110 --> 00:24:16,430 +we have is just for two-tailed tests, so it could + +314 +00:24:16,430 --> 00:24:20,210 +be used only for testing about two-tailed tests. + +315 +00:24:23,350 --> 00:24:25,990 +As we mentioned before, I'm going to skip + +316 +00:24:25,990 --> 00:24:32,390 +hypothesis for variances as well as ANOVA test. So + +317 +00:24:32,390 --> 00:24:36,410 +that's all for chapter ten. 
+ +318 +00:24:37,670 --> 00:24:42,390 +But now I'm going to do some of the practice + +319 +00:24:42,390 --> 00:24:43,730 +problems. + +320 +00:24:46,750 --> 00:24:52,630 +Chapter 10. To practice, let's start with some + +321 +00:24:52,630 --> 00:24:55,270 +practice problems for Chapter 10. + +322 +00:24:59,270 --> 00:25:03,770 +A few years ago, Pepsi invited consumers to take + +323 +00:25:03,770 --> 00:25:08,870 +the Pepsi challenge. Consumers were asked to + +324 +00:25:08,870 --> 00:25:13,790 +decide which of two sodas, Coke or Pepsi, they + +325 +00:25:13,790 --> 00:25:17,930 +preferred in a blind taste test. Pepsi was + +326 +00:25:17,930 --> 00:25:21,930 +interested in determining what factors played a + +327 +00:25:21,930 --> 00:25:25,930 +role in people's taste preferences. One of the + +328 +00:25:25,930 --> 00:25:28,630 +factors studied was the gender of the consumer. + +329 +00:25:29,650 --> 00:25:32,350 +Below are the results of the analysis comparing + +330 +00:25:32,350 --> 00:25:36,870 +the taste preferences of men and women with the + +331 +00:25:36,870 --> 00:25:41,630 +proportions depicting preference in or for Pepsi. + +332 +00:25:42,810 --> 00:25:49,190 +For men, the sample size + +333 +00:25:49,190 --> 00:25:57,990 +is 109. So that's your N1. And the proportion + +334 +00:26:00,480 --> 00:26:09,100 +for men is around 42%. For women, + +335 +00:26:11,640 --> 00:26:25,720 +N2 equals 52, and the proportion of females, 25%. The + +336 +00:26:25,720 --> 00:26:29,870 +difference between the proportions of men and women or + +337 +00:26:29,870 --> 00:26:35,590 +males and females is 0.172, around 0.172. And this + +338 +00:26:35,590 --> 00:26:41,530 +statistic is given by 2.118, so approximately 2 + +339 +00:26:41,530 --> 00:26:47,170 +.12. 
Now, based on this result, based on this + +340 +00:26:47,170 --> 00:26:49,090 +information, question number one, + +341 +00:26:53,910 --> 00:26:58,690 +To determine if a difference exists in the taste + +342 +00:26:58,690 --> 00:27:04,490 +preferences of men and women, give the correct + +343 +00:27:04,490 --> 00:27:06,970 +alternative hypothesis that will guide the test. + +344 +00:27:08,830 --> 00:27:15,830 +A, B, Why B? Because the test defines between the + +345 +00:27:15,830 --> 00:27:18,650 +new form A and the new form B. Because if we say + +346 +00:27:18,650 --> 00:27:21,910 +that H1 is equal to U1 minus M equals F, + +347 +00:27:28,970 --> 00:27:34,190 +So the correct answer is B? B. So that's + +348 +00:27:34,190 --> 00:27:40,830 +incorrect. C. Why? Why C is the correct answer? + +349 +00:27:45,470 --> 00:27:46,070 +Because + +350 +00:27:52,720 --> 00:27:56,500 +p1 is not equal because we have difference. So + +351 +00:27:56,500 --> 00:27:59,380 +since we have difference here, it should be not + +352 +00:27:59,380 --> 00:28:02,240 +equal to. And since we are talking about + +353 +00:28:02,240 --> 00:28:06,120 +proportions, so you have to ignore A and B. So A + +354 +00:28:06,120 --> 00:28:10,020 +and B should be ignored first. Then you either + +355 +00:28:10,020 --> 00:28:15,220 +choose C or D. C is the correct answer. So C is + +356 +00:28:15,220 --> 00:28:20,440 +the correct answer. That's for number one. Part + +357 +00:28:20,440 --> 00:28:27,100 +two. Now suppose Pepsi wanted to test to determine + +358 +00:28:27,100 --> 00:28:35,680 +if men preferred Pepsi more than women. Using + +359 +00:28:35,680 --> 00:28:38,400 +the test statistic given, compute the appropriate + +360 +00:28:38,400 --> 00:28:43,940 +p-value for the test. Let's assume that pi 1 is + +361 +00:28:43,940 --> 00:28:48,640 +the population proportion for men who preferred + +362 +00:28:48,640 --> 00:28:56,440 +Pepsi, and pi 2 for women who prefer Pepsi. 
Now + +363 +00:28:56,440 --> 00:29:00,140 +he asks about suppose the company wanted to test + +364 +00:29:00,140 --> 00:29:02,760 +to determine if males prefer Pepsi more than + +365 +00:29:02,760 --> 00:29:08,080 +females. Using again the statistic given, which is + +366 +00:29:08,080 --> 00:29:13,400 +2.12 for example, compute the appropriate p-value. Now + +367 +00:29:13,400 --> 00:29:18,160 +let's state first H0 and H1. + +368 +00:29:27,450 --> 00:29:31,970 +H1, pi 1 + +369 +00:29:31,970 --> 00:29:34,410 +minus pi 2 is greater than zero. + +370 +00:29:37,980 --> 00:29:42,740 +Because it says that men prefer Pepsi more than + +371 +00:29:42,740 --> 00:29:46,940 +women. pi 1 for men, pi 2 for women. So I + +372 +00:29:46,940 --> 00:29:50,800 +should have pi 1 greater than pi 2, or pi 1 minus + +373 +00:29:50,800 --> 00:29:54,940 +pi 2 is positive. So it's upper-tailed. Now, in this + +374 +00:29:54,940 --> 00:30:01,940 +case, my p-value, its probability, is p. + +375 +00:30:05,680 --> 00:30:07,320 +It's around this value. + +376 +00:30:12,410 --> 00:30:18,230 +1 minus p of z smaller than 2.12. So 1 minus, + +377 +00:30:18,350 --> 00:30:21,530 +now by using the table or the z table we have. + +378 +00:30:25,510 --> 00:30:29,370 +Since we are talking about 2.12, so + +379 +00:30:29,370 --> 00:30:34,670 +the answer is .983. So + +380 +00:30:34,670 --> 00:30:40,590 +1 minus .983, so the answer is 0.017. So my p value + +381 +00:30:43,430 --> 00:30:49,890 +equals 0.017. So A is the correct answer. Now if + +382 +00:30:49,890 --> 00:30:53,970 +the problem is a two-tailed test, it should be + +383 +00:30:53,970 --> 00:30:57,450 +multiplied by 2. So the answer, the correct one, should + +384 +00:30:57,450 --> 00:31:02,230 +be B. So you have A and B. If it is one-tailed, + +385 +00:31:02,390 --> 00:31:06,310 +your correct answer is A. 
If it is two-tailed, I + +386 +00:31:06,310 --> 00:31:10,550 +mean, if we are testing to determine if a + +387 +00:31:10,550 --> 00:31:13,890 +difference exists, then you have to multiply this + +388 +00:31:13,890 --> 00:31:19,030 +one by two. So that's your p value. Any questions? + +389 +00:31:23,010 --> 00:31:27,550 +Number three. Suppose Pepsi wanted to test to + +390 +00:31:27,550 --> 00:31:33,230 +determine if men prefer Pepsi less than + +391 +00:31:33,230 --> 00:31:36,810 +women, using the statistic given, compute the + +392 +00:31:36,810 --> 00:31:42,990 +appropriate p-value. Now, H1 in this case, p1 is + +393 +00:31:42,990 --> 00:31:48,490 +smaller than p2, p1 smaller than p2. Now your + +394 +00:31:48,490 --> 00:31:54,490 +p-value, z is smaller than, because here it is + +395 +00:31:54,490 --> 00:31:58,050 +smaller than my statistic 2.12. + +396 +00:32:01,570 --> 00:32:04,790 +We don't write a negative sign. Because the value of + +397 +00:32:04,790 --> 00:32:08,150 +the statistic is 2.12. But here we are going to + +398 +00:32:08,150 --> 00:32:11,790 +test a lower-tailed test. So my p-value is p of Z + +399 +00:32:11,790 --> 00:32:15,250 +smaller than. So smaller comes from the + +400 +00:32:15,250 --> 00:32:17,730 +alternative. This is the sign under the alternative. + +401 +00:32:18,910 --> 00:32:21,810 +And you have to take the value of the Z statistic + +402 +00:32:21,810 --> 00:32:22,510 +as it is. + +403 +00:32:25,610 --> 00:32:34,100 +So p of Z is smaller than 2.12. So they need, if + +404 +00:32:34,100 --> 00:32:38,060 +you got a correct answer, D is the correct one. If p is + +405 +00:32:38,060 --> 00:32:40,420 +the correct answer, you will get .9996 + +406 +00:32:40,420 --> 00:32:47,620 +.6, that's the incorrect answer. Any questions? The + +407 +00:32:47,620 --> 00:32:53,920 +correct answer is D, number + +408 +00:32:53,920 --> 00:32:57,620 +four. 
Suppose + +409 +00:32:57,620 --> 00:33:03,650 +that Now, for example, forget the information we + +410 +00:33:03,650 --> 00:33:07,390 +have so far for p-value. Suppose that the two + +411 +00:33:07,390 --> 00:33:11,910 +-tailed p-value was really + +445 +00:35:55,720 --> 00:35:58,800 +we if we reject it means that we have sufficient + +446 +00:35:58,800 --> 00:36:02,700 +evidence to support the alternative so D is + +447 +00:36:02,700 --> 00:36:07,470 +incorrect Now what's about C at five percent Five, + +448 +00:36:07,830 --> 00:36:10,570 +so this value is greater than five, so we don't + +449 +00:36:10,570 --> 00:36:13,270 +reject. So that's incorrect. + +450 +00:36:21,370 --> 00:36:28,030 +B. At five, at 10% now, there is sufficient + +451 +00:36:28,030 --> 00:36:34,550 +evidence. Sufficient means we reject. We reject. + +452 +00:36:35,220 --> 00:36:40,440 +Since this B value, 0.7, is smaller than alpha. 7% + +453 +00:36:40,440 --> 00:36:44,240 +is smaller than 10%. So we reject. That means you + +454 +00:36:44,240 --> 00:36:46,960 +have to read carefully. There is sufficient + +455 +00:36:46,960 --> 00:36:50,280 +evidence to include, to indicate the proportion of + +456 +00:36:50,280 --> 00:36:54,820 +males preferring Pepsi differs from the proportion + +457 +00:36:54,820 --> 00:36:58,660 +of females. That's correct. So B is the correct + +458 +00:36:58,660 --> 00:37:05,570 +state. Now look at A. A, at 5% there is sufficient + +459 +00:37:05,570 --> 00:37:09,710 +evidence? No, because this value is greater than + +460 +00:37:09,710 --> 00:37:16,970 +alpha, so we don't reject. For this one. Here we + +461 +00:37:16,970 --> 00:37:21,050 +reject because at 10% we reject. So B is the + +462 +00:37:21,050 --> 00:37:27,670 +correct answer. Make sense? Yeah, exactly, for + +463 +00:37:27,670 --> 00:37:31,850 +10%. If this value is 5%, then B is incorrect. 
+ +464 +00:37:34,190 --> 00:37:38,690 +Again, if we change this one to be 5%, still this + +465 +00:37:38,690 --> 00:37:39,870 +statement is false. + +466 +00:37:43,050 --> 00:37:48,670 +It should be smaller than alpha in order to reject + +467 +00:37:48,670 --> 00:37:53,770 +the null hypothesis. So, B is the correct + +468 +00:37:53,770 --> 00:37:56,350 +statement. + +469 +00:37:58,180 --> 00:38:02,080 +Always insufficient means you don't reject null + +470 +00:38:02,080 --> 00:38:06,000 +hypothesis. Now for D, we reject null hypothesis + +471 +00:38:06,000 --> 00:38:10,500 +at 8%. Since this value 0.7 is smaller than alpha, + +472 +00:38:10,740 --> 00:38:14,700 +so we reject. So this is incorrect. Now for C, be + +473 +00:38:14,700 --> 00:38:19,440 +careful. At 5%, if this, if we change this one + +474 +00:38:19,440 --> 00:38:23,560 +little bit, there is insufficient evidence. What + +475 +00:38:23,560 --> 00:38:32,320 +do you think? About C. If we change part C as at 5 + +476 +00:38:32,320 --> 00:38:36,540 +% there is insufficient evidence to indicate the + +477 +00:38:36,540 --> 00:38:39,840 +proportion of males preferring Pepsi equals. + +478 +00:38:44,600 --> 00:38:49,940 +You cannot say equal because this one maybe yes + +479 +00:38:49,940 --> 00:38:53,200 +maybe no you don't know the exact answer. So if we + +480 +00:38:53,200 --> 00:38:56,380 +don't reject the null hypothesis then you don't + +481 +00:38:56,380 --> 00:38:58,780 +have sufficient evidence in order to support each + +482 +00:38:58,780 --> 00:39:03,800 +one. So, don't reject the zero as we mentioned + +483 +00:39:03,800 --> 00:39:10,660 +before. Don't reject the zero does not imply + +484 +00:39:10,660 --> 00:39:16,840 +if zero is true. It means the evidence, the data + +485 +00:39:16,840 --> 00:39:19,500 +you have is not sufficient to support the + +486 +00:39:19,500 --> 00:39:25,260 +alternative evidence. So, don't say equal to. 
So + +487 +00:39:25,260 --> 00:39:30,560 +say don't reject rather than saying accept. So V + +488 +00:39:30,560 --> 00:39:31,460 +is the correct answer. + +489 +00:39:35,940 --> 00:39:43,020 +Six, seven, and eight. Construct 90% confidence + +490 +00:39:43,020 --> 00:39:48,380 +interval, construct 95, construct 99. It's + +491 +00:39:48,380 --> 00:39:52,700 +similar, just the critical value will be changed. + +492 +00:39:53,620 --> 00:39:58,380 +Now my question is, which is the widest confidence + +493 +00:39:58,380 --> 00:40:03,080 +interval in this case? 99. The last one is the + +494 +00:40:03,080 --> 00:40:08,040 +widest because here 99 is the largest confidence + +495 +00:40:08,040 --> 00:40:11,160 +limit. So that means the width of the interval is + +496 +00:40:11,160 --> 00:40:12,620 +the largest in this case. + +497 +00:40:17,960 --> 00:40:23,770 +For 5, 6 and 7. The question is construct either + +498 +00:40:23,770 --> 00:40:30,930 +90%, 95% or 99% for the same question. Simple + +499 +00:40:30,930 --> 00:40:33,510 +calculation will give the confidence interval for + +500 +00:40:33,510 --> 00:40:38,590 +each one. My question was, which one is the widest + +501 +00:40:38,590 --> 00:40:43,630 +confidence interval? Based on the C level, 99% + +502 +00:40:43,630 --> 00:40:47,350 +gives the widest confidence interval comparing to + +503 +00:40:47,350 --> 00:41:02,100 +90% and 95%. The exact answers for 5, 6 and 7, 0.5 + +504 +00:41:02,100 --> 00:41:08,900 +to 30 percent. For 95 percent, 0.2 to 32 percent. + +505 +00:41:10,750 --> 00:41:16,030 +For 99, negative 0.3 to 0.37. So this is the + +506 +00:41:16,030 --> 00:41:21,970 +widest. Because here we start from 5 to 30. Here + +507 +00:41:21,970 --> 00:41:26,030 +we start from lower than 5, 2%, up to upper, for + +508 +00:41:26,030 --> 00:41:31,190 +greater than 30, 32. Here we start from negative 3 + +509 +00:41:31,190 --> 00:41:35,330 +% up to 37. 
So this is the widest confidence + +510 +00:41:35,330 --> 00:41:41,950 +interval. Number six. Number six. number six five + +511 +00:41:41,950 --> 00:41:44,850 +six and seven are the same except we just share + +512 +00:41:44,850 --> 00:41:49,710 +the confidence level z so here we have one nine + +513 +00:41:49,710 --> 00:41:54,070 +six instead of one six four and two point five + +514 +00:41:54,070 --> 00:42:01,170 +seven it's our seven six next read the table e + +515 +00:42:12,610 --> 00:42:19,330 +Table A. Corporation randomly selects 150 + +516 +00:42:19,330 --> 00:42:25,830 +salespeople and finds that 66% who have never + +517 +00:42:25,830 --> 00:42:29,070 +taken self-improvement course would like such a + +518 +00:42:29,070 --> 00:42:33,830 +course. So currently, or in recent, + +519 +00:42:37,660 --> 00:42:46,940 +It says that out of 150 sales people, find that 66 + +520 +00:42:46,940 --> 00:42:51,000 +% would + +521 +00:42:51,000 --> 00:42:56,720 +like to take such course. The firm did a similar + +522 +00:42:56,720 --> 00:43:01,480 +study 10 years ago. So in the past, they had the + +523 +00:43:01,480 --> 00:43:07,430 +same study in which 60% of a random sample of 160 + +524 +00:43:07,430 --> 00:43:12,430 +salespeople wanted a self-improvement course. So + +525 +00:43:12,430 --> 00:43:13,710 +in the past, + +526 +00:43:16,430 --> 00:43:25,230 +into 160, and proportion is 60%. The groups are + +527 +00:43:25,230 --> 00:43:29,690 +assumed to be independent random samples. Let Pi 1 + +528 +00:43:29,690 --> 00:43:32,890 +and Pi 2 represent the true proportion of workers + +529 +00:43:32,890 --> 00:43:36,030 +who would like to attend a self-improvement course + +530 +00:43:36,030 --> 00:43:39,550 +in the recent study and the past study + +531 +00:43:39,550 --> 00:43:44,490 +respectively. So suppose Pi 1 and Pi 2. Pi 1 for + +532 +00:43:44,490 --> 00:43:49,470 +recent study and Pi 2 for the past study. 
So + +533 +00:43:49,470 --> 00:43:53,590 +that's the question. Now, question number one. + +534 +00:43:56,580 --> 00:44:00,220 +If the firm wanted to test whether this proportion + +535 +00:44:00,220 --> 00:44:06,800 +has changed from the previous study, which + +536 +00:44:06,800 --> 00:44:09,100 +represents the relevant hypothesis? + +537 +00:44:14,160 --> 00:44:18,540 +Again, the firm wanted to test whether this + +538 +00:44:18,540 --> 00:44:21,740 +proportion has changed. From the previous study, + +539 +00:44:22,160 --> 00:44:25,900 +which represents the relevant hypothesis in this + +540 +00:44:25,900 --> 00:44:26,140 +case? + +541 +00:44:33,560 --> 00:44:40,120 +Which is the correct? A is + +542 +00:44:40,120 --> 00:44:44,500 +the correct answer. Why A is the correct answer? + +543 +00:44:45,000 --> 00:44:48,040 +Since we are talking about proportions, so it + +544 +00:44:48,040 --> 00:44:51,750 +should have pi. It changed, it means does not + +545 +00:44:51,750 --> 00:44:55,410 +equal 2. So A is the correct answer. Now B is + +546 +00:44:55,410 --> 00:45:00,850 +incorrect because why B is incorrect? Exactly + +547 +00:45:00,850 --> 00:45:03,770 +because under H0 we have pi 1 minus pi 2 does not + +548 +00:45:03,770 --> 00:45:08,570 +equal 0. Always equal sign appears only under the + +549 +00:45:08,570 --> 00:45:14,950 +null hypothesis. So it's the opposite here. Now C + +550 +00:45:14,950 --> 00:45:21,190 +and D talking about Upper tier or lower tier, but + +551 +00:45:21,190 --> 00:45:23,890 +here we're talking about two-tiered test, so A is + +552 +00:45:23,890 --> 00:45:24,750 +the correct answer. + +553 +00:45:29,490 --> 00:45:33,090 +This sign null hypothesis states incorrectly, + +554 +00:45:34,030 --> 00:45:38,010 +because under H0 should have equal sign, and for + +555 +00:45:38,010 --> 00:45:39,730 +alternate it should be not equal to. + +556 +00:45:42,770 --> 00:45:43,630 +Number two. 
+ +557 +00:45:47,860 --> 00:45:51,840 +If the firm wanted to test whether a greater + +558 +00:45:51,840 --> 00:45:56,680 +proportion of workers would currently like to + +559 +00:45:56,680 --> 00:46:00,180 +attend a self-improvement course than in the past, + +560 +00:46:00,900 --> 00:46:05,840 +currently, the proportion is greater than in the + +561 +00:46:05,840 --> 00:46:13,680 +past. Which represents the relevant hypothesis? C + +562 +00:46:13,680 --> 00:46:18,180 +is the correct answer. Because it says a greater + +563 +00:46:18,180 --> 00:46:22,340 +proportion of workers work currently. So by one, + +564 +00:46:22,420 --> 00:46:26,340 +greater than by two. So C is the correct answer. + +565 +00:46:31,340 --> 00:46:40,140 +It says that the firm wanted to test proportion of + +566 +00:46:40,140 --> 00:46:46,640 +workers currently study + +567 +00:46:46,640 --> 00:46:50,320 +or recent study by one represents the proportion + +568 +00:46:50,320 --> 00:46:55,140 +of workers who would like to attend the course so + +569 +00:46:55,140 --> 00:46:58,080 +that's by one greater than + +570 +00:47:01,730 --> 00:47:05,350 +In the past. So it means by one is greater than by + +571 +00:47:05,350 --> 00:47:11,870 +two. It means by one minus by two is positive. So + +572 +00:47:11,870 --> 00:47:14,590 +the alternative is by one minus two by two is + +573 +00:47:14,590 --> 00:47:16,430 +positive. So this one is the correct answer. + +574 +00:47:21,530 --> 00:47:26,910 +Exactly. If if here we have what in the past + +575 +00:47:26,910 --> 00:47:30,430 +should be it should be the correct answer. + +576 +00:47:34,690 --> 00:47:40,450 +That's to three. Any question for going to number + +577 +00:47:40,450 --> 00:47:49,590 +three? Any question for number two? Three. What is + +578 +00:47:49,590 --> 00:47:52,790 +the unbiased point estimate for the difference + +579 +00:47:52,790 --> 00:47:54,410 +between the two population proportions? 
+ +580 +00:47:58,960 --> 00:48:04,360 +B1 minus B2 which is straight forward calculation + +581 +00:48:04,360 --> 00:48:06,980 +gives A the correct answer. Because the point + +582 +00:48:06,980 --> 00:48:13,320 +estimate in this case is B1 minus B2. B1 is 66 + +583 +00:48:13,320 --> 00:48:18,560 +percent, B2 is 60 percent, so the answer is 6 + +584 +00:48:18,560 --> 00:48:26,190 +percent. So B1 minus B2 which is 6 percent. I + +585 +00:48:26,190 --> 00:48:32,450 +think three is straightforward. Number four, what + +586 +00:48:32,450 --> 00:48:38,450 +is or are the critical values which, when + +587 +00:48:38,450 --> 00:48:41,870 +performing a z-test on whether population + +588 +00:48:41,870 --> 00:48:46,570 +proportions are different at 5%. Here, yes, we are + +589 +00:48:46,570 --> 00:48:52,250 +talking about two-tailed test, and alpha is 5%. So + +590 +00:48:52,250 --> 00:48:55,550 +my critical values, they are two critical values, + +591 +00:48:55,630 --> 00:48:55,830 +actually. + +592 +00:49:27,080 --> 00:49:31,000 +What is or are the critical values when testing + +593 +00:49:31,000 --> 00:49:34,260 +whether population proportions are different at 10 + +594 +00:49:34,260 --> 00:49:39,240 +%? The same instead here we have 10 instead of 5%. + +595 +00:49:40,920 --> 00:49:45,100 +So A is the correct answer. So just use the table. + +596 +00:49:47,340 --> 00:49:51,440 +Now for the previous one, we have 0 to 5, 0 to 5. + +597 +00:49:51,980 --> 00:49:57,740 +The other one, alpha is 10%. So 0, 5 to the right, + +598 +00:49:57,880 --> 00:50:03,580 +the same as to the left. So plus or minus 164. + +599 +00:50:06,700 --> 00:50:11,580 +So 4 and 5 by using the z table. + +600 +00:50:20,560 --> 00:50:25,280 +So exactly, since alpha here is 1, 0, 2, 5, so the + +601 +00:50:25,280 --> 00:50:27,880 +area becomes smaller than, so it should be z + +602 +00:50:27,880 --> 00:50:32,380 +greater than. So 1.106, the other one 1.645, + +603 +00:50:32,800 --> 00:50:38,030 +number 6. 
What is or are? The critical value in + +604 +00:50:38,030 --> 00:50:42,450 +testing whether the current population is higher + +605 +00:50:42,450 --> 00:50:50,990 +than. Higher means above. Above 10. Above 10, 5%. + +606 +00:50:50,990 --> 00:50:55,870 +So which? B. + +607 +00:50:58,470 --> 00:51:00,810 +B is the correct. Z alpha. + +608 +00:51:06,700 --> 00:51:08,440 +So, B is the correct answer. + +609 +00:51:11,200 --> 00:51:11,840 +7. + +610 +00:51:14,740 --> 00:51:21,320 +7 and 8 we should have to calculate number 1. 7 + +611 +00:51:21,320 --> 00:51:25,880 +was the estimated standard error of the difference + +612 +00:51:25,880 --> 00:51:29,660 +between the two sample proportions. We should have + +613 +00:51:29,660 --> 00:51:30,740 +a standard error. + +614 +00:51:34,620 --> 00:51:40,320 +Square root, B dash 1 minus B dash multiplied by 1 + +615 +00:51:40,320 --> 00:51:45,300 +over N1 plus 1 over N2. And we have to find B dash + +616 +00:51:45,300 --> 00:51:49,220 +here. Let's see how can we find B dash. + +617 +00:51:52,720 --> 00:51:59,700 +B dash + +618 +00:51:59,700 --> 00:52:05,800 +equal x1 plus x2. Now what's the value of X1? + +619 +00:52:10,400 --> 00:52:16,220 +Exactly. Since B1 is X1 over N1. So that means X1 + +620 +00:52:16,220 --> 00:52:26,600 +is N1 times B1. So N1 is 150 times 60%. So that's + +621 +00:52:26,600 --> 00:52:35,980 +99. And similarly, X2 N2, which is 160, times 60% + +622 +00:52:35,980 --> 00:52:48,420 +gives 96. So your B dash is x1 plus x2 divided by + +623 +00:52:48,420 --> 00:52:55,200 +N1 plus N2, which is 150 plus 310. So complete B + +624 +00:52:55,200 --> 00:52:58,760 +dash versus the bold estimate of overall + +625 +00:52:58,760 --> 00:53:03,570 +proportion So 9 and 9 plus 9 is 6. + +626 +00:53:06,390 --> 00:53:07,730 +That's just B-. + +627 +00:53:13,210 --> 00:53:14,290 +6 to 9. + +628 +00:53:17,150 --> 00:53:23,190 +6 to 9. So this is not your answer. It's just B-. 
+ +629 +00:53:23,770 --> 00:53:29,030 +Now take this value and the square root of 6 to 9. + +630 +00:53:30,060 --> 00:53:36,280 +times 1.629 multiplied by 1 over N1 which is 150 + +631 +00:53:36,280 --> 00:53:44,980 +plus 160. That's your standard error. B dash is + +632 +00:53:44,980 --> 00:53:49,080 +not standard error. B dash is the bold estimate of + +633 +00:53:49,080 --> 00:53:53,740 +overall + +667 +00:56:53,150 --> 00:56:58,230 +critical regions are 1.96 and above or smaller + +668 +00:56:58,230 --> 00:57:07,550 +than minus 1.96. Now, my z statistic is 1.903. Now + +669 +00:57:07,550 --> 00:57:12,610 +this value falls in the non-rejection region. So + +670 +00:57:12,610 --> 00:57:14,310 +we don't reject the null hypothesis. + +671 +00:57:16,900 --> 00:57:21,400 +Ignore A and C, so the answer is either B or D. + +672 +00:57:22,260 --> 00:57:26,360 +Now let's read B. Don't reject the null and + +673 +00:57:26,360 --> 00:57:28,820 +conclude that the proportion of employees who are + +674 +00:57:28,820 --> 00:57:31,600 +interested in self-improvement course has not + +675 +00:57:31,600 --> 00:57:32,100 +changed. + +676 +00:57:37,040 --> 00:57:40,060 +That's correct. Because we don't reject the null + +677 +00:57:40,060 --> 00:57:42,900 +hypothesis. It means there is no significant + +678 +00:57:42,900 --> 00:57:45,760 +difference. So it has not changed. Now, D, don't + +679 +00:57:45,760 --> 00:57:47,540 +reject the null hypothesis and conclude the + +680 +00:57:47,540 --> 00:57:49,760 +proportion of Obliques who are interested in a + +681 +00:57:49,760 --> 00:57:52,700 +certain point has increased, which is incorrect. + +682 +00:57:53,640 --> 00:57:57,960 +So B is the correct answer. So again, since my Z + +683 +00:57:57,960 --> 00:58:01,080 +statistic falls in the non-rejection region, we + +684 +00:58:01,080 --> 00:58:04,380 +don't reject the null hypothesis. So either B or D + +685 +00:58:04,380 --> 00:58:07,350 +is the correct answer. 
But here we are talking + +686 +00:58:07,350 --> 00:58:12,190 +about none or don't reject the null hypothesis. + +687 +00:58:12,470 --> 00:58:14,310 +That means we don't have sufficient evidence + +688 +00:58:14,310 --> 00:58:17,610 +support that there is significant change between + +689 +00:58:17,610 --> 00:58:20,670 +the two proportions. So there is no difference. So + +690 +00:58:20,670 --> 00:58:23,270 +it has not changed. It's the correct one. So you + +691 +00:58:23,270 --> 00:58:29,890 +have to choose B. So B is the most correct answer. + +692 +00:58:30,830 --> 00:58:35,600 +Now, 10, 11, and 12. Talking about constructing + +693 +00:58:35,600 --> 00:58:41,700 +confidence interval 99, 95, and 90%. It's similar. + +694 +00:58:42,620 --> 00:58:46,140 +And as we mentioned before, 99% will give the + +695 +00:58:46,140 --> 00:58:50,940 +widest confidence interval. And the answers for + +696 +00:58:50,940 --> 00:59:04,300 +these are 14, 11, 14, is negative 0.8 to 20%. For + +697 +00:59:04,300 --> 00:59:11,720 +11, 0.5, negative 0.5 to 17. For 90%, negative 0.3 + +698 +00:59:11,720 --> 00:59:15,420 +to 0.15. So this is the widest confidence + +699 +00:59:15,420 --> 00:59:22,220 +interval, which was for 99%. So similar as the + +700 +00:59:22,220 --> 00:59:26,360 +previous one we had discussed. So for 99, always + +701 +00:59:26,360 --> 00:59:32,230 +we get The widest confidence interval. Any + +702 +00:59:32,230 --> 00:59:37,490 +question? That's all. Next time shall start + +703 +00:59:37,490 --> 00:59:41,350 +chapter 12, Chi-square test of independence. 
diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..11751689fa5c3e7e1345945aa46cab9e2c2ea1ff --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0_postprocess.srt @@ -0,0 +1,2812 @@ +1 +00:00:09,320 --> 00:00:15,760 +Last time we have discussed hypothesis test for + +2 +00:00:15,760 --> 00:00:19,440 +two population proportions. And we mentioned that + +3 +00:00:19,440 --> 00:00:25,750 +the assumptions are for the first sample. n times + +4 +00:00:25,750 --> 00:00:28,910 +pi should be at least 5, and also n times 1 minus + +5 +00:00:28,910 --> 00:00:33,050 +pi is also at least 5. The same for the second + +6 +00:00:33,050 --> 00:00:37,570 +sample, n 2 times pi 2 is at least 5, as well as n + +7 +00:00:37,570 --> 00:00:42,860 +times 1 minus pi 2 is also at least 5. Also, we + +8 +00:00:42,860 --> 00:00:46,000 +discussed that the point estimate for the + +9 +00:00:46,000 --> 00:00:51,700 +difference of Pi 1 minus Pi 2 is given by P1 minus + +10 +00:00:51,700 --> 00:00:57,160 +P2. That means this difference is unbiased point + +11 +00:00:57,160 --> 00:01:03,160 +estimate of Pi 1 minus Pi 2. Similarly, P2 minus + +12 +00:01:03,160 --> 00:01:06,700 +P1 is the point estimate of the difference Pi 2 + +13 +00:01:06,700 --> 00:01:08,160 +minus Pi 1. + +14 +00:01:11,260 --> 00:01:16,140 +We also discussed that the bold estimate for the + +15 +00:01:16,140 --> 00:01:20,900 +overall proportion is given by this equation. So B + +16 +00:01:20,900 --> 00:01:25,980 +dash is called the bold estimate for the overall + +17 +00:01:25,980 --> 00:01:31,740 +proportion. X1 and X2 are the number of items of + +18 +00:01:31,740 --> 00:01:35,170 +interest. 
And the two samples that you have in one + +19 +00:01:35,170 --> 00:01:39,150 +and two, where in one and two are the sample sizes + +20 +00:01:39,150 --> 00:01:42,110 +for the first and the second sample respectively. + +21 +00:01:43,470 --> 00:01:46,830 +The appropriate statistic in this course is given + +22 +00:01:46,830 --> 00:01:52,160 +by this equation. Z-score or Z-statistic is the + +23 +00:01:52,160 --> 00:01:56,340 +point estimate of the difference pi 1 minus pi 2 + +24 +00:01:56,340 --> 00:02:00,620 +minus the hypothesized value under if 0, I mean if + +25 +00:02:00,620 --> 00:02:05,200 +0 is true, most of the time this term equals 0, + +26 +00:02:05,320 --> 00:02:10,480 +divided by this quantity is called the standard + +27 +00:02:10,480 --> 00:02:14,100 +error of the estimate, which is square root of B + +28 +00:02:14,100 --> 00:02:17,660 +dash 1 minus B dash times 1 over N1 plus 1 over + +29 +00:02:17,660 --> 00:02:22,160 +N2. So this is your Z statistic. The critical + +30 +00:02:22,160 --> 00:02:27,980 +regions. I'm sorry, first, the appropriate null + +31 +00:02:27,980 --> 00:02:32,200 +and alternative hypothesis are given by three + +32 +00:02:32,200 --> 00:02:38,280 +cases we have. Either two-tailed test or one + +33 +00:02:38,280 --> 00:02:42,540 +-tailed and it has either upper or lower tail. So + +34 +00:02:42,540 --> 00:02:46,140 +for example, for lower-tailed test, We are going + +35 +00:02:46,140 --> 00:02:51,500 +to test to see if a proportion 1 is smaller than a + +36 +00:02:51,500 --> 00:02:54,560 +proportion 2. This one can be written as pi 1 + +37 +00:02:54,560 --> 00:02:59,080 +smaller than pi 2 under H1, or the difference + +38 +00:02:59,080 --> 00:03:01,160 +between these two population proportions is + +39 +00:03:01,160 --> 00:03:04,940 +negative, is smaller than 0. 
So either you may + +40 +00:03:04,940 --> 00:03:08,660 +write the alternative as pi 1 smaller than pi 2, + +41 +00:03:09,180 --> 00:03:11,860 +or the difference, which is pi 1 minus pi 2 + +42 +00:03:11,860 --> 00:03:15,730 +smaller than 0. For sure, the null hypothesis is + +43 +00:03:15,730 --> 00:03:18,830 +the opposite of the alternative hypothesis. So if + +44 +00:03:18,830 --> 00:03:22,310 +this is one by one smaller than by two, so the + +45 +00:03:22,310 --> 00:03:24,710 +opposite by one is greater than or equal to two. + +46 +00:03:25,090 --> 00:03:27,670 +Similarly, but the opposite side here, we are + +47 +00:03:27,670 --> 00:03:31,530 +talking about the upper tail of probability. So + +48 +00:03:31,530 --> 00:03:33,910 +under the alternative hypothesis, by one is + +49 +00:03:33,910 --> 00:03:37,870 +greater than by two. Or it could be written as by + +50 +00:03:37,870 --> 00:03:40,150 +one minus by two is positive, that means greater + +51 +00:03:40,150 --> 00:03:45,970 +than zero. While for the two-tailed test, for the + +52 +00:03:45,970 --> 00:03:49,310 +alternative hypothesis, we have Y1 does not equal + +53 +00:03:49,310 --> 00:03:51,870 +Y2. In this case, we are saying there is no + +54 +00:03:51,870 --> 00:03:55,950 +difference under H0, and there is a difference. + +55 +00:03:56,920 --> 00:03:59,680 +should be under each one. Difference means either + +56 +00:03:59,680 --> 00:04:03,220 +greater than or smaller than. So we have this not + +57 +00:04:03,220 --> 00:04:06,800 +equal sign. So by one does not equal by two. Or it + +58 +00:04:06,800 --> 00:04:08,980 +could be written as by one minus by two is not + +59 +00:04:08,980 --> 00:04:12,320 +equal to zero. It's the same as the one we have + +60 +00:04:12,320 --> 00:04:15,100 +discussed when we are talking about comparison of + +61 +00:04:15,100 --> 00:04:19,500 +two population means. We just replaced these by's + +62 +00:04:19,500 --> 00:04:24,960 +by mus. 
Finally, the rejection regions are given + +63 +00:04:24,960 --> 00:04:30,000 +by three different charts here for the lower tail + +64 +00:04:30,000 --> 00:04:35,500 +test. We reject the null hypothesis if the value + +65 +00:04:35,500 --> 00:04:37,500 +of the test statistic fall in the rejection + +66 +00:04:37,500 --> 00:04:40,940 +region, which is in the left side. So that means + +67 +00:04:40,940 --> 00:04:44,040 +we reject zero if this statistic is smaller than + +68 +00:04:44,040 --> 00:04:49,440 +negative zero. That's for lower tail test. On the + +69 +00:04:49,440 --> 00:04:51,620 +other hand, for other tailed tests, your rejection + +70 +00:04:51,620 --> 00:04:54,800 +region is the right side, so you reject the null + +71 +00:04:54,800 --> 00:04:57,160 +hypothesis if this statistic is greater than Z + +72 +00:04:57,160 --> 00:05:01,700 +alpha. In addition, for two-tailed tests, there + +73 +00:05:01,700 --> 00:05:04,300 +are two rejection regions. One is on the right + +74 +00:05:04,300 --> 00:05:07,000 +side, the other on the left side. Here, alpha is + +75 +00:05:07,000 --> 00:05:10,960 +split into two halves, alpha over two to the + +76 +00:05:10,960 --> 00:05:14,060 +right, similarly alpha over two to the left side. + +77 +00:05:14,640 --> 00:05:16,900 +Here, we reject the null hypothesis if your Z + +78 +00:05:16,900 --> 00:05:20,900 +statistic falls in the rejection region here, that + +79 +00:05:20,900 --> 00:05:24,820 +means z is smaller than negative z alpha over 2 or + +80 +00:05:24,820 --> 00:05:30,360 +z is greater than z alpha over 2. Now this one, I + +81 +00:05:30,360 --> 00:05:33,980 +mean the rejection regions are the same for either + +82 +00:05:33,980 --> 00:05:38,540 +one sample t-test or two sample t-test, either for + +83 +00:05:38,540 --> 00:05:41,560 +the population proportion or the population mean. + +84 +00:05:42,180 --> 00:05:46,120 +We have the same rejection regions. 
Sometimes we + +85 +00:05:46,120 --> 00:05:49,800 +replace z by t. It depends if we are talking about + +86 +00:05:49,800 --> 00:05:54,760 +small samples and sigmas unknown. So that's the + +87 +00:05:54,760 --> 00:05:58,160 +basic concepts about testing or hypothesis testing + +88 +00:05:58,160 --> 00:06:01,200 +for the comparison between two population + +89 +00:06:01,200 --> 00:06:05,140 +proportions. And we stopped at this point. I will + +90 +00:06:05,140 --> 00:06:08,780 +give three examples, three examples for testing + +91 +00:06:08,780 --> 00:06:11,660 +about two population proportions. The first one is + +92 +00:06:11,660 --> 00:06:17,050 +given here. It says that, is there a significant + +93 +00:06:17,050 --> 00:06:20,490 +difference between the proportion of men and the + +94 +00:06:20,490 --> 00:06:24,170 +proportion of women who will vote yes on a + +95 +00:06:24,170 --> 00:06:24,630 +proposition? + +96 +00:06:28,220 --> 00:06:30,480 +In this case, we are talking about a proportion. + +97 +00:06:30,840 --> 00:06:34,520 +So this problem tests for a proportion. We have + +98 +00:06:34,520 --> 00:06:38,980 +two proportions here because we have two samples + +99 +00:06:38,980 --> 00:06:43,800 +for two population spheres, men and women. So + +100 +00:06:43,800 --> 00:06:46,600 +there are two populations. So we are talking about + +101 +00:06:46,600 --> 00:06:50,620 +two population proportions. Now, we have to state + +102 +00:06:50,620 --> 00:06:53,440 +carefully now an alternative hypothesis. So for + +103 +00:06:53,440 --> 00:06:57,640 +example, let's say that phi 1 is the population + +104 +00:06:57,640 --> 00:07:07,140 +proportion, proportion of men who will vote for a + +105 +00:07:07,140 --> 00:07:11,740 +proposition A for example, for vote yes, for vote + +106 +00:07:11,740 --> 00:07:13,300 +yes for proposition A. + +107 +00:07:30,860 --> 00:07:36,460 +is the same but of men, of women, I'm sorry. 
So + +108 +00:07:36,460 --> 00:07:42,160 +the first one for men and the other of + +109 +00:07:42,160 --> 00:07:48,400 +women. Now, in a random, so in this case, we are + +110 +00:07:48,400 --> 00:07:51,020 +talking about difference between two population + +111 +00:07:51,020 --> 00:07:52,940 +proportions, so by one equals by two. + +112 +00:07:56,920 --> 00:08:00,820 +Your alternate hypothesis should be, since the + +113 +00:08:00,820 --> 00:08:03,220 +problem talks about, is there a significant + +114 +00:08:03,220 --> 00:08:07,140 +difference? Difference means two tails. So it + +115 +00:08:07,140 --> 00:08:12,740 +should be pi 1 does not equal pi 2. Pi 1 does not + +116 +00:08:12,740 --> 00:08:17,400 +equal pi 2. So there's still one state null and + +117 +00:08:17,400 --> 00:08:20,680 +alternate hypothesis. Now, in a random sample of + +118 +00:08:20,680 --> 00:08:28,880 +36 out of 72 men, And 31 of 50 women indicated + +119 +00:08:28,880 --> 00:08:33,380 +they would vote yes. So for example, if X1 + +120 +00:08:33,380 --> 00:08:39,000 +represents number of men who would vote yes, that + +121 +00:08:39,000 --> 00:08:45,720 +means X1 equals 36 in + +122 +00:08:45,720 --> 00:08:54,950 +172. So that's for men. Now for women. 31 out of + +123 +00:08:54,950 --> 00:08:59,370 +50. So 50 is the sample size for the second + +124 +00:08:59,370 --> 00:09:05,890 +sample. Now it's ask about this test about the + +125 +00:09:05,890 --> 00:09:08,230 +difference between the two population proportion + +126 +00:09:08,230 --> 00:09:13,890 +at 5% level of significance. So alpha is given to + +127 +00:09:13,890 --> 00:09:19,390 +be 5%. So that's all the information you have in + +128 +00:09:19,390 --> 00:09:23,740 +order to answer this question. So based on this + +129 +00:09:23,740 --> 00:09:27,220 +statement, we state null and alternative + +130 +00:09:27,220 --> 00:09:30,160 +hypothesis. 
Now based on this information, we can + +131 +00:09:30,160 --> 00:09:32,220 +solve the problem by using three different + +132 +00:09:32,220 --> 00:09:39,220 +approaches. Critical value approach, B value, and + +133 +00:09:39,220 --> 00:09:42,320 +confidence interval approach. Because we can use + +134 +00:09:42,320 --> 00:09:44,220 +confidence interval approach because we are + +135 +00:09:44,220 --> 00:09:47,380 +talking about two-tailed test. So let's start with + +136 +00:09:47,380 --> 00:09:50,240 +the basic one, critical value approach. So + +137 +00:09:50,240 --> 00:09:50,980 +approach A. + +138 +00:10:01,140 --> 00:10:03,400 +Now since we are talking about two-tailed test, + +139 +00:10:04,340 --> 00:10:08,120 +your critical value should be plus or minus z + +140 +00:10:08,120 --> 00:10:12,780 +alpha over 2. And since alpha is 5% so the + +141 +00:10:12,780 --> 00:10:18,420 +critical values are z + +142 +00:10:18,420 --> 00:10:26,650 +plus or minus 0.25 which is 196. Or you may use + +143 +00:10:26,650 --> 00:10:30,050 +the standard normal table in order to find the + +144 +00:10:30,050 --> 00:10:33,330 +critical values. Or just if you remember that + +145 +00:10:33,330 --> 00:10:37,150 +values from previous time. So the critical regions + +146 +00:10:37,150 --> 00:10:47,030 +are above 196 or smaller than negative 196. I have + +147 +00:10:47,030 --> 00:10:51,090 +to compute the Z statistic. Now Z statistic is + +148 +00:10:51,090 --> 00:10:55,290 +given by this equation. Z stat equals B1 minus B2. + +149 +00:10:55,730 --> 00:11:03,010 +minus Pi 1 minus Pi 2. This quantity divided by P + +150 +00:11:03,010 --> 00:11:09,690 +dash 1 minus P dash multiplied by 1 over N1 plus 1 + +151 +00:11:09,690 --> 00:11:17,950 +over N1. Here we have to find B1, B2. So B1 equals + +152 +00:11:17,950 --> 00:11:21,910 +X1 over N1. X1 is given. + +153 +00:11:27,180 --> 00:11:32,160 +to that means 50%. 
Similarly, + +154 +00:11:32,920 --> 00:11:39,840 +B2 is A equals X2 over into X to the third power + +155 +00:11:39,840 --> 00:11:48,380 +over 50, so that's 60%. Also, we have to compute + +156 +00:11:48,380 --> 00:11:55,500 +the bold estimate of the overall proportion of B + +157 +00:11:55,500 --> 00:11:55,860 +dash + +158 +00:12:01,890 --> 00:12:07,130 +What are the sample sizes we have? X1 and X2. 36 + +159 +00:12:07,130 --> 00:12:14,550 +plus 31. Over 72 plus 7. 72 plus 7. So that means + +160 +00:12:14,550 --> 00:12:22,310 +67 over 152.549. + +161 +00:12:24,690 --> 00:12:25,610 +120. + +162 +00:12:30,400 --> 00:12:34,620 +So simple calculations give B1 and B2, as well as + +163 +00:12:34,620 --> 00:12:39,340 +B dash. Now, plug these values on the Z-state + +164 +00:12:39,340 --> 00:12:43,540 +formula, we get the value that is this. So first, + +165 +00:12:44,600 --> 00:12:47,560 +state null and alternative hypothesis, pi 1 minus + +166 +00:12:47,560 --> 00:12:50,080 +pi 2 equals 0. That means the two populations are + +167 +00:12:50,080 --> 00:12:55,290 +equal. We are going to test this one against Pi 1 + +168 +00:12:55,290 --> 00:12:58,570 +minus Pi 2 is not zero. That means there is a + +169 +00:12:58,570 --> 00:13:02,430 +significant difference between proportions. Now + +170 +00:13:02,430 --> 00:13:06,290 +for men, we got proportion of 50%. That's for the + +171 +00:13:06,290 --> 00:13:09,370 +similar proportion. And similar proportion for + +172 +00:13:09,370 --> 00:13:15,390 +women who will vote yes for position A is 62%. The + +173 +00:13:15,390 --> 00:13:19,530 +pooled estimate for the overall proportion equals + +174 +00:13:19,530 --> 00:13:24,530 +0.549. Now, based on this information, we can + +175 +00:13:24,530 --> 00:13:27,610 +calculate the Z statistic. Straightforward + +176 +00:13:27,610 --> 00:13:33,470 +calculation, you will end with this result. So, Z + +177 +00:13:33,470 --> 00:13:39,350 +start negative 1.31. 
+ +178 +00:13:41,790 --> 00:13:44,950 +So, we have to compute this one before either + +179 +00:13:44,950 --> 00:13:47,650 +before using any of the approaches we have. + +180 +00:13:50,940 --> 00:13:52,960 +If we are going to use their critical value + +181 +00:13:52,960 --> 00:13:55,140 +approach, we have to find Z alpha over 2 which is + +182 +00:13:55,140 --> 00:13:59,320 +1 more than 6. Now the question is, is this value + +183 +00:13:59,320 --> 00:14:05,140 +falling the rejection regions right or left? it's + +184 +00:14:05,140 --> 00:14:10,660 +clear that this value, negative 1.31, lies in the + +185 +00:14:10,660 --> 00:14:12,960 +non-rejection region, so we don't reject a null + +186 +00:14:12,960 --> 00:14:17,900 +hypothesis. So my decision is don't reject H0. My + +187 +00:14:17,900 --> 00:14:22,580 +conclusion is there is not significant evidence of + +188 +00:14:22,580 --> 00:14:25,160 +a difference in proportions who will vote yes + +189 +00:14:25,160 --> 00:14:31,300 +between men and women. Even it seems to me that + +190 +00:14:31,300 --> 00:14:34,550 +there is a difference between Similar proportions, + +191 +00:14:34,790 --> 00:14:38,290 +50% and 62%. Still, this difference is not + +192 +00:14:38,290 --> 00:14:41,670 +significant in order to say that there is + +193 +00:14:41,670 --> 00:14:44,730 +significant difference between the proportions of + +194 +00:14:44,730 --> 00:14:49,390 +men and women. So based on the critical value + +195 +00:14:49,390 --> 00:14:52,860 +approach. We end with this result, which is we + +196 +00:14:52,860 --> 00:14:56,120 +don't reject null hypotheses. That means the + +197 +00:14:56,120 --> 00:15:00,620 +information you have is not sufficient in order to + +198 +00:15:00,620 --> 00:15:05,080 +support alternative hypotheses. 
So your managerial + +199 +00:15:05,080 --> 00:15:07,020 +conclusion should be there is not significant + +200 +00:15:07,020 --> 00:15:12,500 +difference in proportion and proportions who will + +201 +00:15:12,500 --> 00:15:16,300 +vote yes between men and women. That's for using + +202 +00:15:16,300 --> 00:15:21,350 +critical value approach. Before continue, we have + +203 +00:15:21,350 --> 00:15:24,930 +to discuss the confidence interval for the + +204 +00:15:24,930 --> 00:15:28,890 +difference pi 1 minus pi 2. The confidence + +205 +00:15:28,890 --> 00:15:32,010 +interval, as we mentioned before, can be + +206 +00:15:32,010 --> 00:15:38,110 +constructed by point estimate, plus or minus + +207 +00:15:38,110 --> 00:15:41,590 +critical value times the standard error of the + +208 +00:15:41,590 --> 00:15:47,930 +point estimate. In this case, the point estimate + +209 +00:15:47,930 --> 00:15:52,950 +for pi 1 minus pi 2 is b1 minus b2. So that's your + +210 +00:15:52,950 --> 00:15:58,490 +point estimate, plus or minus z alpha over 2. Now + +211 +00:15:58,490 --> 00:16:03,550 +from the information from chapter 8, the standard + +212 +00:16:03,550 --> 00:16:07,070 +error of the difference, b1 minus pi 2, is given + +213 +00:16:07,070 --> 00:16:11,350 +by this equation. B1 times 1 minus B1, so B1 and + +214 +00:16:11,350 --> 00:16:14,550 +its complement, divided by the first sample size, + +215 +00:16:14,990 --> 00:16:18,030 +plus the second sample proportion times its + +216 +00:16:18,030 --> 00:16:20,510 +complement divided by the sample size of the + +217 +00:16:20,510 --> 00:16:23,830 +second sample. So that's your confidence interval. + +218 +00:16:24,870 --> 00:16:27,710 +So it looks similar to the one we have discussed + +219 +00:16:27,710 --> 00:16:34,580 +for the mu 1 minus mu 2. 
And that one we had x1 + +220 +00:16:34,580 --> 00:16:38,240 +bar minus x2 bar plus or minus z or t, it depends + +221 +00:16:38,240 --> 00:16:44,620 +on the sample sizes, times s square b times 1 over + +222 +00:16:44,620 --> 00:16:48,920 +n1 plus 1 over n2. Anyway, the confidence interval + +223 +00:16:48,920 --> 00:16:53,940 +for pi 1 minus pi 2 is given by this equation. Now + +224 +00:16:53,940 --> 00:16:58,250 +let's see how can we use the other two approaches + +225 +00:16:58,250 --> 00:17:01,570 +in order to test if there is significant + +226 +00:17:01,570 --> 00:17:04,230 +difference between the proportions of men and + +227 +00:17:04,230 --> 00:17:07,910 +women. I'm sure you don't have this slide for + +228 +00:17:07,910 --> 00:17:12,730 +computing B value and confidence interval. + +229 +00:17:30,230 --> 00:17:35,050 +Now since we are talking about two-thirds, your B + +230 +00:17:35,050 --> 00:17:37,670 +value should be the probability of Z greater than + +231 +00:17:37,670 --> 00:17:45,430 +1.31 and smaller than negative 1.31. So my B value + +232 +00:17:45,430 --> 00:17:53,330 +in this case equals Z greater than 1.31 plus Z + +233 +00:17:55,430 --> 00:17:59,570 +smaller than negative 1.31. Since we are talking + +234 +00:17:59,570 --> 00:18:03,810 +about two tail tests, so there are two rejection + +235 +00:18:03,810 --> 00:18:08,910 +regions. My Z statistic is 1.31, so it should be + +236 +00:18:08,910 --> 00:18:14,990 +here 1.31 to the right, and negative. Now, what's + +237 +00:18:14,990 --> 00:18:20,150 +the probability that the Z statistic will fall in + +238 +00:18:20,150 --> 00:18:23,330 +the rejection regions, right or left? So we have + +239 +00:18:23,330 --> 00:18:27,650 +to add. B of Z greater than 1.31 and B of Z + +240 +00:18:27,650 --> 00:18:30,750 +smaller than negative. 
Now the two areas to the + +241 +00:18:30,750 --> 00:18:34,790 +right of 1.31 and to the left of negative 1.31 are + +242 +00:18:34,790 --> 00:18:38,110 +equal because of symmetry. So just compute one and + +243 +00:18:38,110 --> 00:18:43,030 +multiply that by two, you will get the B value. So + +244 +00:18:43,030 --> 00:18:47,110 +two times. Now by using the concept in chapter + +245 +00:18:47,110 --> 00:18:50,550 +six, easily you can compute either this one or the + +246 +00:18:50,550 --> 00:18:53,030 +other one. The other one directly from the + +247 +00:18:53,030 --> 00:18:55,870 +negative z-score table. The other one you should + +248 +00:18:55,870 --> 00:18:58,710 +have the complement 1 minus, because it's smaller + +249 +00:18:58,710 --> 00:19:02,170 +than 1.1. And either way you will get this result. + +250 +00:19:05,110 --> 00:19:11,750 +Now my p-value is around 19%. Always we reject the + +251 +00:19:11,750 --> 00:19:14,930 +null hypothesis. if your B value is smaller than + +252 +00:19:14,930 --> 00:19:20,410 +alpha that always we reject null hypothesis if my + +253 +00:19:20,410 --> 00:19:25,950 +B value is smaller than alpha alpha is given 5% + +254 +00:19:25,950 --> 00:19:31,830 +since B value equals + +255 +00:19:31,830 --> 00:19:36,910 +19% which is much bigger than Much greater than + +256 +00:19:36,910 --> 00:19:41,170 +5%, so we don't reject our analysis. So my + +257 +00:19:41,170 --> 00:19:48,390 +decision is we don't reject at zero. So the same + +258 +00:19:48,390 --> 00:19:52,690 +conclusion as we reached by using critical + +259 +00:19:52,690 --> 00:19:57,850 +penalty. So again, by using B value, we have to + +260 +00:19:57,850 --> 00:20:00,770 +compute the probability that your Z statistic + +261 +00:20:00,770 --> 00:20:05,320 +falls in the rejection regions. I end with this + +262 +00:20:05,320 --> 00:20:10,600 +result, my B value is around 19%. 
As we mentioned + +263 +00:20:10,600 --> 00:20:14,180 +before, we reject null hypothesis if my B value is + +264 +00:20:14,180 --> 00:20:17,920 +smaller than alpha. Now, my B value in this case + +265 +00:20:17,920 --> 00:20:22,740 +is much, much bigger than 5%, so my decision is + +266 +00:20:22,740 --> 00:20:26,860 +don't reject null hypothesis. Any questions? + +267 +00:20:36,140 --> 00:20:41,160 +The other approach, the third one, confidence + +268 +00:20:41,160 --> 00:20:42,520 +interval approach. + +269 +00:20:46,260 --> 00:20:48,980 +Now, for the confidence interval approach, we have + +270 +00:20:48,980 --> 00:20:53,960 +this equation, b1 minus b2. Again, the point + +271 +00:20:53,960 --> 00:21:03,760 +estimate, plus or minus z square root b1 times 1 + +272 +00:21:03,760 --> 00:21:09,810 +minus b1 divided by a1. B2 times 1 minus B2 + +273 +00:21:09,810 --> 00:21:11,650 +divided by N2. + +274 +00:21:13,850 --> 00:21:20,730 +Now we have B1 and B2, so 0.5 minus 0.62. That's + +275 +00:21:20,730 --> 00:21:25,170 +your calculations from previous information we + +276 +00:21:25,170 --> 00:21:28,470 +have. Plus or minus Z alpha over 2, the critical + +277 +00:21:28,470 --> 00:21:35,030 +value again is 1.96 times Square root of P1.5 + +278 +00:21:35,030 --> 00:21:41,090 +times 1 minus 0.5 divided by N1 plus P2 62 percent + +279 +00:21:41,090 --> 00:21:46,550 +times 1 minus P2 divided by N2. 0.5 minus 62 + +280 +00:21:46,550 --> 00:21:50,650 +percent is negative 12 percent plus or minus the + +281 +00:21:50,650 --> 00:21:53,090 +margin of error. This amount is again as we + +282 +00:21:53,090 --> 00:21:56,730 +mentioned before is the margin of error is 0.177. + +283 +00:21:57,530 --> 00:21:59,830 +Now simple calculation will end with this result + +284 +00:21:59,830 --> 00:22:03,300 +that is The difference between the two proportions + +285 +00:22:03,300 --> 00:22:09,820 +lie between negative 0.296 and 0.057. 
That means + +286 +00:22:09,820 --> 00:22:14,580 +we are 95% confident that the difference between + +287 +00:22:14,580 --> 00:22:19,100 +the proportions of men who will vote yes for a + +288 +00:22:19,100 --> 00:22:27,640 +position A and men equals negative 0.297 up to 0 + +289 +00:22:27,640 --> 00:22:31,680 +.057. Now the question is since we are testing + +290 +00:22:31,680 --> 00:22:37,380 +it's zero by one minus by two equals zero the + +291 +00:22:37,380 --> 00:22:41,700 +question is does this interval contain zero or + +292 +00:22:41,700 --> 00:22:47,680 +capture zero? Now since we start here from + +293 +00:22:47,680 --> 00:22:51,230 +negative and end with positive, I mean the lower + +294 +00:22:51,230 --> 00:22:55,330 +bound is negative 0.297 and the upper bound is 0 + +295 +00:22:55,330 --> 00:23:00,610 +.057. So zero inside the interval, I mean the + +296 +00:23:00,610 --> 00:23:03,870 +confidence interval contains zero in this case, so + +297 +00:23:03,870 --> 00:23:06,650 +we don't reject the null hypothesis because maybe + +298 +00:23:06,650 --> 00:23:11,780 +the difference equals zero. So since this interval + +299 +00:23:11,780 --> 00:23:16,300 +does contain the hypothesis difference zero, so we + +300 +00:23:16,300 --> 00:23:21,100 +don't reject null hypothesis at 5% level. So the + +301 +00:23:21,100 --> 00:23:24,880 +same conclusion as we got before by using critical + +302 +00:23:24,880 --> 00:23:27,460 +value approach and de-value approach. So either + +303 +00:23:27,460 --> 00:23:32,100 +one will end with the same decision. Either reject + +304 +00:23:32,100 --> 00:23:37,020 +or fail to reject, it depends on the test itself. + +305 +00:23:38,760 --> 00:23:43,820 +That's all. Do you have any question? Any + +306 +00:23:43,820 --> 00:23:47,540 +question? So again, there are three different + +307 +00:23:47,540 --> 00:23:51,600 +approaches in order to solve this problem. 
One is + +308 +00:23:51,600 --> 00:23:55,680 +critical value approach, the standard one. The + +309 +00:23:55,680 --> 00:23:58,900 +other two are the value approach and confidence + +310 +00:23:58,900 --> 00:24:02,140 +interval. One more time, confidence interval is + +311 +00:24:02,140 --> 00:24:07,080 +only valid for + +312 +00:24:08,770 --> 00:24:13,110 +two-tailed test. Because the confidence interval + +313 +00:24:13,110 --> 00:24:16,430 +we have is just for two-tailed test, so it could + +314 +00:24:16,430 --> 00:24:20,210 +be used only for testing about two-tailed test. + +315 +00:24:23,350 --> 00:24:25,990 +As we mentioned before, I'm going to skip + +316 +00:24:25,990 --> 00:24:32,390 +hypothesis for variances as well as ANOVA test. So + +317 +00:24:32,390 --> 00:24:36,410 +that's all for chapter ten. + +318 +00:24:37,670 --> 00:24:42,390 +But now I'm going to do some of the practice + +319 +00:24:42,390 --> 00:24:43,730 +problems. + +320 +00:24:46,750 --> 00:24:52,630 +Chapter 10. To practice, let's start with some + +321 +00:24:52,630 --> 00:24:55,270 +practice problems for Chapter 10. + +322 +00:24:59,270 --> 00:25:03,770 +A few years ago, Pepsi invited consumers to take + +323 +00:25:03,770 --> 00:25:08,870 +the Pepsi challenge. Consumers were asked to + +324 +00:25:08,870 --> 00:25:13,790 +decide which of two sodas, Coke or Pepsi. They + +325 +00:25:13,790 --> 00:25:17,930 +preferred an applied taste test. Pepsi was + +326 +00:25:17,930 --> 00:25:21,930 +interested in determining what factors played a + +327 +00:25:21,930 --> 00:25:25,930 +role in people's taste preferences. One of the + +328 +00:25:25,930 --> 00:25:28,630 +factors studied was the gender of the consumer. + +329 +00:25:29,650 --> 00:25:32,350 +Below are the results of the analysis comparing + +330 +00:25:32,350 --> 00:25:36,870 +the taste preferences of men and women with the + +331 +00:25:36,870 --> 00:25:41,630 +proportions depicting preference in or for Pepsi. 
For males, a sample size
So C is + +356 +00:28:15,220 --> 00:28:20,440 +the correct answer. That's for number one. Part + +357 +00:28:20,440 --> 00:28:27,100 +two. Now suppose Pepsi wanted to test to determine + +358 +00:28:27,100 --> 00:28:35,680 +if males preferred Pepsi more than females. Using + +359 +00:28:35,680 --> 00:28:38,400 +the test statistic given, compute the appropriate + +360 +00:28:38,400 --> 00:28:43,940 +p-value for the test. Let's assume that pi 1 is + +361 +00:28:43,940 --> 00:28:48,640 +the population proportion for males who preferred + +362 +00:28:48,640 --> 00:28:56,440 +Pepsi, and pi 2 for females who prefer Pepsi. Now + +363 +00:28:56,440 --> 00:29:00,140 +he asks about suppose the company wanted to test + +364 +00:29:00,140 --> 00:29:02,760 +to determine if males prefer Pepsi more than + +365 +00:29:02,760 --> 00:29:08,080 +females. Using again the statistic given which is + +366 +00:29:08,080 --> 00:29:13,400 +2.12 for example, compute appropriately value. Now + +367 +00:29:13,400 --> 00:29:18,160 +let's state first H0 and H8. + +368 +00:29:27,450 --> 00:29:31,970 +H1 pi 1 + +369 +00:29:31,970 --> 00:29:34,410 +minus pi 2 is greater than zero. + +370 +00:29:37,980 --> 00:29:42,740 +Because it says that males prefer Pepsi more than + +371 +00:29:42,740 --> 00:29:46,940 +females. Bi-1 for males, Bi-2 for females. So I + +372 +00:29:46,940 --> 00:29:50,800 +should have Bi-1 greater than Bi-2 or Bi-1 minus + +373 +00:29:50,800 --> 00:29:54,940 +Bi-2 is positive. So it's upper case. Now, in this + +374 +00:29:54,940 --> 00:30:01,940 +case, my B value, its probability is B. + +375 +00:30:05,680 --> 00:30:07,320 +It's around this value. + +376 +00:30:12,410 --> 00:30:18,230 +1 minus b of z is smaller than 2.12. So 1 minus, + +377 +00:30:18,350 --> 00:30:21,530 +now by using the table or the z table we have. 
+ +378 +00:30:25,510 --> 00:30:29,370 +Since we are talking about 2, 1, 12, 2, 1, 2, I'm + +379 +00:30:29,370 --> 00:30:34,670 +sorry, 2, 1, 2, 2, 1, 2, so the answer is 983. So + +380 +00:30:34,670 --> 00:30:40,590 +1 minus 893, so the answer is 017. So my b value. + +381 +00:30:43,430 --> 00:30:49,890 +equals 0 and 7. So A is the correct answer. Now if + +382 +00:30:49,890 --> 00:30:53,970 +the problem is two-tailed test, it should be + +383 +00:30:53,970 --> 00:30:57,450 +multiplied by 2. So the answer, the correct should + +384 +00:30:57,450 --> 00:31:02,230 +be B. So you have A and B. If it is one-third, + +385 +00:31:02,390 --> 00:31:06,310 +your correct answer is A. If it is two-thirds, I + +386 +00:31:06,310 --> 00:31:10,550 +mean if we are testing to determine if a + +387 +00:31:10,550 --> 00:31:13,890 +difference exists, then you have to multiply this + +388 +00:31:13,890 --> 00:31:19,030 +one by two. So that's your B value. Any question? + +389 +00:31:23,010 --> 00:31:27,550 +Number three. Suppose Babs wanted to test to + +390 +00:31:27,550 --> 00:31:33,230 +determine if meals If males prefer Pepsi less than + +391 +00:31:33,230 --> 00:31:36,810 +females, using the statistic given, compute the + +392 +00:31:36,810 --> 00:31:42,990 +product B value. Now, H1 in this case, B1 is + +393 +00:31:42,990 --> 00:31:48,490 +smaller than Z by 2, by 1 smaller than 1. Now your + +394 +00:31:48,490 --> 00:31:54,490 +B value, Z is smaller than, because here it is + +395 +00:31:54,490 --> 00:31:58,050 +smaller than my statistic 2.12. + +396 +00:32:01,570 --> 00:32:04,790 +We don't write negative sign. Because the value of + +397 +00:32:04,790 --> 00:32:08,150 +the statistic is 2.12. But here we are going to + +398 +00:32:08,150 --> 00:32:11,790 +test lower tail test. So my B value is B of Z + +399 +00:32:11,790 --> 00:32:15,250 +smaller than. So smaller comes from the + +400 +00:32:15,250 --> 00:32:17,730 +alternator. This is the sign under the alternator. 
+ +401 +00:32:18,910 --> 00:32:21,810 +And you have to take the value of the Z statistic + +402 +00:32:21,810 --> 00:32:22,510 +as it is. + +403 +00:32:25,610 --> 00:32:34,100 +So B of Z is smaller than minus 3. so they need if + +404 +00:32:34,100 --> 00:32:38,060 +you got a correct answer D is the correct if B is + +405 +00:32:38,060 --> 00:32:40,420 +the correct answer you will get nine nine nine six + +406 +00:32:40,420 --> 00:32:47,620 +six that's incorrect answer any question the + +407 +00:32:47,620 --> 00:32:53,920 +correct is D number + +408 +00:32:53,920 --> 00:32:57,620 +four suppose + +409 +00:32:57,620 --> 00:33:03,650 +that Now for example, forget the information we + +410 +00:33:03,650 --> 00:33:07,390 +have so far for B value. Suppose that the two + +411 +00:33:07,390 --> 00:33:11,910 +-tailed B value was really 0734. Now suppose my B + +412 +00:33:11,910 --> 00:33:19,010 +value for two-tailed is 0734. That's for two + +413 +00:33:19,010 --> 00:33:20,210 +-tailed. This is my B value. + +414 +00:33:23,070 --> 00:33:28,490 +This is my B value. It's 0, 7, 3, 4. Now we have + +415 +00:33:28,490 --> 00:33:33,650 +four answers. Part A, B, C, and D. Which one is + +416 +00:33:33,650 --> 00:33:34,050 +the correct? + +417 +00:33:42,030 --> 00:33:46,610 +A says at 5% level, there is sufficient evidence + +418 +00:33:46,610 --> 00:33:51,510 +to conclude the proportion of males Preferring + +419 +00:33:51,510 --> 00:33:53,930 +Pepsi differs from the proportion of females + +420 +00:33:53,930 --> 00:33:58,970 +preferring Pepsi. Which one is the correct answer? + +421 +00:34:02,290 --> 00:34:04,550 +B value is 0.734. + +422 +00:34:10,370 --> 00:34:16,650 +B it says at alpha equals 10 percent. There is + +423 +00:34:16,650 --> 00:34:20,320 +sufficient evidence. to indicate the proportion of + +424 +00:34:20,320 --> 00:34:22,900 +males preferring Pepsi differs from the proportion + +425 +00:34:22,900 --> 00:34:24,160 +of females preferring Pepsi. 
+ +426 +00:34:27,240 --> 00:34:30,720 +C. At 5%, there is sufficient evidence to indicate + +427 +00:34:30,720 --> 00:34:33,260 +the proportion of males preferring Pepsi equals + +428 +00:34:33,260 --> 00:34:38,860 +the proportion of females preferring Pepsi. D. At + +429 +00:34:38,860 --> 00:34:42,480 +8% level, there is insufficient evidence to + +430 +00:34:42,480 --> 00:34:45,860 +include to indicate the proportion of males + +431 +00:34:45,860 --> 00:34:48,580 +preferring babies differs from the proportion of + +432 +00:34:48,580 --> 00:34:49,720 +females preferring babies. + +433 +00:34:54,300 --> 00:34:59,360 +Again, suppose that here it's two-tailed test. It + +434 +00:34:59,360 --> 00:35:03,420 +says two-tailed test. Two-tailed means Y1 does not + +435 +00:35:03,420 --> 00:35:09,540 +equal Y2. So in this case, we are testing Y1 + +436 +00:35:09,540 --> 00:35:16,190 +equals Y2. against by one is not by two and your B + +437 +00:35:16,190 --> 00:35:19,090 +value is zero seven three four. So which one is + +438 +00:35:19,090 --> 00:35:27,950 +the correct answer? B? D. Let's look at D. Let's + +439 +00:35:27,950 --> 00:35:29,270 +look at D. + +440 +00:35:34,610 --> 00:35:39,900 +Since B value is smaller than alpha, Since it + +441 +00:35:39,900 --> 00:35:44,840 +means we reject Insufficient means we don't reject + +442 +00:35:44,840 --> 00:35:50,380 +So D is incorrect D + +443 +00:35:50,380 --> 00:35:52,440 +is incorrect because here there is insufficient + +444 +00:35:52,440 --> 00:35:55,720 +Since + +445 +00:35:55,720 --> 00:35:58,800 +we if we reject it means that we have sufficient + +446 +00:35:58,800 --> 00:36:02,700 +evidence so support The alternative so D is + +447 +00:36:02,700 --> 00:36:07,470 +incorrect Now what's about C at five percent Five, + +448 +00:36:07,830 --> 00:36:10,570 +so this value is greater than five, so we don't + +449 +00:36:10,570 --> 00:36:13,270 +reject. So that's incorrect. + +450 +00:36:21,370 --> 00:36:28,030 +B. 
At five, at 10% now, there is sufficient + +451 +00:36:28,030 --> 00:36:34,550 +evidence. Sufficient means we reject. We reject. + +452 +00:36:35,220 --> 00:36:40,440 +Since this B value, 0.7, is smaller than alpha. 7% + +453 +00:36:40,440 --> 00:36:44,240 +is smaller than 10%. So we reject. That means you + +454 +00:36:44,240 --> 00:36:46,960 +have to read carefully. There is sufficient + +455 +00:36:46,960 --> 00:36:50,280 +evidence to include, to indicate the proportion of + +456 +00:36:50,280 --> 00:36:54,820 +males preferring Pepsi differs from the proportion + +457 +00:36:54,820 --> 00:36:58,660 +of females. That's correct. So B is the correct + +458 +00:36:58,660 --> 00:37:05,570 +state. Now look at A. A, at 5% there is sufficient + +459 +00:37:05,570 --> 00:37:09,710 +evidence? No, because this value is greater than + +460 +00:37:09,710 --> 00:37:16,970 +alpha, so we don't reject. For this one. Here we + +461 +00:37:16,970 --> 00:37:21,050 +reject because at 10% we reject. So B is the + +462 +00:37:21,050 --> 00:37:27,670 +correct answer. Make sense? Yeah, exactly, for + +463 +00:37:27,670 --> 00:37:31,850 +10%. If this value is 5%, then B is incorrect. + +464 +00:37:34,190 --> 00:37:38,690 +Again, if we change this one to be 5%, still this + +465 +00:37:38,690 --> 00:37:39,870 +statement is false. + +466 +00:37:43,050 --> 00:37:48,670 +It should be smaller than alpha in order to reject + +467 +00:37:48,670 --> 00:37:53,770 +the null hypothesis. So, B is the correct + +468 +00:37:53,770 --> 00:37:56,350 +statement. + +469 +00:37:58,180 --> 00:38:02,080 +Always insufficient means you don't reject null + +470 +00:38:02,080 --> 00:38:06,000 +hypothesis. Now for D, we reject null hypothesis + +471 +00:38:06,000 --> 00:38:10,500 +at 8%. Since this value 0.7 is smaller than alpha, + +472 +00:38:10,740 --> 00:38:14,700 +so we reject. So this is incorrect. Now for C, be + +473 +00:38:14,700 --> 00:38:19,440 +careful. 
At 5%, if this, if we change this one + +474 +00:38:19,440 --> 00:38:23,560 +little bit, there is insufficient evidence. What + +475 +00:38:23,560 --> 00:38:32,320 +do you think? About C. If we change part C as at 5 + +476 +00:38:32,320 --> 00:38:36,540 +% there is insufficient evidence to indicate the + +477 +00:38:36,540 --> 00:38:39,840 +proportion of males preferring Pepsi equals. + +478 +00:38:44,600 --> 00:38:49,940 +You cannot say equal because this one maybe yes + +479 +00:38:49,940 --> 00:38:53,200 +maybe no you don't know the exact answer. So if we + +480 +00:38:53,200 --> 00:38:56,380 +don't reject the null hypothesis then you don't + +481 +00:38:56,380 --> 00:38:58,780 +have sufficient evidence in order to support each + +482 +00:38:58,780 --> 00:39:03,800 +one. So, don't reject the zero as we mentioned + +483 +00:39:03,800 --> 00:39:10,660 +before. Don't reject the zero does not imply + +484 +00:39:10,660 --> 00:39:16,840 +if zero is true. It means the evidence, the data + +485 +00:39:16,840 --> 00:39:19,500 +you have is not sufficient to support the + +486 +00:39:19,500 --> 00:39:25,260 +alternative evidence. So, don't say equal to. So + +487 +00:39:25,260 --> 00:39:30,560 +say don't reject rather than saying accept. So V + +488 +00:39:30,560 --> 00:39:31,460 +is the correct answer. + +489 +00:39:35,940 --> 00:39:43,020 +Six, seven, and eight. Construct 90% confidence + +490 +00:39:43,020 --> 00:39:48,380 +interval, construct 95, construct 99. It's + +491 +00:39:48,380 --> 00:39:52,700 +similar, just the critical value will be changed. + +492 +00:39:53,620 --> 00:39:58,380 +Now my question is, which is the widest continence + +493 +00:39:58,380 --> 00:40:03,080 +interval in this case? 99. The last one is the + +494 +00:40:03,080 --> 00:40:08,040 +widest because here 99 is the largest continence + +495 +00:40:08,040 --> 00:40:11,160 +limit. So that means the width of the interval is + +496 +00:40:11,160 --> 00:40:12,620 +the largest in this case. 
+ +497 +00:40:17,960 --> 00:40:23,770 +For 5, 6 and 7. The question is construct either + +498 +00:40:23,770 --> 00:40:30,930 +90%, 95% or 99% for the same question. Simple + +499 +00:40:30,930 --> 00:40:33,510 +calculation will give the confidence interval for + +500 +00:40:33,510 --> 00:40:38,590 +each one. My question was, which one is the widest + +501 +00:40:38,590 --> 00:40:43,630 +confidence interval? Based on the C level, 99% + +502 +00:40:43,630 --> 00:40:47,350 +gives the widest confidence interval comparing to + +503 +00:40:47,350 --> 00:41:02,100 +90% and 95%. The exact answers for 5, 6 and 7, 0.5 + +504 +00:41:02,100 --> 00:41:08,900 +to 30 percent. For 95 percent, 0.2 to 32 percent. + +505 +00:41:10,750 --> 00:41:16,030 +For 99, negative 0.3 to 0.37. So this is the + +506 +00:41:16,030 --> 00:41:21,970 +widest. Because here we start from 5 to 30. Here + +507 +00:41:21,970 --> 00:41:26,030 +we start from lower than 5, 2%, up to upper, for + +508 +00:41:26,030 --> 00:41:31,190 +greater than 30, 32. Here we start from negative 3 + +509 +00:41:31,190 --> 00:41:35,330 +% up to 37. So this is the widest confidence + +510 +00:41:35,330 --> 00:41:41,950 +interval. Number six. Number six. number six five + +511 +00:41:41,950 --> 00:41:44,850 +six and seven are the same except we just share + +512 +00:41:44,850 --> 00:41:49,710 +the confidence level z so here we have one nine + +513 +00:41:49,710 --> 00:41:54,070 +six instead of one six four and two point five + +514 +00:41:54,070 --> 00:42:01,170 +seven it's our seven six next read the table e + +515 +00:42:12,610 --> 00:42:19,330 +Table A. Corporation randomly selects 150 + +516 +00:42:19,330 --> 00:42:25,830 +salespeople and finds that 66% who have never + +517 +00:42:25,830 --> 00:42:29,070 +taken self-improvement course would like such a + +518 +00:42:29,070 --> 00:42:33,830 +course. 
So currently, or in recent, + +519 +00:42:37,660 --> 00:42:46,940 +It says that out of 150 sales people, find that 66 + +520 +00:42:46,940 --> 00:42:51,000 +% would + +521 +00:42:51,000 --> 00:42:56,720 +like to take such course. The firm did a similar + +522 +00:42:56,720 --> 00:43:01,480 +study 10 years ago. So in the past, they had the + +523 +00:43:01,480 --> 00:43:07,430 +same study in which 60% of a random sample of 160 + +524 +00:43:07,430 --> 00:43:12,430 +salespeople wanted a self-improvement course. So + +525 +00:43:12,430 --> 00:43:13,710 +in the past, + +526 +00:43:16,430 --> 00:43:25,230 +into 160, and proportion is 60%. The groups are + +527 +00:43:25,230 --> 00:43:29,690 +assumed to be independent random samples. Let Pi 1 + +528 +00:43:29,690 --> 00:43:32,890 +and Pi 2 represent the true proportion of workers + +529 +00:43:32,890 --> 00:43:36,030 +who would like to attend a self-improvement course + +530 +00:43:36,030 --> 00:43:39,550 +in the recent study and the past study + +531 +00:43:39,550 --> 00:43:44,490 +respectively. So suppose Pi 1 and Pi 2. Pi 1 for + +532 +00:43:44,490 --> 00:43:49,470 +recent study and Pi 2 for the past study. So + +533 +00:43:49,470 --> 00:43:53,590 +that's the question. Now, question number one. + +534 +00:43:56,580 --> 00:44:00,220 +If the firm wanted to test whether this proportion + +535 +00:44:00,220 --> 00:44:06,800 +has changed from the previous study, which + +536 +00:44:06,800 --> 00:44:09,100 +represents the relevant hypothesis? + +537 +00:44:14,160 --> 00:44:18,540 +Again, the firm wanted to test whether this + +538 +00:44:18,540 --> 00:44:21,740 +proportion has changed. From the previous study, + +539 +00:44:22,160 --> 00:44:25,900 +which represents the relevant hypothesis in this + +540 +00:44:25,900 --> 00:44:26,140 +case? + +541 +00:44:33,560 --> 00:44:40,120 +Which is the correct? A is + +542 +00:44:40,120 --> 00:44:44,500 +the correct answer. Why A is the correct answer? 
+ +543 +00:44:45,000 --> 00:44:48,040 +Since we are talking about proportions, so it + +544 +00:44:48,040 --> 00:44:51,750 +should have pi. It changed, it means does not + +545 +00:44:51,750 --> 00:44:55,410 +equal 2. So A is the correct answer. Now B is + +546 +00:44:55,410 --> 00:45:00,850 +incorrect because why B is incorrect? Exactly + +547 +00:45:00,850 --> 00:45:03,770 +because under H0 we have pi 1 minus pi 2 does not + +548 +00:45:03,770 --> 00:45:08,570 +equal 0. Always equal sign appears only under the + +549 +00:45:08,570 --> 00:45:14,950 +null hypothesis. So it's the opposite here. Now C + +550 +00:45:14,950 --> 00:45:21,190 +and D talking about Upper tier or lower tier, but + +551 +00:45:21,190 --> 00:45:23,890 +here we're talking about two-tiered test, so A is + +552 +00:45:23,890 --> 00:45:24,750 +the correct answer. + +553 +00:45:29,490 --> 00:45:33,090 +This sign null hypothesis states incorrectly, + +554 +00:45:34,030 --> 00:45:38,010 +because under H0 should have equal sign, and for + +555 +00:45:38,010 --> 00:45:39,730 +alternate it should be not equal to. + +556 +00:45:42,770 --> 00:45:43,630 +Number two. + +557 +00:45:47,860 --> 00:45:51,840 +If the firm wanted to test whether a greater + +558 +00:45:51,840 --> 00:45:56,680 +proportion of workers would currently like to + +559 +00:45:56,680 --> 00:46:00,180 +attend a self-improvement course than in the past, + +560 +00:46:00,900 --> 00:46:05,840 +currently, the proportion is greater than in the + +561 +00:46:05,840 --> 00:46:13,680 +past. Which represents the relevant hypothesis? C + +562 +00:46:13,680 --> 00:46:18,180 +is the correct answer. Because it says a greater + +563 +00:46:18,180 --> 00:46:22,340 +proportion of workers work currently. So by one, + +564 +00:46:22,420 --> 00:46:26,340 +greater than by two. So C is the correct answer. 
+ +565 +00:46:31,340 --> 00:46:40,140 +It says that the firm wanted to test proportion of + +566 +00:46:40,140 --> 00:46:46,640 +workers currently study + +567 +00:46:46,640 --> 00:46:50,320 +or recent study by one represents the proportion + +568 +00:46:50,320 --> 00:46:55,140 +of workers who would like to attend the course so + +569 +00:46:55,140 --> 00:46:58,080 +that's by one greater than + +570 +00:47:01,730 --> 00:47:05,350 +In the past. So it means by one is greater than by + +571 +00:47:05,350 --> 00:47:11,870 +two. It means by one minus by two is positive. So + +572 +00:47:11,870 --> 00:47:14,590 +the alternative is by one minus two by two is + +573 +00:47:14,590 --> 00:47:16,430 +positive. So this one is the correct answer. + +574 +00:47:21,530 --> 00:47:26,910 +Exactly. If if here we have what in the past + +575 +00:47:26,910 --> 00:47:30,430 +should be it should be the correct answer. + +576 +00:47:34,690 --> 00:47:40,450 +That's to three. Any question for going to number + +577 +00:47:40,450 --> 00:47:49,590 +three? Any question for number two? Three. What is + +578 +00:47:49,590 --> 00:47:52,790 +the unbiased point estimate for the difference + +579 +00:47:52,790 --> 00:47:54,410 +between the two population proportions? + +580 +00:47:58,960 --> 00:48:04,360 +B1 minus B2 which is straight forward calculation + +581 +00:48:04,360 --> 00:48:06,980 +gives A the correct answer. Because the point + +582 +00:48:06,980 --> 00:48:13,320 +estimate in this case is B1 minus B2. B1 is 66 + +583 +00:48:13,320 --> 00:48:18,560 +percent, B2 is 60 percent, so the answer is 6 + +584 +00:48:18,560 --> 00:48:26,190 +percent. So B1 minus B2 which is 6 percent. I + +585 +00:48:26,190 --> 00:48:32,450 +think three is straightforward. 
Number four, what + +586 +00:48:32,450 --> 00:48:38,450 +is or are the critical values which, when + +587 +00:48:38,450 --> 00:48:41,870 +performing a z-test on whether population + +588 +00:48:41,870 --> 00:48:46,570 +proportions are different at 5%. Here, yes, we are + +589 +00:48:46,570 --> 00:48:52,250 +talking about two-tailed test, and alpha is 5%. So + +590 +00:48:52,250 --> 00:48:55,550 +my critical values, they are two critical values, + +591 +00:48:55,630 --> 00:48:55,830 +actually. + +592 +00:49:27,080 --> 00:49:31,000 +What is or are the critical values when testing + +593 +00:49:31,000 --> 00:49:34,260 +whether population proportions are different at 10 + +594 +00:49:34,260 --> 00:49:39,240 +%? The same instead here we have 10 instead of 5%. + +595 +00:49:40,920 --> 00:49:45,100 +So A is the correct answer. So just use the table. + +596 +00:49:47,340 --> 00:49:51,440 +Now for the previous one, we have 0 to 5, 0 to 5. + +597 +00:49:51,980 --> 00:49:57,740 +The other one, alpha is 10%. So 0, 5 to the right, + +598 +00:49:57,880 --> 00:50:03,580 +the same as to the left. So plus or minus 164. + +599 +00:50:06,700 --> 00:50:11,580 +So 4 and 5 by using the z table. + +600 +00:50:20,560 --> 00:50:25,280 +So exactly, since alpha here is 1, 0, 2, 5, so the + +601 +00:50:25,280 --> 00:50:27,880 +area becomes smaller than, so it should be z + +602 +00:50:27,880 --> 00:50:32,380 +greater than. So 1.106, the other one 1.645, + +603 +00:50:32,800 --> 00:50:38,030 +number 6. What is or are? The critical value in + +604 +00:50:38,030 --> 00:50:42,450 +testing whether the current population is higher + +605 +00:50:42,450 --> 00:50:50,990 +than. Higher means above. Above 10. Above 10, 5%. + +606 +00:50:50,990 --> 00:50:55,870 +So which? B. + +607 +00:50:58,470 --> 00:51:00,810 +B is the correct. Z alpha. + +608 +00:51:06,700 --> 00:51:08,440 +So, B is the correct answer. + +609 +00:51:11,200 --> 00:51:11,840 +7. 
+ +610 +00:51:14,740 --> 00:51:21,320 +7 and 8 we should have to calculate number 1. 7 + +611 +00:51:21,320 --> 00:51:25,880 +was the estimated standard error of the difference + +612 +00:51:25,880 --> 00:51:29,660 +between the two sample proportions. We should have + +613 +00:51:29,660 --> 00:51:30,740 +a standard error. + +614 +00:51:34,620 --> 00:51:40,320 +Square root, B dash 1 minus B dash multiplied by 1 + +615 +00:51:40,320 --> 00:51:45,300 +over N1 plus 1 over N2. And we have to find B dash + +616 +00:51:45,300 --> 00:51:49,220 +here. Let's see how can we find B dash. + +617 +00:51:52,720 --> 00:51:59,700 +B dash + +618 +00:51:59,700 --> 00:52:05,800 +equal x1 plus x2. Now what's the value of X1? + +619 +00:52:10,400 --> 00:52:16,220 +Exactly. Since B1 is X1 over N1. So that means X1 + +620 +00:52:16,220 --> 00:52:26,600 +is N1 times B1. So N1 is 150 times 60%. So that's + +621 +00:52:26,600 --> 00:52:35,980 +99. And similarly, X2 N2, which is 160, times 60% + +622 +00:52:35,980 --> 00:52:48,420 +gives 96. So your B dash is x1 plus x2 divided by + +623 +00:52:48,420 --> 00:52:55,200 +N1 plus N2, which is 150 plus 310. So complete B + +624 +00:52:55,200 --> 00:52:58,760 +dash versus the bold estimate of overall + +625 +00:52:58,760 --> 00:53:03,570 +proportion So 9 and 9 plus 9 is 6. + +626 +00:53:06,390 --> 00:53:07,730 +That's just B-. + +627 +00:53:13,210 --> 00:53:14,290 +6 to 9. + +628 +00:53:17,150 --> 00:53:23,190 +6 to 9. So this is not your answer. It's just B-. + +629 +00:53:23,770 --> 00:53:29,030 +Now take this value and the square root of 6 to 9. + +630 +00:53:30,060 --> 00:53:36,280 +times 1.629 multiplied by 1 over N1 which is 150 + +631 +00:53:36,280 --> 00:53:44,980 +plus 160. That's your standard error. B dash is + +632 +00:53:44,980 --> 00:53:49,080 +not standard error. B dash is the bold estimate of + +633 +00:53:49,080 --> 00:53:53,740 +overall proportion. Now simple calculation will + +634 +00:53:53,740 --> 00:53:59,740 +give C. 
So C is the correct answer. + +635 +00:54:07,060 --> 00:54:15,600 +What's the standard error of the difference + +636 +00:54:15,600 --> 00:54:17,600 +between the two proportions given by this + +637 +00:54:17,600 --> 00:54:23,320 +equation? Here first we have to compute P' by + +638 +00:54:23,320 --> 00:54:28,300 +using x1 plus x2 over n1 plus n2. In this example, + +639 +00:54:29,420 --> 00:54:31,280 +the x's are not given, but we have the + +640 +00:54:31,280 --> 00:54:35,010 +proportions. And we know that B1 equals X1 over + +641 +00:54:35,010 --> 00:54:39,590 +N1. So X1 equals N1 times B1. So I got 99. + +642 +00:54:40,290 --> 00:54:45,070 +Similarly, X2 and 2 times B2 is 96. So B dash is + +643 +00:54:45,070 --> 00:54:51,610 +629. So plug this value here, you will get 055. + +644 +00:54:56,170 --> 00:55:00,530 +What's the value that is satisfactory to use in + +645 +00:55:00,530 --> 00:55:02,790 +evaluating the alternative hypothesis? That there + +646 +00:55:02,790 --> 00:55:04,710 +is a difference in the two population proportions. + +647 +00:55:05,350 --> 00:55:12,250 +So we have to compute Z score, Z stat, which is V1 + +648 +00:55:12,250 --> 00:55:16,870 +minus V2, which is 6%, minus 0, divided by this + +649 +00:55:16,870 --> 00:55:24,840 +amount, 0.55. Now, 0.6 over 0.5 around 1. Six over + +650 +00:55:24,840 --> 00:55:30,700 +five, so the answer is one. So that's my Z + +651 +00:55:30,700 --> 00:55:31,000 +statistic. + +652 +00:55:33,920 --> 00:55:34,900 +That's number eight. + +653 +00:55:38,880 --> 00:55:43,100 +So the answer is C is the correct answer. So + +654 +00:55:43,100 --> 00:55:48,140 +straightforward calculations for C and D gives C + +655 +00:55:48,140 --> 00:55:51,260 +correct answer for both seven and eight. + +656 +00:55:54,240 --> 00:55:59,300 +So C is correct for each one. Now 9. 
+
+657
+00:56:08,960 --> 00:56:15,240
+In 9, the company tests to determine at 5% level
+
+658
+00:56:15,240 --> 00:56:18,680
+of significance whether the population proportion
+
+659
+00:56:18,680 --> 00:56:22,850
+has changed from the previous study. As it
+
+660
+00:56:22,850 --> 00:56:24,910
+changed, it means we are talking about two-tailed
+
+661
+00:56:24,910 --> 00:56:30,750
+tests. Which of the following is most correct? So
+
+662
+00:56:30,750 --> 00:56:33,750
+here we are talking about two-tailed tests and
+
+663
+00:56:33,750 --> 00:56:39,210
+keep in mind your Z statistic is 1.093. And again,
+
+664
+00:56:39,470 --> 00:56:43,350
+we are talking about two-tailed tests. So my
+
+665
+00:56:43,350 --> 00:56:44,690
+rejection regions are
+
+666
+00:56:48,170 --> 00:56:53,150
+Negative 1.96, critical values I mean. So the
+
+667
+00:56:53,150 --> 00:56:58,230
+critical regions are 1.96 and above or smaller
+
+668
+00:56:58,230 --> 00:57:07,550
+than minus 1.96. Now, my z statistic is 1.903. Now
+
+669
+00:57:07,550 --> 00:57:12,610
+this value falls in the non-rejection region. So
+
+670
+00:57:12,610 --> 00:57:14,310
+we don't reject the null hypothesis.
+
+671
+00:57:16,900 --> 00:57:21,400
+Ignore A and C, so the answer is either B or D.
+
+672
+00:57:22,260 --> 00:57:26,360
+Now let's read B. Don't reject the null and
+
+673
+00:57:26,360 --> 00:57:28,820
+conclude that the proportion of employees who are
+
+674
+00:57:28,820 --> 00:57:31,600
+interested in self-improvement course has not
+
+675
+00:57:31,600 --> 00:57:32,100
+changed.
+
+676
+00:57:37,040 --> 00:57:40,060
+That's correct. Because we don't reject the null
+
+677
+00:57:40,060 --> 00:57:42,900
+hypothesis. It means there is no significant
+
+678
+00:57:42,900 --> 00:57:45,760
+difference. So it has not changed. 
Now, D, don't
+
+679
+00:57:45,760 --> 00:57:47,540
+reject the null hypothesis and conclude the
+
+680
+00:57:47,540 --> 00:57:49,760
+proportion of employees who are interested in a
+
+681
+00:57:49,760 --> 00:57:52,700
+self-improvement course has increased, which is incorrect.
+
+682
+00:57:53,640 --> 00:57:57,960
+So B is the correct answer. So again, since my Z
+
+683
+00:57:57,960 --> 00:58:01,080
+statistic falls in the non-rejection region, we
+
+684
+00:58:01,080 --> 00:58:04,380
+don't reject the null hypothesis. So either B or D
+
+685
+00:58:04,380 --> 00:58:07,350
+is the correct answer. But here we are talking
+
+686
+00:58:07,350 --> 00:58:12,190
+about "do not reject" the null hypothesis.
+
+687
+00:58:12,470 --> 00:58:14,310
+That means we don't have sufficient evidence
+
+688
+00:58:14,310 --> 00:58:17,610
+to support that there is significant change between
+
+689
+00:58:17,610 --> 00:58:20,670
+the two proportions. So there is no difference. So
+
+690
+00:58:20,670 --> 00:58:23,270
+it has not changed. It's the correct one. So you
+
+691
+00:58:23,270 --> 00:58:29,890
+have to choose B. So B is the most correct answer.
+
+692
+00:58:30,830 --> 00:58:35,600
+Now, 10, 11, and 12. Talking about constructing
+
+693
+00:58:35,600 --> 00:58:41,700
+confidence interval 99, 95, and 90%. It's similar.
+
+694
+00:58:42,620 --> 00:58:46,140
+And as we mentioned before, 99% will give the
+
+695
+00:58:46,140 --> 00:58:50,940
+widest confidence interval. And the answers for
+
+696
+00:58:50,940 --> 00:59:04,300
+these are: for 10, it is negative 0.08 to 0.20. For
+
+697
+00:59:04,300 --> 00:59:11,720
+11, negative 0.05 to 0.17. For 90%, negative 0.03
+
+698
+00:59:11,720 --> 00:59:15,420
+to 0.15. So this is the widest confidence
+
+699
+00:59:15,420 --> 00:59:22,220
+interval, which was for 99%. So similar as the
+
+700
+00:59:22,220 --> 00:59:26,360
+previous one we had discussed. 
So for 99, always + +701 +00:59:26,360 --> 00:59:32,230 +we get The widest confidence interval. Any + +702 +00:59:32,230 --> 00:59:37,490 +question? That's all. Next time shall start + +703 +00:59:37,490 --> 00:59:41,350 +chapter 12, Chi-square test of independence. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..11751689fa5c3e7e1345945aa46cab9e2c2ea1ff --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/-VA6U2qwaG0_raw.srt @@ -0,0 +1,2812 @@ +1 +00:00:09,320 --> 00:00:15,760 +Last time we have discussed hypothesis test for + +2 +00:00:15,760 --> 00:00:19,440 +two population proportions. And we mentioned that + +3 +00:00:19,440 --> 00:00:25,750 +the assumptions are for the first sample. n times + +4 +00:00:25,750 --> 00:00:28,910 +pi should be at least 5, and also n times 1 minus + +5 +00:00:28,910 --> 00:00:33,050 +pi is also at least 5. The same for the second + +6 +00:00:33,050 --> 00:00:37,570 +sample, n 2 times pi 2 is at least 5, as well as n + +7 +00:00:37,570 --> 00:00:42,860 +times 1 minus pi 2 is also at least 5. Also, we + +8 +00:00:42,860 --> 00:00:46,000 +discussed that the point estimate for the + +9 +00:00:46,000 --> 00:00:51,700 +difference of Pi 1 minus Pi 2 is given by P1 minus + +10 +00:00:51,700 --> 00:00:57,160 +P2. That means this difference is unbiased point + +11 +00:00:57,160 --> 00:01:03,160 +estimate of Pi 1 minus Pi 2. Similarly, P2 minus + +12 +00:01:03,160 --> 00:01:06,700 +P1 is the point estimate of the difference Pi 2 + +13 +00:01:06,700 --> 00:01:08,160 +minus Pi 1. + +14 +00:01:11,260 --> 00:01:16,140 +We also discussed that the bold estimate for the + +15 +00:01:16,140 --> 00:01:20,900 +overall proportion is given by this equation. So B + +16 +00:01:20,900 --> 00:01:25,980 +dash is called the bold estimate for the overall + +17 +00:01:25,980 --> 00:01:31,740 +proportion. 
X1 and X2 are the number of items of + +18 +00:01:31,740 --> 00:01:35,170 +interest. And the two samples that you have in one + +19 +00:01:35,170 --> 00:01:39,150 +and two, where in one and two are the sample sizes + +20 +00:01:39,150 --> 00:01:42,110 +for the first and the second sample respectively. + +21 +00:01:43,470 --> 00:01:46,830 +The appropriate statistic in this course is given + +22 +00:01:46,830 --> 00:01:52,160 +by this equation. Z-score or Z-statistic is the + +23 +00:01:52,160 --> 00:01:56,340 +point estimate of the difference pi 1 minus pi 2 + +24 +00:01:56,340 --> 00:02:00,620 +minus the hypothesized value under if 0, I mean if + +25 +00:02:00,620 --> 00:02:05,200 +0 is true, most of the time this term equals 0, + +26 +00:02:05,320 --> 00:02:10,480 +divided by this quantity is called the standard + +27 +00:02:10,480 --> 00:02:14,100 +error of the estimate, which is square root of B + +28 +00:02:14,100 --> 00:02:17,660 +dash 1 minus B dash times 1 over N1 plus 1 over + +29 +00:02:17,660 --> 00:02:22,160 +N2. So this is your Z statistic. The critical + +30 +00:02:22,160 --> 00:02:27,980 +regions. I'm sorry, first, the appropriate null + +31 +00:02:27,980 --> 00:02:32,200 +and alternative hypothesis are given by three + +32 +00:02:32,200 --> 00:02:38,280 +cases we have. Either two-tailed test or one + +33 +00:02:38,280 --> 00:02:42,540 +-tailed and it has either upper or lower tail. So + +34 +00:02:42,540 --> 00:02:46,140 +for example, for lower-tailed test, We are going + +35 +00:02:46,140 --> 00:02:51,500 +to test to see if a proportion 1 is smaller than a + +36 +00:02:51,500 --> 00:02:54,560 +proportion 2. This one can be written as pi 1 + +37 +00:02:54,560 --> 00:02:59,080 +smaller than pi 2 under H1, or the difference + +38 +00:02:59,080 --> 00:03:01,160 +between these two population proportions is + +39 +00:03:01,160 --> 00:03:04,940 +negative, is smaller than 0. 
So either you may + +40 +00:03:04,940 --> 00:03:08,660 +write the alternative as pi 1 smaller than pi 2, + +41 +00:03:09,180 --> 00:03:11,860 +or the difference, which is pi 1 minus pi 2 + +42 +00:03:11,860 --> 00:03:15,730 +smaller than 0. For sure, the null hypothesis is + +43 +00:03:15,730 --> 00:03:18,830 +the opposite of the alternative hypothesis. So if + +44 +00:03:18,830 --> 00:03:22,310 +this is one by one smaller than by two, so the + +45 +00:03:22,310 --> 00:03:24,710 +opposite by one is greater than or equal to two. + +46 +00:03:25,090 --> 00:03:27,670 +Similarly, but the opposite side here, we are + +47 +00:03:27,670 --> 00:03:31,530 +talking about the upper tail of probability. So + +48 +00:03:31,530 --> 00:03:33,910 +under the alternative hypothesis, by one is + +49 +00:03:33,910 --> 00:03:37,870 +greater than by two. Or it could be written as by + +50 +00:03:37,870 --> 00:03:40,150 +one minus by two is positive, that means greater + +51 +00:03:40,150 --> 00:03:45,970 +than zero. While for the two-tailed test, for the + +52 +00:03:45,970 --> 00:03:49,310 +alternative hypothesis, we have Y1 does not equal + +53 +00:03:49,310 --> 00:03:51,870 +Y2. In this case, we are saying there is no + +54 +00:03:51,870 --> 00:03:55,950 +difference under H0, and there is a difference. + +55 +00:03:56,920 --> 00:03:59,680 +should be under each one. Difference means either + +56 +00:03:59,680 --> 00:04:03,220 +greater than or smaller than. So we have this not + +57 +00:04:03,220 --> 00:04:06,800 +equal sign. So by one does not equal by two. Or it + +58 +00:04:06,800 --> 00:04:08,980 +could be written as by one minus by two is not + +59 +00:04:08,980 --> 00:04:12,320 +equal to zero. It's the same as the one we have + +60 +00:04:12,320 --> 00:04:15,100 +discussed when we are talking about comparison of + +61 +00:04:15,100 --> 00:04:19,500 +two population means. We just replaced these by's + +62 +00:04:19,500 --> 00:04:24,960 +by mus. 
Finally, the rejection regions are given
+
+63
+00:04:24,960 --> 00:04:30,000
+by three different charts here for the lower tail
+
+64
+00:04:30,000 --> 00:04:35,500
+test. We reject the null hypothesis if the value
+
+65
+00:04:35,500 --> 00:04:37,500
+of the test statistic falls in the rejection
+
+66
+00:04:37,500 --> 00:04:40,940
+region, which is in the left side. So that means
+
+67
+00:04:40,940 --> 00:04:44,040
+we reject H0 if this statistic is smaller than
+
+68
+00:04:44,040 --> 00:04:49,440
+negative z alpha. That's for lower tail test. On the
+
+69
+00:04:49,440 --> 00:04:51,620
+other hand, for upper-tailed tests, your rejection
+
+70
+00:04:51,620 --> 00:04:54,800
+region is the right side, so you reject the null
+
+71
+00:04:54,800 --> 00:04:57,160
+hypothesis if this statistic is greater than Z
+
+72
+00:04:57,160 --> 00:05:01,700
+alpha. In addition, for two-tailed tests, there
+
+73
+00:05:01,700 --> 00:05:04,300
+are two rejection regions. One is on the right
+
+74
+00:05:04,300 --> 00:05:07,000
+side, the other on the left side. Here, alpha is
+
+75
+00:05:07,000 --> 00:05:10,960
+split into two halves, alpha over two to the
+
+76
+00:05:10,960 --> 00:05:14,060
+right, similarly alpha over two to the left side.
+
+77
+00:05:14,640 --> 00:05:16,900
+Here, we reject the null hypothesis if your Z
+
+78
+00:05:16,900 --> 00:05:20,900
+statistic falls in the rejection region here, that
+
+79
+00:05:20,900 --> 00:05:24,820
+means z is smaller than negative z alpha over 2 or
+
+80
+00:05:24,820 --> 00:05:30,360
+z is greater than z alpha over 2. Now this one, I
+
+81
+00:05:30,360 --> 00:05:33,980
+mean the rejection regions are the same for either
+
+82
+00:05:33,980 --> 00:05:38,540
+one sample t-test or two sample t-test, either for
+
+83
+00:05:38,540 --> 00:05:41,560
+the population proportion or the population mean.
+
+84
+00:05:42,180 --> 00:05:46,120
+We have the same rejection regions. 
Sometimes we + +85 +00:05:46,120 --> 00:05:49,800 +replace z by t. It depends if we are talking about + +86 +00:05:49,800 --> 00:05:54,760 +small samples and sigmas unknown. So that's the + +87 +00:05:54,760 --> 00:05:58,160 +basic concepts about testing or hypothesis testing + +88 +00:05:58,160 --> 00:06:01,200 +for the comparison between two population + +89 +00:06:01,200 --> 00:06:05,140 +proportions. And we stopped at this point. I will + +90 +00:06:05,140 --> 00:06:08,780 +give three examples, three examples for testing + +91 +00:06:08,780 --> 00:06:11,660 +about two population proportions. The first one is + +92 +00:06:11,660 --> 00:06:17,050 +given here. It says that, is there a significant + +93 +00:06:17,050 --> 00:06:20,490 +difference between the proportion of men and the + +94 +00:06:20,490 --> 00:06:24,170 +proportion of women who will vote yes on a + +95 +00:06:24,170 --> 00:06:24,630 +proposition? + +96 +00:06:28,220 --> 00:06:30,480 +In this case, we are talking about a proportion. + +97 +00:06:30,840 --> 00:06:34,520 +So this problem tests for a proportion. We have + +98 +00:06:34,520 --> 00:06:38,980 +two proportions here because we have two samples + +99 +00:06:38,980 --> 00:06:43,800 +for two population spheres, men and women. So + +100 +00:06:43,800 --> 00:06:46,600 +there are two populations. So we are talking about + +101 +00:06:46,600 --> 00:06:50,620 +two population proportions. Now, we have to state + +102 +00:06:50,620 --> 00:06:53,440 +carefully now an alternative hypothesis. So for + +103 +00:06:53,440 --> 00:06:57,640 +example, let's say that phi 1 is the population + +104 +00:06:57,640 --> 00:07:07,140 +proportion, proportion of men who will vote for a + +105 +00:07:07,140 --> 00:07:11,740 +proposition A for example, for vote yes, for vote + +106 +00:07:11,740 --> 00:07:13,300 +yes for proposition A. + +107 +00:07:30,860 --> 00:07:36,460 +is the same but of men, of women, I'm sorry. 
So + +108 +00:07:36,460 --> 00:07:42,160 +the first one for men and the other of + +109 +00:07:42,160 --> 00:07:48,400 +women. Now, in a random, so in this case, we are + +110 +00:07:48,400 --> 00:07:51,020 +talking about difference between two population + +111 +00:07:51,020 --> 00:07:52,940 +proportions, so by one equals by two. + +112 +00:07:56,920 --> 00:08:00,820 +Your alternate hypothesis should be, since the + +113 +00:08:00,820 --> 00:08:03,220 +problem talks about, is there a significant + +114 +00:08:03,220 --> 00:08:07,140 +difference? Difference means two tails. So it + +115 +00:08:07,140 --> 00:08:12,740 +should be pi 1 does not equal pi 2. Pi 1 does not + +116 +00:08:12,740 --> 00:08:17,400 +equal pi 2. So there's still one state null and + +117 +00:08:17,400 --> 00:08:20,680 +alternate hypothesis. Now, in a random sample of + +118 +00:08:20,680 --> 00:08:28,880 +36 out of 72 men, And 31 of 50 women indicated + +119 +00:08:28,880 --> 00:08:33,380 +they would vote yes. So for example, if X1 + +120 +00:08:33,380 --> 00:08:39,000 +represents number of men who would vote yes, that + +121 +00:08:39,000 --> 00:08:45,720 +means X1 equals 36 in + +122 +00:08:45,720 --> 00:08:54,950 +172. So that's for men. Now for women. 31 out of + +123 +00:08:54,950 --> 00:08:59,370 +50. So 50 is the sample size for the second + +124 +00:08:59,370 --> 00:09:05,890 +sample. Now it's ask about this test about the + +125 +00:09:05,890 --> 00:09:08,230 +difference between the two population proportion + +126 +00:09:08,230 --> 00:09:13,890 +at 5% level of significance. So alpha is given to + +127 +00:09:13,890 --> 00:09:19,390 +be 5%. So that's all the information you have in + +128 +00:09:19,390 --> 00:09:23,740 +order to answer this question. So based on this + +129 +00:09:23,740 --> 00:09:27,220 +statement, we state null and alternative + +130 +00:09:27,220 --> 00:09:30,160 +hypothesis. 
Now based on this information, we can + +131 +00:09:30,160 --> 00:09:32,220 +solve the problem by using three different + +132 +00:09:32,220 --> 00:09:39,220 +approaches. Critical value approach, B value, and + +133 +00:09:39,220 --> 00:09:42,320 +confidence interval approach. Because we can use + +134 +00:09:42,320 --> 00:09:44,220 +confidence interval approach because we are + +135 +00:09:44,220 --> 00:09:47,380 +talking about two-tailed test. So let's start with + +136 +00:09:47,380 --> 00:09:50,240 +the basic one, critical value approach. So + +137 +00:09:50,240 --> 00:09:50,980 +approach A. + +138 +00:10:01,140 --> 00:10:03,400 +Now since we are talking about two-tailed test, + +139 +00:10:04,340 --> 00:10:08,120 +your critical value should be plus or minus z + +140 +00:10:08,120 --> 00:10:12,780 +alpha over 2. And since alpha is 5% so the + +141 +00:10:12,780 --> 00:10:18,420 +critical values are z + +142 +00:10:18,420 --> 00:10:26,650 +plus or minus 0.25 which is 196. Or you may use + +143 +00:10:26,650 --> 00:10:30,050 +the standard normal table in order to find the + +144 +00:10:30,050 --> 00:10:33,330 +critical values. Or just if you remember that + +145 +00:10:33,330 --> 00:10:37,150 +values from previous time. So the critical regions + +146 +00:10:37,150 --> 00:10:47,030 +are above 196 or smaller than negative 196. I have + +147 +00:10:47,030 --> 00:10:51,090 +to compute the Z statistic. Now Z statistic is + +148 +00:10:51,090 --> 00:10:55,290 +given by this equation. Z stat equals B1 minus B2. + +149 +00:10:55,730 --> 00:11:03,010 +minus Pi 1 minus Pi 2. This quantity divided by P + +150 +00:11:03,010 --> 00:11:09,690 +dash 1 minus P dash multiplied by 1 over N1 plus 1 + +151 +00:11:09,690 --> 00:11:17,950 +over N1. Here we have to find B1, B2. So B1 equals + +152 +00:11:17,950 --> 00:11:21,910 +X1 over N1. X1 is given. + +153 +00:11:27,180 --> 00:11:32,160 +to that means 50%. 
Similarly, + +154 +00:11:32,920 --> 00:11:39,840 +B2 is A equals X2 over into X to the third power + +155 +00:11:39,840 --> 00:11:48,380 +over 50, so that's 60%. Also, we have to compute + +156 +00:11:48,380 --> 00:11:55,500 +the bold estimate of the overall proportion of B + +157 +00:11:55,500 --> 00:11:55,860 +dash + +158 +00:12:01,890 --> 00:12:07,130 +What are the sample sizes we have? X1 and X2. 36 + +159 +00:12:07,130 --> 00:12:14,550 +plus 31. Over 72 plus 7. 72 plus 7. So that means + +160 +00:12:14,550 --> 00:12:22,310 +67 over 152.549. + +161 +00:12:24,690 --> 00:12:25,610 +120. + +162 +00:12:30,400 --> 00:12:34,620 +So simple calculations give B1 and B2, as well as + +163 +00:12:34,620 --> 00:12:39,340 +B dash. Now, plug these values on the Z-state + +164 +00:12:39,340 --> 00:12:43,540 +formula, we get the value that is this. So first, + +165 +00:12:44,600 --> 00:12:47,560 +state null and alternative hypothesis, pi 1 minus + +166 +00:12:47,560 --> 00:12:50,080 +pi 2 equals 0. That means the two populations are + +167 +00:12:50,080 --> 00:12:55,290 +equal. We are going to test this one against Pi 1 + +168 +00:12:55,290 --> 00:12:58,570 +minus Pi 2 is not zero. That means there is a + +169 +00:12:58,570 --> 00:13:02,430 +significant difference between proportions. Now + +170 +00:13:02,430 --> 00:13:06,290 +for men, we got proportion of 50%. That's for the + +171 +00:13:06,290 --> 00:13:09,370 +similar proportion. And similar proportion for + +172 +00:13:09,370 --> 00:13:15,390 +women who will vote yes for position A is 62%. The + +173 +00:13:15,390 --> 00:13:19,530 +pooled estimate for the overall proportion equals + +174 +00:13:19,530 --> 00:13:24,530 +0.549. Now, based on this information, we can + +175 +00:13:24,530 --> 00:13:27,610 +calculate the Z statistic. Straightforward + +176 +00:13:27,610 --> 00:13:33,470 +calculation, you will end with this result. So, Z + +177 +00:13:33,470 --> 00:13:39,350 +start negative 1.31. 
+ +178 +00:13:41,790 --> 00:13:44,950 +So, we have to compute this one before either + +179 +00:13:44,950 --> 00:13:47,650 +before using any of the approaches we have. + +180 +00:13:50,940 --> 00:13:52,960 +If we are going to use their critical value + +181 +00:13:52,960 --> 00:13:55,140 +approach, we have to find Z alpha over 2 which is + +182 +00:13:55,140 --> 00:13:59,320 +1 more than 6. Now the question is, is this value + +183 +00:13:59,320 --> 00:14:05,140 +falling the rejection regions right or left? it's + +184 +00:14:05,140 --> 00:14:10,660 +clear that this value, negative 1.31, lies in the + +185 +00:14:10,660 --> 00:14:12,960 +non-rejection region, so we don't reject a null + +186 +00:14:12,960 --> 00:14:17,900 +hypothesis. So my decision is don't reject H0. My + +187 +00:14:17,900 --> 00:14:22,580 +conclusion is there is not significant evidence of + +188 +00:14:22,580 --> 00:14:25,160 +a difference in proportions who will vote yes + +189 +00:14:25,160 --> 00:14:31,300 +between men and women. Even it seems to me that + +190 +00:14:31,300 --> 00:14:34,550 +there is a difference between Similar proportions, + +191 +00:14:34,790 --> 00:14:38,290 +50% and 62%. Still, this difference is not + +192 +00:14:38,290 --> 00:14:41,670 +significant in order to say that there is + +193 +00:14:41,670 --> 00:14:44,730 +significant difference between the proportions of + +194 +00:14:44,730 --> 00:14:49,390 +men and women. So based on the critical value + +195 +00:14:49,390 --> 00:14:52,860 +approach. We end with this result, which is we + +196 +00:14:52,860 --> 00:14:56,120 +don't reject null hypotheses. That means the + +197 +00:14:56,120 --> 00:15:00,620 +information you have is not sufficient in order to + +198 +00:15:00,620 --> 00:15:05,080 +support alternative hypotheses. 
So your managerial + +199 +00:15:05,080 --> 00:15:07,020 +conclusion should be there is not significant + +200 +00:15:07,020 --> 00:15:12,500 +difference in proportion and proportions who will + +201 +00:15:12,500 --> 00:15:16,300 +vote yes between men and women. That's for using + +202 +00:15:16,300 --> 00:15:21,350 +critical value approach. Before continue, we have + +203 +00:15:21,350 --> 00:15:24,930 +to discuss the confidence interval for the + +204 +00:15:24,930 --> 00:15:28,890 +difference pi 1 minus pi 2. The confidence + +205 +00:15:28,890 --> 00:15:32,010 +interval, as we mentioned before, can be + +206 +00:15:32,010 --> 00:15:38,110 +constructed by point estimate, plus or minus + +207 +00:15:38,110 --> 00:15:41,590 +critical value times the standard error of the + +208 +00:15:41,590 --> 00:15:47,930 +point estimate. In this case, the point estimate + +209 +00:15:47,930 --> 00:15:52,950 +for pi 1 minus pi 2 is b1 minus b2. So that's your + +210 +00:15:52,950 --> 00:15:58,490 +point estimate, plus or minus z alpha over 2. Now + +211 +00:15:58,490 --> 00:16:03,550 +from the information from chapter 8, the standard + +212 +00:16:03,550 --> 00:16:07,070 +error of the difference, b1 minus pi 2, is given + +213 +00:16:07,070 --> 00:16:11,350 +by this equation. B1 times 1 minus B1, so B1 and + +214 +00:16:11,350 --> 00:16:14,550 +its complement, divided by the first sample size, + +215 +00:16:14,990 --> 00:16:18,030 +plus the second sample proportion times its + +216 +00:16:18,030 --> 00:16:20,510 +complement divided by the sample size of the + +217 +00:16:20,510 --> 00:16:23,830 +second sample. So that's your confidence interval. + +218 +00:16:24,870 --> 00:16:27,710 +So it looks similar to the one we have discussed + +219 +00:16:27,710 --> 00:16:34,580 +for the mu 1 minus mu 2. 
And that one we had x1 + +220 +00:16:34,580 --> 00:16:38,240 +bar minus x2 bar plus or minus z or t, it depends + +221 +00:16:38,240 --> 00:16:44,620 +on the sample sizes, times s square b times 1 over + +222 +00:16:44,620 --> 00:16:48,920 +n1 plus 1 over n2. Anyway, the confidence interval + +223 +00:16:48,920 --> 00:16:53,940 +for pi 1 minus pi 2 is given by this equation. Now + +224 +00:16:53,940 --> 00:16:58,250 +let's see how can we use the other two approaches + +225 +00:16:58,250 --> 00:17:01,570 +in order to test if there is significant + +226 +00:17:01,570 --> 00:17:04,230 +difference between the proportions of men and + +227 +00:17:04,230 --> 00:17:07,910 +women. I'm sure you don't have this slide for + +228 +00:17:07,910 --> 00:17:12,730 +computing B value and confidence interval. + +229 +00:17:30,230 --> 00:17:35,050 +Now since we are talking about two-thirds, your B + +230 +00:17:35,050 --> 00:17:37,670 +value should be the probability of Z greater than + +231 +00:17:37,670 --> 00:17:45,430 +1.31 and smaller than negative 1.31. So my B value + +232 +00:17:45,430 --> 00:17:53,330 +in this case equals Z greater than 1.31 plus Z + +233 +00:17:55,430 --> 00:17:59,570 +smaller than negative 1.31. Since we are talking + +234 +00:17:59,570 --> 00:18:03,810 +about two tail tests, so there are two rejection + +235 +00:18:03,810 --> 00:18:08,910 +regions. My Z statistic is 1.31, so it should be + +236 +00:18:08,910 --> 00:18:14,990 +here 1.31 to the right, and negative. Now, what's + +237 +00:18:14,990 --> 00:18:20,150 +the probability that the Z statistic will fall in + +238 +00:18:20,150 --> 00:18:23,330 +the rejection regions, right or left? So we have + +239 +00:18:23,330 --> 00:18:27,650 +to add. B of Z greater than 1.31 and B of Z + +240 +00:18:27,650 --> 00:18:30,750 +smaller than negative. 
Now the two areas to the + +241 +00:18:30,750 --> 00:18:34,790 +right of 1.31 and to the left of negative 1.31 are + +242 +00:18:34,790 --> 00:18:38,110 +equal because of symmetry. So just compute one and + +243 +00:18:38,110 --> 00:18:43,030 +multiply that by two, you will get the B value. So + +244 +00:18:43,030 --> 00:18:47,110 +two times. Now by using the concept in chapter + +245 +00:18:47,110 --> 00:18:50,550 +six, easily you can compute either this one or the + +246 +00:18:50,550 --> 00:18:53,030 +other one. The other one directly from the + +247 +00:18:53,030 --> 00:18:55,870 +negative z-score table. The other one you should + +248 +00:18:55,870 --> 00:18:58,710 +have the complement 1 minus, because it's smaller + +249 +00:18:58,710 --> 00:19:02,170 +than 1.1. And either way you will get this result. + +250 +00:19:05,110 --> 00:19:11,750 +Now my p-value is around 19%. Always we reject the + +251 +00:19:11,750 --> 00:19:14,930 +null hypothesis. if your B value is smaller than + +252 +00:19:14,930 --> 00:19:20,410 +alpha that always we reject null hypothesis if my + +253 +00:19:20,410 --> 00:19:25,950 +B value is smaller than alpha alpha is given 5% + +254 +00:19:25,950 --> 00:19:31,830 +since B value equals + +255 +00:19:31,830 --> 00:19:36,910 +19% which is much bigger than Much greater than + +256 +00:19:36,910 --> 00:19:41,170 +5%, so we don't reject our analysis. So my + +257 +00:19:41,170 --> 00:19:48,390 +decision is we don't reject at zero. So the same + +258 +00:19:48,390 --> 00:19:52,690 +conclusion as we reached by using critical + +259 +00:19:52,690 --> 00:19:57,850 +penalty. So again, by using B value, we have to + +260 +00:19:57,850 --> 00:20:00,770 +compute the probability that your Z statistic + +261 +00:20:00,770 --> 00:20:05,320 +falls in the rejection regions. I end with this + +262 +00:20:05,320 --> 00:20:10,600 +result, my B value is around 19%. 
As we mentioned + +263 +00:20:10,600 --> 00:20:14,180 +before, we reject null hypothesis if my B value is + +264 +00:20:14,180 --> 00:20:17,920 +smaller than alpha. Now, my B value in this case + +265 +00:20:17,920 --> 00:20:22,740 +is much, much bigger than 5%, so my decision is + +266 +00:20:22,740 --> 00:20:26,860 +don't reject null hypothesis. Any questions? + +267 +00:20:36,140 --> 00:20:41,160 +The other approach, the third one, confidence + +268 +00:20:41,160 --> 00:20:42,520 +interval approach. + +269 +00:20:46,260 --> 00:20:48,980 +Now, for the confidence interval approach, we have + +270 +00:20:48,980 --> 00:20:53,960 +this equation, b1 minus b2. Again, the point + +271 +00:20:53,960 --> 00:21:03,760 +estimate, plus or minus z square root b1 times 1 + +272 +00:21:03,760 --> 00:21:09,810 +minus b1 divided by a1. B2 times 1 minus B2 + +273 +00:21:09,810 --> 00:21:11,650 +divided by N2. + +274 +00:21:13,850 --> 00:21:20,730 +Now we have B1 and B2, so 0.5 minus 0.62. That's + +275 +00:21:20,730 --> 00:21:25,170 +your calculations from previous information we + +276 +00:21:25,170 --> 00:21:28,470 +have. Plus or minus Z alpha over 2, the critical + +277 +00:21:28,470 --> 00:21:35,030 +value again is 1.96 times Square root of P1.5 + +278 +00:21:35,030 --> 00:21:41,090 +times 1 minus 0.5 divided by N1 plus P2 62 percent + +279 +00:21:41,090 --> 00:21:46,550 +times 1 minus P2 divided by N2. 0.5 minus 62 + +280 +00:21:46,550 --> 00:21:50,650 +percent is negative 12 percent plus or minus the + +281 +00:21:50,650 --> 00:21:53,090 +margin of error. This amount is again as we + +282 +00:21:53,090 --> 00:21:56,730 +mentioned before is the margin of error is 0.177. + +283 +00:21:57,530 --> 00:21:59,830 +Now simple calculation will end with this result + +284 +00:21:59,830 --> 00:22:03,300 +that is The difference between the two proportions + +285 +00:22:03,300 --> 00:22:09,820 +lie between negative 0.296 and 0.057. 
That means + +286 +00:22:09,820 --> 00:22:14,580 +we are 95% confident that the difference between + +287 +00:22:14,580 --> 00:22:19,100 +the proportions of men who will vote yes for a + +288 +00:22:19,100 --> 00:22:27,640 +position A and men equals negative 0.297 up to 0 + +289 +00:22:27,640 --> 00:22:31,680 +.057. Now the question is since we are testing + +290 +00:22:31,680 --> 00:22:37,380 +it's zero by one minus by two equals zero the + +291 +00:22:37,380 --> 00:22:41,700 +question is does this interval contain zero or + +292 +00:22:41,700 --> 00:22:47,680 +capture zero? Now since we start here from + +293 +00:22:47,680 --> 00:22:51,230 +negative and end with positive, I mean the lower + +294 +00:22:51,230 --> 00:22:55,330 +bound is negative 0.297 and the upper bound is 0 + +295 +00:22:55,330 --> 00:23:00,610 +.057. So zero inside the interval, I mean the + +296 +00:23:00,610 --> 00:23:03,870 +confidence interval contains zero in this case, so + +297 +00:23:03,870 --> 00:23:06,650 +we don't reject the null hypothesis because maybe + +298 +00:23:06,650 --> 00:23:11,780 +the difference equals zero. So since this interval + +299 +00:23:11,780 --> 00:23:16,300 +does contain the hypothesis difference zero, so we + +300 +00:23:16,300 --> 00:23:21,100 +don't reject null hypothesis at 5% level. So the + +301 +00:23:21,100 --> 00:23:24,880 +same conclusion as we got before by using critical + +302 +00:23:24,880 --> 00:23:27,460 +value approach and de-value approach. So either + +303 +00:23:27,460 --> 00:23:32,100 +one will end with the same decision. Either reject + +304 +00:23:32,100 --> 00:23:37,020 +or fail to reject, it depends on the test itself. + +305 +00:23:38,760 --> 00:23:43,820 +That's all. Do you have any question? Any + +306 +00:23:43,820 --> 00:23:47,540 +question? So again, there are three different + +307 +00:23:47,540 --> 00:23:51,600 +approaches in order to solve this problem. 
One is + +308 +00:23:51,600 --> 00:23:55,680 +critical value approach, the standard one. The + +309 +00:23:55,680 --> 00:23:58,900 +other two are the value approach and confidence + +310 +00:23:58,900 --> 00:24:02,140 +interval. One more time, confidence interval is + +311 +00:24:02,140 --> 00:24:07,080 +only valid for + +312 +00:24:08,770 --> 00:24:13,110 +two-tailed test. Because the confidence interval + +313 +00:24:13,110 --> 00:24:16,430 +we have is just for two-tailed test, so it could + +314 +00:24:16,430 --> 00:24:20,210 +be used only for testing about two-tailed test. + +315 +00:24:23,350 --> 00:24:25,990 +As we mentioned before, I'm going to skip + +316 +00:24:25,990 --> 00:24:32,390 +hypothesis for variances as well as ANOVA test. So + +317 +00:24:32,390 --> 00:24:36,410 +that's all for chapter ten. + +318 +00:24:37,670 --> 00:24:42,390 +But now I'm going to do some of the practice + +319 +00:24:42,390 --> 00:24:43,730 +problems. + +320 +00:24:46,750 --> 00:24:52,630 +Chapter 10. To practice, let's start with some + +321 +00:24:52,630 --> 00:24:55,270 +practice problems for Chapter 10. + +322 +00:24:59,270 --> 00:25:03,770 +A few years ago, Pepsi invited consumers to take + +323 +00:25:03,770 --> 00:25:08,870 +the Pepsi challenge. Consumers were asked to + +324 +00:25:08,870 --> 00:25:13,790 +decide which of two sodas, Coke or Pepsi. They + +325 +00:25:13,790 --> 00:25:17,930 +preferred an applied taste test. Pepsi was + +326 +00:25:17,930 --> 00:25:21,930 +interested in determining what factors played a + +327 +00:25:21,930 --> 00:25:25,930 +role in people's taste preferences. One of the + +328 +00:25:25,930 --> 00:25:28,630 +factors studied was the gender of the consumer. + +329 +00:25:29,650 --> 00:25:32,350 +Below are the results of the analysis comparing + +330 +00:25:32,350 --> 00:25:36,870 +the taste preferences of men and women with the + +331 +00:25:36,870 --> 00:25:41,630 +proportions depicting preference in or for Pepsi. 
+ +332 +00:25:42,810 --> 00:25:49,190 +For meals, size + +333 +00:25:49,190 --> 00:25:57,990 +of 109. So that's your N1. And proportion. + +334 +00:26:00,480 --> 00:26:09,100 +for males is around 4.2. For females, + +335 +00:26:11,640 --> 00:26:25,720 +N2 equals 52, and proportion of females, 25%. The + +336 +00:26:25,720 --> 00:26:29,870 +difference between proportions of men and women or + +337 +00:26:29,870 --> 00:26:35,590 +males and females is 0.172, around 0.172. And this + +338 +00:26:35,590 --> 00:26:41,530 +statistic is given by 2.118, so approximately 2 + +339 +00:26:41,530 --> 00:26:47,170 +.12. Now, based on this result, based on this + +340 +00:26:47,170 --> 00:26:49,090 +information, question number one, + +341 +00:26:53,910 --> 00:26:58,690 +To determine if a difference exists in the test + +342 +00:26:58,690 --> 00:27:04,490 +preferences of men and women, give the correct + +343 +00:27:04,490 --> 00:27:06,970 +alternative hypothesis that lives through a test. + +344 +00:27:08,830 --> 00:27:15,830 +A. B. Why B? Because the test defines between the + +345 +00:27:15,830 --> 00:27:18,650 +new form A and the new form B. Because if we say + +346 +00:27:18,650 --> 00:27:21,910 +that H1 is equal to U1 minus M equals F, + +347 +00:27:28,970 --> 00:27:34,190 +So the correct answer is B? B. So that's + +348 +00:27:34,190 --> 00:27:40,830 +incorrect. C. Why? Why C is the correct answer? + +349 +00:27:45,470 --> 00:27:46,070 +Because + +350 +00:27:52,720 --> 00:27:56,500 +Y is not equal because we have difference. So + +351 +00:27:56,500 --> 00:27:59,380 +since we have difference here, it should be not + +352 +00:27:59,380 --> 00:28:02,240 +equal to. And since we are talking about + +353 +00:28:02,240 --> 00:28:06,120 +proportions, so you have to ignore A and B. So A + +354 +00:28:06,120 --> 00:28:10,020 +and B should be ignored first. Then you either + +355 +00:28:10,020 --> 00:28:15,220 +choose C or D. C is the correct answer. 
So C is + +356 +00:28:15,220 --> 00:28:20,440 +the correct answer. That's for number one. Part + +357 +00:28:20,440 --> 00:28:27,100 +two. Now suppose Pepsi wanted to test to determine + +358 +00:28:27,100 --> 00:28:35,680 +if males preferred Pepsi more than females. Using + +359 +00:28:35,680 --> 00:28:38,400 +the test statistic given, compute the appropriate + +360 +00:28:38,400 --> 00:28:43,940 +p-value for the test. Let's assume that pi 1 is + +361 +00:28:43,940 --> 00:28:48,640 +the population proportion for males who preferred + +362 +00:28:48,640 --> 00:28:56,440 +Pepsi, and pi 2 for females who prefer Pepsi. Now + +363 +00:28:56,440 --> 00:29:00,140 +he asks about suppose the company wanted to test + +364 +00:29:00,140 --> 00:29:02,760 +to determine if males prefer Pepsi more than + +365 +00:29:02,760 --> 00:29:08,080 +females. Using again the statistic given which is + +366 +00:29:08,080 --> 00:29:13,400 +2.12 for example, compute appropriately value. Now + +367 +00:29:13,400 --> 00:29:18,160 +let's state first H0 and H8. + +368 +00:29:27,450 --> 00:29:31,970 +H1 pi 1 + +369 +00:29:31,970 --> 00:29:34,410 +minus pi 2 is greater than zero. + +370 +00:29:37,980 --> 00:29:42,740 +Because it says that males prefer Pepsi more than + +371 +00:29:42,740 --> 00:29:46,940 +females. Bi-1 for males, Bi-2 for females. So I + +372 +00:29:46,940 --> 00:29:50,800 +should have Bi-1 greater than Bi-2 or Bi-1 minus + +373 +00:29:50,800 --> 00:29:54,940 +Bi-2 is positive. So it's upper case. Now, in this + +374 +00:29:54,940 --> 00:30:01,940 +case, my B value, its probability is B. + +375 +00:30:05,680 --> 00:30:07,320 +It's around this value. + +376 +00:30:12,410 --> 00:30:18,230 +1 minus b of z is smaller than 2.12. So 1 minus, + +377 +00:30:18,350 --> 00:30:21,530 +now by using the table or the z table we have. 
+ +378 +00:30:25,510 --> 00:30:29,370 +Since we are talking about 2, 1, 12, 2, 1, 2, I'm + +379 +00:30:29,370 --> 00:30:34,670 +sorry, 2, 1, 2, 2, 1, 2, so the answer is 983. So + +380 +00:30:34,670 --> 00:30:40,590 +1 minus 893, so the answer is 017. So my b value. + +381 +00:30:43,430 --> 00:30:49,890 +equals 0 and 7. So A is the correct answer. Now if + +382 +00:30:49,890 --> 00:30:53,970 +the problem is two-tailed test, it should be + +383 +00:30:53,970 --> 00:30:57,450 +multiplied by 2. So the answer, the correct should + +384 +00:30:57,450 --> 00:31:02,230 +be B. So you have A and B. If it is one-third, + +385 +00:31:02,390 --> 00:31:06,310 +your correct answer is A. If it is two-thirds, I + +386 +00:31:06,310 --> 00:31:10,550 +mean if we are testing to determine if a + +387 +00:31:10,550 --> 00:31:13,890 +difference exists, then you have to multiply this + +388 +00:31:13,890 --> 00:31:19,030 +one by two. So that's your B value. Any question? + +389 +00:31:23,010 --> 00:31:27,550 +Number three. Suppose Babs wanted to test to + +390 +00:31:27,550 --> 00:31:33,230 +determine if meals If males prefer Pepsi less than + +391 +00:31:33,230 --> 00:31:36,810 +females, using the statistic given, compute the + +392 +00:31:36,810 --> 00:31:42,990 +product B value. Now, H1 in this case, B1 is + +393 +00:31:42,990 --> 00:31:48,490 +smaller than Z by 2, by 1 smaller than 1. Now your + +394 +00:31:48,490 --> 00:31:54,490 +B value, Z is smaller than, because here it is + +395 +00:31:54,490 --> 00:31:58,050 +smaller than my statistic 2.12. + +396 +00:32:01,570 --> 00:32:04,790 +We don't write negative sign. Because the value of + +397 +00:32:04,790 --> 00:32:08,150 +the statistic is 2.12. But here we are going to + +398 +00:32:08,150 --> 00:32:11,790 +test lower tail test. So my B value is B of Z + +399 +00:32:11,790 --> 00:32:15,250 +smaller than. So smaller comes from the + +400 +00:32:15,250 --> 00:32:17,730 +alternator. This is the sign under the alternator. 
+ +401 +00:32:18,910 --> 00:32:21,810 +And you have to take the value of the Z statistic + +402 +00:32:21,810 --> 00:32:22,510 +as it is. + +403 +00:32:25,610 --> 00:32:34,100 +So B of Z is smaller than minus 3. so they need if + +404 +00:32:34,100 --> 00:32:38,060 +you got a correct answer D is the correct if B is + +405 +00:32:38,060 --> 00:32:40,420 +the correct answer you will get nine nine nine six + +406 +00:32:40,420 --> 00:32:47,620 +six that's incorrect answer any question the + +407 +00:32:47,620 --> 00:32:53,920 +correct is D number + +408 +00:32:53,920 --> 00:32:57,620 +four suppose + +409 +00:32:57,620 --> 00:33:03,650 +that Now for example, forget the information we + +410 +00:33:03,650 --> 00:33:07,390 +have so far for B value. Suppose that the two + +411 +00:33:07,390 --> 00:33:11,910 +-tailed B value was really 0734. Now suppose my B + +412 +00:33:11,910 --> 00:33:19,010 +value for two-tailed is 0734. That's for two + +413 +00:33:19,010 --> 00:33:20,210 +-tailed. This is my B value. + +414 +00:33:23,070 --> 00:33:28,490 +This is my B value. It's 0, 7, 3, 4. Now we have + +415 +00:33:28,490 --> 00:33:33,650 +four answers. Part A, B, C, and D. Which one is + +416 +00:33:33,650 --> 00:33:34,050 +the correct? + +417 +00:33:42,030 --> 00:33:46,610 +A says at 5% level, there is sufficient evidence + +418 +00:33:46,610 --> 00:33:51,510 +to conclude the proportion of males Preferring + +419 +00:33:51,510 --> 00:33:53,930 +Pepsi differs from the proportion of females + +420 +00:33:53,930 --> 00:33:58,970 +preferring Pepsi. Which one is the correct answer? + +421 +00:34:02,290 --> 00:34:04,550 +B value is 0.734. + +422 +00:34:10,370 --> 00:34:16,650 +B it says at alpha equals 10 percent. There is + +423 +00:34:16,650 --> 00:34:20,320 +sufficient evidence. to indicate the proportion of + +424 +00:34:20,320 --> 00:34:22,900 +males preferring Pepsi differs from the proportion + +425 +00:34:22,900 --> 00:34:24,160 +of females preferring Pepsi. 
+ +426 +00:34:27,240 --> 00:34:30,720 +C. At 5%, there is sufficient evidence to indicate + +427 +00:34:30,720 --> 00:34:33,260 +the proportion of males preferring Pepsi equals + +428 +00:34:33,260 --> 00:34:38,860 +the proportion of females preferring Pepsi. D. At + +429 +00:34:38,860 --> 00:34:42,480 +8% level, there is insufficient evidence to + +430 +00:34:42,480 --> 00:34:45,860 +include to indicate the proportion of males + +431 +00:34:45,860 --> 00:34:48,580 +preferring babies differs from the proportion of + +432 +00:34:48,580 --> 00:34:49,720 +females preferring babies. + +433 +00:34:54,300 --> 00:34:59,360 +Again, suppose that here it's two-tailed test. It + +434 +00:34:59,360 --> 00:35:03,420 +says two-tailed test. Two-tailed means Y1 does not + +435 +00:35:03,420 --> 00:35:09,540 +equal Y2. So in this case, we are testing Y1 + +436 +00:35:09,540 --> 00:35:16,190 +equals Y2. against by one is not by two and your B + +437 +00:35:16,190 --> 00:35:19,090 +value is zero seven three four. So which one is + +438 +00:35:19,090 --> 00:35:27,950 +the correct answer? B? D. Let's look at D. Let's + +439 +00:35:27,950 --> 00:35:29,270 +look at D. + +440 +00:35:34,610 --> 00:35:39,900 +Since B value is smaller than alpha, Since it + +441 +00:35:39,900 --> 00:35:44,840 +means we reject Insufficient means we don't reject + +442 +00:35:44,840 --> 00:35:50,380 +So D is incorrect D + +443 +00:35:50,380 --> 00:35:52,440 +is incorrect because here there is insufficient + +444 +00:35:52,440 --> 00:35:55,720 +Since + +445 +00:35:55,720 --> 00:35:58,800 +we if we reject it means that we have sufficient + +446 +00:35:58,800 --> 00:36:02,700 +evidence so support The alternative so D is + +447 +00:36:02,700 --> 00:36:07,470 +incorrect Now what's about C at five percent Five, + +448 +00:36:07,830 --> 00:36:10,570 +so this value is greater than five, so we don't + +449 +00:36:10,570 --> 00:36:13,270 +reject. So that's incorrect. + +450 +00:36:21,370 --> 00:36:28,030 +B. 
At five, at 10% now, there is sufficient + +451 +00:36:28,030 --> 00:36:34,550 +evidence. Sufficient means we reject. We reject. + +452 +00:36:35,220 --> 00:36:40,440 +Since this B value, 0.7, is smaller than alpha. 7% + +453 +00:36:40,440 --> 00:36:44,240 +is smaller than 10%. So we reject. That means you + +454 +00:36:44,240 --> 00:36:46,960 +have to read carefully. There is sufficient + +455 +00:36:46,960 --> 00:36:50,280 +evidence to include, to indicate the proportion of + +456 +00:36:50,280 --> 00:36:54,820 +males preferring Pepsi differs from the proportion + +457 +00:36:54,820 --> 00:36:58,660 +of females. That's correct. So B is the correct + +458 +00:36:58,660 --> 00:37:05,570 +state. Now look at A. A, at 5% there is sufficient + +459 +00:37:05,570 --> 00:37:09,710 +evidence? No, because this value is greater than + +460 +00:37:09,710 --> 00:37:16,970 +alpha, so we don't reject. For this one. Here we + +461 +00:37:16,970 --> 00:37:21,050 +reject because at 10% we reject. So B is the + +462 +00:37:21,050 --> 00:37:27,670 +correct answer. Make sense? Yeah, exactly, for + +463 +00:37:27,670 --> 00:37:31,850 +10%. If this value is 5%, then B is incorrect. + +464 +00:37:34,190 --> 00:37:38,690 +Again, if we change this one to be 5%, still this + +465 +00:37:38,690 --> 00:37:39,870 +statement is false. + +466 +00:37:43,050 --> 00:37:48,670 +It should be smaller than alpha in order to reject + +467 +00:37:48,670 --> 00:37:53,770 +the null hypothesis. So, B is the correct + +468 +00:37:53,770 --> 00:37:56,350 +statement. + +469 +00:37:58,180 --> 00:38:02,080 +Always insufficient means you don't reject null + +470 +00:38:02,080 --> 00:38:06,000 +hypothesis. Now for D, we reject null hypothesis + +471 +00:38:06,000 --> 00:38:10,500 +at 8%. Since this value 0.7 is smaller than alpha, + +472 +00:38:10,740 --> 00:38:14,700 +so we reject. So this is incorrect. Now for C, be + +473 +00:38:14,700 --> 00:38:19,440 +careful. 
At 5%, if this, if we change this one + +474 +00:38:19,440 --> 00:38:23,560 +little bit, there is insufficient evidence. What + +475 +00:38:23,560 --> 00:38:32,320 +do you think? About C. If we change part C as at 5 + +476 +00:38:32,320 --> 00:38:36,540 +% there is insufficient evidence to indicate the + +477 +00:38:36,540 --> 00:38:39,840 +proportion of males preferring Pepsi equals. + +478 +00:38:44,600 --> 00:38:49,940 +You cannot say equal because this one maybe yes + +479 +00:38:49,940 --> 00:38:53,200 +maybe no you don't know the exact answer. So if we + +480 +00:38:53,200 --> 00:38:56,380 +don't reject the null hypothesis then you don't + +481 +00:38:56,380 --> 00:38:58,780 +have sufficient evidence in order to support each + +482 +00:38:58,780 --> 00:39:03,800 +one. So, don't reject the zero as we mentioned + +483 +00:39:03,800 --> 00:39:10,660 +before. Don't reject the zero does not imply + +484 +00:39:10,660 --> 00:39:16,840 +if zero is true. It means the evidence, the data + +485 +00:39:16,840 --> 00:39:19,500 +you have is not sufficient to support the + +486 +00:39:19,500 --> 00:39:25,260 +alternative evidence. So, don't say equal to. So + +487 +00:39:25,260 --> 00:39:30,560 +say don't reject rather than saying accept. So V + +488 +00:39:30,560 --> 00:39:31,460 +is the correct answer. + +489 +00:39:35,940 --> 00:39:43,020 +Six, seven, and eight. Construct 90% confidence + +490 +00:39:43,020 --> 00:39:48,380 +interval, construct 95, construct 99. It's + +491 +00:39:48,380 --> 00:39:52,700 +similar, just the critical value will be changed. + +492 +00:39:53,620 --> 00:39:58,380 +Now my question is, which is the widest continence + +493 +00:39:58,380 --> 00:40:03,080 +interval in this case? 99. The last one is the + +494 +00:40:03,080 --> 00:40:08,040 +widest because here 99 is the largest continence + +495 +00:40:08,040 --> 00:40:11,160 +limit. So that means the width of the interval is + +496 +00:40:11,160 --> 00:40:12,620 +the largest in this case. 
+ +497 +00:40:17,960 --> 00:40:23,770 +For 5, 6 and 7. The question is construct either + +498 +00:40:23,770 --> 00:40:30,930 +90%, 95% or 99% for the same question. Simple + +499 +00:40:30,930 --> 00:40:33,510 +calculation will give the confidence interval for + +500 +00:40:33,510 --> 00:40:38,590 +each one. My question was, which one is the widest + +501 +00:40:38,590 --> 00:40:43,630 +confidence interval? Based on the C level, 99% + +502 +00:40:43,630 --> 00:40:47,350 +gives the widest confidence interval comparing to + +503 +00:40:47,350 --> 00:41:02,100 +90% and 95%. The exact answers for 5, 6 and 7, 0.5 + +504 +00:41:02,100 --> 00:41:08,900 +to 30 percent. For 95 percent, 0.2 to 32 percent. + +505 +00:41:10,750 --> 00:41:16,030 +For 99, negative 0.3 to 0.37. So this is the + +506 +00:41:16,030 --> 00:41:21,970 +widest. Because here we start from 5 to 30. Here + +507 +00:41:21,970 --> 00:41:26,030 +we start from lower than 5, 2%, up to upper, for + +508 +00:41:26,030 --> 00:41:31,190 +greater than 30, 32. Here we start from negative 3 + +509 +00:41:31,190 --> 00:41:35,330 +% up to 37. So this is the widest confidence + +510 +00:41:35,330 --> 00:41:41,950 +interval. Number six. Number six. number six five + +511 +00:41:41,950 --> 00:41:44,850 +six and seven are the same except we just share + +512 +00:41:44,850 --> 00:41:49,710 +the confidence level z so here we have one nine + +513 +00:41:49,710 --> 00:41:54,070 +six instead of one six four and two point five + +514 +00:41:54,070 --> 00:42:01,170 +seven it's our seven six next read the table e + +515 +00:42:12,610 --> 00:42:19,330 +Table A. Corporation randomly selects 150 + +516 +00:42:19,330 --> 00:42:25,830 +salespeople and finds that 66% who have never + +517 +00:42:25,830 --> 00:42:29,070 +taken self-improvement course would like such a + +518 +00:42:29,070 --> 00:42:33,830 +course. 
So currently, or in recent, + +519 +00:42:37,660 --> 00:42:46,940 +It says that out of 150 sales people, find that 66 + +520 +00:42:46,940 --> 00:42:51,000 +% would + +521 +00:42:51,000 --> 00:42:56,720 +like to take such course. The firm did a similar + +522 +00:42:56,720 --> 00:43:01,480 +study 10 years ago. So in the past, they had the + +523 +00:43:01,480 --> 00:43:07,430 +same study in which 60% of a random sample of 160 + +524 +00:43:07,430 --> 00:43:12,430 +salespeople wanted a self-improvement course. So + +525 +00:43:12,430 --> 00:43:13,710 +in the past, + +526 +00:43:16,430 --> 00:43:25,230 +into 160, and proportion is 60%. The groups are + +527 +00:43:25,230 --> 00:43:29,690 +assumed to be independent random samples. Let Pi 1 + +528 +00:43:29,690 --> 00:43:32,890 +and Pi 2 represent the true proportion of workers + +529 +00:43:32,890 --> 00:43:36,030 +who would like to attend a self-improvement course + +530 +00:43:36,030 --> 00:43:39,550 +in the recent study and the past study + +531 +00:43:39,550 --> 00:43:44,490 +respectively. So suppose Pi 1 and Pi 2. Pi 1 for + +532 +00:43:44,490 --> 00:43:49,470 +recent study and Pi 2 for the past study. So + +533 +00:43:49,470 --> 00:43:53,590 +that's the question. Now, question number one. + +534 +00:43:56,580 --> 00:44:00,220 +If the firm wanted to test whether this proportion + +535 +00:44:00,220 --> 00:44:06,800 +has changed from the previous study, which + +536 +00:44:06,800 --> 00:44:09,100 +represents the relevant hypothesis? + +537 +00:44:14,160 --> 00:44:18,540 +Again, the firm wanted to test whether this + +538 +00:44:18,540 --> 00:44:21,740 +proportion has changed. From the previous study, + +539 +00:44:22,160 --> 00:44:25,900 +which represents the relevant hypothesis in this + +540 +00:44:25,900 --> 00:44:26,140 +case? + +541 +00:44:33,560 --> 00:44:40,120 +Which is the correct? A is + +542 +00:44:40,120 --> 00:44:44,500 +the correct answer. Why A is the correct answer? 
+ +543 +00:44:45,000 --> 00:44:48,040 +Since we are talking about proportions, so it + +544 +00:44:48,040 --> 00:44:51,750 +should have pi. It changed, it means does not + +545 +00:44:51,750 --> 00:44:55,410 +equal 2. So A is the correct answer. Now B is + +546 +00:44:55,410 --> 00:45:00,850 +incorrect because why B is incorrect? Exactly + +547 +00:45:00,850 --> 00:45:03,770 +because under H0 we have pi 1 minus pi 2 does not + +548 +00:45:03,770 --> 00:45:08,570 +equal 0. Always equal sign appears only under the + +549 +00:45:08,570 --> 00:45:14,950 +null hypothesis. So it's the opposite here. Now C + +550 +00:45:14,950 --> 00:45:21,190 +and D talking about Upper tier or lower tier, but + +551 +00:45:21,190 --> 00:45:23,890 +here we're talking about two-tiered test, so A is + +552 +00:45:23,890 --> 00:45:24,750 +the correct answer. + +553 +00:45:29,490 --> 00:45:33,090 +This sign null hypothesis states incorrectly, + +554 +00:45:34,030 --> 00:45:38,010 +because under H0 should have equal sign, and for + +555 +00:45:38,010 --> 00:45:39,730 +alternate it should be not equal to. + +556 +00:45:42,770 --> 00:45:43,630 +Number two. + +557 +00:45:47,860 --> 00:45:51,840 +If the firm wanted to test whether a greater + +558 +00:45:51,840 --> 00:45:56,680 +proportion of workers would currently like to + +559 +00:45:56,680 --> 00:46:00,180 +attend a self-improvement course than in the past, + +560 +00:46:00,900 --> 00:46:05,840 +currently, the proportion is greater than in the + +561 +00:46:05,840 --> 00:46:13,680 +past. Which represents the relevant hypothesis? C + +562 +00:46:13,680 --> 00:46:18,180 +is the correct answer. Because it says a greater + +563 +00:46:18,180 --> 00:46:22,340 +proportion of workers work currently. So by one, + +564 +00:46:22,420 --> 00:46:26,340 +greater than by two. So C is the correct answer. 
+ +565 +00:46:31,340 --> 00:46:40,140 +It says that the firm wanted to test proportion of + +566 +00:46:40,140 --> 00:46:46,640 +workers currently study + +567 +00:46:46,640 --> 00:46:50,320 +or recent study by one represents the proportion + +568 +00:46:50,320 --> 00:46:55,140 +of workers who would like to attend the course so + +569 +00:46:55,140 --> 00:46:58,080 +that's by one greater than + +570 +00:47:01,730 --> 00:47:05,350 +In the past. So it means by one is greater than by + +571 +00:47:05,350 --> 00:47:11,870 +two. It means by one minus by two is positive. So + +572 +00:47:11,870 --> 00:47:14,590 +the alternative is by one minus two by two is + +573 +00:47:14,590 --> 00:47:16,430 +positive. So this one is the correct answer. + +574 +00:47:21,530 --> 00:47:26,910 +Exactly. If if here we have what in the past + +575 +00:47:26,910 --> 00:47:30,430 +should be it should be the correct answer. + +576 +00:47:34,690 --> 00:47:40,450 +That's to three. Any question for going to number + +577 +00:47:40,450 --> 00:47:49,590 +three? Any question for number two? Three. What is + +578 +00:47:49,590 --> 00:47:52,790 +the unbiased point estimate for the difference + +579 +00:47:52,790 --> 00:47:54,410 +between the two population proportions? + +580 +00:47:58,960 --> 00:48:04,360 +B1 minus B2 which is straight forward calculation + +581 +00:48:04,360 --> 00:48:06,980 +gives A the correct answer. Because the point + +582 +00:48:06,980 --> 00:48:13,320 +estimate in this case is B1 minus B2. B1 is 66 + +583 +00:48:13,320 --> 00:48:18,560 +percent, B2 is 60 percent, so the answer is 6 + +584 +00:48:18,560 --> 00:48:26,190 +percent. So B1 minus B2 which is 6 percent. I + +585 +00:48:26,190 --> 00:48:32,450 +think three is straightforward. 
Number four, what + +586 +00:48:32,450 --> 00:48:38,450 +is or are the critical values which, when + +587 +00:48:38,450 --> 00:48:41,870 +performing a z-test on whether population + +588 +00:48:41,870 --> 00:48:46,570 +proportions are different at 5%. Here, yes, we are + +589 +00:48:46,570 --> 00:48:52,250 +talking about two-tailed test, and alpha is 5%. So + +590 +00:48:52,250 --> 00:48:55,550 +my critical values, they are two critical values, + +591 +00:48:55,630 --> 00:48:55,830 +actually. + +592 +00:49:27,080 --> 00:49:31,000 +What is or are the critical values when testing + +593 +00:49:31,000 --> 00:49:34,260 +whether population proportions are different at 10 + +594 +00:49:34,260 --> 00:49:39,240 +%? The same instead here we have 10 instead of 5%. + +595 +00:49:40,920 --> 00:49:45,100 +So A is the correct answer. So just use the table. + +596 +00:49:47,340 --> 00:49:51,440 +Now for the previous one, we have 0 to 5, 0 to 5. + +597 +00:49:51,980 --> 00:49:57,740 +The other one, alpha is 10%. So 0, 5 to the right, + +598 +00:49:57,880 --> 00:50:03,580 +the same as to the left. So plus or minus 164. + +599 +00:50:06,700 --> 00:50:11,580 +So 4 and 5 by using the z table. + +600 +00:50:20,560 --> 00:50:25,280 +So exactly, since alpha here is 1, 0, 2, 5, so the + +601 +00:50:25,280 --> 00:50:27,880 +area becomes smaller than, so it should be z + +602 +00:50:27,880 --> 00:50:32,380 +greater than. So 1.106, the other one 1.645, + +603 +00:50:32,800 --> 00:50:38,030 +number 6. What is or are? The critical value in + +604 +00:50:38,030 --> 00:50:42,450 +testing whether the current population is higher + +605 +00:50:42,450 --> 00:50:50,990 +than. Higher means above. Above 10. Above 10, 5%. + +606 +00:50:50,990 --> 00:50:55,870 +So which? B. + +607 +00:50:58,470 --> 00:51:00,810 +B is the correct. Z alpha. + +608 +00:51:06,700 --> 00:51:08,440 +So, B is the correct answer. + +609 +00:51:11,200 --> 00:51:11,840 +7. 
+ +610 +00:51:14,740 --> 00:51:21,320 +7 and 8 we should have to calculate number 1. 7 + +611 +00:51:21,320 --> 00:51:25,880 +was the estimated standard error of the difference + +612 +00:51:25,880 --> 00:51:29,660 +between the two sample proportions. We should have + +613 +00:51:29,660 --> 00:51:30,740 +a standard error. + +614 +00:51:34,620 --> 00:51:40,320 +Square root, B dash 1 minus B dash multiplied by 1 + +615 +00:51:40,320 --> 00:51:45,300 +over N1 plus 1 over N2. And we have to find B dash + +616 +00:51:45,300 --> 00:51:49,220 +here. Let's see how can we find B dash. + +617 +00:51:52,720 --> 00:51:59,700 +B dash + +618 +00:51:59,700 --> 00:52:05,800 +equal x1 plus x2. Now what's the value of X1? + +619 +00:52:10,400 --> 00:52:16,220 +Exactly. Since B1 is X1 over N1. So that means X1 + +620 +00:52:16,220 --> 00:52:26,600 +is N1 times B1. So N1 is 150 times 60%. So that's + +621 +00:52:26,600 --> 00:52:35,980 +99. And similarly, X2 N2, which is 160, times 60% + +622 +00:52:35,980 --> 00:52:48,420 +gives 96. So your B dash is x1 plus x2 divided by + +623 +00:52:48,420 --> 00:52:55,200 +N1 plus N2, which is 150 plus 310. So complete B + +624 +00:52:55,200 --> 00:52:58,760 +dash versus the bold estimate of overall + +625 +00:52:58,760 --> 00:53:03,570 +proportion So 9 and 9 plus 9 is 6. + +626 +00:53:06,390 --> 00:53:07,730 +That's just B-. + +627 +00:53:13,210 --> 00:53:14,290 +6 to 9. + +628 +00:53:17,150 --> 00:53:23,190 +6 to 9. So this is not your answer. It's just B-. + +629 +00:53:23,770 --> 00:53:29,030 +Now take this value and the square root of 6 to 9. + +630 +00:53:30,060 --> 00:53:36,280 +times 1.629 multiplied by 1 over N1 which is 150 + +631 +00:53:36,280 --> 00:53:44,980 +plus 160. That's your standard error. B dash is + +632 +00:53:44,980 --> 00:53:49,080 +not standard error. B dash is the bold estimate of + +633 +00:53:49,080 --> 00:53:53,740 +overall proportion. Now simple calculation will + +634 +00:53:53,740 --> 00:53:59,740 +give C. 
So C is the correct answer. + +635 +00:54:07,060 --> 00:54:15,600 +What's the standard error of the difference + +636 +00:54:15,600 --> 00:54:17,600 +between the two proportions given by this + +637 +00:54:17,600 --> 00:54:23,320 +equation? Here first we have to compute P' by + +638 +00:54:23,320 --> 00:54:28,300 +using x1 plus x2 over n1 plus n2. In this example, + +639 +00:54:29,420 --> 00:54:31,280 +the x's are not given, but we have the + +640 +00:54:31,280 --> 00:54:35,010 +proportions. And we know that B1 equals X1 over + +641 +00:54:35,010 --> 00:54:39,590 +N1. So X1 equals N1 times B1. So I got 99. + +642 +00:54:40,290 --> 00:54:45,070 +Similarly, X2 and 2 times B2 is 96. So B dash is + +643 +00:54:45,070 --> 00:54:51,610 +629. So plug this value here, you will get 055. + +644 +00:54:56,170 --> 00:55:00,530 +What's the value that is satisfactory to use in + +645 +00:55:00,530 --> 00:55:02,790 +evaluating the alternative hypothesis? That there + +646 +00:55:02,790 --> 00:55:04,710 +is a difference in the two population proportions. + +647 +00:55:05,350 --> 00:55:12,250 +So we have to compute Z score, Z stat, which is V1 + +648 +00:55:12,250 --> 00:55:16,870 +minus V2, which is 6%, minus 0, divided by this + +649 +00:55:16,870 --> 00:55:24,840 +amount, 0.55. Now, 0.6 over 0.5 around 1. Six over + +650 +00:55:24,840 --> 00:55:30,700 +five, so the answer is one. So that's my Z + +651 +00:55:30,700 --> 00:55:31,000 +statistic. + +652 +00:55:33,920 --> 00:55:34,900 +That's number eight. + +653 +00:55:38,880 --> 00:55:43,100 +So the answer is C is the correct answer. So + +654 +00:55:43,100 --> 00:55:48,140 +straightforward calculations for C and D gives C + +655 +00:55:48,140 --> 00:55:51,260 +correct answer for both seven and eight. + +656 +00:55:54,240 --> 00:55:59,300 +So C is correct for each one. Now 9. 
+ +657 +00:56:08,960 --> 00:56:15,240 +In 9, the company tests to determine at 5% level + +658 +00:56:15,240 --> 00:56:18,680 +of significance whether the population proportion + +659 +00:56:18,680 --> 00:56:22,850 +has changed. from the previous study. As it + +660 +00:56:22,850 --> 00:56:24,910 +changed, it means we are talking about two-tiered + +661 +00:56:24,910 --> 00:56:30,750 +tests. Which of the following is most correct? So + +662 +00:56:30,750 --> 00:56:33,750 +here we are talking about two-tiered tests and + +663 +00:56:33,750 --> 00:56:39,210 +keep in mind your Z statistic is 1.093. And again, + +664 +00:56:39,470 --> 00:56:43,350 +we are talking about two-tiered tests. So my + +665 +00:56:43,350 --> 00:56:44,690 +rejection regions are + +666 +00:56:48,170 --> 00:56:53,150 +Negative 196, critical values I mean. So the + +667 +00:56:53,150 --> 00:56:58,230 +critical regions are 1.96 and above or smaller + +668 +00:56:58,230 --> 00:57:07,550 +than minus 1.96. Now, my z statistic is 1.903. Now + +669 +00:57:07,550 --> 00:57:12,610 +this value falls in the non-rejection region. So + +670 +00:57:12,610 --> 00:57:14,310 +we don't reject the non-hypothesis. + +671 +00:57:16,900 --> 00:57:21,400 +Ignore A and C, so the answer is either B or D. + +672 +00:57:22,260 --> 00:57:26,360 +Now let's read B. Don't reject the null and + +673 +00:57:26,360 --> 00:57:28,820 +conclude that the proportion of employees who are + +674 +00:57:28,820 --> 00:57:31,600 +interested in self-improvement course has not + +675 +00:57:31,600 --> 00:57:32,100 +changed. + +676 +00:57:37,040 --> 00:57:40,060 +That's correct. Because we don't reject the null + +677 +00:57:40,060 --> 00:57:42,900 +hypothesis. It means there is no significant + +678 +00:57:42,900 --> 00:57:45,760 +difference. So it has not changed. 
Now, D, don't + +679 +00:57:45,760 --> 00:57:47,540 +reject the null hypothesis and conclude the + +680 +00:57:47,540 --> 00:57:49,760 +proportion of Obliques who are interested in a + +681 +00:57:49,760 --> 00:57:52,700 +certain point has increased, which is incorrect. + +682 +00:57:53,640 --> 00:57:57,960 +So B is the correct answer. So again, since my Z + +683 +00:57:57,960 --> 00:58:01,080 +statistic falls in the non-rejection region, we + +684 +00:58:01,080 --> 00:58:04,380 +don't reject the null hypothesis. So either B or D + +685 +00:58:04,380 --> 00:58:07,350 +is the correct answer. But here we are talking + +686 +00:58:07,350 --> 00:58:12,190 +about none or don't reject the null hypothesis. + +687 +00:58:12,470 --> 00:58:14,310 +That means we don't have sufficient evidence + +688 +00:58:14,310 --> 00:58:17,610 +support that there is significant change between + +689 +00:58:17,610 --> 00:58:20,670 +the two proportions. So there is no difference. So + +690 +00:58:20,670 --> 00:58:23,270 +it has not changed. It's the correct one. So you + +691 +00:58:23,270 --> 00:58:29,890 +have to choose B. So B is the most correct answer. + +692 +00:58:30,830 --> 00:58:35,600 +Now, 10, 11, and 12. Talking about constructing + +693 +00:58:35,600 --> 00:58:41,700 +confidence interval 99, 95, and 90%. It's similar. + +694 +00:58:42,620 --> 00:58:46,140 +And as we mentioned before, 99% will give the + +695 +00:58:46,140 --> 00:58:50,940 +widest confidence interval. And the answers for + +696 +00:58:50,940 --> 00:59:04,300 +these are 14, 11, 14, is negative 0.8 to 20%. For + +697 +00:59:04,300 --> 00:59:11,720 +11, 0.5, negative 0.5 to 17. For 90%, negative 0.3 + +698 +00:59:11,720 --> 00:59:15,420 +to 0.15. So this is the widest confidence + +699 +00:59:15,420 --> 00:59:22,220 +interval, which was for 99%. So similar as the + +700 +00:59:22,220 --> 00:59:26,360 +previous one we had discussed. 
So for 99, always + +701 +00:59:26,360 --> 00:59:32,230 +we get The widest confidence interval. Any + +702 +00:59:32,230 --> 00:59:37,490 +question? That's all. Next time shall start + +703 +00:59:37,490 --> 00:59:41,350 +chapter 12, Chi-square test of independence. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..d582f9bc969875b5a39b092afe6f25f0a263b2ec --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_postprocess.srt @@ -0,0 +1,2628 @@ +1 +00:00:11,850 --> 00:00:16,370 +Inshallah we'll start numerical descriptive majors + +2 +00:00:16,370 --> 00:00:22,270 +for the population. Last time we talked about the + +3 +00:00:22,270 --> 00:00:25,780 +same majors. I mean the same descriptive measures + +4 +00:00:25,780 --> 00:00:29,180 +for a sample. And we have already talked about the + +5 +00:00:29,180 --> 00:00:35,080 +mean, variance, and standard deviation. These are + +6 +00:00:35,080 --> 00:00:38,580 +called statistics because they are computed from + +7 +00:00:38,580 --> 00:00:43,140 +the sample. Here we'll see how can we do the same + +8 +00:00:44,300 --> 00:00:47,320 +measures but for a population, I mean for the + +9 +00:00:47,320 --> 00:00:53,020 +entire dataset. So descriptive statistics + +10 +00:00:53,020 --> 00:00:57,860 +described previously in the last two lectures was + +11 +00:00:57,860 --> 00:01:04,200 +for a sample. Here we'll just see how can we + +12 +00:01:04,200 --> 00:01:07,740 +compute these measures for the entire population. + +13 +00:01:08,480 --> 00:01:11,600 +In this case, the statistics we talked about + +14 +00:01:11,600 --> 00:01:17,200 +before are called And if you remember the first + +15 +00:01:17,200 --> 00:01:19,800 +lecture, we said there is a difference between + +16 +00:01:19,800 --> 00:01:24,300 +statistics and parameters. 
A statistic is a value + +17 +00:01:24,300 --> 00:01:27,520 +that computed from a sample, but parameter is a + +18 +00:01:27,520 --> 00:01:32,140 +value computed from population. So the important + +19 +00:01:32,140 --> 00:01:37,020 +population parameters are population mean, + +20 +00:01:37,660 --> 00:01:43,560 +variance, and standard deviation. Let's start with + +21 +00:01:43,560 --> 00:01:45,880 +the first one, the mean, or the population mean. + +22 +00:01:46,980 --> 00:01:50,720 +As the sample mean is defined by the sum of the + +23 +00:01:50,720 --> 00:01:55,120 +values divided by the sample size. But here, we + +24 +00:01:55,120 --> 00:01:57,880 +have to divide by the population size. So that's + +25 +00:01:57,880 --> 00:02:01,140 +the difference between sample mean and population + +26 +00:02:01,140 --> 00:02:08,950 +mean. For the sample mean, we use x bar. Here we + +27 +00:02:08,950 --> 00:02:14,790 +use Greek letter, mu. This is pronounced as mu. So + +28 +00:02:14,790 --> 00:02:18,790 +mu is the sum of the x values divided by the + +29 +00:02:18,790 --> 00:02:21,210 +population size, not the sample size. So it's + +30 +00:02:21,210 --> 00:02:24,570 +quite similar to the sample mean. So mu is the + +31 +00:02:24,570 --> 00:02:28,030 +population mean, n is the population size, and xi + +32 +00:02:28,030 --> 00:02:33,270 +is the it value of the variable x. Similarly, for + +33 +00:02:33,270 --> 00:02:37,310 +the other parameter, which is the variance, the + +34 +00:02:37,310 --> 00:02:41,520 +variance There is a little difference between the + +35 +00:02:41,520 --> 00:02:45,480 +sample and population variance. Here, we subtract + +36 +00:02:45,480 --> 00:02:49,700 +the population mean instead of the sample mean. So + +37 +00:02:49,700 --> 00:02:55,140 +sum of xi minus mu squared, then divide by this + +38 +00:02:55,140 --> 00:02:59,140 +population size, capital N, instead of N minus 1. 
+ +39 +00:02:59,520 --> 00:03:02,260 +So that's the difference between sample and + +40 +00:03:02,260 --> 00:03:07,020 +population variance. So again, in the sample + +41 +00:03:07,020 --> 00:03:12,080 +variance, we subtracted x bar. Here, we subtract + +42 +00:03:12,080 --> 00:03:15,640 +the mean of the population, mu, then divide by + +43 +00:03:15,640 --> 00:03:20,200 +capital N instead of N minus 1. So the + +44 +00:03:20,200 --> 00:03:24,000 +computations for the sample and the population + +45 +00:03:24,000 --> 00:03:30,220 +mean or variance are quite similar. Finally, the + +46 +00:03:30,220 --> 00:03:35,390 +population standard deviation. is the same as the + +47 +00:03:35,390 --> 00:03:38,810 +sample population variance and here just take the + +48 +00:03:38,810 --> 00:03:43,170 +square root of the population variance and again + +49 +00:03:43,170 --> 00:03:47,170 +as we did as we explained before the standard + +50 +00:03:47,170 --> 00:03:51,550 +deviation has the same units as the original unit + +51 +00:03:51,550 --> 00:03:57,130 +so nothing is new we just extend the sample + +52 +00:03:57,130 --> 00:04:02,410 +statistic to the population parameter and again + +53 +00:04:04,030 --> 00:04:08,790 +The mean is denoted by mu, it's a Greek letter. + +54 +00:04:10,210 --> 00:04:12,790 +The population variance is denoted by sigma + +55 +00:04:12,790 --> 00:04:17,030 +squared. And finally, the population standard + +56 +00:04:17,030 --> 00:04:21,130 +deviation is denoted by sigma. So that's the + +57 +00:04:21,130 --> 00:04:24,250 +numerical descriptive measures either for a sample + +58 +00:04:24,250 --> 00:04:28,590 +or a population. So just summary for these + +59 +00:04:28,590 --> 00:04:33,330 +measures. The measures are mean variance, standard + +60 +00:04:33,330 --> 00:04:38,250 +deviation. 
Population parameters are mu for the + +61 +00:04:38,250 --> 00:04:43,830 +mean, sigma squared for variance, and sigma for + +62 +00:04:43,830 --> 00:04:46,710 +standard deviation. On the other hand, for the + +63 +00:04:46,710 --> 00:04:51,430 +sample statistics, we have x bar for sample mean, + +64 +00:04:52,110 --> 00:04:56,750 +s squared for the sample variance, and s is the + +65 +00:04:56,750 --> 00:05:00,410 +sample standard deviation. That's sample + +66 +00:05:00,410 --> 00:05:05,360 +statistics against population parameters. Any + +67 +00:05:05,360 --> 00:05:05,700 +question? + +68 +00:05:10,940 --> 00:05:17,240 +Let's move to new topic, which is empirical role. + +69 +00:05:19,340 --> 00:05:25,620 +Now, empirical role is just we + +70 +00:05:25,620 --> 00:05:30,120 +have to approximate the variation of data in case + +71 +00:05:30,120 --> 00:05:34,950 +of They'll shift. I mean suppose the data is + +72 +00:05:34,950 --> 00:05:37,770 +symmetric around the mean. I mean by symmetric + +73 +00:05:37,770 --> 00:05:42,310 +around the mean, the mean is the vertical line + +74 +00:05:42,310 --> 00:05:46,570 +that splits the data into two halves. One to the + +75 +00:05:46,570 --> 00:05:49,570 +right and the other to the left. I mean, the mean, + +76 +00:05:49,870 --> 00:05:52,650 +the area to the right of the mean equals 50%, + +77 +00:05:52,650 --> 00:05:54,970 +which is the same as the area to the left of the + +78 +00:05:54,970 --> 00:05:58,710 +mean. Now suppose or consider the data is bell + +79 +00:05:58,710 --> 00:06:02,570 +-shaped. Bell-shaped, normal, or symmetric? So + +80 +00:06:02,570 --> 00:06:04,290 +it's not skewed either to the right or to the + +81 +00:06:04,290 --> 00:06:08,030 +left. So here we assume, okay, the data is bell + +82 +00:06:08,030 --> 00:06:13,430 +-shaped. In this scenario, in this case, there is + +83 +00:06:13,430 --> 00:06:22,100 +a rule called 68, 95, 99.7 rule. 
Number one, + +84 +00:06:22,960 --> 00:06:26,300 +approximately 68% of the data in a bill shipped + +85 +00:06:26,300 --> 00:06:31,780 +lies within one standard deviation of the + +86 +00:06:31,780 --> 00:06:37,100 +population. So this is the first rule, 68% of the + +87 +00:06:37,100 --> 00:06:43,920 +data or of the observations Lie within a mu minus + +88 +00:06:43,920 --> 00:06:48,880 +sigma and a mu plus sigma. That's the meaning of + +89 +00:06:48,880 --> 00:06:51,800 +the data in bell shape distribution is within one + +90 +00:06:51,800 --> 00:06:55,900 +standard deviation of mean or mu plus or minus + +91 +00:06:55,900 --> 00:07:01,480 +sigma. So again, you can say that if the data is + +92 +00:07:01,480 --> 00:07:04,100 +normally distributed or if the data is bell + +93 +00:07:04,100 --> 00:07:12,210 +shaped, that is 68% of the data lies within one + +94 +00:07:12,210 --> 00:07:16,250 +standard deviation of the mean, either below or + +95 +00:07:16,250 --> 00:07:21,710 +above it. So 68% of the data. So this is the first + +96 +00:07:21,710 --> 00:07:22,090 +rule. + +97 +00:07:29,050 --> 00:07:37,170 +68% of the data lies between mu minus sigma and mu + +98 +00:07:37,170 --> 00:07:37,750 +plus sigma. + +99 +00:07:40,480 --> 00:07:46,260 +The other rule is approximately 95% of the data in + +100 +00:07:46,260 --> 00:07:48,980 +a bell-shaped distribution lies within two + +101 +00:07:48,980 --> 00:07:53,240 +standard deviations of the mean. That means this + +102 +00:07:53,240 --> 00:08:00,880 +area covers between minus two sigma and plus mu + +103 +00:08:00,880 --> 00:08:08,360 +plus two sigma. So 95% of the data lies between + +104 +00:08:08,360 --> 00:08:15,410 +minus mu two sigma And finally, + +105 +00:08:15,790 --> 00:08:21,270 +approximately 99.7% of the data, it means almost + +106 +00:08:21,270 --> 00:08:25,490 +the data. 
Because we are saying 99.7 means most of + +107 +00:08:25,490 --> 00:08:29,930 +the data falls or lies within three standard + +108 +00:08:29,930 --> 00:08:37,770 +deviations of the mean. So 99.7% of the data lies + +109 +00:08:37,770 --> 00:08:41,470 +between mu minus the pre-sigma and the mu plus of + +110 +00:08:41,470 --> 00:08:41,870 +pre-sigma. + +111 +00:08:45,030 --> 00:08:49,810 +68, 95, 99.7 are fixed numbers. Later in chapter + +112 +00:08:49,810 --> 00:08:55,010 +6, we will explain in details other coefficients. + +113 +00:08:55,530 --> 00:08:58,250 +Maybe suppose we are interested not in one of + +114 +00:08:58,250 --> 00:09:03,010 +these. Suppose we are interested in 90% or 80% or + +115 +00:09:03,010 --> 00:09:11,500 +85%. This rule just for 689599.7. This rule is + +116 +00:09:11,500 --> 00:09:15,560 +called 689599 + +117 +00:09:15,560 --> 00:09:22,960 +.7 rule. That is, again, 68% of the data lies + +118 +00:09:22,960 --> 00:09:27,030 +within one standard deviation of the mean. 95% of + +119 +00:09:27,030 --> 00:09:30,370 +the data lies within two standard deviations of + +120 +00:09:30,370 --> 00:09:33,850 +the mean. And finally, most of the data falls + +121 +00:09:33,850 --> 00:09:36,950 +within three standard deviations of the mean. + +122 +00:09:39,870 --> 00:09:43,330 +Let's see how can we use this empirical rule for a + +123 +00:09:43,330 --> 00:09:49,850 +specific example. Imagine that the variable math + +124 +00:09:49,850 --> 00:09:54,070 +set scores is bell shaped. So here we assume that + +125 +00:09:55,230 --> 00:10:00,950 +The math status score has symmetric shape or bell + +126 +00:10:00,950 --> 00:10:04,230 +shape. In this case, we can use the previous rule. + +127 +00:10:04,350 --> 00:10:09,610 +Otherwise, we cannot. So assume the math status + +128 +00:10:09,610 --> 00:10:15,750 +score is bell-shaped with a mean of 500. 
I mean, + +129 +00:10:16,410 --> 00:10:19,750 +the population mean is 500 and standard deviation + +130 +00:10:19,750 --> 00:10:24,620 +of 90. And let's see how can we apply the + +131 +00:10:24,620 --> 00:10:29,220 +empirical rule. So again, meta score has a mean of + +132 +00:10:29,220 --> 00:10:35,300 +500 and standard deviation sigma is 90. Then we + +133 +00:10:35,300 --> 00:10:43,200 +can say that 60% of all test takers scored between + +134 +00:10:43,200 --> 00:10:46,640 +68%. + +135 +00:10:46,640 --> 00:10:56,550 +So mu is 500. minus sigma is 90. And mu plus + +136 +00:10:56,550 --> 00:11:05,390 +sigma, 500 plus 90. So you can say that 68% or 230 + +137 +00:11:05,390 --> 00:11:15,610 +of all test takers scored between 410 and 590. So + +138 +00:11:15,610 --> 00:11:22,900 +68% of all test takers who took that exam scored + +139 +00:11:22,900 --> 00:11:27,740 +between 14 and 590. That if we assume previously + +140 +00:11:27,740 --> 00:11:29,980 +the data is well shaped, otherwise we cannot say + +141 +00:11:29,980 --> 00:11:36,420 +that. For the other rule, 95% of all test takers + +142 +00:11:36,420 --> 00:11:44,400 +scored between mu is 500 minus 2 times sigma, 500 + +143 +00:11:44,400 --> 00:11:49,760 +plus 2 times sigma. So that means 500 minus 180 is + +144 +00:11:49,760 --> 00:11:55,100 +320. 500 plus 180 is 680. So you can say that + +145 +00:11:55,100 --> 00:11:59,080 +approximately 95% of all test takers scored + +146 +00:11:59,080 --> 00:12:07,860 +between 320 and 680. Finally, you can say that + +147 +00:12:10,770 --> 00:12:13,570 +all of the test takers, approximately all, because + +148 +00:12:13,570 --> 00:12:20,030 +when we are saying 99.7 it means just 0.3 is the + +149 +00:12:20,030 --> 00:12:23,590 +rest, so you can say approximately all test takers + +150 +00:12:23,590 --> 00:12:30,730 +scored between mu minus three sigma which is 90 + +151 +00:12:30,730 --> 00:12:39,830 +and mu It lost 3 seconds. 
So 500 minus 3 times 9 + +152 +00:12:39,830 --> 00:12:45,950 +is 270. So that's 230. 500 plus 270 is 770. So we + +153 +00:12:45,950 --> 00:12:49,690 +can say that 99.7% of all the stackers scored + +154 +00:12:49,690 --> 00:12:55,610 +between 230 and 770. I will give another example + +155 +00:12:55,610 --> 00:12:59,210 +just to make sure that you understand the meaning + +156 +00:12:59,210 --> 00:13:00,870 +of this rule. + +157 +00:13:03,620 --> 00:13:09,720 +For business, a statistic goes. + +158 +00:13:15,720 --> 00:13:20,720 +For business, a statistic example. Suppose the + +159 +00:13:20,720 --> 00:13:29,740 +scores are bell-shaped. So we are assuming the + +160 +00:13:29,740 --> 00:13:40,970 +data is bell-shaped. with mean of 75 and standard + +161 +00:13:40,970 --> 00:13:41,950 +deviation of 5. + +162 +00:13:44,990 --> 00:13:53,810 +Also, let's assume that 100 students took + +163 +00:13:53,810 --> 00:14:00,840 +the exam. So we have 100 students. Last year took + +164 +00:14:00,840 --> 00:14:05,360 +the exam of business statistics. The mean was 75. + +165 +00:14:06,240 --> 00:14:10,920 +And standard deviation was 5. And let's see how it + +166 +00:14:10,920 --> 00:14:17,100 +can tell about 6 to 8% rule. It means that 6 to 8% + +167 +00:14:17,100 --> 00:14:22,100 +of all the students score + +168 +00:14:22,100 --> 00:14:28,650 +between mu minus sigma. Mu is 75. minus sigma and + +169 +00:14:28,650 --> 00:14:29,610 +the mu plus sigma. + +170 +00:14:33,590 --> 00:14:39,290 +So that means 68 students, because we have 100, so + +171 +00:14:39,290 --> 00:14:45,410 +you can say 68 students scored between 70 and 80. + +172 +00:14:46,610 --> 00:14:53,290 +So 60 students out of 100 scored between 70 and + +173 +00:14:53,290 --> 00:15:02,990 +80. About 95 students out of 100 scored between 75 + +174 +00:15:02,990 --> 00:15:12,190 +minus 2 times 5. 75 plus 2 times 5. So that gives + +175 +00:15:12,190 --> 00:15:13,770 +65. 
+ +176 +00:15:15,550 --> 00:15:20,950 +The minimum and the maximum is 85. So you can say + +177 +00:15:20,950 --> 00:15:25,930 +that around 95 students scored between 65 and 85. + +178 +00:15:26,650 --> 00:15:33,510 +Finally, maybe you can see all students. Because + +179 +00:15:33,510 --> 00:15:38,650 +when you're saying 99.7, it means almost all the + +180 +00:15:38,650 --> 00:15:47,210 +students scored between 75 minus 3 times Y. and 75 + +181 +00:15:47,210 --> 00:15:52,970 +plus three times one. So that's six days in two + +182 +00:15:52,970 --> 00:15:59,150 +nights. Now let's look carefully at these three + +183 +00:15:59,150 --> 00:16:04,910 +intervals. The first one is seven to eight, the + +184 +00:16:04,910 --> 00:16:11,050 +other one 65 to 85, then six to 90. When we are + +185 +00:16:11,050 --> 00:16:11,790 +more confident, + +186 +00:16:15,170 --> 00:16:20,630 +When we are more confident here for 99.7%, the + +187 +00:16:20,630 --> 00:16:25,930 +interval becomes wider. So this is the widest + +188 +00:16:25,930 --> 00:16:31,430 +interval. Because here, the length of the interval + +189 +00:16:31,430 --> 00:16:37,090 +is around 10. The other one is 20. Here is 30. So + +190 +00:16:37,090 --> 00:16:42,570 +the last interval has the highest width. So as the + +191 +00:16:42,570 --> 00:16:48,380 +confidence coefficient increases, the length of + +192 +00:16:48,380 --> 00:16:54,080 +the interval becomes larger and larger because it + +193 +00:16:54,080 --> 00:16:59,160 +starts with 10, 20, and we end with 30. So that's + +194 +00:16:59,160 --> 00:17:04,460 +another example of empirical load. And again, here + +195 +00:17:04,460 --> 00:17:10,400 +we assume the data is bell shape. Let's move. to + +196 +00:17:10,400 --> 00:17:15,320 +another one when the data is not in shape. I mean, + +197 +00:17:15,600 --> 00:17:21,840 +if we have data and that data is not symmetric. So + +198 +00:17:21,840 --> 00:17:24,440 +that rule is no longer valid. 
So we have to use + +199 +00:17:24,440 --> 00:17:27,940 +another rule. It's called shape-example rule. + +200 +00:17:37,450 --> 00:17:41,610 +Any questions before we move to the next topic? + +201 +00:17:44,390 --> 00:17:48,150 +At shape and shape rule, it says that regardless + +202 +00:17:48,150 --> 00:17:53,890 +of how the data are distributed, I mean, if the + +203 +00:17:53,890 --> 00:17:58,190 +data is not symmetric or + +204 +00:17:58,190 --> 00:18:02,910 +not bell-shaped, then we can say that at least + +205 +00:18:05,150 --> 00:18:10,990 +Instead of saying 68, 95, or 99.7, just say around + +206 +00:18:10,990 --> 00:18:18,690 +1 minus 1 over k squared. Multiply this by 100. + +207 +00:18:19,650 --> 00:18:25,190 +All of the values will fall within k. So k is + +208 +00:18:25,190 --> 00:18:30,410 +number of standard deviations. I mean number of + +209 +00:18:30,410 --> 00:18:33,990 +signals. So if the data is not bell shaped, then + +210 +00:18:33,990 --> 00:18:38,790 +you can say that approximately at least 1 minus 1 + +211 +00:18:38,790 --> 00:18:43,410 +over k squared times 100% of the values will fall + +212 +00:18:43,410 --> 00:18:47,630 +within k standard deviations of the mean. In this + +213 +00:18:47,630 --> 00:18:50,950 +case, we assume that k is greater than 1. I mean, + +214 +00:18:51,030 --> 00:18:54,550 +you cannot apply this rule if k equals 1. Because + +215 +00:18:54,550 --> 00:19:00,090 +if k is 1. Then 1 minus 1 is 0. That makes no + +216 +00:19:00,090 --> 00:19:03,410 +sense. For this reason, k is above 1 or greater + +217 +00:19:03,410 --> 00:19:09,110 +than 1. So this rule is valid only for k greater + +218 +00:19:09,110 --> 00:19:14,390 +than 1. So you can see that at least 1 minus 1 + +219 +00:19:14,390 --> 00:19:19,270 +over k squared of the data or of the values will + +220 +00:19:19,270 --> 00:19:24,230 +fall within k standard equations. So now, for + +221 +00:19:24,230 --> 00:19:25,830 +example, suppose k equals 2. 
+ +222 +00:19:28,690 --> 00:19:32,970 +When k equals 2, we said that 95% of the data + +223 +00:19:32,970 --> 00:19:36,370 +falls within two standard ratios. That if the data + +224 +00:19:36,370 --> 00:19:39,350 +is bell shaped. Now what's about if the data is + +225 +00:19:39,350 --> 00:19:43,210 +not bell shaped? We have to use shape shape rule. + +226 +00:19:43,830 --> 00:19:51,170 +So 1 minus 1 over k is 2. So 2, 2, 2 squared. So 1 + +227 +00:19:51,170 --> 00:19:58,130 +minus 1 fourth. That gives. three quarters, I + +228 +00:19:58,130 --> 00:20:03,370 +mean, 75%. So instead of saying 95% of the data + +229 +00:20:03,370 --> 00:20:06,850 +lies within one or two standard deviations of the + +230 +00:20:06,850 --> 00:20:13,070 +mean, if the data is bell-shaped, if the data is + +231 +00:20:13,070 --> 00:20:17,590 +not bell-shaped, you have to say that 75% of the + +232 +00:20:17,590 --> 00:20:22,190 +data falls within two standard deviations. For + +233 +00:20:22,190 --> 00:20:26,570 +bell shape, you are 95% confident there. But here, + +234 +00:20:27,190 --> 00:20:36,710 +you're just 75% confident. Suppose k is 3. Now for + +235 +00:20:36,710 --> 00:20:41,110 +k equal 3, we said 99.7% of the data falls within + +236 +00:20:41,110 --> 00:20:44,890 +three standard deviations. Now here, if the data + +237 +00:20:44,890 --> 00:20:51,940 +is not bell shape, 1 minus 1 over k squared. 1 + +238 +00:20:51,940 --> 00:20:56,540 +minus 1 + +239 +00:20:56,540 --> 00:21:00,760 +over 3 squared is one-ninth. One-ninth is 0.11. 1 + +240 +00:21:00,760 --> 00:21:06,440 +minus 0.11 means 89% of the data, instead of + +241 +00:21:06,440 --> 00:21:13,900 +saying 99.7. So 89% of the data will fall within + +242 +00:21:13,900 --> 00:21:16,460 +three standard deviations of the population mean. + +243 +00:21:18,510 --> 00:21:22,610 +regardless of how the data are distributed around + +244 +00:21:22,610 --> 00:21:26,350 +them. So here, we have two scenarios. 
One, if the + +245 +00:21:26,350 --> 00:21:29,390 +data is symmetric, which is called empirical rule + +246 +00:21:29,390 --> 00:21:34,710 +68959917. And the other one is called shape-by + +247 +00:21:34,710 --> 00:21:38,370 +-shape rule, and that regardless of the shape of + +248 +00:21:38,370 --> 00:21:38,710 +the data. + +249 +00:21:41,890 --> 00:21:49,210 +Excuse me? Yes. In this case, you don't know the + +250 +00:21:49,210 --> 00:21:51,490 +distribution of the data. And the reality is + +251 +00:21:51,490 --> 00:21:58,650 +sometimes the data has unknown distribution. For + +252 +00:21:58,650 --> 00:22:02,590 +this reason, we have to use chip-chip portions. + +253 +00:22:05,410 --> 00:22:09,830 +That's all for empirical rule and chip-chip rule. + +254 +00:22:11,230 --> 00:22:18,150 +The next topic is quartile measures. So far, we + +255 +00:22:18,150 --> 00:22:24,330 +have discussed central tendency measures, and we + +256 +00:22:24,330 --> 00:22:28,450 +have talked about mean, median, and more. Then we + +257 +00:22:28,450 --> 00:22:32,830 +moved to location of variability or spread or + +258 +00:22:32,830 --> 00:22:37,810 +dispersion. And we talked about range, variance, + +259 +00:22:37,950 --> 00:22:38,890 +and standardization. + +260 +00:22:41,570 --> 00:22:48,230 +And we said that outliers affect the mean much + +261 +00:22:48,230 --> 00:22:51,470 +more than the median. And also, outliers affect + +262 +00:22:51,470 --> 00:22:55,730 +the range. Here, we'll talk about other measures + +263 +00:22:55,730 --> 00:22:59,570 +of the data, which is called quartile measures. + +264 +00:23:01,190 --> 00:23:03,450 +Here, actually, we'll talk about two measures. + +265 +00:23:04,270 --> 00:23:10,130 +First one is called first quartile, And the other + +266 +00:23:10,130 --> 00:23:14,150 +one is third quartile. So we have two measures, + +267 +00:23:15,470 --> 00:23:26,030 +first and third quartile. 
Quartiles split the rank + +268 +00:23:26,030 --> 00:23:32,930 +data into four equal segments. I mean, these + +269 +00:23:32,930 --> 00:23:37,190 +measures split the data you have into four equal + +270 +00:23:37,190 --> 00:23:37,730 +parts. + +271 +00:23:42,850 --> 00:23:48,690 +Q1 has 25% of the data fall below it. I mean 25% + +272 +00:23:48,690 --> 00:23:56,410 +of the values lie below Q1. So it means 75% of the + +273 +00:23:56,410 --> 00:24:04,410 +values above it. So 25 below and 75 above. But you + +274 +00:24:04,410 --> 00:24:07,370 +have to be careful that the data is arranged from + +275 +00:24:07,370 --> 00:24:12,430 +smallest to largest. So in this case, Q1. is a + +276 +00:24:12,430 --> 00:24:19,630 +value that has 25% below it. So Q2 is called the + +277 +00:24:19,630 --> 00:24:22,450 +median. The median, the value in the middle when + +278 +00:24:22,450 --> 00:24:26,250 +we arrange the data from smallest to largest. So + +279 +00:24:26,250 --> 00:24:31,190 +that means 50% of the data below and also 50% of + +280 +00:24:31,190 --> 00:24:36,370 +the data above. The other measure is called + +281 +00:24:36,370 --> 00:24:41,730 +theoretical qualifying. In this case, we have 25% + +282 +00:24:41,730 --> 00:24:47,950 +of the data above Q3 and 75% of the data below Q3. + +283 +00:24:49,010 --> 00:24:54,410 +So quartiles split the rank data into four equal + +284 +00:24:54,410 --> 00:25:00,190 +segments, Q1 25% to the left, Q2 50% to the left, + +285 +00:25:00,970 --> 00:25:08,590 +Q3 75% to the left, and 25% to the right. Before, + +286 +00:25:09,190 --> 00:25:13,830 +we explained how to compute the median, and let's + +287 +00:25:13,830 --> 00:25:18,850 +see how can we compute first and third quartile. + +288 +00:25:19,750 --> 00:25:23,650 +If you remember, when we computed the median, + +289 +00:25:24,350 --> 00:25:28,480 +first we locate the position of the median. And we + +290 +00:25:28,480 --> 00:25:33,540 +said that the rank of n is odd. 
Yes, it was n plus + +291 +00:25:33,540 --> 00:25:37,800 +1 divided by 2. This is the location of the + +292 +00:25:37,800 --> 00:25:41,100 +median, not the value. Sometimes the value may be + +293 +00:25:41,100 --> 00:25:44,900 +equal to the location, but most of the time it's + +294 +00:25:44,900 --> 00:25:48,340 +not. It's not the case. Now let's see how can we + +295 +00:25:48,340 --> 00:25:54,130 +locate the fair support. The first quartile after + +296 +00:25:54,130 --> 00:25:56,690 +you arrange the data from smallest to largest, the + +297 +00:25:56,690 --> 00:26:01,290 +location is n plus 1 divided by 2. So that's the + +298 +00:26:01,290 --> 00:26:06,890 +location of the first quartile. The median, as we + +299 +00:26:06,890 --> 00:26:10,390 +mentioned before, is located in the middle. So it + +300 +00:26:10,390 --> 00:26:15,210 +makes sense that if n is odd, the location of the + +301 +00:26:15,210 --> 00:26:20,490 +median is n plus 1 over 2. Now, for the third + +302 +00:26:20,490 --> 00:26:27,160 +quartile position, The location is N plus 1 + +303 +00:26:27,160 --> 00:26:31,160 +divided by 4 times 3. So 3 times N plus 1 divided + +304 +00:26:31,160 --> 00:26:39,920 +by 4. That's how can we locate Q1, Q2, and Q3. So + +305 +00:26:39,920 --> 00:26:42,080 +one more time, the median, the value in the + +306 +00:26:42,080 --> 00:26:46,260 +middle, and it's located exactly at the position N + +307 +00:26:46,260 --> 00:26:52,590 +plus 1 over 2 for the range data. Q1 is located at + +308 +00:26:52,590 --> 00:26:56,770 +n plus one divided by four. Q3 is located at the + +309 +00:26:56,770 --> 00:26:59,670 +position three times n plus one divided by four. + +310 +00:27:03,630 --> 00:27:07,490 +Now, when calculating the rank position, we can + +311 +00:27:07,490 --> 00:27:14,690 +use one of these rules. 
First, if the result of + +312 +00:27:14,690 --> 00:27:18,010 +the location, I mean, is a whole number, I mean, + +313 +00:27:18,250 --> 00:27:24,050 +if it is an integer. Then the rank position is the + +314 +00:27:24,050 --> 00:27:28,590 +same number. For example, suppose the rank + +315 +00:27:28,590 --> 00:27:34,610 +position is four. So position number four is your + +316 +00:27:34,610 --> 00:27:38,450 +quartile, either first or third or second + +317 +00:27:38,450 --> 00:27:42,510 +quartile. So if the result is a whole number, then + +318 +00:27:42,510 --> 00:27:48,350 +it is the rank position used. Now, if the result + +319 +00:27:48,350 --> 00:27:52,250 +is a fractional half, I mean if the right position + +320 +00:27:52,250 --> 00:27:58,830 +is 2.5, 3.5, 4.5. In this case, average the two + +321 +00:27:58,830 --> 00:28:02,050 +corresponding data values. For example, if the + +322 +00:28:02,050 --> 00:28:10,170 +right position is 2.5. So the rank position is 2 + +323 +00:28:10,170 --> 00:28:13,210 +.5. So take the average of the corresponding + +324 +00:28:13,210 --> 00:28:18,950 +values for the rank 2 and 3. So look at the value. + +325 +00:28:19,280 --> 00:28:24,740 +at rank 2, value at rank 3, then take the average + +326 +00:28:24,740 --> 00:28:29,300 +of the corresponding values. That if the rank + +327 +00:28:29,300 --> 00:28:31,280 +position is fractional. + +328 +00:28:34,380 --> 00:28:37,900 +So if the result is whole number, just take it as + +329 +00:28:37,900 --> 00:28:41,160 +it is. If it is a fractional half, take the + +330 +00:28:41,160 --> 00:28:44,460 +corresponding data values and take the average of + +331 +00:28:44,460 --> 00:28:49,110 +these two values. Now, if the result is not a + +332 +00:28:49,110 --> 00:28:53,930 +whole number or a fraction of it. For example, + +333 +00:28:54,070 --> 00:29:01,910 +suppose the location is 2.1. So the position is 2, + +334 +00:29:02,390 --> 00:29:06,550 +just round, up to the nearest integer. 
So that's + +335 +00:29:06,550 --> 00:29:11,350 +2. What's about if the position rank is 2.6? Just + +336 +00:29:11,350 --> 00:29:16,060 +rank up to 3. So that's 3. So that's the rule you + +337 +00:29:16,060 --> 00:29:21,280 +have to follow if the result is a number, a whole + +338 +00:29:21,280 --> 00:29:27,200 +number, I mean integer, fraction of half, or not + +339 +00:29:27,200 --> 00:29:31,500 +real number, I mean, not whole number, or fraction + +340 +00:29:31,500 --> 00:29:35,540 +of half. Look at this specific example. Suppose we + +341 +00:29:35,540 --> 00:29:40,180 +have this data. This is ordered array, 11, 12, up + +342 +00:29:40,180 --> 00:29:45,680 +to 22. And let's see how can we compute These + +343 +00:29:45,680 --> 00:29:46,240 +measures. + +344 +00:29:50,080 --> 00:29:51,700 +Look carefully here. + +345 +00:29:55,400 --> 00:29:59,260 +First, let's compute the median. The median and + +346 +00:29:59,260 --> 00:30:02,360 +the value in the middle. How many values we have? + +347 +00:30:02,800 --> 00:30:08,920 +There are nine values. So the middle is number + +348 +00:30:08,920 --> 00:30:15,390 +five. One, two, three, four, five. So 16. This + +349 +00:30:15,390 --> 00:30:23,010 +value is the median. Now look at the values below + +350 +00:30:23,010 --> 00:30:29,650 +the median. There are 4 and 4 below and above the + +351 +00:30:29,650 --> 00:30:34,970 +median. Now let's see how can we compute Q1. The + +352 +00:30:34,970 --> 00:30:38,250 +position of Q1, as we mentioned, is N plus 1 + +353 +00:30:38,250 --> 00:30:42,630 +divided by 4. So N is 9 plus 1 divided by 4 is 2 + +354 +00:30:42,630 --> 00:30:50,330 +.5. 2.5 position, it means you have to take the + +355 +00:30:50,330 --> 00:30:54,490 +average of the two corresponding values, 2 and 3. + +356 +00:30:55,130 --> 00:31:01,010 +So 2 and 3, so 12 plus 13 divided by 2. That gives + +357 +00:31:01,010 --> 00:31:08,390 +12.5. So this is Q1. + +358 +00:31:08,530 --> 00:31:18,210 +So Q1 is 12.5. 
Now what's about Q3? The Q3, the + +359 +00:31:18,210 --> 00:31:27,810 +rank position, Q1 was 2.5. So Q3 should be three + +360 +00:31:27,810 --> 00:31:32,410 +times that value, because it's three times A plus + +361 +00:31:32,410 --> 00:31:36,090 +1 over 4. That means the rank position is 7.5. + +362 +00:31:36,590 --> 00:31:39,410 +That means you have to take the average of the 7 + +363 +00:31:39,410 --> 00:31:44,890 +and 8 position. 7 and 8 is 18. + +364 +00:31:45,880 --> 00:31:56,640 +which is 19.5. So that's Q3, 19.5. + +365 +00:32:00,360 --> 00:32:09,160 +So this is Q3. This value is Q1. And this value + +366 +00:32:09,160 --> 00:32:15,910 +is? Now, Q2 is the center. is located in the + +367 +00:32:15,910 --> 00:32:18,570 +center because, as we mentioned, four below and + +368 +00:32:18,570 --> 00:32:22,950 +four above. Now what's about Q1? Q1 is not in the + +369 +00:32:22,950 --> 00:32:28,150 +center of the entire data. Because Q1, 12.5, so + +370 +00:32:28,150 --> 00:32:31,830 +two points below and the others maybe how many + +371 +00:32:31,830 --> 00:32:34,750 +above two, four, six, seven observations above it. + +372 +00:32:35,390 --> 00:32:40,130 +So that means Q1 is not center. Also Q3 is not + +373 +00:32:40,130 --> 00:32:43,170 +center because two observations above it and seven + +374 +00:32:43,170 --> 00:32:48,780 +below it. So that means Q1 and Q3 are measures of + +375 +00:32:48,780 --> 00:32:52,480 +non-central location, while the median is a + +376 +00:32:52,480 --> 00:32:56,080 +measure of central location. But if you just look + +377 +00:32:56,080 --> 00:33:03,720 +at the data below the median, just focus on the + +378 +00:33:03,720 --> 00:33:09,100 +data below the median, 12.5 lies exactly in the + +379 +00:33:09,100 --> 00:33:13,130 +middle of the data. So 12.5 is the center of the + +380 +00:33:13,130 --> 00:33:18,090 +data. I mean, Q1 is the center of the data below + +381 +00:33:18,090 --> 00:33:22,810 +the overall median. 
The overall median was 16. So + +382 +00:33:22,810 --> 00:33:27,490 +the data before 16, the median for this data is 12 + +383 +00:33:27,490 --> 00:33:31,770 +.5, which is the first part. Similarly, if you + +384 +00:33:31,770 --> 00:33:36,870 +look at the data above Q2, + +385 +00:33:37,770 --> 00:33:42,190 +now 19.5. is located in the middle of the line. So + +386 +00:33:42,190 --> 00:33:46,470 +Q3 is a measure of center for the data above the + +387 +00:33:46,470 --> 00:33:48,390 +line. Make sense? + +388 +00:33:51,370 --> 00:33:56,430 +So that's how can we compute first, second, and + +389 +00:33:56,430 --> 00:34:03,510 +third part. Any questions? Yes, but it's a whole + +390 +00:34:03,510 --> 00:34:09,370 +number. Whole number, it means any integer. For + +391 +00:34:09,370 --> 00:34:14,450 +example, yeah, exactly, yes. Suppose we have + +392 +00:34:14,450 --> 00:34:18,090 +number of data is seven. + +393 +00:34:22,070 --> 00:34:25,070 +Number of observations we have is seven. So the + +394 +00:34:25,070 --> 00:34:29,730 +rank position n plus one divided by two, seven + +395 +00:34:29,730 --> 00:34:33,890 +plus one over two is four. Four means the whole + +396 +00:34:33,890 --> 00:34:37,780 +number, I mean an integer. then this case just use + +397 +00:34:37,780 --> 00:34:45,280 +it as it is. Now let's see the benefit or the + +398 +00:34:45,280 --> 00:34:48,680 +feature of using Q1 and Q3. + +399 +00:34:55,180 --> 00:35:01,300 +So let's move at the inter-equilateral range or + +400 +00:35:01,300 --> 00:35:01,760 +IQ1. + +401 +00:35:08,020 --> 00:35:14,580 +2.5 is the position. So the rank data of the rank + +402 +00:35:14,580 --> 00:35:19,180 +data. So take the average of the two corresponding + +403 +00:35:19,180 --> 00:35:25,700 +values of this one, which is 2 and 3. So 2 and 3. + +404 +00:35:27,400 --> 00:35:31,940 +The average of these two values is 12.5. One more + +405 +00:35:31,940 --> 00:35:40,920 +time, 2.5 is not the value. 
It is the rank + +406 +00:35:40,920 --> 00:35:47,880 +position of the first quartile. So in this case, 2 + +407 +00:35:47,880 --> 00:35:57,740 +.5 takes position 2 and 3. The average of these + +408 +00:35:57,740 --> 00:36:02,580 +two rank positions the corresponding one, which + +409 +00:36:02,580 --> 00:36:10,080 +are 12 and 13. So 12 for position number 2, 13 for + +410 +00:36:10,080 --> 00:36:13,580 +the other one. So the average is just divided by + +411 +00:36:13,580 --> 00:36:16,660 +2. That will give 12.5. + +412 +00:36:28,760 --> 00:36:34,900 +Next, again, the inter-quartile range, which is + +413 +00:36:34,900 --> 00:36:44,160 +denoted by IQR. Now IQR is the distance between Q3 + +414 +00:36:44,160 --> 00:36:48,000 +and Q1. I mean the difference between Q3 and Q1 is + +415 +00:36:48,000 --> 00:36:53,460 +called the inter-quartile range. And this one + +416 +00:36:53,460 --> 00:36:56,680 +measures the spread in the middle 50% of the data. + +417 +00:36:57,680 --> 00:36:59,060 +Because if you imagine that, + +418 +00:37:02,250 --> 00:37:10,250 +This is Q1 and Q3. IQR is the distance between + +419 +00:37:10,250 --> 00:37:14,130 +these two values. Now imagine that we have just + +420 +00:37:14,130 --> 00:37:19,570 +this data, which represents 50%. + +421 +00:37:21,540 --> 00:37:25,440 +And IQR, the definition is a Q3. So we have just + +422 +00:37:25,440 --> 00:37:31,480 +this data, for example. And IQ3 is Q3 minus Q1. It + +423 +00:37:31,480 --> 00:37:37,080 +means IQ3 is the maximum minus the minimum of the + +424 +00:37:37,080 --> 00:37:41,540 +50% of the middle data. So it means this is your + +425 +00:37:41,540 --> 00:37:46,980 +range, new range. After you've secluded 25% to the + +426 +00:37:46,980 --> 00:37:52,450 +left of Q1, And also you ignored totally 25% of + +427 +00:37:52,450 --> 00:37:57,070 +the data above Q3. So that means you're focused on + +428 +00:37:57,070 --> 00:38:00,630 +50% of the data. 
And just take the average of + +429 +00:38:00,630 --> 00:38:04,070 +these two points, I'm sorry, the distance of these + +430 +00:38:04,070 --> 00:38:07,670 +two points Q3 minus Q1. So you will get the range. + +431 +00:38:07,990 --> 00:38:11,170 +But not exactly the range. It's called, sometimes + +432 +00:38:11,170 --> 00:38:16,390 +it's called mid-spread range. Because mid-spread, + +433 +00:38:16,510 --> 00:38:19,910 +because we are talking about middle of the data, + +434 +00:38:19,990 --> 00:38:22,430 +50% of the data, which is located in the middle. + +435 +00:38:23,110 --> 00:38:28,550 +So do you think in this case, outliers actually, + +436 +00:38:29,090 --> 00:38:32,930 +they are extreme values, the data below Q1 and + +437 +00:38:32,930 --> 00:38:38,150 +data above Q3. That means inter-quartile range, Q3 + +438 +00:38:38,150 --> 00:38:42,410 +minus Q1, is not affected by outliers. Because you + +439 +00:38:42,410 --> 00:38:49,150 +ignored the small values And the high values. So + +440 +00:38:49,150 --> 00:38:53,890 +IQR is not affected by outliers. So in case of + +441 +00:38:53,890 --> 00:38:58,930 +outliers, it's better to use IQR. Because the + +442 +00:38:58,930 --> 00:39:01,610 +range is maximum minus minimum. And as we + +443 +00:39:01,610 --> 00:39:05,030 +mentioned before, the range is affected by + +444 +00:39:05,030 --> 00:39:11,650 +outliers. So IQR is again called the mid-spread + +445 +00:39:11,650 --> 00:39:17,940 +because it covers the middle 50% of the data. IQR + +446 +00:39:17,940 --> 00:39:20,120 +again is a measure of variability that is not + +447 +00:39:20,120 --> 00:39:23,900 +influenced or affected by outliers or extreme + +448 +00:39:23,900 --> 00:39:26,680 +values. So in the presence of outliers, it's + +449 +00:39:26,680 --> 00:39:34,160 +better to use IQR instead of using the range. So + +450 +00:39:34,160 --> 00:39:39,140 +again, median and the range are not affected by + +451 +00:39:39,140 --> 00:39:43,180 +outliers. 
So in case of the presence of outliers, + +452 +00:39:43,340 --> 00:39:46,380 +we have to use these measures, one as measure of + +453 +00:39:46,380 --> 00:39:49,780 +central and the other as measure of spread. So + +454 +00:39:49,780 --> 00:39:54,420 +measures like Q1, Q3, and IQR that are not + +455 +00:39:54,420 --> 00:39:57,400 +influenced by outliers are called resistant + +456 +00:39:57,400 --> 00:40:01,980 +measures. Resistance means in case of outliers, + +457 +00:40:02,380 --> 00:40:06,120 +they remain in the same position or approximately + +458 +00:40:06,120 --> 00:40:09,870 +in the same position. Because outliers don't + +459 +00:40:09,870 --> 00:40:13,870 +affect these measures. I mean, don't affect Q1, + +460 +00:40:14,830 --> 00:40:20,130 +Q3, and consequently IQR, because IQR is just the + +461 +00:40:20,130 --> 00:40:24,990 +distance between Q3 and Q1. So to determine the + +462 +00:40:24,990 --> 00:40:29,430 +value of IQR, you have first to compute Q1, Q3, + +463 +00:40:29,750 --> 00:40:35,780 +then take the difference between these two. So, + +464 +00:40:36,120 --> 00:40:41,120 +for example, suppose we have a data, and that data + +465 +00:40:41,120 --> 00:40:51,400 +has Q1 equals 30, and Q3 is 55. Suppose for a data + +466 +00:40:51,400 --> 00:41:00,140 +set, that data set has Q1 30, Q3 is 57. The IQR, + +467 +00:41:00,800 --> 00:41:07,240 +or Inter Equal Hyper Range, 57 minus 30 is 27. Now + +468 +00:41:07,240 --> 00:41:12,460 +what's the range? The range is maximum for the + +469 +00:41:12,460 --> 00:41:17,380 +largest value, which is 17 minus 12. That gives + +470 +00:41:17,380 --> 00:41:21,420 +58. Now look at the difference between the two + +471 +00:41:21,420 --> 00:41:26,900 +ranges. The inter-quartile range is 27. The range + +472 +00:41:26,900 --> 00:41:29,800 +is 58. 
There is a big difference between these two + +473 +00:41:29,800 --> 00:41:35,750 +values because range depends only on smallest and + +474 +00:41:35,750 --> 00:41:40,190 +largest. And these values could be outliers. For + +475 +00:41:40,190 --> 00:41:44,410 +this reason, the range value is higher or greater + +476 +00:41:44,410 --> 00:41:48,410 +than the required range, which is just the + +477 +00:41:48,410 --> 00:41:54,050 +distance of the 50% of the middle data. For this + +478 +00:41:54,050 --> 00:41:59,470 +reason, it's better to use the range in case of + +479 +00:41:59,470 --> 00:42:03,940 +outliers. Make sense? Any question? + +480 +00:42:08,680 --> 00:42:19,320 +Five-number summary are smallest + +481 +00:42:19,320 --> 00:42:27,380 +value, largest value, also first quartile, third + +482 +00:42:27,380 --> 00:42:32,250 +quartile, and the median. These five numbers are + +483 +00:42:32,250 --> 00:42:35,870 +called five-number summary, because by using these + +484 +00:42:35,870 --> 00:42:41,590 +statistics, smallest, first, median, third + +485 +00:42:41,590 --> 00:42:46,010 +quarter, and largest, you can describe the center + +486 +00:42:46,010 --> 00:42:52,590 +spread and the shape of the distribution. So by + +487 +00:42:52,590 --> 00:42:56,450 +using five-number summary, you can tell something + +488 +00:42:56,450 --> 00:43:00,090 +about it. The center of the data, I mean the value + +489 +00:43:00,090 --> 00:43:02,070 +in the middle, because the median is the value in + +490 +00:43:02,070 --> 00:43:06,550 +the middle. Spread, because we can talk about the + +491 +00:43:06,550 --> 00:43:11,070 +IQR, which is the range, and also the shape of the + +492 +00:43:11,070 --> 00:43:15,450 +data. And let's see, let's move to this slide, + +493 +00:43:16,670 --> 00:43:18,530 +slide number 50. + +494 +00:43:21,530 --> 00:43:25,090 +Let's see how can we construct something called + +495 +00:43:25,090 --> 00:43:31,850 +box plot. Box plot. 
Box plot can be constructed by + +496 +00:43:31,850 --> 00:43:34,990 +using the five number summary. We have smallest + +497 +00:43:34,990 --> 00:43:37,550 +value. On the other hand, we have the largest + +498 +00:43:37,550 --> 00:43:43,430 +value. Also, we have Q1, the first quartile, the + +499 +00:43:43,430 --> 00:43:47,510 +median, and Q3. For symmetric distribution, I mean + +500 +00:43:47,510 --> 00:43:52,490 +if the data is bell-shaped. In this case, the + +501 +00:43:52,490 --> 00:43:56,570 +vertical line in the box which represents the + +502 +00:43:56,570 --> 00:43:59,730 +median should be located in the middle of this + +503 +00:43:59,730 --> 00:44:05,510 +box, also in the middle of the entire data. Look + +504 +00:44:05,510 --> 00:44:11,350 +carefully at this vertical line. This line splits + +505 +00:44:11,350 --> 00:44:16,070 +the data into two halves, 25% to the left and 25% + +506 +00:44:16,070 --> 00:44:19,960 +to the right. And also this vertical line splits + +507 +00:44:19,960 --> 00:44:24,720 +the data into two halves, from the smallest to + +508 +00:44:24,720 --> 00:44:29,760 +largest, because there are 50% of the observations + +509 +00:44:29,760 --> 00:44:34,560 +lie below, and 50% lies above. So that means by + +510 +00:44:34,560 --> 00:44:37,840 +using box plot, you can tell something about the + +511 +00:44:37,840 --> 00:44:42,520 +shape of the distribution. So again, if the data + +512 +00:44:42,520 --> 00:44:48,270 +are symmetric around the median, And the central + +513 +00:44:48,270 --> 00:44:53,910 +line, this box, and central line are centered + +514 +00:44:53,910 --> 00:44:57,550 +between the endpoints. I mean, this vertical line + +515 +00:44:57,550 --> 00:45:00,720 +is centered between these two endpoints. between + +516 +00:45:00,720 --> 00:45:04,180 +Q1 and Q3. And the whole box plot is centered + +517 +00:45:04,180 --> 00:45:07,100 +between the smallest and the largest value. 
And + +518 +00:45:07,100 --> 00:45:10,840 +also the distance between the median and the + +519 +00:45:10,840 --> 00:45:14,320 +smallest is roughly equal to the distance between + +520 +00:45:14,320 --> 00:45:19,760 +the median and the largest. So you can tell + +521 +00:45:19,760 --> 00:45:22,660 +something about the shape of the distribution by + +522 +00:45:22,660 --> 00:45:26,780 +using the box plot. + +523 +00:45:32,870 --> 00:45:36,110 +The graph in the middle. Here median and median + +524 +00:45:36,110 --> 00:45:40,110 +are the same. The box plot, we have here the + +525 +00:45:40,110 --> 00:45:43,830 +median in the middle of the box, also in the + +526 +00:45:43,830 --> 00:45:47,390 +middle of the entire data. So you can say that the + +527 +00:45:47,390 --> 00:45:50,210 +distribution of this data is symmetric or is bell + +528 +00:45:50,210 --> 00:45:55,750 +-shaped. It's normal distribution. On the other + +529 +00:45:55,750 --> 00:46:00,110 +hand, if you look here, you will see that the + +530 +00:46:00,110 --> 00:46:06,160 +median is not in the center of the box. It's near + +531 +00:46:06,160 --> 00:46:12,580 +Q3. So the left tail, I mean, the distance between + +532 +00:46:12,580 --> 00:46:16,620 +the median and the smallest, this tail is longer + +533 +00:46:16,620 --> 00:46:20,600 +than the right tail. In this case, it's called + +534 +00:46:20,600 --> 00:46:24,850 +left skewed or skewed to the left. or negative + +535 +00:46:24,850 --> 00:46:29,510 +skewness. So if the data is not symmetric, it + +536 +00:46:29,510 --> 00:46:35,630 +might be left skewed. I mean, the left tail is + +537 +00:46:35,630 --> 00:46:40,590 +longer than the right tail. On the other hand, if + +538 +00:46:40,590 --> 00:46:45,950 +the median is located near Q1, it means the right + +539 +00:46:45,950 --> 00:46:49,930 +tail is longer than the left tail, and it's called + +540 +00:46:49,930 --> 00:46:56,470 +positive skewed or right skewed. 
So for symmetric + +541 +00:46:56,470 --> 00:47:00,310 +distribution, the median in the middle, for left + +542 +00:47:00,310 --> 00:47:04,570 +or right skewed, the median either is close to the + +543 +00:47:04,570 --> 00:47:09,930 +Q3 or skewed distribution to the left, or the + +544 +00:47:09,930 --> 00:47:14,910 +median is close to Q1 and the distribution is + +545 +00:47:14,910 --> 00:47:20,570 +right skewed or has positive skewness. That's how + +546 +00:47:20,570 --> 00:47:25,860 +can we tell spread center and the shape by using + +547 +00:47:25,860 --> 00:47:28,460 +the box plot. So center is the value in the + +548 +00:47:28,460 --> 00:47:32,860 +middle, Q2 or the median. Spread is the distance + +549 +00:47:32,860 --> 00:47:38,340 +between Q1 and Q3. So Q3 minus Q1 gives IQR. And + +550 +00:47:38,340 --> 00:47:41,880 +finally, you can tell something about the shape of + +551 +00:47:41,880 --> 00:47:45,140 +the distribution by just looking at the scatter + +552 +00:47:45,140 --> 00:47:46,440 +plot. + +553 +00:47:49,700 --> 00:47:56,330 +Let's look at This example, and suppose we have + +554 +00:47:56,330 --> 00:48:02,430 +small data set. And let's see how can we construct + +555 +00:48:02,430 --> 00:48:05,750 +the MaxPlot. In order to construct MaxPlot, you + +556 +00:48:05,750 --> 00:48:09,510 +have to compute minimum first or smallest value, + +557 +00:48:09,810 --> 00:48:14,650 +largest value. Besides that, you have to compute + +558 +00:48:14,650 --> 00:48:21,110 +first and third part time and also Q2. For this + +559 +00:48:21,110 --> 00:48:27,570 +simple example, Q1 is 2, Q3 is 5, and the median + +560 +00:48:27,570 --> 00:48:33,990 +is 3. Smallest is 0, largest is 1 7. Now, be + +561 +00:48:33,990 --> 00:48:38,130 +careful here, 1 7 seems to be an outlier. But so + +562 +00:48:38,130 --> 00:48:44,190 +far, we don't explain how can we decide if a data + +563 +00:48:44,190 --> 00:48:47,550 +value is considered to be an outlier. 
But at least + +564 +00:48:47,550 --> 00:48:53,080 +1 7. is a suspected value to be an outlier, seems + +565 +00:48:53,080 --> 00:48:57,200 +to be. Sometimes you are 95% sure that that point + +566 +00:48:57,200 --> 00:49:00,160 +is an outlier, but you cannot tell, because you + +567 +00:49:00,160 --> 00:49:04,060 +have to have a specific rule that can decide if + +568 +00:49:04,060 --> 00:49:07,400 +that point is an outlier or not. But at least it + +569 +00:49:07,400 --> 00:49:12,060 +makes sense that that point is considered maybe an + +570 +00:49:12,060 --> 00:49:14,700 +outlier. But let's see how can we construct that + +571 +00:49:14,700 --> 00:49:18,190 +first. The box plot. Again, as we mentioned, the + +572 +00:49:18,190 --> 00:49:21,630 +minimum value is zero. The maximum is 27. The Q1 + +573 +00:49:21,630 --> 00:49:27,830 +is 2. The median is 3. The Q3 is 5. Now, if you + +574 +00:49:27,830 --> 00:49:32,010 +look at the distance between, does this vertical + +575 +00:49:32,010 --> 00:49:35,790 +line lie between the line in the middle or the + +576 +00:49:35,790 --> 00:49:40,090 +center of the box? It's not exactly. But if you + +577 +00:49:40,090 --> 00:49:45,260 +look at this line, vertical line, and the location + +578 +00:49:45,260 --> 00:49:50,600 +of this with respect to the minimum and the + +579 +00:49:50,600 --> 00:49:56,640 +maximum. You will see that the right tail is much + +580 +00:49:56,640 --> 00:50:01,560 +longer than the left tail because it starts from 3 + +581 +00:50:01,560 --> 00:50:06,180 +up to 27. And the other one, from zero to three, + +582 +00:50:06,380 --> 00:50:09,760 +is a big distance between three and 27, compared + +583 +00:50:09,760 --> 00:50:13,140 +to the other one, zero to three. So it seems to be + +584 +00:50:13,140 --> 00:50:16,600 +this is quite skewed, so it's not at all + +585 +00:50:16,600 --> 00:50:23,700 +symmetric, because of this value. 
So maybe by + +586 +00:50:23,700 --> 00:50:25,580 +using MaxPlot, you can tell that point is + +587 +00:50:25,580 --> 00:50:31,440 +suspected to be an outlier. It has a very long + +588 +00:50:31,440 --> 00:50:32,800 +right tail. + +589 +00:50:35,560 --> 00:50:41,120 +So let's see how can we determine if a point is an + +590 +00:50:41,120 --> 00:50:50,400 +outlier or not. Sometimes we can use box plot to + +591 +00:50:50,400 --> 00:50:53,840 +determine if the point is an outlier or not. The + +592 +00:50:53,840 --> 00:51:00,860 +rule is that a value is considered an outlier It + +593 +00:51:00,860 --> 00:51:04,780 +is more than 1.5 times the entire quartile range + +594 +00:51:04,780 --> 00:51:11,420 +below Q1 or above it. Let's explain the meaning of + +595 +00:51:11,420 --> 00:51:12,260 +this sentence. + +596 +00:51:15,260 --> 00:51:20,100 +First, let's compute something called lower. + +597 +00:51:23,740 --> 00:51:28,540 +The lower limit is + +598 +00:51:28,540 --> 00:51:38,680 +not the minimum. It's Q1 minus 1.5 IQR. This is + +599 +00:51:38,680 --> 00:51:39,280 +the lower limit. + +600 +00:51:42,280 --> 00:51:47,560 +So it's 1.5 times IQR below Q1. This is the lower + +601 +00:51:47,560 --> 00:51:50,620 +limit. The upper limit, + +602 +00:51:54,680 --> 00:51:57,460 +Q3, + +603 +00:51:58,790 --> 00:52:06,890 +plus 1.5 times IQR. So we computed lower and upper + +604 +00:52:06,890 --> 00:52:13,350 +limit by using these rules. Q1 minus 1.5 IQR. So + +605 +00:52:13,350 --> 00:52:20,510 +it's 1.5 times IQR below Q1 and 1.5 times IQR + +606 +00:52:20,510 --> 00:52:25,070 +above Q1. Now, any value. + +607 +00:52:31,150 --> 00:52:38,610 +Is it smaller than the + +608 +00:52:38,610 --> 00:52:45,990 +lower limit or + +609 +00:52:45,990 --> 00:52:53,290 +greater than the + +610 +00:52:53,290 --> 00:52:54,150 +upper limit? + +611 +00:52:58,330 --> 00:53:04,600 +Any value. 
smaller than the lower limit and + +612 +00:53:04,600 --> 00:53:13,260 +greater than the upper limit is considered to + +613 +00:53:13,260 --> 00:53:20,720 +be an outlier. This is the rule how can you tell + +614 +00:53:20,720 --> 00:53:24,780 +if the point or data value is outlier or not. Just + +615 +00:53:24,780 --> 00:53:27,100 +compute lower limit and upper limit. + +616 +00:53:29,780 --> 00:53:35,580 +So lower limit, Q1 minus 1.5IQ3. Upper limit, Q3 + +617 +00:53:35,580 --> 00:53:38,620 +plus 1.5. This is a constant. + +618 +00:53:43,200 --> 00:53:47,040 +Now let's go back to the previous example, which + +619 +00:53:47,040 --> 00:53:53,800 +was, which Q1 was, what's the value of Q1? Q1 was + +620 +00:53:53,800 --> 00:53:57,680 +2. Q3 is 5. + +621 +00:54:00,650 --> 00:54:05,230 +In order to turn an outlier, you don't need the + +622 +00:54:05,230 --> 00:54:11,150 +value, the median. Now, Q3 is 5, Q1 is 2, so IQR + +623 +00:54:11,150 --> 00:54:21,050 +is 3. That's the value of IQR. Now, lower limit, A + +624 +00:54:21,050 --> 00:54:31,830 +times 2 minus 1.5 times IQR3. So that's minus 2.5. + +625 +00:54:33,550 --> 00:54:41,170 +U3 plus U3 is 3. It's 5, sorry. It's 5 plus 1.5. + +626 +00:54:41,650 --> 00:54:48,570 +That gives 9.5. Now, any point or any data value, + +627 +00:54:49,450 --> 00:54:55,950 +any data value falls below minus 2.5. I mean + +628 +00:54:55,950 --> 00:55:00,380 +smaller than minus 2.5. Or greater than 9.5 is an + +629 +00:55:00,380 --> 00:55:05,420 +outlier. If you look at the data you have, we have + +630 +00:55:05,420 --> 00:55:09,520 +0 up to 9. So none of these is considered to be an + +631 +00:55:09,520 --> 00:55:16,200 +outlier. But what's about 27? 27 is greater than, + +632 +00:55:16,260 --> 00:55:23,160 +much bigger than actually 9.5. So for that data, + +633 +00:55:24,020 --> 00:55:27,920 +27 is an outlier. So this is the way how can we + +634 +00:55:27,920 --> 00:55:36,120 +compute the outlier for the sample. 
Another + +635 +00:55:36,120 --> 00:55:39,620 +method. The score is another method to determine + +636 +00:55:39,620 --> 00:55:43,600 +if that point is an outlier or not. So, so far we + +637 +00:55:43,600 --> 00:55:48,300 +have two rules. One by using quartiles and the + +638 +00:55:48,300 --> 00:55:50,540 +other, as we mentioned last time, by using the + +639 +00:55:50,540 --> 00:55:54,200 +score. And for these scores, if you remember, any + +640 +00:55:54,200 --> 00:56:00,030 +values below lie Below minus three. And above + +641 +00:56:00,030 --> 00:56:03,430 +three is considered to be irrelevant. That's + +642 +00:56:03,430 --> 00:56:07,950 +another example. That's another way to figure out + +643 +00:56:07,950 --> 00:56:09,190 +if the data is irrelevant. + +644 +00:56:13,730 --> 00:56:17,110 +You can apply the two rules either for the sample + +645 +00:56:17,110 --> 00:56:20,190 +or the population. If you have the entire data, + +646 +00:56:20,890 --> 00:56:23,950 +you can also determine out there for the entire + +647 +00:56:23,950 --> 00:56:29,110 +dataset, even if that data is the population. But + +648 +00:56:29,110 --> 00:56:34,490 +most of the time, we select a sample, which is a + +649 +00:56:34,490 --> 00:56:37,790 +subset or a portion of that population. + +650 +00:56:40,570 --> 00:56:41,290 +Questions? + +651 +00:56:53,360 --> 00:57:00,000 +And locating outliers. So again, outlier is any + +652 +00:57:00,000 --> 00:57:05,000 +value that is above the upper limit or below the + +653 +00:57:05,000 --> 00:57:08,340 +lower limit. And also we can use this score also + +654 +00:57:08,340 --> 00:57:12,680 +to determine if that point is outlier or not. Next + +655 +00:57:12,680 --> 00:57:16,340 +time, Inshallah, we will go over the covariance + +656 +00:57:16,340 --> 00:57:19,420 +and the relationship and I will give some practice + +657 +00:57:19,420 --> 00:57:22,180 +problems for Chapter 3. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..f98e4b490c41b8660a9c3d6ec63bc80f8556dbd7 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 2291, "start": 11.85, "end": 22.91, "text": " Inshallah we'll start numerical descriptive majors for the population. Last time we talked about the same majors.", "tokens": [682, 2716, 13492, 321, 603, 722, 29054, 42585, 31770, 337, 264, 4415, 13, 5264, 565, 321, 2825, 466, 264, 912, 31770, 13], "avg_logprob": -0.3590353364529817, "compression_ratio": 1.175257731958763, "no_speech_prob": 1.0728836059570312e-06, "words": [{"start": 11.85, "end": 12.69, "word": " Inshallah", "probability": 0.6461588541666666}, {"start": 12.69, "end": 12.91, "word": " we'll", "probability": 0.5450439453125}, {"start": 12.91, "end": 13.33, "word": " start", "probability": 0.92724609375}, {"start": 13.33, "end": 14.09, "word": " numerical", "probability": 0.55029296875}, {"start": 14.09, "end": 15.07, "word": " descriptive", "probability": 0.66552734375}, {"start": 15.07, "end": 16.37, "word": " majors", "probability": 0.340576171875}, {"start": 16.37, "end": 16.89, "word": " for", "probability": 0.9267578125}, {"start": 16.89, "end": 17.17, "word": " the", "probability": 0.30712890625}, {"start": 17.17, "end": 17.77, "word": " population.", "probability": 0.7412109375}, {"start": 18.87, "end": 19.11, "word": " Last", "probability": 0.849609375}, {"start": 19.11, "end": 19.37, "word": " time", "probability": 0.8916015625}, {"start": 19.37, "end": 19.65, "word": " we", "probability": 0.8369140625}, {"start": 19.65, "end": 20.51, "word": " talked", "probability": 0.88330078125}, {"start": 20.51, "end": 20.93, "word": " about", "probability": 0.90625}, {"start": 20.93, "end": 22.27, "word": " the", "probability": 0.91357421875}, 
{"start": 22.27, "end": 22.51, "word": " same", "probability": 0.90283203125}, {"start": 22.51, "end": 22.91, "word": " majors.", "probability": 0.9013671875}], "temperature": 1.0}, {"id": 2, "seek": 4314, "start": 24.2, "end": 43.14, "text": " I mean the same descriptive measures for a sample. And we have already talked about the mean, variance, and standard deviation. These are called statistics because they are computed from the sample. Here we'll see how can we do the same", "tokens": [286, 914, 264, 912, 42585, 8000, 337, 257, 6889, 13, 400, 321, 362, 1217, 2825, 466, 264, 914, 11, 21977, 11, 293, 3832, 25163, 13, 1981, 366, 1219, 12523, 570, 436, 366, 40610, 490, 264, 6889, 13, 1692, 321, 603, 536, 577, 393, 321, 360, 264, 912], "avg_logprob": -0.18131510261446238, "compression_ratio": 1.462962962962963, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 24.2, "end": 24.46, "word": " I", "probability": 0.70166015625}, {"start": 24.46, "end": 24.62, "word": " mean", "probability": 0.95947265625}, {"start": 24.62, "end": 24.82, "word": " the", "probability": 0.740234375}, {"start": 24.82, "end": 25.02, "word": " same", "probability": 0.865234375}, {"start": 25.02, "end": 25.48, "word": " descriptive", "probability": 0.83935546875}, {"start": 25.48, "end": 25.78, "word": " measures", "probability": 0.83154296875}, {"start": 25.78, "end": 26.12, "word": " for", "probability": 0.93896484375}, {"start": 26.12, "end": 26.28, "word": " a", "probability": 0.83544921875}, {"start": 26.28, "end": 26.58, "word": " sample.", "probability": 0.86328125}, {"start": 27.38, "end": 27.54, "word": " And", "probability": 0.86865234375}, {"start": 27.54, "end": 27.68, "word": " we", "probability": 0.9345703125}, {"start": 27.68, "end": 27.86, "word": " have", "probability": 0.9384765625}, {"start": 27.86, "end": 28.08, "word": " already", "probability": 0.91552734375}, {"start": 28.08, "end": 28.36, "word": " talked", "probability": 0.87744140625}, {"start": 28.36, 
"end": 28.82, "word": " about", "probability": 0.9072265625}, {"start": 28.82, "end": 29.18, "word": " the", "probability": 0.662109375}, {"start": 29.18, "end": 29.46, "word": " mean,", "probability": 0.681640625}, {"start": 30.28, "end": 31.04, "word": " variance,", "probability": 0.88134765625}, {"start": 31.86, "end": 32.04, "word": " and", "probability": 0.93505859375}, {"start": 32.04, "end": 32.5, "word": " standard", "probability": 0.95654296875}, {"start": 32.5, "end": 33.2, "word": " deviation.", "probability": 0.92578125}, {"start": 33.86, "end": 34.36, "word": " These", "probability": 0.892578125}, {"start": 34.36, "end": 35.08, "word": " are", "probability": 0.94140625}, {"start": 35.08, "end": 35.56, "word": " called", "probability": 0.853515625}, {"start": 35.56, "end": 36.24, "word": " statistics", "probability": 0.89404296875}, {"start": 36.24, "end": 37.18, "word": " because", "probability": 0.6513671875}, {"start": 37.18, "end": 37.42, "word": " they", "probability": 0.8935546875}, {"start": 37.42, "end": 37.6, "word": " are", "probability": 0.94189453125}, {"start": 37.6, "end": 38.02, "word": " computed", "probability": 0.916015625}, {"start": 38.02, "end": 38.58, "word": " from", "probability": 0.8876953125}, {"start": 38.58, "end": 39.34, "word": " the", "probability": 0.869140625}, {"start": 39.34, "end": 39.68, "word": " sample.", "probability": 0.7958984375}, {"start": 40.08, "end": 40.32, "word": " Here", "probability": 0.82666015625}, {"start": 40.32, "end": 40.54, "word": " we'll", "probability": 0.58544921875}, {"start": 40.54, "end": 40.8, "word": " see", "probability": 0.91455078125}, {"start": 40.8, "end": 41.22, "word": " how", "probability": 0.8701171875}, {"start": 41.22, "end": 41.42, "word": " can", "probability": 0.923828125}, {"start": 41.42, "end": 41.66, "word": " we", "probability": 0.9599609375}, {"start": 41.66, "end": 42.58, "word": " do", "probability": 0.958984375}, {"start": 42.58, "end": 42.78, "word": " the", 
"probability": 0.916015625}, {"start": 42.78, "end": 43.14, "word": " same", "probability": 0.904296875}], "temperature": 1.0}, {"id": 3, "seek": 7354, "start": 44.3, "end": 73.54, "text": " measures but for a population, I mean for the entire dataset. So descriptive statistics described previously in the last two lectures was for a sample. Here we'll just see how can we compute these measures for the entire population. In this case, the statistics we talked about before are called", "tokens": [8000, 457, 337, 257, 4415, 11, 286, 914, 337, 264, 2302, 28872, 13, 407, 42585, 12523, 7619, 8046, 294, 264, 1036, 732, 16564, 390, 337, 257, 6889, 13, 1692, 321, 603, 445, 536, 577, 393, 321, 14722, 613, 8000, 337, 264, 2302, 4415, 13, 682, 341, 1389, 11, 264, 12523, 321, 2825, 466, 949, 366, 1219], "avg_logprob": -0.19147477861036333, "compression_ratio": 1.6174863387978142, "no_speech_prob": 0.0, "words": [{"start": 44.3, "end": 44.88, "word": " measures", "probability": 0.332275390625}, {"start": 44.88, "end": 45.38, "word": " but", "probability": 0.4345703125}, {"start": 45.38, "end": 45.76, "word": " for", "probability": 0.93896484375}, {"start": 45.76, "end": 45.96, "word": " a", "probability": 0.890625}, {"start": 45.96, "end": 46.34, "word": " population,", "probability": 0.95556640625}, {"start": 46.54, "end": 46.56, "word": " I", "probability": 0.912109375}, {"start": 46.56, "end": 46.76, "word": " mean", "probability": 0.96923828125}, {"start": 46.76, "end": 47.16, "word": " for", "probability": 0.6884765625}, {"start": 47.16, "end": 47.32, "word": " the", "probability": 0.8974609375}, {"start": 47.32, "end": 47.68, "word": " entire", "probability": 0.8720703125}, {"start": 47.68, "end": 48.12, "word": " dataset.", "probability": 0.53173828125}, {"start": 50.44, "end": 51.08, "word": " So", "probability": 0.87646484375}, {"start": 51.08, "end": 52.34, "word": " descriptive", "probability": 0.5966796875}, {"start": 52.34, "end": 53.02, "word": " statistics", 
"probability": 0.9345703125}, {"start": 53.02, "end": 54.22, "word": " described", "probability": 0.8271484375}, {"start": 54.22, "end": 55.1, "word": " previously", "probability": 0.728515625}, {"start": 55.1, "end": 55.36, "word": " in", "probability": 0.93798828125}, {"start": 55.36, "end": 55.5, "word": " the", "probability": 0.9169921875}, {"start": 55.5, "end": 55.92, "word": " last", "probability": 0.8427734375}, {"start": 55.92, "end": 56.12, "word": " two", "probability": 0.89404296875}, {"start": 56.12, "end": 56.6, "word": " lectures", "probability": 0.8759765625}, {"start": 56.6, "end": 57.86, "word": " was", "probability": 0.8779296875}, {"start": 57.86, "end": 58.1, "word": " for", "probability": 0.95166015625}, {"start": 58.1, "end": 58.32, "word": " a", "probability": 0.88720703125}, {"start": 58.32, "end": 58.6, "word": " sample.", "probability": 0.8427734375}, {"start": 59.68, "end": 59.96, "word": " Here", "probability": 0.8515625}, {"start": 59.96, "end": 60.96, "word": " we'll", "probability": 0.565673828125}, {"start": 60.96, "end": 61.56, "word": " just", "probability": 0.916015625}, {"start": 61.56, "end": 63.5, "word": " see", "probability": 0.91455078125}, {"start": 63.5, "end": 63.72, "word": " how", "probability": 0.9345703125}, {"start": 63.72, "end": 63.96, "word": " can", "probability": 0.81982421875}, {"start": 63.96, "end": 64.2, "word": " we", "probability": 0.9560546875}, {"start": 64.2, "end": 64.78, "word": " compute", "probability": 0.92578125}, {"start": 64.78, "end": 65.04, "word": " these", "probability": 0.8291015625}, {"start": 65.04, "end": 65.44, "word": " measures", "probability": 0.82470703125}, {"start": 65.44, "end": 66.56, "word": " for", "probability": 0.94775390625}, {"start": 66.56, "end": 66.84, "word": " the", "probability": 0.91845703125}, {"start": 66.84, "end": 67.26, "word": " entire", "probability": 0.88427734375}, {"start": 67.26, "end": 67.74, "word": " population.", "probability": 0.94873046875}, 
{"start": 68.48, "end": 68.72, "word": " In", "probability": 0.96044921875}, {"start": 68.72, "end": 68.96, "word": " this", "probability": 0.94580078125}, {"start": 68.96, "end": 69.38, "word": " case,", "probability": 0.9091796875}, {"start": 69.76, "end": 70.0, "word": " the", "probability": 0.9091796875}, {"start": 70.0, "end": 70.5, "word": " statistics", "probability": 0.908203125}, {"start": 70.5, "end": 70.92, "word": " we", "probability": 0.939453125}, {"start": 70.92, "end": 71.24, "word": " talked", "probability": 0.84814453125}, {"start": 71.24, "end": 71.6, "word": " about", "probability": 0.9033203125}, {"start": 71.6, "end": 72.38, "word": " before", "probability": 0.8681640625}, {"start": 72.38, "end": 73.1, "word": " are", "probability": 0.921875}, {"start": 73.1, "end": 73.54, "word": " called", "probability": 0.884765625}], "temperature": 1.0}, {"id": 4, "seek": 10164, "start": 75.14, "end": 101.64, "text": " And if you remember the first lecture, we said there is a difference between statistics and parameters. A statistic is a value that computed from a sample, but parameter is a value computed from population. 
So the important population parameters are population mean, variance, and standard deviation.", "tokens": [400, 498, 291, 1604, 264, 700, 7991, 11, 321, 848, 456, 307, 257, 2649, 1296, 12523, 293, 9834, 13, 316, 29588, 307, 257, 2158, 300, 40610, 490, 257, 6889, 11, 457, 13075, 307, 257, 2158, 40610, 490, 4415, 13, 407, 264, 1021, 4415, 9834, 366, 4415, 914, 11, 21977, 11, 293, 3832, 25163, 13], "avg_logprob": -0.21051136526194486, "compression_ratio": 1.7705882352941176, "no_speech_prob": 0.0, "words": [{"start": 75.14, "end": 75.94, "word": " And", "probability": 0.650390625}, {"start": 75.94, "end": 76.08, "word": " if", "probability": 0.90283203125}, {"start": 76.08, "end": 76.16, "word": " you", "probability": 0.96240234375}, {"start": 76.16, "end": 76.46, "word": " remember", "probability": 0.869140625}, {"start": 76.46, "end": 76.94, "word": " the", "probability": 0.322509765625}, {"start": 76.94, "end": 77.2, "word": " first", "probability": 0.88232421875}, {"start": 77.2, "end": 77.54, "word": " lecture,", "probability": 0.93310546875}, {"start": 77.66, "end": 77.72, "word": " we", "probability": 0.92138671875}, {"start": 77.72, "end": 77.92, "word": " said", "probability": 0.91015625}, {"start": 77.92, "end": 78.12, "word": " there", "probability": 0.85595703125}, {"start": 78.12, "end": 78.32, "word": " is", "probability": 0.818359375}, {"start": 78.32, "end": 78.56, "word": " a", "probability": 0.8388671875}, {"start": 78.56, "end": 79.2, "word": " difference", "probability": 0.496826171875}, {"start": 79.2, "end": 79.8, "word": " between", "probability": 0.85009765625}, {"start": 79.8, "end": 80.96, "word": " statistics", "probability": 0.896484375}, {"start": 80.96, "end": 81.86, "word": " and", "probability": 0.9404296875}, {"start": 81.86, "end": 82.34, "word": " parameters.", "probability": 0.94921875}, {"start": 83.08, "end": 83.26, "word": " A", "probability": 0.381591796875}, {"start": 83.26, "end": 83.64, "word": " statistic", "probability": 
0.8935546875}, {"start": 83.64, "end": 83.88, "word": " is", "probability": 0.9462890625}, {"start": 83.88, "end": 84.02, "word": " a", "probability": 0.9853515625}, {"start": 84.02, "end": 84.3, "word": " value", "probability": 0.95556640625}, {"start": 84.3, "end": 84.54, "word": " that", "probability": 0.9189453125}, {"start": 84.54, "end": 84.98, "word": " computed", "probability": 0.488037109375}, {"start": 84.98, "end": 85.28, "word": " from", "probability": 0.8486328125}, {"start": 85.28, "end": 85.44, "word": " a", "probability": 0.57470703125}, {"start": 85.44, "end": 85.78, "word": " sample,", "probability": 0.5908203125}, {"start": 86.4, "end": 86.64, "word": " but", "probability": 0.80615234375}, {"start": 86.64, "end": 87.12, "word": " parameter", "probability": 0.5625}, {"start": 87.12, "end": 87.42, "word": " is", "probability": 0.9423828125}, {"start": 87.42, "end": 87.52, "word": " a", "probability": 0.93359375}, {"start": 87.52, "end": 87.68, "word": " value", "probability": 0.96142578125}, {"start": 87.68, "end": 88.14, "word": " computed", "probability": 0.904296875}, {"start": 88.14, "end": 88.5, "word": " from", "probability": 0.88037109375}, {"start": 88.5, "end": 88.98, "word": " population.", "probability": 0.87841796875}, {"start": 89.82, "end": 90.44, "word": " So", "probability": 0.94677734375}, {"start": 90.44, "end": 91.38, "word": " the", "probability": 0.57958984375}, {"start": 91.38, "end": 92.14, "word": " important", "probability": 0.88916015625}, {"start": 92.14, "end": 93.44, "word": " population", "probability": 0.96142578125}, {"start": 93.44, "end": 94.0, "word": " parameters", "probability": 0.96630859375}, {"start": 94.0, "end": 95.68, "word": " are", "probability": 0.9208984375}, {"start": 95.68, "end": 96.64, "word": " population", "probability": 0.943359375}, {"start": 96.64, "end": 97.02, "word": " mean,", "probability": 0.88720703125}, {"start": 97.66, "end": 98.56, "word": " variance,", "probability": 0.87890625}, 
{"start": 98.98, "end": 99.6, "word": " and", "probability": 0.94482421875}, {"start": 99.6, "end": 100.44, "word": " standard", "probability": 0.935546875}, {"start": 100.44, "end": 101.64, "word": " deviation.", "probability": 0.90380859375}], "temperature": 1.0}, {"id": 5, "seek": 12686, "start": 102.68, "end": 126.86, "text": " Let's start with the first one, the mean, or the population mean. As the sample mean is defined by the sum of the values divided by the sample size. But here, we have to divide by the population size. So that's the difference between sample mean and population mean. For the sample mean, we use x bar.", "tokens": [961, 311, 722, 365, 264, 700, 472, 11, 264, 914, 11, 420, 264, 4415, 914, 13, 1018, 264, 6889, 914, 307, 7642, 538, 264, 2408, 295, 264, 4190, 6666, 538, 264, 6889, 2744, 13, 583, 510, 11, 321, 362, 281, 9845, 538, 264, 4415, 2744, 13, 407, 300, 311, 264, 2649, 1296, 6889, 914, 293, 4415, 914, 13, 1171, 264, 6889, 914, 11, 321, 764, 2031, 2159, 13], "avg_logprob": -0.15930707212807477, "compression_ratio": 1.776470588235294, "no_speech_prob": 0.0, "words": [{"start": 102.68, "end": 103.06, "word": " Let's", "probability": 0.83935546875}, {"start": 103.06, "end": 103.4, "word": " start", "probability": 0.9296875}, {"start": 103.4, "end": 103.56, "word": " with", "probability": 0.90283203125}, {"start": 103.56, "end": 103.68, "word": " the", "probability": 0.91748046875}, {"start": 103.68, "end": 103.94, "word": " first", "probability": 0.861328125}, {"start": 103.94, "end": 104.24, "word": " one,", "probability": 0.92236328125}, {"start": 104.42, "end": 104.5, "word": " the", "probability": 0.90087890625}, {"start": 104.5, "end": 104.68, "word": " mean,", "probability": 0.9130859375}, {"start": 104.9, "end": 105.04, "word": " or", "probability": 0.94287109375}, {"start": 105.04, "end": 105.18, "word": " the", "probability": 0.84765625}, {"start": 105.18, "end": 105.54, "word": " population", "probability": 0.96142578125}, 
{"start": 105.54, "end": 105.88, "word": " mean.", "probability": 0.93896484375}, {"start": 106.98, "end": 107.34, "word": " As", "probability": 0.64208984375}, {"start": 107.34, "end": 108.04, "word": " the", "probability": 0.8486328125}, {"start": 108.04, "end": 108.3, "word": " sample", "probability": 0.8701171875}, {"start": 108.3, "end": 108.52, "word": " mean", "probability": 0.8173828125}, {"start": 108.52, "end": 108.72, "word": " is", "probability": 0.93505859375}, {"start": 108.72, "end": 109.76, "word": " defined", "probability": 0.87841796875}, {"start": 109.76, "end": 110.02, "word": " by", "probability": 0.9638671875}, {"start": 110.02, "end": 110.24, "word": " the", "probability": 0.9130859375}, {"start": 110.24, "end": 110.46, "word": " sum", "probability": 0.94482421875}, {"start": 110.46, "end": 110.62, "word": " of", "probability": 0.97412109375}, {"start": 110.62, "end": 110.72, "word": " the", "probability": 0.892578125}, {"start": 110.72, "end": 111.16, "word": " values", "probability": 0.96240234375}, {"start": 111.16, "end": 112.08, "word": " divided", "probability": 0.62353515625}, {"start": 112.08, "end": 112.5, "word": " by", "probability": 0.96728515625}, {"start": 112.5, "end": 112.96, "word": " the", "probability": 0.9130859375}, {"start": 112.96, "end": 113.2, "word": " sample", "probability": 0.9072265625}, {"start": 113.2, "end": 113.72, "word": " size.", "probability": 0.8505859375}, {"start": 114.36, "end": 114.6, "word": " But", "probability": 0.8974609375}, {"start": 114.6, "end": 114.94, "word": " here,", "probability": 0.8388671875}, {"start": 115.02, "end": 115.12, "word": " we", "probability": 0.951171875}, {"start": 115.12, "end": 115.28, "word": " have", "probability": 0.9375}, {"start": 115.28, "end": 115.38, "word": " to", "probability": 0.96630859375}, {"start": 115.38, "end": 115.68, "word": " divide", "probability": 0.9462890625}, {"start": 115.68, "end": 115.88, "word": " by", "probability": 0.9453125}, {"start": 
115.88, "end": 116.04, "word": " the", "probability": 0.91015625}, {"start": 116.04, "end": 116.5, "word": " population", "probability": 0.94580078125}, {"start": 116.5, "end": 117.0, "word": " size.", "probability": 0.84228515625}, {"start": 117.46, "end": 117.64, "word": " So", "probability": 0.9453125}, {"start": 117.64, "end": 117.88, "word": " that's", "probability": 0.880615234375}, {"start": 117.88, "end": 118.0, "word": " the", "probability": 0.91748046875}, {"start": 118.0, "end": 118.36, "word": " difference", "probability": 0.859375}, {"start": 118.36, "end": 118.74, "word": " between", "probability": 0.89794921875}, {"start": 118.74, "end": 119.14, "word": " sample", "probability": 0.869140625}, {"start": 119.14, "end": 119.36, "word": " mean", "probability": 0.931640625}, {"start": 119.36, "end": 119.56, "word": " and", "probability": 0.8896484375}, {"start": 119.56, "end": 121.14, "word": " population", "probability": 0.61962890625}, {"start": 121.14, "end": 122.04, "word": " mean.", "probability": 0.970703125}, {"start": 124.04, "end": 124.56, "word": " For", "probability": 0.94189453125}, {"start": 124.56, "end": 124.74, "word": " the", "probability": 0.904296875}, {"start": 124.74, "end": 125.02, "word": " sample", "probability": 0.89892578125}, {"start": 125.02, "end": 125.38, "word": " mean,", "probability": 0.97412109375}, {"start": 125.54, "end": 125.78, "word": " we", "probability": 0.9541015625}, {"start": 125.78, "end": 126.14, "word": " use", "probability": 0.6591796875}, {"start": 126.14, "end": 126.46, "word": " x", "probability": 0.57861328125}, {"start": 126.46, "end": 126.86, "word": " bar.", "probability": 0.73779296875}], "temperature": 1.0}, {"id": 6, "seek": 15777, "start": 128.47, "end": 157.77, "text": " Here we use Greek letter, mu. This is pronounced as mu. So mu is the sum of the x values divided by the population size, not the sample size. So it's quite similar to the sample mean. 
So mu is the population mean, n is the population size, and xi is the it value of the variable x. Similarly, for the other parameter, which is the variance, the variance", "tokens": [1692, 321, 764, 10281, 5063, 11, 2992, 13, 639, 307, 23155, 382, 2992, 13, 407, 2992, 307, 264, 2408, 295, 264, 2031, 4190, 6666, 538, 264, 4415, 2744, 11, 406, 264, 6889, 2744, 13, 407, 309, 311, 1596, 2531, 281, 264, 6889, 914, 13, 407, 2992, 307, 264, 4415, 914, 11, 297, 307, 264, 4415, 2744, 11, 293, 36800, 307, 264, 309, 2158, 295, 264, 7006, 2031, 13, 13157, 11, 337, 264, 661, 13075, 11, 597, 307, 264, 21977, 11, 264, 21977], "avg_logprob": -0.16029744514499802, "compression_ratio": 1.824742268041237, "no_speech_prob": 0.0, "words": [{"start": 128.47, "end": 128.81, "word": " Here", "probability": 0.7890625}, {"start": 128.81, "end": 128.95, "word": " we", "probability": 0.62646484375}, {"start": 128.95, "end": 129.13, "word": " use", "probability": 0.87060546875}, {"start": 129.13, "end": 129.63, "word": " Greek", "probability": 0.57763671875}, {"start": 129.63, "end": 130.09, "word": " letter,", "probability": 0.9453125}, {"start": 130.61, "end": 130.85, "word": " mu.", "probability": 0.47412109375}, {"start": 131.01, "end": 131.39, "word": " This", "probability": 0.86572265625}, {"start": 131.39, "end": 131.51, "word": " is", "probability": 0.88916015625}, {"start": 131.51, "end": 131.95, "word": " pronounced", "probability": 0.7822265625}, {"start": 131.95, "end": 132.51, "word": " as", "probability": 0.96875}, {"start": 132.51, "end": 132.87, "word": " mu.", "probability": 0.3134765625}, {"start": 134.51, "end": 134.79, "word": " So", "probability": 0.96142578125}, {"start": 134.79, "end": 135.07, "word": " mu", "probability": 0.79638671875}, {"start": 135.07, "end": 135.29, "word": " is", "probability": 0.9462890625}, {"start": 135.29, "end": 135.47, "word": " the", "probability": 0.92236328125}, {"start": 135.47, "end": 135.81, "word": " sum", "probability": 
0.94091796875}, {"start": 135.81, "end": 136.61, "word": " of", "probability": 0.96533203125}, {"start": 136.61, "end": 136.75, "word": " the", "probability": 0.91748046875}, {"start": 136.75, "end": 136.95, "word": " x", "probability": 0.74462890625}, {"start": 136.95, "end": 137.47, "word": " values", "probability": 0.80810546875}, {"start": 137.47, "end": 138.35, "word": " divided", "probability": 0.6591796875}, {"start": 138.35, "end": 138.61, "word": " by", "probability": 0.96630859375}, {"start": 138.61, "end": 138.79, "word": " the", "probability": 0.919921875}, {"start": 138.79, "end": 139.19, "word": " population", "probability": 0.9453125}, {"start": 139.19, "end": 139.71, "word": " size,", "probability": 0.83349609375}, {"start": 139.87, "end": 140.03, "word": " not", "probability": 0.93896484375}, {"start": 140.03, "end": 140.23, "word": " the", "probability": 0.923828125}, {"start": 140.23, "end": 140.43, "word": " sample", "probability": 0.8828125}, {"start": 140.43, "end": 140.73, "word": " size.", "probability": 0.8515625}, {"start": 140.81, "end": 140.95, "word": " So", "probability": 0.958984375}, {"start": 140.95, "end": 141.21, "word": " it's", "probability": 0.965087890625}, {"start": 141.21, "end": 141.53, "word": " quite", "probability": 0.9111328125}, {"start": 141.53, "end": 141.99, "word": " similar", "probability": 0.96337890625}, {"start": 141.99, "end": 142.81, "word": " to", "probability": 0.9638671875}, {"start": 142.81, "end": 142.97, "word": " the", "probability": 0.916015625}, {"start": 142.97, "end": 143.23, "word": " sample", "probability": 0.6787109375}, {"start": 143.23, "end": 143.53, "word": " mean.", "probability": 0.53955078125}, {"start": 143.85, "end": 144.13, "word": " So", "probability": 0.966796875}, {"start": 144.13, "end": 144.33, "word": " mu", "probability": 0.9287109375}, {"start": 144.33, "end": 144.47, "word": " is", "probability": 0.94384765625}, {"start": 144.47, "end": 144.57, "word": " the", "probability": 
0.9150390625}, {"start": 144.57, "end": 144.95, "word": " population", "probability": 0.93896484375}, {"start": 144.95, "end": 145.35, "word": " mean,", "probability": 0.97705078125}, {"start": 145.85, "end": 145.95, "word": " n", "probability": 0.75537109375}, {"start": 145.95, "end": 146.15, "word": " is", "probability": 0.94482421875}, {"start": 146.15, "end": 146.23, "word": " the", "probability": 0.8837890625}, {"start": 146.23, "end": 146.55, "word": " population", "probability": 0.9404296875}, {"start": 146.55, "end": 147.13, "word": " size,", "probability": 0.849609375}, {"start": 147.43, "end": 147.69, "word": " and", "probability": 0.9443359375}, {"start": 147.69, "end": 148.03, "word": " xi", "probability": 0.79052734375}, {"start": 148.03, "end": 148.35, "word": " is", "probability": 0.9423828125}, {"start": 148.35, "end": 148.53, "word": " the", "probability": 0.88232421875}, {"start": 148.53, "end": 148.77, "word": " it", "probability": 0.68359375}, {"start": 148.77, "end": 149.15, "word": " value", "probability": 0.791015625}, {"start": 149.15, "end": 149.61, "word": " of", "probability": 0.966796875}, {"start": 149.61, "end": 149.77, "word": " the", "probability": 0.91552734375}, {"start": 149.77, "end": 150.09, "word": " variable", "probability": 0.87451171875}, {"start": 150.09, "end": 150.55, "word": " x.", "probability": 0.927734375}, {"start": 151.87, "end": 152.47, "word": " Similarly,", "probability": 0.80712890625}, {"start": 152.91, "end": 153.27, "word": " for", "probability": 0.95166015625}, {"start": 153.27, "end": 153.49, "word": " the", "probability": 0.9169921875}, {"start": 153.49, "end": 153.87, "word": " other", "probability": 0.89697265625}, {"start": 153.87, "end": 155.29, "word": " parameter,", "probability": 0.9501953125}, {"start": 155.47, "end": 155.65, "word": " which", "probability": 0.94873046875}, {"start": 155.65, "end": 155.79, "word": " is", "probability": 0.939453125}, {"start": 155.79, "end": 155.97, "word": " the", 
"probability": 0.92041015625}, {"start": 155.97, "end": 156.45, "word": " variance,", "probability": 0.91357421875}, {"start": 157.05, "end": 157.31, "word": " the", "probability": 0.912109375}, {"start": 157.31, "end": 157.77, "word": " variance", "probability": 0.90869140625}], "temperature": 1.0}, {"id": 7, "seek": 18352, "start": 159.28, "end": 183.52, "text": " There is a little difference between the sample and population variance. Here, we subtract the population mean instead of the sample mean. So sum of xi minus mu squared, then divide by this population size, capital N, instead of N minus 1. So that's the difference between sample and population variance.", "tokens": [821, 307, 257, 707, 2649, 1296, 264, 6889, 293, 4415, 21977, 13, 1692, 11, 321, 16390, 264, 4415, 914, 2602, 295, 264, 6889, 914, 13, 407, 2408, 295, 36800, 3175, 2992, 8889, 11, 550, 9845, 538, 341, 4415, 2744, 11, 4238, 426, 11, 2602, 295, 426, 3175, 502, 13, 407, 300, 311, 264, 2649, 1296, 6889, 293, 4415, 21977, 13], "avg_logprob": -0.17174692916088416, "compression_ratio": 1.8263473053892216, "no_speech_prob": 0.0, "words": [{"start": 159.28, "end": 159.6, "word": " There", "probability": 0.3828125}, {"start": 159.6, "end": 159.8, "word": " is", "probability": 0.916015625}, {"start": 159.8, "end": 160.12, "word": " a", "probability": 0.97705078125}, {"start": 160.12, "end": 160.54, "word": " little", "probability": 0.791015625}, {"start": 160.54, "end": 161.04, "word": " difference", "probability": 0.87060546875}, {"start": 161.04, "end": 161.32, "word": " between", "probability": 0.8828125}, {"start": 161.32, "end": 161.52, "word": " the", "probability": 0.759765625}, {"start": 161.52, "end": 161.78, "word": " sample", "probability": 0.82568359375}, {"start": 161.78, "end": 161.96, "word": " and", "probability": 0.9453125}, {"start": 161.96, "end": 162.38, "word": " population", "probability": 0.9052734375}, {"start": 162.38, "end": 162.9, "word": " variance.", "probability": 
0.8916015625}, {"start": 163.64, "end": 164.02, "word": " Here,", "probability": 0.85595703125}, {"start": 164.18, "end": 164.56, "word": " we", "probability": 0.96044921875}, {"start": 164.56, "end": 165.48, "word": " subtract", "probability": 0.8681640625}, {"start": 165.48, "end": 166.02, "word": " the", "probability": 0.8896484375}, {"start": 166.02, "end": 166.46, "word": " population", "probability": 0.95556640625}, {"start": 166.46, "end": 166.92, "word": " mean", "probability": 0.97314453125}, {"start": 166.92, "end": 167.68, "word": " instead", "probability": 0.771484375}, {"start": 167.68, "end": 168.04, "word": " of", "probability": 0.9677734375}, {"start": 168.04, "end": 168.2, "word": " the", "probability": 0.92333984375}, {"start": 168.2, "end": 168.52, "word": " sample", "probability": 0.84130859375}, {"start": 168.52, "end": 168.78, "word": " mean.", "probability": 0.8623046875}, {"start": 169.42, "end": 169.7, "word": " So", "probability": 0.9541015625}, {"start": 169.7, "end": 169.96, "word": " sum", "probability": 0.66845703125}, {"start": 169.96, "end": 170.08, "word": " of", "probability": 0.947265625}, {"start": 170.08, "end": 170.36, "word": " xi", "probability": 0.473876953125}, {"start": 170.36, "end": 170.76, "word": " minus", "probability": 0.970703125}, {"start": 170.76, "end": 171.16, "word": " mu", "probability": 0.853515625}, {"start": 171.16, "end": 172.12, "word": " squared,", "probability": 0.81787109375}, {"start": 172.78, "end": 173.1, "word": " then", "probability": 0.8671875}, {"start": 173.1, "end": 173.56, "word": " divide", "probability": 0.80615234375}, {"start": 173.56, "end": 174.84, "word": " by", "probability": 0.955078125}, {"start": 174.84, "end": 175.14, "word": " this", "probability": 0.57861328125}, {"start": 175.14, "end": 175.7, "word": " population", "probability": 0.94677734375}, {"start": 175.7, "end": 176.38, "word": " size,", "probability": 0.8427734375}, {"start": 176.5, "end": 176.82, "word": " capital", 
"probability": 0.91845703125}, {"start": 176.82, "end": 177.58, "word": " N,", "probability": 0.94384765625}, {"start": 177.7, "end": 178.14, "word": " instead", "probability": 0.85546875}, {"start": 178.14, "end": 178.44, "word": " of", "probability": 0.9658203125}, {"start": 178.44, "end": 178.58, "word": " N", "probability": 0.86376953125}, {"start": 178.58, "end": 178.86, "word": " minus", "probability": 0.970703125}, {"start": 178.86, "end": 179.14, "word": " 1.", "probability": 0.76416015625}, {"start": 179.52, "end": 179.72, "word": " So", "probability": 0.95654296875}, {"start": 179.72, "end": 180.02, "word": " that's", "probability": 0.947021484375}, {"start": 180.02, "end": 180.14, "word": " the", "probability": 0.90478515625}, {"start": 180.14, "end": 180.56, "word": " difference", "probability": 0.861328125}, {"start": 180.56, "end": 181.08, "word": " between", "probability": 0.88427734375}, {"start": 181.08, "end": 181.82, "word": " sample", "probability": 0.87255859375}, {"start": 181.82, "end": 182.26, "word": " and", "probability": 0.94873046875}, {"start": 182.26, "end": 182.74, "word": " population", "probability": 0.93798828125}, {"start": 182.74, "end": 183.52, "word": " variance.", "probability": 0.9140625}], "temperature": 1.0}, {"id": 8, "seek": 21154, "start": 184.32, "end": 211.54, "text": " So again, in the sample variance, we subtracted x bar. Here, we subtract the mean of the population, mu, then divide by capital N instead of N minus 1. So the computations for the sample and the population mean or variance are quite similar. 
Finally, the population standard deviation.", "tokens": [407, 797, 11, 294, 264, 6889, 21977, 11, 321, 16390, 292, 2031, 2159, 13, 1692, 11, 321, 16390, 264, 914, 295, 264, 4415, 11, 2992, 11, 550, 9845, 538, 4238, 426, 2602, 295, 426, 3175, 502, 13, 407, 264, 2807, 763, 337, 264, 6889, 293, 264, 4415, 914, 420, 21977, 366, 1596, 2531, 13, 6288, 11, 264, 4415, 3832, 25163, 13], "avg_logprob": -0.19329637144842454, "compression_ratio": 1.6823529411764706, "no_speech_prob": 0.0, "words": [{"start": 184.32, "end": 184.78, "word": " So", "probability": 0.9091796875}, {"start": 184.78, "end": 185.08, "word": " again,", "probability": 0.85009765625}, {"start": 185.98, "end": 186.58, "word": " in", "probability": 0.9052734375}, {"start": 186.58, "end": 186.72, "word": " the", "probability": 0.92138671875}, {"start": 186.72, "end": 187.02, "word": " sample", "probability": 0.677734375}, {"start": 187.02, "end": 187.44, "word": " variance,", "probability": 0.8603515625}, {"start": 187.56, "end": 187.68, "word": " we", "probability": 0.955078125}, {"start": 187.68, "end": 188.56, "word": " subtracted", "probability": 0.899169921875}, {"start": 188.56, "end": 189.04, "word": " x", "probability": 0.6220703125}, {"start": 189.04, "end": 189.46, "word": " bar.", "probability": 0.6630859375}, {"start": 190.76, "end": 191.28, "word": " Here,", "probability": 0.80322265625}, {"start": 191.32, "end": 191.46, "word": " we", "probability": 0.89404296875}, {"start": 191.46, "end": 192.08, "word": " subtract", "probability": 0.8642578125}, {"start": 192.08, "end": 192.52, "word": " the", "probability": 0.83984375}, {"start": 192.52, "end": 192.74, "word": " mean", "probability": 0.9755859375}, {"start": 192.74, "end": 193.34, "word": " of", "probability": 0.95166015625}, {"start": 193.34, "end": 193.5, "word": " the", "probability": 0.83447265625}, {"start": 193.5, "end": 193.88, "word": " population,", "probability": 0.95654296875}, {"start": 194.08, "end": 194.28, "word": " mu,", 
"probability": 0.65869140625}, {"start": 194.7, "end": 195.0, "word": " then", "probability": 0.7412109375}, {"start": 195.0, "end": 195.36, "word": " divide", "probability": 0.751953125}, {"start": 195.36, "end": 195.64, "word": " by", "probability": 0.8798828125}, {"start": 195.64, "end": 196.04, "word": " capital", "probability": 0.908203125}, {"start": 196.04, "end": 196.28, "word": " N", "probability": 0.9619140625}, {"start": 196.28, "end": 196.68, "word": " instead", "probability": 0.357177734375}, {"start": 196.68, "end": 197.26, "word": " of", "probability": 0.96728515625}, {"start": 197.26, "end": 198.2, "word": " N", "probability": 0.91015625}, {"start": 198.2, "end": 198.48, "word": " minus", "probability": 0.96484375}, {"start": 198.48, "end": 198.78, "word": " 1.", "probability": 0.7919921875}, {"start": 199.24, "end": 199.68, "word": " So", "probability": 0.9658203125}, {"start": 199.68, "end": 200.2, "word": " the", "probability": 0.873046875}, {"start": 200.2, "end": 201.24, "word": " computations", "probability": 0.98095703125}, {"start": 201.24, "end": 202.2, "word": " for", "probability": 0.94873046875}, {"start": 202.2, "end": 202.4, "word": " the", "probability": 0.92724609375}, {"start": 202.4, "end": 202.74, "word": " sample", "probability": 0.85302734375}, {"start": 202.74, "end": 203.46, "word": " and", "probability": 0.93798828125}, {"start": 203.46, "end": 203.58, "word": " the", "probability": 0.90283203125}, {"start": 203.58, "end": 204.0, "word": " population", "probability": 0.958984375}, {"start": 204.0, "end": 204.34, "word": " mean", "probability": 0.90673828125}, {"start": 204.34, "end": 204.54, "word": " or", "probability": 0.48193359375}, {"start": 204.54, "end": 204.94, "word": " variance", "probability": 0.95068359375}, {"start": 204.94, "end": 205.46, "word": " are", "probability": 0.411865234375}, {"start": 205.46, "end": 205.72, "word": " quite", "probability": 0.89892578125}, {"start": 205.72, "end": 206.08, "word": " 
similar.", "probability": 0.9619140625}, {"start": 207.94, "end": 208.74, "word": " Finally,", "probability": 0.69580078125}, {"start": 209.96, "end": 210.22, "word": " the", "probability": 0.87060546875}, {"start": 210.22, "end": 210.66, "word": " population", "probability": 0.96337890625}, {"start": 210.66, "end": 211.18, "word": " standard", "probability": 0.91015625}, {"start": 211.18, "end": 211.54, "word": " deviation.", "probability": 0.90869140625}], "temperature": 1.0}, {"id": 9, "seek": 24241, "start": 213.97, "end": 242.41, "text": " is the same as the sample population variance and here just take the square root of the population variance and again as we did as we explained before the standard deviation has the same units as the original unit so nothing is new we just extend the sample statistic to the population parameter and again", "tokens": [307, 264, 912, 382, 264, 6889, 4415, 21977, 293, 510, 445, 747, 264, 3732, 5593, 295, 264, 4415, 21977, 293, 797, 382, 321, 630, 382, 321, 8825, 949, 264, 3832, 25163, 575, 264, 912, 6815, 382, 264, 3380, 4985, 370, 1825, 307, 777, 321, 445, 10101, 264, 6889, 29588, 281, 264, 4415, 13075, 293, 797], "avg_logprob": -0.1713169618908848, "compression_ratio": 1.9245283018867925, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 213.97, "end": 214.29, "word": " is", "probability": 0.314453125}, {"start": 214.29, "end": 214.47, "word": " the", "probability": 0.90478515625}, {"start": 214.47, "end": 214.67, "word": " same", "probability": 0.87646484375}, {"start": 214.67, "end": 215.09, "word": " as", "probability": 0.95263671875}, {"start": 215.09, "end": 215.39, "word": " the", "probability": 0.8759765625}, {"start": 215.39, "end": 215.73, "word": " sample", "probability": 0.82275390625}, {"start": 215.73, "end": 216.45, "word": " population", "probability": 0.91162109375}, {"start": 216.45, "end": 216.93, "word": " variance", "probability": 0.8974609375}, {"start": 216.93, "end": 217.55, "word": " and", 
"probability": 0.347412109375}, {"start": 217.55, "end": 217.85, "word": " here", "probability": 0.79345703125}, {"start": 217.85, "end": 218.31, "word": " just", "probability": 0.70703125}, {"start": 218.31, "end": 218.61, "word": " take", "probability": 0.865234375}, {"start": 218.61, "end": 218.81, "word": " the", "probability": 0.91162109375}, {"start": 218.81, "end": 219.07, "word": " square", "probability": 0.88525390625}, {"start": 219.07, "end": 219.53, "word": " root", "probability": 0.91064453125}, {"start": 219.53, "end": 220.41, "word": " of", "probability": 0.95654296875}, {"start": 220.41, "end": 220.77, "word": " the", "probability": 0.89697265625}, {"start": 220.77, "end": 221.85, "word": " population", "probability": 0.95263671875}, {"start": 221.85, "end": 222.39, "word": " variance", "probability": 0.85595703125}, {"start": 222.39, "end": 222.95, "word": " and", "probability": 0.5771484375}, {"start": 222.95, "end": 223.17, "word": " again", "probability": 0.93994140625}, {"start": 223.17, "end": 223.35, "word": " as", "probability": 0.8583984375}, {"start": 223.35, "end": 223.49, "word": " we", "probability": 0.9580078125}, {"start": 223.49, "end": 223.71, "word": " did", "probability": 0.76611328125}, {"start": 223.71, "end": 224.41, "word": " as", "probability": 0.654296875}, {"start": 224.41, "end": 224.57, "word": " we", "probability": 0.94189453125}, {"start": 224.57, "end": 225.03, "word": " explained", "probability": 0.86083984375}, {"start": 225.03, "end": 225.51, "word": " before", "probability": 0.85009765625}, {"start": 225.51, "end": 226.69, "word": " the", "probability": 0.7548828125}, {"start": 226.69, "end": 227.17, "word": " standard", "probability": 0.87744140625}, {"start": 227.17, "end": 227.73, "word": " deviation", "probability": 0.93603515625}, {"start": 227.73, "end": 229.01, "word": " has", "probability": 0.8896484375}, {"start": 229.01, "end": 229.27, "word": " the", "probability": 0.91015625}, {"start": 229.27, "end": 
229.53, "word": " same", "probability": 0.90234375}, {"start": 229.53, "end": 230.05, "word": " units", "probability": 0.91796875}, {"start": 230.05, "end": 230.61, "word": " as", "probability": 0.951171875}, {"start": 230.61, "end": 230.77, "word": " the", "probability": 0.89599609375}, {"start": 230.77, "end": 231.15, "word": " original", "probability": 0.95556640625}, {"start": 231.15, "end": 231.55, "word": " unit", "probability": 0.94091796875}, {"start": 231.55, "end": 232.57, "word": " so", "probability": 0.6484375}, {"start": 232.57, "end": 232.85, "word": " nothing", "probability": 0.89697265625}, {"start": 232.85, "end": 233.09, "word": " is", "probability": 0.9365234375}, {"start": 233.09, "end": 233.33, "word": " new", "probability": 0.88427734375}, {"start": 233.33, "end": 234.77, "word": " we", "probability": 0.7060546875}, {"start": 234.77, "end": 235.19, "word": " just", "probability": 0.92626953125}, {"start": 235.19, "end": 235.83, "word": " extend", "probability": 0.85205078125}, {"start": 235.83, "end": 236.77, "word": " the", "probability": 0.9140625}, {"start": 236.77, "end": 237.13, "word": " sample", "probability": 0.84716796875}, {"start": 237.13, "end": 237.69, "word": " statistic", "probability": 0.8642578125}, {"start": 237.69, "end": 239.27, "word": " to", "probability": 0.9130859375}, {"start": 239.27, "end": 239.43, "word": " the", "probability": 0.91552734375}, {"start": 239.43, "end": 239.85, "word": " population", "probability": 0.94287109375}, {"start": 239.85, "end": 240.31, "word": " parameter", "probability": 0.9609375}, {"start": 240.31, "end": 241.97, "word": " and", "probability": 0.900390625}, {"start": 241.97, "end": 242.41, "word": " again", "probability": 0.955078125}], "temperature": 1.0}, {"id": 10, "seek": 26893, "start": 244.03, "end": 268.93, "text": " The mean is denoted by mu, it's a Greek letter. The population variance is denoted by sigma squared. 
And finally, the population standard deviation is denoted by sigma. So that's the numerical descriptive measures either for a sample or a population. So just summary for these measures.", "tokens": [440, 914, 307, 1441, 23325, 538, 2992, 11, 309, 311, 257, 10281, 5063, 13, 440, 4415, 21977, 307, 1441, 23325, 538, 12771, 8889, 13, 400, 2721, 11, 264, 4415, 3832, 25163, 307, 1441, 23325, 538, 12771, 13, 407, 300, 311, 264, 29054, 42585, 8000, 2139, 337, 257, 6889, 420, 257, 4415, 13, 407, 445, 12691, 337, 613, 8000, 13], "avg_logprob": -0.14804687798023225, "compression_ratio": 1.6783625730994152, "no_speech_prob": 0.0, "words": [{"start": 244.03, "end": 244.31, "word": " The", "probability": 0.6767578125}, {"start": 244.31, "end": 244.63, "word": " mean", "probability": 0.962890625}, {"start": 244.63, "end": 245.97, "word": " is", "probability": 0.93017578125}, {"start": 245.97, "end": 247.31, "word": " denoted", "probability": 0.91845703125}, {"start": 247.31, "end": 247.65, "word": " by", "probability": 0.978515625}, {"start": 247.65, "end": 247.97, "word": " mu,", "probability": 0.446533203125}, {"start": 248.13, "end": 248.23, "word": " it's", "probability": 0.7744140625}, {"start": 248.23, "end": 248.33, "word": " a", "probability": 0.464599609375}, {"start": 248.33, "end": 248.47, "word": " Greek", "probability": 0.89990234375}, {"start": 248.47, "end": 248.79, "word": " letter.", "probability": 0.955078125}, {"start": 250.21, "end": 250.51, "word": " The", "probability": 0.89306640625}, {"start": 250.51, "end": 251.01, "word": " population", "probability": 0.958984375}, {"start": 251.01, "end": 251.61, "word": " variance", "probability": 0.8359375}, {"start": 251.61, "end": 251.87, "word": " is", "probability": 0.94140625}, {"start": 251.87, "end": 252.11, "word": " denoted", "probability": 0.957275390625}, {"start": 252.11, "end": 252.43, "word": " by", "probability": 0.9755859375}, {"start": 252.43, "end": 252.79, "word": " sigma", "probability": 
0.87158203125}, {"start": 252.79, "end": 253.21, "word": " squared.", "probability": 0.77587890625}, {"start": 254.59, "end": 255.03, "word": " And", "probability": 0.943359375}, {"start": 255.03, "end": 255.59, "word": " finally,", "probability": 0.8291015625}, {"start": 255.93, "end": 256.09, "word": " the", "probability": 0.91748046875}, {"start": 256.09, "end": 256.49, "word": " population", "probability": 0.92529296875}, {"start": 256.49, "end": 257.03, "word": " standard", "probability": 0.86474609375}, {"start": 257.03, "end": 257.41, "word": " deviation", "probability": 0.92724609375}, {"start": 257.41, "end": 257.65, "word": " is", "probability": 0.9453125}, {"start": 257.65, "end": 258.01, "word": " denoted", "probability": 0.962890625}, {"start": 258.01, "end": 258.45, "word": " by", "probability": 0.97509765625}, {"start": 258.45, "end": 259.53, "word": " sigma.", "probability": 0.90185546875}, {"start": 259.95, "end": 260.25, "word": " So", "probability": 0.95166015625}, {"start": 260.25, "end": 260.61, "word": " that's", "probability": 0.925537109375}, {"start": 260.61, "end": 261.13, "word": " the", "probability": 0.89794921875}, {"start": 261.13, "end": 262.01, "word": " numerical", "probability": 0.857421875}, {"start": 262.01, "end": 262.73, "word": " descriptive", "probability": 0.85791015625}, {"start": 262.73, "end": 263.11, "word": " measures", "probability": 0.79248046875}, {"start": 263.11, "end": 263.45, "word": " either", "probability": 0.61865234375}, {"start": 263.45, "end": 263.81, "word": " for", "probability": 0.94873046875}, {"start": 263.81, "end": 264.01, "word": " a", "probability": 0.9208984375}, {"start": 264.01, "end": 264.25, "word": " sample", "probability": 0.7509765625}, {"start": 264.25, "end": 265.29, "word": " or", "probability": 0.93212890625}, {"start": 265.29, "end": 266.01, "word": " a", "probability": 0.955078125}, {"start": 266.01, "end": 266.25, "word": " population.", "probability": 0.96337890625}, {"start": 
267.33, "end": 267.57, "word": " So", "probability": 0.95458984375}, {"start": 267.57, "end": 267.79, "word": " just", "probability": 0.84814453125}, {"start": 267.79, "end": 268.13, "word": " summary", "probability": 0.6357421875}, {"start": 268.13, "end": 268.39, "word": " for", "probability": 0.91796875}, {"start": 268.39, "end": 268.59, "word": " these", "probability": 0.80322265625}, {"start": 268.59, "end": 268.93, "word": " measures.", "probability": 0.8740234375}], "temperature": 1.0}, {"id": 11, "seek": 27381, "start": 271.11, "end": 273.81, "text": " The measures are mean variance, standard deviation.", "tokens": [440, 8000, 366, 914, 21977, 11, 3832, 25163, 13], "avg_logprob": -0.3990234375, "compression_ratio": 0.9285714285714286, "no_speech_prob": 0.0, "words": [{"start": 271.11, "end": 271.41, "word": " The", "probability": 0.7216796875}, {"start": 271.41, "end": 271.67, "word": " measures", "probability": 0.39013671875}, {"start": 271.67, "end": 272.07, "word": " are", "probability": 0.92138671875}, {"start": 272.07, "end": 272.29, "word": " mean", "probability": 0.5810546875}, {"start": 272.29, "end": 272.87, "word": " variance,", "probability": 0.71337890625}, {"start": 273.07, "end": 273.33, "word": " standard", "probability": 0.93408203125}, {"start": 273.33, "end": 273.81, "word": " deviation.", "probability": 0.93408203125}], "temperature": 1.0}, {"id": 12, "seek": 30408, "start": 275.23, "end": 304.09, "text": " Population parameters are mu for the mean, sigma squared for variance, and sigma for standard deviation. On the other hand, for the sample statistics, we have x bar for sample mean, s squared for the sample variance, and s is the sample standard deviation. 
That's sample statistics against population parameters.", "tokens": [10215, 2776, 9834, 366, 2992, 337, 264, 914, 11, 12771, 8889, 337, 21977, 11, 293, 12771, 337, 3832, 25163, 13, 1282, 264, 661, 1011, 11, 337, 264, 6889, 12523, 11, 321, 362, 2031, 2159, 337, 6889, 914, 11, 262, 8889, 337, 264, 6889, 21977, 11, 293, 262, 307, 264, 6889, 3832, 25163, 13, 663, 311, 6889, 12523, 1970, 4415, 9834, 13], "avg_logprob": -0.14112902528816654, "compression_ratio": 1.981012658227848, "no_speech_prob": 0.0, "words": [{"start": 275.23, "end": 275.85, "word": " Population", "probability": 0.89794921875}, {"start": 275.85, "end": 276.39, "word": " parameters", "probability": 0.95751953125}, {"start": 276.39, "end": 277.01, "word": " are", "probability": 0.93896484375}, {"start": 277.01, "end": 277.43, "word": " mu", "probability": 0.57470703125}, {"start": 277.43, "end": 278.11, "word": " for", "probability": 0.8427734375}, {"start": 278.11, "end": 278.25, "word": " the", "probability": 0.92333984375}, {"start": 278.25, "end": 278.49, "word": " mean,", "probability": 0.96435546875}, {"start": 279.45, "end": 279.65, "word": " sigma", "probability": 0.8974609375}, {"start": 279.65, "end": 280.13, "word": " squared", "probability": 0.80859375}, {"start": 280.13, "end": 280.53, "word": " for", "probability": 0.94970703125}, {"start": 280.53, "end": 281.51, "word": " variance,", "probability": 0.763671875}, {"start": 282.21, "end": 282.51, "word": " and", "probability": 0.93603515625}, {"start": 282.51, "end": 282.89, "word": " sigma", "probability": 0.9208984375}, {"start": 282.89, "end": 283.83, "word": " for", "probability": 0.94091796875}, {"start": 283.83, "end": 284.23, "word": " standard", "probability": 0.77734375}, {"start": 284.23, "end": 284.67, "word": " deviation.", "probability": 0.90673828125}, {"start": 285.47, "end": 285.65, "word": " On", "probability": 0.94580078125}, {"start": 285.65, "end": 285.79, "word": " the", "probability": 0.92529296875}, {"start": 285.79, 
"end": 285.99, "word": " other", "probability": 0.88916015625}, {"start": 285.99, "end": 286.35, "word": " hand,", "probability": 0.91259765625}, {"start": 286.43, "end": 286.53, "word": " for", "probability": 0.93994140625}, {"start": 286.53, "end": 286.71, "word": " the", "probability": 0.88037109375}, {"start": 286.71, "end": 286.93, "word": " sample", "probability": 0.55810546875}, {"start": 286.93, "end": 287.61, "word": " statistics,", "probability": 0.89208984375}, {"start": 288.25, "end": 288.87, "word": " we", "probability": 0.96044921875}, {"start": 288.87, "end": 289.25, "word": " have", "probability": 0.94921875}, {"start": 289.25, "end": 290.25, "word": " x", "probability": 0.75537109375}, {"start": 290.25, "end": 290.55, "word": " bar", "probability": 0.60888671875}, {"start": 290.55, "end": 290.79, "word": " for", "probability": 0.9150390625}, {"start": 290.79, "end": 291.09, "word": " sample", "probability": 0.59228515625}, {"start": 291.09, "end": 291.43, "word": " mean,", "probability": 0.97119140625}, {"start": 292.11, "end": 292.35, "word": " s", "probability": 0.5107421875}, {"start": 292.35, "end": 292.75, "word": " squared", "probability": 0.7998046875}, {"start": 292.75, "end": 293.01, "word": " for", "probability": 0.94287109375}, {"start": 293.01, "end": 293.17, "word": " the", "probability": 0.8818359375}, {"start": 293.17, "end": 293.45, "word": " sample", "probability": 0.916015625}, {"start": 293.45, "end": 294.01, "word": " variance,", "probability": 0.88916015625}, {"start": 294.93, "end": 295.57, "word": " and", "probability": 0.93896484375}, {"start": 295.57, "end": 296.23, "word": " s", "probability": 0.77734375}, {"start": 296.23, "end": 296.53, "word": " is", "probability": 0.9287109375}, {"start": 296.53, "end": 296.75, "word": " the", "probability": 0.91650390625}, {"start": 296.75, "end": 297.15, "word": " sample", "probability": 0.904296875}, {"start": 297.15, "end": 298.01, "word": " standard", "probability": 0.939453125}, 
{"start": 298.01, "end": 298.73, "word": " deviation.", "probability": 0.90478515625}, {"start": 299.57, "end": 299.97, "word": " That's", "probability": 0.821533203125}, {"start": 299.97, "end": 300.41, "word": " sample", "probability": 0.8564453125}, {"start": 300.41, "end": 301.25, "word": " statistics", "probability": 0.9306640625}, {"start": 301.25, "end": 302.01, "word": " against", "probability": 0.83935546875}, {"start": 302.01, "end": 302.77, "word": " population", "probability": 0.9384765625}, {"start": 302.77, "end": 304.09, "word": " parameters.", "probability": 0.9033203125}], "temperature": 1.0}, {"id": 13, "seek": 33062, "start": 305.1, "end": 330.62, "text": " Any question? Let's move to new topic, which is empirical role. Now, empirical role is just we have to approximate the variation of data in case of", "tokens": [2639, 1168, 30, 961, 311, 1286, 281, 777, 4829, 11, 597, 307, 31886, 3090, 13, 823, 11, 31886, 3090, 307, 445, 321, 362, 281, 30874, 264, 12990, 295, 1412, 294, 1389, 295], "avg_logprob": -0.19874526515151514, "compression_ratio": 1.2758620689655173, "no_speech_prob": 0.0, "words": [{"start": 305.1, "end": 305.36, "word": " Any", "probability": 0.8330078125}, {"start": 305.36, "end": 305.7, "word": " question?", "probability": 0.57373046875}, {"start": 310.94, "end": 311.78, "word": " Let's", "probability": 0.825439453125}, {"start": 311.78, "end": 312.2, "word": " move", "probability": 0.94775390625}, {"start": 312.2, "end": 312.6, "word": " to", "probability": 0.91845703125}, {"start": 312.6, "end": 312.8, "word": " new", "probability": 0.62744140625}, {"start": 312.8, "end": 313.02, "word": " topic,", "probability": 0.99169921875}, {"start": 314.32, "end": 314.96, "word": " which", "probability": 0.95751953125}, {"start": 314.96, "end": 315.88, "word": " is", "probability": 0.9521484375}, {"start": 315.88, "end": 316.86, "word": " empirical", "probability": 0.8046875}, {"start": 316.86, "end": 317.24, "word": " role.", 
"probability": 0.51708984375}, {"start": 319.34, "end": 320.18, "word": " Now,", "probability": 0.94873046875}, {"start": 320.24, "end": 320.54, "word": " empirical", "probability": 0.8701171875}, {"start": 320.54, "end": 321.02, "word": " role", "probability": 0.93798828125}, {"start": 321.02, "end": 321.88, "word": " is", "probability": 0.89794921875}, {"start": 321.88, "end": 322.3, "word": " just", "probability": 0.91943359375}, {"start": 322.3, "end": 325.62, "word": " we", "probability": 0.427490234375}, {"start": 325.62, "end": 326.0, "word": " have", "probability": 0.95166015625}, {"start": 326.0, "end": 326.16, "word": " to", "probability": 0.9765625}, {"start": 326.16, "end": 326.74, "word": " approximate", "probability": 0.8837890625}, {"start": 326.74, "end": 327.14, "word": " the", "probability": 0.89697265625}, {"start": 327.14, "end": 327.52, "word": " variation", "probability": 0.9267578125}, {"start": 327.52, "end": 327.78, "word": " of", "probability": 0.95751953125}, {"start": 327.78, "end": 328.16, "word": " data", "probability": 0.9345703125}, {"start": 328.16, "end": 329.74, "word": " in", "probability": 0.88525390625}, {"start": 329.74, "end": 330.12, "word": " case", "probability": 0.919921875}, {"start": 330.12, "end": 330.62, "word": " of", "probability": 0.97119140625}], "temperature": 1.0}, {"id": 14, "seek": 34537, "start": 331.15, "end": 345.37, "text": " They'll shift. I mean suppose the data is symmetric around the mean. 
I mean by symmetric around the mean, the mean is the vertical line that splits the data into two halves.", "tokens": [814, 603, 5513, 13, 286, 914, 7297, 264, 1412, 307, 32330, 926, 264, 914, 13, 286, 914, 538, 32330, 926, 264, 914, 11, 264, 914, 307, 264, 9429, 1622, 300, 37741, 264, 1412, 666, 732, 38490, 13], "avg_logprob": -0.20990954221863495, "compression_ratio": 1.5675675675675675, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 331.15, "end": 331.61, "word": " They'll", "probability": 0.3543701171875}, {"start": 331.61, "end": 331.97, "word": " shift.", "probability": 0.63427734375}, {"start": 332.53, "end": 332.65, "word": " I", "probability": 0.892578125}, {"start": 332.65, "end": 332.87, "word": " mean", "probability": 0.96435546875}, {"start": 332.87, "end": 333.33, "word": " suppose", "probability": 0.499755859375}, {"start": 333.33, "end": 333.65, "word": " the", "probability": 0.873046875}, {"start": 333.65, "end": 333.99, "word": " data", "probability": 0.94921875}, {"start": 333.99, "end": 334.95, "word": " is", "probability": 0.9140625}, {"start": 334.95, "end": 335.35, "word": " symmetric", "probability": 0.80419921875}, {"start": 335.35, "end": 335.91, "word": " around", "probability": 0.9287109375}, {"start": 335.91, "end": 336.17, "word": " the", "probability": 0.923828125}, {"start": 336.17, "end": 336.33, "word": " mean.", "probability": 0.9580078125}, {"start": 336.59, "end": 337.05, "word": " I", "probability": 0.93359375}, {"start": 337.05, "end": 337.19, "word": " mean", "probability": 0.96728515625}, {"start": 337.19, "end": 337.37, "word": " by", "probability": 0.8642578125}, {"start": 337.37, "end": 337.77, "word": " symmetric", "probability": 0.72021484375}, {"start": 337.77, "end": 338.09, "word": " around", "probability": 0.92822265625}, {"start": 338.09, "end": 338.31, "word": " the", "probability": 0.92724609375}, {"start": 338.31, "end": 338.49, "word": " mean,", "probability": 0.96337890625}, {"start": 339.75, 
"end": 340.25, "word": " the", "probability": 0.86962890625}, {"start": 340.25, "end": 340.59, "word": " mean", "probability": 0.9560546875}, {"start": 340.59, "end": 341.27, "word": " is", "probability": 0.93701171875}, {"start": 341.27, "end": 341.41, "word": " the", "probability": 0.83935546875}, {"start": 341.41, "end": 341.77, "word": " vertical", "probability": 0.94482421875}, {"start": 341.77, "end": 342.31, "word": " line", "probability": 0.93017578125}, {"start": 342.31, "end": 342.85, "word": " that", "probability": 0.931640625}, {"start": 342.85, "end": 343.69, "word": " splits", "probability": 0.888671875}, {"start": 343.69, "end": 344.23, "word": " the", "probability": 0.91357421875}, {"start": 344.23, "end": 344.55, "word": " data", "probability": 0.9482421875}, {"start": 344.55, "end": 344.83, "word": " into", "probability": 0.87060546875}, {"start": 344.83, "end": 345.03, "word": " two", "probability": 0.89892578125}, {"start": 345.03, "end": 345.37, "word": " halves.", "probability": 0.94287109375}], "temperature": 1.0}, {"id": 15, "seek": 37381, "start": 345.97, "end": 373.81, "text": " One to the right and the other to the left. I mean, the mean, the area to the right of the mean equals 50%, which is the same as the area to the left of the mean. Now suppose or consider the data is bell-shaped. Bell-shaped, normal, or symmetric? So it's not skewed either to the right or to the left. So here we assume, okay, the data is bell-shaped. 
In this scenario, in this case, there is a rule", "tokens": [1485, 281, 264, 558, 293, 264, 661, 281, 264, 1411, 13, 286, 914, 11, 264, 914, 11, 264, 1859, 281, 264, 558, 295, 264, 914, 6915, 2625, 8923, 597, 307, 264, 912, 382, 264, 1859, 281, 264, 1411, 295, 264, 914, 13, 823, 7297, 420, 1949, 264, 1412, 307, 4549, 12, 23103, 13, 11485, 12, 23103, 11, 2710, 11, 420, 32330, 30, 407, 309, 311, 406, 8756, 26896, 2139, 281, 264, 558, 420, 281, 264, 1411, 13, 407, 510, 321, 6552, 11, 1392, 11, 264, 1412, 307, 4549, 12, 23103, 13, 682, 341, 9005, 11, 294, 341, 1389, 11, 456, 307, 257, 4978], "avg_logprob": -0.18449518929880399, "compression_ratio": 1.8779342723004695, "no_speech_prob": 0.0, "words": [{"start": 345.97, "end": 346.27, "word": " One", "probability": 0.437255859375}, {"start": 346.27, "end": 346.45, "word": " to", "probability": 0.9658203125}, {"start": 346.45, "end": 346.57, "word": " the", "probability": 0.916015625}, {"start": 346.57, "end": 346.81, "word": " right", "probability": 0.90869140625}, {"start": 346.81, "end": 347.49, "word": " and", "probability": 0.61767578125}, {"start": 347.49, "end": 347.63, "word": " the", "probability": 0.673828125}, {"start": 347.63, "end": 347.83, "word": " other", "probability": 0.89501953125}, {"start": 347.83, "end": 348.03, "word": " to", "probability": 0.958984375}, {"start": 348.03, "end": 348.15, "word": " the", "probability": 0.91748046875}, {"start": 348.15, "end": 348.31, "word": " left.", "probability": 0.9296875}, {"start": 348.41, "end": 348.45, "word": " I", "probability": 0.89501953125}, {"start": 348.45, "end": 348.69, "word": " mean,", "probability": 0.9619140625}, {"start": 349.13, "end": 349.29, "word": " the", "probability": 0.86865234375}, {"start": 349.29, "end": 349.57, "word": " mean,", "probability": 0.58447265625}, {"start": 349.87, "end": 349.95, "word": " the", "probability": 0.912109375}, {"start": 349.95, "end": 350.21, "word": " area", "probability": 0.92041015625}, {"start": 350.21, 
"end": 350.39, "word": " to", "probability": 0.95751953125}, {"start": 350.39, "end": 350.63, "word": " the", "probability": 0.91552734375}, {"start": 350.63, "end": 350.95, "word": " right", "probability": 0.923828125}, {"start": 350.95, "end": 351.09, "word": " of", "probability": 0.935546875}, {"start": 351.09, "end": 351.21, "word": " the", "probability": 0.91650390625}, {"start": 351.21, "end": 351.39, "word": " mean", "probability": 0.96533203125}, {"start": 351.39, "end": 351.83, "word": " equals", "probability": 0.8994140625}, {"start": 351.83, "end": 352.65, "word": " 50%,", "probability": 0.5587158203125}, {"start": 352.65, "end": 353.31, "word": " which", "probability": 0.84130859375}, {"start": 353.31, "end": 353.41, "word": " is", "probability": 0.94677734375}, {"start": 353.41, "end": 353.57, "word": " the", "probability": 0.90625}, {"start": 353.57, "end": 353.75, "word": " same", "probability": 0.90283203125}, {"start": 353.75, "end": 353.95, "word": " as", "probability": 0.82861328125}, {"start": 353.95, "end": 354.05, "word": " the", "probability": 0.859375}, {"start": 354.05, "end": 354.21, "word": " area", "probability": 0.89111328125}, {"start": 354.21, "end": 354.37, "word": " to", "probability": 0.95263671875}, {"start": 354.37, "end": 354.47, "word": " the", "probability": 0.912109375}, {"start": 354.47, "end": 354.69, "word": " left", "probability": 0.94970703125}, {"start": 354.69, "end": 354.89, "word": " of", "probability": 0.71826171875}, {"start": 354.89, "end": 354.97, "word": " the", "probability": 0.81787109375}, {"start": 354.97, "end": 355.11, "word": " mean.", "probability": 0.92236328125}, {"start": 355.55, "end": 355.77, "word": " Now", "probability": 0.93212890625}, {"start": 355.77, "end": 356.15, "word": " suppose", "probability": 0.958984375}, {"start": 356.15, "end": 356.37, "word": " or", "probability": 0.56103515625}, {"start": 356.37, "end": 356.83, "word": " consider", "probability": 0.90771484375}, {"start": 356.83, 
"end": 358.05, "word": " the", "probability": 0.81884765625}, {"start": 358.05, "end": 358.33, "word": " data", "probability": 0.9560546875}, {"start": 358.33, "end": 358.55, "word": " is", "probability": 0.92724609375}, {"start": 358.55, "end": 358.71, "word": " bell", "probability": 0.89697265625}, {"start": 358.71, "end": 358.93, "word": "-shaped.", "probability": 0.6153564453125}, {"start": 359.09, "end": 359.31, "word": " Bell", "probability": 0.888671875}, {"start": 359.31, "end": 359.59, "word": "-shaped,", "probability": 0.80029296875}, {"start": 359.85, "end": 360.19, "word": " normal,", "probability": 0.87451171875}, {"start": 360.93, "end": 361.35, "word": " or", "probability": 0.96923828125}, {"start": 361.35, "end": 361.75, "word": " symmetric?", "probability": 0.744140625}, {"start": 362.29, "end": 362.57, "word": " So", "probability": 0.888671875}, {"start": 362.57, "end": 362.67, "word": " it's", "probability": 0.93017578125}, {"start": 362.67, "end": 362.89, "word": " not", "probability": 0.93212890625}, {"start": 362.89, "end": 363.25, "word": " skewed", "probability": 0.97509765625}, {"start": 363.25, "end": 363.47, "word": " either", "probability": 0.7626953125}, {"start": 363.47, "end": 363.65, "word": " to", "probability": 0.9638671875}, {"start": 363.65, "end": 363.79, "word": " the", "probability": 0.9111328125}, {"start": 363.79, "end": 363.97, "word": " right", "probability": 0.9130859375}, {"start": 363.97, "end": 364.13, "word": " or", "probability": 0.85400390625}, {"start": 364.13, "end": 364.15, "word": " to", "probability": 0.82373046875}, {"start": 364.15, "end": 364.29, "word": " the", "probability": 0.9150390625}, {"start": 364.29, "end": 364.45, "word": " left.", "probability": 0.951171875}, {"start": 365.11, "end": 365.37, "word": " So", "probability": 0.9130859375}, {"start": 365.37, "end": 365.57, "word": " here", "probability": 0.82763671875}, {"start": 365.57, "end": 365.71, "word": " we", "probability": 0.8134765625}, 
{"start": 365.71, "end": 366.19, "word": " assume,", "probability": 0.90185546875}, {"start": 366.75, "end": 366.87, "word": " okay,", "probability": 0.7646484375}, {"start": 366.91, "end": 367.01, "word": " the", "probability": 0.9091796875}, {"start": 367.01, "end": 367.27, "word": " data", "probability": 0.9453125}, {"start": 367.27, "end": 367.63, "word": " is", "probability": 0.9462890625}, {"start": 367.63, "end": 368.03, "word": " bell", "probability": 0.95361328125}, {"start": 368.03, "end": 368.21, "word": "-shaped.", "probability": 0.839111328125}, {"start": 369.37, "end": 369.81, "word": " In", "probability": 0.96484375}, {"start": 369.81, "end": 370.01, "word": " this", "probability": 0.9462890625}, {"start": 370.01, "end": 370.41, "word": " scenario,", "probability": 0.8544921875}, {"start": 370.61, "end": 370.79, "word": " in", "probability": 0.90673828125}, {"start": 370.79, "end": 370.95, "word": " this", "probability": 0.947265625}, {"start": 370.95, "end": 371.27, "word": " case,", "probability": 0.908203125}, {"start": 372.07, "end": 373.25, "word": " there", "probability": 0.90966796875}, {"start": 373.25, "end": 373.43, "word": " is", "probability": 0.9384765625}, {"start": 373.43, "end": 373.57, "word": " a", "probability": 0.994140625}, {"start": 373.57, "end": 373.81, "word": " rule", "probability": 0.916015625}], "temperature": 1.0}, {"id": 16, "seek": 40076, "start": 374.18, "end": 400.76, "text": " called 68, 95, 99.7 rule. Number one, approximately 68% of the data in a bill shipped lies within one standard deviation of the population. 
So this is the first rule, 68% of the data or of the observations", "tokens": [1219, 23317, 11, 13420, 11, 11803, 13, 22, 4978, 13, 5118, 472, 11, 10447, 23317, 4, 295, 264, 1412, 294, 257, 2961, 25312, 9134, 1951, 472, 3832, 25163, 295, 264, 4415, 13, 407, 341, 307, 264, 700, 4978, 11, 23317, 4, 295, 264, 1412, 420, 295, 264, 18163], "avg_logprob": -0.19403698493023308, "compression_ratio": 1.4206896551724137, "no_speech_prob": 0.0, "words": [{"start": 374.18, "end": 374.74, "word": " called", "probability": 0.67919921875}, {"start": 374.74, "end": 375.82, "word": " 68,", "probability": 0.90087890625}, {"start": 376.18, "end": 377.76, "word": " 95,", "probability": 0.52685546875}, {"start": 378.16, "end": 378.54, "word": " 99", "probability": 0.9658203125}, {"start": 378.54, "end": 379.16, "word": ".7", "probability": 0.977783203125}, {"start": 379.16, "end": 379.5, "word": " rule.", "probability": 0.6552734375}, {"start": 380.96, "end": 381.8, "word": " Number", "probability": 0.8642578125}, {"start": 381.8, "end": 382.1, "word": " one,", "probability": 0.87451171875}, {"start": 382.96, "end": 383.54, "word": " approximately", "probability": 0.82861328125}, {"start": 383.54, "end": 384.06, "word": " 68", "probability": 0.96044921875}, {"start": 384.06, "end": 384.48, "word": "%", "probability": 0.410888671875}, {"start": 384.48, "end": 384.62, "word": " of", "probability": 0.97265625}, {"start": 384.62, "end": 384.76, "word": " the", "probability": 0.8642578125}, {"start": 384.76, "end": 385.18, "word": " data", "probability": 0.9521484375}, {"start": 385.18, "end": 385.6, "word": " in", "probability": 0.92919921875}, {"start": 385.6, "end": 385.72, "word": " a", "probability": 0.98388671875}, {"start": 385.72, "end": 385.86, "word": " bill", "probability": 0.370849609375}, {"start": 385.86, "end": 386.3, "word": " shipped", "probability": 0.77392578125}, {"start": 386.3, "end": 388.48, "word": " lies", "probability": 0.869140625}, {"start": 388.48, "end": 
388.96, "word": " within", "probability": 0.91552734375}, {"start": 388.96, "end": 389.86, "word": " one", "probability": 0.927734375}, {"start": 389.86, "end": 390.3, "word": " standard", "probability": 0.9443359375}, {"start": 390.3, "end": 390.74, "word": " deviation", "probability": 0.93115234375}, {"start": 390.74, "end": 391.6, "word": " of", "probability": 0.9677734375}, {"start": 391.6, "end": 391.78, "word": " the", "probability": 0.91796875}, {"start": 391.78, "end": 392.22, "word": " population.", "probability": 0.96484375}, {"start": 394.0, "end": 394.84, "word": " So", "probability": 0.89208984375}, {"start": 394.84, "end": 394.98, "word": " this", "probability": 0.83203125}, {"start": 394.98, "end": 395.08, "word": " is", "probability": 0.9296875}, {"start": 395.08, "end": 395.16, "word": " the", "probability": 0.87939453125}, {"start": 395.16, "end": 395.44, "word": " first", "probability": 0.8828125}, {"start": 395.44, "end": 395.68, "word": " rule,", "probability": 0.86181640625}, {"start": 395.8, "end": 396.16, "word": " 68", "probability": 0.984375}, {"start": 396.16, "end": 396.62, "word": "%", "probability": 0.99267578125}, {"start": 396.62, "end": 396.98, "word": " of", "probability": 0.95166015625}, {"start": 396.98, "end": 397.1, "word": " the", "probability": 0.91552734375}, {"start": 397.1, "end": 397.46, "word": " data", "probability": 0.9462890625}, {"start": 397.46, "end": 399.68, "word": " or", "probability": 0.564453125}, {"start": 399.68, "end": 400.0, "word": " of", "probability": 0.943359375}, {"start": 400.0, "end": 400.14, "word": " the", "probability": 0.91064453125}, {"start": 400.14, "end": 400.76, "word": " observations", "probability": 0.802734375}], "temperature": 1.0}, {"id": 17, "seek": 42860, "start": 401.76, "end": 428.6, "text": " Lie within a mu minus sigma and a mu plus sigma. That's the meaning of the data in bell shape distribution is within one standard deviation of mean or mu plus or minus sigma. 
So again, you can say that if the data is normally distributed or if the data is bell shaped, that is 68% of the data", "tokens": [11197, 1951, 257, 2992, 3175, 12771, 293, 257, 2992, 1804, 12771, 13, 663, 311, 264, 3620, 295, 264, 1412, 294, 4549, 3909, 7316, 307, 1951, 472, 3832, 25163, 295, 914, 420, 2992, 1804, 420, 3175, 12771, 13, 407, 797, 11, 291, 393, 584, 300, 498, 264, 1412, 307, 5646, 12631, 420, 498, 264, 1412, 307, 4549, 13475, 11, 300, 307, 23317, 4, 295, 264, 1412], "avg_logprob": -0.1853693208911202, "compression_ratio": 1.7544910179640718, "no_speech_prob": 0.0, "words": [{"start": 401.76, "end": 402.4, "word": " Lie", "probability": 0.47607421875}, {"start": 402.4, "end": 402.84, "word": " within", "probability": 0.8603515625}, {"start": 402.84, "end": 403.44, "word": " a", "probability": 0.408935546875}, {"start": 403.44, "end": 403.6, "word": " mu", "probability": 0.66650390625}, {"start": 403.6, "end": 403.92, "word": " minus", "probability": 0.97900390625}, {"start": 403.92, "end": 404.3, "word": " sigma", "probability": 0.91259765625}, {"start": 404.3, "end": 405.58, "word": " and", "probability": 0.8173828125}, {"start": 405.58, "end": 405.76, "word": " a", "probability": 0.7822265625}, {"start": 405.76, "end": 405.98, "word": " mu", "probability": 0.9619140625}, {"start": 405.98, "end": 406.24, "word": " plus", "probability": 0.94677734375}, {"start": 406.24, "end": 406.56, "word": " sigma.", "probability": 0.9296875}, {"start": 407.0, "end": 407.62, "word": " That's", "probability": 0.89697265625}, {"start": 407.62, "end": 407.76, "word": " the", "probability": 0.9248046875}, {"start": 407.76, "end": 408.08, "word": " meaning", "probability": 0.8828125}, {"start": 408.08, "end": 408.88, "word": " of", "probability": 0.96240234375}, {"start": 408.88, "end": 409.18, "word": " the", "probability": 0.91259765625}, {"start": 409.18, "end": 409.54, "word": " data", "probability": 0.9462890625}, {"start": 409.54, "end": 409.78, "word": " in", 
"probability": 0.8037109375}, {"start": 409.78, "end": 409.98, "word": " bell", "probability": 0.5029296875}, {"start": 409.98, "end": 410.3, "word": " shape", "probability": 0.609375}, {"start": 410.3, "end": 411.0, "word": " distribution", "probability": 0.75732421875}, {"start": 411.0, "end": 411.26, "word": " is", "probability": 0.64306640625}, {"start": 411.26, "end": 411.54, "word": " within", "probability": 0.8623046875}, {"start": 411.54, "end": 411.8, "word": " one", "probability": 0.8642578125}, {"start": 411.8, "end": 412.14, "word": " standard", "probability": 0.9482421875}, {"start": 412.14, "end": 412.62, "word": " deviation", "probability": 0.80908203125}, {"start": 412.62, "end": 413.38, "word": " of", "probability": 0.935546875}, {"start": 413.38, "end": 413.7, "word": " mean", "probability": 0.953125}, {"start": 413.7, "end": 414.5, "word": " or", "probability": 0.7470703125}, {"start": 414.5, "end": 414.94, "word": " mu", "probability": 0.92041015625}, {"start": 414.94, "end": 415.28, "word": " plus", "probability": 0.94873046875}, {"start": 415.28, "end": 415.62, "word": " or", "probability": 0.9560546875}, {"start": 415.62, "end": 415.9, "word": " minus", "probability": 0.99072265625}, {"start": 415.9, "end": 416.28, "word": " sigma.", "probability": 0.84423828125}, {"start": 417.54, "end": 418.14, "word": " So", "probability": 0.94384765625}, {"start": 418.14, "end": 418.4, "word": " again,", "probability": 0.81884765625}, {"start": 419.34, "end": 419.76, "word": " you", "probability": 0.9501953125}, {"start": 419.76, "end": 420.02, "word": " can", "probability": 0.93701171875}, {"start": 420.02, "end": 420.22, "word": " say", "probability": 0.51123046875}, {"start": 420.22, "end": 420.5, "word": " that", "probability": 0.9287109375}, {"start": 420.5, "end": 420.82, "word": " if", "probability": 0.873046875}, {"start": 420.82, "end": 421.0, "word": " the", "probability": 0.9189453125}, {"start": 421.0, "end": 421.26, "word": " data", 
"probability": 0.94287109375}, {"start": 421.26, "end": 421.48, "word": " is", "probability": 0.94970703125}, {"start": 421.48, "end": 421.9, "word": " normally", "probability": 0.8955078125}, {"start": 421.9, "end": 422.6, "word": " distributed", "probability": 0.90576171875}, {"start": 422.6, "end": 423.16, "word": " or", "probability": 0.599609375}, {"start": 423.16, "end": 423.32, "word": " if", "probability": 0.939453125}, {"start": 423.32, "end": 423.46, "word": " the", "probability": 0.91455078125}, {"start": 423.46, "end": 423.68, "word": " data", "probability": 0.93896484375}, {"start": 423.68, "end": 423.88, "word": " is", "probability": 0.93701171875}, {"start": 423.88, "end": 424.1, "word": " bell", "probability": 0.73681640625}, {"start": 424.1, "end": 424.44, "word": " shaped,", "probability": 0.5302734375}, {"start": 424.94, "end": 425.12, "word": " that", "probability": 0.9326171875}, {"start": 425.12, "end": 425.42, "word": " is", "probability": 0.91455078125}, {"start": 425.42, "end": 426.82, "word": " 68", "probability": 0.87255859375}, {"start": 426.82, "end": 427.22, "word": "%", "probability": 0.888671875}, {"start": 427.22, "end": 428.0, "word": " of", "probability": 0.95703125}, {"start": 428.0, "end": 428.2, "word": " the", "probability": 0.919921875}, {"start": 428.2, "end": 428.6, "word": " data", "probability": 0.9375}], "temperature": 1.0}, {"id": 18, "seek": 45775, "start": 430.33, "end": 457.75, "text": " lies within one standard deviation of the mean, either below or above it. So 68% of the data. So this is the first rule. 
68% of the data lies between mu minus sigma and mu plus sigma.", "tokens": [9134, 1951, 472, 3832, 25163, 295, 264, 914, 11, 2139, 2507, 420, 3673, 309, 13, 407, 23317, 4, 295, 264, 1412, 13, 407, 341, 307, 264, 700, 4978, 13, 23317, 4, 295, 264, 1412, 9134, 1296, 2992, 3175, 12771, 293, 2992, 1804, 12771, 13], "avg_logprob": -0.17491319974263508, "compression_ratio": 1.4603174603174602, "no_speech_prob": 0.0, "words": [{"start": 430.33, "end": 430.81, "word": " lies", "probability": 0.260498046875}, {"start": 430.81, "end": 431.23, "word": " within", "probability": 0.89453125}, {"start": 431.23, "end": 432.21, "word": " one", "probability": 0.85595703125}, {"start": 432.21, "end": 432.69, "word": " standard", "probability": 0.93115234375}, {"start": 432.69, "end": 433.21, "word": " deviation", "probability": 0.904296875}, {"start": 433.21, "end": 434.23, "word": " of", "probability": 0.9296875}, {"start": 434.23, "end": 434.41, "word": " the", "probability": 0.92236328125}, {"start": 434.41, "end": 434.61, "word": " mean,", "probability": 0.96142578125}, {"start": 435.05, "end": 435.33, "word": " either", "probability": 0.90478515625}, {"start": 435.33, "end": 435.77, "word": " below", "probability": 0.8984375}, {"start": 435.77, "end": 436.25, "word": " or", "probability": 0.92919921875}, {"start": 436.25, "end": 436.45, "word": " above", "probability": 0.9794921875}, {"start": 436.45, "end": 436.81, "word": " it.", "probability": 0.9521484375}, {"start": 437.33, "end": 437.59, "word": " So", "probability": 0.583984375}, {"start": 437.59, "end": 437.93, "word": " 68", "probability": 0.7822265625}, {"start": 437.93, "end": 438.25, "word": "%", "probability": 0.8642578125}, {"start": 438.25, "end": 438.95, "word": " of", "probability": 0.953125}, {"start": 438.95, "end": 439.11, "word": " the", "probability": 0.91748046875}, {"start": 439.11, "end": 439.43, "word": " data.", "probability": 0.9580078125}, {"start": 440.15, "end": 440.71, "word": " So", 
"probability": 0.84521484375}, {"start": 440.71, "end": 440.89, "word": " this", "probability": 0.84033203125}, {"start": 440.89, "end": 441.31, "word": " is", "probability": 0.87890625}, {"start": 441.31, "end": 441.45, "word": " the", "probability": 0.92041015625}, {"start": 441.45, "end": 441.71, "word": " first", "probability": 0.86962890625}, {"start": 441.71, "end": 442.09, "word": " rule.", "probability": 0.90771484375}, {"start": 449.05, "end": 449.81, "word": " 68", "probability": 0.92822265625}, {"start": 449.81, "end": 451.21, "word": "%", "probability": 0.9736328125}, {"start": 451.21, "end": 451.63, "word": " of", "probability": 0.91845703125}, {"start": 451.63, "end": 451.77, "word": " the", "probability": 0.90966796875}, {"start": 451.77, "end": 452.01, "word": " data", "probability": 0.95361328125}, {"start": 452.01, "end": 452.39, "word": " lies", "probability": 0.93310546875}, {"start": 452.39, "end": 453.39, "word": " between", "probability": 0.88671875}, {"start": 453.39, "end": 453.67, "word": " mu", "probability": 0.615234375}, {"start": 453.67, "end": 453.95, "word": " minus", "probability": 0.9609375}, {"start": 453.95, "end": 454.27, "word": " sigma", "probability": 0.908203125}, {"start": 454.27, "end": 454.67, "word": " and", "probability": 0.93896484375}, {"start": 454.67, "end": 457.17, "word": " mu", "probability": 0.89892578125}, {"start": 457.17, "end": 457.43, "word": " plus", "probability": 0.9462890625}, {"start": 457.43, "end": 457.75, "word": " sigma.", "probability": 0.9248046875}], "temperature": 1.0}, {"id": 19, "seek": 48962, "start": 460.48, "end": 489.62, "text": " The other rule is approximately 95% of the data in a bell-shaped distribution lies within two standard deviations of the mean. That means this area covers between minus two sigma and plus mu plus two sigma. 
So 95% of the data lies between minus mu two sigma", "tokens": [440, 661, 4978, 307, 10447, 13420, 4, 295, 264, 1412, 294, 257, 4549, 12, 23103, 7316, 9134, 1951, 732, 3832, 31219, 763, 295, 264, 914, 13, 663, 1355, 341, 1859, 10538, 1296, 3175, 732, 12771, 293, 1804, 2992, 1804, 732, 12771, 13, 407, 13420, 4, 295, 264, 1412, 9134, 1296, 3175, 2992, 732, 12771], "avg_logprob": -0.14999999837441877, "compression_ratio": 1.6226415094339623, "no_speech_prob": 0.0, "words": [{"start": 460.48, "end": 460.64, "word": " The", "probability": 0.86474609375}, {"start": 460.64, "end": 460.94, "word": " other", "probability": 0.89990234375}, {"start": 460.94, "end": 461.24, "word": " rule", "probability": 0.9013671875}, {"start": 461.24, "end": 461.64, "word": " is", "probability": 0.953125}, {"start": 461.64, "end": 463.96, "word": " approximately", "probability": 0.73681640625}, {"start": 463.96, "end": 464.42, "word": " 95", "probability": 0.974609375}, {"start": 464.42, "end": 464.8, "word": "%", "probability": 0.8779296875}, {"start": 464.8, "end": 465.0, "word": " of", "probability": 0.96142578125}, {"start": 465.0, "end": 465.12, "word": " the", "probability": 0.9111328125}, {"start": 465.12, "end": 465.48, "word": " data", "probability": 0.951171875}, {"start": 465.48, "end": 466.26, "word": " in", "probability": 0.90380859375}, {"start": 466.26, "end": 466.38, "word": " a", "probability": 0.93115234375}, {"start": 466.38, "end": 466.54, "word": " bell", "probability": 0.292724609375}, {"start": 466.54, "end": 466.78, "word": "-shaped", "probability": 0.736328125}, {"start": 466.78, "end": 467.54, "word": " distribution", "probability": 0.869140625}, {"start": 467.54, "end": 468.44, "word": " lies", "probability": 0.81591796875}, {"start": 468.44, "end": 468.7, "word": " within", "probability": 0.79541015625}, {"start": 468.7, "end": 468.98, "word": " two", "probability": 0.9052734375}, {"start": 468.98, "end": 469.38, "word": " standard", "probability": 
0.92724609375}, {"start": 469.38, "end": 469.9, "word": " deviations", "probability": 0.931884765625}, {"start": 469.9, "end": 470.2, "word": " of", "probability": 0.9677734375}, {"start": 470.2, "end": 470.34, "word": " the", "probability": 0.93115234375}, {"start": 470.34, "end": 470.52, "word": " mean.", "probability": 0.9775390625}, {"start": 471.08, "end": 471.48, "word": " That", "probability": 0.9111328125}, {"start": 471.48, "end": 471.8, "word": " means", "probability": 0.927734375}, {"start": 471.8, "end": 473.24, "word": " this", "probability": 0.90966796875}, {"start": 473.24, "end": 473.64, "word": " area", "probability": 0.90185546875}, {"start": 473.64, "end": 475.32, "word": " covers", "probability": 0.84716796875}, {"start": 475.32, "end": 478.02, "word": " between", "probability": 0.8798828125}, {"start": 478.02, "end": 478.82, "word": " minus", "probability": 0.9150390625}, {"start": 478.82, "end": 479.52, "word": " two", "probability": 0.489501953125}, {"start": 479.52, "end": 479.88, "word": " sigma", "probability": 0.869140625}, {"start": 479.88, "end": 480.18, "word": " and", "probability": 0.9169921875}, {"start": 480.18, "end": 480.6, "word": " plus", "probability": 0.794921875}, {"start": 480.6, "end": 480.88, "word": " mu", "probability": 0.53955078125}, {"start": 480.88, "end": 481.14, "word": " plus", "probability": 0.94921875}, {"start": 481.14, "end": 481.36, "word": " two", "probability": 0.8935546875}, {"start": 481.36, "end": 481.6, "word": " sigma.", "probability": 0.9208984375}, {"start": 482.64, "end": 483.38, "word": " So", "probability": 0.9580078125}, {"start": 483.38, "end": 483.78, "word": " 95", "probability": 0.89697265625}, {"start": 483.78, "end": 484.14, "word": "%", "probability": 0.9931640625}, {"start": 484.14, "end": 485.9, "word": " of", "probability": 0.96240234375}, {"start": 485.9, "end": 486.04, "word": " the", "probability": 0.919921875}, {"start": 486.04, "end": 486.42, "word": " data", "probability": 
0.94580078125}, {"start": 486.42, "end": 487.92, "word": " lies", "probability": 0.9150390625}, {"start": 487.92, "end": 488.36, "word": " between", "probability": 0.87841796875}, {"start": 488.36, "end": 488.88, "word": " minus", "probability": 0.98486328125}, {"start": 488.88, "end": 489.1, "word": " mu", "probability": 0.84814453125}, {"start": 489.1, "end": 489.26, "word": " two", "probability": 0.63037109375}, {"start": 489.26, "end": 489.62, "word": " sigma", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 20, "seek": 51899, "start": 490.59, "end": 518.99, "text": " And finally, approximately 99.7% of the data, it means almost the data. Because we are saying 99.7 means most of the data falls or lies within three standard deviations of the mean. So 99.7% of the data lies between mu minus", "tokens": [400, 2721, 11, 10447, 11803, 13, 22, 4, 295, 264, 1412, 11, 309, 1355, 1920, 264, 1412, 13, 1436, 321, 366, 1566, 11803, 13, 22, 1355, 881, 295, 264, 1412, 8804, 420, 9134, 1951, 1045, 3832, 31219, 763, 295, 264, 914, 13, 407, 11803, 13, 22, 4, 295, 264, 1412, 9134, 1296, 2992, 3175], "avg_logprob": -0.21619317639957775, "compression_ratio": 1.5410958904109588, "no_speech_prob": 0.0, "words": [{"start": 490.59, "end": 491.13, "word": " And", "probability": 0.344482421875}, {"start": 491.13, "end": 495.41, "word": " finally,", "probability": 0.254150390625}, {"start": 495.79, "end": 497.73, "word": " approximately", "probability": 0.85986328125}, {"start": 497.73, "end": 498.23, "word": " 99", "probability": 0.92138671875}, {"start": 498.23, "end": 498.85, "word": ".7", "probability": 0.98486328125}, {"start": 498.85, "end": 499.25, "word": "%", "probability": 0.8203125}, {"start": 499.25, "end": 499.61, "word": " of", "probability": 0.958984375}, {"start": 499.61, "end": 499.73, "word": " the", "probability": 0.9208984375}, {"start": 499.73, "end": 499.97, "word": " data,", "probability": 0.94921875}, {"start": 500.11, "end": 500.21, "word": " it", 
"probability": 0.8564453125}, {"start": 500.21, "end": 500.57, "word": " means", "probability": 0.91357421875}, {"start": 500.57, "end": 501.27, "word": " almost", "probability": 0.76220703125}, {"start": 501.27, "end": 501.47, "word": " the", "probability": 0.65966796875}, {"start": 501.47, "end": 501.73, "word": " data.", "probability": 0.91748046875}, {"start": 502.15, "end": 502.49, "word": " Because", "probability": 0.80859375}, {"start": 502.49, "end": 502.61, "word": " we", "probability": 0.90087890625}, {"start": 502.61, "end": 502.73, "word": " are", "probability": 0.92041015625}, {"start": 502.73, "end": 503.11, "word": " saying", "probability": 0.791015625}, {"start": 503.11, "end": 503.99, "word": " 99", "probability": 0.87841796875}, {"start": 503.99, "end": 504.55, "word": ".7", "probability": 0.9951171875}, {"start": 504.55, "end": 504.91, "word": " means", "probability": 0.4228515625}, {"start": 504.91, "end": 505.35, "word": " most", "probability": 0.85595703125}, {"start": 505.35, "end": 505.49, "word": " of", "probability": 0.96826171875}, {"start": 505.49, "end": 505.61, "word": " the", "probability": 0.91845703125}, {"start": 505.61, "end": 505.97, "word": " data", "probability": 0.93408203125}, {"start": 505.97, "end": 507.51, "word": " falls", "probability": 0.68017578125}, {"start": 507.51, "end": 508.39, "word": " or", "probability": 0.85595703125}, {"start": 508.39, "end": 508.85, "word": " lies", "probability": 0.92626953125}, {"start": 508.85, "end": 509.21, "word": " within", "probability": 0.90087890625}, {"start": 509.21, "end": 509.55, "word": " three", "probability": 0.7060546875}, {"start": 509.55, "end": 509.93, "word": " standard", "probability": 0.90673828125}, {"start": 509.93, "end": 510.51, "word": " deviations", "probability": 0.929931640625}, {"start": 510.51, "end": 511.01, "word": " of", "probability": 0.9609375}, {"start": 511.01, "end": 511.17, "word": " the", "probability": 0.921875}, {"start": 511.17, "end": 511.37, 
"word": " mean.", "probability": 0.94775390625}, {"start": 512.85, "end": 513.23, "word": " So", "probability": 0.962890625}, {"start": 513.23, "end": 513.73, "word": " 99", "probability": 0.8408203125}, {"start": 513.73, "end": 515.35, "word": ".7", "probability": 0.99658203125}, {"start": 515.35, "end": 515.83, "word": "%", "probability": 0.98681640625}, {"start": 515.83, "end": 516.13, "word": " of", "probability": 0.958984375}, {"start": 516.13, "end": 516.27, "word": " the", "probability": 0.921875}, {"start": 516.27, "end": 516.63, "word": " data", "probability": 0.9453125}, {"start": 516.63, "end": 517.77, "word": " lies", "probability": 0.888671875}, {"start": 517.77, "end": 518.23, "word": " between", "probability": 0.888671875}, {"start": 518.23, "end": 518.51, "word": " mu", "probability": 0.70703125}, {"start": 518.51, "end": 518.99, "word": " minus", "probability": 0.966796875}], "temperature": 1.0}, {"id": 21, "seek": 54371, "start": 519.73, "end": 543.71, "text": " the pre-sigma and the mu plus of pre-sigma. 68, 95, 99.7 are fixed numbers. Later in chapter 6, we will explain in details other coefficients. Maybe suppose we are interested not in one of these. 
Suppose we are interested in 90% or 80% or 85%.", "tokens": [264, 659, 12, 82, 16150, 293, 264, 2992, 1804, 295, 659, 12, 82, 16150, 13, 23317, 11, 13420, 11, 11803, 13, 22, 366, 6806, 3547, 13, 11965, 294, 7187, 1386, 11, 321, 486, 2903, 294, 4365, 661, 31994, 13, 2704, 7297, 321, 366, 3102, 406, 294, 472, 295, 613, 13, 21360, 321, 366, 3102, 294, 4289, 4, 420, 4688, 4, 420, 14695, 6856], "avg_logprob": -0.24462891183793545, "compression_ratio": 1.4787878787878788, "no_speech_prob": 0.0, "words": [{"start": 519.73, "end": 519.97, "word": " the", "probability": 0.105712890625}, {"start": 519.97, "end": 520.17, "word": " pre", "probability": 0.2587890625}, {"start": 520.17, "end": 520.49, "word": "-sigma", "probability": 0.8709309895833334}, {"start": 520.49, "end": 520.85, "word": " and", "probability": 0.70654296875}, {"start": 520.85, "end": 520.99, "word": " the", "probability": 0.7373046875}, {"start": 520.99, "end": 521.05, "word": " mu", "probability": 0.6845703125}, {"start": 521.05, "end": 521.31, "word": " plus", "probability": 0.78759765625}, {"start": 521.31, "end": 521.47, "word": " of", "probability": 0.5224609375}, {"start": 521.47, "end": 521.67, "word": " pre", "probability": 0.383544921875}, {"start": 521.67, "end": 521.87, "word": "-sigma.", "probability": 0.9557291666666666}, {"start": 525.03, "end": 525.55, "word": " 68,", "probability": 0.8681640625}, {"start": 525.79, "end": 526.27, "word": " 95,", "probability": 0.95703125}, {"start": 526.47, "end": 526.69, "word": " 99", "probability": 0.95654296875}, {"start": 526.69, "end": 527.19, "word": ".7", "probability": 0.7978515625}, {"start": 527.19, "end": 527.55, "word": " are", "probability": 0.89111328125}, {"start": 527.55, "end": 527.87, "word": " fixed", "probability": 0.91650390625}, {"start": 527.87, "end": 528.31, "word": " numbers.", "probability": 0.89892578125}, {"start": 529.09, "end": 529.37, "word": " Later", "probability": 0.87890625}, {"start": 529.37, "end": 529.57, "word": " 
in", "probability": 0.79736328125}, {"start": 529.57, "end": 529.81, "word": " chapter", "probability": 0.53076171875}, {"start": 529.81, "end": 530.29, "word": " 6,", "probability": 0.51220703125}, {"start": 530.45, "end": 530.61, "word": " we", "probability": 0.94384765625}, {"start": 530.61, "end": 530.87, "word": " will", "probability": 0.85693359375}, {"start": 530.87, "end": 531.77, "word": " explain", "probability": 0.94921875}, {"start": 531.77, "end": 532.03, "word": " in", "probability": 0.8857421875}, {"start": 532.03, "end": 532.53, "word": " details", "probability": 0.81787109375}, {"start": 532.53, "end": 533.97, "word": " other", "probability": 0.76806640625}, {"start": 533.97, "end": 535.01, "word": " coefficients.", "probability": 0.9482421875}, {"start": 535.53, "end": 535.65, "word": " Maybe", "probability": 0.84033203125}, {"start": 535.65, "end": 536.13, "word": " suppose", "probability": 0.6337890625}, {"start": 536.13, "end": 536.33, "word": " we", "probability": 0.9296875}, {"start": 536.33, "end": 536.67, "word": " are", "probability": 0.9296875}, {"start": 536.67, "end": 537.07, "word": " interested", "probability": 0.7744140625}, {"start": 537.07, "end": 537.71, "word": " not", "probability": 0.63232421875}, {"start": 537.71, "end": 537.91, "word": " in", "probability": 0.93017578125}, {"start": 537.91, "end": 538.09, "word": " one", "probability": 0.92041015625}, {"start": 538.09, "end": 538.25, "word": " of", "probability": 0.9677734375}, {"start": 538.25, "end": 538.47, "word": " these.", "probability": 0.853515625}, {"start": 538.69, "end": 538.99, "word": " Suppose", "probability": 0.82373046875}, {"start": 538.99, "end": 540.27, "word": " we", "probability": 0.9287109375}, {"start": 540.27, "end": 540.49, "word": " are", "probability": 0.94140625}, {"start": 540.49, "end": 540.97, "word": " interested", "probability": 0.8369140625}, {"start": 540.97, "end": 541.21, "word": " in", "probability": 0.91796875}, {"start": 541.21, "end": 
541.47, "word": " 90", "probability": 0.9833984375}, {"start": 541.47, "end": 541.71, "word": "%", "probability": 0.64111328125}, {"start": 541.71, "end": 542.07, "word": " or", "probability": 0.9482421875}, {"start": 542.07, "end": 542.37, "word": " 80", "probability": 0.9599609375}, {"start": 542.37, "end": 542.75, "word": "%", "probability": 0.97998046875}, {"start": 542.75, "end": 543.01, "word": " or", "probability": 0.96240234375}, {"start": 543.01, "end": 543.71, "word": " 85%.", "probability": 0.75244140625}], "temperature": 1.0}, {"id": 22, "seek": 56486, "start": 545.08, "end": 564.86, "text": " This rule just for 689599.7. This rule is called 689599.7 rule. That is, again, 68% of the data lies within one standard deviation of the mean.", "tokens": [639, 4978, 445, 337, 23317, 15718, 8494, 13, 22, 13, 639, 4978, 307, 1219, 23317, 15718, 8494, 13, 22, 4978, 13, 663, 307, 11, 797, 11, 23317, 4, 295, 264, 1412, 9134, 1951, 472, 3832, 25163, 295, 264, 914, 13], "avg_logprob": -0.1795922256097561, "compression_ratio": 1.3211009174311927, "no_speech_prob": 0.0, "words": [{"start": 545.08, "end": 545.5, "word": " This", "probability": 0.5888671875}, {"start": 545.5, "end": 545.74, "word": " rule", "probability": 0.8662109375}, {"start": 545.74, "end": 546.16, "word": " just", "probability": 0.4150390625}, {"start": 546.16, "end": 547.24, "word": " for", "probability": 0.91748046875}, {"start": 547.24, "end": 549.68, "word": " 689599", "probability": 0.8634440104166666}, {"start": 549.68, "end": 550.34, "word": ".7.", "probability": 0.77978515625}, {"start": 550.64, "end": 551.14, "word": " This", "probability": 0.859375}, {"start": 551.14, "end": 551.36, "word": " rule", "probability": 0.88916015625}, {"start": 551.36, "end": 551.5, "word": " is", "probability": 0.9453125}, {"start": 551.5, "end": 551.96, "word": " called", "probability": 0.87841796875}, {"start": 551.96, "end": 555.56, "word": " 689599", "probability": 0.9527994791666666}, {"start": 555.56, 
"end": 556.26, "word": ".7", "probability": 0.982177734375}, {"start": 556.26, "end": 556.94, "word": " rule.", "probability": 0.83984375}, {"start": 557.66, "end": 557.86, "word": " That", "probability": 0.880859375}, {"start": 557.86, "end": 558.2, "word": " is,", "probability": 0.94775390625}, {"start": 558.84, "end": 559.04, "word": " again,", "probability": 0.9501953125}, {"start": 560.08, "end": 560.5, "word": " 68", "probability": 0.9453125}, {"start": 560.5, "end": 560.84, "word": "%", "probability": 0.77001953125}, {"start": 560.84, "end": 561.14, "word": " of", "probability": 0.95166015625}, {"start": 561.14, "end": 561.28, "word": " the", "probability": 0.91552734375}, {"start": 561.28, "end": 561.6, "word": " data", "probability": 0.9541015625}, {"start": 561.6, "end": 562.96, "word": " lies", "probability": 0.880859375}, {"start": 562.96, "end": 563.32, "word": " within", "probability": 0.904296875}, {"start": 563.32, "end": 563.58, "word": " one", "probability": 0.91064453125}, {"start": 563.58, "end": 564.0, "word": " standard", "probability": 0.93701171875}, {"start": 564.0, "end": 564.38, "word": " deviation", "probability": 0.89990234375}, {"start": 564.38, "end": 564.6, "word": " of", "probability": 0.966796875}, {"start": 564.6, "end": 564.72, "word": " the", "probability": 0.9267578125}, {"start": 564.72, "end": 564.86, "word": " mean.", "probability": 0.9814453125}], "temperature": 1.0}, {"id": 23, "seek": 59407, "start": 565.95, "end": 594.07, "text": " 95% of the data lies within two standard deviations of the mean. And finally, most of the data falls within three standard deviations of the mean. Let's see how can we use this empirical rule for a specific example. Imagine that the variable math set scores is bell shaped. 
So here we assume that", "tokens": [13420, 4, 295, 264, 1412, 9134, 1951, 732, 3832, 31219, 763, 295, 264, 914, 13, 400, 2721, 11, 881, 295, 264, 1412, 8804, 1951, 1045, 3832, 31219, 763, 295, 264, 914, 13, 961, 311, 536, 577, 393, 321, 764, 341, 31886, 4978, 337, 257, 2685, 1365, 13, 11739, 300, 264, 7006, 5221, 992, 13444, 307, 4549, 13475, 13, 407, 510, 321, 6552, 300], "avg_logprob": -0.17236328381113708, "compression_ratio": 1.6141304347826086, "no_speech_prob": 0.0, "words": [{"start": 565.95, "end": 566.37, "word": " 95", "probability": 0.7705078125}, {"start": 566.37, "end": 566.77, "word": "%", "probability": 0.80224609375}, {"start": 566.77, "end": 567.03, "word": " of", "probability": 0.9501953125}, {"start": 567.03, "end": 567.17, "word": " the", "probability": 0.9033203125}, {"start": 567.17, "end": 567.51, "word": " data", "probability": 0.94189453125}, {"start": 567.51, "end": 568.79, "word": " lies", "probability": 0.837890625}, {"start": 568.79, "end": 569.11, "word": " within", "probability": 0.876953125}, {"start": 569.11, "end": 569.39, "word": " two", "probability": 0.81005859375}, {"start": 569.39, "end": 569.77, "word": " standard", "probability": 0.90869140625}, {"start": 569.77, "end": 570.17, "word": " deviations", "probability": 0.917724609375}, {"start": 570.17, "end": 570.37, "word": " of", "probability": 0.96728515625}, {"start": 570.37, "end": 570.49, "word": " the", "probability": 0.92919921875}, {"start": 570.49, "end": 570.67, "word": " mean.", "probability": 0.94775390625}, {"start": 571.17, "end": 571.31, "word": " And", "probability": 0.9384765625}, {"start": 571.31, "end": 571.79, "word": " finally,", "probability": 0.80810546875}, {"start": 571.97, "end": 572.35, "word": " most", "probability": 0.9013671875}, {"start": 572.35, "end": 572.53, "word": " of", "probability": 0.96826171875}, {"start": 572.53, "end": 572.63, "word": " the", "probability": 0.91162109375}, {"start": 572.63, "end": 573.07, "word": " data", 
"probability": 0.9287109375}, {"start": 573.07, "end": 573.85, "word": " falls", "probability": 0.7763671875}, {"start": 573.85, "end": 574.27, "word": " within", "probability": 0.9111328125}, {"start": 574.27, "end": 575.31, "word": " three", "probability": 0.90576171875}, {"start": 575.31, "end": 575.87, "word": " standard", "probability": 0.9365234375}, {"start": 575.87, "end": 576.39, "word": " deviations", "probability": 0.952392578125}, {"start": 576.39, "end": 576.65, "word": " of", "probability": 0.96533203125}, {"start": 576.65, "end": 576.77, "word": " the", "probability": 0.927734375}, {"start": 576.77, "end": 576.95, "word": " mean.", "probability": 0.94921875}, {"start": 579.87, "end": 580.63, "word": " Let's", "probability": 0.964599609375}, {"start": 580.63, "end": 580.75, "word": " see", "probability": 0.9248046875}, {"start": 580.75, "end": 580.87, "word": " how", "probability": 0.90966796875}, {"start": 580.87, "end": 581.03, "word": " can", "probability": 0.8408203125}, {"start": 581.03, "end": 581.19, "word": " we", "probability": 0.93896484375}, {"start": 581.19, "end": 581.53, "word": " use", "probability": 0.87548828125}, {"start": 581.53, "end": 582.05, "word": " this", "probability": 0.9462890625}, {"start": 582.05, "end": 582.55, "word": " empirical", "probability": 0.89892578125}, {"start": 582.55, "end": 582.93, "word": " rule", "probability": 0.91748046875}, {"start": 582.93, "end": 583.17, "word": " for", "probability": 0.94921875}, {"start": 583.17, "end": 583.33, "word": " a", "probability": 0.83740234375}, {"start": 583.33, "end": 583.77, "word": " specific", "probability": 0.9033203125}, {"start": 583.77, "end": 584.21, "word": " example.", "probability": 0.85791015625}, {"start": 585.29, "end": 585.71, "word": " Imagine", "probability": 0.85400390625}, {"start": 585.71, "end": 586.21, "word": " that", "probability": 0.9345703125}, {"start": 586.21, "end": 588.75, "word": " the", "probability": 0.84716796875}, {"start": 588.75, 
"end": 589.19, "word": " variable", "probability": 0.91796875}, {"start": 589.19, "end": 589.85, "word": " math", "probability": 0.671875}, {"start": 589.85, "end": 590.19, "word": " set", "probability": 0.291259765625}, {"start": 590.19, "end": 590.79, "word": " scores", "probability": 0.744140625}, {"start": 590.79, "end": 591.13, "word": " is", "probability": 0.84814453125}, {"start": 591.13, "end": 591.33, "word": " bell", "probability": 0.5986328125}, {"start": 591.33, "end": 591.63, "word": " shaped.", "probability": 0.55078125}, {"start": 592.01, "end": 592.17, "word": " So", "probability": 0.939453125}, {"start": 592.17, "end": 592.39, "word": " here", "probability": 0.81201171875}, {"start": 592.39, "end": 592.61, "word": " we", "probability": 0.5263671875}, {"start": 592.61, "end": 593.09, "word": " assume", "probability": 0.75146484375}, {"start": 593.09, "end": 594.07, "word": " that", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 24, "seek": 62059, "start": 595.23, "end": 620.59, "text": " The math status score has symmetric shape or bell shape. In this case, we can use the previous rule. Otherwise, we cannot. So assume the math status score is bell-shaped with a mean of 500. 
I mean, the population mean is 500 and standard deviation of 90.", "tokens": [440, 5221, 6558, 6175, 575, 32330, 3909, 420, 4549, 3909, 13, 682, 341, 1389, 11, 321, 393, 764, 264, 3894, 4978, 13, 10328, 11, 321, 2644, 13, 407, 6552, 264, 5221, 6558, 6175, 307, 4549, 12, 23103, 365, 257, 914, 295, 5923, 13, 286, 914, 11, 264, 4415, 914, 307, 5923, 293, 3832, 25163, 295, 4289, 13], "avg_logprob": -0.25121229270408896, "compression_ratio": 1.5548780487804879, "no_speech_prob": 0.0, "words": [{"start": 595.23, "end": 595.55, "word": " The", "probability": 0.42626953125}, {"start": 595.55, "end": 595.83, "word": " math", "probability": 0.63037109375}, {"start": 595.83, "end": 596.17, "word": " status", "probability": 0.283447265625}, {"start": 596.17, "end": 596.61, "word": " score", "probability": 0.66259765625}, {"start": 596.61, "end": 597.35, "word": " has", "probability": 0.869140625}, {"start": 597.35, "end": 599.11, "word": " symmetric", "probability": 0.51318359375}, {"start": 599.11, "end": 599.63, "word": " shape", "probability": 0.181884765625}, {"start": 599.63, "end": 600.55, "word": " or", "probability": 0.78076171875}, {"start": 600.55, "end": 600.95, "word": " bell", "probability": 0.86669921875}, {"start": 600.95, "end": 601.39, "word": " shape.", "probability": 0.740234375}, {"start": 601.81, "end": 602.33, "word": " In", "probability": 0.91650390625}, {"start": 602.33, "end": 602.59, "word": " this", "probability": 0.94970703125}, {"start": 602.59, "end": 602.87, "word": " case,", "probability": 0.91357421875}, {"start": 602.91, "end": 603.07, "word": " we", "probability": 0.9423828125}, {"start": 603.07, "end": 603.27, "word": " can", "probability": 0.9423828125}, {"start": 603.27, "end": 603.45, "word": " use", "probability": 0.86669921875}, {"start": 603.45, "end": 603.57, "word": " the", "probability": 0.8955078125}, {"start": 603.57, "end": 603.93, "word": " previous", "probability": 0.84765625}, {"start": 603.93, "end": 604.23, "word": " rule.", 
"probability": 0.91064453125}, {"start": 604.35, "end": 604.57, "word": " Otherwise,", "probability": 0.90771484375}, {"start": 604.77, "end": 604.89, "word": " we", "probability": 0.93505859375}, {"start": 604.89, "end": 605.17, "word": " cannot.", "probability": 0.85205078125}, {"start": 606.19, "end": 606.55, "word": " So", "probability": 0.8076171875}, {"start": 606.55, "end": 607.39, "word": " assume", "probability": 0.71533203125}, {"start": 607.39, "end": 608.79, "word": " the", "probability": 0.83935546875}, {"start": 608.79, "end": 609.33, "word": " math", "probability": 0.90966796875}, {"start": 609.33, "end": 609.61, "word": " status", "probability": 0.919921875}, {"start": 609.61, "end": 609.91, "word": " score", "probability": 0.7919921875}, {"start": 609.91, "end": 610.07, "word": " is", "probability": 0.89892578125}, {"start": 610.07, "end": 610.31, "word": " bell", "probability": 0.8994140625}, {"start": 610.31, "end": 610.67, "word": "-shaped", "probability": 0.700927734375}, {"start": 610.67, "end": 611.73, "word": " with", "probability": 0.77197265625}, {"start": 611.73, "end": 611.85, "word": " a", "probability": 0.955078125}, {"start": 611.85, "end": 611.99, "word": " mean", "probability": 0.978515625}, {"start": 611.99, "end": 612.15, "word": " of", "probability": 0.9677734375}, {"start": 612.15, "end": 612.67, "word": " 500.", "probability": 0.88720703125}, {"start": 614.85, "end": 615.55, "word": " I", "probability": 0.83740234375}, {"start": 615.55, "end": 615.75, "word": " mean,", "probability": 0.96142578125}, {"start": 616.41, "end": 616.65, "word": " the", "probability": 0.9052734375}, {"start": 616.65, "end": 617.01, "word": " population", "probability": 0.96484375}, {"start": 617.01, "end": 617.27, "word": " mean", "probability": 0.537109375}, {"start": 617.27, "end": 617.41, "word": " is", "probability": 0.9443359375}, {"start": 617.41, "end": 617.87, "word": " 500", "probability": 0.97265625}, {"start": 617.87, "end": 618.93, 
"word": " and", "probability": 0.64404296875}, {"start": 618.93, "end": 619.35, "word": " standard", "probability": 0.90576171875}, {"start": 619.35, "end": 619.75, "word": " deviation", "probability": 0.95166015625}, {"start": 619.75, "end": 620.15, "word": " of", "probability": 0.95166015625}, {"start": 620.15, "end": 620.59, "word": " 90.", "probability": 0.9765625}], "temperature": 1.0}, {"id": 25, "seek": 64848, "start": 621.66, "end": 648.48, "text": " And let's see how can we apply the empirical rule. So again, meta score has a mean of 500 and standard deviation sigma is 90. Then we can say that 60% of all test takers scored between 68%. So mu is 500.", "tokens": [400, 718, 311, 536, 577, 393, 321, 3079, 264, 31886, 4978, 13, 407, 797, 11, 19616, 6175, 575, 257, 914, 295, 5923, 293, 3832, 25163, 12771, 307, 4289, 13, 1396, 321, 393, 584, 300, 4060, 4, 295, 439, 1500, 991, 433, 18139, 1296, 23317, 6856, 407, 2992, 307, 5923, 13], "avg_logprob": -0.2005208356707704, "compression_ratio": 1.3333333333333333, "no_speech_prob": 0.0, "words": [{"start": 621.66, "end": 621.98, "word": " And", "probability": 0.71630859375}, {"start": 621.98, "end": 622.32, "word": " let's", "probability": 0.94775390625}, {"start": 622.32, "end": 622.58, "word": " see", "probability": 0.86474609375}, {"start": 622.58, "end": 622.94, "word": " how", "probability": 0.77734375}, {"start": 622.94, "end": 623.24, "word": " can", "probability": 0.86376953125}, {"start": 623.24, "end": 623.38, "word": " we", "probability": 0.91259765625}, {"start": 623.38, "end": 623.88, "word": " apply", "probability": 0.9375}, {"start": 623.88, "end": 624.62, "word": " the", "probability": 0.875}, {"start": 624.62, "end": 625.12, "word": " empirical", "probability": 0.9208984375}, {"start": 625.12, "end": 625.46, "word": " rule.", "probability": 0.6728515625}, {"start": 626.12, "end": 626.3, "word": " So", "probability": 0.94873046875}, {"start": 626.3, "end": 626.62, "word": " again,", "probability": 
0.7890625}, {"start": 627.48, "end": 627.7, "word": " meta", "probability": 0.1298828125}, {"start": 627.7, "end": 628.3, "word": " score", "probability": 0.6826171875}, {"start": 628.3, "end": 628.76, "word": " has", "probability": 0.9384765625}, {"start": 628.76, "end": 628.9, "word": " a", "probability": 0.97265625}, {"start": 628.9, "end": 629.02, "word": " mean", "probability": 0.9755859375}, {"start": 629.02, "end": 629.22, "word": " of", "probability": 0.97119140625}, {"start": 629.22, "end": 629.7, "word": " 500", "probability": 0.94384765625}, {"start": 629.7, "end": 631.12, "word": " and", "probability": 0.479248046875}, {"start": 631.12, "end": 631.86, "word": " standard", "probability": 0.93896484375}, {"start": 631.86, "end": 632.32, "word": " deviation", "probability": 0.9580078125}, {"start": 632.32, "end": 633.22, "word": " sigma", "probability": 0.60888671875}, {"start": 633.22, "end": 633.62, "word": " is", "probability": 0.94970703125}, {"start": 633.62, "end": 634.06, "word": " 90.", "probability": 0.9736328125}, {"start": 634.84, "end": 635.14, "word": " Then", "probability": 0.83544921875}, {"start": 635.14, "end": 635.3, "word": " we", "probability": 0.8798828125}, {"start": 635.3, "end": 635.5, "word": " can", "probability": 0.90576171875}, {"start": 635.5, "end": 635.72, "word": " say", "probability": 0.86767578125}, {"start": 635.72, "end": 635.96, "word": " that", "probability": 0.93310546875}, {"start": 635.96, "end": 637.66, "word": " 60", "probability": 0.8701171875}, {"start": 637.66, "end": 638.0, "word": "%", "probability": 0.845703125}, {"start": 638.0, "end": 638.5, "word": " of", "probability": 0.96923828125}, {"start": 638.5, "end": 638.9, "word": " all", "probability": 0.9326171875}, {"start": 638.9, "end": 639.34, "word": " test", "probability": 0.712890625}, {"start": 639.34, "end": 639.94, "word": " takers", "probability": 0.840576171875}, {"start": 639.94, "end": 641.28, "word": " scored", "probability": 0.89013671875}, 
{"start": 641.28, "end": 643.2, "word": " between", "probability": 0.859375}, {"start": 643.2, "end": 646.64, "word": " 68%.", "probability": 0.556884765625}, {"start": 646.64, "end": 647.16, "word": " So", "probability": 0.95556640625}, {"start": 647.16, "end": 647.54, "word": " mu", "probability": 0.63134765625}, {"start": 647.54, "end": 647.86, "word": " is", "probability": 0.9462890625}, {"start": 647.86, "end": 648.48, "word": " 500.", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 26, "seek": 67929, "start": 650.11, "end": 679.29, "text": " minus sigma is 90. And mu plus sigma, 500 plus 90. So you can say that 68% or 230 of all test takers scored between 410 and 590. So 68% of all test takers who took", "tokens": [3175, 12771, 307, 4289, 13, 400, 2992, 1804, 12771, 11, 5923, 1804, 4289, 13, 407, 291, 393, 584, 300, 23317, 4, 420, 35311, 295, 439, 1500, 991, 433, 18139, 1296, 1017, 3279, 293, 1025, 7771, 13, 407, 23317, 4, 295, 439, 1500, 991, 433, 567, 1890], "avg_logprob": -0.2202460119064818, "compression_ratio": 1.3442622950819672, "no_speech_prob": 0.0, "words": [{"start": 650.11, "end": 650.73, "word": " minus", "probability": 0.470703125}, {"start": 650.73, "end": 651.91, "word": " sigma", "probability": 0.7080078125}, {"start": 651.91, "end": 652.71, "word": " is", "probability": 0.8759765625}, {"start": 652.71, "end": 653.23, "word": " 90.", "probability": 0.8896484375}, {"start": 654.47, "end": 655.51, "word": " And", "probability": 0.341552734375}, {"start": 655.51, "end": 655.75, "word": " mu", "probability": 0.6689453125}, {"start": 655.75, "end": 656.55, "word": " plus", "probability": 0.94384765625}, {"start": 656.55, "end": 657.05, "word": " sigma,", "probability": 0.93310546875}, {"start": 657.81, "end": 658.37, "word": " 500", "probability": 0.96044921875}, {"start": 658.37, "end": 659.37, "word": " plus", "probability": 0.87451171875}, {"start": 659.37, "end": 659.85, "word": " 90.", "probability": 0.974609375}, {"start": 
660.47, "end": 660.77, "word": " So", "probability": 0.9482421875}, {"start": 660.77, "end": 660.95, "word": " you", "probability": 0.84716796875}, {"start": 660.95, "end": 661.15, "word": " can", "probability": 0.9423828125}, {"start": 661.15, "end": 661.39, "word": " say", "probability": 0.71435546875}, {"start": 661.39, "end": 661.69, "word": " that", "probability": 0.8798828125}, {"start": 661.69, "end": 663.49, "word": " 68", "probability": 0.9287109375}, {"start": 663.49, "end": 663.93, "word": "%", "probability": 0.63916015625}, {"start": 663.93, "end": 664.71, "word": " or", "probability": 0.900390625}, {"start": 664.71, "end": 665.39, "word": " 230", "probability": 0.253662109375}, {"start": 665.39, "end": 667.17, "word": " of", "probability": 0.9033203125}, {"start": 667.17, "end": 667.63, "word": " all", "probability": 0.94287109375}, {"start": 667.63, "end": 667.89, "word": " test", "probability": 0.873046875}, {"start": 667.89, "end": 668.47, "word": " takers", "probability": 0.796875}, {"start": 668.47, "end": 669.31, "word": " scored", "probability": 0.8369140625}, {"start": 669.31, "end": 669.89, "word": " between", "probability": 0.87939453125}, {"start": 669.89, "end": 672.03, "word": " 410", "probability": 0.95166015625}, {"start": 672.03, "end": 673.01, "word": " and", "probability": 0.92919921875}, {"start": 673.01, "end": 674.39, "word": " 590.", "probability": 0.780029296875}, {"start": 674.91, "end": 675.61, "word": " So", "probability": 0.8876953125}, {"start": 675.61, "end": 675.99, "word": " 68", "probability": 0.97119140625}, {"start": 675.99, "end": 676.25, "word": "%", "probability": 0.9853515625}, {"start": 676.25, "end": 676.75, "word": " of", "probability": 0.96337890625}, {"start": 676.75, "end": 677.71, "word": " all", "probability": 0.7021484375}, {"start": 677.71, "end": 677.97, "word": " test", "probability": 0.7763671875}, {"start": 677.97, "end": 678.47, "word": " takers", "probability": 0.90869140625}, {"start": 678.47, 
"end": 678.93, "word": " who", "probability": 0.88720703125}, {"start": 678.93, "end": 679.29, "word": " took", "probability": 0.9208984375}], "temperature": 1.0}, {"id": 27, "seek": 70774, "start": 679.82, "end": 707.74, "text": " that exam scored between 14 and 590. That if we assume previously the data is well shaped, otherwise we cannot say that. For the other rule, 95% of all test takers scored between mu is 500 minus 2 times sigma, 500 plus 2 times sigma. So that means", "tokens": [300, 1139, 18139, 1296, 3499, 293, 1025, 7771, 13, 663, 498, 321, 6552, 8046, 264, 1412, 307, 731, 13475, 11, 5911, 321, 2644, 584, 300, 13, 1171, 264, 661, 4978, 11, 13420, 4, 295, 439, 1500, 991, 433, 18139, 1296, 2992, 307, 5923, 3175, 568, 1413, 12771, 11, 5923, 1804, 568, 1413, 12771, 13, 407, 300, 1355], "avg_logprob": -0.2147090527518042, "compression_ratio": 1.503030303030303, "no_speech_prob": 0.0, "words": [{"start": 679.82, "end": 680.24, "word": " that", "probability": 0.312255859375}, {"start": 680.24, "end": 681.66, "word": " exam", "probability": 0.9228515625}, {"start": 681.66, "end": 682.9, "word": " scored", "probability": 0.55224609375}, {"start": 682.9, "end": 683.38, "word": " between", "probability": 0.88330078125}, {"start": 683.38, "end": 684.16, "word": " 14", "probability": 0.865234375}, {"start": 684.16, "end": 684.68, "word": " and", "probability": 0.91357421875}, {"start": 684.68, "end": 685.8, "word": " 590.", "probability": 0.787841796875}, {"start": 685.98, "end": 686.44, "word": " That", "probability": 0.85546875}, {"start": 686.44, "end": 686.64, "word": " if", "probability": 0.791015625}, {"start": 686.64, "end": 686.8, "word": " we", "probability": 0.84130859375}, {"start": 686.8, "end": 687.26, "word": " assume", "probability": 0.86865234375}, {"start": 687.26, "end": 687.74, "word": " previously", "probability": 0.63232421875}, {"start": 687.74, "end": 688.02, "word": " the", "probability": 0.78955078125}, {"start": 688.02, "end": 688.3, 
"word": " data", "probability": 0.9306640625}, {"start": 688.3, "end": 688.5, "word": " is", "probability": 0.8720703125}, {"start": 688.5, "end": 688.66, "word": " well", "probability": 0.826171875}, {"start": 688.66, "end": 688.92, "word": " shaped,", "probability": 0.4921875}, {"start": 689.0, "end": 689.22, "word": " otherwise", "probability": 0.84326171875}, {"start": 689.22, "end": 689.52, "word": " we", "probability": 0.6298828125}, {"start": 689.52, "end": 689.72, "word": " cannot", "probability": 0.87451171875}, {"start": 689.72, "end": 689.98, "word": " say", "probability": 0.60595703125}, {"start": 689.98, "end": 690.16, "word": " that.", "probability": 0.93505859375}, {"start": 691.18, "end": 691.56, "word": " For", "probability": 0.9443359375}, {"start": 691.56, "end": 691.7, "word": " the", "probability": 0.9228515625}, {"start": 691.7, "end": 691.94, "word": " other", "probability": 0.90185546875}, {"start": 691.94, "end": 692.28, "word": " rule,", "probability": 0.6513671875}, {"start": 692.74, "end": 693.14, "word": " 95", "probability": 0.97021484375}, {"start": 693.14, "end": 693.84, "word": "%", "probability": 0.77490234375}, {"start": 693.84, "end": 695.1, "word": " of", "probability": 0.96728515625}, {"start": 695.1, "end": 695.52, "word": " all", "probability": 0.94580078125}, {"start": 695.52, "end": 695.9, "word": " test", "probability": 0.8740234375}, {"start": 695.9, "end": 696.42, "word": " takers", "probability": 0.8095703125}, {"start": 696.42, "end": 696.84, "word": " scored", "probability": 0.89306640625}, {"start": 696.84, "end": 697.32, "word": " between", "probability": 0.88330078125}, {"start": 697.32, "end": 698.56, "word": " mu", "probability": 0.6259765625}, {"start": 698.56, "end": 698.84, "word": " is", "probability": 0.8818359375}, {"start": 698.84, "end": 699.4, "word": " 500", "probability": 0.96337890625}, {"start": 699.4, "end": 701.2, "word": " minus", "probability": 0.9072265625}, {"start": 701.2, "end": 701.5, 
"word": " 2", "probability": 0.736328125}, {"start": 701.5, "end": 701.8, "word": " times", "probability": 0.94091796875}, {"start": 701.8, "end": 702.18, "word": " sigma,", "probability": 0.9228515625}, {"start": 703.54, "end": 704.4, "word": " 500", "probability": 0.96435546875}, {"start": 704.4, "end": 704.92, "word": " plus", "probability": 0.95166015625}, {"start": 704.92, "end": 705.14, "word": " 2", "probability": 0.97314453125}, {"start": 705.14, "end": 705.42, "word": " times", "probability": 0.9365234375}, {"start": 705.42, "end": 705.78, "word": " sigma.", "probability": 0.93603515625}, {"start": 706.52, "end": 707.12, "word": " So", "probability": 0.953125}, {"start": 707.12, "end": 707.36, "word": " that", "probability": 0.830078125}, {"start": 707.36, "end": 707.74, "word": " means", "probability": 0.9306640625}], "temperature": 1.0}, {"id": 28, "seek": 72786, "start": 708.16, "end": 727.86, "text": " 500 minus 180 is 320. 500 plus 180 is 680. So you can say that approximately 95% of all test takers scored between 320 and 680. 
Finally, you can say that", "tokens": [5923, 3175, 11971, 307, 42429, 13, 5923, 1804, 11971, 307, 1386, 4702, 13, 407, 291, 393, 584, 300, 10447, 13420, 4, 295, 439, 1500, 991, 433, 18139, 1296, 42429, 293, 1386, 4702, 13, 6288, 11, 291, 393, 584, 300], "avg_logprob": -0.1736328162252903, "compression_ratio": 1.2833333333333334, "no_speech_prob": 0.0, "words": [{"start": 708.16, "end": 708.68, "word": " 500", "probability": 0.8740234375}, {"start": 708.68, "end": 709.1, "word": " minus", "probability": 0.88916015625}, {"start": 709.1, "end": 709.54, "word": " 180", "probability": 0.3466796875}, {"start": 709.54, "end": 709.76, "word": " is", "probability": 0.9111328125}, {"start": 709.76, "end": 710.16, "word": " 320.", "probability": 0.9091796875}, {"start": 711.42, "end": 712.28, "word": " 500", "probability": 0.935546875}, {"start": 712.28, "end": 712.72, "word": " plus", "probability": 0.9462890625}, {"start": 712.72, "end": 713.1, "word": " 180", "probability": 0.91015625}, {"start": 713.1, "end": 713.36, "word": " is", "probability": 0.935546875}, {"start": 713.36, "end": 714.0, "word": " 680.", "probability": 0.872802734375}, {"start": 714.2, "end": 714.34, "word": " So", "probability": 0.94482421875}, {"start": 714.34, "end": 714.48, "word": " you", "probability": 0.8388671875}, {"start": 714.48, "end": 714.64, "word": " can", "probability": 0.94775390625}, {"start": 714.64, "end": 714.8, "word": " say", "probability": 0.666015625}, {"start": 714.8, "end": 715.1, "word": " that", "probability": 0.92724609375}, {"start": 715.1, "end": 716.1, "word": " approximately", "probability": 0.86376953125}, {"start": 716.1, "end": 716.6, "word": " 95", "probability": 0.97314453125}, {"start": 716.6, "end": 717.0, "word": "%", "probability": 0.72119140625}, {"start": 717.0, "end": 717.28, "word": " of", "probability": 0.9658203125}, {"start": 717.28, "end": 717.56, "word": " all", "probability": 0.939453125}, {"start": 717.56, "end": 717.84, "word": " test", 
"probability": 0.75537109375}, {"start": 717.84, "end": 718.32, "word": " takers", "probability": 0.810546875}, {"start": 718.32, "end": 719.08, "word": " scored", "probability": 0.8759765625}, {"start": 719.08, "end": 719.58, "word": " between", "probability": 0.873046875}, {"start": 719.58, "end": 720.84, "word": " 320", "probability": 0.88916015625}, {"start": 720.84, "end": 722.42, "word": " and", "probability": 0.9384765625}, {"start": 722.42, "end": 723.68, "word": " 680.", "probability": 0.89794921875}, {"start": 724.98, "end": 725.84, "word": " Finally,", "probability": 0.6806640625}, {"start": 726.3, "end": 727.12, "word": " you", "probability": 0.96142578125}, {"start": 727.12, "end": 727.36, "word": " can", "probability": 0.9443359375}, {"start": 727.36, "end": 727.56, "word": " say", "probability": 0.896484375}, {"start": 727.56, "end": 727.86, "word": " that", "probability": 0.927734375}], "temperature": 1.0}, {"id": 29, "seek": 75251, "start": 730.77, "end": 752.51, "text": " all of the test takers, approximately all, because when we are saying 99.7 it means just 0.3 is the rest, so you can say approximately all test takers scored between mu minus three sigma which is 90 and mu", "tokens": [439, 295, 264, 1500, 991, 433, 11, 10447, 439, 11, 570, 562, 321, 366, 1566, 11803, 13, 22, 309, 1355, 445, 1958, 13, 18, 307, 264, 1472, 11, 370, 291, 393, 584, 10447, 439, 1500, 991, 433, 18139, 1296, 2992, 3175, 1045, 12771, 597, 307, 4289, 293, 2992], "avg_logprob": -0.28523595965638454, "compression_ratio": 1.4609929078014185, "no_speech_prob": 0.0, "words": [{"start": 730.77, "end": 731.23, "word": " all", "probability": 0.393310546875}, {"start": 731.23, "end": 731.53, "word": " of", "probability": 0.95458984375}, {"start": 731.53, "end": 731.71, "word": " the", "probability": 0.92431640625}, {"start": 731.71, "end": 731.95, "word": " test", "probability": 0.87353515625}, {"start": 731.95, "end": 732.35, "word": " takers,", "probability": 0.750244140625}, 
{"start": 732.47, "end": 732.83, "word": " approximately", "probability": 0.75244140625}, {"start": 732.83, "end": 733.21, "word": " all,", "probability": 0.9287109375}, {"start": 733.31, "end": 733.57, "word": " because", "probability": 0.88134765625}, {"start": 733.57, "end": 733.99, "word": " when", "probability": 0.82470703125}, {"start": 733.99, "end": 734.49, "word": " we", "probability": 0.8330078125}, {"start": 734.49, "end": 734.63, "word": " are", "probability": 0.91748046875}, {"start": 734.63, "end": 735.03, "word": " saying", "probability": 0.88037109375}, {"start": 735.03, "end": 736.25, "word": " 99", "probability": 0.77197265625}, {"start": 736.25, "end": 736.77, "word": ".7", "probability": 0.94775390625}, {"start": 736.77, "end": 736.93, "word": " it", "probability": 0.460693359375}, {"start": 736.93, "end": 737.15, "word": " means", "probability": 0.9267578125}, {"start": 737.15, "end": 739.35, "word": " just", "probability": 0.2587890625}, {"start": 739.35, "end": 739.61, "word": " 0", "probability": 0.6376953125}, {"start": 739.61, "end": 739.77, "word": ".3", "probability": 0.989990234375}, {"start": 739.77, "end": 739.91, "word": " is", "probability": 0.5205078125}, {"start": 739.91, "end": 740.03, "word": " the", "probability": 0.87841796875}, {"start": 740.03, "end": 740.27, "word": " rest,", "probability": 0.93994140625}, {"start": 740.45, "end": 740.61, "word": " so", "probability": 0.91455078125}, {"start": 740.61, "end": 741.15, "word": " you", "probability": 0.92529296875}, {"start": 741.15, "end": 741.31, "word": " can", "probability": 0.94677734375}, {"start": 741.31, "end": 741.61, "word": " say", "probability": 0.91552734375}, {"start": 741.61, "end": 742.37, "word": " approximately", "probability": 0.85693359375}, {"start": 742.37, "end": 742.77, "word": " all", "probability": 0.9375}, {"start": 742.77, "end": 743.07, "word": " test", "probability": 0.8388671875}, {"start": 743.07, "end": 743.59, "word": " takers", "probability": 
0.866455078125}, {"start": 743.59, "end": 744.45, "word": " scored", "probability": 0.72998046875}, {"start": 744.45, "end": 744.95, "word": " between", "probability": 0.8935546875}, {"start": 744.95, "end": 746.31, "word": " mu", "probability": 0.457763671875}, {"start": 746.31, "end": 748.07, "word": " minus", "probability": 0.89453125}, {"start": 748.07, "end": 748.35, "word": " three", "probability": 0.5693359375}, {"start": 748.35, "end": 748.73, "word": " sigma", "probability": 0.916015625}, {"start": 748.73, "end": 750.15, "word": " which", "probability": 0.61279296875}, {"start": 750.15, "end": 750.35, "word": " is", "probability": 0.95263671875}, {"start": 750.35, "end": 750.73, "word": " 90", "probability": 0.7109375}, {"start": 750.73, "end": 752.05, "word": " and", "probability": 0.7626953125}, {"start": 752.05, "end": 752.51, "word": " mu", "probability": 0.84814453125}], "temperature": 1.0}, {"id": 30, "seek": 78087, "start": 753.43, "end": 780.87, "text": " It lost 3 seconds. So 500 minus 3 times 9 is 270. So that's 230. 500 plus 270 is 770. So we can say that 99.7% of all the stackers scored between 230 and 770. 
I will give another example just to make sure that you understand the meaning of this rule.", "tokens": [467, 2731, 805, 3949, 13, 407, 5923, 3175, 805, 1413, 1722, 307, 40774, 13, 407, 300, 311, 35311, 13, 5923, 1804, 40774, 307, 1614, 5867, 13, 407, 321, 393, 584, 300, 11803, 13, 22, 4, 295, 439, 264, 8630, 433, 18139, 1296, 35311, 293, 1614, 5867, 13, 286, 486, 976, 1071, 1365, 445, 281, 652, 988, 300, 291, 1223, 264, 3620, 295, 341, 4978, 13], "avg_logprob": -0.22194601821176935, "compression_ratio": 1.4342857142857144, "no_speech_prob": 0.0, "words": [{"start": 753.43, "end": 753.71, "word": " It", "probability": 0.1190185546875}, {"start": 753.71, "end": 754.01, "word": " lost", "probability": 0.52783203125}, {"start": 754.01, "end": 754.39, "word": " 3", "probability": 0.281982421875}, {"start": 754.39, "end": 754.65, "word": " seconds.", "probability": 0.271484375}, {"start": 756.47, "end": 756.67, "word": " So", "probability": 0.9365234375}, {"start": 756.67, "end": 757.13, "word": " 500", "probability": 0.89404296875}, {"start": 757.13, "end": 757.73, "word": " minus", "probability": 0.986328125}, {"start": 757.73, "end": 758.83, "word": " 3", "probability": 0.7802734375}, {"start": 758.83, "end": 759.35, "word": " times", "probability": 0.93896484375}, {"start": 759.35, "end": 759.83, "word": " 9", "probability": 0.904296875}, {"start": 759.83, "end": 760.05, "word": " is", "probability": 0.779296875}, {"start": 760.05, "end": 760.55, "word": " 270.", "probability": 0.9716796875}, {"start": 760.77, "end": 760.97, "word": " So", "probability": 0.9482421875}, {"start": 760.97, "end": 761.31, "word": " that's", "probability": 0.95947265625}, {"start": 761.31, "end": 761.75, "word": " 230.", "probability": 0.951171875}, {"start": 762.69, "end": 763.41, "word": " 500", "probability": 0.9482421875}, {"start": 763.41, "end": 763.67, "word": " plus", "probability": 0.79248046875}, {"start": 763.67, "end": 764.07, "word": " 270", "probability": 0.958984375}, 
{"start": 764.07, "end": 764.37, "word": " is", "probability": 0.935546875}, {"start": 764.37, "end": 765.03, "word": " 770.", "probability": 0.88427734375}, {"start": 765.57, "end": 765.81, "word": " So", "probability": 0.955078125}, {"start": 765.81, "end": 765.95, "word": " we", "probability": 0.4306640625}, {"start": 765.95, "end": 766.07, "word": " can", "probability": 0.9423828125}, {"start": 766.07, "end": 766.25, "word": " say", "probability": 0.69873046875}, {"start": 766.25, "end": 766.45, "word": " that", "probability": 0.923828125}, {"start": 766.45, "end": 766.77, "word": " 99", "probability": 0.412841796875}, {"start": 766.77, "end": 767.27, "word": ".7", "probability": 0.989013671875}, {"start": 767.27, "end": 767.65, "word": "%", "probability": 0.96533203125}, {"start": 767.65, "end": 767.99, "word": " of", "probability": 0.9609375}, {"start": 767.99, "end": 768.71, "word": " all", "probability": 0.94140625}, {"start": 768.71, "end": 768.93, "word": " the", "probability": 0.69873046875}, {"start": 768.93, "end": 769.47, "word": " stackers", "probability": 0.587890625}, {"start": 769.47, "end": 769.69, "word": " scored", "probability": 0.796875}, {"start": 769.69, "end": 770.19, "word": " between", "probability": 0.861328125}, {"start": 770.19, "end": 771.65, "word": " 230", "probability": 0.9501953125}, {"start": 771.65, "end": 772.13, "word": " and", "probability": 0.93798828125}, {"start": 772.13, "end": 772.97, "word": " 770.", "probability": 0.9599609375}, {"start": 774.27, "end": 774.49, "word": " I", "probability": 0.9990234375}, {"start": 774.49, "end": 774.61, "word": " will", "probability": 0.8310546875}, {"start": 774.61, "end": 774.81, "word": " give", "probability": 0.876953125}, {"start": 774.81, "end": 775.15, "word": " another", "probability": 0.91943359375}, {"start": 775.15, "end": 775.61, "word": " example", "probability": 0.97412109375}, {"start": 775.61, "end": 776.31, "word": " just", "probability": 0.61083984375}, {"start": 
776.31, "end": 776.45, "word": " to", "probability": 0.96923828125}, {"start": 776.45, "end": 776.59, "word": " make", "probability": 0.9306640625}, {"start": 776.59, "end": 776.75, "word": " sure", "probability": 0.912109375}, {"start": 776.75, "end": 776.93, "word": " that", "probability": 0.931640625}, {"start": 776.93, "end": 777.03, "word": " you", "probability": 0.9365234375}, {"start": 777.03, "end": 777.69, "word": " understand", "probability": 0.8046875}, {"start": 777.69, "end": 778.93, "word": " the", "probability": 0.9189453125}, {"start": 778.93, "end": 779.21, "word": " meaning", "probability": 0.87646484375}, {"start": 779.21, "end": 780.19, "word": " of", "probability": 0.96533203125}, {"start": 780.19, "end": 780.49, "word": " this", "probability": 0.94482421875}, {"start": 780.49, "end": 780.87, "word": " rule.", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 31, "seek": 81154, "start": 783.62, "end": 811.54, "text": " For business, a statistic goes. For business, a statistic example. Suppose the scores are bell-shaped. 
So we are assuming the data is bell-shaped.", "tokens": [1171, 1606, 11, 257, 29588, 1709, 13, 1171, 1606, 11, 257, 29588, 1365, 13, 21360, 264, 13444, 366, 4549, 12, 23103, 13, 407, 321, 366, 11926, 264, 1412, 307, 4549, 12, 23103, 13], "avg_logprob": -0.331112141994869, "compression_ratio": 1.4848484848484849, "no_speech_prob": 0.0, "words": [{"start": 783.62, "end": 784.62, "word": " For", "probability": 0.564453125}, {"start": 784.62, "end": 785.2, "word": " business,", "probability": 0.87939453125}, {"start": 786.5, "end": 788.82, "word": " a", "probability": 0.1746826171875}, {"start": 788.82, "end": 789.22, "word": " statistic", "probability": 0.54833984375}, {"start": 789.22, "end": 789.72, "word": " goes.", "probability": 0.7255859375}, {"start": 795.72, "end": 796.34, "word": " For", "probability": 0.89990234375}, {"start": 796.34, "end": 796.68, "word": " business,", "probability": 0.9169921875}, {"start": 796.82, "end": 796.92, "word": " a", "probability": 0.61865234375}, {"start": 796.92, "end": 797.18, "word": " statistic", "probability": 0.8134765625}, {"start": 797.18, "end": 797.66, "word": " example.", "probability": 0.37255859375}, {"start": 799.74, "end": 800.26, "word": " Suppose", "probability": 0.8037109375}, {"start": 800.26, "end": 800.72, "word": " the", "probability": 0.89892578125}, {"start": 800.72, "end": 802.84, "word": " scores", "probability": 0.62451171875}, {"start": 802.84, "end": 803.78, "word": " are", "probability": 0.93603515625}, {"start": 803.78, "end": 804.12, "word": " bell", "probability": 0.60546875}, {"start": 804.12, "end": 804.48, "word": "-shaped.", "probability": 0.659912109375}, {"start": 805.02, "end": 805.56, "word": " So", "probability": 0.59521484375}, {"start": 805.56, "end": 805.74, "word": " we", "probability": 0.8466796875}, {"start": 805.74, "end": 805.92, "word": " are", "probability": 0.444091796875}, {"start": 805.92, "end": 807.44, "word": " assuming", "probability": 0.89306640625}, {"start": 807.44, 
"end": 809.74, "word": " the", "probability": 0.89453125}, {"start": 809.74, "end": 810.16, "word": " data", "probability": 0.9580078125}, {"start": 810.16, "end": 811.04, "word": " is", "probability": 0.9189453125}, {"start": 811.04, "end": 811.28, "word": " bell", "probability": 0.91162109375}, {"start": 811.28, "end": 811.54, "word": "-shaped.", "probability": 0.846923828125}], "temperature": 1.0}, {"id": 32, "seek": 83843, "start": 813.73, "end": 838.43, "text": " with mean of 75 and standard deviation of 5. Also, let's assume that 100 students took the exam. So we have 100 students.", "tokens": [365, 914, 295, 9562, 293, 3832, 25163, 295, 1025, 13, 2743, 11, 718, 311, 6552, 300, 2319, 1731, 1890, 264, 1139, 13, 407, 321, 362, 2319, 1731, 13], "avg_logprob": -0.129445049269446, "compression_ratio": 1.1844660194174756, "no_speech_prob": 0.0, "words": [{"start": 813.73, "end": 814.11, "word": " with", "probability": 0.47216796875}, {"start": 814.11, "end": 814.39, "word": " mean", "probability": 0.92333984375}, {"start": 814.39, "end": 814.91, "word": " of", "probability": 0.9521484375}, {"start": 814.91, "end": 817.55, "word": " 75", "probability": 0.78955078125}, {"start": 817.55, "end": 819.67, "word": " and", "probability": 0.75390625}, {"start": 819.67, "end": 820.97, "word": " standard", "probability": 0.89990234375}, {"start": 820.97, "end": 821.37, "word": " deviation", "probability": 0.95263671875}, {"start": 821.37, "end": 821.63, "word": " of", "probability": 0.96875}, {"start": 821.63, "end": 821.95, "word": " 5.", "probability": 0.74267578125}, {"start": 824.99, "end": 825.81, "word": " Also,", "probability": 0.94287109375}, {"start": 826.11, "end": 826.39, "word": " let's", "probability": 0.964599609375}, {"start": 826.39, "end": 826.71, "word": " assume", "probability": 0.9140625}, {"start": 826.71, "end": 827.19, "word": " that", "probability": 0.931640625}, {"start": 827.19, "end": 828.09, "word": " 100", "probability": 0.88623046875}, {"start": 
828.09, "end": 828.95, "word": " students", "probability": 0.970703125}, {"start": 828.95, "end": 833.81, "word": " took", "probability": 0.87744140625}, {"start": 833.81, "end": 834.01, "word": " the", "probability": 0.919921875}, {"start": 834.01, "end": 834.27, "word": " exam.", "probability": 0.97021484375}, {"start": 835.71, "end": 836.67, "word": " So", "probability": 0.95751953125}, {"start": 836.67, "end": 837.07, "word": " we", "probability": 0.677734375}, {"start": 837.07, "end": 837.37, "word": " have", "probability": 0.94677734375}, {"start": 837.37, "end": 837.85, "word": " 100", "probability": 0.9326171875}, {"start": 837.85, "end": 838.43, "word": " students.", "probability": 0.97509765625}], "temperature": 1.0}, {"id": 33, "seek": 86558, "start": 839.72, "end": 865.58, "text": " Last year took the exam of business statistics. The mean was 75. And standard deviation was 5. And let's see how it can tell about 6 to 8% rule. It means that 6 to 8% of all the students score between mu minus sigma. 
Mu is 75.", "tokens": [5264, 1064, 1890, 264, 1139, 295, 1606, 12523, 13, 440, 914, 390, 9562, 13, 400, 3832, 25163, 390, 1025, 13, 400, 718, 311, 536, 577, 309, 393, 980, 466, 1386, 281, 1649, 4, 4978, 13, 467, 1355, 300, 1386, 281, 1649, 4, 295, 439, 264, 1731, 6175, 1296, 2992, 3175, 12771, 13, 15601, 307, 9562, 13], "avg_logprob": -0.23177083124194228, "compression_ratio": 1.41875, "no_speech_prob": 0.0, "words": [{"start": 839.72, "end": 840.08, "word": " Last", "probability": 0.6611328125}, {"start": 840.08, "end": 840.38, "word": " year", "probability": 0.93505859375}, {"start": 840.38, "end": 840.84, "word": " took", "probability": 0.39013671875}, {"start": 840.84, "end": 841.04, "word": " the", "probability": 0.90673828125}, {"start": 841.04, "end": 841.42, "word": " exam", "probability": 0.96875}, {"start": 841.42, "end": 841.8, "word": " of", "probability": 0.94384765625}, {"start": 841.8, "end": 842.24, "word": " business", "probability": 0.7861328125}, {"start": 842.24, "end": 843.0, "word": " statistics.", "probability": 0.9072265625}, {"start": 844.18, "end": 844.36, "word": " The", "probability": 0.8583984375}, {"start": 844.36, "end": 844.5, "word": " mean", "probability": 0.96826171875}, {"start": 844.5, "end": 844.82, "word": " was", "probability": 0.953125}, {"start": 844.82, "end": 845.36, "word": " 75.", "probability": 0.91357421875}, {"start": 846.24, "end": 846.34, "word": " And", "probability": 0.393310546875}, {"start": 846.34, "end": 847.24, "word": " standard", "probability": 0.548828125}, {"start": 847.24, "end": 847.58, "word": " deviation", "probability": 0.818359375}, {"start": 847.58, "end": 847.96, "word": " was", "probability": 0.94970703125}, {"start": 847.96, "end": 848.5, "word": " 5.", "probability": 0.67529296875}, {"start": 849.48, "end": 849.8, "word": " And", "probability": 0.9150390625}, {"start": 849.8, "end": 850.08, "word": " let's", "probability": 0.86474609375}, {"start": 850.08, "end": 850.38, "word": " see", 
"probability": 0.69140625}, {"start": 850.38, "end": 850.78, "word": " how", "probability": 0.8408203125}, {"start": 850.78, "end": 850.92, "word": " it", "probability": 0.416748046875}, {"start": 850.92, "end": 851.12, "word": " can", "probability": 0.92822265625}, {"start": 851.12, "end": 851.4, "word": " tell", "probability": 0.8037109375}, {"start": 851.4, "end": 851.72, "word": " about", "probability": 0.90869140625}, {"start": 851.72, "end": 852.02, "word": " 6", "probability": 0.701171875}, {"start": 852.02, "end": 852.14, "word": " to", "probability": 0.4375}, {"start": 852.14, "end": 852.3, "word": " 8", "probability": 0.9970703125}, {"start": 852.3, "end": 852.68, "word": "%", "probability": 0.64208984375}, {"start": 852.68, "end": 853.98, "word": " rule.", "probability": 0.708984375}, {"start": 854.56, "end": 854.9, "word": " It", "probability": 0.95947265625}, {"start": 854.9, "end": 855.3, "word": " means", "probability": 0.931640625}, {"start": 855.3, "end": 855.68, "word": " that", "probability": 0.93408203125}, {"start": 855.68, "end": 856.6, "word": " 6", "probability": 0.93798828125}, {"start": 856.6, "end": 856.68, "word": " to", "probability": 0.892578125}, {"start": 856.68, "end": 856.84, "word": " 8", "probability": 0.9990234375}, {"start": 856.84, "end": 857.1, "word": "%", "probability": 0.99072265625}, {"start": 857.1, "end": 857.82, "word": " of", "probability": 0.95849609375}, {"start": 857.82, "end": 858.12, "word": " all", "probability": 0.9501953125}, {"start": 858.12, "end": 858.3, "word": " the", "probability": 0.87255859375}, {"start": 858.3, "end": 858.86, "word": " students", "probability": 0.97265625}, {"start": 858.86, "end": 862.1, "word": " score", "probability": 0.49267578125}, {"start": 862.1, "end": 862.62, "word": " between", "probability": 0.86083984375}, {"start": 862.62, "end": 863.92, "word": " mu", "probability": 0.424560546875}, {"start": 863.92, "end": 864.28, "word": " minus", "probability": 0.984375}, {"start": 
864.28, "end": 864.7, "word": " sigma.", "probability": 0.93994140625}, {"start": 864.88, "end": 865.02, "word": " Mu", "probability": 0.91455078125}, {"start": 865.02, "end": 865.2, "word": " is", "probability": 0.92919921875}, {"start": 865.2, "end": 865.58, "word": " 75.", "probability": 0.95947265625}], "temperature": 1.0}, {"id": 34, "seek": 89359, "start": 866.85, "end": 893.59, "text": " minus sigma and the mu plus sigma. So that means 68 students, because we have 100, so you can say 68 students scored between 70 and 80. So 60 students out of 100 scored between 70 and 80.", "tokens": [3175, 12771, 293, 264, 2992, 1804, 12771, 13, 407, 300, 1355, 23317, 1731, 11, 570, 321, 362, 2319, 11, 370, 291, 393, 584, 23317, 1731, 18139, 1296, 5285, 293, 4688, 13, 407, 4060, 1731, 484, 295, 2319, 18139, 1296, 5285, 293, 4688, 13], "avg_logprob": -0.2563920366493138, "compression_ratio": 1.5666666666666667, "no_speech_prob": 0.0, "words": [{"start": 866.85, "end": 867.29, "word": " minus", "probability": 0.6240234375}, {"start": 867.29, "end": 867.71, "word": " sigma", "probability": 0.72998046875}, {"start": 867.71, "end": 868.65, "word": " and", "probability": 0.740234375}, {"start": 868.65, "end": 868.81, "word": " the", "probability": 0.316162109375}, {"start": 868.81, "end": 868.99, "word": " mu", "probability": 0.60400390625}, {"start": 868.99, "end": 869.31, "word": " plus", "probability": 0.95166015625}, {"start": 869.31, "end": 869.61, "word": " sigma.", "probability": 0.91162109375}, {"start": 873.59, "end": 874.47, "word": " So", "probability": 0.87353515625}, {"start": 874.47, "end": 874.71, "word": " that", "probability": 0.8173828125}, {"start": 874.71, "end": 875.05, "word": " means", "probability": 0.9306640625}, {"start": 875.05, "end": 875.79, "word": " 68", "probability": 0.8603515625}, {"start": 875.79, "end": 876.85, "word": " students,", "probability": 0.974609375}, {"start": 877.15, "end": 877.47, "word": " because", "probability": 0.890625}, 
{"start": 877.47, "end": 877.61, "word": " we", "probability": 0.90673828125}, {"start": 877.61, "end": 877.75, "word": " have", "probability": 0.91357421875}, {"start": 877.75, "end": 878.01, "word": " 100,", "probability": 0.1400146484375}, {"start": 879.01, "end": 879.29, "word": " so", "probability": 0.61328125}, {"start": 879.29, "end": 879.41, "word": " you", "probability": 0.90283203125}, {"start": 879.41, "end": 879.57, "word": " can", "probability": 0.93505859375}, {"start": 879.57, "end": 879.77, "word": " say", "probability": 0.60888671875}, {"start": 879.77, "end": 880.29, "word": " 68", "probability": 0.96044921875}, {"start": 880.29, "end": 880.89, "word": " students", "probability": 0.97998046875}, {"start": 880.89, "end": 881.93, "word": " scored", "probability": 0.71435546875}, {"start": 881.93, "end": 882.37, "word": " between", "probability": 0.8720703125}, {"start": 882.37, "end": 883.15, "word": " 70", "probability": 0.6943359375}, {"start": 883.15, "end": 883.67, "word": " and", "probability": 0.904296875}, {"start": 883.67, "end": 885.41, "word": " 80.", "probability": 0.8740234375}, {"start": 886.61, "end": 887.49, "word": " So", "probability": 0.916015625}, {"start": 887.49, "end": 887.81, "word": " 60", "probability": 0.82080078125}, {"start": 887.81, "end": 888.33, "word": " students", "probability": 0.970703125}, {"start": 888.33, "end": 888.67, "word": " out", "probability": 0.8837890625}, {"start": 888.67, "end": 888.83, "word": " of", "probability": 0.974609375}, {"start": 888.83, "end": 889.29, "word": " 100", "probability": 0.927734375}, {"start": 889.29, "end": 890.65, "word": " scored", "probability": 0.81640625}, {"start": 890.65, "end": 891.17, "word": " between", "probability": 0.8720703125}, {"start": 891.17, "end": 892.61, "word": " 70", "probability": 0.98681640625}, {"start": 892.61, "end": 893.29, "word": " and", "probability": 0.94287109375}, {"start": 893.29, "end": 893.59, "word": " 80.", "probability": 0.98583984375}], 
"temperature": 1.0}, {"id": 35, "seek": 91377, "start": 894.95, "end": 913.77, "text": " About 95 students out of 100 scored between 75 minus 2 times 5. 75 plus 2 times 5. So that gives 65.", "tokens": [7769, 13420, 1731, 484, 295, 2319, 18139, 1296, 9562, 3175, 568, 1413, 1025, 13, 9562, 1804, 568, 1413, 1025, 13, 407, 300, 2709, 11624, 13], "avg_logprob": -0.2546574633855086, "compression_ratio": 1.1744186046511629, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 894.95, "end": 895.25, "word": " About", "probability": 0.48095703125}, {"start": 895.25, "end": 897.19, "word": " 95", "probability": 0.88623046875}, {"start": 897.19, "end": 897.89, "word": " students", "probability": 0.90087890625}, {"start": 897.89, "end": 898.21, "word": " out", "probability": 0.8623046875}, {"start": 898.21, "end": 898.39, "word": " of", "probability": 0.97412109375}, {"start": 898.39, "end": 899.09, "word": " 100", "probability": 0.89111328125}, {"start": 899.09, "end": 900.77, "word": " scored", "probability": 0.62451171875}, {"start": 900.77, "end": 901.23, "word": " between", "probability": 0.90087890625}, {"start": 901.23, "end": 902.99, "word": " 75", "probability": 0.6630859375}, {"start": 902.99, "end": 903.57, "word": " minus", "probability": 0.92919921875}, {"start": 903.57, "end": 903.89, "word": " 2", "probability": 0.70849609375}, {"start": 903.89, "end": 904.17, "word": " times", "probability": 0.8994140625}, {"start": 904.17, "end": 904.53, "word": " 5.", "probability": 0.95361328125}, {"start": 905.55, "end": 906.71, "word": " 75", "probability": 0.78515625}, {"start": 906.71, "end": 907.27, "word": " plus", "probability": 0.9345703125}, {"start": 907.27, "end": 907.55, "word": " 2", "probability": 0.96923828125}, {"start": 907.55, "end": 908.29, "word": " times", "probability": 0.8154296875}, {"start": 908.29, "end": 908.73, "word": " 5.", "probability": 0.95947265625}, {"start": 909.85, "end": 910.73, "word": " So", "probability": 0.908203125}, 
{"start": 910.73, "end": 911.01, "word": " that", "probability": 0.8486328125}, {"start": 911.01, "end": 912.19, "word": " gives", "probability": 0.62890625}, {"start": 912.19, "end": 913.77, "word": " 65.", "probability": 0.951171875}], "temperature": 1.0}, {"id": 36, "seek": 94461, "start": 915.55, "end": 944.61, "text": " The minimum and the maximum is 85. So you can say that around 95 students scored between 65 and 85. Finally, maybe you can see all students. Because when you're saying 99.7, it means almost all the students scored between 75 minus 3 times Y.", "tokens": [440, 7285, 293, 264, 6674, 307, 14695, 13, 407, 291, 393, 584, 300, 926, 13420, 1731, 18139, 1296, 11624, 293, 14695, 13, 6288, 11, 1310, 291, 393, 536, 439, 1731, 13, 1436, 562, 291, 434, 1566, 11803, 13, 22, 11, 309, 1355, 1920, 439, 264, 1731, 18139, 1296, 9562, 3175, 805, 1413, 398, 13], "avg_logprob": -0.2305397711016915, "compression_ratio": 1.5031055900621118, "no_speech_prob": 0.0, "words": [{"start": 915.55, "end": 916.19, "word": " The", "probability": 0.4423828125}, {"start": 916.19, "end": 916.47, "word": " minimum", "probability": 0.91845703125}, {"start": 916.47, "end": 917.17, "word": " and", "probability": 0.80908203125}, {"start": 917.17, "end": 917.33, "word": " the", "probability": 0.83203125}, {"start": 917.33, "end": 917.71, "word": " maximum", "probability": 0.9111328125}, {"start": 917.71, "end": 918.09, "word": " is", "probability": 0.8212890625}, {"start": 918.09, "end": 918.41, "word": " 85.", "probability": 0.9169921875}, {"start": 919.41, "end": 920.19, "word": " So", "probability": 0.9052734375}, {"start": 920.19, "end": 920.53, "word": " you", "probability": 0.77880859375}, {"start": 920.53, "end": 920.77, "word": " can", "probability": 0.93310546875}, {"start": 920.77, "end": 920.95, "word": " say", "probability": 0.63427734375}, {"start": 920.95, "end": 921.29, "word": " that", "probability": 0.91943359375}, {"start": 921.29, "end": 921.73, "word": " around", 
"probability": 0.79443359375}, {"start": 921.73, "end": 922.29, "word": " 95", "probability": 0.9638671875}, {"start": 922.29, "end": 923.71, "word": " students", "probability": 0.48095703125}, {"start": 923.71, "end": 924.61, "word": " scored", "probability": 0.87158203125}, {"start": 924.61, "end": 924.95, "word": " between", "probability": 0.82861328125}, {"start": 924.95, "end": 925.39, "word": " 65", "probability": 0.97998046875}, {"start": 925.39, "end": 925.65, "word": " and", "probability": 0.93310546875}, {"start": 925.65, "end": 925.93, "word": " 85.", "probability": 0.78076171875}, {"start": 926.65, "end": 927.43, "word": " Finally,", "probability": 0.642578125}, {"start": 928.13, "end": 929.23, "word": " maybe", "probability": 0.87841796875}, {"start": 929.23, "end": 929.81, "word": " you", "probability": 0.90283203125}, {"start": 929.81, "end": 930.01, "word": " can", "probability": 0.94482421875}, {"start": 930.01, "end": 930.21, "word": " see", "probability": 0.7568359375}, {"start": 930.21, "end": 930.61, "word": " all", "probability": 0.84814453125}, {"start": 930.61, "end": 931.93, "word": " students.", "probability": 0.833984375}, {"start": 933.11, "end": 933.51, "word": " Because", "probability": 0.92626953125}, {"start": 933.51, "end": 933.69, "word": " when", "probability": 0.8291015625}, {"start": 933.69, "end": 933.99, "word": " you're", "probability": 0.7080078125}, {"start": 933.99, "end": 934.35, "word": " saying", "probability": 0.8876953125}, {"start": 934.35, "end": 934.65, "word": " 99", "probability": 0.274169921875}, {"start": 934.65, "end": 935.25, "word": ".7,", "probability": 0.772216796875}, {"start": 936.47, "end": 936.73, "word": " it", "probability": 0.828125}, {"start": 936.73, "end": 937.09, "word": " means", "probability": 0.91357421875}, {"start": 937.09, "end": 938.19, "word": " almost", "probability": 0.78076171875}, {"start": 938.19, "end": 938.47, "word": " all", "probability": 0.9453125}, {"start": 938.47, "end": 
938.65, "word": " the", "probability": 0.85009765625}, {"start": 938.65, "end": 939.11, "word": " students", "probability": 0.97412109375}, {"start": 939.11, "end": 940.25, "word": " scored", "probability": 0.8486328125}, {"start": 940.25, "end": 940.73, "word": " between", "probability": 0.88232421875}, {"start": 940.73, "end": 943.23, "word": " 75", "probability": 0.9306640625}, {"start": 943.23, "end": 943.71, "word": " minus", "probability": 0.97021484375}, {"start": 943.71, "end": 944.05, "word": " 3", "probability": 0.71533203125}, {"start": 944.05, "end": 944.35, "word": " times", "probability": 0.90087890625}, {"start": 944.35, "end": 944.61, "word": " Y.", "probability": 0.290283203125}], "temperature": 1.0}, {"id": 37, "seek": 97179, "start": 946.31, "end": 971.79, "text": " and 75 plus three times one. So that's six days in two nights. Now let's look carefully at these three intervals. The first one is seven to eight, the other one 65 to 85, then six to 90. When we are more confident,", "tokens": [293, 9562, 1804, 1045, 1413, 472, 13, 407, 300, 311, 2309, 1708, 294, 732, 13249, 13, 823, 718, 311, 574, 7500, 412, 613, 1045, 26651, 13, 440, 700, 472, 307, 3407, 281, 3180, 11, 264, 661, 472, 11624, 281, 14695, 11, 550, 2309, 281, 4289, 13, 1133, 321, 366, 544, 6679, 11], "avg_logprob": -0.31574291553137435, "compression_ratio": 1.3870967741935485, "no_speech_prob": 0.0, "words": [{"start": 946.31, "end": 946.73, "word": " and", "probability": 0.2266845703125}, {"start": 946.73, "end": 947.21, "word": " 75", "probability": 0.6943359375}, {"start": 947.21, "end": 948.09, "word": " plus", "probability": 0.86572265625}, {"start": 948.09, "end": 948.41, "word": " three", "probability": 0.36474609375}, {"start": 948.41, "end": 948.77, "word": " times", "probability": 0.91455078125}, {"start": 948.77, "end": 949.05, "word": " one.", "probability": 0.41064453125}, {"start": 950.71, "end": 950.95, "word": " So", "probability": 0.7919921875}, {"start": 950.95, "end": 
951.19, "word": " that's", "probability": 0.80712890625}, {"start": 951.19, "end": 951.45, "word": " six", "probability": 0.416259765625}, {"start": 951.45, "end": 951.77, "word": " days", "probability": 0.476806640625}, {"start": 951.77, "end": 952.17, "word": " in", "probability": 0.177978515625}, {"start": 952.17, "end": 952.97, "word": " two", "probability": 0.66796875}, {"start": 952.97, "end": 953.17, "word": " nights.", "probability": 0.7861328125}, {"start": 954.15, "end": 954.77, "word": " Now", "probability": 0.8291015625}, {"start": 954.77, "end": 955.01, "word": " let's", "probability": 0.851318359375}, {"start": 955.01, "end": 955.21, "word": " look", "probability": 0.96142578125}, {"start": 955.21, "end": 955.59, "word": " carefully", "probability": 0.70361328125}, {"start": 955.59, "end": 955.83, "word": " at", "probability": 0.95849609375}, {"start": 955.83, "end": 956.17, "word": " these", "probability": 0.84423828125}, {"start": 956.17, "end": 959.15, "word": " three", "probability": 0.943359375}, {"start": 959.15, "end": 959.89, "word": " intervals.", "probability": 0.84375}, {"start": 962.15, "end": 962.77, "word": " The", "probability": 0.88427734375}, {"start": 962.77, "end": 963.01, "word": " first", "probability": 0.8798828125}, {"start": 963.01, "end": 963.27, "word": " one", "probability": 0.92578125}, {"start": 963.27, "end": 964.03, "word": " is", "probability": 0.451904296875}, {"start": 964.03, "end": 964.33, "word": " seven", "probability": 0.82275390625}, {"start": 964.33, "end": 964.49, "word": " to", "probability": 0.9658203125}, {"start": 964.49, "end": 964.71, "word": " eight,", "probability": 0.693359375}, {"start": 964.81, "end": 964.91, "word": " the", "probability": 0.75}, {"start": 964.91, "end": 965.19, "word": " other", "probability": 0.8837890625}, {"start": 965.19, "end": 965.47, "word": " one", "probability": 0.91455078125}, {"start": 965.47, "end": 966.09, "word": " 65", "probability": 0.67919921875}, {"start": 966.09, 
"end": 966.27, "word": " to", "probability": 0.491943359375}, {"start": 966.27, "end": 966.77, "word": " 85,", "probability": 0.9716796875}, {"start": 966.93, "end": 967.13, "word": " then", "probability": 0.5166015625}, {"start": 967.13, "end": 967.45, "word": " six", "probability": 0.87939453125}, {"start": 967.45, "end": 967.65, "word": " to", "probability": 0.96875}, {"start": 967.65, "end": 967.93, "word": " 90.", "probability": 0.72216796875}, {"start": 970.11, "end": 970.73, "word": " When", "probability": 0.89453125}, {"start": 970.73, "end": 970.89, "word": " we", "probability": 0.9462890625}, {"start": 970.89, "end": 971.05, "word": " are", "probability": 0.8642578125}, {"start": 971.05, "end": 971.31, "word": " more", "probability": 0.9375}, {"start": 971.31, "end": 971.79, "word": " confident,", "probability": 0.8818359375}], "temperature": 1.0}, {"id": 38, "seek": 100409, "start": 975.17, "end": 1004.09, "text": " When we are more confident here for 99.7%, the interval becomes wider. So this is the widest interval. Because here, the length of the interval is around 10. The other one is 20. Here is 30. So the last interval has the highest width. 
So as the confidence coefficient", "tokens": [1133, 321, 366, 544, 6679, 510, 337, 11803, 13, 22, 8923, 264, 15035, 3643, 11842, 13, 407, 341, 307, 264, 5274, 377, 15035, 13, 1436, 510, 11, 264, 4641, 295, 264, 15035, 307, 926, 1266, 13, 440, 661, 472, 307, 945, 13, 1692, 307, 2217, 13, 407, 264, 1036, 15035, 575, 264, 6343, 11402, 13, 407, 382, 264, 6687, 17619], "avg_logprob": -0.15048668521349548, "compression_ratio": 1.5857988165680474, "no_speech_prob": 0.0, "words": [{"start": 975.17, "end": 975.47, "word": " When", "probability": 0.7431640625}, {"start": 975.47, "end": 975.65, "word": " we", "probability": 0.953125}, {"start": 975.65, "end": 975.81, "word": " are", "probability": 0.93603515625}, {"start": 975.81, "end": 976.07, "word": " more", "probability": 0.91552734375}, {"start": 976.07, "end": 976.49, "word": " confident", "probability": 0.95849609375}, {"start": 976.49, "end": 977.41, "word": " here", "probability": 0.4765625}, {"start": 977.41, "end": 977.93, "word": " for", "probability": 0.258056640625}, {"start": 977.93, "end": 978.77, "word": " 99", "probability": 0.91162109375}, {"start": 978.77, "end": 979.81, "word": ".7%,", "probability": 0.88330078125}, {"start": 979.81, "end": 980.63, "word": " the", "probability": 0.8984375}, {"start": 980.63, "end": 981.03, "word": " interval", "probability": 0.9677734375}, {"start": 981.03, "end": 982.49, "word": " becomes", "probability": 0.8828125}, {"start": 982.49, "end": 984.07, "word": " wider.", "probability": 0.87939453125}, {"start": 984.65, "end": 984.89, "word": " So", "probability": 0.85791015625}, {"start": 984.89, "end": 985.09, "word": " this", "probability": 0.88916015625}, {"start": 985.09, "end": 985.17, "word": " is", "probability": 0.94140625}, {"start": 985.17, "end": 985.33, "word": " the", "probability": 0.92236328125}, {"start": 985.33, "end": 985.93, "word": " widest", "probability": 0.9248046875}, {"start": 985.93, "end": 988.23, "word": " interval.", "probability": 
0.9619140625}, {"start": 988.91, "end": 989.67, "word": " Because", "probability": 0.88232421875}, {"start": 989.67, "end": 989.87, "word": " here,", "probability": 0.7919921875}, {"start": 989.97, "end": 990.17, "word": " the", "probability": 0.91748046875}, {"start": 990.17, "end": 990.53, "word": " length", "probability": 0.85107421875}, {"start": 990.53, "end": 990.91, "word": " of", "probability": 0.966796875}, {"start": 990.91, "end": 991.05, "word": " the", "probability": 0.92236328125}, {"start": 991.05, "end": 991.43, "word": " interval", "probability": 0.96435546875}, {"start": 991.43, "end": 991.65, "word": " is", "probability": 0.94287109375}, {"start": 991.65, "end": 991.93, "word": " around", "probability": 0.93359375}, {"start": 991.93, "end": 992.25, "word": " 10.", "probability": 0.86572265625}, {"start": 993.29, "end": 993.67, "word": " The", "probability": 0.8828125}, {"start": 993.67, "end": 993.91, "word": " other", "probability": 0.88818359375}, {"start": 993.91, "end": 994.11, "word": " one", "probability": 0.921875}, {"start": 994.11, "end": 994.27, "word": " is", "probability": 0.9443359375}, {"start": 994.27, "end": 994.65, "word": " 20.", "probability": 0.96435546875}, {"start": 995.09, "end": 995.37, "word": " Here", "probability": 0.84814453125}, {"start": 995.37, "end": 995.53, "word": " is", "probability": 0.607421875}, {"start": 995.53, "end": 995.87, "word": " 30.", "probability": 0.951171875}, {"start": 996.57, "end": 997.09, "word": " So", "probability": 0.90771484375}, {"start": 997.09, "end": 997.23, "word": " the", "probability": 0.9169921875}, {"start": 997.23, "end": 997.51, "word": " last", "probability": 0.8779296875}, {"start": 997.51, "end": 998.11, "word": " interval", "probability": 0.97216796875}, {"start": 998.11, "end": 998.61, "word": " has", "probability": 0.9404296875}, {"start": 998.61, "end": 998.91, "word": " the", "probability": 0.90869140625}, {"start": 998.91, "end": 999.39, "word": " highest", 
"probability": 0.943359375}, {"start": 999.39, "end": 1000.23, "word": " width.", "probability": 0.8232421875}, {"start": 1001.23, "end": 1001.99, "word": " So", "probability": 0.9599609375}, {"start": 1001.99, "end": 1002.39, "word": " as", "probability": 0.92919921875}, {"start": 1002.39, "end": 1002.57, "word": " the", "probability": 0.9013671875}, {"start": 1002.57, "end": 1003.13, "word": " confidence", "probability": 0.97314453125}, {"start": 1003.13, "end": 1004.09, "word": " coefficient", "probability": 0.95263671875}], "temperature": 1.0}, {"id": 39, "seek": 102924, "start": 1005.04, "end": 1029.24, "text": " increases, the length of the interval becomes larger and larger because it starts with 10, 20, and we end with 30. So that's another example of empirical load. And again, here we assume the data is bell shape. Let's move.", "tokens": [8637, 11, 264, 4641, 295, 264, 15035, 3643, 4833, 293, 4833, 570, 309, 3719, 365, 1266, 11, 945, 11, 293, 321, 917, 365, 2217, 13, 407, 300, 311, 1071, 1365, 295, 31886, 3677, 13, 400, 797, 11, 510, 321, 6552, 264, 1412, 307, 4549, 3909, 13, 961, 311, 1286, 13], "avg_logprob": -0.23054534605905122, "compression_ratio": 1.4050632911392404, "no_speech_prob": 0.0, "words": [{"start": 1005.0400000000001, "end": 1005.72, "word": " increases,", "probability": 0.3544921875}, {"start": 1007.08, "end": 1007.34, "word": " the", "probability": 0.9013671875}, {"start": 1007.34, "end": 1008.18, "word": " length", "probability": 0.826171875}, {"start": 1008.18, "end": 1008.38, "word": " of", "probability": 0.9609375}, {"start": 1008.38, "end": 1008.54, "word": " the", "probability": 0.916015625}, {"start": 1008.54, "end": 1008.9, "word": " interval", "probability": 0.9716796875}, {"start": 1008.9, "end": 1009.58, "word": " becomes", "probability": 0.87109375}, {"start": 1009.58, "end": 1012.4, "word": " larger", "probability": 0.837890625}, {"start": 1012.4, "end": 1012.74, "word": " and", "probability": 0.90771484375}, {"start": 
1012.74, "end": 1013.04, "word": " larger", "probability": 0.9541015625}, {"start": 1013.04, "end": 1013.52, "word": " because", "probability": 0.436767578125}, {"start": 1013.52, "end": 1014.08, "word": " it", "probability": 0.94677734375}, {"start": 1014.08, "end": 1014.5, "word": " starts", "probability": 0.85791015625}, {"start": 1014.5, "end": 1014.72, "word": " with", "probability": 0.90234375}, {"start": 1014.72, "end": 1015.06, "word": " 10,", "probability": 0.80419921875}, {"start": 1015.26, "end": 1015.64, "word": " 20,", "probability": 0.939453125}, {"start": 1015.82, "end": 1016.2, "word": " and", "probability": 0.9365234375}, {"start": 1016.2, "end": 1016.4, "word": " we", "probability": 0.849609375}, {"start": 1016.4, "end": 1016.62, "word": " end", "probability": 0.91552734375}, {"start": 1016.62, "end": 1017.16, "word": " with", "probability": 0.88232421875}, {"start": 1017.16, "end": 1018.0, "word": " 30.", "probability": 0.27099609375}, {"start": 1018.6, "end": 1018.88, "word": " So", "probability": 0.93896484375}, {"start": 1018.88, "end": 1019.16, "word": " that's", "probability": 0.912353515625}, {"start": 1019.16, "end": 1019.4, "word": " another", "probability": 0.916015625}, {"start": 1019.4, "end": 1019.94, "word": " example", "probability": 0.97216796875}, {"start": 1019.94, "end": 1020.8, "word": " of", "probability": 0.95947265625}, {"start": 1020.8, "end": 1023.14, "word": " empirical", "probability": 0.8671875}, {"start": 1023.14, "end": 1023.42, "word": " load.", "probability": 0.5419921875}, {"start": 1023.8, "end": 1024.0, "word": " And", "probability": 0.94873046875}, {"start": 1024.0, "end": 1024.22, "word": " again,", "probability": 0.91162109375}, {"start": 1024.3, "end": 1024.46, "word": " here", "probability": 0.85400390625}, {"start": 1024.46, "end": 1024.6, "word": " we", "probability": 0.66796875}, {"start": 1024.6, "end": 1025.06, "word": " assume", "probability": 0.87060546875}, {"start": 1025.06, "end": 1025.34, "word": 
" the", "probability": 0.81884765625}, {"start": 1025.34, "end": 1025.58, "word": " data", "probability": 0.92431640625}, {"start": 1025.58, "end": 1025.98, "word": " is", "probability": 0.92822265625}, {"start": 1025.98, "end": 1027.32, "word": " bell", "probability": 0.404052734375}, {"start": 1027.32, "end": 1027.6, "word": " shape.", "probability": 0.5791015625}, {"start": 1028.24, "end": 1028.92, "word": " Let's", "probability": 0.959716796875}, {"start": 1028.92, "end": 1029.24, "word": " move.", "probability": 0.94189453125}], "temperature": 1.0}, {"id": 40, "seek": 104794, "start": 1030.08, "end": 1047.94, "text": " to another one when the data is not in shape. I mean, if we have data and that data is not symmetric. So that rule is no longer valid. So we have to use another rule. It's called shape-example rule.", "tokens": [281, 1071, 472, 562, 264, 1412, 307, 406, 294, 3909, 13, 286, 914, 11, 498, 321, 362, 1412, 293, 300, 1412, 307, 406, 32330, 13, 407, 300, 4978, 307, 572, 2854, 7363, 13, 407, 321, 362, 281, 764, 1071, 4978, 13, 467, 311, 1219, 3909, 12, 3121, 335, 781, 4978, 13], "avg_logprob": -0.2854567301961092, "compression_ratio": 1.5307692307692307, "no_speech_prob": 8.940696716308594e-07, "words": [{"start": 1030.08, "end": 1030.4, "word": " to", "probability": 0.19482421875}, {"start": 1030.4, "end": 1031.02, "word": " another", "probability": 0.90673828125}, {"start": 1031.02, "end": 1031.32, "word": " one", "probability": 0.92431640625}, {"start": 1031.32, "end": 1031.62, "word": " when", "probability": 0.76416015625}, {"start": 1031.62, "end": 1032.34, "word": " the", "probability": 0.75439453125}, {"start": 1032.34, "end": 1032.74, "word": " data", "probability": 0.90185546875}, {"start": 1032.74, "end": 1033.46, "word": " is", "probability": 0.9423828125}, {"start": 1033.46, "end": 1034.42, "word": " not", "probability": 0.90966796875}, {"start": 1034.42, "end": 1034.58, "word": " in", "probability": 0.1075439453125}, {"start": 1034.58, 
"end": 1034.84, "word": " shape.", "probability": 0.8037109375}, {"start": 1034.96, "end": 1035.08, "word": " I", "probability": 0.8935546875}, {"start": 1035.08, "end": 1035.32, "word": " mean,", "probability": 0.96435546875}, {"start": 1035.6, "end": 1036.52, "word": " if", "probability": 0.9541015625}, {"start": 1036.52, "end": 1036.66, "word": " we", "probability": 0.44873046875}, {"start": 1036.66, "end": 1036.8, "word": " have", "probability": 0.94482421875}, {"start": 1036.8, "end": 1037.2, "word": " data", "probability": 0.93994140625}, {"start": 1037.2, "end": 1038.32, "word": " and", "probability": 0.57470703125}, {"start": 1038.32, "end": 1038.56, "word": " that", "probability": 0.9365234375}, {"start": 1038.56, "end": 1038.88, "word": " data", "probability": 0.9326171875}, {"start": 1038.88, "end": 1039.1, "word": " is", "probability": 0.9482421875}, {"start": 1039.1, "end": 1039.3, "word": " not", "probability": 0.947265625}, {"start": 1039.3, "end": 1039.74, "word": " symmetric.", "probability": 0.56005859375}, {"start": 1041.74, "end": 1041.84, "word": " So", "probability": 0.493896484375}, {"start": 1041.84, "end": 1042.14, "word": " that", "probability": 0.81689453125}, {"start": 1042.14, "end": 1042.46, "word": " rule", "probability": 0.81689453125}, {"start": 1042.46, "end": 1042.66, "word": " is", "probability": 0.94970703125}, {"start": 1042.66, "end": 1042.88, "word": " no", "probability": 0.939453125}, {"start": 1042.88, "end": 1043.16, "word": " longer", "probability": 0.92578125}, {"start": 1043.16, "end": 1043.56, "word": " valid.", "probability": 0.93310546875}, {"start": 1043.72, "end": 1043.84, "word": " So", "probability": 0.94287109375}, {"start": 1043.84, "end": 1043.96, "word": " we", "probability": 0.8701171875}, {"start": 1043.96, "end": 1044.1, "word": " have", "probability": 0.9443359375}, {"start": 1044.1, "end": 1044.22, "word": " to", "probability": 0.9658203125}, {"start": 1044.22, "end": 1044.44, "word": " use", 
"probability": 0.87451171875}, {"start": 1044.44, "end": 1044.82, "word": " another", "probability": 0.92333984375}, {"start": 1044.82, "end": 1045.2, "word": " rule.", "probability": 0.93212890625}, {"start": 1045.9, "end": 1046.2, "word": " It's", "probability": 0.96142578125}, {"start": 1046.2, "end": 1046.66, "word": " called", "probability": 0.88720703125}, {"start": 1046.66, "end": 1047.26, "word": " shape", "probability": 0.7001953125}, {"start": 1047.26, "end": 1047.72, "word": "-example", "probability": 0.563507080078125}, {"start": 1047.72, "end": 1047.94, "word": " rule.", "probability": 0.8779296875}], "temperature": 1.0}, {"id": 41, "seek": 108291, "start": 1057.45, "end": 1082.91, "text": " Any questions before we move to the next topic? At shape and shape rule, it says that regardless of how the data are distributed, I mean, if the data is not symmetric or not bell-shaped, then we can say that at least", "tokens": [2639, 1651, 949, 321, 1286, 281, 264, 958, 4829, 30, 1711, 3909, 293, 3909, 4978, 11, 309, 1619, 300, 10060, 295, 577, 264, 1412, 366, 12631, 11, 286, 914, 11, 498, 264, 1412, 307, 406, 32330, 420, 406, 4549, 12, 23103, 11, 550, 321, 393, 584, 300, 412, 1935], "avg_logprob": -0.22796875447034837, "compression_ratio": 1.4466666666666668, "no_speech_prob": 0.0, "words": [{"start": 1057.45, "end": 1057.69, "word": " Any", "probability": 0.85205078125}, {"start": 1057.69, "end": 1058.03, "word": " questions", "probability": 0.580078125}, {"start": 1058.03, "end": 1059.33, "word": " before", "probability": 0.546875}, {"start": 1059.33, "end": 1059.79, "word": " we", "probability": 0.95458984375}, {"start": 1059.79, "end": 1060.35, "word": " move", "probability": 0.93310546875}, {"start": 1060.35, "end": 1060.53, "word": " to", "probability": 0.92822265625}, {"start": 1060.53, "end": 1060.71, "word": " the", "probability": 0.91552734375}, {"start": 1060.71, "end": 1060.99, "word": " next", "probability": 0.94140625}, {"start": 1060.99, "end": 
1061.61, "word": " topic?", "probability": 0.93994140625}, {"start": 1064.39, "end": 1064.71, "word": " At", "probability": 0.26513671875}, {"start": 1064.71, "end": 1064.89, "word": " shape", "probability": 0.65380859375}, {"start": 1064.89, "end": 1065.01, "word": " and", "probability": 0.6064453125}, {"start": 1065.01, "end": 1065.21, "word": " shape", "probability": 0.66552734375}, {"start": 1065.21, "end": 1065.45, "word": " rule,", "probability": 0.468505859375}, {"start": 1065.63, "end": 1065.71, "word": " it", "probability": 0.93017578125}, {"start": 1065.71, "end": 1065.95, "word": " says", "probability": 0.87646484375}, {"start": 1065.95, "end": 1066.25, "word": " that", "probability": 0.89208984375}, {"start": 1066.25, "end": 1068.15, "word": " regardless", "probability": 0.7509765625}, {"start": 1068.15, "end": 1069.21, "word": " of", "probability": 0.96435546875}, {"start": 1069.21, "end": 1069.49, "word": " how", "probability": 0.935546875}, {"start": 1069.49, "end": 1069.69, "word": " the", "probability": 0.91796875}, {"start": 1069.69, "end": 1070.01, "word": " data", "probability": 0.94921875}, {"start": 1070.01, "end": 1070.33, "word": " are", "probability": 0.92822265625}, {"start": 1070.33, "end": 1071.03, "word": " distributed,", "probability": 0.91845703125}, {"start": 1072.07, "end": 1072.29, "word": " I", "probability": 0.814453125}, {"start": 1072.29, "end": 1072.47, "word": " mean,", "probability": 0.96630859375}, {"start": 1073.47, "end": 1073.75, "word": " if", "probability": 0.94873046875}, {"start": 1073.75, "end": 1073.89, "word": " the", "probability": 0.91748046875}, {"start": 1073.89, "end": 1074.11, "word": " data", "probability": 0.943359375}, {"start": 1074.11, "end": 1074.31, "word": " is", "probability": 0.89892578125}, {"start": 1074.31, "end": 1074.53, "word": " not", "probability": 0.9423828125}, {"start": 1074.53, "end": 1074.99, "word": " symmetric", "probability": 0.806640625}, {"start": 1074.99, "end": 1078.19, "word": 
" or", "probability": 0.71728515625}, {"start": 1078.19, "end": 1078.51, "word": " not", "probability": 0.9365234375}, {"start": 1078.51, "end": 1078.75, "word": " bell", "probability": 0.7421875}, {"start": 1078.75, "end": 1079.19, "word": "-shaped,", "probability": 0.720703125}, {"start": 1080.05, "end": 1080.35, "word": " then", "probability": 0.8466796875}, {"start": 1080.35, "end": 1080.53, "word": " we", "probability": 0.9501953125}, {"start": 1080.53, "end": 1080.75, "word": " can", "probability": 0.94482421875}, {"start": 1080.75, "end": 1080.95, "word": " say", "probability": 0.94384765625}, {"start": 1080.95, "end": 1081.23, "word": " that", "probability": 0.93701171875}, {"start": 1081.23, "end": 1082.51, "word": " at", "probability": 0.7900390625}, {"start": 1082.51, "end": 1082.91, "word": " least", "probability": 0.95849609375}], "temperature": 1.0}, {"id": 42, "seek": 111067, "start": 1085.15, "end": 1110.67, "text": " Instead of saying 68, 95, or 99.7, just say around 1 minus 1 over k squared. Multiply this by 100. All of the values will fall within k. So k is number of standard deviations. 
I mean number of signals.", "tokens": [7156, 295, 1566, 23317, 11, 13420, 11, 420, 11803, 13, 22, 11, 445, 584, 926, 502, 3175, 502, 670, 350, 8889, 13, 31150, 356, 341, 538, 2319, 13, 1057, 295, 264, 4190, 486, 2100, 1951, 350, 13, 407, 350, 307, 1230, 295, 3832, 31219, 763, 13, 286, 914, 1230, 295, 12354, 13], "avg_logprob": -0.19074292115445407, "compression_ratio": 1.286624203821656, "no_speech_prob": 0.0, "words": [{"start": 1085.15, "end": 1085.59, "word": " Instead", "probability": 0.50048828125}, {"start": 1085.59, "end": 1085.81, "word": " of", "probability": 0.96923828125}, {"start": 1085.81, "end": 1086.09, "word": " saying", "probability": 0.86328125}, {"start": 1086.09, "end": 1086.69, "word": " 68,", "probability": 0.77978515625}, {"start": 1086.85, "end": 1087.27, "word": " 95,", "probability": 0.9580078125}, {"start": 1087.43, "end": 1087.53, "word": " or", "probability": 0.85986328125}, {"start": 1087.53, "end": 1087.79, "word": " 99", "probability": 0.71533203125}, {"start": 1087.79, "end": 1088.45, "word": ".7,", "probability": 0.953857421875}, {"start": 1089.07, "end": 1089.43, "word": " just", "probability": 0.90869140625}, {"start": 1089.43, "end": 1089.77, "word": " say", "probability": 0.90625}, {"start": 1089.77, "end": 1090.99, "word": " around", "probability": 0.88037109375}, {"start": 1090.99, "end": 1091.31, "word": " 1", "probability": 0.84033203125}, {"start": 1091.31, "end": 1091.73, "word": " minus", "probability": 0.919921875}, {"start": 1091.73, "end": 1092.11, "word": " 1", "probability": 0.9697265625}, {"start": 1092.11, "end": 1093.45, "word": " over", "probability": 0.87744140625}, {"start": 1093.45, "end": 1093.77, "word": " k", "probability": 0.71337890625}, {"start": 1093.77, "end": 1094.25, "word": " squared.", "probability": 0.80224609375}, {"start": 1095.29, "end": 1096.13, "word": " Multiply", "probability": 0.7734375}, {"start": 1096.13, "end": 1096.47, "word": " this", "probability": 0.94091796875}, {"start": 1096.47, 
"end": 1098.07, "word": " by", "probability": 0.96826171875}, {"start": 1098.07, "end": 1098.69, "word": " 100.", "probability": 0.91162109375}, {"start": 1099.65, "end": 1100.05, "word": " All", "probability": 0.63232421875}, {"start": 1100.05, "end": 1100.13, "word": " of", "probability": 0.68310546875}, {"start": 1100.13, "end": 1100.25, "word": " the", "probability": 0.90673828125}, {"start": 1100.25, "end": 1100.75, "word": " values", "probability": 0.91552734375}, {"start": 1100.75, "end": 1101.25, "word": " will", "probability": 0.73974609375}, {"start": 1101.25, "end": 1101.63, "word": " fall", "probability": 0.85986328125}, {"start": 1101.63, "end": 1102.07, "word": " within", "probability": 0.9111328125}, {"start": 1102.07, "end": 1102.53, "word": " k.", "probability": 0.94140625}, {"start": 1104.03, "end": 1104.43, "word": " So", "probability": 0.90478515625}, {"start": 1104.43, "end": 1104.77, "word": " k", "probability": 0.7763671875}, {"start": 1104.77, "end": 1105.19, "word": " is", "probability": 0.9453125}, {"start": 1105.19, "end": 1105.75, "word": " number", "probability": 0.80322265625}, {"start": 1105.75, "end": 1106.51, "word": " of", "probability": 0.9716796875}, {"start": 1106.51, "end": 1107.91, "word": " standard", "probability": 0.80517578125}, {"start": 1107.91, "end": 1108.51, "word": " deviations.", "probability": 0.921142578125}, {"start": 1109.49, "end": 1109.87, "word": " I", "probability": 0.9912109375}, {"start": 1109.87, "end": 1109.99, "word": " mean", "probability": 0.9677734375}, {"start": 1109.99, "end": 1110.25, "word": " number", "probability": 0.63916015625}, {"start": 1110.25, "end": 1110.41, "word": " of", "probability": 0.96337890625}, {"start": 1110.41, "end": 1110.67, "word": " signals.", "probability": 0.86962890625}], "temperature": 1.0}, {"id": 43, "seek": 113543, "start": 1111.89, "end": 1135.43, "text": " So if the data is not bell shaped, then you can say that approximately at least 1 minus 1 over k squared 
times 100% of the values will fall within k standard deviations of the mean. In this case, we assume that k is greater than 1. I mean, you cannot apply this rule if k equals 1. Because if k is 1.", "tokens": [407, 498, 264, 1412, 307, 406, 4549, 13475, 11, 550, 291, 393, 584, 300, 10447, 412, 1935, 502, 3175, 502, 670, 350, 8889, 1413, 2319, 4, 295, 264, 4190, 486, 2100, 1951, 350, 3832, 31219, 763, 295, 264, 914, 13, 682, 341, 1389, 11, 321, 6552, 300, 350, 307, 5044, 813, 502, 13, 286, 914, 11, 291, 2644, 3079, 341, 4978, 498, 350, 6915, 502, 13, 1436, 498, 350, 307, 502, 13], "avg_logprob": -0.15560787834533274, "compression_ratio": 1.5357142857142858, "no_speech_prob": 0.0, "words": [{"start": 1111.89, "end": 1112.15, "word": " So", "probability": 0.86376953125}, {"start": 1112.15, "end": 1112.31, "word": " if", "probability": 0.7958984375}, {"start": 1112.31, "end": 1112.43, "word": " the", "probability": 0.8994140625}, {"start": 1112.43, "end": 1112.65, "word": " data", "probability": 0.94287109375}, {"start": 1112.65, "end": 1112.85, "word": " is", "probability": 0.81787109375}, {"start": 1112.85, "end": 1113.09, "word": " not", "probability": 0.93896484375}, {"start": 1113.09, "end": 1113.35, "word": " bell", "probability": 0.33935546875}, {"start": 1113.35, "end": 1113.67, "word": " shaped,", "probability": 0.54736328125}, {"start": 1113.85, "end": 1113.99, "word": " then", "probability": 0.8525390625}, {"start": 1113.99, "end": 1114.13, "word": " you", "probability": 0.95068359375}, {"start": 1114.13, "end": 1114.33, "word": " can", "probability": 0.94287109375}, {"start": 1114.33, "end": 1114.59, "word": " say", "probability": 0.923828125}, {"start": 1114.59, "end": 1114.91, "word": " that", "probability": 0.92236328125}, {"start": 1114.91, "end": 1115.73, "word": " approximately", "probability": 0.84716796875}, {"start": 1115.73, "end": 1117.09, "word": " at", "probability": 0.736328125}, {"start": 1117.09, "end": 1117.35, "word": " least", 
"probability": 0.9638671875}, {"start": 1117.35, "end": 1118.27, "word": " 1", "probability": 0.69775390625}, {"start": 1118.27, "end": 1118.59, "word": " minus", "probability": 0.8017578125}, {"start": 1118.59, "end": 1118.79, "word": " 1", "probability": 0.90576171875}, {"start": 1118.79, "end": 1118.95, "word": " over", "probability": 0.89599609375}, {"start": 1118.95, "end": 1119.15, "word": " k", "probability": 0.796875}, {"start": 1119.15, "end": 1119.45, "word": " squared", "probability": 0.77392578125}, {"start": 1119.45, "end": 1119.75, "word": " times", "probability": 0.90087890625}, {"start": 1119.75, "end": 1120.17, "word": " 100", "probability": 0.90185546875}, {"start": 1120.17, "end": 1120.65, "word": "%", "probability": 0.8046875}, {"start": 1120.65, "end": 1121.43, "word": " of", "probability": 0.94580078125}, {"start": 1121.43, "end": 1121.59, "word": " the", "probability": 0.91552734375}, {"start": 1121.59, "end": 1122.43, "word": " values", "probability": 0.751953125}, {"start": 1122.43, "end": 1122.89, "word": " will", "probability": 0.845703125}, {"start": 1122.89, "end": 1123.41, "word": " fall", "probability": 0.81494140625}, {"start": 1123.41, "end": 1123.81, "word": " within", "probability": 0.88134765625}, {"start": 1123.81, "end": 1124.69, "word": " k", "probability": 0.86865234375}, {"start": 1124.69, "end": 1125.05, "word": " standard", "probability": 0.93603515625}, {"start": 1125.05, "end": 1125.53, "word": " deviations", "probability": 0.847900390625}, {"start": 1125.53, "end": 1125.77, "word": " of", "probability": 0.96142578125}, {"start": 1125.77, "end": 1125.89, "word": " the", "probability": 0.923828125}, {"start": 1125.89, "end": 1126.03, "word": " mean.", "probability": 0.97314453125}, {"start": 1127.15, "end": 1127.43, "word": " In", "probability": 0.96044921875}, {"start": 1127.43, "end": 1127.63, "word": " this", "probability": 0.9443359375}, {"start": 1127.63, "end": 1127.93, "word": " case,", "probability": 
0.9033203125}, {"start": 1128.03, "end": 1128.21, "word": " we", "probability": 0.958984375}, {"start": 1128.21, "end": 1129.15, "word": " assume", "probability": 0.888671875}, {"start": 1129.15, "end": 1129.43, "word": " that", "probability": 0.93701171875}, {"start": 1129.43, "end": 1129.71, "word": " k", "probability": 0.953125}, {"start": 1129.71, "end": 1129.91, "word": " is", "probability": 0.93115234375}, {"start": 1129.91, "end": 1130.19, "word": " greater", "probability": 0.91552734375}, {"start": 1130.19, "end": 1130.45, "word": " than", "probability": 0.9482421875}, {"start": 1130.45, "end": 1130.63, "word": " 1.", "probability": 0.8544921875}, {"start": 1130.69, "end": 1130.79, "word": " I", "probability": 0.91650390625}, {"start": 1130.79, "end": 1130.95, "word": " mean,", "probability": 0.96484375}, {"start": 1131.03, "end": 1131.19, "word": " you", "probability": 0.9560546875}, {"start": 1131.19, "end": 1131.41, "word": " cannot", "probability": 0.87158203125}, {"start": 1131.41, "end": 1131.75, "word": " apply", "probability": 0.93212890625}, {"start": 1131.75, "end": 1132.01, "word": " this", "probability": 0.9462890625}, {"start": 1132.01, "end": 1132.33, "word": " rule", "probability": 0.92724609375}, {"start": 1132.33, "end": 1132.67, "word": " if", "probability": 0.759765625}, {"start": 1132.67, "end": 1132.83, "word": " k", "probability": 0.984375}, {"start": 1132.83, "end": 1133.13, "word": " equals", "probability": 0.8291015625}, {"start": 1133.13, "end": 1133.41, "word": " 1.", "probability": 0.93408203125}, {"start": 1134.03, "end": 1134.55, "word": " Because", "probability": 0.463623046875}, {"start": 1134.55, "end": 1134.75, "word": " if", "probability": 0.9287109375}, {"start": 1134.75, "end": 1134.91, "word": " k", "probability": 0.98828125}, {"start": 1134.91, "end": 1135.11, "word": " is", "probability": 0.94384765625}, {"start": 1135.11, "end": 1135.43, "word": " 1.", "probability": 0.87890625}], "temperature": 1.0}, {"id": 44, 
"seek": 116583, "start": 1137.39, "end": 1165.83, "text": " Then 1 minus 1 is 0. That makes no sense. For this reason, k is above 1 or greater than 1. So this rule is valid only for k greater than 1. So you can see that at least 1 minus 1 over k squared of the data or of the values will fall within k standard equations. So now, for example, suppose k equals 2.", "tokens": [1396, 502, 3175, 502, 307, 1958, 13, 663, 1669, 572, 2020, 13, 1171, 341, 1778, 11, 350, 307, 3673, 502, 420, 5044, 813, 502, 13, 407, 341, 4978, 307, 7363, 787, 337, 350, 5044, 813, 502, 13, 407, 291, 393, 536, 300, 412, 1935, 502, 3175, 502, 670, 350, 8889, 295, 264, 1412, 420, 295, 264, 4190, 486, 2100, 1951, 350, 3832, 11787, 13, 407, 586, 11, 337, 1365, 11, 7297, 350, 6915, 568, 13], "avg_logprob": -0.15481085467495417, "compression_ratio": 1.5947368421052632, "no_speech_prob": 0.0, "words": [{"start": 1137.39, "end": 1137.71, "word": " Then", "probability": 0.37353515625}, {"start": 1137.71, "end": 1137.97, "word": " 1", "probability": 0.53271484375}, {"start": 1137.97, "end": 1138.29, "word": " minus", "probability": 0.81494140625}, {"start": 1138.29, "end": 1138.57, "word": " 1", "probability": 0.9658203125}, {"start": 1138.57, "end": 1138.79, "word": " is", "probability": 0.93017578125}, {"start": 1138.79, "end": 1139.07, "word": " 0.", "probability": 0.67626953125}, {"start": 1139.33, "end": 1139.45, "word": " That", "probability": 0.79150390625}, {"start": 1139.45, "end": 1139.83, "word": " makes", "probability": 0.83642578125}, {"start": 1139.83, "end": 1140.09, "word": " no", "probability": 0.9462890625}, {"start": 1140.09, "end": 1140.47, "word": " sense.", "probability": 0.83349609375}, {"start": 1141.17, "end": 1141.43, "word": " For", "probability": 0.95654296875}, {"start": 1141.43, "end": 1141.65, "word": " this", "probability": 0.94384765625}, {"start": 1141.65, "end": 1142.01, "word": " reason,", "probability": 0.974609375}, {"start": 1142.15, "end": 1142.25, "word": " k", 
"probability": 0.79443359375}, {"start": 1142.25, "end": 1142.43, "word": " is", "probability": 0.9423828125}, {"start": 1142.43, "end": 1142.73, "word": " above", "probability": 0.93994140625}, {"start": 1142.73, "end": 1142.97, "word": " 1", "probability": 0.8671875}, {"start": 1142.97, "end": 1143.15, "word": " or", "probability": 0.71923828125}, {"start": 1143.15, "end": 1143.41, "word": " greater", "probability": 0.92041015625}, {"start": 1143.41, "end": 1143.67, "word": " than", "probability": 0.9453125}, {"start": 1143.67, "end": 1143.85, "word": " 1.", "probability": 0.9814453125}, {"start": 1144.43, "end": 1144.99, "word": " So", "probability": 0.953125}, {"start": 1144.99, "end": 1145.23, "word": " this", "probability": 0.86669921875}, {"start": 1145.23, "end": 1145.53, "word": " rule", "probability": 0.91259765625}, {"start": 1145.53, "end": 1146.03, "word": " is", "probability": 0.94482421875}, {"start": 1146.03, "end": 1146.47, "word": " valid", "probability": 0.9619140625}, {"start": 1146.47, "end": 1147.35, "word": " only", "probability": 0.921875}, {"start": 1147.35, "end": 1148.29, "word": " for", "probability": 0.9365234375}, {"start": 1148.29, "end": 1148.59, "word": " k", "probability": 0.98193359375}, {"start": 1148.59, "end": 1149.11, "word": " greater", "probability": 0.92529296875}, {"start": 1149.11, "end": 1149.39, "word": " than", "probability": 0.94873046875}, {"start": 1149.39, "end": 1149.55, "word": " 1.", "probability": 0.94775390625}, {"start": 1151.55, "end": 1152.11, "word": " So", "probability": 0.9306640625}, {"start": 1152.11, "end": 1152.23, "word": " you", "probability": 0.89111328125}, {"start": 1152.23, "end": 1152.37, "word": " can", "probability": 0.9453125}, {"start": 1152.37, "end": 1152.53, "word": " see", "probability": 0.62646484375}, {"start": 1152.53, "end": 1152.73, "word": " that", "probability": 0.92041015625}, {"start": 1152.73, "end": 1152.93, "word": " at", "probability": 0.8603515625}, {"start": 1152.93, 
"end": 1153.19, "word": " least", "probability": 0.96240234375}, {"start": 1153.19, "end": 1153.81, "word": " 1", "probability": 0.9326171875}, {"start": 1153.81, "end": 1154.13, "word": " minus", "probability": 0.9814453125}, {"start": 1154.13, "end": 1154.39, "word": " 1", "probability": 0.98876953125}, {"start": 1154.39, "end": 1154.57, "word": " over", "probability": 0.91650390625}, {"start": 1154.57, "end": 1154.81, "word": " k", "probability": 0.98681640625}, {"start": 1154.81, "end": 1155.21, "word": " squared", "probability": 0.7880859375}, {"start": 1155.21, "end": 1156.67, "word": " of", "probability": 0.8388671875}, {"start": 1156.67, "end": 1156.85, "word": " the", "probability": 0.92138671875}, {"start": 1156.85, "end": 1157.23, "word": " data", "probability": 0.939453125}, {"start": 1157.23, "end": 1157.91, "word": " or", "probability": 0.73291015625}, {"start": 1157.91, "end": 1158.09, "word": " of", "probability": 0.9560546875}, {"start": 1158.09, "end": 1158.23, "word": " the", "probability": 0.9189453125}, {"start": 1158.23, "end": 1158.69, "word": " values", "probability": 0.96484375}, {"start": 1158.69, "end": 1159.27, "word": " will", "probability": 0.86279296875}, {"start": 1159.27, "end": 1159.71, "word": " fall", "probability": 0.85791015625}, {"start": 1159.71, "end": 1160.15, "word": " within", "probability": 0.9033203125}, {"start": 1160.15, "end": 1161.19, "word": " k", "probability": 0.89111328125}, {"start": 1161.19, "end": 1161.53, "word": " standard", "probability": 0.85888671875}, {"start": 1161.53, "end": 1161.89, "word": " equations.", "probability": 0.26611328125}, {"start": 1162.97, "end": 1163.47, "word": " So", "probability": 0.93896484375}, {"start": 1163.47, "end": 1163.71, "word": " now,", "probability": 0.92529296875}, {"start": 1164.07, "end": 1164.23, "word": " for", "probability": 0.9501953125}, {"start": 1164.23, "end": 1164.57, "word": " example,", "probability": 0.97412109375}, {"start": 1164.67, "end": 1165.05, 
"word": " suppose", "probability": 0.87646484375}, {"start": 1165.05, "end": 1165.27, "word": " k", "probability": 0.98876953125}, {"start": 1165.27, "end": 1165.57, "word": " equals", "probability": 0.481689453125}, {"start": 1165.57, "end": 1165.83, "word": " 2.", "probability": 0.896484375}], "temperature": 1.0}, {"id": 45, "seek": 119419, "start": 1168.69, "end": 1194.19, "text": " When k equals 2, we said that 95% of the data falls within two standard ratios. That if the data is bell shaped. Now what's about if the data is not bell shaped? We have to use shape shape rule. So 1 minus 1 over k is 2. So 2, 2, 2 squared. So 1 minus 1 fourth. That gives.", "tokens": [1133, 350, 6915, 568, 11, 321, 848, 300, 13420, 4, 295, 264, 1412, 8804, 1951, 732, 3832, 32435, 13, 663, 498, 264, 1412, 307, 4549, 13475, 13, 823, 437, 311, 466, 498, 264, 1412, 307, 406, 4549, 13475, 30, 492, 362, 281, 764, 3909, 3909, 4978, 13, 407, 502, 3175, 502, 670, 350, 307, 568, 13, 407, 568, 11, 568, 11, 568, 8889, 13, 407, 502, 3175, 502, 6409, 13, 663, 2709, 13], "avg_logprob": -0.28251690196024404, "compression_ratio": 1.583815028901734, "no_speech_prob": 0.0, "words": [{"start": 1168.69, "end": 1169.09, "word": " When", "probability": 0.76025390625}, {"start": 1169.09, "end": 1169.43, "word": " k", "probability": 0.67333984375}, {"start": 1169.43, "end": 1169.91, "word": " equals", "probability": 0.499267578125}, {"start": 1169.91, "end": 1170.25, "word": " 2,", "probability": 0.52734375}, {"start": 1170.65, "end": 1170.91, "word": " we", "probability": 0.921875}, {"start": 1170.91, "end": 1171.19, "word": " said", "probability": 0.402099609375}, {"start": 1171.19, "end": 1171.41, "word": " that", "probability": 0.91650390625}, {"start": 1171.41, "end": 1171.91, "word": " 95", "probability": 0.97705078125}, {"start": 1171.91, "end": 1172.43, "word": "%", "probability": 0.89501953125}, {"start": 1172.43, "end": 1172.61, "word": " of", "probability": 0.95751953125}, {"start": 1172.61, 
"end": 1172.75, "word": " the", "probability": 0.900390625}, {"start": 1172.75, "end": 1172.97, "word": " data", "probability": 0.95068359375}, {"start": 1172.97, "end": 1173.29, "word": " falls", "probability": 0.83447265625}, {"start": 1173.29, "end": 1173.55, "word": " within", "probability": 0.88427734375}, {"start": 1173.55, "end": 1173.75, "word": " two", "probability": 0.68408203125}, {"start": 1173.75, "end": 1174.05, "word": " standard", "probability": 0.89208984375}, {"start": 1174.05, "end": 1174.45, "word": " ratios.", "probability": 0.25048828125}, {"start": 1175.19, "end": 1175.75, "word": " That", "probability": 0.73046875}, {"start": 1175.75, "end": 1175.99, "word": " if", "probability": 0.7490234375}, {"start": 1175.99, "end": 1176.13, "word": " the", "probability": 0.916015625}, {"start": 1176.13, "end": 1176.37, "word": " data", "probability": 0.947265625}, {"start": 1176.37, "end": 1176.65, "word": " is", "probability": 0.73046875}, {"start": 1176.65, "end": 1176.85, "word": " bell", "probability": 0.50390625}, {"start": 1176.85, "end": 1177.13, "word": " shaped.", "probability": 0.47412109375}, {"start": 1177.67, "end": 1178.03, "word": " Now", "probability": 0.94384765625}, {"start": 1178.03, "end": 1178.29, "word": " what's", "probability": 0.674560546875}, {"start": 1178.29, "end": 1178.59, "word": " about", "probability": 0.89599609375}, {"start": 1178.59, "end": 1178.85, "word": " if", "probability": 0.93798828125}, {"start": 1178.85, "end": 1178.95, "word": " the", "probability": 0.9072265625}, {"start": 1178.95, "end": 1179.15, "word": " data", "probability": 0.94873046875}, {"start": 1179.15, "end": 1179.35, "word": " is", "probability": 0.923828125}, {"start": 1179.35, "end": 1179.57, "word": " not", "probability": 0.94482421875}, {"start": 1179.57, "end": 1179.85, "word": " bell", "probability": 0.94287109375}, {"start": 1179.85, "end": 1180.15, "word": " shaped?", "probability": 0.787109375}, {"start": 1181.19, "end": 1181.55, 
"word": " We", "probability": 0.94970703125}, {"start": 1181.55, "end": 1181.75, "word": " have", "probability": 0.94677734375}, {"start": 1181.75, "end": 1181.85, "word": " to", "probability": 0.9609375}, {"start": 1181.85, "end": 1182.09, "word": " use", "probability": 0.90673828125}, {"start": 1182.09, "end": 1182.37, "word": " shape", "probability": 0.388916015625}, {"start": 1182.37, "end": 1182.75, "word": " shape", "probability": 0.19140625}, {"start": 1182.75, "end": 1183.21, "word": " rule.", "probability": 0.90283203125}, {"start": 1183.83, "end": 1184.05, "word": " So", "probability": 0.95751953125}, {"start": 1184.05, "end": 1184.39, "word": " 1", "probability": 0.79736328125}, {"start": 1184.39, "end": 1185.01, "word": " minus", "probability": 0.92529296875}, {"start": 1185.01, "end": 1187.25, "word": " 1", "probability": 0.82470703125}, {"start": 1187.25, "end": 1187.63, "word": " over", "probability": 0.9130859375}, {"start": 1187.63, "end": 1188.11, "word": " k", "probability": 0.79248046875}, {"start": 1188.11, "end": 1188.33, "word": " is", "probability": 0.86376953125}, {"start": 1188.33, "end": 1188.53, "word": " 2.", "probability": 0.962890625}, {"start": 1188.63, "end": 1188.75, "word": " So", "probability": 0.93408203125}, {"start": 1188.75, "end": 1188.95, "word": " 2,", "probability": 0.8388671875}, {"start": 1189.03, "end": 1189.17, "word": " 2,", "probability": 0.8837890625}, {"start": 1189.29, "end": 1189.47, "word": " 2", "probability": 0.96826171875}, {"start": 1189.47, "end": 1189.89, "word": " squared.", "probability": 0.8544921875}, {"start": 1190.73, "end": 1190.91, "word": " So", "probability": 0.7705078125}, {"start": 1190.91, "end": 1191.17, "word": " 1", "probability": 0.9150390625}, {"start": 1191.17, "end": 1192.23, "word": " minus", "probability": 0.9755859375}, {"start": 1192.23, "end": 1192.49, "word": " 1", "probability": 0.5927734375}, {"start": 1192.49, "end": 1192.89, "word": " fourth.", "probability": 0.319580078125}, 
{"start": 1193.51, "end": 1193.81, "word": " That", "probability": 0.798828125}, {"start": 1193.81, "end": 1194.19, "word": " gives.", "probability": 0.908203125}], "temperature": 1.0}, {"id": 46, "seek": 121993, "start": 1196.17, "end": 1219.93, "text": " three quarters, I mean, 75%. So instead of saying 95% of the data lies within one or two standard deviations of the mean, if the data is bell-shaped, if the data is not bell-shaped, you have to say that 75% of the data falls within two standard deviations.", "tokens": [1045, 20612, 11, 286, 914, 11, 9562, 6856, 407, 2602, 295, 1566, 13420, 4, 295, 264, 1412, 9134, 1951, 472, 420, 732, 3832, 31219, 763, 295, 264, 914, 11, 498, 264, 1412, 307, 4549, 12, 23103, 11, 498, 264, 1412, 307, 406, 4549, 12, 23103, 11, 291, 362, 281, 584, 300, 9562, 4, 295, 264, 1412, 8804, 1951, 732, 3832, 31219, 763, 13], "avg_logprob": -0.20153807988390326, "compression_ratio": 1.7602739726027397, "no_speech_prob": 0.0, "words": [{"start": 1196.17, "end": 1196.47, "word": " three", "probability": 0.454345703125}, {"start": 1196.47, "end": 1196.93, "word": " quarters,", "probability": 0.80078125}, {"start": 1197.71, "end": 1198.13, "word": " I", "probability": 0.94921875}, {"start": 1198.13, "end": 1198.35, "word": " mean,", "probability": 0.96875}, {"start": 1198.51, "end": 1199.99, "word": " 75%.", "probability": 0.632080078125}, {"start": 1199.99, "end": 1200.89, "word": " So", "probability": 0.94580078125}, {"start": 1200.89, "end": 1201.29, "word": " instead", "probability": 0.6376953125}, {"start": 1201.29, "end": 1201.49, "word": " of", "probability": 0.96923828125}, {"start": 1201.49, "end": 1201.83, "word": " saying", "probability": 0.8798828125}, {"start": 1201.83, "end": 1202.21, "word": " 95", "probability": 0.9150390625}, {"start": 1202.21, "end": 1202.75, "word": "%", "probability": 0.98388671875}, {"start": 1202.75, "end": 1202.91, "word": " of", "probability": 0.95947265625}, {"start": 1202.91, "end": 1203.03, "word": " 
the", "probability": 0.9169921875}, {"start": 1203.03, "end": 1203.37, "word": " data", "probability": 0.94677734375}, {"start": 1203.37, "end": 1204.19, "word": " lies", "probability": 0.89501953125}, {"start": 1204.19, "end": 1205.35, "word": " within", "probability": 0.90234375}, {"start": 1205.35, "end": 1205.55, "word": " one", "probability": 0.6953125}, {"start": 1205.55, "end": 1205.71, "word": " or", "probability": 0.489013671875}, {"start": 1205.71, "end": 1205.89, "word": " two", "probability": 0.9326171875}, {"start": 1205.89, "end": 1206.17, "word": " standard", "probability": 0.9326171875}, {"start": 1206.17, "end": 1206.53, "word": " deviations", "probability": 0.92626953125}, {"start": 1206.53, "end": 1206.71, "word": " of", "probability": 0.94873046875}, {"start": 1206.71, "end": 1206.85, "word": " the", "probability": 0.92919921875}, {"start": 1206.85, "end": 1207.07, "word": " mean,", "probability": 0.97119140625}, {"start": 1208.81, "end": 1209.13, "word": " if", "probability": 0.9404296875}, {"start": 1209.13, "end": 1209.27, "word": " the", "probability": 0.89306640625}, {"start": 1209.27, "end": 1209.45, "word": " data", "probability": 0.9365234375}, {"start": 1209.45, "end": 1209.69, "word": " is", "probability": 0.92919921875}, {"start": 1209.69, "end": 1209.93, "word": " bell", "probability": 0.17041015625}, {"start": 1209.93, "end": 1210.23, "word": "-shaped,", "probability": 0.6300048828125}, {"start": 1211.69, "end": 1212.51, "word": " if", "probability": 0.9345703125}, {"start": 1212.51, "end": 1212.67, "word": " the", "probability": 0.908203125}, {"start": 1212.67, "end": 1212.85, "word": " data", "probability": 0.93310546875}, {"start": 1212.85, "end": 1213.07, "word": " is", "probability": 0.94580078125}, {"start": 1213.07, "end": 1213.35, "word": " not", "probability": 0.9462890625}, {"start": 1213.35, "end": 1213.63, "word": " bell", "probability": 0.9462890625}, {"start": 1213.63, "end": 1213.93, "word": "-shaped,", "probability": 
0.85791015625}, {"start": 1214.39, "end": 1214.51, "word": " you", "probability": 0.93408203125}, {"start": 1214.51, "end": 1214.69, "word": " have", "probability": 0.94384765625}, {"start": 1214.69, "end": 1214.81, "word": " to", "probability": 0.96923828125}, {"start": 1214.81, "end": 1214.99, "word": " say", "probability": 0.94970703125}, {"start": 1214.99, "end": 1215.27, "word": " that", "probability": 0.93310546875}, {"start": 1215.27, "end": 1215.83, "word": " 75", "probability": 0.9521484375}, {"start": 1215.83, "end": 1216.27, "word": "%", "probability": 0.99462890625}, {"start": 1216.27, "end": 1217.41, "word": " of", "probability": 0.96484375}, {"start": 1217.41, "end": 1217.59, "word": " the", "probability": 0.91943359375}, {"start": 1217.59, "end": 1217.89, "word": " data", "probability": 0.94091796875}, {"start": 1217.89, "end": 1218.33, "word": " falls", "probability": 0.7939453125}, {"start": 1218.33, "end": 1218.73, "word": " within", "probability": 0.90869140625}, {"start": 1218.73, "end": 1219.11, "word": " two", "probability": 0.90869140625}, {"start": 1219.11, "end": 1219.41, "word": " standard", "probability": 0.9150390625}, {"start": 1219.41, "end": 1219.93, "word": " deviations.", "probability": 0.976318359375}], "temperature": 1.0}, {"id": 47, "seek": 125067, "start": 1221.85, "end": 1250.67, "text": " For bell shape, you are 95% confident there. But here, you're just 75% confident. Suppose k is 3. Now for k equal 3, we said 99.7% of the data falls within three standard deviations. 
Now here, if the data is not bell shape, 1 minus 1 over k squared.", "tokens": [1171, 4549, 3909, 11, 291, 366, 13420, 4, 6679, 456, 13, 583, 510, 11, 291, 434, 445, 9562, 4, 6679, 13, 21360, 350, 307, 805, 13, 823, 337, 350, 2681, 805, 11, 321, 848, 11803, 13, 22, 4, 295, 264, 1412, 8804, 1951, 1045, 3832, 31219, 763, 13, 823, 510, 11, 498, 264, 1412, 307, 406, 4549, 3909, 11, 502, 3175, 502, 670, 350, 8889, 13], "avg_logprob": -0.1965951452504343, "compression_ratio": 1.4619883040935673, "no_speech_prob": 0.0, "words": [{"start": 1221.85, "end": 1222.19, "word": " For", "probability": 0.83447265625}, {"start": 1222.19, "end": 1222.47, "word": " bell", "probability": 0.2147216796875}, {"start": 1222.47, "end": 1222.77, "word": " shape,", "probability": 0.6171875}, {"start": 1223.31, "end": 1223.71, "word": " you", "probability": 0.95703125}, {"start": 1223.71, "end": 1223.95, "word": " are", "probability": 0.9404296875}, {"start": 1223.95, "end": 1224.41, "word": " 95", "probability": 0.970703125}, {"start": 1224.41, "end": 1224.95, "word": "%", "probability": 0.8759765625}, {"start": 1224.95, "end": 1225.53, "word": " confident", "probability": 0.97265625}, {"start": 1225.53, "end": 1225.89, "word": " there.", "probability": 0.65673828125}, {"start": 1226.03, "end": 1226.31, "word": " But", "probability": 0.9423828125}, {"start": 1226.31, "end": 1226.57, "word": " here,", "probability": 0.85498046875}, {"start": 1227.19, "end": 1227.45, "word": " you're", "probability": 0.743408203125}, {"start": 1227.45, "end": 1227.87, "word": " just", "probability": 0.92431640625}, {"start": 1227.87, "end": 1228.75, "word": " 75", "probability": 0.96630859375}, {"start": 1228.75, "end": 1229.37, "word": "%", "probability": 0.9921875}, {"start": 1229.37, "end": 1229.79, "word": " confident.", "probability": 0.92041015625}, {"start": 1231.95, "end": 1232.53, "word": " Suppose", "probability": 0.8212890625}, {"start": 1232.53, "end": 1232.73, "word": " k", "probability": 
0.47900390625}, {"start": 1232.73, "end": 1232.89, "word": " is", "probability": 0.7099609375}, {"start": 1232.89, "end": 1233.13, "word": " 3.", "probability": 0.7685546875}, {"start": 1235.81, "end": 1236.41, "word": " Now", "probability": 0.93603515625}, {"start": 1236.41, "end": 1236.71, "word": " for", "probability": 0.51611328125}, {"start": 1236.71, "end": 1237.17, "word": " k", "probability": 0.94091796875}, {"start": 1237.17, "end": 1237.37, "word": " equal", "probability": 0.65087890625}, {"start": 1237.37, "end": 1237.61, "word": " 3,", "probability": 0.7216796875}, {"start": 1237.71, "end": 1237.81, "word": " we", "probability": 0.93798828125}, {"start": 1237.81, "end": 1238.07, "word": " said", "probability": 0.875}, {"start": 1238.07, "end": 1239.01, "word": " 99", "probability": 0.94482421875}, {"start": 1239.01, "end": 1239.59, "word": ".7", "probability": 0.992431640625}, {"start": 1239.59, "end": 1239.89, "word": "%", "probability": 0.986328125}, {"start": 1239.89, "end": 1240.17, "word": " of", "probability": 0.9287109375}, {"start": 1240.17, "end": 1240.31, "word": " the", "probability": 0.9189453125}, {"start": 1240.31, "end": 1240.55, "word": " data", "probability": 0.943359375}, {"start": 1240.55, "end": 1240.85, "word": " falls", "probability": 0.82763671875}, {"start": 1240.85, "end": 1241.11, "word": " within", "probability": 0.8671875}, {"start": 1241.11, "end": 1241.43, "word": " three", "probability": 0.5693359375}, {"start": 1241.43, "end": 1241.73, "word": " standard", "probability": 0.80810546875}, {"start": 1241.73, "end": 1242.09, "word": " deviations.", "probability": 0.67919921875}, {"start": 1243.49, "end": 1243.77, "word": " Now", "probability": 0.9443359375}, {"start": 1243.77, "end": 1244.09, "word": " here,", "probability": 0.75390625}, {"start": 1244.37, "end": 1244.61, "word": " if", "probability": 0.93994140625}, {"start": 1244.61, "end": 1244.71, "word": " the", "probability": 0.916015625}, {"start": 1244.71, "end": 
1244.89, "word": " data", "probability": 0.9482421875}, {"start": 1244.89, "end": 1245.11, "word": " is", "probability": 0.943359375}, {"start": 1245.11, "end": 1245.45, "word": " not", "probability": 0.94677734375}, {"start": 1245.45, "end": 1245.85, "word": " bell", "probability": 0.94189453125}, {"start": 1245.85, "end": 1246.11, "word": " shape,", "probability": 0.455078125}, {"start": 1246.85, "end": 1247.39, "word": " 1", "probability": 0.96044921875}, {"start": 1247.39, "end": 1247.89, "word": " minus", "probability": 0.9814453125}, {"start": 1247.89, "end": 1249.75, "word": " 1", "probability": 0.986328125}, {"start": 1249.75, "end": 1250.01, "word": " over", "probability": 0.90380859375}, {"start": 1250.01, "end": 1250.29, "word": " k", "probability": 0.98583984375}, {"start": 1250.29, "end": 1250.67, "word": " squared.", "probability": 0.81787109375}], "temperature": 1.0}, {"id": 48, "seek": 127646, "start": 1251.58, "end": 1276.46, "text": " 1 minus 1 over 3 squared is one-ninth. One-ninth is 0.11. 1 minus 0.11 means 89% of the data, instead of saying 99.7. 
So 89% of the data will fall within three standard deviations of the population mean.", "tokens": [502, 3175, 502, 670, 805, 8889, 307, 472, 12, 22955, 392, 13, 1485, 12, 22955, 392, 307, 1958, 13, 5348, 13, 502, 3175, 1958, 13, 5348, 1355, 31877, 4, 295, 264, 1412, 11, 2602, 295, 1566, 11803, 13, 22, 13, 407, 31877, 4, 295, 264, 1412, 486, 2100, 1951, 1045, 3832, 31219, 763, 295, 264, 4415, 914, 13], "avg_logprob": -0.21146716253232148, "compression_ratio": 1.4366197183098592, "no_speech_prob": 0.0, "words": [{"start": 1251.58, "end": 1251.94, "word": " 1", "probability": 0.465087890625}, {"start": 1251.94, "end": 1252.4, "word": " minus", "probability": 0.861328125}, {"start": 1252.4, "end": 1256.54, "word": " 1", "probability": 0.73876953125}, {"start": 1256.54, "end": 1256.74, "word": " over", "probability": 0.8935546875}, {"start": 1256.74, "end": 1257.04, "word": " 3", "probability": 0.880859375}, {"start": 1257.04, "end": 1257.3, "word": " squared", "probability": 0.7783203125}, {"start": 1257.3, "end": 1257.48, "word": " is", "probability": 0.93603515625}, {"start": 1257.48, "end": 1257.68, "word": " one", "probability": 0.466552734375}, {"start": 1257.68, "end": 1258.06, "word": "-ninth.", "probability": 0.75634765625}, {"start": 1258.22, "end": 1258.44, "word": " One", "probability": 0.88134765625}, {"start": 1258.44, "end": 1258.76, "word": "-ninth", "probability": 0.9513346354166666}, {"start": 1258.76, "end": 1258.94, "word": " is", "probability": 0.9326171875}, {"start": 1258.94, "end": 1259.16, "word": " 0", "probability": 0.62353515625}, {"start": 1259.16, "end": 1259.48, "word": ".11.", "probability": 0.98974609375}, {"start": 1260.22, "end": 1260.76, "word": " 1", "probability": 0.52978515625}, {"start": 1260.76, "end": 1261.04, "word": " minus", "probability": 0.9326171875}, {"start": 1261.04, "end": 1261.26, "word": " 0", "probability": 0.98388671875}, {"start": 1261.26, "end": 1261.7, "word": ".11", "probability": 0.99658203125}, {"start": 
1261.7, "end": 1262.28, "word": " means", "probability": 0.91162109375}, {"start": 1262.28, "end": 1264.16, "word": " 89", "probability": 0.95166015625}, {"start": 1264.16, "end": 1264.68, "word": "%", "probability": 0.75830078125}, {"start": 1264.68, "end": 1264.98, "word": " of", "probability": 0.95361328125}, {"start": 1264.98, "end": 1265.1, "word": " the", "probability": 0.91650390625}, {"start": 1265.1, "end": 1265.36, "word": " data,", "probability": 0.923828125}, {"start": 1265.94, "end": 1266.22, "word": " instead", "probability": 0.8037109375}, {"start": 1266.22, "end": 1266.44, "word": " of", "probability": 0.9697265625}, {"start": 1266.44, "end": 1266.78, "word": " saying", "probability": 0.91650390625}, {"start": 1266.78, "end": 1267.2, "word": " 99", "probability": 0.88525390625}, {"start": 1267.2, "end": 1267.76, "word": ".7.", "probability": 0.667724609375}, {"start": 1268.64, "end": 1269.36, "word": " So", "probability": 0.95703125}, {"start": 1269.36, "end": 1270.24, "word": " 89", "probability": 0.3818359375}, {"start": 1270.24, "end": 1271.54, "word": "%", "probability": 0.90380859375}, {"start": 1271.54, "end": 1271.86, "word": " of", "probability": 0.96435546875}, {"start": 1271.86, "end": 1271.98, "word": " the", "probability": 0.91796875}, {"start": 1271.98, "end": 1272.34, "word": " data", "probability": 0.943359375}, {"start": 1272.34, "end": 1273.04, "word": " will", "probability": 0.84130859375}, {"start": 1273.04, "end": 1273.44, "word": " fall", "probability": 0.8583984375}, {"start": 1273.44, "end": 1273.9, "word": " within", "probability": 0.9208984375}, {"start": 1273.9, "end": 1274.48, "word": " three", "probability": 0.60400390625}, {"start": 1274.48, "end": 1274.9, "word": " standard", "probability": 0.8798828125}, {"start": 1274.9, "end": 1275.38, "word": " deviations", "probability": 0.78515625}, {"start": 1275.38, "end": 1275.66, "word": " of", "probability": 0.96044921875}, {"start": 1275.66, "end": 1275.78, "word": " the", 
"probability": 0.794921875}, {"start": 1275.78, "end": 1276.14, "word": " population", "probability": 0.7314453125}, {"start": 1276.14, "end": 1276.46, "word": " mean.", "probability": 0.9755859375}], "temperature": 1.0}, {"id": 49, "seek": 129871, "start": 1278.51, "end": 1298.71, "text": " regardless of how the data are distributed around them. So here, we have two scenarios. One, if the data is symmetric, which is called empirical rule 68959917. And the other one is called shape-by-shape rule, and that regardless of the shape of the data.", "tokens": [10060, 295, 577, 264, 1412, 366, 12631, 926, 552, 13, 407, 510, 11, 321, 362, 732, 15077, 13, 1485, 11, 498, 264, 1412, 307, 32330, 11, 597, 307, 1219, 31886, 4978, 23317, 15718, 8494, 7773, 13, 400, 264, 661, 472, 307, 1219, 3909, 12, 2322, 12, 82, 42406, 4978, 11, 293, 300, 10060, 295, 264, 3909, 295, 264, 1412, 13], "avg_logprob": -0.22643443013800948, "compression_ratio": 1.526946107784431, "no_speech_prob": 0.0, "words": [{"start": 1278.51, "end": 1279.03, "word": " regardless", "probability": 0.4345703125}, {"start": 1279.03, "end": 1279.55, "word": " of", "probability": 0.95703125}, {"start": 1279.55, "end": 1279.83, "word": " how", "probability": 0.93017578125}, {"start": 1279.83, "end": 1280.03, "word": " the", "probability": 0.91943359375}, {"start": 1280.03, "end": 1280.41, "word": " data", "probability": 0.95068359375}, {"start": 1280.41, "end": 1281.37, "word": " are", "probability": 0.93798828125}, {"start": 1281.37, "end": 1282.07, "word": " distributed", "probability": 0.91796875}, {"start": 1282.07, "end": 1282.61, "word": " around", "probability": 0.93994140625}, {"start": 1282.61, "end": 1282.81, "word": " them.", "probability": 0.646484375}, {"start": 1283.33, "end": 1283.83, "word": " So", "probability": 0.92626953125}, {"start": 1283.83, "end": 1284.09, "word": " here,", "probability": 0.7255859375}, {"start": 1284.19, "end": 1284.29, "word": " we", "probability": 0.96044921875}, {"start": 
1284.29, "end": 1284.47, "word": " have", "probability": 0.9453125}, {"start": 1284.47, "end": 1284.63, "word": " two", "probability": 0.89794921875}, {"start": 1284.63, "end": 1285.11, "word": " scenarios.", "probability": 0.90087890625}, {"start": 1285.73, "end": 1286.03, "word": " One,", "probability": 0.923828125}, {"start": 1286.13, "end": 1286.23, "word": " if", "probability": 0.94677734375}, {"start": 1286.23, "end": 1286.35, "word": " the", "probability": 0.91015625}, {"start": 1286.35, "end": 1286.55, "word": " data", "probability": 0.9423828125}, {"start": 1286.55, "end": 1286.75, "word": " is", "probability": 0.93408203125}, {"start": 1286.75, "end": 1287.13, "word": " symmetric,", "probability": 0.79833984375}, {"start": 1288.07, "end": 1288.31, "word": " which", "probability": 0.403076171875}, {"start": 1288.31, "end": 1288.39, "word": " is", "probability": 0.9462890625}, {"start": 1288.39, "end": 1288.63, "word": " called", "probability": 0.904296875}, {"start": 1288.63, "end": 1289.01, "word": " empirical", "probability": 0.67529296875}, {"start": 1289.01, "end": 1289.39, "word": " rule", "probability": 0.88720703125}, {"start": 1289.39, "end": 1291.41, "word": " 68959917.", "probability": 0.8505859375}, {"start": 1292.37, "end": 1292.83, "word": " And", "probability": 0.93310546875}, {"start": 1292.83, "end": 1292.93, "word": " the", "probability": 0.89404296875}, {"start": 1292.93, "end": 1293.13, "word": " other", "probability": 0.884765625}, {"start": 1293.13, "end": 1293.33, "word": " one", "probability": 0.921875}, {"start": 1293.33, "end": 1293.51, "word": " is", "probability": 0.94189453125}, {"start": 1293.51, "end": 1293.97, "word": " called", "probability": 0.892578125}, {"start": 1293.97, "end": 1294.53, "word": " shape", "probability": 0.84912109375}, {"start": 1294.53, "end": 1294.71, "word": "-by", "probability": 0.4078369140625}, {"start": 1294.71, "end": 1294.91, "word": "-shape", "probability": 0.9104817708333334}, {"start": 
1294.91, "end": 1295.27, "word": " rule,", "probability": 0.89501953125}, {"start": 1296.01, "end": 1296.19, "word": " and", "probability": 0.908203125}, {"start": 1296.19, "end": 1296.45, "word": " that", "probability": 0.73974609375}, {"start": 1296.45, "end": 1297.01, "word": " regardless", "probability": 0.59423828125}, {"start": 1297.01, "end": 1297.59, "word": " of", "probability": 0.96630859375}, {"start": 1297.59, "end": 1297.79, "word": " the", "probability": 0.91259765625}, {"start": 1297.79, "end": 1298.13, "word": " shape", "probability": 0.9111328125}, {"start": 1298.13, "end": 1298.37, "word": " of", "probability": 0.96875}, {"start": 1298.37, "end": 1298.49, "word": " the", "probability": 0.91845703125}, {"start": 1298.49, "end": 1298.71, "word": " data.", "probability": 0.888671875}], "temperature": 1.0}, {"id": 50, "seek": 132983, "start": 1301.89, "end": 1329.83, "text": " Excuse me? Yes. In this case, you don't know the distribution of the data. And the reality is sometimes the data has unknown distribution. For this reason, we have to use chip-chip portions. 
That's all for empirical rule and chip-chip rule.", "tokens": [11359, 385, 30, 1079, 13, 682, 341, 1389, 11, 291, 500, 380, 458, 264, 7316, 295, 264, 1412, 13, 400, 264, 4103, 307, 2171, 264, 1412, 575, 9841, 7316, 13, 1171, 341, 1778, 11, 321, 362, 281, 764, 11409, 12, 339, 647, 25070, 13, 663, 311, 439, 337, 31886, 4978, 293, 11409, 12, 339, 647, 4978, 13], "avg_logprob": -0.20757004207578197, "compression_ratio": 1.4695121951219512, "no_speech_prob": 0.0, "words": [{"start": 1301.89, "end": 1302.25, "word": " Excuse", "probability": 0.552734375}, {"start": 1302.25, "end": 1302.43, "word": " me?", "probability": 0.966796875}, {"start": 1304.99, "end": 1305.63, "word": " Yes.", "probability": 0.159423828125}, {"start": 1307.19, "end": 1307.31, "word": " In", "probability": 0.7197265625}, {"start": 1307.31, "end": 1307.47, "word": " this", "probability": 0.9228515625}, {"start": 1307.47, "end": 1307.71, "word": " case,", "probability": 0.91357421875}, {"start": 1308.03, "end": 1308.39, "word": " you", "probability": 0.9580078125}, {"start": 1308.39, "end": 1308.67, "word": " don't", "probability": 0.97607421875}, {"start": 1308.67, "end": 1308.93, "word": " know", "probability": 0.900390625}, {"start": 1308.93, "end": 1309.21, "word": " the", "probability": 0.92041015625}, {"start": 1309.21, "end": 1309.87, "word": " distribution", "probability": 0.8515625}, {"start": 1309.87, "end": 1310.07, "word": " of", "probability": 0.96728515625}, {"start": 1310.07, "end": 1310.19, "word": " the", "probability": 0.9130859375}, {"start": 1310.19, "end": 1310.47, "word": " data.", "probability": 0.94189453125}, {"start": 1310.61, "end": 1310.75, "word": " And", "probability": 0.86376953125}, {"start": 1310.75, "end": 1310.91, "word": " the", "probability": 0.55517578125}, {"start": 1310.91, "end": 1311.25, "word": " reality", "probability": 0.96826171875}, {"start": 1311.25, "end": 1311.49, "word": " is", "probability": 0.79931640625}, {"start": 1311.49, "end": 1311.97, 
"word": " sometimes", "probability": 0.76416015625}, {"start": 1311.97, "end": 1313.09, "word": " the", "probability": 0.67138671875}, {"start": 1313.09, "end": 1313.79, "word": " data", "probability": 0.9326171875}, {"start": 1313.79, "end": 1315.29, "word": " has", "probability": 0.9248046875}, {"start": 1315.29, "end": 1316.67, "word": " unknown", "probability": 0.90625}, {"start": 1316.67, "end": 1317.95, "word": " distribution.", "probability": 0.8662109375}, {"start": 1318.37, "end": 1318.65, "word": " For", "probability": 0.962890625}, {"start": 1318.65, "end": 1318.87, "word": " this", "probability": 0.94189453125}, {"start": 1318.87, "end": 1319.13, "word": " reason,", "probability": 0.97021484375}, {"start": 1319.17, "end": 1319.29, "word": " we", "probability": 0.947265625}, {"start": 1319.29, "end": 1319.45, "word": " have", "probability": 0.94140625}, {"start": 1319.45, "end": 1319.57, "word": " to", "probability": 0.96435546875}, {"start": 1319.57, "end": 1319.93, "word": " use", "probability": 0.8740234375}, {"start": 1319.93, "end": 1320.69, "word": " chip", "probability": 0.09423828125}, {"start": 1320.69, "end": 1321.07, "word": "-chip", "probability": 0.7648111979166666}, {"start": 1321.07, "end": 1322.59, "word": " portions.", "probability": 0.385009765625}, {"start": 1325.41, "end": 1326.05, "word": " That's", "probability": 0.931396484375}, {"start": 1326.05, "end": 1326.37, "word": " all", "probability": 0.94775390625}, {"start": 1326.37, "end": 1326.93, "word": " for", "probability": 0.94384765625}, {"start": 1326.93, "end": 1328.39, "word": " empirical", "probability": 0.89404296875}, {"start": 1328.39, "end": 1328.75, "word": " rule", "probability": 0.7236328125}, {"start": 1328.75, "end": 1329.05, "word": " and", "probability": 0.9306640625}, {"start": 1329.05, "end": 1329.27, "word": " chip", "probability": 0.94775390625}, {"start": 1329.27, "end": 1329.59, "word": "-chip", "probability": 0.95361328125}, {"start": 1329.59, "end": 
1329.83, "word": " rule.", "probability": 0.859375}], "temperature": 1.0}, {"id": 51, "seek": 135889, "start": 1331.23, "end": 1358.89, "text": " The next topic is quartile measures. So far, we have discussed central tendency measures, and we have talked about mean, median, and more. Then we moved to location of variability or spread or dispersion. And we talked about range, variance, and standardization.", "tokens": [440, 958, 4829, 307, 20837, 794, 8000, 13, 407, 1400, 11, 321, 362, 7152, 5777, 18187, 8000, 11, 293, 321, 362, 2825, 466, 914, 11, 26779, 11, 293, 544, 13, 1396, 321, 4259, 281, 4914, 295, 35709, 420, 3974, 420, 24631, 313, 13, 400, 321, 2825, 466, 3613, 11, 21977, 11, 293, 3832, 2144, 13], "avg_logprob": -0.17954798814441478, "compression_ratio": 1.5470588235294118, "no_speech_prob": 0.0, "words": [{"start": 1331.23, "end": 1331.51, "word": " The", "probability": 0.61669921875}, {"start": 1331.51, "end": 1331.81, "word": " next", "probability": 0.9443359375}, {"start": 1331.81, "end": 1332.31, "word": " topic", "probability": 0.95458984375}, {"start": 1332.31, "end": 1334.65, "word": " is", "probability": 0.9345703125}, {"start": 1334.65, "end": 1336.03, "word": " quartile", "probability": 0.91259765625}, {"start": 1336.03, "end": 1336.35, "word": " measures.", "probability": 0.82470703125}, {"start": 1337.19, "end": 1337.63, "word": " So", "probability": 0.970703125}, {"start": 1337.63, "end": 1337.93, "word": " far,", "probability": 0.9453125}, {"start": 1338.03, "end": 1338.15, "word": " we", "probability": 0.96337890625}, {"start": 1338.15, "end": 1338.39, "word": " have", "probability": 0.94287109375}, {"start": 1338.39, "end": 1339.47, "word": " discussed", "probability": 0.8837890625}, {"start": 1339.47, "end": 1342.41, "word": " central", "probability": 0.8642578125}, {"start": 1342.41, "end": 1342.85, "word": " tendency", "probability": 0.90185546875}, {"start": 1342.85, "end": 1343.31, "word": " measures,", "probability": 0.84814453125}, 
{"start": 1343.71, "end": 1344.17, "word": " and", "probability": 0.9345703125}, {"start": 1344.17, "end": 1344.33, "word": " we", "probability": 0.9638671875}, {"start": 1344.33, "end": 1344.49, "word": " have", "probability": 0.94140625}, {"start": 1344.49, "end": 1344.77, "word": " talked", "probability": 0.89453125}, {"start": 1344.77, "end": 1345.31, "word": " about", "probability": 0.91162109375}, {"start": 1345.31, "end": 1345.85, "word": " mean,", "probability": 0.8388671875}, {"start": 1346.21, "end": 1346.21, "word": " median,", "probability": 0.9365234375}, {"start": 1346.35, "end": 1346.69, "word": " and", "probability": 0.53271484375}, {"start": 1346.69, "end": 1346.83, "word": " more.", "probability": 0.88427734375}, {"start": 1347.95, "end": 1348.27, "word": " Then", "probability": 0.64794921875}, {"start": 1348.27, "end": 1348.45, "word": " we", "probability": 0.74267578125}, {"start": 1348.45, "end": 1348.81, "word": " moved", "probability": 0.67431640625}, {"start": 1348.81, "end": 1349.35, "word": " to", "probability": 0.9609375}, {"start": 1349.35, "end": 1350.27, "word": " location", "probability": 0.7060546875}, {"start": 1350.27, "end": 1350.63, "word": " of", "probability": 0.88720703125}, {"start": 1350.63, "end": 1351.21, "word": " variability", "probability": 0.97509765625}, {"start": 1351.21, "end": 1351.65, "word": " or", "probability": 0.73681640625}, {"start": 1351.65, "end": 1352.11, "word": " spread", "probability": 0.88818359375}, {"start": 1352.11, "end": 1352.83, "word": " or", "probability": 0.6572265625}, {"start": 1352.83, "end": 1353.43, "word": " dispersion.", "probability": 0.9482421875}, {"start": 1353.61, "end": 1354.29, "word": " And", "probability": 0.94580078125}, {"start": 1354.29, "end": 1354.43, "word": " we", "probability": 0.95654296875}, {"start": 1354.43, "end": 1354.69, "word": " talked", "probability": 0.771484375}, {"start": 1354.69, "end": 1355.25, "word": " about", "probability": 0.896484375}, {"start": 
1355.25, "end": 1356.57, "word": " range,", "probability": 0.87548828125}, {"start": 1357.29, "end": 1357.81, "word": " variance,", "probability": 0.8896484375}, {"start": 1357.95, "end": 1358.21, "word": " and", "probability": 0.94580078125}, {"start": 1358.21, "end": 1358.89, "word": " standardization.", "probability": 0.7275390625}], "temperature": 1.0}, {"id": 52, "seek": 138715, "start": 1361.57, "end": 1387.15, "text": " And we said that outliers affect the mean much more than the median. And also, outliers affect the range. Here, we'll talk about other measures of the data, which is called quartile measures. Here, actually, we'll talk about two measures. First one is called first quartile,", "tokens": [400, 321, 848, 300, 484, 23646, 3345, 264, 914, 709, 544, 813, 264, 26779, 13, 400, 611, 11, 484, 23646, 3345, 264, 3613, 13, 1692, 11, 321, 603, 751, 466, 661, 8000, 295, 264, 1412, 11, 597, 307, 1219, 20837, 794, 8000, 13, 1692, 11, 767, 11, 321, 603, 751, 466, 732, 8000, 13, 2386, 472, 307, 1219, 700, 20837, 794, 11], "avg_logprob": -0.19543651219398256, "compression_ratio": 1.7973856209150327, "no_speech_prob": 0.0, "words": [{"start": 1361.5700000000002, "end": 1362.17, "word": " And", "probability": 0.77099609375}, {"start": 1362.17, "end": 1362.77, "word": " we", "probability": 0.73291015625}, {"start": 1362.77, "end": 1362.99, "word": " said", "probability": 0.9248046875}, {"start": 1362.99, "end": 1363.87, "word": " that", "probability": 0.88671875}, {"start": 1363.87, "end": 1364.59, "word": " outliers", "probability": 0.91845703125}, {"start": 1364.59, "end": 1365.89, "word": " affect", "probability": 0.8515625}, {"start": 1365.89, "end": 1366.69, "word": " the", "probability": 0.87353515625}, {"start": 1366.69, "end": 1366.89, "word": " mean", "probability": 0.9658203125}, {"start": 1366.89, "end": 1368.23, "word": " much", "probability": 0.8642578125}, {"start": 1368.23, "end": 1368.51, "word": " more", "probability": 0.93505859375}, {"start": 
1368.51, "end": 1368.67, "word": " than", "probability": 0.94482421875}, {"start": 1368.67, "end": 1368.87, "word": " the", "probability": 0.91552734375}, {"start": 1368.87, "end": 1369.11, "word": " median.", "probability": 0.8974609375}, {"start": 1370.17, "end": 1370.39, "word": " And", "probability": 0.9462890625}, {"start": 1370.39, "end": 1370.69, "word": " also,", "probability": 0.822265625}, {"start": 1370.73, "end": 1371.09, "word": " outliers", "probability": 0.948974609375}, {"start": 1371.09, "end": 1371.47, "word": " affect", "probability": 0.9296875}, {"start": 1371.47, "end": 1371.65, "word": " the", "probability": 0.8681640625}, {"start": 1371.65, "end": 1371.95, "word": " range.", "probability": 0.8994140625}, {"start": 1372.99, "end": 1373.29, "word": " Here,", "probability": 0.8203125}, {"start": 1373.35, "end": 1373.47, "word": " we'll", "probability": 0.86328125}, {"start": 1373.47, "end": 1373.75, "word": " talk", "probability": 0.880859375}, {"start": 1373.75, "end": 1374.89, "word": " about", "probability": 0.325439453125}, {"start": 1374.89, "end": 1375.37, "word": " other", "probability": 0.642578125}, {"start": 1375.37, "end": 1375.73, "word": " measures", "probability": 0.83837890625}, {"start": 1375.73, "end": 1376.85, "word": " of", "probability": 0.95849609375}, {"start": 1376.85, "end": 1377.01, "word": " the", "probability": 0.90869140625}, {"start": 1377.01, "end": 1377.29, "word": " data,", "probability": 0.92724609375}, {"start": 1377.69, "end": 1377.93, "word": " which", "probability": 0.947265625}, {"start": 1377.93, "end": 1378.13, "word": " is", "probability": 0.912109375}, {"start": 1378.13, "end": 1378.57, "word": " called", "probability": 0.8984375}, {"start": 1378.57, "end": 1379.31, "word": " quartile", "probability": 0.901123046875}, {"start": 1379.31, "end": 1379.57, "word": " measures.", "probability": 0.859375}, {"start": 1381.19, "end": 1381.79, "word": " Here,", "probability": 0.8525390625}, {"start": 1381.91, 
"end": 1382.29, "word": " actually,", "probability": 0.880859375}, {"start": 1382.31, "end": 1382.45, "word": " we'll", "probability": 0.766357421875}, {"start": 1382.45, "end": 1382.61, "word": " talk", "probability": 0.89697265625}, {"start": 1382.61, "end": 1382.85, "word": " about", "probability": 0.90380859375}, {"start": 1382.85, "end": 1383.07, "word": " two", "probability": 0.93310546875}, {"start": 1383.07, "end": 1383.45, "word": " measures.", "probability": 0.89404296875}, {"start": 1384.27, "end": 1384.65, "word": " First", "probability": 0.7001953125}, {"start": 1384.65, "end": 1384.85, "word": " one", "probability": 0.92431640625}, {"start": 1384.85, "end": 1385.01, "word": " is", "probability": 0.939453125}, {"start": 1385.01, "end": 1385.41, "word": " called", "probability": 0.90625}, {"start": 1385.41, "end": 1386.45, "word": " first", "probability": 0.271728515625}, {"start": 1386.45, "end": 1387.15, "word": " quartile,", "probability": 0.947265625}], "temperature": 1.0}, {"id": 53, "seek": 141773, "start": 1389.33, "end": 1417.73, "text": " And the other one is third quartile. So we have two measures, first and third quartile. Quartiles split the rank data into four equal segments. 
I mean, these measures split the data you have into four equal parts.", "tokens": [400, 264, 661, 472, 307, 2636, 20837, 794, 13, 407, 321, 362, 732, 8000, 11, 700, 293, 2636, 20837, 794, 13, 2326, 446, 4680, 7472, 264, 6181, 1412, 666, 1451, 2681, 19904, 13, 286, 914, 11, 613, 8000, 7472, 264, 1412, 291, 362, 666, 1451, 2681, 3166, 13], "avg_logprob": -0.15425701530612246, "compression_ratio": 1.5970149253731343, "no_speech_prob": 0.0, "words": [{"start": 1389.33, "end": 1389.71, "word": " And", "probability": 0.68359375}, {"start": 1389.71, "end": 1389.89, "word": " the", "probability": 0.8974609375}, {"start": 1389.89, "end": 1390.13, "word": " other", "probability": 0.884765625}, {"start": 1390.13, "end": 1390.53, "word": " one", "probability": 0.92333984375}, {"start": 1390.53, "end": 1391.55, "word": " is", "probability": 0.92626953125}, {"start": 1391.55, "end": 1391.95, "word": " third", "probability": 0.7587890625}, {"start": 1391.95, "end": 1392.51, "word": " quartile.", "probability": 0.9208984375}, {"start": 1392.75, "end": 1392.85, "word": " So", "probability": 0.94970703125}, {"start": 1392.85, "end": 1393.01, "word": " we", "probability": 0.76611328125}, {"start": 1393.01, "end": 1393.17, "word": " have", "probability": 0.94287109375}, {"start": 1393.17, "end": 1393.73, "word": " two", "probability": 0.939453125}, {"start": 1393.73, "end": 1394.15, "word": " measures,", "probability": 0.751953125}, {"start": 1395.47, "end": 1396.95, "word": " first", "probability": 0.88671875}, {"start": 1396.95, "end": 1398.47, "word": " and", "probability": 0.611328125}, {"start": 1398.47, "end": 1398.79, "word": " third", "probability": 0.90478515625}, {"start": 1398.79, "end": 1399.39, "word": " quartile.", "probability": 0.954833984375}, {"start": 1400.93, "end": 1401.79, "word": " Quartiles", "probability": 0.9456380208333334}, {"start": 1401.79, "end": 1403.89, "word": " split", "probability": 0.95068359375}, {"start": 1403.89, "end": 1405.53, "word": " the", 
"probability": 0.9189453125}, {"start": 1405.53, "end": 1406.03, "word": " rank", "probability": 0.5888671875}, {"start": 1406.03, "end": 1406.59, "word": " data", "probability": 0.93994140625}, {"start": 1406.59, "end": 1407.33, "word": " into", "probability": 0.84375}, {"start": 1407.33, "end": 1407.79, "word": " four", "probability": 0.9375}, {"start": 1407.79, "end": 1408.31, "word": " equal", "probability": 0.58056640625}, {"start": 1408.31, "end": 1408.83, "word": " segments.", "probability": 0.72314453125}, {"start": 1410.77, "end": 1410.99, "word": " I", "probability": 0.9736328125}, {"start": 1410.99, "end": 1411.27, "word": " mean,", "probability": 0.96337890625}, {"start": 1412.57, "end": 1412.93, "word": " these", "probability": 0.79833984375}, {"start": 1412.93, "end": 1413.39, "word": " measures", "probability": 0.86474609375}, {"start": 1413.39, "end": 1414.87, "word": " split", "probability": 0.94970703125}, {"start": 1414.87, "end": 1415.07, "word": " the", "probability": 0.9208984375}, {"start": 1415.07, "end": 1415.35, "word": " data", "probability": 0.94580078125}, {"start": 1415.35, "end": 1415.57, "word": " you", "probability": 0.95166015625}, {"start": 1415.57, "end": 1415.89, "word": " have", "probability": 0.94970703125}, {"start": 1415.89, "end": 1416.33, "word": " into", "probability": 0.845703125}, {"start": 1416.33, "end": 1416.73, "word": " four", "probability": 0.94482421875}, {"start": 1416.73, "end": 1417.19, "word": " equal", "probability": 0.8349609375}, {"start": 1417.19, "end": 1417.73, "word": " parts.", "probability": 0.84423828125}], "temperature": 1.0}, {"id": 54, "seek": 145117, "start": 1422.85, "end": 1451.17, "text": " Q1 has 25% of the data fall below it. I mean 25% of the values lie below Q1. So it means 75% of the values above it. So 25 below and 75 above. But you have to be careful that the data is arranged from smallest to largest. 
So in this case, Q1.", "tokens": [1249, 16, 575, 3552, 4, 295, 264, 1412, 2100, 2507, 309, 13, 286, 914, 3552, 4, 295, 264, 4190, 4544, 2507, 1249, 16, 13, 407, 309, 1355, 9562, 4, 295, 264, 4190, 3673, 309, 13, 407, 3552, 2507, 293, 9562, 3673, 13, 583, 291, 362, 281, 312, 5026, 300, 264, 1412, 307, 18721, 490, 16998, 281, 6443, 13, 407, 294, 341, 1389, 11, 1249, 16, 13], "avg_logprob": -0.1223180941236553, "compression_ratio": 1.5576923076923077, "no_speech_prob": 0.0, "words": [{"start": 1422.85, "end": 1423.57, "word": " Q1", "probability": 0.8154296875}, {"start": 1423.57, "end": 1424.29, "word": " has", "probability": 0.85498046875}, {"start": 1424.29, "end": 1424.85, "word": " 25", "probability": 0.84033203125}, {"start": 1424.85, "end": 1425.31, "word": "%", "probability": 0.84765625}, {"start": 1425.31, "end": 1425.69, "word": " of", "probability": 0.9580078125}, {"start": 1425.69, "end": 1425.83, "word": " the", "probability": 0.90380859375}, {"start": 1425.83, "end": 1426.11, "word": " data", "probability": 0.94580078125}, {"start": 1426.11, "end": 1426.43, "word": " fall", "probability": 0.65380859375}, {"start": 1426.43, "end": 1426.73, "word": " below", "probability": 0.9189453125}, {"start": 1426.73, "end": 1427.05, "word": " it.", "probability": 0.95458984375}, {"start": 1427.63, "end": 1427.83, "word": " I", "probability": 0.98876953125}, {"start": 1427.83, "end": 1427.93, "word": " mean", "probability": 0.9638671875}, {"start": 1427.93, "end": 1428.35, "word": " 25", "probability": 0.60693359375}, {"start": 1428.35, "end": 1428.69, "word": "%", "probability": 0.99609375}, {"start": 1428.69, "end": 1428.97, "word": " of", "probability": 0.95849609375}, {"start": 1428.97, "end": 1429.11, "word": " the", "probability": 0.9140625}, {"start": 1429.11, "end": 1429.49, "word": " values", "probability": 0.96142578125}, {"start": 1429.49, "end": 1430.95, "word": " lie", "probability": 0.70556640625}, {"start": 1430.95, "end": 1431.97, "word": " below", 
"probability": 0.92333984375}, {"start": 1431.97, "end": 1432.53, "word": " Q1.", "probability": 0.984130859375}, {"start": 1433.05, "end": 1433.47, "word": " So", "probability": 0.9501953125}, {"start": 1433.47, "end": 1433.63, "word": " it", "probability": 0.8896484375}, {"start": 1433.63, "end": 1433.99, "word": " means", "probability": 0.9248046875}, {"start": 1433.99, "end": 1434.63, "word": " 75", "probability": 0.94873046875}, {"start": 1434.63, "end": 1435.41, "word": "%", "probability": 0.9951171875}, {"start": 1435.41, "end": 1436.21, "word": " of", "probability": 0.955078125}, {"start": 1436.21, "end": 1436.41, "word": " the", "probability": 0.912109375}, {"start": 1436.41, "end": 1436.85, "word": " values", "probability": 0.96533203125}, {"start": 1436.85, "end": 1437.45, "word": " above", "probability": 0.95068359375}, {"start": 1437.45, "end": 1437.89, "word": " it.", "probability": 0.9482421875}, {"start": 1438.23, "end": 1438.45, "word": " So", "probability": 0.9501953125}, {"start": 1438.45, "end": 1438.89, "word": " 25", "probability": 0.9150390625}, {"start": 1438.89, "end": 1439.59, "word": " below", "probability": 0.54541015625}, {"start": 1439.59, "end": 1440.87, "word": " and", "probability": 0.79150390625}, {"start": 1440.87, "end": 1441.51, "word": " 75", "probability": 0.96337890625}, {"start": 1441.51, "end": 1441.99, "word": " above.", "probability": 0.951171875}, {"start": 1443.47, "end": 1444.19, "word": " But", "probability": 0.83447265625}, {"start": 1444.19, "end": 1444.41, "word": " you", "probability": 0.9501953125}, {"start": 1444.41, "end": 1444.59, "word": " have", "probability": 0.94873046875}, {"start": 1444.59, "end": 1444.71, "word": " to", "probability": 0.970703125}, {"start": 1444.71, "end": 1444.85, "word": " be", "probability": 0.9599609375}, {"start": 1444.85, "end": 1445.13, "word": " careful", "probability": 0.966796875}, {"start": 1445.13, "end": 1445.41, "word": " that", "probability": 0.849609375}, {"start": 
1445.41, "end": 1445.63, "word": " the", "probability": 0.9130859375}, {"start": 1445.63, "end": 1445.89, "word": " data", "probability": 0.94287109375}, {"start": 1445.89, "end": 1446.15, "word": " is", "probability": 0.94189453125}, {"start": 1446.15, "end": 1447.05, "word": " arranged", "probability": 0.74609375}, {"start": 1447.05, "end": 1447.37, "word": " from", "probability": 0.87548828125}, {"start": 1447.37, "end": 1447.95, "word": " smallest", "probability": 0.89013671875}, {"start": 1447.95, "end": 1448.41, "word": " to", "probability": 0.96728515625}, {"start": 1448.41, "end": 1448.73, "word": " largest.", "probability": 0.90576171875}, {"start": 1449.79, "end": 1450.07, "word": " So", "probability": 0.951171875}, {"start": 1450.07, "end": 1450.19, "word": " in", "probability": 0.81591796875}, {"start": 1450.19, "end": 1450.35, "word": " this", "probability": 0.94482421875}, {"start": 1450.35, "end": 1450.63, "word": " case,", "probability": 0.91845703125}, {"start": 1450.67, "end": 1451.17, "word": " Q1.", "probability": 0.994384765625}], "temperature": 1.0}, {"id": 55, "seek": 147879, "start": 1452.01, "end": 1478.79, "text": " is a value that has 25% below it. So Q2 is called the median. The median, the value in the middle when we arrange the data from smallest to largest. So that means 50% of the data below and also 50% of the data above. 
The other measure is called theoretical qualifying.", "tokens": [307, 257, 2158, 300, 575, 3552, 4, 2507, 309, 13, 407, 1249, 17, 307, 1219, 264, 26779, 13, 440, 26779, 11, 264, 2158, 294, 264, 2808, 562, 321, 9424, 264, 1412, 490, 16998, 281, 6443, 13, 407, 300, 1355, 2625, 4, 295, 264, 1412, 2507, 293, 611, 2625, 4, 295, 264, 1412, 3673, 13, 440, 661, 3481, 307, 1219, 20864, 41793, 13], "avg_logprob": -0.22259424272037687, "compression_ratio": 1.6402439024390243, "no_speech_prob": 0.0, "words": [{"start": 1452.01, "end": 1452.29, "word": " is", "probability": 0.322998046875}, {"start": 1452.29, "end": 1452.43, "word": " a", "probability": 0.9755859375}, {"start": 1452.43, "end": 1452.75, "word": " value", "probability": 0.9775390625}, {"start": 1452.75, "end": 1453.21, "word": " that", "probability": 0.93603515625}, {"start": 1453.21, "end": 1453.73, "word": " has", "probability": 0.8828125}, {"start": 1453.73, "end": 1454.97, "word": " 25", "probability": 0.78564453125}, {"start": 1454.97, "end": 1455.79, "word": "%", "probability": 0.88623046875}, {"start": 1455.79, "end": 1456.53, "word": " below", "probability": 0.90380859375}, {"start": 1456.53, "end": 1457.51, "word": " it.", "probability": 0.8359375}, {"start": 1458.17, "end": 1458.23, "word": " So", "probability": 0.330810546875}, {"start": 1458.23, "end": 1459.03, "word": " Q2", "probability": 0.80517578125}, {"start": 1459.03, "end": 1459.25, "word": " is", "probability": 0.9365234375}, {"start": 1459.25, "end": 1459.47, "word": " called", "probability": 0.88427734375}, {"start": 1459.47, "end": 1459.63, "word": " the", "probability": 0.8759765625}, {"start": 1459.63, "end": 1459.91, "word": " median.", "probability": 0.93701171875}, {"start": 1460.79, "end": 1461.23, "word": " The", "probability": 0.89501953125}, {"start": 1461.23, "end": 1461.43, "word": " median,", "probability": 0.91943359375}, {"start": 1461.57, "end": 1461.61, "word": " the", "probability": 0.90283203125}, {"start": 1461.61, 
"end": 1461.77, "word": " value", "probability": 0.97509765625}, {"start": 1461.77, "end": 1461.91, "word": " in", "probability": 0.89892578125}, {"start": 1461.91, "end": 1462.01, "word": " the", "probability": 0.92041015625}, {"start": 1462.01, "end": 1462.21, "word": " middle", "probability": 0.94580078125}, {"start": 1462.21, "end": 1462.45, "word": " when", "probability": 0.57421875}, {"start": 1462.45, "end": 1462.57, "word": " we", "probability": 0.845703125}, {"start": 1462.57, "end": 1462.89, "word": " arrange", "probability": 0.85546875}, {"start": 1462.89, "end": 1463.03, "word": " the", "probability": 0.71630859375}, {"start": 1463.03, "end": 1463.23, "word": " data", "probability": 0.9384765625}, {"start": 1463.23, "end": 1463.51, "word": " from", "probability": 0.87158203125}, {"start": 1463.51, "end": 1463.89, "word": " smallest", "probability": 0.9404296875}, {"start": 1463.89, "end": 1464.15, "word": " to", "probability": 0.96533203125}, {"start": 1464.15, "end": 1464.51, "word": " largest.", "probability": 0.90673828125}, {"start": 1465.61, "end": 1466.25, "word": " So", "probability": 0.95556640625}, {"start": 1466.25, "end": 1466.51, "word": " that", "probability": 0.9130859375}, {"start": 1466.51, "end": 1466.85, "word": " means", "probability": 0.93994140625}, {"start": 1466.85, "end": 1467.35, "word": " 50", "probability": 0.9169921875}, {"start": 1467.35, "end": 1467.61, "word": "%", "probability": 0.99169921875}, {"start": 1467.61, "end": 1467.87, "word": " of", "probability": 0.95751953125}, {"start": 1467.87, "end": 1468.01, "word": " the", "probability": 0.91650390625}, {"start": 1468.01, "end": 1468.39, "word": " data", "probability": 0.9443359375}, {"start": 1468.39, "end": 1469.23, "word": " below", "probability": 0.896484375}, {"start": 1469.23, "end": 1469.85, "word": " and", "probability": 0.63330078125}, {"start": 1469.85, "end": 1470.29, "word": " also", "probability": 0.84814453125}, {"start": 1470.29, "end": 1470.65, "word": " 
50", "probability": 0.9443359375}, {"start": 1470.65, "end": 1471.01, "word": "%", "probability": 0.9990234375}, {"start": 1471.01, "end": 1471.19, "word": " of", "probability": 0.9560546875}, {"start": 1471.19, "end": 1471.33, "word": " the", "probability": 0.91650390625}, {"start": 1471.33, "end": 1471.55, "word": " data", "probability": 0.93896484375}, {"start": 1471.55, "end": 1471.87, "word": " above.", "probability": 0.96142578125}, {"start": 1474.33, "end": 1474.97, "word": " The", "probability": 0.88916015625}, {"start": 1474.97, "end": 1475.33, "word": " other", "probability": 0.8857421875}, {"start": 1475.33, "end": 1475.61, "word": " measure", "probability": 0.6904296875}, {"start": 1475.61, "end": 1475.89, "word": " is", "probability": 0.89111328125}, {"start": 1475.89, "end": 1476.37, "word": " called", "probability": 0.90234375}, {"start": 1476.37, "end": 1478.25, "word": " theoretical", "probability": 0.379150390625}, {"start": 1478.25, "end": 1478.79, "word": " qualifying.", "probability": 0.04119873046875}], "temperature": 1.0}, {"id": 56, "seek": 150573, "start": 1479.67, "end": 1505.73, "text": " In this case, we have 25% of the data above Q3 and 75% of the data below Q3. 
So quartiles split the rank data into four equal segments, Q1 25% to the left, Q2 50% to the left, Q3 75% to the left, and 25% to the right.", "tokens": [682, 341, 1389, 11, 321, 362, 3552, 4, 295, 264, 1412, 3673, 1249, 18, 293, 9562, 4, 295, 264, 1412, 2507, 1249, 18, 13, 407, 20837, 4680, 7472, 264, 6181, 1412, 666, 1451, 2681, 19904, 11, 1249, 16, 3552, 4, 281, 264, 1411, 11, 1249, 17, 2625, 4, 281, 264, 1411, 11, 1249, 18, 9562, 4, 281, 264, 1411, 11, 293, 3552, 4, 281, 264, 558, 13], "avg_logprob": -0.14751838038072868, "compression_ratio": 1.5797101449275361, "no_speech_prob": 0.0, "words": [{"start": 1479.67, "end": 1479.91, "word": " In", "probability": 0.89013671875}, {"start": 1479.91, "end": 1480.17, "word": " this", "probability": 0.943359375}, {"start": 1480.17, "end": 1480.45, "word": " case,", "probability": 0.91357421875}, {"start": 1480.59, "end": 1480.65, "word": " we", "probability": 0.94873046875}, {"start": 1480.65, "end": 1480.81, "word": " have", "probability": 0.9228515625}, {"start": 1480.81, "end": 1481.27, "word": " 25", "probability": 0.734375}, {"start": 1481.27, "end": 1481.73, "word": "%", "probability": 0.89208984375}, {"start": 1481.73, "end": 1481.91, "word": " of", "probability": 0.95458984375}, {"start": 1481.91, "end": 1482.07, "word": " the", "probability": 0.9111328125}, {"start": 1482.07, "end": 1482.31, "word": " data", "probability": 0.93798828125}, {"start": 1482.31, "end": 1482.79, "word": " above", "probability": 0.94482421875}, {"start": 1482.79, "end": 1483.39, "word": " Q3", "probability": 0.73974609375}, {"start": 1483.39, "end": 1485.27, "word": " and", "probability": 0.3310546875}, {"start": 1485.27, "end": 1485.85, "word": " 75", "probability": 0.93115234375}, {"start": 1485.85, "end": 1486.37, "word": "%", "probability": 0.99609375}, {"start": 1486.37, "end": 1486.59, "word": " of", "probability": 0.888671875}, {"start": 1486.59, "end": 1486.71, "word": " the", "probability": 0.82421875}, {"start": 1486.71, 
"end": 1486.93, "word": " data", "probability": 0.93017578125}, {"start": 1486.93, "end": 1487.27, "word": " below", "probability": 0.90380859375}, {"start": 1487.27, "end": 1487.95, "word": " Q3.", "probability": 0.864990234375}, {"start": 1489.01, "end": 1489.69, "word": " So", "probability": 0.9501953125}, {"start": 1489.69, "end": 1490.49, "word": " quartiles", "probability": 0.7734375}, {"start": 1490.49, "end": 1491.73, "word": " split", "probability": 0.931640625}, {"start": 1491.73, "end": 1492.69, "word": " the", "probability": 0.859375}, {"start": 1492.69, "end": 1492.95, "word": " rank", "probability": 0.33544921875}, {"start": 1492.95, "end": 1493.31, "word": " data", "probability": 0.93603515625}, {"start": 1493.31, "end": 1493.59, "word": " into", "probability": 0.83984375}, {"start": 1493.59, "end": 1493.95, "word": " four", "probability": 0.8720703125}, {"start": 1493.95, "end": 1494.41, "word": " equal", "probability": 0.896484375}, {"start": 1494.41, "end": 1495.05, "word": " segments,", "probability": 0.896484375}, {"start": 1495.85, "end": 1496.21, "word": " Q1", "probability": 0.96484375}, {"start": 1496.21, "end": 1496.55, "word": " 25", "probability": 0.7197265625}, {"start": 1496.55, "end": 1496.91, "word": "%", "probability": 0.986328125}, {"start": 1496.91, "end": 1497.13, "word": " to", "probability": 0.9560546875}, {"start": 1497.13, "end": 1497.27, "word": " the", "probability": 0.9140625}, {"start": 1497.27, "end": 1497.51, "word": " left,", "probability": 0.9404296875}, {"start": 1498.49, "end": 1498.91, "word": " Q2", "probability": 0.99365234375}, {"start": 1498.91, "end": 1499.31, "word": " 50", "probability": 0.90234375}, {"start": 1499.31, "end": 1499.59, "word": "%", "probability": 0.99853515625}, {"start": 1499.59, "end": 1499.83, "word": " to", "probability": 0.9609375}, {"start": 1499.83, "end": 1499.97, "word": " the", "probability": 0.91162109375}, {"start": 1499.97, "end": 1500.19, "word": " left,", "probability": 
0.94091796875}, {"start": 1500.97, "end": 1501.53, "word": " Q3", "probability": 0.91552734375}, {"start": 1501.53, "end": 1502.15, "word": " 75", "probability": 0.9365234375}, {"start": 1502.15, "end": 1502.89, "word": "%", "probability": 0.99853515625}, {"start": 1502.89, "end": 1503.21, "word": " to", "probability": 0.9609375}, {"start": 1503.21, "end": 1503.35, "word": " the", "probability": 0.9072265625}, {"start": 1503.35, "end": 1503.57, "word": " left,", "probability": 0.9384765625}, {"start": 1503.69, "end": 1503.83, "word": " and", "probability": 0.93994140625}, {"start": 1503.83, "end": 1504.21, "word": " 25", "probability": 0.9580078125}, {"start": 1504.21, "end": 1504.79, "word": "%", "probability": 0.998046875}, {"start": 1504.79, "end": 1505.41, "word": " to", "probability": 0.958984375}, {"start": 1505.41, "end": 1505.53, "word": " the", "probability": 0.9111328125}, {"start": 1505.53, "end": 1505.73, "word": " right.", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 57, "seek": 152657, "start": 1507.87, "end": 1526.57, "text": " Before, we explained how to compute the median, and let's see how can we compute first and third quartile. 
If you remember, when we computed the median, first we locate the position of the median.", "tokens": [4546, 11, 321, 8825, 577, 281, 14722, 264, 26779, 11, 293, 718, 311, 536, 577, 393, 321, 14722, 700, 293, 2636, 20837, 794, 13, 759, 291, 1604, 11, 562, 321, 40610, 264, 26779, 11, 700, 321, 22370, 264, 2535, 295, 264, 26779, 13], "avg_logprob": -0.1693004322322932, "compression_ratio": 1.5511811023622046, "no_speech_prob": 0.0, "words": [{"start": 1507.87, "end": 1508.59, "word": " Before,", "probability": 0.8056640625}, {"start": 1509.19, "end": 1509.59, "word": " we", "probability": 0.94287109375}, {"start": 1509.59, "end": 1509.87, "word": " explained", "probability": 0.58349609375}, {"start": 1509.87, "end": 1510.19, "word": " how", "probability": 0.93798828125}, {"start": 1510.19, "end": 1510.33, "word": " to", "probability": 0.97314453125}, {"start": 1510.33, "end": 1510.71, "word": " compute", "probability": 0.9052734375}, {"start": 1510.71, "end": 1512.05, "word": " the", "probability": 0.87158203125}, {"start": 1512.05, "end": 1512.35, "word": " median,", "probability": 0.95361328125}, {"start": 1512.85, "end": 1513.51, "word": " and", "probability": 0.90625}, {"start": 1513.51, "end": 1513.83, "word": " let's", "probability": 0.941162109375}, {"start": 1513.83, "end": 1514.09, "word": " see", "probability": 0.8544921875}, {"start": 1514.09, "end": 1514.45, "word": " how", "probability": 0.904296875}, {"start": 1514.45, "end": 1514.75, "word": " can", "probability": 0.580078125}, {"start": 1514.75, "end": 1514.93, "word": " we", "probability": 0.943359375}, {"start": 1514.93, "end": 1515.39, "word": " compute", "probability": 0.92041015625}, {"start": 1515.39, "end": 1516.93, "word": " first", "probability": 0.70166015625}, {"start": 1516.93, "end": 1517.23, "word": " and", "probability": 0.93505859375}, {"start": 1517.23, "end": 1517.61, "word": " third", "probability": 0.94189453125}, {"start": 1517.61, "end": 1518.85, "word": " quartile.", 
"probability": 0.737548828125}, {"start": 1519.75, "end": 1520.41, "word": " If", "probability": 0.94873046875}, {"start": 1520.41, "end": 1520.49, "word": " you", "probability": 0.947265625}, {"start": 1520.49, "end": 1520.81, "word": " remember,", "probability": 0.8857421875}, {"start": 1521.27, "end": 1521.47, "word": " when", "probability": 0.9169921875}, {"start": 1521.47, "end": 1521.63, "word": " we", "probability": 0.9541015625}, {"start": 1521.63, "end": 1522.05, "word": " computed", "probability": 0.91943359375}, {"start": 1522.05, "end": 1522.49, "word": " the", "probability": 0.91015625}, {"start": 1522.49, "end": 1523.65, "word": " median,", "probability": 0.97119140625}, {"start": 1524.35, "end": 1524.63, "word": " first", "probability": 0.88330078125}, {"start": 1524.63, "end": 1524.81, "word": " we", "probability": 0.759765625}, {"start": 1524.81, "end": 1525.33, "word": " locate", "probability": 0.70361328125}, {"start": 1525.33, "end": 1525.65, "word": " the", "probability": 0.89501953125}, {"start": 1525.65, "end": 1526.01, "word": " position", "probability": 0.94580078125}, {"start": 1526.01, "end": 1526.21, "word": " of", "probability": 0.96728515625}, {"start": 1526.21, "end": 1526.35, "word": " the", "probability": 0.919921875}, {"start": 1526.35, "end": 1526.57, "word": " median.", "probability": 0.9296875}], "temperature": 1.0}, {"id": 58, "seek": 155118, "start": 1527.98, "end": 1551.18, "text": " And we said that the rank of n is odd. Yes, it was n plus 1 divided by 2. This is the location of the median, not the value. Sometimes the value may be equal to the location, but most of the time it's not. It's not the case. 
Now let's see how can we locate the fair support.", "tokens": [400, 321, 848, 300, 264, 6181, 295, 297, 307, 7401, 13, 1079, 11, 309, 390, 297, 1804, 502, 6666, 538, 568, 13, 639, 307, 264, 4914, 295, 264, 26779, 11, 406, 264, 2158, 13, 4803, 264, 2158, 815, 312, 2681, 281, 264, 4914, 11, 457, 881, 295, 264, 565, 309, 311, 406, 13, 467, 311, 406, 264, 1389, 13, 823, 718, 311, 536, 577, 393, 321, 22370, 264, 3143, 1406, 13], "avg_logprob": -0.1980251750598351, "compression_ratio": 1.510989010989011, "no_speech_prob": 0.0, "words": [{"start": 1527.98, "end": 1528.34, "word": " And", "probability": 0.623046875}, {"start": 1528.34, "end": 1528.48, "word": " we", "probability": 0.70947265625}, {"start": 1528.48, "end": 1528.74, "word": " said", "probability": 0.89208984375}, {"start": 1528.74, "end": 1529.02, "word": " that", "probability": 0.89697265625}, {"start": 1529.02, "end": 1529.76, "word": " the", "probability": 0.77685546875}, {"start": 1529.76, "end": 1530.1, "word": " rank", "probability": 0.9453125}, {"start": 1530.1, "end": 1530.26, "word": " of", "probability": 0.77978515625}, {"start": 1530.26, "end": 1530.42, "word": " n", "probability": 0.471923828125}, {"start": 1530.42, "end": 1530.58, "word": " is", "probability": 0.90673828125}, {"start": 1530.58, "end": 1530.92, "word": " odd.", "probability": 0.93310546875}, {"start": 1532.52, "end": 1532.52, "word": " Yes,", "probability": 0.291259765625}, {"start": 1532.52, "end": 1532.94, "word": " it", "probability": 0.921875}, {"start": 1532.94, "end": 1533.14, "word": " was", "probability": 0.94140625}, {"start": 1533.14, "end": 1533.36, "word": " n", "probability": 0.91455078125}, {"start": 1533.36, "end": 1533.54, "word": " plus", "probability": 0.84423828125}, {"start": 1533.54, "end": 1533.76, "word": " 1", "probability": 0.5185546875}, {"start": 1533.76, "end": 1533.98, "word": " divided", "probability": 0.79541015625}, {"start": 1533.98, "end": 1534.18, "word": " by", "probability": 0.96826171875}, 
{"start": 1534.18, "end": 1534.38, "word": " 2.", "probability": 0.93994140625}, {"start": 1535.12, "end": 1535.36, "word": " This", "probability": 0.85595703125}, {"start": 1535.36, "end": 1535.52, "word": " is", "probability": 0.9404296875}, {"start": 1535.52, "end": 1535.78, "word": " the", "probability": 0.919921875}, {"start": 1535.78, "end": 1536.86, "word": " location", "probability": 0.927734375}, {"start": 1536.86, "end": 1537.64, "word": " of", "probability": 0.9404296875}, {"start": 1537.64, "end": 1537.8, "word": " the", "probability": 0.919921875}, {"start": 1537.8, "end": 1538.02, "word": " median,", "probability": 0.93408203125}, {"start": 1538.16, "end": 1538.24, "word": " not", "probability": 0.9345703125}, {"start": 1538.24, "end": 1538.38, "word": " the", "probability": 0.9169921875}, {"start": 1538.38, "end": 1538.58, "word": " value.", "probability": 0.978515625}, {"start": 1539.98, "end": 1540.4, "word": " Sometimes", "probability": 0.81982421875}, {"start": 1540.4, "end": 1540.62, "word": " the", "probability": 0.75244140625}, {"start": 1540.62, "end": 1540.8, "word": " value", "probability": 0.97265625}, {"start": 1540.8, "end": 1541.0, "word": " may", "probability": 0.8203125}, {"start": 1541.0, "end": 1541.1, "word": " be", "probability": 0.86181640625}, {"start": 1541.1, "end": 1541.36, "word": " equal", "probability": 0.90869140625}, {"start": 1541.36, "end": 1541.54, "word": " to", "probability": 0.77978515625}, {"start": 1541.54, "end": 1542.24, "word": " the", "probability": 0.75830078125}, {"start": 1542.24, "end": 1542.66, "word": " location,", "probability": 0.943359375}, {"start": 1542.92, "end": 1542.92, "word": " but", "probability": 0.8203125}, {"start": 1542.92, "end": 1544.36, "word": " most", "probability": 0.89697265625}, {"start": 1544.36, "end": 1544.5, "word": " of", "probability": 0.96875}, {"start": 1544.5, "end": 1544.58, "word": " the", "probability": 0.9169921875}, {"start": 1544.58, "end": 1544.76, "word": " time", 
"probability": 0.880859375}, {"start": 1544.76, "end": 1544.9, "word": " it's", "probability": 0.73388671875}, {"start": 1544.9, "end": 1545.12, "word": " not.", "probability": 0.943359375}, {"start": 1545.78, "end": 1546.22, "word": " It's", "probability": 0.946533203125}, {"start": 1546.22, "end": 1546.34, "word": " not", "probability": 0.93603515625}, {"start": 1546.34, "end": 1546.46, "word": " the", "probability": 0.87255859375}, {"start": 1546.46, "end": 1546.72, "word": " case.", "probability": 0.92041015625}, {"start": 1547.32, "end": 1547.52, "word": " Now", "probability": 0.94580078125}, {"start": 1547.52, "end": 1547.76, "word": " let's", "probability": 0.82470703125}, {"start": 1547.76, "end": 1547.88, "word": " see", "probability": 0.9150390625}, {"start": 1547.88, "end": 1548.0, "word": " how", "probability": 0.9052734375}, {"start": 1548.0, "end": 1548.2, "word": " can", "probability": 0.74755859375}, {"start": 1548.2, "end": 1548.34, "word": " we", "probability": 0.95361328125}, {"start": 1548.34, "end": 1548.78, "word": " locate", "probability": 0.94921875}, {"start": 1548.78, "end": 1550.58, "word": " the", "probability": 0.841796875}, {"start": 1550.58, "end": 1550.84, "word": " fair", "probability": 0.170166015625}, {"start": 1550.84, "end": 1551.18, "word": " support.", "probability": 0.84228515625}], "temperature": 1.0}, {"id": 59, "seek": 158147, "start": 1552.75, "end": 1581.47, "text": " The first quartile after you arrange the data from smallest to largest, the location is n plus 1 divided by 2. So that's the location of the first quartile. The median, as we mentioned before, is located in the middle. So it makes sense that if n is odd, the location of the median is n plus 1 over 2. 
Now, for the third quartile position,", "tokens": [440, 700, 20837, 794, 934, 291, 9424, 264, 1412, 490, 16998, 281, 6443, 11, 264, 4914, 307, 297, 1804, 502, 6666, 538, 568, 13, 407, 300, 311, 264, 4914, 295, 264, 700, 20837, 794, 13, 440, 26779, 11, 382, 321, 2835, 949, 11, 307, 6870, 294, 264, 2808, 13, 407, 309, 1669, 2020, 300, 498, 297, 307, 7401, 11, 264, 4914, 295, 264, 26779, 307, 297, 1804, 502, 670, 568, 13, 823, 11, 337, 264, 2636, 20837, 794, 2535, 11], "avg_logprob": -0.12413194150100519, "compression_ratio": 1.7525773195876289, "no_speech_prob": 0.0, "words": [{"start": 1552.75, "end": 1552.95, "word": " The", "probability": 0.76611328125}, {"start": 1552.95, "end": 1553.23, "word": " first", "probability": 0.8408203125}, {"start": 1553.23, "end": 1553.73, "word": " quartile", "probability": 0.83642578125}, {"start": 1553.73, "end": 1554.13, "word": " after", "probability": 0.80322265625}, {"start": 1554.13, "end": 1554.35, "word": " you", "probability": 0.92236328125}, {"start": 1554.35, "end": 1554.69, "word": " arrange", "probability": 0.8046875}, {"start": 1554.69, "end": 1554.83, "word": " the", "probability": 0.90087890625}, {"start": 1554.83, "end": 1555.07, "word": " data", "probability": 0.9423828125}, {"start": 1555.07, "end": 1555.31, "word": " from", "probability": 0.841796875}, {"start": 1555.31, "end": 1555.73, "word": " smallest", "probability": 0.91259765625}, {"start": 1555.73, "end": 1555.95, "word": " to", "probability": 0.96923828125}, {"start": 1555.95, "end": 1556.37, "word": " largest,", "probability": 0.9072265625}, {"start": 1556.57, "end": 1556.69, "word": " the", "probability": 0.91015625}, {"start": 1556.69, "end": 1557.11, "word": " location", "probability": 0.939453125}, {"start": 1557.11, "end": 1557.65, "word": " is", "probability": 0.93359375}, {"start": 1557.65, "end": 1558.25, "word": " n", "probability": 0.455810546875}, {"start": 1558.25, "end": 1558.49, "word": " plus", "probability": 0.8505859375}, {"start": 
1558.49, "end": 1558.69, "word": " 1", "probability": 0.441162109375}, {"start": 1558.69, "end": 1558.93, "word": " divided", "probability": 0.798828125}, {"start": 1558.93, "end": 1559.11, "word": " by", "probability": 0.97021484375}, {"start": 1559.11, "end": 1559.25, "word": " 2.", "probability": 0.6015625}, {"start": 1560.29, "end": 1560.95, "word": " So", "probability": 0.9560546875}, {"start": 1560.95, "end": 1561.19, "word": " that's", "probability": 0.903076171875}, {"start": 1561.19, "end": 1561.29, "word": " the", "probability": 0.921875}, {"start": 1561.29, "end": 1561.73, "word": " location", "probability": 0.93017578125}, {"start": 1561.73, "end": 1562.17, "word": " of", "probability": 0.90869140625}, {"start": 1562.17, "end": 1563.27, "word": " the", "probability": 0.88232421875}, {"start": 1563.27, "end": 1563.69, "word": " first", "probability": 0.87548828125}, {"start": 1563.69, "end": 1564.21, "word": " quartile.", "probability": 0.967529296875}, {"start": 1565.13, "end": 1565.79, "word": " The", "probability": 0.89306640625}, {"start": 1565.79, "end": 1566.09, "word": " median,", "probability": 0.978515625}, {"start": 1566.61, "end": 1566.79, "word": " as", "probability": 0.96337890625}, {"start": 1566.79, "end": 1566.89, "word": " we", "probability": 0.943359375}, {"start": 1566.89, "end": 1567.15, "word": " mentioned", "probability": 0.837890625}, {"start": 1567.15, "end": 1567.53, "word": " before,", "probability": 0.8583984375}, {"start": 1567.59, "end": 1567.77, "word": " is", "probability": 0.93310546875}, {"start": 1567.77, "end": 1568.15, "word": " located", "probability": 0.9306640625}, {"start": 1568.15, "end": 1568.39, "word": " in", "probability": 0.94384765625}, {"start": 1568.39, "end": 1568.53, "word": " the", "probability": 0.91748046875}, {"start": 1568.53, "end": 1568.77, "word": " middle.", "probability": 0.95849609375}, {"start": 1570.07, "end": 1570.27, "word": " So", "probability": 0.9609375}, {"start": 1570.27, "end": 
1570.39, "word": " it", "probability": 0.9248046875}, {"start": 1570.39, "end": 1570.59, "word": " makes", "probability": 0.8154296875}, {"start": 1570.59, "end": 1570.93, "word": " sense", "probability": 0.81640625}, {"start": 1570.93, "end": 1571.33, "word": " that", "probability": 0.92333984375}, {"start": 1571.33, "end": 1571.69, "word": " if", "probability": 0.81005859375}, {"start": 1571.69, "end": 1571.87, "word": " n", "probability": 0.8740234375}, {"start": 1571.87, "end": 1572.03, "word": " is", "probability": 0.9453125}, {"start": 1572.03, "end": 1572.37, "word": " odd,", "probability": 0.841796875}, {"start": 1572.87, "end": 1573.33, "word": " the", "probability": 0.908203125}, {"start": 1573.33, "end": 1574.27, "word": " location", "probability": 0.9267578125}, {"start": 1574.27, "end": 1575.05, "word": " of", "probability": 0.96728515625}, {"start": 1575.05, "end": 1575.21, "word": " the", "probability": 0.92041015625}, {"start": 1575.21, "end": 1575.47, "word": " median", "probability": 0.962890625}, {"start": 1575.47, "end": 1575.87, "word": " is", "probability": 0.94384765625}, {"start": 1575.87, "end": 1576.03, "word": " n", "probability": 0.98291015625}, {"start": 1576.03, "end": 1576.21, "word": " plus", "probability": 0.95849609375}, {"start": 1576.21, "end": 1576.41, "word": " 1", "probability": 0.97412109375}, {"start": 1576.41, "end": 1576.53, "word": " over", "probability": 0.8916015625}, {"start": 1576.53, "end": 1576.85, "word": " 2.", "probability": 0.98876953125}, {"start": 1578.39, "end": 1579.05, "word": " Now,", "probability": 0.95703125}, {"start": 1579.43, "end": 1579.81, "word": " for", "probability": 0.953125}, {"start": 1579.81, "end": 1580.23, "word": " the", "probability": 0.91259765625}, {"start": 1580.23, "end": 1580.49, "word": " third", "probability": 0.85595703125}, {"start": 1580.49, "end": 1581.03, "word": " quartile", "probability": 0.955322265625}, {"start": 1581.03, "end": 1581.47, "word": " position,", 
"probability": 0.9248046875}], "temperature": 1.0}, {"id": 60, "seek": 161032, "start": 1583.8, "end": 1610.32, "text": " The location is N plus 1 divided by 4 times 3. So 3 times N plus 1 divided by 4. That's how can we locate Q1, Q2, and Q3. So one more time, the median, the value in the middle, and it's located exactly at the position N plus 1 over 2 for the range data.", "tokens": [440, 4914, 307, 426, 1804, 502, 6666, 538, 1017, 1413, 805, 13, 407, 805, 1413, 426, 1804, 502, 6666, 538, 1017, 13, 663, 311, 577, 393, 321, 22370, 1249, 16, 11, 1249, 17, 11, 293, 1249, 18, 13, 407, 472, 544, 565, 11, 264, 26779, 11, 264, 2158, 294, 264, 2808, 11, 293, 309, 311, 6870, 2293, 412, 264, 2535, 426, 1804, 502, 670, 568, 337, 264, 3613, 1412, 13], "avg_logprob": -0.1592209515437274, "compression_ratio": 1.5974842767295598, "no_speech_prob": 0.0, "words": [{"start": 1583.8, "end": 1584.0, "word": " The", "probability": 0.69775390625}, {"start": 1584.0, "end": 1584.4, "word": " location", "probability": 0.88623046875}, {"start": 1584.4, "end": 1584.98, "word": " is", "probability": 0.9462890625}, {"start": 1584.98, "end": 1586.7, "word": " N", "probability": 0.42431640625}, {"start": 1586.7, "end": 1586.96, "word": " plus", "probability": 0.82080078125}, {"start": 1586.96, "end": 1587.16, "word": " 1", "probability": 0.5390625}, {"start": 1587.16, "end": 1587.44, "word": " divided", "probability": 0.783203125}, {"start": 1587.44, "end": 1587.62, "word": " by", "probability": 0.96044921875}, {"start": 1587.62, "end": 1587.96, "word": " 4", "probability": 0.859375}, {"start": 1587.96, "end": 1588.32, "word": " times", "probability": 0.826171875}, {"start": 1588.32, "end": 1588.64, "word": " 3.", "probability": 0.93359375}, {"start": 1588.82, "end": 1588.98, "word": " So", "probability": 0.96044921875}, {"start": 1588.98, "end": 1589.24, "word": " 3", "probability": 0.583984375}, {"start": 1589.24, "end": 1589.76, "word": " times", "probability": 0.91357421875}, {"start": 
1589.76, "end": 1590.22, "word": " N", "probability": 0.95654296875}, {"start": 1590.22, "end": 1590.46, "word": " plus", "probability": 0.95458984375}, {"start": 1590.46, "end": 1590.78, "word": " 1", "probability": 0.99267578125}, {"start": 1590.78, "end": 1591.16, "word": " divided", "probability": 0.7783203125}, {"start": 1591.16, "end": 1591.36, "word": " by", "probability": 0.95947265625}, {"start": 1591.36, "end": 1591.7, "word": " 4.", "probability": 0.97900390625}, {"start": 1592.06, "end": 1592.54, "word": " That's", "probability": 0.866943359375}, {"start": 1592.54, "end": 1592.76, "word": " how", "probability": 0.86962890625}, {"start": 1592.76, "end": 1593.0, "word": " can", "probability": 0.79150390625}, {"start": 1593.0, "end": 1593.2, "word": " we", "probability": 0.9482421875}, {"start": 1593.2, "end": 1593.8, "word": " locate", "probability": 0.93017578125}, {"start": 1593.8, "end": 1594.74, "word": " Q1,", "probability": 0.7047119140625}, {"start": 1595.42, "end": 1595.96, "word": " Q2,", "probability": 0.995849609375}, {"start": 1596.2, "end": 1596.62, "word": " and", "probability": 0.9443359375}, {"start": 1596.62, "end": 1598.2, "word": " Q3.", "probability": 0.9755859375}, {"start": 1599.32, "end": 1599.92, "word": " So", "probability": 0.95654296875}, {"start": 1599.92, "end": 1600.1, "word": " one", "probability": 0.88037109375}, {"start": 1600.1, "end": 1600.26, "word": " more", "probability": 0.931640625}, {"start": 1600.26, "end": 1600.58, "word": " time,", "probability": 0.8818359375}, {"start": 1600.76, "end": 1600.94, "word": " the", "probability": 0.90087890625}, {"start": 1600.94, "end": 1601.18, "word": " median,", "probability": 0.72021484375}, {"start": 1601.52, "end": 1601.64, "word": " the", "probability": 0.9130859375}, {"start": 1601.64, "end": 1601.86, "word": " value", "probability": 0.9677734375}, {"start": 1601.86, "end": 1601.98, "word": " in", "probability": 0.8564453125}, {"start": 1601.98, "end": 1602.08, "word": " 
the", "probability": 0.9189453125}, {"start": 1602.08, "end": 1602.32, "word": " middle,", "probability": 0.8193359375}, {"start": 1602.6, "end": 1602.72, "word": " and", "probability": 0.6064453125}, {"start": 1602.72, "end": 1602.86, "word": " it's", "probability": 0.83984375}, {"start": 1602.86, "end": 1603.26, "word": " located", "probability": 0.93310546875}, {"start": 1603.26, "end": 1604.0, "word": " exactly", "probability": 0.87841796875}, {"start": 1604.0, "end": 1604.4, "word": " at", "probability": 0.958984375}, {"start": 1604.4, "end": 1605.5, "word": " the", "probability": 0.90966796875}, {"start": 1605.5, "end": 1605.96, "word": " position", "probability": 0.94580078125}, {"start": 1605.96, "end": 1606.26, "word": " N", "probability": 0.919921875}, {"start": 1606.26, "end": 1606.5, "word": " plus", "probability": 0.95654296875}, {"start": 1606.5, "end": 1606.7, "word": " 1", "probability": 0.986328125}, {"start": 1606.7, "end": 1606.84, "word": " over", "probability": 0.9111328125}, {"start": 1606.84, "end": 1607.16, "word": " 2", "probability": 0.98779296875}, {"start": 1607.16, "end": 1609.2, "word": " for", "probability": 0.7275390625}, {"start": 1609.2, "end": 1609.38, "word": " the", "probability": 0.91650390625}, {"start": 1609.38, "end": 1609.64, "word": " range", "probability": 0.80810546875}, {"start": 1609.64, "end": 1610.32, "word": " data.", "probability": 0.80859375}], "temperature": 1.0}, {"id": 61, "seek": 163913, "start": 1611.01, "end": 1639.13, "text": " Q1 is located at n plus one divided by four. Q3 is located at the position three times n plus one divided by four. Now, when calculating the rank position, we can use one of these rules. 
First, if the result of the location, I mean, is a whole number, I mean, if it is an integer.", "tokens": [1249, 16, 307, 6870, 412, 297, 1804, 472, 6666, 538, 1451, 13, 1249, 18, 307, 6870, 412, 264, 2535, 1045, 1413, 297, 1804, 472, 6666, 538, 1451, 13, 823, 11, 562, 28258, 264, 6181, 2535, 11, 321, 393, 764, 472, 295, 613, 4474, 13, 2386, 11, 498, 264, 1874, 295, 264, 4914, 11, 286, 914, 11, 307, 257, 1379, 1230, 11, 286, 914, 11, 498, 309, 307, 364, 24922, 13], "avg_logprob": -0.15746039194120487, "compression_ratio": 1.6927710843373494, "no_speech_prob": 0.0, "words": [{"start": 1611.01, "end": 1611.53, "word": " Q1", "probability": 0.6434326171875}, {"start": 1611.53, "end": 1611.69, "word": " is", "probability": 0.8994140625}, {"start": 1611.69, "end": 1612.13, "word": " located", "probability": 0.93310546875}, {"start": 1612.13, "end": 1612.59, "word": " at", "probability": 0.92724609375}, {"start": 1612.59, "end": 1612.99, "word": " n", "probability": 0.521484375}, {"start": 1612.99, "end": 1613.23, "word": " plus", "probability": 0.55517578125}, {"start": 1613.23, "end": 1613.41, "word": " one", "probability": 0.49072265625}, {"start": 1613.41, "end": 1613.65, "word": " divided", "probability": 0.78564453125}, {"start": 1613.65, "end": 1613.79, "word": " by", "probability": 0.95849609375}, {"start": 1613.79, "end": 1614.11, "word": " four.", "probability": 0.90087890625}, {"start": 1615.21, "end": 1615.73, "word": " Q3", "probability": 0.945556640625}, {"start": 1615.73, "end": 1616.05, "word": " is", "probability": 0.9326171875}, {"start": 1616.05, "end": 1616.45, "word": " located", "probability": 0.91455078125}, {"start": 1616.45, "end": 1616.65, "word": " at", "probability": 0.9423828125}, {"start": 1616.65, "end": 1616.77, "word": " the", "probability": 0.8701171875}, {"start": 1616.77, "end": 1617.17, "word": " position", "probability": 0.95068359375}, {"start": 1617.17, "end": 1617.55, "word": " three", "probability": 0.87451171875}, {"start": 
1617.55, "end": 1618.03, "word": " times", "probability": 0.912109375}, {"start": 1618.03, "end": 1618.29, "word": " n", "probability": 0.90869140625}, {"start": 1618.29, "end": 1618.51, "word": " plus", "probability": 0.95458984375}, {"start": 1618.51, "end": 1618.75, "word": " one", "probability": 0.9345703125}, {"start": 1618.75, "end": 1619.03, "word": " divided", "probability": 0.826171875}, {"start": 1619.03, "end": 1619.33, "word": " by", "probability": 0.96533203125}, {"start": 1619.33, "end": 1619.67, "word": " four.", "probability": 0.9228515625}, {"start": 1623.63, "end": 1624.05, "word": " Now,", "probability": 0.94189453125}, {"start": 1624.13, "end": 1624.31, "word": " when", "probability": 0.7802734375}, {"start": 1624.31, "end": 1624.95, "word": " calculating", "probability": 0.927734375}, {"start": 1624.95, "end": 1625.25, "word": " the", "probability": 0.904296875}, {"start": 1625.25, "end": 1625.47, "word": " rank", "probability": 0.90625}, {"start": 1625.47, "end": 1625.93, "word": " position,", "probability": 0.7333984375}, {"start": 1627.01, "end": 1627.23, "word": " we", "probability": 0.94775390625}, {"start": 1627.23, "end": 1627.49, "word": " can", "probability": 0.94677734375}, {"start": 1627.49, "end": 1627.73, "word": " use", "probability": 0.87890625}, {"start": 1627.73, "end": 1627.91, "word": " one", "probability": 0.92236328125}, {"start": 1627.91, "end": 1628.09, "word": " of", "probability": 0.96630859375}, {"start": 1628.09, "end": 1628.27, "word": " these", "probability": 0.67529296875}, {"start": 1628.27, "end": 1628.69, "word": " rules.", "probability": 0.77978515625}, {"start": 1630.25, "end": 1630.69, "word": " First,", "probability": 0.90673828125}, {"start": 1631.01, "end": 1633.21, "word": " if", "probability": 0.93896484375}, {"start": 1633.21, "end": 1633.37, "word": " the", "probability": 0.9150390625}, {"start": 1633.37, "end": 1633.83, "word": " result", "probability": 0.95263671875}, {"start": 1633.83, "end": 
1634.69, "word": " of", "probability": 0.95947265625}, {"start": 1634.69, "end": 1634.79, "word": " the", "probability": 0.91748046875}, {"start": 1634.79, "end": 1635.19, "word": " location,", "probability": 0.85986328125}, {"start": 1635.29, "end": 1635.41, "word": " I", "probability": 0.97216796875}, {"start": 1635.41, "end": 1635.65, "word": " mean,", "probability": 0.96533203125}, {"start": 1636.51, "end": 1636.77, "word": " is", "probability": 0.90673828125}, {"start": 1636.77, "end": 1636.97, "word": " a", "probability": 0.99462890625}, {"start": 1636.97, "end": 1637.15, "word": " whole", "probability": 0.9111328125}, {"start": 1637.15, "end": 1637.51, "word": " number,", "probability": 0.939453125}, {"start": 1637.65, "end": 1637.79, "word": " I", "probability": 0.97607421875}, {"start": 1637.79, "end": 1638.01, "word": " mean,", "probability": 0.96826171875}, {"start": 1638.25, "end": 1638.41, "word": " if", "probability": 0.89404296875}, {"start": 1638.41, "end": 1638.53, "word": " it", "probability": 0.93310546875}, {"start": 1638.53, "end": 1638.63, "word": " is", "probability": 0.88330078125}, {"start": 1638.63, "end": 1638.83, "word": " an", "probability": 0.955078125}, {"start": 1638.83, "end": 1639.13, "word": " integer.", "probability": 0.86767578125}], "temperature": 1.0}, {"id": 62, "seek": 166977, "start": 1640.49, "end": 1669.77, "text": " Then the rank position is the same number. For example, suppose the rank position is four. So position number four is your quartile, either first or third or second quartile. So if the result is a whole number, then it is the rank position used. 
Now, if the result is a fractional half,", "tokens": [1396, 264, 6181, 2535, 307, 264, 912, 1230, 13, 1171, 1365, 11, 7297, 264, 6181, 2535, 307, 1451, 13, 407, 2535, 1230, 1451, 307, 428, 20837, 794, 11, 2139, 700, 420, 2636, 420, 1150, 20837, 794, 13, 407, 498, 264, 1874, 307, 257, 1379, 1230, 11, 550, 309, 307, 264, 6181, 2535, 1143, 13, 823, 11, 498, 264, 1874, 307, 257, 17948, 1966, 1922, 11], "avg_logprob": -0.1974431865594604, "compression_ratio": 1.8050314465408805, "no_speech_prob": 0.0, "words": [{"start": 1640.49, "end": 1641.23, "word": " Then", "probability": 0.397705078125}, {"start": 1641.23, "end": 1641.97, "word": " the", "probability": 0.490966796875}, {"start": 1641.97, "end": 1642.31, "word": " rank", "probability": 0.6748046875}, {"start": 1642.31, "end": 1643.23, "word": " position", "probability": 0.92333984375}, {"start": 1643.23, "end": 1643.89, "word": " is", "probability": 0.9189453125}, {"start": 1643.89, "end": 1644.05, "word": " the", "probability": 0.912109375}, {"start": 1644.05, "end": 1644.23, "word": " same", "probability": 0.91064453125}, {"start": 1644.23, "end": 1644.51, "word": " number.", "probability": 0.9248046875}, {"start": 1645.41, "end": 1645.65, "word": " For", "probability": 0.9296875}, {"start": 1645.65, "end": 1645.93, "word": " example,", "probability": 0.96923828125}, {"start": 1646.07, "end": 1646.51, "word": " suppose", "probability": 0.83837890625}, {"start": 1646.51, "end": 1648.35, "word": " the", "probability": 0.7294921875}, {"start": 1648.35, "end": 1648.59, "word": " rank", "probability": 0.916015625}, {"start": 1648.59, "end": 1648.95, "word": " position", "probability": 0.951171875}, {"start": 1648.95, "end": 1649.49, "word": " is", "probability": 0.9501953125}, {"start": 1649.49, "end": 1649.95, "word": " four.", "probability": 0.454833984375}, {"start": 1651.37, "end": 1651.93, "word": " So", "probability": 0.88427734375}, {"start": 1651.93, "end": 1652.37, "word": " position", "probability": 
0.88134765625}, {"start": 1652.37, "end": 1652.69, "word": " number", "probability": 0.88916015625}, {"start": 1652.69, "end": 1653.15, "word": " four", "probability": 0.89111328125}, {"start": 1653.15, "end": 1653.63, "word": " is", "probability": 0.921875}, {"start": 1653.63, "end": 1654.61, "word": " your", "probability": 0.89306640625}, {"start": 1654.61, "end": 1655.83, "word": " quartile,", "probability": 0.86083984375}, {"start": 1655.91, "end": 1656.09, "word": " either", "probability": 0.9111328125}, {"start": 1656.09, "end": 1656.59, "word": " first", "probability": 0.859375}, {"start": 1656.59, "end": 1656.89, "word": " or", "probability": 0.8486328125}, {"start": 1656.89, "end": 1657.31, "word": " third", "probability": 0.880859375}, {"start": 1657.31, "end": 1658.17, "word": " or", "probability": 0.497314453125}, {"start": 1658.17, "end": 1658.45, "word": " second", "probability": 0.904296875}, {"start": 1658.45, "end": 1658.97, "word": " quartile.", "probability": 0.969970703125}, {"start": 1659.99, "end": 1660.73, "word": " So", "probability": 0.896484375}, {"start": 1660.73, "end": 1661.09, "word": " if", "probability": 0.86083984375}, {"start": 1661.09, "end": 1661.25, "word": " the", "probability": 0.86865234375}, {"start": 1661.25, "end": 1661.53, "word": " result", "probability": 0.93603515625}, {"start": 1661.53, "end": 1661.73, "word": " is", "probability": 0.83349609375}, {"start": 1661.73, "end": 1661.83, "word": " a", "probability": 0.70166015625}, {"start": 1661.83, "end": 1661.99, "word": " whole", "probability": 0.9140625}, {"start": 1661.99, "end": 1662.25, "word": " number,", "probability": 0.94384765625}, {"start": 1662.35, "end": 1662.51, "word": " then", "probability": 0.8291015625}, {"start": 1662.51, "end": 1662.65, "word": " it", "probability": 0.935546875}, {"start": 1662.65, "end": 1662.85, "word": " is", "probability": 0.9296875}, {"start": 1662.85, "end": 1663.01, "word": " the", "probability": 0.4345703125}, {"start": 
1663.01, "end": 1663.43, "word": " rank", "probability": 0.96240234375}, {"start": 1663.43, "end": 1663.91, "word": " position", "probability": 0.95166015625}, {"start": 1663.91, "end": 1664.37, "word": " used.", "probability": 0.34033203125}, {"start": 1665.95, "end": 1666.69, "word": " Now,", "probability": 0.94287109375}, {"start": 1667.41, "end": 1667.77, "word": " if", "probability": 0.9501953125}, {"start": 1667.77, "end": 1667.97, "word": " the", "probability": 0.912109375}, {"start": 1667.97, "end": 1668.35, "word": " result", "probability": 0.958984375}, {"start": 1668.35, "end": 1668.61, "word": " is", "probability": 0.94970703125}, {"start": 1668.61, "end": 1668.79, "word": " a", "probability": 0.9580078125}, {"start": 1668.79, "end": 1669.33, "word": " fractional", "probability": 0.780029296875}, {"start": 1669.33, "end": 1669.77, "word": " half,", "probability": 0.8095703125}], "temperature": 1.0}, {"id": 63, "seek": 169895, "start": 1670.29, "end": 1698.95, "text": " I mean if the right position is 2.5, 3.5, 4.5. In this case, average the two corresponding data values. For example, if the right position is 2.5. So the rank position is 2.5. So take the average of the corresponding values for the rank 2 and 3. 
So look at the value.", "tokens": [286, 914, 498, 264, 558, 2535, 307, 568, 13, 20, 11, 805, 13, 20, 11, 1017, 13, 20, 13, 682, 341, 1389, 11, 4274, 264, 732, 11760, 1412, 4190, 13, 1171, 1365, 11, 498, 264, 558, 2535, 307, 568, 13, 20, 13, 407, 264, 6181, 2535, 307, 568, 13, 20, 13, 407, 747, 264, 4274, 295, 264, 11760, 4190, 337, 264, 6181, 568, 293, 805, 13, 407, 574, 412, 264, 2158, 13], "avg_logprob": -0.1491866405696085, "compression_ratio": 1.7748344370860927, "no_speech_prob": 0.0, "words": [{"start": 1670.29, "end": 1670.53, "word": " I", "probability": 0.7705078125}, {"start": 1670.53, "end": 1670.77, "word": " mean", "probability": 0.9599609375}, {"start": 1670.77, "end": 1671.57, "word": " if", "probability": 0.5234375}, {"start": 1671.57, "end": 1671.71, "word": " the", "probability": 0.900390625}, {"start": 1671.71, "end": 1671.87, "word": " right", "probability": 0.498046875}, {"start": 1671.87, "end": 1672.25, "word": " position", "probability": 0.9404296875}, {"start": 1672.25, "end": 1672.51, "word": " is", "probability": 0.93896484375}, {"start": 1672.51, "end": 1672.67, "word": " 2", "probability": 0.88623046875}, {"start": 1672.67, "end": 1673.17, "word": ".5,", "probability": 0.985595703125}, {"start": 1673.33, "end": 1673.69, "word": " 3", "probability": 0.98193359375}, {"start": 1673.69, "end": 1674.25, "word": ".5,", "probability": 0.99853515625}, {"start": 1674.43, "end": 1674.63, "word": " 4", "probability": 0.94384765625}, {"start": 1674.63, "end": 1675.21, "word": ".5.", "probability": 0.9990234375}, {"start": 1675.71, "end": 1676.07, "word": " In", "probability": 0.935546875}, {"start": 1676.07, "end": 1676.27, "word": " this", "probability": 0.947265625}, {"start": 1676.27, "end": 1676.71, "word": " case,", "probability": 0.91552734375}, {"start": 1677.87, "end": 1678.23, "word": " average", "probability": 0.7783203125}, {"start": 1678.23, "end": 1678.65, "word": " the", "probability": 0.90087890625}, {"start": 1678.65, "end": 
1678.83, "word": " two", "probability": 0.89599609375}, {"start": 1678.83, "end": 1679.43, "word": " corresponding", "probability": 0.8310546875}, {"start": 1679.43, "end": 1679.75, "word": " data", "probability": 0.93505859375}, {"start": 1679.75, "end": 1680.23, "word": " values.", "probability": 0.962890625}, {"start": 1680.37, "end": 1680.55, "word": " For", "probability": 0.9541015625}, {"start": 1680.55, "end": 1680.91, "word": " example,", "probability": 0.974609375}, {"start": 1681.55, "end": 1681.85, "word": " if", "probability": 0.9541015625}, {"start": 1681.85, "end": 1682.05, "word": " the", "probability": 0.91748046875}, {"start": 1682.05, "end": 1682.21, "word": " right", "probability": 0.89306640625}, {"start": 1682.21, "end": 1682.73, "word": " position", "probability": 0.9462890625}, {"start": 1682.73, "end": 1684.01, "word": " is", "probability": 0.94287109375}, {"start": 1684.01, "end": 1684.19, "word": " 2", "probability": 0.9931640625}, {"start": 1684.19, "end": 1684.73, "word": ".5.", "probability": 0.998779296875}, {"start": 1686.77, "end": 1687.41, "word": " So", "probability": 0.939453125}, {"start": 1687.41, "end": 1688.53, "word": " the", "probability": 0.3779296875}, {"start": 1688.53, "end": 1688.83, "word": " rank", "probability": 0.66943359375}, {"start": 1688.83, "end": 1689.77, "word": " position", "probability": 0.8291015625}, {"start": 1689.77, "end": 1690.01, "word": " is", "probability": 0.89013671875}, {"start": 1690.01, "end": 1690.17, "word": " 2", "probability": 0.9541015625}, {"start": 1690.17, "end": 1690.67, "word": ".5.", "probability": 0.993896484375}, {"start": 1691.23, "end": 1691.47, "word": " So", "probability": 0.912109375}, {"start": 1691.47, "end": 1691.73, "word": " take", "probability": 0.83349609375}, {"start": 1691.73, "end": 1691.91, "word": " the", "probability": 0.9189453125}, {"start": 1691.91, "end": 1692.33, "word": " average", "probability": 0.7939453125}, {"start": 1692.33, "end": 1692.59, "word": " 
of", "probability": 0.953125}, {"start": 1692.59, "end": 1692.79, "word": " the", "probability": 0.91357421875}, {"start": 1692.79, "end": 1693.21, "word": " corresponding", "probability": 0.75634765625}, {"start": 1693.21, "end": 1693.79, "word": " values", "probability": 0.96240234375}, {"start": 1693.79, "end": 1694.07, "word": " for", "probability": 0.9423828125}, {"start": 1694.07, "end": 1694.27, "word": " the", "probability": 0.83447265625}, {"start": 1694.27, "end": 1694.55, "word": " rank", "probability": 0.93701171875}, {"start": 1694.55, "end": 1695.07, "word": " 2", "probability": 0.56591796875}, {"start": 1695.07, "end": 1695.39, "word": " and", "probability": 0.9208984375}, {"start": 1695.39, "end": 1695.63, "word": " 3.", "probability": 0.96875}, {"start": 1697.45, "end": 1697.77, "word": " So", "probability": 0.94677734375}, {"start": 1697.77, "end": 1697.97, "word": " look", "probability": 0.8974609375}, {"start": 1697.97, "end": 1698.25, "word": " at", "probability": 0.95947265625}, {"start": 1698.25, "end": 1698.57, "word": " the", "probability": 0.62109375}, {"start": 1698.57, "end": 1698.95, "word": " value.", "probability": 0.974609375}], "temperature": 1.0}, {"id": 64, "seek": 172532, "start": 1699.28, "end": 1725.32, "text": " at rank 2, value at rank 3, then take the average of the corresponding values. That if the rank position is fractional. So if the result is whole number, just take it as it is. 
If it is a fractional half, take the corresponding data values and take the average of these two values.", "tokens": [412, 6181, 568, 11, 2158, 412, 6181, 805, 11, 550, 747, 264, 4274, 295, 264, 11760, 4190, 13, 663, 498, 264, 6181, 2535, 307, 17948, 1966, 13, 407, 498, 264, 1874, 307, 1379, 1230, 11, 445, 747, 309, 382, 309, 307, 13, 759, 309, 307, 257, 17948, 1966, 1922, 11, 747, 264, 11760, 1412, 4190, 293, 747, 264, 4274, 295, 613, 732, 4190, 13], "avg_logprob": -0.18461538461538463, "compression_ratio": 1.7961783439490446, "no_speech_prob": 0.0, "words": [{"start": 1699.28, "end": 1699.58, "word": " at", "probability": 0.2042236328125}, {"start": 1699.58, "end": 1699.82, "word": " rank", "probability": 0.9248046875}, {"start": 1699.82, "end": 1700.1, "word": " 2,", "probability": 0.44873046875}, {"start": 1701.38, "end": 1702.22, "word": " value", "probability": 0.9013671875}, {"start": 1702.22, "end": 1702.46, "word": " at", "probability": 0.92041015625}, {"start": 1702.46, "end": 1702.74, "word": " rank", "probability": 0.94287109375}, {"start": 1702.74, "end": 1703.26, "word": " 3,", "probability": 0.96044921875}, {"start": 1703.5, "end": 1703.8, "word": " then", "probability": 0.8076171875}, {"start": 1703.8, "end": 1704.1, "word": " take", "probability": 0.85302734375}, {"start": 1704.1, "end": 1704.28, "word": " the", "probability": 0.91796875}, {"start": 1704.28, "end": 1704.74, "word": " average", "probability": 0.80078125}, {"start": 1704.74, "end": 1705.52, "word": " of", "probability": 0.96142578125}, {"start": 1705.52, "end": 1705.96, "word": " the", "probability": 0.91552734375}, {"start": 1705.96, "end": 1706.44, "word": " corresponding", "probability": 0.81396484375}, {"start": 1706.44, "end": 1707.34, "word": " values.", "probability": 0.9267578125}, {"start": 1707.94, "end": 1708.36, "word": " That", "probability": 0.74169921875}, {"start": 1708.36, "end": 1708.72, "word": " if", "probability": 0.63232421875}, {"start": 1708.72, "end": 
1709.14, "word": " the", "probability": 0.8916015625}, {"start": 1709.14, "end": 1709.3, "word": " rank", "probability": 0.9619140625}, {"start": 1709.3, "end": 1709.72, "word": " position", "probability": 0.93408203125}, {"start": 1709.72, "end": 1710.14, "word": " is", "probability": 0.943359375}, {"start": 1710.14, "end": 1711.28, "word": " fractional.", "probability": 0.640625}, {"start": 1714.38, "end": 1714.98, "word": " So", "probability": 0.88525390625}, {"start": 1714.98, "end": 1715.28, "word": " if", "probability": 0.77001953125}, {"start": 1715.28, "end": 1715.9, "word": " the", "probability": 0.888671875}, {"start": 1715.9, "end": 1716.24, "word": " result", "probability": 0.92822265625}, {"start": 1716.24, "end": 1716.44, "word": " is", "probability": 0.8857421875}, {"start": 1716.44, "end": 1716.62, "word": " whole", "probability": 0.55126953125}, {"start": 1716.62, "end": 1716.9, "word": " number,", "probability": 0.94873046875}, {"start": 1717.02, "end": 1717.22, "word": " just", "probability": 0.904296875}, {"start": 1717.22, "end": 1717.52, "word": " take", "probability": 0.884765625}, {"start": 1717.52, "end": 1717.7, "word": " it", "probability": 0.943359375}, {"start": 1717.7, "end": 1717.9, "word": " as", "probability": 0.9619140625}, {"start": 1717.9, "end": 1718.06, "word": " it", "probability": 0.9482421875}, {"start": 1718.06, "end": 1718.3, "word": " is.", "probability": 0.9423828125}, {"start": 1718.74, "end": 1719.34, "word": " If", "probability": 0.93505859375}, {"start": 1719.34, "end": 1719.48, "word": " it", "probability": 0.9248046875}, {"start": 1719.48, "end": 1719.58, "word": " is", "probability": 0.91357421875}, {"start": 1719.58, "end": 1719.68, "word": " a", "probability": 0.81591796875}, {"start": 1719.68, "end": 1720.14, "word": " fractional", "probability": 0.8662109375}, {"start": 1720.14, "end": 1720.52, "word": " half,", "probability": 0.72412109375}, {"start": 1720.76, "end": 1720.96, "word": " take", "probability": 
0.87353515625}, {"start": 1720.96, "end": 1721.16, "word": " the", "probability": 0.861328125}, {"start": 1721.16, "end": 1721.7, "word": " corresponding", "probability": 0.83642578125}, {"start": 1721.7, "end": 1723.06, "word": " data", "probability": 0.9482421875}, {"start": 1723.06, "end": 1723.52, "word": " values", "probability": 0.96435546875}, {"start": 1723.52, "end": 1723.72, "word": " and", "probability": 0.8134765625}, {"start": 1723.72, "end": 1723.86, "word": " take", "probability": 0.8603515625}, {"start": 1723.86, "end": 1723.98, "word": " the", "probability": 0.89697265625}, {"start": 1723.98, "end": 1724.28, "word": " average", "probability": 0.814453125}, {"start": 1724.28, "end": 1724.46, "word": " of", "probability": 0.95849609375}, {"start": 1724.46, "end": 1724.66, "word": " these", "probability": 0.84814453125}, {"start": 1724.66, "end": 1724.86, "word": " two", "probability": 0.90478515625}, {"start": 1724.86, "end": 1725.32, "word": " values.", "probability": 0.9677734375}], "temperature": 1.0}, {"id": 65, "seek": 175351, "start": 1726.77, "end": 1753.51, "text": " Now, if the result is not a whole number or a fraction of it. For example, suppose the location is 2.1. So the position is 2, just round, up to the nearest integer. So that's 2. What's about if the position rank is 2.6? Just rank up to 3. 
So that's 3.", "tokens": [823, 11, 498, 264, 1874, 307, 406, 257, 1379, 1230, 420, 257, 14135, 295, 309, 13, 1171, 1365, 11, 7297, 264, 4914, 307, 568, 13, 16, 13, 407, 264, 2535, 307, 568, 11, 445, 3098, 11, 493, 281, 264, 23831, 24922, 13, 407, 300, 311, 568, 13, 708, 311, 466, 498, 264, 2535, 6181, 307, 568, 13, 21, 30, 1449, 6181, 493, 281, 805, 13, 407, 300, 311, 805, 13], "avg_logprob": -0.20202464410956478, "compression_ratio": 1.5460122699386503, "no_speech_prob": 0.0, "words": [{"start": 1726.77, "end": 1727.21, "word": " Now,", "probability": 0.8544921875}, {"start": 1727.59, "end": 1727.91, "word": " if", "probability": 0.94921875}, {"start": 1727.91, "end": 1728.07, "word": " the", "probability": 0.8974609375}, {"start": 1728.07, "end": 1728.43, "word": " result", "probability": 0.9404296875}, {"start": 1728.43, "end": 1728.71, "word": " is", "probability": 0.94873046875}, {"start": 1728.71, "end": 1728.95, "word": " not", "probability": 0.94580078125}, {"start": 1728.95, "end": 1729.11, "word": " a", "probability": 0.869140625}, {"start": 1729.11, "end": 1729.25, "word": " whole", "probability": 0.92919921875}, {"start": 1729.25, "end": 1729.63, "word": " number", "probability": 0.9501953125}, {"start": 1729.63, "end": 1731.53, "word": " or", "probability": 0.59423828125}, {"start": 1731.53, "end": 1731.95, "word": " a", "probability": 0.34716796875}, {"start": 1731.95, "end": 1732.29, "word": " fraction", "probability": 0.64599609375}, {"start": 1732.29, "end": 1732.53, "word": " of", "probability": 0.47216796875}, {"start": 1732.53, "end": 1732.71, "word": " it.", "probability": 0.450927734375}, {"start": 1733.27, "end": 1733.61, "word": " For", "probability": 0.9501953125}, {"start": 1733.61, "end": 1733.93, "word": " example,", "probability": 0.9716796875}, {"start": 1734.07, "end": 1734.49, "word": " suppose", "probability": 0.8466796875}, {"start": 1734.49, "end": 1735.01, "word": " the", "probability": 0.8701171875}, {"start": 1735.01, "end": 
1735.43, "word": " location", "probability": 0.94287109375}, {"start": 1735.43, "end": 1735.73, "word": " is", "probability": 0.94580078125}, {"start": 1735.73, "end": 1735.93, "word": " 2", "probability": 0.9091796875}, {"start": 1735.93, "end": 1736.39, "word": ".1.", "probability": 0.7646484375}, {"start": 1738.33, "end": 1739.01, "word": " So", "probability": 0.9384765625}, {"start": 1739.01, "end": 1739.31, "word": " the", "probability": 0.5546875}, {"start": 1739.31, "end": 1739.69, "word": " position", "probability": 0.95361328125}, {"start": 1739.69, "end": 1740.95, "word": " is", "probability": 0.947265625}, {"start": 1740.95, "end": 1741.91, "word": " 2,", "probability": 0.73779296875}, {"start": 1742.39, "end": 1742.85, "word": " just", "probability": 0.88818359375}, {"start": 1742.85, "end": 1743.23, "word": " round,", "probability": 0.6044921875}, {"start": 1743.79, "end": 1744.27, "word": " up", "probability": 0.9619140625}, {"start": 1744.27, "end": 1744.41, "word": " to", "probability": 0.9658203125}, {"start": 1744.41, "end": 1744.59, "word": " the", "probability": 0.91259765625}, {"start": 1744.59, "end": 1744.95, "word": " nearest", "probability": 0.85888671875}, {"start": 1744.95, "end": 1745.51, "word": " integer.", "probability": 0.85302734375}, {"start": 1746.13, "end": 1746.27, "word": " So", "probability": 0.92626953125}, {"start": 1746.27, "end": 1746.55, "word": " that's", "probability": 0.91455078125}, {"start": 1746.55, "end": 1746.73, "word": " 2.", "probability": 0.8251953125}, {"start": 1747.31, "end": 1747.99, "word": " What's", "probability": 0.880615234375}, {"start": 1747.99, "end": 1748.27, "word": " about", "probability": 0.88525390625}, {"start": 1748.27, "end": 1748.49, "word": " if", "probability": 0.9267578125}, {"start": 1748.49, "end": 1748.61, "word": " the", "probability": 0.9111328125}, {"start": 1748.61, "end": 1748.99, "word": " position", "probability": 0.95458984375}, {"start": 1748.99, "end": 1749.29, "word": " 
rank", "probability": 0.92138671875}, {"start": 1749.29, "end": 1749.59, "word": " is", "probability": 0.94482421875}, {"start": 1749.59, "end": 1750.17, "word": " 2", "probability": 0.96923828125}, {"start": 1750.17, "end": 1750.75, "word": ".6?", "probability": 0.997802734375}, {"start": 1751.15, "end": 1751.35, "word": " Just", "probability": 0.69384765625}, {"start": 1751.35, "end": 1751.67, "word": " rank", "probability": 0.62353515625}, {"start": 1751.67, "end": 1752.11, "word": " up", "probability": 0.7509765625}, {"start": 1752.11, "end": 1752.25, "word": " to", "probability": 0.9345703125}, {"start": 1752.25, "end": 1752.43, "word": " 3.", "probability": 0.7099609375}, {"start": 1752.67, "end": 1752.99, "word": " So", "probability": 0.9521484375}, {"start": 1752.99, "end": 1753.31, "word": " that's", "probability": 0.9521484375}, {"start": 1753.31, "end": 1753.51, "word": " 3.", "probability": 0.9677734375}], "temperature": 1.0}, {"id": 66, "seek": 178358, "start": 1754.34, "end": 1783.58, "text": " So that's the rule you have to follow if the result is a number, a whole number, I mean integer, fraction of half, or not real number, I mean, not whole number, or fraction of half. Look at this specific example. Suppose we have this data. This is ordered array, 11, 12, up to 22. 
And let's see how can we compute", "tokens": [407, 300, 311, 264, 4978, 291, 362, 281, 1524, 498, 264, 1874, 307, 257, 1230, 11, 257, 1379, 1230, 11, 286, 914, 24922, 11, 14135, 295, 1922, 11, 420, 406, 957, 1230, 11, 286, 914, 11, 406, 1379, 1230, 11, 420, 14135, 295, 1922, 13, 2053, 412, 341, 2685, 1365, 13, 21360, 321, 362, 341, 1412, 13, 639, 307, 8866, 10225, 11, 2975, 11, 2272, 11, 493, 281, 5853, 13, 400, 718, 311, 536, 577, 393, 321, 14722], "avg_logprob": -0.2088607617571384, "compression_ratio": 1.5858585858585859, "no_speech_prob": 0.0, "words": [{"start": 1754.34, "end": 1754.62, "word": " So", "probability": 0.5341796875}, {"start": 1754.62, "end": 1755.06, "word": " that's", "probability": 0.8017578125}, {"start": 1755.06, "end": 1755.46, "word": " the", "probability": 0.9052734375}, {"start": 1755.46, "end": 1755.88, "word": " rule", "probability": 0.892578125}, {"start": 1755.88, "end": 1756.06, "word": " you", "probability": 0.9033203125}, {"start": 1756.06, "end": 1756.24, "word": " have", "probability": 0.939453125}, {"start": 1756.24, "end": 1756.4, "word": " to", "probability": 0.96826171875}, {"start": 1756.4, "end": 1756.7, "word": " follow", "probability": 0.90087890625}, {"start": 1756.7, "end": 1757.98, "word": " if", "probability": 0.705078125}, {"start": 1757.98, "end": 1758.4, "word": " the", "probability": 0.89697265625}, {"start": 1758.4, "end": 1759.36, "word": " result", "probability": 0.9130859375}, {"start": 1759.36, "end": 1759.92, "word": " is", "probability": 0.94970703125}, {"start": 1759.92, "end": 1760.22, "word": " a", "probability": 0.95751953125}, {"start": 1760.22, "end": 1760.52, "word": " number,", "probability": 0.869140625}, {"start": 1761.02, "end": 1761.12, "word": " a", "probability": 0.9091796875}, {"start": 1761.12, "end": 1761.28, "word": " whole", "probability": 0.923828125}, {"start": 1761.28, "end": 1761.64, "word": " number,", "probability": 0.9423828125}, {"start": 1762.04, "end": 1762.28, "word": " I", 
"probability": 0.85498046875}, {"start": 1762.28, "end": 1762.38, "word": " mean", "probability": 0.9697265625}, {"start": 1762.38, "end": 1762.78, "word": " integer,", "probability": 0.79296875}, {"start": 1763.62, "end": 1764.0, "word": " fraction", "probability": 0.69189453125}, {"start": 1764.0, "end": 1764.16, "word": " of", "probability": 0.438720703125}, {"start": 1764.16, "end": 1764.58, "word": " half,", "probability": 0.515625}, {"start": 1765.16, "end": 1765.4, "word": " or", "probability": 0.83935546875}, {"start": 1765.4, "end": 1767.2, "word": " not", "probability": 0.6318359375}, {"start": 1767.2, "end": 1767.48, "word": " real", "probability": 0.40185546875}, {"start": 1767.48, "end": 1767.72, "word": " number,", "probability": 0.93603515625}, {"start": 1767.82, "end": 1767.82, "word": " I", "probability": 0.81689453125}, {"start": 1767.82, "end": 1768.0, "word": " mean,", "probability": 0.96435546875}, {"start": 1768.32, "end": 1769.02, "word": " not", "probability": 0.6181640625}, {"start": 1769.02, "end": 1769.3, "word": " whole", "probability": 0.82470703125}, {"start": 1769.3, "end": 1769.74, "word": " number,", "probability": 0.94091796875}, {"start": 1770.68, "end": 1770.92, "word": " or", "probability": 0.94384765625}, {"start": 1770.92, "end": 1771.5, "word": " fraction", "probability": 0.69140625}, {"start": 1771.5, "end": 1771.68, "word": " of", "probability": 0.85302734375}, {"start": 1771.68, "end": 1771.88, "word": " half.", "probability": 0.85400390625}, {"start": 1773.22, "end": 1773.7, "word": " Look", "probability": 0.84228515625}, {"start": 1773.7, "end": 1773.82, "word": " at", "probability": 0.966796875}, {"start": 1773.82, "end": 1774.0, "word": " this", "probability": 0.9423828125}, {"start": 1774.0, "end": 1774.48, "word": " specific", "probability": 0.900390625}, {"start": 1774.48, "end": 1774.9, "word": " example.", "probability": 0.9716796875}, {"start": 1775.1, "end": 1775.34, "word": " Suppose", "probability": 
0.76318359375}, {"start": 1775.34, "end": 1775.54, "word": " we", "probability": 0.93310546875}, {"start": 1775.54, "end": 1775.7, "word": " have", "probability": 0.94287109375}, {"start": 1775.7, "end": 1775.98, "word": " this", "probability": 0.94384765625}, {"start": 1775.98, "end": 1776.36, "word": " data.", "probability": 0.94677734375}, {"start": 1777.02, "end": 1777.4, "word": " This", "probability": 0.8076171875}, {"start": 1777.4, "end": 1777.52, "word": " is", "probability": 0.95068359375}, {"start": 1777.52, "end": 1777.92, "word": " ordered", "probability": 0.71337890625}, {"start": 1777.92, "end": 1778.36, "word": " array,", "probability": 0.89453125}, {"start": 1779.06, "end": 1779.36, "word": " 11,", "probability": 0.7890625}, {"start": 1779.5, "end": 1779.82, "word": " 12,", "probability": 0.931640625}, {"start": 1779.94, "end": 1780.18, "word": " up", "probability": 0.97412109375}, {"start": 1780.18, "end": 1780.38, "word": " to", "probability": 0.97216796875}, {"start": 1780.38, "end": 1780.96, "word": " 22.", "probability": 0.95947265625}, {"start": 1781.72, "end": 1782.2, "word": " And", "probability": 0.9375}, {"start": 1782.2, "end": 1782.48, "word": " let's", "probability": 0.966064453125}, {"start": 1782.48, "end": 1782.62, "word": " see", "probability": 0.916015625}, {"start": 1782.62, "end": 1782.78, "word": " how", "probability": 0.90185546875}, {"start": 1782.78, "end": 1782.98, "word": " can", "probability": 0.87353515625}, {"start": 1782.98, "end": 1783.14, "word": " we", "probability": 0.95458984375}, {"start": 1783.14, "end": 1783.58, "word": " compute", "probability": 0.8984375}], "temperature": 1.0}, {"id": 67, "seek": 181294, "start": 1785.12, "end": 1812.94, "text": " These measures. Look carefully here. First, let's compute the median. The median and the value in the middle. How many values we have? There are nine values. So the middle is number five. One, two, three, four, five. 
So 16.", "tokens": [1981, 8000, 13, 2053, 7500, 510, 13, 2386, 11, 718, 311, 14722, 264, 26779, 13, 440, 26779, 293, 264, 2158, 294, 264, 2808, 13, 1012, 867, 4190, 321, 362, 30, 821, 366, 4949, 4190, 13, 407, 264, 2808, 307, 1230, 1732, 13, 1485, 11, 732, 11, 1045, 11, 1451, 11, 1732, 13, 407, 3165, 13], "avg_logprob": -0.22656250345919812, "compression_ratio": 1.4088050314465408, "no_speech_prob": 0.0, "words": [{"start": 1785.1200000000001, "end": 1785.68, "word": " These", "probability": 0.2076416015625}, {"start": 1785.68, "end": 1786.24, "word": " measures.", "probability": 0.70556640625}, {"start": 1790.08, "end": 1790.64, "word": " Look", "probability": 0.71337890625}, {"start": 1790.64, "end": 1791.12, "word": " carefully", "probability": 0.8134765625}, {"start": 1791.12, "end": 1791.7, "word": " here.", "probability": 0.79833984375}, {"start": 1795.4, "end": 1795.96, "word": " First,", "probability": 0.84765625}, {"start": 1796.12, "end": 1796.4, "word": " let's", "probability": 0.931884765625}, {"start": 1796.4, "end": 1796.86, "word": " compute", "probability": 0.83544921875}, {"start": 1796.86, "end": 1798.24, "word": " the", "probability": 0.83203125}, {"start": 1798.24, "end": 1798.48, "word": " median.", "probability": 0.7939453125}, {"start": 1798.68, "end": 1798.9, "word": " The", "probability": 0.87841796875}, {"start": 1798.9, "end": 1799.1, "word": " median", "probability": 0.93310546875}, {"start": 1799.1, "end": 1799.26, "word": " and", "probability": 0.328369140625}, {"start": 1799.26, "end": 1799.34, "word": " the", "probability": 0.84912109375}, {"start": 1799.34, "end": 1799.52, "word": " value", "probability": 0.9638671875}, {"start": 1799.52, "end": 1799.64, "word": " in", "probability": 0.77734375}, {"start": 1799.64, "end": 1799.74, "word": " the", "probability": 0.9248046875}, {"start": 1799.74, "end": 1799.92, "word": " middle.", "probability": 0.93701171875}, {"start": 1800.82, "end": 1801.32, "word": " How", "probability": 
0.95654296875}, {"start": 1801.32, "end": 1801.56, "word": " many", "probability": 0.900390625}, {"start": 1801.56, "end": 1801.82, "word": " values", "probability": 0.9228515625}, {"start": 1801.82, "end": 1802.02, "word": " we", "probability": 0.69970703125}, {"start": 1802.02, "end": 1802.36, "word": " have?", "probability": 0.94482421875}, {"start": 1802.8, "end": 1803.28, "word": " There", "probability": 0.78466796875}, {"start": 1803.28, "end": 1803.46, "word": " are", "probability": 0.947265625}, {"start": 1803.46, "end": 1803.8, "word": " nine", "probability": 0.75244140625}, {"start": 1803.8, "end": 1804.16, "word": " values.", "probability": 0.958984375}, {"start": 1805.88, "end": 1806.44, "word": " So", "probability": 0.80615234375}, {"start": 1806.44, "end": 1806.64, "word": " the", "probability": 0.7060546875}, {"start": 1806.64, "end": 1806.86, "word": " middle", "probability": 0.93310546875}, {"start": 1806.86, "end": 1807.32, "word": " is", "probability": 0.94677734375}, {"start": 1807.32, "end": 1808.92, "word": " number", "probability": 0.85498046875}, {"start": 1808.92, "end": 1809.34, "word": " five.", "probability": 0.6591796875}, {"start": 1809.84, "end": 1810.4, "word": " One,", "probability": 0.487060546875}, {"start": 1810.52, "end": 1810.6, "word": " two,", "probability": 0.94091796875}, {"start": 1810.7, "end": 1810.92, "word": " three,", "probability": 0.9423828125}, {"start": 1811.0, "end": 1811.24, "word": " four,", "probability": 0.94677734375}, {"start": 1811.36, "end": 1811.64, "word": " five.", "probability": 0.90673828125}, {"start": 1811.9, "end": 1812.28, "word": " So", "probability": 0.93212890625}, {"start": 1812.28, "end": 1812.94, "word": " 16.", "probability": 0.445556640625}], "temperature": 1.0}, {"id": 68, "seek": 184317, "start": 1814.83, "end": 1843.17, "text": " This value is the median. Now look at the values below the median. There are 4 and 4 below and above the median. Now let's see how can we compute Q1. 
The position of Q1, as we mentioned, is N plus 1 divided by 4. So N is 9 plus 1 divided by 4 is 2.5.", "tokens": [639, 2158, 307, 264, 26779, 13, 823, 574, 412, 264, 4190, 2507, 264, 26779, 13, 821, 366, 1017, 293, 1017, 2507, 293, 3673, 264, 26779, 13, 823, 718, 311, 536, 577, 393, 321, 14722, 1249, 16, 13, 440, 2535, 295, 1249, 16, 11, 382, 321, 2835, 11, 307, 426, 1804, 502, 6666, 538, 1017, 13, 407, 426, 307, 1722, 1804, 502, 6666, 538, 1017, 307, 568, 13, 20, 13], "avg_logprob": -0.1900669685431889, "compression_ratio": 1.5886075949367089, "no_speech_prob": 0.0, "words": [{"start": 1814.8300000000002, "end": 1815.39, "word": " This", "probability": 0.25146484375}, {"start": 1815.39, "end": 1815.87, "word": " value", "probability": 0.9560546875}, {"start": 1815.87, "end": 1817.89, "word": " is", "probability": 0.6484375}, {"start": 1817.89, "end": 1818.05, "word": " the", "probability": 0.84619140625}, {"start": 1818.05, "end": 1818.21, "word": " median.", "probability": 0.66357421875}, {"start": 1819.53, "end": 1819.81, "word": " Now", "probability": 0.87890625}, {"start": 1819.81, "end": 1820.41, "word": " look", "probability": 0.5673828125}, {"start": 1820.41, "end": 1820.67, "word": " at", "probability": 0.9609375}, {"start": 1820.67, "end": 1820.85, "word": " the", "probability": 0.9111328125}, {"start": 1820.85, "end": 1821.49, "word": " values", "probability": 0.9453125}, {"start": 1821.49, "end": 1823.01, "word": " below", "probability": 0.857421875}, {"start": 1823.01, "end": 1823.25, "word": " the", "probability": 0.8447265625}, {"start": 1823.25, "end": 1823.45, "word": " median.", "probability": 0.92822265625}, {"start": 1825.25, "end": 1825.59, "word": " There", "probability": 0.473388671875}, {"start": 1825.59, "end": 1825.79, "word": " are", "probability": 0.93603515625}, {"start": 1825.79, "end": 1826.09, "word": " 4", "probability": 0.462890625}, {"start": 1826.09, "end": 1827.19, "word": " and", "probability": 0.83544921875}, {"start": 
1827.19, "end": 1827.59, "word": " 4", "probability": 0.9599609375}, {"start": 1827.59, "end": 1828.23, "word": " below", "probability": 0.765625}, {"start": 1828.23, "end": 1828.47, "word": " and", "probability": 0.931640625}, {"start": 1828.47, "end": 1828.79, "word": " above", "probability": 0.962890625}, {"start": 1828.79, "end": 1829.65, "word": " the", "probability": 0.8818359375}, {"start": 1829.65, "end": 1829.89, "word": " median.", "probability": 0.93701171875}, {"start": 1830.91, "end": 1831.19, "word": " Now", "probability": 0.78173828125}, {"start": 1831.19, "end": 1831.41, "word": " let's", "probability": 0.87451171875}, {"start": 1831.41, "end": 1831.53, "word": " see", "probability": 0.87158203125}, {"start": 1831.53, "end": 1831.63, "word": " how", "probability": 0.9052734375}, {"start": 1831.63, "end": 1831.81, "word": " can", "probability": 0.61376953125}, {"start": 1831.81, "end": 1831.95, "word": " we", "probability": 0.9326171875}, {"start": 1831.95, "end": 1832.41, "word": " compute", "probability": 0.9296875}, {"start": 1832.41, "end": 1833.11, "word": " Q1.", "probability": 0.900146484375}, {"start": 1834.51, "end": 1834.97, "word": " The", "probability": 0.87939453125}, {"start": 1834.97, "end": 1835.33, "word": " position", "probability": 0.77880859375}, {"start": 1835.33, "end": 1835.53, "word": " of", "probability": 0.9638671875}, {"start": 1835.53, "end": 1836.07, "word": " Q1,", "probability": 0.78076171875}, {"start": 1836.49, "end": 1837.01, "word": " as", "probability": 0.95703125}, {"start": 1837.01, "end": 1837.13, "word": " we", "probability": 0.9345703125}, {"start": 1837.13, "end": 1837.45, "word": " mentioned,", "probability": 0.818359375}, {"start": 1837.57, "end": 1837.67, "word": " is", "probability": 0.7705078125}, {"start": 1837.67, "end": 1837.79, "word": " N", "probability": 0.5400390625}, {"start": 1837.79, "end": 1838.05, "word": " plus", "probability": 0.783203125}, {"start": 1838.05, "end": 1838.25, "word": " 1", 
"probability": 0.826171875}, {"start": 1838.25, "end": 1838.49, "word": " divided", "probability": 0.79296875}, {"start": 1838.49, "end": 1838.71, "word": " by", "probability": 0.9638671875}, {"start": 1838.71, "end": 1839.03, "word": " 4.", "probability": 0.95361328125}, {"start": 1839.81, "end": 1840.05, "word": " So", "probability": 0.95263671875}, {"start": 1840.05, "end": 1840.25, "word": " N", "probability": 0.83203125}, {"start": 1840.25, "end": 1840.39, "word": " is", "probability": 0.94775390625}, {"start": 1840.39, "end": 1840.69, "word": " 9", "probability": 0.88671875}, {"start": 1840.69, "end": 1841.01, "word": " plus", "probability": 0.7041015625}, {"start": 1841.01, "end": 1841.23, "word": " 1", "probability": 0.95263671875}, {"start": 1841.23, "end": 1841.43, "word": " divided", "probability": 0.81640625}, {"start": 1841.43, "end": 1841.61, "word": " by", "probability": 0.95068359375}, {"start": 1841.61, "end": 1841.97, "word": " 4", "probability": 0.98974609375}, {"start": 1841.97, "end": 1842.47, "word": " is", "probability": 0.85205078125}, {"start": 1842.47, "end": 1842.63, "word": " 2", "probability": 0.990234375}, {"start": 1842.63, "end": 1843.17, "word": ".5.", "probability": 0.990234375}], "temperature": 1.0}, {"id": 69, "seek": 187411, "start": 1845.41, "end": 1874.11, "text": " 2.5 position, it means you have to take the average of the two corresponding values, 2 and 3. So 2 and 3, so 12 plus 13 divided by 2. That gives 12.5. So this is Q1. So Q1 is 12.5. 
Now what's about Q3?", "tokens": [568, 13, 20, 2535, 11, 309, 1355, 291, 362, 281, 747, 264, 4274, 295, 264, 732, 11760, 4190, 11, 568, 293, 805, 13, 407, 568, 293, 805, 11, 370, 2272, 1804, 3705, 6666, 538, 568, 13, 663, 2709, 2272, 13, 20, 13, 407, 341, 307, 1249, 16, 13, 407, 1249, 16, 307, 2272, 13, 20, 13, 823, 437, 311, 466, 1249, 18, 30], "avg_logprob": -0.1383056645281613, "compression_ratio": 1.3289473684210527, "no_speech_prob": 0.0, "words": [{"start": 1845.41, "end": 1845.63, "word": " 2", "probability": 0.49609375}, {"start": 1845.63, "end": 1846.19, "word": ".5", "probability": 0.979248046875}, {"start": 1846.19, "end": 1846.65, "word": " position,", "probability": 0.84130859375}, {"start": 1846.97, "end": 1847.09, "word": " it", "probability": 0.880859375}, {"start": 1847.09, "end": 1847.47, "word": " means", "probability": 0.92724609375}, {"start": 1847.47, "end": 1848.29, "word": " you", "probability": 0.90380859375}, {"start": 1848.29, "end": 1848.71, "word": " have", "probability": 0.939453125}, {"start": 1848.71, "end": 1849.57, "word": " to", "probability": 0.95654296875}, {"start": 1849.57, "end": 1849.91, "word": " take", "probability": 0.8828125}, {"start": 1849.91, "end": 1850.33, "word": " the", "probability": 0.919921875}, {"start": 1850.33, "end": 1851.07, "word": " average", "probability": 0.8251953125}, {"start": 1851.07, "end": 1851.91, "word": " of", "probability": 0.94677734375}, {"start": 1851.91, "end": 1852.07, "word": " the", "probability": 0.8125}, {"start": 1852.07, "end": 1852.27, "word": " two", "probability": 0.88232421875}, {"start": 1852.27, "end": 1852.75, "word": " corresponding", "probability": 0.8515625}, {"start": 1852.75, "end": 1853.43, "word": " values,", "probability": 0.9638671875}, {"start": 1853.93, "end": 1854.05, "word": " 2", "probability": 0.6494140625}, {"start": 1854.05, "end": 1854.19, "word": " and", "probability": 0.9375}, {"start": 1854.19, "end": 1854.49, "word": " 3.", "probability": 0.9765625}, 
{"start": 1855.13, "end": 1855.77, "word": " So", "probability": 0.9541015625}, {"start": 1855.77, "end": 1856.13, "word": " 2", "probability": 0.701171875}, {"start": 1856.13, "end": 1857.03, "word": " and", "probability": 0.89599609375}, {"start": 1857.03, "end": 1857.39, "word": " 3,", "probability": 0.99462890625}, {"start": 1857.77, "end": 1857.95, "word": " so", "probability": 0.9326171875}, {"start": 1857.95, "end": 1858.41, "word": " 12", "probability": 0.958984375}, {"start": 1858.41, "end": 1858.73, "word": " plus", "probability": 0.90087890625}, {"start": 1858.73, "end": 1859.15, "word": " 13", "probability": 0.9345703125}, {"start": 1859.15, "end": 1859.37, "word": " divided", "probability": 0.720703125}, {"start": 1859.37, "end": 1859.59, "word": " by", "probability": 0.97021484375}, {"start": 1859.59, "end": 1859.93, "word": " 2.", "probability": 0.96240234375}, {"start": 1860.25, "end": 1860.63, "word": " That", "probability": 0.890625}, {"start": 1860.63, "end": 1861.01, "word": " gives", "probability": 0.90478515625}, {"start": 1861.01, "end": 1861.89, "word": " 12", "probability": 0.96875}, {"start": 1861.89, "end": 1862.35, "word": ".5.", "probability": 0.996826171875}, {"start": 1862.69, "end": 1863.01, "word": " So", "probability": 0.9560546875}, {"start": 1863.01, "end": 1864.75, "word": " this", "probability": 0.82958984375}, {"start": 1864.75, "end": 1865.39, "word": " is", "probability": 0.94873046875}, {"start": 1865.39, "end": 1868.39, "word": " Q1.", "probability": 0.764404296875}, {"start": 1868.53, "end": 1868.71, "word": " So", "probability": 0.947265625}, {"start": 1868.71, "end": 1869.15, "word": " Q1", "probability": 0.98095703125}, {"start": 1869.15, "end": 1869.47, "word": " is", "probability": 0.9228515625}, {"start": 1869.47, "end": 1869.79, "word": " 12", "probability": 0.951171875}, {"start": 1869.79, "end": 1870.25, "word": ".5.", "probability": 0.997314453125}, {"start": 1872.69, "end": 1873.17, "word": " Now", 
"probability": 0.9462890625}, {"start": 1873.17, "end": 1873.41, "word": " what's", "probability": 0.769775390625}, {"start": 1873.41, "end": 1873.65, "word": " about", "probability": 0.916015625}, {"start": 1873.65, "end": 1874.11, "word": " Q3?", "probability": 0.9970703125}], "temperature": 1.0}, {"id": 70, "seek": 190489, "start": 1876.11, "end": 1904.89, "text": " The Q3, the rank position, Q1 was 2.5. So Q3 should be three times that value, because it's three times A plus 1 over 4. That means the rank position is 7.5. That means you have to take the average of the 7 and 8 position. 7 and 8 is 18.", "tokens": [440, 1249, 18, 11, 264, 6181, 2535, 11, 1249, 16, 390, 568, 13, 20, 13, 407, 1249, 18, 820, 312, 1045, 1413, 300, 2158, 11, 570, 309, 311, 1045, 1413, 316, 1804, 502, 670, 1017, 13, 663, 1355, 264, 6181, 2535, 307, 1614, 13, 20, 13, 663, 1355, 291, 362, 281, 747, 264, 4274, 295, 264, 1614, 293, 1649, 2535, 13, 1614, 293, 1649, 307, 2443, 13], "avg_logprob": -0.1938189290025655, "compression_ratio": 1.5354838709677419, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1876.11, "end": 1876.33, "word": " The", "probability": 0.2489013671875}, {"start": 1876.33, "end": 1876.83, "word": " Q3,", "probability": 0.824951171875}, {"start": 1877.93, "end": 1878.21, "word": " the", "probability": 0.89697265625}, {"start": 1878.21, "end": 1878.45, "word": " rank", "probability": 0.94921875}, {"start": 1878.45, "end": 1878.89, "word": " position,", "probability": 0.95556640625}, {"start": 1880.69, "end": 1881.41, "word": " Q1", "probability": 0.95068359375}, {"start": 1881.41, "end": 1881.77, "word": " was", "probability": 0.94189453125}, {"start": 1881.77, "end": 1882.07, "word": " 2", "probability": 0.82666015625}, {"start": 1882.07, "end": 1883.91, "word": ".5.", "probability": 0.87451171875}, {"start": 1884.35, "end": 1884.61, "word": " So", "probability": 0.96533203125}, {"start": 1884.61, "end": 1885.09, "word": " Q3", "probability": 0.94775390625}, 
{"start": 1885.09, "end": 1885.35, "word": " should", "probability": 0.96875}, {"start": 1885.35, "end": 1885.63, "word": " be", "probability": 0.80712890625}, {"start": 1885.63, "end": 1887.81, "word": " three", "probability": 0.7158203125}, {"start": 1887.81, "end": 1888.27, "word": " times", "probability": 0.8935546875}, {"start": 1888.27, "end": 1888.53, "word": " that", "probability": 0.89453125}, {"start": 1888.53, "end": 1888.95, "word": " value,", "probability": 0.97998046875}, {"start": 1889.09, "end": 1889.37, "word": " because", "probability": 0.84130859375}, {"start": 1889.37, "end": 1889.67, "word": " it's", "probability": 0.941650390625}, {"start": 1889.67, "end": 1890.07, "word": " three", "probability": 0.748046875}, {"start": 1890.07, "end": 1890.69, "word": " times", "probability": 0.923828125}, {"start": 1890.69, "end": 1892.13, "word": " A", "probability": 0.2022705078125}, {"start": 1892.13, "end": 1892.41, "word": " plus", "probability": 0.92578125}, {"start": 1892.41, "end": 1892.59, "word": " 1", "probability": 0.64404296875}, {"start": 1892.59, "end": 1892.77, "word": " over", "probability": 0.87060546875}, {"start": 1892.77, "end": 1893.11, "word": " 4.", "probability": 0.90234375}, {"start": 1893.23, "end": 1893.49, "word": " That", "probability": 0.90673828125}, {"start": 1893.49, "end": 1893.83, "word": " means", "probability": 0.93115234375}, {"start": 1893.83, "end": 1894.27, "word": " the", "probability": 0.8671875}, {"start": 1894.27, "end": 1894.47, "word": " rank", "probability": 0.9296875}, {"start": 1894.47, "end": 1894.83, "word": " position", "probability": 0.94287109375}, {"start": 1894.83, "end": 1895.13, "word": " is", "probability": 0.94091796875}, {"start": 1895.13, "end": 1895.49, "word": " 7", "probability": 0.99072265625}, {"start": 1895.49, "end": 1896.09, "word": ".5.", "probability": 0.998779296875}, {"start": 1896.59, "end": 1896.85, "word": " That", "probability": 0.90234375}, {"start": 1896.85, "end": 1897.07, 
"word": " means", "probability": 0.92822265625}, {"start": 1897.07, "end": 1897.21, "word": " you", "probability": 0.92333984375}, {"start": 1897.21, "end": 1897.35, "word": " have", "probability": 0.93896484375}, {"start": 1897.35, "end": 1897.47, "word": " to", "probability": 0.96142578125}, {"start": 1897.47, "end": 1897.69, "word": " take", "probability": 0.84375}, {"start": 1897.69, "end": 1897.91, "word": " the", "probability": 0.916015625}, {"start": 1897.91, "end": 1898.27, "word": " average", "probability": 0.814453125}, {"start": 1898.27, "end": 1898.61, "word": " of", "probability": 0.96435546875}, {"start": 1898.61, "end": 1898.91, "word": " the", "probability": 0.91796875}, {"start": 1898.91, "end": 1899.41, "word": " 7", "probability": 0.5634765625}, {"start": 1899.41, "end": 1900.29, "word": " and", "probability": 0.857421875}, {"start": 1900.29, "end": 1900.57, "word": " 8", "probability": 0.93603515625}, {"start": 1900.57, "end": 1901.01, "word": " position.", "probability": 0.5673828125}, {"start": 1901.61, "end": 1901.89, "word": " 7", "probability": 0.78759765625}, {"start": 1901.89, "end": 1902.09, "word": " and", "probability": 0.9345703125}, {"start": 1902.09, "end": 1902.35, "word": " 8", "probability": 0.998046875}, {"start": 1902.35, "end": 1902.79, "word": " is", "probability": 0.94384765625}, {"start": 1902.79, "end": 1904.89, "word": " 18.", "probability": 0.39013671875}], "temperature": 1.0}, {"id": 71, "seek": 193398, "start": 1905.88, "end": 1933.98, "text": " which is 19.5. So that's Q3, 19.5. So this is Q3. This value is Q1. And this value is? 
Now, Q2 is the center.", "tokens": [597, 307, 1294, 13, 20, 13, 407, 300, 311, 1249, 18, 11, 1294, 13, 20, 13, 407, 341, 307, 1249, 18, 13, 639, 2158, 307, 1249, 16, 13, 400, 341, 2158, 307, 30, 823, 11, 1249, 17, 307, 264, 3056, 13], "avg_logprob": -0.16713170068604605, "compression_ratio": 1.264367816091954, "no_speech_prob": 0.0, "words": [{"start": 1905.88, "end": 1906.26, "word": " which", "probability": 0.298583984375}, {"start": 1906.26, "end": 1907.82, "word": " is", "probability": 0.943359375}, {"start": 1907.82, "end": 1910.22, "word": " 19", "probability": 0.6953125}, {"start": 1910.22, "end": 1912.28, "word": ".5.", "probability": 0.94287109375}, {"start": 1912.38, "end": 1912.5, "word": " So", "probability": 0.861328125}, {"start": 1912.5, "end": 1912.96, "word": " that's", "probability": 0.844970703125}, {"start": 1912.96, "end": 1913.5, "word": " Q3,", "probability": 0.8583984375}, {"start": 1915.24, "end": 1916.16, "word": " 19", "probability": 0.9091796875}, {"start": 1916.16, "end": 1916.64, "word": ".5.", "probability": 0.96484375}, {"start": 1920.36, "end": 1921.28, "word": " So", "probability": 0.91552734375}, {"start": 1921.28, "end": 1921.5, "word": " this", "probability": 0.9091796875}, {"start": 1921.5, "end": 1921.6, "word": " is", "probability": 0.927734375}, {"start": 1921.6, "end": 1922.04, "word": " Q3.", "probability": 0.80126953125}, {"start": 1923.18, "end": 1923.64, "word": " This", "probability": 0.8857421875}, {"start": 1923.64, "end": 1924.06, "word": " value", "probability": 0.96533203125}, {"start": 1924.06, "end": 1925.32, "word": " is", "probability": 0.88232421875}, {"start": 1925.32, "end": 1925.8, "word": " Q1.", "probability": 0.94921875}, {"start": 1928.12, "end": 1928.58, "word": " And", "probability": 0.85400390625}, {"start": 1928.58, "end": 1928.82, "word": " this", "probability": 0.9384765625}, {"start": 1928.82, "end": 1929.16, "word": " value", "probability": 0.97705078125}, {"start": 1929.16, "end": 1929.6, 
"word": " is?", "probability": 0.94580078125}, {"start": 1931.98, "end": 1932.9, "word": " Now,", "probability": 0.87744140625}, {"start": 1932.96, "end": 1933.3, "word": " Q2", "probability": 0.993896484375}, {"start": 1933.3, "end": 1933.54, "word": " is", "probability": 0.94384765625}, {"start": 1933.54, "end": 1933.72, "word": " the", "probability": 0.9287109375}, {"start": 1933.72, "end": 1933.98, "word": " center.", "probability": 0.91015625}], "temperature": 1.0}, {"id": 72, "seek": 196375, "start": 1934.99, "end": 1963.75, "text": " is located in the center because, as we mentioned, four below and four above. Now what's about Q1? Q1 is not in the center of the entire data. Because Q1, 12.5, so two points below and the others maybe how many above two, four, six, seven observations above it. So that means Q1 is not center. Also Q3 is not center because two observations above it and seven below it.", "tokens": [307, 6870, 294, 264, 3056, 570, 11, 382, 321, 2835, 11, 1451, 2507, 293, 1451, 3673, 13, 823, 437, 311, 466, 1249, 16, 30, 1249, 16, 307, 406, 294, 264, 3056, 295, 264, 2302, 1412, 13, 1436, 1249, 16, 11, 2272, 13, 20, 11, 370, 732, 2793, 2507, 293, 264, 2357, 1310, 577, 867, 3673, 732, 11, 1451, 11, 2309, 11, 3407, 18163, 3673, 309, 13, 407, 300, 1355, 1249, 16, 307, 406, 3056, 13, 2743, 1249, 18, 307, 406, 3056, 570, 732, 18163, 3673, 309, 293, 3407, 2507, 309, 13], "avg_logprob": -0.1866508197525273, "compression_ratio": 1.770334928229665, "no_speech_prob": 0.0, "words": [{"start": 1934.99, "end": 1935.25, "word": " is", "probability": 0.343994140625}, {"start": 1935.25, "end": 1935.61, "word": " located", "probability": 0.9091796875}, {"start": 1935.61, "end": 1935.79, "word": " in", "probability": 0.90869140625}, {"start": 1935.79, "end": 1935.91, "word": " the", "probability": 0.92138671875}, {"start": 1935.91, "end": 1936.15, "word": " center", "probability": 0.8974609375}, {"start": 1936.15, "end": 1936.59, "word": " because,", "probability": 
0.64990234375}, {"start": 1937.21, "end": 1937.29, "word": " as", "probability": 0.95947265625}, {"start": 1937.29, "end": 1937.39, "word": " we", "probability": 0.4658203125}, {"start": 1937.39, "end": 1937.61, "word": " mentioned,", "probability": 0.81982421875}, {"start": 1937.73, "end": 1937.95, "word": " four", "probability": 0.8076171875}, {"start": 1937.95, "end": 1938.31, "word": " below", "probability": 0.92529296875}, {"start": 1938.31, "end": 1938.57, "word": " and", "probability": 0.91748046875}, {"start": 1938.57, "end": 1938.83, "word": " four", "probability": 0.9326171875}, {"start": 1938.83, "end": 1939.19, "word": " above.", "probability": 0.96630859375}, {"start": 1939.85, "end": 1940.41, "word": " Now", "probability": 0.888671875}, {"start": 1940.41, "end": 1940.61, "word": " what's", "probability": 0.68798828125}, {"start": 1940.61, "end": 1940.89, "word": " about", "probability": 0.90478515625}, {"start": 1940.89, "end": 1941.33, "word": " Q1?", "probability": 0.947998046875}, {"start": 1941.89, "end": 1942.25, "word": " Q1", "probability": 0.760498046875}, {"start": 1942.25, "end": 1942.41, "word": " is", "probability": 0.9384765625}, {"start": 1942.41, "end": 1942.63, "word": " not", "probability": 0.94873046875}, {"start": 1942.63, "end": 1942.81, "word": " in", "probability": 0.9306640625}, {"start": 1942.81, "end": 1942.95, "word": " the", "probability": 0.92041015625}, {"start": 1942.95, "end": 1943.23, "word": " center", "probability": 0.8984375}, {"start": 1943.23, "end": 1943.49, "word": " of", "probability": 0.966796875}, {"start": 1943.49, "end": 1943.61, "word": " the", "probability": 0.9091796875}, {"start": 1943.61, "end": 1943.93, "word": " entire", "probability": 0.8994140625}, {"start": 1943.93, "end": 1944.27, "word": " data.", "probability": 0.87744140625}, {"start": 1945.23, "end": 1945.47, "word": " Because", "probability": 0.7890625}, {"start": 1945.47, "end": 1945.91, "word": " Q1,", "probability": 0.989990234375}, 
{"start": 1946.65, "end": 1946.93, "word": " 12", "probability": 0.9052734375}, {"start": 1946.93, "end": 1947.55, "word": ".5,", "probability": 0.981201171875}, {"start": 1947.99, "end": 1948.15, "word": " so", "probability": 0.8759765625}, {"start": 1948.15, "end": 1948.37, "word": " two", "probability": 0.91796875}, {"start": 1948.37, "end": 1948.67, "word": " points", "probability": 0.8818359375}, {"start": 1948.67, "end": 1949.11, "word": " below", "probability": 0.90625}, {"start": 1949.11, "end": 1950.59, "word": " and", "probability": 0.5048828125}, {"start": 1950.59, "end": 1950.73, "word": " the", "probability": 0.6181640625}, {"start": 1950.73, "end": 1951.03, "word": " others", "probability": 0.83544921875}, {"start": 1951.03, "end": 1951.33, "word": " maybe", "probability": 0.52880859375}, {"start": 1951.33, "end": 1951.59, "word": " how", "probability": 0.59423828125}, {"start": 1951.59, "end": 1951.83, "word": " many", "probability": 0.8984375}, {"start": 1951.83, "end": 1952.39, "word": " above", "probability": 0.85546875}, {"start": 1952.39, "end": 1952.67, "word": " two,", "probability": 0.3828125}, {"start": 1952.79, "end": 1952.97, "word": " four,", "probability": 0.93017578125}, {"start": 1953.09, "end": 1953.31, "word": " six,", "probability": 0.94482421875}, {"start": 1953.39, "end": 1953.65, "word": " seven", "probability": 0.90283203125}, {"start": 1953.65, "end": 1954.23, "word": " observations", "probability": 0.7177734375}, {"start": 1954.23, "end": 1954.53, "word": " above", "probability": 0.900390625}, {"start": 1954.53, "end": 1954.75, "word": " it.", "probability": 0.94580078125}, {"start": 1955.39, "end": 1955.61, "word": " So", "probability": 0.9521484375}, {"start": 1955.61, "end": 1955.83, "word": " that", "probability": 0.88330078125}, {"start": 1955.83, "end": 1956.13, "word": " means", "probability": 0.94189453125}, {"start": 1956.13, "end": 1956.57, "word": " Q1", "probability": 0.97314453125}, {"start": 1956.57, "end": 
1956.71, "word": " is", "probability": 0.94287109375}, {"start": 1956.71, "end": 1956.93, "word": " not", "probability": 0.94677734375}, {"start": 1956.93, "end": 1957.29, "word": " center.", "probability": 0.5390625}, {"start": 1959.03, "end": 1959.39, "word": " Also", "probability": 0.7626953125}, {"start": 1959.39, "end": 1959.77, "word": " Q3", "probability": 0.8681640625}, {"start": 1959.77, "end": 1959.95, "word": " is", "probability": 0.93505859375}, {"start": 1959.95, "end": 1960.13, "word": " not", "probability": 0.9365234375}, {"start": 1960.13, "end": 1960.37, "word": " center", "probability": 0.89599609375}, {"start": 1960.37, "end": 1960.83, "word": " because", "probability": 0.62451171875}, {"start": 1960.83, "end": 1961.21, "word": " two", "probability": 0.90478515625}, {"start": 1961.21, "end": 1961.75, "word": " observations", "probability": 0.767578125}, {"start": 1961.75, "end": 1962.15, "word": " above", "probability": 0.94677734375}, {"start": 1962.15, "end": 1962.45, "word": " it", "probability": 0.94677734375}, {"start": 1962.45, "end": 1962.87, "word": " and", "probability": 0.88427734375}, {"start": 1962.87, "end": 1963.17, "word": " seven", "probability": 0.9052734375}, {"start": 1963.17, "end": 1963.47, "word": " below", "probability": 0.9150390625}, {"start": 1963.47, "end": 1963.75, "word": " it.", "probability": 0.9033203125}], "temperature": 1.0}, {"id": 73, "seek": 198984, "start": 1964.82, "end": 1989.84, "text": " So that means Q1 and Q3 are measures of non-central location, while the median is a measure of central location. 
But if you just look at the data below the median, just focus on the data below the median, 12.5 lies exactly in the middle of the data.", "tokens": [407, 300, 1355, 1249, 16, 293, 1249, 18, 366, 8000, 295, 2107, 12, 2207, 2155, 4914, 11, 1339, 264, 26779, 307, 257, 3481, 295, 5777, 4914, 13, 583, 498, 291, 445, 574, 412, 264, 1412, 2507, 264, 26779, 11, 445, 1879, 322, 264, 1412, 2507, 264, 26779, 11, 2272, 13, 20, 9134, 2293, 294, 264, 2808, 295, 264, 1412, 13], "avg_logprob": -0.10892674326896667, "compression_ratio": 1.6666666666666667, "no_speech_prob": 0.0, "words": [{"start": 1964.82, "end": 1965.08, "word": " So", "probability": 0.916015625}, {"start": 1965.08, "end": 1965.3, "word": " that", "probability": 0.83203125}, {"start": 1965.3, "end": 1965.62, "word": " means", "probability": 0.9365234375}, {"start": 1965.62, "end": 1967.02, "word": " Q1", "probability": 0.8359375}, {"start": 1967.02, "end": 1967.14, "word": " and", "probability": 0.9013671875}, {"start": 1967.14, "end": 1967.5, "word": " Q3", "probability": 0.989501953125}, {"start": 1967.5, "end": 1967.74, "word": " are", "probability": 0.93212890625}, {"start": 1967.74, "end": 1968.2, "word": " measures", "probability": 0.87939453125}, {"start": 1968.2, "end": 1968.78, "word": " of", "probability": 0.9619140625}, {"start": 1968.78, "end": 1969.1, "word": " non", "probability": 0.955078125}, {"start": 1969.1, "end": 1969.64, "word": "-central", "probability": 0.904296875}, {"start": 1969.64, "end": 1970.26, "word": " location,", "probability": 0.77099609375}, {"start": 1970.42, "end": 1971.22, "word": " while", "probability": 0.91455078125}, {"start": 1971.22, "end": 1971.44, "word": " the", "probability": 0.9189453125}, {"start": 1971.44, "end": 1971.7, "word": " median", "probability": 0.94873046875}, {"start": 1971.7, "end": 1972.36, "word": " is", "probability": 0.9453125}, {"start": 1972.36, "end": 1972.48, "word": " a", "probability": 0.9736328125}, {"start": 1972.48, "end": 1972.74, 
"word": " measure", "probability": 0.88623046875}, {"start": 1972.74, "end": 1972.94, "word": " of", "probability": 0.96728515625}, {"start": 1972.94, "end": 1973.3, "word": " central", "probability": 0.90576171875}, {"start": 1973.3, "end": 1973.76, "word": " location.", "probability": 0.93701171875}, {"start": 1974.66, "end": 1974.94, "word": " But", "probability": 0.94873046875}, {"start": 1974.94, "end": 1975.12, "word": " if", "probability": 0.92724609375}, {"start": 1975.12, "end": 1975.3, "word": " you", "probability": 0.96240234375}, {"start": 1975.3, "end": 1975.64, "word": " just", "probability": 0.9091796875}, {"start": 1975.64, "end": 1976.08, "word": " look", "probability": 0.96142578125}, {"start": 1976.08, "end": 1977.34, "word": " at", "probability": 0.95361328125}, {"start": 1977.34, "end": 1977.58, "word": " the", "probability": 0.919921875}, {"start": 1977.58, "end": 1977.98, "word": " data", "probability": 0.94873046875}, {"start": 1977.98, "end": 1978.64, "word": " below", "probability": 0.919921875}, {"start": 1978.64, "end": 1979.12, "word": " the", "probability": 0.91650390625}, {"start": 1979.12, "end": 1979.4, "word": " median,", "probability": 0.9638671875}, {"start": 1981.42, "end": 1981.68, "word": " just", "probability": 0.88818359375}, {"start": 1981.68, "end": 1982.24, "word": " focus", "probability": 0.93896484375}, {"start": 1982.24, "end": 1983.56, "word": " on", "probability": 0.93701171875}, {"start": 1983.56, "end": 1983.72, "word": " the", "probability": 0.919921875}, {"start": 1983.72, "end": 1984.08, "word": " data", "probability": 0.94189453125}, {"start": 1984.08, "end": 1984.44, "word": " below", "probability": 0.8935546875}, {"start": 1984.44, "end": 1984.68, "word": " the", "probability": 0.9208984375}, {"start": 1984.68, "end": 1984.96, "word": " median,", "probability": 0.96240234375}, {"start": 1985.84, "end": 1986.1, "word": " 12", "probability": 0.6337890625}, {"start": 1986.1, "end": 1986.84, "word": ".5", 
"probability": 0.99365234375}, {"start": 1986.84, "end": 1987.78, "word": " lies", "probability": 0.9482421875}, {"start": 1987.78, "end": 1988.66, "word": " exactly", "probability": 0.90087890625}, {"start": 1988.66, "end": 1988.96, "word": " in", "probability": 0.94482421875}, {"start": 1988.96, "end": 1989.1, "word": " the", "probability": 0.91650390625}, {"start": 1989.1, "end": 1989.3, "word": " middle", "probability": 0.94970703125}, {"start": 1989.3, "end": 1989.46, "word": " of", "probability": 0.96142578125}, {"start": 1989.46, "end": 1989.58, "word": " the", "probability": 0.91064453125}, {"start": 1989.58, "end": 1989.84, "word": " data.", "probability": 0.91357421875}], "temperature": 1.0}, {"id": 74, "seek": 201923, "start": 1990.73, "end": 2019.23, "text": " So 12.5 is the center of the data. I mean, Q1 is the center of the data below the overall median. The overall median was 16. So the data before 16, the median for this data is 12.5, which is the first part. Similarly, if you look at the data above Q2, now 19.5.", "tokens": [407, 2272, 13, 20, 307, 264, 3056, 295, 264, 1412, 13, 286, 914, 11, 1249, 16, 307, 264, 3056, 295, 264, 1412, 2507, 264, 4787, 26779, 13, 440, 4787, 26779, 390, 3165, 13, 407, 264, 1412, 949, 3165, 11, 264, 26779, 337, 341, 1412, 307, 2272, 13, 20, 11, 597, 307, 264, 700, 644, 13, 13157, 11, 498, 291, 574, 412, 264, 1412, 3673, 1249, 17, 11, 586, 1294, 13, 20, 13], "avg_logprob": -0.13035102249824837, "compression_ratio": 1.6687898089171975, "no_speech_prob": 0.0, "words": [{"start": 1990.73, "end": 1990.99, "word": " So", "probability": 0.87060546875}, {"start": 1990.99, "end": 1991.31, "word": " 12", "probability": 0.716796875}, {"start": 1991.31, "end": 1991.89, "word": ".5", "probability": 0.992431640625}, {"start": 1991.89, "end": 1992.31, "word": " is", "probability": 0.9365234375}, {"start": 1992.31, "end": 1992.49, "word": " the", "probability": 0.9248046875}, {"start": 1992.49, "end": 1992.79, "word": " center", 
"probability": 0.892578125}, {"start": 1992.79, "end": 1993.01, "word": " of", "probability": 0.96484375}, {"start": 1993.01, "end": 1993.13, "word": " the", "probability": 0.91845703125}, {"start": 1993.13, "end": 1993.39, "word": " data.", "probability": 0.90234375}, {"start": 1993.79, "end": 1993.83, "word": " I", "probability": 0.97216796875}, {"start": 1993.83, "end": 1994.11, "word": " mean,", "probability": 0.96435546875}, {"start": 1994.45, "end": 1994.95, "word": " Q1", "probability": 0.6844482421875}, {"start": 1994.95, "end": 1995.19, "word": " is", "probability": 0.94091796875}, {"start": 1995.19, "end": 1995.37, "word": " the", "probability": 0.92138671875}, {"start": 1995.37, "end": 1995.77, "word": " center", "probability": 0.90087890625}, {"start": 1995.77, "end": 1996.09, "word": " of", "probability": 0.96435546875}, {"start": 1996.09, "end": 1996.25, "word": " the", "probability": 0.91943359375}, {"start": 1996.25, "end": 1996.69, "word": " data", "probability": 0.9443359375}, {"start": 1996.69, "end": 1998.09, "word": " below", "probability": 0.82421875}, {"start": 1998.09, "end": 1999.05, "word": " the", "probability": 0.8857421875}, {"start": 1999.05, "end": 1999.51, "word": " overall", "probability": 0.9189453125}, {"start": 1999.51, "end": 1999.89, "word": " median.", "probability": 0.87255859375}, {"start": 2000.05, "end": 2000.17, "word": " The", "probability": 0.8818359375}, {"start": 2000.17, "end": 2000.47, "word": " overall", "probability": 0.9208984375}, {"start": 2000.47, "end": 2000.93, "word": " median", "probability": 0.96044921875}, {"start": 2000.93, "end": 2001.27, "word": " was", "probability": 0.931640625}, {"start": 2001.27, "end": 2001.89, "word": " 16.", "probability": 0.9521484375}, {"start": 2002.47, "end": 2002.81, "word": " So", "probability": 0.9609375}, {"start": 2002.81, "end": 2002.99, "word": " the", "probability": 0.83642578125}, {"start": 2002.99, "end": 2003.33, "word": " data", "probability": 0.94921875}, 
{"start": 2003.33, "end": 2003.83, "word": " before", "probability": 0.86767578125}, {"start": 2003.83, "end": 2004.47, "word": " 16,", "probability": 0.9658203125}, {"start": 2005.07, "end": 2005.23, "word": " the", "probability": 0.91552734375}, {"start": 2005.23, "end": 2005.49, "word": " median", "probability": 0.95947265625}, {"start": 2005.49, "end": 2006.23, "word": " for", "probability": 0.60693359375}, {"start": 2006.23, "end": 2006.51, "word": " this", "probability": 0.93798828125}, {"start": 2006.51, "end": 2006.89, "word": " data", "probability": 0.93115234375}, {"start": 2006.89, "end": 2007.15, "word": " is", "probability": 0.92919921875}, {"start": 2007.15, "end": 2007.49, "word": " 12", "probability": 0.96728515625}, {"start": 2007.49, "end": 2007.97, "word": ".5,", "probability": 0.998779296875}, {"start": 2008.07, "end": 2008.23, "word": " which", "probability": 0.947265625}, {"start": 2008.23, "end": 2008.37, "word": " is", "probability": 0.921875}, {"start": 2008.37, "end": 2008.53, "word": " the", "probability": 0.8916015625}, {"start": 2008.53, "end": 2009.07, "word": " first", "probability": 0.783203125}, {"start": 2009.07, "end": 2009.39, "word": " part.", "probability": 0.54833984375}, {"start": 2009.83, "end": 2010.51, "word": " Similarly,", "probability": 0.79833984375}, {"start": 2011.47, "end": 2011.65, "word": " if", "probability": 0.9443359375}, {"start": 2011.65, "end": 2011.77, "word": " you", "probability": 0.93994140625}, {"start": 2011.77, "end": 2011.97, "word": " look", "probability": 0.96484375}, {"start": 2011.97, "end": 2012.13, "word": " at", "probability": 0.95703125}, {"start": 2012.13, "end": 2012.27, "word": " the", "probability": 0.91259765625}, {"start": 2012.27, "end": 2012.63, "word": " data", "probability": 0.9384765625}, {"start": 2012.63, "end": 2014.21, "word": " above", "probability": 0.9423828125}, {"start": 2014.21, "end": 2016.87, "word": " Q2,", "probability": 0.823974609375}, {"start": 2017.77, "end": 
2018.15, "word": " now", "probability": 0.92333984375}, {"start": 2018.15, "end": 2018.55, "word": " 19", "probability": 0.79931640625}, {"start": 2018.55, "end": 2019.23, "word": ".5.", "probability": 0.99853515625}], "temperature": 1.0}, {"id": 75, "seek": 204649, "start": 2019.51, "end": 2046.49, "text": " is located in the middle of the line. So Q3 is a measure of center for the data above the line. Make sense? So that's how can we compute first, second, and third part. Any questions? Yes, but it's a whole number. Whole number, it means any integer.", "tokens": [307, 6870, 294, 264, 2808, 295, 264, 1622, 13, 407, 1249, 18, 307, 257, 3481, 295, 3056, 337, 264, 1412, 3673, 264, 1622, 13, 4387, 2020, 30, 407, 300, 311, 577, 393, 321, 14722, 700, 11, 1150, 11, 293, 2636, 644, 13, 2639, 1651, 30, 1079, 11, 457, 309, 311, 257, 1379, 1230, 13, 30336, 1230, 11, 309, 1355, 604, 24922, 13], "avg_logprob": -0.2508680546094501, "compression_ratio": 1.4310344827586208, "no_speech_prob": 0.0, "words": [{"start": 2019.51, "end": 2019.79, "word": " is", "probability": 0.341796875}, {"start": 2019.79, "end": 2020.17, "word": " located", "probability": 0.93505859375}, {"start": 2020.17, "end": 2020.47, "word": " in", "probability": 0.947265625}, {"start": 2020.47, "end": 2020.61, "word": " the", "probability": 0.91748046875}, {"start": 2020.61, "end": 2020.81, "word": " middle", "probability": 0.94775390625}, {"start": 2020.81, "end": 2020.99, "word": " of", "probability": 0.88623046875}, {"start": 2020.99, "end": 2021.13, "word": " the", "probability": 0.75}, {"start": 2021.13, "end": 2021.25, "word": " line.", "probability": 0.139892578125}, {"start": 2021.89, "end": 2022.19, "word": " So", "probability": 0.9560546875}, {"start": 2022.19, "end": 2022.79, "word": " Q3", "probability": 0.777587890625}, {"start": 2022.79, "end": 2023.27, "word": " is", "probability": 0.9462890625}, {"start": 2023.27, "end": 2023.47, "word": " a", "probability": 0.9423828125}, {"start": 2023.47, 
"end": 2023.71, "word": " measure", "probability": 0.6171875}, {"start": 2023.71, "end": 2023.95, "word": " of", "probability": 0.900390625}, {"start": 2023.95, "end": 2024.35, "word": " center", "probability": 0.86328125}, {"start": 2024.35, "end": 2025.01, "word": " for", "probability": 0.93212890625}, {"start": 2025.01, "end": 2025.23, "word": " the", "probability": 0.90869140625}, {"start": 2025.23, "end": 2025.61, "word": " data", "probability": 0.939453125}, {"start": 2025.61, "end": 2026.25, "word": " above", "probability": 0.8759765625}, {"start": 2026.25, "end": 2026.47, "word": " the", "probability": 0.78369140625}, {"start": 2026.47, "end": 2026.63, "word": " line.", "probability": 0.74609375}, {"start": 2027.73, "end": 2028.07, "word": " Make", "probability": 0.73046875}, {"start": 2028.07, "end": 2028.39, "word": " sense?", "probability": 0.85498046875}, {"start": 2031.37, "end": 2031.97, "word": " So", "probability": 0.9296875}, {"start": 2031.97, "end": 2032.47, "word": " that's", "probability": 0.941162109375}, {"start": 2032.47, "end": 2032.77, "word": " how", "probability": 0.8876953125}, {"start": 2032.77, "end": 2033.03, "word": " can", "probability": 0.759765625}, {"start": 2033.03, "end": 2033.19, "word": " we", "probability": 0.85888671875}, {"start": 2033.19, "end": 2033.59, "word": " compute", "probability": 0.87890625}, {"start": 2033.59, "end": 2034.07, "word": " first,", "probability": 0.81103515625}, {"start": 2034.41, "end": 2035.09, "word": " second,", "probability": 0.9052734375}, {"start": 2036.15, "end": 2036.43, "word": " and", "probability": 0.93310546875}, {"start": 2036.43, "end": 2036.69, "word": " third", "probability": 0.9296875}, {"start": 2036.69, "end": 2037.01, "word": " part.", "probability": 0.214111328125}, {"start": 2038.57, "end": 2039.17, "word": " Any", "probability": 0.90234375}, {"start": 2039.17, "end": 2039.53, "word": " questions?", "probability": 0.95361328125}, {"start": 2042.13, "end": 2042.73, "word": " 
Yes,", "probability": 0.158447265625}, {"start": 2042.85, "end": 2042.95, "word": " but", "probability": 0.3583984375}, {"start": 2042.95, "end": 2043.21, "word": " it's", "probability": 0.635498046875}, {"start": 2043.21, "end": 2043.29, "word": " a", "probability": 0.98095703125}, {"start": 2043.29, "end": 2043.51, "word": " whole", "probability": 0.9052734375}, {"start": 2043.51, "end": 2043.83, "word": " number.", "probability": 0.9345703125}, {"start": 2044.71, "end": 2045.29, "word": " Whole", "probability": 0.84765625}, {"start": 2045.29, "end": 2045.63, "word": " number,", "probability": 0.93408203125}, {"start": 2045.67, "end": 2045.79, "word": " it", "probability": 0.92919921875}, {"start": 2045.79, "end": 2046.03, "word": " means", "probability": 0.92626953125}, {"start": 2046.03, "end": 2046.23, "word": " any", "probability": 0.52587890625}, {"start": 2046.23, "end": 2046.49, "word": " integer.", "probability": 0.93359375}], "temperature": 1.0}, {"id": 76, "seek": 207503, "start": 2048.77, "end": 2075.03, "text": " For example, yeah, exactly, yes. Suppose we have number of data is seven. Number of observations we have is seven. So the rank position n plus one divided by two, seven plus one over two is four. 
Four means the whole number, I mean an integer.", "tokens": [1171, 1365, 11, 1338, 11, 2293, 11, 2086, 13, 21360, 321, 362, 1230, 295, 1412, 307, 3407, 13, 5118, 295, 18163, 321, 362, 307, 3407, 13, 407, 264, 6181, 2535, 297, 1804, 472, 6666, 538, 732, 11, 3407, 1804, 472, 670, 732, 307, 1451, 13, 7451, 1355, 264, 1379, 1230, 11, 286, 914, 364, 24922, 13], "avg_logprob": -0.2219024185548749, "compression_ratio": 1.5443037974683544, "no_speech_prob": 0.0, "words": [{"start": 2048.77, "end": 2049.37, "word": " For", "probability": 0.5888671875}, {"start": 2049.37, "end": 2049.71, "word": " example,", "probability": 0.970703125}, {"start": 2049.83, "end": 2049.93, "word": " yeah,", "probability": 0.482666015625}, {"start": 2049.99, "end": 2050.25, "word": " exactly,", "probability": 0.8173828125}, {"start": 2050.39, "end": 2050.51, "word": " yes.", "probability": 0.75390625}, {"start": 2051.19, "end": 2051.79, "word": " Suppose", "probability": 0.77587890625}, {"start": 2051.79, "end": 2054.11, "word": " we", "probability": 0.62841796875}, {"start": 2054.11, "end": 2054.45, "word": " have", "probability": 0.9482421875}, {"start": 2054.45, "end": 2056.93, "word": " number", "probability": 0.64111328125}, {"start": 2056.93, "end": 2057.13, "word": " of", "probability": 0.96875}, {"start": 2057.13, "end": 2057.37, "word": " data", "probability": 0.94384765625}, {"start": 2057.37, "end": 2057.67, "word": " is", "probability": 0.8828125}, {"start": 2057.67, "end": 2058.09, "word": " seven.", "probability": 0.70068359375}, {"start": 2062.07, "end": 2062.67, "word": " Number", "probability": 0.7529296875}, {"start": 2062.67, "end": 2062.83, "word": " of", "probability": 0.95166015625}, {"start": 2062.83, "end": 2063.17, "word": " observations", "probability": 0.80126953125}, {"start": 2063.17, "end": 2063.39, "word": " we", "probability": 0.7197265625}, {"start": 2063.39, "end": 2063.53, "word": " have", "probability": 0.94677734375}, {"start": 2063.53, "end": 2063.65, 
"word": " is", "probability": 0.6357421875}, {"start": 2063.65, "end": 2063.89, "word": " seven.", "probability": 0.89404296875}, {"start": 2064.35, "end": 2064.73, "word": " So", "probability": 0.94921875}, {"start": 2064.73, "end": 2065.07, "word": " the", "probability": 0.60888671875}, {"start": 2065.07, "end": 2065.37, "word": " rank", "probability": 0.91796875}, {"start": 2065.37, "end": 2066.03, "word": " position", "probability": 0.9365234375}, {"start": 2066.03, "end": 2066.87, "word": " n", "probability": 0.49755859375}, {"start": 2066.87, "end": 2067.13, "word": " plus", "probability": 0.8896484375}, {"start": 2067.13, "end": 2067.33, "word": " one", "probability": 0.60888671875}, {"start": 2067.33, "end": 2067.59, "word": " divided", "probability": 0.79150390625}, {"start": 2067.59, "end": 2067.77, "word": " by", "probability": 0.97265625}, {"start": 2067.77, "end": 2068.07, "word": " two,", "probability": 0.92626953125}, {"start": 2068.99, "end": 2069.73, "word": " seven", "probability": 0.904296875}, {"start": 2069.73, "end": 2070.09, "word": " plus", "probability": 0.955078125}, {"start": 2070.09, "end": 2070.41, "word": " one", "probability": 0.93603515625}, {"start": 2070.41, "end": 2070.63, "word": " over", "probability": 0.9189453125}, {"start": 2070.63, "end": 2070.91, "word": " two", "probability": 0.94189453125}, {"start": 2070.91, "end": 2071.05, "word": " is", "probability": 0.919921875}, {"start": 2071.05, "end": 2071.35, "word": " four.", "probability": 0.94580078125}, {"start": 2072.19, "end": 2072.79, "word": " Four", "probability": 0.92822265625}, {"start": 2072.79, "end": 2073.41, "word": " means", "probability": 0.564453125}, {"start": 2073.41, "end": 2073.71, "word": " the", "probability": 0.91015625}, {"start": 2073.71, "end": 2073.89, "word": " whole", "probability": 0.88916015625}, {"start": 2073.89, "end": 2074.21, "word": " number,", "probability": 0.93310546875}, {"start": 2074.31, "end": 2074.41, "word": " I", "probability": 
0.7958984375}, {"start": 2074.41, "end": 2074.55, "word": " mean", "probability": 0.966796875}, {"start": 2074.55, "end": 2074.77, "word": " an", "probability": 0.5498046875}, {"start": 2074.77, "end": 2075.03, "word": " integer.", "probability": 0.9248046875}], "temperature": 1.0}, {"id": 77, "seek": 210176, "start": 2076.4, "end": 2101.76, "text": " then this case just use it as it is. Now let's see the benefit or the feature of using Q1 and Q3. So let's move at the inter-equilateral range or IQ1.", "tokens": [550, 341, 1389, 445, 764, 309, 382, 309, 307, 13, 823, 718, 311, 536, 264, 5121, 420, 264, 4111, 295, 1228, 1249, 16, 293, 1249, 18, 13, 407, 718, 311, 1286, 412, 264, 728, 12, 12816, 37751, 3613, 420, 28921, 16, 13], "avg_logprob": -0.30595929955327233, "compression_ratio": 1.2583333333333333, "no_speech_prob": 0.0, "words": [{"start": 2076.4, "end": 2076.74, "word": " then", "probability": 0.492431640625}, {"start": 2076.74, "end": 2077.0, "word": " this", "probability": 0.515625}, {"start": 2077.0, "end": 2077.16, "word": " case", "probability": 0.86669921875}, {"start": 2077.16, "end": 2077.46, "word": " just", "probability": 0.7568359375}, {"start": 2077.46, "end": 2077.78, "word": " use", "probability": 0.84423828125}, {"start": 2077.78, "end": 2078.18, "word": " it", "probability": 0.9384765625}, {"start": 2078.18, "end": 2079.48, "word": " as", "probability": 0.896484375}, {"start": 2079.48, "end": 2079.66, "word": " it", "probability": 0.94482421875}, {"start": 2079.66, "end": 2079.92, "word": " is.", "probability": 0.9423828125}, {"start": 2082.1, "end": 2082.32, "word": " Now", "probability": 0.50244140625}, {"start": 2082.32, "end": 2082.66, "word": " let's", "probability": 0.8837890625}, {"start": 2082.66, "end": 2082.96, "word": " see", "probability": 0.92529296875}, {"start": 2082.96, "end": 2084.3, "word": " the", "probability": 0.87158203125}, {"start": 2084.3, "end": 2084.62, "word": " benefit", "probability": 0.783203125}, {"start": 
2084.62, "end": 2085.14, "word": " or", "probability": 0.90576171875}, {"start": 2085.14, "end": 2085.28, "word": " the", "probability": 0.90087890625}, {"start": 2085.28, "end": 2085.62, "word": " feature", "probability": 0.76513671875}, {"start": 2085.62, "end": 2086.44, "word": " of", "probability": 0.9384765625}, {"start": 2086.44, "end": 2086.96, "word": " using", "probability": 0.9365234375}, {"start": 2086.96, "end": 2087.94, "word": " Q1", "probability": 0.79931640625}, {"start": 2087.94, "end": 2088.14, "word": " and", "probability": 0.9287109375}, {"start": 2088.14, "end": 2088.68, "word": " Q3.", "probability": 0.99462890625}, {"start": 2095.18, "end": 2095.98, "word": " So", "probability": 0.7734375}, {"start": 2095.98, "end": 2096.5, "word": " let's", "probability": 0.929931640625}, {"start": 2096.5, "end": 2097.66, "word": " move", "probability": 0.92724609375}, {"start": 2097.66, "end": 2098.1, "word": " at", "probability": 0.89306640625}, {"start": 2098.1, "end": 2099.16, "word": " the", "probability": 0.91455078125}, {"start": 2099.16, "end": 2099.52, "word": " inter", "probability": 0.188232421875}, {"start": 2099.52, "end": 2099.88, "word": "-equilateral", "probability": 0.55419921875}, {"start": 2099.88, "end": 2100.52, "word": " range", "probability": 0.88330078125}, {"start": 2100.52, "end": 2101.3, "word": " or", "probability": 0.72607421875}, {"start": 2101.3, "end": 2101.76, "word": " IQ1.", "probability": 0.466064453125}], "temperature": 1.0}, {"id": 78, "seek": 213060, "start": 2108.02, "end": 2130.6, "text": " 2.5 is the position. So the rank data of the rank data. So take the average of the two corresponding values of this one, which is 2 and 3. So 2 and 3. 
The average of these two values is 12.5.", "tokens": [568, 13, 20, 307, 264, 2535, 13, 407, 264, 6181, 1412, 295, 264, 6181, 1412, 13, 407, 747, 264, 4274, 295, 264, 732, 11760, 4190, 295, 341, 472, 11, 597, 307, 568, 293, 805, 13, 407, 568, 293, 805, 13, 440, 4274, 295, 613, 732, 4190, 307, 2272, 13, 20, 13], "avg_logprob": -0.13972355941167244, "compression_ratio": 1.6, "no_speech_prob": 0.0, "words": [{"start": 2108.02, "end": 2108.7, "word": " 2", "probability": 0.431396484375}, {"start": 2108.7, "end": 2109.36, "word": ".5", "probability": 0.984130859375}, {"start": 2109.36, "end": 2110.16, "word": " is", "probability": 0.89306640625}, {"start": 2110.16, "end": 2110.32, "word": " the", "probability": 0.9052734375}, {"start": 2110.32, "end": 2110.8, "word": " position.", "probability": 0.9296875}, {"start": 2112.06, "end": 2112.74, "word": " So", "probability": 0.8857421875}, {"start": 2112.74, "end": 2113.28, "word": " the", "probability": 0.59521484375}, {"start": 2113.28, "end": 2113.56, "word": " rank", "probability": 0.87451171875}, {"start": 2113.56, "end": 2113.86, "word": " data", "probability": 0.83984375}, {"start": 2113.86, "end": 2114.2, "word": " of", "probability": 0.7490234375}, {"start": 2114.2, "end": 2114.38, "word": " the", "probability": 0.880859375}, {"start": 2114.38, "end": 2114.58, "word": " rank", "probability": 0.91845703125}, {"start": 2114.58, "end": 2114.96, "word": " data.", "probability": 0.904296875}, {"start": 2115.74, "end": 2116.02, "word": " So", "probability": 0.93896484375}, {"start": 2116.02, "end": 2116.38, "word": " take", "probability": 0.734375}, {"start": 2116.38, "end": 2117.28, "word": " the", "probability": 0.91552734375}, {"start": 2117.28, "end": 2117.84, "word": " average", "probability": 0.79052734375}, {"start": 2117.84, "end": 2118.3, "word": " of", "probability": 0.96337890625}, {"start": 2118.3, "end": 2118.48, "word": " the", "probability": 0.90576171875}, {"start": 2118.48, "end": 2118.68, "word": " two", 
"probability": 0.8876953125}, {"start": 2118.68, "end": 2119.18, "word": " corresponding", "probability": 0.81982421875}, {"start": 2119.18, "end": 2119.72, "word": " values", "probability": 0.9658203125}, {"start": 2119.72, "end": 2119.88, "word": " of", "probability": 0.94873046875}, {"start": 2119.88, "end": 2120.08, "word": " this", "probability": 0.9453125}, {"start": 2120.08, "end": 2120.34, "word": " one,", "probability": 0.9189453125}, {"start": 2120.42, "end": 2120.58, "word": " which", "probability": 0.93798828125}, {"start": 2120.58, "end": 2120.7, "word": " is", "probability": 0.9375}, {"start": 2120.7, "end": 2120.88, "word": " 2", "probability": 0.53515625}, {"start": 2120.88, "end": 2121.02, "word": " and", "probability": 0.931640625}, {"start": 2121.02, "end": 2121.34, "word": " 3.", "probability": 0.98974609375}, {"start": 2122.52, "end": 2122.92, "word": " So", "probability": 0.84033203125}, {"start": 2122.92, "end": 2123.96, "word": " 2", "probability": 0.7880859375}, {"start": 2123.96, "end": 2125.3, "word": " and", "probability": 0.921875}, {"start": 2125.3, "end": 2125.7, "word": " 3.", "probability": 0.99755859375}, {"start": 2127.4, "end": 2127.62, "word": " The", "probability": 0.85986328125}, {"start": 2127.62, "end": 2127.96, "word": " average", "probability": 0.78955078125}, {"start": 2127.96, "end": 2128.14, "word": " of", "probability": 0.953125}, {"start": 2128.14, "end": 2128.32, "word": " these", "probability": 0.85205078125}, {"start": 2128.32, "end": 2128.5, "word": " two", "probability": 0.9248046875}, {"start": 2128.5, "end": 2129.06, "word": " values", "probability": 0.96923828125}, {"start": 2129.06, "end": 2129.76, "word": " is", "probability": 0.93896484375}, {"start": 2129.76, "end": 2130.08, "word": " 12", "probability": 0.908203125}, {"start": 2130.08, "end": 2130.6, "word": ".5.", "probability": 0.996337890625}], "temperature": 1.0}, {"id": 79, "seek": 215952, "start": 2131.48, "end": 2159.52, "text": " One more time, 
2.5 is not the value. It is the rank position of the first quartile. So in this case, 2.5 takes position 2 and 3. The average of these two rank positions", "tokens": [1485, 544, 565, 11, 568, 13, 20, 307, 406, 264, 2158, 13, 467, 307, 264, 6181, 2535, 295, 264, 700, 20837, 794, 13, 407, 294, 341, 1389, 11, 568, 13, 20, 2516, 2535, 568, 293, 805, 13, 440, 4274, 295, 613, 732, 6181, 8432], "avg_logprob": -0.14835069047080146, "compression_ratio": 1.4083333333333334, "no_speech_prob": 0.0, "words": [{"start": 2131.48, "end": 2131.74, "word": " One", "probability": 0.708984375}, {"start": 2131.74, "end": 2131.94, "word": " more", "probability": 0.93359375}, {"start": 2131.94, "end": 2132.14, "word": " time,", "probability": 0.88134765625}, {"start": 2132.28, "end": 2133.52, "word": " 2", "probability": 0.53173828125}, {"start": 2133.52, "end": 2134.12, "word": ".5", "probability": 0.99169921875}, {"start": 2134.12, "end": 2136.6, "word": " is", "probability": 0.87646484375}, {"start": 2136.6, "end": 2136.88, "word": " not", "probability": 0.94921875}, {"start": 2136.88, "end": 2137.08, "word": " the", "probability": 0.9072265625}, {"start": 2137.08, "end": 2137.38, "word": " value.", "probability": 0.9716796875}, {"start": 2139.72, "end": 2140.3, "word": " It", "probability": 0.94921875}, {"start": 2140.3, "end": 2140.48, "word": " is", "probability": 0.92626953125}, {"start": 2140.48, "end": 2140.7, "word": " the", "probability": 0.90966796875}, {"start": 2140.7, "end": 2140.92, "word": " rank", "probability": 0.95263671875}, {"start": 2140.92, "end": 2141.36, "word": " position", "probability": 0.93896484375}, {"start": 2141.36, "end": 2142.68, "word": " of", "probability": 0.94921875}, {"start": 2142.68, "end": 2143.2, "word": " the", "probability": 0.919921875}, {"start": 2143.2, "end": 2143.52, "word": " first", "probability": 0.87255859375}, {"start": 2143.52, "end": 2144.1, "word": " quartile.", "probability": 0.831787109375}, {"start": 2145.5, "end": 2145.9, 
"word": " So", "probability": 0.91259765625}, {"start": 2145.9, "end": 2146.04, "word": " in", "probability": 0.72607421875}, {"start": 2146.04, "end": 2146.24, "word": " this", "probability": 0.947265625}, {"start": 2146.24, "end": 2146.7, "word": " case,", "probability": 0.91552734375}, {"start": 2147.52, "end": 2147.88, "word": " 2", "probability": 0.98828125}, {"start": 2147.88, "end": 2148.5, "word": ".5", "probability": 0.998291015625}, {"start": 2148.5, "end": 2150.76, "word": " takes", "probability": 0.423583984375}, {"start": 2150.76, "end": 2152.14, "word": " position", "probability": 0.8544921875}, {"start": 2152.14, "end": 2152.6, "word": " 2", "probability": 0.5390625}, {"start": 2152.6, "end": 2153.76, "word": " and", "probability": 0.8740234375}, {"start": 2153.76, "end": 2154.12, "word": " 3.", "probability": 0.96630859375}, {"start": 2155.5, "end": 2156.42, "word": " The", "probability": 0.876953125}, {"start": 2156.42, "end": 2156.94, "word": " average", "probability": 0.7978515625}, {"start": 2156.94, "end": 2157.42, "word": " of", "probability": 0.96435546875}, {"start": 2157.42, "end": 2157.74, "word": " these", "probability": 0.8505859375}, {"start": 2157.74, "end": 2158.2, "word": " two", "probability": 0.9013671875}, {"start": 2158.2, "end": 2158.86, "word": " rank", "probability": 0.93798828125}, {"start": 2158.86, "end": 2159.52, "word": " positions", "probability": 0.86669921875}], "temperature": 1.0}, {"id": 80, "seek": 219000, "start": 2160.34, "end": 2190.0, "text": " the corresponding one, which are 12 and 13. So 12 for position number 2, 13 for the other one. So the average is just divided by 2. That will give 12.5. 
Next, again,", "tokens": [264, 11760, 472, 11, 597, 366, 2272, 293, 3705, 13, 407, 2272, 337, 2535, 1230, 568, 11, 3705, 337, 264, 661, 472, 13, 407, 264, 4274, 307, 445, 6666, 538, 568, 13, 663, 486, 976, 2272, 13, 20, 13, 3087, 11, 797, 11], "avg_logprob": -0.23987925560636955, "compression_ratio": 1.2868217054263567, "no_speech_prob": 0.0, "words": [{"start": 2160.34, "end": 2160.6, "word": " the", "probability": 0.22216796875}, {"start": 2160.6, "end": 2161.06, "word": " corresponding", "probability": 0.87548828125}, {"start": 2161.06, "end": 2161.48, "word": " one,", "probability": 0.5859375}, {"start": 2162.26, "end": 2162.58, "word": " which", "probability": 0.947265625}, {"start": 2162.58, "end": 2163.02, "word": " are", "probability": 0.89404296875}, {"start": 2163.02, "end": 2163.64, "word": " 12", "probability": 0.79931640625}, {"start": 2163.64, "end": 2163.94, "word": " and", "probability": 0.923828125}, {"start": 2163.94, "end": 2164.38, "word": " 13.", "probability": 0.94384765625}, {"start": 2164.86, "end": 2165.32, "word": " So", "probability": 0.92333984375}, {"start": 2165.32, "end": 2165.74, "word": " 12", "probability": 0.89453125}, {"start": 2165.74, "end": 2166.74, "word": " for", "probability": 0.8544921875}, {"start": 2166.74, "end": 2167.78, "word": " position", "probability": 0.7236328125}, {"start": 2167.78, "end": 2168.1, "word": " number", "probability": 0.94775390625}, {"start": 2168.1, "end": 2168.4, "word": " 2,", "probability": 0.55615234375}, {"start": 2169.18, "end": 2169.72, "word": " 13", "probability": 0.953125}, {"start": 2169.72, "end": 2170.08, "word": " for", "probability": 0.9501953125}, {"start": 2170.08, "end": 2170.54, "word": " the", "probability": 0.91015625}, {"start": 2170.54, "end": 2170.78, "word": " other", "probability": 0.89453125}, {"start": 2170.78, "end": 2171.02, "word": " one.", "probability": 0.9150390625}, {"start": 2171.9, "end": 2172.16, "word": " So", "probability": 0.85986328125}, {"start": 2172.16, 
"end": 2172.34, "word": " the", "probability": 0.88916015625}, {"start": 2172.34, "end": 2172.68, "word": " average", "probability": 0.79833984375}, {"start": 2172.68, "end": 2173.02, "word": " is", "probability": 0.28857421875}, {"start": 2173.02, "end": 2173.16, "word": " just", "probability": 0.8017578125}, {"start": 2173.16, "end": 2173.4, "word": " divided", "probability": 0.71142578125}, {"start": 2173.4, "end": 2173.58, "word": " by", "probability": 0.9697265625}, {"start": 2173.58, "end": 2173.84, "word": " 2.", "probability": 0.82958984375}, {"start": 2174.24, "end": 2174.74, "word": " That", "probability": 0.85791015625}, {"start": 2174.74, "end": 2174.94, "word": " will", "probability": 0.8779296875}, {"start": 2174.94, "end": 2175.32, "word": " give", "probability": 0.87744140625}, {"start": 2175.32, "end": 2176.1, "word": " 12", "probability": 0.87158203125}, {"start": 2176.1, "end": 2176.66, "word": ".5.", "probability": 0.9951171875}, {"start": 2188.76, "end": 2189.54, "word": " Next,", "probability": 0.5302734375}, {"start": 2189.68, "end": 2190.0, "word": " again,", "probability": 0.958984375}], "temperature": 1.0}, {"id": 81, "seek": 221906, "start": 2191.5, "end": 2219.06, "text": " the inter-quartile range, which is denoted by IQR. Now IQR is the distance between Q3 and Q1. I mean the difference between Q3 and Q1 is called the inter-quartile range. And this one measures the spread in the middle 50% of the data. 
Because if you imagine that,", "tokens": [264, 728, 12, 358, 446, 794, 3613, 11, 597, 307, 1441, 23325, 538, 28921, 49, 13, 823, 28921, 49, 307, 264, 4560, 1296, 1249, 18, 293, 1249, 16, 13, 286, 914, 264, 2649, 1296, 1249, 18, 293, 1249, 16, 307, 1219, 264, 728, 12, 358, 446, 794, 3613, 13, 400, 341, 472, 8000, 264, 3974, 294, 264, 2808, 2625, 4, 295, 264, 1412, 13, 1436, 498, 291, 3811, 300, 11], "avg_logprob": -0.14711708040304586, "compression_ratio": 1.5654761904761905, "no_speech_prob": 0.0, "words": [{"start": 2191.5, "end": 2191.96, "word": " the", "probability": 0.2626953125}, {"start": 2191.96, "end": 2192.28, "word": " inter", "probability": 0.4306640625}, {"start": 2192.28, "end": 2192.74, "word": "-quartile", "probability": 0.8492431640625}, {"start": 2192.74, "end": 2193.16, "word": " range,", "probability": 0.88671875}, {"start": 2193.42, "end": 2194.68, "word": " which", "probability": 0.9521484375}, {"start": 2194.68, "end": 2194.9, "word": " is", "probability": 0.9482421875}, {"start": 2194.9, "end": 2195.4, "word": " denoted", "probability": 0.953369140625}, {"start": 2195.4, "end": 2196.5, "word": " by", "probability": 0.97607421875}, {"start": 2196.5, "end": 2197.36, "word": " IQR.", "probability": 0.933349609375}, {"start": 2199.28, "end": 2199.86, "word": " Now", "probability": 0.9365234375}, {"start": 2199.86, "end": 2200.84, "word": " IQR", "probability": 0.792236328125}, {"start": 2200.84, "end": 2202.58, "word": " is", "probability": 0.88330078125}, {"start": 2202.58, "end": 2202.76, "word": " the", "probability": 0.91552734375}, {"start": 2202.76, "end": 2203.24, "word": " distance", "probability": 0.931640625}, {"start": 2203.24, "end": 2203.68, "word": " between", "probability": 0.876953125}, {"start": 2203.68, "end": 2204.16, "word": " Q3", "probability": 0.902587890625}, {"start": 2204.16, "end": 2204.34, "word": " and", "probability": 0.94140625}, {"start": 2204.34, "end": 2204.8, "word": " Q1.", "probability": 0.99658203125}, 
{"start": 2204.92, "end": 2205.06, "word": " I", "probability": 0.97314453125}, {"start": 2205.06, "end": 2205.2, "word": " mean", "probability": 0.95947265625}, {"start": 2205.2, "end": 2205.32, "word": " the", "probability": 0.68310546875}, {"start": 2205.32, "end": 2205.84, "word": " difference", "probability": 0.81591796875}, {"start": 2205.84, "end": 2206.8, "word": " between", "probability": 0.88134765625}, {"start": 2206.8, "end": 2207.26, "word": " Q3", "probability": 0.981201171875}, {"start": 2207.26, "end": 2207.4, "word": " and", "probability": 0.94580078125}, {"start": 2207.4, "end": 2207.78, "word": " Q1", "probability": 0.994384765625}, {"start": 2207.78, "end": 2208.0, "word": " is", "probability": 0.85107421875}, {"start": 2208.0, "end": 2208.32, "word": " called", "probability": 0.88232421875}, {"start": 2208.32, "end": 2209.42, "word": " the", "probability": 0.873046875}, {"start": 2209.42, "end": 2209.7, "word": " inter", "probability": 0.81689453125}, {"start": 2209.7, "end": 2210.16, "word": "-quartile", "probability": 0.9407958984375}, {"start": 2210.16, "end": 2210.56, "word": " range.", "probability": 0.87646484375}, {"start": 2212.0, "end": 2212.56, "word": " And", "probability": 0.91552734375}, {"start": 2212.56, "end": 2213.2, "word": " this", "probability": 0.9404296875}, {"start": 2213.2, "end": 2213.46, "word": " one", "probability": 0.92431640625}, {"start": 2213.46, "end": 2213.86, "word": " measures", "probability": 0.8505859375}, {"start": 2213.86, "end": 2214.22, "word": " the", "probability": 0.89306640625}, {"start": 2214.22, "end": 2214.62, "word": " spread", "probability": 0.880859375}, {"start": 2214.62, "end": 2215.06, "word": " in", "probability": 0.9443359375}, {"start": 2215.06, "end": 2215.2, "word": " the", "probability": 0.92138671875}, {"start": 2215.2, "end": 2215.42, "word": " middle", "probability": 0.9052734375}, {"start": 2215.42, "end": 2215.78, "word": " 50", "probability": 0.94189453125}, {"start": 2215.78, 
"end": 2216.0, "word": "%", "probability": 0.87158203125}, {"start": 2216.0, "end": 2216.26, "word": " of", "probability": 0.96435546875}, {"start": 2216.26, "end": 2216.4, "word": " the", "probability": 0.92041015625}, {"start": 2216.4, "end": 2216.68, "word": " data.", "probability": 0.921875}, {"start": 2217.68, "end": 2218.18, "word": " Because", "probability": 0.931640625}, {"start": 2218.18, "end": 2218.34, "word": " if", "probability": 0.89599609375}, {"start": 2218.34, "end": 2218.42, "word": " you", "probability": 0.9619140625}, {"start": 2218.42, "end": 2218.74, "word": " imagine", "probability": 0.89697265625}, {"start": 2218.74, "end": 2219.06, "word": " that,", "probability": 0.8994140625}], "temperature": 1.0}, {"id": 82, "seek": 223957, "start": 2222.25, "end": 2239.57, "text": " This is Q1 and Q3. IQR is the distance between these two values. Now imagine that we have just this data, which represents 50%.", "tokens": [639, 307, 1249, 16, 293, 1249, 18, 13, 28921, 49, 307, 264, 4560, 1296, 613, 732, 4190, 13, 823, 3811, 300, 321, 362, 445, 341, 1412, 11, 597, 8855, 2625, 6856], "avg_logprob": -0.10705566662363708, "compression_ratio": 1.1428571428571428, "no_speech_prob": 0.0, "words": [{"start": 2222.25, "end": 2222.59, "word": " This", "probability": 0.75439453125}, {"start": 2222.59, "end": 2222.77, "word": " is", "probability": 0.94091796875}, {"start": 2222.77, "end": 2223.19, "word": " Q1", "probability": 0.90087890625}, {"start": 2223.19, "end": 2224.61, "word": " and", "probability": 0.89892578125}, {"start": 2224.61, "end": 2225.31, "word": " Q3.", "probability": 0.9794921875}, {"start": 2227.21, "end": 2227.97, "word": " IQR", "probability": 0.896240234375}, {"start": 2227.97, "end": 2229.27, "word": " is", "probability": 0.931640625}, {"start": 2229.27, "end": 2229.41, "word": " the", "probability": 0.919921875}, {"start": 2229.41, "end": 2229.89, "word": " distance", "probability": 0.93212890625}, {"start": 2229.89, "end": 2230.25, "word": 
" between", "probability": 0.8740234375}, {"start": 2230.25, "end": 2230.53, "word": " these", "probability": 0.86279296875}, {"start": 2230.53, "end": 2230.71, "word": " two", "probability": 0.91552734375}, {"start": 2230.71, "end": 2231.15, "word": " values.", "probability": 0.9697265625}, {"start": 2232.33, "end": 2232.71, "word": " Now", "probability": 0.955078125}, {"start": 2232.71, "end": 2233.05, "word": " imagine", "probability": 0.70556640625}, {"start": 2233.05, "end": 2233.37, "word": " that", "probability": 0.92919921875}, {"start": 2233.37, "end": 2233.55, "word": " we", "probability": 0.953125}, {"start": 2233.55, "end": 2233.75, "word": " have", "probability": 0.93603515625}, {"start": 2233.75, "end": 2234.13, "word": " just", "probability": 0.9189453125}, {"start": 2234.13, "end": 2234.49, "word": " this", "probability": 0.94580078125}, {"start": 2234.49, "end": 2234.87, "word": " data,", "probability": 0.9326171875}, {"start": 2236.25, "end": 2236.59, "word": " which", "probability": 0.9482421875}, {"start": 2236.59, "end": 2237.37, "word": " represents", "probability": 0.86474609375}, {"start": 2237.37, "end": 2239.57, "word": " 50%.", "probability": 0.833740234375}], "temperature": 1.0}, {"id": 83, "seek": 226782, "start": 2241.54, "end": 2267.82, "text": " And IQR, the definition is a Q3. So we have just this data, for example. And IQ3 is Q3 minus Q1. It means IQ3 is the maximum minus the minimum of the 50% of the middle data. So it means this is your range, new range. 
After you've secluded 25% to the left of Q1,", "tokens": [400, 28921, 49, 11, 264, 7123, 307, 257, 1249, 18, 13, 407, 321, 362, 445, 341, 1412, 11, 337, 1365, 13, 400, 28921, 18, 307, 1249, 18, 3175, 1249, 16, 13, 467, 1355, 28921, 18, 307, 264, 6674, 3175, 264, 7285, 295, 264, 2625, 4, 295, 264, 2808, 1412, 13, 407, 309, 1355, 341, 307, 428, 3613, 11, 777, 3613, 13, 2381, 291, 600, 907, 44412, 3552, 4, 281, 264, 1411, 295, 1249, 16, 11], "avg_logprob": -0.13342928141355515, "compression_ratio": 1.497142857142857, "no_speech_prob": 0.0, "words": [{"start": 2241.54, "end": 2241.9, "word": " And", "probability": 0.86865234375}, {"start": 2241.9, "end": 2242.58, "word": " IQR,", "probability": 0.926513671875}, {"start": 2242.66, "end": 2242.72, "word": " the", "probability": 0.9130859375}, {"start": 2242.72, "end": 2243.12, "word": " definition", "probability": 0.93603515625}, {"start": 2243.12, "end": 2243.58, "word": " is", "probability": 0.8818359375}, {"start": 2243.58, "end": 2243.98, "word": " a", "probability": 0.4169921875}, {"start": 2243.98, "end": 2244.42, "word": " Q3.", "probability": 0.95703125}, {"start": 2244.7, "end": 2244.8, "word": " So", "probability": 0.9619140625}, {"start": 2244.8, "end": 2244.98, "word": " we", "probability": 0.794921875}, {"start": 2244.98, "end": 2245.16, "word": " have", "probability": 0.93603515625}, {"start": 2245.16, "end": 2245.44, "word": " just", "probability": 0.91357421875}, {"start": 2245.44, "end": 2245.7, "word": " this", "probability": 0.92333984375}, {"start": 2245.7, "end": 2246.0, "word": " data,", "probability": 0.92236328125}, {"start": 2246.54, "end": 2246.8, "word": " for", "probability": 0.95068359375}, {"start": 2246.8, "end": 2247.1, "word": " example.", "probability": 0.9736328125}, {"start": 2247.76, "end": 2248.06, "word": " And", "probability": 0.951171875}, {"start": 2248.06, "end": 2248.82, "word": " IQ3", "probability": 0.963623046875}, {"start": 2248.82, "end": 2249.18, "word": " is", 
"probability": 0.9345703125}, {"start": 2249.18, "end": 2249.84, "word": " Q3", "probability": 0.991455078125}, {"start": 2249.84, "end": 2250.18, "word": " minus", "probability": 0.962890625}, {"start": 2250.18, "end": 2250.8, "word": " Q1.", "probability": 0.996826171875}, {"start": 2251.12, "end": 2251.48, "word": " It", "probability": 0.92529296875}, {"start": 2251.48, "end": 2251.74, "word": " means", "probability": 0.9345703125}, {"start": 2251.74, "end": 2252.36, "word": " IQ3", "probability": 0.9853515625}, {"start": 2252.36, "end": 2252.6, "word": " is", "probability": 0.94287109375}, {"start": 2252.6, "end": 2252.78, "word": " the", "probability": 0.91796875}, {"start": 2252.78, "end": 2253.26, "word": " maximum", "probability": 0.92919921875}, {"start": 2253.26, "end": 2254.64, "word": " minus", "probability": 0.97705078125}, {"start": 2254.64, "end": 2254.94, "word": " the", "probability": 0.9208984375}, {"start": 2254.94, "end": 2255.32, "word": " minimum", "probability": 0.974609375}, {"start": 2255.32, "end": 2256.8, "word": " of", "probability": 0.78857421875}, {"start": 2256.8, "end": 2257.08, "word": " the", "probability": 0.90869140625}, {"start": 2257.08, "end": 2257.66, "word": " 50", "probability": 0.96484375}, {"start": 2257.66, "end": 2258.1, "word": "%", "probability": 0.9287109375}, {"start": 2258.1, "end": 2258.56, "word": " of", "probability": 0.9345703125}, {"start": 2258.56, "end": 2258.7, "word": " the", "probability": 0.923828125}, {"start": 2258.7, "end": 2258.88, "word": " middle", "probability": 0.78076171875}, {"start": 2258.88, "end": 2259.28, "word": " data.", "probability": 0.91845703125}, {"start": 2260.08, "end": 2260.24, "word": " So", "probability": 0.958984375}, {"start": 2260.24, "end": 2260.38, "word": " it", "probability": 0.923828125}, {"start": 2260.38, "end": 2260.64, "word": " means", "probability": 0.92919921875}, {"start": 2260.64, "end": 2261.22, "word": " this", "probability": 0.86767578125}, {"start": 2261.22, 
"end": 2261.36, "word": " is", "probability": 0.94091796875}, {"start": 2261.36, "end": 2261.54, "word": " your", "probability": 0.888671875}, {"start": 2261.54, "end": 2261.88, "word": " range,", "probability": 0.86767578125}, {"start": 2261.98, "end": 2262.12, "word": " new", "probability": 0.467041015625}, {"start": 2262.12, "end": 2262.36, "word": " range.", "probability": 0.88330078125}, {"start": 2262.48, "end": 2262.76, "word": " After", "probability": 0.88232421875}, {"start": 2262.76, "end": 2263.08, "word": " you've", "probability": 0.69580078125}, {"start": 2263.08, "end": 2263.5, "word": " secluded", "probability": 0.757568359375}, {"start": 2263.5, "end": 2265.06, "word": " 25", "probability": 0.958984375}, {"start": 2265.06, "end": 2265.78, "word": "%", "probability": 0.96728515625}, {"start": 2265.78, "end": 2266.82, "word": " to", "probability": 0.9677734375}, {"start": 2266.82, "end": 2266.98, "word": " the", "probability": 0.91552734375}, {"start": 2266.98, "end": 2267.2, "word": " left", "probability": 0.9404296875}, {"start": 2267.2, "end": 2267.38, "word": " of", "probability": 0.966796875}, {"start": 2267.38, "end": 2267.82, "word": " Q1,", "probability": 0.997802734375}], "temperature": 1.0}, {"id": 84, "seek": 229773, "start": 2268.57, "end": 2297.73, "text": " And also you ignored totally 25% of the data above Q3. So that means you're focused on 50% of the data. And just take the average of these two points, I'm sorry, the distance of these two points Q3 minus Q1. So you will get the range. But not exactly the range. It's called, sometimes it's called mid-spread range. 
Because mid-spread, because we are talking about", "tokens": [400, 611, 291, 19735, 3879, 3552, 4, 295, 264, 1412, 3673, 1249, 18, 13, 407, 300, 1355, 291, 434, 5178, 322, 2625, 4, 295, 264, 1412, 13, 400, 445, 747, 264, 4274, 295, 613, 732, 2793, 11, 286, 478, 2597, 11, 264, 4560, 295, 613, 732, 2793, 1249, 18, 3175, 1249, 16, 13, 407, 291, 486, 483, 264, 3613, 13, 583, 406, 2293, 264, 3613, 13, 467, 311, 1219, 11, 2171, 309, 311, 1219, 2062, 12, 4952, 2538, 3613, 13, 1436, 2062, 12, 4952, 2538, 11, 570, 321, 366, 1417, 466], "avg_logprob": -0.1858016359417335, "compression_ratio": 1.6545454545454545, "no_speech_prob": 0.0, "words": [{"start": 2268.57, "end": 2268.93, "word": " And", "probability": 0.7119140625}, {"start": 2268.93, "end": 2269.41, "word": " also", "probability": 0.82470703125}, {"start": 2269.41, "end": 2269.63, "word": " you", "probability": 0.62451171875}, {"start": 2269.63, "end": 2270.11, "word": " ignored", "probability": 0.7783203125}, {"start": 2270.11, "end": 2271.01, "word": " totally", "probability": 0.7333984375}, {"start": 2271.01, "end": 2271.51, "word": " 25", "probability": 0.8515625}, {"start": 2271.51, "end": 2271.91, "word": "%", "probability": 0.8583984375}, {"start": 2271.91, "end": 2272.45, "word": " of", "probability": 0.9677734375}, {"start": 2272.45, "end": 2272.61, "word": " the", "probability": 0.92041015625}, {"start": 2272.61, "end": 2272.89, "word": " data", "probability": 0.94482421875}, {"start": 2272.89, "end": 2273.23, "word": " above", "probability": 0.90478515625}, {"start": 2273.23, "end": 2273.75, "word": " Q3.", "probability": 0.671142578125}, {"start": 2274.67, "end": 2275.05, "word": " So", "probability": 0.95654296875}, {"start": 2275.05, "end": 2275.37, "word": " that", "probability": 0.84033203125}, {"start": 2275.37, "end": 2275.63, "word": " means", "probability": 0.9345703125}, {"start": 2275.63, "end": 2275.91, "word": " you're", "probability": 0.620361328125}, {"start": 2275.91, "end": 2276.39, 
"word": " focused", "probability": 0.9140625}, {"start": 2276.39, "end": 2277.07, "word": " on", "probability": 0.953125}, {"start": 2277.07, "end": 2277.67, "word": " 50", "probability": 0.95947265625}, {"start": 2277.67, "end": 2277.95, "word": "%", "probability": 0.9970703125}, {"start": 2277.95, "end": 2278.19, "word": " of", "probability": 0.9658203125}, {"start": 2278.19, "end": 2278.33, "word": " the", "probability": 0.9169921875}, {"start": 2278.33, "end": 2278.63, "word": " data.", "probability": 0.92919921875}, {"start": 2278.93, "end": 2279.39, "word": " And", "probability": 0.92138671875}, {"start": 2279.39, "end": 2279.65, "word": " just", "probability": 0.91015625}, {"start": 2279.65, "end": 2279.87, "word": " take", "probability": 0.8779296875}, {"start": 2279.87, "end": 2280.05, "word": " the", "probability": 0.9130859375}, {"start": 2280.05, "end": 2280.41, "word": " average", "probability": 0.77734375}, {"start": 2280.41, "end": 2280.63, "word": " of", "probability": 0.96142578125}, {"start": 2280.63, "end": 2280.83, "word": " these", "probability": 0.8564453125}, {"start": 2280.83, "end": 2281.11, "word": " two", "probability": 0.92529296875}, {"start": 2281.11, "end": 2281.87, "word": " points,", "probability": 0.935546875}, {"start": 2282.19, "end": 2282.33, "word": " I'm", "probability": 0.986572265625}, {"start": 2282.33, "end": 2282.53, "word": " sorry,", "probability": 0.86181640625}, {"start": 2282.63, "end": 2282.73, "word": " the", "probability": 0.91650390625}, {"start": 2282.73, "end": 2283.19, "word": " distance", "probability": 0.927734375}, {"start": 2283.19, "end": 2283.89, "word": " of", "probability": 0.8984375}, {"start": 2283.89, "end": 2284.07, "word": " these", "probability": 0.8505859375}, {"start": 2284.07, "end": 2284.25, "word": " two", "probability": 0.9248046875}, {"start": 2284.25, "end": 2284.55, "word": " points", "probability": 0.91845703125}, {"start": 2284.55, "end": 2284.95, "word": " Q3", "probability": 
0.75048828125}, {"start": 2284.95, "end": 2285.23, "word": " minus", "probability": 0.79296875}, {"start": 2285.23, "end": 2285.73, "word": " Q1.", "probability": 0.994384765625}, {"start": 2286.37, "end": 2286.63, "word": " So", "probability": 0.9189453125}, {"start": 2286.63, "end": 2286.87, "word": " you", "probability": 0.669921875}, {"start": 2286.87, "end": 2287.01, "word": " will", "probability": 0.8310546875}, {"start": 2287.01, "end": 2287.19, "word": " get", "probability": 0.88330078125}, {"start": 2287.19, "end": 2287.35, "word": " the", "probability": 0.9130859375}, {"start": 2287.35, "end": 2287.67, "word": " range.", "probability": 0.8837890625}, {"start": 2287.99, "end": 2288.19, "word": " But", "probability": 0.94580078125}, {"start": 2288.19, "end": 2288.41, "word": " not", "probability": 0.93798828125}, {"start": 2288.41, "end": 2288.81, "word": " exactly", "probability": 0.87109375}, {"start": 2288.81, "end": 2289.03, "word": " the", "probability": 0.908203125}, {"start": 2289.03, "end": 2289.27, "word": " range.", "probability": 0.87353515625}, {"start": 2289.43, "end": 2289.61, "word": " It's", "probability": 0.8955078125}, {"start": 2289.61, "end": 2289.99, "word": " called,", "probability": 0.7705078125}, {"start": 2290.57, "end": 2291.17, "word": " sometimes", "probability": 0.93994140625}, {"start": 2291.17, "end": 2291.45, "word": " it's", "probability": 0.907958984375}, {"start": 2291.45, "end": 2291.75, "word": " called", "probability": 0.86767578125}, {"start": 2291.75, "end": 2292.89, "word": " mid", "probability": 0.6943359375}, {"start": 2292.89, "end": 2293.39, "word": "-spread", "probability": 0.77294921875}, {"start": 2293.39, "end": 2294.43, "word": " range.", "probability": 0.8740234375}, {"start": 2294.99, "end": 2295.47, "word": " Because", "probability": 0.91259765625}, {"start": 2295.47, "end": 2295.95, "word": " mid", "probability": 0.8447265625}, {"start": 2295.95, "end": 2296.39, "word": "-spread,", "probability": 
0.9226888020833334}, {"start": 2296.51, "end": 2296.73, "word": " because", "probability": 0.89208984375}, {"start": 2296.73, "end": 2296.85, "word": " we", "probability": 0.95068359375}, {"start": 2296.85, "end": 2296.97, "word": " are", "probability": 0.92431640625}, {"start": 2296.97, "end": 2297.29, "word": " talking", "probability": 0.8583984375}, {"start": 2297.29, "end": 2297.73, "word": " about", "probability": 0.90087890625}], "temperature": 1.0}, {"id": 85, "seek": 232573, "start": 2298.93, "end": 2325.73, "text": " middle of the data, 50% of the data, which is located in the middle. So do you think in this case, outliers actually, they are extreme values, the data below Q1 and data above Q3. That means inter-quartile range, Q3 minus Q1, is not affected by outliers. Because you ignored the small values", "tokens": [2808, 295, 264, 1412, 11, 2625, 4, 295, 264, 1412, 11, 597, 307, 6870, 294, 264, 2808, 13, 407, 360, 291, 519, 294, 341, 1389, 11, 484, 23646, 767, 11, 436, 366, 8084, 4190, 11, 264, 1412, 2507, 1249, 16, 293, 1412, 3673, 1249, 18, 13, 663, 1355, 728, 12, 358, 446, 794, 3613, 11, 1249, 18, 3175, 1249, 16, 11, 307, 406, 8028, 538, 484, 23646, 13, 1436, 291, 19735, 264, 1359, 4190], "avg_logprob": -0.21541666507720947, "compression_ratio": 1.5129533678756477, "no_speech_prob": 0.0, "words": [{"start": 2298.93, "end": 2299.37, "word": " middle", "probability": 0.6396484375}, {"start": 2299.37, "end": 2299.57, "word": " of", "probability": 0.96044921875}, {"start": 2299.57, "end": 2299.71, "word": " the", "probability": 0.89599609375}, {"start": 2299.71, "end": 2299.91, "word": " data,", "probability": 0.90478515625}, {"start": 2299.99, "end": 2300.25, "word": " 50", "probability": 0.90869140625}, {"start": 2300.25, "end": 2300.53, "word": "%", "probability": 0.802734375}, {"start": 2300.53, "end": 2300.79, "word": " of", "probability": 0.9609375}, {"start": 2300.79, "end": 2300.93, "word": " the", "probability": 0.87353515625}, {"start": 2300.93, 
"end": 2301.13, "word": " data,", "probability": 0.9345703125}, {"start": 2301.19, "end": 2301.37, "word": " which", "probability": 0.9111328125}, {"start": 2301.37, "end": 2301.49, "word": " is", "probability": 0.93603515625}, {"start": 2301.49, "end": 2301.89, "word": " located", "probability": 0.93310546875}, {"start": 2301.89, "end": 2302.09, "word": " in", "probability": 0.904296875}, {"start": 2302.09, "end": 2302.19, "word": " the", "probability": 0.91796875}, {"start": 2302.19, "end": 2302.43, "word": " middle.", "probability": 0.9404296875}, {"start": 2303.11, "end": 2303.31, "word": " So", "probability": 0.93408203125}, {"start": 2303.31, "end": 2304.43, "word": " do", "probability": 0.51123046875}, {"start": 2304.43, "end": 2304.55, "word": " you", "probability": 0.96337890625}, {"start": 2304.55, "end": 2304.75, "word": " think", "probability": 0.91064453125}, {"start": 2304.75, "end": 2304.89, "word": " in", "probability": 0.84912109375}, {"start": 2304.89, "end": 2305.09, "word": " this", "probability": 0.94677734375}, {"start": 2305.09, "end": 2306.93, "word": " case,", "probability": 0.9091796875}, {"start": 2307.47, "end": 2307.93, "word": " outliers", "probability": 0.87255859375}, {"start": 2307.93, "end": 2308.55, "word": " actually,", "probability": 0.8125}, {"start": 2309.09, "end": 2309.17, "word": " they", "probability": 0.292236328125}, {"start": 2309.17, "end": 2309.43, "word": " are", "probability": 0.64208984375}, {"start": 2309.43, "end": 2309.83, "word": " extreme", "probability": 0.83251953125}, {"start": 2309.83, "end": 2310.29, "word": " values,", "probability": 0.97119140625}, {"start": 2310.41, "end": 2310.53, "word": " the", "probability": 0.8125}, {"start": 2310.53, "end": 2310.81, "word": " data", "probability": 0.9404296875}, {"start": 2310.81, "end": 2311.29, "word": " below", "probability": 0.8427734375}, {"start": 2311.29, "end": 2312.03, "word": " Q1", "probability": 0.893310546875}, {"start": 2312.03, "end": 2312.93, 
"word": " and", "probability": 0.787109375}, {"start": 2312.93, "end": 2313.23, "word": " data", "probability": 0.91015625}, {"start": 2313.23, "end": 2313.67, "word": " above", "probability": 0.9541015625}, {"start": 2313.67, "end": 2315.03, "word": " Q3.", "probability": 0.97314453125}, {"start": 2315.21, "end": 2315.33, "word": " That", "probability": 0.90771484375}, {"start": 2315.33, "end": 2315.71, "word": " means", "probability": 0.9306640625}, {"start": 2315.71, "end": 2316.91, "word": " inter", "probability": 0.4150390625}, {"start": 2316.91, "end": 2317.45, "word": "-quartile", "probability": 0.846923828125}, {"start": 2317.45, "end": 2317.73, "word": " range,", "probability": 0.83935546875}, {"start": 2317.79, "end": 2318.15, "word": " Q3", "probability": 0.982421875}, {"start": 2318.15, "end": 2318.41, "word": " minus", "probability": 0.8896484375}, {"start": 2318.41, "end": 2318.99, "word": " Q1,", "probability": 0.993896484375}, {"start": 2319.07, "end": 2319.19, "word": " is", "probability": 0.9208984375}, {"start": 2319.19, "end": 2319.39, "word": " not", "probability": 0.94384765625}, {"start": 2319.39, "end": 2319.81, "word": " affected", "probability": 0.8310546875}, {"start": 2319.81, "end": 2320.09, "word": " by", "probability": 0.96875}, {"start": 2320.09, "end": 2320.49, "word": " outliers.", "probability": 0.950439453125}, {"start": 2321.67, "end": 2322.23, "word": " Because", "probability": 0.916015625}, {"start": 2322.23, "end": 2322.41, "word": " you", "probability": 0.8857421875}, {"start": 2322.41, "end": 2322.89, "word": " ignored", "probability": 0.87255859375}, {"start": 2322.89, "end": 2324.19, "word": " the", "probability": 0.9091796875}, {"start": 2324.19, "end": 2324.61, "word": " small", "probability": 0.93994140625}, {"start": 2324.61, "end": 2325.73, "word": " values", "probability": 0.96435546875}], "temperature": 1.0}, {"id": 86, "seek": 235535, "start": 2327.53, "end": 2355.35, "text": " And the high values. 
So IQR is not affected by outliers. So in case of outliers, it's better to use IQR. Because the range is maximum minus minimum. And as we mentioned before, the range is affected by outliers. So IQR is again called the mid-spread because it covers the middle 50% of the data.", "tokens": [400, 264, 1090, 4190, 13, 407, 28921, 49, 307, 406, 8028, 538, 484, 23646, 13, 407, 294, 1389, 295, 484, 23646, 11, 309, 311, 1101, 281, 764, 28921, 49, 13, 1436, 264, 3613, 307, 6674, 3175, 7285, 13, 400, 382, 321, 2835, 949, 11, 264, 3613, 307, 8028, 538, 484, 23646, 13, 407, 28921, 49, 307, 797, 1219, 264, 2062, 12, 4952, 2538, 570, 309, 10538, 264, 2808, 2625, 4, 295, 264, 1412, 13], "avg_logprob": -0.15604167381922404, "compression_ratio": 1.6174863387978142, "no_speech_prob": 0.0, "words": [{"start": 2327.53, "end": 2327.89, "word": " And", "probability": 0.2734375}, {"start": 2327.89, "end": 2328.07, "word": " the", "probability": 0.79541015625}, {"start": 2328.07, "end": 2328.27, "word": " high", "probability": 0.8984375}, {"start": 2328.27, "end": 2328.71, "word": " values.", "probability": 0.6025390625}, {"start": 2329.15, "end": 2329.15, "word": " So", "probability": 0.947265625}, {"start": 2329.15, "end": 2330.57, "word": " IQR", "probability": 0.915283203125}, {"start": 2330.57, "end": 2330.83, "word": " is", "probability": 0.92822265625}, {"start": 2330.83, "end": 2331.05, "word": " not", "probability": 0.947265625}, {"start": 2331.05, "end": 2331.45, "word": " affected", "probability": 0.828125}, {"start": 2331.45, "end": 2331.75, "word": " by", "probability": 0.96435546875}, {"start": 2331.75, "end": 2332.11, "word": " outliers.", "probability": 0.8134765625}, {"start": 2332.65, "end": 2333.21, "word": " So", "probability": 0.95263671875}, {"start": 2333.21, "end": 2333.43, "word": " in", "probability": 0.89794921875}, {"start": 2333.43, "end": 2333.69, "word": " case", "probability": 0.91064453125}, {"start": 2333.69, "end": 2333.89, "word": " of", "probability": 
0.93212890625}, {"start": 2333.89, "end": 2334.45, "word": " outliers,", "probability": 0.961181640625}, {"start": 2334.77, "end": 2335.01, "word": " it's", "probability": 0.968994140625}, {"start": 2335.01, "end": 2335.25, "word": " better", "probability": 0.91748046875}, {"start": 2335.25, "end": 2335.49, "word": " to", "probability": 0.96337890625}, {"start": 2335.49, "end": 2336.13, "word": " use", "probability": 0.8974609375}, {"start": 2336.13, "end": 2337.19, "word": " IQR.", "probability": 0.745849609375}, {"start": 2338.09, "end": 2338.73, "word": " Because", "probability": 0.94140625}, {"start": 2338.73, "end": 2338.93, "word": " the", "probability": 0.90625}, {"start": 2338.93, "end": 2339.23, "word": " range", "probability": 0.8955078125}, {"start": 2339.23, "end": 2339.43, "word": " is", "probability": 0.919921875}, {"start": 2339.43, "end": 2339.75, "word": " maximum", "probability": 0.9072265625}, {"start": 2339.75, "end": 2340.11, "word": " minus", "probability": 0.97509765625}, {"start": 2340.11, "end": 2340.43, "word": " minimum.", "probability": 0.9697265625}, {"start": 2341.21, "end": 2341.35, "word": " And", "probability": 0.955078125}, {"start": 2341.35, "end": 2341.51, "word": " as", "probability": 0.92041015625}, {"start": 2341.51, "end": 2341.61, "word": " we", "probability": 0.9169921875}, {"start": 2341.61, "end": 2341.85, "word": " mentioned", "probability": 0.83935546875}, {"start": 2341.85, "end": 2342.29, "word": " before,", "probability": 0.8515625}, {"start": 2342.67, "end": 2343.67, "word": " the", "probability": 0.91455078125}, {"start": 2343.67, "end": 2343.97, "word": " range", "probability": 0.884765625}, {"start": 2343.97, "end": 2344.49, "word": " is", "probability": 0.93994140625}, {"start": 2344.49, "end": 2344.77, "word": " affected", "probability": 0.85693359375}, {"start": 2344.77, "end": 2345.03, "word": " by", "probability": 0.97021484375}, {"start": 2345.03, "end": 2345.39, "word": " outliers.", "probability": 
0.9521484375}, {"start": 2347.15, "end": 2347.79, "word": " So", "probability": 0.96240234375}, {"start": 2347.79, "end": 2348.67, "word": " IQR", "probability": 0.978271484375}, {"start": 2348.67, "end": 2350.15, "word": " is", "probability": 0.9091796875}, {"start": 2350.15, "end": 2350.49, "word": " again", "probability": 0.7021484375}, {"start": 2350.49, "end": 2350.83, "word": " called", "probability": 0.8505859375}, {"start": 2350.83, "end": 2351.05, "word": " the", "probability": 0.91162109375}, {"start": 2351.05, "end": 2351.23, "word": " mid", "probability": 0.76171875}, {"start": 2351.23, "end": 2351.65, "word": "-spread", "probability": 0.7752278645833334}, {"start": 2351.65, "end": 2352.45, "word": " because", "probability": 0.416259765625}, {"start": 2352.45, "end": 2352.69, "word": " it", "probability": 0.94287109375}, {"start": 2352.69, "end": 2353.07, "word": " covers", "probability": 0.8857421875}, {"start": 2353.07, "end": 2353.67, "word": " the", "probability": 0.90771484375}, {"start": 2353.67, "end": 2353.97, "word": " middle", "probability": 0.908203125}, {"start": 2353.97, "end": 2354.45, "word": " 50", "probability": 0.9365234375}, {"start": 2354.45, "end": 2354.69, "word": "%", "probability": 0.91748046875}, {"start": 2354.69, "end": 2354.95, "word": " of", "probability": 0.95556640625}, {"start": 2354.95, "end": 2355.09, "word": " the", "probability": 0.91943359375}, {"start": 2355.09, "end": 2355.35, "word": " data.", "probability": 0.92333984375}], "temperature": 1.0}, {"id": 87, "seek": 237956, "start": 2357.3, "end": 2379.56, "text": " IQR again is a measure of variability that is not influenced or affected by outliers or extreme values. So in the presence of outliers, it's better to use IQR instead of using the range. 
So again, median and the range are not affected by outliers.", "tokens": [28921, 49, 797, 307, 257, 3481, 295, 35709, 300, 307, 406, 15269, 420, 8028, 538, 484, 23646, 420, 8084, 4190, 13, 407, 294, 264, 6814, 295, 484, 23646, 11, 309, 311, 1101, 281, 764, 28921, 49, 2602, 295, 1228, 264, 3613, 13, 407, 797, 11, 26779, 293, 264, 3613, 366, 406, 8028, 538, 484, 23646, 13], "avg_logprob": -0.16461074352264404, "compression_ratio": 1.5696202531645569, "no_speech_prob": 0.0, "words": [{"start": 2357.3, "end": 2357.94, "word": " IQR", "probability": 0.78466796875}, {"start": 2357.94, "end": 2358.18, "word": " again", "probability": 0.42578125}, {"start": 2358.18, "end": 2358.36, "word": " is", "probability": 0.87646484375}, {"start": 2358.36, "end": 2358.46, "word": " a", "probability": 0.91650390625}, {"start": 2358.46, "end": 2358.62, "word": " measure", "probability": 0.796875}, {"start": 2358.62, "end": 2358.8, "word": " of", "probability": 0.94384765625}, {"start": 2358.8, "end": 2359.22, "word": " variability", "probability": 0.98291015625}, {"start": 2359.22, "end": 2359.58, "word": " that", "probability": 0.87841796875}, {"start": 2359.58, "end": 2359.8, "word": " is", "probability": 0.912109375}, {"start": 2359.8, "end": 2360.12, "word": " not", "probability": 0.92724609375}, {"start": 2360.12, "end": 2360.7, "word": " influenced", "probability": 0.79248046875}, {"start": 2360.7, "end": 2361.66, "word": " or", "probability": 0.73779296875}, {"start": 2361.66, "end": 2362.08, "word": " affected", "probability": 0.78125}, {"start": 2362.08, "end": 2362.76, "word": " by", "probability": 0.9560546875}, {"start": 2362.76, "end": 2363.2, "word": " outliers", "probability": 0.886962890625}, {"start": 2363.2, "end": 2363.56, "word": " or", "probability": 0.9169921875}, {"start": 2363.56, "end": 2363.9, "word": " extreme", "probability": 0.84521484375}, {"start": 2363.9, "end": 2364.34, "word": " values.", "probability": 0.9677734375}, {"start": 2365.16, "end": 2365.28, 
"word": " So", "probability": 0.81396484375}, {"start": 2365.28, "end": 2365.4, "word": " in", "probability": 0.73681640625}, {"start": 2365.4, "end": 2365.52, "word": " the", "probability": 0.9140625}, {"start": 2365.52, "end": 2365.8, "word": " presence", "probability": 0.9677734375}, {"start": 2365.8, "end": 2366.02, "word": " of", "probability": 0.96435546875}, {"start": 2366.02, "end": 2366.44, "word": " outliers,", "probability": 0.943115234375}, {"start": 2366.6, "end": 2366.68, "word": " it's", "probability": 0.720458984375}, {"start": 2366.68, "end": 2366.94, "word": " better", "probability": 0.90966796875}, {"start": 2366.94, "end": 2367.14, "word": " to", "probability": 0.9677734375}, {"start": 2367.14, "end": 2368.0, "word": " use", "probability": 0.888671875}, {"start": 2368.0, "end": 2369.2, "word": " IQR", "probability": 0.98095703125}, {"start": 2369.2, "end": 2369.58, "word": " instead", "probability": 0.8173828125}, {"start": 2369.58, "end": 2369.76, "word": " of", "probability": 0.9638671875}, {"start": 2369.76, "end": 2370.2, "word": " using", "probability": 0.93310546875}, {"start": 2370.2, "end": 2371.24, "word": " the", "probability": 0.671875}, {"start": 2371.24, "end": 2371.52, "word": " range.", "probability": 0.86767578125}, {"start": 2373.46, "end": 2374.16, "word": " So", "probability": 0.89794921875}, {"start": 2374.16, "end": 2374.48, "word": " again,", "probability": 0.9287109375}, {"start": 2375.26, "end": 2375.5, "word": " median", "probability": 0.76513671875}, {"start": 2375.5, "end": 2376.34, "word": " and", "probability": 0.9423828125}, {"start": 2376.34, "end": 2376.5, "word": " the", "probability": 0.6494140625}, {"start": 2376.5, "end": 2376.84, "word": " range", "probability": 0.88720703125}, {"start": 2376.84, "end": 2377.98, "word": " are", "probability": 0.9296875}, {"start": 2377.98, "end": 2378.28, "word": " not", "probability": 0.9443359375}, {"start": 2378.28, "end": 2378.82, "word": " affected", "probability": 
0.87255859375}, {"start": 2378.82, "end": 2379.14, "word": " by", "probability": 0.97314453125}, {"start": 2379.14, "end": 2379.56, "word": " outliers.", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 88, "seek": 240700, "start": 2380.32, "end": 2407.0, "text": " So in case of the presence of outliers, we have to use these measures, one as measure of central and the other as measure of spread. So measures like Q1, Q3, and IQR that are not influenced by outliers are called resistant measures. Resistance means in case of outliers, they remain in the same position or approximately in the same position.", "tokens": [407, 294, 1389, 295, 264, 6814, 295, 484, 23646, 11, 321, 362, 281, 764, 613, 8000, 11, 472, 382, 3481, 295, 5777, 293, 264, 661, 382, 3481, 295, 3974, 13, 407, 8000, 411, 1249, 16, 11, 1249, 18, 11, 293, 28921, 49, 300, 366, 406, 15269, 538, 484, 23646, 366, 1219, 20383, 8000, 13, 45647, 1355, 294, 1389, 295, 484, 23646, 11, 436, 6222, 294, 264, 912, 2535, 420, 10447, 294, 264, 912, 2535, 13], "avg_logprob": -0.12469161792021048, "compression_ratio": 1.758974358974359, "no_speech_prob": 0.0, "words": [{"start": 2380.32, "end": 2380.66, "word": " So", "probability": 0.8828125}, {"start": 2380.66, "end": 2380.88, "word": " in", "probability": 0.70703125}, {"start": 2380.88, "end": 2381.18, "word": " case", "probability": 0.9052734375}, {"start": 2381.18, "end": 2382.04, "word": " of", "probability": 0.95654296875}, {"start": 2382.04, "end": 2382.18, "word": " the", "probability": 0.81689453125}, {"start": 2382.18, "end": 2382.44, "word": " presence", "probability": 0.951171875}, {"start": 2382.44, "end": 2382.76, "word": " of", "probability": 0.96337890625}, {"start": 2382.76, "end": 2383.18, "word": " outliers,", "probability": 0.892822265625}, {"start": 2383.34, "end": 2383.4, "word": " we", "probability": 0.927734375}, {"start": 2383.4, "end": 2383.56, "word": " have", "probability": 0.94384765625}, {"start": 2383.56, "end": 2383.66, "word": 
" to", "probability": 0.96533203125}, {"start": 2383.66, "end": 2383.88, "word": " use", "probability": 0.87353515625}, {"start": 2383.88, "end": 2384.12, "word": " these", "probability": 0.82666015625}, {"start": 2384.12, "end": 2384.54, "word": " measures,", "probability": 0.82373046875}, {"start": 2385.22, "end": 2385.48, "word": " one", "probability": 0.92822265625}, {"start": 2385.48, "end": 2385.68, "word": " as", "probability": 0.79345703125}, {"start": 2385.68, "end": 2385.96, "word": " measure", "probability": 0.76953125}, {"start": 2385.96, "end": 2386.38, "word": " of", "probability": 0.966796875}, {"start": 2386.38, "end": 2386.94, "word": " central", "probability": 0.87451171875}, {"start": 2386.94, "end": 2387.44, "word": " and", "probability": 0.77783203125}, {"start": 2387.44, "end": 2387.56, "word": " the", "probability": 0.68896484375}, {"start": 2387.56, "end": 2387.8, "word": " other", "probability": 0.88330078125}, {"start": 2387.8, "end": 2388.04, "word": " as", "probability": 0.921875}, {"start": 2388.04, "end": 2388.28, "word": " measure", "probability": 0.8837890625}, {"start": 2388.28, "end": 2388.46, "word": " of", "probability": 0.9609375}, {"start": 2388.46, "end": 2388.8, "word": " spread.", "probability": 0.91455078125}, {"start": 2389.58, "end": 2389.78, "word": " So", "probability": 0.94287109375}, {"start": 2389.78, "end": 2390.1, "word": " measures", "probability": 0.76025390625}, {"start": 2390.1, "end": 2390.5, "word": " like", "probability": 0.93310546875}, {"start": 2390.5, "end": 2391.1, "word": " Q1,", "probability": 0.940185546875}, {"start": 2391.48, "end": 2392.02, "word": " Q3,", "probability": 0.991943359375}, {"start": 2392.22, "end": 2392.32, "word": " and", "probability": 0.9443359375}, {"start": 2392.32, "end": 2393.08, "word": " IQR", "probability": 0.926513671875}, {"start": 2393.08, "end": 2393.9, "word": " that", "probability": 0.69580078125}, {"start": 2393.9, "end": 2394.1, "word": " are", "probability": 
0.94091796875}, {"start": 2394.1, "end": 2394.42, "word": " not", "probability": 0.943359375}, {"start": 2394.42, "end": 2395.04, "word": " influenced", "probability": 0.77490234375}, {"start": 2395.04, "end": 2395.32, "word": " by", "probability": 0.9697265625}, {"start": 2395.32, "end": 2395.84, "word": " outliers", "probability": 0.945068359375}, {"start": 2395.84, "end": 2396.28, "word": " are", "probability": 0.87841796875}, {"start": 2396.28, "end": 2396.68, "word": " called", "probability": 0.88623046875}, {"start": 2396.68, "end": 2397.4, "word": " resistant", "probability": 0.7685546875}, {"start": 2397.4, "end": 2397.86, "word": " measures.", "probability": 0.8447265625}, {"start": 2398.86, "end": 2399.26, "word": " Resistance", "probability": 0.90869140625}, {"start": 2399.26, "end": 2399.9, "word": " means", "probability": 0.919921875}, {"start": 2399.9, "end": 2400.92, "word": " in", "probability": 0.73291015625}, {"start": 2400.92, "end": 2401.22, "word": " case", "probability": 0.91259765625}, {"start": 2401.22, "end": 2401.4, "word": " of", "probability": 0.962890625}, {"start": 2401.4, "end": 2401.98, "word": " outliers,", "probability": 0.9443359375}, {"start": 2402.38, "end": 2403.12, "word": " they", "probability": 0.87548828125}, {"start": 2403.12, "end": 2403.98, "word": " remain", "probability": 0.89501953125}, {"start": 2403.98, "end": 2404.34, "word": " in", "probability": 0.9501953125}, {"start": 2404.34, "end": 2404.54, "word": " the", "probability": 0.9189453125}, {"start": 2404.54, "end": 2404.86, "word": " same", "probability": 0.908203125}, {"start": 2404.86, "end": 2405.3, "word": " position", "probability": 0.93896484375}, {"start": 2405.3, "end": 2405.66, "word": " or", "probability": 0.6796875}, {"start": 2405.66, "end": 2406.12, "word": " approximately", "probability": 0.798828125}, {"start": 2406.12, "end": 2406.38, "word": " in", "probability": 0.9296875}, {"start": 2406.38, "end": 2406.5, "word": " the", "probability": 
0.91796875}, {"start": 2406.5, "end": 2406.66, "word": " same", "probability": 0.8935546875}, {"start": 2406.66, "end": 2407.0, "word": " position.", "probability": 0.9306640625}], "temperature": 1.0}, {"id": 89, "seek": 243421, "start": 2407.69, "end": 2434.21, "text": " Because outliers don't affect these measures. I mean, don't affect Q1, Q3, and consequently IQR, because IQR is just the distance between Q3 and Q1. So to determine the value of IQR, you have first to compute Q1, Q3, then take the difference between these two.", "tokens": [1436, 484, 23646, 500, 380, 3345, 613, 8000, 13, 286, 914, 11, 500, 380, 3345, 1249, 16, 11, 1249, 18, 11, 293, 47259, 28921, 49, 11, 570, 28921, 49, 307, 445, 264, 4560, 1296, 1249, 18, 293, 1249, 16, 13, 407, 281, 6997, 264, 2158, 295, 28921, 49, 11, 291, 362, 700, 281, 14722, 1249, 16, 11, 1249, 18, 11, 550, 747, 264, 2649, 1296, 613, 732, 13], "avg_logprob": -0.1543251759764077, "compression_ratio": 1.5263157894736843, "no_speech_prob": 0.0, "words": [{"start": 2407.69, "end": 2408.11, "word": " Because", "probability": 0.70556640625}, {"start": 2408.11, "end": 2408.79, "word": " outliers", "probability": 0.83203125}, {"start": 2408.79, "end": 2409.87, "word": " don't", "probability": 0.94921875}, {"start": 2409.87, "end": 2410.33, "word": " affect", "probability": 0.8759765625}, {"start": 2410.33, "end": 2410.85, "word": " these", "probability": 0.78857421875}, {"start": 2410.85, "end": 2411.25, "word": " measures.", "probability": 0.59521484375}, {"start": 2411.39, "end": 2411.47, "word": " I", "probability": 0.98095703125}, {"start": 2411.47, "end": 2411.59, "word": " mean,", "probability": 0.9658203125}, {"start": 2411.65, "end": 2411.81, "word": " don't", "probability": 0.93505859375}, {"start": 2411.81, "end": 2412.23, "word": " affect", "probability": 0.923828125}, {"start": 2412.23, "end": 2413.87, "word": " Q1,", "probability": 0.801513671875}, {"start": 2414.83, "end": 2415.33, "word": " Q3,", "probability": 
0.902587890625}, {"start": 2415.91, "end": 2416.77, "word": " and", "probability": 0.94091796875}, {"start": 2416.77, "end": 2417.47, "word": " consequently", "probability": 0.79541015625}, {"start": 2417.47, "end": 2418.11, "word": " IQR,", "probability": 0.93017578125}, {"start": 2418.21, "end": 2418.39, "word": " because", "probability": 0.8935546875}, {"start": 2418.39, "end": 2418.89, "word": " IQR", "probability": 0.9873046875}, {"start": 2418.89, "end": 2419.09, "word": " is", "probability": 0.943359375}, {"start": 2419.09, "end": 2419.43, "word": " just", "probability": 0.916015625}, {"start": 2419.43, "end": 2420.13, "word": " the", "probability": 0.90234375}, {"start": 2420.13, "end": 2420.55, "word": " distance", "probability": 0.94287109375}, {"start": 2420.55, "end": 2420.91, "word": " between", "probability": 0.87939453125}, {"start": 2420.91, "end": 2421.41, "word": " Q3", "probability": 0.9853515625}, {"start": 2421.41, "end": 2421.63, "word": " and", "probability": 0.9404296875}, {"start": 2421.63, "end": 2422.07, "word": " Q1.", "probability": 0.94384765625}, {"start": 2422.95, "end": 2423.53, "word": " So", "probability": 0.962890625}, {"start": 2423.53, "end": 2423.99, "word": " to", "probability": 0.68017578125}, {"start": 2423.99, "end": 2424.59, "word": " determine", "probability": 0.91357421875}, {"start": 2424.59, "end": 2424.99, "word": " the", "probability": 0.85009765625}, {"start": 2424.99, "end": 2425.25, "word": " value", "probability": 0.9716796875}, {"start": 2425.25, "end": 2425.49, "word": " of", "probability": 0.71728515625}, {"start": 2425.49, "end": 2426.15, "word": " IQR,", "probability": 0.97998046875}, {"start": 2426.21, "end": 2426.31, "word": " you", "probability": 0.9580078125}, {"start": 2426.31, "end": 2426.55, "word": " have", "probability": 0.919921875}, {"start": 2426.55, "end": 2426.89, "word": " first", "probability": 0.5126953125}, {"start": 2426.89, "end": 2427.03, "word": " to", "probability": 0.60498046875}, 
{"start": 2427.03, "end": 2427.33, "word": " compute", "probability": 0.9140625}, {"start": 2427.33, "end": 2427.85, "word": " Q1,", "probability": 0.996826171875}, {"start": 2428.87, "end": 2429.43, "word": " Q3,", "probability": 0.9423828125}, {"start": 2429.75, "end": 2430.05, "word": " then", "probability": 0.82080078125}, {"start": 2430.05, "end": 2430.53, "word": " take", "probability": 0.86962890625}, {"start": 2430.53, "end": 2431.05, "word": " the", "probability": 0.9267578125}, {"start": 2431.05, "end": 2432.91, "word": " difference", "probability": 0.73583984375}, {"start": 2432.91, "end": 2433.77, "word": " between", "probability": 0.794921875}, {"start": 2433.77, "end": 2434.05, "word": " these", "probability": 0.8125}, {"start": 2434.05, "end": 2434.21, "word": " two.", "probability": 0.830078125}], "temperature": 1.0}, {"id": 90, "seek": 246217, "start": 2435.38, "end": 2462.18, "text": " So, for example, suppose we have a data, and that data has Q1 equals 30, and Q3 is 55. Suppose for a data set, that data set has Q1 30, Q3 is 57. 
The IQR, or Inter Equal Hyper Range,", "tokens": [407, 11, 337, 1365, 11, 7297, 321, 362, 257, 1412, 11, 293, 300, 1412, 575, 1249, 16, 6915, 2217, 11, 293, 1249, 18, 307, 12330, 13, 21360, 337, 257, 1412, 992, 11, 300, 1412, 992, 575, 1249, 16, 2217, 11, 1249, 18, 307, 21423, 13, 440, 28921, 49, 11, 420, 5751, 15624, 304, 29592, 33778, 11], "avg_logprob": -0.2605537406185217, "compression_ratio": 1.3863636363636365, "no_speech_prob": 0.0, "words": [{"start": 2435.38, "end": 2435.78, "word": " So,", "probability": 0.7822265625}, {"start": 2436.12, "end": 2436.32, "word": " for", "probability": 0.93408203125}, {"start": 2436.32, "end": 2436.7, "word": " example,", "probability": 0.97314453125}, {"start": 2438.4, "end": 2438.74, "word": " suppose", "probability": 0.869140625}, {"start": 2438.74, "end": 2438.94, "word": " we", "probability": 0.9248046875}, {"start": 2438.94, "end": 2439.06, "word": " have", "probability": 0.9482421875}, {"start": 2439.06, "end": 2439.16, "word": " a", "probability": 0.85009765625}, {"start": 2439.16, "end": 2439.4, "word": " data,", "probability": 0.943359375}, {"start": 2440.28, "end": 2440.62, "word": " and", "probability": 0.93212890625}, {"start": 2440.62, "end": 2440.84, "word": " that", "probability": 0.9384765625}, {"start": 2440.84, "end": 2441.12, "word": " data", "probability": 0.927734375}, {"start": 2441.12, "end": 2441.58, "word": " has", "probability": 0.943359375}, {"start": 2441.58, "end": 2443.88, "word": " Q1", "probability": 0.913818359375}, {"start": 2443.88, "end": 2445.52, "word": " equals", "probability": 0.619140625}, {"start": 2445.52, "end": 2446.12, "word": " 30,", "probability": 0.93359375}, {"start": 2447.02, "end": 2447.86, "word": " and", "probability": 0.94189453125}, {"start": 2447.86, "end": 2448.72, "word": " Q3", "probability": 0.983154296875}, {"start": 2448.72, "end": 2449.04, "word": " is", "probability": 0.90087890625}, {"start": 2449.04, "end": 2449.54, "word": " 55.", "probability": 
0.955078125}, {"start": 2449.72, "end": 2450.1, "word": " Suppose", "probability": 0.78515625}, {"start": 2450.1, "end": 2450.44, "word": " for", "probability": 0.888671875}, {"start": 2450.44, "end": 2451.2, "word": " a", "probability": 0.95654296875}, {"start": 2451.2, "end": 2451.4, "word": " data", "probability": 0.496337890625}, {"start": 2451.4, "end": 2451.74, "word": " set,", "probability": 0.95166015625}, {"start": 2452.9, "end": 2453.18, "word": " that", "probability": 0.8896484375}, {"start": 2453.18, "end": 2453.36, "word": " data", "probability": 0.7802734375}, {"start": 2453.36, "end": 2453.54, "word": " set", "probability": 0.8984375}, {"start": 2453.54, "end": 2453.94, "word": " has", "probability": 0.9423828125}, {"start": 2453.94, "end": 2455.0, "word": " Q1", "probability": 0.96630859375}, {"start": 2455.0, "end": 2455.42, "word": " 30,", "probability": 0.5390625}, {"start": 2455.94, "end": 2456.42, "word": " Q3", "probability": 0.902587890625}, {"start": 2456.42, "end": 2456.58, "word": " is", "probability": 0.88818359375}, {"start": 2456.58, "end": 2457.32, "word": " 57.", "probability": 0.94482421875}, {"start": 2458.84, "end": 2459.2, "word": " The", "probability": 0.86865234375}, {"start": 2459.2, "end": 2460.14, "word": " IQR,", "probability": 0.85009765625}, {"start": 2460.8, "end": 2460.8, "word": " or", "probability": 0.8017578125}, {"start": 2460.8, "end": 2461.36, "word": " Inter", "probability": 0.53076171875}, {"start": 2461.36, "end": 2461.62, "word": " Equal", "probability": 0.6173095703125}, {"start": 2461.62, "end": 2461.86, "word": " Hyper", "probability": 0.1107177734375}, {"start": 2461.86, "end": 2462.18, "word": " Range,", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 91, "seek": 249182, "start": 2462.96, "end": 2491.82, "text": " 57 minus 30 is 27. Now what's the range? The range is maximum for the largest value, which is 17 minus 12. That gives 58. Now look at the difference between the two ranges. 
The inter-quartile range is 27. The range is 58. There is a big difference between these two values because range", "tokens": [21423, 3175, 2217, 307, 7634, 13, 823, 437, 311, 264, 3613, 30, 440, 3613, 307, 6674, 337, 264, 6443, 2158, 11, 597, 307, 3282, 3175, 2272, 13, 663, 2709, 21786, 13, 823, 574, 412, 264, 2649, 1296, 264, 732, 22526, 13, 440, 728, 12, 358, 446, 794, 3613, 307, 7634, 13, 440, 3613, 307, 21786, 13, 821, 307, 257, 955, 2649, 1296, 613, 732, 4190, 570, 3613], "avg_logprob": -0.18589154499418595, "compression_ratio": 1.6494252873563218, "no_speech_prob": 0.0, "words": [{"start": 2462.96, "end": 2463.54, "word": " 57", "probability": 0.78466796875}, {"start": 2463.54, "end": 2463.88, "word": " minus", "probability": 0.96435546875}, {"start": 2463.88, "end": 2464.18, "word": " 30", "probability": 0.5244140625}, {"start": 2464.18, "end": 2464.32, "word": " is", "probability": 0.89990234375}, {"start": 2464.32, "end": 2464.7, "word": " 27.", "probability": 0.97705078125}, {"start": 2466.52, "end": 2467.24, "word": " Now", "probability": 0.9072265625}, {"start": 2467.24, "end": 2467.54, "word": " what's", "probability": 0.74462890625}, {"start": 2467.54, "end": 2467.64, "word": " the", "probability": 0.91357421875}, {"start": 2467.64, "end": 2467.92, "word": " range?", "probability": 0.884765625}, {"start": 2469.2, "end": 2469.92, "word": " The", "probability": 0.740234375}, {"start": 2469.92, "end": 2470.16, "word": " range", "probability": 0.90380859375}, {"start": 2470.16, "end": 2470.34, "word": " is", "probability": 0.9453125}, {"start": 2470.34, "end": 2470.92, "word": " maximum", "probability": 0.92431640625}, {"start": 2470.92, "end": 2472.22, "word": " for", "probability": 0.6328125}, {"start": 2472.22, "end": 2472.46, "word": " the", "probability": 0.91015625}, {"start": 2472.46, "end": 2472.92, "word": " largest", "probability": 0.91748046875}, {"start": 2472.92, "end": 2473.32, "word": " value,", "probability": 0.89111328125}, {"start": 
2473.46, "end": 2473.5, "word": " which", "probability": 0.90673828125}, {"start": 2473.5, "end": 2473.62, "word": " is", "probability": 0.94873046875}, {"start": 2473.62, "end": 2474.16, "word": " 17", "probability": 0.7216796875}, {"start": 2474.16, "end": 2475.4, "word": " minus", "probability": 0.74951171875}, {"start": 2475.4, "end": 2475.76, "word": " 12.", "probability": 0.74072265625}, {"start": 2476.76, "end": 2477.12, "word": " That", "probability": 0.86181640625}, {"start": 2477.12, "end": 2477.38, "word": " gives", "probability": 0.88720703125}, {"start": 2477.38, "end": 2477.88, "word": " 58.", "probability": 0.9833984375}, {"start": 2478.78, "end": 2479.16, "word": " Now", "probability": 0.9130859375}, {"start": 2479.16, "end": 2479.82, "word": " look", "probability": 0.732421875}, {"start": 2479.82, "end": 2479.96, "word": " at", "probability": 0.96826171875}, {"start": 2479.96, "end": 2480.08, "word": " the", "probability": 0.91943359375}, {"start": 2480.08, "end": 2480.54, "word": " difference", "probability": 0.8623046875}, {"start": 2480.54, "end": 2481.02, "word": " between", "probability": 0.873046875}, {"start": 2481.02, "end": 2481.26, "word": " the", "probability": 0.91845703125}, {"start": 2481.26, "end": 2481.42, "word": " two", "probability": 0.93310546875}, {"start": 2481.42, "end": 2481.82, "word": " ranges.", "probability": 0.9091796875}, {"start": 2482.6, "end": 2482.72, "word": " The", "probability": 0.86669921875}, {"start": 2482.72, "end": 2482.96, "word": " inter", "probability": 0.26318359375}, {"start": 2482.96, "end": 2483.44, "word": "-quartile", "probability": 0.72381591796875}, {"start": 2483.44, "end": 2483.7, "word": " range", "probability": 0.83251953125}, {"start": 2483.7, "end": 2483.94, "word": " is", "probability": 0.93896484375}, {"start": 2483.94, "end": 2484.62, "word": " 27.", "probability": 0.970703125}, {"start": 2486.04, "end": 2486.6, "word": " The", "probability": 0.869140625}, {"start": 2486.6, "end": 
2486.9, "word": " range", "probability": 0.88720703125}, {"start": 2486.9, "end": 2487.1, "word": " is", "probability": 0.94580078125}, {"start": 2487.1, "end": 2487.56, "word": " 58.", "probability": 0.98291015625}, {"start": 2487.88, "end": 2488.08, "word": " There", "probability": 0.8017578125}, {"start": 2488.08, "end": 2488.22, "word": " is", "probability": 0.91650390625}, {"start": 2488.22, "end": 2488.34, "word": " a", "probability": 0.99365234375}, {"start": 2488.34, "end": 2488.5, "word": " big", "probability": 0.92333984375}, {"start": 2488.5, "end": 2489.02, "word": " difference", "probability": 0.86376953125}, {"start": 2489.02, "end": 2489.4, "word": " between", "probability": 0.86767578125}, {"start": 2489.4, "end": 2489.62, "word": " these", "probability": 0.865234375}, {"start": 2489.62, "end": 2489.8, "word": " two", "probability": 0.9248046875}, {"start": 2489.8, "end": 2490.1, "word": " values", "probability": 0.974609375}, {"start": 2490.1, "end": 2490.58, "word": " because", "probability": 0.52734375}, {"start": 2490.58, "end": 2491.82, "word": " range", "probability": 0.67822265625}], "temperature": 1.0}, {"id": 92, "seek": 252183, "start": 2492.89, "end": 2521.83, "text": " depends only on smallest and largest. And these values could be outliers. For this reason, the range value is higher or greater than the required range, which is just the distance of the 50% of the middle data. For this reason, it's better to use the range in case of outliers. 
Make sense?", "tokens": [5946, 787, 322, 16998, 293, 6443, 13, 400, 613, 4190, 727, 312, 484, 23646, 13, 1171, 341, 1778, 11, 264, 3613, 2158, 307, 2946, 420, 5044, 813, 264, 4739, 3613, 11, 597, 307, 445, 264, 4560, 295, 264, 2625, 4, 295, 264, 2808, 1412, 13, 1171, 341, 1778, 11, 309, 311, 1101, 281, 764, 264, 3613, 294, 1389, 295, 484, 23646, 13, 4387, 2020, 30], "avg_logprob": -0.16394412314349954, "compression_ratio": 1.6111111111111112, "no_speech_prob": 0.0, "words": [{"start": 2492.89, "end": 2493.47, "word": " depends", "probability": 0.327392578125}, {"start": 2493.47, "end": 2494.13, "word": " only", "probability": 0.873046875}, {"start": 2494.13, "end": 2494.97, "word": " on", "probability": 0.8955078125}, {"start": 2494.97, "end": 2495.41, "word": " smallest", "probability": 0.8134765625}, {"start": 2495.41, "end": 2495.75, "word": " and", "probability": 0.94482421875}, {"start": 2495.75, "end": 2496.11, "word": " largest.", "probability": 0.8828125}, {"start": 2496.65, "end": 2496.91, "word": " And", "probability": 0.908203125}, {"start": 2496.91, "end": 2497.17, "word": " these", "probability": 0.8271484375}, {"start": 2497.17, "end": 2497.57, "word": " values", "probability": 0.96142578125}, {"start": 2497.57, "end": 2497.89, "word": " could", "probability": 0.88330078125}, {"start": 2497.89, "end": 2498.65, "word": " be", "probability": 0.95166015625}, {"start": 2498.65, "end": 2499.45, "word": " outliers.", "probability": 0.879150390625}, {"start": 2499.97, "end": 2500.19, "word": " For", "probability": 0.92431640625}, {"start": 2500.19, "end": 2500.41, "word": " this", "probability": 0.9384765625}, {"start": 2500.41, "end": 2500.81, "word": " reason,", "probability": 0.97216796875}, {"start": 2500.91, "end": 2501.01, "word": " the", "probability": 0.91357421875}, {"start": 2501.01, "end": 2501.33, "word": " range", "probability": 0.90234375}, {"start": 2501.33, "end": 2503.07, "word": " value", "probability": 0.94140625}, {"start": 2503.07, 
"end": 2503.33, "word": " is", "probability": 0.9306640625}, {"start": 2503.33, "end": 2503.75, "word": " higher", "probability": 0.912109375}, {"start": 2503.75, "end": 2504.09, "word": " or", "probability": 0.92431640625}, {"start": 2504.09, "end": 2504.41, "word": " greater", "probability": 0.91650390625}, {"start": 2504.41, "end": 2504.87, "word": " than", "probability": 0.9453125}, {"start": 2504.87, "end": 2505.81, "word": " the", "probability": 0.859375}, {"start": 2505.81, "end": 2506.21, "word": " required", "probability": 0.09564208984375}, {"start": 2506.21, "end": 2506.93, "word": " range,", "probability": 0.685546875}, {"start": 2507.29, "end": 2507.39, "word": " which", "probability": 0.94580078125}, {"start": 2507.39, "end": 2507.57, "word": " is", "probability": 0.91748046875}, {"start": 2507.57, "end": 2508.09, "word": " just", "probability": 0.73583984375}, {"start": 2508.09, "end": 2508.41, "word": " the", "probability": 0.82470703125}, {"start": 2508.41, "end": 2508.97, "word": " distance", "probability": 0.87060546875}, {"start": 2508.97, "end": 2510.35, "word": " of", "probability": 0.95556640625}, {"start": 2510.35, "end": 2510.77, "word": " the", "probability": 0.86328125}, {"start": 2510.77, "end": 2511.19, "word": " 50", "probability": 0.9501953125}, {"start": 2511.19, "end": 2511.51, "word": "%", "probability": 0.81640625}, {"start": 2511.51, "end": 2512.27, "word": " of", "probability": 0.962890625}, {"start": 2512.27, "end": 2512.45, "word": " the", "probability": 0.92333984375}, {"start": 2512.45, "end": 2512.65, "word": " middle", "probability": 0.83935546875}, {"start": 2512.65, "end": 2512.99, "word": " data.", "probability": 0.822265625}, {"start": 2513.61, "end": 2513.85, "word": " For", "probability": 0.9541015625}, {"start": 2513.85, "end": 2514.05, "word": " this", "probability": 0.943359375}, {"start": 2514.05, "end": 2514.41, "word": " reason,", "probability": 0.97216796875}, {"start": 2514.75, "end": 2515.05, "word": " 
it's", "probability": 0.962890625}, {"start": 2515.05, "end": 2515.31, "word": " better", "probability": 0.90966796875}, {"start": 2515.31, "end": 2515.53, "word": " to", "probability": 0.96484375}, {"start": 2515.53, "end": 2515.89, "word": " use", "probability": 0.87939453125}, {"start": 2515.89, "end": 2517.17, "word": " the", "probability": 0.90234375}, {"start": 2517.17, "end": 2517.57, "word": " range", "probability": 0.89013671875}, {"start": 2517.57, "end": 2518.63, "word": " in", "probability": 0.89599609375}, {"start": 2518.63, "end": 2519.07, "word": " case", "probability": 0.892578125}, {"start": 2519.07, "end": 2519.47, "word": " of", "probability": 0.966796875}, {"start": 2519.47, "end": 2520.11, "word": " outliers.", "probability": 0.9482421875}, {"start": 2520.75, "end": 2521.51, "word": " Make", "probability": 0.7548828125}, {"start": 2521.51, "end": 2521.83, "word": " sense?", "probability": 0.8515625}], "temperature": 1.0}, {"id": 93, "seek": 254942, "start": 2523.3, "end": 2549.42, "text": " Any question? 
Five-number summary are smallest value, largest value, also first quartile, third quartile, and the median.", "tokens": [2639, 1168, 30, 9436, 12, 41261, 12691, 366, 16998, 2158, 11, 6443, 2158, 11, 611, 700, 20837, 794, 11, 2636, 20837, 794, 11, 293, 264, 26779, 13], "avg_logprob": -0.19126674426453455, "compression_ratio": 1.2708333333333333, "no_speech_prob": 0.0, "words": [{"start": 2523.3, "end": 2523.56, "word": " Any", "probability": 0.82373046875}, {"start": 2523.56, "end": 2523.94, "word": " question?", "probability": 0.56640625}, {"start": 2528.68, "end": 2529.34, "word": " Five", "probability": 0.426025390625}, {"start": 2529.34, "end": 2529.62, "word": "-number", "probability": 0.650634765625}, {"start": 2529.62, "end": 2529.98, "word": " summary", "probability": 0.69580078125}, {"start": 2529.98, "end": 2532.7, "word": " are", "probability": 0.85009765625}, {"start": 2532.7, "end": 2539.32, "word": " smallest", "probability": 0.83154296875}, {"start": 2539.32, "end": 2539.92, "word": " value,", "probability": 0.96630859375}, {"start": 2541.26, "end": 2541.66, "word": " largest", "probability": 0.89306640625}, {"start": 2541.66, "end": 2542.12, "word": " value,", "probability": 0.9736328125}, {"start": 2543.88, "end": 2544.3, "word": " also", "probability": 0.86083984375}, {"start": 2544.3, "end": 2545.24, "word": " first", "probability": 0.75244140625}, {"start": 2545.24, "end": 2545.94, "word": " quartile,", "probability": 0.95361328125}, {"start": 2547.06, "end": 2547.38, "word": " third", "probability": 0.9306640625}, {"start": 2547.38, "end": 2548.12, "word": " quartile,", "probability": 0.987060546875}, {"start": 2548.66, "end": 2549.02, "word": " and", "probability": 0.94140625}, {"start": 2549.02, "end": 2549.18, "word": " the", "probability": 0.8974609375}, {"start": 2549.18, "end": 2549.42, "word": " median.", "probability": 0.92626953125}], "temperature": 1.0}, {"id": 94, "seek": 257723, "start": 2550.15, "end": 2577.23, "text": " These five 
numbers are called five-number summary, because by using these statistics, smallest, first, median, third quarter, and largest, you can describe the center spread and the shape of the distribution. So by using five-number summary, you can tell something about it.", "tokens": [1981, 1732, 3547, 366, 1219, 1732, 12, 41261, 12691, 11, 570, 538, 1228, 613, 12523, 11, 16998, 11, 700, 11, 26779, 11, 2636, 6555, 11, 293, 6443, 11, 291, 393, 6786, 264, 3056, 3974, 293, 264, 3909, 295, 264, 7316, 13, 407, 538, 1228, 1732, 12, 41261, 12691, 11, 291, 393, 980, 746, 466, 309, 13], "avg_logprob": -0.19654605681436105, "compression_ratio": 1.6272189349112427, "no_speech_prob": 0.0, "words": [{"start": 2550.15, "end": 2550.55, "word": " These", "probability": 0.8017578125}, {"start": 2550.55, "end": 2550.97, "word": " five", "probability": 0.83154296875}, {"start": 2550.97, "end": 2551.51, "word": " numbers", "probability": 0.8974609375}, {"start": 2551.51, "end": 2552.25, "word": " are", "probability": 0.9462890625}, {"start": 2552.25, "end": 2552.69, "word": " called", "probability": 0.89013671875}, {"start": 2552.69, "end": 2553.13, "word": " five", "probability": 0.78076171875}, {"start": 2553.13, "end": 2553.41, "word": "-number", "probability": 0.74609375}, {"start": 2553.41, "end": 2553.73, "word": " summary,", "probability": 0.76904296875}, {"start": 2554.19, "end": 2554.67, "word": " because", "probability": 0.89697265625}, {"start": 2554.67, "end": 2554.99, "word": " by", "probability": 0.93212890625}, {"start": 2554.99, "end": 2555.39, "word": " using", "probability": 0.92822265625}, {"start": 2555.39, "end": 2555.87, "word": " these", "probability": 0.7353515625}, {"start": 2555.87, "end": 2557.71, "word": " statistics,", "probability": 0.86376953125}, {"start": 2558.11, "end": 2558.95, "word": " smallest,", "probability": 0.91455078125}, {"start": 2559.37, "end": 2559.73, "word": " first,", "probability": 0.6884765625}, {"start": 2560.69, "end": 2561.05, "word": " 
median,", "probability": 0.8916015625}, {"start": 2561.35, "end": 2561.59, "word": " third", "probability": 0.9189453125}, {"start": 2561.59, "end": 2561.91, "word": " quarter,", "probability": 0.666015625}, {"start": 2562.07, "end": 2562.17, "word": " and", "probability": 0.912109375}, {"start": 2562.17, "end": 2562.55, "word": " largest,", "probability": 0.9072265625}, {"start": 2563.01, "end": 2563.25, "word": " you", "probability": 0.9619140625}, {"start": 2563.25, "end": 2563.47, "word": " can", "probability": 0.9462890625}, {"start": 2563.47, "end": 2564.05, "word": " describe", "probability": 0.8544921875}, {"start": 2564.05, "end": 2565.69, "word": " the", "probability": 0.9052734375}, {"start": 2565.69, "end": 2566.01, "word": " center", "probability": 0.82275390625}, {"start": 2566.01, "end": 2568.07, "word": " spread", "probability": 0.5107421875}, {"start": 2568.07, "end": 2569.05, "word": " and", "probability": 0.79736328125}, {"start": 2569.05, "end": 2569.25, "word": " the", "probability": 0.89453125}, {"start": 2569.25, "end": 2569.49, "word": " shape", "probability": 0.90625}, {"start": 2569.49, "end": 2569.63, "word": " of", "probability": 0.9599609375}, {"start": 2569.63, "end": 2569.77, "word": " the", "probability": 0.411865234375}, {"start": 2569.77, "end": 2570.13, "word": " distribution.", "probability": 0.57373046875}, {"start": 2571.65, "end": 2572.19, "word": " So", "probability": 0.9296875}, {"start": 2572.19, "end": 2572.59, "word": " by", "probability": 0.80322265625}, {"start": 2572.59, "end": 2573.07, "word": " using", "probability": 0.93359375}, {"start": 2573.07, "end": 2574.45, "word": " five", "probability": 0.75927734375}, {"start": 2574.45, "end": 2574.73, "word": "-number", "probability": 0.947021484375}, {"start": 2574.73, "end": 2575.05, "word": " summary,", "probability": 0.85986328125}, {"start": 2575.19, "end": 2575.29, "word": " you", "probability": 0.94873046875}, {"start": 2575.29, "end": 2575.57, "word": " can", 
"probability": 0.935546875}, {"start": 2575.57, "end": 2575.97, "word": " tell", "probability": 0.828125}, {"start": 2575.97, "end": 2576.45, "word": " something", "probability": 0.86083984375}, {"start": 2576.45, "end": 2576.81, "word": " about", "probability": 0.9072265625}, {"start": 2576.81, "end": 2577.23, "word": " it.", "probability": 0.71875}], "temperature": 1.0}, {"id": 95, "seek": 260553, "start": 2578.33, "end": 2605.53, "text": " The center of the data, I mean the value in the middle, because the median is the value in the middle. Spread, because we can talk about the IQR, which is the range, and also the shape of the data. And let's see, let's move to this slide, slide number 50. Let's see how can we construct something called box plot.", "tokens": [440, 3056, 295, 264, 1412, 11, 286, 914, 264, 2158, 294, 264, 2808, 11, 570, 264, 26779, 307, 264, 2158, 294, 264, 2808, 13, 30308, 11, 570, 321, 393, 751, 466, 264, 28921, 49, 11, 597, 307, 264, 3613, 11, 293, 611, 264, 3909, 295, 264, 1412, 13, 400, 718, 311, 536, 11, 718, 311, 1286, 281, 341, 4137, 11, 4137, 1230, 2625, 13, 961, 311, 536, 577, 393, 321, 7690, 746, 1219, 2424, 7542, 13], "avg_logprob": -0.16670049088341848, "compression_ratio": 1.643979057591623, "no_speech_prob": 0.0, "words": [{"start": 2578.33, "end": 2578.55, "word": " The", "probability": 0.75634765625}, {"start": 2578.55, "end": 2578.85, "word": " center", "probability": 0.88232421875}, {"start": 2578.85, "end": 2579.05, "word": " of", "probability": 0.966796875}, {"start": 2579.05, "end": 2579.15, "word": " the", "probability": 0.916015625}, {"start": 2579.15, "end": 2579.45, "word": " data,", "probability": 0.94482421875}, {"start": 2579.57, "end": 2579.65, "word": " I", "probability": 0.94775390625}, {"start": 2579.65, "end": 2579.75, "word": " mean", "probability": 0.9501953125}, {"start": 2579.75, "end": 2579.89, "word": " the", "probability": 0.80859375}, {"start": 2579.89, "end": 2580.09, "word": " value", "probability": 
0.96728515625}, {"start": 2580.09, "end": 2580.23, "word": " in", "probability": 0.8798828125}, {"start": 2580.23, "end": 2580.33, "word": " the", "probability": 0.919921875}, {"start": 2580.33, "end": 2580.55, "word": " middle,", "probability": 0.958984375}, {"start": 2580.67, "end": 2580.93, "word": " because", "probability": 0.91259765625}, {"start": 2580.93, "end": 2581.13, "word": " the", "probability": 0.91259765625}, {"start": 2581.13, "end": 2581.41, "word": " median", "probability": 0.9619140625}, {"start": 2581.41, "end": 2581.65, "word": " is", "probability": 0.921875}, {"start": 2581.65, "end": 2581.77, "word": " the", "probability": 0.9013671875}, {"start": 2581.77, "end": 2581.95, "word": " value", "probability": 0.9716796875}, {"start": 2581.95, "end": 2582.07, "word": " in", "probability": 0.91162109375}, {"start": 2582.07, "end": 2582.19, "word": " the", "probability": 0.91845703125}, {"start": 2582.19, "end": 2582.43, "word": " middle.", "probability": 0.9482421875}, {"start": 2583.77, "end": 2584.23, "word": " Spread,", "probability": 0.791015625}, {"start": 2584.77, "end": 2585.11, "word": " because", "probability": 0.89990234375}, {"start": 2585.11, "end": 2585.29, "word": " we", "probability": 0.92919921875}, {"start": 2585.29, "end": 2585.47, "word": " can", "probability": 0.93994140625}, {"start": 2585.47, "end": 2585.75, "word": " talk", "probability": 0.8798828125}, {"start": 2585.75, "end": 2586.17, "word": " about", "probability": 0.904296875}, {"start": 2586.17, "end": 2586.55, "word": " the", "probability": 0.8896484375}, {"start": 2586.55, "end": 2587.43, "word": " IQR,", "probability": 0.957275390625}, {"start": 2587.55, "end": 2587.69, "word": " which", "probability": 0.73876953125}, {"start": 2587.69, "end": 2587.79, "word": " is", "probability": 0.94482421875}, {"start": 2587.79, "end": 2587.89, "word": " the", "probability": 0.828125}, {"start": 2587.89, "end": 2588.27, "word": " range,", "probability": 0.9052734375}, {"start": 
2589.01, "end": 2589.29, "word": " and", "probability": 0.93408203125}, {"start": 2589.29, "end": 2589.75, "word": " also", "probability": 0.87939453125}, {"start": 2589.75, "end": 2590.37, "word": " the", "probability": 0.8486328125}, {"start": 2590.37, "end": 2590.63, "word": " shape", "probability": 0.91845703125}, {"start": 2590.63, "end": 2590.85, "word": " of", "probability": 0.96630859375}, {"start": 2590.85, "end": 2591.07, "word": " the", "probability": 0.9169921875}, {"start": 2591.07, "end": 2591.59, "word": " data.", "probability": 0.9375}, {"start": 2592.21, "end": 2592.39, "word": " And", "probability": 0.890625}, {"start": 2592.39, "end": 2592.65, "word": " let's", "probability": 0.962646484375}, {"start": 2592.65, "end": 2592.85, "word": " see,", "probability": 0.81103515625}, {"start": 2593.71, "end": 2594.21, "word": " let's", "probability": 0.968505859375}, {"start": 2594.21, "end": 2594.41, "word": " move", "probability": 0.9384765625}, {"start": 2594.41, "end": 2594.63, "word": " to", "probability": 0.96630859375}, {"start": 2594.63, "end": 2595.01, "word": " this", "probability": 0.86279296875}, {"start": 2595.01, "end": 2595.45, "word": " slide,", "probability": 0.96630859375}, {"start": 2596.67, "end": 2597.39, "word": " slide", "probability": 0.9326171875}, {"start": 2597.39, "end": 2598.07, "word": " number", "probability": 0.92919921875}, {"start": 2598.07, "end": 2598.53, "word": " 50.", "probability": 0.72509765625}, {"start": 2601.53, "end": 2602.05, "word": " Let's", "probability": 0.973876953125}, {"start": 2602.05, "end": 2602.31, "word": " see", "probability": 0.7802734375}, {"start": 2602.31, "end": 2602.95, "word": " how", "probability": 0.7412109375}, {"start": 2602.95, "end": 2603.21, "word": " can", "probability": 0.9013671875}, {"start": 2603.21, "end": 2603.39, "word": " we", "probability": 0.96142578125}, {"start": 2603.39, "end": 2604.01, "word": " construct", "probability": 0.96630859375}, {"start": 2604.01, "end": 
2604.73, "word": " something", "probability": 0.8701171875}, {"start": 2604.73, "end": 2605.09, "word": " called", "probability": 0.86572265625}, {"start": 2605.09, "end": 2605.33, "word": " box", "probability": 0.4228515625}, {"start": 2605.33, "end": 2605.53, "word": " plot.", "probability": 0.4072265625}], "temperature": 1.0}, {"id": 96, "seek": 263459, "start": 2607.11, "end": 2634.59, "text": " Box plot. Box plot can be constructed by using the five number summary. We have smallest value. On the other hand, we have the largest value. Also, we have Q1, the first quartile, the median, and Q3. For symmetric distribution, I mean if the data is bell-shaped. In this case, the vertical line in the box", "tokens": [15112, 7542, 13, 15112, 7542, 393, 312, 17083, 538, 1228, 264, 1732, 1230, 12691, 13, 492, 362, 16998, 2158, 13, 1282, 264, 661, 1011, 11, 321, 362, 264, 6443, 2158, 13, 2743, 11, 321, 362, 1249, 16, 11, 264, 700, 20837, 794, 11, 264, 26779, 11, 293, 1249, 18, 13, 1171, 32330, 7316, 11, 286, 914, 498, 264, 1412, 307, 4549, 12, 23103, 13, 682, 341, 1389, 11, 264, 9429, 1622, 294, 264, 2424], "avg_logprob": -0.170625003973643, "compression_ratio": 1.5148514851485149, "no_speech_prob": 0.0, "words": [{"start": 2607.11, "end": 2607.73, "word": " Box", "probability": 0.52685546875}, {"start": 2607.73, "end": 2608.35, "word": " plot.", "probability": 0.343017578125}, {"start": 2609.83, "end": 2610.45, "word": " Box", "probability": 0.919921875}, {"start": 2610.45, "end": 2610.67, "word": " plot", "probability": 0.95751953125}, {"start": 2610.67, "end": 2610.83, "word": " can", "probability": 0.93505859375}, {"start": 2610.83, "end": 2610.99, "word": " be", "probability": 0.95458984375}, {"start": 2610.99, "end": 2611.59, "word": " constructed", "probability": 0.962890625}, {"start": 2611.59, "end": 2611.85, "word": " by", "probability": 0.9443359375}, {"start": 2611.85, "end": 2612.13, "word": " using", "probability": 0.93212890625}, {"start": 2612.13, "end": 
2612.31, "word": " the", "probability": 0.736328125}, {"start": 2612.31, "end": 2612.53, "word": " five", "probability": 0.81884765625}, {"start": 2612.53, "end": 2612.79, "word": " number", "probability": 0.56787109375}, {"start": 2612.79, "end": 2613.09, "word": " summary.", "probability": 0.53662109375}, {"start": 2613.83, "end": 2614.35, "word": " We", "probability": 0.95947265625}, {"start": 2614.35, "end": 2614.53, "word": " have", "probability": 0.955078125}, {"start": 2614.53, "end": 2614.99, "word": " smallest", "probability": 0.86474609375}, {"start": 2614.99, "end": 2615.41, "word": " value.", "probability": 0.97265625}, {"start": 2615.99, "end": 2616.31, "word": " On", "probability": 0.9365234375}, {"start": 2616.31, "end": 2616.43, "word": " the", "probability": 0.90673828125}, {"start": 2616.43, "end": 2616.61, "word": " other", "probability": 0.888671875}, {"start": 2616.61, "end": 2616.83, "word": " hand,", "probability": 0.904296875}, {"start": 2616.87, "end": 2616.93, "word": " we", "probability": 0.94189453125}, {"start": 2616.93, "end": 2617.09, "word": " have", "probability": 0.94775390625}, {"start": 2617.09, "end": 2617.23, "word": " the", "probability": 0.90771484375}, {"start": 2617.23, "end": 2617.55, "word": " largest", "probability": 0.88330078125}, {"start": 2617.55, "end": 2617.97, "word": " value.", "probability": 0.970703125}, {"start": 2619.01, "end": 2619.63, "word": " Also,", "probability": 0.9384765625}, {"start": 2619.87, "end": 2619.95, "word": " we", "probability": 0.95703125}, {"start": 2619.95, "end": 2620.15, "word": " have", "probability": 0.93994140625}, {"start": 2620.15, "end": 2620.67, "word": " Q1,", "probability": 0.85400390625}, {"start": 2622.09, "end": 2622.35, "word": " the", "probability": 0.9111328125}, {"start": 2622.35, "end": 2622.67, "word": " first", "probability": 0.8720703125}, {"start": 2622.67, "end": 2623.21, "word": " quartile,", "probability": 0.9287109375}, {"start": 2623.31, "end": 2623.43, 
"word": " the", "probability": 0.908203125}, {"start": 2623.43, "end": 2623.75, "word": " median,", "probability": 0.9443359375}, {"start": 2624.23, "end": 2624.49, "word": " and", "probability": 0.9453125}, {"start": 2624.49, "end": 2625.03, "word": " Q3.", "probability": 0.986083984375}, {"start": 2625.77, "end": 2626.05, "word": " For", "probability": 0.9560546875}, {"start": 2626.05, "end": 2626.45, "word": " symmetric", "probability": 0.83984375}, {"start": 2626.45, "end": 2627.13, "word": " distribution,", "probability": 0.85546875}, {"start": 2627.27, "end": 2627.35, "word": " I", "probability": 0.94677734375}, {"start": 2627.35, "end": 2627.51, "word": " mean", "probability": 0.966796875}, {"start": 2627.51, "end": 2627.69, "word": " if", "probability": 0.69775390625}, {"start": 2627.69, "end": 2627.83, "word": " the", "probability": 0.9208984375}, {"start": 2627.83, "end": 2628.19, "word": " data", "probability": 0.94970703125}, {"start": 2628.19, "end": 2629.27, "word": " is", "probability": 0.9140625}, {"start": 2629.27, "end": 2629.49, "word": " bell", "probability": 0.8583984375}, {"start": 2629.49, "end": 2629.83, "word": "-shaped.", "probability": 0.685546875}, {"start": 2630.69, "end": 2630.91, "word": " In", "probability": 0.9580078125}, {"start": 2630.91, "end": 2631.15, "word": " this", "probability": 0.94580078125}, {"start": 2631.15, "end": 2631.51, "word": " case,", "probability": 0.90869140625}, {"start": 2632.29, "end": 2632.49, "word": " the", "probability": 0.90966796875}, {"start": 2632.49, "end": 2632.87, "word": " vertical", "probability": 0.93701171875}, {"start": 2632.87, "end": 2633.29, "word": " line", "probability": 0.90185546875}, {"start": 2633.29, "end": 2634.11, "word": " in", "probability": 0.89501953125}, {"start": 2634.11, "end": 2634.23, "word": " the", "probability": 0.9208984375}, {"start": 2634.23, "end": 2634.59, "word": " box", "probability": 0.9453125}], "temperature": 1.0}, {"id": 97, "seek": 265657, "start": 
2635.47, "end": 2656.57, "text": " which represents the median should be located in the middle of this box, also in the middle of the entire data. Look carefully at this vertical line. This line splits the data into two halves, 25% to the left and 25% to the right.", "tokens": [597, 8855, 264, 26779, 820, 312, 6870, 294, 264, 2808, 295, 341, 2424, 11, 611, 294, 264, 2808, 295, 264, 2302, 1412, 13, 2053, 7500, 412, 341, 9429, 1622, 13, 639, 1622, 37741, 264, 1412, 666, 732, 38490, 11, 3552, 4, 281, 264, 1411, 293, 3552, 4, 281, 264, 558, 13], "avg_logprob": -0.12928185124809927, "compression_ratio": 1.5298013245033113, "no_speech_prob": 0.0, "words": [{"start": 2635.47, "end": 2635.79, "word": " which", "probability": 0.54150390625}, {"start": 2635.79, "end": 2636.35, "word": " represents", "probability": 0.8505859375}, {"start": 2636.35, "end": 2636.57, "word": " the", "probability": 0.9140625}, {"start": 2636.57, "end": 2636.89, "word": " median", "probability": 0.9384765625}, {"start": 2636.89, "end": 2637.85, "word": " should", "probability": 0.63427734375}, {"start": 2637.85, "end": 2638.07, "word": " be", "probability": 0.94921875}, {"start": 2638.07, "end": 2638.57, "word": " located", "probability": 0.95654296875}, {"start": 2638.57, "end": 2639.03, "word": " in", "probability": 0.94189453125}, {"start": 2639.03, "end": 2639.17, "word": " the", "probability": 0.921875}, {"start": 2639.17, "end": 2639.37, "word": " middle", "probability": 0.95849609375}, {"start": 2639.37, "end": 2639.51, "word": " of", "probability": 0.966796875}, {"start": 2639.51, "end": 2639.73, "word": " this", "probability": 0.94189453125}, {"start": 2639.73, "end": 2640.09, "word": " box,", "probability": 0.93310546875}, {"start": 2641.07, "end": 2641.45, "word": " also", "probability": 0.82958984375}, {"start": 2641.45, "end": 2641.85, "word": " in", "probability": 0.8974609375}, {"start": 2641.85, "end": 2641.99, "word": " the", "probability": 0.92138671875}, {"start": 2641.99, 
"end": 2642.27, "word": " middle", "probability": 0.953125}, {"start": 2642.27, "end": 2642.59, "word": " of", "probability": 0.9677734375}, {"start": 2642.59, "end": 2642.73, "word": " the", "probability": 0.9130859375}, {"start": 2642.73, "end": 2643.13, "word": " entire", "probability": 0.90087890625}, {"start": 2643.13, "end": 2643.43, "word": " data.", "probability": 0.890625}, {"start": 2645.15, "end": 2645.51, "word": " Look", "probability": 0.76416015625}, {"start": 2645.51, "end": 2645.95, "word": " carefully", "probability": 0.78271484375}, {"start": 2645.95, "end": 2646.15, "word": " at", "probability": 0.94140625}, {"start": 2646.15, "end": 2646.35, "word": " this", "probability": 0.94189453125}, {"start": 2646.35, "end": 2646.79, "word": " vertical", "probability": 0.89208984375}, {"start": 2646.79, "end": 2647.17, "word": " line.", "probability": 0.9287109375}, {"start": 2649.53, "end": 2650.17, "word": " This", "probability": 0.87158203125}, {"start": 2650.17, "end": 2650.63, "word": " line", "probability": 0.93359375}, {"start": 2650.63, "end": 2651.35, "word": " splits", "probability": 0.9619140625}, {"start": 2651.35, "end": 2651.55, "word": " the", "probability": 0.90625}, {"start": 2651.55, "end": 2651.73, "word": " data", "probability": 0.9404296875}, {"start": 2651.73, "end": 2651.95, "word": " into", "probability": 0.83447265625}, {"start": 2651.95, "end": 2652.15, "word": " two", "probability": 0.8994140625}, {"start": 2652.15, "end": 2652.51, "word": " halves,", "probability": 0.87451171875}, {"start": 2653.55, "end": 2653.85, "word": " 25", "probability": 0.91943359375}, {"start": 2653.85, "end": 2654.23, "word": "%", "probability": 0.77734375}, {"start": 2654.23, "end": 2654.61, "word": " to", "probability": 0.89501953125}, {"start": 2654.61, "end": 2654.77, "word": " the", "probability": 0.9140625}, {"start": 2654.77, "end": 2655.05, "word": " left", "probability": 0.94580078125}, {"start": 2655.05, "end": 2655.29, "word": " and", 
"probability": 0.79443359375}, {"start": 2655.29, "end": 2655.67, "word": " 25", "probability": 0.96435546875}, {"start": 2655.67, "end": 2656.07, "word": "%", "probability": 0.99755859375}, {"start": 2656.07, "end": 2656.29, "word": " to", "probability": 0.962890625}, {"start": 2656.29, "end": 2656.39, "word": " the", "probability": 0.91064453125}, {"start": 2656.39, "end": 2656.57, "word": " right.", "probability": 0.9150390625}], "temperature": 1.0}, {"id": 98, "seek": 268486, "start": 2657.24, "end": 2684.86, "text": " And also this vertical line splits the data into two halves, from the smallest to largest, because there are 50% of the observations lie below, and 50% lies above. So that means by using box plot, you can tell something about the shape of the distribution. So again, if the data are symmetric around the median,", "tokens": [400, 611, 341, 9429, 1622, 37741, 264, 1412, 666, 732, 38490, 11, 490, 264, 16998, 281, 6443, 11, 570, 456, 366, 2625, 4, 295, 264, 18163, 4544, 2507, 11, 293, 2625, 4, 9134, 3673, 13, 407, 300, 1355, 538, 1228, 2424, 7542, 11, 291, 393, 980, 746, 466, 264, 3909, 295, 264, 7316, 13, 407, 797, 11, 498, 264, 1412, 366, 32330, 926, 264, 26779, 11], "avg_logprob": -0.18178637970739336, "compression_ratio": 1.5219512195121951, "no_speech_prob": 0.0, "words": [{"start": 2657.24, "end": 2657.54, "word": " And", "probability": 0.80419921875}, {"start": 2657.54, "end": 2657.86, "word": " also", "probability": 0.82080078125}, {"start": 2657.86, "end": 2658.26, "word": " this", "probability": 0.70068359375}, {"start": 2658.26, "end": 2658.88, "word": " vertical", "probability": 0.91943359375}, {"start": 2658.88, "end": 2659.32, "word": " line", "probability": 0.9111328125}, {"start": 2659.32, "end": 2659.96, "word": " splits", "probability": 0.9501953125}, {"start": 2659.96, "end": 2660.12, "word": " the", "probability": 0.90478515625}, {"start": 2660.12, "end": 2660.36, "word": " data", "probability": 0.94677734375}, {"start": 2660.36, 
"end": 2660.66, "word": " into", "probability": 0.83251953125}, {"start": 2660.66, "end": 2660.92, "word": " two", "probability": 0.9248046875}, {"start": 2660.92, "end": 2661.28, "word": " halves,", "probability": 0.8701171875}, {"start": 2661.82, "end": 2662.5, "word": " from", "probability": 0.8837890625}, {"start": 2662.5, "end": 2663.26, "word": " the", "probability": 0.91162109375}, {"start": 2663.26, "end": 2663.8, "word": " smallest", "probability": 0.94677734375}, {"start": 2663.8, "end": 2664.72, "word": " to", "probability": 0.9501953125}, {"start": 2664.72, "end": 2665.04, "word": " largest,", "probability": 0.875}, {"start": 2665.22, "end": 2665.44, "word": " because", "probability": 0.89892578125}, {"start": 2665.44, "end": 2665.64, "word": " there", "probability": 0.7861328125}, {"start": 2665.64, "end": 2665.92, "word": " are", "probability": 0.9365234375}, {"start": 2665.92, "end": 2667.34, "word": " 50", "probability": 0.80224609375}, {"start": 2667.34, "end": 2668.36, "word": "%", "probability": 0.56494140625}, {"start": 2668.36, "end": 2669.06, "word": " of", "probability": 0.95556640625}, {"start": 2669.06, "end": 2669.24, "word": " the", "probability": 0.89501953125}, {"start": 2669.24, "end": 2669.76, "word": " observations", "probability": 0.73681640625}, {"start": 2669.76, "end": 2670.12, "word": " lie", "probability": 0.8291015625}, {"start": 2670.12, "end": 2670.46, "word": " below,", "probability": 0.9169921875}, {"start": 2670.92, "end": 2671.18, "word": " and", "probability": 0.93896484375}, {"start": 2671.18, "end": 2671.5, "word": " 50", "probability": 0.96875}, {"start": 2671.5, "end": 2671.8, "word": "%", "probability": 0.998046875}, {"start": 2671.8, "end": 2672.6, "word": " lies", "probability": 0.88037109375}, {"start": 2672.6, "end": 2673.1, "word": " above.", "probability": 0.96728515625}, {"start": 2673.58, "end": 2673.8, "word": " So", "probability": 0.9658203125}, {"start": 2673.8, "end": 2674.04, "word": " that", 
"probability": 0.88427734375}, {"start": 2674.04, "end": 2674.32, "word": " means", "probability": 0.9326171875}, {"start": 2674.32, "end": 2674.56, "word": " by", "probability": 0.8193359375}, {"start": 2674.56, "end": 2674.9, "word": " using", "probability": 0.93310546875}, {"start": 2674.9, "end": 2675.14, "word": " box", "probability": 0.386474609375}, {"start": 2675.14, "end": 2675.42, "word": " plot,", "probability": 0.75439453125}, {"start": 2675.58, "end": 2675.7, "word": " you", "probability": 0.94384765625}, {"start": 2675.7, "end": 2675.94, "word": " can", "probability": 0.87353515625}, {"start": 2675.94, "end": 2676.1, "word": " tell", "probability": 0.74853515625}, {"start": 2676.1, "end": 2676.54, "word": " something", "probability": 0.86376953125}, {"start": 2676.54, "end": 2677.06, "word": " about", "probability": 0.90234375}, {"start": 2677.06, "end": 2677.84, "word": " the", "probability": 0.90576171875}, {"start": 2677.84, "end": 2678.2, "word": " shape", "probability": 0.90234375}, {"start": 2678.2, "end": 2678.66, "word": " of", "probability": 0.96728515625}, {"start": 2678.66, "end": 2679.12, "word": " the", "probability": 0.919921875}, {"start": 2679.12, "end": 2680.1, "word": " distribution.", "probability": 0.8466796875}, {"start": 2681.3, "end": 2681.52, "word": " So", "probability": 0.95556640625}, {"start": 2681.52, "end": 2681.8, "word": " again,", "probability": 0.85400390625}, {"start": 2681.88, "end": 2682.04, "word": " if", "probability": 0.927734375}, {"start": 2682.04, "end": 2682.14, "word": " the", "probability": 0.919921875}, {"start": 2682.14, "end": 2682.52, "word": " data", "probability": 0.9423828125}, {"start": 2682.52, "end": 2683.3, "word": " are", "probability": 0.9443359375}, {"start": 2683.3, "end": 2683.76, "word": " symmetric", "probability": 0.85205078125}, {"start": 2683.76, "end": 2684.28, "word": " around", "probability": 0.8974609375}, {"start": 2684.28, "end": 2684.52, "word": " the", "probability": 
0.9140625}, {"start": 2684.52, "end": 2684.86, "word": " median,", "probability": 0.94775390625}], "temperature": 1.0}, {"id": 99, "seek": 269973, "start": 2687.33, "end": 2699.73, "text": " And the central line, this box, and central line are centered between the endpoints. I mean, this vertical line is centered between these two endpoints.", "tokens": [400, 264, 5777, 1622, 11, 341, 2424, 11, 293, 5777, 1622, 366, 18988, 1296, 264, 917, 20552, 13, 286, 914, 11, 341, 9429, 1622, 307, 18988, 1296, 613, 732, 917, 20552, 13], "avg_logprob": -0.16015625, "compression_ratio": 1.6105263157894736, "no_speech_prob": 0.0, "words": [{"start": 2687.33, "end": 2687.73, "word": " And", "probability": 0.84912109375}, {"start": 2687.73, "end": 2687.97, "word": " the", "probability": 0.70654296875}, {"start": 2687.97, "end": 2688.27, "word": " central", "probability": 0.92333984375}, {"start": 2688.27, "end": 2688.73, "word": " line,", "probability": 0.9306640625}, {"start": 2689.09, "end": 2689.35, "word": " this", "probability": 0.93115234375}, {"start": 2689.35, "end": 2689.79, "word": " box,", "probability": 0.93994140625}, {"start": 2690.61, "end": 2690.69, "word": " and", "probability": 0.93017578125}, {"start": 2690.69, "end": 2691.11, "word": " central", "probability": 0.9013671875}, {"start": 2691.11, "end": 2691.61, "word": " line", "probability": 0.93310546875}, {"start": 2691.61, "end": 2693.13, "word": " are", "probability": 0.7802734375}, {"start": 2693.13, "end": 2693.91, "word": " centered", "probability": 0.8759765625}, {"start": 2693.91, "end": 2694.35, "word": " between", "probability": 0.880859375}, {"start": 2694.35, "end": 2694.55, "word": " the", "probability": 0.9091796875}, {"start": 2694.55, "end": 2695.05, "word": " endpoints.", "probability": 0.75146484375}, {"start": 2695.53, "end": 2695.65, "word": " I", "probability": 0.96826171875}, {"start": 2695.65, "end": 2695.89, "word": " mean,", "probability": 0.9658203125}, {"start": 2696.71, "end": 2696.91, 
"word": " this", "probability": 0.9375}, {"start": 2696.91, "end": 2697.25, "word": " vertical", "probability": 0.6123046875}, {"start": 2697.25, "end": 2697.55, "word": " line", "probability": 0.91064453125}, {"start": 2697.55, "end": 2697.95, "word": " is", "probability": 0.79296875}, {"start": 2697.95, "end": 2698.31, "word": " centered", "probability": 0.87939453125}, {"start": 2698.31, "end": 2698.75, "word": " between", "probability": 0.8720703125}, {"start": 2698.75, "end": 2699.03, "word": " these", "probability": 0.86328125}, {"start": 2699.03, "end": 2699.21, "word": " two", "probability": 0.93212890625}, {"start": 2699.21, "end": 2699.73, "word": " endpoints.", "probability": 0.880126953125}], "temperature": 1.0}, {"id": 100, "seek": 272678, "start": 2700.42, "end": 2726.78, "text": " between Q1 and Q3. And the whole box plot is centered between the smallest and the largest value. And also the distance between the median and the smallest is roughly equal to the distance between the median and the largest. 
So you can tell something about the shape of the distribution by using the box plot.", "tokens": [1296, 1249, 16, 293, 1249, 18, 13, 400, 264, 1379, 2424, 7542, 307, 18988, 1296, 264, 16998, 293, 264, 6443, 2158, 13, 400, 611, 264, 4560, 1296, 264, 26779, 293, 264, 16998, 307, 9810, 2681, 281, 264, 4560, 1296, 264, 26779, 293, 264, 6443, 13, 407, 291, 393, 980, 746, 466, 264, 3909, 295, 264, 7316, 538, 1228, 264, 2424, 7542, 13], "avg_logprob": -0.20052082907585872, "compression_ratio": 1.834319526627219, "no_speech_prob": 0.0, "words": [{"start": 2700.42, "end": 2700.72, "word": " between", "probability": 0.47412109375}, {"start": 2700.72, "end": 2701.12, "word": " Q1", "probability": 0.83544921875}, {"start": 2701.12, "end": 2701.26, "word": " and", "probability": 0.81396484375}, {"start": 2701.26, "end": 2701.74, "word": " Q3.", "probability": 0.99609375}, {"start": 2702.22, "end": 2702.78, "word": " And", "probability": 0.89111328125}, {"start": 2702.78, "end": 2702.94, "word": " the", "probability": 0.88818359375}, {"start": 2702.94, "end": 2703.12, "word": " whole", "probability": 0.8779296875}, {"start": 2703.12, "end": 2703.38, "word": " box", "probability": 0.419189453125}, {"start": 2703.38, "end": 2703.56, "word": " plot", "probability": 0.658203125}, {"start": 2703.56, "end": 2703.92, "word": " is", "probability": 0.84423828125}, {"start": 2703.92, "end": 2704.18, "word": " centered", "probability": 0.70654296875}, {"start": 2704.18, "end": 2704.64, "word": " between", "probability": 0.876953125}, {"start": 2704.64, "end": 2704.86, "word": " the", "probability": 0.67822265625}, {"start": 2704.86, "end": 2705.18, "word": " smallest", "probability": 0.93359375}, {"start": 2705.18, "end": 2705.66, "word": " and", "probability": 0.93408203125}, {"start": 2705.66, "end": 2705.8, "word": " the", "probability": 0.84033203125}, {"start": 2705.8, "end": 2706.14, "word": " largest", "probability": 0.9072265625}, {"start": 2706.14, "end": 2706.52, "word": " 
value.", "probability": 0.8701171875}, {"start": 2706.84, "end": 2707.1, "word": " And", "probability": 0.93896484375}, {"start": 2707.1, "end": 2707.38, "word": " also", "probability": 0.85546875}, {"start": 2707.38, "end": 2707.6, "word": " the", "probability": 0.625}, {"start": 2707.6, "end": 2707.9, "word": " distance", "probability": 0.94091796875}, {"start": 2707.9, "end": 2708.42, "word": " between", "probability": 0.86669921875}, {"start": 2708.42, "end": 2709.32, "word": " the", "probability": 0.912109375}, {"start": 2709.32, "end": 2709.6, "word": " median", "probability": 0.91796875}, {"start": 2709.6, "end": 2710.68, "word": " and", "probability": 0.92236328125}, {"start": 2710.68, "end": 2710.84, "word": " the", "probability": 0.90625}, {"start": 2710.84, "end": 2711.3, "word": " smallest", "probability": 0.91845703125}, {"start": 2711.3, "end": 2712.5, "word": " is", "probability": 0.892578125}, {"start": 2712.5, "end": 2712.92, "word": " roughly", "probability": 0.84912109375}, {"start": 2712.92, "end": 2713.32, "word": " equal", "probability": 0.89404296875}, {"start": 2713.32, "end": 2713.54, "word": " to", "probability": 0.83544921875}, {"start": 2713.54, "end": 2713.64, "word": " the", "probability": 0.7734375}, {"start": 2713.64, "end": 2713.92, "word": " distance", "probability": 0.916015625}, {"start": 2713.92, "end": 2714.32, "word": " between", "probability": 0.873046875}, {"start": 2714.32, "end": 2715.0, "word": " the", "probability": 0.9111328125}, {"start": 2715.0, "end": 2715.28, "word": " median", "probability": 0.97119140625}, {"start": 2715.28, "end": 2716.18, "word": " and", "probability": 0.9345703125}, {"start": 2716.18, "end": 2717.24, "word": " the", "probability": 0.900390625}, {"start": 2717.24, "end": 2717.96, "word": " largest.", "probability": 0.9072265625}, {"start": 2718.28, "end": 2718.64, "word": " So", "probability": 0.939453125}, {"start": 2718.64, "end": 2719.3, "word": " you", "probability": 0.75341796875}, 
{"start": 2719.3, "end": 2719.56, "word": " can", "probability": 0.939453125}, {"start": 2719.56, "end": 2719.76, "word": " tell", "probability": 0.75537109375}, {"start": 2719.76, "end": 2720.06, "word": " something", "probability": 0.86181640625}, {"start": 2720.06, "end": 2720.4, "word": " about", "probability": 0.9052734375}, {"start": 2720.4, "end": 2720.62, "word": " the", "probability": 0.916015625}, {"start": 2720.62, "end": 2720.98, "word": " shape", "probability": 0.90869140625}, {"start": 2720.98, "end": 2721.88, "word": " of", "probability": 0.96533203125}, {"start": 2721.88, "end": 2722.0, "word": " the", "probability": 0.853515625}, {"start": 2722.0, "end": 2722.4, "word": " distribution", "probability": 0.85302734375}, {"start": 2722.4, "end": 2722.66, "word": " by", "probability": 0.96337890625}, {"start": 2722.66, "end": 2723.82, "word": " using", "probability": 0.931640625}, {"start": 2723.82, "end": 2724.88, "word": " the", "probability": 0.658203125}, {"start": 2724.88, "end": 2725.78, "word": " box", "probability": 0.350341796875}, {"start": 2725.78, "end": 2726.78, "word": " plot.", "probability": 0.5654296875}], "temperature": 1.0}, {"id": 101, "seek": 276041, "start": 2732.87, "end": 2760.41, "text": " The graph in the middle. Here median and median are the same. The box plot, we have here the median in the middle of the box, also in the middle of the entire data. So you can say that the distribution of this data is symmetric or is bell-shaped. It's normal distribution. 
On the other hand, if you look here, you will see that the median", "tokens": [440, 4295, 294, 264, 2808, 13, 1692, 26779, 293, 26779, 366, 264, 912, 13, 440, 2424, 7542, 11, 321, 362, 510, 264, 26779, 294, 264, 2808, 295, 264, 2424, 11, 611, 294, 264, 2808, 295, 264, 2302, 1412, 13, 407, 291, 393, 584, 300, 264, 7316, 295, 341, 1412, 307, 32330, 420, 307, 4549, 12, 23103, 13, 467, 311, 2710, 7316, 13, 1282, 264, 661, 1011, 11, 498, 291, 574, 510, 11, 291, 486, 536, 300, 264, 26779], "avg_logprob": -0.19471914292890816, "compression_ratio": 1.765625, "no_speech_prob": 0.0, "words": [{"start": 2732.87, "end": 2733.07, "word": " The", "probability": 0.22216796875}, {"start": 2733.07, "end": 2733.31, "word": " graph", "probability": 0.90576171875}, {"start": 2733.31, "end": 2733.51, "word": " in", "probability": 0.8935546875}, {"start": 2733.51, "end": 2733.69, "word": " the", "probability": 0.923828125}, {"start": 2733.69, "end": 2733.99, "word": " middle.", "probability": 0.94091796875}, {"start": 2734.85, "end": 2735.25, "word": " Here", "probability": 0.80224609375}, {"start": 2735.25, "end": 2735.69, "word": " median", "probability": 0.67236328125}, {"start": 2735.69, "end": 2735.81, "word": " and", "probability": 0.69921875}, {"start": 2735.81, "end": 2736.11, "word": " median", "probability": 0.92529296875}, {"start": 2736.11, "end": 2736.39, "word": " are", "probability": 0.935546875}, {"start": 2736.39, "end": 2736.55, "word": " the", "probability": 0.91943359375}, {"start": 2736.55, "end": 2736.89, "word": " same.", "probability": 0.90576171875}, {"start": 2738.17, "end": 2738.29, "word": " The", "probability": 0.873046875}, {"start": 2738.29, "end": 2738.49, "word": " box", "probability": 0.92626953125}, {"start": 2738.49, "end": 2738.93, "word": " plot,", "probability": 0.8994140625}, {"start": 2739.21, "end": 2739.51, "word": " we", "probability": 0.9423828125}, {"start": 2739.51, "end": 2739.69, "word": " have", "probability": 0.9453125}, {"start": 
2739.69, "end": 2739.89, "word": " here", "probability": 0.85693359375}, {"start": 2739.89, "end": 2740.11, "word": " the", "probability": 0.7978515625}, {"start": 2740.11, "end": 2740.41, "word": " median", "probability": 0.9736328125}, {"start": 2740.41, "end": 2742.13, "word": " in", "probability": 0.79052734375}, {"start": 2742.13, "end": 2742.27, "word": " the", "probability": 0.92333984375}, {"start": 2742.27, "end": 2742.43, "word": " middle", "probability": 0.9609375}, {"start": 2742.43, "end": 2742.55, "word": " of", "probability": 0.96630859375}, {"start": 2742.55, "end": 2742.67, "word": " the", "probability": 0.919921875}, {"start": 2742.67, "end": 2742.99, "word": " box,", "probability": 0.94873046875}, {"start": 2743.31, "end": 2743.57, "word": " also", "probability": 0.80322265625}, {"start": 2743.57, "end": 2743.73, "word": " in", "probability": 0.89013671875}, {"start": 2743.73, "end": 2743.83, "word": " the", "probability": 0.9169921875}, {"start": 2743.83, "end": 2743.99, "word": " middle", "probability": 0.955078125}, {"start": 2743.99, "end": 2744.15, "word": " of", "probability": 0.9658203125}, {"start": 2744.15, "end": 2744.27, "word": " the", "probability": 0.91796875}, {"start": 2744.27, "end": 2744.63, "word": " entire", "probability": 0.896484375}, {"start": 2744.63, "end": 2745.09, "word": " data.", "probability": 0.9482421875}, {"start": 2745.49, "end": 2745.87, "word": " So", "probability": 0.953125}, {"start": 2745.87, "end": 2746.01, "word": " you", "probability": 0.763671875}, {"start": 2746.01, "end": 2746.15, "word": " can", "probability": 0.94580078125}, {"start": 2746.15, "end": 2746.37, "word": " say", "probability": 0.52685546875}, {"start": 2746.37, "end": 2746.67, "word": " that", "probability": 0.927734375}, {"start": 2746.67, "end": 2747.39, "word": " the", "probability": 0.8505859375}, {"start": 2747.39, "end": 2747.87, "word": " distribution", "probability": 0.810546875}, {"start": 2747.87, "end": 2748.13, "word": " of", 
"probability": 0.9443359375}, {"start": 2748.13, "end": 2748.27, "word": " this", "probability": 0.91357421875}, {"start": 2748.27, "end": 2748.49, "word": " data", "probability": 0.94775390625}, {"start": 2748.49, "end": 2748.71, "word": " is", "probability": 0.943359375}, {"start": 2748.71, "end": 2749.07, "word": " symmetric", "probability": 0.73291015625}, {"start": 2749.07, "end": 2749.87, "word": " or", "probability": 0.446044921875}, {"start": 2749.87, "end": 2750.03, "word": " is", "probability": 0.40478515625}, {"start": 2750.03, "end": 2750.21, "word": " bell", "probability": 0.93310546875}, {"start": 2750.21, "end": 2750.53, "word": "-shaped.", "probability": 0.5921630859375}, {"start": 2753.03, "end": 2753.57, "word": " It's", "probability": 0.85888671875}, {"start": 2753.57, "end": 2753.85, "word": " normal", "probability": 0.7236328125}, {"start": 2753.85, "end": 2754.31, "word": " distribution.", "probability": 0.83837890625}, {"start": 2755.13, "end": 2755.41, "word": " On", "probability": 0.9521484375}, {"start": 2755.41, "end": 2755.55, "word": " the", "probability": 0.923828125}, {"start": 2755.55, "end": 2755.75, "word": " other", "probability": 0.890625}, {"start": 2755.75, "end": 2756.13, "word": " hand,", "probability": 0.90576171875}, {"start": 2756.89, "end": 2757.03, "word": " if", "probability": 0.9365234375}, {"start": 2757.03, "end": 2757.05, "word": " you", "probability": 0.904296875}, {"start": 2757.05, "end": 2757.23, "word": " look", "probability": 0.966796875}, {"start": 2757.23, "end": 2757.51, "word": " here,", "probability": 0.85888671875}, {"start": 2758.29, "end": 2758.43, "word": " you", "probability": 0.88623046875}, {"start": 2758.43, "end": 2758.55, "word": " will", "probability": 0.84130859375}, {"start": 2758.55, "end": 2758.71, "word": " see", "probability": 0.923828125}, {"start": 2758.71, "end": 2759.19, "word": " that", "probability": 0.93603515625}, {"start": 2759.19, "end": 2760.11, "word": " the", "probability": 
0.90283203125}, {"start": 2760.11, "end": 2760.41, "word": " median", "probability": 0.9765625}], "temperature": 1.0}, {"id": 102, "seek": 278306, "start": 2761.76, "end": 2783.06, "text": " is not in the center of the box. It's near Q3. So the left tail, I mean, the distance between the median and the smallest, this tail is longer than the right tail. In this case, it's called left skewed or skewed to the left.", "tokens": [307, 406, 294, 264, 3056, 295, 264, 2424, 13, 467, 311, 2651, 1249, 18, 13, 407, 264, 1411, 6838, 11, 286, 914, 11, 264, 4560, 1296, 264, 26779, 293, 264, 16998, 11, 341, 6838, 307, 2854, 813, 264, 558, 6838, 13, 682, 341, 1389, 11, 309, 311, 1219, 1411, 8756, 26896, 420, 8756, 26896, 281, 264, 1411, 13], "avg_logprob": -0.14512711409795082, "compression_ratio": 1.5517241379310345, "no_speech_prob": 0.0, "words": [{"start": 2761.76, "end": 2762.02, "word": " is", "probability": 0.40234375}, {"start": 2762.02, "end": 2762.2, "word": " not", "probability": 0.94970703125}, {"start": 2762.2, "end": 2762.36, "word": " in", "probability": 0.92138671875}, {"start": 2762.36, "end": 2762.5, "word": " the", "probability": 0.92138671875}, {"start": 2762.5, "end": 2762.78, "word": " center", "probability": 0.8916015625}, {"start": 2762.78, "end": 2763.08, "word": " of", "probability": 0.96826171875}, {"start": 2763.08, "end": 2763.2, "word": " the", "probability": 0.92529296875}, {"start": 2763.2, "end": 2763.5, "word": " box.", "probability": 0.94921875}, {"start": 2765.26, "end": 2765.86, "word": " It's", "probability": 0.964599609375}, {"start": 2765.86, "end": 2766.16, "word": " near", "probability": 0.8271484375}, {"start": 2766.16, "end": 2766.72, "word": " Q3.", "probability": 0.849609375}, {"start": 2767.92, "end": 2768.28, "word": " So", "probability": 0.92431640625}, {"start": 2768.28, "end": 2768.68, "word": " the", "probability": 0.60009765625}, {"start": 2768.68, "end": 2768.98, "word": " left", "probability": 0.734375}, {"start": 2768.98, 
"end": 2769.52, "word": " tail,", "probability": 0.86572265625}, {"start": 2770.7, "end": 2770.9, "word": " I", "probability": 0.9423828125}, {"start": 2770.9, "end": 2771.12, "word": " mean,", "probability": 0.9677734375}, {"start": 2771.5, "end": 2771.6, "word": " the", "probability": 0.91064453125}, {"start": 2771.6, "end": 2772.02, "word": " distance", "probability": 0.9365234375}, {"start": 2772.02, "end": 2772.58, "word": " between", "probability": 0.865234375}, {"start": 2772.58, "end": 2772.88, "word": " the", "probability": 0.9228515625}, {"start": 2772.88, "end": 2773.16, "word": " median", "probability": 0.8349609375}, {"start": 2773.16, "end": 2773.44, "word": " and", "probability": 0.9345703125}, {"start": 2773.44, "end": 2773.6, "word": " the", "probability": 0.8818359375}, {"start": 2773.6, "end": 2773.98, "word": " smallest,", "probability": 0.93505859375}, {"start": 2774.74, "end": 2775.14, "word": " this", "probability": 0.9453125}, {"start": 2775.14, "end": 2775.5, "word": " tail", "probability": 0.85791015625}, {"start": 2775.5, "end": 2776.24, "word": " is", "probability": 0.87939453125}, {"start": 2776.24, "end": 2776.62, "word": " longer", "probability": 0.935546875}, {"start": 2776.62, "end": 2776.94, "word": " than", "probability": 0.9443359375}, {"start": 2776.94, "end": 2777.14, "word": " the", "probability": 0.90087890625}, {"start": 2777.14, "end": 2777.32, "word": " right", "probability": 0.927734375}, {"start": 2777.32, "end": 2777.66, "word": " tail.", "probability": 0.8583984375}, {"start": 2779.22, "end": 2779.5, "word": " In", "probability": 0.84619140625}, {"start": 2779.5, "end": 2779.7, "word": " this", "probability": 0.9453125}, {"start": 2779.7, "end": 2780.0, "word": " case,", "probability": 0.9130859375}, {"start": 2780.08, "end": 2780.2, "word": " it's", "probability": 0.962158203125}, {"start": 2780.2, "end": 2780.6, "word": " called", "probability": 0.89306640625}, {"start": 2780.6, "end": 2781.5, "word": " left", 
"probability": 0.78662109375}, {"start": 2781.5, "end": 2782.08, "word": " skewed", "probability": 0.934814453125}, {"start": 2782.08, "end": 2782.3, "word": " or", "probability": 0.638671875}, {"start": 2782.3, "end": 2782.62, "word": " skewed", "probability": 0.95263671875}, {"start": 2782.62, "end": 2782.76, "word": " to", "probability": 0.95166015625}, {"start": 2782.76, "end": 2782.88, "word": " the", "probability": 0.92138671875}, {"start": 2782.88, "end": 2783.06, "word": " left.", "probability": 0.9541015625}], "temperature": 1.0}, {"id": 103, "seek": 281371, "start": 2784.17, "end": 2813.71, "text": " or negative skewness. So if the data is not symmetric, it might be left skewed. I mean, the left tail is longer than the right tail. On the other hand, if the median is located near Q1, it means the right tail is longer than the left tail, and it's called positive skewed or right skewed.", "tokens": [420, 3671, 8756, 895, 442, 13, 407, 498, 264, 1412, 307, 406, 32330, 11, 309, 1062, 312, 1411, 8756, 26896, 13, 286, 914, 11, 264, 1411, 6838, 307, 2854, 813, 264, 558, 6838, 13, 1282, 264, 661, 1011, 11, 498, 264, 26779, 307, 6870, 2651, 1249, 16, 11, 309, 1355, 264, 558, 6838, 307, 2854, 813, 264, 1411, 6838, 11, 293, 309, 311, 1219, 3353, 8756, 26896, 420, 558, 8756, 26896, 13], "avg_logprob": -0.11386986219719665, "compression_ratio": 1.7202380952380953, "no_speech_prob": 0.0, "words": [{"start": 2784.17, "end": 2784.49, "word": " or", "probability": 0.36328125}, {"start": 2784.49, "end": 2784.85, "word": " negative", "probability": 0.921875}, {"start": 2784.85, "end": 2785.57, "word": " skewness.", "probability": 0.9739583333333334}, {"start": 2786.09, "end": 2786.69, "word": " So", "probability": 0.94384765625}, {"start": 2786.69, "end": 2786.83, "word": " if", "probability": 0.77099609375}, {"start": 2786.83, "end": 2786.95, "word": " the", "probability": 0.9130859375}, {"start": 2786.95, "end": 2787.25, "word": " data", "probability": 0.95166015625}, 
{"start": 2787.25, "end": 2787.89, "word": " is", "probability": 0.943359375}, {"start": 2787.89, "end": 2788.11, "word": " not", "probability": 0.9228515625}, {"start": 2788.11, "end": 2788.51, "word": " symmetric,", "probability": 0.87353515625}, {"start": 2789.33, "end": 2789.51, "word": " it", "probability": 0.931640625}, {"start": 2789.51, "end": 2789.77, "word": " might", "probability": 0.880859375}, {"start": 2789.77, "end": 2790.35, "word": " be", "probability": 0.9462890625}, {"start": 2790.35, "end": 2791.21, "word": " left", "probability": 0.91064453125}, {"start": 2791.21, "end": 2791.91, "word": " skewed.", "probability": 0.931640625}, {"start": 2792.31, "end": 2792.67, "word": " I", "probability": 0.99267578125}, {"start": 2792.67, "end": 2792.89, "word": " mean,", "probability": 0.96337890625}, {"start": 2793.05, "end": 2793.27, "word": " the", "probability": 0.90576171875}, {"start": 2793.27, "end": 2793.57, "word": " left", "probability": 0.9423828125}, {"start": 2793.57, "end": 2793.91, "word": " tail", "probability": 0.890625}, {"start": 2793.91, "end": 2795.63, "word": " is", "probability": 0.79833984375}, {"start": 2795.63, "end": 2795.93, "word": " longer", "probability": 0.9453125}, {"start": 2795.93, "end": 2796.23, "word": " than", "probability": 0.92822265625}, {"start": 2796.23, "end": 2796.37, "word": " the", "probability": 0.7763671875}, {"start": 2796.37, "end": 2796.59, "word": " right", "probability": 0.92138671875}, {"start": 2796.59, "end": 2796.89, "word": " tail.", "probability": 0.8369140625}, {"start": 2797.79, "end": 2798.43, "word": " On", "probability": 0.9248046875}, {"start": 2798.43, "end": 2798.53, "word": " the", "probability": 0.92822265625}, {"start": 2798.53, "end": 2798.73, "word": " other", "probability": 0.8837890625}, {"start": 2798.73, "end": 2799.11, "word": " hand,", "probability": 0.90673828125}, {"start": 2800.35, "end": 2800.59, "word": " if", "probability": 0.94873046875}, {"start": 2800.59, "end": 
2800.77, "word": " the", "probability": 0.92041015625}, {"start": 2800.77, "end": 2801.13, "word": " median", "probability": 0.94775390625}, {"start": 2801.13, "end": 2802.63, "word": " is", "probability": 0.9453125}, {"start": 2802.63, "end": 2803.23, "word": " located", "probability": 0.955078125}, {"start": 2803.23, "end": 2803.63, "word": " near", "probability": 0.912109375}, {"start": 2803.63, "end": 2804.09, "word": " Q1,", "probability": 0.91796875}, {"start": 2804.91, "end": 2805.19, "word": " it", "probability": 0.94140625}, {"start": 2805.19, "end": 2805.45, "word": " means", "probability": 0.923828125}, {"start": 2805.45, "end": 2805.67, "word": " the", "probability": 0.888671875}, {"start": 2805.67, "end": 2805.95, "word": " right", "probability": 0.92724609375}, {"start": 2805.95, "end": 2806.35, "word": " tail", "probability": 0.90185546875}, {"start": 2806.35, "end": 2807.93, "word": " is", "probability": 0.84619140625}, {"start": 2807.93, "end": 2808.21, "word": " longer", "probability": 0.93994140625}, {"start": 2808.21, "end": 2808.43, "word": " than", "probability": 0.896484375}, {"start": 2808.43, "end": 2808.63, "word": " the", "probability": 0.89208984375}, {"start": 2808.63, "end": 2808.85, "word": " left", "probability": 0.94873046875}, {"start": 2808.85, "end": 2809.19, "word": " tail,", "probability": 0.88720703125}, {"start": 2809.25, "end": 2809.45, "word": " and", "probability": 0.93017578125}, {"start": 2809.45, "end": 2809.61, "word": " it's", "probability": 0.873291015625}, {"start": 2809.61, "end": 2809.93, "word": " called", "probability": 0.86328125}, {"start": 2809.93, "end": 2811.09, "word": " positive", "probability": 0.89697265625}, {"start": 2811.09, "end": 2811.77, "word": " skewed", "probability": 0.953125}, {"start": 2811.77, "end": 2812.61, "word": " or", "probability": 0.86572265625}, {"start": 2812.61, "end": 2813.49, "word": " right", "probability": 0.9228515625}, {"start": 2813.49, "end": 2813.71, "word": " skewed.", 
"probability": 0.956298828125}], "temperature": 1.0}, {"id": 104, "seek": 284117, "start": 2815.49, "end": 2841.17, "text": " So for symmetric distribution, the median in the middle, for left or right skewed, the median either is close to the Q3 or skewed distribution to the left, or the median is close to Q1 and the distribution is right skewed or has positive skewness. That's how can we tell", "tokens": [407, 337, 32330, 7316, 11, 264, 26779, 294, 264, 2808, 11, 337, 1411, 420, 558, 8756, 26896, 11, 264, 26779, 2139, 307, 1998, 281, 264, 1249, 18, 420, 8756, 26896, 7316, 281, 264, 1411, 11, 420, 264, 26779, 307, 1998, 281, 1249, 16, 293, 264, 7316, 307, 558, 8756, 26896, 420, 575, 3353, 8756, 895, 442, 13, 663, 311, 577, 393, 321, 980], "avg_logprob": -0.20312500256113708, "compression_ratio": 1.7828947368421053, "no_speech_prob": 0.0, "words": [{"start": 2815.49, "end": 2815.87, "word": " So", "probability": 0.8359375}, {"start": 2815.87, "end": 2816.15, "word": " for", "probability": 0.75146484375}, {"start": 2816.15, "end": 2816.47, "word": " symmetric", "probability": 0.73486328125}, {"start": 2816.47, "end": 2817.13, "word": " distribution,", "probability": 0.8134765625}, {"start": 2817.33, "end": 2817.43, "word": " the", "probability": 0.87646484375}, {"start": 2817.43, "end": 2817.77, "word": " median", "probability": 0.93212890625}, {"start": 2817.77, "end": 2818.33, "word": " in", "probability": 0.3798828125}, {"start": 2818.33, "end": 2818.47, "word": " the", "probability": 0.9296875}, {"start": 2818.47, "end": 2818.71, "word": " middle,", "probability": 0.90576171875}, {"start": 2819.49, "end": 2820.01, "word": " for", "probability": 0.79296875}, {"start": 2820.01, "end": 2820.31, "word": " left", "probability": 0.935546875}, {"start": 2820.31, "end": 2820.55, "word": " or", "probability": 0.9560546875}, {"start": 2820.55, "end": 2821.25, "word": " right", "probability": 0.92138671875}, {"start": 2821.25, "end": 2821.85, "word": " skewed,", 
"probability": 0.95703125}, {"start": 2822.35, "end": 2822.47, "word": " the", "probability": 0.9091796875}, {"start": 2822.47, "end": 2822.81, "word": " median", "probability": 0.9560546875}, {"start": 2822.81, "end": 2823.35, "word": " either", "probability": 0.3662109375}, {"start": 2823.35, "end": 2823.69, "word": " is", "probability": 0.92626953125}, {"start": 2823.69, "end": 2824.19, "word": " close", "probability": 0.73486328125}, {"start": 2824.19, "end": 2824.41, "word": " to", "probability": 0.96337890625}, {"start": 2824.41, "end": 2824.57, "word": " the", "probability": 0.85400390625}, {"start": 2824.57, "end": 2824.99, "word": " Q3", "probability": 0.904541015625}, {"start": 2824.99, "end": 2825.99, "word": " or", "probability": 0.40185546875}, {"start": 2825.99, "end": 2826.35, "word": " skewed", "probability": 0.952392578125}, {"start": 2826.35, "end": 2826.97, "word": " distribution", "probability": 0.85205078125}, {"start": 2826.97, "end": 2827.77, "word": " to", "probability": 0.80859375}, {"start": 2827.77, "end": 2827.89, "word": " the", "probability": 0.91796875}, {"start": 2827.89, "end": 2828.09, "word": " left,", "probability": 0.9423828125}, {"start": 2828.83, "end": 2829.23, "word": " or", "probability": 0.95654296875}, {"start": 2829.23, "end": 2829.93, "word": " the", "probability": 0.90869140625}, {"start": 2829.93, "end": 2830.27, "word": " median", "probability": 0.9677734375}, {"start": 2830.27, "end": 2831.31, "word": " is", "probability": 0.92919921875}, {"start": 2831.31, "end": 2831.73, "word": " close", "probability": 0.84423828125}, {"start": 2831.73, "end": 2831.95, "word": " to", "probability": 0.958984375}, {"start": 2831.95, "end": 2832.39, "word": " Q1", "probability": 0.985595703125}, {"start": 2832.39, "end": 2834.15, "word": " and", "probability": 0.353271484375}, {"start": 2834.15, "end": 2834.29, "word": " the", "probability": 0.5693359375}, {"start": 2834.29, "end": 2834.69, "word": " distribution", "probability": 
0.71630859375}, {"start": 2834.69, "end": 2834.91, "word": " is", "probability": 0.935546875}, {"start": 2834.91, "end": 2835.19, "word": " right", "probability": 0.87109375}, {"start": 2835.19, "end": 2835.79, "word": " skewed", "probability": 0.91455078125}, {"start": 2835.79, "end": 2836.53, "word": " or", "probability": 0.830078125}, {"start": 2836.53, "end": 2837.33, "word": " has", "probability": 0.935546875}, {"start": 2837.33, "end": 2838.05, "word": " positive", "probability": 0.9267578125}, {"start": 2838.05, "end": 2838.93, "word": " skewness.", "probability": 0.9773763020833334}, {"start": 2840.05, "end": 2840.47, "word": " That's", "probability": 0.935546875}, {"start": 2840.47, "end": 2840.57, "word": " how", "probability": 0.904296875}, {"start": 2840.57, "end": 2840.75, "word": " can", "probability": 0.82080078125}, {"start": 2840.75, "end": 2840.89, "word": " we", "probability": 0.94482421875}, {"start": 2840.89, "end": 2841.17, "word": " tell", "probability": 0.88671875}], "temperature": 1.0}, {"id": 105, "seek": 287088, "start": 2842.86, "end": 2870.88, "text": " spread center and the shape by using the box plot. So center is the value in the middle, Q2 or the median. Spread is the distance between Q1 and Q3. So Q3 minus Q1 gives IQR. And finally, you can tell something about the shape of the distribution by just looking at the scatter plot. 
Let's look at", "tokens": [3974, 3056, 293, 264, 3909, 538, 1228, 264, 2424, 7542, 13, 407, 3056, 307, 264, 2158, 294, 264, 2808, 11, 1249, 17, 420, 264, 26779, 13, 30308, 307, 264, 4560, 1296, 1249, 16, 293, 1249, 18, 13, 407, 1249, 18, 3175, 1249, 16, 2709, 28921, 49, 13, 400, 2721, 11, 291, 393, 980, 746, 466, 264, 3909, 295, 264, 7316, 538, 445, 1237, 412, 264, 34951, 7542, 13, 961, 311, 574, 412], "avg_logprob": -0.18867722276138932, "compression_ratio": 1.5520833333333333, "no_speech_prob": 0.0, "words": [{"start": 2842.86, "end": 2843.5, "word": " spread", "probability": 0.1402587890625}, {"start": 2843.5, "end": 2844.14, "word": " center", "probability": 0.55810546875}, {"start": 2844.14, "end": 2844.34, "word": " and", "probability": 0.671875}, {"start": 2844.34, "end": 2844.48, "word": " the", "probability": 0.544921875}, {"start": 2844.48, "end": 2844.74, "word": " shape", "probability": 0.9169921875}, {"start": 2844.74, "end": 2845.48, "word": " by", "probability": 0.83349609375}, {"start": 2845.48, "end": 2845.86, "word": " using", "probability": 0.9296875}, {"start": 2845.86, "end": 2846.06, "word": " the", "probability": 0.87646484375}, {"start": 2846.06, "end": 2846.26, "word": " box", "probability": 0.72900390625}, {"start": 2846.26, "end": 2846.46, "word": " plot.", "probability": 0.8154296875}, {"start": 2846.7, "end": 2846.82, "word": " So", "probability": 0.88525390625}, {"start": 2846.82, "end": 2847.14, "word": " center", "probability": 0.7763671875}, {"start": 2847.14, "end": 2847.94, "word": " is", "probability": 0.923828125}, {"start": 2847.94, "end": 2848.06, "word": " the", "probability": 0.90380859375}, {"start": 2848.06, "end": 2848.24, "word": " value", "probability": 0.9296875}, {"start": 2848.24, "end": 2848.38, "word": " in", "probability": 0.92236328125}, {"start": 2848.38, "end": 2848.46, "word": " the", "probability": 0.91650390625}, {"start": 2848.46, "end": 2848.7, "word": " middle,", "probability": 0.93798828125}, {"start": 
2848.9, "end": 2849.32, "word": " Q2", "probability": 0.6654052734375}, {"start": 2849.32, "end": 2849.64, "word": " or", "probability": 0.495849609375}, {"start": 2849.64, "end": 2849.84, "word": " the", "probability": 0.82080078125}, {"start": 2849.84, "end": 2850.14, "word": " median.", "probability": 0.89501953125}, {"start": 2851.08, "end": 2851.66, "word": " Spread", "probability": 0.93212890625}, {"start": 2851.66, "end": 2852.32, "word": " is", "probability": 0.93212890625}, {"start": 2852.32, "end": 2852.5, "word": " the", "probability": 0.9140625}, {"start": 2852.5, "end": 2852.86, "word": " distance", "probability": 0.9296875}, {"start": 2852.86, "end": 2853.22, "word": " between", "probability": 0.87060546875}, {"start": 2853.22, "end": 2853.66, "word": " Q1", "probability": 0.978515625}, {"start": 2853.66, "end": 2853.8, "word": " and", "probability": 0.8896484375}, {"start": 2853.8, "end": 2854.26, "word": " Q3.", "probability": 0.990478515625}, {"start": 2854.38, "end": 2854.52, "word": " So", "probability": 0.94921875}, {"start": 2854.52, "end": 2855.02, "word": " Q3", "probability": 0.966064453125}, {"start": 2855.02, "end": 2855.32, "word": " minus", "probability": 0.84716796875}, {"start": 2855.32, "end": 2855.9, "word": " Q1", "probability": 0.9931640625}, {"start": 2855.9, "end": 2856.58, "word": " gives", "probability": 0.90283203125}, {"start": 2856.58, "end": 2857.42, "word": " IQR.", "probability": 0.891357421875}, {"start": 2858.08, "end": 2858.34, "word": " And", "probability": 0.9306640625}, {"start": 2858.34, "end": 2858.84, "word": " finally,", "probability": 0.82763671875}, {"start": 2858.96, "end": 2859.1, "word": " you", "probability": 0.818359375}, {"start": 2859.1, "end": 2859.32, "word": " can", "probability": 0.94384765625}, {"start": 2859.32, "end": 2859.68, "word": " tell", "probability": 0.8759765625}, {"start": 2859.68, "end": 2860.04, "word": " something", "probability": 0.85986328125}, {"start": 2860.04, "end": 2860.5, 
"word": " about", "probability": 0.90283203125}, {"start": 2860.5, "end": 2861.2, "word": " the", "probability": 0.91650390625}, {"start": 2861.2, "end": 2861.54, "word": " shape", "probability": 0.904296875}, {"start": 2861.54, "end": 2861.88, "word": " of", "probability": 0.96728515625}, {"start": 2861.88, "end": 2862.02, "word": " the", "probability": 0.85498046875}, {"start": 2862.02, "end": 2862.44, "word": " distribution", "probability": 0.84716796875}, {"start": 2862.44, "end": 2862.72, "word": " by", "probability": 0.9609375}, {"start": 2862.72, "end": 2862.94, "word": " just", "probability": 0.9091796875}, {"start": 2862.94, "end": 2863.32, "word": " looking", "probability": 0.90869140625}, {"start": 2863.32, "end": 2863.6, "word": " at", "probability": 0.9599609375}, {"start": 2863.6, "end": 2864.66, "word": " the", "probability": 0.91845703125}, {"start": 2864.66, "end": 2865.14, "word": " scatter", "probability": 0.90576171875}, {"start": 2865.14, "end": 2866.44, "word": " plot.", "probability": 0.85546875}, {"start": 2869.7, "end": 2870.34, "word": " Let's", "probability": 0.929931640625}, {"start": 2870.34, "end": 2870.62, "word": " look", "probability": 0.96337890625}, {"start": 2870.62, "end": 2870.88, "word": " at", "probability": 0.96533203125}], "temperature": 1.0}, {"id": 106, "seek": 289903, "start": 2872.45, "end": 2899.03, "text": " This example, and suppose we have small data set. And let's see how can we construct the MaxPlot. In order to construct MaxPlot, you have to compute minimum first or smallest value, largest value. 
Besides that, you have to compute first and third part time and also Q2.", "tokens": [639, 1365, 11, 293, 7297, 321, 362, 1359, 1412, 992, 13, 400, 718, 311, 536, 577, 393, 321, 7690, 264, 7402, 33710, 310, 13, 682, 1668, 281, 7690, 7402, 33710, 310, 11, 291, 362, 281, 14722, 7285, 700, 420, 16998, 2158, 11, 6443, 2158, 13, 13212, 300, 11, 291, 362, 281, 14722, 700, 293, 2636, 644, 565, 293, 611, 1249, 17, 13], "avg_logprob": -0.2924107067168705, "compression_ratio": 1.569767441860465, "no_speech_prob": 0.0, "words": [{"start": 2872.4500000000003, "end": 2873.15, "word": " This", "probability": 0.40234375}, {"start": 2873.15, "end": 2873.71, "word": " example,", "probability": 0.95556640625}, {"start": 2875.25, "end": 2875.51, "word": " and", "probability": 0.521484375}, {"start": 2875.51, "end": 2875.77, "word": " suppose", "probability": 0.9091796875}, {"start": 2875.77, "end": 2876.03, "word": " we", "probability": 0.93115234375}, {"start": 2876.03, "end": 2876.33, "word": " have", "probability": 0.95068359375}, {"start": 2876.33, "end": 2878.71, "word": " small", "probability": 0.64599609375}, {"start": 2878.71, "end": 2878.97, "word": " data", "probability": 0.6572265625}, {"start": 2878.97, "end": 2879.23, "word": " set.", "probability": 0.8037109375}, {"start": 2880.39, "end": 2881.09, "word": " And", "probability": 0.90380859375}, {"start": 2881.09, "end": 2881.29, "word": " let's", "probability": 0.880615234375}, {"start": 2881.29, "end": 2881.39, "word": " see", "probability": 0.908203125}, {"start": 2881.39, "end": 2881.51, "word": " how", "probability": 0.8974609375}, {"start": 2881.51, "end": 2881.69, "word": " can", "probability": 0.83154296875}, {"start": 2881.69, "end": 2881.89, "word": " we", "probability": 0.953125}, {"start": 2881.89, "end": 2882.43, "word": " construct", "probability": 0.96728515625}, {"start": 2882.43, "end": 2882.59, "word": " the", "probability": 0.73291015625}, {"start": 2882.59, "end": 2882.95, "word": " MaxPlot.", 
"probability": 0.4643961588541667}, {"start": 2883.57, "end": 2884.23, "word": " In", "probability": 0.9580078125}, {"start": 2884.23, "end": 2884.41, "word": " order", "probability": 0.9208984375}, {"start": 2884.41, "end": 2884.65, "word": " to", "probability": 0.97314453125}, {"start": 2884.65, "end": 2885.13, "word": " construct", "probability": 0.97509765625}, {"start": 2885.13, "end": 2885.57, "word": " MaxPlot,", "probability": 0.9267578125}, {"start": 2885.65, "end": 2885.75, "word": " you", "probability": 0.94091796875}, {"start": 2885.75, "end": 2885.91, "word": " have", "probability": 0.9404296875}, {"start": 2885.91, "end": 2886.03, "word": " to", "probability": 0.96533203125}, {"start": 2886.03, "end": 2886.39, "word": " compute", "probability": 0.91845703125}, {"start": 2886.39, "end": 2887.99, "word": " minimum", "probability": 0.55126953125}, {"start": 2887.99, "end": 2888.43, "word": " first", "probability": 0.7998046875}, {"start": 2888.43, "end": 2888.71, "word": " or", "probability": 0.572265625}, {"start": 2888.71, "end": 2889.11, "word": " smallest", "probability": 0.9267578125}, {"start": 2889.11, "end": 2889.51, "word": " value,", "probability": 0.97119140625}, {"start": 2889.81, "end": 2890.95, "word": " largest", "probability": 0.859375}, {"start": 2890.95, "end": 2891.99, "word": " value.", "probability": 0.96923828125}, {"start": 2892.59, "end": 2893.27, "word": " Besides", "probability": 0.392578125}, {"start": 2893.27, "end": 2893.73, "word": " that,", "probability": 0.91845703125}, {"start": 2893.83, "end": 2893.93, "word": " you", "probability": 0.95703125}, {"start": 2893.93, "end": 2894.15, "word": " have", "probability": 0.94580078125}, {"start": 2894.15, "end": 2894.29, "word": " to", "probability": 0.96240234375}, {"start": 2894.29, "end": 2894.65, "word": " compute", "probability": 0.92529296875}, {"start": 2894.65, "end": 2895.25, "word": " first", "probability": 0.86474609375}, {"start": 2895.25, "end": 2896.29, "word": " 
and", "probability": 0.7275390625}, {"start": 2896.29, "end": 2896.63, "word": " third", "probability": 0.6376953125}, {"start": 2896.63, "end": 2897.39, "word": " part", "probability": 0.47021484375}, {"start": 2897.39, "end": 2897.65, "word": " time", "probability": 0.4453125}, {"start": 2897.65, "end": 2897.85, "word": " and", "probability": 0.60791015625}, {"start": 2897.85, "end": 2898.25, "word": " also", "probability": 0.87451171875}, {"start": 2898.25, "end": 2899.03, "word": " Q2.", "probability": 0.65966796875}], "temperature": 1.0}, {"id": 107, "seek": 292819, "start": 2900.41, "end": 2928.19, "text": " For this simple example, Q1 is 2, Q3 is 5, and the median is 3. Smallest is 0, largest is 1 7. Now, be careful here, 1 7 seems to be an outlier. But so far, we don't explain how can we decide if a data value is considered to be an outlier. But at least 1 7.", "tokens": [1171, 341, 2199, 1365, 11, 1249, 16, 307, 568, 11, 1249, 18, 307, 1025, 11, 293, 264, 26779, 307, 805, 13, 15287, 377, 307, 1958, 11, 6443, 307, 502, 1614, 13, 823, 11, 312, 5026, 510, 11, 502, 1614, 2544, 281, 312, 364, 484, 2753, 13, 583, 370, 1400, 11, 321, 500, 380, 2903, 577, 393, 321, 4536, 498, 257, 1412, 2158, 307, 4888, 281, 312, 364, 484, 2753, 13, 583, 412, 1935, 502, 1614, 13], "avg_logprob": -0.1943993444566603, "compression_ratio": 1.4913294797687862, "no_speech_prob": 0.0, "words": [{"start": 2900.41, "end": 2900.77, "word": " For", "probability": 0.82177734375}, {"start": 2900.77, "end": 2901.11, "word": " this", "probability": 0.943359375}, {"start": 2901.11, "end": 2901.63, "word": " simple", "probability": 0.91259765625}, {"start": 2901.63, "end": 2902.23, "word": " example,", "probability": 0.97314453125}, {"start": 2902.75, "end": 2903.77, "word": " Q1", "probability": 0.62353515625}, {"start": 2903.77, "end": 2903.97, "word": " is", "probability": 0.8994140625}, {"start": 2903.97, "end": 2904.31, "word": " 2,", "probability": 0.62158203125}, {"start": 2904.89, "end": 
2905.39, "word": " Q3", "probability": 0.9755859375}, {"start": 2905.39, "end": 2905.63, "word": " is", "probability": 0.94921875}, {"start": 2905.63, "end": 2906.03, "word": " 5,", "probability": 0.96630859375}, {"start": 2906.75, "end": 2907.01, "word": " and", "probability": 0.93798828125}, {"start": 2907.01, "end": 2907.15, "word": " the", "probability": 0.90673828125}, {"start": 2907.15, "end": 2907.57, "word": " median", "probability": 0.94873046875}, {"start": 2907.57, "end": 2908.37, "word": " is", "probability": 0.919921875}, {"start": 2908.37, "end": 2908.91, "word": " 3.", "probability": 0.91943359375}, {"start": 2909.37, "end": 2910.07, "word": " Smallest", "probability": 0.784423828125}, {"start": 2910.07, "end": 2910.23, "word": " is", "probability": 0.94189453125}, {"start": 2910.23, "end": 2910.53, "word": " 0,", "probability": 0.68603515625}, {"start": 2910.93, "end": 2911.47, "word": " largest", "probability": 0.70654296875}, {"start": 2911.47, "end": 2911.85, "word": " is", "probability": 0.9345703125}, {"start": 2911.85, "end": 2912.05, "word": " 1", "probability": 0.654296875}, {"start": 2912.05, "end": 2912.29, "word": " 7.", "probability": 0.197021484375}, {"start": 2912.79, "end": 2913.19, "word": " Now,", "probability": 0.9404296875}, {"start": 2913.67, "end": 2913.99, "word": " be", "probability": 0.94580078125}, {"start": 2913.99, "end": 2914.27, "word": " careful", "probability": 0.935546875}, {"start": 2914.27, "end": 2914.51, "word": " here,", "probability": 0.82080078125}, {"start": 2914.53, "end": 2914.75, "word": " 1", "probability": 0.72705078125}, {"start": 2914.75, "end": 2915.03, "word": " 7", "probability": 0.9501953125}, {"start": 2915.03, "end": 2915.53, "word": " seems", "probability": 0.80078125}, {"start": 2915.53, "end": 2915.73, "word": " to", "probability": 0.96826171875}, {"start": 2915.73, "end": 2915.91, "word": " be", "probability": 0.94384765625}, {"start": 2915.91, "end": 2916.17, "word": " an", "probability": 
0.537109375}, {"start": 2916.17, "end": 2916.61, "word": " outlier.", "probability": 0.7293701171875}, {"start": 2917.19, "end": 2917.91, "word": " But", "probability": 0.7548828125}, {"start": 2917.91, "end": 2918.13, "word": " so", "probability": 0.89111328125}, {"start": 2918.13, "end": 2918.49, "word": " far,", "probability": 0.947265625}, {"start": 2918.81, "end": 2918.99, "word": " we", "probability": 0.93994140625}, {"start": 2918.99, "end": 2919.55, "word": " don't", "probability": 0.951416015625}, {"start": 2919.55, "end": 2920.29, "word": " explain", "probability": 0.85791015625}, {"start": 2920.29, "end": 2921.01, "word": " how", "probability": 0.6083984375}, {"start": 2921.01, "end": 2921.23, "word": " can", "probability": 0.8798828125}, {"start": 2921.23, "end": 2922.49, "word": " we", "probability": 0.87060546875}, {"start": 2922.49, "end": 2922.93, "word": " decide", "probability": 0.7314453125}, {"start": 2922.93, "end": 2923.85, "word": " if", "probability": 0.92578125}, {"start": 2923.85, "end": 2924.01, "word": " a", "probability": 0.9521484375}, {"start": 2924.01, "end": 2924.19, "word": " data", "probability": 0.9375}, {"start": 2924.19, "end": 2924.57, "word": " value", "probability": 0.9677734375}, {"start": 2924.57, "end": 2924.83, "word": " is", "probability": 0.9326171875}, {"start": 2924.83, "end": 2925.19, "word": " considered", "probability": 0.78857421875}, {"start": 2925.19, "end": 2925.41, "word": " to", "probability": 0.9521484375}, {"start": 2925.41, "end": 2925.53, "word": " be", "probability": 0.95166015625}, {"start": 2925.53, "end": 2925.65, "word": " an", "probability": 0.962890625}, {"start": 2925.65, "end": 2926.05, "word": " outlier.", "probability": 0.954833984375}, {"start": 2926.83, "end": 2927.15, "word": " But", "probability": 0.9130859375}, {"start": 2927.15, "end": 2927.33, "word": " at", "probability": 0.94091796875}, {"start": 2927.33, "end": 2927.55, "word": " least", "probability": 0.96044921875}, {"start": 
2927.55, "end": 2927.83, "word": " 1", "probability": 0.7099609375}, {"start": 2927.83, "end": 2928.19, "word": " 7.", "probability": 0.9716796875}], "temperature": 1.0}, {"id": 108, "seek": 295504, "start": 2929.64, "end": 2955.04, "text": " is a suspected value to be an outlier, seems to be. Sometimes you are 95% sure that that point is an outlier, but you cannot tell, because you have to have a specific rule that can decide if that point is an outlier or not. But at least it makes sense that that point is considered maybe an outlier. But let's see how can we construct that first.", "tokens": [307, 257, 26439, 2158, 281, 312, 364, 484, 2753, 11, 2544, 281, 312, 13, 4803, 291, 366, 13420, 4, 988, 300, 300, 935, 307, 364, 484, 2753, 11, 457, 291, 2644, 980, 11, 570, 291, 362, 281, 362, 257, 2685, 4978, 300, 393, 4536, 498, 300, 935, 307, 364, 484, 2753, 420, 406, 13, 583, 412, 1935, 309, 1669, 2020, 300, 300, 935, 307, 4888, 1310, 364, 484, 2753, 13, 583, 718, 311, 536, 577, 393, 321, 7690, 300, 700, 13], "avg_logprob": -0.1668254636773249, "compression_ratio": 1.7794871794871794, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2929.64, "end": 2930.16, "word": " is", "probability": 0.1961669921875}, {"start": 2930.16, "end": 2930.3, "word": " a", "probability": 0.8759765625}, {"start": 2930.3, "end": 2930.76, "word": " suspected", "probability": 0.85693359375}, {"start": 2930.76, "end": 2931.12, "word": " value", "probability": 0.95166015625}, {"start": 2931.12, "end": 2931.3, "word": " to", "probability": 0.95849609375}, {"start": 2931.3, "end": 2931.96, "word": " be", "probability": 0.9521484375}, {"start": 2931.96, "end": 2932.2, "word": " an", "probability": 0.875}, {"start": 2932.2, "end": 2932.62, "word": " outlier,", "probability": 0.931640625}, {"start": 2932.84, "end": 2933.08, "word": " seems", "probability": 0.4716796875}, {"start": 2933.08, "end": 2933.34, "word": " to", "probability": 0.96484375}, {"start": 2933.34, "end": 2933.5, "word": 
" be.", "probability": 0.67626953125}, {"start": 2934.38, "end": 2934.9, "word": " Sometimes", "probability": 0.91162109375}, {"start": 2934.9, "end": 2935.12, "word": " you", "probability": 0.8154296875}, {"start": 2935.12, "end": 2935.3, "word": " are", "probability": 0.91943359375}, {"start": 2935.3, "end": 2935.82, "word": " 95", "probability": 0.953125}, {"start": 2935.82, "end": 2936.16, "word": "%", "probability": 0.79443359375}, {"start": 2936.16, "end": 2936.56, "word": " sure", "probability": 0.91796875}, {"start": 2936.56, "end": 2936.76, "word": " that", "probability": 0.9287109375}, {"start": 2936.76, "end": 2936.98, "word": " that", "probability": 0.74755859375}, {"start": 2936.98, "end": 2937.2, "word": " point", "probability": 0.9365234375}, {"start": 2937.2, "end": 2937.36, "word": " is", "probability": 0.943359375}, {"start": 2937.36, "end": 2937.5, "word": " an", "probability": 0.95703125}, {"start": 2937.5, "end": 2937.8, "word": " outlier,", "probability": 0.943359375}, {"start": 2938.12, "end": 2938.22, "word": " but", "probability": 0.91015625}, {"start": 2938.22, "end": 2938.34, "word": " you", "probability": 0.96044921875}, {"start": 2938.34, "end": 2938.54, "word": " cannot", "probability": 0.8681640625}, {"start": 2938.54, "end": 2938.84, "word": " tell,", "probability": 0.87109375}, {"start": 2939.58, "end": 2939.96, "word": " because", "probability": 0.89794921875}, {"start": 2939.96, "end": 2940.16, "word": " you", "probability": 0.89404296875}, {"start": 2940.16, "end": 2940.34, "word": " have", "probability": 0.9462890625}, {"start": 2940.34, "end": 2940.46, "word": " to", "probability": 0.96875}, {"start": 2940.46, "end": 2940.78, "word": " have", "probability": 0.94873046875}, {"start": 2940.78, "end": 2941.12, "word": " a", "probability": 0.9814453125}, {"start": 2941.12, "end": 2941.62, "word": " specific", "probability": 0.90283203125}, {"start": 2941.62, "end": 2942.04, "word": " rule", "probability": 0.91162109375}, {"start": 
2942.04, "end": 2943.14, "word": " that", "probability": 0.908203125}, {"start": 2943.14, "end": 2943.44, "word": " can", "probability": 0.94140625}, {"start": 2943.44, "end": 2943.88, "word": " decide", "probability": 0.935546875}, {"start": 2943.88, "end": 2944.06, "word": " if", "probability": 0.91259765625}, {"start": 2944.06, "end": 2944.26, "word": " that", "probability": 0.9375}, {"start": 2944.26, "end": 2944.52, "word": " point", "probability": 0.96044921875}, {"start": 2944.52, "end": 2944.72, "word": " is", "probability": 0.94482421875}, {"start": 2944.72, "end": 2944.82, "word": " an", "probability": 0.9599609375}, {"start": 2944.82, "end": 2945.12, "word": " outlier", "probability": 0.952880859375}, {"start": 2945.12, "end": 2945.3, "word": " or", "probability": 0.94482421875}, {"start": 2945.3, "end": 2945.5, "word": " not.", "probability": 0.94287109375}, {"start": 2946.16, "end": 2946.36, "word": " But", "probability": 0.93212890625}, {"start": 2946.36, "end": 2946.56, "word": " at", "probability": 0.9267578125}, {"start": 2946.56, "end": 2946.8, "word": " least", "probability": 0.95703125}, {"start": 2946.8, "end": 2947.4, "word": " it", "probability": 0.60693359375}, {"start": 2947.4, "end": 2947.64, "word": " makes", "probability": 0.8212890625}, {"start": 2947.64, "end": 2947.94, "word": " sense", "probability": 0.818359375}, {"start": 2947.94, "end": 2948.18, "word": " that", "probability": 0.9267578125}, {"start": 2948.18, "end": 2948.46, "word": " that", "probability": 0.8095703125}, {"start": 2948.46, "end": 2948.8, "word": " point", "probability": 0.95703125}, {"start": 2948.8, "end": 2949.88, "word": " is", "probability": 0.92333984375}, {"start": 2949.88, "end": 2950.3, "word": " considered", "probability": 0.78662109375}, {"start": 2950.3, "end": 2950.8, "word": " maybe", "probability": 0.8515625}, {"start": 2950.8, "end": 2952.06, "word": " an", "probability": 0.94287109375}, {"start": 2952.06, "end": 2952.44, "word": " outlier.", 
"probability": 0.953857421875}, {"start": 2952.8, "end": 2953.02, "word": " But", "probability": 0.93115234375}, {"start": 2953.02, "end": 2953.24, "word": " let's", "probability": 0.960693359375}, {"start": 2953.24, "end": 2953.36, "word": " see", "probability": 0.91943359375}, {"start": 2953.36, "end": 2953.48, "word": " how", "probability": 0.8994140625}, {"start": 2953.48, "end": 2953.66, "word": " can", "probability": 0.86767578125}, {"start": 2953.66, "end": 2953.78, "word": " we", "probability": 0.923828125}, {"start": 2953.78, "end": 2954.32, "word": " construct", "probability": 0.95751953125}, {"start": 2954.32, "end": 2954.7, "word": " that", "probability": 0.267822265625}, {"start": 2954.7, "end": 2955.04, "word": " first.", "probability": 0.8232421875}], "temperature": 1.0}, {"id": 109, "seek": 298335, "start": 2955.91, "end": 2983.35, "text": " The box plot. Again, as we mentioned, the minimum value is zero. The maximum is 27. The Q1 is 2. The median is 3. The Q3 is 5. Now, if you look at the distance between, does this vertical line lie between the line in the middle or the center of the box? It's not exactly. 
But if you look at this line, vertical line,", "tokens": [440, 2424, 7542, 13, 3764, 11, 382, 321, 2835, 11, 264, 7285, 2158, 307, 4018, 13, 440, 6674, 307, 7634, 13, 440, 1249, 16, 307, 568, 13, 440, 26779, 307, 805, 13, 440, 1249, 18, 307, 1025, 13, 823, 11, 498, 291, 574, 412, 264, 4560, 1296, 11, 775, 341, 9429, 1622, 4544, 1296, 264, 1622, 294, 264, 2808, 420, 264, 3056, 295, 264, 2424, 30, 467, 311, 406, 2293, 13, 583, 498, 291, 574, 412, 341, 1622, 11, 9429, 1622, 11], "avg_logprob": -0.20199548803180098, "compression_ratio": 1.6091370558375635, "no_speech_prob": 0.0, "words": [{"start": 2955.91, "end": 2956.15, "word": " The", "probability": 0.441162109375}, {"start": 2956.15, "end": 2956.35, "word": " box", "probability": 0.58740234375}, {"start": 2956.35, "end": 2956.57, "word": " plot.", "probability": 0.85498046875}, {"start": 2957.19, "end": 2957.49, "word": " Again,", "probability": 0.9384765625}, {"start": 2957.57, "end": 2957.63, "word": " as", "probability": 0.95458984375}, {"start": 2957.63, "end": 2957.71, "word": " we", "probability": 0.93115234375}, {"start": 2957.71, "end": 2957.99, "word": " mentioned,", "probability": 0.81494140625}, {"start": 2958.11, "end": 2958.19, "word": " the", "probability": 0.89453125}, {"start": 2958.19, "end": 2958.43, "word": " minimum", "probability": 0.95947265625}, {"start": 2958.43, "end": 2958.75, "word": " value", "probability": 0.89892578125}, {"start": 2958.75, "end": 2958.95, "word": " is", "probability": 0.86376953125}, {"start": 2958.95, "end": 2959.15, "word": " zero.", "probability": 0.60400390625}, {"start": 2959.21, "end": 2959.33, "word": " The", "probability": 0.87451171875}, {"start": 2959.33, "end": 2959.61, "word": " maximum", "probability": 0.90185546875}, {"start": 2959.61, "end": 2959.79, "word": " is", "probability": 0.91259765625}, {"start": 2959.79, "end": 2960.29, "word": " 27.", "probability": 0.9130859375}, {"start": 2961.17, "end": 2961.29, "word": " The", "probability": 
0.427490234375}, {"start": 2961.29, "end": 2961.63, "word": " Q1", "probability": 0.9072265625}, {"start": 2961.63, "end": 2961.81, "word": " is", "probability": 0.943359375}, {"start": 2961.81, "end": 2962.07, "word": " 2.", "probability": 0.61181640625}, {"start": 2962.87, "end": 2963.01, "word": " The", "probability": 0.4716796875}, {"start": 2963.01, "end": 2963.23, "word": " median", "probability": 0.9482421875}, {"start": 2963.23, "end": 2963.49, "word": " is", "probability": 0.9443359375}, {"start": 2963.49, "end": 2963.81, "word": " 3.", "probability": 0.5458984375}, {"start": 2964.87, "end": 2965.05, "word": " The", "probability": 0.62744140625}, {"start": 2965.05, "end": 2965.51, "word": " Q3", "probability": 0.9921875}, {"start": 2965.51, "end": 2965.75, "word": " is", "probability": 0.9443359375}, {"start": 2965.75, "end": 2966.19, "word": " 5.", "probability": 0.96826171875}, {"start": 2967.43, "end": 2967.65, "word": " Now,", "probability": 0.935546875}, {"start": 2967.69, "end": 2967.77, "word": " if", "probability": 0.94580078125}, {"start": 2967.77, "end": 2967.83, "word": " you", "probability": 0.92578125}, {"start": 2967.83, "end": 2967.97, "word": " look", "probability": 0.95361328125}, {"start": 2967.97, "end": 2968.07, "word": " at", "probability": 0.96484375}, {"start": 2968.07, "end": 2968.17, "word": " the", "probability": 0.9091796875}, {"start": 2968.17, "end": 2968.49, "word": " distance", "probability": 0.94384765625}, {"start": 2968.49, "end": 2969.07, "word": " between,", "probability": 0.8505859375}, {"start": 2971.15, "end": 2971.41, "word": " does", "probability": 0.771484375}, {"start": 2971.41, "end": 2971.63, "word": " this", "probability": 0.94482421875}, {"start": 2971.63, "end": 2972.01, "word": " vertical", "probability": 0.931640625}, {"start": 2972.01, "end": 2972.41, "word": " line", "probability": 0.92431640625}, {"start": 2972.41, "end": 2972.85, "word": " lie", "probability": 0.6064453125}, {"start": 2972.85, "end": 
2973.23, "word": " between", "probability": 0.84765625}, {"start": 2973.23, "end": 2973.93, "word": " the", "probability": 0.90771484375}, {"start": 2973.93, "end": 2974.73, "word": " line", "probability": 0.9248046875}, {"start": 2974.73, "end": 2974.97, "word": " in", "probability": 0.9169921875}, {"start": 2974.97, "end": 2975.11, "word": " the", "probability": 0.9267578125}, {"start": 2975.11, "end": 2975.35, "word": " middle", "probability": 0.8955078125}, {"start": 2975.35, "end": 2975.63, "word": " or", "probability": 0.732421875}, {"start": 2975.63, "end": 2975.79, "word": " the", "probability": 0.904296875}, {"start": 2975.79, "end": 2976.09, "word": " center", "probability": 0.88671875}, {"start": 2976.09, "end": 2976.27, "word": " of", "probability": 0.9169921875}, {"start": 2976.27, "end": 2976.39, "word": " the", "probability": 0.91845703125}, {"start": 2976.39, "end": 2976.67, "word": " box?", "probability": 0.93408203125}, {"start": 2977.75, "end": 2978.23, "word": " It's", "probability": 0.805419921875}, {"start": 2978.23, "end": 2978.37, "word": " not", "probability": 0.9482421875}, {"start": 2978.37, "end": 2978.79, "word": " exactly.", "probability": 0.8837890625}, {"start": 2979.45, "end": 2979.85, "word": " But", "probability": 0.95263671875}, {"start": 2979.85, "end": 2979.99, "word": " if", "probability": 0.8876953125}, {"start": 2979.99, "end": 2980.09, "word": " you", "probability": 0.9609375}, {"start": 2980.09, "end": 2980.33, "word": " look", "probability": 0.96240234375}, {"start": 2980.33, "end": 2980.67, "word": " at", "probability": 0.9658203125}, {"start": 2980.67, "end": 2982.09, "word": " this", "probability": 0.94775390625}, {"start": 2982.09, "end": 2982.43, "word": " line,", "probability": 0.900390625}, {"start": 2982.59, "end": 2982.89, "word": " vertical", "probability": 0.79638671875}, {"start": 2982.89, "end": 2983.35, "word": " line,", "probability": 0.93505859375}], "temperature": 1.0}, {"id": 110, "seek": 300262, 
"start": 2984.34, "end": 3002.62, "text": " and the location of this with respect to the minimum and the maximum. You will see that the right tail is much longer than the left tail because it starts from 3 up to 27.", "tokens": [293, 264, 4914, 295, 341, 365, 3104, 281, 264, 7285, 293, 264, 6674, 13, 509, 486, 536, 300, 264, 558, 6838, 307, 709, 2854, 813, 264, 1411, 6838, 570, 309, 3719, 490, 805, 493, 281, 7634, 13], "avg_logprob": -0.15224095433950424, "compression_ratio": 1.3983739837398375, "no_speech_prob": 0.0, "words": [{"start": 2984.34, "end": 2984.68, "word": " and", "probability": 0.42236328125}, {"start": 2984.68, "end": 2984.88, "word": " the", "probability": 0.884765625}, {"start": 2984.88, "end": 2985.26, "word": " location", "probability": 0.9521484375}, {"start": 2985.26, "end": 2985.46, "word": " of", "probability": 0.96142578125}, {"start": 2985.46, "end": 2985.8, "word": " this", "probability": 0.93310546875}, {"start": 2985.8, "end": 2987.78, "word": " with", "probability": 0.79638671875}, {"start": 2987.78, "end": 2988.4, "word": " respect", "probability": 0.927734375}, {"start": 2988.4, "end": 2988.78, "word": " to", "probability": 0.96875}, {"start": 2988.78, "end": 2989.24, "word": " the", "probability": 0.91357421875}, {"start": 2989.24, "end": 2989.46, "word": " minimum", "probability": 0.96826171875}, {"start": 2989.46, "end": 2990.46, "word": " and", "probability": 0.93359375}, {"start": 2990.46, "end": 2990.6, "word": " the", "probability": 0.84521484375}, {"start": 2990.6, "end": 2991.04, "word": " maximum.", "probability": 0.93505859375}, {"start": 2992.28, "end": 2992.34, "word": " You", "probability": 0.6591796875}, {"start": 2992.34, "end": 2992.48, "word": " will", "probability": 0.7392578125}, {"start": 2992.48, "end": 2992.62, "word": " see", "probability": 0.92626953125}, {"start": 2992.62, "end": 2992.9, "word": " that", "probability": 0.92236328125}, {"start": 2992.9, "end": 2993.96, "word": " the", "probability": 
0.8984375}, {"start": 2993.96, "end": 2994.6, "word": " right", "probability": 0.9326171875}, {"start": 2994.6, "end": 2995.08, "word": " tail", "probability": 0.89892578125}, {"start": 2995.08, "end": 2996.22, "word": " is", "probability": 0.94384765625}, {"start": 2996.22, "end": 2996.64, "word": " much", "probability": 0.90869140625}, {"start": 2996.64, "end": 2997.12, "word": " longer", "probability": 0.93994140625}, {"start": 2997.12, "end": 2998.0, "word": " than", "probability": 0.93994140625}, {"start": 2998.0, "end": 2998.22, "word": " the", "probability": 0.91552734375}, {"start": 2998.22, "end": 2998.46, "word": " left", "probability": 0.94091796875}, {"start": 2998.46, "end": 2998.76, "word": " tail", "probability": 0.87646484375}, {"start": 2998.76, "end": 2999.78, "word": " because", "probability": 0.52734375}, {"start": 2999.78, "end": 3000.5, "word": " it", "probability": 0.9521484375}, {"start": 3000.5, "end": 3000.98, "word": " starts", "probability": 0.84375}, {"start": 3000.98, "end": 3001.24, "word": " from", "probability": 0.8955078125}, {"start": 3001.24, "end": 3001.56, "word": " 3", "probability": 0.5341796875}, {"start": 3001.56, "end": 3001.88, "word": " up", "probability": 0.95556640625}, {"start": 3001.88, "end": 3002.04, "word": " to", "probability": 0.96728515625}, {"start": 3002.04, "end": 3002.62, "word": " 27.", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 111, "seek": 303280, "start": 3003.88, "end": 3032.8, "text": " And the other one, from zero to three, is a big distance between three and 27, compared to the other one, zero to three. So it seems to be this is quite skewed, so it's not at all symmetric, because of this value. So maybe by using MaxPlot, you can tell that point is suspected to be an outlier. 
It has a very long right tail.", "tokens": [400, 264, 661, 472, 11, 490, 4018, 281, 1045, 11, 307, 257, 955, 4560, 1296, 1045, 293, 7634, 11, 5347, 281, 264, 661, 472, 11, 4018, 281, 1045, 13, 407, 309, 2544, 281, 312, 341, 307, 1596, 8756, 26896, 11, 370, 309, 311, 406, 412, 439, 32330, 11, 570, 295, 341, 2158, 13, 407, 1310, 538, 1228, 7402, 33710, 310, 11, 291, 393, 980, 300, 935, 307, 26439, 281, 312, 364, 484, 2753, 13, 467, 575, 257, 588, 938, 558, 6838, 13], "avg_logprob": -0.21743223501975278, "compression_ratio": 1.5352112676056338, "no_speech_prob": 0.0, "words": [{"start": 3003.88, "end": 3004.16, "word": " And", "probability": 0.7509765625}, {"start": 3004.16, "end": 3004.3, "word": " the", "probability": 0.81787109375}, {"start": 3004.3, "end": 3004.52, "word": " other", "probability": 0.88525390625}, {"start": 3004.52, "end": 3004.88, "word": " one,", "probability": 0.91455078125}, {"start": 3005.14, "end": 3005.36, "word": " from", "probability": 0.880859375}, {"start": 3005.36, "end": 3005.64, "word": " zero", "probability": 0.625}, {"start": 3005.64, "end": 3005.84, "word": " to", "probability": 0.9697265625}, {"start": 3005.84, "end": 3006.18, "word": " three,", "probability": 0.91796875}, {"start": 3006.38, "end": 3006.78, "word": " is", "probability": 0.83935546875}, {"start": 3006.78, "end": 3006.9, "word": " a", "probability": 0.98876953125}, {"start": 3006.9, "end": 3007.08, "word": " big", "probability": 0.92626953125}, {"start": 3007.08, "end": 3007.58, "word": " distance", "probability": 0.93017578125}, {"start": 3007.58, "end": 3008.0, "word": " between", "probability": 0.85400390625}, {"start": 3008.0, "end": 3008.3, "word": " three", "probability": 0.7373046875}, {"start": 3008.3, "end": 3008.48, "word": " and", "probability": 0.91552734375}, {"start": 3008.48, "end": 3008.84, "word": " 27,", "probability": 0.492431640625}, {"start": 3009.38, "end": 3009.76, "word": " compared", "probability": 0.84033203125}, {"start": 3009.76, "end": 
3010.0, "word": " to", "probability": 0.955078125}, {"start": 3010.0, "end": 3010.12, "word": " the", "probability": 0.89306640625}, {"start": 3010.12, "end": 3010.3, "word": " other", "probability": 0.87060546875}, {"start": 3010.3, "end": 3010.46, "word": " one,", "probability": 0.55712890625}, {"start": 3010.52, "end": 3011.16, "word": " zero", "probability": 0.802734375}, {"start": 3011.16, "end": 3011.34, "word": " to", "probability": 0.9580078125}, {"start": 3011.34, "end": 3011.6, "word": " three.", "probability": 0.9326171875}, {"start": 3012.04, "end": 3012.28, "word": " So", "probability": 0.953125}, {"start": 3012.28, "end": 3012.52, "word": " it", "probability": 0.78759765625}, {"start": 3012.52, "end": 3012.78, "word": " seems", "probability": 0.82080078125}, {"start": 3012.78, "end": 3012.98, "word": " to", "probability": 0.9619140625}, {"start": 3012.98, "end": 3013.14, "word": " be", "probability": 0.88037109375}, {"start": 3013.14, "end": 3013.36, "word": " this", "probability": 0.775390625}, {"start": 3013.36, "end": 3013.64, "word": " is", "probability": 0.94384765625}, {"start": 3013.64, "end": 3014.38, "word": " quite", "probability": 0.498291015625}, {"start": 3014.38, "end": 3014.9, "word": " skewed,", "probability": 0.972412109375}, {"start": 3015.6, "end": 3015.88, "word": " so", "probability": 0.94482421875}, {"start": 3015.88, "end": 3016.04, "word": " it's", "probability": 0.948486328125}, {"start": 3016.04, "end": 3016.22, "word": " not", "probability": 0.9462890625}, {"start": 3016.22, "end": 3016.38, "word": " at", "probability": 0.87841796875}, {"start": 3016.38, "end": 3016.6, "word": " all", "probability": 0.95556640625}, {"start": 3016.6, "end": 3017.02, "word": " symmetric,", "probability": 0.8515625}, {"start": 3017.7, "end": 3018.44, "word": " because", "probability": 0.55078125}, {"start": 3018.44, "end": 3020.74, "word": " of", "probability": 0.95849609375}, {"start": 3020.74, "end": 3020.98, "word": " this", "probability": 
0.93603515625}, {"start": 3020.98, "end": 3021.32, "word": " value.", "probability": 0.94921875}, {"start": 3021.96, "end": 3022.22, "word": " So", "probability": 0.9287109375}, {"start": 3022.22, "end": 3023.42, "word": " maybe", "probability": 0.845703125}, {"start": 3023.42, "end": 3023.7, "word": " by", "probability": 0.669921875}, {"start": 3023.7, "end": 3023.84, "word": " using", "probability": 0.90576171875}, {"start": 3023.84, "end": 3024.32, "word": " MaxPlot,", "probability": 0.6173502604166666}, {"start": 3024.36, "end": 3024.46, "word": " you", "probability": 0.9375}, {"start": 3024.46, "end": 3024.6, "word": " can", "probability": 0.939453125}, {"start": 3024.6, "end": 3024.78, "word": " tell", "probability": 0.8779296875}, {"start": 3024.78, "end": 3024.96, "word": " that", "probability": 0.8486328125}, {"start": 3024.96, "end": 3025.24, "word": " point", "probability": 0.8994140625}, {"start": 3025.24, "end": 3025.58, "word": " is", "probability": 0.9482421875}, {"start": 3025.58, "end": 3026.42, "word": " suspected", "probability": 0.88037109375}, {"start": 3026.42, "end": 3026.62, "word": " to", "probability": 0.9677734375}, {"start": 3026.62, "end": 3026.74, "word": " be", "probability": 0.95068359375}, {"start": 3026.74, "end": 3026.88, "word": " an", "probability": 0.8359375}, {"start": 3026.88, "end": 3027.28, "word": " outlier.", "probability": 0.87060546875}, {"start": 3028.32, "end": 3028.5, "word": " It", "probability": 0.76220703125}, {"start": 3028.5, "end": 3029.16, "word": " has", "probability": 0.8017578125}, {"start": 3029.16, "end": 3029.48, "word": " a", "probability": 0.6259765625}, {"start": 3029.48, "end": 3030.98, "word": " very", "probability": 0.85791015625}, {"start": 3030.98, "end": 3031.44, "word": " long", "probability": 0.900390625}, {"start": 3031.44, "end": 3032.14, "word": " right", "probability": 0.89892578125}, {"start": 3032.14, "end": 3032.8, "word": " tail.", "probability": 0.71533203125}], "temperature": 1.0}, 
{"id": 112, "seek": 305900, "start": 3035.56, "end": 3059.0, "text": " So let's see how can we determine if a point is an outlier or not. Sometimes we can use box plot to determine if the point is an outlier or not. The rule is that a value is considered an outlier", "tokens": [407, 718, 311, 536, 577, 393, 321, 6997, 498, 257, 935, 307, 364, 484, 2753, 420, 406, 13, 4803, 321, 393, 764, 2424, 7542, 281, 6997, 498, 264, 935, 307, 364, 484, 2753, 420, 406, 13, 440, 4978, 307, 300, 257, 2158, 307, 4888, 364, 484, 2753], "avg_logprob": -0.15608723647892475, "compression_ratio": 1.6666666666666667, "no_speech_prob": 0.0, "words": [{"start": 3035.56, "end": 3035.82, "word": " So", "probability": 0.708984375}, {"start": 3035.82, "end": 3036.08, "word": " let's", "probability": 0.82861328125}, {"start": 3036.08, "end": 3036.34, "word": " see", "probability": 0.90576171875}, {"start": 3036.34, "end": 3036.78, "word": " how", "probability": 0.82666015625}, {"start": 3036.78, "end": 3037.1, "word": " can", "probability": 0.642578125}, {"start": 3037.1, "end": 3037.36, "word": " we", "probability": 0.955078125}, {"start": 3037.36, "end": 3038.6, "word": " determine", "probability": 0.908203125}, {"start": 3038.6, "end": 3039.66, "word": " if", "probability": 0.9306640625}, {"start": 3039.66, "end": 3039.82, "word": " a", "probability": 0.97705078125}, {"start": 3039.82, "end": 3040.22, "word": " point", "probability": 0.974609375}, {"start": 3040.22, "end": 3040.96, "word": " is", "probability": 0.95263671875}, {"start": 3040.96, "end": 3041.12, "word": " an", "probability": 0.70166015625}, {"start": 3041.12, "end": 3041.46, "word": " outlier", "probability": 0.6654052734375}, {"start": 3041.46, "end": 3041.62, "word": " or", "probability": 0.90380859375}, {"start": 3041.62, "end": 3041.8, "word": " not.", "probability": 0.95458984375}, {"start": 3044.42, "end": 3045.06, "word": " Sometimes", "probability": 0.8720703125}, {"start": 3045.06, "end": 3045.3, "word": " we", 
"probability": 0.8095703125}, {"start": 3045.3, "end": 3045.64, "word": " can", "probability": 0.93994140625}, {"start": 3045.64, "end": 3046.32, "word": " use", "probability": 0.875}, {"start": 3046.32, "end": 3047.64, "word": " box", "probability": 0.681640625}, {"start": 3047.64, "end": 3048.04, "word": " plot", "probability": 0.6064453125}, {"start": 3048.04, "end": 3050.4, "word": " to", "probability": 0.87353515625}, {"start": 3050.4, "end": 3050.9, "word": " determine", "probability": 0.90478515625}, {"start": 3050.9, "end": 3051.16, "word": " if", "probability": 0.93994140625}, {"start": 3051.16, "end": 3051.28, "word": " the", "probability": 0.7958984375}, {"start": 3051.28, "end": 3051.54, "word": " point", "probability": 0.9658203125}, {"start": 3051.54, "end": 3051.74, "word": " is", "probability": 0.953125}, {"start": 3051.74, "end": 3051.86, "word": " an", "probability": 0.94580078125}, {"start": 3051.86, "end": 3052.2, "word": " outlier", "probability": 0.95458984375}, {"start": 3052.2, "end": 3052.38, "word": " or", "probability": 0.9287109375}, {"start": 3052.38, "end": 3052.62, "word": " not.", "probability": 0.95068359375}, {"start": 3053.58, "end": 3053.84, "word": " The", "probability": 0.86572265625}, {"start": 3053.84, "end": 3054.1, "word": " rule", "probability": 0.91748046875}, {"start": 3054.1, "end": 3054.62, "word": " is", "probability": 0.94775390625}, {"start": 3054.62, "end": 3054.9, "word": " that", "probability": 0.8583984375}, {"start": 3054.9, "end": 3057.28, "word": " a", "probability": 0.8466796875}, {"start": 3057.28, "end": 3057.62, "word": " value", "probability": 0.97412109375}, {"start": 3057.62, "end": 3057.84, "word": " is", "probability": 0.93798828125}, {"start": 3057.84, "end": 3058.34, "word": " considered", "probability": 0.78271484375}, {"start": 3058.34, "end": 3058.58, "word": " an", "probability": 0.93994140625}, {"start": 3058.58, "end": 3059.0, "word": " outlier", "probability": 0.95751953125}], "temperature": 
1.0}, {"id": 113, "seek": 308910, "start": 3060.72, "end": 3089.1, "text": " It is more than 1.5 times the entire quartile range below Q1 or above it. Let's explain the meaning of this sentence. First, let's compute something called lower. The lower limit is not the minimum.", "tokens": [467, 307, 544, 813, 502, 13, 20, 1413, 264, 2302, 20837, 794, 3613, 2507, 1249, 16, 420, 3673, 309, 13, 961, 311, 2903, 264, 3620, 295, 341, 8174, 13, 2386, 11, 718, 311, 14722, 746, 1219, 3126, 13, 440, 3126, 4948, 307, 406, 264, 7285, 13], "avg_logprob": -0.19498005319148937, "compression_ratio": 1.3445945945945945, "no_speech_prob": 0.0, "words": [{"start": 3060.72, "end": 3060.86, "word": " It", "probability": 0.447021484375}, {"start": 3060.86, "end": 3061.04, "word": " is", "probability": 0.8359375}, {"start": 3061.04, "end": 3061.32, "word": " more", "probability": 0.9287109375}, {"start": 3061.32, "end": 3061.58, "word": " than", "probability": 0.9423828125}, {"start": 3061.58, "end": 3061.86, "word": " 1", "probability": 0.89453125}, {"start": 3061.86, "end": 3062.56, "word": ".5", "probability": 0.986083984375}, {"start": 3062.56, "end": 3063.16, "word": " times", "probability": 0.82080078125}, {"start": 3063.16, "end": 3063.48, "word": " the", "probability": 0.80615234375}, {"start": 3063.48, "end": 3063.86, "word": " entire", "probability": 0.61376953125}, {"start": 3063.86, "end": 3064.42, "word": " quartile", "probability": 0.8935546875}, {"start": 3064.42, "end": 3064.78, "word": " range", "probability": 0.86865234375}, {"start": 3064.78, "end": 3065.1, "word": " below", "probability": 0.67822265625}, {"start": 3065.1, "end": 3065.7, "word": " Q1", "probability": 0.6715087890625}, {"start": 3065.7, "end": 3066.12, "word": " or", "probability": 0.826171875}, {"start": 3066.12, "end": 3066.54, "word": " above", "probability": 0.94873046875}, {"start": 3066.54, "end": 3066.8, "word": " it.", "probability": 0.4404296875}, {"start": 3068.44, "end": 3069.28, "word": " 
Let's", "probability": 0.93408203125}, {"start": 3069.28, "end": 3069.7, "word": " explain", "probability": 0.8701171875}, {"start": 3069.7, "end": 3070.06, "word": " the", "probability": 0.92236328125}, {"start": 3070.06, "end": 3070.36, "word": " meaning", "probability": 0.86279296875}, {"start": 3070.36, "end": 3071.42, "word": " of", "probability": 0.9580078125}, {"start": 3071.42, "end": 3071.68, "word": " this", "probability": 0.9365234375}, {"start": 3071.68, "end": 3072.26, "word": " sentence.", "probability": 0.92578125}, {"start": 3075.26, "end": 3076.1, "word": " First,", "probability": 0.90771484375}, {"start": 3076.2, "end": 3076.44, "word": " let's", "probability": 0.973388671875}, {"start": 3076.44, "end": 3076.88, "word": " compute", "probability": 0.9228515625}, {"start": 3076.88, "end": 3078.26, "word": " something", "probability": 0.861328125}, {"start": 3078.26, "end": 3079.46, "word": " called", "probability": 0.87255859375}, {"start": 3079.46, "end": 3080.1, "word": " lower.", "probability": 0.4814453125}, {"start": 3083.74, "end": 3084.58, "word": " The", "probability": 0.59130859375}, {"start": 3084.58, "end": 3084.82, "word": " lower", "probability": 0.85546875}, {"start": 3084.82, "end": 3085.3, "word": " limit", "probability": 0.96533203125}, {"start": 3085.3, "end": 3088.54, "word": " is", "probability": 0.81640625}, {"start": 3088.54, "end": 3088.72, "word": " not", "probability": 0.94384765625}, {"start": 3088.72, "end": 3088.86, "word": " the", "probability": 0.90380859375}, {"start": 3088.86, "end": 3089.1, "word": " minimum.", "probability": 0.974609375}], "temperature": 1.0}, {"id": 114, "seek": 311746, "start": 3090.1, "end": 3117.46, "text": " It's Q1 minus 1.5 IQR. This is the lower limit. So it's 1.5 times IQR below Q1. This is the lower limit. 
The upper limit, Q3,", "tokens": [467, 311, 1249, 16, 3175, 502, 13, 20, 28921, 49, 13, 639, 307, 264, 3126, 4948, 13, 407, 309, 311, 502, 13, 20, 1413, 28921, 49, 2507, 1249, 16, 13, 639, 307, 264, 3126, 4948, 13, 440, 6597, 4948, 11, 1249, 18, 11], "avg_logprob": -0.1624644896523519, "compression_ratio": 1.3695652173913044, "no_speech_prob": 0.0, "words": [{"start": 3090.1, "end": 3090.98, "word": " It's", "probability": 0.65234375}, {"start": 3090.98, "end": 3091.86, "word": " Q1", "probability": 0.7110595703125}, {"start": 3091.86, "end": 3094.16, "word": " minus", "probability": 0.88232421875}, {"start": 3094.16, "end": 3094.94, "word": " 1", "probability": 0.89208984375}, {"start": 3094.94, "end": 3095.66, "word": ".5", "probability": 0.990234375}, {"start": 3095.66, "end": 3097.78, "word": " IQR.", "probability": 0.7919921875}, {"start": 3098.1, "end": 3098.58, "word": " This", "probability": 0.82763671875}, {"start": 3098.58, "end": 3098.68, "word": " is", "probability": 0.95068359375}, {"start": 3098.68, "end": 3098.82, "word": " the", "probability": 0.9013671875}, {"start": 3098.82, "end": 3099.0, "word": " lower", "probability": 0.796875}, {"start": 3099.0, "end": 3099.28, "word": " limit.", "probability": 0.9189453125}, {"start": 3102.28, "end": 3103.16, "word": " So", "probability": 0.94580078125}, {"start": 3103.16, "end": 3103.54, "word": " it's", "probability": 0.891357421875}, {"start": 3103.54, "end": 3103.96, "word": " 1", "probability": 0.99267578125}, {"start": 3103.96, "end": 3104.6, "word": ".5", "probability": 0.99853515625}, {"start": 3104.6, "end": 3105.12, "word": " times", "probability": 0.8857421875}, {"start": 3105.12, "end": 3105.84, "word": " IQR", "probability": 0.97802734375}, {"start": 3105.84, "end": 3106.22, "word": " below", "probability": 0.87939453125}, {"start": 3106.22, "end": 3106.8, "word": " Q1.", "probability": 0.990966796875}, {"start": 3107.0, "end": 3107.18, "word": " This", "probability": 0.69873046875}, {"start": 
3107.18, "end": 3107.26, "word": " is", "probability": 0.9482421875}, {"start": 3107.26, "end": 3107.38, "word": " the", "probability": 0.8798828125}, {"start": 3107.38, "end": 3107.56, "word": " lower", "probability": 0.8935546875}, {"start": 3107.56, "end": 3107.84, "word": " limit.", "probability": 0.96728515625}, {"start": 3109.7, "end": 3109.9, "word": " The", "probability": 0.884765625}, {"start": 3109.9, "end": 3110.16, "word": " upper", "probability": 0.7939453125}, {"start": 3110.16, "end": 3110.62, "word": " limit,", "probability": 0.96923828125}, {"start": 3114.68, "end": 3117.46, "word": " Q3,", "probability": 0.97509765625}], "temperature": 1.0}, {"id": 115, "seek": 314507, "start": 3118.79, "end": 3145.07, "text": " plus 1.5 times IQR. So we computed lower and upper limit by using these rules. Q1 minus 1.5 IQR. So it's 1.5 times IQR below Q1 and 1.5 times IQR above Q1. Now, any value.", "tokens": [1804, 502, 13, 20, 1413, 28921, 49, 13, 407, 321, 40610, 3126, 293, 6597, 4948, 538, 1228, 613, 4474, 13, 1249, 16, 3175, 502, 13, 20, 28921, 49, 13, 407, 309, 311, 502, 13, 20, 1413, 28921, 49, 2507, 1249, 16, 293, 502, 13, 20, 1413, 28921, 49, 3673, 1249, 16, 13, 823, 11, 604, 2158, 13], "avg_logprob": -0.1577316797498999, "compression_ratio": 1.376, "no_speech_prob": 0.0, "words": [{"start": 3118.79, "end": 3119.27, "word": " plus", "probability": 0.2666015625}, {"start": 3119.27, "end": 3119.55, "word": " 1", "probability": 0.93994140625}, {"start": 3119.55, "end": 3120.21, "word": ".5", "probability": 0.97900390625}, {"start": 3120.21, "end": 3120.71, "word": " times", "probability": 0.82861328125}, {"start": 3120.71, "end": 3121.51, "word": " IQR.", "probability": 0.92822265625}, {"start": 3122.65, "end": 3123.61, "word": " So", "probability": 0.9521484375}, {"start": 3123.61, "end": 3123.79, "word": " we", "probability": 0.7021484375}, {"start": 3123.79, "end": 3124.23, "word": " computed", "probability": 0.91796875}, {"start": 3124.23, "end": 
3125.53, "word": " lower", "probability": 0.89404296875}, {"start": 3125.53, "end": 3126.61, "word": " and", "probability": 0.9248046875}, {"start": 3126.61, "end": 3126.89, "word": " upper", "probability": 0.8154296875}, {"start": 3126.89, "end": 3127.17, "word": " limit", "probability": 0.9072265625}, {"start": 3127.17, "end": 3127.37, "word": " by", "probability": 0.349853515625}, {"start": 3127.37, "end": 3127.55, "word": " using", "probability": 0.916015625}, {"start": 3127.55, "end": 3127.81, "word": " these", "probability": 0.58837890625}, {"start": 3127.81, "end": 3128.19, "word": " rules.", "probability": 0.57861328125}, {"start": 3129.13, "end": 3129.45, "word": " Q1", "probability": 0.964599609375}, {"start": 3129.45, "end": 3129.75, "word": " minus", "probability": 0.94921875}, {"start": 3129.75, "end": 3129.97, "word": " 1", "probability": 0.99267578125}, {"start": 3129.97, "end": 3130.35, "word": ".5", "probability": 0.99609375}, {"start": 3130.35, "end": 3130.99, "word": " IQR.", "probability": 0.92333984375}, {"start": 3132.39, "end": 3133.35, "word": " So", "probability": 0.9560546875}, {"start": 3133.35, "end": 3133.79, "word": " it's", "probability": 0.931396484375}, {"start": 3133.79, "end": 3134.29, "word": " 1", "probability": 0.98583984375}, {"start": 3134.29, "end": 3134.93, "word": ".5", "probability": 0.998779296875}, {"start": 3134.93, "end": 3135.45, "word": " times", "probability": 0.91259765625}, {"start": 3135.45, "end": 3136.27, "word": " IQR", "probability": 0.984375}, {"start": 3136.27, "end": 3136.67, "word": " below", "probability": 0.8740234375}, {"start": 3136.67, "end": 3137.17, "word": " Q1", "probability": 0.990478515625}, {"start": 3137.17, "end": 3138.69, "word": " and", "probability": 0.462890625}, {"start": 3138.69, "end": 3138.95, "word": " 1", "probability": 0.99365234375}, {"start": 3138.95, "end": 3139.45, "word": ".5", "probability": 0.998779296875}, {"start": 3139.45, "end": 3139.85, "word": " times", 
"probability": 0.89404296875}, {"start": 3139.85, "end": 3140.51, "word": " IQR", "probability": 0.98193359375}, {"start": 3140.51, "end": 3140.95, "word": " above", "probability": 0.95361328125}, {"start": 3140.95, "end": 3141.57, "word": " Q1.", "probability": 0.8212890625}, {"start": 3143.05, "end": 3143.45, "word": " Now,", "probability": 0.8701171875}, {"start": 3144.39, "end": 3144.71, "word": " any", "probability": 0.87451171875}, {"start": 3144.71, "end": 3145.07, "word": " value.", "probability": 0.97119140625}], "temperature": 1.0}, {"id": 116, "seek": 317945, "start": 3151.15, "end": 3179.45, "text": " Is it smaller than the lower limit or greater than the upper limit? Any value.", "tokens": [1119, 309, 4356, 813, 264, 3126, 4948, 420, 5044, 813, 264, 6597, 4948, 30, 2639, 2158, 13], "avg_logprob": -0.3680555688010322, "compression_ratio": 1.1791044776119404, "no_speech_prob": 0.0, "words": [{"start": 3151.15, "end": 3151.43, "word": " Is", "probability": 0.2724609375}, {"start": 3151.43, "end": 3151.65, "word": " it", "probability": 0.58251953125}, {"start": 3151.65, "end": 3152.19, "word": " smaller", "probability": 0.818359375}, {"start": 3152.19, "end": 3152.53, "word": " than", "probability": 0.9140625}, {"start": 3152.53, "end": 3158.61, "word": " the", "probability": 0.297607421875}, {"start": 3158.61, "end": 3158.87, "word": " lower", "probability": 0.83935546875}, {"start": 3158.87, "end": 3159.51, "word": " limit", "probability": 0.966796875}, {"start": 3159.51, "end": 3165.99, "word": " or", "probability": 0.372314453125}, {"start": 3165.99, "end": 3166.39, "word": " greater", "probability": 0.896484375}, {"start": 3166.39, "end": 3166.79, "word": " than", "probability": 0.9453125}, {"start": 3166.79, "end": 3173.29, "word": " the", "probability": 0.685546875}, {"start": 3173.29, "end": 3173.63, "word": " upper", "probability": 0.85498046875}, {"start": 3173.63, "end": 3174.15, "word": " limit?", "probability": 0.96630859375}, {"start": 
3178.33, "end": 3179.11, "word": " Any", "probability": 0.8525390625}, {"start": 3179.11, "end": 3179.45, "word": " value.", "probability": 0.96728515625}], "temperature": 1.0}, {"id": 117, "seek": 320710, "start": 3180.5, "end": 3207.1, "text": " smaller than the lower limit and greater than the upper limit is considered to be an outlier. This is the rule how can you tell if the point or data value is outlier or not. Just compute lower limit and upper limit.", "tokens": [4356, 813, 264, 3126, 4948, 293, 5044, 813, 264, 6597, 4948, 307, 4888, 281, 312, 364, 484, 2753, 13, 639, 307, 264, 4978, 577, 393, 291, 980, 498, 264, 935, 420, 1412, 2158, 307, 484, 2753, 420, 406, 13, 1449, 14722, 3126, 4948, 293, 6597, 4948, 13], "avg_logprob": -0.20556641183793545, "compression_ratio": 1.6119402985074627, "no_speech_prob": 0.0, "words": [{"start": 3180.5, "end": 3181.04, "word": " smaller", "probability": 0.187744140625}, {"start": 3181.04, "end": 3181.42, "word": " than", "probability": 0.931640625}, {"start": 3181.42, "end": 3183.08, "word": " the", "probability": 0.81640625}, {"start": 3183.08, "end": 3183.3, "word": " lower", "probability": 0.84228515625}, {"start": 3183.3, "end": 3183.62, "word": " limit", "probability": 0.94384765625}, {"start": 3183.62, "end": 3184.6, "word": " and", "probability": 0.6484375}, {"start": 3184.6, "end": 3184.92, "word": " greater", "probability": 0.8515625}, {"start": 3184.92, "end": 3185.26, "word": " than", "probability": 0.93701171875}, {"start": 3185.26, "end": 3185.56, "word": " the", "probability": 0.904296875}, {"start": 3185.56, "end": 3186.1, "word": " upper", "probability": 0.861328125}, {"start": 3186.1, "end": 3186.42, "word": " limit", "probability": 0.966796875}, {"start": 3186.42, "end": 3186.72, "word": " is", "probability": 0.599609375}, {"start": 3186.72, "end": 3187.26, "word": " considered", "probability": 0.7880859375}, {"start": 3187.26, "end": 3193.26, "word": " to", "probability": 0.8837890625}, {"start": 
3193.26, "end": 3194.98, "word": " be", "probability": 0.955078125}, {"start": 3194.98, "end": 3195.88, "word": " an", "probability": 0.9189453125}, {"start": 3195.88, "end": 3196.42, "word": " outlier.", "probability": 0.799072265625}, {"start": 3198.8, "end": 3199.26, "word": " This", "probability": 0.578125}, {"start": 3199.26, "end": 3199.4, "word": " is", "probability": 0.93017578125}, {"start": 3199.4, "end": 3199.54, "word": " the", "probability": 0.72998046875}, {"start": 3199.54, "end": 3199.76, "word": " rule", "probability": 0.9208984375}, {"start": 3199.76, "end": 3200.04, "word": " how", "probability": 0.30712890625}, {"start": 3200.04, "end": 3200.28, "word": " can", "probability": 0.7197265625}, {"start": 3200.28, "end": 3200.46, "word": " you", "probability": 0.95166015625}, {"start": 3200.46, "end": 3200.72, "word": " tell", "probability": 0.880859375}, {"start": 3200.72, "end": 3201.04, "word": " if", "probability": 0.93359375}, {"start": 3201.04, "end": 3201.18, "word": " the", "probability": 0.82177734375}, {"start": 3201.18, "end": 3201.48, "word": " point", "probability": 0.9609375}, {"start": 3201.48, "end": 3201.98, "word": " or", "probability": 0.92041015625}, {"start": 3201.98, "end": 3202.2, "word": " data", "probability": 0.87255859375}, {"start": 3202.2, "end": 3202.66, "word": " value", "probability": 0.96240234375}, {"start": 3202.66, "end": 3203.64, "word": " is", "probability": 0.9443359375}, {"start": 3203.64, "end": 3204.1, "word": " outlier", "probability": 0.8271484375}, {"start": 3204.1, "end": 3204.28, "word": " or", "probability": 0.9052734375}, {"start": 3204.28, "end": 3204.42, "word": " not.", "probability": 0.9443359375}, {"start": 3204.54, "end": 3204.78, "word": " Just", "probability": 0.85009765625}, {"start": 3204.78, "end": 3205.16, "word": " compute", "probability": 0.89697265625}, {"start": 3205.16, "end": 3205.62, "word": " lower", "probability": 0.771484375}, {"start": 3205.62, "end": 3206.02, "word": " limit", 
"probability": 0.943359375}, {"start": 3206.02, "end": 3206.38, "word": " and", "probability": 0.939453125}, {"start": 3206.38, "end": 3206.74, "word": " upper", "probability": 0.826171875}, {"start": 3206.74, "end": 3207.1, "word": " limit.", "probability": 0.95947265625}], "temperature": 1.0}, {"id": 118, "seek": 323768, "start": 3209.78, "end": 3237.68, "text": " So lower limit, Q1 minus 1.5IQ3. Upper limit, Q3 plus 1.5. This is a constant. Now let's go back to the previous example, which was, which Q1 was, what's the value of Q1? Q1 was 2. Q3 is 5.", "tokens": [407, 3126, 4948, 11, 1249, 16, 3175, 502, 13, 20, 40, 48, 18, 13, 36926, 4948, 11, 1249, 18, 1804, 502, 13, 20, 13, 639, 307, 257, 5754, 13, 823, 718, 311, 352, 646, 281, 264, 3894, 1365, 11, 597, 390, 11, 597, 1249, 16, 390, 11, 437, 311, 264, 2158, 295, 1249, 16, 30, 1249, 16, 390, 568, 13, 1249, 18, 307, 1025, 13], "avg_logprob": -0.2402935673793157, "compression_ratio": 1.3380281690140845, "no_speech_prob": 0.0, "words": [{"start": 3209.78, "end": 3210.04, "word": " So", "probability": 0.7666015625}, {"start": 3210.04, "end": 3210.28, "word": " lower", "probability": 0.6044921875}, {"start": 3210.28, "end": 3210.6, "word": " limit,", "probability": 0.95068359375}, {"start": 3210.78, "end": 3211.14, "word": " Q1", "probability": 0.86279296875}, {"start": 3211.14, "end": 3211.54, "word": " minus", "probability": 0.8427734375}, {"start": 3211.54, "end": 3211.88, "word": " 1", "probability": 0.96044921875}, {"start": 3211.88, "end": 3213.14, "word": ".5IQ3.", "probability": 0.821044921875}, {"start": 3214.18, "end": 3214.56, "word": " Upper", "probability": 0.8974609375}, {"start": 3214.56, "end": 3214.92, "word": " limit,", "probability": 0.96484375}, {"start": 3215.14, "end": 3215.58, "word": " Q3", "probability": 0.985107421875}, {"start": 3215.58, "end": 3215.92, "word": " plus", "probability": 0.94482421875}, {"start": 3215.92, "end": 3216.24, "word": " 1", "probability": 0.96484375}, {"start": 
3216.24, "end": 3217.8, "word": ".5.", "probability": 0.87255859375}, {"start": 3217.94, "end": 3218.22, "word": " This", "probability": 0.60986328125}, {"start": 3218.22, "end": 3218.32, "word": " is", "probability": 0.91015625}, {"start": 3218.32, "end": 3218.4, "word": " a", "probability": 0.284912109375}, {"start": 3218.4, "end": 3218.62, "word": " constant.", "probability": 0.904296875}, {"start": 3223.2, "end": 3223.9, "word": " Now", "probability": 0.94921875}, {"start": 3223.9, "end": 3224.14, "word": " let's", "probability": 0.841552734375}, {"start": 3224.14, "end": 3224.28, "word": " go", "probability": 0.9658203125}, {"start": 3224.28, "end": 3224.52, "word": " back", "probability": 0.87158203125}, {"start": 3224.52, "end": 3224.66, "word": " to", "probability": 0.96533203125}, {"start": 3224.66, "end": 3224.78, "word": " the", "probability": 0.919921875}, {"start": 3224.78, "end": 3224.98, "word": " previous", "probability": 0.80322265625}, {"start": 3224.98, "end": 3225.54, "word": " example,", "probability": 0.9736328125}, {"start": 3226.64, "end": 3227.04, "word": " which", "probability": 0.94921875}, {"start": 3227.04, "end": 3227.44, "word": " was,", "probability": 0.4541015625}, {"start": 3228.22, "end": 3228.58, "word": " which", "probability": 0.912109375}, {"start": 3228.58, "end": 3229.04, "word": " Q1", "probability": 0.989013671875}, {"start": 3229.04, "end": 3229.42, "word": " was,", "probability": 0.9501953125}, {"start": 3229.64, "end": 3229.96, "word": " what's", "probability": 0.756103515625}, {"start": 3229.96, "end": 3230.04, "word": " the", "probability": 0.919921875}, {"start": 3230.04, "end": 3230.16, "word": " value", "probability": 0.98193359375}, {"start": 3230.16, "end": 3230.34, "word": " of", "probability": 0.953125}, {"start": 3230.34, "end": 3230.72, "word": " Q1?", "probability": 0.995849609375}, {"start": 3232.72, "end": 3233.42, "word": " Q1", "probability": 0.990234375}, {"start": 3233.42, "end": 3233.8, "word": " 
was", "probability": 0.9453125}, {"start": 3233.8, "end": 3234.14, "word": " 2.", "probability": 0.69580078125}, {"start": 3236.42, "end": 3237.12, "word": " Q3", "probability": 0.78369140625}, {"start": 3237.12, "end": 3237.32, "word": " is", "probability": 0.3203125}, {"start": 3237.32, "end": 3237.68, "word": " 5.", "probability": 0.35205078125}], "temperature": 1.0}, {"id": 119, "seek": 326795, "start": 3240.65, "end": 3267.95, "text": " In order to turn an outlier, you don't need the value, the median. Now, Q3 is 5, Q1 is 2, so IQR is 3. That's the value of IQR. Now, lower limit, A times 2 minus 1.5 times IQR3.", "tokens": [682, 1668, 281, 1261, 364, 484, 2753, 11, 291, 500, 380, 643, 264, 2158, 11, 264, 26779, 13, 823, 11, 1249, 18, 307, 1025, 11, 1249, 16, 307, 568, 11, 370, 28921, 49, 307, 805, 13, 663, 311, 264, 2158, 295, 28921, 49, 13, 823, 11, 3126, 4948, 11, 316, 1413, 568, 3175, 502, 13, 20, 1413, 28921, 49, 18, 13], "avg_logprob": -0.3026713656802331, "compression_ratio": 1.3185185185185184, "no_speech_prob": 0.0, "words": [{"start": 3240.6500000000005, "end": 3241.3700000000003, "word": " In", "probability": 0.449462890625}, {"start": 3241.3700000000003, "end": 3242.09, "word": " order", "probability": 0.908203125}, {"start": 3242.09, "end": 3242.33, "word": " to", "probability": 0.96240234375}, {"start": 3242.33, "end": 3242.59, "word": " turn", "probability": 0.591796875}, {"start": 3242.59, "end": 3243.37, "word": " an", "probability": 0.1346435546875}, {"start": 3243.37, "end": 3243.73, "word": " outlier,", "probability": 0.7783203125}, {"start": 3243.93, "end": 3243.95, "word": " you", "probability": 0.93505859375}, {"start": 3243.95, "end": 3244.13, "word": " don't", "probability": 0.967529296875}, {"start": 3244.13, "end": 3244.51, "word": " need", "probability": 0.9267578125}, {"start": 3244.51, "end": 3245.23, "word": " the", "probability": 0.8271484375}, {"start": 3245.23, "end": 3245.49, "word": " value,", "probability": 0.892578125}, 
{"start": 3245.89, "end": 3246.31, "word": " the", "probability": 0.77392578125}, {"start": 3246.31, "end": 3246.57, "word": " median.", "probability": 0.955078125}, {"start": 3247.11, "end": 3247.65, "word": " Now,", "probability": 0.50048828125}, {"start": 3248.49, "end": 3249.07, "word": " Q3", "probability": 0.66943359375}, {"start": 3249.07, "end": 3249.19, "word": " is", "probability": 0.1859130859375}, {"start": 3249.19, "end": 3249.47, "word": " 5,", "probability": 0.66796875}, {"start": 3249.59, "end": 3249.91, "word": " Q1", "probability": 0.9736328125}, {"start": 3249.91, "end": 3250.07, "word": " is", "probability": 0.9482421875}, {"start": 3250.07, "end": 3250.27, "word": " 2,", "probability": 0.9619140625}, {"start": 3250.37, "end": 3250.49, "word": " so", "probability": 0.9326171875}, {"start": 3250.49, "end": 3251.15, "word": " IQR", "probability": 0.89501953125}, {"start": 3251.15, "end": 3254.13, "word": " is", "probability": 0.80078125}, {"start": 3254.13, "end": 3254.47, "word": " 3.", "probability": 0.87841796875}, {"start": 3255.27, "end": 3255.65, "word": " That's", "probability": 0.814208984375}, {"start": 3255.65, "end": 3255.77, "word": " the", "probability": 0.91650390625}, {"start": 3255.77, "end": 3256.03, "word": " value", "probability": 0.97802734375}, {"start": 3256.03, "end": 3256.41, "word": " of", "probability": 0.962890625}, {"start": 3256.41, "end": 3257.43, "word": " IQR.", "probability": 0.708740234375}, {"start": 3257.63, "end": 3258.13, "word": " Now,", "probability": 0.8974609375}, {"start": 3258.25, "end": 3258.45, "word": " lower", "probability": 0.69189453125}, {"start": 3258.45, "end": 3258.91, "word": " limit,", "probability": 0.9755859375}, {"start": 3260.83, "end": 3261.05, "word": " A", "probability": 0.61376953125}, {"start": 3261.05, "end": 3261.39, "word": " times", "probability": 0.1827392578125}, {"start": 3261.39, "end": 3263.35, "word": " 2", "probability": 0.85107421875}, {"start": 3263.35, "end": 3265.15, 
"word": " minus", "probability": 0.90576171875}, {"start": 3265.15, "end": 3265.43, "word": " 1", "probability": 0.97705078125}, {"start": 3265.43, "end": 3266.03, "word": ".5", "probability": 0.986328125}, {"start": 3266.03, "end": 3266.53, "word": " times", "probability": 0.904296875}, {"start": 3266.53, "end": 3267.95, "word": " IQR3.", "probability": 0.7833658854166666}], "temperature": 1.0}, {"id": 120, "seek": 329769, "start": 3269.11, "end": 3297.69, "text": " So that's minus 2.5. U3 plus U3 is 3. It's 5, sorry. It's 5 plus 1.5. That gives 9.5. Now, any point or any data value, any data value falls below minus 2.5. I mean smaller than minus 2.5.", "tokens": [407, 300, 311, 3175, 568, 13, 20, 13, 624, 18, 1804, 624, 18, 307, 805, 13, 467, 311, 1025, 11, 2597, 13, 467, 311, 1025, 1804, 502, 13, 20, 13, 663, 2709, 1722, 13, 20, 13, 823, 11, 604, 935, 420, 604, 1412, 2158, 11, 604, 1412, 2158, 8804, 2507, 3175, 568, 13, 20, 13, 286, 914, 4356, 813, 3175, 568, 13, 20, 13], "avg_logprob": -0.18485576923076924, "compression_ratio": 1.4427480916030535, "no_speech_prob": 0.0, "words": [{"start": 3269.11, "end": 3269.37, "word": " So", "probability": 0.166748046875}, {"start": 3269.37, "end": 3269.63, "word": " that's", "probability": 0.920654296875}, {"start": 3269.63, "end": 3270.11, "word": " minus", "probability": 0.9052734375}, {"start": 3270.11, "end": 3271.35, "word": " 2", "probability": 0.93115234375}, {"start": 3271.35, "end": 3271.83, "word": ".5.", "probability": 0.98486328125}, {"start": 3273.55, "end": 3274.31, "word": " U3", "probability": 0.722900390625}, {"start": 3274.31, "end": 3274.81, "word": " plus", "probability": 0.9267578125}, {"start": 3274.81, "end": 3275.75, "word": " U3", "probability": 0.70166015625}, {"start": 3275.75, "end": 3275.91, "word": " is", "probability": 0.84716796875}, {"start": 3275.91, "end": 3276.23, "word": " 3.", "probability": 0.701171875}, {"start": 3276.41, "end": 3276.77, "word": " It's", "probability": 0.830078125}, 
{"start": 3276.77, "end": 3276.97, "word": " 5,", "probability": 0.9521484375}, {"start": 3277.09, "end": 3277.27, "word": " sorry.", "probability": 0.85888671875}, {"start": 3277.97, "end": 3278.39, "word": " It's", "probability": 0.930908203125}, {"start": 3278.39, "end": 3278.67, "word": " 5", "probability": 0.9833984375}, {"start": 3278.67, "end": 3279.09, "word": " plus", "probability": 0.94189453125}, {"start": 3279.09, "end": 3280.43, "word": " 1", "probability": 0.2197265625}, {"start": 3280.43, "end": 3281.17, "word": ".5.", "probability": 0.949462890625}, {"start": 3281.65, "end": 3282.05, "word": " That", "probability": 0.880859375}, {"start": 3282.05, "end": 3282.41, "word": " gives", "probability": 0.8779296875}, {"start": 3282.41, "end": 3284.25, "word": " 9", "probability": 0.9873046875}, {"start": 3284.25, "end": 3284.73, "word": ".5.", "probability": 0.995361328125}, {"start": 3285.31, "end": 3285.63, "word": " Now,", "probability": 0.94580078125}, {"start": 3286.79, "end": 3287.13, "word": " any", "probability": 0.89208984375}, {"start": 3287.13, "end": 3287.47, "word": " point", "probability": 0.9716796875}, {"start": 3287.47, "end": 3287.71, "word": " or", "probability": 0.7939453125}, {"start": 3287.71, "end": 3287.95, "word": " any", "probability": 0.9228515625}, {"start": 3287.95, "end": 3288.21, "word": " data", "probability": 0.9423828125}, {"start": 3288.21, "end": 3288.57, "word": " value,", "probability": 0.89892578125}, {"start": 3289.45, "end": 3291.09, "word": " any", "probability": 0.79150390625}, {"start": 3291.09, "end": 3291.39, "word": " data", "probability": 0.9384765625}, {"start": 3291.39, "end": 3291.81, "word": " value", "probability": 0.96630859375}, {"start": 3291.81, "end": 3293.97, "word": " falls", "probability": 0.6630859375}, {"start": 3293.97, "end": 3294.59, "word": " below", "probability": 0.91796875}, {"start": 3294.59, "end": 3295.01, "word": " minus", "probability": 0.96484375}, {"start": 3295.01, "end": 
3295.23, "word": " 2", "probability": 0.98291015625}, {"start": 3295.23, "end": 3295.63, "word": ".5.", "probability": 0.9990234375}, {"start": 3295.71, "end": 3295.81, "word": " I", "probability": 0.96630859375}, {"start": 3295.81, "end": 3295.95, "word": " mean", "probability": 0.9619140625}, {"start": 3295.95, "end": 3296.39, "word": " smaller", "probability": 0.39208984375}, {"start": 3296.39, "end": 3296.71, "word": " than", "probability": 0.93603515625}, {"start": 3296.71, "end": 3297.03, "word": " minus", "probability": 0.98193359375}, {"start": 3297.03, "end": 3297.23, "word": " 2", "probability": 0.99853515625}, {"start": 3297.23, "end": 3297.69, "word": ".5.", "probability": 0.999755859375}], "temperature": 1.0}, {"id": 121, "seek": 332526, "start": 3298.3, "end": 3325.26, "text": " Or greater than 9.5 is an outlier. If you look at the data you have, we have 0 up to 9. So none of these is considered to be an outlier. But what's about 27? 27 is greater than, much bigger than actually 9.5. 
So for that data, 27 is an outlier.", "tokens": [1610, 5044, 813, 1722, 13, 20, 307, 364, 484, 2753, 13, 759, 291, 574, 412, 264, 1412, 291, 362, 11, 321, 362, 1958, 493, 281, 1722, 13, 407, 6022, 295, 613, 307, 4888, 281, 312, 364, 484, 2753, 13, 583, 437, 311, 466, 7634, 30, 7634, 307, 5044, 813, 11, 709, 3801, 813, 767, 1722, 13, 20, 13, 407, 337, 300, 1412, 11, 7634, 307, 364, 484, 2753, 13], "avg_logprob": -0.14352679188762393, "compression_ratio": 1.53125, "no_speech_prob": 0.0, "words": [{"start": 3298.3, "end": 3298.58, "word": " Or", "probability": 0.39697265625}, {"start": 3298.58, "end": 3298.96, "word": " greater", "probability": 0.771484375}, {"start": 3298.96, "end": 3299.28, "word": " than", "probability": 0.93994140625}, {"start": 3299.28, "end": 3299.5, "word": " 9", "probability": 0.9365234375}, {"start": 3299.5, "end": 3299.98, "word": ".5", "probability": 0.98974609375}, {"start": 3299.98, "end": 3300.22, "word": " is", "probability": 0.8662109375}, {"start": 3300.22, "end": 3300.38, "word": " an", "probability": 0.63427734375}, {"start": 3300.38, "end": 3300.76, "word": " outlier.", "probability": 0.794189453125}, {"start": 3302.32, "end": 3302.96, "word": " If", "probability": 0.9033203125}, {"start": 3302.96, "end": 3303.1, "word": " you", "probability": 0.96240234375}, {"start": 3303.1, "end": 3303.36, "word": " look", "probability": 0.96240234375}, {"start": 3303.36, "end": 3303.8, "word": " at", "probability": 0.9619140625}, {"start": 3303.8, "end": 3304.02, "word": " the", "probability": 0.916015625}, {"start": 3304.02, "end": 3304.34, "word": " data", "probability": 0.9404296875}, {"start": 3304.34, "end": 3304.52, "word": " you", "probability": 0.9228515625}, {"start": 3304.52, "end": 3304.84, "word": " have,", "probability": 0.94482421875}, {"start": 3305.06, "end": 3305.24, "word": " we", "probability": 0.8984375}, {"start": 3305.24, "end": 3305.42, "word": " have", "probability": 0.94580078125}, {"start": 3305.42, "end": 3305.76, 
"word": " 0", "probability": 0.46142578125}, {"start": 3305.76, "end": 3306.0, "word": " up", "probability": 0.9462890625}, {"start": 3306.0, "end": 3306.1, "word": " to", "probability": 0.96435546875}, {"start": 3306.1, "end": 3306.42, "word": " 9.", "probability": 0.97607421875}, {"start": 3306.84, "end": 3307.24, "word": " So", "probability": 0.94921875}, {"start": 3307.24, "end": 3307.54, "word": " none", "probability": 0.8447265625}, {"start": 3307.54, "end": 3307.7, "word": " of", "probability": 0.96875}, {"start": 3307.7, "end": 3308.16, "word": " these", "probability": 0.61376953125}, {"start": 3308.16, "end": 3308.72, "word": " is", "probability": 0.90771484375}, {"start": 3308.72, "end": 3309.1, "word": " considered", "probability": 0.83740234375}, {"start": 3309.1, "end": 3309.3, "word": " to", "probability": 0.9619140625}, {"start": 3309.3, "end": 3309.4, "word": " be", "probability": 0.95263671875}, {"start": 3309.4, "end": 3309.52, "word": " an", "probability": 0.96484375}, {"start": 3309.52, "end": 3309.9, "word": " outlier.", "probability": 0.93798828125}, {"start": 3310.44, "end": 3310.74, "word": " But", "probability": 0.88623046875}, {"start": 3310.74, "end": 3311.0, "word": " what's", "probability": 0.84814453125}, {"start": 3311.0, "end": 3311.18, "word": " about", "probability": 0.89599609375}, {"start": 3311.18, "end": 3311.6, "word": " 27?", "probability": 0.92578125}, {"start": 3312.94, "end": 3313.58, "word": " 27", "probability": 0.7265625}, {"start": 3313.58, "end": 3315.26, "word": " is", "probability": 0.91796875}, {"start": 3315.26, "end": 3315.66, "word": " greater", "probability": 0.88134765625}, {"start": 3315.66, "end": 3316.2, "word": " than,", "probability": 0.94580078125}, {"start": 3316.26, "end": 3318.84, "word": " much", "probability": 0.8896484375}, {"start": 3318.84, "end": 3319.34, "word": " bigger", "probability": 0.8857421875}, {"start": 3319.34, "end": 3319.72, "word": " than", "probability": 0.9375}, {"start": 
3319.72, "end": 3320.4, "word": " actually", "probability": 0.646484375}, {"start": 3320.4, "end": 3320.86, "word": " 9", "probability": 0.970703125}, {"start": 3320.86, "end": 3321.46, "word": ".5.", "probability": 0.99853515625}, {"start": 3321.8, "end": 3322.22, "word": " So", "probability": 0.95849609375}, {"start": 3322.22, "end": 3322.5, "word": " for", "probability": 0.8955078125}, {"start": 3322.5, "end": 3322.74, "word": " that", "probability": 0.93505859375}, {"start": 3322.74, "end": 3323.16, "word": " data,", "probability": 0.931640625}, {"start": 3324.02, "end": 3324.34, "word": " 27", "probability": 0.9765625}, {"start": 3324.34, "end": 3324.78, "word": " is", "probability": 0.94384765625}, {"start": 3324.78, "end": 3324.94, "word": " an", "probability": 0.96337890625}, {"start": 3324.94, "end": 3325.26, "word": " outlier.", "probability": 0.94775390625}], "temperature": 1.0}, {"id": 122, "seek": 335584, "start": 3326.38, "end": 3355.84, "text": " So this is the way how can we compute the outlier for the sample. Another method. The score is another method to determine if that point is an outlier or not. So, so far we have two rules. One by using quartiles and the other, as we mentioned last time, by using the score. 
And for these scores, if you remember, any values below lie", "tokens": [407, 341, 307, 264, 636, 577, 393, 321, 14722, 264, 484, 2753, 337, 264, 6889, 13, 3996, 3170, 13, 440, 6175, 307, 1071, 3170, 281, 6997, 498, 300, 935, 307, 364, 484, 2753, 420, 406, 13, 407, 11, 370, 1400, 321, 362, 732, 4474, 13, 1485, 538, 1228, 20837, 4680, 293, 264, 661, 11, 382, 321, 2835, 1036, 565, 11, 538, 1228, 264, 6175, 13, 400, 337, 613, 13444, 11, 498, 291, 1604, 11, 604, 4190, 2507, 4544], "avg_logprob": -0.21617879331866396, "compression_ratio": 1.6292682926829267, "no_speech_prob": 0.0, "words": [{"start": 3326.38, "end": 3326.74, "word": " So", "probability": 0.80615234375}, {"start": 3326.74, "end": 3327.12, "word": " this", "probability": 0.76806640625}, {"start": 3327.12, "end": 3327.24, "word": " is", "probability": 0.93798828125}, {"start": 3327.24, "end": 3327.36, "word": " the", "probability": 0.85595703125}, {"start": 3327.36, "end": 3327.48, "word": " way", "probability": 0.962890625}, {"start": 3327.48, "end": 3327.6, "word": " how", "probability": 0.6982421875}, {"start": 3327.6, "end": 3327.78, "word": " can", "probability": 0.7939453125}, {"start": 3327.78, "end": 3327.92, "word": " we", "probability": 0.92724609375}, {"start": 3327.92, "end": 3328.42, "word": " compute", "probability": 0.8779296875}, {"start": 3328.42, "end": 3329.94, "word": " the", "probability": 0.8564453125}, {"start": 3329.94, "end": 3330.48, "word": " outlier", "probability": 0.71533203125}, {"start": 3330.48, "end": 3333.26, "word": " for", "probability": 0.455810546875}, {"start": 3333.26, "end": 3333.48, "word": " the", "probability": 0.68359375}, {"start": 3333.48, "end": 3333.74, "word": " sample.", "probability": 0.8046875}, {"start": 3335.84, "end": 3336.12, "word": " Another", "probability": 0.58349609375}, {"start": 3336.12, "end": 3336.5, "word": " method.", "probability": 0.931640625}, {"start": 3336.88, "end": 3337.0, "word": " The", "probability": 0.64990234375}, {"start": 
3337.0, "end": 3337.3, "word": " score", "probability": 0.62158203125}, {"start": 3337.3, "end": 3337.52, "word": " is", "probability": 0.9423828125}, {"start": 3337.52, "end": 3337.82, "word": " another", "probability": 0.9150390625}, {"start": 3337.82, "end": 3338.14, "word": " method", "probability": 0.94287109375}, {"start": 3338.14, "end": 3338.58, "word": " to", "probability": 0.96435546875}, {"start": 3338.58, "end": 3339.62, "word": " determine", "probability": 0.9091796875}, {"start": 3339.62, "end": 3339.92, "word": " if", "probability": 0.685546875}, {"start": 3339.92, "end": 3340.1, "word": " that", "probability": 0.91259765625}, {"start": 3340.1, "end": 3340.34, "word": " point", "probability": 0.9580078125}, {"start": 3340.34, "end": 3340.5, "word": " is", "probability": 0.935546875}, {"start": 3340.5, "end": 3340.62, "word": " an", "probability": 0.82763671875}, {"start": 3340.62, "end": 3340.9, "word": " outlier", "probability": 0.96240234375}, {"start": 3340.9, "end": 3341.06, "word": " or", "probability": 0.95654296875}, {"start": 3341.06, "end": 3341.24, "word": " not.", "probability": 0.9462890625}, {"start": 3342.62, "end": 3343.04, "word": " So,", "probability": 0.96240234375}, {"start": 3343.1, "end": 3343.26, "word": " so", "probability": 0.9462890625}, {"start": 3343.26, "end": 3343.44, "word": " far", "probability": 0.9501953125}, {"start": 3343.44, "end": 3343.6, "word": " we", "probability": 0.6728515625}, {"start": 3343.6, "end": 3343.8, "word": " have", "probability": 0.94384765625}, {"start": 3343.8, "end": 3344.02, "word": " two", "probability": 0.8837890625}, {"start": 3344.02, "end": 3344.4, "word": " rules.", "probability": 0.81787109375}, {"start": 3345.16, "end": 3345.34, "word": " One", "probability": 0.923828125}, {"start": 3345.34, "end": 3345.54, "word": " by", "probability": 0.86669921875}, {"start": 3345.54, "end": 3345.98, "word": " using", "probability": 0.93994140625}, {"start": 3345.98, "end": 3347.1, "word": " 
quartiles", "probability": 0.938232421875}, {"start": 3347.1, "end": 3348.16, "word": " and", "probability": 0.525390625}, {"start": 3348.16, "end": 3348.3, "word": " the", "probability": 0.59814453125}, {"start": 3348.3, "end": 3348.56, "word": " other,", "probability": 0.89013671875}, {"start": 3348.8, "end": 3348.9, "word": " as", "probability": 0.958984375}, {"start": 3348.9, "end": 3349.04, "word": " we", "probability": 0.9404296875}, {"start": 3349.04, "end": 3349.24, "word": " mentioned", "probability": 0.837890625}, {"start": 3349.24, "end": 3349.54, "word": " last", "probability": 0.830078125}, {"start": 3349.54, "end": 3349.82, "word": " time,", "probability": 0.8916015625}, {"start": 3349.84, "end": 3350.02, "word": " by", "probability": 0.958984375}, {"start": 3350.02, "end": 3350.36, "word": " using", "probability": 0.93310546875}, {"start": 3350.36, "end": 3350.54, "word": " the", "probability": 0.498779296875}, {"start": 3350.54, "end": 3350.86, "word": " score.", "probability": 0.6953125}, {"start": 3351.72, "end": 3351.92, "word": " And", "probability": 0.79833984375}, {"start": 3351.92, "end": 3352.12, "word": " for", "probability": 0.91650390625}, {"start": 3352.12, "end": 3352.34, "word": " these", "probability": 0.4755859375}, {"start": 3352.34, "end": 3352.62, "word": " scores,", "probability": 0.78125}, {"start": 3352.74, "end": 3352.84, "word": " if", "probability": 0.8818359375}, {"start": 3352.84, "end": 3352.92, "word": " you", "probability": 0.9541015625}, {"start": 3352.92, "end": 3353.46, "word": " remember,", "probability": 0.86279296875}, {"start": 3353.96, "end": 3354.2, "word": " any", "probability": 0.91015625}, {"start": 3354.2, "end": 3354.74, "word": " values", "probability": 0.95263671875}, {"start": 3354.74, "end": 3355.32, "word": " below", "probability": 0.64404296875}, {"start": 3355.32, "end": 3355.84, "word": " lie", "probability": 0.2548828125}], "temperature": 1.0}, {"id": 123, "seek": 338019, "start": 3356.69, "end": 
3380.19, "text": " Below minus three. And above three is considered to be irrelevant. That's another example. That's another way to figure out if the data is irrelevant. You can apply the two rules either for the sample or the population. If you have the entire data,", "tokens": [36261, 3175, 1045, 13, 400, 3673, 1045, 307, 4888, 281, 312, 28682, 13, 663, 311, 1071, 1365, 13, 663, 311, 1071, 636, 281, 2573, 484, 498, 264, 1412, 307, 28682, 13, 509, 393, 3079, 264, 732, 4474, 2139, 337, 264, 6889, 420, 264, 4415, 13, 759, 291, 362, 264, 2302, 1412, 11], "avg_logprob": -0.276975247095216, "compression_ratio": 1.55625, "no_speech_prob": 0.0, "words": [{"start": 3356.69, "end": 3357.37, "word": " Below", "probability": 0.3935546875}, {"start": 3357.37, "end": 3357.75, "word": " minus", "probability": 0.92333984375}, {"start": 3357.75, "end": 3358.01, "word": " three.", "probability": 0.78125}, {"start": 3359.15, "end": 3359.57, "word": " And", "probability": 0.9140625}, {"start": 3359.57, "end": 3360.03, "word": " above", "probability": 0.9541015625}, {"start": 3360.03, "end": 3360.53, "word": " three", "probability": 0.73046875}, {"start": 3360.53, "end": 3362.21, "word": " is", "probability": 0.4697265625}, {"start": 3362.21, "end": 3362.57, "word": " considered", "probability": 0.578125}, {"start": 3362.57, "end": 3362.71, "word": " to", "probability": 0.87451171875}, {"start": 3362.71, "end": 3362.79, "word": " be", "probability": 0.93701171875}, {"start": 3362.79, "end": 3362.99, "word": " irrelevant.", "probability": 0.236572265625}, {"start": 3363.11, "end": 3363.43, "word": " That's", "probability": 0.8896484375}, {"start": 3363.43, "end": 3363.69, "word": " another", "probability": 0.93212890625}, {"start": 3363.69, "end": 3364.11, "word": " example.", "probability": 0.8974609375}, {"start": 3364.65, "end": 3364.93, "word": " That's", "probability": 0.950927734375}, {"start": 3364.93, "end": 3365.21, "word": " another", "probability": 0.91796875}, {"start": 
3365.21, "end": 3365.57, "word": " way", "probability": 0.953125}, {"start": 3365.57, "end": 3367.03, "word": " to", "probability": 0.93603515625}, {"start": 3367.03, "end": 3367.57, "word": " figure", "probability": 0.9794921875}, {"start": 3367.57, "end": 3367.95, "word": " out", "probability": 0.8603515625}, {"start": 3367.95, "end": 3368.39, "word": " if", "probability": 0.81591796875}, {"start": 3368.39, "end": 3368.55, "word": " the", "probability": 0.38330078125}, {"start": 3368.55, "end": 3368.67, "word": " data", "probability": 0.853515625}, {"start": 3368.67, "end": 3368.87, "word": " is", "probability": 0.62841796875}, {"start": 3368.87, "end": 3369.19, "word": " irrelevant.", "probability": 0.1815185546875}, {"start": 3373.73, "end": 3374.41, "word": " You", "probability": 0.61572265625}, {"start": 3374.41, "end": 3374.71, "word": " can", "probability": 0.63330078125}, {"start": 3374.71, "end": 3375.13, "word": " apply", "probability": 0.9462890625}, {"start": 3375.13, "end": 3375.37, "word": " the", "probability": 0.9111328125}, {"start": 3375.37, "end": 3375.59, "word": " two", "probability": 0.8779296875}, {"start": 3375.59, "end": 3375.97, "word": " rules", "probability": 0.80029296875}, {"start": 3375.97, "end": 3376.37, "word": " either", "probability": 0.80859375}, {"start": 3376.37, "end": 3376.65, "word": " for", "probability": 0.9365234375}, {"start": 3376.65, "end": 3376.83, "word": " the", "probability": 0.85986328125}, {"start": 3376.83, "end": 3377.11, "word": " sample", "probability": 0.6181640625}, {"start": 3377.11, "end": 3377.61, "word": " or", "probability": 0.5205078125}, {"start": 3377.61, "end": 3378.19, "word": " the", "probability": 0.7763671875}, {"start": 3378.19, "end": 3378.57, "word": " population.", "probability": 0.861328125}, {"start": 3378.77, "end": 3378.91, "word": " If", "probability": 0.9619140625}, {"start": 3378.91, "end": 3379.03, "word": " you", "probability": 0.95947265625}, {"start": 3379.03, "end": 3379.19, 
"word": " have", "probability": 0.912109375}, {"start": 3379.19, "end": 3379.31, "word": " the", "probability": 0.89111328125}, {"start": 3379.31, "end": 3379.69, "word": " entire", "probability": 0.90673828125}, {"start": 3379.69, "end": 3380.19, "word": " data,", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 124, "seek": 340129, "start": 3380.89, "end": 3401.29, "text": " you can also determine out there for the entire dataset, even if that data is the population. But most of the time, we select a sample, which is a subset or a portion of that population. Questions?", "tokens": [291, 393, 611, 6997, 484, 456, 337, 264, 2302, 28872, 11, 754, 498, 300, 1412, 307, 264, 4415, 13, 583, 881, 295, 264, 565, 11, 321, 3048, 257, 6889, 11, 597, 307, 257, 25993, 420, 257, 8044, 295, 300, 4415, 13, 27738, 30], "avg_logprob": -0.2059659053656188, "compression_ratio": 1.4666666666666666, "no_speech_prob": 0.0, "words": [{"start": 3380.89, "end": 3381.21, "word": " you", "probability": 0.42041015625}, {"start": 3381.21, "end": 3381.59, "word": " can", "probability": 0.93798828125}, {"start": 3381.59, "end": 3382.15, "word": " also", "probability": 0.84033203125}, {"start": 3382.15, "end": 3382.73, "word": " determine", "probability": 0.8798828125}, {"start": 3382.73, "end": 3383.07, "word": " out", "probability": 0.175537109375}, {"start": 3383.07, "end": 3383.29, "word": " there", "probability": 0.921875}, {"start": 3383.29, "end": 3383.49, "word": " for", "probability": 0.8720703125}, {"start": 3383.49, "end": 3383.63, "word": " the", "probability": 0.90478515625}, {"start": 3383.63, "end": 3383.95, "word": " entire", "probability": 0.92529296875}, {"start": 3383.95, "end": 3384.31, "word": " dataset,", "probability": 0.5341796875}, {"start": 3385.01, "end": 3385.29, "word": " even", "probability": 0.87890625}, {"start": 3385.29, "end": 3385.55, "word": " if", "probability": 0.9443359375}, {"start": 3385.55, "end": 3385.71, "word": " that", "probability": 
0.74462890625}, {"start": 3385.71, "end": 3386.05, "word": " data", "probability": 0.9287109375}, {"start": 3386.05, "end": 3386.93, "word": " is", "probability": 0.94189453125}, {"start": 3386.93, "end": 3387.37, "word": " the", "probability": 0.89404296875}, {"start": 3387.37, "end": 3387.89, "word": " population.", "probability": 0.94091796875}, {"start": 3388.89, "end": 3389.11, "word": " But", "probability": 0.88818359375}, {"start": 3389.11, "end": 3389.75, "word": " most", "probability": 0.83447265625}, {"start": 3389.75, "end": 3389.89, "word": " of", "probability": 0.96826171875}, {"start": 3389.89, "end": 3390.01, "word": " the", "probability": 0.9189453125}, {"start": 3390.01, "end": 3390.31, "word": " time,", "probability": 0.890625}, {"start": 3391.17, "end": 3391.37, "word": " we", "probability": 0.94580078125}, {"start": 3391.37, "end": 3391.87, "word": " select", "probability": 0.84326171875}, {"start": 3391.87, "end": 3392.61, "word": " a", "probability": 0.98095703125}, {"start": 3392.61, "end": 3392.95, "word": " sample,", "probability": 0.89013671875}, {"start": 3393.59, "end": 3394.19, "word": " which", "probability": 0.94970703125}, {"start": 3394.19, "end": 3394.35, "word": " is", "probability": 0.94287109375}, {"start": 3394.35, "end": 3394.49, "word": " a", "probability": 0.98486328125}, {"start": 3394.49, "end": 3394.81, "word": " subset", "probability": 0.90966796875}, {"start": 3394.81, "end": 3395.19, "word": " or", "probability": 0.87255859375}, {"start": 3395.19, "end": 3395.63, "word": " a", "probability": 0.73095703125}, {"start": 3395.63, "end": 3395.99, "word": " portion", "probability": 0.87890625}, {"start": 3395.99, "end": 3396.21, "word": " of", "probability": 0.96826171875}, {"start": 3396.21, "end": 3396.51, "word": " that", "probability": 0.9091796875}, {"start": 3396.51, "end": 3397.79, "word": " population.", "probability": 0.931640625}, {"start": 3400.57, "end": 3401.29, "word": " Questions?", "probability": 
0.6611328125}], "temperature": 1.0}, {"id": 125, "seek": 342556, "start": 3413.36, "end": 3425.56, "text": " And locating outliers. So again, outlier is any value that is above the upper limit or below the lower limit.", "tokens": [400, 1628, 990, 484, 23646, 13, 407, 797, 11, 484, 2753, 307, 604, 2158, 300, 307, 3673, 264, 6597, 4948, 420, 2507, 264, 3126, 4948, 13], "avg_logprob": -0.23379629850387573, "compression_ratio": 1.25, "no_speech_prob": 0.0, "words": [{"start": 3413.3599999999997, "end": 3414.24, "word": " And", "probability": 0.18603515625}, {"start": 3414.24, "end": 3415.12, "word": " locating", "probability": 0.6510009765625}, {"start": 3415.12, "end": 3415.68, "word": " outliers.", "probability": 0.759521484375}, {"start": 3417.06, "end": 3417.52, "word": " So", "probability": 0.87060546875}, {"start": 3417.52, "end": 3417.8, "word": " again,", "probability": 0.86865234375}, {"start": 3417.9, "end": 3418.38, "word": " outlier", "probability": 0.878662109375}, {"start": 3418.38, "end": 3419.74, "word": " is", "probability": 0.90966796875}, {"start": 3419.74, "end": 3420.0, "word": " any", "probability": 0.9052734375}, {"start": 3420.0, "end": 3420.28, "word": " value", "probability": 0.97705078125}, {"start": 3420.28, "end": 3420.6, "word": " that", "probability": 0.9345703125}, {"start": 3420.6, "end": 3420.84, "word": " is", "probability": 0.9404296875}, {"start": 3420.84, "end": 3421.28, "word": " above", "probability": 0.96826171875}, {"start": 3421.28, "end": 3422.22, "word": " the", "probability": 0.91552734375}, {"start": 3422.22, "end": 3422.46, "word": " upper", "probability": 0.8427734375}, {"start": 3422.46, "end": 3422.9, "word": " limit", "probability": 0.96923828125}, {"start": 3422.9, "end": 3424.04, "word": " or", "probability": 0.814453125}, {"start": 3424.04, "end": 3424.44, "word": " below", "probability": 0.90380859375}, {"start": 3424.44, "end": 3425.0, "word": " the", "probability": 0.9130859375}, {"start": 3425.0, "end": 
3425.18, "word": " lower", "probability": 0.8369140625}, {"start": 3425.18, "end": 3425.56, "word": " limit.", "probability": 0.89501953125}], "temperature": 1.0}, {"id": 126, "seek": 344218, "start": 3426.18, "end": 3442.18, "text": " And also we can use this score also to determine if that point is outlier or not. Next time, Inshallah, we will go over the covariance and the relationship and I will give some practice problems for Chapter 3.", "tokens": [400, 611, 321, 393, 764, 341, 6175, 611, 281, 6997, 498, 300, 935, 307, 484, 2753, 420, 406, 13, 3087, 565, 11, 682, 2716, 13492, 11, 321, 486, 352, 670, 264, 49851, 719, 293, 264, 2480, 293, 286, 486, 976, 512, 3124, 2740, 337, 18874, 805, 13], "avg_logprob": -0.28694662141303223, "compression_ratio": 1.390728476821192, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 3426.18, "end": 3426.5, "word": " And", "probability": 0.50146484375}, {"start": 3426.5, "end": 3426.74, "word": " also", "probability": 0.7763671875}, {"start": 3426.74, "end": 3426.9, "word": " we", "probability": 0.5}, {"start": 3426.9, "end": 3427.06, "word": " can", "probability": 0.93017578125}, {"start": 3427.06, "end": 3427.32, "word": " use", "probability": 0.8681640625}, {"start": 3427.32, "end": 3427.6, "word": " this", "probability": 0.34521484375}, {"start": 3427.6, "end": 3427.98, "word": " score", "probability": 0.84375}, {"start": 3427.98, "end": 3428.34, "word": " also", "probability": 0.486083984375}, {"start": 3428.34, "end": 3428.56, "word": " to", "probability": 0.90234375}, {"start": 3428.56, "end": 3428.96, "word": " determine", "probability": 0.9130859375}, {"start": 3428.96, "end": 3429.2, "word": " if", "probability": 0.85107421875}, {"start": 3429.2, "end": 3429.38, "word": " that", "probability": 0.876953125}, {"start": 3429.38, "end": 3429.72, "word": " point", "probability": 0.9658203125}, {"start": 3429.72, "end": 3430.44, "word": " is", "probability": 0.93701171875}, {"start": 3430.44, "end": 3431.04, 
"word": " outlier", "probability": 0.5916748046875}, {"start": 3431.04, "end": 3431.62, "word": " or", "probability": 0.935546875}, {"start": 3431.62, "end": 3431.9, "word": " not.", "probability": 0.94580078125}, {"start": 3432.4, "end": 3432.68, "word": " Next", "probability": 0.90673828125}, {"start": 3432.68, "end": 3432.94, "word": " time,", "probability": 0.880859375}, {"start": 3433.0, "end": 3433.2, "word": " Inshallah,", "probability": 0.57763671875}, {"start": 3433.3, "end": 3433.42, "word": " we", "probability": 0.93896484375}, {"start": 3433.42, "end": 3433.74, "word": " will", "probability": 0.8583984375}, {"start": 3433.74, "end": 3435.18, "word": " go", "probability": 0.9501953125}, {"start": 3435.18, "end": 3435.48, "word": " over", "probability": 0.91552734375}, {"start": 3435.48, "end": 3435.74, "word": " the", "probability": 0.8955078125}, {"start": 3435.74, "end": 3436.34, "word": " covariance", "probability": 0.902099609375}, {"start": 3436.34, "end": 3436.58, "word": " and", "probability": 0.85107421875}, {"start": 3436.58, "end": 3436.66, "word": " the", "probability": 0.463623046875}, {"start": 3436.66, "end": 3437.08, "word": " relationship", "probability": 0.916015625}, {"start": 3437.08, "end": 3437.86, "word": " and", "probability": 0.51220703125}, {"start": 3437.86, "end": 3438.12, "word": " I", "probability": 0.9111328125}, {"start": 3438.12, "end": 3438.3, "word": " will", "probability": 0.89453125}, {"start": 3438.3, "end": 3438.52, "word": " give", "probability": 0.86962890625}, {"start": 3438.52, "end": 3438.86, "word": " some", "probability": 0.9013671875}, {"start": 3438.86, "end": 3439.42, "word": " practice", "probability": 0.9423828125}, {"start": 3439.42, "end": 3440.92, "word": " problems", "probability": 0.630859375}, {"start": 3440.92, "end": 3441.3, "word": " for", "probability": 0.939453125}, {"start": 3441.3, "end": 3441.66, "word": " Chapter", "probability": 0.47216796875}, {"start": 3441.66, "end": 3442.18, "word": " 
3.", "probability": 0.720703125}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3447.1415, "duration_after_vad": 3307.3899999999844} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..d582f9bc969875b5a39b092afe6f25f0a263b2ec --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/0UQx5fYO0DE_raw.srt @@ -0,0 +1,2628 @@ +1 +00:00:11,850 --> 00:00:16,370 +Inshallah we'll start numerical descriptive majors + +2 +00:00:16,370 --> 00:00:22,270 +for the population. Last time we talked about the + +3 +00:00:22,270 --> 00:00:25,780 +same majors. I mean the same descriptive measures + +4 +00:00:25,780 --> 00:00:29,180 +for a sample. And we have already talked about the + +5 +00:00:29,180 --> 00:00:35,080 +mean, variance, and standard deviation. These are + +6 +00:00:35,080 --> 00:00:38,580 +called statistics because they are computed from + +7 +00:00:38,580 --> 00:00:43,140 +the sample. Here we'll see how can we do the same + +8 +00:00:44,300 --> 00:00:47,320 +measures but for a population, I mean for the + +9 +00:00:47,320 --> 00:00:53,020 +entire dataset. So descriptive statistics + +10 +00:00:53,020 --> 00:00:57,860 +described previously in the last two lectures was + +11 +00:00:57,860 --> 00:01:04,200 +for a sample. Here we'll just see how can we + +12 +00:01:04,200 --> 00:01:07,740 +compute these measures for the entire population. + +13 +00:01:08,480 --> 00:01:11,600 +In this case, the statistics we talked about + +14 +00:01:11,600 --> 00:01:17,200 +before are called And if you remember the first + +15 +00:01:17,200 --> 00:01:19,800 +lecture, we said there is a difference between + +16 +00:01:19,800 --> 00:01:24,300 +statistics and parameters. 
A statistic is a value + +17 +00:01:24,300 --> 00:01:27,520 +that computed from a sample, but parameter is a + +18 +00:01:27,520 --> 00:01:32,140 +value computed from population. So the important + +19 +00:01:32,140 --> 00:01:37,020 +population parameters are population mean, + +20 +00:01:37,660 --> 00:01:43,560 +variance, and standard deviation. Let's start with + +21 +00:01:43,560 --> 00:01:45,880 +the first one, the mean, or the population mean. + +22 +00:01:46,980 --> 00:01:50,720 +As the sample mean is defined by the sum of the + +23 +00:01:50,720 --> 00:01:55,120 +values divided by the sample size. But here, we + +24 +00:01:55,120 --> 00:01:57,880 +have to divide by the population size. So that's + +25 +00:01:57,880 --> 00:02:01,140 +the difference between sample mean and population + +26 +00:02:01,140 --> 00:02:08,950 +mean. For the sample mean, we use x bar. Here we + +27 +00:02:08,950 --> 00:02:14,790 +use Greek letter, mu. This is pronounced as mu. So + +28 +00:02:14,790 --> 00:02:18,790 +mu is the sum of the x values divided by the + +29 +00:02:18,790 --> 00:02:21,210 +population size, not the sample size. So it's + +30 +00:02:21,210 --> 00:02:24,570 +quite similar to the sample mean. So mu is the + +31 +00:02:24,570 --> 00:02:28,030 +population mean, n is the population size, and xi + +32 +00:02:28,030 --> 00:02:33,270 +is the it value of the variable x. Similarly, for + +33 +00:02:33,270 --> 00:02:37,310 +the other parameter, which is the variance, the + +34 +00:02:37,310 --> 00:02:41,520 +variance There is a little difference between the + +35 +00:02:41,520 --> 00:02:45,480 +sample and population variance. Here, we subtract + +36 +00:02:45,480 --> 00:02:49,700 +the population mean instead of the sample mean. So + +37 +00:02:49,700 --> 00:02:55,140 +sum of xi minus mu squared, then divide by this + +38 +00:02:55,140 --> 00:02:59,140 +population size, capital N, instead of N minus 1. 
+ +39 +00:02:59,520 --> 00:03:02,260 +So that's the difference between sample and + +40 +00:03:02,260 --> 00:03:07,020 +population variance. So again, in the sample + +41 +00:03:07,020 --> 00:03:12,080 +variance, we subtracted x bar. Here, we subtract + +42 +00:03:12,080 --> 00:03:15,640 +the mean of the population, mu, then divide by + +43 +00:03:15,640 --> 00:03:20,200 +capital N instead of N minus 1. So the + +44 +00:03:20,200 --> 00:03:24,000 +computations for the sample and the population + +45 +00:03:24,000 --> 00:03:30,220 +mean or variance are quite similar. Finally, the + +46 +00:03:30,220 --> 00:03:35,390 +population standard deviation. is the same as the + +47 +00:03:35,390 --> 00:03:38,810 +sample population variance and here just take the + +48 +00:03:38,810 --> 00:03:43,170 +square root of the population variance and again + +49 +00:03:43,170 --> 00:03:47,170 +as we did as we explained before the standard + +50 +00:03:47,170 --> 00:03:51,550 +deviation has the same units as the original unit + +51 +00:03:51,550 --> 00:03:57,130 +so nothing is new we just extend the sample + +52 +00:03:57,130 --> 00:04:02,410 +statistic to the population parameter and again + +53 +00:04:04,030 --> 00:04:08,790 +The mean is denoted by mu, it's a Greek letter. + +54 +00:04:10,210 --> 00:04:12,790 +The population variance is denoted by sigma + +55 +00:04:12,790 --> 00:04:17,030 +squared. And finally, the population standard + +56 +00:04:17,030 --> 00:04:21,130 +deviation is denoted by sigma. So that's the + +57 +00:04:21,130 --> 00:04:24,250 +numerical descriptive measures either for a sample + +58 +00:04:24,250 --> 00:04:28,590 +or a population. So just summary for these + +59 +00:04:28,590 --> 00:04:33,330 +measures. The measures are mean variance, standard + +60 +00:04:33,330 --> 00:04:38,250 +deviation. 
Population parameters are mu for the + +61 +00:04:38,250 --> 00:04:43,830 +mean, sigma squared for variance, and sigma for + +62 +00:04:43,830 --> 00:04:46,710 +standard deviation. On the other hand, for the + +63 +00:04:46,710 --> 00:04:51,430 +sample statistics, we have x bar for sample mean, + +64 +00:04:52,110 --> 00:04:56,750 +s squared for the sample variance, and s is the + +65 +00:04:56,750 --> 00:05:00,410 +sample standard deviation. That's sample + +66 +00:05:00,410 --> 00:05:05,360 +statistics against population parameters. Any + +67 +00:05:05,360 --> 00:05:05,700 +question? + +68 +00:05:10,940 --> 00:05:17,240 +Let's move to new topic, which is empirical role. + +69 +00:05:19,340 --> 00:05:25,620 +Now, empirical role is just we + +70 +00:05:25,620 --> 00:05:30,120 +have to approximate the variation of data in case + +71 +00:05:30,120 --> 00:05:34,950 +of They'll shift. I mean suppose the data is + +72 +00:05:34,950 --> 00:05:37,770 +symmetric around the mean. I mean by symmetric + +73 +00:05:37,770 --> 00:05:42,310 +around the mean, the mean is the vertical line + +74 +00:05:42,310 --> 00:05:46,570 +that splits the data into two halves. One to the + +75 +00:05:46,570 --> 00:05:49,570 +right and the other to the left. I mean, the mean, + +76 +00:05:49,870 --> 00:05:52,650 +the area to the right of the mean equals 50%, + +77 +00:05:52,650 --> 00:05:54,970 +which is the same as the area to the left of the + +78 +00:05:54,970 --> 00:05:58,710 +mean. Now suppose or consider the data is bell + +79 +00:05:58,710 --> 00:06:02,570 +-shaped. Bell-shaped, normal, or symmetric? So + +80 +00:06:02,570 --> 00:06:04,290 +it's not skewed either to the right or to the + +81 +00:06:04,290 --> 00:06:08,030 +left. So here we assume, okay, the data is bell + +82 +00:06:08,030 --> 00:06:13,430 +-shaped. In this scenario, in this case, there is + +83 +00:06:13,430 --> 00:06:22,100 +a rule called 68, 95, 99.7 rule. 
Number one, + +84 +00:06:22,960 --> 00:06:26,300 +approximately 68% of the data in a bill shipped + +85 +00:06:26,300 --> 00:06:31,780 +lies within one standard deviation of the + +86 +00:06:31,780 --> 00:06:37,100 +population. So this is the first rule, 68% of the + +87 +00:06:37,100 --> 00:06:43,920 +data or of the observations Lie within a mu minus + +88 +00:06:43,920 --> 00:06:48,880 +sigma and a mu plus sigma. That's the meaning of + +89 +00:06:48,880 --> 00:06:51,800 +the data in bell shape distribution is within one + +90 +00:06:51,800 --> 00:06:55,900 +standard deviation of mean or mu plus or minus + +91 +00:06:55,900 --> 00:07:01,480 +sigma. So again, you can say that if the data is + +92 +00:07:01,480 --> 00:07:04,100 +normally distributed or if the data is bell + +93 +00:07:04,100 --> 00:07:12,210 +shaped, that is 68% of the data lies within one + +94 +00:07:12,210 --> 00:07:16,250 +standard deviation of the mean, either below or + +95 +00:07:16,250 --> 00:07:21,710 +above it. So 68% of the data. So this is the first + +96 +00:07:21,710 --> 00:07:22,090 +rule. + +97 +00:07:29,050 --> 00:07:37,170 +68% of the data lies between mu minus sigma and mu + +98 +00:07:37,170 --> 00:07:37,750 +plus sigma. + +99 +00:07:40,480 --> 00:07:46,260 +The other rule is approximately 95% of the data in + +100 +00:07:46,260 --> 00:07:48,980 +a bell-shaped distribution lies within two + +101 +00:07:48,980 --> 00:07:53,240 +standard deviations of the mean. That means this + +102 +00:07:53,240 --> 00:08:00,880 +area covers between minus two sigma and plus mu + +103 +00:08:00,880 --> 00:08:08,360 +plus two sigma. So 95% of the data lies between + +104 +00:08:08,360 --> 00:08:15,410 +minus mu two sigma And finally, + +105 +00:08:15,790 --> 00:08:21,270 +approximately 99.7% of the data, it means almost + +106 +00:08:21,270 --> 00:08:25,490 +the data. 
Because we are saying 99.7 means most of + +107 +00:08:25,490 --> 00:08:29,930 +the data falls or lies within three standard + +108 +00:08:29,930 --> 00:08:37,770 +deviations of the mean. So 99.7% of the data lies + +109 +00:08:37,770 --> 00:08:41,470 +between mu minus the pre-sigma and the mu plus of + +110 +00:08:41,470 --> 00:08:41,870 +pre-sigma. + +111 +00:08:45,030 --> 00:08:49,810 +68, 95, 99.7 are fixed numbers. Later in chapter + +112 +00:08:49,810 --> 00:08:55,010 +6, we will explain in details other coefficients. + +113 +00:08:55,530 --> 00:08:58,250 +Maybe suppose we are interested not in one of + +114 +00:08:58,250 --> 00:09:03,010 +these. Suppose we are interested in 90% or 80% or + +115 +00:09:03,010 --> 00:09:11,500 +85%. This rule just for 689599.7. This rule is + +116 +00:09:11,500 --> 00:09:15,560 +called 689599 + +117 +00:09:15,560 --> 00:09:22,960 +.7 rule. That is, again, 68% of the data lies + +118 +00:09:22,960 --> 00:09:27,030 +within one standard deviation of the mean. 95% of + +119 +00:09:27,030 --> 00:09:30,370 +the data lies within two standard deviations of + +120 +00:09:30,370 --> 00:09:33,850 +the mean. And finally, most of the data falls + +121 +00:09:33,850 --> 00:09:36,950 +within three standard deviations of the mean. + +122 +00:09:39,870 --> 00:09:43,330 +Let's see how can we use this empirical rule for a + +123 +00:09:43,330 --> 00:09:49,850 +specific example. Imagine that the variable math + +124 +00:09:49,850 --> 00:09:54,070 +set scores is bell shaped. So here we assume that + +125 +00:09:55,230 --> 00:10:00,950 +The math status score has symmetric shape or bell + +126 +00:10:00,950 --> 00:10:04,230 +shape. In this case, we can use the previous rule. + +127 +00:10:04,350 --> 00:10:09,610 +Otherwise, we cannot. So assume the math status + +128 +00:10:09,610 --> 00:10:15,750 +score is bell-shaped with a mean of 500. 
I mean, + +129 +00:10:16,410 --> 00:10:19,750 +the population mean is 500 and standard deviation + +130 +00:10:19,750 --> 00:10:24,620 +of 90. And let's see how can we apply the + +131 +00:10:24,620 --> 00:10:29,220 +empirical rule. So again, meta score has a mean of + +132 +00:10:29,220 --> 00:10:35,300 +500 and standard deviation sigma is 90. Then we + +133 +00:10:35,300 --> 00:10:43,200 +can say that 60% of all test takers scored between + +134 +00:10:43,200 --> 00:10:46,640 +68%. + +135 +00:10:46,640 --> 00:10:56,550 +So mu is 500. minus sigma is 90. And mu plus + +136 +00:10:56,550 --> 00:11:05,390 +sigma, 500 plus 90. So you can say that 68% or 230 + +137 +00:11:05,390 --> 00:11:15,610 +of all test takers scored between 410 and 590. So + +138 +00:11:15,610 --> 00:11:22,900 +68% of all test takers who took that exam scored + +139 +00:11:22,900 --> 00:11:27,740 +between 14 and 590. That if we assume previously + +140 +00:11:27,740 --> 00:11:29,980 +the data is well shaped, otherwise we cannot say + +141 +00:11:29,980 --> 00:11:36,420 +that. For the other rule, 95% of all test takers + +142 +00:11:36,420 --> 00:11:44,400 +scored between mu is 500 minus 2 times sigma, 500 + +143 +00:11:44,400 --> 00:11:49,760 +plus 2 times sigma. So that means 500 minus 180 is + +144 +00:11:49,760 --> 00:11:55,100 +320. 500 plus 180 is 680. So you can say that + +145 +00:11:55,100 --> 00:11:59,080 +approximately 95% of all test takers scored + +146 +00:11:59,080 --> 00:12:07,860 +between 320 and 680. Finally, you can say that + +147 +00:12:10,770 --> 00:12:13,570 +all of the test takers, approximately all, because + +148 +00:12:13,570 --> 00:12:20,030 +when we are saying 99.7 it means just 0.3 is the + +149 +00:12:20,030 --> 00:12:23,590 +rest, so you can say approximately all test takers + +150 +00:12:23,590 --> 00:12:30,730 +scored between mu minus three sigma which is 90 + +151 +00:12:30,730 --> 00:12:39,830 +and mu It lost 3 seconds. 
So 500 minus 3 times 9 + +152 +00:12:39,830 --> 00:12:45,950 +is 270. So that's 230. 500 plus 270 is 770. So we + +153 +00:12:45,950 --> 00:12:49,690 +can say that 99.7% of all the stackers scored + +154 +00:12:49,690 --> 00:12:55,610 +between 230 and 770. I will give another example + +155 +00:12:55,610 --> 00:12:59,210 +just to make sure that you understand the meaning + +156 +00:12:59,210 --> 00:13:00,870 +of this rule. + +157 +00:13:03,620 --> 00:13:09,720 +For business, a statistic goes. + +158 +00:13:15,720 --> 00:13:20,720 +For business, a statistic example. Suppose the + +159 +00:13:20,720 --> 00:13:29,740 +scores are bell-shaped. So we are assuming the + +160 +00:13:29,740 --> 00:13:40,970 +data is bell-shaped. with mean of 75 and standard + +161 +00:13:40,970 --> 00:13:41,950 +deviation of 5. + +162 +00:13:44,990 --> 00:13:53,810 +Also, let's assume that 100 students took + +163 +00:13:53,810 --> 00:14:00,840 +the exam. So we have 100 students. Last year took + +164 +00:14:00,840 --> 00:14:05,360 +the exam of business statistics. The mean was 75. + +165 +00:14:06,240 --> 00:14:10,920 +And standard deviation was 5. And let's see how it + +166 +00:14:10,920 --> 00:14:17,100 +can tell about 6 to 8% rule. It means that 6 to 8% + +167 +00:14:17,100 --> 00:14:22,100 +of all the students score + +168 +00:14:22,100 --> 00:14:28,650 +between mu minus sigma. Mu is 75. minus sigma and + +169 +00:14:28,650 --> 00:14:29,610 +the mu plus sigma. + +170 +00:14:33,590 --> 00:14:39,290 +So that means 68 students, because we have 100, so + +171 +00:14:39,290 --> 00:14:45,410 +you can say 68 students scored between 70 and 80. + +172 +00:14:46,610 --> 00:14:53,290 +So 60 students out of 100 scored between 70 and + +173 +00:14:53,290 --> 00:15:02,990 +80. About 95 students out of 100 scored between 75 + +174 +00:15:02,990 --> 00:15:12,190 +minus 2 times 5. 75 plus 2 times 5. So that gives + +175 +00:15:12,190 --> 00:15:13,770 +65. 
+ +176 +00:15:15,550 --> 00:15:20,950 +The minimum and the maximum is 85. So you can say + +177 +00:15:20,950 --> 00:15:25,930 +that around 95 students scored between 65 and 85. + +178 +00:15:26,650 --> 00:15:33,510 +Finally, maybe you can see all students. Because + +179 +00:15:33,510 --> 00:15:38,650 +when you're saying 99.7, it means almost all the + +180 +00:15:38,650 --> 00:15:47,210 +students scored between 75 minus 3 times Y. and 75 + +181 +00:15:47,210 --> 00:15:52,970 +plus three times one. So that's six days in two + +182 +00:15:52,970 --> 00:15:59,150 +nights. Now let's look carefully at these three + +183 +00:15:59,150 --> 00:16:04,910 +intervals. The first one is seven to eight, the + +184 +00:16:04,910 --> 00:16:11,050 +other one 65 to 85, then six to 90. When we are + +185 +00:16:11,050 --> 00:16:11,790 +more confident, + +186 +00:16:15,170 --> 00:16:20,630 +When we are more confident here for 99.7%, the + +187 +00:16:20,630 --> 00:16:25,930 +interval becomes wider. So this is the widest + +188 +00:16:25,930 --> 00:16:31,430 +interval. Because here, the length of the interval + +189 +00:16:31,430 --> 00:16:37,090 +is around 10. The other one is 20. Here is 30. So + +190 +00:16:37,090 --> 00:16:42,570 +the last interval has the highest width. So as the + +191 +00:16:42,570 --> 00:16:48,380 +confidence coefficient increases, the length of + +192 +00:16:48,380 --> 00:16:54,080 +the interval becomes larger and larger because it + +193 +00:16:54,080 --> 00:16:59,160 +starts with 10, 20, and we end with 30. So that's + +194 +00:16:59,160 --> 00:17:04,460 +another example of empirical load. And again, here + +195 +00:17:04,460 --> 00:17:10,400 +we assume the data is bell shape. Let's move. to + +196 +00:17:10,400 --> 00:17:15,320 +another one when the data is not in shape. I mean, + +197 +00:17:15,600 --> 00:17:21,840 +if we have data and that data is not symmetric. So + +198 +00:17:21,840 --> 00:17:24,440 +that rule is no longer valid. 
So we have to use + +199 +00:17:24,440 --> 00:17:27,940 +another rule. It's called shape-example rule. + +200 +00:17:37,450 --> 00:17:41,610 +Any questions before we move to the next topic? + +201 +00:17:44,390 --> 00:17:48,150 +At shape and shape rule, it says that regardless + +202 +00:17:48,150 --> 00:17:53,890 +of how the data are distributed, I mean, if the + +203 +00:17:53,890 --> 00:17:58,190 +data is not symmetric or + +204 +00:17:58,190 --> 00:18:02,910 +not bell-shaped, then we can say that at least + +205 +00:18:05,150 --> 00:18:10,990 +Instead of saying 68, 95, or 99.7, just say around + +206 +00:18:10,990 --> 00:18:18,690 +1 minus 1 over k squared. Multiply this by 100. + +207 +00:18:19,650 --> 00:18:25,190 +All of the values will fall within k. So k is + +208 +00:18:25,190 --> 00:18:30,410 +number of standard deviations. I mean number of + +209 +00:18:30,410 --> 00:18:33,990 +signals. So if the data is not bell shaped, then + +210 +00:18:33,990 --> 00:18:38,790 +you can say that approximately at least 1 minus 1 + +211 +00:18:38,790 --> 00:18:43,410 +over k squared times 100% of the values will fall + +212 +00:18:43,410 --> 00:18:47,630 +within k standard deviations of the mean. In this + +213 +00:18:47,630 --> 00:18:50,950 +case, we assume that k is greater than 1. I mean, + +214 +00:18:51,030 --> 00:18:54,550 +you cannot apply this rule if k equals 1. Because + +215 +00:18:54,550 --> 00:19:00,090 +if k is 1. Then 1 minus 1 is 0. That makes no + +216 +00:19:00,090 --> 00:19:03,410 +sense. For this reason, k is above 1 or greater + +217 +00:19:03,410 --> 00:19:09,110 +than 1. So this rule is valid only for k greater + +218 +00:19:09,110 --> 00:19:14,390 +than 1. So you can see that at least 1 minus 1 + +219 +00:19:14,390 --> 00:19:19,270 +over k squared of the data or of the values will + +220 +00:19:19,270 --> 00:19:24,230 +fall within k standard equations. So now, for + +221 +00:19:24,230 --> 00:19:25,830 +example, suppose k equals 2. 
+ +222 +00:19:28,690 --> 00:19:32,970 +When k equals 2, we said that 95% of the data + +223 +00:19:32,970 --> 00:19:36,370 +falls within two standard ratios. That if the data + +224 +00:19:36,370 --> 00:19:39,350 +is bell shaped. Now what's about if the data is + +225 +00:19:39,350 --> 00:19:43,210 +not bell shaped? We have to use shape shape rule. + +226 +00:19:43,830 --> 00:19:51,170 +So 1 minus 1 over k is 2. So 2, 2, 2 squared. So 1 + +227 +00:19:51,170 --> 00:19:58,130 +minus 1 fourth. That gives. three quarters, I + +228 +00:19:58,130 --> 00:20:03,370 +mean, 75%. So instead of saying 95% of the data + +229 +00:20:03,370 --> 00:20:06,850 +lies within one or two standard deviations of the + +230 +00:20:06,850 --> 00:20:13,070 +mean, if the data is bell-shaped, if the data is + +231 +00:20:13,070 --> 00:20:17,590 +not bell-shaped, you have to say that 75% of the + +232 +00:20:17,590 --> 00:20:22,190 +data falls within two standard deviations. For + +233 +00:20:22,190 --> 00:20:26,570 +bell shape, you are 95% confident there. But here, + +234 +00:20:27,190 --> 00:20:36,710 +you're just 75% confident. Suppose k is 3. Now for + +235 +00:20:36,710 --> 00:20:41,110 +k equal 3, we said 99.7% of the data falls within + +236 +00:20:41,110 --> 00:20:44,890 +three standard deviations. Now here, if the data + +237 +00:20:44,890 --> 00:20:51,940 +is not bell shape, 1 minus 1 over k squared. 1 + +238 +00:20:51,940 --> 00:20:56,540 +minus 1 + +239 +00:20:56,540 --> 00:21:00,760 +over 3 squared is one-ninth. One-ninth is 0.11. 1 + +240 +00:21:00,760 --> 00:21:06,440 +minus 0.11 means 89% of the data, instead of + +241 +00:21:06,440 --> 00:21:13,900 +saying 99.7. So 89% of the data will fall within + +242 +00:21:13,900 --> 00:21:16,460 +three standard deviations of the population mean. + +243 +00:21:18,510 --> 00:21:22,610 +regardless of how the data are distributed around + +244 +00:21:22,610 --> 00:21:26,350 +them. So here, we have two scenarios. 
One, if the

+245
+00:21:26,350 --> 00:21:29,390
+data is symmetric, which is called empirical rule

+246
+00:21:29,390 --> 00:21:34,710
+68, 95, 99.7. And the other one is called the

+247
+00:21:34,710 --> 00:21:38,370
+Chebyshev rule, and that regardless of the shape of

+248
+00:21:38,370 --> 00:21:38,710
+the data.

+249
+00:21:41,890 --> 00:21:49,210
+Excuse me? Yes. In this case, you don't know the

+250
+00:21:49,210 --> 00:21:51,490
+distribution of the data. And the reality is

+251
+00:21:51,490 --> 00:21:58,650
+sometimes the data has unknown distribution. For

+252
+00:21:58,650 --> 00:22:02,590
+this reason, we have to use Chebyshev's theorem.

+253
+00:22:05,410 --> 00:22:09,830
+That's all for empirical rule and Chebyshev rule.

+254
+00:22:11,230 --> 00:22:18,150
+The next topic is quartile measures. So far, we

+255
+00:22:18,150 --> 00:22:24,330
+have discussed central tendency measures, and we

+256
+00:22:24,330 --> 00:22:28,450
+have talked about mean, median, and mode. Then we

+257
+00:22:28,450 --> 00:22:32,830
+moved to measures of variability or spread or

+258
+00:22:32,830 --> 00:22:37,810
+dispersion. And we talked about range, variance,

+259
+00:22:37,950 --> 00:22:38,890
+and standard deviation.

+260
+00:22:41,570 --> 00:22:48,230
+And we said that outliers affect the mean much

+261
+00:22:48,230 --> 00:22:51,470
+more than the median. And also, outliers affect

+262
+00:22:51,470 --> 00:22:55,730
+the range. Here, we'll talk about other measures

+263
+00:22:55,730 --> 00:22:59,570
+of the data, which is called quartile measures.

+264
+00:23:01,190 --> 00:23:03,450
+Here, actually, we'll talk about two measures.

+265
+00:23:04,270 --> 00:23:10,130
+First one is called first quartile, And the other

+266
+00:23:10,130 --> 00:23:14,150
+one is third quartile. So we have two measures,

+267
+00:23:15,470 --> 00:23:26,030
+first and third quartile. 
Quartiles split the rank + +268 +00:23:26,030 --> 00:23:32,930 +data into four equal segments. I mean, these + +269 +00:23:32,930 --> 00:23:37,190 +measures split the data you have into four equal + +270 +00:23:37,190 --> 00:23:37,730 +parts. + +271 +00:23:42,850 --> 00:23:48,690 +Q1 has 25% of the data fall below it. I mean 25% + +272 +00:23:48,690 --> 00:23:56,410 +of the values lie below Q1. So it means 75% of the + +273 +00:23:56,410 --> 00:24:04,410 +values above it. So 25 below and 75 above. But you + +274 +00:24:04,410 --> 00:24:07,370 +have to be careful that the data is arranged from + +275 +00:24:07,370 --> 00:24:12,430 +smallest to largest. So in this case, Q1. is a + +276 +00:24:12,430 --> 00:24:19,630 +value that has 25% below it. So Q2 is called the + +277 +00:24:19,630 --> 00:24:22,450 +median. The median, the value in the middle when + +278 +00:24:22,450 --> 00:24:26,250 +we arrange the data from smallest to largest. So + +279 +00:24:26,250 --> 00:24:31,190 +that means 50% of the data below and also 50% of + +280 +00:24:31,190 --> 00:24:36,370 +the data above. The other measure is called + +281 +00:24:36,370 --> 00:24:41,730 +theoretical qualifying. In this case, we have 25% + +282 +00:24:41,730 --> 00:24:47,950 +of the data above Q3 and 75% of the data below Q3. + +283 +00:24:49,010 --> 00:24:54,410 +So quartiles split the rank data into four equal + +284 +00:24:54,410 --> 00:25:00,190 +segments, Q1 25% to the left, Q2 50% to the left, + +285 +00:25:00,970 --> 00:25:08,590 +Q3 75% to the left, and 25% to the right. Before, + +286 +00:25:09,190 --> 00:25:13,830 +we explained how to compute the median, and let's + +287 +00:25:13,830 --> 00:25:18,850 +see how can we compute first and third quartile. + +288 +00:25:19,750 --> 00:25:23,650 +If you remember, when we computed the median, + +289 +00:25:24,350 --> 00:25:28,480 +first we locate the position of the median. And we + +290 +00:25:28,480 --> 00:25:33,540 +said that the rank of n is odd. 
Yes, it was n plus + +291 +00:25:33,540 --> 00:25:37,800 +1 divided by 2. This is the location of the + +292 +00:25:37,800 --> 00:25:41,100 +median, not the value. Sometimes the value may be + +293 +00:25:41,100 --> 00:25:44,900 +equal to the location, but most of the time it's + +294 +00:25:44,900 --> 00:25:48,340 +not. It's not the case. Now let's see how can we + +295 +00:25:48,340 --> 00:25:54,130 +locate the fair support. The first quartile after + +296 +00:25:54,130 --> 00:25:56,690 +you arrange the data from smallest to largest, the + +297 +00:25:56,690 --> 00:26:01,290 +location is n plus 1 divided by 2. So that's the + +298 +00:26:01,290 --> 00:26:06,890 +location of the first quartile. The median, as we + +299 +00:26:06,890 --> 00:26:10,390 +mentioned before, is located in the middle. So it + +300 +00:26:10,390 --> 00:26:15,210 +makes sense that if n is odd, the location of the + +301 +00:26:15,210 --> 00:26:20,490 +median is n plus 1 over 2. Now, for the third + +302 +00:26:20,490 --> 00:26:27,160 +quartile position, The location is N plus 1 + +303 +00:26:27,160 --> 00:26:31,160 +divided by 4 times 3. So 3 times N plus 1 divided + +304 +00:26:31,160 --> 00:26:39,920 +by 4. That's how can we locate Q1, Q2, and Q3. So + +305 +00:26:39,920 --> 00:26:42,080 +one more time, the median, the value in the + +306 +00:26:42,080 --> 00:26:46,260 +middle, and it's located exactly at the position N + +307 +00:26:46,260 --> 00:26:52,590 +plus 1 over 2 for the range data. Q1 is located at + +308 +00:26:52,590 --> 00:26:56,770 +n plus one divided by four. Q3 is located at the + +309 +00:26:56,770 --> 00:26:59,670 +position three times n plus one divided by four. + +310 +00:27:03,630 --> 00:27:07,490 +Now, when calculating the rank position, we can + +311 +00:27:07,490 --> 00:27:14,690 +use one of these rules. 
First, if the result of + +312 +00:27:14,690 --> 00:27:18,010 +the location, I mean, is a whole number, I mean, + +313 +00:27:18,250 --> 00:27:24,050 +if it is an integer. Then the rank position is the + +314 +00:27:24,050 --> 00:27:28,590 +same number. For example, suppose the rank + +315 +00:27:28,590 --> 00:27:34,610 +position is four. So position number four is your + +316 +00:27:34,610 --> 00:27:38,450 +quartile, either first or third or second + +317 +00:27:38,450 --> 00:27:42,510 +quartile. So if the result is a whole number, then + +318 +00:27:42,510 --> 00:27:48,350 +it is the rank position used. Now, if the result + +319 +00:27:48,350 --> 00:27:52,250 +is a fractional half, I mean if the right position + +320 +00:27:52,250 --> 00:27:58,830 +is 2.5, 3.5, 4.5. In this case, average the two + +321 +00:27:58,830 --> 00:28:02,050 +corresponding data values. For example, if the + +322 +00:28:02,050 --> 00:28:10,170 +right position is 2.5. So the rank position is 2 + +323 +00:28:10,170 --> 00:28:13,210 +.5. So take the average of the corresponding + +324 +00:28:13,210 --> 00:28:18,950 +values for the rank 2 and 3. So look at the value. + +325 +00:28:19,280 --> 00:28:24,740 +at rank 2, value at rank 3, then take the average + +326 +00:28:24,740 --> 00:28:29,300 +of the corresponding values. That if the rank + +327 +00:28:29,300 --> 00:28:31,280 +position is fractional. + +328 +00:28:34,380 --> 00:28:37,900 +So if the result is whole number, just take it as + +329 +00:28:37,900 --> 00:28:41,160 +it is. If it is a fractional half, take the + +330 +00:28:41,160 --> 00:28:44,460 +corresponding data values and take the average of + +331 +00:28:44,460 --> 00:28:49,110 +these two values. Now, if the result is not a + +332 +00:28:49,110 --> 00:28:53,930 +whole number or a fraction of it. For example, + +333 +00:28:54,070 --> 00:29:01,910 +suppose the location is 2.1. So the position is 2, + +334 +00:29:02,390 --> 00:29:06,550 +just round, up to the nearest integer. 
So that's + +335 +00:29:06,550 --> 00:29:11,350 +2. What's about if the position rank is 2.6? Just + +336 +00:29:11,350 --> 00:29:16,060 +rank up to 3. So that's 3. So that's the rule you + +337 +00:29:16,060 --> 00:29:21,280 +have to follow if the result is a number, a whole + +338 +00:29:21,280 --> 00:29:27,200 +number, I mean integer, fraction of half, or not + +339 +00:29:27,200 --> 00:29:31,500 +real number, I mean, not whole number, or fraction + +340 +00:29:31,500 --> 00:29:35,540 +of half. Look at this specific example. Suppose we + +341 +00:29:35,540 --> 00:29:40,180 +have this data. This is ordered array, 11, 12, up + +342 +00:29:40,180 --> 00:29:45,680 +to 22. And let's see how can we compute These + +343 +00:29:45,680 --> 00:29:46,240 +measures. + +344 +00:29:50,080 --> 00:29:51,700 +Look carefully here. + +345 +00:29:55,400 --> 00:29:59,260 +First, let's compute the median. The median and + +346 +00:29:59,260 --> 00:30:02,360 +the value in the middle. How many values we have? + +347 +00:30:02,800 --> 00:30:08,920 +There are nine values. So the middle is number + +348 +00:30:08,920 --> 00:30:15,390 +five. One, two, three, four, five. So 16. This + +349 +00:30:15,390 --> 00:30:23,010 +value is the median. Now look at the values below + +350 +00:30:23,010 --> 00:30:29,650 +the median. There are 4 and 4 below and above the + +351 +00:30:29,650 --> 00:30:34,970 +median. Now let's see how can we compute Q1. The + +352 +00:30:34,970 --> 00:30:38,250 +position of Q1, as we mentioned, is N plus 1 + +353 +00:30:38,250 --> 00:30:42,630 +divided by 4. So N is 9 plus 1 divided by 4 is 2 + +354 +00:30:42,630 --> 00:30:50,330 +.5. 2.5 position, it means you have to take the + +355 +00:30:50,330 --> 00:30:54,490 +average of the two corresponding values, 2 and 3. + +356 +00:30:55,130 --> 00:31:01,010 +So 2 and 3, so 12 plus 13 divided by 2. That gives + +357 +00:31:01,010 --> 00:31:08,390 +12.5. So this is Q1. + +358 +00:31:08,530 --> 00:31:18,210 +So Q1 is 12.5. 
Now what's about Q3? The Q3, the + +359 +00:31:18,210 --> 00:31:27,810 +rank position, Q1 was 2.5. So Q3 should be three + +360 +00:31:27,810 --> 00:31:32,410 +times that value, because it's three times A plus + +361 +00:31:32,410 --> 00:31:36,090 +1 over 4. That means the rank position is 7.5. + +362 +00:31:36,590 --> 00:31:39,410 +That means you have to take the average of the 7 + +363 +00:31:39,410 --> 00:31:44,890 +and 8 position. 7 and 8 is 18. + +364 +00:31:45,880 --> 00:31:56,640 +which is 19.5. So that's Q3, 19.5. + +365 +00:32:00,360 --> 00:32:09,160 +So this is Q3. This value is Q1. And this value + +366 +00:32:09,160 --> 00:32:15,910 +is? Now, Q2 is the center. is located in the + +367 +00:32:15,910 --> 00:32:18,570 +center because, as we mentioned, four below and + +368 +00:32:18,570 --> 00:32:22,950 +four above. Now what's about Q1? Q1 is not in the + +369 +00:32:22,950 --> 00:32:28,150 +center of the entire data. Because Q1, 12.5, so + +370 +00:32:28,150 --> 00:32:31,830 +two points below and the others maybe how many + +371 +00:32:31,830 --> 00:32:34,750 +above two, four, six, seven observations above it. + +372 +00:32:35,390 --> 00:32:40,130 +So that means Q1 is not center. Also Q3 is not + +373 +00:32:40,130 --> 00:32:43,170 +center because two observations above it and seven + +374 +00:32:43,170 --> 00:32:48,780 +below it. So that means Q1 and Q3 are measures of + +375 +00:32:48,780 --> 00:32:52,480 +non-central location, while the median is a + +376 +00:32:52,480 --> 00:32:56,080 +measure of central location. But if you just look + +377 +00:32:56,080 --> 00:33:03,720 +at the data below the median, just focus on the + +378 +00:33:03,720 --> 00:33:09,100 +data below the median, 12.5 lies exactly in the + +379 +00:33:09,100 --> 00:33:13,130 +middle of the data. So 12.5 is the center of the + +380 +00:33:13,130 --> 00:33:18,090 +data. I mean, Q1 is the center of the data below + +381 +00:33:18,090 --> 00:33:22,810 +the overall median. 
The overall median was 16. So + +382 +00:33:22,810 --> 00:33:27,490 +the data before 16, the median for this data is 12 + +383 +00:33:27,490 --> 00:33:31,770 +.5, which is the first part. Similarly, if you + +384 +00:33:31,770 --> 00:33:36,870 +look at the data above Q2, + +385 +00:33:37,770 --> 00:33:42,190 +now 19.5. is located in the middle of the line. So + +386 +00:33:42,190 --> 00:33:46,470 +Q3 is a measure of center for the data above the + +387 +00:33:46,470 --> 00:33:48,390 +line. Make sense? + +388 +00:33:51,370 --> 00:33:56,430 +So that's how can we compute first, second, and + +389 +00:33:56,430 --> 00:34:03,510 +third part. Any questions? Yes, but it's a whole + +390 +00:34:03,510 --> 00:34:09,370 +number. Whole number, it means any integer. For + +391 +00:34:09,370 --> 00:34:14,450 +example, yeah, exactly, yes. Suppose we have + +392 +00:34:14,450 --> 00:34:18,090 +number of data is seven. + +393 +00:34:22,070 --> 00:34:25,070 +Number of observations we have is seven. So the + +394 +00:34:25,070 --> 00:34:29,730 +rank position n plus one divided by two, seven + +395 +00:34:29,730 --> 00:34:33,890 +plus one over two is four. Four means the whole + +396 +00:34:33,890 --> 00:34:37,780 +number, I mean an integer. then this case just use + +397 +00:34:37,780 --> 00:34:45,280 +it as it is. Now let's see the benefit or the + +398 +00:34:45,280 --> 00:34:48,680 +feature of using Q1 and Q3. + +399 +00:34:55,180 --> 00:35:01,300 +So let's move at the inter-equilateral range or + +400 +00:35:01,300 --> 00:35:01,760 +IQ1. + +401 +00:35:08,020 --> 00:35:14,580 +2.5 is the position. So the rank data of the rank + +402 +00:35:14,580 --> 00:35:19,180 +data. So take the average of the two corresponding + +403 +00:35:19,180 --> 00:35:25,700 +values of this one, which is 2 and 3. So 2 and 3. + +404 +00:35:27,400 --> 00:35:31,940 +The average of these two values is 12.5. One more + +405 +00:35:31,940 --> 00:35:40,920 +time, 2.5 is not the value. 
It is the rank + +406 +00:35:40,920 --> 00:35:47,880 +position of the first quartile. So in this case, 2 + +407 +00:35:47,880 --> 00:35:57,740 +.5 takes position 2 and 3. The average of these + +408 +00:35:57,740 --> 00:36:02,580 +two rank positions the corresponding one, which + +409 +00:36:02,580 --> 00:36:10,080 +are 12 and 13. So 12 for position number 2, 13 for + +410 +00:36:10,080 --> 00:36:13,580 +the other one. So the average is just divided by + +411 +00:36:13,580 --> 00:36:16,660 +2. That will give 12.5. + +412 +00:36:28,760 --> 00:36:34,900 +Next, again, the inter-quartile range, which is + +413 +00:36:34,900 --> 00:36:44,160 +denoted by IQR. Now IQR is the distance between Q3 + +414 +00:36:44,160 --> 00:36:48,000 +and Q1. I mean the difference between Q3 and Q1 is + +415 +00:36:48,000 --> 00:36:53,460 +called the inter-quartile range. And this one + +416 +00:36:53,460 --> 00:36:56,680 +measures the spread in the middle 50% of the data. + +417 +00:36:57,680 --> 00:36:59,060 +Because if you imagine that, + +418 +00:37:02,250 --> 00:37:10,250 +This is Q1 and Q3. IQR is the distance between + +419 +00:37:10,250 --> 00:37:14,130 +these two values. Now imagine that we have just + +420 +00:37:14,130 --> 00:37:19,570 +this data, which represents 50%. + +421 +00:37:21,540 --> 00:37:25,440 +And IQR, the definition is a Q3. So we have just + +422 +00:37:25,440 --> 00:37:31,480 +this data, for example. And IQ3 is Q3 minus Q1. It + +423 +00:37:31,480 --> 00:37:37,080 +means IQ3 is the maximum minus the minimum of the + +424 +00:37:37,080 --> 00:37:41,540 +50% of the middle data. So it means this is your + +425 +00:37:41,540 --> 00:37:46,980 +range, new range. After you've secluded 25% to the + +426 +00:37:46,980 --> 00:37:52,450 +left of Q1, And also you ignored totally 25% of + +427 +00:37:52,450 --> 00:37:57,070 +the data above Q3. So that means you're focused on + +428 +00:37:57,070 --> 00:38:00,630 +50% of the data. 
And just take the average of + +429 +00:38:00,630 --> 00:38:04,070 +these two points, I'm sorry, the distance of these + +430 +00:38:04,070 --> 00:38:07,670 +two points Q3 minus Q1. So you will get the range. + +431 +00:38:07,990 --> 00:38:11,170 +But not exactly the range. It's called, sometimes + +432 +00:38:11,170 --> 00:38:16,390 +it's called mid-spread range. Because mid-spread, + +433 +00:38:16,510 --> 00:38:19,910 +because we are talking about middle of the data, + +434 +00:38:19,990 --> 00:38:22,430 +50% of the data, which is located in the middle. + +435 +00:38:23,110 --> 00:38:28,550 +So do you think in this case, outliers actually, + +436 +00:38:29,090 --> 00:38:32,930 +they are extreme values, the data below Q1 and + +437 +00:38:32,930 --> 00:38:38,150 +data above Q3. That means inter-quartile range, Q3 + +438 +00:38:38,150 --> 00:38:42,410 +minus Q1, is not affected by outliers. Because you + +439 +00:38:42,410 --> 00:38:49,150 +ignored the small values And the high values. So + +440 +00:38:49,150 --> 00:38:53,890 +IQR is not affected by outliers. So in case of + +441 +00:38:53,890 --> 00:38:58,930 +outliers, it's better to use IQR. Because the + +442 +00:38:58,930 --> 00:39:01,610 +range is maximum minus minimum. And as we + +443 +00:39:01,610 --> 00:39:05,030 +mentioned before, the range is affected by + +444 +00:39:05,030 --> 00:39:11,650 +outliers. So IQR is again called the mid-spread + +445 +00:39:11,650 --> 00:39:17,940 +because it covers the middle 50% of the data. IQR + +446 +00:39:17,940 --> 00:39:20,120 +again is a measure of variability that is not + +447 +00:39:20,120 --> 00:39:23,900 +influenced or affected by outliers or extreme + +448 +00:39:23,900 --> 00:39:26,680 +values. So in the presence of outliers, it's + +449 +00:39:26,680 --> 00:39:34,160 +better to use IQR instead of using the range. So + +450 +00:39:34,160 --> 00:39:39,140 +again, median and the range are not affected by + +451 +00:39:39,140 --> 00:39:43,180 +outliers. 
So in case of the presence of outliers, + +452 +00:39:43,340 --> 00:39:46,380 +we have to use these measures, one as measure of + +453 +00:39:46,380 --> 00:39:49,780 +central and the other as measure of spread. So + +454 +00:39:49,780 --> 00:39:54,420 +measures like Q1, Q3, and IQR that are not + +455 +00:39:54,420 --> 00:39:57,400 +influenced by outliers are called resistant + +456 +00:39:57,400 --> 00:40:01,980 +measures. Resistance means in case of outliers, + +457 +00:40:02,380 --> 00:40:06,120 +they remain in the same position or approximately + +458 +00:40:06,120 --> 00:40:09,870 +in the same position. Because outliers don't + +459 +00:40:09,870 --> 00:40:13,870 +affect these measures. I mean, don't affect Q1, + +460 +00:40:14,830 --> 00:40:20,130 +Q3, and consequently IQR, because IQR is just the + +461 +00:40:20,130 --> 00:40:24,990 +distance between Q3 and Q1. So to determine the + +462 +00:40:24,990 --> 00:40:29,430 +value of IQR, you have first to compute Q1, Q3, + +463 +00:40:29,750 --> 00:40:35,780 +then take the difference between these two. So, + +464 +00:40:36,120 --> 00:40:41,120 +for example, suppose we have a data, and that data + +465 +00:40:41,120 --> 00:40:51,400 +has Q1 equals 30, and Q3 is 55. Suppose for a data + +466 +00:40:51,400 --> 00:41:00,140 +set, that data set has Q1 30, Q3 is 57. The IQR, + +467 +00:41:00,800 --> 00:41:07,240 +or Inter Equal Hyper Range, 57 minus 30 is 27. Now + +468 +00:41:07,240 --> 00:41:12,460 +what's the range? The range is maximum for the + +469 +00:41:12,460 --> 00:41:17,380 +largest value, which is 17 minus 12. That gives + +470 +00:41:17,380 --> 00:41:21,420 +58. Now look at the difference between the two + +471 +00:41:21,420 --> 00:41:26,900 +ranges. The inter-quartile range is 27. The range + +472 +00:41:26,900 --> 00:41:29,800 +is 58. 
There is a big difference between these two + +473 +00:41:29,800 --> 00:41:35,750 +values because range depends only on smallest and + +474 +00:41:35,750 --> 00:41:40,190 +largest. And these values could be outliers. For + +475 +00:41:40,190 --> 00:41:44,410 +this reason, the range value is higher or greater + +476 +00:41:44,410 --> 00:41:48,410 +than the required range, which is just the + +477 +00:41:48,410 --> 00:41:54,050 +distance of the 50% of the middle data. For this + +478 +00:41:54,050 --> 00:41:59,470 +reason, it's better to use the range in case of + +479 +00:41:59,470 --> 00:42:03,940 +outliers. Make sense? Any question? + +480 +00:42:08,680 --> 00:42:19,320 +Five-number summary are smallest + +481 +00:42:19,320 --> 00:42:27,380 +value, largest value, also first quartile, third + +482 +00:42:27,380 --> 00:42:32,250 +quartile, and the median. These five numbers are + +483 +00:42:32,250 --> 00:42:35,870 +called five-number summary, because by using these + +484 +00:42:35,870 --> 00:42:41,590 +statistics, smallest, first, median, third + +485 +00:42:41,590 --> 00:42:46,010 +quarter, and largest, you can describe the center + +486 +00:42:46,010 --> 00:42:52,590 +spread and the shape of the distribution. So by + +487 +00:42:52,590 --> 00:42:56,450 +using five-number summary, you can tell something + +488 +00:42:56,450 --> 00:43:00,090 +about it. The center of the data, I mean the value + +489 +00:43:00,090 --> 00:43:02,070 +in the middle, because the median is the value in + +490 +00:43:02,070 --> 00:43:06,550 +the middle. Spread, because we can talk about the + +491 +00:43:06,550 --> 00:43:11,070 +IQR, which is the range, and also the shape of the + +492 +00:43:11,070 --> 00:43:15,450 +data. And let's see, let's move to this slide, + +493 +00:43:16,670 --> 00:43:18,530 +slide number 50. + +494 +00:43:21,530 --> 00:43:25,090 +Let's see how can we construct something called + +495 +00:43:25,090 --> 00:43:31,850 +box plot. Box plot. 
Box plot can be constructed by + +496 +00:43:31,850 --> 00:43:34,990 +using the five number summary. We have smallest + +497 +00:43:34,990 --> 00:43:37,550 +value. On the other hand, we have the largest + +498 +00:43:37,550 --> 00:43:43,430 +value. Also, we have Q1, the first quartile, the + +499 +00:43:43,430 --> 00:43:47,510 +median, and Q3. For symmetric distribution, I mean + +500 +00:43:47,510 --> 00:43:52,490 +if the data is bell-shaped. In this case, the + +501 +00:43:52,490 --> 00:43:56,570 +vertical line in the box which represents the + +502 +00:43:56,570 --> 00:43:59,730 +median should be located in the middle of this + +503 +00:43:59,730 --> 00:44:05,510 +box, also in the middle of the entire data. Look + +504 +00:44:05,510 --> 00:44:11,350 +carefully at this vertical line. This line splits + +505 +00:44:11,350 --> 00:44:16,070 +the data into two halves, 25% to the left and 25% + +506 +00:44:16,070 --> 00:44:19,960 +to the right. And also this vertical line splits + +507 +00:44:19,960 --> 00:44:24,720 +the data into two halves, from the smallest to + +508 +00:44:24,720 --> 00:44:29,760 +largest, because there are 50% of the observations + +509 +00:44:29,760 --> 00:44:34,560 +lie below, and 50% lies above. So that means by + +510 +00:44:34,560 --> 00:44:37,840 +using box plot, you can tell something about the + +511 +00:44:37,840 --> 00:44:42,520 +shape of the distribution. So again, if the data + +512 +00:44:42,520 --> 00:44:48,270 +are symmetric around the median, And the central + +513 +00:44:48,270 --> 00:44:53,910 +line, this box, and central line are centered + +514 +00:44:53,910 --> 00:44:57,550 +between the endpoints. I mean, this vertical line + +515 +00:44:57,550 --> 00:45:00,720 +is centered between these two endpoints. between + +516 +00:45:00,720 --> 00:45:04,180 +Q1 and Q3. And the whole box plot is centered + +517 +00:45:04,180 --> 00:45:07,100 +between the smallest and the largest value. 
And + +518 +00:45:07,100 --> 00:45:10,840 +also the distance between the median and the + +519 +00:45:10,840 --> 00:45:14,320 +smallest is roughly equal to the distance between + +520 +00:45:14,320 --> 00:45:19,760 +the median and the largest. So you can tell + +521 +00:45:19,760 --> 00:45:22,660 +something about the shape of the distribution by + +522 +00:45:22,660 --> 00:45:26,780 +using the box plot. + +523 +00:45:32,870 --> 00:45:36,110 +The graph in the middle. Here median and median + +524 +00:45:36,110 --> 00:45:40,110 +are the same. The box plot, we have here the + +525 +00:45:40,110 --> 00:45:43,830 +median in the middle of the box, also in the + +526 +00:45:43,830 --> 00:45:47,390 +middle of the entire data. So you can say that the + +527 +00:45:47,390 --> 00:45:50,210 +distribution of this data is symmetric or is bell + +528 +00:45:50,210 --> 00:45:55,750 +-shaped. It's normal distribution. On the other + +529 +00:45:55,750 --> 00:46:00,110 +hand, if you look here, you will see that the + +530 +00:46:00,110 --> 00:46:06,160 +median is not in the center of the box. It's near + +531 +00:46:06,160 --> 00:46:12,580 +Q3. So the left tail, I mean, the distance between + +532 +00:46:12,580 --> 00:46:16,620 +the median and the smallest, this tail is longer + +533 +00:46:16,620 --> 00:46:20,600 +than the right tail. In this case, it's called + +534 +00:46:20,600 --> 00:46:24,850 +left skewed or skewed to the left. or negative + +535 +00:46:24,850 --> 00:46:29,510 +skewness. So if the data is not symmetric, it + +536 +00:46:29,510 --> 00:46:35,630 +might be left skewed. I mean, the left tail is + +537 +00:46:35,630 --> 00:46:40,590 +longer than the right tail. On the other hand, if + +538 +00:46:40,590 --> 00:46:45,950 +the median is located near Q1, it means the right + +539 +00:46:45,950 --> 00:46:49,930 +tail is longer than the left tail, and it's called + +540 +00:46:49,930 --> 00:46:56,470 +positive skewed or right skewed. 
So for symmetric + +541 +00:46:56,470 --> 00:47:00,310 +distribution, the median in the middle, for left + +542 +00:47:00,310 --> 00:47:04,570 +or right skewed, the median either is close to the + +543 +00:47:04,570 --> 00:47:09,930 +Q3 or skewed distribution to the left, or the + +544 +00:47:09,930 --> 00:47:14,910 +median is close to Q1 and the distribution is + +545 +00:47:14,910 --> 00:47:20,570 +right skewed or has positive skewness. That's how + +546 +00:47:20,570 --> 00:47:25,860 +can we tell spread center and the shape by using + +547 +00:47:25,860 --> 00:47:28,460 +the box plot. So center is the value in the + +548 +00:47:28,460 --> 00:47:32,860 +middle, Q2 or the median. Spread is the distance + +549 +00:47:32,860 --> 00:47:38,340 +between Q1 and Q3. So Q3 minus Q1 gives IQR. And + +550 +00:47:38,340 --> 00:47:41,880 +finally, you can tell something about the shape of + +551 +00:47:41,880 --> 00:47:45,140 +the distribution by just looking at the scatter + +552 +00:47:45,140 --> 00:47:46,440 +plot. + +553 +00:47:49,700 --> 00:47:56,330 +Let's look at This example, and suppose we have + +554 +00:47:56,330 --> 00:48:02,430 +small data set. And let's see how can we construct + +555 +00:48:02,430 --> 00:48:05,750 +the MaxPlot. In order to construct MaxPlot, you + +556 +00:48:05,750 --> 00:48:09,510 +have to compute minimum first or smallest value, + +557 +00:48:09,810 --> 00:48:14,650 +largest value. Besides that, you have to compute + +558 +00:48:14,650 --> 00:48:21,110 +first and third part time and also Q2. For this + +559 +00:48:21,110 --> 00:48:27,570 +simple example, Q1 is 2, Q3 is 5, and the median + +560 +00:48:27,570 --> 00:48:33,990 +is 3. Smallest is 0, largest is 1 7. Now, be + +561 +00:48:33,990 --> 00:48:38,130 +careful here, 1 7 seems to be an outlier. But so + +562 +00:48:38,130 --> 00:48:44,190 +far, we don't explain how can we decide if a data + +563 +00:48:44,190 --> 00:48:47,550 +value is considered to be an outlier. 
But at least + +564 +00:48:47,550 --> 00:48:53,080 +1 7. is a suspected value to be an outlier, seems + +565 +00:48:53,080 --> 00:48:57,200 +to be. Sometimes you are 95% sure that that point + +566 +00:48:57,200 --> 00:49:00,160 +is an outlier, but you cannot tell, because you + +567 +00:49:00,160 --> 00:49:04,060 +have to have a specific rule that can decide if + +568 +00:49:04,060 --> 00:49:07,400 +that point is an outlier or not. But at least it + +569 +00:49:07,400 --> 00:49:12,060 +makes sense that that point is considered maybe an + +570 +00:49:12,060 --> 00:49:14,700 +outlier. But let's see how can we construct that + +571 +00:49:14,700 --> 00:49:18,190 +first. The box plot. Again, as we mentioned, the + +572 +00:49:18,190 --> 00:49:21,630 +minimum value is zero. The maximum is 27. The Q1 + +573 +00:49:21,630 --> 00:49:27,830 +is 2. The median is 3. The Q3 is 5. Now, if you + +574 +00:49:27,830 --> 00:49:32,010 +look at the distance between, does this vertical + +575 +00:49:32,010 --> 00:49:35,790 +line lie between the line in the middle or the + +576 +00:49:35,790 --> 00:49:40,090 +center of the box? It's not exactly. But if you + +577 +00:49:40,090 --> 00:49:45,260 +look at this line, vertical line, and the location + +578 +00:49:45,260 --> 00:49:50,600 +of this with respect to the minimum and the + +579 +00:49:50,600 --> 00:49:56,640 +maximum. You will see that the right tail is much + +580 +00:49:56,640 --> 00:50:01,560 +longer than the left tail because it starts from 3 + +581 +00:50:01,560 --> 00:50:06,180 +up to 27. And the other one, from zero to three, + +582 +00:50:06,380 --> 00:50:09,760 +is a big distance between three and 27, compared + +583 +00:50:09,760 --> 00:50:13,140 +to the other one, zero to three. So it seems to be + +584 +00:50:13,140 --> 00:50:16,600 +this is quite skewed, so it's not at all + +585 +00:50:16,600 --> 00:50:23,700 +symmetric, because of this value. 
So maybe by + +586 +00:50:23,700 --> 00:50:25,580 +using MaxPlot, you can tell that point is + +587 +00:50:25,580 --> 00:50:31,440 +suspected to be an outlier. It has a very long + +588 +00:50:31,440 --> 00:50:32,800 +right tail. + +589 +00:50:35,560 --> 00:50:41,120 +So let's see how can we determine if a point is an + +590 +00:50:41,120 --> 00:50:50,400 +outlier or not. Sometimes we can use box plot to + +591 +00:50:50,400 --> 00:50:53,840 +determine if the point is an outlier or not. The + +592 +00:50:53,840 --> 00:51:00,860 +rule is that a value is considered an outlier It + +593 +00:51:00,860 --> 00:51:04,780 +is more than 1.5 times the entire quartile range + +594 +00:51:04,780 --> 00:51:11,420 +below Q1 or above it. Let's explain the meaning of + +595 +00:51:11,420 --> 00:51:12,260 +this sentence. + +596 +00:51:15,260 --> 00:51:20,100 +First, let's compute something called lower. + +597 +00:51:23,740 --> 00:51:28,540 +The lower limit is + +598 +00:51:28,540 --> 00:51:38,680 +not the minimum. It's Q1 minus 1.5 IQR. This is + +599 +00:51:38,680 --> 00:51:39,280 +the lower limit. + +600 +00:51:42,280 --> 00:51:47,560 +So it's 1.5 times IQR below Q1. This is the lower + +601 +00:51:47,560 --> 00:51:50,620 +limit. The upper limit, + +602 +00:51:54,680 --> 00:51:57,460 +Q3, + +603 +00:51:58,790 --> 00:52:06,890 +plus 1.5 times IQR. So we computed lower and upper + +604 +00:52:06,890 --> 00:52:13,350 +limit by using these rules. Q1 minus 1.5 IQR. So + +605 +00:52:13,350 --> 00:52:20,510 +it's 1.5 times IQR below Q1 and 1.5 times IQR + +606 +00:52:20,510 --> 00:52:25,070 +above Q1. Now, any value. + +607 +00:52:31,150 --> 00:52:38,610 +Is it smaller than the + +608 +00:52:38,610 --> 00:52:45,990 +lower limit or + +609 +00:52:45,990 --> 00:52:53,290 +greater than the + +610 +00:52:53,290 --> 00:52:54,150 +upper limit? + +611 +00:52:58,330 --> 00:53:04,600 +Any value. 
smaller than the lower limit and

+612
+00:53:04,600 --> 00:53:13,260
+greater than the upper limit is considered to

+613
+00:53:13,260 --> 00:53:20,720
+be an outlier. This is the rule how can you tell

+614
+00:53:20,720 --> 00:53:24,780
+if the point or data value is outlier or not. Just

+615
+00:53:24,780 --> 00:53:27,100
+compute lower limit and upper limit.

+616
+00:53:29,780 --> 00:53:35,580
+So lower limit, Q1 minus 1.5 IQR. Upper limit, Q3

+617
+00:53:35,580 --> 00:53:38,620
+plus 1.5 IQR. 1.5 is a constant.

+618
+00:53:43,200 --> 00:53:47,040
+Now let's go back to the previous example, which

+619
+00:53:47,040 --> 00:53:53,800
+was, which Q1 was, what's the value of Q1? Q1 was

+620
+00:53:53,800 --> 00:53:57,680
+2. Q3 is 5.

+621
+00:54:00,650 --> 00:54:05,230
+In order to determine an outlier, you don't need the

+622
+00:54:05,230 --> 00:54:11,150
+value, the median. Now, Q3 is 5, Q1 is 2, so IQR

+623
+00:54:11,150 --> 00:54:21,050
+is 3. That's the value of IQR. Now, lower limit, Q1,

+624
+00:54:21,050 --> 00:54:31,830
+2, minus 1.5 times IQR, 3. So that's minus 2.5.

+625
+00:54:33,550 --> 00:54:41,170
+Upper limit, Q3 plus 1.5 IQR. Q3 is 5, so 5 plus 1.5 times 3.

+626
+00:54:41,650 --> 00:54:48,570
+That gives 9.5. Now, any point or any data value,

+627
+00:54:49,450 --> 00:54:55,950
+any data value falls below minus 2.5. I mean

+628
+00:54:55,950 --> 00:55:00,380
+smaller than minus 2.5. Or greater than 9.5 is an

+629
+00:55:00,380 --> 00:55:05,420
+outlier. If you look at the data you have, we have

+630
+00:55:05,420 --> 00:55:09,520
+0 up to 9. So none of these is considered to be an

+631
+00:55:09,520 --> 00:55:16,200
+outlier. But what's about 27? 27 is greater than,

+632
+00:55:16,260 --> 00:55:23,160
+much bigger than actually 9.5. So for that data,

+633
+00:55:24,020 --> 00:55:27,920
+27 is an outlier. So this is the way how can we

+634
+00:55:27,920 --> 00:55:36,120
+compute the outlier for the sample. 
Another
+
+635
+00:55:36,120 --> 00:55:39,620
+method. The z-score is another method to determine
+
+636
+00:55:39,620 --> 00:55:43,600
+if that point is an outlier or not. So, so far we
+
+637
+00:55:43,600 --> 00:55:48,300
+have two rules. One by using quartiles and the
+
+638
+00:55:48,300 --> 00:55:50,540
+other, as we mentioned last time, by using the z
+
+639
+00:55:50,540 --> 00:55:54,200
+-score. And for these z-scores, if you remember, any
+
+640
+00:55:54,200 --> 00:56:00,030
+values that lie below minus three or above
+
+641
+00:56:00,030 --> 00:56:03,430
+three are considered to be outliers. That's
+
+642
+00:56:03,430 --> 00:56:07,950
+another example. That's another way to figure out
+
+643
+00:56:07,950 --> 00:56:09,190
+if the data value is an outlier.
+
+644
+00:56:13,730 --> 00:56:17,110
+You can apply the two rules either for the sample
+
+645
+00:56:17,110 --> 00:56:20,190
+or the population. If you have the entire data,
+
+646
+00:56:20,890 --> 00:56:23,950
+you can also determine outliers for the entire
+
+647
+00:56:23,950 --> 00:56:29,110
+dataset, even if that data is the population. But
+
+648
+00:56:29,110 --> 00:56:34,490
+most of the time, we select a sample, which is a
+
+649
+00:56:34,490 --> 00:56:37,790
+subset or a portion of that population.
+
+650
+00:56:40,570 --> 00:56:41,290
+Questions?
+
+651
+00:56:53,360 --> 00:57:00,000
+And locating outliers. So again, outlier is any
+
+652
+00:57:00,000 --> 00:57:05,000
+value that is above the upper limit or below the
+
+653
+00:57:05,000 --> 00:57:08,340
+lower limit. And also we can use the z-score also
+
+654
+00:57:08,340 --> 00:57:12,680
+to determine if that point is outlier or not. Next
+
+655
+00:57:12,680 --> 00:57:16,340
+time, Inshallah, we will go over the covariance
+
+656
+00:57:16,340 --> 00:57:19,420
+and the relationship and I will give some practice
+
+657
+00:57:19,420 --> 00:57:22,180
+problems for Chapter 3. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY.srt new file mode 100644 index 0000000000000000000000000000000000000000..0efd069963f9a7999fa7705fa5b2bcbca2883299 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY.srt @@ -0,0 +1,2558 @@ + +1 +00:00:17,750 --> 00:00:21,350 +So let's again go back to chapter number one. Last + +2 +00:00:21,350 --> 00:00:25,730 +time we discussed chapter one, production and data + +3 +00:00:25,730 --> 00:00:32,390 +collection. And I think we described why learning + +4 +00:00:32,390 --> 00:00:36,510 +statistics distinguish between some of these + +5 +00:00:36,510 --> 00:00:43,810 +topics. And also we explained in details types of + +6 +00:00:43,810 --> 00:00:47,010 +statistics and we mentioned that statistics mainly + +7 +00:00:47,010 --> 00:00:52,430 +has two types either descriptive statistics which + +8 +00:00:52,430 --> 00:00:56,810 +means collecting summarizing and obtaining data + +9 +00:00:56,810 --> 00:00:59,910 +and other type of statistics is called inferential + +10 +00:00:59,910 --> 00:01:04,430 +statistics or statistical inference and this type + +11 +00:01:04,430 --> 00:01:11,070 +of statistics we can draw drawing conclusions and + +12 +00:01:11,070 --> 00:01:14,090 +making decision concerning a population based only + +13 +00:01:14,090 --> 00:01:17,510 +on a sample. That means we have a sample and + +14 +00:01:17,510 --> 00:01:20,970 +sample is just a subset of the population or the + +15 +00:01:20,970 --> 00:01:26,230 +portion of the population and we use the data from + +16 +00:01:26,230 --> 00:01:29,130 +that sample to make some conclusion about the + +17 +00:01:29,130 --> 00:01:32,390 +entire population. This type of statistic is + +18 +00:01:32,390 --> 00:01:34,710 +called inferential statistics. 
Later, Inshallah, + +19 +00:01:34,750 --> 00:01:37,710 +we'll talk in details about inferential statistics + +20 +00:01:37,710 --> 00:01:45,290 +that will start in Chapter 7. Also, we gave some + +21 +00:01:45,290 --> 00:01:50,630 +definitions for variables, data, and we + +22 +00:01:50,630 --> 00:01:53,510 +distinguished between population and sample. And + +23 +00:01:53,510 --> 00:01:56,630 +we know that the population consists of all items + +24 +00:01:56,630 --> 00:02:00,270 +or individuals about which you want to draw a + +25 +00:02:00,270 --> 00:02:05,770 +conclusion. But in some cases, it's very hard to + +26 +00:02:05,770 --> 00:02:07,750 +talk about the population or the entire + +27 +00:02:07,750 --> 00:02:13,340 +population, so we can select a sample. A sample is + +28 +00:02:13,340 --> 00:02:18,480 +just a portion or subset of the entire population. + +29 +00:02:19,060 --> 00:02:21,860 +So we know now the definition of population and + +30 +00:02:21,860 --> 00:02:25,360 +sample. The other two types, parameter and + +31 +00:02:25,360 --> 00:02:28,860 +statistics. Parameter is a numerical measure that + +32 +00:02:28,860 --> 00:02:32,300 +describes characteristics of a population, while + +33 +00:02:32,300 --> 00:02:36,000 +on the other hand, a sample, a statistic is just + +34 +00:02:36,430 --> 00:02:39,730 +numerical measures that describe characteristic of + +35 +00:02:39,730 --> 00:02:44,930 +a sample. So parameter is computed from the + +36 +00:02:44,930 --> 00:02:48,930 +population while statistic is computed from the + +37 +00:02:48,930 --> 00:02:54,030 +sample. I think we stopped at this point. Why + +38 +00:02:54,030 --> 00:02:56,770 +collect data? I mean what are the reasons for + +39 +00:02:59,580 --> 00:03:01,980 +One of these reasons, for example, a marketing + +40 +00:03:01,980 --> 00:03:04,660 +research analyst needs to assess the effectiveness + +41 +00:03:04,660 --> 00:03:07,700 +of a new television advertisement. 
For example, + +42 +00:03:07,840 --> 00:03:13,380 +suppose you are a manager and you want to increase + +43 +00:03:13,380 --> 00:03:18,060 +your salaries or your sales. Now, sales may be + +44 +00:03:18,060 --> 00:03:23,380 +affected by advertising. So I mean, if you spend + +45 +00:03:23,380 --> 00:03:26,320 +more on advertising, it means your sales becomes + +46 +00:03:26,320 --> 00:03:29,740 +larger and larger. So you want to know if this + +47 +00:03:29,740 --> 00:03:34,160 +variable, I mean if advertisement is an effective + +48 +00:03:34,160 --> 00:03:38,900 +variable that maybe increase your sales. So that's + +49 +00:03:38,900 --> 00:03:43,900 +one of the reasons why we use data. The other one, + +50 +00:03:44,120 --> 00:03:46,880 +for example, pharmaceutical manufacturers needs to + +51 +00:03:46,880 --> 00:03:49,800 +determine whether a new drug is more effective + +52 +00:03:49,800 --> 00:03:53,240 +than those currently used. For example, for a + +53 +00:03:53,240 --> 00:03:59,330 +headache, we use drug A. Now, a new drug is + +54 +00:03:59,330 --> 00:04:04,510 +produced and you want to see if this new drug is + +55 +00:04:04,510 --> 00:04:10,090 +more effective than drug A that I mean if headache + +56 +00:04:10,090 --> 00:04:13,410 +suppose for example is removed after three days by + +57 +00:04:13,410 --> 00:04:20,490 +using drug A now the question is does B is more + +58 +00:04:20,490 --> 00:04:23,410 +effective it means it reduces your headache in + +59 +00:04:23,410 --> 00:04:26,070 +fewer than three days I mean maybe in two days + +60 +00:04:26,510 --> 00:04:30,810 +That means a drug B is more effective than a drug + +61 +00:04:30,810 --> 00:04:34,510 +A. So we want to know the difference between these + +62 +00:04:34,510 --> 00:04:37,250 +two drugs. I mean, we have two samples. Some + +63 +00:04:37,250 --> 00:04:40,810 +people used drug A and the other used drug B. 
And + +64 +00:04:40,810 --> 00:04:43,190 +we want to see if there is a significant + +65 +00:04:43,190 --> 00:04:47,690 +difference between the times that is used to + +66 +00:04:47,690 --> 00:04:51,150 +reduce the headache. So that's one of the reasons + +67 +00:04:51,150 --> 00:04:55,260 +why we use statistics. Sometimes an operation + +68 +00:04:55,260 --> 00:04:58,500 +manager wants to monitor manufacturing process to + +69 +00:04:58,500 --> 00:05:00,720 +find out whether the quality of a product being + +70 +00:05:00,720 --> 00:05:02,840 +manufactured is conforming to a company's + +71 +00:05:02,840 --> 00:05:05,700 +standards. Do you know what the meaning of + +72 +00:05:05,700 --> 00:05:06,520 +company's standards? + +73 +00:05:09,900 --> 00:05:15,320 +The regulations of the firm itself. Another + +74 +00:05:15,320 --> 00:05:21,020 +example, suppose here in the school last year, we + +75 +00:05:21,020 --> 00:05:25,150 +teach statistics by using method A. traditional + +76 +00:05:25,150 --> 00:05:29,350 +method. This year we developed a new method for + +77 +00:05:29,350 --> 00:05:32,370 +teaching and our goal is to see if the new method + +78 +00:05:32,370 --> 00:05:36,510 +is better than method A which was used in last + +79 +00:05:36,510 --> 00:05:38,910 +year. So we want to see if there is a big + +80 +00:05:38,910 --> 00:05:42,410 +difference between scores or the average scores + +81 +00:05:42,410 --> 00:05:47,310 +last year and this year. The same you can do for + +82 +00:05:47,310 --> 00:05:52,350 +your weight. Suppose there are 20 students in this + +83 +00:05:52,350 --> 00:05:56,960 +class and their weights are high. And our goal is + +84 +00:05:56,960 --> 00:06:04,280 +to reduce their weights. Suppose they + +85 +00:06:04,280 --> 00:06:09,640 +have a regime or diet for three months or + +86 +00:06:09,640 --> 00:06:12,140 +exercise, whatever it is, then after three months, + +87 +00:06:12,220 --> 00:06:17,060 +we have new weights for these persons. 
And we want + +88 +00:06:17,060 --> 00:06:19,840 +to see if the diet is effective. I mean, if the + +89 +00:06:19,840 --> 00:06:24,120 +average weight was greater than or smaller than + +90 +00:06:24,120 --> 00:06:28,600 +before diet. Is it clear? So there are many, many + +91 +00:06:28,600 --> 00:06:31,920 +reasons behind using statistics and collecting + +92 +00:06:31,920 --> 00:06:37,500 +data. Now, what are the sources of data? Since + +93 +00:06:37,500 --> 00:06:41,840 +statistics mainly, first step, we have to collect + +94 +00:06:41,840 --> 00:06:44,120 +data. Now, what are the sources of the data? + +95 +00:06:45,360 --> 00:06:48,420 +Generally speaking, there are two sources. One is + +96 +00:06:48,420 --> 00:06:52,430 +called The primary sources and the others + +97 +00:06:52,430 --> 00:06:55,770 +secondary sources. What do you think is the + +98 +00:06:55,770 --> 00:06:57,830 +difference between these two? I mean, what's the + +99 +00:06:57,830 --> 00:07:02,730 +difference between primary and secondary sources? + +100 +00:07:03,510 --> 00:07:07,250 +The primary source is the collector of the data. + +101 +00:07:07,670 --> 00:07:11,550 +He is the analyzer. He analyzes it. And then the + +102 +00:07:11,550 --> 00:07:14,230 +secondary, who collects the data, isn't there. + +103 +00:07:16,030 --> 00:07:18,910 +That's correct. So the primary sources means the + +104 +00:07:18,910 --> 00:07:22,490 +researcher by himself. He should collect the data, + +105 +00:07:23,890 --> 00:07:27,750 +then he can use the data to do his analysis. + +106 +00:07:28,310 --> 00:07:31,550 +That's for the primary. Now, the primary could be + +107 +00:07:31,550 --> 00:07:35,230 +data from political survey. You can distribute + +108 +00:07:35,230 --> 00:07:38,750 +questionnaire, for example, data collected from an + +109 +00:07:38,750 --> 00:07:42,530 +experiment. I mean maybe control or experimental + +110 +00:07:42,530 --> 00:07:45,730 +groups. 
We have two groups, maybe healthy people + +111 +00:07:45,730 --> 00:07:48,490 +and patient people. So that's experimental group. + +112 +00:07:49,010 --> 00:07:53,390 +Or observed data. That's the primary sources. + +113 +00:07:53,870 --> 00:07:56,450 +Secondary sources, the person performing data + +114 +00:07:56,450 --> 00:08:00,310 +analysis is not the data collector. So he obtained + +115 +00:08:00,310 --> 00:08:03,880 +the data from other sources. For example, it could + +116 +00:08:03,880 --> 00:08:07,140 +be analyzing census data or for example, examining + +117 +00:08:07,140 --> 00:08:10,160 +data from print journals or data published on the + +118 +00:08:10,160 --> 00:08:14,780 +internet. So maybe he goes to the Ministry of + +119 +00:08:14,780 --> 00:08:18,820 +Education and he can get some data. So the data is + +120 +00:08:18,820 --> 00:08:22,520 +already there and he just used the data to do some + +121 +00:08:22,520 --> 00:08:25,540 +analysis. So that's the difference between a + +122 +00:08:25,540 --> 00:08:29,420 +primary and secondary sources. So primary, the + +123 +00:08:29,420 --> 00:08:33,650 +researcher himself, should collect the data by + +124 +00:08:33,650 --> 00:08:35,590 +using one of the tools, either survey, + +125 +00:08:36,110 --> 00:08:39,050 +questionnaire, experiment, and so on. But + +126 +00:08:39,050 --> 00:08:41,450 +secondary, you can use the data that is published + +127 +00:08:41,450 --> 00:08:44,510 +in the internet, for example, in the books, in + +128 +00:08:44,510 --> 00:08:48,250 +governments and NGOs and so on. So these are the + +129 +00:08:48,250 --> 00:08:53,590 +two sources of data. Sources of data fall into + +130 +00:08:53,590 --> 00:08:57,610 +four categories. Number one, data distributed by + +131 +00:08:57,610 --> 00:09:01,190 +an organization or an individual. So that's + +132 +00:09:01,190 --> 00:09:06,170 +secondary source. 
A design experiment is primary + +133 +00:09:06,170 --> 00:09:10,350 +because you have to design the experiment, a + +134 +00:09:10,350 --> 00:09:14,610 +survey. It's also primary. An observational study + +135 +00:09:14,610 --> 00:09:17,590 +is also a primary source. So you have to + +136 +00:09:17,590 --> 00:09:21,410 +distinguish between a primary and secondary + +137 +00:09:21,410 --> 00:09:28,810 +sources. Any question? Comments? Next. + +138 +00:09:31,460 --> 00:09:34,540 +We'll talk a little bit about types of variables. + +139 +00:09:35,360 --> 00:09:37,580 +In general, there are two types of variables. One + +140 +00:09:37,580 --> 00:09:40,760 +is called categorical variables or qualitative + +141 +00:09:40,760 --> 00:09:43,160 +variables, and the other one is called numerical + +142 +00:09:43,160 --> 00:09:46,520 +or quantitative variables. Now, for example, if I + +143 +00:09:46,520 --> 00:09:50,560 +ask you, what's + +144 +00:09:50,560 --> 00:09:55,100 +your favorite color? You may say white, black, + +145 +00:09:55,220 --> 00:09:59,390 +red, and so on. What's your marital status? Maybe + +146 +00:09:59,390 --> 00:10:02,670 +married or unmarried, and so on. Gender, male, + +147 +00:10:02,850 --> 00:10:07,050 +either male or female, and so on. So this type of + +148 +00:10:07,050 --> 00:10:10,090 +variable is called qualitative variables. So + +149 +00:10:10,090 --> 00:10:13,130 +qualitative variables have values that can only be + +150 +00:10:13,130 --> 00:10:16,370 +placed into categories, such as, for example, yes + +151 +00:10:16,370 --> 00:10:21,350 +or no. For example, do you like orange? + +152 +00:10:22,270 --> 00:10:26,200 +The answer is either yes or no. Do you like + +153 +00:10:26,200 --> 00:10:30,040 +candidate A, for example, whatever his party is? + +154 +00:10:30,260 --> 00:10:34,620 +Do you like it? Either yes or no, and so on. 
As I + +155 +00:10:34,620 --> 00:10:37,480 +mentioned before, gender, marital status, race, + +156 +00:10:37,640 --> 00:10:41,820 +religions, these are examples of qualitative or + +157 +00:10:41,820 --> 00:10:46,240 +categorical variables. The other type of variable + +158 +00:10:46,240 --> 00:10:49,480 +which is commonly used is called numerical or + +159 +00:10:49,480 --> 00:10:53,230 +quantitative data. Quantitative variables have + +160 +00:10:53,230 --> 00:10:56,350 +values that represent quantities. For example, if + +161 +00:10:56,350 --> 00:11:00,470 +I ask you, what's your age? My age is 20 years old + +162 +00:11:00,470 --> 00:11:04,770 +or 18 years old. What's your weight? Income. + +163 +00:11:07,510 --> 00:11:12,550 +Height? Temperature? Income. So it's a number. + +164 +00:11:13,610 --> 00:11:18,030 +Weight, maybe my weight is 70 kilograms. So + +165 +00:11:18,030 --> 00:11:22,450 +weight, age, height, salary, income, number of + +166 +00:11:22,450 --> 00:11:26,510 +students, number of phone calls you received on + +167 +00:11:26,510 --> 00:11:28,770 +your cell phone during one hour, number of + +168 +00:11:28,770 --> 00:11:33,210 +accidents happened in street and so on. So that's + +169 +00:11:33,210 --> 00:11:36,470 +the difference between numerical variables and + +170 +00:11:36,470 --> 00:11:37,710 +qualitative variables. + +171 +00:11:40,270 --> 00:11:42,650 +Anyone of you just give me one example of + +172 +00:11:42,650 --> 00:11:47,780 +qualitative and quantitative variables. Another + +173 +00:11:47,780 --> 00:11:51,700 +examples. Just give me one example for qualitative + +174 +00:11:51,700 --> 00:11:52,160 +data. + +175 +00:11:56,900 --> 00:11:58,780 +Qualitative or quantitative. + +176 +00:12:01,380 --> 00:12:04,880 +Political party, either party A or party B. So + +177 +00:12:04,880 --> 00:12:08,320 +suppose there are two parties, so I like party A, + +178 +00:12:08,720 --> 00:12:12,320 +she likes party B and so on. 
So party in this case + +179 +00:12:12,320 --> 00:12:15,060 +is qualitative variable, another one. + +180 +00:12:25,400 --> 00:12:28,820 +So types of courses, maybe business, economics, + +181 +00:12:29,480 --> 00:12:33,260 +administration, and so on. So types of courses. + +182 +00:12:34,260 --> 00:12:36,600 +Another example for quantitative variable or + +183 +00:12:36,600 --> 00:12:37,600 +numerical variables. + +184 +00:12:45,020 --> 00:12:51,440 +So production is a numerical variable. Another + +185 +00:12:51,440 --> 00:12:52,840 +example for quantitative. + +186 +00:12:56,350 --> 00:12:58,970 +Is that produced by this company? Number of cell + +187 +00:12:58,970 --> 00:13:03,950 +phones, maybe 20, 17, and so on. Any question? + +188 +00:13:06,190 --> 00:13:12,410 +Next. So generally speaking, types of data, data + +189 +00:13:12,410 --> 00:13:17,960 +has two types, categorical and numerical data. As + +190 +00:13:17,960 --> 00:13:21,240 +we mentioned, marital status, political party, eye + +191 +00:13:21,240 --> 00:13:25,120 +color, and so on. These are examples of + +192 +00:13:25,120 --> 00:13:28,180 +categorical variables. On the other hand, a + +193 +00:13:28,180 --> 00:13:30,720 +numerical variable can be split or divided into + +194 +00:13:30,720 --> 00:13:34,020 +two parts. One is called discrete and the other is + +195 +00:13:34,020 --> 00:13:35,680 +continuous, and we have to distinguish between + +196 +00:13:35,680 --> 00:13:40,240 +these two. For example, Number of students in this + +197 +00:13:40,240 --> 00:13:43,400 +class, you can say there are 60 or 50 students in + +198 +00:13:43,400 --> 00:13:47,260 +this class. You cannot say there are 50.5 + +199 +00:13:47,260 --> 00:13:52,160 +students. + +223 +00:15:31,560 --> 00:15:36,240 +distribution. That will be later, inshallah. 
As we
+
+224
+00:15:36,240 --> 00:15:39,000
+mentioned last time, at the end of each chapter,
+
+225
+00:15:39,280 --> 00:15:41,640
+there is a section or sections, sometimes there
+
+226
+00:15:41,640 --> 00:15:45,000
+are two sections, talks about computer programs.
+
+227
+00:15:45,120 --> 00:15:49,200
+How can we use computer programs in order to
+
+228
+00:15:49,200 --> 00:15:52,300
+analyze the data? And as we mentioned last time,
+
+229
+00:15:52,420 --> 00:15:54,600
+you should take a course on that. It's called
+
+230
+00:15:54,600 --> 00:15:57,960
+Computer and Data Analysis or SPSS course. So we
+
+231
+00:15:57,960 --> 00:16:03,860
+are going to skip the computer programs used for
+
+232
+00:16:03,860 --> 00:16:08,720
+any chapters in this book. And that's the end of
+
+233
+00:16:08,720 --> 00:16:13,060
+chapter number one. Any questions?
+
+234
+00:16:18,380 --> 00:16:22,550
+Let's move quickly on chapter three.
+
+235
+00:16:31,290 --> 00:16:35,530
+Chapter three maybe is the easiest chapter in this
+
+236
+00:16:35,530 --> 00:16:39,950
+book. It's straightforward. We have some formulas
+
+237
+00:16:39,950 --> 00:16:45,280
+to compute some statistical measures. And we
+
+238
+00:16:45,280 --> 00:16:47,400
+should know how can we calculate these measures
+
+239
+00:16:47,400 --> 00:16:52,620
+and what are the meaning of your results. So
+
+240
+00:16:52,620 --> 00:16:56,140
+chapter three talks about numerical descriptive
+
+241
+00:16:56,140 --> 00:17:03,220
+measures. In this chapter, you will learn, number
+
+242
+00:17:03,220 --> 00:17:06,480
+one, describe the properties of central
+
+243
+00:17:06,480 --> 00:17:11,880
+tendency, variation, and shape in numerical data.
+
+244
+00:17:12,730 --> 00:17:16,250
+In this lecture, we'll talk in more details about
+
+245
+00:17:16,250 --> 00:17:21,370
+some of the center tendency measures. 
Later, we'll
+
+246
+00:17:21,370 --> 00:17:26,490
+talk about the variation, or spread, or
+
+247
+00:17:26,490 --> 00:17:29,690
+dispersion, and the shape in numerical data. So
+
+248
+00:17:29,690 --> 00:17:31,630
+that's part number one. We have to know something
+
+249
+00:17:31,630 --> 00:17:36,390
+about center tendency, variation, and the shape of
+
+250
+00:17:36,390 --> 00:17:42,020
+the data we have. to calculate descriptive summary
+
+251
+00:17:42,020 --> 00:17:45,360
+measures for a population. So we have to calculate
+
+252
+00:17:45,360 --> 00:17:48,460
+these measures for the sample. And if we have the
+
+253
+00:17:48,460 --> 00:17:51,420
+entire population, we can compute these measures
+
+254
+00:17:51,420 --> 00:17:58,440
+also for that population. Then I will introduce in
+
+255
+00:17:58,440 --> 00:18:01,920
+more details about something called box plot. How
+
+256
+00:18:01,920 --> 00:18:06,660
+can we construct and interpret a box plot? That's,
+
+257
+00:18:06,960 --> 00:18:11,040
+inshallah, next time on Tuesday. Finally, we'll
+
+258
+00:18:11,040 --> 00:18:13,020
+see how can we calculate the covariance and
+
+259
+00:18:13,020 --> 00:18:15,700
+coefficient of variation and coefficient, I'm
+
+260
+00:18:15,700 --> 00:18:18,480
+sorry, coefficient of correlation. This topic
+
+261
+00:18:18,480 --> 00:18:24,280
+we'll introduce in more details in chapter 11
+
+262
+00:18:24,280 --> 00:18:31,240
+later on. So just I will give some brief
+
+263
+00:18:31,240 --> 00:18:35,630
+notation about coefficient of correlation, how can
+
+264
+00:18:35,630 --> 00:18:39,030
+we compute the correlation coefficient? What's the
+
+265
+00:18:39,030 --> 00:18:41,870
+meaning of your result? And later in chapter 11,
+
+266
+00:18:41,870 --> 00:18:44,510
+we'll talk in more details about correlation and
+
+267
+00:18:44,510 --> 00:18:48,930
+regression. So these are the objectives of this
+
+268
+00:18:48,930 --> 00:18:52,870
+chapter. 
There are some basic definitions before + +269 +00:18:52,870 --> 00:18:57,410 +we start. One is called central tendency. What do + +270 +00:18:57,410 --> 00:19:00,750 +you mean by central tendency? Central tendency is + +271 +00:19:00,750 --> 00:19:04,990 +the extent to which all data value group around a + +272 +00:19:04,990 --> 00:19:08,890 +typical or numerical or central value. So we are + +273 +00:19:08,890 --> 00:19:12,510 +looking for a point that in the center, I mean, + +274 +00:19:12,810 --> 00:19:18,870 +the data points are gathered or collected around a + +275 +00:19:18,870 --> 00:19:21,670 +middle point, and that middle point is called the + +276 +00:19:21,670 --> 00:19:24,450 +central tendency. And the question is, how can we + +277 +00:19:24,450 --> 00:19:27,780 +measure that value? We'll talk in details about + +278 +00:19:27,780 --> 00:19:32,620 +mean, median, and mode in a few minutes. So the + +279 +00:19:32,620 --> 00:19:35,660 +central tendency, in this case, the data values + +280 +00:19:35,660 --> 00:19:40,080 +grouped around a typical or central value. Is it + +281 +00:19:40,080 --> 00:19:44,380 +clear? So we have data set, large data set. Then + +282 +00:19:44,380 --> 00:19:47,860 +these points are gathered or grouped around a + +283 +00:19:47,860 --> 00:19:51,440 +middle point, and this point is called central + +284 +00:19:51,440 --> 00:19:56,120 +tendency, and it can be measured by using mean, + +285 +00:19:56,420 --> 00:19:59,960 +which is the most common one, median and the mode. + +286 +00:20:01,020 --> 00:20:04,480 +Next is the variation, which is the amount of + +287 +00:20:04,480 --> 00:20:09,420 +dispersion. Variation is the amount of dispersion + +288 +00:20:09,420 --> 00:20:13,900 +or scattering of values. And we'll use, for + +289 +00:20:13,900 --> 00:20:18,400 +example, range, variance or standard deviation in + +290 +00:20:18,400 --> 00:20:22,960 +order to compute the variation. 
Finally, We have + +291 +00:20:22,960 --> 00:20:26,300 +data, and my question is, what's the shape of the + +292 +00:20:26,300 --> 00:20:29,920 +data? So the shape is the pattern of distribution + +293 +00:20:29,920 --> 00:20:35,220 +of values from the lowest value to the highest. So + +294 +00:20:35,220 --> 00:20:39,400 +that's the three definitions we need to know + +295 +00:20:39,400 --> 00:20:44,580 +before we start. So we'll start with the easiest + +296 +00:20:44,580 --> 00:20:48,680 +one, measures of central tendency. As I mentioned, + +297 +00:20:49,160 --> 00:20:55,110 +there are three measures: mean, median, and mode. And our + +298 +00:20:55,110 --> 00:20:58,270 +goal or we have two goals actually. We have to + +299 +00:20:58,270 --> 00:21:02,290 +know how to compute these measures. Number two, + +300 +00:21:03,270 --> 00:21:06,390 +which one is better? The mean or the median or the + +301 +00:21:06,390 --> 00:21:06,550 +mode? + +302 +00:21:11,310 --> 00:21:14,770 +So the mean sometimes called the arithmetic mean. + +303 +00:21:15,680 --> 00:21:20,020 +Or in general, just say the mean. So often we use + +304 +00:21:20,020 --> 00:21:26,860 +the mean. And the mean is just sum + +305 +00:21:26,860 --> 00:21:33,220 +of the values divided by the sample size. So it's + +306 +00:21:33,220 --> 00:21:36,800 +straightforward. We have, for example, three data + +307 +00:21:36,800 --> 00:21:42,180 +points. And your goal is to find the average or + +308 +00:21:42,180 --> 00:21:45,890 +the mean of these points. They mean it's just some + +309 +00:21:45,890 --> 00:21:50,230 +of these values divided by the sample size. So for + +310 +00:21:50,230 --> 00:21:54,570 +example, if we have a data X1, X2, X3 up to Xn. So + +311 +00:21:54,570 --> 00:21:59,650 +the average is denoted by X bar. This one is + +312 +00:21:59,650 --> 00:22:04,530 +pronounced as X bar and X bar is just sum of Xi. 
+ +313 +00:22:05,010 --> 00:22:08,250 +It is summation, you know this symbol, summation + +314 +00:22:08,250 --> 00:22:11,350 +of sigma, summation of Xi and I goes from one to + +315 +00:22:11,350 --> 00:22:14,490 +N. divided by N which is the total number of + +316 +00:22:14,490 --> 00:22:19,710 +observations or the sample size. So it means X1 + +317 +00:22:19,710 --> 00:22:23,290 +plus X2 all the way up to XN divided by N gives + +318 +00:22:23,290 --> 00:22:28,530 +the mean or the arithmetic mean. So X bar is the + +319 +00:22:28,530 --> 00:22:32,690 +average which is the sum of values divided by the + +320 +00:22:32,690 --> 00:22:36,270 +number of observations. So that's the first + +321 +00:22:36,270 --> 00:22:38,830 +definition. For example, + +322 +00:22:42,180 --> 00:22:46,920 +So again, the mean is the most common measure of + +323 +00:22:46,920 --> 00:22:51,780 +center tendency. Number two, the definition of the + +324 +00:22:51,780 --> 00:22:55,440 +mean. Sum of values divided by the number of + +325 +00:22:55,440 --> 00:23:02,960 +values. That means the mean takes all the values, + +326 +00:23:04,140 --> 00:23:09,740 +then divided by N. it makes sense that the mean is + +327 +00:23:09,740 --> 00:23:13,380 +affected by extreme values or outliers. I mean, if + +328 +00:23:13,380 --> 00:23:17,840 +the data has outliers or extreme values, I mean by + +329 +00:23:17,840 --> 00:23:21,400 +extreme values, large or very, very large values + +330 +00:23:21,400 --> 00:23:24,980 +and small, small values. Large values or small + +331 +00:23:24,980 --> 00:23:31,100 +values are extreme values. Since the mean takes + +332 +00:23:31,100 --> 00:23:33,420 +all these values and sums all together, doesn't + +333 +00:23:33,420 --> 00:23:38,550 +divide by n, that means The mean is affected by + +334 +00:23:38,550 --> 00:23:41,350 +outliers or by extreme values. For example, + +335 +00:23:42,030 --> 00:23:45,110 +imagine we have simple data as 1, 2, 3, 4, and 5. 
+
+336
+00:23:46,110 --> 00:23:49,830
+Simple example. Now, what's the mean? The mean is
+
+337
+00:23:49,830 --> 00:23:53,570
+just add these values, then divide by the total
+
+338
+00:23:53,570 --> 00:23:56,910
+number of observations. In this case, the sum of
+
+339
+00:23:56,910 --> 00:24:01,710
+these is 15. N is five because there are five
+
+340
+00:24:01,710 --> 00:24:05,920
+observations. So X bar is 15 divided by 5, which
+
+341
+00:24:05,920 --> 00:24:10,240
+is 3. So straightforward. Now imagine instead of
+
+342
+00:24:10,240 --> 00:24:16,480
+5, this number 5, we have a 10. Now 10, there is a
+
+343
+00:24:16,480 --> 00:24:21,400
+gap between 4, which is the second largest, and
+
+344
+00:24:21,400 --> 00:24:25,600
+the maximum, which is 10. Now if we add these
+
+345
+00:24:25,600 --> 00:24:30,540
+values, 1, 2, 3, 4, and 10, then divide by 5, the
+
+346
+00:24:30,540 --> 00:24:36,680
+mean will be 4. If you see here, we just added one
+
+347
+00:24:36,680 --> 00:24:41,060
+value, or I mean, we replaced five by 10, and the
+
+348
+00:24:41,060 --> 00:24:44,700
+mean changed dramatically from three to four.
+
+349
+00:24:45,520 --> 00:24:48,860
+There is big change between three and four, around
+
+350
+00:24:48,860 --> 00:24:55,560
+25% more. So that means outliers or extreme values
+
+351
+00:24:55,560 --> 00:25:01,200
+affected the mean. So take this information in
+
+352
+00:25:01,200 --> 00:25:03,560
+your mind because later we'll talk a little bit
+
+353
+00:25:03,560 --> 00:25:07,360
+about another one. So the mean is affected by
+
+354
+00:25:07,360 --> 00:25:13,100
+extreme values. Imagine another example. Suppose
+
+355
+00:25:13,100 --> 00:25:20,060
+we have data from 1 to 9. 1, 2, 3, 4, 5, 6, 7, 8, 9.
+
+356
+00:25:21,040 --> 00:25:26,690
+Now the mean of these values, sum divided by n. If
+
+357
+00:25:26,690 --> 00:25:31,970
+you sum 1 through 9, summation is 45. Divide by 9,
+
+358
+00:25:32,510 --> 00:25:36,230
+which is 5. 
So the sum of these values divided by + +359 +00:25:36,230 --> 00:25:41,590 +N gives the average, so the average is 5. Now + +360 +00:25:41,590 --> 00:25:46,670 +suppose we add 100 to the end of this data. So the + +361 +00:25:46,670 --> 00:25:53,670 +sum will be 145 divided by 10, that's 14.5. Now + +362 +00:25:53,670 --> 00:25:58,850 +the mean was 5. Then after we added 100, it + +363 +00:25:58,850 --> 00:26:05,470 +becomes 14.5. Imagine the mean was 5, it changed + +364 +00:26:05,470 --> 00:26:11,650 +to 14.5. It means around three times. So that + +365 +00:26:11,650 --> 00:26:17,510 +means outliers affect the mean much more than the + +366 +00:26:17,510 --> 00:26:19,890 +other one. We'll talk a little later about it, + +367 +00:26:19,990 --> 00:26:23,950 +which is the median. So keep in mind outliers + +368 +00:26:25,290 --> 00:26:34,790 +affected the mean in this case. Any question? Is + +369 +00:26:34,790 --> 00:26:41,590 +it clear? Yes. So, one more time. The mean is + +370 +00:26:41,590 --> 00:26:46,990 +affected by extreme values. So that's for the + +371 +00:26:46,990 --> 00:26:50,910 +mean. The other measure of center tendency is + +372 +00:26:50,910 --> 00:26:57,600 +called the median. Now, what's the median? What's + +373 +00:26:57,600 --> 00:27:00,760 +the definition of the median from your previous + +374 +00:27:00,760 --> 00:27:05,880 +studies? What's the median? I mean, what's the + +375 +00:27:05,880 --> 00:27:09,360 +definition of the median? Now the middle value, + +376 +00:27:09,760 --> 00:27:12,980 +that's correct, but after we arrange the data from + +377 +00:27:12,980 --> 00:27:17,040 +smallest to largest or largest to smallest, so we + +378 +00:27:17,040 --> 00:27:20,160 +should arrange the data, then we can figure out + +379 +00:27:20,160 --> 00:27:24,280 +the median. 
So the median is the middle point, but + +380 +00:27:24,280 --> 00:27:27,060 +after we arrange the data from smallest to largest + +381 +00:27:27,060 --> 00:27:30,030 +or vice versa. So that's the definition of the + +382 +00:27:30,030 --> 00:27:33,930 +median. So in an ordered array, so we have to have + +383 +00:27:33,930 --> 00:27:39,230 +an order array, the median is the middle number. The + +384 +00:27:39,230 --> 00:27:42,810 +middle number means 50 percent of the data below + +385 +00:27:42,810 --> 00:27:50,370 +and 50 percent above the median because it's + +386 +00:27:50,370 --> 00:27:52,190 +called the median, the value in the middle after + +387 +00:27:52,190 --> 00:27:55,990 +you arrange the data from smallest to largest. + +388 +00:28:00,130 --> 00:28:02,770 +Suppose I again go back to the previous example + +389 +00:28:02,770 --> 00:28:09,690 +when we have data 1, 2, 3, 4, and 5. Now for this + +390 +00:28:09,690 --> 00:28:14,210 +specific example as we did before, now the data is + +391 +00:28:14,210 --> 00:28:18,670 +already ordered. The value in the middle is 3 + +392 +00:28:18,670 --> 00:28:22,330 +because there are two values. + +393 +00:28:24,860 --> 00:28:27,300 +And also there are the same number of observations + +394 +00:28:27,300 --> 00:28:33,140 +above it. So 3 is the median. Now again imagine we + +395 +00:28:33,140 --> 00:28:37,320 +replace 5, which is the maximum value, by another + +396 +00:28:37,320 --> 00:28:42,140 +one which is extreme one, for example 10. In this + +397 +00:28:42,140 --> 00:28:47,600 +case, the median is still 3. Because the median is + +398 +00:28:47,600 --> 00:28:49,380 +just the value of the middle after you arrange the + +399 +00:28:49,380 --> 00:28:53,900 +data. So it doesn't matter what is the highest or + +400 +00:28:53,900 --> 00:28:58,860 +the maximum value is, the median in this case is + +401 +00:28:58,860 --> 00:29:03,700 +three. It doesn't change. 
That means the median is + +402 +00:29:03,700 --> 00:29:08,020 +not affected by extreme values. Or to be more + +403 +00:29:08,020 --> 00:29:12,910 +precise, we can say that The median is affected by + +404 +00:29:12,910 --> 00:29:18,990 +outliers, but not the same as the mean. So affect + +405 +00:29:18,990 --> 00:29:23,610 +the mean much more than the median. I mean, you + +406 +00:29:23,610 --> 00:29:26,550 +cannot say for this example, yes, the median is + +407 +00:29:26,550 --> 00:29:29,310 +not affected because the median was three, it + +408 +00:29:29,310 --> 00:29:33,590 +becomes three. But in another examples, there is + +409 +00:29:33,590 --> 00:29:36,750 +small difference between all. + +410 +00:29:40,770 --> 00:29:44,850 +Extreme values affected the mean much more than + +411 +00:29:44,850 --> 00:29:51,450 +the median. If the dataset has + +445 +00:32:11,200 --> 00:32:15,820 +need a rule that locates the median. The location + +446 +00:32:15,820 --> 00:32:18,020 +of the median when the values are in numerical + +447 +00:32:18,020 --> 00:32:23,580 +order from smallest to largest is N plus one + +448 +00:32:23,580 --> 00:32:26,140 +divided by two. That's the position of the median. + +449 +00:32:26,640 --> 00:32:28,860 +If we go back a little bit to the previous + +450 +00:32:28,860 --> 00:32:34,980 +example, here N was five. So the location was + +451 +00:32:34,980 --> 00:32:40,000 +number three, because n plus one divided by two, + +452 +00:32:40,120 --> 00:32:43,120 +five plus one divided by two is three. So location + +453 +00:32:43,120 --> 00:32:47,340 +number three is the median. Location number one is + +454 +00:32:47,340 --> 00:32:50,840 +one, in this case, then two, then three. So + +455 +00:32:50,840 --> 00:32:53,740 +location number three is three. But maybe this + +456 +00:32:53,740 --> 00:32:57,280 +number is not three, and other value maybe 3.1 or + +457 +00:32:57,280 --> 00:33:02,440 +3.2. But the location is number three. 
Is it + +458 +00:33:02,440 --> 00:33:08,470 +clear? So that's the location. If it is odd, you + +459 +00:33:08,470 --> 00:33:13,270 +mean by odd number, five, seven and so on. So if + +460 +00:33:13,270 --> 00:33:17,090 +the number of values is odd, the median is the + +461 +00:33:17,090 --> 00:33:21,210 +middle number. Now let's imagine if we have even + +462 +00:33:21,210 --> 00:33:24,570 +number of observations. For example, we have one, + +463 +00:33:24,610 --> 00:33:28,270 +two, three, four, five and six. So imagine numbers + +464 +00:33:28,270 --> 00:33:32,390 +from one up to six. What's the median? Now three + +465 +00:33:32,390 --> 00:33:35,610 +is not the median because there are two + +466 +00:33:35,610 --> 00:33:43,390 +observations below three. And three above it. And + +467 +00:33:43,390 --> 00:33:46,210 +four is not the median because three observations + +468 +00:33:46,210 --> 00:33:53,290 +below, two above. So three and four is the middle + +469 +00:33:53,290 --> 00:33:56,870 +value. So just take the average of two middle + +470 +00:33:56,870 --> 00:34:01,570 +points, And that will be the median. So if n is + +471 +00:34:01,570 --> 00:34:07,990 +even, you have to locate two middle points. For + +472 +00:34:07,990 --> 00:34:10,310 +example, n over 2, in this case, we have six + +473 +00:34:10,310 --> 00:34:13,910 +observations. So divide by 2, not n plus 1 divided + +474 +00:34:13,910 --> 00:34:17,970 +by 2, just n over 2. So n over 2 is 3. So place + +475 +00:34:17,970 --> 00:34:22,930 +number 3, and the next one, place number 4, these + +476 +00:34:22,930 --> 00:34:25,930 +are the two middle points. Take the average of + +477 +00:34:25,930 --> 00:34:32,300 +these values, then that's your median. So if N is + +478 +00:34:32,300 --> 00:34:37,080 +even, you have to be careful. You have to find two + +479 +00:34:37,080 --> 00:34:40,860 +middle points and just take the average of these + +480 +00:34:40,860 --> 00:34:45,100 +two. 
So if N is even, the median is the average of

+481
+00:34:45,100 --> 00:34:49,200
+the two middle numbers. Keep in mind, when we are

+482
+00:34:49,200 --> 00:34:54,600
+saying n plus one over two, that is just the position of

+483
+00:34:54,600 --> 00:34:58,670
+the median, not the value, location. Not the

+484
+00:34:58,670 --> 00:35:07,770
+value. Is it clear? Any question? So location is

+485
+00:35:07,770 --> 00:35:10,150
+not the value. Location is just the place or the

+486
+00:35:10,150 --> 00:35:13,450
+position of the median. If N is odd, the position

+487
+00:35:13,450 --> 00:35:17,710
+is N plus one divided by two. If N is even, the

+488
+00:35:17,710 --> 00:35:20,870
+positions of the two middle points are N over two

+489
+00:35:20,870 --> 00:35:23,090
+and the next term or the next point.

+490
+00:35:28,390 --> 00:35:32,510
+Last measure of center tendency is called the

+491
+00:35:32,510 --> 00:35:32,750
+mode.

+492
+00:35:35,890 --> 00:35:39,010
+The definition of the mode, the mode is the most

+493
+00:35:39,010 --> 00:35:44,250
+frequent value. So sometimes the mode exists,

+494
+00:35:45,230 --> 00:35:48,570
+sometimes the mode does not exist. Or sometimes

+495
+00:35:48,570 --> 00:35:53,730
+there is only one mode, in other cases maybe there

+496
+00:35:53,730 --> 00:35:58,730
+are several modes. So a value that occurs most

+497
+00:35:58,730 --> 00:36:03,010
+often is called the mode. The mode is not affected

+498
+00:36:03,010 --> 00:36:07,610
+by extreme values. It can be used for either

+499
+00:36:07,610 --> 00:36:11,190
+numerical or categorical data. And that's the

+500
+00:36:11,190 --> 00:36:13,910
+difference between mean and median and the mode.

+501
+00:36:14,590 --> 00:36:16,930
+Mean and median is used just for numerical data.

+502
+00:36:17,430 --> 00:36:21,270
+Here, the mode can be used for both, categorical

+503
+00:36:21,270 --> 00:36:25,610
+and numerical data. 
Sometimes, as I mentioned,

+504
+00:36:25,930 --> 00:36:29,570
+there may be no mode or the mode does not exist.

+505
+00:36:30,130 --> 00:36:34,190
+In other cases, there may be several modes. So

+506
+00:36:34,190 --> 00:36:36,870
+the mode is the value that is the most frequent.

+507
+00:36:37,490 --> 00:36:43,650
+For example, if you look at this data, one is

+508
+00:36:43,650 --> 00:36:48,370
+repeated once, three is the same one time, five is

+509
+00:36:48,370 --> 00:36:52,290
+repeated twice, seven is once, nine is repeated

+510
+00:36:52,290 --> 00:36:57,330
+three times and so on so in this case nine is the

+511
+00:36:57,330 --> 00:37:00,290
+mode because the mode again is the most frequent

+512
+00:37:00,290 --> 00:37:05,030
+value on

+513
+00:37:05,030 --> 00:37:08,550
+the right side there are some values zero one two

+514
+00:37:08,550 --> 00:37:12,830
+three up to six now each one is repeated once so

+515
+00:37:12,830 --> 00:37:15,350
+in this case the mode does not exist I mean there

+516
+00:37:15,350 --> 00:37:22,310
+is no mode So generally speaking, the mode is the

+517
+00:37:22,310 --> 00:37:26,310
+value that occurs most often. It can be used for

+518
+00:37:26,310 --> 00:37:29,790
+numerical or categorical data, not affected by

+519
+00:37:29,790 --> 00:37:32,970
+extreme values or outliers. Sometimes there is

+520
+00:37:32,970 --> 00:37:36,150
+only one mode as this example. Sometimes the mode

+521
+00:37:36,150 --> 00:37:40,390
+does not exist. Or sometimes there are several

+522
+00:37:40,390 --> 00:37:45,190
+modes. And so that's the definitions for mean,

+523
+00:37:46,430 --> 00:37:52,540
+median, and the mode. I will give just a numerical

+524
+00:37:52,540 --> 00:37:56,380
+example to know how can we compute these measures.

+525
+00:37:57,420 --> 00:38:01,540
+This data, simple data, just for illustration, we

+526
+00:38:01,540 --> 00:38:07,580
+have house prices. 
We have five data points, $2

+527
+00:38:07,580 --> 00:38:10,940
+million. This is the price of house A, for

+528
+00:38:10,940 --> 00:38:15,880
+example. House B price is 500,000. The other one

+529
+00:38:15,880 --> 00:38:19,120
+is 300,000. And two houses have the same price as

+530
+00:38:19,120 --> 00:38:25,850
+100,000. Now, just to compute the mean, add these

+531
+00:38:25,850 --> 00:38:29,350
+values or sum these values, which is three

+532
+00:38:29,350 --> 00:38:34,030
+million, divide by number of houses here, there

+533
+00:38:34,030 --> 00:38:38,550
+are five houses, so just three million divided by

+534
+00:38:38,550 --> 00:38:44,170
+five, six hundred thousand. The median, the value

+535
+00:38:44,170 --> 00:38:46,150
+in the middle, after you arrange the data from

+536
+00:38:46,150 --> 00:38:51,470
+smallest to largest, Or largest to smallest. This

+537
+00:38:51,470 --> 00:38:55,410
+data is already arranged from largest to smallest or

+538
+00:38:55,410 --> 00:38:58,150
+smallest to largest. It doesn't matter actually. So the

+539
+00:38:58,150 --> 00:39:02,930
+median is $300,000. Make sense? Because there are

+540
+00:39:02,930 --> 00:39:09,490
+two house prices above and two below. So the

+541
+00:39:09,490 --> 00:39:13,610
+median is $300,000. Now if you look at these two

+542
+00:39:13,610 --> 00:39:21,350
+values, the mean for this data equals 600,000 and

+543
+00:39:21,350 --> 00:39:26,690
+the median is 300,000. The mean is double the

+544
+00:39:26,690 --> 00:39:31,750
+median. Do you think why there is a big difference

+545
+00:39:31,750 --> 00:39:36,030
+in this data between the mean and the median?

+546
+00:39:36,190 --> 00:39:42,290
+Which one? Two million dollars is extreme value,

+547
+00:39:42,510 --> 00:39:45,940
+very large number. 
I mean, if you compare two + +548 +00:39:45,940 --> 00:39:48,860 +million dollars with the other data sets or other + +549 +00:39:48,860 --> 00:39:51,320 +data values, you will see there is a big + +550 +00:39:51,320 --> 00:39:53,260 +difference between two million and five hundred. + +551 +00:39:53,620 --> 00:39:56,280 +It's four times, plus about three hundred + +552 +00:39:56,280 --> 00:39:59,780 +thousands, around seven times and so on. For this + +553 +00:39:59,780 --> 00:40:07,880 +value, the mean is affected. Exactly. The median + +554 +00:40:07,880 --> 00:40:11,740 +is resistant to outliers. It's affected but little + +555 +00:40:11,740 --> 00:40:17,100 +bit. For this reason, we have to use the median. + +556 +00:40:17,300 --> 00:40:20,720 +So the median makes more sense than using the + +557 +00:40:20,720 --> 00:40:24,480 +mean. The mode is just the most frequent value, + +558 +00:40:24,660 --> 00:40:28,720 +which is 100,000, because this value is repeated + +559 +00:40:28,720 --> 00:40:33,820 +twice. So that's the whole story for central + +560 +00:40:33,820 --> 00:40:40,720 +tendency measures, mean, median, and mode. Now the + +561 +00:40:40,720 --> 00:40:45,640 +question again is which measure to use? The mean + +562 +00:40:45,640 --> 00:40:49,280 +is generally used. The most common center tendency + +563 +00:40:49,280 --> 00:40:53,420 +is the mean. We can use it or we should use it + +564 +00:40:53,420 --> 00:40:59,920 +unless extreme values exist. I mean if the data + +565 +00:40:59,920 --> 00:41:03,960 +set has no outliers or extreme values, we have to + +566 +00:41:03,960 --> 00:41:06,240 +use the mean instead of the median. + +567 +00:41:09,810 --> 00:41:14,670 +The median is often used since the median is not + +568 +00:41:14,670 --> 00:41:18,330 +sensitive to extreme values. I mean, the median is + +569 +00:41:18,330 --> 00:41:22,030 +resistant to outliers. 
It remains nearly in the + +570 +00:41:22,030 --> 00:41:26,490 +same position if the dataset has outliers. But the + +571 +00:41:26,490 --> 00:41:29,850 +median will be affected either to the right or to + +572 +00:41:29,850 --> 00:41:34,350 +the left tail. So we have to use the median if the + +573 +00:41:34,350 --> 00:41:40,060 +data has extreme values. For example, median home + +574 +00:41:40,060 --> 00:41:44,100 +prices for the previous one may be reported for a + +575 +00:41:44,100 --> 00:41:48,000 +region that is less sensitive to outliers. So the + +576 +00:41:48,000 --> 00:41:52,880 +mean is more sensitive to outliers than the + +577 +00:41:52,880 --> 00:41:56,520 +median. Sometimes, I mean in some situations, it + +578 +00:41:56,520 --> 00:41:58,760 +makes sense to report both the mean and the + +579 +00:41:58,760 --> 00:42:01,860 +median. Just say the mean for this data for home + +580 +00:42:01,860 --> 00:42:07,570 +prices is 600,000 while the median is 300,000. If + +581 +00:42:07,570 --> 00:42:10,150 +you look at these two figures, you can tell that + +582 +00:42:10,150 --> 00:42:13,830 +there exists outlier or the outlier exists because + +583 +00:42:13,830 --> 00:42:17,230 +there is a big difference between the mean and the + +584 +00:42:17,230 --> 00:42:24,310 +median. So that's all for measures of central + +585 +00:42:24,310 --> 00:42:28,830 +tendency. Again, we explained three measures, + +586 +00:42:29,450 --> 00:42:33,930 +arithmetic mean, median, and mode. And arithmetic + +587 +00:42:33,930 --> 00:42:38,990 +mean again is denoted by X bar is pronounced as X + +588 +00:42:38,990 --> 00:42:44,410 +bar and just summation of X divided by N. So + +589 +00:42:44,410 --> 00:42:48,070 +summation Xi, i goes from 1 up to N divided by the + +590 +00:42:48,070 --> 00:42:52,170 +total number of observations. 
The median, as we + +591 +00:42:52,170 --> 00:42:55,690 +mentioned, is the value in the middle in ordered + +592 +00:42:55,690 --> 00:42:59,150 +array. After you arrange the data from smallest to + +593 +00:42:59,150 --> 00:43:01,930 +largest or vice versa, then the median is the + +594 +00:43:01,930 --> 00:43:06,330 +value in the middle. The mode is the most frequent + +595 +00:43:06,330 --> 00:43:09,030 +observed value. And we have to know that mean and + +596 +00:43:09,030 --> 00:43:13,870 +median are used only for numerical data, while the + +597 +00:43:13,870 --> 00:43:17,510 +mode can be used for both numerical and + +598 +00:43:17,510 --> 00:43:24,290 +categorical data. That's all about measures of + +599 +00:43:24,290 --> 00:43:27,210 +central tendency. Any question? + +600 +00:43:33,210 --> 00:43:40,230 +Let's move to measures of variation. It's another + +601 +00:43:40,230 --> 00:43:43,750 +type of measures. It's called measures of + +602 +00:43:43,750 --> 00:43:47,490 +variation, sometimes called measures of spread. + +603 +00:43:50,490 --> 00:43:53,850 +Now, variation can be computed by using range, + +604 +00:43:55,590 --> 00:44:00,850 +variance, standard deviation, and coefficient of + +605 +00:44:00,850 --> 00:44:08,430 +variation. So we have four types, range, variance, + +606 +00:44:09,250 --> 00:44:12,050 +standard deviation, and coefficient of variation. + +607 +00:44:13,710 --> 00:44:16,150 +Now, measures of variation give information on the + +608 +00:44:16,150 --> 00:44:19,410 +spread. Now, this is the first difference between + +609 +00:44:19,410 --> 00:44:24,210 +central tendency measures and measures of + +610 +00:44:24,210 --> 00:44:28,270 +variation. That one measures the central value or + +611 +00:44:28,270 --> 00:44:30,790 +the value in the middle. Here, it measures the + +612 +00:44:30,790 --> 00:44:36,310 +spread. Or variability. Or dispersion of the data. + +613 +00:44:36,450 --> 00:44:40,310 +Do you know what is dispersion? 
Dispersion. + +614 +00:44:40,630 --> 00:44:45,590 +Tabaad. So major variation given formation with + +615 +00:44:45,590 --> 00:44:48,350 +the spread. Spread or variation or dispersion of + +616 +00:44:48,350 --> 00:44:52,250 +the data values. Now if you look at these two bell + +617 +00:44:52,250 --> 00:44:52,650 +shapes. + +618 +00:44:55,670 --> 00:44:59,170 +Both have the same center. The center I mean the + +619 +00:44:59,170 --> 00:45:01,730 +value in the middle. So the value in the middle + +620 +00:45:01,730 --> 00:45:06,990 +here for figure + +621 +00:45:06,990 --> 00:45:10,150 +graph number one is the same as the value for the + +622 +00:45:10,150 --> 00:45:16,270 +other graph. So both graphs have the same center. + +623 +00:45:17,430 --> 00:45:20,670 +But if you look at the spread, you will see that + +624 +00:45:20,670 --> 00:45:26,230 +figure A is less spread than figure B. Now if you + +625 +00:45:26,230 --> 00:45:29,720 +look at this one, the spread here, is much less + +626 +00:45:29,720 --> 00:45:34,120 +than the other one. Even they have the same + +627 +00:45:34,120 --> 00:45:39,260 +center, the same mean, but figure A is more spread + +628 +00:45:39,260 --> 00:45:45,140 +than figure B. It means that the variation in A is + +629 +00:45:45,140 --> 00:45:49,920 +much less than the variation in figure B. So it + +630 +00:45:49,920 --> 00:45:55,960 +means that the mean is not sufficient to describe + +631 +00:45:55,960 --> 00:45:59,970 +your data. Because maybe you have two datasets and + +632 +00:45:59,970 --> 00:46:03,330 +both have the same mean, but the spread or the + +633 +00:46:03,330 --> 00:46:07,350 +variation is completely different. Again, maybe we + +634 +00:46:07,350 --> 00:46:10,250 +have two classes of statistics, class A and class + +635 +00:46:10,250 --> 00:46:13,230 +B. The center or the mean or the average is the + +6 + +667 +00:48:25,880 --> 00:48:32,360 +data. The minimum value is one. 
I mean, the + +668 +00:48:32,360 --> 00:48:34,680 +smallest value is one, and the largest or the + +669 +00:48:34,680 --> 00:48:38,880 +maximum is 13. So it makes sense that the range of + +670 +00:48:38,880 --> 00:48:41,840 +the data is the difference between these two + +671 +00:48:41,840 --> 00:48:48,540 +values. So 13 minus one is 12. Now, imagine that + +672 +00:48:48,540 --> 00:48:58,040 +we just replace 13 by 100. So the new range will + +673 +00:48:58,040 --> 00:49:03,820 +be equal to 100 minus 1, 99. So the previous range + +674 +00:49:03,820 --> 00:49:08,340 +was 12. It becomes now 99 after we replace the + +675 +00:49:08,340 --> 00:49:12,100 +maximum by 100. So it means that range is affected + +676 +00:49:12,100 --> 00:49:18,740 +by extreme values. So the mean and range both are + +677 +00:49:18,740 --> 00:49:23,040 +sensitive to outliers. So you have to link between + +678 +00:49:26,410 --> 00:49:30,210 +measures of center tendency and measures of + +679 +00:49:30,210 --> 00:49:33,130 +variation. Mean and range are affected by + +680 +00:49:33,130 --> 00:49:37,910 +outliers. The mean and range are affected by + +681 +00:49:37,910 --> 00:49:41,450 +outliers. This is an example. So it's very easy to + +682 +00:49:41,450 --> 00:49:49,550 +compute the mean. Next, if you look at why the + +683 +00:49:49,550 --> 00:49:51,190 +range can be misleading. + +684 +00:49:53,830 --> 00:49:56,810 +Sometimes you report the range and the range does + +685 +00:49:56,810 --> 00:50:00,310 +not give an appropriate answer or appropriate + +686 +00:50:00,310 --> 00:50:04,450 +result because number + +687 +00:50:04,450 --> 00:50:06,790 +one ignores the way in which the data are + +688 +00:50:06,790 --> 00:50:10,770 +distributed. For example, if you look at this + +689 +00:50:10,770 --> 00:50:15,430 +specific data, we have data seven, eight, nine, + +690 +00:50:15,590 --> 00:50:18,110 +ten, eleven and twelve. So the range is five. 
+ +691 +00:50:19,270 --> 00:50:21,910 +Twelve minus seven is five. Now if you look at the + +692 +00:50:21,910 --> 00:50:26,360 +other data, The smallest value was seven. + +693 +00:50:29,600 --> 00:50:33,260 +And there is a gap between the smallest and the + +694 +00:50:33,260 --> 00:50:38,220 +next smallest value, which is 10. And also we have + +695 +00:50:38,220 --> 00:50:44,480 +12 is repeated three times. Still the range is the + +696 +00:50:44,480 --> 00:50:48,140 +same. Even there is a difference between these two + +697 +00:50:48,140 --> 00:50:53,640 +values, between two sets. we have seven, eight, + +698 +00:50:53,760 --> 00:50:57,020 +nine up to 12. And then the other data, we have + +699 +00:50:57,020 --> 00:51:02,180 +seven, 10, 11, and 12 three times. Still, the + +700 +00:51:02,180 --> 00:51:06,360 +range equals five. So it doesn't make sense to + +701 +00:51:06,360 --> 00:51:09,620 +report the range as a measure of variation. + +702 +00:51:10,520 --> 00:51:12,640 +Because if you look at the distribution for this + +703 +00:51:12,640 --> 00:51:15,500 +data, it's completely different from the other + +704 +00:51:15,500 --> 00:51:20,860 +dataset. Even though it has the same range. So + +705 +00:51:20,860 --> 00:51:25,220 +range is not used in this case. Look at another + +706 +00:51:25,220 --> 00:51:25,680 +example. + +707 +00:51:28,300 --> 00:51:32,920 +We have data. All the data ranges, I mean, starts + +708 +00:51:32,920 --> 00:51:38,680 +from 1 up to 5. So the range is 4. If we just + +709 +00:51:38,680 --> 00:51:46,200 +replace the maximum, which is 5, by 120. So the + +710 +00:51:46,200 --> 00:51:49,190 +range is completely different. The range becomes + +711 +00:51:49,190 --> 00:51:55,010 +119. So that means range + +712 +00:51:55,010 --> 00:51:59,230 +is sensitive to outliers. So we have to avoid + +713 +00:51:59,230 --> 00:52:06,030 +using range in case of outliers or extreme values. 
+ +714 +00:52:08,930 --> 00:52:14,410 +I will stop at the most important one, the + +715 +00:52:14,410 --> 00:52:18,350 +variance, for next time insha'allah. Up to this + +716 +00:52:18,350 --> 00:52:19,310 +point, any questions? + +717 +00:52:22,330 --> 00:52:29,730 +Okay, stop at this point if + +718 +00:52:29,730 --> 00:52:30,510 +you have any question. + +719 +00:52:35,430 --> 00:52:39,430 +So later we'll discuss measures of variation and + +720 +00:52:39,430 --> 00:52:44,810 +variance, standard deviation up to the end of this + +721 +00:52:44,810 --> 00:52:45,090 +chapter. + +722 +00:52:54,630 --> 00:53:00,690 +So again, the range is sensitive to outliers. So + +723 +00:53:00,690 --> 00:53:03,850 +we have to avoid using range in this case. And + +724 +00:53:03,850 --> 00:53:06,270 +later we'll talk about the variance, which is the + +725 +00:53:06,270 --> 00:53:09,750 +most common measures of variation for next time, + +726 +00:53:09,830 --> 00:53:10,130 +insha'allah. diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..cfa181a410f70840cbe65d8f97d9f4a235f6489e --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_postprocess.srt @@ -0,0 +1,2904 @@ +1 +00:00:17,750 --> 00:00:21,350 +So let's again go back to chapter number one. Last + +2 +00:00:21,350 --> 00:00:25,730 +time we discussed chapter one, production and data + +3 +00:00:25,730 --> 00:00:32,390 +collection. And I think we described why learning + +4 +00:00:32,390 --> 00:00:36,510 +statistics distinguish between some of these + +5 +00:00:36,510 --> 00:00:43,810 +topics. 
And also we explained in details types of + +6 +00:00:43,810 --> 00:00:47,010 +statistics and we mentioned that statistics mainly + +7 +00:00:47,010 --> 00:00:52,430 +has two types either descriptive statistics which + +8 +00:00:52,430 --> 00:00:56,810 +means collecting summarizing and obtaining data + +9 +00:00:56,810 --> 00:00:59,910 +and other type of statistics is called inferential + +10 +00:00:59,910 --> 00:01:04,430 +statistics or statistical inference and this type + +11 +00:01:04,430 --> 00:01:11,070 +of statistics we can draw drawing conclusions and + +12 +00:01:11,070 --> 00:01:14,090 +making decision concerning a population based only + +13 +00:01:14,090 --> 00:01:17,510 +on a sample. That means we have a sample and + +14 +00:01:17,510 --> 00:01:20,970 +sample is just a subset of the population or the + +15 +00:01:20,970 --> 00:01:26,230 +portion of the population and we use the data from + +16 +00:01:26,230 --> 00:01:29,130 +that sample to make some conclusion about the + +17 +00:01:29,130 --> 00:01:32,390 +entire population. This type of statistic is + +18 +00:01:32,390 --> 00:01:34,710 +called inferential statistics. Later, Inshallah, + +19 +00:01:34,750 --> 00:01:37,710 +we'll talk in details about inferential statistics + +20 +00:01:37,710 --> 00:01:45,290 +that will start in Chapter 7. Also, we gave some + +21 +00:01:45,290 --> 00:01:50,630 +definitions for variables, data, and we + +22 +00:01:50,630 --> 00:01:53,510 +distinguished between population and sample. And + +23 +00:01:53,510 --> 00:01:56,630 +we know that the population consists of all items + +24 +00:01:56,630 --> 00:02:00,270 +or individuals about which you want to draw a + +25 +00:02:00,270 --> 00:02:05,770 +conclusion. But in some cases, it's very hard to + +26 +00:02:05,770 --> 00:02:07,750 +talk about the population or the entire + +27 +00:02:07,750 --> 00:02:13,340 +population, so we can select a sample. 
A sample is + +28 +00:02:13,340 --> 00:02:18,480 +just a portion or subset of the entire population. + +29 +00:02:19,060 --> 00:02:21,860 +So we know now the definition of population and + +30 +00:02:21,860 --> 00:02:25,360 +sample. The other two types, parameter and + +31 +00:02:25,360 --> 00:02:28,860 +statistics. Parameter is a numerical measure that + +32 +00:02:28,860 --> 00:02:32,300 +describes characteristics of a population, while + +33 +00:02:32,300 --> 00:02:36,000 +on the other hand, a sample, a statistic is just + +34 +00:02:36,430 --> 00:02:39,730 +numerical measures that describe characteristic of + +35 +00:02:39,730 --> 00:02:44,930 +a sample. So parameter is computed from the + +36 +00:02:44,930 --> 00:02:48,930 +population while statistic is computed from the + +37 +00:02:48,930 --> 00:02:54,030 +sample. I think we stopped at this point. Why + +38 +00:02:54,030 --> 00:02:56,770 +collect data? I mean what are the reasons for + +39 +00:02:59,580 --> 00:03:01,980 +One of these reasons, for example, a marketing + +40 +00:03:01,980 --> 00:03:04,660 +research analyst needs to assess the effectiveness + +41 +00:03:04,660 --> 00:03:07,700 +of a new television advertisement. For example, + +42 +00:03:07,840 --> 00:03:13,380 +suppose you are a manager and you want to increase + +43 +00:03:13,380 --> 00:03:18,060 +your salaries or your sales. Now, sales may be + +44 +00:03:18,060 --> 00:03:23,380 +affected by advertising. So I mean, if you spend + +45 +00:03:23,380 --> 00:03:26,320 +more on advertising, it means your sales becomes + +46 +00:03:26,320 --> 00:03:29,740 +larger and larger. So you want to know if this + +47 +00:03:29,740 --> 00:03:34,160 +variable, I mean if advertisement is an effective + +48 +00:03:34,160 --> 00:03:38,900 +variable that maybe increase your sales. So that's + +49 +00:03:38,900 --> 00:03:43,900 +one of the reasons why we use data. 
The other one, + +50 +00:03:44,120 --> 00:03:46,880 +for example, pharmaceutical manufacturers needs to + +51 +00:03:46,880 --> 00:03:49,800 +determine whether a new drug is more effective + +52 +00:03:49,800 --> 00:03:53,240 +than those currently used. For example, for a + +53 +00:03:53,240 --> 00:03:59,330 +headache, we use drug A. Now, a new drug is + +54 +00:03:59,330 --> 00:04:04,510 +produced and you want to see if this new drug is + +55 +00:04:04,510 --> 00:04:10,090 +more effective than drug A that I mean if headache + +56 +00:04:10,090 --> 00:04:13,410 +suppose for example is removed after three days by + +57 +00:04:13,410 --> 00:04:20,490 +using drug A now the question is does B is more + +58 +00:04:20,490 --> 00:04:23,410 +effective it means it reduces your headache in + +59 +00:04:23,410 --> 00:04:26,070 +fewer than three days I mean maybe in two days + +60 +00:04:26,510 --> 00:04:30,810 +That means a drug B is more effective than a drug + +61 +00:04:30,810 --> 00:04:34,510 +A. So we want to know the difference between these + +62 +00:04:34,510 --> 00:04:37,250 +two drugs. I mean, we have two samples. Some + +63 +00:04:37,250 --> 00:04:40,810 +people used drug A and the other used drug B. And + +64 +00:04:40,810 --> 00:04:43,190 +we want to see if there is a significant + +65 +00:04:43,190 --> 00:04:47,690 +difference between the times that is used to + +66 +00:04:47,690 --> 00:04:51,150 +reduce the headache. So that's one of the reasons + +67 +00:04:51,150 --> 00:04:55,260 +why we use statistics. Sometimes an operation + +68 +00:04:55,260 --> 00:04:58,500 +manager wants to monitor manufacturing process to + +69 +00:04:58,500 --> 00:05:00,720 +find out whether the quality of a product being + +70 +00:05:00,720 --> 00:05:02,840 +manufactured is conforming to a company's + +71 +00:05:02,840 --> 00:05:05,700 +standards. Do you know what the meaning of + +72 +00:05:05,700 --> 00:05:06,520 +company's standards? 
+ +73 +00:05:09,900 --> 00:05:15,320 +The regulations of the firm itself. Another + +74 +00:05:15,320 --> 00:05:21,020 +example, suppose here in the school last year, we + +75 +00:05:21,020 --> 00:05:25,150 +teach statistics by using method A. traditional + +76 +00:05:25,150 --> 00:05:29,350 +method. This year we developed a new method for + +77 +00:05:29,350 --> 00:05:32,370 +teaching and our goal is to see if the new method + +78 +00:05:32,370 --> 00:05:36,510 +is better than method A which was used in last + +79 +00:05:36,510 --> 00:05:38,910 +year. So we want to see if there is a big + +80 +00:05:38,910 --> 00:05:42,410 +difference between scores or the average scores + +81 +00:05:42,410 --> 00:05:47,310 +last year and this year. The same you can do for + +82 +00:05:47,310 --> 00:05:52,350 +your weight. Suppose there are 20 students in this + +83 +00:05:52,350 --> 00:05:56,960 +class and their weights are high. And our goal is + +84 +00:05:56,960 --> 00:06:04,280 +to reduce their weights. Suppose they + +85 +00:06:04,280 --> 00:06:09,640 +have a regime or diet for three months or + +86 +00:06:09,640 --> 00:06:12,140 +exercise, whatever it is, then after three months, + +87 +00:06:12,220 --> 00:06:17,060 +we have new weights for these persons. And we want + +88 +00:06:17,060 --> 00:06:19,840 +to see if the diet is effective. I mean, if the + +89 +00:06:19,840 --> 00:06:24,120 +average weight was greater than or smaller than + +90 +00:06:24,120 --> 00:06:28,600 +before diet. Is it clear? So there are many, many + +91 +00:06:28,600 --> 00:06:31,920 +reasons behind using statistics and collecting + +92 +00:06:31,920 --> 00:06:37,500 +data. Now, what are the sources of data? Since + +93 +00:06:37,500 --> 00:06:41,840 +statistics mainly, first step, we have to collect + +94 +00:06:41,840 --> 00:06:44,120 +data. Now, what are the sources of the data? + +95 +00:06:45,360 --> 00:06:48,420 +Generally speaking, there are two sources. 
One is + +96 +00:06:48,420 --> 00:06:52,430 +called The primary sources and the others + +97 +00:06:52,430 --> 00:06:55,770 +secondary sources. What do you think is the + +98 +00:06:55,770 --> 00:06:57,830 +difference between these two? I mean, what's the + +99 +00:06:57,830 --> 00:07:02,730 +difference between primary and secondary sources? + +100 +00:07:03,510 --> 00:07:07,250 +The primary source is the collector of the data. + +101 +00:07:07,670 --> 00:07:11,550 +He is the analyzer. He analyzes it. And then the + +102 +00:07:11,550 --> 00:07:14,230 +secondary, who collects the data, isn't there. + +103 +00:07:16,030 --> 00:07:18,910 +That's correct. So the primary sources means the + +104 +00:07:18,910 --> 00:07:22,490 +researcher by himself. He should collect the data, + +105 +00:07:23,890 --> 00:07:27,750 +then he can use the data to do his analysis. + +106 +00:07:28,310 --> 00:07:31,550 +That's for the primary. Now, the primary could be + +107 +00:07:31,550 --> 00:07:35,230 +data from political survey. You can distribute + +108 +00:07:35,230 --> 00:07:38,750 +questionnaire, for example, data collected from an + +109 +00:07:38,750 --> 00:07:42,530 +experiment. I mean maybe control or experimental + +110 +00:07:42,530 --> 00:07:45,730 +groups. We have two groups, maybe healthy people + +111 +00:07:45,730 --> 00:07:48,490 +and patient people. So that's experimental group. + +112 +00:07:49,010 --> 00:07:53,390 +Or observed data. That's the primary sources. + +113 +00:07:53,870 --> 00:07:56,450 +Secondary sources, the person performing data + +114 +00:07:56,450 --> 00:08:00,310 +analysis is not the data collector. So he obtained + +115 +00:08:00,310 --> 00:08:03,880 +the data from other sources. For example, it could + +116 +00:08:03,880 --> 00:08:07,140 +be analyzing census data or for example, examining + +117 +00:08:07,140 --> 00:08:10,160 +data from print journals or data published on the + +118 +00:08:10,160 --> 00:08:14,780 +internet. 
So maybe he goes to the Ministry of + +119 +00:08:14,780 --> 00:08:18,820 +Education and he can get some data. So the data is + +120 +00:08:18,820 --> 00:08:22,520 +already there and he just used the data to do some + +121 +00:08:22,520 --> 00:08:25,540 +analysis. So that's the difference between a + +122 +00:08:25,540 --> 00:08:29,420 +primary and secondary sources. So primary, the + +123 +00:08:29,420 --> 00:08:33,650 +researcher himself, should collect the data by + +124 +00:08:33,650 --> 00:08:35,590 +using one of the tools, either survey, + +125 +00:08:36,110 --> 00:08:39,050 +questionnaire, experiment, and so on. But + +126 +00:08:39,050 --> 00:08:41,450 +secondary, you can use the data that is published + +127 +00:08:41,450 --> 00:08:44,510 +in the internet, for example, in the books, in + +128 +00:08:44,510 --> 00:08:48,250 +governments and NGOs and so on. So these are the + +129 +00:08:48,250 --> 00:08:53,590 +two sources of data. Sources of data fall into + +130 +00:08:53,590 --> 00:08:57,610 +four categories. Number one, data distributed by + +131 +00:08:57,610 --> 00:09:01,190 +an organization or an individual. So that's + +132 +00:09:01,190 --> 00:09:06,170 +secondary source. A design experiment is primary + +133 +00:09:06,170 --> 00:09:10,350 +because you have to design the experiment, a + +134 +00:09:10,350 --> 00:09:14,610 +survey. It's also primary. An observational study + +135 +00:09:14,610 --> 00:09:17,590 +is also a primary source. So you have to + +136 +00:09:17,590 --> 00:09:21,410 +distinguish between a primary and secondary + +137 +00:09:21,410 --> 00:09:28,810 +sources. Any question? Comments? Next. + +138 +00:09:31,460 --> 00:09:34,540 +We'll talk a little bit about types of variables. + +139 +00:09:35,360 --> 00:09:37,580 +In general, there are two types of variables. 
One + +140 +00:09:37,580 --> 00:09:40,760 +is called categorical variables or qualitative + +141 +00:09:40,760 --> 00:09:43,160 +variables, and the other one is called numerical + +142 +00:09:43,160 --> 00:09:46,520 +or quantitative variables. Now, for example, if I + +143 +00:09:46,520 --> 00:09:50,560 +ask you, what's + +144 +00:09:50,560 --> 00:09:55,100 +your favorite color? You may say white, black, + +145 +00:09:55,220 --> 00:09:59,390 +red, and so on. What's your marital status? Maybe + +146 +00:09:59,390 --> 00:10:02,670 +married or unmarried, and so on. Gender, male, + +147 +00:10:02,850 --> 00:10:07,050 +either male or female, and so on. So this type of + +148 +00:10:07,050 --> 00:10:10,090 +variable is called qualitative variables. So + +149 +00:10:10,090 --> 00:10:13,130 +qualitative variables have values that can only be + +150 +00:10:13,130 --> 00:10:16,370 +placed into categories, such as, for example, yes + +151 +00:10:16,370 --> 00:10:21,350 +or no. For example, do you like orange? + +152 +00:10:22,270 --> 00:10:26,200 +The answer is either yes or no. Do you like + +153 +00:10:26,200 --> 00:10:30,040 +candidate A, for example, whatever his party is? + +154 +00:10:30,260 --> 00:10:34,620 +Do you like it? Either yes or no, and so on. As I + +155 +00:10:34,620 --> 00:10:37,480 +mentioned before, gender, marital status, race, + +156 +00:10:37,640 --> 00:10:41,820 +religions, these are examples of qualitative or + +157 +00:10:41,820 --> 00:10:46,240 +categorical variables. The other type of variable + +158 +00:10:46,240 --> 00:10:49,480 +which is commonly used is called numerical or + +159 +00:10:49,480 --> 00:10:53,230 +quantitative data. Quantitative variables have + +160 +00:10:53,230 --> 00:10:56,350 +values that represent quantities. For example, if + +161 +00:10:56,350 --> 00:11:00,470 +I ask you, what's your age? My age is 20 years old + +162 +00:11:00,470 --> 00:11:04,770 +or 18 years old. What's your weight? Income. 
+ +163 +00:11:07,510 --> 00:11:12,550 +Height? Temperature? Income. So it's a number. + +164 +00:11:13,610 --> 00:11:18,030 +Weight, maybe my weight is 70 kilograms. So + +165 +00:11:18,030 --> 00:11:22,450 +weight, age, height, salary, income, number of + +166 +00:11:22,450 --> 00:11:26,510 +students, number of phone calls you received on + +167 +00:11:26,510 --> 00:11:28,770 +your cell phone during one hour, number of + +168 +00:11:28,770 --> 00:11:33,210 +accidents happened in street and so on. So that's + +169 +00:11:33,210 --> 00:11:36,470 +the difference between numerical variables and + +170 +00:11:36,470 --> 00:11:37,710 +qualitative variables. + +171 +00:11:40,270 --> 00:11:42,650 +Anyone of you just give me one example of + +172 +00:11:42,650 --> 00:11:47,780 +qualitative and quantitative variables. Another + +173 +00:11:47,780 --> 00:11:51,700 +examples. Just give me one example for qualitative + +174 +00:11:51,700 --> 00:11:52,160 +data. + +175 +00:11:56,900 --> 00:11:58,780 +Qualitative or quantitative. + +176 +00:12:01,380 --> 00:12:04,880 +Political party, either party A or party B. So + +177 +00:12:04,880 --> 00:12:08,320 +suppose there are two parties, so I like party A, + +178 +00:12:08,720 --> 00:12:12,320 +she likes party B and so on. So party in this case + +179 +00:12:12,320 --> 00:12:15,060 +is qualitative variable, another one. + +180 +00:12:25,400 --> 00:12:28,820 +So types of courses, maybe business, economics, + +181 +00:12:29,480 --> 00:12:33,260 +administration, and so on. So types of courses. + +182 +00:12:34,260 --> 00:12:36,600 +Another example for quantitative variable or + +183 +00:12:36,600 --> 00:12:37,600 +numerical variables. + +184 +00:12:45,020 --> 00:12:51,440 +So production is a numerical variable. Another + +185 +00:12:51,440 --> 00:12:52,840 +example for quantitative. + +186 +00:12:56,350 --> 00:12:58,970 +Is that produced by this company? 
Number of cell + +187 +00:12:58,970 --> 00:13:03,950 +phones, maybe 20, 17, and so on. Any question? + +188 +00:13:06,190 --> 00:13:12,410 +Next. So generally speaking, types of data, data + +189 +00:13:12,410 --> 00:13:17,960 +has two types, categorical and numerical data. As + +190 +00:13:17,960 --> 00:13:21,240 +we mentioned, marital status, political party, eye + +191 +00:13:21,240 --> 00:13:25,120 +color, and so on. These are examples of + +192 +00:13:25,120 --> 00:13:28,180 +categorical variables. On the other hand, a + +193 +00:13:28,180 --> 00:13:30,720 +numerical variable can be split or divided into + +194 +00:13:30,720 --> 00:13:34,020 +two parts. One is called discrete and the other is + +195 +00:13:34,020 --> 00:13:35,680 +continuous, and we have to distinguish between + +196 +00:13:35,680 --> 00:13:40,240 +these two. For example, Number of students in this + +197 +00:13:40,240 --> 00:13:43,400 +class, you can say there are 60 or 50 students in + +198 +00:13:43,400 --> 00:13:47,260 +this class. You cannot say there are 50.5 + +199 +00:13:47,260 --> 00:13:52,160 +students. So number of students is discrete + +200 +00:13:52,160 --> 00:13:57,040 +because it takes only integers. While for + +201 +00:13:57,040 --> 00:13:59,800 +continuous type of numerical variables, you can + +202 +00:13:59,800 --> 00:14:06,060 +say that my weight is 80.5 kilograms. 
so it makes + +203 +00:14:06,060 --> 00:14:09,260 +sense that your weight is not exactly 80 kilograms + +204 +00:14:09,260 --> 00:14:15,580 +it might be 80.6 or 80.5 and so on so discrete + +205 +00:14:15,580 --> 00:14:20,420 +takes only integers while continuous takes any + +206 +00:14:20,420 --> 00:14:24,140 +value I mean any real number so that's the + +207 +00:14:24,140 --> 00:14:28,980 +difference between discrete and continuous number + +208 +00:14:28,980 --> 00:14:33,930 +of phone or number of calls you have received this + +209 +00:14:33,930 --> 00:14:37,670 +morning, maybe one, zero, nine, and so on, + +210 +00:14:38,390 --> 00:14:41,950 +discrete. Number of patients in the hospital, + +211 +00:14:42,350 --> 00:14:46,590 +discrete, and so on. But when we are talking about + +212 +00:14:46,590 --> 00:14:50,850 +income, maybe my income is 1,000.5 shekel. It + +213 +00:14:50,850 --> 00:14:55,790 +could be. It's continuous because my income can be + +214 +00:14:55,790 --> 00:14:59,210 +any number between, for example, 1,000 and 10,000. + +215 +00:15:00,470 --> 00:15:03,550 +It takes any value in this interval from 1,000 to + +216 +00:15:03,550 --> 00:15:08,410 +10,000. So it types of continuous rather than our + +217 +00:15:08,410 --> 00:15:12,450 +continuous variable. So that's the two types of + +218 +00:15:12,450 --> 00:15:15,590 +data, categorical and numerical. And numerical + +219 +00:15:15,590 --> 00:15:18,510 +also has two types, either discrete or continuous. + +220 +00:15:19,750 --> 00:15:24,430 +Later in Chapter 6, we'll talk more details about + +221 +00:15:24,430 --> 00:15:28,940 +one of the most distribution statistics, for + +222 +00:15:28,940 --> 00:15:31,560 +continuous, one which is called normal + +223 +00:15:31,560 --> 00:15:36,240 +distribution. That will be later, inshallah. 
As we
+
+224
+00:15:36,240 --> 00:15:39,000
+mentioned last time, at the end of each chapter,
+
+225
+00:15:39,280 --> 00:15:41,640
+there is a section or sections, sometimes there
+
+226
+00:15:41,640 --> 00:15:45,000
+are two sections, talks about computer programs.
+
+227
+00:15:45,120 --> 00:15:49,200
+How can we use computer programs in order to
+
+228
+00:15:49,200 --> 00:15:52,300
+analyze the data? And as we mentioned last time,
+
+229
+00:15:52,420 --> 00:15:54,600
+you should take a course on that. It's called
+
+230
+00:15:54,600 --> 00:15:57,960
+Computer and Data Analysis or SPSS course. So we
+
+231
+00:15:57,960 --> 00:16:03,860
+are going to skip the computer programs used for
+
+232
+00:16:03,860 --> 00:16:08,720
+any chapters in this book. And that's the end of
+
+233
+00:16:08,720 --> 00:16:13,060
+chapter number three. Any questions?
+
+234
+00:16:18,380 --> 00:16:22,550
+Let's move quickly on to chapter three.
+
+235
+00:16:31,290 --> 00:16:35,530
+Chapter three maybe is the easiest chapter in this
+
+236
+00:16:35,530 --> 00:16:39,950
+book. It's straightforward. We have some formulas
+
+237
+00:16:39,950 --> 00:16:45,280
+to compute some statistical measures. And we
+
+238
+00:16:45,280 --> 00:16:47,400
+should know how can we calculate these measures
+
+239
+00:16:47,400 --> 00:16:52,620
+and what are the meaning of your results. So
+
+240
+00:16:52,620 --> 00:16:56,140
+chapter three talks about numerical descriptive
+
+241
+00:16:56,140 --> 00:17:03,220
+measures. In this chapter, you will learn, number
+
+242
+00:17:03,220 --> 00:17:06,480
+one, describe the properties of central
+
+243
+00:17:06,480 --> 00:17:11,880
+tendency, variation, and shape in numerical data.
+
+244
+00:17:12,730 --> 00:17:16,250
+In this lecture, we'll talk in more details about
+
+245
+00:17:16,250 --> 00:17:21,370
+some of the center tendency measures. 
Later, we'll
+
+246
+00:17:21,370 --> 00:17:26,490
+talk about the variation, or spread, or
+
+247
+00:17:26,490 --> 00:17:29,690
+dispersion, and the shape in numerical data. So
+
+248
+00:17:29,690 --> 00:17:31,630
+that's part number one. We have to know something
+
+249
+00:17:31,630 --> 00:17:36,390
+about center tendency, variation, and the shape of
+
+250
+00:17:36,390 --> 00:17:42,020
+the data we have. to calculate descriptive summary
+
+251
+00:17:42,020 --> 00:17:45,360
+measures for a population. So we have to calculate
+
+252
+00:17:45,360 --> 00:17:48,460
+these measures for the sample. And if we have the
+
+253
+00:17:48,460 --> 00:17:51,420
+entire population, we can compute these measures
+
+254
+00:17:51,420 --> 00:17:58,440
+also for that population. Then I will introduce in
+
+255
+00:17:58,440 --> 00:18:01,920
+more details about something called a box plot. How
+
+256
+00:18:01,920 --> 00:18:06,660
+can we construct and interpret a box plot? That's,
+
+257
+00:18:06,960 --> 00:18:11,040
+inshallah, next time on Tuesday. Finally, we'll
+
+258
+00:18:11,040 --> 00:18:13,020
+see how can we calculate the covariance and
+
+259
+00:18:13,020 --> 00:18:15,700
+coefficient of variation and coefficient, I'm
+
+260
+00:18:15,700 --> 00:18:18,480
+sorry, coefficient of correlation. This topic
+
+261
+00:18:18,480 --> 00:18:24,280
+we'll introduce in more details in chapter 11
+
+262
+00:18:24,280 --> 00:18:31,240
+later on. So just I will give some brief
+
+263
+00:18:31,240 --> 00:18:35,630
+notation about coefficient of correlation, how can
+
+264
+00:18:35,630 --> 00:18:39,030
+we compute the correlation coefficient? What's the
+
+265
+00:18:39,030 --> 00:18:41,870
+meaning of your result? And later in chapter 11,
+
+266
+00:18:42,030 --> 00:18:44,510
+we'll talk in more details about correlation and
+
+267
+00:18:44,510 --> 00:18:48,930
+regression. So these are the objectives of this
+
+268
+00:18:48,930 --> 00:18:52,870
+chapter. 
There are some basic definitions before
+
+269
+00:18:52,870 --> 00:18:57,410
+we start. One is called central tendency. What do
+
+270
+00:18:57,410 --> 00:19:00,750
+you mean by central tendency? Central tendency is
+
+271
+00:19:00,750 --> 00:19:04,990
+the extent to which all data value group around a
+
+272
+00:19:04,990 --> 00:19:08,890
+typical or numerical or central value. So we are
+
+273
+00:19:08,890 --> 00:19:12,510
+looking for a point that in the center, I mean,
+
+274
+00:19:12,810 --> 00:19:18,870
+the data points are gathered or collected around a
+
+275
+00:19:18,870 --> 00:19:21,670
+middle point, and that middle point is called the
+
+276
+00:19:21,670 --> 00:19:24,450
+central tendency. And the question is, how can we
+
+277
+00:19:24,450 --> 00:19:27,780
+measure that value? We'll talk in details about
+
+278
+00:19:27,780 --> 00:19:32,620
+mean, median, and mode in a few minutes. So the
+
+279
+00:19:32,620 --> 00:19:35,660
+central tendency, in this case, the data values
+
+280
+00:19:35,660 --> 00:19:40,080
+grouped around a typical or central value. Is it
+
+281
+00:19:40,080 --> 00:19:44,380
+clear? So we have data set, large data set. Then
+
+282
+00:19:44,380 --> 00:19:47,860
+these points are gathered or grouped around a
+
+283
+00:19:47,860 --> 00:19:51,440
+middle point, and this point is called central
+
+284
+00:19:51,440 --> 00:19:56,120
+tendency, and it can be measured by using mean,
+
+285
+00:19:56,420 --> 00:19:59,960
+which is the most common one, median and the mode.
+
+286
+00:20:01,020 --> 00:20:04,480
+Next is the variation, which is the amount of
+
+287
+00:20:04,480 --> 00:20:09,420
+dispersion. Variation is the amount of dispersion
+
+288
+00:20:09,420 --> 00:20:13,900
+or scattering of values. And we'll use, for
+
+289
+00:20:13,900 --> 00:20:18,400
+example, range, variance or standard deviation in
+
+290
+00:20:18,400 --> 00:20:22,960
+order to compute the variation. 
Finally, we have
+
+291
+00:20:22,960 --> 00:20:26,300
+data, and my question is, what's the shape of the
+
+292
+00:20:26,300 --> 00:20:29,920
+data? So the shape is the pattern of distribution
+
+293
+00:20:29,920 --> 00:20:35,220
+of values from the lowest value to the highest. So
+
+294
+00:20:35,220 --> 00:20:39,400
+that's the three definitions we need to know
+
+295
+00:20:39,400 --> 00:20:44,580
+before we start. So we'll start with the easiest
+
+296
+00:20:44,580 --> 00:20:48,680
+one, measures of central tendency. As I mentioned,
+
+297
+00:20:49,160 --> 00:20:55,110
+there are three measures: mean, median, and mode. And our
+
+298
+00:20:55,110 --> 00:20:58,270
+goal or we have two goals actually. We have to
+
+299
+00:20:58,270 --> 00:21:02,290
+know how to compute these measures. Number two,
+
+300
+00:21:03,270 --> 00:21:06,390
+which one is better? The mean or the median or the
+
+301
+00:21:06,390 --> 00:21:06,550
+mode?
+
+302
+00:21:11,310 --> 00:21:14,770
+So the mean sometimes called the arithmetic mean.
+
+303
+00:21:15,680 --> 00:21:20,020
+Or in general, just say the mean. So often we use
+
+304
+00:21:20,020 --> 00:21:26,860
+the mean. And the mean is just sum
+
+305
+00:21:26,860 --> 00:21:33,220
+of the values divided by the sample size. So it's
+
+306
+00:21:33,220 --> 00:21:36,800
+straightforward. We have, for example, three data
+
+307
+00:21:36,800 --> 00:21:42,180
+points. And your goal is to find the average or
+
+308
+00:21:42,180 --> 00:21:45,890
+the mean of these points. The mean is just the sum
+
+309
+00:21:45,890 --> 00:21:50,230
+of these values divided by the sample size. So for
+
+310
+00:21:50,230 --> 00:21:54,570
+example, if we have a data X1, X2, X3 up to Xn. So
+
+311
+00:21:54,570 --> 00:21:59,650
+the average is denoted by X bar. This one is
+
+312
+00:21:59,650 --> 00:22:04,530
+pronounced as X bar and X bar is just sum of Xi. 
+ +313 +00:22:05,010 --> 00:22:08,250 +It is summation, you know this symbol, summation + +314 +00:22:08,250 --> 00:22:11,350 +of sigma, summation of Xi and I goes from one to + +315 +00:22:11,350 --> 00:22:14,490 +N. divided by N which is the total number of + +316 +00:22:14,490 --> 00:22:19,710 +observations or the sample size. So it means X1 + +317 +00:22:19,710 --> 00:22:23,290 +plus X2 all the way up to XN divided by N gives + +318 +00:22:23,290 --> 00:22:28,530 +the mean or the arithmetic mean. So X bar is the + +319 +00:22:28,530 --> 00:22:32,690 +average which is the sum of values divided by the + +320 +00:22:32,690 --> 00:22:36,270 +number of observations. So that's the first + +321 +00:22:36,270 --> 00:22:38,830 +definition. For example, + +322 +00:22:42,180 --> 00:22:46,920 +So again, the mean is the most common measure of + +323 +00:22:46,920 --> 00:22:51,780 +center tendency. Number two, the definition of the + +324 +00:22:51,780 --> 00:22:55,440 +mean. Sum of values divided by the number of + +325 +00:22:55,440 --> 00:23:02,960 +values. That means the mean takes all the values, + +326 +00:23:04,140 --> 00:23:09,740 +then divided by N. it makes sense that the mean is + +327 +00:23:09,740 --> 00:23:13,380 +affected by extreme values or outliers. I mean, if + +328 +00:23:13,380 --> 00:23:17,840 +the data has outliers or extreme values, I mean by + +329 +00:23:17,840 --> 00:23:21,400 +extreme values, large or very, very large values + +330 +00:23:21,400 --> 00:23:24,980 +and small, small values. Large values or small + +331 +00:23:24,980 --> 00:23:31,100 +values are extreme values. Since the mean takes + +332 +00:23:31,100 --> 00:23:33,420 +all these values and sums all together, doesn't + +333 +00:23:33,420 --> 00:23:38,550 +divide by n, that means The mean is affected by + +334 +00:23:38,550 --> 00:23:41,350 +outliers or by extreme values. For example, + +335 +00:23:42,030 --> 00:23:45,110 +imagine we have simple data as 1, 2, 3, 4, and 5. 
+ +336 +00:23:46,110 --> 00:23:49,830 +Simple example. Now, what's the mean? The mean is + +337 +00:23:49,830 --> 00:23:53,570 +just add these values, then divide by the total + +338 +00:23:53,570 --> 00:23:56,910 +number of observations. In this case, the sum of + +339 +00:23:56,910 --> 00:24:01,710 +these is 15. N is five because there are five + +340 +00:24:01,710 --> 00:24:05,920 +observations. So X bar is 15 divided by 5, which + +341 +00:24:05,920 --> 00:24:10,240 +is 3. So straightforward. Now imagine instead of + +342 +00:24:10,240 --> 00:24:16,480 +5, this number 5, we have a 10. Now 10, there is a + +343 +00:24:16,480 --> 00:24:21,400 +gap between 4, which is the second largest, and + +344 +00:24:21,400 --> 00:24:25,600 +the maximum, which is 10. Now if we add these + +345 +00:24:25,600 --> 00:24:30,540 +values, 1, 2, 3, 4, and 10, then divide by 5, the + +346 +00:24:30,540 --> 00:24:36,680 +mean will be 4. If you see here, we just added one + +347 +00:24:36,680 --> 00:24:41,060 +value, or I mean, we replaced five by 10, and the + +348 +00:24:41,060 --> 00:24:44,700 +mean changed dramatically from three to four. + +349 +00:24:45,520 --> 00:24:48,860 +There is big change between three and four, around + +350 +00:24:48,860 --> 00:24:55,560 +25% more. So that means outliers or extreme values + +351 +00:24:55,560 --> 00:25:01,200 +affected the mean. So take this information in + +352 +00:25:01,200 --> 00:25:03,560 +your mind because later we'll talk a little bit + +353 +00:25:03,560 --> 00:25:07,360 +about another one. So the mean is affected by + +354 +00:25:07,360 --> 00:25:13,100 +extreme values. Imagine another example. Suppose + +355 +00:25:13,100 --> 00:25:20,060 +we have data from 1 to 9. 1, 2, 3, 4, 6, 7, 8, 9. + +356 +00:25:21,040 --> 00:25:26,690 +Now the mean of these values, some divide by n. If + +357 +00:25:26,690 --> 00:25:31,970 +you sum 1 through 9, summation is 45. Divide by 9, + +358 +00:25:32,510 --> 00:25:36,230 +which is 5. 
So the sum of these values divided by + +359 +00:25:36,230 --> 00:25:41,590 +N gives the average, so the average is 5. Now + +360 +00:25:41,590 --> 00:25:46,670 +suppose we add 100 to the end of this data. So the + +361 +00:25:46,670 --> 00:25:53,670 +sum will be 145 divided by 10, that's 14.5. Now + +362 +00:25:53,670 --> 00:25:58,850 +the mean was 5. Then after we added 100, it + +363 +00:25:58,850 --> 00:26:05,470 +becomes 14.5. Imagine the mean was 5, it changed + +364 +00:26:05,470 --> 00:26:11,650 +to 14.5. It means around three times. So that + +365 +00:26:11,650 --> 00:26:17,510 +means outliers affect the mean much more than the + +366 +00:26:17,510 --> 00:26:19,890 +other one. We'll talk a little later about it, + +367 +00:26:19,990 --> 00:26:23,950 +which is the median. So keep in mind outliers + +368 +00:26:25,290 --> 00:26:34,790 +affected the mean in this case. Any question? Is + +369 +00:26:34,790 --> 00:26:41,590 +it clear? Yes. So, one more time. The mean is + +370 +00:26:41,590 --> 00:26:46,990 +affected by extreme values. So that's for the + +371 +00:26:46,990 --> 00:26:50,910 +mean. The other measure of center tendency is + +372 +00:26:50,910 --> 00:26:57,600 +called the median. Now, what's the median? What's + +373 +00:26:57,600 --> 00:27:00,760 +the definition of the median from your previous + +374 +00:27:00,760 --> 00:27:05,880 +studies? What's the median? I mean, what's the + +375 +00:27:05,880 --> 00:27:09,360 +definition of the median? Now the middle value, + +376 +00:27:09,760 --> 00:27:12,980 +that's correct, but after we arrange the data from + +377 +00:27:12,980 --> 00:27:17,040 +smallest to largest or largest to smallest, so we + +378 +00:27:17,040 --> 00:27:20,160 +should arrange the data, then we can figure out + +379 +00:27:20,160 --> 00:27:24,280 +the median. 
So the median is the middle point, but
+
+380
+00:27:24,280 --> 00:27:27,060
+after we arrange the data from smallest to largest
+
+381
+00:27:27,060 --> 00:27:30,030
+or vice versa. So that's the definition of the
+
+382
+00:27:30,030 --> 00:27:33,930
+median. So in an ordered array, so we have to have
+
+383
+00:27:33,930 --> 00:27:39,230
+order array, the median is the middle number. The
+
+384
+00:27:39,230 --> 00:27:42,810
+middle number means 50 percent of the data below
+
+385
+00:27:42,810 --> 00:27:50,370
+and 50 percent above the median because it's
+
+386
+00:27:50,370 --> 00:27:52,190
+called the median, the value in the middle after
+
+387
+00:27:52,190 --> 00:27:55,990
+you arrange the data from smallest to largest.
+
+388
+00:28:00,130 --> 00:28:02,770
+Suppose I again go back to the previous example
+
+389
+00:28:02,770 --> 00:28:09,690
+when we have data 1, 2, 3, 4, and 5. Now for this
+
+390
+00:28:09,690 --> 00:28:14,210
+specific example as we did before, now the data is
+
+391
+00:28:14,210 --> 00:28:18,670
+already ordered. The value in the middle is 3
+
+392
+00:28:18,670 --> 00:28:22,330
+because there are two values below it.
+
+393
+00:28:24,860 --> 00:28:27,300
+And also there are the same number of observations
+
+394
+00:28:27,300 --> 00:28:33,140
+above it. So 3 is the median. Now again imagine we
+
+395
+00:28:33,140 --> 00:28:37,320
+replace 5, which is the maximum value, by another
+
+396
+00:28:37,320 --> 00:28:42,140
+one which is extreme one, for example 10. In this
+
+397
+00:28:42,140 --> 00:28:47,600
+case, the median is still 3. Because the median is
+
+398
+00:28:47,600 --> 00:28:49,380
+just the value of the middle after you arrange the
+
+399
+00:28:49,380 --> 00:28:53,900
+data. So it doesn't matter what is the highest or
+
+400
+00:28:53,900 --> 00:28:58,860
+the maximum value is, the median in this case is
+
+401
+00:28:58,860 --> 00:29:03,700
+three. It doesn't change. 
That means the median is + +402 +00:29:03,700 --> 00:29:08,020 +not affected by extreme values. Or to be more + +403 +00:29:08,020 --> 00:29:12,910 +precise, we can say that The median is affected by + +404 +00:29:12,910 --> 00:29:18,990 +outlier, but not the same as the mean. So affect + +405 +00:29:18,990 --> 00:29:23,610 +the mean much more than the median. I mean, you + +406 +00:29:23,610 --> 00:29:26,550 +cannot say for this example, yes, the median is + +407 +00:29:26,550 --> 00:29:29,310 +not affected because the median was three, it + +408 +00:29:29,310 --> 00:29:33,590 +becomes three. But in another examples, there is + +409 +00:29:33,590 --> 00:29:36,750 +small difference between all. + +410 +00:29:40,770 --> 00:29:44,850 +Extreme values affected the mean much more than + +411 +00:29:44,850 --> 00:29:51,450 +the median. If the dataset has extreme values, we + +412 +00:29:51,450 --> 00:29:54,510 +have to use, what do you think, the mean or the + +413 +00:29:54,510 --> 00:29:58,090 +median? The median. So in case or in the presence + +414 +00:29:58,090 --> 00:30:01,910 +of extreme values or outliers, we have to use the + +415 +00:30:01,910 --> 00:30:07,010 +median, not the mean. But in general, we use If + +416 +00:30:07,010 --> 00:30:10,770 +the data is free of outliers, I mean if the data + +417 +00:30:10,770 --> 00:30:16,410 +has not extreme values, then you can use the mean. + +418 +00:30:16,510 --> 00:30:19,230 +The mean is much better than the median in this + +419 +00:30:19,230 --> 00:30:22,490 +case. But if the data has extreme values or + +420 +00:30:22,490 --> 00:30:27,190 +outliers, we should use the median instead of the + +421 +00:30:27,190 --> 00:30:31,310 +mean. Any question? So these are the most common + +422 +00:30:31,310 --> 00:30:36,710 +center tendency measures in statistics, the mean + +423 +00:30:36,710 --> 00:30:42,390 +and the median. And keep in mind, your data should + +424 +00:30:42,390 --> 00:30:46,170 +be numeric. 
I mean, you cannot use the mean or the + +425 +00:30:46,170 --> 00:30:50,730 +median for qualitative or categorical data, for + +426 +00:30:50,730 --> 00:30:54,310 +example, gender, males or females. You cannot say + +427 +00:30:54,310 --> 00:30:59,490 +the mean of gender or sex is 1.5. It doesn't make + +428 +00:30:59,490 --> 00:31:05,490 +sense. It should be numerical data to use the mean + +429 +00:31:05,490 --> 00:31:07,590 +or the median. So the mean and the median is used + +430 +00:31:07,590 --> 00:31:11,210 +only for numerical data. And we have to + +431 +00:31:11,210 --> 00:31:14,170 +distinguish between mean and median. Mean is used + +432 +00:31:14,170 --> 00:31:16,870 +for data that has not outliers or extreme values, + +433 +00:31:17,370 --> 00:31:21,450 +while the median is used for data that has + +434 +00:31:21,450 --> 00:31:25,230 +outliers or extreme values. Sometimes better to + +435 +00:31:25,230 --> 00:31:27,990 +report both. I mean, sometimes better to report + +436 +00:31:27,990 --> 00:31:33,450 +mean and the median. So you just say the sales for + +437 +00:31:33,450 --> 00:31:40,560 +this company is, for example, $500,000. And the + +438 +00:31:40,560 --> 00:31:43,900 +median, for example, is 550,000. You can see that. + +439 +00:31:45,680 --> 00:31:46,400 +Is it clear? + +440 +00:31:51,440 --> 00:31:55,560 +If you have a small data, it's straightforward and + +441 +00:31:55,560 --> 00:31:59,180 +it's very easy to locate the median. But in case + +442 +00:31:59,180 --> 00:32:02,120 +of large dataset, how can we locate the median? + +443 +00:32:02,340 --> 00:32:06,640 +It's not easy. Just look at the data and you can + +444 +00:32:06,640 --> 00:32:11,200 +say this is the median. It's not easy task. So we + +445 +00:32:11,200 --> 00:32:15,820 +need a rule that locate the median. 
The location + +446 +00:32:15,820 --> 00:32:18,020 +of the median when the values are in numerical + +447 +00:32:18,020 --> 00:32:23,580 +order from smallest to largest is N plus one + +448 +00:32:23,580 --> 00:32:26,140 +divided by two. That's the position of the median. + +449 +00:32:26,640 --> 00:32:28,860 +If we go back a little bit to the previous + +450 +00:32:28,860 --> 00:32:34,980 +example, here N was five. So the location was + +451 +00:32:34,980 --> 00:32:40,000 +number three, because n plus one divided by two, + +452 +00:32:40,120 --> 00:32:43,120 +five plus one divided by two is three. So location + +453 +00:32:43,120 --> 00:32:47,340 +number three is the median. Location number one is + +454 +00:32:47,340 --> 00:32:50,840 +one, in this case, then two, then three. So + +455 +00:32:50,840 --> 00:32:53,740 +location number three is three. But maybe this + +456 +00:32:53,740 --> 00:32:57,280 +number is not three, and other value maybe 3.1 or + +457 +00:32:57,280 --> 00:33:02,440 +3.2. But the location is number three. Is it + +458 +00:33:02,440 --> 00:33:08,470 +clear? So that's the location. If it is odd, you + +459 +00:33:08,470 --> 00:33:13,270 +mean by odd number, five, seven and so on. So if + +460 +00:33:13,270 --> 00:33:17,090 +the number of values is odd, the median is the + +461 +00:33:17,090 --> 00:33:21,210 +middle number. Now let's imagine if we have even + +462 +00:33:21,210 --> 00:33:24,570 +number of observations. For example, we have one, + +463 +00:33:24,610 --> 00:33:28,270 +two, three, four, five and six. So imagine numbers + +464 +00:33:28,270 --> 00:33:32,390 +from one up to six. What's the median? Now three + +465 +00:33:32,390 --> 00:33:35,610 +is not the median because there are two + +466 +00:33:35,610 --> 00:33:43,390 +observations below three. And three above it. And + +467 +00:33:43,390 --> 00:33:46,210 +four is not the median because three observations + +468 +00:33:46,210 --> 00:33:53,290 +below, two above. 
So three and four is the middle + +469 +00:33:53,290 --> 00:33:56,870 +value. So just take the average of two middle + +470 +00:33:56,870 --> 00:34:01,570 +points, And that will be the median. So if n is + +471 +00:34:01,570 --> 00:34:07,990 +even, you have to locate two middle points. For + +472 +00:34:07,990 --> 00:34:10,310 +example, n over 2, in this case, we have six + +473 +00:34:10,310 --> 00:34:13,910 +observations. So divide by 2, not n plus 1 divided + +474 +00:34:13,910 --> 00:34:17,970 +by 2, just n over 2. So n over 2 is 3. So place + +475 +00:34:17,970 --> 00:34:22,930 +number 3, and the next one, place number 4, these + +476 +00:34:22,930 --> 00:34:25,930 +are the two middle points. Take the average of + +477 +00:34:25,930 --> 00:34:32,300 +these values, then that's your median. So if N is + +478 +00:34:32,300 --> 00:34:37,080 +even, you have to be careful. You have to find two + +479 +00:34:37,080 --> 00:34:40,860 +middle points and just take the average of these + +480 +00:34:40,860 --> 00:34:45,100 +two. So if N is even, the median is the average of + +481 +00:34:45,100 --> 00:34:49,200 +the two middle numbers. Keep in mind, when we are + +482 +00:34:49,200 --> 00:34:54,600 +saying N plus 2, N plus 2 is just the position of + +483 +00:34:54,600 --> 00:34:58,670 +the median, not the value, location. Not the + +484 +00:34:58,670 --> 00:35:07,770 +value. Is it clear? Any question? So location is + +485 +00:35:07,770 --> 00:35:10,150 +not the value. Location is just the place or the + +486 +00:35:10,150 --> 00:35:13,450 +position of the medium. If N is odd, the position + +487 +00:35:13,450 --> 00:35:17,710 +is N plus one divided by two. If N is even, the + +488 +00:35:17,710 --> 00:35:20,870 +positions of the two middle points are N over two + +489 +00:35:20,870 --> 00:35:23,090 +and the next term or the next point. + +490 +00:35:28,390 --> 00:35:32,510 +Last measure of center tendency is called the + +491 +00:35:32,510 --> 00:35:32,750 +mood. 
+ +492 +00:35:35,890 --> 00:35:39,010 +The definition of the mood, the mood is the most + +493 +00:35:39,010 --> 00:35:44,250 +frequent value. So sometimes the mood exists, + +494 +00:35:45,230 --> 00:35:48,570 +sometimes the mood does not exist. Or sometimes + +495 +00:35:48,570 --> 00:35:53,730 +there is only one mood, in other cases maybe there + +496 +00:35:53,730 --> 00:35:58,730 +are several moods. So a value that occurs most + +497 +00:35:58,730 --> 00:36:03,010 +often is called the mood. The mood is not affected + +498 +00:36:03,010 --> 00:36:07,610 +by extreme values. It can be used for either + +499 +00:36:07,610 --> 00:36:11,190 +numerical or categorical data. And that's the + +500 +00:36:11,190 --> 00:36:13,910 +difference between mean and median and the mood. + +501 +00:36:14,590 --> 00:36:16,930 +Mean and median is used just for numerical data. + +502 +00:36:17,430 --> 00:36:21,270 +Here, the mood can be used for both, categorical + +503 +00:36:21,270 --> 00:36:25,610 +and numerical data. Sometimes, as I mentioned, + +504 +00:36:25,930 --> 00:36:29,570 +there may be no mood or the mood does not exist. + +505 +00:36:30,130 --> 00:36:34,190 +In other cases, there may be several events. So + +506 +00:36:34,190 --> 00:36:36,870 +the mood is the value that has the most frequent. + +507 +00:36:37,490 --> 00:36:43,650 +For example, if you look at this data, one is + +508 +00:36:43,650 --> 00:36:48,370 +repeated once, three is the same one time, five is + +509 +00:36:48,370 --> 00:36:52,290 +repeated twice. 
seven is one nine is repeated + +510 +00:36:52,290 --> 00:36:57,330 +three times and so on so in this case nine is the + +511 +00:36:57,330 --> 00:37:00,290 +mood because the mood again is the most frequent + +512 +00:37:00,290 --> 00:37:05,030 +value on + +513 +00:37:05,030 --> 00:37:08,550 +the right side there are some values zero one two + +514 +00:37:08,550 --> 00:37:12,830 +three up to six now each one is repeated once so + +515 +00:37:12,830 --> 00:37:15,350 +in this case the mood does not exist I mean there + +516 +00:37:15,350 --> 00:37:22,310 +is no mood So generally speaking, the mood is the + +517 +00:37:22,310 --> 00:37:26,310 +value that you care most often. It can be used for + +518 +00:37:26,310 --> 00:37:29,790 +numerical or categorical data, not affected by + +519 +00:37:29,790 --> 00:37:32,970 +extreme values or outliers. Sometimes there is + +520 +00:37:32,970 --> 00:37:36,150 +only one mood as this example. Sometimes the mood + +521 +00:37:36,150 --> 00:37:40,390 +does not exist. Or sometimes there are several + +522 +00:37:40,390 --> 00:37:45,190 +moods. And so that's the definitions for mean, + +523 +00:37:46,430 --> 00:37:52,540 +median, and the mood. I will give just a numerical + +524 +00:37:52,540 --> 00:37:56,380 +example to know how can we compute these measures. + +525 +00:37:57,420 --> 00:38:01,540 +This data, simple data, just for illustration, we + +526 +00:38:01,540 --> 00:38:07,580 +have house prices. We have five data points, $2 + +527 +00:38:07,580 --> 00:38:10,940 +million. This is the price of house A, for + +528 +00:38:10,940 --> 00:38:15,880 +example. House B price is 500,000. The other one + +529 +00:38:15,880 --> 00:38:19,120 +is 300,000. And two houses have the same price as + +530 +00:38:19,120 --> 00:38:25,850 +100,000. 
Now, just to compute the mean, add these + +531 +00:38:25,850 --> 00:38:29,350 +values or sum these values, which is three + +532 +00:38:29,350 --> 00:38:34,030 +million, divide by number of houses here, there + +533 +00:38:34,030 --> 00:38:38,550 +are five houses, so just three thousand divided by + +534 +00:38:38,550 --> 00:38:44,170 +five, six hundred thousand. The median, the value + +535 +00:38:44,170 --> 00:38:46,150 +in the median, after you arrange the data from + +536 +00:38:46,150 --> 00:38:51,470 +smallest to largest, Or largest smallest. This + +537 +00:38:51,470 --> 00:38:55,410 +data is already arranged from largest smallest or + +538 +00:38:55,410 --> 00:38:58,150 +smallest large. It doesn't matter actually. So the + +539 +00:38:58,150 --> 00:39:02,930 +median is $300,000. Make sense? Because there are + +540 +00:39:02,930 --> 00:39:09,490 +two house prices above and two below. So the + +541 +00:39:09,490 --> 00:39:13,610 +median is $300,000. Now if you look at these two + +542 +00:39:13,610 --> 00:39:21,350 +values, the mean for this data equals 600,000 and + +543 +00:39:21,350 --> 00:39:26,690 +the median is 300,000. The mean is double the + +544 +00:39:26,690 --> 00:39:31,750 +median. Do you think why there is a big difference + +545 +00:39:31,750 --> 00:39:36,030 +in this data between the mean and the median? + +546 +00:39:36,190 --> 00:39:42,290 +Which one? Two million dollars is extreme value, + +547 +00:39:42,510 --> 00:39:45,940 +very large number. I mean, if you compare two + +548 +00:39:45,940 --> 00:39:48,860 +million dollars with the other data sets or other + +549 +00:39:48,860 --> 00:39:51,320 +data values, you will see there is a big + +550 +00:39:51,320 --> 00:39:53,260 +difference between two million and five hundred. + +551 +00:39:53,620 --> 00:39:56,280 +It's four times, plus about three hundred + +552 +00:39:56,280 --> 00:39:59,780 +thousands, around seven times and so on. 
For this + +553 +00:39:59,780 --> 00:40:07,880 +value, the mean is affected. Exactly. The median + +554 +00:40:07,880 --> 00:40:11,740 +is resistant to outliers. It's affected but little + +555 +00:40:11,740 --> 00:40:17,100 +bit. For this reason, we have to use the median. + +556 +00:40:17,300 --> 00:40:20,720 +So the median makes more sense than using the + +557 +00:40:20,720 --> 00:40:24,480 +mean. The mode is just the most frequent value, + +558 +00:40:24,660 --> 00:40:28,720 +which is 100,000, because this value is repeated + +559 +00:40:28,720 --> 00:40:33,820 +twice. So that's the whole story for central + +560 +00:40:33,820 --> 00:40:40,720 +tendency measures, mean, median, and 1. Now the + +561 +00:40:40,720 --> 00:40:45,640 +question again is which measure to use? The mean + +562 +00:40:45,640 --> 00:40:49,280 +is generally used. The most common center tendency + +563 +00:40:49,280 --> 00:40:53,420 +is the mean. We can use it or we should use it + +564 +00:40:53,420 --> 00:40:59,920 +unless extreme values exist. I mean if the data + +565 +00:40:59,920 --> 00:41:03,960 +set has no outliers or extreme values, we have to + +566 +00:41:03,960 --> 00:41:06,240 +use the mean instead of the median. + +567 +00:41:09,810 --> 00:41:14,670 +The median is often used since the median is not + +568 +00:41:14,670 --> 00:41:18,330 +sensitive to extreme values. I mean, the median is + +569 +00:41:18,330 --> 00:41:22,030 +resistant to outliers. It remains nearly in the + +570 +00:41:22,030 --> 00:41:26,490 +same position if the dataset has outliers. But the + +571 +00:41:26,490 --> 00:41:29,850 +median will be affected either to the right or to + +572 +00:41:29,850 --> 00:41:34,350 +the left tail. So we have to use the median if the + +573 +00:41:34,350 --> 00:41:40,060 +data has extreme values. 
For example, median home + +574 +00:41:40,060 --> 00:41:44,100 +prices for the previous one may be reported for a + +575 +00:41:44,100 --> 00:41:48,000 +region that is less sensitive to outliers. So the + +576 +00:41:48,000 --> 00:41:52,880 +mean is more sensitive to outliers than the + +577 +00:41:52,880 --> 00:41:56,520 +median. Sometimes, I mean in some situations, it + +578 +00:41:56,520 --> 00:41:58,760 +makes sense to report both the mean and the + +579 +00:41:58,760 --> 00:42:01,860 +median. Just say the mean for this data for home + +580 +00:42:01,860 --> 00:42:07,570 +prices is 600,000 while the median is 300,000. If + +581 +00:42:07,570 --> 00:42:10,150 +you look at these two figures, you can tell that + +582 +00:42:10,150 --> 00:42:13,830 +there exists outlier or the outlier exists because + +583 +00:42:13,830 --> 00:42:17,230 +there is a big difference between the mean and the + +584 +00:42:17,230 --> 00:42:24,310 +median. So that's all for measures of central + +585 +00:42:24,310 --> 00:42:28,830 +tendency. Again, we explained three measures, + +586 +00:42:29,450 --> 00:42:33,930 +arithmetic mean, median, and mode. And arithmetic + +587 +00:42:33,930 --> 00:42:38,990 +mean again is denoted by X bar is pronounced as X + +588 +00:42:38,990 --> 00:42:44,410 +bar and just summation of X divided by N. So + +589 +00:42:44,410 --> 00:42:48,070 +summation Xi, i goes from 1 up to N divided by the + +590 +00:42:48,070 --> 00:42:52,170 +total number of observations. The median, as we + +591 +00:42:52,170 --> 00:42:55,690 +mentioned, is the value in the middle in ordered + +592 +00:42:55,690 --> 00:42:59,150 +array. After you arrange the data from smallest to + +593 +00:42:59,150 --> 00:43:01,930 +largest or vice versa, then the median is the + +594 +00:43:01,930 --> 00:43:06,330 +value in the middle. The mode is the most frequent + +595 +00:43:06,330 --> 00:43:09,030 +observed value. 
And we have to know that mean and + +596 +00:43:09,030 --> 00:43:13,870 +median are used only for numerical data, while the + +597 +00:43:13,870 --> 00:43:17,510 +mode can be used for both numerical and + +598 +00:43:17,510 --> 00:43:24,290 +categorical data. That's all about measures of + +599 +00:43:24,290 --> 00:43:27,210 +central tendency. Any question? + +600 +00:43:33,210 --> 00:43:40,230 +Let's move to measures of variation. It's another + +601 +00:43:40,230 --> 00:43:43,750 +type of measures. It's called measures of + +602 +00:43:43,750 --> 00:43:47,490 +variation, sometimes called measures of spread. + +603 +00:43:50,490 --> 00:43:53,850 +Now, variation can be computed by using range, + +604 +00:43:55,590 --> 00:44:00,850 +variance, standard deviation, and coefficient of + +605 +00:44:00,850 --> 00:44:08,430 +variation. So we have four types, range, variance, + +606 +00:44:09,250 --> 00:44:12,050 +standard deviation, and coefficient of variation. + +607 +00:44:13,710 --> 00:44:16,150 +Now, measures of variation give information on the + +608 +00:44:16,150 --> 00:44:19,410 +spread. Now, this is the first difference between + +609 +00:44:19,410 --> 00:44:24,210 +central tendency measures and measures of + +610 +00:44:24,210 --> 00:44:28,270 +variation. That one measures the central value or + +611 +00:44:28,270 --> 00:44:30,790 +the value in the middle. Here, it measures the + +612 +00:44:30,790 --> 00:44:36,310 +spread. Or variability. Or dispersion of the data. + +613 +00:44:36,450 --> 00:44:40,310 +Do you know what is dispersion? Dispersion. + +614 +00:44:40,630 --> 00:44:45,590 +Tabaad. So major variation given formation with + +615 +00:44:45,590 --> 00:44:48,350 +the spread. Spread or variation or dispersion of + +616 +00:44:48,350 --> 00:44:52,250 +the data values. Now if you look at these two bell + +617 +00:44:52,250 --> 00:44:52,650 +shapes. + +618 +00:44:55,670 --> 00:44:59,170 +Both have the same center. 
The center I mean the + +619 +00:44:59,170 --> 00:45:01,730 +value in the middle. So the value in the middle + +620 +00:45:01,730 --> 00:45:06,990 +here for figure + +621 +00:45:06,990 --> 00:45:10,150 +graph number one is the same as the value for the + +622 +00:45:10,150 --> 00:45:16,270 +other graph. So both graphs have the same center. + +623 +00:45:17,430 --> 00:45:20,670 +But if you look at the spread, you will see that + +624 +00:45:20,670 --> 00:45:26,230 +figure A is less spread than figure B. Now if you + +625 +00:45:26,230 --> 00:45:29,720 +look at this one, the spread here, is much less + +626 +00:45:29,720 --> 00:45:34,120 +than the other one. Even they have the same + +627 +00:45:34,120 --> 00:45:39,260 +center, the same mean, but figure A is more spread + +628 +00:45:39,260 --> 00:45:45,140 +than figure B. It means that the variation in A is + +629 +00:45:45,140 --> 00:45:49,920 +much less than the variation in figure B. So it + +630 +00:45:49,920 --> 00:45:55,960 +means that the mean is not sufficient to describe + +631 +00:45:55,960 --> 00:45:59,970 +your data. Because maybe you have two datasets and + +632 +00:45:59,970 --> 00:46:03,330 +both have the same mean, but the spread or the + +633 +00:46:03,330 --> 00:46:07,350 +variation is completely different. Again, maybe we + +634 +00:46:07,350 --> 00:46:10,250 +have two classes of statistics, class A and class + +635 +00:46:10,250 --> 00:46:13,230 +B. The center or the mean or the average is the + +636 +00:46:13,230 --> 00:46:16,150 +same for each one. For example, maybe the average + +637 +00:46:16,150 --> 00:46:19,810 +of this class is 70. The average of class B is + +638 +00:46:19,810 --> 00:46:26,640 +also 70. But the scores are scattered. or spread + +639 +00:46:26,640 --> 00:46:32,580 +out in class A maybe much more than in class B. So + +640 +00:46:32,580 --> 00:46:34,280 +the mean is not sufficient to describe the data. 
+ +641 +00:46:34,360 --> 00:46:37,100 +You have to say that the mean equals such and such + +642 +00:46:37,100 --> 00:46:41,000 +and the spread. And one of these measures we'll + +643 +00:46:41,000 --> 00:46:44,500 +talk later about range and variance standard + +644 +00:46:44,500 --> 00:46:49,030 +deviation. So I mean, The mean by itself is not + +645 +00:46:49,030 --> 00:46:51,890 +sufficient to describe the data. You have to use + +646 +00:46:51,890 --> 00:46:55,730 +something else to measure the variation or the + +647 +00:46:55,730 --> 00:46:57,950 +spread of the data. Make sense? + +648 +00:47:02,170 --> 00:47:05,670 +The first measure of variation, the easiest one, + +649 +00:47:05,810 --> 00:47:11,230 +is called the range. The range is the simplest + +650 +00:47:11,230 --> 00:47:15,590 +measure of variation. The range is just the + +651 +00:47:15,590 --> 00:47:19,750 +difference or the distance between the largest and + +652 +00:47:19,750 --> 00:47:23,550 +the smallest value. For example, suppose the + +653 +00:47:23,550 --> 00:47:27,070 +minimum score for this class is 40 and the maximum + +654 +00:47:27,070 --> 00:47:33,230 +is 90. So the range is 50, 90 minus 40. Now + +655 +00:47:33,230 --> 00:47:38,850 +imagine that the minimum score for this class is + +656 +00:47:38,850 --> 00:47:47,330 +60 and the maximum is 80, so 20. If we replace 80 + +657 +00:47:47,330 --> 00:47:51,450 +by 100, I mean the minimum is 60 and the maximum + +658 +00:47:51,450 --> 00:47:57,030 +is 100, it's 40. That means a range is affected by + +659 +00:47:57,030 --> 00:48:02,170 +outliers because it depends only on two values. + +660 +00:48:03,480 --> 00:48:06,100 +maximum and minimum value. So it should be + +661 +00:48:06,100 --> 00:48:09,320 +affected by outliers. So range is sensitive to + +662 +00:48:09,320 --> 00:48:12,780 +outliers. 
So if the data has the data set has + +663 +00:48:12,780 --> 00:48:15,660 +outliers, then in this case, you have to avoid + +664 +00:48:15,660 --> 00:48:19,640 +using range because range only based on two + +665 +00:48:19,640 --> 00:48:23,480 +values. So it should be affected by outliers. Now + +666 +00:48:23,480 --> 00:48:25,880 +for the for simple example, suppose we have this + +667 +00:48:25,880 --> 00:48:32,360 +data. The minimum value is one. I mean, the + +668 +00:48:32,360 --> 00:48:34,680 +smallest value is one, and the largest or the + +669 +00:48:34,680 --> 00:48:38,880 +maximum is 13. So it makes sense that the range of + +670 +00:48:38,880 --> 00:48:41,840 +the data is the difference between these two + +671 +00:48:41,840 --> 00:48:48,540 +values. So 13 minus one is 12. Now, imagine that + +672 +00:48:48,540 --> 00:48:58,040 +we just replace 13 by 100. So the new range will + +673 +00:48:58,040 --> 00:49:03,820 +be equal to 100 minus 199. So the previous range + +674 +00:49:03,820 --> 00:49:08,340 +was 12. It becomes now 99 after we replace the + +675 +00:49:08,340 --> 00:49:12,100 +maximum by 100. So it means that range is affected + +676 +00:49:12,100 --> 00:49:18,740 +by extreme values. So the mean and range both are + +677 +00:49:18,740 --> 00:49:23,040 +sensitive to outliers. So you have to link between + +678 +00:49:26,410 --> 00:49:30,210 +measures of center tendency and measures of + +679 +00:49:30,210 --> 00:49:33,130 +variation. Mean and range are affected by + +680 +00:49:33,130 --> 00:49:37,910 +outliers. The mean and range are affected by + +681 +00:49:37,910 --> 00:49:41,450 +outliers. This is an example. So it's very easy to + +682 +00:49:41,450 --> 00:49:49,550 +compute the mean. Next, if you look at why the + +683 +00:49:49,550 --> 00:49:51,190 +range can be misleading. 
+ +684 +00:49:53,830 --> 00:49:56,810 +Sometimes you report the range and the range does + +685 +00:49:56,810 --> 00:50:00,310 +not give an appropriate answer or appropriate + +686 +00:50:00,310 --> 00:50:04,450 +result because number + +687 +00:50:04,450 --> 00:50:06,790 +one ignores the way in which the data are + +688 +00:50:06,790 --> 00:50:10,770 +distributed. For example, if you look at this + +689 +00:50:10,770 --> 00:50:15,430 +specific data, we have data seven, eight, nine, + +690 +00:50:15,590 --> 00:50:18,110 +ten, eleven and twelve. So the range is five. + +691 +00:50:19,270 --> 00:50:21,910 +Twelve minus seven is five. Now if you look at the + +692 +00:50:21,910 --> 00:50:26,360 +other data, The smallest value was seven. + +693 +00:50:29,600 --> 00:50:33,260 +And there is a gap between the smallest and the + +694 +00:50:33,260 --> 00:50:38,220 +next smallest value, which is 10. And also we have + +695 +00:50:38,220 --> 00:50:44,480 +12 is repeated three times. Still the range is the + +696 +00:50:44,480 --> 00:50:48,140 +same. Even there is a difference between these two + +697 +00:50:48,140 --> 00:50:53,640 +values, between two sets. we have seven, eight, + +698 +00:50:53,760 --> 00:50:57,020 +nine up to 12. And then the other data, we have + +699 +00:50:57,020 --> 00:51:02,180 +seven, 10, 11, and 12 three times. Still, the + +700 +00:51:02,180 --> 00:51:06,360 +range equals five. So it doesn't make sense to + +701 +00:51:06,360 --> 00:51:09,620 +report the range as a measure of variation. + +702 +00:51:10,520 --> 00:51:12,640 +Because if you look at the distribution for this + +703 +00:51:12,640 --> 00:51:15,500 +data, it's completely different from the other + +704 +00:51:15,500 --> 00:51:20,860 +dataset. Even though it has the same range. So + +705 +00:51:20,860 --> 00:51:25,220 +range is not used in this case. Look at another + +706 +00:51:25,220 --> 00:51:25,680 +example. + +707 +00:51:28,300 --> 00:51:32,920 +We have data. 
All the data ranges, I mean, starts + +708 +00:51:32,920 --> 00:51:38,680 +from 1 up to 5. So the range is 4. If we just + +709 +00:51:38,680 --> 00:51:46,200 +replace the maximum, which is 5, by 120. So the + +710 +00:51:46,200 --> 00:51:49,190 +range is completely different. the range becomes + +711 +00:51:49,190 --> 00:51:55,010 +119. So that means range + +712 +00:51:55,010 --> 00:51:59,230 +is sensitive to outliers. So we have to avoid + +713 +00:51:59,230 --> 00:52:06,030 +using range in case of outliers or extreme values. + +714 +00:52:08,930 --> 00:52:14,410 +I will stop at the most important one, the + +715 +00:52:14,410 --> 00:52:18,350 +variance, for next time inshallah. Up to this + +716 +00:52:18,350 --> 00:52:19,310 +point, any questions? + +717 +00:52:22,330 --> 00:52:29,730 +Okay, stop at this point if + +718 +00:52:29,730 --> 00:52:30,510 +you have any question. + +719 +00:52:35,430 --> 00:52:39,430 +So later we'll discuss measures of variation and + +720 +00:52:39,430 --> 00:52:44,810 +variance, standard deviation up to the end of this + +721 +00:52:44,810 --> 00:52:45,090 +chapter. + +722 +00:52:54,630 --> 00:53:00,690 +So again, the range is sensitive to outliers. So + +723 +00:53:00,690 --> 00:53:03,850 +we have to avoid using range in this case. And + +724 +00:53:03,850 --> 00:53:06,270 +later we'll talk about the variance, which is the + +725 +00:53:06,270 --> 00:53:09,750 +most common measures of variation for next time, + +726 +00:53:09,830 --> 00:53:10,130 +inshallah. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..f9a52a3ef9b49e15d0066da862dbc8c06f8cf448 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 4267, "start": 17.75, "end": 42.67, "text": " So let's again go back to chapter number one. 
Last time we discussed chapter one, production and data collection. And I think we described why learning statistics distinguish between some of these topics. And also we explained in details", "tokens": [407, 718, 311, 797, 352, 646, 281, 7187, 1230, 472, 13, 5264, 565, 321, 7152, 7187, 472, 11, 4265, 293, 1412, 5765, 13, 400, 286, 519, 321, 7619, 983, 2539, 12523, 20206, 1296, 512, 295, 613, 8378, 13, 400, 611, 321, 8825, 294, 4365], "avg_logprob": -0.2621527645323012, "compression_ratio": 1.4251497005988023, "no_speech_prob": 0.0, "words": [{"start": 17.75, "end": 18.25, "word": " So", "probability": 0.8837890625}, {"start": 18.25, "end": 18.57, "word": " let's", "probability": 0.887939453125}, {"start": 18.57, "end": 18.93, "word": " again", "probability": 0.884765625}, {"start": 18.93, "end": 19.11, "word": " go", "probability": 0.919921875}, {"start": 19.11, "end": 19.47, "word": " back", "probability": 0.876953125}, {"start": 19.47, "end": 19.79, "word": " to", "probability": 0.9638671875}, {"start": 19.79, "end": 20.25, "word": " chapter", "probability": 0.436767578125}, {"start": 20.25, "end": 20.57, "word": " number", "probability": 0.8779296875}, {"start": 20.57, "end": 20.89, "word": " one.", "probability": 0.78369140625}, {"start": 21.13, "end": 21.35, "word": " Last", "probability": 0.880859375}, {"start": 21.35, "end": 21.57, "word": " time", "probability": 0.89208984375}, {"start": 21.57, "end": 21.73, "word": " we", "probability": 0.80419921875}, {"start": 21.73, "end": 22.37, "word": " discussed", "probability": 0.87646484375}, {"start": 22.37, "end": 24.17, "word": " chapter", "probability": 0.591796875}, {"start": 24.17, "end": 24.53, "word": " one,", "probability": 0.8232421875}, {"start": 24.59, "end": 24.95, "word": " production", "probability": 0.5751953125}, {"start": 24.95, "end": 25.49, "word": " and", "probability": 0.92822265625}, {"start": 25.49, "end": 25.73, "word": " data", "probability": 0.94091796875}, {"start": 25.73, "end": 
26.17, "word": " collection.", "probability": 0.74365234375}, {"start": 26.93, "end": 27.11, "word": " And", "probability": 0.830078125}, {"start": 27.11, "end": 27.25, "word": " I", "probability": 0.99169921875}, {"start": 27.25, "end": 27.47, "word": " think", "probability": 0.91357421875}, {"start": 27.47, "end": 27.79, "word": " we", "probability": 0.95703125}, {"start": 27.79, "end": 29.45, "word": " described", "probability": 0.7763671875}, {"start": 29.45, "end": 31.57, "word": " why", "probability": 0.9072265625}, {"start": 31.57, "end": 32.39, "word": " learning", "probability": 0.29052734375}, {"start": 32.39, "end": 33.11, "word": " statistics", "probability": 0.8779296875}, {"start": 33.11, "end": 34.99, "word": " distinguish", "probability": 0.248046875}, {"start": 34.99, "end": 35.55, "word": " between", "probability": 0.8642578125}, {"start": 35.55, "end": 35.93, "word": " some", "probability": 0.88916015625}, {"start": 35.93, "end": 36.09, "word": " of", "probability": 0.96728515625}, {"start": 36.09, "end": 36.51, "word": " these", "probability": 0.83837890625}, {"start": 36.51, "end": 37.05, "word": " topics.", "probability": 0.953125}, {"start": 38.37, "end": 38.67, "word": " And", "probability": 0.9423828125}, {"start": 38.67, "end": 39.13, "word": " also", "probability": 0.87548828125}, {"start": 39.13, "end": 40.03, "word": " we", "probability": 0.63671875}, {"start": 40.03, "end": 41.65, "word": " explained", "probability": 0.82470703125}, {"start": 41.65, "end": 42.17, "word": " in", "probability": 0.9296875}, {"start": 42.17, "end": 42.67, "word": " details", "probability": 0.75}], "temperature": 1.0}, {"id": 2, "seek": 6883, "start": 43.13, "end": 68.83, "text": " types of statistics and we mentioned that statistics mainly has two types either descriptive statistics which means collecting summarizing and obtaining data and other type of statistics is called inferential statistics or statistical inference and this type of statistics we can 
draw drawing conclusions", "tokens": [3467, 295, 12523, 293, 321, 2835, 300, 12523, 8704, 575, 732, 3467, 2139, 42585, 12523, 597, 1355, 12510, 14611, 3319, 293, 36749, 1412, 293, 661, 2010, 295, 12523, 307, 1219, 13596, 2549, 12523, 420, 22820, 38253, 293, 341, 2010, 295, 12523, 321, 393, 2642, 6316, 22865], "avg_logprob": -0.20894281217392455, "compression_ratio": 1.8944099378881987, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 43.13, "end": 43.63, "word": " types", "probability": 0.625}, {"start": 43.63, "end": 43.81, "word": " of", "probability": 0.9677734375}, {"start": 43.81, "end": 44.31, "word": " statistics", "probability": 0.88916015625}, {"start": 44.31, "end": 44.67, "word": " and", "probability": 0.55517578125}, {"start": 44.67, "end": 44.81, "word": " we", "probability": 0.94873046875}, {"start": 44.81, "end": 45.13, "word": " mentioned", "probability": 0.744140625}, {"start": 45.13, "end": 45.55, "word": " that", "probability": 0.9384765625}, {"start": 45.55, "end": 46.47, "word": " statistics", "probability": 0.54638671875}, {"start": 46.47, "end": 47.01, "word": " mainly", "probability": 0.95556640625}, {"start": 47.01, "end": 47.23, "word": " has", "probability": 0.84814453125}, {"start": 47.23, "end": 47.39, "word": " two", "probability": 0.88623046875}, {"start": 47.39, "end": 47.83, "word": " types", "probability": 0.82958984375}, {"start": 47.83, "end": 49.13, "word": " either", "probability": 0.6142578125}, {"start": 49.13, "end": 50.59, "word": " descriptive", "probability": 0.806640625}, {"start": 50.59, "end": 51.19, "word": " statistics", "probability": 0.91455078125}, {"start": 51.19, "end": 52.43, "word": " which", "probability": 0.71044921875}, {"start": 52.43, "end": 53.83, "word": " means", "probability": 0.9404296875}, {"start": 53.83, "end": 54.47, "word": " collecting", "probability": 0.865234375}, {"start": 54.47, "end": 55.61, "word": " summarizing", "probability": 0.6541748046875}, {"start": 55.61, "end": 
55.89, "word": " and", "probability": 0.92529296875}, {"start": 55.89, "end": 56.35, "word": " obtaining", "probability": 0.91259765625}, {"start": 56.35, "end": 56.81, "word": " data", "probability": 0.921875}, {"start": 56.81, "end": 57.59, "word": " and", "probability": 0.82861328125}, {"start": 57.59, "end": 57.87, "word": " other", "probability": 0.67236328125}, {"start": 57.87, "end": 58.11, "word": " type", "probability": 0.66748046875}, {"start": 58.11, "end": 58.23, "word": " of", "probability": 0.87841796875}, {"start": 58.23, "end": 58.63, "word": " statistics", "probability": 0.75830078125}, {"start": 58.63, "end": 58.93, "word": " is", "probability": 0.5947265625}, {"start": 58.93, "end": 59.29, "word": " called", "probability": 0.8857421875}, {"start": 59.29, "end": 59.91, "word": " inferential", "probability": 0.965087890625}, {"start": 59.91, "end": 60.51, "word": " statistics", "probability": 0.9111328125}, {"start": 60.51, "end": 61.05, "word": " or", "probability": 0.94482421875}, {"start": 61.05, "end": 62.09, "word": " statistical", "probability": 0.791015625}, {"start": 62.09, "end": 62.77, "word": " inference", "probability": 0.83544921875}, {"start": 62.77, "end": 63.97, "word": " and", "probability": 0.70703125}, {"start": 63.97, "end": 64.17, "word": " this", "probability": 0.91455078125}, {"start": 64.17, "end": 64.43, "word": " type", "probability": 0.9716796875}, {"start": 64.43, "end": 64.59, "word": " of", "probability": 0.96728515625}, {"start": 64.59, "end": 65.09, "word": " statistics", "probability": 0.92431640625}, {"start": 65.09, "end": 67.17, "word": " we", "probability": 0.88623046875}, {"start": 67.17, "end": 67.47, "word": " can", "probability": 0.9482421875}, {"start": 67.47, "end": 67.81, "word": " draw", "probability": 0.7705078125}, {"start": 67.81, "end": 68.27, "word": " drawing", "probability": 0.5419921875}, {"start": 68.27, "end": 68.83, "word": " conclusions", "probability": 0.9326171875}], "temperature": 1.0}, 
{"id": 3, "seek": 9971, "start": 70.47, "end": 99.71, "text": " and making decision concerning a population based only on a sample. That means we have a sample and sample is just a subset of the population or the portion of the population and we use the data from that sample to make some conclusion about the entire population. This type of statistic is called inferential statistics. Later, Inshallah, we'll talk in details about inferential statistics that will start in Chapter 7.", "tokens": [293, 1455, 3537, 18087, 257, 4415, 2361, 787, 322, 257, 6889, 13, 663, 1355, 321, 362, 257, 6889, 293, 6889, 307, 445, 257, 25993, 295, 264, 4415, 420, 264, 8044, 295, 264, 4415, 293, 321, 764, 264, 1412, 490, 300, 6889, 281, 652, 512, 10063, 466, 264, 2302, 4415, 13, 639, 2010, 295, 29588, 307, 1219, 13596, 2549, 12523, 13, 11965, 11, 682, 2716, 13492, 11, 321, 603, 751, 294, 4365, 466, 13596, 2549, 12523, 300, 486, 722, 294, 18874, 1614, 13], "avg_logprob": -0.18298192986522813, "compression_ratio": 1.8384279475982532, "no_speech_prob": 1.4901161193847656e-06, "words": [{"start": 70.47, "end": 71.07, "word": " and", "probability": 0.79931640625}, {"start": 71.07, "end": 71.51, "word": " making", "probability": 0.8857421875}, {"start": 71.51, "end": 71.99, "word": " decision", "probability": 0.453125}, {"start": 71.99, "end": 72.67, "word": " concerning", "probability": 0.82275390625}, {"start": 72.67, "end": 72.87, "word": " a", "probability": 0.9521484375}, {"start": 72.87, "end": 73.29, "word": " population", "probability": 0.94287109375}, {"start": 73.29, "end": 73.65, "word": " based", "probability": 0.87939453125}, {"start": 73.65, "end": 74.09, "word": " only", "probability": 0.89404296875}, {"start": 74.09, "end": 74.29, "word": " on", "probability": 0.95361328125}, {"start": 74.29, "end": 74.47, "word": " a", "probability": 0.93994140625}, {"start": 74.47, "end": 74.71, "word": " sample.", "probability": 0.87890625}, {"start": 75.13, "end": 75.37, "word": " That", 
"probability": 0.87646484375}, {"start": 75.37, "end": 75.57, "word": " means", "probability": 0.9296875}, {"start": 75.57, "end": 75.75, "word": " we", "probability": 0.9072265625}, {"start": 75.75, "end": 76.35, "word": " have", "probability": 0.94677734375}, {"start": 76.35, "end": 77.01, "word": " a", "probability": 0.978515625}, {"start": 77.01, "end": 77.27, "word": " sample", "probability": 0.88671875}, {"start": 77.27, "end": 77.51, "word": " and", "probability": 0.5712890625}, {"start": 77.51, "end": 77.79, "word": " sample", "probability": 0.74951171875}, {"start": 77.79, "end": 78.07, "word": " is", "probability": 0.9404296875}, {"start": 78.07, "end": 78.45, "word": " just", "probability": 0.91845703125}, {"start": 78.45, "end": 79.03, "word": " a", "probability": 0.97216796875}, {"start": 79.03, "end": 79.33, "word": " subset", "probability": 0.8828125}, {"start": 79.33, "end": 79.79, "word": " of", "probability": 0.96923828125}, {"start": 79.79, "end": 80.01, "word": " the", "probability": 0.91845703125}, {"start": 80.01, "end": 80.53, "word": " population", "probability": 0.93408203125}, {"start": 80.53, "end": 80.83, "word": " or", "probability": 0.86279296875}, {"start": 80.83, "end": 80.97, "word": " the", "probability": 0.63330078125}, {"start": 80.97, "end": 81.33, "word": " portion", "probability": 0.7783203125}, {"start": 81.33, "end": 82.05, "word": " of", "probability": 0.96875}, {"start": 82.05, "end": 82.15, "word": " the", "probability": 0.9150390625}, {"start": 82.15, "end": 82.53, "word": " population", "probability": 0.9267578125}, {"start": 82.53, "end": 83.19, "word": " and", "probability": 0.669921875}, {"start": 83.19, "end": 83.35, "word": " we", "probability": 0.95703125}, {"start": 83.35, "end": 83.63, "word": " use", "probability": 0.85791015625}, {"start": 83.63, "end": 83.87, "word": " the", "probability": 0.91455078125}, {"start": 83.87, "end": 84.25, "word": " data", "probability": 0.9287109375}, {"start": 84.25, "end": 
86.23, "word": " from", "probability": 0.86865234375}, {"start": 86.23, "end": 86.51, "word": " that", "probability": 0.9365234375}, {"start": 86.51, "end": 86.97, "word": " sample", "probability": 0.88525390625}, {"start": 86.97, "end": 87.31, "word": " to", "probability": 0.9619140625}, {"start": 87.31, "end": 87.51, "word": " make", "probability": 0.93505859375}, {"start": 87.51, "end": 87.81, "word": " some", "probability": 0.89453125}, {"start": 87.81, "end": 88.41, "word": " conclusion", "probability": 0.869140625}, {"start": 88.41, "end": 88.91, "word": " about", "probability": 0.900390625}, {"start": 88.91, "end": 89.13, "word": " the", "probability": 0.9140625}, {"start": 89.13, "end": 89.55, "word": " entire", "probability": 0.90185546875}, {"start": 89.55, "end": 90.51, "word": " population.", "probability": 0.92919921875}, {"start": 90.99, "end": 91.25, "word": " This", "probability": 0.8828125}, {"start": 91.25, "end": 91.51, "word": " type", "probability": 0.974609375}, {"start": 91.51, "end": 91.67, "word": " of", "probability": 0.96337890625}, {"start": 91.67, "end": 92.05, "word": " statistic", "probability": 0.66259765625}, {"start": 92.05, "end": 92.39, "word": " is", "probability": 0.9462890625}, {"start": 92.39, "end": 92.77, "word": " called", "probability": 0.890625}, {"start": 92.77, "end": 93.31, "word": " inferential", "probability": 0.9482421875}, {"start": 93.31, "end": 93.91, "word": " statistics.", "probability": 0.87890625}, {"start": 94.13, "end": 94.37, "word": " Later,", "probability": 0.85302734375}, {"start": 94.49, "end": 94.71, "word": " Inshallah,", "probability": 0.6131184895833334}, {"start": 94.75, "end": 94.93, "word": " we'll", "probability": 0.783935546875}, {"start": 94.93, "end": 95.21, "word": " talk", "probability": 0.88818359375}, {"start": 95.21, "end": 95.73, "word": " in", "probability": 0.93212890625}, {"start": 95.73, "end": 96.03, "word": " details", "probability": 0.72509765625}, {"start": 96.03, "end": 
96.51, "word": " about", "probability": 0.89208984375}, {"start": 96.51, "end": 97.01, "word": " inferential", "probability": 0.931396484375}, {"start": 97.01, "end": 97.71, "word": " statistics", "probability": 0.9267578125}, {"start": 97.71, "end": 98.43, "word": " that", "probability": 0.5517578125}, {"start": 98.43, "end": 98.57, "word": " will", "probability": 0.8095703125}, {"start": 98.57, "end": 99.01, "word": " start", "probability": 0.904296875}, {"start": 99.01, "end": 99.21, "word": " in", "probability": 0.76220703125}, {"start": 99.21, "end": 99.45, "word": " Chapter", "probability": 0.38134765625}, {"start": 99.45, "end": 99.71, "word": " 7.", "probability": 0.7548828125}], "temperature": 1.0}, {"id": 4, "seek": 12953, "start": 101.93, "end": 129.53, "text": " Also, we gave some definitions for variables, data, and we distinguished between population and sample. And we know that the population consists of all items or individuals about which you want to draw a conclusion. But in some cases, it's very hard to talk about the population or the entire population, so we can", "tokens": [2743, 11, 321, 2729, 512, 21988, 337, 9102, 11, 1412, 11, 293, 321, 21702, 1296, 4415, 293, 6889, 13, 400, 321, 458, 300, 264, 4415, 14689, 295, 439, 4754, 420, 5346, 466, 597, 291, 528, 281, 2642, 257, 10063, 13, 583, 294, 512, 3331, 11, 309, 311, 588, 1152, 281, 751, 466, 264, 4415, 420, 264, 2302, 4415, 11, 370, 321, 393], "avg_logprob": -0.1251240022598751, "compression_ratio": 1.6153846153846154, "no_speech_prob": 0.0, "words": [{"start": 101.93, "end": 102.65, "word": " Also,", "probability": 0.86865234375}, {"start": 102.65, "end": 103.37, "word": " we", "probability": 0.96142578125}, {"start": 103.37, "end": 104.83, "word": " gave", "probability": 0.69140625}, {"start": 104.83, "end": 105.29, "word": " some", "probability": 0.89599609375}, {"start": 105.29, "end": 106.41, "word": " definitions", "probability": 0.853515625}, {"start": 106.41, "end": 106.83, "word": " 
for", "probability": 0.951171875}, {"start": 106.83, "end": 107.49, "word": " variables,", "probability": 0.9248046875}, {"start": 108.47, "end": 109.13, "word": " data,", "probability": 0.92822265625}, {"start": 109.95, "end": 110.49, "word": " and", "probability": 0.9404296875}, {"start": 110.49, "end": 110.63, "word": " we", "probability": 0.95947265625}, {"start": 110.63, "end": 111.09, "word": " distinguished", "probability": 0.79052734375}, {"start": 111.09, "end": 111.81, "word": " between", "probability": 0.857421875}, {"start": 111.81, "end": 112.73, "word": " population", "probability": 0.9404296875}, {"start": 112.73, "end": 113.01, "word": " and", "probability": 0.927734375}, {"start": 113.01, "end": 113.33, "word": " sample.", "probability": 0.912109375}, {"start": 113.45, "end": 113.51, "word": " And", "probability": 0.7509765625}, {"start": 113.51, "end": 113.61, "word": " we", "probability": 0.95166015625}, {"start": 113.61, "end": 113.73, "word": " know", "probability": 0.87890625}, {"start": 113.73, "end": 114.01, "word": " that", "probability": 0.93115234375}, {"start": 114.01, "end": 114.47, "word": " the", "probability": 0.88623046875}, {"start": 114.47, "end": 114.93, "word": " population", "probability": 0.93017578125}, {"start": 114.93, "end": 115.57, "word": " consists", "probability": 0.82373046875}, {"start": 115.57, "end": 115.87, "word": " of", "probability": 0.966796875}, {"start": 115.87, "end": 116.15, "word": " all", "probability": 0.9501953125}, {"start": 116.15, "end": 116.63, "word": " items", "probability": 0.85986328125}, {"start": 116.63, "end": 116.85, "word": " or", "probability": 0.921875}, {"start": 116.85, "end": 117.37, "word": " individuals", "probability": 0.845703125}, {"start": 117.37, "end": 118.25, "word": " about", "probability": 0.90576171875}, {"start": 118.25, "end": 119.29, "word": " which", "probability": 0.9501953125}, {"start": 119.29, "end": 119.55, "word": " you", "probability": 0.96337890625}, {"start": 
119.55, "end": 119.85, "word": " want", "probability": 0.8798828125}, {"start": 119.85, "end": 119.97, "word": " to", "probability": 0.96044921875}, {"start": 119.97, "end": 120.13, "word": " draw", "probability": 0.8828125}, {"start": 120.13, "end": 120.27, "word": " a", "probability": 0.86767578125}, {"start": 120.27, "end": 120.61, "word": " conclusion.", "probability": 0.88623046875}, {"start": 122.71, "end": 123.19, "word": " But", "probability": 0.95068359375}, {"start": 123.19, "end": 123.69, "word": " in", "probability": 0.90478515625}, {"start": 123.69, "end": 124.05, "word": " some", "probability": 0.89404296875}, {"start": 124.05, "end": 124.51, "word": " cases,", "probability": 0.923828125}, {"start": 124.57, "end": 124.87, "word": " it's", "probability": 0.97412109375}, {"start": 124.87, "end": 125.13, "word": " very", "probability": 0.85107421875}, {"start": 125.13, "end": 125.41, "word": " hard", "probability": 0.89501953125}, {"start": 125.41, "end": 125.77, "word": " to", "probability": 0.97119140625}, {"start": 125.77, "end": 126.23, "word": " talk", "probability": 0.890625}, {"start": 126.23, "end": 126.55, "word": " about", "probability": 0.9091796875}, {"start": 126.55, "end": 126.73, "word": " the", "probability": 0.9189453125}, {"start": 126.73, "end": 127.19, "word": " population", "probability": 0.9267578125}, {"start": 127.19, "end": 127.39, "word": " or", "probability": 0.9345703125}, {"start": 127.39, "end": 127.51, "word": " the", "probability": 0.91845703125}, {"start": 127.51, "end": 127.75, "word": " entire", "probability": 0.92236328125}, {"start": 127.75, "end": 128.15, "word": " population,", "probability": 0.9326171875}, {"start": 128.29, "end": 128.57, "word": " so", "probability": 0.94921875}, {"start": 128.57, "end": 129.23, "word": " we", "probability": 0.94873046875}, {"start": 129.23, "end": 129.53, "word": " can", "probability": 0.94775390625}], "temperature": 1.0}, {"id": 5, "seek": 15600, "start": 130.02, "end": 156.0, 
"text": " select a sample. A sample is just a portion or subset of the entire population. So we know now the definition of population and sample. The other two types, parameter and statistics. Parameter is a numerical measure that describes characteristics of a population, while on the other hand, a sample, a statistic is just", "tokens": [3048, 257, 6889, 13, 316, 6889, 307, 445, 257, 8044, 420, 25993, 295, 264, 2302, 4415, 13, 407, 321, 458, 586, 264, 7123, 295, 4415, 293, 6889, 13, 440, 661, 732, 3467, 11, 13075, 293, 12523, 13, 34882, 2398, 307, 257, 29054, 3481, 300, 15626, 10891, 295, 257, 4415, 11, 1339, 322, 264, 661, 1011, 11, 257, 6889, 11, 257, 29588, 307, 445], "avg_logprob": -0.18261718028225005, "compression_ratio": 1.733695652173913, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 130.02, "end": 130.82, "word": " select", "probability": 0.54736328125}, {"start": 130.82, "end": 132.28, "word": " a", "probability": 0.7900390625}, {"start": 132.28, "end": 132.54, "word": " sample.", "probability": 0.66943359375}, {"start": 132.74, "end": 132.92, "word": " A", "probability": 0.689453125}, {"start": 132.92, "end": 133.12, "word": " sample", "probability": 0.88232421875}, {"start": 133.12, "end": 133.34, "word": " is", "probability": 0.943359375}, {"start": 133.34, "end": 133.68, "word": " just", "probability": 0.9111328125}, {"start": 133.68, "end": 134.38, "word": " a", "probability": 0.98828125}, {"start": 134.38, "end": 134.8, "word": " portion", "probability": 0.859375}, {"start": 134.8, "end": 135.76, "word": " or", "probability": 0.865234375}, {"start": 135.76, "end": 136.38, "word": " subset", "probability": 0.6630859375}, {"start": 136.38, "end": 137.32, "word": " of", "probability": 0.9697265625}, {"start": 137.32, "end": 137.56, "word": " the", "probability": 0.9208984375}, {"start": 137.56, "end": 137.96, "word": " entire", "probability": 0.89990234375}, {"start": 137.96, "end": 138.48, "word": " population.", "probability": 
0.9375}, {"start": 139.06, "end": 139.72, "word": " So", "probability": 0.939453125}, {"start": 139.72, "end": 139.96, "word": " we", "probability": 0.75390625}, {"start": 139.96, "end": 140.1, "word": " know", "probability": 0.83935546875}, {"start": 140.1, "end": 140.32, "word": " now", "probability": 0.912109375}, {"start": 140.32, "end": 140.5, "word": " the", "probability": 0.904296875}, {"start": 140.5, "end": 140.9, "word": " definition", "probability": 0.93701171875}, {"start": 140.9, "end": 141.2, "word": " of", "probability": 0.97021484375}, {"start": 141.2, "end": 141.6, "word": " population", "probability": 0.931640625}, {"start": 141.6, "end": 141.86, "word": " and", "probability": 0.884765625}, {"start": 141.86, "end": 142.18, "word": " sample.", "probability": 0.8740234375}, {"start": 143.2, "end": 143.38, "word": " The", "probability": 0.89306640625}, {"start": 143.38, "end": 143.6, "word": " other", "probability": 0.8984375}, {"start": 143.6, "end": 143.82, "word": " two", "probability": 0.93408203125}, {"start": 143.82, "end": 144.32, "word": " types,", "probability": 0.8251953125}, {"start": 144.72, "end": 145.0, "word": " parameter", "probability": 0.89208984375}, {"start": 145.0, "end": 145.36, "word": " and", "probability": 0.91552734375}, {"start": 145.36, "end": 145.88, "word": " statistics.", "probability": 0.84765625}, {"start": 147.1, "end": 147.58, "word": " Parameter", "probability": 0.965087890625}, {"start": 147.58, "end": 147.88, "word": " is", "probability": 0.9482421875}, {"start": 147.88, "end": 148.06, "word": " a", "probability": 0.94384765625}, {"start": 148.06, "end": 148.32, "word": " numerical", "probability": 0.9462890625}, {"start": 148.32, "end": 148.64, "word": " measure", "probability": 0.88525390625}, {"start": 148.64, "end": 148.86, "word": " that", "probability": 0.92578125}, {"start": 148.86, "end": 149.26, "word": " describes", "probability": 0.69140625}, {"start": 149.26, "end": 149.94, "word": " characteristics", 
"probability": 0.47265625}, {"start": 149.94, "end": 150.4, "word": " of", "probability": 0.9658203125}, {"start": 150.4, "end": 150.52, "word": " a", "probability": 0.9833984375}, {"start": 150.52, "end": 151.0, "word": " population,", "probability": 0.93359375}, {"start": 151.9, "end": 152.3, "word": " while", "probability": 0.9482421875}, {"start": 152.3, "end": 152.5, "word": " on", "probability": 0.89208984375}, {"start": 152.5, "end": 152.62, "word": " the", "probability": 0.92333984375}, {"start": 152.62, "end": 152.84, "word": " other", "probability": 0.89208984375}, {"start": 152.84, "end": 153.22, "word": " hand,", "probability": 0.927734375}, {"start": 153.58, "end": 153.7, "word": " a", "probability": 0.9609375}, {"start": 153.7, "end": 154.0, "word": " sample,", "probability": 0.60546875}, {"start": 154.18, "end": 154.28, "word": " a", "probability": 0.93798828125}, {"start": 154.28, "end": 154.68, "word": " statistic", "probability": 0.88818359375}, {"start": 154.68, "end": 155.52, "word": " is", "probability": 0.71923828125}, {"start": 155.52, "end": 156.0, "word": " just", "probability": 0.921875}], "temperature": 1.0}, {"id": 6, "seek": 17677, "start": 156.43, "end": 176.77, "text": " numerical measures that describe characteristic of a sample. So parameter is computed from the population while statistic is computed from the sample. I think we stopped at this point. Why collect data? 
I mean what are the reasons for", "tokens": [29054, 8000, 300, 6786, 16282, 295, 257, 6889, 13, 407, 13075, 307, 40610, 490, 264, 4415, 1339, 29588, 307, 40610, 490, 264, 6889, 13, 286, 519, 321, 5936, 412, 341, 935, 13, 1545, 2500, 1412, 30, 286, 914, 437, 366, 264, 4112, 337], "avg_logprob": -0.22230114042758942, "compression_ratio": 1.525974025974026, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 156.43, "end": 157.11, "word": " numerical", "probability": 0.39990234375}, {"start": 157.11, "end": 157.59, "word": " measures", "probability": 0.685546875}, {"start": 157.59, "end": 157.93, "word": " that", "probability": 0.87353515625}, {"start": 157.93, "end": 158.37, "word": " describe", "probability": 0.81201171875}, {"start": 158.37, "end": 159.05, "word": " characteristic", "probability": 0.384765625}, {"start": 159.05, "end": 159.73, "word": " of", "probability": 0.96826171875}, {"start": 159.73, "end": 160.71, "word": " a", "probability": 0.94384765625}, {"start": 160.71, "end": 161.43, "word": " sample.", "probability": 0.86767578125}, {"start": 161.91, "end": 162.07, "word": " So", "probability": 0.8388671875}, {"start": 162.07, "end": 162.93, "word": " parameter", "probability": 0.308349609375}, {"start": 162.93, "end": 163.33, "word": " is", "probability": 0.8720703125}, {"start": 163.33, "end": 163.83, "word": " computed", "probability": 0.927734375}, {"start": 163.83, "end": 164.33, "word": " from", "probability": 0.892578125}, {"start": 164.33, "end": 164.93, "word": " the", "probability": 0.90625}, {"start": 164.93, "end": 165.39, "word": " population", "probability": 0.95263671875}, {"start": 165.39, "end": 166.33, "word": " while", "probability": 0.71875}, {"start": 166.33, "end": 167.03, "word": " statistic", "probability": 0.49267578125}, {"start": 167.03, "end": 167.35, "word": " is", "probability": 0.93505859375}, {"start": 167.35, "end": 167.77, "word": " computed", "probability": 0.9306640625}, {"start": 167.77, "end": 168.25, 
"word": " from", "probability": 0.8916015625}, {"start": 168.25, "end": 168.93, "word": " the", "probability": 0.91796875}, {"start": 168.93, "end": 169.23, "word": " sample.", "probability": 0.875}, {"start": 171.01, "end": 171.29, "word": " I", "probability": 0.9931640625}, {"start": 171.29, "end": 171.47, "word": " think", "probability": 0.9248046875}, {"start": 171.47, "end": 171.71, "word": " we", "probability": 0.9541015625}, {"start": 171.71, "end": 172.27, "word": " stopped", "probability": 0.5322265625}, {"start": 172.27, "end": 172.77, "word": " at", "probability": 0.96484375}, {"start": 172.77, "end": 172.99, "word": " this", "probability": 0.94873046875}, {"start": 172.99, "end": 173.39, "word": " point.", "probability": 0.970703125}, {"start": 173.91, "end": 174.03, "word": " Why", "probability": 0.86865234375}, {"start": 174.03, "end": 174.43, "word": " collect", "probability": 0.90234375}, {"start": 174.43, "end": 174.77, "word": " data?", "probability": 0.9296875}, {"start": 175.11, "end": 175.25, "word": " I", "probability": 0.96875}, {"start": 175.25, "end": 175.41, "word": " mean", "probability": 0.9658203125}, {"start": 175.41, "end": 175.71, "word": " what", "probability": 0.60302734375}, {"start": 175.71, "end": 175.91, "word": " are", "probability": 0.94189453125}, {"start": 175.91, "end": 176.07, "word": " the", "probability": 0.92626953125}, {"start": 176.07, "end": 176.55, "word": " reasons", "probability": 0.93212890625}, {"start": 176.55, "end": 176.77, "word": " for", "probability": 0.80712890625}], "temperature": 1.0}, {"id": 7, "seek": 20760, "start": 179.58, "end": 207.6, "text": " One of these reasons, for example, a marketing research analyst needs to assess the effectiveness of a new television advertisement. For example, suppose you are a manager and you want to increase your salaries or your sales. Now, sales may be affected by advertising. 
So I mean, if you spend more on advertising, it means your sales becomes larger and larger.", "tokens": [1485, 295, 613, 4112, 11, 337, 1365, 11, 257, 6370, 2132, 19085, 2203, 281, 5877, 264, 21208, 295, 257, 777, 8815, 31370, 13, 1171, 1365, 11, 7297, 291, 366, 257, 6598, 293, 291, 528, 281, 3488, 428, 35057, 420, 428, 5763, 13, 823, 11, 5763, 815, 312, 8028, 538, 13097, 13, 407, 286, 914, 11, 498, 291, 3496, 544, 322, 13097, 11, 309, 1355, 428, 5763, 3643, 4833, 293, 4833, 13], "avg_logprob": -0.120117184602552, "compression_ratio": 1.7109004739336493, "no_speech_prob": 0.0, "words": [{"start": 179.58, "end": 179.88, "word": " One", "probability": 0.80224609375}, {"start": 179.88, "end": 180.0, "word": " of", "probability": 0.970703125}, {"start": 180.0, "end": 180.16, "word": " these", "probability": 0.78515625}, {"start": 180.16, "end": 180.46, "word": " reasons,", "probability": 0.7802734375}, {"start": 180.58, "end": 180.66, "word": " for", "probability": 0.94970703125}, {"start": 180.66, "end": 181.04, "word": " example,", "probability": 0.9736328125}, {"start": 181.54, "end": 181.68, "word": " a", "probability": 0.974609375}, {"start": 181.68, "end": 181.98, "word": " marketing", "probability": 0.78662109375}, {"start": 181.98, "end": 182.42, "word": " research", "probability": 0.765625}, {"start": 182.42, "end": 182.92, "word": " analyst", "probability": 0.84375}, {"start": 182.92, "end": 183.36, "word": " needs", "probability": 0.86181640625}, {"start": 183.36, "end": 183.56, "word": " to", "probability": 0.97021484375}, {"start": 183.56, "end": 183.98, "word": " assess", "probability": 0.9482421875}, {"start": 183.98, "end": 184.2, "word": " the", "probability": 0.91748046875}, {"start": 184.2, "end": 184.66, "word": " effectiveness", "probability": 0.88525390625}, {"start": 184.66, "end": 184.92, "word": " of", "probability": 0.96337890625}, {"start": 184.92, "end": 185.02, "word": " a", "probability": 0.62646484375}, {"start": 185.02, "end": 185.24, 
"word": " new", "probability": 0.90185546875}, {"start": 185.24, "end": 186.16, "word": " television", "probability": 0.8818359375}, {"start": 186.16, "end": 186.94, "word": " advertisement.", "probability": 0.83837890625}, {"start": 187.24, "end": 187.36, "word": " For", "probability": 0.95947265625}, {"start": 187.36, "end": 187.7, "word": " example,", "probability": 0.97265625}, {"start": 187.84, "end": 188.12, "word": " suppose", "probability": 0.90185546875}, {"start": 188.12, "end": 188.28, "word": " you", "probability": 0.95703125}, {"start": 188.28, "end": 188.38, "word": " are", "probability": 0.8125}, {"start": 188.38, "end": 188.48, "word": " a", "probability": 0.98681640625}, {"start": 188.48, "end": 188.7, "word": " manager", "probability": 0.95556640625}, {"start": 188.7, "end": 189.94, "word": " and", "probability": 0.76123046875}, {"start": 189.94, "end": 192.2, "word": " you", "probability": 0.955078125}, {"start": 192.2, "end": 192.5, "word": " want", "probability": 0.88037109375}, {"start": 192.5, "end": 192.82, "word": " to", "probability": 0.96826171875}, {"start": 192.82, "end": 193.38, "word": " increase", "probability": 0.8486328125}, {"start": 193.38, "end": 193.6, "word": " your", "probability": 0.89453125}, {"start": 193.6, "end": 194.02, "word": " salaries", "probability": 0.9345703125}, {"start": 194.02, "end": 195.08, "word": " or", "probability": 0.8642578125}, {"start": 195.08, "end": 195.36, "word": " your", "probability": 0.89404296875}, {"start": 195.36, "end": 195.8, "word": " sales.", "probability": 0.923828125}, {"start": 197.04, "end": 197.18, "word": " Now,", "probability": 0.91064453125}, {"start": 197.24, "end": 197.62, "word": " sales", "probability": 0.87060546875}, {"start": 197.62, "end": 197.96, "word": " may", "probability": 0.90380859375}, {"start": 197.96, "end": 198.06, "word": " be", "probability": 0.95068359375}, {"start": 198.06, "end": 198.54, "word": " affected", "probability": 0.85107421875}, {"start": 
198.54, "end": 199.18, "word": " by", "probability": 0.970703125}, {"start": 199.18, "end": 200.5, "word": " advertising.", "probability": 0.96435546875}, {"start": 201.36, "end": 201.6, "word": " So", "probability": 0.9345703125}, {"start": 201.6, "end": 201.72, "word": " I", "probability": 0.65380859375}, {"start": 201.72, "end": 201.92, "word": " mean,", "probability": 0.9658203125}, {"start": 202.46, "end": 202.7, "word": " if", "probability": 0.953125}, {"start": 202.7, "end": 202.9, "word": " you", "probability": 0.962890625}, {"start": 202.9, "end": 203.38, "word": " spend", "probability": 0.8017578125}, {"start": 203.38, "end": 203.88, "word": " more", "probability": 0.943359375}, {"start": 203.88, "end": 204.02, "word": " on", "probability": 0.87451171875}, {"start": 204.02, "end": 204.52, "word": " advertising,", "probability": 0.9697265625}, {"start": 204.62, "end": 204.7, "word": " it", "probability": 0.927734375}, {"start": 204.7, "end": 204.92, "word": " means", "probability": 0.9345703125}, {"start": 204.92, "end": 205.34, "word": " your", "probability": 0.8798828125}, {"start": 205.34, "end": 205.8, "word": " sales", "probability": 0.9208984375}, {"start": 205.8, "end": 206.32, "word": " becomes", "probability": 0.865234375}, {"start": 206.32, "end": 207.04, "word": " larger", "probability": 0.93994140625}, {"start": 207.04, "end": 207.28, "word": " and", "probability": 0.94091796875}, {"start": 207.28, "end": 207.6, "word": " larger.", "probability": 0.93798828125}], "temperature": 1.0}, {"id": 8, "seek": 23738, "start": 208.0, "end": 237.38, "text": " So you want to know if this variable, I mean if advertisement is an effective variable that maybe increase your sales. So that's one of the reasons why we use data. The other one, for example, pharmaceutical manufacturers needs to determine whether a new drug is more effective than those currently used. For example, for a headache, we use drug A. 
Now,", "tokens": [407, 291, 528, 281, 458, 498, 341, 7006, 11, 286, 914, 498, 31370, 307, 364, 4942, 7006, 300, 1310, 3488, 428, 5763, 13, 407, 300, 311, 472, 295, 264, 4112, 983, 321, 764, 1412, 13, 440, 661, 472, 11, 337, 1365, 11, 27130, 18455, 2203, 281, 6997, 1968, 257, 777, 4110, 307, 544, 4942, 813, 729, 4362, 1143, 13, 1171, 1365, 11, 337, 257, 23520, 11, 321, 764, 4110, 316, 13, 823, 11], "avg_logprob": -0.1361908733442023, "compression_ratio": 1.5803571428571428, "no_speech_prob": 0.0, "words": [{"start": 208.0, "end": 208.34, "word": " So", "probability": 0.771484375}, {"start": 208.34, "end": 208.46, "word": " you", "probability": 0.75341796875}, {"start": 208.46, "end": 208.66, "word": " want", "probability": 0.865234375}, {"start": 208.66, "end": 208.76, "word": " to", "probability": 0.966796875}, {"start": 208.76, "end": 208.94, "word": " know", "probability": 0.8896484375}, {"start": 208.94, "end": 209.24, "word": " if", "probability": 0.9482421875}, {"start": 209.24, "end": 209.74, "word": " this", "probability": 0.9306640625}, {"start": 209.74, "end": 210.16, "word": " variable,", "probability": 0.92431640625}, {"start": 210.36, "end": 210.36, "word": " I", "probability": 0.8837890625}, {"start": 210.36, "end": 210.46, "word": " mean", "probability": 0.97021484375}, {"start": 210.46, "end": 210.64, "word": " if", "probability": 0.6962890625}, {"start": 210.64, "end": 211.26, "word": " advertisement", "probability": 0.748046875}, {"start": 211.26, "end": 212.28, "word": " is", "probability": 0.74951171875}, {"start": 212.28, "end": 212.84, "word": " an", "probability": 0.94677734375}, {"start": 212.84, "end": 214.16, "word": " effective", "probability": 0.83642578125}, {"start": 214.16, "end": 215.16, "word": " variable", "probability": 0.91796875}, {"start": 215.16, "end": 215.66, "word": " that", "probability": 0.9169921875}, {"start": 215.66, "end": 216.98, "word": " maybe", "probability": 0.671875}, {"start": 216.98, "end": 217.68, "word": " 
increase", "probability": 0.72998046875}, {"start": 217.68, "end": 217.94, "word": " your", "probability": 0.875}, {"start": 217.94, "end": 218.26, "word": " sales.", "probability": 0.93896484375}, {"start": 218.48, "end": 218.58, "word": " So", "probability": 0.9345703125}, {"start": 218.58, "end": 218.9, "word": " that's", "probability": 0.918701171875}, {"start": 218.9, "end": 219.12, "word": " one", "probability": 0.92431640625}, {"start": 219.12, "end": 219.76, "word": " of", "probability": 0.7392578125}, {"start": 219.76, "end": 219.9, "word": " the", "probability": 0.88671875}, {"start": 219.9, "end": 220.12, "word": " reasons", "probability": 0.921875}, {"start": 220.12, "end": 220.38, "word": " why", "probability": 0.90771484375}, {"start": 220.38, "end": 220.58, "word": " we", "probability": 0.96044921875}, {"start": 220.58, "end": 220.92, "word": " use", "probability": 0.8818359375}, {"start": 220.92, "end": 222.48, "word": " data.", "probability": 0.87158203125}, {"start": 223.12, "end": 223.4, "word": " The", "probability": 0.8876953125}, {"start": 223.4, "end": 223.6, "word": " other", "probability": 0.8916015625}, {"start": 223.6, "end": 223.9, "word": " one,", "probability": 0.92822265625}, {"start": 224.12, "end": 224.2, "word": " for", "probability": 0.95703125}, {"start": 224.2, "end": 224.54, "word": " example,", "probability": 0.97265625}, {"start": 224.72, "end": 225.16, "word": " pharmaceutical", "probability": 0.83056640625}, {"start": 225.16, "end": 226.12, "word": " manufacturers", "probability": 0.8154296875}, {"start": 226.12, "end": 226.56, "word": " needs", "probability": 0.58203125}, {"start": 226.56, "end": 226.88, "word": " to", "probability": 0.96435546875}, {"start": 226.88, "end": 227.38, "word": " determine", "probability": 0.90771484375}, {"start": 227.38, "end": 228.24, "word": " whether", "probability": 0.91650390625}, {"start": 228.24, "end": 228.48, "word": " a", "probability": 0.98046875}, {"start": 228.48, "end": 228.66, 
"word": " new", "probability": 0.9140625}, {"start": 228.66, "end": 228.92, "word": " drug", "probability": 0.9326171875}, {"start": 228.92, "end": 229.1, "word": " is", "probability": 0.9462890625}, {"start": 229.1, "end": 229.32, "word": " more", "probability": 0.943359375}, {"start": 229.32, "end": 229.8, "word": " effective", "probability": 0.87255859375}, {"start": 229.8, "end": 230.18, "word": " than", "probability": 0.947265625}, {"start": 230.18, "end": 230.68, "word": " those", "probability": 0.8935546875}, {"start": 230.68, "end": 231.2, "word": " currently", "probability": 0.822265625}, {"start": 231.2, "end": 231.56, "word": " used.", "probability": 0.8994140625}, {"start": 232.4, "end": 232.64, "word": " For", "probability": 0.96044921875}, {"start": 232.64, "end": 232.88, "word": " example,", "probability": 0.974609375}, {"start": 232.94, "end": 233.12, "word": " for", "probability": 0.9501953125}, {"start": 233.12, "end": 233.24, "word": " a", "probability": 0.9453125}, {"start": 233.24, "end": 233.48, "word": " headache,", "probability": 0.90869140625}, {"start": 233.96, "end": 234.26, "word": " we", "probability": 0.9580078125}, {"start": 234.26, "end": 234.58, "word": " use", "probability": 0.86376953125}, {"start": 234.58, "end": 234.9, "word": " drug", "probability": 0.7880859375}, {"start": 234.9, "end": 235.18, "word": " A.", "probability": 0.86767578125}, {"start": 236.74, "end": 237.38, "word": " Now,", "probability": 0.94970703125}], "temperature": 1.0}, {"id": 9, "seek": 26607, "start": 237.97, "end": 266.07, "text": " a new drug is produced and you want to see if this new drug is more effective than drug A that I mean if headache suppose for example is removed after three days by using drug A now the question is does B is more effective it means it reduces your headache in fewer than three days I mean maybe in two days", "tokens": [257, 777, 4110, 307, 7126, 293, 291, 528, 281, 536, 498, 341, 777, 4110, 307, 544, 4942, 813, 4110, 316, 
300, 286, 914, 498, 23520, 7297, 337, 1365, 307, 7261, 934, 1045, 1708, 538, 1228, 4110, 316, 586, 264, 1168, 307, 775, 363, 307, 544, 4942, 309, 1355, 309, 18081, 428, 23520, 294, 13366, 813, 1045, 1708, 286, 914, 1310, 294, 732, 1708], "avg_logprob": -0.20483398041687906, "compression_ratio": 1.7443181818181819, "no_speech_prob": 0.0, "words": [{"start": 237.97, "end": 238.25, "word": " a", "probability": 0.333251953125}, {"start": 238.25, "end": 238.43, "word": " new", "probability": 0.8955078125}, {"start": 238.43, "end": 238.73, "word": " drug", "probability": 0.92236328125}, {"start": 238.73, "end": 239.33, "word": " is", "probability": 0.9248046875}, {"start": 239.33, "end": 239.87, "word": " produced", "probability": 0.88623046875}, {"start": 239.87, "end": 240.85, "word": " and", "probability": 0.6015625}, {"start": 240.85, "end": 240.97, "word": " you", "probability": 0.87744140625}, {"start": 240.97, "end": 241.15, "word": " want", "probability": 0.8779296875}, {"start": 241.15, "end": 241.29, "word": " to", "probability": 0.96728515625}, {"start": 241.29, "end": 241.51, "word": " see", "probability": 0.92431640625}, {"start": 241.51, "end": 241.81, "word": " if", "probability": 0.9404296875}, {"start": 241.81, "end": 242.07, "word": " this", "probability": 0.9306640625}, {"start": 242.07, "end": 242.35, "word": " new", "probability": 0.89892578125}, {"start": 242.35, "end": 242.69, "word": " drug", "probability": 0.93017578125}, {"start": 242.69, "end": 244.51, "word": " is", "probability": 0.896484375}, {"start": 244.51, "end": 244.77, "word": " more", "probability": 0.94189453125}, {"start": 244.77, "end": 245.29, "word": " effective", "probability": 0.85400390625}, {"start": 245.29, "end": 245.81, "word": " than", "probability": 0.9150390625}, {"start": 245.81, "end": 246.49, "word": " drug", "probability": 0.7412109375}, {"start": 246.49, "end": 246.81, "word": " A", "probability": 0.82080078125}, {"start": 246.81, "end": 247.27, "word": " that", 
"probability": 0.345703125}, {"start": 247.27, "end": 247.59, "word": " I", "probability": 0.796875}, {"start": 247.59, "end": 247.87, "word": " mean", "probability": 0.9599609375}, {"start": 247.87, "end": 249.73, "word": " if", "probability": 0.7490234375}, {"start": 249.73, "end": 250.09, "word": " headache", "probability": 0.50146484375}, {"start": 250.09, "end": 250.63, "word": " suppose", "probability": 0.818359375}, {"start": 250.63, "end": 250.97, "word": " for", "probability": 0.83056640625}, {"start": 250.97, "end": 251.35, "word": " example", "probability": 0.97265625}, {"start": 251.35, "end": 251.55, "word": " is", "probability": 0.8857421875}, {"start": 251.55, "end": 251.85, "word": " removed", "probability": 0.79150390625}, {"start": 251.85, "end": 252.19, "word": " after", "probability": 0.8447265625}, {"start": 252.19, "end": 252.45, "word": " three", "probability": 0.58056640625}, {"start": 252.45, "end": 252.81, "word": " days", "probability": 0.9326171875}, {"start": 252.81, "end": 253.41, "word": " by", "probability": 0.9345703125}, {"start": 253.41, "end": 253.71, "word": " using", "probability": 0.931640625}, {"start": 253.71, "end": 253.97, "word": " drug", "probability": 0.7568359375}, {"start": 253.97, "end": 254.27, "word": " A", "probability": 0.9716796875}, {"start": 254.27, "end": 255.03, "word": " now", "probability": 0.484130859375}, {"start": 255.03, "end": 255.21, "word": " the", "probability": 0.8916015625}, {"start": 255.21, "end": 255.53, "word": " question", "probability": 0.912109375}, {"start": 255.53, "end": 255.99, "word": " is", "probability": 0.953125}, {"start": 255.99, "end": 256.55, "word": " does", "probability": 0.86572265625}, {"start": 256.55, "end": 258.81, "word": " B", "probability": 0.5068359375}, {"start": 258.81, "end": 260.23, "word": " is", "probability": 0.90966796875}, {"start": 260.23, "end": 260.49, "word": " more", "probability": 0.9423828125}, {"start": 260.49, "end": 260.89, "word": " effective", 
"probability": 0.8603515625}, {"start": 260.89, "end": 261.09, "word": " it", "probability": 0.57763671875}, {"start": 261.09, "end": 261.35, "word": " means", "probability": 0.9306640625}, {"start": 261.35, "end": 261.59, "word": " it", "probability": 0.90966796875}, {"start": 261.59, "end": 262.15, "word": " reduces", "probability": 0.425048828125}, {"start": 262.15, "end": 262.77, "word": " your", "probability": 0.80224609375}, {"start": 262.77, "end": 263.07, "word": " headache", "probability": 0.85400390625}, {"start": 263.07, "end": 263.41, "word": " in", "probability": 0.94140625}, {"start": 263.41, "end": 263.75, "word": " fewer", "probability": 0.77490234375}, {"start": 263.75, "end": 264.01, "word": " than", "probability": 0.92724609375}, {"start": 264.01, "end": 264.23, "word": " three", "probability": 0.8720703125}, {"start": 264.23, "end": 264.55, "word": " days", "probability": 0.927734375}, {"start": 264.55, "end": 264.77, "word": " I", "probability": 0.79931640625}, {"start": 264.77, "end": 264.97, "word": " mean", "probability": 0.96240234375}, {"start": 264.97, "end": 265.43, "word": " maybe", "probability": 0.88037109375}, {"start": 265.43, "end": 265.59, "word": " in", "probability": 0.92578125}, {"start": 265.59, "end": 265.75, "word": " two", "probability": 0.9208984375}, {"start": 265.75, "end": 266.07, "word": " days", "probability": 0.92626953125}], "temperature": 1.0}, {"id": 10, "seek": 29317, "start": 266.51, "end": 293.17, "text": " That means a drug B is more effective than a drug A. So we want to know the difference between these two drugs. I mean, we have two samples. Some people used drug A and the other used drug B. And we want to see if there is a significant difference between the times that is used to reduce the headache. 
So that's one of the reasons why we use statistics.", "tokens": [663, 1355, 257, 4110, 363, 307, 544, 4942, 813, 257, 4110, 316, 13, 407, 321, 528, 281, 458, 264, 2649, 1296, 613, 732, 7766, 13, 286, 914, 11, 321, 362, 732, 10938, 13, 2188, 561, 1143, 4110, 316, 293, 264, 661, 1143, 4110, 363, 13, 400, 321, 528, 281, 536, 498, 456, 307, 257, 4776, 2649, 1296, 264, 1413, 300, 307, 1143, 281, 5407, 264, 23520, 13, 407, 300, 311, 472, 295, 264, 4112, 983, 321, 764, 12523, 13], "avg_logprob": -0.13906249832361936, "compression_ratio": 1.7317073170731707, "no_speech_prob": 0.0, "words": [{"start": 266.51, "end": 266.93, "word": " That", "probability": 0.802734375}, {"start": 266.93, "end": 267.33, "word": " means", "probability": 0.93017578125}, {"start": 267.33, "end": 267.81, "word": " a", "probability": 0.47265625}, {"start": 267.81, "end": 268.03, "word": " drug", "probability": 0.80859375}, {"start": 268.03, "end": 268.31, "word": " B", "probability": 0.96337890625}, {"start": 268.31, "end": 268.97, "word": " is", "probability": 0.9453125}, {"start": 268.97, "end": 269.39, "word": " more", "probability": 0.9404296875}, {"start": 269.39, "end": 270.03, "word": " effective", "probability": 0.873046875}, {"start": 270.03, "end": 270.43, "word": " than", "probability": 0.9482421875}, {"start": 270.43, "end": 270.59, "word": " a", "probability": 0.69970703125}, {"start": 270.59, "end": 270.81, "word": " drug", "probability": 0.947265625}, {"start": 270.81, "end": 270.99, "word": " A.", "probability": 0.9775390625}, {"start": 271.13, "end": 271.39, "word": " So", "probability": 0.87060546875}, {"start": 271.39, "end": 271.65, "word": " we", "probability": 0.95068359375}, {"start": 271.65, "end": 272.31, "word": " want", "probability": 0.697265625}, {"start": 272.31, "end": 272.45, "word": " to", "probability": 0.97021484375}, {"start": 272.45, "end": 272.53, "word": " know", "probability": 0.88427734375}, {"start": 272.53, "end": 272.67, "word": " the", "probability": 
0.92431640625}, {"start": 272.67, "end": 273.15, "word": " difference", "probability": 0.8564453125}, {"start": 273.15, "end": 273.61, "word": " between", "probability": 0.86181640625}, {"start": 273.61, "end": 274.51, "word": " these", "probability": 0.85986328125}, {"start": 274.51, "end": 274.91, "word": " two", "probability": 0.93310546875}, {"start": 274.91, "end": 275.29, "word": " drugs.", "probability": 0.849609375}, {"start": 275.39, "end": 275.47, "word": " I", "probability": 0.8701171875}, {"start": 275.47, "end": 275.59, "word": " mean,", "probability": 0.95947265625}, {"start": 275.61, "end": 275.71, "word": " we", "probability": 0.96240234375}, {"start": 275.71, "end": 275.83, "word": " have", "probability": 0.94921875}, {"start": 275.83, "end": 275.97, "word": " two", "probability": 0.9189453125}, {"start": 275.97, "end": 276.37, "word": " samples.", "probability": 0.890625}, {"start": 276.93, "end": 277.25, "word": " Some", "probability": 0.90966796875}, {"start": 277.25, "end": 277.61, "word": " people", "probability": 0.97314453125}, {"start": 277.61, "end": 277.97, "word": " used", "probability": 0.6962890625}, {"start": 277.97, "end": 278.35, "word": " drug", "probability": 0.82763671875}, {"start": 278.35, "end": 278.63, "word": " A", "probability": 0.99365234375}, {"start": 278.63, "end": 279.39, "word": " and", "probability": 0.7548828125}, {"start": 279.39, "end": 279.55, "word": " the", "probability": 0.8642578125}, {"start": 279.55, "end": 279.79, "word": " other", "probability": 0.82373046875}, {"start": 279.79, "end": 280.11, "word": " used", "probability": 0.755859375}, {"start": 280.11, "end": 280.39, "word": " drug", "probability": 0.74169921875}, {"start": 280.39, "end": 280.63, "word": " B.", "probability": 0.99755859375}, {"start": 280.71, "end": 280.81, "word": " And", "probability": 0.79296875}, {"start": 280.81, "end": 280.95, "word": " we", "probability": 0.9609375}, {"start": 280.95, "end": 281.19, "word": " want", 
"probability": 0.88525390625}, {"start": 281.19, "end": 281.31, "word": " to", "probability": 0.97119140625}, {"start": 281.31, "end": 281.55, "word": " see", "probability": 0.9248046875}, {"start": 281.55, "end": 282.07, "word": " if", "probability": 0.94677734375}, {"start": 282.07, "end": 282.25, "word": " there", "probability": 0.91162109375}, {"start": 282.25, "end": 282.45, "word": " is", "probability": 0.9443359375}, {"start": 282.45, "end": 282.69, "word": " a", "probability": 0.99853515625}, {"start": 282.69, "end": 283.19, "word": " significant", "probability": 0.857421875}, {"start": 283.19, "end": 283.81, "word": " difference", "probability": 0.85595703125}, {"start": 283.81, "end": 284.27, "word": " between", "probability": 0.86328125}, {"start": 284.27, "end": 285.09, "word": " the", "probability": 0.9248046875}, {"start": 285.09, "end": 285.61, "word": " times", "probability": 0.92919921875}, {"start": 285.61, "end": 286.15, "word": " that", "probability": 0.93359375}, {"start": 286.15, "end": 286.95, "word": " is", "probability": 0.92529296875}, {"start": 286.95, "end": 287.31, "word": " used", "probability": 0.923828125}, {"start": 287.31, "end": 287.69, "word": " to", "probability": 0.96826171875}, {"start": 287.69, "end": 288.27, "word": " reduce", "probability": 0.841796875}, {"start": 288.27, "end": 288.45, "word": " the", "probability": 0.92626953125}, {"start": 288.45, "end": 288.73, "word": " headache.", "probability": 0.8857421875}, {"start": 289.79, "end": 290.15, "word": " So", "probability": 0.94921875}, {"start": 290.15, "end": 290.45, "word": " that's", "probability": 0.962890625}, {"start": 290.45, "end": 290.63, "word": " one", "probability": 0.92919921875}, {"start": 290.63, "end": 290.77, "word": " of", "probability": 0.9697265625}, {"start": 290.77, "end": 290.89, "word": " the", "probability": 0.92431640625}, {"start": 290.89, "end": 291.15, "word": " reasons", "probability": 0.92822265625}, {"start": 291.15, "end": 291.41, 
"word": " why", "probability": 0.91259765625}, {"start": 291.41, "end": 291.57, "word": " we", "probability": 0.9560546875}, {"start": 291.57, "end": 291.87, "word": " use", "probability": 0.833984375}, {"start": 291.87, "end": 293.17, "word": " statistics.", "probability": 0.3720703125}], "temperature": 1.0}, {"id": 11, "seek": 32330, "start": 293.72, "end": 323.3, "text": " Sometimes an operation manager wants to monitor manufacturing process to find out whether the quality of a product being manufactured is conforming to a company's standards. Do you know what the meaning of company's standards? The regulations of the firm itself. Another example, suppose here in the school last year, we teach statistics by using method A.", "tokens": [4803, 364, 6916, 6598, 2738, 281, 6002, 11096, 1399, 281, 915, 484, 1968, 264, 3125, 295, 257, 1674, 885, 25738, 307, 18975, 278, 281, 257, 2237, 311, 7787, 13, 1144, 291, 458, 437, 264, 3620, 295, 2237, 311, 7787, 30, 440, 12563, 295, 264, 6174, 2564, 13, 3996, 1365, 11, 7297, 510, 294, 264, 1395, 1036, 1064, 11, 321, 2924, 12523, 538, 1228, 3170, 316, 13], "avg_logprob": -0.24603544064422153, "compression_ratio": 1.59375, "no_speech_prob": 0.0, "words": [{"start": 293.72, "end": 294.42, "word": " Sometimes", "probability": 0.74267578125}, {"start": 294.42, "end": 294.92, "word": " an", "probability": 0.611328125}, {"start": 294.92, "end": 295.26, "word": " operation", "probability": 0.85595703125}, {"start": 295.26, "end": 295.68, "word": " manager", "probability": 0.95458984375}, {"start": 295.68, "end": 295.98, "word": " wants", "probability": 0.7724609375}, {"start": 295.98, "end": 296.22, "word": " to", "probability": 0.96630859375}, {"start": 296.22, "end": 296.46, "word": " monitor", "probability": 0.397705078125}, {"start": 296.46, "end": 297.1, "word": " manufacturing", "probability": 0.72509765625}, {"start": 297.1, "end": 297.84, "word": " process", "probability": 0.91064453125}, {"start": 297.84, "end": 298.5, "word": 
" to", "probability": 0.7607421875}, {"start": 298.5, "end": 298.76, "word": " find", "probability": 0.89501953125}, {"start": 298.76, "end": 299.0, "word": " out", "probability": 0.8876953125}, {"start": 299.0, "end": 299.22, "word": " whether", "probability": 0.92724609375}, {"start": 299.22, "end": 299.46, "word": " the", "probability": 0.9208984375}, {"start": 299.46, "end": 299.8, "word": " quality", "probability": 0.85888671875}, {"start": 299.8, "end": 299.96, "word": " of", "probability": 0.9609375}, {"start": 299.96, "end": 300.06, "word": " a", "probability": 0.63916015625}, {"start": 300.06, "end": 300.38, "word": " product", "probability": 0.91259765625}, {"start": 300.38, "end": 300.72, "word": " being", "probability": 0.953125}, {"start": 300.72, "end": 301.3, "word": " manufactured", "probability": 0.89794921875}, {"start": 301.3, "end": 301.56, "word": " is", "probability": 0.85693359375}, {"start": 301.56, "end": 302.14, "word": " conforming", "probability": 0.95068359375}, {"start": 302.14, "end": 302.28, "word": " to", "probability": 0.96728515625}, {"start": 302.28, "end": 302.42, "word": " a", "probability": 0.65625}, {"start": 302.42, "end": 302.84, "word": " company's", "probability": 0.7314453125}, {"start": 302.84, "end": 303.2, "word": " standards.", "probability": 0.8896484375}, {"start": 304.58, "end": 305.02, "word": " Do", "probability": 0.79541015625}, {"start": 305.02, "end": 305.06, "word": " you", "probability": 0.96630859375}, {"start": 305.06, "end": 305.16, "word": " know", "probability": 0.892578125}, {"start": 305.16, "end": 305.32, "word": " what", "probability": 0.91162109375}, {"start": 305.32, "end": 305.46, "word": " the", "probability": 0.4111328125}, {"start": 305.46, "end": 305.6, "word": " meaning", "probability": 0.849609375}, {"start": 305.6, "end": 305.7, "word": " of", "probability": 0.96728515625}, {"start": 305.7, "end": 306.14, "word": " company's", "probability": 0.578857421875}, {"start": 306.14, "end": 
306.52, "word": " standards?", "probability": 0.9130859375}, {"start": 309.9, "end": 310.22, "word": " The", "probability": 0.1842041015625}, {"start": 310.22, "end": 310.66, "word": " regulations", "probability": 0.82177734375}, {"start": 310.66, "end": 310.88, "word": " of", "probability": 0.52001953125}, {"start": 310.88, "end": 311.42, "word": " the", "probability": 0.86181640625}, {"start": 311.42, "end": 312.5, "word": " firm", "probability": 0.436767578125}, {"start": 312.5, "end": 312.86, "word": " itself.", "probability": 0.69091796875}, {"start": 314.86, "end": 315.32, "word": " Another", "probability": 0.876953125}, {"start": 315.32, "end": 315.72, "word": " example,", "probability": 0.96826171875}, {"start": 315.82, "end": 316.18, "word": " suppose", "probability": 0.91259765625}, {"start": 316.18, "end": 316.8, "word": " here", "probability": 0.751953125}, {"start": 316.8, "end": 316.92, "word": " in", "probability": 0.904296875}, {"start": 316.92, "end": 317.06, "word": " the", "probability": 0.89892578125}, {"start": 317.06, "end": 317.34, "word": " school", "probability": 0.94482421875}, {"start": 317.34, "end": 318.78, "word": " last", "probability": 0.5537109375}, {"start": 318.78, "end": 319.12, "word": " year,", "probability": 0.93212890625}, {"start": 320.66, "end": 321.02, "word": " we", "probability": 0.9521484375}, {"start": 321.02, "end": 321.36, "word": " teach", "probability": 0.91845703125}, {"start": 321.36, "end": 321.9, "word": " statistics", "probability": 0.892578125}, {"start": 321.9, "end": 322.32, "word": " by", "probability": 0.9638671875}, {"start": 322.32, "end": 322.72, "word": " using", "probability": 0.94677734375}, {"start": 322.72, "end": 323.1, "word": " method", "probability": 0.76123046875}, {"start": 323.1, "end": 323.3, "word": " A.", "probability": 0.828125}], "temperature": 1.0}, {"id": 12, "seek": 35279, "start": 324.59, "end": 352.79, "text": " traditional method. 
This year we developed a new method for teaching and our goal is to see if the new method is better than method A which was used in last year. So we want to see if there is a big difference between scores or the average scores last year and this year. The same you can do for your weight. Suppose there are 20 students in this class", "tokens": [5164, 3170, 13, 639, 1064, 321, 4743, 257, 777, 3170, 337, 4571, 293, 527, 3387, 307, 281, 536, 498, 264, 777, 3170, 307, 1101, 813, 3170, 316, 597, 390, 1143, 294, 1036, 1064, 13, 407, 321, 528, 281, 536, 498, 456, 307, 257, 955, 2649, 1296, 13444, 420, 264, 4274, 13444, 1036, 1064, 293, 341, 1064, 13, 440, 912, 291, 393, 360, 337, 428, 3364, 13, 21360, 456, 366, 945, 1731, 294, 341, 1508], "avg_logprob": -0.1346874992052714, "compression_ratio": 1.7053140096618358, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 324.59, "end": 325.15, "word": " traditional", "probability": 0.66259765625}, {"start": 325.15, "end": 325.65, "word": " method.", "probability": 0.93359375}, {"start": 326.07, "end": 326.53, "word": " This", "probability": 0.873046875}, {"start": 326.53, "end": 326.77, "word": " year", "probability": 0.94140625}, {"start": 326.77, "end": 327.53, "word": " we", "probability": 0.49560546875}, {"start": 327.53, "end": 328.43, "word": " developed", "probability": 0.62060546875}, {"start": 328.43, "end": 328.65, "word": " a", "probability": 0.9755859375}, {"start": 328.65, "end": 328.81, "word": " new", "probability": 0.9130859375}, {"start": 328.81, "end": 329.09, "word": " method", "probability": 0.93701171875}, {"start": 329.09, "end": 329.35, "word": " for", "probability": 0.9521484375}, {"start": 329.35, "end": 329.69, "word": " teaching", "probability": 0.9208984375}, {"start": 329.69, "end": 330.45, "word": " and", "probability": 0.6259765625}, {"start": 330.45, "end": 330.73, "word": " our", "probability": 0.8916015625}, {"start": 330.73, "end": 330.93, "word": " goal", "probability": 
0.96923828125}, {"start": 330.93, "end": 331.09, "word": " is", "probability": 0.9443359375}, {"start": 331.09, "end": 331.21, "word": " to", "probability": 0.9658203125}, {"start": 331.21, "end": 331.39, "word": " see", "probability": 0.92431640625}, {"start": 331.39, "end": 331.63, "word": " if", "probability": 0.94970703125}, {"start": 331.63, "end": 331.85, "word": " the", "probability": 0.91064453125}, {"start": 331.85, "end": 332.07, "word": " new", "probability": 0.91162109375}, {"start": 332.07, "end": 332.37, "word": " method", "probability": 0.94189453125}, {"start": 332.37, "end": 333.21, "word": " is", "probability": 0.94873046875}, {"start": 333.21, "end": 334.29, "word": " better", "probability": 0.89111328125}, {"start": 334.29, "end": 334.67, "word": " than", "probability": 0.9423828125}, {"start": 334.67, "end": 335.09, "word": " method", "probability": 0.8818359375}, {"start": 335.09, "end": 335.39, "word": " A", "probability": 0.7861328125}, {"start": 335.39, "end": 335.63, "word": " which", "probability": 0.7197265625}, {"start": 335.63, "end": 335.87, "word": " was", "probability": 0.9541015625}, {"start": 335.87, "end": 336.11, "word": " used", "probability": 0.90771484375}, {"start": 336.11, "end": 336.27, "word": " in", "probability": 0.783203125}, {"start": 336.27, "end": 336.51, "word": " last", "probability": 0.84423828125}, {"start": 336.51, "end": 336.87, "word": " year.", "probability": 0.93896484375}, {"start": 337.15, "end": 337.43, "word": " So", "probability": 0.94384765625}, {"start": 337.43, "end": 337.55, "word": " we", "probability": 0.82470703125}, {"start": 337.55, "end": 337.73, "word": " want", "probability": 0.8154296875}, {"start": 337.73, "end": 337.87, "word": " to", "probability": 0.96337890625}, {"start": 337.87, "end": 338.07, "word": " see", "probability": 0.92333984375}, {"start": 338.07, "end": 338.29, "word": " if", "probability": 0.95068359375}, {"start": 338.29, "end": 338.49, "word": " there", "probability": 
0.9072265625}, {"start": 338.49, "end": 338.63, "word": " is", "probability": 0.93994140625}, {"start": 338.63, "end": 338.73, "word": " a", "probability": 0.99853515625}, {"start": 338.73, "end": 338.91, "word": " big", "probability": 0.92333984375}, {"start": 338.91, "end": 339.43, "word": " difference", "probability": 0.8623046875}, {"start": 339.43, "end": 339.93, "word": " between", "probability": 0.87255859375}, {"start": 339.93, "end": 340.65, "word": " scores", "probability": 0.7763671875}, {"start": 340.65, "end": 341.33, "word": " or", "probability": 0.8466796875}, {"start": 341.33, "end": 341.45, "word": " the", "probability": 0.88525390625}, {"start": 341.45, "end": 341.75, "word": " average", "probability": 0.7412109375}, {"start": 341.75, "end": 342.41, "word": " scores", "probability": 0.77783203125}, {"start": 342.41, "end": 343.83, "word": " last", "probability": 0.82958984375}, {"start": 343.83, "end": 344.19, "word": " year", "probability": 0.93359375}, {"start": 344.19, "end": 344.67, "word": " and", "probability": 0.91064453125}, {"start": 344.67, "end": 344.89, "word": " this", "probability": 0.94140625}, {"start": 344.89, "end": 345.15, "word": " year.", "probability": 0.9384765625}, {"start": 345.73, "end": 346.13, "word": " The", "probability": 0.88818359375}, {"start": 346.13, "end": 346.35, "word": " same", "probability": 0.90625}, {"start": 346.35, "end": 346.57, "word": " you", "probability": 0.94384765625}, {"start": 346.57, "end": 346.75, "word": " can", "probability": 0.94580078125}, {"start": 346.75, "end": 346.91, "word": " do", "probability": 0.96484375}, {"start": 346.91, "end": 347.31, "word": " for", "probability": 0.9501953125}, {"start": 347.31, "end": 347.83, "word": " your", "probability": 0.89306640625}, {"start": 347.83, "end": 348.27, "word": " weight.", "probability": 0.94775390625}, {"start": 349.47, "end": 349.93, "word": " Suppose", "probability": 0.81494140625}, {"start": 349.93, "end": 351.03, "word": " there", 
"probability": 0.875}, {"start": 351.03, "end": 351.27, "word": " are", "probability": 0.943359375}, {"start": 351.27, "end": 351.61, "word": " 20", "probability": 0.76025390625}, {"start": 351.61, "end": 352.01, "word": " students", "probability": 0.9736328125}, {"start": 352.01, "end": 352.21, "word": " in", "probability": 0.9384765625}, {"start": 352.21, "end": 352.35, "word": " this", "probability": 0.9443359375}, {"start": 352.35, "end": 352.79, "word": " class", "probability": 0.96728515625}], "temperature": 1.0}, {"id": 13, "seek": 38248, "start": 353.5, "end": 382.48, "text": " and their weights are high. And our goal is to reduce their weights. Suppose they have a regime or diet for three months or exercise, whatever it is, then after three months, we have new weights for these persons. And we want to see if the diet is effective. I mean, if the average weight was greater than", "tokens": [293, 641, 17443, 366, 1090, 13, 400, 527, 3387, 307, 281, 5407, 641, 17443, 13, 21360, 436, 362, 257, 13120, 420, 6339, 337, 1045, 2493, 420, 5380, 11, 2035, 309, 307, 11, 550, 934, 1045, 2493, 11, 321, 362, 777, 17443, 337, 613, 14453, 13, 400, 321, 528, 281, 536, 498, 264, 6339, 307, 4942, 13, 286, 914, 11, 498, 264, 4274, 3364, 390, 5044, 813], "avg_logprob": -0.17817164801839572, "compression_ratio": 1.6906077348066297, "no_speech_prob": 0.0, "words": [{"start": 353.5, "end": 353.94, "word": " and", "probability": 0.63916015625}, {"start": 353.94, "end": 354.18, "word": " their", "probability": 0.95849609375}, {"start": 354.18, "end": 354.44, "word": " weights", "probability": 0.8603515625}, {"start": 354.44, "end": 355.16, "word": " are", "probability": 0.9423828125}, {"start": 355.16, "end": 355.5, "word": " high.", "probability": 0.908203125}, {"start": 356.06, "end": 356.34, "word": " And", "probability": 0.8994140625}, {"start": 356.34, "end": 356.56, "word": " our", "probability": 0.88720703125}, {"start": 356.56, "end": 356.76, "word": " goal", "probability": 
0.9697265625}, {"start": 356.76, "end": 356.96, "word": " is", "probability": 0.9462890625}, {"start": 356.96, "end": 357.24, "word": " to", "probability": 0.9677734375}, {"start": 357.24, "end": 357.68, "word": " reduce", "probability": 0.86083984375}, {"start": 357.68, "end": 358.56, "word": " their", "probability": 0.96240234375}, {"start": 358.56, "end": 358.94, "word": " weights.", "probability": 0.82080078125}, {"start": 360.52, "end": 361.08, "word": " Suppose", "probability": 0.7919921875}, {"start": 361.08, "end": 364.28, "word": " they", "probability": 0.77099609375}, {"start": 364.28, "end": 365.84, "word": " have", "probability": 0.95166015625}, {"start": 365.84, "end": 367.18, "word": " a", "probability": 0.8984375}, {"start": 367.18, "end": 367.52, "word": " regime", "probability": 0.93994140625}, {"start": 367.52, "end": 367.76, "word": " or", "probability": 0.88134765625}, {"start": 367.76, "end": 368.06, "word": " diet", "probability": 0.89404296875}, {"start": 368.06, "end": 368.46, "word": " for", "probability": 0.9443359375}, {"start": 368.46, "end": 368.74, "word": " three", "probability": 0.79443359375}, {"start": 368.74, "end": 369.14, "word": " months", "probability": 0.82958984375}, {"start": 369.14, "end": 369.64, "word": " or", "probability": 0.64208984375}, {"start": 369.64, "end": 370.06, "word": " exercise,", "probability": 0.95556640625}, {"start": 370.2, "end": 370.38, "word": " whatever", "probability": 0.93212890625}, {"start": 370.38, "end": 370.62, "word": " it", "probability": 0.74755859375}, {"start": 370.62, "end": 370.76, "word": " is,", "probability": 0.9482421875}, {"start": 371.14, "end": 371.38, "word": " then", "probability": 0.82373046875}, {"start": 371.38, "end": 371.62, "word": " after", "probability": 0.78759765625}, {"start": 371.62, "end": 371.9, "word": " three", "probability": 0.91748046875}, {"start": 371.9, "end": 372.14, "word": " months,", "probability": 0.83154296875}, {"start": 372.22, "end": 372.36, 
"word": " we", "probability": 0.95556640625}, {"start": 372.36, "end": 372.68, "word": " have", "probability": 0.9404296875}, {"start": 372.68, "end": 373.4, "word": " new", "probability": 0.64794921875}, {"start": 373.4, "end": 374.24, "word": " weights", "probability": 0.8720703125}, {"start": 374.24, "end": 374.5, "word": " for", "probability": 0.947265625}, {"start": 374.5, "end": 374.86, "word": " these", "probability": 0.77978515625}, {"start": 374.86, "end": 376.26, "word": " persons.", "probability": 0.8466796875}, {"start": 376.52, "end": 376.82, "word": " And", "probability": 0.89990234375}, {"start": 376.82, "end": 376.9, "word": " we", "probability": 0.7236328125}, {"start": 376.9, "end": 377.06, "word": " want", "probability": 0.8818359375}, {"start": 377.06, "end": 377.16, "word": " to", "probability": 0.9677734375}, {"start": 377.16, "end": 377.3, "word": " see", "probability": 0.923828125}, {"start": 377.3, "end": 377.46, "word": " if", "probability": 0.94970703125}, {"start": 377.46, "end": 377.62, "word": " the", "probability": 0.84521484375}, {"start": 377.62, "end": 377.84, "word": " diet", "probability": 0.923828125}, {"start": 377.84, "end": 378.02, "word": " is", "probability": 0.93994140625}, {"start": 378.02, "end": 378.48, "word": " effective.", "probability": 0.86767578125}, {"start": 378.64, "end": 378.8, "word": " I", "probability": 0.98046875}, {"start": 378.8, "end": 379.02, "word": " mean,", "probability": 0.96435546875}, {"start": 379.46, "end": 379.7, "word": " if", "probability": 0.9521484375}, {"start": 379.7, "end": 379.84, "word": " the", "probability": 0.916015625}, {"start": 379.84, "end": 380.16, "word": " average", "probability": 0.79541015625}, {"start": 380.16, "end": 380.36, "word": " weight", "probability": 0.4697265625}, {"start": 380.36, "end": 380.88, "word": " was", "probability": 0.93212890625}, {"start": 380.88, "end": 382.06, "word": " greater", "probability": 0.90234375}, {"start": 382.06, "end": 382.48, "word": 
" than", "probability": 0.9462890625}], "temperature": 1.0}, {"id": 14, "seek": 40880, "start": 382.98, "end": 408.8, "text": " or smaller than before diet. Is it clear? So there are many, many reasons behind using statistics and collecting data. Now, what are the sources of data? Since statistics mainly, first step, we have to collect data. Now, what are the sources of the data? Generally speaking, there are two sources. One is called", "tokens": [420, 4356, 813, 949, 6339, 13, 1119, 309, 1850, 30, 407, 456, 366, 867, 11, 867, 4112, 2261, 1228, 12523, 293, 12510, 1412, 13, 823, 11, 437, 366, 264, 7139, 295, 1412, 30, 4162, 12523, 8704, 11, 700, 1823, 11, 321, 362, 281, 2500, 1412, 13, 823, 11, 437, 366, 264, 7139, 295, 264, 1412, 30, 21082, 4124, 11, 456, 366, 732, 7139, 13, 1485, 307, 1219], "avg_logprob": -0.15268842108985958, "compression_ratio": 1.7142857142857142, "no_speech_prob": 0.0, "words": [{"start": 382.98, "end": 383.34, "word": " or", "probability": 0.6923828125}, {"start": 383.34, "end": 383.8, "word": " smaller", "probability": 0.85986328125}, {"start": 383.8, "end": 384.12, "word": " than", "probability": 0.9482421875}, {"start": 384.12, "end": 384.48, "word": " before", "probability": 0.8740234375}, {"start": 384.48, "end": 384.8, "word": " diet.", "probability": 0.8671875}, {"start": 386.16, "end": 386.68, "word": " Is", "probability": 0.92138671875}, {"start": 386.68, "end": 386.8, "word": " it", "probability": 0.92626953125}, {"start": 386.8, "end": 387.06, "word": " clear?", "probability": 0.89208984375}, {"start": 387.4, "end": 387.84, "word": " So", "probability": 0.9072265625}, {"start": 387.84, "end": 388.0, "word": " there", "probability": 0.77099609375}, {"start": 388.0, "end": 388.16, "word": " are", "probability": 0.93994140625}, {"start": 388.16, "end": 388.4, "word": " many,", "probability": 0.916015625}, {"start": 388.5, "end": 388.6, "word": " many", "probability": 0.9150390625}, {"start": 388.6, "end": 389.1, "word": " reasons", 
"probability": 0.9287109375}, {"start": 389.1, "end": 389.68, "word": " behind", "probability": 0.931640625}, {"start": 389.68, "end": 390.16, "word": " using", "probability": 0.927734375}, {"start": 390.16, "end": 390.82, "word": " statistics", "probability": 0.89697265625}, {"start": 390.82, "end": 391.46, "word": " and", "probability": 0.92138671875}, {"start": 391.46, "end": 391.92, "word": " collecting", "probability": 0.87841796875}, {"start": 391.92, "end": 393.36, "word": " data.", "probability": 0.90673828125}, {"start": 394.84, "end": 395.44, "word": " Now,", "probability": 0.95263671875}, {"start": 395.52, "end": 395.68, "word": " what", "probability": 0.94384765625}, {"start": 395.68, "end": 395.88, "word": " are", "probability": 0.939453125}, {"start": 395.88, "end": 396.06, "word": " the", "probability": 0.92138671875}, {"start": 396.06, "end": 396.36, "word": " sources", "probability": 0.8369140625}, {"start": 396.36, "end": 396.58, "word": " of", "probability": 0.97119140625}, {"start": 396.58, "end": 396.76, "word": " data?", "probability": 0.92041015625}, {"start": 397.2, "end": 397.5, "word": " Since", "probability": 0.86474609375}, {"start": 397.5, "end": 398.34, "word": " statistics", "probability": 0.92724609375}, {"start": 398.34, "end": 399.6, "word": " mainly,", "probability": 0.6376953125}, {"start": 400.16, "end": 400.68, "word": " first", "probability": 0.52685546875}, {"start": 400.68, "end": 401.04, "word": " step,", "probability": 0.943359375}, {"start": 401.12, "end": 401.18, "word": " we", "probability": 0.95556640625}, {"start": 401.18, "end": 401.38, "word": " have", "probability": 0.94384765625}, {"start": 401.38, "end": 401.5, "word": " to", "probability": 0.9716796875}, {"start": 401.5, "end": 401.84, "word": " collect", "probability": 0.88134765625}, {"start": 401.84, "end": 402.24, "word": " data.", "probability": 0.90380859375}, {"start": 402.38, "end": 402.56, "word": " Now,", "probability": 0.96435546875}, {"start": 402.7, 
"end": 402.96, "word": " what", "probability": 0.9423828125}, {"start": 402.96, "end": 403.16, "word": " are", "probability": 0.93896484375}, {"start": 403.16, "end": 403.36, "word": " the", "probability": 0.92138671875}, {"start": 403.36, "end": 403.64, "word": " sources", "probability": 0.8251953125}, {"start": 403.64, "end": 403.8, "word": " of", "probability": 0.9677734375}, {"start": 403.8, "end": 403.9, "word": " the", "probability": 0.8818359375}, {"start": 403.9, "end": 404.12, "word": " data?", "probability": 0.92724609375}, {"start": 405.36, "end": 405.78, "word": " Generally", "probability": 0.58154296875}, {"start": 405.78, "end": 406.18, "word": " speaking,", "probability": 0.876953125}, {"start": 406.28, "end": 406.4, "word": " there", "probability": 0.90380859375}, {"start": 406.4, "end": 406.52, "word": " are", "probability": 0.939453125}, {"start": 406.52, "end": 406.66, "word": " two", "probability": 0.93603515625}, {"start": 406.66, "end": 407.08, "word": " sources.", "probability": 0.84814453125}, {"start": 407.8, "end": 408.2, "word": " One", "probability": 0.93359375}, {"start": 408.2, "end": 408.42, "word": " is", "probability": 0.94580078125}, {"start": 408.42, "end": 408.8, "word": " called", "probability": 0.9072265625}], "temperature": 1.0}, {"id": 15, "seek": 43423, "start": 410.11, "end": 434.23, "text": " The primary sources and the others secondary sources. What do you think is the difference between these two? I mean, what's the difference between primary and secondary sources? The primary source is the collector of the data. He is the analyzer. He analyzes it. 
And then the secondary, who collects the data, isn't there.", "tokens": [440, 6194, 7139, 293, 264, 2357, 11396, 7139, 13, 708, 360, 291, 519, 307, 264, 2649, 1296, 613, 732, 30, 286, 914, 11, 437, 311, 264, 2649, 1296, 6194, 293, 11396, 7139, 30, 440, 6194, 4009, 307, 264, 23960, 295, 264, 1412, 13, 634, 307, 264, 6459, 4527, 13, 634, 6459, 12214, 309, 13, 400, 550, 264, 11396, 11, 567, 39897, 264, 1412, 11, 1943, 380, 456, 13], "avg_logprob": -0.33378621979036194, "compression_ratio": 1.9112426035502958, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 410.11, "end": 410.41, "word": " The", "probability": 0.148193359375}, {"start": 410.41, "end": 410.71, "word": " primary", "probability": 0.9404296875}, {"start": 410.71, "end": 411.15, "word": " sources", "probability": 0.7978515625}, {"start": 411.15, "end": 412.03, "word": " and", "probability": 0.8330078125}, {"start": 412.03, "end": 412.17, "word": " the", "probability": 0.5634765625}, {"start": 412.17, "end": 412.43, "word": " others", "probability": 0.556640625}, {"start": 412.43, "end": 413.27, "word": " secondary", "probability": 0.385498046875}, {"start": 413.27, "end": 414.79, "word": " sources.", "probability": 0.80615234375}, {"start": 415.09, "end": 415.29, "word": " What", "probability": 0.87451171875}, {"start": 415.29, "end": 415.39, "word": " do", "probability": 0.93310546875}, {"start": 415.39, "end": 415.47, "word": " you", "probability": 0.9638671875}, {"start": 415.47, "end": 415.63, "word": " think", "probability": 0.919921875}, {"start": 415.63, "end": 415.73, "word": " is", "probability": 0.407470703125}, {"start": 415.73, "end": 415.77, "word": " the", "probability": 0.92041015625}, {"start": 415.77, "end": 416.13, "word": " difference", "probability": 0.8583984375}, {"start": 416.13, "end": 416.45, "word": " between", "probability": 0.86865234375}, {"start": 416.45, "end": 416.67, "word": " these", "probability": 0.85107421875}, {"start": 416.67, "end": 416.85, "word": " 
two?", "probability": 0.9189453125}, {"start": 417.23, "end": 417.33, "word": " I", "probability": 0.869140625}, {"start": 417.33, "end": 417.51, "word": " mean,", "probability": 0.970703125}, {"start": 417.57, "end": 417.75, "word": " what's", "probability": 0.8916015625}, {"start": 417.75, "end": 417.83, "word": " the", "probability": 0.9248046875}, {"start": 417.83, "end": 418.11, "word": " difference", "probability": 0.87158203125}, {"start": 418.11, "end": 418.49, "word": " between", "probability": 0.8857421875}, {"start": 418.49, "end": 419.87, "word": " primary", "probability": 0.591796875}, {"start": 419.87, "end": 420.39, "word": " and", "probability": 0.89453125}, {"start": 420.39, "end": 420.85, "word": " secondary", "probability": 0.85546875}, {"start": 420.85, "end": 422.73, "word": " sources?", "probability": 0.814453125}, {"start": 423.51, "end": 424.11, "word": " The", "probability": 0.34228515625}, {"start": 424.11, "end": 424.43, "word": " primary", "probability": 0.966796875}, {"start": 424.43, "end": 424.73, "word": " source", "probability": 0.6435546875}, {"start": 424.73, "end": 424.97, "word": " is", "probability": 0.82373046875}, {"start": 424.97, "end": 425.23, "word": " the", "probability": 0.89208984375}, {"start": 425.23, "end": 425.83, "word": " collector", "probability": 0.6005859375}, {"start": 425.83, "end": 426.19, "word": " of", "probability": 0.93603515625}, {"start": 426.19, "end": 426.49, "word": " the", "probability": 0.83935546875}, {"start": 426.49, "end": 427.25, "word": " data.", "probability": 0.935546875}, {"start": 427.67, "end": 427.81, "word": " He", "probability": 0.92578125}, {"start": 427.81, "end": 428.11, "word": " is", "probability": 0.83837890625}, {"start": 428.11, "end": 428.49, "word": " the", "probability": 0.87158203125}, {"start": 428.49, "end": 429.03, "word": " analyzer.", "probability": 0.75341796875}, {"start": 429.41, "end": 429.59, "word": " He", "probability": 0.75537109375}, {"start": 429.59, 
"end": 430.67, "word": " analyzes", "probability": 0.6685791015625}, {"start": 430.67, "end": 430.85, "word": " it.", "probability": 0.63232421875}, {"start": 430.97, "end": 431.07, "word": " And", "probability": 0.84130859375}, {"start": 431.07, "end": 431.29, "word": " then", "probability": 0.537109375}, {"start": 431.29, "end": 431.55, "word": " the", "probability": 0.697265625}, {"start": 431.55, "end": 431.89, "word": " secondary,", "probability": 0.8427734375}, {"start": 432.15, "end": 432.79, "word": " who", "probability": 0.412353515625}, {"start": 432.79, "end": 433.25, "word": " collects", "probability": 0.87353515625}, {"start": 433.25, "end": 433.45, "word": " the", "probability": 0.8935546875}, {"start": 433.45, "end": 433.65, "word": " data,", "probability": 0.9306640625}, {"start": 433.73, "end": 434.01, "word": " isn't", "probability": 0.7763671875}, {"start": 434.01, "end": 434.23, "word": " there.", "probability": 0.26806640625}], "temperature": 1.0}, {"id": 16, "seek": 45935, "start": 436.03, "end": 459.35, "text": " That's correct. So the primary sources means the researcher by himself. He should collect the data, then he can use the data to do his analysis. That's for the primary. Now, the primary could be data from political survey. 
You can distribute questionnaire, for example, data collected from an experiment.", "tokens": [663, 311, 3006, 13, 407, 264, 6194, 7139, 1355, 264, 21751, 538, 3647, 13, 634, 820, 2500, 264, 1412, 11, 550, 415, 393, 764, 264, 1412, 281, 360, 702, 5215, 13, 663, 311, 337, 264, 6194, 13, 823, 11, 264, 6194, 727, 312, 1412, 490, 3905, 8984, 13, 509, 393, 20594, 44702, 11, 337, 1365, 11, 1412, 11087, 490, 364, 5120, 13], "avg_logprob": -0.1502976223589882, "compression_ratio": 1.5968586387434556, "no_speech_prob": 0.0, "words": [{"start": 436.03, "end": 436.49, "word": " That's", "probability": 0.80615234375}, {"start": 436.49, "end": 436.79, "word": " correct.", "probability": 0.9013671875}, {"start": 437.05, "end": 437.15, "word": " So", "probability": 0.8955078125}, {"start": 437.15, "end": 437.33, "word": " the", "probability": 0.67333984375}, {"start": 437.33, "end": 437.79, "word": " primary", "probability": 0.9287109375}, {"start": 437.79, "end": 438.25, "word": " sources", "probability": 0.61376953125}, {"start": 438.25, "end": 438.67, "word": " means", "probability": 0.7578125}, {"start": 438.67, "end": 438.91, "word": " the", "probability": 0.87646484375}, {"start": 438.91, "end": 439.33, "word": " researcher", "probability": 0.95947265625}, {"start": 439.33, "end": 439.71, "word": " by", "probability": 0.8818359375}, {"start": 439.71, "end": 440.23, "word": " himself.", "probability": 0.89990234375}, {"start": 440.61, "end": 441.05, "word": " He", "probability": 0.9619140625}, {"start": 441.05, "end": 441.41, "word": " should", "probability": 0.94775390625}, {"start": 441.41, "end": 441.95, "word": " collect", "probability": 0.888671875}, {"start": 441.95, "end": 442.17, "word": " the", "probability": 0.91796875}, {"start": 442.17, "end": 442.49, "word": " data,", "probability": 0.9326171875}, {"start": 443.89, "end": 444.25, "word": " then", "probability": 0.84765625}, {"start": 444.25, "end": 444.67, "word": " he", "probability": 0.958984375}, {"start": 
444.67, "end": 444.91, "word": " can", "probability": 0.9443359375}, {"start": 444.91, "end": 445.13, "word": " use", "probability": 0.875}, {"start": 445.13, "end": 445.29, "word": " the", "probability": 0.91015625}, {"start": 445.29, "end": 445.55, "word": " data", "probability": 0.92822265625}, {"start": 445.55, "end": 445.99, "word": " to", "probability": 0.966796875}, {"start": 445.99, "end": 446.93, "word": " do", "probability": 0.958984375}, {"start": 446.93, "end": 447.33, "word": " his", "probability": 0.962890625}, {"start": 447.33, "end": 447.75, "word": " analysis.", "probability": 0.86376953125}, {"start": 448.31, "end": 448.67, "word": " That's", "probability": 0.952392578125}, {"start": 448.67, "end": 448.83, "word": " for", "probability": 0.93212890625}, {"start": 448.83, "end": 448.99, "word": " the", "probability": 0.91943359375}, {"start": 448.99, "end": 449.33, "word": " primary.", "probability": 0.9296875}, {"start": 450.03, "end": 450.31, "word": " Now,", "probability": 0.95654296875}, {"start": 450.45, "end": 450.59, "word": " the", "probability": 0.91064453125}, {"start": 450.59, "end": 450.91, "word": " primary", "probability": 0.9453125}, {"start": 450.91, "end": 451.25, "word": " could", "probability": 0.64794921875}, {"start": 451.25, "end": 451.55, "word": " be", "probability": 0.95849609375}, {"start": 451.55, "end": 452.01, "word": " data", "probability": 0.92626953125}, {"start": 452.01, "end": 452.33, "word": " from", "probability": 0.88525390625}, {"start": 452.33, "end": 452.69, "word": " political", "probability": 0.84765625}, {"start": 452.69, "end": 453.11, "word": " survey.", "probability": 0.88525390625}, {"start": 454.25, "end": 454.47, "word": " You", "probability": 0.9599609375}, {"start": 454.47, "end": 454.75, "word": " can", "probability": 0.93896484375}, {"start": 454.75, "end": 455.23, "word": " distribute", "probability": 0.9326171875}, {"start": 455.23, "end": 455.73, "word": " questionnaire,", "probability": 
0.64208984375}, {"start": 455.89, "end": 455.97, "word": " for", "probability": 0.94873046875}, {"start": 455.97, "end": 456.33, "word": " example,", "probability": 0.97509765625}, {"start": 457.41, "end": 457.69, "word": " data", "probability": 0.935546875}, {"start": 457.69, "end": 458.13, "word": " collected", "probability": 0.89599609375}, {"start": 458.13, "end": 458.41, "word": " from", "probability": 0.8837890625}, {"start": 458.41, "end": 458.75, "word": " an", "probability": 0.9560546875}, {"start": 458.75, "end": 459.35, "word": " experiment.", "probability": 0.95166015625}], "temperature": 1.0}, {"id": 17, "seek": 48193, "start": 459.91, "end": 481.93, "text": " I mean maybe control or experimental groups. We have two groups, maybe healthy people and patient people. So that's experimental group. Or observed data. That's the primary sources. Secondary sources, the person performing data analysis is not the data collector. So he obtained the data from other sources.", "tokens": [286, 914, 1310, 1969, 420, 17069, 3935, 13, 492, 362, 732, 3935, 11, 1310, 4627, 561, 293, 4537, 561, 13, 407, 300, 311, 17069, 1594, 13, 1610, 13095, 1412, 13, 663, 311, 264, 6194, 7139, 13, 5736, 822, 7139, 11, 264, 954, 10205, 1412, 5215, 307, 406, 264, 1412, 23960, 13, 407, 415, 14879, 264, 1412, 490, 661, 7139, 13], "avg_logprob": -0.14062499413724805, "compression_ratio": 1.6470588235294117, "no_speech_prob": 0.0, "words": [{"start": 459.91, "end": 460.23, "word": " I", "probability": 0.79638671875}, {"start": 460.23, "end": 460.37, "word": " mean", "probability": 0.9677734375}, {"start": 460.37, "end": 460.61, "word": " maybe", "probability": 0.51513671875}, {"start": 460.61, "end": 461.17, "word": " control", "probability": 0.8388671875}, {"start": 461.17, "end": 461.83, "word": " or", "probability": 0.93408203125}, {"start": 461.83, "end": 462.53, "word": " experimental", "probability": 0.865234375}, {"start": 462.53, "end": 462.97, "word": " groups.", "probability": 
0.94873046875}, {"start": 463.09, "end": 463.13, "word": " We", "probability": 0.96240234375}, {"start": 463.13, "end": 463.39, "word": " have", "probability": 0.94873046875}, {"start": 463.39, "end": 463.87, "word": " two", "probability": 0.93603515625}, {"start": 463.87, "end": 464.21, "word": " groups,", "probability": 0.9482421875}, {"start": 464.37, "end": 464.57, "word": " maybe", "probability": 0.91650390625}, {"start": 464.57, "end": 465.31, "word": " healthy", "probability": 0.8935546875}, {"start": 465.31, "end": 465.73, "word": " people", "probability": 0.97119140625}, {"start": 465.73, "end": 466.13, "word": " and", "probability": 0.9296875}, {"start": 466.13, "end": 466.47, "word": " patient", "probability": 0.92919921875}, {"start": 466.47, "end": 466.87, "word": " people.", "probability": 0.96484375}, {"start": 467.17, "end": 467.43, "word": " So", "probability": 0.95458984375}, {"start": 467.43, "end": 467.65, "word": " that's", "probability": 0.94775390625}, {"start": 467.65, "end": 468.03, "word": " experimental", "probability": 0.8359375}, {"start": 468.03, "end": 468.49, "word": " group.", "probability": 0.95751953125}, {"start": 469.01, "end": 469.21, "word": " Or", "probability": 0.8212890625}, {"start": 469.21, "end": 469.59, "word": " observed", "probability": 0.5341796875}, {"start": 469.59, "end": 469.89, "word": " data.", "probability": 0.92431640625}, {"start": 470.07, "end": 470.75, "word": " That's", "probability": 0.95166015625}, {"start": 470.75, "end": 471.03, "word": " the", "probability": 0.82861328125}, {"start": 471.03, "end": 471.49, "word": " primary", "probability": 0.9296875}, {"start": 471.49, "end": 473.39, "word": " sources.", "probability": 0.82568359375}, {"start": 473.87, "end": 474.39, "word": " Secondary", "probability": 0.961669921875}, {"start": 474.39, "end": 474.73, "word": " sources,", "probability": 0.82177734375}, {"start": 475.39, "end": 475.47, "word": " the", "probability": 0.92431640625}, {"start": 475.47, 
"end": 475.71, "word": " person", "probability": 0.90478515625}, {"start": 475.71, "end": 476.07, "word": " performing", "probability": 0.65625}, {"start": 476.07, "end": 476.45, "word": " data", "probability": 0.9130859375}, {"start": 476.45, "end": 476.87, "word": " analysis", "probability": 0.890625}, {"start": 476.87, "end": 477.21, "word": " is", "probability": 0.88818359375}, {"start": 477.21, "end": 477.47, "word": " not", "probability": 0.94873046875}, {"start": 477.47, "end": 477.69, "word": " the", "probability": 0.92041015625}, {"start": 477.69, "end": 477.91, "word": " data", "probability": 0.9365234375}, {"start": 477.91, "end": 478.33, "word": " collector.", "probability": 0.8271484375}, {"start": 478.97, "end": 479.19, "word": " So", "probability": 0.92724609375}, {"start": 479.19, "end": 479.35, "word": " he", "probability": 0.94189453125}, {"start": 479.35, "end": 480.31, "word": " obtained", "probability": 0.82470703125}, {"start": 480.31, "end": 480.47, "word": " the", "probability": 0.90966796875}, {"start": 480.47, "end": 480.73, "word": " data", "probability": 0.9296875}, {"start": 480.73, "end": 481.01, "word": " from", "probability": 0.88330078125}, {"start": 481.01, "end": 481.39, "word": " other", "probability": 0.88671875}, {"start": 481.39, "end": 481.93, "word": " sources.", "probability": 0.84228515625}], "temperature": 1.0}, {"id": 18, "seek": 51062, "start": 482.34, "end": 510.62, "text": " For example, it could be analyzing census data or for example, examining data from print journals or data published on the internet. So maybe he goes to the Ministry of Education and he can get some data. So the data is already there and he just used the data to do some analysis. So that's the difference between a primary and secondary sources. 
So primary, the researcher himself,", "tokens": [1171, 1365, 11, 309, 727, 312, 23663, 23725, 1412, 420, 337, 1365, 11, 34662, 1412, 490, 4482, 29621, 420, 1412, 6572, 322, 264, 4705, 13, 407, 1310, 415, 1709, 281, 264, 19720, 295, 10680, 293, 415, 393, 483, 512, 1412, 13, 407, 264, 1412, 307, 1217, 456, 293, 415, 445, 1143, 264, 1412, 281, 360, 512, 5215, 13, 407, 300, 311, 264, 2649, 1296, 257, 6194, 293, 11396, 7139, 13, 407, 6194, 11, 264, 21751, 3647, 11], "avg_logprob": -0.13120993685263854, "compression_ratio": 1.6872246696035242, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 482.34, "end": 482.66, "word": " For", "probability": 0.91162109375}, {"start": 482.66, "end": 483.0, "word": " example,", "probability": 0.9755859375}, {"start": 483.48, "end": 483.7, "word": " it", "probability": 0.9560546875}, {"start": 483.7, "end": 483.88, "word": " could", "probability": 0.88671875}, {"start": 483.88, "end": 484.14, "word": " be", "probability": 0.9619140625}, {"start": 484.14, "end": 484.72, "word": " analyzing", "probability": 0.814453125}, {"start": 484.72, "end": 485.56, "word": " census", "probability": 0.84033203125}, {"start": 485.56, "end": 485.96, "word": " data", "probability": 0.93505859375}, {"start": 485.96, "end": 486.26, "word": " or", "probability": 0.68310546875}, {"start": 486.26, "end": 486.42, "word": " for", "probability": 0.67041015625}, {"start": 486.42, "end": 486.74, "word": " example,", "probability": 0.97412109375}, {"start": 486.84, "end": 487.14, "word": " examining", "probability": 0.94287109375}, {"start": 487.14, "end": 487.42, "word": " data", "probability": 0.931640625}, {"start": 487.42, "end": 487.68, "word": " from", "probability": 0.89794921875}, {"start": 487.68, "end": 488.02, "word": " print", "probability": 0.75390625}, {"start": 488.02, "end": 488.5, "word": " journals", "probability": 0.931640625}, {"start": 488.5, "end": 489.18, "word": " or", "probability": 0.8583984375}, {"start": 489.18, "end": 489.46, 
"word": " data", "probability": 0.935546875}, {"start": 489.46, "end": 489.86, "word": " published", "probability": 0.7529296875}, {"start": 489.86, "end": 490.06, "word": " on", "probability": 0.94775390625}, {"start": 490.06, "end": 490.16, "word": " the", "probability": 0.91748046875}, {"start": 490.16, "end": 490.46, "word": " internet.", "probability": 0.50927734375}, {"start": 491.34, "end": 491.44, "word": " So", "probability": 0.8701171875}, {"start": 491.44, "end": 491.8, "word": " maybe", "probability": 0.8359375}, {"start": 491.8, "end": 492.06, "word": " he", "probability": 0.93408203125}, {"start": 492.06, "end": 493.14, "word": " goes", "probability": 0.900390625}, {"start": 493.14, "end": 493.68, "word": " to", "probability": 0.93115234375}, {"start": 493.68, "end": 494.08, "word": " the", "probability": 0.9189453125}, {"start": 494.08, "end": 494.48, "word": " Ministry", "probability": 0.78955078125}, {"start": 494.48, "end": 494.78, "word": " of", "probability": 0.97119140625}, {"start": 494.78, "end": 495.24, "word": " Education", "probability": 0.8505859375}, {"start": 495.24, "end": 495.9, "word": " and", "probability": 0.8134765625}, {"start": 495.9, "end": 496.06, "word": " he", "probability": 0.96630859375}, {"start": 496.06, "end": 496.3, "word": " can", "probability": 0.94384765625}, {"start": 496.3, "end": 496.66, "word": " get", "probability": 0.94482421875}, {"start": 496.66, "end": 497.0, "word": " some", "probability": 0.91015625}, {"start": 497.0, "end": 497.32, "word": " data.", "probability": 0.919921875}, {"start": 498.06, "end": 498.16, "word": " So", "probability": 0.9501953125}, {"start": 498.16, "end": 498.32, "word": " the", "probability": 0.91455078125}, {"start": 498.32, "end": 498.58, "word": " data", "probability": 0.92626953125}, {"start": 498.58, "end": 498.82, "word": " is", "probability": 0.9462890625}, {"start": 498.82, "end": 499.12, "word": " already", "probability": 0.92919921875}, {"start": 499.12, "end": 499.5, 
"word": " there", "probability": 0.9130859375}, {"start": 499.5, "end": 500.14, "word": " and", "probability": 0.654296875}, {"start": 500.14, "end": 500.38, "word": " he", "probability": 0.962890625}, {"start": 500.38, "end": 500.72, "word": " just", "probability": 0.9111328125}, {"start": 500.72, "end": 501.06, "word": " used", "probability": 0.56103515625}, {"start": 501.06, "end": 501.2, "word": " the", "probability": 0.9140625}, {"start": 501.2, "end": 501.44, "word": " data", "probability": 0.93017578125}, {"start": 501.44, "end": 501.78, "word": " to", "probability": 0.9697265625}, {"start": 501.78, "end": 502.22, "word": " do", "probability": 0.95849609375}, {"start": 502.22, "end": 502.52, "word": " some", "probability": 0.91552734375}, {"start": 502.52, "end": 502.94, "word": " analysis.", "probability": 0.8583984375}, {"start": 503.42, "end": 503.62, "word": " So", "probability": 0.9619140625}, {"start": 503.62, "end": 503.86, "word": " that's", "probability": 0.8935546875}, {"start": 503.86, "end": 503.98, "word": " the", "probability": 0.90087890625}, {"start": 503.98, "end": 504.38, "word": " difference", "probability": 0.85888671875}, {"start": 504.38, "end": 504.9, "word": " between", "probability": 0.87646484375}, {"start": 504.9, "end": 505.54, "word": " a", "probability": 0.7392578125}, {"start": 505.54, "end": 505.9, "word": " primary", "probability": 0.91064453125}, {"start": 505.9, "end": 506.5, "word": " and", "probability": 0.94189453125}, {"start": 506.5, "end": 506.94, "word": " secondary", "probability": 0.94970703125}, {"start": 506.94, "end": 507.38, "word": " sources.", "probability": 0.794921875}, {"start": 507.78, "end": 507.9, "word": " So", "probability": 0.94677734375}, {"start": 507.9, "end": 508.3, "word": " primary,", "probability": 0.8203125}, {"start": 509.3, "end": 509.42, "word": " the", "probability": 0.916015625}, {"start": 509.42, "end": 509.86, "word": " researcher", "probability": 0.94677734375}, {"start": 509.86, 
"end": 510.62, "word": " himself,", "probability": 0.89013671875}], "temperature": 1.0}, {"id": 19, "seek": 53930, "start": 511.99, "end": 539.31, "text": " should collect the data by using one of the tools, either survey, questionnaire, experiment, and so on. But secondary, you can use the data that is published in the internet, for example, in the books, in governments and NGOs and so on. So these are the two sources of data. Sources of data fall into four categories. Number one, data distributed by an organization or an individual.", "tokens": [820, 2500, 264, 1412, 538, 1228, 472, 295, 264, 3873, 11, 2139, 8984, 11, 44702, 11, 5120, 11, 293, 370, 322, 13, 583, 11396, 11, 291, 393, 764, 264, 1412, 300, 307, 6572, 294, 264, 4705, 11, 337, 1365, 11, 294, 264, 3642, 11, 294, 11280, 293, 46454, 293, 370, 322, 13, 407, 613, 366, 264, 732, 7139, 295, 1412, 13, 318, 2749, 295, 1412, 2100, 666, 1451, 10479, 13, 5118, 472, 11, 1412, 12631, 538, 364, 4475, 420, 364, 2609, 13], "avg_logprob": -0.13187123063098954, "compression_ratio": 1.6340425531914893, "no_speech_prob": 0.0, "words": [{"start": 511.99, "end": 512.53, "word": " should", "probability": 0.943359375}, {"start": 512.53, "end": 512.97, "word": " collect", "probability": 0.8876953125}, {"start": 512.97, "end": 513.17, "word": " the", "probability": 0.8828125}, {"start": 513.17, "end": 513.45, "word": " data", "probability": 0.9365234375}, {"start": 513.45, "end": 513.65, "word": " by", "probability": 0.9638671875}, {"start": 513.65, "end": 513.97, "word": " using", "probability": 0.9287109375}, {"start": 513.97, "end": 514.23, "word": " one", "probability": 0.92822265625}, {"start": 514.23, "end": 514.37, "word": " of", "probability": 0.96728515625}, {"start": 514.37, "end": 514.49, "word": " the", "probability": 0.92333984375}, {"start": 514.49, "end": 514.79, "word": " tools,", "probability": 0.8876953125}, {"start": 514.91, "end": 515.11, "word": " either", "probability": 0.923828125}, {"start": 515.11, 
"end": 515.59, "word": " survey,", "probability": 0.85107421875}, {"start": 516.11, "end": 516.63, "word": " questionnaire,", "probability": 0.9521484375}, {"start": 517.03, "end": 517.61, "word": " experiment,", "probability": 0.94873046875}, {"start": 517.79, "end": 517.95, "word": " and", "probability": 0.93994140625}, {"start": 517.95, "end": 518.11, "word": " so", "probability": 0.9560546875}, {"start": 518.11, "end": 518.33, "word": " on.", "probability": 0.94580078125}, {"start": 518.65, "end": 519.05, "word": " But", "probability": 0.92578125}, {"start": 519.05, "end": 519.49, "word": " secondary,", "probability": 0.42333984375}, {"start": 519.67, "end": 519.77, "word": " you", "probability": 0.962890625}, {"start": 519.77, "end": 519.97, "word": " can", "probability": 0.94580078125}, {"start": 519.97, "end": 520.19, "word": " use", "probability": 0.87890625}, {"start": 520.19, "end": 520.33, "word": " the", "probability": 0.91845703125}, {"start": 520.33, "end": 520.63, "word": " data", "probability": 0.9306640625}, {"start": 520.63, "end": 520.87, "word": " that", "probability": 0.93505859375}, {"start": 520.87, "end": 521.01, "word": " is", "probability": 0.60986328125}, {"start": 521.01, "end": 521.45, "word": " published", "probability": 0.76513671875}, {"start": 521.45, "end": 521.95, "word": " in", "probability": 0.884765625}, {"start": 521.95, "end": 522.05, "word": " the", "probability": 0.908203125}, {"start": 522.05, "end": 522.39, "word": " internet,", "probability": 0.66259765625}, {"start": 522.51, "end": 522.59, "word": " for", "probability": 0.94873046875}, {"start": 522.59, "end": 522.93, "word": " example,", "probability": 0.9716796875}, {"start": 523.03, "end": 523.13, "word": " in", "probability": 0.9462890625}, {"start": 523.13, "end": 523.23, "word": " the", "probability": 0.91015625}, {"start": 523.23, "end": 523.47, "word": " books,", "probability": 0.73046875}, {"start": 524.17, "end": 524.51, "word": " in", "probability": 
0.9140625}, {"start": 524.51, "end": 525.19, "word": " governments", "probability": 0.83447265625}, {"start": 525.19, "end": 525.49, "word": " and", "probability": 0.6728515625}, {"start": 525.49, "end": 525.95, "word": " NGOs", "probability": 0.9326171875}, {"start": 525.95, "end": 526.29, "word": " and", "probability": 0.52783203125}, {"start": 526.29, "end": 526.45, "word": " so", "probability": 0.955078125}, {"start": 526.45, "end": 526.63, "word": " on.", "probability": 0.947265625}, {"start": 527.09, "end": 527.35, "word": " So", "probability": 0.921875}, {"start": 527.35, "end": 527.83, "word": " these", "probability": 0.83056640625}, {"start": 527.83, "end": 528.09, "word": " are", "probability": 0.94091796875}, {"start": 528.09, "end": 528.25, "word": " the", "probability": 0.9228515625}, {"start": 528.25, "end": 528.43, "word": " two", "probability": 0.9384765625}, {"start": 528.43, "end": 528.87, "word": " sources", "probability": 0.845703125}, {"start": 528.87, "end": 529.41, "word": " of", "probability": 0.9697265625}, {"start": 529.41, "end": 529.67, "word": " data.", "probability": 0.92919921875}, {"start": 531.99, "end": 532.55, "word": " Sources", "probability": 0.9267578125}, {"start": 532.55, "end": 532.73, "word": " of", "probability": 0.96728515625}, {"start": 532.73, "end": 532.99, "word": " data", "probability": 0.93408203125}, {"start": 532.99, "end": 533.31, "word": " fall", "probability": 0.8818359375}, {"start": 533.31, "end": 533.59, "word": " into", "probability": 0.84716796875}, {"start": 533.59, "end": 533.85, "word": " four", "probability": 0.9267578125}, {"start": 533.85, "end": 534.37, "word": " categories.", "probability": 0.9404296875}, {"start": 535.29, "end": 535.67, "word": " Number", "probability": 0.84423828125}, {"start": 535.67, "end": 535.91, "word": " one,", "probability": 0.8359375}, {"start": 536.51, "end": 536.73, "word": " data", "probability": 0.9033203125}, {"start": 536.73, "end": 537.33, "word": " distributed", 
"probability": 0.9052734375}, {"start": 537.33, "end": 537.61, "word": " by", "probability": 0.97265625}, {"start": 537.61, "end": 537.85, "word": " an", "probability": 0.9658203125}, {"start": 537.85, "end": 538.35, "word": " organization", "probability": 0.87939453125}, {"start": 538.35, "end": 538.71, "word": " or", "probability": 0.95556640625}, {"start": 538.71, "end": 538.89, "word": " an", "probability": 0.89599609375}, {"start": 538.89, "end": 539.31, "word": " individual.", "probability": 0.9052734375}], "temperature": 1.0}, {"id": 20, "seek": 56880, "start": 540.17, "end": 568.81, "text": " So that's secondary source. A design experiment is primary because you have to design the experiment, a survey. It's also primary. An observational study is also a primary source. So you have to distinguish between a primary and secondary sources. Any question? Comments? Next.", "tokens": [407, 300, 311, 11396, 4009, 13, 316, 1715, 5120, 307, 6194, 570, 291, 362, 281, 1715, 264, 5120, 11, 257, 8984, 13, 467, 311, 611, 6194, 13, 1107, 9951, 1478, 2979, 307, 611, 257, 6194, 4009, 13, 407, 291, 362, 281, 20206, 1296, 257, 6194, 293, 11396, 7139, 13, 2639, 1168, 30, 2432, 1117, 30, 3087, 13], "avg_logprob": -0.1807650849223137, "compression_ratio": 1.6646706586826348, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 540.17, "end": 540.55, "word": " So", "probability": 0.88330078125}, {"start": 540.55, "end": 541.19, "word": " that's", "probability": 0.9462890625}, {"start": 541.19, "end": 541.79, "word": " secondary", "probability": 0.92333984375}, {"start": 541.79, "end": 543.17, "word": " source.", "probability": 0.81640625}, {"start": 544.07, "end": 544.49, "word": " A", "probability": 0.8046875}, {"start": 544.49, "end": 544.89, "word": " design", "probability": 0.4140625}, {"start": 544.89, "end": 545.47, "word": " experiment", "probability": 0.60791015625}, {"start": 545.47, "end": 545.77, "word": " is", "probability": 0.53369140625}, {"start": 545.77, 
"end": 546.17, "word": " primary", "probability": 0.8134765625}, {"start": 546.17, "end": 546.95, "word": " because", "probability": 0.65087890625}, {"start": 546.95, "end": 547.23, "word": " you", "probability": 0.9638671875}, {"start": 547.23, "end": 547.83, "word": " have", "probability": 0.953125}, {"start": 547.83, "end": 548.11, "word": " to", "probability": 0.97314453125}, {"start": 548.11, "end": 549.09, "word": " design", "probability": 0.97802734375}, {"start": 549.09, "end": 549.61, "word": " the", "probability": 0.8974609375}, {"start": 549.61, "end": 550.11, "word": " experiment,", "probability": 0.9541015625}, {"start": 550.21, "end": 550.35, "word": " a", "probability": 0.9677734375}, {"start": 550.35, "end": 550.63, "word": " survey.", "probability": 0.89794921875}, {"start": 551.13, "end": 551.59, "word": " It's", "probability": 0.982666015625}, {"start": 551.59, "end": 551.85, "word": " also", "probability": 0.8818359375}, {"start": 551.85, "end": 552.25, "word": " primary.", "probability": 0.9248046875}, {"start": 552.81, "end": 552.95, "word": " An", "probability": 0.50146484375}, {"start": 552.95, "end": 554.23, "word": " observational", "probability": 0.8408203125}, {"start": 554.23, "end": 554.61, "word": " study", "probability": 0.93359375}, {"start": 554.61, "end": 554.79, "word": " is", "probability": 0.94775390625}, {"start": 554.79, "end": 555.23, "word": " also", "probability": 0.87890625}, {"start": 555.23, "end": 556.29, "word": " a", "probability": 0.99072265625}, {"start": 556.29, "end": 556.61, "word": " primary", "probability": 0.93115234375}, {"start": 556.61, "end": 556.95, "word": " source.", "probability": 0.7958984375}, {"start": 557.05, "end": 557.17, "word": " So", "probability": 0.9560546875}, {"start": 557.17, "end": 557.33, "word": " you", "probability": 0.84912109375}, {"start": 557.33, "end": 557.47, "word": " have", "probability": 0.94775390625}, {"start": 557.47, "end": 557.59, "word": " to", "probability": 
0.97216796875}, {"start": 557.59, "end": 557.99, "word": " distinguish", "probability": 0.8779296875}, {"start": 557.99, "end": 558.67, "word": " between", "probability": 0.8759765625}, {"start": 558.67, "end": 559.67, "word": " a", "probability": 0.65478515625}, {"start": 559.67, "end": 560.01, "word": " primary", "probability": 0.9306640625}, {"start": 560.01, "end": 560.73, "word": " and", "probability": 0.9443359375}, {"start": 560.73, "end": 561.41, "word": " secondary", "probability": 0.92041015625}, {"start": 561.41, "end": 562.27, "word": " sources.", "probability": 0.740234375}, {"start": 563.11, "end": 563.49, "word": " Any", "probability": 0.892578125}, {"start": 563.49, "end": 563.89, "word": " question?", "probability": 0.71533203125}, {"start": 565.27, "end": 565.83, "word": " Comments?", "probability": 0.92236328125}, {"start": 568.11, "end": 568.81, "word": " Next.", "probability": 0.8896484375}], "temperature": 1.0}, {"id": 21, "seek": 59614, "start": 571.46, "end": 596.14, "text": " We'll talk a little bit about types of variables. In general, there are two types of variables. One is called categorical variables or qualitative variables, and the other one is called numerical or quantitative variables. Now, for example, if I ask you, what's your favorite color? 
You may say white, black, red, and so on.", "tokens": [492, 603, 751, 257, 707, 857, 466, 3467, 295, 9102, 13, 682, 2674, 11, 456, 366, 732, 3467, 295, 9102, 13, 1485, 307, 1219, 19250, 804, 9102, 420, 31312, 9102, 11, 293, 264, 661, 472, 307, 1219, 29054, 420, 27778, 9102, 13, 823, 11, 337, 1365, 11, 498, 286, 1029, 291, 11, 437, 311, 428, 2954, 2017, 30, 509, 815, 584, 2418, 11, 2211, 11, 2182, 11, 293, 370, 322, 13], "avg_logprob": -0.11414930400335127, "compression_ratio": 1.6839378238341969, "no_speech_prob": 0.0, "words": [{"start": 571.46, "end": 571.94, "word": " We'll", "probability": 0.80029296875}, {"start": 571.94, "end": 572.12, "word": " talk", "probability": 0.9033203125}, {"start": 572.12, "end": 572.26, "word": " a", "probability": 0.39013671875}, {"start": 572.26, "end": 572.7, "word": " little", "probability": 0.689453125}, {"start": 572.7, "end": 572.96, "word": " bit", "probability": 0.94580078125}, {"start": 572.96, "end": 573.22, "word": " about", "probability": 0.91162109375}, {"start": 573.22, "end": 573.88, "word": " types", "probability": 0.728515625}, {"start": 573.88, "end": 574.14, "word": " of", "probability": 0.9716796875}, {"start": 574.14, "end": 574.54, "word": " variables.", "probability": 0.9306640625}, {"start": 575.36, "end": 575.7, "word": " In", "probability": 0.91455078125}, {"start": 575.7, "end": 575.94, "word": " general,", "probability": 0.904296875}, {"start": 576.08, "end": 576.22, "word": " there", "probability": 0.908203125}, {"start": 576.22, "end": 576.36, "word": " are", "probability": 0.94140625}, {"start": 576.36, "end": 576.52, "word": " two", "probability": 0.94384765625}, {"start": 576.52, "end": 576.82, "word": " types", "probability": 0.8349609375}, {"start": 576.82, "end": 576.98, "word": " of", "probability": 0.97119140625}, {"start": 576.98, "end": 577.3, "word": " variables.", "probability": 0.93798828125}, {"start": 577.46, "end": 577.58, "word": " One", "probability": 0.9287109375}, {"start": 577.58, 
"end": 577.76, "word": " is", "probability": 0.95068359375}, {"start": 577.76, "end": 578.2, "word": " called", "probability": 0.89990234375}, {"start": 578.2, "end": 579.42, "word": " categorical", "probability": 0.94677734375}, {"start": 579.42, "end": 580.06, "word": " variables", "probability": 0.9296875}, {"start": 580.06, "end": 580.3, "word": " or", "probability": 0.87353515625}, {"start": 580.3, "end": 580.76, "word": " qualitative", "probability": 0.9296875}, {"start": 580.76, "end": 581.42, "word": " variables,", "probability": 0.92138671875}, {"start": 581.72, "end": 581.98, "word": " and", "probability": 0.9375}, {"start": 581.98, "end": 582.1, "word": " the", "probability": 0.7587890625}, {"start": 582.1, "end": 582.3, "word": " other", "probability": 0.89306640625}, {"start": 582.3, "end": 582.46, "word": " one", "probability": 0.91943359375}, {"start": 582.46, "end": 582.6, "word": " is", "probability": 0.94140625}, {"start": 582.6, "end": 582.78, "word": " called", "probability": 0.86376953125}, {"start": 582.78, "end": 583.16, "word": " numerical", "probability": 0.89013671875}, {"start": 583.16, "end": 583.76, "word": " or", "probability": 0.951171875}, {"start": 583.76, "end": 584.26, "word": " quantitative", "probability": 0.94580078125}, {"start": 584.26, "end": 584.82, "word": " variables.", "probability": 0.91552734375}, {"start": 585.58, "end": 585.78, "word": " Now,", "probability": 0.9287109375}, {"start": 585.82, "end": 585.96, "word": " for", "probability": 0.9521484375}, {"start": 585.96, "end": 586.22, "word": " example,", "probability": 0.9765625}, {"start": 586.28, "end": 586.4, "word": " if", "probability": 0.95361328125}, {"start": 586.4, "end": 586.52, "word": " I", "probability": 0.99951171875}, {"start": 586.52, "end": 586.76, "word": " ask", "probability": 0.92529296875}, {"start": 586.76, "end": 587.02, "word": " you,", "probability": 0.96435546875}, {"start": 587.34, "end": 590.56, "word": " what's", "probability": 
0.88330078125}, {"start": 590.56, "end": 590.78, "word": " your", "probability": 0.88623046875}, {"start": 590.78, "end": 591.2, "word": " favorite", "probability": 0.9140625}, {"start": 591.2, "end": 591.66, "word": " color?", "probability": 0.87744140625}, {"start": 592.76, "end": 592.94, "word": " You", "probability": 0.9267578125}, {"start": 592.94, "end": 593.04, "word": " may", "probability": 0.927734375}, {"start": 593.04, "end": 593.28, "word": " say", "probability": 0.83837890625}, {"start": 593.28, "end": 594.64, "word": " white,", "probability": 0.65234375}, {"start": 594.78, "end": 595.1, "word": " black,", "probability": 0.8955078125}, {"start": 595.22, "end": 595.5, "word": " red,", "probability": 0.923828125}, {"start": 595.58, "end": 595.74, "word": " and", "probability": 0.94140625}, {"start": 595.74, "end": 595.92, "word": " so", "probability": 0.95751953125}, {"start": 595.92, "end": 596.14, "word": " on.", "probability": 0.94921875}], "temperature": 1.0}, {"id": 22, "seek": 62492, "start": 597.19, "end": 624.93, "text": " What's your marital status? Maybe married or unmarried, and so on. Gender, male, either male or female, and so on. So this type of variable is called qualitative variables. So qualitative variables have values that can only be placed into categories, such as, for example, yes or no. For example, do you like orange? 
The answer is either yes or no.", "tokens": [708, 311, 428, 1849, 1686, 6558, 30, 2704, 5259, 420, 517, 6209, 2428, 11, 293, 370, 322, 13, 48039, 11, 7133, 11, 2139, 7133, 420, 6556, 11, 293, 370, 322, 13, 407, 341, 2010, 295, 7006, 307, 1219, 31312, 9102, 13, 407, 31312, 9102, 362, 4190, 300, 393, 787, 312, 7074, 666, 10479, 11, 1270, 382, 11, 337, 1365, 11, 2086, 420, 572, 13, 1171, 1365, 11, 360, 291, 411, 7671, 30, 440, 1867, 307, 2139, 2086, 420, 572, 13], "avg_logprob": -0.14814815403502665, "compression_ratio": 1.6859903381642511, "no_speech_prob": 0.0, "words": [{"start": 597.19, "end": 597.71, "word": " What's", "probability": 0.7314453125}, {"start": 597.71, "end": 597.91, "word": " your", "probability": 0.89306640625}, {"start": 597.91, "end": 598.23, "word": " marital", "probability": 0.970947265625}, {"start": 598.23, "end": 598.87, "word": " status?", "probability": 0.939453125}, {"start": 599.17, "end": 599.39, "word": " Maybe", "probability": 0.75}, {"start": 599.39, "end": 599.93, "word": " married", "probability": 0.8896484375}, {"start": 599.93, "end": 600.17, "word": " or", "probability": 0.9638671875}, {"start": 600.17, "end": 600.61, "word": " unmarried,", "probability": 0.9088541666666666}, {"start": 600.73, "end": 600.79, "word": " and", "probability": 0.92724609375}, {"start": 600.79, "end": 601.01, "word": " so", "probability": 0.95458984375}, {"start": 601.01, "end": 601.23, "word": " on.", "probability": 0.94677734375}, {"start": 601.79, "end": 602.11, "word": " Gender,", "probability": 0.87548828125}, {"start": 602.41, "end": 602.67, "word": " male,", "probability": 0.6513671875}, {"start": 602.85, "end": 603.07, "word": " either", "probability": 0.92333984375}, {"start": 603.07, "end": 603.39, "word": " male", "probability": 0.8876953125}, {"start": 603.39, "end": 603.63, "word": " or", "probability": 0.935546875}, {"start": 603.63, "end": 603.95, "word": " female,", "probability": 0.82177734375}, {"start": 604.01, "end": 604.19, 
"word": " and", "probability": 0.9384765625}, {"start": 604.19, "end": 604.33, "word": " so", "probability": 0.955078125}, {"start": 604.33, "end": 604.45, "word": " on.", "probability": 0.94580078125}, {"start": 605.25, "end": 605.59, "word": " So", "probability": 0.9091796875}, {"start": 605.59, "end": 606.53, "word": " this", "probability": 0.82421875}, {"start": 606.53, "end": 606.87, "word": " type", "probability": 0.9765625}, {"start": 606.87, "end": 607.05, "word": " of", "probability": 0.9697265625}, {"start": 607.05, "end": 607.33, "word": " variable", "probability": 0.454345703125}, {"start": 607.33, "end": 607.59, "word": " is", "probability": 0.90185546875}, {"start": 607.59, "end": 607.89, "word": " called", "probability": 0.89892578125}, {"start": 607.89, "end": 608.83, "word": " qualitative", "probability": 0.9091796875}, {"start": 608.83, "end": 609.39, "word": " variables.", "probability": 0.9033203125}, {"start": 609.97, "end": 610.09, "word": " So", "probability": 0.951171875}, {"start": 610.09, "end": 610.49, "word": " qualitative", "probability": 0.96142578125}, {"start": 610.49, "end": 611.01, "word": " variables", "probability": 0.93212890625}, {"start": 611.01, "end": 611.47, "word": " have", "probability": 0.9453125}, {"start": 611.47, "end": 612.21, "word": " values", "probability": 0.974609375}, {"start": 612.21, "end": 612.49, "word": " that", "probability": 0.9423828125}, {"start": 612.49, "end": 612.73, "word": " can", "probability": 0.9482421875}, {"start": 612.73, "end": 613.01, "word": " only", "probability": 0.91943359375}, {"start": 613.01, "end": 613.13, "word": " be", "probability": 0.38427734375}, {"start": 613.13, "end": 613.33, "word": " placed", "probability": 0.92041015625}, {"start": 613.33, "end": 613.59, "word": " into", "probability": 0.8544921875}, {"start": 613.59, "end": 614.15, "word": " categories,", "probability": 0.93701171875}, {"start": 614.69, "end": 615.43, "word": " such", "probability": 0.94384765625}, 
{"start": 615.43, "end": 615.65, "word": " as,", "probability": 0.96630859375}, {"start": 615.73, "end": 615.79, "word": " for", "probability": 0.947265625}, {"start": 615.79, "end": 616.07, "word": " example,", "probability": 0.9736328125}, {"start": 616.17, "end": 616.37, "word": " yes", "probability": 0.9296875}, {"start": 616.37, "end": 616.57, "word": " or", "probability": 0.96875}, {"start": 616.57, "end": 616.77, "word": " no.", "probability": 0.96484375}, {"start": 617.25, "end": 617.45, "word": " For", "probability": 0.96240234375}, {"start": 617.45, "end": 617.73, "word": " example,", "probability": 0.974609375}, {"start": 617.99, "end": 618.17, "word": " do", "probability": 0.83740234375}, {"start": 618.17, "end": 618.27, "word": " you", "probability": 0.96826171875}, {"start": 618.27, "end": 618.67, "word": " like", "probability": 0.94677734375}, {"start": 618.67, "end": 621.35, "word": " orange?", "probability": 0.85009765625}, {"start": 622.27, "end": 622.51, "word": " The", "probability": 0.78955078125}, {"start": 622.51, "end": 622.91, "word": " answer", "probability": 0.95947265625}, {"start": 622.91, "end": 623.17, "word": " is", "probability": 0.9287109375}, {"start": 623.17, "end": 623.49, "word": " either", "probability": 0.93896484375}, {"start": 623.49, "end": 623.95, "word": " yes", "probability": 0.95263671875}, {"start": 623.95, "end": 624.65, "word": " or", "probability": 0.96533203125}, {"start": 624.65, "end": 624.93, "word": " no.", "probability": 0.96337890625}], "temperature": 1.0}, {"id": 23, "seek": 65038, "start": 625.5, "end": 650.38, "text": " Do you like candidate A, for example, whatever his party is? Do you like it? Either yes or no, and so on. As I mentioned before, gender, marital status, race, religions, these are examples of qualitative or categorical variables. 
The other type of variable which is commonly used is called numerical or quantitative data.", "tokens": [1144, 291, 411, 11532, 316, 11, 337, 1365, 11, 2035, 702, 3595, 307, 30, 1144, 291, 411, 309, 30, 13746, 2086, 420, 572, 11, 293, 370, 322, 13, 1018, 286, 2835, 949, 11, 7898, 11, 1849, 1686, 6558, 11, 4569, 11, 21212, 11, 613, 366, 5110, 295, 31312, 420, 19250, 804, 9102, 13, 440, 661, 2010, 295, 7006, 597, 307, 12719, 1143, 307, 1219, 29054, 420, 27778, 1412, 13], "avg_logprob": -0.1277901719723429, "compression_ratio": 1.5260663507109005, "no_speech_prob": 0.0, "words": [{"start": 625.5, "end": 625.88, "word": " Do", "probability": 0.68408203125}, {"start": 625.88, "end": 625.98, "word": " you", "probability": 0.966796875}, {"start": 625.98, "end": 626.2, "word": " like", "probability": 0.94970703125}, {"start": 626.2, "end": 626.68, "word": " candidate", "probability": 0.8134765625}, {"start": 626.68, "end": 627.08, "word": " A,", "probability": 0.83349609375}, {"start": 627.48, "end": 627.62, "word": " for", "probability": 0.9462890625}, {"start": 627.62, "end": 627.92, "word": " example,", "probability": 0.9755859375}, {"start": 628.02, "end": 628.72, "word": " whatever", "probability": 0.444091796875}, {"start": 628.72, "end": 629.34, "word": " his", "probability": 0.95703125}, {"start": 629.34, "end": 629.66, "word": " party", "probability": 0.875}, {"start": 629.66, "end": 630.04, "word": " is?", "probability": 0.95068359375}, {"start": 630.26, "end": 630.54, "word": " Do", "probability": 0.95361328125}, {"start": 630.54, "end": 630.6, "word": " you", "probability": 0.9599609375}, {"start": 630.6, "end": 630.82, "word": " like", "probability": 0.9453125}, {"start": 630.82, "end": 631.04, "word": " it?", "probability": 0.94677734375}, {"start": 631.12, "end": 631.36, "word": " Either", "probability": 0.89453125}, {"start": 631.36, "end": 631.74, "word": " yes", "probability": 0.927734375}, {"start": 631.74, "end": 632.12, "word": " or", "probability": 
0.9580078125}, {"start": 632.12, "end": 632.42, "word": " no,", "probability": 0.96630859375}, {"start": 633.0, "end": 633.26, "word": " and", "probability": 0.939453125}, {"start": 633.26, "end": 633.46, "word": " so", "probability": 0.95751953125}, {"start": 633.46, "end": 633.64, "word": " on.", "probability": 0.94921875}, {"start": 633.88, "end": 634.26, "word": " As", "probability": 0.5576171875}, {"start": 634.26, "end": 634.62, "word": " I", "probability": 0.99658203125}, {"start": 634.62, "end": 634.84, "word": " mentioned", "probability": 0.83203125}, {"start": 634.84, "end": 635.18, "word": " before,", "probability": 0.861328125}, {"start": 635.32, "end": 635.72, "word": " gender,", "probability": 0.81005859375}, {"start": 636.24, "end": 636.5, "word": " marital", "probability": 0.97119140625}, {"start": 636.5, "end": 636.92, "word": " status,", "probability": 0.93701171875}, {"start": 637.24, "end": 637.48, "word": " race,", "probability": 0.94677734375}, {"start": 637.64, "end": 638.06, "word": " religions,", "probability": 0.89794921875}, {"start": 638.4, "end": 638.72, "word": " these", "probability": 0.8564453125}, {"start": 638.72, "end": 638.9, "word": " are", "probability": 0.93896484375}, {"start": 638.9, "end": 639.32, "word": " examples", "probability": 0.8681640625}, {"start": 639.32, "end": 639.82, "word": " of", "probability": 0.974609375}, {"start": 639.82, "end": 640.52, "word": " qualitative", "probability": 0.896484375}, {"start": 640.52, "end": 641.82, "word": " or", "probability": 0.97216796875}, {"start": 641.82, "end": 642.8, "word": " categorical", "probability": 0.952392578125}, {"start": 642.8, "end": 643.64, "word": " variables.", "probability": 0.94091796875}, {"start": 644.66, "end": 645.24, "word": " The", "probability": 0.8955078125}, {"start": 645.24, "end": 645.46, "word": " other", "probability": 0.89697265625}, {"start": 645.46, "end": 645.74, "word": " type", "probability": 0.9716796875}, {"start": 645.74, "end": 645.88, 
"word": " of", "probability": 0.96826171875}, {"start": 645.88, "end": 646.24, "word": " variable", "probability": 0.90869140625}, {"start": 646.24, "end": 646.52, "word": " which", "probability": 0.8349609375}, {"start": 646.52, "end": 646.74, "word": " is", "probability": 0.94775390625}, {"start": 646.74, "end": 647.2, "word": " commonly", "probability": 0.86328125}, {"start": 647.2, "end": 647.6, "word": " used", "probability": 0.91455078125}, {"start": 647.6, "end": 647.84, "word": " is", "probability": 0.8466796875}, {"start": 647.84, "end": 648.26, "word": " called", "probability": 0.87548828125}, {"start": 648.26, "end": 649.02, "word": " numerical", "probability": 0.8212890625}, {"start": 649.02, "end": 649.48, "word": " or", "probability": 0.95654296875}, {"start": 649.48, "end": 649.92, "word": " quantitative", "probability": 0.92919921875}, {"start": 649.92, "end": 650.38, "word": " data.", "probability": 0.92822265625}], "temperature": 1.0}, {"id": 24, "seek": 67563, "start": 651.89, "end": 675.63, "text": " Quantitative variables have values that represent quantities. For example, if I ask you, what's your age? My age is 20 years old or 18 years old. What's your weight? Income. Height? Temperature? Income. So it's a number. 
Weight, maybe my weight is 70 kilograms.", "tokens": [26968, 14275, 9102, 362, 4190, 300, 2906, 22927, 13, 1171, 1365, 11, 498, 286, 1029, 291, 11, 437, 311, 428, 3205, 30, 1222, 3205, 307, 945, 924, 1331, 420, 2443, 924, 1331, 13, 708, 311, 428, 3364, 30, 682, 1102, 13, 634, 397, 30, 34864, 1503, 30, 682, 1102, 13, 407, 309, 311, 257, 1230, 13, 44464, 11, 1310, 452, 3364, 307, 5285, 30690, 13], "avg_logprob": -0.11612216180021112, "compression_ratio": 1.4475138121546962, "no_speech_prob": 0.0, "words": [{"start": 651.89, "end": 652.49, "word": " Quantitative", "probability": 0.969482421875}, {"start": 652.49, "end": 652.93, "word": " variables", "probability": 0.9326171875}, {"start": 652.93, "end": 653.23, "word": " have", "probability": 0.9443359375}, {"start": 653.23, "end": 653.57, "word": " values", "probability": 0.970703125}, {"start": 653.57, "end": 653.87, "word": " that", "probability": 0.939453125}, {"start": 653.87, "end": 654.47, "word": " represent", "probability": 0.83642578125}, {"start": 654.47, "end": 655.13, "word": " quantities.", "probability": 0.94677734375}, {"start": 655.27, "end": 655.37, "word": " For", "probability": 0.96484375}, {"start": 655.37, "end": 655.71, "word": " example,", "probability": 0.97509765625}, {"start": 656.13, "end": 656.35, "word": " if", "probability": 0.95556640625}, {"start": 656.35, "end": 656.49, "word": " I", "probability": 0.9921875}, {"start": 656.49, "end": 656.69, "word": " ask", "probability": 0.923828125}, {"start": 656.69, "end": 656.81, "word": " you,", "probability": 0.95849609375}, {"start": 656.91, "end": 657.05, "word": " what's", "probability": 0.7281494140625}, {"start": 657.05, "end": 657.19, "word": " your", "probability": 0.89794921875}, {"start": 657.19, "end": 657.55, "word": " age?", "probability": 0.951171875}, {"start": 658.61, "end": 659.21, "word": " My", "probability": 0.96240234375}, {"start": 659.21, "end": 659.43, "word": " age", "probability": 0.95947265625}, {"start": 659.43, 
"end": 659.59, "word": " is", "probability": 0.94775390625}, {"start": 659.59, "end": 659.89, "word": " 20", "probability": 0.6708984375}, {"start": 659.89, "end": 660.19, "word": " years", "probability": 0.9228515625}, {"start": 660.19, "end": 660.47, "word": " old", "probability": 0.91748046875}, {"start": 660.47, "end": 660.65, "word": " or", "probability": 0.728515625}, {"start": 660.65, "end": 660.89, "word": " 18", "probability": 0.90380859375}, {"start": 660.89, "end": 662.17, "word": " years", "probability": 0.86669921875}, {"start": 662.17, "end": 662.45, "word": " old.", "probability": 0.90234375}, {"start": 662.57, "end": 662.95, "word": " What's", "probability": 0.94091796875}, {"start": 662.95, "end": 663.11, "word": " your", "probability": 0.89697265625}, {"start": 663.11, "end": 663.53, "word": " weight?", "probability": 0.91455078125}, {"start": 664.27, "end": 664.77, "word": " Income.", "probability": 0.966552734375}, {"start": 667.51, "end": 668.11, "word": " Height?", "probability": 0.936279296875}, {"start": 668.53, "end": 669.13, "word": " Temperature?", "probability": 0.97705078125}, {"start": 670.29, "end": 670.89, "word": " Income.", "probability": 0.961181640625}, {"start": 671.01, "end": 671.15, "word": " So", "probability": 0.9296875}, {"start": 671.15, "end": 671.43, "word": " it's", "probability": 0.91259765625}, {"start": 671.43, "end": 672.21, "word": " a", "probability": 0.6767578125}, {"start": 672.21, "end": 672.55, "word": " number.", "probability": 0.93408203125}, {"start": 673.61, "end": 673.87, "word": " Weight,", "probability": 0.8994140625}, {"start": 673.95, "end": 674.07, "word": " maybe", "probability": 0.9423828125}, {"start": 674.07, "end": 674.29, "word": " my", "probability": 0.9560546875}, {"start": 674.29, "end": 674.51, "word": " weight", "probability": 0.93115234375}, {"start": 674.51, "end": 674.75, "word": " is", "probability": 0.9453125}, {"start": 674.75, "end": 675.21, "word": " 70", "probability": 
0.96435546875}, {"start": 675.21, "end": 675.63, "word": " kilograms.", "probability": 0.8173828125}], "temperature": 1.0}, {"id": 25, "seek": 70611, "start": 677.41, "end": 706.11, "text": " So weight, age, height, salary, income, number of students, number of phone calls you received on your cell phone during one hour, number of accidents happened in street and so on. So that's the difference between numerical variables and qualitative variables. Anyone of you just give me one example of qualitative and quantitative variables.", "tokens": [407, 3364, 11, 3205, 11, 6681, 11, 15360, 11, 5742, 11, 1230, 295, 1731, 11, 1230, 295, 2593, 5498, 291, 4613, 322, 428, 2815, 2593, 1830, 472, 1773, 11, 1230, 295, 23875, 2011, 294, 4838, 293, 370, 322, 13, 407, 300, 311, 264, 2649, 1296, 29054, 9102, 293, 31312, 9102, 13, 14643, 295, 291, 445, 976, 385, 472, 1365, 295, 31312, 293, 27778, 9102, 13], "avg_logprob": -0.12606533752246338, "compression_ratio": 1.7411167512690355, "no_speech_prob": 0.0, "words": [{"start": 677.41, "end": 678.03, "word": " So", "probability": 0.82958984375}, {"start": 678.03, "end": 678.67, "word": " weight,", "probability": 0.82421875}, {"start": 679.25, "end": 680.25, "word": " age,", "probability": 0.9140625}, {"start": 680.51, "end": 680.91, "word": " height,", "probability": 0.97021484375}, {"start": 681.17, "end": 681.49, "word": " salary,", "probability": 0.958984375}, {"start": 681.77, "end": 681.99, "word": " income,", "probability": 0.93994140625}, {"start": 682.15, "end": 682.33, "word": " number", "probability": 0.94384765625}, {"start": 682.33, "end": 682.45, "word": " of", "probability": 0.96923828125}, {"start": 682.45, "end": 682.79, "word": " students,", "probability": 0.92041015625}, {"start": 683.33, "end": 683.63, "word": " number", "probability": 0.93896484375}, {"start": 683.63, "end": 684.43, "word": " of", "probability": 0.96875}, {"start": 684.43, "end": 684.67, "word": " phone", "probability": 0.947265625}, {"start": 
684.67, "end": 684.97, "word": " calls", "probability": 0.88427734375}, {"start": 684.97, "end": 685.13, "word": " you", "probability": 0.96484375}, {"start": 685.13, "end": 685.73, "word": " received", "probability": 0.802734375}, {"start": 685.73, "end": 686.51, "word": " on", "probability": 0.4091796875}, {"start": 686.51, "end": 686.69, "word": " your", "probability": 0.8896484375}, {"start": 686.69, "end": 686.95, "word": " cell", "probability": 0.55126953125}, {"start": 686.95, "end": 687.21, "word": " phone", "probability": 0.939453125}, {"start": 687.21, "end": 688.01, "word": " during", "probability": 0.86328125}, {"start": 688.01, "end": 688.23, "word": " one", "probability": 0.91650390625}, {"start": 688.23, "end": 688.43, "word": " hour,", "probability": 0.9462890625}, {"start": 688.47, "end": 688.63, "word": " number", "probability": 0.93896484375}, {"start": 688.63, "end": 688.77, "word": " of", "probability": 0.97509765625}, {"start": 688.77, "end": 689.17, "word": " accidents", "probability": 0.89990234375}, {"start": 689.17, "end": 689.75, "word": " happened", "probability": 0.78759765625}, {"start": 689.75, "end": 690.43, "word": " in", "probability": 0.869140625}, {"start": 690.43, "end": 691.09, "word": " street", "probability": 0.81298828125}, {"start": 691.09, "end": 691.33, "word": " and", "probability": 0.60498046875}, {"start": 691.33, "end": 691.53, "word": " so", "probability": 0.95703125}, {"start": 691.53, "end": 691.73, "word": " on.", "probability": 0.94482421875}, {"start": 692.49, "end": 692.93, "word": " So", "probability": 0.89013671875}, {"start": 692.93, "end": 693.21, "word": " that's", "probability": 0.96435546875}, {"start": 693.21, "end": 693.31, "word": " the", "probability": 0.92236328125}, {"start": 693.31, "end": 693.63, "word": " difference", "probability": 0.85302734375}, {"start": 693.63, "end": 694.07, "word": " between", "probability": 0.8662109375}, {"start": 694.07, "end": 694.87, "word": " numerical", 
"probability": 0.86865234375}, {"start": 694.87, "end": 695.97, "word": " variables", "probability": 0.9248046875}, {"start": 695.97, "end": 696.47, "word": " and", "probability": 0.9384765625}, {"start": 696.47, "end": 697.13, "word": " qualitative", "probability": 0.9248046875}, {"start": 697.13, "end": 697.71, "word": " variables.", "probability": 0.927734375}, {"start": 700.27, "end": 700.91, "word": " Anyone", "probability": 0.429443359375}, {"start": 700.91, "end": 701.15, "word": " of", "probability": 0.947265625}, {"start": 701.15, "end": 701.23, "word": " you", "probability": 0.9677734375}, {"start": 701.23, "end": 701.41, "word": " just", "probability": 0.79345703125}, {"start": 701.41, "end": 701.55, "word": " give", "probability": 0.8935546875}, {"start": 701.55, "end": 701.65, "word": " me", "probability": 0.96435546875}, {"start": 701.65, "end": 701.81, "word": " one", "probability": 0.9267578125}, {"start": 701.81, "end": 702.21, "word": " example", "probability": 0.9736328125}, {"start": 702.21, "end": 702.65, "word": " of", "probability": 0.97216796875}, {"start": 702.65, "end": 703.37, "word": " qualitative", "probability": 0.9375}, {"start": 703.37, "end": 705.13, "word": " and", "probability": 0.951171875}, {"start": 705.13, "end": 705.69, "word": " quantitative", "probability": 0.95654296875}, {"start": 705.69, "end": 706.11, "word": " variables.", "probability": 0.58935546875}], "temperature": 1.0}, {"id": 26, "seek": 73506, "start": 707.4, "end": 735.06, "text": " Another examples. Just give me one example for qualitative data. Qualitative or quantitative. Political party, either party A or party B. So suppose there are two parties, so I like party A, she likes party B and so on. 
So party in this case is qualitative variable, another one.", "tokens": [3996, 5110, 13, 1449, 976, 385, 472, 1365, 337, 31312, 1412, 13, 13616, 14275, 420, 27778, 13, 34265, 3595, 11, 2139, 3595, 316, 420, 3595, 363, 13, 407, 7297, 456, 366, 732, 8265, 11, 370, 286, 411, 3595, 316, 11, 750, 5902, 3595, 363, 293, 370, 322, 13, 407, 3595, 294, 341, 1389, 307, 31312, 7006, 11, 1071, 472, 13], "avg_logprob": -0.17482069672131148, "compression_ratio": 1.6766467065868262, "no_speech_prob": 0.0, "words": [{"start": 707.4, "end": 707.78, "word": " Another", "probability": 0.603515625}, {"start": 707.78, "end": 708.28, "word": " examples.", "probability": 0.74951171875}, {"start": 709.78, "end": 710.34, "word": " Just", "probability": 0.84619140625}, {"start": 710.34, "end": 710.48, "word": " give", "probability": 0.8798828125}, {"start": 710.48, "end": 710.56, "word": " me", "probability": 0.95068359375}, {"start": 710.56, "end": 710.72, "word": " one", "probability": 0.9326171875}, {"start": 710.72, "end": 711.04, "word": " example", "probability": 0.97265625}, {"start": 711.04, "end": 711.26, "word": " for", "probability": 0.94775390625}, {"start": 711.26, "end": 711.7, "word": " qualitative", "probability": 0.81640625}, {"start": 711.7, "end": 712.16, "word": " data.", "probability": 0.93212890625}, {"start": 716.9, "end": 717.46, "word": " Qualitative", "probability": 0.967529296875}, {"start": 717.46, "end": 718.3, "word": " or", "probability": 0.93408203125}, {"start": 718.3, "end": 718.78, "word": " quantitative.", "probability": 0.93359375}, {"start": 721.38, "end": 721.94, "word": " Political", "probability": 0.2337646484375}, {"start": 721.94, "end": 722.34, "word": " party,", "probability": 0.5751953125}, {"start": 722.44, "end": 722.66, "word": " either", "probability": 0.83935546875}, {"start": 722.66, "end": 723.28, "word": " party", "probability": 0.7578125}, {"start": 723.28, "end": 723.52, "word": " A", "probability": 0.93310546875}, {"start": 723.52, 
"end": 723.72, "word": " or", "probability": 0.9716796875}, {"start": 723.72, "end": 724.04, "word": " party", "probability": 0.87548828125}, {"start": 724.04, "end": 724.26, "word": " B.", "probability": 0.998046875}, {"start": 724.7, "end": 724.88, "word": " So", "probability": 0.642578125}, {"start": 724.88, "end": 725.16, "word": " suppose", "probability": 0.88330078125}, {"start": 725.16, "end": 725.34, "word": " there", "probability": 0.89599609375}, {"start": 725.34, "end": 725.48, "word": " are", "probability": 0.943359375}, {"start": 725.48, "end": 725.7, "word": " two", "probability": 0.93603515625}, {"start": 725.7, "end": 726.06, "word": " parties,", "probability": 0.97607421875}, {"start": 726.26, "end": 726.4, "word": " so", "probability": 0.92041015625}, {"start": 726.4, "end": 727.14, "word": " I", "probability": 0.99609375}, {"start": 727.14, "end": 727.58, "word": " like", "probability": 0.94677734375}, {"start": 727.58, "end": 728.08, "word": " party", "probability": 0.88037109375}, {"start": 728.08, "end": 728.32, "word": " A,", "probability": 0.994140625}, {"start": 728.72, "end": 728.9, "word": " she", "probability": 0.91455078125}, {"start": 728.9, "end": 729.18, "word": " likes", "probability": 0.861328125}, {"start": 729.18, "end": 729.54, "word": " party", "probability": 0.892578125}, {"start": 729.54, "end": 729.68, "word": " B", "probability": 0.99755859375}, {"start": 729.68, "end": 729.84, "word": " and", "probability": 0.4619140625}, {"start": 729.84, "end": 730.0, "word": " so", "probability": 0.9541015625}, {"start": 730.0, "end": 730.2, "word": " on.", "probability": 0.9482421875}, {"start": 730.42, "end": 730.6, "word": " So", "probability": 0.93359375}, {"start": 730.6, "end": 730.92, "word": " party", "probability": 0.88525390625}, {"start": 730.92, "end": 731.88, "word": " in", "probability": 0.48046875}, {"start": 731.88, "end": 732.08, "word": " this", "probability": 0.94775390625}, {"start": 732.08, "end": 732.32, "word": " 
case", "probability": 0.90185546875}, {"start": 732.32, "end": 732.58, "word": " is", "probability": 0.88037109375}, {"start": 732.58, "end": 733.02, "word": " qualitative", "probability": 0.8046875}, {"start": 733.02, "end": 733.88, "word": " variable,", "probability": 0.9208984375}, {"start": 734.48, "end": 734.76, "word": " another", "probability": 0.9248046875}, {"start": 734.76, "end": 735.06, "word": " one.", "probability": 0.9326171875}], "temperature": 1.0}, {"id": 27, "seek": 77284, "start": 745.4, "end": 772.84, "text": " So types of courses, maybe business, economics, administration, and so on. So types of courses. Another example for quantitative variable or numerical variables. So production is a numerical variable. Another example for quantitative.", "tokens": [407, 3467, 295, 7712, 11, 1310, 1606, 11, 14564, 11, 7236, 11, 293, 370, 322, 13, 407, 3467, 295, 7712, 13, 3996, 1365, 337, 27778, 7006, 420, 29054, 9102, 13, 407, 4265, 307, 257, 29054, 7006, 13, 3996, 1365, 337, 27778, 13], "avg_logprob": -0.17796148047890775, "compression_ratio": 1.7803030303030303, "no_speech_prob": 0.0, "words": [{"start": 745.4, "end": 745.72, "word": " So", "probability": 0.7265625}, {"start": 745.72, "end": 746.14, "word": " types", "probability": 0.67578125}, {"start": 746.14, "end": 746.78, "word": " of", "probability": 0.97119140625}, {"start": 746.78, "end": 747.1, "word": " courses,", "probability": 0.884765625}, {"start": 747.36, "end": 747.52, "word": " maybe", "probability": 0.9033203125}, {"start": 747.52, "end": 748.12, "word": " business,", "probability": 0.81298828125}, {"start": 748.34, "end": 748.82, "word": " economics,", "probability": 0.86376953125}, {"start": 749.48, "end": 750.1, "word": " administration,", "probability": 0.921875}, {"start": 750.44, "end": 751.04, "word": " and", "probability": 0.93701171875}, {"start": 751.04, "end": 751.28, "word": " so", "probability": 0.95556640625}, {"start": 751.28, "end": 751.54, "word": " on.", 
"probability": 0.95068359375}, {"start": 752.12, "end": 752.28, "word": " So", "probability": 0.88037109375}, {"start": 752.28, "end": 752.62, "word": " types", "probability": 0.7958984375}, {"start": 752.62, "end": 752.88, "word": " of", "probability": 0.97216796875}, {"start": 752.88, "end": 753.26, "word": " courses.", "probability": 0.8916015625}, {"start": 754.26, "end": 754.62, "word": " Another", "probability": 0.869140625}, {"start": 754.62, "end": 755.06, "word": " example", "probability": 0.97509765625}, {"start": 755.06, "end": 755.38, "word": " for", "probability": 0.87939453125}, {"start": 755.38, "end": 755.88, "word": " quantitative", "probability": 0.58349609375}, {"start": 755.88, "end": 756.38, "word": " variable", "probability": 0.89306640625}, {"start": 756.38, "end": 756.6, "word": " or", "probability": 0.83349609375}, {"start": 756.6, "end": 757.1, "word": " numerical", "probability": 0.91357421875}, {"start": 757.1, "end": 757.6, "word": " variables.", "probability": 0.8623046875}, {"start": 765.02, "end": 765.9, "word": " So", "probability": 0.30029296875}, {"start": 765.9, "end": 768.1, "word": " production", "probability": 0.94287109375}, {"start": 768.1, "end": 768.98, "word": " is", "probability": 0.90673828125}, {"start": 768.98, "end": 769.12, "word": " a", "probability": 0.7724609375}, {"start": 769.12, "end": 769.4, "word": " numerical", "probability": 0.9521484375}, {"start": 769.4, "end": 769.82, "word": " variable.", "probability": 0.91015625}, {"start": 770.9, "end": 771.44, "word": " Another", "probability": 0.8837890625}, {"start": 771.44, "end": 771.9, "word": " example", "probability": 0.97265625}, {"start": 771.9, "end": 772.4, "word": " for", "probability": 0.85986328125}, {"start": 772.4, "end": 772.84, "word": " quantitative.", "probability": 0.85205078125}], "temperature": 1.0}, {"id": 28, "seek": 79641, "start": 776.35, "end": 796.41, "text": " Is that produced by this company? 
Number of cell phones, maybe 20, 17, and so on. Any question? Next. So generally speaking, types of data, data has two types, categorical and numerical data.", "tokens": [1119, 300, 7126, 538, 341, 2237, 30, 5118, 295, 2815, 10216, 11, 1310, 945, 11, 3282, 11, 293, 370, 322, 13, 2639, 1168, 30, 3087, 13, 407, 5101, 4124, 11, 3467, 295, 1412, 11, 1412, 575, 732, 3467, 11, 19250, 804, 293, 29054, 1412, 13], "avg_logprob": -0.23692255434782608, "compression_ratio": 1.2972972972972974, "no_speech_prob": 0.0, "words": [{"start": 776.35, "end": 776.43, "word": " Is", "probability": 0.1485595703125}, {"start": 776.43, "end": 776.55, "word": " that", "probability": 0.73876953125}, {"start": 776.55, "end": 776.89, "word": " produced", "probability": 0.81884765625}, {"start": 776.89, "end": 777.23, "word": " by", "probability": 0.97021484375}, {"start": 777.23, "end": 777.53, "word": " this", "probability": 0.89208984375}, {"start": 777.53, "end": 777.97, "word": " company?", "probability": 0.89697265625}, {"start": 778.31, "end": 778.57, "word": " Number", "probability": 0.77734375}, {"start": 778.57, "end": 778.75, "word": " of", "probability": 0.96826171875}, {"start": 778.75, "end": 778.97, "word": " cell", "probability": 0.89013671875}, {"start": 778.97, "end": 779.17, "word": " phones,", "probability": 0.69189453125}, {"start": 779.27, "end": 779.35, "word": " maybe", "probability": 0.83349609375}, {"start": 779.35, "end": 779.79, "word": " 20,", "probability": 0.83935546875}, {"start": 780.03, "end": 780.69, "word": " 17,", "probability": 0.84912109375}, {"start": 780.81, "end": 781.01, "word": " and", "probability": 0.939453125}, {"start": 781.01, "end": 781.21, "word": " so", "probability": 0.9541015625}, {"start": 781.21, "end": 781.45, "word": " on.", "probability": 0.94873046875}, {"start": 783.23, "end": 783.59, "word": " Any", "probability": 0.89111328125}, {"start": 783.59, "end": 783.95, "word": " question?", "probability": 0.77880859375}, {"start": 786.19, 
"end": 786.87, "word": " Next.", "probability": 0.92333984375}, {"start": 789.07, "end": 789.61, "word": " So", "probability": 0.8984375}, {"start": 789.61, "end": 789.99, "word": " generally", "probability": 0.751953125}, {"start": 789.99, "end": 790.45, "word": " speaking,", "probability": 0.865234375}, {"start": 790.65, "end": 790.89, "word": " types", "probability": 0.82861328125}, {"start": 790.89, "end": 791.07, "word": " of", "probability": 0.97216796875}, {"start": 791.07, "end": 791.43, "word": " data,", "probability": 0.93701171875}, {"start": 792.05, "end": 792.41, "word": " data", "probability": 0.8447265625}, {"start": 792.41, "end": 792.71, "word": " has", "probability": 0.91650390625}, {"start": 792.71, "end": 792.89, "word": " two", "probability": 0.90234375}, {"start": 792.89, "end": 793.33, "word": " types,", "probability": 0.8310546875}, {"start": 794.09, "end": 794.57, "word": " categorical", "probability": 0.6741943359375}, {"start": 794.57, "end": 795.49, "word": " and", "probability": 0.9189453125}, {"start": 795.49, "end": 795.85, "word": " numerical", "probability": 0.8193359375}, {"start": 795.85, "end": 796.41, "word": " data.", "probability": 0.93310546875}], "temperature": 1.0}, {"id": 29, "seek": 81704, "start": 797.6, "end": 817.04, "text": " As we mentioned, marital status, political party, eye color, and so on. These are examples of categorical variables. On the other hand, a numerical variable can be split or divided into two parts. One is called discrete and the other is continuous, and we have to distinguish between these two. 
For example,", "tokens": [1018, 321, 2835, 11, 1849, 1686, 6558, 11, 3905, 3595, 11, 3313, 2017, 11, 293, 370, 322, 13, 1981, 366, 5110, 295, 19250, 804, 9102, 13, 1282, 264, 661, 1011, 11, 257, 29054, 7006, 393, 312, 7472, 420, 6666, 666, 732, 3166, 13, 1485, 307, 1219, 27706, 293, 264, 661, 307, 10957, 11, 293, 321, 362, 281, 20206, 1296, 613, 732, 13, 1171, 1365, 11], "avg_logprob": -0.16785037336927472, "compression_ratio": 1.5876288659793814, "no_speech_prob": 0.0, "words": [{"start": 797.6, "end": 797.96, "word": " As", "probability": 0.88720703125}, {"start": 797.96, "end": 798.08, "word": " we", "probability": 0.94775390625}, {"start": 798.08, "end": 798.5, "word": " mentioned,", "probability": 0.81982421875}, {"start": 798.9, "end": 799.18, "word": " marital", "probability": 0.935302734375}, {"start": 799.18, "end": 799.64, "word": " status,", "probability": 0.9384765625}, {"start": 799.94, "end": 800.28, "word": " political", "probability": 0.919921875}, {"start": 800.28, "end": 800.86, "word": " party,", "probability": 0.8876953125}, {"start": 801.02, "end": 801.24, "word": " eye", "probability": 0.8984375}, {"start": 801.24, "end": 801.44, "word": " color,", "probability": 0.61865234375}, {"start": 801.6, "end": 801.72, "word": " and", "probability": 0.93017578125}, {"start": 801.72, "end": 801.92, "word": " so", "probability": 0.95361328125}, {"start": 801.92, "end": 802.18, "word": " on.", "probability": 0.9482421875}, {"start": 802.9, "end": 803.42, "word": " These", "probability": 0.92138671875}, {"start": 803.42, "end": 804.38, "word": " are", "probability": 0.93994140625}, {"start": 804.38, "end": 804.86, "word": " examples", "probability": 0.8544921875}, {"start": 804.86, "end": 805.12, "word": " of", "probability": 0.9697265625}, {"start": 805.12, "end": 805.64, "word": " categorical", "probability": 0.95751953125}, {"start": 805.64, "end": 806.1, "word": " variables.", "probability": 0.93798828125}, {"start": 807.08, "end": 807.5, "word": " 
On", "probability": 0.95458984375}, {"start": 807.5, "end": 807.62, "word": " the", "probability": 0.923828125}, {"start": 807.62, "end": 807.82, "word": " other", "probability": 0.8828125}, {"start": 807.82, "end": 808.06, "word": " hand,", "probability": 0.9150390625}, {"start": 808.1, "end": 808.18, "word": " a", "probability": 0.51513671875}, {"start": 808.18, "end": 808.46, "word": " numerical", "probability": 0.9580078125}, {"start": 808.46, "end": 808.84, "word": " variable", "probability": 0.89501953125}, {"start": 808.84, "end": 809.1, "word": " can", "probability": 0.94482421875}, {"start": 809.1, "end": 809.3, "word": " be", "probability": 0.95458984375}, {"start": 809.3, "end": 809.64, "word": " split", "probability": 0.54345703125}, {"start": 809.64, "end": 810.0, "word": " or", "probability": 0.80615234375}, {"start": 810.0, "end": 810.46, "word": " divided", "probability": 0.75634765625}, {"start": 810.46, "end": 810.72, "word": " into", "probability": 0.84814453125}, {"start": 810.72, "end": 810.9, "word": " two", "probability": 0.9130859375}, {"start": 810.9, "end": 811.28, "word": " parts.", "probability": 0.86376953125}, {"start": 811.42, "end": 811.54, "word": " One", "probability": 0.9267578125}, {"start": 811.54, "end": 811.74, "word": " is", "probability": 0.9326171875}, {"start": 811.74, "end": 812.2, "word": " called", "probability": 0.9033203125}, {"start": 812.2, "end": 813.26, "word": " discrete", "probability": 0.9287109375}, {"start": 813.26, "end": 813.6, "word": " and", "probability": 0.75537109375}, {"start": 813.6, "end": 813.68, "word": " the", "probability": 0.36181640625}, {"start": 813.68, "end": 813.84, "word": " other", "probability": 0.8330078125}, {"start": 813.84, "end": 814.02, "word": " is", "probability": 0.89404296875}, {"start": 814.02, "end": 814.36, "word": " continuous,", "probability": 0.8359375}, {"start": 814.54, "end": 814.64, "word": " and", "probability": 0.9306640625}, {"start": 814.64, "end": 814.76, 
"word": " we", "probability": 0.94189453125}, {"start": 814.76, "end": 814.9, "word": " have", "probability": 0.939453125}, {"start": 814.9, "end": 815.02, "word": " to", "probability": 0.95361328125}, {"start": 815.02, "end": 815.32, "word": " distinguish", "probability": 0.765625}, {"start": 815.32, "end": 815.68, "word": " between", "probability": 0.85400390625}, {"start": 815.68, "end": 815.94, "word": " these", "probability": 0.85400390625}, {"start": 815.94, "end": 816.16, "word": " two.", "probability": 0.9365234375}, {"start": 816.52, "end": 816.72, "word": " For", "probability": 0.96337890625}, {"start": 816.72, "end": 817.04, "word": " example,", "probability": 0.97412109375}], "temperature": 1.0}, {"id": 30, "seek": 84442, "start": 818.98, "end": 844.42, "text": " Number of students in this class, you can say there are 60 or 50 students in this class. You cannot say there are 50.5 students. So number of students is discrete because it takes only integers. While for continuous type of numerical variables, you can say that my weight is 80.5 kilograms.", "tokens": [5118, 295, 1731, 294, 341, 1508, 11, 291, 393, 584, 456, 366, 4060, 420, 2625, 1731, 294, 341, 1508, 13, 509, 2644, 584, 456, 366, 2625, 13, 20, 1731, 13, 407, 1230, 295, 1731, 307, 27706, 570, 309, 2516, 787, 41674, 13, 3987, 337, 10957, 2010, 295, 29054, 9102, 11, 291, 393, 584, 300, 452, 3364, 307, 4688, 13, 20, 30690, 13], "avg_logprob": -0.11693948625572144, "compression_ratio": 1.6820809248554913, "no_speech_prob": 0.0, "words": [{"start": 818.98, "end": 819.36, "word": " Number", "probability": 0.498779296875}, {"start": 819.36, "end": 819.54, "word": " of", "probability": 0.9697265625}, {"start": 819.54, "end": 819.84, "word": " students", "probability": 0.9609375}, {"start": 819.84, "end": 820.04, "word": " in", "probability": 0.9423828125}, {"start": 820.04, "end": 820.24, "word": " this", "probability": 0.947265625}, {"start": 820.24, "end": 820.56, "word": " class,", "probability": 
0.966796875}, {"start": 820.66, "end": 820.74, "word": " you", "probability": 0.9521484375}, {"start": 820.74, "end": 820.9, "word": " can", "probability": 0.947265625}, {"start": 820.9, "end": 821.14, "word": " say", "probability": 0.88232421875}, {"start": 821.14, "end": 821.46, "word": " there", "probability": 0.859375}, {"start": 821.46, "end": 821.68, "word": " are", "probability": 0.9453125}, {"start": 821.68, "end": 822.14, "word": " 60", "probability": 0.841796875}, {"start": 822.14, "end": 822.36, "word": " or", "probability": 0.962890625}, {"start": 822.36, "end": 822.68, "word": " 50", "probability": 0.96142578125}, {"start": 822.68, "end": 823.18, "word": " students", "probability": 0.97021484375}, {"start": 823.18, "end": 823.4, "word": " in", "probability": 0.93994140625}, {"start": 823.4, "end": 823.56, "word": " this", "probability": 0.94091796875}, {"start": 823.56, "end": 823.86, "word": " class.", "probability": 0.96240234375}, {"start": 824.14, "end": 824.44, "word": " You", "probability": 0.958984375}, {"start": 824.44, "end": 824.68, "word": " cannot", "probability": 0.8671875}, {"start": 824.68, "end": 824.98, "word": " say", "probability": 0.9482421875}, {"start": 824.98, "end": 825.2, "word": " there", "probability": 0.87548828125}, {"start": 825.2, "end": 825.42, "word": " are", "probability": 0.94287109375}, {"start": 825.42, "end": 825.94, "word": " 50", "probability": 0.91064453125}, {"start": 825.94, "end": 827.26, "word": ".5", "probability": 0.94580078125}, {"start": 827.26, "end": 827.84, "word": " students.", "probability": 0.96484375}, {"start": 828.56, "end": 829.24, "word": " So", "probability": 0.9052734375}, {"start": 829.24, "end": 829.64, "word": " number", "probability": 0.73095703125}, {"start": 829.64, "end": 829.82, "word": " of", "probability": 0.9677734375}, {"start": 829.82, "end": 830.36, "word": " students", "probability": 0.96484375}, {"start": 830.36, "end": 831.72, "word": " is", "probability": 0.87255859375}, 
{"start": 831.72, "end": 832.16, "word": " discrete", "probability": 0.76220703125}, {"start": 832.16, "end": 832.7, "word": " because", "probability": 0.68408203125}, {"start": 832.7, "end": 832.88, "word": " it", "probability": 0.939453125}, {"start": 832.88, "end": 833.1, "word": " takes", "probability": 0.79150390625}, {"start": 833.1, "end": 833.4, "word": " only", "probability": 0.9287109375}, {"start": 833.4, "end": 833.84, "word": " integers.", "probability": 0.9150390625}, {"start": 835.44, "end": 836.12, "word": " While", "probability": 0.7744140625}, {"start": 836.12, "end": 837.04, "word": " for", "probability": 0.88916015625}, {"start": 837.04, "end": 837.7, "word": " continuous", "probability": 0.84130859375}, {"start": 837.7, "end": 838.22, "word": " type", "probability": 0.95654296875}, {"start": 838.22, "end": 838.58, "word": " of", "probability": 0.96630859375}, {"start": 838.58, "end": 838.94, "word": " numerical", "probability": 0.9462890625}, {"start": 838.94, "end": 839.42, "word": " variables,", "probability": 0.90283203125}, {"start": 839.56, "end": 839.64, "word": " you", "probability": 0.951171875}, {"start": 839.64, "end": 839.8, "word": " can", "probability": 0.9453125}, {"start": 839.8, "end": 839.98, "word": " say", "probability": 0.904296875}, {"start": 839.98, "end": 840.34, "word": " that", "probability": 0.93798828125}, {"start": 840.34, "end": 841.38, "word": " my", "probability": 0.8623046875}, {"start": 841.38, "end": 841.74, "word": " weight", "probability": 0.908203125}, {"start": 841.74, "end": 842.52, "word": " is", "probability": 0.951171875}, {"start": 842.52, "end": 843.34, "word": " 80", "probability": 0.97412109375}, {"start": 843.34, "end": 844.04, "word": ".5", "probability": 0.997314453125}, {"start": 844.04, "end": 844.42, "word": " kilograms.", "probability": 0.82568359375}], "temperature": 1.0}, {"id": 31, "seek": 87284, "start": 845.54, "end": 872.84, "text": " so it makes sense that your weight is not exactly 80 
kilograms it might be 80.6 or 80.5 and so on so discrete takes only integers while continuous takes any value I mean any real number so that's the difference between discrete and continuous number of phone or number of calls you have received", "tokens": [370, 309, 1669, 2020, 300, 428, 3364, 307, 406, 2293, 4688, 30690, 309, 1062, 312, 4688, 13, 21, 420, 4688, 13, 20, 293, 370, 322, 370, 27706, 2516, 787, 41674, 1339, 10957, 2516, 604, 2158, 286, 914, 604, 957, 1230, 370, 300, 311, 264, 2649, 1296, 27706, 293, 10957, 1230, 295, 2593, 420, 1230, 295, 5498, 291, 362, 4613], "avg_logprob": -0.18072916145126025, "compression_ratio": 1.5828877005347595, "no_speech_prob": 0.0, "words": [{"start": 845.54, "end": 845.78, "word": " so", "probability": 0.246826171875}, {"start": 845.78, "end": 845.9, "word": " it", "probability": 0.923828125}, {"start": 845.9, "end": 846.06, "word": " makes", "probability": 0.818359375}, {"start": 846.06, "end": 846.32, "word": " sense", "probability": 0.8173828125}, {"start": 846.32, "end": 846.58, "word": " that", "probability": 0.90625}, {"start": 846.58, "end": 846.78, "word": " your", "probability": 0.88671875}, {"start": 846.78, "end": 847.12, "word": " weight", "probability": 0.91796875}, {"start": 847.12, "end": 847.36, "word": " is", "probability": 0.94189453125}, {"start": 847.36, "end": 847.56, "word": " not", "probability": 0.94287109375}, {"start": 847.56, "end": 848.24, "word": " exactly", "probability": 0.448974609375}, {"start": 848.24, "end": 848.88, "word": " 80", "probability": 0.875}, {"start": 848.88, "end": 849.26, "word": " kilograms", "probability": 0.365478515625}, {"start": 849.26, "end": 849.98, "word": " it", "probability": 0.5361328125}, {"start": 849.98, "end": 850.18, "word": " might", "probability": 0.88232421875}, {"start": 850.18, "end": 850.34, "word": " be", "probability": 0.9462890625}, {"start": 850.34, "end": 850.62, "word": " 80", "probability": 0.93359375}, {"start": 850.62, "end": 851.18, "word": 
".6", "probability": 0.97802734375}, {"start": 851.18, "end": 851.34, "word": " or", "probability": 0.90478515625}, {"start": 851.34, "end": 851.54, "word": " 80", "probability": 0.96337890625}, {"start": 851.54, "end": 851.98, "word": ".5", "probability": 0.995849609375}, {"start": 851.98, "end": 852.14, "word": " and", "probability": 0.84228515625}, {"start": 852.14, "end": 852.34, "word": " so", "probability": 0.94775390625}, {"start": 852.34, "end": 853.24, "word": " on", "probability": 0.9296875}, {"start": 853.24, "end": 854.48, "word": " so", "probability": 0.34765625}, {"start": 854.48, "end": 855.58, "word": " discrete", "probability": 0.78857421875}, {"start": 855.58, "end": 855.98, "word": " takes", "probability": 0.8134765625}, {"start": 855.98, "end": 856.4, "word": " only", "probability": 0.93212890625}, {"start": 856.4, "end": 857.02, "word": " integers", "probability": 0.9111328125}, {"start": 857.02, "end": 858.5, "word": " while", "probability": 0.91064453125}, {"start": 858.5, "end": 859.06, "word": " continuous", "probability": 0.87060546875}, {"start": 859.06, "end": 859.76, "word": " takes", "probability": 0.81494140625}, {"start": 859.76, "end": 860.42, "word": " any", "probability": 0.91796875}, {"start": 860.42, "end": 861.18, "word": " value", "probability": 0.97119140625}, {"start": 861.18, "end": 861.52, "word": " I", "probability": 0.56640625}, {"start": 861.52, "end": 861.72, "word": " mean", "probability": 0.96875}, {"start": 861.72, "end": 862.1, "word": " any", "probability": 0.8671875}, {"start": 862.1, "end": 862.5, "word": " real", "probability": 0.96240234375}, {"start": 862.5, "end": 862.88, "word": " number", "probability": 0.939453125}, {"start": 862.88, "end": 863.78, "word": " so", "probability": 0.80224609375}, {"start": 863.78, "end": 864.02, "word": " that's", "probability": 0.89599609375}, {"start": 864.02, "end": 864.14, "word": " the", "probability": 0.8994140625}, {"start": 864.14, "end": 864.48, "word": " 
difference", "probability": 0.86181640625}, {"start": 864.48, "end": 864.96, "word": " between", "probability": 0.90185546875}, {"start": 864.96, "end": 867.54, "word": " discrete", "probability": 0.873046875}, {"start": 867.54, "end": 867.94, "word": " and", "probability": 0.9443359375}, {"start": 867.94, "end": 868.56, "word": " continuous", "probability": 0.8583984375}, {"start": 868.56, "end": 868.98, "word": " number", "probability": 0.841796875}, {"start": 868.98, "end": 869.4, "word": " of", "probability": 0.96435546875}, {"start": 869.4, "end": 870.24, "word": " phone", "probability": 0.861328125}, {"start": 870.24, "end": 870.6, "word": " or", "probability": 0.89599609375}, {"start": 870.6, "end": 870.86, "word": " number", "probability": 0.92333984375}, {"start": 870.86, "end": 871.08, "word": " of", "probability": 0.9697265625}, {"start": 871.08, "end": 871.4, "word": " calls", "probability": 0.865234375}, {"start": 871.4, "end": 872.12, "word": " you", "probability": 0.96533203125}, {"start": 872.12, "end": 872.32, "word": " have", "probability": 0.947265625}, {"start": 872.32, "end": 872.84, "word": " received", "probability": 0.7998046875}], "temperature": 1.0}, {"id": 32, "seek": 89921, "start": 873.57, "end": 899.21, "text": " this morning, maybe one, zero, nine, and so on, discrete. Number of patients in the hospital, discrete, and so on. But when we are talking about income, maybe my income is 1,000.5 shekel. It could be. 
It's continuous because my income can be any number between, for example, 1,000 and 10,000.", "tokens": [341, 2446, 11, 1310, 472, 11, 4018, 11, 4949, 11, 293, 370, 322, 11, 27706, 13, 5118, 295, 4209, 294, 264, 4530, 11, 27706, 11, 293, 370, 322, 13, 583, 562, 321, 366, 1417, 466, 5742, 11, 1310, 452, 5742, 307, 502, 11, 1360, 13, 20, 750, 7124, 13, 467, 727, 312, 13, 467, 311, 10957, 570, 452, 5742, 393, 312, 604, 1230, 1296, 11, 337, 1365, 11, 502, 11, 1360, 293, 1266, 11, 1360, 13], "avg_logprob": -0.18405032854575615, "compression_ratio": 1.5340314136125655, "no_speech_prob": 0.0, "words": [{"start": 873.57, "end": 873.93, "word": " this", "probability": 0.61328125}, {"start": 873.93, "end": 874.37, "word": " morning,", "probability": 0.8955078125}, {"start": 874.85, "end": 875.11, "word": " maybe", "probability": 0.86865234375}, {"start": 875.11, "end": 875.67, "word": " one,", "probability": 0.475830078125}, {"start": 875.99, "end": 876.47, "word": " zero,", "probability": 0.72900390625}, {"start": 876.75, "end": 877.07, "word": " nine,", "probability": 0.93994140625}, {"start": 877.21, "end": 877.27, "word": " and", "probability": 0.8994140625}, {"start": 877.27, "end": 877.45, "word": " so", "probability": 0.95263671875}, {"start": 877.45, "end": 877.67, "word": " on,", "probability": 0.94580078125}, {"start": 878.39, "end": 878.85, "word": " discrete.", "probability": 0.82275390625}, {"start": 879.37, "end": 880.01, "word": " Number", "probability": 0.8642578125}, {"start": 880.01, "end": 880.41, "word": " of", "probability": 0.97216796875}, {"start": 880.41, "end": 880.95, "word": " patients", "probability": 0.8759765625}, {"start": 880.95, "end": 881.45, "word": " in", "probability": 0.935546875}, {"start": 881.45, "end": 881.57, "word": " the", "probability": 0.91748046875}, {"start": 881.57, "end": 881.95, "word": " hospital,", "probability": 0.8798828125}, {"start": 882.35, "end": 883.43, "word": " discrete,", "probability": 0.9130859375}, {"start": 
883.59, "end": 883.99, "word": " and", "probability": 0.927734375}, {"start": 883.99, "end": 884.21, "word": " so", "probability": 0.94970703125}, {"start": 884.21, "end": 884.43, "word": " on.", "probability": 0.94384765625}, {"start": 884.99, "end": 885.27, "word": " But", "probability": 0.93408203125}, {"start": 885.27, "end": 885.79, "word": " when", "probability": 0.88623046875}, {"start": 885.79, "end": 885.89, "word": " we", "probability": 0.79931640625}, {"start": 885.89, "end": 886.01, "word": " are", "probability": 0.91845703125}, {"start": 886.01, "end": 886.25, "word": " talking", "probability": 0.8671875}, {"start": 886.25, "end": 886.59, "word": " about", "probability": 0.908203125}, {"start": 886.59, "end": 887.13, "word": " income,", "probability": 0.93798828125}, {"start": 887.73, "end": 887.97, "word": " maybe", "probability": 0.94140625}, {"start": 887.97, "end": 888.23, "word": " my", "probability": 0.97216796875}, {"start": 888.23, "end": 888.51, "word": " income", "probability": 0.9521484375}, {"start": 888.51, "end": 888.69, "word": " is", "probability": 0.9482421875}, {"start": 888.69, "end": 888.91, "word": " 1", "probability": 0.447265625}, {"start": 888.91, "end": 889.25, "word": ",000", "probability": 0.958251953125}, {"start": 889.25, "end": 889.79, "word": ".5", "probability": 0.895751953125}, {"start": 889.79, "end": 890.15, "word": " shekel.", "probability": 0.871337890625}, {"start": 890.69, "end": 890.85, "word": " It", "probability": 0.8564453125}, {"start": 890.85, "end": 891.01, "word": " could", "probability": 0.8662109375}, {"start": 891.01, "end": 891.23, "word": " be.", "probability": 0.95556640625}, {"start": 891.79, "end": 892.43, "word": " It's", "probability": 0.97509765625}, {"start": 892.43, "end": 892.91, "word": " continuous", "probability": 0.86962890625}, {"start": 892.91, "end": 893.79, "word": " because", "probability": 0.323486328125}, {"start": 893.79, "end": 894.13, "word": " my", "probability": 
0.97216796875}, {"start": 894.13, "end": 894.57, "word": " income", "probability": 0.93798828125}, {"start": 894.57, "end": 895.63, "word": " can", "probability": 0.9453125}, {"start": 895.63, "end": 895.79, "word": " be", "probability": 0.94921875}, {"start": 895.79, "end": 895.99, "word": " any", "probability": 0.916015625}, {"start": 895.99, "end": 896.41, "word": " number", "probability": 0.943359375}, {"start": 896.41, "end": 896.83, "word": " between,", "probability": 0.66943359375}, {"start": 896.97, "end": 897.07, "word": " for", "probability": 0.951171875}, {"start": 897.07, "end": 897.35, "word": " example,", "probability": 0.97314453125}, {"start": 897.67, "end": 897.93, "word": " 1", "probability": 0.85595703125}, {"start": 897.93, "end": 898.37, "word": ",000", "probability": 0.996826171875}, {"start": 898.37, "end": 898.67, "word": " and", "probability": 0.931640625}, {"start": 898.67, "end": 898.91, "word": " 10", "probability": 0.9677734375}, {"start": 898.91, "end": 899.21, "word": ",000.", "probability": 0.99609375}], "temperature": 1.0}, {"id": 33, "seek": 92745, "start": 900.47, "end": 927.45, "text": " It takes any value in this interval from 1,000 to 10,000. So it types of continuous rather than our continuous variable. So that's the two types of data, categorical and numerical. And numerical also has two types, either discrete or continuous. 
Later in Chapter 6, we'll talk more details about one of the most distribution statistics,", "tokens": [467, 2516, 604, 2158, 294, 341, 15035, 490, 502, 11, 1360, 281, 1266, 11, 1360, 13, 407, 309, 3467, 295, 10957, 2831, 813, 527, 10957, 7006, 13, 407, 300, 311, 264, 732, 3467, 295, 1412, 11, 19250, 804, 293, 29054, 13, 400, 29054, 611, 575, 732, 3467, 11, 2139, 27706, 420, 10957, 13, 11965, 294, 18874, 1386, 11, 321, 603, 751, 544, 4365, 466, 472, 295, 264, 881, 7316, 12523, 11], "avg_logprob": -0.206922748643491, "compression_ratio": 1.6047619047619048, "no_speech_prob": 0.0, "words": [{"start": 900.47, "end": 900.71, "word": " It", "probability": 0.5869140625}, {"start": 900.71, "end": 901.01, "word": " takes", "probability": 0.83203125}, {"start": 901.01, "end": 901.39, "word": " any", "probability": 0.91455078125}, {"start": 901.39, "end": 901.77, "word": " value", "probability": 0.97509765625}, {"start": 901.77, "end": 901.93, "word": " in", "probability": 0.94482421875}, {"start": 901.93, "end": 902.11, "word": " this", "probability": 0.9453125}, {"start": 902.11, "end": 902.51, "word": " interval", "probability": 0.96826171875}, {"start": 902.51, "end": 902.79, "word": " from", "probability": 0.86669921875}, {"start": 902.79, "end": 903.05, "word": " 1", "probability": 0.378173828125}, {"start": 903.05, "end": 903.35, "word": ",000", "probability": 0.98681640625}, {"start": 903.35, "end": 903.55, "word": " to", "probability": 0.96875}, {"start": 903.55, "end": 903.77, "word": " 10", "probability": 0.9697265625}, {"start": 903.77, "end": 904.29, "word": ",000.", "probability": 0.998046875}, {"start": 904.53, "end": 904.87, "word": " So", "probability": 0.931640625}, {"start": 904.87, "end": 905.03, "word": " it", "probability": 0.7978515625}, {"start": 905.03, "end": 905.39, "word": " types", "probability": 0.7744140625}, {"start": 905.39, "end": 905.83, "word": " of", "probability": 0.908203125}, {"start": 905.83, "end": 906.51, "word": " continuous", 
"probability": 0.775390625}, {"start": 906.51, "end": 907.57, "word": " rather", "probability": 0.173583984375}, {"start": 907.57, "end": 908.17, "word": " than", "probability": 0.7177734375}, {"start": 908.17, "end": 908.41, "word": " our", "probability": 0.74658203125}, {"start": 908.41, "end": 908.91, "word": " continuous", "probability": 0.83447265625}, {"start": 908.91, "end": 909.25, "word": " variable.", "probability": 0.6005859375}, {"start": 909.91, "end": 910.11, "word": " So", "probability": 0.93115234375}, {"start": 910.11, "end": 910.47, "word": " that's", "probability": 0.9619140625}, {"start": 910.47, "end": 910.61, "word": " the", "probability": 0.78759765625}, {"start": 910.61, "end": 911.97, "word": " two", "probability": 0.884765625}, {"start": 911.97, "end": 912.29, "word": " types", "probability": 0.81103515625}, {"start": 912.29, "end": 912.45, "word": " of", "probability": 0.96826171875}, {"start": 912.45, "end": 912.73, "word": " data,", "probability": 0.93310546875}, {"start": 913.13, "end": 913.65, "word": " categorical", "probability": 0.942138671875}, {"start": 913.65, "end": 914.61, "word": " and", "probability": 0.9052734375}, {"start": 914.61, "end": 915.03, "word": " numerical.", "probability": 0.919921875}, {"start": 915.27, "end": 915.35, "word": " And", "probability": 0.76171875}, {"start": 915.35, "end": 915.59, "word": " numerical", "probability": 0.8115234375}, {"start": 915.59, "end": 915.95, "word": " also", "probability": 0.859375}, {"start": 915.95, "end": 916.15, "word": " has", "probability": 0.9296875}, {"start": 916.15, "end": 916.33, "word": " two", "probability": 0.93505859375}, {"start": 916.33, "end": 916.73, "word": " types,", "probability": 0.826171875}, {"start": 917.15, "end": 917.33, "word": " either", "probability": 0.94384765625}, {"start": 917.33, "end": 917.81, "word": " discrete", "probability": 0.91845703125}, {"start": 917.81, "end": 918.17, "word": " or", "probability": 0.95703125}, {"start": 918.17, 
"end": 918.51, "word": " continuous.", "probability": 0.83154296875}, {"start": 919.75, "end": 920.13, "word": " Later", "probability": 0.9033203125}, {"start": 920.13, "end": 920.29, "word": " in", "probability": 0.65966796875}, {"start": 920.29, "end": 920.53, "word": " Chapter", "probability": 0.56689453125}, {"start": 920.53, "end": 920.91, "word": " 6,", "probability": 0.68359375}, {"start": 921.87, "end": 922.41, "word": " we'll", "probability": 0.920166015625}, {"start": 922.41, "end": 922.69, "word": " talk", "probability": 0.880859375}, {"start": 922.69, "end": 923.65, "word": " more", "probability": 0.56689453125}, {"start": 923.65, "end": 924.01, "word": " details", "probability": 0.87255859375}, {"start": 924.01, "end": 924.43, "word": " about", "probability": 0.90478515625}, {"start": 924.43, "end": 925.23, "word": " one", "probability": 0.92724609375}, {"start": 925.23, "end": 925.43, "word": " of", "probability": 0.96826171875}, {"start": 925.43, "end": 925.57, "word": " the", "probability": 0.9169921875}, {"start": 925.57, "end": 926.01, "word": " most", "probability": 0.90966796875}, {"start": 926.01, "end": 926.81, "word": " distribution", "probability": 0.85888671875}, {"start": 926.81, "end": 927.45, "word": " statistics,", "probability": 0.60205078125}], "temperature": 1.0}, {"id": 34, "seek": 95014, "start": 928.56, "end": 950.14, "text": " for continuous, one which is called normal distribution. That will be later, inshallah. As we mentioned last time, at the end of each chapter, there is a section or sections, sometimes there are two sections, talks about computer programs. 
How can we use computer programs in order to analyze the data?", "tokens": [337, 10957, 11, 472, 597, 307, 1219, 2710, 7316, 13, 663, 486, 312, 1780, 11, 1028, 71, 13492, 13, 1018, 321, 2835, 1036, 565, 11, 412, 264, 917, 295, 1184, 7187, 11, 456, 307, 257, 3541, 420, 10863, 11, 2171, 456, 366, 732, 10863, 11, 6686, 466, 3820, 4268, 13, 1012, 393, 321, 764, 3820, 4268, 294, 1668, 281, 12477, 264, 1412, 30], "avg_logprob": -0.19836426386609674, "compression_ratio": 1.578125, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 928.56, "end": 928.94, "word": " for", "probability": 0.77197265625}, {"start": 928.94, "end": 929.46, "word": " continuous,", "probability": 0.79248046875}, {"start": 930.12, "end": 930.54, "word": " one", "probability": 0.79248046875}, {"start": 930.54, "end": 930.74, "word": " which", "probability": 0.9033203125}, {"start": 930.74, "end": 930.9, "word": " is", "probability": 0.943359375}, {"start": 930.9, "end": 931.14, "word": " called", "probability": 0.89208984375}, {"start": 931.14, "end": 931.56, "word": " normal", "probability": 0.697265625}, {"start": 931.56, "end": 932.14, "word": " distribution.", "probability": 0.83984375}, {"start": 932.32, "end": 932.44, "word": " That", "probability": 0.444091796875}, {"start": 932.44, "end": 932.64, "word": " will", "probability": 0.7861328125}, {"start": 932.64, "end": 932.84, "word": " be", "probability": 0.95458984375}, {"start": 932.84, "end": 933.7, "word": " later,", "probability": 0.908203125}, {"start": 933.82, "end": 934.08, "word": " inshallah.", "probability": 0.680419921875}, {"start": 935.6, "end": 936.12, "word": " As", "probability": 0.9638671875}, {"start": 936.12, "end": 936.24, "word": " we", "probability": 0.70849609375}, {"start": 936.24, "end": 936.46, "word": " mentioned", "probability": 0.8330078125}, {"start": 936.46, "end": 936.72, "word": " last", "probability": 0.8603515625}, {"start": 936.72, "end": 937.1, "word": " time,", "probability": 0.88330078125}, 
{"start": 937.5, "end": 937.94, "word": " at", "probability": 0.95556640625}, {"start": 937.94, "end": 938.08, "word": " the", "probability": 0.91845703125}, {"start": 938.08, "end": 938.3, "word": " end", "probability": 0.8984375}, {"start": 938.3, "end": 938.46, "word": " of", "probability": 0.9599609375}, {"start": 938.46, "end": 938.62, "word": " each", "probability": 0.86767578125}, {"start": 938.62, "end": 939.0, "word": " chapter,", "probability": 0.86083984375}, {"start": 939.28, "end": 939.5, "word": " there", "probability": 0.90771484375}, {"start": 939.5, "end": 939.74, "word": " is", "probability": 0.94091796875}, {"start": 939.74, "end": 939.9, "word": " a", "probability": 0.99755859375}, {"start": 939.9, "end": 940.26, "word": " section", "probability": 0.8955078125}, {"start": 940.26, "end": 940.46, "word": " or", "probability": 0.8740234375}, {"start": 940.46, "end": 940.96, "word": " sections,", "probability": 0.92041015625}, {"start": 941.22, "end": 941.48, "word": " sometimes", "probability": 0.9404296875}, {"start": 941.48, "end": 941.64, "word": " there", "probability": 0.85791015625}, {"start": 941.64, "end": 941.8, "word": " are", "probability": 0.94287109375}, {"start": 941.8, "end": 942.1, "word": " two", "probability": 0.92919921875}, {"start": 942.1, "end": 942.48, "word": " sections,", "probability": 0.8818359375}, {"start": 942.86, "end": 943.14, "word": " talks", "probability": 0.78564453125}, {"start": 943.14, "end": 943.6, "word": " about", "probability": 0.89697265625}, {"start": 943.6, "end": 944.5, "word": " computer", "probability": 0.85107421875}, {"start": 944.5, "end": 945.0, "word": " programs.", "probability": 0.86865234375}, {"start": 945.12, "end": 945.2, "word": " How", "probability": 0.95947265625}, {"start": 945.2, "end": 945.42, "word": " can", "probability": 0.94140625}, {"start": 945.42, "end": 945.58, "word": " we", "probability": 0.96240234375}, {"start": 945.58, "end": 945.94, "word": " use", "probability": 
0.88427734375}, {"start": 945.94, "end": 947.22, "word": " computer", "probability": 0.85205078125}, {"start": 947.22, "end": 947.74, "word": " programs", "probability": 0.8505859375}, {"start": 947.74, "end": 948.78, "word": " in", "probability": 0.90966796875}, {"start": 948.78, "end": 948.98, "word": " order", "probability": 0.91845703125}, {"start": 948.98, "end": 949.2, "word": " to", "probability": 0.9736328125}, {"start": 949.2, "end": 949.64, "word": " analyze", "probability": 0.7919921875}, {"start": 949.64, "end": 949.84, "word": " the", "probability": 0.9140625}, {"start": 949.84, "end": 950.14, "word": " data?", "probability": 0.93359375}], "temperature": 1.0}, {"id": 35, "seek": 97944, "start": 950.9, "end": 979.44, "text": " And as we mentioned last time, you should take a course on that. It's called Computer and Data Analysis or SPSS course. So we are going to skip the computer programs used for any chapters in this book. And that's the end of chapter number three. Any questions? 
Let's move.", "tokens": [400, 382, 321, 2835, 1036, 565, 11, 291, 820, 747, 257, 1164, 322, 300, 13, 467, 311, 1219, 22289, 293, 11888, 38172, 420, 318, 6273, 50, 1164, 13, 407, 321, 366, 516, 281, 10023, 264, 3820, 4268, 1143, 337, 604, 20013, 294, 341, 1446, 13, 400, 300, 311, 264, 917, 295, 7187, 1230, 1045, 13, 2639, 1651, 30, 961, 311, 1286, 13], "avg_logprob": -0.13306051729217408, "compression_ratio": 1.436842105263158, "no_speech_prob": 0.0, "words": [{"start": 950.9, "end": 951.24, "word": " And", "probability": 0.6904296875}, {"start": 951.24, "end": 951.4, "word": " as", "probability": 0.92578125}, {"start": 951.4, "end": 951.52, "word": " we", "probability": 0.908203125}, {"start": 951.52, "end": 951.74, "word": " mentioned", "probability": 0.82568359375}, {"start": 951.74, "end": 952.0, "word": " last", "probability": 0.8427734375}, {"start": 952.0, "end": 952.3, "word": " time,", "probability": 0.8876953125}, {"start": 952.42, "end": 952.56, "word": " you", "probability": 0.9580078125}, {"start": 952.56, "end": 952.76, "word": " should", "probability": 0.9658203125}, {"start": 952.76, "end": 953.04, "word": " take", "probability": 0.78515625}, {"start": 953.04, "end": 953.18, "word": " a", "probability": 0.99658203125}, {"start": 953.18, "end": 953.48, "word": " course", "probability": 0.9658203125}, {"start": 953.48, "end": 953.68, "word": " on", "probability": 0.89111328125}, {"start": 953.68, "end": 954.04, "word": " that.", "probability": 0.93115234375}, {"start": 954.4, "end": 954.4, "word": " It's", "probability": 0.960205078125}, {"start": 954.4, "end": 954.6, "word": " called", "probability": 0.89111328125}, {"start": 954.6, "end": 955.04, "word": " Computer", "probability": 0.71923828125}, {"start": 955.04, "end": 955.24, "word": " and", "probability": 0.85498046875}, {"start": 955.24, "end": 955.42, "word": " Data", "probability": 0.94482421875}, {"start": 955.42, "end": 955.82, "word": " Analysis", "probability": 0.97607421875}, {"start": 
955.82, "end": 956.08, "word": " or", "probability": 0.57080078125}, {"start": 956.08, "end": 956.72, "word": " SPSS", "probability": 0.80029296875}, {"start": 956.72, "end": 957.08, "word": " course.", "probability": 0.9423828125}, {"start": 957.42, "end": 957.8, "word": " So", "probability": 0.95068359375}, {"start": 957.8, "end": 957.96, "word": " we", "probability": 0.7998046875}, {"start": 957.96, "end": 958.1, "word": " are", "probability": 0.9248046875}, {"start": 958.1, "end": 958.3, "word": " going", "probability": 0.94921875}, {"start": 958.3, "end": 958.5, "word": " to", "probability": 0.96630859375}, {"start": 958.5, "end": 959.32, "word": " skip", "probability": 0.97998046875}, {"start": 959.32, "end": 961.26, "word": " the", "probability": 0.771484375}, {"start": 961.26, "end": 961.72, "word": " computer", "probability": 0.8818359375}, {"start": 961.72, "end": 962.42, "word": " programs", "probability": 0.87060546875}, {"start": 962.42, "end": 963.46, "word": " used", "probability": 0.90625}, {"start": 963.46, "end": 963.86, "word": " for", "probability": 0.95361328125}, {"start": 963.86, "end": 964.36, "word": " any", "probability": 0.9072265625}, {"start": 964.36, "end": 965.22, "word": " chapters", "probability": 0.91748046875}, {"start": 965.22, "end": 965.56, "word": " in", "probability": 0.94482421875}, {"start": 965.56, "end": 965.78, "word": " this", "probability": 0.94775390625}, {"start": 965.78, "end": 966.12, "word": " book.", "probability": 0.9443359375}, {"start": 966.86, "end": 967.1, "word": " And", "probability": 0.888671875}, {"start": 967.1, "end": 967.52, "word": " that's", "probability": 0.964599609375}, {"start": 967.52, "end": 967.74, "word": " the", "probability": 0.9228515625}, {"start": 967.74, "end": 968.04, "word": " end", "probability": 0.89404296875}, {"start": 968.04, "end": 968.72, "word": " of", "probability": 0.96826171875}, {"start": 968.72, "end": 969.18, "word": " chapter", "probability": 0.71142578125}, {"start": 
969.18, "end": 969.6, "word": " number", "probability": 0.9169921875}, {"start": 969.6, "end": 970.02, "word": " three.", "probability": 0.76611328125}, {"start": 971.96, "end": 972.56, "word": " Any", "probability": 0.8740234375}, {"start": 972.56, "end": 973.06, "word": " questions?", "probability": 0.955078125}, {"start": 978.38, "end": 979.08, "word": " Let's", "probability": 0.966064453125}, {"start": 979.08, "end": 979.44, "word": " move.", "probability": 0.943359375}], "temperature": 1.0}, {"id": 36, "seek": 100379, "start": 980.71, "end": 1003.79, "text": " quickly on chapter three. Chapter three maybe is the easiest chapter in this book. It's straightforward. We have some formulas to compute some statistical measures.", "tokens": [2661, 322, 7187, 1045, 13, 18874, 1045, 1310, 307, 264, 12889, 7187, 294, 341, 1446, 13, 467, 311, 15325, 13, 492, 362, 512, 30546, 281, 14722, 512, 22820, 8000, 13], "avg_logprob": -0.23198084004463687, "compression_ratio": 1.3636363636363635, "no_speech_prob": 0.0, "words": [{"start": 980.71, "end": 981.19, "word": " quickly", "probability": 0.44580078125}, {"start": 981.19, "end": 981.99, "word": " on", "probability": 0.662109375}, {"start": 981.99, "end": 982.21, "word": " chapter", "probability": 0.51953125}, {"start": 982.21, "end": 982.55, "word": " three.", "probability": 0.66650390625}, {"start": 991.29, "end": 992.21, "word": " Chapter", "probability": 0.8759765625}, {"start": 992.21, "end": 992.59, "word": " three", "probability": 0.716796875}, {"start": 992.59, "end": 993.73, "word": " maybe", "probability": 0.68701171875}, {"start": 993.73, "end": 994.33, "word": " is", "probability": 0.896484375}, {"start": 994.33, "end": 994.49, "word": " the", "probability": 0.92041015625}, {"start": 994.49, "end": 994.75, "word": " easiest", "probability": 0.87744140625}, {"start": 994.75, "end": 995.21, "word": " chapter", "probability": 0.8896484375}, {"start": 995.21, "end": 995.37, "word": " in", "probability": 
0.91162109375}, {"start": 995.37, "end": 995.53, "word": " this", "probability": 0.86865234375}, {"start": 995.53, "end": 995.85, "word": " book.", "probability": 0.9775390625}, {"start": 996.69, "end": 996.91, "word": " It's", "probability": 0.66259765625}, {"start": 996.91, "end": 997.37, "word": " straightforward.", "probability": 0.70166015625}, {"start": 997.93, "end": 998.21, "word": " We", "probability": 0.95947265625}, {"start": 998.21, "end": 998.41, "word": " have", "probability": 0.9482421875}, {"start": 998.41, "end": 998.89, "word": " some", "probability": 0.8994140625}, {"start": 998.89, "end": 999.95, "word": " formulas", "probability": 0.9755859375}, {"start": 999.95, "end": 1001.63, "word": " to", "probability": 0.91455078125}, {"start": 1001.63, "end": 1002.03, "word": " compute", "probability": 0.9150390625}, {"start": 1002.03, "end": 1002.51, "word": " some", "probability": 0.8955078125}, {"start": 1002.51, "end": 1003.23, "word": " statistical", "probability": 0.9013671875}, {"start": 1003.23, "end": 1003.79, "word": " measures.", "probability": 0.8466796875}], "temperature": 1.0}, {"id": 37, "seek": 103188, "start": 1004.72, "end": 1031.88, "text": " And we should know how can we calculate these measures and what are the meaning of your results. So chapter three talks about numerical descriptive measures. 
In this chapter, you will learn, number one, describe the probabilities of central tendency, variation, and shape in numerical data.", "tokens": [400, 321, 820, 458, 577, 393, 321, 8873, 613, 8000, 293, 437, 366, 264, 3620, 295, 428, 3542, 13, 407, 7187, 1045, 6686, 466, 29054, 42585, 8000, 13, 682, 341, 7187, 11, 291, 486, 1466, 11, 1230, 472, 11, 6786, 264, 33783, 295, 5777, 18187, 11, 12990, 11, 293, 3909, 294, 29054, 1412, 13], "avg_logprob": -0.1982954588803378, "compression_ratio": 1.5561497326203209, "no_speech_prob": 0.0, "words": [{"start": 1004.72, "end": 1005.1, "word": " And", "probability": 0.693359375}, {"start": 1005.1, "end": 1005.28, "word": " we", "probability": 0.95166015625}, {"start": 1005.28, "end": 1005.5, "word": " should", "probability": 0.9677734375}, {"start": 1005.5, "end": 1005.72, "word": " know", "probability": 0.8974609375}, {"start": 1005.72, "end": 1005.94, "word": " how", "probability": 0.8818359375}, {"start": 1005.94, "end": 1006.18, "word": " can", "probability": 0.919921875}, {"start": 1006.18, "end": 1006.36, "word": " we", "probability": 0.96142578125}, {"start": 1006.36, "end": 1006.8, "word": " calculate", "probability": 0.90234375}, {"start": 1006.8, "end": 1007.08, "word": " these", "probability": 0.85107421875}, {"start": 1007.08, "end": 1007.4, "word": " measures", "probability": 0.8623046875}, {"start": 1007.4, "end": 1008.0, "word": " and", "probability": 0.57763671875}, {"start": 1008.0, "end": 1008.5, "word": " what", "probability": 0.9521484375}, {"start": 1008.5, "end": 1008.74, "word": " are", "probability": 0.9384765625}, {"start": 1008.74, "end": 1008.92, "word": " the", "probability": 0.921875}, {"start": 1008.92, "end": 1009.22, "word": " meaning", "probability": 0.48046875}, {"start": 1009.22, "end": 1009.64, "word": " of", "probability": 0.94775390625}, {"start": 1009.64, "end": 1009.9, "word": " your", "probability": 0.8896484375}, {"start": 1009.9, "end": 1010.34, "word": " results.", "probability": 
0.87841796875}, {"start": 1012.26, "end": 1012.62, "word": " So", "probability": 0.85302734375}, {"start": 1012.62, "end": 1013.06, "word": " chapter", "probability": 0.52099609375}, {"start": 1013.06, "end": 1013.46, "word": " three", "probability": 0.70166015625}, {"start": 1013.46, "end": 1013.82, "word": " talks", "probability": 0.861328125}, {"start": 1013.82, "end": 1014.4, "word": " about", "probability": 0.90283203125}, {"start": 1014.4, "end": 1015.06, "word": " numerical", "probability": 0.765625}, {"start": 1015.06, "end": 1016.14, "word": " descriptive", "probability": 0.8466796875}, {"start": 1016.14, "end": 1016.58, "word": " measures.", "probability": 0.84814453125}, {"start": 1018.86, "end": 1019.5, "word": " In", "probability": 0.9580078125}, {"start": 1019.5, "end": 1019.92, "word": " this", "probability": 0.94384765625}, {"start": 1019.92, "end": 1020.66, "word": " chapter,", "probability": 0.84228515625}, {"start": 1020.86, "end": 1020.96, "word": " you", "probability": 0.95849609375}, {"start": 1020.96, "end": 1021.12, "word": " will", "probability": 0.61376953125}, {"start": 1021.12, "end": 1021.46, "word": " learn,", "probability": 0.9716796875}, {"start": 1022.18, "end": 1023.22, "word": " number", "probability": 0.9150390625}, {"start": 1023.22, "end": 1023.54, "word": " one,", "probability": 0.88134765625}, {"start": 1024.58, "end": 1025.04, "word": " describe", "probability": 0.81103515625}, {"start": 1025.04, "end": 1025.26, "word": " the", "probability": 0.92431640625}, {"start": 1025.26, "end": 1025.68, "word": " probabilities", "probability": 0.50634765625}, {"start": 1025.68, "end": 1026.04, "word": " of", "probability": 0.9697265625}, {"start": 1026.04, "end": 1026.48, "word": " central", "probability": 0.921875}, {"start": 1026.48, "end": 1027.02, "word": " tendency,", "probability": 0.91064453125}, {"start": 1028.64, "end": 1029.96, "word": " variation,", "probability": 0.88623046875}, {"start": 1030.28, "end": 1030.52, "word": " 
and", "probability": 0.9375}, {"start": 1030.52, "end": 1030.92, "word": " shape", "probability": 0.919921875}, {"start": 1030.92, "end": 1031.1, "word": " in", "probability": 0.91357421875}, {"start": 1031.1, "end": 1031.48, "word": " numerical", "probability": 0.92578125}, {"start": 1031.48, "end": 1031.88, "word": " data.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 38, "seek": 105795, "start": 1032.73, "end": 1057.95, "text": " In this lecture, we'll talk in more details about some of the center tendency measures. Later, we'll talk about the variation, or spread, or dispersion, and the shape in numerical data. So that's part number one. We have to know something about center tendency, variation, and the shape of the data we have.", "tokens": [682, 341, 7991, 11, 321, 603, 751, 294, 544, 4365, 466, 512, 295, 264, 3056, 18187, 8000, 13, 11965, 11, 321, 603, 751, 466, 264, 12990, 11, 420, 3974, 11, 420, 24631, 313, 11, 293, 264, 3909, 294, 29054, 1412, 13, 407, 300, 311, 644, 1230, 472, 13, 492, 362, 281, 458, 746, 466, 3056, 18187, 11, 12990, 11, 293, 264, 3909, 295, 264, 1412, 321, 362, 13], "avg_logprob": -0.13405796669531558, "compression_ratio": 1.6830601092896176, "no_speech_prob": 0.0, "words": [{"start": 1032.73, "end": 1032.99, "word": " In", "probability": 0.52001953125}, {"start": 1032.99, "end": 1033.23, "word": " this", "probability": 0.94384765625}, {"start": 1033.23, "end": 1033.59, "word": " lecture,", "probability": 0.93603515625}, {"start": 1033.73, "end": 1034.21, "word": " we'll", "probability": 0.8642578125}, {"start": 1034.21, "end": 1034.81, "word": " talk", "probability": 0.890625}, {"start": 1034.81, "end": 1035.17, "word": " in", "probability": 0.9345703125}, {"start": 1035.17, "end": 1035.45, "word": " more", "probability": 0.9208984375}, {"start": 1035.45, "end": 1035.79, "word": " details", "probability": 0.85009765625}, {"start": 1035.79, "end": 1036.25, "word": " about", "probability": 0.90087890625}, {"start": 
1036.25, "end": 1037.01, "word": " some", "probability": 0.9013671875}, {"start": 1037.01, "end": 1037.21, "word": " of", "probability": 0.96875}, {"start": 1037.21, "end": 1037.37, "word": " the", "probability": 0.92431640625}, {"start": 1037.37, "end": 1037.61, "word": " center", "probability": 0.6416015625}, {"start": 1037.61, "end": 1037.89, "word": " tendency", "probability": 0.8037109375}, {"start": 1037.89, "end": 1038.45, "word": " measures.", "probability": 0.8310546875}, {"start": 1040.37, "end": 1040.97, "word": " Later,", "probability": 0.90283203125}, {"start": 1041.11, "end": 1041.37, "word": " we'll", "probability": 0.926513671875}, {"start": 1041.37, "end": 1041.59, "word": " talk", "probability": 0.89404296875}, {"start": 1041.59, "end": 1042.09, "word": " about", "probability": 0.89990234375}, {"start": 1042.09, "end": 1042.63, "word": " the", "probability": 0.67822265625}, {"start": 1042.63, "end": 1043.13, "word": " variation,", "probability": 0.88818359375}, {"start": 1043.97, "end": 1044.09, "word": " or", "probability": 0.966796875}, {"start": 1044.09, "end": 1044.59, "word": " spread,", "probability": 0.9091796875}, {"start": 1046.23, "end": 1046.49, "word": " or", "probability": 0.9658203125}, {"start": 1046.49, "end": 1047.15, "word": " dispersion,", "probability": 0.95068359375}, {"start": 1047.45, "end": 1047.81, "word": " and", "probability": 0.9345703125}, {"start": 1047.81, "end": 1047.99, "word": " the", "probability": 0.9072265625}, {"start": 1047.99, "end": 1048.29, "word": " shape", "probability": 0.92724609375}, {"start": 1048.29, "end": 1048.51, "word": " in", "probability": 0.90771484375}, {"start": 1048.51, "end": 1048.85, "word": " numerical", "probability": 0.84228515625}, {"start": 1048.85, "end": 1049.25, "word": " data.", "probability": 0.9326171875}, {"start": 1049.53, "end": 1049.69, "word": " So", "probability": 0.94140625}, {"start": 1049.69, "end": 1050.01, "word": " that's", "probability": 0.927490234375}, {"start": 
1050.01, "end": 1050.21, "word": " part", "probability": 0.892578125}, {"start": 1050.21, "end": 1050.41, "word": " number", "probability": 0.935546875}, {"start": 1050.41, "end": 1050.63, "word": " one.", "probability": 0.78173828125}, {"start": 1050.67, "end": 1050.77, "word": " We", "probability": 0.96142578125}, {"start": 1050.77, "end": 1050.91, "word": " have", "probability": 0.9482421875}, {"start": 1050.91, "end": 1051.05, "word": " to", "probability": 0.9599609375}, {"start": 1051.05, "end": 1051.27, "word": " know", "probability": 0.75439453125}, {"start": 1051.27, "end": 1051.63, "word": " something", "probability": 0.86962890625}, {"start": 1051.63, "end": 1052.27, "word": " about", "probability": 0.89892578125}, {"start": 1052.27, "end": 1053.21, "word": " center", "probability": 0.82666015625}, {"start": 1053.21, "end": 1053.69, "word": " tendency,", "probability": 0.9150390625}, {"start": 1054.25, "end": 1054.89, "word": " variation,", "probability": 0.9013671875}, {"start": 1055.17, "end": 1055.37, "word": " and", "probability": 0.93603515625}, {"start": 1055.37, "end": 1055.53, "word": " the", "probability": 0.91064453125}, {"start": 1055.53, "end": 1055.91, "word": " shape", "probability": 0.90576171875}, {"start": 1055.91, "end": 1056.39, "word": " of", "probability": 0.9677734375}, {"start": 1056.39, "end": 1056.71, "word": " the", "probability": 0.921875}, {"start": 1056.71, "end": 1057.51, "word": " data", "probability": 0.9423828125}, {"start": 1057.51, "end": 1057.71, "word": " we", "probability": 0.95751953125}, {"start": 1057.71, "end": 1057.95, "word": " have.", "probability": 0.92041015625}], "temperature": 1.0}, {"id": 39, "seek": 108854, "start": 1059.92, "end": 1088.54, "text": " to calculate descriptive summary measures for a population. So we have to calculate these measures for the sample. And if we have the entire population, we can compute these measures also for that population. 
Then I will introduce in more details about something called Paxiplot. How can we construct and interpret a Paxiplot? That's, inshallah, next time on Tuesday.", "tokens": [281, 8873, 42585, 12691, 8000, 337, 257, 4415, 13, 407, 321, 362, 281, 8873, 613, 8000, 337, 264, 6889, 13, 400, 498, 321, 362, 264, 2302, 4415, 11, 321, 393, 14722, 613, 8000, 611, 337, 300, 4415, 13, 1396, 286, 486, 5366, 294, 544, 4365, 466, 746, 1219, 430, 2797, 34442, 310, 13, 1012, 393, 321, 7690, 293, 7302, 257, 430, 2797, 34442, 310, 30, 663, 311, 11, 1028, 71, 13492, 11, 958, 565, 322, 10017, 13], "avg_logprob": -0.18068910466554838, "compression_ratio": 1.7116279069767442, "no_speech_prob": 0.0, "words": [{"start": 1059.92, "end": 1060.16, "word": " to", "probability": 0.83349609375}, {"start": 1060.16, "end": 1060.6, "word": " calculate", "probability": 0.9208984375}, {"start": 1060.6, "end": 1061.36, "word": " descriptive", "probability": 0.5869140625}, {"start": 1061.36, "end": 1062.02, "word": " summary", "probability": 0.83935546875}, {"start": 1062.02, "end": 1062.32, "word": " measures", "probability": 0.79931640625}, {"start": 1062.32, "end": 1062.64, "word": " for", "probability": 0.94677734375}, {"start": 1062.64, "end": 1062.78, "word": " a", "probability": 0.921875}, {"start": 1062.78, "end": 1063.2, "word": " population.", "probability": 0.94287109375}, {"start": 1063.88, "end": 1064.18, "word": " So", "probability": 0.9404296875}, {"start": 1064.18, "end": 1064.26, "word": " we", "probability": 0.41552734375}, {"start": 1064.26, "end": 1064.38, "word": " have", "probability": 0.94384765625}, {"start": 1064.38, "end": 1064.64, "word": " to", "probability": 0.97412109375}, {"start": 1064.64, "end": 1065.36, "word": " calculate", "probability": 0.9228515625}, {"start": 1065.36, "end": 1065.68, "word": " these", "probability": 0.82373046875}, {"start": 1065.68, "end": 1065.98, "word": " measures", "probability": 0.84228515625}, {"start": 1065.98, "end": 1066.34, "word": " 
for", "probability": 0.9482421875}, {"start": 1066.34, "end": 1066.5, "word": " the", "probability": 0.849609375}, {"start": 1066.5, "end": 1066.8, "word": " sample.", "probability": 0.8876953125}, {"start": 1067.54, "end": 1067.76, "word": " And", "probability": 0.91796875}, {"start": 1067.76, "end": 1067.98, "word": " if", "probability": 0.951171875}, {"start": 1067.98, "end": 1068.12, "word": " we", "probability": 0.962890625}, {"start": 1068.12, "end": 1068.32, "word": " have", "probability": 0.94775390625}, {"start": 1068.32, "end": 1068.46, "word": " the", "probability": 0.9208984375}, {"start": 1068.46, "end": 1068.96, "word": " entire", "probability": 0.8857421875}, {"start": 1068.96, "end": 1069.8, "word": " population,", "probability": 0.9541015625}, {"start": 1069.98, "end": 1070.04, "word": " we", "probability": 0.9013671875}, {"start": 1070.04, "end": 1070.22, "word": " can", "probability": 0.94384765625}, {"start": 1070.22, "end": 1070.6, "word": " compute", "probability": 0.9189453125}, {"start": 1070.6, "end": 1070.88, "word": " these", "probability": 0.732421875}, {"start": 1070.88, "end": 1071.42, "word": " measures", "probability": 0.81591796875}, {"start": 1071.42, "end": 1071.96, "word": " also", "probability": 0.849609375}, {"start": 1071.96, "end": 1073.76, "word": " for", "probability": 0.89794921875}, {"start": 1073.76, "end": 1074.04, "word": " that", "probability": 0.91650390625}, {"start": 1074.04, "end": 1074.52, "word": " population.", "probability": 0.9482421875}, {"start": 1075.92, "end": 1076.28, "word": " Then", "probability": 0.86279296875}, {"start": 1076.28, "end": 1077.12, "word": " I", "probability": 0.76416015625}, {"start": 1077.12, "end": 1077.3, "word": " will", "probability": 0.88671875}, {"start": 1077.3, "end": 1077.98, "word": " introduce", "probability": 0.84521484375}, {"start": 1077.98, "end": 1078.44, "word": " in", "probability": 0.8623046875}, {"start": 1078.44, "end": 1078.64, "word": " more", "probability": 
0.935546875}, {"start": 1078.64, "end": 1079.02, "word": " details", "probability": 0.85986328125}, {"start": 1079.02, "end": 1079.6, "word": " about", "probability": 0.90087890625}, {"start": 1079.6, "end": 1080.46, "word": " something", "probability": 0.8603515625}, {"start": 1080.46, "end": 1080.82, "word": " called", "probability": 0.880859375}, {"start": 1080.82, "end": 1081.38, "word": " Paxiplot.", "probability": 0.65753173828125}, {"start": 1081.64, "end": 1081.92, "word": " How", "probability": 0.96484375}, {"start": 1081.92, "end": 1082.18, "word": " can", "probability": 0.93994140625}, {"start": 1082.18, "end": 1082.36, "word": " we", "probability": 0.96240234375}, {"start": 1082.36, "end": 1083.12, "word": " construct", "probability": 0.96826171875}, {"start": 1083.12, "end": 1084.4, "word": " and", "probability": 0.9326171875}, {"start": 1084.4, "end": 1085.12, "word": " interpret", "probability": 0.89404296875}, {"start": 1085.12, "end": 1085.48, "word": " a", "probability": 0.93408203125}, {"start": 1085.48, "end": 1085.9, "word": " Paxiplot?", "probability": 0.8831787109375}, {"start": 1086.14, "end": 1086.66, "word": " That's,", "probability": 0.914306640625}, {"start": 1086.96, "end": 1087.06, "word": " inshallah,", "probability": 0.7020670572916666}, {"start": 1087.14, "end": 1087.4, "word": " next", "probability": 0.92919921875}, {"start": 1087.4, "end": 1087.84, "word": " time", "probability": 0.88037109375}, {"start": 1087.84, "end": 1088.18, "word": " on", "probability": 0.7861328125}, {"start": 1088.18, "end": 1088.54, "word": " Tuesday.", "probability": 0.890625}], "temperature": 1.0}, {"id": 40, "seek": 111259, "start": 1089.44, "end": 1112.6, "text": " Finally, we'll see how can we calculate the covariance and coefficient of variation and coefficient, I'm sorry, coefficient of correlation. This topic we'll introduce in more details in chapter 11 later on. 
So just I will give some brief notation about", "tokens": [6288, 11, 321, 603, 536, 577, 393, 321, 8873, 264, 49851, 719, 293, 17619, 295, 12990, 293, 17619, 11, 286, 478, 2597, 11, 17619, 295, 20009, 13, 639, 4829, 321, 603, 5366, 294, 544, 4365, 294, 7187, 2975, 1780, 322, 13, 407, 445, 286, 486, 976, 512, 5353, 24657, 466], "avg_logprob": -0.2461703384623808, "compression_ratio": 1.5521472392638036, "no_speech_prob": 0.0, "words": [{"start": 1089.44, "end": 1090.22, "word": " Finally,", "probability": 0.462158203125}, {"start": 1090.48, "end": 1091.04, "word": " we'll", "probability": 0.761962890625}, {"start": 1091.04, "end": 1091.2, "word": " see", "probability": 0.9228515625}, {"start": 1091.2, "end": 1091.32, "word": " how", "probability": 0.927734375}, {"start": 1091.32, "end": 1091.5, "word": " can", "probability": 0.703125}, {"start": 1091.5, "end": 1091.62, "word": " we", "probability": 0.939453125}, {"start": 1091.62, "end": 1092.04, "word": " calculate", "probability": 0.9130859375}, {"start": 1092.04, "end": 1092.28, "word": " the", "probability": 0.87548828125}, {"start": 1092.28, "end": 1092.8, "word": " covariance", "probability": 0.92919921875}, {"start": 1092.8, "end": 1093.02, "word": " and", "probability": 0.89599609375}, {"start": 1093.02, "end": 1093.5, "word": " coefficient", "probability": 0.91943359375}, {"start": 1093.5, "end": 1093.9, "word": " of", "probability": 0.95458984375}, {"start": 1093.9, "end": 1094.26, "word": " variation", "probability": 0.7861328125}, {"start": 1094.26, "end": 1095.18, "word": " and", "probability": 0.533203125}, {"start": 1095.18, "end": 1095.52, "word": " coefficient,", "probability": 0.92041015625}, {"start": 1095.62, "end": 1095.7, "word": " I'm", "probability": 0.919189453125}, {"start": 1095.7, "end": 1095.86, "word": " sorry,", "probability": 0.8642578125}, {"start": 1095.94, "end": 1096.36, "word": " coefficient", "probability": 0.8271484375}, {"start": 1096.36, "end": 1096.6, "word": " of", 
"probability": 0.95166015625}, {"start": 1096.6, "end": 1097.02, "word": " correlation.", "probability": 0.9228515625}, {"start": 1097.74, "end": 1098.18, "word": " This", "probability": 0.88134765625}, {"start": 1098.18, "end": 1098.48, "word": " topic", "probability": 0.9453125}, {"start": 1098.48, "end": 1099.84, "word": " we'll", "probability": 0.5128173828125}, {"start": 1099.84, "end": 1100.28, "word": " introduce", "probability": 0.6083984375}, {"start": 1100.28, "end": 1100.74, "word": " in", "probability": 0.478515625}, {"start": 1100.74, "end": 1102.18, "word": " more", "probability": 0.92041015625}, {"start": 1102.18, "end": 1102.68, "word": " details", "probability": 0.81640625}, {"start": 1102.68, "end": 1103.52, "word": " in", "probability": 0.91259765625}, {"start": 1103.52, "end": 1103.76, "word": " chapter", "probability": 0.49951171875}, {"start": 1103.76, "end": 1104.28, "word": " 11", "probability": 0.8388671875}, {"start": 1104.28, "end": 1104.66, "word": " later", "probability": 0.8291015625}, {"start": 1104.66, "end": 1104.94, "word": " on.", "probability": 0.9462890625}, {"start": 1106.2, "end": 1106.44, "word": " So", "probability": 0.91015625}, {"start": 1106.44, "end": 1106.64, "word": " just", "probability": 0.662109375}, {"start": 1106.64, "end": 1106.78, "word": " I", "probability": 0.912109375}, {"start": 1106.78, "end": 1106.88, "word": " will", "probability": 0.83642578125}, {"start": 1106.88, "end": 1107.24, "word": " give", "probability": 0.873046875}, {"start": 1107.24, "end": 1108.0, "word": " some", "probability": 0.89306640625}, {"start": 1108.0, "end": 1111.24, "word": " brief", "probability": 0.5625}, {"start": 1111.24, "end": 1112.14, "word": " notation", "probability": 0.89453125}, {"start": 1112.14, "end": 1112.6, "word": " about", "probability": 0.8896484375}], "temperature": 1.0}, {"id": 41, "seek": 113845, "start": 1113.91, "end": 1138.45, "text": " coefficient of correlation, how can we compute the correlation 
coefficient? What's the meaning of your result? And later in chapter 11, we'll talk in more details about correlation and regression. So these are the objectives of this chapter. There are some basic definitions before we start. One is called central tendency. What do you mean by central tendency?", "tokens": [17619, 295, 20009, 11, 577, 393, 321, 14722, 264, 20009, 17619, 30, 708, 311, 264, 3620, 295, 428, 1874, 30, 400, 1780, 294, 7187, 2975, 11, 321, 603, 751, 294, 544, 4365, 466, 20009, 293, 24590, 13, 407, 613, 366, 264, 15961, 295, 341, 7187, 13, 821, 366, 512, 3875, 21988, 949, 321, 722, 13, 1485, 307, 1219, 5777, 18187, 13, 708, 360, 291, 914, 538, 5777, 18187, 30], "avg_logprob": -0.1329241103359631, "compression_ratio": 1.6380090497737556, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1113.91, "end": 1114.51, "word": " coefficient", "probability": 0.9052734375}, {"start": 1114.51, "end": 1114.75, "word": " of", "probability": 0.93359375}, {"start": 1114.75, "end": 1115.11, "word": " correlation,", "probability": 0.93896484375}, {"start": 1115.29, "end": 1115.39, "word": " how", "probability": 0.9091796875}, {"start": 1115.39, "end": 1115.63, "word": " can", "probability": 0.93994140625}, {"start": 1115.63, "end": 1115.93, "word": " we", "probability": 0.95947265625}, {"start": 1115.93, "end": 1117.07, "word": " compute", "probability": 0.88623046875}, {"start": 1117.07, "end": 1117.33, "word": " the", "probability": 0.9033203125}, {"start": 1117.33, "end": 1117.67, "word": " correlation", "probability": 0.9287109375}, {"start": 1117.67, "end": 1118.17, "word": " coefficient?", "probability": 0.962890625}, {"start": 1118.63, "end": 1118.83, "word": " What's", "probability": 0.9267578125}, {"start": 1118.83, "end": 1119.03, "word": " the", "probability": 0.91943359375}, {"start": 1119.03, "end": 1119.31, "word": " meaning", "probability": 0.876953125}, {"start": 1119.31, "end": 1119.51, "word": " of", "probability": 0.9658203125}, {"start": 
1119.51, "end": 1119.67, "word": " your", "probability": 0.8759765625}, {"start": 1119.67, "end": 1119.99, "word": " result?", "probability": 0.9267578125}, {"start": 1120.55, "end": 1120.85, "word": " And", "probability": 0.91845703125}, {"start": 1120.85, "end": 1121.15, "word": " later", "probability": 0.93994140625}, {"start": 1121.15, "end": 1121.33, "word": " in", "probability": 0.8564453125}, {"start": 1121.33, "end": 1121.57, "word": " chapter", "probability": 0.494384765625}, {"start": 1121.57, "end": 1121.87, "word": " 11,", "probability": 0.8251953125}, {"start": 1122.03, "end": 1122.19, "word": " we'll", "probability": 0.856689453125}, {"start": 1122.19, "end": 1122.39, "word": " talk", "probability": 0.8740234375}, {"start": 1122.39, "end": 1122.51, "word": " in", "probability": 0.9443359375}, {"start": 1122.51, "end": 1122.71, "word": " more", "probability": 0.93408203125}, {"start": 1122.71, "end": 1123.07, "word": " details", "probability": 0.81103515625}, {"start": 1123.07, "end": 1123.53, "word": " about", "probability": 0.89697265625}, {"start": 1123.53, "end": 1124.21, "word": " correlation", "probability": 0.9130859375}, {"start": 1124.21, "end": 1124.51, "word": " and", "probability": 0.94140625}, {"start": 1124.51, "end": 1124.81, "word": " regression.", "probability": 0.9541015625}, {"start": 1125.79, "end": 1126.07, "word": " So", "probability": 0.93115234375}, {"start": 1126.07, "end": 1126.43, "word": " these", "probability": 0.57177734375}, {"start": 1126.43, "end": 1126.99, "word": " are", "probability": 0.9384765625}, {"start": 1126.99, "end": 1127.25, "word": " the", "probability": 0.92236328125}, {"start": 1127.25, "end": 1127.69, "word": " objectives", "probability": 0.8330078125}, {"start": 1127.69, "end": 1128.53, "word": " of", "probability": 0.966796875}, {"start": 1128.53, "end": 1128.93, "word": " this", "probability": 0.94287109375}, {"start": 1128.93, "end": 1129.43, "word": " chapter.", "probability": 0.85546875}, {"start": 
1130.57, "end": 1131.17, "word": " There", "probability": 0.79833984375}, {"start": 1131.17, "end": 1131.31, "word": " are", "probability": 0.935546875}, {"start": 1131.31, "end": 1131.61, "word": " some", "probability": 0.89501953125}, {"start": 1131.61, "end": 1131.99, "word": " basic", "probability": 0.966796875}, {"start": 1131.99, "end": 1132.49, "word": " definitions", "probability": 0.83056640625}, {"start": 1132.49, "end": 1132.87, "word": " before", "probability": 0.80322265625}, {"start": 1132.87, "end": 1133.07, "word": " we", "probability": 0.9599609375}, {"start": 1133.07, "end": 1133.43, "word": " start.", "probability": 0.92626953125}, {"start": 1134.33, "end": 1134.67, "word": " One", "probability": 0.92919921875}, {"start": 1134.67, "end": 1134.85, "word": " is", "probability": 0.94384765625}, {"start": 1134.85, "end": 1135.25, "word": " called", "probability": 0.90576171875}, {"start": 1135.25, "end": 1135.81, "word": " central", "probability": 0.69384765625}, {"start": 1135.81, "end": 1136.25, "word": " tendency.", "probability": 0.89599609375}, {"start": 1136.99, "end": 1137.27, "word": " What", "probability": 0.89306640625}, {"start": 1137.27, "end": 1137.41, "word": " do", "probability": 0.95361328125}, {"start": 1137.41, "end": 1137.43, "word": " you", "probability": 0.64501953125}, {"start": 1137.43, "end": 1137.55, "word": " mean", "probability": 0.96826171875}, {"start": 1137.55, "end": 1137.71, "word": " by", "probability": 0.970703125}, {"start": 1137.71, "end": 1138.15, "word": " central", "probability": 0.86669921875}, {"start": 1138.15, "end": 1138.45, "word": " tendency?", "probability": 0.9267578125}], "temperature": 1.0}, {"id": 42, "seek": 116543, "start": 1139.35, "end": 1165.43, "text": " Central tendency is the extent to which all data value group around a typical or numerical or central value. 
So we are looking for a point that in the center, I mean, the data points are gathered or collected around a middle point, and that middle point is called the central tendency. And the question is, how can we measure that value?", "tokens": [9701, 18187, 307, 264, 8396, 281, 597, 439, 1412, 2158, 1594, 926, 257, 7476, 420, 29054, 420, 5777, 2158, 13, 407, 321, 366, 1237, 337, 257, 935, 300, 294, 264, 3056, 11, 286, 914, 11, 264, 1412, 2793, 366, 13032, 420, 11087, 926, 257, 2808, 935, 11, 293, 300, 2808, 935, 307, 1219, 264, 5777, 18187, 13, 400, 264, 1168, 307, 11, 577, 393, 321, 3481, 300, 2158, 30], "avg_logprob": -0.14040178528853825, "compression_ratio": 1.7244897959183674, "no_speech_prob": 0.0, "words": [{"start": 1139.35, "end": 1139.87, "word": " Central", "probability": 0.669921875}, {"start": 1139.87, "end": 1140.25, "word": " tendency", "probability": 0.841796875}, {"start": 1140.25, "end": 1140.75, "word": " is", "probability": 0.9443359375}, {"start": 1140.75, "end": 1140.95, "word": " the", "probability": 0.91650390625}, {"start": 1140.95, "end": 1141.43, "word": " extent", "probability": 0.94140625}, {"start": 1141.43, "end": 1141.77, "word": " to", "probability": 0.95654296875}, {"start": 1141.77, "end": 1142.07, "word": " which", "probability": 0.951171875}, {"start": 1142.07, "end": 1142.37, "word": " all", "probability": 0.9267578125}, {"start": 1142.37, "end": 1142.89, "word": " data", "probability": 0.9453125}, {"start": 1142.89, "end": 1143.59, "word": " value", "probability": 0.77783203125}, {"start": 1143.59, "end": 1144.07, "word": " group", "probability": 0.67578125}, {"start": 1144.07, "end": 1144.73, "word": " around", "probability": 0.92919921875}, {"start": 1144.73, "end": 1144.99, "word": " a", "probability": 0.9521484375}, {"start": 1144.99, "end": 1145.27, "word": " typical", "probability": 0.85498046875}, {"start": 1145.27, "end": 1145.57, "word": " or", "probability": 0.88671875}, {"start": 1145.57, "end": 1145.93, "word": " 
numerical", "probability": 0.748046875}, {"start": 1145.93, "end": 1146.53, "word": " or", "probability": 0.93896484375}, {"start": 1146.53, "end": 1146.89, "word": " central", "probability": 0.69287109375}, {"start": 1146.89, "end": 1147.39, "word": " value.", "probability": 0.9775390625}, {"start": 1147.69, "end": 1148.03, "word": " So", "probability": 0.931640625}, {"start": 1148.03, "end": 1148.77, "word": " we", "probability": 0.708984375}, {"start": 1148.77, "end": 1148.89, "word": " are", "probability": 0.9130859375}, {"start": 1148.89, "end": 1149.13, "word": " looking", "probability": 0.9140625}, {"start": 1149.13, "end": 1149.43, "word": " for", "probability": 0.95068359375}, {"start": 1149.43, "end": 1149.63, "word": " a", "probability": 0.994140625}, {"start": 1149.63, "end": 1149.97, "word": " point", "probability": 0.97265625}, {"start": 1149.97, "end": 1151.05, "word": " that", "probability": 0.8046875}, {"start": 1151.05, "end": 1151.45, "word": " in", "probability": 0.5390625}, {"start": 1151.45, "end": 1151.61, "word": " the", "probability": 0.92236328125}, {"start": 1151.61, "end": 1151.93, "word": " center,", "probability": 0.8251953125}, {"start": 1152.19, "end": 1152.33, "word": " I", "probability": 0.97021484375}, {"start": 1152.33, "end": 1152.51, "word": " mean,", "probability": 0.9658203125}, {"start": 1152.81, "end": 1153.03, "word": " the", "probability": 0.92138671875}, {"start": 1153.03, "end": 1153.31, "word": " data", "probability": 0.94091796875}, {"start": 1153.31, "end": 1153.79, "word": " points", "probability": 0.9267578125}, {"start": 1153.79, "end": 1154.51, "word": " are", "probability": 0.94287109375}, {"start": 1154.51, "end": 1154.95, "word": " gathered", "probability": 0.79443359375}, {"start": 1154.95, "end": 1156.49, "word": " or", "probability": 0.90576171875}, {"start": 1156.49, "end": 1157.03, "word": " collected", "probability": 0.80029296875}, {"start": 1157.03, "end": 1158.23, "word": " around", "probability": 
0.9189453125}, {"start": 1158.23, "end": 1158.87, "word": " a", "probability": 0.98193359375}, {"start": 1158.87, "end": 1159.05, "word": " middle", "probability": 0.9482421875}, {"start": 1159.05, "end": 1159.49, "word": " point,", "probability": 0.94580078125}, {"start": 1159.71, "end": 1159.97, "word": " and", "probability": 0.93359375}, {"start": 1159.97, "end": 1160.23, "word": " that", "probability": 0.9345703125}, {"start": 1160.23, "end": 1160.47, "word": " middle", "probability": 0.94873046875}, {"start": 1160.47, "end": 1160.71, "word": " point", "probability": 0.96728515625}, {"start": 1160.71, "end": 1160.95, "word": " is", "probability": 0.94140625}, {"start": 1160.95, "end": 1161.33, "word": " called", "probability": 0.89892578125}, {"start": 1161.33, "end": 1161.67, "word": " the", "probability": 0.9091796875}, {"start": 1161.67, "end": 1161.97, "word": " central", "probability": 0.87890625}, {"start": 1161.97, "end": 1162.51, "word": " tendency.", "probability": 0.9375}, {"start": 1162.87, "end": 1163.01, "word": " And", "probability": 0.76220703125}, {"start": 1163.01, "end": 1163.11, "word": " the", "probability": 0.92041015625}, {"start": 1163.11, "end": 1163.39, "word": " question", "probability": 0.91796875}, {"start": 1163.39, "end": 1163.63, "word": " is,", "probability": 0.94921875}, {"start": 1163.93, "end": 1164.05, "word": " how", "probability": 0.88330078125}, {"start": 1164.05, "end": 1164.31, "word": " can", "probability": 0.943359375}, {"start": 1164.31, "end": 1164.45, "word": " we", "probability": 0.96435546875}, {"start": 1164.45, "end": 1164.77, "word": " measure", "probability": 0.85986328125}, {"start": 1164.77, "end": 1165.05, "word": " that", "probability": 0.92919921875}, {"start": 1165.05, "end": 1165.43, "word": " value?", "probability": 0.9736328125}], "temperature": 1.0}, {"id": 43, "seek": 119406, "start": 1166.06, "end": 1194.06, "text": " We'll talk in details about mean, median, and mode in a few minutes. 
So the central tendency, in this case, the data values grouped around a typical or central value. Is it clear? So we have data set, large data set. Then these points are gathered or grouped around a middle point, and this point is called central tendency, and it can be measured by using", "tokens": [492, 603, 751, 294, 4365, 466, 914, 11, 26779, 11, 293, 4391, 294, 257, 1326, 2077, 13, 407, 264, 5777, 18187, 11, 294, 341, 1389, 11, 264, 1412, 4190, 41877, 926, 257, 7476, 420, 5777, 2158, 13, 1119, 309, 1850, 30, 407, 321, 362, 1412, 992, 11, 2416, 1412, 992, 13, 1396, 613, 2793, 366, 13032, 420, 41877, 926, 257, 2808, 935, 11, 293, 341, 935, 307, 1219, 5777, 18187, 11, 293, 309, 393, 312, 12690, 538, 1228], "avg_logprob": -0.17672073501574842, "compression_ratio": 1.708133971291866, "no_speech_prob": 0.0, "words": [{"start": 1166.06, "end": 1166.4, "word": " We'll", "probability": 0.6492919921875}, {"start": 1166.4, "end": 1166.66, "word": " talk", "probability": 0.9013671875}, {"start": 1166.66, "end": 1166.94, "word": " in", "probability": 0.9365234375}, {"start": 1166.94, "end": 1167.32, "word": " details", "probability": 0.76416015625}, {"start": 1167.32, "end": 1167.78, "word": " about", "probability": 0.90966796875}, {"start": 1167.78, "end": 1168.42, "word": " mean,", "probability": 0.87744140625}, {"start": 1168.92, "end": 1169.3, "word": " median,", "probability": 0.953125}, {"start": 1169.44, "end": 1169.58, "word": " and", "probability": 0.94580078125}, {"start": 1169.58, "end": 1169.9, "word": " mode", "probability": 0.8779296875}, {"start": 1169.9, "end": 1171.04, "word": " in", "probability": 0.8134765625}, {"start": 1171.04, "end": 1171.14, "word": " a", "probability": 0.99365234375}, {"start": 1171.14, "end": 1171.24, "word": " few", "probability": 0.908203125}, {"start": 1171.24, "end": 1171.56, "word": " minutes.", "probability": 0.927734375}, {"start": 1172.26, "end": 1172.5, "word": " So", "probability": 0.9384765625}, {"start": 1172.5, "end": 
1172.62, "word": " the", "probability": 0.274169921875}, {"start": 1172.62, "end": 1172.82, "word": " central", "probability": 0.53271484375}, {"start": 1172.82, "end": 1173.12, "word": " tendency,", "probability": 0.91259765625}, {"start": 1173.8, "end": 1174.4, "word": " in", "probability": 0.94970703125}, {"start": 1174.4, "end": 1174.64, "word": " this", "probability": 0.94677734375}, {"start": 1174.64, "end": 1174.88, "word": " case,", "probability": 0.90576171875}, {"start": 1174.98, "end": 1175.06, "word": " the", "probability": 0.9228515625}, {"start": 1175.06, "end": 1175.28, "word": " data", "probability": 0.953125}, {"start": 1175.28, "end": 1175.66, "word": " values", "probability": 0.58154296875}, {"start": 1175.66, "end": 1176.08, "word": " grouped", "probability": 0.55224609375}, {"start": 1176.08, "end": 1176.72, "word": " around", "probability": 0.935546875}, {"start": 1176.72, "end": 1176.98, "word": " a", "probability": 0.96240234375}, {"start": 1176.98, "end": 1177.28, "word": " typical", "probability": 0.900390625}, {"start": 1177.28, "end": 1177.58, "word": " or", "probability": 0.96533203125}, {"start": 1177.58, "end": 1177.96, "word": " central", "probability": 0.92236328125}, {"start": 1177.96, "end": 1178.42, "word": " value.", "probability": 0.97412109375}, {"start": 1179.42, "end": 1179.98, "word": " Is", "probability": 0.9404296875}, {"start": 1179.98, "end": 1180.08, "word": " it", "probability": 0.78662109375}, {"start": 1180.08, "end": 1180.34, "word": " clear?", "probability": 0.89013671875}, {"start": 1180.68, "end": 1181.22, "word": " So", "probability": 0.9384765625}, {"start": 1181.22, "end": 1181.38, "word": " we", "probability": 0.92822265625}, {"start": 1181.38, "end": 1181.52, "word": " have", "probability": 0.94873046875}, {"start": 1181.52, "end": 1181.8, "word": " data", "probability": 0.364013671875}, {"start": 1181.8, "end": 1182.12, "word": " set,", "probability": 0.89111328125}, {"start": 1182.32, "end": 1182.6, 
"word": " large", "probability": 0.95556640625}, {"start": 1182.6, "end": 1182.86, "word": " data", "probability": 0.837890625}, {"start": 1182.86, "end": 1183.24, "word": " set.", "probability": 0.95458984375}, {"start": 1184.16, "end": 1184.38, "word": " Then", "probability": 0.8818359375}, {"start": 1184.38, "end": 1184.72, "word": " these", "probability": 0.7998046875}, {"start": 1184.72, "end": 1185.14, "word": " points", "probability": 0.908203125}, {"start": 1185.14, "end": 1185.44, "word": " are", "probability": 0.9462890625}, {"start": 1185.44, "end": 1185.82, "word": " gathered", "probability": 0.7705078125}, {"start": 1185.82, "end": 1186.3, "word": " or", "probability": 0.94384765625}, {"start": 1186.3, "end": 1186.76, "word": " grouped", "probability": 0.82373046875}, {"start": 1186.76, "end": 1187.34, "word": " around", "probability": 0.9296875}, {"start": 1187.34, "end": 1187.86, "word": " a", "probability": 0.98828125}, {"start": 1187.86, "end": 1188.06, "word": " middle", "probability": 0.9404296875}, {"start": 1188.06, "end": 1188.5, "word": " point,", "probability": 0.94384765625}, {"start": 1188.7, "end": 1188.98, "word": " and", "probability": 0.93798828125}, {"start": 1188.98, "end": 1189.2, "word": " this", "probability": 0.93310546875}, {"start": 1189.2, "end": 1189.46, "word": " point", "probability": 0.94580078125}, {"start": 1189.46, "end": 1189.68, "word": " is", "probability": 0.86279296875}, {"start": 1189.68, "end": 1190.18, "word": " called", "probability": 0.91455078125}, {"start": 1190.18, "end": 1191.44, "word": " central", "probability": 0.8662109375}, {"start": 1191.44, "end": 1191.94, "word": " tendency,", "probability": 0.94580078125}, {"start": 1192.26, "end": 1192.6, "word": " and", "probability": 0.9345703125}, {"start": 1192.6, "end": 1192.76, "word": " it", "probability": 0.9462890625}, {"start": 1192.76, "end": 1192.98, "word": " can", "probability": 0.94775390625}, {"start": 1192.98, "end": 1193.16, "word": " be", 
"probability": 0.95703125}, {"start": 1193.16, "end": 1193.48, "word": " measured", "probability": 0.8505859375}, {"start": 1193.48, "end": 1193.72, "word": " by", "probability": 0.97021484375}, {"start": 1193.72, "end": 1194.06, "word": " using", "probability": 0.93505859375}], "temperature": 1.0}, {"id": 44, "seek": 122118, "start": 1195.56, "end": 1221.18, "text": " mean, which is the most common one, median and the moon. Next is the variation, which is the amount of dispersion. Variation is the amount of dispersion or scattering of values. And we'll use, for example, range, variance or standard deviation in order to compute the variation. Finally,", "tokens": [914, 11, 597, 307, 264, 881, 2689, 472, 11, 26779, 293, 264, 7135, 13, 3087, 307, 264, 12990, 11, 597, 307, 264, 2372, 295, 24631, 313, 13, 32511, 399, 307, 264, 2372, 295, 24631, 313, 420, 42314, 295, 4190, 13, 400, 321, 603, 764, 11, 337, 1365, 11, 3613, 11, 21977, 420, 3832, 25163, 294, 1668, 281, 14722, 264, 12990, 13, 6288, 11], "avg_logprob": -0.19165039295330644, "compression_ratio": 1.7142857142857142, "no_speech_prob": 0.0, "words": [{"start": 1195.56, "end": 1196.12, "word": " mean,", "probability": 0.275390625}, {"start": 1196.42, "end": 1196.7, "word": " which", "probability": 0.9375}, {"start": 1196.7, "end": 1196.82, "word": " is", "probability": 0.9521484375}, {"start": 1196.82, "end": 1196.98, "word": " the", "probability": 0.91162109375}, {"start": 1196.98, "end": 1197.26, "word": " most", "probability": 0.9140625}, {"start": 1197.26, "end": 1197.64, "word": " common", "probability": 0.88037109375}, {"start": 1197.64, "end": 1198.0, "word": " one,", "probability": 0.93115234375}, {"start": 1198.44, "end": 1199.06, "word": " median", "probability": 0.90087890625}, {"start": 1199.06, "end": 1199.62, "word": " and", "probability": 0.71923828125}, {"start": 1199.62, "end": 1199.74, "word": " the", "probability": 0.7109375}, {"start": 1199.74, "end": 1199.96, "word": " moon.", "probability": 
0.42529296875}, {"start": 1201.02, "end": 1201.52, "word": " Next", "probability": 0.90576171875}, {"start": 1201.52, "end": 1202.34, "word": " is", "probability": 0.7802734375}, {"start": 1202.34, "end": 1202.48, "word": " the", "probability": 0.9013671875}, {"start": 1202.48, "end": 1202.86, "word": " variation,", "probability": 0.90283203125}, {"start": 1203.22, "end": 1203.6, "word": " which", "probability": 0.94287109375}, {"start": 1203.6, "end": 1203.78, "word": " is", "probability": 0.943359375}, {"start": 1203.78, "end": 1203.94, "word": " the", "probability": 0.9150390625}, {"start": 1203.94, "end": 1204.26, "word": " amount", "probability": 0.888671875}, {"start": 1204.26, "end": 1204.48, "word": " of", "probability": 0.97021484375}, {"start": 1204.48, "end": 1205.1, "word": " dispersion.", "probability": 0.789794921875}, {"start": 1206.68, "end": 1207.4, "word": " Variation", "probability": 0.92138671875}, {"start": 1207.4, "end": 1207.8, "word": " is", "probability": 0.9404296875}, {"start": 1207.8, "end": 1207.94, "word": " the", "probability": 0.90966796875}, {"start": 1207.94, "end": 1208.24, "word": " amount", "probability": 0.89111328125}, {"start": 1208.24, "end": 1208.4, "word": " of", "probability": 0.9658203125}, {"start": 1208.4, "end": 1209.42, "word": " dispersion", "probability": 0.939208984375}, {"start": 1209.42, "end": 1209.8, "word": " or", "probability": 0.8759765625}, {"start": 1209.8, "end": 1210.44, "word": " scattering", "probability": 0.8564453125}, {"start": 1210.44, "end": 1210.82, "word": " of", "probability": 0.96435546875}, {"start": 1210.82, "end": 1211.26, "word": " values.", "probability": 0.97314453125}, {"start": 1212.8, "end": 1213.1, "word": " And", "probability": 0.91162109375}, {"start": 1213.1, "end": 1213.4, "word": " we'll", "probability": 0.7034912109375}, {"start": 1213.4, "end": 1213.68, "word": " use,", "probability": 0.86474609375}, {"start": 1213.78, "end": 1213.9, "word": " for", "probability": 
0.94970703125}, {"start": 1213.9, "end": 1214.32, "word": " example,", "probability": 0.970703125}, {"start": 1214.76, "end": 1215.28, "word": " range,", "probability": 0.81787109375}, {"start": 1216.54, "end": 1217.1, "word": " variance", "probability": 0.794921875}, {"start": 1217.1, "end": 1217.32, "word": " or", "probability": 0.64599609375}, {"start": 1217.32, "end": 1217.68, "word": " standard", "probability": 0.9013671875}, {"start": 1217.68, "end": 1218.12, "word": " deviation", "probability": 0.9208984375}, {"start": 1218.12, "end": 1218.4, "word": " in", "probability": 0.86669921875}, {"start": 1218.4, "end": 1218.6, "word": " order", "probability": 0.9140625}, {"start": 1218.6, "end": 1218.84, "word": " to", "probability": 0.97119140625}, {"start": 1218.84, "end": 1219.22, "word": " compute", "probability": 0.89892578125}, {"start": 1219.22, "end": 1219.46, "word": " the", "probability": 0.8994140625}, {"start": 1219.46, "end": 1219.8, "word": " variation.", "probability": 0.87646484375}, {"start": 1220.66, "end": 1221.18, "word": " Finally,", "probability": 0.681640625}], "temperature": 1.0}, {"id": 45, "seek": 125022, "start": 1222.56, "end": 1250.22, "text": " We have data, and my question is, what's the shape of the data? So the shape is the pattern of distribution of values from the lowest value to the highest. So that's the three definitions we need to know before we start. So we'll start with the easiest one, measures of central tendency. 
As I mentioned, there are three measures.", "tokens": [492, 362, 1412, 11, 293, 452, 1168, 307, 11, 437, 311, 264, 3909, 295, 264, 1412, 30, 407, 264, 3909, 307, 264, 5102, 295, 7316, 295, 4190, 490, 264, 12437, 2158, 281, 264, 6343, 13, 407, 300, 311, 264, 1045, 21988, 321, 643, 281, 458, 949, 321, 722, 13, 407, 321, 603, 722, 365, 264, 12889, 472, 11, 8000, 295, 5777, 18187, 13, 1018, 286, 2835, 11, 456, 366, 1045, 8000, 13], "avg_logprob": -0.13805650195030317, "compression_ratio": 1.6582914572864322, "no_speech_prob": 0.0, "words": [{"start": 1222.56, "end": 1222.78, "word": " We", "probability": 0.83349609375}, {"start": 1222.78, "end": 1222.96, "word": " have", "probability": 0.94921875}, {"start": 1222.96, "end": 1223.3, "word": " data,", "probability": 0.94580078125}, {"start": 1223.86, "end": 1224.26, "word": " and", "probability": 0.93408203125}, {"start": 1224.26, "end": 1224.4, "word": " my", "probability": 0.970703125}, {"start": 1224.4, "end": 1224.68, "word": " question", "probability": 0.92431640625}, {"start": 1224.68, "end": 1225.0, "word": " is,", "probability": 0.94970703125}, {"start": 1225.14, "end": 1225.44, "word": " what's", "probability": 0.842529296875}, {"start": 1225.44, "end": 1225.62, "word": " the", "probability": 0.90576171875}, {"start": 1225.62, "end": 1225.98, "word": " shape", "probability": 0.9267578125}, {"start": 1225.98, "end": 1226.16, "word": " of", "probability": 0.97119140625}, {"start": 1226.16, "end": 1226.3, "word": " the", "probability": 0.90673828125}, {"start": 1226.3, "end": 1226.52, "word": " data?", "probability": 0.90283203125}, {"start": 1227.04, "end": 1227.22, "word": " So", "probability": 0.91552734375}, {"start": 1227.22, "end": 1227.38, "word": " the", "probability": 0.796875}, {"start": 1227.38, "end": 1227.68, "word": " shape", "probability": 0.9130859375}, {"start": 1227.68, "end": 1228.86, "word": " is", "probability": 0.92822265625}, {"start": 1228.86, "end": 1229.04, "word": " the", "probability": 
0.91943359375}, {"start": 1229.04, "end": 1229.3, "word": " pattern", "probability": 0.87255859375}, {"start": 1229.3, "end": 1229.46, "word": " of", "probability": 0.87890625}, {"start": 1229.46, "end": 1229.92, "word": " distribution", "probability": 0.89111328125}, {"start": 1229.92, "end": 1230.24, "word": " of", "probability": 0.966796875}, {"start": 1230.24, "end": 1230.66, "word": " values", "probability": 0.96923828125}, {"start": 1230.66, "end": 1231.1, "word": " from", "probability": 0.84716796875}, {"start": 1231.1, "end": 1231.56, "word": " the", "probability": 0.9150390625}, {"start": 1231.56, "end": 1231.92, "word": " lowest", "probability": 0.97314453125}, {"start": 1231.92, "end": 1232.66, "word": " value", "probability": 0.966796875}, {"start": 1232.66, "end": 1233.74, "word": " to", "probability": 0.88525390625}, {"start": 1233.74, "end": 1233.88, "word": " the", "probability": 0.91064453125}, {"start": 1233.88, "end": 1234.2, "word": " highest.", "probability": 0.9462890625}, {"start": 1234.58, "end": 1235.22, "word": " So", "probability": 0.94921875}, {"start": 1235.22, "end": 1235.7, "word": " that's", "probability": 0.95751953125}, {"start": 1235.7, "end": 1236.92, "word": " the", "probability": 0.91064453125}, {"start": 1236.92, "end": 1237.48, "word": " three", "probability": 0.92529296875}, {"start": 1237.48, "end": 1237.98, "word": " definitions", "probability": 0.8251953125}, {"start": 1237.98, "end": 1238.3, "word": " we", "probability": 0.9599609375}, {"start": 1238.3, "end": 1238.62, "word": " need", "probability": 0.923828125}, {"start": 1238.62, "end": 1239.22, "word": " to", "probability": 0.958984375}, {"start": 1239.22, "end": 1239.4, "word": " know", "probability": 0.888671875}, {"start": 1239.4, "end": 1239.66, "word": " before", "probability": 0.86474609375}, {"start": 1239.66, "end": 1239.88, "word": " we", "probability": 0.955078125}, {"start": 1239.88, "end": 1240.24, "word": " start.", "probability": 0.9267578125}, 
{"start": 1241.5, "end": 1241.8, "word": " So", "probability": 0.95166015625}, {"start": 1241.8, "end": 1241.96, "word": " we'll", "probability": 0.5927734375}, {"start": 1241.96, "end": 1242.26, "word": " start", "probability": 0.92431640625}, {"start": 1242.26, "end": 1242.6, "word": " with", "probability": 0.89599609375}, {"start": 1242.6, "end": 1244.32, "word": " the", "probability": 0.9111328125}, {"start": 1244.32, "end": 1244.58, "word": " easiest", "probability": 0.89892578125}, {"start": 1244.58, "end": 1245.0, "word": " one,", "probability": 0.92138671875}, {"start": 1245.74, "end": 1246.16, "word": " measures", "probability": 0.80322265625}, {"start": 1246.16, "end": 1246.58, "word": " of", "probability": 0.9697265625}, {"start": 1246.58, "end": 1247.02, "word": " central", "probability": 0.91796875}, {"start": 1247.02, "end": 1247.54, "word": " tendency.", "probability": 0.912109375}, {"start": 1247.88, "end": 1248.2, "word": " As", "probability": 0.97119140625}, {"start": 1248.2, "end": 1248.32, "word": " I", "probability": 0.99853515625}, {"start": 1248.32, "end": 1248.68, "word": " mentioned,", "probability": 0.83154296875}, {"start": 1249.16, "end": 1249.42, "word": " there", "probability": 0.90771484375}, {"start": 1249.42, "end": 1249.58, "word": " are", "probability": 0.93896484375}, {"start": 1249.58, "end": 1249.82, "word": " three", "probability": 0.93017578125}, {"start": 1249.82, "end": 1250.22, "word": " measures.", "probability": 0.8310546875}], "temperature": 1.0}, {"id": 46, "seek": 127477, "start": 1252.73, "end": 1274.77, "text": " median and moon. And our goal or we have two goals actually. We have to know how to compute these measures. Number two, which one is better? The mean or the median or the moon? 
So the mean sometimes called the arithmetic mean.", "tokens": [26779, 293, 7135, 13, 400, 527, 3387, 420, 321, 362, 732, 5493, 767, 13, 492, 362, 281, 458, 577, 281, 14722, 613, 8000, 13, 5118, 732, 11, 597, 472, 307, 1101, 30, 440, 914, 420, 264, 26779, 420, 264, 7135, 30, 407, 264, 914, 2171, 1219, 264, 42973, 914, 13], "avg_logprob": -0.20450368231418087, "compression_ratio": 1.4836601307189543, "no_speech_prob": 0.0, "words": [{"start": 1252.73, "end": 1253.23, "word": " median", "probability": 0.53564453125}, {"start": 1253.23, "end": 1253.73, "word": " and", "probability": 0.78173828125}, {"start": 1253.73, "end": 1254.01, "word": " moon.", "probability": 0.572265625}, {"start": 1254.53, "end": 1254.89, "word": " And", "probability": 0.8154296875}, {"start": 1254.89, "end": 1255.11, "word": " our", "probability": 0.89111328125}, {"start": 1255.11, "end": 1255.43, "word": " goal", "probability": 0.9677734375}, {"start": 1255.43, "end": 1256.29, "word": " or", "probability": 0.318603515625}, {"start": 1256.29, "end": 1256.57, "word": " we", "probability": 0.8623046875}, {"start": 1256.57, "end": 1256.71, "word": " have", "probability": 0.951171875}, {"start": 1256.71, "end": 1256.85, "word": " two", "probability": 0.9052734375}, {"start": 1256.85, "end": 1257.07, "word": " goals", "probability": 0.943359375}, {"start": 1257.07, "end": 1257.51, "word": " actually.", "probability": 0.82763671875}, {"start": 1257.79, "end": 1257.89, "word": " We", "probability": 0.927734375}, {"start": 1257.89, "end": 1258.13, "word": " have", "probability": 0.916015625}, {"start": 1258.13, "end": 1258.27, "word": " to", "probability": 0.96533203125}, {"start": 1258.27, "end": 1258.37, "word": " know", "probability": 0.6435546875}, {"start": 1258.37, "end": 1258.51, "word": " how", "probability": 0.89013671875}, {"start": 1258.51, "end": 1258.65, "word": " to", "probability": 0.97265625}, {"start": 1258.65, "end": 1259.09, "word": " compute", "probability": 0.8984375}, {"start": 
1259.09, "end": 1260.63, "word": " these", "probability": 0.83642578125}, {"start": 1260.63, "end": 1261.13, "word": " measures.", "probability": 0.8427734375}, {"start": 1261.79, "end": 1261.97, "word": " Number", "probability": 0.85986328125}, {"start": 1261.97, "end": 1262.29, "word": " two,", "probability": 0.86376953125}, {"start": 1263.27, "end": 1263.57, "word": " which", "probability": 0.94970703125}, {"start": 1263.57, "end": 1263.77, "word": " one", "probability": 0.9287109375}, {"start": 1263.77, "end": 1263.91, "word": " is", "probability": 0.94970703125}, {"start": 1263.91, "end": 1264.19, "word": " better?", "probability": 0.89794921875}, {"start": 1264.93, "end": 1265.15, "word": " The", "probability": 0.85595703125}, {"start": 1265.15, "end": 1265.35, "word": " mean", "probability": 0.9775390625}, {"start": 1265.35, "end": 1265.57, "word": " or", "probability": 0.849609375}, {"start": 1265.57, "end": 1265.75, "word": " the", "probability": 0.92529296875}, {"start": 1265.75, "end": 1266.01, "word": " median", "probability": 0.93505859375}, {"start": 1266.01, "end": 1266.23, "word": " or", "probability": 0.9404296875}, {"start": 1266.23, "end": 1266.39, "word": " the", "probability": 0.92041015625}, {"start": 1266.39, "end": 1266.55, "word": " moon?", "probability": 0.8935546875}, {"start": 1271.31, "end": 1271.49, "word": " So", "probability": 0.8583984375}, {"start": 1271.49, "end": 1271.63, "word": " the", "probability": 0.76220703125}, {"start": 1271.63, "end": 1271.85, "word": " mean", "probability": 0.97265625}, {"start": 1271.85, "end": 1273.25, "word": " sometimes", "probability": 0.556640625}, {"start": 1273.25, "end": 1273.73, "word": " called", "probability": 0.6728515625}, {"start": 1273.73, "end": 1273.95, "word": " the", "probability": 0.87890625}, {"start": 1273.95, "end": 1274.39, "word": " arithmetic", "probability": 0.927734375}, {"start": 1274.39, "end": 1274.77, "word": " mean.", "probability": 0.97021484375}], "temperature": 1.0}, 
{"id": 47, "seek": 130324, "start": 1275.68, "end": 1303.24, "text": " Or in general, just say the mean. So often we use the mean. And the mean is just sum of the values divided by the sample size. So it's straightforward. We have, for example, three data points. And your goal is to find the average or the mean of these points.", "tokens": [1610, 294, 2674, 11, 445, 584, 264, 914, 13, 407, 2049, 321, 764, 264, 914, 13, 400, 264, 914, 307, 445, 2408, 295, 264, 4190, 6666, 538, 264, 6889, 2744, 13, 407, 309, 311, 15325, 13, 492, 362, 11, 337, 1365, 11, 1045, 1412, 2793, 13, 400, 428, 3387, 307, 281, 915, 264, 4274, 420, 264, 914, 295, 613, 2793, 13], "avg_logprob": -0.1500756000318835, "compression_ratio": 1.5696969696969696, "no_speech_prob": 0.0, "words": [{"start": 1275.68, "end": 1276.04, "word": " Or", "probability": 0.287109375}, {"start": 1276.04, "end": 1276.86, "word": " in", "probability": 0.86572265625}, {"start": 1276.86, "end": 1277.22, "word": " general,", "probability": 0.9072265625}, {"start": 1277.4, "end": 1277.6, "word": " just", "probability": 0.9091796875}, {"start": 1277.6, "end": 1278.06, "word": " say", "probability": 0.94384765625}, {"start": 1278.06, "end": 1278.64, "word": " the", "probability": 0.7998046875}, {"start": 1278.64, "end": 1278.82, "word": " mean.", "probability": 0.96923828125}, {"start": 1279.06, "end": 1279.18, "word": " So", "probability": 0.9599609375}, {"start": 1279.18, "end": 1279.48, "word": " often", "probability": 0.78759765625}, {"start": 1279.48, "end": 1279.66, "word": " we", "probability": 0.67529296875}, {"start": 1279.66, "end": 1280.02, "word": " use", "probability": 0.873046875}, {"start": 1280.02, "end": 1280.98, "word": " the", "probability": 0.91015625}, {"start": 1280.98, "end": 1281.18, "word": " mean.", "probability": 0.962890625}, {"start": 1281.68, "end": 1281.86, "word": " And", "probability": 0.7548828125}, {"start": 1281.86, "end": 1282.0, "word": " the", "probability": 0.92333984375}, {"start": 
1282.0, "end": 1282.2, "word": " mean", "probability": 0.96826171875}, {"start": 1282.2, "end": 1282.46, "word": " is", "probability": 0.94287109375}, {"start": 1282.46, "end": 1283.36, "word": " just", "probability": 0.89111328125}, {"start": 1283.36, "end": 1286.86, "word": " sum", "probability": 0.5849609375}, {"start": 1286.86, "end": 1287.08, "word": " of", "probability": 0.9619140625}, {"start": 1287.08, "end": 1287.22, "word": " the", "probability": 0.91845703125}, {"start": 1287.22, "end": 1287.66, "word": " values", "probability": 0.97265625}, {"start": 1287.66, "end": 1289.74, "word": " divided", "probability": 0.55224609375}, {"start": 1289.74, "end": 1290.12, "word": " by", "probability": 0.96923828125}, {"start": 1290.12, "end": 1290.8, "word": " the", "probability": 0.92822265625}, {"start": 1290.8, "end": 1291.24, "word": " sample", "probability": 0.8935546875}, {"start": 1291.24, "end": 1291.72, "word": " size.", "probability": 0.86328125}, {"start": 1292.38, "end": 1292.7, "word": " So", "probability": 0.9130859375}, {"start": 1292.7, "end": 1293.22, "word": " it's", "probability": 0.718505859375}, {"start": 1293.22, "end": 1293.7, "word": " straightforward.", "probability": 0.880859375}, {"start": 1294.22, "end": 1294.42, "word": " We", "probability": 0.951171875}, {"start": 1294.42, "end": 1294.68, "word": " have,", "probability": 0.94873046875}, {"start": 1295.14, "end": 1295.22, "word": " for", "probability": 0.94873046875}, {"start": 1295.22, "end": 1295.58, "word": " example,", "probability": 0.974609375}, {"start": 1296.12, "end": 1296.4, "word": " three", "probability": 0.9130859375}, {"start": 1296.4, "end": 1296.8, "word": " data", "probability": 0.9501953125}, {"start": 1296.8, "end": 1297.16, "word": " points.", "probability": 0.88525390625}, {"start": 1298.28, "end": 1298.74, "word": " And", "probability": 0.93408203125}, {"start": 1298.74, "end": 1298.96, "word": " your", "probability": 0.87744140625}, {"start": 1298.96, "end": 
1299.18, "word": " goal", "probability": 0.974609375}, {"start": 1299.18, "end": 1299.34, "word": " is", "probability": 0.94580078125}, {"start": 1299.34, "end": 1299.48, "word": " to", "probability": 0.9716796875}, {"start": 1299.48, "end": 1299.88, "word": " find", "probability": 0.8916015625}, {"start": 1299.88, "end": 1301.52, "word": " the", "probability": 0.916015625}, {"start": 1301.52, "end": 1301.9, "word": " average", "probability": 0.83203125}, {"start": 1301.9, "end": 1302.18, "word": " or", "probability": 0.8388671875}, {"start": 1302.18, "end": 1302.34, "word": " the", "probability": 0.9306640625}, {"start": 1302.34, "end": 1302.52, "word": " mean", "probability": 0.97509765625}, {"start": 1302.52, "end": 1302.66, "word": " of", "probability": 0.95947265625}, {"start": 1302.66, "end": 1302.84, "word": " these", "probability": 0.84814453125}, {"start": 1302.84, "end": 1303.24, "word": " points.", "probability": 0.9384765625}], "temperature": 1.0}, {"id": 48, "seek": 133151, "start": 1304.27, "end": 1331.51, "text": " They mean it's just some of these values divided by the sample size. So for example, if we have a data X1, X2, X3 up to Xn. So the average is denoted by X bar. This one is pronounced as X bar and X bar is just sum of Xi. 
It is summation, you know this symbol, summation of sigma, summation of Xi and I goes from one to N.", "tokens": [814, 914, 309, 311, 445, 512, 295, 613, 4190, 6666, 538, 264, 6889, 2744, 13, 407, 337, 1365, 11, 498, 321, 362, 257, 1412, 1783, 16, 11, 1783, 17, 11, 1783, 18, 493, 281, 1783, 77, 13, 407, 264, 4274, 307, 1441, 23325, 538, 1783, 2159, 13, 639, 472, 307, 23155, 382, 1783, 2159, 293, 1783, 2159, 307, 445, 2408, 295, 15712, 13, 467, 307, 28811, 11, 291, 458, 341, 5986, 11, 28811, 295, 12771, 11, 28811, 295, 15712, 293, 286, 1709, 490, 472, 281, 426, 13], "avg_logprob": -0.25142044946551323, "compression_ratio": 1.5480769230769231, "no_speech_prob": 0.0, "words": [{"start": 1304.27, "end": 1304.55, "word": " They", "probability": 0.238037109375}, {"start": 1304.55, "end": 1304.71, "word": " mean", "probability": 0.9423828125}, {"start": 1304.71, "end": 1304.85, "word": " it's", "probability": 0.6510009765625}, {"start": 1304.85, "end": 1305.13, "word": " just", "probability": 0.92236328125}, {"start": 1305.13, "end": 1305.89, "word": " some", "probability": 0.490478515625}, {"start": 1305.89, "end": 1306.05, "word": " of", "probability": 0.97314453125}, {"start": 1306.05, "end": 1306.25, "word": " these", "probability": 0.8564453125}, {"start": 1306.25, "end": 1306.71, "word": " values", "probability": 0.9697265625}, {"start": 1306.71, "end": 1307.81, "word": " divided", "probability": 0.4609375}, {"start": 1307.81, "end": 1308.05, "word": " by", "probability": 0.96826171875}, {"start": 1308.05, "end": 1308.25, "word": " the", "probability": 0.89599609375}, {"start": 1308.25, "end": 1308.49, "word": " sample", "probability": 0.74365234375}, {"start": 1308.49, "end": 1308.95, "word": " size.", "probability": 0.85595703125}, {"start": 1309.65, "end": 1309.93, "word": " So", "probability": 0.86083984375}, {"start": 1309.93, "end": 1310.23, "word": " for", "probability": 0.58203125}, {"start": 1310.23, "end": 1310.49, "word": " example,", "probability": 
0.97412109375}, {"start": 1310.57, "end": 1310.71, "word": " if", "probability": 0.953125}, {"start": 1310.71, "end": 1310.85, "word": " we", "probability": 0.95654296875}, {"start": 1310.85, "end": 1311.05, "word": " have", "probability": 0.9443359375}, {"start": 1311.05, "end": 1311.13, "word": " a", "probability": 0.65087890625}, {"start": 1311.13, "end": 1311.41, "word": " data", "probability": 0.9443359375}, {"start": 1311.41, "end": 1311.85, "word": " X1,", "probability": 0.7138671875}, {"start": 1311.95, "end": 1312.23, "word": " X2,", "probability": 0.98876953125}, {"start": 1312.37, "end": 1312.77, "word": " X3", "probability": 0.9912109375}, {"start": 1312.77, "end": 1312.99, "word": " up", "probability": 0.6904296875}, {"start": 1312.99, "end": 1313.17, "word": " to", "probability": 0.966796875}, {"start": 1313.17, "end": 1313.67, "word": " Xn.", "probability": 0.722412109375}, {"start": 1314.11, "end": 1314.57, "word": " So", "probability": 0.935546875}, {"start": 1314.57, "end": 1314.71, "word": " the", "probability": 0.80517578125}, {"start": 1314.71, "end": 1315.15, "word": " average", "probability": 0.7783203125}, {"start": 1315.15, "end": 1317.11, "word": " is", "probability": 0.923828125}, {"start": 1317.11, "end": 1317.49, "word": " denoted", "probability": 0.975830078125}, {"start": 1317.49, "end": 1317.83, "word": " by", "probability": 0.97216796875}, {"start": 1317.83, "end": 1318.07, "word": " X", "probability": 0.95263671875}, {"start": 1318.07, "end": 1318.31, "word": " bar.", "probability": 0.75341796875}, {"start": 1318.57, "end": 1318.67, "word": " This", "probability": 0.8583984375}, {"start": 1318.67, "end": 1319.49, "word": " one", "probability": 0.6298828125}, {"start": 1319.49, "end": 1319.65, "word": " is", "probability": 0.93798828125}, {"start": 1319.65, "end": 1320.37, "word": " pronounced", "probability": 0.76123046875}, {"start": 1320.37, "end": 1320.93, "word": " as", "probability": 0.962890625}, {"start": 1320.93, "end": 
1321.15, "word": " X", "probability": 0.974609375}, {"start": 1321.15, "end": 1321.45, "word": " bar", "probability": 0.9423828125}, {"start": 1321.45, "end": 1322.21, "word": " and", "probability": 0.40966796875}, {"start": 1322.21, "end": 1322.45, "word": " X", "probability": 0.95654296875}, {"start": 1322.45, "end": 1322.63, "word": " bar", "probability": 0.9404296875}, {"start": 1322.63, "end": 1322.79, "word": " is", "probability": 0.92578125}, {"start": 1322.79, "end": 1323.13, "word": " just", "probability": 0.92236328125}, {"start": 1323.13, "end": 1323.89, "word": " sum", "probability": 0.76806640625}, {"start": 1323.89, "end": 1324.03, "word": " of", "probability": 0.9716796875}, {"start": 1324.03, "end": 1324.53, "word": " Xi.", "probability": 0.771484375}, {"start": 1325.01, "end": 1325.27, "word": " It", "probability": 0.67626953125}, {"start": 1325.27, "end": 1325.41, "word": " is", "probability": 0.55029296875}, {"start": 1325.41, "end": 1325.87, "word": " summation,", "probability": 0.87890625}, {"start": 1326.41, "end": 1326.55, "word": " you", "probability": 0.8564453125}, {"start": 1326.55, "end": 1326.65, "word": " know", "probability": 0.8837890625}, {"start": 1326.65, "end": 1326.91, "word": " this", "probability": 0.73291015625}, {"start": 1326.91, "end": 1327.39, "word": " symbol,", "probability": 0.74560546875}, {"start": 1327.87, "end": 1328.25, "word": " summation", "probability": 0.76416015625}, {"start": 1328.25, "end": 1328.45, "word": " of", "probability": 0.615234375}, {"start": 1328.45, "end": 1328.75, "word": " sigma,", "probability": 0.50390625}, {"start": 1329.15, "end": 1329.49, "word": " summation", "probability": 0.87353515625}, {"start": 1329.49, "end": 1329.71, "word": " of", "probability": 0.9697265625}, {"start": 1329.71, "end": 1330.07, "word": " Xi", "probability": 0.94873046875}, {"start": 1330.07, "end": 1330.41, "word": " and", "probability": 0.70458984375}, {"start": 1330.41, "end": 1330.61, "word": " I", 
"probability": 0.5732421875}, {"start": 1330.61, "end": 1330.85, "word": " goes", "probability": 0.89697265625}, {"start": 1330.85, "end": 1331.07, "word": " from", "probability": 0.884765625}, {"start": 1331.07, "end": 1331.21, "word": " one", "probability": 0.5478515625}, {"start": 1331.21, "end": 1331.35, "word": " to", "probability": 0.9560546875}, {"start": 1331.35, "end": 1331.51, "word": " N.", "probability": 0.52001953125}], "temperature": 1.0}, {"id": 49, "seek": 135883, "start": 1331.89, "end": 1358.83, "text": " divided by N which is the total number of observations or the sample size. So it means X1 plus X2 all the way up to XN divided by N gives the mean or the arithmetic mean. So X bar is the average which is the sum of values divided by the number of observations. So that's the first definition. For example,", "tokens": [6666, 538, 426, 597, 307, 264, 3217, 1230, 295, 18163, 420, 264, 6889, 2744, 13, 407, 309, 1355, 1783, 16, 1804, 1783, 17, 439, 264, 636, 493, 281, 1783, 45, 6666, 538, 426, 2709, 264, 914, 420, 264, 42973, 914, 13, 407, 1783, 2159, 307, 264, 4274, 597, 307, 264, 2408, 295, 4190, 6666, 538, 264, 1230, 295, 18163, 13, 407, 300, 311, 264, 700, 7123, 13, 1171, 1365, 11], "avg_logprob": -0.1516285244847687, "compression_ratio": 1.7, "no_speech_prob": 0.0, "words": [{"start": 1331.89, "end": 1332.39, "word": " divided", "probability": 0.3310546875}, {"start": 1332.39, "end": 1332.67, "word": " by", "probability": 0.97216796875}, {"start": 1332.67, "end": 1332.97, "word": " N", "probability": 0.37841796875}, {"start": 1332.97, "end": 1333.25, "word": " which", "probability": 0.634765625}, {"start": 1333.25, "end": 1333.43, "word": " is", "probability": 0.94384765625}, {"start": 1333.43, "end": 1333.63, "word": " the", "probability": 0.8779296875}, {"start": 1333.63, "end": 1333.91, "word": " total", "probability": 0.8603515625}, {"start": 1333.91, "end": 1334.29, "word": " number", "probability": 0.9296875}, {"start": 1334.29, "end": 
1334.49, "word": " of", "probability": 0.93701171875}, {"start": 1334.49, "end": 1334.97, "word": " observations", "probability": 0.78857421875}, {"start": 1334.97, "end": 1335.45, "word": " or", "probability": 0.7880859375}, {"start": 1335.45, "end": 1336.15, "word": " the", "probability": 0.75048828125}, {"start": 1336.15, "end": 1336.41, "word": " sample", "probability": 0.84765625}, {"start": 1336.41, "end": 1336.87, "word": " size.", "probability": 0.7685546875}, {"start": 1338.23, "end": 1338.87, "word": " So", "probability": 0.80859375}, {"start": 1338.87, "end": 1338.99, "word": " it", "probability": 0.8134765625}, {"start": 1338.99, "end": 1339.23, "word": " means", "probability": 0.923828125}, {"start": 1339.23, "end": 1339.71, "word": " X1", "probability": 0.752197265625}, {"start": 1339.71, "end": 1339.95, "word": " plus", "probability": 0.7490234375}, {"start": 1339.95, "end": 1340.41, "word": " X2", "probability": 0.985107421875}, {"start": 1340.41, "end": 1340.87, "word": " all", "probability": 0.888671875}, {"start": 1340.87, "end": 1341.01, "word": " the", "probability": 0.91552734375}, {"start": 1341.01, "end": 1341.17, "word": " way", "probability": 0.95068359375}, {"start": 1341.17, "end": 1341.39, "word": " up", "probability": 0.9482421875}, {"start": 1341.39, "end": 1341.55, "word": " to", "probability": 0.9619140625}, {"start": 1341.55, "end": 1341.99, "word": " XN", "probability": 0.841796875}, {"start": 1341.99, "end": 1342.31, "word": " divided", "probability": 0.73583984375}, {"start": 1342.31, "end": 1342.57, "word": " by", "probability": 0.9697265625}, {"start": 1342.57, "end": 1342.81, "word": " N", "probability": 0.986328125}, {"start": 1342.81, "end": 1343.29, "word": " gives", "probability": 0.82958984375}, {"start": 1343.29, "end": 1344.33, "word": " the", "probability": 0.89599609375}, {"start": 1344.33, "end": 1344.75, "word": " mean", "probability": 0.96533203125}, {"start": 1344.75, "end": 1345.03, "word": " or", "probability": 
0.8759765625}, {"start": 1345.03, "end": 1345.17, "word": " the", "probability": 0.75244140625}, {"start": 1345.17, "end": 1345.57, "word": " arithmetic", "probability": 0.9384765625}, {"start": 1345.57, "end": 1345.97, "word": " mean.", "probability": 0.9384765625}, {"start": 1346.63, "end": 1346.77, "word": " So", "probability": 0.94140625}, {"start": 1346.77, "end": 1347.01, "word": " X", "probability": 0.96044921875}, {"start": 1347.01, "end": 1347.27, "word": " bar", "probability": 0.85107421875}, {"start": 1347.27, "end": 1348.39, "word": " is", "probability": 0.94140625}, {"start": 1348.39, "end": 1348.53, "word": " the", "probability": 0.9169921875}, {"start": 1348.53, "end": 1348.91, "word": " average", "probability": 0.79150390625}, {"start": 1348.91, "end": 1349.93, "word": " which", "probability": 0.6904296875}, {"start": 1349.93, "end": 1350.09, "word": " is", "probability": 0.94775390625}, {"start": 1350.09, "end": 1350.25, "word": " the", "probability": 0.91845703125}, {"start": 1350.25, "end": 1350.55, "word": " sum", "probability": 0.9306640625}, {"start": 1350.55, "end": 1350.77, "word": " of", "probability": 0.96923828125}, {"start": 1350.77, "end": 1351.13, "word": " values", "probability": 0.95361328125}, {"start": 1351.13, "end": 1351.51, "word": " divided", "probability": 0.84423828125}, {"start": 1351.51, "end": 1351.83, "word": " by", "probability": 0.96728515625}, {"start": 1351.83, "end": 1352.69, "word": " the", "probability": 0.91162109375}, {"start": 1352.69, "end": 1353.07, "word": " number", "probability": 0.93017578125}, {"start": 1353.07, "end": 1353.71, "word": " of", "probability": 0.97216796875}, {"start": 1353.71, "end": 1354.53, "word": " observations.", "probability": 0.7958984375}, {"start": 1355.21, "end": 1355.43, "word": " So", "probability": 0.935546875}, {"start": 1355.43, "end": 1355.73, "word": " that's", "probability": 0.892822265625}, {"start": 1355.73, "end": 1356.01, "word": " the", "probability": 0.9189453125}, 
{"start": 1356.01, "end": 1356.27, "word": " first", "probability": 0.86962890625}, {"start": 1356.27, "end": 1356.73, "word": " definition.", "probability": 0.9453125}, {"start": 1357.87, "end": 1358.51, "word": " For", "probability": 0.95751953125}, {"start": 1358.51, "end": 1358.83, "word": " example,", "probability": 0.9716796875}], "temperature": 1.0}, {"id": 50, "seek": 138544, "start": 1362.18, "end": 1385.44, "text": " So again, the mean is the most common measure of center tendency. Number two, the definition of the mean. Sum of values divided by the number of values. That means the mean takes all the values, then divided by N.", "tokens": [407, 797, 11, 264, 914, 307, 264, 881, 2689, 3481, 295, 3056, 18187, 13, 5118, 732, 11, 264, 7123, 295, 264, 914, 13, 8626, 295, 4190, 6666, 538, 264, 1230, 295, 4190, 13, 663, 1355, 264, 914, 2516, 439, 264, 4190, 11, 550, 6666, 538, 426, 13], "avg_logprob": -0.17887369574358067, "compression_ratio": 1.5970149253731343, "no_speech_prob": 0.0, "words": [{"start": 1362.18, "end": 1362.42, "word": " So", "probability": 0.82763671875}, {"start": 1362.42, "end": 1362.72, "word": " again,", "probability": 0.8232421875}, {"start": 1364.04, "end": 1364.2, "word": " the", "probability": 0.88916015625}, {"start": 1364.2, "end": 1364.4, "word": " mean", "probability": 0.91943359375}, {"start": 1364.4, "end": 1365.46, "word": " is", "probability": 0.93408203125}, {"start": 1365.46, "end": 1365.68, "word": " the", "probability": 0.923828125}, {"start": 1365.68, "end": 1366.02, "word": " most", "probability": 0.9140625}, {"start": 1366.02, "end": 1366.44, "word": " common", "probability": 0.875}, {"start": 1366.44, "end": 1366.72, "word": " measure", "probability": 0.86865234375}, {"start": 1366.72, "end": 1366.92, "word": " of", "probability": 0.96728515625}, {"start": 1366.92, "end": 1367.18, "word": " center", "probability": 0.5556640625}, {"start": 1367.18, "end": 1367.7, "word": " tendency.", "probability": 0.87744140625}, 
{"start": 1368.86, "end": 1369.08, "word": " Number", "probability": 0.7451171875}, {"start": 1369.08, "end": 1369.36, "word": " two,", "probability": 0.75}, {"start": 1370.56, "end": 1371.12, "word": " the", "probability": 0.904296875}, {"start": 1371.12, "end": 1371.48, "word": " definition", "probability": 0.9453125}, {"start": 1371.48, "end": 1371.66, "word": " of", "probability": 0.9697265625}, {"start": 1371.66, "end": 1371.78, "word": " the", "probability": 0.9150390625}, {"start": 1371.78, "end": 1371.96, "word": " mean.", "probability": 0.96044921875}, {"start": 1372.92, "end": 1373.34, "word": " Sum", "probability": 0.81884765625}, {"start": 1373.34, "end": 1373.54, "word": " of", "probability": 0.97509765625}, {"start": 1373.54, "end": 1373.92, "word": " values", "probability": 0.96240234375}, {"start": 1373.92, "end": 1374.34, "word": " divided", "probability": 0.791015625}, {"start": 1374.34, "end": 1374.54, "word": " by", "probability": 0.96826171875}, {"start": 1374.54, "end": 1374.72, "word": " the", "probability": 0.89501953125}, {"start": 1374.72, "end": 1374.98, "word": " number", "probability": 0.9326171875}, {"start": 1374.98, "end": 1375.44, "word": " of", "probability": 0.97216796875}, {"start": 1375.44, "end": 1376.94, "word": " values.", "probability": 0.9189453125}, {"start": 1378.38, "end": 1379.06, "word": " That", "probability": 0.89697265625}, {"start": 1379.06, "end": 1379.52, "word": " means", "probability": 0.927734375}, {"start": 1379.52, "end": 1380.16, "word": " the", "probability": 0.6796875}, {"start": 1380.16, "end": 1380.48, "word": " mean", "probability": 0.97705078125}, {"start": 1380.48, "end": 1381.92, "word": " takes", "probability": 0.80029296875}, {"start": 1381.92, "end": 1382.34, "word": " all", "probability": 0.95361328125}, {"start": 1382.34, "end": 1382.5, "word": " the", "probability": 0.91259765625}, {"start": 1382.5, "end": 1382.96, "word": " values,", "probability": 0.96484375}, {"start": 1384.14, "end": 
1384.76, "word": " then", "probability": 0.83984375}, {"start": 1384.76, "end": 1385.06, "word": " divided", "probability": 0.347412109375}, {"start": 1385.06, "end": 1385.24, "word": " by", "probability": 0.96875}, {"start": 1385.24, "end": 1385.44, "word": " N.", "probability": 0.485107421875}], "temperature": 1.0}, {"id": 51, "seek": 141556, "start": 1387.02, "end": 1415.56, "text": " it makes sense that the mean is affected by extreme values or outliers. I mean, if the data has outliers or extreme values, I mean by extreme values, large or very, very large values and small, small values. Large values or small values are extreme values. Since the mean takes all these values and sums all together, doesn't divide by n, that means", "tokens": [309, 1669, 2020, 300, 264, 914, 307, 8028, 538, 8084, 4190, 420, 484, 23646, 13, 286, 914, 11, 498, 264, 1412, 575, 484, 23646, 420, 8084, 4190, 11, 286, 914, 538, 8084, 4190, 11, 2416, 420, 588, 11, 588, 2416, 4190, 293, 1359, 11, 1359, 4190, 13, 33092, 4190, 420, 1359, 4190, 366, 8084, 4190, 13, 4162, 264, 914, 2516, 439, 613, 4190, 293, 34499, 439, 1214, 11, 1177, 380, 9845, 538, 297, 11, 300, 1355], "avg_logprob": -0.19399350184898873, "compression_ratio": 1.9553072625698324, "no_speech_prob": 0.0, "words": [{"start": 1387.02, "end": 1387.22, "word": " it", "probability": 0.572265625}, {"start": 1387.22, "end": 1387.6, "word": " makes", "probability": 0.82958984375}, {"start": 1387.6, "end": 1387.96, "word": " sense", "probability": 0.822265625}, {"start": 1387.96, "end": 1388.46, "word": " that", "probability": 0.92626953125}, {"start": 1388.46, "end": 1389.04, "word": " the", "probability": 0.8310546875}, {"start": 1389.04, "end": 1389.3, "word": " mean", "probability": 0.919921875}, {"start": 1389.3, "end": 1389.74, "word": " is", "probability": 0.93603515625}, {"start": 1389.74, "end": 1390.32, "word": " affected", "probability": 0.8173828125}, {"start": 1390.32, "end": 1390.68, "word": " by", "probability": 
0.97119140625}, {"start": 1390.68, "end": 1391.08, "word": " extreme", "probability": 0.802734375}, {"start": 1391.08, "end": 1391.4, "word": " values", "probability": 0.95361328125}, {"start": 1391.4, "end": 1391.54, "word": " or", "probability": 0.7578125}, {"start": 1391.54, "end": 1391.96, "word": " outliers.", "probability": 0.8916015625}, {"start": 1392.42, "end": 1392.54, "word": " I", "probability": 0.904296875}, {"start": 1392.54, "end": 1392.74, "word": " mean,", "probability": 0.96484375}, {"start": 1393.06, "end": 1393.38, "word": " if", "probability": 0.94921875}, {"start": 1393.38, "end": 1393.58, "word": " the", "probability": 0.9140625}, {"start": 1393.58, "end": 1393.9, "word": " data", "probability": 0.943359375}, {"start": 1393.9, "end": 1394.62, "word": " has", "probability": 0.93505859375}, {"start": 1394.62, "end": 1395.2, "word": " outliers", "probability": 0.9619140625}, {"start": 1395.2, "end": 1396.5, "word": " or", "probability": 0.78271484375}, {"start": 1396.5, "end": 1396.94, "word": " extreme", "probability": 0.87060546875}, {"start": 1396.94, "end": 1397.38, "word": " values,", "probability": 0.96923828125}, {"start": 1397.46, "end": 1397.54, "word": " I", "probability": 0.94921875}, {"start": 1397.54, "end": 1397.66, "word": " mean", "probability": 0.96630859375}, {"start": 1397.66, "end": 1397.84, "word": " by", "probability": 0.775390625}, {"start": 1397.84, "end": 1398.16, "word": " extreme", "probability": 0.8447265625}, {"start": 1398.16, "end": 1398.68, "word": " values,", "probability": 0.95703125}, {"start": 1399.02, "end": 1399.56, "word": " large", "probability": 0.9443359375}, {"start": 1399.56, "end": 1400.12, "word": " or", "probability": 0.88330078125}, {"start": 1400.12, "end": 1400.56, "word": " very,", "probability": 0.865234375}, {"start": 1400.62, "end": 1400.76, "word": " very", "probability": 0.8603515625}, {"start": 1400.76, "end": 1401.02, "word": " large", "probability": 0.95361328125}, {"start": 1401.02, 
"end": 1401.4, "word": " values", "probability": 0.95263671875}, {"start": 1401.4, "end": 1401.62, "word": " and", "probability": 0.73095703125}, {"start": 1401.62, "end": 1401.94, "word": " small,", "probability": 0.93017578125}, {"start": 1402.04, "end": 1402.22, "word": " small", "probability": 0.9443359375}, {"start": 1402.22, "end": 1402.66, "word": " values.", "probability": 0.96337890625}, {"start": 1403.74, "end": 1404.22, "word": " Large", "probability": 0.95556640625}, {"start": 1404.22, "end": 1404.56, "word": " values", "probability": 0.97119140625}, {"start": 1404.56, "end": 1404.68, "word": " or", "probability": 0.93798828125}, {"start": 1404.68, "end": 1404.98, "word": " small", "probability": 0.9169921875}, {"start": 1404.98, "end": 1405.42, "word": " values", "probability": 0.97216796875}, {"start": 1405.42, "end": 1405.84, "word": " are", "probability": 0.93212890625}, {"start": 1405.84, "end": 1406.84, "word": " extreme", "probability": 0.83349609375}, {"start": 1406.84, "end": 1407.2, "word": " values.", "probability": 0.9716796875}, {"start": 1408.42, "end": 1409.06, "word": " Since", "probability": 0.85107421875}, {"start": 1409.06, "end": 1409.28, "word": " the", "probability": 0.91552734375}, {"start": 1409.28, "end": 1409.52, "word": " mean", "probability": 0.96630859375}, {"start": 1409.52, "end": 1411.1, "word": " takes", "probability": 0.7490234375}, {"start": 1411.1, "end": 1411.5, "word": " all", "probability": 0.9453125}, {"start": 1411.5, "end": 1411.74, "word": " these", "probability": 0.8564453125}, {"start": 1411.74, "end": 1412.06, "word": " values", "probability": 0.96875}, {"start": 1412.06, "end": 1412.22, "word": " and", "probability": 0.72314453125}, {"start": 1412.22, "end": 1412.5, "word": " sums", "probability": 0.55810546875}, {"start": 1412.5, "end": 1412.76, "word": " all", "probability": 0.5224609375}, {"start": 1412.76, "end": 1413.12, "word": " together,", "probability": 0.83642578125}, {"start": 1413.22, "end": 
1413.42, "word": " doesn't", "probability": 0.61053466796875}, {"start": 1413.42, "end": 1413.68, "word": " divide", "probability": 0.9248046875}, {"start": 1413.68, "end": 1413.9, "word": " by", "probability": 0.9638671875}, {"start": 1413.9, "end": 1414.12, "word": " n,", "probability": 0.454345703125}, {"start": 1414.82, "end": 1415.14, "word": " that", "probability": 0.93359375}, {"start": 1415.14, "end": 1415.56, "word": " means", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 52, "seek": 144227, "start": 1416.55, "end": 1442.27, "text": " The mean is affected by outliers or by extreme values. For example, imagine we have simple data as 1, 2, 3, 4, and 5. Simple example. Now, what's the mean? The mean is just add these values, then divide by the total number of observations. In this case, the sum of these is 15. N is five because there are five observations.", "tokens": [440, 914, 307, 8028, 538, 484, 23646, 420, 538, 8084, 4190, 13, 1171, 1365, 11, 3811, 321, 362, 2199, 1412, 382, 502, 11, 568, 11, 805, 11, 1017, 11, 293, 1025, 13, 21532, 1365, 13, 823, 11, 437, 311, 264, 914, 30, 440, 914, 307, 445, 909, 613, 4190, 11, 550, 9845, 538, 264, 3217, 1230, 295, 18163, 13, 682, 341, 1389, 11, 264, 2408, 295, 613, 307, 2119, 13, 426, 307, 1732, 570, 456, 366, 1732, 18163, 13], "avg_logprob": -0.15595702435821296, "compression_ratio": 1.555023923444976, "no_speech_prob": 0.0, "words": [{"start": 1416.55, "end": 1416.87, "word": " The", "probability": 0.5458984375}, {"start": 1416.87, "end": 1417.01, "word": " mean", "probability": 0.95361328125}, {"start": 1417.01, "end": 1417.15, "word": " is", "probability": 0.9423828125}, {"start": 1417.15, "end": 1417.53, "word": " affected", "probability": 0.81396484375}, {"start": 1417.53, "end": 1418.55, "word": " by", "probability": 0.970703125}, {"start": 1418.55, "end": 1419.05, "word": " outliers", "probability": 0.779541015625}, {"start": 1419.05, "end": 1419.91, "word": " or", "probability": 0.712890625}, 
{"start": 1419.91, "end": 1420.07, "word": " by", "probability": 0.9677734375}, {"start": 1420.07, "end": 1420.45, "word": " extreme", "probability": 0.86181640625}, {"start": 1420.45, "end": 1420.79, "word": " values.", "probability": 0.943359375}, {"start": 1420.89, "end": 1421.01, "word": " For", "probability": 0.94580078125}, {"start": 1421.01, "end": 1421.35, "word": " example,", "probability": 0.97607421875}, {"start": 1422.03, "end": 1422.35, "word": " imagine", "probability": 0.9033203125}, {"start": 1422.35, "end": 1422.59, "word": " we", "probability": 0.94873046875}, {"start": 1422.59, "end": 1422.79, "word": " have", "probability": 0.94384765625}, {"start": 1422.79, "end": 1423.11, "word": " simple", "probability": 0.8291015625}, {"start": 1423.11, "end": 1423.39, "word": " data", "probability": 0.94580078125}, {"start": 1423.39, "end": 1423.81, "word": " as", "probability": 0.943359375}, {"start": 1423.81, "end": 1424.07, "word": " 1,", "probability": 0.5400390625}, {"start": 1424.11, "end": 1424.23, "word": " 2,", "probability": 0.873046875}, {"start": 1424.31, "end": 1424.45, "word": " 3,", "probability": 0.98828125}, {"start": 1424.51, "end": 1424.67, "word": " 4,", "probability": 0.99072265625}, {"start": 1424.71, "end": 1424.83, "word": " and", "probability": 0.8271484375}, {"start": 1424.83, "end": 1425.11, "word": " 5.", "probability": 0.9951171875}, {"start": 1426.11, "end": 1426.67, "word": " Simple", "probability": 0.89697265625}, {"start": 1426.67, "end": 1427.03, "word": " example.", "probability": 0.984375}, {"start": 1428.05, "end": 1428.25, "word": " Now,", "probability": 0.947265625}, {"start": 1428.27, "end": 1428.45, "word": " what's", "probability": 0.95166015625}, {"start": 1428.45, "end": 1428.57, "word": " the", "probability": 0.92431640625}, {"start": 1428.57, "end": 1428.79, "word": " mean?", "probability": 0.95849609375}, {"start": 1429.35, "end": 1429.53, "word": " The", "probability": 0.71923828125}, {"start": 1429.53, "end": 
1429.69, "word": " mean", "probability": 0.94775390625}, {"start": 1429.69, "end": 1429.83, "word": " is", "probability": 0.84765625}, {"start": 1429.83, "end": 1430.13, "word": " just", "probability": 0.9052734375}, {"start": 1430.13, "end": 1430.49, "word": " add", "probability": 0.80322265625}, {"start": 1430.49, "end": 1430.81, "word": " these", "probability": 0.85546875}, {"start": 1430.81, "end": 1431.25, "word": " values,", "probability": 0.96484375}, {"start": 1432.05, "end": 1432.65, "word": " then", "probability": 0.85888671875}, {"start": 1432.65, "end": 1433.01, "word": " divide", "probability": 0.8740234375}, {"start": 1433.01, "end": 1433.17, "word": " by", "probability": 0.9560546875}, {"start": 1433.17, "end": 1433.35, "word": " the", "probability": 0.91552734375}, {"start": 1433.35, "end": 1433.57, "word": " total", "probability": 0.8623046875}, {"start": 1433.57, "end": 1433.89, "word": " number", "probability": 0.93408203125}, {"start": 1433.89, "end": 1434.07, "word": " of", "probability": 0.943359375}, {"start": 1434.07, "end": 1434.47, "word": " observations.", "probability": 0.84814453125}, {"start": 1434.91, "end": 1435.13, "word": " In", "probability": 0.9755859375}, {"start": 1435.13, "end": 1435.33, "word": " this", "probability": 0.94482421875}, {"start": 1435.33, "end": 1435.61, "word": " case,", "probability": 0.9072265625}, {"start": 1436.21, "end": 1436.51, "word": " the", "probability": 0.9130859375}, {"start": 1436.51, "end": 1436.73, "word": " sum", "probability": 0.958984375}, {"start": 1436.73, "end": 1436.91, "word": " of", "probability": 0.96923828125}, {"start": 1436.91, "end": 1437.09, "word": " these", "probability": 0.509765625}, {"start": 1437.09, "end": 1437.29, "word": " is", "probability": 0.94921875}, {"start": 1437.29, "end": 1437.75, "word": " 15.", "probability": 0.9716796875}, {"start": 1439.41, "end": 1439.93, "word": " N", "probability": 0.7080078125}, {"start": 1439.93, "end": 1440.13, "word": " is", 
"probability": 0.94970703125}, {"start": 1440.13, "end": 1440.49, "word": " five", "probability": 0.469482421875}, {"start": 1440.49, "end": 1440.91, "word": " because", "probability": 0.7548828125}, {"start": 1440.91, "end": 1441.19, "word": " there", "probability": 0.90576171875}, {"start": 1441.19, "end": 1441.37, "word": " are", "probability": 0.94580078125}, {"start": 1441.37, "end": 1441.71, "word": " five", "probability": 0.9052734375}, {"start": 1441.71, "end": 1442.27, "word": " observations.", "probability": 0.79248046875}], "temperature": 1.0}, {"id": 53, "seek": 147172, "start": 1443.52, "end": 1471.72, "text": " So X bar is 15 divided by 5, which is 3. So straightforward. Now imagine instead of 5, this number 5, we have a 10. Now 10, there is a gap between 4, which is the second largest, and the maximum, which is 10. Now if we add these values, 1, 2, 3, 4, and 10, then divide by 5, the mean will be 4.", "tokens": [407, 1783, 2159, 307, 2119, 6666, 538, 1025, 11, 597, 307, 805, 13, 407, 15325, 13, 823, 3811, 2602, 295, 1025, 11, 341, 1230, 1025, 11, 321, 362, 257, 1266, 13, 823, 1266, 11, 456, 307, 257, 7417, 1296, 1017, 11, 597, 307, 264, 1150, 6443, 11, 293, 264, 6674, 11, 597, 307, 1266, 13, 823, 498, 321, 909, 613, 4190, 11, 502, 11, 568, 11, 805, 11, 1017, 11, 293, 1266, 11, 550, 9845, 538, 1025, 11, 264, 914, 486, 312, 1017, 13], "avg_logprob": -0.15330882773679846, "compression_ratio": 1.5775401069518717, "no_speech_prob": 0.0, "words": [{"start": 1443.52, "end": 1443.8, "word": " So", "probability": 0.80859375}, {"start": 1443.8, "end": 1443.96, "word": " X", "probability": 0.50439453125}, {"start": 1443.96, "end": 1444.16, "word": " bar", "probability": 0.8671875}, {"start": 1444.16, "end": 1444.26, "word": " is", "probability": 0.92236328125}, {"start": 1444.26, "end": 1444.54, "word": " 15", "probability": 0.69189453125}, {"start": 1444.54, "end": 1444.74, "word": " divided", "probability": 0.73486328125}, {"start": 1444.74, "end": 1444.92, 
"word": " by", "probability": 0.96826171875}, {"start": 1444.92, "end": 1445.32, "word": " 5,", "probability": 0.62158203125}, {"start": 1445.66, "end": 1445.92, "word": " which", "probability": 0.95263671875}, {"start": 1445.92, "end": 1446.06, "word": " is", "probability": 0.94970703125}, {"start": 1446.06, "end": 1446.36, "word": " 3.", "probability": 0.806640625}, {"start": 1446.72, "end": 1446.86, "word": " So", "probability": 0.45751953125}, {"start": 1446.86, "end": 1447.24, "word": " straightforward.", "probability": 0.728515625}, {"start": 1448.2, "end": 1448.4, "word": " Now", "probability": 0.94921875}, {"start": 1448.4, "end": 1448.82, "word": " imagine", "probability": 0.810546875}, {"start": 1448.82, "end": 1449.96, "word": " instead", "probability": 0.5771484375}, {"start": 1449.96, "end": 1450.24, "word": " of", "probability": 0.97119140625}, {"start": 1450.24, "end": 1450.62, "word": " 5,", "probability": 0.82763671875}, {"start": 1451.48, "end": 1451.62, "word": " this", "probability": 0.94287109375}, {"start": 1451.62, "end": 1451.8, "word": " number", "probability": 0.95166015625}, {"start": 1451.8, "end": 1452.14, "word": " 5,", "probability": 0.9365234375}, {"start": 1452.38, "end": 1452.66, "word": " we", "probability": 0.95751953125}, {"start": 1452.66, "end": 1452.82, "word": " have", "probability": 0.9501953125}, {"start": 1452.82, "end": 1452.96, "word": " a", "probability": 0.798828125}, {"start": 1452.96, "end": 1453.12, "word": " 10.", "probability": 0.9658203125}, {"start": 1454.6, "end": 1455.14, "word": " Now", "probability": 0.94091796875}, {"start": 1455.14, "end": 1455.54, "word": " 10,", "probability": 0.8193359375}, {"start": 1455.76, "end": 1455.94, "word": " there", "probability": 0.90869140625}, {"start": 1455.94, "end": 1456.16, "word": " is", "probability": 0.9453125}, {"start": 1456.16, "end": 1456.48, "word": " a", "probability": 0.98681640625}, {"start": 1456.48, "end": 1456.76, "word": " gap", "probability": 
0.9443359375}, {"start": 1456.76, "end": 1458.16, "word": " between", "probability": 0.8466796875}, {"start": 1458.16, "end": 1458.88, "word": " 4,", "probability": 0.92041015625}, {"start": 1459.04, "end": 1459.2, "word": " which", "probability": 0.92724609375}, {"start": 1459.2, "end": 1459.28, "word": " is", "probability": 0.94384765625}, {"start": 1459.28, "end": 1459.44, "word": " the", "probability": 0.9111328125}, {"start": 1459.44, "end": 1459.72, "word": " second", "probability": 0.8955078125}, {"start": 1459.72, "end": 1460.12, "word": " largest,", "probability": 0.8994140625}, {"start": 1460.78, "end": 1461.4, "word": " and", "probability": 0.9384765625}, {"start": 1461.4, "end": 1461.58, "word": " the", "probability": 0.90771484375}, {"start": 1461.58, "end": 1461.9, "word": " maximum,", "probability": 0.87353515625}, {"start": 1462.1, "end": 1462.16, "word": " which", "probability": 0.95703125}, {"start": 1462.16, "end": 1462.3, "word": " is", "probability": 0.94677734375}, {"start": 1462.3, "end": 1462.58, "word": " 10.", "probability": 0.9501953125}, {"start": 1464.12, "end": 1464.44, "word": " Now", "probability": 0.9560546875}, {"start": 1464.44, "end": 1464.7, "word": " if", "probability": 0.75390625}, {"start": 1464.7, "end": 1464.86, "word": " we", "probability": 0.9619140625}, {"start": 1464.86, "end": 1465.22, "word": " add", "probability": 0.90478515625}, {"start": 1465.22, "end": 1465.6, "word": " these", "probability": 0.849609375}, {"start": 1465.6, "end": 1466.08, "word": " values,", "probability": 0.96728515625}, {"start": 1466.22, "end": 1466.42, "word": " 1,", "probability": 0.919921875}, {"start": 1466.46, "end": 1466.54, "word": " 2,", "probability": 0.9560546875}, {"start": 1466.62, "end": 1466.78, "word": " 3,", "probability": 0.99560546875}, {"start": 1466.88, "end": 1467.32, "word": " 4,", "probability": 0.6728515625}, {"start": 1467.46, "end": 1467.54, "word": " and", "probability": 0.88916015625}, {"start": 1467.54, "end": 
1467.82, "word": " 10,", "probability": 0.96875}, {"start": 1468.34, "end": 1468.56, "word": " then", "probability": 0.83349609375}, {"start": 1468.56, "end": 1468.82, "word": " divide", "probability": 0.70654296875}, {"start": 1468.82, "end": 1468.98, "word": " by", "probability": 0.9453125}, {"start": 1468.98, "end": 1469.44, "word": " 5,", "probability": 0.958984375}, {"start": 1470.26, "end": 1470.54, "word": " the", "probability": 0.90771484375}, {"start": 1470.54, "end": 1470.84, "word": " mean", "probability": 0.9765625}, {"start": 1470.84, "end": 1471.16, "word": " will", "probability": 0.88720703125}, {"start": 1471.16, "end": 1471.36, "word": " be", "probability": 0.95166015625}, {"start": 1471.36, "end": 1471.72, "word": " 4.", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 54, "seek": 149638, "start": 1473.1, "end": 1496.38, "text": " If you see here, we just added one value, or I mean, we replaced five by 10, and the mean changed dramatically from three to four. There is big change between three and four, around 25% more. 
So that means outliers or extreme values affected", "tokens": [759, 291, 536, 510, 11, 321, 445, 3869, 472, 2158, 11, 420, 286, 914, 11, 321, 10772, 1732, 538, 1266, 11, 293, 264, 914, 3105, 17548, 490, 1045, 281, 1451, 13, 821, 307, 955, 1319, 1296, 1045, 293, 1451, 11, 926, 3552, 4, 544, 13, 407, 300, 1355, 484, 23646, 420, 8084, 4190, 8028], "avg_logprob": -0.21903408928350968, "compression_ratio": 1.4319526627218935, "no_speech_prob": 0.0, "words": [{"start": 1473.1, "end": 1473.82, "word": " If", "probability": 0.7275390625}, {"start": 1473.82, "end": 1474.16, "word": " you", "probability": 0.93701171875}, {"start": 1474.16, "end": 1474.36, "word": " see", "probability": 0.919921875}, {"start": 1474.36, "end": 1474.56, "word": " here,", "probability": 0.8662109375}, {"start": 1474.62, "end": 1474.74, "word": " we", "probability": 0.958984375}, {"start": 1474.74, "end": 1475.08, "word": " just", "probability": 0.8984375}, {"start": 1475.08, "end": 1476.0, "word": " added", "probability": 0.8740234375}, {"start": 1476.0, "end": 1476.68, "word": " one", "probability": 0.83984375}, {"start": 1476.68, "end": 1477.06, "word": " value,", "probability": 0.9775390625}, {"start": 1477.74, "end": 1478.02, "word": " or", "probability": 0.9462890625}, {"start": 1478.02, "end": 1478.16, "word": " I", "probability": 0.88818359375}, {"start": 1478.16, "end": 1478.3, "word": " mean,", "probability": 0.96875}, {"start": 1478.34, "end": 1478.46, "word": " we", "probability": 0.951171875}, {"start": 1478.46, "end": 1478.84, "word": " replaced", "probability": 0.481201171875}, {"start": 1478.84, "end": 1479.22, "word": " five", "probability": 0.642578125}, {"start": 1479.22, "end": 1479.44, "word": " by", "probability": 0.9697265625}, {"start": 1479.44, "end": 1479.82, "word": " 10,", "probability": 0.5390625}, {"start": 1480.6, "end": 1480.86, "word": " and", "probability": 0.92529296875}, {"start": 1480.86, "end": 1481.06, "word": " the", "probability": 0.493896484375}, 
{"start": 1481.06, "end": 1481.2, "word": " mean", "probability": 0.89501953125}, {"start": 1481.2, "end": 1481.88, "word": " changed", "probability": 0.5361328125}, {"start": 1481.88, "end": 1482.54, "word": " dramatically", "probability": 0.87548828125}, {"start": 1482.54, "end": 1483.08, "word": " from", "probability": 0.86767578125}, {"start": 1483.08, "end": 1484.14, "word": " three", "probability": 0.55419921875}, {"start": 1484.14, "end": 1484.34, "word": " to", "probability": 0.970703125}, {"start": 1484.34, "end": 1484.7, "word": " four.", "probability": 0.94287109375}, {"start": 1485.52, "end": 1485.72, "word": " There", "probability": 0.67333984375}, {"start": 1485.72, "end": 1485.86, "word": " is", "probability": 0.71044921875}, {"start": 1485.86, "end": 1486.08, "word": " big", "probability": 0.55517578125}, {"start": 1486.08, "end": 1486.52, "word": " change", "probability": 0.91357421875}, {"start": 1486.52, "end": 1486.88, "word": " between", "probability": 0.8720703125}, {"start": 1486.88, "end": 1487.14, "word": " three", "probability": 0.91943359375}, {"start": 1487.14, "end": 1487.28, "word": " and", "probability": 0.939453125}, {"start": 1487.28, "end": 1487.54, "word": " four,", "probability": 0.939453125}, {"start": 1488.58, "end": 1488.86, "word": " around", "probability": 0.92578125}, {"start": 1488.86, "end": 1489.26, "word": " 25", "probability": 0.9677734375}, {"start": 1489.26, "end": 1489.7, "word": "%", "probability": 0.8828125}, {"start": 1489.7, "end": 1490.72, "word": " more.", "probability": 0.9365234375}, {"start": 1492.3, "end": 1492.6, "word": " So", "probability": 0.91064453125}, {"start": 1492.6, "end": 1492.84, "word": " that", "probability": 0.8466796875}, {"start": 1492.84, "end": 1493.3, "word": " means", "probability": 0.935546875}, {"start": 1493.3, "end": 1494.4, "word": " outliers", "probability": 0.92919921875}, {"start": 1494.4, "end": 1494.68, "word": " or", "probability": 0.669921875}, {"start": 1494.68, "end": 
1495.06, "word": " extreme", "probability": 0.89990234375}, {"start": 1495.06, "end": 1495.56, "word": " values", "probability": 0.9736328125}, {"start": 1495.56, "end": 1496.38, "word": " affected", "probability": 0.79638671875}], "temperature": 1.0}, {"id": 55, "seek": 152466, "start": 1497.44, "end": 1524.66, "text": " the mean. So take this information in your mind because later we'll talk a little bit about another one. So the mean is affected by extreme values. Imagine another example. Suppose we have data from 1 to 9. 1, 2, 3, 4, 6, 7, 8, 9. Now the mean of these values, some divide by n.", "tokens": [264, 914, 13, 407, 747, 341, 1589, 294, 428, 1575, 570, 1780, 321, 603, 751, 257, 707, 857, 466, 1071, 472, 13, 407, 264, 914, 307, 8028, 538, 8084, 4190, 13, 11739, 1071, 1365, 13, 21360, 321, 362, 1412, 490, 502, 281, 1722, 13, 502, 11, 568, 11, 805, 11, 1017, 11, 1386, 11, 1614, 11, 1649, 11, 1722, 13, 823, 264, 914, 295, 613, 4190, 11, 512, 9845, 538, 297, 13], "avg_logprob": -0.19210188192863986, "compression_ratio": 1.4455958549222798, "no_speech_prob": 0.0, "words": [{"start": 1497.44, "end": 1497.74, "word": " the", "probability": 0.34130859375}, {"start": 1497.74, "end": 1497.92, "word": " mean.", "probability": 0.86376953125}, {"start": 1499.18, "end": 1499.74, "word": " So", "probability": 0.7763671875}, {"start": 1499.74, "end": 1500.38, "word": " take", "probability": 0.398681640625}, {"start": 1500.38, "end": 1500.6, "word": " this", "probability": 0.94091796875}, {"start": 1500.6, "end": 1501.04, "word": " information", "probability": 0.8271484375}, {"start": 1501.04, "end": 1501.2, "word": " in", "probability": 0.689453125}, {"start": 1501.2, "end": 1501.34, "word": " your", "probability": 0.8603515625}, {"start": 1501.34, "end": 1501.6, "word": " mind", "probability": 0.90380859375}, {"start": 1501.6, "end": 1502.26, "word": " because", "probability": 0.556640625}, {"start": 1502.26, "end": 1502.54, "word": " later", "probability": 
0.93017578125}, {"start": 1502.54, "end": 1502.78, "word": " we'll", "probability": 0.638427734375}, {"start": 1502.78, "end": 1503.0, "word": " talk", "probability": 0.88720703125}, {"start": 1503.0, "end": 1503.12, "word": " a", "probability": 0.82373046875}, {"start": 1503.12, "end": 1503.28, "word": " little", "probability": 0.85302734375}, {"start": 1503.28, "end": 1503.56, "word": " bit", "probability": 0.9208984375}, {"start": 1503.56, "end": 1503.96, "word": " about", "probability": 0.9013671875}, {"start": 1503.96, "end": 1504.58, "word": " another", "probability": 0.9111328125}, {"start": 1504.58, "end": 1504.86, "word": " one.", "probability": 0.919921875}, {"start": 1505.56, "end": 1505.78, "word": " So", "probability": 0.89794921875}, {"start": 1505.78, "end": 1505.92, "word": " the", "probability": 0.85498046875}, {"start": 1505.92, "end": 1506.12, "word": " mean", "probability": 0.94580078125}, {"start": 1506.12, "end": 1506.58, "word": " is", "probability": 0.9541015625}, {"start": 1506.58, "end": 1506.96, "word": " affected", "probability": 0.85302734375}, {"start": 1506.96, "end": 1507.36, "word": " by", "probability": 0.97265625}, {"start": 1507.36, "end": 1507.88, "word": " extreme", "probability": 0.83642578125}, {"start": 1507.88, "end": 1508.34, "word": " values.", "probability": 0.9755859375}, {"start": 1509.86, "end": 1510.42, "word": " Imagine", "probability": 0.8359375}, {"start": 1510.42, "end": 1510.82, "word": " another", "probability": 0.9287109375}, {"start": 1510.82, "end": 1511.26, "word": " example.", "probability": 0.9716796875}, {"start": 1512.68, "end": 1513.1, "word": " Suppose", "probability": 0.80859375}, {"start": 1513.1, "end": 1513.32, "word": " we", "probability": 0.9501953125}, {"start": 1513.32, "end": 1514.08, "word": " have", "probability": 0.94921875}, {"start": 1514.08, "end": 1515.78, "word": " data", "probability": 0.91455078125}, {"start": 1515.78, "end": 1516.12, "word": " from", "probability": 0.89208984375}, 
{"start": 1516.12, "end": 1516.42, "word": " 1", "probability": 0.64599609375}, {"start": 1516.42, "end": 1516.6, "word": " to", "probability": 0.96630859375}, {"start": 1516.6, "end": 1516.9, "word": " 9.", "probability": 0.99755859375}, {"start": 1517.52, "end": 1517.78, "word": " 1,", "probability": 0.76953125}, {"start": 1517.86, "end": 1517.94, "word": " 2,", "probability": 0.75390625}, {"start": 1518.02, "end": 1518.18, "word": " 3,", "probability": 0.98681640625}, {"start": 1518.32, "end": 1518.64, "word": " 4,", "probability": 0.984375}, {"start": 1518.94, "end": 1519.18, "word": " 6,", "probability": 0.93798828125}, {"start": 1519.3, "end": 1519.52, "word": " 7,", "probability": 0.984375}, {"start": 1519.58, "end": 1519.74, "word": " 8,", "probability": 0.9951171875}, {"start": 1519.86, "end": 1520.06, "word": " 9.", "probability": 0.99658203125}, {"start": 1521.04, "end": 1521.38, "word": " Now", "probability": 0.94189453125}, {"start": 1521.38, "end": 1521.66, "word": " the", "probability": 0.69482421875}, {"start": 1521.66, "end": 1521.84, "word": " mean", "probability": 0.9521484375}, {"start": 1521.84, "end": 1521.96, "word": " of", "probability": 0.96728515625}, {"start": 1521.96, "end": 1522.2, "word": " these", "probability": 0.8486328125}, {"start": 1522.2, "end": 1522.74, "word": " values,", "probability": 0.96533203125}, {"start": 1523.08, "end": 1523.4, "word": " some", "probability": 0.5498046875}, {"start": 1523.4, "end": 1524.16, "word": " divide", "probability": 0.79345703125}, {"start": 1524.16, "end": 1524.38, "word": " by", "probability": 0.9658203125}, {"start": 1524.38, "end": 1524.66, "word": " n.", "probability": 0.5}], "temperature": 1.0}, {"id": 56, "seek": 155533, "start": 1526.39, "end": 1555.33, "text": " If you sum 1 through 9, summation is 45. Divide by 9, which is 5. So the sum of these values divided by N gives the average, so the average is 5. Now suppose we add 100 to the end of this data. 
So the sum will be 145 divided by 10, that's 14.5. Now the mean was 5.", "tokens": [759, 291, 2408, 502, 807, 1722, 11, 28811, 307, 6905, 13, 9886, 482, 538, 1722, 11, 597, 307, 1025, 13, 407, 264, 2408, 295, 613, 4190, 6666, 538, 426, 2709, 264, 4274, 11, 370, 264, 4274, 307, 1025, 13, 823, 7297, 321, 909, 2319, 281, 264, 917, 295, 341, 1412, 13, 407, 264, 2408, 486, 312, 3499, 20, 6666, 538, 1266, 11, 300, 311, 3499, 13, 20, 13, 823, 264, 914, 390, 1025, 13], "avg_logprob": -0.1910416603088379, "compression_ratio": 1.5773809523809523, "no_speech_prob": 0.0, "words": [{"start": 1526.39, "end": 1526.69, "word": " If", "probability": 0.3935546875}, {"start": 1526.69, "end": 1526.85, "word": " you", "probability": 0.93310546875}, {"start": 1526.85, "end": 1527.09, "word": " sum", "probability": 0.923828125}, {"start": 1527.09, "end": 1527.35, "word": " 1", "probability": 0.7802734375}, {"start": 1527.35, "end": 1527.87, "word": " through", "probability": 0.896484375}, {"start": 1527.87, "end": 1528.29, "word": " 9,", "probability": 0.984375}, {"start": 1528.45, "end": 1528.93, "word": " summation", "probability": 0.68359375}, {"start": 1528.93, "end": 1529.21, "word": " is", "probability": 0.927734375}, {"start": 1529.21, "end": 1529.71, "word": " 45.", "probability": 0.95263671875}, {"start": 1530.69, "end": 1531.29, "word": " Divide", "probability": 0.88134765625}, {"start": 1531.29, "end": 1531.51, "word": " by", "probability": 0.95654296875}, {"start": 1531.51, "end": 1531.97, "word": " 9,", "probability": 0.951171875}, {"start": 1532.51, "end": 1533.17, "word": " which", "probability": 0.93212890625}, {"start": 1533.17, "end": 1533.31, "word": " is", "probability": 0.951171875}, {"start": 1533.31, "end": 1533.63, "word": " 5.", "probability": 0.93408203125}, {"start": 1534.37, "end": 1534.59, "word": " So", "probability": 0.93017578125}, {"start": 1534.59, "end": 1534.73, "word": " the", "probability": 0.751953125}, {"start": 1534.73, "end": 1534.95, "word": " 
sum", "probability": 0.947265625}, {"start": 1534.95, "end": 1535.17, "word": " of", "probability": 0.96875}, {"start": 1535.17, "end": 1535.37, "word": " these", "probability": 0.8447265625}, {"start": 1535.37, "end": 1535.75, "word": " values", "probability": 0.96728515625}, {"start": 1535.75, "end": 1536.01, "word": " divided", "probability": 0.66064453125}, {"start": 1536.01, "end": 1536.23, "word": " by", "probability": 0.96875}, {"start": 1536.23, "end": 1536.51, "word": " N", "probability": 0.53955078125}, {"start": 1536.51, "end": 1536.93, "word": " gives", "probability": 0.794921875}, {"start": 1536.93, "end": 1537.09, "word": " the", "probability": 0.908203125}, {"start": 1537.09, "end": 1537.43, "word": " average,", "probability": 0.78564453125}, {"start": 1537.85, "end": 1538.05, "word": " so", "probability": 0.93115234375}, {"start": 1538.05, "end": 1538.19, "word": " the", "probability": 0.912109375}, {"start": 1538.19, "end": 1538.41, "word": " average", "probability": 0.80419921875}, {"start": 1538.41, "end": 1538.63, "word": " is", "probability": 0.947265625}, {"start": 1538.63, "end": 1539.01, "word": " 5.", "probability": 0.95947265625}, {"start": 1540.99, "end": 1541.59, "word": " Now", "probability": 0.45849609375}, {"start": 1541.59, "end": 1541.89, "word": " suppose", "probability": 0.7509765625}, {"start": 1541.89, "end": 1542.15, "word": " we", "probability": 0.9404296875}, {"start": 1542.15, "end": 1542.49, "word": " add", "probability": 0.900390625}, {"start": 1542.49, "end": 1543.41, "word": " 100", "probability": 0.91015625}, {"start": 1543.41, "end": 1543.77, "word": " to", "probability": 0.9736328125}, {"start": 1543.77, "end": 1543.91, "word": " the", "probability": 0.9169921875}, {"start": 1543.91, "end": 1544.21, "word": " end", "probability": 0.51220703125}, {"start": 1544.21, "end": 1544.61, "word": " of", "probability": 0.96484375}, {"start": 1544.61, "end": 1544.89, "word": " this", "probability": 0.94091796875}, {"start": 
1544.89, "end": 1545.31, "word": " data.", "probability": 0.93798828125}, {"start": 1546.27, "end": 1546.49, "word": " So", "probability": 0.9306640625}, {"start": 1546.49, "end": 1546.67, "word": " the", "probability": 0.892578125}, {"start": 1546.67, "end": 1546.95, "word": " sum", "probability": 0.94384765625}, {"start": 1546.95, "end": 1547.55, "word": " will", "probability": 0.86767578125}, {"start": 1547.55, "end": 1547.77, "word": " be", "probability": 0.95556640625}, {"start": 1547.77, "end": 1549.07, "word": " 145", "probability": 0.93701171875}, {"start": 1549.07, "end": 1550.19, "word": " divided", "probability": 0.54638671875}, {"start": 1550.19, "end": 1550.49, "word": " by", "probability": 0.97216796875}, {"start": 1550.49, "end": 1550.81, "word": " 10,", "probability": 0.9638671875}, {"start": 1550.99, "end": 1551.25, "word": " that's", "probability": 0.876708984375}, {"start": 1551.25, "end": 1551.57, "word": " 14", "probability": 0.9775390625}, {"start": 1551.57, "end": 1552.21, "word": ".5.", "probability": 0.99658203125}, {"start": 1553.41, "end": 1553.67, "word": " Now", "probability": 0.88671875}, {"start": 1553.67, "end": 1554.07, "word": " the", "probability": 0.625}, {"start": 1554.07, "end": 1554.29, "word": " mean", "probability": 0.939453125}, {"start": 1554.29, "end": 1554.65, "word": " was", "probability": 0.94873046875}, {"start": 1554.65, "end": 1555.33, "word": " 5.", "probability": 0.9638671875}], "temperature": 1.0}, {"id": 57, "seek": 158395, "start": 1556.77, "end": 1583.95, "text": " Then after we added 100, it becomes 14.5. Imagine the mean was 5, it changed to 14.5. It means around three times. So that means outliers affect the mean much more than the other one. We'll talk a little later about it, which is the median. 
So keep in mind outliers", "tokens": [1396, 934, 321, 3869, 2319, 11, 309, 3643, 3499, 13, 20, 13, 11739, 264, 914, 390, 1025, 11, 309, 3105, 281, 3499, 13, 20, 13, 467, 1355, 926, 1045, 1413, 13, 407, 300, 1355, 484, 23646, 3345, 264, 914, 709, 544, 813, 264, 661, 472, 13, 492, 603, 751, 257, 707, 1780, 466, 309, 11, 597, 307, 264, 26779, 13, 407, 1066, 294, 1575, 484, 23646], "avg_logprob": -0.19729477878826768, "compression_ratio": 1.52, "no_speech_prob": 0.0, "words": [{"start": 1556.77, "end": 1557.05, "word": " Then", "probability": 0.59423828125}, {"start": 1557.05, "end": 1557.45, "word": " after", "probability": 0.7548828125}, {"start": 1557.45, "end": 1557.61, "word": " we", "probability": 0.94384765625}, {"start": 1557.61, "end": 1557.97, "word": " added", "probability": 0.87060546875}, {"start": 1557.97, "end": 1558.43, "word": " 100,", "probability": 0.728515625}, {"start": 1558.65, "end": 1558.85, "word": " it", "probability": 0.953125}, {"start": 1558.85, "end": 1559.35, "word": " becomes", "probability": 0.8505859375}, {"start": 1559.35, "end": 1560.05, "word": " 14", "probability": 0.96923828125}, {"start": 1560.05, "end": 1560.63, "word": ".5.", "probability": 0.99462890625}, {"start": 1560.81, "end": 1561.35, "word": " Imagine", "probability": 0.89599609375}, {"start": 1561.35, "end": 1562.07, "word": " the", "probability": 0.69580078125}, {"start": 1562.07, "end": 1562.25, "word": " mean", "probability": 0.951171875}, {"start": 1562.25, "end": 1562.77, "word": " was", "probability": 0.57470703125}, {"start": 1562.77, "end": 1563.33, "word": " 5,", "probability": 0.66162109375}, {"start": 1564.17, "end": 1564.45, "word": " it", "probability": 0.93212890625}, {"start": 1564.45, "end": 1565.47, "word": " changed", "probability": 0.54833984375}, {"start": 1565.47, "end": 1566.39, "word": " to", "probability": 0.96484375}, {"start": 1566.39, "end": 1568.15, "word": " 14", "probability": 0.95556640625}, {"start": 1568.15, "end": 1568.67, "word": 
".5.", "probability": 0.99853515625}, {"start": 1568.73, "end": 1568.89, "word": " It", "probability": 0.845703125}, {"start": 1568.89, "end": 1569.15, "word": " means", "probability": 0.90966796875}, {"start": 1569.15, "end": 1569.43, "word": " around", "probability": 0.9423828125}, {"start": 1569.43, "end": 1569.67, "word": " three", "probability": 0.56982421875}, {"start": 1569.67, "end": 1570.13, "word": " times.", "probability": 0.9208984375}, {"start": 1571.23, "end": 1571.45, "word": " So", "probability": 0.80126953125}, {"start": 1571.45, "end": 1571.65, "word": " that", "probability": 0.88525390625}, {"start": 1571.65, "end": 1572.03, "word": " means", "probability": 0.93505859375}, {"start": 1572.03, "end": 1572.73, "word": " outliers", "probability": 0.909912109375}, {"start": 1572.73, "end": 1574.59, "word": " affect", "probability": 0.92236328125}, {"start": 1574.59, "end": 1574.83, "word": " the", "probability": 0.921875}, {"start": 1574.83, "end": 1575.05, "word": " mean", "probability": 0.970703125}, {"start": 1575.05, "end": 1575.45, "word": " much", "probability": 0.8857421875}, {"start": 1575.45, "end": 1575.75, "word": " more", "probability": 0.9365234375}, {"start": 1575.75, "end": 1576.07, "word": " than", "probability": 0.91064453125}, {"start": 1576.07, "end": 1577.51, "word": " the", "probability": 0.24658203125}, {"start": 1577.51, "end": 1577.83, "word": " other", "probability": 0.86962890625}, {"start": 1577.83, "end": 1578.05, "word": " one.", "probability": 0.8828125}, {"start": 1578.11, "end": 1578.25, "word": " We'll", "probability": 0.814697265625}, {"start": 1578.25, "end": 1578.47, "word": " talk", "probability": 0.89892578125}, {"start": 1578.47, "end": 1578.87, "word": " a", "probability": 0.67919921875}, {"start": 1578.87, "end": 1578.87, "word": " little", "probability": 0.841796875}, {"start": 1578.87, "end": 1579.31, "word": " later", "probability": 0.8828125}, {"start": 1579.31, "end": 1579.69, "word": " about", 
"probability": 0.90234375}, {"start": 1579.69, "end": 1579.89, "word": " it,", "probability": 0.931640625}, {"start": 1579.99, "end": 1580.05, "word": " which", "probability": 0.94287109375}, {"start": 1580.05, "end": 1580.15, "word": " is", "probability": 0.9423828125}, {"start": 1580.15, "end": 1580.27, "word": " the", "probability": 0.921875}, {"start": 1580.27, "end": 1580.49, "word": " median.", "probability": 0.978515625}, {"start": 1581.11, "end": 1581.45, "word": " So", "probability": 0.9580078125}, {"start": 1581.45, "end": 1582.01, "word": " keep", "probability": 0.8466796875}, {"start": 1582.01, "end": 1582.13, "word": " in", "probability": 0.9443359375}, {"start": 1582.13, "end": 1582.43, "word": " mind", "probability": 0.88720703125}, {"start": 1582.43, "end": 1583.95, "word": " outliers", "probability": 0.775390625}], "temperature": 1.0}, {"id": 58, "seek": 161493, "start": 1585.29, "end": 1614.93, "text": " affected the mean in this case. Any question? Is it clear? Yes. So, one more time. The mean is affected by extreme values. So that's for the mean. The other measure of center tendency is called the median. 
Now, what's the median?", "tokens": [8028, 264, 914, 294, 341, 1389, 13, 2639, 1168, 30, 1119, 309, 1850, 30, 1079, 13, 407, 11, 472, 544, 565, 13, 440, 914, 307, 8028, 538, 8084, 4190, 13, 407, 300, 311, 337, 264, 914, 13, 440, 661, 3481, 295, 3056, 18187, 307, 1219, 264, 26779, 13, 823, 11, 437, 311, 264, 26779, 30], "avg_logprob": -0.17857143389327185, "compression_ratio": 1.5436241610738255, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1585.29, "end": 1585.91, "word": " affected", "probability": 0.64306640625}, {"start": 1585.91, "end": 1586.49, "word": " the", "probability": 0.81982421875}, {"start": 1586.49, "end": 1586.73, "word": " mean", "probability": 0.96435546875}, {"start": 1586.73, "end": 1588.93, "word": " in", "probability": 0.7490234375}, {"start": 1588.93, "end": 1589.15, "word": " this", "probability": 0.94970703125}, {"start": 1589.15, "end": 1589.51, "word": " case.", "probability": 0.91748046875}, {"start": 1591.81, "end": 1592.09, "word": " Any", "probability": 0.84765625}, {"start": 1592.09, "end": 1592.49, "word": " question?", "probability": 0.6826171875}, {"start": 1594.53, "end": 1594.79, "word": " Is", "probability": 0.79345703125}, {"start": 1594.79, "end": 1594.91, "word": " it", "probability": 0.857421875}, {"start": 1594.91, "end": 1595.19, "word": " clear?", "probability": 0.896484375}, {"start": 1595.51, "end": 1595.73, "word": " Yes.", "probability": 0.373046875}, {"start": 1597.35, "end": 1598.15, "word": " So,", "probability": 0.8740234375}, {"start": 1598.55, "end": 1599.27, "word": " one", "probability": 0.93408203125}, {"start": 1599.27, "end": 1599.45, "word": " more", "probability": 0.93798828125}, {"start": 1599.45, "end": 1599.71, "word": " time.", "probability": 0.8935546875}, {"start": 1600.51, "end": 1600.91, "word": " The", "probability": 0.8828125}, {"start": 1600.91, "end": 1601.15, "word": " mean", "probability": 0.97119140625}, {"start": 1601.15, "end": 1601.59, "word": " is", "probability": 
0.9521484375}, {"start": 1601.59, "end": 1602.03, "word": " affected", "probability": 0.8583984375}, {"start": 1602.03, "end": 1602.69, "word": " by", "probability": 0.97265625}, {"start": 1602.69, "end": 1604.17, "word": " extreme", "probability": 0.876953125}, {"start": 1604.17, "end": 1605.17, "word": " values.", "probability": 0.970703125}, {"start": 1605.81, "end": 1606.09, "word": " So", "probability": 0.9462890625}, {"start": 1606.09, "end": 1606.49, "word": " that's", "probability": 0.8076171875}, {"start": 1606.49, "end": 1606.81, "word": " for", "probability": 0.94482421875}, {"start": 1606.81, "end": 1606.99, "word": " the", "probability": 0.92822265625}, {"start": 1606.99, "end": 1607.15, "word": " mean.", "probability": 0.943359375}, {"start": 1608.11, "end": 1608.91, "word": " The", "probability": 0.89208984375}, {"start": 1608.91, "end": 1609.19, "word": " other", "probability": 0.88916015625}, {"start": 1609.19, "end": 1609.47, "word": " measure", "probability": 0.82568359375}, {"start": 1609.47, "end": 1609.75, "word": " of", "probability": 0.96728515625}, {"start": 1609.75, "end": 1610.03, "word": " center", "probability": 0.62255859375}, {"start": 1610.03, "end": 1610.47, "word": " tendency", "probability": 0.445068359375}, {"start": 1610.47, "end": 1610.91, "word": " is", "probability": 0.93994140625}, {"start": 1610.91, "end": 1611.33, "word": " called", "probability": 0.884765625}, {"start": 1611.33, "end": 1611.83, "word": " the", "probability": 0.88623046875}, {"start": 1611.83, "end": 1612.05, "word": " median.", "probability": 0.9169921875}, {"start": 1613.87, "end": 1614.39, "word": " Now,", "probability": 0.94580078125}, {"start": 1614.45, "end": 1614.63, "word": " what's", "probability": 0.928466796875}, {"start": 1614.63, "end": 1614.75, "word": " the", "probability": 0.92333984375}, {"start": 1614.75, "end": 1614.93, "word": " median?", "probability": 0.94677734375}], "temperature": 1.0}, {"id": 59, "seek": 162670, "start": 1617.12, 
"end": 1626.7, "text": " What's the definition of the median from your previous studies? What's the median? I mean, what's the definition of the median?", "tokens": [708, 311, 264, 7123, 295, 264, 26779, 490, 428, 3894, 5313, 30, 708, 311, 264, 26779, 30, 286, 914, 11, 437, 311, 264, 7123, 295, 264, 26779, 30], "avg_logprob": -0.1697198275862069, "compression_ratio": 1.4883720930232558, "no_speech_prob": 0.0, "words": [{"start": 1617.12, "end": 1617.6, "word": " What's", "probability": 0.752197265625}, {"start": 1617.6, "end": 1617.68, "word": " the", "probability": 0.9287109375}, {"start": 1617.68, "end": 1618.0, "word": " definition", "probability": 0.923828125}, {"start": 1618.0, "end": 1618.22, "word": " of", "probability": 0.96728515625}, {"start": 1618.22, "end": 1618.36, "word": " the", "probability": 0.71044921875}, {"start": 1618.36, "end": 1618.6, "word": " median", "probability": 0.9541015625}, {"start": 1618.6, "end": 1620.12, "word": " from", "probability": 0.37353515625}, {"start": 1620.12, "end": 1620.38, "word": " your", "probability": 0.89453125}, {"start": 1620.38, "end": 1620.76, "word": " previous", "probability": 0.85107421875}, {"start": 1620.76, "end": 1621.24, "word": " studies?", "probability": 0.97265625}, {"start": 1622.22, "end": 1622.46, "word": " What's", "probability": 0.8203125}, {"start": 1622.46, "end": 1622.62, "word": " the", "probability": 0.92919921875}, {"start": 1622.62, "end": 1622.9, "word": " median?", "probability": 0.72802734375}, {"start": 1625.0, "end": 1625.32, "word": " I", "probability": 0.693359375}, {"start": 1625.32, "end": 1625.5, "word": " mean,", "probability": 0.96826171875}, {"start": 1625.58, "end": 1625.76, "word": " what's", "probability": 0.959716796875}, {"start": 1625.76, "end": 1625.88, "word": " the", "probability": 0.92919921875}, {"start": 1625.88, "end": 1626.18, "word": " definition", "probability": 0.94482421875}, {"start": 1626.18, "end": 1626.38, "word": " of", "probability": 0.96875}, 
{"start": 1626.38, "end": 1626.5, "word": " the", "probability": 0.912109375}, {"start": 1626.5, "end": 1626.7, "word": " median?", "probability": 0.962890625}], "temperature": 1.0}, {"id": 60, "seek": 164802, "start": 1627.86, "end": 1648.02, "text": " Now the middle value, that's correct, but after we arrange the data from smallest to largest or largest to smallest, so we should arrange the data, then we can figure out the median. So the median is the middle point, but after we arrange the data from smallest to largest or vice versa.", "tokens": [823, 264, 2808, 2158, 11, 300, 311, 3006, 11, 457, 934, 321, 9424, 264, 1412, 490, 16998, 281, 6443, 420, 6443, 281, 16998, 11, 370, 321, 820, 9424, 264, 1412, 11, 550, 321, 393, 2573, 484, 264, 26779, 13, 407, 264, 26779, 307, 264, 2808, 935, 11, 457, 934, 321, 9424, 264, 1412, 490, 16998, 281, 6443, 420, 11964, 25650, 13], "avg_logprob": -0.1500756000318835, "compression_ratio": 1.986206896551724, "no_speech_prob": 0.0, "words": [{"start": 1627.86, "end": 1628.22, "word": " Now", "probability": 0.85302734375}, {"start": 1628.22, "end": 1628.68, "word": " the", "probability": 0.4072265625}, {"start": 1628.68, "end": 1628.92, "word": " middle", "probability": 0.8955078125}, {"start": 1628.92, "end": 1629.36, "word": " value,", "probability": 0.97998046875}, {"start": 1629.76, "end": 1630.06, "word": " that's", "probability": 0.910888671875}, {"start": 1630.06, "end": 1630.48, "word": " correct,", "probability": 0.88818359375}, {"start": 1630.9, "end": 1631.08, "word": " but", "probability": 0.91455078125}, {"start": 1631.08, "end": 1631.48, "word": " after", "probability": 0.837890625}, {"start": 1631.48, "end": 1631.68, "word": " we", "probability": 0.9619140625}, {"start": 1631.68, "end": 1632.18, "word": " arrange", "probability": 0.83056640625}, {"start": 1632.18, "end": 1632.38, "word": " the", "probability": 0.90673828125}, {"start": 1632.38, "end": 1632.7, "word": " data", "probability": 0.94970703125}, {"start": 
1632.7, "end": 1632.98, "word": " from", "probability": 0.87939453125}, {"start": 1632.98, "end": 1633.44, "word": " smallest", "probability": 0.92529296875}, {"start": 1633.44, "end": 1633.68, "word": " to", "probability": 0.91064453125}, {"start": 1633.68, "end": 1634.08, "word": " largest", "probability": 0.9130859375}, {"start": 1634.08, "end": 1634.78, "word": " or", "probability": 0.61669921875}, {"start": 1634.78, "end": 1635.28, "word": " largest", "probability": 0.93017578125}, {"start": 1635.28, "end": 1635.72, "word": " to", "probability": 0.93212890625}, {"start": 1635.72, "end": 1636.18, "word": " smallest,", "probability": 0.9423828125}, {"start": 1636.48, "end": 1636.82, "word": " so", "probability": 0.86669921875}, {"start": 1636.82, "end": 1637.04, "word": " we", "probability": 0.94091796875}, {"start": 1637.04, "end": 1637.3, "word": " should", "probability": 0.9619140625}, {"start": 1637.3, "end": 1637.96, "word": " arrange", "probability": 0.86865234375}, {"start": 1637.96, "end": 1638.14, "word": " the", "probability": 0.91748046875}, {"start": 1638.14, "end": 1638.46, "word": " data,", "probability": 0.93701171875}, {"start": 1639.04, "end": 1639.26, "word": " then", "probability": 0.8251953125}, {"start": 1639.26, "end": 1639.4, "word": " we", "probability": 0.95849609375}, {"start": 1639.4, "end": 1639.62, "word": " can", "probability": 0.939453125}, {"start": 1639.62, "end": 1639.9, "word": " figure", "probability": 0.97216796875}, {"start": 1639.9, "end": 1640.16, "word": " out", "probability": 0.88525390625}, {"start": 1640.16, "end": 1640.36, "word": " the", "probability": 0.92236328125}, {"start": 1640.36, "end": 1640.62, "word": " median.", "probability": 0.94970703125}, {"start": 1641.22, "end": 1641.44, "word": " So", "probability": 0.89990234375}, {"start": 1641.44, "end": 1641.62, "word": " the", "probability": 0.76806640625}, {"start": 1641.62, "end": 1641.86, "word": " median", "probability": 0.96728515625}, {"start": 1641.86, 
"end": 1642.8, "word": " is", "probability": 0.94482421875}, {"start": 1642.8, "end": 1643.02, "word": " the", "probability": 0.91943359375}, {"start": 1643.02, "end": 1643.22, "word": " middle", "probability": 0.939453125}, {"start": 1643.22, "end": 1643.62, "word": " point,", "probability": 0.95361328125}, {"start": 1644.12, "end": 1644.28, "word": " but", "probability": 0.919921875}, {"start": 1644.28, "end": 1644.68, "word": " after", "probability": 0.81884765625}, {"start": 1644.68, "end": 1644.86, "word": " we", "probability": 0.9462890625}, {"start": 1644.86, "end": 1645.38, "word": " arrange", "probability": 0.81201171875}, {"start": 1645.38, "end": 1645.54, "word": " the", "probability": 0.810546875}, {"start": 1645.54, "end": 1645.82, "word": " data", "probability": 0.93310546875}, {"start": 1645.82, "end": 1646.12, "word": " from", "probability": 0.822265625}, {"start": 1646.12, "end": 1646.52, "word": " smallest", "probability": 0.9443359375}, {"start": 1646.52, "end": 1646.74, "word": " to", "probability": 0.96484375}, {"start": 1646.74, "end": 1647.06, "word": " largest", "probability": 0.9072265625}, {"start": 1647.06, "end": 1647.44, "word": " or", "probability": 0.72021484375}, {"start": 1647.44, "end": 1647.68, "word": " vice", "probability": 0.916015625}, {"start": 1647.68, "end": 1648.02, "word": " versa.", "probability": 0.78466796875}], "temperature": 1.0}, {"id": 61, "seek": 167599, "start": 1648.85, "end": 1675.99, "text": " So that's the definition of the median. So in an ordered array, so we have to have order array, the median is the middle number. 
The middle number means 50 percent of the data below and 50 percent above the median because it's called the median, the value in the middle after you arrange the data from smallest to largest.", "tokens": [407, 300, 311, 264, 7123, 295, 264, 26779, 13, 407, 294, 364, 8866, 10225, 11, 370, 321, 362, 281, 362, 1668, 10225, 11, 264, 26779, 307, 264, 2808, 1230, 13, 440, 2808, 1230, 1355, 2625, 3043, 295, 264, 1412, 2507, 293, 2625, 3043, 3673, 264, 26779, 570, 309, 311, 1219, 264, 26779, 11, 264, 2158, 294, 264, 2808, 934, 291, 9424, 264, 1412, 490, 16998, 281, 6443, 13], "avg_logprob": -0.11967844418857408, "compression_ratio": 1.8044692737430168, "no_speech_prob": 0.0, "words": [{"start": 1648.85, "end": 1649.09, "word": " So", "probability": 0.873046875}, {"start": 1649.09, "end": 1649.33, "word": " that's", "probability": 0.92724609375}, {"start": 1649.33, "end": 1649.43, "word": " the", "probability": 0.8388671875}, {"start": 1649.43, "end": 1649.73, "word": " definition", "probability": 0.95068359375}, {"start": 1649.73, "end": 1649.91, "word": " of", "probability": 0.966796875}, {"start": 1649.91, "end": 1650.03, "word": " the", "probability": 0.92626953125}, {"start": 1650.03, "end": 1650.25, "word": " median.", "probability": 0.94287109375}, {"start": 1650.89, "end": 1651.39, "word": " So", "probability": 0.90576171875}, {"start": 1651.39, "end": 1651.83, "word": " in", "probability": 0.85693359375}, {"start": 1651.83, "end": 1652.09, "word": " an", "probability": 0.90185546875}, {"start": 1652.09, "end": 1652.47, "word": " ordered", "probability": 0.73876953125}, {"start": 1652.47, "end": 1652.85, "word": " array,", "probability": 0.88232421875}, {"start": 1652.97, "end": 1653.07, "word": " so", "probability": 0.90185546875}, {"start": 1653.07, "end": 1653.23, "word": " we", "probability": 0.96044921875}, {"start": 1653.23, "end": 1653.43, "word": " have", "probability": 0.9482421875}, {"start": 1653.43, "end": 1653.63, "word": " to", "probability": 
0.97119140625}, {"start": 1653.63, "end": 1653.93, "word": " have", "probability": 0.9462890625}, {"start": 1653.93, "end": 1654.39, "word": " order", "probability": 0.449462890625}, {"start": 1654.39, "end": 1654.91, "word": " array,", "probability": 0.92431640625}, {"start": 1655.55, "end": 1655.71, "word": " the", "probability": 0.9140625}, {"start": 1655.71, "end": 1656.05, "word": " median", "probability": 0.95703125}, {"start": 1656.05, "end": 1657.23, "word": " is", "probability": 0.94140625}, {"start": 1657.23, "end": 1657.39, "word": " the", "probability": 0.92138671875}, {"start": 1657.39, "end": 1657.61, "word": " middle", "probability": 0.94970703125}, {"start": 1657.61, "end": 1657.91, "word": " number.", "probability": 0.9296875}, {"start": 1658.91, "end": 1659.23, "word": " The", "probability": 0.89892578125}, {"start": 1659.23, "end": 1659.41, "word": " middle", "probability": 0.93798828125}, {"start": 1659.41, "end": 1659.69, "word": " number", "probability": 0.9423828125}, {"start": 1659.69, "end": 1660.17, "word": " means", "probability": 0.9326171875}, {"start": 1660.17, "end": 1661.23, "word": " 50", "probability": 0.8935546875}, {"start": 1661.23, "end": 1661.67, "word": " percent", "probability": 0.8720703125}, {"start": 1661.67, "end": 1661.91, "word": " of", "probability": 0.96923828125}, {"start": 1661.91, "end": 1662.07, "word": " the", "probability": 0.92236328125}, {"start": 1662.07, "end": 1662.37, "word": " data", "probability": 0.94482421875}, {"start": 1662.37, "end": 1662.81, "word": " below", "probability": 0.9072265625}, {"start": 1662.81, "end": 1665.25, "word": " and", "probability": 0.6396484375}, {"start": 1665.25, "end": 1665.77, "word": " 50", "probability": 0.96240234375}, {"start": 1665.77, "end": 1666.31, "word": " percent", "probability": 0.95361328125}, {"start": 1666.31, "end": 1666.79, "word": " above", "probability": 0.94775390625}, {"start": 1666.79, "end": 1668.19, "word": " the", "probability": 0.89599609375}, 
{"start": 1668.19, "end": 1668.47, "word": " median", "probability": 0.96435546875}, {"start": 1668.47, "end": 1669.93, "word": " because", "probability": 0.335205078125}, {"start": 1669.93, "end": 1670.37, "word": " it's", "probability": 0.932373046875}, {"start": 1670.37, "end": 1670.55, "word": " called", "probability": 0.89404296875}, {"start": 1670.55, "end": 1670.73, "word": " the", "probability": 0.919921875}, {"start": 1670.73, "end": 1670.91, "word": " median,", "probability": 0.904296875}, {"start": 1671.03, "end": 1671.11, "word": " the", "probability": 0.9033203125}, {"start": 1671.11, "end": 1671.33, "word": " value", "probability": 0.9736328125}, {"start": 1671.33, "end": 1671.45, "word": " in", "probability": 0.93701171875}, {"start": 1671.45, "end": 1671.57, "word": " the", "probability": 0.91650390625}, {"start": 1671.57, "end": 1671.77, "word": " middle", "probability": 0.951171875}, {"start": 1671.77, "end": 1672.19, "word": " after", "probability": 0.64599609375}, {"start": 1672.19, "end": 1672.35, "word": " you", "probability": 0.93505859375}, {"start": 1672.35, "end": 1672.65, "word": " arrange", "probability": 0.77197265625}, {"start": 1672.65, "end": 1672.83, "word": " the", "probability": 0.91357421875}, {"start": 1672.83, "end": 1673.19, "word": " data", "probability": 0.939453125}, {"start": 1673.19, "end": 1674.01, "word": " from", "probability": 0.8818359375}, {"start": 1674.01, "end": 1674.67, "word": " smallest", "probability": 0.943359375}, {"start": 1674.67, "end": 1675.67, "word": " to", "probability": 0.97216796875}, {"start": 1675.67, "end": 1675.99, "word": " largest.", "probability": 0.91845703125}], "temperature": 1.0}, {"id": 62, "seek": 170233, "start": 1680.13, "end": 1702.33, "text": " Suppose I again go back to the previous example when we have data 1, 2, 3, 4, and 5. Now for this specific example as we did before, now the data is already ordered. 
The value in the middle is 3 because there are two pillows.", "tokens": [21360, 286, 797, 352, 646, 281, 264, 3894, 1365, 562, 321, 362, 1412, 502, 11, 568, 11, 805, 11, 1017, 11, 293, 1025, 13, 823, 337, 341, 2685, 1365, 382, 321, 630, 949, 11, 586, 264, 1412, 307, 1217, 8866, 13, 440, 2158, 294, 264, 2808, 307, 805, 570, 456, 366, 732, 38630, 13], "avg_logprob": -0.20980114069851963, "compression_ratio": 1.3614457831325302, "no_speech_prob": 0.0, "words": [{"start": 1680.13, "end": 1680.65, "word": " Suppose", "probability": 0.79150390625}, {"start": 1680.65, "end": 1681.13, "word": " I", "probability": 0.5654296875}, {"start": 1681.13, "end": 1681.35, "word": " again", "probability": 0.468994140625}, {"start": 1681.35, "end": 1681.51, "word": " go", "probability": 0.8779296875}, {"start": 1681.51, "end": 1681.75, "word": " back", "probability": 0.8740234375}, {"start": 1681.75, "end": 1681.89, "word": " to", "probability": 0.96923828125}, {"start": 1681.89, "end": 1682.03, "word": " the", "probability": 0.92041015625}, {"start": 1682.03, "end": 1682.27, "word": " previous", "probability": 0.86474609375}, {"start": 1682.27, "end": 1682.77, "word": " example", "probability": 0.97509765625}, {"start": 1682.77, "end": 1683.03, "word": " when", "probability": 0.50830078125}, {"start": 1683.03, "end": 1683.19, "word": " we", "probability": 0.9619140625}, {"start": 1683.19, "end": 1683.59, "word": " have", "probability": 0.93798828125}, {"start": 1683.59, "end": 1684.49, "word": " data", "probability": 0.875}, {"start": 1684.49, "end": 1684.95, "word": " 1,", "probability": 0.480712890625}, {"start": 1685.01, "end": 1685.13, "word": " 2,", "probability": 0.89794921875}, {"start": 1685.23, "end": 1685.37, "word": " 3,", "probability": 0.9931640625}, {"start": 1685.47, "end": 1685.63, "word": " 4,", "probability": 0.99609375}, {"start": 1685.65, "end": 1685.79, "word": " and", "probability": 0.89306640625}, {"start": 1685.79, "end": 1686.13, "word": " 5.", "probability": 
0.9970703125}, {"start": 1688.63, "end": 1689.15, "word": " Now", "probability": 0.93798828125}, {"start": 1689.15, "end": 1689.39, "word": " for", "probability": 0.59033203125}, {"start": 1689.39, "end": 1689.69, "word": " this", "probability": 0.94873046875}, {"start": 1689.69, "end": 1690.25, "word": " specific", "probability": 0.904296875}, {"start": 1690.25, "end": 1690.79, "word": " example", "probability": 0.974609375}, {"start": 1690.79, "end": 1691.09, "word": " as", "probability": 0.56689453125}, {"start": 1691.09, "end": 1691.29, "word": " we", "probability": 0.96435546875}, {"start": 1691.29, "end": 1691.57, "word": " did", "probability": 0.962890625}, {"start": 1691.57, "end": 1692.03, "word": " before,", "probability": 0.87109375}, {"start": 1692.77, "end": 1693.13, "word": " now", "probability": 0.76611328125}, {"start": 1693.13, "end": 1693.33, "word": " the", "probability": 0.892578125}, {"start": 1693.33, "end": 1693.99, "word": " data", "probability": 0.8515625}, {"start": 1693.99, "end": 1694.21, "word": " is", "probability": 0.9287109375}, {"start": 1694.21, "end": 1694.57, "word": " already", "probability": 0.94384765625}, {"start": 1694.57, "end": 1696.05, "word": " ordered.", "probability": 0.7373046875}, {"start": 1697.09, "end": 1697.61, "word": " The", "probability": 0.89306640625}, {"start": 1697.61, "end": 1697.83, "word": " value", "probability": 0.9814453125}, {"start": 1697.83, "end": 1697.95, "word": " in", "probability": 0.802734375}, {"start": 1697.95, "end": 1698.03, "word": " the", "probability": 0.91552734375}, {"start": 1698.03, "end": 1698.21, "word": " middle", "probability": 0.95263671875}, {"start": 1698.21, "end": 1698.41, "word": " is", "probability": 0.94287109375}, {"start": 1698.41, "end": 1698.67, "word": " 3", "probability": 0.52001953125}, {"start": 1698.67, "end": 1699.99, "word": " because", "probability": 0.5986328125}, {"start": 1699.99, "end": 1701.19, "word": " there", "probability": 0.9111328125}, {"start": 
1701.19, "end": 1701.43, "word": " are", "probability": 0.94189453125}, {"start": 1701.43, "end": 1701.63, "word": " two", "probability": 0.8662109375}, {"start": 1701.63, "end": 1702.33, "word": " pillows.", "probability": 0.238525390625}], "temperature": 1.0}, {"id": 63, "seek": 172542, "start": 1704.86, "end": 1725.42, "text": " And also there are the same number of observations above it. So 3 is the median. Now again imagine we replace 5, which is the maximum value, by another one which is extreme one, for example 10. In this case, the median is still 3.", "tokens": [400, 611, 456, 366, 264, 912, 1230, 295, 18163, 3673, 309, 13, 407, 805, 307, 264, 26779, 13, 823, 797, 3811, 321, 7406, 1025, 11, 597, 307, 264, 6674, 2158, 11, 538, 1071, 472, 597, 307, 8084, 472, 11, 337, 1365, 1266, 13, 682, 341, 1389, 11, 264, 26779, 307, 920, 805, 13], "avg_logprob": -0.16984953979651132, "compression_ratio": 1.4085365853658536, "no_speech_prob": 0.0, "words": [{"start": 1704.86, "end": 1705.24, "word": " And", "probability": 0.7255859375}, {"start": 1705.24, "end": 1705.62, "word": " also", "probability": 0.8349609375}, {"start": 1705.62, "end": 1705.82, "word": " there", "probability": 0.65673828125}, {"start": 1705.82, "end": 1705.96, "word": " are", "probability": 0.9306640625}, {"start": 1705.96, "end": 1706.12, "word": " the", "probability": 0.91552734375}, {"start": 1706.12, "end": 1706.34, "word": " same", "probability": 0.89892578125}, {"start": 1706.34, "end": 1706.64, "word": " number", "probability": 0.91796875}, {"start": 1706.64, "end": 1706.8, "word": " of", "probability": 0.947265625}, {"start": 1706.8, "end": 1707.3, "word": " observations", "probability": 0.80224609375}, {"start": 1707.3, "end": 1708.14, "word": " above", "probability": 0.94140625}, {"start": 1708.14, "end": 1708.48, "word": " it.", "probability": 0.95068359375}, {"start": 1708.94, "end": 1709.06, "word": " So", "probability": 0.95361328125}, {"start": 1709.06, "end": 1709.38, "word": " 3", 
"probability": 0.36962890625}, {"start": 1709.38, "end": 1709.64, "word": " is", "probability": 0.95068359375}, {"start": 1709.64, "end": 1709.82, "word": " the", "probability": 0.92041015625}, {"start": 1709.82, "end": 1710.0, "word": " median.", "probability": 0.93994140625}, {"start": 1711.54, "end": 1711.76, "word": " Now", "probability": 0.95751953125}, {"start": 1711.76, "end": 1711.98, "word": " again", "probability": 0.7978515625}, {"start": 1711.98, "end": 1712.38, "word": " imagine", "probability": 0.63037109375}, {"start": 1712.38, "end": 1713.14, "word": " we", "probability": 0.55126953125}, {"start": 1713.14, "end": 1713.54, "word": " replace", "probability": 0.90625}, {"start": 1713.54, "end": 1714.22, "word": " 5,", "probability": 0.95263671875}, {"start": 1714.36, "end": 1714.54, "word": " which", "probability": 0.94384765625}, {"start": 1714.54, "end": 1714.68, "word": " is", "probability": 0.94775390625}, {"start": 1714.68, "end": 1714.9, "word": " the", "probability": 0.9150390625}, {"start": 1714.9, "end": 1715.52, "word": " maximum", "probability": 0.91259765625}, {"start": 1715.52, "end": 1716.12, "word": " value,", "probability": 0.97265625}, {"start": 1716.42, "end": 1716.86, "word": " by", "probability": 0.94775390625}, {"start": 1716.86, "end": 1717.32, "word": " another", "probability": 0.9208984375}, {"start": 1717.32, "end": 1717.62, "word": " one", "probability": 0.90673828125}, {"start": 1717.62, "end": 1717.76, "word": " which", "probability": 0.5751953125}, {"start": 1717.76, "end": 1717.88, "word": " is", "probability": 0.951171875}, {"start": 1717.88, "end": 1718.36, "word": " extreme", "probability": 0.751953125}, {"start": 1718.36, "end": 1718.62, "word": " one,", "probability": 0.82177734375}, {"start": 1718.68, "end": 1718.82, "word": " for", "probability": 0.95166015625}, {"start": 1718.82, "end": 1719.14, "word": " example", "probability": 0.96875}, {"start": 1719.14, "end": 1719.46, "word": " 10.", "probability": 
0.6640625}, {"start": 1721.28, "end": 1721.92, "word": " In", "probability": 0.96435546875}, {"start": 1721.92, "end": 1722.14, "word": " this", "probability": 0.94580078125}, {"start": 1722.14, "end": 1722.54, "word": " case,", "probability": 0.9072265625}, {"start": 1722.92, "end": 1724.14, "word": " the", "probability": 0.90380859375}, {"start": 1724.14, "end": 1724.46, "word": " median", "probability": 0.955078125}, {"start": 1724.46, "end": 1724.82, "word": " is", "probability": 0.9404296875}, {"start": 1724.82, "end": 1725.14, "word": " still", "probability": 0.96337890625}, {"start": 1725.14, "end": 1725.42, "word": " 3.", "probability": 0.7177734375}], "temperature": 1.0}, {"id": 64, "seek": 174954, "start": 1726.4, "end": 1749.54, "text": " Because the median is just the value of the middle after you arrange the data. So it doesn't matter what is the highest or the maximum value is, the median in this case is three. It doesn't change. That means the median is not affected by extreme values. 
Or to be more precise, we can say that", "tokens": [1436, 264, 26779, 307, 445, 264, 2158, 295, 264, 2808, 934, 291, 9424, 264, 1412, 13, 407, 309, 1177, 380, 1871, 437, 307, 264, 6343, 420, 264, 6674, 2158, 307, 11, 264, 26779, 294, 341, 1389, 307, 1045, 13, 467, 1177, 380, 1319, 13, 663, 1355, 264, 26779, 307, 406, 8028, 538, 8084, 4190, 13, 1610, 281, 312, 544, 13600, 11, 321, 393, 584, 300], "avg_logprob": -0.13529829793807233, "compression_ratio": 1.6153846153846154, "no_speech_prob": 0.0, "words": [{"start": 1726.4, "end": 1726.82, "word": " Because", "probability": 0.56591796875}, {"start": 1726.82, "end": 1727.0, "word": " the", "probability": 0.9169921875}, {"start": 1727.0, "end": 1727.24, "word": " median", "probability": 0.9697265625}, {"start": 1727.24, "end": 1727.6, "word": " is", "probability": 0.94287109375}, {"start": 1727.6, "end": 1727.82, "word": " just", "probability": 0.91259765625}, {"start": 1727.82, "end": 1727.98, "word": " the", "probability": 0.921875}, {"start": 1727.98, "end": 1728.18, "word": " value", "probability": 0.96484375}, {"start": 1728.18, "end": 1728.3, "word": " of", "probability": 0.87841796875}, {"start": 1728.3, "end": 1728.4, "word": " the", "probability": 0.91943359375}, {"start": 1728.4, "end": 1728.58, "word": " middle", "probability": 0.88623046875}, {"start": 1728.58, "end": 1728.84, "word": " after", "probability": 0.8115234375}, {"start": 1728.84, "end": 1729.0, "word": " you", "probability": 0.69921875}, {"start": 1729.0, "end": 1729.26, "word": " arrange", "probability": 0.63134765625}, {"start": 1729.26, "end": 1729.38, "word": " the", "probability": 0.7607421875}, {"start": 1729.38, "end": 1729.58, "word": " data.", "probability": 0.9248046875}, {"start": 1729.72, "end": 1729.84, "word": " So", "probability": 0.94384765625}, {"start": 1729.84, "end": 1730.38, "word": " it", "probability": 0.89404296875}, {"start": 1730.38, "end": 1730.76, "word": " doesn't", "probability": 0.968994140625}, {"start": 1730.76, 
"end": 1731.16, "word": " matter", "probability": 0.86279296875}, {"start": 1731.16, "end": 1731.96, "word": " what", "probability": 0.919921875}, {"start": 1731.96, "end": 1732.36, "word": " is", "probability": 0.923828125}, {"start": 1732.36, "end": 1732.78, "word": " the", "probability": 0.91943359375}, {"start": 1732.78, "end": 1733.36, "word": " highest", "probability": 0.94873046875}, {"start": 1733.36, "end": 1733.9, "word": " or", "probability": 0.93115234375}, {"start": 1733.9, "end": 1734.14, "word": " the", "probability": 0.92041015625}, {"start": 1734.14, "end": 1734.58, "word": " maximum", "probability": 0.927734375}, {"start": 1734.58, "end": 1735.08, "word": " value", "probability": 0.97119140625}, {"start": 1735.08, "end": 1735.48, "word": " is,", "probability": 0.89892578125}, {"start": 1735.8, "end": 1736.0, "word": " the", "probability": 0.91455078125}, {"start": 1736.0, "end": 1736.22, "word": " median", "probability": 0.94677734375}, {"start": 1736.22, "end": 1736.4, "word": " in", "probability": 0.83984375}, {"start": 1736.4, "end": 1736.54, "word": " this", "probability": 0.9453125}, {"start": 1736.54, "end": 1737.3, "word": " case", "probability": 0.919921875}, {"start": 1737.3, "end": 1738.86, "word": " is", "probability": 0.8798828125}, {"start": 1738.86, "end": 1739.18, "word": " three.", "probability": 0.734375}, {"start": 1739.5, "end": 1739.66, "word": " It", "probability": 0.63525390625}, {"start": 1739.66, "end": 1739.88, "word": " doesn't", "probability": 0.954345703125}, {"start": 1739.88, "end": 1740.2, "word": " change.", "probability": 0.8466796875}, {"start": 1740.48, "end": 1740.74, "word": " That", "probability": 0.92431640625}, {"start": 1740.74, "end": 1741.12, "word": " means", "probability": 0.93359375}, {"start": 1741.12, "end": 1742.72, "word": " the", "probability": 0.80029296875}, {"start": 1742.72, "end": 1743.06, "word": " median", "probability": 0.9638671875}, {"start": 1743.06, "end": 1743.7, "word": " is", 
"probability": 0.94580078125}, {"start": 1743.7, "end": 1743.98, "word": " not", "probability": 0.94775390625}, {"start": 1743.98, "end": 1744.52, "word": " affected", "probability": 0.8271484375}, {"start": 1744.52, "end": 1744.84, "word": " by", "probability": 0.97119140625}, {"start": 1744.84, "end": 1745.16, "word": " extreme", "probability": 0.876953125}, {"start": 1745.16, "end": 1745.54, "word": " values.", "probability": 0.9736328125}, {"start": 1746.36, "end": 1746.5, "word": " Or", "probability": 0.5380859375}, {"start": 1746.5, "end": 1747.38, "word": " to", "probability": 0.95751953125}, {"start": 1747.38, "end": 1747.6, "word": " be", "probability": 0.958984375}, {"start": 1747.6, "end": 1748.02, "word": " more", "probability": 0.9365234375}, {"start": 1748.02, "end": 1748.34, "word": " precise,", "probability": 0.958984375}, {"start": 1748.7, "end": 1748.86, "word": " we", "probability": 0.9619140625}, {"start": 1748.86, "end": 1749.06, "word": " can", "probability": 0.9462890625}, {"start": 1749.06, "end": 1749.26, "word": " say", "probability": 0.93603515625}, {"start": 1749.26, "end": 1749.54, "word": " that", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 65, "seek": 177675, "start": 1750.29, "end": 1776.75, "text": " The median is affected by outlier, but not the same as the mean. So affect the mean much more than the median. I mean, you cannot say for this example, yes, the median is not affected because the median was three, it becomes three. 
But in another examples, there is small difference between all.", "tokens": [440, 26779, 307, 8028, 538, 484, 2753, 11, 457, 406, 264, 912, 382, 264, 914, 13, 407, 3345, 264, 914, 709, 544, 813, 264, 26779, 13, 286, 914, 11, 291, 2644, 584, 337, 341, 1365, 11, 2086, 11, 264, 26779, 307, 406, 8028, 570, 264, 26779, 390, 1045, 11, 309, 3643, 1045, 13, 583, 294, 1071, 5110, 11, 456, 307, 1359, 2649, 1296, 439, 13], "avg_logprob": -0.20572917366569693, "compression_ratio": 1.7209302325581395, "no_speech_prob": 0.00018799304962158203, "words": [{"start": 1750.29, "end": 1750.93, "word": " The", "probability": 0.489501953125}, {"start": 1750.93, "end": 1751.21, "word": " median", "probability": 0.96728515625}, {"start": 1751.21, "end": 1751.97, "word": " is", "probability": 0.94482421875}, {"start": 1751.97, "end": 1752.47, "word": " affected", "probability": 0.85986328125}, {"start": 1752.47, "end": 1752.91, "word": " by", "probability": 0.97021484375}, {"start": 1752.91, "end": 1753.37, "word": " outlier,", "probability": 0.933349609375}, {"start": 1753.59, "end": 1753.85, "word": " but", "probability": 0.9296875}, {"start": 1753.85, "end": 1754.71, "word": " not", "probability": 0.94287109375}, {"start": 1754.71, "end": 1754.95, "word": " the", "probability": 0.91748046875}, {"start": 1754.95, "end": 1755.15, "word": " same", "probability": 0.908203125}, {"start": 1755.15, "end": 1755.65, "word": " as", "probability": 0.96435546875}, {"start": 1755.65, "end": 1756.37, "word": " the", "probability": 0.9091796875}, {"start": 1756.37, "end": 1756.61, "word": " mean.", "probability": 0.9765625}, {"start": 1757.25, "end": 1757.45, "word": " So", "probability": 0.9365234375}, {"start": 1757.45, "end": 1758.99, "word": " affect", "probability": 0.1920166015625}, {"start": 1758.99, "end": 1759.33, "word": " the", "probability": 0.9228515625}, {"start": 1759.33, "end": 1759.65, "word": " mean", "probability": 0.9697265625}, {"start": 1759.65, "end": 1760.39, "word": " much", 
"probability": 0.85107421875}, {"start": 1760.39, "end": 1760.79, "word": " more", "probability": 0.9384765625}, {"start": 1760.79, "end": 1761.03, "word": " than", "probability": 0.93994140625}, {"start": 1761.03, "end": 1761.25, "word": " the", "probability": 0.91796875}, {"start": 1761.25, "end": 1761.51, "word": " median.", "probability": 0.947265625}, {"start": 1763.03, "end": 1763.17, "word": " I", "probability": 0.74755859375}, {"start": 1763.17, "end": 1763.29, "word": " mean,", "probability": 0.95556640625}, {"start": 1763.39, "end": 1763.61, "word": " you", "probability": 0.927734375}, {"start": 1763.61, "end": 1763.87, "word": " cannot", "probability": 0.890625}, {"start": 1763.87, "end": 1764.27, "word": " say", "probability": 0.94482421875}, {"start": 1764.27, "end": 1764.57, "word": " for", "probability": 0.83447265625}, {"start": 1764.57, "end": 1764.81, "word": " this", "probability": 0.94384765625}, {"start": 1764.81, "end": 1765.13, "word": " example,", "probability": 0.97705078125}, {"start": 1765.27, "end": 1765.45, "word": " yes,", "probability": 0.89892578125}, {"start": 1765.61, "end": 1766.21, "word": " the", "probability": 0.888671875}, {"start": 1766.21, "end": 1766.41, "word": " median", "probability": 0.9189453125}, {"start": 1766.41, "end": 1766.55, "word": " is", "probability": 0.802734375}, {"start": 1766.55, "end": 1766.71, "word": " not", "probability": 0.94287109375}, {"start": 1766.71, "end": 1767.05, "word": " affected", "probability": 0.86962890625}, {"start": 1767.05, "end": 1767.59, "word": " because", "probability": 0.80712890625}, {"start": 1767.59, "end": 1768.17, "word": " the", "probability": 0.9150390625}, {"start": 1768.17, "end": 1768.47, "word": " median", "probability": 0.953125}, {"start": 1768.47, "end": 1768.89, "word": " was", "probability": 0.9150390625}, {"start": 1768.89, "end": 1769.13, "word": " three,", "probability": 0.55908203125}, {"start": 1769.23, "end": 1769.31, "word": " it", "probability": 
0.91552734375}, {"start": 1769.31, "end": 1769.61, "word": " becomes", "probability": 0.87890625}, {"start": 1769.61, "end": 1769.93, "word": " three.", "probability": 0.84912109375}, {"start": 1770.43, "end": 1770.73, "word": " But", "probability": 0.9482421875}, {"start": 1770.73, "end": 1770.93, "word": " in", "probability": 0.92138671875}, {"start": 1770.93, "end": 1771.49, "word": " another", "probability": 0.58056640625}, {"start": 1771.49, "end": 1771.99, "word": " examples,", "probability": 0.52587890625}, {"start": 1772.67, "end": 1772.93, "word": " there", "probability": 0.91552734375}, {"start": 1772.93, "end": 1773.59, "word": " is", "probability": 0.9384765625}, {"start": 1773.59, "end": 1774.73, "word": " small", "probability": 0.68017578125}, {"start": 1774.73, "end": 1775.93, "word": " difference", "probability": 0.8251953125}, {"start": 1775.93, "end": 1776.41, "word": " between", "probability": 0.86474609375}, {"start": 1776.41, "end": 1776.75, "word": " all.", "probability": 0.4150390625}], "temperature": 1.0}, {"id": 66, "seek": 180615, "start": 1780.77, "end": 1806.15, "text": " Extreme values affected the mean much more than the median. If the dataset has extreme values, we have to use, what do you think, the mean or the median? The median. So in case or in the presence of extreme values or outliers, we have to use the median, not the mean. 
But in general, we use", "tokens": [39525, 4190, 8028, 264, 914, 709, 544, 813, 264, 26779, 13, 759, 264, 28872, 575, 8084, 4190, 11, 321, 362, 281, 764, 11, 437, 360, 291, 519, 11, 264, 914, 420, 264, 26779, 30, 440, 26779, 13, 407, 294, 1389, 420, 294, 264, 6814, 295, 8084, 4190, 420, 484, 23646, 11, 321, 362, 281, 764, 264, 26779, 11, 406, 264, 914, 13, 583, 294, 2674, 11, 321, 764], "avg_logprob": -0.1337182979652847, "compression_ratio": 1.8074534161490683, "no_speech_prob": 5.960464477539062e-07, "words": [{"start": 1780.77, "end": 1781.25, "word": " Extreme", "probability": 0.7705078125}, {"start": 1781.25, "end": 1781.77, "word": " values", "probability": 0.96484375}, {"start": 1781.77, "end": 1782.61, "word": " affected", "probability": 0.5234375}, {"start": 1782.61, "end": 1782.87, "word": " the", "probability": 0.91845703125}, {"start": 1782.87, "end": 1783.09, "word": " mean", "probability": 0.9462890625}, {"start": 1783.09, "end": 1784.25, "word": " much", "probability": 0.82958984375}, {"start": 1784.25, "end": 1784.53, "word": " more", "probability": 0.9345703125}, {"start": 1784.53, "end": 1784.85, "word": " than", "probability": 0.951171875}, {"start": 1784.85, "end": 1785.69, "word": " the", "probability": 0.91845703125}, {"start": 1785.69, "end": 1785.91, "word": " median.", "probability": 0.94775390625}, {"start": 1788.27, "end": 1788.83, "word": " If", "probability": 0.9404296875}, {"start": 1788.83, "end": 1789.05, "word": " the", "probability": 0.921875}, {"start": 1789.05, "end": 1789.41, "word": " dataset", "probability": 0.59521484375}, {"start": 1789.41, "end": 1789.87, "word": " has", "probability": 0.94921875}, {"start": 1789.87, "end": 1790.33, "word": " extreme", "probability": 0.85107421875}, {"start": 1790.33, "end": 1790.73, "word": " values,", "probability": 0.96337890625}, {"start": 1791.25, "end": 1791.45, "word": " we", "probability": 0.9482421875}, {"start": 1791.45, "end": 1791.69, "word": " have", "probability": 0.94921875}, 
{"start": 1791.69, "end": 1791.81, "word": " to", "probability": 0.97314453125}, {"start": 1791.81, "end": 1792.19, "word": " use,", "probability": 0.8720703125}, {"start": 1793.13, "end": 1793.49, "word": " what", "probability": 0.9462890625}, {"start": 1793.49, "end": 1793.61, "word": " do", "probability": 0.9365234375}, {"start": 1793.61, "end": 1793.69, "word": " you", "probability": 0.91162109375}, {"start": 1793.69, "end": 1793.91, "word": " think,", "probability": 0.9140625}, {"start": 1793.97, "end": 1794.07, "word": " the", "probability": 0.927734375}, {"start": 1794.07, "end": 1794.21, "word": " mean", "probability": 0.97216796875}, {"start": 1794.21, "end": 1794.35, "word": " or", "probability": 0.95703125}, {"start": 1794.35, "end": 1794.51, "word": " the", "probability": 0.92724609375}, {"start": 1794.51, "end": 1794.77, "word": " median?", "probability": 0.95361328125}, {"start": 1795.05, "end": 1795.59, "word": " The", "probability": 0.732421875}, {"start": 1795.59, "end": 1795.83, "word": " median.", "probability": 0.96142578125}, {"start": 1796.35, "end": 1796.49, "word": " So", "probability": 0.7734375}, {"start": 1796.49, "end": 1796.69, "word": " in", "probability": 0.806640625}, {"start": 1796.69, "end": 1797.07, "word": " case", "probability": 0.75439453125}, {"start": 1797.07, "end": 1797.47, "word": " or", "probability": 0.62548828125}, {"start": 1797.47, "end": 1797.65, "word": " in", "probability": 0.9267578125}, {"start": 1797.65, "end": 1797.79, "word": " the", "probability": 0.9228515625}, {"start": 1797.79, "end": 1798.09, "word": " presence", "probability": 0.95849609375}, {"start": 1798.09, "end": 1798.55, "word": " of", "probability": 0.96923828125}, {"start": 1798.55, "end": 1798.93, "word": " extreme", "probability": 0.83984375}, {"start": 1798.93, "end": 1799.29, "word": " values", "probability": 0.974609375}, {"start": 1799.29, "end": 1799.47, "word": " or", "probability": 0.9404296875}, {"start": 1799.47, "end": 1799.95, 
"word": " outliers,", "probability": 0.948974609375}, {"start": 1800.21, "end": 1800.53, "word": " we", "probability": 0.96044921875}, {"start": 1800.53, "end": 1800.79, "word": " have", "probability": 0.94873046875}, {"start": 1800.79, "end": 1800.93, "word": " to", "probability": 0.97119140625}, {"start": 1800.93, "end": 1801.27, "word": " use", "probability": 0.8740234375}, {"start": 1801.27, "end": 1801.91, "word": " the", "probability": 0.9189453125}, {"start": 1801.91, "end": 1802.19, "word": " median,", "probability": 0.95703125}, {"start": 1802.47, "end": 1802.65, "word": " not", "probability": 0.9423828125}, {"start": 1802.65, "end": 1802.87, "word": " the", "probability": 0.91162109375}, {"start": 1802.87, "end": 1803.01, "word": " mean.", "probability": 0.96923828125}, {"start": 1803.67, "end": 1803.93, "word": " But", "probability": 0.94775390625}, {"start": 1803.93, "end": 1804.47, "word": " in", "probability": 0.87548828125}, {"start": 1804.47, "end": 1804.85, "word": " general,", "probability": 0.896484375}, {"start": 1805.65, "end": 1805.95, "word": " we", "probability": 0.939453125}, {"start": 1805.95, "end": 1806.15, "word": " use", "probability": 0.849609375}], "temperature": 1.0}, {"id": 67, "seek": 182731, "start": 1806.67, "end": 1827.31, "text": " If the data is free of outliers, I mean if the data has not extreme values, then you can use the mean. The mean is much better than the median in this case. 
But if the data has extreme values or outliers, we should use the median instead of the mean.", "tokens": [759, 264, 1412, 307, 1737, 295, 484, 23646, 11, 286, 914, 498, 264, 1412, 575, 406, 8084, 4190, 11, 550, 291, 393, 764, 264, 914, 13, 440, 914, 307, 709, 1101, 813, 264, 26779, 294, 341, 1389, 13, 583, 498, 264, 1412, 575, 8084, 4190, 420, 484, 23646, 11, 321, 820, 764, 264, 26779, 2602, 295, 264, 914, 13], "avg_logprob": -0.10455729104578496, "compression_ratio": 1.767605633802817, "no_speech_prob": 0.0, "words": [{"start": 1806.67, "end": 1807.01, "word": " If", "probability": 0.748046875}, {"start": 1807.01, "end": 1807.21, "word": " the", "probability": 0.904296875}, {"start": 1807.21, "end": 1807.55, "word": " data", "probability": 0.9482421875}, {"start": 1807.55, "end": 1807.91, "word": " is", "probability": 0.91162109375}, {"start": 1807.91, "end": 1808.25, "word": " free", "probability": 0.9267578125}, {"start": 1808.25, "end": 1808.45, "word": " of", "probability": 0.97021484375}, {"start": 1808.45, "end": 1808.97, "word": " outliers,", "probability": 0.943603515625}, {"start": 1809.45, "end": 1809.55, "word": " I", "probability": 0.90234375}, {"start": 1809.55, "end": 1809.73, "word": " mean", "probability": 0.96728515625}, {"start": 1809.73, "end": 1810.27, "word": " if", "probability": 0.5576171875}, {"start": 1810.27, "end": 1810.45, "word": " the", "probability": 0.91650390625}, {"start": 1810.45, "end": 1810.77, "word": " data", "probability": 0.94140625}, {"start": 1810.77, "end": 1812.95, "word": " has", "probability": 0.89892578125}, {"start": 1812.95, "end": 1813.21, "word": " not", "probability": 0.76806640625}, {"start": 1813.21, "end": 1813.57, "word": " extreme", "probability": 0.84423828125}, {"start": 1813.57, "end": 1814.05, "word": " values,", "probability": 0.96875}, {"start": 1814.59, "end": 1814.95, "word": " then", "probability": 0.859375}, {"start": 1814.95, "end": 1815.09, "word": " you", "probability": 0.94775390625}, 
{"start": 1815.09, "end": 1815.31, "word": " can", "probability": 0.9462890625}, {"start": 1815.31, "end": 1815.77, "word": " use", "probability": 0.873046875}, {"start": 1815.77, "end": 1816.27, "word": " the", "probability": 0.8896484375}, {"start": 1816.27, "end": 1816.41, "word": " mean.", "probability": 0.95751953125}, {"start": 1816.51, "end": 1816.65, "word": " The", "probability": 0.88330078125}, {"start": 1816.65, "end": 1816.75, "word": " mean", "probability": 0.97216796875}, {"start": 1816.75, "end": 1816.89, "word": " is", "probability": 0.95751953125}, {"start": 1816.89, "end": 1817.17, "word": " much", "probability": 0.91748046875}, {"start": 1817.17, "end": 1817.47, "word": " better", "probability": 0.9013671875}, {"start": 1817.47, "end": 1817.83, "word": " than", "probability": 0.94921875}, {"start": 1817.83, "end": 1818.63, "word": " the", "probability": 0.923828125}, {"start": 1818.63, "end": 1818.81, "word": " median", "probability": 0.9404296875}, {"start": 1818.81, "end": 1819.05, "word": " in", "probability": 0.896484375}, {"start": 1819.05, "end": 1819.23, "word": " this", "probability": 0.94873046875}, {"start": 1819.23, "end": 1819.47, "word": " case.", "probability": 0.90771484375}, {"start": 1819.53, "end": 1819.77, "word": " But", "probability": 0.93408203125}, {"start": 1819.77, "end": 1820.07, "word": " if", "probability": 0.87451171875}, {"start": 1820.07, "end": 1820.25, "word": " the", "probability": 0.9169921875}, {"start": 1820.25, "end": 1820.59, "word": " data", "probability": 0.9384765625}, {"start": 1820.59, "end": 1821.59, "word": " has", "probability": 0.94970703125}, {"start": 1821.59, "end": 1822.03, "word": " extreme", "probability": 0.841796875}, {"start": 1822.03, "end": 1822.35, "word": " values", "probability": 0.97265625}, {"start": 1822.35, "end": 1822.49, "word": " or", "probability": 0.90771484375}, {"start": 1822.49, "end": 1822.97, "word": " outliers,", "probability": 0.9560546875}, {"start": 1823.33, "end": 
1823.59, "word": " we", "probability": 0.931640625}, {"start": 1823.59, "end": 1823.85, "word": " should", "probability": 0.96533203125}, {"start": 1823.85, "end": 1824.25, "word": " use", "probability": 0.87255859375}, {"start": 1824.25, "end": 1824.67, "word": " the", "probability": 0.92333984375}, {"start": 1824.67, "end": 1824.89, "word": " median", "probability": 0.953125}, {"start": 1824.89, "end": 1826.31, "word": " instead", "probability": 0.8779296875}, {"start": 1826.31, "end": 1826.85, "word": " of", "probability": 0.96826171875}, {"start": 1826.85, "end": 1827.19, "word": " the", "probability": 0.93115234375}, {"start": 1827.19, "end": 1827.31, "word": " mean.", "probability": 0.97998046875}], "temperature": 1.0}, {"id": 68, "seek": 184647, "start": 1827.97, "end": 1846.47, "text": " Any question? So these are the most common center tendency measures in statistics, the mean and the median. And keep in mind, your data should be numeric. I mean, you cannot use the mean or the median", "tokens": [2639, 1168, 30, 407, 613, 366, 264, 881, 2689, 3056, 18187, 8000, 294, 12523, 11, 264, 914, 293, 264, 26779, 13, 400, 1066, 294, 1575, 11, 428, 1412, 820, 312, 7866, 299, 13, 286, 914, 11, 291, 2644, 764, 264, 914, 420, 264, 26779], "avg_logprob": -0.13940972222222223, "compression_ratio": 1.488888888888889, "no_speech_prob": 0.0, "words": [{"start": 1827.97, "end": 1828.25, "word": " Any", "probability": 0.74560546875}, {"start": 1828.25, "end": 1828.55, "word": " question?", "probability": 0.76611328125}, {"start": 1829.19, "end": 1829.77, "word": " So", "probability": 0.92236328125}, {"start": 1829.77, "end": 1830.03, "word": " these", "probability": 0.7001953125}, {"start": 1830.03, "end": 1830.27, "word": " are", "probability": 0.9462890625}, {"start": 1830.27, "end": 1830.45, "word": " the", "probability": 0.9228515625}, {"start": 1830.45, "end": 1830.73, "word": " most", "probability": 0.90673828125}, {"start": 1830.73, "end": 1831.31, "word": " common", 
"probability": 0.884765625}, {"start": 1831.31, "end": 1833.65, "word": " center", "probability": 0.62109375}, {"start": 1833.65, "end": 1834.05, "word": " tendency", "probability": 0.83447265625}, {"start": 1834.05, "end": 1834.59, "word": " measures", "probability": 0.822265625}, {"start": 1834.59, "end": 1835.11, "word": " in", "probability": 0.93408203125}, {"start": 1835.11, "end": 1835.67, "word": " statistics,", "probability": 0.853515625}, {"start": 1836.33, "end": 1836.47, "word": " the", "probability": 0.9189453125}, {"start": 1836.47, "end": 1836.71, "word": " mean", "probability": 0.96484375}, {"start": 1836.71, "end": 1837.97, "word": " and", "probability": 0.8154296875}, {"start": 1837.97, "end": 1838.15, "word": " the", "probability": 0.91796875}, {"start": 1838.15, "end": 1838.37, "word": " median.", "probability": 0.95166015625}, {"start": 1839.23, "end": 1839.51, "word": " And", "probability": 0.9150390625}, {"start": 1839.51, "end": 1839.67, "word": " keep", "probability": 0.92333984375}, {"start": 1839.67, "end": 1839.79, "word": " in", "probability": 0.947265625}, {"start": 1839.79, "end": 1840.15, "word": " mind,", "probability": 0.88623046875}, {"start": 1841.29, "end": 1841.63, "word": " your", "probability": 0.8955078125}, {"start": 1841.63, "end": 1842.09, "word": " data", "probability": 0.9501953125}, {"start": 1842.09, "end": 1842.39, "word": " should", "probability": 0.96533203125}, {"start": 1842.39, "end": 1842.57, "word": " be", "probability": 0.958984375}, {"start": 1842.57, "end": 1843.17, "word": " numeric.", "probability": 0.90234375}, {"start": 1843.93, "end": 1844.07, "word": " I", "probability": 0.9541015625}, {"start": 1844.07, "end": 1844.23, "word": " mean,", "probability": 0.962890625}, {"start": 1844.53, "end": 1844.77, "word": " you", "probability": 0.9658203125}, {"start": 1844.77, "end": 1845.11, "word": " cannot", "probability": 0.89111328125}, {"start": 1845.11, "end": 1845.53, "word": " use", "probability": 
0.8779296875}, {"start": 1845.53, "end": 1845.71, "word": " the", "probability": 0.9267578125}, {"start": 1845.71, "end": 1845.85, "word": " mean", "probability": 0.97509765625}, {"start": 1845.85, "end": 1846.01, "word": " or", "probability": 0.96240234375}, {"start": 1846.01, "end": 1846.17, "word": " the", "probability": 0.92578125}, {"start": 1846.17, "end": 1846.47, "word": " median", "probability": 0.9599609375}], "temperature": 1.0}, {"id": 69, "seek": 186981, "start": 1848.01, "end": 1869.81, "text": " for qualitative or categorical data, for example, gender, males or females. You cannot say the mean of gender or sex is 1.5. It doesn't make sense. It should be numerical data to use the mean or the median. So the mean and the median is used only for numerical data.", "tokens": [337, 31312, 420, 19250, 804, 1412, 11, 337, 1365, 11, 7898, 11, 20776, 420, 21529, 13, 509, 2644, 584, 264, 914, 295, 7898, 420, 3260, 307, 502, 13, 20, 13, 467, 1177, 380, 652, 2020, 13, 467, 820, 312, 29054, 1412, 281, 764, 264, 914, 420, 264, 26779, 13, 407, 264, 914, 293, 264, 26779, 307, 1143, 787, 337, 29054, 1412, 13], "avg_logprob": -0.13541666950498307, "compression_ratio": 1.6583850931677018, "no_speech_prob": 0.0, "words": [{"start": 1848.01, "end": 1848.61, "word": " for", "probability": 0.60693359375}, {"start": 1848.61, "end": 1849.19, "word": " qualitative", "probability": 0.88330078125}, {"start": 1849.19, "end": 1849.61, "word": " or", "probability": 0.91650390625}, {"start": 1849.61, "end": 1850.09, "word": " categorical", "probability": 0.748779296875}, {"start": 1850.09, "end": 1850.51, "word": " data,", "probability": 0.9384765625}, {"start": 1850.63, "end": 1850.73, "word": " for", "probability": 0.94677734375}, {"start": 1850.73, "end": 1851.03, "word": " example,", "probability": 0.97998046875}, {"start": 1851.15, "end": 1851.45, "word": " gender,", "probability": 0.83837890625}, {"start": 1851.59, "end": 1851.75, "word": " males", "probability": 0.947265625}, 
{"start": 1851.75, "end": 1851.95, "word": " or", "probability": 0.908203125}, {"start": 1851.95, "end": 1852.25, "word": " females.", "probability": 0.9541015625}, {"start": 1852.67, "end": 1852.89, "word": " You", "probability": 0.8291015625}, {"start": 1852.89, "end": 1853.65, "word": " cannot", "probability": 0.841796875}, {"start": 1853.65, "end": 1854.31, "word": " say", "probability": 0.9501953125}, {"start": 1854.31, "end": 1855.19, "word": " the", "probability": 0.80810546875}, {"start": 1855.19, "end": 1855.55, "word": " mean", "probability": 0.97265625}, {"start": 1855.55, "end": 1856.51, "word": " of", "probability": 0.970703125}, {"start": 1856.51, "end": 1856.99, "word": " gender", "probability": 0.87744140625}, {"start": 1856.99, "end": 1857.21, "word": " or", "probability": 0.95703125}, {"start": 1857.21, "end": 1857.51, "word": " sex", "probability": 0.95166015625}, {"start": 1857.51, "end": 1857.73, "word": " is", "probability": 0.94140625}, {"start": 1857.73, "end": 1857.97, "word": " 1", "probability": 0.978515625}, {"start": 1857.97, "end": 1858.51, "word": ".5.", "probability": 0.998779296875}, {"start": 1858.83, "end": 1858.99, "word": " It", "probability": 0.9560546875}, {"start": 1858.99, "end": 1859.27, "word": " doesn't", "probability": 0.967529296875}, {"start": 1859.27, "end": 1859.49, "word": " make", "probability": 0.93603515625}, {"start": 1859.49, "end": 1859.81, "word": " sense.", "probability": 0.8330078125}, {"start": 1861.29, "end": 1861.65, "word": " It", "probability": 0.96875}, {"start": 1861.65, "end": 1861.85, "word": " should", "probability": 0.9658203125}, {"start": 1861.85, "end": 1862.01, "word": " be", "probability": 0.95751953125}, {"start": 1862.01, "end": 1862.37, "word": " numerical", "probability": 0.91796875}, {"start": 1862.37, "end": 1862.87, "word": " data", "probability": 0.939453125}, {"start": 1862.87, "end": 1863.83, "word": " to", "probability": 0.95556640625}, {"start": 1863.83, "end": 1864.27, "word": " 
use", "probability": 0.87060546875}, {"start": 1864.27, "end": 1865.29, "word": " the", "probability": 0.9169921875}, {"start": 1865.29, "end": 1865.49, "word": " mean", "probability": 0.97998046875}, {"start": 1865.49, "end": 1865.67, "word": " or", "probability": 0.95849609375}, {"start": 1865.67, "end": 1865.81, "word": " the", "probability": 0.9228515625}, {"start": 1865.81, "end": 1865.99, "word": " median.", "probability": 0.91015625}, {"start": 1866.11, "end": 1866.21, "word": " So", "probability": 0.9248046875}, {"start": 1866.21, "end": 1866.35, "word": " the", "probability": 0.72998046875}, {"start": 1866.35, "end": 1866.49, "word": " mean", "probability": 0.970703125}, {"start": 1866.49, "end": 1866.63, "word": " and", "probability": 0.6748046875}, {"start": 1866.63, "end": 1866.77, "word": " the", "probability": 0.8740234375}, {"start": 1866.77, "end": 1866.97, "word": " median", "probability": 0.9599609375}, {"start": 1866.97, "end": 1867.25, "word": " is", "probability": 0.9267578125}, {"start": 1867.25, "end": 1867.59, "word": " used", "probability": 0.91650390625}, {"start": 1867.59, "end": 1868.01, "word": " only", "probability": 0.93212890625}, {"start": 1868.01, "end": 1868.47, "word": " for", "probability": 0.95263671875}, {"start": 1868.47, "end": 1869.27, "word": " numerical", "probability": 0.70556640625}, {"start": 1869.27, "end": 1869.81, "word": " data.", "probability": 0.92626953125}], "temperature": 1.0}, {"id": 70, "seek": 189759, "start": 1870.51, "end": 1897.59, "text": " And we have to distinguish between mean and median. Mean is used for data that has not outliers or extreme values, while the median is used for data that has outliers or extreme values. Sometimes better to report both. I mean, sometimes better to report mean and the median. 
So you just say the sales for this company is, for example, $500,000.", "tokens": [400, 321, 362, 281, 20206, 1296, 914, 293, 26779, 13, 12302, 307, 1143, 337, 1412, 300, 575, 406, 484, 23646, 420, 8084, 4190, 11, 1339, 264, 26779, 307, 1143, 337, 1412, 300, 575, 484, 23646, 420, 8084, 4190, 13, 4803, 1101, 281, 2275, 1293, 13, 286, 914, 11, 2171, 1101, 281, 2275, 914, 293, 264, 26779, 13, 407, 291, 445, 584, 264, 5763, 337, 341, 2237, 307, 11, 337, 1365, 11, 1848, 7526, 11, 1360, 13], "avg_logprob": -0.13900162260253707, "compression_ratio": 1.806282722513089, "no_speech_prob": 0.0, "words": [{"start": 1870.51, "end": 1870.83, "word": " And", "probability": 0.494873046875}, {"start": 1870.83, "end": 1870.95, "word": " we", "probability": 0.779296875}, {"start": 1870.95, "end": 1871.09, "word": " have", "probability": 0.9267578125}, {"start": 1871.09, "end": 1871.21, "word": " to", "probability": 0.958984375}, {"start": 1871.21, "end": 1871.63, "word": " distinguish", "probability": 0.890625}, {"start": 1871.63, "end": 1872.25, "word": " between", "probability": 0.8837890625}, {"start": 1872.25, "end": 1872.43, "word": " mean", "probability": 0.93310546875}, {"start": 1872.43, "end": 1872.61, "word": " and", "probability": 0.9453125}, {"start": 1872.61, "end": 1872.93, "word": " median.", "probability": 0.97119140625}, {"start": 1873.51, "end": 1873.67, "word": " Mean", "probability": 0.927734375}, {"start": 1873.67, "end": 1873.87, "word": " is", "probability": 0.9482421875}, {"start": 1873.87, "end": 1874.17, "word": " used", "probability": 0.91162109375}, {"start": 1874.17, "end": 1874.41, "word": " for", "probability": 0.951171875}, {"start": 1874.41, "end": 1874.71, "word": " data", "probability": 0.94580078125}, {"start": 1874.71, "end": 1874.97, "word": " that", "probability": 0.93505859375}, {"start": 1874.97, "end": 1875.21, "word": " has", "probability": 0.90087890625}, {"start": 1875.21, "end": 1875.45, "word": " not", "probability": 0.91552734375}, 
{"start": 1875.45, "end": 1875.93, "word": " outliers", "probability": 0.9453125}, {"start": 1875.93, "end": 1876.11, "word": " or", "probability": 0.89794921875}, {"start": 1876.11, "end": 1876.41, "word": " extreme", "probability": 0.89892578125}, {"start": 1876.41, "end": 1876.87, "word": " values,", "probability": 0.96826171875}, {"start": 1877.37, "end": 1877.79, "word": " while", "probability": 0.95458984375}, {"start": 1877.79, "end": 1878.01, "word": " the", "probability": 0.91357421875}, {"start": 1878.01, "end": 1878.23, "word": " median", "probability": 0.96484375}, {"start": 1878.23, "end": 1878.53, "word": " is", "probability": 0.94873046875}, {"start": 1878.53, "end": 1878.81, "word": " used", "probability": 0.9140625}, {"start": 1878.81, "end": 1879.25, "word": " for", "probability": 0.95361328125}, {"start": 1879.25, "end": 1880.35, "word": " data", "probability": 0.93310546875}, {"start": 1880.35, "end": 1880.95, "word": " that", "probability": 0.93701171875}, {"start": 1880.95, "end": 1881.45, "word": " has", "probability": 0.94287109375}, {"start": 1881.45, "end": 1882.05, "word": " outliers", "probability": 0.739501953125}, {"start": 1882.05, "end": 1882.37, "word": " or", "probability": 0.95703125}, {"start": 1882.37, "end": 1882.67, "word": " extreme", "probability": 0.89111328125}, {"start": 1882.67, "end": 1883.03, "word": " values.", "probability": 0.9423828125}, {"start": 1883.87, "end": 1884.49, "word": " Sometimes", "probability": 0.90673828125}, {"start": 1884.49, "end": 1884.83, "word": " better", "probability": 0.346435546875}, {"start": 1884.83, "end": 1885.23, "word": " to", "probability": 0.9638671875}, {"start": 1885.23, "end": 1885.67, "word": " report", "probability": 0.95263671875}, {"start": 1885.67, "end": 1886.09, "word": " both.", "probability": 0.90380859375}, {"start": 1886.21, "end": 1886.29, "word": " I", "probability": 0.96728515625}, {"start": 1886.29, "end": 1886.45, "word": " mean,", "probability": 0.96533203125}, 
{"start": 1886.49, "end": 1886.93, "word": " sometimes", "probability": 0.58642578125}, {"start": 1886.93, "end": 1887.31, "word": " better", "probability": 0.80810546875}, {"start": 1887.31, "end": 1887.61, "word": " to", "probability": 0.96435546875}, {"start": 1887.61, "end": 1887.99, "word": " report", "probability": 0.962890625}, {"start": 1887.99, "end": 1889.63, "word": " mean", "probability": 0.91845703125}, {"start": 1889.63, "end": 1889.81, "word": " and", "probability": 0.9033203125}, {"start": 1889.81, "end": 1889.93, "word": " the", "probability": 0.79931640625}, {"start": 1889.93, "end": 1890.11, "word": " median.", "probability": 0.9501953125}, {"start": 1890.47, "end": 1890.65, "word": " So", "probability": 0.95068359375}, {"start": 1890.65, "end": 1891.15, "word": " you", "probability": 0.8798828125}, {"start": 1891.15, "end": 1891.43, "word": " just", "probability": 0.458740234375}, {"start": 1891.43, "end": 1891.77, "word": " say", "probability": 0.7666015625}, {"start": 1891.77, "end": 1892.73, "word": " the", "probability": 0.56298828125}, {"start": 1892.73, "end": 1893.17, "word": " sales", "probability": 0.9208984375}, {"start": 1893.17, "end": 1893.45, "word": " for", "probability": 0.94287109375}, {"start": 1893.45, "end": 1893.71, "word": " this", "probability": 0.9462890625}, {"start": 1893.71, "end": 1894.17, "word": " company", "probability": 0.91064453125}, {"start": 1894.17, "end": 1895.39, "word": " is,", "probability": 0.9384765625}, {"start": 1895.59, "end": 1895.67, "word": " for", "probability": 0.9521484375}, {"start": 1895.67, "end": 1896.13, "word": " example,", "probability": 0.97607421875}, {"start": 1896.73, "end": 1897.11, "word": " $500", "probability": 0.91259765625}, {"start": 1897.11, "end": 1897.59, "word": ",000.", "probability": 0.972412109375}], "temperature": 1.0}, {"id": 71, "seek": 192516, "start": 1900.06, "end": 1925.16, "text": " And the median, for example, is 550,000. You can see that. Is it clear? 
If you have a small data, it's straightforward and it's very easy to locate the median. But in case of large dataset, how can we locate the median? It's not easy. Just look at the data and", "tokens": [400, 264, 26779, 11, 337, 1365, 11, 307, 42514, 11, 1360, 13, 509, 393, 536, 300, 13, 1119, 309, 1850, 30, 759, 291, 362, 257, 1359, 1412, 11, 309, 311, 15325, 293, 309, 311, 588, 1858, 281, 22370, 264, 26779, 13, 583, 294, 1389, 295, 2416, 28872, 11, 577, 393, 321, 22370, 264, 26779, 30, 467, 311, 406, 1858, 13, 1449, 574, 412, 264, 1412, 293], "avg_logprob": -0.126515858653766, "compression_ratio": 1.5174418604651163, "no_speech_prob": 0.0, "words": [{"start": 1900.06, "end": 1900.38, "word": " And", "probability": 0.74609375}, {"start": 1900.38, "end": 1900.56, "word": " the", "probability": 0.912109375}, {"start": 1900.56, "end": 1900.88, "word": " median,", "probability": 0.95361328125}, {"start": 1901.12, "end": 1901.22, "word": " for", "probability": 0.951171875}, {"start": 1901.22, "end": 1901.58, "word": " example,", "probability": 0.9765625}, {"start": 1901.7, "end": 1901.8, "word": " is", "probability": 0.9130859375}, {"start": 1901.8, "end": 1902.38, "word": " 550", "probability": 0.90185546875}, {"start": 1902.38, "end": 1902.82, "word": ",000.", "probability": 0.885498046875}, {"start": 1903.08, "end": 1903.3, "word": " You", "probability": 0.95703125}, {"start": 1903.3, "end": 1903.52, "word": " can", "probability": 0.947265625}, {"start": 1903.52, "end": 1903.7, "word": " see", "probability": 0.8056640625}, {"start": 1903.7, "end": 1903.9, "word": " that.", "probability": 0.88720703125}, {"start": 1905.68, "end": 1905.98, "word": " Is", "probability": 0.92041015625}, {"start": 1905.98, "end": 1906.1, "word": " it", "probability": 0.88720703125}, {"start": 1906.1, "end": 1906.4, "word": " clear?", "probability": 0.9013671875}, {"start": 1911.44, "end": 1911.96, "word": " If", "probability": 0.80419921875}, {"start": 1911.96, "end": 1912.74, "word": " 
you", "probability": 0.93603515625}, {"start": 1912.74, "end": 1912.88, "word": " have", "probability": 0.95263671875}, {"start": 1912.88, "end": 1912.98, "word": " a", "probability": 0.61962890625}, {"start": 1912.98, "end": 1913.22, "word": " small", "probability": 0.93994140625}, {"start": 1913.22, "end": 1913.58, "word": " data,", "probability": 0.935546875}, {"start": 1914.06, "end": 1914.38, "word": " it's", "probability": 0.961181640625}, {"start": 1914.38, "end": 1915.04, "word": " straightforward", "probability": 0.85595703125}, {"start": 1915.04, "end": 1915.56, "word": " and", "probability": 0.52490234375}, {"start": 1915.56, "end": 1915.8, "word": " it's", "probability": 0.95263671875}, {"start": 1915.8, "end": 1916.02, "word": " very", "probability": 0.85546875}, {"start": 1916.02, "end": 1916.34, "word": " easy", "probability": 0.8994140625}, {"start": 1916.34, "end": 1916.54, "word": " to", "probability": 0.970703125}, {"start": 1916.54, "end": 1916.92, "word": " locate", "probability": 0.94482421875}, {"start": 1916.92, "end": 1917.14, "word": " the", "probability": 0.91455078125}, {"start": 1917.14, "end": 1917.4, "word": " median.", "probability": 0.892578125}, {"start": 1918.44, "end": 1918.72, "word": " But", "probability": 0.9521484375}, {"start": 1918.72, "end": 1918.9, "word": " in", "probability": 0.923828125}, {"start": 1918.9, "end": 1919.18, "word": " case", "probability": 0.87255859375}, {"start": 1919.18, "end": 1919.34, "word": " of", "probability": 0.96728515625}, {"start": 1919.34, "end": 1919.68, "word": " large", "probability": 0.89892578125}, {"start": 1919.68, "end": 1920.14, "word": " dataset,", "probability": 0.45849609375}, {"start": 1920.72, "end": 1920.96, "word": " how", "probability": 0.93408203125}, {"start": 1920.96, "end": 1921.22, "word": " can", "probability": 0.94091796875}, {"start": 1921.22, "end": 1921.38, "word": " we", "probability": 0.748046875}, {"start": 1921.38, "end": 1921.68, "word": " locate", 
"probability": 0.8310546875}, {"start": 1921.68, "end": 1921.88, "word": " the", "probability": 0.919921875}, {"start": 1921.88, "end": 1922.12, "word": " median?", "probability": 0.9462890625}, {"start": 1922.34, "end": 1922.52, "word": " It's", "probability": 0.959228515625}, {"start": 1922.52, "end": 1922.7, "word": " not", "probability": 0.943359375}, {"start": 1922.7, "end": 1922.98, "word": " easy.", "probability": 0.91064453125}, {"start": 1923.34, "end": 1923.6, "word": " Just", "probability": 0.87158203125}, {"start": 1923.6, "end": 1923.82, "word": " look", "probability": 0.81005859375}, {"start": 1923.82, "end": 1923.94, "word": " at", "probability": 0.96630859375}, {"start": 1923.94, "end": 1924.06, "word": " the", "probability": 0.91552734375}, {"start": 1924.06, "end": 1924.36, "word": " data", "probability": 0.93212890625}, {"start": 1924.36, "end": 1925.16, "word": " and", "probability": 0.79248046875}], "temperature": 1.0}, {"id": 72, "seek": 195120, "start": 1926.2, "end": 1951.2, "text": " you can say this is the median. It's not easy task. So we need a rule that locate the median. The location of the median when the values are in numerical order from smallest to largest is N plus one divided by two. That's the position of the median. 
If we go back a little bit to the previous example, here N was five.", "tokens": [291, 393, 584, 341, 307, 264, 26779, 13, 467, 311, 406, 1858, 5633, 13, 407, 321, 643, 257, 4978, 300, 22370, 264, 26779, 13, 440, 4914, 295, 264, 26779, 562, 264, 4190, 366, 294, 29054, 1668, 490, 16998, 281, 6443, 307, 426, 1804, 472, 6666, 538, 732, 13, 663, 311, 264, 2535, 295, 264, 26779, 13, 759, 321, 352, 646, 257, 707, 857, 281, 264, 3894, 1365, 11, 510, 426, 390, 1732, 13], "avg_logprob": -0.13239019746715958, "compression_ratio": 1.5870646766169154, "no_speech_prob": 0.0, "words": [{"start": 1926.2, "end": 1926.44, "word": " you", "probability": 0.454833984375}, {"start": 1926.44, "end": 1926.64, "word": " can", "probability": 0.94140625}, {"start": 1926.64, "end": 1926.84, "word": " say", "probability": 0.79443359375}, {"start": 1926.84, "end": 1927.64, "word": " this", "probability": 0.609375}, {"start": 1927.64, "end": 1927.8, "word": " is", "probability": 0.94775390625}, {"start": 1927.8, "end": 1927.96, "word": " the", "probability": 0.92333984375}, {"start": 1927.96, "end": 1928.22, "word": " median.", "probability": 0.962890625}, {"start": 1928.86, "end": 1929.16, "word": " It's", "probability": 0.977783203125}, {"start": 1929.16, "end": 1929.64, "word": " not", "probability": 0.94580078125}, {"start": 1929.64, "end": 1929.96, "word": " easy", "probability": 0.857421875}, {"start": 1929.96, "end": 1930.4, "word": " task.", "probability": 0.8984375}, {"start": 1930.72, "end": 1931.04, "word": " So", "probability": 0.951171875}, {"start": 1931.04, "end": 1931.2, "word": " we", "probability": 0.84521484375}, {"start": 1931.2, "end": 1931.42, "word": " need", "probability": 0.93408203125}, {"start": 1931.42, "end": 1931.6, "word": " a", "probability": 0.990234375}, {"start": 1931.6, "end": 1931.78, "word": " rule", "probability": 0.9228515625}, {"start": 1931.78, "end": 1932.18, "word": " that", "probability": 0.935546875}, {"start": 1932.18, "end": 1932.84, "word": " 
locate", "probability": 0.88671875}, {"start": 1932.84, "end": 1933.06, "word": " the", "probability": 0.91357421875}, {"start": 1933.06, "end": 1933.3, "word": " median.", "probability": 0.95556640625}, {"start": 1935.02, "end": 1935.34, "word": " The", "probability": 0.857421875}, {"start": 1935.34, "end": 1935.82, "word": " location", "probability": 0.9189453125}, {"start": 1935.82, "end": 1935.98, "word": " of", "probability": 0.96875}, {"start": 1935.98, "end": 1936.12, "word": " the", "probability": 0.923828125}, {"start": 1936.12, "end": 1936.4, "word": " median", "probability": 0.958984375}, {"start": 1936.4, "end": 1936.66, "word": " when", "probability": 0.7880859375}, {"start": 1936.66, "end": 1936.8, "word": " the", "probability": 0.9140625}, {"start": 1936.8, "end": 1937.04, "word": " values", "probability": 0.9599609375}, {"start": 1937.04, "end": 1937.38, "word": " are", "probability": 0.947265625}, {"start": 1937.38, "end": 1937.66, "word": " in", "probability": 0.94775390625}, {"start": 1937.66, "end": 1938.02, "word": " numerical", "probability": 0.94775390625}, {"start": 1938.02, "end": 1938.5, "word": " order", "probability": 0.9287109375}, {"start": 1938.5, "end": 1939.1, "word": " from", "probability": 0.830078125}, {"start": 1939.1, "end": 1939.5, "word": " smallest", "probability": 0.9169921875}, {"start": 1939.5, "end": 1940.02, "word": " to", "probability": 0.966796875}, {"start": 1940.02, "end": 1940.42, "word": " largest", "probability": 0.90966796875}, {"start": 1940.42, "end": 1941.84, "word": " is", "probability": 0.7666015625}, {"start": 1941.84, "end": 1942.96, "word": " N", "probability": 0.54541015625}, {"start": 1942.96, "end": 1943.24, "word": " plus", "probability": 0.900390625}, {"start": 1943.24, "end": 1943.58, "word": " one", "probability": 0.5849609375}, {"start": 1943.58, "end": 1944.02, "word": " divided", "probability": 0.751953125}, {"start": 1944.02, "end": 1944.22, "word": " by", "probability": 0.96923828125}, 
{"start": 1944.22, "end": 1944.42, "word": " two.", "probability": 0.91943359375}, {"start": 1944.52, "end": 1944.86, "word": " That's", "probability": 0.946533203125}, {"start": 1944.86, "end": 1945.02, "word": " the", "probability": 0.921875}, {"start": 1945.02, "end": 1945.42, "word": " position", "probability": 0.94921875}, {"start": 1945.42, "end": 1945.76, "word": " of", "probability": 0.96826171875}, {"start": 1945.76, "end": 1945.9, "word": " the", "probability": 0.91845703125}, {"start": 1945.9, "end": 1946.14, "word": " median.", "probability": 0.94384765625}, {"start": 1946.64, "end": 1946.82, "word": " If", "probability": 0.96435546875}, {"start": 1946.82, "end": 1947.02, "word": " we", "probability": 0.96044921875}, {"start": 1947.02, "end": 1947.3, "word": " go", "probability": 0.96484375}, {"start": 1947.3, "end": 1947.62, "word": " back", "probability": 0.875}, {"start": 1947.62, "end": 1947.9, "word": " a", "probability": 0.69482421875}, {"start": 1947.9, "end": 1948.08, "word": " little", "probability": 0.8603515625}, {"start": 1948.08, "end": 1948.36, "word": " bit", "probability": 0.94677734375}, {"start": 1948.36, "end": 1948.48, "word": " to", "probability": 0.81591796875}, {"start": 1948.48, "end": 1948.62, "word": " the", "probability": 0.9208984375}, {"start": 1948.62, "end": 1948.86, "word": " previous", "probability": 0.86474609375}, {"start": 1948.86, "end": 1949.4, "word": " example,", "probability": 0.97216796875}, {"start": 1949.9, "end": 1950.18, "word": " here", "probability": 0.85009765625}, {"start": 1950.18, "end": 1950.7, "word": " N", "probability": 0.6982421875}, {"start": 1950.7, "end": 1950.92, "word": " was", "probability": 0.93505859375}, {"start": 1950.92, "end": 1951.2, "word": " five.", "probability": 0.8935546875}], "temperature": 1.0}, {"id": 73, "seek": 198268, "start": 1953.48, "end": 1982.68, "text": " So the location was number three, because n plus one divided by two, five plus one divided by two is three. 
So location number three is the median. Location number one is one, in this case, then two, then three. So location number three is three. But maybe this number is not three, and other value maybe 3.1 or 3.2. But the location is number three. Is it clear?", "tokens": [407, 264, 4914, 390, 1230, 1045, 11, 570, 297, 1804, 472, 6666, 538, 732, 11, 1732, 1804, 472, 6666, 538, 732, 307, 1045, 13, 407, 4914, 1230, 1045, 307, 264, 26779, 13, 12859, 399, 1230, 472, 307, 472, 11, 294, 341, 1389, 11, 550, 732, 11, 550, 1045, 13, 407, 4914, 1230, 1045, 307, 1045, 13, 583, 1310, 341, 1230, 307, 406, 1045, 11, 293, 661, 2158, 1310, 805, 13, 16, 420, 805, 13, 17, 13, 583, 264, 4914, 307, 1230, 1045, 13, 1119, 309, 1850, 30], "avg_logprob": -0.19335937652398238, "compression_ratio": 2.0335195530726256, "no_speech_prob": 0.0, "words": [{"start": 1953.48, "end": 1953.9, "word": " So", "probability": 0.85986328125}, {"start": 1953.9, "end": 1954.28, "word": " the", "probability": 0.70751953125}, {"start": 1954.28, "end": 1954.62, "word": " location", "probability": 0.947265625}, {"start": 1954.62, "end": 1954.98, "word": " was", "probability": 0.95458984375}, {"start": 1954.98, "end": 1955.68, "word": " number", "probability": 0.9345703125}, {"start": 1955.68, "end": 1956.06, "word": " three,", "probability": 0.60107421875}, {"start": 1956.62, "end": 1958.56, "word": " because", "probability": 0.89794921875}, {"start": 1958.56, "end": 1958.84, "word": " n", "probability": 0.5732421875}, {"start": 1958.84, "end": 1959.1, "word": " plus", "probability": 0.9169921875}, {"start": 1959.1, "end": 1959.28, "word": " one", "probability": 0.69921875}, {"start": 1959.28, "end": 1959.56, "word": " divided", "probability": 0.77880859375}, {"start": 1959.56, "end": 1959.76, "word": " by", "probability": 0.97021484375}, {"start": 1959.76, "end": 1960.0, "word": " two,", "probability": 0.9150390625}, {"start": 1960.12, "end": 1960.34, "word": " five", "probability": 0.89794921875}, {"start": 
1960.34, "end": 1960.58, "word": " plus", "probability": 0.947265625}, {"start": 1960.58, "end": 1960.78, "word": " one", "probability": 0.93994140625}, {"start": 1960.78, "end": 1960.98, "word": " divided", "probability": 0.8349609375}, {"start": 1960.98, "end": 1961.18, "word": " by", "probability": 0.966796875}, {"start": 1961.18, "end": 1961.42, "word": " two", "probability": 0.94091796875}, {"start": 1961.42, "end": 1961.58, "word": " is", "probability": 0.8720703125}, {"start": 1961.58, "end": 1961.9, "word": " three.", "probability": 0.93408203125}, {"start": 1962.38, "end": 1962.66, "word": " So", "probability": 0.9580078125}, {"start": 1962.66, "end": 1963.12, "word": " location", "probability": 0.87255859375}, {"start": 1963.12, "end": 1963.54, "word": " number", "probability": 0.9248046875}, {"start": 1963.54, "end": 1964.0, "word": " three", "probability": 0.90283203125}, {"start": 1964.0, "end": 1964.24, "word": " is", "probability": 0.93798828125}, {"start": 1964.24, "end": 1964.4, "word": " the", "probability": 0.9072265625}, {"start": 1964.4, "end": 1964.64, "word": " median.", "probability": 0.787109375}, {"start": 1966.3, "end": 1966.82, "word": " Location", "probability": 0.936767578125}, {"start": 1966.82, "end": 1967.02, "word": " number", "probability": 0.9423828125}, {"start": 1967.02, "end": 1967.2, "word": " one", "probability": 0.92138671875}, {"start": 1967.2, "end": 1967.34, "word": " is", "probability": 0.94384765625}, {"start": 1967.34, "end": 1967.58, "word": " one,", "probability": 0.921875}, {"start": 1967.7, "end": 1967.82, "word": " in", "probability": 0.87548828125}, {"start": 1967.82, "end": 1968.06, "word": " this", "probability": 0.94482421875}, {"start": 1968.06, "end": 1968.6, "word": " case,", "probability": 0.91357421875}, {"start": 1969.14, "end": 1969.42, "word": " then", "probability": 0.78173828125}, {"start": 1969.42, "end": 1969.7, "word": " two,", "probability": 0.9326171875}, {"start": 1969.86, "end": 1969.98, 
"word": " then", "probability": 0.83056640625}, {"start": 1969.98, "end": 1970.22, "word": " three.", "probability": 0.94189453125}, {"start": 1970.52, "end": 1970.84, "word": " So", "probability": 0.9619140625}, {"start": 1970.84, "end": 1971.26, "word": " location", "probability": 0.77294921875}, {"start": 1971.26, "end": 1971.52, "word": " number", "probability": 0.9306640625}, {"start": 1971.52, "end": 1971.74, "word": " three", "probability": 0.73193359375}, {"start": 1971.74, "end": 1971.84, "word": " is", "probability": 0.7880859375}, {"start": 1971.84, "end": 1972.08, "word": " three.", "probability": 0.9287109375}, {"start": 1972.98, "end": 1973.26, "word": " But", "probability": 0.951171875}, {"start": 1973.26, "end": 1973.52, "word": " maybe", "probability": 0.9462890625}, {"start": 1973.52, "end": 1973.74, "word": " this", "probability": 0.93359375}, {"start": 1973.74, "end": 1973.98, "word": " number", "probability": 0.93212890625}, {"start": 1973.98, "end": 1974.12, "word": " is", "probability": 0.94189453125}, {"start": 1974.12, "end": 1974.24, "word": " not", "probability": 0.94775390625}, {"start": 1974.24, "end": 1974.54, "word": " three,", "probability": 0.93603515625}, {"start": 1974.84, "end": 1974.92, "word": " and", "probability": 0.252685546875}, {"start": 1974.92, "end": 1975.4, "word": " other", "probability": 0.53173828125}, {"start": 1975.4, "end": 1975.64, "word": " value", "probability": 0.66650390625}, {"start": 1975.64, "end": 1975.92, "word": " maybe", "probability": 0.270751953125}, {"start": 1975.92, "end": 1976.62, "word": " 3", "probability": 0.40625}, {"start": 1976.62, "end": 1977.14, "word": ".1", "probability": 0.989501953125}, {"start": 1977.14, "end": 1977.28, "word": " or", "probability": 0.84375}, {"start": 1977.28, "end": 1977.74, "word": " 3", "probability": 0.99072265625}, {"start": 1977.74, "end": 1978.28, "word": ".2.", "probability": 0.995361328125}, {"start": 1978.74, "end": 1978.96, "word": " But", "probability": 
0.95556640625}, {"start": 1978.96, "end": 1979.12, "word": " the", "probability": 0.91943359375}, {"start": 1979.12, "end": 1979.52, "word": " location", "probability": 0.935546875}, {"start": 1979.52, "end": 1979.72, "word": " is", "probability": 0.943359375}, {"start": 1979.72, "end": 1979.94, "word": " number", "probability": 0.93359375}, {"start": 1979.94, "end": 1980.26, "word": " three.", "probability": 0.9326171875}, {"start": 1981.78, "end": 1982.3, "word": " Is", "probability": 0.9609375}, {"start": 1982.3, "end": 1982.44, "word": " it", "probability": 0.78173828125}, {"start": 1982.44, "end": 1982.68, "word": " clear?", "probability": 0.90234375}], "temperature": 1.0}, {"id": 74, "seek": 201085, "start": 1984.25, "end": 2010.85, "text": " So that's the location. If it is odd, you mean by odd number, five, seven and so on. So if the number of values is odd, the median is the middle number. Now let's imagine if we have even number of observations. For example, we have one, two, three, four, five and six. So imagine numbers from one up to six. 
What's the median?", "tokens": [407, 300, 311, 264, 4914, 13, 759, 309, 307, 7401, 11, 291, 914, 538, 7401, 1230, 11, 1732, 11, 3407, 293, 370, 322, 13, 407, 498, 264, 1230, 295, 4190, 307, 7401, 11, 264, 26779, 307, 264, 2808, 1230, 13, 823, 718, 311, 3811, 498, 321, 362, 754, 1230, 295, 18163, 13, 1171, 1365, 11, 321, 362, 472, 11, 732, 11, 1045, 11, 1451, 11, 1732, 293, 2309, 13, 407, 3811, 3547, 490, 472, 493, 281, 2309, 13, 708, 311, 264, 26779, 30], "avg_logprob": -0.14183407134953, "compression_ratio": 1.694300518134715, "no_speech_prob": 0.0, "words": [{"start": 1984.25, "end": 1984.55, "word": " So", "probability": 0.84423828125}, {"start": 1984.55, "end": 1984.81, "word": " that's", "probability": 0.82763671875}, {"start": 1984.81, "end": 1984.93, "word": " the", "probability": 0.91162109375}, {"start": 1984.93, "end": 1985.31, "word": " location.", "probability": 0.9111328125}, {"start": 1985.99, "end": 1986.51, "word": " If", "probability": 0.9462890625}, {"start": 1986.51, "end": 1986.75, "word": " it", "probability": 0.587890625}, {"start": 1986.75, "end": 1987.05, "word": " is", "probability": 0.93017578125}, {"start": 1987.05, "end": 1987.41, "word": " odd,", "probability": 0.91015625}, {"start": 1988.13, "end": 1988.47, "word": " you", "probability": 0.86474609375}, {"start": 1988.47, "end": 1988.73, "word": " mean", "probability": 0.29296875}, {"start": 1988.73, "end": 1989.01, "word": " by", "probability": 0.93359375}, {"start": 1989.01, "end": 1989.23, "word": " odd", "probability": 0.939453125}, {"start": 1989.23, "end": 1989.57, "word": " number,", "probability": 0.90966796875}, {"start": 1989.75, "end": 1990.03, "word": " five,", "probability": 0.59326171875}, {"start": 1990.17, "end": 1990.41, "word": " seven", "probability": 0.9052734375}, {"start": 1990.41, "end": 1990.61, "word": " and", "probability": 0.59130859375}, {"start": 1990.61, "end": 1990.83, "word": " so", "probability": 0.9541015625}, {"start": 1990.83, "end": 1990.97, "word": 
" on.", "probability": 0.9453125}, {"start": 1992.77, "end": 1993.13, "word": " So", "probability": 0.9375}, {"start": 1993.13, "end": 1993.27, "word": " if", "probability": 0.86083984375}, {"start": 1993.27, "end": 1993.39, "word": " the", "probability": 0.91796875}, {"start": 1993.39, "end": 1993.73, "word": " number", "probability": 0.9267578125}, {"start": 1993.73, "end": 1995.21, "word": " of", "probability": 0.9580078125}, {"start": 1995.21, "end": 1995.51, "word": " values", "probability": 0.8876953125}, {"start": 1995.51, "end": 1995.71, "word": " is", "probability": 0.78662109375}, {"start": 1995.71, "end": 1995.99, "word": " odd,", "probability": 0.94287109375}, {"start": 1996.11, "end": 1996.27, "word": " the", "probability": 0.90966796875}, {"start": 1996.27, "end": 1996.51, "word": " median", "probability": 0.955078125}, {"start": 1996.51, "end": 1996.91, "word": " is", "probability": 0.94677734375}, {"start": 1996.91, "end": 1997.09, "word": " the", "probability": 0.91796875}, {"start": 1997.09, "end": 1997.27, "word": " middle", "probability": 0.92578125}, {"start": 1997.27, "end": 1997.61, "word": " number.", "probability": 0.93115234375}, {"start": 1998.29, "end": 1998.45, "word": " Now", "probability": 0.9453125}, {"start": 1998.45, "end": 1998.69, "word": " let's", "probability": 0.868408203125}, {"start": 1998.69, "end": 1999.07, "word": " imagine", "probability": 0.900390625}, {"start": 1999.07, "end": 2000.19, "word": " if", "probability": 0.91650390625}, {"start": 2000.19, "end": 2000.49, "word": " we", "probability": 0.96044921875}, {"start": 2000.49, "end": 2000.89, "word": " have", "probability": 0.9384765625}, {"start": 2000.89, "end": 2001.21, "word": " even", "probability": 0.837890625}, {"start": 2001.21, "end": 2001.59, "word": " number", "probability": 0.92333984375}, {"start": 2001.59, "end": 2001.93, "word": " of", "probability": 0.95263671875}, {"start": 2001.93, "end": 2002.73, "word": " observations.", "probability": 
0.80078125}, {"start": 2002.95, "end": 2003.09, "word": " For", "probability": 0.95849609375}, {"start": 2003.09, "end": 2003.43, "word": " example,", "probability": 0.97265625}, {"start": 2003.87, "end": 2004.19, "word": " we", "probability": 0.9541015625}, {"start": 2004.19, "end": 2004.35, "word": " have", "probability": 0.9482421875}, {"start": 2004.35, "end": 2004.57, "word": " one,", "probability": 0.802734375}, {"start": 2004.61, "end": 2004.73, "word": " two,", "probability": 0.94140625}, {"start": 2004.83, "end": 2004.97, "word": " three,", "probability": 0.931640625}, {"start": 2005.09, "end": 2005.27, "word": " four,", "probability": 0.93994140625}, {"start": 2005.39, "end": 2005.65, "word": " five", "probability": 0.90625}, {"start": 2005.65, "end": 2005.85, "word": " and", "probability": 0.76123046875}, {"start": 2005.85, "end": 2006.19, "word": " six.", "probability": 0.95166015625}, {"start": 2006.57, "end": 2006.85, "word": " So", "probability": 0.94580078125}, {"start": 2006.85, "end": 2007.39, "word": " imagine", "probability": 0.86962890625}, {"start": 2007.39, "end": 2008.27, "word": " numbers", "probability": 0.85400390625}, {"start": 2008.27, "end": 2008.51, "word": " from", "probability": 0.89111328125}, {"start": 2008.51, "end": 2008.81, "word": " one", "probability": 0.884765625}, {"start": 2008.81, "end": 2009.03, "word": " up", "probability": 0.95947265625}, {"start": 2009.03, "end": 2009.17, "word": " to", "probability": 0.96435546875}, {"start": 2009.17, "end": 2009.55, "word": " six.", "probability": 0.94287109375}, {"start": 2010.13, "end": 2010.45, "word": " What's", "probability": 0.93408203125}, {"start": 2010.45, "end": 2010.59, "word": " the", "probability": 0.91943359375}, {"start": 2010.59, "end": 2010.85, "word": " median?", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 75, "seek": 203739, "start": 2011.67, "end": 2037.39, "text": " Now three is not the median because there are two observations below three. 
And three above it. And four is not the median because three observations below, two above. So three and four is the middle value. So just take the average of two middle points,", "tokens": [823, 1045, 307, 406, 264, 26779, 570, 456, 366, 732, 18163, 2507, 1045, 13, 400, 1045, 3673, 309, 13, 400, 1451, 307, 406, 264, 26779, 570, 1045, 18163, 2507, 11, 732, 3673, 13, 407, 1045, 293, 1451, 307, 264, 2808, 2158, 13, 407, 445, 747, 264, 4274, 295, 732, 2808, 2793, 11], "avg_logprob": -0.16597877808336942, "compression_ratio": 1.8142857142857143, "no_speech_prob": 0.0, "words": [{"start": 2011.67, "end": 2012.01, "word": " Now", "probability": 0.90576171875}, {"start": 2012.01, "end": 2012.39, "word": " three", "probability": 0.3642578125}, {"start": 2012.39, "end": 2013.31, "word": " is", "probability": 0.93359375}, {"start": 2013.31, "end": 2013.51, "word": " not", "probability": 0.95361328125}, {"start": 2013.51, "end": 2013.69, "word": " the", "probability": 0.90966796875}, {"start": 2013.69, "end": 2013.95, "word": " median", "probability": 0.958984375}, {"start": 2013.95, "end": 2014.91, "word": " because", "probability": 0.6728515625}, {"start": 2014.91, "end": 2015.19, "word": " there", "probability": 0.9091796875}, {"start": 2015.19, "end": 2015.37, "word": " are", "probability": 0.9423828125}, {"start": 2015.37, "end": 2015.61, "word": " two", "probability": 0.939453125}, {"start": 2015.61, "end": 2016.19, "word": " observations", "probability": 0.75634765625}, {"start": 2016.19, "end": 2016.65, "word": " below", "probability": 0.892578125}, {"start": 2016.65, "end": 2017.63, "word": " three.", "probability": 0.416259765625}, {"start": 2018.91, "end": 2019.59, "word": " And", "probability": 0.6796875}, {"start": 2019.59, "end": 2021.95, "word": " three", "probability": 0.6943359375}, {"start": 2021.95, "end": 2022.39, "word": " above", "probability": 0.94873046875}, {"start": 2022.39, "end": 2022.71, "word": " it.", "probability": 0.951171875}, {"start": 
2023.21, "end": 2023.39, "word": " And", "probability": 0.9248046875}, {"start": 2023.39, "end": 2023.61, "word": " four", "probability": 0.935546875}, {"start": 2023.61, "end": 2023.77, "word": " is", "probability": 0.94384765625}, {"start": 2023.77, "end": 2023.95, "word": " not", "probability": 0.9482421875}, {"start": 2023.95, "end": 2024.11, "word": " the", "probability": 0.9267578125}, {"start": 2024.11, "end": 2024.27, "word": " median", "probability": 0.87841796875}, {"start": 2024.27, "end": 2024.75, "word": " because", "probability": 0.86572265625}, {"start": 2024.75, "end": 2025.71, "word": " three", "probability": 0.8876953125}, {"start": 2025.71, "end": 2026.21, "word": " observations", "probability": 0.755859375}, {"start": 2026.21, "end": 2026.73, "word": " below,", "probability": 0.916015625}, {"start": 2027.27, "end": 2027.51, "word": " two", "probability": 0.931640625}, {"start": 2027.51, "end": 2027.87, "word": " above.", "probability": 0.96533203125}, {"start": 2028.61, "end": 2029.29, "word": " So", "probability": 0.96240234375}, {"start": 2029.29, "end": 2029.57, "word": " three", "probability": 0.81689453125}, {"start": 2029.57, "end": 2029.81, "word": " and", "probability": 0.9423828125}, {"start": 2029.81, "end": 2030.29, "word": " four", "probability": 0.94287109375}, {"start": 2030.29, "end": 2032.85, "word": " is", "probability": 0.923828125}, {"start": 2032.85, "end": 2033.07, "word": " the", "probability": 0.91943359375}, {"start": 2033.07, "end": 2033.29, "word": " middle", "probability": 0.94921875}, {"start": 2033.29, "end": 2033.67, "word": " value.", "probability": 0.97998046875}, {"start": 2034.85, "end": 2035.09, "word": " So", "probability": 0.94384765625}, {"start": 2035.09, "end": 2035.39, "word": " just", "probability": 0.82666015625}, {"start": 2035.39, "end": 2035.61, "word": " take", "probability": 0.86572265625}, {"start": 2035.61, "end": 2035.79, "word": " the", "probability": 0.91796875}, {"start": 2035.79, "end": 
2036.13, "word": " average", "probability": 0.8232421875}, {"start": 2036.13, "end": 2036.31, "word": " of", "probability": 0.962890625}, {"start": 2036.31, "end": 2036.63, "word": " two", "probability": 0.9013671875}, {"start": 2036.63, "end": 2036.87, "word": " middle", "probability": 0.89501953125}, {"start": 2036.87, "end": 2037.39, "word": " points,", "probability": 0.91259765625}], "temperature": 1.0}, {"id": 76, "seek": 206661, "start": 2038.23, "end": 2066.61, "text": " And that will be the median. So if n is even, you have to locate two middle points. For example, n over 2, in this case, we have six observations. So divide by 2, not n plus 1 divided by 2, just n over 2. So n over 2 is 3. So place number 3, and the next one, place number 4, these are the two middle points. Take the average of these values,", "tokens": [400, 300, 486, 312, 264, 26779, 13, 407, 498, 297, 307, 754, 11, 291, 362, 281, 22370, 732, 2808, 2793, 13, 1171, 1365, 11, 297, 670, 568, 11, 294, 341, 1389, 11, 321, 362, 2309, 18163, 13, 407, 9845, 538, 568, 11, 406, 297, 1804, 502, 6666, 538, 568, 11, 445, 297, 670, 568, 13, 407, 297, 670, 568, 307, 805, 13, 407, 1081, 1230, 805, 11, 293, 264, 958, 472, 11, 1081, 1230, 1017, 11, 613, 366, 264, 732, 2808, 2793, 13, 3664, 264, 4274, 295, 613, 4190, 11], "avg_logprob": -0.1684409281709692, "compression_ratio": 1.6570048309178744, "no_speech_prob": 0.0, "words": [{"start": 2038.23, "end": 2038.57, "word": " And", "probability": 0.52001953125}, {"start": 2038.57, "end": 2038.79, "word": " that", "probability": 0.72021484375}, {"start": 2038.79, "end": 2038.99, "word": " will", "probability": 0.833984375}, {"start": 2038.99, "end": 2039.63, "word": " be", "probability": 0.953125}, {"start": 2039.63, "end": 2040.03, "word": " the", "probability": 0.91357421875}, {"start": 2040.03, "end": 2040.27, "word": " median.", "probability": 0.943359375}, {"start": 2040.75, "end": 2041.01, "word": " So", "probability": 0.9453125}, {"start": 2041.01, "end": 
2041.27, "word": " if", "probability": 0.87060546875}, {"start": 2041.27, "end": 2041.41, "word": " n", "probability": 0.392333984375}, {"start": 2041.41, "end": 2041.57, "word": " is", "probability": 0.9482421875}, {"start": 2041.57, "end": 2041.91, "word": " even,", "probability": 0.89794921875}, {"start": 2042.65, "end": 2042.97, "word": " you", "probability": 0.96142578125}, {"start": 2042.97, "end": 2043.17, "word": " have", "probability": 0.94580078125}, {"start": 2043.17, "end": 2043.31, "word": " to", "probability": 0.97021484375}, {"start": 2043.31, "end": 2043.79, "word": " locate", "probability": 0.8642578125}, {"start": 2043.79, "end": 2044.49, "word": " two", "probability": 0.9072265625}, {"start": 2044.49, "end": 2044.73, "word": " middle", "probability": 0.90283203125}, {"start": 2044.73, "end": 2045.13, "word": " points.", "probability": 0.88330078125}, {"start": 2047.51, "end": 2047.99, "word": " For", "probability": 0.9501953125}, {"start": 2047.99, "end": 2048.29, "word": " example,", "probability": 0.97412109375}, {"start": 2048.39, "end": 2048.53, "word": " n", "probability": 0.88671875}, {"start": 2048.53, "end": 2048.69, "word": " over", "probability": 0.87939453125}, {"start": 2048.69, "end": 2048.95, "word": " 2,", "probability": 0.62158203125}, {"start": 2049.19, "end": 2049.39, "word": " in", "probability": 0.81640625}, {"start": 2049.39, "end": 2049.59, "word": " this", "probability": 0.9462890625}, {"start": 2049.59, "end": 2049.75, "word": " case,", "probability": 0.92138671875}, {"start": 2049.83, "end": 2049.89, "word": " we", "probability": 0.9609375}, {"start": 2049.89, "end": 2050.03, "word": " have", "probability": 0.94189453125}, {"start": 2050.03, "end": 2050.31, "word": " six", "probability": 0.88525390625}, {"start": 2050.31, "end": 2050.83, "word": " observations.", "probability": 0.72802734375}, {"start": 2051.63, "end": 2051.91, "word": " So", "probability": 0.9609375}, {"start": 2051.91, "end": 2052.23, "word": " divide", 
"probability": 0.73291015625}, {"start": 2052.23, "end": 2052.43, "word": " by", "probability": 0.96875}, {"start": 2052.43, "end": 2052.73, "word": " 2,", "probability": 0.482666015625}, {"start": 2052.89, "end": 2053.09, "word": " not", "probability": 0.9169921875}, {"start": 2053.09, "end": 2053.35, "word": " n", "probability": 0.76513671875}, {"start": 2053.35, "end": 2053.57, "word": " plus", "probability": 0.9453125}, {"start": 2053.57, "end": 2053.71, "word": " 1", "probability": 0.83349609375}, {"start": 2053.71, "end": 2053.91, "word": " divided", "probability": 0.689453125}, {"start": 2053.91, "end": 2054.07, "word": " by", "probability": 0.96142578125}, {"start": 2054.07, "end": 2054.17, "word": " 2,", "probability": 0.97509765625}, {"start": 2054.19, "end": 2054.43, "word": " just", "probability": 0.90234375}, {"start": 2054.43, "end": 2054.67, "word": " n", "probability": 0.98681640625}, {"start": 2054.67, "end": 2054.85, "word": " over", "probability": 0.923828125}, {"start": 2054.85, "end": 2055.15, "word": " 2.", "probability": 0.974609375}, {"start": 2055.67, "end": 2055.91, "word": " So", "probability": 0.96044921875}, {"start": 2055.91, "end": 2056.07, "word": " n", "probability": 0.94580078125}, {"start": 2056.07, "end": 2056.19, "word": " over", "probability": 0.923828125}, {"start": 2056.19, "end": 2056.39, "word": " 2", "probability": 0.98095703125}, {"start": 2056.39, "end": 2056.59, "word": " is", "probability": 0.94775390625}, {"start": 2056.59, "end": 2056.91, "word": " 3.", "probability": 0.72412109375}, {"start": 2057.43, "end": 2057.67, "word": " So", "probability": 0.96875}, {"start": 2057.67, "end": 2057.97, "word": " place", "probability": 0.6728515625}, {"start": 2057.97, "end": 2058.29, "word": " number", "probability": 0.93115234375}, {"start": 2058.29, "end": 2058.67, "word": " 3,", "probability": 0.7861328125}, {"start": 2059.97, "end": 2060.25, "word": " and", "probability": 0.93310546875}, {"start": 2060.25, "end": 2060.37, 
"word": " the", "probability": 0.908203125}, {"start": 2060.37, "end": 2060.57, "word": " next", "probability": 0.9365234375}, {"start": 2060.57, "end": 2060.89, "word": " one,", "probability": 0.91748046875}, {"start": 2060.99, "end": 2061.25, "word": " place", "probability": 0.7919921875}, {"start": 2061.25, "end": 2061.57, "word": " number", "probability": 0.93896484375}, {"start": 2061.57, "end": 2061.95, "word": " 4,", "probability": 0.9501953125}, {"start": 2062.31, "end": 2062.93, "word": " these", "probability": 0.8486328125}, {"start": 2062.93, "end": 2063.17, "word": " are", "probability": 0.93798828125}, {"start": 2063.17, "end": 2063.33, "word": " the", "probability": 0.921875}, {"start": 2063.33, "end": 2063.51, "word": " two", "probability": 0.94189453125}, {"start": 2063.51, "end": 2063.73, "word": " middle", "probability": 0.94482421875}, {"start": 2063.73, "end": 2064.19, "word": " points.", "probability": 0.912109375}, {"start": 2065.03, "end": 2065.29, "word": " Take", "probability": 0.86328125}, {"start": 2065.29, "end": 2065.47, "word": " the", "probability": 0.91845703125}, {"start": 2065.47, "end": 2065.77, "word": " average", "probability": 0.8056640625}, {"start": 2065.77, "end": 2065.93, "word": " of", "probability": 0.96240234375}, {"start": 2065.93, "end": 2066.13, "word": " these", "probability": 0.837890625}, {"start": 2066.13, "end": 2066.61, "word": " values,", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 77, "seek": 209740, "start": 2068.78, "end": 2097.4, "text": " then that's your median. So if N is even, you have to be careful. You have to find two middle points and just take the average of these two. So if N is even, the median is the average of the two middle numbers. 
Keep in mind, when we are saying N plus 2, N plus 2 is just the position of the median, not the value, location.", "tokens": [550, 300, 311, 428, 26779, 13, 407, 498, 426, 307, 754, 11, 291, 362, 281, 312, 5026, 13, 509, 362, 281, 915, 732, 2808, 2793, 293, 445, 747, 264, 4274, 295, 613, 732, 13, 407, 498, 426, 307, 754, 11, 264, 26779, 307, 264, 4274, 295, 264, 732, 2808, 3547, 13, 5527, 294, 1575, 11, 562, 321, 366, 1566, 426, 1804, 568, 11, 426, 1804, 568, 307, 445, 264, 2535, 295, 264, 26779, 11, 406, 264, 2158, 11, 4914, 13], "avg_logprob": -0.11930941063680767, "compression_ratio": 1.732620320855615, "no_speech_prob": 0.0, "words": [{"start": 2068.78, "end": 2069.34, "word": " then", "probability": 0.56884765625}, {"start": 2069.34, "end": 2069.9, "word": " that's", "probability": 0.905029296875}, {"start": 2069.9, "end": 2070.22, "word": " your", "probability": 0.90087890625}, {"start": 2070.22, "end": 2070.54, "word": " median.", "probability": 0.96484375}, {"start": 2071.46, "end": 2071.7, "word": " So", "probability": 0.87255859375}, {"start": 2071.7, "end": 2071.94, "word": " if", "probability": 0.79931640625}, {"start": 2071.94, "end": 2072.12, "word": " N", "probability": 0.56689453125}, {"start": 2072.12, "end": 2072.3, "word": " is", "probability": 0.94091796875}, {"start": 2072.3, "end": 2072.62, "word": " even,", "probability": 0.89892578125}, {"start": 2072.82, "end": 2073.0, "word": " you", "probability": 0.95458984375}, {"start": 2073.0, "end": 2073.16, "word": " have", "probability": 0.94775390625}, {"start": 2073.16, "end": 2073.24, "word": " to", "probability": 0.9716796875}, {"start": 2073.24, "end": 2073.38, "word": " be", "probability": 0.95849609375}, {"start": 2073.38, "end": 2073.76, "word": " careful.", "probability": 0.96484375}, {"start": 2075.22, "end": 2075.78, "word": " You", "probability": 0.94775390625}, {"start": 2075.78, "end": 2075.96, "word": " have", "probability": 0.947265625}, {"start": 2075.96, "end": 2076.1, "word": " 
to", "probability": 0.96923828125}, {"start": 2076.1, "end": 2076.52, "word": " find", "probability": 0.8984375}, {"start": 2076.52, "end": 2077.08, "word": " two", "probability": 0.77587890625}, {"start": 2077.08, "end": 2077.28, "word": " middle", "probability": 0.90380859375}, {"start": 2077.28, "end": 2077.74, "word": " points", "probability": 0.90869140625}, {"start": 2077.74, "end": 2079.34, "word": " and", "probability": 0.74365234375}, {"start": 2079.34, "end": 2079.68, "word": " just", "probability": 0.9140625}, {"start": 2079.68, "end": 2079.92, "word": " take", "probability": 0.87158203125}, {"start": 2079.92, "end": 2080.1, "word": " the", "probability": 0.91357421875}, {"start": 2080.1, "end": 2080.44, "word": " average", "probability": 0.82080078125}, {"start": 2080.44, "end": 2080.64, "word": " of", "probability": 0.962890625}, {"start": 2080.64, "end": 2080.86, "word": " these", "probability": 0.853515625}, {"start": 2080.86, "end": 2081.1, "word": " two.", "probability": 0.908203125}, {"start": 2081.46, "end": 2081.76, "word": " So", "probability": 0.93505859375}, {"start": 2081.76, "end": 2081.98, "word": " if", "probability": 0.90380859375}, {"start": 2081.98, "end": 2082.16, "word": " N", "probability": 0.96337890625}, {"start": 2082.16, "end": 2082.3, "word": " is", "probability": 0.95068359375}, {"start": 2082.3, "end": 2082.68, "word": " even,", "probability": 0.89111328125}, {"start": 2083.14, "end": 2083.52, "word": " the", "probability": 0.87939453125}, {"start": 2083.52, "end": 2083.78, "word": " median", "probability": 0.9658203125}, {"start": 2083.78, "end": 2084.14, "word": " is", "probability": 0.94580078125}, {"start": 2084.14, "end": 2084.3, "word": " the", "probability": 0.9111328125}, {"start": 2084.3, "end": 2084.62, "word": " average", "probability": 0.81201171875}, {"start": 2084.62, "end": 2085.1, "word": " of", "probability": 0.966796875}, {"start": 2085.1, "end": 2085.26, "word": " the", "probability": 0.9130859375}, 
{"start": 2085.26, "end": 2085.46, "word": " two", "probability": 0.93896484375}, {"start": 2085.46, "end": 2085.8, "word": " middle", "probability": 0.94580078125}, {"start": 2085.8, "end": 2086.68, "word": " numbers.", "probability": 0.900390625}, {"start": 2087.22, "end": 2087.42, "word": " Keep", "probability": 0.8740234375}, {"start": 2087.42, "end": 2087.56, "word": " in", "probability": 0.9443359375}, {"start": 2087.56, "end": 2087.84, "word": " mind,", "probability": 0.8828125}, {"start": 2088.68, "end": 2088.9, "word": " when", "probability": 0.9375}, {"start": 2088.9, "end": 2089.04, "word": " we", "probability": 0.94873046875}, {"start": 2089.04, "end": 2089.2, "word": " are", "probability": 0.91259765625}, {"start": 2089.2, "end": 2090.14, "word": " saying", "probability": 0.6220703125}, {"start": 2090.14, "end": 2090.4, "word": " N", "probability": 0.9150390625}, {"start": 2090.4, "end": 2090.68, "word": " plus", "probability": 0.634765625}, {"start": 2090.68, "end": 2091.04, "word": " 2,", "probability": 0.6357421875}, {"start": 2091.52, "end": 2091.68, "word": " N", "probability": 0.978515625}, {"start": 2091.68, "end": 2091.96, "word": " plus", "probability": 0.96044921875}, {"start": 2091.96, "end": 2092.24, "word": " 2", "probability": 0.98486328125}, {"start": 2092.24, "end": 2092.78, "word": " is", "probability": 0.9501953125}, {"start": 2092.78, "end": 2093.32, "word": " just", "probability": 0.91064453125}, {"start": 2093.32, "end": 2093.58, "word": " the", "probability": 0.908203125}, {"start": 2093.58, "end": 2094.0, "word": " position", "probability": 0.95166015625}, {"start": 2094.0, "end": 2094.6, "word": " of", "probability": 0.96875}, {"start": 2094.6, "end": 2094.78, "word": " the", "probability": 0.9169921875}, {"start": 2094.78, "end": 2095.06, "word": " median,", "probability": 0.95654296875}, {"start": 2095.4, "end": 2095.9, "word": " not", "probability": 0.9404296875}, {"start": 2095.9, "end": 2096.14, "word": " the", 
"probability": 0.912109375}, {"start": 2096.14, "end": 2096.52, "word": " value,", "probability": 0.966796875}, {"start": 2097.04, "end": 2097.4, "word": " location.", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 78, "seek": 212309, "start": 2098.25, "end": 2123.09, "text": " Not the value. Is it clear? Any question? So location is not the value. Location is just the place or the position of the medium. If N is odd, the position is N plus one divided by two. If N is even, the positions of the two middle points are N over two and the next term or the next point.", "tokens": [1726, 264, 2158, 13, 1119, 309, 1850, 30, 2639, 1168, 30, 407, 4914, 307, 406, 264, 2158, 13, 12859, 399, 307, 445, 264, 1081, 420, 264, 2535, 295, 264, 6399, 13, 759, 426, 307, 7401, 11, 264, 2535, 307, 426, 1804, 472, 6666, 538, 732, 13, 759, 426, 307, 754, 11, 264, 8432, 295, 264, 732, 2808, 2793, 366, 426, 670, 732, 293, 264, 958, 1433, 420, 264, 958, 935, 13], "avg_logprob": -0.1371527738455269, "compression_ratio": 1.774390243902439, "no_speech_prob": 0.0, "words": [{"start": 2098.25, "end": 2098.51, "word": " Not", "probability": 0.242431640625}, {"start": 2098.51, "end": 2098.67, "word": " the", "probability": 0.919921875}, {"start": 2098.67, "end": 2098.95, "word": " value.", "probability": 0.9697265625}, {"start": 2100.93, "end": 2101.45, "word": " Is", "probability": 0.826171875}, {"start": 2101.45, "end": 2101.59, "word": " it", "probability": 0.8974609375}, {"start": 2101.59, "end": 2101.89, "word": " clear?", "probability": 0.88134765625}, {"start": 2103.27, "end": 2103.79, "word": " Any", "probability": 0.7509765625}, {"start": 2103.79, "end": 2104.15, "word": " question?", "probability": 0.72265625}, {"start": 2106.51, "end": 2107.03, "word": " So", "probability": 0.8515625}, {"start": 2107.03, "end": 2107.37, "word": " location", "probability": 0.77001953125}, {"start": 2107.37, "end": 2107.77, "word": " is", "probability": 0.94873046875}, {"start": 2107.77, "end": 
2107.93, "word": " not", "probability": 0.94091796875}, {"start": 2107.93, "end": 2108.05, "word": " the", "probability": 0.681640625}, {"start": 2108.05, "end": 2108.23, "word": " value.", "probability": 0.97021484375}, {"start": 2108.35, "end": 2108.57, "word": " Location", "probability": 0.912353515625}, {"start": 2108.57, "end": 2108.71, "word": " is", "probability": 0.9521484375}, {"start": 2108.71, "end": 2109.03, "word": " just", "probability": 0.9150390625}, {"start": 2109.03, "end": 2109.21, "word": " the", "probability": 0.91064453125}, {"start": 2109.21, "end": 2109.59, "word": " place", "probability": 0.890625}, {"start": 2109.59, "end": 2109.99, "word": " or", "probability": 0.798828125}, {"start": 2109.99, "end": 2110.15, "word": " the", "probability": 0.880859375}, {"start": 2110.15, "end": 2110.53, "word": " position", "probability": 0.9599609375}, {"start": 2110.53, "end": 2110.75, "word": " of", "probability": 0.9677734375}, {"start": 2110.75, "end": 2110.89, "word": " the", "probability": 0.92041015625}, {"start": 2110.89, "end": 2111.13, "word": " medium.", "probability": 0.79443359375}, {"start": 2111.91, "end": 2112.13, "word": " If", "probability": 0.96142578125}, {"start": 2112.13, "end": 2112.25, "word": " N", "probability": 0.53125}, {"start": 2112.25, "end": 2112.39, "word": " is", "probability": 0.94970703125}, {"start": 2112.39, "end": 2112.73, "word": " odd,", "probability": 0.935546875}, {"start": 2112.93, "end": 2113.05, "word": " the", "probability": 0.8994140625}, {"start": 2113.05, "end": 2113.45, "word": " position", "probability": 0.95068359375}, {"start": 2113.45, "end": 2113.87, "word": " is", "probability": 0.94287109375}, {"start": 2113.87, "end": 2114.09, "word": " N", "probability": 0.92529296875}, {"start": 2114.09, "end": 2114.33, "word": " plus", "probability": 0.7666015625}, {"start": 2114.33, "end": 2114.55, "word": " one", "probability": 0.50732421875}, {"start": 2114.55, "end": 2114.83, "word": " divided", 
"probability": 0.79931640625}, {"start": 2114.83, "end": 2115.03, "word": " by", "probability": 0.97021484375}, {"start": 2115.03, "end": 2115.27, "word": " two.", "probability": 0.9287109375}, {"start": 2115.87, "end": 2116.15, "word": " If", "probability": 0.96435546875}, {"start": 2116.15, "end": 2116.35, "word": " N", "probability": 0.97900390625}, {"start": 2116.35, "end": 2116.51, "word": " is", "probability": 0.94677734375}, {"start": 2116.51, "end": 2116.91, "word": " even,", "probability": 0.8984375}, {"start": 2117.39, "end": 2117.71, "word": " the", "probability": 0.91455078125}, {"start": 2117.71, "end": 2118.15, "word": " positions", "probability": 0.82470703125}, {"start": 2118.15, "end": 2118.79, "word": " of", "probability": 0.96728515625}, {"start": 2118.79, "end": 2118.93, "word": " the", "probability": 0.9130859375}, {"start": 2118.93, "end": 2119.09, "word": " two", "probability": 0.94189453125}, {"start": 2119.09, "end": 2119.33, "word": " middle", "probability": 0.91064453125}, {"start": 2119.33, "end": 2119.77, "word": " points", "probability": 0.90771484375}, {"start": 2119.77, "end": 2120.17, "word": " are", "probability": 0.93115234375}, {"start": 2120.17, "end": 2120.43, "word": " N", "probability": 0.9697265625}, {"start": 2120.43, "end": 2120.59, "word": " over", "probability": 0.92724609375}, {"start": 2120.59, "end": 2120.87, "word": " two", "probability": 0.9375}, {"start": 2120.87, "end": 2121.13, "word": " and", "probability": 0.87646484375}, {"start": 2121.13, "end": 2121.31, "word": " the", "probability": 0.90869140625}, {"start": 2121.31, "end": 2121.53, "word": " next", "probability": 0.96044921875}, {"start": 2121.53, "end": 2121.91, "word": " term", "probability": 0.9404296875}, {"start": 2121.91, "end": 2122.15, "word": " or", "probability": 0.77001953125}, {"start": 2122.15, "end": 2122.33, "word": " the", "probability": 0.90869140625}, {"start": 2122.33, "end": 2122.53, "word": " next", "probability": 0.92919921875}, 
{"start": 2122.53, "end": 2123.09, "word": " point.", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 79, "seek": 215535, "start": 2128.39, "end": 2155.35, "text": " Last measure of center tendency is called the mood. The definition of the mood, the mood is the most frequent value. So sometimes the mood exists, sometimes the mood does not exist. Or sometimes there is only one mood, in other cases maybe there are several moods.", "tokens": [5264, 3481, 295, 3056, 18187, 307, 1219, 264, 9268, 13, 440, 7123, 295, 264, 9268, 11, 264, 9268, 307, 264, 881, 18004, 2158, 13, 407, 2171, 264, 9268, 8198, 11, 2171, 264, 9268, 775, 406, 2514, 13, 1610, 2171, 456, 307, 787, 472, 9268, 11, 294, 661, 3331, 1310, 456, 366, 2940, 9268, 82, 13], "avg_logprob": -0.14718191831239633, "compression_ratio": 1.6987179487179487, "no_speech_prob": 0.0, "words": [{"start": 2128.39, "end": 2128.85, "word": " Last", "probability": 0.69091796875}, {"start": 2128.85, "end": 2129.23, "word": " measure", "probability": 0.80712890625}, {"start": 2129.23, "end": 2129.67, "word": " of", "probability": 0.966796875}, {"start": 2129.67, "end": 2129.95, "word": " center", "probability": 0.35009765625}, {"start": 2129.95, "end": 2130.47, "word": " tendency", "probability": 0.91455078125}, {"start": 2130.47, "end": 2130.91, "word": " is", "probability": 0.94189453125}, {"start": 2130.91, "end": 2131.47, "word": " called", "probability": 0.89306640625}, {"start": 2131.47, "end": 2132.51, "word": " the", "probability": 0.6884765625}, {"start": 2132.51, "end": 2132.75, "word": " mood.", "probability": 0.80810546875}, {"start": 2135.89, "end": 2136.65, "word": " The", "probability": 0.86279296875}, {"start": 2136.65, "end": 2137.01, "word": " definition", "probability": 0.94873046875}, {"start": 2137.01, "end": 2137.19, "word": " of", "probability": 0.96826171875}, {"start": 2137.19, "end": 2137.31, "word": " the", "probability": 0.85205078125}, {"start": 2137.31, "end": 2137.49, "word": " mood,", 
"probability": 0.98046875}, {"start": 2137.57, "end": 2137.69, "word": " the", "probability": 0.86083984375}, {"start": 2137.69, "end": 2137.97, "word": " mood", "probability": 0.97802734375}, {"start": 2137.97, "end": 2138.33, "word": " is", "probability": 0.947265625}, {"start": 2138.33, "end": 2138.59, "word": " the", "probability": 0.91015625}, {"start": 2138.59, "end": 2139.01, "word": " most", "probability": 0.90771484375}, {"start": 2139.01, "end": 2139.65, "word": " frequent", "probability": 0.91796875}, {"start": 2139.65, "end": 2140.29, "word": " value.", "probability": 0.97802734375}, {"start": 2142.29, "end": 2142.45, "word": " So", "probability": 0.546875}, {"start": 2142.45, "end": 2143.05, "word": " sometimes", "probability": 0.79248046875}, {"start": 2143.05, "end": 2143.49, "word": " the", "probability": 0.8671875}, {"start": 2143.49, "end": 2143.67, "word": " mood", "probability": 0.98193359375}, {"start": 2143.67, "end": 2144.25, "word": " exists,", "probability": 0.884765625}, {"start": 2145.23, "end": 2145.57, "word": " sometimes", "probability": 0.94482421875}, {"start": 2145.57, "end": 2145.97, "word": " the", "probability": 0.9140625}, {"start": 2145.97, "end": 2146.09, "word": " mood", "probability": 0.97998046875}, {"start": 2146.09, "end": 2146.29, "word": " does", "probability": 0.97802734375}, {"start": 2146.29, "end": 2146.47, "word": " not", "probability": 0.94921875}, {"start": 2146.47, "end": 2146.91, "word": " exist.", "probability": 0.9697265625}, {"start": 2147.89, "end": 2148.13, "word": " Or", "probability": 0.93017578125}, {"start": 2148.13, "end": 2148.57, "word": " sometimes", "probability": 0.947265625}, {"start": 2148.57, "end": 2148.85, "word": " there", "probability": 0.89501953125}, {"start": 2148.85, "end": 2149.03, "word": " is", "probability": 0.9423828125}, {"start": 2149.03, "end": 2149.35, "word": " only", "probability": 0.92919921875}, {"start": 2149.35, "end": 2149.61, "word": " one", "probability": 
0.9228515625}, {"start": 2149.61, "end": 2149.99, "word": " mood,", "probability": 0.986328125}, {"start": 2151.83, "end": 2152.25, "word": " in", "probability": 0.7939453125}, {"start": 2152.25, "end": 2152.53, "word": " other", "probability": 0.892578125}, {"start": 2152.53, "end": 2153.05, "word": " cases", "probability": 0.91650390625}, {"start": 2153.05, "end": 2153.41, "word": " maybe", "probability": 0.7666015625}, {"start": 2153.41, "end": 2153.73, "word": " there", "probability": 0.904296875}, {"start": 2153.73, "end": 2154.11, "word": " are", "probability": 0.943359375}, {"start": 2154.11, "end": 2154.69, "word": " several", "probability": 0.92529296875}, {"start": 2154.69, "end": 2155.35, "word": " moods.", "probability": 0.965576171875}], "temperature": 1.0}, {"id": 80, "seek": 218367, "start": 2156.49, "end": 2183.67, "text": " So a value that occurs most often is called the mood. The mood is not affected by extreme values. It can be used for either numerical or categorical data. And that's the difference between mean and median and the mood. Mean and median is used just for numerical data. 
Here, the mood can be used for both, categorical and numerical data.", "tokens": [407, 257, 2158, 300, 11843, 881, 2049, 307, 1219, 264, 9268, 13, 440, 9268, 307, 406, 8028, 538, 8084, 4190, 13, 467, 393, 312, 1143, 337, 2139, 29054, 420, 19250, 804, 1412, 13, 400, 300, 311, 264, 2649, 1296, 914, 293, 26779, 293, 264, 9268, 13, 12302, 293, 26779, 307, 1143, 445, 337, 29054, 1412, 13, 1692, 11, 264, 9268, 393, 312, 1143, 337, 1293, 11, 19250, 804, 293, 29054, 1412, 13], "avg_logprob": -0.13184931180248521, "compression_ratio": 1.8216216216216217, "no_speech_prob": 0.0, "words": [{"start": 2156.49, "end": 2156.93, "word": " So", "probability": 0.88134765625}, {"start": 2156.93, "end": 2157.41, "word": " a", "probability": 0.43994140625}, {"start": 2157.41, "end": 2157.73, "word": " value", "probability": 0.9755859375}, {"start": 2157.73, "end": 2157.99, "word": " that", "probability": 0.93896484375}, {"start": 2157.99, "end": 2158.39, "word": " occurs", "probability": 0.77099609375}, {"start": 2158.39, "end": 2158.73, "word": " most", "probability": 0.91259765625}, {"start": 2158.73, "end": 2159.15, "word": " often", "probability": 0.8076171875}, {"start": 2159.15, "end": 2159.39, "word": " is", "probability": 0.91455078125}, {"start": 2159.39, "end": 2159.61, "word": " called", "probability": 0.89501953125}, {"start": 2159.61, "end": 2159.81, "word": " the", "probability": 0.89599609375}, {"start": 2159.81, "end": 2160.07, "word": " mood.", "probability": 0.5107421875}, {"start": 2161.77, "end": 2162.15, "word": " The", "probability": 0.89453125}, {"start": 2162.15, "end": 2162.33, "word": " mood", "probability": 0.9736328125}, {"start": 2162.33, "end": 2162.49, "word": " is", "probability": 0.94091796875}, {"start": 2162.49, "end": 2162.65, "word": " not", "probability": 0.94580078125}, {"start": 2162.65, "end": 2163.01, "word": " affected", "probability": 0.84716796875}, {"start": 2163.01, "end": 2163.25, "word": " by", "probability": 0.97021484375}, {"start": 
2163.25, "end": 2163.59, "word": " extreme", "probability": 0.849609375}, {"start": 2163.59, "end": 2164.07, "word": " values.", "probability": 0.96533203125}, {"start": 2165.65, "end": 2166.25, "word": " It", "probability": 0.96337890625}, {"start": 2166.25, "end": 2166.51, "word": " can", "probability": 0.94580078125}, {"start": 2166.51, "end": 2166.73, "word": " be", "probability": 0.95556640625}, {"start": 2166.73, "end": 2167.03, "word": " used", "probability": 0.916015625}, {"start": 2167.03, "end": 2167.29, "word": " for", "probability": 0.94873046875}, {"start": 2167.29, "end": 2167.61, "word": " either", "probability": 0.94677734375}, {"start": 2167.61, "end": 2168.09, "word": " numerical", "probability": 0.8095703125}, {"start": 2168.09, "end": 2168.61, "word": " or", "probability": 0.9677734375}, {"start": 2168.61, "end": 2169.13, "word": " categorical", "probability": 0.7841796875}, {"start": 2169.13, "end": 2169.57, "word": " data.", "probability": 0.94384765625}, {"start": 2170.73, "end": 2170.89, "word": " And", "probability": 0.6162109375}, {"start": 2170.89, "end": 2171.07, "word": " that's", "probability": 0.954833984375}, {"start": 2171.07, "end": 2171.19, "word": " the", "probability": 0.916015625}, {"start": 2171.19, "end": 2171.53, "word": " difference", "probability": 0.86083984375}, {"start": 2171.53, "end": 2171.85, "word": " between", "probability": 0.8876953125}, {"start": 2171.85, "end": 2172.01, "word": " mean", "probability": 0.92333984375}, {"start": 2172.01, "end": 2172.19, "word": " and", "probability": 0.9453125}, {"start": 2172.19, "end": 2172.51, "word": " median", "probability": 0.68603515625}, {"start": 2172.51, "end": 2173.57, "word": " and", "probability": 0.65576171875}, {"start": 2173.57, "end": 2173.71, "word": " the", "probability": 0.8955078125}, {"start": 2173.71, "end": 2173.91, "word": " mood.", "probability": 0.9443359375}, {"start": 2174.59, "end": 2174.73, "word": " Mean", "probability": 0.9326171875}, {"start": 
2174.73, "end": 2174.91, "word": " and", "probability": 0.94384765625}, {"start": 2174.91, "end": 2175.09, "word": " median", "probability": 0.95068359375}, {"start": 2175.09, "end": 2175.35, "word": " is", "probability": 0.93505859375}, {"start": 2175.35, "end": 2175.55, "word": " used", "probability": 0.91455078125}, {"start": 2175.55, "end": 2175.81, "word": " just", "probability": 0.91748046875}, {"start": 2175.81, "end": 2175.99, "word": " for", "probability": 0.95263671875}, {"start": 2175.99, "end": 2176.39, "word": " numerical", "probability": 0.9130859375}, {"start": 2176.39, "end": 2176.93, "word": " data.", "probability": 0.93310546875}, {"start": 2177.43, "end": 2177.65, "word": " Here,", "probability": 0.8583984375}, {"start": 2177.81, "end": 2177.95, "word": " the", "probability": 0.92529296875}, {"start": 2177.95, "end": 2178.13, "word": " mood", "probability": 0.986328125}, {"start": 2178.13, "end": 2178.49, "word": " can", "probability": 0.94189453125}, {"start": 2178.49, "end": 2178.69, "word": " be", "probability": 0.95703125}, {"start": 2178.69, "end": 2178.95, "word": " used", "probability": 0.91064453125}, {"start": 2178.95, "end": 2179.19, "word": " for", "probability": 0.94775390625}, {"start": 2179.19, "end": 2179.59, "word": " both,", "probability": 0.88623046875}, {"start": 2180.71, "end": 2181.27, "word": " categorical", "probability": 0.9248046875}, {"start": 2181.27, "end": 2181.85, "word": " and", "probability": 0.94140625}, {"start": 2181.85, "end": 2182.97, "word": " numerical", "probability": 0.91162109375}, {"start": 2182.97, "end": 2183.67, "word": " data.", "probability": 0.93310546875}], "temperature": 1.0}, {"id": 81, "seek": 220921, "start": 2184.33, "end": 2209.21, "text": " Sometimes, as I mentioned, there may be no mood or the mood does not exist. In other cases, there may be several events. So the mood is the value that has the most frequent. 
For example, if you look at this data, one is repeated once, three is the same one time, five is repeated twice.", "tokens": [4803, 11, 382, 286, 2835, 11, 456, 815, 312, 572, 9268, 420, 264, 9268, 775, 406, 2514, 13, 682, 661, 3331, 11, 456, 815, 312, 2940, 3931, 13, 407, 264, 9268, 307, 264, 2158, 300, 575, 264, 881, 18004, 13, 1171, 1365, 11, 498, 291, 574, 412, 341, 1412, 11, 472, 307, 10477, 1564, 11, 1045, 307, 264, 912, 472, 565, 11, 1732, 307, 10477, 6091, 13], "avg_logprob": -0.15211397212217836, "compression_ratio": 1.64, "no_speech_prob": 0.0, "words": [{"start": 2184.33, "end": 2184.89, "word": " Sometimes,", "probability": 0.80029296875}, {"start": 2185.03, "end": 2185.13, "word": " as", "probability": 0.9599609375}, {"start": 2185.13, "end": 2185.25, "word": " I", "probability": 0.9833984375}, {"start": 2185.25, "end": 2185.61, "word": " mentioned,", "probability": 0.80615234375}, {"start": 2185.93, "end": 2186.27, "word": " there", "probability": 0.90869140625}, {"start": 2186.27, "end": 2186.45, "word": " may", "probability": 0.923828125}, {"start": 2186.45, "end": 2186.57, "word": " be", "probability": 0.9609375}, {"start": 2186.57, "end": 2186.85, "word": " no", "probability": 0.88134765625}, {"start": 2186.85, "end": 2187.29, "word": " mood", "probability": 0.6748046875}, {"start": 2187.29, "end": 2188.65, "word": " or", "probability": 0.44384765625}, {"start": 2188.65, "end": 2188.79, "word": " the", "probability": 0.9111328125}, {"start": 2188.79, "end": 2188.89, "word": " mood", "probability": 0.9794921875}, {"start": 2188.89, "end": 2189.05, "word": " does", "probability": 0.97265625}, {"start": 2189.05, "end": 2189.19, "word": " not", "probability": 0.9521484375}, {"start": 2189.19, "end": 2189.57, "word": " exist.", "probability": 0.96826171875}, {"start": 2190.13, "end": 2190.31, "word": " In", "probability": 0.951171875}, {"start": 2190.31, "end": 2190.53, "word": " other", "probability": 0.89111328125}, {"start": 2190.53, "end": 2190.95, 
"word": " cases,", "probability": 0.91552734375}, {"start": 2191.13, "end": 2191.33, "word": " there", "probability": 0.91015625}, {"start": 2191.33, "end": 2191.55, "word": " may", "probability": 0.9462890625}, {"start": 2191.55, "end": 2191.75, "word": " be", "probability": 0.95751953125}, {"start": 2191.75, "end": 2192.17, "word": " several", "probability": 0.91796875}, {"start": 2192.17, "end": 2192.71, "word": " events.", "probability": 0.40869140625}, {"start": 2193.69, "end": 2194.19, "word": " So", "probability": 0.919921875}, {"start": 2194.19, "end": 2194.33, "word": " the", "probability": 0.67431640625}, {"start": 2194.33, "end": 2194.53, "word": " mood", "probability": 0.97265625}, {"start": 2194.53, "end": 2194.75, "word": " is", "probability": 0.94287109375}, {"start": 2194.75, "end": 2194.91, "word": " the", "probability": 0.91796875}, {"start": 2194.91, "end": 2195.17, "word": " value", "probability": 0.97412109375}, {"start": 2195.17, "end": 2195.55, "word": " that", "probability": 0.9287109375}, {"start": 2195.55, "end": 2195.93, "word": " has", "probability": 0.7822265625}, {"start": 2195.93, "end": 2196.11, "word": " the", "probability": 0.90234375}, {"start": 2196.11, "end": 2196.41, "word": " most", "probability": 0.9033203125}, {"start": 2196.41, "end": 2196.87, "word": " frequent.", "probability": 0.740234375}, {"start": 2197.49, "end": 2197.75, "word": " For", "probability": 0.95751953125}, {"start": 2197.75, "end": 2198.11, "word": " example,", "probability": 0.97265625}, {"start": 2198.55, "end": 2198.73, "word": " if", "probability": 0.94775390625}, {"start": 2198.73, "end": 2198.79, "word": " you", "probability": 0.9521484375}, {"start": 2198.79, "end": 2198.93, "word": " look", "probability": 0.96484375}, {"start": 2198.93, "end": 2199.05, "word": " at", "probability": 0.96630859375}, {"start": 2199.05, "end": 2199.25, "word": " this", "probability": 0.943359375}, {"start": 2199.25, "end": 2199.61, "word": " data,", "probability": 
0.84326171875}, {"start": 2201.07, "end": 2201.69, "word": " one", "probability": 0.6279296875}, {"start": 2201.69, "end": 2203.65, "word": " is", "probability": 0.9384765625}, {"start": 2203.65, "end": 2204.03, "word": " repeated", "probability": 0.96435546875}, {"start": 2204.03, "end": 2204.53, "word": " once,", "probability": 0.90380859375}, {"start": 2205.89, "end": 2206.13, "word": " three", "probability": 0.82275390625}, {"start": 2206.13, "end": 2206.29, "word": " is", "probability": 0.935546875}, {"start": 2206.29, "end": 2206.47, "word": " the", "probability": 0.9140625}, {"start": 2206.47, "end": 2206.73, "word": " same", "probability": 0.9111328125}, {"start": 2206.73, "end": 2207.11, "word": " one", "probability": 0.57666015625}, {"start": 2207.11, "end": 2207.47, "word": " time,", "probability": 0.8671875}, {"start": 2207.65, "end": 2208.03, "word": " five", "probability": 0.89697265625}, {"start": 2208.03, "end": 2208.37, "word": " is", "probability": 0.92724609375}, {"start": 2208.37, "end": 2208.69, "word": " repeated", "probability": 0.95556640625}, {"start": 2208.69, "end": 2209.21, "word": " twice.", "probability": 0.95361328125}], "temperature": 1.0}, {"id": 82, "seek": 223601, "start": 2210.39, "end": 2236.01, "text": " seven is one nine is repeated three times and so on so in this case nine is the mood because the mood again is the most frequent value on the right side there are some values zero one two three up to six now each one is repeated once so in this case the mood does not exist I mean there is no mood", "tokens": [3407, 307, 472, 4949, 307, 10477, 1045, 1413, 293, 370, 322, 370, 294, 341, 1389, 4949, 307, 264, 9268, 570, 264, 9268, 797, 307, 264, 881, 18004, 2158, 322, 264, 558, 1252, 456, 366, 512, 4190, 4018, 472, 732, 1045, 493, 281, 2309, 586, 1184, 472, 307, 10477, 1564, 370, 294, 341, 1389, 264, 9268, 775, 406, 2514, 286, 914, 456, 307, 572, 9268], "avg_logprob": -0.16298076923076923, "compression_ratio": 1.8509316770186335, 
"no_speech_prob": 0.0, "words": [{"start": 2210.39, "end": 2210.81, "word": " seven", "probability": 0.256103515625}, {"start": 2210.81, "end": 2211.01, "word": " is", "probability": 0.935546875}, {"start": 2211.01, "end": 2211.29, "word": " one", "probability": 0.89111328125}, {"start": 2211.29, "end": 2211.77, "word": " nine", "probability": 0.3154296875}, {"start": 2211.77, "end": 2211.99, "word": " is", "probability": 0.91357421875}, {"start": 2211.99, "end": 2212.29, "word": " repeated", "probability": 0.84716796875}, {"start": 2212.29, "end": 2212.59, "word": " three", "probability": 0.9140625}, {"start": 2212.59, "end": 2213.11, "word": " times", "probability": 0.919921875}, {"start": 2213.11, "end": 2213.45, "word": " and", "probability": 0.89892578125}, {"start": 2213.45, "end": 2213.65, "word": " so", "probability": 0.947265625}, {"start": 2213.65, "end": 2213.85, "word": " on", "probability": 0.94921875}, {"start": 2213.85, "end": 2214.41, "word": " so", "probability": 0.482666015625}, {"start": 2214.41, "end": 2214.57, "word": " in", "probability": 0.89111328125}, {"start": 2214.57, "end": 2214.73, "word": " this", "probability": 0.9501953125}, {"start": 2214.73, "end": 2215.03, "word": " case", "probability": 0.91259765625}, {"start": 2215.03, "end": 2215.53, "word": " nine", "probability": 0.873046875}, {"start": 2215.53, "end": 2217.15, "word": " is", "probability": 0.69873046875}, {"start": 2217.15, "end": 2217.33, "word": " the", "probability": 0.7392578125}, {"start": 2217.33, "end": 2217.61, "word": " mood", "probability": 0.7314453125}, {"start": 2217.61, "end": 2218.35, "word": " because", "probability": 0.82568359375}, {"start": 2218.35, "end": 2218.53, "word": " the", "probability": 0.9033203125}, {"start": 2218.53, "end": 2218.67, "word": " mood", "probability": 0.96875}, {"start": 2218.67, "end": 2218.99, "word": " again", "probability": 0.9150390625}, {"start": 2218.99, "end": 2219.23, "word": " is", "probability": 0.93896484375}, 
{"start": 2219.23, "end": 2219.37, "word": " the", "probability": 0.91259765625}, {"start": 2219.37, "end": 2219.67, "word": " most", "probability": 0.904296875}, {"start": 2219.67, "end": 2220.29, "word": " frequent", "probability": 0.8974609375}, {"start": 2220.29, "end": 2221.29, "word": " value", "probability": 0.97265625}, {"start": 2221.29, "end": 2225.03, "word": " on", "probability": 0.52490234375}, {"start": 2225.03, "end": 2225.13, "word": " the", "probability": 0.916015625}, {"start": 2225.13, "end": 2225.33, "word": " right", "probability": 0.9150390625}, {"start": 2225.33, "end": 2225.67, "word": " side", "probability": 0.86669921875}, {"start": 2225.67, "end": 2226.25, "word": " there", "probability": 0.88037109375}, {"start": 2226.25, "end": 2226.95, "word": " are", "probability": 0.9423828125}, {"start": 2226.95, "end": 2227.29, "word": " some", "probability": 0.904296875}, {"start": 2227.29, "end": 2227.73, "word": " values", "probability": 0.966796875}, {"start": 2227.73, "end": 2228.07, "word": " zero", "probability": 0.69580078125}, {"start": 2228.07, "end": 2228.29, "word": " one", "probability": 0.82177734375}, {"start": 2228.29, "end": 2228.55, "word": " two", "probability": 0.939453125}, {"start": 2228.55, "end": 2228.79, "word": " three", "probability": 0.92724609375}, {"start": 2228.79, "end": 2228.95, "word": " up", "probability": 0.97119140625}, {"start": 2228.95, "end": 2229.07, "word": " to", "probability": 0.96630859375}, {"start": 2229.07, "end": 2229.41, "word": " six", "probability": 0.9482421875}, {"start": 2229.41, "end": 2230.29, "word": " now", "probability": 0.9169921875}, {"start": 2230.29, "end": 2230.53, "word": " each", "probability": 0.9443359375}, {"start": 2230.53, "end": 2230.81, "word": " one", "probability": 0.9296875}, {"start": 2230.81, "end": 2231.09, "word": " is", "probability": 0.94970703125}, {"start": 2231.09, "end": 2231.39, "word": " repeated", "probability": 0.9677734375}, {"start": 2231.39, "end": 
2231.97, "word": " once", "probability": 0.91064453125}, {"start": 2231.97, "end": 2232.83, "word": " so", "probability": 0.7392578125}, {"start": 2232.83, "end": 2232.95, "word": " in", "probability": 0.8935546875}, {"start": 2232.95, "end": 2233.11, "word": " this", "probability": 0.94677734375}, {"start": 2233.11, "end": 2233.41, "word": " case", "probability": 0.9052734375}, {"start": 2233.41, "end": 2233.69, "word": " the", "probability": 0.9169921875}, {"start": 2233.69, "end": 2233.85, "word": " mood", "probability": 0.98291015625}, {"start": 2233.85, "end": 2234.07, "word": " does", "probability": 0.97900390625}, {"start": 2234.07, "end": 2234.27, "word": " not", "probability": 0.9521484375}, {"start": 2234.27, "end": 2234.69, "word": " exist", "probability": 0.9638671875}, {"start": 2234.69, "end": 2235.03, "word": " I", "probability": 0.469482421875}, {"start": 2235.03, "end": 2235.15, "word": " mean", "probability": 0.966796875}, {"start": 2235.15, "end": 2235.35, "word": " there", "probability": 0.90625}, {"start": 2235.35, "end": 2235.49, "word": " is", "probability": 0.94384765625}, {"start": 2235.49, "end": 2235.67, "word": " no", "probability": 0.9423828125}, {"start": 2235.67, "end": 2236.01, "word": " mood", "probability": 0.96533203125}], "temperature": 1.0}, {"id": 83, "seek": 226825, "start": 2238.45, "end": 2268.25, "text": " So generally speaking, the mood is the value that you care most often. It can be used for numerical or categorical data, not affected by extreme values or outliers. Sometimes there is only one mood as this example. Sometimes the mood does not exist. Or sometimes there are several moods. 
And so that's the definitions for mean, median, and the mood.", "tokens": [407, 5101, 4124, 11, 264, 9268, 307, 264, 2158, 300, 291, 1127, 881, 2049, 13, 467, 393, 312, 1143, 337, 29054, 420, 19250, 804, 1412, 11, 406, 8028, 538, 8084, 4190, 420, 484, 23646, 13, 4803, 456, 307, 787, 472, 9268, 382, 341, 1365, 13, 4803, 264, 9268, 775, 406, 2514, 13, 1610, 2171, 456, 366, 2940, 9268, 82, 13, 400, 370, 300, 311, 264, 21988, 337, 914, 11, 26779, 11, 293, 264, 9268, 13], "avg_logprob": -0.14360608827126653, "compression_ratio": 1.674641148325359, "no_speech_prob": 0.0, "words": [{"start": 2238.45, "end": 2238.93, "word": " So", "probability": 0.88330078125}, {"start": 2238.93, "end": 2239.47, "word": " generally", "probability": 0.62548828125}, {"start": 2239.47, "end": 2239.99, "word": " speaking,", "probability": 0.87158203125}, {"start": 2240.31, "end": 2240.41, "word": " the", "probability": 0.916015625}, {"start": 2240.41, "end": 2240.65, "word": " mood", "probability": 0.377197265625}, {"start": 2240.65, "end": 2242.17, "word": " is", "probability": 0.93505859375}, {"start": 2242.17, "end": 2242.31, "word": " the", "probability": 0.90625}, {"start": 2242.31, "end": 2242.59, "word": " value", "probability": 0.98046875}, {"start": 2242.59, "end": 2242.85, "word": " that", "probability": 0.92919921875}, {"start": 2242.85, "end": 2242.99, "word": " you", "probability": 0.69921875}, {"start": 2242.99, "end": 2243.17, "word": " care", "probability": 0.89306640625}, {"start": 2243.17, "end": 2243.49, "word": " most", "probability": 0.87255859375}, {"start": 2243.49, "end": 2243.89, "word": " often.", "probability": 0.76171875}, {"start": 2244.49, "end": 2244.97, "word": " It", "probability": 0.95947265625}, {"start": 2244.97, "end": 2245.21, "word": " can", "probability": 0.9462890625}, {"start": 2245.21, "end": 2245.39, "word": " be", "probability": 0.95751953125}, {"start": 2245.39, "end": 2245.73, "word": " used", "probability": 0.92236328125}, {"start": 2245.73, 
"end": 2246.31, "word": " for", "probability": 0.9521484375}, {"start": 2246.31, "end": 2246.83, "word": " numerical", "probability": 0.93603515625}, {"start": 2246.83, "end": 2247.19, "word": " or", "probability": 0.96484375}, {"start": 2247.19, "end": 2247.65, "word": " categorical", "probability": 0.921875}, {"start": 2247.65, "end": 2248.11, "word": " data,", "probability": 0.94873046875}, {"start": 2248.73, "end": 2249.01, "word": " not", "probability": 0.93896484375}, {"start": 2249.01, "end": 2249.39, "word": " affected", "probability": 0.8291015625}, {"start": 2249.39, "end": 2249.79, "word": " by", "probability": 0.9755859375}, {"start": 2249.79, "end": 2250.51, "word": " extreme", "probability": 0.845703125}, {"start": 2250.51, "end": 2250.83, "word": " values", "probability": 0.96728515625}, {"start": 2250.83, "end": 2251.01, "word": " or", "probability": 0.93408203125}, {"start": 2251.01, "end": 2251.55, "word": " outliers.", "probability": 0.955810546875}, {"start": 2252.09, "end": 2252.49, "word": " Sometimes", "probability": 0.91650390625}, {"start": 2252.49, "end": 2252.81, "word": " there", "probability": 0.81689453125}, {"start": 2252.81, "end": 2252.97, "word": " is", "probability": 0.93994140625}, {"start": 2252.97, "end": 2253.23, "word": " only", "probability": 0.93212890625}, {"start": 2253.23, "end": 2253.45, "word": " one", "probability": 0.9248046875}, {"start": 2253.45, "end": 2253.71, "word": " mood", "probability": 0.97705078125}, {"start": 2253.71, "end": 2253.93, "word": " as", "probability": 0.53955078125}, {"start": 2253.93, "end": 2254.17, "word": " this", "probability": 0.81298828125}, {"start": 2254.17, "end": 2254.63, "word": " example.", "probability": 0.97314453125}, {"start": 2255.23, "end": 2255.65, "word": " Sometimes", "probability": 0.92578125}, {"start": 2255.65, "end": 2256.03, "word": " the", "probability": 0.8671875}, {"start": 2256.03, "end": 2256.15, "word": " mood", "probability": 0.98681640625}, {"start": 2256.15, 
"end": 2256.37, "word": " does", "probability": 0.97705078125}, {"start": 2256.37, "end": 2256.65, "word": " not", "probability": 0.95068359375}, {"start": 2256.65, "end": 2257.15, "word": " exist.", "probability": 0.96923828125}, {"start": 2258.37, "end": 2258.85, "word": " Or", "probability": 0.9619140625}, {"start": 2258.85, "end": 2259.27, "word": " sometimes", "probability": 0.94384765625}, {"start": 2259.27, "end": 2259.65, "word": " there", "probability": 0.88818359375}, {"start": 2259.65, "end": 2259.91, "word": " are", "probability": 0.9404296875}, {"start": 2259.91, "end": 2260.39, "word": " several", "probability": 0.91357421875}, {"start": 2260.39, "end": 2261.93, "word": " moods.", "probability": 0.94189453125}, {"start": 2262.65, "end": 2263.07, "word": " And", "probability": 0.464599609375}, {"start": 2263.07, "end": 2263.49, "word": " so", "probability": 0.9423828125}, {"start": 2263.49, "end": 2263.83, "word": " that's", "probability": 0.9423828125}, {"start": 2263.83, "end": 2263.97, "word": " the", "probability": 0.921875}, {"start": 2263.97, "end": 2264.39, "word": " definitions", "probability": 0.75146484375}, {"start": 2264.39, "end": 2264.81, "word": " for", "probability": 0.9443359375}, {"start": 2264.81, "end": 2265.19, "word": " mean,", "probability": 0.89453125}, {"start": 2266.43, "end": 2266.75, "word": " median,", "probability": 0.95166015625}, {"start": 2267.67, "end": 2267.97, "word": " and", "probability": 0.9521484375}, {"start": 2267.97, "end": 2268.07, "word": " the", "probability": 0.8759765625}, {"start": 2268.07, "end": 2268.25, "word": " mood.", "probability": 0.91162109375}], "temperature": 1.0}, {"id": 84, "seek": 229992, "start": 2271.0, "end": 2299.92, "text": " I will give just a numerical example to know how can we compute these measures. This data, simple data, just for illustration, we have house prices. We have five data points, $2 million. This is the price of house A, for example. House B price is 500,000. 
The other one is 300,000. And two houses have the same price as 100,000.", "tokens": [286, 486, 976, 445, 257, 29054, 1365, 281, 458, 577, 393, 321, 14722, 613, 8000, 13, 639, 1412, 11, 2199, 1412, 11, 445, 337, 22645, 11, 321, 362, 1782, 7901, 13, 492, 362, 1732, 1412, 2793, 11, 1848, 17, 2459, 13, 639, 307, 264, 3218, 295, 1782, 316, 11, 337, 1365, 13, 4928, 363, 3218, 307, 5923, 11, 1360, 13, 440, 661, 472, 307, 6641, 11, 1360, 13, 400, 732, 8078, 362, 264, 912, 3218, 382, 2319, 11, 1360, 13], "avg_logprob": -0.1764081731254672, "compression_ratio": 1.645, "no_speech_prob": 0.0, "words": [{"start": 2271.0, "end": 2271.26, "word": " I", "probability": 0.97265625}, {"start": 2271.26, "end": 2271.4, "word": " will", "probability": 0.8681640625}, {"start": 2271.4, "end": 2271.68, "word": " give", "probability": 0.87255859375}, {"start": 2271.68, "end": 2272.1, "word": " just", "probability": 0.908203125}, {"start": 2272.1, "end": 2272.24, "word": " a", "probability": 0.712890625}, {"start": 2272.24, "end": 2272.54, "word": " numerical", "probability": 0.95703125}, {"start": 2272.54, "end": 2273.08, "word": " example", "probability": 0.94287109375}, {"start": 2273.08, "end": 2273.66, "word": " to", "probability": 0.91259765625}, {"start": 2273.66, "end": 2273.88, "word": " know", "probability": 0.9091796875}, {"start": 2273.88, "end": 2274.1, "word": " how", "probability": 0.92724609375}, {"start": 2274.1, "end": 2274.36, "word": " can", "probability": 0.8828125}, {"start": 2274.36, "end": 2274.64, "word": " we", "probability": 0.96044921875}, {"start": 2274.64, "end": 2275.52, "word": " compute", "probability": 0.89306640625}, {"start": 2275.52, "end": 2275.98, "word": " these", "probability": 0.794921875}, {"start": 2275.98, "end": 2276.38, "word": " measures.", "probability": 0.83984375}, {"start": 2277.42, "end": 2277.78, "word": " This", "probability": 0.86474609375}, {"start": 2277.78, "end": 2278.2, "word": " data,", "probability": 0.86474609375}, {"start": 
2278.68, "end": 2279.04, "word": " simple", "probability": 0.802734375}, {"start": 2279.04, "end": 2279.34, "word": " data,", "probability": 0.92578125}, {"start": 2279.46, "end": 2279.68, "word": " just", "probability": 0.90771484375}, {"start": 2279.68, "end": 2280.0, "word": " for", "probability": 0.94775390625}, {"start": 2280.0, "end": 2280.54, "word": " illustration,", "probability": 0.94921875}, {"start": 2281.42, "end": 2281.54, "word": " we", "probability": 0.95556640625}, {"start": 2281.54, "end": 2281.92, "word": " have", "probability": 0.9501953125}, {"start": 2281.92, "end": 2283.06, "word": " house", "probability": 0.9033203125}, {"start": 2283.06, "end": 2283.5, "word": " prices.", "probability": 0.89990234375}, {"start": 2284.34, "end": 2285.06, "word": " We", "probability": 0.9677734375}, {"start": 2285.06, "end": 2285.26, "word": " have", "probability": 0.94921875}, {"start": 2285.26, "end": 2285.74, "word": " five", "probability": 0.87255859375}, {"start": 2285.74, "end": 2286.44, "word": " data", "probability": 0.94970703125}, {"start": 2286.44, "end": 2286.86, "word": " points,", "probability": 0.92578125}, {"start": 2287.42, "end": 2287.58, "word": " $2", "probability": 0.7447509765625}, {"start": 2287.58, "end": 2287.82, "word": " million.", "probability": 0.79541015625}, {"start": 2288.42, "end": 2288.98, "word": " This", "probability": 0.87109375}, {"start": 2288.98, "end": 2289.08, "word": " is", "probability": 0.9482421875}, {"start": 2289.08, "end": 2289.2, "word": " the", "probability": 0.90185546875}, {"start": 2289.2, "end": 2289.54, "word": " price", "probability": 0.912109375}, {"start": 2289.54, "end": 2289.92, "word": " of", "probability": 0.94873046875}, {"start": 2289.92, "end": 2290.58, "word": " house", "probability": 0.71484375}, {"start": 2290.58, "end": 2290.76, "word": " A,", "probability": 0.94873046875}, {"start": 2290.84, "end": 2290.94, "word": " for", "probability": 0.95068359375}, {"start": 2290.94, "end": 2291.3, 
"word": " example.", "probability": 0.974609375}, {"start": 2291.88, "end": 2292.2, "word": " House", "probability": 0.8427734375}, {"start": 2292.2, "end": 2292.4, "word": " B", "probability": 0.98876953125}, {"start": 2292.4, "end": 2292.84, "word": " price", "probability": 0.7998046875}, {"start": 2292.84, "end": 2293.24, "word": " is", "probability": 0.9560546875}, {"start": 2293.24, "end": 2294.12, "word": " 500", "probability": 0.86962890625}, {"start": 2294.12, "end": 2294.62, "word": ",000.", "probability": 0.950927734375}, {"start": 2295.26, "end": 2295.48, "word": " The", "probability": 0.873046875}, {"start": 2295.48, "end": 2295.72, "word": " other", "probability": 0.8857421875}, {"start": 2295.72, "end": 2295.88, "word": " one", "probability": 0.916015625}, {"start": 2295.88, "end": 2296.0, "word": " is", "probability": 0.576171875}, {"start": 2296.0, "end": 2296.34, "word": " 300", "probability": 0.9775390625}, {"start": 2296.34, "end": 2296.72, "word": ",000.", "probability": 0.998046875}, {"start": 2297.12, "end": 2297.38, "word": " And", "probability": 0.83203125}, {"start": 2297.38, "end": 2297.58, "word": " two", "probability": 0.91796875}, {"start": 2297.58, "end": 2297.94, "word": " houses", "probability": 0.84228515625}, {"start": 2297.94, "end": 2298.2, "word": " have", "probability": 0.921875}, {"start": 2298.2, "end": 2298.36, "word": " the", "probability": 0.8984375}, {"start": 2298.36, "end": 2298.52, "word": " same", "probability": 0.8974609375}, {"start": 2298.52, "end": 2298.9, "word": " price", "probability": 0.65576171875}, {"start": 2298.9, "end": 2299.12, "word": " as", "probability": 0.392578125}, {"start": 2299.12, "end": 2299.48, "word": " 100", "probability": 0.9033203125}, {"start": 2299.48, "end": 2299.92, "word": ",000.", "probability": 0.998046875}], "temperature": 1.0}, {"id": 85, "seek": 232703, "start": 2300.53, "end": 2327.03, "text": " Now, just to compute the mean, add these values or sum these values, which is three 
million, divide by number of houses here, there are five houses, so just three thousand divided by five, six hundred thousand. The median, the value in the median, after you arrange the data from smallest to largest,", "tokens": [823, 11, 445, 281, 14722, 264, 914, 11, 909, 613, 4190, 420, 2408, 613, 4190, 11, 597, 307, 1045, 2459, 11, 9845, 538, 1230, 295, 8078, 510, 11, 456, 366, 1732, 8078, 11, 370, 445, 1045, 4714, 6666, 538, 1732, 11, 2309, 3262, 4714, 13, 440, 26779, 11, 264, 2158, 294, 264, 26779, 11, 934, 291, 9424, 264, 1412, 490, 16998, 281, 6443, 11], "avg_logprob": -0.20877403846153847, "compression_ratio": 1.6358695652173914, "no_speech_prob": 0.0, "words": [{"start": 2300.53, "end": 2300.85, "word": " Now,", "probability": 0.91845703125}, {"start": 2301.01, "end": 2301.33, "word": " just", "probability": 0.9111328125}, {"start": 2301.33, "end": 2302.53, "word": " to", "probability": 0.96923828125}, {"start": 2302.53, "end": 2302.83, "word": " compute", "probability": 0.91650390625}, {"start": 2302.83, "end": 2303.01, "word": " the", "probability": 0.923828125}, {"start": 2303.01, "end": 2303.25, "word": " mean,", "probability": 0.90234375}, {"start": 2304.49, "end": 2305.61, "word": " add", "probability": 0.84814453125}, {"start": 2305.61, "end": 2305.85, "word": " these", "probability": 0.8330078125}, {"start": 2305.85, "end": 2306.23, "word": " values", "probability": 0.96533203125}, {"start": 2306.23, "end": 2306.45, "word": " or", "probability": 0.6962890625}, {"start": 2306.45, "end": 2306.85, "word": " sum", "probability": 0.865234375}, {"start": 2306.85, "end": 2307.33, "word": " these", "probability": 0.77197265625}, {"start": 2307.33, "end": 2307.91, "word": " values,", "probability": 0.96826171875}, {"start": 2308.69, "end": 2308.97, "word": " which", "probability": 0.9501953125}, {"start": 2308.97, "end": 2309.11, "word": " is", "probability": 0.9443359375}, {"start": 2309.11, "end": 2309.35, "word": " three", "probability": 
0.447509765625}, {"start": 2309.35, "end": 2309.67, "word": " million,", "probability": 0.88134765625}, {"start": 2310.67, "end": 2311.41, "word": " divide", "probability": 0.48193359375}, {"start": 2311.41, "end": 2311.85, "word": " by", "probability": 0.93505859375}, {"start": 2311.85, "end": 2312.71, "word": " number", "probability": 0.841796875}, {"start": 2312.71, "end": 2312.95, "word": " of", "probability": 0.97021484375}, {"start": 2312.95, "end": 2313.29, "word": " houses", "probability": 0.84423828125}, {"start": 2313.29, "end": 2313.77, "word": " here,", "probability": 0.491943359375}, {"start": 2313.87, "end": 2314.03, "word": " there", "probability": 0.90234375}, {"start": 2314.03, "end": 2314.21, "word": " are", "probability": 0.9482421875}, {"start": 2314.21, "end": 2314.53, "word": " five", "probability": 0.9072265625}, {"start": 2314.53, "end": 2314.95, "word": " houses,", "probability": 0.8349609375}, {"start": 2315.55, "end": 2315.69, "word": " so", "probability": 0.94873046875}, {"start": 2315.69, "end": 2316.09, "word": " just", "probability": 0.89013671875}, {"start": 2316.09, "end": 2316.73, "word": " three", "probability": 0.556640625}, {"start": 2316.73, "end": 2318.11, "word": " thousand", "probability": 0.489501953125}, {"start": 2318.11, "end": 2318.37, "word": " divided", "probability": 0.578125}, {"start": 2318.37, "end": 2318.55, "word": " by", "probability": 0.96484375}, {"start": 2318.55, "end": 2318.95, "word": " five,", "probability": 0.89306640625}, {"start": 2319.57, "end": 2319.95, "word": " six", "probability": 0.8505859375}, {"start": 2319.95, "end": 2320.31, "word": " hundred", "probability": 0.92138671875}, {"start": 2320.31, "end": 2320.67, "word": " thousand.", "probability": 0.89208984375}, {"start": 2321.81, "end": 2322.05, "word": " The", "probability": 0.85595703125}, {"start": 2322.05, "end": 2322.35, "word": " median,", "probability": 0.90576171875}, {"start": 2323.77, "end": 2323.99, "word": " the", "probability": 
0.90234375}, {"start": 2323.99, "end": 2324.17, "word": " value", "probability": 0.97607421875}, {"start": 2324.17, "end": 2324.29, "word": " in", "probability": 0.8828125}, {"start": 2324.29, "end": 2324.41, "word": " the", "probability": 0.92626953125}, {"start": 2324.41, "end": 2324.61, "word": " median,", "probability": 0.9404296875}, {"start": 2324.79, "end": 2325.07, "word": " after", "probability": 0.84716796875}, {"start": 2325.07, "end": 2325.21, "word": " you", "probability": 0.8955078125}, {"start": 2325.21, "end": 2325.53, "word": " arrange", "probability": 0.841796875}, {"start": 2325.53, "end": 2325.67, "word": " the", "probability": 0.91455078125}, {"start": 2325.67, "end": 2325.91, "word": " data", "probability": 0.94140625}, {"start": 2325.91, "end": 2326.15, "word": " from", "probability": 0.8740234375}, {"start": 2326.15, "end": 2326.49, "word": " smallest", "probability": 0.9462890625}, {"start": 2326.49, "end": 2326.73, "word": " to", "probability": 0.97119140625}, {"start": 2326.73, "end": 2327.03, "word": " largest,", "probability": 0.90576171875}], "temperature": 1.0}, {"id": 86, "seek": 235753, "start": 2328.95, "end": 2357.53, "text": " Or largest smallest. This data is already arranged from largest smallest or smallest large. It doesn't matter actually. So the median is $300,000. Make sense? Because there are two house prices above and two below. So the median is $300,000. 
Now if you look at these two values, the mean for this data", "tokens": [1610, 6443, 16998, 13, 639, 1412, 307, 1217, 18721, 490, 6443, 16998, 420, 16998, 2416, 13, 467, 1177, 380, 1871, 767, 13, 407, 264, 26779, 307, 1848, 12566, 11, 1360, 13, 4387, 2020, 30, 1436, 456, 366, 732, 1782, 7901, 3673, 293, 732, 2507, 13, 407, 264, 26779, 307, 1848, 12566, 11, 1360, 13, 823, 498, 291, 574, 412, 613, 732, 4190, 11, 264, 914, 337, 341, 1412], "avg_logprob": -0.1905570652173913, "compression_ratio": 1.5894736842105264, "no_speech_prob": 0.0, "words": [{"start": 2328.9500000000003, "end": 2329.59, "word": " Or", "probability": 0.71728515625}, {"start": 2329.59, "end": 2330.23, "word": " largest", "probability": 0.83056640625}, {"start": 2330.23, "end": 2330.79, "word": " smallest.", "probability": 0.4443359375}, {"start": 2331.19, "end": 2331.47, "word": " This", "probability": 0.849609375}, {"start": 2331.47, "end": 2331.75, "word": " data", "probability": 0.94189453125}, {"start": 2331.75, "end": 2332.01, "word": " is", "probability": 0.93359375}, {"start": 2332.01, "end": 2332.61, "word": " already", "probability": 0.91650390625}, {"start": 2332.61, "end": 2333.07, "word": " arranged", "probability": 0.8662109375}, {"start": 2333.07, "end": 2333.39, "word": " from", "probability": 0.81982421875}, {"start": 2333.39, "end": 2334.67, "word": " largest", "probability": 0.8818359375}, {"start": 2334.67, "end": 2335.13, "word": " smallest", "probability": 0.82470703125}, {"start": 2335.13, "end": 2335.41, "word": " or", "probability": 0.81787109375}, {"start": 2335.41, "end": 2335.79, "word": " smallest", "probability": 0.94384765625}, {"start": 2335.79, "end": 2336.15, "word": " large.", "probability": 0.88427734375}, {"start": 2336.17, "end": 2336.29, "word": " It", "probability": 0.609375}, {"start": 2336.29, "end": 2336.47, "word": " doesn't", "probability": 0.945556640625}, {"start": 2336.47, "end": 2336.69, "word": " matter", "probability": 0.861328125}, {"start": 
2336.69, "end": 2337.07, "word": " actually.", "probability": 0.74658203125}, {"start": 2337.71, "end": 2337.99, "word": " So", "probability": 0.94873046875}, {"start": 2337.99, "end": 2338.15, "word": " the", "probability": 0.8515625}, {"start": 2338.15, "end": 2338.35, "word": " median", "probability": 0.9697265625}, {"start": 2338.35, "end": 2338.59, "word": " is", "probability": 0.94873046875}, {"start": 2338.59, "end": 2338.97, "word": " $300", "probability": 0.77197265625}, {"start": 2338.97, "end": 2339.43, "word": ",000.", "probability": 0.9423828125}, {"start": 2339.99, "end": 2340.43, "word": " Make", "probability": 0.64501953125}, {"start": 2340.43, "end": 2340.75, "word": " sense?", "probability": 0.83154296875}, {"start": 2341.57, "end": 2342.21, "word": " Because", "probability": 0.9501953125}, {"start": 2342.21, "end": 2342.77, "word": " there", "probability": 0.8916015625}, {"start": 2342.77, "end": 2342.93, "word": " are", "probability": 0.93994140625}, {"start": 2342.93, "end": 2343.09, "word": " two", "probability": 0.88623046875}, {"start": 2343.09, "end": 2343.43, "word": " house", "probability": 0.669921875}, {"start": 2343.43, "end": 2344.49, "word": " prices", "probability": 0.8974609375}, {"start": 2344.49, "end": 2344.87, "word": " above", "probability": 0.259521484375}, {"start": 2344.87, "end": 2347.83, "word": " and", "probability": 0.85693359375}, {"start": 2347.83, "end": 2347.95, "word": " two", "probability": 0.9404296875}, {"start": 2347.95, "end": 2348.19, "word": " below.", "probability": 0.8974609375}, {"start": 2349.03, "end": 2349.31, "word": " So", "probability": 0.955078125}, {"start": 2349.31, "end": 2349.49, "word": " the", "probability": 0.908203125}, {"start": 2349.49, "end": 2349.73, "word": " median", "probability": 0.96826171875}, {"start": 2349.73, "end": 2350.55, "word": " is", "probability": 0.94775390625}, {"start": 2350.55, "end": 2351.03, "word": " $300", "probability": 0.8349609375}, {"start": 2351.03, "end": 
2351.51, "word": ",000.", "probability": 0.997314453125}, {"start": 2352.37, "end": 2352.59, "word": " Now", "probability": 0.95361328125}, {"start": 2352.59, "end": 2352.75, "word": " if", "probability": 0.6806640625}, {"start": 2352.75, "end": 2352.83, "word": " you", "probability": 0.921875}, {"start": 2352.83, "end": 2353.03, "word": " look", "probability": 0.96337890625}, {"start": 2353.03, "end": 2353.23, "word": " at", "probability": 0.966796875}, {"start": 2353.23, "end": 2353.43, "word": " these", "probability": 0.845703125}, {"start": 2353.43, "end": 2353.61, "word": " two", "probability": 0.93115234375}, {"start": 2353.61, "end": 2354.05, "word": " values,", "probability": 0.96630859375}, {"start": 2354.49, "end": 2354.65, "word": " the", "probability": 0.84326171875}, {"start": 2354.65, "end": 2354.87, "word": " mean", "probability": 0.9609375}, {"start": 2354.87, "end": 2356.99, "word": " for", "probability": 0.5087890625}, {"start": 2356.99, "end": 2357.23, "word": " this", "probability": 0.91455078125}, {"start": 2357.23, "end": 2357.53, "word": " data", "probability": 0.9404296875}], "temperature": 1.0}, {"id": 87, "seek": 238341, "start": 2358.89, "end": 2383.41, "text": " equals 600,000 and the median is 300,000. The mean is double the median. Do you think why there is a big difference in this data between the mean and the median? Which one? 
Two million dollars is extreme value, very large number.", "tokens": [6915, 11849, 11, 1360, 293, 264, 26779, 307, 6641, 11, 1360, 13, 440, 914, 307, 3834, 264, 26779, 13, 1144, 291, 519, 983, 456, 307, 257, 955, 2649, 294, 341, 1412, 1296, 264, 914, 293, 264, 26779, 30, 3013, 472, 30, 4453, 2459, 3808, 307, 8084, 2158, 11, 588, 2416, 1230, 13], "avg_logprob": -0.17482310758446748, "compression_ratio": 1.464968152866242, "no_speech_prob": 0.0, "words": [{"start": 2358.89, "end": 2359.43, "word": " equals", "probability": 0.53369140625}, {"start": 2359.43, "end": 2359.99, "word": " 600", "probability": 0.7900390625}, {"start": 2359.99, "end": 2360.43, "word": ",000", "probability": 0.831787109375}, {"start": 2360.43, "end": 2361.35, "word": " and", "probability": 0.73681640625}, {"start": 2361.35, "end": 2361.55, "word": " the", "probability": 0.916015625}, {"start": 2361.55, "end": 2361.77, "word": " median", "probability": 0.96728515625}, {"start": 2361.77, "end": 2362.05, "word": " is", "probability": 0.9423828125}, {"start": 2362.05, "end": 2362.47, "word": " 300", "probability": 0.9775390625}, {"start": 2362.47, "end": 2362.97, "word": ",000.", "probability": 0.996337890625}, {"start": 2364.35, "end": 2364.63, "word": " The", "probability": 0.83056640625}, {"start": 2364.63, "end": 2364.79, "word": " mean", "probability": 0.9501953125}, {"start": 2364.79, "end": 2364.97, "word": " is", "probability": 0.9501953125}, {"start": 2364.97, "end": 2365.21, "word": " double", "probability": 0.466552734375}, {"start": 2365.21, "end": 2366.69, "word": " the", "probability": 0.60302734375}, {"start": 2366.69, "end": 2367.05, "word": " median.", "probability": 0.95166015625}, {"start": 2369.11, "end": 2369.75, "word": " Do", "probability": 0.79833984375}, {"start": 2369.75, "end": 2369.81, "word": " you", "probability": 0.9619140625}, {"start": 2369.81, "end": 2370.03, "word": " think", "probability": 0.921875}, {"start": 2370.03, "end": 2370.41, "word": " why", 
"probability": 0.80126953125}, {"start": 2370.41, "end": 2370.71, "word": " there", "probability": 0.8310546875}, {"start": 2370.71, "end": 2370.85, "word": " is", "probability": 0.92822265625}, {"start": 2370.85, "end": 2370.97, "word": " a", "probability": 0.99609375}, {"start": 2370.97, "end": 2371.15, "word": " big", "probability": 0.92529296875}, {"start": 2371.15, "end": 2371.75, "word": " difference", "probability": 0.87109375}, {"start": 2371.75, "end": 2372.59, "word": " in", "probability": 0.92626953125}, {"start": 2372.59, "end": 2372.85, "word": " this", "probability": 0.94189453125}, {"start": 2372.85, "end": 2373.31, "word": " data", "probability": 0.927734375}, {"start": 2373.31, "end": 2374.81, "word": " between", "probability": 0.8525390625}, {"start": 2374.81, "end": 2375.35, "word": " the", "probability": 0.9267578125}, {"start": 2375.35, "end": 2375.49, "word": " mean", "probability": 0.9716796875}, {"start": 2375.49, "end": 2375.65, "word": " and", "probability": 0.93408203125}, {"start": 2375.65, "end": 2375.77, "word": " the", "probability": 0.888671875}, {"start": 2375.77, "end": 2376.03, "word": " median?", "probability": 0.9482421875}, {"start": 2376.19, "end": 2376.51, "word": " Which", "probability": 0.62109375}, {"start": 2376.51, "end": 2378.13, "word": " one?", "probability": 0.93115234375}, {"start": 2379.37, "end": 2380.01, "word": " Two", "probability": 0.6123046875}, {"start": 2380.01, "end": 2380.27, "word": " million", "probability": 0.8720703125}, {"start": 2380.27, "end": 2380.59, "word": " dollars", "probability": 0.90771484375}, {"start": 2380.59, "end": 2381.23, "word": " is", "probability": 0.197509765625}, {"start": 2381.23, "end": 2381.79, "word": " extreme", "probability": 0.779296875}, {"start": 2381.79, "end": 2382.29, "word": " value,", "probability": 0.97607421875}, {"start": 2382.51, "end": 2382.73, "word": " very", "probability": 0.74951171875}, {"start": 2382.73, "end": 2383.07, "word": " large", "probability": 
0.9658203125}, {"start": 2383.07, "end": 2383.41, "word": " number.", "probability": 0.939453125}], "temperature": 1.0}, {"id": 88, "seek": 241214, "start": 2384.32, "end": 2412.14, "text": " I mean, if you compare two million dollars with the other data sets or other data values, you will see there is a big difference between two million and five hundred. It's four times, plus about three hundred thousands, around seven times and so on. For this value, the mean is affected. Exactly. The median is resistant to outliers. It's affected but little bit.", "tokens": [286, 914, 11, 498, 291, 6794, 732, 2459, 3808, 365, 264, 661, 1412, 6352, 420, 661, 1412, 4190, 11, 291, 486, 536, 456, 307, 257, 955, 2649, 1296, 732, 2459, 293, 1732, 3262, 13, 467, 311, 1451, 1413, 11, 1804, 466, 1045, 3262, 5383, 11, 926, 3407, 1413, 293, 370, 322, 13, 1171, 341, 2158, 11, 264, 914, 307, 8028, 13, 2111, 46831, 13, 440, 26779, 307, 20383, 281, 484, 23646, 13, 467, 311, 8028, 457, 707, 857, 13], "avg_logprob": -0.19697265047580004, "compression_ratio": 1.6396396396396395, "no_speech_prob": 0.0, "words": [{"start": 2384.32, "end": 2384.62, "word": " I", "probability": 0.89599609375}, {"start": 2384.62, "end": 2384.78, "word": " mean,", "probability": 0.96630859375}, {"start": 2384.86, "end": 2384.96, "word": " if", "probability": 0.95458984375}, {"start": 2384.96, "end": 2385.1, "word": " you", "probability": 0.96435546875}, {"start": 2385.1, "end": 2385.52, "word": " compare", "probability": 0.9326171875}, {"start": 2385.52, "end": 2385.94, "word": " two", "probability": 0.4248046875}, {"start": 2385.94, "end": 2386.24, "word": " million", "probability": 0.86279296875}, {"start": 2386.24, "end": 2386.62, "word": " dollars", "probability": 0.94970703125}, {"start": 2386.62, "end": 2386.88, "word": " with", "probability": 0.9013671875}, {"start": 2386.88, "end": 2387.02, "word": " the", "probability": 0.79931640625}, {"start": 2387.02, "end": 2387.28, "word": " other", "probability": 
0.8974609375}, {"start": 2387.28, "end": 2387.58, "word": " data", "probability": 0.55322265625}, {"start": 2387.58, "end": 2388.06, "word": " sets", "probability": 0.91259765625}, {"start": 2388.06, "end": 2388.5, "word": " or", "probability": 0.68212890625}, {"start": 2388.5, "end": 2388.86, "word": " other", "probability": 0.59521484375}, {"start": 2388.86, "end": 2389.24, "word": " data", "probability": 0.94970703125}, {"start": 2389.24, "end": 2389.76, "word": " values,", "probability": 0.96484375}, {"start": 2390.16, "end": 2390.28, "word": " you", "probability": 0.94873046875}, {"start": 2390.28, "end": 2390.44, "word": " will", "probability": 0.873046875}, {"start": 2390.44, "end": 2390.62, "word": " see", "probability": 0.92626953125}, {"start": 2390.62, "end": 2390.84, "word": " there", "probability": 0.88916015625}, {"start": 2390.84, "end": 2390.98, "word": " is", "probability": 0.935546875}, {"start": 2390.98, "end": 2391.14, "word": " a", "probability": 0.99853515625}, {"start": 2391.14, "end": 2391.32, "word": " big", "probability": 0.92138671875}, {"start": 2391.32, "end": 2391.82, "word": " difference", "probability": 0.859375}, {"start": 2391.82, "end": 2392.14, "word": " between", "probability": 0.87744140625}, {"start": 2392.14, "end": 2392.38, "word": " two", "probability": 0.5966796875}, {"start": 2392.38, "end": 2392.54, "word": " million", "probability": 0.8642578125}, {"start": 2392.54, "end": 2392.7, "word": " and", "probability": 0.7734375}, {"start": 2392.7, "end": 2392.9, "word": " five", "probability": 0.79052734375}, {"start": 2392.9, "end": 2393.26, "word": " hundred.", "probability": 0.91455078125}, {"start": 2393.62, "end": 2393.98, "word": " It's", "probability": 0.88818359375}, {"start": 2393.98, "end": 2394.24, "word": " four", "probability": 0.9345703125}, {"start": 2394.24, "end": 2394.74, "word": " times,", "probability": 0.92822265625}, {"start": 2395.38, "end": 2395.54, "word": " plus", "probability": 0.78955078125}, 
{"start": 2395.54, "end": 2395.78, "word": " about", "probability": 0.90283203125}, {"start": 2395.78, "end": 2395.98, "word": " three", "probability": 0.65625}, {"start": 2395.98, "end": 2396.28, "word": " hundred", "probability": 0.88720703125}, {"start": 2396.28, "end": 2396.66, "word": " thousands,", "probability": 0.43212890625}, {"start": 2396.8, "end": 2397.04, "word": " around", "probability": 0.94189453125}, {"start": 2397.04, "end": 2397.68, "word": " seven", "probability": 0.896484375}, {"start": 2397.68, "end": 2398.08, "word": " times", "probability": 0.9208984375}, {"start": 2398.08, "end": 2398.28, "word": " and", "probability": 0.63720703125}, {"start": 2398.28, "end": 2398.44, "word": " so", "probability": 0.95166015625}, {"start": 2398.44, "end": 2398.62, "word": " on.", "probability": 0.94775390625}, {"start": 2399.2, "end": 2399.5, "word": " For", "probability": 0.95947265625}, {"start": 2399.5, "end": 2399.78, "word": " this", "probability": 0.94677734375}, {"start": 2399.78, "end": 2400.2, "word": " value,", "probability": 0.97607421875}, {"start": 2401.76, "end": 2402.02, "word": " the", "probability": 0.91748046875}, {"start": 2402.02, "end": 2402.28, "word": " mean", "probability": 0.974609375}, {"start": 2402.28, "end": 2403.9, "word": " is", "probability": 0.919921875}, {"start": 2403.9, "end": 2404.3, "word": " affected.", "probability": 0.87353515625}, {"start": 2406.56, "end": 2407.16, "word": " Exactly.", "probability": 0.5433349609375}, {"start": 2407.48, "end": 2407.66, "word": " The", "probability": 0.89013671875}, {"start": 2407.66, "end": 2407.88, "word": " median", "probability": 0.9677734375}, {"start": 2407.88, "end": 2408.2, "word": " is", "probability": 0.94580078125}, {"start": 2408.2, "end": 2408.74, "word": " resistant", "probability": 0.94677734375}, {"start": 2408.74, "end": 2409.4, "word": " to", "probability": 0.96728515625}, {"start": 2409.4, "end": 2409.9, "word": " outliers.", "probability": 0.94677734375}, 
{"start": 2410.64, "end": 2410.94, "word": " It's", "probability": 0.968505859375}, {"start": 2410.94, "end": 2411.26, "word": " affected", "probability": 0.85498046875}, {"start": 2411.26, "end": 2411.5, "word": " but", "probability": 0.50830078125}, {"start": 2411.5, "end": 2411.74, "word": " little", "probability": 0.6181640625}, {"start": 2411.74, "end": 2412.14, "word": " bit.", "probability": 0.9501953125}], "temperature": 1.0}, {"id": 89, "seek": 243834, "start": 2413.34, "end": 2438.34, "text": " For this reason, we have to use the median. So the median makes more sense than using the mean. The mode is just the most frequent value, which is 100,000, because this value is repeated twice. So that's the whole story for central tendency measures, mean, median, and 1.", "tokens": [1171, 341, 1778, 11, 321, 362, 281, 764, 264, 26779, 13, 407, 264, 26779, 1669, 544, 2020, 813, 1228, 264, 914, 13, 440, 4391, 307, 445, 264, 881, 18004, 2158, 11, 597, 307, 2319, 11, 1360, 11, 570, 341, 2158, 307, 10477, 6091, 13, 407, 300, 311, 264, 1379, 1657, 337, 5777, 18187, 8000, 11, 914, 11, 26779, 11, 293, 502, 13], "avg_logprob": -0.18055555933997744, "compression_ratio": 1.5195530726256983, "no_speech_prob": 0.0, "words": [{"start": 2413.34, "end": 2413.78, "word": " For", "probability": 0.67236328125}, {"start": 2413.78, "end": 2414.02, "word": " this", "probability": 0.93359375}, {"start": 2414.02, "end": 2414.24, "word": " reason,", "probability": 0.95947265625}, {"start": 2414.44, "end": 2415.04, "word": " we", "probability": 0.94482421875}, {"start": 2415.04, "end": 2415.26, "word": " have", "probability": 0.81201171875}, {"start": 2415.26, "end": 2415.44, "word": " to", "probability": 0.96923828125}, {"start": 2415.44, "end": 2415.8, "word": " use", "probability": 0.87548828125}, {"start": 2415.8, "end": 2416.82, "word": " the", "probability": 0.71533203125}, {"start": 2416.82, "end": 2417.1, "word": " median.", "probability": 0.955078125}, {"start": 2417.3, "end": 
2417.4, "word": " So", "probability": 0.91357421875}, {"start": 2417.4, "end": 2417.56, "word": " the", "probability": 0.68896484375}, {"start": 2417.56, "end": 2417.76, "word": " median", "probability": 0.96630859375}, {"start": 2417.76, "end": 2418.52, "word": " makes", "probability": 0.84228515625}, {"start": 2418.52, "end": 2418.82, "word": " more", "probability": 0.9375}, {"start": 2418.82, "end": 2419.28, "word": " sense", "probability": 0.83154296875}, {"start": 2419.28, "end": 2420.0, "word": " than", "probability": 0.822265625}, {"start": 2420.0, "end": 2420.44, "word": " using", "probability": 0.9326171875}, {"start": 2420.44, "end": 2420.72, "word": " the", "probability": 0.9130859375}, {"start": 2420.72, "end": 2420.86, "word": " mean.", "probability": 0.97265625}, {"start": 2421.96, "end": 2422.46, "word": " The", "probability": 0.8681640625}, {"start": 2422.46, "end": 2422.68, "word": " mode", "probability": 0.6650390625}, {"start": 2422.68, "end": 2422.92, "word": " is", "probability": 0.943359375}, {"start": 2422.92, "end": 2423.18, "word": " just", "probability": 0.9111328125}, {"start": 2423.18, "end": 2423.38, "word": " the", "probability": 0.90966796875}, {"start": 2423.38, "end": 2423.6, "word": " most", "probability": 0.9013671875}, {"start": 2423.6, "end": 2423.98, "word": " frequent", "probability": 0.89453125}, {"start": 2423.98, "end": 2424.48, "word": " value,", "probability": 0.97607421875}, {"start": 2424.66, "end": 2424.72, "word": " which", "probability": 0.9541015625}, {"start": 2424.72, "end": 2425.0, "word": " is", "probability": 0.9482421875}, {"start": 2425.0, "end": 2425.56, "word": " 100", "probability": 0.66455078125}, {"start": 2425.56, "end": 2426.1, "word": ",000,", "probability": 0.82080078125}, {"start": 2426.38, "end": 2426.84, "word": " because", "probability": 0.90185546875}, {"start": 2426.84, "end": 2427.16, "word": " this", "probability": 0.943359375}, {"start": 2427.16, "end": 2427.52, "word": " value", 
"probability": 0.97509765625}, {"start": 2427.52, "end": 2428.32, "word": " is", "probability": 0.93603515625}, {"start": 2428.32, "end": 2428.72, "word": " repeated", "probability": 0.96923828125}, {"start": 2428.72, "end": 2429.8, "word": " twice.", "probability": 0.9345703125}, {"start": 2430.94, "end": 2431.5, "word": " So", "probability": 0.94140625}, {"start": 2431.5, "end": 2431.8, "word": " that's", "probability": 0.943359375}, {"start": 2431.8, "end": 2432.04, "word": " the", "probability": 0.9208984375}, {"start": 2432.04, "end": 2432.26, "word": " whole", "probability": 0.88623046875}, {"start": 2432.26, "end": 2432.6, "word": " story", "probability": 0.95556640625}, {"start": 2432.6, "end": 2433.04, "word": " for", "probability": 0.94580078125}, {"start": 2433.04, "end": 2433.82, "word": " central", "probability": 0.83642578125}, {"start": 2433.82, "end": 2434.28, "word": " tendency", "probability": 0.85546875}, {"start": 2434.28, "end": 2434.66, "word": " measures,", "probability": 0.8525390625}, {"start": 2435.14, "end": 2435.4, "word": " mean,", "probability": 0.94580078125}, {"start": 2436.28, "end": 2436.7, "word": " median,", "probability": 0.9677734375}, {"start": 2437.1, "end": 2438.14, "word": " and", "probability": 0.94287109375}, {"start": 2438.14, "end": 2438.34, "word": " 1.", "probability": 0.172119140625}], "temperature": 1.0}, {"id": 90, "seek": 246623, "start": 2440.28, "end": 2466.24, "text": " Now the question again is which measure to use? The mean is generally used. The most common center tendency is the mean. We can use it or we should use it unless extreme values exist. 
I mean if the data set has no outliers or extreme values, we have to use the mean instead of the median.", "tokens": [823, 264, 1168, 797, 307, 597, 3481, 281, 764, 30, 440, 914, 307, 5101, 1143, 13, 440, 881, 2689, 3056, 18187, 307, 264, 914, 13, 492, 393, 764, 309, 420, 321, 820, 764, 309, 5969, 8084, 4190, 2514, 13, 286, 914, 498, 264, 1412, 992, 575, 572, 484, 23646, 420, 8084, 4190, 11, 321, 362, 281, 764, 264, 914, 2602, 295, 264, 26779, 13], "avg_logprob": -0.1528846153846154, "compression_ratio": 1.6055555555555556, "no_speech_prob": 0.0, "words": [{"start": 2440.28, "end": 2440.54, "word": " Now", "probability": 0.861328125}, {"start": 2440.54, "end": 2440.72, "word": " the", "probability": 0.65478515625}, {"start": 2440.72, "end": 2441.02, "word": " question", "probability": 0.9248046875}, {"start": 2441.02, "end": 2441.44, "word": " again", "probability": 0.9248046875}, {"start": 2441.44, "end": 2441.88, "word": " is", "probability": 0.9306640625}, {"start": 2441.88, "end": 2442.2, "word": " which", "probability": 0.568359375}, {"start": 2442.2, "end": 2442.74, "word": " measure", "probability": 0.78857421875}, {"start": 2442.74, "end": 2443.0, "word": " to", "probability": 0.95556640625}, {"start": 2443.0, "end": 2443.32, "word": " use?", "probability": 0.87646484375}, {"start": 2445.3, "end": 2445.48, "word": " The", "probability": 0.8603515625}, {"start": 2445.48, "end": 2445.64, "word": " mean", "probability": 0.9404296875}, {"start": 2445.64, "end": 2445.84, "word": " is", "probability": 0.95556640625}, {"start": 2445.84, "end": 2446.24, "word": " generally", "probability": 0.90234375}, {"start": 2446.24, "end": 2446.7, "word": " used.", "probability": 0.919921875}, {"start": 2447.66, "end": 2447.78, "word": " The", "probability": 0.86865234375}, {"start": 2447.78, "end": 2448.06, "word": " most", "probability": 0.900390625}, {"start": 2448.06, "end": 2448.44, "word": " common", "probability": 0.890625}, {"start": 2448.44, "end": 2448.88, "word": " 
center", "probability": 0.59765625}, {"start": 2448.88, "end": 2449.28, "word": " tendency", "probability": 0.66552734375}, {"start": 2449.28, "end": 2449.56, "word": " is", "probability": 0.94970703125}, {"start": 2449.56, "end": 2449.68, "word": " the", "probability": 0.88818359375}, {"start": 2449.68, "end": 2449.82, "word": " mean.", "probability": 0.9541015625}, {"start": 2450.2, "end": 2450.74, "word": " We", "probability": 0.9326171875}, {"start": 2450.74, "end": 2451.12, "word": " can", "probability": 0.94384765625}, {"start": 2451.12, "end": 2451.5, "word": " use", "probability": 0.86865234375}, {"start": 2451.5, "end": 2451.72, "word": " it", "probability": 0.9560546875}, {"start": 2451.72, "end": 2452.26, "word": " or", "probability": 0.77783203125}, {"start": 2452.26, "end": 2452.46, "word": " we", "probability": 0.95654296875}, {"start": 2452.46, "end": 2452.74, "word": " should", "probability": 0.97021484375}, {"start": 2452.74, "end": 2453.06, "word": " use", "probability": 0.8818359375}, {"start": 2453.06, "end": 2453.42, "word": " it", "probability": 0.95654296875}, {"start": 2453.42, "end": 2454.18, "word": " unless", "probability": 0.75439453125}, {"start": 2454.18, "end": 2455.6, "word": " extreme", "probability": 0.8662109375}, {"start": 2455.6, "end": 2456.3, "word": " values", "probability": 0.9716796875}, {"start": 2456.3, "end": 2456.88, "word": " exist.", "probability": 0.966796875}, {"start": 2458.56, "end": 2458.76, "word": " I", "probability": 0.95166015625}, {"start": 2458.76, "end": 2458.94, "word": " mean", "probability": 0.96484375}, {"start": 2458.94, "end": 2459.5, "word": " if", "probability": 0.56591796875}, {"start": 2459.5, "end": 2459.7, "word": " the", "probability": 0.92041015625}, {"start": 2459.7, "end": 2459.92, "word": " data", "probability": 0.5517578125}, {"start": 2459.92, "end": 2460.28, "word": " set", "probability": 0.9365234375}, {"start": 2460.28, "end": 2461.04, "word": " has", "probability": 0.9482421875}, 
{"start": 2461.04, "end": 2461.26, "word": " no", "probability": 0.9541015625}, {"start": 2461.26, "end": 2461.74, "word": " outliers", "probability": 0.926513671875}, {"start": 2461.74, "end": 2461.88, "word": " or", "probability": 0.90966796875}, {"start": 2461.88, "end": 2462.18, "word": " extreme", "probability": 0.86474609375}, {"start": 2462.18, "end": 2462.68, "word": " values,", "probability": 0.97119140625}, {"start": 2463.22, "end": 2463.5, "word": " we", "probability": 0.9521484375}, {"start": 2463.5, "end": 2463.8, "word": " have", "probability": 0.94921875}, {"start": 2463.8, "end": 2463.96, "word": " to", "probability": 0.9697265625}, {"start": 2463.96, "end": 2464.16, "word": " use", "probability": 0.88330078125}, {"start": 2464.16, "end": 2464.34, "word": " the", "probability": 0.9267578125}, {"start": 2464.34, "end": 2464.46, "word": " mean", "probability": 0.9638671875}, {"start": 2464.46, "end": 2465.62, "word": " instead", "probability": 0.748046875}, {"start": 2465.62, "end": 2465.86, "word": " of", "probability": 0.96728515625}, {"start": 2465.86, "end": 2466.02, "word": " the", "probability": 0.92529296875}, {"start": 2466.02, "end": 2466.24, "word": " median.", "probability": 0.68359375}], "temperature": 1.0}, {"id": 91, "seek": 249619, "start": 2469.81, "end": 2496.19, "text": " The median is often used since the median is not sensitive to extreme values. I mean, the median is resistant to outliers. It remains nearly in the same position if the dataset has outliers. But the median will be affected either to the right or to the left tail. 
So we have to use the median if the data has extreme values.", "tokens": [440, 26779, 307, 2049, 1143, 1670, 264, 26779, 307, 406, 9477, 281, 8084, 4190, 13, 286, 914, 11, 264, 26779, 307, 20383, 281, 484, 23646, 13, 467, 7023, 6217, 294, 264, 912, 2535, 498, 264, 28872, 575, 484, 23646, 13, 583, 264, 26779, 486, 312, 8028, 2139, 281, 264, 558, 420, 281, 264, 1411, 6838, 13, 407, 321, 362, 281, 764, 264, 26779, 498, 264, 1412, 575, 8084, 4190, 13], "avg_logprob": -0.11333626865501135, "compression_ratio": 1.8258426966292134, "no_speech_prob": 0.0, "words": [{"start": 2469.81, "end": 2470.15, "word": " The", "probability": 0.75732421875}, {"start": 2470.15, "end": 2470.43, "word": " median", "probability": 0.96435546875}, {"start": 2470.43, "end": 2471.03, "word": " is", "probability": 0.94677734375}, {"start": 2471.03, "end": 2471.41, "word": " often", "probability": 0.8037109375}, {"start": 2471.41, "end": 2471.91, "word": " used", "probability": 0.92431640625}, {"start": 2471.91, "end": 2473.55, "word": " since", "probability": 0.71728515625}, {"start": 2473.55, "end": 2473.77, "word": " the", "probability": 0.91259765625}, {"start": 2473.77, "end": 2474.07, "word": " median", "probability": 0.96484375}, {"start": 2474.07, "end": 2474.33, "word": " is", "probability": 0.94580078125}, {"start": 2474.33, "end": 2474.67, "word": " not", "probability": 0.9501953125}, {"start": 2474.67, "end": 2475.63, "word": " sensitive", "probability": 0.90234375}, {"start": 2475.63, "end": 2475.91, "word": " to", "probability": 0.96240234375}, {"start": 2475.91, "end": 2476.25, "word": " extreme", "probability": 0.8525390625}, {"start": 2476.25, "end": 2476.69, "word": " values.", "probability": 0.97265625}, {"start": 2476.89, "end": 2476.91, "word": " I", "probability": 0.85498046875}, {"start": 2476.91, "end": 2477.07, "word": " mean,", "probability": 0.9638671875}, {"start": 2477.27, "end": 2477.61, "word": " the", "probability": 0.91845703125}, {"start": 2477.61, "end": 
2477.85, "word": " median", "probability": 0.966796875}, {"start": 2477.85, "end": 2478.33, "word": " is", "probability": 0.9462890625}, {"start": 2478.33, "end": 2479.09, "word": " resistant", "probability": 0.9150390625}, {"start": 2479.09, "end": 2479.37, "word": " to", "probability": 0.9560546875}, {"start": 2479.37, "end": 2479.71, "word": " outliers.", "probability": 0.9462890625}, {"start": 2480.09, "end": 2480.39, "word": " It", "probability": 0.951171875}, {"start": 2480.39, "end": 2480.85, "word": " remains", "probability": 0.84814453125}, {"start": 2480.85, "end": 2481.67, "word": " nearly", "probability": 0.89208984375}, {"start": 2481.67, "end": 2481.89, "word": " in", "probability": 0.94873046875}, {"start": 2481.89, "end": 2482.03, "word": " the", "probability": 0.91943359375}, {"start": 2482.03, "end": 2482.25, "word": " same", "probability": 0.8759765625}, {"start": 2482.25, "end": 2482.69, "word": " position", "probability": 0.9375}, {"start": 2482.69, "end": 2483.55, "word": " if", "probability": 0.8525390625}, {"start": 2483.55, "end": 2483.71, "word": " the", "probability": 0.92138671875}, {"start": 2483.71, "end": 2484.09, "word": " dataset", "probability": 0.63818359375}, {"start": 2484.09, "end": 2484.53, "word": " has", "probability": 0.94775390625}, {"start": 2484.53, "end": 2485.11, "word": " outliers.", "probability": 0.95654296875}, {"start": 2485.99, "end": 2486.29, "word": " But", "probability": 0.94189453125}, {"start": 2486.29, "end": 2486.49, "word": " the", "probability": 0.87353515625}, {"start": 2486.49, "end": 2486.77, "word": " median", "probability": 0.97021484375}, {"start": 2486.77, "end": 2487.21, "word": " will", "probability": 0.87841796875}, {"start": 2487.21, "end": 2487.37, "word": " be", "probability": 0.94921875}, {"start": 2487.37, "end": 2487.81, "word": " affected", "probability": 0.84521484375}, {"start": 2487.81, "end": 2488.25, "word": " either", "probability": 0.8193359375}, {"start": 2488.25, "end": 2488.89, 
"word": " to", "probability": 0.96435546875}, {"start": 2488.89, "end": 2489.09, "word": " the", "probability": 0.9140625}, {"start": 2489.09, "end": 2489.33, "word": " right", "probability": 0.92041015625}, {"start": 2489.33, "end": 2489.63, "word": " or", "probability": 0.962890625}, {"start": 2489.63, "end": 2489.85, "word": " to", "probability": 0.95068359375}, {"start": 2489.85, "end": 2490.03, "word": " the", "probability": 0.91064453125}, {"start": 2490.03, "end": 2490.29, "word": " left", "probability": 0.947265625}, {"start": 2490.29, "end": 2491.41, "word": " tail.", "probability": 0.6806640625}, {"start": 2491.53, "end": 2491.81, "word": " So", "probability": 0.9482421875}, {"start": 2491.81, "end": 2492.15, "word": " we", "probability": 0.5986328125}, {"start": 2492.15, "end": 2492.33, "word": " have", "probability": 0.94775390625}, {"start": 2492.33, "end": 2492.47, "word": " to", "probability": 0.96923828125}, {"start": 2492.47, "end": 2492.71, "word": " use", "probability": 0.8759765625}, {"start": 2492.71, "end": 2493.21, "word": " the", "probability": 0.91455078125}, {"start": 2493.21, "end": 2493.45, "word": " median", "probability": 0.96728515625}, {"start": 2493.45, "end": 2494.17, "word": " if", "probability": 0.93212890625}, {"start": 2494.17, "end": 2494.35, "word": " the", "probability": 0.91845703125}, {"start": 2494.35, "end": 2494.61, "word": " data", "probability": 0.9345703125}, {"start": 2494.61, "end": 2495.09, "word": " has", "probability": 0.94580078125}, {"start": 2495.09, "end": 2495.77, "word": " extreme", "probability": 0.85498046875}, {"start": 2495.77, "end": 2496.19, "word": " values.", "probability": 0.9736328125}], "temperature": 1.0}, {"id": 92, "seek": 252682, "start": 2498.58, "end": 2526.82, "text": " For example, median home prices for the previous one may be reported for a region that is less sensitive to outliers. So the mean is more sensitive to outliers than the median. 
Sometimes, I mean in some situations, it makes sense to report both the mean and the median. Just say the mean for this data for home prices is 600,000 while the median is 300,000.", "tokens": [1171, 1365, 11, 26779, 1280, 7901, 337, 264, 3894, 472, 815, 312, 7055, 337, 257, 4458, 300, 307, 1570, 9477, 281, 484, 23646, 13, 407, 264, 914, 307, 544, 9477, 281, 484, 23646, 813, 264, 26779, 13, 4803, 11, 286, 914, 294, 512, 6851, 11, 309, 1669, 2020, 281, 2275, 1293, 264, 914, 293, 264, 26779, 13, 1449, 584, 264, 914, 337, 341, 1412, 337, 1280, 7901, 307, 11849, 11, 1360, 1339, 264, 26779, 307, 6641, 11, 1360, 13], "avg_logprob": -0.14365233946591616, "compression_ratio": 1.7810945273631842, "no_speech_prob": 0.0, "words": [{"start": 2498.58, "end": 2498.88, "word": " For", "probability": 0.87939453125}, {"start": 2498.88, "end": 2499.26, "word": " example,", "probability": 0.96923828125}, {"start": 2499.52, "end": 2499.8, "word": " median", "probability": 0.919921875}, {"start": 2499.8, "end": 2500.06, "word": " home", "probability": 0.76806640625}, {"start": 2500.06, "end": 2500.48, "word": " prices", "probability": 0.72509765625}, {"start": 2500.48, "end": 2501.38, "word": " for", "probability": 0.6796875}, {"start": 2501.38, "end": 2501.5, "word": " the", "probability": 0.92236328125}, {"start": 2501.5, "end": 2501.72, "word": " previous", "probability": 0.78173828125}, {"start": 2501.72, "end": 2502.12, "word": " one", "probability": 0.9326171875}, {"start": 2502.12, "end": 2502.92, "word": " may", "probability": 0.7490234375}, {"start": 2502.92, "end": 2503.1, "word": " be", "probability": 0.9462890625}, {"start": 2503.1, "end": 2503.64, "word": " reported", "probability": 0.8740234375}, {"start": 2503.64, "end": 2503.96, "word": " for", "probability": 0.93994140625}, {"start": 2503.96, "end": 2504.1, "word": " a", "probability": 0.9873046875}, {"start": 2504.1, "end": 2504.38, "word": " region", "probability": 0.9453125}, {"start": 2504.38, "end": 2504.6, 
"word": " that", "probability": 0.408203125}, {"start": 2504.6, "end": 2504.74, "word": " is", "probability": 0.916015625}, {"start": 2504.74, "end": 2505.04, "word": " less", "probability": 0.94873046875}, {"start": 2505.04, "end": 2505.82, "word": " sensitive", "probability": 0.904296875}, {"start": 2505.82, "end": 2506.62, "word": " to", "probability": 0.9580078125}, {"start": 2506.62, "end": 2506.96, "word": " outliers.", "probability": 0.94287109375}, {"start": 2507.46, "end": 2507.78, "word": " So", "probability": 0.91259765625}, {"start": 2507.78, "end": 2508.0, "word": " the", "probability": 0.7099609375}, {"start": 2508.0, "end": 2508.18, "word": " mean", "probability": 0.63134765625}, {"start": 2508.18, "end": 2508.58, "word": " is", "probability": 0.94921875}, {"start": 2508.58, "end": 2508.82, "word": " more", "probability": 0.9384765625}, {"start": 2508.82, "end": 2509.3, "word": " sensitive", "probability": 0.88525390625}, {"start": 2509.3, "end": 2510.6, "word": " to", "probability": 0.96337890625}, {"start": 2510.6, "end": 2511.1, "word": " outliers", "probability": 0.96240234375}, {"start": 2511.1, "end": 2511.72, "word": " than", "probability": 0.9130859375}, {"start": 2511.72, "end": 2512.88, "word": " the", "probability": 0.82763671875}, {"start": 2512.88, "end": 2513.1, "word": " median.", "probability": 0.955078125}, {"start": 2514.24, "end": 2514.8, "word": " Sometimes,", "probability": 0.86572265625}, {"start": 2515.12, "end": 2515.26, "word": " I", "probability": 0.9580078125}, {"start": 2515.26, "end": 2515.4, "word": " mean", "probability": 0.97021484375}, {"start": 2515.4, "end": 2515.56, "word": " in", "probability": 0.51953125}, {"start": 2515.56, "end": 2515.74, "word": " some", "probability": 0.89208984375}, {"start": 2515.74, "end": 2516.3, "word": " situations,", "probability": 0.77294921875}, {"start": 2516.46, "end": 2516.52, "word": " it", "probability": 0.9169921875}, {"start": 2516.52, "end": 2516.7, "word": " makes", 
"probability": 0.82275390625}, {"start": 2516.7, "end": 2516.96, "word": " sense", "probability": 0.79443359375}, {"start": 2516.96, "end": 2517.28, "word": " to", "probability": 0.96337890625}, {"start": 2517.28, "end": 2517.72, "word": " report", "probability": 0.93994140625}, {"start": 2517.72, "end": 2518.16, "word": " both", "probability": 0.87109375}, {"start": 2518.16, "end": 2518.36, "word": " the", "probability": 0.90673828125}, {"start": 2518.36, "end": 2518.5, "word": " mean", "probability": 0.95751953125}, {"start": 2518.5, "end": 2518.64, "word": " and", "probability": 0.9140625}, {"start": 2518.64, "end": 2518.76, "word": " the", "probability": 0.88232421875}, {"start": 2518.76, "end": 2518.86, "word": " median.", "probability": 0.96142578125}, {"start": 2518.96, "end": 2519.12, "word": " Just", "probability": 0.84521484375}, {"start": 2519.12, "end": 2519.42, "word": " say", "probability": 0.90478515625}, {"start": 2519.42, "end": 2520.42, "word": " the", "probability": 0.63037109375}, {"start": 2520.42, "end": 2520.66, "word": " mean", "probability": 0.970703125}, {"start": 2520.66, "end": 2520.94, "word": " for", "probability": 0.93994140625}, {"start": 2520.94, "end": 2521.18, "word": " this", "probability": 0.93994140625}, {"start": 2521.18, "end": 2521.46, "word": " data", "probability": 0.94384765625}, {"start": 2521.46, "end": 2521.68, "word": " for", "probability": 0.6640625}, {"start": 2521.68, "end": 2521.86, "word": " home", "probability": 0.87060546875}, {"start": 2521.86, "end": 2522.26, "word": " prices", "probability": 0.91357421875}, {"start": 2522.26, "end": 2523.26, "word": " is", "probability": 0.943359375}, {"start": 2523.26, "end": 2523.72, "word": " 600", "probability": 0.88671875}, {"start": 2523.72, "end": 2524.18, "word": ",000", "probability": 0.917236328125}, {"start": 2524.18, "end": 2525.1, "word": " while", "probability": 0.630859375}, {"start": 2525.1, "end": 2525.32, "word": " the", "probability": 0.92578125}, 
{"start": 2525.32, "end": 2525.5, "word": " median", "probability": 0.95361328125}, {"start": 2525.5, "end": 2525.76, "word": " is", "probability": 0.94384765625}, {"start": 2525.76, "end": 2526.34, "word": " 300", "probability": 0.98046875}, {"start": 2526.34, "end": 2526.82, "word": ",000.", "probability": 0.997314453125}], "temperature": 1.0}, {"id": 93, "seek": 255492, "start": 2527.25, "end": 2554.93, "text": " If you look at these two figures, you can tell that there exists outlier or the outlier exists because there is a big difference between the mean and the median. So that's all for measures of central tendency. Again, we explained three measures, arithmetic mean, median, and mode. And arithmetic mean again is", "tokens": [759, 291, 574, 412, 613, 732, 9624, 11, 291, 393, 980, 300, 456, 8198, 484, 2753, 420, 264, 484, 2753, 8198, 570, 456, 307, 257, 955, 2649, 1296, 264, 914, 293, 264, 26779, 13, 407, 300, 311, 439, 337, 8000, 295, 5777, 18187, 13, 3764, 11, 321, 8825, 1045, 8000, 11, 42973, 914, 11, 26779, 11, 293, 4391, 13, 400, 42973, 914, 797, 307], "avg_logprob": -0.18641826923076923, "compression_ratio": 1.684782608695652, "no_speech_prob": 0.0, "words": [{"start": 2527.25, "end": 2527.57, "word": " If", "probability": 0.83837890625}, {"start": 2527.57, "end": 2527.63, "word": " you", "probability": 0.888671875}, {"start": 2527.63, "end": 2527.75, "word": " look", "probability": 0.966796875}, {"start": 2527.75, "end": 2527.85, "word": " at", "probability": 0.96533203125}, {"start": 2527.85, "end": 2527.99, "word": " these", "probability": 0.84326171875}, {"start": 2527.99, "end": 2528.17, "word": " two", "probability": 0.91552734375}, {"start": 2528.17, "end": 2528.53, "word": " figures,", "probability": 0.86572265625}, {"start": 2528.85, "end": 2529.05, "word": " you", "probability": 0.95703125}, {"start": 2529.05, "end": 2529.35, "word": " can", "probability": 0.94677734375}, {"start": 2529.35, "end": 2529.69, "word": " tell", "probability": 
0.87158203125}, {"start": 2529.69, "end": 2530.15, "word": " that", "probability": 0.9248046875}, {"start": 2530.15, "end": 2530.93, "word": " there", "probability": 0.77783203125}, {"start": 2530.93, "end": 2531.69, "word": " exists", "probability": 0.7763671875}, {"start": 2531.69, "end": 2532.21, "word": " outlier", "probability": 0.87939453125}, {"start": 2532.21, "end": 2532.71, "word": " or", "probability": 0.481201171875}, {"start": 2532.71, "end": 2532.83, "word": " the", "probability": 0.82177734375}, {"start": 2532.83, "end": 2533.13, "word": " outlier", "probability": 0.9580078125}, {"start": 2533.13, "end": 2533.45, "word": " exists", "probability": 0.8515625}, {"start": 2533.45, "end": 2533.83, "word": " because", "probability": 0.70263671875}, {"start": 2533.83, "end": 2534.35, "word": " there", "probability": 0.90185546875}, {"start": 2534.35, "end": 2534.49, "word": " is", "probability": 0.908203125}, {"start": 2534.49, "end": 2534.67, "word": " a", "probability": 0.99658203125}, {"start": 2534.67, "end": 2534.85, "word": " big", "probability": 0.92578125}, {"start": 2534.85, "end": 2535.29, "word": " difference", "probability": 0.8642578125}, {"start": 2535.29, "end": 2535.79, "word": " between", "probability": 0.87353515625}, {"start": 2535.79, "end": 2536.83, "word": " the", "probability": 0.861328125}, {"start": 2536.83, "end": 2536.95, "word": " mean", "probability": 0.521484375}, {"start": 2536.95, "end": 2537.13, "word": " and", "probability": 0.89990234375}, {"start": 2537.13, "end": 2537.23, "word": " the", "probability": 0.7646484375}, {"start": 2537.23, "end": 2537.41, "word": " median.", "probability": 0.966796875}, {"start": 2538.67, "end": 2539.39, "word": " So", "probability": 0.93603515625}, {"start": 2539.39, "end": 2539.75, "word": " that's", "probability": 0.85400390625}, {"start": 2539.75, "end": 2540.17, "word": " all", "probability": 0.95556640625}, {"start": 2540.17, "end": 2541.13, "word": " for", "probability": 
0.93701171875}, {"start": 2541.13, "end": 2543.89, "word": " measures", "probability": 0.482421875}, {"start": 2543.89, "end": 2544.07, "word": " of", "probability": 0.884765625}, {"start": 2544.07, "end": 2544.31, "word": " central", "probability": 0.25341796875}, {"start": 2544.31, "end": 2544.95, "word": " tendency.", "probability": 0.89306640625}, {"start": 2545.15, "end": 2545.55, "word": " Again,", "probability": 0.91357421875}, {"start": 2546.43, "end": 2546.93, "word": " we", "probability": 0.96142578125}, {"start": 2546.93, "end": 2547.65, "word": " explained", "probability": 0.79443359375}, {"start": 2547.65, "end": 2548.09, "word": " three", "probability": 0.79150390625}, {"start": 2548.09, "end": 2548.83, "word": " measures,", "probability": 0.8427734375}, {"start": 2549.45, "end": 2549.83, "word": " arithmetic", "probability": 0.92578125}, {"start": 2549.83, "end": 2550.27, "word": " mean,", "probability": 0.94873046875}, {"start": 2551.41, "end": 2551.87, "word": " median,", "probability": 0.9541015625}, {"start": 2552.37, "end": 2552.69, "word": " and", "probability": 0.94287109375}, {"start": 2552.69, "end": 2553.01, "word": " mode.", "probability": 0.91455078125}, {"start": 2553.29, "end": 2553.59, "word": " And", "probability": 0.94970703125}, {"start": 2553.59, "end": 2553.93, "word": " arithmetic", "probability": 0.91015625}, {"start": 2553.93, "end": 2554.29, "word": " mean", "probability": 0.96435546875}, {"start": 2554.29, "end": 2554.55, "word": " again", "probability": 0.67529296875}, {"start": 2554.55, "end": 2554.93, "word": " is", "probability": 0.89306640625}], "temperature": 1.0}, {"id": 94, "seek": 258227, "start": 2555.75, "end": 2582.27, "text": " denoted by X bar is pronounced as X bar and just summation of X divided by N. So summation Xi, i goes from 1 up to N divided by the total number of observations. The median, as we mentioned, is the value in the middle in ordered array. 
After you arrange the data from smallest to largest or vice versa, then the median is the value", "tokens": [1441, 23325, 538, 1783, 2159, 307, 23155, 382, 1783, 2159, 293, 445, 28811, 295, 1783, 6666, 538, 426, 13, 407, 28811, 15712, 11, 741, 1709, 490, 502, 493, 281, 426, 6666, 538, 264, 3217, 1230, 295, 18163, 13, 440, 26779, 11, 382, 321, 2835, 11, 307, 264, 2158, 294, 264, 2808, 294, 8866, 10225, 13, 2381, 291, 9424, 264, 1412, 490, 16998, 281, 6443, 420, 11964, 25650, 11, 550, 264, 26779, 307, 264, 2158], "avg_logprob": -0.1881249972184499, "compression_ratio": 1.66, "no_speech_prob": 0.0, "words": [{"start": 2555.75, "end": 2556.27, "word": " denoted", "probability": 0.8564453125}, {"start": 2556.27, "end": 2556.73, "word": " by", "probability": 0.97265625}, {"start": 2556.73, "end": 2557.15, "word": " X", "probability": 0.67626953125}, {"start": 2557.15, "end": 2557.45, "word": " bar", "probability": 0.76806640625}, {"start": 2557.45, "end": 2557.79, "word": " is", "probability": 0.2364501953125}, {"start": 2557.79, "end": 2558.27, "word": " pronounced", "probability": 0.79052734375}, {"start": 2558.27, "end": 2558.79, "word": " as", "probability": 0.9609375}, {"start": 2558.79, "end": 2558.99, "word": " X", "probability": 0.95703125}, {"start": 2558.99, "end": 2559.31, "word": " bar", "probability": 0.9482421875}, {"start": 2559.31, "end": 2560.03, "word": " and", "probability": 0.55078125}, {"start": 2560.03, "end": 2560.35, "word": " just", "probability": 0.91845703125}, {"start": 2560.35, "end": 2560.87, "word": " summation", "probability": 0.822265625}, {"start": 2560.87, "end": 2561.07, "word": " of", "probability": 0.96728515625}, {"start": 2561.07, "end": 2561.45, "word": " X", "probability": 0.97216796875}, {"start": 2561.45, "end": 2562.87, "word": " divided", "probability": 0.6484375}, {"start": 2562.87, "end": 2563.09, "word": " by", "probability": 0.96630859375}, {"start": 2563.09, "end": 2563.27, "word": " N.", "probability": 0.79248046875}, 
{"start": 2564.13, "end": 2564.41, "word": " So", "probability": 0.93701171875}, {"start": 2564.41, "end": 2564.97, "word": " summation", "probability": 0.6962890625}, {"start": 2564.97, "end": 2565.49, "word": " Xi,", "probability": 0.5673828125}, {"start": 2565.79, "end": 2565.97, "word": " i", "probability": 0.521484375}, {"start": 2565.97, "end": 2566.23, "word": " goes", "probability": 0.90673828125}, {"start": 2566.23, "end": 2566.47, "word": " from", "probability": 0.87939453125}, {"start": 2566.47, "end": 2566.71, "word": " 1", "probability": 0.509765625}, {"start": 2566.71, "end": 2566.99, "word": " up", "probability": 0.94580078125}, {"start": 2566.99, "end": 2567.13, "word": " to", "probability": 0.95849609375}, {"start": 2567.13, "end": 2567.35, "word": " N", "probability": 0.93017578125}, {"start": 2567.35, "end": 2567.67, "word": " divided", "probability": 0.5595703125}, {"start": 2567.67, "end": 2567.89, "word": " by", "probability": 0.962890625}, {"start": 2567.89, "end": 2568.07, "word": " the", "probability": 0.90234375}, {"start": 2568.07, "end": 2568.31, "word": " total", "probability": 0.86083984375}, {"start": 2568.31, "end": 2568.67, "word": " number", "probability": 0.91943359375}, {"start": 2568.67, "end": 2569.55, "word": " of", "probability": 0.9697265625}, {"start": 2569.55, "end": 2570.13, "word": " observations.", "probability": 0.7783203125}, {"start": 2570.75, "end": 2570.91, "word": " The", "probability": 0.826171875}, {"start": 2570.91, "end": 2571.19, "word": " median,", "probability": 0.958984375}, {"start": 2571.91, "end": 2572.05, "word": " as", "probability": 0.96533203125}, {"start": 2572.05, "end": 2572.17, "word": " we", "probability": 0.92431640625}, {"start": 2572.17, "end": 2572.55, "word": " mentioned,", "probability": 0.83935546875}, {"start": 2572.95, "end": 2573.25, "word": " is", "probability": 0.93798828125}, {"start": 2573.25, "end": 2573.41, "word": " the", "probability": 0.92138671875}, {"start": 2573.41, "end": 
2573.69, "word": " value", "probability": 0.970703125}, {"start": 2573.69, "end": 2573.83, "word": " in", "probability": 0.9423828125}, {"start": 2573.83, "end": 2573.93, "word": " the", "probability": 0.91796875}, {"start": 2573.93, "end": 2574.25, "word": " middle", "probability": 0.94677734375}, {"start": 2574.25, "end": 2574.95, "word": " in", "probability": 0.85302734375}, {"start": 2574.95, "end": 2575.69, "word": " ordered", "probability": 0.62158203125}, {"start": 2575.69, "end": 2576.03, "word": " array.", "probability": 0.85400390625}, {"start": 2576.39, "end": 2576.73, "word": " After", "probability": 0.87841796875}, {"start": 2576.73, "end": 2576.85, "word": " you", "probability": 0.9228515625}, {"start": 2576.85, "end": 2577.19, "word": " arrange", "probability": 0.83447265625}, {"start": 2577.19, "end": 2577.31, "word": " the", "probability": 0.76171875}, {"start": 2577.31, "end": 2577.65, "word": " data", "probability": 0.9404296875}, {"start": 2577.65, "end": 2578.57, "word": " from", "probability": 0.87109375}, {"start": 2578.57, "end": 2578.95, "word": " smallest", "probability": 0.74267578125}, {"start": 2578.95, "end": 2579.15, "word": " to", "probability": 0.86279296875}, {"start": 2579.15, "end": 2579.41, "word": " largest", "probability": 0.89794921875}, {"start": 2579.41, "end": 2579.77, "word": " or", "probability": 0.83935546875}, {"start": 2579.77, "end": 2580.01, "word": " vice", "probability": 0.91357421875}, {"start": 2580.01, "end": 2580.39, "word": " versa,", "probability": 0.80908203125}, {"start": 2580.85, "end": 2581.01, "word": " then", "probability": 0.837890625}, {"start": 2581.01, "end": 2581.19, "word": " the", "probability": 0.91455078125}, {"start": 2581.19, "end": 2581.45, "word": " median", "probability": 0.96826171875}, {"start": 2581.45, "end": 2581.81, "word": " is", "probability": 0.94580078125}, {"start": 2581.81, "end": 2581.93, "word": " the", "probability": 0.91650390625}, {"start": 2581.93, "end": 2582.27, 
"word": " value", "probability": 0.96923828125}], "temperature": 1.0}, {"id": 95, "seek": 260721, "start": 2583.09, "end": 2607.21, "text": " in the middle. The mode is the most frequent observed value. And we have to know that mean and median are used only for numerical data, while the mode can be used for both numerical and categorical data. That's all about measures of central tendency. Any question?", "tokens": [294, 264, 2808, 13, 440, 4391, 307, 264, 881, 18004, 13095, 2158, 13, 400, 321, 362, 281, 458, 300, 914, 293, 26779, 366, 1143, 787, 337, 29054, 1412, 11, 1339, 264, 4391, 393, 312, 1143, 337, 1293, 29054, 293, 19250, 804, 1412, 13, 663, 311, 439, 466, 8000, 295, 5777, 18187, 13, 2639, 1168, 30], "avg_logprob": -0.16657366284302302, "compression_ratio": 1.5229885057471264, "no_speech_prob": 0.0, "words": [{"start": 2583.09, "end": 2583.37, "word": " in", "probability": 0.380859375}, {"start": 2583.37, "end": 2583.51, "word": " the", "probability": 0.91748046875}, {"start": 2583.51, "end": 2583.73, "word": " middle.", "probability": 0.95263671875}, {"start": 2584.29, "end": 2584.45, "word": " The", "probability": 0.875}, {"start": 2584.45, "end": 2584.75, "word": " mode", "probability": 0.68359375}, {"start": 2584.75, "end": 2585.51, "word": " is", "probability": 0.94677734375}, {"start": 2585.51, "end": 2585.65, "word": " the", "probability": 0.9130859375}, {"start": 2585.65, "end": 2585.87, "word": " most", "probability": 0.90771484375}, {"start": 2585.87, "end": 2586.33, "word": " frequent", "probability": 0.87109375}, {"start": 2586.33, "end": 2586.69, "word": " observed", "probability": 0.84619140625}, {"start": 2586.69, "end": 2587.05, "word": " value.", "probability": 0.9599609375}, {"start": 2587.19, "end": 2587.33, "word": " And", "probability": 0.90283203125}, {"start": 2587.33, "end": 2587.43, "word": " we", "probability": 0.90869140625}, {"start": 2587.43, "end": 2587.57, "word": " have", "probability": 0.94873046875}, {"start": 2587.57, 
"end": 2587.67, "word": " to", "probability": 0.9716796875}, {"start": 2587.67, "end": 2587.79, "word": " know", "probability": 0.86767578125}, {"start": 2587.79, "end": 2588.09, "word": " that", "probability": 0.93017578125}, {"start": 2588.09, "end": 2588.83, "word": " mean", "probability": 0.8564453125}, {"start": 2588.83, "end": 2589.03, "word": " and", "probability": 0.94140625}, {"start": 2589.03, "end": 2589.33, "word": " median", "probability": 0.974609375}, {"start": 2589.33, "end": 2589.59, "word": " are", "probability": 0.485595703125}, {"start": 2589.59, "end": 2590.43, "word": " used", "probability": 0.927734375}, {"start": 2590.43, "end": 2590.83, "word": " only", "probability": 0.91748046875}, {"start": 2590.83, "end": 2591.17, "word": " for", "probability": 0.9521484375}, {"start": 2591.17, "end": 2591.55, "word": " numerical", "probability": 0.7041015625}, {"start": 2591.55, "end": 2592.13, "word": " data,", "probability": 0.9482421875}, {"start": 2592.83, "end": 2593.61, "word": " while", "probability": 0.95068359375}, {"start": 2593.61, "end": 2593.87, "word": " the", "probability": 0.9228515625}, {"start": 2593.87, "end": 2594.15, "word": " mode", "probability": 0.87890625}, {"start": 2594.15, "end": 2594.45, "word": " can", "probability": 0.9453125}, {"start": 2594.45, "end": 2594.59, "word": " be", "probability": 0.951171875}, {"start": 2594.59, "end": 2594.83, "word": " used", "probability": 0.91162109375}, {"start": 2594.83, "end": 2595.07, "word": " for", "probability": 0.9482421875}, {"start": 2595.07, "end": 2595.47, "word": " both", "probability": 0.89306640625}, {"start": 2595.47, "end": 2596.39, "word": " numerical", "probability": 0.54052734375}, {"start": 2596.39, "end": 2597.51, "word": " and", "probability": 0.947265625}, {"start": 2597.51, "end": 2598.49, "word": " categorical", "probability": 0.853271484375}, {"start": 2598.49, "end": 2598.81, "word": " data.", "probability": 0.92333984375}, {"start": 2600.73, "end": 2601.41, 
"word": " That's", "probability": 0.9384765625}, {"start": 2601.41, "end": 2601.75, "word": " all", "probability": 0.951171875}, {"start": 2601.75, "end": 2602.35, "word": " about", "probability": 0.90087890625}, {"start": 2602.35, "end": 2603.45, "word": " measures", "probability": 0.478515625}, {"start": 2603.45, "end": 2604.29, "word": " of", "probability": 0.96875}, {"start": 2604.29, "end": 2604.91, "word": " central", "probability": 0.9091796875}, {"start": 2604.91, "end": 2606.13, "word": " tendency.", "probability": 0.9306640625}, {"start": 2606.63, "end": 2606.87, "word": " Any", "probability": 0.91796875}, {"start": 2606.87, "end": 2607.21, "word": " question?", "probability": 0.615234375}], "temperature": 1.0}, {"id": 96, "seek": 264187, "start": 2613.21, "end": 2641.87, "text": " Let's move to measures of variation. It's another type of measures. It's called measures of variation, sometimes called measures of spread. Now, variation can be computed by using range, variance, standard deviation, and coefficient of variation.", "tokens": [961, 311, 1286, 281, 8000, 295, 12990, 13, 467, 311, 1071, 2010, 295, 8000, 13, 467, 311, 1219, 8000, 295, 12990, 11, 2171, 1219, 8000, 295, 3974, 13, 823, 11, 12990, 393, 312, 40610, 538, 1228, 3613, 11, 21977, 11, 3832, 25163, 11, 293, 17619, 295, 12990, 13], "avg_logprob": -0.18223852040816327, "compression_ratio": 1.75177304964539, "no_speech_prob": 0.0, "words": [{"start": 2613.21, "end": 2613.61, "word": " Let's", "probability": 0.7349853515625}, {"start": 2613.61, "end": 2613.99, "word": " move", "probability": 0.93994140625}, {"start": 2613.99, "end": 2616.85, "word": " to", "probability": 0.89892578125}, {"start": 2616.85, "end": 2617.47, "word": " measures", "probability": 0.712890625}, {"start": 2617.47, "end": 2618.27, "word": " of", "probability": 0.96728515625}, {"start": 2618.27, "end": 2619.03, "word": " variation.", "probability": 0.8662109375}, {"start": 2619.71, "end": 2619.85, "word": " It's", 
"probability": 0.6405029296875}, {"start": 2619.85, "end": 2620.23, "word": " another", "probability": 0.93115234375}, {"start": 2620.23, "end": 2620.75, "word": " type", "probability": 0.974609375}, {"start": 2620.75, "end": 2622.11, "word": " of", "probability": 0.953125}, {"start": 2622.11, "end": 2622.45, "word": " measures.", "probability": 0.57421875}, {"start": 2622.65, "end": 2622.71, "word": " It's", "probability": 0.931396484375}, {"start": 2622.71, "end": 2622.93, "word": " called", "probability": 0.8583984375}, {"start": 2622.93, "end": 2623.29, "word": " measures", "probability": 0.7587890625}, {"start": 2623.29, "end": 2623.75, "word": " of", "probability": 0.96533203125}, {"start": 2623.75, "end": 2624.95, "word": " variation,", "probability": 0.9189453125}, {"start": 2625.53, "end": 2625.95, "word": " sometimes", "probability": 0.94189453125}, {"start": 2625.95, "end": 2626.35, "word": " called", "probability": 0.78857421875}, {"start": 2626.35, "end": 2626.67, "word": " measures", "probability": 0.80126953125}, {"start": 2626.67, "end": 2627.01, "word": " of", "probability": 0.96484375}, {"start": 2627.01, "end": 2627.49, "word": " spread.", "probability": 0.89794921875}, {"start": 2630.49, "end": 2630.79, "word": " Now,", "probability": 0.9208984375}, {"start": 2630.91, "end": 2631.29, "word": " variation", "probability": 0.8779296875}, {"start": 2631.29, "end": 2631.61, "word": " can", "probability": 0.94482421875}, {"start": 2631.61, "end": 2631.79, "word": " be", "probability": 0.95947265625}, {"start": 2631.79, "end": 2632.21, "word": " computed", "probability": 0.9345703125}, {"start": 2632.21, "end": 2632.51, "word": " by", "probability": 0.9609375}, {"start": 2632.51, "end": 2632.89, "word": " using", "probability": 0.94189453125}, {"start": 2632.89, "end": 2633.85, "word": " range,", "probability": 0.8125}, {"start": 2635.59, "end": 2636.25, "word": " variance,", "probability": 0.92236328125}, {"start": 2637.67, "end": 2637.97, "word": " 
standard", "probability": 0.92822265625}, {"start": 2637.97, "end": 2638.41, "word": " deviation,", "probability": 0.92919921875}, {"start": 2638.99, "end": 2639.35, "word": " and", "probability": 0.9423828125}, {"start": 2639.35, "end": 2639.87, "word": " coefficient", "probability": 0.9150390625}, {"start": 2639.87, "end": 2640.85, "word": " of", "probability": 0.96875}, {"start": 2640.85, "end": 2641.87, "word": " variation.", "probability": 0.85498046875}], "temperature": 1.0}, {"id": 97, "seek": 267121, "start": 2642.27, "end": 2671.21, "text": " So we have four types, range, variance, standard deviation, and coefficient of variation. Now, measures of variation give information on the spread. Now, this is the first difference between central tendency measures and measures of variation. That one measures the central value or the value in the middle. Here, it measures the spread.", "tokens": [407, 321, 362, 1451, 3467, 11, 3613, 11, 21977, 11, 3832, 25163, 11, 293, 17619, 295, 12990, 13, 823, 11, 8000, 295, 12990, 976, 1589, 322, 264, 3974, 13, 823, 11, 341, 307, 264, 700, 2649, 1296, 5777, 18187, 8000, 293, 8000, 295, 12990, 13, 663, 472, 8000, 264, 5777, 2158, 420, 264, 2158, 294, 264, 2808, 13, 1692, 11, 309, 8000, 264, 3974, 13], "avg_logprob": -0.14618844696969696, "compression_ratio": 1.7883597883597884, "no_speech_prob": 0.0, "words": [{"start": 2642.27, "end": 2642.55, "word": " So", "probability": 0.869140625}, {"start": 2642.55, "end": 2642.69, "word": " we", "probability": 0.75}, {"start": 2642.69, "end": 2642.99, "word": " have", "probability": 0.95068359375}, {"start": 2642.99, "end": 2644.23, "word": " four", "probability": 0.865234375}, {"start": 2644.23, "end": 2645.79, "word": " types,", "probability": 0.818359375}, {"start": 2646.07, "end": 2646.51, "word": " range,", "probability": 0.79541015625}, {"start": 2647.75, "end": 2648.43, "word": " variance,", "probability": 0.859375}, {"start": 2649.25, "end": 2649.57, "word": " standard", 
"probability": 0.92578125}, {"start": 2649.57, "end": 2649.99, "word": " deviation,", "probability": 0.94384765625}, {"start": 2650.21, "end": 2650.37, "word": " and", "probability": 0.94189453125}, {"start": 2650.37, "end": 2650.79, "word": " coefficient", "probability": 0.91796875}, {"start": 2650.79, "end": 2651.45, "word": " of", "probability": 0.9677734375}, {"start": 2651.45, "end": 2652.05, "word": " variation.", "probability": 0.87744140625}, {"start": 2653.71, "end": 2654.17, "word": " Now,", "probability": 0.9462890625}, {"start": 2654.21, "end": 2654.45, "word": " measures", "probability": 0.76318359375}, {"start": 2654.45, "end": 2654.65, "word": " of", "probability": 0.96240234375}, {"start": 2654.65, "end": 2654.99, "word": " variation", "probability": 0.89208984375}, {"start": 2654.99, "end": 2655.25, "word": " give", "probability": 0.8125}, {"start": 2655.25, "end": 2655.75, "word": " information", "probability": 0.8486328125}, {"start": 2655.75, "end": 2655.97, "word": " on", "probability": 0.81396484375}, {"start": 2655.97, "end": 2656.15, "word": " the", "probability": 0.85791015625}, {"start": 2656.15, "end": 2656.47, "word": " spread.", "probability": 0.919921875}, {"start": 2657.27, "end": 2657.57, "word": " Now,", "probability": 0.91552734375}, {"start": 2657.59, "end": 2657.77, "word": " this", "probability": 0.9453125}, {"start": 2657.77, "end": 2657.89, "word": " is", "probability": 0.9453125}, {"start": 2657.89, "end": 2658.05, "word": " the", "probability": 0.923828125}, {"start": 2658.05, "end": 2658.31, "word": " first", "probability": 0.86279296875}, {"start": 2658.31, "end": 2658.83, "word": " difference", "probability": 0.86962890625}, {"start": 2658.83, "end": 2659.41, "word": " between", "probability": 0.876953125}, {"start": 2659.41, "end": 2660.95, "word": " central", "probability": 0.91064453125}, {"start": 2660.95, "end": 2661.53, "word": " tendency", "probability": 0.93212890625}, {"start": 2661.53, "end": 2661.97, "word": " 
measures", "probability": 0.8310546875}, {"start": 2661.97, "end": 2663.23, "word": " and", "probability": 0.912109375}, {"start": 2663.23, "end": 2664.01, "word": " measures", "probability": 0.85595703125}, {"start": 2664.01, "end": 2664.21, "word": " of", "probability": 0.96630859375}, {"start": 2664.21, "end": 2664.63, "word": " variation.", "probability": 0.89794921875}, {"start": 2665.11, "end": 2665.47, "word": " That", "probability": 0.91162109375}, {"start": 2665.47, "end": 2665.67, "word": " one", "probability": 0.8955078125}, {"start": 2665.67, "end": 2665.95, "word": " measures", "probability": 0.85009765625}, {"start": 2665.95, "end": 2666.33, "word": " the", "probability": 0.90771484375}, {"start": 2666.33, "end": 2666.93, "word": " central", "probability": 0.90576171875}, {"start": 2666.93, "end": 2667.41, "word": " value", "probability": 0.974609375}, {"start": 2667.41, "end": 2668.27, "word": " or", "probability": 0.734375}, {"start": 2668.27, "end": 2668.43, "word": " the", "probability": 0.919921875}, {"start": 2668.43, "end": 2668.65, "word": " value", "probability": 0.9755859375}, {"start": 2668.65, "end": 2668.79, "word": " in", "probability": 0.88623046875}, {"start": 2668.79, "end": 2668.89, "word": " the", "probability": 0.916015625}, {"start": 2668.89, "end": 2669.09, "word": " middle.", "probability": 0.9267578125}, {"start": 2669.51, "end": 2669.83, "word": " Here,", "probability": 0.85302734375}, {"start": 2669.91, "end": 2670.13, "word": " it", "probability": 0.9521484375}, {"start": 2670.13, "end": 2670.43, "word": " measures", "probability": 0.8486328125}, {"start": 2670.43, "end": 2670.79, "word": " the", "probability": 0.90966796875}, {"start": 2670.79, "end": 2671.21, "word": " spread.", "probability": 0.92333984375}], "temperature": 1.0}, {"id": 98, "seek": 269969, "start": 2671.55, "end": 2699.69, "text": " Or variability. Or dispersion of the data. Do you know what is dispersion? Dispersion. Tabaad. 
So major variation given formation with the spread. Spread or variation or dispersion of the data values. Now if you look at these two bell shapes. Both have the same center. The center I mean the value in the middle.", "tokens": [1610, 35709, 13, 1610, 24631, 313, 295, 264, 1412, 13, 1144, 291, 458, 437, 307, 24631, 313, 30, 4208, 21819, 313, 13, 14106, 64, 345, 13, 407, 2563, 12990, 2212, 11723, 365, 264, 3974, 13, 30308, 420, 12990, 420, 24631, 313, 295, 264, 1412, 4190, 13, 823, 498, 291, 574, 412, 613, 732, 4549, 10854, 13, 6767, 362, 264, 912, 3056, 13, 440, 3056, 286, 914, 264, 2158, 294, 264, 2808, 13], "avg_logprob": -0.2724743118024852, "compression_ratio": 1.7197802197802199, "no_speech_prob": 0.0, "words": [{"start": 2671.55, "end": 2671.97, "word": " Or", "probability": 0.52783203125}, {"start": 2671.97, "end": 2672.57, "word": " variability.", "probability": 0.95361328125}, {"start": 2674.13, "end": 2674.71, "word": " Or", "probability": 0.9716796875}, {"start": 2674.71, "end": 2675.45, "word": " dispersion", "probability": 0.9423828125}, {"start": 2675.45, "end": 2675.89, "word": " of", "probability": 0.9609375}, {"start": 2675.89, "end": 2676.01, "word": " the", "probability": 0.9150390625}, {"start": 2676.01, "end": 2676.31, "word": " data.", "probability": 0.94677734375}, {"start": 2676.45, "end": 2676.57, "word": " Do", "probability": 0.437744140625}, {"start": 2676.57, "end": 2676.57, "word": " you", "probability": 0.97021484375}, {"start": 2676.57, "end": 2676.63, "word": " know", "probability": 0.74609375}, {"start": 2676.63, "end": 2676.69, "word": " what", "probability": 0.69140625}, {"start": 2676.69, "end": 2676.85, "word": " is", "probability": 0.63232421875}, {"start": 2676.85, "end": 2677.39, "word": " dispersion?", "probability": 0.900390625}, {"start": 2679.73, "end": 2680.31, "word": " Dispersion.", "probability": 0.89306640625}, {"start": 2680.63, "end": 2681.13, "word": " Tabaad.", "probability": 0.2904866536458333}, {"start": 
2682.93, "end": 2683.51, "word": " So", "probability": 0.91162109375}, {"start": 2683.51, "end": 2683.91, "word": " major", "probability": 0.1849365234375}, {"start": 2683.91, "end": 2684.55, "word": " variation", "probability": 0.63134765625}, {"start": 2684.55, "end": 2684.89, "word": " given", "probability": 0.5107421875}, {"start": 2684.89, "end": 2685.39, "word": " formation", "probability": 0.91943359375}, {"start": 2685.39, "end": 2685.59, "word": " with", "probability": 0.578125}, {"start": 2685.59, "end": 2685.73, "word": " the", "probability": 0.5087890625}, {"start": 2685.73, "end": 2686.09, "word": " spread.", "probability": 0.853515625}, {"start": 2686.27, "end": 2686.71, "word": " Spread", "probability": 0.9716796875}, {"start": 2686.71, "end": 2686.99, "word": " or", "probability": 0.84228515625}, {"start": 2686.99, "end": 2687.47, "word": " variation", "probability": 0.84375}, {"start": 2687.47, "end": 2687.71, "word": " or", "probability": 0.75244140625}, {"start": 2687.71, "end": 2688.21, "word": " dispersion", "probability": 0.97265625}, {"start": 2688.21, "end": 2688.35, "word": " of", "probability": 0.96240234375}, {"start": 2688.35, "end": 2688.47, "word": " the", "probability": 0.90283203125}, {"start": 2688.47, "end": 2688.77, "word": " data", "probability": 0.95703125}, {"start": 2688.77, "end": 2689.83, "word": " values.", "probability": 0.65869140625}, {"start": 2690.07, "end": 2690.23, "word": " Now", "probability": 0.94287109375}, {"start": 2690.23, "end": 2690.35, "word": " if", "probability": 0.67333984375}, {"start": 2690.35, "end": 2690.41, "word": " you", "probability": 0.91796875}, {"start": 2690.41, "end": 2690.55, "word": " look", "probability": 0.96484375}, {"start": 2690.55, "end": 2690.69, "word": " at", "probability": 0.966796875}, {"start": 2690.69, "end": 2690.93, "word": " these", "probability": 0.83984375}, {"start": 2690.93, "end": 2691.37, "word": " two", "probability": 0.923828125}, {"start": 2691.37, "end": 2692.25, 
"word": " bell", "probability": 0.71142578125}, {"start": 2692.25, "end": 2692.65, "word": " shapes.", "probability": 0.56689453125}, {"start": 2695.67, "end": 2696.25, "word": " Both", "probability": 0.83642578125}, {"start": 2696.25, "end": 2696.49, "word": " have", "probability": 0.9453125}, {"start": 2696.49, "end": 2696.65, "word": " the", "probability": 0.91796875}, {"start": 2696.65, "end": 2696.87, "word": " same", "probability": 0.89990234375}, {"start": 2696.87, "end": 2697.23, "word": " center.", "probability": 0.87548828125}, {"start": 2698.21, "end": 2698.57, "word": " The", "probability": 0.89404296875}, {"start": 2698.57, "end": 2698.83, "word": " center", "probability": 0.89599609375}, {"start": 2698.83, "end": 2698.95, "word": " I", "probability": 0.1990966796875}, {"start": 2698.95, "end": 2699.05, "word": " mean", "probability": 0.95458984375}, {"start": 2699.05, "end": 2699.17, "word": " the", "probability": 0.81640625}, {"start": 2699.17, "end": 2699.31, "word": " value", "probability": 0.91943359375}, {"start": 2699.31, "end": 2699.43, "word": " in", "probability": 0.85546875}, {"start": 2699.43, "end": 2699.53, "word": " the", "probability": 0.91796875}, {"start": 2699.53, "end": 2699.69, "word": " middle.", "probability": 0.95556640625}], "temperature": 1.0}, {"id": 99, "seek": 272837, "start": 2700.17, "end": 2728.37, "text": " So the value in the middle here for figure graph number one is the same as the value for the other graph. So both graphs have the same center. But if you look at the spread, you will see that figure A is less spread than figure B. 
Now if you look at this one, the spread here,", "tokens": [407, 264, 2158, 294, 264, 2808, 510, 337, 2573, 4295, 1230, 472, 307, 264, 912, 382, 264, 2158, 337, 264, 661, 4295, 13, 407, 1293, 24877, 362, 264, 912, 3056, 13, 583, 498, 291, 574, 412, 264, 3974, 11, 291, 486, 536, 300, 2573, 316, 307, 1570, 3974, 813, 2573, 363, 13, 823, 498, 291, 574, 412, 341, 472, 11, 264, 3974, 510, 11], "avg_logprob": -0.15264423076923078, "compression_ratio": 1.720496894409938, "no_speech_prob": 0.0, "words": [{"start": 2700.17, "end": 2700.69, "word": " So", "probability": 0.8623046875}, {"start": 2700.69, "end": 2701.05, "word": " the", "probability": 0.65966796875}, {"start": 2701.05, "end": 2701.25, "word": " value", "probability": 0.97314453125}, {"start": 2701.25, "end": 2701.39, "word": " in", "probability": 0.93896484375}, {"start": 2701.39, "end": 2701.51, "word": " the", "probability": 0.919921875}, {"start": 2701.51, "end": 2701.73, "word": " middle", "probability": 0.95458984375}, {"start": 2701.73, "end": 2702.11, "word": " here", "probability": 0.8515625}, {"start": 2702.11, "end": 2703.83, "word": " for", "probability": 0.7314453125}, {"start": 2703.83, "end": 2706.99, "word": " figure", "probability": 0.53955078125}, {"start": 2706.99, "end": 2707.47, "word": " graph", "probability": 0.8818359375}, {"start": 2707.47, "end": 2707.75, "word": " number", "probability": 0.90380859375}, {"start": 2707.75, "end": 2708.09, "word": " one", "probability": 0.6796875}, {"start": 2708.09, "end": 2708.79, "word": " is", "probability": 0.87158203125}, {"start": 2708.79, "end": 2708.93, "word": " the", "probability": 0.92138671875}, {"start": 2708.93, "end": 2709.17, "word": " same", "probability": 0.90087890625}, {"start": 2709.17, "end": 2709.49, "word": " as", "probability": 0.95654296875}, {"start": 2709.49, "end": 2709.65, "word": " the", "probability": 0.921875}, {"start": 2709.65, "end": 2709.85, "word": " value", "probability": 0.97607421875}, {"start": 2709.85, "end": 
2710.03, "word": " for", "probability": 0.73828125}, {"start": 2710.03, "end": 2710.15, "word": " the", "probability": 0.9169921875}, {"start": 2710.15, "end": 2710.39, "word": " other", "probability": 0.9013671875}, {"start": 2710.39, "end": 2710.63, "word": " graph.", "probability": 0.8994140625}, {"start": 2711.19, "end": 2711.47, "word": " So", "probability": 0.7958984375}, {"start": 2711.47, "end": 2712.33, "word": " both", "probability": 0.82958984375}, {"start": 2712.33, "end": 2712.91, "word": " graphs", "probability": 0.923828125}, {"start": 2712.91, "end": 2713.41, "word": " have", "probability": 0.94140625}, {"start": 2713.41, "end": 2714.17, "word": " the", "probability": 0.904296875}, {"start": 2714.17, "end": 2714.67, "word": " same", "probability": 0.90771484375}, {"start": 2714.67, "end": 2716.27, "word": " center.", "probability": 0.837890625}, {"start": 2717.43, "end": 2717.99, "word": " But", "probability": 0.908203125}, {"start": 2717.99, "end": 2718.11, "word": " if", "probability": 0.3994140625}, {"start": 2718.11, "end": 2718.13, "word": " you", "probability": 0.94140625}, {"start": 2718.13, "end": 2718.27, "word": " look", "probability": 0.962890625}, {"start": 2718.27, "end": 2718.41, "word": " at", "probability": 0.96533203125}, {"start": 2718.41, "end": 2718.57, "word": " the", "probability": 0.7470703125}, {"start": 2718.57, "end": 2718.93, "word": " spread,", "probability": 0.904296875}, {"start": 2719.75, "end": 2719.93, "word": " you", "probability": 0.95263671875}, {"start": 2719.93, "end": 2720.09, "word": " will", "probability": 0.87890625}, {"start": 2720.09, "end": 2720.31, "word": " see", "probability": 0.92626953125}, {"start": 2720.31, "end": 2720.67, "word": " that", "probability": 0.90625}, {"start": 2720.67, "end": 2721.37, "word": " figure", "probability": 0.94091796875}, {"start": 2721.37, "end": 2721.71, "word": " A", "probability": 0.79931640625}, {"start": 2721.71, "end": 2723.07, "word": " is", "probability": 
0.90673828125}, {"start": 2723.07, "end": 2723.53, "word": " less", "probability": 0.94140625}, {"start": 2723.53, "end": 2724.19, "word": " spread", "probability": 0.93408203125}, {"start": 2724.19, "end": 2724.89, "word": " than", "probability": 0.93017578125}, {"start": 2724.89, "end": 2725.15, "word": " figure", "probability": 0.9658203125}, {"start": 2725.15, "end": 2725.41, "word": " B.", "probability": 0.67138671875}, {"start": 2725.83, "end": 2726.05, "word": " Now", "probability": 0.8994140625}, {"start": 2726.05, "end": 2726.19, "word": " if", "probability": 0.6279296875}, {"start": 2726.19, "end": 2726.23, "word": " you", "probability": 0.93359375}, {"start": 2726.23, "end": 2726.39, "word": " look", "probability": 0.96044921875}, {"start": 2726.39, "end": 2726.55, "word": " at", "probability": 0.96337890625}, {"start": 2726.55, "end": 2726.75, "word": " this", "probability": 0.87451171875}, {"start": 2726.75, "end": 2726.93, "word": " one,", "probability": 0.92529296875}, {"start": 2727.15, "end": 2727.33, "word": " the", "probability": 0.72265625}, {"start": 2727.33, "end": 2728.01, "word": " spread", "probability": 0.90283203125}, {"start": 2728.01, "end": 2728.37, "word": " here,", "probability": 0.85498046875}], "temperature": 1.0}, {"id": 100, "seek": 275650, "start": 2728.82, "end": 2756.5, "text": " is much less than the other one. Even they have the same center, the same mean, but figure A is more spread than figure B. It means that the variation in A is much less than the variation in figure B. 
So it means that the mean is not sufficient to describe your data.", "tokens": [307, 709, 1570, 813, 264, 661, 472, 13, 2754, 436, 362, 264, 912, 3056, 11, 264, 912, 914, 11, 457, 2573, 316, 307, 544, 3974, 813, 2573, 363, 13, 467, 1355, 300, 264, 12990, 294, 316, 307, 709, 1570, 813, 264, 12990, 294, 2573, 363, 13, 407, 309, 1355, 300, 264, 914, 307, 406, 11563, 281, 6786, 428, 1412, 13], "avg_logprob": -0.12282274785589, "compression_ratio": 1.763157894736842, "no_speech_prob": 0.0, "words": [{"start": 2728.82, "end": 2729.18, "word": " is", "probability": 0.63720703125}, {"start": 2729.18, "end": 2729.4, "word": " much", "probability": 0.89697265625}, {"start": 2729.4, "end": 2729.72, "word": " less", "probability": 0.9306640625}, {"start": 2729.72, "end": 2730.06, "word": " than", "probability": 0.93994140625}, {"start": 2730.06, "end": 2731.02, "word": " the", "probability": 0.833984375}, {"start": 2731.02, "end": 2731.26, "word": " other", "probability": 0.88525390625}, {"start": 2731.26, "end": 2731.54, "word": " one.", "probability": 0.9287109375}, {"start": 2732.62, "end": 2733.06, "word": " Even", "probability": 0.78759765625}, {"start": 2733.06, "end": 2733.46, "word": " they", "probability": 0.5498046875}, {"start": 2733.46, "end": 2733.64, "word": " have", "probability": 0.94873046875}, {"start": 2733.64, "end": 2733.84, "word": " the", "probability": 0.91357421875}, {"start": 2733.84, "end": 2734.12, "word": " same", "probability": 0.9052734375}, {"start": 2734.12, "end": 2734.56, "word": " center,", "probability": 0.77001953125}, {"start": 2735.12, "end": 2735.28, "word": " the", "probability": 0.89208984375}, {"start": 2735.28, "end": 2735.54, "word": " same", "probability": 0.90673828125}, {"start": 2735.54, "end": 2735.8, "word": " mean,", "probability": 0.96044921875}, {"start": 2736.3, "end": 2736.66, "word": " but", "probability": 0.92041015625}, {"start": 2736.66, "end": 2737.74, "word": " figure", "probability": 0.79052734375}, {"start": 
2737.74, "end": 2738.04, "word": " A", "probability": 0.66552734375}, {"start": 2738.04, "end": 2738.44, "word": " is", "probability": 0.95068359375}, {"start": 2738.44, "end": 2738.68, "word": " more", "probability": 0.931640625}, {"start": 2738.68, "end": 2739.26, "word": " spread", "probability": 0.923828125}, {"start": 2739.26, "end": 2740.64, "word": " than", "probability": 0.8955078125}, {"start": 2740.64, "end": 2740.9, "word": " figure", "probability": 0.93798828125}, {"start": 2740.9, "end": 2741.06, "word": " B.", "probability": 0.9921875}, {"start": 2741.16, "end": 2741.22, "word": " It", "probability": 0.91552734375}, {"start": 2741.22, "end": 2741.54, "word": " means", "probability": 0.92626953125}, {"start": 2741.54, "end": 2742.2, "word": " that", "probability": 0.91845703125}, {"start": 2742.2, "end": 2742.4, "word": " the", "probability": 0.90869140625}, {"start": 2742.4, "end": 2742.88, "word": " variation", "probability": 0.9169921875}, {"start": 2742.88, "end": 2744.26, "word": " in", "probability": 0.9404296875}, {"start": 2744.26, "end": 2744.6, "word": " A", "probability": 0.974609375}, {"start": 2744.6, "end": 2745.14, "word": " is", "probability": 0.9404296875}, {"start": 2745.14, "end": 2745.6, "word": " much", "probability": 0.90625}, {"start": 2745.6, "end": 2746.44, "word": " less", "probability": 0.94580078125}, {"start": 2746.44, "end": 2746.9, "word": " than", "probability": 0.94384765625}, {"start": 2746.9, "end": 2747.14, "word": " the", "probability": 0.9072265625}, {"start": 2747.14, "end": 2747.56, "word": " variation", "probability": 0.90576171875}, {"start": 2747.56, "end": 2748.26, "word": " in", "probability": 0.93701171875}, {"start": 2748.26, "end": 2748.5, "word": " figure", "probability": 0.9345703125}, {"start": 2748.5, "end": 2748.84, "word": " B.", "probability": 0.99658203125}, {"start": 2749.56, "end": 2749.78, "word": " So", "probability": 0.921875}, {"start": 2749.78, "end": 2749.92, "word": " it", "probability": 
0.7890625}, {"start": 2749.92, "end": 2750.34, "word": " means", "probability": 0.9296875}, {"start": 2750.34, "end": 2750.82, "word": " that", "probability": 0.93359375}, {"start": 2750.82, "end": 2751.52, "word": " the", "probability": 0.91064453125}, {"start": 2751.52, "end": 2751.78, "word": " mean", "probability": 0.9697265625}, {"start": 2751.78, "end": 2752.2, "word": " is", "probability": 0.9541015625}, {"start": 2752.2, "end": 2752.5, "word": " not", "probability": 0.9482421875}, {"start": 2752.5, "end": 2753.14, "word": " sufficient", "probability": 0.869140625}, {"start": 2753.14, "end": 2755.48, "word": " to", "probability": 0.95556640625}, {"start": 2755.48, "end": 2755.96, "word": " describe", "probability": 0.86572265625}, {"start": 2755.96, "end": 2756.18, "word": " your", "probability": 0.888671875}, {"start": 2756.18, "end": 2756.5, "word": " data.", "probability": 0.9482421875}], "temperature": 1.0}, {"id": 101, "seek": 278455, "start": 2757.05, "end": 2784.55, "text": " Because maybe you have two datasets and both have the same mean, but the spread or the variation is completely different. Again, maybe we have two classes of statistics, class A and class B. The center or the mean or the average is the same for each one. For example, maybe the average of this class is 70. The average of class B is also 70. 
But the scores are scattered.", "tokens": [1436, 1310, 291, 362, 732, 42856, 293, 1293, 362, 264, 912, 914, 11, 457, 264, 3974, 420, 264, 12990, 307, 2584, 819, 13, 3764, 11, 1310, 321, 362, 732, 5359, 295, 12523, 11, 1508, 316, 293, 1508, 363, 13, 440, 3056, 420, 264, 914, 420, 264, 4274, 307, 264, 912, 337, 1184, 472, 13, 1171, 1365, 11, 1310, 264, 4274, 295, 341, 1508, 307, 5285, 13, 440, 4274, 295, 1508, 363, 307, 611, 5285, 13, 583, 264, 13444, 366, 21986, 13], "avg_logprob": -0.1271913115207742, "compression_ratio": 1.7799043062200957, "no_speech_prob": 0.0, "words": [{"start": 2757.05, "end": 2757.49, "word": " Because", "probability": 0.6611328125}, {"start": 2757.49, "end": 2757.85, "word": " maybe", "probability": 0.939453125}, {"start": 2757.85, "end": 2758.09, "word": " you", "probability": 0.95458984375}, {"start": 2758.09, "end": 2758.29, "word": " have", "probability": 0.94970703125}, {"start": 2758.29, "end": 2758.53, "word": " two", "probability": 0.92236328125}, {"start": 2758.53, "end": 2758.95, "word": " datasets", "probability": 0.76025390625}, {"start": 2758.95, "end": 2759.97, "word": " and", "probability": 0.67578125}, {"start": 2759.97, "end": 2760.31, "word": " both", "probability": 0.888671875}, {"start": 2760.31, "end": 2760.53, "word": " have", "probability": 0.94677734375}, {"start": 2760.53, "end": 2760.69, "word": " the", "probability": 0.916015625}, {"start": 2760.69, "end": 2760.95, "word": " same", "probability": 0.91259765625}, {"start": 2760.95, "end": 2761.23, "word": " mean,", "probability": 0.9677734375}, {"start": 2761.59, "end": 2762.03, "word": " but", "probability": 0.92578125}, {"start": 2762.03, "end": 2762.59, "word": " the", "probability": 0.91064453125}, {"start": 2762.59, "end": 2763.01, "word": " spread", "probability": 0.90771484375}, {"start": 2763.01, "end": 2763.23, "word": " or", "probability": 0.93505859375}, {"start": 2763.23, "end": 2763.33, "word": " the", "probability": 0.91162109375}, {"start": 2763.33, 
"end": 2763.69, "word": " variation", "probability": 0.91064453125}, {"start": 2763.69, "end": 2764.07, "word": " is", "probability": 0.94970703125}, {"start": 2764.07, "end": 2764.49, "word": " completely", "probability": 0.75927734375}, {"start": 2764.49, "end": 2764.91, "word": " different.", "probability": 0.88232421875}, {"start": 2766.05, "end": 2766.39, "word": " Again,", "probability": 0.82177734375}, {"start": 2766.91, "end": 2767.17, "word": " maybe", "probability": 0.94091796875}, {"start": 2767.17, "end": 2767.35, "word": " we", "probability": 0.95849609375}, {"start": 2767.35, "end": 2767.51, "word": " have", "probability": 0.94384765625}, {"start": 2767.51, "end": 2767.67, "word": " two", "probability": 0.93798828125}, {"start": 2767.67, "end": 2768.07, "word": " classes", "probability": 0.9140625}, {"start": 2768.07, "end": 2768.45, "word": " of", "probability": 0.958984375}, {"start": 2768.45, "end": 2769.01, "word": " statistics,", "probability": 0.8798828125}, {"start": 2769.57, "end": 2769.69, "word": " class", "probability": 0.62646484375}, {"start": 2769.69, "end": 2769.85, "word": " A", "probability": 0.89892578125}, {"start": 2769.85, "end": 2769.99, "word": " and", "probability": 0.93359375}, {"start": 2769.99, "end": 2770.25, "word": " class", "probability": 0.916015625}, {"start": 2770.25, "end": 2770.51, "word": " B.", "probability": 0.99853515625}, {"start": 2770.99, "end": 2771.23, "word": " The", "probability": 0.89208984375}, {"start": 2771.23, "end": 2771.59, "word": " center", "probability": 0.8134765625}, {"start": 2771.59, "end": 2771.89, "word": " or", "probability": 0.8486328125}, {"start": 2771.89, "end": 2772.01, "word": " the", "probability": 0.931640625}, {"start": 2772.01, "end": 2772.23, "word": " mean", "probability": 0.978515625}, {"start": 2772.23, "end": 2772.39, "word": " or", "probability": 0.890625}, {"start": 2772.39, "end": 2772.51, "word": " the", "probability": 0.92138671875}, {"start": 2772.51, "end": 2772.81, 
"word": " average", "probability": 0.80029296875}, {"start": 2772.81, "end": 2773.05, "word": " is", "probability": 0.9423828125}, {"start": 2773.05, "end": 2773.23, "word": " the", "probability": 0.91552734375}, {"start": 2773.23, "end": 2773.43, "word": " same", "probability": 0.9091796875}, {"start": 2773.43, "end": 2773.63, "word": " for", "probability": 0.94873046875}, {"start": 2773.63, "end": 2773.81, "word": " each", "probability": 0.93701171875}, {"start": 2773.81, "end": 2774.01, "word": " one.", "probability": 0.92724609375}, {"start": 2774.61, "end": 2774.93, "word": " For", "probability": 0.962890625}, {"start": 2774.93, "end": 2775.21, "word": " example,", "probability": 0.9765625}, {"start": 2775.61, "end": 2775.77, "word": " maybe", "probability": 0.8330078125}, {"start": 2775.77, "end": 2775.93, "word": " the", "probability": 0.916015625}, {"start": 2775.93, "end": 2776.15, "word": " average", "probability": 0.79150390625}, {"start": 2776.15, "end": 2776.33, "word": " of", "probability": 0.86376953125}, {"start": 2776.33, "end": 2776.55, "word": " this", "probability": 0.935546875}, {"start": 2776.55, "end": 2776.89, "word": " class", "probability": 0.97265625}, {"start": 2776.89, "end": 2777.11, "word": " is", "probability": 0.94384765625}, {"start": 2777.11, "end": 2777.47, "word": " 70.", "probability": 0.96484375}, {"start": 2778.45, "end": 2778.77, "word": " The", "probability": 0.88818359375}, {"start": 2778.77, "end": 2779.05, "word": " average", "probability": 0.77685546875}, {"start": 2779.05, "end": 2779.21, "word": " of", "probability": 0.9599609375}, {"start": 2779.21, "end": 2779.47, "word": " class", "probability": 0.94091796875}, {"start": 2779.47, "end": 2779.65, "word": " B", "probability": 0.99169921875}, {"start": 2779.65, "end": 2779.81, "word": " is", "probability": 0.94921875}, {"start": 2779.81, "end": 2780.11, "word": " also", "probability": 0.87451171875}, {"start": 2780.11, "end": 2780.51, "word": " 70.", "probability": 
0.97802734375}, {"start": 2781.91, "end": 2782.47, "word": " But", "probability": 0.94970703125}, {"start": 2782.47, "end": 2782.69, "word": " the", "probability": 0.904296875}, {"start": 2782.69, "end": 2783.29, "word": " scores", "probability": 0.81201171875}, {"start": 2783.29, "end": 2783.95, "word": " are", "probability": 0.94580078125}, {"start": 2783.95, "end": 2784.55, "word": " scattered.", "probability": 0.84326171875}], "temperature": 1.0}, {"id": 102, "seek": 280604, "start": 2785.84, "end": 2806.04, "text": " or spread out in class A maybe much more than in class B. So the mean is not sufficient to describe the data. You have to say that the mean equals such and such and the spread. And one of these measures we'll talk later about range and variance standard deviation. So I mean,", "tokens": [420, 3974, 484, 294, 1508, 316, 1310, 709, 544, 813, 294, 1508, 363, 13, 407, 264, 914, 307, 406, 11563, 281, 6786, 264, 1412, 13, 509, 362, 281, 584, 300, 264, 914, 6915, 1270, 293, 1270, 293, 264, 3974, 13, 400, 472, 295, 613, 8000, 321, 603, 751, 1780, 466, 3613, 293, 21977, 3832, 25163, 13, 407, 286, 914, 11], "avg_logprob": -0.17213114460960763, "compression_ratio": 1.5593220338983051, "no_speech_prob": 0.0, "words": [{"start": 2785.84, "end": 2786.12, "word": " or", "probability": 0.58740234375}, {"start": 2786.12, "end": 2786.64, "word": " spread", "probability": 0.90185546875}, {"start": 2786.64, "end": 2787.2, "word": " out", "probability": 0.88818359375}, {"start": 2787.2, "end": 2787.9, "word": " in", "probability": 0.8876953125}, {"start": 2787.9, "end": 2788.3, "word": " class", "probability": 0.53271484375}, {"start": 2788.3, "end": 2788.66, "word": " A", "probability": 0.873046875}, {"start": 2788.66, "end": 2789.16, "word": " maybe", "probability": 0.310791015625}, {"start": 2789.16, "end": 2789.72, "word": " much", "probability": 0.86572265625}, {"start": 2789.72, "end": 2790.0, "word": " more", "probability": 0.91748046875}, {"start": 2790.0, 
"end": 2790.34, "word": " than", "probability": 0.89013671875}, {"start": 2790.34, "end": 2791.0, "word": " in", "probability": 0.86181640625}, {"start": 2791.0, "end": 2791.26, "word": " class", "probability": 0.87646484375}, {"start": 2791.26, "end": 2791.52, "word": " B.", "probability": 0.9970703125}, {"start": 2792.24, "end": 2792.58, "word": " So", "probability": 0.904296875}, {"start": 2792.58, "end": 2792.8, "word": " the", "probability": 0.69140625}, {"start": 2792.8, "end": 2792.92, "word": " mean", "probability": 0.96533203125}, {"start": 2792.92, "end": 2793.04, "word": " is", "probability": 0.947265625}, {"start": 2793.04, "end": 2793.18, "word": " not", "probability": 0.94775390625}, {"start": 2793.18, "end": 2793.6, "word": " sufficient", "probability": 0.857421875}, {"start": 2793.6, "end": 2793.76, "word": " to", "probability": 0.89013671875}, {"start": 2793.76, "end": 2793.96, "word": " describe", "probability": 0.91064453125}, {"start": 2793.96, "end": 2794.12, "word": " the", "probability": 0.8447265625}, {"start": 2794.12, "end": 2794.28, "word": " data.", "probability": 0.947265625}, {"start": 2794.36, "end": 2794.42, "word": " You", "probability": 0.94189453125}, {"start": 2794.42, "end": 2794.54, "word": " have", "probability": 0.9453125}, {"start": 2794.54, "end": 2794.66, "word": " to", "probability": 0.96923828125}, {"start": 2794.66, "end": 2794.82, "word": " say", "probability": 0.94140625}, {"start": 2794.82, "end": 2795.1, "word": " that", "probability": 0.92333984375}, {"start": 2795.1, "end": 2795.64, "word": " the", "probability": 0.90087890625}, {"start": 2795.64, "end": 2795.86, "word": " mean", "probability": 0.96435546875}, {"start": 2795.86, "end": 2796.42, "word": " equals", "probability": 0.85205078125}, {"start": 2796.42, "end": 2796.66, "word": " such", "probability": 0.943359375}, {"start": 2796.66, "end": 2796.8, "word": " and", "probability": 0.8232421875}, {"start": 2796.8, "end": 2797.1, "word": " such", 
"probability": 0.9521484375}, {"start": 2797.1, "end": 2798.36, "word": " and", "probability": 0.7060546875}, {"start": 2798.36, "end": 2798.64, "word": " the", "probability": 0.888671875}, {"start": 2798.64, "end": 2798.96, "word": " spread.", "probability": 0.91943359375}, {"start": 2799.78, "end": 2800.0, "word": " And", "probability": 0.84814453125}, {"start": 2800.0, "end": 2800.14, "word": " one", "probability": 0.921875}, {"start": 2800.14, "end": 2800.26, "word": " of", "probability": 0.9677734375}, {"start": 2800.26, "end": 2800.46, "word": " these", "probability": 0.8447265625}, {"start": 2800.46, "end": 2800.74, "word": " measures", "probability": 0.8486328125}, {"start": 2800.74, "end": 2801.0, "word": " we'll", "probability": 0.629150390625}, {"start": 2801.0, "end": 2801.22, "word": " talk", "probability": 0.896484375}, {"start": 2801.22, "end": 2801.56, "word": " later", "probability": 0.91845703125}, {"start": 2801.56, "end": 2802.14, "word": " about", "probability": 0.8974609375}, {"start": 2802.14, "end": 2803.06, "word": " range", "probability": 0.75341796875}, {"start": 2803.06, "end": 2803.44, "word": " and", "probability": 0.92578125}, {"start": 2803.44, "end": 2803.94, "word": " variance", "probability": 0.91796875}, {"start": 2803.94, "end": 2804.5, "word": " standard", "probability": 0.82568359375}, {"start": 2804.5, "end": 2804.86, "word": " deviation.", "probability": 0.91552734375}, {"start": 2805.44, "end": 2805.7, "word": " So", "probability": 0.947265625}, {"start": 2805.7, "end": 2805.84, "word": " I", "probability": 0.869140625}, {"start": 2805.84, "end": 2806.04, "word": " mean,", "probability": 0.96484375}], "temperature": 1.0}, {"id": 103, "seek": 283209, "start": 2807.19, "end": 2832.09, "text": " The mean by itself is not sufficient to describe the data. You have to use something else to measure the variation or the spread of the data. Make sense? The first measure of variation, the easiest one, is called the range. 
The range is the simplest measure of variation.", "tokens": [440, 914, 538, 2564, 307, 406, 11563, 281, 6786, 264, 1412, 13, 509, 362, 281, 764, 746, 1646, 281, 3481, 264, 12990, 420, 264, 3974, 295, 264, 1412, 13, 4387, 2020, 30, 440, 700, 3481, 295, 12990, 11, 264, 12889, 472, 11, 307, 1219, 264, 3613, 13, 440, 3613, 307, 264, 22811, 3481, 295, 12990, 13], "avg_logprob": -0.12842653927050138, "compression_ratio": 1.6790123456790123, "no_speech_prob": 0.0, "words": [{"start": 2807.19, "end": 2807.49, "word": " The", "probability": 0.79638671875}, {"start": 2807.49, "end": 2807.67, "word": " mean", "probability": 0.96435546875}, {"start": 2807.67, "end": 2807.91, "word": " by", "probability": 0.9267578125}, {"start": 2807.91, "end": 2808.41, "word": " itself", "probability": 0.8515625}, {"start": 2808.41, "end": 2808.83, "word": " is", "probability": 0.93017578125}, {"start": 2808.83, "end": 2809.03, "word": " not", "probability": 0.9482421875}, {"start": 2809.03, "end": 2809.39, "word": " sufficient", "probability": 0.8427734375}, {"start": 2809.39, "end": 2809.77, "word": " to", "probability": 0.97021484375}, {"start": 2809.77, "end": 2810.21, "word": " describe", "probability": 0.90771484375}, {"start": 2810.21, "end": 2810.39, "word": " the", "probability": 0.89306640625}, {"start": 2810.39, "end": 2810.63, "word": " data.", "probability": 0.95068359375}, {"start": 2811.23, "end": 2811.33, "word": " You", "probability": 0.95751953125}, {"start": 2811.33, "end": 2811.57, "word": " have", "probability": 0.94775390625}, {"start": 2811.57, "end": 2811.69, "word": " to", "probability": 0.96875}, {"start": 2811.69, "end": 2811.89, "word": " use", "probability": 0.88232421875}, {"start": 2811.89, "end": 2812.29, "word": " something", "probability": 0.86328125}, {"start": 2812.29, "end": 2812.83, "word": " else", "probability": 0.9306640625}, {"start": 2812.83, "end": 2813.65, "word": " to", "probability": 0.953125}, {"start": 2813.65, "end": 2814.03, "word": " 
measure", "probability": 0.8818359375}, {"start": 2814.03, "end": 2814.77, "word": " the", "probability": 0.9052734375}, {"start": 2814.77, "end": 2815.19, "word": " variation", "probability": 0.93505859375}, {"start": 2815.19, "end": 2815.55, "word": " or", "probability": 0.89501953125}, {"start": 2815.55, "end": 2815.73, "word": " the", "probability": 0.91015625}, {"start": 2815.73, "end": 2816.09, "word": " spread", "probability": 0.9052734375}, {"start": 2816.09, "end": 2816.35, "word": " of", "probability": 0.96875}, {"start": 2816.35, "end": 2816.49, "word": " the", "probability": 0.91455078125}, {"start": 2816.49, "end": 2816.77, "word": " data.", "probability": 0.9326171875}, {"start": 2817.47, "end": 2817.65, "word": " Make", "probability": 0.77880859375}, {"start": 2817.65, "end": 2817.95, "word": " sense?", "probability": 0.8408203125}, {"start": 2822.17, "end": 2822.77, "word": " The", "probability": 0.791015625}, {"start": 2822.77, "end": 2823.13, "word": " first", "probability": 0.89208984375}, {"start": 2823.13, "end": 2823.41, "word": " measure", "probability": 0.8681640625}, {"start": 2823.41, "end": 2823.59, "word": " of", "probability": 0.96435546875}, {"start": 2823.59, "end": 2824.01, "word": " variation,", "probability": 0.9091796875}, {"start": 2824.81, "end": 2824.93, "word": " the", "probability": 0.91943359375}, {"start": 2824.93, "end": 2825.23, "word": " easiest", "probability": 0.90234375}, {"start": 2825.23, "end": 2825.67, "word": " one,", "probability": 0.9267578125}, {"start": 2825.81, "end": 2825.87, "word": " is", "probability": 0.9404296875}, {"start": 2825.87, "end": 2826.15, "word": " called", "probability": 0.91650390625}, {"start": 2826.15, "end": 2826.37, "word": " the", "probability": 0.7861328125}, {"start": 2826.37, "end": 2826.65, "word": " range.", "probability": 0.7666015625}, {"start": 2828.67, "end": 2828.89, "word": " The", "probability": 0.83740234375}, {"start": 2828.89, "end": 2829.25, "word": " range", 
"probability": 0.89453125}, {"start": 2829.25, "end": 2830.85, "word": " is", "probability": 0.7490234375}, {"start": 2830.85, "end": 2830.93, "word": " the", "probability": 0.54443359375}, {"start": 2830.93, "end": 2831.23, "word": " simplest", "probability": 0.90185546875}, {"start": 2831.23, "end": 2831.59, "word": " measure", "probability": 0.857421875}, {"start": 2831.59, "end": 2831.77, "word": " of", "probability": 0.9658203125}, {"start": 2831.77, "end": 2832.09, "word": " variation.", "probability": 0.9091796875}], "temperature": 1.0}, {"id": 104, "seek": 286349, "start": 2834.21, "end": 2863.49, "text": " The range is just the difference or the distance between the largest and the smallest value. For example, suppose the minimum score for this class is 40 and the maximum is 90. So the range is 50, 90 minus 40. Now imagine that the minimum score for this class is 60 and the maximum is 80, so 20.", "tokens": [440, 3613, 307, 445, 264, 2649, 420, 264, 4560, 1296, 264, 6443, 293, 264, 16998, 2158, 13, 1171, 1365, 11, 7297, 264, 7285, 6175, 337, 341, 1508, 307, 3356, 293, 264, 6674, 307, 4289, 13, 407, 264, 3613, 307, 2625, 11, 4289, 3175, 3356, 13, 823, 3811, 300, 264, 7285, 6175, 337, 341, 1508, 307, 4060, 293, 264, 6674, 307, 4688, 11, 370, 945, 13], "avg_logprob": -0.12736742582285043, "compression_ratio": 1.755952380952381, "no_speech_prob": 0.0, "words": [{"start": 2834.21, "end": 2834.47, "word": " The", "probability": 0.69091796875}, {"start": 2834.47, "end": 2834.71, "word": " range", "probability": 0.84765625}, {"start": 2834.71, "end": 2834.91, "word": " is", "probability": 0.9375}, {"start": 2834.91, "end": 2835.35, "word": " just", "probability": 0.90576171875}, {"start": 2835.35, "end": 2835.59, "word": " the", "probability": 0.90869140625}, {"start": 2835.59, "end": 2836.15, "word": " difference", "probability": 0.82568359375}, {"start": 2836.15, "end": 2836.59, "word": " or", "probability": 0.76318359375}, {"start": 2836.59, "end": 2836.75, 
"word": " the", "probability": 0.880859375}, {"start": 2836.75, "end": 2837.33, "word": " distance", "probability": 0.9365234375}, {"start": 2837.33, "end": 2838.59, "word": " between", "probability": 0.8203125}, {"start": 2838.59, "end": 2838.85, "word": " the", "probability": 0.91455078125}, {"start": 2838.85, "end": 2839.21, "word": " largest", "probability": 0.919921875}, {"start": 2839.21, "end": 2839.75, "word": " and", "probability": 0.939453125}, {"start": 2839.75, "end": 2839.91, "word": " the", "probability": 0.7392578125}, {"start": 2839.91, "end": 2840.19, "word": " smallest", "probability": 0.9482421875}, {"start": 2840.19, "end": 2840.57, "word": " value.", "probability": 0.9541015625}, {"start": 2840.83, "end": 2840.97, "word": " For", "probability": 0.9453125}, {"start": 2840.97, "end": 2841.33, "word": " example,", "probability": 0.96923828125}, {"start": 2841.97, "end": 2842.41, "word": " suppose", "probability": 0.86376953125}, {"start": 2842.41, "end": 2843.55, "word": " the", "probability": 0.84814453125}, {"start": 2843.55, "end": 2843.83, "word": " minimum", "probability": 0.94482421875}, {"start": 2843.83, "end": 2844.21, "word": " score", "probability": 0.900390625}, {"start": 2844.21, "end": 2844.43, "word": " for", "probability": 0.93603515625}, {"start": 2844.43, "end": 2844.63, "word": " this", "probability": 0.947265625}, {"start": 2844.63, "end": 2844.93, "word": " class", "probability": 0.96533203125}, {"start": 2844.93, "end": 2845.13, "word": " is", "probability": 0.94873046875}, {"start": 2845.13, "end": 2845.49, "word": " 40", "probability": 0.92138671875}, {"start": 2845.49, "end": 2846.63, "word": " and", "probability": 0.74560546875}, {"start": 2846.63, "end": 2846.83, "word": " the", "probability": 0.91357421875}, {"start": 2846.83, "end": 2847.07, "word": " maximum", "probability": 0.92041015625}, {"start": 2847.07, "end": 2847.37, "word": " is", "probability": 0.943359375}, {"start": 2847.37, "end": 2847.79, "word": " 90.", 
"probability": 0.97998046875}, {"start": 2848.89, "end": 2849.53, "word": " So", "probability": 0.9296875}, {"start": 2849.53, "end": 2849.67, "word": " the", "probability": 0.6396484375}, {"start": 2849.67, "end": 2849.89, "word": " range", "probability": 0.88232421875}, {"start": 2849.89, "end": 2850.07, "word": " is", "probability": 0.94873046875}, {"start": 2850.07, "end": 2850.39, "word": " 50,", "probability": 0.923828125}, {"start": 2850.67, "end": 2850.99, "word": " 90", "probability": 0.970703125}, {"start": 2850.99, "end": 2851.25, "word": " minus", "probability": 0.8779296875}, {"start": 2851.25, "end": 2851.69, "word": " 40.", "probability": 0.974609375}, {"start": 2853.05, "end": 2853.23, "word": " Now", "probability": 0.94580078125}, {"start": 2853.23, "end": 2853.65, "word": " imagine", "probability": 0.7998046875}, {"start": 2853.65, "end": 2854.63, "word": " that", "probability": 0.86083984375}, {"start": 2854.63, "end": 2856.45, "word": " the", "probability": 0.8994140625}, {"start": 2856.45, "end": 2856.73, "word": " minimum", "probability": 0.95703125}, {"start": 2856.73, "end": 2857.25, "word": " score", "probability": 0.9052734375}, {"start": 2857.25, "end": 2858.15, "word": " for", "probability": 0.9453125}, {"start": 2858.15, "end": 2858.37, "word": " this", "probability": 0.947265625}, {"start": 2858.37, "end": 2858.67, "word": " class", "probability": 0.9716796875}, {"start": 2858.67, "end": 2858.85, "word": " is", "probability": 0.89306640625}, {"start": 2858.85, "end": 2859.21, "word": " 60", "probability": 0.978515625}, {"start": 2859.21, "end": 2860.21, "word": " and", "probability": 0.8369140625}, {"start": 2860.21, "end": 2860.41, "word": " the", "probability": 0.912109375}, {"start": 2860.41, "end": 2860.79, "word": " maximum", "probability": 0.923828125}, {"start": 2860.79, "end": 2861.07, "word": " is", "probability": 0.9482421875}, {"start": 2861.07, "end": 2861.49, "word": " 80,", "probability": 0.982421875}, {"start": 2862.29, 
"end": 2863.15, "word": " so", "probability": 0.8603515625}, {"start": 2863.15, "end": 2863.49, "word": " 20.", "probability": 0.939453125}], "temperature": 1.0}, {"id": 105, "seek": 288217, "start": 2865.85, "end": 2882.17, "text": " If we replace 80 by 100, I mean the minimum is 60 and the maximum is 100, it's 40. That means a range is affected by outliers because it depends only on two values.", "tokens": [759, 321, 7406, 4688, 538, 2319, 11, 286, 914, 264, 7285, 307, 4060, 293, 264, 6674, 307, 2319, 11, 309, 311, 3356, 13, 663, 1355, 257, 3613, 307, 8028, 538, 484, 23646, 570, 309, 5946, 787, 322, 732, 4190, 13], "avg_logprob": -0.19988567363925097, "compression_ratio": 1.2790697674418605, "no_speech_prob": 0.0, "words": [{"start": 2865.85, "end": 2866.13, "word": " If", "probability": 0.78173828125}, {"start": 2866.13, "end": 2866.33, "word": " we", "probability": 0.95166015625}, {"start": 2866.33, "end": 2866.75, "word": " replace", "probability": 0.39990234375}, {"start": 2866.75, "end": 2867.33, "word": " 80", "probability": 0.9208984375}, {"start": 2867.33, "end": 2867.81, "word": " by", "probability": 0.9580078125}, {"start": 2867.81, "end": 2868.31, "word": " 100,", "probability": 0.91357421875}, {"start": 2869.05, "end": 2869.17, "word": " I", "probability": 0.91162109375}, {"start": 2869.17, "end": 2869.25, "word": " mean", "probability": 0.9677734375}, {"start": 2869.25, "end": 2869.39, "word": " the", "probability": 0.6572265625}, {"start": 2869.39, "end": 2869.59, "word": " minimum", "probability": 0.9814453125}, {"start": 2869.59, "end": 2869.79, "word": " is", "probability": 0.93701171875}, {"start": 2869.79, "end": 2870.21, "word": " 60", "probability": 0.974609375}, {"start": 2870.21, "end": 2871.03, "word": " and", "probability": 0.6259765625}, {"start": 2871.03, "end": 2871.17, "word": " the", "probability": 0.9033203125}, {"start": 2871.17, "end": 2871.45, "word": " maximum", "probability": 0.93994140625}, {"start": 2871.45, "end": 2871.61, 
"word": " is", "probability": 0.947265625}, {"start": 2871.61, "end": 2872.11, "word": " 100,", "probability": 0.8203125}, {"start": 2872.51, "end": 2873.07, "word": " it's", "probability": 0.729736328125}, {"start": 2873.07, "end": 2873.55, "word": " 40.", "probability": 0.9697265625}, {"start": 2874.37, "end": 2874.75, "word": " That", "probability": 0.8388671875}, {"start": 2874.75, "end": 2875.21, "word": " means", "probability": 0.94189453125}, {"start": 2875.21, "end": 2875.55, "word": " a", "probability": 0.443115234375}, {"start": 2875.55, "end": 2875.93, "word": " range", "probability": 0.90283203125}, {"start": 2875.93, "end": 2876.31, "word": " is", "probability": 0.94482421875}, {"start": 2876.31, "end": 2876.75, "word": " affected", "probability": 0.86572265625}, {"start": 2876.75, "end": 2877.03, "word": " by", "probability": 0.9697265625}, {"start": 2877.03, "end": 2877.45, "word": " outliers", "probability": 0.705078125}, {"start": 2877.45, "end": 2880.39, "word": " because", "probability": 0.410888671875}, {"start": 2880.39, "end": 2880.63, "word": " it", "probability": 0.94384765625}, {"start": 2880.63, "end": 2880.95, "word": " depends", "probability": 0.92919921875}, {"start": 2880.95, "end": 2881.35, "word": " only", "probability": 0.90283203125}, {"start": 2881.35, "end": 2881.55, "word": " on", "probability": 0.8837890625}, {"start": 2881.55, "end": 2881.69, "word": " two", "probability": 0.90380859375}, {"start": 2881.69, "end": 2882.17, "word": " values.", "probability": 0.974609375}], "temperature": 1.0}, {"id": 106, "seek": 290936, "start": 2883.48, "end": 2909.36, "text": " maximum and minimum value. So it should be affected by outliers. So range is sensitive to outliers. So if the data has the data set has outliers, then in this case, you have to avoid using range because range only based on two values. So it should be affected by outliers. Now for the for simple example, suppose we have this data. 
The minimum value is one.", "tokens": [6674, 293, 7285, 2158, 13, 407, 309, 820, 312, 8028, 538, 484, 23646, 13, 407, 3613, 307, 9477, 281, 484, 23646, 13, 407, 498, 264, 1412, 575, 264, 1412, 992, 575, 484, 23646, 11, 550, 294, 341, 1389, 11, 291, 362, 281, 5042, 1228, 3613, 570, 3613, 787, 2361, 322, 732, 4190, 13, 407, 309, 820, 312, 8028, 538, 484, 23646, 13, 823, 337, 264, 337, 2199, 1365, 11, 7297, 321, 362, 341, 1412, 13, 440, 7285, 2158, 307, 472, 13], "avg_logprob": -0.1763529007027789, "compression_ratio": 1.8842105263157896, "no_speech_prob": 0.0, "words": [{"start": 2883.48, "end": 2884.04, "word": " maximum", "probability": 0.7216796875}, {"start": 2884.04, "end": 2884.6, "word": " and", "probability": 0.91162109375}, {"start": 2884.6, "end": 2884.9, "word": " minimum", "probability": 0.958984375}, {"start": 2884.9, "end": 2885.28, "word": " value.", "probability": 0.654296875}, {"start": 2885.42, "end": 2885.54, "word": " So", "probability": 0.91552734375}, {"start": 2885.54, "end": 2885.76, "word": " it", "probability": 0.83544921875}, {"start": 2885.76, "end": 2885.94, "word": " should", "probability": 0.95068359375}, {"start": 2885.94, "end": 2886.1, "word": " be", "probability": 0.9443359375}, {"start": 2886.1, "end": 2886.42, "word": " affected", "probability": 0.84228515625}, {"start": 2886.42, "end": 2886.62, "word": " by", "probability": 0.96435546875}, {"start": 2886.62, "end": 2886.98, "word": " outliers.", "probability": 0.862548828125}, {"start": 2887.34, "end": 2887.68, "word": " So", "probability": 0.888671875}, {"start": 2887.68, "end": 2888.04, "word": " range", "probability": 0.8212890625}, {"start": 2888.04, "end": 2888.48, "word": " is", "probability": 0.94970703125}, {"start": 2888.48, "end": 2889.08, "word": " sensitive", "probability": 0.89697265625}, {"start": 2889.08, "end": 2889.32, "word": " to", "probability": 0.96142578125}, {"start": 2889.32, "end": 2889.74, "word": " outliers.", "probability": 0.951171875}, {"start": 
2890.08, "end": 2890.62, "word": " So", "probability": 0.92724609375}, {"start": 2890.62, "end": 2890.82, "word": " if", "probability": 0.9208984375}, {"start": 2890.82, "end": 2891.0, "word": " the", "probability": 0.908203125}, {"start": 2891.0, "end": 2891.24, "word": " data", "probability": 0.8623046875}, {"start": 2891.24, "end": 2891.5, "word": " has", "probability": 0.5078125}, {"start": 2891.5, "end": 2892.0, "word": " the", "probability": 0.49072265625}, {"start": 2892.0, "end": 2892.24, "word": " data", "probability": 0.71337890625}, {"start": 2892.24, "end": 2892.5, "word": " set", "probability": 0.92138671875}, {"start": 2892.5, "end": 2892.78, "word": " has", "probability": 0.86083984375}, {"start": 2892.78, "end": 2893.34, "word": " outliers,", "probability": 0.946044921875}, {"start": 2893.62, "end": 2894.0, "word": " then", "probability": 0.83447265625}, {"start": 2894.0, "end": 2894.38, "word": " in", "probability": 0.87548828125}, {"start": 2894.38, "end": 2894.58, "word": " this", "probability": 0.94873046875}, {"start": 2894.58, "end": 2894.76, "word": " case,", "probability": 0.921875}, {"start": 2894.82, "end": 2894.88, "word": " you", "probability": 0.9599609375}, {"start": 2894.88, "end": 2895.02, "word": " have", "probability": 0.94873046875}, {"start": 2895.02, "end": 2895.12, "word": " to", "probability": 0.974609375}, {"start": 2895.12, "end": 2895.66, "word": " avoid", "probability": 0.91357421875}, {"start": 2895.66, "end": 2896.66, "word": " using", "probability": 0.91748046875}, {"start": 2896.66, "end": 2897.58, "word": " range", "probability": 0.84619140625}, {"start": 2897.58, "end": 2897.92, "word": " because", "probability": 0.728515625}, {"start": 2897.92, "end": 2898.26, "word": " range", "probability": 0.86279296875}, {"start": 2898.26, "end": 2899.0, "word": " only", "probability": 0.86865234375}, {"start": 2899.0, "end": 2899.3, "word": " based", "probability": 0.76171875}, {"start": 2899.3, "end": 2899.48, "word": " on", 
"probability": 0.9453125}, {"start": 2899.48, "end": 2899.64, "word": " two", "probability": 0.91650390625}, {"start": 2899.64, "end": 2900.08, "word": " values.", "probability": 0.966796875}, {"start": 2900.58, "end": 2900.72, "word": " So", "probability": 0.958984375}, {"start": 2900.72, "end": 2901.12, "word": " it", "probability": 0.93115234375}, {"start": 2901.12, "end": 2901.3, "word": " should", "probability": 0.95849609375}, {"start": 2901.3, "end": 2901.44, "word": " be", "probability": 0.95068359375}, {"start": 2901.44, "end": 2901.78, "word": " affected", "probability": 0.86083984375}, {"start": 2901.78, "end": 2901.98, "word": " by", "probability": 0.875}, {"start": 2901.98, "end": 2902.36, "word": " outliers.", "probability": 0.95849609375}, {"start": 2903.34, "end": 2903.48, "word": " Now", "probability": 0.7216796875}, {"start": 2903.48, "end": 2903.7, "word": " for", "probability": 0.7373046875}, {"start": 2903.7, "end": 2903.94, "word": " the", "probability": 0.80078125}, {"start": 2903.94, "end": 2904.24, "word": " for", "probability": 0.7353515625}, {"start": 2904.24, "end": 2904.66, "word": " simple", "probability": 0.8525390625}, {"start": 2904.66, "end": 2905.02, "word": " example,", "probability": 0.87646484375}, {"start": 2905.14, "end": 2905.38, "word": " suppose", "probability": 0.89306640625}, {"start": 2905.38, "end": 2905.52, "word": " we", "probability": 0.951171875}, {"start": 2905.52, "end": 2905.66, "word": " have", "probability": 0.9462890625}, {"start": 2905.66, "end": 2905.88, "word": " this", "probability": 0.94921875}, {"start": 2905.88, "end": 2906.22, "word": " data.", "probability": 0.92822265625}, {"start": 2907.78, "end": 2908.34, "word": " The", "probability": 0.86181640625}, {"start": 2908.34, "end": 2908.54, "word": " minimum", "probability": 0.9677734375}, {"start": 2908.54, "end": 2908.92, "word": " value", "probability": 0.97509765625}, {"start": 2908.92, "end": 2909.1, "word": " is", "probability": 0.9482421875}, 
{"start": 2909.1, "end": 2909.36, "word": " one.", "probability": 0.81982421875}], "temperature": 1.0}, {"id": 107, "seek": 294024, "start": 2911.88, "end": 2940.24, "text": " I mean, the smallest value is one, and the largest or the maximum is 13. So it makes sense that the range of the data is the difference between these two values. So 13 minus one is 12. Now, imagine that we just replace 13 by 100. So the new range will be equal to 100 minus 199.", "tokens": [286, 914, 11, 264, 16998, 2158, 307, 472, 11, 293, 264, 6443, 420, 264, 6674, 307, 3705, 13, 407, 309, 1669, 2020, 300, 264, 3613, 295, 264, 1412, 307, 264, 2649, 1296, 613, 732, 4190, 13, 407, 3705, 3175, 472, 307, 2272, 13, 823, 11, 3811, 300, 321, 445, 7406, 3705, 538, 2319, 13, 407, 264, 777, 3613, 486, 312, 2681, 281, 2319, 3175, 4303, 13], "avg_logprob": -0.1677938410595282, "compression_ratio": 1.532967032967033, "no_speech_prob": 0.0, "words": [{"start": 2911.88, "end": 2912.06, "word": " I", "probability": 0.859375}, {"start": 2912.06, "end": 2912.2, "word": " mean,", "probability": 0.96044921875}, {"start": 2912.24, "end": 2912.36, "word": " the", "probability": 0.91162109375}, {"start": 2912.36, "end": 2912.68, "word": " smallest", "probability": 0.93505859375}, {"start": 2912.68, "end": 2912.96, "word": " value", "probability": 0.67919921875}, {"start": 2912.96, "end": 2913.16, "word": " is", "probability": 0.8623046875}, {"start": 2913.16, "end": 2913.32, "word": " one,", "probability": 0.59912109375}, {"start": 2913.52, "end": 2913.78, "word": " and", "probability": 0.9267578125}, {"start": 2913.78, "end": 2913.92, "word": " the", "probability": 0.9130859375}, {"start": 2913.92, "end": 2914.24, "word": " largest", "probability": 0.88525390625}, {"start": 2914.24, "end": 2914.52, "word": " or", "probability": 0.79638671875}, {"start": 2914.52, "end": 2914.68, "word": " the", "probability": 0.91357421875}, {"start": 2914.68, "end": 2914.98, "word": " maximum", "probability": 0.89013671875}, 
{"start": 2914.98, "end": 2915.18, "word": " is", "probability": 0.92822265625}, {"start": 2915.18, "end": 2915.62, "word": " 13.", "probability": 0.908203125}, {"start": 2916.14, "end": 2916.78, "word": " So", "probability": 0.93310546875}, {"start": 2916.78, "end": 2916.9, "word": " it", "probability": 0.8193359375}, {"start": 2916.9, "end": 2917.1, "word": " makes", "probability": 0.82373046875}, {"start": 2917.1, "end": 2917.34, "word": " sense", "probability": 0.82421875}, {"start": 2917.34, "end": 2917.58, "word": " that", "probability": 0.896484375}, {"start": 2917.58, "end": 2917.76, "word": " the", "probability": 0.9150390625}, {"start": 2917.76, "end": 2918.16, "word": " range", "probability": 0.88818359375}, {"start": 2918.16, "end": 2918.88, "word": " of", "probability": 0.96875}, {"start": 2918.88, "end": 2919.0, "word": " the", "probability": 0.92333984375}, {"start": 2919.0, "end": 2919.36, "word": " data", "probability": 0.9404296875}, {"start": 2919.36, "end": 2920.44, "word": " is", "probability": 0.92919921875}, {"start": 2920.44, "end": 2920.56, "word": " the", "probability": 0.92236328125}, {"start": 2920.56, "end": 2920.86, "word": " difference", "probability": 0.85400390625}, {"start": 2920.86, "end": 2921.14, "word": " between", "probability": 0.86572265625}, {"start": 2921.14, "end": 2921.4, "word": " these", "probability": 0.849609375}, {"start": 2921.4, "end": 2921.84, "word": " two", "probability": 0.47607421875}, {"start": 2921.84, "end": 2922.1, "word": " values.", "probability": 0.9453125}, {"start": 2922.28, "end": 2922.28, "word": " So", "probability": 0.94140625}, {"start": 2922.28, "end": 2922.6, "word": " 13", "probability": 0.8876953125}, {"start": 2922.6, "end": 2922.86, "word": " minus", "probability": 0.966796875}, {"start": 2922.86, "end": 2923.2, "word": " one", "probability": 0.60302734375}, {"start": 2923.2, "end": 2923.84, "word": " is", "probability": 0.923828125}, {"start": 2923.84, "end": 2924.18, "word": " 12.", 
"probability": 0.8271484375}, {"start": 2925.94, "end": 2926.46, "word": " Now,", "probability": 0.6015625}, {"start": 2927.9, "end": 2928.26, "word": " imagine", "probability": 0.92236328125}, {"start": 2928.26, "end": 2928.54, "word": " that", "probability": 0.9365234375}, {"start": 2928.54, "end": 2928.92, "word": " we", "probability": 0.95458984375}, {"start": 2928.92, "end": 2929.68, "word": " just", "probability": 0.9033203125}, {"start": 2929.68, "end": 2930.36, "word": " replace", "probability": 0.755859375}, {"start": 2930.36, "end": 2931.94, "word": " 13", "probability": 0.91845703125}, {"start": 2931.94, "end": 2932.66, "word": " by", "probability": 0.9765625}, {"start": 2932.66, "end": 2933.02, "word": " 100.", "probability": 0.80810546875}, {"start": 2935.4, "end": 2936.04, "word": " So", "probability": 0.943359375}, {"start": 2936.04, "end": 2936.4, "word": " the", "probability": 0.818359375}, {"start": 2936.4, "end": 2936.6, "word": " new", "probability": 0.9306640625}, {"start": 2936.6, "end": 2937.1, "word": " range", "probability": 0.89697265625}, {"start": 2937.1, "end": 2938.04, "word": " will", "probability": 0.88427734375}, {"start": 2938.04, "end": 2938.26, "word": " be", "probability": 0.95947265625}, {"start": 2938.26, "end": 2938.56, "word": " equal", "probability": 0.9072265625}, {"start": 2938.56, "end": 2938.9, "word": " to", "probability": 0.9755859375}, {"start": 2938.9, "end": 2939.42, "word": " 100", "probability": 0.9052734375}, {"start": 2939.42, "end": 2939.88, "word": " minus", "probability": 0.9814453125}, {"start": 2939.88, "end": 2940.24, "word": " 199.", "probability": 0.89794921875}], "temperature": 1.0}, {"id": 108, "seek": 296304, "start": 2941.78, "end": 2963.04, "text": " So the previous range was 12. It becomes now 99 after we replace the maximum by 100. So it means that range is affected by extreme values. So the mean and range both are sensitive to outliers. 
So you have to link between", "tokens": [407, 264, 3894, 3613, 390, 2272, 13, 467, 3643, 586, 11803, 934, 321, 7406, 264, 6674, 538, 2319, 13, 407, 309, 1355, 300, 3613, 307, 8028, 538, 8084, 4190, 13, 407, 264, 914, 293, 3613, 1293, 366, 9477, 281, 484, 23646, 13, 407, 291, 362, 281, 2113, 1296], "avg_logprob": -0.15082908649833834, "compression_ratio": 1.4444444444444444, "no_speech_prob": 0.0, "words": [{"start": 2941.78, "end": 2942.36, "word": " So", "probability": 0.8583984375}, {"start": 2942.36, "end": 2942.78, "word": " the", "probability": 0.66845703125}, {"start": 2942.78, "end": 2943.32, "word": " previous", "probability": 0.8486328125}, {"start": 2943.32, "end": 2943.82, "word": " range", "probability": 0.88330078125}, {"start": 2943.82, "end": 2944.24, "word": " was", "probability": 0.95556640625}, {"start": 2944.24, "end": 2944.78, "word": " 12.", "probability": 0.89453125}, {"start": 2945.42, "end": 2945.66, "word": " It", "probability": 0.958984375}, {"start": 2945.66, "end": 2946.1, "word": " becomes", "probability": 0.8173828125}, {"start": 2946.1, "end": 2946.4, "word": " now", "probability": 0.93310546875}, {"start": 2946.4, "end": 2946.76, "word": " 99", "probability": 0.96435546875}, {"start": 2946.76, "end": 2947.44, "word": " after", "probability": 0.6923828125}, {"start": 2947.44, "end": 2947.66, "word": " we", "probability": 0.91943359375}, {"start": 2947.66, "end": 2948.1, "word": " replace", "probability": 0.6416015625}, {"start": 2948.1, "end": 2948.34, "word": " the", "probability": 0.91650390625}, {"start": 2948.34, "end": 2948.72, "word": " maximum", "probability": 0.939453125}, {"start": 2948.72, "end": 2948.98, "word": " by", "probability": 0.9736328125}, {"start": 2948.98, "end": 2949.38, "word": " 100.", "probability": 0.79443359375}, {"start": 2949.92, "end": 2950.22, "word": " So", "probability": 0.94482421875}, {"start": 2950.22, "end": 2950.38, "word": " it", "probability": 0.92431640625}, {"start": 2950.38, "end": 2950.66, 
"word": " means", "probability": 0.9169921875}, {"start": 2950.66, "end": 2951.06, "word": " that", "probability": 0.880859375}, {"start": 2951.06, "end": 2951.56, "word": " range", "probability": 0.88720703125}, {"start": 2951.56, "end": 2951.72, "word": " is", "probability": 0.9375}, {"start": 2951.72, "end": 2952.1, "word": " affected", "probability": 0.84619140625}, {"start": 2952.1, "end": 2952.84, "word": " by", "probability": 0.97314453125}, {"start": 2952.84, "end": 2953.86, "word": " extreme", "probability": 0.70751953125}, {"start": 2953.86, "end": 2954.24, "word": " values.", "probability": 0.900390625}, {"start": 2954.82, "end": 2955.1, "word": " So", "probability": 0.94873046875}, {"start": 2955.1, "end": 2955.26, "word": " the", "probability": 0.8994140625}, {"start": 2955.26, "end": 2955.56, "word": " mean", "probability": 0.9755859375}, {"start": 2955.56, "end": 2956.26, "word": " and", "probability": 0.94189453125}, {"start": 2956.26, "end": 2956.78, "word": " range", "probability": 0.79736328125}, {"start": 2956.78, "end": 2958.32, "word": " both", "probability": 0.48388671875}, {"start": 2958.32, "end": 2958.74, "word": " are", "probability": 0.9404296875}, {"start": 2958.74, "end": 2959.26, "word": " sensitive", "probability": 0.88525390625}, {"start": 2959.26, "end": 2959.54, "word": " to", "probability": 0.95947265625}, {"start": 2959.54, "end": 2959.94, "word": " outliers.", "probability": 0.943115234375}, {"start": 2960.48, "end": 2961.26, "word": " So", "probability": 0.95361328125}, {"start": 2961.26, "end": 2961.4, "word": " you", "probability": 0.8994140625}, {"start": 2961.4, "end": 2961.58, "word": " have", "probability": 0.94775390625}, {"start": 2961.58, "end": 2961.82, "word": " to", "probability": 0.97021484375}, {"start": 2961.82, "end": 2962.62, "word": " link", "probability": 0.951171875}, {"start": 2962.62, "end": 2963.04, "word": " between", "probability": 0.8818359375}], "temperature": 1.0}, {"id": 109, "seek": 299119, 
"start": 2966.41, "end": 2991.19, "text": " measures of center tendency and measures of variation. Mean and range are affected by outliers. The mean and range are affected by outliers. This is an example. So it's very easy to compute the mean. Next, if you look at why the range can be misleading.", "tokens": [8000, 295, 3056, 18187, 293, 8000, 295, 12990, 13, 12302, 293, 3613, 366, 8028, 538, 484, 23646, 13, 440, 914, 293, 3613, 366, 8028, 538, 484, 23646, 13, 639, 307, 364, 1365, 13, 407, 309, 311, 588, 1858, 281, 14722, 264, 914, 13, 3087, 11, 498, 291, 574, 412, 983, 264, 3613, 393, 312, 36429, 13], "avg_logprob": -0.14775219298245615, "compression_ratio": 1.6493506493506493, "no_speech_prob": 0.0, "words": [{"start": 2966.41, "end": 2967.17, "word": " measures", "probability": 0.433837890625}, {"start": 2967.17, "end": 2967.59, "word": " of", "probability": 0.96142578125}, {"start": 2967.59, "end": 2967.85, "word": " center", "probability": 0.6396484375}, {"start": 2967.85, "end": 2968.31, "word": " tendency", "probability": 0.74853515625}, {"start": 2968.31, "end": 2969.29, "word": " and", "probability": 0.6875}, {"start": 2969.29, "end": 2969.67, "word": " measures", "probability": 0.8203125}, {"start": 2969.67, "end": 2970.21, "word": " of", "probability": 0.96728515625}, {"start": 2970.21, "end": 2970.87, "word": " variation.", "probability": 0.86669921875}, {"start": 2971.69, "end": 2971.79, "word": " Mean", "probability": 0.9287109375}, {"start": 2971.79, "end": 2971.99, "word": " and", "probability": 0.947265625}, {"start": 2971.99, "end": 2972.25, "word": " range", "probability": 0.8642578125}, {"start": 2972.25, "end": 2972.47, "word": " are", "probability": 0.9404296875}, {"start": 2972.47, "end": 2972.87, "word": " affected", "probability": 0.8779296875}, {"start": 2972.87, "end": 2973.13, "word": " by", "probability": 0.96923828125}, {"start": 2973.13, "end": 2973.61, "word": " outliers.", "probability": 0.943359375}, {"start": 2974.43, "end": 
2975.05, "word": " The", "probability": 0.73095703125}, {"start": 2975.05, "end": 2975.23, "word": " mean", "probability": 0.96435546875}, {"start": 2975.23, "end": 2975.43, "word": " and", "probability": 0.93359375}, {"start": 2975.43, "end": 2976.91, "word": " range", "probability": 0.75}, {"start": 2976.91, "end": 2977.11, "word": " are", "probability": 0.92822265625}, {"start": 2977.11, "end": 2977.47, "word": " affected", "probability": 0.8701171875}, {"start": 2977.47, "end": 2977.91, "word": " by", "probability": 0.96875}, {"start": 2977.91, "end": 2978.65, "word": " outliers.", "probability": 0.927490234375}, {"start": 2978.79, "end": 2978.87, "word": " This", "probability": 0.63427734375}, {"start": 2978.87, "end": 2979.03, "word": " is", "probability": 0.943359375}, {"start": 2979.03, "end": 2979.23, "word": " an", "probability": 0.9580078125}, {"start": 2979.23, "end": 2979.87, "word": " example.", "probability": 0.95849609375}, {"start": 2980.23, "end": 2980.33, "word": " So", "probability": 0.83837890625}, {"start": 2980.33, "end": 2980.77, "word": " it's", "probability": 0.701904296875}, {"start": 2980.77, "end": 2980.99, "word": " very", "probability": 0.84375}, {"start": 2980.99, "end": 2981.25, "word": " easy", "probability": 0.91064453125}, {"start": 2981.25, "end": 2981.45, "word": " to", "probability": 0.970703125}, {"start": 2981.45, "end": 2981.77, "word": " compute", "probability": 0.86767578125}, {"start": 2981.77, "end": 2981.95, "word": " the", "probability": 0.921875}, {"start": 2981.95, "end": 2982.15, "word": " mean.", "probability": 0.9638671875}, {"start": 2984.15, "end": 2984.67, "word": " Next,", "probability": 0.92822265625}, {"start": 2985.61, "end": 2986.01, "word": " if", "probability": 0.943359375}, {"start": 2986.01, "end": 2986.09, "word": " you", "probability": 0.947265625}, {"start": 2986.09, "end": 2986.27, "word": " look", "probability": 0.96826171875}, {"start": 2986.27, "end": 2987.29, "word": " at", "probability": 
0.9697265625}, {"start": 2987.29, "end": 2989.33, "word": " why", "probability": 0.900390625}, {"start": 2989.33, "end": 2989.55, "word": " the", "probability": 0.9228515625}, {"start": 2989.55, "end": 2989.97, "word": " range", "probability": 0.8935546875}, {"start": 2989.97, "end": 2990.51, "word": " can", "probability": 0.94482421875}, {"start": 2990.51, "end": 2990.71, "word": " be", "probability": 0.96044921875}, {"start": 2990.71, "end": 2991.19, "word": " misleading.", "probability": 0.9794921875}], "temperature": 1.0}, {"id": 110, "seek": 302249, "start": 2993.83, "end": 3022.49, "text": " Sometimes you report the range and the range does not give an appropriate answer or appropriate result because number one ignores the way in which the data are distributed. For example, if you look at this specific data, we have data seven, eight, nine, ten, eleven and twelve. So the range is five. Twelve minus seven is five. Now if you look at the other data,", "tokens": [4803, 291, 2275, 264, 3613, 293, 264, 3613, 775, 406, 976, 364, 6854, 1867, 420, 6854, 1874, 570, 1230, 472, 5335, 2706, 264, 636, 294, 597, 264, 1412, 366, 12631, 13, 1171, 1365, 11, 498, 291, 574, 412, 341, 2685, 1412, 11, 321, 362, 1412, 3407, 11, 3180, 11, 4949, 11, 2064, 11, 21090, 293, 14390, 13, 407, 264, 3613, 307, 1732, 13, 48063, 3175, 3407, 307, 1732, 13, 823, 498, 291, 574, 412, 264, 661, 1412, 11], "avg_logprob": -0.16327135585531405, "compression_ratio": 1.736842105263158, "no_speech_prob": 0.0, "words": [{"start": 2993.83, "end": 2994.35, "word": " Sometimes", "probability": 0.51318359375}, {"start": 2994.35, "end": 2994.87, "word": " you", "probability": 0.71728515625}, {"start": 2994.87, "end": 2995.19, "word": " report", "probability": 0.95458984375}, {"start": 2995.19, "end": 2995.39, "word": " the", "probability": 0.7333984375}, {"start": 2995.39, "end": 2995.69, "word": " range", "probability": 0.9130859375}, {"start": 2995.69, "end": 2996.25, "word": " and", "probability": 
0.63330078125}, {"start": 2996.25, "end": 2996.41, "word": " the", "probability": 0.89111328125}, {"start": 2996.41, "end": 2996.61, "word": " range", "probability": 0.8857421875}, {"start": 2996.61, "end": 2996.81, "word": " does", "probability": 0.97216796875}, {"start": 2996.81, "end": 2997.03, "word": " not", "probability": 0.94921875}, {"start": 2997.03, "end": 2997.35, "word": " give", "probability": 0.88232421875}, {"start": 2997.35, "end": 2997.87, "word": " an", "probability": 0.9521484375}, {"start": 2997.87, "end": 2998.23, "word": " appropriate", "probability": 0.7763671875}, {"start": 2998.23, "end": 2998.89, "word": " answer", "probability": 0.96435546875}, {"start": 2998.89, "end": 2999.93, "word": " or", "probability": 0.75341796875}, {"start": 2999.93, "end": 3000.31, "word": " appropriate", "probability": 0.583984375}, {"start": 3000.31, "end": 3000.77, "word": " result", "probability": 0.87939453125}, {"start": 3000.77, "end": 3001.21, "word": " because", "probability": 0.603515625}, {"start": 3001.21, "end": 3004.45, "word": " number", "probability": 0.50634765625}, {"start": 3004.45, "end": 3004.67, "word": " one", "probability": 0.85791015625}, {"start": 3004.67, "end": 3005.25, "word": " ignores", "probability": 0.832275390625}, {"start": 3005.25, "end": 3005.61, "word": " the", "probability": 0.9111328125}, {"start": 3005.61, "end": 3005.83, "word": " way", "probability": 0.953125}, {"start": 3005.83, "end": 3006.03, "word": " in", "probability": 0.9404296875}, {"start": 3006.03, "end": 3006.23, "word": " which", "probability": 0.9462890625}, {"start": 3006.23, "end": 3006.39, "word": " the", "probability": 0.9130859375}, {"start": 3006.39, "end": 3006.61, "word": " data", "probability": 0.94775390625}, {"start": 3006.61, "end": 3006.79, "word": " are", "probability": 0.92041015625}, {"start": 3006.79, "end": 3007.33, "word": " distributed.", "probability": 0.9052734375}, {"start": 3007.71, "end": 3007.89, "word": " For", "probability": 
0.94873046875}, {"start": 3007.89, "end": 3008.25, "word": " example,", "probability": 0.97412109375}, {"start": 3008.67, "end": 3009.83, "word": " if", "probability": 0.94287109375}, {"start": 3009.83, "end": 3009.95, "word": " you", "probability": 0.94189453125}, {"start": 3009.95, "end": 3010.15, "word": " look", "probability": 0.962890625}, {"start": 3010.15, "end": 3010.41, "word": " at", "probability": 0.96630859375}, {"start": 3010.41, "end": 3010.77, "word": " this", "probability": 0.94482421875}, {"start": 3010.77, "end": 3011.41, "word": " specific", "probability": 0.89794921875}, {"start": 3011.41, "end": 3011.83, "word": " data,", "probability": 0.931640625}, {"start": 3012.57, "end": 3013.67, "word": " we", "probability": 0.9462890625}, {"start": 3013.67, "end": 3013.85, "word": " have", "probability": 0.94580078125}, {"start": 3013.85, "end": 3014.09, "word": " data", "probability": 0.93798828125}, {"start": 3014.09, "end": 3014.43, "word": " seven,", "probability": 0.5126953125}, {"start": 3014.77, "end": 3015.05, "word": " eight,", "probability": 0.89111328125}, {"start": 3015.23, "end": 3015.43, "word": " nine,", "probability": 0.93359375}, {"start": 3015.59, "end": 3015.77, "word": " ten,", "probability": 0.53759765625}, {"start": 3015.85, "end": 3016.07, "word": " eleven", "probability": 0.85693359375}, {"start": 3016.07, "end": 3016.29, "word": " and", "probability": 0.521484375}, {"start": 3016.29, "end": 3016.71, "word": " twelve.", "probability": 0.9716796875}, {"start": 3017.01, "end": 3017.27, "word": " So", "probability": 0.95458984375}, {"start": 3017.27, "end": 3017.43, "word": " the", "probability": 0.78369140625}, {"start": 3017.43, "end": 3017.67, "word": " range", "probability": 0.88671875}, {"start": 3017.67, "end": 3017.83, "word": " is", "probability": 0.94677734375}, {"start": 3017.83, "end": 3018.11, "word": " five.", "probability": 0.83935546875}, {"start": 3019.27, "end": 3019.51, "word": " Twelve", "probability": 
0.8779296875}, {"start": 3019.51, "end": 3019.73, "word": " minus", "probability": 0.97119140625}, {"start": 3019.73, "end": 3020.01, "word": " seven", "probability": 0.908203125}, {"start": 3020.01, "end": 3020.17, "word": " is", "probability": 0.93505859375}, {"start": 3020.17, "end": 3020.41, "word": " five.", "probability": 0.88427734375}, {"start": 3021.21, "end": 3021.41, "word": " Now", "probability": 0.9130859375}, {"start": 3021.41, "end": 3021.55, "word": " if", "probability": 0.64111328125}, {"start": 3021.55, "end": 3021.59, "word": " you", "probability": 0.873046875}, {"start": 3021.59, "end": 3021.69, "word": " look", "probability": 0.9619140625}, {"start": 3021.69, "end": 3021.81, "word": " at", "probability": 0.96240234375}, {"start": 3021.81, "end": 3021.91, "word": " the", "probability": 0.90869140625}, {"start": 3021.91, "end": 3022.11, "word": " other", "probability": 0.8916015625}, {"start": 3022.11, "end": 3022.49, "word": " data,", "probability": 0.93115234375}], "temperature": 1.0}, {"id": 111, "seek": 305078, "start": 3024.72, "end": 3050.78, "text": " The smallest value was seven. And there is a gap between the smallest and the next smallest value, which is 10. And also we have 12 is repeated three times. Still the range is the same. 
Even there is a difference between these two values, between two sets.", "tokens": [440, 16998, 2158, 390, 3407, 13, 400, 456, 307, 257, 7417, 1296, 264, 16998, 293, 264, 958, 16998, 2158, 11, 597, 307, 1266, 13, 400, 611, 321, 362, 2272, 307, 10477, 1045, 1413, 13, 8291, 264, 3613, 307, 264, 912, 13, 2754, 456, 307, 257, 2649, 1296, 613, 732, 4190, 11, 1296, 732, 6352, 13], "avg_logprob": -0.137904572433659, "compression_ratio": 1.6688311688311688, "no_speech_prob": 0.0, "words": [{"start": 3024.72, "end": 3025.0, "word": " The", "probability": 0.7373046875}, {"start": 3025.0, "end": 3025.38, "word": " smallest", "probability": 0.939453125}, {"start": 3025.38, "end": 3025.66, "word": " value", "probability": 0.97802734375}, {"start": 3025.66, "end": 3025.9, "word": " was", "probability": 0.93994140625}, {"start": 3025.9, "end": 3026.36, "word": " seven.", "probability": 0.76220703125}, {"start": 3029.6, "end": 3030.28, "word": " And", "probability": 0.7236328125}, {"start": 3030.28, "end": 3030.48, "word": " there", "probability": 0.90966796875}, {"start": 3030.48, "end": 3030.62, "word": " is", "probability": 0.91064453125}, {"start": 3030.62, "end": 3030.8, "word": " a", "probability": 0.95703125}, {"start": 3030.8, "end": 3031.06, "word": " gap", "probability": 0.9599609375}, {"start": 3031.06, "end": 3031.5, "word": " between", "probability": 0.8515625}, {"start": 3031.5, "end": 3032.08, "word": " the", "probability": 0.88427734375}, {"start": 3032.08, "end": 3032.6, "word": " smallest", "probability": 0.93212890625}, {"start": 3032.6, "end": 3033.08, "word": " and", "probability": 0.92041015625}, {"start": 3033.08, "end": 3033.26, "word": " the", "probability": 0.90625}, {"start": 3033.26, "end": 3033.5, "word": " next", "probability": 0.9150390625}, {"start": 3033.5, "end": 3033.96, "word": " smallest", "probability": 0.9453125}, {"start": 3033.96, "end": 3034.5, "word": " value,", "probability": 0.97119140625}, {"start": 3034.9, "end": 3035.26, "word": " 
which", "probability": 0.94384765625}, {"start": 3035.26, "end": 3035.5, "word": " is", "probability": 0.93798828125}, {"start": 3035.5, "end": 3035.84, "word": " 10.", "probability": 0.662109375}, {"start": 3037.18, "end": 3037.52, "word": " And", "probability": 0.9326171875}, {"start": 3037.52, "end": 3037.76, "word": " also", "probability": 0.826171875}, {"start": 3037.76, "end": 3037.92, "word": " we", "probability": 0.63525390625}, {"start": 3037.92, "end": 3038.22, "word": " have", "probability": 0.94677734375}, {"start": 3038.22, "end": 3039.3, "word": " 12", "probability": 0.837890625}, {"start": 3039.3, "end": 3039.6, "word": " is", "probability": 0.77734375}, {"start": 3039.6, "end": 3039.92, "word": " repeated", "probability": 0.95458984375}, {"start": 3039.92, "end": 3040.24, "word": " three", "probability": 0.9248046875}, {"start": 3040.24, "end": 3040.68, "word": " times.", "probability": 0.92578125}, {"start": 3042.12, "end": 3042.58, "word": " Still", "probability": 0.9326171875}, {"start": 3042.58, "end": 3042.78, "word": " the", "probability": 0.548828125}, {"start": 3042.78, "end": 3043.18, "word": " range", "probability": 0.8798828125}, {"start": 3043.18, "end": 3044.28, "word": " is", "probability": 0.94189453125}, {"start": 3044.28, "end": 3044.48, "word": " the", "probability": 0.91015625}, {"start": 3044.48, "end": 3044.74, "word": " same.", "probability": 0.9052734375}, {"start": 3045.34, "end": 3045.68, "word": " Even", "probability": 0.86865234375}, {"start": 3045.68, "end": 3046.52, "word": " there", "probability": 0.80712890625}, {"start": 3046.52, "end": 3046.68, "word": " is", "probability": 0.93798828125}, {"start": 3046.68, "end": 3046.86, "word": " a", "probability": 0.998046875}, {"start": 3046.86, "end": 3047.24, "word": " difference", "probability": 0.875}, {"start": 3047.24, "end": 3047.64, "word": " between", "probability": 0.8701171875}, {"start": 3047.64, "end": 3047.96, "word": " these", "probability": 0.8623046875}, 
{"start": 3047.96, "end": 3048.14, "word": " two", "probability": 0.93408203125}, {"start": 3048.14, "end": 3048.6, "word": " values,", "probability": 0.96923828125}, {"start": 3049.8, "end": 3050.22, "word": " between", "probability": 0.8818359375}, {"start": 3050.22, "end": 3050.44, "word": " two", "probability": 0.9267578125}, {"start": 3050.44, "end": 3050.78, "word": " sets.", "probability": 0.9404296875}], "temperature": 1.0}, {"id": 112, "seek": 307836, "start": 3051.52, "end": 3078.36, "text": " we have seven, eight, nine up to 12. And then the other data, we have seven, 10, 11, and 12 three times. Still, the range equals five. So it doesn't make sense to report the range as a measure of variation. Because if you look at the distribution for this data, it's completely different from the other dataset. Even though it has", "tokens": [321, 362, 3407, 11, 3180, 11, 4949, 493, 281, 2272, 13, 400, 550, 264, 661, 1412, 11, 321, 362, 3407, 11, 1266, 11, 2975, 11, 293, 2272, 1045, 1413, 13, 8291, 11, 264, 3613, 6915, 1732, 13, 407, 309, 1177, 380, 652, 2020, 281, 2275, 264, 3613, 382, 257, 3481, 295, 12990, 13, 1436, 498, 291, 574, 412, 264, 7316, 337, 341, 1412, 11, 309, 311, 2584, 819, 490, 264, 661, 28872, 13, 2754, 1673, 309, 575], "avg_logprob": -0.1798878149726452, "compression_ratio": 1.5761904761904761, "no_speech_prob": 0.0, "words": [{"start": 3051.52, "end": 3051.86, "word": " we", "probability": 0.60693359375}, {"start": 3051.86, "end": 3052.18, "word": " have", "probability": 0.94970703125}, {"start": 3052.18, "end": 3052.58, "word": " seven,", "probability": 0.72265625}, {"start": 3053.56, "end": 3053.64, "word": " eight,", "probability": 0.7216796875}, {"start": 3053.76, "end": 3053.9, "word": " nine", "probability": 0.93603515625}, {"start": 3053.9, "end": 3054.14, "word": " up", "probability": 0.62109375}, {"start": 3054.14, "end": 3054.22, "word": " to", "probability": 0.96826171875}, {"start": 3054.22, "end": 3054.52, "word": " 12.", 
"probability": 0.79052734375}, {"start": 3055.24, "end": 3055.66, "word": " And", "probability": 0.80712890625}, {"start": 3055.66, "end": 3055.88, "word": " then", "probability": 0.4755859375}, {"start": 3055.88, "end": 3056.08, "word": " the", "probability": 0.7841796875}, {"start": 3056.08, "end": 3056.34, "word": " other", "probability": 0.89013671875}, {"start": 3056.34, "end": 3056.64, "word": " data,", "probability": 0.947265625}, {"start": 3056.74, "end": 3056.78, "word": " we", "probability": 0.962890625}, {"start": 3056.78, "end": 3057.02, "word": " have", "probability": 0.94921875}, {"start": 3057.02, "end": 3057.54, "word": " seven,", "probability": 0.8427734375}, {"start": 3057.8, "end": 3057.98, "word": " 10,", "probability": 0.66015625}, {"start": 3058.12, "end": 3058.48, "word": " 11,", "probability": 0.970703125}, {"start": 3058.78, "end": 3059.12, "word": " and", "probability": 0.93017578125}, {"start": 3059.12, "end": 3059.4, "word": " 12", "probability": 0.91845703125}, {"start": 3059.4, "end": 3059.64, "word": " three", "probability": 0.7041015625}, {"start": 3059.64, "end": 3060.06, "word": " times.", "probability": 0.8740234375}, {"start": 3061.38, "end": 3061.98, "word": " Still,", "probability": 0.7841796875}, {"start": 3062.12, "end": 3062.18, "word": " the", "probability": 0.91552734375}, {"start": 3062.18, "end": 3062.56, "word": " range", "probability": 0.8955078125}, {"start": 3062.56, "end": 3063.24, "word": " equals", "probability": 0.5458984375}, {"start": 3063.24, "end": 3063.6, "word": " five.", "probability": 0.86767578125}, {"start": 3063.84, "end": 3064.08, "word": " So", "probability": 0.96435546875}, {"start": 3064.08, "end": 3064.2, "word": " it", "probability": 0.8359375}, {"start": 3064.2, "end": 3064.4, "word": " doesn't", "probability": 0.877197265625}, {"start": 3064.4, "end": 3064.64, "word": " make", "probability": 0.912109375}, {"start": 3064.64, "end": 3065.12, "word": " sense", "probability": 0.82763671875}, 
{"start": 3065.12, "end": 3066.36, "word": " to", "probability": 0.96142578125}, {"start": 3066.36, "end": 3066.98, "word": " report", "probability": 0.9599609375}, {"start": 3066.98, "end": 3067.32, "word": " the", "probability": 0.91552734375}, {"start": 3067.32, "end": 3067.68, "word": " range", "probability": 0.8896484375}, {"start": 3067.68, "end": 3068.08, "word": " as", "probability": 0.9619140625}, {"start": 3068.08, "end": 3068.22, "word": " a", "probability": 0.99072265625}, {"start": 3068.22, "end": 3068.4, "word": " measure", "probability": 0.8896484375}, {"start": 3068.4, "end": 3068.78, "word": " of", "probability": 0.9658203125}, {"start": 3068.78, "end": 3069.62, "word": " variation.", "probability": 0.72900390625}, {"start": 3070.52, "end": 3071.12, "word": " Because", "probability": 0.94482421875}, {"start": 3071.12, "end": 3071.3, "word": " if", "probability": 0.94482421875}, {"start": 3071.3, "end": 3071.36, "word": " you", "probability": 0.962890625}, {"start": 3071.36, "end": 3071.48, "word": " look", "probability": 0.9619140625}, {"start": 3071.48, "end": 3071.58, "word": " at", "probability": 0.9677734375}, {"start": 3071.58, "end": 3071.7, "word": " the", "probability": 0.76025390625}, {"start": 3071.7, "end": 3072.14, "word": " distribution", "probability": 0.86572265625}, {"start": 3072.14, "end": 3072.42, "word": " for", "probability": 0.9453125}, {"start": 3072.42, "end": 3072.64, "word": " this", "probability": 0.9443359375}, {"start": 3072.64, "end": 3073.0, "word": " data,", "probability": 0.93603515625}, {"start": 3073.38, "end": 3073.96, "word": " it's", "probability": 0.870849609375}, {"start": 3073.96, "end": 3074.38, "word": " completely", "probability": 0.74755859375}, {"start": 3074.38, "end": 3074.8, "word": " different", "probability": 0.88818359375}, {"start": 3074.8, "end": 3075.08, "word": " from", "probability": 0.880859375}, {"start": 3075.08, "end": 3075.26, "word": " the", "probability": 0.9189453125}, {"start": 
3075.26, "end": 3075.5, "word": " other", "probability": 0.89111328125}, {"start": 3075.5, "end": 3075.9, "word": " dataset.", "probability": 0.73095703125}, {"start": 3076.5, "end": 3077.1, "word": " Even", "probability": 0.8564453125}, {"start": 3077.1, "end": 3077.46, "word": " though", "probability": 0.82666015625}, {"start": 3077.46, "end": 3077.84, "word": " it", "probability": 0.93408203125}, {"start": 3077.84, "end": 3078.36, "word": " has", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 113, "seek": 310782, "start": 3079.28, "end": 3107.82, "text": " the same range. So range is not used in this case. Look at another example. We have data. All the data ranges, I mean, starts from 1 up to 5. So the range is 4. If we just replace the maximum, which is 5, by 120. So the range is completely different.", "tokens": [264, 912, 3613, 13, 407, 3613, 307, 406, 1143, 294, 341, 1389, 13, 2053, 412, 1071, 1365, 13, 492, 362, 1412, 13, 1057, 264, 1412, 22526, 11, 286, 914, 11, 3719, 490, 502, 493, 281, 1025, 13, 407, 264, 3613, 307, 1017, 13, 759, 321, 445, 7406, 264, 6674, 11, 597, 307, 1025, 11, 538, 10411, 13, 407, 264, 3613, 307, 2584, 819, 13], "avg_logprob": -0.15685096153846154, "compression_ratio": 1.4593023255813953, "no_speech_prob": 0.0, "words": [{"start": 3079.28, "end": 3079.54, "word": " the", "probability": 0.45751953125}, {"start": 3079.54, "end": 3079.94, "word": " same", "probability": 0.896484375}, {"start": 3079.94, "end": 3080.54, "word": " range.", "probability": 0.86669921875}, {"start": 3080.8, "end": 3080.86, "word": " So", "probability": 0.83935546875}, {"start": 3080.86, "end": 3082.24, "word": " range", "probability": 0.72265625}, {"start": 3082.24, "end": 3082.42, "word": " is", "probability": 0.9462890625}, {"start": 3082.42, "end": 3082.62, "word": " not", "probability": 0.951171875}, {"start": 3082.62, "end": 3082.92, "word": " used", "probability": 0.92236328125}, {"start": 3082.92, "end": 3083.1, "word": " in", 
"probability": 0.94677734375}, {"start": 3083.1, "end": 3083.36, "word": " this", "probability": 0.94775390625}, {"start": 3083.36, "end": 3083.7, "word": " case.", "probability": 0.91259765625}, {"start": 3084.4, "end": 3084.74, "word": " Look", "probability": 0.830078125}, {"start": 3084.74, "end": 3084.86, "word": " at", "probability": 0.96728515625}, {"start": 3084.86, "end": 3085.22, "word": " another", "probability": 0.7939453125}, {"start": 3085.22, "end": 3085.68, "word": " example.", "probability": 0.9697265625}, {"start": 3088.3, "end": 3088.98, "word": " We", "probability": 0.95556640625}, {"start": 3088.98, "end": 3089.14, "word": " have", "probability": 0.9443359375}, {"start": 3089.14, "end": 3089.48, "word": " data.", "probability": 0.943359375}, {"start": 3090.12, "end": 3090.32, "word": " All", "probability": 0.9755859375}, {"start": 3090.32, "end": 3090.44, "word": " the", "probability": 0.91015625}, {"start": 3090.44, "end": 3090.72, "word": " data", "probability": 0.94091796875}, {"start": 3090.72, "end": 3091.26, "word": " ranges,", "probability": 0.5947265625}, {"start": 3091.98, "end": 3092.38, "word": " I", "probability": 0.912109375}, {"start": 3092.38, "end": 3092.52, "word": " mean,", "probability": 0.9677734375}, {"start": 3092.58, "end": 3092.92, "word": " starts", "probability": 0.78759765625}, {"start": 3092.92, "end": 3093.16, "word": " from", "probability": 0.89501953125}, {"start": 3093.16, "end": 3093.4, "word": " 1", "probability": 0.43017578125}, {"start": 3093.4, "end": 3093.58, "word": " up", "probability": 0.96240234375}, {"start": 3093.58, "end": 3093.7, "word": " to", "probability": 0.9658203125}, {"start": 3093.7, "end": 3094.08, "word": " 5.", "probability": 0.98095703125}, {"start": 3094.64, "end": 3095.0, "word": " So", "probability": 0.9638671875}, {"start": 3095.0, "end": 3095.16, "word": " the", "probability": 0.89501953125}, {"start": 3095.16, "end": 3095.48, "word": " range", "probability": 0.892578125}, {"start": 
3095.48, "end": 3095.9, "word": " is", "probability": 0.94482421875}, {"start": 3095.9, "end": 3096.24, "word": " 4.", "probability": 0.92724609375}, {"start": 3097.88, "end": 3098.12, "word": " If", "probability": 0.95654296875}, {"start": 3098.12, "end": 3098.3, "word": " we", "probability": 0.9599609375}, {"start": 3098.3, "end": 3098.68, "word": " just", "probability": 0.92236328125}, {"start": 3098.68, "end": 3099.28, "word": " replace", "probability": 0.84375}, {"start": 3099.28, "end": 3100.28, "word": " the", "probability": 0.90478515625}, {"start": 3100.28, "end": 3100.76, "word": " maximum,", "probability": 0.93798828125}, {"start": 3101.28, "end": 3101.58, "word": " which", "probability": 0.94580078125}, {"start": 3101.58, "end": 3101.74, "word": " is", "probability": 0.94873046875}, {"start": 3101.74, "end": 3102.14, "word": " 5,", "probability": 0.9775390625}, {"start": 3102.36, "end": 3103.0, "word": " by", "probability": 0.96826171875}, {"start": 3103.0, "end": 3103.52, "word": " 120.", "probability": 0.93701171875}, {"start": 3105.64, "end": 3106.0, "word": " So", "probability": 0.94873046875}, {"start": 3106.0, "end": 3106.2, "word": " the", "probability": 0.91064453125}, {"start": 3106.2, "end": 3106.52, "word": " range", "probability": 0.87646484375}, {"start": 3106.52, "end": 3106.82, "word": " is", "probability": 0.94873046875}, {"start": 3106.82, "end": 3107.46, "word": " completely", "probability": 0.74951171875}, {"start": 3107.46, "end": 3107.82, "word": " different.", "probability": 0.8759765625}], "temperature": 1.0}, {"id": 114, "seek": 313625, "start": 3108.39, "end": 3136.25, "text": " the range becomes 119. So that means range is sensitive to outliers. So we have to avoid using range in case of outliers or extreme values. 
I will stop at the most important one, the variance, for next time inshallah.", "tokens": [264, 3613, 3643, 2975, 24, 13, 407, 300, 1355, 3613, 307, 9477, 281, 484, 23646, 13, 407, 321, 362, 281, 5042, 1228, 3613, 294, 1389, 295, 484, 23646, 420, 8084, 4190, 13, 286, 486, 1590, 412, 264, 881, 1021, 472, 11, 264, 21977, 11, 337, 958, 565, 1028, 71, 13492, 13], "avg_logprob": -0.15730167925357819, "compression_ratio": 1.4630872483221478, "no_speech_prob": 0.0, "words": [{"start": 3108.39, "end": 3108.63, "word": " the", "probability": 0.457763671875}, {"start": 3108.63, "end": 3108.85, "word": " range", "probability": 0.80908203125}, {"start": 3108.85, "end": 3109.19, "word": " becomes", "probability": 0.73974609375}, {"start": 3109.19, "end": 3109.85, "word": " 119.", "probability": 0.890869140625}, {"start": 3110.37, "end": 3110.67, "word": " So", "probability": 0.91259765625}, {"start": 3110.67, "end": 3110.89, "word": " that", "probability": 0.8525390625}, {"start": 3110.89, "end": 3111.29, "word": " means", "probability": 0.9384765625}, {"start": 3111.29, "end": 3115.01, "word": " range", "probability": 0.71533203125}, {"start": 3115.01, "end": 3115.53, "word": " is", "probability": 0.947265625}, {"start": 3115.53, "end": 3116.05, "word": " sensitive", "probability": 0.9130859375}, {"start": 3116.05, "end": 3116.39, "word": " to", "probability": 0.96337890625}, {"start": 3116.39, "end": 3116.85, "word": " outliers.", "probability": 0.93603515625}, {"start": 3117.55, "end": 3118.27, "word": " So", "probability": 0.908203125}, {"start": 3118.27, "end": 3118.41, "word": " we", "probability": 0.9208984375}, {"start": 3118.41, "end": 3118.57, "word": " have", "probability": 0.95166015625}, {"start": 3118.57, "end": 3118.71, "word": " to", "probability": 0.974609375}, {"start": 3118.71, "end": 3119.23, "word": " avoid", "probability": 0.90185546875}, {"start": 3119.23, "end": 3120.87, "word": " using", "probability": 0.92919921875}, {"start": 3120.87, "end": 3121.83, 
"word": " range", "probability": 0.8623046875}, {"start": 3121.83, "end": 3122.37, "word": " in", "probability": 0.93115234375}, {"start": 3122.37, "end": 3122.81, "word": " case", "probability": 0.9150390625}, {"start": 3122.81, "end": 3123.45, "word": " of", "probability": 0.96875}, {"start": 3123.45, "end": 3124.21, "word": " outliers", "probability": 0.959228515625}, {"start": 3124.21, "end": 3124.51, "word": " or", "probability": 0.92724609375}, {"start": 3124.51, "end": 3125.63, "word": " extreme", "probability": 0.8740234375}, {"start": 3125.63, "end": 3126.03, "word": " values.", "probability": 0.939453125}, {"start": 3128.93, "end": 3129.47, "word": " I", "probability": 0.97998046875}, {"start": 3129.47, "end": 3129.59, "word": " will", "probability": 0.8798828125}, {"start": 3129.59, "end": 3130.01, "word": " stop", "probability": 0.92041015625}, {"start": 3130.01, "end": 3130.51, "word": " at", "probability": 0.962890625}, {"start": 3130.51, "end": 3132.89, "word": " the", "probability": 0.9111328125}, {"start": 3132.89, "end": 3133.17, "word": " most", "probability": 0.90478515625}, {"start": 3133.17, "end": 3133.63, "word": " important", "probability": 0.87890625}, {"start": 3133.63, "end": 3133.91, "word": " one,", "probability": 0.90087890625}, {"start": 3134.27, "end": 3134.41, "word": " the", "probability": 0.89111328125}, {"start": 3134.41, "end": 3134.95, "word": " variance,", "probability": 0.89013671875}, {"start": 3135.17, "end": 3135.29, "word": " for", "probability": 0.953125}, {"start": 3135.29, "end": 3135.57, "word": " next", "probability": 0.89111328125}, {"start": 3135.57, "end": 3135.95, "word": " time", "probability": 0.85009765625}, {"start": 3135.95, "end": 3136.25, "word": " inshallah.", "probability": 0.5673828125}], "temperature": 1.0}, {"id": 115, "seek": 316509, "start": 3137.75, "end": 3165.09, "text": " Up to this point, any questions? Okay, stop at this point if you have any question. 
So later we'll discuss measures of variation and variance, standard deviation up to the end of this chapter.", "tokens": [5858, 281, 341, 935, 11, 604, 1651, 30, 1033, 11, 1590, 412, 341, 935, 498, 291, 362, 604, 1168, 13, 407, 1780, 321, 603, 2248, 8000, 295, 12990, 293, 21977, 11, 3832, 25163, 493, 281, 264, 917, 295, 341, 7187, 13], "avg_logprob": -0.19401041347356068, "compression_ratio": 1.4511278195488722, "no_speech_prob": 0.0, "words": [{"start": 3137.75, "end": 3138.09, "word": " Up", "probability": 0.76708984375}, {"start": 3138.09, "end": 3138.17, "word": " to", "probability": 0.97119140625}, {"start": 3138.17, "end": 3138.35, "word": " this", "probability": 0.953125}, {"start": 3138.35, "end": 3138.61, "word": " point,", "probability": 0.96826171875}, {"start": 3138.67, "end": 3138.83, "word": " any", "probability": 0.91748046875}, {"start": 3138.83, "end": 3139.31, "word": " questions?", "probability": 0.94580078125}, {"start": 3142.33, "end": 3142.65, "word": " Okay,", "probability": 0.65234375}, {"start": 3143.27, "end": 3143.73, "word": " stop", "probability": 0.66162109375}, {"start": 3143.73, "end": 3143.95, "word": " at", "probability": 0.970703125}, {"start": 3143.95, "end": 3144.19, "word": " this", "probability": 0.94873046875}, {"start": 3144.19, "end": 3144.55, "word": " point", "probability": 0.96875}, {"start": 3144.55, "end": 3149.73, "word": " if", "probability": 0.52978515625}, {"start": 3149.73, "end": 3149.85, "word": " you", "probability": 0.9677734375}, {"start": 3149.85, "end": 3149.97, "word": " have", "probability": 0.9521484375}, {"start": 3149.97, "end": 3150.11, "word": " any", "probability": 0.89453125}, {"start": 3150.11, "end": 3150.51, "word": " question.", "probability": 0.7001953125}, {"start": 3155.43, "end": 3155.69, "word": " So", "probability": 0.92431640625}, {"start": 3155.69, "end": 3155.91, "word": " later", "probability": 0.73388671875}, {"start": 3155.91, "end": 3156.21, "word": " we'll", "probability": 
0.6015625}, {"start": 3156.21, "end": 3156.77, "word": " discuss", "probability": 0.892578125}, {"start": 3156.77, "end": 3158.55, "word": " measures", "probability": 0.80712890625}, {"start": 3158.55, "end": 3158.89, "word": " of", "probability": 0.9482421875}, {"start": 3158.89, "end": 3159.27, "word": " variation", "probability": 0.8564453125}, {"start": 3159.27, "end": 3159.43, "word": " and", "probability": 0.457763671875}, {"start": 3159.43, "end": 3159.95, "word": " variance,", "probability": 0.88916015625}, {"start": 3160.55, "end": 3160.89, "word": " standard", "probability": 0.9267578125}, {"start": 3160.89, "end": 3161.33, "word": " deviation", "probability": 0.9169921875}, {"start": 3161.33, "end": 3163.33, "word": " up", "probability": 0.50927734375}, {"start": 3163.33, "end": 3163.49, "word": " to", "probability": 0.9658203125}, {"start": 3163.49, "end": 3163.61, "word": " the", "probability": 0.91796875}, {"start": 3163.61, "end": 3163.91, "word": " end", "probability": 0.8876953125}, {"start": 3163.91, "end": 3164.51, "word": " of", "probability": 0.9638671875}, {"start": 3164.51, "end": 3164.81, "word": " this", "probability": 0.93896484375}, {"start": 3164.81, "end": 3165.09, "word": " chapter.", "probability": 0.8701171875}], "temperature": 1.0}, {"id": 116, "seek": 319013, "start": 3174.63, "end": 3190.13, "text": " So again, the range is sensitive to outliers. So we have to avoid using range in this case. 
And later we'll talk about the variance, which is the most common measures of variation for next time, inshallah.", "tokens": [407, 797, 11, 264, 3613, 307, 9477, 281, 484, 23646, 13, 407, 321, 362, 281, 5042, 1228, 3613, 294, 341, 1389, 13, 400, 1780, 321, 603, 751, 466, 264, 21977, 11, 597, 307, 264, 881, 2689, 8000, 295, 12990, 337, 958, 565, 11, 1028, 71, 13492, 13], "avg_logprob": -0.23942057446887097, "compression_ratio": 1.4305555555555556, "no_speech_prob": 4.172325134277344e-07, "words": [{"start": 3174.63, "end": 3175.27, "word": " So", "probability": 0.2132568359375}, {"start": 3175.27, "end": 3175.91, "word": " again,", "probability": 0.72021484375}, {"start": 3176.47, "end": 3177.45, "word": " the", "probability": 0.873046875}, {"start": 3177.45, "end": 3177.81, "word": " range", "probability": 0.90234375}, {"start": 3177.81, "end": 3178.35, "word": " is", "probability": 0.935546875}, {"start": 3178.35, "end": 3178.73, "word": " sensitive", "probability": 0.888671875}, {"start": 3178.73, "end": 3178.95, "word": " to", "probability": 0.966796875}, {"start": 3178.95, "end": 3179.43, "word": " outliers.", "probability": 0.91015625}, {"start": 3180.39, "end": 3180.69, "word": " So", "probability": 0.908203125}, {"start": 3180.69, "end": 3180.81, "word": " we", "probability": 0.80908203125}, {"start": 3180.81, "end": 3180.97, "word": " have", "probability": 0.94921875}, {"start": 3180.97, "end": 3181.11, "word": " to", "probability": 0.97412109375}, {"start": 3181.11, "end": 3181.43, "word": " avoid", "probability": 0.892578125}, {"start": 3181.43, "end": 3181.75, "word": " using", "probability": 0.9326171875}, {"start": 3181.75, "end": 3182.19, "word": " range", "probability": 0.8720703125}, {"start": 3182.19, "end": 3182.41, "word": " in", "probability": 0.94091796875}, {"start": 3182.41, "end": 3182.61, "word": " this", "probability": 0.95068359375}, {"start": 3182.61, "end": 3182.97, "word": " case.", "probability": 0.91845703125}, {"start": 3183.57, 
"end": 3183.85, "word": " And", "probability": 0.87451171875}, {"start": 3183.85, "end": 3184.11, "word": " later", "probability": 0.9482421875}, {"start": 3184.11, "end": 3184.33, "word": " we'll", "probability": 0.619873046875}, {"start": 3184.33, "end": 3184.61, "word": " talk", "probability": 0.8935546875}, {"start": 3184.61, "end": 3185.07, "word": " about", "probability": 0.90576171875}, {"start": 3185.07, "end": 3185.49, "word": " the", "probability": 0.9072265625}, {"start": 3185.49, "end": 3185.83, "word": " variance,", "probability": 0.85986328125}, {"start": 3185.95, "end": 3186.01, "word": " which", "probability": 0.94189453125}, {"start": 3186.01, "end": 3186.13, "word": " is", "probability": 0.9443359375}, {"start": 3186.13, "end": 3186.27, "word": " the", "probability": 0.916015625}, {"start": 3186.27, "end": 3186.51, "word": " most", "probability": 0.90966796875}, {"start": 3186.51, "end": 3186.93, "word": " common", "probability": 0.875}, {"start": 3186.93, "end": 3187.99, "word": " measures", "probability": 0.67138671875}, {"start": 3187.99, "end": 3188.41, "word": " of", "probability": 0.955078125}, {"start": 3188.41, "end": 3188.97, "word": " variation", "probability": 0.787109375}, {"start": 3188.97, "end": 3189.27, "word": " for", "probability": 0.91796875}, {"start": 3189.27, "end": 3189.49, "word": " next", "probability": 0.9111328125}, {"start": 3189.49, "end": 3189.75, "word": " time,", "probability": 0.8642578125}, {"start": 3189.83, "end": 3190.13, "word": " inshallah.", "probability": 0.6304524739583334}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3207.024, "duration_after_vad": 3055.7762499999853} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..cfa181a410f70840cbe65d8f97d9f4a235f6489e --- /dev/null +++ 
b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/4oMFiRBOjhY_raw.srt @@ -0,0 +1,2904 @@ +1 +00:00:17,750 --> 00:00:21,350 +So let's again go back to chapter number one. Last + +2 +00:00:21,350 --> 00:00:25,730 +time we discussed chapter one, production and data + +3 +00:00:25,730 --> 00:00:32,390 +collection. And I think we described why learning + +4 +00:00:32,390 --> 00:00:36,510 +statistics distinguish between some of these + +5 +00:00:36,510 --> 00:00:43,810 +topics. And also we explained in details types of + +6 +00:00:43,810 --> 00:00:47,010 +statistics and we mentioned that statistics mainly + +7 +00:00:47,010 --> 00:00:52,430 +has two types either descriptive statistics which + +8 +00:00:52,430 --> 00:00:56,810 +means collecting summarizing and obtaining data + +9 +00:00:56,810 --> 00:00:59,910 +and other type of statistics is called inferential + +10 +00:00:59,910 --> 00:01:04,430 +statistics or statistical inference and this type + +11 +00:01:04,430 --> 00:01:11,070 +of statistics we can draw drawing conclusions and + +12 +00:01:11,070 --> 00:01:14,090 +making decision concerning a population based only + +13 +00:01:14,090 --> 00:01:17,510 +on a sample. That means we have a sample and + +14 +00:01:17,510 --> 00:01:20,970 +sample is just a subset of the population or the + +15 +00:01:20,970 --> 00:01:26,230 +portion of the population and we use the data from + +16 +00:01:26,230 --> 00:01:29,130 +that sample to make some conclusion about the + +17 +00:01:29,130 --> 00:01:32,390 +entire population. This type of statistic is + +18 +00:01:32,390 --> 00:01:34,710 +called inferential statistics. Later, Inshallah, + +19 +00:01:34,750 --> 00:01:37,710 +we'll talk in details about inferential statistics + +20 +00:01:37,710 --> 00:01:45,290 +that will start in Chapter 7. Also, we gave some + +21 +00:01:45,290 --> 00:01:50,630 +definitions for variables, data, and we + +22 +00:01:50,630 --> 00:01:53,510 +distinguished between population and sample. 
And + +23 +00:01:53,510 --> 00:01:56,630 +we know that the population consists of all items + +24 +00:01:56,630 --> 00:02:00,270 +or individuals about which you want to draw a + +25 +00:02:00,270 --> 00:02:05,770 +conclusion. But in some cases, it's very hard to + +26 +00:02:05,770 --> 00:02:07,750 +talk about the population or the entire + +27 +00:02:07,750 --> 00:02:13,340 +population, so we can select a sample. A sample is + +28 +00:02:13,340 --> 00:02:18,480 +just a portion or subset of the entire population. + +29 +00:02:19,060 --> 00:02:21,860 +So we know now the definition of population and + +30 +00:02:21,860 --> 00:02:25,360 +sample. The other two types, parameter and + +31 +00:02:25,360 --> 00:02:28,860 +statistics. Parameter is a numerical measure that + +32 +00:02:28,860 --> 00:02:32,300 +describes characteristics of a population, while + +33 +00:02:32,300 --> 00:02:36,000 +on the other hand, a sample, a statistic is just + +34 +00:02:36,430 --> 00:02:39,730 +numerical measures that describe characteristic of + +35 +00:02:39,730 --> 00:02:44,930 +a sample. So parameter is computed from the + +36 +00:02:44,930 --> 00:02:48,930 +population while statistic is computed from the + +37 +00:02:48,930 --> 00:02:54,030 +sample. I think we stopped at this point. Why + +38 +00:02:54,030 --> 00:02:56,770 +collect data? I mean what are the reasons for + +39 +00:02:59,580 --> 00:03:01,980 +One of these reasons, for example, a marketing + +40 +00:03:01,980 --> 00:03:04,660 +research analyst needs to assess the effectiveness + +41 +00:03:04,660 --> 00:03:07,700 +of a new television advertisement. For example, + +42 +00:03:07,840 --> 00:03:13,380 +suppose you are a manager and you want to increase + +43 +00:03:13,380 --> 00:03:18,060 +your salaries or your sales. Now, sales may be + +44 +00:03:18,060 --> 00:03:23,380 +affected by advertising. 
So I mean, if you spend + +45 +00:03:23,380 --> 00:03:26,320 +more on advertising, it means your sales becomes + +46 +00:03:26,320 --> 00:03:29,740 +larger and larger. So you want to know if this + +47 +00:03:29,740 --> 00:03:34,160 +variable, I mean if advertisement is an effective + +48 +00:03:34,160 --> 00:03:38,900 +variable that maybe increase your sales. So that's + +49 +00:03:38,900 --> 00:03:43,900 +one of the reasons why we use data. The other one, + +50 +00:03:44,120 --> 00:03:46,880 +for example, pharmaceutical manufacturers needs to + +51 +00:03:46,880 --> 00:03:49,800 +determine whether a new drug is more effective + +52 +00:03:49,800 --> 00:03:53,240 +than those currently used. For example, for a + +53 +00:03:53,240 --> 00:03:59,330 +headache, we use drug A. Now, a new drug is + +54 +00:03:59,330 --> 00:04:04,510 +produced and you want to see if this new drug is + +55 +00:04:04,510 --> 00:04:10,090 +more effective than drug A that I mean if headache + +56 +00:04:10,090 --> 00:04:13,410 +suppose for example is removed after three days by + +57 +00:04:13,410 --> 00:04:20,490 +using drug A now the question is does B is more + +58 +00:04:20,490 --> 00:04:23,410 +effective it means it reduces your headache in + +59 +00:04:23,410 --> 00:04:26,070 +fewer than three days I mean maybe in two days + +60 +00:04:26,510 --> 00:04:30,810 +That means a drug B is more effective than a drug + +61 +00:04:30,810 --> 00:04:34,510 +A. So we want to know the difference between these + +62 +00:04:34,510 --> 00:04:37,250 +two drugs. I mean, we have two samples. Some + +63 +00:04:37,250 --> 00:04:40,810 +people used drug A and the other used drug B. And + +64 +00:04:40,810 --> 00:04:43,190 +we want to see if there is a significant + +65 +00:04:43,190 --> 00:04:47,690 +difference between the times that is used to + +66 +00:04:47,690 --> 00:04:51,150 +reduce the headache. So that's one of the reasons + +67 +00:04:51,150 --> 00:04:55,260 +why we use statistics. 
Sometimes an operation + +68 +00:04:55,260 --> 00:04:58,500 +manager wants to monitor manufacturing process to + +69 +00:04:58,500 --> 00:05:00,720 +find out whether the quality of a product being + +70 +00:05:00,720 --> 00:05:02,840 +manufactured is conforming to a company's + +71 +00:05:02,840 --> 00:05:05,700 +standards. Do you know what the meaning of + +72 +00:05:05,700 --> 00:05:06,520 +company's standards? + +73 +00:05:09,900 --> 00:05:15,320 +The regulations of the firm itself. Another + +74 +00:05:15,320 --> 00:05:21,020 +example, suppose here in the school last year, we + +75 +00:05:21,020 --> 00:05:25,150 +teach statistics by using method A. traditional + +76 +00:05:25,150 --> 00:05:29,350 +method. This year we developed a new method for + +77 +00:05:29,350 --> 00:05:32,370 +teaching and our goal is to see if the new method + +78 +00:05:32,370 --> 00:05:36,510 +is better than method A which was used in last + +79 +00:05:36,510 --> 00:05:38,910 +year. So we want to see if there is a big + +80 +00:05:38,910 --> 00:05:42,410 +difference between scores or the average scores + +81 +00:05:42,410 --> 00:05:47,310 +last year and this year. The same you can do for + +82 +00:05:47,310 --> 00:05:52,350 +your weight. Suppose there are 20 students in this + +83 +00:05:52,350 --> 00:05:56,960 +class and their weights are high. And our goal is + +84 +00:05:56,960 --> 00:06:04,280 +to reduce their weights. Suppose they + +85 +00:06:04,280 --> 00:06:09,640 +have a regime or diet for three months or + +86 +00:06:09,640 --> 00:06:12,140 +exercise, whatever it is, then after three months, + +87 +00:06:12,220 --> 00:06:17,060 +we have new weights for these persons. And we want + +88 +00:06:17,060 --> 00:06:19,840 +to see if the diet is effective. I mean, if the + +89 +00:06:19,840 --> 00:06:24,120 +average weight was greater than or smaller than + +90 +00:06:24,120 --> 00:06:28,600 +before diet. Is it clear? 
So there are many, many + +91 +00:06:28,600 --> 00:06:31,920 +reasons behind using statistics and collecting + +92 +00:06:31,920 --> 00:06:37,500 +data. Now, what are the sources of data? Since + +93 +00:06:37,500 --> 00:06:41,840 +statistics mainly, first step, we have to collect + +94 +00:06:41,840 --> 00:06:44,120 +data. Now, what are the sources of the data? + +95 +00:06:45,360 --> 00:06:48,420 +Generally speaking, there are two sources. One is + +96 +00:06:48,420 --> 00:06:52,430 +called The primary sources and the others + +97 +00:06:52,430 --> 00:06:55,770 +secondary sources. What do you think is the + +98 +00:06:55,770 --> 00:06:57,830 +difference between these two? I mean, what's the + +99 +00:06:57,830 --> 00:07:02,730 +difference between primary and secondary sources? + +100 +00:07:03,510 --> 00:07:07,250 +The primary source is the collector of the data. + +101 +00:07:07,670 --> 00:07:11,550 +He is the analyzer. He analyzes it. And then the + +102 +00:07:11,550 --> 00:07:14,230 +secondary, who collects the data, isn't there. + +103 +00:07:16,030 --> 00:07:18,910 +That's correct. So the primary sources means the + +104 +00:07:18,910 --> 00:07:22,490 +researcher by himself. He should collect the data, + +105 +00:07:23,890 --> 00:07:27,750 +then he can use the data to do his analysis. + +106 +00:07:28,310 --> 00:07:31,550 +That's for the primary. Now, the primary could be + +107 +00:07:31,550 --> 00:07:35,230 +data from political survey. You can distribute + +108 +00:07:35,230 --> 00:07:38,750 +questionnaire, for example, data collected from an + +109 +00:07:38,750 --> 00:07:42,530 +experiment. I mean maybe control or experimental + +110 +00:07:42,530 --> 00:07:45,730 +groups. We have two groups, maybe healthy people + +111 +00:07:45,730 --> 00:07:48,490 +and patient people. So that's experimental group. + +112 +00:07:49,010 --> 00:07:53,390 +Or observed data. That's the primary sources. 
+ +113 +00:07:53,870 --> 00:07:56,450 +Secondary sources, the person performing data + +114 +00:07:56,450 --> 00:08:00,310 +analysis is not the data collector. So he obtained + +115 +00:08:00,310 --> 00:08:03,880 +the data from other sources. For example, it could + +116 +00:08:03,880 --> 00:08:07,140 +be analyzing census data or for example, examining + +117 +00:08:07,140 --> 00:08:10,160 +data from print journals or data published on the + +118 +00:08:10,160 --> 00:08:14,780 +internet. So maybe he goes to the Ministry of + +119 +00:08:14,780 --> 00:08:18,820 +Education and he can get some data. So the data is + +120 +00:08:18,820 --> 00:08:22,520 +already there and he just used the data to do some + +121 +00:08:22,520 --> 00:08:25,540 +analysis. So that's the difference between a + +122 +00:08:25,540 --> 00:08:29,420 +primary and secondary sources. So primary, the + +123 +00:08:29,420 --> 00:08:33,650 +researcher himself, should collect the data by + +124 +00:08:33,650 --> 00:08:35,590 +using one of the tools, either survey, + +125 +00:08:36,110 --> 00:08:39,050 +questionnaire, experiment, and so on. But + +126 +00:08:39,050 --> 00:08:41,450 +secondary, you can use the data that is published + +127 +00:08:41,450 --> 00:08:44,510 +in the internet, for example, in the books, in + +128 +00:08:44,510 --> 00:08:48,250 +governments and NGOs and so on. So these are the + +129 +00:08:48,250 --> 00:08:53,590 +two sources of data. Sources of data fall into + +130 +00:08:53,590 --> 00:08:57,610 +four categories. Number one, data distributed by + +131 +00:08:57,610 --> 00:09:01,190 +an organization or an individual. So that's + +132 +00:09:01,190 --> 00:09:06,170 +secondary source. A design experiment is primary + +133 +00:09:06,170 --> 00:09:10,350 +because you have to design the experiment, a + +134 +00:09:10,350 --> 00:09:14,610 +survey. It's also primary. An observational study + +135 +00:09:14,610 --> 00:09:17,590 +is also a primary source. 
So you have to + +136 +00:09:17,590 --> 00:09:21,410 +distinguish between a primary and secondary + +137 +00:09:21,410 --> 00:09:28,810 +sources. Any question? Comments? Next. + +138 +00:09:31,460 --> 00:09:34,540 +We'll talk a little bit about types of variables. + +139 +00:09:35,360 --> 00:09:37,580 +In general, there are two types of variables. One + +140 +00:09:37,580 --> 00:09:40,760 +is called categorical variables or qualitative + +141 +00:09:40,760 --> 00:09:43,160 +variables, and the other one is called numerical + +142 +00:09:43,160 --> 00:09:46,520 +or quantitative variables. Now, for example, if I + +143 +00:09:46,520 --> 00:09:50,560 +ask you, what's + +144 +00:09:50,560 --> 00:09:55,100 +your favorite color? You may say white, black, + +145 +00:09:55,220 --> 00:09:59,390 +red, and so on. What's your marital status? Maybe + +146 +00:09:59,390 --> 00:10:02,670 +married or unmarried, and so on. Gender, male, + +147 +00:10:02,850 --> 00:10:07,050 +either male or female, and so on. So this type of + +148 +00:10:07,050 --> 00:10:10,090 +variable is called qualitative variables. So + +149 +00:10:10,090 --> 00:10:13,130 +qualitative variables have values that can only be + +150 +00:10:13,130 --> 00:10:16,370 +placed into categories, such as, for example, yes + +151 +00:10:16,370 --> 00:10:21,350 +or no. For example, do you like orange? + +152 +00:10:22,270 --> 00:10:26,200 +The answer is either yes or no. Do you like + +153 +00:10:26,200 --> 00:10:30,040 +candidate A, for example, whatever his party is? + +154 +00:10:30,260 --> 00:10:34,620 +Do you like it? Either yes or no, and so on. As I + +155 +00:10:34,620 --> 00:10:37,480 +mentioned before, gender, marital status, race, + +156 +00:10:37,640 --> 00:10:41,820 +religions, these are examples of qualitative or + +157 +00:10:41,820 --> 00:10:46,240 +categorical variables. 
The other type of variable + +158 +00:10:46,240 --> 00:10:49,480 +which is commonly used is called numerical or + +159 +00:10:49,480 --> 00:10:53,230 +quantitative data. Quantitative variables have + +160 +00:10:53,230 --> 00:10:56,350 +values that represent quantities. For example, if + +161 +00:10:56,350 --> 00:11:00,470 +I ask you, what's your age? My age is 20 years old + +162 +00:11:00,470 --> 00:11:04,770 +or 18 years old. What's your weight? Income. + +163 +00:11:07,510 --> 00:11:12,550 +Height? Temperature? Income. So it's a number. + +164 +00:11:13,610 --> 00:11:18,030 +Weight, maybe my weight is 70 kilograms. So + +165 +00:11:18,030 --> 00:11:22,450 +weight, age, height, salary, income, number of + +166 +00:11:22,450 --> 00:11:26,510 +students, number of phone calls you received on + +167 +00:11:26,510 --> 00:11:28,770 +your cell phone during one hour, number of + +168 +00:11:28,770 --> 00:11:33,210 +accidents happened in street and so on. So that's + +169 +00:11:33,210 --> 00:11:36,470 +the difference between numerical variables and + +170 +00:11:36,470 --> 00:11:37,710 +qualitative variables. + +171 +00:11:40,270 --> 00:11:42,650 +Anyone of you just give me one example of + +172 +00:11:42,650 --> 00:11:47,780 +qualitative and quantitative variables. Another + +173 +00:11:47,780 --> 00:11:51,700 +examples. Just give me one example for qualitative + +174 +00:11:51,700 --> 00:11:52,160 +data. + +175 +00:11:56,900 --> 00:11:58,780 +Qualitative or quantitative. + +176 +00:12:01,380 --> 00:12:04,880 +Political party, either party A or party B. So + +177 +00:12:04,880 --> 00:12:08,320 +suppose there are two parties, so I like party A, + +178 +00:12:08,720 --> 00:12:12,320 +she likes party B and so on. So party in this case + +179 +00:12:12,320 --> 00:12:15,060 +is qualitative variable, another one. + +180 +00:12:25,400 --> 00:12:28,820 +So types of courses, maybe business, economics, + +181 +00:12:29,480 --> 00:12:33,260 +administration, and so on. 
So types of courses. + +182 +00:12:34,260 --> 00:12:36,600 +Another example for quantitative variable or + +183 +00:12:36,600 --> 00:12:37,600 +numerical variables. + +184 +00:12:45,020 --> 00:12:51,440 +So production is a numerical variable. Another + +185 +00:12:51,440 --> 00:12:52,840 +example for quantitative. + +186 +00:12:56,350 --> 00:12:58,970 +Is that produced by this company? Number of cell + +187 +00:12:58,970 --> 00:13:03,950 +phones, maybe 20, 17, and so on. Any question? + +188 +00:13:06,190 --> 00:13:12,410 +Next. So generally speaking, types of data, data + +189 +00:13:12,410 --> 00:13:17,960 +has two types, categorical and numerical data. As + +190 +00:13:17,960 --> 00:13:21,240 +we mentioned, marital status, political party, eye + +191 +00:13:21,240 --> 00:13:25,120 +color, and so on. These are examples of + +192 +00:13:25,120 --> 00:13:28,180 +categorical variables. On the other hand, a + +193 +00:13:28,180 --> 00:13:30,720 +numerical variable can be split or divided into + +194 +00:13:30,720 --> 00:13:34,020 +two parts. One is called discrete and the other is + +195 +00:13:34,020 --> 00:13:35,680 +continuous, and we have to distinguish between + +196 +00:13:35,680 --> 00:13:40,240 +these two. For example, Number of students in this + +197 +00:13:40,240 --> 00:13:43,400 +class, you can say there are 60 or 50 students in + +198 +00:13:43,400 --> 00:13:47,260 +this class. You cannot say there are 50.5 + +199 +00:13:47,260 --> 00:13:52,160 +students. So number of students is discrete + +200 +00:13:52,160 --> 00:13:57,040 +because it takes only integers. While for + +201 +00:13:57,040 --> 00:13:59,800 +continuous type of numerical variables, you can + +202 +00:13:59,800 --> 00:14:06,060 +say that my weight is 80.5 kilograms. 
so it makes + +203 +00:14:06,060 --> 00:14:09,260 +sense that your weight is not exactly 80 kilograms + +204 +00:14:09,260 --> 00:14:15,580 +it might be 80.6 or 80.5 and so on so discrete + +205 +00:14:15,580 --> 00:14:20,420 +takes only integers while continuous takes any + +206 +00:14:20,420 --> 00:14:24,140 +value I mean any real number so that's the + +207 +00:14:24,140 --> 00:14:28,980 +difference between discrete and continuous number + +208 +00:14:28,980 --> 00:14:33,930 +of phone or number of calls you have received this + +209 +00:14:33,930 --> 00:14:37,670 +morning, maybe one, zero, nine, and so on, + +210 +00:14:38,390 --> 00:14:41,950 +discrete. Number of patients in the hospital, + +211 +00:14:42,350 --> 00:14:46,590 +discrete, and so on. But when we are talking about + +212 +00:14:46,590 --> 00:14:50,850 +income, maybe my income is 1,000.5 shekel. It + +213 +00:14:50,850 --> 00:14:55,790 +could be. It's continuous because my income can be + +214 +00:14:55,790 --> 00:14:59,210 +any number between, for example, 1,000 and 10,000. + +215 +00:15:00,470 --> 00:15:03,550 +It takes any value in this interval from 1,000 to + +216 +00:15:03,550 --> 00:15:08,410 +10,000. So it types of continuous rather than our + +217 +00:15:08,410 --> 00:15:12,450 +continuous variable. So that's the two types of + +218 +00:15:12,450 --> 00:15:15,590 +data, categorical and numerical. And numerical + +219 +00:15:15,590 --> 00:15:18,510 +also has two types, either discrete or continuous. + +220 +00:15:19,750 --> 00:15:24,430 +Later in Chapter 6, we'll talk more details about + +221 +00:15:24,430 --> 00:15:28,940 +one of the most distribution statistics, for + +222 +00:15:28,940 --> 00:15:31,560 +continuous, one which is called normal + +223 +00:15:31,560 --> 00:15:36,240 +distribution. That will be later, inshallah. 
As we + +224 +00:15:36,240 --> 00:15:39,000 +mentioned last time, at the end of each chapter, + +225 +00:15:39,280 --> 00:15:41,640 +there is a section or sections, sometimes there + +226 +00:15:41,640 --> 00:15:45,000 +are two sections, talks about computer programs. + +227 +00:15:45,120 --> 00:15:49,200 +How can we use computer programs in order to + +228 +00:15:49,200 --> 00:15:52,300 +analyze the data? And as we mentioned last time, + +229 +00:15:52,420 --> 00:15:54,600 +you should take a course on that. It's called + +230 +00:15:54,600 --> 00:15:57,960 +Computer and Data Analysis or SPSS course. So we + +231 +00:15:57,960 --> 00:16:03,860 +are going to skip the computer programs used for + +232 +00:16:03,860 --> 00:16:08,720 +any chapters in this book. And that's the end of + +233 +00:16:08,720 --> 00:16:13,060 +chapter number three. Any questions? + +234 +00:16:18,380 --> 00:16:22,550 +Let's move. quickly on chapter three. + +235 +00:16:31,290 --> 00:16:35,530 +Chapter three maybe is the easiest chapter in this + +236 +00:16:35,530 --> 00:16:39,950 +book. It's straightforward. We have some formulas + +237 +00:16:39,950 --> 00:16:45,280 +to compute some statistical measures. And we + +238 +00:16:45,280 --> 00:16:47,400 +should know how can we calculate these measures + +239 +00:16:47,400 --> 00:16:52,620 +and what are the meaning of your results. So + +240 +00:16:52,620 --> 00:16:56,140 +chapter three talks about numerical descriptive + +241 +00:16:56,140 --> 00:17:03,220 +measures. In this chapter, you will learn, number + +242 +00:17:03,220 --> 00:17:06,480 +one, describe the probabilities of central + +243 +00:17:06,480 --> 00:17:11,880 +tendency, variation, and shape in numerical data. + +244 +00:17:12,730 --> 00:17:16,250 +In this lecture, we'll talk in more details about + +245 +00:17:16,250 --> 00:17:21,370 +some of the center tendency measures. 
Later, we'll + +246 +00:17:21,370 --> 00:17:26,490 +talk about the variation, or spread, or + +247 +00:17:26,490 --> 00:17:29,690 +dispersion, and the shape in numerical data. So + +248 +00:17:29,690 --> 00:17:31,630 +that's part number one. We have to know something + +249 +00:17:31,630 --> 00:17:36,390 +about center tendency, variation, and the shape of + +250 +00:17:36,390 --> 00:17:42,020 +the data we have. to calculate descriptive summary + +251 +00:17:42,020 --> 00:17:45,360 +measures for a population. So we have to calculate + +252 +00:17:45,360 --> 00:17:48,460 +these measures for the sample. And if we have the + +253 +00:17:48,460 --> 00:17:51,420 +entire population, we can compute these measures + +254 +00:17:51,420 --> 00:17:58,440 +also for that population. Then I will introduce in + +255 +00:17:58,440 --> 00:18:01,920 +more details about something called Paxiplot. How + +256 +00:18:01,920 --> 00:18:06,660 +can we construct and interpret a Paxiplot? That's, + +257 +00:18:06,960 --> 00:18:11,040 +inshallah, next time on Tuesday. Finally, we'll + +258 +00:18:11,040 --> 00:18:13,020 +see how can we calculate the covariance and + +259 +00:18:13,020 --> 00:18:15,700 +coefficient of variation and coefficient, I'm + +260 +00:18:15,700 --> 00:18:18,480 +sorry, coefficient of correlation. This topic + +261 +00:18:18,480 --> 00:18:24,280 +we'll introduce in more details in chapter 11 + +262 +00:18:24,280 --> 00:18:31,240 +later on. So just I will give some brief + +263 +00:18:31,240 --> 00:18:35,630 +notation about coefficient of correlation, how can + +264 +00:18:35,630 --> 00:18:39,030 +we compute the correlation coefficient? What's the + +265 +00:18:39,030 --> 00:18:41,870 +meaning of your result? And later in chapter 11, + +266 +00:18:42,030 --> 00:18:44,510 +we'll talk in more details about correlation and + +267 +00:18:44,510 --> 00:18:48,930 +regression. So these are the objectives of this + +268 +00:18:48,930 --> 00:18:52,870 +chapter. 
There are some basic definitions before + +269 +00:18:52,870 --> 00:18:57,410 +we start. One is called central tendency. What do + +270 +00:18:57,410 --> 00:19:00,750 +you mean by central tendency? Central tendency is + +271 +00:19:00,750 --> 00:19:04,990 +the extent to which all data value group around a + +272 +00:19:04,990 --> 00:19:08,890 +typical or numerical or central value. So we are + +273 +00:19:08,890 --> 00:19:12,510 +looking for a point that in the center, I mean, + +274 +00:19:12,810 --> 00:19:18,870 +the data points are gathered or collected around a + +275 +00:19:18,870 --> 00:19:21,670 +middle point, and that middle point is called the + +276 +00:19:21,670 --> 00:19:24,450 +central tendency. And the question is, how can we + +277 +00:19:24,450 --> 00:19:27,780 +measure that value? We'll talk in details about + +278 +00:19:27,780 --> 00:19:32,620 +mean, median, and mode in a few minutes. So the + +279 +00:19:32,620 --> 00:19:35,660 +central tendency, in this case, the data values + +280 +00:19:35,660 --> 00:19:40,080 +grouped around a typical or central value. Is it + +281 +00:19:40,080 --> 00:19:44,380 +clear? So we have data set, large data set. Then + +282 +00:19:44,380 --> 00:19:47,860 +these points are gathered or grouped around a + +283 +00:19:47,860 --> 00:19:51,440 +middle point, and this point is called central + +284 +00:19:51,440 --> 00:19:56,120 +tendency, and it can be measured by using mean, + +285 +00:19:56,420 --> 00:19:59,960 +which is the most common one, median and the moon. + +286 +00:20:01,020 --> 00:20:04,480 +Next is the variation, which is the amount of + +287 +00:20:04,480 --> 00:20:09,420 +dispersion. Variation is the amount of dispersion + +288 +00:20:09,420 --> 00:20:13,900 +or scattering of values. And we'll use, for + +289 +00:20:13,900 --> 00:20:18,400 +example, range, variance or standard deviation in + +290 +00:20:18,400 --> 00:20:22,960 +order to compute the variation. 
Finally, We have
+
+291
+00:20:22,960 --> 00:20:26,300
+data, and my question is, what's the shape of the
+
+292
+00:20:26,300 --> 00:20:29,920
+data? So the shape is the pattern of distribution
+
+293
+00:20:29,920 --> 00:20:35,220
+of values from the lowest value to the highest. So
+
+294
+00:20:35,220 --> 00:20:39,400
+that's the three definitions we need to know
+
+295
+00:20:39,400 --> 00:20:44,580
+before we start. So we'll start with the easiest
+
+296
+00:20:44,580 --> 00:20:48,680
+one, measures of central tendency. As I mentioned,
+
+297
+00:20:49,160 --> 00:20:55,110
+there are three measures: mean, median, and mode. And our
+
+298
+00:20:55,110 --> 00:20:58,270
+goal or we have two goals actually. We have to
+
+299
+00:20:58,270 --> 00:21:02,290
+know how to compute these measures. Number two,
+
+300
+00:21:03,270 --> 00:21:06,390
+which one is better? The mean or the median or the
+
+301
+00:21:06,390 --> 00:21:06,550
+mode?
+
+302
+00:21:11,310 --> 00:21:14,770
+So the mean sometimes called the arithmetic mean.
+
+303
+00:21:15,680 --> 00:21:20,020
+Or in general, just say the mean. So often we use
+
+304
+00:21:20,020 --> 00:21:26,860
+the mean. And the mean is just sum
+
+305
+00:21:26,860 --> 00:21:33,220
+of the values divided by the sample size. So it's
+
+306
+00:21:33,220 --> 00:21:36,800
+straightforward. We have, for example, three data
+
+307
+00:21:36,800 --> 00:21:42,180
+points. And your goal is to find the average or
+
+308
+00:21:42,180 --> 00:21:45,890
+the mean of these points. The mean, it's just sum
+
+309
+00:21:45,890 --> 00:21:50,230
+of these values divided by the sample size. So for
+
+310
+00:21:50,230 --> 00:21:54,570
+example, if we have a data X1, X2, X3 up to Xn. So
+
+311
+00:21:54,570 --> 00:21:59,650
+the average is denoted by X bar. This one is
+
+312
+00:21:59,650 --> 00:22:04,530
+pronounced as X bar and X bar is just sum of Xi. 
+ +313 +00:22:05,010 --> 00:22:08,250 +It is summation, you know this symbol, summation + +314 +00:22:08,250 --> 00:22:11,350 +of sigma, summation of Xi and I goes from one to + +315 +00:22:11,350 --> 00:22:14,490 +N. divided by N which is the total number of + +316 +00:22:14,490 --> 00:22:19,710 +observations or the sample size. So it means X1 + +317 +00:22:19,710 --> 00:22:23,290 +plus X2 all the way up to XN divided by N gives + +318 +00:22:23,290 --> 00:22:28,530 +the mean or the arithmetic mean. So X bar is the + +319 +00:22:28,530 --> 00:22:32,690 +average which is the sum of values divided by the + +320 +00:22:32,690 --> 00:22:36,270 +number of observations. So that's the first + +321 +00:22:36,270 --> 00:22:38,830 +definition. For example, + +322 +00:22:42,180 --> 00:22:46,920 +So again, the mean is the most common measure of + +323 +00:22:46,920 --> 00:22:51,780 +center tendency. Number two, the definition of the + +324 +00:22:51,780 --> 00:22:55,440 +mean. Sum of values divided by the number of + +325 +00:22:55,440 --> 00:23:02,960 +values. That means the mean takes all the values, + +326 +00:23:04,140 --> 00:23:09,740 +then divided by N. it makes sense that the mean is + +327 +00:23:09,740 --> 00:23:13,380 +affected by extreme values or outliers. I mean, if + +328 +00:23:13,380 --> 00:23:17,840 +the data has outliers or extreme values, I mean by + +329 +00:23:17,840 --> 00:23:21,400 +extreme values, large or very, very large values + +330 +00:23:21,400 --> 00:23:24,980 +and small, small values. Large values or small + +331 +00:23:24,980 --> 00:23:31,100 +values are extreme values. Since the mean takes + +332 +00:23:31,100 --> 00:23:33,420 +all these values and sums all together, doesn't + +333 +00:23:33,420 --> 00:23:38,550 +divide by n, that means The mean is affected by + +334 +00:23:38,550 --> 00:23:41,350 +outliers or by extreme values. For example, + +335 +00:23:42,030 --> 00:23:45,110 +imagine we have simple data as 1, 2, 3, 4, and 5. 
+ +336 +00:23:46,110 --> 00:23:49,830 +Simple example. Now, what's the mean? The mean is + +337 +00:23:49,830 --> 00:23:53,570 +just add these values, then divide by the total + +338 +00:23:53,570 --> 00:23:56,910 +number of observations. In this case, the sum of + +339 +00:23:56,910 --> 00:24:01,710 +these is 15. N is five because there are five + +340 +00:24:01,710 --> 00:24:05,920 +observations. So X bar is 15 divided by 5, which + +341 +00:24:05,920 --> 00:24:10,240 +is 3. So straightforward. Now imagine instead of + +342 +00:24:10,240 --> 00:24:16,480 +5, this number 5, we have a 10. Now 10, there is a + +343 +00:24:16,480 --> 00:24:21,400 +gap between 4, which is the second largest, and + +344 +00:24:21,400 --> 00:24:25,600 +the maximum, which is 10. Now if we add these + +345 +00:24:25,600 --> 00:24:30,540 +values, 1, 2, 3, 4, and 10, then divide by 5, the + +346 +00:24:30,540 --> 00:24:36,680 +mean will be 4. If you see here, we just added one + +347 +00:24:36,680 --> 00:24:41,060 +value, or I mean, we replaced five by 10, and the + +348 +00:24:41,060 --> 00:24:44,700 +mean changed dramatically from three to four. + +349 +00:24:45,520 --> 00:24:48,860 +There is big change between three and four, around + +350 +00:24:48,860 --> 00:24:55,560 +25% more. So that means outliers or extreme values + +351 +00:24:55,560 --> 00:25:01,200 +affected the mean. So take this information in + +352 +00:25:01,200 --> 00:25:03,560 +your mind because later we'll talk a little bit + +353 +00:25:03,560 --> 00:25:07,360 +about another one. So the mean is affected by + +354 +00:25:07,360 --> 00:25:13,100 +extreme values. Imagine another example. Suppose + +355 +00:25:13,100 --> 00:25:20,060 +we have data from 1 to 9. 1, 2, 3, 4, 6, 7, 8, 9. + +356 +00:25:21,040 --> 00:25:26,690 +Now the mean of these values, some divide by n. If + +357 +00:25:26,690 --> 00:25:31,970 +you sum 1 through 9, summation is 45. Divide by 9, + +358 +00:25:32,510 --> 00:25:36,230 +which is 5. 
So the sum of these values divided by + +359 +00:25:36,230 --> 00:25:41,590 +N gives the average, so the average is 5. Now + +360 +00:25:41,590 --> 00:25:46,670 +suppose we add 100 to the end of this data. So the + +361 +00:25:46,670 --> 00:25:53,670 +sum will be 145 divided by 10, that's 14.5. Now + +362 +00:25:53,670 --> 00:25:58,850 +the mean was 5. Then after we added 100, it + +363 +00:25:58,850 --> 00:26:05,470 +becomes 14.5. Imagine the mean was 5, it changed + +364 +00:26:05,470 --> 00:26:11,650 +to 14.5. It means around three times. So that + +365 +00:26:11,650 --> 00:26:17,510 +means outliers affect the mean much more than the + +366 +00:26:17,510 --> 00:26:19,890 +other one. We'll talk a little later about it, + +367 +00:26:19,990 --> 00:26:23,950 +which is the median. So keep in mind outliers + +368 +00:26:25,290 --> 00:26:34,790 +affected the mean in this case. Any question? Is + +369 +00:26:34,790 --> 00:26:41,590 +it clear? Yes. So, one more time. The mean is + +370 +00:26:41,590 --> 00:26:46,990 +affected by extreme values. So that's for the + +371 +00:26:46,990 --> 00:26:50,910 +mean. The other measure of center tendency is + +372 +00:26:50,910 --> 00:26:57,600 +called the median. Now, what's the median? What's + +373 +00:26:57,600 --> 00:27:00,760 +the definition of the median from your previous + +374 +00:27:00,760 --> 00:27:05,880 +studies? What's the median? I mean, what's the + +375 +00:27:05,880 --> 00:27:09,360 +definition of the median? Now the middle value, + +376 +00:27:09,760 --> 00:27:12,980 +that's correct, but after we arrange the data from + +377 +00:27:12,980 --> 00:27:17,040 +smallest to largest or largest to smallest, so we + +378 +00:27:17,040 --> 00:27:20,160 +should arrange the data, then we can figure out + +379 +00:27:20,160 --> 00:27:24,280 +the median. 
So the median is the middle point, but
+
+380
+00:27:24,280 --> 00:27:27,060
+after we arrange the data from smallest to largest
+
+381
+00:27:27,060 --> 00:27:30,030
+or vice versa. So that's the definition of the
+
+382
+00:27:30,030 --> 00:27:33,930
+median. So in an ordered array, so we have to have
+
+383
+00:27:33,930 --> 00:27:39,230
+order array, the median is the middle number. The
+
+384
+00:27:39,230 --> 00:27:42,810
+middle number means 50 percent of the data below
+
+385
+00:27:42,810 --> 00:27:50,370
+and 50 percent above the median because it's
+
+386
+00:27:50,370 --> 00:27:52,190
+called the median, the value in the middle after
+
+387
+00:27:52,190 --> 00:27:55,990
+you arrange the data from smallest to largest.
+
+388
+00:28:00,130 --> 00:28:02,770
+Suppose I again go back to the previous example
+
+389
+00:28:02,770 --> 00:28:09,690
+when we have data 1, 2, 3, 4, and 5. Now for this
+
+390
+00:28:09,690 --> 00:28:14,210
+specific example as we did before, now the data is
+
+391
+00:28:14,210 --> 00:28:18,670
+already ordered. The value in the middle is 3
+
+392
+00:28:18,670 --> 00:28:22,330
+because there are two values below it.
+
+393
+00:28:24,860 --> 00:28:27,300
+And also there are the same number of observations
+
+394
+00:28:27,300 --> 00:28:33,140
+above it. So 3 is the median. Now again imagine we
+
+395
+00:28:33,140 --> 00:28:37,320
+replace 5, which is the maximum value, by another
+
+396
+00:28:37,320 --> 00:28:42,140
+one which is extreme one, for example 10. In this
+
+397
+00:28:42,140 --> 00:28:47,600
+case, the median is still 3. Because the median is
+
+398
+00:28:47,600 --> 00:28:49,380
+just the value of the middle after you arrange the
+
+399
+00:28:49,380 --> 00:28:53,900
+data. So it doesn't matter what is the highest or
+
+400
+00:28:53,900 --> 00:28:58,860
+the maximum value is, the median in this case is
+
+401
+00:28:58,860 --> 00:29:03,700
+three. It doesn't change. 
That means the median is + +402 +00:29:03,700 --> 00:29:08,020 +not affected by extreme values. Or to be more + +403 +00:29:08,020 --> 00:29:12,910 +precise, we can say that The median is affected by + +404 +00:29:12,910 --> 00:29:18,990 +outlier, but not the same as the mean. So affect + +405 +00:29:18,990 --> 00:29:23,610 +the mean much more than the median. I mean, you + +406 +00:29:23,610 --> 00:29:26,550 +cannot say for this example, yes, the median is + +407 +00:29:26,550 --> 00:29:29,310 +not affected because the median was three, it + +408 +00:29:29,310 --> 00:29:33,590 +becomes three. But in another examples, there is + +409 +00:29:33,590 --> 00:29:36,750 +small difference between all. + +410 +00:29:40,770 --> 00:29:44,850 +Extreme values affected the mean much more than + +411 +00:29:44,850 --> 00:29:51,450 +the median. If the dataset has extreme values, we + +412 +00:29:51,450 --> 00:29:54,510 +have to use, what do you think, the mean or the + +413 +00:29:54,510 --> 00:29:58,090 +median? The median. So in case or in the presence + +414 +00:29:58,090 --> 00:30:01,910 +of extreme values or outliers, we have to use the + +415 +00:30:01,910 --> 00:30:07,010 +median, not the mean. But in general, we use If + +416 +00:30:07,010 --> 00:30:10,770 +the data is free of outliers, I mean if the data + +417 +00:30:10,770 --> 00:30:16,410 +has not extreme values, then you can use the mean. + +418 +00:30:16,510 --> 00:30:19,230 +The mean is much better than the median in this + +419 +00:30:19,230 --> 00:30:22,490 +case. But if the data has extreme values or + +420 +00:30:22,490 --> 00:30:27,190 +outliers, we should use the median instead of the + +421 +00:30:27,190 --> 00:30:31,310 +mean. Any question? So these are the most common + +422 +00:30:31,310 --> 00:30:36,710 +center tendency measures in statistics, the mean + +423 +00:30:36,710 --> 00:30:42,390 +and the median. And keep in mind, your data should + +424 +00:30:42,390 --> 00:30:46,170 +be numeric. 
I mean, you cannot use the mean or the + +425 +00:30:46,170 --> 00:30:50,730 +median for qualitative or categorical data, for + +426 +00:30:50,730 --> 00:30:54,310 +example, gender, males or females. You cannot say + +427 +00:30:54,310 --> 00:30:59,490 +the mean of gender or sex is 1.5. It doesn't make + +428 +00:30:59,490 --> 00:31:05,490 +sense. It should be numerical data to use the mean + +429 +00:31:05,490 --> 00:31:07,590 +or the median. So the mean and the median is used + +430 +00:31:07,590 --> 00:31:11,210 +only for numerical data. And we have to + +431 +00:31:11,210 --> 00:31:14,170 +distinguish between mean and median. Mean is used + +432 +00:31:14,170 --> 00:31:16,870 +for data that has not outliers or extreme values, + +433 +00:31:17,370 --> 00:31:21,450 +while the median is used for data that has + +434 +00:31:21,450 --> 00:31:25,230 +outliers or extreme values. Sometimes better to + +435 +00:31:25,230 --> 00:31:27,990 +report both. I mean, sometimes better to report + +436 +00:31:27,990 --> 00:31:33,450 +mean and the median. So you just say the sales for + +437 +00:31:33,450 --> 00:31:40,560 +this company is, for example, $500,000. And the + +438 +00:31:40,560 --> 00:31:43,900 +median, for example, is 550,000. You can see that. + +439 +00:31:45,680 --> 00:31:46,400 +Is it clear? + +440 +00:31:51,440 --> 00:31:55,560 +If you have a small data, it's straightforward and + +441 +00:31:55,560 --> 00:31:59,180 +it's very easy to locate the median. But in case + +442 +00:31:59,180 --> 00:32:02,120 +of large dataset, how can we locate the median? + +443 +00:32:02,340 --> 00:32:06,640 +It's not easy. Just look at the data and you can + +444 +00:32:06,640 --> 00:32:11,200 +say this is the median. It's not easy task. So we + +445 +00:32:11,200 --> 00:32:15,820 +need a rule that locate the median. 
The location + +446 +00:32:15,820 --> 00:32:18,020 +of the median when the values are in numerical + +447 +00:32:18,020 --> 00:32:23,580 +order from smallest to largest is N plus one + +448 +00:32:23,580 --> 00:32:26,140 +divided by two. That's the position of the median. + +449 +00:32:26,640 --> 00:32:28,860 +If we go back a little bit to the previous + +450 +00:32:28,860 --> 00:32:34,980 +example, here N was five. So the location was + +451 +00:32:34,980 --> 00:32:40,000 +number three, because n plus one divided by two, + +452 +00:32:40,120 --> 00:32:43,120 +five plus one divided by two is three. So location + +453 +00:32:43,120 --> 00:32:47,340 +number three is the median. Location number one is + +454 +00:32:47,340 --> 00:32:50,840 +one, in this case, then two, then three. So + +455 +00:32:50,840 --> 00:32:53,740 +location number three is three. But maybe this + +456 +00:32:53,740 --> 00:32:57,280 +number is not three, and other value maybe 3.1 or + +457 +00:32:57,280 --> 00:33:02,440 +3.2. But the location is number three. Is it + +458 +00:33:02,440 --> 00:33:08,470 +clear? So that's the location. If it is odd, you + +459 +00:33:08,470 --> 00:33:13,270 +mean by odd number, five, seven and so on. So if + +460 +00:33:13,270 --> 00:33:17,090 +the number of values is odd, the median is the + +461 +00:33:17,090 --> 00:33:21,210 +middle number. Now let's imagine if we have even + +462 +00:33:21,210 --> 00:33:24,570 +number of observations. For example, we have one, + +463 +00:33:24,610 --> 00:33:28,270 +two, three, four, five and six. So imagine numbers + +464 +00:33:28,270 --> 00:33:32,390 +from one up to six. What's the median? Now three + +465 +00:33:32,390 --> 00:33:35,610 +is not the median because there are two + +466 +00:33:35,610 --> 00:33:43,390 +observations below three. And three above it. And + +467 +00:33:43,390 --> 00:33:46,210 +four is not the median because three observations + +468 +00:33:46,210 --> 00:33:53,290 +below, two above. 
So three and four is the middle + +469 +00:33:53,290 --> 00:33:56,870 +value. So just take the average of two middle + +470 +00:33:56,870 --> 00:34:01,570 +points, And that will be the median. So if n is + +471 +00:34:01,570 --> 00:34:07,990 +even, you have to locate two middle points. For + +472 +00:34:07,990 --> 00:34:10,310 +example, n over 2, in this case, we have six + +473 +00:34:10,310 --> 00:34:13,910 +observations. So divide by 2, not n plus 1 divided + +474 +00:34:13,910 --> 00:34:17,970 +by 2, just n over 2. So n over 2 is 3. So place + +475 +00:34:17,970 --> 00:34:22,930 +number 3, and the next one, place number 4, these + +476 +00:34:22,930 --> 00:34:25,930 +are the two middle points. Take the average of + +477 +00:34:25,930 --> 00:34:32,300 +these values, then that's your median. So if N is + +478 +00:34:32,300 --> 00:34:37,080 +even, you have to be careful. You have to find two + +479 +00:34:37,080 --> 00:34:40,860 +middle points and just take the average of these + +480 +00:34:40,860 --> 00:34:45,100 +two. So if N is even, the median is the average of + +481 +00:34:45,100 --> 00:34:49,200 +the two middle numbers. Keep in mind, when we are + +482 +00:34:49,200 --> 00:34:54,600 +saying N plus 2, N plus 2 is just the position of + +483 +00:34:54,600 --> 00:34:58,670 +the median, not the value, location. Not the + +484 +00:34:58,670 --> 00:35:07,770 +value. Is it clear? Any question? So location is + +485 +00:35:07,770 --> 00:35:10,150 +not the value. Location is just the place or the + +486 +00:35:10,150 --> 00:35:13,450 +position of the medium. If N is odd, the position + +487 +00:35:13,450 --> 00:35:17,710 +is N plus one divided by two. If N is even, the + +488 +00:35:17,710 --> 00:35:20,870 +positions of the two middle points are N over two + +489 +00:35:20,870 --> 00:35:23,090 +and the next term or the next point. + +490 +00:35:28,390 --> 00:35:32,510 +Last measure of center tendency is called the + +491 +00:35:32,510 --> 00:35:32,750 +mood. 

+
+492
+00:35:35,890 --> 00:35:39,010
+The definition of the mode, the mode is the most
+
+493
+00:35:39,010 --> 00:35:44,250
+frequent value. So sometimes the mode exists,
+
+494
+00:35:45,230 --> 00:35:48,570
+sometimes the mode does not exist. Or sometimes
+
+495
+00:35:48,570 --> 00:35:53,730
+there is only one mode, in other cases maybe there
+
+496
+00:35:53,730 --> 00:35:58,730
+are several modes. So a value that occurs most
+
+497
+00:35:58,730 --> 00:36:03,010
+often is called the mode. The mode is not affected
+
+498
+00:36:03,010 --> 00:36:07,610
+by extreme values. It can be used for either
+
+499
+00:36:07,610 --> 00:36:11,190
+numerical or categorical data. And that's the
+
+500
+00:36:11,190 --> 00:36:13,910
+difference between mean and median and the mode.
+
+501
+00:36:14,590 --> 00:36:16,930
+Mean and median is used just for numerical data.
+
+502
+00:36:17,430 --> 00:36:21,270
+Here, the mode can be used for both, categorical
+
+503
+00:36:21,270 --> 00:36:25,610
+and numerical data. Sometimes, as I mentioned,
+
+504
+00:36:25,930 --> 00:36:29,570
+there may be no mode or the mode does not exist.
+
+505
+00:36:30,130 --> 00:36:34,190
+In other cases, there may be several modes. So
+
+506
+00:36:34,190 --> 00:36:36,870
+the mode is the value that has the most frequent.
+
+507
+00:36:37,490 --> 00:36:43,650
+For example, if you look at this data, one is
+
+508
+00:36:43,650 --> 00:36:48,370
+repeated once, three is the same one time, five is
+
+509
+00:36:48,370 --> 00:36:52,290
+repeated twice. 
seven is one nine is repeated
+
+510
+00:36:52,290 --> 00:36:57,330
+three times and so on so in this case nine is the
+
+511
+00:36:57,330 --> 00:37:00,290
+mode because the mode again is the most frequent
+
+512
+00:37:00,290 --> 00:37:05,030
+value on
+
+513
+00:37:05,030 --> 00:37:08,550
+the right side there are some values zero one two
+
+514
+00:37:08,550 --> 00:37:12,830
+three up to six now each one is repeated once so
+
+515
+00:37:12,830 --> 00:37:15,350
+in this case the mode does not exist I mean there
+
+516
+00:37:15,350 --> 00:37:22,310
+is no mode So generally speaking, the mode is the
+
+517
+00:37:22,310 --> 00:37:26,310
+value that occurs most often. It can be used for
+
+518
+00:37:26,310 --> 00:37:29,790
+numerical or categorical data, not affected by
+
+519
+00:37:29,790 --> 00:37:32,970
+extreme values or outliers. Sometimes there is
+
+520
+00:37:32,970 --> 00:37:36,150
+only one mode as this example. Sometimes the mode
+
+521
+00:37:36,150 --> 00:37:40,390
+does not exist. Or sometimes there are several
+
+522
+00:37:40,390 --> 00:37:45,190
+modes. And so that's the definitions for mean,
+
+523
+00:37:46,430 --> 00:37:52,540
+median, and the mode. I will give just a numerical
+
+524
+00:37:52,540 --> 00:37:56,380
+example to know how can we compute these measures.
+
+525
+00:37:57,420 --> 00:38:01,540
+This data, simple data, just for illustration, we
+
+526
+00:38:01,540 --> 00:38:07,580
+have house prices. We have five data points, $2
+
+527
+00:38:07,580 --> 00:38:10,940
+million. This is the price of house A, for
+
+528
+00:38:10,940 --> 00:38:15,880
+example. House B price is 500,000. The other one
+
+529
+00:38:15,880 --> 00:38:19,120
+is 300,000. And two houses have the same price as
+
+530
+00:38:19,120 --> 00:38:25,850
+100,000. 
Now, just to compute the mean, add these + +531 +00:38:25,850 --> 00:38:29,350 +values or sum these values, which is three + +532 +00:38:29,350 --> 00:38:34,030 +million, divide by number of houses here, there + +533 +00:38:34,030 --> 00:38:38,550 +are five houses, so just three thousand divided by + +534 +00:38:38,550 --> 00:38:44,170 +five, six hundred thousand. The median, the value + +535 +00:38:44,170 --> 00:38:46,150 +in the median, after you arrange the data from + +536 +00:38:46,150 --> 00:38:51,470 +smallest to largest, Or largest smallest. This + +537 +00:38:51,470 --> 00:38:55,410 +data is already arranged from largest smallest or + +538 +00:38:55,410 --> 00:38:58,150 +smallest large. It doesn't matter actually. So the + +539 +00:38:58,150 --> 00:39:02,930 +median is $300,000. Make sense? Because there are + +540 +00:39:02,930 --> 00:39:09,490 +two house prices above and two below. So the + +541 +00:39:09,490 --> 00:39:13,610 +median is $300,000. Now if you look at these two + +542 +00:39:13,610 --> 00:39:21,350 +values, the mean for this data equals 600,000 and + +543 +00:39:21,350 --> 00:39:26,690 +the median is 300,000. The mean is double the + +544 +00:39:26,690 --> 00:39:31,750 +median. Do you think why there is a big difference + +545 +00:39:31,750 --> 00:39:36,030 +in this data between the mean and the median? + +546 +00:39:36,190 --> 00:39:42,290 +Which one? Two million dollars is extreme value, + +547 +00:39:42,510 --> 00:39:45,940 +very large number. I mean, if you compare two + +548 +00:39:45,940 --> 00:39:48,860 +million dollars with the other data sets or other + +549 +00:39:48,860 --> 00:39:51,320 +data values, you will see there is a big + +550 +00:39:51,320 --> 00:39:53,260 +difference between two million and five hundred. + +551 +00:39:53,620 --> 00:39:56,280 +It's four times, plus about three hundred + +552 +00:39:56,280 --> 00:39:59,780 +thousands, around seven times and so on. 
For this
+
+553
+00:39:59,780 --> 00:40:07,880
+value, the mean is affected. Exactly. The median
+
+554
+00:40:07,880 --> 00:40:11,740
+is resistant to outliers. It's affected but little
+
+555
+00:40:11,740 --> 00:40:17,100
+bit. For this reason, we have to use the median.
+
+556
+00:40:17,300 --> 00:40:20,720
+So the median makes more sense than using the
+
+557
+00:40:20,720 --> 00:40:24,480
+mean. The mode is just the most frequent value,
+
+558
+00:40:24,660 --> 00:40:28,720
+which is 100,000, because this value is repeated
+
+559
+00:40:28,720 --> 00:40:33,820
+twice. So that's the whole story for central
+
+560
+00:40:33,820 --> 00:40:40,720
+tendency measures, mean, median, and mode. Now the
+
+561
+00:40:40,720 --> 00:40:45,640
+question again is which measure to use? The mean
+
+562
+00:40:45,640 --> 00:40:49,280
+is generally used. The most common center tendency
+
+563
+00:40:49,280 --> 00:40:53,420
+is the mean. We can use it or we should use it
+
+564
+00:40:53,420 --> 00:40:59,920
+unless extreme values exist. I mean if the data
+
+565
+00:40:59,920 --> 00:41:03,960
+set has no outliers or extreme values, we have to
+
+566
+00:41:03,960 --> 00:41:06,240
+use the mean instead of the median.
+
+567
+00:41:09,810 --> 00:41:14,670
+The median is often used since the median is not
+
+568
+00:41:14,670 --> 00:41:18,330
+sensitive to extreme values. I mean, the median is
+
+569
+00:41:18,330 --> 00:41:22,030
+resistant to outliers. It remains nearly in the
+
+570
+00:41:22,030 --> 00:41:26,490
+same position if the dataset has outliers. But the
+
+571
+00:41:26,490 --> 00:41:29,850
+median will be affected either to the right or to
+
+572
+00:41:29,850 --> 00:41:34,350
+the left tail. So we have to use the median if the
+
+573
+00:41:34,350 --> 00:41:40,060
+data has extreme values. 
For example, median home + +574 +00:41:40,060 --> 00:41:44,100 +prices for the previous one may be reported for a + +575 +00:41:44,100 --> 00:41:48,000 +region that is less sensitive to outliers. So the + +576 +00:41:48,000 --> 00:41:52,880 +mean is more sensitive to outliers than the + +577 +00:41:52,880 --> 00:41:56,520 +median. Sometimes, I mean in some situations, it + +578 +00:41:56,520 --> 00:41:58,760 +makes sense to report both the mean and the + +579 +00:41:58,760 --> 00:42:01,860 +median. Just say the mean for this data for home + +580 +00:42:01,860 --> 00:42:07,570 +prices is 600,000 while the median is 300,000. If + +581 +00:42:07,570 --> 00:42:10,150 +you look at these two figures, you can tell that + +582 +00:42:10,150 --> 00:42:13,830 +there exists outlier or the outlier exists because + +583 +00:42:13,830 --> 00:42:17,230 +there is a big difference between the mean and the + +584 +00:42:17,230 --> 00:42:24,310 +median. So that's all for measures of central + +585 +00:42:24,310 --> 00:42:28,830 +tendency. Again, we explained three measures, + +586 +00:42:29,450 --> 00:42:33,930 +arithmetic mean, median, and mode. And arithmetic + +587 +00:42:33,930 --> 00:42:38,990 +mean again is denoted by X bar is pronounced as X + +588 +00:42:38,990 --> 00:42:44,410 +bar and just summation of X divided by N. So + +589 +00:42:44,410 --> 00:42:48,070 +summation Xi, i goes from 1 up to N divided by the + +590 +00:42:48,070 --> 00:42:52,170 +total number of observations. The median, as we + +591 +00:42:52,170 --> 00:42:55,690 +mentioned, is the value in the middle in ordered + +592 +00:42:55,690 --> 00:42:59,150 +array. After you arrange the data from smallest to + +593 +00:42:59,150 --> 00:43:01,930 +largest or vice versa, then the median is the + +594 +00:43:01,930 --> 00:43:06,330 +value in the middle. The mode is the most frequent + +595 +00:43:06,330 --> 00:43:09,030 +observed value. 
And we have to know that mean and + +596 +00:43:09,030 --> 00:43:13,870 +median are used only for numerical data, while the + +597 +00:43:13,870 --> 00:43:17,510 +mode can be used for both numerical and + +598 +00:43:17,510 --> 00:43:24,290 +categorical data. That's all about measures of + +599 +00:43:24,290 --> 00:43:27,210 +central tendency. Any question? + +600 +00:43:33,210 --> 00:43:40,230 +Let's move to measures of variation. It's another + +601 +00:43:40,230 --> 00:43:43,750 +type of measures. It's called measures of + +602 +00:43:43,750 --> 00:43:47,490 +variation, sometimes called measures of spread. + +603 +00:43:50,490 --> 00:43:53,850 +Now, variation can be computed by using range, + +604 +00:43:55,590 --> 00:44:00,850 +variance, standard deviation, and coefficient of + +605 +00:44:00,850 --> 00:44:08,430 +variation. So we have four types, range, variance, + +606 +00:44:09,250 --> 00:44:12,050 +standard deviation, and coefficient of variation. + +607 +00:44:13,710 --> 00:44:16,150 +Now, measures of variation give information on the + +608 +00:44:16,150 --> 00:44:19,410 +spread. Now, this is the first difference between + +609 +00:44:19,410 --> 00:44:24,210 +central tendency measures and measures of + +610 +00:44:24,210 --> 00:44:28,270 +variation. That one measures the central value or + +611 +00:44:28,270 --> 00:44:30,790 +the value in the middle. Here, it measures the + +612 +00:44:30,790 --> 00:44:36,310 +spread. Or variability. Or dispersion of the data. + +613 +00:44:36,450 --> 00:44:40,310 +Do you know what is dispersion? Dispersion. + +614 +00:44:40,630 --> 00:44:45,590 +Tabaad. So major variation given formation with + +615 +00:44:45,590 --> 00:44:48,350 +the spread. Spread or variation or dispersion of + +616 +00:44:48,350 --> 00:44:52,250 +the data values. Now if you look at these two bell + +617 +00:44:52,250 --> 00:44:52,650 +shapes. + +618 +00:44:55,670 --> 00:44:59,170 +Both have the same center. 
The center I mean the + +619 +00:44:59,170 --> 00:45:01,730 +value in the middle. So the value in the middle + +620 +00:45:01,730 --> 00:45:06,990 +here for figure + +621 +00:45:06,990 --> 00:45:10,150 +graph number one is the same as the value for the + +622 +00:45:10,150 --> 00:45:16,270 +other graph. So both graphs have the same center. + +623 +00:45:17,430 --> 00:45:20,670 +But if you look at the spread, you will see that + +624 +00:45:20,670 --> 00:45:26,230 +figure A is less spread than figure B. Now if you + +625 +00:45:26,230 --> 00:45:29,720 +look at this one, the spread here, is much less + +626 +00:45:29,720 --> 00:45:34,120 +than the other one. Even they have the same + +627 +00:45:34,120 --> 00:45:39,260 +center, the same mean, but figure A is more spread + +628 +00:45:39,260 --> 00:45:45,140 +than figure B. It means that the variation in A is + +629 +00:45:45,140 --> 00:45:49,920 +much less than the variation in figure B. So it + +630 +00:45:49,920 --> 00:45:55,960 +means that the mean is not sufficient to describe + +631 +00:45:55,960 --> 00:45:59,970 +your data. Because maybe you have two datasets and + +632 +00:45:59,970 --> 00:46:03,330 +both have the same mean, but the spread or the + +633 +00:46:03,330 --> 00:46:07,350 +variation is completely different. Again, maybe we + +634 +00:46:07,350 --> 00:46:10,250 +have two classes of statistics, class A and class + +635 +00:46:10,250 --> 00:46:13,230 +B. The center or the mean or the average is the + +636 +00:46:13,230 --> 00:46:16,150 +same for each one. For example, maybe the average + +637 +00:46:16,150 --> 00:46:19,810 +of this class is 70. The average of class B is + +638 +00:46:19,810 --> 00:46:26,640 +also 70. But the scores are scattered. or spread + +639 +00:46:26,640 --> 00:46:32,580 +out in class A maybe much more than in class B. So + +640 +00:46:32,580 --> 00:46:34,280 +the mean is not sufficient to describe the data. 
+ +641 +00:46:34,360 --> 00:46:37,100 +You have to say that the mean equals such and such + +642 +00:46:37,100 --> 00:46:41,000 +and the spread. And one of these measures we'll + +643 +00:46:41,000 --> 00:46:44,500 +talk later about range and variance standard + +644 +00:46:44,500 --> 00:46:49,030 +deviation. So I mean, The mean by itself is not + +645 +00:46:49,030 --> 00:46:51,890 +sufficient to describe the data. You have to use + +646 +00:46:51,890 --> 00:46:55,730 +something else to measure the variation or the + +647 +00:46:55,730 --> 00:46:57,950 +spread of the data. Make sense? + +648 +00:47:02,170 --> 00:47:05,670 +The first measure of variation, the easiest one, + +649 +00:47:05,810 --> 00:47:11,230 +is called the range. The range is the simplest + +650 +00:47:11,230 --> 00:47:15,590 +measure of variation. The range is just the + +651 +00:47:15,590 --> 00:47:19,750 +difference or the distance between the largest and + +652 +00:47:19,750 --> 00:47:23,550 +the smallest value. For example, suppose the + +653 +00:47:23,550 --> 00:47:27,070 +minimum score for this class is 40 and the maximum + +654 +00:47:27,070 --> 00:47:33,230 +is 90. So the range is 50, 90 minus 40. Now + +655 +00:47:33,230 --> 00:47:38,850 +imagine that the minimum score for this class is + +656 +00:47:38,850 --> 00:47:47,330 +60 and the maximum is 80, so 20. If we replace 80 + +657 +00:47:47,330 --> 00:47:51,450 +by 100, I mean the minimum is 60 and the maximum + +658 +00:47:51,450 --> 00:47:57,030 +is 100, it's 40. That means a range is affected by + +659 +00:47:57,030 --> 00:48:02,170 +outliers because it depends only on two values. + +660 +00:48:03,480 --> 00:48:06,100 +maximum and minimum value. So it should be + +661 +00:48:06,100 --> 00:48:09,320 +affected by outliers. So range is sensitive to + +662 +00:48:09,320 --> 00:48:12,780 +outliers. 
So if the data has the data set has
+
+663
+00:48:12,780 --> 00:48:15,660
+outliers, then in this case, you have to avoid
+
+664
+00:48:15,660 --> 00:48:19,640
+using range because range only based on two
+
+665
+00:48:19,640 --> 00:48:23,480
+values. So it should be affected by outliers. Now
+
+666
+00:48:23,480 --> 00:48:25,880
+for the simple example, suppose we have this
+
+667
+00:48:25,880 --> 00:48:32,360
+data. The minimum value is one. I mean, the
+
+668
+00:48:32,360 --> 00:48:34,680
+smallest value is one, and the largest or the
+
+669
+00:48:34,680 --> 00:48:38,880
+maximum is 13. So it makes sense that the range of
+
+670
+00:48:38,880 --> 00:48:41,840
+the data is the difference between these two
+
+671
+00:48:41,840 --> 00:48:48,540
+values. So 13 minus one is 12. Now, imagine that
+
+672
+00:48:48,540 --> 00:48:58,040
+we just replace 13 by 100. So the new range will
+
+673
+00:48:58,040 --> 00:49:03,820
+be equal to 100 minus 1, 99. So the previous range
+
+674
+00:49:03,820 --> 00:49:08,340
+was 12. It becomes now 99 after we replace the
+
+675
+00:49:08,340 --> 00:49:12,100
+maximum by 100. So it means that range is affected
+
+676
+00:49:12,100 --> 00:49:18,740
+by extreme values. So the mean and range both are
+
+677
+00:49:18,740 --> 00:49:23,040
+sensitive to outliers. So you have to link between
+
+678
+00:49:26,410 --> 00:49:30,210
+measures of central tendency and measures of
+
+679
+00:49:30,210 --> 00:49:33,130
+variation. Mean and range are affected by
+
+680
+00:49:33,130 --> 00:49:37,910
+outliers. The mean and range are affected by
+
+681
+00:49:37,910 --> 00:49:41,450
+outliers. This is an example. So it's very easy to
+
+682
+00:49:41,450 --> 00:49:49,550
+compute the mean. Next, if you look at why the
+
+683
+00:49:49,550 --> 00:49:51,190
+range can be misleading.
+ +684 +00:49:53,830 --> 00:49:56,810 +Sometimes you report the range and the range does + +685 +00:49:56,810 --> 00:50:00,310 +not give an appropriate answer or appropriate + +686 +00:50:00,310 --> 00:50:04,450 +result because number + +687 +00:50:04,450 --> 00:50:06,790 +one ignores the way in which the data are + +688 +00:50:06,790 --> 00:50:10,770 +distributed. For example, if you look at this + +689 +00:50:10,770 --> 00:50:15,430 +specific data, we have data seven, eight, nine, + +690 +00:50:15,590 --> 00:50:18,110 +ten, eleven and twelve. So the range is five. + +691 +00:50:19,270 --> 00:50:21,910 +Twelve minus seven is five. Now if you look at the + +692 +00:50:21,910 --> 00:50:26,360 +other data, The smallest value was seven. + +693 +00:50:29,600 --> 00:50:33,260 +And there is a gap between the smallest and the + +694 +00:50:33,260 --> 00:50:38,220 +next smallest value, which is 10. And also we have + +695 +00:50:38,220 --> 00:50:44,480 +12 is repeated three times. Still the range is the + +696 +00:50:44,480 --> 00:50:48,140 +same. Even there is a difference between these two + +697 +00:50:48,140 --> 00:50:53,640 +values, between two sets. we have seven, eight, + +698 +00:50:53,760 --> 00:50:57,020 +nine up to 12. And then the other data, we have + +699 +00:50:57,020 --> 00:51:02,180 +seven, 10, 11, and 12 three times. Still, the + +700 +00:51:02,180 --> 00:51:06,360 +range equals five. So it doesn't make sense to + +701 +00:51:06,360 --> 00:51:09,620 +report the range as a measure of variation. + +702 +00:51:10,520 --> 00:51:12,640 +Because if you look at the distribution for this + +703 +00:51:12,640 --> 00:51:15,500 +data, it's completely different from the other + +704 +00:51:15,500 --> 00:51:20,860 +dataset. Even though it has the same range. So + +705 +00:51:20,860 --> 00:51:25,220 +range is not used in this case. Look at another + +706 +00:51:25,220 --> 00:51:25,680 +example. + +707 +00:51:28,300 --> 00:51:32,920 +We have data. 
All the data ranges, I mean, starts + +708 +00:51:32,920 --> 00:51:38,680 +from 1 up to 5. So the range is 4. If we just + +709 +00:51:38,680 --> 00:51:46,200 +replace the maximum, which is 5, by 120. So the + +710 +00:51:46,200 --> 00:51:49,190 +range is completely different. the range becomes + +711 +00:51:49,190 --> 00:51:55,010 +119. So that means range + +712 +00:51:55,010 --> 00:51:59,230 +is sensitive to outliers. So we have to avoid + +713 +00:51:59,230 --> 00:52:06,030 +using range in case of outliers or extreme values. + +714 +00:52:08,930 --> 00:52:14,410 +I will stop at the most important one, the + +715 +00:52:14,410 --> 00:52:18,350 +variance, for next time inshallah. Up to this + +716 +00:52:18,350 --> 00:52:19,310 +point, any questions? + +717 +00:52:22,330 --> 00:52:29,730 +Okay, stop at this point if + +718 +00:52:29,730 --> 00:52:30,510 +you have any question. + +719 +00:52:35,430 --> 00:52:39,430 +So later we'll discuss measures of variation and + +720 +00:52:39,430 --> 00:52:44,810 +variance, standard deviation up to the end of this + +721 +00:52:44,810 --> 00:52:45,090 +chapter. + +722 +00:52:54,630 --> 00:53:00,690 +So again, the range is sensitive to outliers. So + +723 +00:53:00,690 --> 00:53:03,850 +we have to avoid using range in this case. And + +724 +00:53:03,850 --> 00:53:06,270 +later we'll talk about the variance, which is the + +725 +00:53:06,270 --> 00:53:09,750 +most common measures of variation for next time, + +726 +00:53:09,830 --> 00:53:10,130 +inshallah. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..30859f56158c1d037f2ff419697e1d0dc59037e6 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_postprocess.srt @@ -0,0 +1,1956 @@ +1 +00:00:07,390 --> 00:00:09,890 +So last time, we discussed how to find the + +2 +00:00:09,890 --> 00:00:14,570 +probabilities underneath the normal curve for + +3 +00:00:14,570 --> 00:00:20,450 +three cases. If the point lies in the lower tail, + +4 +00:00:21,690 --> 00:00:28,150 +as this one, or upper tail, or between. So we can + +5 +00:00:28,150 --> 00:00:32,290 +do the computations for this kind of + +6 +00:00:32,290 --> 00:00:36,090 +probabilities. And I think we gave two examples. + +7 +00:00:36,620 --> 00:00:40,400 +For example, if we are looking for probability of + +8 +00:00:40,400 --> 00:00:45,020 +Z is smaller than 0.1, and Z, as we mentioned last + +9 +00:00:45,020 --> 00:00:48,660 +time, is the standardized normal distribution that + +10 +00:00:48,660 --> 00:00:53,120 +has mean of 0 and sigma is 1. In this case, just + +11 +00:00:53,120 --> 00:00:57,540 +go to the table you have. Now we are looking for 0 + +12 +00:00:57,540 --> 00:01:03,700 +.12, for example. So here we have 0.1. Under 2, we + +13 +00:01:03,700 --> 00:01:07,260 +get this result. This value is the probability of + +14 +00:01:07,260 --> 00:01:10,480 +Z smaller than 0.5. But you have to keep in mind + +15 +00:01:10,480 --> 00:01:14,460 +that we have to transform first from normal + +16 +00:01:14,460 --> 00:01:17,580 +distribution to standardized normal distribution. + +17 +00:01:18,620 --> 00:01:21,220 +For this specific example, if we are looking for + +18 +00:01:21,220 --> 00:01:26,960 +mean of 8 and standard deviation of 5, that's for + +19 +00:01:26,960 --> 00:01:30,020 +the normal distribution. 
In this case, the z-score + +20 +00:01:30,020 --> 00:01:32,920 +is given by this equation, which is x minus 3 + +21 +00:01:32,920 --> 00:01:37,060 +divided by sigma. So 8.6 minus 8 divided by 5 + +22 +00:01:37,060 --> 00:01:41,360 +gives 0.12. In this case, we can use the + +23 +00:01:41,360 --> 00:01:46,480 +standardized normal table. Now, for the other + +24 +00:01:46,480 --> 00:01:48,700 +case, we are looking for the probability of x + +25 +00:01:48,700 --> 00:01:51,880 +greater than 8.6. So we are looking for the upper + +26 +00:01:51,880 --> 00:01:56,900 +tail probability. The table we have gives the area + +27 +00:01:56,900 --> 00:02:01,640 +to the left side. And we know that the total area + +28 +00:02:01,640 --> 00:02:04,760 +underneath the normal curve is 1. So the + +29 +00:02:04,760 --> 00:02:08,980 +probability of x greater than 8.6 is the same as 1 + +30 +00:02:08,980 --> 00:02:13,720 +minus the probability of x smaller than x less + +31 +00:02:13,720 --> 00:02:19,100 +than 8.6. So here, for 8.6, we got z squared to be + +32 +00:02:19,100 --> 00:02:24,300 +0.12. So the probability of z greater than 0.12 is + +33 +00:02:24,300 --> 00:02:27,620 +the same as 1 minus z of z is smaller than 0.12. + +34 +00:02:28,850 --> 00:02:33,050 +So 1 minus the answer we just got from part A will + +35 +00:02:33,050 --> 00:02:35,930 +get us to something. So this is the probability of + +36 +00:02:35,930 --> 00:02:41,170 +X is greater than 8.6. So all of the time, if you + +37 +00:02:41,170 --> 00:02:45,390 +are asking about computing the probability in the + +38 +00:02:45,390 --> 00:02:48,290 +upper tail, you have to first find the probability + +39 +00:02:48,290 --> 00:02:52,630 +in the lower tail, then subtract that value from + +40 +00:02:52,630 --> 00:02:56,710 +1. So that's the probability for the upper tail. + +41 +00:02:58,110 --> 00:03:02,470 +The last one, we are looking for probability + +42 +00:03:02,470 --> 00:03:06,970 +between two values. For example, x. 
What's the + +43 +00:03:06,970 --> 00:03:10,190 +probability of x greater than 8 and smaller than 8 + +44 +00:03:10,190 --> 00:03:15,670 +.6? So we are looking for this area, the red area. + +45 +00:03:16,250 --> 00:03:18,710 +So the probability of x between these two values + +46 +00:03:18,710 --> 00:03:21,850 +actually equals the probability of x smaller than. + +47 +00:03:23,040 --> 00:03:27,560 +8.6 minus the probability of X smaller than 8A. + +48 +00:03:27,880 --> 00:03:31,140 +And that, in this case, you have to compute two + +49 +00:03:31,140 --> 00:03:34,780 +values of this score. One for the first value, + +50 +00:03:34,880 --> 00:03:40,020 +which is A. This value is zero because the mean is + +51 +00:03:40,020 --> 00:03:45,240 +zero. And we know that Z can be negative. If X is + +52 +00:03:45,240 --> 00:03:49,460 +smaller than Mu, Z can positive if X is greater + +53 +00:03:49,460 --> 00:03:55,220 +than Mu and equals zero only if X equals Mu. In + +54 +00:03:55,220 --> 00:03:59,340 +this case, X equals Mu, so Z score is zero. The + +55 +00:03:59,340 --> 00:04:03,220 +other one as we got before is 0.12. So now, we + +56 +00:04:03,220 --> 00:04:07,340 +transform actually the probability of X between + +57 +00:04:08,870 --> 00:04:13,870 +8 and 8.6 to z-score between 0 and 0.12. In this + +58 +00:04:13,870 --> 00:04:17,170 +case, we can use the normal theorem. Now, this + +59 +00:04:17,170 --> 00:04:21,130 +area is, as we mentioned, just b of x smaller than + +60 +00:04:21,130 --> 00:04:28,850 +0.12 minus b of x, b of z smaller than 0. This + +61 +00:04:28,850 --> 00:04:34,330 +probability, we know that, 0.54878. Now, the + +62 +00:04:34,330 --> 00:04:38,170 +probability of z smaller than 0 is one-half. + +63 +00:04:38,860 --> 00:04:41,380 +Because the total area underneath the normal curve + +64 +00:04:41,380 --> 00:04:45,860 +is 1, and 0 divided the curve into two equally + +65 +00:04:45,860 --> 00:04:49,100 +parts. 
So the area to the right of 0 is the same
+
+66
+00:04:49,100 --> 00:04:52,100
+as the area to the left of 0. So in this case,
+
+67
+00:04:52,880 --> 00:04:57,160
+minus 0.5. So this is your answer. So the
+
+68
+00:04:57,160 --> 00:05:04,040
+probability of Z between 8 and 8.6 is around 0.0478.
+
+69
+00:05:04,320 --> 00:05:07,020
+I think we stopped last time at this point.
+
+70
+00:05:09,860 --> 00:05:17,760
+This is another example to compute the probability
+
+71
+00:05:17,760 --> 00:05:24,020
+of X greater than 7.4 and 8. Also, it's the same
+
+72
+00:05:24,020 --> 00:05:29,700
+idea here, just find these scores for the two
+
+73
+00:05:29,700 --> 00:05:30,160
+values.
+
+74
+00:05:33,850 --> 00:05:38,670
+L with B of Z greater than minus 0.12 up to 0. Now
+
+75
+00:05:38,670 --> 00:05:47,010
+this red area equals the area below 0. I mean B of
+
+76
+00:05:47,010 --> 00:05:50,330
+Z smaller than 0 minus the probability of Z
+
+77
+00:05:50,330 --> 00:05:57,090
+smaller than minus 0.12. Now by using symmetric
+
+78
+00:05:57,090 --> 00:06:00,470
+probability of the normal distribution, we know
+
+79
+00:06:00,470 --> 00:06:04,620
+that The probability of Z smaller than minus 0.12
+
+80
+00:06:04,620 --> 00:06:11,420
+equals the probability of Z greater than 0.12.
+
+81
+00:06:11,780 --> 00:06:15,520
+Because this area, if we have Z smaller than minus
+
+82
+00:06:15,520 --> 00:06:19,300
+0.12, the area to the left, that equals the area
+
+83
+00:06:19,300 --> 00:06:21,560
+to the right of the same point, because of
+
+84
+00:06:21,560 --> 00:06:25,480
+symmetry. And finally, you will end with this
+
+85
+00:06:25,480 --> 00:06:25,740
+result.
+
+86
+00:06:30,670 --> 00:06:36,730
+D of z minus 0.12, all the way up to 0, this area,
+
+87
+00:06:37,630 --> 00:06:43,130
+is the same as the area from 0 up to 0.5. So this
+
+88
+00:06:43,130 --> 00:06:46,530
+area actually is the same as D of z between 0 and
+
+89
+00:06:46,530 --> 00:06:51,270
+0.5. 
So if you have negative sign, and then take
+
+90
+00:06:51,270 --> 00:06:54,390
+the opposite one, the answer will be the same
+
+91
+00:06:54,390 --> 00:06:57,610
+because the normal distribution is symmetric
+
+92
+00:06:57,610 --> 00:07:04,690
+around 0. Any questions? I think we stopped
+
+93
+00:07:04,690 --> 00:07:09,330
+here. And also we talked about empirical rules.
+
+94
+00:07:10,930 --> 00:07:13,250
+The one we mentioned in chapter three, in chapter
+
+95
+00:07:13,250 --> 00:07:16,170
+three. And we know that, as we mentioned before,
+
+96
+00:07:16,310 --> 00:07:25,550
+that is 68.26% of the observations fall within one
+
+97
+00:07:25,550 --> 00:07:30,660
+standard deviation around the mean. So this area
+
+98
+00:07:30,660 --> 00:07:35,380
+from mu minus one sigma up to mu plus sigma, this
+
+99
+00:07:35,380 --> 00:07:42,080
+area covers around 68%. Also
+
+100
+00:07:42,080 --> 00:07:50,700
+95% or actually 95.44% of the data falls within
+
+101
+00:07:50,700 --> 00:07:54,840
+two standard deviations of the mean. And finally,
+
+102
+00:07:55,040 --> 00:08:00,220
+around most of the data, around 99.73% of the data
+
+103
+00:08:00,220 --> 00:08:06,860
+falls within three standard deviations of the population
+
+104
+00:08:06,860 --> 00:08:07,160
+mean.
+
+105
+00:08:11,550 --> 00:08:14,970
+And now the new topic is how can we find the X
+
+106
+00:08:14,970 --> 00:08:18,510
+value if the probability is given. It's vice
+
+107
+00:08:18,510 --> 00:08:21,810
+versa. In the previous questions, we were asking
+
+108
+00:08:21,810 --> 00:08:26,130
+about find the probability, for example, if X is
+
+109
+00:08:26,130 --> 00:08:30,450
+smaller than a certain number. Now suppose this
+
+110
+00:08:30,450 --> 00:08:34,070
+probability is given, and we are looking to find
+
+111
+00:08:34,070 --> 00:08:38,710
+this value. 
I mean, for example, suppose in the + +112 +00:08:38,710 --> 00:08:39,850 +previous examples here, + +113 +00:08:43,380 --> 00:08:46,760 +Suppose we know this probability. So the + +114 +00:08:46,760 --> 00:08:50,220 +probability is given. The question is, how can we + +115 +00:08:50,220 --> 00:08:54,140 +find this value? It's the opposite, sometimes + +116 +00:08:54,140 --> 00:08:57,180 +called backward normal calculations. + +117 +00:09:01,660 --> 00:09:05,580 +There are actually two steps. to find the x value + +118 +00:09:05,580 --> 00:09:10,040 +for a certain probability or for a given or for a + +119 +00:09:10,040 --> 00:09:12,900 +known probability the first step we have to find + +120 +00:09:12,900 --> 00:09:20,540 +the z score then use this equation to find the x + +121 +00:09:20,540 --> 00:09:25,100 +value corresponding to the z score you have and x + +122 +00:09:25,100 --> 00:09:30,120 +is just mu plus sigma times mu so first step you + +123 +00:09:30,120 --> 00:09:31,740 +have to find the z score + +124 +00:09:35,350 --> 00:09:38,690 +corresponding to the probability we have. So find + +125 +00:09:38,690 --> 00:09:43,870 +the z value for the non-probability, then use that + +126 +00:09:43,870 --> 00:09:47,910 +z score to find the value of x by using this + +127 +00:09:47,910 --> 00:09:52,010 +equation. So x equals mu plus z sigma. z could be + +128 +00:09:52,010 --> 00:09:54,490 +negative, could be positive, depends on the + +129 +00:09:54,490 --> 00:09:59,170 +probability you have. If the probability is above + +130 +00:09:59,170 --> 00:10:03,390 +0.5, I mean 0.5 and greater than 0.5, this + +131 +00:10:03,390 --> 00:10:08,880 +corresponds to 10. But if z-score is negative, I'm + +132 +00:10:08,880 --> 00:10:10,680 +sorry, if z-score is negative, then the + +133 +00:10:10,680 --> 00:10:14,880 +probability should be smaller than 0.5. 
So if the
+
+134
+00:10:14,880 --> 00:10:19,360
+probability is given less than 0.5, then your z
+
+135
+00:10:19,360 --> 00:10:21,100
+-score should be negative, otherwise should be
+
+136
+00:10:21,100 --> 00:10:23,720
+positive. So you have to be careful in this case.
+
+137
+00:10:25,700 --> 00:10:31,240
+Now look at this example. Let x represent the time
+
+138
+00:10:31,240 --> 00:10:35,770
+it takes in seconds. to download an image file
+
+139
+00:10:35,770 --> 00:10:39,730
+from the internet. The same example as we did
+
+140
+00:10:39,730 --> 00:10:43,590
+before. And here we assume that x is normal
+
+141
+00:10:43,590 --> 00:10:46,330
+distribution with mean of 8 and standard deviation
+
+142
+00:10:46,330 --> 00:10:51,710
+of 5. Now, let's see how can we find the value of
+
+143
+00:10:51,710 --> 00:10:58,050
+x such that 20% of download times are smaller than
+
+144
+00:10:58,050 --> 00:10:58,410
+x.
+
+145
+00:11:01,060 --> 00:11:04,580
+So, this probability is a fraction. Also, always
+
+146
+00:11:04,580 --> 00:11:07,840
+the probability is between 0 and 1. So, the
+
+147
+00:11:07,840 --> 00:11:11,820
+probability here is 20%. In this case, your z
+
+148
+00:11:11,820 --> 00:11:15,560
+-score should be negative. Because 20% is less
+
+149
+00:11:15,560 --> 00:11:18,660
+than 0.5. So, z-score should be in this side, in
+
+150
+00:11:18,660 --> 00:11:19,320
+the left side.
+
+151
+00:11:22,340 --> 00:11:26,380
+So, again, he asks about finding x-value such that
+
+152
+00:11:26,380 --> 00:11:27,140
+20%.
+
+153
+00:11:31,740 --> 00:11:35,400
+So here again we are looking for this value, for
+
+154
+00:11:35,400 --> 00:11:40,760
+the value of x, which is smaller than the area to
+
+155
+00:11:40,760 --> 00:11:45,680
+the left of this x, equals 0.2.
+
+156
+00:11:47,480 --> 00:11:51,100
+Now, the first step, we have to find the z-score.
+
+157
+00:11:52,650 --> 00:11:56,430
+It's backward, z-score first, then x. 
Find a z + +158 +00:11:56,430 --> 00:12:00,450 +-score corresponding to the probability of 0.2. + +159 +00:12:02,510 --> 00:12:07,710 +The approximate one, the near value, I mean, to + +160 +00:12:07,710 --> 00:12:12,190 +the 0.2 is 0.2005. Sometimes you have the exact + +161 +00:12:12,190 --> 00:12:16,570 +value from the table you have, but most of the + +162 +00:12:16,570 --> 00:12:19,050 +time you don't have it. So you have to look at the + +163 +00:12:19,050 --> 00:12:21,790 +approximate value, which is very close to the one + +164 +00:12:21,790 --> 00:12:25,840 +you have. So here, we are looking for 0.2. The + +165 +00:12:25,840 --> 00:12:30,660 +closest value to 0.2 is 0.2005. Now, the + +166 +00:12:30,660 --> 00:12:34,720 +corresponding value to this probability is minus 0 + +167 +00:12:34,720 --> 00:12:40,120 +.8 all the way up to 4. So your z-score is + +168 +00:12:40,120 --> 00:12:47,840 +negative 0.84. So this is the first step. Any + +169 +00:12:47,840 --> 00:12:51,120 +question? Again. + +170 +00:12:53,950 --> 00:12:57,050 +Now if we just go back to this equation, + +171 +00:12:59,930 --> 00:13:03,670 +z equals x minus mu over sigma. A cross + +172 +00:13:03,670 --> 00:13:07,810 +multiplication, I mean if you multiply both sides + +173 +00:13:07,810 --> 00:13:16,110 +by sigma, you will get sigma times z equals x + +174 +00:13:16,110 --> 00:13:17,510 +minus mu. + +175 +00:13:32,120 --> 00:13:35,500 +Now, in this question, + +176 +00:13:37,960 --> 00:13:43,160 +he asks about, find the value of x such that 20% + +177 +00:13:43,160 --> 00:13:46,560 +of download times are less than x. + +178 +00:13:50,740 --> 00:13:54,080 +Now the probability is less than 0.5, so your z + +179 +00:13:54,080 --> 00:13:57,780 +-score should be on the left side. So here we need + +180 +00:13:57,780 --> 00:14:03,660 +to find the value of z first. Go back to the + +181 +00:14:03,660 --> 00:14:04,640 +normal table you have. 
+ +182 +00:14:07,680 --> 00:14:08,860 +This is the normal table. + +183 +00:14:16,800 --> 00:14:21,250 +We are looking for minus 0.2. I'm sorry, we are + +184 +00:14:21,250 --> 00:14:28,910 +looking for 0.2. So the closest value to 0.2 is + +185 +00:14:28,910 --> 00:14:34,750 +this one, 0.2005. So this is the closest value. + +186 +00:14:49,630 --> 00:14:54,470 +So the exact answer is sometimes not given. So the + +187 +00:14:54,470 --> 00:14:59,190 +approximate one, minus 0.8, all the way up to 4. + +188 +00:15:00,030 --> 00:15:06,410 +So z-score minus 0.8. Any question? + +189 +00:15:10,330 --> 00:15:15,330 +So the value of z-score is minus 0.84. So my + +190 +00:15:15,330 --> 00:15:21,430 +corresponding x-value equals X equal mu. The mu is + +191 +00:15:21,430 --> 00:15:31,770 +given as A plus Z is minus 0.84 times sigma. Sigma + +192 +00:15:31,770 --> 00:15:42,150 +is 5. You will end with 3.8. So this means the + +193 +00:15:42,150 --> 00:15:47,550 +probability of X less than 3.8. Equal point. + +194 +00:15:47,810 --> 00:15:48,950 +Exactly, equal point. + +195 +00:15:52,430 --> 00:15:57,230 +So in this case, the probability is given, which + +196 +00:15:57,230 --> 00:16:01,710 +is 0.20. And we ask about what's the value of x in + +197 +00:16:01,710 --> 00:16:07,830 +this case. So the first step was find the z-score. + +198 +00:16:09,210 --> 00:16:15,250 +Then use this value. I mean, plug this value in. + +199 +00:16:17,380 --> 00:16:22,520 +to find the corresponding X score. That's the + +200 +00:16:22,520 --> 00:16:25,240 +backward normal calculations. + +201 +00:16:28,820 --> 00:16:34,920 +Let's do one problem from the practice, which is + +202 +00:16:34,920 --> 00:16:36,820 +number 18. + +203 +00:16:53,390 --> 00:16:56,310 +Is it clear? 
+ +204 +00:17:00,590 --> 00:17:10,310 +The owners of a fish market determined + +205 +00:17:10,310 --> 00:17:20,570 +that the average weight for a catfish is 3.2 So + +206 +00:17:20,570 --> 00:17:27,230 +this is the + +207 +00:17:27,230 --> 00:17:39,150 +value of the mean 40 + +208 +00:17:48,310 --> 00:17:57,010 +So again, the owner of a fish market determined + +209 +00:17:57,010 --> 00:18:02,350 +that the average weight for a catfish is 3.2 + +210 +00:18:02,350 --> 00:18:08,950 +pounds. So the mean is 3.2 with a standard + +211 +00:18:08,950 --> 00:18:18,280 +deviation of 0.8. So sigma is 0.8. Now, assuming + +212 +00:18:18,280 --> 00:18:22,640 +the weights of catfish are normally distributed. + +213 +00:18:23,580 --> 00:18:27,220 +In this case, you ask about what's the probability + +214 +00:18:27,220 --> 00:18:31,800 +that a randomly selected catfish will weigh more + +215 +00:18:31,800 --> 00:18:38,870 +than 4.4. So what's the probability of X More + +216 +00:18:38,870 --> 00:18:41,090 +than. So greater than 4. + +217 +00:18:45,830 --> 00:18:49,290 +I just gave the idea to solve this problem. At + +218 +00:18:49,290 --> 00:18:54,190 +home, you can compute it to find the exact answer. + +219 +00:18:54,590 --> 00:18:59,570 +So first step, find z score. z is 4.4. + +220 +00:19:01,730 --> 00:19:02,590 +Divide by z. + +221 +00:19:05,910 --> 00:19:12,360 +Just compute this value. It's 0.8 divided by 0.8 + +222 +00:19:12,360 --> 00:19:13,100 +equals 1. + +223 +00:19:18,560 --> 00:19:22,660 +So z-score is 1. So we are looking for the + +224 +00:19:22,660 --> 00:19:24,700 +probability of z greater than 1. + +225 +00:19:28,340 --> 00:19:35,220 +1.5. 1.2. 1.5. 1.5. 1.5. So I'm looking for the + +226 +00:19:35,220 --> 00:19:37,760 +probability of x of z greater than + +227 +00:19:40,980 --> 00:19:48,700 +1 minus P + +228 +00:19:48,700 --> 00:19:52,700 +of Z less than or equal to 1.5. Now go back to the + +229 +00:19:52,700 --> 00:19:53,040 +table. 
+
+230
+00:20:01,540 --> 00:20:07,260
+Now 1.5 under 0. It's 0.9332.
+
+231
+00:20:11,410 --> 00:20:19,750
+So, 1 minus this probability gives 0.0668.
+
+232
+00:20:21,210 --> 00:20:24,350
+That's the probability of X greater than 4.4.
+
+233
+00:20:28,130 --> 00:20:31,590
+So, the answer is 0.0668.
+
+234
+00:20:34,870 --> 00:20:37,550
+Now, for the same question.
+
+235
+00:20:41,320 --> 00:20:44,380
+What's the probability that a randomly selected
+
+236
+00:20:44,380 --> 00:20:47,080
+fish will weigh between 3 and 5 pounds?
+
+237
+00:20:49,660 --> 00:20:50,400
+3?
+
+238
+00:20:52,940 --> 00:21:01,040
+Up to 5. So first we have to find the score for 3
+
+239
+00:21:01,040 --> 00:21:11,260
+out of 5. For it to be just 3 minus 3.2. Divide by
+
+240
+00:21:11,260 --> 00:21:17,380
+0.8 is the first z value. Negative 0.2 divided by
+
+241
+00:21:17,380 --> 00:21:30,360
+0.8 minus 0.25. The other one, 5 minus 3.2 divided
+
+242
+00:21:30,360 --> 00:21:36,680
+by 0.8. 1.8 divided by 0.8 equals
+
+243
+00:21:42,680 --> 00:21:44,120
+2.25.
+
+244
+00:21:50,840 --> 00:21:57,020
+Just double check this result. So here, the
+
+245
+00:21:57,020 --> 00:22:04,520
+probability of X between 3 and 5 equals minus 0
+
+246
+00:22:04,520 --> 00:22:08,200
+.25, smaller than Z, smaller than 2.25.
+
+247
+00:22:12,650 --> 00:22:17,210
+So it's transformed from normal distribution to
+
+248
+00:22:17,210 --> 00:22:20,750
+standardized normal distribution. So here instead
+
+249
+00:22:20,750 --> 00:22:23,530
+of computing the probability of X between three
+
+250
+00:22:23,530 --> 00:22:26,070
+and five, we are looking for the probability
+
+251
+00:22:26,070 --> 00:22:31,350
+between Z between actually minus. It's minus
+
+252
+00:22:31,350 --> 00:22:34,590
+because your value here is smaller than the
+
+253
+00:22:34,590 --> 00:22:37,790
+average. The average was 3.2, so it should be
+
+254
+00:22:37,790 --> 00:22:42,630
+negative. 
So z score between minus 0.25 all the + +255 +00:22:42,630 --> 00:22:48,150 +way up to 2.25. So now, this is the probability we + +256 +00:22:48,150 --> 00:22:56,590 +are looking for. Zero in the middle minus one + +257 +00:22:56,590 --> 00:23:03,610 +-fourth to the left of z of zero, mu of zero. And + +258 +00:23:03,610 --> 00:23:08,560 +this is the value of 2.25. Now we are looking + +259 +00:23:08,560 --> 00:23:09,940 +actually for this probability. + +260 +00:23:12,960 --> 00:23:18,360 +The area between minus 0.25 all the way up to 2.5. + +261 +00:23:19,980 --> 00:23:25,200 +So this area equals the + +262 +00:23:25,200 --> 00:23:29,000 +probability of Z less than 2.25 minus. + +263 +00:23:34,280 --> 00:23:38,780 +And again, use the normal. table to give this + +264 +00:23:38,780 --> 00:23:39,860 +value and another one. + +265 +00:23:42,980 --> 00:23:52,400 +Any questions? So first step here, we compute the + +266 +00:23:52,400 --> 00:23:56,880 +z-score for each value x. So the problem is + +267 +00:23:56,880 --> 00:24:01,380 +transformed from normal distribution to + +268 +00:24:01,380 --> 00:24:05,060 +standardized normal distribution. So it becomes z + +269 +00:24:05,060 --> 00:24:11,500 +between minus 1.25 up to 2.25. Now, this area, + +270 +00:24:11,960 --> 00:24:19,900 +this dashed area equals the area below 2.25 minus + +271 +00:24:19,900 --> 00:24:25,000 +the area below minus 1.25. Now, by using the + +272 +00:24:25,000 --> 00:24:27,760 +similar way we did before, you will compute the + +273 +00:24:27,760 --> 00:24:30,960 +value of z. The probability of z is smaller than 2 + +274 +00:24:30,960 --> 00:24:39,580 +.25 by using The normal table. So here, 2.2 up to + +275 +00:24:39,580 --> 00:24:47,900 +5. So 9, 8, 7, 8. 9, 8, 7, 8. + +276 +00:24:53,900 --> 00:25:00,260 +So the area below 2.25, 2.2, this value. All the + +277 +00:25:00,260 --> 00:25:03,940 +way up to 5 gives 987. 
+ +278 +00:25:05,540 --> 00:25:08,860 +Now, what's about the probability of Z smaller + +279 +00:25:08,860 --> 00:25:15,320 +than minus 0.25? If you go back to the Z table, + +280 +00:25:15,380 --> 00:25:18,960 +but for the other one, the negative one. + +281 +00:25:23,120 --> 00:25:28,540 +Minus 2 minus 0.2 up + +282 +00:25:28,540 --> 00:25:34,780 +to 5. 0.4013 minus, + +283 +00:25:36,620 --> 00:25:43,100 +that will give the probability between three and + +284 +00:25:43,100 --> 00:25:43,280 +five. + +285 +00:25:46,180 --> 00:25:48,900 +This is the second part. + +286 +00:25:51,120 --> 00:25:52,460 +So the final answer. + +287 +00:26:00,630 --> 00:26:05,450 +So this is the probability that the selected cash + +288 +00:26:05,450 --> 00:26:10,650 +fish will weigh between three and five pounds. + +289 +00:26:11,810 --> 00:26:20,770 +Now, other question is, for the same problem, you + +290 +00:26:20,770 --> 00:26:28,020 +said a citation Catfish should be one of the top 2 + +291 +00:26:28,020 --> 00:26:33,860 +% in the weight. Assuming the weights of catfish + +292 +00:26:33,860 --> 00:26:38,660 +are normally distributed, at what weight in bounds + +293 +00:26:38,660 --> 00:26:43,680 +should the citation, the notation be established? + +294 +00:26:45,800 --> 00:26:50,600 +So in this board, he asked about what's the value + +295 +00:26:50,600 --> 00:26:52,120 +of x, for example. + +296 +00:26:57,160 --> 00:27:01,680 +is greater than what value here. And this + +297 +00:27:01,680 --> 00:27:07,880 +probability equals 2%. Because you said the + +298 +00:27:07,880 --> 00:27:14,720 +citation catfish should be one of the top 2%. So + +299 +00:27:14,720 --> 00:27:23,560 +the area in the right here, this area is 2%. + +300 +00:27:26,000 --> 00:27:34,080 +What's the value of x in this case? So here, the + +301 +00:27:34,080 --> 00:27:38,420 +value of x greater than a equals 0.02, and we are + +302 +00:27:38,420 --> 00:27:39,460 +looking for this value. 
+ +303 +00:27:45,750 --> 00:27:49,170 +gives the area to the left side. So this + +304 +00:27:49,170 --> 00:27:54,490 +probability actually, the area to the right is 2%, + +305 +00:27:54,490 --> 00:28:00,810 +so the area to the left is 98%. So this is the + +306 +00:28:00,810 --> 00:28:01,410 +same as, + +307 +00:28:04,610 --> 00:28:07,930 +as we know, the equal sign does not matter because + +308 +00:28:07,930 --> 00:28:09,090 +we have continuous distribution. + +309 +00:28:12,050 --> 00:28:14,650 +continuous distribution, so equal sign does not + +310 +00:28:14,650 --> 00:28:18,510 +matter. So now, if you ask about P of X greater + +311 +00:28:18,510 --> 00:28:22,190 +than a certain value equals a probability of, for + +312 +00:28:22,190 --> 00:28:27,330 +example, 0.02, you have to find the probability to + +313 +00:28:27,330 --> 00:28:31,890 +the left, which is 0.98, because our table gives + +314 +00:28:31,890 --> 00:28:36,470 +the area to the left. Now, we have to find the + +315 +00:28:36,470 --> 00:28:40,820 +value of A such that Probability of X is more than + +316 +00:28:40,820 --> 00:28:44,820 +or equal to 0.98. So again, we have to look at the + +317 +00:28:44,820 --> 00:28:50,140 +normal table, but backwards, because this value is + +318 +00:28:50,140 --> 00:28:53,720 +given. If the probability is given, we have to + +319 +00:28:53,720 --> 00:28:58,900 +look inside the body of the table in order to find + +320 +00:28:58,900 --> 00:28:59,580 +the z-score. + +321 +00:29:03,350 --> 00:29:07,850 +x equals mu plus z sigma in order to find the + +322 +00:29:07,850 --> 00:29:12,290 +corresponding value x. So again, go back to the + +323 +00:29:12,290 --> 00:29:22,010 +normal table, and we are looking for 98%. 
The + +324 +00:29:22,010 --> 00:29:27,930 +closest value to 98%, look here, if you stop here + +325 +00:29:27,930 --> 00:29:30,850 +at 2, go to the right, + +326 +00:29:33,660 --> 00:29:39,380 +Here we have 9798 or + +327 +00:29:39,380 --> 00:29:41,480 +9803. + +328 +00:29:42,640 --> 00:29:50,460 +So the answer might be your z-score could be 2.05 + +329 +00:29:50,460 --> 00:29:59,440 +or 2.06. So again, in this case, the table does + +330 +00:29:59,440 --> 00:30:04,400 +not give the exact. So the approximate one might + +331 +00:30:04,400 --> 00:30:08,920 +be between them exactly. Or just take one of + +332 +00:30:08,920 --> 00:30:13,140 +these. So either you can take 9798, which is + +333 +00:30:13,140 --> 00:30:19,500 +closer to 98% than 9803, because it's three + +334 +00:30:19,500 --> 00:30:23,780 +distant apart. So maybe we can take this value. + +335 +00:30:24,500 --> 00:30:27,640 +Again, if you take the other one, you will be OK. + +336 +00:30:28,540 --> 00:30:36,730 +So you take either 2.05. or 2.06. So let's take + +337 +00:30:36,730 --> 00:30:45,270 +the first value, for example. So my x equals mu, z + +338 +00:30:45,270 --> 00:30:56,570 +is 2.05, times sigma, 0.8. Multiply 2.05 by 8, 0 + +339 +00:30:56,570 --> 00:31:03,610 +.8, then add 3.2, you will get What's your answer? + +340 +00:31:08,390 --> 00:31:17,450 +3.2 plus 2 point... So around 4.8. So your answer + +341 +00:31:17,450 --> 00:31:18,890 +is 4.84. + +342 +00:31:23,810 --> 00:31:29,470 +Now if you go back to the problem, and suppose you + +343 +00:31:29,470 --> 00:31:30,830 +know the value of x. + +344 +00:31:34,250 --> 00:31:36,010 +So the probability of X. + +345 +00:31:42,990 --> 00:31:45,110 +Double check to the answer. + +346 +00:31:49,490 --> 00:31:58,010 +4.84. Just check. V of X greater than this value + +347 +00:31:58,010 --> 00:32:03,290 +should + +348 +00:32:03,290 --> 00:32:09,500 +be Two percent. Two percent. 
So the probability of + +349 +00:32:09,500 --> 00:32:13,440 +X greater than this value should be equal to one + +350 +00:32:13,440 --> 00:32:18,980 +zero. So this problem is called backward normal + +351 +00:32:18,980 --> 00:32:24,960 +calculation because here first step we find the + +352 +00:32:24,960 --> 00:32:28,040 +value of this score corresponding to this + +353 +00:32:28,040 --> 00:32:33,420 +probability. Be careful. The probability of X + +354 +00:32:33,420 --> 00:32:38,740 +greater than 2 is 0.02. So my value here should be + +355 +00:32:38,740 --> 00:32:44,980 +to the right. Because it says greater than A is + +356 +00:32:44,980 --> 00:32:51,240 +just 2%. If you switch the position of A, for + +357 +00:32:51,240 --> 00:32:57,130 +example, if A is on this side, And he asked about + +358 +00:32:57,130 --> 00:33:02,850 +E of X greater than E is 2%. This area is not 2%. + +359 +00:33:02,850 --> 00:33:10,050 +From A up to infinity here, this area is not 2% + +360 +00:33:10,050 --> 00:33:14,070 +because at least it's greater than 0.5. Make + +361 +00:33:14,070 --> 00:33:16,610 +sense? So your A should be to the right side. + +362 +00:33:17,590 --> 00:33:21,810 +Because the value of X greater than E, 2% is on + +363 +00:33:21,810 --> 00:33:26,400 +the other side. Let's move to the next one. + +364 +00:33:36,180 --> 00:33:37,660 +For the same question. + +365 +00:33:52,790 --> 00:33:57,150 +Again, the owner of Catfish Market determined the + +366 +00:33:57,150 --> 00:34:00,930 +average weight of a catfish 3.2 with + +367 +00:34:00,930 --> 00:34:04,670 +standardization 0.8 and we are assuming the + +368 +00:34:04,670 --> 00:34:08,270 +weights of catfish are normally distributed, kiosk + +369 +00:34:08,270 --> 00:34:17,390 +above. Above what weight? Do 89.8% of the weights + +370 +00:34:17,390 --> 00:34:18,070 +care? + +371 +00:34:20,630 --> 00:34:27,980 +Above? And above, so x greater than. X minus. And + +372 +00:34:27,980 --> 00:34:34,900 +98, 89, sorry, 89. 
So this is a percentage he's + +373 +00:34:34,900 --> 00:34:45,860 +looking for. 89.8%. Now what's the value of A? Or + +374 +00:34:45,860 --> 00:34:51,560 +above what weight? Do 89.8% of the weights occur? + +375 +00:34:57,730 --> 00:35:02,670 +You just make the normal curve in order to + +376 +00:35:02,670 --> 00:35:07,010 +understand the probability. Now, A should be to + +377 +00:35:07,010 --> 00:35:11,550 +the right or to the left side? Imagine A in the + +378 +00:35:11,550 --> 00:35:15,990 +right side here. Do you think the area above A is + +379 +00:35:15,990 --> 00:35:21,670 +89%? It's smaller than 0.5 for sure. So it should + +380 +00:35:21,670 --> 00:35:29,950 +be the other side. So this is your 8. Now, this + +381 +00:35:29,950 --> 00:35:36,690 +area makes sense that it's above 0.5. It's 0.8980. + +382 +00:35:38,750 --> 00:35:42,850 +Now, B of X greater than equals this value. And + +383 +00:35:42,850 --> 00:35:46,990 +again, the table gives the area to the left. So + +384 +00:35:46,990 --> 00:35:53,740 +this is actually X less than A, 1 minus this + +385 +00:35:53,740 --> 00:35:56,600 +value, equals 0.1020. + +386 +00:36:01,480 --> 00:36:08,760 +Now go back to + +387 +00:36:08,760 --> 00:36:14,400 +the normal table. Here it's 0.1020. So it should + +388 +00:36:14,400 --> 00:36:17,500 +be negative. I mean, your z-scope should be + +389 +00:36:17,500 --> 00:36:17,800 +negative. + +390 +00:36:22,000 --> 00:36:25,640 +Now look at 0.102. + +391 +00:36:28,560 --> 00:36:38,680 +Exactly this value. 0.102 is minus 1.2 up to 7. So + +392 +00:36:38,680 --> 00:36:39,900 +minus 1.27. + +393 +00:36:49,120 --> 00:36:57,900 +Minus 1.2. All the way up to 7 is 0.102. So the + +394 +00:36:57,900 --> 00:37:04,160 +corresponding z-score is minus 1.17. Now x again + +395 +00:37:04,160 --> 00:37:05,980 +equals mu plus z sigma. + +396 +00:37:10,280 --> 00:37:19,960 +Mu is 3.2 plus z is negative 1.17 times sigma. 
+ +397 +00:37:24,250 --> 00:37:34,390 +So it's equal to 3.2 minus 127 times 0.3. By + +398 +00:37:34,390 --> 00:37:36,510 +calculator, you'll get the final result. + +399 +00:37:51,120 --> 00:37:56,180 +If the probability is smaller than 0.5, then this + +400 +00:37:56,180 --> 00:38:00,480 +one is negative. Go to the other one. If the + +401 +00:38:00,480 --> 00:38:04,040 +probability is above 0.5, then use the positive z + +402 +00:38:04,040 --> 00:38:09,600 +-score. So what's the answer? 2.18. + +403 +00:38:12,680 --> 00:38:19,850 +Be careful. In the previous one, We had a + +404 +00:38:19,850 --> 00:38:28,870 +probability of X greater than A equal 2%. In + +405 +00:38:28,870 --> 00:38:33,070 +this case, the value of A, for example, is located + +406 +00:38:33,070 --> 00:38:35,930 +in the upper tail here. + +407 +00:38:40,210 --> 00:38:45,530 +For this part, you ask about B of X greater than A + +408 +00:38:45,530 --> 00:38:51,090 +equal 0.89. It's here more than 0.5 should be on + +409 +00:38:51,090 --> 00:38:54,290 +the other side. So you have U of X greater than + +410 +00:38:54,290 --> 00:38:58,910 +equal this value, which is the score in this case + +411 +00:38:58,910 --> 00:39:02,490 +minus 1.17. So the corresponding guess score + +412 +00:39:02,490 --> 00:39:11,810 +actually is 2.24. So this is the weight that 89.8% + +413 +00:39:11,810 --> 00:39:17,970 +of the weights are above it. So around 90% of the + +414 +00:39:17,970 --> 00:39:25,350 +catch fish have weights above this value. So + +415 +00:39:25,350 --> 00:39:31,170 +around 2 pounds. So around 90% of the weights are + +416 +00:39:31,170 --> 00:39:37,070 +above 2.18 pounds. Maybe this is one of the most + +417 +00:39:37,070 --> 00:39:41,690 +important questions in this chapter. Any question? 
+ +418 +00:39:51,660 --> 00:39:57,580 +The last part here, for the same problem he asked + +419 +00:39:57,580 --> 00:40:01,700 +about, what's the probability that a randomly + +420 +00:40:01,700 --> 00:40:06,120 +selected fish will weigh less than 2.2 pounds? I + +421 +00:40:06,120 --> 00:40:12,680 +think straightforward. We did similar in part A. + +422 +00:40:14,900 --> 00:40:23,880 +So B of X less than 0.2. So he's looking for this + +423 +00:40:23,880 --> 00:40:28,300 +probability, which is straightforward one. This + +424 +00:40:28,300 --> 00:40:38,800 +score, 3.2 minus, I'm sorry, it's 2.2 minus minus. + +425 +00:40:39,280 --> 00:40:43,420 +It's 2.2 minus 3.2 divided by sigma. + +426 +00:41:04,900 --> 00:41:10,240 +So again, find the probability now of Z less than + +427 +00:41:10,240 --> 00:41:11,120 +or equal to negative 1.5. + +428 +00:41:14,870 --> 00:41:19,710 +Now, in this case, we have to use the negative z. + +429 +00:41:20,370 --> 00:41:25,010 +It's negative 1.15 minus 1.2 up to 5. + +430 +00:41:28,150 --> 00:41:30,070 +So 0.1056. + +431 +00:41:33,730 --> 00:41:40,660 +So the answer is around 10% of the catfish will + +432 +00:41:40,660 --> 00:41:46,420 +weigh less than 2 pounds. So the answer is 0.1056. + +433 +00:41:48,340 --> 00:41:49,220 +Questions? + +434 +00:41:52,780 --> 00:41:56,100 +So go back to the PowerPoint presentation we have. + +435 +00:41:57,780 --> 00:41:59,580 +The last topic, + +436 +00:42:02,560 --> 00:42:03,900 +evaluating normality. + +437 +00:42:06,930 --> 00:42:09,350 +Many times we mentioned something about normality + +438 +00:42:09,350 --> 00:42:14,750 +and outliers. For sure, if outliers exist, in this + +439 +00:42:14,750 --> 00:42:19,410 +case, the situation is not normal. Now, how can we + +440 +00:42:19,410 --> 00:42:21,370 +tell if a data point is an outlier? + +441 +00:42:24,650 --> 00:42:28,270 +If you remember, we talked about outliers in + +442 +00:42:28,270 --> 00:42:32,510 +Chapter 3. By two ways. 
+ +443 +00:42:36,750 --> 00:42:38,270 +By this score. + +444 +00:42:42,650 --> 00:42:47,390 +And we mentioned that any data point. + +445 +00:42:54,950 --> 00:42:59,010 +Below minus 3, I mean smaller than minus 3, or + +446 +00:42:59,010 --> 00:43:04,010 +above 3, these points are suspected to be + +447 +00:43:04,010 --> 00:43:04,750 +outliers. + +448 +00:43:09,230 --> 00:43:16,230 +So any point, any data value smaller than minus 3 + +449 +00:43:16,230 --> 00:43:22,330 +in this form, or above plus 3 is considered to be + +450 +00:43:22,330 --> 00:43:30,250 +an outlier. If we compute the lower limit, which + +451 +00:43:30,250 --> 00:43:37,310 +is Q1 minus 1.5 IQR over the upper limit. + +452 +00:43:40,170 --> 00:43:47,490 +So we + +453 +00:43:47,490 --> 00:43:51,690 +have lower limit, upper limit. So lower limit. + +454 +00:43:55,400 --> 00:43:56,480 +And upper limit. + +455 +00:43:59,980 --> 00:44:08,420 +Any data point below lower limit or above upper + +456 +00:44:08,420 --> 00:44:13,080 +limit is considered to be a type. So therefore, we + +457 +00:44:13,080 --> 00:44:18,320 +have two methods to determine or to examine if the + +458 +00:44:18,320 --> 00:44:20,960 +observation is enough there or not. One by using + +459 +00:44:20,960 --> 00:44:24,060 +this score, straightforward. And the other one, we + +460 +00:44:24,060 --> 00:44:27,420 +have to look for The lower limit and upper limit. + +461 +00:44:28,960 --> 00:44:34,200 +The other method by using software and later on + +462 +00:44:34,200 --> 00:44:39,380 +you will take SPSS in order to determine if the + +463 +00:44:39,380 --> 00:44:43,100 +data is normally distributed by using something + +464 +00:44:43,100 --> 00:44:52,540 +called QQ plot or normal probability plot. So I'm + +465 +00:44:52,540 --> 00:44:56,710 +going to skip this part. Because data is taken by + +466 +00:44:56,710 --> 00:45:00,970 +using software. But in general, + +467 +00:45:04,330 --> 00:45:11,750 +you may look at this graph. 
Generally speaking, if + +468 +00:45:11,750 --> 00:45:17,730 +you have a probability plot of a data, and the + +469 +00:45:17,730 --> 00:45:25,530 +points lie on a straight line, or close to it, In + +470 +00:45:25,530 --> 00:45:29,650 +this case, the distribution is normal. It's hard + +471 +00:45:29,650 --> 00:45:33,390 +to make this graph manual. It's better to use + +472 +00:45:33,390 --> 00:45:38,510 +software. But at least if we have this graph, and + +473 +00:45:38,510 --> 00:45:42,150 +the points are close to the straight line. I mean, + +474 +00:45:42,250 --> 00:45:45,950 +the points are either on the straight line, lies + +475 +00:45:45,950 --> 00:45:49,550 +on the straight line, or close it. In this case, + +476 +00:45:50,870 --> 00:45:55,030 +the data is normally distributed. If the data + +477 +00:45:55,030 --> 00:46:00,870 +points scattered away of the straight line, then + +478 +00:46:00,870 --> 00:46:04,390 +the distribution is not normal either skewed to + +479 +00:46:04,390 --> 00:46:08,690 +the right or skewed to the left. So for this + +480 +00:46:08,690 --> 00:46:14,150 +specific graph, the plot is normally distributed, + +481 +00:46:14,290 --> 00:46:17,550 +approximately normally distributed. Because most + +482 +00:46:17,550 --> 00:46:23,230 +of the points here lie close to the line and few + +483 +00:46:24,260 --> 00:46:29,660 +are scattered away. Or it means that there are few + +484 +00:46:29,660 --> 00:46:31,940 +outliers in this case, we can ignore these values. + +485 +00:46:33,100 --> 00:46:35,620 +So here plot is approximately a straight line + +486 +00:46:35,620 --> 00:46:40,360 +except for a few outliers at the low and the + +487 +00:46:40,360 --> 00:46:44,780 +right, those points. So generally speaking, the + +488 +00:46:44,780 --> 00:46:51,200 +distribution is normal distribution. That's all + +489 +00:46:51,200 --> 00:46:53,700 +for this chapter. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..dba9c9aa0f5f26d73feb44f2c19645b2b82fc0f4 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 3609, "start": 7.39, "end": 36.09, "text": " So last time, we discussed how to find the probabilities underneath the normal curve for three cases. If the point lies in the lower tail, as this one, or upper tail, or between. So we can do the computations for this kind of probabilities. And I think we gave two examples.", "tokens": [407, 1036, 565, 11, 321, 7152, 577, 281, 915, 264, 33783, 7223, 264, 2710, 7605, 337, 1045, 3331, 13, 759, 264, 935, 9134, 294, 264, 3126, 6838, 11, 382, 341, 472, 11, 420, 6597, 6838, 11, 420, 1296, 13, 407, 321, 393, 360, 264, 2807, 763, 337, 341, 733, 295, 33783, 13, 400, 286, 519, 321, 2729, 732, 5110, 13], "avg_logprob": -0.1269211046031264, "compression_ratio": 1.5536723163841808, "no_speech_prob": 0.0, "words": [{"start": 7.39, "end": 7.79, "word": " So", "probability": 0.7919921875}, {"start": 7.79, "end": 7.99, "word": " last", "probability": 0.76611328125}, {"start": 7.99, "end": 8.19, "word": " time,", "probability": 0.8916015625}, {"start": 8.27, "end": 8.37, "word": " we", "probability": 0.9638671875}, {"start": 8.37, "end": 8.87, "word": " discussed", "probability": 0.88134765625}, {"start": 8.87, "end": 9.11, "word": " how", "probability": 0.9453125}, {"start": 9.11, "end": 9.27, "word": " to", "probability": 0.970703125}, {"start": 9.27, "end": 9.55, "word": " find", "probability": 0.888671875}, {"start": 9.55, "end": 9.89, "word": " the", "probability": 0.912109375}, {"start": 9.89, "end": 10.59, "word": " probabilities", "probability": 0.8779296875}, {"start": 10.59, "end": 11.25, "word": " underneath", "probability": 0.94677734375}, {"start": 11.25, "end": 12.29, 
"word": " the", "probability": 0.91259765625}, {"start": 12.29, "end": 12.69, "word": " normal", "probability": 0.875}, {"start": 12.69, "end": 13.11, "word": " curve", "probability": 0.9052734375}, {"start": 13.11, "end": 14.57, "word": " for", "probability": 0.880859375}, {"start": 14.57, "end": 14.83, "word": " three", "probability": 0.8818359375}, {"start": 14.83, "end": 15.25, "word": " cases.", "probability": 0.9443359375}, {"start": 16.01, "end": 16.81, "word": " If", "probability": 0.96435546875}, {"start": 16.81, "end": 16.95, "word": " the", "probability": 0.91845703125}, {"start": 16.95, "end": 17.21, "word": " point", "probability": 0.97216796875}, {"start": 17.21, "end": 17.67, "word": " lies", "probability": 0.93359375}, {"start": 17.67, "end": 19.11, "word": " in", "probability": 0.92041015625}, {"start": 19.11, "end": 19.43, "word": " the", "probability": 0.91455078125}, {"start": 19.43, "end": 20.05, "word": " lower", "probability": 0.87060546875}, {"start": 20.05, "end": 20.45, "word": " tail,", "probability": 0.826171875}, {"start": 21.69, "end": 22.37, "word": " as", "probability": 0.869140625}, {"start": 22.37, "end": 22.59, "word": " this", "probability": 0.94921875}, {"start": 22.59, "end": 22.87, "word": " one,", "probability": 0.92333984375}, {"start": 23.69, "end": 24.15, "word": " or", "probability": 0.955078125}, {"start": 24.15, "end": 24.55, "word": " upper", "probability": 0.787109375}, {"start": 24.55, "end": 24.99, "word": " tail,", "probability": 0.8525390625}, {"start": 26.09, "end": 26.47, "word": " or", "probability": 0.96630859375}, {"start": 26.47, "end": 26.83, "word": " between.", "probability": 0.89990234375}, {"start": 27.55, "end": 27.83, "word": " So", "probability": 0.94970703125}, {"start": 27.83, "end": 27.95, "word": " we", "probability": 0.9130859375}, {"start": 27.95, "end": 28.15, "word": " can", "probability": 0.94677734375}, {"start": 28.15, "end": 28.51, "word": " do", "probability": 0.96142578125}, {"start": 
28.51, "end": 29.03, "word": " the", "probability": 0.91552734375}, {"start": 29.03, "end": 29.81, "word": " computations", "probability": 0.97705078125}, {"start": 29.81, "end": 30.45, "word": " for", "probability": 0.94970703125}, {"start": 30.45, "end": 30.79, "word": " this", "probability": 0.90966796875}, {"start": 30.79, "end": 31.23, "word": " kind", "probability": 0.900390625}, {"start": 31.23, "end": 32.29, "word": " of", "probability": 0.97119140625}, {"start": 32.29, "end": 33.05, "word": " probabilities.", "probability": 0.8662109375}, {"start": 33.25, "end": 33.39, "word": " And", "probability": 0.93603515625}, {"start": 33.39, "end": 33.79, "word": " I", "probability": 0.7509765625}, {"start": 33.79, "end": 34.07, "word": " think", "probability": 0.9130859375}, {"start": 34.07, "end": 34.25, "word": " we", "probability": 0.94970703125}, {"start": 34.25, "end": 34.53, "word": " gave", "probability": 0.806640625}, {"start": 34.53, "end": 35.69, "word": " two", "probability": 0.92919921875}, {"start": 35.69, "end": 36.09, "word": " examples.", "probability": 0.86865234375}], "temperature": 1.0}, {"id": 2, "seek": 6120, "start": 36.62, "end": 61.2, "text": " For example, if we are looking for probability of Z is smaller than 0.1, and Z, as we mentioned last time, is the standardized normal distribution that has mean of 0 and sigma is 1. In this case, just go to the table you have. Now we are looking for 0.12, for example. 
So here we have 0.1.", "tokens": [1171, 1365, 11, 498, 321, 366, 1237, 337, 8482, 295, 1176, 307, 4356, 813, 1958, 13, 16, 11, 293, 1176, 11, 382, 321, 2835, 1036, 565, 11, 307, 264, 31677, 2710, 7316, 300, 575, 914, 295, 1958, 293, 12771, 307, 502, 13, 682, 341, 1389, 11, 445, 352, 281, 264, 3199, 291, 362, 13, 823, 321, 366, 1237, 337, 1958, 13, 4762, 11, 337, 1365, 13, 407, 510, 321, 362, 1958, 13, 16, 13], "avg_logprob": -0.16958333333333334, "compression_ratio": 1.5104166666666667, "no_speech_prob": 0.0, "words": [{"start": 36.62, "end": 36.84, "word": " For", "probability": 0.88037109375}, {"start": 36.84, "end": 37.14, "word": " example,", "probability": 0.9755859375}, {"start": 37.16, "end": 37.34, "word": " if", "probability": 0.94970703125}, {"start": 37.34, "end": 37.44, "word": " we", "probability": 0.85791015625}, {"start": 37.44, "end": 37.56, "word": " are", "probability": 0.927734375}, {"start": 37.56, "end": 37.86, "word": " looking", "probability": 0.9140625}, {"start": 37.86, "end": 38.26, "word": " for", "probability": 0.9560546875}, {"start": 38.26, "end": 40.04, "word": " probability", "probability": 0.626953125}, {"start": 40.04, "end": 40.4, "word": " of", "probability": 0.88525390625}, {"start": 40.4, "end": 40.56, "word": " Z", "probability": 0.611328125}, {"start": 40.56, "end": 40.78, "word": " is", "probability": 0.78515625}, {"start": 40.78, "end": 41.04, "word": " smaller", "probability": 0.497314453125}, {"start": 41.04, "end": 41.38, "word": " than", "probability": 0.94091796875}, {"start": 41.38, "end": 41.82, "word": " 0", "probability": 0.7431640625}, {"start": 41.82, "end": 42.2, "word": ".1,", "probability": 0.91357421875}, {"start": 43.38, "end": 43.8, "word": " and", "probability": 0.916015625}, {"start": 43.8, "end": 44.06, "word": " Z,", "probability": 0.9375}, {"start": 44.22, "end": 44.3, "word": " as", "probability": 0.96484375}, {"start": 44.3, "end": 44.44, "word": " we", "probability": 0.96142578125}, {"start": 44.44, 
"end": 44.74, "word": " mentioned", "probability": 0.8349609375}, {"start": 44.74, "end": 45.02, "word": " last", "probability": 0.85107421875}, {"start": 45.02, "end": 45.3, "word": " time,", "probability": 0.88818359375}, {"start": 45.4, "end": 45.5, "word": " is", "probability": 0.91943359375}, {"start": 45.5, "end": 45.74, "word": " the", "probability": 0.6064453125}, {"start": 45.74, "end": 46.5, "word": " standardized", "probability": 0.798828125}, {"start": 46.5, "end": 46.88, "word": " normal", "probability": 0.83642578125}, {"start": 46.88, "end": 47.58, "word": " distribution", "probability": 0.85693359375}, {"start": 47.58, "end": 48.66, "word": " that", "probability": 0.89208984375}, {"start": 48.66, "end": 48.94, "word": " has", "probability": 0.935546875}, {"start": 48.94, "end": 49.14, "word": " mean", "probability": 0.93798828125}, {"start": 49.14, "end": 49.28, "word": " of", "probability": 0.9736328125}, {"start": 49.28, "end": 49.54, "word": " 0", "probability": 0.35986328125}, {"start": 49.54, "end": 50.3, "word": " and", "probability": 0.7451171875}, {"start": 50.3, "end": 50.6, "word": " sigma", "probability": 0.84130859375}, {"start": 50.6, "end": 50.78, "word": " is", "probability": 0.76171875}, {"start": 50.78, "end": 50.98, "word": " 1.", "probability": 0.9169921875}, {"start": 51.92, "end": 52.38, "word": " In", "probability": 0.935546875}, {"start": 52.38, "end": 52.6, "word": " this", "probability": 0.9453125}, {"start": 52.6, "end": 52.84, "word": " case,", "probability": 0.9033203125}, {"start": 52.9, "end": 53.12, "word": " just", "probability": 0.904296875}, {"start": 53.12, "end": 53.44, "word": " go", "probability": 0.9462890625}, {"start": 53.44, "end": 53.96, "word": " to", "probability": 0.95166015625}, {"start": 53.96, "end": 54.08, "word": " the", "probability": 0.92041015625}, {"start": 54.08, "end": 54.34, "word": " table", "probability": 0.89111328125}, {"start": 54.34, "end": 54.54, "word": " you", "probability": 
0.95263671875}, {"start": 54.54, "end": 54.8, "word": " have.", "probability": 0.8876953125}, {"start": 56.0, "end": 56.5, "word": " Now", "probability": 0.962890625}, {"start": 56.5, "end": 56.62, "word": " we", "probability": 0.60009765625}, {"start": 56.62, "end": 56.74, "word": " are", "probability": 0.92529296875}, {"start": 56.74, "end": 56.98, "word": " looking", "probability": 0.88671875}, {"start": 56.98, "end": 57.24, "word": " for", "probability": 0.95263671875}, {"start": 57.24, "end": 57.54, "word": " 0", "probability": 0.92822265625}, {"start": 57.54, "end": 58.04, "word": ".12,", "probability": 0.763671875}, {"start": 58.78, "end": 58.92, "word": " for", "probability": 0.94970703125}, {"start": 58.92, "end": 59.26, "word": " example.", "probability": 0.9736328125}, {"start": 59.4, "end": 59.58, "word": " So", "probability": 0.96142578125}, {"start": 59.58, "end": 59.76, "word": " here", "probability": 0.81787109375}, {"start": 59.76, "end": 59.92, "word": " we", "probability": 0.771484375}, {"start": 59.92, "end": 60.2, "word": " have", "probability": 0.94775390625}, {"start": 60.2, "end": 60.84, "word": " 0", "probability": 0.982421875}, {"start": 60.84, "end": 61.2, "word": ".1.", "probability": 0.99658203125}], "temperature": 1.0}, {"id": 3, "seek": 8792, "start": 62.52, "end": 87.92, "text": " Under 2, we get this result. This value is the probability of Z smaller than 0.5. But you have to keep in mind that we have to transform first from normal distribution to standardized normal distribution. 
For this specific example, if we are looking for mean of 8 and standard deviation of 5, that's for the normal distribution.", "tokens": [6974, 568, 11, 321, 483, 341, 1874, 13, 639, 2158, 307, 264, 8482, 295, 1176, 4356, 813, 1958, 13, 20, 13, 583, 291, 362, 281, 1066, 294, 1575, 300, 321, 362, 281, 4088, 700, 490, 2710, 7316, 281, 31677, 2710, 7316, 13, 1171, 341, 2685, 1365, 11, 498, 321, 366, 1237, 337, 914, 295, 1649, 293, 3832, 25163, 295, 1025, 11, 300, 311, 337, 264, 2710, 7316, 13], "avg_logprob": -0.19146286318267602, "compression_ratio": 1.5893719806763285, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 62.52, "end": 62.92, "word": " Under", "probability": 0.50390625}, {"start": 62.92, "end": 63.3, "word": " 2,", "probability": 0.5791015625}, {"start": 63.48, "end": 63.7, "word": " we", "probability": 0.94970703125}, {"start": 63.7, "end": 63.92, "word": " get", "probability": 0.9287109375}, {"start": 63.92, "end": 64.14, "word": " this", "probability": 0.9443359375}, {"start": 64.14, "end": 64.5, "word": " result.", "probability": 0.91796875}, {"start": 65.08, "end": 65.38, "word": " This", "probability": 0.84814453125}, {"start": 65.38, "end": 65.66, "word": " value", "probability": 0.93994140625}, {"start": 65.66, "end": 65.84, "word": " is", "probability": 0.87060546875}, {"start": 65.84, "end": 65.94, "word": " the", "probability": 0.79150390625}, {"start": 65.94, "end": 66.36, "word": " probability", "probability": 0.93994140625}, {"start": 66.36, "end": 67.26, "word": " of", "probability": 0.94677734375}, {"start": 67.26, "end": 67.48, "word": " Z", "probability": 0.6884765625}, {"start": 67.48, "end": 68.06, "word": " smaller", "probability": 0.68994140625}, {"start": 68.06, "end": 68.34, "word": " than", "probability": 0.9375}, {"start": 68.34, "end": 68.58, "word": " 0", "probability": 0.796875}, {"start": 68.58, "end": 68.86, "word": ".5.", "probability": 0.747802734375}, {"start": 69.34, "end": 69.6, "word": " But", 
"probability": 0.744140625}, {"start": 69.6, "end": 69.68, "word": " you", "probability": 0.7275390625}, {"start": 69.68, "end": 69.8, "word": " have", "probability": 0.9453125}, {"start": 69.8, "end": 69.9, "word": " to", "probability": 0.97021484375}, {"start": 69.9, "end": 70.04, "word": " keep", "probability": 0.93310546875}, {"start": 70.04, "end": 70.18, "word": " in", "probability": 0.943359375}, {"start": 70.18, "end": 70.48, "word": " mind", "probability": 0.88330078125}, {"start": 70.48, "end": 70.78, "word": " that", "probability": 0.92333984375}, {"start": 70.78, "end": 71.0, "word": " we", "probability": 0.89306640625}, {"start": 71.0, "end": 71.2, "word": " have", "probability": 0.94970703125}, {"start": 71.2, "end": 71.36, "word": " to", "probability": 0.96875}, {"start": 71.36, "end": 72.08, "word": " transform", "probability": 0.94140625}, {"start": 72.08, "end": 72.84, "word": " first", "probability": 0.58349609375}, {"start": 72.84, "end": 74.12, "word": " from", "probability": 0.53466796875}, {"start": 74.12, "end": 74.46, "word": " normal", "probability": 0.7529296875}, {"start": 74.46, "end": 75.06, "word": " distribution", "probability": 0.81591796875}, {"start": 75.06, "end": 75.98, "word": " to", "probability": 0.8095703125}, {"start": 75.98, "end": 76.44, "word": " standardized", "probability": 0.85546875}, {"start": 76.44, "end": 76.94, "word": " normal", "probability": 0.861328125}, {"start": 76.94, "end": 77.58, "word": " distribution.", "probability": 0.87060546875}, {"start": 78.62, "end": 78.98, "word": " For", "probability": 0.96240234375}, {"start": 78.98, "end": 79.22, "word": " this", "probability": 0.947265625}, {"start": 79.22, "end": 79.7, "word": " specific", "probability": 0.90185546875}, {"start": 79.7, "end": 80.08, "word": " example,", "probability": 0.97509765625}, {"start": 80.18, "end": 80.32, "word": " if", "probability": 0.939453125}, {"start": 80.32, "end": 80.42, "word": " we", "probability": 0.900390625}, 
{"start": 80.42, "end": 80.54, "word": " are", "probability": 0.93701171875}, {"start": 80.54, "end": 80.78, "word": " looking", "probability": 0.90625}, {"start": 80.78, "end": 81.22, "word": " for", "probability": 0.9541015625}, {"start": 81.22, "end": 83.24, "word": " mean", "probability": 0.71826171875}, {"start": 83.24, "end": 84.0, "word": " of", "probability": 0.9228515625}, {"start": 84.0, "end": 84.5, "word": " 8", "probability": 0.890625}, {"start": 84.5, "end": 85.04, "word": " and", "probability": 0.8408203125}, {"start": 85.04, "end": 85.46, "word": " standard", "probability": 0.619140625}, {"start": 85.46, "end": 85.86, "word": " deviation", "probability": 0.9189453125}, {"start": 85.86, "end": 86.12, "word": " of", "probability": 0.9677734375}, {"start": 86.12, "end": 86.48, "word": " 5,", "probability": 0.9736328125}, {"start": 86.6, "end": 86.84, "word": " that's", "probability": 0.42724609375}, {"start": 86.84, "end": 86.96, "word": " for", "probability": 0.73828125}, {"start": 86.96, "end": 87.12, "word": " the", "probability": 0.8359375}, {"start": 87.12, "end": 87.36, "word": " normal", "probability": 0.8642578125}, {"start": 87.36, "end": 87.92, "word": " distribution.", "probability": 0.85693359375}], "temperature": 1.0}, {"id": 4, "seek": 11782, "start": 88.56, "end": 117.82, "text": " In this case, the z-score is given by this equation, which is x minus 3 divided by sigma. So 8.6 minus 8 divided by 5 gives 0.12. In this case, we can use the standardized normal table. Now, for the other case, we are looking for the probability of x greater than 8.6. So we are looking for the upper tail probability. 
The table we have gives the area to the left side.", "tokens": [682, 341, 1389, 11, 264, 710, 12, 4417, 418, 307, 2212, 538, 341, 5367, 11, 597, 307, 2031, 3175, 805, 6666, 538, 12771, 13, 407, 1649, 13, 21, 3175, 1649, 6666, 538, 1025, 2709, 1958, 13, 4762, 13, 682, 341, 1389, 11, 321, 393, 764, 264, 31677, 2710, 3199, 13, 823, 11, 337, 264, 661, 1389, 11, 321, 366, 1237, 337, 264, 8482, 295, 2031, 5044, 813, 1649, 13, 21, 13, 407, 321, 366, 1237, 337, 264, 6597, 6838, 8482, 13, 440, 3199, 321, 362, 2709, 264, 1859, 281, 264, 1411, 1252, 13], "avg_logprob": -0.14478057971660127, "compression_ratio": 1.705069124423963, "no_speech_prob": 0.0, "words": [{"start": 88.56, "end": 88.84, "word": " In", "probability": 0.7763671875}, {"start": 88.84, "end": 89.04, "word": " this", "probability": 0.94580078125}, {"start": 89.04, "end": 89.28, "word": " case,", "probability": 0.91943359375}, {"start": 89.38, "end": 89.48, "word": " the", "probability": 0.830078125}, {"start": 89.48, "end": 89.66, "word": " z", "probability": 0.68603515625}, {"start": 89.66, "end": 90.02, "word": "-score", "probability": 0.822265625}, {"start": 90.02, "end": 90.44, "word": " is", "probability": 0.9365234375}, {"start": 90.44, "end": 90.7, "word": " given", "probability": 0.89990234375}, {"start": 90.7, "end": 91.04, "word": " by", "probability": 0.96875}, {"start": 91.04, "end": 91.32, "word": " this", "probability": 0.88330078125}, {"start": 91.32, "end": 91.72, "word": " equation,", "probability": 0.95458984375}, {"start": 91.94, "end": 92.02, "word": " which", "probability": 0.9501953125}, {"start": 92.02, "end": 92.2, "word": " is", "probability": 0.947265625}, {"start": 92.2, "end": 92.46, "word": " x", "probability": 0.91259765625}, {"start": 92.46, "end": 92.74, "word": " minus", "probability": 0.96728515625}, {"start": 92.74, "end": 92.92, "word": " 3", "probability": 0.1485595703125}, {"start": 92.92, "end": 93.14, "word": " divided", "probability": 0.79052734375}, {"start": 93.14, 
"end": 93.3, "word": " by", "probability": 0.97705078125}, {"start": 93.3, "end": 93.6, "word": " sigma.", "probability": 0.90673828125}, {"start": 94.64, "end": 95.16, "word": " So", "probability": 0.96337890625}, {"start": 95.16, "end": 95.4, "word": " 8", "probability": 0.8486328125}, {"start": 95.4, "end": 95.84, "word": ".6", "probability": 0.996337890625}, {"start": 95.84, "end": 96.14, "word": " minus", "probability": 0.98681640625}, {"start": 96.14, "end": 96.4, "word": " 8", "probability": 0.95068359375}, {"start": 96.4, "end": 96.6, "word": " divided", "probability": 0.8203125}, {"start": 96.6, "end": 96.78, "word": " by", "probability": 0.9501953125}, {"start": 96.78, "end": 97.06, "word": " 5", "probability": 0.986328125}, {"start": 97.06, "end": 97.36, "word": " gives", "probability": 0.89599609375}, {"start": 97.36, "end": 97.62, "word": " 0", "probability": 0.95263671875}, {"start": 97.62, "end": 97.94, "word": ".12.", "probability": 0.855224609375}, {"start": 98.08, "end": 98.26, "word": " In", "probability": 0.95166015625}, {"start": 98.26, "end": 98.48, "word": " this", "probability": 0.94384765625}, {"start": 98.48, "end": 98.76, "word": " case,", "probability": 0.9130859375}, {"start": 98.78, "end": 98.96, "word": " we", "probability": 0.96044921875}, {"start": 98.96, "end": 99.32, "word": " can", "probability": 0.947265625}, {"start": 99.32, "end": 99.96, "word": " use", "probability": 0.87158203125}, {"start": 99.96, "end": 101.36, "word": " the", "probability": 0.91064453125}, {"start": 101.36, "end": 102.52, "word": " standardized", "probability": 0.86279296875}, {"start": 102.52, "end": 103.12, "word": " normal", "probability": 0.8330078125}, {"start": 103.12, "end": 103.5, "word": " table.", "probability": 0.8583984375}, {"start": 104.92, "end": 105.32, "word": " Now,", "probability": 0.94775390625}, {"start": 105.92, "end": 106.16, "word": " for", "probability": 0.93310546875}, {"start": 106.16, "end": 106.3, "word": " the", 
"probability": 0.9189453125}, {"start": 106.3, "end": 106.48, "word": " other", "probability": 0.8876953125}, {"start": 106.48, "end": 106.8, "word": " case,", "probability": 0.9150390625}, {"start": 106.88, "end": 107.0, "word": " we", "probability": 0.93017578125}, {"start": 107.0, "end": 107.12, "word": " are", "probability": 0.93212890625}, {"start": 107.12, "end": 107.36, "word": " looking", "probability": 0.91552734375}, {"start": 107.36, "end": 107.58, "word": " for", "probability": 0.94873046875}, {"start": 107.58, "end": 107.76, "word": " the", "probability": 0.91748046875}, {"start": 107.76, "end": 108.12, "word": " probability", "probability": 0.95263671875}, {"start": 108.12, "end": 108.4, "word": " of", "probability": 0.9638671875}, {"start": 108.4, "end": 108.7, "word": " x", "probability": 0.9072265625}, {"start": 108.7, "end": 109.12, "word": " greater", "probability": 0.90673828125}, {"start": 109.12, "end": 109.42, "word": " than", "probability": 0.9423828125}, {"start": 109.42, "end": 109.58, "word": " 8", "probability": 0.76953125}, {"start": 109.58, "end": 110.02, "word": ".6.", "probability": 0.999267578125}, {"start": 110.4, "end": 110.7, "word": " So", "probability": 0.96484375}, {"start": 110.7, "end": 110.84, "word": " we", "probability": 0.923828125}, {"start": 110.84, "end": 110.94, "word": " are", "probability": 0.9296875}, {"start": 110.94, "end": 111.16, "word": " looking", "probability": 0.9130859375}, {"start": 111.16, "end": 111.44, "word": " for", "probability": 0.95068359375}, {"start": 111.44, "end": 111.64, "word": " the", "probability": 0.91748046875}, {"start": 111.64, "end": 111.88, "word": " upper", "probability": 0.2464599609375}, {"start": 111.88, "end": 112.18, "word": " tail", "probability": 0.3466796875}, {"start": 112.18, "end": 112.58, "word": " probability.", "probability": 0.828125}, {"start": 113.78, "end": 114.32, "word": " The", "probability": 0.86669921875}, {"start": 114.32, "end": 114.62, "word": " table", 
"probability": 0.8916015625}, {"start": 114.62, "end": 114.82, "word": " we", "probability": 0.81103515625}, {"start": 114.82, "end": 115.18, "word": " have", "probability": 0.9462890625}, {"start": 115.18, "end": 116.4, "word": " gives", "probability": 0.912109375}, {"start": 116.4, "end": 116.62, "word": " the", "probability": 0.91455078125}, {"start": 116.62, "end": 116.9, "word": " area", "probability": 0.88818359375}, {"start": 116.9, "end": 117.1, "word": " to", "probability": 0.9658203125}, {"start": 117.1, "end": 117.24, "word": " the", "probability": 0.91455078125}, {"start": 117.24, "end": 117.44, "word": " left", "probability": 0.94677734375}, {"start": 117.44, "end": 117.82, "word": " side.", "probability": 0.84326171875}], "temperature": 1.0}, {"id": 5, "seek": 14762, "start": 119.22, "end": 147.62, "text": " And we know that the total area underneath the normal curve is 1. So the probability of x greater than 8.6 is the same as 1 minus the probability of x smaller than x less than 8.6. So here, for 8.6, we got z squared to be 0.12. 
So the probability of z greater than 0.12 is the same as 1 minus z of z is smaller than 0.12.", "tokens": [400, 321, 458, 300, 264, 3217, 1859, 7223, 264, 2710, 7605, 307, 502, 13, 407, 264, 8482, 295, 2031, 5044, 813, 1649, 13, 21, 307, 264, 912, 382, 502, 3175, 264, 8482, 295, 2031, 4356, 813, 2031, 1570, 813, 1649, 13, 21, 13, 407, 510, 11, 337, 1649, 13, 21, 11, 321, 658, 710, 8889, 281, 312, 1958, 13, 4762, 13, 407, 264, 8482, 295, 710, 5044, 813, 1958, 13, 4762, 307, 264, 912, 382, 502, 3175, 710, 295, 710, 307, 4356, 813, 1958, 13, 4762, 13], "avg_logprob": -0.12215908769179475, "compression_ratio": 1.8830409356725146, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 119.22, "end": 119.82, "word": " And", "probability": 0.77587890625}, {"start": 119.82, "end": 119.96, "word": " we", "probability": 0.87890625}, {"start": 119.96, "end": 120.08, "word": " know", "probability": 0.8837890625}, {"start": 120.08, "end": 120.3, "word": " that", "probability": 0.92578125}, {"start": 120.3, "end": 120.66, "word": " the", "probability": 0.91162109375}, {"start": 120.66, "end": 121.16, "word": " total", "probability": 0.88232421875}, {"start": 121.16, "end": 121.64, "word": " area", "probability": 0.91455078125}, {"start": 121.64, "end": 122.16, "word": " underneath", "probability": 0.92333984375}, {"start": 122.16, "end": 122.42, "word": " the", "probability": 0.9130859375}, {"start": 122.42, "end": 122.72, "word": " normal", "probability": 0.884765625}, {"start": 122.72, "end": 122.98, "word": " curve", "probability": 0.8388671875}, {"start": 122.98, "end": 123.2, "word": " is", "probability": 0.94580078125}, {"start": 123.2, "end": 123.46, "word": " 1.", "probability": 0.48388671875}, {"start": 124.08, "end": 124.56, "word": " So", "probability": 0.9638671875}, {"start": 124.56, "end": 124.76, "word": " the", "probability": 0.78662109375}, {"start": 124.76, "end": 125.1, "word": " probability", "probability": 0.90771484375}, {"start": 125.1, "end": 
125.36, "word": " of", "probability": 0.93896484375}, {"start": 125.36, "end": 125.54, "word": " x", "probability": 0.38720703125}, {"start": 125.54, "end": 125.92, "word": " greater", "probability": 0.8486328125}, {"start": 125.92, "end": 126.22, "word": " than", "probability": 0.94384765625}, {"start": 126.22, "end": 126.48, "word": " 8", "probability": 0.90185546875}, {"start": 126.48, "end": 127.0, "word": ".6", "probability": 0.995361328125}, {"start": 127.0, "end": 127.38, "word": " is", "probability": 0.916015625}, {"start": 127.38, "end": 127.6, "word": " the", "probability": 0.9111328125}, {"start": 127.6, "end": 127.8, "word": " same", "probability": 0.9140625}, {"start": 127.8, "end": 128.2, "word": " as", "probability": 0.96630859375}, {"start": 128.2, "end": 128.98, "word": " 1", "probability": 0.9482421875}, {"start": 128.98, "end": 129.48, "word": " minus", "probability": 0.97705078125}, {"start": 129.48, "end": 130.06, "word": " the", "probability": 0.84228515625}, {"start": 130.06, "end": 130.38, "word": " probability", "probability": 0.94140625}, {"start": 130.38, "end": 130.66, "word": " of", "probability": 0.94921875}, {"start": 130.66, "end": 130.84, "word": " x", "probability": 0.98095703125}, {"start": 130.84, "end": 131.22, "word": " smaller", "probability": 0.86083984375}, {"start": 131.22, "end": 131.68, "word": " than", "probability": 0.9296875}, {"start": 131.68, "end": 133.4, "word": " x", "probability": 0.84619140625}, {"start": 133.4, "end": 133.72, "word": " less", "probability": 0.939453125}, {"start": 133.72, "end": 134.1, "word": " than", "probability": 0.9423828125}, {"start": 134.1, "end": 134.84, "word": " 8", "probability": 0.99853515625}, {"start": 134.84, "end": 135.46, "word": ".6.", "probability": 0.99951171875}, {"start": 136.04, "end": 136.34, "word": " So", "probability": 0.95458984375}, {"start": 136.34, "end": 136.6, "word": " here,", "probability": 0.814453125}, {"start": 137.06, "end": 137.3, "word": " for", 
"probability": 0.9404296875}, {"start": 137.3, "end": 137.5, "word": " 8", "probability": 0.99755859375}, {"start": 137.5, "end": 137.94, "word": ".6,", "probability": 0.999755859375}, {"start": 138.04, "end": 138.12, "word": " we", "probability": 0.93505859375}, {"start": 138.12, "end": 138.34, "word": " got", "probability": 0.8291015625}, {"start": 138.34, "end": 138.58, "word": " z", "probability": 0.71533203125}, {"start": 138.58, "end": 138.82, "word": " squared", "probability": 0.255126953125}, {"start": 138.82, "end": 138.96, "word": " to", "probability": 0.87841796875}, {"start": 138.96, "end": 139.1, "word": " be", "probability": 0.9580078125}, {"start": 139.1, "end": 139.42, "word": " 0", "probability": 0.96923828125}, {"start": 139.42, "end": 139.88, "word": ".12.", "probability": 0.99169921875}, {"start": 140.6, "end": 141.1, "word": " So", "probability": 0.9609375}, {"start": 141.1, "end": 141.46, "word": " the", "probability": 0.81298828125}, {"start": 141.46, "end": 141.8, "word": " probability", "probability": 0.94482421875}, {"start": 141.8, "end": 142.06, "word": " of", "probability": 0.9345703125}, {"start": 142.06, "end": 142.2, "word": " z", "probability": 0.982421875}, {"start": 142.2, "end": 142.52, "word": " greater", "probability": 0.9296875}, {"start": 142.52, "end": 142.76, "word": " than", "probability": 0.93359375}, {"start": 142.76, "end": 143.0, "word": " 0", "probability": 0.98095703125}, {"start": 143.0, "end": 143.38, "word": ".12", "probability": 0.998779296875}, {"start": 143.38, "end": 144.3, "word": " is", "probability": 0.94189453125}, {"start": 144.3, "end": 144.48, "word": " the", "probability": 0.91845703125}, {"start": 144.48, "end": 144.64, "word": " same", "probability": 0.91162109375}, {"start": 144.64, "end": 144.84, "word": " as", "probability": 0.96484375}, {"start": 144.84, "end": 145.06, "word": " 1", "probability": 0.98486328125}, {"start": 145.06, "end": 145.5, "word": " minus", "probability": 0.98779296875}, 
{"start": 145.5, "end": 145.76, "word": " z", "probability": 0.779296875}, {"start": 145.76, "end": 145.9, "word": " of", "probability": 0.50830078125}, {"start": 145.9, "end": 146.06, "word": " z", "probability": 0.9853515625}, {"start": 146.06, "end": 146.36, "word": " is", "probability": 0.779296875}, {"start": 146.36, "end": 146.84, "word": " smaller", "probability": 0.87060546875}, {"start": 146.84, "end": 147.12, "word": " than", "probability": 0.93798828125}, {"start": 147.12, "end": 147.36, "word": " 0", "probability": 0.98583984375}, {"start": 147.36, "end": 147.62, "word": ".12.", "probability": 0.996337890625}], "temperature": 1.0}, {"id": 6, "seek": 17671, "start": 148.85, "end": 176.71, "text": " So 1 minus the answer we just got from part A will get us to something. So this is the probability of X is greater than 8.6. So all of the time, if you are asking about computing the probability in the upper tail, you have to first find the probability in the lower tail, then subtract that value from 1. 
So that's the probability for the upper tail.", "tokens": [407, 502, 3175, 264, 1867, 321, 445, 658, 490, 644, 316, 486, 483, 505, 281, 746, 13, 407, 341, 307, 264, 8482, 295, 1783, 307, 5044, 813, 1649, 13, 21, 13, 407, 439, 295, 264, 565, 11, 498, 291, 366, 3365, 466, 15866, 264, 8482, 294, 264, 6597, 6838, 11, 291, 362, 281, 700, 915, 264, 8482, 294, 264, 3126, 6838, 11, 550, 16390, 300, 2158, 490, 502, 13, 407, 300, 311, 264, 8482, 337, 264, 6597, 6838, 13], "avg_logprob": -0.16396484170109032, "compression_ratio": 1.7462686567164178, "no_speech_prob": 0.0, "words": [{"start": 148.85, "end": 149.11, "word": " So", "probability": 0.87939453125}, {"start": 149.11, "end": 149.31, "word": " 1", "probability": 0.5498046875}, {"start": 149.31, "end": 149.61, "word": " minus", "probability": 0.9755859375}, {"start": 149.61, "end": 149.91, "word": " the", "probability": 0.9150390625}, {"start": 149.91, "end": 150.43, "word": " answer", "probability": 0.94970703125}, {"start": 150.43, "end": 150.63, "word": " we", "probability": 0.88037109375}, {"start": 150.63, "end": 150.89, "word": " just", "probability": 0.90478515625}, {"start": 150.89, "end": 151.13, "word": " got", "probability": 0.87744140625}, {"start": 151.13, "end": 151.37, "word": " from", "probability": 0.876953125}, {"start": 151.37, "end": 151.67, "word": " part", "probability": 0.763671875}, {"start": 151.67, "end": 152.01, "word": " A", "probability": 0.6416015625}, {"start": 152.01, "end": 153.05, "word": " will", "probability": 0.69287109375}, {"start": 153.05, "end": 153.31, "word": " get", "probability": 0.560546875}, {"start": 153.31, "end": 153.51, "word": " us", "probability": 0.75}, {"start": 153.51, "end": 153.67, "word": " to", "probability": 0.4150390625}, {"start": 153.67, "end": 153.87, "word": " something.", "probability": 0.64208984375}, {"start": 154.43, "end": 154.95, "word": " So", "probability": 0.9501953125}, {"start": 154.95, "end": 155.15, "word": " this", "probability": 
0.90576171875}, {"start": 155.15, "end": 155.27, "word": " is", "probability": 0.859375}, {"start": 155.27, "end": 155.37, "word": " the", "probability": 0.48779296875}, {"start": 155.37, "end": 155.67, "word": " probability", "probability": 0.95458984375}, {"start": 155.67, "end": 155.93, "word": " of", "probability": 0.8662109375}, {"start": 155.93, "end": 156.19, "word": " X", "probability": 0.8525390625}, {"start": 156.19, "end": 156.49, "word": " is", "probability": 0.81396484375}, {"start": 156.49, "end": 156.83, "word": " greater", "probability": 0.93359375}, {"start": 156.83, "end": 157.33, "word": " than", "probability": 0.951171875}, {"start": 157.33, "end": 158.57, "word": " 8", "probability": 0.95263671875}, {"start": 158.57, "end": 159.13, "word": ".6.", "probability": 0.988525390625}, {"start": 159.55, "end": 159.91, "word": " So", "probability": 0.9501953125}, {"start": 159.91, "end": 160.15, "word": " all", "probability": 0.91796875}, {"start": 160.15, "end": 160.25, "word": " of", "probability": 0.9443359375}, {"start": 160.25, "end": 160.35, "word": " the", "probability": 0.9072265625}, {"start": 160.35, "end": 160.59, "word": " time,", "probability": 0.87451171875}, {"start": 160.71, "end": 160.77, "word": " if", "probability": 0.916015625}, {"start": 160.77, "end": 161.17, "word": " you", "probability": 0.9619140625}, {"start": 161.17, "end": 161.35, "word": " are", "probability": 0.9013671875}, {"start": 161.35, "end": 161.69, "word": " asking", "probability": 0.8671875}, {"start": 161.69, "end": 162.17, "word": " about", "probability": 0.90185546875}, {"start": 162.17, "end": 163.53, "word": " computing", "probability": 0.82373046875}, {"start": 163.53, "end": 163.77, "word": " the", "probability": 0.9189453125}, {"start": 163.77, "end": 164.25, "word": " probability", "probability": 0.9404296875}, {"start": 164.25, "end": 165.25, "word": " in", "probability": 0.8935546875}, {"start": 165.25, "end": 165.39, "word": " the", "probability": 
0.91943359375}, {"start": 165.39, "end": 165.61, "word": " upper", "probability": 0.80810546875}, {"start": 165.61, "end": 165.91, "word": " tail,", "probability": 0.75341796875}, {"start": 166.59, "end": 167.05, "word": " you", "probability": 0.95654296875}, {"start": 167.05, "end": 167.21, "word": " have", "probability": 0.921875}, {"start": 167.21, "end": 167.31, "word": " to", "probability": 0.65283203125}, {"start": 167.31, "end": 167.47, "word": " first", "probability": 0.6640625}, {"start": 167.47, "end": 167.77, "word": " find", "probability": 0.8251953125}, {"start": 167.77, "end": 167.93, "word": " the", "probability": 0.9111328125}, {"start": 167.93, "end": 168.29, "word": " probability", "probability": 0.93505859375}, {"start": 168.29, "end": 169.11, "word": " in", "probability": 0.93798828125}, {"start": 169.11, "end": 169.43, "word": " the", "probability": 0.9140625}, {"start": 169.43, "end": 170.29, "word": " lower", "probability": 0.7197265625}, {"start": 170.29, "end": 170.65, "word": " tail,", "probability": 0.84814453125}, {"start": 171.01, "end": 171.35, "word": " then", "probability": 0.49267578125}, {"start": 171.35, "end": 171.81, "word": " subtract", "probability": 0.845703125}, {"start": 171.81, "end": 172.09, "word": " that", "probability": 0.93505859375}, {"start": 172.09, "end": 172.39, "word": " value", "probability": 0.97265625}, {"start": 172.39, "end": 172.63, "word": " from", "probability": 0.88037109375}, {"start": 172.63, "end": 172.87, "word": " 1.", "probability": 0.90625}, {"start": 173.39, "end": 173.67, "word": " So", "probability": 0.95849609375}, {"start": 173.67, "end": 174.67, "word": " that's", "probability": 0.96630859375}, {"start": 174.67, "end": 175.29, "word": " the", "probability": 0.9140625}, {"start": 175.29, "end": 175.69, "word": " probability", "probability": 0.95703125}, {"start": 175.69, "end": 176.03, "word": " for", "probability": 0.9453125}, {"start": 176.03, "end": 176.17, "word": " the", "probability": 
0.9169921875}, {"start": 176.17, "end": 176.39, "word": " upper", "probability": 0.798828125}, {"start": 176.39, "end": 176.71, "word": " tail.", "probability": 0.8671875}], "temperature": 1.0}, {"id": 7, "seek": 20185, "start": 178.11, "end": 201.85, "text": " The last one, we are looking for probability between two values. For example, x. What's the probability of x greater than 8 and smaller than 8.6? So we are looking for this area, the red area. So the probability of x between these two values actually equals the probability of x smaller than.", "tokens": [440, 1036, 472, 11, 321, 366, 1237, 337, 8482, 1296, 732, 4190, 13, 1171, 1365, 11, 2031, 13, 708, 311, 264, 8482, 295, 2031, 5044, 813, 1649, 293, 4356, 813, 1649, 13, 21, 30, 407, 321, 366, 1237, 337, 341, 1859, 11, 264, 2182, 1859, 13, 407, 264, 8482, 295, 2031, 1296, 613, 732, 4190, 767, 6915, 264, 8482, 295, 2031, 4356, 813, 13], "avg_logprob": -0.15432692307692308, "compression_ratio": 1.83125, "no_speech_prob": 0.0, "words": [{"start": 178.11, "end": 178.45, "word": " The", "probability": 0.841796875}, {"start": 178.45, "end": 178.79, "word": " last", "probability": 0.88134765625}, {"start": 178.79, "end": 179.11, "word": " one,", "probability": 0.9228515625}, {"start": 179.29, "end": 179.57, "word": " we", "probability": 0.3876953125}, {"start": 179.57, "end": 179.75, "word": " are", "probability": 0.9296875}, {"start": 179.75, "end": 180.05, "word": " looking", "probability": 0.91064453125}, {"start": 180.05, "end": 180.49, "word": " for", "probability": 0.94970703125}, {"start": 180.49, "end": 182.47, "word": " probability", "probability": 0.79443359375}, {"start": 182.47, "end": 183.01, "word": " between", "probability": 0.8876953125}, {"start": 183.01, "end": 183.23, "word": " two", "probability": 0.923828125}, {"start": 183.23, "end": 183.57, "word": " values.", "probability": 0.9609375}, {"start": 183.67, "end": 183.81, "word": " For", "probability": 0.9560546875}, {"start": 183.81, "end": 
184.19, "word": " example,", "probability": 0.97119140625}, {"start": 185.07, "end": 185.51, "word": " x.", "probability": 0.462890625}, {"start": 186.57, "end": 186.85, "word": " What's", "probability": 0.908935546875}, {"start": 186.85, "end": 186.97, "word": " the", "probability": 0.8623046875}, {"start": 186.97, "end": 187.27, "word": " probability", "probability": 0.94873046875}, {"start": 187.27, "end": 187.57, "word": " of", "probability": 0.93408203125}, {"start": 187.57, "end": 187.89, "word": " x", "probability": 0.98095703125}, {"start": 187.89, "end": 188.39, "word": " greater", "probability": 0.916015625}, {"start": 188.39, "end": 188.69, "word": " than", "probability": 0.9541015625}, {"start": 188.69, "end": 188.99, "word": " 8", "probability": 0.375732421875}, {"start": 188.99, "end": 189.35, "word": " and", "probability": 0.8125}, {"start": 189.35, "end": 189.73, "word": " smaller", "probability": 0.876953125}, {"start": 189.73, "end": 190.03, "word": " than", "probability": 0.94287109375}, {"start": 190.03, "end": 190.19, "word": " 8", "probability": 0.98779296875}, {"start": 190.19, "end": 190.71, "word": ".6?", "probability": 0.98583984375}, {"start": 192.63, "end": 193.23, "word": " So", "probability": 0.9501953125}, {"start": 193.23, "end": 193.51, "word": " we", "probability": 0.75244140625}, {"start": 193.51, "end": 193.59, "word": " are", "probability": 0.74853515625}, {"start": 193.59, "end": 193.79, "word": " looking", "probability": 0.90625}, {"start": 193.79, "end": 194.11, "word": " for", "probability": 0.94873046875}, {"start": 194.11, "end": 194.43, "word": " this", "probability": 0.9423828125}, {"start": 194.43, "end": 194.91, "word": " area,", "probability": 0.9091796875}, {"start": 195.05, "end": 195.19, "word": " the", "probability": 0.90087890625}, {"start": 195.19, "end": 195.37, "word": " red", "probability": 0.94580078125}, {"start": 195.37, "end": 195.67, "word": " area.", "probability": 0.8681640625}, {"start": 196.25, 
"end": 196.67, "word": " So", "probability": 0.9541015625}, {"start": 196.67, "end": 196.83, "word": " the", "probability": 0.89697265625}, {"start": 196.83, "end": 197.09, "word": " probability", "probability": 0.9443359375}, {"start": 197.09, "end": 197.33, "word": " of", "probability": 0.95654296875}, {"start": 197.33, "end": 197.49, "word": " x", "probability": 0.98876953125}, {"start": 197.49, "end": 197.85, "word": " between", "probability": 0.87646484375}, {"start": 197.85, "end": 198.15, "word": " these", "probability": 0.8603515625}, {"start": 198.15, "end": 198.31, "word": " two", "probability": 0.91845703125}, {"start": 198.31, "end": 198.71, "word": " values", "probability": 0.9697265625}, {"start": 198.71, "end": 199.63, "word": " actually", "probability": 0.84326171875}, {"start": 199.63, "end": 200.15, "word": " equals", "probability": 0.87646484375}, {"start": 200.15, "end": 200.31, "word": " the", "probability": 0.8935546875}, {"start": 200.31, "end": 200.61, "word": " probability", "probability": 0.9521484375}, {"start": 200.61, "end": 200.79, "word": " of", "probability": 0.93017578125}, {"start": 200.79, "end": 201.01, "word": " x", "probability": 0.9951171875}, {"start": 201.01, "end": 201.45, "word": " smaller", "probability": 0.8740234375}, {"start": 201.45, "end": 201.85, "word": " than.", "probability": 0.9482421875}], "temperature": 1.0}, {"id": 8, "seek": 22358, "start": 203.04, "end": 223.58, "text": " 8.6 minus the probability of X smaller than 8A. And that, in this case, you have to compute two values of this score. One for the first value, which is A. This value is zero because the mean is zero. 
And we know that Z can be negative.", "tokens": [1649, 13, 21, 3175, 264, 8482, 295, 1783, 4356, 813, 1649, 32, 13, 400, 300, 11, 294, 341, 1389, 11, 291, 362, 281, 14722, 732, 4190, 295, 341, 6175, 13, 1485, 337, 264, 700, 2158, 11, 597, 307, 316, 13, 639, 2158, 307, 4018, 570, 264, 914, 307, 4018, 13, 400, 321, 458, 300, 1176, 393, 312, 3671, 13], "avg_logprob": -0.23085937922199568, "compression_ratio": 1.4303030303030304, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 203.04, "end": 203.38, "word": " 8", "probability": 0.1795654296875}, {"start": 203.38, "end": 204.06, "word": ".6", "probability": 0.98291015625}, {"start": 204.06, "end": 204.88, "word": " minus", "probability": 0.9462890625}, {"start": 204.88, "end": 205.16, "word": " the", "probability": 0.79833984375}, {"start": 205.16, "end": 205.46, "word": " probability", "probability": 0.912109375}, {"start": 205.46, "end": 205.74, "word": " of", "probability": 0.88818359375}, {"start": 205.74, "end": 205.94, "word": " X", "probability": 0.7548828125}, {"start": 205.94, "end": 206.34, "word": " smaller", "probability": 0.78515625}, {"start": 206.34, "end": 206.66, "word": " than", "probability": 0.94580078125}, {"start": 206.66, "end": 207.56, "word": " 8A.", "probability": 0.472412109375}, {"start": 207.88, "end": 208.18, "word": " And", "probability": 0.67041015625}, {"start": 208.18, "end": 208.38, "word": " that,", "probability": 0.58935546875}, {"start": 209.0, "end": 209.12, "word": " in", "probability": 0.939453125}, {"start": 209.12, "end": 209.3, "word": " this", "probability": 0.94580078125}, {"start": 209.3, "end": 209.54, "word": " case,", "probability": 0.91552734375}, {"start": 209.58, "end": 209.66, "word": " you", "probability": 0.95556640625}, {"start": 209.66, "end": 209.82, "word": " have", "probability": 0.93603515625}, {"start": 209.82, "end": 209.94, "word": " to", "probability": 0.96923828125}, {"start": 209.94, "end": 210.32, "word": " compute", "probability": 
0.89697265625}, {"start": 210.32, "end": 211.14, "word": " two", "probability": 0.89501953125}, {"start": 211.14, "end": 211.58, "word": " values", "probability": 0.955078125}, {"start": 211.58, "end": 211.74, "word": " of", "probability": 0.75146484375}, {"start": 211.74, "end": 211.94, "word": " this", "probability": 0.4345703125}, {"start": 211.94, "end": 212.38, "word": " score.", "probability": 0.845703125}, {"start": 212.56, "end": 212.8, "word": " One", "probability": 0.91015625}, {"start": 212.8, "end": 213.04, "word": " for", "probability": 0.916015625}, {"start": 213.04, "end": 213.22, "word": " the", "probability": 0.91845703125}, {"start": 213.22, "end": 213.62, "word": " first", "probability": 0.869140625}, {"start": 213.62, "end": 214.78, "word": " value,", "probability": 0.9609375}, {"start": 214.88, "end": 214.96, "word": " which", "probability": 0.94677734375}, {"start": 214.96, "end": 215.06, "word": " is", "probability": 0.947265625}, {"start": 215.06, "end": 215.36, "word": " A.", "probability": 0.7998046875}, {"start": 217.0, "end": 217.44, "word": " This", "probability": 0.83837890625}, {"start": 217.44, "end": 217.68, "word": " value", "probability": 0.96875}, {"start": 217.68, "end": 217.88, "word": " is", "probability": 0.8828125}, {"start": 217.88, "end": 218.16, "word": " zero", "probability": 0.7099609375}, {"start": 218.16, "end": 219.24, "word": " because", "probability": 0.5546875}, {"start": 219.24, "end": 219.54, "word": " the", "probability": 0.92578125}, {"start": 219.54, "end": 219.8, "word": " mean", "probability": 0.96826171875}, {"start": 219.8, "end": 220.02, "word": " is", "probability": 0.95068359375}, {"start": 220.02, "end": 220.36, "word": " zero.", "probability": 0.890625}, {"start": 220.76, "end": 221.34, "word": " And", "probability": 0.93017578125}, {"start": 221.34, "end": 221.5, "word": " we", "probability": 0.90576171875}, {"start": 221.5, "end": 221.64, "word": " know", "probability": 0.88671875}, {"start": 
221.64, "end": 222.04, "word": " that", "probability": 0.943359375}, {"start": 222.04, "end": 222.64, "word": " Z", "probability": 0.74267578125}, {"start": 222.64, "end": 222.98, "word": " can", "probability": 0.96337890625}, {"start": 222.98, "end": 223.14, "word": " be", "probability": 0.9501953125}, {"start": 223.14, "end": 223.58, "word": " negative.", "probability": 0.9462890625}], "temperature": 1.0}, {"id": 9, "seek": 24734, "start": 224.44, "end": 247.34, "text": " If X is smaller than Mu, Z can positive if X is greater than Mu and equals zero only if X equals Mu. In this case, X equals Mu, so Z score is zero. The other one as we got before is 0.12. So now, we transform actually the probability of X between", "tokens": [759, 1783, 307, 4356, 813, 15601, 11, 1176, 393, 3353, 498, 1783, 307, 5044, 813, 15601, 293, 6915, 4018, 787, 498, 1783, 6915, 15601, 13, 682, 341, 1389, 11, 1783, 6915, 15601, 11, 370, 1176, 6175, 307, 4018, 13, 440, 661, 472, 382, 321, 658, 949, 307, 1958, 13, 4762, 13, 407, 586, 11, 321, 4088, 767, 264, 8482, 295, 1783, 1296], "avg_logprob": -0.2899305650166103, "compression_ratio": 1.4879518072289157, "no_speech_prob": 0.0, "words": [{"start": 224.44, "end": 224.82, "word": " If", "probability": 0.486083984375}, {"start": 224.82, "end": 225.08, "word": " X", "probability": 0.60693359375}, {"start": 225.08, "end": 225.24, "word": " is", "probability": 0.8583984375}, {"start": 225.24, "end": 225.6, "word": " smaller", "probability": 0.81103515625}, {"start": 225.6, "end": 225.94, "word": " than", "probability": 0.93408203125}, {"start": 225.94, "end": 226.2, "word": " Mu,", "probability": 0.578125}, {"start": 227.24, "end": 227.42, "word": " Z", "probability": 0.380126953125}, {"start": 227.42, "end": 227.6, "word": " can", "probability": 0.73974609375}, {"start": 227.6, "end": 228.0, "word": " positive", "probability": 0.462890625}, {"start": 228.0, "end": 228.3, "word": " if", "probability": 0.7099609375}, {"start": 228.3, "end": 
228.62, "word": " X", "probability": 0.71044921875}, {"start": 228.62, "end": 229.14, "word": " is", "probability": 0.67333984375}, {"start": 229.14, "end": 229.46, "word": " greater", "probability": 0.849609375}, {"start": 229.46, "end": 229.74, "word": " than", "probability": 0.9345703125}, {"start": 229.74, "end": 230.0, "word": " Mu", "probability": 0.974609375}, {"start": 230.0, "end": 231.3, "word": " and", "probability": 0.56982421875}, {"start": 231.3, "end": 231.8, "word": " equals", "probability": 0.3701171875}, {"start": 231.8, "end": 232.08, "word": " zero", "probability": 0.4658203125}, {"start": 232.08, "end": 232.46, "word": " only", "probability": 0.8603515625}, {"start": 232.46, "end": 232.84, "word": " if", "probability": 0.9404296875}, {"start": 232.84, "end": 233.08, "word": " X", "probability": 0.97998046875}, {"start": 233.08, "end": 233.42, "word": " equals", "probability": 0.6904296875}, {"start": 233.42, "end": 234.48, "word": " Mu.", "probability": 0.97216796875}, {"start": 234.98, "end": 235.22, "word": " In", "probability": 0.9287109375}, {"start": 235.22, "end": 235.42, "word": " this", "probability": 0.94970703125}, {"start": 235.42, "end": 235.72, "word": " case,", "probability": 0.9130859375}, {"start": 235.8, "end": 236.08, "word": " X", "probability": 0.96044921875}, {"start": 236.08, "end": 236.5, "word": " equals", "probability": 0.68115234375}, {"start": 236.5, "end": 236.72, "word": " Mu,", "probability": 0.98291015625}, {"start": 236.86, "end": 237.08, "word": " so", "probability": 0.92822265625}, {"start": 237.08, "end": 237.42, "word": " Z", "probability": 0.70361328125}, {"start": 237.42, "end": 237.68, "word": " score", "probability": 0.50537109375}, {"start": 237.68, "end": 237.86, "word": " is", "probability": 0.9384765625}, {"start": 237.86, "end": 238.14, "word": " zero.", "probability": 0.861328125}, {"start": 239.14, "end": 239.34, "word": " The", "probability": 0.62646484375}, {"start": 239.34, "end": 239.54, 
"word": " other", "probability": 0.90576171875}, {"start": 239.54, "end": 239.74, "word": " one", "probability": 0.89990234375}, {"start": 239.74, "end": 239.88, "word": " as", "probability": 0.1702880859375}, {"start": 239.88, "end": 240.04, "word": " we", "probability": 0.955078125}, {"start": 240.04, "end": 240.38, "word": " got", "probability": 0.689453125}, {"start": 240.38, "end": 240.7, "word": " before", "probability": 0.88134765625}, {"start": 240.7, "end": 240.84, "word": " is", "probability": 0.865234375}, {"start": 240.84, "end": 241.06, "word": " 0", "probability": 0.39794921875}, {"start": 241.06, "end": 241.42, "word": ".12.", "probability": 0.990478515625}, {"start": 242.02, "end": 242.32, "word": " So", "probability": 0.91796875}, {"start": 242.32, "end": 242.56, "word": " now,", "probability": 0.822265625}, {"start": 242.76, "end": 243.22, "word": " we", "probability": 0.943359375}, {"start": 243.22, "end": 243.76, "word": " transform", "probability": 0.90625}, {"start": 243.76, "end": 244.26, "word": " actually", "probability": 0.82861328125}, {"start": 244.26, "end": 244.5, "word": " the", "probability": 0.8779296875}, {"start": 244.5, "end": 244.76, "word": " probability", "probability": 0.93994140625}, {"start": 244.76, "end": 245.08, "word": " of", "probability": 0.95751953125}, {"start": 245.08, "end": 245.36, "word": " X", "probability": 0.994140625}, {"start": 245.36, "end": 247.34, "word": " between", "probability": 0.78662109375}], "temperature": 1.0}, {"id": 10, "seek": 27817, "start": 248.87, "end": 278.17, "text": " 8 and 8.6 to z-score between 0 and 0.12. In this case, we can use the normal theorem. Now, this area is, as we mentioned, just b of x smaller than 0.12 minus b of x, b of z smaller than 0. This probability, we know that, 0.54878. 
Now, the probability of z smaller than 0 is one-half.", "tokens": [1649, 293, 1649, 13, 21, 281, 710, 12, 4417, 418, 1296, 1958, 293, 1958, 13, 4762, 13, 682, 341, 1389, 11, 321, 393, 764, 264, 2710, 20904, 13, 823, 11, 341, 1859, 307, 11, 382, 321, 2835, 11, 445, 272, 295, 2031, 4356, 813, 1958, 13, 4762, 3175, 272, 295, 2031, 11, 272, 295, 710, 4356, 813, 1958, 13, 639, 8482, 11, 321, 458, 300, 11, 1958, 13, 20, 13318, 30693, 13, 823, 11, 264, 8482, 295, 710, 4356, 813, 1958, 307, 472, 12, 25461, 13], "avg_logprob": -0.21749281300895515, "compression_ratio": 1.6045197740112995, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 248.87, "end": 249.21, "word": " 8", "probability": 0.25927734375}, {"start": 249.21, "end": 249.67, "word": " and", "probability": 0.68896484375}, {"start": 249.67, "end": 249.89, "word": " 8", "probability": 0.96875}, {"start": 249.89, "end": 250.49, "word": ".6", "probability": 0.986328125}, {"start": 250.49, "end": 251.57, "word": " to", "probability": 0.82470703125}, {"start": 251.57, "end": 251.85, "word": " z", "probability": 0.693359375}, {"start": 251.85, "end": 252.19, "word": "-score", "probability": 0.7240397135416666}, {"start": 252.19, "end": 252.57, "word": " between", "probability": 0.87841796875}, {"start": 252.57, "end": 252.87, "word": " 0", "probability": 0.80712890625}, {"start": 252.87, "end": 253.05, "word": " and", "probability": 0.93603515625}, {"start": 253.05, "end": 253.29, "word": " 0", "probability": 0.91748046875}, {"start": 253.29, "end": 253.53, "word": ".12.", "probability": 0.975341796875}, {"start": 253.59, "end": 253.69, "word": " In", "probability": 0.94482421875}, {"start": 253.69, "end": 253.87, "word": " this", "probability": 0.947265625}, {"start": 253.87, "end": 254.07, "word": " case,", "probability": 0.91552734375}, {"start": 254.11, "end": 254.23, "word": " we", "probability": 0.92919921875}, {"start": 254.23, "end": 254.41, "word": " can", "probability": 0.9423828125}, {"start": 
254.41, "end": 254.65, "word": " use", "probability": 0.88134765625}, {"start": 254.65, "end": 254.75, "word": " the", "probability": 0.7529296875}, {"start": 254.75, "end": 254.97, "word": " normal", "probability": 0.82177734375}, {"start": 254.97, "end": 255.31, "word": " theorem.", "probability": 0.1922607421875}, {"start": 256.31, "end": 256.83, "word": " Now,", "probability": 0.96337890625}, {"start": 256.93, "end": 257.17, "word": " this", "probability": 0.9384765625}, {"start": 257.17, "end": 257.59, "word": " area", "probability": 0.9130859375}, {"start": 257.59, "end": 258.05, "word": " is,", "probability": 0.7939453125}, {"start": 258.17, "end": 258.29, "word": " as", "probability": 0.9619140625}, {"start": 258.29, "end": 258.43, "word": " we", "probability": 0.87255859375}, {"start": 258.43, "end": 258.71, "word": " mentioned,", "probability": 0.83447265625}, {"start": 258.93, "end": 259.35, "word": " just", "probability": 0.486328125}, {"start": 259.35, "end": 259.89, "word": " b", "probability": 0.321044921875}, {"start": 259.89, "end": 260.05, "word": " of", "probability": 0.9072265625}, {"start": 260.05, "end": 260.25, "word": " x", "probability": 0.97314453125}, {"start": 260.25, "end": 260.85, "word": " smaller", "probability": 0.85546875}, {"start": 260.85, "end": 261.13, "word": " than", "probability": 0.93017578125}, {"start": 261.13, "end": 261.39, "word": " 0", "probability": 0.96484375}, {"start": 261.39, "end": 261.71, "word": ".12", "probability": 0.992431640625}, {"start": 261.71, "end": 262.91, "word": " minus", "probability": 0.93017578125}, {"start": 262.91, "end": 263.25, "word": " b", "probability": 0.91748046875}, {"start": 263.25, "end": 263.37, "word": " of", "probability": 0.94580078125}, {"start": 263.37, "end": 263.63, "word": " x,", "probability": 0.86083984375}, {"start": 264.05, "end": 264.27, "word": " b", "probability": 0.91943359375}, {"start": 264.27, "end": 264.45, "word": " of", "probability": 0.962890625}, {"start": 
264.45, "end": 264.61, "word": " z", "probability": 0.94873046875}, {"start": 264.61, "end": 265.17, "word": " smaller", "probability": 0.66845703125}, {"start": 265.17, "end": 265.43, "word": " than", "probability": 0.9423828125}, {"start": 265.43, "end": 265.75, "word": " 0.", "probability": 0.86962890625}, {"start": 268.33, "end": 268.85, "word": " This", "probability": 0.76611328125}, {"start": 268.85, "end": 269.29, "word": " probability,", "probability": 0.94775390625}, {"start": 269.75, "end": 269.95, "word": " we", "probability": 0.9560546875}, {"start": 269.95, "end": 270.11, "word": " know", "probability": 0.88134765625}, {"start": 270.11, "end": 270.37, "word": " that,", "probability": 0.9267578125}, {"start": 270.47, "end": 270.69, "word": " 0", "probability": 0.5595703125}, {"start": 270.69, "end": 272.37, "word": ".54878.", "probability": 0.8927001953125}, {"start": 273.41, "end": 273.73, "word": " Now,", "probability": 0.96142578125}, {"start": 273.95, "end": 274.33, "word": " the", "probability": 0.90966796875}, {"start": 274.33, "end": 275.15, "word": " probability", "probability": 0.94921875}, {"start": 275.15, "end": 275.31, "word": " of", "probability": 0.89892578125}, {"start": 275.31, "end": 275.43, "word": " z", "probability": 0.93603515625}, {"start": 275.43, "end": 275.81, "word": " smaller", "probability": 0.6630859375}, {"start": 275.81, "end": 276.11, "word": " than", "probability": 0.93701171875}, {"start": 276.11, "end": 276.45, "word": " 0", "probability": 0.82421875}, {"start": 276.45, "end": 277.81, "word": " is", "probability": 0.6767578125}, {"start": 277.81, "end": 277.95, "word": " one", "probability": 0.55712890625}, {"start": 277.95, "end": 278.17, "word": "-half.", "probability": 0.734130859375}], "temperature": 1.0}, {"id": 11, "seek": 30702, "start": 278.86, "end": 307.02, "text": " Because the total area underneath the normal curve is 1, and 0 divided the curve into two equally parts. 
So the area to the right of 0 is the same as the area to the left of 0. So in this case, minus 0.5. So this is your answer. So the probability of Z between 8 and 8.6 is around 0478. I think we stopped last time at this point.", "tokens": [1436, 264, 3217, 1859, 7223, 264, 2710, 7605, 307, 502, 11, 293, 1958, 6666, 264, 7605, 666, 732, 12309, 3166, 13, 407, 264, 1859, 281, 264, 558, 295, 1958, 307, 264, 912, 382, 264, 1859, 281, 264, 1411, 295, 1958, 13, 407, 294, 341, 1389, 11, 3175, 1958, 13, 20, 13, 407, 341, 307, 428, 1867, 13, 407, 264, 8482, 295, 1176, 1296, 1649, 293, 1649, 13, 21, 307, 926, 1958, 14060, 23, 13, 286, 519, 321, 5936, 1036, 565, 412, 341, 935, 13], "avg_logprob": -0.15082721219343298, "compression_ratio": 1.5913461538461537, "no_speech_prob": 0.0, "words": [{"start": 278.86, "end": 279.3, "word": " Because", "probability": 0.61279296875}, {"start": 279.3, "end": 279.5, "word": " the", "probability": 0.88720703125}, {"start": 279.5, "end": 279.8, "word": " total", "probability": 0.8740234375}, {"start": 279.8, "end": 280.3, "word": " area", "probability": 0.9111328125}, {"start": 280.3, "end": 280.68, "word": " underneath", "probability": 0.90625}, {"start": 280.68, "end": 280.9, "word": " the", "probability": 0.85107421875}, {"start": 280.9, "end": 281.16, "word": " normal", "probability": 0.8818359375}, {"start": 281.16, "end": 281.38, "word": " curve", "probability": 0.89599609375}, {"start": 281.38, "end": 281.56, "word": " is", "probability": 0.94091796875}, {"start": 281.56, "end": 281.82, "word": " 1,", "probability": 0.48974609375}, {"start": 282.32, "end": 282.5, "word": " and", "probability": 0.9345703125}, {"start": 282.5, "end": 282.86, "word": " 0", "probability": 0.8818359375}, {"start": 282.86, "end": 283.76, "word": " divided", "probability": 0.779296875}, {"start": 283.76, "end": 284.82, "word": " the", "probability": 0.90087890625}, {"start": 284.82, "end": 285.1, "word": " curve", "probability": 0.89453125}, {"start": 285.1, 
"end": 285.3, "word": " into", "probability": 0.84814453125}, {"start": 285.3, "end": 285.5, "word": " two", "probability": 0.755859375}, {"start": 285.5, "end": 285.86, "word": " equally", "probability": 0.67041015625}, {"start": 285.86, "end": 286.28, "word": " parts.", "probability": 0.83740234375}, {"start": 286.44, "end": 286.82, "word": " So", "probability": 0.9580078125}, {"start": 286.82, "end": 287.5, "word": " the", "probability": 0.544921875}, {"start": 287.5, "end": 287.68, "word": " area", "probability": 0.91455078125}, {"start": 287.68, "end": 287.82, "word": " to", "probability": 0.96435546875}, {"start": 287.82, "end": 287.98, "word": " the", "probability": 0.9033203125}, {"start": 287.98, "end": 288.16, "word": " right", "probability": 0.9296875}, {"start": 288.16, "end": 288.32, "word": " of", "probability": 0.9541015625}, {"start": 288.32, "end": 288.56, "word": " 0", "probability": 0.86328125}, {"start": 288.56, "end": 288.7, "word": " is", "probability": 0.9326171875}, {"start": 288.7, "end": 288.84, "word": " the", "probability": 0.88720703125}, {"start": 288.84, "end": 289.1, "word": " same", "probability": 0.9072265625}, {"start": 289.1, "end": 289.4, "word": " as", "probability": 0.880859375}, {"start": 289.4, "end": 289.54, "word": " the", "probability": 0.90087890625}, {"start": 289.54, "end": 289.68, "word": " area", "probability": 0.90625}, {"start": 289.68, "end": 289.86, "word": " to", "probability": 0.9375}, {"start": 289.86, "end": 289.98, "word": " the", "probability": 0.92138671875}, {"start": 289.98, "end": 290.14, "word": " left", "probability": 0.912109375}, {"start": 290.14, "end": 290.34, "word": " of", "probability": 0.96630859375}, {"start": 290.34, "end": 290.58, "word": " 0.", "probability": 0.9892578125}, {"start": 291.08, "end": 291.4, "word": " So", "probability": 0.95556640625}, {"start": 291.4, "end": 291.54, "word": " in", "probability": 0.8720703125}, {"start": 291.54, "end": 291.72, "word": " this", "probability": 
0.94677734375}, {"start": 291.72, "end": 292.1, "word": " case,", "probability": 0.91650390625}, {"start": 292.88, "end": 294.06, "word": " minus", "probability": 0.76025390625}, {"start": 294.06, "end": 294.28, "word": " 0", "probability": 0.5830078125}, {"start": 294.28, "end": 294.56, "word": ".5.", "probability": 0.99267578125}, {"start": 294.64, "end": 294.78, "word": " So", "probability": 0.9443359375}, {"start": 294.78, "end": 294.96, "word": " this", "probability": 0.87744140625}, {"start": 294.96, "end": 295.1, "word": " is", "probability": 0.92822265625}, {"start": 295.1, "end": 295.36, "word": " your", "probability": 0.8193359375}, {"start": 295.36, "end": 296.3, "word": " answer.", "probability": 0.94482421875}, {"start": 296.82, "end": 296.98, "word": " So", "probability": 0.9541015625}, {"start": 296.98, "end": 297.16, "word": " the", "probability": 0.90087890625}, {"start": 297.16, "end": 297.48, "word": " probability", "probability": 0.95361328125}, {"start": 297.48, "end": 297.76, "word": " of", "probability": 0.96533203125}, {"start": 297.76, "end": 297.88, "word": " Z", "probability": 0.65576171875}, {"start": 297.88, "end": 298.4, "word": " between", "probability": 0.88427734375}, {"start": 298.4, "end": 299.24, "word": " 8", "probability": 0.90771484375}, {"start": 299.24, "end": 300.34, "word": " and", "probability": 0.9052734375}, {"start": 300.34, "end": 300.58, "word": " 8", "probability": 0.9951171875}, {"start": 300.58, "end": 301.12, "word": ".6", "probability": 0.997314453125}, {"start": 301.12, "end": 301.44, "word": " is", "probability": 0.9443359375}, {"start": 301.44, "end": 301.82, "word": " around", "probability": 0.931640625}, {"start": 301.82, "end": 304.04, "word": " 0478.", "probability": 0.7027180989583334}, {"start": 304.32, "end": 304.52, "word": " I", "probability": 0.99609375}, {"start": 304.52, "end": 304.76, "word": " think", "probability": 0.9189453125}, {"start": 304.76, "end": 305.18, "word": " we", "probability": 
0.9501953125}, {"start": 305.18, "end": 305.66, "word": " stopped", "probability": 0.91162109375}, {"start": 305.66, "end": 305.98, "word": " last", "probability": 0.8466796875}, {"start": 305.98, "end": 306.24, "word": " time", "probability": 0.88720703125}, {"start": 306.24, "end": 306.38, "word": " at", "probability": 0.9443359375}, {"start": 306.38, "end": 306.64, "word": " this", "probability": 0.94677734375}, {"start": 306.64, "end": 307.02, "word": " point.", "probability": 0.96435546875}], "temperature": 1.0}, {"id": 12, "seek": 33016, "start": 309.86, "end": 330.16, "text": " This is another example to compute the probability of X greater than 7.4 and 8. Also, it's the same idea here, just find these scores for the two values.", "tokens": [639, 307, 1071, 1365, 281, 14722, 264, 8482, 295, 1783, 5044, 813, 1614, 13, 19, 293, 1649, 13, 2743, 11, 309, 311, 264, 912, 1558, 510, 11, 445, 915, 613, 13444, 337, 264, 732, 4190, 13], "avg_logprob": -0.21558276704839757, "compression_ratio": 1.2222222222222223, "no_speech_prob": 0.0, "words": [{"start": 309.86, "end": 310.3, "word": " This", "probability": 0.61376953125}, {"start": 310.3, "end": 310.48, "word": " is", "probability": 0.9462890625}, {"start": 310.48, "end": 310.82, "word": " another", "probability": 0.9189453125}, {"start": 310.82, "end": 311.36, "word": " example", "probability": 0.96923828125}, {"start": 311.36, "end": 313.88, "word": " to", "probability": 0.67333984375}, {"start": 313.88, "end": 314.4, "word": " compute", "probability": 0.9072265625}, {"start": 314.4, "end": 317.28, "word": " the", "probability": 0.66845703125}, {"start": 317.28, "end": 317.76, "word": " probability", "probability": 0.900390625}, {"start": 317.76, "end": 318.02, "word": " of", "probability": 0.916015625}, {"start": 318.02, "end": 318.24, "word": " X", "probability": 0.78271484375}, {"start": 318.24, "end": 318.76, "word": " greater", "probability": 0.86376953125}, {"start": 318.76, "end": 319.04, "word": " than", 
"probability": 0.94384765625}, {"start": 319.04, "end": 319.28, "word": " 7", "probability": 0.88134765625}, {"start": 319.28, "end": 319.9, "word": ".4", "probability": 0.978759765625}, {"start": 319.9, "end": 320.2, "word": " and", "probability": 0.85595703125}, {"start": 320.2, "end": 320.48, "word": " 8.", "probability": 0.7958984375}, {"start": 321.6, "end": 322.46, "word": " Also,", "probability": 0.9287109375}, {"start": 322.76, "end": 323.52, "word": " it's", "probability": 0.93115234375}, {"start": 323.52, "end": 323.8, "word": " the", "probability": 0.91748046875}, {"start": 323.8, "end": 324.02, "word": " same", "probability": 0.90869140625}, {"start": 324.02, "end": 324.38, "word": " idea", "probability": 0.92138671875}, {"start": 324.38, "end": 324.8, "word": " here,", "probability": 0.66552734375}, {"start": 325.06, "end": 325.6, "word": " just", "probability": 0.8671875}, {"start": 325.6, "end": 327.34, "word": " find", "probability": 0.818359375}, {"start": 327.34, "end": 328.64, "word": " these", "probability": 0.34130859375}, {"start": 328.64, "end": 329.2, "word": " scores", "probability": 0.751953125}, {"start": 329.2, "end": 329.44, "word": " for", "probability": 0.9462890625}, {"start": 329.44, "end": 329.56, "word": " the", "probability": 0.904296875}, {"start": 329.56, "end": 329.7, "word": " two", "probability": 0.8974609375}, {"start": 329.7, "end": 330.16, "word": " values.", "probability": 0.9677734375}], "temperature": 1.0}, {"id": 13, "seek": 36071, "start": 333.85, "end": 360.71, "text": " L with B of Z greater than minus 0.12 up to 0. Now this red area equals the area below 0. I mean B of Z smaller than 0 minus the probability of Z smaller than minus 0.12. 
Now by using symmetric probability of the normal distribution, we know that", "tokens": [441, 365, 363, 295, 1176, 5044, 813, 3175, 1958, 13, 4762, 493, 281, 1958, 13, 823, 341, 2182, 1859, 6915, 264, 1859, 2507, 1958, 13, 286, 914, 363, 295, 1176, 4356, 813, 1958, 3175, 264, 8482, 295, 1176, 4356, 813, 3175, 1958, 13, 4762, 13, 823, 538, 1228, 32330, 8482, 295, 264, 2710, 7316, 11, 321, 458, 300], "avg_logprob": -0.219676911325778, "compression_ratio": 1.603896103896104, "no_speech_prob": 0.0, "words": [{"start": 333.85, "end": 334.23, "word": " L", "probability": 0.1065673828125}, {"start": 334.23, "end": 334.53, "word": " with", "probability": 0.8212890625}, {"start": 334.53, "end": 334.79, "word": " B", "probability": 0.458740234375}, {"start": 334.79, "end": 334.95, "word": " of", "probability": 0.8955078125}, {"start": 334.95, "end": 335.13, "word": " Z", "probability": 0.5869140625}, {"start": 335.13, "end": 335.67, "word": " greater", "probability": 0.84765625}, {"start": 335.67, "end": 335.97, "word": " than", "probability": 0.9365234375}, {"start": 335.97, "end": 336.29, "word": " minus", "probability": 0.90087890625}, {"start": 336.29, "end": 336.51, "word": " 0", "probability": 0.642578125}, {"start": 336.51, "end": 336.79, "word": ".12", "probability": 0.984130859375}, {"start": 336.79, "end": 337.01, "word": " up", "probability": 0.947265625}, {"start": 337.01, "end": 337.13, "word": " to", "probability": 0.95703125}, {"start": 337.13, "end": 337.39, "word": " 0.", "probability": 0.56591796875}, {"start": 338.37, "end": 338.67, "word": " Now", "probability": 0.85302734375}, {"start": 338.67, "end": 339.05, "word": " this", "probability": 0.63623046875}, {"start": 339.05, "end": 339.51, "word": " red", "probability": 0.912109375}, {"start": 339.51, "end": 339.97, "word": " area", "probability": 0.87646484375}, {"start": 339.97, "end": 342.47, "word": " equals", "probability": 0.7861328125}, {"start": 342.47, "end": 342.65, "word": " the", 
"probability": 0.85205078125}, {"start": 342.65, "end": 342.89, "word": " area", "probability": 0.87744140625}, {"start": 342.89, "end": 343.29, "word": " below", "probability": 0.89111328125}, {"start": 343.29, "end": 344.17, "word": " 0.", "probability": 0.69921875}, {"start": 346.21, "end": 346.29, "word": " I", "probability": 0.9658203125}, {"start": 346.29, "end": 346.59, "word": " mean", "probability": 0.962890625}, {"start": 346.59, "end": 346.87, "word": " B", "probability": 0.72412109375}, {"start": 346.87, "end": 347.01, "word": " of", "probability": 0.96533203125}, {"start": 347.01, "end": 347.13, "word": " Z", "probability": 0.94677734375}, {"start": 347.13, "end": 347.51, "word": " smaller", "probability": 0.71484375}, {"start": 347.51, "end": 347.79, "word": " than", "probability": 0.9404296875}, {"start": 347.79, "end": 348.19, "word": " 0", "probability": 0.88427734375}, {"start": 348.19, "end": 349.23, "word": " minus", "probability": 0.83154296875}, {"start": 349.23, "end": 349.53, "word": " the", "probability": 0.767578125}, {"start": 349.53, "end": 349.83, "word": " probability", "probability": 0.90966796875}, {"start": 349.83, "end": 350.11, "word": " of", "probability": 0.8916015625}, {"start": 350.11, "end": 350.33, "word": " Z", "probability": 0.9091796875}, {"start": 350.33, "end": 351.25, "word": " smaller", "probability": 0.83837890625}, {"start": 351.25, "end": 351.57, "word": " than", "probability": 0.93212890625}, {"start": 351.57, "end": 351.93, "word": " minus", "probability": 0.95751953125}, {"start": 351.93, "end": 352.29, "word": " 0", "probability": 0.9599609375}, {"start": 352.29, "end": 353.25, "word": ".12.", "probability": 0.995849609375}, {"start": 354.53, "end": 354.85, "word": " Now", "probability": 0.92578125}, {"start": 354.85, "end": 355.77, "word": " by", "probability": 0.61181640625}, {"start": 355.77, "end": 356.23, "word": " using", "probability": 0.9384765625}, {"start": 356.23, "end": 357.09, "word": " symmetric", 
"probability": 0.77587890625}, {"start": 357.09, "end": 357.79, "word": " probability", "probability": 0.87255859375}, {"start": 357.79, "end": 358.33, "word": " of", "probability": 0.9638671875}, {"start": 358.33, "end": 358.47, "word": " the", "probability": 0.8876953125}, {"start": 358.47, "end": 358.77, "word": " normal", "probability": 0.873046875}, {"start": 358.77, "end": 359.37, "word": " distribution,", "probability": 0.84814453125}, {"start": 360.13, "end": 360.33, "word": " we", "probability": 0.94140625}, {"start": 360.33, "end": 360.47, "word": " know", "probability": 0.87451171875}, {"start": 360.47, "end": 360.71, "word": " that", "probability": 0.93505859375}], "temperature": 1.0}, {"id": 14, "seek": 38574, "start": 361.12, "end": 385.74, "text": " The probability of Z smaller than minus 0.12 equals the probability of Z greater than 0.12. Because this area, if we have Z smaller than minus 0.12, the area to the left, that equals the area to the right of the same point, because of symmetry. 
And finally, you will end with this result.", "tokens": [440, 8482, 295, 1176, 4356, 813, 3175, 1958, 13, 4762, 6915, 264, 8482, 295, 1176, 5044, 813, 1958, 13, 4762, 13, 1436, 341, 1859, 11, 498, 321, 362, 1176, 4356, 813, 3175, 1958, 13, 4762, 11, 264, 1859, 281, 264, 1411, 11, 300, 6915, 264, 1859, 281, 264, 558, 295, 264, 912, 935, 11, 570, 295, 25440, 13, 400, 2721, 11, 291, 486, 917, 365, 341, 1874, 13], "avg_logprob": -0.23154439010481903, "compression_ratio": 1.7409638554216869, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 361.12, "end": 361.5, "word": " The", "probability": 0.322265625}, {"start": 361.5, "end": 362.1, "word": " probability", "probability": 0.89990234375}, {"start": 362.1, "end": 362.46, "word": " of", "probability": 0.94580078125}, {"start": 362.46, "end": 362.72, "word": " Z", "probability": 0.5146484375}, {"start": 362.72, "end": 363.32, "word": " smaller", "probability": 0.494873046875}, {"start": 363.32, "end": 363.66, "word": " than", "probability": 0.94140625}, {"start": 363.66, "end": 364.0, "word": " minus", "probability": 0.5888671875}, {"start": 364.0, "end": 364.22, "word": " 0", "probability": 0.57177734375}, {"start": 364.22, "end": 364.62, "word": ".12", "probability": 0.989013671875}, {"start": 364.62, "end": 366.08, "word": " equals", "probability": 0.82666015625}, {"start": 366.08, "end": 367.3, "word": " the", "probability": 0.82958984375}, {"start": 367.3, "end": 367.66, "word": " probability", "probability": 0.9482421875}, {"start": 367.66, "end": 367.98, "word": " of", "probability": 0.927734375}, {"start": 367.98, "end": 368.22, "word": " Z", "probability": 0.95068359375}, {"start": 368.22, "end": 369.32, "word": " greater", "probability": 0.89892578125}, {"start": 369.32, "end": 369.8, "word": " than", "probability": 0.94287109375}, {"start": 369.8, "end": 371.08, "word": " 0", "probability": 0.9345703125}, {"start": 371.08, "end": 371.42, "word": ".12.", "probability": 0.997314453125}, {"start": 
371.78, "end": 372.14, "word": " Because", "probability": 0.8017578125}, {"start": 372.14, "end": 372.4, "word": " this", "probability": 0.88427734375}, {"start": 372.4, "end": 372.82, "word": " area,", "probability": 0.90478515625}, {"start": 373.4, "end": 373.62, "word": " if", "probability": 0.939453125}, {"start": 373.62, "end": 373.74, "word": " we", "probability": 0.6884765625}, {"start": 373.74, "end": 373.96, "word": " have", "probability": 0.93701171875}, {"start": 373.96, "end": 374.66, "word": " Z", "probability": 0.87744140625}, {"start": 374.66, "end": 375.08, "word": " smaller", "probability": 0.8349609375}, {"start": 375.08, "end": 375.28, "word": " than", "probability": 0.93994140625}, {"start": 375.28, "end": 375.52, "word": " minus", "probability": 0.94921875}, {"start": 375.52, "end": 375.7, "word": " 0", "probability": 0.947265625}, {"start": 375.7, "end": 376.08, "word": ".12,", "probability": 0.9970703125}, {"start": 376.52, "end": 376.66, "word": " the", "probability": 0.82080078125}, {"start": 376.66, "end": 376.82, "word": " area", "probability": 0.865234375}, {"start": 376.82, "end": 376.98, "word": " to", "probability": 0.96337890625}, {"start": 376.98, "end": 377.14, "word": " the", "probability": 0.91455078125}, {"start": 377.14, "end": 377.4, "word": " left,", "probability": 0.947265625}, {"start": 378.12, "end": 378.58, "word": " that", "probability": 0.537109375}, {"start": 378.58, "end": 378.98, "word": " equals", "probability": 0.74658203125}, {"start": 378.98, "end": 379.2, "word": " the", "probability": 0.7734375}, {"start": 379.2, "end": 379.3, "word": " area", "probability": 0.87841796875}, {"start": 379.3, "end": 379.48, "word": " to", "probability": 0.9482421875}, {"start": 379.48, "end": 379.62, "word": " the", "probability": 0.90478515625}, {"start": 379.62, "end": 379.84, "word": " right", "probability": 0.9130859375}, {"start": 379.84, "end": 379.98, "word": " of", "probability": 0.94677734375}, {"start": 379.98, "end": 
380.12, "word": " the", "probability": 0.8662109375}, {"start": 380.12, "end": 380.3, "word": " same", "probability": 0.728515625}, {"start": 380.3, "end": 380.68, "word": " point,", "probability": 0.958984375}, {"start": 381.02, "end": 381.4, "word": " because", "probability": 0.900390625}, {"start": 381.4, "end": 381.56, "word": " of", "probability": 0.9560546875}, {"start": 381.56, "end": 381.9, "word": " symmetry.", "probability": 0.84375}, {"start": 383.44, "end": 384.12, "word": " And", "probability": 0.947265625}, {"start": 384.12, "end": 384.62, "word": " finally,", "probability": 0.82666015625}, {"start": 384.74, "end": 384.8, "word": " you", "probability": 0.388671875}, {"start": 384.8, "end": 384.94, "word": " will", "probability": 0.8095703125}, {"start": 384.94, "end": 385.12, "word": " end", "probability": 0.900390625}, {"start": 385.12, "end": 385.3, "word": " with", "probability": 0.859375}, {"start": 385.3, "end": 385.48, "word": " this", "probability": 0.1563720703125}, {"start": 385.48, "end": 385.74, "word": " result.", "probability": 0.321044921875}], "temperature": 1.0}, {"id": 15, "seek": 41891, "start": 390.67, "end": 418.91, "text": " D of z minus 0.12, all the way up to 0, this area, is the same as the area from 0 up to 0.5. So this area actually is the same as D of z between 0 and 0.5. 
So if you have negative sign, and then take the opposite one, the answer will be the same because the normal distribution is symmetric around 0.9.", "tokens": [413, 295, 710, 3175, 1958, 13, 4762, 11, 439, 264, 636, 493, 281, 1958, 11, 341, 1859, 11, 307, 264, 912, 382, 264, 1859, 490, 1958, 493, 281, 1958, 13, 20, 13, 407, 341, 1859, 767, 307, 264, 912, 382, 413, 295, 710, 1296, 1958, 293, 1958, 13, 20, 13, 407, 498, 291, 362, 3671, 1465, 11, 293, 550, 747, 264, 6182, 472, 11, 264, 1867, 486, 312, 264, 912, 570, 264, 2710, 7316, 307, 32330, 926, 1958, 13, 24, 13], "avg_logprob": -0.1895960394929095, "compression_ratio": 1.6117021276595744, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 390.67, "end": 391.01, "word": " D", "probability": 0.1973876953125}, {"start": 391.01, "end": 391.17, "word": " of", "probability": 0.74951171875}, {"start": 391.17, "end": 391.37, "word": " z", "probability": 0.75244140625}, {"start": 391.37, "end": 392.33, "word": " minus", "probability": 0.892578125}, {"start": 392.33, "end": 392.55, "word": " 0", "probability": 0.80029296875}, {"start": 392.55, "end": 392.91, "word": ".12,", "probability": 0.97900390625}, {"start": 393.25, "end": 394.85, "word": " all", "probability": 0.94384765625}, {"start": 394.85, "end": 395.01, "word": " the", "probability": 0.92041015625}, {"start": 395.01, "end": 395.15, "word": " way", "probability": 0.9541015625}, {"start": 395.15, "end": 395.33, "word": " up", "probability": 0.94677734375}, {"start": 395.33, "end": 395.49, "word": " to", "probability": 0.962890625}, {"start": 395.49, "end": 395.75, "word": " 0,", "probability": 0.59326171875}, {"start": 395.99, "end": 396.27, "word": " this", "probability": 0.75634765625}, {"start": 396.27, "end": 396.73, "word": " area,", "probability": 0.8720703125}, {"start": 397.63, "end": 398.05, "word": " is", "probability": 0.93212890625}, {"start": 398.05, "end": 398.23, "word": " the", "probability": 0.9208984375}, {"start": 398.23, "end": 
398.45, "word": " same", "probability": 0.8984375}, {"start": 398.45, "end": 398.81, "word": " as", "probability": 0.92431640625}, {"start": 398.81, "end": 398.97, "word": " the", "probability": 0.91162109375}, {"start": 398.97, "end": 399.25, "word": " area", "probability": 0.8876953125}, {"start": 399.25, "end": 399.65, "word": " from", "probability": 0.8759765625}, {"start": 399.65, "end": 400.03, "word": " 0", "probability": 0.95068359375}, {"start": 400.03, "end": 400.45, "word": " up", "probability": 0.95654296875}, {"start": 400.45, "end": 400.73, "word": " to", "probability": 0.962890625}, {"start": 400.73, "end": 401.35, "word": " 0", "probability": 0.97412109375}, {"start": 401.35, "end": 401.65, "word": ".5.", "probability": 0.806640625}, {"start": 402.33, "end": 402.89, "word": " So", "probability": 0.95849609375}, {"start": 402.89, "end": 403.13, "word": " this", "probability": 0.84033203125}, {"start": 403.13, "end": 403.45, "word": " area", "probability": 0.8974609375}, {"start": 403.45, "end": 403.97, "word": " actually", "probability": 0.6923828125}, {"start": 403.97, "end": 404.31, "word": " is", "probability": 0.85009765625}, {"start": 404.31, "end": 404.47, "word": " the", "probability": 0.91455078125}, {"start": 404.47, "end": 404.65, "word": " same", "probability": 0.90625}, {"start": 404.65, "end": 404.89, "word": " as", "probability": 0.96435546875}, {"start": 404.89, "end": 405.07, "word": " D", "probability": 0.56005859375}, {"start": 405.07, "end": 405.19, "word": " of", "probability": 0.921875}, {"start": 405.19, "end": 405.41, "word": " z", "probability": 0.9150390625}, {"start": 405.41, "end": 406.07, "word": " between", "probability": 0.82763671875}, {"start": 406.07, "end": 406.39, "word": " 0", "probability": 0.95703125}, {"start": 406.39, "end": 406.53, "word": " and", "probability": 0.9248046875}, {"start": 406.53, "end": 406.69, "word": " 0", "probability": 0.96044921875}, {"start": 406.69, "end": 407.01, "word": ".5.", 
"probability": 0.965576171875}, {"start": 407.49, "end": 407.79, "word": " So", "probability": 0.96240234375}, {"start": 407.79, "end": 407.97, "word": " if", "probability": 0.88916015625}, {"start": 407.97, "end": 408.07, "word": " you", "probability": 0.95654296875}, {"start": 408.07, "end": 408.23, "word": " have", "probability": 0.94580078125}, {"start": 408.23, "end": 408.55, "word": " negative", "probability": 0.923828125}, {"start": 408.55, "end": 409.03, "word": " sign,", "probability": 0.93994140625}, {"start": 410.11, "end": 410.35, "word": " and", "probability": 0.42236328125}, {"start": 410.35, "end": 410.59, "word": " then", "probability": 0.84716796875}, {"start": 410.59, "end": 411.27, "word": " take", "probability": 0.79541015625}, {"start": 411.27, "end": 411.53, "word": " the", "probability": 0.9169921875}, {"start": 411.53, "end": 411.91, "word": " opposite", "probability": 0.95166015625}, {"start": 411.91, "end": 412.33, "word": " one,", "probability": 0.9287109375}, {"start": 413.25, "end": 413.45, "word": " the", "probability": 0.88525390625}, {"start": 413.45, "end": 413.71, "word": " answer", "probability": 0.9501953125}, {"start": 413.71, "end": 413.85, "word": " will", "probability": 0.69677734375}, {"start": 413.85, "end": 413.93, "word": " be", "probability": 0.95068359375}, {"start": 413.93, "end": 414.13, "word": " the", "probability": 0.91259765625}, {"start": 414.13, "end": 414.39, "word": " same", "probability": 0.9091796875}, {"start": 414.39, "end": 415.13, "word": " because", "probability": 0.478759765625}, {"start": 415.13, "end": 416.23, "word": " the", "probability": 0.85302734375}, {"start": 416.23, "end": 416.47, "word": " normal", "probability": 0.84619140625}, {"start": 416.47, "end": 417.03, "word": " distribution", "probability": 0.81689453125}, {"start": 417.03, "end": 417.25, "word": " is", "probability": 0.94921875}, {"start": 417.25, "end": 417.61, "word": " symmetric", "probability": 0.86767578125}, {"start": 
417.61, "end": 418.31, "word": " around", "probability": 0.91943359375}, {"start": 418.31, "end": 418.67, "word": " 0", "probability": 0.62841796875}, {"start": 418.67, "end": 418.91, "word": ".9.", "probability": 0.7342529296875}], "temperature": 1.0}, {"id": 16, "seek": 44915, "start": 421.01, "end": 449.15, "text": " The questions, I think we stopped here. And also we talked about empirical rules. The one we mentioned in chapter three, in chapter three. And we know that, as we mentioned before, that is 68.16% of the observations fall within one standard deviation around the mean.", "tokens": [440, 1651, 11, 286, 519, 321, 5936, 510, 13, 400, 611, 321, 2825, 466, 31886, 4474, 13, 440, 472, 321, 2835, 294, 7187, 1045, 11, 294, 7187, 1045, 13, 400, 321, 458, 300, 11, 382, 321, 2835, 949, 11, 300, 307, 23317, 13, 6866, 4, 295, 264, 18163, 2100, 1951, 472, 3832, 25163, 926, 264, 914, 13], "avg_logprob": -0.18857758440848055, "compression_ratio": 1.5491329479768785, "no_speech_prob": 0.0, "words": [{"start": 421.01, "end": 421.21, "word": " The", "probability": 0.4375}, {"start": 421.21, "end": 421.63, "word": " questions,", "probability": 0.8828125}, {"start": 422.33, "end": 422.81, "word": " I", "probability": 0.99609375}, {"start": 422.81, "end": 423.05, "word": " think", "probability": 0.9189453125}, {"start": 423.05, "end": 423.37, "word": " we", "probability": 0.8251953125}, {"start": 423.37, "end": 424.69, "word": " stopped", "probability": 0.8603515625}, {"start": 424.69, "end": 425.05, "word": " here.", "probability": 0.857421875}, {"start": 426.57, "end": 427.27, "word": " And", "probability": 0.9384765625}, {"start": 427.27, "end": 427.53, "word": " also", "probability": 0.84423828125}, {"start": 427.53, "end": 427.75, "word": " we", "probability": 0.623046875}, {"start": 427.75, "end": 428.03, "word": " talked", "probability": 0.87158203125}, {"start": 428.03, "end": 428.37, "word": " about", "probability": 0.9013671875}, {"start": 428.37, "end": 428.79, 
"word": " empirical", "probability": 0.904296875}, {"start": 428.79, "end": 429.33, "word": " rules.", "probability": 0.78369140625}, {"start": 430.93, "end": 431.19, "word": " The", "probability": 0.83056640625}, {"start": 431.19, "end": 431.35, "word": " one", "probability": 0.92236328125}, {"start": 431.35, "end": 431.55, "word": " we", "probability": 0.955078125}, {"start": 431.55, "end": 431.99, "word": " mentioned", "probability": 0.82861328125}, {"start": 431.99, "end": 432.17, "word": " in", "probability": 0.9267578125}, {"start": 432.17, "end": 432.37, "word": " chapter", "probability": 0.68701171875}, {"start": 432.37, "end": 432.73, "word": " three,", "probability": 0.673828125}, {"start": 432.83, "end": 432.99, "word": " in", "probability": 0.67236328125}, {"start": 432.99, "end": 433.25, "word": " chapter", "probability": 0.84619140625}, {"start": 433.25, "end": 433.61, "word": " three.", "probability": 0.93310546875}, {"start": 434.29, "end": 434.79, "word": " And", "probability": 0.939453125}, {"start": 434.79, "end": 434.91, "word": " we", "probability": 0.94970703125}, {"start": 434.91, "end": 435.05, "word": " know", "probability": 0.8798828125}, {"start": 435.05, "end": 435.23, "word": " that,", "probability": 0.9296875}, {"start": 435.31, "end": 435.39, "word": " as", "probability": 0.9638671875}, {"start": 435.39, "end": 435.51, "word": " we", "probability": 0.90966796875}, {"start": 435.51, "end": 435.73, "word": " mentioned", "probability": 0.85498046875}, {"start": 435.73, "end": 436.17, "word": " before,", "probability": 0.859375}, {"start": 436.31, "end": 436.47, "word": " that", "probability": 0.923828125}, {"start": 436.47, "end": 436.85, "word": " is", "probability": 0.85693359375}, {"start": 436.85, "end": 438.69, "word": " 68", "probability": 0.93505859375}, {"start": 438.69, "end": 441.43, "word": ".16", "probability": 0.910400390625}, {"start": 441.43, "end": 441.83, "word": "%", "probability": 0.857421875}, {"start": 441.83, "end": 
442.49, "word": " of", "probability": 0.96240234375}, {"start": 442.49, "end": 442.65, "word": " the", "probability": 0.91162109375}, {"start": 442.65, "end": 443.27, "word": " observations", "probability": 0.767578125}, {"start": 443.27, "end": 444.79, "word": " fall", "probability": 0.7802734375}, {"start": 444.79, "end": 445.19, "word": " within", "probability": 0.919921875}, {"start": 445.19, "end": 445.55, "word": " one", "probability": 0.92333984375}, {"start": 445.55, "end": 445.91, "word": " standard", "probability": 0.91357421875}, {"start": 445.91, "end": 446.33, "word": " deviation", "probability": 0.9404296875}, {"start": 446.33, "end": 448.77, "word": " around", "probability": 0.52197265625}, {"start": 448.77, "end": 449.03, "word": " the", "probability": 0.84521484375}, {"start": 449.03, "end": 449.15, "word": " mean.", "probability": 0.978515625}], "temperature": 1.0}, {"id": 17, "seek": 47344, "start": 449.78, "end": 473.44, "text": " So this area from mu minus one sigma up to mu plus sigma, this area covers around 68%. 
Also 95% or actually 95.44% of the data falls within two standard deviations of the mean.", "tokens": [407, 341, 1859, 490, 2992, 3175, 472, 12771, 493, 281, 2992, 1804, 12771, 11, 341, 1859, 10538, 926, 23317, 6856, 2743, 13420, 4, 420, 767, 13420, 13, 13912, 4, 295, 264, 1412, 8804, 1951, 732, 3832, 31219, 763, 295, 264, 914, 13], "avg_logprob": -0.13444767095321833, "compression_ratio": 1.330827067669173, "no_speech_prob": 0.0, "words": [{"start": 449.78, "end": 450.06, "word": " So", "probability": 0.94384765625}, {"start": 450.06, "end": 450.32, "word": " this", "probability": 0.79931640625}, {"start": 450.32, "end": 450.66, "word": " area", "probability": 0.91357421875}, {"start": 450.66, "end": 451.1, "word": " from", "probability": 0.8125}, {"start": 451.1, "end": 451.68, "word": " mu", "probability": 0.55419921875}, {"start": 451.68, "end": 452.0, "word": " minus", "probability": 0.93505859375}, {"start": 452.0, "end": 452.3, "word": " one", "probability": 0.66748046875}, {"start": 452.3, "end": 452.68, "word": " sigma", "probability": 0.90185546875}, {"start": 452.68, "end": 453.7, "word": " up", "probability": 0.857421875}, {"start": 453.7, "end": 453.86, "word": " to", "probability": 0.9697265625}, {"start": 453.86, "end": 454.02, "word": " mu", "probability": 0.9404296875}, {"start": 454.02, "end": 454.26, "word": " plus", "probability": 0.95556640625}, {"start": 454.26, "end": 454.66, "word": " sigma,", "probability": 0.9296875}, {"start": 455.1, "end": 455.38, "word": " this", "probability": 0.94384765625}, {"start": 455.38, "end": 455.66, "word": " area", "probability": 0.8876953125}, {"start": 455.66, "end": 456.1, "word": " covers", "probability": 0.85107421875}, {"start": 456.1, "end": 456.52, "word": " around", "probability": 0.93017578125}, {"start": 456.52, "end": 457.68, "word": " 68%.", "probability": 0.9091796875}, {"start": 457.68, "end": 462.08, "word": " Also", "probability": 0.94091796875}, {"start": 462.08, "end": 463.74, "word": " 
95", "probability": 0.6826171875}, {"start": 463.74, "end": 464.16, "word": "%", "probability": 0.6044921875}, {"start": 464.16, "end": 464.52, "word": " or", "probability": 0.93359375}, {"start": 464.52, "end": 465.02, "word": " actually", "probability": 0.90234375}, {"start": 465.02, "end": 465.78, "word": " 95", "probability": 0.96923828125}, {"start": 465.78, "end": 466.62, "word": ".44", "probability": 0.991943359375}, {"start": 466.62, "end": 467.1, "word": "%", "probability": 0.9951171875}, {"start": 467.1, "end": 467.28, "word": " of", "probability": 0.95947265625}, {"start": 467.28, "end": 467.4, "word": " the", "probability": 0.9228515625}, {"start": 467.4, "end": 467.78, "word": " data", "probability": 0.94287109375}, {"start": 467.78, "end": 470.22, "word": " falls", "probability": 0.467041015625}, {"start": 470.22, "end": 470.7, "word": " within", "probability": 0.9072265625}, {"start": 470.7, "end": 471.16, "word": " two", "probability": 0.86328125}, {"start": 471.16, "end": 471.48, "word": " standard", "probability": 0.91064453125}, {"start": 471.48, "end": 472.08, "word": " deviations", "probability": 0.934814453125}, {"start": 472.08, "end": 473.06, "word": " of", "probability": 0.96240234375}, {"start": 473.06, "end": 473.22, "word": " the", "probability": 0.9306640625}, {"start": 473.22, "end": 473.44, "word": " mean.", "probability": 0.978515625}], "temperature": 1.0}, {"id": 18, "seek": 48716, "start": 474.1, "end": 487.16, "text": " And finally, around most of the data, around 99.73% of the data falls within three subdivisions of the population mean.", "tokens": [400, 2721, 11, 926, 881, 295, 264, 1412, 11, 926, 11803, 13, 33396, 4, 295, 264, 1412, 8804, 1951, 1045, 45331, 4252, 295, 264, 4415, 914, 13], "avg_logprob": -0.3007812521287373, "compression_ratio": 1.263157894736842, "no_speech_prob": 0.0, "words": [{"start": 474.1, "end": 474.4, "word": " And", "probability": 0.83056640625}, {"start": 474.4, "end": 474.84, "word": " finally,", 
"probability": 0.81201171875}, {"start": 475.04, "end": 475.32, "word": " around", "probability": 0.50048828125}, {"start": 475.32, "end": 476.1, "word": " most", "probability": 0.56396484375}, {"start": 476.1, "end": 476.24, "word": " of", "probability": 0.970703125}, {"start": 476.24, "end": 476.34, "word": " the", "probability": 0.900390625}, {"start": 476.34, "end": 476.58, "word": " data,", "probability": 0.78662109375}, {"start": 477.0, "end": 477.26, "word": " around", "probability": 0.875}, {"start": 477.26, "end": 477.64, "word": " 99", "probability": 0.9521484375}, {"start": 477.64, "end": 478.46, "word": ".73", "probability": 0.94384765625}, {"start": 478.46, "end": 479.56, "word": "%", "probability": 0.8046875}, {"start": 479.56, "end": 479.8, "word": " of", "probability": 0.9501953125}, {"start": 479.8, "end": 479.94, "word": " the", "probability": 0.90771484375}, {"start": 479.94, "end": 480.22, "word": " data", "probability": 0.947265625}, {"start": 480.22, "end": 480.62, "word": " falls", "probability": 0.77197265625}, {"start": 480.62, "end": 481.08, "word": " within", "probability": 0.90087890625}, {"start": 481.08, "end": 482.18, "word": " three", "probability": 0.1365966796875}, {"start": 482.18, "end": 482.92, "word": " subdivisions", "probability": 0.580322265625}, {"start": 482.92, "end": 483.5, "word": " of", "probability": 0.96630859375}, {"start": 483.5, "end": 484.5, "word": " the", "probability": 0.88330078125}, {"start": 484.5, "end": 486.86, "word": " population", "probability": 0.9375}, {"start": 486.86, "end": 487.16, "word": " mean.", "probability": 0.61376953125}], "temperature": 1.0}, {"id": 19, "seek": 51985, "start": 491.55, "end": 519.85, "text": " And now the new topic is how can we find the X value if the probability is given. It's vice versa. In the previous questions, we were asking about find the probability, for example, if X is smaller than a certain number. 
Now suppose this probability is given, and we are looking to find this value. I mean, for example, suppose in the previous examples here,", "tokens": [400, 586, 264, 777, 4829, 307, 577, 393, 321, 915, 264, 1783, 2158, 498, 264, 8482, 307, 2212, 13, 467, 311, 11964, 25650, 13, 682, 264, 3894, 1651, 11, 321, 645, 3365, 466, 915, 264, 8482, 11, 337, 1365, 11, 498, 1783, 307, 4356, 813, 257, 1629, 1230, 13, 823, 7297, 341, 8482, 307, 2212, 11, 293, 321, 366, 1237, 281, 915, 341, 2158, 13, 286, 914, 11, 337, 1365, 11, 7297, 294, 264, 3894, 5110, 510, 11], "avg_logprob": -0.15763449668884277, "compression_ratio": 1.751219512195122, "no_speech_prob": 0.0, "words": [{"start": 491.55, "end": 491.81, "word": " And", "probability": 0.39013671875}, {"start": 491.81, "end": 491.99, "word": " now", "probability": 0.83349609375}, {"start": 491.99, "end": 492.17, "word": " the", "probability": 0.60693359375}, {"start": 492.17, "end": 492.35, "word": " new", "probability": 0.916015625}, {"start": 492.35, "end": 492.63, "word": " topic", "probability": 0.95166015625}, {"start": 492.63, "end": 493.07, "word": " is", "probability": 0.93505859375}, {"start": 493.07, "end": 493.23, "word": " how", "probability": 0.70361328125}, {"start": 493.23, "end": 493.45, "word": " can", "probability": 0.91845703125}, {"start": 493.45, "end": 493.59, "word": " we", "probability": 0.7744140625}, {"start": 493.59, "end": 493.93, "word": " find", "probability": 0.88720703125}, {"start": 493.93, "end": 494.69, "word": " the", "probability": 0.88330078125}, {"start": 494.69, "end": 494.97, "word": " X", "probability": 0.68212890625}, {"start": 494.97, "end": 495.43, "word": " value", "probability": 0.93115234375}, {"start": 495.43, "end": 496.39, "word": " if", "probability": 0.8486328125}, {"start": 496.39, "end": 496.59, "word": " the", "probability": 0.9150390625}, {"start": 496.59, "end": 496.93, "word": " probability", "probability": 0.9384765625}, {"start": 496.93, "end": 497.31, "word": " is", 
"probability": 0.9521484375}, {"start": 497.31, "end": 497.57, "word": " given.", "probability": 0.8994140625}, {"start": 497.97, "end": 498.25, "word": " It's", "probability": 0.930908203125}, {"start": 498.25, "end": 498.51, "word": " vice", "probability": 0.84814453125}, {"start": 498.51, "end": 498.87, "word": " versa.", "probability": 0.75146484375}, {"start": 499.83, "end": 499.97, "word": " In", "probability": 0.9541015625}, {"start": 499.97, "end": 500.09, "word": " the", "probability": 0.91943359375}, {"start": 500.09, "end": 500.39, "word": " previous", "probability": 0.86181640625}, {"start": 500.39, "end": 500.95, "word": " questions,", "probability": 0.95849609375}, {"start": 501.09, "end": 501.23, "word": " we", "probability": 0.96337890625}, {"start": 501.23, "end": 501.45, "word": " were", "probability": 0.892578125}, {"start": 501.45, "end": 501.81, "word": " asking", "probability": 0.87060546875}, {"start": 501.81, "end": 502.29, "word": " about", "probability": 0.9052734375}, {"start": 502.29, "end": 504.09, "word": " find", "probability": 0.387451171875}, {"start": 504.09, "end": 504.23, "word": " the", "probability": 0.82421875}, {"start": 504.23, "end": 504.65, "word": " probability,", "probability": 0.955078125}, {"start": 505.07, "end": 505.19, "word": " for", "probability": 0.95068359375}, {"start": 505.19, "end": 505.51, "word": " example,", "probability": 0.9755859375}, {"start": 505.61, "end": 505.69, "word": " if", "probability": 0.51220703125}, {"start": 505.69, "end": 505.95, "word": " X", "probability": 0.9814453125}, {"start": 505.95, "end": 506.13, "word": " is", "probability": 0.94482421875}, {"start": 506.13, "end": 506.39, "word": " smaller", "probability": 0.89892578125}, {"start": 506.39, "end": 506.67, "word": " than", "probability": 0.94140625}, {"start": 506.67, "end": 506.87, "word": " a", "probability": 0.99609375}, {"start": 506.87, "end": 507.13, "word": " certain", "probability": 0.908203125}, {"start": 507.13, "end": 
507.55, "word": " number.", "probability": 0.92626953125}, {"start": 509.09, "end": 509.35, "word": " Now", "probability": 0.95263671875}, {"start": 509.35, "end": 509.91, "word": " suppose", "probability": 0.77978515625}, {"start": 509.91, "end": 510.45, "word": " this", "probability": 0.9169921875}, {"start": 510.45, "end": 510.87, "word": " probability", "probability": 0.95654296875}, {"start": 510.87, "end": 511.25, "word": " is", "probability": 0.9404296875}, {"start": 511.25, "end": 511.49, "word": " given,", "probability": 0.89794921875}, {"start": 512.49, "end": 512.89, "word": " and", "probability": 0.93212890625}, {"start": 512.89, "end": 513.03, "word": " we", "probability": 0.9609375}, {"start": 513.03, "end": 513.15, "word": " are", "probability": 0.93212890625}, {"start": 513.15, "end": 513.41, "word": " looking", "probability": 0.91748046875}, {"start": 513.41, "end": 513.79, "word": " to", "probability": 0.9619140625}, {"start": 513.79, "end": 514.07, "word": " find", "probability": 0.89453125}, {"start": 514.07, "end": 514.33, "word": " this", "probability": 0.943359375}, {"start": 514.33, "end": 514.69, "word": " value.", "probability": 0.97607421875}, {"start": 515.57, "end": 515.75, "word": " I", "probability": 0.935546875}, {"start": 515.75, "end": 515.99, "word": " mean,", "probability": 0.962890625}, {"start": 516.29, "end": 516.47, "word": " for", "probability": 0.95166015625}, {"start": 516.47, "end": 516.79, "word": " example,", "probability": 0.9755859375}, {"start": 516.91, "end": 517.39, "word": " suppose", "probability": 0.87353515625}, {"start": 517.39, "end": 518.61, "word": " in", "probability": 0.755859375}, {"start": 518.61, "end": 518.71, "word": " the", "probability": 0.92138671875}, {"start": 518.71, "end": 518.99, "word": " previous", "probability": 0.853515625}, {"start": 518.99, "end": 519.45, "word": " examples", "probability": 0.85205078125}, {"start": 519.45, "end": 519.85, "word": " here,", "probability": 0.849609375}], 
"temperature": 1.0}, {"id": 20, "seek": 54334, "start": 523.38, "end": 543.34, "text": " Suppose we know this probability. So the probability is given. The question is, how can we find this value? It's the opposite, sometimes called backward normal calculations. There are actually two steps.", "tokens": [21360, 321, 458, 341, 8482, 13, 407, 264, 8482, 307, 2212, 13, 440, 1168, 307, 11, 577, 393, 321, 915, 341, 2158, 30, 467, 311, 264, 6182, 11, 2171, 1219, 23897, 2710, 20448, 13, 821, 366, 767, 732, 4439, 13], "avg_logprob": -0.19502667392172465, "compression_ratio": 1.3877551020408163, "no_speech_prob": 0.0, "words": [{"start": 523.38, "end": 523.86, "word": " Suppose", "probability": 0.73486328125}, {"start": 523.86, "end": 524.14, "word": " we", "probability": 0.88134765625}, {"start": 524.14, "end": 524.32, "word": " know", "probability": 0.576171875}, {"start": 524.32, "end": 524.5, "word": " this", "probability": 0.89794921875}, {"start": 524.5, "end": 524.9, "word": " probability.", "probability": 0.91455078125}, {"start": 526.06, "end": 526.66, "word": " So", "probability": 0.919921875}, {"start": 526.66, "end": 526.76, "word": " the", "probability": 0.67138671875}, {"start": 526.76, "end": 527.06, "word": " probability", "probability": 0.93896484375}, {"start": 527.06, "end": 527.32, "word": " is", "probability": 0.9130859375}, {"start": 527.32, "end": 527.54, "word": " given.", "probability": 0.8818359375}, {"start": 528.4, "end": 528.7, "word": " The", "probability": 0.88134765625}, {"start": 528.7, "end": 529.0, "word": " question", "probability": 0.91064453125}, {"start": 529.0, "end": 529.28, "word": " is,", "probability": 0.9501953125}, {"start": 529.62, "end": 529.78, "word": " how", "probability": 0.8642578125}, {"start": 529.78, "end": 530.04, "word": " can", "probability": 0.94091796875}, {"start": 530.04, "end": 530.22, "word": " we", "probability": 0.92529296875}, {"start": 530.22, "end": 530.54, "word": " find", "probability": 0.904296875}, 
{"start": 530.54, "end": 530.94, "word": " this", "probability": 0.9462890625}, {"start": 530.94, "end": 531.22, "word": " value?", "probability": 0.41650390625}, {"start": 532.26, "end": 532.68, "word": " It's", "probability": 0.7095947265625}, {"start": 532.68, "end": 532.88, "word": " the", "probability": 0.84521484375}, {"start": 532.88, "end": 533.16, "word": " opposite,", "probability": 0.9609375}, {"start": 533.72, "end": 534.14, "word": " sometimes", "probability": 0.94384765625}, {"start": 534.14, "end": 534.66, "word": " called", "probability": 0.845703125}, {"start": 534.66, "end": 535.52, "word": " backward", "probability": 0.86767578125}, {"start": 535.52, "end": 536.54, "word": " normal", "probability": 0.80859375}, {"start": 536.54, "end": 537.18, "word": " calculations.", "probability": 0.91015625}, {"start": 541.66, "end": 542.22, "word": " There", "probability": 0.7763671875}, {"start": 542.22, "end": 542.4, "word": " are", "probability": 0.93896484375}, {"start": 542.4, "end": 542.74, "word": " actually", "probability": 0.58349609375}, {"start": 542.74, "end": 542.96, "word": " two", "probability": 0.92138671875}, {"start": 542.96, "end": 543.34, "word": " steps.", "probability": 0.85888671875}], "temperature": 1.0}, {"id": 21, "seek": 57174, "start": 544.42, "end": 571.74, "text": " to find the x value for a certain probability or for a given or for a known probability the first step we have to find the z score then use this equation to find the x value corresponding to the z score you have and x is just mu plus sigma times mu so first step you have to find the z score", "tokens": [281, 915, 264, 2031, 2158, 337, 257, 1629, 8482, 420, 337, 257, 2212, 420, 337, 257, 2570, 8482, 264, 700, 1823, 321, 362, 281, 915, 264, 710, 6175, 550, 764, 341, 5367, 281, 915, 264, 2031, 2158, 11760, 281, 264, 710, 6175, 291, 362, 293, 2031, 307, 445, 2992, 1804, 12771, 1413, 2992, 370, 700, 1823, 291, 362, 281, 915, 264, 710, 6175], "avg_logprob": 
-0.2282714790198952, "compression_ratio": 2.0, "no_speech_prob": 0.0, "words": [{"start": 544.42, "end": 544.64, "word": " to", "probability": 0.4365234375}, {"start": 544.64, "end": 544.9, "word": " find", "probability": 0.87841796875}, {"start": 544.9, "end": 545.04, "word": " the", "probability": 0.54443359375}, {"start": 545.04, "end": 545.22, "word": " x", "probability": 0.5341796875}, {"start": 545.22, "end": 545.58, "word": " value", "probability": 0.765625}, {"start": 545.58, "end": 546.16, "word": " for", "probability": 0.88916015625}, {"start": 546.16, "end": 546.46, "word": " a", "probability": 0.9130859375}, {"start": 546.46, "end": 546.92, "word": " certain", "probability": 0.701171875}, {"start": 546.92, "end": 547.96, "word": " probability", "probability": 0.8037109375}, {"start": 547.96, "end": 548.26, "word": " or", "probability": 0.66455078125}, {"start": 548.26, "end": 548.44, "word": " for", "probability": 0.83349609375}, {"start": 548.44, "end": 548.56, "word": " a", "probability": 0.9716796875}, {"start": 548.56, "end": 548.8, "word": " given", "probability": 0.8388671875}, {"start": 548.8, "end": 549.6, "word": " or", "probability": 0.6357421875}, {"start": 549.6, "end": 549.82, "word": " for", "probability": 0.79443359375}, {"start": 549.82, "end": 550.04, "word": " a", "probability": 0.810546875}, {"start": 550.04, "end": 550.26, "word": " known", "probability": 0.60791015625}, {"start": 550.26, "end": 550.8, "word": " probability", "probability": 0.923828125}, {"start": 550.8, "end": 551.72, "word": " the", "probability": 0.276123046875}, {"start": 551.72, "end": 551.98, "word": " first", "probability": 0.890625}, {"start": 551.98, "end": 552.16, "word": " step", "probability": 0.89794921875}, {"start": 552.16, "end": 552.28, "word": " we", "probability": 0.806640625}, {"start": 552.28, "end": 552.42, "word": " have", "probability": 0.94677734375}, {"start": 552.42, "end": 552.54, "word": " to", "probability": 0.96630859375}, {"start": 
552.54, "end": 552.9, "word": " find", "probability": 0.89501953125}, {"start": 552.9, "end": 554.3, "word": " the", "probability": 0.7880859375}, {"start": 554.3, "end": 554.96, "word": " z", "probability": 0.91552734375}, {"start": 554.96, "end": 555.36, "word": " score", "probability": 0.6123046875}, {"start": 555.36, "end": 557.3, "word": " then", "probability": 0.7724609375}, {"start": 557.3, "end": 557.92, "word": " use", "probability": 0.84765625}, {"start": 557.92, "end": 558.84, "word": " this", "probability": 0.9462890625}, {"start": 558.84, "end": 559.32, "word": " equation", "probability": 0.97705078125}, {"start": 559.32, "end": 559.74, "word": " to", "probability": 0.93359375}, {"start": 559.74, "end": 560.0, "word": " find", "probability": 0.88916015625}, {"start": 560.0, "end": 560.26, "word": " the", "probability": 0.8984375}, {"start": 560.26, "end": 560.54, "word": " x", "probability": 0.98193359375}, {"start": 560.54, "end": 561.02, "word": " value", "probability": 0.95361328125}, {"start": 561.02, "end": 562.1, "word": " corresponding", "probability": 0.8330078125}, {"start": 562.1, "end": 562.76, "word": " to", "probability": 0.96533203125}, {"start": 562.76, "end": 563.12, "word": " the", "probability": 0.476806640625}, {"start": 563.12, "end": 563.28, "word": " z", "probability": 0.923828125}, {"start": 563.28, "end": 563.46, "word": " score", "probability": 0.78857421875}, {"start": 563.46, "end": 563.62, "word": " you", "probability": 0.89111328125}, {"start": 563.62, "end": 563.88, "word": " have", "probability": 0.9140625}, {"start": 563.88, "end": 564.86, "word": " and", "probability": 0.74169921875}, {"start": 564.86, "end": 565.1, "word": " x", "probability": 0.8212890625}, {"start": 565.1, "end": 565.26, "word": " is", "probability": 0.8515625}, {"start": 565.26, "end": 565.5, "word": " just", "probability": 0.91064453125}, {"start": 565.5, "end": 565.84, "word": " mu", "probability": 0.5986328125}, {"start": 565.84, "end": 566.34, 
"word": " plus", "probability": 0.9091796875}, {"start": 566.34, "end": 566.84, "word": " sigma", "probability": 0.91015625}, {"start": 566.84, "end": 567.48, "word": " times", "probability": 0.91357421875}, {"start": 567.48, "end": 567.78, "word": " mu", "probability": 0.939453125}, {"start": 567.78, "end": 569.56, "word": " so", "probability": 0.6513671875}, {"start": 569.56, "end": 569.8, "word": " first", "probability": 0.7587890625}, {"start": 569.8, "end": 569.98, "word": " step", "probability": 0.89013671875}, {"start": 569.98, "end": 570.12, "word": " you", "probability": 0.658203125}, {"start": 570.12, "end": 570.26, "word": " have", "probability": 0.9423828125}, {"start": 570.26, "end": 570.4, "word": " to", "probability": 0.9658203125}, {"start": 570.4, "end": 570.7, "word": " find", "probability": 0.8955078125}, {"start": 570.7, "end": 571.34, "word": " the", "probability": 0.45166015625}, {"start": 571.34, "end": 571.48, "word": " z", "probability": 0.888671875}, {"start": 571.48, "end": 571.74, "word": " score", "probability": 0.85302734375}], "temperature": 1.0}, {"id": 22, "seek": 60425, "start": 575.35, "end": 604.25, "text": " corresponding to the probability we have. So find the z value for the non-probability, then use that z score to find the value of x by using this equation. So x equals mu plus z sigma. z could be negative, could be positive, depends on the probability you have. 
If the probability is above 0.5, I mean 0.5 and greater than 0.5, this corresponds to 10.", "tokens": [11760, 281, 264, 8482, 321, 362, 13, 407, 915, 264, 710, 2158, 337, 264, 2107, 12, 41990, 2310, 11, 550, 764, 300, 710, 6175, 281, 915, 264, 2158, 295, 2031, 538, 1228, 341, 5367, 13, 407, 2031, 6915, 2992, 1804, 710, 12771, 13, 710, 727, 312, 3671, 11, 727, 312, 3353, 11, 5946, 322, 264, 8482, 291, 362, 13, 759, 264, 8482, 307, 3673, 1958, 13, 20, 11, 286, 914, 1958, 13, 20, 293, 5044, 813, 1958, 13, 20, 11, 341, 23249, 281, 1266, 13], "avg_logprob": -0.1967659904513248, "compression_ratio": 1.7425742574257426, "no_speech_prob": 0.0, "words": [{"start": 575.35, "end": 575.95, "word": " corresponding", "probability": 0.373291015625}, {"start": 575.95, "end": 576.21, "word": " to", "probability": 0.96630859375}, {"start": 576.21, "end": 576.35, "word": " the", "probability": 0.9013671875}, {"start": 576.35, "end": 576.69, "word": " probability", "probability": 0.96533203125}, {"start": 576.69, "end": 576.95, "word": " we", "probability": 0.91943359375}, {"start": 576.95, "end": 577.25, "word": " have.", "probability": 0.94482421875}, {"start": 578.03, "end": 578.35, "word": " So", "probability": 0.9267578125}, {"start": 578.35, "end": 578.69, "word": " find", "probability": 0.5244140625}, {"start": 578.69, "end": 578.85, "word": " the", "probability": 0.77880859375}, {"start": 578.85, "end": 578.99, "word": " z", "probability": 0.6591796875}, {"start": 578.99, "end": 579.29, "word": " value", "probability": 0.89306640625}, {"start": 579.29, "end": 579.63, "word": " for", "probability": 0.93896484375}, {"start": 579.63, "end": 579.83, "word": " the", "probability": 0.90283203125}, {"start": 579.83, "end": 580.19, "word": " non", "probability": 0.390380859375}, {"start": 580.19, "end": 580.69, "word": "-probability,", "probability": 0.8310546875}, {"start": 581.49, "end": 582.41, "word": " then", "probability": 0.7822265625}, {"start": 582.41, "end": 583.59, "word": 
" use", "probability": 0.8291015625}, {"start": 583.59, "end": 583.87, "word": " that", "probability": 0.9150390625}, {"start": 583.87, "end": 584.11, "word": " z", "probability": 0.9443359375}, {"start": 584.11, "end": 584.43, "word": " score", "probability": 0.51904296875}, {"start": 584.43, "end": 585.57, "word": " to", "probability": 0.86376953125}, {"start": 585.57, "end": 585.81, "word": " find", "probability": 0.8916015625}, {"start": 585.81, "end": 585.95, "word": " the", "probability": 0.896484375}, {"start": 585.95, "end": 586.17, "word": " value", "probability": 0.9697265625}, {"start": 586.17, "end": 586.35, "word": " of", "probability": 0.6630859375}, {"start": 586.35, "end": 586.63, "word": " x", "probability": 0.89697265625}, {"start": 586.63, "end": 587.37, "word": " by", "probability": 0.83251953125}, {"start": 587.37, "end": 587.67, "word": " using", "probability": 0.931640625}, {"start": 587.67, "end": 587.91, "word": " this", "probability": 0.9287109375}, {"start": 587.91, "end": 588.39, "word": " equation.", "probability": 0.9755859375}, {"start": 588.89, "end": 589.13, "word": " So", "probability": 0.935546875}, {"start": 589.13, "end": 589.33, "word": " x", "probability": 0.88232421875}, {"start": 589.33, "end": 589.73, "word": " equals", "probability": 0.818359375}, {"start": 589.73, "end": 589.95, "word": " mu", "probability": 0.6064453125}, {"start": 589.95, "end": 590.35, "word": " plus", "probability": 0.953125}, {"start": 590.35, "end": 590.83, "word": " z", "probability": 0.94140625}, {"start": 590.83, "end": 591.13, "word": " sigma.", "probability": 0.84912109375}, {"start": 591.49, "end": 591.67, "word": " z", "probability": 0.6318359375}, {"start": 591.67, "end": 591.87, "word": " could", "probability": 0.87548828125}, {"start": 591.87, "end": 592.01, "word": " be", "probability": 0.947265625}, {"start": 592.01, "end": 592.27, "word": " negative,", "probability": 0.9189453125}, {"start": 592.77, "end": 592.99, "word": " could", 
"probability": 0.6552734375}, {"start": 592.99, "end": 593.17, "word": " be", "probability": 0.94677734375}, {"start": 593.17, "end": 593.47, "word": " positive,", "probability": 0.94140625}, {"start": 593.71, "end": 593.97, "word": " depends", "probability": 0.8349609375}, {"start": 593.97, "end": 594.29, "word": " on", "probability": 0.9443359375}, {"start": 594.29, "end": 594.49, "word": " the", "probability": 0.916015625}, {"start": 594.49, "end": 594.93, "word": " probability", "probability": 0.94482421875}, {"start": 594.93, "end": 595.17, "word": " you", "probability": 0.412841796875}, {"start": 595.17, "end": 595.37, "word": " have.", "probability": 0.943359375}, {"start": 596.51, "end": 596.85, "word": " If", "probability": 0.95361328125}, {"start": 596.85, "end": 597.81, "word": " the", "probability": 0.90673828125}, {"start": 597.81, "end": 598.13, "word": " probability", "probability": 0.93408203125}, {"start": 598.13, "end": 598.59, "word": " is", "probability": 0.93798828125}, {"start": 598.59, "end": 599.17, "word": " above", "probability": 0.9599609375}, {"start": 599.17, "end": 599.59, "word": " 0", "probability": 0.7119140625}, {"start": 599.59, "end": 599.99, "word": ".5,", "probability": 0.99609375}, {"start": 600.07, "end": 600.17, "word": " I", "probability": 0.86572265625}, {"start": 600.17, "end": 600.27, "word": " mean", "probability": 0.96533203125}, {"start": 600.27, "end": 600.45, "word": " 0", "probability": 0.81787109375}, {"start": 600.45, "end": 600.77, "word": ".5", "probability": 0.99853515625}, {"start": 600.77, "end": 601.01, "word": " and", "probability": 0.84130859375}, {"start": 601.01, "end": 601.29, "word": " greater", "probability": 0.8798828125}, {"start": 601.29, "end": 601.59, "word": " than", "probability": 0.9296875}, {"start": 601.59, "end": 601.79, "word": " 0", "probability": 0.93505859375}, {"start": 601.79, "end": 602.09, "word": ".5,", "probability": 0.998291015625}, {"start": 603.19, "end": 603.39, "word": " 
this", "probability": 0.537109375}, {"start": 603.39, "end": 603.77, "word": " corresponds", "probability": 0.8203125}, {"start": 603.77, "end": 604.09, "word": " to", "probability": 0.84326171875}, {"start": 604.09, "end": 604.25, "word": " 10.", "probability": 0.39990234375}], "temperature": 1.0}, {"id": 23, "seek": 63286, "start": 605.54, "end": 632.86, "text": " But if z-score is negative, I'm sorry, if z-score is negative, then the probability should be smaller than 0.5. So if the probability is given less than 0.5, then your z-score should be negative, otherwise should be positive. So you have to be careful in this case. Now look at this example. Let x represent the time it takes in seconds.", "tokens": [583, 498, 710, 12, 4417, 418, 307, 3671, 11, 286, 478, 2597, 11, 498, 710, 12, 4417, 418, 307, 3671, 11, 550, 264, 8482, 820, 312, 4356, 813, 1958, 13, 20, 13, 407, 498, 264, 8482, 307, 2212, 1570, 813, 1958, 13, 20, 11, 550, 428, 710, 12, 4417, 418, 820, 312, 3671, 11, 5911, 820, 312, 3353, 13, 407, 291, 362, 281, 312, 5026, 294, 341, 1389, 13, 823, 574, 412, 341, 1365, 13, 961, 2031, 2906, 264, 565, 309, 2516, 294, 3949, 13], "avg_logprob": -0.1478924385683481, "compression_ratio": 1.6984924623115578, "no_speech_prob": 0.0, "words": [{"start": 605.54, "end": 605.9, "word": " But", "probability": 0.66552734375}, {"start": 605.9, "end": 606.1, "word": " if", "probability": 0.90185546875}, {"start": 606.1, "end": 606.28, "word": " z", "probability": 0.55224609375}, {"start": 606.28, "end": 606.68, "word": "-score", "probability": 0.751953125}, {"start": 606.68, "end": 606.94, "word": " is", "probability": 0.94921875}, {"start": 606.94, "end": 607.36, "word": " negative,", "probability": 0.93017578125}, {"start": 608.7, "end": 608.88, "word": " I'm", "probability": 0.63916015625}, {"start": 608.88, "end": 609.06, "word": " sorry,", "probability": 0.86669921875}, {"start": 609.18, "end": 609.32, "word": " if", "probability": 0.9033203125}, {"start": 609.32, 
"end": 609.52, "word": " z", "probability": 0.95068359375}, {"start": 609.52, "end": 609.7, "word": "-score", "probability": 0.9225260416666666}, {"start": 609.7, "end": 609.88, "word": " is", "probability": 0.94873046875}, {"start": 609.88, "end": 610.18, "word": " negative,", "probability": 0.9482421875}, {"start": 610.34, "end": 610.52, "word": " then", "probability": 0.86669921875}, {"start": 610.52, "end": 610.68, "word": " the", "probability": 0.83642578125}, {"start": 610.68, "end": 610.9, "word": " probability", "probability": 0.9404296875}, {"start": 610.9, "end": 611.18, "word": " should", "probability": 0.95849609375}, {"start": 611.18, "end": 611.44, "word": " be", "probability": 0.95556640625}, {"start": 611.44, "end": 612.34, "word": " smaller", "probability": 0.8466796875}, {"start": 612.34, "end": 612.8, "word": " than", "probability": 0.94140625}, {"start": 612.8, "end": 613.3, "word": " 0", "probability": 0.74169921875}, {"start": 613.3, "end": 613.6, "word": ".5.", "probability": 0.99560546875}, {"start": 613.82, "end": 614.1, "word": " So", "probability": 0.95556640625}, {"start": 614.1, "end": 614.7, "word": " if", "probability": 0.7041015625}, {"start": 614.7, "end": 614.88, "word": " the", "probability": 0.91455078125}, {"start": 614.88, "end": 615.24, "word": " probability", "probability": 0.9521484375}, {"start": 615.24, "end": 615.54, "word": " is", "probability": 0.94287109375}, {"start": 615.54, "end": 615.88, "word": " given", "probability": 0.87060546875}, {"start": 615.88, "end": 616.98, "word": " less", "probability": 0.85986328125}, {"start": 616.98, "end": 617.16, "word": " than", "probability": 0.93017578125}, {"start": 617.16, "end": 617.36, "word": " 0", "probability": 0.982421875}, {"start": 617.36, "end": 617.8, "word": ".5,", "probability": 0.998779296875}, {"start": 618.24, "end": 618.94, "word": " then", "probability": 0.84912109375}, {"start": 618.94, "end": 619.16, "word": " your", "probability": 0.82763671875}, {"start": 
619.16, "end": 619.36, "word": " z", "probability": 0.974609375}, {"start": 619.36, "end": 619.56, "word": "-score", "probability": 0.9248046875}, {"start": 619.56, "end": 619.78, "word": " should", "probability": 0.97412109375}, {"start": 619.78, "end": 619.9, "word": " be", "probability": 0.94873046875}, {"start": 619.9, "end": 620.24, "word": " negative,", "probability": 0.94287109375}, {"start": 620.34, "end": 620.6, "word": " otherwise", "probability": 0.86962890625}, {"start": 620.6, "end": 620.98, "word": " should", "probability": 0.63232421875}, {"start": 620.98, "end": 621.1, "word": " be", "probability": 0.947265625}, {"start": 621.1, "end": 621.44, "word": " positive.", "probability": 0.91943359375}, {"start": 621.74, "end": 621.92, "word": " So", "probability": 0.9619140625}, {"start": 621.92, "end": 622.06, "word": " you", "probability": 0.861328125}, {"start": 622.06, "end": 622.18, "word": " have", "probability": 0.9443359375}, {"start": 622.18, "end": 622.26, "word": " to", "probability": 0.97021484375}, {"start": 622.26, "end": 622.4, "word": " be", "probability": 0.95166015625}, {"start": 622.4, "end": 622.7, "word": " careful", "probability": 0.96337890625}, {"start": 622.7, "end": 623.02, "word": " in", "probability": 0.94287109375}, {"start": 623.02, "end": 623.3, "word": " this", "probability": 0.94482421875}, {"start": 623.3, "end": 623.72, "word": " case.", "probability": 0.9140625}, {"start": 625.7, "end": 626.22, "word": " Now", "probability": 0.9482421875}, {"start": 626.22, "end": 626.4, "word": " look", "probability": 0.7841796875}, {"start": 626.4, "end": 626.52, "word": " at", "probability": 0.96728515625}, {"start": 626.52, "end": 626.88, "word": " this", "probability": 0.9443359375}, {"start": 626.88, "end": 628.3, "word": " example.", "probability": 0.962890625}, {"start": 629.5, "end": 629.98, "word": " Let", "probability": 0.95361328125}, {"start": 629.98, "end": 630.24, "word": " x", "probability": 0.859375}, {"start": 630.24, 
"end": 630.76, "word": " represent", "probability": 0.75341796875}, {"start": 630.76, "end": 631.0, "word": " the", "probability": 0.91650390625}, {"start": 631.0, "end": 631.24, "word": " time", "probability": 0.896484375}, {"start": 631.24, "end": 631.44, "word": " it", "probability": 0.943359375}, {"start": 631.44, "end": 631.74, "word": " takes", "probability": 0.7666015625}, {"start": 631.74, "end": 632.38, "word": " in", "probability": 0.7958984375}, {"start": 632.38, "end": 632.86, "word": " seconds.", "probability": 0.810546875}], "temperature": 1.0}, {"id": 24, "seek": 65841, "start": 633.71, "end": 658.41, "text": " to download an image file from the internet. The same example as we did before. And here we assume that x is normal distribution with mean of 8 and standard deviation of 5. Now, let's see how can we find the value of x such that 20% of download times are smaller than x.", "tokens": [281, 5484, 364, 3256, 3991, 490, 264, 4705, 13, 440, 912, 1365, 382, 321, 630, 949, 13, 400, 510, 321, 6552, 300, 2031, 307, 2710, 7316, 365, 914, 295, 1649, 293, 3832, 25163, 295, 1025, 13, 823, 11, 718, 311, 536, 577, 393, 321, 915, 264, 2158, 295, 2031, 1270, 300, 945, 4, 295, 5484, 1413, 366, 4356, 813, 2031, 13], "avg_logprob": -0.1678427397724121, "compression_ratio": 1.433862433862434, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 633.71, "end": 633.97, "word": " to", "probability": 0.39404296875}, {"start": 633.97, "end": 634.45, "word": " download", "probability": 0.96142578125}, {"start": 634.45, "end": 634.67, "word": " an", "probability": 0.8779296875}, {"start": 634.67, "end": 635.05, "word": " image", "probability": 0.9228515625}, {"start": 635.05, "end": 635.77, "word": " file", "probability": 0.525390625}, {"start": 635.77, "end": 636.05, "word": " from", "probability": 0.767578125}, {"start": 636.05, "end": 636.21, "word": " the", "probability": 0.9189453125}, {"start": 636.21, "end": 636.55, "word": " internet.", "probability": 
0.62841796875}, {"start": 637.17, "end": 637.41, "word": " The", "probability": 0.7998046875}, {"start": 637.41, "end": 637.59, "word": " same", "probability": 0.904296875}, {"start": 637.59, "end": 637.95, "word": " example", "probability": 0.97021484375}, {"start": 637.95, "end": 638.15, "word": " as", "probability": 0.849609375}, {"start": 638.15, "end": 638.39, "word": " we", "probability": 0.92724609375}, {"start": 638.39, "end": 639.73, "word": " did", "probability": 0.94873046875}, {"start": 639.73, "end": 640.11, "word": " before.", "probability": 0.841796875}, {"start": 640.75, "end": 641.35, "word": " And", "probability": 0.93994140625}, {"start": 641.35, "end": 641.57, "word": " here", "probability": 0.84375}, {"start": 641.57, "end": 641.75, "word": " we", "probability": 0.64697265625}, {"start": 641.75, "end": 642.15, "word": " assume", "probability": 0.892578125}, {"start": 642.15, "end": 642.57, "word": " that", "probability": 0.93408203125}, {"start": 642.57, "end": 643.03, "word": " x", "probability": 0.5224609375}, {"start": 643.03, "end": 643.27, "word": " is", "probability": 0.93994140625}, {"start": 643.27, "end": 643.59, "word": " normal", "probability": 0.7578125}, {"start": 643.59, "end": 644.23, "word": " distribution", "probability": 0.8681640625}, {"start": 644.23, "end": 644.63, "word": " with", "probability": 0.833984375}, {"start": 644.63, "end": 644.79, "word": " mean", "probability": 0.95703125}, {"start": 644.79, "end": 644.99, "word": " of", "probability": 0.951171875}, {"start": 644.99, "end": 645.31, "word": " 8", "probability": 0.6845703125}, {"start": 645.31, "end": 645.63, "word": " and", "probability": 0.8642578125}, {"start": 645.63, "end": 645.99, "word": " standard", "probability": 0.7900390625}, {"start": 645.99, "end": 646.33, "word": " deviation", "probability": 0.95361328125}, {"start": 646.33, "end": 646.55, "word": " of", "probability": 0.9697265625}, {"start": 646.55, "end": 646.83, "word": " 5.", "probability": 
0.939453125}, {"start": 648.67, "end": 649.05, "word": " Now,", "probability": 0.92236328125}, {"start": 649.95, "end": 650.43, "word": " let's", "probability": 0.95263671875}, {"start": 650.43, "end": 650.53, "word": " see", "probability": 0.91064453125}, {"start": 650.53, "end": 650.63, "word": " how", "probability": 0.90380859375}, {"start": 650.63, "end": 650.81, "word": " can", "probability": 0.76416015625}, {"start": 650.81, "end": 650.91, "word": " we", "probability": 0.9140625}, {"start": 650.91, "end": 651.13, "word": " find", "probability": 0.88720703125}, {"start": 651.13, "end": 651.27, "word": " the", "probability": 0.8759765625}, {"start": 651.27, "end": 651.51, "word": " value", "probability": 0.97265625}, {"start": 651.51, "end": 651.71, "word": " of", "probability": 0.94384765625}, {"start": 651.71, "end": 651.95, "word": " x", "probability": 0.9736328125}, {"start": 651.95, "end": 653.95, "word": " such", "probability": 0.619140625}, {"start": 653.95, "end": 654.25, "word": " that", "probability": 0.9384765625}, {"start": 654.25, "end": 654.57, "word": " 20", "probability": 0.9296875}, {"start": 654.57, "end": 654.83, "word": "%", "probability": 0.8623046875}, {"start": 654.83, "end": 655.93, "word": " of", "probability": 0.94287109375}, {"start": 655.93, "end": 656.37, "word": " download", "probability": 0.88232421875}, {"start": 656.37, "end": 656.81, "word": " times", "probability": 0.94921875}, {"start": 656.81, "end": 657.19, "word": " are", "probability": 0.93798828125}, {"start": 657.19, "end": 657.73, "word": " smaller", "probability": 0.8701171875}, {"start": 657.73, "end": 658.05, "word": " than", "probability": 0.9326171875}, {"start": 658.05, "end": 658.41, "word": " x.", "probability": 0.93310546875}], "temperature": 1.0}, {"id": 25, "seek": 68714, "start": 661.06, "end": 687.14, "text": " So, this probability is a fraction. Also, always the probability is between 0 and 1. So, the probability here is 20%. 
In this case, your z-score should be negative. Because 20% is more than 0.5. So, z-score should be in this side, in the left side. So, again, he asks about finding x-value such that 20%.", "tokens": [407, 11, 341, 8482, 307, 257, 14135, 13, 2743, 11, 1009, 264, 8482, 307, 1296, 1958, 293, 502, 13, 407, 11, 264, 8482, 510, 307, 945, 6856, 682, 341, 1389, 11, 428, 710, 12, 4417, 418, 820, 312, 3671, 13, 1436, 945, 4, 307, 544, 813, 1958, 13, 20, 13, 407, 11, 710, 12, 4417, 418, 820, 312, 294, 341, 1252, 11, 294, 264, 1411, 1252, 13, 407, 11, 797, 11, 415, 8962, 466, 5006, 2031, 12, 29155, 1270, 300, 945, 6856], "avg_logprob": -0.2658132645021002, "compression_ratio": 1.622340425531915, "no_speech_prob": 0.0, "words": [{"start": 661.06, "end": 661.34, "word": " So,", "probability": 0.497802734375}, {"start": 661.56, "end": 661.56, "word": " this", "probability": 0.80908203125}, {"start": 661.56, "end": 661.94, "word": " probability", "probability": 0.919921875}, {"start": 661.94, "end": 662.24, "word": " is", "probability": 0.919921875}, {"start": 662.24, "end": 662.36, "word": " a", "probability": 0.87109375}, {"start": 662.36, "end": 662.74, "word": " fraction.", "probability": 0.84912109375}, {"start": 663.5, "end": 663.94, "word": " Also,", "probability": 0.47802734375}, {"start": 664.28, "end": 664.58, "word": " always", "probability": 0.75439453125}, {"start": 664.58, "end": 664.74, "word": " the", "probability": 0.65234375}, {"start": 664.74, "end": 664.98, "word": " probability", "probability": 0.91162109375}, {"start": 664.98, "end": 665.16, "word": " is", "probability": 0.880859375}, {"start": 665.16, "end": 665.4, "word": " between", "probability": 0.87548828125}, {"start": 665.4, "end": 665.64, "word": " 0", "probability": 0.77734375}, {"start": 665.64, "end": 665.78, "word": " and", "probability": 0.92138671875}, {"start": 665.78, "end": 666.06, "word": " 1.", "probability": 0.98876953125}, {"start": 666.7, "end": 666.96, "word": " So,", "probability": 
0.8525390625}, {"start": 667.54, "end": 667.84, "word": " the", "probability": 0.8505859375}, {"start": 667.84, "end": 668.18, "word": " probability", "probability": 0.93798828125}, {"start": 668.18, "end": 668.68, "word": " here", "probability": 0.8134765625}, {"start": 668.68, "end": 668.86, "word": " is", "probability": 0.9462890625}, {"start": 668.86, "end": 669.56, "word": " 20%.", "probability": 0.8828125}, {"start": 669.56, "end": 670.86, "word": " In", "probability": 0.93017578125}, {"start": 670.86, "end": 671.04, "word": " this", "probability": 0.9462890625}, {"start": 671.04, "end": 671.4, "word": " case,", "probability": 0.912109375}, {"start": 671.5, "end": 671.66, "word": " your", "probability": 0.83740234375}, {"start": 671.66, "end": 671.82, "word": " z", "probability": 0.372802734375}, {"start": 671.82, "end": 672.08, "word": "-score", "probability": 0.8478190104166666}, {"start": 672.08, "end": 672.28, "word": " should", "probability": 0.96630859375}, {"start": 672.28, "end": 672.4, "word": " be", "probability": 0.95263671875}, {"start": 672.4, "end": 672.74, "word": " negative.", "probability": 0.912109375}, {"start": 674.0, "end": 674.52, "word": " Because", "probability": 0.912109375}, {"start": 674.52, "end": 674.82, "word": " 20", "probability": 0.87890625}, {"start": 674.82, "end": 675.14, "word": "%", "probability": 0.97802734375}, {"start": 675.14, "end": 675.38, "word": " is", "probability": 0.93212890625}, {"start": 675.38, "end": 675.56, "word": " more", "probability": 0.54541015625}, {"start": 675.56, "end": 675.76, "word": " than", "probability": 0.939453125}, {"start": 675.76, "end": 675.96, "word": " 0", "probability": 0.58740234375}, {"start": 675.96, "end": 676.34, "word": ".5.", "probability": 0.98828125}, {"start": 676.44, "end": 676.58, "word": " So,", "probability": 0.94482421875}, {"start": 676.66, "end": 676.78, "word": " z", "probability": 0.55224609375}, {"start": 676.78, "end": 677.0, "word": "-score", "probability": 
0.919921875}, {"start": 677.0, "end": 677.2, "word": " should", "probability": 0.96826171875}, {"start": 677.2, "end": 677.32, "word": " be", "probability": 0.94677734375}, {"start": 677.32, "end": 677.44, "word": " in", "probability": 0.60546875}, {"start": 677.44, "end": 677.66, "word": " this", "probability": 0.9345703125}, {"start": 677.66, "end": 677.98, "word": " side,", "probability": 0.84375}, {"start": 678.46, "end": 678.66, "word": " in", "probability": 0.7109375}, {"start": 678.66, "end": 678.8, "word": " the", "probability": 0.9130859375}, {"start": 678.8, "end": 679.0, "word": " left", "probability": 0.9462890625}, {"start": 679.0, "end": 679.32, "word": " side.", "probability": 0.837890625}, {"start": 682.34, "end": 682.68, "word": " So,", "probability": 0.88427734375}, {"start": 682.8, "end": 683.22, "word": " again,", "probability": 0.58447265625}, {"start": 683.3, "end": 683.36, "word": " he", "probability": 0.505859375}, {"start": 683.36, "end": 683.56, "word": " asks", "probability": 0.615234375}, {"start": 683.56, "end": 683.88, "word": " about", "probability": 0.80224609375}, {"start": 683.88, "end": 684.38, "word": " finding", "probability": 0.370361328125}, {"start": 684.38, "end": 684.68, "word": " x", "probability": 0.76123046875}, {"start": 684.68, "end": 685.08, "word": "-value", "probability": 0.79150390625}, {"start": 685.08, "end": 686.12, "word": " such", "probability": 0.75390625}, {"start": 686.12, "end": 686.38, "word": " that", "probability": 0.93896484375}, {"start": 686.38, "end": 687.14, "word": " 20%.", "probability": 0.6085205078125}], "temperature": 1.0}, {"id": 26, "seek": 71110, "start": 691.74, "end": 711.1, "text": " So here again we are looking for this value, for the value of x, which is smaller than the area to the left of this x, equals 0.2. 
Now, the first step, we have to find the z-score.", "tokens": [407, 510, 797, 321, 366, 1237, 337, 341, 2158, 11, 337, 264, 2158, 295, 2031, 11, 597, 307, 4356, 813, 264, 1859, 281, 264, 1411, 295, 341, 2031, 11, 6915, 1958, 13, 17, 13, 823, 11, 264, 700, 1823, 11, 321, 362, 281, 915, 264, 710, 12, 4417, 418, 13], "avg_logprob": -0.321997556031919, "compression_ratio": 1.3923076923076922, "no_speech_prob": 0.0, "words": [{"start": 691.74, "end": 692.22, "word": " So", "probability": 0.0261077880859375}, {"start": 692.22, "end": 692.7, "word": " here", "probability": 0.34814453125}, {"start": 692.7, "end": 692.94, "word": " again", "probability": 0.7666015625}, {"start": 692.94, "end": 693.1, "word": " we", "probability": 0.6552734375}, {"start": 693.1, "end": 693.22, "word": " are", "probability": 0.84521484375}, {"start": 693.22, "end": 693.46, "word": " looking", "probability": 0.8876953125}, {"start": 693.46, "end": 693.72, "word": " for", "probability": 0.951171875}, {"start": 693.72, "end": 693.96, "word": " this", "probability": 0.83984375}, {"start": 693.96, "end": 694.32, "word": " value,", "probability": 0.962890625}, {"start": 694.96, "end": 695.4, "word": " for", "probability": 0.7666015625}, {"start": 695.4, "end": 695.5, "word": " the", "probability": 0.9091796875}, {"start": 695.5, "end": 695.66, "word": " value", "probability": 0.9755859375}, {"start": 695.66, "end": 695.84, "word": " of", "probability": 0.68115234375}, {"start": 695.84, "end": 695.98, "word": " x,", "probability": 0.673828125}, {"start": 696.96, "end": 698.6, "word": " which", "probability": 0.9443359375}, {"start": 698.6, "end": 698.8, "word": " is", "probability": 0.95849609375}, {"start": 698.8, "end": 699.22, "word": " smaller", "probability": 0.8583984375}, {"start": 699.22, "end": 699.64, "word": " than", "probability": 0.92919921875}, {"start": 699.64, "end": 700.24, "word": " the", "probability": 0.8896484375}, {"start": 700.24, "end": 700.56, "word": " area", "probability": 
0.8994140625}, {"start": 700.56, "end": 700.76, "word": " to", "probability": 0.9091796875}, {"start": 700.76, "end": 700.86, "word": " the", "probability": 0.79931640625}, {"start": 700.86, "end": 701.02, "word": " left", "probability": 0.9501953125}, {"start": 701.02, "end": 701.16, "word": " of", "probability": 0.9658203125}, {"start": 701.16, "end": 701.36, "word": " this", "probability": 0.896484375}, {"start": 701.36, "end": 701.74, "word": " x,", "probability": 0.9375}, {"start": 702.54, "end": 703.58, "word": " equals", "probability": 0.564453125}, {"start": 703.58, "end": 703.98, "word": " 0", "probability": 0.54736328125}, {"start": 703.98, "end": 705.68, "word": ".2.", "probability": 0.98193359375}, {"start": 707.48, "end": 707.96, "word": " Now,", "probability": 0.93359375}, {"start": 708.08, "end": 708.18, "word": " the", "probability": 0.87255859375}, {"start": 708.18, "end": 708.42, "word": " first", "probability": 0.88232421875}, {"start": 708.42, "end": 708.6, "word": " step,", "probability": 0.70263671875}, {"start": 708.68, "end": 708.76, "word": " we", "probability": 0.9443359375}, {"start": 708.76, "end": 708.92, "word": " have", "probability": 0.93994140625}, {"start": 708.92, "end": 709.06, "word": " to", "probability": 0.96923828125}, {"start": 709.06, "end": 709.38, "word": " find", "probability": 0.89404296875}, {"start": 709.38, "end": 710.52, "word": " the", "probability": 0.908203125}, {"start": 710.52, "end": 710.74, "word": " z", "probability": 0.87841796875}, {"start": 710.74, "end": 711.1, "word": "-score.", "probability": 0.8474934895833334}], "temperature": 1.0}, {"id": 27, "seek": 74207, "start": 712.65, "end": 742.07, "text": " It's backward, z-score first, then x. Find a z-score corresponding to the probability of 0.2. The approximate one, the near value, I mean, to the 0.2 is 0.2005. Sometimes you have the exact value from the table you have, but most of the time you don't have it. 
So you have to look at the approximate value, which is very close to the one you have.", "tokens": [467, 311, 23897, 11, 710, 12, 4417, 418, 700, 11, 550, 2031, 13, 11809, 257, 710, 12, 4417, 418, 11760, 281, 264, 8482, 295, 1958, 13, 17, 13, 440, 30874, 472, 11, 264, 2651, 2158, 11, 286, 914, 11, 281, 264, 1958, 13, 17, 307, 1958, 13, 7629, 20, 13, 4803, 291, 362, 264, 1900, 2158, 490, 264, 3199, 291, 362, 11, 457, 881, 295, 264, 565, 291, 500, 380, 362, 309, 13, 407, 291, 362, 281, 574, 412, 264, 30874, 2158, 11, 597, 307, 588, 1998, 281, 264, 472, 291, 362, 13], "avg_logprob": -0.18766622134345642, "compression_ratio": 1.6415094339622642, "no_speech_prob": 0.0, "words": [{"start": 712.65, "end": 712.97, "word": " It's", "probability": 0.600341796875}, {"start": 712.97, "end": 713.39, "word": " backward,", "probability": 0.6982421875}, {"start": 713.73, "end": 713.87, "word": " z", "probability": 0.5810546875}, {"start": 713.87, "end": 714.09, "word": "-score", "probability": 0.7224934895833334}, {"start": 714.09, "end": 714.41, "word": " first,", "probability": 0.2025146484375}, {"start": 714.61, "end": 714.63, "word": " then", "probability": 0.86279296875}, {"start": 714.63, "end": 714.93, "word": " x.", "probability": 0.92724609375}, {"start": 715.69, "end": 716.11, "word": " Find", "probability": 0.8603515625}, {"start": 716.11, "end": 716.25, "word": " a", "probability": 0.583984375}, {"start": 716.25, "end": 716.43, "word": " z", "probability": 0.9892578125}, {"start": 716.43, "end": 716.77, "word": "-score", "probability": 0.919921875}, {"start": 716.77, "end": 717.53, "word": " corresponding", "probability": 0.7880859375}, {"start": 717.53, "end": 718.07, "word": " to", "probability": 0.9677734375}, {"start": 718.07, "end": 719.33, "word": " the", "probability": 0.916015625}, {"start": 719.33, "end": 719.71, "word": " probability", "probability": 0.95458984375}, {"start": 719.71, "end": 719.99, "word": " of", "probability": 0.9208984375}, {"start": 
719.99, "end": 720.15, "word": " 0", "probability": 0.6708984375}, {"start": 720.15, "end": 720.45, "word": ".2.", "probability": 0.957763671875}, {"start": 722.51, "end": 722.99, "word": " The", "probability": 0.8740234375}, {"start": 722.99, "end": 724.75, "word": " approximate", "probability": 0.86474609375}, {"start": 724.75, "end": 725.37, "word": " one,", "probability": 0.90771484375}, {"start": 726.19, "end": 726.31, "word": " the", "probability": 0.89697265625}, {"start": 726.31, "end": 726.53, "word": " near", "probability": 0.85986328125}, {"start": 726.53, "end": 726.97, "word": " value,", "probability": 0.95849609375}, {"start": 727.05, "end": 727.21, "word": " I", "probability": 0.90966796875}, {"start": 727.21, "end": 727.39, "word": " mean,", "probability": 0.96142578125}, {"start": 727.57, "end": 727.71, "word": " to", "probability": 0.9150390625}, {"start": 727.71, "end": 727.91, "word": " the", "probability": 0.890625}, {"start": 727.91, "end": 728.29, "word": " 0", "probability": 0.794921875}, {"start": 728.29, "end": 728.75, "word": ".2", "probability": 0.9970703125}, {"start": 728.75, "end": 729.09, "word": " is", "probability": 0.6689453125}, {"start": 729.09, "end": 729.27, "word": " 0", "probability": 0.97705078125}, {"start": 729.27, "end": 730.19, "word": ".2005.", "probability": 0.9054361979166666}, {"start": 730.51, "end": 730.91, "word": " Sometimes", "probability": 0.92431640625}, {"start": 730.91, "end": 731.35, "word": " you", "probability": 0.814453125}, {"start": 731.35, "end": 731.57, "word": " have", "probability": 0.935546875}, {"start": 731.57, "end": 731.73, "word": " the", "probability": 0.90625}, {"start": 731.73, "end": 732.19, "word": " exact", "probability": 0.9423828125}, {"start": 732.19, "end": 733.79, "word": " value", "probability": 0.96728515625}, {"start": 733.79, "end": 734.05, "word": " from", "probability": 0.87548828125}, {"start": 734.05, "end": 734.21, "word": " the", "probability": 0.921875}, {"start": 
734.21, "end": 734.45, "word": " table", "probability": 0.8984375}, {"start": 734.45, "end": 734.61, "word": " you", "probability": 0.93017578125}, {"start": 734.61, "end": 734.93, "word": " have,", "probability": 0.94677734375}, {"start": 735.81, "end": 736.07, "word": " but", "probability": 0.91748046875}, {"start": 736.07, "end": 736.37, "word": " most", "probability": 0.890625}, {"start": 736.37, "end": 736.49, "word": " of", "probability": 0.9658203125}, {"start": 736.49, "end": 736.57, "word": " the", "probability": 0.9169921875}, {"start": 736.57, "end": 736.77, "word": " time", "probability": 0.8857421875}, {"start": 736.77, "end": 736.89, "word": " you", "probability": 0.7001953125}, {"start": 736.89, "end": 737.03, "word": " don't", "probability": 0.9765625}, {"start": 737.03, "end": 737.27, "word": " have", "probability": 0.91943359375}, {"start": 737.27, "end": 737.43, "word": " it.", "probability": 0.6025390625}, {"start": 737.77, "end": 737.99, "word": " So", "probability": 0.9560546875}, {"start": 737.99, "end": 738.11, "word": " you", "probability": 0.6025390625}, {"start": 738.11, "end": 738.25, "word": " have", "probability": 0.94482421875}, {"start": 738.25, "end": 738.37, "word": " to", "probability": 0.9609375}, {"start": 738.37, "end": 738.53, "word": " look", "probability": 0.9638671875}, {"start": 738.53, "end": 738.69, "word": " at", "probability": 0.9619140625}, {"start": 738.69, "end": 739.05, "word": " the", "probability": 0.9072265625}, {"start": 739.05, "end": 739.99, "word": " approximate", "probability": 0.84814453125}, {"start": 739.99, "end": 740.43, "word": " value,", "probability": 0.92724609375}, {"start": 740.51, "end": 740.59, "word": " which", "probability": 0.93603515625}, {"start": 740.59, "end": 740.69, "word": " is", "probability": 0.9443359375}, {"start": 740.69, "end": 740.93, "word": " very", "probability": 0.8525390625}, {"start": 740.93, "end": 741.39, "word": " close", "probability": 0.888671875}, {"start": 741.39, 
"end": 741.55, "word": " to", "probability": 0.96240234375}, {"start": 741.55, "end": 741.67, "word": " the", "probability": 0.9111328125}, {"start": 741.67, "end": 741.79, "word": " one", "probability": 0.91650390625}, {"start": 741.79, "end": 741.93, "word": " you", "probability": 0.9404296875}, {"start": 741.93, "end": 742.07, "word": " have.", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 28, "seek": 77112, "start": 742.6, "end": 771.12, "text": " So here, we are looking for 0.2. The closest value to 0.2 is 0.2005. Now, the corresponding value to this probability is minus 0.8 all the way up to 4. So your z-score is negative 0.84. So this is the first step. Any question? Again.", "tokens": [407, 510, 11, 321, 366, 1237, 337, 1958, 13, 17, 13, 440, 13699, 2158, 281, 1958, 13, 17, 307, 1958, 13, 7629, 20, 13, 823, 11, 264, 11760, 2158, 281, 341, 8482, 307, 3175, 1958, 13, 23, 439, 264, 636, 493, 281, 1017, 13, 407, 428, 710, 12, 4417, 418, 307, 3671, 1958, 13, 25494, 13, 407, 341, 307, 264, 700, 1823, 13, 2639, 1168, 30, 3764, 13], "avg_logprob": -0.1692708350610042, "compression_ratio": 1.3928571428571428, "no_speech_prob": 0.0, "words": [{"start": 742.6, "end": 742.86, "word": " So", "probability": 0.6748046875}, {"start": 742.86, "end": 743.08, "word": " here,", "probability": 0.65869140625}, {"start": 743.16, "end": 743.3, "word": " we", "probability": 0.95263671875}, {"start": 743.3, "end": 743.46, "word": " are", "probability": 0.9130859375}, {"start": 743.46, "end": 743.7, "word": " looking", "probability": 0.919921875}, {"start": 743.7, "end": 743.98, "word": " for", "probability": 0.9541015625}, {"start": 743.98, "end": 744.32, "word": " 0", "probability": 0.55908203125}, {"start": 744.32, "end": 744.68, "word": ".2.", "probability": 0.979248046875}, {"start": 745.38, "end": 745.84, "word": " The", "probability": 0.8779296875}, {"start": 745.84, "end": 746.22, "word": " closest", "probability": 0.91943359375}, {"start": 746.22, "end": 746.7, 
"word": " value", "probability": 0.95068359375}, {"start": 746.7, "end": 746.92, "word": " to", "probability": 0.6279296875}, {"start": 746.92, "end": 747.16, "word": " 0", "probability": 0.81396484375}, {"start": 747.16, "end": 747.28, "word": ".2", "probability": 0.98486328125}, {"start": 747.28, "end": 747.5, "word": " is", "probability": 0.92138671875}, {"start": 747.5, "end": 747.76, "word": " 0", "probability": 0.953125}, {"start": 747.76, "end": 748.74, "word": ".2005.", "probability": 0.7913411458333334}, {"start": 749.54, "end": 750.22, "word": " Now,", "probability": 0.95166015625}, {"start": 750.36, "end": 750.66, "word": " the", "probability": 0.91845703125}, {"start": 750.66, "end": 751.16, "word": " corresponding", "probability": 0.83935546875}, {"start": 751.16, "end": 751.46, "word": " value", "probability": 0.96826171875}, {"start": 751.46, "end": 751.7, "word": " to", "probability": 0.921875}, {"start": 751.7, "end": 752.16, "word": " this", "probability": 0.93359375}, {"start": 752.16, "end": 752.88, "word": " probability", "probability": 0.9609375}, {"start": 752.88, "end": 753.98, "word": " is", "probability": 0.9208984375}, {"start": 753.98, "end": 754.34, "word": " minus", "probability": 0.486572265625}, {"start": 754.34, "end": 754.72, "word": " 0", "probability": 0.88623046875}, {"start": 754.72, "end": 755.26, "word": ".8", "probability": 0.9892578125}, {"start": 755.26, "end": 755.94, "word": " all", "probability": 0.7705078125}, {"start": 755.94, "end": 756.12, "word": " the", "probability": 0.919921875}, {"start": 756.12, "end": 756.32, "word": " way", "probability": 0.95263671875}, {"start": 756.32, "end": 756.66, "word": " up", "probability": 0.96044921875}, {"start": 756.66, "end": 756.96, "word": " to", "probability": 0.96728515625}, {"start": 756.96, "end": 757.68, "word": " 4.", "probability": 0.71337890625}, {"start": 758.36, "end": 758.84, "word": " So", "probability": 0.9599609375}, {"start": 758.84, "end": 759.42, "word": " 
your", "probability": 0.7001953125}, {"start": 759.42, "end": 759.62, "word": " z", "probability": 0.43896484375}, {"start": 759.62, "end": 759.92, "word": "-score", "probability": 0.8284505208333334}, {"start": 759.92, "end": 760.12, "word": " is", "probability": 0.93896484375}, {"start": 760.12, "end": 760.54, "word": " negative", "probability": 0.875}, {"start": 760.54, "end": 761.72, "word": " 0", "probability": 0.98095703125}, {"start": 761.72, "end": 762.36, "word": ".84.", "probability": 0.93994140625}, {"start": 763.28, "end": 763.86, "word": " So", "probability": 0.951171875}, {"start": 763.86, "end": 764.04, "word": " this", "probability": 0.90673828125}, {"start": 764.04, "end": 764.16, "word": " is", "probability": 0.93603515625}, {"start": 764.16, "end": 764.26, "word": " the", "probability": 0.91748046875}, {"start": 764.26, "end": 764.5, "word": " first", "probability": 0.88330078125}, {"start": 764.5, "end": 764.84, "word": " step.", "probability": 0.9052734375}, {"start": 767.12, "end": 767.84, "word": " Any", "probability": 0.8974609375}, {"start": 767.84, "end": 768.2, "word": " question?", "probability": 0.64306640625}, {"start": 770.4, "end": 771.12, "word": " Again.", "probability": 0.9501953125}], "temperature": 1.0}, {"id": 29, "seek": 79751, "start": 773.95, "end": 797.51, "text": " Now if we just go back to this equation, z equals x minus mu over sigma. 
A cross multiplication, I mean if you multiply both sides by sigma, you will get sigma times z equals x minus mu.", "tokens": [823, 498, 321, 445, 352, 646, 281, 341, 5367, 11, 710, 6915, 2031, 3175, 2992, 670, 12771, 13, 316, 3278, 27290, 11, 286, 914, 498, 291, 12972, 1293, 4881, 538, 12771, 11, 291, 486, 483, 12771, 1413, 710, 6915, 2031, 3175, 2992, 13], "avg_logprob": -0.19708806750449268, "compression_ratio": 1.4724409448818898, "no_speech_prob": 0.0, "words": [{"start": 773.95, "end": 774.23, "word": " Now", "probability": 0.69970703125}, {"start": 774.23, "end": 774.43, "word": " if", "probability": 0.5537109375}, {"start": 774.43, "end": 774.57, "word": " we", "probability": 0.77294921875}, {"start": 774.57, "end": 775.29, "word": " just", "probability": 0.83349609375}, {"start": 775.29, "end": 775.63, "word": " go", "probability": 0.953125}, {"start": 775.63, "end": 775.91, "word": " back", "probability": 0.8779296875}, {"start": 775.91, "end": 776.37, "word": " to", "probability": 0.95654296875}, {"start": 776.37, "end": 776.57, "word": " this", "probability": 0.9375}, {"start": 776.57, "end": 777.05, "word": " equation,", "probability": 0.9736328125}, {"start": 779.93, "end": 780.33, "word": " z", "probability": 0.58935546875}, {"start": 780.33, "end": 780.63, "word": " equals", "probability": 0.595703125}, {"start": 780.63, "end": 780.83, "word": " x", "probability": 0.92919921875}, {"start": 780.83, "end": 781.13, "word": " minus", "probability": 0.974609375}, {"start": 781.13, "end": 781.31, "word": " mu", "probability": 0.84765625}, {"start": 781.31, "end": 781.51, "word": " over", "probability": 0.814453125}, {"start": 781.51, "end": 781.83, "word": " sigma.", "probability": 0.9130859375}, {"start": 782.93, "end": 783.47, "word": " A", "probability": 0.360107421875}, {"start": 783.47, "end": 783.67, "word": " cross", "probability": 0.9267578125}, {"start": 783.67, "end": 784.17, "word": " multiplication,", "probability": 0.80126953125}, {"start": 
785.51, "end": 785.67, "word": " I", "probability": 0.91943359375}, {"start": 785.67, "end": 785.79, "word": " mean", "probability": 0.97216796875}, {"start": 785.79, "end": 785.95, "word": " if", "probability": 0.79443359375}, {"start": 785.95, "end": 786.11, "word": " you", "probability": 0.9599609375}, {"start": 786.11, "end": 786.53, "word": " multiply", "probability": 0.8974609375}, {"start": 786.53, "end": 787.37, "word": " both", "probability": 0.8984375}, {"start": 787.37, "end": 787.81, "word": " sides", "probability": 0.888671875}, {"start": 787.81, "end": 788.07, "word": " by", "probability": 0.96484375}, {"start": 788.07, "end": 788.51, "word": " sigma,", "probability": 0.92822265625}, {"start": 789.79, "end": 790.01, "word": " you", "probability": 0.9521484375}, {"start": 790.01, "end": 790.25, "word": " will", "probability": 0.8818359375}, {"start": 790.25, "end": 790.87, "word": " get", "probability": 0.9443359375}, {"start": 790.87, "end": 793.29, "word": " sigma", "probability": 0.88818359375}, {"start": 793.29, "end": 793.95, "word": " times", "probability": 0.9453125}, {"start": 793.95, "end": 794.25, "word": " z", "probability": 0.95703125}, {"start": 794.25, "end": 795.69, "word": " equals", "probability": 0.8125}, {"start": 795.69, "end": 796.11, "word": " x", "probability": 0.9228515625}, {"start": 796.11, "end": 797.21, "word": " minus", "probability": 0.9794921875}, {"start": 797.21, "end": 797.51, "word": " mu.", "probability": 0.9580078125}], "temperature": 1.0}, {"id": 30, "seek": 82656, "start": 812.12, "end": 826.56, "text": " Now, in this question, he asks about, find the value of x such that 20% of download times are less than x.", "tokens": [823, 11, 294, 341, 1168, 11, 415, 8962, 466, 11, 915, 264, 2158, 295, 2031, 1270, 300, 945, 4, 295, 5484, 1413, 366, 1570, 813, 2031, 13], "avg_logprob": -0.33677456208637785, "compression_ratio": 1.1505376344086022, "no_speech_prob": 0.0, "words": [{"start": 812.12, "end": 812.76, "word": " 
Now,", "probability": 0.10162353515625}, {"start": 814.54, "end": 814.64, "word": " in", "probability": 0.689453125}, {"start": 814.64, "end": 814.9, "word": " this", "probability": 0.89501953125}, {"start": 814.9, "end": 815.5, "word": " question,", "probability": 0.83642578125}, {"start": 817.96, "end": 818.08, "word": " he", "probability": 0.77392578125}, {"start": 818.08, "end": 818.28, "word": " asks", "probability": 0.54736328125}, {"start": 818.28, "end": 818.72, "word": " about,", "probability": 0.8857421875}, {"start": 819.78, "end": 820.1, "word": " find", "probability": 0.51123046875}, {"start": 820.1, "end": 820.24, "word": " the", "probability": 0.650390625}, {"start": 820.24, "end": 820.4, "word": " value", "probability": 0.96875}, {"start": 820.4, "end": 820.54, "word": " of", "probability": 0.93115234375}, {"start": 820.54, "end": 820.82, "word": " x", "probability": 0.48779296875}, {"start": 820.82, "end": 822.26, "word": " such", "probability": 0.8662109375}, {"start": 822.26, "end": 822.52, "word": " that", "probability": 0.94482421875}, {"start": 822.52, "end": 822.8, "word": " 20", "probability": 0.85888671875}, {"start": 822.8, "end": 823.16, "word": "%", "probability": 0.8505859375}, {"start": 823.16, "end": 823.66, "word": " of", "probability": 0.962890625}, {"start": 823.66, "end": 824.18, "word": " download", "probability": 0.67626953125}, {"start": 824.18, "end": 825.22, "word": " times", "probability": 0.92431640625}, {"start": 825.22, "end": 825.54, "word": " are", "probability": 0.8515625}, {"start": 825.54, "end": 826.0, "word": " less", "probability": 0.87158203125}, {"start": 826.0, "end": 826.22, "word": " than", "probability": 0.9345703125}, {"start": 826.22, "end": 826.56, "word": " x.", "probability": 0.94189453125}], "temperature": 1.0}, {"id": 31, "seek": 85918, "start": 830.74, "end": 859.18, "text": " Now the probability is less than 0.5, so your z-score should be on the left side. 
So here we need to find the value of z first. Go back to the normal table you have. This is the normal table. We are looking for minus 0.2.", "tokens": [823, 264, 8482, 307, 1570, 813, 1958, 13, 20, 11, 370, 428, 710, 12, 4417, 418, 820, 312, 322, 264, 1411, 1252, 13, 407, 510, 321, 643, 281, 915, 264, 2158, 295, 710, 700, 13, 1037, 646, 281, 264, 2710, 3199, 291, 362, 13, 639, 307, 264, 2710, 3199, 13, 492, 366, 1237, 337, 3175, 1958, 13, 17, 13], "avg_logprob": -0.16549479762713115, "compression_ratio": 1.4050632911392404, "no_speech_prob": 0.0, "words": [{"start": 830.74, "end": 831.0, "word": " Now", "probability": 0.5830078125}, {"start": 831.0, "end": 831.16, "word": " the", "probability": 0.63623046875}, {"start": 831.16, "end": 831.46, "word": " probability", "probability": 0.91015625}, {"start": 831.46, "end": 831.8, "word": " is", "probability": 0.91162109375}, {"start": 831.8, "end": 832.0, "word": " less", "probability": 0.880859375}, {"start": 832.0, "end": 832.16, "word": " than", "probability": 0.92822265625}, {"start": 832.16, "end": 832.36, "word": " 0", "probability": 0.65478515625}, {"start": 832.36, "end": 832.78, "word": ".5,", "probability": 0.9873046875}, {"start": 832.98, "end": 833.3, "word": " so", "probability": 0.87353515625}, {"start": 833.3, "end": 833.9, "word": " your", "probability": 0.81884765625}, {"start": 833.9, "end": 834.08, "word": " z", "probability": 0.6748046875}, {"start": 834.08, "end": 834.4, "word": "-score", "probability": 0.7716471354166666}, {"start": 834.4, "end": 834.68, "word": " should", "probability": 0.96435546875}, {"start": 834.68, "end": 834.94, "word": " be", "probability": 0.94775390625}, {"start": 834.94, "end": 835.22, "word": " on", "probability": 0.5009765625}, {"start": 835.22, "end": 835.34, "word": " the", "probability": 0.91259765625}, {"start": 835.34, "end": 835.52, "word": " left", "probability": 0.94384765625}, {"start": 835.52, "end": 835.96, "word": " side.", "probability": 0.77734375}, 
{"start": 836.54, "end": 836.7, "word": " So", "probability": 0.88916015625}, {"start": 836.7, "end": 836.98, "word": " here", "probability": 0.76904296875}, {"start": 836.98, "end": 837.48, "word": " we", "probability": 0.59326171875}, {"start": 837.48, "end": 837.78, "word": " need", "probability": 0.91455078125}, {"start": 837.78, "end": 838.36, "word": " to", "probability": 0.95068359375}, {"start": 838.36, "end": 838.88, "word": " find", "probability": 0.89599609375}, {"start": 838.88, "end": 839.08, "word": " the", "probability": 0.9072265625}, {"start": 839.08, "end": 839.5, "word": " value", "probability": 0.9765625}, {"start": 839.5, "end": 839.78, "word": " of", "probability": 0.96533203125}, {"start": 839.78, "end": 839.98, "word": " z", "probability": 0.90869140625}, {"start": 839.98, "end": 840.4, "word": " first.", "probability": 0.8076171875}, {"start": 842.3, "end": 842.68, "word": " Go", "probability": 0.9306640625}, {"start": 842.68, "end": 843.02, "word": " back", "probability": 0.876953125}, {"start": 843.02, "end": 843.5, "word": " to", "probability": 0.9521484375}, {"start": 843.5, "end": 843.66, "word": " the", "probability": 0.90771484375}, {"start": 843.66, "end": 843.92, "word": " normal", "probability": 0.88623046875}, {"start": 843.92, "end": 844.22, "word": " table", "probability": 0.865234375}, {"start": 844.22, "end": 844.42, "word": " you", "probability": 0.79443359375}, {"start": 844.42, "end": 844.64, "word": " have.", "probability": 0.8369140625}, {"start": 847.68, "end": 848.02, "word": " This", "probability": 0.845703125}, {"start": 848.02, "end": 848.12, "word": " is", "probability": 0.95166015625}, {"start": 848.12, "end": 848.24, "word": " the", "probability": 0.837890625}, {"start": 848.24, "end": 848.48, "word": " normal", "probability": 0.88525390625}, {"start": 848.48, "end": 848.86, "word": " table.", "probability": 0.89453125}, {"start": 856.8, "end": 857.36, "word": " We", "probability": 0.93994140625}, {"start": 
857.36, "end": 857.5, "word": " are", "probability": 0.88427734375}, {"start": 857.5, "end": 857.78, "word": " looking", "probability": 0.91357421875}, {"start": 857.78, "end": 858.22, "word": " for", "probability": 0.95166015625}, {"start": 858.22, "end": 858.62, "word": " minus", "probability": 0.61376953125}, {"start": 858.62, "end": 858.88, "word": " 0", "probability": 0.94189453125}, {"start": 858.88, "end": 859.18, "word": ".2.", "probability": 0.992431640625}], "temperature": 1.0}, {"id": 32, "seek": 87475, "start": 860.27, "end": 874.75, "text": " I'm sorry, we are looking for 0.2. So the closest value to 0.2 is this one, 0.2005. So this is the closest value.", "tokens": [286, 478, 2597, 11, 321, 366, 1237, 337, 1958, 13, 17, 13, 407, 264, 13699, 2158, 281, 1958, 13, 17, 307, 341, 472, 11, 1958, 13, 7629, 20, 13, 407, 341, 307, 264, 13699, 2158, 13], "avg_logprob": -0.17694257401131294, "compression_ratio": 1.2527472527472527, "no_speech_prob": 7.152557373046875e-07, "words": [{"start": 860.27, "end": 860.83, "word": " I'm", "probability": 0.6453857421875}, {"start": 860.83, "end": 861.03, "word": " sorry,", "probability": 0.88037109375}, {"start": 861.11, "end": 861.15, "word": " we", "probability": 0.9404296875}, {"start": 861.15, "end": 861.25, "word": " are", "probability": 0.7265625}, {"start": 861.25, "end": 861.47, "word": " looking", "probability": 0.91650390625}, {"start": 861.47, "end": 861.91, "word": " for", "probability": 0.9521484375}, {"start": 861.91, "end": 862.81, "word": " 0", "probability": 0.331298828125}, {"start": 862.81, "end": 863.21, "word": ".2.", "probability": 0.96484375}, {"start": 865.23, "end": 865.91, "word": " So", "probability": 0.91650390625}, {"start": 865.91, "end": 866.11, "word": " the", "probability": 0.6240234375}, {"start": 866.11, "end": 866.45, "word": " closest", "probability": 0.92333984375}, {"start": 866.45, "end": 866.95, "word": " value", "probability": 0.97265625}, {"start": 866.95, "end": 868.13, "word": 
" to", "probability": 0.91015625}, {"start": 868.13, "end": 868.45, "word": " 0", "probability": 0.90673828125}, {"start": 868.45, "end": 868.69, "word": ".2", "probability": 0.993896484375}, {"start": 868.69, "end": 868.91, "word": " is", "probability": 0.92529296875}, {"start": 868.91, "end": 869.11, "word": " this", "probability": 0.908203125}, {"start": 869.11, "end": 869.41, "word": " one,", "probability": 0.8994140625}, {"start": 870.55, "end": 870.83, "word": " 0", "probability": 0.9716796875}, {"start": 870.83, "end": 871.97, "word": ".2005.", "probability": 0.9046223958333334}, {"start": 872.91, "end": 873.37, "word": " So", "probability": 0.94677734375}, {"start": 873.37, "end": 873.55, "word": " this", "probability": 0.76611328125}, {"start": 873.55, "end": 873.65, "word": " is", "probability": 0.9384765625}, {"start": 873.65, "end": 873.85, "word": " the", "probability": 0.89892578125}, {"start": 873.85, "end": 874.29, "word": " closest", "probability": 0.94677734375}, {"start": 874.29, "end": 874.75, "word": " value.", "probability": 0.9755859375}], "temperature": 1.0}, {"id": 33, "seek": 91755, "start": 889.63, "end": 917.55, "text": " So the exact answer is sometimes not given. So the approximate one, minus 0.8, all the way up to 4. So z-score minus 0.8. Any question? So the value of z-score is minus 0.84. 
So my corresponding x-value equals", "tokens": [407, 264, 1900, 1867, 307, 2171, 406, 2212, 13, 407, 264, 30874, 472, 11, 3175, 1958, 13, 23, 11, 439, 264, 636, 493, 281, 1017, 13, 407, 710, 12, 4417, 418, 3175, 1958, 13, 23, 13, 2639, 1168, 30, 407, 264, 2158, 295, 710, 12, 4417, 418, 307, 3175, 1958, 13, 25494, 13, 407, 452, 11760, 2031, 12, 29155, 6915], "avg_logprob": -0.21273052594700798, "compression_ratio": 1.4685314685314685, "no_speech_prob": 0.0, "words": [{"start": 889.63, "end": 890.11, "word": " So", "probability": 0.62890625}, {"start": 890.11, "end": 890.61, "word": " the", "probability": 0.6240234375}, {"start": 890.61, "end": 891.05, "word": " exact", "probability": 0.94677734375}, {"start": 891.05, "end": 891.71, "word": " answer", "probability": 0.94921875}, {"start": 891.71, "end": 892.15, "word": " is", "probability": 0.344970703125}, {"start": 892.15, "end": 892.61, "word": " sometimes", "probability": 0.93505859375}, {"start": 892.61, "end": 892.97, "word": " not", "probability": 0.93798828125}, {"start": 892.97, "end": 893.29, "word": " given.", "probability": 0.875}, {"start": 894.05, "end": 894.31, "word": " So", "probability": 0.93896484375}, {"start": 894.31, "end": 894.47, "word": " the", "probability": 0.849609375}, {"start": 894.47, "end": 894.85, "word": " approximate", "probability": 0.857421875}, {"start": 894.85, "end": 895.23, "word": " one,", "probability": 0.93115234375}, {"start": 895.47, "end": 895.73, "word": " minus", "probability": 0.76708984375}, {"start": 895.73, "end": 896.01, "word": " 0", "probability": 0.60693359375}, {"start": 896.01, "end": 896.43, "word": ".8,", "probability": 0.99267578125}, {"start": 896.59, "end": 896.99, "word": " all", "probability": 0.9453125}, {"start": 896.99, "end": 897.15, "word": " the", "probability": 0.91357421875}, {"start": 897.15, "end": 897.31, "word": " way", "probability": 0.95703125}, {"start": 897.31, "end": 897.55, "word": " up", "probability": 0.95947265625}, {"start": 
897.55, "end": 898.25, "word": " to", "probability": 0.9677734375}, {"start": 898.25, "end": 899.19, "word": " 4.", "probability": 0.587890625}, {"start": 900.03, "end": 900.51, "word": " So", "probability": 0.93408203125}, {"start": 900.51, "end": 900.81, "word": " z", "probability": 0.382568359375}, {"start": 900.81, "end": 901.19, "word": "-score", "probability": 0.76318359375}, {"start": 901.19, "end": 903.59, "word": " minus", "probability": 0.3193359375}, {"start": 903.59, "end": 903.95, "word": " 0", "probability": 0.978515625}, {"start": 903.95, "end": 904.27, "word": ".8.", "probability": 0.978759765625}, {"start": 905.53, "end": 906.03, "word": " Any", "probability": 0.892578125}, {"start": 906.03, "end": 906.41, "word": " question?", "probability": 0.6435546875}, {"start": 910.33, "end": 911.09, "word": " So", "probability": 0.84912109375}, {"start": 911.09, "end": 911.75, "word": " the", "probability": 0.8349609375}, {"start": 911.75, "end": 911.99, "word": " value", "probability": 0.9677734375}, {"start": 911.99, "end": 912.11, "word": " of", "probability": 0.962890625}, {"start": 912.11, "end": 912.27, "word": " z", "probability": 0.751953125}, {"start": 912.27, "end": 912.53, "word": "-score", "probability": 0.9235026041666666}, {"start": 912.53, "end": 912.75, "word": " is", "probability": 0.94091796875}, {"start": 912.75, "end": 913.11, "word": " minus", "probability": 0.97314453125}, {"start": 913.11, "end": 913.37, "word": " 0", "probability": 0.99609375}, {"start": 913.37, "end": 913.71, "word": ".84.", "probability": 0.994384765625}, {"start": 914.65, "end": 914.93, "word": " So", "probability": 0.96240234375}, {"start": 914.93, "end": 915.33, "word": " my", "probability": 0.9609375}, {"start": 915.33, "end": 916.23, "word": " corresponding", "probability": 0.67822265625}, {"start": 916.23, "end": 916.73, "word": " x", "probability": 0.95361328125}, {"start": 916.73, "end": 917.03, "word": "-value", "probability": 0.709228515625}, {"start": 
917.03, "end": 917.55, "word": " equals", "probability": 0.93359375}], "temperature": 1.0}, {"id": 34, "seek": 94535, "start": 919.15, "end": 945.35, "text": " X equal mu. The mu is given as A plus Z is minus 0.84 times sigma. Sigma is 5. You will end with 3.8. So this means the probability of X less than 3.8.", "tokens": [1783, 2681, 2992, 13, 440, 2992, 307, 2212, 382, 316, 1804, 1176, 307, 3175, 1958, 13, 25494, 1413, 12771, 13, 36595, 307, 1025, 13, 509, 486, 917, 365, 805, 13, 23, 13, 407, 341, 1355, 264, 8482, 295, 1783, 1570, 813, 805, 13, 23, 13], "avg_logprob": -0.26528534034024115, "compression_ratio": 1.2459016393442623, "no_speech_prob": 0.0, "words": [{"start": 919.15, "end": 919.81, "word": " X", "probability": 0.3984375}, {"start": 919.81, "end": 920.13, "word": " equal", "probability": 0.393798828125}, {"start": 920.13, "end": 920.45, "word": " mu.", "probability": 0.457763671875}, {"start": 920.95, "end": 921.17, "word": " The", "probability": 0.57177734375}, {"start": 921.17, "end": 921.29, "word": " mu", "probability": 0.90576171875}, {"start": 921.29, "end": 921.43, "word": " is", "probability": 0.65576171875}, {"start": 921.43, "end": 921.71, "word": " given", "probability": 0.88330078125}, {"start": 921.71, "end": 922.81, "word": " as", "probability": 0.6923828125}, {"start": 922.81, "end": 923.09, "word": " A", "probability": 0.7060546875}, {"start": 923.09, "end": 924.25, "word": " plus", "probability": 0.69482421875}, {"start": 924.25, "end": 926.87, "word": " Z", "probability": 0.42724609375}, {"start": 926.87, "end": 927.17, "word": " is", "probability": 0.892578125}, {"start": 927.17, "end": 927.61, "word": " minus", "probability": 0.95556640625}, {"start": 927.61, "end": 927.95, "word": " 0", "probability": 0.77734375}, {"start": 927.95, "end": 929.13, "word": ".84", "probability": 0.992431640625}, {"start": 929.13, "end": 930.49, "word": " times", "probability": 0.580078125}, {"start": 930.49, "end": 930.95, "word": " sigma.", 
"probability": 0.8447265625}, {"start": 931.33, "end": 931.77, "word": " Sigma", "probability": 0.8681640625}, {"start": 931.77, "end": 932.07, "word": " is", "probability": 0.95068359375}, {"start": 932.07, "end": 932.35, "word": " 5.", "probability": 0.66064453125}, {"start": 934.31, "end": 934.65, "word": " You", "probability": 0.76123046875}, {"start": 934.65, "end": 934.81, "word": " will", "probability": 0.86669921875}, {"start": 934.81, "end": 935.19, "word": " end", "probability": 0.8916015625}, {"start": 935.19, "end": 935.97, "word": " with", "probability": 0.80615234375}, {"start": 935.97, "end": 936.27, "word": " 3", "probability": 0.892578125}, {"start": 936.27, "end": 936.63, "word": ".8.", "probability": 0.9482421875}, {"start": 938.65, "end": 939.37, "word": " So", "probability": 0.95556640625}, {"start": 939.37, "end": 939.63, "word": " this", "probability": 0.85009765625}, {"start": 939.63, "end": 940.03, "word": " means", "probability": 0.9326171875}, {"start": 940.03, "end": 942.15, "word": " the", "probability": 0.76904296875}, {"start": 942.15, "end": 942.51, "word": " probability", "probability": 0.82177734375}, {"start": 942.51, "end": 942.79, "word": " of", "probability": 0.95654296875}, {"start": 942.79, "end": 943.15, "word": " X", "probability": 0.91162109375}, {"start": 943.15, "end": 944.29, "word": " less", "probability": 0.931640625}, {"start": 944.29, "end": 944.53, "word": " than", "probability": 0.94140625}, {"start": 944.53, "end": 944.81, "word": " 3", "probability": 0.98828125}, {"start": 944.81, "end": 945.35, "word": ".8.", "probability": 0.998779296875}], "temperature": 1.0}, {"id": 35, "seek": 97525, "start": 947.01, "end": 975.25, "text": " Equal point. Exactly, equal point. So in this case, the probability is given, which is 0.20. And we ask about what's the value of x in this case. So the first step was find the z-score. Then use this value. 
I mean, plug this value in.", "tokens": [15624, 304, 935, 13, 7587, 11, 2681, 935, 13, 407, 294, 341, 1389, 11, 264, 8482, 307, 2212, 11, 597, 307, 1958, 13, 2009, 13, 400, 321, 1029, 466, 437, 311, 264, 2158, 295, 2031, 294, 341, 1389, 13, 407, 264, 700, 1823, 390, 915, 264, 710, 12, 4417, 418, 13, 1396, 764, 341, 2158, 13, 286, 914, 11, 5452, 341, 2158, 294, 13], "avg_logprob": -0.26634615384615384, "compression_ratio": 1.46875, "no_speech_prob": 0.0, "words": [{"start": 947.01, "end": 947.29, "word": " Equal", "probability": 0.530792236328125}, {"start": 947.29, "end": 947.55, "word": " point.", "probability": 0.428955078125}, {"start": 947.81, "end": 948.35, "word": " Exactly,", "probability": 0.66650390625}, {"start": 948.47, "end": 948.67, "word": " equal", "probability": 0.92333984375}, {"start": 948.67, "end": 948.95, "word": " point.", "probability": 0.9677734375}, {"start": 952.43, "end": 952.99, "word": " So", "probability": 0.9267578125}, {"start": 952.99, "end": 953.13, "word": " in", "probability": 0.69482421875}, {"start": 953.13, "end": 953.29, "word": " this", "probability": 0.94677734375}, {"start": 953.29, "end": 953.53, "word": " case,", "probability": 0.9130859375}, {"start": 953.59, "end": 953.69, "word": " the", "probability": 0.91650390625}, {"start": 953.69, "end": 954.01, "word": " probability", "probability": 0.95703125}, {"start": 954.01, "end": 954.27, "word": " is", "probability": 0.921875}, {"start": 954.27, "end": 954.51, "word": " given,", "probability": 0.9013671875}, {"start": 955.67, "end": 957.23, "word": " which", "probability": 0.9501953125}, {"start": 957.23, "end": 957.37, "word": " is", "probability": 0.94580078125}, {"start": 957.37, "end": 957.55, "word": " 0", "probability": 0.58203125}, {"start": 957.55, "end": 957.91, "word": ".20.", "probability": 0.992919921875}, {"start": 958.87, "end": 959.43, "word": " And", "probability": 0.9501953125}, {"start": 959.43, "end": 959.55, "word": " we", "probability": 0.328125}, 
{"start": 959.55, "end": 959.75, "word": " ask", "probability": 0.86865234375}, {"start": 959.75, "end": 960.09, "word": " about", "probability": 0.9013671875}, {"start": 960.09, "end": 960.49, "word": " what's", "probability": 0.8505859375}, {"start": 960.49, "end": 960.59, "word": " the", "probability": 0.9208984375}, {"start": 960.59, "end": 960.79, "word": " value", "probability": 0.978515625}, {"start": 960.79, "end": 960.95, "word": " of", "probability": 0.96044921875}, {"start": 960.95, "end": 961.27, "word": " x", "probability": 0.66748046875}, {"start": 961.27, "end": 961.71, "word": " in", "probability": 0.728515625}, {"start": 961.71, "end": 961.91, "word": " this", "probability": 0.94775390625}, {"start": 961.91, "end": 962.15, "word": " case.", "probability": 0.89013671875}, {"start": 963.17, "end": 963.73, "word": " So", "probability": 0.96044921875}, {"start": 963.73, "end": 964.05, "word": " the", "probability": 0.8662109375}, {"start": 964.05, "end": 964.31, "word": " first", "probability": 0.86962890625}, {"start": 964.31, "end": 964.51, "word": " step", "probability": 0.921875}, {"start": 964.51, "end": 965.51, "word": " was", "probability": 0.9443359375}, {"start": 965.51, "end": 967.17, "word": " find", "probability": 0.224609375}, {"start": 967.17, "end": 967.37, "word": " the", "probability": 0.395751953125}, {"start": 967.37, "end": 967.55, "word": " z", "probability": 0.41015625}, {"start": 967.55, "end": 967.83, "word": "-score.", "probability": 0.84912109375}, {"start": 969.21, "end": 969.77, "word": " Then", "probability": 0.853515625}, {"start": 969.77, "end": 970.15, "word": " use", "probability": 0.7490234375}, {"start": 970.15, "end": 970.49, "word": " this", "probability": 0.94921875}, {"start": 970.49, "end": 970.99, "word": " value.", "probability": 0.97265625}, {"start": 972.97, "end": 973.53, "word": " I", "probability": 0.99169921875}, {"start": 973.53, "end": 973.69, "word": " mean,", "probability": 0.95654296875}, {"start": 
973.69, "end": 973.95, "word": " plug", "probability": 0.73095703125}, {"start": 973.95, "end": 974.23, "word": " this", "probability": 0.9404296875}, {"start": 974.23, "end": 974.73, "word": " value", "probability": 0.97119140625}, {"start": 974.73, "end": 975.25, "word": " in.", "probability": 0.5615234375}], "temperature": 1.0}, {"id": 36, "seek": 99682, "start": 977.38, "end": 996.82, "text": " to find the corresponding X score. That's the backward normal calculations. Let's do one problem from the practice, which is number 18.", "tokens": [281, 915, 264, 11760, 1783, 6175, 13, 663, 311, 264, 23897, 2710, 20448, 13, 961, 311, 360, 472, 1154, 490, 264, 3124, 11, 597, 307, 1230, 2443, 13], "avg_logprob": -0.23208512519967966, "compression_ratio": 1.1929824561403508, "no_speech_prob": 0.0, "words": [{"start": 977.38, "end": 977.62, "word": " to", "probability": 0.476806640625}, {"start": 977.62, "end": 977.88, "word": " find", "probability": 0.890625}, {"start": 977.88, "end": 978.06, "word": " the", "probability": 0.640625}, {"start": 978.06, "end": 978.52, "word": " corresponding", "probability": 0.666015625}, {"start": 978.52, "end": 978.96, "word": " X", "probability": 0.5888671875}, {"start": 978.96, "end": 979.38, "word": " score.", "probability": 0.51318359375}, {"start": 981.5, "end": 982.22, "word": " That's", "probability": 0.76220703125}, {"start": 982.22, "end": 982.52, "word": " the", "probability": 0.86279296875}, {"start": 982.52, "end": 983.1, "word": " backward", "probability": 0.91064453125}, {"start": 983.1, "end": 984.08, "word": " normal", "probability": 0.7548828125}, {"start": 984.08, "end": 985.24, "word": " calculations.", "probability": 0.71533203125}, {"start": 988.82, "end": 989.82, "word": " Let's", "probability": 0.968505859375}, {"start": 989.82, "end": 990.08, "word": " do", "probability": 0.90625}, {"start": 990.08, "end": 990.98, "word": " one", "probability": 0.92626953125}, {"start": 990.98, "end": 991.36, "word": " problem", 
"probability": 0.89013671875}, {"start": 991.36, "end": 991.68, "word": " from", "probability": 0.88671875}, {"start": 991.68, "end": 991.84, "word": " the", "probability": 0.8359375}, {"start": 991.84, "end": 992.38, "word": " practice,", "probability": 0.93310546875}, {"start": 994.18, "end": 994.76, "word": " which", "probability": 0.951171875}, {"start": 994.76, "end": 994.92, "word": " is", "probability": 0.947265625}, {"start": 994.92, "end": 995.18, "word": " number", "probability": 0.86767578125}, {"start": 995.18, "end": 996.82, "word": " 18.", "probability": 0.9443359375}], "temperature": 1.0}, {"id": 37, "seek": 103471, "start": 1013.39, "end": 1034.71, "text": " Is it clear? The owners of a fish market determined that the average weight for a catfish", "tokens": [1119, 309, 1850, 30, 440, 7710, 295, 257, 3506, 2142, 9540, 300, 264, 4274, 3364, 337, 257, 3857, 11608], "avg_logprob": -0.21464843973517417, "compression_ratio": 1.125, "no_speech_prob": 0.0, "words": [{"start": 1013.3900000000001, "end": 1014.69, "word": " Is", "probability": 0.6357421875}, {"start": 1014.69, "end": 1015.99, "word": " it", "probability": 0.8623046875}, {"start": 1015.99, "end": 1016.31, "word": " clear?", "probability": 0.8837890625}, {"start": 1020.59, "end": 1021.89, "word": " The", "probability": 0.75634765625}, {"start": 1021.89, "end": 1022.51, "word": " owners", "probability": 0.1806640625}, {"start": 1022.51, "end": 1025.03, "word": " of", "probability": 0.95458984375}, {"start": 1025.03, "end": 1025.25, "word": " a", "probability": 0.98486328125}, {"start": 1025.25, "end": 1025.45, "word": " fish", "probability": 0.92529296875}, {"start": 1025.45, "end": 1026.13, "word": " market", "probability": 0.8974609375}, {"start": 1026.13, "end": 1030.31, "word": " determined", "probability": 0.91259765625}, {"start": 1030.31, "end": 1031.77, "word": " that", "probability": 0.923828125}, {"start": 1031.77, "end": 1032.77, "word": " the", "probability": 0.8671875}, {"start": 
1032.77, "end": 1033.17, "word": " average", "probability": 0.76318359375}, {"start": 1033.17, "end": 1033.63, "word": " weight", "probability": 0.919921875}, {"start": 1033.63, "end": 1033.97, "word": " for", "probability": 0.70654296875}, {"start": 1033.97, "end": 1034.13, "word": " a", "probability": 0.97998046875}, {"start": 1034.13, "end": 1034.71, "word": " catfish", "probability": 0.945068359375}], "temperature": 1.0}, {"id": 38, "seek": 105915, "start": 1035.67, "end": 1059.15, "text": " is 3.2 So this is the value of the mean 40", "tokens": [307, 805, 13, 17, 407, 341, 307, 264, 2158, 295, 264, 914, 3356], "avg_logprob": -0.3722098299435207, "compression_ratio": 0.9555555555555556, "no_speech_prob": 0.0, "words": [{"start": 1035.67, "end": 1036.13, "word": " is", "probability": 0.468017578125}, {"start": 1036.13, "end": 1036.57, "word": " 3", "probability": 0.76611328125}, {"start": 1036.57, "end": 1037.07, "word": ".2", "probability": 0.940673828125}, {"start": 1037.07, "end": 1040.57, "word": " So", "probability": 0.162109375}, {"start": 1040.57, "end": 1042.47, "word": " this", "probability": 0.80615234375}, {"start": 1042.47, "end": 1042.57, "word": " is", "probability": 0.9365234375}, {"start": 1042.57, "end": 1047.23, "word": " the", "probability": 0.8798828125}, {"start": 1047.23, "end": 1047.25, "word": " value", "probability": 0.974609375}, {"start": 1047.25, "end": 1047.27, "word": " of", "probability": 0.9658203125}, {"start": 1047.27, "end": 1047.99, "word": " the", "probability": 0.7744140625}, {"start": 1047.99, "end": 1047.99, "word": " mean", "probability": 0.89697265625}, {"start": 1047.99, "end": 1059.15, "word": " 40", "probability": 0.3701171875}], "temperature": 1.0}, {"id": 39, "seek": 109163, "start": 1068.31, "end": 1091.63, "text": " So again, the owner of a fish market determined that the average weight for a catfish is 3.2 pounds. 
So the mean is 3.2 with a standard deviation of 0.8.", "tokens": [407, 797, 11, 264, 7289, 295, 257, 3506, 2142, 9540, 300, 264, 4274, 3364, 337, 257, 3857, 11608, 307, 805, 13, 17, 8319, 13, 407, 264, 914, 307, 805, 13, 17, 365, 257, 3832, 25163, 295, 1958, 13, 23, 13], "avg_logprob": -0.1697789605070905, "compression_ratio": 1.305084745762712, "no_speech_prob": 0.0, "words": [{"start": 1068.3099999999997, "end": 1069.1899999999998, "word": " So", "probability": 0.471923828125}, {"start": 1069.1899999999998, "end": 1070.07, "word": " again,", "probability": 0.8369140625}, {"start": 1071.31, "end": 1073.31, "word": " the", "probability": 0.82861328125}, {"start": 1073.31, "end": 1073.71, "word": " owner", "probability": 0.5107421875}, {"start": 1073.71, "end": 1074.03, "word": " of", "probability": 0.97412109375}, {"start": 1074.03, "end": 1074.13, "word": " a", "probability": 0.9462890625}, {"start": 1074.13, "end": 1074.33, "word": " fish", "probability": 0.92919921875}, {"start": 1074.33, "end": 1074.97, "word": " market", "probability": 0.88720703125}, {"start": 1074.97, "end": 1077.01, "word": " determined", "probability": 0.89306640625}, {"start": 1077.01, "end": 1077.59, "word": " that", "probability": 0.908203125}, {"start": 1077.59, "end": 1077.75, "word": " the", "probability": 0.89013671875}, {"start": 1077.75, "end": 1078.07, "word": " average", "probability": 0.7744140625}, {"start": 1078.07, "end": 1078.57, "word": " weight", "probability": 0.91845703125}, {"start": 1078.57, "end": 1080.13, "word": " for", "probability": 0.71484375}, {"start": 1080.13, "end": 1080.33, "word": " a", "probability": 0.90771484375}, {"start": 1080.33, "end": 1080.85, "word": " catfish", "probability": 0.933349609375}, {"start": 1080.85, "end": 1081.17, "word": " is", "probability": 0.94970703125}, {"start": 1081.17, "end": 1081.43, "word": " 3", "probability": 0.951171875}, {"start": 1081.43, "end": 1082.35, "word": ".2", "probability": 0.98388671875}, {"start": 1082.35, 
"end": 1082.77, "word": " pounds.", "probability": 0.86181640625}, {"start": 1083.37, "end": 1083.57, "word": " So", "probability": 0.8193359375}, {"start": 1083.57, "end": 1083.73, "word": " the", "probability": 0.845703125}, {"start": 1083.73, "end": 1084.01, "word": " mean", "probability": 0.9443359375}, {"start": 1084.01, "end": 1086.35, "word": " is", "probability": 0.8916015625}, {"start": 1086.35, "end": 1086.57, "word": " 3", "probability": 0.9892578125}, {"start": 1086.57, "end": 1087.03, "word": ".2", "probability": 0.9921875}, {"start": 1087.03, "end": 1088.43, "word": " with", "probability": 0.451416015625}, {"start": 1088.43, "end": 1088.73, "word": " a", "probability": 0.95068359375}, {"start": 1088.73, "end": 1088.95, "word": " standard", "probability": 0.9033203125}, {"start": 1088.95, "end": 1089.39, "word": " deviation", "probability": 0.923828125}, {"start": 1089.39, "end": 1089.95, "word": " of", "probability": 0.9716796875}, {"start": 1089.95, "end": 1091.19, "word": " 0", "probability": 0.69091796875}, {"start": 1091.19, "end": 1091.63, "word": ".8.", "probability": 0.993408203125}], "temperature": 1.0}, {"id": 40, "seek": 111706, "start": 1092.1, "end": 1117.06, "text": " So sigma is 0.8. Now, assuming the weights of catfish are normally distributed. In this case, you ask about what's the probability that a randomly selected catfish will weigh more than 4.4. 
So what's the probability of X", "tokens": [407, 12771, 307, 1958, 13, 23, 13, 823, 11, 11926, 264, 17443, 295, 3857, 11608, 366, 5646, 12631, 13, 682, 341, 1389, 11, 291, 1029, 466, 437, 311, 264, 8482, 300, 257, 16979, 8209, 3857, 11608, 486, 13843, 544, 813, 1017, 13, 19, 13, 407, 437, 311, 264, 8482, 295, 1783], "avg_logprob": -0.21259014480389082, "compression_ratio": 1.4258064516129032, "no_speech_prob": 0.0, "words": [{"start": 1092.1, "end": 1092.64, "word": " So", "probability": 0.239990234375}, {"start": 1092.64, "end": 1092.78, "word": " sigma", "probability": 0.435791015625}, {"start": 1092.78, "end": 1094.92, "word": " is", "probability": 0.89501953125}, {"start": 1094.92, "end": 1095.12, "word": " 0", "probability": 0.467041015625}, {"start": 1095.12, "end": 1095.36, "word": ".8.", "probability": 0.976318359375}, {"start": 1097.1, "end": 1097.68, "word": " Now,", "probability": 0.92626953125}, {"start": 1097.84, "end": 1098.28, "word": " assuming", "probability": 0.89111328125}, {"start": 1098.28, "end": 1099.32, "word": " the", "probability": 0.85400390625}, {"start": 1099.32, "end": 1099.6, "word": " weights", "probability": 0.77490234375}, {"start": 1099.6, "end": 1099.88, "word": " of", "probability": 0.9716796875}, {"start": 1099.88, "end": 1100.5, "word": " catfish", "probability": 0.95166015625}, {"start": 1100.5, "end": 1101.58, "word": " are", "probability": 0.935546875}, {"start": 1101.58, "end": 1102.0, "word": " normally", "probability": 0.90869140625}, {"start": 1102.0, "end": 1102.64, "word": " distributed.", "probability": 0.91552734375}, {"start": 1103.58, "end": 1104.16, "word": " In", "probability": 0.93359375}, {"start": 1104.16, "end": 1104.34, "word": " this", "probability": 0.947265625}, {"start": 1104.34, "end": 1104.58, "word": " case,", "probability": 0.90576171875}, {"start": 1104.66, "end": 1104.7, "word": " you", "probability": 0.338623046875}, {"start": 1104.7, "end": 1104.9, "word": " ask", "probability": 0.91796875}, 
{"start": 1104.9, "end": 1105.26, "word": " about", "probability": 0.90283203125}, {"start": 1105.26, "end": 1106.7, "word": " what's", "probability": 0.815185546875}, {"start": 1106.7, "end": 1106.84, "word": " the", "probability": 0.88037109375}, {"start": 1106.84, "end": 1107.22, "word": " probability", "probability": 0.98046875}, {"start": 1107.22, "end": 1108.26, "word": " that", "probability": 0.90673828125}, {"start": 1108.26, "end": 1108.6, "word": " a", "probability": 0.890625}, {"start": 1108.6, "end": 1108.98, "word": " randomly", "probability": 0.85302734375}, {"start": 1108.98, "end": 1109.68, "word": " selected", "probability": 0.81787109375}, {"start": 1109.68, "end": 1110.44, "word": " catfish", "probability": 0.966064453125}, {"start": 1110.44, "end": 1111.02, "word": " will", "probability": 0.87353515625}, {"start": 1111.02, "end": 1111.32, "word": " weigh", "probability": 0.845703125}, {"start": 1111.32, "end": 1111.8, "word": " more", "probability": 0.943359375}, {"start": 1111.8, "end": 1112.26, "word": " than", "probability": 0.9501953125}, {"start": 1112.26, "end": 1113.02, "word": " 4", "probability": 0.98486328125}, {"start": 1113.02, "end": 1113.5, "word": ".4.", "probability": 0.99072265625}, {"start": 1113.9, "end": 1114.66, "word": " So", "probability": 0.8916015625}, {"start": 1114.66, "end": 1115.16, "word": " what's", "probability": 0.89306640625}, {"start": 1115.16, "end": 1115.28, "word": " the", "probability": 0.86669921875}, {"start": 1115.28, "end": 1115.62, "word": " probability", "probability": 0.9736328125}, {"start": 1115.62, "end": 1116.68, "word": " of", "probability": 0.9609375}, {"start": 1116.68, "end": 1117.06, "word": " X", "probability": 0.7001953125}], "temperature": 1.0}, {"id": 41, "seek": 114741, "start": 1118.51, "end": 1147.41, "text": " More than. So greater than 4. I just gave the idea to solve this problem. At home, you can compute it to find the exact answer. So first step, find z score. z is 4.4. 
Divide by z. Just compute this value.", "tokens": [5048, 813, 13, 407, 5044, 813, 1017, 13, 286, 445, 2729, 264, 1558, 281, 5039, 341, 1154, 13, 1711, 1280, 11, 291, 393, 14722, 309, 281, 915, 264, 1900, 1867, 13, 407, 700, 1823, 11, 915, 710, 6175, 13, 710, 307, 1017, 13, 19, 13, 9886, 482, 538, 710, 13, 1449, 14722, 341, 2158, 13], "avg_logprob": -0.20424106690500463, "compression_ratio": 1.3576158940397351, "no_speech_prob": 0.0, "words": [{"start": 1118.51, "end": 1118.87, "word": " More", "probability": 0.261474609375}, {"start": 1118.87, "end": 1119.09, "word": " than.", "probability": 0.923828125}, {"start": 1119.57, "end": 1119.73, "word": " So", "probability": 0.7509765625}, {"start": 1119.73, "end": 1120.07, "word": " greater", "probability": 0.75048828125}, {"start": 1120.07, "end": 1120.77, "word": " than", "probability": 0.92431640625}, {"start": 1120.77, "end": 1121.09, "word": " 4.", "probability": 0.64892578125}, {"start": 1125.83, "end": 1126.39, "word": " I", "probability": 0.81884765625}, {"start": 1126.39, "end": 1126.73, "word": " just", "probability": 0.8798828125}, {"start": 1126.73, "end": 1127.19, "word": " gave", "probability": 0.74853515625}, {"start": 1127.19, "end": 1127.41, "word": " the", "probability": 0.84912109375}, {"start": 1127.41, "end": 1127.79, "word": " idea", "probability": 0.92626953125}, {"start": 1127.79, "end": 1128.01, "word": " to", "probability": 0.82421875}, {"start": 1128.01, "end": 1128.21, "word": " solve", "probability": 0.93359375}, {"start": 1128.21, "end": 1128.39, "word": " this", "probability": 0.80517578125}, {"start": 1128.39, "end": 1128.81, "word": " problem.", "probability": 0.8740234375}, {"start": 1129.09, "end": 1129.29, "word": " At", "probability": 0.77197265625}, {"start": 1129.29, "end": 1129.49, "word": " home,", "probability": 0.8837890625}, {"start": 1129.53, "end": 1129.67, "word": " you", "probability": 0.9619140625}, {"start": 1129.67, "end": 1129.85, "word": " can", "probability": 
0.93994140625}, {"start": 1129.85, "end": 1130.13, "word": " compute", "probability": 0.50341796875}, {"start": 1130.13, "end": 1131.13, "word": " it", "probability": 0.73828125}, {"start": 1131.13, "end": 1133.21, "word": " to", "probability": 0.87744140625}, {"start": 1133.21, "end": 1133.43, "word": " find", "probability": 0.896484375}, {"start": 1133.43, "end": 1133.59, "word": " the", "probability": 0.916015625}, {"start": 1133.59, "end": 1133.87, "word": " exact", "probability": 0.927734375}, {"start": 1133.87, "end": 1134.19, "word": " answer.", "probability": 0.951171875}, {"start": 1134.59, "end": 1134.83, "word": " So", "probability": 0.900390625}, {"start": 1134.83, "end": 1135.07, "word": " first", "probability": 0.794921875}, {"start": 1135.07, "end": 1135.33, "word": " step,", "probability": 0.880859375}, {"start": 1135.39, "end": 1135.59, "word": " find", "probability": 0.86181640625}, {"start": 1135.59, "end": 1135.75, "word": " z", "probability": 0.703125}, {"start": 1135.75, "end": 1136.07, "word": " score.", "probability": 0.466064453125}, {"start": 1137.35, "end": 1137.85, "word": " z", "probability": 0.68408203125}, {"start": 1137.85, "end": 1138.15, "word": " is", "probability": 0.94482421875}, {"start": 1138.15, "end": 1138.59, "word": " 4", "probability": 0.99169921875}, {"start": 1138.59, "end": 1139.57, "word": ".4.", "probability": 0.989990234375}, {"start": 1141.73, "end": 1142.29, "word": " Divide", "probability": 0.8466796875}, {"start": 1142.29, "end": 1142.49, "word": " by", "probability": 0.9560546875}, {"start": 1142.49, "end": 1142.59, "word": " z.", "probability": 0.68896484375}, {"start": 1145.91, "end": 1146.47, "word": " Just", "probability": 0.85009765625}, {"start": 1146.47, "end": 1146.81, "word": " compute", "probability": 0.9482421875}, {"start": 1146.81, "end": 1147.07, "word": " this", "probability": 0.951171875}, {"start": 1147.07, "end": 1147.41, "word": " value.", "probability": 0.97509765625}], "temperature": 1.0}, 
{"id": 42, "seek": 117776, "start": 1148.68, "end": 1177.76, "text": " It's 0.8 divided by 0.8 equals 1. So z-score is 1. So we are looking for the probability of z greater than 1. 1.5. 1.2. 1.5. 1.5. 1.5. So I'm looking for the probability of x of z greater than", "tokens": [467, 311, 1958, 13, 23, 6666, 538, 1958, 13, 23, 6915, 502, 13, 407, 710, 12, 4417, 418, 307, 502, 13, 407, 321, 366, 1237, 337, 264, 8482, 295, 710, 5044, 813, 502, 13, 502, 13, 20, 13, 502, 13, 17, 13, 502, 13, 20, 13, 502, 13, 20, 13, 502, 13, 20, 13, 407, 286, 478, 1237, 337, 264, 8482, 295, 2031, 295, 710, 5044, 813], "avg_logprob": -0.2401194923064288, "compression_ratio": 1.5950413223140496, "no_speech_prob": 0.0, "words": [{"start": 1148.68, "end": 1149.08, "word": " It's", "probability": 0.7470703125}, {"start": 1149.08, "end": 1149.34, "word": " 0", "probability": 0.463623046875}, {"start": 1149.34, "end": 1149.72, "word": ".8", "probability": 0.99267578125}, {"start": 1149.72, "end": 1151.54, "word": " divided", "probability": 0.46875}, {"start": 1151.54, "end": 1151.78, "word": " by", "probability": 0.9677734375}, {"start": 1151.78, "end": 1152.06, "word": " 0", "probability": 0.97412109375}, {"start": 1152.06, "end": 1152.36, "word": ".8", "probability": 0.99853515625}, {"start": 1152.36, "end": 1152.8, "word": " equals", "probability": 0.87890625}, {"start": 1152.8, "end": 1153.1, "word": " 1.", "probability": 0.71826171875}, {"start": 1158.56, "end": 1159.16, "word": " So", "probability": 0.93408203125}, {"start": 1159.16, "end": 1159.4, "word": " z", "probability": 0.390625}, {"start": 1159.4, "end": 1159.62, "word": "-score", "probability": 0.6454264322916666}, {"start": 1159.62, "end": 1159.8, "word": " is", "probability": 0.9501953125}, {"start": 1159.8, "end": 1160.08, "word": " 1.", "probability": 0.82568359375}, {"start": 1161.22, "end": 1161.66, "word": " So", "probability": 0.95556640625}, {"start": 1161.66, "end": 1161.82, "word": " we", "probability": 0.87255859375}, 
{"start": 1161.82, "end": 1161.94, "word": " are", "probability": 0.9228515625}, {"start": 1161.94, "end": 1162.24, "word": " looking", "probability": 0.92236328125}, {"start": 1162.24, "end": 1162.48, "word": " for", "probability": 0.94775390625}, {"start": 1162.48, "end": 1162.66, "word": " the", "probability": 0.8994140625}, {"start": 1162.66, "end": 1163.0, "word": " probability", "probability": 0.66357421875}, {"start": 1163.0, "end": 1163.24, "word": " of", "probability": 0.90185546875}, {"start": 1163.24, "end": 1163.38, "word": " z", "probability": 0.958984375}, {"start": 1163.38, "end": 1163.8, "word": " greater", "probability": 0.91015625}, {"start": 1163.8, "end": 1164.08, "word": " than", "probability": 0.931640625}, {"start": 1164.08, "end": 1164.7, "word": " 1.", "probability": 0.79638671875}, {"start": 1168.34, "end": 1168.34, "word": " 1", "probability": 0.12213134765625}, {"start": 1168.34, "end": 1168.9, "word": ".5.", "probability": 0.70751953125}, {"start": 1170.02, "end": 1170.62, "word": " 1", "probability": 0.79541015625}, {"start": 1170.62, "end": 1171.16, "word": ".2.", "probability": 0.86669921875}, {"start": 1171.26, "end": 1171.6, "word": " 1", "probability": 0.9287109375}, {"start": 1171.6, "end": 1171.98, "word": ".5.", "probability": 0.930908203125}, {"start": 1172.52, "end": 1172.64, "word": " 1", "probability": 0.8095703125}, {"start": 1172.64, "end": 1173.08, "word": ".5.", "probability": 0.996337890625}, {"start": 1173.18, "end": 1173.48, "word": " 1", "probability": 0.55419921875}, {"start": 1173.48, "end": 1174.02, "word": ".5.", "probability": 0.99853515625}, {"start": 1174.18, "end": 1174.44, "word": " So", "probability": 0.79736328125}, {"start": 1174.44, "end": 1174.58, "word": " I'm", "probability": 0.6217041015625}, {"start": 1174.58, "end": 1174.8, "word": " looking", "probability": 0.91552734375}, {"start": 1174.8, "end": 1175.06, "word": " for", "probability": 0.951171875}, {"start": 1175.06, "end": 1175.22, "word": " 
the", "probability": 0.89697265625}, {"start": 1175.22, "end": 1175.56, "word": " probability", "probability": 0.95751953125}, {"start": 1175.56, "end": 1175.82, "word": " of", "probability": 0.87939453125}, {"start": 1175.82, "end": 1176.08, "word": " x", "probability": 0.61181640625}, {"start": 1176.08, "end": 1176.32, "word": " of", "probability": 0.9013671875}, {"start": 1176.32, "end": 1176.58, "word": " z", "probability": 0.9912109375}, {"start": 1176.58, "end": 1177.28, "word": " greater", "probability": 0.90234375}, {"start": 1177.28, "end": 1177.76, "word": " than", "probability": 0.94775390625}], "temperature": 1.0}, {"id": 43, "seek": 120726, "start": 1180.98, "end": 1207.26, "text": " 1 minus P of Z less than or equal to 1.5. Now go back to the table. Now 1.5 under 0. It's 0.9332.", "tokens": [502, 3175, 430, 295, 1176, 1570, 813, 420, 2681, 281, 502, 13, 20, 13, 823, 352, 646, 281, 264, 3199, 13, 823, 502, 13, 20, 833, 1958, 13, 467, 311, 1958, 13, 24, 10191, 17, 13], "avg_logprob": -0.2445101448007532, "compression_ratio": 1.0425531914893618, "no_speech_prob": 0.0, "words": [{"start": 1180.98, "end": 1181.66, "word": " 1", "probability": 0.3203125}, {"start": 1181.66, "end": 1182.18, "word": " minus", "probability": 0.367919921875}, {"start": 1182.18, "end": 1188.7, "word": " P", "probability": 0.2435302734375}, {"start": 1188.7, "end": 1188.88, "word": " of", "probability": 0.87890625}, {"start": 1188.88, "end": 1189.2, "word": " Z", "probability": 0.87841796875}, {"start": 1189.2, "end": 1190.2, "word": " less", "probability": 0.76416015625}, {"start": 1190.2, "end": 1190.34, "word": " than", "probability": 0.91259765625}, {"start": 1190.34, "end": 1190.56, "word": " or", "probability": 0.5712890625}, {"start": 1190.56, "end": 1190.62, "word": " equal", "probability": 0.89404296875}, {"start": 1190.62, "end": 1191.42, "word": " to", "probability": 0.9365234375}, {"start": 1191.42, "end": 1191.42, "word": " 1", "probability": 0.59912109375}, 
{"start": 1191.42, "end": 1191.8, "word": ".5.", "probability": 0.751220703125}, {"start": 1191.8, "end": 1192.04, "word": " Now", "probability": 0.6494140625}, {"start": 1192.04, "end": 1192.24, "word": " go", "probability": 0.689453125}, {"start": 1192.24, "end": 1192.46, "word": " back", "probability": 0.8779296875}, {"start": 1192.46, "end": 1192.58, "word": " to", "probability": 0.96630859375}, {"start": 1192.58, "end": 1192.7, "word": " the", "probability": 0.92578125}, {"start": 1192.7, "end": 1193.04, "word": " table.", "probability": 0.89697265625}, {"start": 1201.54, "end": 1202.22, "word": " Now", "probability": 0.92138671875}, {"start": 1202.22, "end": 1202.44, "word": " 1", "probability": 0.7841796875}, {"start": 1202.44, "end": 1202.98, "word": ".5", "probability": 0.9951171875}, {"start": 1202.98, "end": 1203.28, "word": " under", "probability": 0.90087890625}, {"start": 1203.28, "end": 1203.68, "word": " 0.", "probability": 0.6005859375}, {"start": 1205.0, "end": 1205.68, "word": " It's", "probability": 0.923583984375}, {"start": 1205.68, "end": 1205.9, "word": " 0", "probability": 0.83837890625}, {"start": 1205.9, "end": 1207.26, "word": ".9332.", "probability": 0.960205078125}], "temperature": 1.0}, {"id": 44, "seek": 123755, "start": 1211.41, "end": 1237.55, "text": " So, 1 minus this probability gives 0.668. That's the probability of X greater than 4.4. So, the answer is 0.0668. 
Now, for the same question.", "tokens": [407, 11, 502, 3175, 341, 8482, 2709, 1958, 13, 15237, 23, 13, 663, 311, 264, 8482, 295, 1783, 5044, 813, 1017, 13, 19, 13, 407, 11, 264, 1867, 307, 1958, 13, 12791, 27102, 13, 823, 11, 337, 264, 912, 1168, 13], "avg_logprob": -0.2157738073950722, "compression_ratio": 1.2456140350877194, "no_speech_prob": 0.0, "words": [{"start": 1211.4099999999999, "end": 1212.09, "word": " So,", "probability": 0.326416015625}, {"start": 1212.25, "end": 1212.29, "word": " 1", "probability": 0.609375}, {"start": 1212.29, "end": 1212.79, "word": " minus", "probability": 0.89990234375}, {"start": 1212.79, "end": 1215.73, "word": " this", "probability": 0.7822265625}, {"start": 1215.73, "end": 1216.23, "word": " probability", "probability": 0.93603515625}, {"start": 1216.23, "end": 1216.81, "word": " gives", "probability": 0.81640625}, {"start": 1216.81, "end": 1218.15, "word": " 0", "probability": 0.94091796875}, {"start": 1218.15, "end": 1219.75, "word": ".668.", "probability": 0.712646484375}, {"start": 1221.21, "end": 1221.89, "word": " That's", "probability": 0.8515625}, {"start": 1221.89, "end": 1222.03, "word": " the", "probability": 0.8759765625}, {"start": 1222.03, "end": 1222.25, "word": " probability", "probability": 0.94189453125}, {"start": 1222.25, "end": 1222.47, "word": " of", "probability": 0.93212890625}, {"start": 1222.47, "end": 1222.65, "word": " X", "probability": 0.736328125}, {"start": 1222.65, "end": 1222.99, "word": " greater", "probability": 0.88720703125}, {"start": 1222.99, "end": 1223.41, "word": " than", "probability": 0.94580078125}, {"start": 1223.41, "end": 1223.75, "word": " 4", "probability": 0.96728515625}, {"start": 1223.75, "end": 1224.35, "word": ".4.", "probability": 0.988037109375}, {"start": 1228.13, "end": 1228.81, "word": " So,", "probability": 0.931640625}, {"start": 1228.87, "end": 1228.93, "word": " the", "probability": 0.91845703125}, {"start": 1228.93, "end": 1229.17, "word": " answer", 
"probability": 0.9580078125}, {"start": 1229.17, "end": 1229.41, "word": " is", "probability": 0.94140625}, {"start": 1229.41, "end": 1229.59, "word": " 0", "probability": 0.92041015625}, {"start": 1229.59, "end": 1231.59, "word": ".0668.", "probability": 0.8068033854166666}, {"start": 1234.87, "end": 1235.55, "word": " Now,", "probability": 0.916015625}, {"start": 1235.67, "end": 1235.79, "word": " for", "probability": 0.939453125}, {"start": 1235.79, "end": 1235.95, "word": " the", "probability": 0.92236328125}, {"start": 1235.95, "end": 1236.17, "word": " same", "probability": 0.900390625}, {"start": 1236.17, "end": 1237.55, "word": " question.", "probability": 0.65966796875}], "temperature": 1.0}, {"id": 45, "seek": 126832, "start": 1241.32, "end": 1268.32, "text": " What's the probability that a randomly selected fish will weigh between 3 and 5 pounds? 3? Up to 5. So first we have to find the score for 3 out of 5. For it to be just 3 minus 3.2.", "tokens": [708, 311, 264, 8482, 300, 257, 16979, 8209, 3506, 486, 13843, 1296, 805, 293, 1025, 8319, 30, 805, 30, 5858, 281, 1025, 13, 407, 700, 321, 362, 281, 915, 264, 6175, 337, 805, 484, 295, 1025, 13, 1171, 309, 281, 312, 445, 805, 3175, 805, 13, 17, 13], "avg_logprob": -0.22417091350166166, "compression_ratio": 1.2816901408450705, "no_speech_prob": 0.0, "words": [{"start": 1241.32, "end": 1241.78, "word": " What's", "probability": 0.704833984375}, {"start": 1241.78, "end": 1241.92, "word": " the", "probability": 0.78173828125}, {"start": 1241.92, "end": 1242.26, "word": " probability", "probability": 0.9921875}, {"start": 1242.26, "end": 1243.06, "word": " that", "probability": 0.92041015625}, {"start": 1243.06, "end": 1243.42, "word": " a", "probability": 0.90966796875}, {"start": 1243.42, "end": 1243.78, "word": " randomly", "probability": 0.814453125}, {"start": 1243.78, "end": 1244.38, "word": " selected", "probability": 0.86181640625}, {"start": 1244.38, "end": 1244.86, "word": " fish", "probability": 
0.904296875}, {"start": 1244.86, "end": 1245.2, "word": " will", "probability": 0.8408203125}, {"start": 1245.2, "end": 1245.46, "word": " weigh", "probability": 0.84326171875}, {"start": 1245.46, "end": 1245.88, "word": " between", "probability": 0.85546875}, {"start": 1245.88, "end": 1246.14, "word": " 3", "probability": 0.59375}, {"start": 1246.14, "end": 1246.3, "word": " and", "probability": 0.8740234375}, {"start": 1246.3, "end": 1246.6, "word": " 5", "probability": 0.986328125}, {"start": 1246.6, "end": 1247.08, "word": " pounds?", "probability": 0.88525390625}, {"start": 1249.66, "end": 1250.4, "word": " 3?", "probability": 0.48388671875}, {"start": 1252.94, "end": 1253.68, "word": " Up", "probability": 0.8759765625}, {"start": 1253.68, "end": 1253.82, "word": " to", "probability": 0.96728515625}, {"start": 1253.82, "end": 1254.12, "word": " 5.", "probability": 0.94970703125}, {"start": 1256.58, "end": 1257.32, "word": " So", "probability": 0.8837890625}, {"start": 1257.32, "end": 1257.64, "word": " first", "probability": 0.69482421875}, {"start": 1257.64, "end": 1257.82, "word": " we", "probability": 0.407958984375}, {"start": 1257.82, "end": 1258.06, "word": " have", "probability": 0.90771484375}, {"start": 1258.06, "end": 1258.16, "word": " to", "probability": 0.96533203125}, {"start": 1258.16, "end": 1258.46, "word": " find", "probability": 0.9130859375}, {"start": 1258.46, "end": 1258.84, "word": " the", "probability": 0.63818359375}, {"start": 1258.84, "end": 1259.26, "word": " score", "probability": 0.787109375}, {"start": 1259.26, "end": 1260.46, "word": " for", "probability": 0.82470703125}, {"start": 1260.46, "end": 1261.04, "word": " 3", "probability": 0.9482421875}, {"start": 1261.04, "end": 1261.76, "word": " out", "probability": 0.6865234375}, {"start": 1261.76, "end": 1261.86, "word": " of", "probability": 0.97265625}, {"start": 1261.86, "end": 1262.18, "word": " 5.", "probability": 0.9951171875}, {"start": 1264.16, "end": 1264.6, "word": " 
For", "probability": 0.5380859375}, {"start": 1264.6, "end": 1264.76, "word": " it", "probability": 0.521484375}, {"start": 1264.76, "end": 1264.8, "word": " to", "probability": 0.7626953125}, {"start": 1264.8, "end": 1264.88, "word": " be", "probability": 0.943359375}, {"start": 1264.88, "end": 1265.34, "word": " just", "probability": 0.8310546875}, {"start": 1265.34, "end": 1267.06, "word": " 3", "probability": 0.875}, {"start": 1267.06, "end": 1267.44, "word": " minus", "probability": 0.908203125}, {"start": 1267.44, "end": 1267.74, "word": " 3", "probability": 0.97998046875}, {"start": 1267.74, "end": 1268.32, "word": ".2.", "probability": 0.927734375}], "temperature": 1.0}, {"id": 46, "seek": 129668, "start": 1270.72, "end": 1296.68, "text": " Divide by 0.8 is the first z value. Negative 0.2 divided by 0.8 minus 0.25. The other one, 5 minus 3.2 divided by 0.8. 1 minus 0.8 divided by 0.8 equals", "tokens": [9886, 482, 538, 1958, 13, 23, 307, 264, 700, 710, 2158, 13, 43230, 1958, 13, 17, 6666, 538, 1958, 13, 23, 3175, 1958, 13, 6074, 13, 440, 661, 472, 11, 1025, 3175, 805, 13, 17, 6666, 538, 1958, 13, 23, 13, 502, 3175, 1958, 13, 23, 6666, 538, 1958, 13, 23, 6915], "avg_logprob": -0.19103773359982473, "compression_ratio": 1.4711538461538463, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1270.72, "end": 1271.1, "word": " Divide", "probability": 0.6146240234375}, {"start": 1271.1, "end": 1271.26, "word": " by", "probability": 0.9443359375}, {"start": 1271.26, "end": 1271.52, "word": " 0", "probability": 0.66259765625}, {"start": 1271.52, "end": 1271.82, "word": ".8", "probability": 0.98681640625}, {"start": 1271.82, "end": 1272.02, "word": " is", "probability": 0.391357421875}, {"start": 1272.02, "end": 1272.2, "word": " the", "probability": 0.89990234375}, {"start": 1272.2, "end": 1272.5, "word": " first", "probability": 0.85546875}, {"start": 1272.5, "end": 1272.74, "word": " z", "probability": 0.67236328125}, {"start": 1272.74, "end": 1273.1, 
"word": " value.", "probability": 0.8486328125}, {"start": 1275.14, "end": 1275.78, "word": " Negative", "probability": 0.74853515625}, {"start": 1275.78, "end": 1276.1, "word": " 0", "probability": 0.95947265625}, {"start": 1276.1, "end": 1276.48, "word": ".2", "probability": 0.99462890625}, {"start": 1276.48, "end": 1277.16, "word": " divided", "probability": 0.480224609375}, {"start": 1277.16, "end": 1277.38, "word": " by", "probability": 0.97607421875}, {"start": 1277.38, "end": 1277.66, "word": " 0", "probability": 0.97705078125}, {"start": 1277.66, "end": 1278.08, "word": ".8", "probability": 0.997802734375}, {"start": 1278.08, "end": 1280.0, "word": " minus", "probability": 0.81005859375}, {"start": 1280.0, "end": 1282.08, "word": " 0", "probability": 0.8447265625}, {"start": 1282.08, "end": 1283.1, "word": ".25.", "probability": 0.964599609375}, {"start": 1284.6, "end": 1285.24, "word": " The", "probability": 0.890625}, {"start": 1285.24, "end": 1285.48, "word": " other", "probability": 0.89013671875}, {"start": 1285.48, "end": 1285.86, "word": " one,", "probability": 0.92138671875}, {"start": 1288.44, "end": 1289.2, "word": " 5", "probability": 0.81689453125}, {"start": 1289.2, "end": 1289.46, "word": " minus", "probability": 0.9384765625}, {"start": 1289.46, "end": 1289.66, "word": " 3", "probability": 0.9228515625}, {"start": 1289.66, "end": 1290.08, "word": ".2", "probability": 0.982421875}, {"start": 1290.08, "end": 1290.36, "word": " divided", "probability": 0.791015625}, {"start": 1290.36, "end": 1290.54, "word": " by", "probability": 0.97607421875}, {"start": 1290.54, "end": 1290.8, "word": " 0", "probability": 0.92822265625}, {"start": 1290.8, "end": 1291.16, "word": ".8.", "probability": 0.99853515625}, {"start": 1292.68, "end": 1293.32, "word": " 1", "probability": 0.91552734375}, {"start": 1293.32, "end": 1293.64, "word": " minus", "probability": 0.96435546875}, {"start": 1293.64, "end": 1293.88, "word": " 0", "probability": 0.97607421875}, 
{"start": 1293.88, "end": 1294.12, "word": ".8", "probability": 0.998046875}, {"start": 1294.12, "end": 1294.38, "word": " divided", "probability": 0.85546875}, {"start": 1294.38, "end": 1294.62, "word": " by", "probability": 0.9736328125}, {"start": 1294.62, "end": 1294.88, "word": " 0", "probability": 0.9736328125}, {"start": 1294.88, "end": 1295.32, "word": ".8", "probability": 0.99951171875}, {"start": 1295.32, "end": 1296.68, "word": " equals", "probability": 0.88525390625}], "temperature": 1.0}, {"id": 47, "seek": 132820, "start": 1302.68, "end": 1328.2, "text": " 2.25. Just double check this result. So here, the probability of X between 3 and 5 equals minus 0.25, smaller than Z, smaller than 2.5.", "tokens": [568, 13, 6074, 13, 1449, 3834, 1520, 341, 1874, 13, 407, 510, 11, 264, 8482, 295, 1783, 1296, 805, 293, 1025, 6915, 3175, 1958, 13, 6074, 11, 4356, 813, 1176, 11, 4356, 813, 568, 13, 20, 13], "avg_logprob": -0.2993421154586892, "compression_ratio": 1.2142857142857142, "no_speech_prob": 0.0, "words": [{"start": 1302.6799999999998, "end": 1303.3999999999999, "word": " 2", "probability": 0.1766357421875}, {"start": 1303.3999999999999, "end": 1304.12, "word": ".25.", "probability": 0.924560546875}, {"start": 1310.84, "end": 1311.56, "word": " Just", "probability": 0.6767578125}, {"start": 1311.56, "end": 1311.76, "word": " double", "probability": 0.58935546875}, {"start": 1311.76, "end": 1312.02, "word": " check", "probability": 0.6484375}, {"start": 1312.02, "end": 1312.28, "word": " this", "probability": 0.9208984375}, {"start": 1312.28, "end": 1312.66, "word": " result.", "probability": 0.95166015625}, {"start": 1314.9, "end": 1315.62, "word": " So", "probability": 0.6494140625}, {"start": 1315.62, "end": 1316.22, "word": " here,", "probability": 0.6279296875}, {"start": 1316.6, "end": 1317.02, "word": " the", "probability": 0.912109375}, {"start": 1317.02, "end": 1318.66, "word": " probability", "probability": 0.2607421875}, {"start": 1318.66, "end": 
1318.82, "word": " of", "probability": 0.94189453125}, {"start": 1318.82, "end": 1319.0, "word": " X", "probability": 0.84423828125}, {"start": 1319.0, "end": 1319.3, "word": " between", "probability": 0.89013671875}, {"start": 1319.3, "end": 1319.56, "word": " 3", "probability": 0.71923828125}, {"start": 1319.56, "end": 1319.66, "word": " and", "probability": 0.94384765625}, {"start": 1319.66, "end": 1320.06, "word": " 5", "probability": 0.98779296875}, {"start": 1320.06, "end": 1321.76, "word": " equals", "probability": 0.8466796875}, {"start": 1321.76, "end": 1324.24, "word": " minus", "probability": 0.76513671875}, {"start": 1324.24, "end": 1324.52, "word": " 0", "probability": 0.67138671875}, {"start": 1324.52, "end": 1325.14, "word": ".25,", "probability": 0.98779296875}, {"start": 1325.74, "end": 1326.44, "word": " smaller", "probability": 0.83203125}, {"start": 1326.44, "end": 1326.74, "word": " than", "probability": 0.9462890625}, {"start": 1326.74, "end": 1327.0, "word": " Z,", "probability": 0.830078125}, {"start": 1327.2, "end": 1327.5, "word": " smaller", "probability": 0.7861328125}, {"start": 1327.5, "end": 1327.7, "word": " than", "probability": 0.94970703125}, {"start": 1327.7, "end": 1327.86, "word": " 2", "probability": 0.94580078125}, {"start": 1327.86, "end": 1328.2, "word": ".5.", "probability": 0.70068359375}], "temperature": 1.0}, {"id": 48, "seek": 135809, "start": 1332.65, "end": 1358.09, "text": " So it's transformed from normal distribution to standardized normal distribution. So here instead of computing the probability of X between three and five, we are looking for the probability between Z between actually minus. It's minus because your value here is smaller than the average. 
The average was 3.2, so it should be negative.", "tokens": [407, 309, 311, 16894, 490, 2710, 7316, 281, 31677, 2710, 7316, 13, 407, 510, 2602, 295, 15866, 264, 8482, 295, 1783, 1296, 1045, 293, 1732, 11, 321, 366, 1237, 337, 264, 8482, 1296, 1176, 1296, 767, 3175, 13, 467, 311, 3175, 570, 428, 2158, 510, 307, 4356, 813, 264, 4274, 13, 440, 4274, 390, 805, 13, 17, 11, 370, 309, 820, 312, 3671, 13], "avg_logprob": -0.1753605769230769, "compression_ratio": 1.6231884057971016, "no_speech_prob": 0.0, "words": [{"start": 1332.65, "end": 1333.13, "word": " So", "probability": 0.8056640625}, {"start": 1333.13, "end": 1333.79, "word": " it's", "probability": 0.791748046875}, {"start": 1333.79, "end": 1334.55, "word": " transformed", "probability": 0.87353515625}, {"start": 1334.55, "end": 1335.25, "word": " from", "probability": 0.88232421875}, {"start": 1335.25, "end": 1336.09, "word": " normal", "probability": 0.69677734375}, {"start": 1336.09, "end": 1336.77, "word": " distribution", "probability": 0.83837890625}, {"start": 1336.77, "end": 1337.21, "word": " to", "probability": 0.7529296875}, {"start": 1337.21, "end": 1338.45, "word": " standardized", "probability": 0.83544921875}, {"start": 1338.45, "end": 1338.77, "word": " normal", "probability": 0.74658203125}, {"start": 1338.77, "end": 1339.21, "word": " distribution.", "probability": 0.859375}, {"start": 1339.99, "end": 1340.21, "word": " So", "probability": 0.9423828125}, {"start": 1340.21, "end": 1340.39, "word": " here", "probability": 0.6884765625}, {"start": 1340.39, "end": 1340.75, "word": " instead", "probability": 0.6005859375}, {"start": 1340.75, "end": 1341.19, "word": " of", "probability": 0.966796875}, {"start": 1341.19, "end": 1341.81, "word": " computing", "probability": 0.90478515625}, {"start": 1341.81, "end": 1342.11, "word": " the", "probability": 0.8935546875}, {"start": 1342.11, "end": 1342.43, "word": " probability", "probability": 0.931640625}, {"start": 1342.43, "end": 1342.71, "word": " of", 
"probability": 0.943359375}, {"start": 1342.71, "end": 1342.95, "word": " X", "probability": 0.6513671875}, {"start": 1342.95, "end": 1343.27, "word": " between", "probability": 0.87890625}, {"start": 1343.27, "end": 1343.53, "word": " three", "probability": 0.53857421875}, {"start": 1343.53, "end": 1343.67, "word": " and", "probability": 0.93896484375}, {"start": 1343.67, "end": 1344.05, "word": " five,", "probability": 0.89892578125}, {"start": 1344.57, "end": 1344.81, "word": " we", "probability": 0.95458984375}, {"start": 1344.81, "end": 1344.99, "word": " are", "probability": 0.9013671875}, {"start": 1344.99, "end": 1345.31, "word": " looking", "probability": 0.91064453125}, {"start": 1345.31, "end": 1345.57, "word": " for", "probability": 0.94580078125}, {"start": 1345.57, "end": 1345.71, "word": " the", "probability": 0.90087890625}, {"start": 1345.71, "end": 1346.07, "word": " probability", "probability": 0.9462890625}, {"start": 1346.07, "end": 1346.59, "word": " between", "probability": 0.88037109375}, {"start": 1346.59, "end": 1347.33, "word": " Z", "probability": 0.4130859375}, {"start": 1347.33, "end": 1347.69, "word": " between", "probability": 0.7578125}, {"start": 1347.69, "end": 1348.21, "word": " actually", "probability": 0.8623046875}, {"start": 1348.21, "end": 1349.33, "word": " minus.", "probability": 0.91650390625}, {"start": 1350.73, "end": 1351.01, "word": " It's", "probability": 0.6656494140625}, {"start": 1351.01, "end": 1351.35, "word": " minus", "probability": 0.98583984375}, {"start": 1351.35, "end": 1352.07, "word": " because", "probability": 0.876953125}, {"start": 1352.07, "end": 1352.63, "word": " your", "probability": 0.865234375}, {"start": 1352.63, "end": 1353.09, "word": " value", "probability": 0.96533203125}, {"start": 1353.09, "end": 1353.45, "word": " here", "probability": 0.845703125}, {"start": 1353.45, "end": 1353.77, "word": " is", "probability": 0.93798828125}, {"start": 1353.77, "end": 1354.13, "word": " smaller", 
"probability": 0.86376953125}, {"start": 1354.13, "end": 1354.43, "word": " than", "probability": 0.93408203125}, {"start": 1354.43, "end": 1354.59, "word": " the", "probability": 0.8896484375}, {"start": 1354.59, "end": 1354.91, "word": " average.", "probability": 0.77490234375}, {"start": 1355.77, "end": 1356.05, "word": " The", "probability": 0.85302734375}, {"start": 1356.05, "end": 1356.29, "word": " average", "probability": 0.783203125}, {"start": 1356.29, "end": 1356.51, "word": " was", "probability": 0.8876953125}, {"start": 1356.51, "end": 1356.79, "word": " 3", "probability": 0.6748046875}, {"start": 1356.79, "end": 1357.19, "word": ".2,", "probability": 0.98583984375}, {"start": 1357.27, "end": 1357.43, "word": " so", "probability": 0.93701171875}, {"start": 1357.43, "end": 1357.51, "word": " it", "probability": 0.9423828125}, {"start": 1357.51, "end": 1357.65, "word": " should", "probability": 0.9677734375}, {"start": 1357.65, "end": 1357.79, "word": " be", "probability": 0.9501953125}, {"start": 1357.79, "end": 1358.09, "word": " negative.", "probability": 0.896484375}], "temperature": 1.0}, {"id": 49, "seek": 138587, "start": 1359.63, "end": 1385.87, "text": " So z score between minus 0.25 all the way up to 2.25. So now, this is the probability we are looking for. Zero in the middle minus one-fourth to the left of z of zero, mu of zero. 
And this is the value of 2.25.", "tokens": [407, 710, 6175, 1296, 3175, 1958, 13, 6074, 439, 264, 636, 493, 281, 568, 13, 6074, 13, 407, 586, 11, 341, 307, 264, 8482, 321, 366, 1237, 337, 13, 17182, 294, 264, 2808, 3175, 472, 12, 23251, 392, 281, 264, 1411, 295, 710, 295, 4018, 11, 2992, 295, 4018, 13, 400, 341, 307, 264, 2158, 295, 568, 13, 6074, 13], "avg_logprob": -0.24641392660922692, "compression_ratio": 1.4256756756756757, "no_speech_prob": 0.0, "words": [{"start": 1359.63, "end": 1360.01, "word": " So", "probability": 0.5732421875}, {"start": 1360.01, "end": 1360.25, "word": " z", "probability": 0.1942138671875}, {"start": 1360.25, "end": 1360.49, "word": " score", "probability": 0.2239990234375}, {"start": 1360.49, "end": 1360.79, "word": " between", "probability": 0.8076171875}, {"start": 1360.79, "end": 1361.13, "word": " minus", "probability": 0.48486328125}, {"start": 1361.13, "end": 1361.35, "word": " 0", "probability": 0.27490234375}, {"start": 1361.35, "end": 1361.91, "word": ".25", "probability": 0.985107421875}, {"start": 1361.91, "end": 1362.47, "word": " all", "probability": 0.70556640625}, {"start": 1362.47, "end": 1362.63, "word": " the", "probability": 0.892578125}, {"start": 1362.63, "end": 1362.79, "word": " way", "probability": 0.95166015625}, {"start": 1362.79, "end": 1362.97, "word": " up", "probability": 0.85546875}, {"start": 1362.97, "end": 1363.11, "word": " to", "probability": 0.94677734375}, {"start": 1363.11, "end": 1363.29, "word": " 2", "probability": 0.93994140625}, {"start": 1363.29, "end": 1363.75, "word": ".25.", "probability": 0.956298828125}, {"start": 1365.07, "end": 1365.63, "word": " So", "probability": 0.91357421875}, {"start": 1365.63, "end": 1365.91, "word": " now,", "probability": 0.84912109375}, {"start": 1366.77, "end": 1367.23, "word": " this", "probability": 0.9267578125}, {"start": 1367.23, "end": 1367.35, "word": " is", "probability": 0.94189453125}, {"start": 1367.35, "end": 1367.49, "word": " the", 
"probability": 0.84521484375}, {"start": 1367.49, "end": 1367.89, "word": " probability", "probability": 0.9462890625}, {"start": 1367.89, "end": 1368.15, "word": " we", "probability": 0.890625}, {"start": 1368.15, "end": 1368.27, "word": " are", "probability": 0.9033203125}, {"start": 1368.27, "end": 1368.53, "word": " looking", "probability": 0.916015625}, {"start": 1368.53, "end": 1368.89, "word": " for.", "probability": 0.95703125}, {"start": 1370.85, "end": 1371.41, "word": " Zero", "probability": 0.6025390625}, {"start": 1371.41, "end": 1372.09, "word": " in", "probability": 0.865234375}, {"start": 1372.09, "end": 1372.21, "word": " the", "probability": 0.91748046875}, {"start": 1372.21, "end": 1372.55, "word": " middle", "probability": 0.94482421875}, {"start": 1372.55, "end": 1373.63, "word": " minus", "probability": 0.75341796875}, {"start": 1373.63, "end": 1376.59, "word": " one", "probability": 0.5947265625}, {"start": 1376.59, "end": 1378.37, "word": "-fourth", "probability": 0.8058268229166666}, {"start": 1378.37, "end": 1378.65, "word": " to", "probability": 0.72265625}, {"start": 1378.65, "end": 1378.75, "word": " the", "probability": 0.91796875}, {"start": 1378.75, "end": 1378.97, "word": " left", "probability": 0.93505859375}, {"start": 1378.97, "end": 1379.17, "word": " of", "probability": 0.96142578125}, {"start": 1379.17, "end": 1379.39, "word": " z", "probability": 0.8232421875}, {"start": 1379.39, "end": 1380.17, "word": " of", "probability": 0.7197265625}, {"start": 1380.17, "end": 1380.53, "word": " zero,", "probability": 0.81396484375}, {"start": 1381.51, "end": 1381.79, "word": " mu", "probability": 0.53125}, {"start": 1381.79, "end": 1381.95, "word": " of", "probability": 0.86474609375}, {"start": 1381.95, "end": 1382.19, "word": " zero.", "probability": 0.91015625}, {"start": 1383.23, "end": 1383.61, "word": " And", "probability": 0.9130859375}, {"start": 1383.61, "end": 1383.79, "word": " this", "probability": 0.82421875}, {"start": 
1383.79, "end": 1383.91, "word": " is", "probability": 0.921875}, {"start": 1383.91, "end": 1384.01, "word": " the", "probability": 0.86669921875}, {"start": 1384.01, "end": 1384.27, "word": " value", "probability": 0.97509765625}, {"start": 1384.27, "end": 1384.49, "word": " of", "probability": 0.96435546875}, {"start": 1384.49, "end": 1384.75, "word": " 2", "probability": 0.9609375}, {"start": 1384.75, "end": 1385.87, "word": ".25.", "probability": 0.98876953125}], "temperature": 1.0}, {"id": 50, "seek": 141668, "start": 1387.7, "end": 1416.68, "text": " Now we are looking actually for this probability. The area between minus 0.25 all the way up to 2.5. So this area equals the probability of Z less than 2.25 minus. And again, use the normal.", "tokens": [823, 321, 366, 1237, 767, 337, 341, 8482, 13, 440, 1859, 1296, 3175, 1958, 13, 6074, 439, 264, 636, 493, 281, 568, 13, 20, 13, 407, 341, 1859, 6915, 264, 8482, 295, 1176, 1570, 813, 568, 13, 6074, 3175, 13, 400, 797, 11, 764, 264, 2710, 13], "avg_logprob": -0.18554687841484943, "compression_ratio": 1.3642857142857143, "no_speech_prob": 0.0, "words": [{"start": 1387.7, "end": 1388.0, "word": " Now", "probability": 0.88818359375}, {"start": 1388.0, "end": 1388.18, "word": " we", "probability": 0.6962890625}, {"start": 1388.18, "end": 1388.3, "word": " are", "probability": 0.91796875}, {"start": 1388.3, "end": 1388.56, "word": " looking", "probability": 0.8759765625}, {"start": 1388.56, "end": 1389.06, "word": " actually", "probability": 0.7666015625}, {"start": 1389.06, "end": 1389.32, "word": " for", "probability": 0.9375}, {"start": 1389.32, "end": 1389.52, "word": " this", "probability": 0.9150390625}, {"start": 1389.52, "end": 1389.94, "word": " probability.", "probability": 0.92431640625}, {"start": 1392.96, "end": 1393.6, "word": " The", "probability": 0.87353515625}, {"start": 1393.6, "end": 1393.86, "word": " area", "probability": 0.90625}, {"start": 1393.86, "end": 1394.36, "word": " between", 
"probability": 0.8701171875}, {"start": 1394.36, "end": 1395.32, "word": " minus", "probability": 0.87744140625}, {"start": 1395.32, "end": 1395.6, "word": " 0", "probability": 0.81640625}, {"start": 1395.6, "end": 1396.02, "word": ".25", "probability": 0.994140625}, {"start": 1396.02, "end": 1397.06, "word": " all", "probability": 0.85498046875}, {"start": 1397.06, "end": 1397.24, "word": " the", "probability": 0.92041015625}, {"start": 1397.24, "end": 1397.38, "word": " way", "probability": 0.953125}, {"start": 1397.38, "end": 1397.58, "word": " up", "probability": 0.94921875}, {"start": 1397.58, "end": 1397.72, "word": " to", "probability": 0.9638671875}, {"start": 1397.72, "end": 1397.94, "word": " 2", "probability": 0.986328125}, {"start": 1397.94, "end": 1398.36, "word": ".5.", "probability": 0.8134765625}, {"start": 1399.98, "end": 1400.62, "word": " So", "probability": 0.939453125}, {"start": 1400.62, "end": 1400.82, "word": " this", "probability": 0.859375}, {"start": 1400.82, "end": 1401.12, "word": " area", "probability": 0.9169921875}, {"start": 1401.12, "end": 1401.68, "word": " equals", "probability": 0.93994140625}, {"start": 1401.68, "end": 1405.2, "word": " the", "probability": 0.34326171875}, {"start": 1405.2, "end": 1405.48, "word": " probability", "probability": 0.92431640625}, {"start": 1405.48, "end": 1405.8, "word": " of", "probability": 0.953125}, {"start": 1405.8, "end": 1406.02, "word": " Z", "probability": 0.6142578125}, {"start": 1406.02, "end": 1406.68, "word": " less", "probability": 0.84716796875}, {"start": 1406.68, "end": 1407.0, "word": " than", "probability": 0.93994140625}, {"start": 1407.0, "end": 1407.64, "word": " 2", "probability": 0.974609375}, {"start": 1407.64, "end": 1408.36, "word": ".25", "probability": 0.913330078125}, {"start": 1408.36, "end": 1409.0, "word": " minus.", "probability": 0.974609375}, {"start": 1414.28, "end": 1414.92, "word": " And", "probability": 0.262939453125}, {"start": 1414.92, "end": 1415.28, 
"word": " again,", "probability": 0.93896484375}, {"start": 1415.94, "end": 1416.24, "word": " use", "probability": 0.87109375}, {"start": 1416.24, "end": 1416.4, "word": " the", "probability": 0.91552734375}, {"start": 1416.4, "end": 1416.68, "word": " normal.", "probability": 0.783203125}], "temperature": 1.0}, {"id": 51, "seek": 144278, "start": 1417.08, "end": 1442.78, "text": " table to give this value and another one. Any questions? So first step here, we compute the z-score for each value x. So the problem is transformed from normal distribution to standardized normal distribution.", "tokens": [3199, 281, 976, 341, 2158, 293, 1071, 472, 13, 2639, 1651, 30, 407, 700, 1823, 510, 11, 321, 14722, 264, 710, 12, 4417, 418, 337, 1184, 2158, 2031, 13, 407, 264, 1154, 307, 16894, 490, 2710, 7316, 281, 31677, 2710, 7316, 13], "avg_logprob": -0.23110465740048608, "compression_ratio": 1.4383561643835616, "no_speech_prob": 0.0, "words": [{"start": 1417.08, "end": 1417.5, "word": " table", "probability": 0.3759765625}, {"start": 1417.5, "end": 1418.34, "word": " to", "probability": 0.89501953125}, {"start": 1418.34, "end": 1418.54, "word": " give", "probability": 0.75634765625}, {"start": 1418.54, "end": 1418.78, "word": " this", "probability": 0.935546875}, {"start": 1418.78, "end": 1419.14, "word": " value", "probability": 0.9677734375}, {"start": 1419.14, "end": 1419.38, "word": " and", "probability": 0.8779296875}, {"start": 1419.38, "end": 1419.6, "word": " another", "probability": 0.376220703125}, {"start": 1419.6, "end": 1419.86, "word": " one.", "probability": 0.74560546875}, {"start": 1422.98, "end": 1423.62, "word": " Any", "probability": 0.8349609375}, {"start": 1423.62, "end": 1423.94, "word": " questions?", "probability": 0.496337890625}, {"start": 1425.18, "end": 1425.96, "word": " So", "probability": 0.876953125}, {"start": 1425.96, "end": 1426.26, "word": " first", "probability": 0.66455078125}, {"start": 1426.26, "end": 1426.5, "word": " step", 
"probability": 0.896484375}, {"start": 1426.5, "end": 1426.78, "word": " here,", "probability": 0.857421875}, {"start": 1427.82, "end": 1428.16, "word": " we", "probability": 0.9501953125}, {"start": 1428.16, "end": 1430.64, "word": " compute", "probability": 0.8916015625}, {"start": 1430.64, "end": 1432.4, "word": " the", "probability": 0.88720703125}, {"start": 1432.4, "end": 1432.68, "word": " z", "probability": 0.7333984375}, {"start": 1432.68, "end": 1433.02, "word": "-score", "probability": 0.7985026041666666}, {"start": 1433.02, "end": 1433.3, "word": " for", "probability": 0.9384765625}, {"start": 1433.3, "end": 1433.52, "word": " each", "probability": 0.93994140625}, {"start": 1433.52, "end": 1433.78, "word": " value", "probability": 0.873046875}, {"start": 1433.78, "end": 1434.14, "word": " x.", "probability": 0.5283203125}, {"start": 1435.6, "end": 1436.1, "word": " So", "probability": 0.95849609375}, {"start": 1436.1, "end": 1436.3, "word": " the", "probability": 0.8525390625}, {"start": 1436.3, "end": 1436.62, "word": " problem", "probability": 0.85205078125}, {"start": 1436.62, "end": 1436.88, "word": " is", "probability": 0.689453125}, {"start": 1436.88, "end": 1437.6, "word": " transformed", "probability": 0.875}, {"start": 1437.6, "end": 1438.02, "word": " from", "probability": 0.88720703125}, {"start": 1438.02, "end": 1438.44, "word": " normal", "probability": 0.8310546875}, {"start": 1438.44, "end": 1439.1, "word": " distribution", "probability": 0.83544921875}, {"start": 1439.1, "end": 1441.38, "word": " to", "probability": 0.6640625}, {"start": 1441.38, "end": 1441.86, "word": " standardized", "probability": 0.80712890625}, {"start": 1441.86, "end": 1442.3, "word": " normal", "probability": 0.84765625}, {"start": 1442.3, "end": 1442.78, "word": " distribution.", "probability": 0.85791015625}], "temperature": 1.0}, {"id": 52, "seek": 147310, "start": 1444.18, "end": 1473.1, "text": " So it becomes z between minus 1.25 up to 2.25. 
Now, this area, this dashed area equals the area below 2.25 minus the area below minus 1.25. Now, by using the similar way we did before, you will compute the value of z. The probability of z is smaller than 2.25 by using", "tokens": [407, 309, 3643, 710, 1296, 3175, 502, 13, 6074, 493, 281, 568, 13, 6074, 13, 823, 11, 341, 1859, 11, 341, 8240, 292, 1859, 6915, 264, 1859, 2507, 568, 13, 6074, 3175, 264, 1859, 2507, 3175, 502, 13, 6074, 13, 823, 11, 538, 1228, 264, 2531, 636, 321, 630, 949, 11, 291, 486, 14722, 264, 2158, 295, 710, 13, 440, 8482, 295, 710, 307, 4356, 813, 568, 13, 6074, 538, 1228], "avg_logprob": -0.1979166730824444, "compression_ratio": 1.6107784431137724, "no_speech_prob": 0.0, "words": [{"start": 1444.18, "end": 1444.44, "word": " So", "probability": 0.66552734375}, {"start": 1444.44, "end": 1444.56, "word": " it", "probability": 0.75}, {"start": 1444.56, "end": 1444.88, "word": " becomes", "probability": 0.85546875}, {"start": 1444.88, "end": 1445.06, "word": " z", "probability": 0.5732421875}, {"start": 1445.06, "end": 1445.36, "word": " between", "probability": 0.8818359375}, {"start": 1445.36, "end": 1445.74, "word": " minus", "probability": 0.76708984375}, {"start": 1445.74, "end": 1445.96, "word": " 1", "probability": 0.39892578125}, {"start": 1445.96, "end": 1446.5, "word": ".25", "probability": 0.91259765625}, {"start": 1446.5, "end": 1447.4, "word": " up", "probability": 0.94189453125}, {"start": 1447.4, "end": 1447.56, "word": " to", "probability": 0.95751953125}, {"start": 1447.56, "end": 1447.74, "word": " 2", "probability": 0.98779296875}, {"start": 1447.74, "end": 1448.22, "word": ".25.", "probability": 0.967529296875}, {"start": 1450.1, "end": 1450.7, "word": " Now,", "probability": 0.9326171875}, {"start": 1450.8, "end": 1451.06, "word": " this", "probability": 0.93408203125}, {"start": 1451.06, "end": 1451.5, "word": " area,", "probability": 0.88671875}, {"start": 1451.96, "end": 1452.26, "word": " this", "probability": 
0.9482421875}, {"start": 1452.26, "end": 1452.66, "word": " dashed", "probability": 0.769287109375}, {"start": 1452.66, "end": 1453.04, "word": " area", "probability": 0.89306640625}, {"start": 1453.04, "end": 1454.08, "word": " equals", "probability": 0.5693359375}, {"start": 1454.08, "end": 1454.3, "word": " the", "probability": 0.896484375}, {"start": 1454.3, "end": 1454.54, "word": " area", "probability": 0.84423828125}, {"start": 1454.54, "end": 1455.0, "word": " below", "probability": 0.908203125}, {"start": 1455.0, "end": 1456.78, "word": " 2", "probability": 0.72265625}, {"start": 1456.78, "end": 1457.5, "word": ".25", "probability": 0.99169921875}, {"start": 1457.5, "end": 1459.9, "word": " minus", "probability": 0.89892578125}, {"start": 1459.9, "end": 1460.2, "word": " the", "probability": 0.91357421875}, {"start": 1460.2, "end": 1460.5, "word": " area", "probability": 0.87646484375}, {"start": 1460.5, "end": 1460.82, "word": " below", "probability": 0.90478515625}, {"start": 1460.82, "end": 1461.26, "word": " minus", "probability": 0.96142578125}, {"start": 1461.26, "end": 1461.54, "word": " 1", "probability": 0.97265625}, {"start": 1461.54, "end": 1461.96, "word": ".25.", "probability": 0.893310546875}, {"start": 1463.7, "end": 1464.06, "word": " Now,", "probability": 0.935546875}, {"start": 1464.32, "end": 1464.58, "word": " by", "probability": 0.92919921875}, {"start": 1464.58, "end": 1464.8, "word": " using", "probability": 0.9423828125}, {"start": 1464.8, "end": 1465.0, "word": " the", "probability": 0.55859375}, {"start": 1465.0, "end": 1465.24, "word": " similar", "probability": 0.9599609375}, {"start": 1465.24, "end": 1465.5, "word": " way", "probability": 0.84326171875}, {"start": 1465.5, "end": 1465.66, "word": " we", "probability": 0.9150390625}, {"start": 1465.66, "end": 1465.82, "word": " did", "probability": 0.96435546875}, {"start": 1465.82, "end": 1466.28, "word": " before,", "probability": 0.8701171875}, {"start": 1466.8, "end": 
1466.98, "word": " you", "probability": 0.314208984375}, {"start": 1466.98, "end": 1467.12, "word": " will", "probability": 0.416748046875}, {"start": 1467.12, "end": 1467.56, "word": " compute", "probability": 0.9375}, {"start": 1467.56, "end": 1467.76, "word": " the", "probability": 0.916015625}, {"start": 1467.76, "end": 1468.02, "word": " value", "probability": 0.9716796875}, {"start": 1468.02, "end": 1468.18, "word": " of", "probability": 0.9140625}, {"start": 1468.18, "end": 1468.38, "word": " z.", "probability": 0.96484375}, {"start": 1468.68, "end": 1469.22, "word": " The", "probability": 0.86181640625}, {"start": 1469.22, "end": 1469.66, "word": " probability", "probability": 0.689453125}, {"start": 1469.66, "end": 1469.88, "word": " of", "probability": 0.8583984375}, {"start": 1469.88, "end": 1469.98, "word": " z", "probability": 0.9794921875}, {"start": 1469.98, "end": 1470.06, "word": " is", "probability": 0.84228515625}, {"start": 1470.06, "end": 1470.3, "word": " smaller", "probability": 0.49462890625}, {"start": 1470.3, "end": 1470.5, "word": " than", "probability": 0.9404296875}, {"start": 1470.5, "end": 1470.96, "word": " 2", "probability": 0.919921875}, {"start": 1470.96, "end": 1471.52, "word": ".25", "probability": 0.996337890625}, {"start": 1471.52, "end": 1472.7, "word": " by", "probability": 0.82080078125}, {"start": 1472.7, "end": 1473.1, "word": " using", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 53, "seek": 149902, "start": 1473.84, "end": 1499.02, "text": " The normal table. So here, 2.2 up to 5. So 9, 8, 7, 8. 9, 8, 7, 8. 
So the area below 2.25, 2.2, this value.", "tokens": [440, 2710, 3199, 13, 407, 510, 11, 568, 13, 17, 493, 281, 1025, 13, 407, 1722, 11, 1649, 11, 1614, 11, 1649, 13, 1722, 11, 1649, 11, 1614, 11, 1649, 13, 407, 264, 1859, 2507, 568, 13, 6074, 11, 568, 13, 17, 11, 341, 2158, 13], "avg_logprob": -0.26213430343790256, "compression_ratio": 1.2, "no_speech_prob": 0.0, "words": [{"start": 1473.84, "end": 1474.16, "word": " The", "probability": 0.1510009765625}, {"start": 1474.16, "end": 1474.44, "word": " normal", "probability": 0.703125}, {"start": 1474.44, "end": 1474.82, "word": " table.", "probability": 0.78369140625}, {"start": 1476.16, "end": 1476.72, "word": " So", "probability": 0.7265625}, {"start": 1476.72, "end": 1476.92, "word": " here,", "probability": 0.642578125}, {"start": 1477.04, "end": 1477.18, "word": " 2", "probability": 0.95556640625}, {"start": 1477.18, "end": 1477.76, "word": ".2", "probability": 0.9755859375}, {"start": 1477.76, "end": 1479.42, "word": " up", "probability": 0.81689453125}, {"start": 1479.42, "end": 1479.58, "word": " to", "probability": 0.94189453125}, {"start": 1479.58, "end": 1479.9, "word": " 5.", "probability": 0.88232421875}, {"start": 1482.58, "end": 1483.14, "word": " So", "probability": 0.79443359375}, {"start": 1483.14, "end": 1483.48, "word": " 9,", "probability": 0.354736328125}, {"start": 1483.76, "end": 1484.26, "word": " 8,", "probability": 0.8603515625}, {"start": 1484.46, "end": 1484.7, "word": " 7,", "probability": 0.98681640625}, {"start": 1484.8, "end": 1485.0, "word": " 8.", "probability": 0.98681640625}, {"start": 1486.42, "end": 1486.98, "word": " 9,", "probability": 0.81640625}, {"start": 1487.04, "end": 1487.26, "word": " 8,", "probability": 0.9873046875}, {"start": 1487.4, "end": 1487.56, "word": " 7,", "probability": 0.998046875}, {"start": 1487.66, "end": 1487.9, "word": " 8.", "probability": 0.99609375}, {"start": 1493.9, "end": 1494.46, "word": " So", "probability": 0.9462890625}, {"start": 1494.46, 
"end": 1494.68, "word": " the", "probability": 0.75244140625}, {"start": 1494.68, "end": 1494.96, "word": " area", "probability": 0.89892578125}, {"start": 1494.96, "end": 1495.26, "word": " below", "probability": 0.91064453125}, {"start": 1495.26, "end": 1496.52, "word": " 2", "probability": 0.331787109375}, {"start": 1496.52, "end": 1497.6, "word": ".25,", "probability": 0.973876953125}, {"start": 1497.82, "end": 1498.1, "word": " 2", "probability": 0.97265625}, {"start": 1498.1, "end": 1498.5, "word": ".2,", "probability": 0.99365234375}, {"start": 1498.58, "end": 1498.72, "word": " this", "probability": 0.931640625}, {"start": 1498.72, "end": 1499.02, "word": " value.", "probability": 0.9736328125}], "temperature": 1.0}, {"id": 54, "seek": 152902, "start": 1499.76, "end": 1529.02, "text": " All the way up to 5 gives 987. Now, what's about the probability of Z smaller than minus 0.25? If you go back to the Z table, but for the other one, the negative one. Minus 2 minus 0.2 up to 5.", "tokens": [1057, 264, 636, 493, 281, 1025, 2709, 20860, 22, 13, 823, 11, 437, 311, 466, 264, 8482, 295, 1176, 4356, 813, 3175, 1958, 13, 6074, 30, 759, 291, 352, 646, 281, 264, 1176, 3199, 11, 457, 337, 264, 661, 472, 11, 264, 3671, 472, 13, 2829, 301, 568, 3175, 1958, 13, 17, 493, 281, 1025, 13], "avg_logprob": -0.18804824561403508, "compression_ratio": 1.3108108108108107, "no_speech_prob": 0.0, "words": [{"start": 1499.76, "end": 1500.08, "word": " All", "probability": 0.410400390625}, {"start": 1500.08, "end": 1500.26, "word": " the", "probability": 0.91162109375}, {"start": 1500.26, "end": 1500.44, "word": " way", "probability": 0.95703125}, {"start": 1500.44, "end": 1500.66, "word": " up", "probability": 0.92919921875}, {"start": 1500.66, "end": 1500.84, "word": " to", "probability": 0.89892578125}, {"start": 1500.84, "end": 1501.1, "word": " 5", "probability": 0.58447265625}, {"start": 1501.1, "end": 1501.62, "word": " gives", "probability": 0.771484375}, {"start": 1501.62, 
"end": 1503.94, "word": " 987.", "probability": 0.730712890625}, {"start": 1505.54, "end": 1506.18, "word": " Now,", "probability": 0.93017578125}, {"start": 1506.3, "end": 1506.68, "word": " what's", "probability": 0.861328125}, {"start": 1506.68, "end": 1506.9, "word": " about", "probability": 0.8583984375}, {"start": 1506.9, "end": 1507.1, "word": " the", "probability": 0.8994140625}, {"start": 1507.1, "end": 1507.46, "word": " probability", "probability": 0.95068359375}, {"start": 1507.46, "end": 1507.78, "word": " of", "probability": 0.94921875}, {"start": 1507.78, "end": 1508.08, "word": " Z", "probability": 0.5556640625}, {"start": 1508.08, "end": 1508.86, "word": " smaller", "probability": 0.76123046875}, {"start": 1508.86, "end": 1509.18, "word": " than", "probability": 0.9384765625}, {"start": 1509.18, "end": 1509.56, "word": " minus", "probability": 0.70849609375}, {"start": 1509.56, "end": 1511.22, "word": " 0", "probability": 0.7763671875}, {"start": 1511.22, "end": 1511.82, "word": ".25?", "probability": 0.99462890625}, {"start": 1512.98, "end": 1513.62, "word": " If", "probability": 0.9541015625}, {"start": 1513.62, "end": 1513.74, "word": " you", "probability": 0.95849609375}, {"start": 1513.74, "end": 1513.9, "word": " go", "probability": 0.9580078125}, {"start": 1513.9, "end": 1514.22, "word": " back", "probability": 0.875}, {"start": 1514.22, "end": 1514.62, "word": " to", "probability": 0.96142578125}, {"start": 1514.62, "end": 1514.82, "word": " the", "probability": 0.79248046875}, {"start": 1514.82, "end": 1515.06, "word": " Z", "probability": 0.93310546875}, {"start": 1515.06, "end": 1515.32, "word": " table,", "probability": 0.5302734375}, {"start": 1515.38, "end": 1515.54, "word": " but", "probability": 0.9140625}, {"start": 1515.54, "end": 1515.74, "word": " for", "probability": 0.88623046875}, {"start": 1515.74, "end": 1515.94, "word": " the", "probability": 0.92041015625}, {"start": 1515.94, "end": 1516.16, "word": " other", 
"probability": 0.87744140625}, {"start": 1516.16, "end": 1516.48, "word": " one,", "probability": 0.92626953125}, {"start": 1518.08, "end": 1518.3, "word": " the", "probability": 0.91015625}, {"start": 1518.3, "end": 1518.62, "word": " negative", "probability": 0.94775390625}, {"start": 1518.62, "end": 1518.96, "word": " one.", "probability": 0.912109375}, {"start": 1523.12, "end": 1523.76, "word": " Minus", "probability": 0.955810546875}, {"start": 1523.76, "end": 1524.28, "word": " 2", "probability": 0.666015625}, {"start": 1524.28, "end": 1524.7, "word": " minus", "probability": 0.4873046875}, {"start": 1524.7, "end": 1525.04, "word": " 0", "probability": 0.95556640625}, {"start": 1525.04, "end": 1525.46, "word": ".2", "probability": 0.982421875}, {"start": 1525.46, "end": 1528.54, "word": " up", "probability": 0.45947265625}, {"start": 1528.54, "end": 1528.7, "word": " to", "probability": 0.9345703125}, {"start": 1528.7, "end": 1529.02, "word": " 5.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 55, "seek": 155246, "start": 1530.26, "end": 1552.46, "text": " 0.4013 minus, that will give the probability between three and five. This is the second part. 
So the final answer.", "tokens": [1958, 13, 5254, 7668, 3175, 11, 300, 486, 976, 264, 8482, 1296, 1045, 293, 1732, 13, 639, 307, 264, 1150, 644, 13, 407, 264, 2572, 1867, 13], "avg_logprob": -0.34207588114908766, "compression_ratio": 1.1386138613861385, "no_speech_prob": 0.0, "words": [{"start": 1530.26, "end": 1531.3, "word": " 0", "probability": 0.1712646484375}, {"start": 1531.3, "end": 1532.32, "word": ".4013", "probability": 0.9041341145833334}, {"start": 1532.32, "end": 1534.78, "word": " minus,", "probability": 0.810546875}, {"start": 1536.62, "end": 1538.7, "word": " that", "probability": 0.919921875}, {"start": 1538.7, "end": 1538.9, "word": " will", "probability": 0.8896484375}, {"start": 1538.9, "end": 1539.2, "word": " give", "probability": 0.83642578125}, {"start": 1539.2, "end": 1540.0, "word": " the", "probability": 0.8447265625}, {"start": 1540.0, "end": 1540.74, "word": " probability", "probability": 0.9453125}, {"start": 1540.74, "end": 1541.32, "word": " between", "probability": 0.86572265625}, {"start": 1541.32, "end": 1542.92, "word": " three", "probability": 0.180419921875}, {"start": 1542.92, "end": 1543.1, "word": " and", "probability": 0.8046875}, {"start": 1543.1, "end": 1543.28, "word": " five.", "probability": 0.65234375}, {"start": 1546.18, "end": 1547.34, "word": " This", "probability": 0.84765625}, {"start": 1547.34, "end": 1547.48, "word": " is", "probability": 0.94921875}, {"start": 1547.48, "end": 1547.64, "word": " the", "probability": 0.91650390625}, {"start": 1547.64, "end": 1548.06, "word": " second", "probability": 0.900390625}, {"start": 1548.06, "end": 1548.9, "word": " part.", "probability": 0.90625}, {"start": 1551.12, "end": 1551.66, "word": " So", "probability": 0.890625}, {"start": 1551.66, "end": 1551.82, "word": " the", "probability": 0.66357421875}, {"start": 1551.82, "end": 1552.08, "word": " final", "probability": 0.93896484375}, {"start": 1552.08, "end": 1552.46, "word": " answer.", "probability": 0.95703125}], 
"temperature": 1.0}, {"id": 56, "seek": 158229, "start": 1560.63, "end": 1582.29, "text": " So this is the probability that the selected cash fish will weigh between three and five pounds. Now, other question is, for the same problem, you said a citation", "tokens": [407, 341, 307, 264, 8482, 300, 264, 8209, 6388, 3506, 486, 13843, 1296, 1045, 293, 1732, 8319, 13, 823, 11, 661, 1168, 307, 11, 337, 264, 912, 1154, 11, 291, 848, 257, 45590], "avg_logprob": -0.3446691071285921, "compression_ratio": 1.314516129032258, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 1560.63, "end": 1561.63, "word": " So", "probability": 0.12744140625}, {"start": 1561.63, "end": 1562.63, "word": " this", "probability": 0.783203125}, {"start": 1562.63, "end": 1562.75, "word": " is", "probability": 0.88330078125}, {"start": 1562.75, "end": 1562.83, "word": " the", "probability": 0.47509765625}, {"start": 1562.83, "end": 1563.15, "word": " probability", "probability": 0.95263671875}, {"start": 1563.15, "end": 1563.85, "word": " that", "probability": 0.9091796875}, {"start": 1563.85, "end": 1564.47, "word": " the", "probability": 0.86279296875}, {"start": 1564.47, "end": 1565.09, "word": " selected", "probability": 0.88720703125}, {"start": 1565.09, "end": 1565.45, "word": " cash", "probability": 0.454345703125}, {"start": 1565.45, "end": 1565.87, "word": " fish", "probability": 0.68798828125}, {"start": 1565.87, "end": 1566.61, "word": " will", "probability": 0.86474609375}, {"start": 1566.61, "end": 1566.85, "word": " weigh", "probability": 0.89306640625}, {"start": 1566.85, "end": 1567.35, "word": " between", "probability": 0.8369140625}, {"start": 1567.35, "end": 1568.71, "word": " three", "probability": 0.5458984375}, {"start": 1568.71, "end": 1568.95, "word": " and", "probability": 0.93017578125}, {"start": 1568.95, "end": 1569.49, "word": " five", "probability": 0.90673828125}, {"start": 1569.49, "end": 1570.65, "word": " pounds.", "probability": 0.83056640625}, 
{"start": 1571.81, "end": 1572.11, "word": " Now,", "probability": 0.82080078125}, {"start": 1573.15, "end": 1573.45, "word": " other", "probability": 0.37109375}, {"start": 1573.45, "end": 1573.83, "word": " question", "probability": 0.89208984375}, {"start": 1573.83, "end": 1574.11, "word": " is,", "probability": 0.92626953125}, {"start": 1574.97, "end": 1577.27, "word": " for", "probability": 0.9189453125}, {"start": 1577.27, "end": 1577.45, "word": " the", "probability": 0.91796875}, {"start": 1577.45, "end": 1577.65, "word": " same", "probability": 0.89404296875}, {"start": 1577.65, "end": 1578.11, "word": " problem,", "probability": 0.861328125}, {"start": 1580.53, "end": 1580.77, "word": " you", "probability": 0.78125}, {"start": 1580.77, "end": 1581.05, "word": " said", "probability": 0.56591796875}, {"start": 1581.05, "end": 1581.79, "word": " a", "probability": 0.6142578125}, {"start": 1581.79, "end": 1582.29, "word": " citation", "probability": 0.802734375}], "temperature": 1.0}, {"id": 57, "seek": 161212, "start": 1584.4, "end": 1612.12, "text": " Catfish should be one of the top 2% in the weight. Assuming the weights of catfish are normally distributed, at what weight in bounds should the citation, the notation be established? 
So in this board, he asked about what's the value of x, for example.", "tokens": [9565, 11608, 820, 312, 472, 295, 264, 1192, 568, 4, 294, 264, 3364, 13, 6281, 24919, 264, 17443, 295, 3857, 11608, 366, 5646, 12631, 11, 412, 437, 3364, 294, 29905, 820, 264, 45590, 11, 264, 24657, 312, 7545, 30, 407, 294, 341, 3150, 11, 415, 2351, 466, 437, 311, 264, 2158, 295, 2031, 11, 337, 1365, 13], "avg_logprob": -0.24946119250922366, "compression_ratio": 1.4795321637426901, "no_speech_prob": 0.0, "words": [{"start": 1584.4, "end": 1585.16, "word": " Catfish", "probability": 0.55877685546875}, {"start": 1585.16, "end": 1585.92, "word": " should", "probability": 0.947265625}, {"start": 1585.92, "end": 1586.1, "word": " be", "probability": 0.9521484375}, {"start": 1586.1, "end": 1586.44, "word": " one", "probability": 0.90771484375}, {"start": 1586.44, "end": 1586.84, "word": " of", "probability": 0.96875}, {"start": 1586.84, "end": 1587.0, "word": " the", "probability": 0.92431640625}, {"start": 1587.0, "end": 1587.4, "word": " top", "probability": 0.94384765625}, {"start": 1587.4, "end": 1588.02, "word": " 2", "probability": 0.73046875}, {"start": 1588.02, "end": 1588.52, "word": "%", "probability": 0.93408203125}, {"start": 1588.52, "end": 1588.76, "word": " in", "probability": 0.91748046875}, {"start": 1588.76, "end": 1588.86, "word": " the", "probability": 0.77392578125}, {"start": 1588.86, "end": 1589.06, "word": " weight.", "probability": 0.9013671875}, {"start": 1591.64, "end": 1592.4, "word": " Assuming", "probability": 0.95849609375}, {"start": 1592.4, "end": 1592.62, "word": " the", "probability": 0.8984375}, {"start": 1592.62, "end": 1593.02, "word": " weights", "probability": 0.84423828125}, {"start": 1593.02, "end": 1593.42, "word": " of", "probability": 0.97021484375}, {"start": 1593.42, "end": 1593.86, "word": " catfish", "probability": 0.947265625}, {"start": 1593.86, "end": 1594.06, "word": " are", "probability": 0.93798828125}, {"start": 1594.06, "end": 1594.44, 
"word": " normally", "probability": 0.7978515625}, {"start": 1594.44, "end": 1595.02, "word": " distributed,", "probability": 0.8974609375}, {"start": 1596.54, "end": 1596.76, "word": " at", "probability": 0.92236328125}, {"start": 1596.76, "end": 1596.94, "word": " what", "probability": 0.9462890625}, {"start": 1596.94, "end": 1597.42, "word": " weight", "probability": 0.89013671875}, {"start": 1597.42, "end": 1598.24, "word": " in", "probability": 0.64501953125}, {"start": 1598.24, "end": 1598.66, "word": " bounds", "probability": 0.169189453125}, {"start": 1598.66, "end": 1599.56, "word": " should", "probability": 0.95068359375}, {"start": 1599.56, "end": 1599.74, "word": " the", "probability": 0.8671875}, {"start": 1599.74, "end": 1600.22, "word": " citation,", "probability": 0.84619140625}, {"start": 1601.58, "end": 1601.8, "word": " the", "probability": 0.81005859375}, {"start": 1601.8, "end": 1602.24, "word": " notation", "probability": 0.484619140625}, {"start": 1602.24, "end": 1602.62, "word": " be", "probability": 0.69677734375}, {"start": 1602.62, "end": 1603.68, "word": " established?", "probability": 0.91796875}, {"start": 1605.8, "end": 1606.56, "word": " So", "probability": 0.94970703125}, {"start": 1606.56, "end": 1606.9, "word": " in", "probability": 0.42724609375}, {"start": 1606.9, "end": 1607.24, "word": " this", "probability": 0.94384765625}, {"start": 1607.24, "end": 1608.9, "word": " board,", "probability": 0.75537109375}, {"start": 1609.04, "end": 1609.14, "word": " he", "probability": 0.892578125}, {"start": 1609.14, "end": 1609.3, "word": " asked", "probability": 0.42236328125}, {"start": 1609.3, "end": 1609.62, "word": " about", "probability": 0.8935546875}, {"start": 1609.62, "end": 1610.18, "word": " what's", "probability": 0.82373046875}, {"start": 1610.18, "end": 1610.32, "word": " the", "probability": 0.919921875}, {"start": 1610.32, "end": 1610.6, "word": " value", "probability": 0.97900390625}, {"start": 1610.6, "end": 1611.18, 
"word": " of", "probability": 0.363037109375}, {"start": 1611.18, "end": 1611.5, "word": " x,", "probability": 0.712890625}, {"start": 1611.62, "end": 1611.76, "word": " for", "probability": 0.9501953125}, {"start": 1611.76, "end": 1612.12, "word": " example.", "probability": 0.97216796875}], "temperature": 1.0}, {"id": 58, "seek": 164356, "start": 1617.16, "end": 1643.56, "text": " is greater than what value here. And this probability equals 2%. Because you said the citation catfish should be one of the top 2%. So the area in the right here, this area is 2%.", "tokens": [307, 5044, 813, 437, 2158, 510, 13, 400, 341, 8482, 6915, 568, 6856, 1436, 291, 848, 264, 45590, 3857, 11608, 820, 312, 472, 295, 264, 1192, 568, 6856, 407, 264, 1859, 294, 264, 558, 510, 11, 341, 1859, 307, 568, 6856], "avg_logprob": -0.23325892431395395, "compression_ratio": 1.3636363636363635, "no_speech_prob": 0.0, "words": [{"start": 1617.16, "end": 1617.36, "word": " is", "probability": 0.0919189453125}, {"start": 1617.36, "end": 1617.68, "word": " greater", "probability": 0.8818359375}, {"start": 1617.68, "end": 1618.18, "word": " than", "probability": 0.9423828125}, {"start": 1618.18, "end": 1620.26, "word": " what", "probability": 0.87109375}, {"start": 1620.26, "end": 1620.54, "word": " value", "probability": 0.95068359375}, {"start": 1620.54, "end": 1620.82, "word": " here.", "probability": 0.78759765625}, {"start": 1621.08, "end": 1621.48, "word": " And", "probability": 0.9404296875}, {"start": 1621.48, "end": 1621.68, "word": " this", "probability": 0.9375}, {"start": 1621.68, "end": 1622.12, "word": " probability", "probability": 0.90283203125}, {"start": 1622.12, "end": 1622.72, "word": " equals", "probability": 0.90087890625}, {"start": 1622.72, "end": 1624.76, "word": " 2%.", "probability": 0.809326171875}, {"start": 1624.76, "end": 1627.24, "word": " Because", "probability": 0.93603515625}, {"start": 1627.24, "end": 1627.58, "word": " you", "probability": 0.759765625}, {"start": 
1627.58, "end": 1627.76, "word": " said", "probability": 0.7958984375}, {"start": 1627.76, "end": 1627.88, "word": " the", "probability": 0.56005859375}, {"start": 1627.88, "end": 1628.36, "word": " citation", "probability": 0.85986328125}, {"start": 1628.36, "end": 1629.08, "word": " catfish", "probability": 0.6478271484375}, {"start": 1629.08, "end": 1629.52, "word": " should", "probability": 0.951171875}, {"start": 1629.52, "end": 1629.84, "word": " be", "probability": 0.93017578125}, {"start": 1629.84, "end": 1630.28, "word": " one", "probability": 0.88037109375}, {"start": 1630.28, "end": 1630.54, "word": " of", "probability": 0.96630859375}, {"start": 1630.54, "end": 1630.7, "word": " the", "probability": 0.92333984375}, {"start": 1630.7, "end": 1630.96, "word": " top", "probability": 0.94287109375}, {"start": 1630.96, "end": 1631.7, "word": " 2%.", "probability": 0.95849609375}, {"start": 1631.7, "end": 1634.72, "word": " So", "probability": 0.951171875}, {"start": 1634.72, "end": 1634.94, "word": " the", "probability": 0.8583984375}, {"start": 1634.94, "end": 1635.42, "word": " area", "probability": 0.88671875}, {"start": 1635.42, "end": 1637.46, "word": " in", "probability": 0.287841796875}, {"start": 1637.46, "end": 1638.54, "word": " the", "probability": 0.828125}, {"start": 1638.54, "end": 1638.82, "word": " right", "probability": 0.923828125}, {"start": 1638.82, "end": 1639.2, "word": " here,", "probability": 0.8427734375}, {"start": 1640.48, "end": 1640.74, "word": " this", "probability": 0.9384765625}, {"start": 1640.74, "end": 1641.16, "word": " area", "probability": 0.89013671875}, {"start": 1641.16, "end": 1643.06, "word": " is", "probability": 0.82568359375}, {"start": 1643.06, "end": 1643.56, "word": " 2%.", "probability": 0.965087890625}], "temperature": 1.0}, {"id": 59, "seek": 165946, "start": 1646.0, "end": 1659.46, "text": " What's the value of x in this case? 
So here, the value of x greater than a equals 0.02, and we are looking for this value.", "tokens": [708, 311, 264, 2158, 295, 2031, 294, 341, 1389, 30, 407, 510, 11, 264, 2158, 295, 2031, 5044, 813, 257, 6915, 1958, 13, 12756, 11, 293, 321, 366, 1237, 337, 341, 2158, 13], "avg_logprob": -0.2828584681538975, "compression_ratio": 1.23, "no_speech_prob": 0.0, "words": [{"start": 1646.0, "end": 1646.6, "word": " What's", "probability": 0.6142578125}, {"start": 1646.6, "end": 1646.76, "word": " the", "probability": 0.92431640625}, {"start": 1646.76, "end": 1647.1, "word": " value", "probability": 0.97265625}, {"start": 1647.1, "end": 1648.34, "word": " of", "probability": 0.95751953125}, {"start": 1648.34, "end": 1648.78, "word": " x", "probability": 0.59619140625}, {"start": 1648.78, "end": 1649.3, "word": " in", "probability": 0.6943359375}, {"start": 1649.3, "end": 1649.52, "word": " this", "probability": 0.91748046875}, {"start": 1649.52, "end": 1649.9, "word": " case?", "probability": 0.8916015625}, {"start": 1651.98, "end": 1652.62, "word": " So", "probability": 0.81396484375}, {"start": 1652.62, "end": 1652.9, "word": " here,", "probability": 0.76806640625}, {"start": 1653.94, "end": 1654.08, "word": " the", "probability": 0.345703125}, {"start": 1654.08, "end": 1654.26, "word": " value", "probability": 0.1763916015625}, {"start": 1654.26, "end": 1654.52, "word": " of", "probability": 0.90869140625}, {"start": 1654.52, "end": 1654.72, "word": " x", "probability": 0.962890625}, {"start": 1654.72, "end": 1655.12, "word": " greater", "probability": 0.69384765625}, {"start": 1655.12, "end": 1655.44, "word": " than", "probability": 0.95703125}, {"start": 1655.44, "end": 1655.68, "word": " a", "probability": 0.58984375}, {"start": 1655.68, "end": 1656.16, "word": " equals", "probability": 0.81103515625}, {"start": 1656.16, "end": 1656.44, "word": " 0", "probability": 0.73681640625}, {"start": 1656.44, "end": 1657.14, "word": ".02,", "probability": 0.973388671875}, 
{"start": 1657.74, "end": 1658.14, "word": " and", "probability": 0.93212890625}, {"start": 1658.14, "end": 1658.28, "word": " we", "probability": 0.95263671875}, {"start": 1658.28, "end": 1658.42, "word": " are", "probability": 0.87451171875}, {"start": 1658.42, "end": 1658.64, "word": " looking", "probability": 0.912109375}, {"start": 1658.64, "end": 1658.92, "word": " for", "probability": 0.953125}, {"start": 1658.92, "end": 1659.14, "word": " this", "probability": 0.94677734375}, {"start": 1659.14, "end": 1659.46, "word": " value.", "probability": 0.9462890625}], "temperature": 1.0}, {"id": 60, "seek": 168909, "start": 1665.75, "end": 1689.09, "text": " gives the area to the left side. So this probability actually, the area to the right is 2%, so the area to the left is 98%. So this is the same as, as we know, the equal sign does not matter because we have continuous distribution.", "tokens": [2709, 264, 1859, 281, 264, 1411, 1252, 13, 407, 341, 8482, 767, 11, 264, 1859, 281, 264, 558, 307, 568, 8923, 370, 264, 1859, 281, 264, 1411, 307, 20860, 6856, 407, 341, 307, 264, 912, 382, 11, 382, 321, 458, 11, 264, 2681, 1465, 775, 406, 1871, 570, 321, 362, 10957, 7316, 13], "avg_logprob": -0.19285301201873356, "compression_ratio": 1.5466666666666666, "no_speech_prob": 0.0, "words": [{"start": 1665.75, "end": 1666.21, "word": " gives", "probability": 0.140380859375}, {"start": 1666.21, "end": 1666.37, "word": " the", "probability": 0.837890625}, {"start": 1666.37, "end": 1666.51, "word": " area", "probability": 0.8369140625}, {"start": 1666.51, "end": 1666.71, "word": " to", "probability": 0.94287109375}, {"start": 1666.71, "end": 1666.83, "word": " the", "probability": 0.9150390625}, {"start": 1666.83, "end": 1667.05, "word": " left", "probability": 0.93359375}, {"start": 1667.05, "end": 1667.37, "word": " side.", "probability": 0.76220703125}, {"start": 1668.49, "end": 1668.85, "word": " So", "probability": 0.85595703125}, {"start": 1668.85, "end": 1669.17, "word": " 
this", "probability": 0.71337890625}, {"start": 1669.17, "end": 1669.65, "word": " probability", "probability": 0.84130859375}, {"start": 1669.65, "end": 1670.39, "word": " actually,", "probability": 0.71533203125}, {"start": 1670.87, "end": 1672.59, "word": " the", "probability": 0.85888671875}, {"start": 1672.59, "end": 1672.79, "word": " area", "probability": 0.888671875}, {"start": 1672.79, "end": 1672.99, "word": " to", "probability": 0.95361328125}, {"start": 1672.99, "end": 1673.11, "word": " the", "probability": 0.9150390625}, {"start": 1673.11, "end": 1673.41, "word": " right", "probability": 0.90478515625}, {"start": 1673.41, "end": 1673.85, "word": " is", "probability": 0.92333984375}, {"start": 1673.85, "end": 1674.49, "word": " 2%,", "probability": 0.670654296875}, {"start": 1674.49, "end": 1674.97, "word": " so", "probability": 0.904296875}, {"start": 1674.97, "end": 1675.15, "word": " the", "probability": 0.9072265625}, {"start": 1675.15, "end": 1675.33, "word": " area", "probability": 0.884765625}, {"start": 1675.33, "end": 1675.53, "word": " to", "probability": 0.9609375}, {"start": 1675.53, "end": 1675.67, "word": " the", "probability": 0.91552734375}, {"start": 1675.67, "end": 1675.91, "word": " left", "probability": 0.94921875}, {"start": 1675.91, "end": 1677.17, "word": " is", "probability": 0.93505859375}, {"start": 1677.17, "end": 1678.01, "word": " 98%.", "probability": 0.924072265625}, {"start": 1678.01, "end": 1680.39, "word": " So", "probability": 0.90869140625}, {"start": 1680.39, "end": 1680.57, "word": " this", "probability": 0.8828125}, {"start": 1680.57, "end": 1680.67, "word": " is", "probability": 0.94677734375}, {"start": 1680.67, "end": 1680.81, "word": " the", "probability": 0.9111328125}, {"start": 1680.81, "end": 1681.03, "word": " same", "probability": 0.9189453125}, {"start": 1681.03, "end": 1681.41, "word": " as,", "probability": 0.96142578125}, {"start": 1684.61, "end": 1685.95, "word": " as", "probability": 0.92578125}, 
{"start": 1685.95, "end": 1686.09, "word": " we", "probability": 0.935546875}, {"start": 1686.09, "end": 1686.27, "word": " know,", "probability": 0.8857421875}, {"start": 1686.43, "end": 1686.53, "word": " the", "probability": 0.90869140625}, {"start": 1686.53, "end": 1686.75, "word": " equal", "probability": 0.8046875}, {"start": 1686.75, "end": 1687.03, "word": " sign", "probability": 0.8994140625}, {"start": 1687.03, "end": 1687.25, "word": " does", "probability": 0.90380859375}, {"start": 1687.25, "end": 1687.41, "word": " not", "probability": 0.95361328125}, {"start": 1687.41, "end": 1687.65, "word": " matter", "probability": 0.859375}, {"start": 1687.65, "end": 1687.93, "word": " because", "probability": 0.6884765625}, {"start": 1687.93, "end": 1688.09, "word": " we", "probability": 0.93017578125}, {"start": 1688.09, "end": 1688.19, "word": " have", "probability": 0.93994140625}, {"start": 1688.19, "end": 1688.67, "word": " continuous", "probability": 0.869140625}, {"start": 1688.67, "end": 1689.09, "word": " distribution.", "probability": 0.85693359375}], "temperature": 1.0}, {"id": 61, "seek": 171857, "start": 1692.05, "end": 1718.57, "text": " continuous distribution, so equal sign does not matter. So now, if you ask about P of X greater than a certain value equals a probability of, for example, 0.02, you have to find the probability to the left, which is 0.98, because our table gives the area to the left. 
Now, we have to find the value of A such that", "tokens": [10957, 7316, 11, 370, 2681, 1465, 775, 406, 1871, 13, 407, 586, 11, 498, 291, 1029, 466, 430, 295, 1783, 5044, 813, 257, 1629, 2158, 6915, 257, 8482, 295, 11, 337, 1365, 11, 1958, 13, 12756, 11, 291, 362, 281, 915, 264, 8482, 281, 264, 1411, 11, 597, 307, 1958, 13, 22516, 11, 570, 527, 3199, 2709, 264, 1859, 281, 264, 1411, 13, 823, 11, 321, 362, 281, 915, 264, 2158, 295, 316, 1270, 300], "avg_logprob": -0.18092105655293717, "compression_ratio": 1.5467980295566504, "no_speech_prob": 0.0, "words": [{"start": 1692.05, "end": 1692.41, "word": " continuous", "probability": 0.12353515625}, {"start": 1692.41, "end": 1692.85, "word": " distribution,", "probability": 0.72412109375}, {"start": 1693.31, "end": 1693.31, "word": " so", "probability": 0.9287109375}, {"start": 1693.31, "end": 1693.99, "word": " equal", "probability": 0.56787109375}, {"start": 1693.99, "end": 1694.29, "word": " sign", "probability": 0.8525390625}, {"start": 1694.29, "end": 1694.47, "word": " does", "probability": 0.90625}, {"start": 1694.47, "end": 1694.65, "word": " not", "probability": 0.94287109375}, {"start": 1694.65, "end": 1694.81, "word": " matter.", "probability": 0.68994140625}, {"start": 1695.43, "end": 1695.65, "word": " So", "probability": 0.90771484375}, {"start": 1695.65, "end": 1695.91, "word": " now,", "probability": 0.73583984375}, {"start": 1696.81, "end": 1697.11, "word": " if", "probability": 0.95703125}, {"start": 1697.11, "end": 1697.21, "word": " you", "probability": 0.919921875}, {"start": 1697.21, "end": 1697.41, "word": " ask", "probability": 0.9521484375}, {"start": 1697.41, "end": 1697.67, "word": " about", "probability": 0.91845703125}, {"start": 1697.67, "end": 1697.85, "word": " P", "probability": 0.3408203125}, {"start": 1697.85, "end": 1697.97, "word": " of", "probability": 0.82958984375}, {"start": 1697.97, "end": 1698.13, "word": " X", "probability": 0.85791015625}, {"start": 1698.13, "end": 1698.51, 
"word": " greater", "probability": 0.90478515625}, {"start": 1698.51, "end": 1698.85, "word": " than", "probability": 0.94677734375}, {"start": 1698.85, "end": 1699.05, "word": " a", "probability": 0.8701171875}, {"start": 1699.05, "end": 1699.29, "word": " certain", "probability": 0.89111328125}, {"start": 1699.29, "end": 1699.69, "word": " value", "probability": 0.96923828125}, {"start": 1699.69, "end": 1700.29, "word": " equals", "probability": 0.8623046875}, {"start": 1700.29, "end": 1701.33, "word": " a", "probability": 0.92626953125}, {"start": 1701.33, "end": 1701.65, "word": " probability", "probability": 0.94580078125}, {"start": 1701.65, "end": 1701.95, "word": " of,", "probability": 0.90576171875}, {"start": 1702.03, "end": 1702.19, "word": " for", "probability": 0.94677734375}, {"start": 1702.19, "end": 1702.47, "word": " example,", "probability": 0.9716796875}, {"start": 1702.55, "end": 1702.81, "word": " 0", "probability": 0.452880859375}, {"start": 1702.81, "end": 1703.43, "word": ".02,", "probability": 0.972900390625}, {"start": 1704.07, "end": 1704.61, "word": " you", "probability": 0.95703125}, {"start": 1704.61, "end": 1705.01, "word": " have", "probability": 0.9404296875}, {"start": 1705.01, "end": 1706.19, "word": " to", "probability": 0.9658203125}, {"start": 1706.19, "end": 1706.47, "word": " find", "probability": 0.8935546875}, {"start": 1706.47, "end": 1706.67, "word": " the", "probability": 0.89501953125}, {"start": 1706.67, "end": 1707.09, "word": " probability", "probability": 0.94775390625}, {"start": 1707.09, "end": 1707.33, "word": " to", "probability": 0.7783203125}, {"start": 1707.33, "end": 1707.47, "word": " the", "probability": 0.88330078125}, {"start": 1707.47, "end": 1707.69, "word": " left,", "probability": 0.91357421875}, {"start": 1708.25, "end": 1708.91, "word": " which", "probability": 0.94580078125}, {"start": 1708.91, "end": 1709.09, "word": " is", "probability": 0.9365234375}, {"start": 1709.09, "end": 1709.31, "word": 
" 0", "probability": 0.9560546875}, {"start": 1709.31, "end": 1709.79, "word": ".98,", "probability": 0.991455078125}, {"start": 1709.87, "end": 1710.15, "word": " because", "probability": 0.8994140625}, {"start": 1710.15, "end": 1710.45, "word": " our", "probability": 0.892578125}, {"start": 1710.45, "end": 1710.87, "word": " table", "probability": 0.84765625}, {"start": 1710.87, "end": 1711.89, "word": " gives", "probability": 0.88623046875}, {"start": 1711.89, "end": 1712.05, "word": " the", "probability": 0.6845703125}, {"start": 1712.05, "end": 1712.21, "word": " area", "probability": 0.87939453125}, {"start": 1712.21, "end": 1712.37, "word": " to", "probability": 0.94775390625}, {"start": 1712.37, "end": 1712.49, "word": " the", "probability": 0.9208984375}, {"start": 1712.49, "end": 1712.69, "word": " left.", "probability": 0.96044921875}, {"start": 1714.39, "end": 1714.89, "word": " Now,", "probability": 0.955078125}, {"start": 1715.09, "end": 1715.55, "word": " we", "probability": 0.9580078125}, {"start": 1715.55, "end": 1715.79, "word": " have", "probability": 0.94677734375}, {"start": 1715.79, "end": 1715.93, "word": " to", "probability": 0.96923828125}, {"start": 1715.93, "end": 1716.25, "word": " find", "probability": 0.89697265625}, {"start": 1716.25, "end": 1716.47, "word": " the", "probability": 0.91357421875}, {"start": 1716.47, "end": 1716.69, "word": " value", "probability": 0.97705078125}, {"start": 1716.69, "end": 1716.85, "word": " of", "probability": 0.9599609375}, {"start": 1716.85, "end": 1717.07, "word": " A", "probability": 0.921875}, {"start": 1717.07, "end": 1718.23, "word": " such", "probability": 0.92626953125}, {"start": 1718.23, "end": 1718.57, "word": " that", "probability": 0.94091796875}], "temperature": 1.0}, {"id": 62, "seek": 173958, "start": 1719.44, "end": 1739.58, "text": " Probability of X is more than or equal to 0.98. So again, we have to look at the normal table, but backwards, because this value is given. 
If the probability is given, we have to look inside the body of the table in order to find the z-score.", "tokens": [8736, 2310, 295, 1783, 307, 544, 813, 420, 2681, 281, 1958, 13, 22516, 13, 407, 797, 11, 321, 362, 281, 574, 412, 264, 2710, 3199, 11, 457, 12204, 11, 570, 341, 2158, 307, 2212, 13, 759, 264, 8482, 307, 2212, 11, 321, 362, 281, 574, 1854, 264, 1772, 295, 264, 3199, 294, 1668, 281, 915, 264, 710, 12, 4417, 418, 13], "avg_logprob": -0.2515120828343976, "compression_ratio": 1.5, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1719.44, "end": 1719.92, "word": " Probability", "probability": 0.61273193359375}, {"start": 1719.92, "end": 1720.12, "word": " of", "probability": 0.8544921875}, {"start": 1720.12, "end": 1720.28, "word": " X", "probability": 0.8193359375}, {"start": 1720.28, "end": 1720.46, "word": " is", "probability": 0.282958984375}, {"start": 1720.46, "end": 1720.64, "word": " more", "probability": 0.52685546875}, {"start": 1720.64, "end": 1720.82, "word": " than", "probability": 0.9375}, {"start": 1720.82, "end": 1721.0, "word": " or", "probability": 0.6005859375}, {"start": 1721.0, "end": 1721.04, "word": " equal", "probability": 0.88330078125}, {"start": 1721.04, "end": 1721.04, "word": " to", "probability": 0.93359375}, {"start": 1721.04, "end": 1721.42, "word": " 0", "probability": 0.345703125}, {"start": 1721.42, "end": 1721.84, "word": ".98.", "probability": 0.820068359375}, {"start": 1723.06, "end": 1723.44, "word": " So", "probability": 0.7412109375}, {"start": 1723.44, "end": 1723.74, "word": " again,", "probability": 0.75927734375}, {"start": 1723.82, "end": 1723.9, "word": " we", "probability": 0.85986328125}, {"start": 1723.9, "end": 1724.08, "word": " have", "probability": 0.9072265625}, {"start": 1724.08, "end": 1724.22, "word": " to", "probability": 0.9609375}, {"start": 1724.22, "end": 1724.42, "word": " look", "probability": 0.9462890625}, {"start": 1724.42, "end": 1724.66, "word": " at", "probability": 
0.93017578125}, {"start": 1724.66, "end": 1724.82, "word": " the", "probability": 0.7734375}, {"start": 1724.82, "end": 1725.16, "word": " normal", "probability": 0.89697265625}, {"start": 1725.16, "end": 1725.56, "word": " table,", "probability": 0.86376953125}, {"start": 1725.9, "end": 1725.9, "word": " but", "probability": 0.91162109375}, {"start": 1725.9, "end": 1727.86, "word": " backwards,", "probability": 0.638671875}, {"start": 1728.82, "end": 1729.52, "word": " because", "probability": 0.89306640625}, {"start": 1729.52, "end": 1729.74, "word": " this", "probability": 0.892578125}, {"start": 1729.74, "end": 1729.98, "word": " value", "probability": 0.93701171875}, {"start": 1729.98, "end": 1730.14, "word": " is", "probability": 0.8857421875}, {"start": 1730.14, "end": 1730.36, "word": " given.", "probability": 0.88232421875}, {"start": 1731.56, "end": 1732.02, "word": " If", "probability": 0.93359375}, {"start": 1732.02, "end": 1732.2, "word": " the", "probability": 0.8974609375}, {"start": 1732.2, "end": 1732.5, "word": " probability", "probability": 0.9296875}, {"start": 1732.5, "end": 1732.8, "word": " is", "probability": 0.93896484375}, {"start": 1732.8, "end": 1733.08, "word": " given,", "probability": 0.90380859375}, {"start": 1733.22, "end": 1733.42, "word": " we", "probability": 0.89599609375}, {"start": 1733.42, "end": 1733.6, "word": " have", "probability": 0.94091796875}, {"start": 1733.6, "end": 1733.72, "word": " to", "probability": 0.9677734375}, {"start": 1733.72, "end": 1733.88, "word": " look", "probability": 0.962890625}, {"start": 1733.88, "end": 1734.44, "word": " inside", "probability": 0.92822265625}, {"start": 1734.44, "end": 1735.0, "word": " the", "probability": 0.88671875}, {"start": 1735.0, "end": 1735.32, "word": " body", "probability": 0.86083984375}, {"start": 1735.32, "end": 1735.84, "word": " of", "probability": 0.958984375}, {"start": 1735.84, "end": 1736.0, "word": " the", "probability": 0.91943359375}, {"start": 1736.0, 
"end": 1736.3, "word": " table", "probability": 0.89794921875}, {"start": 1736.3, "end": 1737.34, "word": " in", "probability": 0.779296875}, {"start": 1737.34, "end": 1737.56, "word": " order", "probability": 0.91796875}, {"start": 1737.56, "end": 1738.06, "word": " to", "probability": 0.96630859375}, {"start": 1738.06, "end": 1738.9, "word": " find", "probability": 0.89111328125}, {"start": 1738.9, "end": 1739.08, "word": " the", "probability": 0.54541015625}, {"start": 1739.08, "end": 1739.24, "word": " z", "probability": 0.450927734375}, {"start": 1739.24, "end": 1739.58, "word": "-score.", "probability": 0.8575846354166666}], "temperature": 1.0}, {"id": 63, "seek": 177085, "start": 1743.35, "end": 1770.85, "text": " x equals mu plus z sigma in order to find the corresponding value x. So again, go back to the normal table, and we are looking for 98%. The closest value to 98%, look here, if you stop here at 2, go to the right,", "tokens": [2031, 6915, 2992, 1804, 710, 12771, 294, 1668, 281, 915, 264, 11760, 2158, 2031, 13, 407, 797, 11, 352, 646, 281, 264, 2710, 3199, 11, 293, 321, 366, 1237, 337, 20860, 6856, 440, 13699, 2158, 281, 20860, 8923, 574, 510, 11, 498, 291, 1590, 510, 412, 568, 11, 352, 281, 264, 558, 11], "avg_logprob": -0.26302083885228195, "compression_ratio": 1.4013157894736843, "no_speech_prob": 0.0, "words": [{"start": 1743.35, "end": 1743.81, "word": " x", "probability": 0.2802734375}, {"start": 1743.81, "end": 1744.27, "word": " equals", "probability": 0.198974609375}, {"start": 1744.27, "end": 1745.31, "word": " mu", "probability": 0.59814453125}, {"start": 1745.31, "end": 1745.79, "word": " plus", "probability": 0.90478515625}, {"start": 1745.79, "end": 1746.27, "word": " z", "probability": 0.8359375}, {"start": 1746.27, "end": 1746.67, "word": " sigma", "probability": 0.8046875}, {"start": 1746.67, "end": 1747.03, "word": " in", "probability": 0.68408203125}, {"start": 1747.03, "end": 1747.21, "word": " order", "probability": 
0.9306640625}, {"start": 1747.21, "end": 1747.43, "word": " to", "probability": 0.9638671875}, {"start": 1747.43, "end": 1747.63, "word": " find", "probability": 0.88525390625}, {"start": 1747.63, "end": 1747.85, "word": " the", "probability": 0.83154296875}, {"start": 1747.85, "end": 1748.27, "word": " corresponding", "probability": 0.763671875}, {"start": 1748.27, "end": 1748.69, "word": " value", "probability": 0.9599609375}, {"start": 1748.69, "end": 1749.11, "word": " x.", "probability": 0.74169921875}, {"start": 1750.51, "end": 1751.13, "word": " So", "probability": 0.87744140625}, {"start": 1751.13, "end": 1751.41, "word": " again,", "probability": 0.6376953125}, {"start": 1751.47, "end": 1751.55, "word": " go", "probability": 0.830078125}, {"start": 1751.55, "end": 1751.85, "word": " back", "probability": 0.87548828125}, {"start": 1751.85, "end": 1752.17, "word": " to", "probability": 0.96337890625}, {"start": 1752.17, "end": 1752.29, "word": " the", "probability": 0.90234375}, {"start": 1752.29, "end": 1752.55, "word": " normal", "probability": 0.89599609375}, {"start": 1752.55, "end": 1752.95, "word": " table,", "probability": 0.8779296875}, {"start": 1753.79, "end": 1755.13, "word": " and", "probability": 0.90673828125}, {"start": 1755.13, "end": 1755.27, "word": " we", "probability": 0.95849609375}, {"start": 1755.27, "end": 1755.41, "word": " are", "probability": 0.90673828125}, {"start": 1755.41, "end": 1755.65, "word": " looking", "probability": 0.92529296875}, {"start": 1755.65, "end": 1756.07, "word": " for", "probability": 0.94775390625}, {"start": 1756.07, "end": 1757.23, "word": " 98%.", "probability": 0.67236328125}, {"start": 1757.23, "end": 1762.01, "word": " The", "probability": 0.8662109375}, {"start": 1762.01, "end": 1762.47, "word": " closest", "probability": 0.93115234375}, {"start": 1762.47, "end": 1762.97, "word": " value", "probability": 0.97021484375}, {"start": 1762.97, "end": 1764.69, "word": " to", "probability": 0.8388671875}, 
{"start": 1764.69, "end": 1765.65, "word": " 98%,", "probability": 0.7152099609375}, {"start": 1765.65, "end": 1766.01, "word": " look", "probability": 0.8916015625}, {"start": 1766.01, "end": 1766.23, "word": " here,", "probability": 0.83251953125}, {"start": 1766.95, "end": 1767.39, "word": " if", "probability": 0.728515625}, {"start": 1767.39, "end": 1767.51, "word": " you", "probability": 0.95166015625}, {"start": 1767.51, "end": 1767.75, "word": " stop", "probability": 0.94580078125}, {"start": 1767.75, "end": 1767.93, "word": " here", "probability": 0.8115234375}, {"start": 1767.93, "end": 1768.11, "word": " at", "probability": 0.88134765625}, {"start": 1768.11, "end": 1768.37, "word": " 2,", "probability": 0.7568359375}, {"start": 1769.89, "end": 1770.29, "word": " go", "probability": 0.5107421875}, {"start": 1770.29, "end": 1770.45, "word": " to", "probability": 0.9609375}, {"start": 1770.45, "end": 1770.61, "word": " the", "probability": 0.9140625}, {"start": 1770.61, "end": 1770.85, "word": " right,", "probability": 0.91357421875}], "temperature": 1.0}, {"id": 64, "seek": 180328, "start": 1773.66, "end": 1803.28, "text": " Here we have 9798 or 9803. So the answer might be your z-score could be 2.05 or 2.06. So again, in this case, the table does not give the exact. 
So the approximate one", "tokens": [1692, 321, 362, 23399, 22516, 420, 1722, 4702, 18, 13, 407, 264, 1867, 1062, 312, 428, 710, 12, 4417, 418, 727, 312, 568, 13, 13328, 420, 568, 13, 12791, 13, 407, 797, 11, 294, 341, 1389, 11, 264, 3199, 775, 406, 976, 264, 1900, 13, 407, 264, 30874, 472], "avg_logprob": -0.20281249821186065, "compression_ratio": 1.263157894736842, "no_speech_prob": 0.0, "words": [{"start": 1773.66, "end": 1773.98, "word": " Here", "probability": 0.392333984375}, {"start": 1773.98, "end": 1774.12, "word": " we", "probability": 0.78564453125}, {"start": 1774.12, "end": 1774.3, "word": " have", "probability": 0.9443359375}, {"start": 1774.3, "end": 1775.34, "word": " 9798", "probability": 0.88232421875}, {"start": 1775.34, "end": 1779.38, "word": " or", "probability": 0.316162109375}, {"start": 1779.38, "end": 1781.48, "word": " 9803.", "probability": 0.89599609375}, {"start": 1782.64, "end": 1783.08, "word": " So", "probability": 0.88525390625}, {"start": 1783.08, "end": 1783.48, "word": " the", "probability": 0.67578125}, {"start": 1783.48, "end": 1783.82, "word": " answer", "probability": 0.9658203125}, {"start": 1783.82, "end": 1784.42, "word": " might", "probability": 0.88623046875}, {"start": 1784.42, "end": 1784.76, "word": " be", "probability": 0.9521484375}, {"start": 1784.76, "end": 1786.46, "word": " your", "probability": 0.36865234375}, {"start": 1786.46, "end": 1786.7, "word": " z", "probability": 0.708984375}, {"start": 1786.7, "end": 1787.1, "word": "-score", "probability": 0.8190104166666666}, {"start": 1787.1, "end": 1788.06, "word": " could", "probability": 0.64013671875}, {"start": 1788.06, "end": 1788.4, "word": " be", "probability": 0.94580078125}, {"start": 1788.4, "end": 1789.92, "word": " 2", "probability": 0.96728515625}, {"start": 1789.92, "end": 1790.46, "word": ".05", "probability": 0.993896484375}, {"start": 1790.46, "end": 1792.62, "word": " or", "probability": 0.943359375}, {"start": 1792.62, "end": 1795.04, "word": " 
2", "probability": 0.99072265625}, {"start": 1795.04, "end": 1795.72, "word": ".06.", "probability": 0.98974609375}, {"start": 1797.22, "end": 1797.54, "word": " So", "probability": 0.935546875}, {"start": 1797.54, "end": 1797.9, "word": " again,", "probability": 0.88427734375}, {"start": 1798.34, "end": 1798.44, "word": " in", "probability": 0.921875}, {"start": 1798.44, "end": 1798.6, "word": " this", "probability": 0.94677734375}, {"start": 1798.6, "end": 1798.82, "word": " case,", "probability": 0.90576171875}, {"start": 1798.9, "end": 1799.0, "word": " the", "probability": 0.9208984375}, {"start": 1799.0, "end": 1799.22, "word": " table", "probability": 0.89453125}, {"start": 1799.22, "end": 1799.44, "word": " does", "probability": 0.9716796875}, {"start": 1799.44, "end": 1799.62, "word": " not", "probability": 0.94921875}, {"start": 1799.62, "end": 1799.92, "word": " give", "probability": 0.88037109375}, {"start": 1799.92, "end": 1800.86, "word": " the", "probability": 0.91650390625}, {"start": 1800.86, "end": 1801.14, "word": " exact.", "probability": 0.9697265625}, {"start": 1802.02, "end": 1802.26, "word": " So", "probability": 0.947265625}, {"start": 1802.26, "end": 1802.42, "word": " the", "probability": 0.88232421875}, {"start": 1802.42, "end": 1802.86, "word": " approximate", "probability": 0.8896484375}, {"start": 1802.86, "end": 1803.28, "word": " one", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 65, "seek": 183152, "start": 1803.72, "end": 1831.52, "text": " might be between them exactly. Or just take one of these. So either you can take 9798, which is closer to 98% than 9803, because it's three distant apart. So maybe we can take this value. Again, if you take the other one, you will be OK. 
So you take either 2.05.", "tokens": [1062, 312, 1296, 552, 2293, 13, 1610, 445, 747, 472, 295, 613, 13, 407, 2139, 291, 393, 747, 23399, 22516, 11, 597, 307, 4966, 281, 20860, 4, 813, 1722, 4702, 18, 11, 570, 309, 311, 1045, 17275, 4936, 13, 407, 1310, 321, 393, 747, 341, 2158, 13, 3764, 11, 498, 291, 747, 264, 661, 472, 11, 291, 486, 312, 2264, 13, 407, 291, 747, 2139, 568, 13, 13328, 13], "avg_logprob": -0.1970982076866286, "compression_ratio": 1.445054945054945, "no_speech_prob": 0.0, "words": [{"start": 1803.72, "end": 1804.4, "word": " might", "probability": 0.431640625}, {"start": 1804.4, "end": 1804.78, "word": " be", "probability": 0.79150390625}, {"start": 1804.78, "end": 1805.98, "word": " between", "probability": 0.6220703125}, {"start": 1805.98, "end": 1806.3, "word": " them", "probability": 0.88525390625}, {"start": 1806.3, "end": 1806.76, "word": " exactly.", "probability": 0.73681640625}, {"start": 1807.12, "end": 1807.78, "word": " Or", "probability": 0.9384765625}, {"start": 1807.78, "end": 1808.12, "word": " just", "probability": 0.87939453125}, {"start": 1808.12, "end": 1808.46, "word": " take", "probability": 0.77734375}, {"start": 1808.46, "end": 1808.76, "word": " one", "probability": 0.919921875}, {"start": 1808.76, "end": 1808.92, "word": " of", "probability": 0.9658203125}, {"start": 1808.92, "end": 1809.24, "word": " these.", "probability": 0.8408203125}, {"start": 1810.18, "end": 1810.66, "word": " So", "probability": 0.92626953125}, {"start": 1810.66, "end": 1811.08, "word": " either", "probability": 0.888671875}, {"start": 1811.08, "end": 1811.3, "word": " you", "probability": 0.947265625}, {"start": 1811.3, "end": 1811.48, "word": " can", "probability": 0.93505859375}, {"start": 1811.48, "end": 1811.82, "word": " take", "probability": 0.8466796875}, {"start": 1811.82, "end": 1812.76, "word": " 9798,", "probability": 0.824951171875}, {"start": 1812.9, "end": 1813.04, "word": " which", "probability": 0.80908203125}, {"start": 1813.04, 
"end": 1813.14, "word": " is", "probability": 0.939453125}, {"start": 1813.14, "end": 1813.54, "word": " closer", "probability": 0.91552734375}, {"start": 1813.54, "end": 1815.4, "word": " to", "probability": 0.94921875}, {"start": 1815.4, "end": 1815.88, "word": " 98", "probability": 0.9794921875}, {"start": 1815.88, "end": 1816.2, "word": "%", "probability": 0.75439453125}, {"start": 1816.2, "end": 1817.24, "word": " than", "probability": 0.89697265625}, {"start": 1817.24, "end": 1818.38, "word": " 9803,", "probability": 0.95947265625}, {"start": 1818.56, "end": 1818.78, "word": " because", "probability": 0.8916015625}, {"start": 1818.78, "end": 1819.14, "word": " it's", "probability": 0.885009765625}, {"start": 1819.14, "end": 1819.5, "word": " three", "probability": 0.57958984375}, {"start": 1819.5, "end": 1820.44, "word": " distant", "probability": 0.7353515625}, {"start": 1820.44, "end": 1820.82, "word": " apart.", "probability": 0.82568359375}, {"start": 1822.0, "end": 1822.28, "word": " So", "probability": 0.9580078125}, {"start": 1822.28, "end": 1822.56, "word": " maybe", "probability": 0.94921875}, {"start": 1822.56, "end": 1822.88, "word": " we", "probability": 0.853515625}, {"start": 1822.88, "end": 1823.1, "word": " can", "probability": 0.93505859375}, {"start": 1823.1, "end": 1823.38, "word": " take", "probability": 0.86572265625}, {"start": 1823.38, "end": 1823.58, "word": " this", "probability": 0.90576171875}, {"start": 1823.58, "end": 1823.78, "word": " value.", "probability": 0.650390625}, {"start": 1824.5, "end": 1824.84, "word": " Again,", "probability": 0.9521484375}, {"start": 1824.88, "end": 1824.98, "word": " if", "probability": 0.9501953125}, {"start": 1824.98, "end": 1825.1, "word": " you", "probability": 0.9453125}, {"start": 1825.1, "end": 1825.26, "word": " take", "probability": 0.8818359375}, {"start": 1825.26, "end": 1825.4, "word": " the", "probability": 0.916015625}, {"start": 1825.4, "end": 1825.6, "word": " other", "probability": 
0.888671875}, {"start": 1825.6, "end": 1825.92, "word": " one,", "probability": 0.9267578125}, {"start": 1826.72, "end": 1826.92, "word": " you", "probability": 0.6845703125}, {"start": 1826.92, "end": 1827.24, "word": " will", "probability": 0.57763671875}, {"start": 1827.24, "end": 1827.38, "word": " be", "probability": 0.94140625}, {"start": 1827.38, "end": 1827.64, "word": " OK.", "probability": 0.775390625}, {"start": 1828.54, "end": 1828.72, "word": " So", "probability": 0.9140625}, {"start": 1828.72, "end": 1828.84, "word": " you", "probability": 0.89208984375}, {"start": 1828.84, "end": 1829.4, "word": " take", "probability": 0.84423828125}, {"start": 1829.4, "end": 1829.78, "word": " either", "probability": 0.9072265625}, {"start": 1829.78, "end": 1831.0, "word": " 2", "probability": 0.68896484375}, {"start": 1831.0, "end": 1831.52, "word": ".05.", "probability": 0.98681640625}], "temperature": 1.0}, {"id": 66, "seek": 186169, "start": 1832.37, "end": 1861.69, "text": " or 2.06. So let's take the first value, for example. So my x equals mu, z is 2.05, times sigma, 0.8. 
Multiply 2.05 by 8, 0.8, then add 3.2, you will get", "tokens": [420, 568, 13, 12791, 13, 407, 718, 311, 747, 264, 700, 2158, 11, 337, 1365, 13, 407, 452, 2031, 6915, 2992, 11, 710, 307, 568, 13, 13328, 11, 1413, 12771, 11, 1958, 13, 23, 13, 31150, 356, 568, 13, 13328, 538, 1649, 11, 1958, 13, 23, 11, 550, 909, 805, 13, 17, 11, 291, 486, 483], "avg_logprob": -0.2168311403508772, "compression_ratio": 1.224, "no_speech_prob": 0.0, "words": [{"start": 1832.37, "end": 1832.85, "word": " or", "probability": 0.1964111328125}, {"start": 1832.85, "end": 1833.13, "word": " 2", "probability": 0.921875}, {"start": 1833.13, "end": 1833.71, "word": ".06.", "probability": 0.95263671875}, {"start": 1835.21, "end": 1835.95, "word": " So", "probability": 0.9013671875}, {"start": 1835.95, "end": 1836.25, "word": " let's", "probability": 0.878173828125}, {"start": 1836.25, "end": 1836.73, "word": " take", "probability": 0.85498046875}, {"start": 1836.73, "end": 1837.93, "word": " the", "probability": 0.89453125}, {"start": 1837.93, "end": 1838.19, "word": " first", "probability": 0.8544921875}, {"start": 1838.19, "end": 1838.51, "word": " value,", "probability": 0.56982421875}, {"start": 1839.27, "end": 1839.55, "word": " for", "probability": 0.94921875}, {"start": 1839.55, "end": 1839.85, "word": " example.", "probability": 0.96826171875}, {"start": 1840.93, "end": 1841.37, "word": " So", "probability": 0.947265625}, {"start": 1841.37, "end": 1841.55, "word": " my", "probability": 0.8291015625}, {"start": 1841.55, "end": 1841.85, "word": " x", "probability": 0.7294921875}, {"start": 1841.85, "end": 1842.21, "word": " equals", "probability": 0.5986328125}, {"start": 1842.21, "end": 1842.77, "word": " mu,", "probability": 0.68603515625}, {"start": 1844.11, "end": 1845.27, "word": " z", "probability": 0.8935546875}, {"start": 1845.27, "end": 1845.73, "word": " is", "probability": 0.7822265625}, {"start": 1845.73, "end": 1845.93, "word": " 2", "probability": 0.9873046875}, {"start": 
1845.93, "end": 1846.63, "word": ".05,", "probability": 0.99267578125}, {"start": 1847.39, "end": 1848.07, "word": " times", "probability": 0.9326171875}, {"start": 1848.07, "end": 1848.51, "word": " sigma,", "probability": 0.90576171875}, {"start": 1849.17, "end": 1849.47, "word": " 0", "probability": 0.75439453125}, {"start": 1849.47, "end": 1849.97, "word": ".8.", "probability": 0.996337890625}, {"start": 1851.61, "end": 1852.55, "word": " Multiply", "probability": 0.9189453125}, {"start": 1852.55, "end": 1854.55, "word": " 2", "probability": 0.97021484375}, {"start": 1854.55, "end": 1855.37, "word": ".05", "probability": 0.993896484375}, {"start": 1855.37, "end": 1855.85, "word": " by", "probability": 0.97314453125}, {"start": 1855.85, "end": 1856.19, "word": " 8,", "probability": 0.63916015625}, {"start": 1856.33, "end": 1856.57, "word": " 0", "probability": 0.8359375}, {"start": 1856.57, "end": 1856.93, "word": ".8,", "probability": 0.998046875}, {"start": 1857.33, "end": 1857.51, "word": " then", "probability": 0.83837890625}, {"start": 1857.51, "end": 1857.89, "word": " add", "probability": 0.873046875}, {"start": 1857.89, "end": 1858.99, "word": " 3", "probability": 0.93408203125}, {"start": 1858.99, "end": 1859.51, "word": ".2,", "probability": 0.991943359375}, {"start": 1860.67, "end": 1861.11, "word": " you", "probability": 0.94384765625}, {"start": 1861.11, "end": 1861.27, "word": " will", "probability": 0.8779296875}, {"start": 1861.27, "end": 1861.69, "word": " get", "probability": 0.9384765625}], "temperature": 1.0}, {"id": 67, "seek": 189083, "start": 1862.75, "end": 1890.83, "text": " What's your answer? 3.2 plus 2 point... So around 4.8. So your answer is 4.84. 
Now if you go back to the problem, and suppose you know the value of x.", "tokens": [708, 311, 428, 1867, 30, 805, 13, 17, 1804, 568, 935, 485, 407, 926, 1017, 13, 23, 13, 407, 428, 1867, 307, 1017, 13, 25494, 13, 823, 498, 291, 352, 646, 281, 264, 1154, 11, 293, 7297, 291, 458, 264, 2158, 295, 2031, 13], "avg_logprob": -0.20034721692403157, "compression_ratio": 1.2377049180327868, "no_speech_prob": 0.0, "words": [{"start": 1862.75, "end": 1863.11, "word": " What's", "probability": 0.732421875}, {"start": 1863.11, "end": 1863.25, "word": " your", "probability": 0.8955078125}, {"start": 1863.25, "end": 1863.61, "word": " answer?", "probability": 0.95263671875}, {"start": 1868.39, "end": 1869.11, "word": " 3", "probability": 0.79736328125}, {"start": 1869.11, "end": 1869.63, "word": ".2", "probability": 0.961669921875}, {"start": 1869.63, "end": 1870.23, "word": " plus", "probability": 0.73046875}, {"start": 1870.23, "end": 1870.57, "word": " 2", "probability": 0.81103515625}, {"start": 1870.57, "end": 1872.41, "word": " point...", "probability": 0.3919677734375}, {"start": 1872.41, "end": 1873.19, "word": " So", "probability": 0.428466796875}, {"start": 1873.19, "end": 1873.41, "word": " around", "probability": 0.568359375}, {"start": 1873.41, "end": 1873.71, "word": " 4", "probability": 0.9833984375}, {"start": 1873.71, "end": 1874.35, "word": ".8.", "probability": 0.98681640625}, {"start": 1876.13, "end": 1876.85, "word": " So", "probability": 0.9326171875}, {"start": 1876.85, "end": 1877.11, "word": " your", "probability": 0.83642578125}, {"start": 1877.11, "end": 1877.45, "word": " answer", "probability": 0.95703125}, {"start": 1877.45, "end": 1877.87, "word": " is", "probability": 0.9501953125}, {"start": 1877.87, "end": 1878.31, "word": " 4", "probability": 0.9755859375}, {"start": 1878.31, "end": 1878.89, "word": ".84.", "probability": 0.984375}, {"start": 1883.81, "end": 1884.53, "word": " Now", "probability": 0.94482421875}, {"start": 1884.53, "end": 
1884.71, "word": " if", "probability": 0.6845703125}, {"start": 1884.71, "end": 1884.81, "word": " you", "probability": 0.95947265625}, {"start": 1884.81, "end": 1884.99, "word": " go", "probability": 0.9580078125}, {"start": 1884.99, "end": 1885.27, "word": " back", "probability": 0.8759765625}, {"start": 1885.27, "end": 1885.41, "word": " to", "probability": 0.94921875}, {"start": 1885.41, "end": 1885.49, "word": " the", "probability": 0.91552734375}, {"start": 1885.49, "end": 1885.83, "word": " problem,", "probability": 0.86865234375}, {"start": 1886.79, "end": 1887.19, "word": " and", "probability": 0.904296875}, {"start": 1887.19, "end": 1887.81, "word": " suppose", "probability": 0.91162109375}, {"start": 1887.81, "end": 1889.47, "word": " you", "probability": 0.8212890625}, {"start": 1889.47, "end": 1889.63, "word": " know", "probability": 0.89453125}, {"start": 1889.63, "end": 1889.79, "word": " the", "probability": 0.9189453125}, {"start": 1889.79, "end": 1890.15, "word": " value", "probability": 0.97314453125}, {"start": 1890.15, "end": 1890.49, "word": " of", "probability": 0.9677734375}, {"start": 1890.49, "end": 1890.83, "word": " x.", "probability": 0.4951171875}], "temperature": 1.0}, {"id": 68, "seek": 192363, "start": 1894.25, "end": 1923.63, "text": " So the probability of X. Double check to the answer. 4.84. Just check. 
V of X greater than this value should be", "tokens": [407, 264, 8482, 295, 1783, 13, 16633, 1520, 281, 264, 1867, 13, 1017, 13, 25494, 13, 1449, 1520, 13, 691, 295, 1783, 5044, 813, 341, 2158, 820, 312], "avg_logprob": -0.23356680417882986, "compression_ratio": 1.1313131313131313, "no_speech_prob": 0.0, "words": [{"start": 1894.25, "end": 1894.71, "word": " So", "probability": 0.759765625}, {"start": 1894.71, "end": 1894.95, "word": " the", "probability": 0.68408203125}, {"start": 1894.95, "end": 1895.33, "word": " probability", "probability": 0.951171875}, {"start": 1895.33, "end": 1895.61, "word": " of", "probability": 0.96142578125}, {"start": 1895.61, "end": 1896.01, "word": " X.", "probability": 0.77099609375}, {"start": 1902.99, "end": 1903.79, "word": " Double", "probability": 0.5556640625}, {"start": 1903.79, "end": 1904.17, "word": " check", "probability": 0.82861328125}, {"start": 1904.17, "end": 1904.59, "word": " to", "probability": 0.734375}, {"start": 1904.59, "end": 1904.71, "word": " the", "probability": 0.904296875}, {"start": 1904.71, "end": 1905.11, "word": " answer.", "probability": 0.958984375}, {"start": 1909.49, "end": 1910.29, "word": " 4", "probability": 0.6083984375}, {"start": 1910.29, "end": 1910.83, "word": ".84.", "probability": 0.923828125}, {"start": 1911.97, "end": 1912.43, "word": " Just", "probability": 0.78466796875}, {"start": 1912.43, "end": 1912.87, "word": " check.", "probability": 0.89208984375}, {"start": 1914.95, "end": 1915.17, "word": " V", "probability": 0.47314453125}, {"start": 1915.17, "end": 1915.33, "word": " of", "probability": 0.94091796875}, {"start": 1915.33, "end": 1915.71, "word": " X", "probability": 0.84228515625}, {"start": 1915.71, "end": 1917.03, "word": " greater", "probability": 0.70703125}, {"start": 1917.03, "end": 1917.35, "word": " than", "probability": 0.9482421875}, {"start": 1917.35, "end": 1917.61, "word": " this", "probability": 0.9521484375}, {"start": 1917.61, "end": 1918.01, "word": " 
value", "probability": 0.97705078125}, {"start": 1918.01, "end": 1923.29, "word": " should", "probability": 0.78759765625}, {"start": 1923.29, "end": 1923.63, "word": " be", "probability": 0.95458984375}], "temperature": 1.0}, {"id": 69, "seek": 195058, "start": 1924.24, "end": 1950.58, "text": " Two percent. Two percent. So the probability of X greater than this value should be equal to one zero. So this problem is called backward normal calculation because here first step we find the value of this score corresponding to this probability. Be careful.", "tokens": [4453, 3043, 13, 4453, 3043, 13, 407, 264, 8482, 295, 1783, 5044, 813, 341, 2158, 820, 312, 2681, 281, 472, 4018, 13, 407, 341, 1154, 307, 1219, 23897, 2710, 17108, 570, 510, 700, 1823, 321, 915, 264, 2158, 295, 341, 6175, 11760, 281, 341, 8482, 13, 879, 5026, 13], "avg_logprob": -0.2842187422513962, "compression_ratio": 1.5294117647058822, "no_speech_prob": 0.0, "words": [{"start": 1924.24, "end": 1924.46, "word": " Two", "probability": 0.13427734375}, {"start": 1924.46, "end": 1924.76, "word": " percent.", "probability": 0.9482421875}, {"start": 1925.08, "end": 1925.3, "word": " Two", "probability": 0.53173828125}, {"start": 1925.3, "end": 1925.66, "word": " percent.", "probability": 0.95751953125}, {"start": 1928.02, "end": 1928.66, "word": " So", "probability": 0.79443359375}, {"start": 1928.66, "end": 1929.02, "word": " the", "probability": 0.5380859375}, {"start": 1929.02, "end": 1929.3, "word": " probability", "probability": 0.8134765625}, {"start": 1929.3, "end": 1929.5, "word": " of", "probability": 0.9072265625}, {"start": 1929.5, "end": 1929.64, "word": " X", "probability": 0.81591796875}, {"start": 1929.64, "end": 1930.04, "word": " greater", "probability": 0.9052734375}, {"start": 1930.04, "end": 1930.32, "word": " than", "probability": 0.9453125}, {"start": 1930.32, "end": 1930.56, "word": " this", "probability": 0.94287109375}, {"start": 1930.56, "end": 1930.96, "word": " value", 
"probability": 0.97412109375}, {"start": 1930.96, "end": 1931.68, "word": " should", "probability": 0.91015625}, {"start": 1931.68, "end": 1931.9, "word": " be", "probability": 0.93994140625}, {"start": 1931.9, "end": 1932.18, "word": " equal", "probability": 0.447265625}, {"start": 1932.18, "end": 1932.94, "word": " to", "probability": 0.84423828125}, {"start": 1932.94, "end": 1933.44, "word": " one", "probability": 0.362060546875}, {"start": 1933.44, "end": 1933.74, "word": " zero.", "probability": 0.6611328125}, {"start": 1935.78, "end": 1936.42, "word": " So", "probability": 0.91845703125}, {"start": 1936.42, "end": 1936.62, "word": " this", "probability": 0.90185546875}, {"start": 1936.62, "end": 1937.4, "word": " problem", "probability": 0.56494140625}, {"start": 1937.4, "end": 1937.6, "word": " is", "probability": 0.93310546875}, {"start": 1937.6, "end": 1937.84, "word": " called", "probability": 0.88330078125}, {"start": 1937.84, "end": 1938.26, "word": " backward", "probability": 0.833984375}, {"start": 1938.26, "end": 1938.98, "word": " normal", "probability": 0.85400390625}, {"start": 1938.98, "end": 1939.56, "word": " calculation", "probability": 0.8583984375}, {"start": 1939.56, "end": 1940.8, "word": " because", "probability": 0.47900390625}, {"start": 1940.8, "end": 1941.04, "word": " here", "probability": 0.80419921875}, {"start": 1941.04, "end": 1941.56, "word": " first", "probability": 0.418212890625}, {"start": 1941.56, "end": 1942.18, "word": " step", "probability": 0.923828125}, {"start": 1942.18, "end": 1942.5, "word": " we", "probability": 0.80078125}, {"start": 1942.5, "end": 1944.72, "word": " find", "probability": 0.8544921875}, {"start": 1944.72, "end": 1944.96, "word": " the", "probability": 0.90380859375}, {"start": 1944.96, "end": 1945.18, "word": " value", "probability": 0.8837890625}, {"start": 1945.18, "end": 1945.3, "word": " of", "probability": 0.92822265625}, {"start": 1945.3, "end": 1945.44, "word": " this", "probability": 
0.3857421875}, {"start": 1945.44, "end": 1945.82, "word": " score", "probability": 0.2078857421875}, {"start": 1945.82, "end": 1947.5, "word": " corresponding", "probability": 0.72119140625}, {"start": 1947.5, "end": 1947.82, "word": " to", "probability": 0.96337890625}, {"start": 1947.82, "end": 1948.04, "word": " this", "probability": 0.9453125}, {"start": 1948.04, "end": 1948.46, "word": " probability.", "probability": 0.96142578125}, {"start": 1949.92, "end": 1950.28, "word": " Be", "probability": 0.95947265625}, {"start": 1950.28, "end": 1950.58, "word": " careful.", "probability": 0.9619140625}], "temperature": 1.0}, {"id": 70, "seek": 197356, "start": 1952.32, "end": 1973.56, "text": " The probability of X greater than 2 is 0.02. So my value here should be to the right. Because it says greater than A is just 2%. If you switch the position of A, for example, if A is on this side,", "tokens": [440, 8482, 295, 1783, 5044, 813, 568, 307, 1958, 13, 12756, 13, 407, 452, 2158, 510, 820, 312, 281, 264, 558, 13, 1436, 309, 1619, 5044, 813, 316, 307, 445, 568, 6856, 759, 291, 3679, 264, 2535, 295, 316, 11, 337, 1365, 11, 498, 316, 307, 322, 341, 1252, 11], "avg_logprob": -0.2092524486429551, "compression_ratio": 1.3310810810810811, "no_speech_prob": 0.0, "words": [{"start": 1952.32, "end": 1952.64, "word": " The", "probability": 0.57958984375}, {"start": 1952.64, "end": 1953.08, "word": " probability", "probability": 0.79638671875}, {"start": 1953.08, "end": 1953.28, "word": " of", "probability": 0.94873046875}, {"start": 1953.28, "end": 1953.42, "word": " X", "probability": 0.83056640625}, {"start": 1953.42, "end": 1953.76, "word": " greater", "probability": 0.81103515625}, {"start": 1953.76, "end": 1954.1, "word": " than", "probability": 0.95263671875}, {"start": 1954.1, "end": 1954.28, "word": " 2", "probability": 0.67333984375}, {"start": 1954.28, "end": 1954.44, "word": " is", "probability": 0.92724609375}, {"start": 1954.44, "end": 1954.62, "word": " 0", 
"probability": 0.55126953125}, {"start": 1954.62, "end": 1954.96, "word": ".02.", "probability": 0.942626953125}, {"start": 1955.82, "end": 1956.08, "word": " So", "probability": 0.8974609375}, {"start": 1956.08, "end": 1956.4, "word": " my", "probability": 0.7333984375}, {"start": 1956.4, "end": 1957.42, "word": " value", "probability": 0.96728515625}, {"start": 1957.42, "end": 1957.82, "word": " here", "probability": 0.83154296875}, {"start": 1957.82, "end": 1958.56, "word": " should", "probability": 0.94189453125}, {"start": 1958.56, "end": 1958.74, "word": " be", "probability": 0.95263671875}, {"start": 1958.74, "end": 1958.88, "word": " to", "probability": 0.958984375}, {"start": 1958.88, "end": 1959.02, "word": " the", "probability": 0.92333984375}, {"start": 1959.02, "end": 1959.34, "word": " right.", "probability": 0.92236328125}, {"start": 1961.34, "end": 1962.02, "word": " Because", "probability": 0.88037109375}, {"start": 1962.02, "end": 1962.36, "word": " it", "probability": 0.90478515625}, {"start": 1962.36, "end": 1962.8, "word": " says", "probability": 0.880859375}, {"start": 1962.8, "end": 1963.82, "word": " greater", "probability": 0.5712890625}, {"start": 1963.82, "end": 1964.26, "word": " than", "probability": 0.953125}, {"start": 1964.26, "end": 1964.7, "word": " A", "probability": 0.669921875}, {"start": 1964.7, "end": 1964.98, "word": " is", "probability": 0.9365234375}, {"start": 1964.98, "end": 1965.22, "word": " just", "probability": 0.912109375}, {"start": 1965.22, "end": 1965.88, "word": " 2%.", "probability": 0.763916015625}, {"start": 1965.88, "end": 1966.7, "word": " If", "probability": 0.95654296875}, {"start": 1966.7, "end": 1966.84, "word": " you", "probability": 0.916015625}, {"start": 1966.84, "end": 1967.38, "word": " switch", "probability": 0.97216796875}, {"start": 1967.38, "end": 1969.6, "word": " the", "probability": 0.884765625}, {"start": 1969.6, "end": 1970.1, "word": " position", "probability": 0.94921875}, {"start": 
1970.1, "end": 1970.4, "word": " of", "probability": 0.96728515625}, {"start": 1970.4, "end": 1970.72, "word": " A,", "probability": 0.97216796875}, {"start": 1971.08, "end": 1971.24, "word": " for", "probability": 0.95166015625}, {"start": 1971.24, "end": 1971.66, "word": " example,", "probability": 0.97607421875}, {"start": 1972.46, "end": 1972.74, "word": " if", "probability": 0.90771484375}, {"start": 1972.74, "end": 1972.92, "word": " A", "probability": 0.9521484375}, {"start": 1972.92, "end": 1973.02, "word": " is", "probability": 0.5263671875}, {"start": 1973.02, "end": 1973.06, "word": " on", "probability": 0.31396484375}, {"start": 1973.06, "end": 1973.22, "word": " this", "probability": 0.94482421875}, {"start": 1973.22, "end": 1973.56, "word": " side,", "probability": 0.86328125}], "temperature": 1.0}, {"id": 71, "seek": 200245, "start": 1976.15, "end": 2002.45, "text": " And he asked about E of X greater than E is 2%. This area is not 2%. From A up to infinity here, this area is not 2% because at least it's greater than 0.5. Make sense? So your A should be to the right side. 
Because the value of X greater than E, 2% is on the other side.", "tokens": [400, 415, 2351, 466, 462, 295, 1783, 5044, 813, 462, 307, 568, 6856, 639, 1859, 307, 406, 568, 6856, 3358, 316, 493, 281, 13202, 510, 11, 341, 1859, 307, 406, 568, 4, 570, 412, 1935, 309, 311, 5044, 813, 1958, 13, 20, 13, 4387, 2020, 30, 407, 428, 316, 820, 312, 281, 264, 558, 1252, 13, 1436, 264, 2158, 295, 1783, 5044, 813, 462, 11, 568, 4, 307, 322, 264, 661, 1252, 13], "avg_logprob": -0.2582348117957244, "compression_ratio": 1.5632183908045978, "no_speech_prob": 0.0, "words": [{"start": 1976.15, "end": 1976.45, "word": " And", "probability": 0.5810546875}, {"start": 1976.45, "end": 1976.55, "word": " he", "probability": 0.603515625}, {"start": 1976.55, "end": 1976.73, "word": " asked", "probability": 0.438232421875}, {"start": 1976.73, "end": 1977.13, "word": " about", "probability": 0.86474609375}, {"start": 1977.13, "end": 1977.47, "word": " E", "probability": 0.437255859375}, {"start": 1977.47, "end": 1977.59, "word": " of", "probability": 0.86279296875}, {"start": 1977.59, "end": 1977.85, "word": " X", "probability": 0.912109375}, {"start": 1977.85, "end": 1978.27, "word": " greater", "probability": 0.8583984375}, {"start": 1978.27, "end": 1978.55, "word": " than", "probability": 0.95703125}, {"start": 1978.55, "end": 1978.65, "word": " E", "probability": 0.5283203125}, {"start": 1978.65, "end": 1978.75, "word": " is", "probability": 0.71630859375}, {"start": 1978.75, "end": 1979.37, "word": " 2%.", "probability": 0.5806884765625}, {"start": 1979.37, "end": 1980.59, "word": " This", "probability": 0.82470703125}, {"start": 1980.59, "end": 1981.11, "word": " area", "probability": 0.8994140625}, {"start": 1981.11, "end": 1982.07, "word": " is", "probability": 0.9296875}, {"start": 1982.07, "end": 1982.25, "word": " not", "probability": 0.9453125}, {"start": 1982.25, "end": 1982.85, "word": " 2%.", "probability": 0.805908203125}, {"start": 1982.85, "end": 1984.39, "word": " From", 
"probability": 0.72314453125}, {"start": 1984.39, "end": 1984.61, "word": " A", "probability": 0.8740234375}, {"start": 1984.61, "end": 1984.95, "word": " up", "probability": 0.50830078125}, {"start": 1984.95, "end": 1985.11, "word": " to", "probability": 0.9609375}, {"start": 1985.11, "end": 1985.45, "word": " infinity", "probability": 0.85888671875}, {"start": 1985.45, "end": 1985.85, "word": " here,", "probability": 0.82568359375}, {"start": 1986.53, "end": 1987.57, "word": " this", "probability": 0.9267578125}, {"start": 1987.57, "end": 1988.01, "word": " area", "probability": 0.89306640625}, {"start": 1988.01, "end": 1988.31, "word": " is", "probability": 0.9033203125}, {"start": 1988.31, "end": 1988.81, "word": " not", "probability": 0.921875}, {"start": 1988.81, "end": 1989.83, "word": " 2", "probability": 0.96533203125}, {"start": 1989.83, "end": 1990.05, "word": "%", "probability": 0.435791015625}, {"start": 1990.05, "end": 1990.39, "word": " because", "probability": 0.8857421875}, {"start": 1990.39, "end": 1990.63, "word": " at", "probability": 0.73291015625}, {"start": 1990.63, "end": 1990.93, "word": " least", "probability": 0.958984375}, {"start": 1990.93, "end": 1991.57, "word": " it's", "probability": 0.640869140625}, {"start": 1991.57, "end": 1991.79, "word": " greater", "probability": 0.89599609375}, {"start": 1991.79, "end": 1992.07, "word": " than", "probability": 0.93994140625}, {"start": 1992.07, "end": 1992.29, "word": " 0", "probability": 0.87548828125}, {"start": 1992.29, "end": 1992.73, "word": ".5.", "probability": 0.996826171875}, {"start": 1993.61, "end": 1994.07, "word": " Make", "probability": 0.6806640625}, {"start": 1994.07, "end": 1994.37, "word": " sense?", "probability": 0.833984375}, {"start": 1994.85, "end": 1995.05, "word": " So", "probability": 0.93310546875}, {"start": 1995.05, "end": 1995.27, "word": " your", "probability": 0.77294921875}, {"start": 1995.27, "end": 1995.43, "word": " A", "probability": 0.91357421875}, 
{"start": 1995.43, "end": 1995.65, "word": " should", "probability": 0.9697265625}, {"start": 1995.65, "end": 1995.87, "word": " be", "probability": 0.9462890625}, {"start": 1995.87, "end": 1995.97, "word": " to", "probability": 0.89404296875}, {"start": 1995.97, "end": 1996.11, "word": " the", "probability": 0.91162109375}, {"start": 1996.11, "end": 1996.29, "word": " right", "probability": 0.9208984375}, {"start": 1996.29, "end": 1996.61, "word": " side.", "probability": 0.84814453125}, {"start": 1997.59, "end": 1997.95, "word": " Because", "probability": 0.9267578125}, {"start": 1997.95, "end": 1998.11, "word": " the", "probability": 0.481689453125}, {"start": 1998.11, "end": 1998.29, "word": " value", "probability": 0.1839599609375}, {"start": 1998.29, "end": 1998.49, "word": " of", "probability": 0.86279296875}, {"start": 1998.49, "end": 1998.61, "word": " X", "probability": 0.93408203125}, {"start": 1998.61, "end": 1998.91, "word": " greater", "probability": 0.77001953125}, {"start": 1998.91, "end": 1999.19, "word": " than", "probability": 0.9541015625}, {"start": 1999.19, "end": 1999.39, "word": " E,", "probability": 0.51806640625}, {"start": 1999.85, "end": 2000.01, "word": " 2", "probability": 0.9462890625}, {"start": 2000.01, "end": 2000.55, "word": "%", "probability": 0.83203125}, {"start": 2000.55, "end": 2001.61, "word": " is", "probability": 0.89599609375}, {"start": 2001.61, "end": 2001.81, "word": " on", "probability": 0.5390625}, {"start": 2001.81, "end": 2001.93, "word": " the", "probability": 0.9189453125}, {"start": 2001.93, "end": 2002.11, "word": " other", "probability": 0.87939453125}, {"start": 2002.11, "end": 2002.45, "word": " side.", "probability": 0.86962890625}], "temperature": 1.0}, {"id": 72, "seek": 201766, "start": 2004.7, "end": 2017.66, "text": " Let's move to the next one. 
For the same question.", "tokens": [961, 311, 1286, 281, 264, 958, 472, 13, 1171, 264, 912, 1168, 13], "avg_logprob": -0.13797432397093093, "compression_ratio": 0.9107142857142857, "no_speech_prob": 0.0, "words": [{"start": 2004.7, "end": 2005.2, "word": " Let's", "probability": 0.876708984375}, {"start": 2005.2, "end": 2005.54, "word": " move", "probability": 0.94677734375}, {"start": 2005.54, "end": 2005.74, "word": " to", "probability": 0.92724609375}, {"start": 2005.74, "end": 2005.86, "word": " the", "probability": 0.9189453125}, {"start": 2005.86, "end": 2006.04, "word": " next", "probability": 0.9423828125}, {"start": 2006.04, "end": 2006.4, "word": " one.", "probability": 0.9287109375}, {"start": 2016.18, "end": 2016.88, "word": " For", "probability": 0.8427734375}, {"start": 2016.88, "end": 2017.02, "word": " the", "probability": 0.93017578125}, {"start": 2017.02, "end": 2017.26, "word": " same", "probability": 0.90576171875}, {"start": 2017.26, "end": 2017.66, "word": " question.", "probability": 0.927734375}], "temperature": 1.0}, {"id": 73, "seek": 206151, "start": 2032.79, "end": 2061.51, "text": " Again, the owner of Catfish Market determined the average weight of a catfish 3.2 with standardization 0.8 and we are assuming the weights of catfish are normally distributed, kiosk above. Above what weight? Do 89.8% of the weights care? 
Above?", "tokens": [3764, 11, 264, 7289, 295, 9565, 11608, 15596, 9540, 264, 4274, 3364, 295, 257, 3857, 11608, 805, 13, 17, 365, 3832, 2144, 1958, 13, 23, 293, 321, 366, 11926, 264, 17443, 295, 3857, 11608, 366, 5646, 12631, 11, 350, 2717, 74, 3673, 13, 32691, 437, 3364, 30, 1144, 31877, 13, 23, 4, 295, 264, 17443, 1127, 30, 32691, 30], "avg_logprob": -0.2997395798563957, "compression_ratio": 1.467065868263473, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2032.79, "end": 2033.61, "word": " Again,", "probability": 0.6787109375}, {"start": 2033.77, "end": 2033.85, "word": " the", "probability": 0.83203125}, {"start": 2033.85, "end": 2034.09, "word": " owner", "probability": 0.89306640625}, {"start": 2034.09, "end": 2034.27, "word": " of", "probability": 0.9765625}, {"start": 2034.27, "end": 2034.77, "word": " Catfish", "probability": 0.6864013671875}, {"start": 2034.77, "end": 2035.39, "word": " Market", "probability": 0.60400390625}, {"start": 2035.39, "end": 2036.91, "word": " determined", "probability": 0.857421875}, {"start": 2036.91, "end": 2037.15, "word": " the", "probability": 0.701171875}, {"start": 2037.15, "end": 2037.39, "word": " average", "probability": 0.77490234375}, {"start": 2037.39, "end": 2037.71, "word": " weight", "probability": 0.8740234375}, {"start": 2037.71, "end": 2038.45, "word": " of", "probability": 0.96240234375}, {"start": 2038.45, "end": 2038.57, "word": " a", "probability": 0.9423828125}, {"start": 2038.57, "end": 2038.97, "word": " catfish", "probability": 0.9013671875}, {"start": 2038.97, "end": 2039.29, "word": " 3", "probability": 0.465576171875}, {"start": 2039.29, "end": 2039.81, "word": ".2", "probability": 0.98095703125}, {"start": 2039.81, "end": 2040.93, "word": " with", "probability": 0.56201171875}, {"start": 2040.93, "end": 2041.67, "word": " standardization", "probability": 0.600341796875}, {"start": 2041.67, "end": 2041.99, "word": " 0", "probability": 0.409912109375}, {"start": 2041.99, "end": 
2042.41, "word": ".8", "probability": 0.99365234375}, {"start": 2042.41, "end": 2042.65, "word": " and", "probability": 0.415771484375}, {"start": 2042.65, "end": 2042.81, "word": " we", "probability": 0.9462890625}, {"start": 2042.81, "end": 2042.93, "word": " are", "probability": 0.896484375}, {"start": 2042.93, "end": 2043.39, "word": " assuming", "probability": 0.90869140625}, {"start": 2043.39, "end": 2044.67, "word": " the", "probability": 0.81982421875}, {"start": 2044.67, "end": 2044.95, "word": " weights", "probability": 0.85205078125}, {"start": 2044.95, "end": 2045.21, "word": " of", "probability": 0.96875}, {"start": 2045.21, "end": 2045.71, "word": " catfish", "probability": 0.926025390625}, {"start": 2045.71, "end": 2046.43, "word": " are", "probability": 0.92529296875}, {"start": 2046.43, "end": 2046.79, "word": " normally", "probability": 0.76611328125}, {"start": 2046.79, "end": 2047.39, "word": " distributed,", "probability": 0.912109375}, {"start": 2047.93, "end": 2048.27, "word": " kiosk", "probability": 0.6483561197916666}, {"start": 2048.27, "end": 2048.45, "word": " above.", "probability": 0.677734375}, {"start": 2049.27, "end": 2049.87, "word": " Above", "probability": 0.90283203125}, {"start": 2049.87, "end": 2050.25, "word": " what", "probability": 0.9384765625}, {"start": 2050.25, "end": 2050.71, "word": " weight?", "probability": 0.92236328125}, {"start": 2052.79, "end": 2053.17, "word": " Do", "probability": 0.8232421875}, {"start": 2053.17, "end": 2054.67, "word": " 89", "probability": 0.86279296875}, {"start": 2054.67, "end": 2055.47, "word": ".8", "probability": 0.677734375}, {"start": 2055.47, "end": 2055.95, "word": "%", "probability": 0.61962890625}, {"start": 2055.95, "end": 2056.17, "word": " of", "probability": 0.94677734375}, {"start": 2056.17, "end": 2056.35, "word": " the", "probability": 0.89013671875}, {"start": 2056.35, "end": 2057.39, "word": " weights", "probability": 0.453857421875}, {"start": 2057.39, "end": 2058.07, 
"word": " care?", "probability": 0.43798828125}, {"start": 2060.63, "end": 2061.51, "word": " Above?", "probability": 0.908203125}], "temperature": 1.0}, {"id": 74, "seek": 209156, "start": 2062.26, "end": 2091.56, "text": " And above, so x greater than. X minus. And 98, 89, sorry, 89. So this is a percentage he's looking for. 89.8%. Now what's the value of A? Or above what weight? Do 89.8% of the weights occur?", "tokens": [400, 3673, 11, 370, 2031, 5044, 813, 13, 1783, 3175, 13, 400, 20860, 11, 31877, 11, 2597, 11, 31877, 13, 407, 341, 307, 257, 9668, 415, 311, 1237, 337, 13, 31877, 13, 23, 6856, 823, 437, 311, 264, 2158, 295, 316, 30, 1610, 3673, 437, 3364, 30, 1144, 31877, 13, 23, 4, 295, 264, 17443, 5160, 30], "avg_logprob": -0.25471443451684095, "compression_ratio": 1.3082191780821917, "no_speech_prob": 0.0, "words": [{"start": 2062.26, "end": 2062.8, "word": " And", "probability": 0.259521484375}, {"start": 2062.8, "end": 2063.3, "word": " above,", "probability": 0.89404296875}, {"start": 2063.54, "end": 2063.76, "word": " so", "probability": 0.8408203125}, {"start": 2063.76, "end": 2064.02, "word": " x", "probability": 0.68896484375}, {"start": 2064.02, "end": 2064.36, "word": " greater", "probability": 0.88916015625}, {"start": 2064.36, "end": 2064.7, "word": " than.", "probability": 0.94677734375}, {"start": 2064.84, "end": 2065.02, "word": " X", "probability": 0.1619873046875}, {"start": 2065.02, "end": 2065.28, "word": " minus.", "probability": 0.97802734375}, {"start": 2067.68, "end": 2067.98, "word": " And", "probability": 0.50634765625}, {"start": 2067.98, "end": 2068.5, "word": " 98,", "probability": 0.56103515625}, {"start": 2069.74, "end": 2070.28, "word": " 89,", "probability": 0.966796875}, {"start": 2070.4, "end": 2070.56, "word": " sorry,", "probability": 0.8349609375}, {"start": 2070.62, "end": 2071.06, "word": " 89.", "probability": 0.9921875}, {"start": 2072.68, "end": 2073.36, "word": " So", "probability": 0.85986328125}, {"start": 
2073.36, "end": 2073.58, "word": " this", "probability": 0.9267578125}, {"start": 2073.58, "end": 2073.7, "word": " is", "probability": 0.92822265625}, {"start": 2073.7, "end": 2073.8, "word": " a", "probability": 0.5107421875}, {"start": 2073.8, "end": 2074.32, "word": " percentage", "probability": 0.89111328125}, {"start": 2074.32, "end": 2074.9, "word": " he's", "probability": 0.68994140625}, {"start": 2074.9, "end": 2075.2, "word": " looking", "probability": 0.91845703125}, {"start": 2075.2, "end": 2075.7, "word": " for.", "probability": 0.94677734375}, {"start": 2077.04, "end": 2077.72, "word": " 89", "probability": 0.97412109375}, {"start": 2077.72, "end": 2080.58, "word": ".8%.", "probability": 0.9635416666666666}, {"start": 2080.58, "end": 2082.08, "word": " Now", "probability": 0.89208984375}, {"start": 2082.08, "end": 2082.3, "word": " what's", "probability": 0.78271484375}, {"start": 2082.3, "end": 2082.36, "word": " the", "probability": 0.916015625}, {"start": 2082.36, "end": 2082.52, "word": " value", "probability": 0.9794921875}, {"start": 2082.52, "end": 2082.68, "word": " of", "probability": 0.9560546875}, {"start": 2082.68, "end": 2082.9, "word": " A?", "probability": 0.630859375}, {"start": 2085.18, "end": 2085.86, "word": " Or", "probability": 0.92333984375}, {"start": 2085.86, "end": 2086.38, "word": " above", "probability": 0.93798828125}, {"start": 2086.38, "end": 2086.64, "word": " what", "probability": 0.939453125}, {"start": 2086.64, "end": 2087.08, "word": " weight?", "probability": 0.92822265625}, {"start": 2088.1, "end": 2088.32, "word": " Do", "probability": 0.7373046875}, {"start": 2088.32, "end": 2089.08, "word": " 89", "probability": 0.9921875}, {"start": 2089.08, "end": 2090.06, "word": ".8", "probability": 0.998779296875}, {"start": 2090.06, "end": 2090.48, "word": "%", "probability": 0.99365234375}, {"start": 2090.48, "end": 2090.82, "word": " of", "probability": 0.95068359375}, {"start": 2090.82, "end": 2091.02, "word": " the", 
"probability": 0.921875}, {"start": 2091.02, "end": 2091.24, "word": " weights", "probability": 0.68701171875}, {"start": 2091.24, "end": 2091.56, "word": " occur?", "probability": 0.338623046875}], "temperature": 1.0}, {"id": 75, "seek": 212039, "start": 2097.73, "end": 2120.39, "text": " You just make the normal curve in order to understand the probability. Now, A should be to the right or to the left side? Imagine A in the right side here. Do you think the area above A is 89%? It's smaller than 0.5 for sure.", "tokens": [509, 445, 652, 264, 2710, 7605, 294, 1668, 281, 1223, 264, 8482, 13, 823, 11, 316, 820, 312, 281, 264, 558, 420, 281, 264, 1411, 1252, 30, 11739, 316, 294, 264, 558, 1252, 510, 13, 1144, 291, 519, 264, 1859, 3673, 316, 307, 31877, 4, 30, 467, 311, 4356, 813, 1958, 13, 20, 337, 988, 13], "avg_logprob": -0.1888706182178698, "compression_ratio": 1.3696969696969696, "no_speech_prob": 0.0, "words": [{"start": 2097.73, "end": 2098.29, "word": " You", "probability": 0.11602783203125}, {"start": 2098.29, "end": 2098.85, "word": " just", "probability": 0.85546875}, {"start": 2098.85, "end": 2099.25, "word": " make", "probability": 0.9130859375}, {"start": 2099.25, "end": 2099.43, "word": " the", "probability": 0.78662109375}, {"start": 2099.43, "end": 2099.77, "word": " normal", "probability": 0.84375}, {"start": 2099.77, "end": 2100.19, "word": " curve", "probability": 0.5791015625}, {"start": 2100.19, "end": 2102.19, "word": " in", "probability": 0.58154296875}, {"start": 2102.19, "end": 2102.35, "word": " order", "probability": 0.9091796875}, {"start": 2102.35, "end": 2102.67, "word": " to", "probability": 0.9677734375}, {"start": 2102.67, "end": 2103.47, "word": " understand", "probability": 0.7548828125}, {"start": 2103.47, "end": 2103.65, "word": " the", "probability": 0.88916015625}, {"start": 2103.65, "end": 2104.01, "word": " probability.", "probability": 0.9375}, {"start": 2104.95, "end": 2105.51, "word": " Now,", "probability": 0.94775390625}, 
{"start": 2106.27, "end": 2106.53, "word": " A", "probability": 0.7568359375}, {"start": 2106.53, "end": 2106.73, "word": " should", "probability": 0.97021484375}, {"start": 2106.73, "end": 2106.89, "word": " be", "probability": 0.93115234375}, {"start": 2106.89, "end": 2107.01, "word": " to", "probability": 0.94384765625}, {"start": 2107.01, "end": 2107.15, "word": " the", "probability": 0.9130859375}, {"start": 2107.15, "end": 2107.39, "word": " right", "probability": 0.9072265625}, {"start": 2107.39, "end": 2107.55, "word": " or", "probability": 0.9306640625}, {"start": 2107.55, "end": 2107.67, "word": " to", "probability": 0.8994140625}, {"start": 2107.67, "end": 2107.79, "word": " the", "probability": 0.91259765625}, {"start": 2107.79, "end": 2107.93, "word": " left", "probability": 0.94189453125}, {"start": 2107.93, "end": 2108.07, "word": " side?", "probability": 0.708984375}, {"start": 2108.91, "end": 2109.47, "word": " Imagine", "probability": 0.55517578125}, {"start": 2109.47, "end": 2111.05, "word": " A", "probability": 0.93798828125}, {"start": 2111.05, "end": 2111.41, "word": " in", "probability": 0.64501953125}, {"start": 2111.41, "end": 2111.55, "word": " the", "probability": 0.91552734375}, {"start": 2111.55, "end": 2111.75, "word": " right", "probability": 0.91015625}, {"start": 2111.75, "end": 2112.07, "word": " side", "probability": 0.80419921875}, {"start": 2112.07, "end": 2112.33, "word": " here.", "probability": 0.7548828125}, {"start": 2113.57, "end": 2114.13, "word": " Do", "probability": 0.78955078125}, {"start": 2114.13, "end": 2114.25, "word": " you", "probability": 0.96044921875}, {"start": 2114.25, "end": 2114.47, "word": " think", "probability": 0.9111328125}, {"start": 2114.47, "end": 2114.65, "word": " the", "probability": 0.88916015625}, {"start": 2114.65, "end": 2114.93, "word": " area", "probability": 0.89453125}, {"start": 2114.93, "end": 2115.37, "word": " above", "probability": 0.9306640625}, {"start": 2115.37, "end": 2115.71, 
"word": " A", "probability": 0.97509765625}, {"start": 2115.71, "end": 2115.99, "word": " is", "probability": 0.95361328125}, {"start": 2115.99, "end": 2116.97, "word": " 89", "probability": 0.85595703125}, {"start": 2116.97, "end": 2117.33, "word": "%?", "probability": 0.68798828125}, {"start": 2118.33, "end": 2118.89, "word": " It's", "probability": 0.940185546875}, {"start": 2118.89, "end": 2119.25, "word": " smaller", "probability": 0.83154296875}, {"start": 2119.25, "end": 2119.43, "word": " than", "probability": 0.94091796875}, {"start": 2119.43, "end": 2119.63, "word": " 0", "probability": 0.748046875}, {"start": 2119.63, "end": 2119.89, "word": ".5", "probability": 0.991943359375}, {"start": 2119.89, "end": 2120.11, "word": " for", "probability": 0.45166015625}, {"start": 2120.11, "end": 2120.39, "word": " sure.", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 76, "seek": 214807, "start": 2121.05, "end": 2148.07, "text": " So it should be the other side. So this is your 8. Now, this area makes sense that it's above 0.5. It's 0.8980. Now, B of X greater than equals this value. And again, the table gives the area to the left. 
So this is actually", "tokens": [407, 309, 820, 312, 264, 661, 1252, 13, 407, 341, 307, 428, 1649, 13, 823, 11, 341, 1859, 1669, 2020, 300, 309, 311, 3673, 1958, 13, 20, 13, 467, 311, 1958, 13, 21115, 4702, 13, 823, 11, 363, 295, 1783, 5044, 813, 6915, 341, 2158, 13, 400, 797, 11, 264, 3199, 2709, 264, 1859, 281, 264, 1411, 13, 407, 341, 307, 767], "avg_logprob": -0.2021329417115166, "compression_ratio": 1.4240506329113924, "no_speech_prob": 0.0, "words": [{"start": 2121.05, "end": 2121.35, "word": " So", "probability": 0.6787109375}, {"start": 2121.35, "end": 2121.49, "word": " it", "probability": 0.78515625}, {"start": 2121.49, "end": 2121.67, "word": " should", "probability": 0.96044921875}, {"start": 2121.67, "end": 2121.99, "word": " be", "probability": 0.953125}, {"start": 2121.99, "end": 2123.59, "word": " the", "probability": 0.82275390625}, {"start": 2123.59, "end": 2123.81, "word": " other", "probability": 0.89208984375}, {"start": 2123.81, "end": 2124.25, "word": " side.", "probability": 0.8662109375}, {"start": 2126.77, "end": 2127.27, "word": " So", "probability": 0.83447265625}, {"start": 2127.27, "end": 2127.49, "word": " this", "probability": 0.8974609375}, {"start": 2127.49, "end": 2127.59, "word": " is", "probability": 0.93896484375}, {"start": 2127.59, "end": 2127.75, "word": " your", "probability": 0.896484375}, {"start": 2127.75, "end": 2128.03, "word": " 8.", "probability": 0.5107421875}, {"start": 2129.43, "end": 2129.69, "word": " Now,", "probability": 0.94287109375}, {"start": 2129.77, "end": 2129.95, "word": " this", "probability": 0.9267578125}, {"start": 2129.95, "end": 2130.31, "word": " area", "probability": 0.91259765625}, {"start": 2130.31, "end": 2131.15, "word": " makes", "probability": 0.483154296875}, {"start": 2131.15, "end": 2131.83, "word": " sense", "probability": 0.77978515625}, {"start": 2131.83, "end": 2132.17, "word": " that", "probability": 0.90966796875}, {"start": 2132.17, "end": 2133.29, "word": " it's", 
"probability": 0.953369140625}, {"start": 2133.29, "end": 2133.53, "word": " above", "probability": 0.9306640625}, {"start": 2133.53, "end": 2133.85, "word": " 0", "probability": 0.63671875}, {"start": 2133.85, "end": 2134.17, "word": ".5.", "probability": 0.99169921875}, {"start": 2134.43, "end": 2134.75, "word": " It's", "probability": 0.935302734375}, {"start": 2134.75, "end": 2135.03, "word": " 0", "probability": 0.96923828125}, {"start": 2135.03, "end": 2136.69, "word": ".8980.", "probability": 0.91845703125}, {"start": 2138.75, "end": 2139.31, "word": " Now,", "probability": 0.955078125}, {"start": 2139.65, "end": 2139.81, "word": " B", "probability": 0.27392578125}, {"start": 2139.81, "end": 2139.93, "word": " of", "probability": 0.69287109375}, {"start": 2139.93, "end": 2140.19, "word": " X", "probability": 0.6103515625}, {"start": 2140.19, "end": 2140.55, "word": " greater", "probability": 0.87109375}, {"start": 2140.55, "end": 2140.79, "word": " than", "probability": 0.9501953125}, {"start": 2140.79, "end": 2141.05, "word": " equals", "probability": 0.501953125}, {"start": 2141.05, "end": 2141.49, "word": " this", "probability": 0.94189453125}, {"start": 2141.49, "end": 2141.83, "word": " value.", "probability": 0.97265625}, {"start": 2142.59, "end": 2142.85, "word": " And", "probability": 0.9384765625}, {"start": 2142.85, "end": 2143.09, "word": " again,", "probability": 0.87451171875}, {"start": 2143.15, "end": 2143.27, "word": " the", "probability": 0.9208984375}, {"start": 2143.27, "end": 2143.65, "word": " table", "probability": 0.8828125}, {"start": 2143.65, "end": 2144.93, "word": " gives", "probability": 0.90478515625}, {"start": 2144.93, "end": 2145.13, "word": " the", "probability": 0.9130859375}, {"start": 2145.13, "end": 2145.35, "word": " area", "probability": 0.884765625}, {"start": 2145.35, "end": 2145.55, "word": " to", "probability": 0.962890625}, {"start": 2145.55, "end": 2145.69, "word": " the", "probability": 0.91455078125}, {"start": 
2145.69, "end": 2145.97, "word": " left.", "probability": 0.951171875}, {"start": 2146.71, "end": 2146.99, "word": " So", "probability": 0.89697265625}, {"start": 2146.99, "end": 2147.31, "word": " this", "probability": 0.92724609375}, {"start": 2147.31, "end": 2147.47, "word": " is", "probability": 0.94140625}, {"start": 2147.47, "end": 2148.07, "word": " actually", "probability": 0.884765625}], "temperature": 1.0}, {"id": 77, "seek": 217780, "start": 2148.96, "end": 2177.8, "text": " X less than A, 1 minus this value, equals 0.1020. Now go back to the normal table. Here it's 0.1020. So it should be negative. I mean, your z-scope should be negative.", "tokens": [1783, 1570, 813, 316, 11, 502, 3175, 341, 2158, 11, 6915, 1958, 13, 3279, 2009, 13, 823, 352, 646, 281, 264, 2710, 3199, 13, 1692, 309, 311, 1958, 13, 3279, 2009, 13, 407, 309, 820, 312, 3671, 13, 286, 914, 11, 428, 710, 12, 4417, 1114, 820, 312, 3671, 13], "avg_logprob": -0.22380514179959016, "compression_ratio": 1.3228346456692914, "no_speech_prob": 0.0, "words": [{"start": 2148.96, "end": 2149.4, "word": " X", "probability": 0.381103515625}, {"start": 2149.4, "end": 2150.88, "word": " less", "probability": 0.80078125}, {"start": 2150.88, "end": 2151.18, "word": " than", "probability": 0.94873046875}, {"start": 2151.18, "end": 2151.5, "word": " A,", "probability": 0.381591796875}, {"start": 2152.0, "end": 2153.08, "word": " 1", "probability": 0.666015625}, {"start": 2153.08, "end": 2153.46, "word": " minus", "probability": 0.98388671875}, {"start": 2153.46, "end": 2153.74, "word": " this", "probability": 0.92431640625}, {"start": 2153.74, "end": 2154.16, "word": " value,", "probability": 0.97265625}, {"start": 2155.18, "end": 2155.54, "word": " equals", "probability": 0.482177734375}, {"start": 2155.54, "end": 2155.82, "word": " 0", "probability": 0.8251953125}, {"start": 2155.82, "end": 2156.6, "word": ".1020.", "probability": 0.9065755208333334}, {"start": 2161.48, "end": 2162.2, "word": " Now", 
"probability": 0.85107421875}, {"start": 2162.2, "end": 2164.26, "word": " go", "probability": 0.42724609375}, {"start": 2164.26, "end": 2164.66, "word": " back", "probability": 0.87646484375}, {"start": 2164.66, "end": 2168.76, "word": " to", "probability": 0.900390625}, {"start": 2168.76, "end": 2168.9, "word": " the", "probability": 0.91650390625}, {"start": 2168.9, "end": 2169.2, "word": " normal", "probability": 0.90234375}, {"start": 2169.2, "end": 2169.58, "word": " table.", "probability": 0.88134765625}, {"start": 2170.52, "end": 2170.8, "word": " Here", "probability": 0.85595703125}, {"start": 2170.8, "end": 2171.06, "word": " it's", "probability": 0.844482421875}, {"start": 2171.06, "end": 2171.28, "word": " 0", "probability": 0.98046875}, {"start": 2171.28, "end": 2172.12, "word": ".1020.", "probability": 0.8680013020833334}, {"start": 2173.42, "end": 2174.04, "word": " So", "probability": 0.96337890625}, {"start": 2174.04, "end": 2174.2, "word": " it", "probability": 0.428466796875}, {"start": 2174.2, "end": 2174.4, "word": " should", "probability": 0.966796875}, {"start": 2174.4, "end": 2174.56, "word": " be", "probability": 0.9521484375}, {"start": 2174.56, "end": 2174.88, "word": " negative.", "probability": 0.94384765625}, {"start": 2175.72, "end": 2175.84, "word": " I", "probability": 0.84716796875}, {"start": 2175.84, "end": 2175.96, "word": " mean,", "probability": 0.9658203125}, {"start": 2175.98, "end": 2176.16, "word": " your", "probability": 0.7587890625}, {"start": 2176.16, "end": 2176.32, "word": " z", "probability": 0.51904296875}, {"start": 2176.32, "end": 2176.74, "word": "-scope", "probability": 0.85498046875}, {"start": 2176.74, "end": 2177.32, "word": " should", "probability": 0.96875}, {"start": 2177.32, "end": 2177.5, "word": " be", "probability": 0.951171875}, {"start": 2177.5, "end": 2177.8, "word": " negative.", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 78, "seek": 221123, "start": 2182.0, "end": 2211.24, 
"text": " Now look at 0.102. Exactly this value. 0.102 is minus 1.2 up to 7. So minus 1.27. Minus 1.2.", "tokens": [823, 574, 412, 1958, 13, 3279, 17, 13, 7587, 341, 2158, 13, 1958, 13, 3279, 17, 307, 3175, 502, 13, 17, 493, 281, 1614, 13, 407, 3175, 502, 13, 10076, 13, 2829, 301, 502, 13, 17, 13], "avg_logprob": -0.16930509162576576, "compression_ratio": 1.1625, "no_speech_prob": 0.0, "words": [{"start": 2182.0, "end": 2182.24, "word": " Now", "probability": 0.86669921875}, {"start": 2182.24, "end": 2182.46, "word": " look", "probability": 0.77490234375}, {"start": 2182.46, "end": 2182.64, "word": " at", "probability": 0.9677734375}, {"start": 2182.64, "end": 2182.92, "word": " 0", "probability": 0.408203125}, {"start": 2182.92, "end": 2185.64, "word": ".102.", "probability": 0.9462890625}, {"start": 2188.56, "end": 2189.6, "word": " Exactly", "probability": 0.69580078125}, {"start": 2189.6, "end": 2189.94, "word": " this", "probability": 0.93017578125}, {"start": 2189.94, "end": 2190.22, "word": " value.", "probability": 0.96044921875}, {"start": 2191.54, "end": 2192.26, "word": " 0", "probability": 0.947265625}, {"start": 2192.26, "end": 2192.94, "word": ".102", "probability": 0.97802734375}, {"start": 2192.94, "end": 2193.14, "word": " is", "probability": 0.44677734375}, {"start": 2193.14, "end": 2193.84, "word": " minus", "probability": 0.94091796875}, {"start": 2193.84, "end": 2194.9, "word": " 1", "probability": 0.94580078125}, {"start": 2194.9, "end": 2195.6, "word": ".2", "probability": 0.97900390625}, {"start": 2195.6, "end": 2196.58, "word": " up", "probability": 0.88916015625}, {"start": 2196.58, "end": 2196.8, "word": " to", "probability": 0.970703125}, {"start": 2196.8, "end": 2197.1, "word": " 7.", "probability": 0.65283203125}, {"start": 2198.18, "end": 2198.68, "word": " So", "probability": 0.55810546875}, {"start": 2198.68, "end": 2198.96, "word": " minus", "probability": 0.84228515625}, {"start": 2198.96, "end": 2199.28, "word": " 1", 
"probability": 0.99267578125}, {"start": 2199.28, "end": 2199.9, "word": ".27.", "probability": 0.98486328125}, {"start": 2209.12, "end": 2210.16, "word": " Minus", "probability": 0.970458984375}, {"start": 2210.16, "end": 2210.68, "word": " 1", "probability": 0.99462890625}, {"start": 2210.68, "end": 2211.24, "word": ".2.", "probability": 0.986328125}], "temperature": 1.0}, {"id": 79, "seek": 223996, "start": 2212.4, "end": 2239.96, "text": " All the way up to 7 is 0.102. So the corresponding z-score is minus 1.17. Now x again equals mu plus z sigma. Mu is 3.2 plus z is negative 1.17 times sigma.", "tokens": [1057, 264, 636, 493, 281, 1614, 307, 1958, 13, 3279, 17, 13, 407, 264, 11760, 710, 12, 4417, 418, 307, 3175, 502, 13, 7773, 13, 823, 2031, 797, 6915, 2992, 1804, 710, 12771, 13, 15601, 307, 805, 13, 17, 1804, 710, 307, 3671, 502, 13, 7773, 1413, 12771, 13], "avg_logprob": -0.19484375566244125, "compression_ratio": 1.256, "no_speech_prob": 0.0, "words": [{"start": 2212.4, "end": 2212.7, "word": " All", "probability": 0.60888671875}, {"start": 2212.7, "end": 2212.86, "word": " the", "probability": 0.916015625}, {"start": 2212.86, "end": 2212.98, "word": " way", "probability": 0.9560546875}, {"start": 2212.98, "end": 2213.14, "word": " up", "probability": 0.9423828125}, {"start": 2213.14, "end": 2213.28, "word": " to", "probability": 0.95751953125}, {"start": 2213.28, "end": 2213.56, "word": " 7", "probability": 0.499755859375}, {"start": 2213.56, "end": 2214.26, "word": " is", "probability": 0.67431640625}, {"start": 2214.26, "end": 2214.46, "word": " 0", "probability": 0.7177734375}, {"start": 2214.46, "end": 2215.2, "word": ".102.", "probability": 0.9578450520833334}, {"start": 2216.28, "end": 2216.98, "word": " So", "probability": 0.95361328125}, {"start": 2216.98, "end": 2217.9, "word": " the", "probability": 0.68505859375}, {"start": 2217.9, "end": 2218.44, "word": " corresponding", "probability": 0.8408203125}, {"start": 2218.44, "end": 2218.72, "word": " 
z", "probability": 0.6474609375}, {"start": 2218.72, "end": 2219.08, "word": "-score", "probability": 0.7659505208333334}, {"start": 2219.08, "end": 2219.52, "word": " is", "probability": 0.94189453125}, {"start": 2219.52, "end": 2219.84, "word": " minus", "probability": 0.56201171875}, {"start": 2219.84, "end": 2220.08, "word": " 1", "probability": 0.9716796875}, {"start": 2220.08, "end": 2220.7, "word": ".17.", "probability": 0.900146484375}, {"start": 2222.52, "end": 2223.3, "word": " Now", "probability": 0.9619140625}, {"start": 2223.3, "end": 2223.68, "word": " x", "probability": 0.53271484375}, {"start": 2223.68, "end": 2224.16, "word": " again", "probability": 0.76025390625}, {"start": 2224.16, "end": 2224.96, "word": " equals", "probability": 0.89453125}, {"start": 2224.96, "end": 2225.16, "word": " mu", "probability": 0.71044921875}, {"start": 2225.16, "end": 2225.42, "word": " plus", "probability": 0.95849609375}, {"start": 2225.42, "end": 2225.64, "word": " z", "probability": 0.59814453125}, {"start": 2225.64, "end": 2225.98, "word": " sigma.", "probability": 0.80615234375}, {"start": 2230.28, "end": 2231.06, "word": " Mu", "probability": 0.736328125}, {"start": 2231.06, "end": 2231.58, "word": " is", "probability": 0.91162109375}, {"start": 2231.58, "end": 2231.82, "word": " 3", "probability": 0.94287109375}, {"start": 2231.82, "end": 2232.34, "word": ".2", "probability": 0.959228515625}, {"start": 2232.34, "end": 2233.14, "word": " plus", "probability": 0.7509765625}, {"start": 2233.14, "end": 2235.62, "word": " z", "probability": 0.61962890625}, {"start": 2235.62, "end": 2235.76, "word": " is", "probability": 0.89892578125}, {"start": 2235.76, "end": 2236.06, "word": " negative", "probability": 0.92919921875}, {"start": 2236.06, "end": 2236.36, "word": " 1", "probability": 0.98193359375}, {"start": 2236.36, "end": 2236.98, "word": ".17", "probability": 0.994873046875}, {"start": 2236.98, "end": 2239.56, "word": " times", "probability": 0.654296875}, 
{"start": 2239.56, "end": 2239.96, "word": " sigma.", "probability": 0.92431640625}], "temperature": 1.0}, {"id": 80, "seek": 225651, "start": 2244.25, "end": 2256.51, "text": " So it's equal to 3.2 minus 127 times 0.3. By calculator, you'll get the final result.", "tokens": [407, 309, 311, 2681, 281, 805, 13, 17, 3175, 47561, 1413, 1958, 13, 18, 13, 3146, 24993, 11, 291, 603, 483, 264, 2572, 1874, 13], "avg_logprob": -0.26998196083765763, "compression_ratio": 1.0, "no_speech_prob": 0.0, "words": [{"start": 2244.25, "end": 2245.21, "word": " So", "probability": 0.1641845703125}, {"start": 2245.21, "end": 2246.17, "word": " it's", "probability": 0.826904296875}, {"start": 2246.17, "end": 2246.55, "word": " equal", "probability": 0.8974609375}, {"start": 2246.55, "end": 2247.05, "word": " to", "probability": 0.97021484375}, {"start": 2247.05, "end": 2247.41, "word": " 3", "probability": 0.84130859375}, {"start": 2247.41, "end": 2247.91, "word": ".2", "probability": 0.985107421875}, {"start": 2247.91, "end": 2249.29, "word": " minus", "probability": 0.96435546875}, {"start": 2249.29, "end": 2251.05, "word": " 127", "probability": 0.72265625}, {"start": 2251.05, "end": 2251.69, "word": " times", "probability": 0.89697265625}, {"start": 2251.69, "end": 2251.95, "word": " 0", "probability": 0.50830078125}, {"start": 2251.95, "end": 2252.25, "word": ".3.", "probability": 0.853515625}, {"start": 2253.91, "end": 2254.39, "word": " By", "probability": 0.94775390625}, {"start": 2254.39, "end": 2254.89, "word": " calculator,", "probability": 0.65234375}, {"start": 2255.01, "end": 2255.15, "word": " you'll", "probability": 0.6572265625}, {"start": 2255.15, "end": 2255.41, "word": " get", "probability": 0.94189453125}, {"start": 2255.41, "end": 2255.69, "word": " the", "probability": 0.90771484375}, {"start": 2255.69, "end": 2256.09, "word": " final", "probability": 0.9501953125}, {"start": 2256.09, "end": 2256.51, "word": " result.", "probability": 0.9248046875}], 
"temperature": 1.0}, {"id": 81, "seek": 229602, "start": 2271.12, "end": 2296.02, "text": " If the probability is smaller than 0.5, then this one is negative. Go to the other one. If the probability is above 0.5, then use the positive z-score. So what's the answer? 2.18. Be careful. In the previous one,", "tokens": [759, 264, 8482, 307, 4356, 813, 1958, 13, 20, 11, 550, 341, 472, 307, 3671, 13, 1037, 281, 264, 661, 472, 13, 759, 264, 8482, 307, 3673, 1958, 13, 20, 11, 550, 764, 264, 3353, 710, 12, 4417, 418, 13, 407, 437, 311, 264, 1867, 30, 568, 13, 6494, 13, 879, 5026, 13, 682, 264, 3894, 472, 11], "avg_logprob": -0.1988877073182898, "compression_ratio": 1.4589041095890412, "no_speech_prob": 0.0, "words": [{"start": 2271.12, "end": 2271.46, "word": " If", "probability": 0.78173828125}, {"start": 2271.46, "end": 2271.66, "word": " the", "probability": 0.89697265625}, {"start": 2271.66, "end": 2272.04, "word": " probability", "probability": 0.9375}, {"start": 2272.04, "end": 2272.34, "word": " is", "probability": 0.92333984375}, {"start": 2272.34, "end": 2272.74, "word": " smaller", "probability": 0.8837890625}, {"start": 2272.74, "end": 2273.0, "word": " than", "probability": 0.94091796875}, {"start": 2273.0, "end": 2273.22, "word": " 0", "probability": 0.7138671875}, {"start": 2273.22, "end": 2273.62, "word": ".5,", "probability": 0.9912109375}, {"start": 2275.0, "end": 2275.56, "word": " then", "probability": 0.87109375}, {"start": 2275.56, "end": 2276.18, "word": " this", "probability": 0.55810546875}, {"start": 2276.18, "end": 2276.44, "word": " one", "probability": 0.45654296875}, {"start": 2276.44, "end": 2276.76, "word": " is", "probability": 0.7060546875}, {"start": 2276.76, "end": 2277.16, "word": " negative.", "probability": 0.8291015625}, {"start": 2277.82, "end": 2278.2, "word": " Go", "probability": 0.431884765625}, {"start": 2278.2, "end": 2278.28, "word": " to", "probability": 0.9541015625}, {"start": 2278.28, "end": 2278.4, "word": " the", 
"probability": 0.90771484375}, {"start": 2278.4, "end": 2278.6, "word": " other", "probability": 0.88623046875}, {"start": 2278.6, "end": 2278.84, "word": " one.", "probability": 0.92626953125}, {"start": 2279.44, "end": 2279.74, "word": " If", "probability": 0.96044921875}, {"start": 2279.74, "end": 2280.48, "word": " the", "probability": 0.9052734375}, {"start": 2280.48, "end": 2280.8, "word": " probability", "probability": 0.9375}, {"start": 2280.8, "end": 2281.02, "word": " is", "probability": 0.89453125}, {"start": 2281.02, "end": 2281.32, "word": " above", "probability": 0.962890625}, {"start": 2281.32, "end": 2281.62, "word": " 0", "probability": 0.6455078125}, {"start": 2281.62, "end": 2282.08, "word": ".5,", "probability": 0.996337890625}, {"start": 2282.84, "end": 2283.06, "word": " then", "probability": 0.83251953125}, {"start": 2283.06, "end": 2283.28, "word": " use", "probability": 0.8271484375}, {"start": 2283.28, "end": 2283.46, "word": " the", "probability": 0.90234375}, {"start": 2283.46, "end": 2283.74, "word": " positive", "probability": 0.9150390625}, {"start": 2283.74, "end": 2284.04, "word": " z", "probability": 0.57421875}, {"start": 2284.04, "end": 2284.2, "word": "-score.", "probability": 0.7373046875}, {"start": 2285.28, "end": 2285.52, "word": " So", "probability": 0.82763671875}, {"start": 2285.52, "end": 2285.72, "word": " what's", "probability": 0.9033203125}, {"start": 2285.72, "end": 2285.82, "word": " the", "probability": 0.9267578125}, {"start": 2285.82, "end": 2286.12, "word": " answer?", "probability": 0.955078125}, {"start": 2288.24, "end": 2288.86, "word": " 2", "probability": 0.75634765625}, {"start": 2288.86, "end": 2289.6, "word": ".18.", "probability": 0.93896484375}, {"start": 2292.68, "end": 2293.3, "word": " Be", "probability": 0.89404296875}, {"start": 2293.3, "end": 2293.78, "word": " careful.", "probability": 0.95556640625}, {"start": 2294.14, "end": 2294.5, "word": " In", "probability": 0.69384765625}, {"start": 
2294.5, "end": 2294.68, "word": " the", "probability": 0.919921875}, {"start": 2294.68, "end": 2295.62, "word": " previous", "probability": 0.85009765625}, {"start": 2295.62, "end": 2296.02, "word": " one,", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 82, "seek": 232657, "start": 2298.09, "end": 2326.57, "text": " We had a probability of X greater than A equal 2%. In this case, the value of A, for example, is located in the upper tail here. For this part, you ask about B of X greater than A equal 0.89.", "tokens": [492, 632, 257, 8482, 295, 1783, 5044, 813, 316, 2681, 568, 6856, 682, 341, 1389, 11, 264, 2158, 295, 316, 11, 337, 1365, 11, 307, 6870, 294, 264, 6597, 6838, 510, 13, 1171, 341, 644, 11, 291, 1029, 466, 363, 295, 1783, 5044, 813, 316, 2681, 1958, 13, 21115, 13], "avg_logprob": -0.2259497549019608, "compression_ratio": 1.381294964028777, "no_speech_prob": 0.0, "words": [{"start": 2298.09, "end": 2298.39, "word": " We", "probability": 0.4296875}, {"start": 2298.39, "end": 2298.81, "word": " had", "probability": 0.58837890625}, {"start": 2298.81, "end": 2299.85, "word": " a", "probability": 0.5869140625}, {"start": 2299.85, "end": 2300.15, "word": " probability", "probability": 0.94384765625}, {"start": 2300.15, "end": 2300.45, "word": " of", "probability": 0.94970703125}, {"start": 2300.45, "end": 2300.73, "word": " X", "probability": 0.71533203125}, {"start": 2300.73, "end": 2301.15, "word": " greater", "probability": 0.8798828125}, {"start": 2301.15, "end": 2301.47, "word": " than", "probability": 0.962890625}, {"start": 2301.47, "end": 2301.63, "word": " A", "probability": 0.384521484375}, {"start": 2301.63, "end": 2301.99, "word": " equal", "probability": 0.5947265625}, {"start": 2301.99, "end": 2303.15, "word": " 2%.", "probability": 0.727783203125}, {"start": 2303.15, "end": 2308.87, "word": " In", "probability": 0.9521484375}, {"start": 2308.87, "end": 2309.15, "word": " this", "probability": 0.9453125}, {"start": 2309.15, "end": 
2309.55, "word": " case,", "probability": 0.91259765625}, {"start": 2309.75, "end": 2309.97, "word": " the", "probability": 0.91796875}, {"start": 2309.97, "end": 2310.31, "word": " value", "probability": 0.97998046875}, {"start": 2310.31, "end": 2310.55, "word": " of", "probability": 0.95068359375}, {"start": 2310.55, "end": 2310.85, "word": " A,", "probability": 0.97802734375}, {"start": 2310.99, "end": 2311.17, "word": " for", "probability": 0.94873046875}, {"start": 2311.17, "end": 2311.59, "word": " example,", "probability": 0.9736328125}, {"start": 2312.33, "end": 2312.57, "word": " is", "probability": 0.94482421875}, {"start": 2312.57, "end": 2313.07, "word": " located", "probability": 0.93408203125}, {"start": 2313.07, "end": 2314.97, "word": " in", "probability": 0.91162109375}, {"start": 2314.97, "end": 2315.09, "word": " the", "probability": 0.92236328125}, {"start": 2315.09, "end": 2315.31, "word": " upper", "probability": 0.6904296875}, {"start": 2315.31, "end": 2315.61, "word": " tail", "probability": 0.537109375}, {"start": 2315.61, "end": 2315.93, "word": " here.", "probability": 0.798828125}, {"start": 2320.21, "end": 2320.81, "word": " For", "probability": 0.9189453125}, {"start": 2320.81, "end": 2321.19, "word": " this", "probability": 0.94775390625}, {"start": 2321.19, "end": 2322.35, "word": " part,", "probability": 0.87744140625}, {"start": 2323.21, "end": 2323.43, "word": " you", "probability": 0.348388671875}, {"start": 2323.43, "end": 2323.71, "word": " ask", "probability": 0.88818359375}, {"start": 2323.71, "end": 2324.11, "word": " about", "probability": 0.90478515625}, {"start": 2324.11, "end": 2324.43, "word": " B", "probability": 0.5576171875}, {"start": 2324.43, "end": 2324.57, "word": " of", "probability": 0.92578125}, {"start": 2324.57, "end": 2324.75, "word": " X", "probability": 0.9638671875}, {"start": 2324.75, "end": 2325.11, "word": " greater", "probability": 0.90966796875}, {"start": 2325.11, "end": 2325.41, "word": " than", 
"probability": 0.958984375}, {"start": 2325.41, "end": 2325.53, "word": " A", "probability": 0.6044921875}, {"start": 2325.53, "end": 2325.75, "word": " equal", "probability": 0.765625}, {"start": 2325.75, "end": 2325.97, "word": " 0", "probability": 0.810546875}, {"start": 2325.97, "end": 2326.57, "word": ".89.", "probability": 0.989501953125}], "temperature": 1.0}, {"id": 83, "seek": 235411, "start": 2328.61, "end": 2354.11, "text": " It's here more than 0.5 should be on the other side. So you have U of X greater than equal this value, which is the score in this case minus 1.17. So the corresponding guess score actually is 2.24. So this is the weight that 89.8% of the weights are above it.", "tokens": [467, 311, 510, 544, 813, 1958, 13, 20, 820, 312, 322, 264, 661, 1252, 13, 407, 291, 362, 624, 295, 1783, 5044, 813, 2681, 341, 2158, 11, 597, 307, 264, 6175, 294, 341, 1389, 3175, 502, 13, 7773, 13, 407, 264, 11760, 2041, 6175, 767, 307, 568, 13, 7911, 13, 407, 341, 307, 264, 3364, 300, 31877, 13, 23, 4, 295, 264, 17443, 366, 3673, 309, 13], "avg_logprob": -0.21817554467741182, "compression_ratio": 1.452513966480447, "no_speech_prob": 0.0, "words": [{"start": 2328.61, "end": 2329.21, "word": " It's", "probability": 0.50543212890625}, {"start": 2329.21, "end": 2329.37, "word": " here", "probability": 0.646484375}, {"start": 2329.37, "end": 2329.63, "word": " more", "probability": 0.6943359375}, {"start": 2329.63, "end": 2329.81, "word": " than", "probability": 0.9365234375}, {"start": 2329.81, "end": 2330.07, "word": " 0", "probability": 0.40576171875}, {"start": 2330.07, "end": 2330.35, "word": ".5", "probability": 0.986572265625}, {"start": 2330.35, "end": 2330.55, "word": " should", "probability": 0.82958984375}, {"start": 2330.55, "end": 2330.79, "word": " be", "probability": 0.9443359375}, {"start": 2330.79, "end": 2331.09, "word": " on", "probability": 0.607421875}, {"start": 2331.09, "end": 2331.17, "word": " the", "probability": 0.91455078125}, {"start": 
2331.17, "end": 2331.39, "word": " other", "probability": 0.88671875}, {"start": 2331.39, "end": 2331.81, "word": " side.", "probability": 0.85888671875}, {"start": 2332.59, "end": 2332.79, "word": " So", "probability": 0.94482421875}, {"start": 2332.79, "end": 2332.93, "word": " you", "probability": 0.62060546875}, {"start": 2332.93, "end": 2333.05, "word": " have", "probability": 0.9287109375}, {"start": 2333.05, "end": 2333.21, "word": " U", "probability": 0.432861328125}, {"start": 2333.21, "end": 2333.35, "word": " of", "probability": 0.80126953125}, {"start": 2333.35, "end": 2333.57, "word": " X", "probability": 0.93701171875}, {"start": 2333.57, "end": 2334.01, "word": " greater", "probability": 0.90185546875}, {"start": 2334.01, "end": 2334.29, "word": " than", "probability": 0.953125}, {"start": 2334.29, "end": 2334.53, "word": " equal", "probability": 0.27392578125}, {"start": 2334.53, "end": 2334.97, "word": " this", "probability": 0.755859375}, {"start": 2334.97, "end": 2335.35, "word": " value,", "probability": 0.9677734375}, {"start": 2336.25, "end": 2336.55, "word": " which", "probability": 0.94677734375}, {"start": 2336.55, "end": 2336.93, "word": " is", "probability": 0.931640625}, {"start": 2336.93, "end": 2338.07, "word": " the", "probability": 0.71337890625}, {"start": 2338.07, "end": 2338.43, "word": " score", "probability": 0.85498046875}, {"start": 2338.43, "end": 2338.57, "word": " in", "probability": 0.63818359375}, {"start": 2338.57, "end": 2338.71, "word": " this", "probability": 0.94189453125}, {"start": 2338.71, "end": 2338.91, "word": " case", "probability": 0.90283203125}, {"start": 2338.91, "end": 2339.27, "word": " minus", "probability": 0.7294921875}, {"start": 2339.27, "end": 2339.57, "word": " 1", "probability": 0.9228515625}, {"start": 2339.57, "end": 2340.23, "word": ".17.", "probability": 0.947509765625}, {"start": 2340.95, "end": 2341.21, "word": " So", "probability": 0.95947265625}, {"start": 2341.21, "end": 2341.43, "word": 
" the", "probability": 0.83642578125}, {"start": 2341.43, "end": 2341.87, "word": " corresponding", "probability": 0.68017578125}, {"start": 2341.87, "end": 2342.25, "word": " guess", "probability": 0.364501953125}, {"start": 2342.25, "end": 2342.49, "word": " score", "probability": 0.900390625}, {"start": 2342.49, "end": 2343.01, "word": " actually", "probability": 0.82763671875}, {"start": 2343.01, "end": 2343.73, "word": " is", "probability": 0.8994140625}, {"start": 2343.73, "end": 2343.89, "word": " 2", "probability": 0.990234375}, {"start": 2343.89, "end": 2344.43, "word": ".24.", "probability": 0.84375}, {"start": 2346.67, "end": 2347.27, "word": " So", "probability": 0.95263671875}, {"start": 2347.27, "end": 2348.11, "word": " this", "probability": 0.85400390625}, {"start": 2348.11, "end": 2348.27, "word": " is", "probability": 0.94873046875}, {"start": 2348.27, "end": 2348.43, "word": " the", "probability": 0.9150390625}, {"start": 2348.43, "end": 2348.79, "word": " weight", "probability": 0.72509765625}, {"start": 2348.79, "end": 2349.39, "word": " that", "probability": 0.7509765625}, {"start": 2349.39, "end": 2350.93, "word": " 89", "probability": 0.95361328125}, {"start": 2350.93, "end": 2351.47, "word": ".8", "probability": 0.99658203125}, {"start": 2351.47, "end": 2351.81, "word": "%", "probability": 0.95947265625}, {"start": 2351.81, "end": 2352.15, "word": " of", "probability": 0.96484375}, {"start": 2352.15, "end": 2352.31, "word": " the", "probability": 0.91650390625}, {"start": 2352.31, "end": 2352.61, "word": " weights", "probability": 0.81298828125}, {"start": 2352.61, "end": 2353.47, "word": " are", "probability": 0.93359375}, {"start": 2353.47, "end": 2353.79, "word": " above", "probability": 0.947265625}, {"start": 2353.79, "end": 2354.11, "word": " it.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 84, "seek": 238169, "start": 2355.77, "end": 2381.69, "text": " So around 90% of the catch fish have weights above this value. 
So around 2 pounds. So around 90% of the weights are above 2.18 pounds. Maybe this is one of the most important questions in this chapter. Any question?", "tokens": [407, 926, 4289, 4, 295, 264, 3745, 3506, 362, 17443, 3673, 341, 2158, 13, 407, 926, 568, 8319, 13, 407, 926, 4289, 4, 295, 264, 17443, 366, 3673, 568, 13, 6494, 8319, 13, 2704, 341, 307, 472, 295, 264, 881, 1021, 1651, 294, 341, 7187, 13, 2639, 1168, 30], "avg_logprob": -0.13859375327825546, "compression_ratio": 1.5766423357664234, "no_speech_prob": 0.0, "words": [{"start": 2355.77, "end": 2356.07, "word": " So", "probability": 0.86669921875}, {"start": 2356.07, "end": 2356.41, "word": " around", "probability": 0.80224609375}, {"start": 2356.41, "end": 2356.91, "word": " 90", "probability": 0.97021484375}, {"start": 2356.91, "end": 2357.23, "word": "%", "probability": 0.83251953125}, {"start": 2357.23, "end": 2357.81, "word": " of", "probability": 0.9677734375}, {"start": 2357.81, "end": 2357.97, "word": " the", "probability": 0.916015625}, {"start": 2357.97, "end": 2358.29, "word": " catch", "probability": 0.54150390625}, {"start": 2358.29, "end": 2358.77, "word": " fish", "probability": 0.9072265625}, {"start": 2358.77, "end": 2360.63, "word": " have", "probability": 0.76611328125}, {"start": 2360.63, "end": 2361.05, "word": " weights", "probability": 0.8994140625}, {"start": 2361.05, "end": 2361.43, "word": " above", "probability": 0.95556640625}, {"start": 2361.43, "end": 2362.63, "word": " this", "probability": 0.9267578125}, {"start": 2362.63, "end": 2363.03, "word": " value.", "probability": 0.97412109375}, {"start": 2364.53, "end": 2365.35, "word": " So", "probability": 0.6123046875}, {"start": 2365.35, "end": 2365.59, "word": " around", "probability": 0.79833984375}, {"start": 2365.59, "end": 2365.75, "word": " 2", "probability": 0.64453125}, {"start": 2365.75, "end": 2366.37, "word": " pounds.", "probability": 0.7373046875}, {"start": 2367.05, "end": 2367.45, "word": " So", "probability": 
0.90869140625}, {"start": 2367.45, "end": 2367.69, "word": " around", "probability": 0.87158203125}, {"start": 2367.69, "end": 2367.97, "word": " 90", "probability": 0.98388671875}, {"start": 2367.97, "end": 2368.29, "word": "%", "probability": 0.98974609375}, {"start": 2368.29, "end": 2368.59, "word": " of", "probability": 0.9697265625}, {"start": 2368.59, "end": 2368.73, "word": " the", "probability": 0.89111328125}, {"start": 2368.73, "end": 2369.05, "word": " weights", "probability": 0.85302734375}, {"start": 2369.05, "end": 2371.17, "word": " are", "probability": 0.931640625}, {"start": 2371.17, "end": 2371.75, "word": " above", "probability": 0.9609375}, {"start": 2371.75, "end": 2372.19, "word": " 2", "probability": 0.94775390625}, {"start": 2372.19, "end": 2372.71, "word": ".18", "probability": 0.9091796875}, {"start": 2372.71, "end": 2373.13, "word": " pounds.", "probability": 0.79736328125}, {"start": 2375.45, "end": 2375.93, "word": " Maybe", "probability": 0.85546875}, {"start": 2375.93, "end": 2376.19, "word": " this", "probability": 0.7666015625}, {"start": 2376.19, "end": 2376.27, "word": " is", "probability": 0.861328125}, {"start": 2376.27, "end": 2376.43, "word": " one", "probability": 0.92578125}, {"start": 2376.43, "end": 2376.55, "word": " of", "probability": 0.96484375}, {"start": 2376.55, "end": 2376.69, "word": " the", "probability": 0.91455078125}, {"start": 2376.69, "end": 2377.07, "word": " most", "probability": 0.90380859375}, {"start": 2377.07, "end": 2377.69, "word": " important", "probability": 0.86279296875}, {"start": 2377.69, "end": 2378.35, "word": " questions", "probability": 0.94091796875}, {"start": 2378.35, "end": 2378.59, "word": " in", "probability": 0.93115234375}, {"start": 2378.59, "end": 2379.01, "word": " this", "probability": 0.943359375}, {"start": 2379.01, "end": 2379.45, "word": " chapter.", "probability": 0.85302734375}, {"start": 2380.47, "end": 2381.27, "word": " Any", "probability": 0.90625}, {"start": 2381.27, 
"end": 2381.69, "word": " question?", "probability": 0.7109375}], "temperature": 1.0}, {"id": 85, "seek": 241267, "start": 2391.66, "end": 2412.68, "text": " The last part here, for the same problem he asked about, what's the probability that a randomly selected fish will weigh less than 2.2 pounds? I think straightforward. We did similar in part A.", "tokens": [440, 1036, 644, 510, 11, 337, 264, 912, 1154, 415, 2351, 466, 11, 437, 311, 264, 8482, 300, 257, 16979, 8209, 3506, 486, 13843, 1570, 813, 568, 13, 17, 8319, 30, 286, 519, 15325, 13, 492, 630, 2531, 294, 644, 316, 13], "avg_logprob": -0.21584302949350934, "compression_ratio": 1.3197278911564625, "no_speech_prob": 0.0, "words": [{"start": 2391.6600000000003, "end": 2392.34, "word": " The", "probability": 0.7353515625}, {"start": 2392.34, "end": 2392.68, "word": " last", "probability": 0.873046875}, {"start": 2392.68, "end": 2393.12, "word": " part", "probability": 0.8984375}, {"start": 2393.12, "end": 2393.46, "word": " here,", "probability": 0.82763671875}, {"start": 2393.74, "end": 2393.88, "word": " for", "probability": 0.9208984375}, {"start": 2393.88, "end": 2394.04, "word": " the", "probability": 0.921875}, {"start": 2394.04, "end": 2394.38, "word": " same", "probability": 0.884765625}, {"start": 2394.38, "end": 2396.96, "word": " problem", "probability": 0.76318359375}, {"start": 2396.96, "end": 2397.3, "word": " he", "probability": 0.32958984375}, {"start": 2397.3, "end": 2397.58, "word": " asked", "probability": 0.5390625}, {"start": 2397.58, "end": 2398.08, "word": " about,", "probability": 0.91357421875}, {"start": 2399.84, "end": 2400.14, "word": " what's", "probability": 0.838623046875}, {"start": 2400.14, "end": 2400.24, "word": " the", "probability": 0.92431640625}, {"start": 2400.24, "end": 2400.68, "word": " probability", "probability": 0.96044921875}, {"start": 2400.68, "end": 2401.12, "word": " that", "probability": 0.9365234375}, {"start": 2401.12, "end": 2401.4, "word": " a", 
"probability": 0.96533203125}, {"start": 2401.4, "end": 2401.7, "word": " randomly", "probability": 0.78125}, {"start": 2401.7, "end": 2402.32, "word": " selected", "probability": 0.85498046875}, {"start": 2402.32, "end": 2402.7, "word": " fish", "probability": 0.75537109375}, {"start": 2402.7, "end": 2402.92, "word": " will", "probability": 0.88525390625}, {"start": 2402.92, "end": 2403.18, "word": " weigh", "probability": 0.87548828125}, {"start": 2403.18, "end": 2403.7, "word": " less", "probability": 0.9462890625}, {"start": 2403.7, "end": 2403.92, "word": " than", "probability": 0.947265625}, {"start": 2403.92, "end": 2404.14, "word": " 2", "probability": 0.9560546875}, {"start": 2404.14, "end": 2404.5, "word": ".2", "probability": 0.986572265625}, {"start": 2404.5, "end": 2404.84, "word": " pounds?", "probability": 0.890625}, {"start": 2405.66, "end": 2406.12, "word": " I", "probability": 0.96337890625}, {"start": 2406.12, "end": 2406.28, "word": " think", "probability": 0.916015625}, {"start": 2406.28, "end": 2406.86, "word": " straightforward.", "probability": 0.65185546875}, {"start": 2408.98, "end": 2409.38, "word": " We", "probability": 0.484130859375}, {"start": 2409.38, "end": 2409.68, "word": " did", "probability": 0.94873046875}, {"start": 2409.68, "end": 2410.56, "word": " similar", "probability": 0.81982421875}, {"start": 2410.56, "end": 2412.2, "word": " in", "probability": 0.8916015625}, {"start": 2412.2, "end": 2412.44, "word": " part", "probability": 0.71337890625}, {"start": 2412.44, "end": 2412.68, "word": " A.", "probability": 0.74072265625}], "temperature": 1.0}, {"id": 86, "seek": 244342, "start": 2414.9, "end": 2443.42, "text": " So B of X less than 0.2. So he's looking for this probability, which is straightforward one. This score, 3.2 minus, I'm sorry, it's 2.2 minus minus. 
It's 2.2 minus 3.2 divided by sigma.", "tokens": [407, 363, 295, 1783, 1570, 813, 1958, 13, 17, 13, 407, 415, 311, 1237, 337, 341, 8482, 11, 597, 307, 15325, 472, 13, 639, 6175, 11, 805, 13, 17, 3175, 11, 286, 478, 2597, 11, 309, 311, 568, 13, 17, 3175, 3175, 13, 467, 311, 568, 13, 17, 3175, 805, 13, 17, 6666, 538, 12771, 13], "avg_logprob": -0.22793311194369667, "compression_ratio": 1.3285714285714285, "no_speech_prob": 0.0, "words": [{"start": 2414.9, "end": 2415.24, "word": " So", "probability": 0.75830078125}, {"start": 2415.24, "end": 2415.4, "word": " B", "probability": 0.335693359375}, {"start": 2415.4, "end": 2415.56, "word": " of", "probability": 0.54931640625}, {"start": 2415.56, "end": 2415.88, "word": " X", "probability": 0.81005859375}, {"start": 2415.88, "end": 2417.7, "word": " less", "probability": 0.62548828125}, {"start": 2417.7, "end": 2417.94, "word": " than", "probability": 0.9296875}, {"start": 2417.94, "end": 2418.22, "word": " 0", "probability": 0.5166015625}, {"start": 2418.22, "end": 2418.46, "word": ".2.", "probability": 0.990478515625}, {"start": 2420.44, "end": 2421.12, "word": " So", "probability": 0.9365234375}, {"start": 2421.12, "end": 2421.36, "word": " he's", "probability": 0.728515625}, {"start": 2421.36, "end": 2421.7, "word": " looking", "probability": 0.8916015625}, {"start": 2421.7, "end": 2422.2, "word": " for", "probability": 0.953125}, {"start": 2422.2, "end": 2423.88, "word": " this", "probability": 0.6201171875}, {"start": 2423.88, "end": 2424.32, "word": " probability,", "probability": 0.95166015625}, {"start": 2424.6, "end": 2424.76, "word": " which", "probability": 0.9580078125}, {"start": 2424.76, "end": 2424.96, "word": " is", "probability": 0.9482421875}, {"start": 2424.96, "end": 2425.54, "word": " straightforward", "probability": 0.63623046875}, {"start": 2425.54, "end": 2425.98, "word": " one.", "probability": 0.90576171875}, {"start": 2427.88, "end": 2428.3, "word": " This", "probability": 0.498779296875}, 
{"start": 2428.3, "end": 2428.68, "word": " score,", "probability": 0.52099609375}, {"start": 2430.98, "end": 2431.24, "word": " 3", "probability": 0.8701171875}, {"start": 2431.24, "end": 2431.8, "word": ".2", "probability": 0.993408203125}, {"start": 2431.8, "end": 2433.54, "word": " minus,", "probability": 0.947265625}, {"start": 2433.94, "end": 2434.14, "word": " I'm", "probability": 0.887451171875}, {"start": 2434.14, "end": 2434.3, "word": " sorry,", "probability": 0.86767578125}, {"start": 2434.5, "end": 2434.72, "word": " it's", "probability": 0.875244140625}, {"start": 2434.72, "end": 2434.9, "word": " 2", "probability": 0.99462890625}, {"start": 2434.9, "end": 2435.32, "word": ".2", "probability": 0.998046875}, {"start": 2435.32, "end": 2435.84, "word": " minus", "probability": 0.98779296875}, {"start": 2435.84, "end": 2438.8, "word": " minus.", "probability": 0.450927734375}, {"start": 2439.28, "end": 2439.96, "word": " It's", "probability": 0.962646484375}, {"start": 2439.96, "end": 2440.12, "word": " 2", "probability": 0.99267578125}, {"start": 2440.12, "end": 2440.6, "word": ".2", "probability": 0.99853515625}, {"start": 2440.6, "end": 2441.04, "word": " minus", "probability": 0.98193359375}, {"start": 2441.04, "end": 2441.36, "word": " 3", "probability": 0.9921875}, {"start": 2441.36, "end": 2441.92, "word": ".2", "probability": 0.99658203125}, {"start": 2441.92, "end": 2442.9, "word": " divided", "probability": 0.6904296875}, {"start": 2442.9, "end": 2443.14, "word": " by", "probability": 0.97314453125}, {"start": 2443.14, "end": 2443.42, "word": " sigma.", "probability": 0.8466796875}], "temperature": 1.0}, {"id": 87, "seek": 247112, "start": 2464.9, "end": 2471.12, "text": " So again, find the probability now of Z less than or equal to negative 1.5.", "tokens": [407, 797, 11, 915, 264, 8482, 586, 295, 1176, 1570, 813, 420, 2681, 281, 3671, 502, 13, 20, 13], "avg_logprob": -0.3701171770691872, "compression_ratio": 0.9743589743589743, 
"no_speech_prob": 0.0, "words": [{"start": 2464.9, "end": 2465.44, "word": " So", "probability": 0.2047119140625}, {"start": 2465.44, "end": 2465.94, "word": " again,", "probability": 0.50927734375}, {"start": 2466.88, "end": 2467.66, "word": " find", "probability": 0.7373046875}, {"start": 2467.66, "end": 2467.86, "word": " the", "probability": 0.90185546875}, {"start": 2467.86, "end": 2468.24, "word": " probability", "probability": 0.908203125}, {"start": 2468.24, "end": 2468.72, "word": " now", "probability": 0.83935546875}, {"start": 2468.72, "end": 2469.7, "word": " of", "probability": 0.82470703125}, {"start": 2469.7, "end": 2469.88, "word": " Z", "probability": 0.552734375}, {"start": 2469.88, "end": 2470.12, "word": " less", "probability": 0.7509765625}, {"start": 2470.12, "end": 2470.24, "word": " than", "probability": 0.93701171875}, {"start": 2470.24, "end": 2470.46, "word": " or", "probability": 0.32666015625}, {"start": 2470.46, "end": 2470.46, "word": " equal", "probability": 0.93994140625}, {"start": 2470.46, "end": 2470.64, "word": " to", "probability": 0.9765625}, {"start": 2470.64, "end": 2470.64, "word": " negative", "probability": 0.5546875}, {"start": 2470.64, "end": 2470.82, "word": " 1", "probability": 0.6083984375}, {"start": 2470.82, "end": 2471.12, "word": ".5.", "probability": 0.6976318359375}], "temperature": 1.0}, {"id": 88, "seek": 249913, "start": 2474.87, "end": 2499.13, "text": " Now, in this case, we have to use the negative z. It's negative 1.15 minus 1.2 up to 5. So 0.1056. 
So the answer is around 10% of the catfish", "tokens": [823, 11, 294, 341, 1389, 11, 321, 362, 281, 764, 264, 3671, 710, 13, 467, 311, 3671, 502, 13, 5211, 3175, 502, 13, 17, 493, 281, 1025, 13, 407, 1958, 13, 3279, 18317, 13, 407, 264, 1867, 307, 926, 1266, 4, 295, 264, 3857, 11608], "avg_logprob": -0.17663043154322583, "compression_ratio": 1.1932773109243697, "no_speech_prob": 0.0, "words": [{"start": 2474.87, "end": 2475.17, "word": " Now,", "probability": 0.8818359375}, {"start": 2475.27, "end": 2475.35, "word": " in", "probability": 0.94091796875}, {"start": 2475.35, "end": 2475.51, "word": " this", "probability": 0.9482421875}, {"start": 2475.51, "end": 2475.69, "word": " case,", "probability": 0.91796875}, {"start": 2475.79, "end": 2475.83, "word": " we", "probability": 0.89111328125}, {"start": 2475.83, "end": 2475.97, "word": " have", "probability": 0.92333984375}, {"start": 2475.97, "end": 2476.11, "word": " to", "probability": 0.96142578125}, {"start": 2476.11, "end": 2476.39, "word": " use", "probability": 0.859375}, {"start": 2476.39, "end": 2477.89, "word": " the", "probability": 0.90771484375}, {"start": 2477.89, "end": 2478.29, "word": " negative", "probability": 0.91357421875}, {"start": 2478.29, "end": 2479.71, "word": " z.", "probability": 0.469482421875}, {"start": 2480.37, "end": 2480.83, "word": " It's", "probability": 0.964599609375}, {"start": 2480.83, "end": 2481.11, "word": " negative", "probability": 0.69384765625}, {"start": 2481.11, "end": 2481.39, "word": " 1", "probability": 0.88427734375}, {"start": 2481.39, "end": 2482.01, "word": ".15", "probability": 0.8115234375}, {"start": 2482.01, "end": 2483.57, "word": " minus", "probability": 0.75927734375}, {"start": 2483.57, "end": 2483.91, "word": " 1", "probability": 0.97705078125}, {"start": 2483.91, "end": 2484.39, "word": ".2", "probability": 0.703125}, {"start": 2484.39, "end": 2484.55, "word": " up", "probability": 0.8427734375}, {"start": 2484.55, "end": 2484.67, "word": " to", 
"probability": 0.96484375}, {"start": 2484.67, "end": 2485.01, "word": " 5.", "probability": 0.8046875}, {"start": 2488.15, "end": 2488.83, "word": " So", "probability": 0.95068359375}, {"start": 2488.83, "end": 2489.17, "word": " 0", "probability": 0.576171875}, {"start": 2489.17, "end": 2490.07, "word": ".1056.", "probability": 0.9850260416666666}, {"start": 2493.73, "end": 2494.41, "word": " So", "probability": 0.9560546875}, {"start": 2494.41, "end": 2494.65, "word": " the", "probability": 0.85400390625}, {"start": 2494.65, "end": 2495.11, "word": " answer", "probability": 0.953125}, {"start": 2495.11, "end": 2495.61, "word": " is", "probability": 0.9453125}, {"start": 2495.61, "end": 2496.07, "word": " around", "probability": 0.890625}, {"start": 2496.07, "end": 2496.45, "word": " 10", "probability": 0.94482421875}, {"start": 2496.45, "end": 2496.75, "word": "%", "probability": 0.96337890625}, {"start": 2496.75, "end": 2497.91, "word": " of", "probability": 0.96630859375}, {"start": 2497.91, "end": 2498.09, "word": " the", "probability": 0.9228515625}, {"start": 2498.09, "end": 2499.13, "word": " catfish", "probability": 0.85546875}], "temperature": 1.0}, {"id": 89, "seek": 252390, "start": 2500.28, "end": 2523.9, "text": " will weigh less than 2 pounds. So the answer is 0.1056. Questions? So go back to the PowerPoint presentation we have. 
The last topic, evaluating normality.", "tokens": [486, 13843, 1570, 813, 568, 8319, 13, 407, 264, 1867, 307, 1958, 13, 3279, 18317, 13, 27738, 30, 407, 352, 646, 281, 264, 25584, 5860, 321, 362, 13, 440, 1036, 4829, 11, 27479, 2026, 1860, 13], "avg_logprob": -0.20682010618416038, "compression_ratio": 1.21875, "no_speech_prob": 0.0, "words": [{"start": 2500.28, "end": 2500.66, "word": " will", "probability": 0.33447265625}, {"start": 2500.66, "end": 2501.06, "word": " weigh", "probability": 0.830078125}, {"start": 2501.06, "end": 2501.56, "word": " less", "probability": 0.93017578125}, {"start": 2501.56, "end": 2502.2, "word": " than", "probability": 0.93701171875}, {"start": 2502.2, "end": 2502.64, "word": " 2", "probability": 0.81689453125}, {"start": 2502.64, "end": 2503.56, "word": " pounds.", "probability": 0.6171875}, {"start": 2503.68, "end": 2503.82, "word": " So", "probability": 0.79150390625}, {"start": 2503.82, "end": 2503.94, "word": " the", "probability": 0.7880859375}, {"start": 2503.94, "end": 2504.16, "word": " answer", "probability": 0.95751953125}, {"start": 2504.16, "end": 2504.48, "word": " is", "probability": 0.9423828125}, {"start": 2504.48, "end": 2505.26, "word": " 0", "probability": 0.8095703125}, {"start": 2505.26, "end": 2506.42, "word": ".1056.", "probability": 0.9775390625}, {"start": 2508.34, "end": 2509.22, "word": " Questions?", "probability": 0.63037109375}, {"start": 2512.78, "end": 2513.66, "word": " So", "probability": 0.86962890625}, {"start": 2513.66, "end": 2513.82, "word": " go", "probability": 0.65380859375}, {"start": 2513.82, "end": 2514.06, "word": " back", "probability": 0.87158203125}, {"start": 2514.06, "end": 2514.22, "word": " to", "probability": 0.9658203125}, {"start": 2514.22, "end": 2514.38, "word": " the", "probability": 0.92236328125}, {"start": 2514.38, "end": 2514.68, "word": " PowerPoint", "probability": 0.61474609375}, {"start": 2514.68, "end": 2515.54, "word": " presentation", "probability": 0.92578125}, 
{"start": 2515.54, "end": 2515.82, "word": " we", "probability": 0.7236328125}, {"start": 2515.82, "end": 2516.1, "word": " have.", "probability": 0.6591796875}, {"start": 2517.78, "end": 2518.12, "word": " The", "probability": 0.88330078125}, {"start": 2518.12, "end": 2519.12, "word": " last", "probability": 0.8876953125}, {"start": 2519.12, "end": 2519.58, "word": " topic,", "probability": 0.9306640625}, {"start": 2522.56, "end": 2523.14, "word": " evaluating", "probability": 0.88525390625}, {"start": 2523.14, "end": 2523.9, "word": " normality.", "probability": 0.9638671875}], "temperature": 1.0}, {"id": 90, "seek": 255251, "start": 2526.93, "end": 2552.51, "text": " Many times we mentioned something about normality and outliers. For sure, if outliers exist, in this case, the situation is not normal. Now, how can we tell if a data point is an outlier? If you remember, we talked about outliers in Chapter 3. By two ways.", "tokens": [5126, 1413, 321, 2835, 746, 466, 2026, 1860, 293, 484, 23646, 13, 1171, 988, 11, 498, 484, 23646, 2514, 11, 294, 341, 1389, 11, 264, 2590, 307, 406, 2710, 13, 823, 11, 577, 393, 321, 980, 498, 257, 1412, 935, 307, 364, 484, 2753, 30, 759, 291, 1604, 11, 321, 2825, 466, 484, 23646, 294, 18874, 805, 13, 3146, 732, 2098, 13], "avg_logprob": -0.18749999716168358, "compression_ratio": 1.4685714285714286, "no_speech_prob": 0.0, "words": [{"start": 2526.93, "end": 2527.29, "word": " Many", "probability": 0.77490234375}, {"start": 2527.29, "end": 2527.61, "word": " times", "probability": 0.91650390625}, {"start": 2527.61, "end": 2527.79, "word": " we", "probability": 0.76171875}, {"start": 2527.79, "end": 2528.09, "word": " mentioned", "probability": 0.533203125}, {"start": 2528.09, "end": 2528.47, "word": " something", "probability": 0.8642578125}, {"start": 2528.47, "end": 2528.83, "word": " about", "probability": 0.90234375}, {"start": 2528.83, "end": 2529.35, "word": " normality", "probability": 0.899658203125}, {"start": 2529.35, "end": 
2529.77, "word": " and", "probability": 0.89697265625}, {"start": 2529.77, "end": 2530.25, "word": " outliers.", "probability": 0.9208984375}, {"start": 2531.21, "end": 2531.61, "word": " For", "probability": 0.95263671875}, {"start": 2531.61, "end": 2531.87, "word": " sure,", "probability": 0.91650390625}, {"start": 2531.95, "end": 2532.03, "word": " if", "probability": 0.94873046875}, {"start": 2532.03, "end": 2532.47, "word": " outliers", "probability": 0.93798828125}, {"start": 2532.47, "end": 2532.97, "word": " exist,", "probability": 0.91552734375}, {"start": 2534.29, "end": 2534.53, "word": " in", "probability": 0.8447265625}, {"start": 2534.53, "end": 2534.75, "word": " this", "probability": 0.94873046875}, {"start": 2534.75, "end": 2535.05, "word": " case,", "probability": 0.91796875}, {"start": 2535.13, "end": 2535.27, "word": " the", "probability": 0.73779296875}, {"start": 2535.27, "end": 2535.55, "word": " situation", "probability": 0.89990234375}, {"start": 2535.55, "end": 2535.81, "word": " is", "probability": 0.94580078125}, {"start": 2535.81, "end": 2536.13, "word": " not", "probability": 0.94384765625}, {"start": 2536.13, "end": 2537.37, "word": " normal.", "probability": 0.80908203125}, {"start": 2538.19, "end": 2538.57, "word": " Now,", "probability": 0.9443359375}, {"start": 2538.77, "end": 2539.03, "word": " how", "probability": 0.9404296875}, {"start": 2539.03, "end": 2539.25, "word": " can", "probability": 0.9443359375}, {"start": 2539.25, "end": 2539.41, "word": " we", "probability": 0.95263671875}, {"start": 2539.41, "end": 2539.65, "word": " tell", "probability": 0.8740234375}, {"start": 2539.65, "end": 2539.85, "word": " if", "probability": 0.94189453125}, {"start": 2539.85, "end": 2539.97, "word": " a", "probability": 0.437255859375}, {"start": 2539.97, "end": 2540.19, "word": " data", "probability": 0.94287109375}, {"start": 2540.19, "end": 2540.51, "word": " point", "probability": 0.9736328125}, {"start": 2540.51, "end": 2540.81, 
"word": " is", "probability": 0.9521484375}, {"start": 2540.81, "end": 2540.97, "word": " an", "probability": 0.96484375}, {"start": 2540.97, "end": 2541.37, "word": " outlier?", "probability": 0.953857421875}, {"start": 2544.65, "end": 2545.29, "word": " If", "probability": 0.474853515625}, {"start": 2545.29, "end": 2545.45, "word": " you", "probability": 0.962890625}, {"start": 2545.45, "end": 2545.73, "word": " remember,", "probability": 0.876953125}, {"start": 2546.43, "end": 2546.85, "word": " we", "probability": 0.95458984375}, {"start": 2546.85, "end": 2547.15, "word": " talked", "probability": 0.88330078125}, {"start": 2547.15, "end": 2547.47, "word": " about", "probability": 0.91064453125}, {"start": 2547.47, "end": 2548.05, "word": " outliers", "probability": 0.6485595703125}, {"start": 2548.05, "end": 2548.27, "word": " in", "probability": 0.91650390625}, {"start": 2548.27, "end": 2548.47, "word": " Chapter", "probability": 0.57568359375}, {"start": 2548.47, "end": 2548.95, "word": " 3.", "probability": 0.74267578125}, {"start": 2549.59, "end": 2549.91, "word": " By", "probability": 0.80419921875}, {"start": 2549.91, "end": 2552.03, "word": " two", "probability": 0.75146484375}, {"start": 2552.03, "end": 2552.51, "word": " ways.", "probability": 0.583984375}], "temperature": 1.0}, {"id": 91, "seek": 256739, "start": 2556.75, "end": 2567.39, "text": " By this score. 
And we mentioned that any data point.", "tokens": [3146, 341, 6175, 13, 400, 321, 2835, 300, 604, 1412, 935, 13], "avg_logprob": -0.4825720970447247, "compression_ratio": 0.8833333333333333, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2556.75, "end": 2557.59, "word": " By", "probability": 0.269775390625}, {"start": 2557.59, "end": 2557.87, "word": " this", "probability": 0.546875}, {"start": 2557.87, "end": 2558.27, "word": " score.", "probability": 0.63330078125}, {"start": 2562.65, "end": 2563.49, "word": " And", "probability": 0.892578125}, {"start": 2563.49, "end": 2563.69, "word": " we", "probability": 0.888671875}, {"start": 2563.69, "end": 2564.11, "word": " mentioned", "probability": 0.64306640625}, {"start": 2564.11, "end": 2564.51, "word": " that", "probability": 0.7568359375}, {"start": 2564.51, "end": 2566.71, "word": " any", "probability": 0.66015625}, {"start": 2566.71, "end": 2566.97, "word": " data", "probability": 0.92724609375}, {"start": 2566.97, "end": 2567.39, "word": " point.", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 92, "seek": 260323, "start": 2574.95, "end": 2603.23, "text": " Below minus 3, I mean smaller than minus 3, or above 3, these points are suspected to be outliers. 
So any point, any data value smaller than minus 3 in this form, or above plus 3 is considered to be an outlier.", "tokens": [36261, 3175, 805, 11, 286, 914, 4356, 813, 3175, 805, 11, 420, 3673, 805, 11, 613, 2793, 366, 26439, 281, 312, 484, 23646, 13, 407, 604, 935, 11, 604, 1412, 2158, 4356, 813, 3175, 805, 294, 341, 1254, 11, 420, 3673, 1804, 805, 307, 4888, 281, 312, 364, 484, 2753, 13], "avg_logprob": -0.21484374541502732, "compression_ratio": 1.586466165413534, "no_speech_prob": 0.0, "words": [{"start": 2574.95, "end": 2575.33, "word": " Below", "probability": 0.324951171875}, {"start": 2575.33, "end": 2575.73, "word": " minus", "probability": 0.61376953125}, {"start": 2575.73, "end": 2576.05, "word": " 3,", "probability": 0.476318359375}, {"start": 2576.61, "end": 2576.81, "word": " I", "probability": 0.904296875}, {"start": 2576.81, "end": 2576.93, "word": " mean", "probability": 0.9638671875}, {"start": 2576.93, "end": 2577.27, "word": " smaller", "probability": 0.783203125}, {"start": 2577.27, "end": 2577.49, "word": " than", "probability": 0.9443359375}, {"start": 2577.49, "end": 2577.77, "word": " minus", "probability": 0.96728515625}, {"start": 2577.77, "end": 2578.09, "word": " 3,", "probability": 0.93505859375}, {"start": 2578.39, "end": 2579.01, "word": " or", "probability": 0.9423828125}, {"start": 2579.01, "end": 2579.35, "word": " above", "probability": 0.943359375}, {"start": 2579.35, "end": 2579.73, "word": " 3,", "probability": 0.77392578125}, {"start": 2580.21, "end": 2580.93, "word": " these", "probability": 0.83642578125}, {"start": 2580.93, "end": 2581.29, "word": " points", "probability": 0.9208984375}, {"start": 2581.29, "end": 2581.63, "word": " are", "probability": 0.94189453125}, {"start": 2581.63, "end": 2582.17, "word": " suspected", "probability": 0.85986328125}, {"start": 2582.17, "end": 2582.43, "word": " to", "probability": 0.97119140625}, {"start": 2582.43, "end": 2584.01, "word": " be", "probability": 0.95654296875}, {"start": 
2584.01, "end": 2584.75, "word": " outliers.", "probability": 0.7490234375}, {"start": 2589.23, "end": 2589.77, "word": " So", "probability": 0.916015625}, {"start": 2589.77, "end": 2589.95, "word": " any", "probability": 0.64501953125}, {"start": 2589.95, "end": 2590.29, "word": " point,", "probability": 0.962890625}, {"start": 2591.65, "end": 2592.15, "word": " any", "probability": 0.908203125}, {"start": 2592.15, "end": 2592.41, "word": " data", "probability": 0.80712890625}, {"start": 2592.41, "end": 2592.79, "word": " value", "probability": 0.93896484375}, {"start": 2592.79, "end": 2595.03, "word": " smaller", "probability": 0.591796875}, {"start": 2595.03, "end": 2595.35, "word": " than", "probability": 0.94482421875}, {"start": 2595.35, "end": 2595.71, "word": " minus", "probability": 0.97119140625}, {"start": 2595.71, "end": 2596.23, "word": " 3", "probability": 0.94970703125}, {"start": 2596.23, "end": 2597.69, "word": " in", "probability": 0.578125}, {"start": 2597.69, "end": 2597.87, "word": " this", "probability": 0.88134765625}, {"start": 2597.87, "end": 2598.23, "word": " form,", "probability": 0.77734375}, {"start": 2598.95, "end": 2599.71, "word": " or", "probability": 0.95556640625}, {"start": 2599.71, "end": 2600.05, "word": " above", "probability": 0.95849609375}, {"start": 2600.05, "end": 2600.57, "word": " plus", "probability": 0.94384765625}, {"start": 2600.57, "end": 2600.93, "word": " 3", "probability": 0.95849609375}, {"start": 2600.93, "end": 2601.31, "word": " is", "probability": 0.71630859375}, {"start": 2601.31, "end": 2601.69, "word": " considered", "probability": 0.76953125}, {"start": 2601.69, "end": 2601.89, "word": " to", "probability": 0.96337890625}, {"start": 2601.89, "end": 2602.33, "word": " be", "probability": 0.9580078125}, {"start": 2602.33, "end": 2602.89, "word": " an", "probability": 0.48046875}, {"start": 2602.89, "end": 2603.23, "word": " outlier.", "probability": 0.933349609375}], "temperature": 1.0}, {"id": 93, 
"seek": 263169, "start": 2605.43, "end": 2631.69, "text": " If we compute the lower limit, which is Q1 minus 1.5 IQR over the upper limit. So we have lower limit, upper limit. So lower limit.", "tokens": [759, 321, 14722, 264, 3126, 4948, 11, 597, 307, 1249, 16, 3175, 502, 13, 20, 28921, 49, 670, 264, 6597, 4948, 13, 407, 321, 362, 3126, 4948, 11, 6597, 4948, 13, 407, 3126, 4948, 13], "avg_logprob": -0.2486979299121433, "compression_ratio": 1.4505494505494505, "no_speech_prob": 0.0, "words": [{"start": 2605.43, "end": 2605.59, "word": " If", "probability": 0.6318359375}, {"start": 2605.59, "end": 2605.81, "word": " we", "probability": 0.93017578125}, {"start": 2605.81, "end": 2606.25, "word": " compute", "probability": 0.91162109375}, {"start": 2606.25, "end": 2606.45, "word": " the", "probability": 0.9130859375}, {"start": 2606.45, "end": 2606.65, "word": " lower", "probability": 0.88525390625}, {"start": 2606.65, "end": 2607.07, "word": " limit,", "probability": 0.92041015625}, {"start": 2608.91, "end": 2610.25, "word": " which", "probability": 0.94677734375}, {"start": 2610.25, "end": 2610.51, "word": " is", "probability": 0.87939453125}, {"start": 2610.51, "end": 2611.17, "word": " Q1", "probability": 0.712890625}, {"start": 2611.17, "end": 2611.61, "word": " minus", "probability": 0.211669921875}, {"start": 2611.61, "end": 2611.93, "word": " 1", "probability": 0.896484375}, {"start": 2611.93, "end": 2612.55, "word": ".5", "probability": 0.947509765625}, {"start": 2612.55, "end": 2614.89, "word": " IQR", "probability": 0.765625}, {"start": 2614.89, "end": 2616.47, "word": " over", "probability": 0.18212890625}, {"start": 2616.47, "end": 2616.67, "word": " the", "probability": 0.919921875}, {"start": 2616.67, "end": 2616.89, "word": " upper", "probability": 0.83837890625}, {"start": 2616.89, "end": 2617.31, "word": " limit.", "probability": 0.97265625}, {"start": 2620.17, "end": 2620.17, "word": " So", "probability": 0.63427734375}, {"start": 2620.17, "end": 
2627.49, "word": " we", "probability": 0.82666015625}, {"start": 2627.49, "end": 2627.61, "word": " have", "probability": 0.9453125}, {"start": 2627.61, "end": 2627.85, "word": " lower", "probability": 0.8359375}, {"start": 2627.85, "end": 2628.21, "word": " limit,", "probability": 0.97021484375}, {"start": 2628.65, "end": 2628.89, "word": " upper", "probability": 0.88330078125}, {"start": 2628.89, "end": 2629.23, "word": " limit.", "probability": 0.96484375}, {"start": 2630.21, "end": 2631.05, "word": " So", "probability": 0.853515625}, {"start": 2631.05, "end": 2631.25, "word": " lower", "probability": 0.7265625}, {"start": 2631.25, "end": 2631.69, "word": " limit.", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 94, "seek": 266504, "start": 2635.4, "end": 2665.04, "text": " And upper limit. Any data point below lower limit or above upper limit is considered to be a type. So therefore, we have two methods to determine or to examine if the observation is enough there or not. One by using this score, straightforward. 
And the other one, we have to look for", "tokens": [400, 6597, 4948, 13, 2639, 1412, 935, 2507, 3126, 4948, 420, 3673, 6597, 4948, 307, 4888, 281, 312, 257, 2010, 13, 407, 4412, 11, 321, 362, 732, 7150, 281, 6997, 420, 281, 17496, 498, 264, 14816, 307, 1547, 456, 420, 406, 13, 1485, 538, 1228, 341, 6175, 11, 15325, 13, 400, 264, 661, 472, 11, 321, 362, 281, 574, 337], "avg_logprob": -0.2638319574418615, "compression_ratio": 1.569060773480663, "no_speech_prob": 0.0, "words": [{"start": 2635.4, "end": 2635.78, "word": " And", "probability": 0.28857421875}, {"start": 2635.78, "end": 2636.16, "word": " upper", "probability": 0.59228515625}, {"start": 2636.16, "end": 2636.48, "word": " limit.", "probability": 0.94970703125}, {"start": 2639.98, "end": 2640.6, "word": " Any", "probability": 0.92138671875}, {"start": 2640.6, "end": 2641.34, "word": " data", "probability": 0.9404296875}, {"start": 2641.34, "end": 2642.48, "word": " point", "probability": 0.9619140625}, {"start": 2642.48, "end": 2644.36, "word": " below", "probability": 0.67333984375}, {"start": 2644.36, "end": 2644.7, "word": " lower", "probability": 0.45654296875}, {"start": 2644.7, "end": 2645.1, "word": " limit", "probability": 0.966796875}, {"start": 2645.1, "end": 2646.8, "word": " or", "probability": 0.62890625}, {"start": 2646.8, "end": 2647.32, "word": " above", "probability": 0.9599609375}, {"start": 2647.32, "end": 2648.42, "word": " upper", "probability": 0.8447265625}, {"start": 2648.42, "end": 2648.74, "word": " limit", "probability": 0.96533203125}, {"start": 2648.74, "end": 2648.98, "word": " is", "probability": 0.8759765625}, {"start": 2648.98, "end": 2649.3, "word": " considered", "probability": 0.80322265625}, {"start": 2649.3, "end": 2649.56, "word": " to", "probability": 0.96240234375}, {"start": 2649.56, "end": 2649.7, "word": " be", "probability": 0.95068359375}, {"start": 2649.7, "end": 2649.8, "word": " a", "probability": 0.345947265625}, {"start": 2649.8, "end": 2650.18, "word": " 
type.", "probability": 0.3271484375}, {"start": 2651.34, "end": 2651.96, "word": " So", "probability": 0.826171875}, {"start": 2651.96, "end": 2652.8, "word": " therefore,", "probability": 0.67333984375}, {"start": 2652.94, "end": 2653.08, "word": " we", "probability": 0.94140625}, {"start": 2653.08, "end": 2653.36, "word": " have", "probability": 0.94384765625}, {"start": 2653.36, "end": 2654.22, "word": " two", "probability": 0.93310546875}, {"start": 2654.22, "end": 2654.8, "word": " methods", "probability": 0.9033203125}, {"start": 2654.8, "end": 2656.0, "word": " to", "probability": 0.9384765625}, {"start": 2656.0, "end": 2656.56, "word": " determine", "probability": 0.93212890625}, {"start": 2656.56, "end": 2656.84, "word": " or", "probability": 0.82666015625}, {"start": 2656.84, "end": 2656.96, "word": " to", "probability": 0.9306640625}, {"start": 2656.96, "end": 2657.34, "word": " examine", "probability": 0.955078125}, {"start": 2657.34, "end": 2658.06, "word": " if", "probability": 0.94140625}, {"start": 2658.06, "end": 2658.32, "word": " the", "probability": 0.91357421875}, {"start": 2658.32, "end": 2658.8, "word": " observation", "probability": 0.8701171875}, {"start": 2658.8, "end": 2659.06, "word": " is", "probability": 0.93359375}, {"start": 2659.06, "end": 2659.3, "word": " enough", "probability": 0.5966796875}, {"start": 2659.3, "end": 2659.48, "word": " there", "probability": 0.9013671875}, {"start": 2659.48, "end": 2659.6, "word": " or", "probability": 0.94775390625}, {"start": 2659.6, "end": 2659.86, "word": " not.", "probability": 0.94091796875}, {"start": 2660.22, "end": 2660.52, "word": " One", "probability": 0.9248046875}, {"start": 2660.52, "end": 2660.72, "word": " by", "probability": 0.76123046875}, {"start": 2660.72, "end": 2660.96, "word": " using", "probability": 0.9296875}, {"start": 2660.96, "end": 2661.22, "word": " this", "probability": 0.74853515625}, {"start": 2661.22, "end": 2661.58, "word": " score,", "probability": 
0.1737060546875}, {"start": 2661.88, "end": 2662.46, "word": " straightforward.", "probability": 0.84814453125}, {"start": 2663.24, "end": 2663.44, "word": " And", "probability": 0.95703125}, {"start": 2663.44, "end": 2663.56, "word": " the", "probability": 0.7978515625}, {"start": 2663.56, "end": 2663.74, "word": " other", "probability": 0.8876953125}, {"start": 2663.74, "end": 2663.9, "word": " one,", "probability": 0.92724609375}, {"start": 2663.96, "end": 2664.06, "word": " we", "probability": 0.953125}, {"start": 2664.06, "end": 2664.22, "word": " have", "probability": 0.947265625}, {"start": 2664.22, "end": 2664.44, "word": " to", "probability": 0.966796875}, {"start": 2664.44, "end": 2664.66, "word": " look", "probability": 0.96630859375}, {"start": 2664.66, "end": 2665.04, "word": " for", "probability": 0.94970703125}], "temperature": 1.0}, {"id": 95, "seek": 269412, "start": 2665.44, "end": 2694.12, "text": " The lower limit and upper limit. The other method by using software and later on you will take SPSS in order to determine if the data is normally distributed by using something called QQ plot or normal probability plot. 
So I'm going to skip this part.", "tokens": [440, 3126, 4948, 293, 6597, 4948, 13, 440, 661, 3170, 538, 1228, 4722, 293, 1780, 322, 291, 486, 747, 318, 6273, 50, 294, 1668, 281, 6997, 498, 264, 1412, 307, 5646, 12631, 538, 1228, 746, 1219, 1249, 48, 7542, 420, 2710, 8482, 7542, 13, 407, 286, 478, 516, 281, 10023, 341, 644, 13], "avg_logprob": -0.21874999503294626, "compression_ratio": 1.5089820359281436, "no_speech_prob": 0.0, "words": [{"start": 2665.44, "end": 2665.92, "word": " The", "probability": 0.282958984375}, {"start": 2665.92, "end": 2666.32, "word": " lower", "probability": 0.63671875}, {"start": 2666.32, "end": 2666.64, "word": " limit", "probability": 0.86376953125}, {"start": 2666.64, "end": 2666.92, "word": " and", "probability": 0.86767578125}, {"start": 2666.92, "end": 2667.16, "word": " upper", "probability": 0.72802734375}, {"start": 2667.16, "end": 2667.42, "word": " limit.", "probability": 0.9345703125}, {"start": 2668.96, "end": 2669.18, "word": " The", "probability": 0.8662109375}, {"start": 2669.18, "end": 2669.5, "word": " other", "probability": 0.8828125}, {"start": 2669.5, "end": 2669.92, "word": " method", "probability": 0.9453125}, {"start": 2669.92, "end": 2671.2, "word": " by", "probability": 0.5703125}, {"start": 2671.2, "end": 2671.66, "word": " using", "probability": 0.9365234375}, {"start": 2671.66, "end": 2673.16, "word": " software", "probability": 0.92236328125}, {"start": 2673.16, "end": 2673.56, "word": " and", "probability": 0.6943359375}, {"start": 2673.56, "end": 2673.98, "word": " later", "probability": 0.89794921875}, {"start": 2673.98, "end": 2674.2, "word": " on", "probability": 0.91552734375}, {"start": 2674.2, "end": 2674.3, "word": " you", "probability": 0.85546875}, {"start": 2674.3, "end": 2674.44, "word": " will", "probability": 0.59423828125}, {"start": 2674.44, "end": 2674.74, "word": " take", "probability": 0.8583984375}, {"start": 2674.74, "end": 2676.02, "word": " SPSS", "probability": 0.9119466145833334}, 
{"start": 2676.02, "end": 2676.78, "word": " in", "probability": 0.79052734375}, {"start": 2676.78, "end": 2676.98, "word": " order", "probability": 0.91943359375}, {"start": 2676.98, "end": 2677.22, "word": " to", "probability": 0.95458984375}, {"start": 2677.22, "end": 2677.68, "word": " determine", "probability": 0.91748046875}, {"start": 2677.68, "end": 2679.2, "word": " if", "probability": 0.89306640625}, {"start": 2679.2, "end": 2679.38, "word": " the", "probability": 0.91357421875}, {"start": 2679.38, "end": 2679.72, "word": " data", "probability": 0.9423828125}, {"start": 2679.72, "end": 2681.18, "word": " is", "probability": 0.9384765625}, {"start": 2681.18, "end": 2681.56, "word": " normally", "probability": 0.8798828125}, {"start": 2681.56, "end": 2682.1, "word": " distributed", "probability": 0.92724609375}, {"start": 2682.1, "end": 2682.44, "word": " by", "probability": 0.857421875}, {"start": 2682.44, "end": 2682.76, "word": " using", "probability": 0.9287109375}, {"start": 2682.76, "end": 2683.1, "word": " something", "probability": 0.8623046875}, {"start": 2683.1, "end": 2683.58, "word": " called", "probability": 0.87939453125}, {"start": 2683.58, "end": 2686.54, "word": " QQ", "probability": 0.740478515625}, {"start": 2686.54, "end": 2686.9, "word": " plot", "probability": 0.53076171875}, {"start": 2686.9, "end": 2688.44, "word": " or", "probability": 0.63623046875}, {"start": 2688.44, "end": 2688.94, "word": " normal", "probability": 0.207275390625}, {"start": 2688.94, "end": 2691.0, "word": " probability", "probability": 0.9462890625}, {"start": 2691.0, "end": 2691.34, "word": " plot.", "probability": 0.84326171875}, {"start": 2691.86, "end": 2692.24, "word": " So", "probability": 0.86279296875}, {"start": 2692.24, "end": 2692.54, "word": " I'm", "probability": 0.77783203125}, {"start": 2692.54, "end": 2692.74, "word": " going", "probability": 0.9462890625}, {"start": 2692.74, "end": 2692.94, "word": " to", "probability": 0.96044921875}, 
{"start": 2692.94, "end": 2693.22, "word": " skip", "probability": 0.98779296875}, {"start": 2693.22, "end": 2693.66, "word": " this", "probability": 0.94580078125}, {"start": 2693.66, "end": 2694.12, "word": " part.", "probability": 0.8828125}], "temperature": 1.0}, {"id": 96, "seek": 272429, "start": 2695.41, "end": 2724.29, "text": " Because data is taken by using software. But in general, you may look at this graph. Generally speaking, if you have a probability plot of a data, and the points lie on a straight line, or close to it,", "tokens": [1436, 1412, 307, 2726, 538, 1228, 4722, 13, 583, 294, 2674, 11, 291, 815, 574, 412, 341, 4295, 13, 21082, 4124, 11, 498, 291, 362, 257, 8482, 7542, 295, 257, 1412, 11, 293, 264, 2793, 4544, 322, 257, 2997, 1622, 11, 420, 1998, 281, 309, 11], "avg_logprob": -0.20661569783028136, "compression_ratio": 1.3835616438356164, "no_speech_prob": 0.0, "words": [{"start": 2695.41, "end": 2695.75, "word": " Because", "probability": 0.428955078125}, {"start": 2695.75, "end": 2695.99, "word": " data", "probability": 0.472412109375}, {"start": 2695.99, "end": 2696.21, "word": " is", "probability": 0.367431640625}, {"start": 2696.21, "end": 2696.45, "word": " taken", "probability": 0.73828125}, {"start": 2696.45, "end": 2696.71, "word": " by", "probability": 0.83544921875}, {"start": 2696.71, "end": 2697.13, "word": " using", "probability": 0.90380859375}, {"start": 2697.13, "end": 2697.71, "word": " software.", "probability": 0.91796875}, {"start": 2699.83, "end": 2700.21, "word": " But", "probability": 0.9287109375}, {"start": 2700.21, "end": 2700.57, "word": " in", "probability": 0.7802734375}, {"start": 2700.57, "end": 2700.97, "word": " general,", "probability": 0.904296875}, {"start": 2704.33, "end": 2705.23, "word": " you", "probability": 0.93212890625}, {"start": 2705.23, "end": 2705.55, "word": " may", "probability": 0.93701171875}, {"start": 2705.55, "end": 2706.53, "word": " look", "probability": 0.9609375}, {"start": 2706.53, 
"end": 2706.81, "word": " at", "probability": 0.96826171875}, {"start": 2706.81, "end": 2707.25, "word": " this", "probability": 0.9501953125}, {"start": 2707.25, "end": 2708.15, "word": " graph.", "probability": 0.93701171875}, {"start": 2709.33, "end": 2709.71, "word": " Generally", "probability": 0.4892578125}, {"start": 2709.71, "end": 2710.23, "word": " speaking,", "probability": 0.87109375}, {"start": 2711.49, "end": 2711.75, "word": " if", "probability": 0.94873046875}, {"start": 2711.75, "end": 2711.91, "word": " you", "probability": 0.9638671875}, {"start": 2711.91, "end": 2712.33, "word": " have", "probability": 0.943359375}, {"start": 2712.33, "end": 2713.59, "word": " a", "probability": 0.83984375}, {"start": 2713.59, "end": 2714.03, "word": " probability", "probability": 0.962890625}, {"start": 2714.03, "end": 2714.59, "word": " plot", "probability": 0.9443359375}, {"start": 2714.59, "end": 2715.65, "word": " of", "probability": 0.90625}, {"start": 2715.65, "end": 2715.77, "word": " a", "probability": 0.8583984375}, {"start": 2715.77, "end": 2715.99, "word": " data,", "probability": 0.93798828125}, {"start": 2717.07, "end": 2717.55, "word": " and", "probability": 0.93115234375}, {"start": 2717.55, "end": 2717.73, "word": " the", "probability": 0.91748046875}, {"start": 2717.73, "end": 2718.25, "word": " points", "probability": 0.91357421875}, {"start": 2718.25, "end": 2720.47, "word": " lie", "probability": 0.89501953125}, {"start": 2720.47, "end": 2721.49, "word": " on", "probability": 0.93505859375}, {"start": 2721.49, "end": 2721.81, "word": " a", "probability": 0.79541015625}, {"start": 2721.81, "end": 2722.13, "word": " straight", "probability": 0.908203125}, {"start": 2722.13, "end": 2722.53, "word": " line,", "probability": 0.93310546875}, {"start": 2723.03, "end": 2723.41, "word": " or", "probability": 0.95849609375}, {"start": 2723.41, "end": 2723.83, "word": " close", "probability": 0.8408203125}, {"start": 2723.83, "end": 2724.05, "word": " 
to", "probability": 0.96533203125}, {"start": 2724.05, "end": 2724.29, "word": " it,", "probability": 0.92822265625}], "temperature": 1.0}, {"id": 97, "seek": 275305, "start": 2725.31, "end": 2753.05, "text": " In this case, the distribution is normal. It's hard to make this graph manual. It's better to use software. But at least if we have this graph, and the points are close to the straight line. I mean, the points are either on the straight line, lies on the straight line, or close it. In this case, the data is normally distributed.", "tokens": [682, 341, 1389, 11, 264, 7316, 307, 2710, 13, 467, 311, 1152, 281, 652, 341, 4295, 9688, 13, 467, 311, 1101, 281, 764, 4722, 13, 583, 412, 1935, 498, 321, 362, 341, 4295, 11, 293, 264, 2793, 366, 1998, 281, 264, 2997, 1622, 13, 286, 914, 11, 264, 2793, 366, 2139, 322, 264, 2997, 1622, 11, 9134, 322, 264, 2997, 1622, 11, 420, 1998, 309, 13, 682, 341, 1389, 11, 264, 1412, 307, 5646, 12631, 13], "avg_logprob": -0.18557224567834432, "compression_ratio": 1.9132947976878614, "no_speech_prob": 0.0, "words": [{"start": 2725.31, "end": 2725.53, "word": " In", "probability": 0.69970703125}, {"start": 2725.53, "end": 2725.77, "word": " this", "probability": 0.94873046875}, {"start": 2725.77, "end": 2726.13, "word": " case,", "probability": 0.92431640625}, {"start": 2726.37, "end": 2726.47, "word": " the", "probability": 0.5048828125}, {"start": 2726.47, "end": 2726.75, "word": " distribution", "probability": 0.3369140625}, {"start": 2726.75, "end": 2727.09, "word": " is", "probability": 0.9404296875}, {"start": 2727.09, "end": 2727.37, "word": " normal.", "probability": 0.81494140625}, {"start": 2728.75, "end": 2729.31, "word": " It's", "probability": 0.942626953125}, {"start": 2729.31, "end": 2729.65, "word": " hard", "probability": 0.890625}, {"start": 2729.65, "end": 2730.03, "word": " to", "probability": 0.970703125}, {"start": 2730.03, "end": 2730.55, "word": " make", "probability": 0.93603515625}, {"start": 2730.55, "end": 
2730.79, "word": " this", "probability": 0.8828125}, {"start": 2730.79, "end": 2731.23, "word": " graph", "probability": 0.96240234375}, {"start": 2731.23, "end": 2732.21, "word": " manual.", "probability": 0.8623046875}, {"start": 2732.51, "end": 2732.67, "word": " It's", "probability": 0.963134765625}, {"start": 2732.67, "end": 2732.89, "word": " better", "probability": 0.91650390625}, {"start": 2732.89, "end": 2733.09, "word": " to", "probability": 0.96533203125}, {"start": 2733.09, "end": 2733.39, "word": " use", "probability": 0.8828125}, {"start": 2733.39, "end": 2734.27, "word": " software.", "probability": 0.9169921875}, {"start": 2734.73, "end": 2734.95, "word": " But", "probability": 0.94189453125}, {"start": 2734.95, "end": 2735.13, "word": " at", "probability": 0.90478515625}, {"start": 2735.13, "end": 2735.37, "word": " least", "probability": 0.9560546875}, {"start": 2735.37, "end": 2735.91, "word": " if", "probability": 0.623046875}, {"start": 2735.91, "end": 2736.05, "word": " we", "probability": 0.86328125}, {"start": 2736.05, "end": 2736.23, "word": " have", "probability": 0.9443359375}, {"start": 2736.23, "end": 2736.43, "word": " this", "probability": 0.93359375}, {"start": 2736.43, "end": 2736.85, "word": " graph,", "probability": 0.9580078125}, {"start": 2738.21, "end": 2738.51, "word": " and", "probability": 0.916015625}, {"start": 2738.51, "end": 2738.67, "word": " the", "probability": 0.916015625}, {"start": 2738.67, "end": 2739.05, "word": " points", "probability": 0.9228515625}, {"start": 2739.05, "end": 2739.37, "word": " are", "probability": 0.93603515625}, {"start": 2739.37, "end": 2739.93, "word": " close", "probability": 0.44140625}, {"start": 2739.93, "end": 2740.15, "word": " to", "probability": 0.939453125}, {"start": 2740.15, "end": 2740.31, "word": " the", "probability": 0.8486328125}, {"start": 2740.31, "end": 2740.53, "word": " straight", "probability": 0.9384765625}, {"start": 2740.53, "end": 2740.85, "word": " line.", 
"probability": 0.92138671875}, {"start": 2741.79, "end": 2741.95, "word": " I", "probability": 0.93994140625}, {"start": 2741.95, "end": 2742.15, "word": " mean,", "probability": 0.96484375}, {"start": 2742.25, "end": 2742.49, "word": " the", "probability": 0.9072265625}, {"start": 2742.49, "end": 2742.79, "word": " points", "probability": 0.91796875}, {"start": 2742.79, "end": 2743.15, "word": " are", "probability": 0.90673828125}, {"start": 2743.15, "end": 2744.39, "word": " either", "probability": 0.82861328125}, {"start": 2744.39, "end": 2744.89, "word": " on", "probability": 0.8740234375}, {"start": 2744.89, "end": 2745.05, "word": " the", "probability": 0.90625}, {"start": 2745.05, "end": 2745.33, "word": " straight", "probability": 0.9208984375}, {"start": 2745.33, "end": 2745.63, "word": " line,", "probability": 0.92333984375}, {"start": 2745.73, "end": 2745.95, "word": " lies", "probability": 0.85009765625}, {"start": 2745.95, "end": 2746.15, "word": " on", "probability": 0.943359375}, {"start": 2746.15, "end": 2746.27, "word": " the", "probability": 0.912109375}, {"start": 2746.27, "end": 2746.49, "word": " straight", "probability": 0.916015625}, {"start": 2746.49, "end": 2746.77, "word": " line,", "probability": 0.9365234375}, {"start": 2746.83, "end": 2747.45, "word": " or", "probability": 0.9560546875}, {"start": 2747.45, "end": 2747.85, "word": " close", "probability": 0.25927734375}, {"start": 2747.85, "end": 2748.05, "word": " it.", "probability": 0.92333984375}, {"start": 2748.67, "end": 2748.91, "word": " In", "probability": 0.943359375}, {"start": 2748.91, "end": 2749.17, "word": " this", "probability": 0.947265625}, {"start": 2749.17, "end": 2749.55, "word": " case,", "probability": 0.9091796875}, {"start": 2750.87, "end": 2751.59, "word": " the", "probability": 0.90185546875}, {"start": 2751.59, "end": 2751.91, "word": " data", "probability": 0.94140625}, {"start": 2751.91, "end": 2752.17, "word": " is", "probability": 0.94921875}, {"start": 
2752.17, "end": 2752.55, "word": " normally", "probability": 0.295654296875}, {"start": 2752.55, "end": 2753.05, "word": " distributed.", "probability": 0.916015625}], "temperature": 1.0}, {"id": 98, "seek": 278323, "start": 2754.29, "end": 2783.23, "text": " If the data points scattered away of the straight line, then the distribution is not normal either skewed to the right or skewed to the left. So for this specific graph, the plot is normally distributed, approximately normally distributed. Because most of the points here lie close to the line and few", "tokens": [759, 264, 1412, 2793, 21986, 1314, 295, 264, 2997, 1622, 11, 550, 264, 7316, 307, 406, 2710, 2139, 8756, 26896, 281, 264, 558, 420, 8756, 26896, 281, 264, 1411, 13, 407, 337, 341, 2685, 4295, 11, 264, 7542, 307, 5646, 12631, 11, 10447, 5646, 12631, 13, 1436, 881, 295, 264, 2793, 510, 4544, 1998, 281, 264, 1622, 293, 1326], "avg_logprob": -0.20104166120290756, "compression_ratio": 1.7062146892655368, "no_speech_prob": 0.0, "words": [{"start": 2754.29, "end": 2754.61, "word": " If", "probability": 0.52490234375}, {"start": 2754.61, "end": 2754.81, "word": " the", "probability": 0.90283203125}, {"start": 2754.81, "end": 2755.03, "word": " data", "probability": 0.9326171875}, {"start": 2755.03, "end": 2755.35, "word": " points", "probability": 0.385986328125}, {"start": 2755.35, "end": 2755.95, "word": " scattered", "probability": 0.60693359375}, {"start": 2755.95, "end": 2756.81, "word": " away", "probability": 0.87451171875}, {"start": 2756.81, "end": 2758.75, "word": " of", "probability": 0.6787109375}, {"start": 2758.75, "end": 2759.07, "word": " the", "probability": 0.91064453125}, {"start": 2759.07, "end": 2759.65, "word": " straight", "probability": 0.9208984375}, {"start": 2759.65, "end": 2760.01, "word": " line,", "probability": 0.92529296875}, {"start": 2760.43, "end": 2760.87, "word": " then", "probability": 0.8427734375}, {"start": 2760.87, "end": 2761.25, "word": " the", "probability": 
0.83740234375}, {"start": 2761.25, "end": 2761.71, "word": " distribution", "probability": 0.814453125}, {"start": 2761.71, "end": 2761.97, "word": " is", "probability": 0.947265625}, {"start": 2761.97, "end": 2762.17, "word": " not", "probability": 0.93603515625}, {"start": 2762.17, "end": 2762.51, "word": " normal", "probability": 0.80810546875}, {"start": 2762.51, "end": 2762.93, "word": " either", "probability": 0.52294921875}, {"start": 2762.93, "end": 2764.25, "word": " skewed", "probability": 0.9208984375}, {"start": 2764.25, "end": 2764.39, "word": " to", "probability": 0.95654296875}, {"start": 2764.39, "end": 2764.51, "word": " the", "probability": 0.91455078125}, {"start": 2764.51, "end": 2764.75, "word": " right", "probability": 0.91748046875}, {"start": 2764.75, "end": 2765.13, "word": " or", "probability": 0.90380859375}, {"start": 2765.13, "end": 2765.57, "word": " skewed", "probability": 0.957275390625}, {"start": 2765.57, "end": 2766.17, "word": " to", "probability": 0.9619140625}, {"start": 2766.17, "end": 2766.25, "word": " the", "probability": 0.91552734375}, {"start": 2766.25, "end": 2766.49, "word": " left.", "probability": 0.921875}, {"start": 2767.59, "end": 2768.21, "word": " So", "probability": 0.904296875}, {"start": 2768.21, "end": 2768.45, "word": " for", "probability": 0.71484375}, {"start": 2768.45, "end": 2768.69, "word": " this", "probability": 0.947265625}, {"start": 2768.69, "end": 2769.27, "word": " specific", "probability": 0.90673828125}, {"start": 2769.27, "end": 2770.05, "word": " graph,", "probability": 0.89794921875}, {"start": 2771.11, "end": 2771.25, "word": " the", "probability": 0.91650390625}, {"start": 2771.25, "end": 2771.49, "word": " plot", "probability": 0.97119140625}, {"start": 2771.49, "end": 2771.73, "word": " is", "probability": 0.94580078125}, {"start": 2771.73, "end": 2772.23, "word": " normally", "probability": 0.865234375}, {"start": 2772.23, "end": 2774.15, "word": " distributed,", "probability": 
0.88818359375}, {"start": 2774.29, "end": 2774.67, "word": " approximately", "probability": 0.7890625}, {"start": 2774.67, "end": 2775.13, "word": " normally", "probability": 0.78857421875}, {"start": 2775.13, "end": 2775.69, "word": " distributed.", "probability": 0.943359375}, {"start": 2776.11, "end": 2776.63, "word": " Because", "probability": 0.9140625}, {"start": 2776.63, "end": 2777.55, "word": " most", "probability": 0.8857421875}, {"start": 2777.55, "end": 2777.69, "word": " of", "probability": 0.962890625}, {"start": 2777.69, "end": 2777.79, "word": " the", "probability": 0.91845703125}, {"start": 2777.79, "end": 2778.21, "word": " points", "probability": 0.92578125}, {"start": 2778.21, "end": 2778.61, "word": " here", "probability": 0.86083984375}, {"start": 2778.61, "end": 2780.49, "word": " lie", "probability": 0.75732421875}, {"start": 2780.49, "end": 2780.89, "word": " close", "probability": 0.833984375}, {"start": 2780.89, "end": 2781.03, "word": " to", "probability": 0.96142578125}, {"start": 2781.03, "end": 2781.11, "word": " the", "probability": 0.91455078125}, {"start": 2781.11, "end": 2781.37, "word": " line", "probability": 0.935546875}, {"start": 2781.37, "end": 2782.65, "word": " and", "probability": 0.460693359375}, {"start": 2782.65, "end": 2783.23, "word": " few", "probability": 0.75048828125}], "temperature": 1.0}, {"id": 99, "seek": 281370, "start": 2784.26, "end": 2813.7, "text": " are scattered away. Or it means that there are few outliers in this case, we can ignore these values. So here plot is approximately a straight line except for a few outliers at the low and the right, those points. So generally speaking, the distribution is normal distribution. 
That's all for this chapter.", "tokens": [366, 21986, 1314, 13, 1610, 309, 1355, 300, 456, 366, 1326, 484, 23646, 294, 341, 1389, 11, 321, 393, 11200, 613, 4190, 13, 407, 510, 7542, 307, 10447, 257, 2997, 1622, 3993, 337, 257, 1326, 484, 23646, 412, 264, 2295, 293, 264, 558, 11, 729, 2793, 13, 407, 5101, 4124, 11, 264, 7316, 307, 2710, 7316, 13, 663, 311, 439, 337, 341, 7187, 13], "avg_logprob": -0.1873798076923077, "compression_ratio": 1.5989583333333333, "no_speech_prob": 0.0, "words": [{"start": 2784.26, "end": 2784.48, "word": " are", "probability": 0.2265625}, {"start": 2784.48, "end": 2785.6, "word": " scattered", "probability": 0.77783203125}, {"start": 2785.6, "end": 2785.88, "word": " away.", "probability": 0.7880859375}, {"start": 2786.82, "end": 2787.16, "word": " Or", "probability": 0.8125}, {"start": 2787.16, "end": 2787.68, "word": " it", "probability": 0.76513671875}, {"start": 2787.68, "end": 2787.92, "word": " means", "probability": 0.92822265625}, {"start": 2787.92, "end": 2788.28, "word": " that", "probability": 0.91748046875}, {"start": 2788.28, "end": 2788.82, "word": " there", "probability": 0.84814453125}, {"start": 2788.82, "end": 2789.06, "word": " are", "probability": 0.9169921875}, {"start": 2789.06, "end": 2789.66, "word": " few", "probability": 0.82470703125}, {"start": 2789.66, "end": 2790.08, "word": " outliers", "probability": 0.935546875}, {"start": 2790.08, "end": 2790.34, "word": " in", "probability": 0.888671875}, {"start": 2790.34, "end": 2790.52, "word": " this", "probability": 0.935546875}, {"start": 2790.52, "end": 2790.74, "word": " case,", "probability": 0.912109375}, {"start": 2790.84, "end": 2790.92, "word": " we", "probability": 0.91162109375}, {"start": 2790.92, "end": 2791.12, "word": " can", "probability": 0.9453125}, {"start": 2791.12, "end": 2791.44, "word": " ignore", "probability": 0.84814453125}, {"start": 2791.44, "end": 2791.68, "word": " these", "probability": 0.82177734375}, {"start": 2791.68, "end": 2791.94, 
"word": " values.", "probability": 0.76904296875}, {"start": 2793.1, "end": 2793.38, "word": " So", "probability": 0.95751953125}, {"start": 2793.38, "end": 2793.62, "word": " here", "probability": 0.708984375}, {"start": 2793.62, "end": 2793.9, "word": " plot", "probability": 0.57666015625}, {"start": 2793.9, "end": 2794.04, "word": " is", "probability": 0.923828125}, {"start": 2794.04, "end": 2794.6, "word": " approximately", "probability": 0.8564453125}, {"start": 2794.6, "end": 2795.0, "word": " a", "probability": 0.5927734375}, {"start": 2795.0, "end": 2795.26, "word": " straight", "probability": 0.90576171875}, {"start": 2795.26, "end": 2795.62, "word": " line", "probability": 0.93603515625}, {"start": 2795.62, "end": 2796.38, "word": " except", "probability": 0.56884765625}, {"start": 2796.38, "end": 2796.66, "word": " for", "probability": 0.9453125}, {"start": 2796.66, "end": 2796.84, "word": " a", "probability": 0.97998046875}, {"start": 2796.84, "end": 2797.02, "word": " few", "probability": 0.90185546875}, {"start": 2797.02, "end": 2797.5, "word": " outliers", "probability": 0.94775390625}, {"start": 2797.5, "end": 2797.84, "word": " at", "probability": 0.91259765625}, {"start": 2797.84, "end": 2797.98, "word": " the", "probability": 0.892578125}, {"start": 2797.98, "end": 2798.22, "word": " low", "probability": 0.87109375}, {"start": 2798.22, "end": 2799.72, "word": " and", "probability": 0.85302734375}, {"start": 2799.72, "end": 2800.36, "word": " the", "probability": 0.87451171875}, {"start": 2800.36, "end": 2800.64, "word": " right,", "probability": 0.806640625}, {"start": 2801.2, "end": 2801.58, "word": " those", "probability": 0.7666015625}, {"start": 2801.58, "end": 2801.94, "word": " points.", "probability": 0.92724609375}, {"start": 2802.96, "end": 2803.28, "word": " So", "probability": 0.9501953125}, {"start": 2803.28, "end": 2803.96, "word": " generally", "probability": 0.66455078125}, {"start": 2803.96, "end": 2804.42, "word": " speaking,", 
"probability": 0.86474609375}, {"start": 2804.62, "end": 2804.78, "word": " the", "probability": 0.90087890625}, {"start": 2804.78, "end": 2805.3, "word": " distribution", "probability": 0.73828125}, {"start": 2805.3, "end": 2805.78, "word": " is", "probability": 0.95166015625}, {"start": 2805.78, "end": 2806.68, "word": " normal", "probability": 0.6962890625}, {"start": 2806.68, "end": 2807.18, "word": " distribution.", "probability": 0.6640625}, {"start": 2808.98, "end": 2809.7, "word": " That's", "probability": 0.9453125}, {"start": 2809.7, "end": 2811.2, "word": " all", "probability": 0.9365234375}, {"start": 2811.2, "end": 2812.42, "word": " for", "probability": 0.9482421875}, {"start": 2812.42, "end": 2813.2, "word": " this", "probability": 0.94580078125}, {"start": 2813.2, "end": 2813.7, "word": " chapter.", "probability": 0.87744140625}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 2817.997, "duration_after_vad": 2686.069374999988} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..30859f56158c1d037f2ff419697e1d0dc59037e6 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/BRRl21n6QWg_raw.srt @@ -0,0 +1,1956 @@ +1 +00:00:07,390 --> 00:00:09,890 +So last time, we discussed how to find the + +2 +00:00:09,890 --> 00:00:14,570 +probabilities underneath the normal curve for + +3 +00:00:14,570 --> 00:00:20,450 +three cases. If the point lies in the lower tail, + +4 +00:00:21,690 --> 00:00:28,150 +as this one, or upper tail, or between. So we can + +5 +00:00:28,150 --> 00:00:32,290 +do the computations for this kind of + +6 +00:00:32,290 --> 00:00:36,090 +probabilities. And I think we gave two examples. 
+ +7 +00:00:36,620 --> 00:00:40,400 +For example, if we are looking for probability of + +8 +00:00:40,400 --> 00:00:45,020 +Z is smaller than 0.1, and Z, as we mentioned last + +9 +00:00:45,020 --> 00:00:48,660 +time, is the standardized normal distribution that + +10 +00:00:48,660 --> 00:00:53,120 +has mean of 0 and sigma is 1. In this case, just + +11 +00:00:53,120 --> 00:00:57,540 +go to the table you have. Now we are looking for 0 + +12 +00:00:57,540 --> 00:01:03,700 +.12, for example. So here we have 0.1. Under 2, we + +13 +00:01:03,700 --> 00:01:07,260 +get this result. This value is the probability of + +14 +00:01:07,260 --> 00:01:10,480 +Z smaller than 0.5. But you have to keep in mind + +15 +00:01:10,480 --> 00:01:14,460 +that we have to transform first from normal + +16 +00:01:14,460 --> 00:01:17,580 +distribution to standardized normal distribution. + +17 +00:01:18,620 --> 00:01:21,220 +For this specific example, if we are looking for + +18 +00:01:21,220 --> 00:01:26,960 +mean of 8 and standard deviation of 5, that's for + +19 +00:01:26,960 --> 00:01:30,020 +the normal distribution. In this case, the z-score + +20 +00:01:30,020 --> 00:01:32,920 +is given by this equation, which is x minus 3 + +21 +00:01:32,920 --> 00:01:37,060 +divided by sigma. So 8.6 minus 8 divided by 5 + +22 +00:01:37,060 --> 00:01:41,360 +gives 0.12. In this case, we can use the + +23 +00:01:41,360 --> 00:01:46,480 +standardized normal table. Now, for the other + +24 +00:01:46,480 --> 00:01:48,700 +case, we are looking for the probability of x + +25 +00:01:48,700 --> 00:01:51,880 +greater than 8.6. So we are looking for the upper + +26 +00:01:51,880 --> 00:01:56,900 +tail probability. The table we have gives the area + +27 +00:01:56,900 --> 00:02:01,640 +to the left side. And we know that the total area + +28 +00:02:01,640 --> 00:02:04,760 +underneath the normal curve is 1. 
So the + +29 +00:02:04,760 --> 00:02:08,980 +probability of x greater than 8.6 is the same as 1 + +30 +00:02:08,980 --> 00:02:13,720 +minus the probability of x smaller than x less + +31 +00:02:13,720 --> 00:02:19,100 +than 8.6. So here, for 8.6, we got z squared to be + +32 +00:02:19,100 --> 00:02:24,300 +0.12. So the probability of z greater than 0.12 is + +33 +00:02:24,300 --> 00:02:27,620 +the same as 1 minus z of z is smaller than 0.12. + +34 +00:02:28,850 --> 00:02:33,050 +So 1 minus the answer we just got from part A will + +35 +00:02:33,050 --> 00:02:35,930 +get us to something. So this is the probability of + +36 +00:02:35,930 --> 00:02:41,170 +X is greater than 8.6. So all of the time, if you + +37 +00:02:41,170 --> 00:02:45,390 +are asking about computing the probability in the + +38 +00:02:45,390 --> 00:02:48,290 +upper tail, you have to first find the probability + +39 +00:02:48,290 --> 00:02:52,630 +in the lower tail, then subtract that value from + +40 +00:02:52,630 --> 00:02:56,710 +1. So that's the probability for the upper tail. + +41 +00:02:58,110 --> 00:03:02,470 +The last one, we are looking for probability + +42 +00:03:02,470 --> 00:03:06,970 +between two values. For example, x. What's the + +43 +00:03:06,970 --> 00:03:10,190 +probability of x greater than 8 and smaller than 8 + +44 +00:03:10,190 --> 00:03:15,670 +.6? So we are looking for this area, the red area. + +45 +00:03:16,250 --> 00:03:18,710 +So the probability of x between these two values + +46 +00:03:18,710 --> 00:03:21,850 +actually equals the probability of x smaller than. + +47 +00:03:23,040 --> 00:03:27,560 +8.6 minus the probability of X smaller than 8A. + +48 +00:03:27,880 --> 00:03:31,140 +And that, in this case, you have to compute two + +49 +00:03:31,140 --> 00:03:34,780 +values of this score. One for the first value, + +50 +00:03:34,880 --> 00:03:40,020 +which is A. This value is zero because the mean is + +51 +00:03:40,020 --> 00:03:45,240 +zero. 
And we know that Z can be negative. If X is + +52 +00:03:45,240 --> 00:03:49,460 +smaller than Mu, Z can positive if X is greater + +53 +00:03:49,460 --> 00:03:55,220 +than Mu and equals zero only if X equals Mu. In + +54 +00:03:55,220 --> 00:03:59,340 +this case, X equals Mu, so Z score is zero. The + +55 +00:03:59,340 --> 00:04:03,220 +other one as we got before is 0.12. So now, we + +56 +00:04:03,220 --> 00:04:07,340 +transform actually the probability of X between + +57 +00:04:08,870 --> 00:04:13,870 +8 and 8.6 to z-score between 0 and 0.12. In this + +58 +00:04:13,870 --> 00:04:17,170 +case, we can use the normal theorem. Now, this + +59 +00:04:17,170 --> 00:04:21,130 +area is, as we mentioned, just b of x smaller than + +60 +00:04:21,130 --> 00:04:28,850 +0.12 minus b of x, b of z smaller than 0. This + +61 +00:04:28,850 --> 00:04:34,330 +probability, we know that, 0.54878. Now, the + +62 +00:04:34,330 --> 00:04:38,170 +probability of z smaller than 0 is one-half. + +63 +00:04:38,860 --> 00:04:41,380 +Because the total area underneath the normal curve + +64 +00:04:41,380 --> 00:04:45,860 +is 1, and 0 divided the curve into two equally + +65 +00:04:45,860 --> 00:04:49,100 +parts. So the area to the right of 0 is the same + +66 +00:04:49,100 --> 00:04:52,100 +as the area to the left of 0. So in this case, + +67 +00:04:52,880 --> 00:04:57,160 +minus 0.5. So this is your answer. So the + +68 +00:04:57,160 --> 00:05:04,040 +probability of Z between 8 and 8.6 is around 0478. + +69 +00:05:04,320 --> 00:05:07,020 +I think we stopped last time at this point. + +70 +00:05:09,860 --> 00:05:17,760 +This is another example to compute the probability + +71 +00:05:17,760 --> 00:05:24,020 +of X greater than 7.4 and 8. Also, it's the same + +72 +00:05:24,020 --> 00:05:29,700 +idea here, just find these scores for the two + +73 +00:05:29,700 --> 00:05:30,160 +values. + +74 +00:05:33,850 --> 00:05:38,670 +L with B of Z greater than minus 0.12 up to 0. 
Now + +75 +00:05:38,670 --> 00:05:47,010 +this red area equals the area below 0. I mean B of + +76 +00:05:47,010 --> 00:05:50,330 +Z smaller than 0 minus the probability of Z + +77 +00:05:50,330 --> 00:05:57,090 +smaller than minus 0.12. Now by using symmetric + +78 +00:05:57,090 --> 00:06:00,470 +probability of the normal distribution, we know + +79 +00:06:00,470 --> 00:06:04,620 +that The probability of Z smaller than minus 0.12 + +80 +00:06:04,620 --> 00:06:11,420 +equals the probability of Z greater than 0.12. + +81 +00:06:11,780 --> 00:06:15,520 +Because this area, if we have Z smaller than minus + +82 +00:06:15,520 --> 00:06:19,300 +0.12, the area to the left, that equals the area + +83 +00:06:19,300 --> 00:06:21,560 +to the right of the same point, because of + +84 +00:06:21,560 --> 00:06:25,480 +symmetry. And finally, you will end with this + +85 +00:06:25,480 --> 00:06:25,740 +result. + +86 +00:06:30,670 --> 00:06:36,730 +D of z minus 0.12, all the way up to 0, this area, + +87 +00:06:37,630 --> 00:06:43,130 +is the same as the area from 0 up to 0.5. So this + +88 +00:06:43,130 --> 00:06:46,530 +area actually is the same as D of z between 0 and + +89 +00:06:46,530 --> 00:06:51,270 +0.5. So if you have negative sign, and then take + +90 +00:06:51,270 --> 00:06:54,390 +the opposite one, the answer will be the same + +91 +00:06:54,390 --> 00:06:57,610 +because the normal distribution is symmetric + +92 +00:06:57,610 --> 00:07:04,690 +around 0.9. The questions, I think we stopped + +93 +00:07:04,690 --> 00:07:09,330 +here. And also we talked about empirical rules. + +94 +00:07:10,930 --> 00:07:13,250 +The one we mentioned in chapter three, in chapter + +95 +00:07:13,250 --> 00:07:16,170 +three. And we know that, as we mentioned before, + +96 +00:07:16,310 --> 00:07:25,550 +that is 68.16% of the observations fall within one + +97 +00:07:25,550 --> 00:07:30,660 +standard deviation around the mean. 
So this area + +98 +00:07:30,660 --> 00:07:35,380 +from mu minus one sigma up to mu plus sigma, this + +99 +00:07:35,380 --> 00:07:42,080 +area covers around 68%. Also + +100 +00:07:42,080 --> 00:07:50,700 +95% or actually 95.44% of the data falls within + +101 +00:07:50,700 --> 00:07:54,840 +two standard deviations of the mean. And finally, + +102 +00:07:55,040 --> 00:08:00,220 +around most of the data, around 99.73% of the data + +103 +00:08:00,220 --> 00:08:06,860 +falls within three subdivisions of the population + +104 +00:08:06,860 --> 00:08:07,160 +mean. + +105 +00:08:11,550 --> 00:08:14,970 +And now the new topic is how can we find the X + +106 +00:08:14,970 --> 00:08:18,510 +value if the probability is given. It's vice + +107 +00:08:18,510 --> 00:08:21,810 +versa. In the previous questions, we were asking + +108 +00:08:21,810 --> 00:08:26,130 +about find the probability, for example, if X is + +109 +00:08:26,130 --> 00:08:30,450 +smaller than a certain number. Now suppose this + +110 +00:08:30,450 --> 00:08:34,070 +probability is given, and we are looking to find + +111 +00:08:34,070 --> 00:08:38,710 +this value. I mean, for example, suppose in the + +112 +00:08:38,710 --> 00:08:39,850 +previous examples here, + +113 +00:08:43,380 --> 00:08:46,760 +Suppose we know this probability. So the + +114 +00:08:46,760 --> 00:08:50,220 +probability is given. The question is, how can we + +115 +00:08:50,220 --> 00:08:54,140 +find this value? It's the opposite, sometimes + +116 +00:08:54,140 --> 00:08:57,180 +called backward normal calculations. + +117 +00:09:01,660 --> 00:09:05,580 +There are actually two steps. 
to find the x value + +118 +00:09:05,580 --> 00:09:10,040 +for a certain probability or for a given or for a + +119 +00:09:10,040 --> 00:09:12,900 +known probability the first step we have to find + +120 +00:09:12,900 --> 00:09:20,540 +the z score then use this equation to find the x + +121 +00:09:20,540 --> 00:09:25,100 +value corresponding to the z score you have and x + +122 +00:09:25,100 --> 00:09:30,120 +is just mu plus sigma times mu so first step you + +123 +00:09:30,120 --> 00:09:31,740 +have to find the z score + +124 +00:09:35,350 --> 00:09:38,690 +corresponding to the probability we have. So find + +125 +00:09:38,690 --> 00:09:43,870 +the z value for the non-probability, then use that + +126 +00:09:43,870 --> 00:09:47,910 +z score to find the value of x by using this + +127 +00:09:47,910 --> 00:09:52,010 +equation. So x equals mu plus z sigma. z could be + +128 +00:09:52,010 --> 00:09:54,490 +negative, could be positive, depends on the + +129 +00:09:54,490 --> 00:09:59,170 +probability you have. If the probability is above + +130 +00:09:59,170 --> 00:10:03,390 +0.5, I mean 0.5 and greater than 0.5, this + +131 +00:10:03,390 --> 00:10:08,880 +corresponds to 10. But if z-score is negative, I'm + +132 +00:10:08,880 --> 00:10:10,680 +sorry, if z-score is negative, then the + +133 +00:10:10,680 --> 00:10:14,880 +probability should be smaller than 0.5. So if the + +134 +00:10:14,880 --> 00:10:19,360 +probability is given less than 0.5, then your z + +135 +00:10:19,360 --> 00:10:21,100 +-score should be negative, otherwise should be + +136 +00:10:21,100 --> 00:10:23,720 +positive. So you have to be careful in this case. + +137 +00:10:25,700 --> 00:10:31,240 +Now look at this example. Let x represent the time + +138 +00:10:31,240 --> 00:10:35,770 +it takes in seconds. to download an image file + +139 +00:10:35,770 --> 00:10:39,730 +from the internet. The same example as we did + +140 +00:10:39,730 --> 00:10:43,590 +before. 
And here we assume that x is normal + +141 +00:10:43,590 --> 00:10:46,330 +distribution with mean of 8 and standard deviation + +142 +00:10:46,330 --> 00:10:51,710 +of 5. Now, let's see how can we find the value of + +143 +00:10:51,710 --> 00:10:58,050 +x such that 20% of download times are smaller than + +144 +00:10:58,050 --> 00:10:58,410 +x. + +145 +00:11:01,060 --> 00:11:04,580 +So, this probability is a fraction. Also, always + +146 +00:11:04,580 --> 00:11:07,840 +the probability is between 0 and 1. So, the + +147 +00:11:07,840 --> 00:11:11,820 +probability here is 20%. In this case, your z + +148 +00:11:11,820 --> 00:11:15,560 +-score should be negative. Because 20% is more + +149 +00:11:15,560 --> 00:11:18,660 +than 0.5. So, z-score should be in this side, in + +150 +00:11:18,660 --> 00:11:19,320 +the left side. + +151 +00:11:22,340 --> 00:11:26,380 +So, again, he asks about finding x-value such that + +152 +00:11:26,380 --> 00:11:27,140 +20%. + +153 +00:11:31,740 --> 00:11:35,400 +So here again we are looking for this value, for + +154 +00:11:35,400 --> 00:11:40,760 +the value of x, which is smaller than the area to + +155 +00:11:40,760 --> 00:11:45,680 +the left of this x, equals 0.2. + +156 +00:11:47,480 --> 00:11:51,100 +Now, the first step, we have to find the z-score. + +157 +00:11:52,650 --> 00:11:56,430 +It's backward, z-score first, then x. Find a z + +158 +00:11:56,430 --> 00:12:00,450 +-score corresponding to the probability of 0.2. + +159 +00:12:02,510 --> 00:12:07,710 +The approximate one, the near value, I mean, to + +160 +00:12:07,710 --> 00:12:12,190 +the 0.2 is 0.2005. Sometimes you have the exact + +161 +00:12:12,190 --> 00:12:16,570 +value from the table you have, but most of the + +162 +00:12:16,570 --> 00:12:19,050 +time you don't have it. So you have to look at the + +163 +00:12:19,050 --> 00:12:21,790 +approximate value, which is very close to the one + +164 +00:12:21,790 --> 00:12:25,840 +you have. So here, we are looking for 0.2. 
The + +165 +00:12:25,840 --> 00:12:30,660 +closest value to 0.2 is 0.2005. Now, the + +166 +00:12:30,660 --> 00:12:34,720 +corresponding value to this probability is minus 0 + +167 +00:12:34,720 --> 00:12:40,120 +.8 all the way up to 4. So your z-score is + +168 +00:12:40,120 --> 00:12:47,840 +negative 0.84. So this is the first step. Any + +169 +00:12:47,840 --> 00:12:51,120 +question? Again. + +170 +00:12:53,950 --> 00:12:57,050 +Now if we just go back to this equation, + +171 +00:12:59,930 --> 00:13:03,670 +z equals x minus mu over sigma. A cross + +172 +00:13:03,670 --> 00:13:07,810 +multiplication, I mean if you multiply both sides + +173 +00:13:07,810 --> 00:13:16,110 +by sigma, you will get sigma times z equals x + +174 +00:13:16,110 --> 00:13:17,510 +minus mu. + +175 +00:13:32,120 --> 00:13:35,500 +Now, in this question, + +176 +00:13:37,960 --> 00:13:43,160 +he asks about, find the value of x such that 20% + +177 +00:13:43,160 --> 00:13:46,560 +of download times are less than x. + +178 +00:13:50,740 --> 00:13:54,080 +Now the probability is less than 0.5, so your z + +179 +00:13:54,080 --> 00:13:57,780 +-score should be on the left side. So here we need + +180 +00:13:57,780 --> 00:14:03,660 +to find the value of z first. Go back to the + +181 +00:14:03,660 --> 00:14:04,640 +normal table you have. + +182 +00:14:07,680 --> 00:14:08,860 +This is the normal table. + +183 +00:14:16,800 --> 00:14:21,250 +We are looking for minus 0.2. I'm sorry, we are + +184 +00:14:21,250 --> 00:14:28,910 +looking for 0.2. So the closest value to 0.2 is + +185 +00:14:28,910 --> 00:14:34,750 +this one, 0.2005. So this is the closest value. + +186 +00:14:49,630 --> 00:14:54,470 +So the exact answer is sometimes not given. So the + +187 +00:14:54,470 --> 00:14:59,190 +approximate one, minus 0.8, all the way up to 4. + +188 +00:15:00,030 --> 00:15:06,410 +So z-score minus 0.8. Any question? + +189 +00:15:10,330 --> 00:15:15,330 +So the value of z-score is minus 0.84. 
So my + +190 +00:15:15,330 --> 00:15:21,430 +corresponding x-value equals X equal mu. The mu is + +191 +00:15:21,430 --> 00:15:31,770 +given as A plus Z is minus 0.84 times sigma. Sigma + +192 +00:15:31,770 --> 00:15:42,150 +is 5. You will end with 3.8. So this means the + +193 +00:15:42,150 --> 00:15:47,550 +probability of X less than 3.8. Equal point. + +194 +00:15:47,810 --> 00:15:48,950 +Exactly, equal point. + +195 +00:15:52,430 --> 00:15:57,230 +So in this case, the probability is given, which + +196 +00:15:57,230 --> 00:16:01,710 +is 0.20. And we ask about what's the value of x in + +197 +00:16:01,710 --> 00:16:07,830 +this case. So the first step was find the z-score. + +198 +00:16:09,210 --> 00:16:15,250 +Then use this value. I mean, plug this value in. + +199 +00:16:17,380 --> 00:16:22,520 +to find the corresponding X score. That's the + +200 +00:16:22,520 --> 00:16:25,240 +backward normal calculations. + +201 +00:16:28,820 --> 00:16:34,920 +Let's do one problem from the practice, which is + +202 +00:16:34,920 --> 00:16:36,820 +number 18. + +203 +00:16:53,390 --> 00:16:56,310 +Is it clear? + +204 +00:17:00,590 --> 00:17:10,310 +The owners of a fish market determined + +205 +00:17:10,310 --> 00:17:20,570 +that the average weight for a catfish is 3.2 So + +206 +00:17:20,570 --> 00:17:27,230 +this is the + +207 +00:17:27,230 --> 00:17:39,150 +value of the mean 40 + +208 +00:17:48,310 --> 00:17:57,010 +So again, the owner of a fish market determined + +209 +00:17:57,010 --> 00:18:02,350 +that the average weight for a catfish is 3.2 + +210 +00:18:02,350 --> 00:18:08,950 +pounds. So the mean is 3.2 with a standard + +211 +00:18:08,950 --> 00:18:18,280 +deviation of 0.8. So sigma is 0.8. Now, assuming + +212 +00:18:18,280 --> 00:18:22,640 +the weights of catfish are normally distributed. 
+ +213 +00:18:23,580 --> 00:18:27,220 +In this case, you ask about what's the probability + +214 +00:18:27,220 --> 00:18:31,800 +that a randomly selected catfish will weigh more + +215 +00:18:31,800 --> 00:18:38,870 +than 4.4. So what's the probability of X More + +216 +00:18:38,870 --> 00:18:41,090 +than. So greater than 4. + +217 +00:18:45,830 --> 00:18:49,290 +I just gave the idea to solve this problem. At + +218 +00:18:49,290 --> 00:18:54,190 +home, you can compute it to find the exact answer. + +219 +00:18:54,590 --> 00:18:59,570 +So first step, find z score. z is 4.4. + +220 +00:19:01,730 --> 00:19:02,590 +Divide by z. + +221 +00:19:05,910 --> 00:19:12,360 +Just compute this value. It's 0.8 divided by 0.8 + +222 +00:19:12,360 --> 00:19:13,100 +equals 1. + +223 +00:19:18,560 --> 00:19:22,660 +So z-score is 1. So we are looking for the + +224 +00:19:22,660 --> 00:19:24,700 +probability of z greater than 1. + +225 +00:19:28,340 --> 00:19:35,220 +1.5. 1.2. 1.5. 1.5. 1.5. So I'm looking for the + +226 +00:19:35,220 --> 00:19:37,760 +probability of x of z greater than + +227 +00:19:40,980 --> 00:19:48,700 +1 minus P + +228 +00:19:48,700 --> 00:19:52,700 +of Z less than or equal to 1.5. Now go back to the + +229 +00:19:52,700 --> 00:19:53,040 +table. + +230 +00:20:01,540 --> 00:20:07,260 +Now 1.5 under 0. It's 0.9332. + +231 +00:20:11,410 --> 00:20:19,750 +So, 1 minus this probability gives 0.668. + +232 +00:20:21,210 --> 00:20:24,350 +That's the probability of X greater than 4.4. + +233 +00:20:28,130 --> 00:20:31,590 +So, the answer is 0.0668. + +234 +00:20:34,870 --> 00:20:37,550 +Now, for the same question. + +235 +00:20:41,320 --> 00:20:44,380 +What's the probability that a randomly selected + +236 +00:20:44,380 --> 00:20:47,080 +fish will weigh between 3 and 5 pounds? + +237 +00:20:49,660 --> 00:20:50,400 +3? + +238 +00:20:52,940 --> 00:21:01,040 +Up to 5. So first we have to find the score for 3 + +239 +00:21:01,040 --> 00:21:11,260 +out of 5. 
For it to be just 3 minus 3.2. Divide by + +240 +00:21:11,260 --> 00:21:17,380 +0.8 is the first z value. Negative 0.2 divided by + +241 +00:21:17,380 --> 00:21:30,360 +0.8 minus 0.25. The other one, 5 minus 3.2 divided + +242 +00:21:30,360 --> 00:21:36,680 +by 0.8. 1 minus 0.8 divided by 0.8 equals + +243 +00:21:42,680 --> 00:21:44,120 +2.25. + +244 +00:21:50,840 --> 00:21:57,020 +Just double check this result. So here, the + +245 +00:21:57,020 --> 00:22:04,520 +probability of X between 3 and 5 equals minus 0 + +246 +00:22:04,520 --> 00:22:08,200 +.25, smaller than Z, smaller than 2.5. + +247 +00:22:12,650 --> 00:22:17,210 +So it's transformed from normal distribution to + +248 +00:22:17,210 --> 00:22:20,750 +standardized normal distribution. So here instead + +249 +00:22:20,750 --> 00:22:23,530 +of computing the probability of X between three + +250 +00:22:23,530 --> 00:22:26,070 +and five, we are looking for the probability + +251 +00:22:26,070 --> 00:22:31,350 +between Z between actually minus. It's minus + +252 +00:22:31,350 --> 00:22:34,590 +because your value here is smaller than the + +253 +00:22:34,590 --> 00:22:37,790 +average. The average was 3.2, so it should be + +254 +00:22:37,790 --> 00:22:42,630 +negative. So z score between minus 0.25 all the + +255 +00:22:42,630 --> 00:22:48,150 +way up to 2.25. So now, this is the probability we + +256 +00:22:48,150 --> 00:22:56,590 +are looking for. Zero in the middle minus one + +257 +00:22:56,590 --> 00:23:03,610 +-fourth to the left of z of zero, mu of zero. And + +258 +00:23:03,610 --> 00:23:08,560 +this is the value of 2.25. Now we are looking + +259 +00:23:08,560 --> 00:23:09,940 +actually for this probability. + +260 +00:23:12,960 --> 00:23:18,360 +The area between minus 0.25 all the way up to 2.5. + +261 +00:23:19,980 --> 00:23:25,200 +So this area equals the + +262 +00:23:25,200 --> 00:23:29,000 +probability of Z less than 2.25 minus. + +263 +00:23:34,280 --> 00:23:38,780 +And again, use the normal. 
table to give this + +264 +00:23:38,780 --> 00:23:39,860 +value and another one. + +265 +00:23:42,980 --> 00:23:52,400 +Any questions? So first step here, we compute the + +266 +00:23:52,400 --> 00:23:56,880 +z-score for each value x. So the problem is + +267 +00:23:56,880 --> 00:24:01,380 +transformed from normal distribution to + +268 +00:24:01,380 --> 00:24:05,060 +standardized normal distribution. So it becomes z + +269 +00:24:05,060 --> 00:24:11,500 +between minus 1.25 up to 2.25. Now, this area, + +270 +00:24:11,960 --> 00:24:19,900 +this dashed area equals the area below 2.25 minus + +271 +00:24:19,900 --> 00:24:25,000 +the area below minus 1.25. Now, by using the + +272 +00:24:25,000 --> 00:24:27,760 +similar way we did before, you will compute the + +273 +00:24:27,760 --> 00:24:30,960 +value of z. The probability of z is smaller than 2 + +274 +00:24:30,960 --> 00:24:39,580 +.25 by using The normal table. So here, 2.2 up to + +275 +00:24:39,580 --> 00:24:47,900 +5. So 9, 8, 7, 8. 9, 8, 7, 8. + +276 +00:24:53,900 --> 00:25:00,260 +So the area below 2.25, 2.2, this value. All the + +277 +00:25:00,260 --> 00:25:03,940 +way up to 5 gives 987. + +278 +00:25:05,540 --> 00:25:08,860 +Now, what's about the probability of Z smaller + +279 +00:25:08,860 --> 00:25:15,320 +than minus 0.25? If you go back to the Z table, + +280 +00:25:15,380 --> 00:25:18,960 +but for the other one, the negative one. + +281 +00:25:23,120 --> 00:25:28,540 +Minus 2 minus 0.2 up + +282 +00:25:28,540 --> 00:25:34,780 +to 5. 0.4013 minus, + +283 +00:25:36,620 --> 00:25:43,100 +that will give the probability between three and + +284 +00:25:43,100 --> 00:25:43,280 +five. + +285 +00:25:46,180 --> 00:25:48,900 +This is the second part. + +286 +00:25:51,120 --> 00:25:52,460 +So the final answer. + +287 +00:26:00,630 --> 00:26:05,450 +So this is the probability that the selected cash + +288 +00:26:05,450 --> 00:26:10,650 +fish will weigh between three and five pounds. 
+ +289 +00:26:11,810 --> 00:26:20,770 +Now, other question is, for the same problem, you + +290 +00:26:20,770 --> 00:26:28,020 +said a citation Catfish should be one of the top 2 + +291 +00:26:28,020 --> 00:26:33,860 +% in the weight. Assuming the weights of catfish + +292 +00:26:33,860 --> 00:26:38,660 +are normally distributed, at what weight in bounds + +293 +00:26:38,660 --> 00:26:43,680 +should the citation, the notation be established? + +294 +00:26:45,800 --> 00:26:50,600 +So in this board, he asked about what's the value + +295 +00:26:50,600 --> 00:26:52,120 +of x, for example. + +296 +00:26:57,160 --> 00:27:01,680 +is greater than what value here. And this + +297 +00:27:01,680 --> 00:27:07,880 +probability equals 2%. Because you said the + +298 +00:27:07,880 --> 00:27:14,720 +citation catfish should be one of the top 2%. So + +299 +00:27:14,720 --> 00:27:23,560 +the area in the right here, this area is 2%. + +300 +00:27:26,000 --> 00:27:34,080 +What's the value of x in this case? So here, the + +301 +00:27:34,080 --> 00:27:38,420 +value of x greater than a equals 0.02, and we are + +302 +00:27:38,420 --> 00:27:39,460 +looking for this value. + +303 +00:27:45,750 --> 00:27:49,170 +gives the area to the left side. So this + +304 +00:27:49,170 --> 00:27:54,490 +probability actually, the area to the right is 2%, + +305 +00:27:54,490 --> 00:28:00,810 +so the area to the left is 98%. So this is the + +306 +00:28:00,810 --> 00:28:01,410 +same as, + +307 +00:28:04,610 --> 00:28:07,930 +as we know, the equal sign does not matter because + +308 +00:28:07,930 --> 00:28:09,090 +we have continuous distribution. + +309 +00:28:12,050 --> 00:28:14,650 +continuous distribution, so equal sign does not + +310 +00:28:14,650 --> 00:28:18,510 +matter. 
So now, if you ask about P of X greater + +311 +00:28:18,510 --> 00:28:22,190 +than a certain value equals a probability of, for + +312 +00:28:22,190 --> 00:28:27,330 +example, 0.02, you have to find the probability to + +313 +00:28:27,330 --> 00:28:31,890 +the left, which is 0.98, because our table gives + +314 +00:28:31,890 --> 00:28:36,470 +the area to the left. Now, we have to find the + +315 +00:28:36,470 --> 00:28:40,820 +value of A such that Probability of X is more than + +316 +00:28:40,820 --> 00:28:44,820 +or equal to 0.98. So again, we have to look at the + +317 +00:28:44,820 --> 00:28:50,140 +normal table, but backwards, because this value is + +318 +00:28:50,140 --> 00:28:53,720 +given. If the probability is given, we have to + +319 +00:28:53,720 --> 00:28:58,900 +look inside the body of the table in order to find + +320 +00:28:58,900 --> 00:28:59,580 +the z-score. + +321 +00:29:03,350 --> 00:29:07,850 +x equals mu plus z sigma in order to find the + +322 +00:29:07,850 --> 00:29:12,290 +corresponding value x. So again, go back to the + +323 +00:29:12,290 --> 00:29:22,010 +normal table, and we are looking for 98%. The + +324 +00:29:22,010 --> 00:29:27,930 +closest value to 98%, look here, if you stop here + +325 +00:29:27,930 --> 00:29:30,850 +at 2, go to the right, + +326 +00:29:33,660 --> 00:29:39,380 +Here we have 9798 or + +327 +00:29:39,380 --> 00:29:41,480 +9803. + +328 +00:29:42,640 --> 00:29:50,460 +So the answer might be your z-score could be 2.05 + +329 +00:29:50,460 --> 00:29:59,440 +or 2.06. So again, in this case, the table does + +330 +00:29:59,440 --> 00:30:04,400 +not give the exact. So the approximate one might + +331 +00:30:04,400 --> 00:30:08,920 +be between them exactly. Or just take one of + +332 +00:30:08,920 --> 00:30:13,140 +these. So either you can take 9798, which is + +333 +00:30:13,140 --> 00:30:19,500 +closer to 98% than 9803, because it's three + +334 +00:30:19,500 --> 00:30:23,780 +distant apart. 
So maybe we can take this value. + +335 +00:30:24,500 --> 00:30:27,640 +Again, if you take the other one, you will be OK. + +336 +00:30:28,540 --> 00:30:36,730 +So you take either 2.05. or 2.06. So let's take + +337 +00:30:36,730 --> 00:30:45,270 +the first value, for example. So my x equals mu, z + +338 +00:30:45,270 --> 00:30:56,570 +is 2.05, times sigma, 0.8. Multiply 2.05 by 8, 0 + +339 +00:30:56,570 --> 00:31:03,610 +.8, then add 3.2, you will get What's your answer? + +340 +00:31:08,390 --> 00:31:17,450 +3.2 plus 2 point... So around 4.8. So your answer + +341 +00:31:17,450 --> 00:31:18,890 +is 4.84. + +342 +00:31:23,810 --> 00:31:29,470 +Now if you go back to the problem, and suppose you + +343 +00:31:29,470 --> 00:31:30,830 +know the value of x. + +344 +00:31:34,250 --> 00:31:36,010 +So the probability of X. + +345 +00:31:42,990 --> 00:31:45,110 +Double check to the answer. + +346 +00:31:49,490 --> 00:31:58,010 +4.84. Just check. V of X greater than this value + +347 +00:31:58,010 --> 00:32:03,290 +should + +348 +00:32:03,290 --> 00:32:09,500 +be Two percent. Two percent. So the probability of + +349 +00:32:09,500 --> 00:32:13,440 +X greater than this value should be equal to one + +350 +00:32:13,440 --> 00:32:18,980 +zero. So this problem is called backward normal + +351 +00:32:18,980 --> 00:32:24,960 +calculation because here first step we find the + +352 +00:32:24,960 --> 00:32:28,040 +value of this score corresponding to this + +353 +00:32:28,040 --> 00:32:33,420 +probability. Be careful. The probability of X + +354 +00:32:33,420 --> 00:32:38,740 +greater than 2 is 0.02. So my value here should be + +355 +00:32:38,740 --> 00:32:44,980 +to the right. Because it says greater than A is + +356 +00:32:44,980 --> 00:32:51,240 +just 2%. If you switch the position of A, for + +357 +00:32:51,240 --> 00:32:57,130 +example, if A is on this side, And he asked about + +358 +00:32:57,130 --> 00:33:02,850 +E of X greater than E is 2%. This area is not 2%. 
+
+359
+00:33:02,850 --> 00:33:10,050
+From A up to infinity here, this area is not 2%

+360
+00:33:10,050 --> 00:33:14,070
+because at least it's greater than 0.5. Make

+361
+00:33:14,070 --> 00:33:16,610
+sense? So your A should be to the right side.

+362
+00:33:17,590 --> 00:33:21,810
+Because the value of X greater than E, 2% is on

+363
+00:33:21,810 --> 00:33:26,400
+the other side. Let's move to the next one.

+364
+00:33:36,180 --> 00:33:37,660
+For the same question.

+365
+00:33:52,790 --> 00:33:57,150
+Again, the owner of Catfish Market determined the

+366
+00:33:57,150 --> 00:34:00,930
+average weight of a catfish 3.2 with

+367
+00:34:00,930 --> 00:34:04,670
+standard deviation 0.8 and we are assuming the

+368
+00:34:04,670 --> 00:34:08,270
+weights of catfish are normally distributed, he asks

+369
+00:34:08,270 --> 00:34:17,390
+above. Above what weight? Do 89.8% of the weights

+370
+00:34:17,390 --> 00:34:18,070
+occur?

+371
+00:34:20,630 --> 00:34:27,980
+Above? And above, so x greater than. X minus. And

+372
+00:34:27,980 --> 00:34:34,900
+98, 89, sorry, 89. So this is a percentage he's

+373
+00:34:34,900 --> 00:34:45,860
+looking for. 89.8%. Now what's the value of A? Or

+374
+00:34:45,860 --> 00:34:51,560
+above what weight? Do 89.8% of the weights occur?

+375
+00:34:57,730 --> 00:35:02,670
+You just make the normal curve in order to

+376
+00:35:02,670 --> 00:35:07,010
+understand the probability. Now, A should be to

+377
+00:35:07,010 --> 00:35:11,550
+the right or to the left side? Imagine A in the

+378
+00:35:11,550 --> 00:35:15,990
+right side here. Do you think the area above A is

+379
+00:35:15,990 --> 00:35:21,670
+89%? It's smaller than 0.5 for sure. So it should

+380
+00:35:21,670 --> 00:35:29,950
+be the other side. So this is your A. Now, this

+381
+00:35:29,950 --> 00:35:36,690
+area makes sense that it's above 0.5. It's 0.8980. 
+
+382
+00:35:38,750 --> 00:35:42,850
+Now, P of X greater than equals this value. And

+383
+00:35:42,850 --> 00:35:46,990
+again, the table gives the area to the left. So

+384
+00:35:46,990 --> 00:35:53,740
+this is actually X less than A, 1 minus this

+385
+00:35:53,740 --> 00:35:56,600
+value, equals 0.1020.

+386
+00:36:01,480 --> 00:36:08,760
+Now go back to

+387
+00:36:08,760 --> 00:36:14,400
+the normal table. Here it's 0.1020. So it should

+388
+00:36:14,400 --> 00:36:17,500
+be negative. I mean, your z-score should be

+389
+00:36:17,500 --> 00:36:17,800
+negative.

+390
+00:36:22,000 --> 00:36:25,640
+Now look at 0.102.

+391
+00:36:28,560 --> 00:36:38,680
+Exactly this value. 0.102 is minus 1.2 up to 7. So

+392
+00:36:38,680 --> 00:36:39,900
+minus 1.27.

+393
+00:36:49,120 --> 00:36:57,900
+Minus 1.2. All the way up to 7 is 0.102. So the

+394
+00:36:57,900 --> 00:37:04,160
+corresponding z-score is minus 1.27. Now x again

+395
+00:37:04,160 --> 00:37:05,980
+equals mu plus z sigma.

+396
+00:37:10,280 --> 00:37:19,960
+Mu is 3.2 plus z is negative 1.27 times sigma.

+397
+00:37:24,250 --> 00:37:34,390
+So it's equal to 3.2 minus 1.27 times 0.8. By

+398
+00:37:34,390 --> 00:37:36,510
+calculator, you'll get the final result.

+399
+00:37:51,120 --> 00:37:56,180
+If the probability is smaller than 0.5, then this

+400
+00:37:56,180 --> 00:38:00,480
+one is negative. Go to the other one. If the

+401
+00:38:00,480 --> 00:38:04,040
+probability is above 0.5, then use the positive z

+402
+00:38:04,040 --> 00:38:09,600
+-score. So what's the answer? 2.18.

+403
+00:38:12,680 --> 00:38:19,850
+Be careful. In the previous one, We had a

+404
+00:38:19,850 --> 00:38:28,870
+probability of X greater than A equal 2%. In

+405
+00:38:28,870 --> 00:38:33,070
+this case, the value of A, for example, is located

+406
+00:38:33,070 --> 00:38:35,930
+in the upper tail here. 
+
+407
+00:38:40,210 --> 00:38:45,530
+For this part, you ask about P of X greater than A

+408
+00:38:45,530 --> 00:38:51,090
+equal 0.89. It's here more than 0.5 should be on

+409
+00:38:51,090 --> 00:38:54,290
+the other side. So you have P of X greater than

+410
+00:38:54,290 --> 00:38:58,910
+equal this value, which is the z-score in this case

+411
+00:38:58,910 --> 00:39:02,490
+minus 1.27. So the corresponding x value

+412
+00:39:02,490 --> 00:39:11,810
+actually is 2.18. So this is the weight that 89.8%

+413
+00:39:11,810 --> 00:39:17,970
+of the weights are above it. So around 90% of the

+414
+00:39:17,970 --> 00:39:25,350
+catfish have weights above this value. So

+415
+00:39:25,350 --> 00:39:31,170
+around 2 pounds. So around 90% of the weights are

+416
+00:39:31,170 --> 00:39:37,070
+above 2.18 pounds. Maybe this is one of the most

+417
+00:39:37,070 --> 00:39:41,690
+important questions in this chapter. Any question?

+418
+00:39:51,660 --> 00:39:57,580
+The last part here, for the same problem he asked

+419
+00:39:57,580 --> 00:40:01,700
+about, what's the probability that a randomly

+420
+00:40:01,700 --> 00:40:06,120
+selected fish will weigh less than 2.2 pounds? I

+421
+00:40:06,120 --> 00:40:12,680
+think straightforward. We did similar in part A.

+422
+00:40:14,900 --> 00:40:23,880
+So P of X less than 2.2. So he's looking for this

+423
+00:40:23,880 --> 00:40:28,300
+probability, which is straightforward one. This

+424
+00:40:28,300 --> 00:40:38,800
+score, 3.2 minus, I'm sorry, it's 2.2 minus minus.

+425
+00:40:39,280 --> 00:40:43,420
+It's 2.2 minus 3.2 divided by sigma.

+426
+00:41:04,900 --> 00:41:10,240
+So again, find the probability now of Z less than

+427
+00:41:10,240 --> 00:41:11,120
+or equal to negative 1.25.

+428
+00:41:14,870 --> 00:41:19,710
+Now, in this case, we have to use the negative z.

+429
+00:41:20,370 --> 00:41:25,010
+It's negative 1.25, minus 1.2 up to 5. 
+ +430 +00:41:28,150 --> 00:41:30,070 +So 0.1056. + +431 +00:41:33,730 --> 00:41:40,660 +So the answer is around 10% of the catfish will + +432 +00:41:40,660 --> 00:41:46,420 +weigh less than 2 pounds. So the answer is 0.1056. + +433 +00:41:48,340 --> 00:41:49,220 +Questions? + +434 +00:41:52,780 --> 00:41:56,100 +So go back to the PowerPoint presentation we have. + +435 +00:41:57,780 --> 00:41:59,580 +The last topic, + +436 +00:42:02,560 --> 00:42:03,900 +evaluating normality. + +437 +00:42:06,930 --> 00:42:09,350 +Many times we mentioned something about normality + +438 +00:42:09,350 --> 00:42:14,750 +and outliers. For sure, if outliers exist, in this + +439 +00:42:14,750 --> 00:42:19,410 +case, the situation is not normal. Now, how can we + +440 +00:42:19,410 --> 00:42:21,370 +tell if a data point is an outlier? + +441 +00:42:24,650 --> 00:42:28,270 +If you remember, we talked about outliers in + +442 +00:42:28,270 --> 00:42:32,510 +Chapter 3. By two ways. + +443 +00:42:36,750 --> 00:42:38,270 +By this score. + +444 +00:42:42,650 --> 00:42:47,390 +And we mentioned that any data point. + +445 +00:42:54,950 --> 00:42:59,010 +Below minus 3, I mean smaller than minus 3, or + +446 +00:42:59,010 --> 00:43:04,010 +above 3, these points are suspected to be + +447 +00:43:04,010 --> 00:43:04,750 +outliers. + +448 +00:43:09,230 --> 00:43:16,230 +So any point, any data value smaller than minus 3 + +449 +00:43:16,230 --> 00:43:22,330 +in this form, or above plus 3 is considered to be + +450 +00:43:22,330 --> 00:43:30,250 +an outlier. If we compute the lower limit, which + +451 +00:43:30,250 --> 00:43:37,310 +is Q1 minus 1.5 IQR over the upper limit. + +452 +00:43:40,170 --> 00:43:47,490 +So we + +453 +00:43:47,490 --> 00:43:51,690 +have lower limit, upper limit. So lower limit. + +454 +00:43:55,400 --> 00:43:56,480 +And upper limit. 
+
+455
+00:43:59,980 --> 00:44:08,420
+Any data point below lower limit or above upper

+456
+00:44:08,420 --> 00:44:13,080
+limit is considered to be an outlier. So therefore, we

+457
+00:44:13,080 --> 00:44:18,320
+have two methods to determine or to examine if the

+458
+00:44:18,320 --> 00:44:20,960
+observation is an outlier or not. One by using

+459
+00:44:20,960 --> 00:44:24,060
+the z-score, straightforward. And the other one, we

+460
+00:44:24,060 --> 00:44:27,420
+have to look for The lower limit and upper limit.

+461
+00:44:28,960 --> 00:44:34,200
+The other method by using software and later on

+462
+00:44:34,200 --> 00:44:39,380
+you will take SPSS in order to determine if the

+463
+00:44:39,380 --> 00:44:43,100
+data is normally distributed by using something

+464
+00:44:43,100 --> 00:44:52,540
+called QQ plot or normal probability plot. So I'm

+465
+00:44:52,540 --> 00:44:56,710
+going to skip this part. Because data is taken by

+466
+00:44:56,710 --> 00:45:00,970
+using software. But in general,

+467
+00:45:04,330 --> 00:45:11,750
+you may look at this graph. Generally speaking, if

+468
+00:45:11,750 --> 00:45:17,730
+you have a probability plot of a data, and the

+469
+00:45:17,730 --> 00:45:25,530
+points lie on a straight line, or close to it, In

+470
+00:45:25,530 --> 00:45:29,650
+this case, the distribution is normal. It's hard

+471
+00:45:29,650 --> 00:45:33,390
+to make this graph manual. It's better to use

+472
+00:45:33,390 --> 00:45:38,510
+software. But at least if we have this graph, and

+473
+00:45:38,510 --> 00:45:42,150
+the points are close to the straight line. I mean,

+474
+00:45:42,250 --> 00:45:45,950
+the points are either on the straight line, lies

+475
+00:45:45,950 --> 00:45:49,550
+on the straight line, or close it. In this case,

+476
+00:45:50,870 --> 00:45:55,030
+the data is normally distributed. 
If the data + +477 +00:45:55,030 --> 00:46:00,870 +points scattered away of the straight line, then + +478 +00:46:00,870 --> 00:46:04,390 +the distribution is not normal either skewed to + +479 +00:46:04,390 --> 00:46:08,690 +the right or skewed to the left. So for this + +480 +00:46:08,690 --> 00:46:14,150 +specific graph, the plot is normally distributed, + +481 +00:46:14,290 --> 00:46:17,550 +approximately normally distributed. Because most + +482 +00:46:17,550 --> 00:46:23,230 +of the points here lie close to the line and few + +483 +00:46:24,260 --> 00:46:29,660 +are scattered away. Or it means that there are few + +484 +00:46:29,660 --> 00:46:31,940 +outliers in this case, we can ignore these values. + +485 +00:46:33,100 --> 00:46:35,620 +So here plot is approximately a straight line + +486 +00:46:35,620 --> 00:46:40,360 +except for a few outliers at the low and the + +487 +00:46:40,360 --> 00:46:44,780 +right, those points. So generally speaking, the + +488 +00:46:44,780 --> 00:46:51,200 +distribution is normal distribution. That's all + +489 +00:46:51,200 --> 00:46:53,700 +for this chapter. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..c7af8c156f4c5113bdb6eeaa117249d6c553aefc --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_postprocess.srt @@ -0,0 +1,1128 @@ +1 +00:00:15,580 --> 00:00:19,700 +In general, the regression equation is given by + +2 +00:00:19,700 --> 00:00:26,460 +this equation. Y represents the dependent variable + +3 +00:00:26,460 --> 00:00:30,680 +for each observation I. Beta 0 is called + +4 +00:00:30,680 --> 00:00:35,280 +population Y intercept. Beta 1 is the population + +5 +00:00:35,280 --> 00:00:39,400 +stop coefficient. Xi is the independent variable + +6 +00:00:39,400 --> 00:00:44,040 +for each observation, I. 
Epsilon I is the random + +7 +00:00:44,040 --> 00:00:48,420 +error theorem. Beta 0 plus beta 1 X is called + +8 +00:00:48,420 --> 00:00:53,410 +linear component. While Y and I are random error + +9 +00:00:53,410 --> 00:00:57,130 +components. So, the regression equation mainly has + +10 +00:00:57,130 --> 00:01:01,970 +two components. One is linear and the other is + +11 +00:01:01,970 --> 00:01:05,830 +random. In general, the expected value for this + +12 +00:01:05,830 --> 00:01:08,810 +error term is zero. So, for the predicted + +13 +00:01:08,810 --> 00:01:12,410 +equation, later we will see that Y hat equals B + +14 +00:01:12,410 --> 00:01:15,930 +zero plus B one X.this term will be ignored + +15 +00:01:15,930 --> 00:01:19,770 +because the expected value for the epsilon equals + +16 +00:01:19,770 --> 00:01:20,850 +zero. + +17 +00:01:36,460 --> 00:01:43,580 +So again linear component B0 plus B1 X I and the + +18 +00:01:43,580 --> 00:01:46,860 +random component is the epsilon term. + +19 +00:01:48,880 --> 00:01:53,560 +So if we have X and Y axis, this segment is called + +20 +00:01:53,560 --> 00:01:57,620 +Y intercept which is B0. The change in y divided + +21 +00:01:57,620 --> 00:02:01,480 +by change in x is called the slope. Epsilon i is + +22 +00:02:01,480 --> 00:02:04,480 +the difference between the observed value of y + +23 +00:02:04,480 --> 00:02:10,400 +minus the expected value or the predicted value. + +24 +00:02:10,800 --> 00:02:14,200 +The observed is the actual value. So actual minus + +25 +00:02:14,200 --> 00:02:17,480 +predicted, the difference between these two values + +26 +00:02:17,480 --> 00:02:20,800 +is called the epsilon. So epsilon i is the + +27 +00:02:20,800 --> 00:02:24,460 +difference between the observed value of y for x, + +28 +00:02:25,220 --> 00:02:28,820 +minus the predicted or the estimated value of Y + +29 +00:02:28,820 --> 00:02:33,360 +for XR. So this difference actually is called the + +30 +00:02:33,360 --> 00:02:36,920 +error tier. 
So the error is just observed minus + +31 +00:02:36,920 --> 00:02:38,240 +predicted. + +32 +00:02:40,980 --> 00:02:44,540 +The estimated regression equation is given by Y + +33 +00:02:44,540 --> 00:02:50,210 +hat equals V0 plus V1X. as i mentioned before the + +34 +00:02:50,210 --> 00:02:53,450 +epsilon term is cancelled because the expected + +35 +00:02:53,450 --> 00:02:57,590 +value for the epsilon equals zero here we have y + +36 +00:02:57,590 --> 00:03:00,790 +hat instead of y because this one is called the + +37 +00:03:00,790 --> 00:03:05,670 +estimated or the predicted value for y for the + +38 +00:03:05,670 --> 00:03:09,670 +observation i for example b zero is the estimated + +39 +00:03:09,670 --> 00:03:12,590 +of the regression intercept or is called y + +40 +00:03:12,590 --> 00:03:18,030 +intercept b one the estimate of the regression of + +41 +00:03:18,030 --> 00:03:21,930 +the slope so this is the estimated slope b1 xi + +42 +00:03:21,930 --> 00:03:26,270 +again is the independent variable so x1 It means + +43 +00:03:26,270 --> 00:03:28,630 +the value of the independent variable for + +44 +00:03:28,630 --> 00:03:31,350 +observation number one. Now this equation is + +45 +00:03:31,350 --> 00:03:34,530 +called linear regression equation or regression + +46 +00:03:34,530 --> 00:03:37,230 +model. It's a straight line because here we are + +47 +00:03:37,230 --> 00:03:41,170 +assuming that the relationship between x and y is + +48 +00:03:41,170 --> 00:03:43,490 +linear. It could be non-linear, but we are + +49 +00:03:43,490 --> 00:03:48,760 +focusing here in just linear regression. Now, the + +50 +00:03:48,760 --> 00:03:52,000 +values for B0 and B1 are given by these equations, + +51 +00:03:52,920 --> 00:03:56,480 +B1 equals RSY divided by SX. So, in order to + +52 +00:03:56,480 --> 00:04:01,040 +determine the values of B0 and B1, we have to know + +53 +00:04:01,040 --> 00:04:07,760 +first the value of R, the correlation coefficient. 
+ +54 +00:04:16,640 --> 00:04:24,980 +Sx and Sy, standard deviations of x and y, as well + +55 +00:04:24,980 --> 00:04:29,880 +as the means of x and y. + +56 +00:04:32,920 --> 00:04:39,500 +B1 equals R times Sy divided by Sx. B0 is just y + +57 +00:04:39,500 --> 00:04:43,600 +bar minus b1 x bar, where Sx and Sy are the + +58 +00:04:43,600 --> 00:04:48,350 +standard deviations of x and y. So this, how can + +59 +00:04:48,350 --> 00:04:53,190 +we compute the values of B0 and B1? Now the + +60 +00:04:53,190 --> 00:04:59,350 +question is, what's our interpretation about B0 + +61 +00:04:59,350 --> 00:05:05,030 +and B1? And B0, as we mentioned before, is the Y + +62 +00:05:05,030 --> 00:05:10,510 +or the estimated mean value of Y when the value X + +63 +00:05:10,510 --> 00:05:10,910 +is 0. + +64 +00:05:17,420 --> 00:05:22,860 +So if X is 0, then Y hat equals B0. That means B0 + +65 +00:05:22,860 --> 00:05:26,420 +is the estimated mean value of Y when the value of + +66 +00:05:26,420 --> 00:05:32,280 +X equals 0. B1, which is called the estimated + +67 +00:05:32,280 --> 00:05:36,880 +change in the mean value of Y as a result of one + +68 +00:05:36,880 --> 00:05:42,360 +unit change in X. That means the sign of B1, + +69 +00:05:48,180 --> 00:05:55,180 +the direction of the relationship between X and Y. + +70 +00:06:03,020 --> 00:06:09,060 +So the sine of B1 tells us the exact direction. It + +71 +00:06:09,060 --> 00:06:12,300 +could be positive if the sine of B1 is positive or + +72 +00:06:12,300 --> 00:06:17,040 +negative. on the other side. So that's the meaning + +73 +00:06:17,040 --> 00:06:22,040 +of B0 and B1. Now first thing we have to do in + +74 +00:06:22,040 --> 00:06:23,980 +order to determine if there exists linear + +75 +00:06:23,980 --> 00:06:26,800 +relationship between X and Y, we have to draw + +76 +00:06:26,800 --> 00:06:30,620 +scatter plot, Y versus X. 
In this specific + +77 +00:06:30,620 --> 00:06:34,740 +example, X is the square feet, size of the house + +78 +00:06:34,740 --> 00:06:38,760 +is measured by square feet, and house selling + +79 +00:06:38,760 --> 00:06:43,220 +price in thousand dollars. So we have to draw Y + +80 +00:06:43,220 --> 00:06:47,420 +versus X. So house price versus size of the house. + +81 +00:06:48,140 --> 00:06:50,740 +Now by looking carefully at this scatter plot, + +82 +00:06:51,340 --> 00:06:54,200 +even if it's a small sample size, but you can see + +83 +00:06:54,200 --> 00:06:57,160 +that there exists positive relationship between + +84 +00:06:57,160 --> 00:07:02,640 +house price and size of the house. The points + +85 +00:07:03,750 --> 00:07:06,170 +Maybe they are close little bit to the straight + +86 +00:07:06,170 --> 00:07:08,370 +line, it means there exists maybe strong + +87 +00:07:08,370 --> 00:07:11,350 +relationship between X and Y. But you can tell the + +88 +00:07:11,350 --> 00:07:15,910 +exact strength of the relationship by using the + +89 +00:07:15,910 --> 00:07:19,270 +value of R. But here we can tell that there exists + +90 +00:07:19,270 --> 00:07:22,290 +positive relationship and that relation could be + +91 +00:07:22,290 --> 00:07:23,250 +strong. + +92 +00:07:25,730 --> 00:07:31,350 +Now simple calculations will give B1 and B0. + +93 +00:07:32,210 --> 00:07:37,510 +Suppose we know the values of R, Sy, and Sx. R, if + +94 +00:07:37,510 --> 00:07:41,550 +you remember last time, R was 0.762. It's moderate + +95 +00:07:41,550 --> 00:07:46,390 +relationship between X and Y. Sy and Sx, 60 + +96 +00:07:46,390 --> 00:07:52,350 +divided by 4 is 117. That will give 0.109. So B0, + +97 +00:07:53,250 --> 00:07:59,430 +in this case, 0.10977, B1. + +98 +00:08:02,960 --> 00:08:08,720 +B0 equals Y bar minus B1 X bar. B1 is computed in + +99 +00:08:08,720 --> 00:08:12,680 +the previous step, so plug that value here. 
In + +100 +00:08:12,680 --> 00:08:15,440 +addition, we know the values of X bar and Y bar. + +101 +00:08:15,980 --> 00:08:19,320 +Simple calculation will give the value of B0, + +102 +00:08:19,400 --> 00:08:25,340 +which is about 98.25. After computing the values + +103 +00:08:25,340 --> 00:08:30,600 +of B0 and B1, we can state the regression equation + +104 +00:08:30,600 --> 00:08:34,360 +by house price, the estimated value of house + +105 +00:08:34,360 --> 00:08:39,960 +price. Hat in this equation means the estimated or + +106 +00:08:39,960 --> 00:08:43,860 +the predicted value of the house price. Equals b0 + +107 +00:08:43,860 --> 00:08:49,980 +which is 98 plus b1 which is 0.10977 times square + +108 +00:08:49,980 --> 00:08:54,420 +feet. Now here, by using this equation, we can + +109 +00:08:54,420 --> 00:08:58,280 +tell number one. The direction of the relationship + +110 +00:08:58,280 --> 00:09:03,620 +between x and y, how surprised and its size. Since + +111 +00:09:03,620 --> 00:09:05,900 +the sign is positive, it means there exists + +112 +00:09:05,900 --> 00:09:09,000 +positive associations or relationship between + +113 +00:09:09,000 --> 00:09:12,420 +these two variables, number one. Number two, we + +114 +00:09:12,420 --> 00:09:17,060 +can interpret carefully the meaning of the + +115 +00:09:17,060 --> 00:09:21,340 +intercept. Now, as we mentioned before, y hat + +116 +00:09:21,340 --> 00:09:25,600 +equals b zero only if x equals zero. Now there is + +117 +00:09:25,600 --> 00:09:28,900 +no sense about square feet of zero because we + +118 +00:09:28,900 --> 00:09:32,960 +don't have a size of a house to be zero. But the + +119 +00:09:32,960 --> 00:09:37,880 +slope here is 0.109, it has sense because as the + +120 +00:09:37,880 --> 00:09:41,450 +size of the house increased by one unit. 
it's + +121 +00:09:41,450 --> 00:09:46,290 +selling price increased by this amount 0.109 but + +122 +00:09:46,290 --> 00:09:48,990 +here you have to be careful to multiply this value + +123 +00:09:48,990 --> 00:09:52,610 +by a thousand because the data is given in + +124 +00:09:52,610 --> 00:09:56,830 +thousand dollars for Y so here as the size of the + +125 +00:09:56,830 --> 00:10:00,590 +house increased by one unit by one feet one square + +126 +00:10:00,590 --> 00:10:05,310 +feet it's selling price increases by this amount 0 + +127 +00:10:05,310 --> 00:10:10,110 +.10977 should be multiplied by a thousand so + +128 +00:10:10,110 --> 00:10:18,560 +around $109.77. So that means extra one square + +129 +00:10:18,560 --> 00:10:24,040 +feet for the size of the house, it cost you around + +130 +00:10:24,040 --> 00:10:30,960 +$100 or $110. So that's the meaning of B1 and the + +131 +00:10:30,960 --> 00:10:35,060 +sign actually of the slope. In addition to that, + +132 +00:10:35,140 --> 00:10:39,340 +we can make some predictions about house price for + +133 +00:10:39,340 --> 00:10:42,900 +any given value of the size of the house. That + +134 +00:10:42,900 --> 00:10:46,940 +means if you know that the house size equals 2,000 + +135 +00:10:46,940 --> 00:10:50,580 +square feet. So just plug this value here and + +136 +00:10:50,580 --> 00:10:54,100 +simple calculation will give the predicted value + +137 +00:10:54,100 --> 00:10:58,230 +of the ceiling price of a house. That's the whole + +138 +00:10:58,230 --> 00:11:03,950 +story for the simple linear regression. In other + +139 +00:11:03,950 --> 00:11:08,030 +words, we have this equation, so the + +140 +00:11:08,030 --> 00:11:12,690 +interpretation of B0 again. B0 is the estimated + +141 +00:11:12,690 --> 00:11:16,110 +mean value of Y when the value of X is 0. That + +142 +00:11:16,110 --> 00:11:20,700 +means if X is 0, in this range of the observed X + +143 +00:11:20,700 --> 00:11:24,540 +-values. 
That's the meaning of the B0. But again, + +144 +00:11:24,820 --> 00:11:27,700 +because a house cannot have a square footage of + +145 +00:11:27,700 --> 00:11:31,680 +zero, so B0 has no practical application. + +146 +00:11:34,740 --> 00:11:38,760 +On the other hand, the interpretation for B1, B1 + +147 +00:11:38,760 --> 00:11:43,920 +equals 0.10977, that means B1 again estimates the + +148 +00:11:43,920 --> 00:11:46,880 +change in the mean value of Y as a result of one + +149 +00:11:46,880 --> 00:11:51,160 +unit increase in X. In other words, since B1 + +150 +00:11:51,160 --> 00:11:55,680 +equals 0.10977, that tells us that the mean value + +151 +00:11:55,680 --> 00:12:02,030 +of a house Increases by this amount, multiplied by + +152 +00:12:02,030 --> 00:12:05,730 +1,000 on average for each additional one square + +153 +00:12:05,730 --> 00:12:09,690 +foot of size. So that's the exact interpretation + +154 +00:12:09,690 --> 00:12:14,630 +about P0 and P1. For the prediction, as I + +155 +00:12:14,630 --> 00:12:18,430 +mentioned, since we have this equation, and our + +156 +00:12:18,430 --> 00:12:21,530 +goal is to predict the price for a house with 2 + +157 +00:12:21,530 --> 00:12:25,450 +,000 square feet, just plug this value here. + +158 +00:12:26,450 --> 00:12:31,130 +Multiply this value by 0.1098, then add the result + +159 +00:12:31,130 --> 00:12:37,750 +to 98.25 will give 317.85. This value should be + +160 +00:12:37,750 --> 00:12:41,590 +multiplied by 1000, so the predicted price for a + +161 +00:12:41,590 --> 00:12:49,050 +house with 2000 square feet is around 317,850 + +162 +00:12:49,050 --> 00:12:54,910 +dollars. That's for making the prediction for + +163 +00:12:54,910 --> 00:13:02,050 +selling a price. The last section in chapter 12 + +164 +00:13:02,050 --> 00:13:07,550 +talks about coefficient of determination R + +165 +00:13:07,550 --> 00:13:11,550 +squared. 
The definition for the coefficient of + +166 +00:13:11,550 --> 00:13:16,190 +determination is the portion of the total + +167 +00:13:16,190 --> 00:13:19,330 +variation in the dependent variable that is + +168 +00:13:19,330 --> 00:13:21,730 +explained by the variation in the independent + +169 +00:13:21,730 --> 00:13:25,130 +variable. Since we have two variables X and Y. + +170 +00:13:29,510 --> 00:13:34,490 +And the question is, what's the portion of the + +171 +00:13:34,490 --> 00:13:39,530 +total variation that can be explained by X? So the + +172 +00:13:39,530 --> 00:13:42,030 +question is, what's the portion of the total + +173 +00:13:42,030 --> 00:13:46,070 +variation in Y that is explained already by the + +174 +00:13:46,070 --> 00:13:54,450 +variation in X? For example, suppose R² is 90%, 0 + +175 +00:13:54,450 --> 00:13:59,770 +.90. That means 90% in the variation of the + +176 +00:13:59,770 --> 00:14:05,700 +selling price is explained by its size. That means + +177 +00:14:05,700 --> 00:14:12,580 +the size of the house contributes about 90% to + +178 +00:14:12,580 --> 00:14:17,700 +explain the variability of the selling price. So + +179 +00:14:17,700 --> 00:14:20,460 +we would like to have R squared to be large + +180 +00:14:20,460 --> 00:14:26,620 +enough. Now, R squared for simple regression only + +181 +00:14:26,620 --> 00:14:30,200 +is given by this equation, correlation between X + +182 +00:14:30,200 --> 00:14:31,100 +and Y squared. + +183 +00:14:34,090 --> 00:14:36,510 +So if we have the correlation between X and Y and + +184 +00:14:36,510 --> 00:14:40,070 +then you just square this value, that will give + +185 +00:14:40,070 --> 00:14:42,370 +the correlation or the coefficient of + +186 +00:14:42,370 --> 00:14:45,730 +determination. So simply, determination + +187 +00:14:45,730 --> 00:14:49,510 +coefficient is just the square of the correlation + +188 +00:14:49,510 --> 00:14:54,430 +between X and Y. 
We know that R ranges between + +189 +00:14:54,430 --> 00:14:55,670 +minus 1 and plus 1. + +190 +00:14:59,150 --> 00:15:05,590 +So R squared should be ranges between 0 and 1, + +191 +00:15:06,050 --> 00:15:09,830 +because minus sign will be cancelled since we are + +192 +00:15:09,830 --> 00:15:12,770 +squaring these values, so r squared is always + +193 +00:15:12,770 --> 00:15:17,690 +between 0 and 1. So again, r squared is used to + +194 +00:15:17,690 --> 00:15:22,430 +explain the portion of the total variability in + +195 +00:15:22,430 --> 00:15:24,950 +the dependent variable that is already explained + +196 +00:15:24,950 --> 00:15:31,310 +by the variability in x. For example, Sometimes R + +197 +00:15:31,310 --> 00:15:36,590 +squared is one. R squared is one only happens if R + +198 +00:15:36,590 --> 00:15:41,190 +is one or negative one. So if there exists perfect + +199 +00:15:41,190 --> 00:15:45,490 +relationship either negative or positive, I mean + +200 +00:15:45,490 --> 00:15:49,890 +if R is plus one or negative one, then R squared + +201 +00:15:49,890 --> 00:15:55,130 +is one. That means perfect linear relationship + +202 +00:15:55,130 --> 00:16:01,020 +between Y and X. Now the value. of 1 for R squared + +203 +00:16:01,020 --> 00:16:07,040 +means that 100% of the variation Y is explained by + +204 +00:16:07,040 --> 00:16:11,460 +variation X. And that's really never happened in + +205 +00:16:11,460 --> 00:16:15,720 +real life. Because R equals 1 or plus 1 or + +206 +00:16:15,720 --> 00:16:21,140 +negative 1 cannot be happened in real life. So R + +207 +00:16:21,140 --> 00:16:25,180 +squared always ranges between 0 and 1, never + +208 +00:16:25,180 --> 00:16:29,500 +equals 1, because if R squared is 1, that means + +209 +00:16:29,500 --> 00:16:33,440 +all the variation in Y is explained by the + +210 +00:16:33,440 --> 00:16:38,220 +variation in X. 
But for sure there is an error, + +211 +00:16:38,820 --> 00:16:41,700 +and that error may be due to some variables that + +212 +00:16:41,700 --> 00:16:45,540 +are not included in the regression model. Maybe + +213 +00:16:45,540 --> 00:16:50,870 +there is Random error in the selection, maybe the + +214 +00:16:50,870 --> 00:16:53,210 +sample size is not large enough in order to + +215 +00:16:53,210 --> 00:16:55,770 +determine the total variation in the dependent + +216 +00:16:55,770 --> 00:16:58,990 +variable. So it makes sense that R squared will be + +217 +00:16:58,990 --> 00:17:04,450 +less than 100. So generally speaking, R squared + +218 +00:17:04,450 --> 00:17:09,870 +always between 0 and 1. Weaker linear relationship + +219 +00:17:09,870 --> 00:17:15,690 +between X and Y, it means R squared is not 1. So + +220 +00:17:15,690 --> 00:17:20,070 +R², since it lies between 0 and 1, it means sum, + +221 +00:17:21,070 --> 00:17:24,830 +but not all the variation of Y is explained by the + +222 +00:17:24,830 --> 00:17:28,410 +variation X. Because as mentioned before, if R + +223 +00:17:28,410 --> 00:17:32,510 +squared is 90%, it means some, not all, the + +224 +00:17:32,510 --> 00:17:35,830 +variation Y is explained by the variation X. And + +225 +00:17:35,830 --> 00:17:38,590 +the remaining percent in this case, which is 10%, + +226 +00:17:38,590 --> 00:17:42,790 +this one due to, as I mentioned, maybe there + +227 +00:17:42,790 --> 00:17:46,490 +exists some other variables that affect the + +228 +00:17:46,490 --> 00:17:52,020 +selling price besides its size, maybe location. of + +229 +00:17:52,020 --> 00:17:57,900 +the house affects its selling price. So R squared + +230 +00:17:57,900 --> 00:18:02,640 +is always between 0 and 1, it's always positive. R + +231 +00:18:02,640 --> 00:18:07,180 +squared equals 0, that only happens if there is no + +232 +00:18:07,180 --> 00:18:12,620 +linear relationship between Y and X. 
Since R is 0, + +233 +00:18:13,060 --> 00:18:17,240 +then R squared equals 0. That means the value of Y + +234 +00:18:17,240 --> 00:18:20,870 +does not depend on X. Because here, as X + +235 +00:18:20,870 --> 00:18:26,830 +increases, Y stays nearly in the same position. It + +236 +00:18:26,830 --> 00:18:30,190 +means as X increases, Y stays the same, constant. + +237 +00:18:31,010 --> 00:18:33,730 +So that means there is no relationship or actually + +238 +00:18:33,730 --> 00:18:37,010 +there is no linear relationship because it could + +239 +00:18:37,010 --> 00:18:40,710 +be there exists non-linear relationship. But here + +240 +00:18:40,710 --> 00:18:44,980 +we are. Just focusing on linear relationship + +241 +00:18:44,980 --> 00:18:50,020 +between X and Y. So if R is zero, that means the + +242 +00:18:50,020 --> 00:18:52,400 +value of Y does not depend on the value of X. So + +243 +00:18:52,400 --> 00:18:58,360 +as X increases, Y is constant. Now for the + +244 +00:18:58,360 --> 00:19:03,620 +previous example, R was 0.7621. To determine the + +245 +00:19:03,620 --> 00:19:06,760 +coefficient of determination, One more time, + +246 +00:19:07,460 --> 00:19:11,760 +square this value, that's only valid for simple + +247 +00:19:11,760 --> 00:19:14,980 +linear regression. Otherwise, you cannot square + +248 +00:19:14,980 --> 00:19:17,580 +the value of R in order to determine the + +249 +00:19:17,580 --> 00:19:20,820 +coefficient of determination. So again, this is + +250 +00:19:20,820 --> 00:19:26,420 +only true for + +251 +00:19:26,420 --> 00:19:29,980 +simple linear regression. + +252 +00:19:35,460 --> 00:19:41,320 +So R squared is 0.7621 squared will give 0.5808. + +253 +00:19:42,240 --> 00:19:46,120 +Now, the meaning of this value, first you have to + +254 +00:19:46,120 --> 00:19:53,280 +multiply this by 100. 
So 58.08% of the variation + +255 +00:19:53,280 --> 00:19:57,440 +in house prices is explained by the variation in + +256 +00:19:57,440 --> 00:20:05,190 +square feet. So 58, around 0.08% of the variation + +257 +00:20:05,190 --> 00:20:12,450 +in size of the house, I'm sorry, in the price is + +258 +00:20:12,450 --> 00:20:16,510 +explained by + +259 +00:20:16,510 --> 00:20:25,420 +its size. So size by itself. Size only explains + +260 +00:20:25,420 --> 00:20:30,320 +around 50-80% of the selling price of a house. Now + +261 +00:20:30,320 --> 00:20:35,000 +the remaining percent which is around, this is the + +262 +00:20:35,000 --> 00:20:38,860 +error, or the remaining percent, this one is due + +263 +00:20:38,860 --> 00:20:50,040 +to other variables, other independent variables. + +264 +00:20:51,200 --> 00:20:53,820 +That might affect the change of price. + +265 +00:21:04,840 --> 00:21:11,160 +But since the size of the house explains 58%, that + +266 +00:21:11,160 --> 00:21:15,660 +means it's a significant variable. Now, if we add + +267 +00:21:15,660 --> 00:21:19,250 +more variables, to the regression equation for + +268 +00:21:19,250 --> 00:21:23,950 +sure this value will be increased. So maybe 60 or + +269 +00:21:23,950 --> 00:21:28,510 +65 or 67 and so on. But 60% or 50 is more enough + +270 +00:21:28,510 --> 00:21:31,870 +sometimes. But R squared, as R squared increases, + +271 +00:21:32,090 --> 00:21:35,530 +it means we have good fit of the model. That means + +272 +00:21:35,530 --> 00:21:41,230 +the model is accurate to determine or to make some + +273 +00:21:41,230 --> 00:21:46,430 +prediction. So that's for the coefficient of + +274 +00:21:46,430 --> 00:21:58,350 +determination. Any question? So we covered simple + +275 +00:21:58,350 --> 00:22:01,790 +linear regression model. We know now how can we + +276 +00:22:01,790 --> 00:22:06,390 +compute the values of B0 and B1. 
We can state or + +277 +00:22:06,390 --> 00:22:10,550 +write the regression equation, and we can do some + +278 +00:22:10,550 --> 00:22:14,370 +interpretation about P0 and P1, making + +279 +00:22:14,370 --> 00:22:21,530 +predictions, and make some comments about the + +280 +00:22:21,530 --> 00:22:27,390 +coefficient of determination. That's all. So I'm + +281 +00:22:27,390 --> 00:22:31,910 +going to stop now, and I will give some time to + +282 +00:22:31,910 --> 00:22:33,030 +discuss some practice. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..f4721abaf5d3e710889c090d4a69f84b69668b7e --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 2794, "start": 15.58, "end": 27.94, "text": " In general, the regression equation is given by this equation. Y represents the dependent variable for each observation I.", "tokens": [682, 2674, 11, 264, 24590, 5367, 307, 2212, 538, 341, 5367, 13, 398, 8855, 264, 12334, 7006, 337, 1184, 14816, 286, 13], "avg_logprob": -0.20312499870424686, "compression_ratio": 1.2424242424242424, "no_speech_prob": 7.748603820800781e-07, "words": [{"start": 15.579999999999998, "end": 16.54, "word": " In", "probability": 0.466064453125}, {"start": 16.54, "end": 17.5, "word": " general,", "probability": 0.904296875}, {"start": 17.66, "end": 17.82, "word": " the", "probability": 0.87109375}, {"start": 17.82, "end": 18.18, "word": " regression", "probability": 0.97998046875}, {"start": 18.18, "end": 18.7, "word": " equation", "probability": 0.958984375}, {"start": 18.7, "end": 18.96, "word": " is", "probability": 0.94775390625}, {"start": 18.96, "end": 19.22, "word": " given", "probability": 0.9033203125}, {"start": 19.22, "end": 19.7, "word": " by", "probability": 0.97119140625}, {"start": 19.7, "end": 21.16, "word": " this", 
"probability": 0.935546875}, {"start": 21.16, "end": 21.72, "word": " equation.", "probability": 0.978515625}, {"start": 23.06, "end": 23.56, "word": " Y", "probability": 0.681640625}, {"start": 23.56, "end": 25.06, "word": " represents", "probability": 0.8408203125}, {"start": 25.06, "end": 25.58, "word": " the", "probability": 0.9169921875}, {"start": 25.58, "end": 25.96, "word": " dependent", "probability": 0.85009765625}, {"start": 25.96, "end": 26.46, "word": " variable", "probability": 0.92138671875}, {"start": 26.46, "end": 26.88, "word": " for", "probability": 0.92724609375}, {"start": 26.88, "end": 27.1, "word": " each", "probability": 0.95068359375}, {"start": 27.1, "end": 27.56, "word": " observation", "probability": 0.91162109375}, {"start": 27.56, "end": 27.94, "word": " I.", "probability": 0.445068359375}], "temperature": 1.0}, {"id": 2, "seek": 4976, "start": 29.24, "end": 49.76, "text": " Beta 0 is called population Y intercept. Beta 1 is the population stop coefficient. Xi is the independent variable for each observation, I. Epsilon I is the random error theorem. 
Beta 0 plus beta 1 X is called linear component.", "tokens": [33286, 1958, 307, 1219, 4415, 398, 24700, 13, 33286, 502, 307, 264, 4415, 1590, 17619, 13, 15712, 307, 264, 6695, 7006, 337, 1184, 14816, 11, 286, 13, 462, 16592, 286, 307, 264, 4974, 6713, 20904, 13, 33286, 1958, 1804, 9861, 502, 1783, 307, 1219, 8213, 6542, 13], "avg_logprob": -0.24397786923994622, "compression_ratio": 1.509933774834437, "no_speech_prob": 0.0, "words": [{"start": 29.24, "end": 29.78, "word": " Beta", "probability": 0.5380859375}, {"start": 29.78, "end": 30.12, "word": " 0", "probability": 0.46484375}, {"start": 30.12, "end": 30.34, "word": " is", "probability": 0.90966796875}, {"start": 30.34, "end": 30.68, "word": " called", "probability": 0.787109375}, {"start": 30.68, "end": 31.26, "word": " population", "probability": 0.68310546875}, {"start": 31.26, "end": 31.46, "word": " Y", "probability": 0.54931640625}, {"start": 31.46, "end": 31.96, "word": " intercept.", "probability": 0.6337890625}, {"start": 32.84, "end": 33.6, "word": " Beta", "probability": 0.9208984375}, {"start": 33.6, "end": 33.92, "word": " 1", "probability": 0.91162109375}, {"start": 33.92, "end": 34.26, "word": " is", "probability": 0.91943359375}, {"start": 34.26, "end": 34.54, "word": " the", "probability": 0.623046875}, {"start": 34.54, "end": 35.28, "word": " population", "probability": 0.951171875}, {"start": 35.28, "end": 35.82, "word": " stop", "probability": 0.461669921875}, {"start": 35.82, "end": 36.34, "word": " coefficient.", "probability": 0.8984375}, {"start": 37.32, "end": 37.68, "word": " Xi", "probability": 0.7666015625}, {"start": 37.68, "end": 38.16, "word": " is", "probability": 0.94287109375}, {"start": 38.16, "end": 38.32, "word": " the", "probability": 0.88525390625}, {"start": 38.32, "end": 38.7, "word": " independent", "probability": 0.759765625}, {"start": 38.7, "end": 39.4, "word": " variable", "probability": 0.912109375}, {"start": 39.4, "end": 39.86, "word": " for", "probability": 
0.9375}, {"start": 39.86, "end": 40.24, "word": " each", "probability": 0.95458984375}, {"start": 40.24, "end": 40.76, "word": " observation,", "probability": 0.919921875}, {"start": 41.32, "end": 41.52, "word": " I.", "probability": 0.64013671875}, {"start": 42.28, "end": 43.06, "word": " Epsilon", "probability": 0.7012939453125}, {"start": 43.06, "end": 43.4, "word": " I", "probability": 0.8271484375}, {"start": 43.4, "end": 43.66, "word": " is", "probability": 0.95166015625}, {"start": 43.66, "end": 43.78, "word": " the", "probability": 0.796875}, {"start": 43.78, "end": 44.04, "word": " random", "probability": 0.8447265625}, {"start": 44.04, "end": 44.32, "word": " error", "probability": 0.85498046875}, {"start": 44.32, "end": 44.76, "word": " theorem.", "probability": 0.7138671875}, {"start": 46.2, "end": 46.6, "word": " Beta", "probability": 0.80908203125}, {"start": 46.6, "end": 46.88, "word": " 0", "probability": 0.951171875}, {"start": 46.88, "end": 47.12, "word": " plus", "probability": 0.8798828125}, {"start": 47.12, "end": 47.36, "word": " beta", "probability": 0.81982421875}, {"start": 47.36, "end": 47.56, "word": " 1", "probability": 0.98046875}, {"start": 47.56, "end": 47.78, "word": " X", "probability": 0.583984375}, {"start": 47.78, "end": 48.02, "word": " is", "probability": 0.93408203125}, {"start": 48.02, "end": 48.42, "word": " called", "probability": 0.87060546875}, {"start": 48.42, "end": 49.14, "word": " linear", "probability": 0.7099609375}, {"start": 49.14, "end": 49.76, "word": " component.", "probability": 0.77001953125}], "temperature": 1.0}, {"id": 3, "seek": 7356, "start": 51.03, "end": 73.57, "text": " While Y and I are random error components. So, the regression equation mainly has two components. One is linear and the other is random. In general, the expected value for this error term is zero. 
So, for the predicted equation, later we will see that Y hat equals B zero plus B one X.", "tokens": [3987, 398, 293, 286, 366, 4974, 6713, 6677, 13, 407, 11, 264, 24590, 5367, 8704, 575, 732, 6677, 13, 1485, 307, 8213, 293, 264, 661, 307, 4974, 13, 682, 2674, 11, 264, 5176, 2158, 337, 341, 6713, 1433, 307, 4018, 13, 407, 11, 337, 264, 19147, 5367, 11, 1780, 321, 486, 536, 300, 398, 2385, 6915, 363, 4018, 1804, 363, 472, 1783, 13], "avg_logprob": -0.29956054827198386, "compression_ratio": 1.5714285714285714, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 51.03, "end": 51.57, "word": " While", "probability": 0.452392578125}, {"start": 51.57, "end": 51.95, "word": " Y", "probability": 0.5380859375}, {"start": 51.95, "end": 52.25, "word": " and", "probability": 0.12030029296875}, {"start": 52.25, "end": 52.45, "word": " I", "probability": 0.728515625}, {"start": 52.45, "end": 52.69, "word": " are", "probability": 0.490966796875}, {"start": 52.69, "end": 53.13, "word": " random", "probability": 0.70556640625}, {"start": 53.13, "end": 53.41, "word": " error", "probability": 0.69921875}, {"start": 53.41, "end": 53.91, "word": " components.", "probability": 0.8974609375}, {"start": 54.17, "end": 54.33, "word": " So,", "probability": 0.626953125}, {"start": 55.15, "end": 55.43, "word": " the", "probability": 0.75634765625}, {"start": 55.43, "end": 55.81, "word": " regression", "probability": 0.97900390625}, {"start": 55.81, "end": 56.27, "word": " equation", "probability": 0.96044921875}, {"start": 56.27, "end": 56.83, "word": " mainly", "probability": 0.8974609375}, {"start": 56.83, "end": 57.13, "word": " has", "probability": 0.9345703125}, {"start": 57.13, "end": 57.45, "word": " two", "probability": 0.8125}, {"start": 57.45, "end": 58.49, "word": " components.", "probability": 0.88232421875}, {"start": 59.19, "end": 59.49, "word": " One", "probability": 0.880859375}, {"start": 59.49, "end": 59.79, "word": " is", "probability": 0.93701171875}, {"start": 
59.79, "end": 60.05, "word": " linear", "probability": 0.84912109375}, {"start": 60.05, "end": 60.87, "word": " and", "probability": 0.63525390625}, {"start": 60.87, "end": 61.01, "word": " the", "probability": 0.787109375}, {"start": 61.01, "end": 61.23, "word": " other", "probability": 0.88623046875}, {"start": 61.23, "end": 61.97, "word": " is", "probability": 0.923828125}, {"start": 61.97, "end": 62.35, "word": " random.", "probability": 0.83984375}, {"start": 63.29, "end": 63.73, "word": " In", "probability": 0.86962890625}, {"start": 63.73, "end": 64.05, "word": " general,", "probability": 0.89892578125}, {"start": 64.23, "end": 64.31, "word": " the", "probability": 0.90673828125}, {"start": 64.31, "end": 64.85, "word": " expected", "probability": 0.88623046875}, {"start": 64.85, "end": 65.23, "word": " value", "probability": 0.96630859375}, {"start": 65.23, "end": 65.59, "word": " for", "probability": 0.88427734375}, {"start": 65.59, "end": 65.83, "word": " this", "probability": 0.92236328125}, {"start": 65.83, "end": 66.09, "word": " error", "probability": 0.81689453125}, {"start": 66.09, "end": 66.41, "word": " term", "probability": 0.92822265625}, {"start": 66.41, "end": 66.57, "word": " is", "probability": 0.9482421875}, {"start": 66.57, "end": 66.83, "word": " zero.", "probability": 0.71484375}, {"start": 67.75, "end": 68.01, "word": " So,", "probability": 0.78369140625}, {"start": 68.13, "end": 68.25, "word": " for", "probability": 0.927734375}, {"start": 68.25, "end": 68.41, "word": " the", "probability": 0.90283203125}, {"start": 68.41, "end": 68.81, "word": " predicted", "probability": 0.76025390625}, {"start": 68.81, "end": 69.39, "word": " equation,", "probability": 0.9794921875}, {"start": 69.63, "end": 69.93, "word": " later", "probability": 0.65576171875}, {"start": 69.93, "end": 70.09, "word": " we", "probability": 0.7060546875}, {"start": 70.09, "end": 70.13, "word": " will", "probability": 0.58056640625}, {"start": 70.13, "end": 70.31, 
"word": " see", "probability": 0.92138671875}, {"start": 70.31, "end": 70.61, "word": " that", "probability": 0.90673828125}, {"start": 70.61, "end": 71.47, "word": " Y", "probability": 0.63134765625}, {"start": 71.47, "end": 71.91, "word": " hat", "probability": 0.80908203125}, {"start": 71.91, "end": 72.19, "word": " equals", "probability": 0.501953125}, {"start": 72.19, "end": 72.41, "word": " B", "probability": 0.2412109375}, {"start": 72.41, "end": 72.59, "word": " zero", "probability": 0.365966796875}, {"start": 72.59, "end": 72.83, "word": " plus", "probability": 0.95068359375}, {"start": 72.83, "end": 73.05, "word": " B", "probability": 0.94775390625}, {"start": 73.05, "end": 73.25, "word": " one", "probability": 0.7265625}, {"start": 73.25, "end": 73.57, "word": " X.", "probability": 0.7939453125}], "temperature": 1.0}, {"id": 4, "seek": 8084, "start": 74.43, "end": 80.85, "text": "this term will be ignored because the expected value for the epsilon equals zero.", "tokens": [11176, 1433, 486, 312, 19735, 570, 264, 5176, 2158, 337, 264, 17889, 6915, 4018, 13], "avg_logprob": -0.23364257533103228, "compression_ratio": 1.0945945945945945, "no_speech_prob": 0.0, "words": [{"start": 74.43, "end": 74.83, "word": "this", "probability": 0.3828125}, {"start": 74.83, "end": 75.13, "word": " term", "probability": 0.91796875}, {"start": 75.13, "end": 75.29, "word": " will", "probability": 0.85791015625}, {"start": 75.29, "end": 75.45, "word": " be", "probability": 0.95166015625}, {"start": 75.45, "end": 75.93, "word": " ignored", "probability": 0.82568359375}, {"start": 75.93, "end": 76.59, "word": " because", "probability": 0.70947265625}, {"start": 76.59, "end": 76.85, "word": " the", "probability": 0.8408203125}, {"start": 76.85, "end": 77.33, "word": " expected", "probability": 0.87109375}, {"start": 77.33, "end": 77.71, "word": " value", "probability": 0.96875}, {"start": 77.71, "end": 77.97, "word": " for", "probability": 0.87451171875}, {"start": 77.97, "end": 
78.13, "word": " the", "probability": 0.74267578125}, {"start": 78.13, "end": 78.47, "word": " epsilon", "probability": 0.69970703125}, {"start": 78.47, "end": 79.77, "word": " equals", "probability": 0.8251953125}, {"start": 79.77, "end": 80.85, "word": " zero.", "probability": 0.56298828125}], "temperature": 1.0}, {"id": 5, "seek": 11528, "start": 96.46, "end": 115.28, "text": " So again linear component B0 plus B1 X I and the random component is the epsilon term. So if we have X and Y axis, this segment is called Y intercept which is B0.", "tokens": [407, 797, 8213, 6542, 363, 15, 1804, 363, 16, 1783, 286, 293, 264, 4974, 6542, 307, 264, 17889, 1433, 13, 407, 498, 321, 362, 1783, 293, 398, 10298, 11, 341, 9469, 307, 1219, 398, 24700, 597, 307, 363, 15, 13], "avg_logprob": -0.29782774971752635, "compression_ratio": 1.314516129032258, "no_speech_prob": 0.0, "words": [{"start": 96.46000000000001, "end": 97.18, "word": " So", "probability": 0.67138671875}, {"start": 97.18, "end": 97.54, "word": " again", "probability": 0.66357421875}, {"start": 97.54, "end": 99.26, "word": " linear", "probability": 0.330810546875}, {"start": 99.26, "end": 99.8, "word": " component", "probability": 0.712890625}, {"start": 99.8, "end": 100.8, "word": " B0", "probability": 0.568115234375}, {"start": 100.8, "end": 101.16, "word": " plus", "probability": 0.55859375}, {"start": 101.16, "end": 101.6, "word": " B1", "probability": 0.918212890625}, {"start": 101.6, "end": 101.94, "word": " X", "probability": 0.2435302734375}, {"start": 101.94, "end": 102.44, "word": " I", "probability": 0.58740234375}, {"start": 102.44, "end": 103.44, "word": " and", "probability": 0.75341796875}, {"start": 103.44, "end": 103.58, "word": " the", "probability": 0.84765625}, {"start": 103.58, "end": 103.78, "word": " random", "probability": 0.82177734375}, {"start": 103.78, "end": 104.28, "word": " component", "probability": 0.83056640625}, {"start": 104.28, "end": 104.8, "word": " is", "probability": 
0.9501953125}, {"start": 104.8, "end": 105.0, "word": " the", "probability": 0.8984375}, {"start": 105.0, "end": 105.4, "word": " epsilon", "probability": 0.720703125}, {"start": 105.4, "end": 106.86, "word": " term.", "probability": 0.8056640625}, {"start": 108.88, "end": 109.2, "word": " So", "probability": 0.9228515625}, {"start": 109.2, "end": 109.36, "word": " if", "probability": 0.76416015625}, {"start": 109.36, "end": 109.46, "word": " we", "probability": 0.55517578125}, {"start": 109.46, "end": 109.62, "word": " have", "probability": 0.94873046875}, {"start": 109.62, "end": 109.84, "word": " X", "probability": 0.81884765625}, {"start": 109.84, "end": 110.0, "word": " and", "probability": 0.93212890625}, {"start": 110.0, "end": 110.2, "word": " Y", "probability": 0.99072265625}, {"start": 110.2, "end": 110.72, "word": " axis,", "probability": 0.391357421875}, {"start": 112.2, "end": 112.54, "word": " this", "probability": 0.94384765625}, {"start": 112.54, "end": 112.96, "word": " segment", "probability": 0.9326171875}, {"start": 112.96, "end": 113.26, "word": " is", "probability": 0.94580078125}, {"start": 113.26, "end": 113.56, "word": " called", "probability": 0.88232421875}, {"start": 113.56, "end": 113.86, "word": " Y", "probability": 0.923828125}, {"start": 113.86, "end": 114.18, "word": " intercept", "probability": 0.7666015625}, {"start": 114.18, "end": 114.62, "word": " which", "probability": 0.69580078125}, {"start": 114.62, "end": 114.76, "word": " is", "probability": 0.94482421875}, {"start": 114.76, "end": 115.28, "word": " B0.", "probability": 0.95556640625}], "temperature": 1.0}, {"id": 6, "seek": 14446, "start": 116.12, "end": 144.46, "text": " The change in y divided by change in x is called the slope. Epsilon i is the difference between the observed value of y minus the expected value or the predicted value. The observed is the actual value. So actual minus predicted, the difference between these two values is called the epsilon. 
So epsilon i is the difference between the observed value of y for x,", "tokens": [440, 1319, 294, 288, 6666, 538, 1319, 294, 2031, 307, 1219, 264, 13525, 13, 462, 16592, 741, 307, 264, 2649, 1296, 264, 13095, 2158, 295, 288, 3175, 264, 5176, 2158, 420, 264, 19147, 2158, 13, 440, 13095, 307, 264, 3539, 2158, 13, 407, 3539, 3175, 19147, 11, 264, 2649, 1296, 613, 732, 4190, 307, 1219, 264, 17889, 13, 407, 17889, 741, 307, 264, 2649, 1296, 264, 13095, 2158, 295, 288, 337, 2031, 11], "avg_logprob": -0.19573480031780294, "compression_ratio": 2.2546583850931676, "no_speech_prob": 0.0, "words": [{"start": 116.12, "end": 116.36, "word": " The", "probability": 0.2685546875}, {"start": 116.36, "end": 116.68, "word": " change", "probability": 0.8935546875}, {"start": 116.68, "end": 116.86, "word": " in", "probability": 0.9306640625}, {"start": 116.86, "end": 117.14, "word": " y", "probability": 0.453369140625}, {"start": 117.14, "end": 117.62, "word": " divided", "probability": 0.61865234375}, {"start": 117.62, "end": 117.82, "word": " by", "probability": 0.9736328125}, {"start": 117.82, "end": 118.12, "word": " change", "probability": 0.54443359375}, {"start": 118.12, "end": 118.26, "word": " in", "probability": 0.9169921875}, {"start": 118.26, "end": 118.44, "word": " x", "probability": 0.96923828125}, {"start": 118.44, "end": 118.66, "word": " is", "probability": 0.8876953125}, {"start": 118.66, "end": 118.92, "word": " called", "probability": 0.87548828125}, {"start": 118.92, "end": 119.32, "word": " the", "probability": 0.8359375}, {"start": 119.32, "end": 119.68, "word": " slope.", "probability": 0.81396484375}, {"start": 120.5, "end": 121.0, "word": " Epsilon", "probability": 0.6744384765625}, {"start": 121.0, "end": 121.3, "word": " i", "probability": 0.473388671875}, {"start": 121.3, "end": 121.48, "word": " is", "probability": 0.875}, {"start": 121.48, "end": 121.62, "word": " the", "probability": 0.8994140625}, {"start": 121.62, "end": 122.04, "word": " 
difference", "probability": 0.8662109375}, {"start": 122.04, "end": 122.52, "word": " between", "probability": 0.8798828125}, {"start": 122.52, "end": 122.94, "word": " the", "probability": 0.89404296875}, {"start": 122.94, "end": 123.46, "word": " observed", "probability": 0.8779296875}, {"start": 123.46, "end": 123.9, "word": " value", "probability": 0.970703125}, {"start": 123.9, "end": 124.14, "word": " of", "probability": 0.962890625}, {"start": 124.14, "end": 124.48, "word": " y", "probability": 0.93994140625}, {"start": 124.48, "end": 126.42, "word": " minus", "probability": 0.87451171875}, {"start": 126.42, "end": 127.78, "word": " the", "probability": 0.87939453125}, {"start": 127.78, "end": 128.3, "word": " expected", "probability": 0.87744140625}, {"start": 128.3, "end": 129.38, "word": " value", "probability": 0.96484375}, {"start": 129.38, "end": 129.58, "word": " or", "probability": 0.7216796875}, {"start": 129.58, "end": 129.68, "word": " the", "probability": 0.8984375}, {"start": 129.68, "end": 130.02, "word": " predicted", "probability": 0.8359375}, {"start": 130.02, "end": 130.4, "word": " value.", "probability": 0.97314453125}, {"start": 130.8, "end": 131.22, "word": " The", "probability": 0.87109375}, {"start": 131.22, "end": 131.62, "word": " observed", "probability": 0.8642578125}, {"start": 131.62, "end": 131.8, "word": " is", "probability": 0.90966796875}, {"start": 131.8, "end": 131.96, "word": " the", "probability": 0.92041015625}, {"start": 131.96, "end": 132.34, "word": " actual", "probability": 0.88916015625}, {"start": 132.34, "end": 132.72, "word": " value.", "probability": 0.97802734375}, {"start": 133.24, "end": 133.38, "word": " So", "probability": 0.87158203125}, {"start": 133.38, "end": 133.82, "word": " actual", "probability": 0.63134765625}, {"start": 133.82, "end": 134.2, "word": " minus", "probability": 0.982421875}, {"start": 134.2, "end": 134.74, "word": " predicted,", "probability": 0.74462890625}, {"start": 135.66, "end": 
135.86, "word": " the", "probability": 0.90869140625}, {"start": 135.86, "end": 136.24, "word": " difference", "probability": 0.853515625}, {"start": 136.24, "end": 136.58, "word": " between", "probability": 0.8740234375}, {"start": 136.58, "end": 136.86, "word": " these", "probability": 0.8603515625}, {"start": 136.86, "end": 137.04, "word": " two", "probability": 0.91455078125}, {"start": 137.04, "end": 137.48, "word": " values", "probability": 0.9619140625}, {"start": 137.48, "end": 137.76, "word": " is", "probability": 0.8759765625}, {"start": 137.76, "end": 138.18, "word": " called", "probability": 0.888671875}, {"start": 138.18, "end": 138.82, "word": " the", "probability": 0.859375}, {"start": 138.82, "end": 139.16, "word": " epsilon.", "probability": 0.8642578125}, {"start": 139.66, "end": 139.88, "word": " So", "probability": 0.94970703125}, {"start": 139.88, "end": 140.26, "word": " epsilon", "probability": 0.8203125}, {"start": 140.26, "end": 140.56, "word": " i", "probability": 0.8671875}, {"start": 140.56, "end": 140.68, "word": " is", "probability": 0.9287109375}, {"start": 140.68, "end": 140.8, "word": " the", "probability": 0.90869140625}, {"start": 140.8, "end": 141.16, "word": " difference", "probability": 0.87109375}, {"start": 141.16, "end": 141.58, "word": " between", "probability": 0.86767578125}, {"start": 141.58, "end": 142.56, "word": " the", "probability": 0.90283203125}, {"start": 142.56, "end": 143.02, "word": " observed", "probability": 0.857421875}, {"start": 143.02, "end": 143.32, "word": " value", "probability": 0.9716796875}, {"start": 143.32, "end": 143.52, "word": " of", "probability": 0.96240234375}, {"start": 143.52, "end": 143.78, "word": " y", "probability": 0.98046875}, {"start": 143.78, "end": 144.08, "word": " for", "probability": 0.9482421875}, {"start": 144.08, "end": 144.46, "word": " x,", "probability": 0.9892578125}], "temperature": 1.0}, {"id": 7, "seek": 16738, "start": 145.22, "end": 167.38, "text": " minus the 
predicted or the estimated value of Y for XR. So this difference actually is called the error tier. So the error is just observed minus predicted. The estimated regression equation is given by Y hat equals V0 plus V1X.", "tokens": [3175, 264, 19147, 420, 264, 14109, 2158, 295, 398, 337, 1783, 49, 13, 407, 341, 2649, 767, 307, 1219, 264, 6713, 12362, 13, 407, 264, 6713, 307, 445, 13095, 3175, 19147, 13, 440, 14109, 24590, 5367, 307, 2212, 538, 398, 2385, 6915, 691, 15, 1804, 691, 16, 55, 13], "avg_logprob": -0.2321874988079071, "compression_ratio": 1.506578947368421, "no_speech_prob": 0.0, "words": [{"start": 145.22, "end": 145.82, "word": " minus", "probability": 0.265625}, {"start": 145.82, "end": 146.12, "word": " the", "probability": 0.896484375}, {"start": 146.12, "end": 146.52, "word": " predicted", "probability": 0.81201171875}, {"start": 146.52, "end": 146.84, "word": " or", "probability": 0.77685546875}, {"start": 146.84, "end": 146.98, "word": " the", "probability": 0.80810546875}, {"start": 146.98, "end": 147.32, "word": " estimated", "probability": 0.495361328125}, {"start": 147.32, "end": 147.68, "word": " value", "probability": 0.9541015625}, {"start": 147.68, "end": 148.56, "word": " of", "probability": 0.9443359375}, {"start": 148.56, "end": 148.82, "word": " Y", "probability": 0.63232421875}, {"start": 148.82, "end": 149.1, "word": " for", "probability": 0.84033203125}, {"start": 149.1, "end": 149.58, "word": " XR.", "probability": 0.6505126953125}, {"start": 150.4, "end": 150.62, "word": " So", "probability": 0.88916015625}, {"start": 150.62, "end": 150.84, "word": " this", "probability": 0.7314453125}, {"start": 150.84, "end": 151.34, "word": " difference", "probability": 0.8798828125}, {"start": 151.34, "end": 152.0, "word": " actually", "probability": 0.79052734375}, {"start": 152.0, "end": 152.74, "word": " is", "probability": 0.873046875}, {"start": 152.74, "end": 153.02, "word": " called", "probability": 0.89208984375}, {"start": 153.02, 
"end": 153.36, "word": " the", "probability": 0.90869140625}, {"start": 153.36, "end": 153.74, "word": " error", "probability": 0.7822265625}, {"start": 153.74, "end": 154.02, "word": " tier.", "probability": 0.3525390625}, {"start": 154.78, "end": 154.98, "word": " So", "probability": 0.931640625}, {"start": 154.98, "end": 155.12, "word": " the", "probability": 0.8349609375}, {"start": 155.12, "end": 155.34, "word": " error", "probability": 0.841796875}, {"start": 155.34, "end": 155.5, "word": " is", "probability": 0.7724609375}, {"start": 155.5, "end": 155.74, "word": " just", "probability": 0.91259765625}, {"start": 155.74, "end": 156.34, "word": " observed", "probability": 0.82421875}, {"start": 156.34, "end": 156.92, "word": " minus", "probability": 0.98486328125}, {"start": 156.92, "end": 158.24, "word": " predicted.", "probability": 0.84326171875}, {"start": 160.98, "end": 161.74, "word": " The", "probability": 0.880859375}, {"start": 161.74, "end": 162.44, "word": " estimated", "probability": 0.88720703125}, {"start": 162.44, "end": 162.86, "word": " regression", "probability": 0.9208984375}, {"start": 162.86, "end": 163.4, "word": " equation", "probability": 0.9482421875}, {"start": 163.4, "end": 163.62, "word": " is", "probability": 0.94970703125}, {"start": 163.62, "end": 163.82, "word": " given", "probability": 0.8955078125}, {"start": 163.82, "end": 164.22, "word": " by", "probability": 0.97265625}, {"start": 164.22, "end": 164.54, "word": " Y", "probability": 0.93212890625}, {"start": 164.54, "end": 164.92, "word": " hat", "probability": 0.87841796875}, {"start": 164.92, "end": 165.98, "word": " equals", "probability": 0.8583984375}, {"start": 165.98, "end": 166.44, "word": " V0", "probability": 0.56884765625}, {"start": 166.44, "end": 166.7, "word": " plus", "probability": 0.93798828125}, {"start": 166.7, "end": 167.38, "word": " V1X.", "probability": 0.7810872395833334}], "temperature": 1.0}, {"id": 8, "seek": 19493, "start": 168.93, "end": 194.93, 
"text": " as i mentioned before the epsilon term is cancelled because the expected value for the epsilon equals zero here we have y hat instead of y because this one is called the estimated or the predicted value for y for the observation i for example b zero is the estimated of the regression intercept or is called y intercept b one", "tokens": [382, 741, 2835, 949, 264, 17889, 1433, 307, 25103, 570, 264, 5176, 2158, 337, 264, 17889, 6915, 4018, 510, 321, 362, 288, 2385, 2602, 295, 288, 570, 341, 472, 307, 1219, 264, 14109, 420, 264, 19147, 2158, 337, 288, 337, 264, 14816, 741, 337, 1365, 272, 4018, 307, 264, 14109, 295, 264, 24590, 24700, 420, 307, 1219, 288, 24700, 272, 472], "avg_logprob": -0.15940020401631633, "compression_ratio": 1.9289940828402368, "no_speech_prob": 0.0, "words": [{"start": 168.93, "end": 169.23, "word": " as", "probability": 0.38037109375}, {"start": 169.23, "end": 169.35, "word": " i", "probability": 0.326416015625}, {"start": 169.35, "end": 169.61, "word": " mentioned", "probability": 0.8349609375}, {"start": 169.61, "end": 169.99, "word": " before", "probability": 0.86279296875}, {"start": 169.99, "end": 170.21, "word": " the", "probability": 0.8388671875}, {"start": 170.21, "end": 170.49, "word": " epsilon", "probability": 0.8671875}, {"start": 170.49, "end": 170.85, "word": " term", "probability": 0.9189453125}, {"start": 170.85, "end": 171.21, "word": " is", "probability": 0.9521484375}, {"start": 171.21, "end": 171.93, "word": " cancelled", "probability": 0.58349609375}, {"start": 171.93, "end": 172.87, "word": " because", "probability": 0.81591796875}, {"start": 172.87, "end": 173.05, "word": " the", "probability": 0.91552734375}, {"start": 173.05, "end": 173.45, "word": " expected", "probability": 0.91455078125}, {"start": 173.45, "end": 173.83, "word": " value", "probability": 0.97705078125}, {"start": 173.83, "end": 174.33, "word": " for", "probability": 0.92529296875}, {"start": 174.33, "end": 174.49, "word": " the", 
"probability": 0.90869140625}, {"start": 174.49, "end": 174.75, "word": " epsilon", "probability": 0.888671875}, {"start": 174.75, "end": 175.27, "word": " equals", "probability": 0.91796875}, {"start": 175.27, "end": 175.63, "word": " zero", "probability": 0.81591796875}, {"start": 175.63, "end": 176.73, "word": " here", "probability": 0.52978515625}, {"start": 176.73, "end": 176.87, "word": " we", "probability": 0.94921875}, {"start": 176.87, "end": 177.23, "word": " have", "probability": 0.9462890625}, {"start": 177.23, "end": 177.59, "word": " y", "probability": 0.90478515625}, {"start": 177.59, "end": 177.85, "word": " hat", "probability": 0.91552734375}, {"start": 177.85, "end": 178.19, "word": " instead", "probability": 0.9150390625}, {"start": 178.19, "end": 178.43, "word": " of", "probability": 0.97265625}, {"start": 178.43, "end": 178.69, "word": " y", "probability": 0.97802734375}, {"start": 178.69, "end": 179.33, "word": " because", "probability": 0.90234375}, {"start": 179.33, "end": 179.61, "word": " this", "probability": 0.94921875}, {"start": 179.61, "end": 179.81, "word": " one", "probability": 0.92822265625}, {"start": 179.81, "end": 180.11, "word": " is", "probability": 0.9501953125}, {"start": 180.11, "end": 180.53, "word": " called", "probability": 0.85693359375}, {"start": 180.53, "end": 180.79, "word": " the", "probability": 0.88427734375}, {"start": 180.79, "end": 181.27, "word": " estimated", "probability": 0.92138671875}, {"start": 181.27, "end": 182.39, "word": " or", "probability": 0.94873046875}, {"start": 182.39, "end": 182.59, "word": " the", "probability": 0.9208984375}, {"start": 182.59, "end": 183.01, "word": " predicted", "probability": 0.85546875}, {"start": 183.01, "end": 183.85, "word": " value", "probability": 0.9716796875}, {"start": 183.85, "end": 184.33, "word": " for", "probability": 0.95703125}, {"start": 184.33, "end": 184.65, "word": " y", "probability": 0.97802734375}, {"start": 184.65, "end": 185.51, "word": " for", 
"probability": 0.92138671875}, {"start": 185.51, "end": 185.67, "word": " the", "probability": 0.92041015625}, {"start": 185.67, "end": 186.11, "word": " observation", "probability": 0.8857421875}, {"start": 186.11, "end": 187.15, "word": " i", "probability": 0.71435546875}, {"start": 187.15, "end": 187.37, "word": " for", "probability": 0.92431640625}, {"start": 187.37, "end": 187.75, "word": " example", "probability": 0.9755859375}, {"start": 187.75, "end": 188.51, "word": " b", "probability": 0.80810546875}, {"start": 188.51, "end": 188.81, "word": " zero", "probability": 0.70703125}, {"start": 188.81, "end": 189.05, "word": " is", "probability": 0.9541015625}, {"start": 189.05, "end": 189.17, "word": " the", "probability": 0.8994140625}, {"start": 189.17, "end": 189.67, "word": " estimated", "probability": 0.89501953125}, {"start": 189.67, "end": 190.13, "word": " of", "probability": 0.97314453125}, {"start": 190.13, "end": 190.25, "word": " the", "probability": 0.91943359375}, {"start": 190.25, "end": 190.55, "word": " regression", "probability": 0.9521484375}, {"start": 190.55, "end": 191.13, "word": " intercept", "probability": 0.9638671875}, {"start": 191.13, "end": 191.89, "word": " or", "probability": 0.8017578125}, {"start": 191.89, "end": 192.05, "word": " is", "probability": 0.61181640625}, {"start": 192.05, "end": 192.29, "word": " called", "probability": 0.89599609375}, {"start": 192.29, "end": 192.59, "word": " y", "probability": 0.97705078125}, {"start": 192.59, "end": 193.07, "word": " intercept", "probability": 0.82373046875}, {"start": 193.07, "end": 194.67, "word": " b", "probability": 0.8740234375}, {"start": 194.67, "end": 194.93, "word": " one", "probability": 0.60302734375}], "temperature": 1.0}, {"id": 9, "seek": 20491, "start": 196.19, "end": 204.91, "text": " the estimate of the regression of the slope so this is the estimated slope b1 xi again is the independent variable so x1", "tokens": [264, 12539, 295, 264, 24590, 295, 264, 13525, 
370, 341, 307, 264, 14109, 13525, 272, 16, 36800, 797, 307, 264, 6695, 7006, 370, 2031, 16], "avg_logprob": -0.213641830934928, "compression_ratio": 1.4578313253012047, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 196.19, "end": 196.47, "word": " the", "probability": 0.1484375}, {"start": 196.47, "end": 196.89, "word": " estimate", "probability": 0.91162109375}, {"start": 196.89, "end": 197.17, "word": " of", "probability": 0.96337890625}, {"start": 197.17, "end": 197.29, "word": " the", "probability": 0.8623046875}, {"start": 197.29, "end": 197.59, "word": " regression", "probability": 0.9638671875}, {"start": 197.59, "end": 198.03, "word": " of", "probability": 0.96337890625}, {"start": 198.03, "end": 198.21, "word": " the", "probability": 0.90576171875}, {"start": 198.21, "end": 198.51, "word": " slope", "probability": 0.8662109375}, {"start": 198.51, "end": 199.55, "word": " so", "probability": 0.44384765625}, {"start": 199.55, "end": 199.75, "word": " this", "probability": 0.94677734375}, {"start": 199.75, "end": 199.87, "word": " is", "probability": 0.9521484375}, {"start": 199.87, "end": 199.97, "word": " the", "probability": 0.91845703125}, {"start": 199.97, "end": 200.43, "word": " estimated", "probability": 0.8095703125}, {"start": 200.43, "end": 200.81, "word": " slope", "probability": 0.84765625}, {"start": 200.81, "end": 201.29, "word": " b1", "probability": 0.800048828125}, {"start": 201.29, "end": 201.93, "word": " xi", "probability": 0.6904296875}, {"start": 201.93, "end": 202.53, "word": " again", "probability": 0.9404296875}, {"start": 202.53, "end": 202.81, "word": " is", "probability": 0.9541015625}, {"start": 202.81, "end": 203.03, "word": " the", "probability": 0.92724609375}, {"start": 203.03, "end": 203.47, "word": " independent", "probability": 0.90625}, {"start": 203.47, "end": 203.93, "word": " variable", "probability": 0.9287109375}, {"start": 203.93, "end": 204.41, "word": " so", "probability": 0.84033203125}, {"start": 
204.41, "end": 204.91, "word": " x1", "probability": 0.934814453125}], "temperature": 1.0}, {"id": 10, "seek": 22737, "start": 205.89, "end": 227.37, "text": " It means the value of the independent variable for observation number one. Now this equation is called linear regression equation or regression model. It's a straight line because here we are assuming that the relationship between x and y is linear. It could be non-linear, but we are focusing here in just linear regression.", "tokens": [467, 1355, 264, 2158, 295, 264, 6695, 7006, 337, 14816, 1230, 472, 13, 823, 341, 5367, 307, 1219, 8213, 24590, 5367, 420, 24590, 2316, 13, 467, 311, 257, 2997, 1622, 570, 510, 321, 366, 11926, 300, 264, 2480, 1296, 2031, 293, 288, 307, 8213, 13, 467, 727, 312, 2107, 12, 28263, 11, 457, 321, 366, 8416, 510, 294, 445, 8213, 24590, 13], "avg_logprob": -0.1741071381266155, "compression_ratio": 1.6717948717948719, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 205.89, "end": 205.99, "word": " It", "probability": 0.6748046875}, {"start": 205.99, "end": 206.27, "word": " means", "probability": 0.91064453125}, {"start": 206.27, "end": 206.45, "word": " the", "probability": 0.8994140625}, {"start": 206.45, "end": 206.79, "word": " value", "probability": 0.96728515625}, {"start": 206.79, "end": 207.03, "word": " of", "probability": 0.9541015625}, {"start": 207.03, "end": 207.17, "word": " the", "probability": 0.88037109375}, {"start": 207.17, "end": 207.49, "word": " independent", "probability": 0.791015625}, {"start": 207.49, "end": 207.93, "word": " variable", "probability": 0.916015625}, {"start": 207.93, "end": 208.63, "word": " for", "probability": 0.86083984375}, {"start": 208.63, "end": 209.09, "word": " observation", "probability": 0.875}, {"start": 209.09, "end": 209.37, "word": " number", "probability": 0.892578125}, {"start": 209.37, "end": 209.61, "word": " one.", "probability": 0.5673828125}, {"start": 210.15, "end": 210.41, "word": " Now", "probability": 
0.94873046875}, {"start": 210.41, "end": 210.71, "word": " this", "probability": 0.62060546875}, {"start": 210.71, "end": 211.09, "word": " equation", "probability": 0.94482421875}, {"start": 211.09, "end": 211.35, "word": " is", "probability": 0.921875}, {"start": 211.35, "end": 211.77, "word": " called", "probability": 0.89599609375}, {"start": 211.77, "end": 212.63, "word": " linear", "probability": 0.8203125}, {"start": 212.63, "end": 213.07, "word": " regression", "probability": 0.95654296875}, {"start": 213.07, "end": 213.65, "word": " equation", "probability": 0.9794921875}, {"start": 213.65, "end": 214.19, "word": " or", "probability": 0.7275390625}, {"start": 214.19, "end": 214.53, "word": " regression", "probability": 0.953125}, {"start": 214.53, "end": 214.93, "word": " model.", "probability": 0.935546875}, {"start": 215.33, "end": 215.71, "word": " It's", "probability": 0.543701171875}, {"start": 215.71, "end": 215.79, "word": " a", "probability": 0.92724609375}, {"start": 215.79, "end": 216.05, "word": " straight", "probability": 0.91552734375}, {"start": 216.05, "end": 216.39, "word": " line", "probability": 0.93798828125}, {"start": 216.39, "end": 216.77, "word": " because", "probability": 0.619140625}, {"start": 216.77, "end": 216.95, "word": " here", "probability": 0.8251953125}, {"start": 216.95, "end": 217.09, "word": " we", "probability": 0.93212890625}, {"start": 217.09, "end": 217.23, "word": " are", "probability": 0.9248046875}, {"start": 217.23, "end": 217.65, "word": " assuming", "probability": 0.89892578125}, {"start": 217.65, "end": 218.07, "word": " that", "probability": 0.935546875}, {"start": 218.07, "end": 218.93, "word": " the", "probability": 0.859375}, {"start": 218.93, "end": 219.51, "word": " relationship", "probability": 0.91259765625}, {"start": 219.51, "end": 219.91, "word": " between", "probability": 0.8994140625}, {"start": 219.91, "end": 220.07, "word": " x", "probability": 0.5751953125}, {"start": 220.07, "end": 220.23, 
"word": " and", "probability": 0.94775390625}, {"start": 220.23, "end": 220.49, "word": " y", "probability": 0.9951171875}, {"start": 220.49, "end": 221.17, "word": " is", "probability": 0.9384765625}, {"start": 221.17, "end": 221.49, "word": " linear.", "probability": 0.89990234375}, {"start": 221.81, "end": 222.01, "word": " It", "probability": 0.9580078125}, {"start": 222.01, "end": 222.17, "word": " could", "probability": 0.884765625}, {"start": 222.17, "end": 222.31, "word": " be", "probability": 0.9462890625}, {"start": 222.31, "end": 222.51, "word": " non", "probability": 0.98486328125}, {"start": 222.51, "end": 222.75, "word": "-linear,", "probability": 0.677001953125}, {"start": 222.93, "end": 223.11, "word": " but", "probability": 0.927734375}, {"start": 223.11, "end": 223.31, "word": " we", "probability": 0.9599609375}, {"start": 223.31, "end": 223.49, "word": " are", "probability": 0.93505859375}, {"start": 223.49, "end": 223.97, "word": " focusing", "probability": 0.90234375}, {"start": 223.97, "end": 224.43, "word": " here", "probability": 0.857421875}, {"start": 224.43, "end": 225.51, "word": " in", "probability": 0.5439453125}, {"start": 225.51, "end": 225.91, "word": " just", "probability": 0.92529296875}, {"start": 225.91, "end": 226.33, "word": " linear", "probability": 0.9287109375}, {"start": 226.33, "end": 227.37, "word": " regression.", "probability": 0.9560546875}], "temperature": 1.0}, {"id": 11, "seek": 24776, "start": 227.94, "end": 247.76, "text": " Now, the values for B0 and B1 are given by these equations, B1 equals RSY divided by SX. 
So, in order to determine the values of B0 and B1, we have to know first the value of R, the correlation coefficient.", "tokens": [823, 11, 264, 4190, 337, 363, 15, 293, 363, 16, 366, 2212, 538, 613, 11787, 11, 363, 16, 6915, 25855, 56, 6666, 538, 318, 55, 13, 407, 11, 294, 1668, 281, 6997, 264, 4190, 295, 363, 15, 293, 363, 16, 11, 321, 362, 281, 458, 700, 264, 2158, 295, 497, 11, 264, 20009, 17619, 13], "avg_logprob": -0.1637834869325161, "compression_ratio": 1.4475524475524475, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 227.94, "end": 228.56, "word": " Now,", "probability": 0.85986328125}, {"start": 228.64, "end": 228.76, "word": " the", "probability": 0.91943359375}, {"start": 228.76, "end": 229.1, "word": " values", "probability": 0.95361328125}, {"start": 229.1, "end": 229.42, "word": " for", "probability": 0.8984375}, {"start": 229.42, "end": 229.84, "word": " B0", "probability": 0.580078125}, {"start": 229.84, "end": 230.1, "word": " and", "probability": 0.9482421875}, {"start": 230.1, "end": 230.42, "word": " B1", "probability": 0.99609375}, {"start": 230.42, "end": 230.62, "word": " are", "probability": 0.93310546875}, {"start": 230.62, "end": 230.84, "word": " given", "probability": 0.900390625}, {"start": 230.84, "end": 231.12, "word": " by", "probability": 0.97119140625}, {"start": 231.12, "end": 231.4, "word": " these", "probability": 0.76220703125}, {"start": 231.4, "end": 232.0, "word": " equations,", "probability": 0.92578125}, {"start": 232.92, "end": 233.26, "word": " B1", "probability": 0.982666015625}, {"start": 233.26, "end": 233.74, "word": " equals", "probability": 0.85986328125}, {"start": 233.74, "end": 234.6, "word": " RSY", "probability": 0.56597900390625}, {"start": 234.6, "end": 234.8, "word": " divided", "probability": 0.75341796875}, {"start": 234.8, "end": 235.04, "word": " by", "probability": 0.97216796875}, {"start": 235.04, "end": 235.52, "word": " SX.", "probability": 0.851806640625}, {"start": 235.66, "end": 
235.84, "word": " So,", "probability": 0.90771484375}, {"start": 235.92, "end": 236.02, "word": " in", "probability": 0.94580078125}, {"start": 236.02, "end": 236.26, "word": " order", "probability": 0.91943359375}, {"start": 236.26, "end": 236.48, "word": " to", "probability": 0.97021484375}, {"start": 236.48, "end": 236.96, "word": " determine", "probability": 0.91357421875}, {"start": 236.96, "end": 238.52, "word": " the", "probability": 0.91552734375}, {"start": 238.52, "end": 239.28, "word": " values", "probability": 0.9609375}, {"start": 239.28, "end": 239.42, "word": " of", "probability": 0.92529296875}, {"start": 239.42, "end": 239.78, "word": " B0", "probability": 0.984130859375}, {"start": 239.78, "end": 239.94, "word": " and", "probability": 0.9482421875}, {"start": 239.94, "end": 240.26, "word": " B1,", "probability": 0.998779296875}, {"start": 240.36, "end": 240.5, "word": " we", "probability": 0.91015625}, {"start": 240.5, "end": 240.7, "word": " have", "probability": 0.94482421875}, {"start": 240.7, "end": 240.88, "word": " to", "probability": 0.96923828125}, {"start": 240.88, "end": 241.04, "word": " know", "probability": 0.87109375}, {"start": 241.04, "end": 241.52, "word": " first", "probability": 0.83740234375}, {"start": 241.52, "end": 242.62, "word": " the", "probability": 0.78564453125}, {"start": 242.62, "end": 242.94, "word": " value", "probability": 0.97314453125}, {"start": 242.94, "end": 243.42, "word": " of", "probability": 0.96923828125}, {"start": 243.42, "end": 244.54, "word": " R,", "probability": 0.9794921875}, {"start": 246.3, "end": 246.76, "word": " the", "probability": 0.8671875}, {"start": 246.76, "end": 247.2, "word": " correlation", "probability": 0.95458984375}, {"start": 247.2, "end": 247.76, "word": " coefficient.", "probability": 0.95556640625}], "temperature": 1.0}, {"id": 12, "seek": 28539, "start": 256.64, "end": 285.4, "text": " Sx and Sy, standard deviations of x and y, as well as the means of x and y. 
B1 equals R times Sy divided by Sx. B0 is just y bar minus b1 x bar, where Sx and Sy are the standard deviations of x and y.", "tokens": [318, 87, 293, 3902, 11, 3832, 31219, 763, 295, 2031, 293, 288, 11, 382, 731, 382, 264, 1355, 295, 2031, 293, 288, 13, 363, 16, 6915, 497, 1413, 3902, 6666, 538, 318, 87, 13, 363, 15, 307, 445, 288, 2159, 3175, 272, 16, 2031, 2159, 11, 689, 318, 87, 293, 3902, 366, 264, 3832, 31219, 763, 295, 2031, 293, 288, 13], "avg_logprob": -0.16355846702091156, "compression_ratio": 1.558139534883721, "no_speech_prob": 0.0, "words": [{"start": 256.64, "end": 257.36, "word": " Sx", "probability": 0.6368408203125}, {"start": 257.36, "end": 257.88, "word": " and", "probability": 0.9326171875}, {"start": 257.88, "end": 258.4, "word": " Sy,", "probability": 0.92626953125}, {"start": 258.72, "end": 259.16, "word": " standard", "probability": 0.8583984375}, {"start": 259.16, "end": 259.82, "word": " deviations", "probability": 0.931640625}, {"start": 259.82, "end": 261.58, "word": " of", "probability": 0.93310546875}, {"start": 261.58, "end": 262.1, "word": " x", "probability": 0.7919921875}, {"start": 262.1, "end": 262.78, "word": " and", "probability": 0.9482421875}, {"start": 262.78, "end": 263.14, "word": " y,", "probability": 0.99755859375}, {"start": 263.88, "end": 264.78, "word": " as", "probability": 0.947265625}, {"start": 264.78, "end": 264.98, "word": " well", "probability": 0.9296875}, {"start": 264.98, "end": 265.44, "word": " as", "probability": 0.966796875}, {"start": 265.44, "end": 266.54, "word": " the", "probability": 0.88916015625}, {"start": 266.54, "end": 266.9, "word": " means", "probability": 0.810546875}, {"start": 266.9, "end": 269.18, "word": " of", "probability": 0.935546875}, {"start": 269.18, "end": 269.42, "word": " x", "probability": 0.9873046875}, {"start": 269.42, "end": 269.58, "word": " and", "probability": 0.94677734375}, {"start": 269.58, "end": 269.88, "word": " y.", "probability": 0.99853515625}, {"start": 
272.92, "end": 273.64, "word": " B1", "probability": 0.813232421875}, {"start": 273.64, "end": 274.08, "word": " equals", "probability": 0.796875}, {"start": 274.08, "end": 274.56, "word": " R", "probability": 0.67919921875}, {"start": 274.56, "end": 275.14, "word": " times", "probability": 0.9052734375}, {"start": 275.14, "end": 275.54, "word": " Sy", "probability": 0.85302734375}, {"start": 275.54, "end": 275.8, "word": " divided", "probability": 0.80908203125}, {"start": 275.8, "end": 276.0, "word": " by", "probability": 0.97216796875}, {"start": 276.0, "end": 276.5, "word": " Sx.", "probability": 0.9892578125}, {"start": 278.14, "end": 278.86, "word": " B0", "probability": 0.90869140625}, {"start": 278.86, "end": 279.06, "word": " is", "probability": 0.6806640625}, {"start": 279.06, "end": 279.26, "word": " just", "probability": 0.90625}, {"start": 279.26, "end": 279.5, "word": " y", "probability": 0.7138671875}, {"start": 279.5, "end": 279.7, "word": " bar", "probability": 0.732421875}, {"start": 279.7, "end": 280.0, "word": " minus", "probability": 0.98046875}, {"start": 280.0, "end": 280.32, "word": " b1", "probability": 0.791259765625}, {"start": 280.32, "end": 280.5, "word": " x", "probability": 0.79736328125}, {"start": 280.5, "end": 280.82, "word": " bar,", "probability": 0.9306640625}, {"start": 280.94, "end": 281.24, "word": " where", "probability": 0.93017578125}, {"start": 281.24, "end": 281.96, "word": " Sx", "probability": 0.9873046875}, {"start": 281.96, "end": 282.2, "word": " and", "probability": 0.9306640625}, {"start": 282.2, "end": 282.6, "word": " Sy", "probability": 0.9267578125}, {"start": 282.6, "end": 283.38, "word": " are", "probability": 0.9384765625}, {"start": 283.38, "end": 283.6, "word": " the", "probability": 0.87841796875}, {"start": 283.6, "end": 283.84, "word": " standard", "probability": 0.9482421875}, {"start": 283.84, "end": 284.32, "word": " deviations", "probability": 0.94091796875}, {"start": 284.32, "end": 284.5, "word": 
" of", "probability": 0.96240234375}, {"start": 284.5, "end": 284.72, "word": " x", "probability": 0.9697265625}, {"start": 284.72, "end": 285.08, "word": " and", "probability": 0.94384765625}, {"start": 285.08, "end": 285.4, "word": " y.", "probability": 0.99853515625}], "temperature": 1.0}, {"id": 13, "seek": 31091, "start": 286.73, "end": 310.91, "text": " So this, how can we compute the values of B0 and B1? Now the question is, what's our interpretation about B0 and B1? And B0, as we mentioned before, is the Y or the estimated mean value of Y when the value X is 0.", "tokens": [407, 341, 11, 577, 393, 321, 14722, 264, 4190, 295, 363, 15, 293, 363, 16, 30, 823, 264, 1168, 307, 11, 437, 311, 527, 14174, 466, 363, 15, 293, 363, 16, 30, 400, 363, 15, 11, 382, 321, 2835, 949, 11, 307, 264, 398, 420, 264, 14109, 914, 2158, 295, 398, 562, 264, 2158, 1783, 307, 1958, 13], "avg_logprob": -0.21477754085750903, "compression_ratio": 1.445945945945946, "no_speech_prob": 0.0, "words": [{"start": 286.73, "end": 287.41, "word": " So", "probability": 0.54833984375}, {"start": 287.41, "end": 287.79, "word": " this,", "probability": 0.5185546875}, {"start": 287.97, "end": 288.09, "word": " how", "probability": 0.93701171875}, {"start": 288.09, "end": 288.35, "word": " can", "probability": 0.93994140625}, {"start": 288.35, "end": 288.75, "word": " we", "probability": 0.95068359375}, {"start": 288.75, "end": 289.83, "word": " compute", "probability": 0.90869140625}, {"start": 289.83, "end": 290.83, "word": " the", "probability": 0.86279296875}, {"start": 290.83, "end": 291.17, "word": " values", "probability": 0.95458984375}, {"start": 291.17, "end": 291.33, "word": " of", "probability": 0.955078125}, {"start": 291.33, "end": 291.67, "word": " B0", "probability": 0.688232421875}, {"start": 291.67, "end": 291.79, "word": " and", "probability": 0.9453125}, {"start": 291.79, "end": 292.11, "word": " B1?", "probability": 0.9951171875}, {"start": 292.71, "end": 292.99, "word": " Now", 
"probability": 0.93994140625}, {"start": 292.99, "end": 293.19, "word": " the", "probability": 0.56884765625}, {"start": 293.19, "end": 293.85, "word": " question", "probability": 0.83447265625}, {"start": 293.85, "end": 294.21, "word": " is,", "probability": 0.94775390625}, {"start": 295.15, "end": 295.49, "word": " what's", "probability": 0.950927734375}, {"start": 295.49, "end": 295.73, "word": " our", "probability": 0.89697265625}, {"start": 295.73, "end": 296.41, "word": " interpretation", "probability": 0.8447265625}, {"start": 296.41, "end": 297.37, "word": " about", "probability": 0.86376953125}, {"start": 297.37, "end": 299.35, "word": " B0", "probability": 0.94873046875}, {"start": 299.35, "end": 299.87, "word": " and", "probability": 0.94091796875}, {"start": 299.87, "end": 300.27, "word": " B1?", "probability": 0.99658203125}, {"start": 301.89, "end": 302.13, "word": " And", "probability": 0.83984375}, {"start": 302.13, "end": 302.53, "word": " B0,", "probability": 0.94482421875}, {"start": 302.63, "end": 302.71, "word": " as", "probability": 0.96240234375}, {"start": 302.71, "end": 302.85, "word": " we", "probability": 0.83251953125}, {"start": 302.85, "end": 303.11, "word": " mentioned", "probability": 0.84423828125}, {"start": 303.11, "end": 303.63, "word": " before,", "probability": 0.85546875}, {"start": 303.85, "end": 304.19, "word": " is", "probability": 0.9189453125}, {"start": 304.19, "end": 304.55, "word": " the", "probability": 0.8857421875}, {"start": 304.55, "end": 305.03, "word": " Y", "probability": 0.63037109375}, {"start": 305.03, "end": 305.79, "word": " or", "probability": 0.230224609375}, {"start": 305.79, "end": 306.09, "word": " the", "probability": 0.77734375}, {"start": 306.09, "end": 306.69, "word": " estimated", "probability": 0.477294921875}, {"start": 306.69, "end": 308.89, "word": " mean", "probability": 0.7734375}, {"start": 308.89, "end": 309.29, "word": " value", "probability": 0.96533203125}, {"start": 309.29, "end": 
309.49, "word": " of", "probability": 0.95458984375}, {"start": 309.49, "end": 309.77, "word": " Y", "probability": 0.9833984375}, {"start": 309.77, "end": 310.03, "word": " when", "probability": 0.734375}, {"start": 310.03, "end": 310.13, "word": " the", "probability": 0.8994140625}, {"start": 310.13, "end": 310.33, "word": " value", "probability": 0.91552734375}, {"start": 310.33, "end": 310.51, "word": " X", "probability": 0.4345703125}, {"start": 310.51, "end": 310.63, "word": " is", "probability": 0.93359375}, {"start": 310.63, "end": 310.91, "word": " 0.", "probability": 0.5029296875}], "temperature": 1.0}, {"id": 14, "seek": 34236, "start": 317.42, "end": 342.36, "text": " So if X is 0, then Y hat equals B0. That means B0 is the estimated mean value of Y when the value of X equals 0. B1, which is called the estimated change in the mean value of Y as a result of one unit change in X. That means the sign of B1,", "tokens": [407, 498, 1783, 307, 1958, 11, 550, 398, 2385, 6915, 363, 15, 13, 663, 1355, 363, 15, 307, 264, 14109, 914, 2158, 295, 398, 562, 264, 2158, 295, 1783, 6915, 1958, 13, 363, 16, 11, 597, 307, 1219, 264, 14109, 1319, 294, 264, 914, 2158, 295, 398, 382, 257, 1874, 295, 472, 4985, 1319, 294, 1783, 13, 663, 1355, 264, 1465, 295, 363, 16, 11], "avg_logprob": -0.2201704579320821, "compression_ratio": 1.7214285714285715, "no_speech_prob": 0.0, "words": [{"start": 317.42, "end": 317.6, "word": " So", "probability": 0.87255859375}, {"start": 317.6, "end": 317.86, "word": " if", "probability": 0.7744140625}, {"start": 317.86, "end": 318.08, "word": " X", "probability": 0.339599609375}, {"start": 318.08, "end": 318.24, "word": " is", "probability": 0.8427734375}, {"start": 318.24, "end": 318.54, "word": " 0,", "probability": 0.62646484375}, {"start": 318.72, "end": 318.96, "word": " then", "probability": 0.85498046875}, {"start": 318.96, "end": 319.32, "word": " Y", "probability": 0.85888671875}, {"start": 319.32, "end": 319.56, "word": " hat", 
"probability": 0.8017578125}, {"start": 319.56, "end": 319.9, "word": " equals", "probability": 0.50244140625}, {"start": 319.9, "end": 320.3, "word": " B0.", "probability": 0.648681640625}, {"start": 320.38, "end": 320.58, "word": " That", "probability": 0.88134765625}, {"start": 320.58, "end": 320.98, "word": " means", "probability": 0.919921875}, {"start": 320.98, "end": 322.86, "word": " B0", "probability": 0.81591796875}, {"start": 322.86, "end": 323.04, "word": " is", "probability": 0.93017578125}, {"start": 323.04, "end": 323.2, "word": " the", "probability": 0.79052734375}, {"start": 323.2, "end": 323.7, "word": " estimated", "probability": 0.88623046875}, {"start": 323.7, "end": 324.02, "word": " mean", "probability": 0.943359375}, {"start": 324.02, "end": 324.38, "word": " value", "probability": 0.97119140625}, {"start": 324.38, "end": 324.58, "word": " of", "probability": 0.966796875}, {"start": 324.58, "end": 324.88, "word": " Y", "probability": 0.97021484375}, {"start": 324.88, "end": 325.62, "word": " when", "probability": 0.71240234375}, {"start": 325.62, "end": 326.04, "word": " the", "probability": 0.90478515625}, {"start": 326.04, "end": 326.24, "word": " value", "probability": 0.89599609375}, {"start": 326.24, "end": 326.42, "word": " of", "probability": 0.89404296875}, {"start": 326.42, "end": 326.52, "word": " X", "probability": 0.96826171875}, {"start": 326.52, "end": 326.82, "word": " equals", "probability": 0.84521484375}, {"start": 326.82, "end": 327.16, "word": " 0.", "probability": 0.87744140625}, {"start": 328.54, "end": 329.14, "word": " B1,", "probability": 0.974609375}, {"start": 330.28, "end": 331.22, "word": " which", "probability": 0.642578125}, {"start": 331.22, "end": 331.32, "word": " is", "probability": 0.93994140625}, {"start": 331.32, "end": 331.54, "word": " called", "probability": 0.8427734375}, {"start": 331.54, "end": 331.72, "word": " the", "probability": 0.88525390625}, {"start": 331.72, "end": 332.28, "word": " 
estimated", "probability": 0.892578125}, {"start": 332.28, "end": 332.94, "word": " change", "probability": 0.89306640625}, {"start": 332.94, "end": 333.16, "word": " in", "probability": 0.93603515625}, {"start": 333.16, "end": 333.3, "word": " the", "probability": 0.92041015625}, {"start": 333.3, "end": 333.52, "word": " mean", "probability": 0.9619140625}, {"start": 333.52, "end": 333.9, "word": " value", "probability": 0.91015625}, {"start": 333.9, "end": 334.14, "word": " of", "probability": 0.9658203125}, {"start": 334.14, "end": 334.44, "word": " Y", "probability": 0.98974609375}, {"start": 334.44, "end": 335.02, "word": " as", "probability": 0.6787109375}, {"start": 335.02, "end": 335.16, "word": " a", "probability": 0.98681640625}, {"start": 335.16, "end": 335.5, "word": " result", "probability": 0.9443359375}, {"start": 335.5, "end": 336.56, "word": " of", "probability": 0.94482421875}, {"start": 336.56, "end": 336.88, "word": " one", "probability": 0.814453125}, {"start": 336.88, "end": 337.32, "word": " unit", "probability": 0.94580078125}, {"start": 337.32, "end": 338.3, "word": " change", "probability": 0.2205810546875}, {"start": 338.3, "end": 338.5, "word": " in", "probability": 0.60205078125}, {"start": 338.5, "end": 338.76, "word": " X.", "probability": 0.9521484375}, {"start": 339.64, "end": 340.06, "word": " That", "probability": 0.91259765625}, {"start": 340.06, "end": 340.52, "word": " means", "probability": 0.87939453125}, {"start": 340.52, "end": 341.44, "word": " the", "probability": 0.7353515625}, {"start": 341.44, "end": 341.82, "word": " sign", "probability": 0.56689453125}, {"start": 341.82, "end": 342.0, "word": " of", "probability": 0.953125}, {"start": 342.0, "end": 342.36, "word": " B1,", "probability": 0.985595703125}], "temperature": 1.0}, {"id": 15, "seek": 37264, "start": 348.18, "end": 372.64, "text": " the direction of the relationship between X and Y. So the sine of B1 tells us the exact direction. 
It could be positive if the sine of B1 is positive or negative.", "tokens": [264, 3513, 295, 264, 2480, 1296, 1783, 293, 398, 13, 407, 264, 18609, 295, 363, 16, 5112, 505, 264, 1900, 3513, 13, 467, 727, 312, 3353, 498, 264, 18609, 295, 363, 16, 307, 3353, 420, 3671, 13], "avg_logprob": -0.20775081687851957, "compression_ratio": 1.4553571428571428, "no_speech_prob": 0.0, "words": [{"start": 348.18, "end": 348.92, "word": " the", "probability": 0.29150390625}, {"start": 348.92, "end": 349.66, "word": " direction", "probability": 0.93798828125}, {"start": 349.66, "end": 352.58, "word": " of", "probability": 0.88916015625}, {"start": 352.58, "end": 353.64, "word": " the", "probability": 0.88916015625}, {"start": 353.64, "end": 354.2, "word": " relationship", "probability": 0.908203125}, {"start": 354.2, "end": 354.6, "word": " between", "probability": 0.8935546875}, {"start": 354.6, "end": 354.78, "word": " X", "probability": 0.58740234375}, {"start": 354.78, "end": 354.9, "word": " and", "probability": 0.94091796875}, {"start": 354.9, "end": 355.18, "word": " Y.", "probability": 0.9921875}, {"start": 363.02, "end": 363.76, "word": " So", "probability": 0.916015625}, {"start": 363.76, "end": 364.64, "word": " the", "probability": 0.70166015625}, {"start": 364.64, "end": 364.86, "word": " sine", "probability": 0.339111328125}, {"start": 364.86, "end": 364.98, "word": " of", "probability": 0.93994140625}, {"start": 364.98, "end": 365.34, "word": " B1", "probability": 0.8349609375}, {"start": 365.34, "end": 366.28, "word": " tells", "probability": 0.77880859375}, {"start": 366.28, "end": 366.88, "word": " us", "probability": 0.94287109375}, {"start": 366.88, "end": 368.06, "word": " the", "probability": 0.8994140625}, {"start": 368.06, "end": 368.42, "word": " exact", "probability": 0.9345703125}, {"start": 368.42, "end": 368.9, "word": " direction.", "probability": 0.9541015625}, {"start": 369.06, "end": 369.06, "word": " It", "probability": 0.89892578125}, {"start": 
369.06, "end": 369.22, "word": " could", "probability": 0.87744140625}, {"start": 369.22, "end": 369.38, "word": " be", "probability": 0.93603515625}, {"start": 369.38, "end": 369.72, "word": " positive", "probability": 0.947265625}, {"start": 369.72, "end": 370.02, "word": " if", "probability": 0.86328125}, {"start": 370.02, "end": 370.18, "word": " the", "probability": 0.59033203125}, {"start": 370.18, "end": 370.38, "word": " sine", "probability": 0.92529296875}, {"start": 370.38, "end": 371.0, "word": " of", "probability": 0.57666015625}, {"start": 371.0, "end": 371.22, "word": " B1", "probability": 0.98828125}, {"start": 371.22, "end": 371.34, "word": " is", "probability": 0.94482421875}, {"start": 371.34, "end": 371.8, "word": " positive", "probability": 0.935546875}, {"start": 371.8, "end": 372.3, "word": " or", "probability": 0.685546875}, {"start": 372.3, "end": 372.64, "word": " negative.", "probability": 0.94873046875}], "temperature": 1.0}, {"id": 16, "seek": 40008, "start": 373.48, "end": 400.08, "text": " on the other side. So that's the meaning of B0 and B1. Now first thing we have to do in order to determine if there exists linear relationship between X and Y, we have to draw scatter plot, Y versus X. 
In this specific example, X is the square feet, size of the house is measured by square feet, and house selling price in thousand dollars.", "tokens": [322, 264, 661, 1252, 13, 407, 300, 311, 264, 3620, 295, 363, 15, 293, 363, 16, 13, 823, 700, 551, 321, 362, 281, 360, 294, 1668, 281, 6997, 498, 456, 8198, 8213, 2480, 1296, 1783, 293, 398, 11, 321, 362, 281, 2642, 34951, 7542, 11, 398, 5717, 1783, 13, 682, 341, 2685, 1365, 11, 1783, 307, 264, 3732, 3521, 11, 2744, 295, 264, 1782, 307, 12690, 538, 3732, 3521, 11, 293, 1782, 6511, 3218, 294, 4714, 3808, 13], "avg_logprob": -0.17958860608595836, "compression_ratio": 1.5714285714285714, "no_speech_prob": 0.0, "words": [{"start": 373.48, "end": 373.74, "word": " on", "probability": 0.1884765625}, {"start": 373.74, "end": 373.84, "word": " the", "probability": 0.90771484375}, {"start": 373.84, "end": 374.02, "word": " other", "probability": 0.88720703125}, {"start": 374.02, "end": 374.38, "word": " side.", "probability": 0.873046875}, {"start": 375.26, "end": 375.48, "word": " So", "probability": 0.93017578125}, {"start": 375.48, "end": 375.8, "word": " that's", "probability": 0.882080078125}, {"start": 375.8, "end": 376.18, "word": " the", "probability": 0.92138671875}, {"start": 376.18, "end": 377.04, "word": " meaning", "probability": 0.82421875}, {"start": 377.04, "end": 378.08, "word": " of", "probability": 0.958984375}, {"start": 378.08, "end": 378.42, "word": " B0", "probability": 0.701904296875}, {"start": 378.42, "end": 378.56, "word": " and", "probability": 0.93896484375}, {"start": 378.56, "end": 378.88, "word": " B1.", "probability": 0.995361328125}, {"start": 380.2, "end": 380.46, "word": " Now", "probability": 0.88720703125}, {"start": 380.46, "end": 380.7, "word": " first", "probability": 0.337158203125}, {"start": 380.7, "end": 380.88, "word": " thing", "probability": 0.8251953125}, {"start": 380.88, "end": 381.0, "word": " we", "probability": 0.923828125}, {"start": 381.0, "end": 381.16, "word": " have", 
"probability": 0.94873046875}, {"start": 381.16, "end": 381.28, "word": " to", "probability": 0.96923828125}, {"start": 381.28, "end": 381.5, "word": " do", "probability": 0.95947265625}, {"start": 381.5, "end": 382.04, "word": " in", "probability": 0.8505859375}, {"start": 382.04, "end": 382.26, "word": " order", "probability": 0.9169921875}, {"start": 382.26, "end": 382.48, "word": " to", "probability": 0.9638671875}, {"start": 382.48, "end": 382.9, "word": " determine", "probability": 0.92041015625}, {"start": 382.9, "end": 383.1, "word": " if", "probability": 0.91552734375}, {"start": 383.1, "end": 383.24, "word": " there", "probability": 0.908203125}, {"start": 383.24, "end": 383.6, "word": " exists", "probability": 0.75390625}, {"start": 383.6, "end": 383.98, "word": " linear", "probability": 0.798828125}, {"start": 383.98, "end": 384.5, "word": " relationship", "probability": 0.91650390625}, {"start": 384.5, "end": 384.84, "word": " between", "probability": 0.904296875}, {"start": 384.84, "end": 384.98, "word": " X", "probability": 0.71337890625}, {"start": 384.98, "end": 385.14, "word": " and", "probability": 0.94091796875}, {"start": 385.14, "end": 385.42, "word": " Y,", "probability": 0.9951171875}, {"start": 385.8, "end": 386.34, "word": " we", "probability": 0.947265625}, {"start": 386.34, "end": 386.5, "word": " have", "probability": 0.9462890625}, {"start": 386.5, "end": 386.62, "word": " to", "probability": 0.9658203125}, {"start": 386.62, "end": 386.8, "word": " draw", "probability": 0.88232421875}, {"start": 386.8, "end": 387.22, "word": " scatter", "probability": 0.80517578125}, {"start": 387.22, "end": 387.54, "word": " plot,", "probability": 0.80322265625}, {"start": 387.94, "end": 388.24, "word": " Y", "probability": 0.9814453125}, {"start": 388.24, "end": 388.62, "word": " versus", "probability": 0.849609375}, {"start": 388.62, "end": 389.02, "word": " X.", "probability": 0.994140625}, {"start": 389.7, "end": 389.92, "word": " In", 
"probability": 0.767578125}, {"start": 389.92, "end": 390.12, "word": " this", "probability": 0.94775390625}, {"start": 390.12, "end": 390.62, "word": " specific", "probability": 0.90771484375}, {"start": 390.62, "end": 391.18, "word": " example,", "probability": 0.97412109375}, {"start": 391.4, "end": 391.64, "word": " X", "probability": 0.97216796875}, {"start": 391.64, "end": 391.88, "word": " is", "probability": 0.94580078125}, {"start": 391.88, "end": 392.06, "word": " the", "probability": 0.91748046875}, {"start": 392.06, "end": 392.36, "word": " square", "probability": 0.8994140625}, {"start": 392.36, "end": 392.8, "word": " feet,", "probability": 0.955078125}, {"start": 393.4, "end": 393.94, "word": " size", "probability": 0.81201171875}, {"start": 393.94, "end": 394.28, "word": " of", "probability": 0.958984375}, {"start": 394.28, "end": 394.42, "word": " the", "probability": 0.91650390625}, {"start": 394.42, "end": 394.74, "word": " house", "probability": 0.87890625}, {"start": 394.74, "end": 395.1, "word": " is", "probability": 0.69482421875}, {"start": 395.1, "end": 395.34, "word": " measured", "probability": 0.83740234375}, {"start": 395.34, "end": 395.74, "word": " by", "probability": 0.9658203125}, {"start": 395.74, "end": 396.54, "word": " square", "probability": 0.8857421875}, {"start": 396.54, "end": 396.9, "word": " feet,", "probability": 0.96923828125}, {"start": 397.84, "end": 398.12, "word": " and", "probability": 0.935546875}, {"start": 398.12, "end": 398.46, "word": " house", "probability": 0.81201171875}, {"start": 398.46, "end": 398.76, "word": " selling", "probability": 0.89111328125}, {"start": 398.76, "end": 399.22, "word": " price", "probability": 0.92724609375}, {"start": 399.22, "end": 399.4, "word": " in", "probability": 0.64111328125}, {"start": 399.4, "end": 399.66, "word": " thousand", "probability": 0.53369140625}, {"start": 399.66, "end": 400.08, "word": " dollars.", "probability": 0.9306640625}], "temperature": 1.0}, {"id": 
17, "seek": 42264, "start": 400.8, "end": 422.64, "text": " So we have to draw Y versus X. So house price versus size of the house. Now by looking carefully at this scatter plot, even if it's a small sample size, but you can see that there exists positive relationship between house price and size of the house. The points", "tokens": [407, 321, 362, 281, 2642, 398, 5717, 1783, 13, 407, 1782, 3218, 5717, 2744, 295, 264, 1782, 13, 823, 538, 1237, 7500, 412, 341, 34951, 7542, 11, 754, 498, 309, 311, 257, 1359, 6889, 2744, 11, 457, 291, 393, 536, 300, 456, 8198, 3353, 2480, 1296, 1782, 3218, 293, 2744, 295, 264, 1782, 13, 440, 2793], "avg_logprob": -0.22354714912280702, "compression_ratio": 1.5290697674418605, "no_speech_prob": 0.0, "words": [{"start": 400.8, "end": 401.06, "word": " So", "probability": 0.4560546875}, {"start": 401.06, "end": 401.16, "word": " we", "probability": 0.59912109375}, {"start": 401.16, "end": 401.34, "word": " have", "probability": 0.89794921875}, {"start": 401.34, "end": 401.5, "word": " to", "probability": 0.9658203125}, {"start": 401.5, "end": 401.78, "word": " draw", "probability": 0.90283203125}, {"start": 401.78, "end": 403.22, "word": " Y", "probability": 0.64599609375}, {"start": 403.22, "end": 403.98, "word": " versus", "probability": 0.65771484375}, {"start": 403.98, "end": 404.4, "word": " X.", "probability": 0.98046875}, {"start": 404.98, "end": 405.06, "word": " So", "probability": 0.693359375}, {"start": 405.06, "end": 405.32, "word": " house", "probability": 0.72900390625}, {"start": 405.32, "end": 405.7, "word": " price", "probability": 0.90087890625}, {"start": 405.7, "end": 406.2, "word": " versus", "probability": 0.89501953125}, {"start": 406.2, "end": 406.8, "word": " size", "probability": 0.712890625}, {"start": 406.8, "end": 406.98, "word": " of", "probability": 0.96630859375}, {"start": 406.98, "end": 407.1, "word": " the", "probability": 0.9072265625}, {"start": 407.1, "end": 407.42, "word": " house.", "probability": 
0.8681640625}, {"start": 408.14, "end": 408.3, "word": " Now", "probability": 0.83056640625}, {"start": 408.3, "end": 408.8, "word": " by", "probability": 0.69140625}, {"start": 408.8, "end": 409.14, "word": " looking", "probability": 0.9111328125}, {"start": 409.14, "end": 409.6, "word": " carefully", "probability": 0.798828125}, {"start": 409.6, "end": 409.84, "word": " at", "probability": 0.94140625}, {"start": 409.84, "end": 410.04, "word": " this", "probability": 0.90966796875}, {"start": 410.04, "end": 410.38, "word": " scatter", "probability": 0.91796875}, {"start": 410.38, "end": 410.74, "word": " plot,", "probability": 0.82763671875}, {"start": 411.34, "end": 411.64, "word": " even", "probability": 0.8466796875}, {"start": 411.64, "end": 411.82, "word": " if", "probability": 0.50634765625}, {"start": 411.82, "end": 411.92, "word": " it's", "probability": 0.810791015625}, {"start": 411.92, "end": 411.98, "word": " a", "probability": 0.79931640625}, {"start": 411.98, "end": 412.18, "word": " small", "probability": 0.9521484375}, {"start": 412.18, "end": 412.48, "word": " sample", "probability": 0.751953125}, {"start": 412.48, "end": 412.94, "word": " size,", "probability": 0.869140625}, {"start": 413.36, "end": 413.58, "word": " but", "probability": 0.5537109375}, {"start": 413.58, "end": 413.72, "word": " you", "probability": 0.9599609375}, {"start": 413.72, "end": 413.96, "word": " can", "probability": 0.9501953125}, {"start": 413.96, "end": 414.2, "word": " see", "probability": 0.74072265625}, {"start": 414.2, "end": 414.46, "word": " that", "probability": 0.92919921875}, {"start": 414.46, "end": 414.7, "word": " there", "probability": 0.91748046875}, {"start": 414.7, "end": 415.08, "word": " exists", "probability": 0.8056640625}, {"start": 415.08, "end": 415.62, "word": " positive", "probability": 0.71630859375}, {"start": 415.62, "end": 416.62, "word": " relationship", "probability": 0.90771484375}, {"start": 416.62, "end": 417.16, "word": " between", 
"probability": 0.8837890625}, {"start": 417.16, "end": 418.3, "word": " house", "probability": 0.87158203125}, {"start": 418.3, "end": 418.78, "word": " price", "probability": 0.9130859375}, {"start": 418.78, "end": 419.26, "word": " and", "probability": 0.94091796875}, {"start": 419.26, "end": 420.08, "word": " size", "probability": 0.72802734375}, {"start": 420.08, "end": 420.22, "word": " of", "probability": 0.96484375}, {"start": 420.22, "end": 420.34, "word": " the", "probability": 0.91259765625}, {"start": 420.34, "end": 420.64, "word": " house.", "probability": 0.8779296875}, {"start": 421.9, "end": 422.24, "word": " The", "probability": 0.76025390625}, {"start": 422.24, "end": 422.64, "word": " points", "probability": 0.8828125}], "temperature": 1.0}, {"id": 18, "seek": 45135, "start": 423.75, "end": 451.35, "text": " Maybe they are close little bit to the straight line, it means there exists maybe strong relationship between X and Y. But you can tell the exact strength of the relationship by using the value of R. But here we can tell that there exists positive relationship and that relation could be strong. 
Now simple calculations will give B1 and B0.", "tokens": [2704, 436, 366, 1998, 707, 857, 281, 264, 2997, 1622, 11, 309, 1355, 456, 8198, 1310, 2068, 2480, 1296, 1783, 293, 398, 13, 583, 291, 393, 980, 264, 1900, 3800, 295, 264, 2480, 538, 1228, 264, 2158, 295, 497, 13, 583, 510, 321, 393, 980, 300, 456, 8198, 3353, 2480, 293, 300, 9721, 727, 312, 2068, 13, 823, 2199, 20448, 486, 976, 363, 16, 293, 363, 15, 13], "avg_logprob": -0.1893115985220757, "compression_ratio": 1.7487179487179487, "no_speech_prob": 0.0, "words": [{"start": 423.75, "end": 424.35, "word": " Maybe", "probability": 0.71142578125}, {"start": 424.35, "end": 424.55, "word": " they", "probability": 0.85009765625}, {"start": 424.55, "end": 424.69, "word": " are", "probability": 0.90283203125}, {"start": 424.69, "end": 425.11, "word": " close", "probability": 0.455810546875}, {"start": 425.11, "end": 425.43, "word": " little", "probability": 0.35546875}, {"start": 425.43, "end": 425.63, "word": " bit", "probability": 0.9501953125}, {"start": 425.63, "end": 425.77, "word": " to", "probability": 0.79052734375}, {"start": 425.77, "end": 425.87, "word": " the", "probability": 0.86376953125}, {"start": 425.87, "end": 426.17, "word": " straight", "probability": 0.94140625}, {"start": 426.17, "end": 426.39, "word": " line,", "probability": 0.91357421875}, {"start": 426.55, "end": 426.57, "word": " it", "probability": 0.580078125}, {"start": 426.57, "end": 426.71, "word": " means", "probability": 0.91748046875}, {"start": 426.71, "end": 426.85, "word": " there", "probability": 0.78662109375}, {"start": 426.85, "end": 427.25, "word": " exists", "probability": 0.63720703125}, {"start": 427.25, "end": 427.87, "word": " maybe", "probability": 0.81201171875}, {"start": 427.87, "end": 428.37, "word": " strong", "probability": 0.76513671875}, {"start": 428.37, "end": 429.17, "word": " relationship", "probability": 0.90478515625}, {"start": 429.17, "end": 429.53, "word": " between", "probability": 0.89306640625}, 
{"start": 429.53, "end": 429.71, "word": " X", "probability": 0.54443359375}, {"start": 429.71, "end": 429.85, "word": " and", "probability": 0.93994140625}, {"start": 429.85, "end": 430.07, "word": " Y.", "probability": 0.99609375}, {"start": 430.39, "end": 430.67, "word": " But", "probability": 0.9423828125}, {"start": 430.67, "end": 430.81, "word": " you", "probability": 0.85107421875}, {"start": 430.81, "end": 430.99, "word": " can", "probability": 0.92431640625}, {"start": 430.99, "end": 431.17, "word": " tell", "probability": 0.76416015625}, {"start": 431.17, "end": 431.35, "word": " the", "probability": 0.90380859375}, {"start": 431.35, "end": 431.85, "word": " exact", "probability": 0.9365234375}, {"start": 431.85, "end": 432.41, "word": " strength", "probability": 0.88427734375}, {"start": 432.41, "end": 433.33, "word": " of", "probability": 0.9482421875}, {"start": 433.33, "end": 433.47, "word": " the", "probability": 0.91455078125}, {"start": 433.47, "end": 434.01, "word": " relationship", "probability": 0.9013671875}, {"start": 434.01, "end": 435.39, "word": " by", "probability": 0.90087890625}, {"start": 435.39, "end": 435.75, "word": " using", "probability": 0.93017578125}, {"start": 435.75, "end": 435.91, "word": " the", "probability": 0.916015625}, {"start": 435.91, "end": 436.11, "word": " value", "probability": 0.9765625}, {"start": 436.11, "end": 436.27, "word": " of", "probability": 0.8232421875}, {"start": 436.27, "end": 436.47, "word": " R.", "probability": 0.93701171875}, {"start": 437.41, "end": 437.67, "word": " But", "probability": 0.94189453125}, {"start": 437.67, "end": 437.85, "word": " here", "probability": 0.84814453125}, {"start": 437.85, "end": 438.01, "word": " we", "probability": 0.75537109375}, {"start": 438.01, "end": 438.27, "word": " can", "probability": 0.94775390625}, {"start": 438.27, "end": 438.55, "word": " tell", "probability": 0.8779296875}, {"start": 438.55, "end": 438.83, "word": " that", "probability": 0.90234375}, 
{"start": 438.83, "end": 439.03, "word": " there", "probability": 0.89892578125}, {"start": 439.03, "end": 439.27, "word": " exists", "probability": 0.81982421875}, {"start": 439.27, "end": 439.73, "word": " positive", "probability": 0.9013671875}, {"start": 439.73, "end": 440.33, "word": " relationship", "probability": 0.9169921875}, {"start": 440.33, "end": 441.05, "word": " and", "probability": 0.53125}, {"start": 441.05, "end": 441.29, "word": " that", "probability": 0.9306640625}, {"start": 441.29, "end": 441.61, "word": " relation", "probability": 0.55859375}, {"start": 441.61, "end": 441.95, "word": " could", "probability": 0.8955078125}, {"start": 441.95, "end": 442.29, "word": " be", "probability": 0.95556640625}, {"start": 442.29, "end": 443.25, "word": " strong.", "probability": 0.89306640625}, {"start": 445.73, "end": 446.35, "word": " Now", "probability": 0.95166015625}, {"start": 446.35, "end": 447.53, "word": " simple", "probability": 0.56591796875}, {"start": 447.53, "end": 448.15, "word": " calculations", "probability": 0.91552734375}, {"start": 448.15, "end": 448.51, "word": " will", "probability": 0.87890625}, {"start": 448.51, "end": 448.91, "word": " give", "probability": 0.8876953125}, {"start": 448.91, "end": 449.57, "word": " B1", "probability": 0.858154296875}, {"start": 449.57, "end": 450.83, "word": " and", "probability": 0.91796875}, {"start": 450.83, "end": 451.35, "word": " B0.", "probability": 0.97998046875}], "temperature": 1.0}, {"id": 19, "seek": 47943, "start": 452.21, "end": 479.43, "text": " Suppose we know the values of R, Sy, and Sx. R, if you remember last time, R was 0.762. It's moderate relationship between X and Y. Sy and Sx, 60 divided by 4 is 117. That will give 0.109. 
So B0, in this case, 0.10977, B1.", "tokens": [21360, 321, 458, 264, 4190, 295, 497, 11, 3902, 11, 293, 318, 87, 13, 497, 11, 498, 291, 1604, 1036, 565, 11, 497, 390, 1958, 13, 25026, 17, 13, 467, 311, 18174, 2480, 1296, 1783, 293, 398, 13, 3902, 293, 318, 87, 11, 4060, 6666, 538, 1017, 307, 2975, 22, 13, 663, 486, 976, 1958, 13, 3279, 24, 13, 407, 363, 15, 11, 294, 341, 1389, 11, 1958, 13, 3279, 24, 17512, 11, 363, 16, 13], "avg_logprob": -0.1749188257502271, "compression_ratio": 1.2816091954022988, "no_speech_prob": 0.0, "words": [{"start": 452.21, "end": 452.81, "word": " Suppose", "probability": 0.78076171875}, {"start": 452.81, "end": 453.25, "word": " we", "probability": 0.80908203125}, {"start": 453.25, "end": 453.43, "word": " know", "probability": 0.89697265625}, {"start": 453.43, "end": 453.57, "word": " the", "probability": 0.9189453125}, {"start": 453.57, "end": 453.91, "word": " values", "probability": 0.96435546875}, {"start": 453.91, "end": 454.07, "word": " of", "probability": 0.96435546875}, {"start": 454.07, "end": 454.37, "word": " R,", "probability": 0.7197265625}, {"start": 454.61, "end": 454.85, "word": " Sy,", "probability": 0.564453125}, {"start": 455.03, "end": 455.17, "word": " and", "probability": 0.9453125}, {"start": 455.17, "end": 455.77, "word": " Sx.", "probability": 0.97216796875}, {"start": 456.69, "end": 457.27, "word": " R,", "probability": 0.974609375}, {"start": 457.41, "end": 457.51, "word": " if", "probability": 0.94921875}, {"start": 457.51, "end": 457.61, "word": " you", "probability": 0.9638671875}, {"start": 457.61, "end": 457.87, "word": " remember", "probability": 0.8759765625}, {"start": 457.87, "end": 458.17, "word": " last", "probability": 0.6494140625}, {"start": 458.17, "end": 458.49, "word": " time,", "probability": 0.8740234375}, {"start": 458.55, "end": 458.67, "word": " R", "probability": 0.88037109375}, {"start": 458.67, "end": 459.05, "word": " was", "probability": 0.951171875}, {"start": 459.05, "end": 
459.35, "word": " 0", "probability": 0.6279296875}, {"start": 459.35, "end": 460.37, "word": ".762.", "probability": 0.9278971354166666}, {"start": 460.87, "end": 461.19, "word": " It's", "probability": 0.932861328125}, {"start": 461.19, "end": 461.55, "word": " moderate", "probability": 0.84521484375}, {"start": 461.55, "end": 462.23, "word": " relationship", "probability": 0.9140625}, {"start": 462.23, "end": 462.55, "word": " between", "probability": 0.89404296875}, {"start": 462.55, "end": 462.73, "word": " X", "probability": 0.56396484375}, {"start": 462.73, "end": 462.89, "word": " and", "probability": 0.9443359375}, {"start": 462.89, "end": 463.15, "word": " Y.", "probability": 0.99755859375}, {"start": 463.77, "end": 464.13, "word": " Sy", "probability": 0.91943359375}, {"start": 464.13, "end": 464.35, "word": " and", "probability": 0.904296875}, {"start": 464.35, "end": 464.93, "word": " Sx,", "probability": 0.9794921875}, {"start": 465.85, "end": 466.39, "word": " 60", "probability": 0.75830078125}, {"start": 466.39, "end": 466.81, "word": " divided", "probability": 0.658203125}, {"start": 466.81, "end": 467.05, "word": " by", "probability": 0.9619140625}, {"start": 467.05, "end": 467.33, "word": " 4", "probability": 0.84716796875}, {"start": 467.33, "end": 467.49, "word": " is", "probability": 0.5234375}, {"start": 467.49, "end": 468.27, "word": " 117.", "probability": 0.906494140625}, {"start": 468.59, "end": 469.07, "word": " That", "probability": 0.916015625}, {"start": 469.07, "end": 469.27, "word": " will", "probability": 0.8447265625}, {"start": 469.27, "end": 469.49, "word": " give", "probability": 0.8447265625}, {"start": 469.49, "end": 469.69, "word": " 0", "probability": 0.96044921875}, {"start": 469.69, "end": 470.59, "word": ".109.", "probability": 0.9856770833333334}, {"start": 471.37, "end": 471.75, "word": " So", "probability": 0.96240234375}, {"start": 471.75, "end": 472.35, "word": " B0,", "probability": 0.723388671875}, {"start": 
473.25, "end": 473.53, "word": " in", "probability": 0.927734375}, {"start": 473.53, "end": 473.81, "word": " this", "probability": 0.947265625}, {"start": 473.81, "end": 474.23, "word": " case,", "probability": 0.91162109375}, {"start": 475.27, "end": 476.19, "word": " 0", "probability": 0.96826171875}, {"start": 476.19, "end": 477.91, "word": ".10977,", "probability": 0.965087890625}, {"start": 479.07, "end": 479.43, "word": " B1.", "probability": 0.944091796875}], "temperature": 1.0}, {"id": 20, "seek": 50838, "start": 482.96, "end": 508.38, "text": " B0 equals Y bar minus B1 X bar. B1 is computed in the previous step, so plug that value here. In addition, we know the values of X bar and Y bar. Simple calculation will give the value of B0, which is about 98.25. After computing the values of B0 and B1, we can state", "tokens": [363, 15, 6915, 398, 2159, 3175, 363, 16, 1783, 2159, 13, 363, 16, 307, 40610, 294, 264, 3894, 1823, 11, 370, 5452, 300, 2158, 510, 13, 682, 4500, 11, 321, 458, 264, 4190, 295, 1783, 2159, 293, 398, 2159, 13, 21532, 17108, 486, 976, 264, 2158, 295, 363, 15, 11, 597, 307, 466, 20860, 13, 6074, 13, 2381, 15866, 264, 4190, 295, 363, 15, 293, 363, 16, 11, 321, 393, 1785], "avg_logprob": -0.1519097187038925, "compression_ratio": 1.4725274725274726, "no_speech_prob": 0.0, "words": [{"start": 482.96, "end": 483.52, "word": " B0", "probability": 0.62939453125}, {"start": 483.52, "end": 484.06, "word": " equals", "probability": 0.498779296875}, {"start": 484.06, "end": 484.34, "word": " Y", "probability": 0.75439453125}, {"start": 484.34, "end": 484.56, "word": " bar", "probability": 0.841796875}, {"start": 484.56, "end": 484.86, "word": " minus", "probability": 0.96826171875}, {"start": 484.86, "end": 485.2, "word": " B1", "probability": 0.961669921875}, {"start": 485.2, "end": 485.4, "word": " X", "probability": 0.74267578125}, {"start": 485.4, "end": 485.74, "word": " bar.", "probability": 0.94189453125}, {"start": 486.92, "end": 487.38, "word": 
" B1", "probability": 0.980224609375}, {"start": 487.38, "end": 487.6, "word": " is", "probability": 0.93115234375}, {"start": 487.6, "end": 488.38, "word": " computed", "probability": 0.92822265625}, {"start": 488.38, "end": 488.72, "word": " in", "probability": 0.93798828125}, {"start": 488.72, "end": 488.9, "word": " the", "probability": 0.89599609375}, {"start": 488.9, "end": 489.24, "word": " previous", "probability": 0.8544921875}, {"start": 489.24, "end": 489.7, "word": " step,", "probability": 0.9326171875}, {"start": 490.12, "end": 490.3, "word": " so", "probability": 0.93798828125}, {"start": 490.3, "end": 490.56, "word": " plug", "probability": 0.86669921875}, {"start": 490.56, "end": 490.8, "word": " that", "probability": 0.91845703125}, {"start": 490.8, "end": 491.14, "word": " value", "probability": 0.9697265625}, {"start": 491.14, "end": 491.48, "word": " here.", "probability": 0.8046875}, {"start": 492.38, "end": 492.68, "word": " In", "probability": 0.9443359375}, {"start": 492.68, "end": 493.02, "word": " addition,", "probability": 0.96533203125}, {"start": 493.12, "end": 493.18, "word": " we", "probability": 0.890625}, {"start": 493.18, "end": 493.32, "word": " know", "probability": 0.89306640625}, {"start": 493.32, "end": 493.5, "word": " the", "probability": 0.90380859375}, {"start": 493.5, "end": 493.84, "word": " values", "probability": 0.94580078125}, {"start": 493.84, "end": 494.14, "word": " of", "probability": 0.95849609375}, {"start": 494.14, "end": 494.52, "word": " X", "probability": 0.97314453125}, {"start": 494.52, "end": 494.72, "word": " bar", "probability": 0.94873046875}, {"start": 494.72, "end": 494.88, "word": " and", "probability": 0.93359375}, {"start": 494.88, "end": 495.1, "word": " Y", "probability": 0.99365234375}, {"start": 495.1, "end": 495.44, "word": " bar.", "probability": 0.9560546875}, {"start": 495.98, "end": 496.24, "word": " Simple", "probability": 0.90966796875}, {"start": 496.24, "end": 496.8, "word": " 
calculation", "probability": 0.919921875}, {"start": 496.8, "end": 497.1, "word": " will", "probability": 0.87841796875}, {"start": 497.1, "end": 497.44, "word": " give", "probability": 0.8857421875}, {"start": 497.44, "end": 498.52, "word": " the", "probability": 0.853515625}, {"start": 498.52, "end": 498.78, "word": " value", "probability": 0.97509765625}, {"start": 498.78, "end": 498.9, "word": " of", "probability": 0.39697265625}, {"start": 498.9, "end": 499.32, "word": " B0,", "probability": 0.970947265625}, {"start": 499.4, "end": 499.52, "word": " which", "probability": 0.951171875}, {"start": 499.52, "end": 499.64, "word": " is", "probability": 0.947265625}, {"start": 499.64, "end": 500.0, "word": " about", "probability": 0.8779296875}, {"start": 500.0, "end": 500.76, "word": " 98", "probability": 0.90087890625}, {"start": 500.76, "end": 501.54, "word": ".25.", "probability": 0.85107421875}, {"start": 503.22, "end": 503.86, "word": " After", "probability": 0.86572265625}, {"start": 503.86, "end": 504.44, "word": " computing", "probability": 0.875}, {"start": 504.44, "end": 505.04, "word": " the", "probability": 0.88037109375}, {"start": 505.04, "end": 505.34, "word": " values", "probability": 0.751953125}, {"start": 505.34, "end": 505.44, "word": " of", "probability": 0.8515625}, {"start": 505.44, "end": 505.74, "word": " B0", "probability": 0.959716796875}, {"start": 505.74, "end": 505.9, "word": " and", "probability": 0.94921875}, {"start": 505.9, "end": 507.16, "word": " B1,", "probability": 0.979248046875}, {"start": 507.24, "end": 507.46, "word": " we", "probability": 0.951171875}, {"start": 507.46, "end": 507.78, "word": " can", "probability": 0.94580078125}, {"start": 507.78, "end": 508.38, "word": " state", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 21, "seek": 53550, "start": 509.12, "end": 535.5, "text": " the regression equation by house price, the estimated value of house price. 
Hat in this equation means the estimated or the predicted value of the house price. Equals b0 which is 98 plus b1 which is 0.10977 times square feet. Now here, by using this equation, we can tell number one.", "tokens": [264, 24590, 5367, 538, 1782, 3218, 11, 264, 14109, 2158, 295, 1782, 3218, 13, 15867, 294, 341, 5367, 1355, 264, 14109, 420, 264, 19147, 2158, 295, 264, 1782, 3218, 13, 15624, 1124, 272, 15, 597, 307, 20860, 1804, 272, 16, 597, 307, 1958, 13, 3279, 24, 17512, 1413, 3732, 3521, 13, 823, 510, 11, 538, 1228, 341, 5367, 11, 321, 393, 980, 1230, 472, 13], "avg_logprob": -0.1916429897149404, "compression_ratio": 1.6904761904761905, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 509.12, "end": 509.46, "word": " the", "probability": 0.2646484375}, {"start": 509.46, "end": 510.04, "word": " regression", "probability": 0.9560546875}, {"start": 510.04, "end": 510.6, "word": " equation", "probability": 0.97509765625}, {"start": 510.6, "end": 511.1, "word": " by", "probability": 0.943359375}, {"start": 511.1, "end": 512.0, "word": " house", "probability": 0.471923828125}, {"start": 512.0, "end": 512.4, "word": " price,", "probability": 0.896484375}, {"start": 512.6, "end": 512.7, "word": " the", "probability": 0.89111328125}, {"start": 512.7, "end": 513.16, "word": " estimated", "probability": 0.884765625}, {"start": 513.16, "end": 513.7, "word": " value", "probability": 0.96533203125}, {"start": 513.7, "end": 514.08, "word": " of", "probability": 0.826171875}, {"start": 514.08, "end": 514.36, "word": " house", "probability": 0.86767578125}, {"start": 514.36, "end": 514.84, "word": " price.", "probability": 0.91357421875}, {"start": 515.44, "end": 516.1, "word": " Hat", "probability": 0.642578125}, {"start": 516.1, "end": 517.24, "word": " in", "probability": 0.81982421875}, {"start": 517.24, "end": 517.52, "word": " this", "probability": 0.9482421875}, {"start": 517.52, "end": 518.0, "word": " equation", "probability": 0.97119140625}, {"start": 
518.0, "end": 518.4, "word": " means", "probability": 0.92041015625}, {"start": 518.4, "end": 519.06, "word": " the", "probability": 0.7861328125}, {"start": 519.06, "end": 519.58, "word": " estimated", "probability": 0.86328125}, {"start": 519.58, "end": 519.96, "word": " or", "probability": 0.92333984375}, {"start": 519.96, "end": 520.06, "word": " the", "probability": 0.8837890625}, {"start": 520.06, "end": 520.42, "word": " predicted", "probability": 0.8349609375}, {"start": 520.42, "end": 520.88, "word": " value", "probability": 0.97021484375}, {"start": 520.88, "end": 521.2, "word": " of", "probability": 0.96923828125}, {"start": 521.2, "end": 521.44, "word": " the", "probability": 0.9150390625}, {"start": 521.44, "end": 521.78, "word": " house", "probability": 0.91162109375}, {"start": 521.78, "end": 522.24, "word": " price.", "probability": 0.91845703125}, {"start": 522.54, "end": 523.34, "word": " Equals", "probability": 0.94384765625}, {"start": 523.34, "end": 523.86, "word": " b0", "probability": 0.6368408203125}, {"start": 523.86, "end": 524.04, "word": " which", "probability": 0.59033203125}, {"start": 524.04, "end": 524.14, "word": " is", "probability": 0.94873046875}, {"start": 524.14, "end": 524.66, "word": " 98", "probability": 0.96923828125}, {"start": 524.66, "end": 525.92, "word": " plus", "probability": 0.7041015625}, {"start": 525.92, "end": 526.48, "word": " b1", "probability": 0.98291015625}, {"start": 526.48, "end": 527.68, "word": " which", "probability": 0.68994140625}, {"start": 527.68, "end": 527.86, "word": " is", "probability": 0.943359375}, {"start": 527.86, "end": 528.06, "word": " 0", "probability": 0.89111328125}, {"start": 528.06, "end": 529.26, "word": ".10977", "probability": 0.947021484375}, {"start": 529.26, "end": 529.66, "word": " times", "probability": 0.880859375}, {"start": 529.66, "end": 529.98, "word": " square", "probability": 0.849609375}, {"start": 529.98, "end": 530.3, "word": " feet.", "probability": 
0.9638671875}, {"start": 531.66, "end": 531.86, "word": " Now", "probability": 0.95556640625}, {"start": 531.86, "end": 532.18, "word": " here,", "probability": 0.765625}, {"start": 532.7, "end": 533.08, "word": " by", "probability": 0.97265625}, {"start": 533.08, "end": 533.34, "word": " using", "probability": 0.9296875}, {"start": 533.34, "end": 533.56, "word": " this", "probability": 0.94482421875}, {"start": 533.56, "end": 533.96, "word": " equation,", "probability": 0.97265625}, {"start": 534.08, "end": 534.18, "word": " we", "probability": 0.9521484375}, {"start": 534.18, "end": 534.42, "word": " can", "probability": 0.94873046875}, {"start": 534.42, "end": 534.7, "word": " tell", "probability": 0.90771484375}, {"start": 534.7, "end": 535.22, "word": " number", "probability": 0.85595703125}, {"start": 535.22, "end": 535.5, "word": " one.", "probability": 0.7421875}], "temperature": 1.0}, {"id": 22, "seek": 56355, "start": 536.54, "end": 563.56, "text": " The direction of the relationship between x and y, how surprised and its size. Since the sign is positive, it means there exists positive associations or relationship between these two variables, number one. Number two, we can interpret carefully the meaning of the intercept. 
Now, as we mentioned before, y hat equals b zero only if x equals zero.", "tokens": [440, 3513, 295, 264, 2480, 1296, 2031, 293, 288, 11, 577, 6100, 293, 1080, 2744, 13, 4162, 264, 1465, 307, 3353, 11, 309, 1355, 456, 8198, 3353, 26597, 420, 2480, 1296, 613, 732, 9102, 11, 1230, 472, 13, 5118, 732, 11, 321, 393, 7302, 7500, 264, 3620, 295, 264, 24700, 13, 823, 11, 382, 321, 2835, 949, 11, 288, 2385, 6915, 272, 4018, 787, 498, 2031, 6915, 4018, 13], "avg_logprob": -0.19743304188762392, "compression_ratio": 1.6540284360189574, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 536.54, "end": 536.94, "word": " The", "probability": 0.61767578125}, {"start": 536.94, "end": 537.5, "word": " direction", "probability": 0.9599609375}, {"start": 537.5, "end": 537.74, "word": " of", "probability": 0.9599609375}, {"start": 537.74, "end": 537.84, "word": " the", "probability": 0.8544921875}, {"start": 537.84, "end": 538.28, "word": " relationship", "probability": 0.900390625}, {"start": 538.28, "end": 538.82, "word": " between", "probability": 0.90087890625}, {"start": 538.82, "end": 540.8, "word": " x", "probability": 0.33984375}, {"start": 540.8, "end": 540.96, "word": " and", "probability": 0.935546875}, {"start": 540.96, "end": 541.18, "word": " y,", "probability": 0.98779296875}, {"start": 541.3, "end": 541.42, "word": " how", "probability": 0.59716796875}, {"start": 541.42, "end": 541.78, "word": " surprised", "probability": 0.71826171875}, {"start": 541.78, "end": 542.02, "word": " and", "probability": 0.61474609375}, {"start": 542.02, "end": 542.26, "word": " its", "probability": 0.64306640625}, {"start": 542.26, "end": 542.64, "word": " size.", "probability": 0.859375}, {"start": 543.14, "end": 543.62, "word": " Since", "probability": 0.828125}, {"start": 543.62, "end": 543.86, "word": " the", "probability": 0.90869140625}, {"start": 543.86, "end": 544.08, "word": " sign", "probability": 0.865234375}, {"start": 544.08, "end": 544.24, "word": " is", 
"probability": 0.9462890625}, {"start": 544.24, "end": 544.6, "word": " positive,", "probability": 0.92578125}, {"start": 544.84, "end": 544.9, "word": " it", "probability": 0.9287109375}, {"start": 544.9, "end": 545.3, "word": " means", "probability": 0.93115234375}, {"start": 545.3, "end": 545.6, "word": " there", "probability": 0.818359375}, {"start": 545.6, "end": 545.9, "word": " exists", "probability": 0.6064453125}, {"start": 545.9, "end": 546.34, "word": " positive", "probability": 0.85888671875}, {"start": 546.34, "end": 547.04, "word": " associations", "probability": 0.63623046875}, {"start": 547.04, "end": 548.04, "word": " or", "probability": 0.75537109375}, {"start": 548.04, "end": 548.6, "word": " relationship", "probability": 0.7822265625}, {"start": 548.6, "end": 549.0, "word": " between", "probability": 0.8671875}, {"start": 549.0, "end": 549.22, "word": " these", "probability": 0.859375}, {"start": 549.22, "end": 549.42, "word": " two", "probability": 0.89013671875}, {"start": 549.42, "end": 549.88, "word": " variables,", "probability": 0.9501953125}, {"start": 550.26, "end": 550.38, "word": " number", "probability": 0.9267578125}, {"start": 550.38, "end": 550.6, "word": " one.", "probability": 0.8046875}, {"start": 550.98, "end": 551.32, "word": " Number", "probability": 0.85986328125}, {"start": 551.32, "end": 551.66, "word": " two,", "probability": 0.92138671875}, {"start": 552.26, "end": 552.42, "word": " we", "probability": 0.95361328125}, {"start": 552.42, "end": 552.64, "word": " can", "probability": 0.939453125}, {"start": 552.64, "end": 553.12, "word": " interpret", "probability": 0.900390625}, {"start": 553.12, "end": 554.42, "word": " carefully", "probability": 0.8193359375}, {"start": 554.42, "end": 555.82, "word": " the", "probability": 0.79052734375}, {"start": 555.82, "end": 556.26, "word": " meaning", "probability": 0.865234375}, {"start": 556.26, "end": 556.92, "word": " of", "probability": 0.95654296875}, {"start": 556.92, "end": 
557.06, "word": " the", "probability": 0.92236328125}, {"start": 557.06, "end": 557.44, "word": " intercept.", "probability": 0.97265625}, {"start": 558.66, "end": 559.18, "word": " Now,", "probability": 0.95263671875}, {"start": 559.28, "end": 559.5, "word": " as", "probability": 0.96435546875}, {"start": 559.5, "end": 559.62, "word": " we", "probability": 0.95947265625}, {"start": 559.62, "end": 559.9, "word": " mentioned", "probability": 0.82666015625}, {"start": 559.9, "end": 560.36, "word": " before,", "probability": 0.8564453125}, {"start": 560.84, "end": 561.06, "word": " y", "probability": 0.92138671875}, {"start": 561.06, "end": 561.34, "word": " hat", "probability": 0.8232421875}, {"start": 561.34, "end": 561.62, "word": " equals", "probability": 0.49267578125}, {"start": 561.62, "end": 561.8, "word": " b", "probability": 0.68798828125}, {"start": 561.8, "end": 562.08, "word": " zero", "probability": 0.7412109375}, {"start": 562.08, "end": 562.44, "word": " only", "probability": 0.841796875}, {"start": 562.44, "end": 562.66, "word": " if", "probability": 0.93798828125}, {"start": 562.66, "end": 562.9, "word": " x", "probability": 0.9853515625}, {"start": 562.9, "end": 563.22, "word": " equals", "probability": 0.91162109375}, {"start": 563.22, "end": 563.56, "word": " zero.", "probability": 0.82177734375}], "temperature": 1.0}, {"id": 23, "seek": 57984, "start": 564.86, "end": 579.84, "text": " Now there is no sense about square feet of zero because we don't have a size of a house to be zero. 
But the slope here is 0.109, it has sense because as the size of the house increased by one unit.", "tokens": [823, 456, 307, 572, 2020, 466, 3732, 3521, 295, 4018, 570, 321, 500, 380, 362, 257, 2744, 295, 257, 1782, 281, 312, 4018, 13, 583, 264, 13525, 510, 307, 1958, 13, 3279, 24, 11, 309, 575, 2020, 570, 382, 264, 2744, 295, 264, 1782, 6505, 538, 472, 4985, 13], "avg_logprob": -0.19265624940395354, "compression_ratio": 1.4666666666666666, "no_speech_prob": 0.0, "words": [{"start": 564.86, "end": 565.18, "word": " Now", "probability": 0.86865234375}, {"start": 565.18, "end": 565.44, "word": " there", "probability": 0.53466796875}, {"start": 565.44, "end": 565.6, "word": " is", "probability": 0.755859375}, {"start": 565.6, "end": 565.88, "word": " no", "probability": 0.9521484375}, {"start": 565.88, "end": 566.24, "word": " sense", "probability": 0.83740234375}, {"start": 566.24, "end": 566.62, "word": " about", "probability": 0.85546875}, {"start": 566.62, "end": 567.02, "word": " square", "probability": 0.63916015625}, {"start": 567.02, "end": 567.28, "word": " feet", "probability": 0.95166015625}, {"start": 567.28, "end": 567.44, "word": " of", "probability": 0.96435546875}, {"start": 567.44, "end": 567.74, "word": " zero", "probability": 0.75830078125}, {"start": 567.74, "end": 568.44, "word": " because", "probability": 0.525390625}, {"start": 568.44, "end": 568.9, "word": " we", "probability": 0.94287109375}, {"start": 568.9, "end": 569.08, "word": " don't", "probability": 0.923828125}, {"start": 569.08, "end": 569.32, "word": " have", "probability": 0.93994140625}, {"start": 569.32, "end": 569.52, "word": " a", "probability": 0.947265625}, {"start": 569.52, "end": 569.9, "word": " size", "probability": 0.80908203125}, {"start": 569.9, "end": 570.14, "word": " of", "probability": 0.958984375}, {"start": 570.14, "end": 570.22, "word": " a", "probability": 0.97412109375}, {"start": 570.22, "end": 570.44, "word": " house", "probability": 0.87646484375}, 
{"start": 570.44, "end": 570.6, "word": " to", "probability": 0.96630859375}, {"start": 570.6, "end": 570.72, "word": " be", "probability": 0.95068359375}, {"start": 570.72, "end": 571.0, "word": " zero.", "probability": 0.8828125}, {"start": 572.14, "end": 572.66, "word": " But", "probability": 0.927734375}, {"start": 572.66, "end": 572.96, "word": " the", "probability": 0.89453125}, {"start": 572.96, "end": 573.28, "word": " slope", "probability": 0.880859375}, {"start": 573.28, "end": 573.48, "word": " here", "probability": 0.8544921875}, {"start": 573.48, "end": 573.62, "word": " is", "probability": 0.93798828125}, {"start": 573.62, "end": 573.78, "word": " 0", "probability": 0.73095703125}, {"start": 573.78, "end": 574.6, "word": ".109,", "probability": 0.9825846354166666}, {"start": 574.72, "end": 574.8, "word": " it", "probability": 0.880859375}, {"start": 574.8, "end": 575.0, "word": " has", "probability": 0.9228515625}, {"start": 575.0, "end": 575.44, "word": " sense", "probability": 0.6171875}, {"start": 575.44, "end": 575.98, "word": " because", "probability": 0.634765625}, {"start": 575.98, "end": 577.06, "word": " as", "probability": 0.86279296875}, {"start": 577.06, "end": 577.88, "word": " the", "probability": 0.91796875}, {"start": 577.88, "end": 578.22, "word": " size", "probability": 0.8583984375}, {"start": 578.22, "end": 578.34, "word": " of", "probability": 0.9609375}, {"start": 578.34, "end": 578.46, "word": " the", "probability": 0.89892578125}, {"start": 578.46, "end": 578.68, "word": " house", "probability": 0.8662109375}, {"start": 578.68, "end": 579.06, "word": " increased", "probability": 0.55712890625}, {"start": 579.06, "end": 579.26, "word": " by", "probability": 0.96728515625}, {"start": 579.26, "end": 579.48, "word": " one", "probability": 0.81396484375}, {"start": 579.48, "end": 579.84, "word": " unit.", "probability": 0.9541015625}], "temperature": 1.0}, {"id": 24, "seek": 61049, "start": 580.95, "end": 610.49, "text": " it's 
selling price increased by this amount 0.109 but here you have to be careful to multiply this value by a thousand because the data is given in thousand dollars for Y so here as the size of the house increased by one unit by one feet one square feet it's selling price increases by this amount 0.10977 should be multiplied by a thousand so around", "tokens": [309, 311, 6511, 3218, 6505, 538, 341, 2372, 1958, 13, 3279, 24, 457, 510, 291, 362, 281, 312, 5026, 281, 12972, 341, 2158, 538, 257, 4714, 570, 264, 1412, 307, 2212, 294, 4714, 3808, 337, 398, 370, 510, 382, 264, 2744, 295, 264, 1782, 6505, 538, 472, 4985, 538, 472, 3521, 472, 3732, 3521, 309, 311, 6511, 3218, 8637, 538, 341, 2372, 1958, 13, 3279, 24, 17512, 820, 312, 17207, 538, 257, 4714, 370, 926], "avg_logprob": -0.18914473292074704, "compression_ratio": 1.8571428571428572, "no_speech_prob": 0.039459228515625, "words": [{"start": 580.95, "end": 581.45, "word": " it's", "probability": 0.501190185546875}, {"start": 581.45, "end": 582.03, "word": " selling", "probability": 0.87158203125}, {"start": 582.03, "end": 582.57, "word": " price", "probability": 0.91162109375}, {"start": 582.57, "end": 583.19, "word": " increased", "probability": 0.70068359375}, {"start": 583.19, "end": 583.49, "word": " by", "probability": 0.97607421875}, {"start": 583.49, "end": 583.77, "word": " this", "probability": 0.90673828125}, {"start": 583.77, "end": 584.17, "word": " amount", "probability": 0.884765625}, {"start": 584.17, "end": 584.61, "word": " 0", "probability": 0.437744140625}, {"start": 584.61, "end": 585.53, "word": ".109", "probability": 0.9713541666666666}, {"start": 585.53, "end": 586.29, "word": " but", "probability": 0.57080078125}, {"start": 586.29, "end": 586.57, "word": " here", "probability": 0.841796875}, {"start": 586.57, "end": 587.09, "word": " you", "probability": 0.9453125}, {"start": 587.09, "end": 587.27, "word": " have", "probability": 0.951171875}, {"start": 587.27, "end": 587.35, "word": " to", 
"probability": 0.9755859375}, {"start": 587.35, "end": 587.51, "word": " be", "probability": 0.955078125}, {"start": 587.51, "end": 587.77, "word": " careful", "probability": 0.9599609375}, {"start": 587.77, "end": 588.01, "word": " to", "probability": 0.96337890625}, {"start": 588.01, "end": 588.37, "word": " multiply", "probability": 0.8994140625}, {"start": 588.37, "end": 588.73, "word": " this", "probability": 0.94287109375}, {"start": 588.73, "end": 588.99, "word": " value", "probability": 0.96826171875}, {"start": 588.99, "end": 589.21, "word": " by", "probability": 0.97509765625}, {"start": 589.21, "end": 589.39, "word": " a", "probability": 0.724609375}, {"start": 589.39, "end": 589.71, "word": " thousand", "probability": 0.76806640625}, {"start": 589.71, "end": 590.73, "word": " because", "probability": 0.7744140625}, {"start": 590.73, "end": 591.01, "word": " the", "probability": 0.9306640625}, {"start": 591.01, "end": 591.35, "word": " data", "probability": 0.951171875}, {"start": 591.35, "end": 592.03, "word": " is", "probability": 0.955078125}, {"start": 592.03, "end": 592.35, "word": " given", "probability": 0.88671875}, {"start": 592.35, "end": 592.61, "word": " in", "probability": 0.9404296875}, {"start": 592.61, "end": 592.89, "word": " thousand", "probability": 0.86279296875}, {"start": 592.89, "end": 593.27, "word": " dollars", "probability": 0.93212890625}, {"start": 593.27, "end": 593.53, "word": " for", "probability": 0.6015625}, {"start": 593.53, "end": 593.85, "word": " Y", "probability": 0.407470703125}, {"start": 593.85, "end": 594.75, "word": " so", "probability": 0.314697265625}, {"start": 594.75, "end": 595.05, "word": " here", "probability": 0.8427734375}, {"start": 595.05, "end": 595.59, "word": " as", "probability": 0.92236328125}, {"start": 595.59, "end": 596.29, "word": " the", "probability": 0.92138671875}, {"start": 596.29, "end": 596.57, "word": " size", "probability": 0.86962890625}, {"start": 596.57, "end": 596.71, "word": " 
of", "probability": 0.96728515625}, {"start": 596.71, "end": 596.83, "word": " the", "probability": 0.9189453125}, {"start": 596.83, "end": 597.07, "word": " house", "probability": 0.87841796875}, {"start": 597.07, "end": 597.47, "word": " increased", "probability": 0.86083984375}, {"start": 597.47, "end": 597.69, "word": " by", "probability": 0.97119140625}, {"start": 597.69, "end": 597.95, "word": " one", "probability": 0.89404296875}, {"start": 597.95, "end": 598.31, "word": " unit", "probability": 0.9609375}, {"start": 598.31, "end": 598.67, "word": " by", "probability": 0.9208984375}, {"start": 598.67, "end": 599.37, "word": " one", "probability": 0.91015625}, {"start": 599.37, "end": 599.73, "word": " feet", "probability": 0.7744140625}, {"start": 599.73, "end": 600.09, "word": " one", "probability": 0.87353515625}, {"start": 600.09, "end": 600.59, "word": " square", "probability": 0.92236328125}, {"start": 600.59, "end": 601.03, "word": " feet", "probability": 0.96484375}, {"start": 601.03, "end": 601.91, "word": " it's", "probability": 0.76611328125}, {"start": 601.91, "end": 602.15, "word": " selling", "probability": 0.86376953125}, {"start": 602.15, "end": 602.65, "word": " price", "probability": 0.9267578125}, {"start": 602.65, "end": 603.23, "word": " increases", "probability": 0.94970703125}, {"start": 603.23, "end": 603.71, "word": " by", "probability": 0.9716796875}, {"start": 603.71, "end": 604.63, "word": " this", "probability": 0.94873046875}, {"start": 604.63, "end": 604.97, "word": " amount", "probability": 0.90234375}, {"start": 604.97, "end": 605.31, "word": " 0", "probability": 0.75927734375}, {"start": 605.31, "end": 606.69, "word": ".10977", "probability": 0.9501953125}, {"start": 606.69, "end": 607.81, "word": " should", "probability": 0.8779296875}, {"start": 607.81, "end": 608.07, "word": " be", "probability": 0.95361328125}, {"start": 608.07, "end": 608.47, "word": " multiplied", "probability": 0.7158203125}, {"start": 608.47, "end": 
608.79, "word": " by", "probability": 0.9716796875}, {"start": 608.79, "end": 608.97, "word": " a", "probability": 0.90283203125}, {"start": 608.97, "end": 609.25, "word": " thousand", "probability": 0.84326171875}, {"start": 609.25, "end": 610.11, "word": " so", "probability": 0.8291015625}, {"start": 610.11, "end": 610.49, "word": " around", "probability": 0.93798828125}], "temperature": 1.0}, {"id": 25, "seek": 62662, "start": 611.04, "end": 626.62, "text": " $109.77. So that means extra one square feet for the size of the house, it cost you around $100 or $110.", "tokens": [1848, 3279, 24, 13, 17512, 13, 407, 300, 1355, 2857, 472, 3732, 3521, 337, 264, 2744, 295, 264, 1782, 11, 309, 2063, 291, 926, 1848, 6879, 420, 1848, 43435, 13], "avg_logprob": -0.2696572676781685, "compression_ratio": 1.09375, "no_speech_prob": 0.0, "words": [{"start": 611.04, "end": 612.6, "word": " $109", "probability": 0.6735432942708334}, {"start": 612.6, "end": 613.34, "word": ".77.", "probability": 0.96337890625}, {"start": 613.92, "end": 614.42, "word": " So", "probability": 0.87060546875}, {"start": 614.42, "end": 614.66, "word": " that", "probability": 0.8564453125}, {"start": 614.66, "end": 615.04, "word": " means", "probability": 0.9375}, {"start": 615.04, "end": 616.74, "word": " extra", "probability": 0.73388671875}, {"start": 616.74, "end": 617.94, "word": " one", "probability": 0.74072265625}, {"start": 617.94, "end": 618.56, "word": " square", "probability": 0.91845703125}, {"start": 618.56, "end": 619.02, "word": " feet", "probability": 0.90234375}, {"start": 619.02, "end": 620.3, "word": " for", "probability": 0.72509765625}, {"start": 620.3, "end": 620.52, "word": " the", "probability": 0.91796875}, {"start": 620.52, "end": 620.76, "word": " size", "probability": 0.85595703125}, {"start": 620.76, "end": 620.88, "word": " of", "probability": 0.96728515625}, {"start": 620.88, "end": 621.0, "word": " the", "probability": 0.916015625}, {"start": 621.0, "end": 621.4, "word": " 
house,", "probability": 0.8701171875}, {"start": 621.76, "end": 622.66, "word": " it", "probability": 0.9169921875}, {"start": 622.66, "end": 623.06, "word": " cost", "probability": 0.454833984375}, {"start": 623.06, "end": 623.42, "word": " you", "probability": 0.9599609375}, {"start": 623.42, "end": 624.04, "word": " around", "probability": 0.9228515625}, {"start": 624.04, "end": 625.14, "word": " $100", "probability": 0.794921875}, {"start": 625.14, "end": 626.02, "word": " or", "probability": 0.869140625}, {"start": 626.02, "end": 626.62, "word": " $110.", "probability": 0.79345703125}], "temperature": 1.0}, {"id": 26, "seek": 65696, "start": 628.54, "end": 656.96, "text": " So that's the meaning of B1 and the sign actually of the slope. In addition to that, we can make some predictions about house price for any given value of the size of the house. That means if you know that the house size equals 2,000 square feet. So just plug this value here and simple calculation will give the predicted value of the ceiling price of a house.", "tokens": [407, 300, 311, 264, 3620, 295, 363, 16, 293, 264, 1465, 767, 295, 264, 13525, 13, 682, 4500, 281, 300, 11, 321, 393, 652, 512, 21264, 466, 1782, 3218, 337, 604, 2212, 2158, 295, 264, 2744, 295, 264, 1782, 13, 663, 1355, 498, 291, 458, 300, 264, 1782, 2744, 6915, 568, 11, 1360, 3732, 3521, 13, 407, 445, 5452, 341, 2158, 510, 293, 2199, 17108, 486, 976, 264, 19147, 2158, 295, 264, 13655, 3218, 295, 257, 1782, 13], "avg_logprob": -0.16307357519487792, "compression_ratio": 1.6682027649769586, "no_speech_prob": 0.0, "words": [{"start": 628.54, "end": 628.8, "word": " So", "probability": 0.8994140625}, {"start": 628.8, "end": 629.04, "word": " that's", "probability": 0.8740234375}, {"start": 629.04, "end": 629.16, "word": " the", "probability": 0.919921875}, {"start": 629.16, "end": 629.42, "word": " meaning", "probability": 0.86474609375}, {"start": 629.42, "end": 629.68, "word": " of", "probability": 0.96337890625}, {"start": 
629.68, "end": 630.62, "word": " B1", "probability": 0.650390625}, {"start": 630.62, "end": 630.8, "word": " and", "probability": 0.65625}, {"start": 630.8, "end": 630.96, "word": " the", "probability": 0.79638671875}, {"start": 630.96, "end": 631.28, "word": " sign", "probability": 0.794921875}, {"start": 631.28, "end": 631.88, "word": " actually", "probability": 0.68408203125}, {"start": 631.88, "end": 632.44, "word": " of", "probability": 0.9375}, {"start": 632.44, "end": 633.02, "word": " the", "probability": 0.8798828125}, {"start": 633.02, "end": 633.5, "word": " slope.", "probability": 0.82958984375}, {"start": 634.16, "end": 634.32, "word": " In", "probability": 0.9609375}, {"start": 634.32, "end": 634.62, "word": " addition", "probability": 0.9521484375}, {"start": 634.62, "end": 634.86, "word": " to", "probability": 0.96630859375}, {"start": 634.86, "end": 635.06, "word": " that,", "probability": 0.9375}, {"start": 635.14, "end": 635.22, "word": " we", "probability": 0.9462890625}, {"start": 635.22, "end": 635.44, "word": " can", "probability": 0.93994140625}, {"start": 635.44, "end": 635.66, "word": " make", "probability": 0.94287109375}, {"start": 635.66, "end": 635.9, "word": " some", "probability": 0.71044921875}, {"start": 635.9, "end": 636.42, "word": " predictions", "probability": 0.822265625}, {"start": 636.42, "end": 637.16, "word": " about", "probability": 0.90087890625}, {"start": 637.16, "end": 637.96, "word": " house", "probability": 0.83740234375}, {"start": 637.96, "end": 638.42, "word": " price", "probability": 0.9091796875}, {"start": 638.42, "end": 639.34, "word": " for", "probability": 0.876953125}, {"start": 639.34, "end": 639.64, "word": " any", "probability": 0.90283203125}, {"start": 639.64, "end": 639.94, "word": " given", "probability": 0.88818359375}, {"start": 639.94, "end": 640.34, "word": " value", "probability": 0.96923828125}, {"start": 640.34, "end": 640.94, "word": " of", "probability": 0.953125}, {"start": 640.94, "end": 
641.38, "word": " the", "probability": 0.88525390625}, {"start": 641.38, "end": 641.64, "word": " size", "probability": 0.85791015625}, {"start": 641.64, "end": 641.76, "word": " of", "probability": 0.962890625}, {"start": 641.76, "end": 641.9, "word": " the", "probability": 0.87060546875}, {"start": 641.9, "end": 642.2, "word": " house.", "probability": 0.86962890625}, {"start": 642.6, "end": 642.9, "word": " That", "probability": 0.91015625}, {"start": 642.9, "end": 643.16, "word": " means", "probability": 0.92626953125}, {"start": 643.16, "end": 643.32, "word": " if", "probability": 0.81787109375}, {"start": 643.32, "end": 643.44, "word": " you", "probability": 0.94482421875}, {"start": 643.44, "end": 643.58, "word": " know", "probability": 0.88134765625}, {"start": 643.58, "end": 643.82, "word": " that", "probability": 0.916015625}, {"start": 643.82, "end": 644.1, "word": " the", "probability": 0.888671875}, {"start": 644.1, "end": 644.52, "word": " house", "probability": 0.884765625}, {"start": 644.52, "end": 645.72, "word": " size", "probability": 0.79931640625}, {"start": 645.72, "end": 646.26, "word": " equals", "probability": 0.9111328125}, {"start": 646.26, "end": 646.52, "word": " 2", "probability": 0.4013671875}, {"start": 646.52, "end": 646.94, "word": ",000", "probability": 0.97412109375}, {"start": 646.94, "end": 647.44, "word": " square", "probability": 0.8564453125}, {"start": 647.44, "end": 647.86, "word": " feet.", "probability": 0.96484375}, {"start": 648.24, "end": 648.44, "word": " So", "probability": 0.95556640625}, {"start": 648.44, "end": 648.7, "word": " just", "probability": 0.75390625}, {"start": 648.7, "end": 648.94, "word": " plug", "probability": 0.60595703125}, {"start": 648.94, "end": 649.18, "word": " this", "probability": 0.9326171875}, {"start": 649.18, "end": 649.46, "word": " value", "probability": 0.97509765625}, {"start": 649.46, "end": 649.78, "word": " here", "probability": 0.4970703125}, {"start": 649.78, "end": 650.58, 
"word": " and", "probability": 0.494384765625}, {"start": 650.58, "end": 650.94, "word": " simple", "probability": 0.7919921875}, {"start": 650.94, "end": 652.18, "word": " calculation", "probability": 0.94140625}, {"start": 652.18, "end": 652.62, "word": " will", "probability": 0.85546875}, {"start": 652.62, "end": 652.9, "word": " give", "probability": 0.88232421875}, {"start": 652.9, "end": 653.08, "word": " the", "probability": 0.912109375}, {"start": 653.08, "end": 653.5, "word": " predicted", "probability": 0.86328125}, {"start": 653.5, "end": 654.1, "word": " value", "probability": 0.96533203125}, {"start": 654.1, "end": 655.14, "word": " of", "probability": 0.94482421875}, {"start": 655.14, "end": 655.46, "word": " the", "probability": 0.826171875}, {"start": 655.46, "end": 655.66, "word": " ceiling", "probability": 0.485107421875}, {"start": 655.66, "end": 656.26, "word": " price", "probability": 0.94140625}, {"start": 656.26, "end": 656.72, "word": " of", "probability": 0.95849609375}, {"start": 656.72, "end": 656.8, "word": " a", "probability": 0.8466796875}, {"start": 656.8, "end": 656.96, "word": " house.", "probability": 0.8828125}], "temperature": 1.0}, {"id": 27, "seek": 67715, "start": 657.21, "end": 677.15, "text": " That's the whole story for the simple linear regression. In other words, we have this equation, so the interpretation of B0 again. B0 is the estimated mean value of Y when the value of X is 0. 
That means if X is 0,", "tokens": [663, 311, 264, 1379, 1657, 337, 264, 2199, 8213, 24590, 13, 682, 661, 2283, 11, 321, 362, 341, 5367, 11, 370, 264, 14174, 295, 363, 15, 797, 13, 363, 15, 307, 264, 14109, 914, 2158, 295, 398, 562, 264, 2158, 295, 1783, 307, 1958, 13, 663, 1355, 498, 1783, 307, 1958, 11], "avg_logprob": -0.18425707659631405, "compression_ratio": 1.4527027027027026, "no_speech_prob": 0.0, "words": [{"start": 657.21, "end": 657.75, "word": " That's", "probability": 0.830322265625}, {"start": 657.75, "end": 657.99, "word": " the", "probability": 0.9169921875}, {"start": 657.99, "end": 658.23, "word": " whole", "probability": 0.875}, {"start": 658.23, "end": 658.73, "word": " story", "probability": 0.93408203125}, {"start": 658.73, "end": 659.15, "word": " for", "probability": 0.88232421875}, {"start": 659.15, "end": 659.89, "word": " the", "probability": 0.8701171875}, {"start": 659.89, "end": 660.53, "word": " simple", "probability": 0.84912109375}, {"start": 660.53, "end": 661.15, "word": " linear", "probability": 0.9208984375}, {"start": 661.15, "end": 661.63, "word": " regression.", "probability": 0.705078125}, {"start": 663.19, "end": 663.71, "word": " In", "probability": 0.9375}, {"start": 663.71, "end": 663.95, "word": " other", "probability": 0.896484375}, {"start": 663.95, "end": 664.35, "word": " words,", "probability": 0.8720703125}, {"start": 664.95, "end": 665.45, "word": " we", "probability": 0.94189453125}, {"start": 665.45, "end": 665.61, "word": " have", "probability": 0.9453125}, {"start": 665.61, "end": 665.77, "word": " this", "probability": 0.94091796875}, {"start": 665.77, "end": 666.21, "word": " equation,", "probability": 0.951171875}, {"start": 667.51, "end": 667.85, "word": " so", "probability": 0.9111328125}, {"start": 667.85, "end": 668.03, "word": " the", "probability": 0.8779296875}, {"start": 668.03, "end": 668.67, "word": " interpretation", "probability": 0.9169921875}, {"start": 668.67, "end": 669.13, "word": " of", 
"probability": 0.9384765625}, {"start": 669.13, "end": 669.51, "word": " B0", "probability": 0.659912109375}, {"start": 669.51, "end": 669.89, "word": " again.", "probability": 0.85205078125}, {"start": 671.49, "end": 671.97, "word": " B0", "probability": 0.970703125}, {"start": 671.97, "end": 672.11, "word": " is", "probability": 0.94775390625}, {"start": 672.11, "end": 672.29, "word": " the", "probability": 0.89501953125}, {"start": 672.29, "end": 672.69, "word": " estimated", "probability": 0.86962890625}, {"start": 672.69, "end": 672.93, "word": " mean", "probability": 0.96142578125}, {"start": 672.93, "end": 673.21, "word": " value", "probability": 0.9736328125}, {"start": 673.21, "end": 673.39, "word": " of", "probability": 0.96240234375}, {"start": 673.39, "end": 673.67, "word": " Y", "probability": 0.54541015625}, {"start": 673.67, "end": 674.01, "word": " when", "probability": 0.7978515625}, {"start": 674.01, "end": 674.15, "word": " the", "probability": 0.89892578125}, {"start": 674.15, "end": 674.37, "word": " value", "probability": 0.978515625}, {"start": 674.37, "end": 674.51, "word": " of", "probability": 0.94677734375}, {"start": 674.51, "end": 674.63, "word": " X", "probability": 0.94384765625}, {"start": 674.63, "end": 674.81, "word": " is", "probability": 0.92919921875}, {"start": 674.81, "end": 675.11, "word": " 0.", "probability": 0.52783203125}, {"start": 675.79, "end": 676.11, "word": " That", "probability": 0.90234375}, {"start": 676.11, "end": 676.39, "word": " means", "probability": 0.93603515625}, {"start": 676.39, "end": 676.57, "word": " if", "probability": 0.810546875}, {"start": 676.57, "end": 676.77, "word": " X", "probability": 0.9716796875}, {"start": 676.77, "end": 676.85, "word": " is", "probability": 0.736328125}, {"start": 676.85, "end": 677.15, "word": " 0,", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 28, "seek": 69168, "start": 678.12, "end": 691.68, "text": " in this range of the observed X-values. 
That's the meaning of the B0. But again, because a house cannot have a square footage of zero, so B0 has no practical application.", "tokens": [294, 341, 3613, 295, 264, 13095, 1783, 12, 46033, 13, 663, 311, 264, 3620, 295, 264, 363, 15, 13, 583, 797, 11, 570, 257, 1782, 2644, 362, 257, 3732, 9556, 295, 4018, 11, 370, 363, 15, 575, 572, 8496, 3861, 13], "avg_logprob": -0.20535713611614137, "compression_ratio": 1.3255813953488371, "no_speech_prob": 0.0, "words": [{"start": 678.12, "end": 678.38, "word": " in", "probability": 0.2685546875}, {"start": 678.38, "end": 678.62, "word": " this", "probability": 0.92822265625}, {"start": 678.62, "end": 679.08, "word": " range", "probability": 0.8759765625}, {"start": 679.08, "end": 679.94, "word": " of", "probability": 0.87744140625}, {"start": 679.94, "end": 680.06, "word": " the", "probability": 0.71435546875}, {"start": 680.06, "end": 680.46, "word": " observed", "probability": 0.87646484375}, {"start": 680.46, "end": 680.7, "word": " X", "probability": 0.53955078125}, {"start": 680.7, "end": 681.14, "word": "-values.", "probability": 0.6773681640625}, {"start": 681.96, "end": 682.34, "word": " That's", "probability": 0.9033203125}, {"start": 682.34, "end": 682.5, "word": " the", "probability": 0.9248046875}, {"start": 682.5, "end": 682.68, "word": " meaning", "probability": 0.87841796875}, {"start": 682.68, "end": 682.9, "word": " of", "probability": 0.96728515625}, {"start": 682.9, "end": 683.1, "word": " the", "probability": 0.66357421875}, {"start": 683.1, "end": 683.78, "word": " B0.", "probability": 0.834716796875}, {"start": 683.96, "end": 684.18, "word": " But", "probability": 0.92724609375}, {"start": 684.18, "end": 684.54, "word": " again,", "probability": 0.87255859375}, {"start": 684.82, "end": 685.02, "word": " because", "probability": 0.8623046875}, {"start": 685.02, "end": 685.18, "word": " a", "probability": 0.8984375}, {"start": 685.18, "end": 685.5, "word": " house", "probability": 0.88232421875}, 
{"start": 685.5, "end": 685.82, "word": " cannot", "probability": 0.861328125}, {"start": 685.82, "end": 686.3, "word": " have", "probability": 0.94775390625}, {"start": 686.3, "end": 686.54, "word": " a", "probability": 0.828125}, {"start": 686.54, "end": 686.9, "word": " square", "probability": 0.955078125}, {"start": 686.9, "end": 687.44, "word": " footage", "probability": 0.9404296875}, {"start": 687.44, "end": 687.7, "word": " of", "probability": 0.9716796875}, {"start": 687.7, "end": 688.08, "word": " zero,", "probability": 0.57275390625}, {"start": 688.68, "end": 688.8, "word": " so", "probability": 0.88427734375}, {"start": 688.8, "end": 689.22, "word": " B0", "probability": 0.973876953125}, {"start": 689.22, "end": 689.54, "word": " has", "probability": 0.9462890625}, {"start": 689.54, "end": 689.78, "word": " no", "probability": 0.95263671875}, {"start": 689.78, "end": 690.32, "word": " practical", "probability": 0.93896484375}, {"start": 690.32, "end": 691.68, "word": " application.", "probability": 0.92138671875}], "temperature": 1.0}, {"id": 29, "seek": 71666, "start": 694.74, "end": 716.66, "text": " On the other hand, the interpretation for B1, B1 equals 0.10977, that means B1 again estimates the change in the mean value of Y as a result of one unit increase in X. 
In other words, since B1 equals 0.10977, that tells us that the mean value of a house", "tokens": [1282, 264, 661, 1011, 11, 264, 14174, 337, 363, 16, 11, 363, 16, 6915, 1958, 13, 3279, 24, 17512, 11, 300, 1355, 363, 16, 797, 20561, 264, 1319, 294, 264, 914, 2158, 295, 398, 382, 257, 1874, 295, 472, 4985, 3488, 294, 1783, 13, 682, 661, 2283, 11, 1670, 363, 16, 6915, 1958, 13, 3279, 24, 17512, 11, 300, 5112, 505, 300, 264, 914, 2158, 295, 257, 1782], "avg_logprob": -0.1305480124293894, "compression_ratio": 1.6387096774193548, "no_speech_prob": 0.0, "words": [{"start": 694.74, "end": 694.98, "word": " On", "probability": 0.81103515625}, {"start": 694.98, "end": 695.14, "word": " the", "probability": 0.92529296875}, {"start": 695.14, "end": 695.34, "word": " other", "probability": 0.8896484375}, {"start": 695.34, "end": 695.7, "word": " hand,", "probability": 0.91162109375}, {"start": 695.9, "end": 695.92, "word": " the", "probability": 0.7626953125}, {"start": 695.92, "end": 696.34, "word": " interpretation", "probability": 0.90576171875}, {"start": 696.34, "end": 697.08, "word": " for", "probability": 0.931640625}, {"start": 697.08, "end": 697.76, "word": " B1,", "probability": 0.799560546875}, {"start": 698.3, "end": 698.76, "word": " B1", "probability": 0.96337890625}, {"start": 698.76, "end": 698.96, "word": " equals", "probability": 0.252197265625}, {"start": 698.96, "end": 699.22, "word": " 0", "probability": 0.61376953125}, {"start": 699.22, "end": 700.44, "word": ".10977,", "probability": 0.96337890625}, {"start": 700.78, "end": 700.96, "word": " that", "probability": 0.9072265625}, {"start": 700.96, "end": 701.38, "word": " means", "probability": 0.91552734375}, {"start": 701.38, "end": 702.54, "word": " B1", "probability": 0.8828125}, {"start": 702.54, "end": 702.84, "word": " again", "probability": 0.89697265625}, {"start": 702.84, "end": 703.58, "word": " estimates", "probability": 0.87353515625}, {"start": 703.58, "end": 703.92, "word": " the", 
"probability": 0.86767578125}, {"start": 703.92, "end": 704.34, "word": " change", "probability": 0.896484375}, {"start": 704.34, "end": 704.56, "word": " in", "probability": 0.94384765625}, {"start": 704.56, "end": 704.7, "word": " the", "probability": 0.92041015625}, {"start": 704.7, "end": 704.88, "word": " mean", "probability": 0.96484375}, {"start": 704.88, "end": 705.18, "word": " value", "probability": 0.9736328125}, {"start": 705.18, "end": 705.42, "word": " of", "probability": 0.9599609375}, {"start": 705.42, "end": 705.7, "word": " Y", "probability": 0.82080078125}, {"start": 705.7, "end": 706.02, "word": " as", "probability": 0.93017578125}, {"start": 706.02, "end": 706.16, "word": " a", "probability": 0.98046875}, {"start": 706.16, "end": 706.44, "word": " result", "probability": 0.94677734375}, {"start": 706.44, "end": 706.7, "word": " of", "probability": 0.966796875}, {"start": 706.7, "end": 706.88, "word": " one", "probability": 0.8193359375}, {"start": 706.88, "end": 707.16, "word": " unit", "probability": 0.908203125}, {"start": 707.16, "end": 707.46, "word": " increase", "probability": 0.79638671875}, {"start": 707.46, "end": 707.68, "word": " in", "probability": 0.9326171875}, {"start": 707.68, "end": 707.94, "word": " X.", "probability": 0.9453125}, {"start": 709.2, "end": 709.46, "word": " In", "probability": 0.955078125}, {"start": 709.46, "end": 709.72, "word": " other", "probability": 0.89599609375}, {"start": 709.72, "end": 710.16, "word": " words,", "probability": 0.86328125}, {"start": 710.46, "end": 710.78, "word": " since", "probability": 0.88134765625}, {"start": 710.78, "end": 711.16, "word": " B1", "probability": 0.990234375}, {"start": 711.16, "end": 711.38, "word": " equals", "probability": 0.85546875}, {"start": 711.38, "end": 711.68, "word": " 0", "probability": 0.95654296875}, {"start": 711.68, "end": 712.8, "word": ".10977,", "probability": 0.965087890625}, {"start": 713.16, "end": 713.4, "word": " that", "probability": 
0.93603515625}, {"start": 713.4, "end": 713.7, "word": " tells", "probability": 0.8525390625}, {"start": 713.7, "end": 714.0, "word": " us", "probability": 0.9326171875}, {"start": 714.0, "end": 714.34, "word": " that", "probability": 0.93359375}, {"start": 714.34, "end": 715.08, "word": " the", "probability": 0.8994140625}, {"start": 715.08, "end": 715.26, "word": " mean", "probability": 0.970703125}, {"start": 715.26, "end": 715.68, "word": " value", "probability": 0.97509765625}, {"start": 715.68, "end": 716.06, "word": " of", "probability": 0.96630859375}, {"start": 716.06, "end": 716.22, "word": " a", "probability": 0.97509765625}, {"start": 716.22, "end": 716.66, "word": " house", "probability": 0.8916015625}], "temperature": 1.0}, {"id": 30, "seek": 74545, "start": 718.01, "end": 745.45, "text": " Increases by this amount, multiplied by 1,000 on average for each additional one square foot of size. So that's the exact interpretation about P0 and P1. For the prediction, as I mentioned, since we have this equation, and our goal is to predict the price for a house with 2,000 square feet, just plug this value here.", "tokens": [30367, 1957, 538, 341, 2372, 11, 17207, 538, 502, 11, 1360, 322, 4274, 337, 1184, 4497, 472, 3732, 2671, 295, 2744, 13, 407, 300, 311, 264, 1900, 14174, 466, 430, 15, 293, 430, 16, 13, 1171, 264, 17630, 11, 382, 286, 2835, 11, 1670, 321, 362, 341, 5367, 11, 293, 527, 3387, 307, 281, 6069, 264, 3218, 337, 257, 1782, 365, 568, 11, 1360, 3732, 3521, 11, 445, 5452, 341, 2158, 510, 13], "avg_logprob": -0.17694256877576983, "compression_ratio": 1.519047619047619, "no_speech_prob": 0.0, "words": [{"start": 718.01, "end": 718.73, "word": " Increases", "probability": 0.630859375}, {"start": 718.73, "end": 719.13, "word": " by", "probability": 0.9658203125}, {"start": 719.13, "end": 719.47, "word": " this", "probability": 0.9443359375}, {"start": 719.47, "end": 720.07, "word": " amount,", "probability": 0.90673828125}, {"start": 720.95, "end": 
721.65, "word": " multiplied", "probability": 0.4091796875}, {"start": 721.65, "end": 722.03, "word": " by", "probability": 0.9775390625}, {"start": 722.03, "end": 722.41, "word": " 1", "probability": 0.293701171875}, {"start": 722.41, "end": 722.47, "word": ",000", "probability": 0.9912109375}, {"start": 722.47, "end": 723.67, "word": " on", "probability": 0.481689453125}, {"start": 723.67, "end": 724.07, "word": " average", "probability": 0.79833984375}, {"start": 724.07, "end": 724.31, "word": " for", "probability": 0.81494140625}, {"start": 724.31, "end": 724.59, "word": " each", "probability": 0.951171875}, {"start": 724.59, "end": 725.07, "word": " additional", "probability": 0.89501953125}, {"start": 725.07, "end": 725.31, "word": " one", "probability": 0.751953125}, {"start": 725.31, "end": 725.73, "word": " square", "probability": 0.86767578125}, {"start": 725.73, "end": 726.15, "word": " foot", "probability": 0.890625}, {"start": 726.15, "end": 726.33, "word": " of", "probability": 0.837890625}, {"start": 726.33, "end": 726.67, "word": " size.", "probability": 0.8154296875}, {"start": 727.19, "end": 727.41, "word": " So", "probability": 0.95947265625}, {"start": 727.41, "end": 727.77, "word": " that's", "probability": 0.90380859375}, {"start": 727.77, "end": 728.05, "word": " the", "probability": 0.91796875}, {"start": 728.05, "end": 728.49, "word": " exact", "probability": 0.94140625}, {"start": 728.49, "end": 729.69, "word": " interpretation", "probability": 0.8798828125}, {"start": 729.69, "end": 730.11, "word": " about", "probability": 0.8876953125}, {"start": 730.11, "end": 730.61, "word": " P0", "probability": 0.754638671875}, {"start": 730.61, "end": 730.83, "word": " and", "probability": 0.93994140625}, {"start": 730.83, "end": 731.21, "word": " P1.", "probability": 0.972412109375}, {"start": 732.65, "end": 733.19, "word": " For", "probability": 0.96533203125}, {"start": 733.19, "end": 733.35, "word": " the", "probability": 0.91845703125}, 
{"start": 733.35, "end": 733.73, "word": " prediction,", "probability": 0.91455078125}, {"start": 734.33, "end": 734.49, "word": " as", "probability": 0.96435546875}, {"start": 734.49, "end": 734.63, "word": " I", "probability": 0.984375}, {"start": 734.63, "end": 735.03, "word": " mentioned,", "probability": 0.84716796875}, {"start": 735.33, "end": 735.51, "word": " since", "probability": 0.876953125}, {"start": 735.51, "end": 736.43, "word": " we", "probability": 0.90966796875}, {"start": 736.43, "end": 736.59, "word": " have", "probability": 0.947265625}, {"start": 736.59, "end": 736.83, "word": " this", "probability": 0.94970703125}, {"start": 736.83, "end": 737.33, "word": " equation,", "probability": 0.96923828125}, {"start": 737.97, "end": 738.21, "word": " and", "probability": 0.93408203125}, {"start": 738.21, "end": 738.43, "word": " our", "probability": 0.87890625}, {"start": 738.43, "end": 738.65, "word": " goal", "probability": 0.97216796875}, {"start": 738.65, "end": 738.83, "word": " is", "probability": 0.94482421875}, {"start": 738.83, "end": 738.97, "word": " to", "probability": 0.96875}, {"start": 738.97, "end": 739.37, "word": " predict", "probability": 0.9189453125}, {"start": 739.37, "end": 739.61, "word": " the", "probability": 0.91455078125}, {"start": 739.61, "end": 739.99, "word": " price", "probability": 0.94580078125}, {"start": 739.99, "end": 740.23, "word": " for", "probability": 0.81591796875}, {"start": 740.23, "end": 740.39, "word": " a", "probability": 0.9326171875}, {"start": 740.39, "end": 740.63, "word": " house", "probability": 0.88623046875}, {"start": 740.63, "end": 740.93, "word": " with", "probability": 0.8916015625}, {"start": 740.93, "end": 741.53, "word": " 2", "probability": 0.91650390625}, {"start": 741.53, "end": 742.01, "word": ",000", "probability": 0.995849609375}, {"start": 742.01, "end": 742.93, "word": " square", "probability": 0.904296875}, {"start": 742.93, "end": 743.29, "word": " feet,", "probability": 
0.96630859375}, {"start": 743.85, "end": 744.11, "word": " just", "probability": 0.9169921875}, {"start": 744.11, "end": 744.41, "word": " plug", "probability": 0.48779296875}, {"start": 744.41, "end": 744.73, "word": " this", "probability": 0.94580078125}, {"start": 744.73, "end": 745.09, "word": " value", "probability": 0.9697265625}, {"start": 745.09, "end": 745.45, "word": " here.", "probability": 0.845703125}], "temperature": 1.0}, {"id": 31, "seek": 76937, "start": 746.45, "end": 769.37, "text": " Multiply this value by 0.1098, then add the result to 98.25 will give 317.85. This value should be multiplied by 1000, so the predicted price for a house with 2000 square feet is around 317,850 dollars.", "tokens": [31150, 356, 341, 2158, 538, 1958, 13, 3279, 22516, 11, 550, 909, 264, 1874, 281, 20860, 13, 6074, 486, 976, 805, 7773, 13, 19287, 13, 639, 2158, 820, 312, 17207, 538, 9714, 11, 370, 264, 19147, 3218, 337, 257, 1782, 365, 8132, 3732, 3521, 307, 926, 805, 7773, 11, 23, 2803, 3808, 13], "avg_logprob": -0.19444444002928557, "compression_ratio": 1.326797385620915, "no_speech_prob": 0.0, "words": [{"start": 746.45, "end": 747.03, "word": " Multiply", "probability": 0.6953125}, {"start": 747.03, "end": 747.25, "word": " this", "probability": 0.91162109375}, {"start": 747.25, "end": 747.47, "word": " value", "probability": 0.95166015625}, {"start": 747.47, "end": 747.69, "word": " by", "probability": 0.966796875}, {"start": 747.69, "end": 748.11, "word": " 0", "probability": 0.65234375}, {"start": 748.11, "end": 748.89, "word": ".1098,", "probability": 0.9661458333333334}, {"start": 749.53, "end": 749.99, "word": " then", "probability": 0.81591796875}, {"start": 749.99, "end": 750.53, "word": " add", "probability": 0.88916015625}, {"start": 750.53, "end": 750.77, "word": " the", "probability": 0.880859375}, {"start": 750.77, "end": 751.13, "word": " result", "probability": 0.88330078125}, {"start": 751.13, "end": 751.59, "word": " to", "probability": 
0.9248046875}, {"start": 751.59, "end": 752.07, "word": " 98", "probability": 0.81591796875}, {"start": 752.07, "end": 753.09, "word": ".25", "probability": 0.984375}, {"start": 753.09, "end": 753.65, "word": " will", "probability": 0.358642578125}, {"start": 753.65, "end": 753.91, "word": " give", "probability": 0.8310546875}, {"start": 753.91, "end": 755.41, "word": " 317", "probability": 0.8095703125}, {"start": 755.41, "end": 756.03, "word": ".85.", "probability": 0.99267578125}, {"start": 756.51, "end": 757.13, "word": " This", "probability": 0.86474609375}, {"start": 757.13, "end": 757.41, "word": " value", "probability": 0.98046875}, {"start": 757.41, "end": 757.59, "word": " should", "probability": 0.9609375}, {"start": 757.59, "end": 757.75, "word": " be", "probability": 0.939453125}, {"start": 757.75, "end": 758.09, "word": " multiplied", "probability": 0.65234375}, {"start": 758.09, "end": 758.37, "word": " by", "probability": 0.9765625}, {"start": 758.37, "end": 758.79, "word": " 1000,", "probability": 0.7080078125}, {"start": 759.83, "end": 760.09, "word": " so", "probability": 0.92724609375}, {"start": 760.09, "end": 760.29, "word": " the", "probability": 0.90771484375}, {"start": 760.29, "end": 760.69, "word": " predicted", "probability": 0.84765625}, {"start": 760.69, "end": 761.19, "word": " price", "probability": 0.91748046875}, {"start": 761.19, "end": 761.43, "word": " for", "probability": 0.931640625}, {"start": 761.43, "end": 761.59, "word": " a", "probability": 0.96435546875}, {"start": 761.59, "end": 761.83, "word": " house", "probability": 0.88720703125}, {"start": 761.83, "end": 762.13, "word": " with", "probability": 0.9072265625}, {"start": 762.13, "end": 762.65, "word": " 2000", "probability": 0.841796875}, {"start": 762.65, "end": 763.09, "word": " square", "probability": 0.64599609375}, {"start": 763.09, "end": 763.51, "word": " feet", "probability": 0.95361328125}, {"start": 763.51, "end": 764.67, "word": " is", "probability": 
0.91162109375}, {"start": 764.67, "end": 765.25, "word": " around", "probability": 0.9169921875}, {"start": 765.25, "end": 767.11, "word": " 317", "probability": 0.869384765625}, {"start": 767.11, "end": 769.05, "word": ",850", "probability": 0.8043619791666666}, {"start": 769.05, "end": 769.37, "word": " dollars.", "probability": 0.6689453125}], "temperature": 1.0}, {"id": 32, "seek": 79421, "start": 770.31, "end": 794.21, "text": " That's for making the prediction for selling a price. The last section in chapter 12 talks about coefficient of determination R squared. The definition for the coefficient of determination is the portion", "tokens": [663, 311, 337, 1455, 264, 17630, 337, 6511, 257, 3218, 13, 440, 1036, 3541, 294, 7187, 2272, 6686, 466, 17619, 295, 18432, 497, 8889, 13, 440, 7123, 337, 264, 17619, 295, 18432, 307, 264, 8044], "avg_logprob": -0.18901909184124735, "compression_ratio": 1.59375, "no_speech_prob": 0.0, "words": [{"start": 770.31, "end": 770.93, "word": " That's", "probability": 0.814453125}, {"start": 770.93, "end": 771.27, "word": " for", "probability": 0.91064453125}, {"start": 771.27, "end": 772.07, "word": " making", "probability": 0.5908203125}, {"start": 772.07, "end": 772.93, "word": " the", "probability": 0.65185546875}, {"start": 772.93, "end": 773.45, "word": " prediction", "probability": 0.91650390625}, {"start": 773.45, "end": 774.91, "word": " for", "probability": 0.9228515625}, {"start": 774.91, "end": 776.57, "word": " selling", "probability": 0.8173828125}, {"start": 776.57, "end": 776.73, "word": " a", "probability": 0.68310546875}, {"start": 776.73, "end": 777.03, "word": " price.", "probability": 0.88232421875}, {"start": 778.29, "end": 778.85, "word": " The", "probability": 0.888671875}, {"start": 778.85, "end": 779.23, "word": " last", "probability": 0.88671875}, {"start": 779.23, "end": 780.43, "word": " section", "probability": 0.86865234375}, {"start": 780.43, "end": 781.17, "word": " in", "probability": 
0.88232421875}, {"start": 781.17, "end": 781.47, "word": " chapter", "probability": 0.61181640625}, {"start": 781.47, "end": 782.05, "word": " 12", "probability": 0.80126953125}, {"start": 782.05, "end": 783.31, "word": " talks", "probability": 0.7490234375}, {"start": 783.31, "end": 784.03, "word": " about", "probability": 0.90576171875}, {"start": 784.03, "end": 786.33, "word": " coefficient", "probability": 0.77978515625}, {"start": 786.33, "end": 786.69, "word": " of", "probability": 0.94580078125}, {"start": 786.69, "end": 787.27, "word": " determination", "probability": 0.93505859375}, {"start": 787.27, "end": 787.55, "word": " R", "probability": 0.6572265625}, {"start": 787.55, "end": 787.87, "word": " squared.", "probability": 0.78759765625}, {"start": 789.53, "end": 789.97, "word": " The", "probability": 0.90380859375}, {"start": 789.97, "end": 790.43, "word": " definition", "probability": 0.9384765625}, {"start": 790.43, "end": 790.71, "word": " for", "probability": 0.90673828125}, {"start": 790.71, "end": 790.87, "word": " the", "probability": 0.90234375}, {"start": 790.87, "end": 791.29, "word": " coefficient", "probability": 0.953125}, {"start": 791.29, "end": 791.55, "word": " of", "probability": 0.95166015625}, {"start": 791.55, "end": 792.09, "word": " determination", "probability": 0.9453125}, {"start": 792.09, "end": 793.53, "word": " is", "probability": 0.9091796875}, {"start": 793.53, "end": 793.69, "word": " the", "probability": 0.92431640625}, {"start": 793.69, "end": 794.21, "word": " portion", "probability": 0.89306640625}], "temperature": 1.0}, {"id": 33, "seek": 81819, "start": 795.45, "end": 818.19, "text": " of the total variation in the dependent variable that is explained by the variation in the independent variable. Since we have two variables X and Y. 
And the question is, what's the portion of the total variation that can be explained by X?", "tokens": [295, 264, 3217, 12990, 294, 264, 12334, 7006, 300, 307, 8825, 538, 264, 12990, 294, 264, 6695, 7006, 13, 4162, 321, 362, 732, 9102, 1783, 293, 398, 13, 400, 264, 1168, 307, 11, 437, 311, 264, 8044, 295, 264, 3217, 12990, 300, 393, 312, 8825, 538, 1783, 30], "avg_logprob": -0.2021683624812535, "compression_ratio": 1.7851851851851852, "no_speech_prob": 0.0, "words": [{"start": 795.45, "end": 795.75, "word": " of", "probability": 0.30419921875}, {"start": 795.75, "end": 795.91, "word": " the", "probability": 0.9072265625}, {"start": 795.91, "end": 796.19, "word": " total", "probability": 0.89404296875}, {"start": 796.19, "end": 796.73, "word": " variation", "probability": 0.90869140625}, {"start": 796.73, "end": 797.07, "word": " in", "probability": 0.8642578125}, {"start": 797.07, "end": 797.19, "word": " the", "probability": 0.81640625}, {"start": 797.19, "end": 797.51, "word": " dependent", "probability": 0.52490234375}, {"start": 797.51, "end": 798.05, "word": " variable", "probability": 0.9306640625}, {"start": 798.05, "end": 799.17, "word": " that", "probability": 0.52880859375}, {"start": 799.17, "end": 799.33, "word": " is", "probability": 0.6611328125}, {"start": 799.33, "end": 799.75, "word": " explained", "probability": 0.75341796875}, {"start": 799.75, "end": 800.17, "word": " by", "probability": 0.95849609375}, {"start": 800.17, "end": 800.35, "word": " the", "probability": 0.888671875}, {"start": 800.35, "end": 800.81, "word": " variation", "probability": 0.90185546875}, {"start": 800.81, "end": 801.21, "word": " in", "probability": 0.9296875}, {"start": 801.21, "end": 801.33, "word": " the", "probability": 0.9169921875}, {"start": 801.33, "end": 801.73, "word": " independent", "probability": 0.8935546875}, {"start": 801.73, "end": 802.19, "word": " variable.", "probability": 0.8994140625}, {"start": 802.89, "end": 803.29, "word": " Since", 
"probability": 0.7763671875}, {"start": 803.29, "end": 803.47, "word": " we", "probability": 0.9228515625}, {"start": 803.47, "end": 803.67, "word": " have", "probability": 0.9501953125}, {"start": 803.67, "end": 803.89, "word": " two", "probability": 0.8779296875}, {"start": 803.89, "end": 804.33, "word": " variables", "probability": 0.931640625}, {"start": 804.33, "end": 804.61, "word": " X", "probability": 0.357177734375}, {"start": 804.61, "end": 804.81, "word": " and", "probability": 0.9375}, {"start": 804.81, "end": 805.13, "word": " Y.", "probability": 0.994140625}, {"start": 809.51, "end": 809.99, "word": " And", "probability": 0.82568359375}, {"start": 809.99, "end": 810.09, "word": " the", "probability": 0.90380859375}, {"start": 810.09, "end": 810.37, "word": " question", "probability": 0.92919921875}, {"start": 810.37, "end": 810.75, "word": " is,", "probability": 0.9482421875}, {"start": 811.81, "end": 812.23, "word": " what's", "probability": 0.86376953125}, {"start": 812.23, "end": 812.43, "word": " the", "probability": 0.92431640625}, {"start": 812.43, "end": 812.95, "word": " portion", "probability": 0.888671875}, {"start": 812.95, "end": 814.35, "word": " of", "probability": 0.96337890625}, {"start": 814.35, "end": 814.49, "word": " the", "probability": 0.9140625}, {"start": 814.49, "end": 814.75, "word": " total", "probability": 0.8759765625}, {"start": 814.75, "end": 815.15, "word": " variation", "probability": 0.880859375}, {"start": 815.15, "end": 815.69, "word": " that", "probability": 0.93603515625}, {"start": 815.69, "end": 815.95, "word": " can", "probability": 0.939453125}, {"start": 815.95, "end": 816.77, "word": " be", "probability": 0.9560546875}, {"start": 816.77, "end": 817.51, "word": " explained", "probability": 0.78173828125}, {"start": 817.51, "end": 817.83, "word": " by", "probability": 0.96923828125}, {"start": 817.83, "end": 818.19, "word": " X?", "probability": 0.966796875}], "temperature": 1.0}, {"id": 34, "seek": 84343, 
"start": 819.09, "end": 843.43, "text": " So the question is, what's the portion of the total variation in Y that is explained already by the variation in X? For example, suppose R² is 90%, 0.90. That means 90% in the variation of the selling price is explained by its size.", "tokens": [407, 264, 1168, 307, 11, 437, 311, 264, 8044, 295, 264, 3217, 12990, 294, 398, 300, 307, 8825, 1217, 538, 264, 12990, 294, 1783, 30, 1171, 1365, 11, 7297, 497, 27643, 307, 4289, 8923, 1958, 13, 7771, 13, 663, 1355, 4289, 4, 294, 264, 12990, 295, 264, 6511, 3218, 307, 8825, 538, 1080, 2744, 13], "avg_logprob": -0.19405691751411983, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 819.09, "end": 819.39, "word": " So", "probability": 0.7080078125}, {"start": 819.39, "end": 819.53, "word": " the", "probability": 0.666015625}, {"start": 819.53, "end": 819.83, "word": " question", "probability": 0.9111328125}, {"start": 819.83, "end": 820.15, "word": " is,", "probability": 0.9501953125}, {"start": 820.63, "end": 820.81, "word": " what's", "probability": 0.8095703125}, {"start": 820.81, "end": 821.01, "word": " the", "probability": 0.92626953125}, {"start": 821.01, "end": 821.33, "word": " portion", "probability": 0.8154296875}, {"start": 821.33, "end": 821.65, "word": " of", "probability": 0.96875}, {"start": 821.65, "end": 821.79, "word": " the", "probability": 0.84912109375}, {"start": 821.79, "end": 822.03, "word": " total", "probability": 0.86865234375}, {"start": 822.03, "end": 822.51, "word": " variation", "probability": 0.87353515625}, {"start": 822.51, "end": 822.77, "word": " in", "probability": 0.8310546875}, {"start": 822.77, "end": 823.05, "word": " Y", "probability": 0.7080078125}, {"start": 823.05, "end": 823.93, "word": " that", "probability": 0.81884765625}, {"start": 823.93, "end": 824.15, "word": " is", "probability": 0.88037109375}, {"start": 824.15, "end": 824.53, "word": " explained", "probability": 0.88623046875}, {"start": 824.53, "end": 825.23, 
"word": " already", "probability": 0.8935546875}, {"start": 825.23, "end": 825.89, "word": " by", "probability": 0.95263671875}, {"start": 825.89, "end": 826.07, "word": " the", "probability": 0.88671875}, {"start": 826.07, "end": 826.45, "word": " variation", "probability": 0.86376953125}, {"start": 826.45, "end": 826.67, "word": " in", "probability": 0.923828125}, {"start": 826.67, "end": 827.01, "word": " X?", "probability": 0.96923828125}, {"start": 828.27, "end": 828.71, "word": " For", "probability": 0.9150390625}, {"start": 828.71, "end": 829.05, "word": " example,", "probability": 0.96630859375}, {"start": 829.25, "end": 829.87, "word": " suppose", "probability": 0.861328125}, {"start": 829.87, "end": 831.59, "word": " R²", "probability": 0.47613525390625}, {"start": 831.59, "end": 832.75, "word": " is", "probability": 0.8076171875}, {"start": 832.75, "end": 834.03, "word": " 90%,", "probability": 0.6419677734375}, {"start": 834.03, "end": 834.45, "word": " 0", "probability": 0.537109375}, {"start": 834.45, "end": 834.89, "word": ".90.", "probability": 0.98095703125}, {"start": 835.55, "end": 836.31, "word": " That", "probability": 0.90185546875}, {"start": 836.31, "end": 836.71, "word": " means", "probability": 0.9296875}, {"start": 836.71, "end": 837.81, "word": " 90", "probability": 0.8193359375}, {"start": 837.81, "end": 838.13, "word": "%", "probability": 0.97119140625}, {"start": 838.13, "end": 838.91, "word": " in", "probability": 0.87939453125}, {"start": 838.91, "end": 839.05, "word": " the", "probability": 0.916015625}, {"start": 839.05, "end": 839.43, "word": " variation", "probability": 0.91064453125}, {"start": 839.43, "end": 839.63, "word": " of", "probability": 0.94775390625}, {"start": 839.63, "end": 839.77, "word": " the", "probability": 0.8251953125}, {"start": 839.77, "end": 840.01, "word": " selling", "probability": 0.87109375}, {"start": 840.01, "end": 840.61, "word": " price", "probability": 0.919921875}, {"start": 840.61, "end": 
841.87, "word": " is", "probability": 0.91162109375}, {"start": 841.87, "end": 842.27, "word": " explained", "probability": 0.8408203125}, {"start": 842.27, "end": 842.73, "word": " by", "probability": 0.96923828125}, {"start": 842.73, "end": 842.99, "word": " its", "probability": 0.83056640625}, {"start": 842.99, "end": 843.43, "word": " size.", "probability": 0.845703125}], "temperature": 1.0}, {"id": 35, "seek": 87110, "start": 844.86, "end": 871.1, "text": " That means the size of the house contributes about 90% to explain the variability of the selling price. So we would like to have R squared to be large enough. Now, R squared for simple regression only is given by this equation, correlation between X and Y squared.", "tokens": [663, 1355, 264, 2744, 295, 264, 1782, 32035, 466, 4289, 4, 281, 2903, 264, 35709, 295, 264, 6511, 3218, 13, 407, 321, 576, 411, 281, 362, 497, 8889, 281, 312, 2416, 1547, 13, 823, 11, 497, 8889, 337, 2199, 24590, 787, 307, 2212, 538, 341, 5367, 11, 20009, 1296, 1783, 293, 398, 8889, 13], "avg_logprob": -0.13572443344376303, "compression_ratio": 1.440217391304348, "no_speech_prob": 0.0, "words": [{"start": 844.86, "end": 845.26, "word": " That", "probability": 0.873046875}, {"start": 845.26, "end": 845.7, "word": " means", "probability": 0.92626953125}, {"start": 845.7, "end": 846.24, "word": " the", "probability": 0.78173828125}, {"start": 846.24, "end": 846.6, "word": " size", "probability": 0.83154296875}, {"start": 846.6, "end": 846.78, "word": " of", "probability": 0.9697265625}, {"start": 846.78, "end": 846.9, "word": " the", "probability": 0.92236328125}, {"start": 846.9, "end": 847.32, "word": " house", "probability": 0.88134765625}, {"start": 847.32, "end": 848.36, "word": " contributes", "probability": 0.93408203125}, {"start": 848.36, "end": 848.9, "word": " about", "probability": 0.904296875}, {"start": 848.9, "end": 849.86, "word": " 90", "probability": 0.95947265625}, {"start": 849.86, "end": 850.52, "word": "%", 
"probability": 0.912109375}, {"start": 850.52, "end": 852.58, "word": " to", "probability": 0.402587890625}, {"start": 852.58, "end": 853.16, "word": " explain", "probability": 0.92578125}, {"start": 853.16, "end": 853.48, "word": " the", "probability": 0.90869140625}, {"start": 853.48, "end": 853.96, "word": " variability", "probability": 0.9638671875}, {"start": 853.96, "end": 854.5, "word": " of", "probability": 0.96728515625}, {"start": 854.5, "end": 854.7, "word": " the", "probability": 0.91552734375}, {"start": 854.7, "end": 855.16, "word": " selling", "probability": 0.8076171875}, {"start": 855.16, "end": 855.64, "word": " price.", "probability": 0.91650390625}, {"start": 857.22, "end": 857.7, "word": " So", "probability": 0.96728515625}, {"start": 857.7, "end": 857.98, "word": " we", "probability": 0.8212890625}, {"start": 857.98, "end": 858.24, "word": " would", "probability": 0.91357421875}, {"start": 858.24, "end": 858.5, "word": " like", "probability": 0.93603515625}, {"start": 858.5, "end": 858.84, "word": " to", "probability": 0.9658203125}, {"start": 858.84, "end": 859.3, "word": " have", "probability": 0.9404296875}, {"start": 859.3, "end": 859.56, "word": " R", "probability": 0.71044921875}, {"start": 859.56, "end": 859.78, "word": " squared", "probability": 0.75634765625}, {"start": 859.78, "end": 860.02, "word": " to", "probability": 0.9365234375}, {"start": 860.02, "end": 860.14, "word": " be", "probability": 0.95166015625}, {"start": 860.14, "end": 860.46, "word": " large", "probability": 0.96484375}, {"start": 860.46, "end": 860.84, "word": " enough.", "probability": 0.869140625}, {"start": 862.46, "end": 862.76, "word": " Now,", "probability": 0.94140625}, {"start": 863.0, "end": 863.3, "word": " R", "probability": 0.97802734375}, {"start": 863.3, "end": 863.72, "word": " squared", "probability": 0.83740234375}, {"start": 863.72, "end": 864.92, "word": " for", "probability": 0.8076171875}, {"start": 864.92, "end": 865.5, "word": " simple", 
"probability": 0.89599609375}, {"start": 865.5, "end": 866.02, "word": " regression", "probability": 0.95068359375}, {"start": 866.02, "end": 866.62, "word": " only", "probability": 0.8994140625}, {"start": 866.62, "end": 867.78, "word": " is", "probability": 0.74560546875}, {"start": 867.78, "end": 868.02, "word": " given", "probability": 0.8984375}, {"start": 868.02, "end": 868.24, "word": " by", "probability": 0.96875}, {"start": 868.24, "end": 868.46, "word": " this", "probability": 0.9111328125}, {"start": 868.46, "end": 868.94, "word": " equation,", "probability": 0.962890625}, {"start": 869.1, "end": 869.54, "word": " correlation", "probability": 0.86572265625}, {"start": 869.54, "end": 869.98, "word": " between", "probability": 0.90380859375}, {"start": 869.98, "end": 870.2, "word": " X", "probability": 0.53271484375}, {"start": 870.2, "end": 870.38, "word": " and", "probability": 0.94384765625}, {"start": 870.38, "end": 870.64, "word": " Y", "probability": 0.99462890625}, {"start": 870.64, "end": 871.1, "word": " squared.", "probability": 0.86279296875}], "temperature": 1.0}, {"id": 36, "seek": 90051, "start": 874.09, "end": 900.51, "text": " So if we have the correlation between X and Y and then you just square this value, that will give the correlation or the coefficient of determination. So simply, determination coefficient is just the square of the correlation between X and Y. We know that R ranges between minus 1 and plus 1. 
So R squared", "tokens": [407, 498, 321, 362, 264, 20009, 1296, 1783, 293, 398, 293, 550, 291, 445, 3732, 341, 2158, 11, 300, 486, 976, 264, 20009, 420, 264, 17619, 295, 18432, 13, 407, 2935, 11, 18432, 17619, 307, 445, 264, 3732, 295, 264, 20009, 1296, 1783, 293, 398, 13, 492, 458, 300, 497, 22526, 1296, 3175, 502, 293, 1804, 502, 13, 407, 497, 8889], "avg_logprob": -0.1668346764579896, "compression_ratio": 1.8323353293413174, "no_speech_prob": 0.0, "words": [{"start": 874.09, "end": 874.35, "word": " So", "probability": 0.87646484375}, {"start": 874.35, "end": 874.55, "word": " if", "probability": 0.78759765625}, {"start": 874.55, "end": 874.67, "word": " we", "probability": 0.5244140625}, {"start": 874.67, "end": 874.83, "word": " have", "probability": 0.9423828125}, {"start": 874.83, "end": 874.97, "word": " the", "probability": 0.84228515625}, {"start": 874.97, "end": 875.39, "word": " correlation", "probability": 0.9013671875}, {"start": 875.39, "end": 875.69, "word": " between", "probability": 0.88232421875}, {"start": 875.69, "end": 875.87, "word": " X", "probability": 0.473876953125}, {"start": 875.87, "end": 876.03, "word": " and", "probability": 0.9404296875}, {"start": 876.03, "end": 876.29, "word": " Y", "probability": 0.99169921875}, {"start": 876.29, "end": 876.51, "word": " and", "probability": 0.494140625}, {"start": 876.51, "end": 876.73, "word": " then", "probability": 0.8203125}, {"start": 876.73, "end": 877.19, "word": " you", "probability": 0.91064453125}, {"start": 877.19, "end": 877.59, "word": " just", "probability": 0.90478515625}, {"start": 877.59, "end": 878.09, "word": " square", "probability": 0.91015625}, {"start": 878.09, "end": 878.41, "word": " this", "probability": 0.947265625}, {"start": 878.41, "end": 878.83, "word": " value,", "probability": 0.97265625}, {"start": 879.21, "end": 879.65, "word": " that", "probability": 0.93310546875}, {"start": 879.65, "end": 879.85, "word": " will", "probability": 0.892578125}, {"start": 
879.85, "end": 880.07, "word": " give", "probability": 0.8837890625}, {"start": 880.07, "end": 880.25, "word": " the", "probability": 0.90185546875}, {"start": 880.25, "end": 880.71, "word": " correlation", "probability": 0.904296875}, {"start": 880.71, "end": 881.57, "word": " or", "probability": 0.845703125}, {"start": 881.57, "end": 881.75, "word": " the", "probability": 0.91357421875}, {"start": 881.75, "end": 882.17, "word": " coefficient", "probability": 0.91796875}, {"start": 882.17, "end": 882.37, "word": " of", "probability": 0.96142578125}, {"start": 882.37, "end": 882.87, "word": " determination.", "probability": 0.921875}, {"start": 883.77, "end": 883.91, "word": " So", "probability": 0.931640625}, {"start": 883.91, "end": 884.33, "word": " simply,", "probability": 0.80322265625}, {"start": 885.17, "end": 885.73, "word": " determination", "probability": 0.80322265625}, {"start": 885.73, "end": 886.33, "word": " coefficient", "probability": 0.95166015625}, {"start": 886.33, "end": 886.81, "word": " is", "probability": 0.94921875}, {"start": 886.81, "end": 887.15, "word": " just", "probability": 0.91796875}, {"start": 887.15, "end": 887.45, "word": " the", "probability": 0.9208984375}, {"start": 887.45, "end": 887.91, "word": " square", "probability": 0.90625}, {"start": 887.91, "end": 888.51, "word": " of", "probability": 0.97509765625}, {"start": 888.51, "end": 888.83, "word": " the", "probability": 0.9169921875}, {"start": 888.83, "end": 889.51, "word": " correlation", "probability": 0.89697265625}, {"start": 889.51, "end": 889.85, "word": " between", "probability": 0.89501953125}, {"start": 889.85, "end": 890.03, "word": " X", "probability": 0.98291015625}, {"start": 890.03, "end": 890.19, "word": " and", "probability": 0.947265625}, {"start": 890.19, "end": 890.49, "word": " Y.", "probability": 0.9970703125}, {"start": 892.09, "end": 892.33, "word": " We", "probability": 0.94873046875}, {"start": 892.33, "end": 892.51, "word": " know", "probability": 
0.8798828125}, {"start": 892.51, "end": 892.79, "word": " that", "probability": 0.93359375}, {"start": 892.79, "end": 893.19, "word": " R", "probability": 0.9140625}, {"start": 893.19, "end": 894.13, "word": " ranges", "probability": 0.8583984375}, {"start": 894.13, "end": 894.43, "word": " between", "probability": 0.876953125}, {"start": 894.43, "end": 894.73, "word": " minus", "probability": 0.71142578125}, {"start": 894.73, "end": 894.93, "word": " 1", "probability": 0.4755859375}, {"start": 894.93, "end": 895.07, "word": " and", "probability": 0.93603515625}, {"start": 895.07, "end": 895.37, "word": " plus", "probability": 0.9580078125}, {"start": 895.37, "end": 895.67, "word": " 1.", "probability": 0.98388671875}, {"start": 899.15, "end": 899.75, "word": " So", "probability": 0.939453125}, {"start": 899.75, "end": 900.05, "word": " R", "probability": 0.8486328125}, {"start": 900.05, "end": 900.51, "word": " squared", "probability": 0.63037109375}], "temperature": 1.0}, {"id": 37, "seek": 92865, "start": 902.51, "end": 928.65, "text": " should be ranges between 0 and 1, because minus sign will be cancelled since we are squaring these values, so r squared is always between 0 and 1. So again, r squared is used to explain the portion of the total variability in the dependent variable that is already explained by the variability in x. 
For example,", "tokens": [820, 312, 22526, 1296, 1958, 293, 502, 11, 570, 3175, 1465, 486, 312, 25103, 1670, 321, 366, 2339, 1921, 613, 4190, 11, 370, 367, 8889, 307, 1009, 1296, 1958, 293, 502, 13, 407, 797, 11, 367, 8889, 307, 1143, 281, 2903, 264, 8044, 295, 264, 3217, 35709, 294, 264, 12334, 7006, 300, 307, 1217, 8825, 538, 264, 35709, 294, 2031, 13, 1171, 1365, 11], "avg_logprob": -0.17548076923076922, "compression_ratio": 1.701086956521739, "no_speech_prob": 0.0, "words": [{"start": 902.51, "end": 902.83, "word": " should", "probability": 0.82275390625}, {"start": 902.83, "end": 903.15, "word": " be", "probability": 0.91845703125}, {"start": 903.15, "end": 904.13, "word": " ranges", "probability": 0.83642578125}, {"start": 904.13, "end": 904.63, "word": " between", "probability": 0.8955078125}, {"start": 904.63, "end": 905.19, "word": " 0", "probability": 0.7646484375}, {"start": 905.19, "end": 905.35, "word": " and", "probability": 0.9033203125}, {"start": 905.35, "end": 905.59, "word": " 1,", "probability": 0.9736328125}, {"start": 906.05, "end": 906.43, "word": " because", "probability": 0.88330078125}, {"start": 906.43, "end": 907.23, "word": " minus", "probability": 0.83837890625}, {"start": 907.23, "end": 907.69, "word": " sign", "probability": 0.8603515625}, {"start": 907.69, "end": 907.97, "word": " will", "probability": 0.86767578125}, {"start": 907.97, "end": 908.35, "word": " be", "probability": 0.8779296875}, {"start": 908.35, "end": 908.77, "word": " cancelled", "probability": 0.748046875}, {"start": 908.77, "end": 909.55, "word": " since", "probability": 0.63232421875}, {"start": 909.55, "end": 909.69, "word": " we", "probability": 0.94580078125}, {"start": 909.69, "end": 909.83, "word": " are", "probability": 0.93505859375}, {"start": 909.83, "end": 910.25, "word": " squaring", "probability": 0.970703125}, {"start": 910.25, "end": 910.53, "word": " these", "probability": 0.8173828125}, {"start": 910.53, "end": 910.95, "word": " values,", 
"probability": 0.95458984375}, {"start": 911.39, "end": 911.53, "word": " so", "probability": 0.93310546875}, {"start": 911.53, "end": 911.81, "word": " r", "probability": 0.35693359375}, {"start": 911.81, "end": 912.09, "word": " squared", "probability": 0.373291015625}, {"start": 912.09, "end": 912.35, "word": " is", "probability": 0.9384765625}, {"start": 912.35, "end": 912.77, "word": " always", "probability": 0.912109375}, {"start": 912.77, "end": 913.53, "word": " between", "probability": 0.91015625}, {"start": 913.53, "end": 914.13, "word": " 0", "probability": 0.9287109375}, {"start": 914.13, "end": 914.31, "word": " and", "probability": 0.93896484375}, {"start": 914.31, "end": 914.53, "word": " 1.", "probability": 0.98193359375}, {"start": 915.09, "end": 915.31, "word": " So", "probability": 0.8955078125}, {"start": 915.31, "end": 915.61, "word": " again,", "probability": 0.8173828125}, {"start": 916.05, "end": 916.33, "word": " r", "probability": 0.826171875}, {"start": 916.33, "end": 916.67, "word": " squared", "probability": 0.771484375}, {"start": 916.67, "end": 916.93, "word": " is", "probability": 0.9501953125}, {"start": 916.93, "end": 917.29, "word": " used", "probability": 0.91552734375}, {"start": 917.29, "end": 917.69, "word": " to", "probability": 0.966796875}, {"start": 917.69, "end": 918.33, "word": " explain", "probability": 0.86279296875}, {"start": 918.33, "end": 919.13, "word": " the", "probability": 0.91015625}, {"start": 919.13, "end": 919.57, "word": " portion", "probability": 0.8876953125}, {"start": 919.57, "end": 920.13, "word": " of", "probability": 0.9677734375}, {"start": 920.13, "end": 920.33, "word": " the", "probability": 0.89697265625}, {"start": 920.33, "end": 920.61, "word": " total", "probability": 0.87060546875}, {"start": 920.61, "end": 921.21, "word": " variability", "probability": 0.9658203125}, {"start": 921.21, "end": 922.43, "word": " in", "probability": 0.8505859375}, {"start": 922.43, "end": 922.61, "word": " 
the", "probability": 0.89892578125}, {"start": 922.61, "end": 922.93, "word": " dependent", "probability": 0.81591796875}, {"start": 922.93, "end": 923.41, "word": " variable", "probability": 0.9189453125}, {"start": 923.41, "end": 923.85, "word": " that", "probability": 0.89208984375}, {"start": 923.85, "end": 924.11, "word": " is", "probability": 0.94775390625}, {"start": 924.11, "end": 924.47, "word": " already", "probability": 0.931640625}, {"start": 924.47, "end": 924.95, "word": " explained", "probability": 0.83837890625}, {"start": 924.95, "end": 925.49, "word": " by", "probability": 0.9677734375}, {"start": 925.49, "end": 925.75, "word": " the", "probability": 0.89599609375}, {"start": 925.75, "end": 926.17, "word": " variability", "probability": 0.94384765625}, {"start": 926.17, "end": 926.49, "word": " in", "probability": 0.931640625}, {"start": 926.49, "end": 926.71, "word": " x.", "probability": 0.7470703125}, {"start": 928.05, "end": 928.29, "word": " For", "probability": 0.92919921875}, {"start": 928.29, "end": 928.65, "word": " example,", "probability": 0.97216796875}], "temperature": 1.0}, {"id": 38, "seek": 95835, "start": 930.45, "end": 958.35, "text": " Sometimes R squared is one. R squared is one only happens if R is one or negative one. So if there exists perfect relationship either negative or positive, I mean if R is plus one or negative one, then R squared is one. That means perfect linear relationship between Y and X. 
Now the value.", "tokens": [4803, 497, 8889, 307, 472, 13, 497, 8889, 307, 472, 787, 2314, 498, 497, 307, 472, 420, 3671, 472, 13, 407, 498, 456, 8198, 2176, 2480, 2139, 3671, 420, 3353, 11, 286, 914, 498, 497, 307, 1804, 472, 420, 3671, 472, 11, 550, 497, 8889, 307, 472, 13, 663, 1355, 2176, 8213, 2480, 1296, 398, 293, 1783, 13, 823, 264, 2158, 13], "avg_logprob": -0.19394841837504553, "compression_ratio": 1.830188679245283, "no_speech_prob": 0.0, "words": [{"start": 930.45, "end": 931.03, "word": " Sometimes", "probability": 0.52099609375}, {"start": 931.03, "end": 931.31, "word": " R", "probability": 0.57421875}, {"start": 931.31, "end": 931.51, "word": " squared", "probability": 0.2724609375}, {"start": 931.51, "end": 931.71, "word": " is", "probability": 0.9248046875}, {"start": 931.71, "end": 931.95, "word": " one.", "probability": 0.44921875}, {"start": 932.75, "end": 933.21, "word": " R", "probability": 0.92333984375}, {"start": 933.21, "end": 933.45, "word": " squared", "probability": 0.81591796875}, {"start": 933.45, "end": 933.71, "word": " is", "probability": 0.9189453125}, {"start": 933.71, "end": 933.99, "word": " one", "probability": 0.89599609375}, {"start": 933.99, "end": 935.29, "word": " only", "probability": 0.73291015625}, {"start": 935.29, "end": 935.79, "word": " happens", "probability": 0.9482421875}, {"start": 935.79, "end": 936.23, "word": " if", "probability": 0.92333984375}, {"start": 936.23, "end": 936.59, "word": " R", "probability": 0.935546875}, {"start": 936.59, "end": 936.75, "word": " is", "probability": 0.9267578125}, {"start": 936.75, "end": 937.09, "word": " one", "probability": 0.87890625}, {"start": 937.09, "end": 937.55, "word": " or", "probability": 0.93701171875}, {"start": 937.55, "end": 938.01, "word": " negative", "probability": 0.884765625}, {"start": 938.01, "end": 938.39, "word": " one.", "probability": 0.91357421875}, {"start": 939.31, "end": 939.67, "word": " So", "probability": 0.9443359375}, {"start": 939.67, 
"end": 939.89, "word": " if", "probability": 0.783203125}, {"start": 939.89, "end": 940.17, "word": " there", "probability": 0.91015625}, {"start": 940.17, "end": 940.47, "word": " exists", "probability": 0.6611328125}, {"start": 940.47, "end": 941.19, "word": " perfect", "probability": 0.83740234375}, {"start": 941.19, "end": 942.17, "word": " relationship", "probability": 0.9150390625}, {"start": 942.17, "end": 942.53, "word": " either", "probability": 0.77392578125}, {"start": 942.53, "end": 943.57, "word": " negative", "probability": 0.88525390625}, {"start": 943.57, "end": 943.81, "word": " or", "probability": 0.958984375}, {"start": 943.81, "end": 944.37, "word": " positive,", "probability": 0.9287109375}, {"start": 945.11, "end": 945.33, "word": " I", "probability": 0.9609375}, {"start": 945.33, "end": 945.49, "word": " mean", "probability": 0.96728515625}, {"start": 945.49, "end": 945.65, "word": " if", "probability": 0.7216796875}, {"start": 945.65, "end": 945.87, "word": " R", "probability": 0.97900390625}, {"start": 945.87, "end": 946.03, "word": " is", "probability": 0.9208984375}, {"start": 946.03, "end": 946.33, "word": " plus", "probability": 0.9228515625}, {"start": 946.33, "end": 946.65, "word": " one", "probability": 0.8974609375}, {"start": 946.65, "end": 947.07, "word": " or", "probability": 0.94921875}, {"start": 947.07, "end": 947.57, "word": " negative", "probability": 0.9296875}, {"start": 947.57, "end": 948.01, "word": " one,", "probability": 0.9208984375}, {"start": 949.13, "end": 949.39, "word": " then", "probability": 0.85302734375}, {"start": 949.39, "end": 949.65, "word": " R", "probability": 0.986328125}, {"start": 949.65, "end": 949.89, "word": " squared", "probability": 0.83349609375}, {"start": 949.89, "end": 950.11, "word": " is", "probability": 0.9453125}, {"start": 950.11, "end": 950.31, "word": " one.", "probability": 0.919921875}, {"start": 951.23, "end": 951.63, "word": " That", "probability": 0.90771484375}, {"start": 
951.63, "end": 952.01, "word": " means", "probability": 0.92724609375}, {"start": 952.01, "end": 952.77, "word": " perfect", "probability": 0.84619140625}, {"start": 952.77, "end": 954.43, "word": " linear", "probability": 0.87646484375}, {"start": 954.43, "end": 955.13, "word": " relationship", "probability": 0.9072265625}, {"start": 955.13, "end": 955.43, "word": " between", "probability": 0.908203125}, {"start": 955.43, "end": 955.61, "word": " Y", "probability": 0.80126953125}, {"start": 955.61, "end": 955.79, "word": " and", "probability": 0.94775390625}, {"start": 955.79, "end": 956.13, "word": " X.", "probability": 0.99560546875}, {"start": 957.45, "end": 957.75, "word": " Now", "probability": 0.95947265625}, {"start": 957.75, "end": 957.93, "word": " the", "probability": 0.6376953125}, {"start": 957.93, "end": 958.35, "word": " value.", "probability": 0.97509765625}], "temperature": 1.0}, {"id": 39, "seek": 98158, "start": 959.14, "end": 981.58, "text": " of 1 for R squared means that 100% of the variation Y is explained by variation X. And that's really never happened in real life. Because R equals 1 or plus 1 or negative 1 cannot be happened in real life. 
So R squared", "tokens": [295, 502, 337, 497, 8889, 1355, 300, 2319, 4, 295, 264, 12990, 398, 307, 8825, 538, 12990, 1783, 13, 400, 300, 311, 534, 1128, 2011, 294, 957, 993, 13, 1436, 497, 6915, 502, 420, 1804, 502, 420, 3671, 502, 2644, 312, 2011, 294, 957, 993, 13, 407, 497, 8889], "avg_logprob": -0.2723437625169754, "compression_ratio": 1.5, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 959.14, "end": 959.56, "word": " of", "probability": 0.0887451171875}, {"start": 959.56, "end": 959.94, "word": " 1", "probability": 0.66552734375}, {"start": 959.94, "end": 960.56, "word": " for", "probability": 0.88720703125}, {"start": 960.56, "end": 960.8, "word": " R", "probability": 0.619140625}, {"start": 960.8, "end": 961.02, "word": " squared", "probability": 0.50341796875}, {"start": 961.02, "end": 961.4, "word": " means", "probability": 0.88623046875}, {"start": 961.4, "end": 961.86, "word": " that", "probability": 0.93603515625}, {"start": 961.86, "end": 963.54, "word": " 100", "probability": 0.81494140625}, {"start": 963.54, "end": 964.0, "word": "%", "probability": 0.9580078125}, {"start": 964.0, "end": 964.34, "word": " of", "probability": 0.96533203125}, {"start": 964.34, "end": 964.48, "word": " the", "probability": 0.8740234375}, {"start": 964.48, "end": 964.94, "word": " variation", "probability": 0.9306640625}, {"start": 964.94, "end": 965.62, "word": " Y", "probability": 0.48193359375}, {"start": 965.62, "end": 966.36, "word": " is", "probability": 0.88623046875}, {"start": 966.36, "end": 966.78, "word": " explained", "probability": 0.83154296875}, {"start": 966.78, "end": 967.04, "word": " by", "probability": 0.97119140625}, {"start": 967.04, "end": 967.46, "word": " variation", "probability": 0.8173828125}, {"start": 967.46, "end": 967.96, "word": " X.", "probability": 0.966796875}, {"start": 969.06, "end": 969.64, "word": " And", "probability": 0.9189453125}, {"start": 969.64, "end": 970.16, "word": " that's", "probability": 0.91796875}, 
{"start": 970.16, "end": 970.5, "word": " really", "probability": 0.86328125}, {"start": 970.5, "end": 970.92, "word": " never", "probability": 0.94384765625}, {"start": 970.92, "end": 971.22, "word": " happened", "probability": 0.5263671875}, {"start": 971.22, "end": 971.46, "word": " in", "probability": 0.93603515625}, {"start": 971.46, "end": 971.64, "word": " real", "probability": 0.80126953125}, {"start": 971.64, "end": 972.0, "word": " life.", "probability": 0.93603515625}, {"start": 972.56, "end": 973.32, "word": " Because", "probability": 0.92431640625}, {"start": 973.32, "end": 973.66, "word": " R", "probability": 0.84716796875}, {"start": 973.66, "end": 974.02, "word": " equals", "probability": 0.765625}, {"start": 974.02, "end": 974.3, "word": " 1", "probability": 0.78466796875}, {"start": 974.3, "end": 974.5, "word": " or", "probability": 0.84033203125}, {"start": 974.5, "end": 974.82, "word": " plus", "probability": 0.91552734375}, {"start": 974.82, "end": 975.2, "word": " 1", "probability": 0.93408203125}, {"start": 975.2, "end": 975.72, "word": " or", "probability": 0.8408203125}, {"start": 975.72, "end": 976.08, "word": " negative", "probability": 0.93310546875}, {"start": 976.08, "end": 976.52, "word": " 1", "probability": 0.984375}, {"start": 976.52, "end": 978.64, "word": " cannot", "probability": 0.7421875}, {"start": 978.64, "end": 978.88, "word": " be", "probability": 0.7509765625}, {"start": 978.88, "end": 979.04, "word": " happened", "probability": 0.295166015625}, {"start": 979.04, "end": 979.26, "word": " in", "probability": 0.9375}, {"start": 979.26, "end": 979.44, "word": " real", "probability": 0.8203125}, {"start": 979.44, "end": 979.72, "word": " life.", "probability": 0.94189453125}, {"start": 979.84, "end": 980.06, "word": " So", "probability": 0.95849609375}, {"start": 980.06, "end": 981.14, "word": " R", "probability": 0.7421875}, {"start": 981.14, "end": 981.58, "word": " squared", "probability": 0.81103515625}], "temperature": 
1.0}, {"id": 40, "seek": 100606, "start": 982.68, "end": 1006.06, "text": " always ranges between 0 and 1, never equals 1, because if R squared is 1, that means all the variation in Y is explained by the variation in X. But for sure there is an error, and that error may be due to some variables that are not included in the regression model. Maybe there is", "tokens": [1009, 22526, 1296, 1958, 293, 502, 11, 1128, 6915, 502, 11, 570, 498, 497, 8889, 307, 502, 11, 300, 1355, 439, 264, 12990, 294, 398, 307, 8825, 538, 264, 12990, 294, 1783, 13, 583, 337, 988, 456, 307, 364, 6713, 11, 293, 300, 6713, 815, 312, 3462, 281, 512, 9102, 300, 366, 406, 5556, 294, 264, 24590, 2316, 13, 2704, 456, 307], "avg_logprob": -0.20560516062236966, "compression_ratio": 1.558011049723757, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 982.68, "end": 983.1, "word": " always", "probability": 0.2666015625}, {"start": 983.1, "end": 983.6, "word": " ranges", "probability": 0.896484375}, {"start": 983.6, "end": 983.94, "word": " between", "probability": 0.8388671875}, {"start": 983.94, "end": 984.2, "word": " 0", "probability": 0.6865234375}, {"start": 984.2, "end": 984.4, "word": " and", "probability": 0.9287109375}, {"start": 984.4, "end": 984.64, "word": " 1,", "probability": 0.97705078125}, {"start": 985.0, "end": 985.18, "word": " never", "probability": 0.890625}, {"start": 985.18, "end": 985.66, "word": " equals", "probability": 0.91357421875}, {"start": 985.66, "end": 986.02, "word": " 1,", "probability": 0.8955078125}, {"start": 986.44, "end": 986.9, "word": " because", "probability": 0.88818359375}, {"start": 986.9, "end": 988.0, "word": " if", "probability": 0.71435546875}, {"start": 988.0, "end": 988.26, "word": " R", "probability": 0.57177734375}, {"start": 988.26, "end": 988.46, "word": " squared", "probability": 0.48388671875}, {"start": 988.46, "end": 988.7, "word": " is", "probability": 0.91943359375}, {"start": 988.7, "end": 988.9, "word": " 1,", "probability": 
0.85400390625}, {"start": 989.0, "end": 989.14, "word": " that", "probability": 0.8876953125}, {"start": 989.14, "end": 989.5, "word": " means", "probability": 0.92578125}, {"start": 989.5, "end": 990.14, "word": " all", "probability": 0.87451171875}, {"start": 990.14, "end": 990.3, "word": " the", "probability": 0.8740234375}, {"start": 990.3, "end": 990.72, "word": " variation", "probability": 0.8837890625}, {"start": 990.72, "end": 991.0, "word": " in", "probability": 0.5732421875}, {"start": 991.0, "end": 991.32, "word": " Y", "probability": 0.8232421875}, {"start": 991.32, "end": 992.44, "word": " is", "probability": 0.880859375}, {"start": 992.44, "end": 992.98, "word": " explained", "probability": 0.85009765625}, {"start": 992.98, "end": 993.28, "word": " by", "probability": 0.9677734375}, {"start": 993.28, "end": 993.44, "word": " the", "probability": 0.87890625}, {"start": 993.44, "end": 993.74, "word": " variation", "probability": 0.85498046875}, {"start": 993.74, "end": 993.94, "word": " in", "probability": 0.55810546875}, {"start": 993.94, "end": 994.24, "word": " X.", "probability": 0.9892578125}, {"start": 996.28, "end": 996.66, "word": " But", "probability": 0.828125}, {"start": 996.66, "end": 997.22, "word": " for", "probability": 0.7998046875}, {"start": 997.22, "end": 997.46, "word": " sure", "probability": 0.92236328125}, {"start": 997.46, "end": 997.66, "word": " there", "probability": 0.69873046875}, {"start": 997.66, "end": 997.8, "word": " is", "probability": 0.91748046875}, {"start": 997.8, "end": 997.98, "word": " an", "probability": 0.9521484375}, {"start": 997.98, "end": 998.22, "word": " error,", "probability": 0.896484375}, {"start": 998.82, "end": 999.02, "word": " and", "probability": 0.93212890625}, {"start": 999.02, "end": 999.24, "word": " that", "probability": 0.94287109375}, {"start": 999.24, "end": 999.5, "word": " error", "probability": 0.90771484375}, {"start": 999.5, "end": 999.7, "word": " may", "probability": 0.7216796875}, 
{"start": 999.7, "end": 999.78, "word": " be", "probability": 0.947265625}, {"start": 999.78, "end": 1000.04, "word": " due", "probability": 0.93994140625}, {"start": 1000.04, "end": 1000.38, "word": " to", "probability": 0.96337890625}, {"start": 1000.38, "end": 1000.9, "word": " some", "probability": 0.89892578125}, {"start": 1000.9, "end": 1001.36, "word": " variables", "probability": 0.9306640625}, {"start": 1001.36, "end": 1001.7, "word": " that", "probability": 0.91943359375}, {"start": 1001.7, "end": 1001.92, "word": " are", "probability": 0.9423828125}, {"start": 1001.92, "end": 1002.3, "word": " not", "probability": 0.94091796875}, {"start": 1002.3, "end": 1003.34, "word": " included", "probability": 0.89111328125}, {"start": 1003.34, "end": 1003.9, "word": " in", "probability": 0.94580078125}, {"start": 1003.9, "end": 1004.02, "word": " the", "probability": 0.9072265625}, {"start": 1004.02, "end": 1004.32, "word": " regression", "probability": 0.962890625}, {"start": 1004.32, "end": 1004.68, "word": " model.", "probability": 0.94873046875}, {"start": 1005.28, "end": 1005.54, "word": " Maybe", "probability": 0.9296875}, {"start": 1005.54, "end": 1005.84, "word": " there", "probability": 0.890625}, {"start": 1005.84, "end": 1006.06, "word": " is", "probability": 0.92333984375}], "temperature": 1.0}, {"id": 41, "seek": 103358, "start": 1006.93, "end": 1033.59, "text": " Random error in the selection, maybe the sample size is not large enough in order to determine the total variation in the dependent variable. So it makes sense that R squared will be less than 100. So generally speaking, R squared always between 0 and 1. 
Weaker linear relationship between X and Y, it means R squared is not 1.", "tokens": [37603, 6713, 294, 264, 9450, 11, 1310, 264, 6889, 2744, 307, 406, 2416, 1547, 294, 1668, 281, 6997, 264, 3217, 12990, 294, 264, 12334, 7006, 13, 407, 309, 1669, 2020, 300, 497, 8889, 486, 312, 1570, 813, 2319, 13, 407, 5101, 4124, 11, 497, 8889, 1009, 1296, 1958, 293, 502, 13, 492, 4003, 8213, 2480, 1296, 1783, 293, 398, 11, 309, 1355, 497, 8889, 307, 406, 502, 13], "avg_logprob": -0.16451539941456006, "compression_ratio": 1.607843137254902, "no_speech_prob": 0.0, "words": [{"start": 1006.93, "end": 1007.53, "word": " Random", "probability": 0.335205078125}, {"start": 1007.53, "end": 1007.95, "word": " error", "probability": 0.84619140625}, {"start": 1007.95, "end": 1008.81, "word": " in", "probability": 0.93017578125}, {"start": 1008.81, "end": 1008.99, "word": " the", "probability": 0.91162109375}, {"start": 1008.99, "end": 1009.63, "word": " selection,", "probability": 0.8623046875}, {"start": 1009.99, "end": 1010.61, "word": " maybe", "probability": 0.9111328125}, {"start": 1010.61, "end": 1010.87, "word": " the", "probability": 0.90625}, {"start": 1010.87, "end": 1011.17, "word": " sample", "probability": 0.8740234375}, {"start": 1011.17, "end": 1011.51, "word": " size", "probability": 0.8427734375}, {"start": 1011.51, "end": 1011.71, "word": " is", "probability": 0.9453125}, {"start": 1011.71, "end": 1011.91, "word": " not", "probability": 0.94482421875}, {"start": 1011.91, "end": 1012.21, "word": " large", "probability": 0.962890625}, {"start": 1012.21, "end": 1012.61, "word": " enough", "probability": 0.87109375}, {"start": 1012.61, "end": 1012.81, "word": " in", "probability": 0.87548828125}, {"start": 1012.81, "end": 1012.99, "word": " order", "probability": 0.91943359375}, {"start": 1012.99, "end": 1013.21, "word": " to", "probability": 0.9619140625}, {"start": 1013.21, "end": 1013.67, "word": " determine", "probability": 0.93310546875}, {"start": 1013.67, "end": 
1014.35, "word": " the", "probability": 0.89013671875}, {"start": 1014.35, "end": 1014.65, "word": " total", "probability": 0.88720703125}, {"start": 1014.65, "end": 1015.19, "word": " variation", "probability": 0.93701171875}, {"start": 1015.19, "end": 1015.43, "word": " in", "probability": 0.86767578125}, {"start": 1015.43, "end": 1015.53, "word": " the", "probability": 0.845703125}, {"start": 1015.53, "end": 1015.77, "word": " dependent", "probability": 0.60693359375}, {"start": 1015.77, "end": 1016.23, "word": " variable.", "probability": 0.916015625}, {"start": 1016.81, "end": 1017.07, "word": " So", "probability": 0.95849609375}, {"start": 1017.07, "end": 1017.53, "word": " it", "probability": 0.57763671875}, {"start": 1017.53, "end": 1017.63, "word": " makes", "probability": 0.82568359375}, {"start": 1017.63, "end": 1017.87, "word": " sense", "probability": 0.81689453125}, {"start": 1017.87, "end": 1018.17, "word": " that", "probability": 0.93017578125}, {"start": 1018.17, "end": 1018.41, "word": " R", "probability": 0.7373046875}, {"start": 1018.41, "end": 1018.67, "word": " squared", "probability": 0.67138671875}, {"start": 1018.67, "end": 1018.87, "word": " will", "probability": 0.77490234375}, {"start": 1018.87, "end": 1018.99, "word": " be", "probability": 0.9560546875}, {"start": 1018.99, "end": 1019.25, "word": " less", "probability": 0.9423828125}, {"start": 1019.25, "end": 1019.43, "word": " than", "probability": 0.94189453125}, {"start": 1019.43, "end": 1019.81, "word": " 100.", "probability": 0.91259765625}, {"start": 1022.27, "end": 1022.61, "word": " So", "probability": 0.95654296875}, {"start": 1022.61, "end": 1022.97, "word": " generally", "probability": 0.78076171875}, {"start": 1022.97, "end": 1023.51, "word": " speaking,", "probability": 0.86083984375}, {"start": 1023.87, "end": 1024.11, "word": " R", "probability": 0.97412109375}, {"start": 1024.11, "end": 1024.45, "word": " squared", "probability": 0.845703125}, {"start": 1024.45, "end": 
1025.09, "word": " always", "probability": 0.6064453125}, {"start": 1025.09, "end": 1025.53, "word": " between", "probability": 0.88232421875}, {"start": 1025.53, "end": 1025.77, "word": " 0", "probability": 0.7392578125}, {"start": 1025.77, "end": 1025.91, "word": " and", "probability": 0.9443359375}, {"start": 1025.91, "end": 1026.17, "word": " 1.", "probability": 0.99462890625}, {"start": 1028.09, "end": 1028.57, "word": " Weaker", "probability": 0.85498046875}, {"start": 1028.57, "end": 1029.41, "word": " linear", "probability": 0.71923828125}, {"start": 1029.41, "end": 1029.87, "word": " relationship", "probability": 0.9111328125}, {"start": 1029.87, "end": 1030.23, "word": " between", "probability": 0.892578125}, {"start": 1030.23, "end": 1030.43, "word": " X", "probability": 0.62060546875}, {"start": 1030.43, "end": 1030.55, "word": " and", "probability": 0.94384765625}, {"start": 1030.55, "end": 1030.81, "word": " Y,", "probability": 0.9970703125}, {"start": 1031.69, "end": 1031.93, "word": " it", "probability": 0.8681640625}, {"start": 1031.93, "end": 1032.23, "word": " means", "probability": 0.92578125}, {"start": 1032.23, "end": 1032.75, "word": " R", "probability": 0.93798828125}, {"start": 1032.75, "end": 1032.97, "word": " squared", "probability": 0.84423828125}, {"start": 1032.97, "end": 1033.13, "word": " is", "probability": 0.94775390625}, {"start": 1033.13, "end": 1033.31, "word": " not", "probability": 0.94384765625}, {"start": 1033.31, "end": 1033.59, "word": " 1.", "probability": 0.8125}], "temperature": 1.0}, {"id": 42, "seek": 104551, "start": 1035.33, "end": 1045.51, "text": " So R², since it lies between 0 and 1, it means sum, but not all the variation of Y is explained by the variation X.", "tokens": [407, 497, 27643, 11, 1670, 309, 9134, 1296, 1958, 293, 502, 11, 309, 1355, 2408, 11, 457, 406, 439, 264, 12990, 295, 398, 307, 8825, 538, 264, 12990, 1783, 13], "avg_logprob": -0.2271925345543892, "compression_ratio": 1.1818181818181819, 
"no_speech_prob": 0.0, "words": [{"start": 1035.33, "end": 1035.69, "word": " So", "probability": 0.7666015625}, {"start": 1035.69, "end": 1036.25, "word": " R²,", "probability": 0.434326171875}, {"start": 1036.63, "end": 1037.47, "word": " since", "probability": 0.85595703125}, {"start": 1037.47, "end": 1037.71, "word": " it", "probability": 0.93798828125}, {"start": 1037.71, "end": 1038.11, "word": " lies", "probability": 0.81396484375}, {"start": 1038.11, "end": 1038.45, "word": " between", "probability": 0.890625}, {"start": 1038.45, "end": 1038.67, "word": " 0", "probability": 0.93115234375}, {"start": 1038.67, "end": 1038.81, "word": " and", "probability": 0.9384765625}, {"start": 1038.81, "end": 1039.01, "word": " 1,", "probability": 0.99267578125}, {"start": 1039.09, "end": 1039.23, "word": " it", "probability": 0.77880859375}, {"start": 1039.23, "end": 1039.51, "word": " means", "probability": 0.92529296875}, {"start": 1039.51, "end": 1040.07, "word": " sum,", "probability": 0.40087890625}, {"start": 1041.07, "end": 1041.39, "word": " but", "probability": 0.93359375}, {"start": 1041.39, "end": 1041.75, "word": " not", "probability": 0.94677734375}, {"start": 1041.75, "end": 1042.11, "word": " all", "probability": 0.93701171875}, {"start": 1042.11, "end": 1042.31, "word": " the", "probability": 0.826171875}, {"start": 1042.31, "end": 1042.79, "word": " variation", "probability": 0.68603515625}, {"start": 1042.79, "end": 1043.55, "word": " of", "probability": 0.96875}, {"start": 1043.55, "end": 1043.85, "word": " Y", "probability": 0.888671875}, {"start": 1043.85, "end": 1044.01, "word": " is", "probability": 0.90478515625}, {"start": 1044.01, "end": 1044.39, "word": " explained", "probability": 0.83935546875}, {"start": 1044.39, "end": 1044.67, "word": " by", "probability": 0.9677734375}, {"start": 1044.67, "end": 1044.83, "word": " the", "probability": 0.8603515625}, {"start": 1044.83, "end": 1045.09, "word": " variation", "probability": 0.814453125}, 
{"start": 1045.09, "end": 1045.51, "word": " X.", "probability": 0.6845703125}], "temperature": 1.0}, {"id": 43, "seek": 107034, "start": 1046.67, "end": 1070.35, "text": " Because as mentioned before, if R squared is 90%, it means some, not all, the variation Y is explained by the variation X. And the remaining percent in this case, which is 10%, this one due to, as I mentioned, maybe there exists some other variables that affect the selling price besides its size, maybe location.", "tokens": [1436, 382, 2835, 949, 11, 498, 497, 8889, 307, 4289, 8923, 309, 1355, 512, 11, 406, 439, 11, 264, 12990, 398, 307, 8825, 538, 264, 12990, 1783, 13, 400, 264, 8877, 3043, 294, 341, 1389, 11, 597, 307, 1266, 8923, 341, 472, 3462, 281, 11, 382, 286, 2835, 11, 1310, 456, 8198, 512, 661, 9102, 300, 3345, 264, 6511, 3218, 11868, 1080, 2744, 11, 1310, 4914, 13], "avg_logprob": -0.18945312565740416, "compression_ratio": 1.57, "no_speech_prob": 0.0, "words": [{"start": 1046.67, "end": 1046.97, "word": " Because", "probability": 0.66455078125}, {"start": 1046.97, "end": 1047.17, "word": " as", "probability": 0.74853515625}, {"start": 1047.17, "end": 1047.57, "word": " mentioned", "probability": 0.56591796875}, {"start": 1047.57, "end": 1047.97, "word": " before,", "probability": 0.86279296875}, {"start": 1048.13, "end": 1048.21, "word": " if", "probability": 0.95263671875}, {"start": 1048.21, "end": 1048.41, "word": " R", "probability": 0.6826171875}, {"start": 1048.41, "end": 1048.61, "word": " squared", "probability": 0.52685546875}, {"start": 1048.61, "end": 1048.85, "word": " is", "probability": 0.9453125}, {"start": 1048.85, "end": 1049.87, "word": " 90%,", "probability": 0.48974609375}, {"start": 1049.87, "end": 1050.59, "word": " it", "probability": 0.916015625}, {"start": 1050.59, "end": 1051.01, "word": " means", "probability": 0.93212890625}, {"start": 1051.01, "end": 1051.45, "word": " some,", "probability": 0.79150390625}, {"start": 1051.53, "end": 1051.63, "word": " not", 
"probability": 0.94189453125}, {"start": 1051.63, "end": 1052.01, "word": " all,", "probability": 0.9541015625}, {"start": 1052.39, "end": 1052.51, "word": " the", "probability": 0.8330078125}, {"start": 1052.51, "end": 1052.87, "word": " variation", "probability": 0.88427734375}, {"start": 1052.87, "end": 1053.23, "word": " Y", "probability": 0.81494140625}, {"start": 1053.23, "end": 1053.47, "word": " is", "probability": 0.927734375}, {"start": 1053.47, "end": 1053.81, "word": " explained", "probability": 0.8583984375}, {"start": 1053.81, "end": 1054.13, "word": " by", "probability": 0.96875}, {"start": 1054.13, "end": 1054.31, "word": " the", "probability": 0.8701171875}, {"start": 1054.31, "end": 1054.53, "word": " variation", "probability": 0.84326171875}, {"start": 1054.53, "end": 1054.85, "word": " X.", "probability": 0.984375}, {"start": 1055.45, "end": 1055.83, "word": " And", "probability": 0.91943359375}, {"start": 1055.83, "end": 1055.97, "word": " the", "probability": 0.89013671875}, {"start": 1055.97, "end": 1056.31, "word": " remaining", "probability": 0.900390625}, {"start": 1056.31, "end": 1056.69, "word": " percent", "probability": 0.90576171875}, {"start": 1056.69, "end": 1056.99, "word": " in", "probability": 0.7041015625}, {"start": 1056.99, "end": 1057.21, "word": " this", "probability": 0.9443359375}, {"start": 1057.21, "end": 1057.45, "word": " case,", "probability": 0.9130859375}, {"start": 1057.55, "end": 1057.63, "word": " which", "probability": 0.9404296875}, {"start": 1057.63, "end": 1057.79, "word": " is", "probability": 0.9423828125}, {"start": 1057.79, "end": 1058.59, "word": " 10%,", "probability": 0.848388671875}, {"start": 1058.59, "end": 1059.67, "word": " this", "probability": 0.90673828125}, {"start": 1059.67, "end": 1059.91, "word": " one", "probability": 0.91015625}, {"start": 1059.91, "end": 1060.15, "word": " due", "probability": 0.68310546875}, {"start": 1060.15, "end": 1060.49, "word": " to,", "probability": 0.96875}, 
{"start": 1061.13, "end": 1061.37, "word": " as", "probability": 0.96435546875}, {"start": 1061.37, "end": 1061.49, "word": " I", "probability": 0.99658203125}, {"start": 1061.49, "end": 1061.87, "word": " mentioned,", "probability": 0.83642578125}, {"start": 1062.39, "end": 1062.59, "word": " maybe", "probability": 0.9345703125}, {"start": 1062.59, "end": 1062.79, "word": " there", "probability": 0.912109375}, {"start": 1062.79, "end": 1063.15, "word": " exists", "probability": 0.64013671875}, {"start": 1063.15, "end": 1063.49, "word": " some", "probability": 0.89013671875}, {"start": 1063.49, "end": 1063.83, "word": " other", "probability": 0.892578125}, {"start": 1063.83, "end": 1064.29, "word": " variables", "probability": 0.9248046875}, {"start": 1064.29, "end": 1064.71, "word": " that", "probability": 0.9287109375}, {"start": 1064.71, "end": 1065.39, "word": " affect", "probability": 0.86328125}, {"start": 1065.39, "end": 1066.49, "word": " the", "probability": 0.9052734375}, {"start": 1066.49, "end": 1066.75, "word": " selling", "probability": 0.888671875}, {"start": 1066.75, "end": 1067.33, "word": " price", "probability": 0.921875}, {"start": 1067.33, "end": 1068.41, "word": " besides", "probability": 0.52392578125}, {"start": 1068.41, "end": 1068.73, "word": " its", "probability": 0.86669921875}, {"start": 1068.73, "end": 1069.27, "word": " size,", "probability": 0.8564453125}, {"start": 1069.55, "end": 1069.75, "word": " maybe", "probability": 0.9296875}, {"start": 1069.75, "end": 1070.35, "word": " location.", "probability": 0.9326171875}], "temperature": 1.0}, {"id": 44, "seek": 109861, "start": 1071.7, "end": 1098.62, "text": " of the house affects its selling price. So R squared is always between 0 and 1, it's always positive. R squared equals 0, that only happens if there is no linear relationship between Y and X. Since R is 0, then R squared equals 0. 
That means the value of Y does not depend on X.", "tokens": [295, 264, 1782, 11807, 1080, 6511, 3218, 13, 407, 497, 8889, 307, 1009, 1296, 1958, 293, 502, 11, 309, 311, 1009, 3353, 13, 497, 8889, 6915, 1958, 11, 300, 787, 2314, 498, 456, 307, 572, 8213, 2480, 1296, 398, 293, 1783, 13, 4162, 497, 307, 1958, 11, 550, 497, 8889, 6915, 1958, 13, 663, 1355, 264, 2158, 295, 398, 775, 406, 5672, 322, 1783, 13], "avg_logprob": -0.1808712123469873, "compression_ratio": 1.5942857142857143, "no_speech_prob": 0.0, "words": [{"start": 1071.7, "end": 1072.02, "word": " of", "probability": 0.1278076171875}, {"start": 1072.02, "end": 1072.34, "word": " the", "probability": 0.91748046875}, {"start": 1072.34, "end": 1073.16, "word": " house", "probability": 0.88623046875}, {"start": 1073.16, "end": 1074.72, "word": " affects", "probability": 0.65478515625}, {"start": 1074.72, "end": 1075.74, "word": " its", "probability": 0.59423828125}, {"start": 1075.74, "end": 1076.08, "word": " selling", "probability": 0.88720703125}, {"start": 1076.08, "end": 1076.52, "word": " price.", "probability": 0.765625}, {"start": 1077.16, "end": 1077.44, "word": " So", "probability": 0.95458984375}, {"start": 1077.44, "end": 1077.66, "word": " R", "probability": 0.43017578125}, {"start": 1077.66, "end": 1077.9, "word": " squared", "probability": 0.69482421875}, {"start": 1077.9, "end": 1078.14, "word": " is", "probability": 0.9375}, {"start": 1078.14, "end": 1078.7, "word": " always", "probability": 0.9140625}, {"start": 1078.7, "end": 1079.3, "word": " between", "probability": 0.89599609375}, {"start": 1079.3, "end": 1079.68, "word": " 0", "probability": 0.58544921875}, {"start": 1079.68, "end": 1079.84, "word": " and", "probability": 0.92919921875}, {"start": 1079.84, "end": 1080.06, "word": " 1,", "probability": 0.9765625}, {"start": 1080.16, "end": 1080.4, "word": " it's", "probability": 0.8828125}, {"start": 1080.4, "end": 1080.76, "word": " always", "probability": 0.90087890625}, {"start": 
1080.76, "end": 1081.26, "word": " positive.", "probability": 0.9384765625}, {"start": 1082.3, "end": 1082.64, "word": " R", "probability": 0.9921875}, {"start": 1082.64, "end": 1082.96, "word": " squared", "probability": 0.8193359375}, {"start": 1082.96, "end": 1083.36, "word": " equals", "probability": 0.84033203125}, {"start": 1083.36, "end": 1083.76, "word": " 0,", "probability": 0.82568359375}, {"start": 1084.14, "end": 1084.48, "word": " that", "probability": 0.8955078125}, {"start": 1084.48, "end": 1085.06, "word": " only", "probability": 0.8896484375}, {"start": 1085.06, "end": 1085.4, "word": " happens", "probability": 0.6376953125}, {"start": 1085.4, "end": 1086.26, "word": " if", "probability": 0.91845703125}, {"start": 1086.26, "end": 1086.52, "word": " there", "probability": 0.908203125}, {"start": 1086.52, "end": 1086.72, "word": " is", "probability": 0.90673828125}, {"start": 1086.72, "end": 1087.18, "word": " no", "probability": 0.95068359375}, {"start": 1087.18, "end": 1088.74, "word": " linear", "probability": 0.896484375}, {"start": 1088.74, "end": 1089.52, "word": " relationship", "probability": 0.91748046875}, {"start": 1089.52, "end": 1089.86, "word": " between", "probability": 0.8994140625}, {"start": 1089.86, "end": 1090.08, "word": " Y", "probability": 0.50048828125}, {"start": 1090.08, "end": 1090.3, "word": " and", "probability": 0.9462890625}, {"start": 1090.3, "end": 1090.62, "word": " X.", "probability": 0.99072265625}, {"start": 1091.32, "end": 1091.82, "word": " Since", "probability": 0.8583984375}, {"start": 1091.82, "end": 1092.1, "word": " R", "probability": 0.9580078125}, {"start": 1092.1, "end": 1092.26, "word": " is", "probability": 0.93359375}, {"start": 1092.26, "end": 1092.62, "word": " 0,", "probability": 0.79833984375}, {"start": 1093.06, "end": 1093.4, "word": " then", "probability": 0.84423828125}, {"start": 1093.4, "end": 1093.7, "word": " R", "probability": 0.98828125}, {"start": 1093.7, "end": 1093.98, "word": " 
squared", "probability": 0.86328125}, {"start": 1093.98, "end": 1094.28, "word": " equals", "probability": 0.9111328125}, {"start": 1094.28, "end": 1094.58, "word": " 0.", "probability": 0.97119140625}, {"start": 1095.8, "end": 1096.26, "word": " That", "probability": 0.916015625}, {"start": 1096.26, "end": 1096.52, "word": " means", "probability": 0.92919921875}, {"start": 1096.52, "end": 1096.68, "word": " the", "probability": 0.876953125}, {"start": 1096.68, "end": 1096.94, "word": " value", "probability": 0.978515625}, {"start": 1096.94, "end": 1097.1, "word": " of", "probability": 0.93115234375}, {"start": 1097.1, "end": 1097.24, "word": " Y", "probability": 0.98779296875}, {"start": 1097.24, "end": 1097.5, "word": " does", "probability": 0.97802734375}, {"start": 1097.5, "end": 1097.72, "word": " not", "probability": 0.943359375}, {"start": 1097.72, "end": 1098.12, "word": " depend", "probability": 0.927734375}, {"start": 1098.12, "end": 1098.3, "word": " on", "probability": 0.94677734375}, {"start": 1098.3, "end": 1098.62, "word": " X.", "probability": 0.98974609375}], "temperature": 1.0}, {"id": 45, "seek": 112105, "start": 1099.35, "end": 1121.05, "text": " Because here, as X increases, Y stays nearly in the same position. It means as X increases, Y stays the same, constant. So that means there is no relationship or actually there is no linear relationship because it could be there exists non-linear relationship. 
But here we are.", "tokens": [1436, 510, 11, 382, 1783, 8637, 11, 398, 10834, 6217, 294, 264, 912, 2535, 13, 467, 1355, 382, 1783, 8637, 11, 398, 10834, 264, 912, 11, 5754, 13, 407, 300, 1355, 456, 307, 572, 2480, 420, 767, 456, 307, 572, 8213, 2480, 570, 309, 727, 312, 456, 8198, 2107, 12, 28263, 2480, 13, 583, 510, 321, 366, 13], "avg_logprob": -0.24708685733504215, "compression_ratio": 1.8051948051948052, "no_speech_prob": 0.0, "words": [{"start": 1099.35, "end": 1099.71, "word": " Because", "probability": 0.65771484375}, {"start": 1099.71, "end": 1100.07, "word": " here,", "probability": 0.8017578125}, {"start": 1100.53, "end": 1100.65, "word": " as", "probability": 0.92529296875}, {"start": 1100.65, "end": 1100.87, "word": " X", "probability": 0.54248046875}, {"start": 1100.87, "end": 1101.37, "word": " increases,", "probability": 0.9306640625}, {"start": 1101.93, "end": 1102.27, "word": " Y", "probability": 0.9765625}, {"start": 1102.27, "end": 1103.95, "word": " stays", "probability": 0.6298828125}, {"start": 1103.95, "end": 1104.49, "word": " nearly", "probability": 0.60205078125}, {"start": 1104.49, "end": 1104.71, "word": " in", "probability": 0.89453125}, {"start": 1104.71, "end": 1104.87, "word": " the", "probability": 0.91259765625}, {"start": 1104.87, "end": 1105.21, "word": " same", "probability": 0.90380859375}, {"start": 1105.21, "end": 1106.07, "word": " position.", "probability": 0.9345703125}, {"start": 1106.69, "end": 1106.83, "word": " It", "probability": 0.8779296875}, {"start": 1106.83, "end": 1107.19, "word": " means", "probability": 0.9267578125}, {"start": 1107.19, "end": 1107.59, "word": " as", "probability": 0.517578125}, {"start": 1107.59, "end": 1107.87, "word": " X", "probability": 0.98291015625}, {"start": 1107.87, "end": 1108.43, "word": " increases,", "probability": 0.9375}, {"start": 1108.85, "end": 1109.01, "word": " Y", "probability": 0.9892578125}, {"start": 1109.01, "end": 1109.31, "word": " stays", "probability": 
0.85791015625}, {"start": 1109.31, "end": 1109.47, "word": " the", "probability": 0.3916015625}, {"start": 1109.47, "end": 1109.73, "word": " same,", "probability": 0.8916015625}, {"start": 1109.83, "end": 1110.19, "word": " constant.", "probability": 0.91943359375}, {"start": 1111.01, "end": 1111.25, "word": " So", "probability": 0.91650390625}, {"start": 1111.25, "end": 1111.55, "word": " that", "probability": 0.7568359375}, {"start": 1111.55, "end": 1111.75, "word": " means", "probability": 0.93701171875}, {"start": 1111.75, "end": 1111.93, "word": " there", "probability": 0.84326171875}, {"start": 1111.93, "end": 1112.07, "word": " is", "probability": 0.92626953125}, {"start": 1112.07, "end": 1112.23, "word": " no", "probability": 0.94970703125}, {"start": 1112.23, "end": 1112.79, "word": " relationship", "probability": 0.890625}, {"start": 1112.79, "end": 1113.25, "word": " or", "probability": 0.44873046875}, {"start": 1113.25, "end": 1113.73, "word": " actually", "probability": 0.85107421875}, {"start": 1113.73, "end": 1114.49, "word": " there", "probability": 0.76416015625}, {"start": 1114.49, "end": 1114.67, "word": " is", "probability": 0.9453125}, {"start": 1114.67, "end": 1114.89, "word": " no", "probability": 0.9443359375}, {"start": 1114.89, "end": 1115.41, "word": " linear", "probability": 0.91748046875}, {"start": 1115.41, "end": 1116.07, "word": " relationship", "probability": 0.92138671875}, {"start": 1116.07, "end": 1116.57, "word": " because", "probability": 0.400390625}, {"start": 1116.57, "end": 1116.87, "word": " it", "probability": 0.677734375}, {"start": 1116.87, "end": 1117.01, "word": " could", "probability": 0.90185546875}, {"start": 1117.01, "end": 1117.29, "word": " be", "probability": 0.9248046875}, {"start": 1117.29, "end": 1118.17, "word": " there", "probability": 0.73583984375}, {"start": 1118.17, "end": 1118.63, "word": " exists", "probability": 0.82470703125}, {"start": 1118.63, "end": 1119.23, "word": " non", "probability": 
0.953125}, {"start": 1119.23, "end": 1119.57, "word": "-linear", "probability": 0.727294921875}, {"start": 1119.57, "end": 1120.07, "word": " relationship.", "probability": 0.853515625}, {"start": 1120.37, "end": 1120.51, "word": " But", "probability": 0.92919921875}, {"start": 1120.51, "end": 1120.71, "word": " here", "probability": 0.84423828125}, {"start": 1120.71, "end": 1120.85, "word": " we", "probability": 0.86669921875}, {"start": 1120.85, "end": 1121.05, "word": " are.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 46, "seek": 114474, "start": 1121.88, "end": 1144.74, "text": " Just focusing on linear relationship between X and Y. So if R is zero, that means the value of Y does not depend on the value of X. So as X increases, Y is constant. Now for the previous example, R was 0.7621. To determine the coefficient of determination,", "tokens": [1449, 8416, 322, 8213, 2480, 1296, 1783, 293, 398, 13, 407, 498, 497, 307, 4018, 11, 300, 1355, 264, 2158, 295, 398, 775, 406, 5672, 322, 264, 2158, 295, 1783, 13, 407, 382, 1783, 8637, 11, 398, 307, 5754, 13, 823, 337, 264, 3894, 1365, 11, 497, 390, 1958, 13, 25026, 4436, 13, 1407, 6997, 264, 17619, 295, 18432, 11], "avg_logprob": -0.1704661816847129, "compression_ratio": 1.4120879120879122, "no_speech_prob": 0.0, "words": [{"start": 1121.88, "end": 1122.44, "word": " Just", "probability": 0.428466796875}, {"start": 1122.44, "end": 1123.0, "word": " focusing", "probability": 0.89453125}, {"start": 1123.0, "end": 1123.44, "word": " on", "probability": 0.94677734375}, {"start": 1123.44, "end": 1124.3, "word": " linear", "probability": 0.7041015625}, {"start": 1124.3, "end": 1124.98, "word": " relationship", "probability": 0.90625}, {"start": 1124.98, "end": 1125.32, "word": " between", "probability": 0.91259765625}, {"start": 1125.32, "end": 1125.48, "word": " X", "probability": 0.56396484375}, {"start": 1125.48, "end": 1125.64, "word": " and", "probability": 0.9345703125}, {"start": 1125.64, "end": 
1125.86, "word": " Y.", "probability": 0.9892578125}, {"start": 1126.38, "end": 1126.58, "word": " So", "probability": 0.8447265625}, {"start": 1126.58, "end": 1127.78, "word": " if", "probability": 0.60400390625}, {"start": 1127.78, "end": 1128.06, "word": " R", "probability": 0.82080078125}, {"start": 1128.06, "end": 1128.3, "word": " is", "probability": 0.86376953125}, {"start": 1128.3, "end": 1128.56, "word": " zero,", "probability": 0.54443359375}, {"start": 1128.68, "end": 1128.86, "word": " that", "probability": 0.93017578125}, {"start": 1128.86, "end": 1129.28, "word": " means", "probability": 0.93017578125}, {"start": 1129.28, "end": 1130.02, "word": " the", "probability": 0.81591796875}, {"start": 1130.02, "end": 1130.3, "word": " value", "probability": 0.9755859375}, {"start": 1130.3, "end": 1130.44, "word": " of", "probability": 0.9482421875}, {"start": 1130.44, "end": 1130.56, "word": " Y", "probability": 0.966796875}, {"start": 1130.56, "end": 1130.76, "word": " does", "probability": 0.97021484375}, {"start": 1130.76, "end": 1130.94, "word": " not", "probability": 0.9521484375}, {"start": 1130.94, "end": 1131.24, "word": " depend", "probability": 0.91748046875}, {"start": 1131.24, "end": 1131.5, "word": " on", "probability": 0.94775390625}, {"start": 1131.5, "end": 1131.66, "word": " the", "probability": 0.90380859375}, {"start": 1131.66, "end": 1131.9, "word": " value", "probability": 0.97607421875}, {"start": 1131.9, "end": 1132.06, "word": " of", "probability": 0.818359375}, {"start": 1132.06, "end": 1132.18, "word": " X.", "probability": 0.98876953125}, {"start": 1132.26, "end": 1132.4, "word": " So", "probability": 0.94140625}, {"start": 1132.4, "end": 1132.68, "word": " as", "probability": 0.72314453125}, {"start": 1132.68, "end": 1133.02, "word": " X", "probability": 0.98486328125}, {"start": 1133.02, "end": 1133.72, "word": " increases,", "probability": 0.93505859375}, {"start": 1134.36, "end": 1134.66, "word": " Y", "probability": 
0.9853515625}, {"start": 1134.66, "end": 1135.48, "word": " is", "probability": 0.345947265625}, {"start": 1135.48, "end": 1136.22, "word": " constant.", "probability": 0.83251953125}, {"start": 1137.5, "end": 1138.06, "word": " Now", "probability": 0.89013671875}, {"start": 1138.06, "end": 1138.22, "word": " for", "probability": 0.6513671875}, {"start": 1138.22, "end": 1138.36, "word": " the", "probability": 0.9208984375}, {"start": 1138.36, "end": 1138.56, "word": " previous", "probability": 0.80615234375}, {"start": 1138.56, "end": 1139.08, "word": " example,", "probability": 0.974609375}, {"start": 1139.8, "end": 1140.08, "word": " R", "probability": 0.97900390625}, {"start": 1140.08, "end": 1140.42, "word": " was", "probability": 0.93798828125}, {"start": 1140.42, "end": 1140.64, "word": " 0", "probability": 0.78369140625}, {"start": 1140.64, "end": 1141.58, "word": ".7621.", "probability": 0.9210611979166666}, {"start": 1142.72, "end": 1142.92, "word": " To", "probability": 0.9462890625}, {"start": 1142.92, "end": 1143.34, "word": " determine", "probability": 0.90771484375}, {"start": 1143.34, "end": 1143.62, "word": " the", "probability": 0.9189453125}, {"start": 1143.62, "end": 1144.04, "word": " coefficient", "probability": 0.89990234375}, {"start": 1144.04, "end": 1144.26, "word": " of", "probability": 0.96923828125}, {"start": 1144.26, "end": 1144.74, "word": " determination,", "probability": 0.89404296875}], "temperature": 1.0}, {"id": 47, "seek": 116998, "start": 1145.84, "end": 1169.98, "text": " One more time, square this value, that's only valid for simple linear regression. Otherwise, you cannot square the value of R in order to determine the coefficient of determination. 
So again, this is only true for simple linear regression.", "tokens": [1485, 544, 565, 11, 3732, 341, 2158, 11, 300, 311, 787, 7363, 337, 2199, 8213, 24590, 13, 10328, 11, 291, 2644, 3732, 264, 2158, 295, 497, 294, 1668, 281, 6997, 264, 17619, 295, 18432, 13, 407, 797, 11, 341, 307, 787, 2074, 337, 2199, 8213, 24590, 13], "avg_logprob": -0.15429688214013973, "compression_ratio": 1.643835616438356, "no_speech_prob": 0.0, "words": [{"start": 1145.84, "end": 1146.24, "word": " One", "probability": 0.41650390625}, {"start": 1146.24, "end": 1146.42, "word": " more", "probability": 0.93701171875}, {"start": 1146.42, "end": 1146.76, "word": " time,", "probability": 0.88916015625}, {"start": 1147.46, "end": 1147.76, "word": " square", "probability": 0.65625}, {"start": 1147.76, "end": 1148.1, "word": " this", "probability": 0.89697265625}, {"start": 1148.1, "end": 1148.5, "word": " value,", "probability": 0.9677734375}, {"start": 1148.9, "end": 1149.36, "word": " that's", "probability": 0.889892578125}, {"start": 1149.36, "end": 1149.72, "word": " only", "probability": 0.9287109375}, {"start": 1149.72, "end": 1150.22, "word": " valid", "probability": 0.95751953125}, {"start": 1150.22, "end": 1151.2, "word": " for", "probability": 0.92822265625}, {"start": 1151.2, "end": 1151.76, "word": " simple", "probability": 0.86572265625}, {"start": 1151.76, "end": 1152.2, "word": " linear", "probability": 0.92236328125}, {"start": 1152.2, "end": 1152.62, "word": " regression.", "probability": 0.93505859375}, {"start": 1152.94, "end": 1153.34, "word": " Otherwise,", "probability": 0.904296875}, {"start": 1154.02, "end": 1154.2, "word": " you", "probability": 0.9619140625}, {"start": 1154.2, "end": 1154.54, "word": " cannot", "probability": 0.84423828125}, {"start": 1154.54, "end": 1154.98, "word": " square", "probability": 0.92431640625}, {"start": 1154.98, "end": 1155.14, "word": " the", "probability": 0.91357421875}, {"start": 1155.14, "end": 1155.36, "word": " value", 
"probability": 0.97509765625}, {"start": 1155.36, "end": 1155.56, "word": " of", "probability": 0.9580078125}, {"start": 1155.56, "end": 1155.8, "word": " R", "probability": 0.62109375}, {"start": 1155.8, "end": 1156.06, "word": " in", "probability": 0.82666015625}, {"start": 1156.06, "end": 1156.28, "word": " order", "probability": 0.9326171875}, {"start": 1156.28, "end": 1156.46, "word": " to", "probability": 0.95751953125}, {"start": 1156.46, "end": 1156.88, "word": " determine", "probability": 0.91943359375}, {"start": 1156.88, "end": 1157.58, "word": " the", "probability": 0.90185546875}, {"start": 1157.58, "end": 1158.04, "word": " coefficient", "probability": 0.9072265625}, {"start": 1158.04, "end": 1158.26, "word": " of", "probability": 0.96337890625}, {"start": 1158.26, "end": 1158.76, "word": " determination.", "probability": 0.9287109375}, {"start": 1159.66, "end": 1159.94, "word": " So", "probability": 0.93408203125}, {"start": 1159.94, "end": 1160.24, "word": " again,", "probability": 0.8203125}, {"start": 1160.42, "end": 1160.68, "word": " this", "probability": 0.93115234375}, {"start": 1160.68, "end": 1160.82, "word": " is", "probability": 0.94677734375}, {"start": 1160.82, "end": 1161.2, "word": " only", "probability": 0.92138671875}, {"start": 1161.2, "end": 1161.54, "word": " true", "probability": 0.9677734375}, {"start": 1161.54, "end": 1166.42, "word": " for", "probability": 0.8818359375}, {"start": 1166.42, "end": 1167.54, "word": " simple", "probability": 0.9111328125}, {"start": 1167.54, "end": 1169.62, "word": " linear", "probability": 0.89306640625}, {"start": 1169.62, "end": 1169.98, "word": " regression.", "probability": 0.8486328125}], "temperature": 1.0}, {"id": 48, "seek": 119804, "start": 1175.46, "end": 1198.04, "text": " So R squared is 0.7621 squared will give 0.5808. Now, the meaning of this value, first you have to multiply this by 100. 
So 58.08% of the variation in house prices is explained by the variation in square feet.", "tokens": [407, 497, 8889, 307, 1958, 13, 25026, 4436, 8889, 486, 976, 1958, 13, 20, 4702, 23, 13, 823, 11, 264, 3620, 295, 341, 2158, 11, 700, 291, 362, 281, 12972, 341, 538, 2319, 13, 407, 21786, 13, 16133, 4, 295, 264, 12990, 294, 1782, 7901, 307, 8825, 538, 264, 12990, 294, 3732, 3521, 13], "avg_logprob": -0.16178977001797068, "compression_ratio": 1.381578947368421, "no_speech_prob": 0.0, "words": [{"start": 1175.46, "end": 1175.72, "word": " So", "probability": 0.83349609375}, {"start": 1175.72, "end": 1175.92, "word": " R", "probability": 0.443115234375}, {"start": 1175.92, "end": 1176.38, "word": " squared", "probability": 0.471435546875}, {"start": 1176.38, "end": 1177.24, "word": " is", "probability": 0.8740234375}, {"start": 1177.24, "end": 1177.42, "word": " 0", "probability": 0.71337890625}, {"start": 1177.42, "end": 1178.86, "word": ".7621", "probability": 0.86328125}, {"start": 1178.86, "end": 1179.46, "word": " squared", "probability": 0.72021484375}, {"start": 1179.46, "end": 1179.86, "word": " will", "probability": 0.6826171875}, {"start": 1179.86, "end": 1180.08, "word": " give", "probability": 0.7744140625}, {"start": 1180.08, "end": 1180.28, "word": " 0", "probability": 0.951171875}, {"start": 1180.28, "end": 1181.32, "word": ".5808.", "probability": 0.96337890625}, {"start": 1182.24, "end": 1182.78, "word": " Now,", "probability": 0.92724609375}, {"start": 1183.3, "end": 1183.78, "word": " the", "probability": 0.9033203125}, {"start": 1183.78, "end": 1184.04, "word": " meaning", "probability": 0.88037109375}, {"start": 1184.04, "end": 1184.26, "word": " of", "probability": 0.96435546875}, {"start": 1184.26, "end": 1184.48, "word": " this", "probability": 0.94384765625}, {"start": 1184.48, "end": 1184.88, "word": " value,", "probability": 0.9765625}, {"start": 1185.3, "end": 1185.66, "word": " first", "probability": 0.865234375}, {"start": 1185.66, "end": 
1185.82, "word": " you", "probability": 0.83349609375}, {"start": 1185.82, "end": 1185.98, "word": " have", "probability": 0.93603515625}, {"start": 1185.98, "end": 1186.12, "word": " to", "probability": 0.9677734375}, {"start": 1186.12, "end": 1186.54, "word": " multiply", "probability": 0.90625}, {"start": 1186.54, "end": 1186.84, "word": " this", "probability": 0.92529296875}, {"start": 1186.84, "end": 1187.16, "word": " by", "probability": 0.97412109375}, {"start": 1187.16, "end": 1187.9, "word": " 100.", "probability": 0.86181640625}, {"start": 1188.74, "end": 1189.38, "word": " So", "probability": 0.8798828125}, {"start": 1189.38, "end": 1189.82, "word": " 58", "probability": 0.802734375}, {"start": 1189.82, "end": 1190.74, "word": ".08", "probability": 0.991455078125}, {"start": 1190.74, "end": 1191.68, "word": "%", "probability": 0.78857421875}, {"start": 1191.68, "end": 1192.68, "word": " of", "probability": 0.95068359375}, {"start": 1192.68, "end": 1192.84, "word": " the", "probability": 0.91796875}, {"start": 1192.84, "end": 1193.28, "word": " variation", "probability": 0.89208984375}, {"start": 1193.28, "end": 1193.6, "word": " in", "probability": 0.90283203125}, {"start": 1193.6, "end": 1193.84, "word": " house", "probability": 0.87890625}, {"start": 1193.84, "end": 1194.5, "word": " prices", "probability": 0.923828125}, {"start": 1194.5, "end": 1195.26, "word": " is", "probability": 0.90478515625}, {"start": 1195.26, "end": 1195.7, "word": " explained", "probability": 0.88427734375}, {"start": 1195.7, "end": 1196.3, "word": " by", "probability": 0.96875}, {"start": 1196.3, "end": 1196.48, "word": " the", "probability": 0.8662109375}, {"start": 1196.48, "end": 1196.92, "word": " variation", "probability": 0.896484375}, {"start": 1196.92, "end": 1197.44, "word": " in", "probability": 0.919921875}, {"start": 1197.44, "end": 1197.74, "word": " square", "probability": 0.9072265625}, {"start": 1197.74, "end": 1198.04, "word": " feet.", "probability": 
0.95947265625}], "temperature": 1.0}, {"id": 49, "seek": 122191, "start": 1199.87, "end": 1221.91, "text": " So 58, around 0.08% of the variation in size of the house, I'm sorry, in the price is explained by its size. So size by itself.", "tokens": [407, 21786, 11, 926, 1958, 13, 16133, 4, 295, 264, 12990, 294, 2744, 295, 264, 1782, 11, 286, 478, 2597, 11, 294, 264, 3218, 307, 8825, 538, 1080, 2744, 13, 407, 2744, 538, 2564, 13], "avg_logprob": -0.2847222197386954, "compression_ratio": 1.2075471698113207, "no_speech_prob": 0.0, "words": [{"start": 1199.87, "end": 1200.79, "word": " So", "probability": 0.67626953125}, {"start": 1200.79, "end": 1201.71, "word": " 58,", "probability": 0.50634765625}, {"start": 1202.25, "end": 1202.73, "word": " around", "probability": 0.91748046875}, {"start": 1202.73, "end": 1203.19, "word": " 0", "probability": 0.60693359375}, {"start": 1203.19, "end": 1203.61, "word": ".08", "probability": 0.84228515625}, {"start": 1203.61, "end": 1204.09, "word": "%", "probability": 0.6845703125}, {"start": 1204.09, "end": 1204.65, "word": " of", "probability": 0.953125}, {"start": 1204.65, "end": 1204.79, "word": " the", "probability": 0.9052734375}, {"start": 1204.79, "end": 1205.19, "word": " variation", "probability": 0.853515625}, {"start": 1205.19, "end": 1205.63, "word": " in", "probability": 0.88818359375}, {"start": 1205.63, "end": 1206.11, "word": " size", "probability": 0.329345703125}, {"start": 1206.11, "end": 1206.27, "word": " of", "probability": 0.94921875}, {"start": 1206.27, "end": 1206.39, "word": " the", "probability": 0.9091796875}, {"start": 1206.39, "end": 1206.73, "word": " house,", "probability": 0.7900390625}, {"start": 1207.61, "end": 1207.71, "word": " I'm", "probability": 0.872314453125}, {"start": 1207.71, "end": 1207.93, "word": " sorry,", "probability": 0.87451171875}, {"start": 1208.33, "end": 1208.49, "word": " in", "probability": 0.865234375}, {"start": 1208.49, "end": 1208.91, "word": " the", "probability": 
0.8984375}, {"start": 1208.91, "end": 1210.31, "word": " price", "probability": 0.943359375}, {"start": 1210.31, "end": 1212.45, "word": " is", "probability": 0.55712890625}, {"start": 1212.45, "end": 1213.07, "word": " explained", "probability": 0.9072265625}, {"start": 1213.07, "end": 1216.51, "word": " by", "probability": 0.96337890625}, {"start": 1216.51, "end": 1217.27, "word": " its", "probability": 0.82666015625}, {"start": 1217.27, "end": 1217.69, "word": " size.", "probability": 0.85693359375}, {"start": 1219.77, "end": 1220.69, "word": " So", "probability": 0.89013671875}, {"start": 1220.69, "end": 1221.03, "word": " size", "probability": 0.650390625}, {"start": 1221.03, "end": 1221.27, "word": " by", "probability": 0.9560546875}, {"start": 1221.27, "end": 1221.91, "word": " itself.", "probability": 0.83251953125}], "temperature": 1.0}, {"id": 50, "seek": 125004, "start": 1223.42, "end": 1250.04, "text": " Size only explains around 50-80% of the selling price of a house. Now the remaining percent which is around, this is the error, or the remaining percent, this one is due to other variables, other independent variables.", "tokens": [35818, 787, 13948, 926, 2625, 12, 4702, 4, 295, 264, 6511, 3218, 295, 257, 1782, 13, 823, 264, 8877, 3043, 597, 307, 926, 11, 341, 307, 264, 6713, 11, 420, 264, 8877, 3043, 11, 341, 472, 307, 3462, 281, 661, 9102, 11, 661, 6695, 9102, 13], "avg_logprob": -0.2062832383399314, "compression_ratio": 1.5314685314685315, "no_speech_prob": 0.0, "words": [{"start": 1223.42, "end": 1223.86, "word": " Size", "probability": 0.41845703125}, {"start": 1223.86, "end": 1224.32, "word": " only", "probability": 0.65185546875}, {"start": 1224.32, "end": 1225.42, "word": " explains", "probability": 0.72314453125}, {"start": 1225.42, "end": 1225.8, "word": " around", "probability": 0.88916015625}, {"start": 1225.8, "end": 1226.24, "word": " 50", "probability": 0.88671875}, {"start": 1226.24, "end": 1226.52, "word": "-80", "probability": 
0.83544921875}, {"start": 1226.52, "end": 1227.06, "word": "%", "probability": 0.79443359375}, {"start": 1227.06, "end": 1227.88, "word": " of", "probability": 0.96630859375}, {"start": 1227.88, "end": 1228.12, "word": " the", "probability": 0.88623046875}, {"start": 1228.12, "end": 1228.38, "word": " selling", "probability": 0.861328125}, {"start": 1228.38, "end": 1228.84, "word": " price", "probability": 0.93408203125}, {"start": 1228.84, "end": 1228.98, "word": " of", "probability": 0.9521484375}, {"start": 1228.98, "end": 1229.08, "word": " a", "probability": 0.95751953125}, {"start": 1229.08, "end": 1229.42, "word": " house.", "probability": 0.87255859375}, {"start": 1230.2, "end": 1230.32, "word": " Now", "probability": 0.85986328125}, {"start": 1230.32, "end": 1230.52, "word": " the", "probability": 0.59423828125}, {"start": 1230.52, "end": 1230.82, "word": " remaining", "probability": 0.8798828125}, {"start": 1230.82, "end": 1231.34, "word": " percent", "probability": 0.7802734375}, {"start": 1231.34, "end": 1231.62, "word": " which", "probability": 0.62744140625}, {"start": 1231.62, "end": 1231.8, "word": " is", "probability": 0.9404296875}, {"start": 1231.8, "end": 1232.2, "word": " around,", "probability": 0.93359375}, {"start": 1234.26, "end": 1234.74, "word": " this", "probability": 0.87890625}, {"start": 1234.74, "end": 1234.86, "word": " is", "probability": 0.9462890625}, {"start": 1234.86, "end": 1235.0, "word": " the", "probability": 0.87939453125}, {"start": 1235.0, "end": 1235.32, "word": " error,", "probability": 0.91455078125}, {"start": 1235.8, "end": 1235.94, "word": " or", "probability": 0.75146484375}, {"start": 1235.94, "end": 1236.1, "word": " the", "probability": 0.8984375}, {"start": 1236.1, "end": 1236.36, "word": " remaining", "probability": 0.8994140625}, {"start": 1236.36, "end": 1236.86, "word": " percent,", "probability": 0.93505859375}, {"start": 1237.88, "end": 1238.2, "word": " this", "probability": 0.93505859375}, {"start": 
1238.2, "end": 1238.42, "word": " one", "probability": 0.9208984375}, {"start": 1238.42, "end": 1238.58, "word": " is", "probability": 0.951171875}, {"start": 1238.58, "end": 1238.86, "word": " due", "probability": 0.93408203125}, {"start": 1238.86, "end": 1240.04, "word": " to", "probability": 0.96923828125}, {"start": 1240.04, "end": 1241.78, "word": " other", "probability": 0.87939453125}, {"start": 1241.78, "end": 1242.38, "word": " variables,", "probability": 0.935546875}, {"start": 1243.78, "end": 1244.18, "word": " other", "probability": 0.88671875}, {"start": 1244.18, "end": 1246.94, "word": " independent", "probability": 0.88720703125}, {"start": 1246.94, "end": 1250.04, "word": " variables.", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 51, "seek": 127650, "start": 1251.2, "end": 1276.5, "text": " That might affect the change of price. But since the size of the house explains 58%, that means it's a significant variable. Now, if we add more variables,", "tokens": [663, 1062, 3345, 264, 1319, 295, 3218, 13, 583, 1670, 264, 2744, 295, 264, 1782, 13948, 21786, 8923, 300, 1355, 309, 311, 257, 4776, 7006, 13, 823, 11, 498, 321, 909, 544, 9102, 11], "avg_logprob": -0.20491071428571428, "compression_ratio": 1.2380952380952381, "no_speech_prob": 0.0, "words": [{"start": 1251.2, "end": 1251.58, "word": " That", "probability": 0.45751953125}, {"start": 1251.58, "end": 1251.92, "word": " might", "probability": 0.89404296875}, {"start": 1251.92, "end": 1252.58, "word": " affect", "probability": 0.8125}, {"start": 1252.58, "end": 1253.02, "word": " the", "probability": 0.89306640625}, {"start": 1253.02, "end": 1253.34, "word": " change", "probability": 0.76611328125}, {"start": 1253.34, "end": 1253.54, "word": " of", "probability": 0.94482421875}, {"start": 1253.54, "end": 1253.82, "word": " price.", "probability": 0.4775390625}, {"start": 1264.84, "end": 1265.22, "word": " But", "probability": 0.6494140625}, {"start": 1265.22, "end": 1266.72, "word": " 
since", "probability": 0.68505859375}, {"start": 1266.72, "end": 1267.0, "word": " the", "probability": 0.91357421875}, {"start": 1267.0, "end": 1267.3, "word": " size", "probability": 0.84033203125}, {"start": 1267.3, "end": 1267.44, "word": " of", "probability": 0.96826171875}, {"start": 1267.44, "end": 1267.58, "word": " the", "probability": 0.90234375}, {"start": 1267.58, "end": 1267.86, "word": " house", "probability": 0.87744140625}, {"start": 1267.86, "end": 1268.48, "word": " explains", "probability": 0.9296875}, {"start": 1268.48, "end": 1269.62, "word": " 58%,", "probability": 0.7685546875}, {"start": 1269.62, "end": 1271.16, "word": " that", "probability": 0.818359375}, {"start": 1271.16, "end": 1271.54, "word": " means", "probability": 0.93212890625}, {"start": 1271.54, "end": 1272.08, "word": " it's", "probability": 0.891357421875}, {"start": 1272.08, "end": 1272.16, "word": " a", "probability": 0.8486328125}, {"start": 1272.16, "end": 1272.62, "word": " significant", "probability": 0.87841796875}, {"start": 1272.62, "end": 1273.1, "word": " variable.", "probability": 0.91064453125}, {"start": 1274.4, "end": 1275.02, "word": " Now,", "probability": 0.93896484375}, {"start": 1275.1, "end": 1275.22, "word": " if", "probability": 0.95068359375}, {"start": 1275.22, "end": 1275.38, "word": " we", "probability": 0.95556640625}, {"start": 1275.38, "end": 1275.66, "word": " add", "probability": 0.89013671875}, {"start": 1275.66, "end": 1275.96, "word": " more", "probability": 0.94091796875}, {"start": 1275.96, "end": 1276.5, "word": " variables,", "probability": 0.931640625}], "temperature": 1.0}, {"id": 52, "seek": 129395, "start": 1277.69, "end": 1293.95, "text": " to the regression equation for sure this value will be increased. So maybe 60 or 65 or 67 and so on. But 60% or 50 is more enough sometimes. 
But R squared, as R squared increases, it means we have good fit of the model.", "tokens": [281, 264, 24590, 5367, 337, 988, 341, 2158, 486, 312, 6505, 13, 407, 1310, 4060, 420, 11624, 420, 23879, 293, 370, 322, 13, 583, 4060, 4, 420, 2625, 307, 544, 1547, 2171, 13, 583, 497, 8889, 11, 382, 497, 8889, 8637, 11, 309, 1355, 321, 362, 665, 3318, 295, 264, 2316, 13], "avg_logprob": -0.21550707884554593, "compression_ratio": 1.4193548387096775, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1277.69, "end": 1277.89, "word": " to", "probability": 0.25146484375}, {"start": 1277.89, "end": 1278.01, "word": " the", "probability": 0.8701171875}, {"start": 1278.01, "end": 1278.33, "word": " regression", "probability": 0.9609375}, {"start": 1278.33, "end": 1278.91, "word": " equation", "probability": 0.97265625}, {"start": 1278.91, "end": 1279.25, "word": " for", "probability": 0.57177734375}, {"start": 1279.25, "end": 1279.73, "word": " sure", "probability": 0.92041015625}, {"start": 1279.73, "end": 1280.65, "word": " this", "probability": 0.82861328125}, {"start": 1280.65, "end": 1281.03, "word": " value", "probability": 0.97607421875}, {"start": 1281.03, "end": 1281.25, "word": " will", "probability": 0.88916015625}, {"start": 1281.25, "end": 1281.57, "word": " be", "probability": 0.93994140625}, {"start": 1281.57, "end": 1282.59, "word": " increased.", "probability": 0.9482421875}, {"start": 1283.09, "end": 1283.23, "word": " So", "probability": 0.84765625}, {"start": 1283.23, "end": 1283.41, "word": " maybe", "probability": 0.76611328125}, {"start": 1283.41, "end": 1283.81, "word": " 60", "probability": 0.86083984375}, {"start": 1283.81, "end": 1283.95, "word": " or", "probability": 0.8173828125}, {"start": 1283.95, "end": 1284.49, "word": " 65", "probability": 0.9765625}, {"start": 1284.49, "end": 1284.71, "word": " or", "probability": 0.64501953125}, {"start": 1284.71, "end": 1285.13, "word": " 67", "probability": 0.97265625}, {"start": 1285.13, "end": 
1285.35, "word": " and", "probability": 0.865234375}, {"start": 1285.35, "end": 1285.51, "word": " so", "probability": 0.94775390625}, {"start": 1285.51, "end": 1285.69, "word": " on.", "probability": 0.9482421875}, {"start": 1285.97, "end": 1286.17, "word": " But", "probability": 0.90185546875}, {"start": 1286.17, "end": 1286.47, "word": " 60", "probability": 0.90771484375}, {"start": 1286.47, "end": 1286.77, "word": "%", "probability": 0.55419921875}, {"start": 1286.77, "end": 1286.99, "word": " or", "probability": 0.95263671875}, {"start": 1286.99, "end": 1287.35, "word": " 50", "probability": 0.94873046875}, {"start": 1287.35, "end": 1288.03, "word": " is", "probability": 0.78466796875}, {"start": 1288.03, "end": 1288.23, "word": " more", "probability": 0.9326171875}, {"start": 1288.23, "end": 1288.51, "word": " enough", "probability": 0.736328125}, {"start": 1288.51, "end": 1289.07, "word": " sometimes.", "probability": 0.93603515625}, {"start": 1289.79, "end": 1289.95, "word": " But", "probability": 0.8720703125}, {"start": 1289.95, "end": 1290.27, "word": " R", "probability": 0.5078125}, {"start": 1290.27, "end": 1290.63, "word": " squared,", "probability": 0.455322265625}, {"start": 1290.75, "end": 1290.93, "word": " as", "probability": 0.96142578125}, {"start": 1290.93, "end": 1291.17, "word": " R", "probability": 0.99072265625}, {"start": 1291.17, "end": 1291.39, "word": " squared", "probability": 0.79931640625}, {"start": 1291.39, "end": 1291.87, "word": " increases,", "probability": 0.93212890625}, {"start": 1292.09, "end": 1292.19, "word": " it", "probability": 0.7861328125}, {"start": 1292.19, "end": 1292.37, "word": " means", "probability": 0.92919921875}, {"start": 1292.37, "end": 1292.53, "word": " we", "probability": 0.9345703125}, {"start": 1292.53, "end": 1292.73, "word": " have", "probability": 0.94580078125}, {"start": 1292.73, "end": 1292.97, "word": " good", "probability": 0.85546875}, {"start": 1292.97, "end": 1293.35, "word": " fit", 
"probability": 0.96142578125}, {"start": 1293.35, "end": 1293.63, "word": " of", "probability": 0.962890625}, {"start": 1293.63, "end": 1293.77, "word": " the", "probability": 0.92138671875}, {"start": 1293.77, "end": 1293.95, "word": " model.", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 53, "seek": 132371, "start": 1294.95, "end": 1323.71, "text": " That means the model is accurate to determine or to make some prediction. So that's for the coefficient of determination. Any question? So we covered simple linear regression model. We know now how can we compute the values of B0 and B1.", "tokens": [663, 1355, 264, 2316, 307, 8559, 281, 6997, 420, 281, 652, 512, 17630, 13, 407, 300, 311, 337, 264, 17619, 295, 18432, 13, 2639, 1168, 30, 407, 321, 5343, 2199, 8213, 24590, 2316, 13, 492, 458, 586, 577, 393, 321, 14722, 264, 4190, 295, 363, 15, 293, 363, 16, 13], "avg_logprob": -0.16681985820040984, "compression_ratio": 1.451219512195122, "no_speech_prob": 0.0, "words": [{"start": 1294.95, "end": 1295.25, "word": " That", "probability": 0.77734375}, {"start": 1295.25, "end": 1295.53, "word": " means", "probability": 0.927734375}, {"start": 1295.53, "end": 1295.91, "word": " the", "probability": 0.87841796875}, {"start": 1295.91, "end": 1296.49, "word": " model", "probability": 0.94482421875}, {"start": 1296.49, "end": 1297.07, "word": " is", "probability": 0.94580078125}, {"start": 1297.07, "end": 1297.75, "word": " accurate", "probability": 0.88623046875}, {"start": 1297.75, "end": 1298.61, "word": " to", "probability": 0.85009765625}, {"start": 1298.61, "end": 1299.07, "word": " determine", "probability": 0.76416015625}, {"start": 1299.07, "end": 1300.39, "word": " or", "probability": 0.59326171875}, {"start": 1300.39, "end": 1300.65, "word": " to", "probability": 0.955078125}, {"start": 1300.65, "end": 1300.89, "word": " make", "probability": 0.94287109375}, {"start": 1300.89, "end": 1301.23, "word": " some", "probability": 0.89697265625}, {"start": 
1301.23, "end": 1301.75, "word": " prediction.", "probability": 0.89697265625}, {"start": 1302.69, "end": 1303.01, "word": " So", "probability": 0.9609375}, {"start": 1303.01, "end": 1303.95, "word": " that's", "probability": 0.927734375}, {"start": 1303.95, "end": 1304.41, "word": " for", "probability": 0.93115234375}, {"start": 1304.41, "end": 1304.77, "word": " the", "probability": 0.92431640625}, {"start": 1304.77, "end": 1305.77, "word": " coefficient", "probability": 0.896484375}, {"start": 1305.77, "end": 1306.43, "word": " of", "probability": 0.96875}, {"start": 1306.43, "end": 1307.47, "word": " determination.", "probability": 0.794921875}, {"start": 1308.61, "end": 1309.11, "word": " Any", "probability": 0.919921875}, {"start": 1309.11, "end": 1309.47, "word": " question?", "probability": 0.783203125}, {"start": 1310.97, "end": 1311.33, "word": " So", "probability": 0.9423828125}, {"start": 1311.33, "end": 1312.01, "word": " we", "probability": 0.80615234375}, {"start": 1312.01, "end": 1312.87, "word": " covered", "probability": 0.76318359375}, {"start": 1312.87, "end": 1318.35, "word": " simple", "probability": 0.2291259765625}, {"start": 1318.35, "end": 1318.59, "word": " linear", "probability": 0.7578125}, {"start": 1318.59, "end": 1318.95, "word": " regression", "probability": 0.97265625}, {"start": 1318.95, "end": 1319.31, "word": " model.", "probability": 0.9443359375}, {"start": 1320.41, "end": 1320.77, "word": " We", "probability": 0.96240234375}, {"start": 1320.77, "end": 1320.99, "word": " know", "probability": 0.8818359375}, {"start": 1320.99, "end": 1321.23, "word": " now", "probability": 0.90625}, {"start": 1321.23, "end": 1321.43, "word": " how", "probability": 0.8642578125}, {"start": 1321.43, "end": 1321.63, "word": " can", "probability": 0.869140625}, {"start": 1321.63, "end": 1321.79, "word": " we", "probability": 0.93798828125}, {"start": 1321.79, "end": 1322.19, "word": " compute", "probability": 0.93212890625}, {"start": 1322.19, 
"end": 1322.41, "word": " the", "probability": 0.91552734375}, {"start": 1322.41, "end": 1322.75, "word": " values", "probability": 0.9521484375}, {"start": 1322.75, "end": 1322.89, "word": " of", "probability": 0.9150390625}, {"start": 1322.89, "end": 1323.21, "word": " B0", "probability": 0.5699462890625}, {"start": 1323.21, "end": 1323.35, "word": " and", "probability": 0.9443359375}, {"start": 1323.35, "end": 1323.71, "word": " B1.", "probability": 0.996337890625}], "temperature": 1.0}, {"id": 54, "seek": 135303, "start": 1324.99, "end": 1353.03, "text": " We can state or write the regression equation, and we can do some interpretation about P0 and P1, making predictions, and make some comments about the coefficient of determination. That's all. So I'm going to stop now, and I will give some time to discuss some practice.", "tokens": [492, 393, 1785, 420, 2464, 264, 24590, 5367, 11, 293, 321, 393, 360, 512, 14174, 466, 430, 15, 293, 430, 16, 11, 1455, 21264, 11, 293, 652, 512, 3053, 466, 264, 17619, 295, 18432, 13, 663, 311, 439, 13, 407, 286, 478, 516, 281, 1590, 586, 11, 293, 286, 486, 976, 512, 565, 281, 2248, 512, 3124, 13], "avg_logprob": -0.18723517151202185, "compression_ratio": 1.5224719101123596, "no_speech_prob": 0.0, "words": [{"start": 1324.99, "end": 1325.23, "word": " We", "probability": 0.60302734375}, {"start": 1325.23, "end": 1325.55, "word": " can", "probability": 0.94287109375}, {"start": 1325.55, "end": 1326.13, "word": " state", "probability": 0.931640625}, {"start": 1326.13, "end": 1326.39, "word": " or", "probability": 0.8759765625}, {"start": 1326.39, "end": 1326.73, "word": " write", "probability": 0.9033203125}, {"start": 1326.73, "end": 1327.45, "word": " the", "probability": 0.8232421875}, {"start": 1327.45, "end": 1327.81, "word": " regression", "probability": 0.96826171875}, {"start": 1327.81, "end": 1328.43, "word": " equation,", "probability": 0.982421875}, {"start": 1329.41, "end": 1329.65, "word": " and", "probability": 
0.92822265625}, {"start": 1329.65, "end": 1329.81, "word": " we", "probability": 0.95849609375}, {"start": 1329.81, "end": 1329.99, "word": " can", "probability": 0.94580078125}, {"start": 1329.99, "end": 1330.15, "word": " do", "probability": 0.92333984375}, {"start": 1330.15, "end": 1330.55, "word": " some", "probability": 0.89990234375}, {"start": 1330.55, "end": 1331.97, "word": " interpretation", "probability": 0.8857421875}, {"start": 1331.97, "end": 1332.31, "word": " about", "probability": 0.890625}, {"start": 1332.31, "end": 1332.73, "word": " P0", "probability": 0.593994140625}, {"start": 1332.73, "end": 1332.89, "word": " and", "probability": 0.9287109375}, {"start": 1332.89, "end": 1333.29, "word": " P1,", "probability": 0.978271484375}, {"start": 1333.79, "end": 1334.37, "word": " making", "probability": 0.89453125}, {"start": 1334.37, "end": 1336.39, "word": " predictions,", "probability": 0.79345703125}, {"start": 1337.19, "end": 1337.95, "word": " and", "probability": 0.93115234375}, {"start": 1337.95, "end": 1338.53, "word": " make", "probability": 0.93115234375}, {"start": 1338.53, "end": 1338.91, "word": " some", "probability": 0.908203125}, {"start": 1338.91, "end": 1339.71, "word": " comments", "probability": 0.9013671875}, {"start": 1339.71, "end": 1340.29, "word": " about", "probability": 0.8974609375}, {"start": 1340.29, "end": 1341.53, "word": " the", "probability": 0.87255859375}, {"start": 1341.53, "end": 1342.19, "word": " coefficient", "probability": 0.9150390625}, {"start": 1342.19, "end": 1342.81, "word": " of", "probability": 0.85107421875}, {"start": 1342.81, "end": 1343.87, "word": " determination.", "probability": 0.890625}, {"start": 1344.97, "end": 1345.77, "word": " That's", "probability": 0.927734375}, {"start": 1345.77, "end": 1346.05, "word": " all.", "probability": 0.94384765625}, {"start": 1346.67, "end": 1347.03, "word": " So", "probability": 0.9580078125}, {"start": 1347.03, "end": 1347.39, "word": " I'm", "probability": 
0.86962890625}, {"start": 1347.39, "end": 1347.57, "word": " going", "probability": 0.9384765625}, {"start": 1347.57, "end": 1347.77, "word": " to", "probability": 0.96826171875}, {"start": 1347.77, "end": 1348.07, "word": " stop", "probability": 0.92529296875}, {"start": 1348.07, "end": 1349.73, "word": " now,", "probability": 0.587890625}, {"start": 1349.83, "end": 1349.99, "word": " and", "probability": 0.92822265625}, {"start": 1349.99, "end": 1350.21, "word": " I", "probability": 0.83837890625}, {"start": 1350.21, "end": 1350.37, "word": " will", "probability": 0.8564453125}, {"start": 1350.37, "end": 1350.53, "word": " give", "probability": 0.837890625}, {"start": 1350.53, "end": 1350.93, "word": " some", "probability": 0.89453125}, {"start": 1350.93, "end": 1351.73, "word": " time", "probability": 0.8994140625}, {"start": 1351.73, "end": 1351.91, "word": " to", "probability": 0.96826171875}, {"start": 1351.91, "end": 1352.31, "word": " discuss", "probability": 0.90771484375}, {"start": 1352.31, "end": 1352.59, "word": " some", "probability": 0.89453125}, {"start": 1352.59, "end": 1353.03, "word": " practice.", "probability": 0.9072265625}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 1353.700125, "duration_after_vad": 1292.010343749999} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..c7af8c156f4c5113bdb6eeaa117249d6c553aefc --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/D4aO26sEGrc_raw.srt @@ -0,0 +1,1128 @@ +1 +00:00:15,580 --> 00:00:19,700 +In general, the regression equation is given by + +2 +00:00:19,700 --> 00:00:26,460 +this equation. Y represents the dependent variable + +3 +00:00:26,460 --> 00:00:30,680 +for each observation I. Beta 0 is called + +4 +00:00:30,680 --> 00:00:35,280 +population Y intercept. 
Beta 1 is the population + +5 +00:00:35,280 --> 00:00:39,400 +stop coefficient. Xi is the independent variable + +6 +00:00:39,400 --> 00:00:44,040 +for each observation, I. Epsilon I is the random + +7 +00:00:44,040 --> 00:00:48,420 +error theorem. Beta 0 plus beta 1 X is called + +8 +00:00:48,420 --> 00:00:53,410 +linear component. While Y and I are random error + +9 +00:00:53,410 --> 00:00:57,130 +components. So, the regression equation mainly has + +10 +00:00:57,130 --> 00:01:01,970 +two components. One is linear and the other is + +11 +00:01:01,970 --> 00:01:05,830 +random. In general, the expected value for this + +12 +00:01:05,830 --> 00:01:08,810 +error term is zero. So, for the predicted + +13 +00:01:08,810 --> 00:01:12,410 +equation, later we will see that Y hat equals B + +14 +00:01:12,410 --> 00:01:15,930 +zero plus B one X.this term will be ignored + +15 +00:01:15,930 --> 00:01:19,770 +because the expected value for the epsilon equals + +16 +00:01:19,770 --> 00:01:20,850 +zero. + +17 +00:01:36,460 --> 00:01:43,580 +So again linear component B0 plus B1 X I and the + +18 +00:01:43,580 --> 00:01:46,860 +random component is the epsilon term. + +19 +00:01:48,880 --> 00:01:53,560 +So if we have X and Y axis, this segment is called + +20 +00:01:53,560 --> 00:01:57,620 +Y intercept which is B0. The change in y divided + +21 +00:01:57,620 --> 00:02:01,480 +by change in x is called the slope. Epsilon i is + +22 +00:02:01,480 --> 00:02:04,480 +the difference between the observed value of y + +23 +00:02:04,480 --> 00:02:10,400 +minus the expected value or the predicted value. + +24 +00:02:10,800 --> 00:02:14,200 +The observed is the actual value. So actual minus + +25 +00:02:14,200 --> 00:02:17,480 +predicted, the difference between these two values + +26 +00:02:17,480 --> 00:02:20,800 +is called the epsilon. 
So epsilon i is the + +27 +00:02:20,800 --> 00:02:24,460 +difference between the observed value of y for x, + +28 +00:02:25,220 --> 00:02:28,820 +minus the predicted or the estimated value of Y + +29 +00:02:28,820 --> 00:02:33,360 +for XR. So this difference actually is called the + +30 +00:02:33,360 --> 00:02:36,920 +error tier. So the error is just observed minus + +31 +00:02:36,920 --> 00:02:38,240 +predicted. + +32 +00:02:40,980 --> 00:02:44,540 +The estimated regression equation is given by Y + +33 +00:02:44,540 --> 00:02:50,210 +hat equals V0 plus V1X. as i mentioned before the + +34 +00:02:50,210 --> 00:02:53,450 +epsilon term is cancelled because the expected + +35 +00:02:53,450 --> 00:02:57,590 +value for the epsilon equals zero here we have y + +36 +00:02:57,590 --> 00:03:00,790 +hat instead of y because this one is called the + +37 +00:03:00,790 --> 00:03:05,670 +estimated or the predicted value for y for the + +38 +00:03:05,670 --> 00:03:09,670 +observation i for example b zero is the estimated + +39 +00:03:09,670 --> 00:03:12,590 +of the regression intercept or is called y + +40 +00:03:12,590 --> 00:03:18,030 +intercept b one the estimate of the regression of + +41 +00:03:18,030 --> 00:03:21,930 +the slope so this is the estimated slope b1 xi + +42 +00:03:21,930 --> 00:03:26,270 +again is the independent variable so x1 It means + +43 +00:03:26,270 --> 00:03:28,630 +the value of the independent variable for + +44 +00:03:28,630 --> 00:03:31,350 +observation number one. Now this equation is + +45 +00:03:31,350 --> 00:03:34,530 +called linear regression equation or regression + +46 +00:03:34,530 --> 00:03:37,230 +model. It's a straight line because here we are + +47 +00:03:37,230 --> 00:03:41,170 +assuming that the relationship between x and y is + +48 +00:03:41,170 --> 00:03:43,490 +linear. It could be non-linear, but we are + +49 +00:03:43,490 --> 00:03:48,760 +focusing here in just linear regression. 
Now, the + +50 +00:03:48,760 --> 00:03:52,000 +values for B0 and B1 are given by these equations, + +51 +00:03:52,920 --> 00:03:56,480 +B1 equals RSY divided by SX. So, in order to + +52 +00:03:56,480 --> 00:04:01,040 +determine the values of B0 and B1, we have to know + +53 +00:04:01,040 --> 00:04:07,760 +first the value of R, the correlation coefficient. + +54 +00:04:16,640 --> 00:04:24,980 +Sx and Sy, standard deviations of x and y, as well + +55 +00:04:24,980 --> 00:04:29,880 +as the means of x and y. + +56 +00:04:32,920 --> 00:04:39,500 +B1 equals R times Sy divided by Sx. B0 is just y + +57 +00:04:39,500 --> 00:04:43,600 +bar minus b1 x bar, where Sx and Sy are the + +58 +00:04:43,600 --> 00:04:48,350 +standard deviations of x and y. So this, how can + +59 +00:04:48,350 --> 00:04:53,190 +we compute the values of B0 and B1? Now the + +60 +00:04:53,190 --> 00:04:59,350 +question is, what's our interpretation about B0 + +61 +00:04:59,350 --> 00:05:05,030 +and B1? And B0, as we mentioned before, is the Y + +62 +00:05:05,030 --> 00:05:10,510 +or the estimated mean value of Y when the value X + +63 +00:05:10,510 --> 00:05:10,910 +is 0. + +64 +00:05:17,420 --> 00:05:22,860 +So if X is 0, then Y hat equals B0. That means B0 + +65 +00:05:22,860 --> 00:05:26,420 +is the estimated mean value of Y when the value of + +66 +00:05:26,420 --> 00:05:32,280 +X equals 0. B1, which is called the estimated + +67 +00:05:32,280 --> 00:05:36,880 +change in the mean value of Y as a result of one + +68 +00:05:36,880 --> 00:05:42,360 +unit change in X. That means the sign of B1, + +69 +00:05:48,180 --> 00:05:55,180 +the direction of the relationship between X and Y. + +70 +00:06:03,020 --> 00:06:09,060 +So the sine of B1 tells us the exact direction. It + +71 +00:06:09,060 --> 00:06:12,300 +could be positive if the sine of B1 is positive or + +72 +00:06:12,300 --> 00:06:17,040 +negative. on the other side. So that's the meaning + +73 +00:06:17,040 --> 00:06:22,040 +of B0 and B1. 
Now first thing we have to do in + +74 +00:06:22,040 --> 00:06:23,980 +order to determine if there exists linear + +75 +00:06:23,980 --> 00:06:26,800 +relationship between X and Y, we have to draw + +76 +00:06:26,800 --> 00:06:30,620 +scatter plot, Y versus X. In this specific + +77 +00:06:30,620 --> 00:06:34,740 +example, X is the square feet, size of the house + +78 +00:06:34,740 --> 00:06:38,760 +is measured by square feet, and house selling + +79 +00:06:38,760 --> 00:06:43,220 +price in thousand dollars. So we have to draw Y + +80 +00:06:43,220 --> 00:06:47,420 +versus X. So house price versus size of the house. + +81 +00:06:48,140 --> 00:06:50,740 +Now by looking carefully at this scatter plot, + +82 +00:06:51,340 --> 00:06:54,200 +even if it's a small sample size, but you can see + +83 +00:06:54,200 --> 00:06:57,160 +that there exists positive relationship between + +84 +00:06:57,160 --> 00:07:02,640 +house price and size of the house. The points + +85 +00:07:03,750 --> 00:07:06,170 +Maybe they are close little bit to the straight + +86 +00:07:06,170 --> 00:07:08,370 +line, it means there exists maybe strong + +87 +00:07:08,370 --> 00:07:11,350 +relationship between X and Y. But you can tell the + +88 +00:07:11,350 --> 00:07:15,910 +exact strength of the relationship by using the + +89 +00:07:15,910 --> 00:07:19,270 +value of R. But here we can tell that there exists + +90 +00:07:19,270 --> 00:07:22,290 +positive relationship and that relation could be + +91 +00:07:22,290 --> 00:07:23,250 +strong. + +92 +00:07:25,730 --> 00:07:31,350 +Now simple calculations will give B1 and B0. + +93 +00:07:32,210 --> 00:07:37,510 +Suppose we know the values of R, Sy, and Sx. R, if + +94 +00:07:37,510 --> 00:07:41,550 +you remember last time, R was 0.762. It's moderate + +95 +00:07:41,550 --> 00:07:46,390 +relationship between X and Y. Sy and Sx, 60 + +96 +00:07:46,390 --> 00:07:52,350 +divided by 4 is 117. That will give 0.109. 
So B0, + +97 +00:07:53,250 --> 00:07:59,430 +in this case, 0.10977, B1. + +98 +00:08:02,960 --> 00:08:08,720 +B0 equals Y bar minus B1 X bar. B1 is computed in + +99 +00:08:08,720 --> 00:08:12,680 +the previous step, so plug that value here. In + +100 +00:08:12,680 --> 00:08:15,440 +addition, we know the values of X bar and Y bar. + +101 +00:08:15,980 --> 00:08:19,320 +Simple calculation will give the value of B0, + +102 +00:08:19,400 --> 00:08:25,340 +which is about 98.25. After computing the values + +103 +00:08:25,340 --> 00:08:30,600 +of B0 and B1, we can state the regression equation + +104 +00:08:30,600 --> 00:08:34,360 +by house price, the estimated value of house + +105 +00:08:34,360 --> 00:08:39,960 +price. Hat in this equation means the estimated or + +106 +00:08:39,960 --> 00:08:43,860 +the predicted value of the house price. Equals b0 + +107 +00:08:43,860 --> 00:08:49,980 +which is 98 plus b1 which is 0.10977 times square + +108 +00:08:49,980 --> 00:08:54,420 +feet. Now here, by using this equation, we can + +109 +00:08:54,420 --> 00:08:58,280 +tell number one. The direction of the relationship + +110 +00:08:58,280 --> 00:09:03,620 +between x and y, how surprised and its size. Since + +111 +00:09:03,620 --> 00:09:05,900 +the sign is positive, it means there exists + +112 +00:09:05,900 --> 00:09:09,000 +positive associations or relationship between + +113 +00:09:09,000 --> 00:09:12,420 +these two variables, number one. Number two, we + +114 +00:09:12,420 --> 00:09:17,060 +can interpret carefully the meaning of the + +115 +00:09:17,060 --> 00:09:21,340 +intercept. Now, as we mentioned before, y hat + +116 +00:09:21,340 --> 00:09:25,600 +equals b zero only if x equals zero. Now there is + +117 +00:09:25,600 --> 00:09:28,900 +no sense about square feet of zero because we + +118 +00:09:28,900 --> 00:09:32,960 +don't have a size of a house to be zero. 
But the + +119 +00:09:32,960 --> 00:09:37,880 +slope here is 0.109, it has sense because as the + +120 +00:09:37,880 --> 00:09:41,450 +size of the house increased by one unit. it's + +121 +00:09:41,450 --> 00:09:46,290 +selling price increased by this amount 0.109 but + +122 +00:09:46,290 --> 00:09:48,990 +here you have to be careful to multiply this value + +123 +00:09:48,990 --> 00:09:52,610 +by a thousand because the data is given in + +124 +00:09:52,610 --> 00:09:56,830 +thousand dollars for Y so here as the size of the + +125 +00:09:56,830 --> 00:10:00,590 +house increased by one unit by one feet one square + +126 +00:10:00,590 --> 00:10:05,310 +feet it's selling price increases by this amount 0 + +127 +00:10:05,310 --> 00:10:10,110 +.10977 should be multiplied by a thousand so + +128 +00:10:10,110 --> 00:10:18,560 +around $109.77. So that means extra one square + +129 +00:10:18,560 --> 00:10:24,040 +feet for the size of the house, it cost you around + +130 +00:10:24,040 --> 00:10:30,960 +$100 or $110. So that's the meaning of B1 and the + +131 +00:10:30,960 --> 00:10:35,060 +sign actually of the slope. In addition to that, + +132 +00:10:35,140 --> 00:10:39,340 +we can make some predictions about house price for + +133 +00:10:39,340 --> 00:10:42,900 +any given value of the size of the house. That + +134 +00:10:42,900 --> 00:10:46,940 +means if you know that the house size equals 2,000 + +135 +00:10:46,940 --> 00:10:50,580 +square feet. So just plug this value here and + +136 +00:10:50,580 --> 00:10:54,100 +simple calculation will give the predicted value + +137 +00:10:54,100 --> 00:10:58,230 +of the ceiling price of a house. That's the whole + +138 +00:10:58,230 --> 00:11:03,950 +story for the simple linear regression. In other + +139 +00:11:03,950 --> 00:11:08,030 +words, we have this equation, so the + +140 +00:11:08,030 --> 00:11:12,690 +interpretation of B0 again. 
B0 is the estimated + +141 +00:11:12,690 --> 00:11:16,110 +mean value of Y when the value of X is 0. That + +142 +00:11:16,110 --> 00:11:20,700 +means if X is 0, in this range of the observed X + +143 +00:11:20,700 --> 00:11:24,540 +-values. That's the meaning of the B0. But again, + +144 +00:11:24,820 --> 00:11:27,700 +because a house cannot have a square footage of + +145 +00:11:27,700 --> 00:11:31,680 +zero, so B0 has no practical application. + +146 +00:11:34,740 --> 00:11:38,760 +On the other hand, the interpretation for B1, B1 + +147 +00:11:38,760 --> 00:11:43,920 +equals 0.10977, that means B1 again estimates the + +148 +00:11:43,920 --> 00:11:46,880 +change in the mean value of Y as a result of one + +149 +00:11:46,880 --> 00:11:51,160 +unit increase in X. In other words, since B1 + +150 +00:11:51,160 --> 00:11:55,680 +equals 0.10977, that tells us that the mean value + +151 +00:11:55,680 --> 00:12:02,030 +of a house Increases by this amount, multiplied by + +152 +00:12:02,030 --> 00:12:05,730 +1,000 on average for each additional one square + +153 +00:12:05,730 --> 00:12:09,690 +foot of size. So that's the exact interpretation + +154 +00:12:09,690 --> 00:12:14,630 +about P0 and P1. For the prediction, as I + +155 +00:12:14,630 --> 00:12:18,430 +mentioned, since we have this equation, and our + +156 +00:12:18,430 --> 00:12:21,530 +goal is to predict the price for a house with 2 + +157 +00:12:21,530 --> 00:12:25,450 +,000 square feet, just plug this value here. + +158 +00:12:26,450 --> 00:12:31,130 +Multiply this value by 0.1098, then add the result + +159 +00:12:31,130 --> 00:12:37,750 +to 98.25 will give 317.85. This value should be + +160 +00:12:37,750 --> 00:12:41,590 +multiplied by 1000, so the predicted price for a + +161 +00:12:41,590 --> 00:12:49,050 +house with 2000 square feet is around 317,850 + +162 +00:12:49,050 --> 00:12:54,910 +dollars. That's for making the prediction for + +163 +00:12:54,910 --> 00:13:02,050 +selling a price. 
The last section in chapter 12 + +164 +00:13:02,050 --> 00:13:07,550 +talks about coefficient of determination R + +165 +00:13:07,550 --> 00:13:11,550 +squared. The definition for the coefficient of + +166 +00:13:11,550 --> 00:13:16,190 +determination is the portion of the total + +167 +00:13:16,190 --> 00:13:19,330 +variation in the dependent variable that is + +168 +00:13:19,330 --> 00:13:21,730 +explained by the variation in the independent + +169 +00:13:21,730 --> 00:13:25,130 +variable. Since we have two variables X and Y. + +170 +00:13:29,510 --> 00:13:34,490 +And the question is, what's the portion of the + +171 +00:13:34,490 --> 00:13:39,530 +total variation that can be explained by X? So the + +172 +00:13:39,530 --> 00:13:42,030 +question is, what's the portion of the total + +173 +00:13:42,030 --> 00:13:46,070 +variation in Y that is explained already by the + +174 +00:13:46,070 --> 00:13:54,450 +variation in X? For example, suppose R² is 90%, 0 + +175 +00:13:54,450 --> 00:13:59,770 +.90. That means 90% in the variation of the + +176 +00:13:59,770 --> 00:14:05,700 +selling price is explained by its size. That means + +177 +00:14:05,700 --> 00:14:12,580 +the size of the house contributes about 90% to + +178 +00:14:12,580 --> 00:14:17,700 +explain the variability of the selling price. So + +179 +00:14:17,700 --> 00:14:20,460 +we would like to have R squared to be large + +180 +00:14:20,460 --> 00:14:26,620 +enough. Now, R squared for simple regression only + +181 +00:14:26,620 --> 00:14:30,200 +is given by this equation, correlation between X + +182 +00:14:30,200 --> 00:14:31,100 +and Y squared. + +183 +00:14:34,090 --> 00:14:36,510 +So if we have the correlation between X and Y and + +184 +00:14:36,510 --> 00:14:40,070 +then you just square this value, that will give + +185 +00:14:40,070 --> 00:14:42,370 +the correlation or the coefficient of + +186 +00:14:42,370 --> 00:14:45,730 +determination. 
So simply, determination + +187 +00:14:45,730 --> 00:14:49,510 +coefficient is just the square of the correlation + +188 +00:14:49,510 --> 00:14:54,430 +between X and Y. We know that R ranges between + +189 +00:14:54,430 --> 00:14:55,670 +minus 1 and plus 1. + +190 +00:14:59,150 --> 00:15:05,590 +So R squared should be ranges between 0 and 1, + +191 +00:15:06,050 --> 00:15:09,830 +because minus sign will be cancelled since we are + +192 +00:15:09,830 --> 00:15:12,770 +squaring these values, so r squared is always + +193 +00:15:12,770 --> 00:15:17,690 +between 0 and 1. So again, r squared is used to + +194 +00:15:17,690 --> 00:15:22,430 +explain the portion of the total variability in + +195 +00:15:22,430 --> 00:15:24,950 +the dependent variable that is already explained + +196 +00:15:24,950 --> 00:15:31,310 +by the variability in x. For example, Sometimes R + +197 +00:15:31,310 --> 00:15:36,590 +squared is one. R squared is one only happens if R + +198 +00:15:36,590 --> 00:15:41,190 +is one or negative one. So if there exists perfect + +199 +00:15:41,190 --> 00:15:45,490 +relationship either negative or positive, I mean + +200 +00:15:45,490 --> 00:15:49,890 +if R is plus one or negative one, then R squared + +201 +00:15:49,890 --> 00:15:55,130 +is one. That means perfect linear relationship + +202 +00:15:55,130 --> 00:16:01,020 +between Y and X. Now the value. of 1 for R squared + +203 +00:16:01,020 --> 00:16:07,040 +means that 100% of the variation Y is explained by + +204 +00:16:07,040 --> 00:16:11,460 +variation X. And that's really never happened in + +205 +00:16:11,460 --> 00:16:15,720 +real life. Because R equals 1 or plus 1 or + +206 +00:16:15,720 --> 00:16:21,140 +negative 1 cannot be happened in real life. 
So R + +207 +00:16:21,140 --> 00:16:25,180 +squared always ranges between 0 and 1, never + +208 +00:16:25,180 --> 00:16:29,500 +equals 1, because if R squared is 1, that means + +209 +00:16:29,500 --> 00:16:33,440 +all the variation in Y is explained by the + +210 +00:16:33,440 --> 00:16:38,220 +variation in X. But for sure there is an error, + +211 +00:16:38,820 --> 00:16:41,700 +and that error may be due to some variables that + +212 +00:16:41,700 --> 00:16:45,540 +are not included in the regression model. Maybe + +213 +00:16:45,540 --> 00:16:50,870 +there is Random error in the selection, maybe the + +214 +00:16:50,870 --> 00:16:53,210 +sample size is not large enough in order to + +215 +00:16:53,210 --> 00:16:55,770 +determine the total variation in the dependent + +216 +00:16:55,770 --> 00:16:58,990 +variable. So it makes sense that R squared will be + +217 +00:16:58,990 --> 00:17:04,450 +less than 100. So generally speaking, R squared + +218 +00:17:04,450 --> 00:17:09,870 +always between 0 and 1. Weaker linear relationship + +219 +00:17:09,870 --> 00:17:15,690 +between X and Y, it means R squared is not 1. So + +220 +00:17:15,690 --> 00:17:20,070 +R², since it lies between 0 and 1, it means sum, + +221 +00:17:21,070 --> 00:17:24,830 +but not all the variation of Y is explained by the + +222 +00:17:24,830 --> 00:17:28,410 +variation X. Because as mentioned before, if R + +223 +00:17:28,410 --> 00:17:32,510 +squared is 90%, it means some, not all, the + +224 +00:17:32,510 --> 00:17:35,830 +variation Y is explained by the variation X. And + +225 +00:17:35,830 --> 00:17:38,590 +the remaining percent in this case, which is 10%, + +226 +00:17:38,590 --> 00:17:42,790 +this one due to, as I mentioned, maybe there + +227 +00:17:42,790 --> 00:17:46,490 +exists some other variables that affect the + +228 +00:17:46,490 --> 00:17:52,020 +selling price besides its size, maybe location. of + +229 +00:17:52,020 --> 00:17:57,900 +the house affects its selling price. 
So R squared + +230 +00:17:57,900 --> 00:18:02,640 +is always between 0 and 1, it's always positive. R + +231 +00:18:02,640 --> 00:18:07,180 +squared equals 0, that only happens if there is no + +232 +00:18:07,180 --> 00:18:12,620 +linear relationship between Y and X. Since R is 0, + +233 +00:18:13,060 --> 00:18:17,240 +then R squared equals 0. That means the value of Y + +234 +00:18:17,240 --> 00:18:20,870 +does not depend on X. Because here, as X + +235 +00:18:20,870 --> 00:18:26,830 +increases, Y stays nearly in the same position. It + +236 +00:18:26,830 --> 00:18:30,190 +means as X increases, Y stays the same, constant. + +237 +00:18:31,010 --> 00:18:33,730 +So that means there is no relationship or actually + +238 +00:18:33,730 --> 00:18:37,010 +there is no linear relationship because it could + +239 +00:18:37,010 --> 00:18:40,710 +be there exists non-linear relationship. But here + +240 +00:18:40,710 --> 00:18:44,980 +we are. Just focusing on linear relationship + +241 +00:18:44,980 --> 00:18:50,020 +between X and Y. So if R is zero, that means the + +242 +00:18:50,020 --> 00:18:52,400 +value of Y does not depend on the value of X. So + +243 +00:18:52,400 --> 00:18:58,360 +as X increases, Y is constant. Now for the + +244 +00:18:58,360 --> 00:19:03,620 +previous example, R was 0.7621. To determine the + +245 +00:19:03,620 --> 00:19:06,760 +coefficient of determination, One more time, + +246 +00:19:07,460 --> 00:19:11,760 +square this value, that's only valid for simple + +247 +00:19:11,760 --> 00:19:14,980 +linear regression. Otherwise, you cannot square + +248 +00:19:14,980 --> 00:19:17,580 +the value of R in order to determine the + +249 +00:19:17,580 --> 00:19:20,820 +coefficient of determination. So again, this is + +250 +00:19:20,820 --> 00:19:26,420 +only true for + +251 +00:19:26,420 --> 00:19:29,980 +simple linear regression. + +252 +00:19:35,460 --> 00:19:41,320 +So R squared is 0.7621 squared will give 0.5808. 
+ +253 +00:19:42,240 --> 00:19:46,120 +Now, the meaning of this value, first you have to + +254 +00:19:46,120 --> 00:19:53,280 +multiply this by 100. So 58.08% of the variation + +255 +00:19:53,280 --> 00:19:57,440 +in house prices is explained by the variation in + +256 +00:19:57,440 --> 00:20:05,190 +square feet. So 58, around 0.08% of the variation + +257 +00:20:05,190 --> 00:20:12,450 +in size of the house, I'm sorry, in the price is + +258 +00:20:12,450 --> 00:20:16,510 +explained by + +259 +00:20:16,510 --> 00:20:25,420 +its size. So size by itself. Size only explains + +260 +00:20:25,420 --> 00:20:30,320 +around 50-80% of the selling price of a house. Now + +261 +00:20:30,320 --> 00:20:35,000 +the remaining percent which is around, this is the + +262 +00:20:35,000 --> 00:20:38,860 +error, or the remaining percent, this one is due + +263 +00:20:38,860 --> 00:20:50,040 +to other variables, other independent variables. + +264 +00:20:51,200 --> 00:20:53,820 +That might affect the change of price. + +265 +00:21:04,840 --> 00:21:11,160 +But since the size of the house explains 58%, that + +266 +00:21:11,160 --> 00:21:15,660 +means it's a significant variable. Now, if we add + +267 +00:21:15,660 --> 00:21:19,250 +more variables, to the regression equation for + +268 +00:21:19,250 --> 00:21:23,950 +sure this value will be increased. So maybe 60 or + +269 +00:21:23,950 --> 00:21:28,510 +65 or 67 and so on. But 60% or 50 is more enough + +270 +00:21:28,510 --> 00:21:31,870 +sometimes. But R squared, as R squared increases, + +271 +00:21:32,090 --> 00:21:35,530 +it means we have good fit of the model. That means + +272 +00:21:35,530 --> 00:21:41,230 +the model is accurate to determine or to make some + +273 +00:21:41,230 --> 00:21:46,430 +prediction. So that's for the coefficient of + +274 +00:21:46,430 --> 00:21:58,350 +determination. Any question? So we covered simple + +275 +00:21:58,350 --> 00:22:01,790 +linear regression model. 
We know now how can we + +276 +00:22:01,790 --> 00:22:06,390 +compute the values of B0 and B1. We can state or + +277 +00:22:06,390 --> 00:22:10,550 +write the regression equation, and we can do some + +278 +00:22:10,550 --> 00:22:14,370 +interpretation about P0 and P1, making + +279 +00:22:14,370 --> 00:22:21,530 +predictions, and make some comments about the + +280 +00:22:21,530 --> 00:22:27,390 +coefficient of determination. That's all. So I'm + +281 +00:22:27,390 --> 00:22:31,910 +going to stop now, and I will give some time to + +282 +00:22:31,910 --> 00:22:33,030 +discuss some practice. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/EuXnSmrCFuE_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/EuXnSmrCFuE_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..7becec2336a502822732425f44d14d6c26e82115 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/EuXnSmrCFuE_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 2828, "start": 6.48, "end": 28.28, "text": " Last time, I mean Tuesday, we discussed box plot and we introduced how can we use box plot to determine if any point is suspected to be an outlier by using the lower limit and upper limit.", "tokens": [5264, 565, 11, 286, 914, 10017, 11, 321, 7152, 2424, 7542, 293, 321, 7268, 577, 393, 321, 764, 2424, 7542, 281, 6997, 498, 604, 935, 307, 26439, 281, 312, 364, 484, 2753, 538, 1228, 264, 3126, 4948, 293, 6597, 4948, 13], "avg_logprob": -0.20684524448145003, "compression_ratio": 1.4104477611940298, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 6.48, "end": 7.04, "word": " Last", "probability": 0.6337890625}, {"start": 7.04, "end": 7.4, "word": " time,", "probability": 0.87939453125}, {"start": 7.6, "end": 7.64, "word": " I", "probability": 0.640625}, {"start": 7.64, "end": 7.8, "word": " mean", "probability": 0.962890625}, {"start": 7.8, "end": 8.36, "word": " Tuesday,", "probability": 0.5576171875}, {"start": 9.36, "end": 10.08, "word": " we", "probability": 
0.955078125}, {"start": 10.08, "end": 10.86, "word": " discussed", "probability": 0.87451171875}, {"start": 10.86, "end": 11.86, "word": " box", "probability": 0.1781005859375}, {"start": 11.86, "end": 12.1, "word": " plot", "probability": 0.81005859375}, {"start": 12.1, "end": 13.66, "word": " and", "probability": 0.51708984375}, {"start": 13.66, "end": 14.04, "word": " we", "probability": 0.82373046875}, {"start": 14.04, "end": 15.24, "word": " introduced", "probability": 0.7802734375}, {"start": 15.24, "end": 16.12, "word": " how", "probability": 0.88916015625}, {"start": 16.12, "end": 16.38, "word": " can", "probability": 0.91552734375}, {"start": 16.38, "end": 16.66, "word": " we", "probability": 0.94677734375}, {"start": 16.66, "end": 17.72, "word": " use", "probability": 0.86962890625}, {"start": 17.72, "end": 18.0, "word": " box", "probability": 0.9287109375}, {"start": 18.0, "end": 18.42, "word": " plot", "probability": 0.97509765625}, {"start": 18.42, "end": 19.54, "word": " to", "probability": 0.9453125}, {"start": 19.54, "end": 20.02, "word": " determine", "probability": 0.92138671875}, {"start": 20.02, "end": 20.74, "word": " if", "probability": 0.94580078125}, {"start": 20.74, "end": 21.64, "word": " any", "probability": 0.8896484375}, {"start": 21.64, "end": 22.08, "word": " point", "probability": 0.97900390625}, {"start": 22.08, "end": 23.18, "word": " is", "probability": 0.9384765625}, {"start": 23.18, "end": 23.64, "word": " suspected", "probability": 0.8779296875}, {"start": 23.64, "end": 23.88, "word": " to", "probability": 0.97216796875}, {"start": 23.88, "end": 24.02, "word": " be", "probability": 0.9541015625}, {"start": 24.02, "end": 24.16, "word": " an", "probability": 0.9365234375}, {"start": 24.16, "end": 24.56, "word": " outlier", "probability": 0.950439453125}, {"start": 24.56, "end": 26.28, "word": " by", "probability": 0.30517578125}, {"start": 26.28, "end": 26.68, "word": " using", "probability": 0.92822265625}, {"start": 26.68, 
"end": 26.9, "word": " the", "probability": 0.88623046875}, {"start": 26.9, "end": 27.08, "word": " lower", "probability": 0.8515625}, {"start": 27.08, "end": 27.44, "word": " limit", "probability": 0.92529296875}, {"start": 27.44, "end": 27.72, "word": " and", "probability": 0.94091796875}, {"start": 27.72, "end": 27.98, "word": " upper", "probability": 0.8291015625}, {"start": 27.98, "end": 28.28, "word": " limit.", "probability": 0.83056640625}], "temperature": 1.0}, {"id": 2, "seek": 5308, "start": 29.46, "end": 53.08, "text": " And we mentioned last time that if any point is below the lower limit or is above the upper limit, that point is considered to be an outlier. So that's one of the usage of the backsplat. I mean, for this specific example, we mentioned last time 27 is an outlier.", "tokens": [400, 321, 2835, 1036, 565, 300, 498, 604, 935, 307, 2507, 264, 3126, 4948, 420, 307, 3673, 264, 6597, 4948, 11, 300, 935, 307, 4888, 281, 312, 364, 484, 2753, 13, 407, 300, 311, 472, 295, 264, 14924, 295, 264, 646, 46535, 267, 13, 286, 914, 11, 337, 341, 2685, 1365, 11, 321, 2835, 1036, 565, 7634, 307, 364, 484, 2753, 13], "avg_logprob": -0.20275297713658166, "compression_ratio": 1.623456790123457, "no_speech_prob": 0.0, "words": [{"start": 29.46, "end": 29.78, "word": " And", "probability": 0.77880859375}, {"start": 29.78, "end": 29.94, "word": " we", "probability": 0.93994140625}, {"start": 29.94, "end": 30.3, "word": " mentioned", "probability": 0.82958984375}, {"start": 30.3, "end": 30.82, "word": " last", "probability": 0.82958984375}, {"start": 30.82, "end": 31.02, "word": " time", "probability": 0.88720703125}, {"start": 31.02, "end": 31.3, "word": " that", "probability": 0.8876953125}, {"start": 31.3, "end": 31.6, "word": " if", "probability": 0.88134765625}, {"start": 31.6, "end": 31.94, "word": " any", "probability": 0.8974609375}, {"start": 31.94, "end": 32.26, "word": " point", "probability": 0.9716796875}, {"start": 32.26, "end": 32.98, "word": " is", 
"probability": 0.93896484375}, {"start": 32.98, "end": 33.3, "word": " below", "probability": 0.904296875}, {"start": 33.3, "end": 33.56, "word": " the", "probability": 0.916015625}, {"start": 33.56, "end": 33.88, "word": " lower", "probability": 0.859375}, {"start": 33.88, "end": 34.4, "word": " limit", "probability": 0.9638671875}, {"start": 34.4, "end": 35.3, "word": " or", "probability": 0.50439453125}, {"start": 35.3, "end": 37.18, "word": " is", "probability": 0.82763671875}, {"start": 37.18, "end": 37.6, "word": " above", "probability": 0.966796875}, {"start": 37.6, "end": 37.78, "word": " the", "probability": 0.87548828125}, {"start": 37.78, "end": 38.02, "word": " upper", "probability": 0.79638671875}, {"start": 38.02, "end": 38.58, "word": " limit,", "probability": 0.95849609375}, {"start": 39.2, "end": 39.54, "word": " that", "probability": 0.8876953125}, {"start": 39.54, "end": 39.88, "word": " point", "probability": 0.9462890625}, {"start": 39.88, "end": 40.12, "word": " is", "probability": 0.9208984375}, {"start": 40.12, "end": 40.46, "word": " considered", "probability": 0.80810546875}, {"start": 40.46, "end": 40.72, "word": " to", "probability": 0.9658203125}, {"start": 40.72, "end": 41.0, "word": " be", "probability": 0.953125}, {"start": 41.0, "end": 41.54, "word": " an", "probability": 0.92041015625}, {"start": 41.54, "end": 41.96, "word": " outlier.", "probability": 0.96240234375}, {"start": 42.86, "end": 43.0, "word": " So", "probability": 0.2032470703125}, {"start": 43.0, "end": 43.28, "word": " that's", "probability": 0.794921875}, {"start": 43.28, "end": 43.38, "word": " one", "probability": 0.88720703125}, {"start": 43.38, "end": 43.5, "word": " of", "probability": 0.94384765625}, {"start": 43.5, "end": 43.8, "word": " the", "probability": 0.9169921875}, {"start": 43.8, "end": 44.56, "word": " usage", "probability": 0.759765625}, {"start": 44.56, "end": 44.92, "word": " of", "probability": 0.970703125}, {"start": 44.92, "end": 45.2, "word": 
" the", "probability": 0.30322265625}, {"start": 45.2, "end": 46.1, "word": " backsplat.", "probability": 0.4542643229166667}, {"start": 46.6, "end": 46.84, "word": " I", "probability": 0.90185546875}, {"start": 46.84, "end": 46.94, "word": " mean,", "probability": 0.95751953125}, {"start": 47.0, "end": 47.1, "word": " for", "probability": 0.94775390625}, {"start": 47.1, "end": 47.32, "word": " this", "probability": 0.94580078125}, {"start": 47.32, "end": 47.88, "word": " specific", "probability": 0.8984375}, {"start": 47.88, "end": 48.34, "word": " example,", "probability": 0.9736328125}, {"start": 49.78, "end": 50.6, "word": " we", "probability": 0.9501953125}, {"start": 50.6, "end": 50.88, "word": " mentioned", "probability": 0.82568359375}, {"start": 50.88, "end": 51.14, "word": " last", "probability": 0.8623046875}, {"start": 51.14, "end": 51.36, "word": " time", "probability": 0.88671875}, {"start": 51.36, "end": 51.72, "word": " 27", "probability": 0.556640625}, {"start": 51.72, "end": 52.56, "word": " is", "probability": 0.9404296875}, {"start": 52.56, "end": 52.74, "word": " an", "probability": 0.96142578125}, {"start": 52.74, "end": 53.08, "word": " outlier.", "probability": 0.97265625}], "temperature": 1.0}, {"id": 3, "seek": 7695, "start": 54.37, "end": 76.95, "text": " And also here you can tell also the data are right skewed because the right tail exactly is much longer than the left tail. 
I mean the distance between or from the median and the maximum value is bigger or larger than the distance from the median to the smallest value.", "tokens": [400, 611, 510, 291, 393, 980, 611, 264, 1412, 366, 558, 8756, 26896, 570, 264, 558, 6838, 2293, 307, 709, 2854, 813, 264, 1411, 6838, 13, 286, 914, 264, 4560, 1296, 420, 490, 264, 26779, 293, 264, 6674, 2158, 307, 3801, 420, 4833, 813, 264, 4560, 490, 264, 26779, 281, 264, 16998, 2158, 13], "avg_logprob": -0.15965909632769498, "compression_ratio": 1.6981132075471699, "no_speech_prob": 0.0, "words": [{"start": 54.37, "end": 54.71, "word": " And", "probability": 0.75}, {"start": 54.71, "end": 55.03, "word": " also", "probability": 0.74169921875}, {"start": 55.03, "end": 55.35, "word": " here", "probability": 0.7890625}, {"start": 55.35, "end": 56.11, "word": " you", "probability": 0.412353515625}, {"start": 56.11, "end": 56.35, "word": " can", "probability": 0.9482421875}, {"start": 56.35, "end": 56.53, "word": " tell", "probability": 0.8671875}, {"start": 56.53, "end": 56.91, "word": " also", "probability": 0.8056640625}, {"start": 56.91, "end": 57.31, "word": " the", "probability": 0.7236328125}, {"start": 57.31, "end": 57.71, "word": " data", "probability": 0.90234375}, {"start": 57.71, "end": 58.19, "word": " are", "probability": 0.9072265625}, {"start": 58.19, "end": 58.45, "word": " right", "probability": 0.884765625}, {"start": 58.45, "end": 59.03, "word": " skewed", "probability": 0.93994140625}, {"start": 59.03, "end": 59.77, "word": " because", "probability": 0.60546875}, {"start": 59.77, "end": 61.13, "word": " the", "probability": 0.48828125}, {"start": 61.13, "end": 62.41, "word": " right", "probability": 0.64111328125}, {"start": 62.41, "end": 62.77, "word": " tail", "probability": 0.7841796875}, {"start": 62.77, "end": 63.27, "word": " exactly", "probability": 0.58740234375}, {"start": 63.27, "end": 63.95, "word": " is", "probability": 0.90283203125}, {"start": 63.95, "end": 64.29, "word": " 
much", "probability": 0.9150390625}, {"start": 64.29, "end": 64.77, "word": " longer", "probability": 0.93896484375}, {"start": 64.77, "end": 65.13, "word": " than", "probability": 0.94189453125}, {"start": 65.13, "end": 65.33, "word": " the", "probability": 0.91162109375}, {"start": 65.33, "end": 65.53, "word": " left", "probability": 0.94580078125}, {"start": 65.53, "end": 65.75, "word": " tail.", "probability": 0.86279296875}, {"start": 65.89, "end": 65.91, "word": " I", "probability": 0.99072265625}, {"start": 65.91, "end": 66.09, "word": " mean", "probability": 0.96630859375}, {"start": 66.09, "end": 66.29, "word": " the", "probability": 0.77490234375}, {"start": 66.29, "end": 66.69, "word": " distance", "probability": 0.943359375}, {"start": 66.69, "end": 67.13, "word": " between", "probability": 0.81591796875}, {"start": 67.13, "end": 67.91, "word": " or", "probability": 0.8525390625}, {"start": 67.91, "end": 68.31, "word": " from", "probability": 0.892578125}, {"start": 68.31, "end": 68.99, "word": " the", "probability": 0.92236328125}, {"start": 68.99, "end": 69.31, "word": " median", "probability": 0.9609375}, {"start": 69.31, "end": 70.23, "word": " and", "probability": 0.84619140625}, {"start": 70.23, "end": 70.45, "word": " the", "probability": 0.8935546875}, {"start": 70.45, "end": 70.81, "word": " maximum", "probability": 0.92041015625}, {"start": 70.81, "end": 71.37, "word": " value", "probability": 0.97509765625}, {"start": 71.37, "end": 72.37, "word": " is", "probability": 0.72021484375}, {"start": 72.37, "end": 72.81, "word": " bigger", "probability": 0.9228515625}, {"start": 72.81, "end": 73.27, "word": " or", "probability": 0.89208984375}, {"start": 73.27, "end": 73.65, "word": " larger", "probability": 0.9384765625}, {"start": 73.65, "end": 73.95, "word": " than", "probability": 0.93505859375}, {"start": 73.95, "end": 74.15, "word": " the", "probability": 0.91796875}, {"start": 74.15, "end": 74.51, "word": " distance", "probability": 
0.94189453125}, {"start": 74.51, "end": 74.89, "word": " from", "probability": 0.87939453125}, {"start": 74.89, "end": 75.57, "word": " the", "probability": 0.91796875}, {"start": 75.57, "end": 75.87, "word": " median", "probability": 0.9736328125}, {"start": 75.87, "end": 76.13, "word": " to", "probability": 0.9619140625}, {"start": 76.13, "end": 76.29, "word": " the", "probability": 0.91259765625}, {"start": 76.29, "end": 76.63, "word": " smallest", "probability": 0.9306640625}, {"start": 76.63, "end": 76.95, "word": " value.", "probability": 0.865234375}], "temperature": 1.0}, {"id": 4, "seek": 9225, "start": 77.45, "end": 92.25, "text": " That means the data is not symmetric, it's quite skewed to the right. In this case, you cannot use the mean or the range as a measure of spread and median and, I'm sorry, mean as a measure of tendency.", "tokens": [663, 1355, 264, 1412, 307, 406, 32330, 11, 309, 311, 1596, 8756, 26896, 281, 264, 558, 13, 682, 341, 1389, 11, 291, 2644, 764, 264, 914, 420, 264, 3613, 382, 257, 3481, 295, 3974, 293, 26779, 293, 11, 286, 478, 2597, 11, 914, 382, 257, 3481, 295, 18187, 13], "avg_logprob": -0.18484374582767488, "compression_ratio": 1.4852941176470589, "no_speech_prob": 0.0, "words": [{"start": 77.45, "end": 77.77, "word": " That", "probability": 0.57177734375}, {"start": 77.77, "end": 78.01, "word": " means", "probability": 0.92236328125}, {"start": 78.01, "end": 78.17, "word": " the", "probability": 0.841796875}, {"start": 78.17, "end": 78.47, "word": " data", "probability": 0.9482421875}, {"start": 78.47, "end": 78.77, "word": " is", "probability": 0.94384765625}, {"start": 78.77, "end": 79.01, "word": " not", "probability": 0.94580078125}, {"start": 79.01, "end": 79.49, "word": " symmetric,", "probability": 0.86083984375}, {"start": 80.03, "end": 80.37, "word": " it's", "probability": 0.915771484375}, {"start": 80.37, "end": 80.87, "word": " quite", "probability": 0.9267578125}, {"start": 80.87, "end": 81.33, "word": " skewed", 
"probability": 0.96875}, {"start": 81.33, "end": 81.47, "word": " to", "probability": 0.9580078125}, {"start": 81.47, "end": 81.59, "word": " the", "probability": 0.91943359375}, {"start": 81.59, "end": 81.83, "word": " right.", "probability": 0.9208984375}, {"start": 82.43, "end": 82.69, "word": " In", "probability": 0.94873046875}, {"start": 82.69, "end": 82.89, "word": " this", "probability": 0.9462890625}, {"start": 82.89, "end": 83.15, "word": " case,", "probability": 0.91259765625}, {"start": 83.23, "end": 83.33, "word": " you", "probability": 0.94970703125}, {"start": 83.33, "end": 83.57, "word": " cannot", "probability": 0.86474609375}, {"start": 83.57, "end": 84.05, "word": " use", "probability": 0.87890625}, {"start": 84.05, "end": 84.57, "word": " the", "probability": 0.92041015625}, {"start": 84.57, "end": 84.79, "word": " mean", "probability": 0.96142578125}, {"start": 84.79, "end": 86.67, "word": " or", "probability": 0.85107421875}, {"start": 86.67, "end": 86.99, "word": " the", "probability": 0.9130859375}, {"start": 86.99, "end": 87.31, "word": " range", "probability": 0.86767578125}, {"start": 87.31, "end": 87.63, "word": " as", "probability": 0.947265625}, {"start": 87.63, "end": 87.79, "word": " a", "probability": 0.372802734375}, {"start": 87.79, "end": 87.97, "word": " measure", "probability": 0.8828125}, {"start": 87.97, "end": 88.21, "word": " of", "probability": 0.96240234375}, {"start": 88.21, "end": 88.65, "word": " spread", "probability": 0.701171875}, {"start": 88.65, "end": 89.37, "word": " and", "probability": 0.80322265625}, {"start": 89.37, "end": 89.77, "word": " median", "probability": 0.908203125}, {"start": 89.77, "end": 90.17, "word": " and,", "probability": 0.5302734375}, {"start": 90.21, "end": 90.35, "word": " I'm", "probability": 0.943603515625}, {"start": 90.35, "end": 90.49, "word": " sorry,", "probability": 0.85107421875}, {"start": 90.59, "end": 90.83, "word": " mean", "probability": 0.81640625}, {"start": 90.83, "end": 
91.09, "word": " as", "probability": 0.923828125}, {"start": 91.09, "end": 91.25, "word": " a", "probability": 0.5634765625}, {"start": 91.25, "end": 91.47, "word": " measure", "probability": 0.86962890625}, {"start": 91.47, "end": 91.73, "word": " of", "probability": 0.96044921875}, {"start": 91.73, "end": 92.25, "word": " tendency.", "probability": 0.9287109375}], "temperature": 1.0}, {"id": 5, "seek": 11333, "start": 93.69, "end": 113.33, "text": " Because these measures are affected by outcomes. In this case, you have to use the median instead of the mean and IQR instead of the range because IQR is the mid-spread of the data because we just take the range from Q3 to Q1. That means we ignore", "tokens": [1436, 613, 8000, 366, 8028, 538, 10070, 13, 682, 341, 1389, 11, 291, 362, 281, 764, 264, 26779, 2602, 295, 264, 914, 293, 28921, 49, 2602, 295, 264, 3613, 570, 28921, 49, 307, 264, 2062, 12, 4952, 2538, 295, 264, 1412, 570, 321, 445, 747, 264, 3613, 490, 1249, 18, 281, 1249, 16, 13, 663, 1355, 321, 11200], "avg_logprob": -0.21173199455616837, "compression_ratio": 1.5897435897435896, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 93.69, "end": 94.27, "word": " Because", "probability": 0.489013671875}, {"start": 94.27, "end": 94.57, "word": " these", "probability": 0.765625}, {"start": 94.57, "end": 94.99, "word": " measures", "probability": 0.77587890625}, {"start": 94.99, "end": 95.31, "word": " are", "probability": 0.93359375}, {"start": 95.31, "end": 95.65, "word": " affected", "probability": 0.7861328125}, {"start": 95.65, "end": 96.13, "word": " by", "probability": 0.97021484375}, {"start": 96.13, "end": 96.79, "word": " outcomes.", "probability": 0.10870361328125}, {"start": 97.17, "end": 97.25, "word": " In", "probability": 0.9150390625}, {"start": 97.25, "end": 97.45, "word": " this", "probability": 0.9443359375}, {"start": 97.45, "end": 97.69, "word": " case,", "probability": 0.916015625}, {"start": 97.79, "end": 97.83, "word": " you", 
"probability": 0.92626953125}, {"start": 97.83, "end": 97.99, "word": " have", "probability": 0.9404296875}, {"start": 97.99, "end": 98.09, "word": " to", "probability": 0.9677734375}, {"start": 98.09, "end": 98.41, "word": " use", "probability": 0.8818359375}, {"start": 98.41, "end": 99.13, "word": " the", "probability": 0.87548828125}, {"start": 99.13, "end": 99.45, "word": " median", "probability": 0.94287109375}, {"start": 99.45, "end": 99.91, "word": " instead", "probability": 0.83837890625}, {"start": 99.91, "end": 100.05, "word": " of", "probability": 0.96923828125}, {"start": 100.05, "end": 100.19, "word": " the", "probability": 0.92333984375}, {"start": 100.19, "end": 100.41, "word": " mean", "probability": 0.9755859375}, {"start": 100.41, "end": 100.97, "word": " and", "probability": 0.5654296875}, {"start": 100.97, "end": 102.19, "word": " IQR", "probability": 0.9736328125}, {"start": 102.19, "end": 103.01, "word": " instead", "probability": 0.79541015625}, {"start": 103.01, "end": 103.39, "word": " of", "probability": 0.96435546875}, {"start": 103.39, "end": 103.51, "word": " the", "probability": 0.9033203125}, {"start": 103.51, "end": 103.69, "word": " range", "probability": 0.7890625}, {"start": 103.69, "end": 104.07, "word": " because", "probability": 0.4013671875}, {"start": 104.07, "end": 105.17, "word": " IQR", "probability": 0.96826171875}, {"start": 105.17, "end": 105.41, "word": " is", "probability": 0.921875}, {"start": 105.41, "end": 105.75, "word": " the", "probability": 0.91650390625}, {"start": 105.75, "end": 106.43, "word": " mid", "probability": 0.81982421875}, {"start": 106.43, "end": 106.99, "word": "-spread", "probability": 0.7516276041666666}, {"start": 106.99, "end": 107.25, "word": " of", "probability": 0.95361328125}, {"start": 107.25, "end": 107.39, "word": " the", "probability": 0.916015625}, {"start": 107.39, "end": 107.67, "word": " data", "probability": 0.9306640625}, {"start": 107.67, "end": 108.09, "word": " because", 
"probability": 0.482177734375}, {"start": 108.09, "end": 108.31, "word": " we", "probability": 0.94384765625}, {"start": 108.31, "end": 109.65, "word": " just", "probability": 0.8818359375}, {"start": 109.65, "end": 109.93, "word": " take", "probability": 0.83740234375}, {"start": 109.93, "end": 110.13, "word": " the", "probability": 0.90478515625}, {"start": 110.13, "end": 110.47, "word": " range", "probability": 0.884765625}, {"start": 110.47, "end": 110.93, "word": " from", "probability": 0.88818359375}, {"start": 110.93, "end": 111.55, "word": " Q3", "probability": 0.96533203125}, {"start": 111.55, "end": 111.77, "word": " to", "probability": 0.9658203125}, {"start": 111.77, "end": 112.17, "word": " Q1.", "probability": 0.99755859375}, {"start": 112.27, "end": 112.47, "word": " That", "probability": 0.89306640625}, {"start": 112.47, "end": 112.79, "word": " means", "probability": 0.9345703125}, {"start": 112.79, "end": 112.97, "word": " we", "probability": 0.888671875}, {"start": 112.97, "end": 113.33, "word": " ignore", "probability": 0.8486328125}], "temperature": 1.0}, {"id": 6, "seek": 14225, "start": 113.79, "end": 142.25, "text": " The data below Q1 and data after Q3. That means IQR is not affected by outlier and it's better to use it instead of R, of the range. If the data has an outlier, it's better just to make a star or circle for the box plot because this one mentioned that that point is an outlier. 
Sometimes outlier is maximum value or the largest value you have.", "tokens": [440, 1412, 2507, 1249, 16, 293, 1412, 934, 1249, 18, 13, 663, 1355, 28921, 49, 307, 406, 8028, 538, 484, 2753, 293, 309, 311, 1101, 281, 764, 309, 2602, 295, 497, 11, 295, 264, 3613, 13, 759, 264, 1412, 575, 364, 484, 2753, 11, 309, 311, 1101, 445, 281, 652, 257, 3543, 420, 6329, 337, 264, 2424, 7542, 570, 341, 472, 2835, 300, 300, 935, 307, 364, 484, 2753, 13, 4803, 484, 2753, 307, 6674, 2158, 420, 264, 6443, 2158, 291, 362, 13], "avg_logprob": -0.22321428660125958, "compression_ratio": 1.5925925925925926, "no_speech_prob": 0.0, "words": [{"start": 113.79, "end": 114.11, "word": " The", "probability": 0.45556640625}, {"start": 114.11, "end": 114.31, "word": " data", "probability": 0.91455078125}, {"start": 114.31, "end": 114.71, "word": " below", "probability": 0.31494140625}, {"start": 114.71, "end": 115.45, "word": " Q1", "probability": 0.80029296875}, {"start": 115.45, "end": 115.65, "word": " and", "probability": 0.8583984375}, {"start": 115.65, "end": 115.87, "word": " data", "probability": 0.8134765625}, {"start": 115.87, "end": 116.21, "word": " after", "probability": 0.43994140625}, {"start": 116.21, "end": 117.45, "word": " Q3.", "probability": 0.962646484375}, {"start": 117.97, "end": 118.29, "word": " That", "probability": 0.857421875}, {"start": 118.29, "end": 118.61, "word": " means", "probability": 0.92724609375}, {"start": 118.61, "end": 119.21, "word": " IQR", "probability": 0.9140625}, {"start": 119.21, "end": 119.39, "word": " is", "probability": 0.93896484375}, {"start": 119.39, "end": 119.61, "word": " not", "probability": 0.94287109375}, {"start": 119.61, "end": 120.15, "word": " affected", "probability": 0.80908203125}, {"start": 120.15, "end": 120.69, "word": " by", "probability": 0.96142578125}, {"start": 120.69, "end": 121.11, "word": " outlier", "probability": 0.754638671875}, {"start": 121.11, "end": 121.31, "word": " and", "probability": 0.5048828125}, 
{"start": 121.31, "end": 121.37, "word": " it's", "probability": 0.61767578125}, {"start": 121.37, "end": 121.57, "word": " better", "probability": 0.9130859375}, {"start": 121.57, "end": 121.79, "word": " to", "probability": 0.94189453125}, {"start": 121.79, "end": 121.99, "word": " use", "probability": 0.87890625}, {"start": 121.99, "end": 122.23, "word": " it", "probability": 0.93115234375}, {"start": 122.23, "end": 122.89, "word": " instead", "probability": 0.80615234375}, {"start": 122.89, "end": 123.27, "word": " of", "probability": 0.96533203125}, {"start": 123.27, "end": 123.59, "word": " R,", "probability": 0.91845703125}, {"start": 123.83, "end": 124.17, "word": " of", "probability": 0.60986328125}, {"start": 124.17, "end": 124.31, "word": " the", "probability": 0.84326171875}, {"start": 124.31, "end": 124.61, "word": " range.", "probability": 0.451171875}, {"start": 127.47, "end": 128.07, "word": " If", "probability": 0.8447265625}, {"start": 128.07, "end": 128.25, "word": " the", "probability": 0.91650390625}, {"start": 128.25, "end": 128.45, "word": " data", "probability": 0.95068359375}, {"start": 128.45, "end": 128.73, "word": " has", "probability": 0.90380859375}, {"start": 128.73, "end": 128.89, "word": " an", "probability": 0.90185546875}, {"start": 128.89, "end": 129.23, "word": " outlier,", "probability": 0.95654296875}, {"start": 129.37, "end": 129.47, "word": " it's", "probability": 0.949951171875}, {"start": 129.47, "end": 129.67, "word": " better", "probability": 0.90185546875}, {"start": 129.67, "end": 130.13, "word": " just", "probability": 0.7939453125}, {"start": 130.13, "end": 130.95, "word": " to", "probability": 0.9423828125}, {"start": 130.95, "end": 131.13, "word": " make", "probability": 0.93310546875}, {"start": 131.13, "end": 131.25, "word": " a", "probability": 0.93408203125}, {"start": 131.25, "end": 131.51, "word": " star", "probability": 0.86865234375}, {"start": 131.51, "end": 131.69, "word": " or", "probability": 
0.92333984375}, {"start": 131.69, "end": 132.03, "word": " circle", "probability": 0.70068359375}, {"start": 132.03, "end": 132.67, "word": " for", "probability": 0.93017578125}, {"start": 132.67, "end": 132.79, "word": " the", "probability": 0.90478515625}, {"start": 132.79, "end": 132.97, "word": " box", "probability": 0.31787109375}, {"start": 132.97, "end": 133.25, "word": " plot", "probability": 0.857421875}, {"start": 133.25, "end": 133.99, "word": " because", "probability": 0.552734375}, {"start": 133.99, "end": 134.23, "word": " this", "probability": 0.94140625}, {"start": 134.23, "end": 134.43, "word": " one", "probability": 0.92431640625}, {"start": 134.43, "end": 134.81, "word": " mentioned", "probability": 0.6552734375}, {"start": 134.81, "end": 135.17, "word": " that", "probability": 0.888671875}, {"start": 135.17, "end": 136.19, "word": " that", "probability": 0.64501953125}, {"start": 136.19, "end": 136.51, "word": " point", "probability": 0.974609375}, {"start": 136.51, "end": 136.73, "word": " is", "probability": 0.94580078125}, {"start": 136.73, "end": 136.93, "word": " an", "probability": 0.951171875}, {"start": 136.93, "end": 137.25, "word": " outlier.", "probability": 0.9609375}, {"start": 138.39, "end": 138.99, "word": " Sometimes", "probability": 0.9306640625}, {"start": 138.99, "end": 139.41, "word": " outlier", "probability": 0.8271484375}, {"start": 139.41, "end": 139.61, "word": " is", "probability": 0.8720703125}, {"start": 139.61, "end": 139.99, "word": " maximum", "probability": 0.88232421875}, {"start": 139.99, "end": 140.47, "word": " value", "probability": 0.97705078125}, {"start": 140.47, "end": 140.95, "word": " or", "probability": 0.82763671875}, {"start": 140.95, "end": 141.09, "word": " the", "probability": 0.91455078125}, {"start": 141.09, "end": 141.39, "word": " largest", "probability": 0.91943359375}, {"start": 141.39, "end": 141.83, "word": " value", "probability": 0.974609375}, {"start": 141.83, "end": 142.03, "word": " 
you", "probability": 0.52294921875}, {"start": 142.03, "end": 142.25, "word": " have.", "probability": 0.94677734375}], "temperature": 1.0}, {"id": 7, "seek": 17174, "start": 143.3, "end": 171.74, "text": " sometimes maybe the minimum value. So it depends on the data. For this example, 27, which was the maximum, is an outlier. But zero is not outlier in this case, because zero is above the lower limit. Let's move to the next topic, which talks about covariance and correlation. Later, we'll talk in more details about", "tokens": [2171, 1310, 264, 7285, 2158, 13, 407, 309, 5946, 322, 264, 1412, 13, 1171, 341, 1365, 11, 7634, 11, 597, 390, 264, 6674, 11, 307, 364, 484, 2753, 13, 583, 4018, 307, 406, 484, 2753, 294, 341, 1389, 11, 570, 4018, 307, 3673, 264, 3126, 4948, 13, 961, 311, 1286, 281, 264, 958, 4829, 11, 597, 6686, 466, 49851, 719, 293, 20009, 13, 11965, 11, 321, 603, 751, 294, 544, 4365, 466], "avg_logprob": -0.1477953750793248, "compression_ratio": 1.5594059405940595, "no_speech_prob": 0.0, "words": [{"start": 143.3, "end": 143.88, "word": " sometimes", "probability": 0.51708984375}, {"start": 143.88, "end": 144.16, "word": " maybe", "probability": 0.7294921875}, {"start": 144.16, "end": 144.34, "word": " the", "probability": 0.82421875}, {"start": 144.34, "end": 144.64, "word": " minimum", "probability": 0.91259765625}, {"start": 144.64, "end": 145.0, "word": " value.", "probability": 0.77587890625}, {"start": 145.52, "end": 145.86, "word": " So", "probability": 0.888671875}, {"start": 145.86, "end": 145.96, "word": " it", "probability": 0.72998046875}, {"start": 145.96, "end": 146.2, "word": " depends", "probability": 0.896484375}, {"start": 146.2, "end": 146.42, "word": " on", "probability": 0.9521484375}, {"start": 146.42, "end": 146.52, "word": " the", "probability": 0.916015625}, {"start": 146.52, "end": 146.76, "word": " data.", "probability": 0.8427734375}, {"start": 147.24, "end": 147.4, "word": " For", "probability": 0.95703125}, {"start": 147.4, 
"end": 147.62, "word": " this", "probability": 0.939453125}, {"start": 147.62, "end": 147.96, "word": " example,", "probability": 0.97216796875}, {"start": 148.06, "end": 148.48, "word": " 27,", "probability": 0.90576171875}, {"start": 148.72, "end": 148.92, "word": " which", "probability": 0.9482421875}, {"start": 148.92, "end": 149.18, "word": " was", "probability": 0.9501953125}, {"start": 149.18, "end": 149.34, "word": " the", "probability": 0.91650390625}, {"start": 149.34, "end": 149.74, "word": " maximum,", "probability": 0.93212890625}, {"start": 150.4, "end": 150.9, "word": " is", "probability": 0.9345703125}, {"start": 150.9, "end": 151.06, "word": " an", "probability": 0.9453125}, {"start": 151.06, "end": 151.42, "word": " outlier.", "probability": 0.9365234375}, {"start": 152.26, "end": 152.6, "word": " But", "probability": 0.92822265625}, {"start": 152.6, "end": 153.2, "word": " zero", "probability": 0.67578125}, {"start": 153.2, "end": 153.36, "word": " is", "probability": 0.7861328125}, {"start": 153.36, "end": 153.58, "word": " not", "probability": 0.94189453125}, {"start": 153.58, "end": 153.98, "word": " outlier", "probability": 0.815673828125}, {"start": 153.98, "end": 154.14, "word": " in", "probability": 0.93212890625}, {"start": 154.14, "end": 154.34, "word": " this", "probability": 0.9462890625}, {"start": 154.34, "end": 154.68, "word": " case,", "probability": 0.91015625}, {"start": 154.98, "end": 155.3, "word": " because", "probability": 0.89404296875}, {"start": 155.3, "end": 155.68, "word": " zero", "probability": 0.87841796875}, {"start": 155.68, "end": 156.06, "word": " is", "probability": 0.94970703125}, {"start": 156.06, "end": 156.52, "word": " above", "probability": 0.96435546875}, {"start": 156.52, "end": 157.54, "word": " the", "probability": 0.91015625}, {"start": 157.54, "end": 157.98, "word": " lower", "probability": 0.83251953125}, {"start": 157.98, "end": 158.42, "word": " limit.", "probability": 0.96533203125}, {"start": 
159.22, "end": 159.62, "word": " Let's", "probability": 0.97265625}, {"start": 159.62, "end": 159.98, "word": " move", "probability": 0.94140625}, {"start": 159.98, "end": 160.8, "word": " to", "probability": 0.9501953125}, {"start": 160.8, "end": 160.96, "word": " the", "probability": 0.9140625}, {"start": 160.96, "end": 161.14, "word": " next", "probability": 0.9384765625}, {"start": 161.14, "end": 161.5, "word": " topic,", "probability": 0.96533203125}, {"start": 162.14, "end": 162.8, "word": " which", "probability": 0.95068359375}, {"start": 162.8, "end": 163.14, "word": " talks", "probability": 0.875}, {"start": 163.14, "end": 163.72, "word": " about", "probability": 0.91259765625}, {"start": 163.72, "end": 166.14, "word": " covariance", "probability": 0.9013671875}, {"start": 166.14, "end": 167.26, "word": " and", "probability": 0.93359375}, {"start": 167.26, "end": 168.06, "word": " correlation.", "probability": 0.92626953125}, {"start": 168.96, "end": 169.4, "word": " Later,", "probability": 0.8974609375}, {"start": 169.86, "end": 170.28, "word": " we'll", "probability": 0.902587890625}, {"start": 170.28, "end": 170.6, "word": " talk", "probability": 0.8779296875}, {"start": 170.6, "end": 170.76, "word": " in", "probability": 0.94091796875}, {"start": 170.76, "end": 171.0, "word": " more", "probability": 0.93212890625}, {"start": 171.0, "end": 171.34, "word": " details", "probability": 0.77001953125}, {"start": 171.34, "end": 171.74, "word": " about", "probability": 0.90380859375}], "temperature": 1.0}, {"id": 8, "seek": 19636, "start": 173.02, "end": 196.36, "text": " Correlation and regression, that's when maybe chapter 11 or 12. But here we just show how can we compute the covariance of the correlation coefficient and what's the meaning of that value we have. 
The covariance means it measures the strength", "tokens": [3925, 4419, 399, 293, 24590, 11, 300, 311, 562, 1310, 7187, 2975, 420, 2272, 13, 583, 510, 321, 445, 855, 577, 393, 321, 14722, 264, 49851, 719, 295, 264, 20009, 17619, 293, 437, 311, 264, 3620, 295, 300, 2158, 321, 362, 13, 440, 49851, 719, 1355, 309, 8000, 264, 3800], "avg_logprob": -0.21553308356042003, "compression_ratio": 1.51875, "no_speech_prob": 0.0, "words": [{"start": 173.02, "end": 173.84, "word": " Correlation", "probability": 0.75244140625}, {"start": 173.84, "end": 174.24, "word": " and", "probability": 0.904296875}, {"start": 174.24, "end": 174.7, "word": " regression,", "probability": 0.923828125}, {"start": 175.12, "end": 175.38, "word": " that's", "probability": 0.92529296875}, {"start": 175.38, "end": 175.64, "word": " when", "probability": 0.36767578125}, {"start": 175.64, "end": 176.06, "word": " maybe", "probability": 0.70068359375}, {"start": 176.06, "end": 177.2, "word": " chapter", "probability": 0.609375}, {"start": 177.2, "end": 177.52, "word": " 11", "probability": 0.9052734375}, {"start": 177.52, "end": 177.74, "word": " or", "probability": 0.9521484375}, {"start": 177.74, "end": 178.06, "word": " 12.", "probability": 0.97265625}, {"start": 179.34, "end": 179.74, "word": " But", "probability": 0.92578125}, {"start": 179.74, "end": 180.08, "word": " here", "probability": 0.8544921875}, {"start": 180.08, "end": 180.28, "word": " we", "probability": 0.67919921875}, {"start": 180.28, "end": 180.88, "word": " just", "probability": 0.90087890625}, {"start": 180.88, "end": 182.24, "word": " show", "probability": 0.91162109375}, {"start": 182.24, "end": 182.48, "word": " how", "probability": 0.9248046875}, {"start": 182.48, "end": 182.68, "word": " can", "probability": 0.90625}, {"start": 182.68, "end": 182.84, "word": " we", "probability": 0.9326171875}, {"start": 182.84, "end": 183.32, "word": " compute", "probability": 0.90087890625}, {"start": 183.32, "end": 184.1, "word": " the", 
"probability": 0.8125}, {"start": 184.1, "end": 184.68, "word": " covariance", "probability": 0.91552734375}, {"start": 184.68, "end": 184.9, "word": " of", "probability": 0.51025390625}, {"start": 184.9, "end": 185.06, "word": " the", "probability": 0.87939453125}, {"start": 185.06, "end": 185.42, "word": " correlation", "probability": 0.94189453125}, {"start": 185.42, "end": 185.96, "word": " coefficient", "probability": 0.94677734375}, {"start": 185.96, "end": 187.0, "word": " and", "probability": 0.435302734375}, {"start": 187.0, "end": 187.36, "word": " what's", "probability": 0.957763671875}, {"start": 187.36, "end": 187.58, "word": " the", "probability": 0.91162109375}, {"start": 187.58, "end": 188.08, "word": " meaning", "probability": 0.87744140625}, {"start": 188.08, "end": 188.74, "word": " of", "probability": 0.95166015625}, {"start": 188.74, "end": 189.88, "word": " that", "probability": 0.9267578125}, {"start": 189.88, "end": 190.22, "word": " value", "probability": 0.966796875}, {"start": 190.22, "end": 190.4, "word": " we", "probability": 0.58935546875}, {"start": 190.4, "end": 190.68, "word": " have.", "probability": 0.71142578125}, {"start": 192.2, "end": 192.5, "word": " The", "probability": 0.80615234375}, {"start": 192.5, "end": 193.26, "word": " covariance", "probability": 0.9365234375}, {"start": 193.26, "end": 193.86, "word": " means", "probability": 0.94384765625}, {"start": 193.86, "end": 194.98, "word": " it", "probability": 0.8505859375}, {"start": 194.98, "end": 195.38, "word": " measures", "probability": 0.88671875}, {"start": 195.38, "end": 195.84, "word": " the", "probability": 0.90771484375}, {"start": 195.84, "end": 196.36, "word": " strength", "probability": 0.85107421875}], "temperature": 1.0}, {"id": 9, "seek": 22553, "start": 197.27, "end": 225.53, "text": " of the linear relationship between two numerical variables. 
That means if the data set is numeric, I mean if both variables are numeric, in this case we can use the covariance to measure the strength of the linear association or relationship between two numerical variables. Now the formula is used to compute the covariance given by this one.", "tokens": [295, 264, 8213, 2480, 1296, 732, 29054, 9102, 13, 663, 1355, 498, 264, 1412, 992, 307, 7866, 299, 11, 286, 914, 498, 1293, 9102, 366, 7866, 299, 11, 294, 341, 1389, 321, 393, 764, 264, 49851, 719, 281, 3481, 264, 3800, 295, 264, 8213, 14598, 420, 2480, 1296, 732, 29054, 9102, 13, 823, 264, 8513, 307, 1143, 281, 14722, 264, 49851, 719, 2212, 538, 341, 472, 13], "avg_logprob": -0.16659006848931313, "compression_ratio": 1.9325842696629214, "no_speech_prob": 0.0, "words": [{"start": 197.27, "end": 197.67, "word": " of", "probability": 0.431396484375}, {"start": 197.67, "end": 197.89, "word": " the", "probability": 0.86083984375}, {"start": 197.89, "end": 198.27, "word": " linear", "probability": 0.93798828125}, {"start": 198.27, "end": 199.09, "word": " relationship", "probability": 0.91015625}, {"start": 199.09, "end": 200.15, "word": " between", "probability": 0.875}, {"start": 200.15, "end": 201.09, "word": " two", "probability": 0.9033203125}, {"start": 201.09, "end": 201.75, "word": " numerical", "probability": 0.93994140625}, {"start": 201.75, "end": 202.87, "word": " variables.", "probability": 0.890625}, {"start": 203.61, "end": 204.31, "word": " That", "probability": 0.87109375}, {"start": 204.31, "end": 204.55, "word": " means", "probability": 0.8984375}, {"start": 204.55, "end": 204.67, "word": " if", "probability": 0.55126953125}, {"start": 204.67, "end": 204.79, "word": " the", "probability": 0.90478515625}, {"start": 204.79, "end": 204.99, "word": " data", "probability": 0.67578125}, {"start": 204.99, "end": 205.25, "word": " set", "probability": 0.91357421875}, {"start": 205.25, "end": 205.41, "word": " is", "probability": 0.923828125}, {"start": 205.41, "end": 
205.95, "word": " numeric,", "probability": 0.718994140625}, {"start": 206.83, "end": 206.99, "word": " I", "probability": 0.85400390625}, {"start": 206.99, "end": 207.13, "word": " mean", "probability": 0.96630859375}, {"start": 207.13, "end": 207.35, "word": " if", "probability": 0.642578125}, {"start": 207.35, "end": 208.01, "word": " both", "probability": 0.6484375}, {"start": 208.01, "end": 208.47, "word": " variables", "probability": 0.92578125}, {"start": 208.47, "end": 208.71, "word": " are", "probability": 0.94287109375}, {"start": 208.71, "end": 209.15, "word": " numeric,", "probability": 0.899169921875}, {"start": 209.57, "end": 209.77, "word": " in", "probability": 0.83935546875}, {"start": 209.77, "end": 209.95, "word": " this", "probability": 0.94482421875}, {"start": 209.95, "end": 210.15, "word": " case", "probability": 0.90283203125}, {"start": 210.15, "end": 210.33, "word": " we", "probability": 0.5927734375}, {"start": 210.33, "end": 210.51, "word": " can", "probability": 0.94287109375}, {"start": 210.51, "end": 210.85, "word": " use", "probability": 0.8828125}, {"start": 210.85, "end": 211.83, "word": " the", "probability": 0.88916015625}, {"start": 211.83, "end": 212.37, "word": " covariance", "probability": 0.950439453125}, {"start": 212.37, "end": 212.61, "word": " to", "probability": 0.9609375}, {"start": 212.61, "end": 212.81, "word": " measure", "probability": 0.88623046875}, {"start": 212.81, "end": 213.05, "word": " the", "probability": 0.90869140625}, {"start": 213.05, "end": 213.41, "word": " strength", "probability": 0.84228515625}, {"start": 213.41, "end": 214.99, "word": " of", "probability": 0.91357421875}, {"start": 214.99, "end": 215.23, "word": " the", "probability": 0.91015625}, {"start": 215.23, "end": 215.63, "word": " linear", "probability": 0.916015625}, {"start": 215.63, "end": 216.51, "word": " association", "probability": 0.9130859375}, {"start": 216.51, "end": 217.25, "word": " or", "probability": 0.826171875}, 
{"start": 217.25, "end": 218.39, "word": " relationship", "probability": 0.88427734375}, {"start": 218.39, "end": 218.87, "word": " between", "probability": 0.876953125}, {"start": 218.87, "end": 219.11, "word": " two", "probability": 0.92529296875}, {"start": 219.11, "end": 219.51, "word": " numerical", "probability": 0.923828125}, {"start": 219.51, "end": 220.83, "word": " variables.", "probability": 0.93359375}, {"start": 221.53, "end": 221.73, "word": " Now", "probability": 0.9130859375}, {"start": 221.73, "end": 221.91, "word": " the", "probability": 0.60986328125}, {"start": 221.91, "end": 222.31, "word": " formula", "probability": 0.9111328125}, {"start": 222.31, "end": 222.85, "word": " is", "probability": 0.85498046875}, {"start": 222.85, "end": 223.11, "word": " used", "probability": 0.91259765625}, {"start": 223.11, "end": 223.33, "word": " to", "probability": 0.9677734375}, {"start": 223.33, "end": 223.71, "word": " compute", "probability": 0.91015625}, {"start": 223.71, "end": 224.23, "word": " the", "probability": 0.802734375}, {"start": 224.23, "end": 224.67, "word": " covariance", "probability": 0.89501953125}, {"start": 224.67, "end": 224.95, "word": " given", "probability": 0.46630859375}, {"start": 224.95, "end": 225.13, "word": " by", "probability": 0.96044921875}, {"start": 225.13, "end": 225.33, "word": " this", "probability": 0.94775390625}, {"start": 225.33, "end": 225.53, "word": " one.", "probability": 0.880859375}], "temperature": 1.0}, {"id": 10, "seek": 25116, "start": 226.92, "end": 251.16, "text": " It's summation of the product of xi minus x bar, yi minus y bar, divided by n minus 1. 
So we need first to compute the means of x and y, then find x minus x bar times y minus y bar, then sum all of these values, then divide by n minus 1.", "tokens": [467, 311, 28811, 295, 264, 1674, 295, 36800, 3175, 2031, 2159, 11, 288, 72, 3175, 288, 2159, 11, 6666, 538, 297, 3175, 502, 13, 407, 321, 643, 700, 281, 14722, 264, 1355, 295, 2031, 293, 288, 11, 550, 915, 2031, 3175, 2031, 2159, 1413, 288, 3175, 288, 2159, 11, 550, 2408, 439, 295, 613, 4190, 11, 550, 9845, 538, 297, 3175, 502, 13], "avg_logprob": -0.19299316126853228, "compression_ratio": 1.676056338028169, "no_speech_prob": 0.0, "words": [{"start": 226.92, "end": 227.48, "word": " It's", "probability": 0.7154541015625}, {"start": 227.48, "end": 228.04, "word": " summation", "probability": 0.802734375}, {"start": 228.04, "end": 229.18, "word": " of", "probability": 0.96142578125}, {"start": 229.18, "end": 229.48, "word": " the", "probability": 0.91455078125}, {"start": 229.48, "end": 230.16, "word": " product", "probability": 0.935546875}, {"start": 230.16, "end": 230.6, "word": " of", "probability": 0.966796875}, {"start": 230.6, "end": 231.74, "word": " xi", "probability": 0.389404296875}, {"start": 231.74, "end": 232.24, "word": " minus", "probability": 0.90576171875}, {"start": 232.24, "end": 232.54, "word": " x", "probability": 0.96923828125}, {"start": 232.54, "end": 232.84, "word": " bar,", "probability": 0.84423828125}, {"start": 232.94, "end": 233.26, "word": " yi", "probability": 0.91650390625}, {"start": 233.26, "end": 233.6, "word": " minus", "probability": 0.9892578125}, {"start": 233.6, "end": 233.84, "word": " y", "probability": 0.99755859375}, {"start": 233.84, "end": 234.2, "word": " bar,", "probability": 0.94873046875}, {"start": 234.74, "end": 235.18, "word": " divided", "probability": 0.70166015625}, {"start": 235.18, "end": 235.68, "word": " by", "probability": 0.97607421875}, {"start": 235.68, "end": 236.28, "word": " n", "probability": 0.63232421875}, {"start": 236.28, "end": 
236.32, "word": " minus", "probability": 0.9794921875}, {"start": 236.32, "end": 236.38, "word": " 1.", "probability": 0.31884765625}, {"start": 239.66, "end": 240.22, "word": " So", "probability": 0.919921875}, {"start": 240.22, "end": 240.38, "word": " we", "probability": 0.80078125}, {"start": 240.38, "end": 240.64, "word": " need", "probability": 0.91845703125}, {"start": 240.64, "end": 241.02, "word": " first", "probability": 0.79248046875}, {"start": 241.02, "end": 241.28, "word": " to", "probability": 0.6826171875}, {"start": 241.28, "end": 241.6, "word": " compute", "probability": 0.91162109375}, {"start": 241.6, "end": 242.06, "word": " the", "probability": 0.90673828125}, {"start": 242.06, "end": 242.34, "word": " means", "probability": 0.86474609375}, {"start": 242.34, "end": 242.5, "word": " of", "probability": 0.96533203125}, {"start": 242.5, "end": 242.66, "word": " x", "probability": 0.97314453125}, {"start": 242.66, "end": 242.82, "word": " and", "probability": 0.94970703125}, {"start": 242.82, "end": 243.12, "word": " y,", "probability": 0.99755859375}, {"start": 243.62, "end": 243.9, "word": " then", "probability": 0.83251953125}, {"start": 243.9, "end": 244.18, "word": " find", "probability": 0.880859375}, {"start": 244.18, "end": 244.42, "word": " x", "probability": 0.99072265625}, {"start": 244.42, "end": 244.66, "word": " minus", "probability": 0.9873046875}, {"start": 244.66, "end": 244.9, "word": " x", "probability": 0.99609375}, {"start": 244.9, "end": 245.18, "word": " bar", "probability": 0.8984375}, {"start": 245.18, "end": 246.4, "word": " times", "probability": 0.708984375}, {"start": 246.4, "end": 246.68, "word": " y", "probability": 0.99462890625}, {"start": 246.68, "end": 246.96, "word": " minus", "probability": 0.9873046875}, {"start": 246.96, "end": 247.24, "word": " y", "probability": 0.99609375}, {"start": 247.24, "end": 247.46, "word": " bar,", "probability": 0.93798828125}, {"start": 247.54, "end": 247.68, "word": " then", 
"probability": 0.82861328125}, {"start": 247.68, "end": 247.96, "word": " sum", "probability": 0.8701171875}, {"start": 247.96, "end": 249.1, "word": " all", "probability": 0.87841796875}, {"start": 249.1, "end": 249.22, "word": " of", "probability": 0.9443359375}, {"start": 249.22, "end": 249.4, "word": " these", "probability": 0.8447265625}, {"start": 249.4, "end": 249.78, "word": " values,", "probability": 0.96533203125}, {"start": 249.86, "end": 249.98, "word": " then", "probability": 0.7470703125}, {"start": 249.98, "end": 250.24, "word": " divide", "probability": 0.8759765625}, {"start": 250.24, "end": 250.52, "word": " by", "probability": 0.94873046875}, {"start": 250.52, "end": 250.74, "word": " n", "probability": 0.96337890625}, {"start": 250.74, "end": 250.98, "word": " minus", "probability": 0.98583984375}, {"start": 250.98, "end": 251.16, "word": " 1.", "probability": 0.81396484375}], "temperature": 1.0}, {"id": 11, "seek": 28108, "start": 252.87, "end": 281.09, "text": " The covariance only concerned with the strength of the relationship. By using the sign of the covariance, you can tell if there exists positive or negative relationship between the two variables. 
For example, if the covariance between x and y is positive, that means x and y move", "tokens": [440, 49851, 719, 787, 5922, 365, 264, 3800, 295, 264, 2480, 13, 3146, 1228, 264, 1465, 295, 264, 49851, 719, 11, 291, 393, 980, 498, 456, 8198, 3353, 420, 3671, 2480, 1296, 264, 732, 9102, 13, 1171, 1365, 11, 498, 264, 49851, 719, 1296, 2031, 293, 288, 307, 3353, 11, 300, 1355, 2031, 293, 288, 1286], "avg_logprob": -0.21299342105263158, "compression_ratio": 1.7177914110429449, "no_speech_prob": 0.0, "words": [{"start": 252.87, "end": 253.23, "word": " The", "probability": 0.344482421875}, {"start": 253.23, "end": 253.71, "word": " covariance", "probability": 0.89111328125}, {"start": 253.71, "end": 256.03, "word": " only", "probability": 0.568359375}, {"start": 256.03, "end": 256.43, "word": " concerned", "probability": 0.28466796875}, {"start": 256.43, "end": 256.81, "word": " with", "probability": 0.88671875}, {"start": 256.81, "end": 257.01, "word": " the", "probability": 0.8583984375}, {"start": 257.01, "end": 257.41, "word": " strength", "probability": 0.8056640625}, {"start": 257.41, "end": 257.77, "word": " of", "probability": 0.9619140625}, {"start": 257.77, "end": 257.89, "word": " the", "probability": 0.89794921875}, {"start": 257.89, "end": 258.31, "word": " relationship.", "probability": 0.869140625}, {"start": 259.43, "end": 260.07, "word": " By", "probability": 0.921875}, {"start": 260.07, "end": 260.49, "word": " using", "probability": 0.9296875}, {"start": 260.49, "end": 260.91, "word": " the", "probability": 0.890625}, {"start": 260.91, "end": 261.45, "word": " sign", "probability": 0.81689453125}, {"start": 261.45, "end": 263.21, "word": " of", "probability": 0.931640625}, {"start": 263.21, "end": 263.37, "word": " the", "probability": 0.90673828125}, {"start": 263.37, "end": 263.97, "word": " covariance,", "probability": 0.912109375}, {"start": 264.43, "end": 264.71, "word": " you", "probability": 0.91943359375}, {"start": 264.71, "end": 264.89, 
"word": " can", "probability": 0.93896484375}, {"start": 264.89, "end": 265.13, "word": " tell", "probability": 0.83642578125}, {"start": 265.13, "end": 265.27, "word": " if", "probability": 0.9296875}, {"start": 265.27, "end": 265.39, "word": " there", "probability": 0.9130859375}, {"start": 265.39, "end": 265.81, "word": " exists", "probability": 0.7783203125}, {"start": 265.81, "end": 267.01, "word": " positive", "probability": 0.73583984375}, {"start": 267.01, "end": 269.07, "word": " or", "probability": 0.9208984375}, {"start": 269.07, "end": 269.51, "word": " negative", "probability": 0.92724609375}, {"start": 269.51, "end": 270.43, "word": " relationship", "probability": 0.8798828125}, {"start": 270.43, "end": 270.75, "word": " between", "probability": 0.86572265625}, {"start": 270.75, "end": 270.91, "word": " the", "probability": 0.8046875}, {"start": 270.91, "end": 271.07, "word": " two", "probability": 0.79931640625}, {"start": 271.07, "end": 271.45, "word": " variables.", "probability": 0.9619140625}, {"start": 271.57, "end": 271.69, "word": " For", "probability": 0.935546875}, {"start": 271.69, "end": 272.05, "word": " example,", "probability": 0.9736328125}, {"start": 272.15, "end": 272.35, "word": " if", "probability": 0.92236328125}, {"start": 272.35, "end": 273.13, "word": " the", "probability": 0.54931640625}, {"start": 273.13, "end": 273.45, "word": " covariance", "probability": 0.881103515625}, {"start": 273.45, "end": 273.71, "word": " between", "probability": 0.88037109375}, {"start": 273.71, "end": 273.87, "word": " x", "probability": 0.464111328125}, {"start": 273.87, "end": 274.01, "word": " and", "probability": 0.94873046875}, {"start": 274.01, "end": 274.17, "word": " y", "probability": 0.9892578125}, {"start": 274.17, "end": 274.29, "word": " is", "probability": 0.93994140625}, {"start": 274.29, "end": 274.71, "word": " positive,", "probability": 0.92138671875}, {"start": 276.69, "end": 277.17, "word": " that", "probability": 
0.5458984375}, {"start": 277.17, "end": 277.63, "word": " means", "probability": 0.9287109375}, {"start": 277.63, "end": 278.99, "word": " x", "probability": 0.8935546875}, {"start": 278.99, "end": 279.83, "word": " and", "probability": 0.947265625}, {"start": 279.83, "end": 280.29, "word": " y", "probability": 0.99609375}, {"start": 280.29, "end": 281.09, "word": " move", "probability": 0.89892578125}], "temperature": 1.0}, {"id": 12, "seek": 31112, "start": 282.54, "end": 311.12, "text": " In the same direction. It means that if X goes up, Y will go in the same position. If X goes down, also Y goes down. For example, suppose we are interested in the relationship between consumption and income. We know that if income increases, if income goes up, if your salary goes up, that means consumption also will go up.", "tokens": [682, 264, 912, 3513, 13, 467, 1355, 300, 498, 1783, 1709, 493, 11, 398, 486, 352, 294, 264, 912, 2535, 13, 759, 1783, 1709, 760, 11, 611, 398, 1709, 760, 13, 1171, 1365, 11, 7297, 321, 366, 3102, 294, 264, 2480, 1296, 12126, 293, 5742, 13, 492, 458, 300, 498, 5742, 8637, 11, 498, 5742, 1709, 493, 11, 498, 428, 15360, 1709, 493, 11, 300, 1355, 12126, 611, 486, 352, 493, 13], "avg_logprob": -0.13131420742975522, "compression_ratio": 1.7759562841530054, "no_speech_prob": 0.0, "words": [{"start": 282.54, "end": 282.76, "word": " In", "probability": 0.465087890625}, {"start": 282.76, "end": 282.9, "word": " the", "probability": 0.92041015625}, {"start": 282.9, "end": 283.06, "word": " same", "probability": 0.9013671875}, {"start": 283.06, "end": 283.5, "word": " direction.", "probability": 0.974609375}, {"start": 283.72, "end": 284.4, "word": " It", "probability": 0.90576171875}, {"start": 284.4, "end": 284.8, "word": " means", "probability": 0.93115234375}, {"start": 284.8, "end": 285.22, "word": " that", "probability": 0.9130859375}, {"start": 285.22, "end": 285.54, "word": " if", "probability": 0.908203125}, {"start": 285.54, "end": 285.88, "word": 
" X", "probability": 0.6484375}, {"start": 285.88, "end": 286.14, "word": " goes", "probability": 0.9423828125}, {"start": 286.14, "end": 286.48, "word": " up,", "probability": 0.9736328125}, {"start": 287.64, "end": 288.08, "word": " Y", "probability": 0.9853515625}, {"start": 288.08, "end": 288.4, "word": " will", "probability": 0.88427734375}, {"start": 288.4, "end": 288.76, "word": " go", "probability": 0.96728515625}, {"start": 288.76, "end": 289.14, "word": " in", "probability": 0.89892578125}, {"start": 289.14, "end": 289.3, "word": " the", "probability": 0.9150390625}, {"start": 289.3, "end": 289.5, "word": " same", "probability": 0.91015625}, {"start": 289.5, "end": 289.94, "word": " position.", "probability": 0.92919921875}, {"start": 290.72, "end": 290.9, "word": " If", "probability": 0.943359375}, {"start": 290.9, "end": 291.16, "word": " X", "probability": 0.9912109375}, {"start": 291.16, "end": 291.4, "word": " goes", "probability": 0.93212890625}, {"start": 291.4, "end": 291.78, "word": " down,", "probability": 0.8486328125}, {"start": 291.94, "end": 292.26, "word": " also", "probability": 0.873046875}, {"start": 292.26, "end": 292.44, "word": " Y", "probability": 0.9599609375}, {"start": 292.44, "end": 292.66, "word": " goes", "probability": 0.93359375}, {"start": 292.66, "end": 292.96, "word": " down.", "probability": 0.84814453125}, {"start": 293.72, "end": 293.88, "word": " For", "probability": 0.9521484375}, {"start": 293.88, "end": 294.26, "word": " example,", "probability": 0.97265625}, {"start": 295.06, "end": 295.4, "word": " suppose", "probability": 0.87109375}, {"start": 295.4, "end": 295.54, "word": " we", "probability": 0.90966796875}, {"start": 295.54, "end": 295.66, "word": " are", "probability": 0.88037109375}, {"start": 295.66, "end": 296.08, "word": " interested", "probability": 0.8544921875}, {"start": 296.08, "end": 296.42, "word": " in", "probability": 0.93603515625}, {"start": 296.42, "end": 296.52, "word": " the", 
"probability": 0.89453125}, {"start": 296.52, "end": 296.9, "word": " relationship", "probability": 0.90576171875}, {"start": 296.9, "end": 297.3, "word": " between", "probability": 0.87353515625}, {"start": 297.3, "end": 297.92, "word": " consumption", "probability": 0.96435546875}, {"start": 297.92, "end": 298.42, "word": " and", "probability": 0.9404296875}, {"start": 298.42, "end": 298.72, "word": " income.", "probability": 0.93994140625}, {"start": 299.88, "end": 300.06, "word": " We", "probability": 0.923828125}, {"start": 300.06, "end": 300.2, "word": " know", "probability": 0.88720703125}, {"start": 300.2, "end": 300.46, "word": " that", "probability": 0.9306640625}, {"start": 300.46, "end": 300.88, "word": " if", "probability": 0.91455078125}, {"start": 300.88, "end": 301.36, "word": " income", "probability": 0.9287109375}, {"start": 301.36, "end": 301.8, "word": " increases,", "probability": 0.92919921875}, {"start": 302.24, "end": 302.44, "word": " if", "probability": 0.810546875}, {"start": 302.44, "end": 302.72, "word": " income", "probability": 0.94873046875}, {"start": 302.72, "end": 302.98, "word": " goes", "probability": 0.9345703125}, {"start": 302.98, "end": 303.3, "word": " up,", "probability": 0.966796875}, {"start": 303.96, "end": 304.1, "word": " if", "probability": 0.8984375}, {"start": 304.1, "end": 304.28, "word": " your", "probability": 0.85302734375}, {"start": 304.28, "end": 304.64, "word": " salary", "probability": 0.9453125}, {"start": 304.64, "end": 304.94, "word": " goes", "probability": 0.94140625}, {"start": 304.94, "end": 305.28, "word": " up,", "probability": 0.96826171875}, {"start": 306.3, "end": 306.74, "word": " that", "probability": 0.81689453125}, {"start": 306.74, "end": 307.16, "word": " means", "probability": 0.923828125}, {"start": 307.16, "end": 308.84, "word": " consumption", "probability": 0.591796875}, {"start": 308.84, "end": 309.36, "word": " also", "probability": 0.70947265625}, {"start": 309.36, "end": 310.72, 
"word": " will", "probability": 0.541015625}, {"start": 310.72, "end": 310.9, "word": " go", "probability": 0.96435546875}, {"start": 310.9, "end": 311.12, "word": " up.", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 13, "seek": 33111, "start": 311.63, "end": 331.11, "text": " So that means they go in the same or move in the same position. So for sure, the covariance between X and Y should be positive. On the other hand, if the covariance between X and Y is negative, that means X goes up.", "tokens": [407, 300, 1355, 436, 352, 294, 264, 912, 420, 1286, 294, 264, 912, 2535, 13, 407, 337, 988, 11, 264, 49851, 719, 1296, 1783, 293, 398, 820, 312, 3353, 13, 1282, 264, 661, 1011, 11, 498, 264, 49851, 719, 1296, 1783, 293, 398, 307, 3671, 11, 300, 1355, 1783, 1709, 493, 13], "avg_logprob": -0.16081958221939374, "compression_ratio": 1.6363636363636365, "no_speech_prob": 0.0, "words": [{"start": 311.63, "end": 311.91, "word": " So", "probability": 0.60107421875}, {"start": 311.91, "end": 312.05, "word": " that", "probability": 0.74267578125}, {"start": 312.05, "end": 312.29, "word": " means", "probability": 0.92138671875}, {"start": 312.29, "end": 312.93, "word": " they", "probability": 0.6630859375}, {"start": 312.93, "end": 313.51, "word": " go", "probability": 0.80126953125}, {"start": 313.51, "end": 314.53, "word": " in", "probability": 0.904296875}, {"start": 314.53, "end": 314.71, "word": " the", "probability": 0.92529296875}, {"start": 314.71, "end": 314.99, "word": " same", "probability": 0.9189453125}, {"start": 314.99, "end": 315.19, "word": " or", "probability": 0.6484375}, {"start": 315.19, "end": 315.51, "word": " move", "probability": 0.94482421875}, {"start": 315.51, "end": 315.69, "word": " in", "probability": 0.9462890625}, {"start": 315.69, "end": 315.85, "word": " the", "probability": 0.9189453125}, {"start": 315.85, "end": 316.03, "word": " same", "probability": 0.8994140625}, {"start": 316.03, "end": 316.47, "word": " position.", 
"probability": 0.92626953125}, {"start": 317.81, "end": 318.29, "word": " So", "probability": 0.9228515625}, {"start": 318.29, "end": 318.65, "word": " for", "probability": 0.71826171875}, {"start": 318.65, "end": 318.95, "word": " sure,", "probability": 0.9130859375}, {"start": 319.03, "end": 319.19, "word": " the", "probability": 0.91357421875}, {"start": 319.19, "end": 319.63, "word": " covariance", "probability": 0.870361328125}, {"start": 319.63, "end": 319.91, "word": " between", "probability": 0.8955078125}, {"start": 319.91, "end": 320.07, "word": " X", "probability": 0.5654296875}, {"start": 320.07, "end": 320.21, "word": " and", "probability": 0.939453125}, {"start": 320.21, "end": 320.35, "word": " Y", "probability": 0.99462890625}, {"start": 320.35, "end": 320.55, "word": " should", "probability": 0.9609375}, {"start": 320.55, "end": 320.69, "word": " be", "probability": 0.94580078125}, {"start": 320.69, "end": 321.01, "word": " positive.", "probability": 0.88330078125}, {"start": 322.93, "end": 323.49, "word": " On", "probability": 0.92236328125}, {"start": 323.49, "end": 323.69, "word": " the", "probability": 0.92529296875}, {"start": 323.69, "end": 323.97, "word": " other", "probability": 0.88720703125}, {"start": 323.97, "end": 324.33, "word": " hand,", "probability": 0.91552734375}, {"start": 324.63, "end": 324.81, "word": " if", "probability": 0.955078125}, {"start": 324.81, "end": 324.99, "word": " the", "probability": 0.92138671875}, {"start": 324.99, "end": 325.55, "word": " covariance", "probability": 0.913818359375}, {"start": 325.55, "end": 325.99, "word": " between", "probability": 0.88818359375}, {"start": 325.99, "end": 326.17, "word": " X", "probability": 0.99072265625}, {"start": 326.17, "end": 326.31, "word": " and", "probability": 0.943359375}, {"start": 326.31, "end": 326.43, "word": " Y", "probability": 0.99755859375}, {"start": 326.43, "end": 326.59, "word": " is", "probability": 0.95361328125}, {"start": 326.59, "end": 326.93, 
"word": " negative,", "probability": 0.9423828125}, {"start": 328.25, "end": 328.53, "word": " that", "probability": 0.92626953125}, {"start": 328.53, "end": 328.95, "word": " means", "probability": 0.92919921875}, {"start": 328.95, "end": 330.29, "word": " X", "probability": 0.92626953125}, {"start": 330.29, "end": 330.71, "word": " goes", "probability": 0.92529296875}, {"start": 330.71, "end": 331.11, "word": " up.", "probability": 0.97216796875}], "temperature": 1.0}, {"id": 14, "seek": 35309, "start": 332.93, "end": 353.09, "text": " Y will go to the same, to the opposite direction. I mean they move to opposite direction. That means there exists negative relationship between X and Y. For example, you score in statistics a number of missing classes. If you miss more classes,", "tokens": [398, 486, 352, 281, 264, 912, 11, 281, 264, 6182, 3513, 13, 286, 914, 436, 1286, 281, 6182, 3513, 13, 663, 1355, 456, 8198, 3671, 2480, 1296, 1783, 293, 398, 13, 1171, 1365, 11, 291, 6175, 294, 12523, 257, 1230, 295, 5361, 5359, 13, 759, 291, 1713, 544, 5359, 11], "avg_logprob": -0.21384803980004555, "compression_ratio": 1.53125, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 332.93, "end": 333.37, "word": " Y", "probability": 0.211181640625}, {"start": 333.37, "end": 333.73, "word": " will", "probability": 0.72314453125}, {"start": 333.73, "end": 333.99, "word": " go", "probability": 0.8466796875}, {"start": 333.99, "end": 334.31, "word": " to", "probability": 0.7138671875}, {"start": 334.31, "end": 334.47, "word": " the", "probability": 0.88720703125}, {"start": 334.47, "end": 334.79, "word": " same,", "probability": 0.67919921875}, {"start": 334.97, "end": 335.13, "word": " to", "probability": 0.8955078125}, {"start": 335.13, "end": 335.43, "word": " the", "probability": 0.90673828125}, {"start": 335.43, "end": 335.73, "word": " opposite", "probability": 0.9521484375}, {"start": 335.73, "end": 336.37, "word": " direction.", "probability": 0.970703125}, {"start": 
336.59, "end": 336.61, "word": " I", "probability": 0.91943359375}, {"start": 336.61, "end": 336.79, "word": " mean", "probability": 0.96533203125}, {"start": 336.79, "end": 337.13, "word": " they", "probability": 0.64501953125}, {"start": 337.13, "end": 338.11, "word": " move", "probability": 0.91064453125}, {"start": 338.11, "end": 338.33, "word": " to", "probability": 0.931640625}, {"start": 338.33, "end": 338.65, "word": " opposite", "probability": 0.90380859375}, {"start": 338.65, "end": 339.27, "word": " direction.", "probability": 0.81982421875}, {"start": 339.59, "end": 339.87, "word": " That", "probability": 0.85888671875}, {"start": 339.87, "end": 340.09, "word": " means", "probability": 0.9296875}, {"start": 340.09, "end": 340.25, "word": " there", "probability": 0.8544921875}, {"start": 340.25, "end": 340.55, "word": " exists", "probability": 0.7021484375}, {"start": 340.55, "end": 340.91, "word": " negative", "probability": 0.79833984375}, {"start": 340.91, "end": 341.41, "word": " relationship", "probability": 0.8974609375}, {"start": 341.41, "end": 341.97, "word": " between", "probability": 0.869140625}, {"start": 341.97, "end": 342.13, "word": " X", "probability": 0.73486328125}, {"start": 342.13, "end": 342.23, "word": " and", "probability": 0.92529296875}, {"start": 342.23, "end": 342.39, "word": " Y.", "probability": 0.9951171875}, {"start": 342.47, "end": 342.63, "word": " For", "probability": 0.95263671875}, {"start": 342.63, "end": 342.97, "word": " example,", "probability": 0.97216796875}, {"start": 343.93, "end": 344.13, "word": " you", "probability": 0.818359375}, {"start": 344.13, "end": 344.59, "word": " score", "probability": 0.77587890625}, {"start": 344.59, "end": 344.97, "word": " in", "probability": 0.91650390625}, {"start": 344.97, "end": 345.67, "word": " statistics", "probability": 0.85400390625}, {"start": 345.67, "end": 347.19, "word": " a", "probability": 0.60693359375}, {"start": 347.19, "end": 347.63, "word": " number", 
"probability": 0.9443359375}, {"start": 347.63, "end": 348.55, "word": " of", "probability": 0.96826171875}, {"start": 348.55, "end": 349.59, "word": " missing", "probability": 0.87744140625}, {"start": 349.59, "end": 350.13, "word": " classes.", "probability": 0.90087890625}, {"start": 351.63, "end": 351.95, "word": " If", "probability": 0.9580078125}, {"start": 351.95, "end": 352.11, "word": " you", "probability": 0.96435546875}, {"start": 352.11, "end": 352.31, "word": " miss", "probability": 0.89453125}, {"start": 352.31, "end": 352.55, "word": " more", "probability": 0.9521484375}, {"start": 352.55, "end": 353.09, "word": " classes,", "probability": 0.89306640625}], "temperature": 1.0}, {"id": 15, "seek": 38084, "start": 354.96, "end": 380.84, "text": " it means your score will go down so as x increases y will go down so there is positive relationship or negative relationship between x and y i mean x goes up the other go in the same direction sometimes there is exist no relationship between x and y", "tokens": [309, 1355, 428, 6175, 486, 352, 760, 370, 382, 2031, 8637, 288, 486, 352, 760, 370, 456, 307, 3353, 2480, 420, 3671, 2480, 1296, 2031, 293, 288, 741, 914, 2031, 1709, 493, 264, 661, 352, 294, 264, 912, 3513, 2171, 456, 307, 2514, 572, 2480, 1296, 2031, 293, 288], "avg_logprob": -0.13164062976837157, "compression_ratio": 1.8796992481203008, "no_speech_prob": 0.0, "words": [{"start": 354.96, "end": 355.22, "word": " it", "probability": 0.703125}, {"start": 355.22, "end": 355.48, "word": " means", "probability": 0.91552734375}, {"start": 355.48, "end": 355.7, "word": " your", "probability": 0.875}, {"start": 355.7, "end": 356.0, "word": " score", "probability": 0.90185546875}, {"start": 356.0, "end": 356.34, "word": " will", "probability": 0.89306640625}, {"start": 356.34, "end": 356.98, "word": " go", "probability": 0.95654296875}, {"start": 356.98, "end": 357.32, "word": " down", "probability": 0.8671875}, {"start": 357.32, "end": 357.9, "word": " so", 
"probability": 0.330078125}, {"start": 357.9, "end": 358.18, "word": " as", "probability": 0.94921875}, {"start": 358.18, "end": 358.48, "word": " x", "probability": 0.87158203125}, {"start": 358.48, "end": 359.04, "word": " increases", "probability": 0.94482421875}, {"start": 359.04, "end": 359.86, "word": " y", "probability": 0.86767578125}, {"start": 359.86, "end": 360.56, "word": " will", "probability": 0.88134765625}, {"start": 360.56, "end": 361.66, "word": " go", "probability": 0.96044921875}, {"start": 361.66, "end": 361.94, "word": " down", "probability": 0.86279296875}, {"start": 361.94, "end": 362.46, "word": " so", "probability": 0.794921875}, {"start": 362.46, "end": 362.7, "word": " there", "probability": 0.90673828125}, {"start": 362.7, "end": 362.94, "word": " is", "probability": 0.951171875}, {"start": 362.94, "end": 363.48, "word": " positive", "probability": 0.873046875}, {"start": 363.48, "end": 364.14, "word": " relationship", "probability": 0.9228515625}, {"start": 364.14, "end": 364.82, "word": " or", "probability": 0.9658203125}, {"start": 364.82, "end": 365.18, "word": " negative", "probability": 0.9404296875}, {"start": 365.18, "end": 365.82, "word": " relationship", "probability": 0.92236328125}, {"start": 365.82, "end": 366.22, "word": " between", "probability": 0.8896484375}, {"start": 366.22, "end": 367.2, "word": " x", "probability": 0.9853515625}, {"start": 367.2, "end": 367.38, "word": " and", "probability": 0.9501953125}, {"start": 367.38, "end": 367.66, "word": " y", "probability": 0.998046875}, {"start": 367.66, "end": 368.14, "word": " i", "probability": 0.53125}, {"start": 368.14, "end": 368.3, "word": " mean", "probability": 0.9638671875}, {"start": 368.3, "end": 368.72, "word": " x", "probability": 0.98779296875}, {"start": 368.72, "end": 369.18, "word": " goes", "probability": 0.923828125}, {"start": 369.18, "end": 369.46, "word": " up", "probability": 0.9609375}, {"start": 369.46, "end": 369.58, "word": " the", 
"probability": 0.89404296875}, {"start": 369.58, "end": 369.78, "word": " other", "probability": 0.8955078125}, {"start": 369.78, "end": 370.12, "word": " go", "probability": 0.8251953125}, {"start": 370.12, "end": 370.46, "word": " in", "probability": 0.9482421875}, {"start": 370.46, "end": 370.64, "word": " the", "probability": 0.91748046875}, {"start": 370.64, "end": 371.44, "word": " same", "probability": 0.91015625}, {"start": 371.44, "end": 372.02, "word": " direction", "probability": 0.7109375}, {"start": 372.02, "end": 376.5, "word": " sometimes", "probability": 0.89111328125}, {"start": 376.5, "end": 378.24, "word": " there", "probability": 0.91064453125}, {"start": 378.24, "end": 378.5, "word": " is", "probability": 0.9111328125}, {"start": 378.5, "end": 379.2, "word": " exist", "probability": 0.66552734375}, {"start": 379.2, "end": 379.5, "word": " no", "probability": 0.94482421875}, {"start": 379.5, "end": 379.98, "word": " relationship", "probability": 0.90185546875}, {"start": 379.98, "end": 380.3, "word": " between", "probability": 0.88916015625}, {"start": 380.3, "end": 380.46, "word": " x", "probability": 0.9970703125}, {"start": 380.46, "end": 380.62, "word": " and", "probability": 0.94873046875}, {"start": 380.62, "end": 380.84, "word": " y", "probability": 0.99853515625}], "temperature": 1.0}, {"id": 16, "seek": 40788, "start": 381.58, "end": 407.88, "text": " In that case, covariance between x and y equals zero. For example, your score in statistics and your weight. It makes sense that there is no relationship between your weight and your score. In this case, we are saying x and y are independent. 
I mean, they are uncorrelated.", "tokens": [682, 300, 1389, 11, 49851, 719, 1296, 2031, 293, 288, 6915, 4018, 13, 1171, 1365, 11, 428, 6175, 294, 12523, 293, 428, 3364, 13, 467, 1669, 2020, 300, 456, 307, 572, 2480, 1296, 428, 3364, 293, 428, 6175, 13, 682, 341, 1389, 11, 321, 366, 1566, 2031, 293, 288, 366, 6695, 13, 286, 914, 11, 436, 366, 6219, 284, 12004, 13], "avg_logprob": -0.1362147184629594, "compression_ratio": 1.6506024096385543, "no_speech_prob": 0.0, "words": [{"start": 381.58, "end": 381.8, "word": " In", "probability": 0.81787109375}, {"start": 381.8, "end": 382.02, "word": " that", "probability": 0.93310546875}, {"start": 382.02, "end": 382.34, "word": " case,", "probability": 0.921875}, {"start": 382.44, "end": 382.84, "word": " covariance", "probability": 0.7890625}, {"start": 382.84, "end": 383.04, "word": " between", "probability": 0.88818359375}, {"start": 383.04, "end": 383.26, "word": " x", "probability": 0.55859375}, {"start": 383.26, "end": 383.4, "word": " and", "probability": 0.94580078125}, {"start": 383.4, "end": 383.62, "word": " y", "probability": 0.99462890625}, {"start": 383.62, "end": 384.44, "word": " equals", "probability": 0.86279296875}, {"start": 384.44, "end": 384.78, "word": " zero.", "probability": 0.69580078125}, {"start": 384.88, "end": 385.0, "word": " For", "probability": 0.9697265625}, {"start": 385.0, "end": 385.42, "word": " example,", "probability": 0.9736328125}, {"start": 386.12, "end": 386.3, "word": " your", "probability": 0.861328125}, {"start": 386.3, "end": 386.82, "word": " score", "probability": 0.87158203125}, {"start": 386.82, "end": 388.4, "word": " in", "probability": 0.6796875}, {"start": 388.4, "end": 388.88, "word": " statistics", "probability": 0.8564453125}, {"start": 388.88, "end": 390.56, "word": " and", "probability": 0.79345703125}, {"start": 390.56, "end": 391.32, "word": " your", "probability": 0.8994140625}, {"start": 391.32, "end": 391.7, "word": " weight.", "probability": 0.92041015625}, 
{"start": 394.54, "end": 395.16, "word": " It", "probability": 0.55419921875}, {"start": 395.16, "end": 395.42, "word": " makes", "probability": 0.8212890625}, {"start": 395.42, "end": 395.7, "word": " sense", "probability": 0.82666015625}, {"start": 395.7, "end": 395.92, "word": " that", "probability": 0.91650390625}, {"start": 395.92, "end": 396.08, "word": " there", "probability": 0.91064453125}, {"start": 396.08, "end": 396.22, "word": " is", "probability": 0.89404296875}, {"start": 396.22, "end": 396.38, "word": " no", "probability": 0.94287109375}, {"start": 396.38, "end": 396.76, "word": " relationship", "probability": 0.91015625}, {"start": 396.76, "end": 397.24, "word": " between", "probability": 0.869140625}, {"start": 397.24, "end": 398.66, "word": " your", "probability": 0.89697265625}, {"start": 398.66, "end": 398.92, "word": " weight", "probability": 0.91845703125}, {"start": 398.92, "end": 399.3, "word": " and", "probability": 0.9375}, {"start": 399.3, "end": 400.44, "word": " your", "probability": 0.9052734375}, {"start": 400.44, "end": 400.88, "word": " score.", "probability": 0.87939453125}, {"start": 401.5, "end": 402.0, "word": " In", "probability": 0.85107421875}, {"start": 402.0, "end": 402.28, "word": " this", "probability": 0.9453125}, {"start": 402.28, "end": 402.68, "word": " case,", "probability": 0.91259765625}, {"start": 403.58, "end": 403.76, "word": " we", "probability": 0.955078125}, {"start": 403.76, "end": 403.92, "word": " are", "probability": 0.9306640625}, {"start": 403.92, "end": 404.36, "word": " saying", "probability": 0.75341796875}, {"start": 404.36, "end": 404.94, "word": " x", "probability": 0.939453125}, {"start": 404.94, "end": 405.14, "word": " and", "probability": 0.9453125}, {"start": 405.14, "end": 405.42, "word": " y", "probability": 0.998046875}, {"start": 405.42, "end": 405.72, "word": " are", "probability": 0.94091796875}, {"start": 405.72, "end": 406.22, "word": " independent.", "probability": 0.8857421875}, 
{"start": 406.48, "end": 406.62, "word": " I", "probability": 0.93408203125}, {"start": 406.62, "end": 406.76, "word": " mean,", "probability": 0.96484375}, {"start": 406.84, "end": 406.96, "word": " they", "probability": 0.88623046875}, {"start": 406.96, "end": 407.18, "word": " are", "probability": 0.9404296875}, {"start": 407.18, "end": 407.88, "word": " uncorrelated.", "probability": 0.9685872395833334}], "temperature": 1.0}, {"id": 17, "seek": 43515, "start": 408.71, "end": 435.15, "text": " Because as one variable increases, the other maybe go up or go down. Or maybe constant. So that means there exists no relationship between the two variables. In that case, the covariance between x and y equals zero. Now, by using the covariance, you can determine the direction of the relationship. I mean, you can just figure out if the relation is positive or negative.", "tokens": [1436, 382, 472, 7006, 8637, 11, 264, 661, 1310, 352, 493, 420, 352, 760, 13, 1610, 1310, 5754, 13, 407, 300, 1355, 456, 8198, 572, 2480, 1296, 264, 732, 9102, 13, 682, 300, 1389, 11, 264, 49851, 719, 1296, 2031, 293, 288, 6915, 4018, 13, 823, 11, 538, 1228, 264, 49851, 719, 11, 291, 393, 6997, 264, 3513, 295, 264, 2480, 13, 286, 914, 11, 291, 393, 445, 2573, 484, 498, 264, 9721, 307, 3353, 420, 3671, 13], "avg_logprob": -0.13706486662731895, "compression_ratio": 1.7064220183486238, "no_speech_prob": 0.0, "words": [{"start": 408.71, "end": 409.19, "word": " Because", "probability": 0.57763671875}, {"start": 409.19, "end": 409.79, "word": " as", "probability": 0.8076171875}, {"start": 409.79, "end": 410.35, "word": " one", "probability": 0.9208984375}, {"start": 410.35, "end": 410.79, "word": " variable", "probability": 0.91357421875}, {"start": 410.79, "end": 411.55, "word": " increases,", "probability": 0.92529296875}, {"start": 412.09, "end": 412.71, "word": " the", "probability": 0.8994140625}, {"start": 412.71, "end": 413.01, "word": " other", "probability": 0.85888671875}, {"start": 413.01, 
"end": 413.43, "word": " maybe", "probability": 0.6552734375}, {"start": 413.43, "end": 413.77, "word": " go", "probability": 0.76953125}, {"start": 413.77, "end": 414.09, "word": " up", "probability": 0.97509765625}, {"start": 414.09, "end": 414.51, "word": " or", "probability": 0.91796875}, {"start": 414.51, "end": 414.71, "word": " go", "probability": 0.93798828125}, {"start": 414.71, "end": 415.03, "word": " down.", "probability": 0.859375}, {"start": 415.41, "end": 416.01, "word": " Or", "probability": 0.689453125}, {"start": 416.01, "end": 416.19, "word": " maybe", "probability": 0.7578125}, {"start": 416.19, "end": 417.45, "word": " constant.", "probability": 0.84912109375}, {"start": 418.05, "end": 418.39, "word": " So", "probability": 0.89453125}, {"start": 418.39, "end": 418.71, "word": " that", "probability": 0.85693359375}, {"start": 418.71, "end": 418.93, "word": " means", "probability": 0.93310546875}, {"start": 418.93, "end": 419.13, "word": " there", "probability": 0.7998046875}, {"start": 419.13, "end": 419.47, "word": " exists", "probability": 0.82373046875}, {"start": 419.47, "end": 419.69, "word": " no", "probability": 0.94482421875}, {"start": 419.69, "end": 420.13, "word": " relationship", "probability": 0.90673828125}, {"start": 420.13, "end": 420.67, "word": " between", "probability": 0.8701171875}, {"start": 420.67, "end": 421.39, "word": " the", "probability": 0.88671875}, {"start": 421.39, "end": 421.55, "word": " two", "probability": 0.9248046875}, {"start": 421.55, "end": 421.95, "word": " variables.", "probability": 0.92724609375}, {"start": 422.03, "end": 422.17, "word": " In", "probability": 0.8388671875}, {"start": 422.17, "end": 422.39, "word": " that", "probability": 0.935546875}, {"start": 422.39, "end": 422.77, "word": " case,", "probability": 0.92041015625}, {"start": 423.21, "end": 423.39, "word": " the", "probability": 0.90234375}, {"start": 423.39, "end": 423.93, "word": " covariance", "probability": 0.955810546875}, 
{"start": 423.93, "end": 424.23, "word": " between", "probability": 0.89697265625}, {"start": 424.23, "end": 424.39, "word": " x", "probability": 0.53173828125}, {"start": 424.39, "end": 424.53, "word": " and", "probability": 0.9384765625}, {"start": 424.53, "end": 424.81, "word": " y", "probability": 0.99365234375}, {"start": 424.81, "end": 425.71, "word": " equals", "probability": 0.88818359375}, {"start": 425.71, "end": 425.95, "word": " zero.", "probability": 0.64501953125}, {"start": 426.45, "end": 426.75, "word": " Now,", "probability": 0.9443359375}, {"start": 426.93, "end": 427.13, "word": " by", "probability": 0.9697265625}, {"start": 427.13, "end": 427.45, "word": " using", "probability": 0.9345703125}, {"start": 427.45, "end": 427.63, "word": " the", "probability": 0.91650390625}, {"start": 427.63, "end": 428.23, "word": " covariance,", "probability": 0.955078125}, {"start": 428.35, "end": 428.55, "word": " you", "probability": 0.96142578125}, {"start": 428.55, "end": 428.75, "word": " can", "probability": 0.94091796875}, {"start": 428.75, "end": 429.21, "word": " determine", "probability": 0.923828125}, {"start": 429.21, "end": 430.03, "word": " the", "probability": 0.91845703125}, {"start": 430.03, "end": 430.65, "word": " direction", "probability": 0.97705078125}, {"start": 430.65, "end": 430.99, "word": " of", "probability": 0.9677734375}, {"start": 430.99, "end": 431.11, "word": " the", "probability": 0.91357421875}, {"start": 431.11, "end": 431.49, "word": " relationship.", "probability": 0.89697265625}, {"start": 432.09, "end": 432.21, "word": " I", "probability": 0.9580078125}, {"start": 432.21, "end": 432.37, "word": " mean,", "probability": 0.96728515625}, {"start": 432.43, "end": 432.55, "word": " you", "probability": 0.9560546875}, {"start": 432.55, "end": 432.71, "word": " can", "probability": 0.9423828125}, {"start": 432.71, "end": 432.93, "word": " just", "probability": 0.90185546875}, {"start": 432.93, "end": 433.17, "word": " figure", 
"probability": 0.97265625}, {"start": 433.17, "end": 433.45, "word": " out", "probability": 0.88037109375}, {"start": 433.45, "end": 433.67, "word": " if", "probability": 0.9423828125}, {"start": 433.67, "end": 433.79, "word": " the", "probability": 0.9111328125}, {"start": 433.79, "end": 434.09, "word": " relation", "probability": 0.826171875}, {"start": 434.09, "end": 434.25, "word": " is", "probability": 0.83349609375}, {"start": 434.25, "end": 434.53, "word": " positive", "probability": 0.92626953125}, {"start": 434.53, "end": 434.85, "word": " or", "probability": 0.9658203125}, {"start": 434.85, "end": 435.15, "word": " negative.", "probability": 0.9462890625}], "temperature": 1.0}, {"id": 18, "seek": 44244, "start": 435.88, "end": 442.44, "text": " But you cannot tell exactly the strength of the relationship. I mean you cannot tell if they exist.", "tokens": [583, 291, 2644, 980, 2293, 264, 3800, 295, 264, 2480, 13, 286, 914, 291, 2644, 980, 498, 436, 2514, 13], "avg_logprob": -0.2604166666666667, "compression_ratio": 1.2195121951219512, "no_speech_prob": 0.0, "words": [{"start": 435.88, "end": 436.2, "word": " But", "probability": 0.5380859375}, {"start": 436.2, "end": 436.36, "word": " you", "probability": 0.6796875}, {"start": 436.36, "end": 436.6, "word": " cannot", "probability": 0.7802734375}, {"start": 436.6, "end": 436.9, "word": " tell", "probability": 0.86767578125}, {"start": 436.9, "end": 437.52, "word": " exactly", "probability": 0.87109375}, {"start": 437.52, "end": 438.46, "word": " the", "probability": 0.8671875}, {"start": 438.46, "end": 438.98, "word": " strength", "probability": 0.857421875}, {"start": 438.98, "end": 439.52, "word": " of", "probability": 0.9677734375}, {"start": 439.52, "end": 439.64, "word": " the", "probability": 0.81494140625}, {"start": 439.64, "end": 440.24, "word": " relationship.", "probability": 0.8896484375}, {"start": 440.56, "end": 440.68, "word": " I", "probability": 0.966796875}, {"start": 440.68, "end": 
440.82, "word": " mean", "probability": 0.96533203125}, {"start": 440.82, "end": 440.9, "word": " you", "probability": 0.60888671875}, {"start": 440.9, "end": 441.12, "word": " cannot", "probability": 0.8798828125}, {"start": 441.12, "end": 441.5, "word": " tell", "probability": 0.87451171875}, {"start": 441.5, "end": 442.1, "word": " if", "probability": 0.93212890625}, {"start": 442.1, "end": 442.2, "word": " they", "probability": 0.32177734375}, {"start": 442.2, "end": 442.44, "word": " exist.", "probability": 0.7705078125}], "temperature": 1.0}, {"id": 19, "seek": 47042, "start": 444.8, "end": 470.42, "text": " strong moderate or weak relationship just you can tell there exists positive or negative or maybe the relationship does not exist but you cannot tell the exact strength of the relationship by using the value of the covariance I mean the size of the covariance does not tell anything about the strength so generally speaking covariance between x and y", "tokens": [2068, 18174, 420, 5336, 2480, 445, 291, 393, 980, 456, 8198, 3353, 420, 3671, 420, 1310, 264, 2480, 775, 406, 2514, 457, 291, 2644, 980, 264, 1900, 3800, 295, 264, 2480, 538, 1228, 264, 2158, 295, 264, 49851, 719, 286, 914, 264, 2744, 295, 264, 49851, 719, 775, 406, 980, 1340, 466, 264, 3800, 370, 5101, 4124, 49851, 719, 1296, 2031, 293, 288], "avg_logprob": -0.19067383417859674, "compression_ratio": 1.8972972972972972, "no_speech_prob": 0.0, "words": [{"start": 444.8, "end": 445.48, "word": " strong", "probability": 0.33154296875}, {"start": 445.48, "end": 446.16, "word": " moderate", "probability": 0.408447265625}, {"start": 446.16, "end": 446.76, "word": " or", "probability": 0.85205078125}, {"start": 446.76, "end": 447.04, "word": " weak", "probability": 0.943359375}, {"start": 447.04, "end": 447.64, "word": " relationship", "probability": 0.68017578125}, {"start": 447.64, "end": 448.62, "word": " just", "probability": 0.445068359375}, {"start": 448.62, "end": 448.8, "word": " you", 
"probability": 0.90576171875}, {"start": 448.8, "end": 448.98, "word": " can", "probability": 0.93359375}, {"start": 448.98, "end": 449.3, "word": " tell", "probability": 0.78173828125}, {"start": 449.3, "end": 450.0, "word": " there", "probability": 0.474853515625}, {"start": 450.0, "end": 450.3, "word": " exists", "probability": 0.69287109375}, {"start": 450.3, "end": 450.72, "word": " positive", "probability": 0.88720703125}, {"start": 450.72, "end": 451.04, "word": " or", "probability": 0.95068359375}, {"start": 451.04, "end": 451.38, "word": " negative", "probability": 0.9443359375}, {"start": 451.38, "end": 451.66, "word": " or", "probability": 0.83740234375}, {"start": 451.66, "end": 451.92, "word": " maybe", "probability": 0.9140625}, {"start": 451.92, "end": 452.24, "word": " the", "probability": 0.76171875}, {"start": 452.24, "end": 452.74, "word": " relationship", "probability": 0.88671875}, {"start": 452.74, "end": 453.02, "word": " does", "probability": 0.95068359375}, {"start": 453.02, "end": 453.16, "word": " not", "probability": 0.95068359375}, {"start": 453.16, "end": 453.52, "word": " exist", "probability": 0.9404296875}, {"start": 453.52, "end": 454.08, "word": " but", "probability": 0.79736328125}, {"start": 454.08, "end": 454.22, "word": " you", "probability": 0.93896484375}, {"start": 454.22, "end": 454.44, "word": " cannot", "probability": 0.8408203125}, {"start": 454.44, "end": 454.7, "word": " tell", "probability": 0.890625}, {"start": 454.7, "end": 454.92, "word": " the", "probability": 0.88671875}, {"start": 454.92, "end": 455.38, "word": " exact", "probability": 0.943359375}, {"start": 455.38, "end": 456.0, "word": " strength", "probability": 0.87060546875}, {"start": 456.0, "end": 456.34, "word": " of", "probability": 0.96923828125}, {"start": 456.34, "end": 456.58, "word": " the", "probability": 0.91796875}, {"start": 456.58, "end": 457.16, "word": " relationship", "probability": 0.8896484375}, {"start": 457.16, "end": 458.22, "word": 
" by", "probability": 0.95166015625}, {"start": 458.22, "end": 458.66, "word": " using", "probability": 0.93408203125}, {"start": 458.66, "end": 459.04, "word": " the", "probability": 0.916015625}, {"start": 459.04, "end": 459.38, "word": " value", "probability": 0.96484375}, {"start": 459.38, "end": 459.56, "word": " of", "probability": 0.97021484375}, {"start": 459.56, "end": 459.7, "word": " the", "probability": 0.91748046875}, {"start": 459.7, "end": 460.12, "word": " covariance", "probability": 0.9111328125}, {"start": 460.12, "end": 460.28, "word": " I", "probability": 0.60888671875}, {"start": 460.28, "end": 460.4, "word": " mean", "probability": 0.96533203125}, {"start": 460.4, "end": 460.6, "word": " the", "probability": 0.8740234375}, {"start": 460.6, "end": 460.96, "word": " size", "probability": 0.83642578125}, {"start": 460.96, "end": 461.1, "word": " of", "probability": 0.97021484375}, {"start": 461.1, "end": 461.22, "word": " the", "probability": 0.91748046875}, {"start": 461.22, "end": 461.74, "word": " covariance", "probability": 0.928466796875}, {"start": 461.74, "end": 462.0, "word": " does", "probability": 0.79638671875}, {"start": 462.0, "end": 462.24, "word": " not", "probability": 0.916015625}, {"start": 462.24, "end": 463.06, "word": " tell", "probability": 0.654296875}, {"start": 463.06, "end": 463.46, "word": " anything", "probability": 0.8486328125}, {"start": 463.46, "end": 463.92, "word": " about", "probability": 0.9111328125}, {"start": 463.92, "end": 464.56, "word": " the", "probability": 0.9013671875}, {"start": 464.56, "end": 464.96, "word": " strength", "probability": 0.88330078125}, {"start": 464.96, "end": 466.1, "word": " so", "probability": 0.6396484375}, {"start": 466.1, "end": 468.04, "word": " generally", "probability": 0.66552734375}, {"start": 468.04, "end": 468.52, "word": " speaking", "probability": 0.88232421875}, {"start": 468.52, "end": 469.26, "word": " covariance", "probability": 0.807373046875}, {"start": 469.26, 
"end": 469.82, "word": " between", "probability": 0.89453125}, {"start": 469.82, "end": 470.0, "word": " x", "probability": 0.55615234375}, {"start": 470.0, "end": 470.14, "word": " and", "probability": 0.9365234375}, {"start": 470.14, "end": 470.42, "word": " y", "probability": 0.98974609375}], "temperature": 1.0}, {"id": 20, "seek": 49871, "start": 471.91, "end": 498.71, "text": " measures the strength of two numerical variables, and you only tell if there exists positive or negative relationship, but you cannot tell anything about the strength of the relationship. Any questions? So let me ask you just to summarize what I said so far. Just give me the summary or conclusion.", "tokens": [8000, 264, 3800, 295, 732, 29054, 9102, 11, 293, 291, 787, 980, 498, 456, 8198, 3353, 420, 3671, 2480, 11, 457, 291, 2644, 980, 1340, 466, 264, 3800, 295, 264, 2480, 13, 2639, 1651, 30, 407, 718, 385, 1029, 291, 445, 281, 20858, 437, 286, 848, 370, 1400, 13, 1449, 976, 385, 264, 12691, 420, 10063, 13], "avg_logprob": -0.14897629592953057, "compression_ratio": 1.5654450261780104, "no_speech_prob": 0.0, "words": [{"start": 471.91, "end": 472.41, "word": " measures", "probability": 0.60107421875}, {"start": 472.41, "end": 472.71, "word": " the", "probability": 0.91162109375}, {"start": 472.71, "end": 473.15, "word": " strength", "probability": 0.8310546875}, {"start": 473.15, "end": 473.93, "word": " of", "probability": 0.9638671875}, {"start": 473.93, "end": 474.21, "word": " two", "probability": 0.86181640625}, {"start": 474.21, "end": 474.57, "word": " numerical", "probability": 0.96044921875}, {"start": 474.57, "end": 475.09, "word": " variables,", "probability": 0.9345703125}, {"start": 476.35, "end": 476.47, "word": " and", "probability": 0.90625}, {"start": 476.47, "end": 476.61, "word": " you", "probability": 0.9453125}, {"start": 476.61, "end": 477.23, "word": " only", "probability": 0.90478515625}, {"start": 477.23, "end": 477.61, "word": " tell", "probability": 
0.86669921875}, {"start": 477.61, "end": 478.59, "word": " if", "probability": 0.8984375}, {"start": 478.59, "end": 478.81, "word": " there", "probability": 0.896484375}, {"start": 478.81, "end": 479.19, "word": " exists", "probability": 0.84521484375}, {"start": 479.19, "end": 479.57, "word": " positive", "probability": 0.8623046875}, {"start": 479.57, "end": 480.01, "word": " or", "probability": 0.96337890625}, {"start": 480.01, "end": 480.43, "word": " negative", "probability": 0.9404296875}, {"start": 480.43, "end": 481.27, "word": " relationship,", "probability": 0.8984375}, {"start": 481.77, "end": 482.01, "word": " but", "probability": 0.9208984375}, {"start": 482.01, "end": 482.15, "word": " you", "probability": 0.95947265625}, {"start": 482.15, "end": 482.33, "word": " cannot", "probability": 0.8642578125}, {"start": 482.33, "end": 482.53, "word": " tell", "probability": 0.873046875}, {"start": 482.53, "end": 482.83, "word": " anything", "probability": 0.8603515625}, {"start": 482.83, "end": 483.27, "word": " about", "probability": 0.90673828125}, {"start": 483.27, "end": 483.61, "word": " the", "probability": 0.9130859375}, {"start": 483.61, "end": 483.99, "word": " strength", "probability": 0.88037109375}, {"start": 483.99, "end": 484.51, "word": " of", "probability": 0.96826171875}, {"start": 484.51, "end": 484.77, "word": " the", "probability": 0.9140625}, {"start": 484.77, "end": 485.51, "word": " relationship.", "probability": 0.91015625}, {"start": 486.31, "end": 486.53, "word": " Any", "probability": 0.89404296875}, {"start": 486.53, "end": 486.91, "word": " questions?", "probability": 0.9501953125}, {"start": 489.61, "end": 489.89, "word": " So", "probability": 0.91748046875}, {"start": 489.89, "end": 490.01, "word": " let", "probability": 0.7890625}, {"start": 490.01, "end": 490.11, "word": " me", "probability": 0.96728515625}, {"start": 490.11, "end": 490.33, "word": " ask", "probability": 0.92626953125}, {"start": 490.33, "end": 490.53, "word": 
" you", "probability": 0.96240234375}, {"start": 490.53, "end": 491.73, "word": " just", "probability": 0.56884765625}, {"start": 491.73, "end": 491.95, "word": " to", "probability": 0.96533203125}, {"start": 491.95, "end": 492.27, "word": " summarize", "probability": 0.5537109375}, {"start": 492.27, "end": 492.87, "word": " what", "probability": 0.93212890625}, {"start": 492.87, "end": 493.29, "word": " I", "probability": 0.99658203125}, {"start": 493.29, "end": 494.83, "word": " said", "probability": 0.74267578125}, {"start": 494.83, "end": 495.21, "word": " so", "probability": 0.94482421875}, {"start": 495.21, "end": 495.49, "word": " far.", "probability": 0.94287109375}, {"start": 496.45, "end": 496.89, "word": " Just", "probability": 0.876953125}, {"start": 496.89, "end": 497.07, "word": " give", "probability": 0.8955078125}, {"start": 497.07, "end": 497.19, "word": " me", "probability": 0.9580078125}, {"start": 497.19, "end": 497.39, "word": " the", "probability": 0.67236328125}, {"start": 497.39, "end": 497.73, "word": " summary", "probability": 0.91943359375}, {"start": 497.73, "end": 498.17, "word": " or", "probability": 0.8720703125}, {"start": 498.17, "end": 498.71, "word": " conclusion.", "probability": 0.892578125}], "temperature": 1.0}, {"id": 21, "seek": 50184, "start": 500.66, "end": 501.84, "text": " of the covariance.", "tokens": [295, 264, 49851, 719, 13], "avg_logprob": -0.11010742435852687, "compression_ratio": 0.7037037037037037, "no_speech_prob": 0.0, "words": [{"start": 500.66, "end": 501.1, "word": " of", "probability": 0.70556640625}, {"start": 501.1, "end": 501.32, "word": " the", "probability": 0.91845703125}, {"start": 501.32, "end": 501.84, "word": " covariance.", "probability": 0.947998046875}], "temperature": 1.0}, {"id": 22, "seek": 53163, "start": 502.87, "end": 531.63, "text": " The value of the covariance determine if the relationship between the variables are positive or negative or there is no relationship that when the 
covariance is more than zero, the meaning that it's positive, the relationship is positive and one variable go up, another go up and vice versa. And when the covariance is less than zero, there is negative relationship and the meaning that when one variable go up, the other goes down and vice versa and when the covariance equals zero, there is no relationship between the variables.", "tokens": [440, 2158, 295, 264, 49851, 719, 6997, 498, 264, 2480, 1296, 264, 9102, 366, 3353, 420, 3671, 420, 456, 307, 572, 2480, 300, 562, 264, 49851, 719, 307, 544, 813, 4018, 11, 264, 3620, 300, 309, 311, 3353, 11, 264, 2480, 307, 3353, 293, 472, 7006, 352, 493, 11, 1071, 352, 493, 293, 11964, 25650, 13, 400, 562, 264, 49851, 719, 307, 1570, 813, 4018, 11, 456, 307, 3671, 2480, 293, 264, 3620, 300, 562, 472, 7006, 352, 493, 11, 264, 661, 1709, 760, 293, 11964, 25650, 293, 562, 264, 49851, 719, 6915, 4018, 11, 456, 307, 572, 2480, 1296, 264, 9102, 13], "avg_logprob": -0.2250601024581836, "compression_ratio": 2.485981308411215, "no_speech_prob": 1.7881393432617188e-06, "words": [{"start": 502.87, "end": 503.07, "word": " The", "probability": 0.56005859375}, {"start": 503.07, "end": 503.27, "word": " value", "probability": 0.89697265625}, {"start": 503.27, "end": 503.43, "word": " of", "probability": 0.96044921875}, {"start": 503.43, "end": 503.53, "word": " the", "probability": 0.6220703125}, {"start": 503.53, "end": 504.67, "word": " covariance", "probability": 0.98193359375}, {"start": 504.67, "end": 505.25, "word": " determine", "probability": 0.441162109375}, {"start": 505.25, "end": 505.71, "word": " if", "probability": 0.9111328125}, {"start": 505.71, "end": 505.89, "word": " the", "probability": 0.89501953125}, {"start": 505.89, "end": 506.31, "word": " relationship", "probability": 0.84423828125}, {"start": 506.31, "end": 506.61, "word": " between", "probability": 0.8623046875}, {"start": 506.61, "end": 506.81, "word": " the", "probability": 0.9091796875}, {"start": 506.81, 
"end": 507.11, "word": " variables", "probability": 0.39599609375}, {"start": 507.11, "end": 507.39, "word": " are", "probability": 0.85595703125}, {"start": 507.39, "end": 507.67, "word": " positive", "probability": 0.7802734375}, {"start": 507.67, "end": 507.89, "word": " or", "probability": 0.96533203125}, {"start": 507.89, "end": 508.11, "word": " negative", "probability": 0.9228515625}, {"start": 508.11, "end": 508.47, "word": " or", "probability": 0.6357421875}, {"start": 508.47, "end": 509.13, "word": " there", "probability": 0.72802734375}, {"start": 509.13, "end": 509.25, "word": " is", "probability": 0.92333984375}, {"start": 509.25, "end": 509.41, "word": " no", "probability": 0.93408203125}, {"start": 509.41, "end": 509.87, "word": " relationship", "probability": 0.8935546875}, {"start": 509.87, "end": 510.33, "word": " that", "probability": 0.74169921875}, {"start": 510.33, "end": 510.67, "word": " when", "probability": 0.697265625}, {"start": 510.67, "end": 510.79, "word": " the", "probability": 0.888671875}, {"start": 510.79, "end": 511.15, "word": " covariance", "probability": 0.82470703125}, {"start": 511.15, "end": 511.37, "word": " is", "probability": 0.94580078125}, {"start": 511.37, "end": 511.73, "word": " more", "probability": 0.939453125}, {"start": 511.73, "end": 511.97, "word": " than", "probability": 0.94921875}, {"start": 511.97, "end": 512.37, "word": " zero,", "probability": 0.62548828125}, {"start": 512.79, "end": 512.89, "word": " the", "probability": 0.36083984375}, {"start": 512.89, "end": 513.05, "word": " meaning", "probability": 0.50146484375}, {"start": 513.05, "end": 513.31, "word": " that", "probability": 0.39892578125}, {"start": 513.31, "end": 513.63, "word": " it's", "probability": 0.851318359375}, {"start": 513.63, "end": 513.99, "word": " positive,", "probability": 0.9208984375}, {"start": 514.07, "end": 514.17, "word": " the", "probability": 0.84228515625}, {"start": 514.17, "end": 514.49, "word": " relationship", 
"probability": 0.9033203125}, {"start": 514.49, "end": 514.77, "word": " is", "probability": 0.93310546875}, {"start": 514.77, "end": 515.07, "word": " positive", "probability": 0.94091796875}, {"start": 515.07, "end": 515.55, "word": " and", "probability": 0.57177734375}, {"start": 515.55, "end": 515.91, "word": " one", "probability": 0.83447265625}, {"start": 515.91, "end": 516.19, "word": " variable", "probability": 0.92333984375}, {"start": 516.19, "end": 516.53, "word": " go", "probability": 0.66650390625}, {"start": 516.53, "end": 516.93, "word": " up,", "probability": 0.9443359375}, {"start": 517.29, "end": 517.53, "word": " another", "probability": 0.5390625}, {"start": 517.53, "end": 517.77, "word": " go", "probability": 0.888671875}, {"start": 517.77, "end": 517.95, "word": " up", "probability": 0.94482421875}, {"start": 517.95, "end": 518.21, "word": " and", "probability": 0.640625}, {"start": 518.21, "end": 518.55, "word": " vice", "probability": 0.93017578125}, {"start": 518.55, "end": 518.85, "word": " versa.", "probability": 0.79150390625}, {"start": 519.21, "end": 519.33, "word": " And", "probability": 0.9013671875}, {"start": 519.33, "end": 519.45, "word": " when", "probability": 0.90234375}, {"start": 519.45, "end": 519.59, "word": " the", "probability": 0.91015625}, {"start": 519.59, "end": 520.19, "word": " covariance", "probability": 0.943115234375}, {"start": 520.19, "end": 520.43, "word": " is", "probability": 0.85986328125}, {"start": 520.43, "end": 520.53, "word": " less", "probability": 0.8427734375}, {"start": 520.53, "end": 520.69, "word": " than", "probability": 0.9482421875}, {"start": 520.69, "end": 521.07, "word": " zero,", "probability": 0.89599609375}, {"start": 521.19, "end": 521.33, "word": " there", "probability": 0.90625}, {"start": 521.33, "end": 521.49, "word": " is", "probability": 0.890625}, {"start": 521.49, "end": 521.81, "word": " negative", "probability": 0.6728515625}, {"start": 521.81, "end": 522.27, "word": " 
relationship", "probability": 0.91064453125}, {"start": 522.27, "end": 522.65, "word": " and", "probability": 0.81884765625}, {"start": 522.65, "end": 523.15, "word": " the", "probability": 0.8544921875}, {"start": 523.15, "end": 523.29, "word": " meaning", "probability": 0.83935546875}, {"start": 523.29, "end": 523.57, "word": " that", "probability": 0.9169921875}, {"start": 523.57, "end": 524.03, "word": " when", "probability": 0.8544921875}, {"start": 524.03, "end": 524.25, "word": " one", "probability": 0.92822265625}, {"start": 524.25, "end": 524.51, "word": " variable", "probability": 0.92333984375}, {"start": 524.51, "end": 524.77, "word": " go", "probability": 0.8916015625}, {"start": 524.77, "end": 524.99, "word": " up,", "probability": 0.97265625}, {"start": 525.23, "end": 525.35, "word": " the", "probability": 0.91064453125}, {"start": 525.35, "end": 525.75, "word": " other", "probability": 0.9033203125}, {"start": 525.75, "end": 526.17, "word": " goes", "probability": 0.953125}, {"start": 526.17, "end": 526.49, "word": " down", "probability": 0.853515625}, {"start": 526.49, "end": 526.79, "word": " and", "probability": 0.86083984375}, {"start": 526.79, "end": 527.19, "word": " vice", "probability": 0.96875}, {"start": 527.19, "end": 527.49, "word": " versa", "probability": 0.876953125}, {"start": 527.49, "end": 527.65, "word": " and", "probability": 0.48828125}, {"start": 527.65, "end": 527.79, "word": " when", "probability": 0.345458984375}, {"start": 527.79, "end": 527.91, "word": " the", "probability": 0.689453125}, {"start": 527.91, "end": 529.05, "word": " covariance", "probability": 0.85888671875}, {"start": 529.05, "end": 529.73, "word": " equals", "probability": 0.469482421875}, {"start": 529.73, "end": 529.95, "word": " zero,", "probability": 0.88037109375}, {"start": 530.05, "end": 530.21, "word": " there", "probability": 0.90576171875}, {"start": 530.21, "end": 530.31, "word": " is", "probability": 0.81640625}, {"start": 530.31, "end": 
530.55, "word": " no", "probability": 0.748046875}, {"start": 530.55, "end": 530.83, "word": " relationship", "probability": 0.9111328125}, {"start": 530.83, "end": 531.19, "word": " between", "probability": 0.83837890625}, {"start": 531.19, "end": 531.39, "word": " the", "probability": 0.9140625}, {"start": 531.39, "end": 531.63, "word": " variables.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 23, "seek": 55411, "start": 532.57, "end": 554.11, "text": " And what's about the strength? So just tell the direction, not the strength of the relationship. Now, in order to determine both the direction and the strength, we can use the coefficient of correlation.", "tokens": [400, 437, 311, 466, 264, 3800, 30, 407, 445, 980, 264, 3513, 11, 406, 264, 3800, 295, 264, 2480, 13, 823, 11, 294, 1668, 281, 6997, 1293, 264, 3513, 293, 264, 3800, 11, 321, 393, 764, 264, 17619, 295, 20009, 13], "avg_logprob": -0.20200892502353304, "compression_ratio": 1.5572519083969465, "no_speech_prob": 0.0, "words": [{"start": 532.57, "end": 532.99, "word": " And", "probability": 0.57666015625}, {"start": 532.99, "end": 533.35, "word": " what's", "probability": 0.78662109375}, {"start": 533.35, "end": 533.59, "word": " about", "probability": 0.91064453125}, {"start": 533.59, "end": 534.09, "word": " the", "probability": 0.8837890625}, {"start": 534.09, "end": 534.93, "word": " strength?", "probability": 0.480712890625}, {"start": 537.95, "end": 538.27, "word": " So", "probability": 0.2529296875}, {"start": 538.27, "end": 538.69, "word": " just", "probability": 0.67431640625}, {"start": 538.69, "end": 538.99, "word": " tell", "probability": 0.7734375}, {"start": 538.99, "end": 540.03, "word": " the", "probability": 0.8583984375}, {"start": 540.03, "end": 540.63, "word": " direction,", "probability": 0.97412109375}, {"start": 541.35, "end": 542.29, "word": " not", "probability": 0.93896484375}, {"start": 542.29, "end": 542.51, "word": " the", "probability": 0.9130859375}, {"start": 
542.51, "end": 542.91, "word": " strength", "probability": 0.85498046875}, {"start": 542.91, "end": 543.45, "word": " of", "probability": 0.88427734375}, {"start": 543.45, "end": 543.75, "word": " the", "probability": 0.91650390625}, {"start": 543.75, "end": 544.27, "word": " relationship.", "probability": 0.85693359375}, {"start": 545.17, "end": 545.41, "word": " Now,", "probability": 0.85400390625}, {"start": 546.63, "end": 547.01, "word": " in", "probability": 0.93359375}, {"start": 547.01, "end": 547.21, "word": " order", "probability": 0.935546875}, {"start": 547.21, "end": 547.67, "word": " to", "probability": 0.96923828125}, {"start": 547.67, "end": 548.15, "word": " determine", "probability": 0.9365234375}, {"start": 548.15, "end": 548.61, "word": " both", "probability": 0.8779296875}, {"start": 548.61, "end": 548.91, "word": " the", "probability": 0.69482421875}, {"start": 548.91, "end": 549.31, "word": " direction", "probability": 0.9658203125}, {"start": 549.31, "end": 549.61, "word": " and", "probability": 0.86474609375}, {"start": 549.61, "end": 549.73, "word": " the", "probability": 0.85400390625}, {"start": 549.73, "end": 550.07, "word": " strength,", "probability": 0.8642578125}, {"start": 550.15, "end": 550.27, "word": " we", "probability": 0.9404296875}, {"start": 550.27, "end": 550.47, "word": " can", "probability": 0.94287109375}, {"start": 550.47, "end": 550.91, "word": " use", "probability": 0.87744140625}, {"start": 550.91, "end": 552.11, "word": " the", "probability": 0.91552734375}, {"start": 552.11, "end": 553.37, "word": " coefficient", "probability": 0.7548828125}, {"start": 553.37, "end": 553.69, "word": " of", "probability": 0.9453125}, {"start": 553.69, "end": 554.11, "word": " correlation.", "probability": 0.94775390625}], "temperature": 1.0}, {"id": 24, "seek": 58200, "start": 556.16, "end": 582.0, "text": " The coefficient of correlation measures the relative strength of the linear relationship between two numerical variables. 
The simplest formula that can be used to compute the correlation coefficient is given by this one. Maybe this is the easiest formula you can use. I mean, it's shortcut formula for computation. There are many other formulas to compute the correlation.", "tokens": [440, 17619, 295, 20009, 8000, 264, 4972, 3800, 295, 264, 8213, 2480, 1296, 732, 29054, 9102, 13, 440, 22811, 8513, 300, 393, 312, 1143, 281, 14722, 264, 20009, 17619, 307, 2212, 538, 341, 472, 13, 2704, 341, 307, 264, 12889, 8513, 291, 393, 764, 13, 286, 914, 11, 309, 311, 24822, 8513, 337, 24903, 13, 821, 366, 867, 661, 30546, 281, 14722, 264, 20009, 13], "avg_logprob": -0.1329308646646413, "compression_ratio": 1.8195121951219513, "no_speech_prob": 0.0, "words": [{"start": 556.16, "end": 556.48, "word": " The", "probability": 0.330322265625}, {"start": 556.48, "end": 556.96, "word": " coefficient", "probability": 0.8544921875}, {"start": 556.96, "end": 557.58, "word": " of", "probability": 0.9501953125}, {"start": 557.58, "end": 558.02, "word": " correlation", "probability": 0.9140625}, {"start": 558.02, "end": 558.42, "word": " measures", "probability": 0.77490234375}, {"start": 558.42, "end": 558.84, "word": " the", "probability": 0.8681640625}, {"start": 558.84, "end": 559.2, "word": " relative", "probability": 0.87841796875}, {"start": 559.2, "end": 559.7, "word": " strength", "probability": 0.880859375}, {"start": 559.7, "end": 560.1, "word": " of", "probability": 0.9462890625}, {"start": 560.1, "end": 560.32, "word": " the", "probability": 0.87939453125}, {"start": 560.32, "end": 560.68, "word": " linear", "probability": 0.9140625}, {"start": 560.68, "end": 561.88, "word": " relationship", "probability": 0.91748046875}, {"start": 561.88, "end": 562.22, "word": " between", "probability": 0.8798828125}, {"start": 562.22, "end": 562.42, "word": " two", "probability": 0.8955078125}, {"start": 562.42, "end": 562.78, "word": " numerical", "probability": 0.9609375}, {"start": 562.78, "end": 563.36, "word": 
" variables.", "probability": 0.9375}, {"start": 564.46, "end": 564.84, "word": " The", "probability": 0.884765625}, {"start": 564.84, "end": 565.56, "word": " simplest", "probability": 0.9130859375}, {"start": 565.56, "end": 566.2, "word": " formula", "probability": 0.91455078125}, {"start": 566.2, "end": 567.3, "word": " that", "probability": 0.89892578125}, {"start": 567.3, "end": 567.52, "word": " can", "probability": 0.94287109375}, {"start": 567.52, "end": 567.68, "word": " be", "probability": 0.9541015625}, {"start": 567.68, "end": 567.94, "word": " used", "probability": 0.9140625}, {"start": 567.94, "end": 568.3, "word": " to", "probability": 0.96923828125}, {"start": 568.3, "end": 568.86, "word": " compute", "probability": 0.89111328125}, {"start": 568.86, "end": 569.08, "word": " the", "probability": 0.87890625}, {"start": 569.08, "end": 569.44, "word": " correlation", "probability": 0.9384765625}, {"start": 569.44, "end": 569.94, "word": " coefficient", "probability": 0.96337890625}, {"start": 569.94, "end": 570.76, "word": " is", "probability": 0.90283203125}, {"start": 570.76, "end": 570.98, "word": " given", "probability": 0.884765625}, {"start": 570.98, "end": 571.22, "word": " by", "probability": 0.97021484375}, {"start": 571.22, "end": 571.44, "word": " this", "probability": 0.95068359375}, {"start": 571.44, "end": 571.72, "word": " one.", "probability": 0.89013671875}, {"start": 572.06, "end": 572.24, "word": " Maybe", "probability": 0.54736328125}, {"start": 572.24, "end": 572.44, "word": " this", "probability": 0.85205078125}, {"start": 572.44, "end": 572.78, "word": " is", "probability": 0.94580078125}, {"start": 572.78, "end": 573.2, "word": " the", "probability": 0.92431640625}, {"start": 573.2, "end": 573.8, "word": " easiest", "probability": 0.86083984375}, {"start": 573.8, "end": 574.28, "word": " formula", "probability": 0.935546875}, {"start": 574.28, "end": 574.44, "word": " you", "probability": 0.95166015625}, {"start": 574.44, "end": 
574.62, "word": " can", "probability": 0.94482421875}, {"start": 574.62, "end": 575.04, "word": " use.", "probability": 0.88720703125}, {"start": 576.04, "end": 576.28, "word": " I", "probability": 0.8916015625}, {"start": 576.28, "end": 576.42, "word": " mean,", "probability": 0.95849609375}, {"start": 576.48, "end": 576.6, "word": " it's", "probability": 0.909423828125}, {"start": 576.6, "end": 576.96, "word": " shortcut", "probability": 0.58154296875}, {"start": 576.96, "end": 577.62, "word": " formula", "probability": 0.92236328125}, {"start": 577.62, "end": 578.06, "word": " for", "probability": 0.9443359375}, {"start": 578.06, "end": 578.6, "word": " computation.", "probability": 0.9150390625}, {"start": 579.16, "end": 579.3, "word": " There", "probability": 0.81591796875}, {"start": 579.3, "end": 579.54, "word": " are", "probability": 0.939453125}, {"start": 579.54, "end": 579.82, "word": " many", "probability": 0.90380859375}, {"start": 579.82, "end": 580.14, "word": " other", "probability": 0.8916015625}, {"start": 580.14, "end": 580.6, "word": " formulas", "probability": 0.96630859375}, {"start": 580.6, "end": 580.86, "word": " to", "probability": 0.962890625}, {"start": 580.86, "end": 581.2, "word": " compute", "probability": 0.912109375}, {"start": 581.2, "end": 581.52, "word": " the", "probability": 0.8447265625}, {"start": 581.52, "end": 582.0, "word": " correlation.", "probability": 0.91748046875}], "temperature": 1.0}, {"id": 25, "seek": 60915, "start": 583.11, "end": 609.15, "text": " This one is the easiest one. 
R is just sum of xy minus n, n is the sample size, times x bar is the sample mean, y bar is the sample mean for y, because here we have two variables, divided by square root, don't forget the square root, of two quantities.", "tokens": [639, 472, 307, 264, 12889, 472, 13, 497, 307, 445, 2408, 295, 2031, 88, 3175, 297, 11, 297, 307, 264, 6889, 2744, 11, 1413, 2031, 2159, 307, 264, 6889, 914, 11, 288, 2159, 307, 264, 6889, 914, 337, 288, 11, 570, 510, 321, 362, 732, 9102, 11, 6666, 538, 3732, 5593, 11, 500, 380, 2870, 264, 3732, 5593, 11, 295, 732, 22927, 13], "avg_logprob": -0.2418212906923145, "compression_ratio": 1.632258064516129, "no_speech_prob": 0.0, "words": [{"start": 583.11, "end": 583.63, "word": " This", "probability": 0.492431640625}, {"start": 583.63, "end": 583.85, "word": " one", "probability": 0.9208984375}, {"start": 583.85, "end": 583.99, "word": " is", "probability": 0.95166015625}, {"start": 583.99, "end": 584.13, "word": " the", "probability": 0.91015625}, {"start": 584.13, "end": 584.49, "word": " easiest", "probability": 0.87109375}, {"start": 584.49, "end": 585.25, "word": " one.", "probability": 0.8701171875}, {"start": 586.03, "end": 586.15, "word": " R", "probability": 0.6875}, {"start": 586.15, "end": 586.31, "word": " is", "probability": 0.92138671875}, {"start": 586.31, "end": 586.63, "word": " just", "probability": 0.89697265625}, {"start": 586.63, "end": 587.07, "word": " sum", "probability": 0.74609375}, {"start": 587.07, "end": 587.49, "word": " of", "probability": 0.9658203125}, {"start": 587.49, "end": 588.09, "word": " xy", "probability": 0.392578125}, {"start": 588.09, "end": 590.13, "word": " minus", "probability": 0.50537109375}, {"start": 590.13, "end": 591.11, "word": " n,", "probability": 0.5361328125}, {"start": 591.83, "end": 592.03, "word": " n", "probability": 0.5986328125}, {"start": 592.03, "end": 592.21, "word": " is", "probability": 0.94677734375}, {"start": 592.21, "end": 592.39, "word": " the", "probability": 
0.91064453125}, {"start": 592.39, "end": 592.57, "word": " sample", "probability": 0.826171875}, {"start": 592.57, "end": 593.11, "word": " size,", "probability": 0.85546875}, {"start": 593.69, "end": 594.37, "word": " times", "probability": 0.85302734375}, {"start": 594.37, "end": 594.77, "word": " x", "probability": 0.9365234375}, {"start": 594.77, "end": 595.11, "word": " bar", "probability": 0.8330078125}, {"start": 595.11, "end": 595.63, "word": " is", "probability": 0.8505859375}, {"start": 595.63, "end": 595.79, "word": " the", "probability": 0.90625}, {"start": 595.79, "end": 596.05, "word": " sample", "probability": 0.87548828125}, {"start": 596.05, "end": 596.35, "word": " mean,", "probability": 0.955078125}, {"start": 596.47, "end": 596.67, "word": " y", "probability": 0.94091796875}, {"start": 596.67, "end": 596.99, "word": " bar", "probability": 0.9599609375}, {"start": 596.99, "end": 597.41, "word": " is", "probability": 0.94970703125}, {"start": 597.41, "end": 597.57, "word": " the", "probability": 0.91650390625}, {"start": 597.57, "end": 597.83, "word": " sample", "probability": 0.8759765625}, {"start": 597.83, "end": 598.15, "word": " mean", "probability": 0.95849609375}, {"start": 598.15, "end": 598.43, "word": " for", "probability": 0.93896484375}, {"start": 598.43, "end": 598.75, "word": " y,", "probability": 0.92578125}, {"start": 599.17, "end": 599.45, "word": " because", "probability": 0.90087890625}, {"start": 599.45, "end": 599.65, "word": " here", "probability": 0.8505859375}, {"start": 599.65, "end": 599.83, "word": " we", "probability": 0.9404296875}, {"start": 599.83, "end": 600.65, "word": " have", "probability": 0.95068359375}, {"start": 600.65, "end": 601.09, "word": " two", "probability": 0.82177734375}, {"start": 601.09, "end": 602.61, "word": " variables,", "probability": 0.95068359375}, {"start": 603.09, "end": 603.53, "word": " divided", "probability": 0.77197265625}, {"start": 603.53, "end": 603.85, "word": " by", 
"probability": 0.96875}, {"start": 603.85, "end": 604.57, "word": " square", "probability": 0.72900390625}, {"start": 604.57, "end": 604.95, "word": " root,", "probability": 0.9189453125}, {"start": 605.55, "end": 605.95, "word": " don't", "probability": 0.954833984375}, {"start": 605.95, "end": 606.25, "word": " forget", "probability": 0.91015625}, {"start": 606.25, "end": 606.47, "word": " the", "probability": 0.90283203125}, {"start": 606.47, "end": 606.65, "word": " square", "probability": 0.896484375}, {"start": 606.65, "end": 606.97, "word": " root,", "probability": 0.92529296875}, {"start": 608.11, "end": 608.45, "word": " of", "probability": 0.9658203125}, {"start": 608.45, "end": 608.69, "word": " two", "probability": 0.92529296875}, {"start": 608.69, "end": 609.15, "word": " quantities.", "probability": 0.96337890625}], "temperature": 1.0}, {"id": 26, "seek": 63611, "start": 610.37, "end": 636.11, "text": " One conserved for x and the other for y. The first one, sum of x squared minus nx bar squared. The other one is similar just for the other variables, sum y squared minus ny bar squared. 
So in order to determine the value of R, we need, suppose for example, we have x and y, theta x and y.", "tokens": [1485, 1014, 6913, 337, 2031, 293, 264, 661, 337, 288, 13, 440, 700, 472, 11, 2408, 295, 2031, 8889, 3175, 297, 87, 2159, 8889, 13, 440, 661, 472, 307, 2531, 445, 337, 264, 661, 9102, 11, 2408, 288, 8889, 3175, 18052, 2159, 8889, 13, 407, 294, 1668, 281, 6997, 264, 2158, 295, 497, 11, 321, 643, 11, 7297, 337, 1365, 11, 321, 362, 2031, 293, 288, 11, 9725, 2031, 293, 288, 13], "avg_logprob": -0.23758562133736807, "compression_ratio": 1.680232558139535, "no_speech_prob": 0.0, "words": [{"start": 610.3700000000001, "end": 610.9300000000001, "word": " One", "probability": 0.65234375}, {"start": 610.9300000000001, "end": 611.49, "word": " conserved", "probability": 0.59161376953125}, {"start": 611.49, "end": 612.03, "word": " for", "probability": 0.94677734375}, {"start": 612.03, "end": 612.29, "word": " x", "probability": 0.751953125}, {"start": 612.29, "end": 612.49, "word": " and", "probability": 0.80712890625}, {"start": 612.49, "end": 612.61, "word": " the", "probability": 0.79736328125}, {"start": 612.61, "end": 612.83, "word": " other", "probability": 0.892578125}, {"start": 612.83, "end": 613.13, "word": " for", "probability": 0.94189453125}, {"start": 613.13, "end": 613.49, "word": " y.", "probability": 0.9912109375}, {"start": 614.25, "end": 614.61, "word": " The", "probability": 0.5029296875}, {"start": 614.61, "end": 614.89, "word": " first", "probability": 0.8740234375}, {"start": 614.89, "end": 615.11, "word": " one,", "probability": 0.92041015625}, {"start": 615.21, "end": 615.37, "word": " sum", "probability": 0.896484375}, {"start": 615.37, "end": 615.53, "word": " of", "probability": 0.96044921875}, {"start": 615.53, "end": 615.71, "word": " x", "probability": 0.9833984375}, {"start": 615.71, "end": 616.03, "word": " squared", "probability": 0.70751953125}, {"start": 616.03, "end": 616.33, "word": " minus", "probability": 0.96337890625}, 
{"start": 616.33, "end": 616.69, "word": " nx", "probability": 0.68310546875}, {"start": 616.69, "end": 616.89, "word": " bar", "probability": 0.904296875}, {"start": 616.89, "end": 617.25, "word": " squared.", "probability": 0.84033203125}, {"start": 617.95, "end": 618.23, "word": " The", "probability": 0.77099609375}, {"start": 618.23, "end": 618.49, "word": " other", "probability": 0.89404296875}, {"start": 618.49, "end": 618.69, "word": " one", "probability": 0.91943359375}, {"start": 618.69, "end": 618.85, "word": " is", "probability": 0.9326171875}, {"start": 618.85, "end": 619.27, "word": " similar", "probability": 0.9501953125}, {"start": 619.27, "end": 619.69, "word": " just", "probability": 0.677734375}, {"start": 619.69, "end": 620.17, "word": " for", "probability": 0.94775390625}, {"start": 620.17, "end": 620.41, "word": " the", "probability": 0.8154296875}, {"start": 620.41, "end": 620.69, "word": " other", "probability": 0.8828125}, {"start": 620.69, "end": 621.15, "word": " variables,", "probability": 0.5400390625}, {"start": 621.37, "end": 621.57, "word": " sum", "probability": 0.71875}, {"start": 621.57, "end": 621.83, "word": " y", "probability": 0.90185546875}, {"start": 621.83, "end": 622.17, "word": " squared", "probability": 0.86474609375}, {"start": 622.17, "end": 622.63, "word": " minus", "probability": 0.98583984375}, {"start": 622.63, "end": 623.35, "word": " ny", "probability": 0.84765625}, {"start": 623.35, "end": 623.67, "word": " bar", "probability": 0.951171875}, {"start": 623.67, "end": 623.97, "word": " squared.", "probability": 0.84326171875}, {"start": 624.91, "end": 625.41, "word": " So", "probability": 0.96142578125}, {"start": 625.41, "end": 625.63, "word": " in", "probability": 0.765625}, {"start": 625.63, "end": 625.85, "word": " order", "probability": 0.921875}, {"start": 625.85, "end": 626.09, "word": " to", "probability": 0.966796875}, {"start": 626.09, "end": 626.57, "word": " determine", "probability": 0.92578125}, 
{"start": 626.57, "end": 626.87, "word": " the", "probability": 0.916015625}, {"start": 626.87, "end": 627.15, "word": " value", "probability": 0.97412109375}, {"start": 627.15, "end": 627.35, "word": " of", "probability": 0.96875}, {"start": 627.35, "end": 627.63, "word": " R,", "probability": 0.56494140625}, {"start": 628.05, "end": 628.27, "word": " we", "probability": 0.94287109375}, {"start": 628.27, "end": 628.65, "word": " need,", "probability": 0.912109375}, {"start": 632.17, "end": 632.65, "word": " suppose", "probability": 0.5400390625}, {"start": 632.65, "end": 632.89, "word": " for", "probability": 0.607421875}, {"start": 632.89, "end": 633.19, "word": " example,", "probability": 0.9775390625}, {"start": 633.29, "end": 633.37, "word": " we", "probability": 0.91015625}, {"start": 633.37, "end": 633.53, "word": " have", "probability": 0.94189453125}, {"start": 633.53, "end": 633.71, "word": " x", "probability": 0.9453125}, {"start": 633.71, "end": 633.87, "word": " and", "probability": 0.95068359375}, {"start": 633.87, "end": 634.11, "word": " y,", "probability": 0.99658203125}, {"start": 634.87, "end": 635.21, "word": " theta", "probability": 0.347900390625}, {"start": 635.21, "end": 635.71, "word": " x", "probability": 0.80078125}, {"start": 635.71, "end": 635.89, "word": " and", "probability": 0.94482421875}, {"start": 635.89, "end": 636.11, "word": " y.", "probability": 0.99755859375}], "temperature": 1.0}, {"id": 27, "seek": 66797, "start": 640.35, "end": 667.97, "text": " x is called explanatory variable and y is called response variable sometimes x is called independent", "tokens": [2031, 307, 1219, 9045, 4745, 7006, 293, 288, 307, 1219, 4134, 7006, 2171, 2031, 307, 1219, 6695], "avg_logprob": -0.17122396330038706, "compression_ratio": 1.3835616438356164, "no_speech_prob": 0.0, "words": [{"start": 640.35, "end": 640.79, "word": " x", "probability": 0.413330078125}, {"start": 640.79, "end": 641.09, "word": " is", "probability": 0.86474609375}, 
{"start": 641.09, "end": 641.57, "word": " called", "probability": 0.8916015625}, {"start": 641.57, "end": 644.73, "word": " explanatory", "probability": 0.941162109375}, {"start": 644.73, "end": 645.19, "word": " variable", "probability": 0.923828125}, {"start": 645.19, "end": 654.39, "word": " and", "probability": 0.64697265625}, {"start": 654.39, "end": 656.37, "word": " y", "probability": 0.92041015625}, {"start": 656.37, "end": 656.55, "word": " is", "probability": 0.94189453125}, {"start": 656.55, "end": 657.19, "word": " called", "probability": 0.892578125}, {"start": 657.19, "end": 659.81, "word": " response", "probability": 0.93701171875}, {"start": 659.81, "end": 664.59, "word": " variable", "probability": 0.89208984375}, {"start": 664.59, "end": 666.35, "word": " sometimes", "probability": 0.81689453125}, {"start": 666.35, "end": 666.89, "word": " x", "probability": 0.94873046875}, {"start": 666.89, "end": 667.11, "word": " is", "probability": 0.9404296875}, {"start": 667.11, "end": 667.43, "word": " called", "probability": 0.90087890625}, {"start": 667.43, "end": 667.97, "word": " independent", "probability": 0.88818359375}], "temperature": 1.0}, {"id": 28, "seek": 69792, "start": 681.76, "end": 697.92, "text": " For example, suppose we are talking about consumption and input. 
And we are interested in the relationship between these two variables.", "tokens": [1171, 1365, 11, 7297, 321, 366, 1417, 466, 12126, 293, 4846, 13, 400, 321, 366, 3102, 294, 264, 2480, 1296, 613, 732, 9102, 13], "avg_logprob": -0.27109374046325685, "compression_ratio": 1.2592592592592593, "no_speech_prob": 0.0, "words": [{"start": 681.76, "end": 682.5, "word": " For", "probability": 0.06585693359375}, {"start": 682.5, "end": 683.24, "word": " example,", "probability": 0.9716796875}, {"start": 683.56, "end": 684.12, "word": " suppose", "probability": 0.83203125}, {"start": 684.12, "end": 684.3, "word": " we", "probability": 0.93115234375}, {"start": 684.3, "end": 684.4, "word": " are", "probability": 0.86865234375}, {"start": 684.4, "end": 684.74, "word": " talking", "probability": 0.84130859375}, {"start": 684.74, "end": 685.32, "word": " about", "probability": 0.90966796875}, {"start": 685.32, "end": 686.6, "word": " consumption", "probability": 0.9140625}, {"start": 686.6, "end": 692.28, "word": " and", "probability": 0.51025390625}, {"start": 692.28, "end": 692.62, "word": " input.", "probability": 0.407470703125}, {"start": 694.36, "end": 695.1, "word": " And", "probability": 0.92431640625}, {"start": 695.1, "end": 695.22, "word": " we", "probability": 0.921875}, {"start": 695.22, "end": 695.36, "word": " are", "probability": 0.92919921875}, {"start": 695.36, "end": 695.78, "word": " interested", "probability": 0.85791015625}, {"start": 695.78, "end": 696.14, "word": " in", "probability": 0.93798828125}, {"start": 696.14, "end": 696.26, "word": " the", "probability": 0.912109375}, {"start": 696.26, "end": 696.7, "word": " relationship", "probability": 0.9189453125}, {"start": 696.7, "end": 697.08, "word": " between", "probability": 0.88671875}, {"start": 697.08, "end": 697.3, "word": " these", "probability": 0.86181640625}, {"start": 697.3, "end": 697.48, "word": " two", "probability": 0.923828125}, {"start": 697.48, "end": 697.92, "word": " variables.", 
"probability": 0.93798828125}], "temperature": 1.0}, {"id": 29, "seek": 72778, "start": 699.6, "end": 727.78, "text": " Now, except for the variable or the independent, this one affects the other variable. As we mentioned, as your income increases, your consumption will go in the same direction, increases also. Income causes Y, or income affects Y. In this case, income is your X. Most of the time we use", "tokens": [823, 11, 3993, 337, 264, 7006, 420, 264, 6695, 11, 341, 472, 11807, 264, 661, 7006, 13, 1018, 321, 2835, 11, 382, 428, 5742, 8637, 11, 428, 12126, 486, 352, 294, 264, 912, 3513, 11, 8637, 611, 13, 682, 1102, 7700, 398, 11, 420, 5742, 11807, 398, 13, 682, 341, 1389, 11, 5742, 307, 428, 1783, 13, 4534, 295, 264, 565, 321, 764], "avg_logprob": -0.23388672131113708, "compression_ratio": 1.6589595375722543, "no_speech_prob": 0.0, "words": [{"start": 699.6, "end": 700.2, "word": " Now,", "probability": 0.73779296875}, {"start": 700.82, "end": 701.12, "word": " except", "probability": 0.413818359375}, {"start": 701.12, "end": 701.28, "word": " for", "probability": 0.295166015625}, {"start": 701.28, "end": 701.36, "word": " the", "probability": 0.3271484375}, {"start": 701.36, "end": 701.78, "word": " variable", "probability": 0.72216796875}, {"start": 701.78, "end": 702.08, "word": " or", "probability": 0.5322265625}, {"start": 702.08, "end": 702.24, "word": " the", "probability": 0.72509765625}, {"start": 702.24, "end": 702.66, "word": " independent,", "probability": 0.8671875}, {"start": 703.16, "end": 703.52, "word": " this", "probability": 0.91162109375}, {"start": 703.52, "end": 703.86, "word": " one", "probability": 0.9091796875}, {"start": 703.86, "end": 704.6, "word": " affects", "probability": 0.7568359375}, {"start": 704.6, "end": 704.9, "word": " the", "probability": 0.91455078125}, {"start": 704.9, "end": 705.14, "word": " other", "probability": 0.890625}, {"start": 705.14, "end": 705.6, "word": " variable.", "probability": 0.8525390625}, {"start": 
707.76, "end": 708.36, "word": " As", "probability": 0.951171875}, {"start": 708.36, "end": 708.48, "word": " we", "probability": 0.84814453125}, {"start": 708.48, "end": 708.84, "word": " mentioned,", "probability": 0.80419921875}, {"start": 709.02, "end": 709.22, "word": " as", "probability": 0.95458984375}, {"start": 709.22, "end": 709.42, "word": " your", "probability": 0.8857421875}, {"start": 709.42, "end": 709.84, "word": " income", "probability": 0.939453125}, {"start": 709.84, "end": 710.7, "word": " increases,", "probability": 0.93310546875}, {"start": 712.04, "end": 712.32, "word": " your", "probability": 0.86865234375}, {"start": 712.32, "end": 712.8, "word": " consumption", "probability": 0.9755859375}, {"start": 712.8, "end": 713.22, "word": " will", "probability": 0.8505859375}, {"start": 713.22, "end": 713.42, "word": " go", "probability": 0.94873046875}, {"start": 713.42, "end": 713.52, "word": " in", "probability": 0.9287109375}, {"start": 713.52, "end": 713.64, "word": " the", "probability": 0.91845703125}, {"start": 713.64, "end": 713.8, "word": " same", "probability": 0.9052734375}, {"start": 713.8, "end": 714.22, "word": " direction,", "probability": 0.97021484375}, {"start": 714.4, "end": 714.7, "word": " increases", "probability": 0.6865234375}, {"start": 714.7, "end": 715.12, "word": " also.", "probability": 0.8671875}, {"start": 716.26, "end": 716.84, "word": " Income", "probability": 0.91943359375}, {"start": 716.84, "end": 718.58, "word": " causes", "probability": 0.787109375}, {"start": 718.58, "end": 718.88, "word": " Y,", "probability": 0.724609375}, {"start": 719.4, "end": 719.58, "word": " or", "probability": 0.8662109375}, {"start": 719.58, "end": 719.88, "word": " income", "probability": 0.88720703125}, {"start": 719.88, "end": 720.42, "word": " affects", "probability": 0.81640625}, {"start": 720.42, "end": 720.76, "word": " Y.", "probability": 0.99462890625}, {"start": 720.9, "end": 721.04, "word": " In", "probability": 
0.955078125}, {"start": 721.04, "end": 721.26, "word": " this", "probability": 0.94384765625}, {"start": 721.26, "end": 721.56, "word": " case,", "probability": 0.9052734375}, {"start": 721.7, "end": 722.14, "word": " income", "probability": 0.94677734375}, {"start": 722.14, "end": 723.86, "word": " is", "probability": 0.87548828125}, {"start": 723.86, "end": 724.04, "word": " your", "probability": 0.84716796875}, {"start": 724.04, "end": 724.34, "word": " X.", "probability": 0.9599609375}, {"start": 726.18, "end": 726.78, "word": " Most", "probability": 0.8720703125}, {"start": 726.78, "end": 727.1, "word": " of", "probability": 0.96630859375}, {"start": 727.1, "end": 727.22, "word": " the", "probability": 0.91650390625}, {"start": 727.22, "end": 727.46, "word": " time", "probability": 0.884765625}, {"start": 727.46, "end": 727.64, "word": " we", "probability": 0.51220703125}, {"start": 727.64, "end": 727.78, "word": " use", "probability": 0.8583984375}], "temperature": 1.0}, {"id": 30, "seek": 75321, "start": 730.79, "end": 753.21, "text": " And Y for independent. So in this case, the response variable or your outcome or the dependent variable is your consumption. So Y is consumption, X is income. 
So now in order to determine the correlation coefficient, we have the data of X and Y.", "tokens": [400, 398, 337, 6695, 13, 407, 294, 341, 1389, 11, 264, 4134, 7006, 420, 428, 9700, 420, 264, 12334, 7006, 307, 428, 12126, 13, 407, 398, 307, 12126, 11, 1783, 307, 5742, 13, 407, 586, 294, 1668, 281, 6997, 264, 20009, 17619, 11, 321, 362, 264, 1412, 295, 1783, 293, 398, 13], "avg_logprob": -0.15919810645985152, "compression_ratio": 1.5668789808917198, "no_speech_prob": 0.0, "words": [{"start": 730.79, "end": 731.01, "word": " And", "probability": 0.34521484375}, {"start": 731.01, "end": 731.27, "word": " Y", "probability": 0.62255859375}, {"start": 731.27, "end": 731.49, "word": " for", "probability": 0.8740234375}, {"start": 731.49, "end": 731.87, "word": " independent.", "probability": 0.76806640625}, {"start": 732.67, "end": 733.07, "word": " So", "probability": 0.9169921875}, {"start": 733.07, "end": 733.21, "word": " in", "probability": 0.78125}, {"start": 733.21, "end": 733.39, "word": " this", "probability": 0.94677734375}, {"start": 733.39, "end": 733.77, "word": " case,", "probability": 0.91357421875}, {"start": 735.27, "end": 735.59, "word": " the", "probability": 0.87548828125}, {"start": 735.59, "end": 736.41, "word": " response", "probability": 0.95849609375}, {"start": 736.41, "end": 736.91, "word": " variable", "probability": 0.93017578125}, {"start": 736.91, "end": 737.71, "word": " or", "probability": 0.548828125}, {"start": 737.71, "end": 737.91, "word": " your", "probability": 0.85693359375}, {"start": 737.91, "end": 738.45, "word": " outcome", "probability": 0.90478515625}, {"start": 738.45, "end": 738.93, "word": " or", "probability": 0.395751953125}, {"start": 738.93, "end": 739.05, "word": " the", "probability": 0.84375}, {"start": 739.05, "end": 739.37, "word": " dependent", "probability": 0.859375}, {"start": 739.37, "end": 739.83, "word": " variable", "probability": 0.91162109375}, {"start": 739.83, "end": 740.57, "word": " is", 
"probability": 0.859375}, {"start": 740.57, "end": 740.73, "word": " your", "probability": 0.8916015625}, {"start": 740.73, "end": 741.19, "word": " consumption.", "probability": 0.9697265625}, {"start": 741.89, "end": 742.05, "word": " So", "probability": 0.95361328125}, {"start": 742.05, "end": 742.39, "word": " Y", "probability": 0.92431640625}, {"start": 742.39, "end": 742.59, "word": " is", "probability": 0.94580078125}, {"start": 742.59, "end": 743.11, "word": " consumption,", "probability": 0.95947265625}, {"start": 743.53, "end": 743.75, "word": " X", "probability": 0.98583984375}, {"start": 743.75, "end": 744.07, "word": " is", "probability": 0.939453125}, {"start": 744.07, "end": 744.43, "word": " income.", "probability": 0.95556640625}, {"start": 746.27, "end": 746.59, "word": " So", "probability": 0.94140625}, {"start": 746.59, "end": 746.81, "word": " now", "probability": 0.91064453125}, {"start": 746.81, "end": 747.01, "word": " in", "probability": 0.5810546875}, {"start": 747.01, "end": 747.17, "word": " order", "probability": 0.9208984375}, {"start": 747.17, "end": 747.41, "word": " to", "probability": 0.96533203125}, {"start": 747.41, "end": 747.91, "word": " determine", "probability": 0.9267578125}, {"start": 747.91, "end": 749.15, "word": " the", "probability": 0.8798828125}, {"start": 749.15, "end": 749.63, "word": " correlation", "probability": 0.91943359375}, {"start": 749.63, "end": 750.13, "word": " coefficient,", "probability": 0.9658203125}, {"start": 751.47, "end": 751.95, "word": " we", "probability": 0.935546875}, {"start": 751.95, "end": 752.13, "word": " have", "probability": 0.93115234375}, {"start": 752.13, "end": 752.27, "word": " the", "probability": 0.798828125}, {"start": 752.27, "end": 752.47, "word": " data", "probability": 0.94091796875}, {"start": 752.47, "end": 752.65, "word": " of", "probability": 0.91162109375}, {"start": 752.65, "end": 752.79, "word": " X", "probability": 0.9775390625}, {"start": 752.79, "end": 752.95, 
"word": " and", "probability": 0.93798828125}, {"start": 752.95, "end": 753.21, "word": " Y.", "probability": 0.99853515625}], "temperature": 1.0}, {"id": 31, "seek": 78417, "start": 756.35, "end": 784.17, "text": " The values of X, I mean the number of pairs of X should be equal to the number of pairs of Y. So if we have ten observations for X, we should have the same number of observations for Y. It's pairs. X1, Y1, X2, Y2, and so on. Now, the formula to compute R, the shortcut formula is sum of XY minus N times", "tokens": [440, 4190, 295, 1783, 11, 286, 914, 264, 1230, 295, 15494, 295, 1783, 820, 312, 2681, 281, 264, 1230, 295, 15494, 295, 398, 13, 407, 498, 321, 362, 2064, 18163, 337, 1783, 11, 321, 820, 362, 264, 912, 1230, 295, 18163, 337, 398, 13, 467, 311, 15494, 13, 1783, 16, 11, 398, 16, 11, 1783, 17, 11, 398, 17, 11, 293, 370, 322, 13, 823, 11, 264, 8513, 281, 14722, 497, 11, 264, 24822, 8513, 307, 2408, 295, 48826, 3175, 426, 1413], "avg_logprob": -0.18589984508882085, "compression_ratio": 1.6612021857923498, "no_speech_prob": 0.0, "words": [{"start": 756.35, "end": 756.63, "word": " The", "probability": 0.51708984375}, {"start": 756.63, "end": 757.05, "word": " values", "probability": 0.75830078125}, {"start": 757.05, "end": 757.49, "word": " of", "probability": 0.9609375}, {"start": 757.49, "end": 757.83, "word": " X,", "probability": 0.427978515625}, {"start": 758.11, "end": 758.19, "word": " I", "probability": 0.9658203125}, {"start": 758.19, "end": 758.29, "word": " mean", "probability": 0.96728515625}, {"start": 758.29, "end": 758.43, "word": " the", "probability": 0.63330078125}, {"start": 758.43, "end": 758.65, "word": " number", "probability": 0.80810546875}, {"start": 758.65, "end": 758.79, "word": " of", "probability": 0.95166015625}, {"start": 758.79, "end": 758.91, "word": " pairs", "probability": 0.6826171875}, {"start": 758.91, "end": 759.05, "word": " of", "probability": 0.9287109375}, {"start": 759.05, "end": 759.19, "word": " X", 
"probability": 0.94921875}, {"start": 759.19, "end": 759.35, "word": " should", "probability": 0.87890625}, {"start": 759.35, "end": 759.49, "word": " be", "probability": 0.865234375}, {"start": 759.49, "end": 759.69, "word": " equal", "probability": 0.873046875}, {"start": 759.69, "end": 759.79, "word": " to", "probability": 0.87744140625}, {"start": 759.79, "end": 759.85, "word": " the", "probability": 0.88818359375}, {"start": 759.85, "end": 760.03, "word": " number", "probability": 0.84765625}, {"start": 760.03, "end": 760.15, "word": " of", "probability": 0.95556640625}, {"start": 760.15, "end": 760.31, "word": " pairs", "probability": 0.85791015625}, {"start": 760.31, "end": 760.49, "word": " of", "probability": 0.9638671875}, {"start": 760.49, "end": 760.71, "word": " Y.", "probability": 0.9892578125}, {"start": 761.23, "end": 761.71, "word": " So", "probability": 0.94970703125}, {"start": 761.71, "end": 761.99, "word": " if", "probability": 0.7705078125}, {"start": 761.99, "end": 762.15, "word": " we", "probability": 0.9111328125}, {"start": 762.15, "end": 762.31, "word": " have", "probability": 0.94677734375}, {"start": 762.31, "end": 762.49, "word": " ten", "probability": 0.65185546875}, {"start": 762.49, "end": 762.99, "word": " observations", "probability": 0.7587890625}, {"start": 762.99, "end": 763.41, "word": " for", "probability": 0.9453125}, {"start": 763.41, "end": 763.73, "word": " X,", "probability": 0.95556640625}, {"start": 764.07, "end": 764.33, "word": " we", "probability": 0.94384765625}, {"start": 764.33, "end": 764.55, "word": " should", "probability": 0.96435546875}, {"start": 764.55, "end": 764.75, "word": " have", "probability": 0.94873046875}, {"start": 764.75, "end": 764.93, "word": " the", "probability": 0.91015625}, {"start": 764.93, "end": 765.15, "word": " same", "probability": 0.8984375}, {"start": 765.15, "end": 765.51, "word": " number", "probability": 0.90380859375}, {"start": 765.51, "end": 765.87, "word": " of", 
"probability": 0.81396484375}, {"start": 765.87, "end": 766.29, "word": " observations", "probability": 0.828125}, {"start": 766.29, "end": 766.59, "word": " for", "probability": 0.93994140625}, {"start": 766.59, "end": 766.89, "word": " Y.", "probability": 0.99609375}, {"start": 767.59, "end": 768.07, "word": " It's", "probability": 0.90966796875}, {"start": 768.07, "end": 768.35, "word": " pairs.", "probability": 0.67578125}, {"start": 769.53, "end": 770.01, "word": " X1,", "probability": 0.927490234375}, {"start": 770.09, "end": 770.39, "word": " Y1,", "probability": 0.874755859375}, {"start": 770.45, "end": 770.69, "word": " X2,", "probability": 0.984130859375}, {"start": 770.73, "end": 771.03, "word": " Y2,", "probability": 0.98974609375}, {"start": 771.09, "end": 771.21, "word": " and", "probability": 0.923828125}, {"start": 771.21, "end": 771.41, "word": " so", "probability": 0.94970703125}, {"start": 771.41, "end": 771.59, "word": " on.", "probability": 0.9423828125}, {"start": 773.17, "end": 773.45, "word": " Now,", "probability": 0.93994140625}, {"start": 773.69, "end": 773.87, "word": " the", "probability": 0.89892578125}, {"start": 773.87, "end": 774.21, "word": " formula", "probability": 0.826171875}, {"start": 774.21, "end": 774.39, "word": " to", "probability": 0.8994140625}, {"start": 774.39, "end": 774.75, "word": " compute", "probability": 0.927734375}, {"start": 774.75, "end": 775.13, "word": " R,", "probability": 0.8876953125}, {"start": 777.03, "end": 777.23, "word": " the", "probability": 0.86181640625}, {"start": 777.23, "end": 777.65, "word": " shortcut", "probability": 0.8935546875}, {"start": 777.65, "end": 779.15, "word": " formula", "probability": 0.9140625}, {"start": 779.15, "end": 779.49, "word": " is", "probability": 0.56201171875}, {"start": 779.49, "end": 779.69, "word": " sum", "probability": 0.77392578125}, {"start": 779.69, "end": 779.85, "word": " of", "probability": 0.9599609375}, {"start": 779.85, "end": 780.27, "word": " 
XY", "probability": 0.57421875}, {"start": 780.27, "end": 783.27, "word": " minus", "probability": 0.9013671875}, {"start": 783.27, "end": 783.63, "word": " N", "probability": 0.86962890625}, {"start": 783.63, "end": 784.17, "word": " times", "probability": 0.92724609375}], "temperature": 1.0}, {"id": 32, "seek": 81365, "start": 784.97, "end": 813.65, "text": " x bar, y bar, divided by the square root of two quantities. The first one, sum of x squared minus n x bar. The other one, sum of y squared minus ny y squared. So the first thing we have to do is to find the mean for each x and y. So first step, compute x bar and y bar.", "tokens": [2031, 2159, 11, 288, 2159, 11, 6666, 538, 264, 3732, 5593, 295, 732, 22927, 13, 440, 700, 472, 11, 2408, 295, 2031, 8889, 3175, 297, 2031, 2159, 13, 440, 661, 472, 11, 2408, 295, 288, 8889, 3175, 18052, 288, 8889, 13, 407, 264, 700, 551, 321, 362, 281, 360, 307, 281, 915, 264, 914, 337, 1184, 2031, 293, 288, 13, 407, 700, 1823, 11, 14722, 2031, 2159, 293, 288, 2159, 13], "avg_logprob": -0.2121310786654552, "compression_ratio": 1.7419354838709677, "no_speech_prob": 0.0, "words": [{"start": 784.97, "end": 785.33, "word": " x", "probability": 0.2286376953125}, {"start": 785.33, "end": 785.73, "word": " bar,", "probability": 0.83154296875}, {"start": 786.01, "end": 786.25, "word": " y", "probability": 0.96630859375}, {"start": 786.25, "end": 786.61, "word": " bar,", "probability": 0.96044921875}, {"start": 786.99, "end": 787.67, "word": " divided", "probability": 0.5810546875}, {"start": 787.67, "end": 787.85, "word": " by", "probability": 0.97314453125}, {"start": 787.85, "end": 787.99, "word": " the", "probability": 0.55419921875}, {"start": 787.99, "end": 788.21, "word": " square", "probability": 0.8876953125}, {"start": 788.21, "end": 788.53, "word": " root", "probability": 0.92041015625}, {"start": 788.53, "end": 789.33, "word": " of", "probability": 0.9541015625}, {"start": 789.33, "end": 789.63, "word": " two", "probability": 
0.87646484375}, {"start": 789.63, "end": 790.13, "word": " quantities.", "probability": 0.92431640625}, {"start": 790.79, "end": 791.13, "word": " The", "probability": 0.8759765625}, {"start": 791.13, "end": 791.35, "word": " first", "probability": 0.8740234375}, {"start": 791.35, "end": 791.59, "word": " one,", "probability": 0.90771484375}, {"start": 791.67, "end": 791.83, "word": " sum", "probability": 0.8603515625}, {"start": 791.83, "end": 791.97, "word": " of", "probability": 0.939453125}, {"start": 791.97, "end": 792.19, "word": " x", "probability": 0.98388671875}, {"start": 792.19, "end": 792.47, "word": " squared", "probability": 0.779296875}, {"start": 792.47, "end": 792.77, "word": " minus", "probability": 0.97265625}, {"start": 792.77, "end": 792.99, "word": " n", "probability": 0.76953125}, {"start": 792.99, "end": 793.15, "word": " x", "probability": 0.57568359375}, {"start": 793.15, "end": 793.41, "word": " bar.", "probability": 0.9384765625}, {"start": 794.53, "end": 795.01, "word": " The", "probability": 0.57861328125}, {"start": 795.01, "end": 795.23, "word": " other", "probability": 0.8837890625}, {"start": 795.23, "end": 795.47, "word": " one,", "probability": 0.900390625}, {"start": 795.57, "end": 795.77, "word": " sum", "probability": 0.8984375}, {"start": 795.77, "end": 795.95, "word": " of", "probability": 0.9560546875}, {"start": 795.95, "end": 796.15, "word": " y", "probability": 0.98486328125}, {"start": 796.15, "end": 796.51, "word": " squared", "probability": 0.8720703125}, {"start": 796.51, "end": 796.91, "word": " minus", "probability": 0.98291015625}, {"start": 796.91, "end": 797.27, "word": " ny", "probability": 0.60546875}, {"start": 797.27, "end": 797.95, "word": " y", "probability": 0.333251953125}, {"start": 797.95, "end": 798.19, "word": " squared.", "probability": 0.818359375}, {"start": 799.31, "end": 799.83, "word": " So", "probability": 0.9541015625}, {"start": 799.83, "end": 800.03, "word": " the", "probability": 
0.6904296875}, {"start": 800.03, "end": 800.23, "word": " first", "probability": 0.82275390625}, {"start": 800.23, "end": 800.49, "word": " thing", "probability": 0.441650390625}, {"start": 800.49, "end": 800.63, "word": " we", "probability": 0.9296875}, {"start": 800.63, "end": 800.79, "word": " have", "probability": 0.947265625}, {"start": 800.79, "end": 800.91, "word": " to", "probability": 0.96923828125}, {"start": 800.91, "end": 801.19, "word": " do", "probability": 0.96142578125}, {"start": 801.19, "end": 801.53, "word": " is", "probability": 0.89404296875}, {"start": 801.53, "end": 801.71, "word": " to", "probability": 0.8955078125}, {"start": 801.71, "end": 801.97, "word": " find", "probability": 0.89208984375}, {"start": 801.97, "end": 802.23, "word": " the", "probability": 0.9228515625}, {"start": 802.23, "end": 802.49, "word": " mean", "probability": 0.95654296875}, {"start": 802.49, "end": 802.83, "word": " for", "probability": 0.94189453125}, {"start": 802.83, "end": 803.15, "word": " each", "probability": 0.94921875}, {"start": 803.15, "end": 803.75, "word": " x", "probability": 0.8427734375}, {"start": 803.75, "end": 803.91, "word": " and", "probability": 0.94970703125}, {"start": 803.91, "end": 804.21, "word": " y.", "probability": 0.99658203125}, {"start": 808.23, "end": 808.75, "word": " So", "probability": 0.462158203125}, {"start": 808.75, "end": 809.01, "word": " first", "probability": 0.76123046875}, {"start": 809.01, "end": 809.33, "word": " step,", "probability": 0.91455078125}, {"start": 809.45, "end": 809.85, "word": " compute", "probability": 0.90966796875}, {"start": 809.85, "end": 811.35, "word": " x", "probability": 0.9794921875}, {"start": 811.35, "end": 811.71, "word": " bar", "probability": 0.91650390625}, {"start": 811.71, "end": 813.27, "word": " and", "probability": 0.8876953125}, {"start": 813.27, "end": 813.43, "word": " y", "probability": 0.99853515625}, {"start": 813.43, "end": 813.65, "word": " bar.", "probability": 
0.94677734375}], "temperature": 1.0}, {"id": 33, "seek": 83995, "start": 815.39, "end": 839.95, "text": " Next, if you look here, we have x and y, x times y. So we need to compute the product of x times y. So just for example, suppose x is 10, y is 5. So x times y is 50. In addition to that, you have to compute", "tokens": [3087, 11, 498, 291, 574, 510, 11, 321, 362, 2031, 293, 288, 11, 2031, 1413, 288, 13, 407, 321, 643, 281, 14722, 264, 1674, 295, 2031, 1413, 288, 13, 407, 445, 337, 1365, 11, 7297, 2031, 307, 1266, 11, 288, 307, 1025, 13, 407, 2031, 1413, 288, 307, 2625, 13, 682, 4500, 281, 300, 11, 291, 362, 281, 14722], "avg_logprob": -0.13971354737877845, "compression_ratio": 1.4577464788732395, "no_speech_prob": 0.0, "words": [{"start": 815.39, "end": 815.91, "word": " Next,", "probability": 0.87109375}, {"start": 817.03, "end": 817.21, "word": " if", "probability": 0.93310546875}, {"start": 817.21, "end": 817.23, "word": " you", "probability": 0.95947265625}, {"start": 817.23, "end": 817.45, "word": " look", "probability": 0.96875}, {"start": 817.45, "end": 817.71, "word": " here,", "probability": 0.86328125}, {"start": 817.79, "end": 817.91, "word": " we", "probability": 0.955078125}, {"start": 817.91, "end": 818.33, "word": " have", "probability": 0.9501953125}, {"start": 818.33, "end": 818.87, "word": " x", "probability": 0.69287109375}, {"start": 818.87, "end": 819.05, "word": " and", "probability": 0.94677734375}, {"start": 819.05, "end": 819.39, "word": " y,", "probability": 0.99609375}, {"start": 820.35, "end": 820.57, "word": " x", "probability": 0.9892578125}, {"start": 820.57, "end": 820.97, "word": " times", "probability": 0.89404296875}, {"start": 820.97, "end": 821.29, "word": " y.", "probability": 0.98291015625}, {"start": 821.33, "end": 821.55, "word": " So", "probability": 0.9326171875}, {"start": 821.55, "end": 821.69, "word": " we", "probability": 0.591796875}, {"start": 821.69, "end": 821.85, "word": " need", "probability": 0.904296875}, 
{"start": 821.85, "end": 821.97, "word": " to", "probability": 0.958984375}, {"start": 821.97, "end": 822.25, "word": " compute", "probability": 0.441650390625}, {"start": 822.25, "end": 822.63, "word": " the", "probability": 0.89306640625}, {"start": 822.63, "end": 822.99, "word": " product", "probability": 0.95751953125}, {"start": 822.99, "end": 825.37, "word": " of", "probability": 0.90625}, {"start": 825.37, "end": 825.65, "word": " x", "probability": 0.99169921875}, {"start": 825.65, "end": 825.89, "word": " times", "probability": 0.92919921875}, {"start": 825.89, "end": 826.17, "word": " y.", "probability": 0.98828125}, {"start": 828.03, "end": 828.55, "word": " So", "probability": 0.9482421875}, {"start": 828.55, "end": 828.87, "word": " just", "probability": 0.84521484375}, {"start": 828.87, "end": 829.11, "word": " for", "probability": 0.7216796875}, {"start": 829.11, "end": 829.45, "word": " example,", "probability": 0.9755859375}, {"start": 829.57, "end": 829.83, "word": " suppose", "probability": 0.95263671875}, {"start": 829.83, "end": 830.79, "word": " x", "probability": 0.9638671875}, {"start": 830.79, "end": 830.95, "word": " is", "probability": 0.9453125}, {"start": 830.95, "end": 831.17, "word": " 10,", "probability": 0.771484375}, {"start": 831.25, "end": 831.39, "word": " y", "probability": 0.98828125}, {"start": 831.39, "end": 831.61, "word": " is", "probability": 0.95361328125}, {"start": 831.61, "end": 832.05, "word": " 5.", "probability": 0.9169921875}, {"start": 832.71, "end": 833.01, "word": " So", "probability": 0.95068359375}, {"start": 833.01, "end": 833.27, "word": " x", "probability": 0.94677734375}, {"start": 833.27, "end": 833.53, "word": " times", "probability": 0.92919921875}, {"start": 833.53, "end": 833.87, "word": " y", "probability": 0.98974609375}, {"start": 833.87, "end": 834.53, "word": " is", "probability": 0.94677734375}, {"start": 834.53, "end": 834.97, "word": " 50.", "probability": 0.68359375}, {"start": 837.81, 
"end": 838.33, "word": " In", "probability": 0.9287109375}, {"start": 838.33, "end": 838.65, "word": " addition", "probability": 0.9541015625}, {"start": 838.65, "end": 838.85, "word": " to", "probability": 0.966796875}, {"start": 838.85, "end": 839.05, "word": " that,", "probability": 0.939453125}, {"start": 839.09, "end": 839.17, "word": " you", "probability": 0.9365234375}, {"start": 839.17, "end": 839.35, "word": " have", "probability": 0.947265625}, {"start": 839.35, "end": 839.53, "word": " to", "probability": 0.9697265625}, {"start": 839.53, "end": 839.95, "word": " compute", "probability": 0.82568359375}], "temperature": 1.0}, {"id": 34, "seek": 87325, "start": 846.79, "end": 873.25, "text": " 100 x squared and y squared. It's like 125. Do the same calculations for the rest of the data you have. We have other data here, so we have to compute the same for the others. Then finally, just add xy, x squared, y squared.", "tokens": [2319, 2031, 8889, 293, 288, 8889, 13, 467, 311, 411, 25276, 13, 1144, 264, 912, 20448, 337, 264, 1472, 295, 264, 1412, 291, 362, 13, 492, 362, 661, 1412, 510, 11, 370, 321, 362, 281, 14722, 264, 912, 337, 264, 2357, 13, 1396, 2721, 11, 445, 909, 2031, 88, 11, 2031, 8889, 11, 288, 8889, 13], "avg_logprob": -0.2443804761819672, "compression_ratio": 1.5202702702702702, "no_speech_prob": 0.0, "words": [{"start": 846.79, "end": 847.27, "word": " 100", "probability": 0.292236328125}, {"start": 847.27, "end": 848.01, "word": " x", "probability": 0.328125}, {"start": 848.01, "end": 848.51, "word": " squared", "probability": 0.7763671875}, {"start": 848.51, "end": 849.87, "word": " and", "probability": 0.61083984375}, {"start": 849.87, "end": 850.09, "word": " y", "probability": 0.8505859375}, {"start": 850.09, "end": 850.31, "word": " squared.", "probability": 0.84033203125}, {"start": 850.49, "end": 850.55, "word": " It's", "probability": 0.6090087890625}, {"start": 850.55, "end": 850.63, "word": " like", "probability": 0.82470703125}, 
{"start": 850.63, "end": 852.47, "word": " 125.", "probability": 0.62353515625}, {"start": 854.81, "end": 855.45, "word": " Do", "probability": 0.93017578125}, {"start": 855.45, "end": 855.65, "word": " the", "probability": 0.91943359375}, {"start": 855.65, "end": 855.83, "word": " same", "probability": 0.8837890625}, {"start": 855.83, "end": 856.41, "word": " calculations", "probability": 0.9248046875}, {"start": 856.41, "end": 857.13, "word": " for", "probability": 0.95068359375}, {"start": 857.13, "end": 857.43, "word": " the", "probability": 0.916015625}, {"start": 857.43, "end": 858.09, "word": " rest", "probability": 0.92041015625}, {"start": 858.09, "end": 858.47, "word": " of", "probability": 0.96630859375}, {"start": 858.47, "end": 858.63, "word": " the", "probability": 0.9169921875}, {"start": 858.63, "end": 858.87, "word": " data", "probability": 0.93994140625}, {"start": 858.87, "end": 859.05, "word": " you", "probability": 0.93798828125}, {"start": 859.05, "end": 859.31, "word": " have.", "probability": 0.943359375}, {"start": 859.57, "end": 859.73, "word": " We", "probability": 0.89794921875}, {"start": 859.73, "end": 859.91, "word": " have", "probability": 0.94677734375}, {"start": 859.91, "end": 860.43, "word": " other", "probability": 0.86572265625}, {"start": 860.43, "end": 860.73, "word": " data", "probability": 0.93896484375}, {"start": 860.73, "end": 861.09, "word": " here,", "probability": 0.84716796875}, {"start": 861.59, "end": 861.87, "word": " so", "probability": 0.943359375}, {"start": 861.87, "end": 862.01, "word": " we", "probability": 0.90087890625}, {"start": 862.01, "end": 862.15, "word": " have", "probability": 0.9443359375}, {"start": 862.15, "end": 862.29, "word": " to", "probability": 0.9609375}, {"start": 862.29, "end": 862.73, "word": " compute", "probability": 0.8720703125}, {"start": 862.73, "end": 864.23, "word": " the", "probability": 0.88720703125}, {"start": 864.23, "end": 864.59, "word": " same", "probability": 
0.87548828125}, {"start": 864.59, "end": 864.97, "word": " for", "probability": 0.91943359375}, {"start": 864.97, "end": 865.13, "word": " the", "probability": 0.52978515625}, {"start": 865.13, "end": 865.41, "word": " others.", "probability": 0.80859375}, {"start": 868.47, "end": 869.11, "word": " Then", "probability": 0.65771484375}, {"start": 869.11, "end": 869.51, "word": " finally,", "probability": 0.724609375}, {"start": 870.17, "end": 870.51, "word": " just", "probability": 0.8681640625}, {"start": 870.51, "end": 871.03, "word": " add", "probability": 0.8994140625}, {"start": 871.03, "end": 871.77, "word": " xy,", "probability": 0.84765625}, {"start": 872.03, "end": 872.27, "word": " x", "probability": 0.99560546875}, {"start": 872.27, "end": 872.57, "word": " squared,", "probability": 0.8134765625}, {"start": 872.69, "end": 872.89, "word": " y", "probability": 0.98828125}, {"start": 872.89, "end": 873.25, "word": " squared.", "probability": 0.83740234375}], "temperature": 1.0}, {"id": 35, "seek": 90037, "start": 875.91, "end": 900.37, "text": " The values you have here in this formula, in order to compute the coefficient. 
Now, this value ranges between minus one and plus one.", "tokens": [440, 4190, 291, 362, 510, 294, 341, 8513, 11, 294, 1668, 281, 14722, 264, 17619, 13, 823, 11, 341, 2158, 22526, 1296, 3175, 472, 293, 1804, 472, 13], "avg_logprob": -0.2280441851451479, "compression_ratio": 1.2523364485981308, "no_speech_prob": 0.0, "words": [{"start": 875.91, "end": 876.21, "word": " The", "probability": 0.607421875}, {"start": 876.21, "end": 876.61, "word": " values", "probability": 0.92919921875}, {"start": 876.61, "end": 876.81, "word": " you", "probability": 0.935546875}, {"start": 876.81, "end": 877.09, "word": " have", "probability": 0.9443359375}, {"start": 877.09, "end": 877.47, "word": " here", "probability": 0.84375}, {"start": 877.47, "end": 879.19, "word": " in", "probability": 0.6728515625}, {"start": 879.19, "end": 879.51, "word": " this", "probability": 0.927734375}, {"start": 879.51, "end": 880.07, "word": " formula,", "probability": 0.7119140625}, {"start": 880.47, "end": 880.65, "word": " in", "probability": 0.9287109375}, {"start": 880.65, "end": 880.83, "word": " order", "probability": 0.92236328125}, {"start": 880.83, "end": 882.05, "word": " to", "probability": 0.97216796875}, {"start": 882.05, "end": 882.79, "word": " compute", "probability": 0.90234375}, {"start": 882.79, "end": 883.99, "word": " the", "probability": 0.861328125}, {"start": 883.99, "end": 884.83, "word": " coefficient.", "probability": 0.86328125}, {"start": 894.25, "end": 895.05, "word": " Now,", "probability": 0.6123046875}, {"start": 895.99, "end": 896.45, "word": " this", "probability": 0.90771484375}, {"start": 896.45, "end": 896.93, "word": " value", "probability": 0.97802734375}, {"start": 896.93, "end": 898.51, "word": " ranges", "probability": 0.83056640625}, {"start": 898.51, "end": 898.97, "word": " between", "probability": 0.87451171875}, {"start": 898.97, "end": 899.33, "word": " minus", "probability": 0.7685546875}, {"start": 899.33, "end": 899.57, "word": " one", 
"probability": 0.489501953125}, {"start": 899.57, "end": 899.75, "word": " and", "probability": 0.90869140625}, {"start": 899.75, "end": 900.07, "word": " plus", "probability": 0.92236328125}, {"start": 900.07, "end": 900.37, "word": " one.", "probability": 0.8486328125}], "temperature": 1.0}, {"id": 36, "seek": 93512, "start": 906.52, "end": 935.12, "text": " So it's between minus one and plus one. That means it's never smaller than minus one or greater than one. It's between minus one and plus one. Make sense? I mean if your value is suppose you did mistake for any of these computations and R might be", "tokens": [407, 309, 311, 1296, 3175, 472, 293, 1804, 472, 13, 663, 1355, 309, 311, 1128, 4356, 813, 3175, 472, 420, 5044, 813, 472, 13, 467, 311, 1296, 3175, 472, 293, 1804, 472, 13, 4387, 2020, 30, 286, 914, 498, 428, 2158, 307, 7297, 291, 630, 6146, 337, 604, 295, 613, 2807, 763, 293, 497, 1062, 312], "avg_logprob": -0.18009867793635317, "compression_ratio": 1.631578947368421, "no_speech_prob": 0.0, "words": [{"start": 906.52, "end": 906.82, "word": " So", "probability": 0.75390625}, {"start": 906.82, "end": 907.04, "word": " it's", "probability": 0.88720703125}, {"start": 907.04, "end": 907.48, "word": " between", "probability": 0.7998046875}, {"start": 907.48, "end": 907.76, "word": " minus", "probability": 0.798828125}, {"start": 907.76, "end": 907.94, "word": " one", "probability": 0.6171875}, {"start": 907.94, "end": 908.08, "word": " and", "probability": 0.9013671875}, {"start": 908.08, "end": 908.36, "word": " plus", "probability": 0.96533203125}, {"start": 908.36, "end": 908.62, "word": " one.", "probability": 0.92919921875}, {"start": 909.74, "end": 910.38, "word": " That", "probability": 0.85546875}, {"start": 910.38, "end": 910.8, "word": " means", "probability": 0.94189453125}, {"start": 910.8, "end": 912.3, "word": " it's", "probability": 0.663818359375}, {"start": 912.3, "end": 912.66, "word": " never", "probability": 0.87353515625}, {"start": 
912.66, "end": 915.84, "word": " smaller", "probability": 0.83935546875}, {"start": 915.84, "end": 916.1, "word": " than", "probability": 0.939453125}, {"start": 916.1, "end": 916.4, "word": " minus", "probability": 0.97802734375}, {"start": 916.4, "end": 916.74, "word": " one", "probability": 0.9150390625}, {"start": 916.74, "end": 918.34, "word": " or", "probability": 0.78271484375}, {"start": 918.34, "end": 918.7, "word": " greater", "probability": 0.90185546875}, {"start": 918.7, "end": 919.02, "word": " than", "probability": 0.94580078125}, {"start": 919.02, "end": 919.2, "word": " one.", "probability": 0.92578125}, {"start": 919.6, "end": 919.84, "word": " It's", "probability": 0.97509765625}, {"start": 919.84, "end": 920.2, "word": " between", "probability": 0.8876953125}, {"start": 920.2, "end": 920.52, "word": " minus", "probability": 0.98193359375}, {"start": 920.52, "end": 920.74, "word": " one", "probability": 0.91796875}, {"start": 920.74, "end": 920.9, "word": " and", "probability": 0.93701171875}, {"start": 920.9, "end": 921.2, "word": " plus", "probability": 0.95947265625}, {"start": 921.2, "end": 921.48, "word": " one.", "probability": 0.92041015625}, {"start": 924.36, "end": 925.0, "word": " Make", "probability": 0.7421875}, {"start": 925.0, "end": 925.3, "word": " sense?", "probability": 0.83544921875}, {"start": 925.94, "end": 926.08, "word": " I", "probability": 0.91064453125}, {"start": 926.08, "end": 926.22, "word": " mean", "probability": 0.9716796875}, {"start": 926.22, "end": 926.36, "word": " if", "probability": 0.533203125}, {"start": 926.36, "end": 926.56, "word": " your", "probability": 0.77978515625}, {"start": 926.56, "end": 926.94, "word": " value", "probability": 0.96533203125}, {"start": 926.94, "end": 927.26, "word": " is", "probability": 0.92041015625}, {"start": 927.26, "end": 927.92, "word": " suppose", "probability": 0.52001953125}, {"start": 927.92, "end": 928.3, "word": " you", "probability": 0.9296875}, {"start": 928.3, 
"end": 929.32, "word": " did", "probability": 0.849609375}, {"start": 929.32, "end": 930.02, "word": " mistake", "probability": 0.70947265625}, {"start": 930.02, "end": 930.32, "word": " for", "probability": 0.662109375}, {"start": 930.32, "end": 932.1, "word": " any", "probability": 0.837890625}, {"start": 932.1, "end": 932.58, "word": " of", "probability": 0.8798828125}, {"start": 932.58, "end": 932.76, "word": " these", "probability": 0.82275390625}, {"start": 932.76, "end": 933.44, "word": " computations", "probability": 0.953369140625}, {"start": 933.44, "end": 934.3, "word": " and", "probability": 0.64697265625}, {"start": 934.3, "end": 934.52, "word": " R", "probability": 0.5}, {"start": 934.52, "end": 934.82, "word": " might", "probability": 0.90771484375}, {"start": 934.82, "end": 935.12, "word": " be", "probability": 0.9482421875}], "temperature": 1.0}, {"id": 37, "seek": 96433, "start": 935.77, "end": 964.33, "text": " 1.15, 115. That means there is an error. Or for example, if R is negative 1.5, that means there is a mistake. So you have to find or figure out what is that mistake. So that's simple calculations. Usually in the exam, we will give the formula for the correlation coefficient, as we mentioned before. 
In addition to that, we will give the summation.", "tokens": [502, 13, 5211, 11, 39436, 13, 663, 1355, 456, 307, 364, 6713, 13, 1610, 337, 1365, 11, 498, 497, 307, 3671, 502, 13, 20, 11, 300, 1355, 456, 307, 257, 6146, 13, 407, 291, 362, 281, 915, 420, 2573, 484, 437, 307, 300, 6146, 13, 407, 300, 311, 2199, 20448, 13, 11419, 294, 264, 1139, 11, 321, 486, 976, 264, 8513, 337, 264, 20009, 17619, 11, 382, 321, 2835, 949, 13, 682, 4500, 281, 300, 11, 321, 486, 976, 264, 28811, 13], "avg_logprob": -0.21762048049145435, "compression_ratio": 1.6540284360189574, "no_speech_prob": 0.0, "words": [{"start": 935.77, "end": 936.21, "word": " 1", "probability": 0.38720703125}, {"start": 936.21, "end": 938.71, "word": ".15,", "probability": 0.860595703125}, {"start": 938.87, "end": 939.49, "word": " 115.", "probability": 0.5439453125}, {"start": 939.97, "end": 940.53, "word": " That", "probability": 0.75244140625}, {"start": 940.53, "end": 941.05, "word": " means", "probability": 0.7783203125}, {"start": 941.05, "end": 941.23, "word": " there", "probability": 0.869140625}, {"start": 941.23, "end": 941.35, "word": " is", "probability": 0.80517578125}, {"start": 941.35, "end": 941.49, "word": " an", "probability": 0.919921875}, {"start": 941.49, "end": 941.71, "word": " error.", "probability": 0.87548828125}, {"start": 942.27, "end": 942.59, "word": " Or", "probability": 0.861328125}, {"start": 942.59, "end": 942.89, "word": " for", "probability": 0.60791015625}, {"start": 942.89, "end": 943.21, "word": " example,", "probability": 0.9736328125}, {"start": 943.33, "end": 943.45, "word": " if", "probability": 0.92431640625}, {"start": 943.45, "end": 943.57, "word": " R", "probability": 0.52880859375}, {"start": 943.57, "end": 943.73, "word": " is", "probability": 0.9296875}, {"start": 943.73, "end": 944.05, "word": " negative", "probability": 0.80224609375}, {"start": 944.05, "end": 944.37, "word": " 1", "probability": 0.90185546875}, {"start": 944.37, "end": 944.93, "word": ".5,", 
"probability": 0.9921875}, {"start": 945.29, "end": 945.57, "word": " that", "probability": 0.92138671875}, {"start": 945.57, "end": 945.87, "word": " means", "probability": 0.9326171875}, {"start": 945.87, "end": 946.67, "word": " there", "probability": 0.86181640625}, {"start": 946.67, "end": 946.79, "word": " is", "probability": 0.92138671875}, {"start": 946.79, "end": 946.97, "word": " a", "probability": 0.99169921875}, {"start": 946.97, "end": 947.39, "word": " mistake.", "probability": 0.96044921875}, {"start": 947.95, "end": 948.41, "word": " So", "probability": 0.95166015625}, {"start": 948.41, "end": 948.51, "word": " you", "probability": 0.77783203125}, {"start": 948.51, "end": 948.65, "word": " have", "probability": 0.94482421875}, {"start": 948.65, "end": 948.79, "word": " to", "probability": 0.9677734375}, {"start": 948.79, "end": 949.07, "word": " find", "probability": 0.87548828125}, {"start": 949.07, "end": 949.35, "word": " or", "probability": 0.419921875}, {"start": 949.35, "end": 949.61, "word": " figure", "probability": 0.62353515625}, {"start": 949.61, "end": 950.01, "word": " out", "probability": 0.8681640625}, {"start": 950.01, "end": 951.49, "word": " what", "probability": 0.638671875}, {"start": 951.49, "end": 951.65, "word": " is", "probability": 0.9306640625}, {"start": 951.65, "end": 951.93, "word": " that", "probability": 0.73095703125}, {"start": 951.93, "end": 953.87, "word": " mistake.", "probability": 0.798828125}, {"start": 954.55, "end": 955.01, "word": " So", "probability": 0.9560546875}, {"start": 955.01, "end": 955.41, "word": " that's", "probability": 0.6439208984375}, {"start": 955.41, "end": 955.67, "word": " simple", "probability": 0.896484375}, {"start": 955.67, "end": 956.25, "word": " calculations.", "probability": 0.89794921875}, {"start": 957.49, "end": 957.83, "word": " Usually", "probability": 0.841796875}, {"start": 957.83, "end": 958.11, "word": " in", "probability": 0.693359375}, {"start": 958.11, "end": 958.23, 
"word": " the", "probability": 0.89013671875}, {"start": 958.23, "end": 958.55, "word": " exam,", "probability": 0.92822265625}, {"start": 958.61, "end": 958.73, "word": " we", "probability": 0.837890625}, {"start": 958.73, "end": 958.91, "word": " will", "probability": 0.69921875}, {"start": 958.91, "end": 959.09, "word": " give", "probability": 0.80810546875}, {"start": 959.09, "end": 959.27, "word": " the", "probability": 0.9052734375}, {"start": 959.27, "end": 959.63, "word": " formula", "probability": 0.9072265625}, {"start": 959.63, "end": 959.95, "word": " for", "probability": 0.94580078125}, {"start": 959.95, "end": 960.13, "word": " the", "probability": 0.89306640625}, {"start": 960.13, "end": 960.47, "word": " correlation", "probability": 0.943359375}, {"start": 960.47, "end": 960.93, "word": " coefficient,", "probability": 0.95751953125}, {"start": 961.17, "end": 961.23, "word": " as", "probability": 0.9609375}, {"start": 961.23, "end": 961.35, "word": " we", "probability": 0.94189453125}, {"start": 961.35, "end": 961.59, "word": " mentioned", "probability": 0.83740234375}, {"start": 961.59, "end": 961.97, "word": " before.", "probability": 0.85400390625}, {"start": 962.45, "end": 962.67, "word": " In", "probability": 0.9599609375}, {"start": 962.67, "end": 962.93, "word": " addition", "probability": 0.96240234375}, {"start": 962.93, "end": 963.17, "word": " to", "probability": 0.96484375}, {"start": 963.17, "end": 963.33, "word": " that,", "probability": 0.93505859375}, {"start": 963.37, "end": 963.47, "word": " we", "probability": 0.95068359375}, {"start": 963.47, "end": 963.59, "word": " will", "probability": 0.7001953125}, {"start": 963.59, "end": 963.73, "word": " give", "probability": 0.87109375}, {"start": 963.73, "end": 963.89, "word": " the", "probability": 0.9033203125}, {"start": 963.89, "end": 964.33, "word": " summation.", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 38, "seek": 99484, "start": 967.78, "end": 994.84, "text": " 
The sum of xy is given, sum x squared and sum y squared. Also sum of x and sum of y, in order to determine the means of x and y. For example, suppose I give sum of xi and i goes from 1 to 10 is 700, for example. You have to know that the sample size is 10, so x bar.", "tokens": [440, 2408, 295, 2031, 88, 307, 2212, 11, 2408, 2031, 8889, 293, 2408, 288, 8889, 13, 2743, 2408, 295, 2031, 293, 2408, 295, 288, 11, 294, 1668, 281, 6997, 264, 1355, 295, 2031, 293, 288, 13, 1171, 1365, 11, 7297, 286, 976, 2408, 295, 36800, 293, 741, 1709, 490, 502, 281, 1266, 307, 15204, 11, 337, 1365, 13, 509, 362, 281, 458, 300, 264, 6889, 2744, 307, 1266, 11, 370, 2031, 2159, 13], "avg_logprob": -0.25168919925754135, "compression_ratio": 1.5798816568047338, "no_speech_prob": 0.0, "words": [{"start": 967.7800000000001, "end": 968.34, "word": " The", "probability": 0.075439453125}, {"start": 968.34, "end": 968.56, "word": " sum", "probability": 0.904296875}, {"start": 968.56, "end": 968.7, "word": " of", "probability": 0.94384765625}, {"start": 968.7, "end": 968.98, "word": " xy", "probability": 0.615478515625}, {"start": 968.98, "end": 969.1, "word": " is", "probability": 0.86962890625}, {"start": 969.1, "end": 969.38, "word": " given,", "probability": 0.859375}, {"start": 970.58, "end": 970.84, "word": " sum", "probability": 0.6484375}, {"start": 970.84, "end": 971.08, "word": " x", "probability": 0.9033203125}, {"start": 971.08, "end": 971.68, "word": " squared", "probability": 0.72119140625}, {"start": 971.68, "end": 971.92, "word": " and", "probability": 0.6572265625}, {"start": 971.92, "end": 972.24, "word": " sum", "probability": 0.77001953125}, {"start": 972.24, "end": 972.72, "word": " y", "probability": 0.335205078125}, {"start": 972.72, "end": 973.38, "word": " squared.", "probability": 0.80908203125}, {"start": 974.04, "end": 974.5, "word": " Also", "probability": 0.84619140625}, {"start": 974.5, "end": 974.76, "word": " sum", "probability": 0.62744140625}, {"start": 974.76, 
"end": 974.92, "word": " of", "probability": 0.96240234375}, {"start": 974.92, "end": 975.24, "word": " x", "probability": 0.9931640625}, {"start": 975.24, "end": 976.76, "word": " and", "probability": 0.81640625}, {"start": 976.76, "end": 977.08, "word": " sum", "probability": 0.9248046875}, {"start": 977.08, "end": 977.24, "word": " of", "probability": 0.962890625}, {"start": 977.24, "end": 977.48, "word": " y,", "probability": 0.9970703125}, {"start": 977.7, "end": 977.92, "word": " in", "probability": 0.916015625}, {"start": 977.92, "end": 978.08, "word": " order", "probability": 0.9228515625}, {"start": 978.08, "end": 978.32, "word": " to", "probability": 0.96484375}, {"start": 978.32, "end": 978.66, "word": " determine", "probability": 0.91357421875}, {"start": 978.66, "end": 978.9, "word": " the", "probability": 0.908203125}, {"start": 978.9, "end": 979.08, "word": " means", "probability": 0.73486328125}, {"start": 979.08, "end": 979.22, "word": " of", "probability": 0.93701171875}, {"start": 979.22, "end": 979.36, "word": " x", "probability": 0.9619140625}, {"start": 979.36, "end": 979.5, "word": " and", "probability": 0.765625}, {"start": 979.5, "end": 979.72, "word": " y.", "probability": 0.99609375}, {"start": 981.52, "end": 981.98, "word": " For", "probability": 0.96044921875}, {"start": 981.98, "end": 982.32, "word": " example,", "probability": 0.86474609375}, {"start": 982.52, "end": 983.46, "word": " suppose", "probability": 0.90185546875}, {"start": 983.46, "end": 984.26, "word": " I", "probability": 0.951171875}, {"start": 984.26, "end": 984.58, "word": " give", "probability": 0.6865234375}, {"start": 984.58, "end": 984.9, "word": " sum", "probability": 0.77099609375}, {"start": 984.9, "end": 985.04, "word": " of", "probability": 0.947265625}, {"start": 985.04, "end": 985.32, "word": " xi", "probability": 0.73046875}, {"start": 985.32, "end": 985.62, "word": " and", "probability": 0.677734375}, {"start": 985.62, "end": 985.82, "word": " i", 
"probability": 0.74755859375}, {"start": 985.82, "end": 986.1, "word": " goes", "probability": 0.90185546875}, {"start": 986.1, "end": 986.32, "word": " from", "probability": 0.88427734375}, {"start": 986.32, "end": 986.5, "word": " 1", "probability": 0.69775390625}, {"start": 986.5, "end": 986.6, "word": " to", "probability": 0.9755859375}, {"start": 986.6, "end": 986.86, "word": " 10", "probability": 0.96728515625}, {"start": 986.86, "end": 987.26, "word": " is", "probability": 0.75927734375}, {"start": 987.26, "end": 987.8, "word": " 700,", "probability": 0.96728515625}, {"start": 988.08, "end": 988.22, "word": " for", "probability": 0.9501953125}, {"start": 988.22, "end": 988.52, "word": " example.", "probability": 0.9765625}, {"start": 990.42, "end": 990.6, "word": " You", "probability": 0.564453125}, {"start": 990.6, "end": 990.74, "word": " have", "probability": 0.9404296875}, {"start": 990.74, "end": 990.86, "word": " to", "probability": 0.97119140625}, {"start": 990.86, "end": 990.96, "word": " know", "probability": 0.87890625}, {"start": 990.96, "end": 991.26, "word": " that", "probability": 0.919921875}, {"start": 991.26, "end": 991.76, "word": " the", "probability": 0.87109375}, {"start": 991.76, "end": 992.06, "word": " sample", "probability": 0.7470703125}, {"start": 992.06, "end": 992.38, "word": " size", "probability": 0.86181640625}, {"start": 992.38, "end": 992.58, "word": " is", "probability": 0.9384765625}, {"start": 992.58, "end": 992.78, "word": " 10,", "probability": 0.89892578125}, {"start": 993.82, "end": 994.14, "word": " so", "probability": 0.94482421875}, {"start": 994.14, "end": 994.5, "word": " x", "probability": 0.9384765625}, {"start": 994.5, "end": 994.84, "word": " bar.", "probability": 0.884765625}], "temperature": 1.0}, {"id": 39, "seek": 102561, "start": 997.02, "end": 1025.62, "text": " is 700 divided by 10, so it's 7. Then use the curve to compute the coefficient of correlation. Questions? 
I think straightforward, maybe the easiest topic in this book is to compute the coefficient of correlation or correlation coefficient.", "tokens": [307, 15204, 6666, 538, 1266, 11, 370, 309, 311, 1614, 13, 1396, 764, 264, 7605, 281, 14722, 264, 17619, 295, 20009, 13, 27738, 30, 286, 519, 15325, 11, 1310, 264, 12889, 4829, 294, 341, 1446, 307, 281, 14722, 264, 17619, 295, 20009, 420, 20009, 17619, 13], "avg_logprob": -0.275764617514103, "compression_ratio": 1.6394557823129252, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 997.02, "end": 997.38, "word": " is", "probability": 0.42626953125}, {"start": 997.38, "end": 997.88, "word": " 700", "probability": 0.79931640625}, {"start": 997.88, "end": 998.24, "word": " divided", "probability": 0.52587890625}, {"start": 998.24, "end": 998.46, "word": " by", "probability": 0.96484375}, {"start": 998.46, "end": 998.72, "word": " 10,", "probability": 0.888671875}, {"start": 999.32, "end": 999.68, "word": " so", "probability": 0.496337890625}, {"start": 999.68, "end": 999.9, "word": " it's", "probability": 0.84326171875}, {"start": 999.9, "end": 1000.16, "word": " 7.", "probability": 0.59423828125}, {"start": 1002.4, "end": 1002.72, "word": " Then", "probability": 0.57275390625}, {"start": 1002.72, "end": 1003.06, "word": " use", "probability": 0.74365234375}, {"start": 1003.06, "end": 1003.46, "word": " the", "probability": 0.83544921875}, {"start": 1003.46, "end": 1003.86, "word": " curve", "probability": 0.39501953125}, {"start": 1003.86, "end": 1004.76, "word": " to", "probability": 0.87646484375}, {"start": 1004.76, "end": 1005.24, "word": " compute", "probability": 0.91455078125}, {"start": 1005.24, "end": 1006.18, "word": " the", "probability": 0.8974609375}, {"start": 1006.18, "end": 1006.78, "word": " coefficient", "probability": 0.92529296875}, {"start": 1006.78, "end": 1007.36, "word": " of", "probability": 0.9755859375}, {"start": 1007.36, "end": 1007.9, "word": " correlation.", "probability": 
0.9794921875}, {"start": 1008.98, "end": 1009.9, "word": " Questions?", "probability": 0.576171875}, {"start": 1011.4, "end": 1011.86, "word": " I", "probability": 0.9853515625}, {"start": 1011.86, "end": 1012.0, "word": " think", "probability": 0.9228515625}, {"start": 1012.0, "end": 1012.56, "word": " straightforward,", "probability": 0.5234375}, {"start": 1012.92, "end": 1013.1, "word": " maybe", "probability": 0.90576171875}, {"start": 1013.1, "end": 1013.32, "word": " the", "probability": 0.91015625}, {"start": 1013.32, "end": 1013.7, "word": " easiest", "probability": 0.88427734375}, {"start": 1013.7, "end": 1015.54, "word": " topic", "probability": 0.94482421875}, {"start": 1015.54, "end": 1015.74, "word": " in", "probability": 0.92041015625}, {"start": 1015.74, "end": 1015.9, "word": " this", "probability": 0.87255859375}, {"start": 1015.9, "end": 1016.16, "word": " book", "probability": 0.50341796875}, {"start": 1016.16, "end": 1017.58, "word": " is", "probability": 0.72607421875}, {"start": 1017.58, "end": 1017.76, "word": " to", "probability": 0.9619140625}, {"start": 1017.76, "end": 1018.1, "word": " compute", "probability": 0.916015625}, {"start": 1018.1, "end": 1019.6, "word": " the", "probability": 0.86474609375}, {"start": 1019.6, "end": 1020.58, "word": " coefficient", "probability": 0.89599609375}, {"start": 1020.58, "end": 1021.9, "word": " of", "probability": 0.92724609375}, {"start": 1021.9, "end": 1022.98, "word": " correlation", "probability": 0.951171875}, {"start": 1022.98, "end": 1023.44, "word": " or", "probability": 0.72998046875}, {"start": 1023.44, "end": 1024.16, "word": " correlation", "probability": 0.96533203125}, {"start": 1024.16, "end": 1025.62, "word": " coefficient.", "probability": 0.95654296875}], "temperature": 1.0}, {"id": 40, "seek": 105351, "start": 1027.01, "end": 1053.51, "text": " Now my question is, do you think outliers affect the correlation coefficient? 
We said last time outliers affect the mean, the range, the variance. Now the question is, do outliers affect the correlation?", "tokens": [823, 452, 1168, 307, 11, 360, 291, 519, 484, 23646, 3345, 264, 20009, 17619, 30, 492, 848, 1036, 565, 484, 23646, 3345, 264, 914, 11, 264, 3613, 11, 264, 21977, 13, 823, 264, 1168, 307, 11, 360, 484, 23646, 3345, 264, 20009, 30], "avg_logprob": -0.1691228716888211, "compression_ratio": 1.7, "no_speech_prob": 0.0, "words": [{"start": 1027.01, "end": 1027.29, "word": " Now", "probability": 0.6884765625}, {"start": 1027.29, "end": 1027.53, "word": " my", "probability": 0.7509765625}, {"start": 1027.53, "end": 1027.89, "word": " question", "probability": 0.91748046875}, {"start": 1027.89, "end": 1028.29, "word": " is,", "probability": 0.94970703125}, {"start": 1028.83, "end": 1029.07, "word": " do", "probability": 0.794921875}, {"start": 1029.07, "end": 1029.27, "word": " you", "probability": 0.9443359375}, {"start": 1029.27, "end": 1029.71, "word": " think", "probability": 0.91259765625}, {"start": 1029.71, "end": 1030.91, "word": " outliers", "probability": 0.88818359375}, {"start": 1030.91, "end": 1032.17, "word": " affect", "probability": 0.81787109375}, {"start": 1032.17, "end": 1032.65, "word": " the", "probability": 0.88623046875}, {"start": 1032.65, "end": 1033.09, "word": " correlation", "probability": 0.927734375}, {"start": 1033.09, "end": 1033.69, "word": " coefficient?", "probability": 0.92333984375}, {"start": 1037.01, "end": 1037.79, "word": " We", "probability": 0.68017578125}, {"start": 1037.79, "end": 1038.15, "word": " said", "probability": 0.94287109375}, {"start": 1038.15, "end": 1038.45, "word": " last", "probability": 0.8388671875}, {"start": 1038.45, "end": 1038.81, "word": " time", "probability": 0.890625}, {"start": 1038.81, "end": 1039.35, "word": " outliers", "probability": 0.822509765625}, {"start": 1039.35, "end": 1039.93, "word": " affect", "probability": 0.875}, {"start": 1039.93, "end": 1040.77, 
"word": " the", "probability": 0.89453125}, {"start": 1040.77, "end": 1041.11, "word": " mean,", "probability": 0.96240234375}, {"start": 1042.41, "end": 1043.21, "word": " the", "probability": 0.89111328125}, {"start": 1043.21, "end": 1043.63, "word": " range,", "probability": 0.88427734375}, {"start": 1044.75, "end": 1045.45, "word": " the", "probability": 0.86181640625}, {"start": 1045.45, "end": 1046.01, "word": " variance.", "probability": 0.92626953125}, {"start": 1046.71, "end": 1046.93, "word": " Now", "probability": 0.9306640625}, {"start": 1046.93, "end": 1047.13, "word": " the", "probability": 0.72119140625}, {"start": 1047.13, "end": 1047.45, "word": " question", "probability": 0.92236328125}, {"start": 1047.45, "end": 1048.03, "word": " is,", "probability": 0.95068359375}, {"start": 1048.17, "end": 1048.31, "word": " do", "probability": 0.939453125}, {"start": 1048.31, "end": 1048.99, "word": " outliers", "probability": 0.9541015625}, {"start": 1048.99, "end": 1051.15, "word": " affect", "probability": 0.68310546875}, {"start": 1051.15, "end": 1053.07, "word": " the", "probability": 0.908203125}, {"start": 1053.07, "end": 1053.51, "word": " correlation?", "probability": 0.90673828125}], "temperature": 1.0}, {"id": 41, "seek": 108121, "start": 1057.41, "end": 1081.21, "text": " Y. Exactly. The formula for R has X bar in it or Y bar. So it means outliers affect the correlation coefficient. 
So the answer is yes.", "tokens": [398, 13, 7587, 13, 440, 8513, 337, 497, 575, 1783, 2159, 294, 309, 420, 398, 2159, 13, 407, 309, 1355, 484, 23646, 3345, 264, 20009, 17619, 13, 407, 264, 1867, 307, 2086, 13], "avg_logprob": -0.2821691088816699, "compression_ratio": 1.238532110091743, "no_speech_prob": 0.0, "words": [{"start": 1057.41, "end": 1058.17, "word": " Y.", "probability": 0.1243896484375}, {"start": 1063.83, "end": 1064.59, "word": " Exactly.", "probability": 0.38525390625}, {"start": 1064.95, "end": 1065.69, "word": " The", "probability": 0.73486328125}, {"start": 1065.69, "end": 1066.25, "word": " formula", "probability": 0.9267578125}, {"start": 1066.25, "end": 1067.05, "word": " for", "probability": 0.91943359375}, {"start": 1067.05, "end": 1068.43, "word": " R", "probability": 0.873046875}, {"start": 1068.43, "end": 1069.91, "word": " has", "probability": 0.87060546875}, {"start": 1069.91, "end": 1070.15, "word": " X", "probability": 0.67431640625}, {"start": 1070.15, "end": 1070.41, "word": " bar", "probability": 0.8740234375}, {"start": 1070.41, "end": 1070.71, "word": " in", "probability": 0.91748046875}, {"start": 1070.71, "end": 1070.93, "word": " it", "probability": 0.95556640625}, {"start": 1070.93, "end": 1071.13, "word": " or", "probability": 0.68505859375}, {"start": 1071.13, "end": 1071.33, "word": " Y", "probability": 0.97802734375}, {"start": 1071.33, "end": 1071.57, "word": " bar.", "probability": 0.96044921875}, {"start": 1071.65, "end": 1071.77, "word": " So", "probability": 0.93017578125}, {"start": 1071.77, "end": 1071.91, "word": " it", "probability": 0.77783203125}, {"start": 1071.91, "end": 1072.65, "word": " means", "probability": 0.92626953125}, {"start": 1072.65, "end": 1073.63, "word": " outliers", "probability": 0.6708984375}, {"start": 1073.63, "end": 1076.67, "word": " affect", "probability": 0.72705078125}, {"start": 1076.67, "end": 1078.19, "word": " the", "probability": 0.8701171875}, {"start": 1078.19, "end": 1078.63, 
"word": " correlation", "probability": 0.90283203125}, {"start": 1078.63, "end": 1079.17, "word": " coefficient.", "probability": 0.9384765625}, {"start": 1080.05, "end": 1080.55, "word": " So", "probability": 0.93701171875}, {"start": 1080.55, "end": 1080.69, "word": " the", "probability": 0.8701171875}, {"start": 1080.69, "end": 1080.85, "word": " answer", "probability": 0.9560546875}, {"start": 1080.85, "end": 1081.07, "word": " is", "probability": 0.943359375}, {"start": 1081.07, "end": 1081.21, "word": " yes.", "probability": 0.9287109375}], "temperature": 1.0}, {"id": 42, "seek": 111086, "start": 1083.47, "end": 1110.87, "text": " Here we have x bar and y bar. Also, there is another formula to compute R. That formula is given by covariance between x and y. These two formulas are quite similar. I mean, by using this one, we can end with this formula. So this formula depends on this x is y.", "tokens": [1692, 321, 362, 2031, 2159, 293, 288, 2159, 13, 2743, 11, 456, 307, 1071, 8513, 281, 14722, 497, 13, 663, 8513, 307, 2212, 538, 49851, 719, 1296, 2031, 293, 288, 13, 1981, 732, 30546, 366, 1596, 2531, 13, 286, 914, 11, 538, 1228, 341, 472, 11, 321, 393, 917, 365, 341, 8513, 13, 407, 341, 8513, 5946, 322, 341, 2031, 307, 288, 13], "avg_logprob": -0.15344238420948386, "compression_ratio": 1.593939393939394, "no_speech_prob": 0.0, "words": [{"start": 1083.47, "end": 1083.73, "word": " Here", "probability": 0.74560546875}, {"start": 1083.73, "end": 1083.85, "word": " we", "probability": 0.79736328125}, {"start": 1083.85, "end": 1084.01, "word": " have", "probability": 0.9462890625}, {"start": 1084.01, "end": 1084.19, "word": " x", "probability": 0.58056640625}, {"start": 1084.19, "end": 1084.35, "word": " bar", "probability": 0.81005859375}, {"start": 1084.35, "end": 1084.55, "word": " and", "probability": 0.94189453125}, {"start": 1084.55, "end": 1084.71, "word": " y", "probability": 0.99755859375}, {"start": 1084.71, "end": 1084.97, "word": " bar.", "probability": 
0.95263671875}, {"start": 1085.69, "end": 1086.09, "word": " Also,", "probability": 0.94580078125}, {"start": 1086.23, "end": 1086.27, "word": " there", "probability": 0.91064453125}, {"start": 1086.27, "end": 1086.41, "word": " is", "probability": 0.92041015625}, {"start": 1086.41, "end": 1086.75, "word": " another", "probability": 0.92138671875}, {"start": 1086.75, "end": 1087.21, "word": " formula", "probability": 0.919921875}, {"start": 1087.21, "end": 1087.43, "word": " to", "probability": 0.9638671875}, {"start": 1087.43, "end": 1087.91, "word": " compute", "probability": 0.8974609375}, {"start": 1087.91, "end": 1089.35, "word": " R.", "probability": 0.697265625}, {"start": 1089.63, "end": 1089.89, "word": " That", "probability": 0.9130859375}, {"start": 1089.89, "end": 1090.29, "word": " formula", "probability": 0.9130859375}, {"start": 1090.29, "end": 1090.69, "word": " is", "probability": 0.94873046875}, {"start": 1090.69, "end": 1090.91, "word": " given", "probability": 0.89306640625}, {"start": 1090.91, "end": 1091.29, "word": " by", "probability": 0.9697265625}, {"start": 1091.29, "end": 1092.41, "word": " covariance", "probability": 0.766845703125}, {"start": 1092.41, "end": 1092.75, "word": " between", "probability": 0.89501953125}, {"start": 1092.75, "end": 1092.95, "word": " x", "probability": 0.91064453125}, {"start": 1092.95, "end": 1093.09, "word": " and", "probability": 0.94677734375}, {"start": 1093.09, "end": 1093.37, "word": " y.", "probability": 0.9765625}, {"start": 1097.51, "end": 1098.03, "word": " These", "probability": 0.87841796875}, {"start": 1098.03, "end": 1098.27, "word": " two", "probability": 0.91845703125}, {"start": 1098.27, "end": 1098.73, "word": " formulas", "probability": 0.97265625}, {"start": 1098.73, "end": 1099.21, "word": " are", "probability": 0.94384765625}, {"start": 1099.21, "end": 1099.59, "word": " quite", "probability": 0.91015625}, {"start": 1099.59, "end": 1100.01, "word": " similar.", "probability": 
0.96337890625}, {"start": 1101.27, "end": 1101.51, "word": " I", "probability": 0.79052734375}, {"start": 1101.51, "end": 1101.67, "word": " mean,", "probability": 0.96826171875}, {"start": 1101.75, "end": 1101.93, "word": " by", "probability": 0.96435546875}, {"start": 1101.93, "end": 1102.13, "word": " using", "probability": 0.9345703125}, {"start": 1102.13, "end": 1102.37, "word": " this", "probability": 0.9453125}, {"start": 1102.37, "end": 1102.57, "word": " one,", "probability": 0.93017578125}, {"start": 1102.63, "end": 1102.71, "word": " we", "probability": 0.94873046875}, {"start": 1102.71, "end": 1103.05, "word": " can", "probability": 0.94921875}, {"start": 1103.05, "end": 1104.79, "word": " end", "probability": 0.90869140625}, {"start": 1104.79, "end": 1104.97, "word": " with", "probability": 0.89404296875}, {"start": 1104.97, "end": 1105.23, "word": " this", "probability": 0.943359375}, {"start": 1105.23, "end": 1105.63, "word": " formula.", "probability": 0.90869140625}, {"start": 1105.81, "end": 1106.07, "word": " So", "probability": 0.90478515625}, {"start": 1106.07, "end": 1107.61, "word": " this", "probability": 0.78759765625}, {"start": 1107.61, "end": 1108.07, "word": " formula", "probability": 0.90966796875}, {"start": 1108.07, "end": 1108.71, "word": " depends", "probability": 0.90966796875}, {"start": 1108.71, "end": 1109.69, "word": " on", "probability": 0.9482421875}, {"start": 1109.69, "end": 1110.13, "word": " this", "probability": 0.65966796875}, {"start": 1110.13, "end": 1110.35, "word": " x", "probability": 0.908203125}, {"start": 1110.35, "end": 1110.51, "word": " is", "probability": 0.2369384765625}, {"start": 1110.51, "end": 1110.87, "word": " y.", "probability": 0.96337890625}], "temperature": 1.0}, {"id": 43, "seek": 113979, "start": 1112.67, "end": 1139.79, "text": " standard deviations of X and Y. That means outlier will affect the correlation coefficient. So in case of outliers, R could be changed. 
That formula is called simple correlation coefficient. On the other hand, we have population correlation coefficient.", "tokens": [3832, 31219, 763, 295, 1783, 293, 398, 13, 663, 1355, 484, 2753, 486, 3345, 264, 20009, 17619, 13, 407, 294, 1389, 295, 484, 23646, 11, 497, 727, 312, 3105, 13, 663, 8513, 307, 1219, 2199, 20009, 17619, 13, 1282, 264, 661, 1011, 11, 321, 362, 4415, 20009, 17619, 13], "avg_logprob": -0.17890625596046447, "compression_ratio": 1.6933333333333334, "no_speech_prob": 0.0, "words": [{"start": 1112.67, "end": 1113.09, "word": " standard", "probability": 0.5546875}, {"start": 1113.09, "end": 1113.47, "word": " deviations", "probability": 0.6845703125}, {"start": 1113.47, "end": 1113.63, "word": " of", "probability": 0.90234375}, {"start": 1113.63, "end": 1113.79, "word": " X", "probability": 0.462158203125}, {"start": 1113.79, "end": 1113.95, "word": " and", "probability": 0.93212890625}, {"start": 1113.95, "end": 1114.21, "word": " Y.", "probability": 0.9931640625}, {"start": 1114.33, "end": 1114.59, "word": " That", "probability": 0.904296875}, {"start": 1114.59, "end": 1114.85, "word": " means", "probability": 0.92138671875}, {"start": 1114.85, "end": 1115.45, "word": " outlier", "probability": 0.71875}, {"start": 1115.45, "end": 1116.17, "word": " will", "probability": 0.8662109375}, {"start": 1116.17, "end": 1116.75, "word": " affect", "probability": 0.83203125}, {"start": 1116.75, "end": 1119.67, "word": " the", "probability": 0.70263671875}, {"start": 1119.67, "end": 1120.17, "word": " correlation", "probability": 0.92822265625}, {"start": 1120.17, "end": 1120.67, "word": " coefficient.", "probability": 0.59619140625}, {"start": 1120.93, "end": 1121.07, "word": " So", "probability": 0.908203125}, {"start": 1121.07, "end": 1121.37, "word": " in", "probability": 0.76708984375}, {"start": 1121.37, "end": 1122.35, "word": " case", "probability": 0.7041015625}, {"start": 1122.35, "end": 1122.53, "word": " of", "probability": 0.96728515625}, 
{"start": 1122.53, "end": 1123.13, "word": " outliers,", "probability": 0.94580078125}, {"start": 1124.29, "end": 1124.53, "word": " R", "probability": 0.923828125}, {"start": 1124.53, "end": 1124.99, "word": " could", "probability": 0.87060546875}, {"start": 1124.99, "end": 1125.19, "word": " be", "probability": 0.95166015625}, {"start": 1125.19, "end": 1125.67, "word": " changed.", "probability": 0.91552734375}, {"start": 1131.17, "end": 1131.87, "word": " That", "probability": 0.8818359375}, {"start": 1131.87, "end": 1132.23, "word": " formula", "probability": 0.90380859375}, {"start": 1132.23, "end": 1132.49, "word": " is", "probability": 0.8544921875}, {"start": 1132.49, "end": 1132.79, "word": " called", "probability": 0.8984375}, {"start": 1132.79, "end": 1133.39, "word": " simple", "probability": 0.8203125}, {"start": 1133.39, "end": 1135.53, "word": " correlation", "probability": 0.72998046875}, {"start": 1135.53, "end": 1136.13, "word": " coefficient.", "probability": 0.94580078125}, {"start": 1137.27, "end": 1137.61, "word": " On", "probability": 0.9482421875}, {"start": 1137.61, "end": 1137.71, "word": " the", "probability": 0.92626953125}, {"start": 1137.71, "end": 1137.87, "word": " other", "probability": 0.88623046875}, {"start": 1137.87, "end": 1138.11, "word": " hand,", "probability": 0.90869140625}, {"start": 1138.15, "end": 1138.25, "word": " we", "probability": 0.9541015625}, {"start": 1138.25, "end": 1138.39, "word": " have", "probability": 0.94384765625}, {"start": 1138.39, "end": 1138.79, "word": " population", "probability": 0.9365234375}, {"start": 1138.79, "end": 1139.29, "word": " correlation", "probability": 0.92333984375}, {"start": 1139.29, "end": 1139.79, "word": " coefficient.", "probability": 0.95947265625}], "temperature": 1.0}, {"id": 44, "seek": 116976, "start": 1141.14, "end": 1169.76, "text": " If you remember last time, we used X bar as the sample mean and mu as population mean. 
Also, S square as sample variance and sigma square as population variance. Here, R is used as sample coefficient of correlation and rho, this Greek letter pronounced as rho. Rho is used", "tokens": [759, 291, 1604, 1036, 565, 11, 321, 1143, 1783, 2159, 382, 264, 6889, 914, 293, 2992, 382, 4415, 914, 13, 2743, 11, 318, 3732, 382, 6889, 21977, 293, 12771, 3732, 382, 4415, 21977, 13, 1692, 11, 497, 307, 1143, 382, 6889, 17619, 295, 20009, 293, 20293, 11, 341, 10281, 5063, 23155, 382, 20293, 13, 497, 1289, 307, 1143], "avg_logprob": -0.2661546751604242, "compression_ratio": 1.6058823529411765, "no_speech_prob": 0.0, "words": [{"start": 1141.14, "end": 1141.44, "word": " If", "probability": 0.734375}, {"start": 1141.44, "end": 1141.6, "word": " you", "probability": 0.958984375}, {"start": 1141.6, "end": 1141.88, "word": " remember", "probability": 0.8779296875}, {"start": 1141.88, "end": 1142.2, "word": " last", "probability": 0.6474609375}, {"start": 1142.2, "end": 1142.46, "word": " time,", "probability": 0.890625}, {"start": 1142.58, "end": 1142.64, "word": " we", "probability": 0.93603515625}, {"start": 1142.64, "end": 1143.06, "word": " used", "probability": 0.9052734375}, {"start": 1143.06, "end": 1143.98, "word": " X", "probability": 0.468994140625}, {"start": 1143.98, "end": 1144.38, "word": " bar", "probability": 0.76123046875}, {"start": 1144.38, "end": 1145.88, "word": " as", "probability": 0.6962890625}, {"start": 1145.88, "end": 1146.08, "word": " the", "probability": 0.79345703125}, {"start": 1146.08, "end": 1146.32, "word": " sample", "probability": 0.857421875}, {"start": 1146.32, "end": 1146.64, "word": " mean", "probability": 0.95849609375}, {"start": 1146.64, "end": 1148.06, "word": " and", "probability": 0.5869140625}, {"start": 1148.06, "end": 1148.66, "word": " mu", "probability": 0.4306640625}, {"start": 1148.66, "end": 1148.94, "word": " as", "probability": 0.9423828125}, {"start": 1148.94, "end": 1149.36, "word": " population", "probability": 
0.63720703125}, {"start": 1149.36, "end": 1149.74, "word": " mean.", "probability": 0.95166015625}, {"start": 1150.34, "end": 1150.8, "word": " Also,", "probability": 0.89111328125}, {"start": 1151.24, "end": 1151.58, "word": " S", "probability": 0.7197265625}, {"start": 1151.58, "end": 1151.94, "word": " square", "probability": 0.379638671875}, {"start": 1151.94, "end": 1152.2, "word": " as", "probability": 0.53515625}, {"start": 1152.2, "end": 1152.84, "word": " sample", "probability": 0.6533203125}, {"start": 1152.84, "end": 1154.46, "word": " variance", "probability": 0.9228515625}, {"start": 1154.46, "end": 1155.62, "word": " and", "probability": 0.73193359375}, {"start": 1155.62, "end": 1155.94, "word": " sigma", "probability": 0.80322265625}, {"start": 1155.94, "end": 1156.24, "word": " square", "probability": 0.57177734375}, {"start": 1156.24, "end": 1156.44, "word": " as", "probability": 0.8916015625}, {"start": 1156.44, "end": 1156.82, "word": " population", "probability": 0.92333984375}, {"start": 1156.82, "end": 1157.32, "word": " variance.", "probability": 0.9345703125}, {"start": 1157.76, "end": 1158.08, "word": " Here,", "probability": 0.8603515625}, {"start": 1158.4, "end": 1158.74, "word": " R", "probability": 0.89208984375}, {"start": 1158.74, "end": 1159.76, "word": " is", "probability": 0.94580078125}, {"start": 1159.76, "end": 1160.1, "word": " used", "probability": 0.9150390625}, {"start": 1160.1, "end": 1160.64, "word": " as", "probability": 0.96484375}, {"start": 1160.64, "end": 1161.56, "word": " sample", "probability": 0.72802734375}, {"start": 1161.56, "end": 1162.28, "word": " coefficient", "probability": 0.9443359375}, {"start": 1162.28, "end": 1162.54, "word": " of", "probability": 0.8359375}, {"start": 1162.54, "end": 1162.98, "word": " correlation", "probability": 0.95849609375}, {"start": 1162.98, "end": 1164.36, "word": " and", "probability": 0.544921875}, {"start": 1164.36, "end": 1164.74, "word": " rho,", "probability": 
0.78857421875}, {"start": 1165.46, "end": 1165.82, "word": " this", "probability": 0.91650390625}, {"start": 1165.82, "end": 1166.22, "word": " Greek", "probability": 0.8720703125}, {"start": 1166.22, "end": 1166.56, "word": " letter", "probability": 0.94775390625}, {"start": 1166.56, "end": 1167.22, "word": " pronounced", "probability": 0.66015625}, {"start": 1167.22, "end": 1167.76, "word": " as", "probability": 0.9677734375}, {"start": 1167.76, "end": 1168.04, "word": " rho.", "probability": 0.364013671875}, {"start": 1168.88, "end": 1169.2, "word": " Rho", "probability": 0.87646484375}, {"start": 1169.2, "end": 1169.42, "word": " is", "probability": 0.95068359375}, {"start": 1169.42, "end": 1169.76, "word": " used", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 45, "seek": 119950, "start": 1171.2, "end": 1199.5, "text": " for population coefficient of correlation. There are some features of R or Rho. The first one is unity-free. R or Rho is unity-free. That means if X represents... 
And let's assume that the correlation between X and Y", "tokens": [337, 4415, 17619, 295, 20009, 13, 821, 366, 512, 4122, 295, 497, 420, 497, 1289, 13, 440, 700, 472, 307, 18205, 12, 10792, 13, 497, 420, 497, 1289, 307, 18205, 12, 10792, 13, 663, 1355, 498, 1783, 8855, 485, 400, 718, 311, 6552, 300, 264, 20009, 1296, 1783, 293, 398], "avg_logprob": -0.19592524451367996, "compression_ratio": 1.4965517241379311, "no_speech_prob": 0.0, "words": [{"start": 1171.2, "end": 1172.02, "word": " for", "probability": 0.304931640625}, {"start": 1172.02, "end": 1172.84, "word": " population", "probability": 0.82568359375}, {"start": 1172.84, "end": 1173.54, "word": " coefficient", "probability": 0.8798828125}, {"start": 1173.54, "end": 1174.4, "word": " of", "probability": 0.95068359375}, {"start": 1174.4, "end": 1175.16, "word": " correlation.", "probability": 0.88232421875}, {"start": 1177.64, "end": 1178.46, "word": " There", "probability": 0.66455078125}, {"start": 1178.46, "end": 1178.66, "word": " are", "probability": 0.939453125}, {"start": 1178.66, "end": 1178.94, "word": " some", "probability": 0.89990234375}, {"start": 1178.94, "end": 1179.46, "word": " features", "probability": 0.77294921875}, {"start": 1179.46, "end": 1179.98, "word": " of", "probability": 0.9599609375}, {"start": 1179.98, "end": 1180.36, "word": " R", "probability": 0.8896484375}, {"start": 1180.36, "end": 1180.6, "word": " or", "probability": 0.96142578125}, {"start": 1180.6, "end": 1180.84, "word": " Rho.", "probability": 0.689453125}, {"start": 1181.3, "end": 1181.54, "word": " The", "probability": 0.8720703125}, {"start": 1181.54, "end": 1181.8, "word": " first", "probability": 0.88818359375}, {"start": 1181.8, "end": 1182.04, "word": " one", "probability": 0.91357421875}, {"start": 1182.04, "end": 1182.4, "word": " is", "probability": 0.94140625}, {"start": 1182.4, "end": 1183.16, "word": " unity", "probability": 0.78466796875}, {"start": 1183.16, "end": 1183.46, "word": "-free.", 
"probability": 0.719482421875}, {"start": 1184.16, "end": 1184.4, "word": " R", "probability": 0.95166015625}, {"start": 1184.4, "end": 1185.14, "word": " or", "probability": 0.92236328125}, {"start": 1185.14, "end": 1185.34, "word": " Rho", "probability": 0.924560546875}, {"start": 1185.34, "end": 1185.56, "word": " is", "probability": 0.93115234375}, {"start": 1185.56, "end": 1185.82, "word": " unity", "probability": 0.92578125}, {"start": 1185.82, "end": 1186.08, "word": "-free.", "probability": 0.875244140625}, {"start": 1187.32, "end": 1187.7, "word": " That", "probability": 0.8896484375}, {"start": 1187.7, "end": 1187.96, "word": " means", "probability": 0.93310546875}, {"start": 1187.96, "end": 1188.38, "word": " if", "probability": 0.86376953125}, {"start": 1188.38, "end": 1189.0, "word": " X", "probability": 0.7060546875}, {"start": 1189.0, "end": 1194.9, "word": " represents...", "probability": 0.578369140625}, {"start": 1194.9, "end": 1195.62, "word": " And", "probability": 0.603515625}, {"start": 1195.62, "end": 1196.02, "word": " let's", "probability": 0.954833984375}, {"start": 1196.02, "end": 1196.52, "word": " assume", "probability": 0.9130859375}, {"start": 1196.52, "end": 1196.9, "word": " that", "probability": 0.9306640625}, {"start": 1196.9, "end": 1197.68, "word": " the", "probability": 0.837890625}, {"start": 1197.68, "end": 1198.14, "word": " correlation", "probability": 0.91015625}, {"start": 1198.14, "end": 1198.58, "word": " between", "probability": 0.896484375}, {"start": 1198.58, "end": 1198.96, "word": " X", "probability": 0.9814453125}, {"start": 1198.96, "end": 1199.16, "word": " and", "probability": 0.9462890625}, {"start": 1199.16, "end": 1199.5, "word": " Y", "probability": 0.99755859375}], "temperature": 1.0}, {"id": 46, "seek": 122454, "start": 1200.74, "end": 1224.54, "text": " equals 0.75. Now, in this case, there is no unity. You cannot say 0.75 years or 0.75 kilograms. It's unity-free. 
There is no unit for the correlation coefficient, the same as Cv. If you remember Cv, the coefficient of correlation, also this one is unity-free.", "tokens": [6915, 1958, 13, 11901, 13, 823, 11, 294, 341, 1389, 11, 456, 307, 572, 18205, 13, 509, 2644, 584, 1958, 13, 11901, 924, 420, 1958, 13, 11901, 30690, 13, 467, 311, 18205, 12, 10792, 13, 821, 307, 572, 4985, 337, 264, 20009, 17619, 11, 264, 912, 382, 383, 85, 13, 759, 291, 1604, 383, 85, 11, 264, 17619, 295, 20009, 11, 611, 341, 472, 307, 18205, 12, 10792, 13], "avg_logprob": -0.21339285033089775, "compression_ratio": 1.5950920245398772, "no_speech_prob": 0.0, "words": [{"start": 1200.74, "end": 1201.24, "word": " equals", "probability": 0.5419921875}, {"start": 1201.24, "end": 1201.52, "word": " 0", "probability": 0.6240234375}, {"start": 1201.52, "end": 1202.04, "word": ".75.", "probability": 0.97021484375}, {"start": 1204.68, "end": 1205.24, "word": " Now,", "probability": 0.72900390625}, {"start": 1205.28, "end": 1205.34, "word": " in", "probability": 0.8427734375}, {"start": 1205.34, "end": 1205.5, "word": " this", "probability": 0.9482421875}, {"start": 1205.5, "end": 1205.68, "word": " case,", "probability": 0.92236328125}, {"start": 1205.76, "end": 1205.86, "word": " there", "probability": 0.85400390625}, {"start": 1205.86, "end": 1205.98, "word": " is", "probability": 0.919921875}, {"start": 1205.98, "end": 1206.24, "word": " no", "probability": 0.7802734375}, {"start": 1206.24, "end": 1206.64, "word": " unity.", "probability": 0.61669921875}, {"start": 1207.04, "end": 1207.04, "word": " You", "probability": 0.8046875}, {"start": 1207.04, "end": 1207.26, "word": " cannot", "probability": 0.79638671875}, {"start": 1207.26, "end": 1207.7, "word": " say", "probability": 0.81201171875}, {"start": 1207.7, "end": 1208.24, "word": " 0", "probability": 0.865234375}, {"start": 1208.24, "end": 1208.88, "word": ".75", "probability": 0.991455078125}, {"start": 1208.88, "end": 1209.82, "word": " years", 
"probability": 0.69873046875}, {"start": 1209.82, "end": 1210.18, "word": " or", "probability": 0.892578125}, {"start": 1210.18, "end": 1210.44, "word": " 0", "probability": 0.98291015625}, {"start": 1210.44, "end": 1211.02, "word": ".75", "probability": 0.995361328125}, {"start": 1211.02, "end": 1211.76, "word": " kilograms.", "probability": 0.77294921875}, {"start": 1212.46, "end": 1212.9, "word": " It's", "probability": 0.903564453125}, {"start": 1212.9, "end": 1213.22, "word": " unity", "probability": 0.91064453125}, {"start": 1213.22, "end": 1213.48, "word": "-free.", "probability": 0.74169921875}, {"start": 1213.94, "end": 1214.06, "word": " There", "probability": 0.794921875}, {"start": 1214.06, "end": 1214.22, "word": " is", "probability": 0.8955078125}, {"start": 1214.22, "end": 1215.12, "word": " no", "probability": 0.8134765625}, {"start": 1215.12, "end": 1215.52, "word": " unit", "probability": 0.869140625}, {"start": 1215.52, "end": 1215.9, "word": " for", "probability": 0.88427734375}, {"start": 1215.9, "end": 1216.82, "word": " the", "probability": 0.8662109375}, {"start": 1216.82, "end": 1217.42, "word": " correlation", "probability": 0.9052734375}, {"start": 1217.42, "end": 1217.84, "word": " coefficient,", "probability": 0.90380859375}, {"start": 1218.02, "end": 1218.16, "word": " the", "probability": 0.849609375}, {"start": 1218.16, "end": 1218.38, "word": " same", "probability": 0.91552734375}, {"start": 1218.38, "end": 1218.94, "word": " as", "probability": 0.96630859375}, {"start": 1218.94, "end": 1219.78, "word": " Cv.", "probability": 0.648681640625}, {"start": 1219.88, "end": 1220.0, "word": " If", "probability": 0.94677734375}, {"start": 1220.0, "end": 1220.06, "word": " you", "probability": 0.9609375}, {"start": 1220.06, "end": 1220.34, "word": " remember", "probability": 0.8798828125}, {"start": 1220.34, "end": 1220.76, "word": " Cv,", "probability": 0.802734375}, {"start": 1220.92, "end": 1221.12, "word": " the", "probability": 
0.85302734375}, {"start": 1221.12, "end": 1221.6, "word": " coefficient", "probability": 0.91845703125}, {"start": 1221.6, "end": 1221.86, "word": " of", "probability": 0.93505859375}, {"start": 1221.86, "end": 1222.3, "word": " correlation,", "probability": 0.859375}, {"start": 1223.22, "end": 1223.5, "word": " also", "probability": 0.8017578125}, {"start": 1223.5, "end": 1223.76, "word": " this", "probability": 0.79052734375}, {"start": 1223.76, "end": 1223.92, "word": " one", "probability": 0.92333984375}, {"start": 1223.92, "end": 1224.06, "word": " is", "probability": 0.94287109375}, {"start": 1224.06, "end": 1224.32, "word": " unity", "probability": 0.93505859375}, {"start": 1224.32, "end": 1224.54, "word": "-free.", "probability": 0.88330078125}], "temperature": 1.0}, {"id": 47, "seek": 125126, "start": 1226.2, "end": 1251.26, "text": " The second feature of R ranges between minus one and plus one. As I mentioned, R lies between minus one and plus one. Now, by using the value of R, you can determine two things. 
Number one, we can determine the direction.", "tokens": [440, 1150, 4111, 295, 497, 22526, 1296, 3175, 472, 293, 1804, 472, 13, 1018, 286, 2835, 11, 497, 9134, 1296, 3175, 472, 293, 1804, 472, 13, 823, 11, 538, 1228, 264, 2158, 295, 497, 11, 291, 393, 6997, 732, 721, 13, 5118, 472, 11, 321, 393, 6997, 264, 3513, 13], "avg_logprob": -0.1495864023180569, "compression_ratio": 1.608695652173913, "no_speech_prob": 0.0, "words": [{"start": 1226.2, "end": 1226.42, "word": " The", "probability": 0.75830078125}, {"start": 1226.42, "end": 1226.76, "word": " second", "probability": 0.89453125}, {"start": 1226.76, "end": 1227.2, "word": " feature", "probability": 0.953125}, {"start": 1227.2, "end": 1228.28, "word": " of", "probability": 0.9501953125}, {"start": 1228.28, "end": 1228.6, "word": " R", "probability": 0.77978515625}, {"start": 1228.6, "end": 1230.12, "word": " ranges", "probability": 0.77587890625}, {"start": 1230.12, "end": 1230.5, "word": " between", "probability": 0.85546875}, {"start": 1230.5, "end": 1230.78, "word": " minus", "probability": 0.71533203125}, {"start": 1230.78, "end": 1230.98, "word": " one", "probability": 0.62255859375}, {"start": 1230.98, "end": 1231.12, "word": " and", "probability": 0.9306640625}, {"start": 1231.12, "end": 1231.4, "word": " plus", "probability": 0.95068359375}, {"start": 1231.4, "end": 1231.68, "word": " one.", "probability": 0.9287109375}, {"start": 1233.2, "end": 1233.7, "word": " As", "probability": 0.96337890625}, {"start": 1233.7, "end": 1233.78, "word": " I", "probability": 0.28125}, {"start": 1233.78, "end": 1234.24, "word": " mentioned,", "probability": 0.7783203125}, {"start": 1234.62, "end": 1234.9, "word": " R", "probability": 0.98681640625}, {"start": 1234.9, "end": 1236.74, "word": " lies", "probability": 0.9345703125}, {"start": 1236.74, "end": 1237.16, "word": " between", "probability": 0.8837890625}, {"start": 1237.16, "end": 1237.76, "word": " minus", "probability": 0.97900390625}, {"start": 1237.76, "end": 
1238.08, "word": " one", "probability": 0.9169921875}, {"start": 1238.08, "end": 1238.36, "word": " and", "probability": 0.9404296875}, {"start": 1238.36, "end": 1238.9, "word": " plus", "probability": 0.96337890625}, {"start": 1238.9, "end": 1239.2, "word": " one.", "probability": 0.9267578125}, {"start": 1240.76, "end": 1241.22, "word": " Now,", "probability": 0.94189453125}, {"start": 1241.3, "end": 1241.44, "word": " by", "probability": 0.9697265625}, {"start": 1241.44, "end": 1241.8, "word": " using", "probability": 0.935546875}, {"start": 1241.8, "end": 1242.34, "word": " the", "probability": 0.90771484375}, {"start": 1242.34, "end": 1242.64, "word": " value", "probability": 0.97607421875}, {"start": 1242.64, "end": 1242.82, "word": " of", "probability": 0.94189453125}, {"start": 1242.82, "end": 1243.1, "word": " R,", "probability": 0.9931640625}, {"start": 1243.96, "end": 1244.26, "word": " you", "probability": 0.95654296875}, {"start": 1244.26, "end": 1244.5, "word": " can", "probability": 0.9423828125}, {"start": 1244.5, "end": 1245.08, "word": " determine", "probability": 0.916015625}, {"start": 1245.08, "end": 1247.34, "word": " two", "probability": 0.904296875}, {"start": 1247.34, "end": 1247.74, "word": " things.", "probability": 0.8671875}, {"start": 1247.94, "end": 1248.1, "word": " Number", "probability": 0.8525390625}, {"start": 1248.1, "end": 1248.38, "word": " one,", "probability": 0.908203125}, {"start": 1249.44, "end": 1249.64, "word": " we", "probability": 0.875}, {"start": 1249.64, "end": 1249.82, "word": " can", "probability": 0.94091796875}, {"start": 1249.82, "end": 1250.18, "word": " determine", "probability": 0.9296875}, {"start": 1250.18, "end": 1250.36, "word": " the", "probability": 0.8037109375}, {"start": 1250.36, "end": 1251.26, "word": " direction.", "probability": 0.93994140625}], "temperature": 1.0}, {"id": 48, "seek": 127890, "start": 1252.58, "end": 1278.9, "text": " and strength by using the sign you can determine if there 
exists positive or negative so sign of R determine negative or positive relationship the direction the absolute value of R I mean absolute of R I mean ignore the sign", "tokens": [293, 3800, 538, 1228, 264, 1465, 291, 393, 6997, 498, 456, 8198, 3353, 420, 3671, 370, 1465, 295, 497, 6997, 3671, 420, 3353, 2480, 264, 3513, 264, 8236, 2158, 295, 497, 286, 914, 8236, 295, 497, 286, 914, 11200, 264, 1465], "avg_logprob": -0.24069940795501074, "compression_ratio": 1.717557251908397, "no_speech_prob": 0.0, "words": [{"start": 1252.58, "end": 1252.88, "word": " and", "probability": 0.411376953125}, {"start": 1252.88, "end": 1253.36, "word": " strength", "probability": 0.58642578125}, {"start": 1253.36, "end": 1254.18, "word": " by", "probability": 0.450439453125}, {"start": 1254.18, "end": 1254.52, "word": " using", "probability": 0.9296875}, {"start": 1254.52, "end": 1254.72, "word": " the", "probability": 0.859375}, {"start": 1254.72, "end": 1255.04, "word": " sign", "probability": 0.6865234375}, {"start": 1255.04, "end": 1255.92, "word": " you", "probability": 0.8076171875}, {"start": 1255.92, "end": 1256.14, "word": " can", "probability": 0.939453125}, {"start": 1256.14, "end": 1256.54, "word": " determine", "probability": 0.93701171875}, {"start": 1256.54, "end": 1256.8, "word": " if", "probability": 0.931640625}, {"start": 1256.8, "end": 1256.94, "word": " there", "probability": 0.82470703125}, {"start": 1256.94, "end": 1257.36, "word": " exists", "probability": 0.64208984375}, {"start": 1257.36, "end": 1258.88, "word": " positive", "probability": 0.876953125}, {"start": 1258.88, "end": 1259.18, "word": " or", "probability": 0.955078125}, {"start": 1259.18, "end": 1259.56, "word": " negative", "probability": 0.94580078125}, {"start": 1259.56, "end": 1260.68, "word": " so", "probability": 0.5078125}, {"start": 1260.68, "end": 1261.26, "word": " sign", "probability": 0.64697265625}, {"start": 1261.26, "end": 1261.44, "word": " of", "probability": 0.96826171875}, 
{"start": 1261.44, "end": 1261.78, "word": " R", "probability": 0.62841796875}, {"start": 1261.78, "end": 1263.98, "word": " determine", "probability": 0.6748046875}, {"start": 1263.98, "end": 1264.38, "word": " negative", "probability": 0.91455078125}, {"start": 1264.38, "end": 1265.7, "word": " or", "probability": 0.92431640625}, {"start": 1265.7, "end": 1266.1, "word": " positive", "probability": 0.94189453125}, {"start": 1266.1, "end": 1266.78, "word": " relationship", "probability": 0.7109375}, {"start": 1266.78, "end": 1267.68, "word": " the", "probability": 0.51806640625}, {"start": 1267.68, "end": 1268.04, "word": " direction", "probability": 0.8359375}, {"start": 1268.04, "end": 1269.78, "word": " the", "probability": 0.765625}, {"start": 1269.78, "end": 1270.24, "word": " absolute", "probability": 0.876953125}, {"start": 1270.24, "end": 1271.34, "word": " value", "probability": 0.97412109375}, {"start": 1271.34, "end": 1274.16, "word": " of", "probability": 0.912109375}, {"start": 1274.16, "end": 1274.52, "word": " R", "probability": 0.966796875}, {"start": 1274.52, "end": 1276.04, "word": " I", "probability": 0.60791015625}, {"start": 1276.04, "end": 1276.14, "word": " mean", "probability": 0.96142578125}, {"start": 1276.14, "end": 1276.44, "word": " absolute", "probability": 0.59716796875}, {"start": 1276.44, "end": 1276.78, "word": " of", "probability": 0.66845703125}, {"start": 1276.78, "end": 1277.16, "word": " R", "probability": 0.9892578125}, {"start": 1277.16, "end": 1277.84, "word": " I", "probability": 0.79443359375}, {"start": 1277.84, "end": 1277.96, "word": " mean", "probability": 0.9638671875}, {"start": 1277.96, "end": 1278.26, "word": " ignore", "probability": 0.85986328125}, {"start": 1278.26, "end": 1278.48, "word": " the", "probability": 0.908203125}, {"start": 1278.48, "end": 1278.9, "word": " sign", "probability": 0.8984375}], "temperature": 1.0}, {"id": 49, "seek": 130944, "start": 1280.66, "end": 1309.44, "text": " So the absolute 
value of R determines the strength. So by using the sine of R, you can determine the direction, either positive or negative. By using the absolute value, you can determine the strength. We can split the strength into two parts, either strong, moderate, or weak.", "tokens": [407, 264, 8236, 2158, 295, 497, 24799, 264, 3800, 13, 407, 538, 1228, 264, 18609, 295, 497, 11, 291, 393, 6997, 264, 3513, 11, 2139, 3353, 420, 3671, 13, 3146, 1228, 264, 8236, 2158, 11, 291, 393, 6997, 264, 3800, 13, 492, 393, 7472, 264, 3800, 666, 732, 3166, 11, 2139, 2068, 11, 18174, 11, 420, 5336, 13], "avg_logprob": -0.16843220035908585, "compression_ratio": 1.793548387096774, "no_speech_prob": 0.0, "words": [{"start": 1280.66, "end": 1280.94, "word": " So", "probability": 0.5810546875}, {"start": 1280.94, "end": 1281.08, "word": " the", "probability": 0.65380859375}, {"start": 1281.08, "end": 1281.38, "word": " absolute", "probability": 0.8818359375}, {"start": 1281.38, "end": 1281.64, "word": " value", "probability": 0.98046875}, {"start": 1281.64, "end": 1281.78, "word": " of", "probability": 0.94580078125}, {"start": 1281.78, "end": 1281.98, "word": " R", "probability": 0.76416015625}, {"start": 1281.98, "end": 1282.66, "word": " determines", "probability": 0.8955078125}, {"start": 1282.66, "end": 1283.7, "word": " the", "probability": 0.83251953125}, {"start": 1283.7, "end": 1284.1, "word": " strength.", "probability": 0.85205078125}, {"start": 1287.7, "end": 1288.38, "word": " So", "probability": 0.751953125}, {"start": 1288.38, "end": 1288.54, "word": " by", "probability": 0.8447265625}, {"start": 1288.54, "end": 1288.9, "word": " using", "probability": 0.93212890625}, {"start": 1288.9, "end": 1289.5, "word": " the", "probability": 0.8583984375}, {"start": 1289.5, "end": 1289.7, "word": " sine", "probability": 0.490234375}, {"start": 1289.7, "end": 1289.84, "word": " of", "probability": 0.96826171875}, {"start": 1289.84, "end": 1289.98, "word": " R,", "probability": 0.9833984375}, 
{"start": 1290.02, "end": 1290.1, "word": " you", "probability": 0.84716796875}, {"start": 1290.1, "end": 1290.26, "word": " can", "probability": 0.927734375}, {"start": 1290.26, "end": 1290.56, "word": " determine", "probability": 0.92724609375}, {"start": 1290.56, "end": 1290.76, "word": " the", "probability": 0.83203125}, {"start": 1290.76, "end": 1291.24, "word": " direction,", "probability": 0.97705078125}, {"start": 1292.46, "end": 1292.7, "word": " either", "probability": 0.82763671875}, {"start": 1292.7, "end": 1293.06, "word": " positive", "probability": 0.9345703125}, {"start": 1293.06, "end": 1293.3, "word": " or", "probability": 0.9658203125}, {"start": 1293.3, "end": 1293.6, "word": " negative.", "probability": 0.94580078125}, {"start": 1295.06, "end": 1295.42, "word": " By", "probability": 0.90087890625}, {"start": 1295.42, "end": 1295.68, "word": " using", "probability": 0.9296875}, {"start": 1295.68, "end": 1295.86, "word": " the", "probability": 0.8935546875}, {"start": 1295.86, "end": 1296.14, "word": " absolute", "probability": 0.89111328125}, {"start": 1296.14, "end": 1296.56, "word": " value,", "probability": 0.9794921875}, {"start": 1296.56, "end": 1296.7, "word": " you", "probability": 0.8505859375}, {"start": 1296.7, "end": 1296.84, "word": " can", "probability": 0.93505859375}, {"start": 1296.84, "end": 1297.26, "word": " determine", "probability": 0.94140625}, {"start": 1297.26, "end": 1297.74, "word": " the", "probability": 0.90625}, {"start": 1297.74, "end": 1298.4, "word": " strength.", "probability": 0.87939453125}, {"start": 1299.74, "end": 1300.02, "word": " We", "probability": 0.92626953125}, {"start": 1300.02, "end": 1300.42, "word": " can", "probability": 0.947265625}, {"start": 1300.42, "end": 1302.04, "word": " split", "probability": 0.931640625}, {"start": 1302.04, "end": 1302.38, "word": " the", "probability": 0.892578125}, {"start": 1302.38, "end": 1302.86, "word": " strength", "probability": 0.8935546875}, {"start": 1302.86, 
"end": 1303.28, "word": " into", "probability": 0.8525390625}, {"start": 1303.28, "end": 1303.5, "word": " two", "probability": 0.8994140625}, {"start": 1303.5, "end": 1303.98, "word": " parts,", "probability": 0.8505859375}, {"start": 1304.56, "end": 1304.9, "word": " either", "probability": 0.94189453125}, {"start": 1304.9, "end": 1305.64, "word": " strong,", "probability": 0.88916015625}, {"start": 1307.4, "end": 1307.82, "word": " moderate,", "probability": 0.935546875}, {"start": 1308.5, "end": 1309.24, "word": " or", "probability": 0.9677734375}, {"start": 1309.24, "end": 1309.44, "word": " weak.", "probability": 0.970703125}], "temperature": 1.0}, {"id": 50, "seek": 132943, "start": 1311.77, "end": 1329.43, "text": " So weak, moderate, and strong by using the absolute value of R. The closer to minus one, if R is close to minus one, the stronger the negative relationship between X and Y. For example, imagine", "tokens": [407, 5336, 11, 18174, 11, 293, 2068, 538, 1228, 264, 8236, 2158, 295, 497, 13, 440, 4966, 281, 3175, 472, 11, 498, 497, 307, 1998, 281, 3175, 472, 11, 264, 7249, 264, 3671, 2480, 1296, 1783, 293, 398, 13, 1171, 1365, 11, 3811], "avg_logprob": -0.19389205053448677, "compression_ratio": 1.4057971014492754, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1311.77, "end": 1312.45, "word": " So", "probability": 0.342041015625}, {"start": 1312.45, "end": 1312.81, "word": " weak,", "probability": 0.794921875}, {"start": 1313.77, "end": 1314.03, "word": " moderate,", "probability": 0.90966796875}, {"start": 1314.79, "end": 1315.65, "word": " and", "probability": 0.93310546875}, {"start": 1315.65, "end": 1316.13, "word": " strong", "probability": 0.896484375}, {"start": 1316.13, "end": 1317.29, "word": " by", "probability": 0.501953125}, {"start": 1317.29, "end": 1317.65, "word": " using", "probability": 0.9267578125}, {"start": 1317.65, "end": 1318.17, "word": " the", "probability": 0.89453125}, {"start": 1318.17, "end": 1318.89, "word": 
" absolute", "probability": 0.90283203125}, {"start": 1318.89, "end": 1319.13, "word": " value", "probability": 0.98291015625}, {"start": 1319.13, "end": 1319.31, "word": " of", "probability": 0.77978515625}, {"start": 1319.31, "end": 1319.51, "word": " R.", "probability": 0.7587890625}, {"start": 1320.11, "end": 1320.57, "word": " The", "probability": 0.8603515625}, {"start": 1320.57, "end": 1320.91, "word": " closer", "probability": 0.91796875}, {"start": 1320.91, "end": 1321.15, "word": " to", "probability": 0.96484375}, {"start": 1321.15, "end": 1321.41, "word": " minus", "probability": 0.7548828125}, {"start": 1321.41, "end": 1321.75, "word": " one,", "probability": 0.6064453125}, {"start": 1322.85, "end": 1323.03, "word": " if", "probability": 0.86962890625}, {"start": 1323.03, "end": 1323.23, "word": " R", "probability": 0.98193359375}, {"start": 1323.23, "end": 1323.37, "word": " is", "probability": 0.93115234375}, {"start": 1323.37, "end": 1323.69, "word": " close", "probability": 0.865234375}, {"start": 1323.69, "end": 1323.87, "word": " to", "probability": 0.96435546875}, {"start": 1323.87, "end": 1324.07, "word": " minus", "probability": 0.97607421875}, {"start": 1324.07, "end": 1324.37, "word": " one,", "probability": 0.91845703125}, {"start": 1324.95, "end": 1325.09, "word": " the", "probability": 0.900390625}, {"start": 1325.09, "end": 1325.53, "word": " stronger", "probability": 0.7685546875}, {"start": 1325.53, "end": 1325.93, "word": " the", "probability": 0.8251953125}, {"start": 1325.93, "end": 1326.33, "word": " negative", "probability": 0.94580078125}, {"start": 1326.33, "end": 1327.01, "word": " relationship", "probability": 0.92041015625}, {"start": 1327.01, "end": 1327.35, "word": " between", "probability": 0.81640625}, {"start": 1327.35, "end": 1327.55, "word": " X", "probability": 0.69482421875}, {"start": 1327.55, "end": 1327.73, "word": " and", "probability": 0.9482421875}, {"start": 1327.73, "end": 1327.91, "word": " Y.", 
"probability": 0.99072265625}, {"start": 1327.97, "end": 1328.13, "word": " For", "probability": 0.93798828125}, {"start": 1328.13, "end": 1328.51, "word": " example,", "probability": 0.97607421875}, {"start": 1329.01, "end": 1329.43, "word": " imagine", "probability": 0.89794921875}], "temperature": 1.0}, {"id": 51, "seek": 137043, "start": 1342.67, "end": 1370.43, "text": " And as we mentioned, R ranges between minus 1 and plus 1. So if R is close to minus 1, it's a strong relationship. Strong linked relationship. The closer to 1, the stronger the positive relationship. I mean, if R is close. Strong positive.", "tokens": [400, 382, 321, 2835, 11, 497, 22526, 1296, 3175, 502, 293, 1804, 502, 13, 407, 498, 497, 307, 1998, 281, 3175, 502, 11, 309, 311, 257, 2068, 2480, 13, 22792, 9408, 2480, 13, 440, 4966, 281, 502, 11, 264, 7249, 264, 3353, 2480, 13, 286, 914, 11, 498, 497, 307, 1998, 13, 22792, 3353, 13], "avg_logprob": -0.25139509833284784, "compression_ratio": 1.6783216783216783, "no_speech_prob": 0.0, "words": [{"start": 1342.67, "end": 1342.97, "word": " And", "probability": 0.5986328125}, {"start": 1342.97, "end": 1343.17, "word": " as", "probability": 0.6259765625}, {"start": 1343.17, "end": 1343.77, "word": " we", "probability": 0.57421875}, {"start": 1343.77, "end": 1344.11, "word": " mentioned,", "probability": 0.79638671875}, {"start": 1344.25, "end": 1344.51, "word": " R", "probability": 0.70703125}, {"start": 1344.51, "end": 1345.25, "word": " ranges", "probability": 0.7275390625}, {"start": 1345.25, "end": 1345.57, "word": " between", "probability": 0.75390625}, {"start": 1345.57, "end": 1345.83, "word": " minus", "probability": 0.433837890625}, {"start": 1345.83, "end": 1346.03, "word": " 1", "probability": 0.3779296875}, {"start": 1346.03, "end": 1346.13, "word": " and", "probability": 0.8486328125}, {"start": 1346.13, "end": 1346.39, "word": " plus", "probability": 0.95947265625}, {"start": 1346.39, "end": 1346.63, "word": " 1.", "probability": 
0.97998046875}, {"start": 1350.07, "end": 1350.69, "word": " So", "probability": 0.912109375}, {"start": 1350.69, "end": 1350.99, "word": " if", "probability": 0.75732421875}, {"start": 1350.99, "end": 1351.19, "word": " R", "probability": 0.9921875}, {"start": 1351.19, "end": 1351.33, "word": " is", "probability": 0.93896484375}, {"start": 1351.33, "end": 1351.67, "word": " close", "probability": 0.888671875}, {"start": 1351.67, "end": 1351.87, "word": " to", "probability": 0.96923828125}, {"start": 1351.87, "end": 1352.11, "word": " minus", "probability": 0.97314453125}, {"start": 1352.11, "end": 1352.47, "word": " 1,", "probability": 0.96142578125}, {"start": 1354.15, "end": 1355.25, "word": " it's", "probability": 0.74560546875}, {"start": 1355.25, "end": 1355.37, "word": " a", "probability": 0.982421875}, {"start": 1355.37, "end": 1355.71, "word": " strong", "probability": 0.89013671875}, {"start": 1355.71, "end": 1356.43, "word": " relationship.", "probability": 0.84912109375}, {"start": 1357.77, "end": 1358.37, "word": " Strong", "probability": 0.7412109375}, {"start": 1358.37, "end": 1358.59, "word": " linked", "probability": 0.1868896484375}, {"start": 1358.59, "end": 1359.19, "word": " relationship.", "probability": 0.89599609375}, {"start": 1360.63, "end": 1361.25, "word": " The", "probability": 0.86669921875}, {"start": 1361.25, "end": 1361.59, "word": " closer", "probability": 0.92138671875}, {"start": 1361.59, "end": 1361.87, "word": " to", "probability": 0.92822265625}, {"start": 1361.87, "end": 1362.59, "word": " 1,", "probability": 0.50537109375}, {"start": 1363.85, "end": 1364.29, "word": " the", "probability": 0.90283203125}, {"start": 1364.29, "end": 1364.69, "word": " stronger", "probability": 0.84375}, {"start": 1364.69, "end": 1364.91, "word": " the", "probability": 0.85791015625}, {"start": 1364.91, "end": 1365.19, "word": " positive", "probability": 0.9375}, {"start": 1365.19, "end": 1365.81, "word": " relationship.", "probability": 
0.9111328125}, {"start": 1366.03, "end": 1366.17, "word": " I", "probability": 0.94384765625}, {"start": 1366.17, "end": 1366.35, "word": " mean,", "probability": 0.9638671875}, {"start": 1366.43, "end": 1366.59, "word": " if", "probability": 0.94970703125}, {"start": 1366.59, "end": 1366.75, "word": " R", "probability": 0.99169921875}, {"start": 1366.75, "end": 1366.89, "word": " is", "probability": 0.9326171875}, {"start": 1366.89, "end": 1367.35, "word": " close.", "probability": 0.6376953125}, {"start": 1368.61, "end": 1369.23, "word": " Strong", "probability": 0.74365234375}, {"start": 1369.23, "end": 1370.43, "word": " positive.", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 52, "seek": 139594, "start": 1371.5, "end": 1395.94, "text": " So strong in either direction, either to the left side or to the right side. Strong negative. On the other hand, there exists strong negative relationship. Positive. Positive. If R is close to zero, weak. Here we can say there exists weak relationship between X and Y.", "tokens": [407, 2068, 294, 2139, 3513, 11, 2139, 281, 264, 1411, 1252, 420, 281, 264, 558, 1252, 13, 22792, 3671, 13, 1282, 264, 661, 1011, 11, 456, 8198, 2068, 3671, 2480, 13, 46326, 13, 46326, 13, 759, 497, 307, 1998, 281, 4018, 11, 5336, 13, 1692, 321, 393, 584, 456, 8198, 5336, 2480, 1296, 1783, 293, 398, 13], "avg_logprob": -0.24488146860024024, "compression_ratio": 1.6918238993710693, "no_speech_prob": 0.0, "words": [{"start": 1371.5, "end": 1371.8, "word": " So", "probability": 0.79150390625}, {"start": 1371.8, "end": 1372.22, "word": " strong", "probability": 0.6083984375}, {"start": 1372.22, "end": 1372.48, "word": " in", "probability": 0.884765625}, {"start": 1372.48, "end": 1372.78, "word": " either", "probability": 0.9541015625}, {"start": 1372.78, "end": 1373.34, "word": " direction,", "probability": 0.97265625}, {"start": 1373.72, "end": 1374.02, "word": " either", "probability": 0.919921875}, {"start": 1374.02, "end": 1374.48, 
"word": " to", "probability": 0.92041015625}, {"start": 1374.48, "end": 1374.74, "word": " the", "probability": 0.91845703125}, {"start": 1374.74, "end": 1375.12, "word": " left", "probability": 0.9423828125}, {"start": 1375.12, "end": 1375.48, "word": " side", "probability": 0.83935546875}, {"start": 1375.48, "end": 1375.72, "word": " or", "probability": 0.86865234375}, {"start": 1375.72, "end": 1375.84, "word": " to", "probability": 0.83203125}, {"start": 1375.84, "end": 1375.98, "word": " the", "probability": 0.912109375}, {"start": 1375.98, "end": 1376.2, "word": " right", "probability": 0.9169921875}, {"start": 1376.2, "end": 1376.56, "word": " side.", "probability": 0.8623046875}, {"start": 1377.04, "end": 1377.64, "word": " Strong", "probability": 0.60791015625}, {"start": 1377.64, "end": 1377.98, "word": " negative.", "probability": 0.7080078125}, {"start": 1378.7, "end": 1378.98, "word": " On", "probability": 0.92138671875}, {"start": 1378.98, "end": 1379.08, "word": " the", "probability": 0.89111328125}, {"start": 1379.08, "end": 1379.26, "word": " other", "probability": 0.88525390625}, {"start": 1379.26, "end": 1379.56, "word": " hand,", "probability": 0.91748046875}, {"start": 1379.64, "end": 1379.72, "word": " there", "probability": 0.91015625}, {"start": 1379.72, "end": 1379.98, "word": " exists", "probability": 0.77880859375}, {"start": 1379.98, "end": 1380.28, "word": " strong", "probability": 0.81396484375}, {"start": 1380.28, "end": 1380.64, "word": " negative", "probability": 0.90576171875}, {"start": 1380.64, "end": 1381.28, "word": " relationship.", "probability": 0.83984375}, {"start": 1382.64, "end": 1382.8, "word": " Positive.", "probability": 0.6796875}, {"start": 1382.88, "end": 1382.88, "word": " Positive.", "probability": 0.6640625}, {"start": 1384.9, "end": 1385.5, "word": " If", "probability": 0.24951171875}, {"start": 1385.5, "end": 1385.82, "word": " R", "probability": 0.611328125}, {"start": 1385.82, "end": 1385.94, "word": " is", 
"probability": 0.498046875}, {"start": 1385.94, "end": 1386.18, "word": " close", "probability": 0.88720703125}, {"start": 1386.18, "end": 1386.38, "word": " to", "probability": 0.96923828125}, {"start": 1386.38, "end": 1386.68, "word": " zero,", "probability": 0.66650390625}, {"start": 1387.2, "end": 1388.0, "word": " weak.", "probability": 0.8662109375}, {"start": 1388.74, "end": 1389.18, "word": " Here", "probability": 0.51416015625}, {"start": 1389.18, "end": 1389.4, "word": " we", "probability": 0.6689453125}, {"start": 1389.4, "end": 1389.7, "word": " can", "probability": 0.94287109375}, {"start": 1389.7, "end": 1390.0, "word": " say", "probability": 0.8583984375}, {"start": 1390.0, "end": 1390.24, "word": " there", "probability": 0.78662109375}, {"start": 1390.24, "end": 1390.64, "word": " exists", "probability": 0.84619140625}, {"start": 1390.64, "end": 1391.1, "word": " weak", "probability": 0.86767578125}, {"start": 1391.1, "end": 1393.1, "word": " relationship", "probability": 0.9033203125}, {"start": 1393.1, "end": 1395.44, "word": " between", "probability": 0.87158203125}, {"start": 1395.44, "end": 1395.62, "word": " X", "probability": 0.6591796875}, {"start": 1395.62, "end": 1395.76, "word": " and", "probability": 0.947265625}, {"start": 1395.76, "end": 1395.94, "word": " Y.", "probability": 0.99755859375}], "temperature": 1.0}, {"id": 53, "seek": 142834, "start": 1399.26, "end": 1428.34, "text": " If R is close to 0.5 or minus 0.5, you can say there exists positive-moderate or negative-moderate relationship. So you can split or you can divide the strength of the relationship between X and Y into three parts. 
Strong, close to minus one of", "tokens": [759, 497, 307, 1998, 281, 1958, 13, 20, 420, 3175, 1958, 13, 20, 11, 291, 393, 584, 456, 8198, 3353, 12, 8014, 260, 473, 420, 3671, 12, 8014, 260, 473, 2480, 13, 407, 291, 393, 7472, 420, 291, 393, 9845, 264, 3800, 295, 264, 2480, 1296, 1783, 293, 398, 666, 1045, 3166, 13, 22792, 11, 1998, 281, 3175, 472, 295], "avg_logprob": -0.22886782396035116, "compression_ratio": 1.5705128205128205, "no_speech_prob": 0.0, "words": [{"start": 1399.26, "end": 1399.64, "word": " If", "probability": 0.56298828125}, {"start": 1399.64, "end": 1400.02, "word": " R", "probability": 0.67431640625}, {"start": 1400.02, "end": 1400.18, "word": " is", "probability": 0.888671875}, {"start": 1400.18, "end": 1400.76, "word": " close", "probability": 0.85009765625}, {"start": 1400.76, "end": 1401.12, "word": " to", "probability": 0.95263671875}, {"start": 1401.12, "end": 1401.4, "word": " 0", "probability": 0.26123046875}, {"start": 1401.4, "end": 1401.92, "word": ".5", "probability": 0.99267578125}, {"start": 1401.92, "end": 1405.48, "word": " or", "probability": 0.488037109375}, {"start": 1405.48, "end": 1405.86, "word": " minus", "probability": 0.49609375}, {"start": 1405.86, "end": 1406.08, "word": " 0", "probability": 0.96923828125}, {"start": 1406.08, "end": 1406.54, "word": ".5,", "probability": 0.998046875}, {"start": 1408.5, "end": 1408.9, "word": " you", "probability": 0.826171875}, {"start": 1408.9, "end": 1409.16, "word": " can", "probability": 0.9365234375}, {"start": 1409.16, "end": 1409.56, "word": " say", "probability": 0.7958984375}, {"start": 1409.56, "end": 1409.88, "word": " there", "probability": 0.6787109375}, {"start": 1409.88, "end": 1410.44, "word": " exists", "probability": 0.6162109375}, {"start": 1410.44, "end": 1412.32, "word": " positive", "probability": 0.70947265625}, {"start": 1412.32, "end": 1414.2, "word": "-moderate", "probability": 0.8070068359375}, {"start": 1414.2, "end": 1415.78, "word": " or", "probability": 
0.81005859375}, {"start": 1415.78, "end": 1416.34, "word": " negative", "probability": 0.9453125}, {"start": 1416.34, "end": 1416.84, "word": "-moderate", "probability": 0.9771728515625}, {"start": 1416.84, "end": 1417.36, "word": " relationship.", "probability": 0.8779296875}, {"start": 1418.08, "end": 1418.84, "word": " So", "probability": 0.92626953125}, {"start": 1418.84, "end": 1419.02, "word": " you", "probability": 0.7373046875}, {"start": 1419.02, "end": 1419.24, "word": " can", "probability": 0.9384765625}, {"start": 1419.24, "end": 1419.64, "word": " split", "probability": 0.91748046875}, {"start": 1419.64, "end": 1420.12, "word": " or", "probability": 0.80419921875}, {"start": 1420.12, "end": 1420.28, "word": " you", "probability": 0.9111328125}, {"start": 1420.28, "end": 1420.48, "word": " can", "probability": 0.9384765625}, {"start": 1420.48, "end": 1420.86, "word": " divide", "probability": 0.9375}, {"start": 1420.86, "end": 1421.24, "word": " the", "probability": 0.9150390625}, {"start": 1421.24, "end": 1421.78, "word": " strength", "probability": 0.8046875}, {"start": 1421.78, "end": 1422.2, "word": " of", "probability": 0.96728515625}, {"start": 1422.2, "end": 1422.36, "word": " the", "probability": 0.91357421875}, {"start": 1422.36, "end": 1422.82, "word": " relationship", "probability": 0.9072265625}, {"start": 1422.82, "end": 1423.16, "word": " between", "probability": 0.90869140625}, {"start": 1423.16, "end": 1423.36, "word": " X", "probability": 0.609375}, {"start": 1423.36, "end": 1423.48, "word": " and", "probability": 0.93994140625}, {"start": 1423.48, "end": 1423.64, "word": " Y", "probability": 0.99560546875}, {"start": 1423.64, "end": 1423.86, "word": " into", "probability": 0.85400390625}, {"start": 1423.86, "end": 1424.1, "word": " three", "probability": 0.85791015625}, {"start": 1424.1, "end": 1424.54, "word": " parts.", "probability": 0.83642578125}, {"start": 1425.86, "end": 1426.62, "word": " Strong,", "probability": 
0.88427734375}, {"start": 1427.1, "end": 1427.48, "word": " close", "probability": 0.85595703125}, {"start": 1427.48, "end": 1427.7, "word": " to", "probability": 0.96875}, {"start": 1427.7, "end": 1427.96, "word": " minus", "probability": 0.95654296875}, {"start": 1427.96, "end": 1428.18, "word": " one", "probability": 0.62890625}, {"start": 1428.18, "end": 1428.34, "word": " of", "probability": 0.307373046875}], "temperature": 1.0}, {"id": 54, "seek": 145650, "start": 1428.48, "end": 1456.5, "text": " Plus one, weak, close to zero, moderate, close to 0.5. 0.5 is halfway between 0 and 1, and minus 0.5 is also halfway between minus 1 and 0. Now for example, what's about if R equals minus 0.5? Suppose R1 is minus 0.5.", "tokens": [7721, 472, 11, 5336, 11, 1998, 281, 4018, 11, 18174, 11, 1998, 281, 1958, 13, 20, 13, 1958, 13, 20, 307, 15461, 1296, 1958, 293, 502, 11, 293, 3175, 1958, 13, 20, 307, 611, 15461, 1296, 3175, 502, 293, 1958, 13, 823, 337, 1365, 11, 437, 311, 466, 498, 497, 6915, 3175, 1958, 13, 20, 30, 21360, 497, 16, 307, 3175, 1958, 13, 20, 13], "avg_logprob": -0.2428977293047038, "compression_ratio": 1.4931506849315068, "no_speech_prob": 0.0, "words": [{"start": 1428.48, "end": 1428.8, "word": " Plus", "probability": 0.1549072265625}, {"start": 1428.8, "end": 1429.2, "word": " one,", "probability": 0.66552734375}, {"start": 1429.86, "end": 1430.7, "word": " weak,", "probability": 0.92431640625}, {"start": 1431.06, "end": 1431.64, "word": " close", "probability": 0.4580078125}, {"start": 1431.64, "end": 1431.8, "word": " to", "probability": 0.9658203125}, {"start": 1431.8, "end": 1432.06, "word": " zero,", "probability": 0.86181640625}, {"start": 1433.32, "end": 1433.66, "word": " moderate,", "probability": 0.87744140625}, {"start": 1434.18, "end": 1434.6, "word": " close", "probability": 0.892578125}, {"start": 1434.6, "end": 1435.64, "word": " to", "probability": 0.6845703125}, {"start": 1435.64, "end": 1435.86, "word": " 0", "probability": 
0.58740234375}, {"start": 1435.86, "end": 1436.2, "word": ".5.", "probability": 0.995361328125}, {"start": 1438.12, "end": 1438.8, "word": " 0", "probability": 0.4345703125}, {"start": 1438.8, "end": 1439.24, "word": ".5", "probability": 0.99853515625}, {"start": 1439.24, "end": 1439.58, "word": " is", "probability": 0.8359375}, {"start": 1439.58, "end": 1439.98, "word": " halfway", "probability": 0.446533203125}, {"start": 1439.98, "end": 1441.48, "word": " between", "probability": 0.8544921875}, {"start": 1441.48, "end": 1441.76, "word": " 0", "probability": 0.5048828125}, {"start": 1441.76, "end": 1441.9, "word": " and", "probability": 0.93994140625}, {"start": 1441.9, "end": 1442.14, "word": " 1,", "probability": 0.93994140625}, {"start": 1442.74, "end": 1443.06, "word": " and", "probability": 0.92822265625}, {"start": 1443.06, "end": 1443.44, "word": " minus", "probability": 0.73779296875}, {"start": 1443.44, "end": 1443.68, "word": " 0", "probability": 0.98193359375}, {"start": 1443.68, "end": 1444.06, "word": ".5", "probability": 0.998779296875}, {"start": 1444.06, "end": 1444.32, "word": " is", "probability": 0.9287109375}, {"start": 1444.32, "end": 1444.58, "word": " also", "probability": 0.85986328125}, {"start": 1444.58, "end": 1444.98, "word": " halfway", "probability": 0.58740234375}, {"start": 1444.98, "end": 1445.34, "word": " between", "probability": 0.86865234375}, {"start": 1445.34, "end": 1445.68, "word": " minus", "probability": 0.8818359375}, {"start": 1445.68, "end": 1445.88, "word": " 1", "probability": 0.77587890625}, {"start": 1445.88, "end": 1446.02, "word": " and", "probability": 0.93212890625}, {"start": 1446.02, "end": 1446.3, "word": " 0.", "probability": 0.91259765625}, {"start": 1447.82, "end": 1448.5, "word": " Now", "probability": 0.9052734375}, {"start": 1448.5, "end": 1448.7, "word": " for", "probability": 0.5263671875}, {"start": 1448.7, "end": 1449.04, "word": " example,", "probability": 0.978515625}, {"start": 1449.92, "end": 
1450.86, "word": " what's", "probability": 0.854248046875}, {"start": 1450.86, "end": 1451.1, "word": " about", "probability": 0.89990234375}, {"start": 1451.1, "end": 1451.32, "word": " if", "probability": 0.923828125}, {"start": 1451.32, "end": 1451.7, "word": " R", "probability": 0.73095703125}, {"start": 1451.7, "end": 1453.74, "word": " equals", "probability": 0.4697265625}, {"start": 1453.74, "end": 1454.18, "word": " minus", "probability": 0.9736328125}, {"start": 1454.18, "end": 1454.4, "word": " 0", "probability": 0.96923828125}, {"start": 1454.4, "end": 1454.7, "word": ".5?", "probability": 0.9892578125}, {"start": 1454.8, "end": 1455.04, "word": " Suppose", "probability": 0.8046875}, {"start": 1455.04, "end": 1455.46, "word": " R1", "probability": 0.948486328125}, {"start": 1455.46, "end": 1455.58, "word": " is", "probability": 0.91845703125}, {"start": 1455.58, "end": 1455.88, "word": " minus", "probability": 0.9794921875}, {"start": 1455.88, "end": 1456.18, "word": " 0", "probability": 0.98974609375}, {"start": 1456.18, "end": 1456.5, "word": ".5.", "probability": 0.998046875}], "temperature": 1.0}, {"id": 55, "seek": 147842, "start": 1460.18, "end": 1478.42, "text": " strong negative or equal minus point eight strong negative which is more strong nine nine because this value is close closer to minus one than", "tokens": [2068, 3671, 420, 2681, 3175, 935, 3180, 2068, 3671, 597, 307, 544, 2068, 4949, 4949, 570, 341, 2158, 307, 1998, 4966, 281, 3175, 472, 813], "avg_logprob": -0.3686899176010719, "compression_ratio": 1.5212765957446808, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1460.18, "end": 1460.58, "word": " strong", "probability": 0.162109375}, {"start": 1460.58, "end": 1460.98, "word": " negative", "probability": 0.7353515625}, {"start": 1460.98, "end": 1463.48, "word": " or", "probability": 0.46435546875}, {"start": 1463.48, "end": 1464.84, "word": " equal", "probability": 0.74072265625}, {"start": 1464.84, "end": 1465.24, 
"word": " minus", "probability": 0.7158203125}, {"start": 1465.24, "end": 1465.56, "word": " point", "probability": 0.61474609375}, {"start": 1465.56, "end": 1465.88, "word": " eight", "probability": 0.56103515625}, {"start": 1465.88, "end": 1467.4, "word": " strong", "probability": 0.56396484375}, {"start": 1467.4, "end": 1468.4, "word": " negative", "probability": 0.89892578125}, {"start": 1468.4, "end": 1469.28, "word": " which", "probability": 0.609375}, {"start": 1469.28, "end": 1469.5, "word": " is", "probability": 0.953125}, {"start": 1469.5, "end": 1469.84, "word": " more", "probability": 0.93017578125}, {"start": 1469.84, "end": 1470.32, "word": " strong", "probability": 0.85986328125}, {"start": 1470.32, "end": 1471.88, "word": " nine", "probability": 0.734375}, {"start": 1471.88, "end": 1473.08, "word": " nine", "probability": 0.4306640625}, {"start": 1473.08, "end": 1473.54, "word": " because", "probability": 0.81640625}, {"start": 1473.54, "end": 1473.84, "word": " this", "probability": 0.953125}, {"start": 1473.84, "end": 1474.22, "word": " value", "probability": 0.94970703125}, {"start": 1474.22, "end": 1474.52, "word": " is", "probability": 0.94775390625}, {"start": 1474.52, "end": 1475.06, "word": " close", "probability": 0.417236328125}, {"start": 1475.06, "end": 1476.64, "word": " closer", "probability": 0.82373046875}, {"start": 1476.64, "end": 1477.46, "word": " to", "probability": 0.966796875}, {"start": 1477.46, "end": 1477.84, "word": " minus", "probability": 0.974609375}, {"start": 1477.84, "end": 1478.1, "word": " one", "probability": 0.93212890625}, {"start": 1478.1, "end": 1478.42, "word": " than", "probability": 0.477783203125}], "temperature": 1.0}, {"id": 56, "seek": 150747, "start": 1479.13, "end": 1507.47, "text": " Minus 0.8. Even this value is greater than minus 0.9, but minus 0.9 is close to minus 1, more closer to minus 1 than minus 0.8. On the other hand, if R equals 0.75, that means there exists positive relationship. 
If R equals 0.85, also there exists positive.", "tokens": [2829, 301, 1958, 13, 23, 13, 2754, 341, 2158, 307, 5044, 813, 3175, 1958, 13, 24, 11, 457, 3175, 1958, 13, 24, 307, 1998, 281, 3175, 502, 11, 544, 4966, 281, 3175, 502, 813, 3175, 1958, 13, 23, 13, 1282, 264, 661, 1011, 11, 498, 497, 6915, 1958, 13, 11901, 11, 300, 1355, 456, 8198, 3353, 2480, 13, 759, 497, 6915, 1958, 13, 19287, 11, 611, 456, 8198, 3353, 13], "avg_logprob": -0.17418574489338298, "compression_ratio": 1.664516129032258, "no_speech_prob": 0.0, "words": [{"start": 1479.13, "end": 1479.67, "word": " Minus", "probability": 0.58355712890625}, {"start": 1479.67, "end": 1479.89, "word": " 0", "probability": 0.56591796875}, {"start": 1479.89, "end": 1480.21, "word": ".8.", "probability": 0.984619140625}, {"start": 1480.63, "end": 1480.91, "word": " Even", "probability": 0.7734375}, {"start": 1480.91, "end": 1481.49, "word": " this", "probability": 0.8642578125}, {"start": 1481.49, "end": 1481.91, "word": " value", "probability": 0.9775390625}, {"start": 1481.91, "end": 1482.51, "word": " is", "probability": 0.91259765625}, {"start": 1482.51, "end": 1482.85, "word": " greater", "probability": 0.88671875}, {"start": 1482.85, "end": 1483.13, "word": " than", "probability": 0.93701171875}, {"start": 1483.13, "end": 1483.41, "word": " minus", "probability": 0.8916015625}, {"start": 1483.41, "end": 1483.61, "word": " 0", "probability": 0.970703125}, {"start": 1483.61, "end": 1484.07, "word": ".9,", "probability": 0.998291015625}, {"start": 1484.53, "end": 1485.17, "word": " but", "probability": 0.89697265625}, {"start": 1485.17, "end": 1485.71, "word": " minus", "probability": 0.96826171875}, {"start": 1485.71, "end": 1485.93, "word": " 0", "probability": 0.9814453125}, {"start": 1485.93, "end": 1486.25, "word": ".9", "probability": 0.998779296875}, {"start": 1486.25, "end": 1486.43, "word": " is", "probability": 0.93896484375}, {"start": 1486.43, "end": 1486.91, "word": " close", "probability": 
0.8115234375}, {"start": 1486.91, "end": 1487.63, "word": " to", "probability": 0.671875}, {"start": 1487.63, "end": 1488.79, "word": " minus", "probability": 0.91943359375}, {"start": 1488.79, "end": 1489.07, "word": " 1,", "probability": 0.625}, {"start": 1489.21, "end": 1489.89, "word": " more", "probability": 0.6748046875}, {"start": 1489.89, "end": 1490.37, "word": " closer", "probability": 0.62353515625}, {"start": 1490.37, "end": 1490.87, "word": " to", "probability": 0.5595703125}, {"start": 1490.87, "end": 1491.79, "word": " minus", "probability": 0.8291015625}, {"start": 1491.79, "end": 1492.09, "word": " 1", "probability": 0.91357421875}, {"start": 1492.09, "end": 1492.43, "word": " than", "probability": 0.89892578125}, {"start": 1492.43, "end": 1493.59, "word": " minus", "probability": 0.96923828125}, {"start": 1493.59, "end": 1493.81, "word": " 0", "probability": 0.9921875}, {"start": 1493.81, "end": 1494.13, "word": ".8.", "probability": 0.998291015625}, {"start": 1494.69, "end": 1494.85, "word": " On", "probability": 0.9306640625}, {"start": 1494.85, "end": 1494.97, "word": " the", "probability": 0.91845703125}, {"start": 1494.97, "end": 1495.13, "word": " other", "probability": 0.88818359375}, {"start": 1495.13, "end": 1495.47, "word": " hand,", "probability": 0.90576171875}, {"start": 1496.33, "end": 1496.53, "word": " if", "probability": 0.9482421875}, {"start": 1496.53, "end": 1496.91, "word": " R", "probability": 0.708984375}, {"start": 1496.91, "end": 1497.41, "word": " equals", "probability": 0.759765625}, {"start": 1497.41, "end": 1498.05, "word": " 0", "probability": 0.97412109375}, {"start": 1498.05, "end": 1498.53, "word": ".75,", "probability": 0.995849609375}, {"start": 1499.55, "end": 1499.93, "word": " that", "probability": 0.92724609375}, {"start": 1499.93, "end": 1500.17, "word": " means", "probability": 0.9326171875}, {"start": 1500.17, "end": 1500.39, "word": " there", "probability": 0.82373046875}, {"start": 1500.39, "end": 
1500.71, "word": " exists", "probability": 0.82470703125}, {"start": 1500.71, "end": 1501.19, "word": " positive", "probability": 0.70263671875}, {"start": 1501.19, "end": 1501.81, "word": " relationship.", "probability": 0.90625}, {"start": 1503.03, "end": 1503.87, "word": " If", "probability": 0.88818359375}, {"start": 1503.87, "end": 1504.25, "word": " R", "probability": 0.9951171875}, {"start": 1504.25, "end": 1504.69, "word": " equals", "probability": 0.89794921875}, {"start": 1504.69, "end": 1505.33, "word": " 0", "probability": 0.98828125}, {"start": 1505.33, "end": 1505.77, "word": ".85,", "probability": 0.996826171875}, {"start": 1506.19, "end": 1506.51, "word": " also", "probability": 0.8369140625}, {"start": 1506.51, "end": 1506.73, "word": " there", "probability": 0.70947265625}, {"start": 1506.73, "end": 1506.97, "word": " exists", "probability": 0.85595703125}, {"start": 1506.97, "end": 1507.47, "word": " positive.", "probability": 0.93408203125}], "temperature": 1.0}, {"id": 57, "seek": 153080, "start": 1508.08, "end": 1530.8, "text": " But R2 is stronger than R1, because 0.85 is closer to plus 1 than 0.7. So we can say that there exists strong relationship between X and Y, and this relationship is positive. 
So again, by using the sign, you can tell the direction.", "tokens": [583, 497, 17, 307, 7249, 813, 497, 16, 11, 570, 1958, 13, 19287, 307, 4966, 281, 1804, 502, 813, 1958, 13, 22, 13, 407, 321, 393, 584, 300, 456, 8198, 2068, 2480, 1296, 1783, 293, 398, 11, 293, 341, 2480, 307, 3353, 13, 407, 797, 11, 538, 1228, 264, 1465, 11, 291, 393, 980, 264, 3513, 13], "avg_logprob": -0.21080280300872078, "compression_ratio": 1.4320987654320987, "no_speech_prob": 0.0, "words": [{"start": 1508.08, "end": 1508.48, "word": " But", "probability": 0.183837890625}, {"start": 1508.48, "end": 1510.26, "word": " R2", "probability": 0.723876953125}, {"start": 1510.26, "end": 1510.78, "word": " is", "probability": 0.921875}, {"start": 1510.78, "end": 1511.58, "word": " stronger", "probability": 0.78955078125}, {"start": 1511.58, "end": 1511.92, "word": " than", "probability": 0.8994140625}, {"start": 1511.92, "end": 1512.42, "word": " R1,", "probability": 0.966796875}, {"start": 1512.54, "end": 1513.0, "word": " because", "probability": 0.869140625}, {"start": 1513.0, "end": 1513.24, "word": " 0", "probability": 0.87744140625}, {"start": 1513.24, "end": 1513.54, "word": ".85", "probability": 0.989501953125}, {"start": 1513.54, "end": 1514.16, "word": " is", "probability": 0.94384765625}, {"start": 1514.16, "end": 1514.76, "word": " closer", "probability": 0.90576171875}, {"start": 1514.76, "end": 1515.62, "word": " to", "probability": 0.93115234375}, {"start": 1515.62, "end": 1516.18, "word": " plus", "probability": 0.67138671875}, {"start": 1516.18, "end": 1516.56, "word": " 1", "probability": 0.521484375}, {"start": 1516.56, "end": 1518.16, "word": " than", "probability": 0.796875}, {"start": 1518.16, "end": 1518.44, "word": " 0", "probability": 0.70703125}, {"start": 1518.44, "end": 1518.76, "word": ".7.", "probability": 0.660888671875}, {"start": 1519.74, "end": 1520.2, "word": " So", "probability": 0.92236328125}, {"start": 1520.2, "end": 1520.34, "word": " we", "probability": 
0.638671875}, {"start": 1520.34, "end": 1520.52, "word": " can", "probability": 0.9501953125}, {"start": 1520.52, "end": 1520.74, "word": " say", "probability": 0.9072265625}, {"start": 1520.74, "end": 1520.98, "word": " that", "probability": 0.91455078125}, {"start": 1520.98, "end": 1521.14, "word": " there", "probability": 0.8837890625}, {"start": 1521.14, "end": 1521.52, "word": " exists", "probability": 0.7783203125}, {"start": 1521.52, "end": 1522.52, "word": " strong", "probability": 0.5087890625}, {"start": 1522.52, "end": 1523.06, "word": " relationship", "probability": 0.9208984375}, {"start": 1523.06, "end": 1523.44, "word": " between", "probability": 0.87451171875}, {"start": 1523.44, "end": 1523.66, "word": " X", "probability": 0.5166015625}, {"start": 1523.66, "end": 1523.8, "word": " and", "probability": 0.94189453125}, {"start": 1523.8, "end": 1523.96, "word": " Y,", "probability": 0.99267578125}, {"start": 1524.02, "end": 1524.12, "word": " and", "probability": 0.92724609375}, {"start": 1524.12, "end": 1524.3, "word": " this", "probability": 0.931640625}, {"start": 1524.3, "end": 1524.74, "word": " relationship", "probability": 0.9111328125}, {"start": 1524.74, "end": 1524.98, "word": " is", "probability": 0.9443359375}, {"start": 1524.98, "end": 1525.22, "word": " positive.", "probability": 0.7763671875}, {"start": 1526.36, "end": 1526.68, "word": " So", "probability": 0.95361328125}, {"start": 1526.68, "end": 1527.06, "word": " again,", "probability": 0.85888671875}, {"start": 1527.12, "end": 1527.26, "word": " by", "probability": 0.966796875}, {"start": 1527.26, "end": 1527.56, "word": " using", "probability": 0.9326171875}, {"start": 1527.56, "end": 1527.9, "word": " the", "probability": 0.9150390625}, {"start": 1527.9, "end": 1528.4, "word": " sign,", "probability": 0.81787109375}, {"start": 1528.5, "end": 1528.56, "word": " you", "probability": 0.94677734375}, {"start": 1528.56, "end": 1528.76, "word": " can", "probability": 0.943359375}, 
{"start": 1528.76, "end": 1529.08, "word": " tell", "probability": 0.88427734375}, {"start": 1529.08, "end": 1529.96, "word": " the", "probability": 0.82763671875}, {"start": 1529.96, "end": 1530.8, "word": " direction.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 58, "seek": 156193, "start": 1532.23, "end": 1561.93, "text": " The absolute value can tell the strength of the relationship between X and Y. So there are five features of R, unity-free. Ranges between minus one and plus one. Closer to minus one, it means stronger negative. Closer to plus one, stronger positive. Close to zero, it means there is no relationship. Or the weaker, the relationship between X and Y.", "tokens": [440, 8236, 2158, 393, 980, 264, 3800, 295, 264, 2480, 1296, 1783, 293, 398, 13, 407, 456, 366, 1732, 4122, 295, 497, 11, 18205, 12, 10792, 13, 497, 10350, 1296, 3175, 472, 293, 1804, 472, 13, 2033, 22150, 281, 3175, 472, 11, 309, 1355, 7249, 3671, 13, 2033, 22150, 281, 1804, 472, 11, 7249, 3353, 13, 16346, 281, 4018, 11, 309, 1355, 456, 307, 572, 2480, 13, 1610, 264, 24286, 11, 264, 2480, 1296, 1783, 293, 398, 13], "avg_logprob": -0.19422468392154837, "compression_ratio": 1.8368421052631578, "no_speech_prob": 0.0, "words": [{"start": 1532.23, "end": 1532.53, "word": " The", "probability": 0.36328125}, {"start": 1532.53, "end": 1532.97, "word": " absolute", "probability": 0.86376953125}, {"start": 1532.97, "end": 1533.33, "word": " value", "probability": 0.95458984375}, {"start": 1533.33, "end": 1533.63, "word": " can", "probability": 0.84130859375}, {"start": 1533.63, "end": 1533.95, "word": " tell", "probability": 0.8505859375}, {"start": 1533.95, "end": 1534.67, "word": " the", "probability": 0.8271484375}, {"start": 1534.67, "end": 1535.09, "word": " strength", "probability": 0.86279296875}, {"start": 1535.09, "end": 1535.57, "word": " of", "probability": 0.95068359375}, {"start": 1535.57, "end": 1535.91, "word": " the", "probability": 0.9033203125}, {"start": 
1535.91, "end": 1537.17, "word": " relationship", "probability": 0.89111328125}, {"start": 1537.17, "end": 1537.53, "word": " between", "probability": 0.89111328125}, {"start": 1537.53, "end": 1537.73, "word": " X", "probability": 0.49951171875}, {"start": 1537.73, "end": 1537.87, "word": " and", "probability": 0.93017578125}, {"start": 1537.87, "end": 1538.09, "word": " Y.", "probability": 0.986328125}, {"start": 1538.73, "end": 1539.13, "word": " So", "probability": 0.90087890625}, {"start": 1539.13, "end": 1539.29, "word": " there", "probability": 0.62939453125}, {"start": 1539.29, "end": 1539.47, "word": " are", "probability": 0.93994140625}, {"start": 1539.47, "end": 1539.87, "word": " five", "probability": 0.755859375}, {"start": 1539.87, "end": 1540.49, "word": " features", "probability": 0.76953125}, {"start": 1540.49, "end": 1540.93, "word": " of", "probability": 0.9658203125}, {"start": 1540.93, "end": 1541.23, "word": " R,", "probability": 0.95556640625}, {"start": 1541.49, "end": 1541.97, "word": " unity", "probability": 0.90673828125}, {"start": 1541.97, "end": 1542.27, "word": "-free.", "probability": 0.742919921875}, {"start": 1543.15, "end": 1543.59, "word": " Ranges", "probability": 0.73779296875}, {"start": 1543.59, "end": 1543.91, "word": " between", "probability": 0.849609375}, {"start": 1543.91, "end": 1544.15, "word": " minus", "probability": 0.65966796875}, {"start": 1544.15, "end": 1544.33, "word": " one", "probability": 0.7021484375}, {"start": 1544.33, "end": 1544.45, "word": " and", "probability": 0.88916015625}, {"start": 1544.45, "end": 1544.73, "word": " plus", "probability": 0.95849609375}, {"start": 1544.73, "end": 1545.05, "word": " one.", "probability": 0.92529296875}, {"start": 1546.07, "end": 1546.29, "word": " Closer", "probability": 0.913330078125}, {"start": 1546.29, "end": 1546.49, "word": " to", "probability": 0.96875}, {"start": 1546.49, "end": 1546.73, "word": " minus", "probability": 0.97705078125}, {"start": 1546.73, 
"end": 1547.09, "word": " one,", "probability": 0.921875}, {"start": 1547.35, "end": 1547.45, "word": " it", "probability": 0.6767578125}, {"start": 1547.45, "end": 1547.75, "word": " means", "probability": 0.93017578125}, {"start": 1547.75, "end": 1548.51, "word": " stronger", "probability": 0.85498046875}, {"start": 1548.51, "end": 1549.01, "word": " negative.", "probability": 0.44677734375}, {"start": 1549.95, "end": 1550.21, "word": " Closer", "probability": 0.928955078125}, {"start": 1550.21, "end": 1550.43, "word": " to", "probability": 0.966796875}, {"start": 1550.43, "end": 1550.69, "word": " plus", "probability": 0.94873046875}, {"start": 1550.69, "end": 1550.95, "word": " one,", "probability": 0.92578125}, {"start": 1551.17, "end": 1551.95, "word": " stronger", "probability": 0.8798828125}, {"start": 1551.95, "end": 1552.49, "word": " positive.", "probability": 0.93359375}, {"start": 1553.73, "end": 1554.15, "word": " Close", "probability": 0.6630859375}, {"start": 1554.15, "end": 1554.31, "word": " to", "probability": 0.96533203125}, {"start": 1554.31, "end": 1554.61, "word": " zero,", "probability": 0.89501953125}, {"start": 1554.83, "end": 1554.99, "word": " it", "probability": 0.92626953125}, {"start": 1554.99, "end": 1555.37, "word": " means", "probability": 0.92724609375}, {"start": 1555.37, "end": 1555.85, "word": " there", "probability": 0.87939453125}, {"start": 1555.85, "end": 1556.07, "word": " is", "probability": 0.939453125}, {"start": 1556.07, "end": 1556.41, "word": " no", "probability": 0.9482421875}, {"start": 1556.41, "end": 1557.31, "word": " relationship.", "probability": 0.896484375}, {"start": 1557.81, "end": 1558.05, "word": " Or", "probability": 0.94677734375}, {"start": 1558.05, "end": 1558.51, "word": " the", "probability": 0.58447265625}, {"start": 1558.51, "end": 1558.81, "word": " weaker,", "probability": 0.810546875}, {"start": 1559.87, "end": 1560.17, "word": " the", "probability": 0.9111328125}, {"start": 1560.17, "end": 
1560.79, "word": " relationship", "probability": 0.912109375}, {"start": 1560.79, "end": 1561.23, "word": " between", "probability": 0.8837890625}, {"start": 1561.23, "end": 1561.63, "word": " X", "probability": 0.98583984375}, {"start": 1561.63, "end": 1561.75, "word": " and", "probability": 0.94482421875}, {"start": 1561.75, "end": 1561.93, "word": " Y.", "probability": 0.9833984375}], "temperature": 1.0}, {"id": 59, "seek": 159042, "start": 1564.2, "end": 1590.42, "text": " By using scatter plots, we can construct a scatter plot by plotting the Y values versus the X values. Y in the vertical axis and X in the horizontal axis. If you look carefully at graph number one and three,", "tokens": [3146, 1228, 34951, 28609, 11, 321, 393, 7690, 257, 34951, 7542, 538, 41178, 264, 398, 4190, 5717, 264, 1783, 4190, 13, 398, 294, 264, 9429, 10298, 293, 1783, 294, 264, 12750, 10298, 13, 759, 291, 574, 7500, 412, 4295, 1230, 472, 293, 1045, 11], "avg_logprob": -0.18836804893281725, "compression_ratio": 1.4647887323943662, "no_speech_prob": 0.0, "words": [{"start": 1564.2, "end": 1564.94, "word": " By", "probability": 0.65087890625}, {"start": 1564.94, "end": 1565.36, "word": " using", "probability": 0.9267578125}, {"start": 1565.36, "end": 1568.08, "word": " scatter", "probability": 0.94384765625}, {"start": 1568.08, "end": 1568.48, "word": " plots,", "probability": 0.904296875}, {"start": 1568.96, "end": 1573.24, "word": " we", "probability": 0.8486328125}, {"start": 1573.24, "end": 1573.52, "word": " can", "probability": 0.919921875}, {"start": 1573.52, "end": 1574.1, "word": " construct", "probability": 0.95849609375}, {"start": 1574.1, "end": 1574.32, "word": " a", "probability": 0.365478515625}, {"start": 1574.32, "end": 1574.6, "word": " scatter", "probability": 0.9697265625}, {"start": 1574.6, "end": 1574.88, "word": " plot", "probability": 0.94873046875}, {"start": 1574.88, "end": 1575.34, "word": " by", "probability": 0.90576171875}, {"start": 1575.34, "end": 1577.24, 
"word": " plotting", "probability": 0.9658203125}, {"start": 1577.24, "end": 1577.64, "word": " the", "probability": 0.880859375}, {"start": 1577.64, "end": 1578.16, "word": " Y", "probability": 0.60693359375}, {"start": 1578.16, "end": 1578.76, "word": " values", "probability": 0.7216796875}, {"start": 1578.76, "end": 1579.98, "word": " versus", "probability": 0.85888671875}, {"start": 1579.98, "end": 1580.36, "word": " the", "probability": 0.888671875}, {"start": 1580.36, "end": 1580.54, "word": " X", "probability": 0.98583984375}, {"start": 1580.54, "end": 1580.96, "word": " values.", "probability": 0.9453125}, {"start": 1582.58, "end": 1583.08, "word": " Y", "probability": 0.958984375}, {"start": 1583.08, "end": 1583.24, "word": " in", "probability": 0.861328125}, {"start": 1583.24, "end": 1583.36, "word": " the", "probability": 0.9169921875}, {"start": 1583.36, "end": 1583.7, "word": " vertical", "probability": 0.9130859375}, {"start": 1583.7, "end": 1584.06, "word": " axis", "probability": 0.9423828125}, {"start": 1584.06, "end": 1585.04, "word": " and", "probability": 0.69921875}, {"start": 1585.04, "end": 1585.26, "word": " X", "probability": 0.97900390625}, {"start": 1585.26, "end": 1585.44, "word": " in", "probability": 0.86962890625}, {"start": 1585.44, "end": 1585.58, "word": " the", "probability": 0.91455078125}, {"start": 1585.58, "end": 1585.98, "word": " horizontal", "probability": 0.8642578125}, {"start": 1585.98, "end": 1586.48, "word": " axis.", "probability": 0.94384765625}, {"start": 1587.92, "end": 1588.12, "word": " If", "probability": 0.94189453125}, {"start": 1588.12, "end": 1588.22, "word": " you", "probability": 0.49365234375}, {"start": 1588.22, "end": 1588.4, "word": " look", "probability": 0.966796875}, {"start": 1588.4, "end": 1588.74, "word": " carefully", "probability": 0.783203125}, {"start": 1588.74, "end": 1589.06, "word": " at", "probability": 0.94677734375}, {"start": 1589.06, "end": 1589.46, "word": " graph", "probability": 
0.67529296875}, {"start": 1589.46, "end": 1589.7, "word": " number", "probability": 0.7998046875}, {"start": 1589.7, "end": 1589.94, "word": " one", "probability": 0.552734375}, {"start": 1589.94, "end": 1590.1, "word": " and", "probability": 0.8583984375}, {"start": 1590.1, "end": 1590.42, "word": " three,", "probability": 0.921875}], "temperature": 1.0}, {"id": 60, "seek": 161376, "start": 1592.94, "end": 1613.76, "text": " We see that all the points lie on the straight line, either this way or the other way. If all the points lie on the straight line, it means they exist perfectly.", "tokens": [492, 536, 300, 439, 264, 2793, 4544, 322, 264, 2997, 1622, 11, 2139, 341, 636, 420, 264, 661, 636, 13, 759, 439, 264, 2793, 4544, 322, 264, 2997, 1622, 11, 309, 1355, 436, 2514, 6239, 13], "avg_logprob": -0.16680743404336879, "compression_ratio": 1.653061224489796, "no_speech_prob": 0.0, "words": [{"start": 1592.94, "end": 1593.72, "word": " We", "probability": 0.451904296875}, {"start": 1593.72, "end": 1594.5, "word": " see", "probability": 0.869140625}, {"start": 1594.5, "end": 1594.94, "word": " that", "probability": 0.9375}, {"start": 1594.94, "end": 1596.54, "word": " all", "probability": 0.8798828125}, {"start": 1596.54, "end": 1596.68, "word": " the", "probability": 0.904296875}, {"start": 1596.68, "end": 1597.1, "word": " points", "probability": 0.9404296875}, {"start": 1597.1, "end": 1599.92, "word": " lie", "probability": 0.73388671875}, {"start": 1599.92, "end": 1601.38, "word": " on", "probability": 0.94140625}, {"start": 1601.38, "end": 1601.68, "word": " the", "probability": 0.865234375}, {"start": 1601.68, "end": 1602.18, "word": " straight", "probability": 0.91064453125}, {"start": 1602.18, "end": 1602.54, "word": " line,", "probability": 0.916015625}, {"start": 1604.06, "end": 1604.56, "word": " either", "probability": 0.9287109375}, {"start": 1604.56, "end": 1605.14, "word": " this", "probability": 0.95166015625}, {"start": 1605.14, "end": 1605.46, 
"word": " way", "probability": 0.9599609375}, {"start": 1605.46, "end": 1606.02, "word": " or", "probability": 0.90869140625}, {"start": 1606.02, "end": 1606.18, "word": " the", "probability": 0.91552734375}, {"start": 1606.18, "end": 1606.42, "word": " other", "probability": 0.89013671875}, {"start": 1606.42, "end": 1606.74, "word": " way.", "probability": 0.9228515625}, {"start": 1607.96, "end": 1608.46, "word": " If", "probability": 0.96240234375}, {"start": 1608.46, "end": 1608.76, "word": " all", "probability": 0.95263671875}, {"start": 1608.76, "end": 1608.88, "word": " the", "probability": 0.91064453125}, {"start": 1608.88, "end": 1609.2, "word": " points", "probability": 0.9169921875}, {"start": 1609.2, "end": 1609.52, "word": " lie", "probability": 0.89697265625}, {"start": 1609.52, "end": 1609.66, "word": " on", "probability": 0.89990234375}, {"start": 1609.66, "end": 1609.8, "word": " the", "probability": 0.76904296875}, {"start": 1609.8, "end": 1610.1, "word": " straight", "probability": 0.9140625}, {"start": 1610.1, "end": 1610.46, "word": " line,", "probability": 0.861328125}, {"start": 1611.26, "end": 1611.66, "word": " it", "probability": 0.9404296875}, {"start": 1611.66, "end": 1612.04, "word": " means", "probability": 0.93115234375}, {"start": 1612.04, "end": 1612.32, "word": " they", "probability": 0.4609375}, {"start": 1612.32, "end": 1612.86, "word": " exist", "probability": 0.92041015625}, {"start": 1612.86, "end": 1613.76, "word": " perfectly.", "probability": 0.84033203125}], "temperature": 1.0}, {"id": 61, "seek": 164425, "start": 1615.27, "end": 1644.25, "text": " not even strong it's perfect relationship either negative or positive so this one perfect negative negative because x increases y goes down decline so if x is for example five maybe y is supposed to twenty if x increased to seven maybe y is fifteen", "tokens": [406, 754, 2068, 309, 311, 2176, 2480, 2139, 3671, 420, 3353, 370, 341, 472, 2176, 3671, 3671, 570, 2031, 8637, 288, 
1709, 760, 15635, 370, 498, 2031, 307, 337, 1365, 1732, 1310, 288, 307, 3442, 281, 7699, 498, 2031, 6505, 281, 3407, 1310, 288, 307, 18126], "avg_logprob": -0.24999999873181608, "compression_ratio": 1.7054794520547945, "no_speech_prob": 0.0, "words": [{"start": 1615.27, "end": 1615.53, "word": " not", "probability": 0.2103271484375}, {"start": 1615.53, "end": 1615.83, "word": " even", "probability": 0.86328125}, {"start": 1615.83, "end": 1616.31, "word": " strong", "probability": 0.83642578125}, {"start": 1616.31, "end": 1616.57, "word": " it's", "probability": 0.609375}, {"start": 1616.57, "end": 1616.97, "word": " perfect", "probability": 0.89453125}, {"start": 1616.97, "end": 1619.21, "word": " relationship", "probability": 0.466552734375}, {"start": 1619.21, "end": 1619.61, "word": " either", "probability": 0.8671875}, {"start": 1619.61, "end": 1620.31, "word": " negative", "probability": 0.9423828125}, {"start": 1620.31, "end": 1621.45, "word": " or", "probability": 0.9501953125}, {"start": 1621.45, "end": 1621.89, "word": " positive", "probability": 0.92919921875}, {"start": 1621.89, "end": 1622.49, "word": " so", "probability": 0.7216796875}, {"start": 1622.49, "end": 1622.71, "word": " this", "probability": 0.9404296875}, {"start": 1622.71, "end": 1622.91, "word": " one", "probability": 0.9248046875}, {"start": 1622.91, "end": 1623.25, "word": " perfect", "probability": 0.78173828125}, {"start": 1623.25, "end": 1623.83, "word": " negative", "probability": 0.9072265625}, {"start": 1623.83, "end": 1627.53, "word": " negative", "probability": 0.61572265625}, {"start": 1627.53, "end": 1629.59, "word": " because", "probability": 0.68798828125}, {"start": 1629.59, "end": 1630.05, "word": " x", "probability": 0.85107421875}, {"start": 1630.05, "end": 1630.97, "word": " increases", "probability": 0.91650390625}, {"start": 1630.97, "end": 1631.81, "word": " y", "probability": 0.7998046875}, {"start": 1631.81, "end": 1632.07, "word": " goes", "probability": 0.9375}, 
{"start": 1632.07, "end": 1632.41, "word": " down", "probability": 0.8544921875}, {"start": 1632.41, "end": 1632.81, "word": " decline", "probability": 0.5810546875}, {"start": 1632.81, "end": 1633.49, "word": " so", "probability": 0.884765625}, {"start": 1633.49, "end": 1633.71, "word": " if", "probability": 0.9501953125}, {"start": 1633.71, "end": 1633.93, "word": " x", "probability": 0.98681640625}, {"start": 1633.93, "end": 1634.09, "word": " is", "probability": 0.919921875}, {"start": 1634.09, "end": 1634.43, "word": " for", "probability": 0.94482421875}, {"start": 1634.43, "end": 1634.73, "word": " example", "probability": 0.97802734375}, {"start": 1634.73, "end": 1635.15, "word": " five", "probability": 0.56787109375}, {"start": 1635.15, "end": 1637.43, "word": " maybe", "probability": 0.939453125}, {"start": 1637.43, "end": 1637.67, "word": " y", "probability": 0.92529296875}, {"start": 1637.67, "end": 1637.79, "word": " is", "probability": 0.849609375}, {"start": 1637.79, "end": 1638.09, "word": " supposed", "probability": 0.845703125}, {"start": 1638.09, "end": 1638.23, "word": " to", "probability": 0.5869140625}, {"start": 1638.23, "end": 1638.55, "word": " twenty", "probability": 0.440673828125}, {"start": 1638.55, "end": 1639.59, "word": " if", "probability": 0.58056640625}, {"start": 1639.59, "end": 1639.89, "word": " x", "probability": 0.98046875}, {"start": 1639.89, "end": 1640.79, "word": " increased", "probability": 0.751953125}, {"start": 1640.79, "end": 1640.97, "word": " to", "probability": 0.64990234375}, {"start": 1640.97, "end": 1641.25, "word": " seven", "probability": 0.90576171875}, {"start": 1641.25, "end": 1642.65, "word": " maybe", "probability": 0.921875}, {"start": 1642.65, "end": 1643.03, "word": " y", "probability": 0.98779296875}, {"start": 1643.03, "end": 1643.81, "word": " is", "probability": 0.95068359375}, {"start": 1643.81, "end": 1644.25, "word": " fifteen", "probability": 0.79931640625}], "temperature": 1.0}, {"id": 62, 
"seek": 167303, "start": 1644.69, "end": 1673.03, "text": " So if X increases, in this case, Y declines or decreases, it means there exists negative relationship. On the other hand, the left corner here, positive relationship, because X increases, Y also goes up. And perfect, because all the points lie on the straight line. So it's perfect, positive, perfect, negative relationship.", "tokens": [407, 498, 1783, 8637, 11, 294, 341, 1389, 11, 398, 7488, 1652, 420, 24108, 11, 309, 1355, 456, 8198, 3671, 2480, 13, 1282, 264, 661, 1011, 11, 264, 1411, 4538, 510, 11, 3353, 2480, 11, 570, 1783, 8637, 11, 398, 611, 1709, 493, 13, 400, 2176, 11, 570, 439, 264, 2793, 4544, 322, 264, 2997, 1622, 13, 407, 309, 311, 2176, 11, 3353, 11, 2176, 11, 3671, 2480, 13], "avg_logprob": -0.15658482632466725, "compression_ratio": 1.7955801104972375, "no_speech_prob": 0.0, "words": [{"start": 1644.69, "end": 1644.97, "word": " So", "probability": 0.84619140625}, {"start": 1644.97, "end": 1645.21, "word": " if", "probability": 0.82080078125}, {"start": 1645.21, "end": 1645.51, "word": " X", "probability": 0.59521484375}, {"start": 1645.51, "end": 1646.03, "word": " increases,", "probability": 0.90625}, {"start": 1646.69, "end": 1647.01, "word": " in", "probability": 0.91357421875}, {"start": 1647.01, "end": 1647.19, "word": " this", "probability": 0.947265625}, {"start": 1647.19, "end": 1647.55, "word": " case,", "probability": 0.90478515625}, {"start": 1647.97, "end": 1648.27, "word": " Y", "probability": 0.8837890625}, {"start": 1648.27, "end": 1648.77, "word": " declines", "probability": 0.7890625}, {"start": 1648.77, "end": 1648.93, "word": " or", "probability": 0.85986328125}, {"start": 1648.93, "end": 1649.29, "word": " decreases,", "probability": 0.95849609375}, {"start": 1649.85, "end": 1650.07, "word": " it", "probability": 0.90283203125}, {"start": 1650.07, "end": 1650.41, "word": " means", "probability": 0.9306640625}, {"start": 1650.41, "end": 1650.65, "word": " there", 
"probability": 0.86279296875}, {"start": 1650.65, "end": 1651.01, "word": " exists", "probability": 0.8359375}, {"start": 1651.01, "end": 1651.41, "word": " negative", "probability": 0.82275390625}, {"start": 1651.41, "end": 1651.91, "word": " relationship.", "probability": 0.88720703125}, {"start": 1654.07, "end": 1654.29, "word": " On", "probability": 0.9384765625}, {"start": 1654.29, "end": 1654.41, "word": " the", "probability": 0.91015625}, {"start": 1654.41, "end": 1654.57, "word": " other", "probability": 0.88623046875}, {"start": 1654.57, "end": 1654.97, "word": " hand,", "probability": 0.9150390625}, {"start": 1656.81, "end": 1657.09, "word": " the", "probability": 0.74169921875}, {"start": 1657.09, "end": 1657.39, "word": " left", "probability": 0.9365234375}, {"start": 1657.39, "end": 1658.27, "word": " corner", "probability": 0.9150390625}, {"start": 1658.27, "end": 1658.67, "word": " here,", "probability": 0.82177734375}, {"start": 1659.55, "end": 1660.97, "word": " positive", "probability": 0.818359375}, {"start": 1660.97, "end": 1661.65, "word": " relationship,", "probability": 0.92578125}, {"start": 1661.75, "end": 1662.17, "word": " because", "probability": 0.9169921875}, {"start": 1662.17, "end": 1662.65, "word": " X", "probability": 0.8916015625}, {"start": 1662.65, "end": 1663.15, "word": " increases,", "probability": 0.9443359375}, {"start": 1663.49, "end": 1663.75, "word": " Y", "probability": 0.9931640625}, {"start": 1663.75, "end": 1664.11, "word": " also", "probability": 0.8837890625}, {"start": 1664.11, "end": 1664.43, "word": " goes", "probability": 0.93896484375}, {"start": 1664.43, "end": 1664.71, "word": " up.", "probability": 0.96875}, {"start": 1665.97, "end": 1666.61, "word": " And", "probability": 0.93115234375}, {"start": 1666.61, "end": 1666.95, "word": " perfect,", "probability": 0.86474609375}, {"start": 1667.11, "end": 1667.41, "word": " because", "probability": 0.888671875}, {"start": 1667.41, "end": 1667.69, "word": " all", 
"probability": 0.94677734375}, {"start": 1667.69, "end": 1667.81, "word": " the", "probability": 0.892578125}, {"start": 1667.81, "end": 1668.07, "word": " points", "probability": 0.9306640625}, {"start": 1668.07, "end": 1668.39, "word": " lie", "probability": 0.896484375}, {"start": 1668.39, "end": 1668.69, "word": " on", "probability": 0.94384765625}, {"start": 1668.69, "end": 1668.99, "word": " the", "probability": 0.88427734375}, {"start": 1668.99, "end": 1669.57, "word": " straight", "probability": 0.87255859375}, {"start": 1669.57, "end": 1669.93, "word": " line.", "probability": 0.93701171875}, {"start": 1670.35, "end": 1670.49, "word": " So", "probability": 0.962890625}, {"start": 1670.49, "end": 1670.67, "word": " it's", "probability": 0.962646484375}, {"start": 1670.67, "end": 1671.13, "word": " perfect,", "probability": 0.85009765625}, {"start": 1671.27, "end": 1671.65, "word": " positive,", "probability": 0.93212890625}, {"start": 1671.85, "end": 1672.11, "word": " perfect,", "probability": 0.8798828125}, {"start": 1672.25, "end": 1672.55, "word": " negative", "probability": 0.9462890625}, {"start": 1672.55, "end": 1673.03, "word": " relationship.", "probability": 0.9150390625}], "temperature": 1.0}, {"id": 63, "seek": 170103, "start": 1675.11, "end": 1701.03, "text": " So it's straightforward to determine if it's perfect by using scatterplot. Also, by scatterplot, you can tell the direction of the relationship. For the second scatterplot, it seems to be that there exists negative relationship between X and Y. 
In this one, also there exists a relationship", "tokens": [407, 309, 311, 15325, 281, 6997, 498, 309, 311, 2176, 538, 1228, 34951, 564, 310, 13, 2743, 11, 538, 34951, 564, 310, 11, 291, 393, 980, 264, 3513, 295, 264, 2480, 13, 1171, 264, 1150, 34951, 564, 310, 11, 309, 2544, 281, 312, 300, 456, 8198, 3671, 2480, 1296, 1783, 293, 398, 13, 682, 341, 472, 11, 611, 456, 8198, 257, 2480], "avg_logprob": -0.16344245842524938, "compression_ratio": 1.6724137931034482, "no_speech_prob": 0.0, "words": [{"start": 1675.11, "end": 1675.47, "word": " So", "probability": 0.8583984375}, {"start": 1675.47, "end": 1675.81, "word": " it's", "probability": 0.845458984375}, {"start": 1675.81, "end": 1677.05, "word": " straightforward", "probability": 0.7041015625}, {"start": 1677.05, "end": 1677.35, "word": " to", "probability": 0.92333984375}, {"start": 1677.35, "end": 1677.59, "word": " determine", "probability": 0.89599609375}, {"start": 1677.59, "end": 1677.79, "word": " if", "probability": 0.86181640625}, {"start": 1677.79, "end": 1677.95, "word": " it's", "probability": 0.94677734375}, {"start": 1677.95, "end": 1678.39, "word": " perfect", "probability": 0.916015625}, {"start": 1678.39, "end": 1678.73, "word": " by", "probability": 0.9052734375}, {"start": 1678.73, "end": 1678.99, "word": " using", "probability": 0.93115234375}, {"start": 1678.99, "end": 1679.55, "word": " scatterplot.", "probability": 0.7744140625}, {"start": 1682.23, "end": 1682.87, "word": " Also,", "probability": 0.927734375}, {"start": 1682.99, "end": 1683.13, "word": " by", "probability": 0.96533203125}, {"start": 1683.13, "end": 1683.75, "word": " scatterplot,", "probability": 0.9796549479166666}, {"start": 1683.83, "end": 1683.95, "word": " you", "probability": 0.9638671875}, {"start": 1683.95, "end": 1684.17, "word": " can", "probability": 0.94384765625}, {"start": 1684.17, "end": 1684.37, "word": " tell", "probability": 0.8955078125}, {"start": 1684.37, "end": 1684.55, "word": " the", "probability": 
0.91455078125}, {"start": 1684.55, "end": 1684.95, "word": " direction", "probability": 0.97705078125}, {"start": 1684.95, "end": 1685.19, "word": " of", "probability": 0.96728515625}, {"start": 1685.19, "end": 1685.31, "word": " the", "probability": 0.89990234375}, {"start": 1685.31, "end": 1685.75, "word": " relationship.", "probability": 0.89013671875}, {"start": 1687.17, "end": 1687.81, "word": " For", "probability": 0.95556640625}, {"start": 1687.81, "end": 1687.99, "word": " the", "probability": 0.916015625}, {"start": 1687.99, "end": 1688.33, "word": " second", "probability": 0.90185546875}, {"start": 1688.33, "end": 1689.27, "word": " scatterplot,", "probability": 0.986328125}, {"start": 1689.63, "end": 1689.93, "word": " it", "probability": 0.939453125}, {"start": 1689.93, "end": 1690.27, "word": " seems", "probability": 0.8251953125}, {"start": 1690.27, "end": 1690.47, "word": " to", "probability": 0.9501953125}, {"start": 1690.47, "end": 1690.63, "word": " be", "probability": 0.833984375}, {"start": 1690.63, "end": 1690.87, "word": " that", "probability": 0.93359375}, {"start": 1690.87, "end": 1691.11, "word": " there", "probability": 0.9072265625}, {"start": 1691.11, "end": 1691.51, "word": " exists", "probability": 0.86669921875}, {"start": 1691.51, "end": 1692.27, "word": " negative", "probability": 0.5830078125}, {"start": 1692.27, "end": 1692.89, "word": " relationship", "probability": 0.90478515625}, {"start": 1692.89, "end": 1693.17, "word": " between", "probability": 0.88671875}, {"start": 1693.17, "end": 1693.35, "word": " X", "probability": 0.5625}, {"start": 1693.35, "end": 1693.47, "word": " and", "probability": 0.94921875}, {"start": 1693.47, "end": 1693.73, "word": " Y.", "probability": 0.99853515625}, {"start": 1696.85, "end": 1697.49, "word": " In", "probability": 0.53466796875}, {"start": 1697.49, "end": 1697.71, "word": " this", "probability": 0.9501953125}, {"start": 1697.71, "end": 1697.99, "word": " one,", "probability": 
0.93408203125}, {"start": 1698.59, "end": 1699.09, "word": " also", "probability": 0.85888671875}, {"start": 1699.09, "end": 1699.29, "word": " there", "probability": 0.8193359375}, {"start": 1699.29, "end": 1699.75, "word": " exists", "probability": 0.90185546875}, {"start": 1699.75, "end": 1700.53, "word": " a", "probability": 0.9794921875}, {"start": 1700.53, "end": 1701.03, "word": " relationship", "probability": 0.88720703125}], "temperature": 1.0}, {"id": 64, "seek": 172997, "start": 1704.73, "end": 1729.97, "text": " positive which one is strong more strong this one is stronger because the points are close to the straight line much more than the other scatter plot so you can say there exists negative relationship but that one is stronger than the other one this one is positive but the points are scattered", "tokens": [3353, 597, 472, 307, 2068, 544, 2068, 341, 472, 307, 7249, 570, 264, 2793, 366, 1998, 281, 264, 2997, 1622, 709, 544, 813, 264, 661, 34951, 7542, 370, 291, 393, 584, 456, 8198, 3671, 2480, 457, 300, 472, 307, 7249, 813, 264, 661, 472, 341, 472, 307, 3353, 457, 264, 2793, 366, 21986], "avg_logprob": -0.20601851244767508, "compression_ratio": 1.96, "no_speech_prob": 0.0, "words": [{"start": 1704.7299999999998, "end": 1705.4099999999999, "word": " positive", "probability": 0.1944580078125}, {"start": 1705.4099999999999, "end": 1706.09, "word": " which", "probability": 0.3359375}, {"start": 1706.09, "end": 1706.41, "word": " one", "probability": 0.9326171875}, {"start": 1706.41, "end": 1706.97, "word": " is", "probability": 0.9326171875}, {"start": 1706.97, "end": 1707.53, "word": " strong", "probability": 0.70263671875}, {"start": 1707.53, "end": 1708.67, "word": " more", "probability": 0.677734375}, {"start": 1708.67, "end": 1709.09, "word": " strong", "probability": 0.65283203125}, {"start": 1709.09, "end": 1712.17, "word": " this", "probability": 0.67919921875}, {"start": 1712.17, "end": 1712.33, "word": " one", "probability": 0.92919921875}, 
{"start": 1712.33, "end": 1713.89, "word": " is", "probability": 0.919921875}, {"start": 1713.89, "end": 1714.27, "word": " stronger", "probability": 0.68603515625}, {"start": 1714.27, "end": 1715.11, "word": " because", "probability": 0.89111328125}, {"start": 1715.11, "end": 1715.41, "word": " the", "probability": 0.92529296875}, {"start": 1715.41, "end": 1715.75, "word": " points", "probability": 0.94091796875}, {"start": 1715.75, "end": 1716.11, "word": " are", "probability": 0.95068359375}, {"start": 1716.11, "end": 1716.83, "word": " close", "probability": 0.77099609375}, {"start": 1716.83, "end": 1717.11, "word": " to", "probability": 0.90771484375}, {"start": 1717.11, "end": 1717.25, "word": " the", "probability": 0.9091796875}, {"start": 1717.25, "end": 1717.53, "word": " straight", "probability": 0.91796875}, {"start": 1717.53, "end": 1717.93, "word": " line", "probability": 0.9326171875}, {"start": 1717.93, "end": 1718.67, "word": " much", "probability": 0.90478515625}, {"start": 1718.67, "end": 1718.99, "word": " more", "probability": 0.9375}, {"start": 1718.99, "end": 1719.23, "word": " than", "probability": 0.9462890625}, {"start": 1719.23, "end": 1719.41, "word": " the", "probability": 0.919921875}, {"start": 1719.41, "end": 1719.71, "word": " other", "probability": 0.8828125}, {"start": 1719.71, "end": 1720.71, "word": " scatter", "probability": 0.67724609375}, {"start": 1720.71, "end": 1720.99, "word": " plot", "probability": 0.361083984375}, {"start": 1720.99, "end": 1721.37, "word": " so", "probability": 0.75634765625}, {"start": 1721.37, "end": 1721.47, "word": " you", "probability": 0.943359375}, {"start": 1721.47, "end": 1721.63, "word": " can", "probability": 0.947265625}, {"start": 1721.63, "end": 1721.91, "word": " say", "probability": 0.75439453125}, {"start": 1721.91, "end": 1722.35, "word": " there", "probability": 0.9013671875}, {"start": 1722.35, "end": 1722.73, "word": " exists", "probability": 0.83984375}, {"start": 1722.73, "end": 
1723.41, "word": " negative", "probability": 0.5625}, {"start": 1723.41, "end": 1724.03, "word": " relationship", "probability": 0.94775390625}, {"start": 1724.03, "end": 1724.41, "word": " but", "probability": 0.73193359375}, {"start": 1724.41, "end": 1724.63, "word": " that", "probability": 0.94140625}, {"start": 1724.63, "end": 1724.81, "word": " one", "probability": 0.93115234375}, {"start": 1724.81, "end": 1724.95, "word": " is", "probability": 0.94873046875}, {"start": 1724.95, "end": 1725.29, "word": " stronger", "probability": 0.849609375}, {"start": 1725.29, "end": 1725.65, "word": " than", "probability": 0.93505859375}, {"start": 1725.65, "end": 1725.81, "word": " the", "probability": 0.92138671875}, {"start": 1725.81, "end": 1725.99, "word": " other", "probability": 0.88916015625}, {"start": 1725.99, "end": 1726.29, "word": " one", "probability": 0.84326171875}, {"start": 1726.29, "end": 1726.93, "word": " this", "probability": 0.9052734375}, {"start": 1726.93, "end": 1727.17, "word": " one", "probability": 0.92529296875}, {"start": 1727.17, "end": 1727.39, "word": " is", "probability": 0.947265625}, {"start": 1727.39, "end": 1727.83, "word": " positive", "probability": 0.935546875}, {"start": 1727.83, "end": 1728.97, "word": " but", "probability": 0.91455078125}, {"start": 1728.97, "end": 1729.13, "word": " the", "probability": 0.9189453125}, {"start": 1729.13, "end": 1729.35, "word": " points", "probability": 0.9462890625}, {"start": 1729.35, "end": 1729.55, "word": " are", "probability": 0.9462890625}, {"start": 1729.55, "end": 1729.97, "word": " scattered", "probability": 0.83251953125}], "temperature": 1.0}, {"id": 65, "seek": 175618, "start": 1731.74, "end": 1756.18, "text": " around the straight line so you can tell the direction and sometimes sometimes not all the time you can tell the strength sometimes it's very clear that the relation is strong if the points are very close straight line that means the relation is strong now the other one the 
last one here", "tokens": [926, 264, 2997, 1622, 370, 291, 393, 980, 264, 3513, 293, 2171, 2171, 406, 439, 264, 565, 291, 393, 980, 264, 3800, 2171, 309, 311, 588, 1850, 300, 264, 9721, 307, 2068, 498, 264, 2793, 366, 588, 1998, 2997, 1622, 300, 1355, 264, 9721, 307, 2068, 586, 264, 661, 472, 264, 1036, 472, 510], "avg_logprob": -0.16008523106575012, "compression_ratio": 2.0069444444444446, "no_speech_prob": 0.0, "words": [{"start": 1731.7399999999998, "end": 1732.3799999999999, "word": " around", "probability": 0.6796875}, {"start": 1732.3799999999999, "end": 1733.02, "word": " the", "probability": 0.7548828125}, {"start": 1733.02, "end": 1733.32, "word": " straight", "probability": 0.7060546875}, {"start": 1733.32, "end": 1733.66, "word": " line", "probability": 0.92431640625}, {"start": 1733.66, "end": 1734.62, "word": " so", "probability": 0.27587890625}, {"start": 1734.62, "end": 1734.8, "word": " you", "probability": 0.9296875}, {"start": 1734.8, "end": 1735.06, "word": " can", "probability": 0.94580078125}, {"start": 1735.06, "end": 1735.4, "word": " tell", "probability": 0.88916015625}, {"start": 1735.4, "end": 1736.1, "word": " the", "probability": 0.89697265625}, {"start": 1736.1, "end": 1736.58, "word": " direction", "probability": 0.9658203125}, {"start": 1736.58, "end": 1738.08, "word": " and", "probability": 0.74462890625}, {"start": 1738.08, "end": 1738.78, "word": " sometimes", "probability": 0.9384765625}, {"start": 1738.78, "end": 1739.48, "word": " sometimes", "probability": 0.46728515625}, {"start": 1739.48, "end": 1739.7, "word": " not", "probability": 0.779296875}, {"start": 1739.7, "end": 1739.88, "word": " all", "probability": 0.94384765625}, {"start": 1739.88, "end": 1740.0, "word": " the", "probability": 0.91552734375}, {"start": 1740.0, "end": 1740.2, "word": " time", "probability": 0.890625}, {"start": 1740.2, "end": 1740.32, "word": " you", "probability": 0.9404296875}, {"start": 1740.32, "end": 1740.58, "word": " can", "probability": 
0.94482421875}, {"start": 1740.58, "end": 1740.9, "word": " tell", "probability": 0.8857421875}, {"start": 1740.9, "end": 1741.1, "word": " the", "probability": 0.91259765625}, {"start": 1741.1, "end": 1741.48, "word": " strength", "probability": 0.7236328125}, {"start": 1741.48, "end": 1743.24, "word": " sometimes", "probability": 0.7490234375}, {"start": 1743.24, "end": 1744.24, "word": " it's", "probability": 0.96923828125}, {"start": 1744.24, "end": 1744.64, "word": " very", "probability": 0.857421875}, {"start": 1744.64, "end": 1744.88, "word": " clear", "probability": 0.89404296875}, {"start": 1744.88, "end": 1745.24, "word": " that", "probability": 0.9345703125}, {"start": 1745.24, "end": 1745.78, "word": " the", "probability": 0.92236328125}, {"start": 1745.78, "end": 1746.1, "word": " relation", "probability": 0.95654296875}, {"start": 1746.1, "end": 1746.36, "word": " is", "probability": 0.9501953125}, {"start": 1746.36, "end": 1746.92, "word": " strong", "probability": 0.8828125}, {"start": 1746.92, "end": 1747.58, "word": " if", "probability": 0.9189453125}, {"start": 1747.58, "end": 1747.72, "word": " the", "probability": 0.9248046875}, {"start": 1747.72, "end": 1747.96, "word": " points", "probability": 0.92626953125}, {"start": 1747.96, "end": 1748.18, "word": " are", "probability": 0.9404296875}, {"start": 1748.18, "end": 1748.44, "word": " very", "probability": 0.84814453125}, {"start": 1748.44, "end": 1748.8, "word": " close", "probability": 0.857421875}, {"start": 1748.8, "end": 1749.26, "word": " straight", "probability": 0.544921875}, {"start": 1749.26, "end": 1749.54, "word": " line", "probability": 0.89990234375}, {"start": 1749.54, "end": 1749.74, "word": " that", "probability": 0.919921875}, {"start": 1749.74, "end": 1750.16, "word": " means", "probability": 0.9326171875}, {"start": 1750.16, "end": 1751.48, "word": " the", "probability": 0.912109375}, {"start": 1751.48, "end": 1751.82, "word": " relation", "probability": 0.96923828125}, 
{"start": 1751.82, "end": 1752.04, "word": " is", "probability": 0.94873046875}, {"start": 1752.04, "end": 1752.38, "word": " strong", "probability": 0.90380859375}, {"start": 1752.38, "end": 1754.34, "word": " now", "probability": 0.82568359375}, {"start": 1754.34, "end": 1754.74, "word": " the", "probability": 0.9091796875}, {"start": 1754.74, "end": 1755.0, "word": " other", "probability": 0.88916015625}, {"start": 1755.0, "end": 1755.24, "word": " one", "probability": 0.927734375}, {"start": 1755.24, "end": 1755.52, "word": " the", "probability": 0.85205078125}, {"start": 1755.52, "end": 1755.72, "word": " last", "probability": 0.8857421875}, {"start": 1755.72, "end": 1755.94, "word": " one", "probability": 0.9248046875}, {"start": 1755.94, "end": 1756.18, "word": " here", "probability": 0.84912109375}], "temperature": 1.0}, {"id": 66, "seek": 178537, "start": 1758.85, "end": 1785.37, "text": " As X increases, Y stays at the same value. For example, if Y is 20 and X is 1. X is 1, Y is 20. X increases to 2, for example. Y is still 20. So that means there is no relationship between X and Y. It's a constant. Y equals a constant value. 
Whatever X is,", "tokens": [1018, 1783, 8637, 11, 398, 10834, 412, 264, 912, 2158, 13, 1171, 1365, 11, 498, 398, 307, 945, 293, 1783, 307, 502, 13, 1783, 307, 502, 11, 398, 307, 945, 13, 1783, 8637, 281, 568, 11, 337, 1365, 13, 398, 307, 920, 945, 13, 407, 300, 1355, 456, 307, 572, 2480, 1296, 1783, 293, 398, 13, 467, 311, 257, 5754, 13, 398, 6915, 257, 5754, 2158, 13, 8541, 1783, 307, 11], "avg_logprob": -0.1729600721349319, "compression_ratio": 1.5575757575757576, "no_speech_prob": 0.0, "words": [{"start": 1758.85, "end": 1759.25, "word": " As", "probability": 0.7548828125}, {"start": 1759.25, "end": 1759.51, "word": " X", "probability": 0.449951171875}, {"start": 1759.51, "end": 1759.99, "word": " increases,", "probability": 0.94140625}, {"start": 1760.97, "end": 1761.31, "word": " Y", "probability": 0.97216796875}, {"start": 1761.31, "end": 1762.09, "word": " stays", "probability": 0.5546875}, {"start": 1762.09, "end": 1763.07, "word": " at", "probability": 0.8427734375}, {"start": 1763.07, "end": 1763.27, "word": " the", "probability": 0.9091796875}, {"start": 1763.27, "end": 1763.49, "word": " same", "probability": 0.90771484375}, {"start": 1763.49, "end": 1763.85, "word": " value.", "probability": 0.96875}, {"start": 1763.97, "end": 1764.09, "word": " For", "probability": 0.93896484375}, {"start": 1764.09, "end": 1764.47, "word": " example,", "probability": 0.96875}, {"start": 1765.13, "end": 1765.33, "word": " if", "probability": 0.93212890625}, {"start": 1765.33, "end": 1765.59, "word": " Y", "probability": 0.9541015625}, {"start": 1765.59, "end": 1765.73, "word": " is", "probability": 0.8994140625}, {"start": 1765.73, "end": 1766.07, "word": " 20", "probability": 0.7978515625}, {"start": 1766.07, "end": 1767.41, "word": " and", "probability": 0.703125}, {"start": 1767.41, "end": 1767.65, "word": " X", "probability": 0.98193359375}, {"start": 1767.65, "end": 1767.81, "word": " is", "probability": 0.94482421875}, {"start": 1767.81, "end": 1768.07, "word": " 
1.", "probability": 0.8837890625}, {"start": 1768.57, "end": 1768.91, "word": " X", "probability": 0.91357421875}, {"start": 1768.91, "end": 1769.01, "word": " is", "probability": 0.458984375}, {"start": 1769.01, "end": 1769.13, "word": " 1,", "probability": 0.89794921875}, {"start": 1769.21, "end": 1769.29, "word": " Y", "probability": 0.923828125}, {"start": 1769.29, "end": 1769.45, "word": " is", "probability": 0.943359375}, {"start": 1769.45, "end": 1769.77, "word": " 20.", "probability": 0.93701171875}, {"start": 1770.39, "end": 1770.87, "word": " X", "probability": 0.8466796875}, {"start": 1770.87, "end": 1771.43, "word": " increases", "probability": 0.92333984375}, {"start": 1771.43, "end": 1771.67, "word": " to", "probability": 0.95361328125}, {"start": 1771.67, "end": 1771.87, "word": " 2,", "probability": 0.92041015625}, {"start": 1771.93, "end": 1772.05, "word": " for", "probability": 0.7529296875}, {"start": 1772.05, "end": 1772.43, "word": " example.", "probability": 0.96875}, {"start": 1772.87, "end": 1773.15, "word": " Y", "probability": 0.91357421875}, {"start": 1773.15, "end": 1773.37, "word": " is", "probability": 0.80859375}, {"start": 1773.37, "end": 1773.53, "word": " still", "probability": 0.94677734375}, {"start": 1773.53, "end": 1773.87, "word": " 20.", "probability": 0.91259765625}, {"start": 1774.65, "end": 1775.19, "word": " So", "probability": 0.8349609375}, {"start": 1775.19, "end": 1775.43, "word": " that", "probability": 0.70458984375}, {"start": 1775.43, "end": 1775.63, "word": " means", "probability": 0.923828125}, {"start": 1775.63, "end": 1775.81, "word": " there", "probability": 0.84423828125}, {"start": 1775.81, "end": 1775.95, "word": " is", "probability": 0.8740234375}, {"start": 1775.95, "end": 1776.15, "word": " no", "probability": 0.94921875}, {"start": 1776.15, "end": 1776.63, "word": " relationship", "probability": 0.896484375}, {"start": 1776.63, "end": 1777.01, "word": " between", "probability": 0.86376953125}, 
{"start": 1777.01, "end": 1777.23, "word": " X", "probability": 0.83056640625}, {"start": 1777.23, "end": 1777.35, "word": " and", "probability": 0.92578125}, {"start": 1777.35, "end": 1777.51, "word": " Y.", "probability": 0.99072265625}, {"start": 1779.21, "end": 1779.75, "word": " It's", "probability": 0.893798828125}, {"start": 1779.75, "end": 1779.85, "word": " a", "probability": 0.94873046875}, {"start": 1779.85, "end": 1780.13, "word": " constant.", "probability": 0.94921875}, {"start": 1780.33, "end": 1780.51, "word": " Y", "probability": 0.9794921875}, {"start": 1780.51, "end": 1780.89, "word": " equals", "probability": 0.71142578125}, {"start": 1780.89, "end": 1781.07, "word": " a", "probability": 0.93505859375}, {"start": 1781.07, "end": 1781.35, "word": " constant", "probability": 0.9326171875}, {"start": 1781.35, "end": 1781.83, "word": " value.", "probability": 0.96484375}, {"start": 1782.69, "end": 1783.19, "word": " Whatever", "probability": 0.919921875}, {"start": 1783.19, "end": 1784.13, "word": " X", "probability": 0.97705078125}, {"start": 1784.13, "end": 1785.37, "word": " is,", "probability": 0.92822265625}], "temperature": 1.0}, {"id": 67, "seek": 181215, "start": 1786.69, "end": 1812.15, "text": " Y will have constant value. So that means there is no relationship between X and Y. Let's see how can we compute the correlation between two variables. 
For example, suppose we have data for father's height and son's height.", "tokens": [398, 486, 362, 5754, 2158, 13, 407, 300, 1355, 456, 307, 572, 2480, 1296, 1783, 293, 398, 13, 961, 311, 536, 577, 393, 321, 14722, 264, 20009, 1296, 732, 9102, 13, 1171, 1365, 11, 7297, 321, 362, 1412, 337, 3086, 311, 6681, 293, 1872, 311, 6681, 13], "avg_logprob": -0.1234537772834301, "compression_ratio": 1.4545454545454546, "no_speech_prob": 0.0, "words": [{"start": 1786.69, "end": 1787.27, "word": " Y", "probability": 0.47802734375}, {"start": 1787.27, "end": 1787.93, "word": " will", "probability": 0.880859375}, {"start": 1787.93, "end": 1788.35, "word": " have", "probability": 0.9423828125}, {"start": 1788.35, "end": 1789.25, "word": " constant", "probability": 0.837890625}, {"start": 1789.25, "end": 1789.69, "word": " value.", "probability": 0.9736328125}, {"start": 1790.17, "end": 1790.27, "word": " So", "probability": 0.89892578125}, {"start": 1790.27, "end": 1790.49, "word": " that", "probability": 0.82373046875}, {"start": 1790.49, "end": 1790.77, "word": " means", "probability": 0.9306640625}, {"start": 1790.77, "end": 1790.97, "word": " there", "probability": 0.83447265625}, {"start": 1790.97, "end": 1791.21, "word": " is", "probability": 0.916015625}, {"start": 1791.21, "end": 1791.69, "word": " no", "probability": 0.935546875}, {"start": 1791.69, "end": 1792.17, "word": " relationship", "probability": 0.9130859375}, {"start": 1792.17, "end": 1792.71, "word": " between", "probability": 0.90283203125}, {"start": 1792.71, "end": 1794.33, "word": " X", "probability": 0.7333984375}, {"start": 1794.33, "end": 1794.53, "word": " and", "probability": 0.92919921875}, {"start": 1794.53, "end": 1794.79, "word": " Y.", "probability": 0.98828125}, {"start": 1796.49, "end": 1797.35, "word": " Let's", "probability": 0.943115234375}, {"start": 1797.35, "end": 1797.45, "word": " see", "probability": 0.91162109375}, {"start": 1797.45, "end": 1797.57, "word": " how", "probability": 
0.9208984375}, {"start": 1797.57, "end": 1797.77, "word": " can", "probability": 0.7275390625}, {"start": 1797.77, "end": 1797.91, "word": " we", "probability": 0.9482421875}, {"start": 1797.91, "end": 1798.43, "word": " compute", "probability": 0.88525390625}, {"start": 1798.43, "end": 1801.15, "word": " the", "probability": 0.76904296875}, {"start": 1801.15, "end": 1801.85, "word": " correlation", "probability": 0.888671875}, {"start": 1801.85, "end": 1803.67, "word": " between", "probability": 0.87939453125}, {"start": 1803.67, "end": 1803.99, "word": " two", "probability": 0.90576171875}, {"start": 1803.99, "end": 1804.43, "word": " variables.", "probability": 0.94482421875}, {"start": 1804.79, "end": 1804.95, "word": " For", "probability": 0.95654296875}, {"start": 1804.95, "end": 1805.39, "word": " example,", "probability": 0.9736328125}, {"start": 1806.97, "end": 1807.35, "word": " suppose", "probability": 0.88720703125}, {"start": 1807.35, "end": 1807.53, "word": " we", "probability": 0.943359375}, {"start": 1807.53, "end": 1807.69, "word": " have", "probability": 0.94091796875}, {"start": 1807.69, "end": 1808.07, "word": " data", "probability": 0.9384765625}, {"start": 1808.07, "end": 1809.07, "word": " for", "probability": 0.9404296875}, {"start": 1809.07, "end": 1809.89, "word": " father's", "probability": 0.828369140625}, {"start": 1809.89, "end": 1810.29, "word": " height", "probability": 0.978515625}, {"start": 1810.29, "end": 1811.41, "word": " and", "probability": 0.92041015625}, {"start": 1811.41, "end": 1811.87, "word": " son's", "probability": 0.949462890625}, {"start": 1811.87, "end": 1812.15, "word": " height.", "probability": 0.95556640625}], "temperature": 1.0}, {"id": 68, "seek": 183923, "start": 1813.37, "end": 1839.23, "text": " And we are interested to see if father's height affects his son's height. So we have data for 10 observations here. Father number one, his height is 64 inches. 
And you know that inch equals 2.5.", "tokens": [400, 321, 366, 3102, 281, 536, 498, 3086, 311, 6681, 11807, 702, 1872, 311, 6681, 13, 407, 321, 362, 1412, 337, 1266, 18163, 510, 13, 7085, 1230, 472, 11, 702, 6681, 307, 12145, 8478, 13, 400, 291, 458, 300, 7227, 6915, 568, 13, 20, 13], "avg_logprob": -0.17663043154322583, "compression_ratio": 1.3541666666666667, "no_speech_prob": 0.0, "words": [{"start": 1813.37, "end": 1813.69, "word": " And", "probability": 0.74560546875}, {"start": 1813.69, "end": 1813.81, "word": " we", "probability": 0.8984375}, {"start": 1813.81, "end": 1813.93, "word": " are", "probability": 0.900390625}, {"start": 1813.93, "end": 1814.39, "word": " interested", "probability": 0.87255859375}, {"start": 1814.39, "end": 1814.93, "word": " to", "probability": 0.9296875}, {"start": 1814.93, "end": 1815.19, "word": " see", "probability": 0.92236328125}, {"start": 1815.19, "end": 1815.51, "word": " if", "probability": 0.947265625}, {"start": 1815.51, "end": 1816.11, "word": " father's", "probability": 0.817626953125}, {"start": 1816.11, "end": 1816.51, "word": " height", "probability": 0.96337890625}, {"start": 1816.51, "end": 1817.61, "word": " affects", "probability": 0.68798828125}, {"start": 1817.61, "end": 1818.01, "word": " his", "probability": 0.9599609375}, {"start": 1818.01, "end": 1818.39, "word": " son's", "probability": 0.915771484375}, {"start": 1818.39, "end": 1818.65, "word": " height.", "probability": 0.94384765625}, {"start": 1819.63, "end": 1820.03, "word": " So", "probability": 0.8623046875}, {"start": 1820.03, "end": 1820.13, "word": " we", "probability": 0.7255859375}, {"start": 1820.13, "end": 1820.25, "word": " have", "probability": 0.947265625}, {"start": 1820.25, "end": 1820.59, "word": " data", "probability": 0.9423828125}, {"start": 1820.59, "end": 1821.11, "word": " for", "probability": 0.818359375}, {"start": 1821.11, "end": 1821.73, "word": " 10", "probability": 0.5224609375}, {"start": 1821.73, "end": 1823.85, "word": 
" observations", "probability": 0.7841796875}, {"start": 1823.85, "end": 1824.27, "word": " here.", "probability": 0.8427734375}, {"start": 1825.45, "end": 1825.81, "word": " Father", "probability": 0.81689453125}, {"start": 1825.81, "end": 1826.03, "word": " number", "probability": 0.7646484375}, {"start": 1826.03, "end": 1826.37, "word": " one,", "probability": 0.66162109375}, {"start": 1827.39, "end": 1827.87, "word": " his", "probability": 0.96435546875}, {"start": 1827.87, "end": 1828.61, "word": " height", "probability": 0.95556640625}, {"start": 1828.61, "end": 1830.21, "word": " is", "probability": 0.9375}, {"start": 1830.21, "end": 1830.89, "word": " 64", "probability": 0.953125}, {"start": 1830.89, "end": 1831.55, "word": " inches.", "probability": 0.87646484375}, {"start": 1831.75, "end": 1831.93, "word": " And", "probability": 0.95361328125}, {"start": 1831.93, "end": 1832.09, "word": " you", "probability": 0.87548828125}, {"start": 1832.09, "end": 1832.25, "word": " know", "probability": 0.88916015625}, {"start": 1832.25, "end": 1832.45, "word": " that", "probability": 0.92529296875}, {"start": 1832.45, "end": 1832.71, "word": " inch", "probability": 0.859375}, {"start": 1832.71, "end": 1833.29, "word": " equals", "probability": 0.9248046875}, {"start": 1833.29, "end": 1838.57, "word": " 2", "probability": 0.27001953125}, {"start": 1838.57, "end": 1839.23, "word": ".5.", "probability": 0.987548828125}], "temperature": 1.0}, {"id": 69, "seek": 186692, "start": 1843.52, "end": 1866.92, "text": " So X is 64, Sun's height is 65. X is 68, Sun's height is 67 and so on. 
Sometimes, if the dataset is small enough, as in this example, we have just 10 observations, you can tell the direction.", "tokens": [407, 1783, 307, 12145, 11, 6163, 311, 6681, 307, 11624, 13, 1783, 307, 23317, 11, 6163, 311, 6681, 307, 23879, 293, 370, 322, 13, 4803, 11, 498, 264, 28872, 307, 1359, 1547, 11, 382, 294, 341, 1365, 11, 321, 362, 445, 1266, 18163, 11, 291, 393, 980, 264, 3513, 13], "avg_logprob": -0.2345281927024617, "compression_ratio": 1.411764705882353, "no_speech_prob": 0.0, "words": [{"start": 1843.52, "end": 1844.18, "word": " So", "probability": 0.26318359375}, {"start": 1844.18, "end": 1844.48, "word": " X", "probability": 0.423583984375}, {"start": 1844.48, "end": 1844.62, "word": " is", "probability": 0.67431640625}, {"start": 1844.62, "end": 1845.16, "word": " 64,", "probability": 0.83154296875}, {"start": 1846.04, "end": 1846.48, "word": " Sun's", "probability": 0.85546875}, {"start": 1846.48, "end": 1846.64, "word": " height", "probability": 0.892578125}, {"start": 1846.64, "end": 1846.82, "word": " is", "probability": 0.8662109375}, {"start": 1846.82, "end": 1847.24, "word": " 65.", "probability": 0.97607421875}, {"start": 1849.38, "end": 1850.04, "word": " X", "probability": 0.93994140625}, {"start": 1850.04, "end": 1850.16, "word": " is", "probability": 0.4755859375}, {"start": 1850.16, "end": 1850.74, "word": " 68,", "probability": 0.98583984375}, {"start": 1852.32, "end": 1852.92, "word": " Sun's", "probability": 0.954833984375}, {"start": 1852.92, "end": 1853.12, "word": " height", "probability": 0.9462890625}, {"start": 1853.12, "end": 1853.34, "word": " is", "probability": 0.92724609375}, {"start": 1853.34, "end": 1853.8, "word": " 67", "probability": 0.95263671875}, {"start": 1853.8, "end": 1854.02, "word": " and", "probability": 0.6279296875}, {"start": 1854.02, "end": 1854.18, "word": " so", "probability": 0.94921875}, {"start": 1854.18, "end": 1854.4, "word": " on.", "probability": 0.94921875}, {"start": 1855.26, "end": 
1855.92, "word": " Sometimes,", "probability": 0.8876953125}, {"start": 1856.94, "end": 1858.26, "word": " if", "probability": 0.9287109375}, {"start": 1858.26, "end": 1858.46, "word": " the", "probability": 0.91064453125}, {"start": 1858.46, "end": 1858.82, "word": " dataset", "probability": 0.57958984375}, {"start": 1858.82, "end": 1859.14, "word": " is", "probability": 0.95068359375}, {"start": 1859.14, "end": 1859.46, "word": " small", "probability": 0.9365234375}, {"start": 1859.46, "end": 1859.9, "word": " enough,", "probability": 0.8515625}, {"start": 1861.22, "end": 1861.42, "word": " as", "probability": 0.83251953125}, {"start": 1861.42, "end": 1861.56, "word": " in", "probability": 0.8818359375}, {"start": 1861.56, "end": 1861.74, "word": " this", "probability": 0.94140625}, {"start": 1861.74, "end": 1862.08, "word": " example,", "probability": 0.96435546875}, {"start": 1862.16, "end": 1862.26, "word": " we", "probability": 0.7646484375}, {"start": 1862.26, "end": 1862.36, "word": " have", "probability": 0.9130859375}, {"start": 1862.36, "end": 1862.6, "word": " just", "probability": 0.87548828125}, {"start": 1862.6, "end": 1862.78, "word": " 10", "probability": 0.67236328125}, {"start": 1862.78, "end": 1863.28, "word": " observations,", "probability": 0.7958984375}, {"start": 1864.46, "end": 1864.7, "word": " you", "probability": 0.9306640625}, {"start": 1864.7, "end": 1864.94, "word": " can", "probability": 0.94677734375}, {"start": 1864.94, "end": 1865.26, "word": " tell", "probability": 0.8984375}, {"start": 1865.26, "end": 1866.36, "word": " the", "probability": 0.8564453125}, {"start": 1866.36, "end": 1866.92, "word": " direction.", "probability": 0.96728515625}], "temperature": 1.0}, {"id": 70, "seek": 189322, "start": 1868.4, "end": 1893.22, "text": " I mean, you can say, yes, for this specific example, there exists positive relationship between x and y. 
But if the data set is large, it's very hard to figure out if the relation is positive or negative. So we have to find or to compute the coefficient of correlation in order to see there exists positive, negative, strong, weak, or moderate.", "tokens": [286, 914, 11, 291, 393, 584, 11, 2086, 11, 337, 341, 2685, 1365, 11, 456, 8198, 3353, 2480, 1296, 2031, 293, 288, 13, 583, 498, 264, 1412, 992, 307, 2416, 11, 309, 311, 588, 1152, 281, 2573, 484, 498, 264, 9721, 307, 3353, 420, 3671, 13, 407, 321, 362, 281, 915, 420, 281, 14722, 264, 17619, 295, 20009, 294, 1668, 281, 536, 456, 8198, 3353, 11, 3671, 11, 2068, 11, 5336, 11, 420, 18174, 13], "avg_logprob": -0.13384046523194565, "compression_ratio": 1.6995073891625616, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1868.4, "end": 1868.64, "word": " I", "probability": 0.81591796875}, {"start": 1868.64, "end": 1868.78, "word": " mean,", "probability": 0.9501953125}, {"start": 1868.88, "end": 1868.9, "word": " you", "probability": 0.90380859375}, {"start": 1868.9, "end": 1869.06, "word": " can", "probability": 0.943359375}, {"start": 1869.06, "end": 1869.44, "word": " say,", "probability": 0.9091796875}, {"start": 1869.64, "end": 1869.86, "word": " yes,", "probability": 0.83154296875}, {"start": 1870.34, "end": 1870.52, "word": " for", "probability": 0.91845703125}, {"start": 1870.52, "end": 1870.82, "word": " this", "probability": 0.94921875}, {"start": 1870.82, "end": 1871.48, "word": " specific", "probability": 0.908203125}, {"start": 1871.48, "end": 1872.06, "word": " example,", "probability": 0.9736328125}, {"start": 1872.58, "end": 1872.88, "word": " there", "probability": 0.80859375}, {"start": 1872.88, "end": 1873.36, "word": " exists", "probability": 0.740234375}, {"start": 1873.36, "end": 1873.92, "word": " positive", "probability": 0.82177734375}, {"start": 1873.92, "end": 1874.58, "word": " relationship", "probability": 0.90087890625}, {"start": 1874.58, "end": 1874.94, "word": " between", 
"probability": 0.87451171875}, {"start": 1874.94, "end": 1875.14, "word": " x", "probability": 0.587890625}, {"start": 1875.14, "end": 1875.28, "word": " and", "probability": 0.94873046875}, {"start": 1875.28, "end": 1875.52, "word": " y.", "probability": 0.9931640625}, {"start": 1876.74, "end": 1876.96, "word": " But", "probability": 0.9462890625}, {"start": 1876.96, "end": 1877.16, "word": " if", "probability": 0.91650390625}, {"start": 1877.16, "end": 1877.36, "word": " the", "probability": 0.91162109375}, {"start": 1877.36, "end": 1877.58, "word": " data", "probability": 0.59033203125}, {"start": 1877.58, "end": 1877.8, "word": " set", "probability": 0.9423828125}, {"start": 1877.8, "end": 1877.98, "word": " is", "probability": 0.947265625}, {"start": 1877.98, "end": 1878.52, "word": " large,", "probability": 0.96630859375}, {"start": 1879.06, "end": 1879.54, "word": " it's", "probability": 0.966552734375}, {"start": 1879.54, "end": 1879.78, "word": " very", "probability": 0.849609375}, {"start": 1879.78, "end": 1880.24, "word": " hard", "probability": 0.89208984375}, {"start": 1880.24, "end": 1880.82, "word": " to", "probability": 0.96337890625}, {"start": 1880.82, "end": 1881.0, "word": " figure", "probability": 0.97509765625}, {"start": 1881.0, "end": 1881.3, "word": " out", "probability": 0.87939453125}, {"start": 1881.3, "end": 1881.56, "word": " if", "probability": 0.943359375}, {"start": 1881.56, "end": 1881.66, "word": " the", "probability": 0.912109375}, {"start": 1881.66, "end": 1881.94, "word": " relation", "probability": 0.86083984375}, {"start": 1881.94, "end": 1882.08, "word": " is", "probability": 0.74658203125}, {"start": 1882.08, "end": 1882.38, "word": " positive", "probability": 0.93505859375}, {"start": 1882.38, "end": 1882.62, "word": " or", "probability": 0.96533203125}, {"start": 1882.62, "end": 1882.98, "word": " negative.", "probability": 0.9482421875}, {"start": 1883.4, "end": 1883.96, "word": " So", "probability": 0.96142578125}, 
{"start": 1883.96, "end": 1884.12, "word": " we", "probability": 0.8720703125}, {"start": 1884.12, "end": 1884.3, "word": " have", "probability": 0.9482421875}, {"start": 1884.3, "end": 1884.44, "word": " to", "probability": 0.9697265625}, {"start": 1884.44, "end": 1884.72, "word": " find", "probability": 0.9091796875}, {"start": 1884.72, "end": 1884.9, "word": " or", "probability": 0.6865234375}, {"start": 1884.9, "end": 1885.02, "word": " to", "probability": 0.76171875}, {"start": 1885.02, "end": 1885.4, "word": " compute", "probability": 0.90625}, {"start": 1885.4, "end": 1886.4, "word": " the", "probability": 0.8974609375}, {"start": 1886.4, "end": 1887.02, "word": " coefficient", "probability": 0.9384765625}, {"start": 1887.02, "end": 1887.4, "word": " of", "probability": 0.96142578125}, {"start": 1887.4, "end": 1887.86, "word": " correlation", "probability": 0.90234375}, {"start": 1887.86, "end": 1888.22, "word": " in", "probability": 0.869140625}, {"start": 1888.22, "end": 1888.4, "word": " order", "probability": 0.9111328125}, {"start": 1888.4, "end": 1888.66, "word": " to", "probability": 0.96728515625}, {"start": 1888.66, "end": 1888.92, "word": " see", "probability": 0.5771484375}, {"start": 1888.92, "end": 1889.7, "word": " there", "probability": 0.53173828125}, {"start": 1889.7, "end": 1890.1, "word": " exists", "probability": 0.78662109375}, {"start": 1890.1, "end": 1890.62, "word": " positive,", "probability": 0.92041015625}, {"start": 1890.92, "end": 1891.24, "word": " negative,", "probability": 0.9384765625}, {"start": 1892.04, "end": 1892.36, "word": " strong,", "probability": 0.892578125}, {"start": 1892.5, "end": 1892.74, "word": " weak,", "probability": 0.97998046875}, {"start": 1892.82, "end": 1892.94, "word": " or", "probability": 0.97021484375}, {"start": 1892.94, "end": 1893.22, "word": " moderate.", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 71, "seek": 192122, "start": 1893.98, "end": 1921.22, "text": " but again you can 
tell from this simple example yes there is a positive relationship because just if you pick random numbers here for example 64 father's height his son's height 65 if we move up here to 70 for father's height his son's height is going to be 72 so as father heights increases", "tokens": [457, 797, 291, 393, 980, 490, 341, 2199, 1365, 2086, 456, 307, 257, 3353, 2480, 570, 445, 498, 291, 1888, 4974, 3547, 510, 337, 1365, 12145, 3086, 311, 6681, 702, 1872, 311, 6681, 11624, 498, 321, 1286, 493, 510, 281, 5285, 337, 3086, 311, 6681, 702, 1872, 311, 6681, 307, 516, 281, 312, 18731, 370, 382, 3086, 25930, 8637], "avg_logprob": -0.18476562922199566, "compression_ratio": 1.7380952380952381, "no_speech_prob": 0.0, "words": [{"start": 1893.98, "end": 1894.32, "word": " but", "probability": 0.2442626953125}, {"start": 1894.32, "end": 1894.66, "word": " again", "probability": 0.9033203125}, {"start": 1894.66, "end": 1894.92, "word": " you", "probability": 0.74365234375}, {"start": 1894.92, "end": 1895.14, "word": " can", "probability": 0.9423828125}, {"start": 1895.14, "end": 1895.34, "word": " tell", "probability": 0.84033203125}, {"start": 1895.34, "end": 1895.6, "word": " from", "probability": 0.86474609375}, {"start": 1895.6, "end": 1896.32, "word": " this", "probability": 0.9443359375}, {"start": 1896.32, "end": 1897.82, "word": " simple", "probability": 0.7998046875}, {"start": 1897.82, "end": 1898.34, "word": " example", "probability": 0.9765625}, {"start": 1898.34, "end": 1899.14, "word": " yes", "probability": 0.53515625}, {"start": 1899.14, "end": 1899.34, "word": " there", "probability": 0.638671875}, {"start": 1899.34, "end": 1899.46, "word": " is", "probability": 0.84912109375}, {"start": 1899.46, "end": 1899.6, "word": " a", "probability": 0.966796875}, {"start": 1899.6, "end": 1899.88, "word": " positive", "probability": 0.8505859375}, {"start": 1899.88, "end": 1900.28, "word": " relationship", "probability": 0.85791015625}, {"start": 1900.28, "end": 1900.82, 
"word": " because", "probability": 0.767578125}, {"start": 1900.82, "end": 1901.66, "word": " just", "probability": 0.81103515625}, {"start": 1901.66, "end": 1901.92, "word": " if", "probability": 0.94775390625}, {"start": 1901.92, "end": 1902.08, "word": " you", "probability": 0.9580078125}, {"start": 1902.08, "end": 1902.84, "word": " pick", "probability": 0.88671875}, {"start": 1902.84, "end": 1903.36, "word": " random", "probability": 0.708984375}, {"start": 1903.36, "end": 1903.94, "word": " numbers", "probability": 0.90869140625}, {"start": 1903.94, "end": 1904.32, "word": " here", "probability": 0.82373046875}, {"start": 1904.32, "end": 1904.66, "word": " for", "probability": 0.8408203125}, {"start": 1904.66, "end": 1905.06, "word": " example", "probability": 0.978515625}, {"start": 1905.06, "end": 1906.02, "word": " 64", "probability": 0.810546875}, {"start": 1906.02, "end": 1906.86, "word": " father's", "probability": 0.792236328125}, {"start": 1906.86, "end": 1907.14, "word": " height", "probability": 0.96923828125}, {"start": 1907.14, "end": 1907.4, "word": " his", "probability": 0.8388671875}, {"start": 1907.4, "end": 1907.7, "word": " son's", "probability": 0.8935546875}, {"start": 1907.7, "end": 1907.84, "word": " height", "probability": 0.9453125}, {"start": 1907.84, "end": 1908.42, "word": " 65", "probability": 0.8896484375}, {"start": 1908.42, "end": 1909.24, "word": " if", "probability": 0.75927734375}, {"start": 1909.24, "end": 1909.52, "word": " we", "probability": 0.935546875}, {"start": 1909.52, "end": 1911.02, "word": " move", "probability": 0.92919921875}, {"start": 1911.02, "end": 1911.42, "word": " up", "probability": 0.974609375}, {"start": 1911.42, "end": 1911.76, "word": " here", "probability": 0.84716796875}, {"start": 1911.76, "end": 1911.94, "word": " to", "probability": 0.96923828125}, {"start": 1911.94, "end": 1912.38, "word": " 70", "probability": 0.84912109375}, {"start": 1912.38, "end": 1912.86, "word": " for", "probability": 
0.82763671875}, {"start": 1912.86, "end": 1913.34, "word": " father's", "probability": 0.90478515625}, {"start": 1913.34, "end": 1913.64, "word": " height", "probability": 0.9482421875}, {"start": 1913.64, "end": 1914.6, "word": " his", "probability": 0.92724609375}, {"start": 1914.6, "end": 1915.02, "word": " son's", "probability": 0.954345703125}, {"start": 1915.02, "end": 1915.32, "word": " height", "probability": 0.95556640625}, {"start": 1915.32, "end": 1915.82, "word": " is", "probability": 0.4462890625}, {"start": 1915.82, "end": 1915.92, "word": " going", "probability": 0.71826171875}, {"start": 1915.92, "end": 1916.02, "word": " to", "probability": 0.970703125}, {"start": 1916.02, "end": 1916.18, "word": " be", "probability": 0.95068359375}, {"start": 1916.18, "end": 1916.8, "word": " 72", "probability": 0.97607421875}, {"start": 1916.8, "end": 1919.14, "word": " so", "probability": 0.529296875}, {"start": 1919.14, "end": 1919.54, "word": " as", "probability": 0.95947265625}, {"start": 1919.54, "end": 1920.16, "word": " father", "probability": 0.8173828125}, {"start": 1920.16, "end": 1920.66, "word": " heights", "probability": 0.59228515625}, {"start": 1920.66, "end": 1921.22, "word": " increases", "probability": 0.912109375}], "temperature": 1.0}, {"id": 72, "seek": 194078, "start": 1922.3, "end": 1940.78, "text": " Also, son's height increases. For example, 77, father's height. His son's height is 76. So that means there exists positive relationship. Make sense? 
But again, for large data, you cannot tell that.", "tokens": [2743, 11, 1872, 311, 6681, 8637, 13, 1171, 1365, 11, 25546, 11, 3086, 311, 6681, 13, 2812, 1872, 311, 6681, 307, 24733, 13, 407, 300, 1355, 456, 8198, 3353, 2480, 13, 4387, 2020, 30, 583, 797, 11, 337, 2416, 1412, 11, 291, 2644, 980, 300, 13], "avg_logprob": -0.22855717831469596, "compression_ratio": 1.3724137931034484, "no_speech_prob": 0.0, "words": [{"start": 1922.3, "end": 1922.94, "word": " Also,", "probability": 0.6875}, {"start": 1923.86, "end": 1924.28, "word": " son's", "probability": 0.6654052734375}, {"start": 1924.28, "end": 1924.54, "word": " height", "probability": 0.96533203125}, {"start": 1924.54, "end": 1925.02, "word": " increases.", "probability": 0.8720703125}, {"start": 1926.32, "end": 1926.84, "word": " For", "probability": 0.95654296875}, {"start": 1926.84, "end": 1927.6, "word": " example,", "probability": 0.916015625}, {"start": 1927.8, "end": 1928.64, "word": " 77,", "probability": 0.75634765625}, {"start": 1928.84, "end": 1929.76, "word": " father's", "probability": 0.73876953125}, {"start": 1929.76, "end": 1930.2, "word": " height.", "probability": 0.978515625}, {"start": 1930.88, "end": 1931.06, "word": " His", "probability": 0.806640625}, {"start": 1931.06, "end": 1931.42, "word": " son's", "probability": 0.949951171875}, {"start": 1931.42, "end": 1931.7, "word": " height", "probability": 0.95703125}, {"start": 1931.7, "end": 1932.1, "word": " is", "probability": 0.947265625}, {"start": 1932.1, "end": 1932.74, "word": " 76.", "probability": 0.955078125}, {"start": 1933.3, "end": 1933.74, "word": " So", "probability": 0.92724609375}, {"start": 1933.74, "end": 1933.94, "word": " that", "probability": 0.82763671875}, {"start": 1933.94, "end": 1934.14, "word": " means", "probability": 0.9326171875}, {"start": 1934.14, "end": 1934.3, "word": " there", "probability": 0.845703125}, {"start": 1934.3, "end": 1934.58, "word": " exists", "probability": 0.80712890625}, {"start": 
1934.58, "end": 1935.16, "word": " positive", "probability": 0.89599609375}, {"start": 1935.16, "end": 1935.78, "word": " relationship.", "probability": 0.73779296875}, {"start": 1936.78, "end": 1937.0, "word": " Make", "probability": 0.6787109375}, {"start": 1937.0, "end": 1937.32, "word": " sense?", "probability": 0.826171875}, {"start": 1938.54, "end": 1939.0, "word": " But", "probability": 0.90380859375}, {"start": 1939.0, "end": 1939.22, "word": " again,", "probability": 0.8916015625}, {"start": 1939.32, "end": 1939.42, "word": " for", "probability": 0.94580078125}, {"start": 1939.42, "end": 1939.74, "word": " large", "probability": 0.91162109375}, {"start": 1939.74, "end": 1939.98, "word": " data,", "probability": 0.95556640625}, {"start": 1940.02, "end": 1940.14, "word": " you", "probability": 0.9580078125}, {"start": 1940.14, "end": 1940.36, "word": " cannot", "probability": 0.880859375}, {"start": 1940.36, "end": 1940.58, "word": " tell", "probability": 0.7626953125}, {"start": 1940.58, "end": 1940.78, "word": " that.", "probability": 0.61865234375}], "temperature": 1.0}, {"id": 73, "seek": 197791, "start": 1951.71, "end": 1977.91, "text": " If, again, by using this data, small data, you can determine just the length, the strength, I'm sorry, the strength of a relationship or the direction of the relationship. Just pick any number at random. For example, if we pick this number. 
Father's height is 68, his son's height is 70.", "tokens": [759, 11, 797, 11, 538, 1228, 341, 1412, 11, 1359, 1412, 11, 291, 393, 6997, 445, 264, 4641, 11, 264, 3800, 11, 286, 478, 2597, 11, 264, 3800, 295, 257, 2480, 420, 264, 3513, 295, 264, 2480, 13, 1449, 1888, 604, 1230, 412, 4974, 13, 1171, 1365, 11, 498, 321, 1888, 341, 1230, 13, 7085, 311, 6681, 307, 23317, 11, 702, 1872, 311, 6681, 307, 5285, 13], "avg_logprob": -0.19588695115902843, "compression_ratio": 1.591160220994475, "no_speech_prob": 0.0, "words": [{"start": 1951.71, "end": 1952.19, "word": " If,", "probability": 0.2509765625}, {"start": 1952.41, "end": 1952.69, "word": " again,", "probability": 0.91064453125}, {"start": 1953.03, "end": 1953.27, "word": " by", "probability": 0.94384765625}, {"start": 1953.27, "end": 1953.53, "word": " using", "probability": 0.93310546875}, {"start": 1953.53, "end": 1953.77, "word": " this", "probability": 0.873046875}, {"start": 1953.77, "end": 1954.07, "word": " data,", "probability": 0.8076171875}, {"start": 1954.17, "end": 1954.41, "word": " small", "probability": 0.904296875}, {"start": 1954.41, "end": 1954.79, "word": " data,", "probability": 0.9287109375}, {"start": 1955.55, "end": 1955.87, "word": " you", "probability": 0.951171875}, {"start": 1955.87, "end": 1956.09, "word": " can", "probability": 0.947265625}, {"start": 1956.09, "end": 1956.55, "word": " determine", "probability": 0.91796875}, {"start": 1956.55, "end": 1957.67, "word": " just", "probability": 0.89013671875}, {"start": 1957.67, "end": 1958.79, "word": " the", "probability": 0.9091796875}, {"start": 1958.79, "end": 1959.13, "word": " length,", "probability": 0.58837890625}, {"start": 1960.01, "end": 1960.23, "word": " the", "probability": 0.82470703125}, {"start": 1960.23, "end": 1960.55, "word": " strength,", "probability": 0.845703125}, {"start": 1960.63, "end": 1960.73, "word": " I'm", "probability": 0.920166015625}, {"start": 1960.73, "end": 1960.95, "word": " sorry,", "probability": 
0.8642578125}, {"start": 1961.09, "end": 1961.27, "word": " the", "probability": 0.9150390625}, {"start": 1961.27, "end": 1961.55, "word": " strength", "probability": 0.826171875}, {"start": 1961.55, "end": 1961.71, "word": " of", "probability": 0.962890625}, {"start": 1961.71, "end": 1961.79, "word": " a", "probability": 0.61767578125}, {"start": 1961.79, "end": 1962.15, "word": " relationship", "probability": 0.89501953125}, {"start": 1962.15, "end": 1963.05, "word": " or", "probability": 0.6474609375}, {"start": 1963.05, "end": 1963.49, "word": " the", "probability": 0.921875}, {"start": 1963.49, "end": 1964.31, "word": " direction", "probability": 0.97509765625}, {"start": 1964.31, "end": 1965.05, "word": " of", "probability": 0.96435546875}, {"start": 1965.05, "end": 1965.21, "word": " the", "probability": 0.8955078125}, {"start": 1965.21, "end": 1965.67, "word": " relationship.", "probability": 0.8994140625}, {"start": 1966.33, "end": 1966.79, "word": " Just", "probability": 0.8759765625}, {"start": 1966.79, "end": 1967.15, "word": " pick", "probability": 0.85498046875}, {"start": 1967.15, "end": 1967.59, "word": " any", "probability": 0.900390625}, {"start": 1967.59, "end": 1967.95, "word": " number", "probability": 0.9375}, {"start": 1967.95, "end": 1968.19, "word": " at", "probability": 0.72509765625}, {"start": 1968.19, "end": 1968.41, "word": " random.", "probability": 0.83447265625}, {"start": 1969.59, "end": 1969.79, "word": " For", "probability": 0.95947265625}, {"start": 1969.79, "end": 1970.05, "word": " example,", "probability": 0.97265625}, {"start": 1970.17, "end": 1970.29, "word": " if", "probability": 0.94677734375}, {"start": 1970.29, "end": 1970.47, "word": " we", "probability": 0.94384765625}, {"start": 1970.47, "end": 1970.71, "word": " pick", "probability": 0.6044921875}, {"start": 1970.71, "end": 1971.03, "word": " this", "probability": 0.95166015625}, {"start": 1971.03, "end": 1971.29, "word": " number.", "probability": 0.89697265625}, 
{"start": 1975.05, "end": 1975.61, "word": " Father's", "probability": 0.84375}, {"start": 1975.61, "end": 1975.77, "word": " height", "probability": 0.9521484375}, {"start": 1975.77, "end": 1975.87, "word": " is", "probability": 0.356689453125}, {"start": 1975.87, "end": 1976.27, "word": " 68,", "probability": 0.82470703125}, {"start": 1976.57, "end": 1976.99, "word": " his", "probability": 0.92578125}, {"start": 1976.99, "end": 1977.27, "word": " son's", "probability": 0.92333984375}, {"start": 1977.27, "end": 1977.43, "word": " height", "probability": 0.9345703125}, {"start": 1977.43, "end": 1977.57, "word": " is", "probability": 0.93115234375}, {"start": 1977.57, "end": 1977.91, "word": " 70.", "probability": 0.95654296875}], "temperature": 1.0}, {"id": 74, "seek": 200582, "start": 1979.94, "end": 2005.82, "text": " Now suppose we pick another number that is greater than 68, then let's see what will happen. For father's height 70, his son's height increases up to 72. Similarly, 72 father's height, his son's height 75. So that means X increases, Y also increases. 
So that means there exists both of them.", "tokens": [823, 7297, 321, 1888, 1071, 1230, 300, 307, 5044, 813, 23317, 11, 550, 718, 311, 536, 437, 486, 1051, 13, 1171, 3086, 311, 6681, 5285, 11, 702, 1872, 311, 6681, 8637, 493, 281, 18731, 13, 13157, 11, 18731, 3086, 311, 6681, 11, 702, 1872, 311, 6681, 9562, 13, 407, 300, 1355, 1783, 8637, 11, 398, 611, 8637, 13, 407, 300, 1355, 456, 8198, 1293, 295, 552, 13], "avg_logprob": -0.24701287991860332, "compression_ratio": 1.6685714285714286, "no_speech_prob": 0.0, "words": [{"start": 1979.94, "end": 1980.18, "word": " Now", "probability": 0.5439453125}, {"start": 1980.18, "end": 1980.54, "word": " suppose", "probability": 0.548828125}, {"start": 1980.54, "end": 1980.7, "word": " we", "probability": 0.86669921875}, {"start": 1980.7, "end": 1980.9, "word": " pick", "probability": 0.83984375}, {"start": 1980.9, "end": 1981.2, "word": " another", "probability": 0.90869140625}, {"start": 1981.2, "end": 1981.48, "word": " number", "probability": 0.9375}, {"start": 1981.48, "end": 1981.72, "word": " that", "probability": 0.81298828125}, {"start": 1981.72, "end": 1981.86, "word": " is", "probability": 0.80322265625}, {"start": 1981.86, "end": 1982.18, "word": " greater", "probability": 0.888671875}, {"start": 1982.18, "end": 1982.46, "word": " than", "probability": 0.9384765625}, {"start": 1982.46, "end": 1982.92, "word": " 68,", "probability": 0.9326171875}, {"start": 1983.1, "end": 1983.2, "word": " then", "probability": 0.533203125}, {"start": 1983.2, "end": 1983.44, "word": " let's", "probability": 0.92041015625}, {"start": 1983.44, "end": 1983.58, "word": " see", "probability": 0.92041015625}, {"start": 1983.58, "end": 1983.74, "word": " what", "probability": 0.9443359375}, {"start": 1983.74, "end": 1983.9, "word": " will", "probability": 0.84619140625}, {"start": 1983.9, "end": 1984.18, "word": " happen.", "probability": 0.90966796875}, {"start": 1985.58, "end": 1985.84, "word": " For", "probability": 0.92822265625}, 
{"start": 1985.84, "end": 1986.92, "word": " father's", "probability": 0.4959716796875}, {"start": 1986.92, "end": 1987.06, "word": " height", "probability": 0.76904296875}, {"start": 1987.06, "end": 1987.64, "word": " 70,", "probability": 0.701171875}, {"start": 1988.98, "end": 1989.44, "word": " his", "probability": 0.951171875}, {"start": 1989.44, "end": 1989.8, "word": " son's", "probability": 0.8896484375}, {"start": 1989.8, "end": 1990.04, "word": " height", "probability": 0.97216796875}, {"start": 1990.04, "end": 1990.64, "word": " increases", "probability": 0.88037109375}, {"start": 1990.64, "end": 1991.06, "word": " up", "probability": 0.91748046875}, {"start": 1991.06, "end": 1991.26, "word": " to", "probability": 0.95361328125}, {"start": 1991.26, "end": 1991.76, "word": " 72.", "probability": 0.97509765625}, {"start": 1993.52, "end": 1994.1, "word": " Similarly,", "probability": 0.7294921875}, {"start": 1994.54, "end": 1995.2, "word": " 72", "probability": 0.87646484375}, {"start": 1995.2, "end": 1996.38, "word": " father's", "probability": 0.723876953125}, {"start": 1996.38, "end": 1996.64, "word": " height,", "probability": 0.95654296875}, {"start": 1996.74, "end": 1996.9, "word": " his", "probability": 0.93115234375}, {"start": 1996.9, "end": 1997.16, "word": " son's", "probability": 0.9384765625}, {"start": 1997.16, "end": 1997.24, "word": " height", "probability": 0.91064453125}, {"start": 1997.24, "end": 1997.82, "word": " 75.", "probability": 0.59912109375}, {"start": 1998.74, "end": 1999.02, "word": " So", "probability": 0.861328125}, {"start": 1999.02, "end": 1999.26, "word": " that", "probability": 0.88818359375}, {"start": 1999.26, "end": 1999.6, "word": " means", "probability": 0.93212890625}, {"start": 1999.6, "end": 1999.98, "word": " X", "probability": 0.52734375}, {"start": 1999.98, "end": 2000.54, "word": " increases,", "probability": 0.92626953125}, {"start": 2001.32, "end": 2001.58, "word": " Y", "probability": 0.9765625}, {"start": 
2001.58, "end": 2002.06, "word": " also", "probability": 0.88232421875}, {"start": 2002.06, "end": 2002.54, "word": " increases.", "probability": 0.828125}, {"start": 2002.8, "end": 2002.94, "word": " So", "probability": 0.93310546875}, {"start": 2002.94, "end": 2003.76, "word": " that", "probability": 0.8642578125}, {"start": 2003.76, "end": 2004.0, "word": " means", "probability": 0.92333984375}, {"start": 2004.0, "end": 2004.16, "word": " there", "probability": 0.73291015625}, {"start": 2004.16, "end": 2004.6, "word": " exists", "probability": 0.66796875}, {"start": 2004.6, "end": 2005.34, "word": " both", "probability": 0.65234375}, {"start": 2005.34, "end": 2005.74, "word": " of", "probability": 0.2100830078125}, {"start": 2005.74, "end": 2005.82, "word": " them.", "probability": 0.56201171875}], "temperature": 1.0}, {"id": 75, "seek": 203455, "start": 2008.55, "end": 2034.55, "text": " For sure it is hard to tell this direction if the data is large. Because maybe you will find as X increases for one point, Y maybe decreases for that point. So it depends on the data you have. Anyway, let's see how can we compute R. 
I will use Excel to show how can we do these calculations.", "tokens": [1171, 988, 309, 307, 1152, 281, 980, 341, 3513, 498, 264, 1412, 307, 2416, 13, 1436, 1310, 291, 486, 915, 382, 1783, 8637, 337, 472, 935, 11, 398, 1310, 24108, 337, 300, 935, 13, 407, 309, 5946, 322, 264, 1412, 291, 362, 13, 5684, 11, 718, 311, 536, 577, 393, 321, 14722, 497, 13, 286, 486, 764, 19060, 281, 855, 577, 393, 321, 360, 613, 20448, 13], "avg_logprob": -0.20668658000581405, "compression_ratio": 1.5449735449735449, "no_speech_prob": 0.0, "words": [{"start": 2008.55, "end": 2008.79, "word": " For", "probability": 0.59326171875}, {"start": 2008.79, "end": 2009.03, "word": " sure", "probability": 0.9189453125}, {"start": 2009.03, "end": 2009.21, "word": " it", "probability": 0.63427734375}, {"start": 2009.21, "end": 2009.37, "word": " is", "probability": 0.81298828125}, {"start": 2009.37, "end": 2009.89, "word": " hard", "probability": 0.8984375}, {"start": 2009.89, "end": 2010.45, "word": " to", "probability": 0.94140625}, {"start": 2010.45, "end": 2010.75, "word": " tell", "probability": 0.87353515625}, {"start": 2010.75, "end": 2011.97, "word": " this", "probability": 0.84765625}, {"start": 2011.97, "end": 2012.57, "word": " direction", "probability": 0.96630859375}, {"start": 2012.57, "end": 2012.91, "word": " if", "probability": 0.7119140625}, {"start": 2012.91, "end": 2013.07, "word": " the", "probability": 0.84765625}, {"start": 2013.07, "end": 2013.25, "word": " data", "probability": 0.81396484375}, {"start": 2013.25, "end": 2013.43, "word": " is", "probability": 0.54443359375}, {"start": 2013.43, "end": 2014.01, "word": " large.", "probability": 0.916015625}, {"start": 2014.95, "end": 2015.33, "word": " Because", "probability": 0.89990234375}, {"start": 2015.33, "end": 2015.57, "word": " maybe", "probability": 0.86669921875}, {"start": 2015.57, "end": 2015.69, "word": " you", "probability": 0.52490234375}, {"start": 2015.69, "end": 2015.81, "word": " will", "probability": 
0.77587890625}, {"start": 2015.81, "end": 2016.13, "word": " find", "probability": 0.88623046875}, {"start": 2016.13, "end": 2016.49, "word": " as", "probability": 0.6708984375}, {"start": 2016.49, "end": 2016.75, "word": " X", "probability": 0.5009765625}, {"start": 2016.75, "end": 2017.13, "word": " increases", "probability": 0.410888671875}, {"start": 2017.13, "end": 2017.35, "word": " for", "probability": 0.5908203125}, {"start": 2017.35, "end": 2017.55, "word": " one", "probability": 0.89990234375}, {"start": 2017.55, "end": 2017.91, "word": " point,", "probability": 0.9697265625}, {"start": 2019.01, "end": 2019.57, "word": " Y", "probability": 0.94384765625}, {"start": 2019.57, "end": 2019.79, "word": " maybe", "probability": 0.484130859375}, {"start": 2019.79, "end": 2020.25, "word": " decreases", "probability": 0.89794921875}, {"start": 2020.25, "end": 2020.45, "word": " for", "probability": 0.9248046875}, {"start": 2020.45, "end": 2020.65, "word": " that", "probability": 0.5673828125}, {"start": 2020.65, "end": 2020.89, "word": " point.", "probability": 0.9287109375}, {"start": 2022.23, "end": 2022.53, "word": " So", "probability": 0.798828125}, {"start": 2022.53, "end": 2022.65, "word": " it", "probability": 0.80419921875}, {"start": 2022.65, "end": 2022.91, "word": " depends", "probability": 0.90283203125}, {"start": 2022.91, "end": 2023.07, "word": " on", "probability": 0.92431640625}, {"start": 2023.07, "end": 2023.17, "word": " the", "probability": 0.9091796875}, {"start": 2023.17, "end": 2023.41, "word": " data", "probability": 0.9521484375}, {"start": 2023.41, "end": 2023.61, "word": " you", "probability": 0.8037109375}, {"start": 2023.61, "end": 2023.73, "word": " have.", "probability": 0.8935546875}, {"start": 2024.55, "end": 2024.91, "word": " Anyway,", "probability": 0.86572265625}, {"start": 2025.71, "end": 2026.05, "word": " let's", "probability": 0.9482421875}, {"start": 2026.05, "end": 2026.29, "word": " see", "probability": 0.91552734375}, 
{"start": 2026.29, "end": 2026.63, "word": " how", "probability": 0.9033203125}, {"start": 2026.63, "end": 2026.85, "word": " can", "probability": 0.74267578125}, {"start": 2026.85, "end": 2027.01, "word": " we", "probability": 0.95068359375}, {"start": 2027.01, "end": 2027.49, "word": " compute", "probability": 0.8916015625}, {"start": 2027.49, "end": 2028.37, "word": " R.", "probability": 0.90380859375}, {"start": 2028.71, "end": 2029.01, "word": " I", "probability": 0.9970703125}, {"start": 2029.01, "end": 2029.19, "word": " will", "probability": 0.892578125}, {"start": 2029.19, "end": 2029.49, "word": " use", "probability": 0.8896484375}, {"start": 2029.49, "end": 2029.95, "word": " Excel", "probability": 0.8076171875}, {"start": 2029.95, "end": 2031.83, "word": " to", "probability": 0.8935546875}, {"start": 2031.83, "end": 2032.07, "word": " show", "probability": 0.94873046875}, {"start": 2032.07, "end": 2032.25, "word": " how", "probability": 0.919921875}, {"start": 2032.25, "end": 2032.49, "word": " can", "probability": 0.70947265625}, {"start": 2032.49, "end": 2032.63, "word": " we", "probability": 0.95263671875}, {"start": 2032.63, "end": 2032.93, "word": " do", "probability": 0.95947265625}, {"start": 2032.93, "end": 2033.77, "word": " these", "probability": 0.474853515625}, {"start": 2033.77, "end": 2034.55, "word": " calculations.", "probability": 0.91259765625}], "temperature": 1.0}, {"id": 76, "seek": 206683, "start": 2042.11, "end": 2066.83, "text": " The screen is clear. But give me the data of X and Y. X is 64. 68. 68. 78. There is one 68. 78. 
74.", "tokens": [440, 2568, 307, 1850, 13, 583, 976, 385, 264, 1412, 295, 1783, 293, 398, 13, 1783, 307, 12145, 13, 23317, 13, 23317, 13, 26369, 13, 821, 307, 472, 23317, 13, 26369, 13, 28868, 13], "avg_logprob": -0.5299107347215924, "compression_ratio": 1.1363636363636365, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 2042.11, "end": 2042.37, "word": " The", "probability": 0.033416748046875}, {"start": 2042.37, "end": 2042.47, "word": " screen", "probability": 0.75439453125}, {"start": 2042.47, "end": 2042.59, "word": " is", "probability": 0.8388671875}, {"start": 2042.59, "end": 2042.77, "word": " clear.", "probability": 0.6572265625}, {"start": 2044.67, "end": 2045.21, "word": " But", "probability": 0.578125}, {"start": 2045.21, "end": 2045.47, "word": " give", "probability": 0.564453125}, {"start": 2045.47, "end": 2045.59, "word": " me", "probability": 0.87255859375}, {"start": 2045.59, "end": 2045.63, "word": " the", "probability": 0.6513671875}, {"start": 2045.63, "end": 2045.81, "word": " data", "probability": 0.626953125}, {"start": 2045.81, "end": 2046.09, "word": " of", "probability": 0.36376953125}, {"start": 2046.09, "end": 2046.31, "word": " X", "probability": 0.76708984375}, {"start": 2046.31, "end": 2046.53, "word": " and", "probability": 0.9013671875}, {"start": 2046.53, "end": 2046.75, "word": " Y.", "probability": 0.99560546875}, {"start": 2050.71, "end": 2051.25, "word": " X", "probability": 0.66455078125}, {"start": 2051.25, "end": 2051.35, "word": " is", "probability": 0.297607421875}, {"start": 2051.35, "end": 2051.81, "word": " 64.", "probability": 0.63330078125}, {"start": 2053.77, "end": 2054.31, "word": " 68.", "probability": 0.9306640625}, {"start": 2058.91, "end": 2059.45, "word": " 68.", "probability": 0.84130859375}, {"start": 2060.57, "end": 2061.11, "word": " 78.", "probability": 0.68212890625}, {"start": 2061.31, "end": 2061.53, "word": " There", "probability": 0.2459716796875}, {"start": 2061.53, "end": 2061.55, 
"word": " is", "probability": 0.640625}, {"start": 2061.55, "end": 2061.69, "word": " one", "probability": 0.267333984375}, {"start": 2061.69, "end": 2062.13, "word": " 68.", "probability": 0.90673828125}, {"start": 2063.61, "end": 2064.15, "word": " 78.", "probability": 0.82861328125}, {"start": 2066.29, "end": 2066.83, "word": " 74.", "probability": 0.268310546875}], "temperature": 1.0}, {"id": 77, "seek": 208980, "start": 2071.12, "end": 2089.8, "text": " Seventy-four. Seventy-five. Seventy-six. Seventy-seven. Seventy-five. So that's the values of X, Y values. Seventy. Seventy-five. Seventy-seven.", "tokens": [1100, 2475, 88, 12, 23251, 13, 1100, 2475, 88, 12, 18621, 13, 1100, 2475, 88, 12, 35066, 13, 1100, 2475, 88, 12, 44476, 13, 1100, 2475, 88, 12, 18621, 13, 407, 300, 311, 264, 4190, 295, 1783, 11, 398, 4190, 13, 1100, 2475, 88, 13, 1100, 2475, 88, 12, 18621, 13, 1100, 2475, 88, 12, 44476, 13], "avg_logprob": -0.19275322967562183, "compression_ratio": 2.013888888888889, "no_speech_prob": 0.0, "words": [{"start": 2071.12, "end": 2071.9, "word": " Seventy", "probability": 0.7017822265625}, {"start": 2071.9, "end": 2072.16, "word": "-four.", "probability": 0.736328125}, {"start": 2072.42, "end": 2073.06, "word": " Seventy", "probability": 0.8642578125}, {"start": 2073.06, "end": 2073.46, "word": "-five.", "probability": 0.91552734375}, {"start": 2074.22, "end": 2074.9, "word": " Seventy", "probability": 0.9493815104166666}, {"start": 2074.9, "end": 2077.6, "word": "-six.", "probability": 0.92529296875}, {"start": 2078.36, "end": 2078.78, "word": " Seventy", "probability": 0.8234049479166666}, {"start": 2078.78, "end": 2079.06, "word": "-seven.", "probability": 0.9140625}, {"start": 2079.84, "end": 2080.34, "word": " Seventy", "probability": 0.9059244791666666}, {"start": 2080.34, "end": 2080.74, "word": "-five.", "probability": 0.849853515625}, {"start": 2081.02, "end": 2081.3, "word": " So", "probability": 0.76904296875}, {"start": 2081.3, "end": 2081.6, 
"word": " that's", "probability": 0.8583984375}, {"start": 2081.6, "end": 2081.76, "word": " the", "probability": 0.919921875}, {"start": 2081.76, "end": 2082.24, "word": " values", "probability": 0.93896484375}, {"start": 2082.24, "end": 2083.46, "word": " of", "probability": 0.8330078125}, {"start": 2083.46, "end": 2083.8, "word": " X,", "probability": 0.71875}, {"start": 2084.16, "end": 2084.44, "word": " Y", "probability": 0.93115234375}, {"start": 2084.44, "end": 2084.94, "word": " values.", "probability": 0.90087890625}, {"start": 2086.18, "end": 2086.98, "word": " Seventy.", "probability": 0.8938802083333334}, {"start": 2087.16, "end": 2087.6, "word": " Seventy", "probability": 0.83203125}, {"start": 2087.6, "end": 2087.96, "word": "-five.", "probability": 0.887451171875}, {"start": 2088.98, "end": 2089.44, "word": " Seventy", "probability": 0.9173177083333334}, {"start": 2089.44, "end": 2089.8, "word": "-seven.", "probability": 0.93359375}], "temperature": 1.0}, {"id": 78, "seek": 211937, "start": 2117.23, "end": 2119.37, "text": " So first we have to compute it.", "tokens": [407, 700, 321, 362, 281, 14722, 309, 13], "avg_logprob": -0.5125868055555556, "compression_ratio": 0.8, "no_speech_prob": 0.0, "words": [{"start": 2117.23, "end": 2117.73, "word": " So", "probability": 0.1414794921875}, {"start": 2117.73, "end": 2118.23, "word": " first", "probability": 0.77294921875}, {"start": 2118.23, "end": 2118.47, "word": " we", "probability": 0.479248046875}, {"start": 2118.47, "end": 2118.65, "word": " have", "probability": 0.93701171875}, {"start": 2118.65, "end": 2118.77, "word": " to", "probability": 0.97021484375}, {"start": 2118.77, "end": 2119.03, "word": " compute", "probability": 0.93798828125}, {"start": 2119.03, "end": 2119.37, "word": " it.", "probability": 0.43994140625}], "temperature": 1.0}, {"id": 79, "seek": 214605, "start": 2120.73, "end": 2146.05, "text": " x times y so that's as x times the value of y so 46 times 65 equals 4160 x squared so 
this value squared for y squared 65", "tokens": [2031, 1413, 288, 370, 300, 311, 382, 2031, 1413, 264, 2158, 295, 288, 370, 17835, 1413, 11624, 6915, 18173, 4550, 2031, 8889, 370, 341, 2158, 8889, 337, 288, 8889, 11624], "avg_logprob": -0.21207156681245373, "compression_ratio": 1.4186046511627908, "no_speech_prob": 0.0, "words": [{"start": 2120.73, "end": 2121.11, "word": " x", "probability": 0.44873046875}, {"start": 2121.11, "end": 2121.45, "word": " times", "probability": 0.81103515625}, {"start": 2121.45, "end": 2121.81, "word": " y", "probability": 0.93896484375}, {"start": 2121.81, "end": 2123.73, "word": " so", "probability": 0.402587890625}, {"start": 2123.73, "end": 2124.17, "word": " that's", "probability": 0.7119140625}, {"start": 2124.17, "end": 2124.59, "word": " as", "probability": 0.495361328125}, {"start": 2124.59, "end": 2125.25, "word": " x", "probability": 0.97509765625}, {"start": 2125.25, "end": 2128.27, "word": " times", "probability": 0.84912109375}, {"start": 2128.27, "end": 2129.21, "word": " the", "probability": 0.890625}, {"start": 2129.21, "end": 2129.43, "word": " value", "probability": 0.97998046875}, {"start": 2129.43, "end": 2129.55, "word": " of", "probability": 0.9453125}, {"start": 2129.55, "end": 2129.77, "word": " y", "probability": 0.93505859375}, {"start": 2129.77, "end": 2132.35, "word": " so", "probability": 0.5810546875}, {"start": 2132.35, "end": 2133.03, "word": " 46", "probability": 0.8173828125}, {"start": 2133.03, "end": 2133.39, "word": " times", "probability": 0.89306640625}, {"start": 2133.39, "end": 2133.97, "word": " 65", "probability": 0.9765625}, {"start": 2133.97, "end": 2134.69, "word": " equals", "probability": 0.94873046875}, {"start": 2134.69, "end": 2137.43, "word": " 4160", "probability": 0.814453125}, {"start": 2137.43, "end": 2138.23, "word": " x", "probability": 0.94677734375}, {"start": 2138.23, "end": 2138.65, "word": " squared", "probability": 0.75537109375}, {"start": 2138.65, "end": 2139.27, 
"word": " so", "probability": 0.84814453125}, {"start": 2139.27, "end": 2139.51, "word": " this", "probability": 0.94921875}, {"start": 2139.51, "end": 2139.91, "word": " value", "probability": 0.970703125}, {"start": 2139.91, "end": 2142.09, "word": " squared", "probability": 0.86962890625}, {"start": 2142.09, "end": 2144.33, "word": " for", "probability": 0.794921875}, {"start": 2144.33, "end": 2144.61, "word": " y", "probability": 0.99365234375}, {"start": 2144.61, "end": 2145.05, "word": " squared", "probability": 0.82373046875}, {"start": 2145.05, "end": 2146.05, "word": " 65", "probability": 0.89501953125}], "temperature": 1.0}, {"id": 80, "seek": 217717, "start": 2148.66, "end": 2177.18, "text": " Square and we have to do this one for the rest of the data So that's the sum of XY sum X squared and Y squared now the summation So that's the sum of X and Y", "tokens": [16463, 293, 321, 362, 281, 360, 341, 472, 337, 264, 1472, 295, 264, 1412, 407, 300, 311, 264, 2408, 295, 48826, 2408, 1783, 8889, 293, 398, 8889, 586, 264, 28811, 407, 300, 311, 264, 2408, 295, 1783, 293, 398], "avg_logprob": -0.24707031548023223, "compression_ratio": 1.6631578947368422, "no_speech_prob": 0.0, "words": [{"start": 2148.6600000000003, "end": 2149.26, "word": " Square", "probability": 0.10186767578125}, {"start": 2149.26, "end": 2149.86, "word": " and", "probability": 0.84765625}, {"start": 2149.86, "end": 2150.02, "word": " we", "probability": 0.93994140625}, {"start": 2150.02, "end": 2150.18, "word": " have", "probability": 0.95166015625}, {"start": 2150.18, "end": 2150.34, "word": " to", "probability": 0.97119140625}, {"start": 2150.34, "end": 2150.56, "word": " do", "probability": 0.95654296875}, {"start": 2150.56, "end": 2150.86, "word": " this", "probability": 0.912109375}, {"start": 2150.86, "end": 2151.08, "word": " one", "probability": 0.9326171875}, {"start": 2151.08, "end": 2151.28, "word": " for", "probability": 0.94775390625}, {"start": 2151.28, "end": 2151.44, "word": " 
the", "probability": 0.9208984375}, {"start": 2151.44, "end": 2151.72, "word": " rest", "probability": 0.91357421875}, {"start": 2151.72, "end": 2153.7, "word": " of", "probability": 0.9052734375}, {"start": 2153.7, "end": 2153.82, "word": " the", "probability": 0.83935546875}, {"start": 2153.82, "end": 2154.12, "word": " data", "probability": 0.90771484375}, {"start": 2154.12, "end": 2163.16, "word": " So", "probability": 0.86181640625}, {"start": 2163.16, "end": 2163.5, "word": " that's", "probability": 0.914306640625}, {"start": 2163.5, "end": 2163.72, "word": " the", "probability": 0.91650390625}, {"start": 2163.72, "end": 2164.04, "word": " sum", "probability": 0.919921875}, {"start": 2164.04, "end": 2164.18, "word": " of", "probability": 0.9736328125}, {"start": 2164.18, "end": 2164.6, "word": " XY", "probability": 0.63671875}, {"start": 2164.6, "end": 2165.1, "word": " sum", "probability": 0.480224609375}, {"start": 2165.1, "end": 2165.32, "word": " X", "probability": 0.5732421875}, {"start": 2165.32, "end": 2165.7, "word": " squared", "probability": 0.50732421875}, {"start": 2165.7, "end": 2166.72, "word": " and", "probability": 0.9482421875}, {"start": 2166.72, "end": 2167.06, "word": " Y", "probability": 0.8544921875}, {"start": 2167.06, "end": 2167.6, "word": " squared", "probability": 0.7236328125}, {"start": 2167.6, "end": 2168.4, "word": " now", "probability": 0.348388671875}, {"start": 2168.4, "end": 2168.58, "word": " the", "probability": 0.89892578125}, {"start": 2168.58, "end": 2169.06, "word": " summation", "probability": 0.90771484375}, {"start": 2169.06, "end": 2173.48, "word": " So", "probability": 0.81298828125}, {"start": 2173.48, "end": 2173.72, "word": " that's", "probability": 0.92333984375}, {"start": 2173.72, "end": 2173.86, "word": " the", "probability": 0.92822265625}, {"start": 2173.86, "end": 2174.16, "word": " sum", "probability": 0.904296875}, {"start": 2174.16, "end": 2176.54, "word": " of", "probability": 0.50146484375}, 
{"start": 2176.54, "end": 2176.74, "word": " X", "probability": 0.8427734375}, {"start": 2176.74, "end": 2176.9, "word": " and", "probability": 0.83349609375}, {"start": 2176.9, "end": 2177.18, "word": " Y", "probability": 0.97509765625}], "temperature": 1.0}, {"id": 81, "seek": 220938, "start": 2180.38, "end": 2209.38, "text": " We have to compute the mean of x and y. So that is this sum divided by n, where n is 10 in this case. So this is the first step. Let's see how can we compute R. R, we have sum of x, y.", "tokens": [492, 362, 281, 14722, 264, 914, 295, 2031, 293, 288, 13, 407, 300, 307, 341, 2408, 6666, 538, 297, 11, 689, 297, 307, 1266, 294, 341, 1389, 13, 407, 341, 307, 264, 700, 1823, 13, 961, 311, 536, 577, 393, 321, 14722, 497, 13, 497, 11, 321, 362, 2408, 295, 2031, 11, 288, 13], "avg_logprob": -0.2254261396147988, "compression_ratio": 1.3805970149253732, "no_speech_prob": 0.0, "words": [{"start": 2180.38, "end": 2180.68, "word": " We", "probability": 0.53955078125}, {"start": 2180.68, "end": 2180.9, "word": " have", "probability": 0.86669921875}, {"start": 2180.9, "end": 2181.04, "word": " to", "probability": 0.9609375}, {"start": 2181.04, "end": 2181.38, "word": " compute", "probability": 0.9228515625}, {"start": 2181.38, "end": 2182.58, "word": " the", "probability": 0.74951171875}, {"start": 2182.58, "end": 2182.76, "word": " mean", "probability": 0.94287109375}, {"start": 2182.76, "end": 2182.9, "word": " of", "probability": 0.958984375}, {"start": 2182.9, "end": 2183.2, "word": " x", "probability": 0.5986328125}, {"start": 2183.2, "end": 2185.04, "word": " and", "probability": 0.85498046875}, {"start": 2185.04, "end": 2185.36, "word": " y.", "probability": 0.96826171875}, {"start": 2186.3, "end": 2186.42, "word": " So", "probability": 0.78125}, {"start": 2186.42, "end": 2186.66, "word": " that", "probability": 0.76123046875}, {"start": 2186.66, "end": 2187.04, "word": " is", "probability": 0.927734375}, {"start": 2187.04, "end": 2188.16, "word": 
" this", "probability": 0.68359375}, {"start": 2188.16, "end": 2188.52, "word": " sum", "probability": 0.9267578125}, {"start": 2188.52, "end": 2189.24, "word": " divided", "probability": 0.6787109375}, {"start": 2189.24, "end": 2189.5, "word": " by", "probability": 0.97119140625}, {"start": 2189.5, "end": 2189.82, "word": " n,", "probability": 0.611328125}, {"start": 2189.96, "end": 2190.06, "word": " where", "probability": 0.3466796875}, {"start": 2190.06, "end": 2190.18, "word": " n", "probability": 0.95458984375}, {"start": 2190.18, "end": 2190.5, "word": " is", "probability": 0.9013671875}, {"start": 2190.5, "end": 2190.78, "word": " 10", "probability": 0.67333984375}, {"start": 2190.78, "end": 2190.9, "word": " in", "probability": 0.87255859375}, {"start": 2190.9, "end": 2191.08, "word": " this", "probability": 0.94580078125}, {"start": 2191.08, "end": 2191.38, "word": " case.", "probability": 0.90771484375}, {"start": 2194.6, "end": 2195.2, "word": " So", "probability": 0.87646484375}, {"start": 2195.2, "end": 2195.36, "word": " this", "probability": 0.86767578125}, {"start": 2195.36, "end": 2195.46, "word": " is", "probability": 0.94580078125}, {"start": 2195.46, "end": 2195.56, "word": " the", "probability": 0.91552734375}, {"start": 2195.56, "end": 2195.76, "word": " first", "probability": 0.884765625}, {"start": 2195.76, "end": 2196.1, "word": " step.", "probability": 0.94189453125}, {"start": 2201.82, "end": 2202.42, "word": " Let's", "probability": 0.941162109375}, {"start": 2202.42, "end": 2202.54, "word": " see", "probability": 0.8603515625}, {"start": 2202.54, "end": 2202.64, "word": " how", "probability": 0.88330078125}, {"start": 2202.64, "end": 2202.78, "word": " can", "probability": 0.67333984375}, {"start": 2202.78, "end": 2202.9, "word": " we", "probability": 0.92724609375}, {"start": 2202.9, "end": 2203.22, "word": " compute", "probability": 0.93896484375}, {"start": 2203.22, "end": 2203.56, "word": " R.", "probability": 0.73388671875}, 
{"start": 2204.58, "end": 2205.18, "word": " R,", "probability": 0.634765625}, {"start": 2206.32, "end": 2207.7, "word": " we", "probability": 0.93701171875}, {"start": 2207.7, "end": 2208.0, "word": " have", "probability": 0.9443359375}, {"start": 2208.0, "end": 2208.74, "word": " sum", "probability": 0.77734375}, {"start": 2208.74, "end": 2208.9, "word": " of", "probability": 0.9638671875}, {"start": 2208.9, "end": 2209.08, "word": " x,", "probability": 0.927734375}, {"start": 2209.18, "end": 2209.38, "word": " y.", "probability": 0.75}], "temperature": 1.0}, {"id": 82, "seek": 223542, "start": 2211.42, "end": 2235.42, "text": " minus n is 10 times x bar times y bar. This is the first quantity. The other one is square root of sum x squared minus n x bar squared.", "tokens": [3175, 297, 307, 1266, 1413, 2031, 2159, 1413, 288, 2159, 13, 639, 307, 264, 700, 11275, 13, 440, 661, 472, 307, 3732, 5593, 295, 2408, 2031, 8889, 3175, 297, 2031, 2159, 8889, 13], "avg_logprob": -0.28883271269938526, "compression_ratio": 1.4166666666666667, "no_speech_prob": 0.0, "words": [{"start": 2211.42, "end": 2211.88, "word": " minus", "probability": 0.224609375}, {"start": 2211.88, "end": 2212.18, "word": " n", "probability": 0.6376953125}, {"start": 2212.18, "end": 2212.38, "word": " is", "probability": 0.701171875}, {"start": 2212.38, "end": 2212.66, "word": " 10", "probability": 0.61279296875}, {"start": 2212.66, "end": 2214.76, "word": " times", "probability": 0.66650390625}, {"start": 2214.76, "end": 2215.1, "word": " x", "probability": 0.84130859375}, {"start": 2215.1, "end": 2215.44, "word": " bar", "probability": 0.86083984375}, {"start": 2215.44, "end": 2217.96, "word": " times", "probability": 0.85986328125}, {"start": 2217.96, "end": 2218.24, "word": " y", "probability": 0.9765625}, {"start": 2218.24, "end": 2218.5, "word": " bar.", "probability": 0.94921875}, {"start": 2220.14, "end": 2220.78, "word": " This", "probability": 0.7880859375}, {"start": 2220.78, "end": 2220.9, 
"word": " is", "probability": 0.94873046875}, {"start": 2220.9, "end": 2221.0, "word": " the", "probability": 0.9033203125}, {"start": 2221.0, "end": 2221.24, "word": " first", "probability": 0.8681640625}, {"start": 2221.24, "end": 2221.64, "word": " quantity.", "probability": 0.45166015625}, {"start": 2221.94, "end": 2222.1, "word": " The", "probability": 0.841796875}, {"start": 2222.1, "end": 2222.32, "word": " other", "probability": 0.8896484375}, {"start": 2222.32, "end": 2222.52, "word": " one", "probability": 0.9228515625}, {"start": 2222.52, "end": 2222.78, "word": " is", "probability": 0.9541015625}, {"start": 2222.78, "end": 2223.1, "word": " square", "probability": 0.75537109375}, {"start": 2223.1, "end": 2223.32, "word": " root", "probability": 0.9326171875}, {"start": 2223.32, "end": 2223.7, "word": " of", "probability": 0.9775390625}, {"start": 2223.7, "end": 2228.58, "word": " sum", "probability": 0.4619140625}, {"start": 2228.58, "end": 2228.84, "word": " x", "probability": 0.95166015625}, {"start": 2228.84, "end": 2230.16, "word": " squared", "probability": 0.390380859375}, {"start": 2230.16, "end": 2233.12, "word": " minus", "probability": 0.966796875}, {"start": 2233.12, "end": 2233.92, "word": " n", "probability": 0.72216796875}, {"start": 2233.92, "end": 2234.5, "word": " x", "probability": 0.83984375}, {"start": 2234.5, "end": 2234.86, "word": " bar", "probability": 0.943359375}, {"start": 2234.86, "end": 2235.42, "word": " squared.", "probability": 0.806640625}], "temperature": 1.0}, {"id": 83, "seek": 226698, "start": 2238.83, "end": 2266.99, "text": " times some y squared minus n times y bar squared. And we have to find the square root of this value. So square root, that will give this result. 
So now R equals this value divided by", "tokens": [1413, 512, 288, 8889, 3175, 297, 1413, 288, 2159, 8889, 13, 400, 321, 362, 281, 915, 264, 3732, 5593, 295, 341, 2158, 13, 407, 3732, 5593, 11, 300, 486, 976, 341, 1874, 13, 407, 586, 497, 6915, 341, 2158, 6666, 538], "avg_logprob": -0.24572173186710902, "compression_ratio": 1.4758064516129032, "no_speech_prob": 0.0, "words": [{"start": 2238.8300000000004, "end": 2239.55, "word": " times", "probability": 0.339599609375}, {"start": 2239.55, "end": 2240.27, "word": " some", "probability": 0.39892578125}, {"start": 2240.27, "end": 2240.51, "word": " y", "probability": 0.7275390625}, {"start": 2240.51, "end": 2240.79, "word": " squared", "probability": 0.5927734375}, {"start": 2240.79, "end": 2241.25, "word": " minus", "probability": 0.96240234375}, {"start": 2241.25, "end": 2242.05, "word": " n", "probability": 0.7529296875}, {"start": 2242.05, "end": 2243.85, "word": " times", "probability": 0.8681640625}, {"start": 2243.85, "end": 2244.11, "word": " y", "probability": 0.9677734375}, {"start": 2244.11, "end": 2244.35, "word": " bar", "probability": 0.9140625}, {"start": 2244.35, "end": 2244.77, "word": " squared.", "probability": 0.81005859375}, {"start": 2248.93, "end": 2249.65, "word": " And", "probability": 0.88671875}, {"start": 2249.65, "end": 2249.77, "word": " we", "probability": 0.83251953125}, {"start": 2249.77, "end": 2249.91, "word": " have", "probability": 0.94384765625}, {"start": 2249.91, "end": 2250.05, "word": " to", "probability": 0.96923828125}, {"start": 2250.05, "end": 2250.41, "word": " find", "probability": 0.8994140625}, {"start": 2250.41, "end": 2250.77, "word": " the", "probability": 0.90234375}, {"start": 2250.77, "end": 2251.05, "word": " square", "probability": 0.892578125}, {"start": 2251.05, "end": 2251.41, "word": " root", "probability": 0.9306640625}, {"start": 2251.41, "end": 2253.53, "word": " of", "probability": 0.88037109375}, {"start": 2253.53, "end": 2253.77, "word": " this", 
"probability": 0.9384765625}, {"start": 2253.77, "end": 2254.09, "word": " value.", "probability": 0.9521484375}, {"start": 2254.21, "end": 2254.31, "word": " So", "probability": 0.92724609375}, {"start": 2254.31, "end": 2254.61, "word": " square", "probability": 0.460205078125}, {"start": 2254.61, "end": 2254.97, "word": " root,", "probability": 0.92431640625}, {"start": 2256.67, "end": 2256.99, "word": " that", "probability": 0.873046875}, {"start": 2256.99, "end": 2257.19, "word": " will", "probability": 0.8876953125}, {"start": 2257.19, "end": 2257.47, "word": " give", "probability": 0.88134765625}, {"start": 2257.47, "end": 2258.07, "word": " this", "probability": 0.8095703125}, {"start": 2258.07, "end": 2258.43, "word": " result.", "probability": 0.90283203125}, {"start": 2260.19, "end": 2260.59, "word": " So", "probability": 0.9521484375}, {"start": 2260.59, "end": 2260.81, "word": " now", "probability": 0.86181640625}, {"start": 2260.81, "end": 2261.27, "word": " R", "probability": 0.39501953125}, {"start": 2261.27, "end": 2263.77, "word": " equals", "probability": 0.8994140625}, {"start": 2263.77, "end": 2265.93, "word": " this", "probability": 0.9267578125}, {"start": 2265.93, "end": 2266.27, "word": " value", "probability": 0.970703125}, {"start": 2266.27, "end": 2266.59, "word": " divided", "probability": 0.7470703125}, {"start": 2266.59, "end": 2266.99, "word": " by", "probability": 0.9736328125}], "temperature": 1.0}, {"id": 84, "seek": 229611, "start": 2269.67, "end": 2296.11, "text": " 155 and round always to two decimal places will give 87 so r is 87 so first step we have x and y compute xy x squared y squared sum of these all of these then x bar y bar values are given", "tokens": [2119, 20, 293, 3098, 1009, 281, 732, 26601, 3190, 486, 976, 27990, 370, 367, 307, 27990, 370, 700, 1823, 321, 362, 2031, 293, 288, 14722, 2031, 88, 2031, 8889, 288, 8889, 2408, 295, 613, 439, 295, 613, 550, 2031, 2159, 288, 2159, 4190, 366, 2212], "avg_logprob": 
-0.2102581460190856, "compression_ratio": 1.4573643410852712, "no_speech_prob": 0.0, "words": [{"start": 2269.6699999999996, "end": 2270.43, "word": " 155", "probability": 0.71826171875}, {"start": 2270.43, "end": 2271.19, "word": " and", "probability": 0.68505859375}, {"start": 2271.19, "end": 2271.51, "word": " round", "probability": 0.728515625}, {"start": 2271.51, "end": 2272.65, "word": " always", "probability": 0.6767578125}, {"start": 2272.65, "end": 2273.11, "word": " to", "probability": 0.8427734375}, {"start": 2273.11, "end": 2273.85, "word": " two", "probability": 0.77587890625}, {"start": 2273.85, "end": 2274.15, "word": " decimal", "probability": 0.83447265625}, {"start": 2274.15, "end": 2274.55, "word": " places", "probability": 0.94580078125}, {"start": 2274.55, "end": 2274.89, "word": " will", "probability": 0.8203125}, {"start": 2274.89, "end": 2275.19, "word": " give", "probability": 0.84814453125}, {"start": 2275.19, "end": 2276.77, "word": " 87", "probability": 0.9375}, {"start": 2276.77, "end": 2278.51, "word": " so", "probability": 0.3388671875}, {"start": 2278.51, "end": 2278.89, "word": " r", "probability": 0.35107421875}, {"start": 2278.89, "end": 2279.31, "word": " is", "probability": 0.90478515625}, {"start": 2279.31, "end": 2281.05, "word": " 87", "probability": 0.9345703125}, {"start": 2281.05, "end": 2283.61, "word": " so", "probability": 0.62646484375}, {"start": 2283.61, "end": 2284.01, "word": " first", "probability": 0.85400390625}, {"start": 2284.01, "end": 2284.31, "word": " step", "probability": 0.921875}, {"start": 2284.31, "end": 2284.67, "word": " we", "probability": 0.90283203125}, {"start": 2284.67, "end": 2284.83, "word": " have", "probability": 0.94970703125}, {"start": 2284.83, "end": 2285.09, "word": " x", "probability": 0.9716796875}, {"start": 2285.09, "end": 2285.25, "word": " and", "probability": 0.94140625}, {"start": 2285.25, "end": 2285.59, "word": " y", "probability": 0.998046875}, {"start": 2285.59, "end": 
2287.51, "word": " compute", "probability": 0.85107421875}, {"start": 2287.51, "end": 2288.19, "word": " xy", "probability": 0.740478515625}, {"start": 2288.19, "end": 2288.55, "word": " x", "probability": 0.9462890625}, {"start": 2288.55, "end": 2288.95, "word": " squared", "probability": 0.8515625}, {"start": 2288.95, "end": 2289.25, "word": " y", "probability": 0.99267578125}, {"start": 2289.25, "end": 2289.71, "word": " squared", "probability": 0.86279296875}, {"start": 2289.71, "end": 2290.71, "word": " sum", "probability": 0.86572265625}, {"start": 2290.71, "end": 2291.07, "word": " of", "probability": 0.97021484375}, {"start": 2291.07, "end": 2291.45, "word": " these", "probability": 0.64208984375}, {"start": 2291.45, "end": 2292.35, "word": " all", "probability": 0.85107421875}, {"start": 2292.35, "end": 2292.47, "word": " of", "probability": 0.96240234375}, {"start": 2292.47, "end": 2292.73, "word": " these", "probability": 0.5771484375}, {"start": 2292.73, "end": 2293.77, "word": " then", "probability": 0.7880859375}, {"start": 2293.77, "end": 2294.17, "word": " x", "probability": 0.99365234375}, {"start": 2294.17, "end": 2294.51, "word": " bar", "probability": 0.9072265625}, {"start": 2294.51, "end": 2294.77, "word": " y", "probability": 0.982421875}, {"start": 2294.77, "end": 2295.13, "word": " bar", "probability": 0.9453125}, {"start": 2295.13, "end": 2295.55, "word": " values", "probability": 0.7158203125}, {"start": 2295.55, "end": 2295.79, "word": " are", "probability": 0.94873046875}, {"start": 2295.79, "end": 2296.11, "word": " given", "probability": 0.896484375}], "temperature": 1.0}, {"id": 85, "seek": 232606, "start": 2297.5, "end": 2326.06, "text": " Then just use the formula you have, we'll get R to be at seven. So in this case, if we just go back to the slide we have here. As we mentioned, father's height is the explanatory variable. 
Son's height is the response variable.", "tokens": [1396, 445, 764, 264, 8513, 291, 362, 11, 321, 603, 483, 497, 281, 312, 412, 3407, 13, 407, 294, 341, 1389, 11, 498, 321, 445, 352, 646, 281, 264, 4137, 321, 362, 510, 13, 1018, 321, 2835, 11, 3086, 311, 6681, 307, 264, 9045, 4745, 7006, 13, 5185, 311, 6681, 307, 264, 4134, 7006, 13], "avg_logprob": -0.18457031941839627, "compression_ratio": 1.4805194805194806, "no_speech_prob": 0.0, "words": [{"start": 2297.5, "end": 2297.8, "word": " Then", "probability": 0.419189453125}, {"start": 2297.8, "end": 2298.1, "word": " just", "probability": 0.69482421875}, {"start": 2298.1, "end": 2298.32, "word": " use", "probability": 0.86181640625}, {"start": 2298.32, "end": 2298.5, "word": " the", "probability": 0.9130859375}, {"start": 2298.5, "end": 2298.78, "word": " formula", "probability": 0.95703125}, {"start": 2298.78, "end": 2299.0, "word": " you", "probability": 0.7724609375}, {"start": 2299.0, "end": 2299.28, "word": " have,", "probability": 0.91162109375}, {"start": 2299.38, "end": 2299.58, "word": " we'll", "probability": 0.70703125}, {"start": 2299.58, "end": 2299.82, "word": " get", "probability": 0.92919921875}, {"start": 2299.82, "end": 2300.14, "word": " R", "probability": 0.66455078125}, {"start": 2300.14, "end": 2300.3, "word": " to", "probability": 0.93115234375}, {"start": 2300.3, "end": 2300.52, "word": " be", "probability": 0.8642578125}, {"start": 2300.52, "end": 2300.82, "word": " at", "probability": 0.63525390625}, {"start": 2300.82, "end": 2301.08, "word": " seven.", "probability": 0.54638671875}, {"start": 2301.74, "end": 2302.04, "word": " So", "probability": 0.96142578125}, {"start": 2302.04, "end": 2302.2, "word": " in", "probability": 0.8203125}, {"start": 2302.2, "end": 2302.42, "word": " this", "probability": 0.94873046875}, {"start": 2302.42, "end": 2302.88, "word": " case,", "probability": 0.91259765625}, {"start": 2303.64, "end": 2303.78, "word": " if", "probability": 0.9248046875}, {"start": 
2303.78, "end": 2303.98, "word": " we", "probability": 0.9306640625}, {"start": 2303.98, "end": 2304.24, "word": " just", "probability": 0.91455078125}, {"start": 2304.24, "end": 2304.44, "word": " go", "probability": 0.95361328125}, {"start": 2304.44, "end": 2304.82, "word": " back", "probability": 0.87109375}, {"start": 2304.82, "end": 2311.54, "word": " to", "probability": 0.51708984375}, {"start": 2311.54, "end": 2311.78, "word": " the", "probability": 0.9140625}, {"start": 2311.78, "end": 2312.32, "word": " slide", "probability": 0.9599609375}, {"start": 2312.32, "end": 2312.52, "word": " we", "probability": 0.94677734375}, {"start": 2312.52, "end": 2312.88, "word": " have", "probability": 0.9306640625}, {"start": 2312.88, "end": 2313.4, "word": " here.", "probability": 0.6708984375}, {"start": 2316.44, "end": 2317.08, "word": " As", "probability": 0.96923828125}, {"start": 2317.08, "end": 2317.22, "word": " we", "probability": 0.94921875}, {"start": 2317.22, "end": 2317.68, "word": " mentioned,", "probability": 0.83349609375}, {"start": 2319.62, "end": 2320.14, "word": " father's", "probability": 0.683349609375}, {"start": 2320.14, "end": 2320.52, "word": " height", "probability": 0.9677734375}, {"start": 2320.52, "end": 2321.04, "word": " is", "probability": 0.939453125}, {"start": 2321.04, "end": 2321.38, "word": " the", "probability": 0.92626953125}, {"start": 2321.38, "end": 2322.18, "word": " explanatory", "probability": 0.96923828125}, {"start": 2322.18, "end": 2322.64, "word": " variable.", "probability": 0.91845703125}, {"start": 2323.66, "end": 2324.06, "word": " Son's", "probability": 0.968017578125}, {"start": 2324.06, "end": 2324.38, "word": " height", "probability": 0.9560546875}, {"start": 2324.38, "end": 2324.82, "word": " is", "probability": 0.9384765625}, {"start": 2324.82, "end": 2325.08, "word": " the", "probability": 0.90185546875}, {"start": 2325.08, "end": 2325.64, "word": " response", "probability": 0.9658203125}, {"start": 2325.64, 
"end": 2326.06, "word": " variable.", "probability": 0.8955078125}], "temperature": 1.0}, {"id": 86, "seek": 234919, "start": 2329.19, "end": 2349.19, "text": " And that simple calculation gives summation of xi, summation of yi, summation x squared, y squared, and some xy. And finally, we'll get that result, 87%. Now, the sign is positive. That means there exists positive.", "tokens": [400, 300, 2199, 17108, 2709, 28811, 295, 36800, 11, 28811, 295, 288, 72, 11, 28811, 2031, 8889, 11, 288, 8889, 11, 293, 512, 2031, 88, 13, 400, 2721, 11, 321, 603, 483, 300, 1874, 11, 27990, 6856, 823, 11, 264, 1465, 307, 3353, 13, 663, 1355, 456, 8198, 3353, 13], "avg_logprob": -0.2130821055057002, "compression_ratio": 1.5034965034965035, "no_speech_prob": 0.0, "words": [{"start": 2329.19, "end": 2329.67, "word": " And", "probability": 0.693359375}, {"start": 2329.67, "end": 2330.13, "word": " that", "probability": 0.71240234375}, {"start": 2330.13, "end": 2330.41, "word": " simple", "probability": 0.8564453125}, {"start": 2330.41, "end": 2330.89, "word": " calculation", "probability": 0.9248046875}, {"start": 2330.89, "end": 2331.41, "word": " gives", "probability": 0.58154296875}, {"start": 2331.41, "end": 2332.31, "word": " summation", "probability": 0.72119140625}, {"start": 2332.31, "end": 2332.57, "word": " of", "probability": 0.962890625}, {"start": 2332.57, "end": 2332.81, "word": " xi,", "probability": 0.399169921875}, {"start": 2334.05, "end": 2334.87, "word": " summation", "probability": 0.86669921875}, {"start": 2334.87, "end": 2335.11, "word": " of", "probability": 0.97119140625}, {"start": 2335.11, "end": 2335.63, "word": " yi,", "probability": 0.961181640625}, {"start": 2335.95, "end": 2336.25, "word": " summation", "probability": 0.86669921875}, {"start": 2336.25, "end": 2336.61, "word": " x", "probability": 0.8095703125}, {"start": 2336.61, "end": 2337.11, "word": " squared,", "probability": 0.4990234375}, {"start": 2337.25, "end": 2337.43, "word": " y", 
"probability": 0.99267578125}, {"start": 2337.43, "end": 2337.81, "word": " squared,", "probability": 0.87646484375}, {"start": 2337.97, "end": 2338.03, "word": " and", "probability": 0.87353515625}, {"start": 2338.03, "end": 2338.19, "word": " some", "probability": 0.76904296875}, {"start": 2338.19, "end": 2338.71, "word": " xy.", "probability": 0.965087890625}, {"start": 2339.65, "end": 2339.97, "word": " And", "probability": 0.92724609375}, {"start": 2339.97, "end": 2340.47, "word": " finally,", "probability": 0.83447265625}, {"start": 2341.13, "end": 2341.63, "word": " we'll", "probability": 0.740478515625}, {"start": 2341.63, "end": 2341.97, "word": " get", "probability": 0.94580078125}, {"start": 2341.97, "end": 2342.31, "word": " that", "probability": 0.88525390625}, {"start": 2342.31, "end": 2342.69, "word": " result,", "probability": 0.9501953125}, {"start": 2342.85, "end": 2343.73, "word": " 87%.", "probability": 0.843994140625}, {"start": 2343.73, "end": 2344.87, "word": " Now,", "probability": 0.962890625}, {"start": 2345.01, "end": 2345.13, "word": " the", "probability": 0.65966796875}, {"start": 2345.13, "end": 2345.33, "word": " sign", "probability": 0.90576171875}, {"start": 2345.33, "end": 2345.49, "word": " is", "probability": 0.9375}, {"start": 2345.49, "end": 2345.83, "word": " positive.", "probability": 0.9267578125}, {"start": 2346.95, "end": 2347.31, "word": " That", "probability": 0.89111328125}, {"start": 2347.31, "end": 2347.63, "word": " means", "probability": 0.9345703125}, {"start": 2347.63, "end": 2347.85, "word": " there", "probability": 0.81982421875}, {"start": 2347.85, "end": 2348.25, "word": " exists", "probability": 0.86181640625}, {"start": 2348.25, "end": 2349.19, "word": " positive.", "probability": 0.78125}], "temperature": 1.0}, {"id": 87, "seek": 237746, "start": 2350.26, "end": 2377.46, "text": " And 0.87 is close to 1. That means there exists strong positive relationship between father's and son's height. 
I think the calculation is straightforward. Now, for this example, the data are given in inches. I mean father's and son's height in inch.", "tokens": [400, 1958, 13, 23853, 307, 1998, 281, 502, 13, 663, 1355, 456, 8198, 2068, 3353, 2480, 1296, 3086, 311, 293, 1872, 311, 6681, 13, 286, 519, 264, 17108, 307, 15325, 13, 823, 11, 337, 341, 1365, 11, 264, 1412, 366, 2212, 294, 8478, 13, 286, 914, 3086, 311, 293, 1872, 311, 6681, 294, 7227, 13], "avg_logprob": -0.21330914593168668, "compression_ratio": 1.5212121212121212, "no_speech_prob": 0.0, "words": [{"start": 2350.26, "end": 2350.68, "word": " And", "probability": 0.537109375}, {"start": 2350.68, "end": 2351.02, "word": " 0", "probability": 0.69189453125}, {"start": 2351.02, "end": 2351.54, "word": ".87", "probability": 0.995361328125}, {"start": 2351.54, "end": 2352.34, "word": " is", "probability": 0.91455078125}, {"start": 2352.34, "end": 2352.96, "word": " close", "probability": 0.603515625}, {"start": 2352.96, "end": 2353.28, "word": " to", "probability": 0.94921875}, {"start": 2353.28, "end": 2353.5, "word": " 1.", "probability": 0.66162109375}, {"start": 2353.84, "end": 2353.96, "word": " That", "probability": 0.8447265625}, {"start": 2353.96, "end": 2354.26, "word": " means", "probability": 0.92529296875}, {"start": 2354.26, "end": 2354.52, "word": " there", "probability": 0.76904296875}, {"start": 2354.52, "end": 2354.96, "word": " exists", "probability": 0.734375}, {"start": 2354.96, "end": 2355.46, "word": " strong", "probability": 0.63525390625}, {"start": 2355.46, "end": 2356.7, "word": " positive", "probability": 0.89208984375}, {"start": 2356.7, "end": 2357.32, "word": " relationship", "probability": 0.90087890625}, {"start": 2357.32, "end": 2357.9, "word": " between", "probability": 0.87939453125}, {"start": 2357.9, "end": 2359.1, "word": " father's", "probability": 0.5313720703125}, {"start": 2359.1, "end": 2359.42, "word": " and", "probability": 0.9140625}, {"start": 2359.42, "end": 2359.88, 
"word": " son's", "probability": 0.917724609375}, {"start": 2359.88, "end": 2360.12, "word": " height.", "probability": 0.93603515625}, {"start": 2361.56, "end": 2362.0, "word": " I", "probability": 0.98486328125}, {"start": 2362.0, "end": 2362.24, "word": " think", "probability": 0.91357421875}, {"start": 2362.24, "end": 2362.48, "word": " the", "probability": 0.884765625}, {"start": 2362.48, "end": 2362.96, "word": " calculation", "probability": 0.55517578125}, {"start": 2362.96, "end": 2363.62, "word": " is", "probability": 0.93408203125}, {"start": 2363.62, "end": 2365.06, "word": " straightforward.", "probability": 0.55126953125}, {"start": 2367.28, "end": 2367.8, "word": " Now,", "probability": 0.900390625}, {"start": 2368.28, "end": 2368.72, "word": " for", "probability": 0.94580078125}, {"start": 2368.72, "end": 2368.96, "word": " this", "probability": 0.94677734375}, {"start": 2368.96, "end": 2369.4, "word": " example,", "probability": 0.9755859375}, {"start": 2371.12, "end": 2371.44, "word": " the", "probability": 0.884765625}, {"start": 2371.44, "end": 2371.86, "word": " data", "probability": 0.91845703125}, {"start": 2371.86, "end": 2372.6, "word": " are", "probability": 0.76513671875}, {"start": 2372.6, "end": 2372.96, "word": " given", "probability": 0.88623046875}, {"start": 2372.96, "end": 2373.28, "word": " in", "probability": 0.9453125}, {"start": 2373.28, "end": 2373.72, "word": " inches.", "probability": 0.8671875}, {"start": 2374.12, "end": 2374.28, "word": " I", "probability": 0.9931640625}, {"start": 2374.28, "end": 2374.54, "word": " mean", "probability": 0.9658203125}, {"start": 2374.54, "end": 2375.36, "word": " father's", "probability": 0.671630859375}, {"start": 2375.36, "end": 2375.56, "word": " and", "probability": 0.93408203125}, {"start": 2375.56, "end": 2375.92, "word": " son's", "probability": 0.956298828125}, {"start": 2375.92, "end": 2376.34, "word": " height", "probability": 0.958984375}, {"start": 2376.34, "end": 2377.18, 
"word": " in", "probability": 0.8525390625}, {"start": 2377.18, "end": 2377.46, "word": " inch.", "probability": 0.55322265625}], "temperature": 1.0}, {"id": 88, "seek": 240196, "start": 2378.73, "end": 2401.97, "text": " Suppose we want to convert from inch to centimeter, so we have to multiply by 2. Do you think in this case, R will change? So if we add or multiply or divide, R will not change? I mean, if we have X values,", "tokens": [21360, 321, 528, 281, 7620, 490, 7227, 281, 31914, 11, 370, 321, 362, 281, 12972, 538, 568, 13, 1144, 291, 519, 294, 341, 1389, 11, 497, 486, 1319, 30, 407, 498, 321, 909, 420, 12972, 420, 9845, 11, 497, 486, 406, 1319, 30, 286, 914, 11, 498, 321, 362, 1783, 4190, 11], "avg_logprob": -0.23113207997016186, "compression_ratio": 1.4081632653061225, "no_speech_prob": 0.0, "words": [{"start": 2378.73, "end": 2379.15, "word": " Suppose", "probability": 0.546875}, {"start": 2379.15, "end": 2379.33, "word": " we", "probability": 0.8134765625}, {"start": 2379.33, "end": 2379.53, "word": " want", "probability": 0.880859375}, {"start": 2379.53, "end": 2379.67, "word": " to", "probability": 0.96044921875}, {"start": 2379.67, "end": 2380.11, "word": " convert", "probability": 0.88720703125}, {"start": 2380.11, "end": 2380.57, "word": " from", "probability": 0.86572265625}, {"start": 2380.57, "end": 2380.89, "word": " inch", "probability": 0.68994140625}, {"start": 2380.89, "end": 2381.05, "word": " to", "probability": 0.966796875}, {"start": 2381.05, "end": 2381.49, "word": " centimeter,", "probability": 0.432861328125}, {"start": 2381.69, "end": 2381.85, "word": " so", "probability": 0.84521484375}, {"start": 2381.85, "end": 2382.07, "word": " we", "probability": 0.89599609375}, {"start": 2382.07, "end": 2382.21, "word": " have", "probability": 0.916015625}, {"start": 2382.21, "end": 2382.33, "word": " to", "probability": 0.97119140625}, {"start": 2382.33, "end": 2382.71, "word": " multiply", "probability": 0.8984375}, {"start": 2382.71, 
"end": 2383.01, "word": " by", "probability": 0.8955078125}, {"start": 2383.01, "end": 2383.25, "word": " 2.", "probability": 0.564453125}, {"start": 2383.89, "end": 2384.55, "word": " Do", "probability": 0.91015625}, {"start": 2384.55, "end": 2384.75, "word": " you", "probability": 0.95849609375}, {"start": 2384.75, "end": 2384.99, "word": " think", "probability": 0.9140625}, {"start": 2384.99, "end": 2385.13, "word": " in", "probability": 0.86083984375}, {"start": 2385.13, "end": 2385.37, "word": " this", "probability": 0.9482421875}, {"start": 2385.37, "end": 2385.95, "word": " case,", "probability": 0.91162109375}, {"start": 2386.43, "end": 2386.67, "word": " R", "probability": 0.63037109375}, {"start": 2386.67, "end": 2387.65, "word": " will", "probability": 0.83544921875}, {"start": 2387.65, "end": 2388.13, "word": " change?", "probability": 0.849609375}, {"start": 2389.99, "end": 2390.65, "word": " So", "probability": 0.65966796875}, {"start": 2390.65, "end": 2390.87, "word": " if", "probability": 0.76416015625}, {"start": 2390.87, "end": 2391.05, "word": " we", "probability": 0.95263671875}, {"start": 2391.05, "end": 2391.53, "word": " add", "probability": 0.8779296875}, {"start": 2391.53, "end": 2392.05, "word": " or", "probability": 0.63232421875}, {"start": 2392.05, "end": 2392.55, "word": " multiply", "probability": 0.87841796875}, {"start": 2392.55, "end": 2393.81, "word": " or", "probability": 0.87939453125}, {"start": 2393.81, "end": 2394.27, "word": " divide,", "probability": 0.9365234375}, {"start": 2395.75, "end": 2396.09, "word": " R", "probability": 0.98828125}, {"start": 2396.09, "end": 2396.29, "word": " will", "probability": 0.888671875}, {"start": 2396.29, "end": 2396.49, "word": " not", "probability": 0.9462890625}, {"start": 2396.49, "end": 2396.95, "word": " change?", "probability": 0.89892578125}, {"start": 2398.05, "end": 2398.71, "word": " I", "probability": 0.74951171875}, {"start": 2398.71, "end": 2398.93, "word": " mean,", 
"probability": 0.96875}, {"start": 2399.65, "end": 2399.91, "word": " if", "probability": 0.9462890625}, {"start": 2399.91, "end": 2400.07, "word": " we", "probability": 0.953125}, {"start": 2400.07, "end": 2400.35, "word": " have", "probability": 0.9365234375}, {"start": 2400.35, "end": 2401.49, "word": " X", "probability": 0.50439453125}, {"start": 2401.49, "end": 2401.97, "word": " values,", "probability": 0.8466796875}], "temperature": 1.0}, {"id": 89, "seek": 243308, "start": 2404.06, "end": 2433.08, "text": " And we divide or multiply X, I mean each value of X, by a number, by a fixed value. For example, suppose here we multiplied each value by 2.5 for X. Also multiply Y by the same value, 2.5. Y will be the same. In addition to that, if we multiply X by 2.5, for example, and Y by 5, also R will not change.", "tokens": [400, 321, 9845, 420, 12972, 1783, 11, 286, 914, 1184, 2158, 295, 1783, 11, 538, 257, 1230, 11, 538, 257, 6806, 2158, 13, 1171, 1365, 11, 7297, 510, 321, 17207, 1184, 2158, 538, 568, 13, 20, 337, 1783, 13, 2743, 12972, 398, 538, 264, 912, 2158, 11, 568, 13, 20, 13, 398, 486, 312, 264, 912, 13, 682, 4500, 281, 300, 11, 498, 321, 12972, 1783, 538, 568, 13, 20, 11, 337, 1365, 11, 293, 398, 538, 1025, 11, 611, 497, 486, 406, 1319, 13], "avg_logprob": -0.14362282271302024, "compression_ratio": 1.625668449197861, "no_speech_prob": 0.0, "words": [{"start": 2404.06, "end": 2404.38, "word": " And", "probability": 0.5224609375}, {"start": 2404.38, "end": 2404.56, "word": " we", "probability": 0.853515625}, {"start": 2404.56, "end": 2404.94, "word": " divide", "probability": 0.853515625}, {"start": 2404.94, "end": 2405.24, "word": " or", "probability": 0.8330078125}, {"start": 2405.24, "end": 2405.66, "word": " multiply", "probability": 0.904296875}, {"start": 2405.66, "end": 2406.12, "word": " X,", "probability": 0.33935546875}, {"start": 2406.74, "end": 2406.88, "word": " I", "probability": 0.8876953125}, {"start": 2406.88, "end": 2407.0, "word": " 
mean", "probability": 0.97021484375}, {"start": 2407.0, "end": 2407.32, "word": " each", "probability": 0.82275390625}, {"start": 2407.32, "end": 2407.56, "word": " value", "probability": 0.97412109375}, {"start": 2407.56, "end": 2407.7, "word": " of", "probability": 0.8466796875}, {"start": 2407.7, "end": 2407.96, "word": " X,", "probability": 0.96435546875}, {"start": 2408.26, "end": 2408.62, "word": " by", "probability": 0.96044921875}, {"start": 2408.62, "end": 2408.74, "word": " a", "probability": 0.97900390625}, {"start": 2408.74, "end": 2408.96, "word": " number,", "probability": 0.8818359375}, {"start": 2409.06, "end": 2409.16, "word": " by", "probability": 0.95751953125}, {"start": 2409.16, "end": 2409.28, "word": " a", "probability": 0.96142578125}, {"start": 2409.28, "end": 2409.46, "word": " fixed", "probability": 0.9150390625}, {"start": 2409.46, "end": 2409.84, "word": " value.", "probability": 0.96728515625}, {"start": 2410.82, "end": 2411.1, "word": " For", "probability": 0.955078125}, {"start": 2411.1, "end": 2411.34, "word": " example,", "probability": 0.97216796875}, {"start": 2411.42, "end": 2411.7, "word": " suppose", "probability": 0.83349609375}, {"start": 2411.7, "end": 2411.92, "word": " here", "probability": 0.8173828125}, {"start": 2411.92, "end": 2412.08, "word": " we", "probability": 0.8056640625}, {"start": 2412.08, "end": 2412.6, "word": " multiplied", "probability": 0.425048828125}, {"start": 2412.6, "end": 2413.08, "word": " each", "probability": 0.94873046875}, {"start": 2413.08, "end": 2413.32, "word": " value", "probability": 0.9775390625}, {"start": 2413.32, "end": 2413.48, "word": " by", "probability": 0.96826171875}, {"start": 2413.48, "end": 2413.68, "word": " 2", "probability": 0.94091796875}, {"start": 2413.68, "end": 2414.22, "word": ".5", "probability": 0.994140625}, {"start": 2414.22, "end": 2415.72, "word": " for", "probability": 0.6767578125}, {"start": 2415.72, "end": 2416.04, "word": " X.", "probability": 
0.96142578125}, {"start": 2417.58, "end": 2418.06, "word": " Also", "probability": 0.92431640625}, {"start": 2418.06, "end": 2418.82, "word": " multiply", "probability": 0.69775390625}, {"start": 2418.82, "end": 2419.1, "word": " Y", "probability": 0.9560546875}, {"start": 2419.1, "end": 2419.28, "word": " by", "probability": 0.97119140625}, {"start": 2419.28, "end": 2419.46, "word": " the", "probability": 0.91455078125}, {"start": 2419.46, "end": 2419.66, "word": " same", "probability": 0.90625}, {"start": 2419.66, "end": 2420.02, "word": " value,", "probability": 0.97509765625}, {"start": 2420.36, "end": 2420.5, "word": " 2", "probability": 0.9658203125}, {"start": 2420.5, "end": 2420.98, "word": ".5.", "probability": 0.9990234375}, {"start": 2422.0, "end": 2422.2, "word": " Y", "probability": 0.986328125}, {"start": 2422.2, "end": 2422.36, "word": " will", "probability": 0.8857421875}, {"start": 2422.36, "end": 2422.48, "word": " be", "probability": 0.9521484375}, {"start": 2422.48, "end": 2422.64, "word": " the", "probability": 0.9140625}, {"start": 2422.64, "end": 2422.84, "word": " same.", "probability": 0.9091796875}, {"start": 2424.04, "end": 2424.26, "word": " In", "probability": 0.96142578125}, {"start": 2424.26, "end": 2424.52, "word": " addition", "probability": 0.95654296875}, {"start": 2424.52, "end": 2424.72, "word": " to", "probability": 0.966796875}, {"start": 2424.72, "end": 2424.94, "word": " that,", "probability": 0.93505859375}, {"start": 2425.74, "end": 2425.98, "word": " if", "probability": 0.9462890625}, {"start": 2425.98, "end": 2426.2, "word": " we", "probability": 0.94921875}, {"start": 2426.2, "end": 2426.52, "word": " multiply", "probability": 0.908203125}, {"start": 2426.52, "end": 2426.84, "word": " X", "probability": 0.98974609375}, {"start": 2426.84, "end": 2427.04, "word": " by", "probability": 0.97412109375}, {"start": 2427.04, "end": 2427.24, "word": " 2", "probability": 0.9970703125}, {"start": 2427.24, "end": 2427.78, "word": 
".5,", "probability": 0.99951171875}, {"start": 2427.94, "end": 2428.08, "word": " for", "probability": 0.94580078125}, {"start": 2428.08, "end": 2428.44, "word": " example,", "probability": 0.974609375}, {"start": 2428.68, "end": 2428.92, "word": " and", "probability": 0.93896484375}, {"start": 2428.92, "end": 2429.12, "word": " Y", "probability": 0.9921875}, {"start": 2429.12, "end": 2429.4, "word": " by", "probability": 0.97265625}, {"start": 2429.4, "end": 2430.12, "word": " 5,", "probability": 0.8583984375}, {"start": 2430.74, "end": 2431.48, "word": " also", "probability": 0.828125}, {"start": 2431.48, "end": 2432.42, "word": " R", "probability": 0.87158203125}, {"start": 2432.42, "end": 2432.58, "word": " will", "probability": 0.875}, {"start": 2432.58, "end": 2432.8, "word": " not", "probability": 0.93017578125}, {"start": 2432.8, "end": 2433.08, "word": " change.", "probability": 0.912109375}], "temperature": 1.0}, {"id": 90, "seek": 246236, "start": 2434.08, "end": 2462.36, "text": " But you have to be careful. We multiply each value of x by the same number. And each value of y by the same number, that number may be different from x. So I mean multiply x by 2.5 and y by minus 1 or plus 2 or whatever you have. 
But if it's negative, then we'll get negative answer.", "tokens": [583, 291, 362, 281, 312, 5026, 13, 492, 12972, 1184, 2158, 295, 2031, 538, 264, 912, 1230, 13, 400, 1184, 2158, 295, 288, 538, 264, 912, 1230, 11, 300, 1230, 815, 312, 819, 490, 2031, 13, 407, 286, 914, 12972, 2031, 538, 568, 13, 20, 293, 288, 538, 3175, 502, 420, 1804, 568, 420, 2035, 291, 362, 13, 583, 498, 309, 311, 3671, 11, 550, 321, 603, 483, 3671, 1867, 13], "avg_logprob": -0.17024739045235845, "compression_ratio": 1.5865921787709498, "no_speech_prob": 0.0, "words": [{"start": 2434.08, "end": 2434.36, "word": " But", "probability": 0.479736328125}, {"start": 2434.36, "end": 2434.5, "word": " you", "probability": 0.81005859375}, {"start": 2434.5, "end": 2434.62, "word": " have", "probability": 0.9287109375}, {"start": 2434.62, "end": 2434.74, "word": " to", "probability": 0.9736328125}, {"start": 2434.74, "end": 2434.96, "word": " be", "probability": 0.9560546875}, {"start": 2434.96, "end": 2435.4, "word": " careful.", "probability": 0.96484375}, {"start": 2436.22, "end": 2436.38, "word": " We", "probability": 0.81298828125}, {"start": 2436.38, "end": 2436.86, "word": " multiply", "probability": 0.91455078125}, {"start": 2436.86, "end": 2437.62, "word": " each", "probability": 0.95458984375}, {"start": 2437.62, "end": 2438.0, "word": " value", "probability": 0.97412109375}, {"start": 2438.0, "end": 2438.22, "word": " of", "probability": 0.93408203125}, {"start": 2438.22, "end": 2438.48, "word": " x", "probability": 0.427490234375}, {"start": 2438.48, "end": 2438.82, "word": " by", "probability": 0.96044921875}, {"start": 2438.82, "end": 2439.06, "word": " the", "probability": 0.916015625}, {"start": 2439.06, "end": 2439.4, "word": " same", "probability": 0.91015625}, {"start": 2439.4, "end": 2440.9, "word": " number.", "probability": 0.93017578125}, {"start": 2442.12, "end": 2442.72, "word": " And", "probability": 0.89892578125}, {"start": 2442.72, "end": 2443.26, "word": " each", 
"probability": 0.9462890625}, {"start": 2443.26, "end": 2443.52, "word": " value", "probability": 0.9794921875}, {"start": 2443.52, "end": 2443.72, "word": " of", "probability": 0.958984375}, {"start": 2443.72, "end": 2444.0, "word": " y", "probability": 0.947265625}, {"start": 2444.0, "end": 2444.98, "word": " by", "probability": 0.8916015625}, {"start": 2444.98, "end": 2445.18, "word": " the", "probability": 0.91748046875}, {"start": 2445.18, "end": 2445.38, "word": " same", "probability": 0.90283203125}, {"start": 2445.38, "end": 2445.7, "word": " number,", "probability": 0.9326171875}, {"start": 2445.82, "end": 2445.98, "word": " that", "probability": 0.93408203125}, {"start": 2445.98, "end": 2446.26, "word": " number", "probability": 0.94140625}, {"start": 2446.26, "end": 2446.48, "word": " may", "probability": 0.7890625}, {"start": 2446.48, "end": 2446.58, "word": " be", "probability": 0.9560546875}, {"start": 2446.58, "end": 2446.96, "word": " different", "probability": 0.900390625}, {"start": 2446.96, "end": 2447.4, "word": " from", "probability": 0.88037109375}, {"start": 2447.4, "end": 2447.84, "word": " x.", "probability": 0.9912109375}, {"start": 2449.04, "end": 2449.32, "word": " So", "probability": 0.95458984375}, {"start": 2449.32, "end": 2449.44, "word": " I", "probability": 0.748046875}, {"start": 2449.44, "end": 2449.64, "word": " mean", "probability": 0.958984375}, {"start": 2449.64, "end": 2450.34, "word": " multiply", "probability": 0.75244140625}, {"start": 2450.34, "end": 2450.66, "word": " x", "probability": 0.99267578125}, {"start": 2450.66, "end": 2450.9, "word": " by", "probability": 0.9697265625}, {"start": 2450.9, "end": 2451.32, "word": " 2", "probability": 0.916015625}, {"start": 2451.32, "end": 2451.94, "word": ".5", "probability": 0.991943359375}, {"start": 2451.94, "end": 2452.24, "word": " and", "probability": 0.81982421875}, {"start": 2452.24, "end": 2452.56, "word": " y", "probability": 0.9912109375}, {"start": 2452.56, "end": 
2453.1, "word": " by", "probability": 0.9716796875}, {"start": 2453.1, "end": 2453.5, "word": " minus", "probability": 0.74951171875}, {"start": 2453.5, "end": 2453.84, "word": " 1", "probability": 0.63916015625}, {"start": 2453.84, "end": 2455.6, "word": " or", "probability": 0.313720703125}, {"start": 2455.6, "end": 2456.0, "word": " plus", "probability": 0.95361328125}, {"start": 2456.0, "end": 2456.28, "word": " 2", "probability": 0.931640625}, {"start": 2456.28, "end": 2456.54, "word": " or", "probability": 0.71875}, {"start": 2456.54, "end": 2456.82, "word": " whatever", "probability": 0.93603515625}, {"start": 2456.82, "end": 2457.06, "word": " you", "probability": 0.96240234375}, {"start": 2457.06, "end": 2457.3, "word": " have.", "probability": 0.94482421875}, {"start": 2459.2, "end": 2459.76, "word": " But", "probability": 0.78662109375}, {"start": 2459.76, "end": 2459.94, "word": " if", "probability": 0.94482421875}, {"start": 2459.94, "end": 2460.16, "word": " it's", "probability": 0.939697265625}, {"start": 2460.16, "end": 2460.58, "word": " negative,", "probability": 0.95556640625}, {"start": 2460.78, "end": 2461.0, "word": " then", "probability": 0.78076171875}, {"start": 2461.0, "end": 2461.3, "word": " we'll", "probability": 0.673583984375}, {"start": 2461.3, "end": 2461.64, "word": " get", "probability": 0.58544921875}, {"start": 2461.64, "end": 2462.02, "word": " negative", "probability": 0.587890625}, {"start": 2462.02, "end": 2462.36, "word": " answer.", "probability": 0.86328125}], "temperature": 1.0}, {"id": 91, "seek": 249148, "start": 2464.76, "end": 2491.48, "text": " I mean if Y is positive, for example, and we multiply each value Y by minus one, that will give negative sign. But here I meant if we multiply this value by positive sign, plus two, plus three, and let's see how can we do that by Excel. Now this is the data we have. 
I just make copy.", "tokens": [286, 914, 498, 398, 307, 3353, 11, 337, 1365, 11, 293, 321, 12972, 1184, 2158, 398, 538, 3175, 472, 11, 300, 486, 976, 3671, 1465, 13, 583, 510, 286, 4140, 498, 321, 12972, 341, 2158, 538, 3353, 1465, 11, 1804, 732, 11, 1804, 1045, 11, 293, 718, 311, 536, 577, 393, 321, 360, 300, 538, 19060, 13, 823, 341, 307, 264, 1412, 321, 362, 13, 286, 445, 652, 5055, 13], "avg_logprob": -0.1892605612815266, "compression_ratio": 1.5405405405405406, "no_speech_prob": 0.0, "words": [{"start": 2464.76, "end": 2464.98, "word": " I", "probability": 0.76611328125}, {"start": 2464.98, "end": 2465.14, "word": " mean", "probability": 0.94873046875}, {"start": 2465.14, "end": 2465.32, "word": " if", "probability": 0.70068359375}, {"start": 2465.32, "end": 2465.48, "word": " Y", "probability": 0.501953125}, {"start": 2465.48, "end": 2465.64, "word": " is", "probability": 0.92041015625}, {"start": 2465.64, "end": 2466.06, "word": " positive,", "probability": 0.92626953125}, {"start": 2466.28, "end": 2466.4, "word": " for", "probability": 0.91650390625}, {"start": 2466.4, "end": 2466.76, "word": " example,", "probability": 0.97021484375}, {"start": 2466.92, "end": 2467.06, "word": " and", "probability": 0.91064453125}, {"start": 2467.06, "end": 2467.2, "word": " we", "probability": 0.8623046875}, {"start": 2467.2, "end": 2467.6, "word": " multiply", "probability": 0.9140625}, {"start": 2467.6, "end": 2467.88, "word": " each", "probability": 0.8857421875}, {"start": 2467.88, "end": 2468.06, "word": " value", "probability": 0.9248046875}, {"start": 2468.06, "end": 2468.36, "word": " Y", "probability": 0.76123046875}, {"start": 2468.36, "end": 2468.72, "word": " by", "probability": 0.96142578125}, {"start": 2468.72, "end": 2469.08, "word": " minus", "probability": 0.73974609375}, {"start": 2469.08, "end": 2469.42, "word": " one,", "probability": 0.64794921875}, {"start": 2469.66, "end": 2469.98, "word": " that", "probability": 0.9228515625}, {"start": 2469.98, 
"end": 2470.16, "word": " will", "probability": 0.85107421875}, {"start": 2470.16, "end": 2470.36, "word": " give", "probability": 0.7197265625}, {"start": 2470.36, "end": 2470.68, "word": " negative", "probability": 0.912109375}, {"start": 2470.68, "end": 2472.04, "word": " sign.", "probability": 0.463134765625}, {"start": 2472.7, "end": 2473.0, "word": " But", "probability": 0.8642578125}, {"start": 2473.0, "end": 2473.34, "word": " here", "probability": 0.79541015625}, {"start": 2473.34, "end": 2473.5, "word": " I", "probability": 0.73974609375}, {"start": 2473.5, "end": 2474.68, "word": " meant", "probability": 0.7314453125}, {"start": 2474.68, "end": 2475.04, "word": " if", "probability": 0.7626953125}, {"start": 2475.04, "end": 2475.22, "word": " we", "probability": 0.81689453125}, {"start": 2475.22, "end": 2475.72, "word": " multiply", "probability": 0.916015625}, {"start": 2475.72, "end": 2476.12, "word": " this", "probability": 0.9404296875}, {"start": 2476.12, "end": 2476.56, "word": " value", "probability": 0.9736328125}, {"start": 2476.56, "end": 2477.28, "word": " by", "probability": 0.96630859375}, {"start": 2477.28, "end": 2477.64, "word": " positive", "probability": 0.89892578125}, {"start": 2477.64, "end": 2478.04, "word": " sign,", "probability": 0.92724609375}, {"start": 2478.4, "end": 2478.58, "word": " plus", "probability": 0.94189453125}, {"start": 2478.58, "end": 2478.86, "word": " two,", "probability": 0.86279296875}, {"start": 2479.1, "end": 2479.42, "word": " plus", "probability": 0.96337890625}, {"start": 2479.42, "end": 2479.82, "word": " three,", "probability": 0.94482421875}, {"start": 2480.56, "end": 2480.7, "word": " and", "probability": 0.86474609375}, {"start": 2480.7, "end": 2480.9, "word": " let's", "probability": 0.637451171875}, {"start": 2480.9, "end": 2481.02, "word": " see", "probability": 0.9150390625}, {"start": 2481.02, "end": 2481.14, "word": " how", "probability": 0.91357421875}, {"start": 2481.14, "end": 2481.32, 
"word": " can", "probability": 0.7236328125}, {"start": 2481.32, "end": 2481.46, "word": " we", "probability": 0.9560546875}, {"start": 2481.46, "end": 2481.6, "word": " do", "probability": 0.95751953125}, {"start": 2481.6, "end": 2481.9, "word": " that", "probability": 0.93359375}, {"start": 2481.9, "end": 2482.2, "word": " by", "probability": 0.96337890625}, {"start": 2482.2, "end": 2482.54, "word": " Excel.", "probability": 0.8544921875}, {"start": 2486.32, "end": 2486.88, "word": " Now", "probability": 0.9345703125}, {"start": 2486.88, "end": 2487.16, "word": " this", "probability": 0.71240234375}, {"start": 2487.16, "end": 2487.28, "word": " is", "probability": 0.705078125}, {"start": 2487.28, "end": 2487.34, "word": " the", "probability": 0.9189453125}, {"start": 2487.34, "end": 2487.54, "word": " data", "probability": 0.9443359375}, {"start": 2487.54, "end": 2487.74, "word": " we", "probability": 0.9384765625}, {"start": 2487.74, "end": 2488.0, "word": " have.", "probability": 0.9453125}, {"start": 2490.12, "end": 2490.68, "word": " I", "probability": 0.9765625}, {"start": 2490.68, "end": 2490.9, "word": " just", "probability": 0.75}, {"start": 2490.9, "end": 2491.14, "word": " make", "probability": 0.9111328125}, {"start": 2491.14, "end": 2491.48, "word": " copy.", "probability": 0.837890625}], "temperature": 1.0}, {"id": 92, "seek": 252687, "start": 2497.73, "end": 2526.87, "text": " I will multiply each value X by 2.5. And I will do the same for Y value. I will replace this data by the new one. 
For sure the calculations will, the computations here will change, but R will stay the same.", "tokens": [286, 486, 12972, 1184, 2158, 1783, 538, 568, 13, 20, 13, 400, 286, 486, 360, 264, 912, 337, 398, 2158, 13, 286, 486, 7406, 341, 1412, 538, 264, 777, 472, 13, 1171, 988, 264, 20448, 486, 11, 264, 2807, 763, 510, 486, 1319, 11, 457, 497, 486, 1754, 264, 912, 13], "avg_logprob": -0.20192307950212404, "compression_ratio": 1.4475524475524475, "no_speech_prob": 0.0, "words": [{"start": 2497.73, "end": 2497.97, "word": " I", "probability": 0.84521484375}, {"start": 2497.97, "end": 2498.11, "word": " will", "probability": 0.8603515625}, {"start": 2498.11, "end": 2498.43, "word": " multiply", "probability": 0.9306640625}, {"start": 2498.43, "end": 2498.83, "word": " each", "probability": 0.9462890625}, {"start": 2498.83, "end": 2499.13, "word": " value", "probability": 0.9560546875}, {"start": 2499.13, "end": 2499.55, "word": " X", "probability": 0.29248046875}, {"start": 2499.55, "end": 2501.43, "word": " by", "probability": 0.92578125}, {"start": 2501.43, "end": 2501.75, "word": " 2", "probability": 0.943359375}, {"start": 2501.75, "end": 2502.29, "word": ".5.", "probability": 0.99072265625}, {"start": 2504.21, "end": 2504.89, "word": " And", "probability": 0.85400390625}, {"start": 2504.89, "end": 2504.97, "word": " I", "probability": 0.9365234375}, {"start": 2504.97, "end": 2505.09, "word": " will", "probability": 0.8798828125}, {"start": 2505.09, "end": 2505.19, "word": " do", "probability": 0.95703125}, {"start": 2505.19, "end": 2505.41, "word": " the", "probability": 0.9130859375}, {"start": 2505.41, "end": 2505.69, "word": " same", "probability": 0.90478515625}, {"start": 2505.69, "end": 2506.19, "word": " for", "probability": 0.92724609375}, {"start": 2506.19, "end": 2509.59, "word": " Y", "probability": 0.8740234375}, {"start": 2509.59, "end": 2509.93, "word": " value.", "probability": 0.8955078125}, {"start": 2511.71, "end": 2512.39, "word": " I", 
"probability": 0.99169921875}, {"start": 2512.39, "end": 2512.55, "word": " will", "probability": 0.89013671875}, {"start": 2512.55, "end": 2513.15, "word": " replace", "probability": 0.9248046875}, {"start": 2513.15, "end": 2515.93, "word": " this", "probability": 0.86474609375}, {"start": 2515.93, "end": 2516.27, "word": " data", "probability": 0.94580078125}, {"start": 2516.27, "end": 2516.47, "word": " by", "probability": 0.93310546875}, {"start": 2516.47, "end": 2516.65, "word": " the", "probability": 0.8974609375}, {"start": 2516.65, "end": 2516.83, "word": " new", "probability": 0.92041015625}, {"start": 2516.83, "end": 2517.19, "word": " one.", "probability": 0.92919921875}, {"start": 2518.07, "end": 2518.29, "word": " For", "probability": 0.95556640625}, {"start": 2518.29, "end": 2518.47, "word": " sure", "probability": 0.916015625}, {"start": 2518.47, "end": 2518.65, "word": " the", "probability": 0.6015625}, {"start": 2518.65, "end": 2519.15, "word": " calculations", "probability": 0.63037109375}, {"start": 2519.15, "end": 2519.49, "word": " will,", "probability": 0.57666015625}, {"start": 2519.63, "end": 2519.81, "word": " the", "probability": 0.876953125}, {"start": 2519.81, "end": 2520.41, "word": " computations", "probability": 0.969970703125}, {"start": 2520.41, "end": 2520.79, "word": " here", "probability": 0.8251953125}, {"start": 2520.79, "end": 2521.95, "word": " will", "probability": 0.666015625}, {"start": 2521.95, "end": 2522.79, "word": " change,", "probability": 0.880859375}, {"start": 2523.49, "end": 2523.93, "word": " but", "probability": 0.9140625}, {"start": 2523.93, "end": 2524.45, "word": " R", "probability": 0.81201171875}, {"start": 2524.45, "end": 2525.67, "word": " will", "probability": 0.314453125}, {"start": 2525.67, "end": 2526.43, "word": " stay", "probability": 0.921875}, {"start": 2526.43, "end": 2526.63, "word": " the", "probability": 0.91650390625}, {"start": 2526.63, "end": 2526.87, "word": " same.", "probability": 
0.9091796875}], "temperature": 1.0}, {"id": 93, "seek": 255888, "start": 2529.34, "end": 2558.88, "text": " So here we multiply each x by 2.5 and the same for y. The calculations here are different. We have different sum, different sum of x, sum of y and so on, but are the same. Let's see if we multiply just x by 2.5 and y the same.", "tokens": [407, 510, 321, 12972, 1184, 2031, 538, 568, 13, 20, 293, 264, 912, 337, 288, 13, 440, 20448, 510, 366, 819, 13, 492, 362, 819, 2408, 11, 819, 2408, 295, 2031, 11, 2408, 295, 288, 293, 370, 322, 11, 457, 366, 264, 912, 13, 961, 311, 536, 498, 321, 12972, 445, 2031, 538, 568, 13, 20, 293, 288, 264, 912, 13], "avg_logprob": -0.14667338084790013, "compression_ratio": 1.644927536231884, "no_speech_prob": 0.0, "words": [{"start": 2529.34, "end": 2529.74, "word": " So", "probability": 0.79150390625}, {"start": 2529.74, "end": 2530.18, "word": " here", "probability": 0.7470703125}, {"start": 2530.18, "end": 2530.4, "word": " we", "probability": 0.8115234375}, {"start": 2530.4, "end": 2531.0, "word": " multiply", "probability": 0.492431640625}, {"start": 2531.0, "end": 2531.44, "word": " each", "probability": 0.88330078125}, {"start": 2531.44, "end": 2531.72, "word": " x", "probability": 0.7431640625}, {"start": 2531.72, "end": 2532.12, "word": " by", "probability": 0.9697265625}, {"start": 2532.12, "end": 2532.7, "word": " 2", "probability": 0.9169921875}, {"start": 2532.7, "end": 2533.4, "word": ".5", "probability": 0.9892578125}, {"start": 2533.4, "end": 2533.6, "word": " and", "probability": 0.7236328125}, {"start": 2533.6, "end": 2533.76, "word": " the", "probability": 0.8203125}, {"start": 2533.76, "end": 2533.98, "word": " same", "probability": 0.90234375}, {"start": 2533.98, "end": 2534.26, "word": " for", "probability": 0.93798828125}, {"start": 2534.26, "end": 2534.62, "word": " y.", "probability": 0.89306640625}, {"start": 2535.54, "end": 2535.92, "word": " The", "probability": 0.8720703125}, {"start": 2535.92, "end": 
2536.48, "word": " calculations", "probability": 0.88427734375}, {"start": 2536.48, "end": 2536.94, "word": " here", "probability": 0.8583984375}, {"start": 2536.94, "end": 2537.14, "word": " are", "probability": 0.93896484375}, {"start": 2537.14, "end": 2537.58, "word": " different.", "probability": 0.888671875}, {"start": 2538.76, "end": 2539.1, "word": " We", "probability": 0.95263671875}, {"start": 2539.1, "end": 2539.4, "word": " have", "probability": 0.94677734375}, {"start": 2539.4, "end": 2539.9, "word": " different", "probability": 0.88037109375}, {"start": 2539.9, "end": 2540.34, "word": " sum,", "probability": 0.77880859375}, {"start": 2540.72, "end": 2541.08, "word": " different", "probability": 0.849609375}, {"start": 2541.08, "end": 2541.5, "word": " sum", "probability": 0.93701171875}, {"start": 2541.5, "end": 2541.64, "word": " of", "probability": 0.9609375}, {"start": 2541.64, "end": 2541.88, "word": " x,", "probability": 0.9853515625}, {"start": 2542.2, "end": 2542.2, "word": " sum", "probability": 0.796875}, {"start": 2542.2, "end": 2542.34, "word": " of", "probability": 0.96630859375}, {"start": 2542.34, "end": 2542.56, "word": " y", "probability": 0.99560546875}, {"start": 2542.56, "end": 2542.76, "word": " and", "probability": 0.5771484375}, {"start": 2542.76, "end": 2542.96, "word": " so", "probability": 0.95166015625}, {"start": 2542.96, "end": 2543.26, "word": " on,", "probability": 0.94775390625}, {"start": 2543.7, "end": 2544.0, "word": " but", "probability": 0.91162109375}, {"start": 2544.0, "end": 2544.52, "word": " are", "probability": 0.63037109375}, {"start": 2544.52, "end": 2545.86, "word": " the", "probability": 0.89697265625}, {"start": 2545.86, "end": 2546.1, "word": " same.", "probability": 0.91015625}, {"start": 2547.54, "end": 2548.06, "word": " Let's", "probability": 0.959228515625}, {"start": 2548.06, "end": 2548.24, "word": " see", "probability": 0.9130859375}, {"start": 2548.24, "end": 2548.44, "word": " if", 
"probability": 0.95166015625}, {"start": 2548.44, "end": 2548.84, "word": " we", "probability": 0.96142578125}, {"start": 2548.84, "end": 2551.04, "word": " multiply", "probability": 0.92333984375}, {"start": 2551.04, "end": 2553.28, "word": " just", "probability": 0.87548828125}, {"start": 2553.28, "end": 2553.54, "word": " x", "probability": 0.98974609375}, {"start": 2553.54, "end": 2553.76, "word": " by", "probability": 0.97705078125}, {"start": 2553.76, "end": 2553.96, "word": " 2", "probability": 0.9951171875}, {"start": 2553.96, "end": 2554.52, "word": ".5", "probability": 0.998779296875}, {"start": 2554.52, "end": 2555.54, "word": " and", "probability": 0.81298828125}, {"start": 2555.54, "end": 2556.0, "word": " y", "probability": 0.98876953125}, {"start": 2556.0, "end": 2558.58, "word": " the", "probability": 0.70849609375}, {"start": 2558.58, "end": 2558.88, "word": " same.", "probability": 0.91162109375}], "temperature": 1.0}, {"id": 94, "seek": 258848, "start": 2561.84, "end": 2588.48, "text": " So we multiplied x by 2.5 and we keep it make sense? Now let's see how outliers will affect the value of R. Let's say if we change one point in the data set support. 
I just changed 64.", "tokens": [407, 321, 17207, 2031, 538, 568, 13, 20, 293, 321, 1066, 309, 652, 2020, 30, 823, 718, 311, 536, 577, 484, 23646, 486, 3345, 264, 2158, 295, 497, 13, 961, 311, 584, 498, 321, 1319, 472, 935, 294, 264, 1412, 992, 1406, 13, 286, 445, 3105, 12145, 13], "avg_logprob": -0.32318240769055423, "compression_ratio": 1.3028169014084507, "no_speech_prob": 0.0, "words": [{"start": 2561.84, "end": 2562.1, "word": " So", "probability": 0.56689453125}, {"start": 2562.1, "end": 2562.54, "word": " we", "probability": 0.55908203125}, {"start": 2562.54, "end": 2563.18, "word": " multiplied", "probability": 0.62353515625}, {"start": 2563.18, "end": 2563.56, "word": " x", "probability": 0.51806640625}, {"start": 2563.56, "end": 2563.78, "word": " by", "probability": 0.96728515625}, {"start": 2563.78, "end": 2563.98, "word": " 2", "probability": 0.91943359375}, {"start": 2563.98, "end": 2564.58, "word": ".5", "probability": 0.986083984375}, {"start": 2564.58, "end": 2567.42, "word": " and", "probability": 0.5224609375}, {"start": 2567.42, "end": 2567.6, "word": " we", "probability": 0.93798828125}, {"start": 2567.6, "end": 2567.98, "word": " keep", "probability": 0.9287109375}, {"start": 2567.98, "end": 2569.14, "word": " it", "probability": 0.164794921875}, {"start": 2569.14, "end": 2569.36, "word": " make", "probability": 0.36865234375}, {"start": 2569.36, "end": 2569.66, "word": " sense?", "probability": 0.7802734375}, {"start": 2571.32, "end": 2572.1, "word": " Now", "probability": 0.7666015625}, {"start": 2572.1, "end": 2572.44, "word": " let's", "probability": 0.8681640625}, {"start": 2572.44, "end": 2572.72, "word": " see", "probability": 0.91162109375}, {"start": 2572.72, "end": 2573.12, "word": " how", "probability": 0.921875}, {"start": 2573.12, "end": 2573.84, "word": " outliers", "probability": 0.836669921875}, {"start": 2573.84, "end": 2575.62, "word": " will", "probability": 0.873046875}, {"start": 2575.62, "end": 2576.22, "word": " affect", 
"probability": 0.7490234375}, {"start": 2576.22, "end": 2577.84, "word": " the", "probability": 0.90234375}, {"start": 2577.84, "end": 2578.04, "word": " value", "probability": 0.97412109375}, {"start": 2578.04, "end": 2578.22, "word": " of", "probability": 0.96484375}, {"start": 2578.22, "end": 2578.48, "word": " R.", "probability": 0.5546875}, {"start": 2580.44, "end": 2580.84, "word": " Let's", "probability": 0.927001953125}, {"start": 2580.84, "end": 2581.0, "word": " say", "probability": 0.5751953125}, {"start": 2581.0, "end": 2581.24, "word": " if", "probability": 0.86376953125}, {"start": 2581.24, "end": 2581.42, "word": " we", "probability": 0.94921875}, {"start": 2581.42, "end": 2581.84, "word": " change", "probability": 0.88525390625}, {"start": 2581.84, "end": 2582.08, "word": " one", "probability": 0.880859375}, {"start": 2582.08, "end": 2582.46, "word": " point", "probability": 0.97900390625}, {"start": 2582.46, "end": 2583.26, "word": " in", "probability": 0.8603515625}, {"start": 2583.26, "end": 2583.36, "word": " the", "probability": 0.912109375}, {"start": 2583.36, "end": 2583.54, "word": " data", "probability": 0.57373046875}, {"start": 2583.54, "end": 2583.78, "word": " set", "probability": 0.82470703125}, {"start": 2583.78, "end": 2584.12, "word": " support.", "probability": 0.276123046875}, {"start": 2585.22, "end": 2585.88, "word": " I", "probability": 0.85986328125}, {"start": 2585.88, "end": 2586.42, "word": " just", "probability": 0.81787109375}, {"start": 2586.42, "end": 2587.6, "word": " changed", "probability": 0.56689453125}, {"start": 2587.6, "end": 2588.48, "word": " 64.", "probability": 0.76123046875}], "temperature": 1.0}, {"id": 95, "seek": 262211, "start": 2593.75, "end": 2622.11, "text": " for example if just by typo and just enter 6 so it was 87 it becomes 0.7 so there is a big difference between 0.87 and 0.7 and just we change one value now suppose the other one is zero", "tokens": [337, 1365, 498, 445, 538, 2125, 78, 293, 445, 
3242, 1386, 370, 309, 390, 27990, 309, 3643, 1958, 13, 22, 370, 456, 307, 257, 955, 2649, 1296, 1958, 13, 23853, 293, 1958, 13, 22, 293, 445, 321, 1319, 472, 2158, 586, 7297, 264, 661, 472, 307, 4018], "avg_logprob": -0.2389322960128387, "compression_ratio": 1.4090909090909092, "no_speech_prob": 0.0, "words": [{"start": 2593.75, "end": 2594.25, "word": " for", "probability": 0.239501953125}, {"start": 2594.25, "end": 2594.71, "word": " example", "probability": 0.96875}, {"start": 2594.71, "end": 2595.15, "word": " if", "probability": 0.6123046875}, {"start": 2595.15, "end": 2595.77, "word": " just", "probability": 0.59326171875}, {"start": 2595.77, "end": 2596.29, "word": " by", "probability": 0.85791015625}, {"start": 2596.29, "end": 2597.27, "word": " typo", "probability": 0.634033203125}, {"start": 2597.27, "end": 2600.19, "word": " and", "probability": 0.7470703125}, {"start": 2600.19, "end": 2600.81, "word": " just", "probability": 0.72119140625}, {"start": 2600.81, "end": 2601.67, "word": " enter", "probability": 0.73046875}, {"start": 2601.67, "end": 2602.13, "word": " 6", "probability": 0.53369140625}, {"start": 2602.13, "end": 2604.03, "word": " so", "probability": 0.63818359375}, {"start": 2604.03, "end": 2604.35, "word": " it", "probability": 0.78564453125}, {"start": 2604.35, "end": 2606.69, "word": " was", "probability": 0.85595703125}, {"start": 2606.69, "end": 2608.03, "word": " 87", "probability": 0.74462890625}, {"start": 2608.03, "end": 2609.43, "word": " it", "probability": 0.75927734375}, {"start": 2609.43, "end": 2609.85, "word": " becomes", "probability": 0.83935546875}, {"start": 2609.85, "end": 2610.09, "word": " 0", "probability": 0.56005859375}, {"start": 2610.09, "end": 2610.47, "word": ".7", "probability": 0.98486328125}, {"start": 2610.47, "end": 2612.13, "word": " so", "probability": 0.72021484375}, {"start": 2612.13, "end": 2612.43, "word": " there", "probability": 0.88427734375}, {"start": 2612.43, "end": 2612.61, "word": " is", 
"probability": 0.77783203125}, {"start": 2612.61, "end": 2612.83, "word": " a", "probability": 0.9755859375}, {"start": 2612.83, "end": 2613.01, "word": " big", "probability": 0.927734375}, {"start": 2613.01, "end": 2613.51, "word": " difference", "probability": 0.8583984375}, {"start": 2613.51, "end": 2614.03, "word": " between", "probability": 0.8701171875}, {"start": 2614.03, "end": 2614.31, "word": " 0", "probability": 0.92822265625}, {"start": 2614.31, "end": 2614.73, "word": ".87", "probability": 0.995849609375}, {"start": 2614.73, "end": 2614.97, "word": " and", "probability": 0.9375}, {"start": 2614.97, "end": 2615.09, "word": " 0", "probability": 0.98193359375}, {"start": 2615.09, "end": 2615.51, "word": ".7", "probability": 0.998046875}, {"start": 2615.51, "end": 2616.21, "word": " and", "probability": 0.830078125}, {"start": 2616.21, "end": 2616.41, "word": " just", "probability": 0.75048828125}, {"start": 2616.41, "end": 2616.69, "word": " we", "probability": 0.7451171875}, {"start": 2616.69, "end": 2617.73, "word": " change", "probability": 0.64990234375}, {"start": 2617.73, "end": 2618.03, "word": " one", "probability": 0.9052734375}, {"start": 2618.03, "end": 2618.67, "word": " value", "probability": 0.923828125}, {"start": 2618.67, "end": 2620.67, "word": " now", "probability": 0.74658203125}, {"start": 2620.67, "end": 2621.07, "word": " suppose", "probability": 0.87158203125}, {"start": 2621.07, "end": 2621.27, "word": " the", "probability": 0.900390625}, {"start": 2621.27, "end": 2621.49, "word": " other", "probability": 0.892578125}, {"start": 2621.49, "end": 2621.69, "word": " one", "probability": 0.91552734375}, {"start": 2621.69, "end": 2621.89, "word": " is", "probability": 0.92919921875}, {"start": 2621.89, "end": 2622.11, "word": " zero", "probability": 0.53955078125}], "temperature": 1.0}, {"id": 96, "seek": 264706, "start": 2624.4, "end": 2647.06, "text": " 82. The other is 2, for example. 1. I just changed half of the data. 
Now R was 87, it becomes 59. That means these outliers, these values for sure are outliers and they fit the correlation coefficient.", "tokens": [29097, 13, 440, 661, 307, 568, 11, 337, 1365, 13, 502, 13, 286, 445, 3105, 1922, 295, 264, 1412, 13, 823, 497, 390, 27990, 11, 309, 3643, 24624, 13, 663, 1355, 613, 484, 23646, 11, 613, 4190, 337, 988, 366, 484, 23646, 293, 436, 3318, 264, 20009, 17619, 13], "avg_logprob": -0.2576562476158142, "compression_ratio": 1.3377483443708609, "no_speech_prob": 0.0, "words": [{"start": 2624.4, "end": 2624.84, "word": " 82.", "probability": 0.5546875}, {"start": 2625.26, "end": 2625.48, "word": " The", "probability": 0.83447265625}, {"start": 2625.48, "end": 2625.7, "word": " other", "probability": 0.67333984375}, {"start": 2625.7, "end": 2625.92, "word": " is", "probability": 0.84912109375}, {"start": 2625.92, "end": 2626.16, "word": " 2,", "probability": 0.666015625}, {"start": 2626.3, "end": 2626.44, "word": " for", "probability": 0.947265625}, {"start": 2626.44, "end": 2626.8, "word": " example.", "probability": 0.97216796875}, {"start": 2627.98, "end": 2628.26, "word": " 1.", "probability": 0.51416015625}, {"start": 2633.38, "end": 2633.56, "word": " I", "probability": 0.9326171875}, {"start": 2633.56, "end": 2633.86, "word": " just", "probability": 0.277099609375}, {"start": 2633.86, "end": 2634.56, "word": " changed", "probability": 0.73681640625}, {"start": 2634.56, "end": 2635.22, "word": " half", "probability": 0.88525390625}, {"start": 2635.22, "end": 2635.4, "word": " of", "probability": 0.96435546875}, {"start": 2635.4, "end": 2635.5, "word": " the", "probability": 0.91748046875}, {"start": 2635.5, "end": 2635.84, "word": " data.", "probability": 0.93408203125}, {"start": 2636.62, "end": 2636.82, "word": " Now", "probability": 0.94921875}, {"start": 2636.82, "end": 2637.16, "word": " R", "probability": 0.41943359375}, {"start": 2637.16, "end": 2638.18, "word": " was", "probability": 0.90771484375}, {"start": 2638.18, 
"end": 2638.8, "word": " 87,", "probability": 0.9765625}, {"start": 2639.02, "end": 2639.2, "word": " it", "probability": 0.8544921875}, {"start": 2639.2, "end": 2639.62, "word": " becomes", "probability": 0.76708984375}, {"start": 2639.62, "end": 2640.12, "word": " 59.", "probability": 0.97900390625}, {"start": 2640.74, "end": 2641.04, "word": " That", "probability": 0.89208984375}, {"start": 2641.04, "end": 2641.42, "word": " means", "probability": 0.9375}, {"start": 2641.42, "end": 2642.1, "word": " these", "probability": 0.6298828125}, {"start": 2642.1, "end": 2642.66, "word": " outliers,", "probability": 0.6549072265625}, {"start": 2642.74, "end": 2642.92, "word": " these", "probability": 0.84423828125}, {"start": 2642.92, "end": 2643.26, "word": " values", "probability": 0.96875}, {"start": 2643.26, "end": 2643.52, "word": " for", "probability": 0.74658203125}, {"start": 2643.52, "end": 2643.72, "word": " sure", "probability": 0.91259765625}, {"start": 2643.72, "end": 2643.9, "word": " are", "probability": 0.880859375}, {"start": 2643.9, "end": 2644.34, "word": " outliers", "probability": 0.952880859375}, {"start": 2644.34, "end": 2645.1, "word": " and", "probability": 0.46875}, {"start": 2645.1, "end": 2645.28, "word": " they", "probability": 0.88720703125}, {"start": 2645.28, "end": 2645.74, "word": " fit", "probability": 0.85546875}, {"start": 2645.74, "end": 2646.18, "word": " the", "probability": 0.90625}, {"start": 2646.18, "end": 2646.66, "word": " correlation", "probability": 0.92822265625}, {"start": 2646.66, "end": 2647.06, "word": " coefficient.", "probability": 0.9404296875}], "temperature": 1.0}, {"id": 97, "seek": 267635, "start": 2651.11, "end": 2676.35, "text": " Now let's see if we just change this 1 to be 200. It will go from 50 to up to 63. That means any changes in x or y values will change the y. But if we add or multiply all the values by a constant, r will stay the same. 
Any questions?", "tokens": [823, 718, 311, 536, 498, 321, 445, 1319, 341, 502, 281, 312, 2331, 13, 467, 486, 352, 490, 2625, 281, 493, 281, 25082, 13, 663, 1355, 604, 2962, 294, 2031, 420, 288, 4190, 486, 1319, 264, 288, 13, 583, 498, 321, 909, 420, 12972, 439, 264, 4190, 538, 257, 5754, 11, 367, 486, 1754, 264, 912, 13, 2639, 1651, 30], "avg_logprob": -0.20594261708806771, "compression_ratio": 1.4096385542168675, "no_speech_prob": 0.0, "words": [{"start": 2651.11, "end": 2651.47, "word": " Now", "probability": 0.72900390625}, {"start": 2651.47, "end": 2652.11, "word": " let's", "probability": 0.72900390625}, {"start": 2652.11, "end": 2652.53, "word": " see", "probability": 0.76025390625}, {"start": 2652.53, "end": 2652.71, "word": " if", "probability": 0.9287109375}, {"start": 2652.71, "end": 2652.89, "word": " we", "probability": 0.93505859375}, {"start": 2652.89, "end": 2653.13, "word": " just", "probability": 0.8515625}, {"start": 2653.13, "end": 2653.63, "word": " change", "probability": 0.86865234375}, {"start": 2653.63, "end": 2653.97, "word": " this", "probability": 0.9296875}, {"start": 2653.97, "end": 2654.27, "word": " 1", "probability": 0.45751953125}, {"start": 2654.27, "end": 2654.41, "word": " to", "probability": 0.96728515625}, {"start": 2654.41, "end": 2654.57, "word": " be", "probability": 0.9228515625}, {"start": 2654.57, "end": 2654.97, "word": " 200.", "probability": 0.91259765625}, {"start": 2655.87, "end": 2656.25, "word": " It", "probability": 0.90185546875}, {"start": 2656.25, "end": 2656.39, "word": " will", "probability": 0.88623046875}, {"start": 2656.39, "end": 2656.63, "word": " go", "probability": 0.9560546875}, {"start": 2656.63, "end": 2656.99, "word": " from", "probability": 0.892578125}, {"start": 2656.99, "end": 2657.67, "word": " 50", "probability": 0.87353515625}, {"start": 2657.67, "end": 2658.11, "word": " to", "probability": 0.72705078125}, {"start": 2658.11, "end": 2658.45, "word": " up", "probability": 0.59375}, {"start": 
2658.45, "end": 2658.59, "word": " to", "probability": 0.93603515625}, {"start": 2658.59, "end": 2658.97, "word": " 63.", "probability": 0.7080078125}, {"start": 2659.15, "end": 2659.39, "word": " That", "probability": 0.8427734375}, {"start": 2659.39, "end": 2659.77, "word": " means", "probability": 0.92724609375}, {"start": 2659.77, "end": 2660.43, "word": " any", "probability": 0.453857421875}, {"start": 2660.43, "end": 2661.05, "word": " changes", "probability": 0.88232421875}, {"start": 2661.05, "end": 2662.23, "word": " in", "probability": 0.845703125}, {"start": 2662.23, "end": 2662.51, "word": " x", "probability": 0.65234375}, {"start": 2662.51, "end": 2662.81, "word": " or", "probability": 0.93408203125}, {"start": 2662.81, "end": 2663.03, "word": " y", "probability": 0.98046875}, {"start": 2663.03, "end": 2663.45, "word": " values", "probability": 0.93115234375}, {"start": 2663.45, "end": 2663.61, "word": " will", "probability": 0.82421875}, {"start": 2663.61, "end": 2664.17, "word": " change", "probability": 0.86962890625}, {"start": 2664.17, "end": 2665.09, "word": " the", "probability": 0.31005859375}, {"start": 2665.09, "end": 2665.33, "word": " y.", "probability": 0.83056640625}, {"start": 2665.57, "end": 2665.83, "word": " But", "probability": 0.91748046875}, {"start": 2665.83, "end": 2666.01, "word": " if", "probability": 0.92822265625}, {"start": 2666.01, "end": 2666.21, "word": " we", "probability": 0.826171875}, {"start": 2666.21, "end": 2666.63, "word": " add", "probability": 0.8857421875}, {"start": 2666.63, "end": 2666.85, "word": " or", "probability": 0.95556640625}, {"start": 2666.85, "end": 2667.35, "word": " multiply", "probability": 0.91015625}, {"start": 2667.35, "end": 2667.93, "word": " all", "probability": 0.9462890625}, {"start": 2667.93, "end": 2668.05, "word": " the", "probability": 0.88818359375}, {"start": 2668.05, "end": 2668.35, "word": " values", "probability": 0.96337890625}, {"start": 2668.35, "end": 2668.55, "word": " by", 
"probability": 0.96923828125}, {"start": 2668.55, "end": 2668.71, "word": " a", "probability": 0.8076171875}, {"start": 2668.71, "end": 2669.17, "word": " constant,", "probability": 0.9384765625}, {"start": 2669.75, "end": 2670.07, "word": " r", "probability": 0.5234375}, {"start": 2670.07, "end": 2670.29, "word": " will", "probability": 0.88818359375}, {"start": 2670.29, "end": 2670.73, "word": " stay", "probability": 0.8369140625}, {"start": 2670.73, "end": 2670.93, "word": " the", "probability": 0.9091796875}, {"start": 2670.93, "end": 2671.17, "word": " same.", "probability": 0.90283203125}, {"start": 2675.25, "end": 2675.93, "word": " Any", "probability": 0.875}, {"start": 2675.93, "end": 2676.35, "word": " questions?", "probability": 0.94140625}], "temperature": 1.0}, {"id": 98, "seek": 269527, "start": 2678.85, "end": 2695.27, "text": " That's the end of chapter 3. I will move quickly to the practice problems we have. And we posted the practice in the course website.", "tokens": [663, 311, 264, 917, 295, 7187, 805, 13, 286, 486, 1286, 2661, 281, 264, 3124, 2740, 321, 362, 13, 400, 321, 9437, 264, 3124, 294, 264, 1164, 3144, 13], "avg_logprob": -0.1938802013794581, "compression_ratio": 1.2314814814814814, "no_speech_prob": 0.0, "words": [{"start": 2678.85, "end": 2679.43, "word": " That's", "probability": 0.7724609375}, {"start": 2679.43, "end": 2679.61, "word": " the", "probability": 0.92529296875}, {"start": 2679.61, "end": 2679.91, "word": " end", "probability": 0.90673828125}, {"start": 2679.91, "end": 2680.43, "word": " of", "probability": 0.9716796875}, {"start": 2680.43, "end": 2681.15, "word": " chapter", "probability": 0.6552734375}, {"start": 2681.15, "end": 2681.55, "word": " 3.", "probability": 0.5712890625}, {"start": 2683.09, "end": 2683.45, "word": " I", "probability": 0.947265625}, {"start": 2683.45, "end": 2683.59, "word": " will", "probability": 0.880859375}, {"start": 2683.59, "end": 2683.83, "word": " move", "probability": 0.93310546875}, 
{"start": 2683.83, "end": 2684.33, "word": " quickly", "probability": 0.90966796875}, {"start": 2684.33, "end": 2684.81, "word": " to", "probability": 0.875}, {"start": 2684.81, "end": 2684.97, "word": " the", "probability": 0.89990234375}, {"start": 2684.97, "end": 2685.51, "word": " practice", "probability": 0.9111328125}, {"start": 2685.51, "end": 2686.85, "word": " problems", "probability": 0.54931640625}, {"start": 2686.85, "end": 2687.15, "word": " we", "probability": 0.94384765625}, {"start": 2687.15, "end": 2687.49, "word": " have.", "probability": 0.908203125}, {"start": 2688.75, "end": 2688.99, "word": " And", "probability": 0.52294921875}, {"start": 2688.99, "end": 2690.85, "word": " we", "probability": 0.78076171875}, {"start": 2690.85, "end": 2691.23, "word": " posted", "probability": 0.8916015625}, {"start": 2691.23, "end": 2691.91, "word": " the", "probability": 0.90625}, {"start": 2691.91, "end": 2694.11, "word": " practice", "probability": 0.94189453125}, {"start": 2694.11, "end": 2694.49, "word": " in", "probability": 0.7109375}, {"start": 2694.49, "end": 2694.63, "word": " the", "probability": 0.91015625}, {"start": 2694.63, "end": 2694.85, "word": " course", "probability": 0.97412109375}, {"start": 2694.85, "end": 2695.27, "word": " website.", "probability": 0.9033203125}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 2697.3925, "duration_after_vad": 2521.453749999988} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/EuXnSmrCFuE_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/EuXnSmrCFuE_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..15d957bb6bf647704753c4f2d6824844080ddc52 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/EuXnSmrCFuE_raw.srt @@ -0,0 +1,2080 @@ +1 +00:00:06,480 --> 00:00:12,100 +Last time, I mean Tuesday, we discussed box plot + +2 +00:00:12,100 --> 00:00:19,540 +and we introduced how can we use box plot to + +3 +00:00:19,540 --> 
00:00:24,160 +determine if any point is suspected to be an + +4 +00:00:24,160 --> 00:00:28,280 +outlier by using the lower limit and upper limit. + +5 +00:00:29,460 --> 00:00:32,980 +And we mentioned last time that if any point is + +6 +00:00:32,980 --> 00:00:38,580 +below the lower limit or is above the upper limit, + +7 +00:00:39,200 --> 00:00:43,000 +that point is considered to be an outlier. So + +8 +00:00:43,000 --> 00:00:46,940 +that's one of the usage of the backsplat. I mean, + +9 +00:00:47,000 --> 00:00:51,360 +for this specific example, we mentioned last time + +10 +00:00:51,360 --> 00:00:56,910 +27 is an outlier. And also here you can tell also + +11 +00:00:56,910 --> 00:01:02,770 +the data are right skewed because the right tail + +12 +00:01:02,770 --> 00:01:06,090 +exactly is much longer than the left tail. I mean + +13 +00:01:06,090 --> 00:01:10,450 +the distance between or from the median and the + +14 +00:01:10,450 --> 00:01:14,150 +maximum value is bigger or larger than the + +15 +00:01:14,150 --> 00:01:16,950 +distance from the median to the smallest value. + +16 +00:01:17,450 --> 00:01:20,870 +That means the data is not symmetric, it's quite + +17 +00:01:20,870 --> 00:01:24,050 +skewed to the right. In this case, you cannot use + +18 +00:01:24,050 --> 00:01:29,370 +the mean or the range as a measure of spread and + +19 +00:01:29,370 --> 00:01:31,730 +median and, I'm sorry, mean as a measure of + +20 +00:01:31,730 --> 00:01:36,130 +tendency. Because these measures are affected by + +21 +00:01:36,130 --> 00:01:39,450 +outcomes. In this case, you have to use the median + +22 +00:01:39,450 --> 00:01:43,690 +instead of the mean and IQR instead of the range + +23 +00:01:43,690 --> 00:01:48,090 +because IQR is the mid-spread of the data because + +24 +00:01:48,090 --> 00:01:52,790 +we just take the range from Q3 to Q1. That means + +25 +00:01:52,790 --> 00:01:57,450 +we ignore The data below Q1 and data after Q3. 
+ +26 +00:01:57,970 --> 00:02:01,370 +That means IQR is not affected by outlier and it's + +27 +00:02:01,370 --> 00:02:04,610 +better to use it instead of R, of the range. + +28 +00:02:07,470 --> 00:02:10,950 +If the data has an outlier, it's better just to + +29 +00:02:10,950 --> 00:02:13,990 +make a star or circle for the box plot because + +30 +00:02:13,990 --> 00:02:17,250 +this one mentioned that that point is an outlier. + +31 +00:02:18,390 --> 00:02:21,390 +Sometimes outlier is maximum value or the largest + +32 +00:02:21,390 --> 00:02:25,000 +value you have. sometimes maybe the minimum value. + +33 +00:02:25,520 --> 00:02:28,480 +So it depends on the data. For this example, 27, + +34 +00:02:28,720 --> 00:02:33,360 +which was the maximum, is an outlier. But zero is + +35 +00:02:33,360 --> 00:02:36,520 +not outlier in this case, because zero is above + +36 +00:02:36,520 --> 00:02:41,500 +the lower limit. Let's move to the next topic, + +37 +00:02:42,140 --> 00:02:48,060 +which talks about covariance and correlation. + +38 +00:02:48,960 --> 00:02:51,740 +Later, we'll talk in more details about + +39 +00:02:53,020 --> 00:02:56,060 +Correlation and regression, that's when maybe + +40 +00:02:56,060 --> 00:03:02,840 +chapter 11 or 12. But here we just show how can we + +41 +00:03:02,840 --> 00:03:05,420 +compute the covariance of the correlation + +42 +00:03:05,420 --> 00:03:10,220 +coefficient and what's the meaning of that value + +43 +00:03:10,220 --> 00:03:15,840 +we have. The covariance means it measures the + +44 +00:03:15,840 --> 00:03:21,090 +strength of the linear relationship between two + +45 +00:03:21,090 --> 00:03:25,410 +numerical variables. 
That means if the data set is + +46 +00:03:25,410 --> 00:03:29,770 +numeric, I mean if both variables are numeric, in + +47 +00:03:29,770 --> 00:03:33,050 +this case we can use the covariance to measure the + +48 +00:03:33,050 --> 00:03:38,390 +strength of the linear association or relationship + +49 +00:03:38,390 --> 00:03:42,310 +between two numerical variables. Now the formula + +50 +00:03:42,310 --> 00:03:45,330 +is used to compute the covariance given by this + +51 +00:03:45,330 --> 00:03:52,540 +one. It's summation of the product of xi minus x + +52 +00:03:52,540 --> 00:03:56,380 +bar, yi minus y bar, divided by n minus 1. + +53 +00:03:59,660 --> 00:04:03,120 +So we need first to compute the means of x and y, + +54 +00:04:03,620 --> 00:04:07,680 +then find x minus x bar times y minus y bar, then + +55 +00:04:07,680 --> 00:04:11,160 +sum all of these values, then divide by n minus 1. + +56 +00:04:12,870 --> 00:04:17,770 +The covariance only concerned with the strength of + +57 +00:04:17,770 --> 00:04:23,370 +the relationship. By using the sign of the + +58 +00:04:23,370 --> 00:04:27,010 +covariance, you can tell if there exists positive + +59 +00:04:27,010 --> 00:04:31,070 +or negative relationship between the two + +60 +00:04:31,070 --> 00:04:33,710 +variables. For example, if the covariance between + +61 +00:04:33,710 --> 00:04:42,760 +x and y is positive, that means x and y move In + +62 +00:04:42,760 --> 00:04:48,080 +the same direction. It means that if X goes up, Y + +63 +00:04:48,080 --> 00:04:52,260 +will go in the same position. If X goes down, also + +64 +00:04:52,260 --> 00:04:55,660 +Y goes down. For example, suppose we are + +65 +00:04:55,660 --> 00:04:57,920 +interested in the relationship between consumption + +66 +00:04:57,920 --> 00:05:02,440 +and income. 
We know that if income increases, if + +67 +00:05:02,440 --> 00:05:07,160 +income goes up, if your salary goes up, that means + +68 +00:05:07,160 --> 00:05:13,510 +consumption also will go up. So that means they go + +69 +00:05:13,510 --> 00:05:18,650 +in the same or move in the same position. So for + +70 +00:05:18,650 --> 00:05:20,690 +sure, the covariance between X and Y should be + +71 +00:05:20,690 --> 00:05:25,550 +positive. On the other hand, if the covariance + +72 +00:05:25,550 --> 00:05:31,110 +between X and Y is negative, that means X goes up. + +73 +00:05:32,930 --> 00:05:36,370 +Y will go to the same, to the opposite direction. + +74 +00:05:36,590 --> 00:05:40,090 +I mean they move to opposite direction. That means + +75 +00:05:40,090 --> 00:05:42,230 +there exists negative relationship between X and + +76 +00:05:42,230 --> 00:05:47,630 +Y. For example, you score in statistics a number + +77 +00:05:47,630 --> 00:05:55,220 +of missing classes. If you miss more classes, it + +78 +00:05:55,220 --> 00:05:59,860 +means your score will go down so as x increases y + +79 +00:05:59,860 --> 00:06:04,820 +will go down so there is positive relationship or + +80 +00:06:04,820 --> 00:06:08,720 +negative relationship between x and y i mean x + +81 +00:06:08,720 --> 00:06:12,020 +goes up the other go in the same direction + +82 +00:06:12,020 --> 00:06:16,500 +sometimes + +83 +00:06:16,500 --> 00:06:21,800 +there is exist no relationship between x and y In + +84 +00:06:21,800 --> 00:06:24,780 +that case, covariance between x and y equals zero. + +85 +00:06:24,880 --> 00:06:31,320 +For example, your score in statistics and your + +86 +00:06:31,320 --> 00:06:31,700 +weight. + +87 +00:06:34,540 --> 00:06:36,760 +It makes sense that there is no relationship + +88 +00:06:36,760 --> 00:06:42,680 +between your weight and your score. In this case, + +89 +00:06:43,580 --> 00:06:46,760 +we are saying x and y are independent. 
I mean, + +90 +00:06:46,840 --> 00:06:50,790 +they are uncorrelated. Because as one variable + +91 +00:06:50,790 --> 00:06:56,010 +increases, the other maybe go up or go down. Or + +92 +00:06:56,010 --> 00:06:59,690 +maybe constant. So that means there exists no + +93 +00:06:59,690 --> 00:07:02,390 +relationship between the two variables. In that + +94 +00:07:02,390 --> 00:07:05,950 +case, the covariance between x and y equals zero. + +95 +00:07:06,450 --> 00:07:09,210 +Now, by using the covariance, you can determine + +96 +00:07:09,210 --> 00:07:12,710 +the direction of the relationship. I mean, you can + +97 +00:07:12,710 --> 00:07:14,850 +just figure out if the relation is positive or + +98 +00:07:14,850 --> 00:07:18,980 +negative. But you cannot tell exactly the strength + +99 +00:07:18,980 --> 00:07:22,100 +of the relationship. I mean you cannot tell if + +100 +00:07:22,100 --> 00:07:27,640 +they exist. strong moderate or weak relationship + +101 +00:07:27,640 --> 00:07:31,040 +just you can tell there exists positive or + +102 +00:07:31,040 --> 00:07:33,520 +negative or maybe the relationship does not exist + +103 +00:07:33,520 --> 00:07:36,580 +but you cannot tell the exact strength of the + +104 +00:07:36,580 --> 00:07:40,120 +relationship by using the value of the covariance + +105 +00:07:40,120 --> 00:07:43,060 +I mean the size of the covariance does not tell + +106 +00:07:43,060 --> 00:07:48,520 +anything about the strength so generally speaking + +107 +00:07:48,520 --> 00:07:53,150 +covariance between x and y measures the strength + +108 +00:07:53,150 --> 00:07:58,590 +of two numerical variables, and you only tell if + +109 +00:07:58,590 --> 00:08:01,270 +there exists positive or negative relationship, + +110 +00:08:01,770 --> 00:08:04,510 +but you cannot tell anything about the strength of + +111 +00:08:04,510 --> 00:08:06,910 +the relationship. Any questions? 
+ +112 +00:08:09,610 --> 00:08:15,210 +So let me ask you just to summarize what I said so + +113 +00:08:15,210 --> 00:08:21,100 +far. Just give me the summary or conclusion. of + +114 +00:08:21,100 --> 00:08:24,670 +the covariance. The value of the covariance + +115 +00:08:24,670 --> 00:08:26,810 +determine if the relationship between the + +116 +00:08:26,810 --> 00:08:29,410 +variables are positive or negative or there is no + +117 +00:08:29,410 --> 00:08:31,970 +relationship that when the covariance is more than + +118 +00:08:31,970 --> 00:08:34,170 +zero, the meaning that it's positive, the + +119 +00:08:34,170 --> 00:08:36,930 +relationship is positive and one variable go up, + +120 +00:08:37,290 --> 00:08:39,590 +another go up and vice versa. And when the + +121 +00:08:39,590 --> 00:08:41,810 +covariance is less than zero, there is negative + +122 +00:08:41,810 --> 00:08:44,250 +relationship and the meaning that when one + +123 +00:08:44,250 --> 00:08:47,490 +variable go up, the other goes down and vice versa + +124 +00:08:47,490 --> 00:08:50,550 +and when the covariance equals zero, there is no + +125 +00:08:50,550 --> 00:08:53,350 +relationship between the variables. And what's + +126 +00:08:53,350 --> 00:08:54,930 +about the strength? + +127 +00:08:57,950 --> 00:09:03,450 +So just tell the direction, not the strength of + +128 +00:09:03,450 --> 00:09:08,610 +the relationship. Now, in order to determine both + +129 +00:09:08,610 --> 00:09:12,110 +the direction and the strength, we can use the + +130 +00:09:12,110 --> 00:09:17,580 +coefficient of correlation. The coefficient of + +131 +00:09:17,580 --> 00:09:20,320 +correlation measures the relative strength of the + +132 +00:09:20,320 --> 00:09:22,780 +linear relationship between two numerical + +133 +00:09:22,780 --> 00:09:27,940 +variables. 
The simplest formula that can be used + +134 +00:09:27,940 --> 00:09:31,220 +to compute the correlation coefficient is given by + +135 +00:09:31,220 --> 00:09:34,440 +this one. Maybe this is the easiest formula you + +136 +00:09:34,440 --> 00:09:38,060 +can use. I mean, it's shortcut formula for + +137 +00:09:38,060 --> 00:09:40,860 +computation. There are many other formulas to + +138 +00:09:40,860 --> 00:09:44,490 +compute the correlation. This one is the easiest + +139 +00:09:44,490 --> 00:09:52,570 +one. R is just sum of xy minus n, n is the sample + +140 +00:09:52,570 --> 00:09:57,570 +size, times x bar is the sample mean, y bar is the + +141 +00:09:57,570 --> 00:10:01,090 +sample mean for y, because here we have two + +142 +00:10:01,090 --> 00:10:06,250 +variables, divided by square root, don't forget + +143 +00:10:06,250 --> 00:10:11,490 +the square root, of two quantities. One conserved + +144 +00:10:11,490 --> 00:10:15,710 +for x and the other for y. The first one, sum of x + +145 +00:10:15,710 --> 00:10:18,850 +squared minus nx bar squared. The other one is + +146 +00:10:18,850 --> 00:10:21,830 +similar just for the other variables, sum y + +147 +00:10:21,830 --> 00:10:26,090 +squared minus ny bar squared. So in order to + +148 +00:10:26,090 --> 00:10:28,650 +determine the value of R, we need, + +149 +00:10:32,170 --> 00:10:35,890 +suppose for example, we have x and y, theta x and + +150 +00:10:35,890 --> 00:10:36,110 +y. + +151 +00:10:40,350 --> 00:10:44,730 +x is called explanatory + +152 +00:10:44,730 --> 00:10:54,390 +variable and + +153 +00:10:54,390 --> 00:11:04,590 +y is called response variable + +154 +00:11:04,590 --> 00:11:07,970 +sometimes x is called independent + +155 +00:11:21,760 --> 00:11:25,320 +For example, suppose we are talking about + +156 +00:11:25,320 --> 00:11:32,280 +consumption and + +157 +00:11:32,280 --> 00:11:36,700 +input. 
And we are interested in the relationship + +158 +00:11:36,700 --> 00:11:41,360 +between these two variables. Now, except for the + +159 +00:11:41,360 --> 00:11:44,900 +variable or the independent, this one affects the + +160 +00:11:44,900 --> 00:11:49,840 +other variable. As we mentioned, as your income + +161 +00:11:49,840 --> 00:11:53,800 +increases, your consumption will go in the same + +162 +00:11:53,800 --> 00:11:59,580 +direction, increases also. Income causes Y, or + +163 +00:11:59,580 --> 00:12:04,340 +income affects Y. In this case, income is your X. + +164 +00:12:06,180 --> 00:12:07,780 +Most of the time we use + +165 +00:12:10,790 --> 00:12:15,590 +And Y for independent. So in this case, the + +166 +00:12:15,590 --> 00:12:19,370 +response variable or your outcome or the dependent + +167 +00:12:19,370 --> 00:12:23,110 +variable is your consumption. So Y is consumption, + +168 +00:12:23,530 --> 00:12:29,150 +X is income. So now in order to determine the + +169 +00:12:29,150 --> 00:12:32,950 +correlation coefficient, we have the data of X and + +170 +00:12:32,950 --> 00:12:33,210 +Y. + +171 +00:12:36,350 --> 00:12:39,190 +The values of X, I mean the number of pairs of X + +172 +00:12:39,190 --> 00:12:41,990 +should be equal to the number of pairs of Y. So if + +173 +00:12:41,990 --> 00:12:44,930 +we have ten observations for X, we should have the + +174 +00:12:44,930 --> 00:12:50,010 +same number of observations for Y. It's pairs. X1, + +175 +00:12:50,090 --> 00:12:54,750 +Y1, X2, Y2, and so on. Now, the formula to compute + +176 +00:12:54,750 --> 00:13:04,170 +R, the shortcut formula is sum of XY minus N times + +177 +00:13:04,970 --> 00:13:09,630 +x bar, y bar, divided by the square root of two + +178 +00:13:09,630 --> 00:13:12,770 +quantities. The first one, sum of x squared minus + +179 +00:13:12,770 --> 00:13:17,270 +n x bar. The other one, sum of y squared minus ny + +180 +00:13:17,270 --> 00:13:21,710 +y squared. 
So the first thing we have to do is to + +181 +00:13:21,710 --> 00:13:24,210 +find the mean for each x and y. + +182 +00:13:28,230 --> 00:13:37,210 +So first step, compute x bar and y bar. Next, if + +183 +00:13:37,210 --> 00:13:41,690 +you look here, we have x and y, x times y. So we + +184 +00:13:41,690 --> 00:13:48,870 +need to compute the product of x times y. So just + +185 +00:13:48,870 --> 00:13:53,870 +for example, suppose x is 10, y is 5. So x times y + +186 +00:13:53,870 --> 00:13:54,970 +is 50. + +187 +00:13:57,810 --> 00:13:59,950 +In addition to that, you have to compute + +188 +00:14:06,790 --> 00:14:12,470 +100 x squared and y squared. It's like 125. + +189 +00:14:14,810 --> 00:14:18,870 +Do the same calculations for the rest of the data + +190 +00:14:18,870 --> 00:14:22,290 +you have. We have other data here, so we have to + +191 +00:14:22,290 --> 00:14:25,410 +compute the same for the others. + +192 +00:14:28,470 --> 00:14:33,250 +Then finally, just add xy, x squared, y squared. + +193 +00:14:35,910 --> 00:14:40,830 +The values you have here in this formula, in order + +194 +00:14:40,830 --> 00:14:44,830 +to compute the coefficient. + +195 +00:14:54,250 --> 00:15:00,070 +Now, this value ranges between minus one and plus + +196 +00:15:00,070 --> 00:15:00,370 +one. + +197 +00:15:06,520 --> 00:15:10,800 +So it's between minus one and plus one. That means + +198 +00:15:10,800 --> 00:15:15,840 +it's never smaller + +199 +00:15:15,840 --> 00:15:20,200 +than minus one or greater than one. It's between + +200 +00:15:20,200 --> 00:15:21,480 +minus one and plus one. + +201 +00:15:24,360 --> 00:15:28,300 +Make sense? I mean if your value is suppose you + +202 +00:15:28,300 --> 00:15:34,520 +did mistake for any of these computations and R + +203 +00:15:34,520 --> 00:15:41,710 +might be 1.15, 115. That means there is an error. 
+ +204 +00:15:42,270 --> 00:15:45,870 +Or for example, if R is negative 1.5, that means + +205 +00:15:45,870 --> 00:15:49,610 +there is a mistake. So you have to find or figure + +206 +00:15:49,610 --> 00:15:55,670 +out what is that mistake. So that's simple + +207 +00:15:55,670 --> 00:15:59,090 +calculations. Usually in the exam, we will give + +208 +00:15:59,090 --> 00:16:01,350 +the formula for the correlation coefficient, as we + +209 +00:16:01,350 --> 00:16:03,590 +mentioned before. In addition to that, we will + +210 +00:16:03,590 --> 00:16:04,330 +give the summation. + +211 +00:16:07,780 --> 00:16:12,720 +The sum of xy is given, sum x squared and sum y + +212 +00:16:12,720 --> 00:16:18,320 +squared. Also sum of x and sum of y, in order to + +213 +00:16:18,320 --> 00:16:22,320 +determine the means of x and y. For example, + +214 +00:16:22,520 --> 00:16:26,860 +suppose I give sum of xi and i goes from 1 to 10 + +215 +00:16:26,860 --> 00:16:31,760 +is 700, for example. You have to know that the + +216 +00:16:31,760 --> 00:16:38,720 +sample size is 10, so x bar. is 700 divided by 10, + +217 +00:16:39,320 --> 00:16:46,180 +so it's 7. Then use the curve to compute the + +218 +00:16:46,180 --> 00:16:52,000 +coefficient of correlation. Questions? I think + +219 +00:16:52,000 --> 00:16:55,900 +straightforward, maybe the easiest topic in this + +220 +00:16:55,900 --> 00:17:02,980 +book is to compute the coefficient of correlation + +221 +00:17:02,980 --> 00:17:09,070 +or correlation coefficient. Now my question is, do + +222 +00:17:09,070 --> 00:17:13,090 +you think outliers affect the correlation + +223 +00:17:13,090 --> 00:17:13,690 +coefficient? + +224 +00:17:17,010 --> 00:17:23,210 +We said last time outliers affect the mean, the + +225 +00:17:23,210 --> 00:17:28,310 +range, the variance. Now the question is, do + +226 +00:17:28,310 --> 00:17:33,510 +outliers affect the correlation? + +227 +00:17:37,410 --> 00:17:38,170 +Y. 
+ +228 +00:17:43,830 --> 00:17:51,330 +Exactly. The formula for R has X bar in it or Y + +229 +00:17:51,330 --> 00:17:56,670 +bar. So it means outliers affect + +230 +00:17:56,670 --> 00:18:01,210 +the correlation coefficient. So the answer is yes. + +231 +00:18:03,470 --> 00:18:06,410 +Here we have x bar and y bar. Also, there is + +232 +00:18:06,410 --> 00:18:10,690 +another formula to compute R. That formula is + +233 +00:18:10,690 --> 00:18:13,370 +given by covariance between x and y. + +234 +00:18:17,510 --> 00:18:21,930 +These two formulas are quite similar. I mean, by + +235 +00:18:21,930 --> 00:18:26,070 +using this one, we can end with this formula. So + +236 +00:18:26,070 --> 00:18:33,090 +this formula depends on this x is y. standard + +237 +00:18:33,090 --> 00:18:36,170 +deviations of X and Y. That means outlier will + +238 +00:18:36,170 --> 00:18:42,530 +affect the correlation coefficient. So in case of + +239 +00:18:42,530 --> 00:18:45,670 +outliers, R could be changed. + +240 +00:18:51,170 --> 00:18:55,530 +That formula is called simple correlation + +241 +00:18:55,530 --> 00:18:58,790 +coefficient. On the other hand, we have population + +242 +00:18:58,790 --> 00:19:02,200 +correlation coefficient. If you remember last + +243 +00:19:02,200 --> 00:19:08,940 +time, we used X bar as the sample mean and mu as + +244 +00:19:08,940 --> 00:19:14,460 +population mean. Also, S square as sample variance + +245 +00:19:14,460 --> 00:19:18,740 +and sigma square as population variance. Here, R + +246 +00:19:18,740 --> 00:19:24,360 +is used as sample coefficient of correlation and + +247 +00:19:24,360 --> 00:19:29,420 +rho, this Greek letter pronounced as rho. Rho is + +248 +00:19:29,420 --> 00:19:35,160 +used for population coefficient of correlation. + +249 +00:19:37,640 --> 00:19:42,040 +There are some features of R or Rho. The first one + +250 +00:19:42,040 --> 00:19:47,960 +is unity-free. R or Rho is unity-free. 
That means
+
+251
+00:19:47,960 --> 00:19:54,900
+if X represents...
+
+252
+00:19:54,900 --> 00:19:58,960
+And let's assume that the correlation between X
+
+253
+00:19:58,960 --> 00:20:02,040
+and Y equals 0.75.
+
+254
+00:20:04,680 --> 00:20:07,260
+Now, in this case, there is no unity. You cannot
+
+255
+00:20:07,260 --> 00:20:13,480
+say 0.75 years or 0.75 kilograms. It's unity-free.
+
+256
+00:20:13,940 --> 00:20:17,840
+There is no unit for the correlation coefficient,
+
+257
+00:20:18,020 --> 00:20:21,120
+the same as Cv. If you remember Cv, the
+
+258
+00:20:21,120 --> 00:20:24,320
+coefficient of variation, also this one is unity
+
+259
+00:20:24,320 --> 00:20:30,500
+-free. The second feature of R ranges between
+
+260
+00:20:30,500 --> 00:20:36,740
+minus one and plus one. As I mentioned, R lies
+
+261
+00:20:36,740 --> 00:20:42,340
+between minus one and plus one. Now, by using the
+
+262
+00:20:42,340 --> 00:20:48,100
+value of R, you can determine two things. Number
+
+263
+00:20:48,100 --> 00:20:53,360
+one, we can determine the direction. and strength
+
+264
+00:20:53,360 --> 00:20:56,940
+by using the sign you can determine if there
+
+265
+00:20:56,940 --> 00:21:03,980
+exists positive or negative so sign of R determine
+
+266
+00:21:03,980 --> 00:21:08,040
+negative or positive relationship the direction
+
+267
+00:21:08,040 --> 00:21:17,840
+the absolute value of R I mean absolute of R I
+
+268
+00:21:17,840 --> 00:21:21,980
+mean ignore the sign So the absolute value of R
+
+269
+00:21:21,980 --> 00:21:24,100
+determines the strength.
+
+270
+00:21:27,700 --> 00:21:30,760
+So by using the sign of R, you can determine the
+
+271
+00:21:30,760 --> 00:21:35,680
+direction, either positive or negative. By using
+
+272
+00:21:35,680 --> 00:21:37,740
+the absolute value, you can determine the
+
+273
+00:21:37,740 --> 00:21:43,500
+strength. 
We can split the strength into two + +274 +00:21:43,500 --> 00:21:52,810 +parts, either strong, moderate, or weak. So weak, + +275 +00:21:53,770 --> 00:21:59,130 +moderate, and strong by using the absolute value + +276 +00:21:59,130 --> 00:22:03,870 +of R. The closer to minus one, if R is close to + +277 +00:22:03,870 --> 00:22:07,010 +minus one, the stronger the negative relationship + +278 +00:22:07,010 --> 00:22:09,430 +between X and Y. For example, imagine + +279 +00:22:22,670 --> 00:22:26,130 +And as we mentioned, R ranges between minus 1 and + +280 +00:22:26,130 --> 00:22:26,630 +plus 1. + +281 +00:22:30,070 --> 00:22:35,710 +So if R is close to minus 1, it's a strong + +282 +00:22:35,710 --> 00:22:41,250 +relationship. Strong linked relationship. The + +283 +00:22:41,250 --> 00:22:45,190 +closer to 1, the stronger the positive + +284 +00:22:45,190 --> 00:22:49,230 +relationship. I mean, if R is close. Strong + +285 +00:22:49,230 --> 00:22:54,480 +positive. So strong in either direction, either to + +286 +00:22:54,480 --> 00:22:57,640 +the left side or to the right side. Strong + +287 +00:22:57,640 --> 00:23:00,280 +negative. On the other hand, there exists strong + +288 +00:23:00,280 --> 00:23:05,940 +negative relationship. Positive. Positive. If R is + +289 +00:23:05,940 --> 00:23:10,640 +close to zero, weak. Here we can say there exists + +290 +00:23:10,640 --> 00:23:15,940 +weak relationship between X and Y. + +291 +00:23:19,260 --> 00:23:25,480 +If R is close to 0.5 or + +292 +00:23:25,480 --> 00:23:32,320 +minus 0.5, you can say there exists positive + +293 +00:23:32,320 --> 00:23:38,840 +-moderate or negative-moderate relationship. So + +294 +00:23:38,840 --> 00:23:42,200 +you can split or you can divide the strength of + +295 +00:23:42,200 --> 00:23:44,540 +the relationship between X and Y into three parts. 
+
+296
+00:23:45,860 --> 00:23:50,700
+Strong, close to minus one or plus one, weak,
+
+297
+00:23:51,060 --> 00:23:59,580
+close to zero, moderate, close to 0.5. 0.5 is
+
+298
+00:23:59,580 --> 00:24:04,580
+halfway between 0 and 1, and minus 0.5 is also
+
+299
+00:24:04,580 --> 00:24:09,040
+halfway between minus 1 and 0. Now for example,
+
+300
+00:24:09,920 --> 00:24:15,580
+what's about if R equals minus 0.5? Suppose R1 is
+
+301
+00:24:15,580 --> 00:24:16,500
+minus 0.5.
+
+302
+00:24:20,180 --> 00:24:27,400
+strong negative or equal minus point eight strong
+
+303
+00:24:27,400 --> 00:24:33,540
+negative which is more strong nine nine because
+
+304
+00:24:33,540 --> 00:24:39,670
+this value is close closer to minus one than Minus
+
+305
+00:24:39,670 --> 00:24:44,070
+0.8. Even this value is greater than minus 0.9,
+
+306
+00:24:44,530 --> 00:24:50,870
+but minus 0.9 is close to minus 1, more closer to
+
+307
+00:24:50,870 --> 00:24:56,910
+minus 1 than minus 0.8. On the other hand, if R
+
+308
+00:24:56,910 --> 00:25:01,190
+equals 0.75, that means there exists positive
+
+309
+00:25:01,190 --> 00:25:06,970
+relationship. If R equals 0.85, also there exists
+
+310
+00:25:06,970 --> 00:25:13,540
+positive. But R2 is stronger than R1, because 0.85
+
+311
+00:25:13,540 --> 00:25:20,980
+is closer to plus 1 than 0.7. So we can say that
+
+312
+00:25:20,980 --> 00:25:23,960
+there exists strong relationship between X and Y,
+
+313
+00:25:24,020 --> 00:25:27,260
+and this relationship is positive. So again, by
+
+314
+00:25:27,260 --> 00:25:32,530
+using the sign, you can tell the direction. The
+
+315
+00:25:32,530 --> 00:25:35,910
+absolute value can tell the strength of the
+
+316
+00:25:35,910 --> 00:25:39,870
+relationship between X and Y. So there are five
+
+317
+00:25:39,870 --> 00:25:44,150
+features of R, unity-free. Ranges between minus
+
+318
+00:25:44,150 --> 00:25:47,750
+one and plus one. 
Closer to minus one, it means + +319 +00:25:47,750 --> 00:25:51,950 +stronger negative. Closer to plus one, stronger + +320 +00:25:51,950 --> 00:25:56,410 +positive. Close to zero, it means there is no + +321 +00:25:56,410 --> 00:26:00,790 +relationship. Or the weaker, the relationship + +322 +00:26:00,790 --> 00:26:13,240 +between X and Y. By using scatter plots, we + +323 +00:26:13,240 --> 00:26:18,160 +can construct a scatter plot by plotting the Y + +324 +00:26:18,160 --> 00:26:24,060 +values versus the X values. Y in the vertical axis + +325 +00:26:24,060 --> 00:26:28,400 +and X in the horizontal axis. If you look + +326 +00:26:28,400 --> 00:26:34,500 +carefully at graph number one and three, We see + +327 +00:26:34,500 --> 00:26:42,540 +that all the points lie on the straight line, + +328 +00:26:44,060 --> 00:26:48,880 +either this way or the other way. If all the + +329 +00:26:48,880 --> 00:26:52,320 +points lie on the straight line, it means they + +330 +00:26:52,320 --> 00:26:56,970 +exist perfectly. not even strong it's perfect + +331 +00:26:56,970 --> 00:27:02,710 +relationship either negative or positive so this + +332 +00:27:02,710 --> 00:27:07,530 +one perfect negative negative + +333 +00:27:07,530 --> 00:27:14,090 +because x increases y goes down decline so if x is + +334 +00:27:14,090 --> 00:27:19,590 +for example five maybe y is supposed to twenty if + +335 +00:27:19,590 --> 00:27:25,510 +x increased to seven maybe y is fifteen So if X + +336 +00:27:25,510 --> 00:27:29,290 +increases, in this case, Y declines or decreases, + +337 +00:27:29,850 --> 00:27:34,290 +it means there exists negative relationship. On + +338 +00:27:34,290 --> 00:27:40,970 +the other hand, the left corner here, positive + +339 +00:27:40,970 --> 00:27:44,710 +relationship, because X increases, Y also goes up. + +340 +00:27:45,970 --> 00:27:48,990 +And perfect, because all the points lie on the + +341 +00:27:48,990 --> 00:27:52,110 +straight line. 
So it's perfect, positive, perfect, + +342 +00:27:52,250 --> 00:27:57,350 +negative relationship. So it's straightforward to + +343 +00:27:57,350 --> 00:27:59,550 +determine if it's perfect by using scatterplot. + +344 +00:28:02,230 --> 00:28:04,950 +Also, by scatterplot, you can tell the direction + +345 +00:28:04,950 --> 00:28:09,270 +of the relationship. For the second scatterplot, + +346 +00:28:09,630 --> 00:28:12,270 +it seems to be that there exists negative + +347 +00:28:12,270 --> 00:28:13,730 +relationship between X and Y. + +348 +00:28:16,850 --> 00:28:21,030 +In this one, also there exists a relationship + +349 +00:28:24,730 --> 00:28:32,170 +positive which one is strong more strong this + +350 +00:28:32,170 --> 00:28:37,110 +one is stronger because the points are close to + +351 +00:28:37,110 --> 00:28:40,710 +the straight line much more than the other scatter + +352 +00:28:40,710 --> 00:28:43,410 +plot so you can say there exists negative + +353 +00:28:43,410 --> 00:28:45,810 +relationship but that one is stronger than the + +354 +00:28:45,810 --> 00:28:49,550 +other one this one is positive but the points are + +355 +00:28:49,550 --> 00:28:55,400 +scattered around the straight line so you can tell + +356 +00:28:55,400 --> 00:29:00,000 +the direction and sometimes sometimes not all the + +357 +00:29:00,000 --> 00:29:04,640 +time you can tell the strength sometimes it's very + +358 +00:29:04,640 --> 00:29:07,960 +clear that the relation is strong if the points + +359 +00:29:07,960 --> 00:29:11,480 +are very close straight line that means the + +360 +00:29:11,480 --> 00:29:15,940 +relation is strong now the other one the last one + +361 +00:29:15,940 --> 00:29:23,850 +here As X increases, Y stays at the same value. + +362 +00:29:23,970 --> 00:29:29,450 +For example, if Y is 20 and X is 1. X is 1, Y is + +363 +00:29:29,450 --> 00:29:33,870 +20. X increases to 2, for example. Y is still 20. 
+ +364 +00:29:34,650 --> 00:29:37,230 +So that means there is no relationship between X + +365 +00:29:37,230 --> 00:29:41,830 +and Y. It's a constant. Y equals a constant value. + +366 +00:29:42,690 --> 00:29:50,490 +Whatever X is, Y will have constant value. So that + +367 +00:29:50,490 --> 00:29:54,790 +means there is no relationship between X and Y. + +368 +00:29:56,490 --> 00:30:01,850 +Let's see how can we compute the correlation + +369 +00:30:01,850 --> 00:30:07,530 +between two variables. For example, suppose we + +370 +00:30:07,530 --> 00:30:12,150 +have data for father's height and son's height. + +371 +00:30:13,370 --> 00:30:16,510 +And we are interested to see if father's height + +372 +00:30:16,510 --> 00:30:21,730 +affects his son's height. So we have data for 10 + +373 +00:30:21,730 --> 00:30:28,610 +observations here. Father number one, his height + +374 +00:30:28,610 --> 00:30:38,570 +is 64 inches. And you know that inch equals 2 + +375 +00:30:38,570 --> 00:30:39,230 +.5. + +376 +00:30:43,520 --> 00:30:52,920 +So X is 64, Sun's height is 65. X is 68, Sun's + +377 +00:30:52,920 --> 00:30:58,820 +height is 67 and so on. Sometimes, if the dataset + +378 +00:30:58,820 --> 00:31:02,600 +is small enough, as in this example, we have just + +379 +00:31:02,600 --> 00:31:08,640 +10 observations, you can tell the direction. I + +380 +00:31:08,640 --> 00:31:12,060 +mean, you can say, yes, for this specific example, + +381 +00:31:12,580 --> 00:31:15,280 +there exists positive relationship between x and + +382 +00:31:15,280 --> 00:31:20,820 +y. But if the data set is large, it's very hard to + +383 +00:31:20,820 --> 00:31:22,620 +figure out if the relation is positive or + +384 +00:31:22,620 --> 00:31:26,400 +negative. 
So we have to find or to compute the + +385 +00:31:26,400 --> 00:31:29,700 +coefficient of correlation in order to see there + +386 +00:31:29,700 --> 00:31:32,940 +exists positive, negative, strong, weak, or + +387 +00:31:32,940 --> 00:31:37,820 +moderate. but again you can tell from this simple + +388 +00:31:37,820 --> 00:31:40,280 +example yes there is a positive relationship + +389 +00:31:40,280 --> 00:31:44,660 +because just if you pick random numbers here for + +390 +00:31:44,660 --> 00:31:49,240 +example 64 father's height his son's height 65 if + +391 +00:31:49,240 --> 00:31:54,600 +we move up here to 70 for father's height his + +392 +00:31:54,600 --> 00:32:00,160 +son's height is going to be 72 so as father + +393 +00:32:00,160 --> 00:32:05,020 +heights increases Also, son's height increases. + +394 +00:32:06,320 --> 00:32:11,700 +For example, 77, father's height. His son's height + +395 +00:32:11,700 --> 00:32:15,160 +is 76. So that means there exists positive + +396 +00:32:15,160 --> 00:32:19,740 +relationship. Make sense? But again, for large + +397 +00:32:19,740 --> 00:32:20,780 +data, you cannot tell that. + +398 +00:32:31,710 --> 00:32:36,090 +If, again, by using this data, small data, you can + +399 +00:32:36,090 --> 00:32:40,730 +determine just the length, the strength, I'm + +400 +00:32:40,730 --> 00:32:43,490 +sorry, the strength of a relationship or the + +401 +00:32:43,490 --> 00:32:47,590 +direction of the relationship. Just pick any + +402 +00:32:47,590 --> 00:32:51,030 +number at random. For example, if we pick this + +403 +00:32:51,030 --> 00:32:51,290 +number. + +404 +00:32:55,050 --> 00:33:00,180 +Father's height is 68, his son's height is 70. Now + +405 +00:33:00,180 --> 00:33:02,180 +suppose we pick another number that is greater + +406 +00:33:02,180 --> 00:33:05,840 +than 68, then let's see what will happen. 
For + +407 +00:33:05,840 --> 00:33:11,060 +father's height 70, his son's height increases up + +408 +00:33:11,060 --> 00:33:17,160 +to 72. Similarly, 72 father's height, his son's + +409 +00:33:17,160 --> 00:33:22,060 +height 75. So that means X increases, Y also + +410 +00:33:22,060 --> 00:33:25,740 +increases. So that means there exists both of + +411 +00:33:25,740 --> 00:33:32,570 +them. For sure it is hard to tell this direction + +412 +00:33:32,570 --> 00:33:36,130 +if the data is large. Because maybe you will find + +413 +00:33:36,130 --> 00:33:40,250 +as X increases for one point, Y maybe decreases + +414 +00:33:40,250 --> 00:33:43,610 +for that point. So it depends on the data you + +415 +00:33:43,610 --> 00:33:49,010 +have. Anyway, let's see how can we compute R. I + +416 +00:33:49,010 --> 00:33:53,770 +will use Excel to show how can we do these + +417 +00:33:53,770 --> 00:33:54,550 +calculations. + +418 +00:34:02,110 --> 00:34:06,530 +The screen is clear. But give me the data of X and + +419 +00:34:06,530 --> 00:34:06,750 +Y. + +420 +00:34:10,710 --> 00:34:14,310 +X is 64. 68. + +421 +00:34:18,910 --> 00:34:26,830 +68. 78. There is one 68. 78. 74. + +422 +00:34:31,120 --> 00:34:37,600 +Seventy-four. Seventy-five. Seventy-six. + +423 +00:34:38,360 --> 00:34:42,240 +Seventy-seven. Seventy-five. So that's the values + +424 +00:34:42,240 --> 00:34:49,440 +of X, Y values. Seventy. Seventy-five. Seventy + +425 +00:34:49,440 --> 00:34:49,800 +-seven. + +426 +00:35:17,230 --> 00:35:23,730 +So first we have to compute it. 
x times y so
+
+427
+00:35:23,730 --> 00:35:28,270
+that's as x times
+
+428
+00:35:28,270 --> 00:35:38,230
+the value of y so 64 times 65 equals 4160 x
+
+429
+00:35:38,230 --> 00:35:46,050
+squared so this value squared for y squared 65
+
+430
+00:35:48,660 --> 00:35:53,700
+Square and we have to do this one for the rest of
+
+431
+00:35:53,700 --> 00:36:03,160
+the data So
+
+432
+00:36:03,160 --> 00:36:07,600
+that's the sum of XY sum X squared and Y squared
+
+433
+00:36:07,600 --> 00:36:13,480
+now the summation So
+
+434
+00:36:13,480 --> 00:36:17,180
+that's the sum of X and Y
+
+435
+00:36:20,380 --> 00:36:27,040
+We have to compute the mean of x and y. So that is
+
+436
+00:36:27,040 --> 00:36:31,380
+this sum divided by n, where n is 10 in this case.
+
+437
+00:36:34,600 --> 00:36:36,100
+So this is the first step.
+
+438
+00:36:41,820 --> 00:36:48,900
+Let's see how can we compute R. R, we have sum of
+
+439
+00:36:48,900 --> 00:37:00,780
+x, y. minus n is 10 times x bar times y bar. This
+
+440
+00:37:00,780 --> 00:37:03,100
+is the first quantity. The other one is square
+
+441
+00:37:03,100 --> 00:37:08,580
+root of sum
+
+442
+00:37:08,580 --> 00:37:15,420
+x squared minus n x bar squared.
+
+443
+00:37:18,830 --> 00:37:24,770
+times some y squared minus n times y bar squared.
+
+444
+00:37:28,930 --> 00:37:34,090
+And we have to find the square root of this value.
+
+445
+00:37:34,210 --> 00:37:40,810
+So square root, that will give this result. 
So now + +446 +00:37:40,810 --> 00:37:46,990 +R equals this value divided by + +447 +00:37:49,670 --> 00:37:54,890 +155 and round always to two decimal places will + +448 +00:37:54,890 --> 00:38:05,590 +give 87 so r is 87 so first step we have x and y + +449 +00:38:05,590 --> 00:38:12,470 +compute xy x squared y squared sum of these all of + +450 +00:38:12,470 --> 00:38:18,100 +these then x bar y bar values are given Then just + +451 +00:38:18,100 --> 00:38:20,820 +use the formula you have, we'll get R to be at + +452 +00:38:20,820 --> 00:38:31,540 +seven. So in this case, if we just go back to + +453 +00:38:31,540 --> 00:38:33,400 +the slide we have here. + +454 +00:38:36,440 --> 00:38:41,380 +As we mentioned, father's height is the + +455 +00:38:41,380 --> 00:38:45,640 +explanatory variable. Son's height is the response + +456 +00:38:45,640 --> 00:38:46,060 +variable. + +457 +00:38:49,190 --> 00:38:52,810 +And that simple calculation gives summation of xi, + +458 +00:38:54,050 --> 00:38:57,810 +summation of yi, summation x squared, y squared, + +459 +00:38:57,970 --> 00:39:02,690 +and some xy. And finally, we'll get that result, + +460 +00:39:02,850 --> 00:39:07,850 +87%. Now, the sign is positive. That means there + +461 +00:39:07,850 --> 00:39:13,960 +exists positive. And 0.87 is close to 1. That + +462 +00:39:13,960 --> 00:39:17,320 +means there exists strong positive relationship + +463 +00:39:17,320 --> 00:39:22,480 +between father's and son's height. I think the + +464 +00:39:22,480 --> 00:39:25,060 +calculation is straightforward. + +465 +00:39:27,280 --> 00:39:33,280 +Now, for this example, the data are given in + +466 +00:39:33,280 --> 00:39:37,460 +inches. I mean father's and son's height in inch. + +467 +00:39:38,730 --> 00:39:41,050 +Suppose we want to convert from inch to + +468 +00:39:41,050 --> 00:39:44,750 +centimeter, so we have to multiply by 2. Do you + +469 +00:39:44,750 --> 00:39:52,050 +think in this case, R will change? 
So if we add or + +470 +00:39:52,050 --> 00:39:59,910 +multiply or divide, R will not change? I mean, if + +471 +00:39:59,910 --> 00:40:06,880 +we have X values, And we divide or multiply X, I + +472 +00:40:06,880 --> 00:40:09,460 +mean each value of X, by a number, by a fixed + +473 +00:40:09,460 --> 00:40:12,600 +value. For example, suppose here we multiplied + +474 +00:40:12,600 --> 00:40:19,460 +each value by 2.5 for X. Also multiply Y by the + +475 +00:40:19,460 --> 00:40:24,520 +same value, 2.5. Y will be the same. In addition + +476 +00:40:24,520 --> 00:40:28,920 +to that, if we multiply X by 2.5, for example, and + +477 +00:40:28,920 --> 00:40:34,960 +Y by 5, also R will not change. But you have to be + +478 +00:40:34,960 --> 00:40:39,400 +careful. We multiply each value of x by the same + +479 +00:40:39,400 --> 00:40:45,700 +number. And each value of y by the same number, + +480 +00:40:45,820 --> 00:40:49,640 +that number may be different from x. So I mean + +481 +00:40:49,640 --> 00:40:56,540 +multiply x by 2.5 and y by minus 1 or plus 2 or + +482 +00:40:56,540 --> 00:41:01,000 +whatever you have. But if it's negative, then + +483 +00:41:01,000 --> 00:41:05,640 +we'll get negative answer. I mean if Y is + +484 +00:41:05,640 --> 00:41:08,060 +positive, for example, and we multiply each value + +485 +00:41:08,060 --> 00:41:13,000 +Y by minus one, that will give negative sign. But + +486 +00:41:13,000 --> 00:41:17,640 +here I meant if we multiply this value by positive + +487 +00:41:17,640 --> 00:41:21,320 +sign, plus two, plus three, and let's see how can + +488 +00:41:21,320 --> 00:41:22,540 +we do that by Excel. + +489 +00:41:26,320 --> 00:41:31,480 +Now this is the data we have. I just make copy. + +490 +00:41:37,730 --> 00:41:45,190 +I will multiply each value X by 2.5. And I will do + +491 +00:41:45,190 --> 00:41:49,590 +the same for Y + +492 +00:41:49,590 --> 00:41:57,190 +value. I will replace this data by the new one. 
+ +493 +00:41:58,070 --> 00:42:00,410 +For sure the calculations will, the computations + +494 +00:42:00,410 --> 00:42:09,740 +here will change, but R will stay the same. So + +495 +00:42:09,740 --> 00:42:14,620 +here we multiply each x by 2.5 and the same for y. + +496 +00:42:15,540 --> 00:42:19,400 +The calculations here are different. We have + +497 +00:42:19,400 --> 00:42:22,960 +different sum, different sum of x, sum of y and so + +498 +00:42:22,960 --> 00:42:31,040 +on, but are the same. Let's see if we multiply + +499 +00:42:31,040 --> 00:42:38,880 +just x by 2.5 and y the same. + +500 +00:42:41,840 --> 00:42:49,360 +So we multiplied x by 2.5 and we keep it make + +501 +00:42:49,360 --> 00:42:57,840 +sense? Now let's see how outliers will affect the + +502 +00:42:57,840 --> 00:43:03,260 +value of R. Let's say if we change one point in + +503 +00:43:03,260 --> 00:43:08,480 +the data set support. I just changed 64. + +504 +00:43:13,750 --> 00:43:24,350 +for example if just by typo and just enter 6 so it + +505 +00:43:24,350 --> 00:43:33,510 +was 87 it becomes 0.7 so there is a big difference + +506 +00:43:33,510 --> 00:43:38,670 +between 0.87 and 0.7 and just we change one value + +507 +00:43:38,670 --> 00:43:45,920 +now suppose the other one is zero 82. The other is + +508 +00:43:45,920 --> 00:43:48,260 +2, for example. 1. + +509 +00:43:53,380 --> 00:43:59,200 +I just changed half of the data. Now R was 87, it + +510 +00:43:59,200 --> 00:44:02,920 +becomes 59. That means these outliers, these + +511 +00:44:02,920 --> 00:44:06,180 +values for sure are outliers and they fit the + +512 +00:44:06,180 --> 00:44:07,060 +correlation coefficient. + +513 +00:44:11,110 --> 00:44:14,970 +Now let's see if we just change this 1 to be 200. + +514 +00:44:15,870 --> 00:44:20,430 +It will go from 50 to up to 63. That means any + +515 +00:44:20,430 --> 00:44:26,010 +changes in x or y values will change the y. 
But if + +516 +00:44:26,010 --> 00:44:30,070 +we add or multiply all the values by a constant, r + +517 +00:44:30,070 --> 00:44:31,170 +will stay the same. + +518 +00:44:35,250 --> 00:44:43,590 +Any questions? That's the end of chapter 3. I will + +519 +00:44:43,590 --> 00:44:48,990 +move quickly to the practice problems we have. And + +520 +00:44:48,990 --> 00:44:55,270 +we posted the practice in the course website. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/FS8UHlZfJpc.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/FS8UHlZfJpc.srt new file mode 100644 index 0000000000000000000000000000000000000000..5aee68d497a05247c93c285cefe113632a5411e7 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/FS8UHlZfJpc.srt @@ -0,0 +1,2619 @@ +1 +00:00:05,600 --> 00:00:09,760 +Last time, we talked about the types of samples + +2 +00:00:09,760 --> 00:00:15,880 +and introduced two + +3 +00:00:15,880 --> 00:00:20,120 +types of samples. One is called non-probability + +4 +00:00:20,120 --> 00:00:23,900 +samples, and the other one is probability samples. + +5 +00:00:25,120 --> 00:00:30,560 +And also, we have discussed two types of non + +6 +00:00:30,560 --> 00:00:33,620 +-probability, which are judgment and convenience. + +7 +00:00:35,100 --> 00:00:39,500 +For the product samples, we also produced four + +8 +00:00:39,500 --> 00:00:46,560 +types, random sample, systematic, stratified, and + +9 +00:00:46,560 --> 00:00:54,400 +clustered sampling. That was last Sunday. Let's + +10 +00:00:54,400 --> 00:01:02,550 +see the comparison between these sampling data. A + +11 +00:01:02,550 --> 00:01:05,370 +simple, random sample, systematic random sample, + +12 +00:01:05,510 --> 00:01:09,930 +first, for these two techniques. First of all, + +13 +00:01:09,970 --> 00:01:13,590 +they are simple to use because we just use the + +14 +00:01:13,590 --> 00:01:18,750 +random tables, random number tables, or by using + +15 +00:01:18,750 --> 00:01:27,690 +any statistical software. 
But the disadvantage of + +16 +00:01:27,690 --> 00:01:28,490 +this technique + +17 +00:01:37,590 --> 00:01:40,830 +So it might be this sample is not representative + +18 +00:01:40,830 --> 00:01:44,530 +of the entire population. So this is the mainly + +19 +00:01:44,530 --> 00:01:50,230 +disadvantage of this sampling technique. So it can + +20 +00:01:50,230 --> 00:01:56,250 +be used unless the population is not symmetric or + +21 +00:01:56,250 --> 00:02:00,090 +the population is not heterogeneous. I mean if the + +22 +00:02:00,090 --> 00:02:04,510 +population has the same characteristics, then we + +23 +00:02:04,510 --> 00:02:08,870 +can use simple or systematic sample. But if there + +24 +00:02:08,870 --> 00:02:12,110 +are big differences or big disturbances between + +25 +00:02:12,990 --> 00:02:15,510 +the items of the population, I mean between or + +26 +00:02:15,510 --> 00:02:21,550 +among the individuals. In this case, stratified + +27 +00:02:21,550 --> 00:02:26,190 +sampling is better than using a simple random + +28 +00:02:26,190 --> 00:02:30,170 +sample. Stratified samples ensure representation + +29 +00:02:30,170 --> 00:02:33,010 +of individuals across the entire population. If + +30 +00:02:33,010 --> 00:02:36,800 +you remember last time we said a IUG population + +31 +00:02:36,800 --> 00:02:40,340 +can be splitted according to gender, either males + +32 +00:02:40,340 --> 00:02:44,440 +or females, or can be splitted according to + +33 +00:02:44,440 --> 00:02:48,840 +students' levels. First level, second level, and + +34 +00:02:48,840 --> 00:02:51,960 +fourth level, and so on. The last type of sampling + +35 +00:02:51,960 --> 00:02:55,340 +was clusters. Cluster sampling is more cost + +36 +00:02:55,340 --> 00:02:59,940 +effective. Because in this case, you have to split + +37 +00:02:59,940 --> 00:03:03,140 +the population into many clusters, then you can + +38 +00:03:03,140 --> 00:03:08,320 +choose a random of these clusters. 
Also, it's less + +39 +00:03:08,320 --> 00:03:12,720 +efficient unless you use a large sample. For this + +40 +00:03:12,720 --> 00:03:16,460 +reason, it's more cost effective than using the + +41 +00:03:16,460 --> 00:03:20,640 +other sampling techniques. So, these techniques + +42 +00:03:20,640 --> 00:03:23,700 +are used based on the study you have. Sometimes + +43 +00:03:23,700 --> 00:03:26,100 +simple random sampling is fine, and you can go + +44 +00:03:26,100 --> 00:03:29,360 +ahead and use it. Most of the time, stratified + +45 +00:03:29,360 --> 00:03:33,340 +random sampling is much better. So, it depends on + +46 +00:03:33,340 --> 00:03:36,940 +the population you have underlying your study. + +47 +00:03:37,680 --> 00:03:40,240 +That was what we talked about last Sunday. + +48 +00:03:43,860 --> 00:03:47,780 +Now, suppose we design a questionnaire or survey. + +49 +00:03:48,640 --> 00:03:52,980 +You have to know, number one, what's the purpose + +50 +00:03:52,980 --> 00:03:59,600 +of the survey. In this case, you can determine the + +51 +00:03:59,600 --> 00:04:02,040 +frame of the population. Next, + +52 +00:04:05,480 --> 00:04:07,660 +survey + +53 +00:04:13,010 --> 00:04:18,350 +Is the survey based on a probability sample? If + +54 +00:04:18,350 --> 00:04:20,830 +the answer is yes, then go ahead and use one of + +55 +00:04:20,830 --> 00:04:22,910 +the non-probability sampling techniques, either + +56 +00:04:22,910 --> 00:04:26,610 +similar than some certified cluster or systematic. + +57 +00:04:28,830 --> 00:04:33,330 +Next, we have to distinguish between four types of + +58 +00:04:33,330 --> 00:04:38,770 +errors, at least now. One is called coverage + +59 +00:04:38,770 --> 00:04:44,560 +error. You have to ask yourself, is the frame + +60 +00:04:44,560 --> 00:04:48,880 +appropriate? I mean, frame appropriate means that + +61 +00:04:48,880 --> 00:04:52,540 +you have all the individual list, then you can + +62 +00:04:52,540 --> 00:04:56,040 +choose one of these. 
For example, suppose we + +63 +00:04:56,040 --> 00:05:01,500 +divide Gaza Strip into four governorates. North + +64 +00:05:01,500 --> 00:05:06,800 +Gaza, Gaza Middle Area, Khanon, and Rafah. So we + +65 +00:05:06,800 --> 00:05:12,950 +have five sections of five governments. In this + +66 +00:05:12,950 --> 00:05:17,430 +case, if you, so that's your frame. Now, if you + +67 +00:05:17,430 --> 00:05:19,890 +exclude one, for example, and that one is + +68 +00:05:19,890 --> 00:05:22,350 +important for you, but you exclude it for some + +69 +00:05:22,350 --> 00:05:26,270 +reasons, in this case, you will have coverage as + +70 +00:05:26,270 --> 00:05:32,600 +well, because you excluded one group out of five + +71 +00:05:32,600 --> 00:05:36,140 +and that group may be important for your study. + +72 +00:05:36,840 --> 00:05:41,740 +Next is called non-response error. Suppose I + +73 +00:05:41,740 --> 00:05:48,040 +attributed my questionnaire for 100 students and I + +74 +00:05:48,040 --> 00:05:52,840 +gave each one 30 minutes to answer the + +75 +00:05:52,840 --> 00:05:56,380 +questionnaire or to fill up the questionnaire, but + +76 +00:05:56,380 --> 00:06:01,230 +I didn't follow up. The response in this case, it + +77 +00:06:01,230 --> 00:06:05,890 +might be you will get something error, and that + +78 +00:06:05,890 --> 00:06:09,010 +error refers to non-responsive, so you have to + +79 +00:06:09,010 --> 00:06:12,190 +follow up, follow up. It means maybe sometimes you + +80 +00:06:12,190 --> 00:06:14,670 +need to clarify the question you have in your + +81 +00:06:14,670 --> 00:06:19,510 +questionnaire so that the respondent understand + +82 +00:06:19,510 --> 00:06:21,850 +what do you mean exactly by that question + +83 +00:06:21,850 --> 00:06:25,550 +otherwise if you don't follow up it means it may + +84 +00:06:25,550 --> 00:06:30,550 +be there is an error, and that error is called non + +85 +00:06:30,550 --> 00:06:35,170 +-response. 
The other type of error is called + +86 +00:06:35,170 --> 00:06:37,150 +measurement error, which is one of the most + +87 +00:06:37,150 --> 00:06:39,910 +important errors, and we have to avoid. + +88 +00:06:42,950 --> 00:06:45,830 +It's called measurement error. Good questions + +89 +00:06:45,830 --> 00:06:50,710 +elicit good responses. It means, suppose, for + +90 +00:06:50,710 --> 00:06:57,700 +example, my question is, I feel this candidate is + +91 +00:06:57,700 --> 00:07:01,740 +good for us. What do you think? It's my question. + +92 +00:07:02,820 --> 00:07:08,000 +I feel this candidate, candidate A, whatever he + +93 +00:07:08,000 --> 00:07:15,000 +is, is good for us. What do you think? For sure + +94 +00:07:15,000 --> 00:07:18,080 +there's abundant answer will be yes. I agree with + +95 +00:07:18,080 --> 00:07:22,660 +you. So that means you design the question in the + +96 +00:07:22,660 --> 00:07:28,100 +way that you will know they respond directly, that + +97 +00:07:28,100 --> 00:07:31,980 +he will answer yes or no depends on your design of + +98 +00:07:31,980 --> 00:07:36,580 +the question. So it means leading question. So + +99 +00:07:36,580 --> 00:07:40,160 +measurement error. So but if we have good + +100 +00:07:40,160 --> 00:07:43,260 +questions, just ask any question for the + +101 +00:07:43,260 --> 00:07:48,060 +respondent, and let him or let his answer based on + +102 +00:07:48,890 --> 00:07:52,850 +what exactly he thinks about it. So don't force + +103 +00:07:52,850 --> 00:07:56,490 +the respondent to answer the question in the + +104 +00:07:56,490 --> 00:07:59,250 +direction you want to be. Otherwise you will get + +105 +00:07:59,250 --> 00:08:03,570 +something called Measurement Error. Do you think? + +106 +00:08:04,910 --> 00:08:06,770 +Give me an example of Measurement Error. + +107 +00:08:09,770 --> 00:08:12,450 +Give me an example of Measurement Error. 
Just ask + +108 +00:08:12,450 --> 00:08:16,370 +a question in a way that the respondent will + +109 +00:08:16,370 --> 00:08:20,750 +answer, I mean, his answer will be the same as you + +110 +00:08:20,750 --> 00:08:24,770 +think about it. + +111 +00:08:30,130 --> 00:08:35,130 +Maybe I like coffee, do you like coffee or tea? So + +112 +00:08:35,130 --> 00:08:37,390 +maybe he will go with your answer. In this case + +113 +00:08:37,390 --> 00:08:41,630 +it's measurement. Another example. + +114 +00:09:00,260 --> 00:09:00,860 +Exactly. + +115 +00:09:07,960 --> 00:09:12,420 +So it means that if you design a question in the + +116 +00:09:12,420 --> 00:09:15,420 +way that you will get the same answer you think + +117 +00:09:15,420 --> 00:09:18,910 +about it, it means that you will have something + +118 +00:09:18,910 --> 00:09:21,310 +called measurement error. The last type is + +119 +00:09:21,310 --> 00:09:25,490 +sampling error. Sampling error always happens, + +120 +00:09:25,710 --> 00:09:29,990 +always exists. For example, suppose you are around + +121 +00:09:29,990 --> 00:09:33,150 +50 students in this class. Suppose I select + +122 +00:09:33,150 --> 00:09:40,130 +randomly 20 of you, and I am interested suppose in + +123 +00:09:40,130 --> 00:09:44,090 +your age. Maybe for this sample. + +124 +00:09:46,590 --> 00:09:53,610 +I will get an average of your age of 19 years + +125 +00:09:53,610 --> 00:09:57,370 +someone + +126 +00:09:57,370 --> 00:10:02,050 +select another sample from the same population + +127 +00:10:02,050 --> 00:10:08,670 +with the same size maybe + +128 +00:10:08,670 --> 00:10:13,530 +the average of your age is not equal to 19 years + +129 +00:10:13,530 --> 00:10:16,370 +maybe 19 years, 3 months + +130 +00:10:19,330 --> 00:10:24,790 +Someone else maybe also select the same number of + +131 +00:10:24,790 --> 00:10:28,910 +students, but the average of the class might be 20 + +132 +00:10:28,910 --> 00:10:33,690 +years. 
So, the first one, the second, the third — each of them
It means people who don't + +154 +00:12:00,450 --> 00:12:03,230 +respond may be different from those who do + +155 +00:12:03,230 --> 00:12:09,730 +respond. For example, suppose I have a sample of + +156 +00:12:09,730 --> 00:12:11,410 +tennis students. + +157 +00:12:15,910 --> 00:12:22,950 +And I got responses from number two, number five, + +158 +00:12:24,120 --> 00:12:29,420 +and number 10. So, I have these points of view for + +159 +00:12:29,420 --> 00:12:34,860 +these three students. Now, the other seven students + +160 +00:12:34,860 --> 00:12:41,100 +might be they have different opinions. So, the only + +161 +00:12:41,100 --> 00:12:45,220 +thing you have, the opinions of just the three, + +162 +00:12:45,520 --> 00:12:47,800 +and maybe the rest have different opinions, it + +163 +00:12:47,800 --> 00:12:50,680 +means in this case you will have something called + +164 +00:12:50,680 --> 00:12:54,270 +non-responsiveness. Or the same as we said before, + +165 +00:12:54,810 --> 00:12:58,870 +if your question is designed in a correct way. The + +166 +00:12:58,870 --> 00:13:02,130 +other type, sample error, variations from sample + +167 +00:13:02,130 --> 00:13:06,230 +to sample will always exist. As I mentioned here, + +168 +00:13:06,230 --> 00:13:10,470 +we select six samples, each one has different + +169 +00:13:10,470 --> 00:13:14,730 +sample mean. The other type, Measurement Error, + +170 +00:13:15,310 --> 00:13:19,200 +due to weakness in question design. So that's the + +171 +00:13:19,200 --> 00:13:25,600 +type of survey errors. So, one more time, average + +172 +00:13:25,600 --> 00:13:32,120 +error, it means you exclude a group or groups from + +173 +00:13:32,120 --> 00:13:35,080 +the frame. So, in this case, suppose I excluded + +174 +00:13:35,080 --> 00:13:40,380 +these from my frame. So I just select the sample + +175 +00:13:40,380 --> 00:13:45,940 +from all of these, except this portion, or these + +176 +00:13:45,940 --> 00:13:49,300 +two groups. 
Non-response error means you don't + +177 +00:13:49,300 --> 00:13:52,920 +have a follow-up on non-responses. Sampling error, + +178 +00:13:54,060 --> 00:13:58,720 +random sample gives different sample statistics. + +179 +00:13:59,040 --> 00:14:01,400 +So it means random differences from sample to + +180 +00:14:01,400 --> 00:14:05,760 +sample. Finally, measurement error, bad or leading + +181 +00:14:05,760 --> 00:14:09,260 +questions. This is one of the most important ones + +182 +00:14:09,260 --> 00:14:15,310 +that you have to avoid. So, that's the first part + +183 +00:14:15,310 --> 00:14:20,910 +of this chapter, assembling techniques. Do you + +184 +00:14:20,910 --> 00:14:24,990 +have any questions? Next, we'll talk about + +185 +00:14:24,990 --> 00:14:30,050 +assembling distributions. So far, up to this + +186 +00:14:30,050 --> 00:14:35,690 +point. I mean, at the end of chapter 6, we + +187 +00:14:35,690 --> 00:14:40,840 +discussed the probability, for example, of + +188 +00:14:40,840 --> 00:14:46,580 +computing X greater than, for example, 7. For + +189 +00:14:46,580 --> 00:14:53,260 +example, suppose X represents your score in + +190 +00:14:53,260 --> 00:14:55,020 +business statistics course. + +191 +00:14:57,680 --> 00:15:02,680 +And suppose we know that X is normally distributed + +192 +00:15:02,680 --> 00:15:10,860 +with a mean of 80, standard deviation of 10. My + +193 +00:15:10,860 --> 00:15:15,360 +question was, in chapter 6, what's the probability + +194 +00:15:15,360 --> 00:15:23,380 +that the student scores more than 70? Suppose we + +195 +00:15:23,380 --> 00:15:26,720 +select randomly one student, and the question is, + +196 +00:15:26,840 --> 00:15:29,980 +what's the probability that his score, so just for + +197 +00 + +223 +00:17:30,370 --> 00:17:33,890 +score of the student. Now we have to use something + +224 +00:17:33,890 --> 00:17:38,470 +other called x bar. I'm interested in the average + +225 +00:17:38,470 --> 00:17:46,770 +of this. 
So x bar minus the mean of not x, x bar, + +226 +00:17:47,550 --> 00:17:52,550 +then divided by sigma x bar. So this is my new, + +227 +00:17:53,270 --> 00:17:54,770 +the score. + +228 +00:17:57,820 --> 00:18:00,680 +Here, there are three questions. Number one, + +229 +00:18:03,680 --> 00:18:11,000 +what's the shape of the distribution of X bar? So, + +230 +00:18:11,040 --> 00:18:13,340 +we are asking about the shape of the distribution. + +231 +00:18:14,560 --> 00:18:19,290 +It might be normal. If the entire population that + +232 +00:18:19,290 --> 00:18:22,390 +we select a sample from is normal, I mean if the + +233 +00:18:22,390 --> 00:18:24,450 +population is normally distributed, then you + +234 +00:18:24,450 --> 00:18:27,110 +select a random sample of that population, it + +235 +00:18:27,110 --> 00:18:30,590 +makes sense that the sample is also normal, so any + +236 +00:18:30,590 --> 00:18:33,030 +statistic is computed from that sample is also + +237 +00:18:33,030 --> 00:18:35,810 +normally distributed, so it makes sense. If the + +238 +00:18:35,810 --> 00:18:38,450 +population is normal, then the shape is also + +239 +00:18:38,450 --> 00:18:43,650 +normal. But if the population is unknown, you + +240 +00:18:43,650 --> 00:18:46,550 +don't have any information about the underlying + +241 +00:18:46,550 --> 00:18:50,530 +population, then you cannot say it's normal unless + +242 +00:18:50,530 --> 00:18:53,790 +you have certain condition that we'll talk about + +243 +00:18:53,790 --> 00:18:57,510 +maybe after 30 minutes. So, exactly, if the + +244 +00:18:57,510 --> 00:18:59,610 +population is normal, then the shape is also + +245 +00:18:59,610 --> 00:19:01,910 +normal, but otherwise, we have to think about it. + +246 +00:19:02,710 --> 00:19:06,890 +This is the first question. Now, there are two + +247 +00:19:06,890 --> 00:19:11,910 +unknowns in this equation. 
We have to know the + +248 +00:19:11,910 --> 00:19:17,980 +mean, Or x bar, so the mean of x bar is not given, + +249 +00:19:18,520 --> 00:19:23,200 +the mean means the center. So the second question, + +250 +00:19:23,440 --> 00:19:26,920 +what's the center of the distribution? In this + +251 +00:19:26,920 --> 00:19:29,980 +case, the mean of x bar. So we are looking at + +252 +00:19:29,980 --> 00:19:32,920 +what's the mean of x bar. The third question is + +253 +00:19:32,920 --> 00:19:39,350 +sigma x bar is also unknown, spread. Now shape, + +254 +00:19:39,850 --> 00:19:44,590 +center, spread, these are characteristics, these + +255 +00:19:44,590 --> 00:19:47,770 +characteristics in this case sampling + +256 +00:19:47,770 --> 00:19:50,490 +distribution, exactly which is called sampling + +257 +00:19:50,490 --> 00:19:56,130 +distribution. So by sampling distribution we mean + +258 +00:19:56,130 --> 00:20:00,110 +that, by sampling distribution, we mean that you + +259 +00:20:00,110 --> 00:20:05,840 +have to know the center of distribution, I mean + +260 +00:20:05,840 --> 00:20:08,760 +the mean of the statistic you are interested in. + +261 +00:20:09,540 --> 00:20:14,620 +Second, the spread or the variability of the + +262 +00:20:14,620 --> 00:20:17,600 +sample statistic also you are interested in. In + +263 +00:20:17,600 --> 00:20:21,240 +addition to that, you have to know the shape of + +264 +00:20:21,240 --> 00:20:25,080 +the statistic. So three things we have to know, + +265 +00:20:25,820 --> 00:20:31,980 +center, spread and shape. So that's what we'll + +266 +00:20:31,980 --> 00:20:37,340 +talk about now. So now sampling distribution is a + +267 +00:20:37,340 --> 00:20:41,640 +distribution of all of the possible values of a + +268 +00:20:41,640 --> 00:20:46,040 +sample statistic. 
This sample statistic could be + +269 +00:20:46,040 --> 00:20:50,240 +sample mean, could be sample variance, could be + +270 +00:20:50,240 --> 00:20:53,500 +sample proportion, because any population has + +271 +00:20:53,500 --> 00:20:57,080 +mainly three characteristics, mean, standard + +272 +00:20:57,080 --> 00:20:59,040 +deviation, and proportion. + +273 +00:21:01,520 --> 00:21:04,260 +So again, a sampling distribution is a + +274 +00:21:04,260 --> 00:21:07,400 +distribution of all of the possible values of a + +275 +00:21:07,400 --> 00:21:11,960 +sample statistic or a given size sample selected + +276 +00:21:11,960 --> 00:21:20,020 +from a population. For example, suppose you sample + +277 +00:21:20,020 --> 00:21:23,420 +50 students from your college regarding their mean + +278 +00:21:23,420 --> 00:21:30,640 +GPA. GPA means Graduate Point Average. Now, if you + +279 +00:21:30,640 --> 00:21:35,280 +obtain many different samples of size 50, you will + +280 +00:21:35,280 --> 00:21:38,760 +compute a different mean for each sample. As I + +281 +00:21:38,760 --> 00:21:42,680 +mentioned here, I select a sample the same sizes, + +282 +00:21:43,540 --> 00:21:47,580 +but we obtain different sample statistics, I mean + +283 +00:21:47,580 --> 00:21:54,260 +different sample means. We are interested in the + +284 +00:21:54,260 --> 00:21:59,760 +distribution of all potential mean GBA We might + +285 +00:21:59,760 --> 00:22:04,040 +calculate for any given sample of 50 students. So + +286 +00:22:04,040 --> 00:22:09,440 +let's focus into these values. So we have again a + +287 +00:22:09,440 --> 00:22:14,580 +random sample of 50 sample means. So we have 1, 2, + +288 +00:22:14,700 --> 00:22:18,480 +3, 4, 5, maybe 50, 6, whatever we have. So select + +289 +00:22:18,480 --> 00:22:22,660 +a random sample of size 20. Maybe we repeat this + +290 +00:22:22,660 --> 00:22:28,590 +sample 10 times. So we end with 10. 
different + +291 +00:22:28,590 --> 00:22:31,650 +values of the simple means. Now we have new ten + +292 +00:22:31,650 --> 00:22:38,130 +means. Now the question is, what's the center of + +293 +00:22:38,130 --> 00:22:42,590 +these values, I mean for the means? What's the + +294 +00:22:42,590 --> 00:22:46,250 +spread of the means? And what's the shape of the + +295 +00:22:46,250 --> 00:22:50,310 +means? So these are the mainly three questions. + +296 +00:22:53,510 --> 00:22:56,810 +For example, let's get just simple example and + +297 +00:22:56,810 --> 00:23:04,370 +that we have only population of size 4. In the + +298 +00:23:04,370 --> 00:23:09,950 +real life, the population size is much bigger than + +299 +00:23:09,950 --> 00:23:14,970 +4, but just for illustration. + +300 +00:23:17,290 --> 00:23:20,190 +Because size 4, I mean if the population is 4, + +301 +00:23:21,490 --> 00:23:24,950 +it's a small population. So we can take all the + +302 +00:23:24,950 --> 00:23:27,610 +values and find the mean and standard deviation. + +303 +00:23:28,290 --> 00:23:31,430 +But in reality, we have more than that. So this + +304 +00:23:31,430 --> 00:23:37,390 +one just for as example. So let's suppose that we + +305 +00:23:37,390 --> 00:23:42,790 +have a population of size 4. So n equals 4. + +306 +00:23:46,530 --> 00:23:54,030 +And we are interested in the ages. And suppose the + +307 +00:23:54,030 --> 00:23:58,930 +values of X, X again represents H, + +308 +00:24:00,690 --> 00:24:01,810 +and the values we have. + +309 +00:24:06,090 --> 00:24:08,930 +So these are the four values we have. + +310 +00:24:12,050 --> 00:24:16,910 +Now simple calculation will + +311 +00:24:16,910 --> 00:24:19,910 +give you the mean, the population mean. + +312 +00:24:25,930 --> 00:24:30,410 +Just add these values and divide by the operation + +313 +00:24:30,410 --> 00:24:35,410 +size, we'll get 21 years. 
And sigma, as we + +314 +00:24:35,410 --> 00:24:39,450 +mentioned in chapter three, square root of this + +315 +00:24:39,450 --> 00:24:44,550 +quantity will give 2.236 + +316 +00:24:44,550 --> 00:24:49,450 +years. So simple calculation will give these + +317 +00:24:49,450 --> 00:24:54,470 +results. Now if you look at distribution of these + +318 +00:24:54,470 --> 00:24:58,430 +values, Because as I mentioned, we are looking for + +319 +00:24:58,430 --> 00:25:03,810 +center, spread, and shape. The center is 21 of the + +320 +00:25:03,810 --> 00:25:09,430 +exact population. The variation is around 2.2. + +321 +00:25:10,050 --> 00:25:14,770 +Now, the shape of distribution. Now, 18 represents + +322 +00:25:14,770 --> 00:25:15,250 +once. + +323 +00:25:17,930 --> 00:25:22,350 +I mean, we have only one 18, so 18 divided one + +324 +00:25:22,350 --> 00:25:29,830 +time over 425. 20% represent also 25%, the same as + +325 +00:25:29,830 --> 00:25:33,030 +for 22 or 24. In this case, we have something + +326 +00:25:33,030 --> 00:25:37,530 +called uniform distribution. In this case, the + +327 +00:25:37,530 --> 00:25:43,330 +proportions are the same. So, the mean, not + +328 +00:25:43,330 --> 00:25:48,030 +normal, it's uniform distribution. The mean is 21, + +329 +00:25:48,690 --> 00:25:52,490 +standard deviation is 2.236, and the distribution + +330 +00:25:52,490 --> 00:25:58,840 +is uniform. Okay, so that's center, spread and + +331 +00:25:58,840 --> 00:26:02,920 +shape of the true population we have. Now suppose + +332 +00:26:02,920 --> 00:26:03,520 +for example, + +333 +00:26:06,600 --> 00:26:12,100 +we select a random sample of size 2 from this + +334 +00:26:12,100 --> 00:26:12,620 +population. + +335 +00:26:15,740 --> 00:26:21,500 +So we select a sample of size 2. We have 18, 20, + +336 +00:26:21,600 --> 00:26:25,860 +22, 24 years. We have four students, for example. + +337 +00:26:27,760 --> 00:26:31,140 +And we select a sample of size two. 
So the first + +338 +00:26:31,140 --> 00:26:40,820 +one could be 18 and 18, 18 and 20, 18 and 22. So + +339 +00:26:40,820 --> 00:26:47,400 +we have 16 different samples. So number of samples + +340 +00:26:47,400 --> 00:26:54,500 +in this case is 16. Imagine that we have five. I + +341 +00:26:54,500 --> 00:27:00,220 +mean the population size is 5 and so on. So the + +342 +00:27:00,220 --> 00:27:06,000 +rule is number + +343 +00:27:06,000 --> 00:27:13,020 +of samples in this case and the volume is million. + +344 +00:27:14,700 --> 00:27:19,140 +Because we have four, four squared is sixteen, + +345 +00:27:19,440 --> 00:27:26,940 +that's all. 5 squared, 25, and so on. Now, we have + +346 +00:27:26,940 --> 00:27:31,740 +16 different samples. For sure, we will have + +347 +00:27:31,740 --> 00:27:37,940 +different sample means. Now, for the first sample, + +348 +00:27:39,560 --> 00:27:47,200 +18, 18, the average is also 18. The next one, 18, + +349 +00:27:47,280 --> 00:27:50,040 +20, the average is 19. + +350 +00:27:54,790 --> 00:27:59,770 +20, 18, 24, the average is 21, and so on. So now + +351 +00:27:59,770 --> 00:28:05,450 +we have 16 sample means. Now this is my new + +352 +00:28:05,450 --> 00:28:10,510 +values. It's my sample. This sample has different + +353 +00:28:10,510 --> 00:28:16,050 +sample means. Now let's take these values and + +354 +00:28:16,050 --> 00:28:23,270 +compute average, sigma, and the shape of the + +355 +00:28:23,270 --> 00:28:29,200 +distribution. So again, we have a population of + +356 +00:28:29,200 --> 00:28:35,240 +size 4, we select a random cell bone. of size 2 + +357 +00:28:35,240 --> 00:28:39,060 +from that population, we end with 16 random + +358 +00:28:39,060 --> 00:28:43,620 +samples, and they have different sample means. + +359 +00:28:43,860 --> 00:28:46,700 +Might be two of them are the same. 
I mean, we have + +360 +00:28:46,700 --> 00:28:52,220 +18 just repeated once, but 19 repeated twice, 23 + +361 +00:28:52,220 --> 00:28:59,220 +times, 24 times, and so on. 22 three times, 23 + +362 +00:28:59,220 --> 00:29:04,270 +twice, 24 once. So it depends on The sample means + +363 +00:29:04,270 --> 00:29:07,210 +you have. So we have actually different samples. + +364 +00:29:14,790 --> 00:29:18,970 +For example, let's look at 24 and 22. What's the + +365 +00:29:18,970 --> 00:29:22,790 +average of these two values? N divided by 2 will + +366 +00:29:22,790 --> 00:29:24,290 +give 22. + +367 +00:29:33,390 --> 00:29:35,730 +So again, we have 16 sample means. + +368 +00:29:38,610 --> 00:29:41,550 +Now look first at the shape of the distribution. + +369 +00:29:43,110 --> 00:29:47,490 +18, as I mentioned, repeated once. So 1 over 16. + +370 +00:29:48,430 --> 00:29:57,950 +19 twice. 23 times. 1 four times. 22 three times. + +371 +00:29:58,940 --> 00:30:03,340 +then twice then once now the distribution was + +372 +00:30:03,340 --> 00:30:07,960 +uniform remember now it becomes normal + +373 +00:30:07,960 --> 00:30:10,780 +distribution so the first one x1 is normal + +374 +00:30:10,780 --> 00:30:16,340 +distribution so it has normal distribution so + +375 +00:30:16,340 --> 00:30:20,040 +again the shape of x1 looks like normal + +376 +00:30:20,040 --> 00:30:26,800 +distribution we need to compute the center of X + +377 +00:30:26,800 --> 00:30:32,800 +bar, the mean of X bar. We have to add the values + +378 +00:30:32,800 --> 00:30:36,380 +of X bar, the sample mean, then divide by the + +379 +00:30:36,380 --> 00:30:42,800 +total number of size, which is 16. So in this + +380 +00:30:42,800 --> 00:30:51,720 +case, we got 21, which is similar to the one for + +381 +00:30:51,720 --> 00:30:55,950 +the entire population. So this is the first + +382 +00:30:55,950 --> 00:30:59,930 +unknown parameter. 
The mu of x bar is the same as + +383 +00:30:59,930 --> 00:31:05,490 +the population mean mu. The second one, the split + +384 +00:31:05,490 --> 00:31:13,450 +sigma of x bar by using the same equation + +385 +00:31:13,450 --> 00:31:17,170 +we have, sum of x bar in this case minus the mean + +386 +00:31:17,170 --> 00:31:21,430 +of x bar squared, then divide this quantity by the + +387 +00:31:21,430 --> 00:31:26,270 +capital I which is 16 in this case. So we will end + +388 +00:31:26,270 --> 00:31:28,510 +with 1.58. + +389 +00:31:31,270 --> 00:31:36,170 +Now let's compare population standard deviation + +390 +00:31:36,170 --> 00:31:42,210 +and the sample standard deviation. First of all, + +391 +00:31:42,250 --> 00:31:45,050 +you see that these two values are not the same. + +392 +00:31:47,530 --> 00:31:50,370 +The population standard deviation was 2.2, around + +393 +00:31:50,370 --> 00:31:57,310 +2.2. But for the sample, for the sample mean, it's + +394 +00:31:57,310 --> 00:32:02,690 +1.58, so that means sigma of X bar is smaller than + +395 +00:32:02,690 --> 00:32:03,710 +sigma of X. + +396 +00:32:07,270 --> 00:32:12,010 +It means exactly, the variation of X bar is always + +397 +00:32:12,010 --> 00:32:15,770 +smaller than the variation of X, always. + +398 +00:32:20,420 --> 00:32:26,480 +So here is the comparison. The distribution was + +399 +00:32:26,480 --> 00:32:32,000 +uniform. It's no longer uniform. It looks like a + +400 +00:32:32,000 --> 00:32:36,440 +bell shape. The mean of X is 21, which is the same + +401 +00:32:36,440 --> 00:32:40,440 +as the mean of X bar. But the standard deviation + +402 +00:32:40,440 --> 00:32:44,200 +of the population is larger than the standard + +403 +00:32:44,200 --> 00:32:48,060 +deviation of the sample mean or the average. + +404 +00:32:53,830 --> 00:32:58,090 +Different samples of the same sample size from the + +405 +00:32:58,090 --> 00:33:00,790 +same population will yield different sample means. 
+ +406 +00:33:01,450 --> 00:33:06,050 +We know that. If we have a population and from + +407 +00:33:06,050 --> 00:33:08,570 +that population, so we have this big population, + +408 +00:33:10,250 --> 00:33:15,010 +from this population suppose we selected 10 + +409 +00:33:15,010 --> 00:33:19,850 +samples, sample 1 with size 50. + +410 +00:33:21,540 --> 00:33:26,400 +Another sample, sample 2 with the same size. All + +411 +00:33:26,400 --> 00:33:29,980 +the way, suppose we select 10 samples, sample 10, + +412 +00:33: + +445 +00:36:23,500 --> 00:36:28,660 +smaller than sigma of the standard deviation of + +446 +00:36:28,660 --> 00:36:33,180 +normalization. Now if you look at the relationship + +447 +00:36:33,180 --> 00:36:36,380 +between the standard error of X bar and the sample + +448 +00:36:36,380 --> 00:36:41,760 +size, we'll see that as the sample size increases, + +449 +00:36:42,500 --> 00:36:46,440 +sigma of X bar decreases. So if we have large + +450 +00:36:46,440 --> 00:36:51,200 +sample size, I mean instead of selecting a random + +451 +00:36:51,200 --> 00:36:53,520 +sample of size 2, if you select a random sample of + +452 +00:36:53,520 --> 00:36:56,900 +size 3 for example, you will get sigma of X bar + +453 +00:36:56,900 --> 00:37:03,140 +less than 1.58. So note that standard error of the + +454 +00:37:03,140 --> 00:37:09,260 +mean decreases as the sample size goes up. So as n + +455 +00:37:09,260 --> 00:37:13,000 +increases, sigma of x bar goes down. So there is + +456 +00:37:13,000 --> 00:37:17,440 +an inverse relationship between the standard error of + +457 +00:37:17,440 --> 00:37:21,900 +the mean and the sample size. So now we answered + +458 +00:37:21,900 --> 00:37:24,660 +the three questions. The shape looks like a bell + +459 +00:37:24,660 --> 00:37:31,290 +shape. 
If we select our sample from a normal + +460 +00:37:31,290 --> 00:37:37,850 +population with a mean equal to the population mean + +461 +00:37:37,850 --> 00:37:40,530 +and standard deviation of the standard error equals + +462 +00:37:40,530 --> 00:37:48,170 +sigma over the square root of n. So now, let's talk + +463 +00:37:48,170 --> 00:37:53,730 +about the sampling distribution of the sample mean if + +464 +00:37:53,730 --> 00:37:59,170 +the population is normal. So now, my population is + +465 +00:37:59,170 --> 00:38:03,830 +normally distributed, and we are interested in the + +466 +00:38:03,830 --> 00:38:06,430 +sampling distribution of the sample mean of X bar. + +467 +00:38:07,630 --> 00:38:11,330 +If the population is normally distributed with + +468 +00:38:11,330 --> 00:38:14,870 +mean mu and standard deviation sigma, in this + +469 +00:38:14,870 --> 00:38:18,590 +case, the sampling distribution of X bar is also + +470 +00:38:18,590 --> 00:38:22,870 +normally distributed, so this is the shape. With + +471 +00:38:22,870 --> 00:38:27,070 +the mean of X bar equals mu and sigma of X bar equals + +472 +00:38:27,070 --> 00:38:35,470 +sigma over the square root of n. So again, if we sample from a normal + +473 +00:38:35,470 --> 00:38:39,650 +population, I mean my sampling technique, I select + +474 +00:38:39,650 --> 00:38:44,420 +a random sample from a normal population. Then if + +475 +00:38:44,420 --> 00:38:47,640 +we are interested in the standard distribution of + +476 +00:38:47,640 --> 00:38:51,960 +X bar, then that distribution is normally + +477 +00:38:51,960 --> 00:38:56,000 +distributed with a mean equal to mu and standard + +478 +00:38:56,000 --> 00:39:02,540 +deviation sigma over mu. So that's the shape. It's + +479 +00:39:02,540 --> 00:39:05,670 +normal. The mean is the same as the population + +480 +00:39:05,670 --> 00:39:09,030 +mean, and the standard deviation of x bar equals + +481 +00:39:09,030 --> 00:39:16,130 +sigma over the square root of n. 
So now let's go back to the z + +482 +00:39:16,130 --> 00:39:21,130 +-score we discussed before. If you remember, I + +483 +00:39:21,130 --> 00:39:25,150 +mentioned before + +484 +00:39:25,150 --> 00:39:32,720 +that the z-score, generally speaking, is X minus the mean + +485 +00:39:32,720 --> 00:39:34,740 +of X divided by sigma X. + +486 +00:39:37,640 --> 00:39:41,620 +And we know that Z has a standard normal + +487 +00:39:41,620 --> 00:39:48,020 +distribution with a mean of zero and a variance of one. In + +488 +00:39:48,020 --> 00:39:52,860 +this case, we are looking for the sampling + +489 +00:39:52,860 --> 00:39:59,350 +-distribution of X bar. So Z equals X bar. minus + +490 +00:39:59,350 --> 00:40:06,050 +the mean of x bar divided by sigma of x bar. So + +491 +00:40:06,050 --> 00:40:10,770 +the same equation, but different statistics. In the + +492 +00:40:10,770 --> 00:40:15,770 +first one, we have x, for example, which represents the + +493 +00:40:15,770 --> 00:40:20,370 +score. Here, my sample statistic is the sample + +494 +00:40:20,370 --> 00:40:22,890 +mean, which represents the average of the scores. + +495 +00:40:23,470 --> 00:40:29,460 +So x bar, minus its mean, I mean the mean of x + +496 +00:40:29,460 --> 00:40:37,280 +bar, divided by its standard error. So x bar minus + +497 +00:40:37,280 --> 00:40:41,000 +the mean of x bar divided by sigma of x bar. By + +498 +00:40:41,000 --> 00:40:48,020 +using that mu of x bar equals mu, and sigma of x + +499 +00:40:48,020 --> 00:40:51,240 +bar equals sigma over the square root of n, we will end with + +500 +00:40:51,240 --> 00:40:52,600 +this equation z square. + +501 +00:40:56,310 --> 00:41:00,790 +So this equation will be used instead of using the + +502 +00:41:00,790 --> 00:41:04,650 +previous one. 
So z square equals sigma, I'm sorry, + +503 +00:41:04,770 --> 00:41:08,470 +z equals x bar minus the mean divided by sigma + +504 +00:41:08,470 --> 00:41:13,310 +bar, where x bar is the sample mean, mu is the + +505 +00:41:13,310 --> 00:41:15,990 +population mean, sigma is the population standard + +506 +00:41:15,990 --> 00:41:19,810 +deviation, and n is the sample size. So that's the + +507 +00:41:19,810 --> 00:41:22,490 +difference between chapter six, + +508 +00:41:25,110 --> 00:41:32,750 +and that one we have only x minus y by sigma. Here + +509 +00:41:32,750 --> 00:41:36,450 +we are interested in x bar minus the mean of x bar + +510 +00:41:36,450 --> 00:41:40,290 +which is mu. And sigma of x bar equals sigma over the square root of n. + +511 +00:41:47,970 --> 00:41:52,010 +Now when we are saying that mu of x bar equals mu, + +512 +00:41:54,530 --> 00:42:01,690 +That means the expected value of + +513 +00:42:01,690 --> 00:42:05,590 +the sample mean equals the population mean. When + +514 +00:42:05,590 --> 00:42:08,610 +we are saying mean of X bar equals mu, it means + +515 +00:42:08,610 --> 00:42:13,270 +the expected value of X bar equals mu. In other + +516 +00:42:13,270 --> 00:42:20,670 +words, the expectation of X bar equals mu. If this + +517 +00:42:20,670 --> 00:42:27,900 +happens, we say that X bar is an unbiased + +518 +00:42:27,900 --> 00:42:31,420 +estimator + +519 +00:42:31,420 --> 00:42:35,580 +of + +520 +00:42:35,580 --> 00:42:40,620 +mu. So this is a new definition, an unbiased + +521 +00:42:40,620 --> 00:42:45,490 +estimator X bar is called an unbiased estimator if + +522 +00:42:45,490 --> 00:42:49,410 +this condition is satisfied. I mean, if the mean + +523 +00:42:49,410 --> 00:42:54,450 +of X bar or if the expected value of X bar equals + +524 +00:42:54,450 --> 00:42:57,790 +the population mean, in this case, we say that X + +525 +00:42:57,790 --> 00:43:02,450 +bar is a good estimator of Mu. 
Because on average, + +526 +00:43:05,430 --> 00:43:08,230 +The expected value of X bar equals the population + +527 +00:43:08,230 --> 00:43:14,970 +mean, so in this case, X bar is a good estimator of + +528 +00:43:14,970 --> 00:43:20,410 +Mu. Now if you compare the two distributions, + +529 +00:43:22,030 --> 00:43:27,510 +a normal distribution here with the population mean Mu + +530 +00:43:27,510 --> 00:43:30,550 +and a standard deviation for example sigma. + +531 +00:43:33,190 --> 00:43:40,590 +That's for the scores, the scores. Now instead of + +532 +00:43:40,590 --> 00:43:43,690 +the scores above, we have x bar, the sample mean. + +533 +00:43:44,670 --> 00:43:48,590 +Again, the mean of x bar is the same as the + +534 +00:43:48,590 --> 00:43:52,990 +population mean. Both means are the same, mu of x + +535 +00:43:52,990 --> 00:43:57,130 +bar equals mu. But if you look at the spread of + +536 +00:43:57,130 --> 00:44:00,190 +the second distribution, it is more than the + +537 +00:44:00,190 --> 00:44:03,350 +other one. So that's the comparison between the + +538 +00:44:03,350 --> 00:44:05,530 +two populations. + +539 +00:44:07,050 --> 00:44:13,390 +So again, to compare or to figure out the + +540 +00:44:13,390 --> 00:44:17,910 +relationship between sigma of x bar and the sample + +541 +00:44:17,910 --> 00:44:22,110 +size. Suppose we have this blue normal + +542 +00:44:22,110 --> 00:44:28,590 +distribution with a sample size of say 10 or 30, for + +543 +00:44:28,590 --> 00:44:28,870 +example. + +544 +00:44:32,220 --> 00:44:37,880 +As n gets bigger and bigger, sigma of x bar + +545 +00:44:37,880 --> 00:44:41,800 +becomes smaller and smaller. If you look at the + +546 +00:44:41,800 --> 00:44:44,760 +red one, maybe if the red one has n equal to 100, + +547 +00:44:45,700 --> 00:44:48,780 +we'll get this spread. But for the other one, we + +548 +00:44:48,780 --> 00:44:55,240 +have a larger spread. 
So as n increases, sigma of x + +549 +00:44:55,240 --> 00:44:59,860 +bar decreases. So this, the blue one for a smaller + +550 +00:44:59,860 --> 00:45:06,240 +sample size. The red one for a larger sample size. + +551 +00:45:06,840 --> 00:45:11,120 +So again, as n increases, sigma of x bar goes down + +552 +00:45:11,120 --> 00:45:12,040 +four degrees. + +553 +00:45:21,720 --> 00:45:29,480 +Next, let's use this fact to + +554 +00:45:29,480 --> 00:45:37,440 +figure out an interval for the sample mean with 90 + +555 +00:45:37,440 --> 00:45:42,140 +% confidence and suppose the population we have is + +556 +00:45:42,140 --> 00:45:49,500 +normal with a mean of 368 and sigma of 15 and suppose + +557 +00:45:49,500 --> 00:45:52,900 +we select a random sample of a size of 25 and the question + +558 +00:45:52,900 --> 00:45:57,600 +is find symmetrically distributed interval around + +559 +00:45:57,600 --> 00:46:03,190 +the mean that will include 95% of the sample means + +560 +00:46:03,190 --> 00:46:08,610 +when mu equals 368, sigma is 15, and your sample + +561 +00:46:08,610 --> 00:46:13,830 +size is 25. So in this case, we are looking for + +562 +00:46:13,830 --> 00:46:17,150 +the + +563 +00:46:17,150 --> 00:46:19,110 +estimation of the sample mean. + +564 +00:46:23,130 --> 00:46:24,970 +And we have this information, + +565 +00:46:28,910 --> 00:46:31,750 +Sigma is 15 and N is 25. + +566 +00:46:35,650 --> 00:46:38,890 +The problem mentioned there, we have a symmetric + +567 +00:46:38,890 --> 00:46:48,490 +distribution and this area is 95% bisymmetric and + +568 +00:46:48,490 --> 00:46:52,890 +we have only 5% out. So that means half to the + +569 +00:46:52,890 --> 00:46:56,490 +right and half to the left. + +570 +00:46:59,740 --> 00:47:02,640 +And let's see how we can compute these two values. 
+ +571 +00:47:03,820 --> 00:47:11,440 +The problem says that the average is 368 + +572 +00:47:11,440 --> 00:47:18,660 +for this data and the standard deviation sigma of + +573 +00:47:18,660 --> 00:47:28,510 +15. He asked about what are the values of x bar. I + +574 +00:47:28,510 --> 00:47:32,430 +mean, we have to find the interval of x bar. Let's + +575 +00:47:32,430 --> 00:47:36,130 +see. If you remember last time, z score was x + +576 +00:47:36,130 --> 00:47:41,130 +minus mu divided by sigma. But now we have x bar. + +577 +00:47:41,890 --> 00:47:45,850 +So your z score should be x bar minus mu divided by + +578 +00:47:45,850 --> 00:47:50,850 +sigma over the square root of n. Now cross multiplication, you + +579 +00:47:50,850 --> 00:47:55,970 +will get x bar minus mu equals z sigma over the square root + +580 +00:47:55,970 --> 00:48:01,500 +of n. That means x bar equals mu plus z sigma over + +581 +00:48:01,500 --> 00:48:04,440 +the square root of n. Exactly the same equation we got in + +582 +00:48:04,440 --> 00:48:09,840 +chapter six, but there, in that one, we have x + +583 +00:48:09,840 --> 00:48:13,700 +equals mu plus z sigma. Now we have x bar equals + +584 +00:48:13,700 --> 00:48:18,200 +mu plus z sigma over the square root of n, because we have + +585 +00:48:18,200 --> 00:48:23,000 +different statistics. It's x bar instead of x. Now + +586 +00:48:23,000 --> 00:48:28,510 +we are looking for these two values. Now let's + +587 +00:48:28,510 --> 00:48:29,410 +compute z-score. + +588 +00:48:32,450 --> 00:48:36,830 +The z-score for this point, which has an area of 2.5% + +589 +00:48:36,830 --> 00:48:41,930 +below it, is the same as the z-score, but in the + +590 +00:48:41,930 --> 00:48:48,670 +opposite direction. If you remember, we got this + +591 +00:48:48,670 --> 00:48:49,630 +value, 1.96. + +592 +00:48:52,790 --> 00:48:58,080 +So my z-score is negative 1.96 to the left. 
and 1 + +593 +00:48:58,080 --> 00:49:08,480 +.9621 so now my x bar in the lower limit in this + +594 +00:49:08,480 --> 00:49:17,980 +side on the left side equals mu which is 368 minus + +595 +00:49:17,980 --> 00:49:29,720 +1.96 times sigma which is 15 divide by the square root of 25. + +596 +00:49:30,340 --> 00:49:34,980 +So that's the value of the sample mean in the + +597 +00:49:34,980 --> 00:49:39,740 +lower limit, or lower bound. On the other hand, + +598 +00:49:42,320 --> 00:49:49,720 +expand our limit to the other hand equals 316 plus 1.96 + +599 +00:49:49,720 --> 00:49:56,100 +sigma over the square root of n. Simple calculation will give this + +600 +00:49:56,100 --> 00:49:56,440 +result. + +601 +00:49:59,770 --> 00:50:06,870 +The first X bar for the lower limit is 362.12, the + +602 +00:50:06,870 --> 00:50:10,050 +other is 373.1. + +603 +00:50:11,450 --> 00:50:17,170 +So again for this data, for this example, the mean + +604 +00:50:17,170 --> 00:50:23,030 +was, the population mean was 368, the population + +605 +00:50:23,030 --> 00:50:26,310 +has a standard deviation of 15, we select a random + +606 +00:50:26,310 --> 00:50:31,070 +sample of size 25, Then we end with this result + +607 +00:50:31,070 --> 00:50:41,110 +that 95% of all sample means of sample size 25 are + +608 +00:50:41,110 --> 00:50:44,810 +between these two values. It means that we have + +609 +00:50:44,810 --> 00:50:49,530 +this big population and this population is + +610 +00:50:49,530 --> 00:50:55,240 +symmetric, it's normal. And we know that The mean of + +611 +00:50:55,240 --> 00:51:00,680 +this population is 368 with a sigma of 15. + +612 +00:51:02,280 --> 00:51:08,320 +We select from this population many samples. Each + +613 +00:51:08,320 --> 00:51:11,600 +one has a size of 25. + +614 +00:51:15,880 --> 00:51:20,940 +Suppose, for example, we select 100 samples, 100 + +615 +00:51:20,940 --> 00:51:27,260 +random samples. 
So we end with different sample + +616 +00:51:27,260 --> 00:51:27,620 +means. + +617 +00:51:33,720 --> 00:51:39,820 +So we have 100 new sample means. In this case, you + +618 +00:51:39,820 --> 00:51:46,320 +can say that 95 out of these, 95 out of 100, it + +619 +00:51:46,320 --> 00:51:52,560 +means 95, one of these sample means. have values + +620 +00:51:52,560 --> 00:52:01,720 +between 362.12 and 373.5. And what's remaining? + +621 +00:52:03,000 --> 00:52:07,940 +Just five of these sample means would be out of + +622 +00:52:07,940 --> 00:52:13,220 +this interval either below 362 or above the upper + +623 +00:52:13,220 --> 00:52:17,720 +limit. So you are 95% sure that + +624 +00:52:21,230 --> 00:52:24,350 +the sample mean lies between these two points. + +625 +00:52:25,410 --> 00:52:29,470 +So, 5% of the sample means will be out. Make + +626 +00:52:29,470 --> 00:52:37,510 +sense? Imagine that I have selected 200 samples. + +627 +00:52:40,270 --> 00:52:46,330 +Now, how many X bar will be between these two + +628 +00:52:46,330 --> 00:52:54,140 +values? 95% of these 200. So how many 95%? How + +629 +00:52:54,140 --> 00:52:56,060 +many means in this case? + +630 +00:52:58,900 --> 00:53:04,600 +95% out of 200 is 190. + +631 +00:53:05,480 --> 00:53:12,200 +190. Just multiply. 95 multiplies by 200. It will + +632 +00:53:12,200 --> 00:53:13,160 +give you 190. + +633 +00:53:22,740 --> 00:53:29,860 +values between 362 + + + +667 +00:56:00,160 --> 00:56:03,640 +larger and larger, or gets larger and larger, then + +668 +00:56:03,640 --> 00:56:06,860 +the standard distribution of X bar is + +669 +00:56:06,860 --> 00:56:14,090 +approximately normal in this. Again, look at the + +670 +00:56:14,090 --> 00:56:19,630 +blue curve. Now, this one looks like skewed + +671 +00:56:19,630 --> 00:56:20,850 +distribution to the right. + +672 +00:56:24,530 --> 00:56:28,730 +Now, as the sample gets large enough, then it + +673 +00:56:28,730 --> 00:56:33,470 +becomes normal. 
So, the sample distribution + +674 +00:56:33,470 --> 00:56:37,350 +becomes almost normal regardless of the shape of + +675 +00:56:37,350 --> 00:56:41,570 +the population. I mean if you sample from unknown + +676 +00:56:41,570 --> 00:56:46,590 +population, and that one has either right skewed + +677 +00:56:46,590 --> 00:56:52,130 +or left skewed, if the sample size is large, then + +678 +00:56:52,130 --> 00:56:55,810 +the sampling distribution of X bar becomes almost + +679 +00:56:55,810 --> 00:57:01,530 +normal distribution regardless of the… so that’s + +680 +00:57:01,530 --> 00:57:06,830 +the central limit theorem. So again, if the + +681 +00:57:06,830 --> 00:57:10,980 +population is not normal, The condition is only + +682 +00:57:10,980 --> 00:57:15,360 +you have to select a large sample. In this case, + +683 +00:57:15,960 --> 00:57:19,340 +the central tendency mu of X bar is same as mu. + +684 +00:57:20,000 --> 00:57:24,640 +The variation is also sigma over root N. + +685 +00:57:28,740 --> 00:57:32,120 +So again, standard distribution of X bar becomes + +686 +00:57:32,120 --> 00:57:38,620 +normal as N. The theorem again says If we select a + +687 +00:57:38,620 --> 00:57:42,500 +random sample from unknown population, then the + +688 +00:57:42,500 --> 00:57:44,560 +standard distribution of X part is approximately + +689 +00:57:44,560 --> 00:57:53,580 +normal as long as N gets large enough. Now the + +690 +00:57:53,580 --> 00:57:57,100 +question is how large is large enough? + +691 +00:58:00,120 --> 00:58:06,530 +There are two cases, or actually three cases. For + +692 +00:58:06,530 --> 00:58:11,310 +most distributions, if you don’t know the exact + +693 +00:58:11,310 --> 00:58:18,670 +shape, n above 30 is enough to use or to apply + +694 +00:58:18,670 --> 00:58:22,290 +that theorem. So if n is greater than 30, it will + +695 +00:58:22,290 --> 00:58:24,650 +give a standard distribution that is nearly + +696 +00:58:24,650 --> 00:58:29,070 +normal. 
So if my n is large, it means above 30, or + +697 +00:58:29,070 --> 00:58:33,450 +30 and above this. For fairly symmetric + +698 +00:58:33,450 --> 00:58:35,790 +distribution, I mean for nearly symmetric + +699 +00:58:35,790 --> 00:58:38,630 +distribution, the distribution is not exactly + +700 +00:58:38,630 --> 00:58:42,910 +normal, but approximately normal. In this case, N + +701 +00:58:42,910 --> 00:58:46,490 +to be large enough if it is above 15. So, N + +702 +00:58:46,490 --> 00:58:48,770 +greater than 15 will usually have same + +703 +00:58:48,770 --> 00:58:50,610 +distribution as almost normal. + +704 +00:58:55,480 --> 00:58:57,840 +For normal population, as we mentioned, of + +705 +00:58:57,840 --> 00:59:00,740 +distributions, the semantic distribution of the + +706 +00:59:00,740 --> 00:59:02,960 +mean is always. + +707 +00:59:06,680 --> 00:59:12,380 +Okay, so again, there are three cases. For most + +708 +00:59:12,380 --> 00:59:16,280 +distributions, N to be large, above 30. In this + +709 +00:59:16,280 --> 00:59:20,460 +case, the distribution is nearly normal. For + +710 +00:59:20,460 --> 00:59:24,300 +fairly symmetric distributions, N above 15 gives + +711 +00:59:24,660 --> 00:59:28,960 +almost normal distribution. But if the population + +712 +00:59:28,960 --> 00:59:32,400 +by itself is normally distributed, always the + +713 +00:59:32,400 --> 00:59:35,800 +sample mean is normally distributed. So that’s the + +714 +00:59:35,800 --> 00:59:37,300 +three cases. + +715 +00:59:40,040 --> 00:59:47,480 +Now for this example, suppose we have a + +716 +00:59:47,480 --> 00:59:49,680 +population. It means we don’t know the + +717 +00:59:49,680 --> 00:59:52,900 +distribution of that population. And that + +718 +00:59:52,900 --> 00:59:57,340 +population has mean of 8. Standard deviation of 3. + +719 +00:59:58,200 --> 01:00:01,200 +And suppose a random sample of size 36 is + +720 +01:00:01,200 --> 01:00:04,780 +selected. 
In this case, the population is not + +721 +01:00:04,780 --> 01:00:07,600 +normal. It says A population, so you don’t know + +722 +01:00:07,600 --> 01:00:12,340 +the exact distribution. But N is large. It’s above + +723 +01:00:12,340 --> 01:00:15,060 +30, so you can apply the central limit theorem. + +724 +01:00:15,920 --> 01:00:20,380 +Now we ask about what’s the probability that a + +725 +01:00:20,380 --> 01:00:25,920 +sample means. is between what’s the probability + +726 +01:00:25,920 --> 01:00:29,240 +that the same element is between these two values. + +727 +01:00:32,180 --> 01:00:36,220 +Now, the difference between this lecture and the + +728 +01:00:36,220 --> 01:00:39,800 +previous ones was, here we are interested in the + +729 +01:00:39,800 --> 01:00:44,440 +exponent of X. Now, even if the population is not + +730 +01:00:44,440 --> 01:00:47,080 +normally distributed, the central limit theorem + +731 +01:00:47,080 --> 01:00:51,290 +can be abused because N is large enough. So now, + +732 +01:00:51,530 --> 01:00:57,310 +the mean of X bar equals mu, which is eight, and + +733 +01:00:57,310 --> 01:01:02,170 +sigma of X bar equals sigma over root N, which is + +734 +01:01:02,170 --> 01:01:07,150 +three over square root of 36, which is one-half. + +735 +01:01:11,150 --> 01:01:17,210 +So now, the probability of X bar greater than 7.8, + +736 +01:01:17,410 --> 01:01:21,890 +smaller than 8.2, Subtracting U, then divide by + +737 +01:01:21,890 --> 01:01:26,210 +sigma over root N from both sides, so 7.8 minus 8 + +738 +01:01:26,210 --> 01:01:30,130 +divided by sigma over root N. Here we have 8.2 + +739 +01:01:30,130 --> 01:01:33,230 +minus 8 divided by sigma over root N. I will end + +740 +01:01:33,230 --> 01:01:38,150 +with Z between minus 0.4 and 0.4. Now, up to this + +741 +01:01:38,150 --> 01:01:43,170 +step, it’s in U, for chapter 7. Now, Z between + +742 +01:01:43,170 --> 01:01:47,630 +minus 0.4 up to 0.4, you have to go back. 
And use + +743 +01:01:47,630 --> 01:01:51,030 +the table in chapter 6, you will end with this + +744 +01:01:51,030 --> 01:01:54,530 +result. So the only difference here, you have to + +745 +01:01:54,530 --> 01:01:55,790 +use sigma over root N. diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/FS8UHlZfJpc_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/FS8UHlZfJpc_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..2360f0f8de06f5f672e49b0c0f350435aeb340c5 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/FS8UHlZfJpc_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 3361, "start": 5.6, "end": 33.62, "text": " Last time, we talked about the types of samples and introduces two types of samples. One is called non-probability samples, and the other one is probability samples. And also, we have discussed two types of non-probability, which are judgment and convenience.", "tokens": [5264, 565, 11, 321, 2825, 466, 264, 3467, 295, 10938, 293, 31472, 732, 3467, 295, 10938, 13, 1485, 307, 1219, 2107, 12, 41990, 2310, 10938, 11, 293, 264, 661, 472, 307, 8482, 10938, 13, 400, 611, 11, 321, 362, 7152, 732, 3467, 295, 2107, 12, 41990, 2310, 11, 597, 366, 12216, 293, 19283, 13], "avg_logprob": -0.2869318062608892, "compression_ratio": 1.6774193548387097, "no_speech_prob": 0.0, "words": [{"start": 5.6, "end": 6.22, "word": " Last", "probability": 0.68359375}, {"start": 6.22, "end": 6.44, "word": " time,", "probability": 0.890625}, {"start": 6.54, "end": 6.6, "word": " we", "probability": 0.9609375}, {"start": 6.6, "end": 6.88, "word": " talked", "probability": 0.85595703125}, {"start": 6.88, "end": 7.26, "word": " about", "probability": 0.9111328125}, {"start": 7.26, "end": 7.66, "word": " the", "probability": 0.61865234375}, {"start": 7.66, "end": 8.16, "word": " types", "probability": 0.8359375}, {"start": 8.16, "end": 9.36, "word": " of", "probability": 0.96044921875}, {"start": 9.36, "end": 9.76, "word": " samples", "probability": 0.64013671875}, 
{"start": 9.76, "end": 10.12, "word": " and", "probability": 0.52294921875}, {"start": 10.12, "end": 11.66, "word": " introduces", "probability": 0.224365234375}, {"start": 11.66, "end": 15.88, "word": " two", "probability": 0.8359375}, {"start": 15.88, "end": 16.3, "word": " types", "probability": 0.82275390625}, {"start": 16.3, "end": 16.52, "word": " of", "probability": 0.9453125}, {"start": 16.52, "end": 16.9, "word": " samples.", "probability": 0.80517578125}, {"start": 17.74, "end": 17.76, "word": " One", "probability": 0.8662109375}, {"start": 17.76, "end": 17.92, "word": " is", "probability": 0.8525390625}, {"start": 17.92, "end": 18.32, "word": " called", "probability": 0.7353515625}, {"start": 18.32, "end": 19.64, "word": " non", "probability": 0.54052734375}, {"start": 19.64, "end": 20.12, "word": "-probability", "probability": 0.7643229166666666}, {"start": 20.12, "end": 21.16, "word": " samples,", "probability": 0.77978515625}, {"start": 22.16, "end": 22.38, "word": " and", "probability": 0.82177734375}, {"start": 22.38, "end": 22.44, "word": " the", "probability": 0.6591796875}, {"start": 22.44, "end": 22.64, "word": " other", "probability": 0.888671875}, {"start": 22.64, "end": 22.86, "word": " one", "probability": 0.78515625}, {"start": 22.86, "end": 22.98, "word": " is", "probability": 0.93701171875}, {"start": 22.98, "end": 23.48, "word": " probability", "probability": 0.794921875}, {"start": 23.48, "end": 23.9, "word": " samples.", "probability": 0.83251953125}, {"start": 25.12, "end": 25.36, "word": " And", "probability": 0.91015625}, {"start": 25.36, "end": 25.58, "word": " also,", "probability": 0.7265625}, {"start": 25.66, "end": 25.76, "word": " we", "probability": 0.9169921875}, {"start": 25.76, "end": 25.98, "word": " have", "probability": 0.90234375}, {"start": 25.98, "end": 28.0, "word": " discussed", "probability": 0.88720703125}, {"start": 28.0, "end": 29.12, "word": " two", "probability": 0.92919921875}, {"start": 29.12, "end": 29.54, 
"word": " types", "probability": 0.83544921875}, {"start": 29.54, "end": 29.92, "word": " of", "probability": 0.96728515625}, {"start": 29.92, "end": 30.56, "word": " non", "probability": 0.97607421875}, {"start": 30.56, "end": 31.04, "word": "-probability,", "probability": 0.95849609375}, {"start": 31.6, "end": 31.9, "word": " which", "probability": 0.951171875}, {"start": 31.9, "end": 32.16, "word": " are", "probability": 0.93701171875}, {"start": 32.16, "end": 32.52, "word": " judgment", "probability": 0.44677734375}, {"start": 32.52, "end": 33.08, "word": " and", "probability": 0.279541015625}, {"start": 33.08, "end": 33.62, "word": " convenience.", "probability": 0.927734375}], "temperature": 1.0}, {"id": 2, "seek": 6040, "start": 35.1, "end": 60.4, "text": " For the product samples also we produced four types, random sample, systematic, stratified and clustered sampling. That was last Sunday. Let's see the comparison between these sampling data.", "tokens": [1171, 264, 1674, 10938, 611, 321, 7126, 1451, 3467, 11, 4974, 6889, 11, 27249, 11, 23674, 2587, 293, 596, 38624, 21179, 13, 663, 390, 1036, 7776, 13, 961, 311, 536, 264, 9660, 1296, 613, 21179, 1412, 13], "avg_logprob": -0.34457237704804067, "compression_ratio": 1.3941605839416058, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 35.1, "end": 35.98, "word": " For", "probability": 0.452880859375}, {"start": 35.98, "end": 36.24, "word": " the", "probability": 0.82421875}, {"start": 36.24, "end": 36.58, "word": " product", "probability": 0.63525390625}, {"start": 36.58, "end": 37.0, "word": " samples", "probability": 0.8076171875}, {"start": 37.0, "end": 37.36, "word": " also", "probability": 0.448974609375}, {"start": 37.36, "end": 37.76, "word": " we", "probability": 0.66748046875}, {"start": 37.76, "end": 38.8, "word": " produced", "probability": 0.424072265625}, {"start": 38.8, "end": 39.5, "word": " four", "probability": 0.6767578125}, {"start": 39.5, "end": 39.96, "word": " types,", 
"probability": 0.81396484375}, {"start": 40.74, "end": 41.32, "word": " random", "probability": 0.149658203125}, {"start": 41.32, "end": 41.82, "word": " sample,", "probability": 0.55712890625}, {"start": 42.74, "end": 43.46, "word": " systematic,", "probability": 0.9296875}, {"start": 44.68, "end": 45.92, "word": " stratified", "probability": 0.977294921875}, {"start": 45.92, "end": 46.56, "word": " and", "probability": 0.685546875}, {"start": 46.56, "end": 47.16, "word": " clustered", "probability": 0.641845703125}, {"start": 47.16, "end": 48.1, "word": " sampling.", "probability": 0.4765625}, {"start": 48.74, "end": 49.36, "word": " That", "probability": 0.81201171875}, {"start": 49.36, "end": 49.92, "word": " was", "probability": 0.95166015625}, {"start": 49.92, "end": 51.88, "word": " last", "probability": 0.822265625}, {"start": 51.88, "end": 52.84, "word": " Sunday.", "probability": 0.8818359375}, {"start": 53.9, "end": 54.4, "word": " Let's", "probability": 0.916015625}, {"start": 54.4, "end": 54.64, "word": " see", "probability": 0.92138671875}, {"start": 54.64, "end": 55.48, "word": " the", "probability": 0.908203125}, {"start": 55.48, "end": 56.12, "word": " comparison", "probability": 0.8818359375}, {"start": 56.12, "end": 56.74, "word": " between", "probability": 0.89404296875}, {"start": 56.74, "end": 59.6, "word": " these", "probability": 0.8134765625}, {"start": 59.6, "end": 60.16, "word": " sampling", "probability": 0.82421875}, {"start": 60.16, "end": 60.4, "word": " data.", "probability": 0.364990234375}], "temperature": 1.0}, {"id": 3, "seek": 8849, "start": 62.25, "end": 88.49, "text": " A simple, random sample, systematic random sample, first, for these two techniques. First of all, they are simple to use because we just use the random tables, random number tables, or by using any statistical software. 
But the disadvantage of this technique", "tokens": [316, 2199, 11, 4974, 6889, 11, 27249, 4974, 6889, 11, 700, 11, 337, 613, 732, 7512, 13, 2386, 295, 439, 11, 436, 366, 2199, 281, 764, 570, 321, 445, 764, 264, 4974, 8020, 11, 4974, 1230, 8020, 11, 420, 538, 1228, 604, 22820, 4722, 13, 583, 264, 24292, 295, 341, 6532], "avg_logprob": -0.33323317422316623, "compression_ratio": 1.628930817610063, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 62.25, "end": 62.55, "word": " A", "probability": 0.127197265625}, {"start": 62.55, "end": 62.97, "word": " simple,", "probability": 0.67919921875}, {"start": 63.17, "end": 63.37, "word": " random", "probability": 0.705078125}, {"start": 63.37, "end": 63.81, "word": " sample,", "probability": 0.7216796875}, {"start": 64.09, "end": 64.67, "word": " systematic", "probability": 0.485107421875}, {"start": 64.67, "end": 65.01, "word": " random", "probability": 0.72802734375}, {"start": 65.01, "end": 65.37, "word": " sample,", "probability": 0.8544921875}, {"start": 65.51, "end": 65.81, "word": " first,", "probability": 0.65380859375}, {"start": 66.15, "end": 66.39, "word": " for", "probability": 0.88720703125}, {"start": 66.39, "end": 66.63, "word": " these", "probability": 0.83837890625}, {"start": 66.63, "end": 67.01, "word": " two", "probability": 0.908203125}, {"start": 67.01, "end": 68.21, "word": " techniques.", "probability": 0.92431640625}, {"start": 69.11, "end": 69.59, "word": " First", "probability": 0.90234375}, {"start": 69.59, "end": 69.77, "word": " of", "probability": 0.947265625}, {"start": 69.77, "end": 69.93, "word": " all,", "probability": 0.94921875}, {"start": 69.97, "end": 70.05, "word": " they", "probability": 0.80517578125}, {"start": 70.05, "end": 70.25, "word": " are", "probability": 0.90478515625}, {"start": 70.25, "end": 70.83, "word": " simple", "probability": 0.95654296875}, {"start": 70.83, "end": 71.07, "word": " to", "probability": 0.97021484375}, {"start": 71.07, "end": 71.37, "word": 
" use", "probability": 0.876953125}, {"start": 71.37, "end": 72.47, "word": " because", "probability": 0.39794921875}, {"start": 72.47, "end": 72.63, "word": " we", "probability": 0.86767578125}, {"start": 72.63, "end": 72.89, "word": " just", "probability": 0.8876953125}, {"start": 72.89, "end": 73.25, "word": " use", "probability": 0.666015625}, {"start": 73.25, "end": 73.59, "word": " the", "probability": 0.84033203125}, {"start": 73.59, "end": 74.67, "word": " random", "probability": 0.8466796875}, {"start": 74.67, "end": 75.21, "word": " tables,", "probability": 0.79248046875}, {"start": 76.47, "end": 76.87, "word": " random", "probability": 0.74169921875}, {"start": 76.87, "end": 77.19, "word": " number", "probability": 0.81494140625}, {"start": 77.19, "end": 77.75, "word": " tables,", "probability": 0.74169921875}, {"start": 77.91, "end": 78.27, "word": " or", "probability": 0.1959228515625}, {"start": 78.27, "end": 78.43, "word": " by", "probability": 0.9375}, {"start": 78.43, "end": 78.75, "word": " using", "probability": 0.9345703125}, {"start": 78.75, "end": 80.59, "word": " any", "probability": 0.65478515625}, {"start": 80.59, "end": 81.93, "word": " statistical", "probability": 0.873046875}, {"start": 81.93, "end": 82.49, "word": " software.", "probability": 0.908203125}, {"start": 83.63, "end": 84.01, "word": " But", "probability": 0.9453125}, {"start": 84.01, "end": 84.41, "word": " the", "probability": 0.87548828125}, {"start": 84.41, "end": 86.05, "word": " disadvantage", "probability": 0.57568359375}, {"start": 86.05, "end": 87.69, "word": " of", "probability": 0.9609375}, {"start": 87.69, "end": 88.01, "word": " this", "probability": 0.94970703125}, {"start": 88.01, "end": 88.49, "word": " technique", "probability": 0.95166015625}], "temperature": 1.0}, {"id": 4, "seek": 11071, "start": 97.59, "end": 110.71, "text": " So it might be this sample is not representative of the entire population. 
So this is the mainly disadvantage of this sampling technique. So it can be used", "tokens": [407, 309, 1062, 312, 341, 6889, 307, 406, 12424, 295, 264, 2302, 4415, 13, 407, 341, 307, 264, 8704, 24292, 295, 341, 21179, 6532, 13, 407, 309, 393, 312, 1143], "avg_logprob": -0.32888104838709675, "compression_ratio": 1.3565217391304347, "no_speech_prob": 0.0, "words": [{"start": 97.59, "end": 98.25, "word": " So", "probability": 0.0596923828125}, {"start": 98.25, "end": 98.91, "word": " it", "probability": 0.359130859375}, {"start": 98.91, "end": 99.15, "word": " might", "probability": 0.59130859375}, {"start": 99.15, "end": 99.37, "word": " be", "probability": 0.8984375}, {"start": 99.37, "end": 99.67, "word": " this", "probability": 0.55859375}, {"start": 99.67, "end": 99.91, "word": " sample", "probability": 0.83984375}, {"start": 99.91, "end": 100.09, "word": " is", "probability": 0.90478515625}, {"start": 100.09, "end": 100.25, "word": " not", "probability": 0.9384765625}, {"start": 100.25, "end": 100.83, "word": " representative", "probability": 0.89794921875}, {"start": 100.83, "end": 101.33, "word": " of", "probability": 0.9619140625}, {"start": 101.33, "end": 101.49, "word": " the", "probability": 0.89794921875}, {"start": 101.49, "end": 101.89, "word": " entire", "probability": 0.8974609375}, {"start": 101.89, "end": 102.23, "word": " population.", "probability": 0.830078125}, {"start": 103.23, "end": 103.35, "word": " So", "probability": 0.85302734375}, {"start": 103.35, "end": 103.57, "word": " this", "probability": 0.89013671875}, {"start": 103.57, "end": 103.75, "word": " is", "probability": 0.94091796875}, {"start": 103.75, "end": 103.99, "word": " the", "probability": 0.6142578125}, {"start": 103.99, "end": 104.53, "word": " mainly", "probability": 0.84423828125}, {"start": 104.53, "end": 105.91, "word": " disadvantage", "probability": 0.64892578125}, {"start": 105.91, "end": 107.69, "word": " of", "probability": 0.96240234375}, {"start": 107.69, "end": 
108.13, "word": " this", "probability": 0.9326171875}, {"start": 108.13, "end": 108.49, "word": " sampling", "probability": 0.97607421875}, {"start": 108.49, "end": 108.99, "word": " technique.", "probability": 0.94677734375}, {"start": 109.59, "end": 109.93, "word": " So", "probability": 0.90869140625}, {"start": 109.93, "end": 110.05, "word": " it", "probability": 0.90625}, {"start": 110.05, "end": 110.23, "word": " can", "probability": 0.94921875}, {"start": 110.23, "end": 110.39, "word": " be", "probability": 0.9501953125}, {"start": 110.39, "end": 110.71, "word": " used", "probability": 0.919921875}], "temperature": 1.0}, {"id": 5, "seek": 13211, "start": 112.23, "end": 132.11, "text": " unless the population is not symmetric or the population is not heterogeneous. I mean if the population has the same characteristics, then we can use simple or systematic sample. But if there are big differences or big disturbances between", "tokens": [5969, 264, 4415, 307, 406, 32330, 420, 264, 4415, 307, 406, 20789, 31112, 13, 286, 914, 498, 264, 4415, 575, 264, 912, 10891, 11, 550, 321, 393, 764, 2199, 420, 27249, 6889, 13, 583, 498, 456, 366, 955, 7300, 420, 955, 18071, 2676, 1296], "avg_logprob": -0.19583333598242866, "compression_ratio": 1.6326530612244898, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 112.23, "end": 112.89, "word": " unless", "probability": 0.260986328125}, {"start": 112.89, "end": 113.75, "word": " the", "probability": 0.89404296875}, {"start": 113.75, "end": 114.19, "word": " population", "probability": 0.94482421875}, {"start": 114.19, "end": 114.47, "word": " is", "probability": 0.9521484375}, {"start": 114.47, "end": 114.73, "word": " not", "probability": 0.93701171875}, {"start": 114.73, "end": 115.19, "word": " symmetric", "probability": 0.79931640625}, {"start": 115.19, "end": 116.25, "word": " or", "probability": 0.65380859375}, {"start": 116.25, "end": 116.35, "word": " the", "probability": 0.6787109375}, {"start": 116.35, 
"end": 116.73, "word": " population", "probability": 0.94140625}, {"start": 116.73, "end": 117.03, "word": " is", "probability": 0.95068359375}, {"start": 117.03, "end": 117.47, "word": " not", "probability": 0.95068359375}, {"start": 117.47, "end": 118.67, "word": " heterogeneous.", "probability": 0.8017578125}, {"start": 119.41, "end": 119.57, "word": " I", "probability": 0.89013671875}, {"start": 119.57, "end": 119.71, "word": " mean", "probability": 0.96533203125}, {"start": 119.71, "end": 119.89, "word": " if", "probability": 0.59423828125}, {"start": 119.89, "end": 120.09, "word": " the", "probability": 0.916015625}, {"start": 120.09, "end": 120.53, "word": " population", "probability": 0.92822265625}, {"start": 120.53, "end": 121.69, "word": " has", "probability": 0.94287109375}, {"start": 121.69, "end": 122.05, "word": " the", "probability": 0.91796875}, {"start": 122.05, "end": 122.51, "word": " same", "probability": 0.91162109375}, {"start": 122.51, "end": 123.31, "word": " characteristics,", "probability": 0.89306640625}, {"start": 124.07, "end": 124.33, "word": " then", "probability": 0.869140625}, {"start": 124.33, "end": 124.51, "word": " we", "probability": 0.93017578125}, {"start": 124.51, "end": 124.73, "word": " can", "probability": 0.9501953125}, {"start": 124.73, "end": 125.11, "word": " use", "probability": 0.89111328125}, {"start": 125.11, "end": 126.03, "word": " simple", "probability": 0.89501953125}, {"start": 126.03, "end": 126.55, "word": " or", "probability": 0.96142578125}, {"start": 126.55, "end": 127.31, "word": " systematic", "probability": 0.91650390625}, {"start": 127.31, "end": 127.81, "word": " sample.", "probability": 0.4375}, {"start": 128.25, "end": 128.51, "word": " But", "probability": 0.94384765625}, {"start": 128.51, "end": 128.71, "word": " if", "probability": 0.880859375}, {"start": 128.71, "end": 128.87, "word": " there", "probability": 0.890625}, {"start": 128.87, "end": 129.09, "word": " are", "probability": 
0.9462890625}, {"start": 129.09, "end": 129.51, "word": " big", "probability": 0.92822265625}, {"start": 129.51, "end": 130.05, "word": " differences", "probability": 0.78759765625}, {"start": 130.05, "end": 130.37, "word": " or", "probability": 0.93505859375}, {"start": 130.37, "end": 130.65, "word": " big", "probability": 0.9375}, {"start": 130.65, "end": 131.65, "word": " disturbances", "probability": 0.845458984375}, {"start": 131.65, "end": 132.11, "word": " between", "probability": 0.875}], "temperature": 1.0}, {"id": 6, "seek": 15425, "start": 132.99, "end": 154.25, "text": " the items of the population, I mean between or among the individuals. In this case, stratified sampling is better than using a simple random sample. Stratified samples ensure representation of individuals across the entire population. If you remember last time we said", "tokens": [264, 4754, 295, 264, 4415, 11, 286, 914, 1296, 420, 3654, 264, 5346, 13, 682, 341, 1389, 11, 23674, 2587, 21179, 307, 1101, 813, 1228, 257, 2199, 4974, 6889, 13, 745, 4481, 2587, 10938, 5586, 10290, 295, 5346, 2108, 264, 2302, 4415, 13, 759, 291, 1604, 1036, 565, 321, 848], "avg_logprob": -0.2798713305417229, "compression_ratio": 1.6303030303030304, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 132.99, "end": 133.27, "word": " the", "probability": 0.37939453125}, {"start": 133.27, "end": 133.69, "word": " items", "probability": 0.63037109375}, {"start": 133.69, "end": 133.89, "word": " of", "probability": 0.95751953125}, {"start": 133.89, "end": 134.01, "word": " the", "probability": 0.9111328125}, {"start": 134.01, "end": 134.45, "word": " population,", "probability": 0.96728515625}, {"start": 134.65, "end": 134.67, "word": " I", "probability": 0.828125}, {"start": 134.67, "end": 134.77, "word": " mean", "probability": 0.97216796875}, {"start": 134.77, "end": 135.07, "word": " between", "probability": 0.416259765625}, {"start": 135.07, "end": 135.51, "word": " or", "probability": 
0.78271484375}, {"start": 135.51, "end": 135.87, "word": " among", "probability": 0.9345703125}, {"start": 135.87, "end": 136.19, "word": " the", "probability": 0.91748046875}, {"start": 136.19, "end": 137.27, "word": " individuals.", "probability": 0.85205078125}, {"start": 138.03, "end": 138.11, "word": " In", "probability": 0.958984375}, {"start": 138.11, "end": 138.37, "word": " this", "probability": 0.94873046875}, {"start": 138.37, "end": 138.73, "word": " case,", "probability": 0.9130859375}, {"start": 139.59, "end": 141.55, "word": " stratified", "probability": 0.7139892578125}, {"start": 141.55, "end": 143.17, "word": " sampling", "probability": 0.3076171875}, {"start": 143.17, "end": 143.37, "word": " is", "probability": 0.94580078125}, {"start": 143.37, "end": 143.67, "word": " better", "probability": 0.90673828125}, {"start": 143.67, "end": 144.27, "word": " than", "probability": 0.95556640625}, {"start": 144.27, "end": 144.71, "word": " using", "probability": 0.9443359375}, {"start": 144.71, "end": 145.67, "word": " a", "probability": 0.311279296875}, {"start": 145.67, "end": 145.91, "word": " simple", "probability": 0.227783203125}, {"start": 145.91, "end": 146.19, "word": " random", "probability": 0.8134765625}, {"start": 146.19, "end": 146.63, "word": " sample.", "probability": 0.77734375}, {"start": 147.97, "end": 148.61, "word": " Stratified", "probability": 0.8971354166666666}, {"start": 148.61, "end": 148.99, "word": " samples", "probability": 0.6015625}, {"start": 148.99, "end": 149.59, "word": " ensure", "probability": 0.87646484375}, {"start": 149.59, "end": 150.17, "word": " representation", "probability": 0.640625}, {"start": 150.17, "end": 150.71, "word": " of", "probability": 0.966796875}, {"start": 150.71, "end": 151.33, "word": " individuals", "probability": 0.8173828125}, {"start": 151.33, "end": 151.85, "word": " across", "probability": 0.84765625}, {"start": 151.85, "end": 152.03, "word": " the", "probability": 0.818359375}, 
{"start": 152.03, "end": 152.31, "word": " entire", "probability": 0.9248046875}, {"start": 152.31, "end": 152.73, "word": " population.", "probability": 0.9365234375}, {"start": 152.89, "end": 153.01, "word": " If", "probability": 0.89892578125}, {"start": 153.01, "end": 153.07, "word": " you", "probability": 0.96484375}, {"start": 153.07, "end": 153.31, "word": " remember", "probability": 0.876953125}, {"start": 153.31, "end": 153.57, "word": " last", "probability": 0.7099609375}, {"start": 153.57, "end": 153.83, "word": " time", "probability": 0.89697265625}, {"start": 153.83, "end": 153.99, "word": " we", "probability": 0.64892578125}, {"start": 153.99, "end": 154.25, "word": " said", "probability": 0.943359375}], "temperature": 1.0}, {"id": 7, "seek": 17574, "start": 155.6, "end": 175.74, "text": " A IUG population can be splitted according to gender, either males or females, or can be splitted according to students' levels. First level, second level and fourth level, and so on. The last type of sampling was clusters. 
Cluster sampling is more cost effective.", "tokens": [316, 44218, 38, 4415, 393, 312, 4732, 3944, 4650, 281, 7898, 11, 2139, 20776, 420, 21529, 11, 420, 393, 312, 4732, 3944, 4650, 281, 1731, 6, 4358, 13, 2386, 1496, 11, 1150, 1496, 293, 6409, 1496, 11, 293, 370, 322, 13, 440, 1036, 2010, 295, 21179, 390, 23313, 13, 2033, 8393, 21179, 307, 544, 2063, 4942, 13], "avg_logprob": -0.28286638393484315, "compression_ratio": 1.6158536585365855, "no_speech_prob": 0.0, "words": [{"start": 155.6, "end": 155.94, "word": " A", "probability": 0.2047119140625}, {"start": 155.94, "end": 156.3, "word": " IUG", "probability": 0.6131591796875}, {"start": 156.3, "end": 156.8, "word": " population", "probability": 0.95166015625}, {"start": 156.8, "end": 157.2, "word": " can", "probability": 0.94140625}, {"start": 157.2, "end": 157.42, "word": " be", "probability": 0.9541015625}, {"start": 157.42, "end": 157.96, "word": " splitted", "probability": 0.6878662109375}, {"start": 157.96, "end": 158.96, "word": " according", "probability": 0.91748046875}, {"start": 158.96, "end": 159.22, "word": " to", "probability": 0.96875}, {"start": 159.22, "end": 159.54, "word": " gender,", "probability": 0.90283203125}, {"start": 159.72, "end": 159.92, "word": " either", "probability": 0.92919921875}, {"start": 159.92, "end": 160.34, "word": " males", "probability": 0.9189453125}, {"start": 160.34, "end": 160.6, "word": " or", "probability": 0.958984375}, {"start": 160.6, "end": 161.0, "word": " females,", "probability": 0.9384765625}, {"start": 161.76, "end": 161.92, "word": " or", "probability": 0.95751953125}, {"start": 161.92, "end": 162.12, "word": " can", "probability": 0.880859375}, {"start": 162.12, "end": 162.32, "word": " be", "probability": 0.94677734375}, {"start": 162.32, "end": 163.32, "word": " splitted", "probability": 0.73583984375}, {"start": 163.32, "end": 163.82, "word": " according", "probability": 0.91552734375}, {"start": 163.82, "end": 164.44, "word": " to", "probability": 
0.96630859375}, {"start": 164.44, "end": 165.68, "word": " students'", "probability": 0.61572265625}, {"start": 165.86, "end": 166.2, "word": " levels.", "probability": 0.89208984375}, {"start": 167.22, "end": 167.66, "word": " First", "probability": 0.80859375}, {"start": 167.66, "end": 167.94, "word": " level,", "probability": 0.8466796875}, {"start": 168.1, "end": 168.28, "word": " second", "probability": 0.8505859375}, {"start": 168.28, "end": 168.64, "word": " level", "probability": 0.58642578125}, {"start": 168.64, "end": 168.84, "word": " and", "probability": 0.472900390625}, {"start": 168.84, "end": 169.16, "word": " fourth", "probability": 0.8857421875}, {"start": 169.16, "end": 169.48, "word": " level,", "probability": 0.93798828125}, {"start": 169.68, "end": 169.82, "word": " and", "probability": 0.923828125}, {"start": 169.82, "end": 169.98, "word": " so", "probability": 0.95361328125}, {"start": 169.98, "end": 170.14, "word": " on.", "probability": 0.95068359375}, {"start": 170.86, "end": 171.04, "word": " The", "probability": 0.873046875}, {"start": 171.04, "end": 171.24, "word": " last", "probability": 0.87841796875}, {"start": 171.24, "end": 171.5, "word": " type", "probability": 0.97998046875}, {"start": 171.5, "end": 171.7, "word": " of", "probability": 0.9677734375}, {"start": 171.7, "end": 171.96, "word": " sampling", "probability": 0.9580078125}, {"start": 171.96, "end": 172.36, "word": " was", "probability": 0.92822265625}, {"start": 172.36, "end": 172.94, "word": " clusters.", "probability": 0.89697265625}, {"start": 174.0, "end": 174.4, "word": " Cluster", "probability": 0.64599609375}, {"start": 174.4, "end": 174.78, "word": " sampling", "probability": 0.5517578125}, {"start": 174.78, "end": 174.96, "word": " is", "probability": 0.52001953125}, {"start": 174.96, "end": 175.14, "word": " more", "probability": 0.9150390625}, {"start": 175.14, "end": 175.34, "word": " cost", "probability": 0.87841796875}, {"start": 175.34, "end": 175.74, 
"word": " effective.", "probability": 0.5888671875}], "temperature": 1.0}, {"id": 8, "seek": 19754, "start": 177.04, "end": 197.54, "text": " Because in this case, you have to split the population into many clusters, then you can choose a random of these clusters. Also, it's less efficient unless you use a large sample. For this reason, it's more cost effective than using the other sampling techniques.", "tokens": [1436, 294, 341, 1389, 11, 291, 362, 281, 7472, 264, 4415, 666, 867, 23313, 11, 550, 291, 393, 2826, 257, 4974, 295, 613, 23313, 13, 2743, 11, 309, 311, 1570, 7148, 5969, 291, 764, 257, 2416, 6889, 13, 1171, 341, 1778, 11, 309, 311, 544, 2063, 4942, 813, 1228, 264, 661, 21179, 7512, 13], "avg_logprob": -0.15497158494862642, "compression_ratio": 1.543859649122807, "no_speech_prob": 0.0, "words": [{"start": 177.04, "end": 177.7, "word": " Because", "probability": 0.60791015625}, {"start": 177.7, "end": 177.84, "word": " in", "probability": 0.78466796875}, {"start": 177.84, "end": 178.02, "word": " this", "probability": 0.9404296875}, {"start": 178.02, "end": 178.28, "word": " case,", "probability": 0.927734375}, {"start": 178.54, "end": 178.62, "word": " you", "probability": 0.93359375}, {"start": 178.62, "end": 178.8, "word": " have", "probability": 0.923828125}, {"start": 178.8, "end": 179.12, "word": " to", "probability": 0.966796875}, {"start": 179.12, "end": 179.94, "word": " split", "probability": 0.95068359375}, {"start": 179.94, "end": 180.28, "word": " the", "probability": 0.84521484375}, {"start": 180.28, "end": 180.6, "word": " population", "probability": 0.9658203125}, {"start": 180.6, "end": 180.92, "word": " into", "probability": 0.7978515625}, {"start": 180.92, "end": 181.28, "word": " many", "probability": 0.89306640625}, {"start": 181.28, "end": 182.5, "word": " clusters,", "probability": 0.90576171875}, {"start": 182.62, "end": 182.78, "word": " then", "probability": 0.79638671875}, {"start": 182.78, "end": 182.92, "word": " you", 
"probability": 0.9423828125}, {"start": 182.92, "end": 183.14, "word": " can", "probability": 0.90869140625}, {"start": 183.14, "end": 183.76, "word": " choose", "probability": 0.888671875}, {"start": 183.76, "end": 184.44, "word": " a", "probability": 0.953125}, {"start": 184.44, "end": 184.74, "word": " random", "probability": 0.8603515625}, {"start": 184.74, "end": 185.7, "word": " of", "probability": 0.94091796875}, {"start": 185.7, "end": 186.12, "word": " these", "probability": 0.8134765625}, {"start": 186.12, "end": 186.86, "word": " clusters.", "probability": 0.921875}, {"start": 187.36, "end": 187.8, "word": " Also,", "probability": 0.87841796875}, {"start": 187.96, "end": 188.06, "word": " it's", "probability": 0.8583984375}, {"start": 188.06, "end": 188.32, "word": " less", "probability": 0.95166015625}, {"start": 188.32, "end": 188.82, "word": " efficient", "probability": 0.880859375}, {"start": 188.82, "end": 189.4, "word": " unless", "probability": 0.77197265625}, {"start": 189.4, "end": 190.18, "word": " you", "probability": 0.95068359375}, {"start": 190.18, "end": 190.52, "word": " use", "probability": 0.880859375}, {"start": 190.52, "end": 190.8, "word": " a", "probability": 0.98974609375}, {"start": 190.8, "end": 191.16, "word": " large", "probability": 0.9521484375}, {"start": 191.16, "end": 191.58, "word": " sample.", "probability": 0.91064453125}, {"start": 192.26, "end": 192.5, "word": " For", "probability": 0.9521484375}, {"start": 192.5, "end": 192.72, "word": " this", "probability": 0.9365234375}, {"start": 192.72, "end": 192.98, "word": " reason,", "probability": 0.97021484375}, {"start": 193.06, "end": 193.28, "word": " it's", "probability": 0.96044921875}, {"start": 193.28, "end": 193.52, "word": " more", "probability": 0.92529296875}, {"start": 193.52, "end": 193.84, "word": " cost", "probability": 0.92431640625}, {"start": 193.84, "end": 194.46, "word": " effective", "probability": 0.493408203125}, {"start": 194.46, "end": 195.38, 
"word": " than", "probability": 0.94384765625}, {"start": 195.38, "end": 195.86, "word": " using", "probability": 0.9365234375}, {"start": 195.86, "end": 196.46, "word": " the", "probability": 0.7802734375}, {"start": 196.46, "end": 196.7, "word": " other", "probability": 0.88134765625}, {"start": 196.7, "end": 197.04, "word": " sampling", "probability": 0.5625}, {"start": 197.04, "end": 197.54, "word": " techniques.", "probability": 0.8916015625}], "temperature": 1.0}, {"id": 9, "seek": 22024, "start": 198.48, "end": 220.24, "text": " So, these techniques are used based on the study you have. Sometimes simple random sampling is fine and you can go ahead and use it. Most of the time, stratified random sampling is much better. So, it depends on the population you have underlying your study. That was what we talked about last Sunday.", "tokens": [407, 11, 613, 7512, 366, 1143, 2361, 322, 264, 2979, 291, 362, 13, 4803, 2199, 4974, 21179, 307, 2489, 293, 291, 393, 352, 2286, 293, 764, 309, 13, 4534, 295, 264, 565, 11, 23674, 2587, 4974, 21179, 307, 709, 1101, 13, 407, 11, 309, 5946, 322, 264, 4415, 291, 362, 14217, 428, 2979, 13, 663, 390, 437, 321, 2825, 466, 1036, 7776, 13], "avg_logprob": -0.16772461496293545, "compression_ratio": 1.5894736842105264, "no_speech_prob": 0.0, "words": [{"start": 198.48000000000002, "end": 199.08, "word": " So,", "probability": 0.5361328125}, {"start": 199.9, "end": 200.26, "word": " these", "probability": 0.76123046875}, {"start": 200.26, "end": 200.64, "word": " techniques", "probability": 0.91455078125}, {"start": 200.64, "end": 201.32, "word": " are", "probability": 0.93017578125}, {"start": 201.32, "end": 201.6, "word": " used", "probability": 0.90283203125}, {"start": 201.6, "end": 201.92, "word": " based", "probability": 0.89990234375}, {"start": 201.92, "end": 202.18, "word": " on", "probability": 0.94970703125}, {"start": 202.18, "end": 202.4, "word": " the", "probability": 0.8896484375}, {"start": 202.4, "end": 202.74, "word": 
" study", "probability": 0.89111328125}, {"start": 202.74, "end": 202.92, "word": " you", "probability": 0.92626953125}, {"start": 202.92, "end": 203.14, "word": " have.", "probability": 0.9345703125}, {"start": 203.26, "end": 203.7, "word": " Sometimes", "probability": 0.85302734375}, {"start": 203.7, "end": 204.08, "word": " simple", "probability": 0.4091796875}, {"start": 204.08, "end": 204.26, "word": " random", "probability": 0.69580078125}, {"start": 204.26, "end": 204.5, "word": " sampling", "probability": 0.86279296875}, {"start": 204.5, "end": 204.72, "word": " is", "probability": 0.94677734375}, {"start": 204.72, "end": 205.02, "word": " fine", "probability": 0.93017578125}, {"start": 205.02, "end": 205.18, "word": " and", "probability": 0.71484375}, {"start": 205.18, "end": 205.28, "word": " you", "probability": 0.9287109375}, {"start": 205.28, "end": 205.58, "word": " can", "probability": 0.94384765625}, {"start": 205.58, "end": 206.1, "word": " go", "probability": 0.92626953125}, {"start": 206.1, "end": 206.22, "word": " ahead", "probability": 0.88525390625}, {"start": 206.22, "end": 206.36, "word": " and", "probability": 0.93603515625}, {"start": 206.36, "end": 206.58, "word": " use", "probability": 0.87939453125}, {"start": 206.58, "end": 206.74, "word": " it.", "probability": 0.95068359375}, {"start": 207.48, "end": 207.74, "word": " Most", "probability": 0.87548828125}, {"start": 207.74, "end": 207.86, "word": " of", "probability": 0.97021484375}, {"start": 207.86, "end": 207.96, "word": " the", "probability": 0.92041015625}, {"start": 207.96, "end": 208.32, "word": " time,", "probability": 0.87109375}, {"start": 208.8, "end": 209.36, "word": " stratified", "probability": 0.951904296875}, {"start": 209.36, "end": 209.64, "word": " random", "probability": 0.8994140625}, {"start": 209.64, "end": 210.0, "word": " sampling", "probability": 0.97265625}, {"start": 210.0, "end": 210.2, "word": " is", "probability": 0.94775390625}, {"start": 210.2, "end": 
210.42, "word": " much", "probability": 0.89794921875}, {"start": 210.42, "end": 210.72, "word": " better.", "probability": 0.89892578125}, {"start": 211.86, "end": 212.18, "word": " So,", "probability": 0.9296875}, {"start": 212.4, "end": 212.62, "word": " it", "probability": 0.94384765625}, {"start": 212.62, "end": 212.96, "word": " depends", "probability": 0.892578125}, {"start": 212.96, "end": 213.34, "word": " on", "probability": 0.94970703125}, {"start": 213.34, "end": 213.62, "word": " the", "probability": 0.9287109375}, {"start": 213.62, "end": 214.5, "word": " population", "probability": 0.89306640625}, {"start": 214.5, "end": 214.9, "word": " you", "probability": 0.93310546875}, {"start": 214.9, "end": 215.2, "word": " have", "probability": 0.94873046875}, {"start": 215.2, "end": 215.76, "word": " underlying", "probability": 0.9296875}, {"start": 215.76, "end": 216.18, "word": " your", "probability": 0.86962890625}, {"start": 216.18, "end": 216.94, "word": " study.", "probability": 0.919921875}, {"start": 217.68, "end": 217.94, "word": " That", "probability": 0.88720703125}, {"start": 217.94, "end": 218.24, "word": " was", "probability": 0.91650390625}, {"start": 218.24, "end": 218.46, "word": " what", "probability": 0.9404296875}, {"start": 218.46, "end": 218.6, "word": " we", "probability": 0.95703125}, {"start": 218.6, "end": 218.86, "word": " talked", "probability": 0.8583984375}, {"start": 218.86, "end": 219.26, "word": " about", "probability": 0.90771484375}, {"start": 219.26, "end": 219.74, "word": " last", "probability": 0.85888671875}, {"start": 219.74, "end": 220.24, "word": " Sunday.", "probability": 0.8876953125}], "temperature": 1.0}, {"id": 10, "seek": 24766, "start": 223.86, "end": 247.66, "text": " Now, suppose we design a questionnaire or survey. You have to know, number one, what's the purpose of the survey. In this case, you can determine the frame of the population. 
Next, survey", "tokens": [823, 11, 7297, 321, 1715, 257, 44702, 420, 8984, 13, 509, 362, 281, 458, 11, 1230, 472, 11, 437, 311, 264, 4334, 295, 264, 8984, 13, 682, 341, 1389, 11, 291, 393, 6997, 264, 3920, 295, 264, 4415, 13, 3087, 11, 8984], "avg_logprob": -0.23074127213899479, "compression_ratio": 1.4029850746268657, "no_speech_prob": 0.0, "words": [{"start": 223.86, "end": 224.2, "word": " Now,", "probability": 0.8857421875}, {"start": 224.58, "end": 225.06, "word": " suppose", "probability": 0.85986328125}, {"start": 225.06, "end": 225.3, "word": " we", "probability": 0.943359375}, {"start": 225.3, "end": 225.62, "word": " design", "probability": 0.66650390625}, {"start": 225.62, "end": 225.78, "word": " a", "probability": 0.93896484375}, {"start": 225.78, "end": 226.2, "word": " questionnaire", "probability": 0.9677734375}, {"start": 226.2, "end": 227.34, "word": " or", "probability": 0.72119140625}, {"start": 227.34, "end": 227.78, "word": " survey.", "probability": 0.85888671875}, {"start": 228.64, "end": 228.84, "word": " You", "probability": 0.53125}, {"start": 228.84, "end": 229.04, "word": " have", "probability": 0.9482421875}, {"start": 229.04, "end": 229.16, "word": " to", "probability": 0.97412109375}, {"start": 229.16, "end": 229.46, "word": " know,", "probability": 0.88818359375}, {"start": 230.38, "end": 230.78, "word": " number", "probability": 0.90185546875}, {"start": 230.78, "end": 231.06, "word": " one,", "probability": 0.8203125}, {"start": 231.68, "end": 232.14, "word": " what's", "probability": 0.894775390625}, {"start": 232.14, "end": 232.42, "word": " the", "probability": 0.927734375}, {"start": 232.42, "end": 232.98, "word": " purpose", "probability": 0.85205078125}, {"start": 232.98, "end": 233.36, "word": " of", "probability": 0.96630859375}, {"start": 233.36, "end": 233.5, "word": " the", "probability": 0.8349609375}, {"start": 233.5, "end": 233.78, "word": " survey.", "probability": 0.89892578125}, {"start": 235.74, "end": 236.38, 
"word": " In", "probability": 0.93359375}, {"start": 236.38, "end": 236.6, "word": " this", "probability": 0.94580078125}, {"start": 236.6, "end": 236.86, "word": " case,", "probability": 0.921875}, {"start": 236.9, "end": 237.02, "word": " you", "probability": 0.96044921875}, {"start": 237.02, "end": 237.22, "word": " can", "probability": 0.9404296875}, {"start": 237.22, "end": 237.8, "word": " determine", "probability": 0.9267578125}, {"start": 237.8, "end": 239.6, "word": " the", "probability": 0.89990234375}, {"start": 239.6, "end": 240.02, "word": " frame", "probability": 0.873046875}, {"start": 240.02, "end": 240.22, "word": " of", "probability": 0.962890625}, {"start": 240.22, "end": 240.34, "word": " the", "probability": 0.88134765625}, {"start": 240.34, "end": 240.7, "word": " population.", "probability": 0.275146484375}, {"start": 241.8, "end": 242.04, "word": " Next,", "probability": 0.90478515625}, {"start": 245.48, "end": 247.66, "word": " survey", "probability": 0.81591796875}], "temperature": 1.0}, {"id": 11, "seek": 27911, "start": 253.01, "end": 279.11, "text": " Is the survey based on a probability sample? If the answer is yes, then go ahead and use one of the non-probability sampling techniques either similar than some certified cluster or systematic. Next, we have to distinguish between four types of errors, at least now. 
One is called coverage error.", "tokens": [1119, 264, 8984, 2361, 322, 257, 8482, 6889, 30, 759, 264, 1867, 307, 2086, 11, 550, 352, 2286, 293, 764, 472, 295, 264, 2107, 12, 41990, 2310, 21179, 7512, 2139, 2531, 813, 512, 18580, 13630, 420, 27249, 13, 3087, 11, 321, 362, 281, 20206, 1296, 1451, 3467, 295, 13603, 11, 412, 1935, 586, 13, 1485, 307, 1219, 9645, 6713, 13], "avg_logprob": -0.23283810889134643, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 253.01, "end": 253.69, "word": " Is", "probability": 0.2626953125}, {"start": 253.69, "end": 254.37, "word": " the", "probability": 0.8291015625}, {"start": 254.37, "end": 254.75, "word": " survey", "probability": 0.83447265625}, {"start": 254.75, "end": 255.21, "word": " based", "probability": 0.89453125}, {"start": 255.21, "end": 255.53, "word": " on", "probability": 0.9541015625}, {"start": 255.53, "end": 255.69, "word": " a", "probability": 0.77001953125}, {"start": 255.69, "end": 256.01, "word": " probability", "probability": 0.8671875}, {"start": 256.01, "end": 256.53, "word": " sample?", "probability": 0.6748046875}, {"start": 257.67, "end": 258.35, "word": " If", "probability": 0.83984375}, {"start": 258.35, "end": 258.63, "word": " the", "probability": 0.85595703125}, {"start": 258.63, "end": 258.91, "word": " answer", "probability": 0.96728515625}, {"start": 258.91, "end": 259.09, "word": " is", "probability": 0.95166015625}, {"start": 259.09, "end": 259.35, "word": " yes,", "probability": 0.8369140625}, {"start": 259.43, "end": 259.57, "word": " then", "probability": 0.84228515625}, {"start": 259.57, "end": 259.75, "word": " go", "probability": 0.8818359375}, {"start": 259.75, "end": 259.91, "word": " ahead", "probability": 0.87255859375}, {"start": 259.91, "end": 260.07, "word": " and", "probability": 0.93212890625}, {"start": 260.07, "end": 260.31, "word": " use", "probability": 0.86767578125}, {"start": 260.31, "end": 260.63, "word": " one", "probability": 0.9052734375}, {"start": 
260.63, "end": 260.83, "word": " of", "probability": 0.966796875}, {"start": 260.83, "end": 260.95, "word": " the", "probability": 0.89794921875}, {"start": 260.95, "end": 261.15, "word": " non", "probability": 0.90576171875}, {"start": 261.15, "end": 261.63, "word": "-probability", "probability": 0.8230794270833334}, {"start": 261.63, "end": 262.05, "word": " sampling", "probability": 0.76904296875}, {"start": 262.05, "end": 262.57, "word": " techniques", "probability": 0.92919921875}, {"start": 262.57, "end": 262.91, "word": " either", "probability": 0.445068359375}, {"start": 262.91, "end": 264.03, "word": " similar", "probability": 0.447998046875}, {"start": 264.03, "end": 264.29, "word": " than", "probability": 0.2451171875}, {"start": 264.29, "end": 264.57, "word": " some", "probability": 0.417724609375}, {"start": 264.57, "end": 265.13, "word": " certified", "probability": 0.87939453125}, {"start": 265.13, "end": 265.79, "word": " cluster", "probability": 0.8310546875}, {"start": 265.79, "end": 266.07, "word": " or", "probability": 0.8974609375}, {"start": 266.07, "end": 266.61, "word": " systematic.", "probability": 0.88330078125}, {"start": 268.83, "end": 269.51, "word": " Next,", "probability": 0.8505859375}, {"start": 270.37, "end": 270.91, "word": " we", "probability": 0.8798828125}, {"start": 270.91, "end": 271.13, "word": " have", "probability": 0.9326171875}, {"start": 271.13, "end": 271.25, "word": " to", "probability": 0.96533203125}, {"start": 271.25, "end": 271.73, "word": " distinguish", "probability": 0.89453125}, {"start": 271.73, "end": 272.23, "word": " between", "probability": 0.8818359375}, {"start": 272.23, "end": 272.65, "word": " four", "probability": 0.75634765625}, {"start": 272.65, "end": 273.15, "word": " types", "probability": 0.802734375}, {"start": 273.15, "end": 273.33, "word": " of", "probability": 0.97021484375}, {"start": 273.33, "end": 273.71, "word": " errors,", "probability": 0.84423828125}, {"start": 274.37, "end": 
274.57, "word": " at", "probability": 0.95751953125}, {"start": 274.57, "end": 274.77, "word": " least", "probability": 0.94677734375}, {"start": 274.77, "end": 275.09, "word": " now.", "probability": 0.9306640625}, {"start": 277.07, "end": 277.75, "word": " One", "probability": 0.876953125}, {"start": 277.75, "end": 277.91, "word": " is", "probability": 0.9482421875}, {"start": 277.91, "end": 278.27, "word": " called", "probability": 0.8828125}, {"start": 278.27, "end": 278.77, "word": " coverage", "probability": 0.56591796875}, {"start": 278.77, "end": 279.11, "word": " error.", "probability": 0.87060546875}], "temperature": 1.0}, {"id": 12, "seek": 30760, "start": 280.9, "end": 307.6, "text": " You have to ask yourself, is the frame appropriate? I mean, frame appropriate means that you have all the individual list, then you can choose one of these. For example, suppose we divide Gaza Strip into four governorates. North Gaza, Gaza Middle Area, Khanon and Rafah. So we have five.", "tokens": [509, 362, 281, 1029, 1803, 11, 307, 264, 3920, 6854, 30, 286, 914, 11, 3920, 6854, 1355, 300, 291, 362, 439, 264, 2609, 1329, 11, 550, 291, 393, 2826, 472, 295, 613, 13, 1171, 1365, 11, 7297, 321, 9845, 37800, 745, 8400, 666, 1451, 12965, 1024, 13, 4067, 37800, 11, 37800, 10775, 19405, 11, 18136, 266, 293, 29611, 545, 13, 407, 321, 362, 1732, 13], "avg_logprob": -0.25852271553241846, "compression_ratio": 1.5157894736842106, "no_speech_prob": 0.0, "words": [{"start": 280.9, "end": 281.12, "word": " You", "probability": 0.51220703125}, {"start": 281.12, "end": 281.28, "word": " have", "probability": 0.93798828125}, {"start": 281.28, "end": 281.38, "word": " to", "probability": 0.97412109375}, {"start": 281.38, "end": 281.62, "word": " ask", "probability": 0.93505859375}, {"start": 281.62, "end": 282.18, "word": " yourself,", "probability": 0.83349609375}, {"start": 282.76, "end": 284.0, "word": " is", "probability": 0.71630859375}, {"start": 284.0, "end": 284.22, "word": " the", 
"probability": 0.86962890625}, {"start": 284.22, "end": 284.56, "word": " frame", "probability": 0.89111328125}, {"start": 284.56, "end": 285.08, "word": " appropriate?", "probability": 0.7470703125}, {"start": 286.88, "end": 287.04, "word": " I", "probability": 0.92138671875}, {"start": 287.04, "end": 287.26, "word": " mean,", "probability": 0.962890625}, {"start": 287.34, "end": 287.64, "word": " frame", "probability": 0.4111328125}, {"start": 287.64, "end": 288.04, "word": " appropriate", "probability": 0.70361328125}, {"start": 288.04, "end": 288.48, "word": " means", "probability": 0.72607421875}, {"start": 288.48, "end": 288.88, "word": " that", "probability": 0.9091796875}, {"start": 288.88, "end": 289.74, "word": " you", "probability": 0.88134765625}, {"start": 289.74, "end": 290.02, "word": " have", "probability": 0.94677734375}, {"start": 290.02, "end": 290.34, "word": " all", "probability": 0.93505859375}, {"start": 290.34, "end": 290.46, "word": " the", "probability": 0.89453125}, {"start": 290.46, "end": 290.86, "word": " individual", "probability": 0.8486328125}, {"start": 290.86, "end": 291.48, "word": " list,", "probability": 0.36474609375}, {"start": 292.02, "end": 292.22, "word": " then", "probability": 0.8203125}, {"start": 292.22, "end": 292.36, "word": " you", "probability": 0.94482421875}, {"start": 292.36, "end": 292.54, "word": " can", "probability": 0.93798828125}, {"start": 292.54, "end": 292.94, "word": " choose", "probability": 0.90966796875}, {"start": 292.94, "end": 293.18, "word": " one", "probability": 0.92431640625}, {"start": 293.18, "end": 293.34, "word": " of", "probability": 0.9638671875}, {"start": 293.34, "end": 293.62, "word": " these.", "probability": 0.82666015625}, {"start": 294.76, "end": 295.0, "word": " For", "probability": 0.95263671875}, {"start": 295.0, "end": 295.3, "word": " example,", "probability": 0.97265625}, {"start": 295.42, "end": 295.72, "word": " suppose", "probability": 0.8564453125}, {"start": 295.72, 
"end": 296.04, "word": " we", "probability": 0.9296875}, {"start": 296.04, "end": 297.82, "word": " divide", "probability": 0.92041015625}, {"start": 297.82, "end": 298.12, "word": " Gaza", "probability": 0.78466796875}, {"start": 298.12, "end": 298.4, "word": " Strip", "probability": 0.857177734375}, {"start": 298.4, "end": 298.68, "word": " into", "probability": 0.85107421875}, {"start": 298.68, "end": 299.1, "word": " four", "probability": 0.81298828125}, {"start": 299.1, "end": 300.12, "word": " governorates.", "probability": 0.87744140625}, {"start": 301.16, "end": 301.5, "word": " North", "probability": 0.90283203125}, {"start": 301.5, "end": 302.06, "word": " Gaza,", "probability": 0.810546875}, {"start": 302.42, "end": 302.9, "word": " Gaza", "probability": 0.75537109375}, {"start": 302.9, "end": 303.26, "word": " Middle", "probability": 0.79638671875}, {"start": 303.26, "end": 304.06, "word": " Area,", "probability": 0.60009765625}, {"start": 305.52, "end": 305.94, "word": " Khanon", "probability": 0.4794921875}, {"start": 305.94, "end": 306.18, "word": " and", "probability": 0.5771484375}, {"start": 306.18, "end": 306.48, "word": " Rafah.", "probability": 0.7939453125}, {"start": 306.54, "end": 306.68, "word": " So", "probability": 0.94921875}, {"start": 306.68, "end": 306.8, "word": " we", "probability": 0.619140625}, {"start": 306.8, "end": 306.98, "word": " have", "probability": 0.9501953125}, {"start": 306.98, "end": 307.6, "word": " five.", "probability": 0.87841796875}], "temperature": 1.0}, {"id": 13, "seek": 32799, "start": 309.79, "end": 327.99, "text": " sections of five governments. In this case if you, so that's your frame. 
Now if you exclude one, for example, and that one is important for you, but you exclude it for some reasons, in this case you will have coverage as well, because you excluded.", "tokens": [10863, 295, 1732, 11280, 13, 682, 341, 1389, 498, 291, 11, 370, 300, 311, 428, 3920, 13, 823, 498, 291, 33536, 472, 11, 337, 1365, 11, 293, 300, 472, 307, 1021, 337, 291, 11, 457, 291, 33536, 309, 337, 512, 4112, 11, 294, 341, 1389, 291, 486, 362, 9645, 382, 731, 11, 570, 291, 29486, 13], "avg_logprob": -0.27206689851325855, "compression_ratio": 1.585987261146497, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 309.79, "end": 310.31, "word": " sections", "probability": 0.1644287109375}, {"start": 310.31, "end": 310.55, "word": " of", "probability": 0.6298828125}, {"start": 310.55, "end": 310.83, "word": " five", "probability": 0.69384765625}, {"start": 310.83, "end": 311.31, "word": " governments.", "probability": 0.83056640625}, {"start": 312.51, "end": 312.69, "word": " In", "probability": 0.775390625}, {"start": 312.69, "end": 312.95, "word": " this", "probability": 0.947265625}, {"start": 312.95, "end": 313.23, "word": " case", "probability": 0.9169921875}, {"start": 313.23, "end": 313.45, "word": " if", "probability": 0.54150390625}, {"start": 313.45, "end": 313.67, "word": " you,", "probability": 0.95703125}, {"start": 314.11, "end": 314.39, "word": " so", "probability": 0.603515625}, {"start": 314.39, "end": 314.95, "word": " that's", "probability": 0.90576171875}, {"start": 314.95, "end": 315.29, "word": " your", "probability": 0.90185546875}, {"start": 315.29, "end": 316.39, "word": " frame.", "probability": 0.91748046875}, {"start": 316.91, "end": 317.11, "word": " Now", "probability": 0.9091796875}, {"start": 317.11, "end": 317.25, "word": " if", "probability": 0.8212890625}, {"start": 317.25, "end": 317.43, "word": " you", "probability": 0.96435546875}, {"start": 317.43, "end": 317.65, "word": " exclude", "probability": 0.853515625}, {"start": 317.65, 
"end": 318.05, "word": " one,", "probability": 0.9326171875}, {"start": 318.27, "end": 318.43, "word": " for", "probability": 0.94287109375}, {"start": 318.43, "end": 318.79, "word": " example,", "probability": 0.97216796875}, {"start": 319.09, "end": 319.23, "word": " and", "probability": 0.92529296875}, {"start": 319.23, "end": 319.41, "word": " that", "probability": 0.93994140625}, {"start": 319.41, "end": 319.63, "word": " one", "probability": 0.92431640625}, {"start": 319.63, "end": 319.89, "word": " is", "probability": 0.94677734375}, {"start": 319.89, "end": 320.45, "word": " important", "probability": 0.87939453125}, {"start": 320.45, "end": 320.75, "word": " for", "probability": 0.92578125}, {"start": 320.75, "end": 320.97, "word": " you,", "probability": 0.96337890625}, {"start": 321.15, "end": 321.29, "word": " but", "probability": 0.8974609375}, {"start": 321.29, "end": 321.45, "word": " you", "probability": 0.57861328125}, {"start": 321.45, "end": 321.73, "word": " exclude", "probability": 0.85693359375}, {"start": 321.73, "end": 321.87, "word": " it", "probability": 0.49609375}, {"start": 321.87, "end": 321.99, "word": " for", "probability": 0.94775390625}, {"start": 321.99, "end": 322.35, "word": " some", "probability": 0.9033203125}, {"start": 322.35, "end": 323.27, "word": " reasons,", "probability": 0.81787109375}, {"start": 323.71, "end": 323.93, "word": " in", "probability": 0.9345703125}, {"start": 323.93, "end": 324.17, "word": " this", "probability": 0.94970703125}, {"start": 324.17, "end": 324.53, "word": " case", "probability": 0.916015625}, {"start": 324.53, "end": 324.77, "word": " you", "probability": 0.7470703125}, {"start": 324.77, "end": 324.91, "word": " will", "probability": 0.85791015625}, {"start": 324.91, "end": 325.19, "word": " have", "probability": 0.94384765625}, {"start": 325.19, "end": 326.01, "word": " coverage", "probability": 0.88623046875}, {"start": 326.01, "end": 326.27, "word": " as", "probability": 0.444091796875}, 
{"start": 326.27, "end": 326.49, "word": " well,", "probability": 0.951171875}, {"start": 326.97, "end": 327.35, "word": " because", "probability": 0.89599609375}, {"start": 327.35, "end": 327.55, "word": " you", "probability": 0.96435546875}, {"start": 327.55, "end": 327.99, "word": " excluded.", "probability": 0.50830078125}], "temperature": 1.0}, {"id": 14, "seek": 35784, "start": 330.34, "end": 357.84, "text": " one group out of five and that group may be important for your study. Next is called non-response error. Suppose I attributed my questionnaire for 100 students and I gave each one 30 minutes to answer the questionnaire or to fill up the questionnaire, but I didn't follow up.", "tokens": [472, 1594, 484, 295, 1732, 293, 300, 1594, 815, 312, 1021, 337, 428, 2979, 13, 3087, 307, 1219, 2107, 12, 5667, 3739, 6713, 13, 21360, 286, 30976, 452, 44702, 337, 2319, 1731, 293, 286, 2729, 1184, 472, 2217, 2077, 281, 1867, 264, 44702, 420, 281, 2836, 493, 264, 44702, 11, 457, 286, 994, 380, 1524, 493, 13], "avg_logprob": -0.20177802366429362, "compression_ratio": 1.5164835164835164, "no_speech_prob": 0.0, "words": [{"start": 330.34, "end": 330.7, "word": " one", "probability": 0.436767578125}, {"start": 330.7, "end": 331.1, "word": " group", "probability": 0.9541015625}, {"start": 331.1, "end": 332.06, "word": " out", "probability": 0.572265625}, {"start": 332.06, "end": 332.28, "word": " of", "probability": 0.97314453125}, {"start": 332.28, "end": 332.6, "word": " five", "probability": 0.6865234375}, {"start": 332.6, "end": 333.06, "word": " and", "probability": 0.61181640625}, {"start": 333.06, "end": 333.24, "word": " that", "probability": 0.9111328125}, {"start": 333.24, "end": 333.48, "word": " group", "probability": 0.95751953125}, {"start": 333.48, "end": 333.68, "word": " may", "probability": 0.638671875}, {"start": 333.68, "end": 333.94, "word": " be", "probability": 0.95361328125}, {"start": 333.94, "end": 335.22, "word": " important", "probability": 
0.88818359375}, {"start": 335.22, "end": 335.64, "word": " for", "probability": 0.92041015625}, {"start": 335.64, "end": 335.84, "word": " your", "probability": 0.8837890625}, {"start": 335.84, "end": 336.14, "word": " study.", "probability": 0.80908203125}, {"start": 336.84, "end": 337.18, "word": " Next", "probability": 0.873046875}, {"start": 337.18, "end": 337.42, "word": " is", "probability": 0.8662109375}, {"start": 337.42, "end": 337.76, "word": " called", "probability": 0.8076171875}, {"start": 337.76, "end": 338.28, "word": " non", "probability": 0.484130859375}, {"start": 338.28, "end": 338.98, "word": "-response", "probability": 0.8634440104166666}, {"start": 338.98, "end": 339.34, "word": " error.", "probability": 0.84423828125}, {"start": 340.88, "end": 341.3, "word": " Suppose", "probability": 0.7470703125}, {"start": 341.3, "end": 341.74, "word": " I", "probability": 0.9306640625}, {"start": 341.74, "end": 342.88, "word": " attributed", "probability": 0.439208984375}, {"start": 342.88, "end": 343.26, "word": " my", "probability": 0.96533203125}, {"start": 343.26, "end": 343.72, "word": " questionnaire", "probability": 0.9482421875}, {"start": 343.72, "end": 344.44, "word": " for", "probability": 0.91064453125}, {"start": 344.44, "end": 345.62, "word": " 100", "probability": 0.69482421875}, {"start": 345.62, "end": 346.34, "word": " students", "probability": 0.9736328125}, {"start": 346.34, "end": 347.88, "word": " and", "probability": 0.68798828125}, {"start": 347.88, "end": 348.04, "word": " I", "probability": 0.986328125}, {"start": 348.04, "end": 348.28, "word": " gave", "probability": 0.7822265625}, {"start": 348.28, "end": 348.96, "word": " each", "probability": 0.94970703125}, {"start": 348.96, "end": 349.28, "word": " one", "probability": 0.943359375}, {"start": 349.28, "end": 351.92, "word": " 30", "probability": 0.904296875}, {"start": 351.92, "end": 352.22, "word": " minutes", "probability": 0.91796875}, {"start": 352.22, "end": 352.4, 
"word": " to", "probability": 0.96923828125}, {"start": 352.4, "end": 352.66, "word": " answer", "probability": 0.9521484375}, {"start": 352.66, "end": 352.84, "word": " the", "probability": 0.9208984375}, {"start": 352.84, "end": 353.22, "word": " questionnaire", "probability": 0.95556640625}, {"start": 353.22, "end": 354.02, "word": " or", "probability": 0.78564453125}, {"start": 354.02, "end": 354.2, "word": " to", "probability": 0.9580078125}, {"start": 354.2, "end": 354.42, "word": " fill", "probability": 0.89453125}, {"start": 354.42, "end": 354.9, "word": " up", "probability": 0.95703125}, {"start": 354.9, "end": 355.16, "word": " the", "probability": 0.92529296875}, {"start": 355.16, "end": 355.62, "word": " questionnaire,", "probability": 0.97021484375}, {"start": 356.08, "end": 356.38, "word": " but", "probability": 0.92724609375}, {"start": 356.38, "end": 357.02, "word": " I", "probability": 0.97607421875}, {"start": 357.02, "end": 357.34, "word": " didn't", "probability": 0.951904296875}, {"start": 357.34, "end": 357.58, "word": " follow", "probability": 0.91015625}, {"start": 357.58, "end": 357.84, "word": " up.", "probability": 0.935546875}], "temperature": 1.0}, {"id": 15, "seek": 38573, "start": 358.79, "end": 385.73, "text": " the response in this case it might be you will get something error and that error refers to non-responsive so you have to follow up follow up it means maybe sometimes you need to clarify the question you have in your questionnaire so that the respondent understand what do you mean exactly by that question otherwise if you don't follow up it means it may be", "tokens": [264, 4134, 294, 341, 1389, 309, 1062, 312, 291, 486, 483, 746, 6713, 293, 300, 6713, 14942, 281, 2107, 12, 28930, 488, 370, 291, 362, 281, 1524, 493, 1524, 493, 309, 1355, 1310, 2171, 291, 643, 281, 17594, 264, 1168, 291, 362, 294, 428, 44702, 370, 300, 264, 4196, 317, 1223, 437, 360, 291, 914, 2293, 538, 300, 1168, 5911, 498, 291, 500, 380, 1524, 493, 309, 
1355, 309, 815, 312], "avg_logprob": -0.1735026114102867, "compression_ratio": 1.8697916666666667, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 358.79, "end": 359.09, "word": " the", "probability": 0.42822265625}, {"start": 359.09, "end": 359.75, "word": " response", "probability": 0.9228515625}, {"start": 359.75, "end": 360.47, "word": " in", "probability": 0.2447509765625}, {"start": 360.47, "end": 360.69, "word": " this", "probability": 0.94775390625}, {"start": 360.69, "end": 360.97, "word": " case", "probability": 0.923828125}, {"start": 360.97, "end": 361.23, "word": " it", "probability": 0.8603515625}, {"start": 361.23, "end": 361.47, "word": " might", "probability": 0.896484375}, {"start": 361.47, "end": 361.73, "word": " be", "probability": 0.94677734375}, {"start": 361.73, "end": 362.05, "word": " you", "probability": 0.91943359375}, {"start": 362.05, "end": 362.19, "word": " will", "probability": 0.818359375}, {"start": 362.19, "end": 362.43, "word": " get", "probability": 0.94091796875}, {"start": 362.43, "end": 364.25, "word": " something", "probability": 0.81396484375}, {"start": 364.25, "end": 364.97, "word": " error", "probability": 0.61083984375}, {"start": 364.97, "end": 365.67, "word": " and", "probability": 0.8876953125}, {"start": 365.67, "end": 365.89, "word": " that", "probability": 0.935546875}, {"start": 365.89, "end": 366.17, "word": " error", "probability": 0.8544921875}, {"start": 366.17, "end": 366.57, "word": " refers", "probability": 0.84912109375}, {"start": 366.57, "end": 366.79, "word": " to", "probability": 0.96826171875}, {"start": 366.79, "end": 367.05, "word": " non", "probability": 0.5234375}, {"start": 367.05, "end": 367.71, "word": "-responsive", "probability": 0.6884765625}, {"start": 367.71, "end": 368.63, "word": " so", "probability": 0.50537109375}, {"start": 368.63, "end": 368.75, "word": " you", "probability": 0.91943359375}, {"start": 368.75, "end": 368.89, "word": " have", "probability": 
0.94775390625}, {"start": 368.89, "end": 369.01, "word": " to", "probability": 0.970703125}, {"start": 369.01, "end": 369.17, "word": " follow", "probability": 0.919921875}, {"start": 369.17, "end": 369.33, "word": " up", "probability": 0.84765625}, {"start": 369.33, "end": 369.47, "word": " follow", "probability": 0.432861328125}, {"start": 369.47, "end": 369.63, "word": " up", "probability": 0.84765625}, {"start": 369.63, "end": 369.77, "word": " it", "probability": 0.81396484375}, {"start": 369.77, "end": 370.01, "word": " means", "probability": 0.93212890625}, {"start": 370.01, "end": 371.49, "word": " maybe", "probability": 0.8095703125}, {"start": 371.49, "end": 371.97, "word": " sometimes", "probability": 0.95068359375}, {"start": 371.97, "end": 372.19, "word": " you", "probability": 0.96728515625}, {"start": 372.19, "end": 372.39, "word": " need", "probability": 0.92578125}, {"start": 372.39, "end": 372.59, "word": " to", "probability": 0.97119140625}, {"start": 372.59, "end": 373.17, "word": " clarify", "probability": 0.96435546875}, {"start": 373.17, "end": 373.77, "word": " the", "probability": 0.921875}, {"start": 373.77, "end": 374.09, "word": " question", "probability": 0.91259765625}, {"start": 374.09, "end": 374.27, "word": " you", "probability": 0.96337890625}, {"start": 374.27, "end": 374.45, "word": " have", "probability": 0.95263671875}, {"start": 374.45, "end": 374.55, "word": " in", "probability": 0.93896484375}, {"start": 374.55, "end": 374.67, "word": " your", "probability": 0.896484375}, {"start": 374.67, "end": 375.15, "word": " questionnaire", "probability": 0.95458984375}, {"start": 375.15, "end": 376.23, "word": " so", "probability": 0.921875}, {"start": 376.23, "end": 376.73, "word": " that", "probability": 0.93701171875}, {"start": 376.73, "end": 377.29, "word": " the", "probability": 0.92236328125}, {"start": 377.29, "end": 378.33, "word": " respondent", "probability": 0.89453125}, {"start": 378.33, "end": 379.51, "word": " 
understand", "probability": 0.6376953125}, {"start": 379.51, "end": 380.17, "word": " what", "probability": 0.9501953125}, {"start": 380.17, "end": 380.29, "word": " do", "probability": 0.60986328125}, {"start": 380.29, "end": 380.31, "word": " you", "probability": 0.94287109375}, {"start": 380.31, "end": 380.43, "word": " mean", "probability": 0.96630859375}, {"start": 380.43, "end": 380.87, "word": " exactly", "probability": 0.876953125}, {"start": 380.87, "end": 381.15, "word": " by", "probability": 0.97119140625}, {"start": 381.15, "end": 381.41, "word": " that", "probability": 0.93408203125}, {"start": 381.41, "end": 381.85, "word": " question", "probability": 0.91552734375}, {"start": 381.85, "end": 382.89, "word": " otherwise", "probability": 0.8359375}, {"start": 382.89, "end": 383.15, "word": " if", "probability": 0.94384765625}, {"start": 383.15, "end": 383.23, "word": " you", "probability": 0.9638671875}, {"start": 383.23, "end": 383.43, "word": " don't", "probability": 0.97900390625}, {"start": 383.43, "end": 383.67, "word": " follow", "probability": 0.919921875}, {"start": 383.67, "end": 383.89, "word": " up", "probability": 0.9560546875}, {"start": 383.89, "end": 384.03, "word": " it", "probability": 0.9384765625}, {"start": 384.03, "end": 384.43, "word": " means", "probability": 0.94677734375}, {"start": 384.43, "end": 384.95, "word": " it", "probability": 0.87353515625}, {"start": 384.95, "end": 385.55, "word": " may", "probability": 0.716796875}, {"start": 385.55, "end": 385.73, "word": " be", "probability": 0.95703125}], "temperature": 1.0}, {"id": 16, "seek": 41321, "start": 388.09, "end": 413.21, "text": " there is an error, and that error is called non-response. The other type of error is called measurement error, which is one of the most important errors, and we have to avoid. It's called measurement error. Good questions elicit good responses. 
It means suppose, for example, my question is,", "tokens": [456, 307, 364, 6713, 11, 293, 300, 6713, 307, 1219, 2107, 12, 5667, 3739, 13, 440, 661, 2010, 295, 6713, 307, 1219, 13160, 6713, 11, 597, 307, 472, 295, 264, 881, 1021, 13603, 11, 293, 321, 362, 281, 5042, 13, 467, 311, 1219, 13160, 6713, 13, 2205, 1651, 806, 8876, 665, 13019, 13, 467, 1355, 7297, 11, 337, 1365, 11, 452, 1168, 307, 11], "avg_logprob": -0.2308894230769231, "compression_ratio": 1.7380952380952381, "no_speech_prob": 0.0, "words": [{"start": 388.09, "end": 388.47, "word": " there", "probability": 0.3994140625}, {"start": 388.47, "end": 388.79, "word": " is", "probability": 0.92236328125}, {"start": 388.79, "end": 389.03, "word": " an", "probability": 0.90625}, {"start": 389.03, "end": 389.25, "word": " error,", "probability": 0.87109375}, {"start": 389.39, "end": 389.41, "word": " and", "probability": 0.85693359375}, {"start": 389.41, "end": 389.59, "word": " that", "probability": 0.90673828125}, {"start": 389.59, "end": 389.75, "word": " error", "probability": 0.8466796875}, {"start": 389.75, "end": 389.91, "word": " is", "probability": 0.9140625}, {"start": 389.91, "end": 390.25, "word": " called", "probability": 0.85400390625}, {"start": 390.25, "end": 390.55, "word": " non", "probability": 0.70361328125}, {"start": 390.55, "end": 391.07, "word": "-response.", "probability": 0.73486328125}, {"start": 391.85, "end": 392.49, "word": " The", "probability": 0.875}, {"start": 392.49, "end": 392.75, "word": " other", "probability": 0.89013671875}, {"start": 392.75, "end": 393.07, "word": " type", "probability": 0.95849609375}, {"start": 393.07, "end": 393.23, "word": " of", "probability": 0.96484375}, {"start": 393.23, "end": 393.51, "word": " error", "probability": 0.88330078125}, {"start": 393.51, "end": 394.95, "word": " is", "probability": 0.75341796875}, {"start": 394.95, "end": 395.17, "word": " called", "probability": 0.82275390625}, {"start": 395.17, "end": 395.59, "word": " measurement", 
"probability": 0.767578125}, {"start": 395.59, "end": 395.99, "word": " error,", "probability": 0.79150390625}, {"start": 396.17, "end": 396.21, "word": " which", "probability": 0.64794921875}, {"start": 396.21, "end": 396.31, "word": " is", "probability": 0.9404296875}, {"start": 396.31, "end": 396.51, "word": " one", "probability": 0.92822265625}, {"start": 396.51, "end": 396.65, "word": " of", "probability": 0.9658203125}, {"start": 396.65, "end": 396.81, "word": " the", "probability": 0.9208984375}, {"start": 396.81, "end": 397.15, "word": " most", "probability": 0.89306640625}, {"start": 397.15, "end": 397.87, "word": " important", "probability": 0.88232421875}, {"start": 397.87, "end": 398.33, "word": " errors,", "probability": 0.86328125}, {"start": 398.83, "end": 398.91, "word": " and", "probability": 0.919921875}, {"start": 398.91, "end": 399.03, "word": " we", "probability": 0.87060546875}, {"start": 399.03, "end": 399.19, "word": " have", "probability": 0.94873046875}, {"start": 399.19, "end": 399.33, "word": " to", "probability": 0.97265625}, {"start": 399.33, "end": 399.91, "word": " avoid.", "probability": 0.9140625}, {"start": 402.95, "end": 403.29, "word": " It's", "probability": 0.888671875}, {"start": 403.29, "end": 403.49, "word": " called", "probability": 0.86572265625}, {"start": 403.49, "end": 403.87, "word": " measurement", "probability": 0.84423828125}, {"start": 403.87, "end": 404.23, "word": " error.", "probability": 0.90185546875}, {"start": 405.07, "end": 405.23, "word": " Good", "probability": 0.262451171875}, {"start": 405.23, "end": 405.83, "word": " questions", "probability": 0.94873046875}, {"start": 405.83, "end": 406.73, "word": " elicit", "probability": 0.935791015625}, {"start": 406.73, "end": 408.05, "word": " good", "probability": 0.9150390625}, {"start": 408.05, "end": 408.63, "word": " responses.", "probability": 0.93310546875}, {"start": 408.95, "end": 409.07, "word": " It", "probability": 0.89599609375}, {"start": 409.07, 
"end": 409.37, "word": " means", "probability": 0.92919921875}, {"start": 409.37, "end": 410.51, "word": " suppose,", "probability": 0.31884765625}, {"start": 410.65, "end": 410.71, "word": " for", "probability": 0.95263671875}, {"start": 410.71, "end": 411.11, "word": " example,", "probability": 0.9755859375}, {"start": 412.21, "end": 412.51, "word": " my", "probability": 0.96630859375}, {"start": 412.51, "end": 412.83, "word": " question", "probability": 0.97021484375}, {"start": 412.83, "end": 413.21, "word": " is,", "probability": 0.95068359375}], "temperature": 1.0}, {"id": 17, "seek": 43824, "start": 414.72, "end": 438.24, "text": " I feel this candidate is good for us. What do you think? It's my question. I feel this candidate, candidate A, whatever he is, is good for us. What do you think? For sure there's abundant answer will be yes. I agree with you.", "tokens": [286, 841, 341, 11532, 307, 665, 337, 505, 13, 708, 360, 291, 519, 30, 467, 311, 452, 1168, 13, 286, 841, 341, 11532, 11, 11532, 316, 11, 2035, 415, 307, 11, 307, 665, 337, 505, 13, 708, 360, 291, 519, 30, 1171, 988, 456, 311, 30657, 1867, 486, 312, 2086, 13, 286, 3986, 365, 291, 13], "avg_logprob": -0.17571271511546352, "compression_ratio": 1.6376811594202898, "no_speech_prob": 0.0, "words": [{"start": 414.72, "end": 415.0, "word": " I", "probability": 0.67041015625}, {"start": 415.0, "end": 415.44, "word": " feel", "probability": 0.91552734375}, {"start": 415.44, "end": 416.88, "word": " this", "probability": 0.8779296875}, {"start": 416.88, "end": 417.34, "word": " candidate", "probability": 0.74462890625}, {"start": 417.34, "end": 417.7, "word": " is", "probability": 0.93994140625}, {"start": 417.7, "end": 417.9, "word": " good", "probability": 0.9267578125}, {"start": 417.9, "end": 418.1, "word": " for", "probability": 0.9560546875}, {"start": 418.1, "end": 418.44, "word": " us.", "probability": 0.93505859375}, {"start": 418.8, "end": 418.92, "word": " What", "probability": 0.8193359375}, 
{"start": 418.92, "end": 419.04, "word": " do", "probability": 0.9521484375}, {"start": 419.04, "end": 419.1, "word": " you", "probability": 0.9609375}, {"start": 419.1, "end": 419.4, "word": " think?", "probability": 0.9150390625}, {"start": 421.0, "end": 421.2, "word": " It's", "probability": 0.598388671875}, {"start": 421.2, "end": 421.4, "word": " my", "probability": 0.97021484375}, {"start": 421.4, "end": 421.74, "word": " question.", "probability": 0.91552734375}, {"start": 422.82, "end": 422.96, "word": " I", "probability": 0.95703125}, {"start": 422.96, "end": 423.4, "word": " feel", "probability": 0.96533203125}, {"start": 423.4, "end": 424.46, "word": " this", "probability": 0.9306640625}, {"start": 424.46, "end": 424.96, "word": " candidate,", "probability": 0.79345703125}, {"start": 425.68, "end": 426.54, "word": " candidate", "probability": 0.61181640625}, {"start": 426.54, "end": 426.92, "word": " A,", "probability": 0.73681640625}, {"start": 427.02, "end": 427.22, "word": " whatever", "probability": 0.9111328125}, {"start": 427.22, "end": 428.0, "word": " he", "probability": 0.9443359375}, {"start": 428.0, "end": 428.34, "word": " is,", "probability": 0.94384765625}, {"start": 429.6, "end": 430.46, "word": " is", "probability": 0.923828125}, {"start": 430.46, "end": 430.74, "word": " good", "probability": 0.9296875}, {"start": 430.74, "end": 430.96, "word": " for", "probability": 0.951171875}, {"start": 430.96, "end": 431.3, "word": " us.", "probability": 0.93359375}, {"start": 431.48, "end": 431.6, "word": " What", "probability": 0.8515625}, {"start": 431.6, "end": 431.72, "word": " do", "probability": 0.9560546875}, {"start": 431.72, "end": 431.78, "word": " you", "probability": 0.96240234375}, {"start": 431.78, "end": 432.08, "word": " think?", "probability": 0.91259765625}, {"start": 434.04, "end": 434.68, "word": " For", "probability": 0.93115234375}, {"start": 434.68, "end": 435.0, "word": " sure", "probability": 0.927734375}, {"start": 435.0, 
"end": 435.52, "word": " there's", "probability": 0.582763671875}, {"start": 435.52, "end": 435.88, "word": " abundant", "probability": 0.6279296875}, {"start": 435.88, "end": 436.32, "word": " answer", "probability": 0.93603515625}, {"start": 436.32, "end": 436.48, "word": " will", "probability": 0.7158203125}, {"start": 436.48, "end": 436.6, "word": " be", "probability": 0.95556640625}, {"start": 436.6, "end": 436.92, "word": " yes.", "probability": 0.7216796875}, {"start": 437.42, "end": 437.66, "word": " I", "probability": 0.9921875}, {"start": 437.66, "end": 437.88, "word": " agree", "probability": 0.9140625}, {"start": 437.88, "end": 438.08, "word": " with", "probability": 0.90185546875}, {"start": 438.08, "end": 438.24, "word": " you.", "probability": 0.9619140625}], "temperature": 1.0}, {"id": 18, "seek": 46806, "start": 439.1, "end": 468.06, "text": " So that means you design the question in the way that you will know they respond directly that he will answer yes or no depends on your design of the question. So it means leading question. So measurement error. 
So but if we have good questions, just ask any question for the respondent and let him or let his answer based on", "tokens": [407, 300, 1355, 291, 1715, 264, 1168, 294, 264, 636, 300, 291, 486, 458, 436, 4196, 3838, 300, 415, 486, 1867, 2086, 420, 572, 5946, 322, 428, 1715, 295, 264, 1168, 13, 407, 309, 1355, 5775, 1168, 13, 407, 13160, 6713, 13, 407, 457, 498, 321, 362, 665, 1651, 11, 445, 1029, 604, 1168, 337, 264, 4196, 317, 293, 718, 796, 420, 718, 702, 1867, 2361, 322], "avg_logprob": -0.19485294008079698, "compression_ratio": 1.7912087912087913, "no_speech_prob": 0.0, "words": [{"start": 439.1, "end": 439.58, "word": " So", "probability": 0.67724609375}, {"start": 439.58, "end": 439.86, "word": " that", "probability": 0.76708984375}, {"start": 439.86, "end": 440.14, "word": " means", "probability": 0.93017578125}, {"start": 440.14, "end": 440.42, "word": " you", "probability": 0.91064453125}, {"start": 440.42, "end": 441.34, "word": " design", "probability": 0.83251953125}, {"start": 441.34, "end": 441.52, "word": " the", "probability": 0.81298828125}, {"start": 441.52, "end": 441.94, "word": " question", "probability": 0.90673828125}, {"start": 441.94, "end": 442.54, "word": " in", "probability": 0.91748046875}, {"start": 442.54, "end": 442.66, "word": " the", "probability": 0.7705078125}, {"start": 442.66, "end": 442.86, "word": " way", "probability": 0.95458984375}, {"start": 442.86, "end": 443.2, "word": " that", "probability": 0.923828125}, {"start": 443.2, "end": 443.92, "word": " you", "probability": 0.91845703125}, {"start": 443.92, "end": 444.16, "word": " will", "probability": 0.85546875}, {"start": 444.16, "end": 444.48, "word": " know", "probability": 0.8876953125}, {"start": 444.48, "end": 444.78, "word": " they", "probability": 0.344482421875}, {"start": 444.78, "end": 445.98, "word": " respond", "probability": 0.775390625}, {"start": 445.98, "end": 447.8, "word": " directly", "probability": 0.681640625}, {"start": 447.8, "end": 448.1, "word": 
" that", "probability": 0.76220703125}, {"start": 448.1, "end": 448.26, "word": " he", "probability": 0.74951171875}, {"start": 448.26, "end": 448.52, "word": " will", "probability": 0.86962890625}, {"start": 448.52, "end": 449.18, "word": " answer", "probability": 0.9609375}, {"start": 449.18, "end": 449.46, "word": " yes", "probability": 0.82421875}, {"start": 449.46, "end": 449.72, "word": " or", "probability": 0.94091796875}, {"start": 449.72, "end": 449.96, "word": " no", "probability": 0.93408203125}, {"start": 449.96, "end": 450.32, "word": " depends", "probability": 0.52783203125}, {"start": 450.32, "end": 450.74, "word": " on", "probability": 0.9423828125}, {"start": 450.74, "end": 451.08, "word": " your", "probability": 0.8759765625}, {"start": 451.08, "end": 451.84, "word": " design", "probability": 0.91748046875}, {"start": 451.84, "end": 451.98, "word": " of", "probability": 0.9404296875}, {"start": 451.98, "end": 452.06, "word": " the", "probability": 0.88818359375}, {"start": 452.06, "end": 452.38, "word": " question.", "probability": 0.9033203125}, {"start": 454.42, "end": 454.52, "word": " So", "probability": 0.83544921875}, {"start": 454.52, "end": 454.64, "word": " it", "probability": 0.87451171875}, {"start": 454.64, "end": 454.82, "word": " means", "probability": 0.9091796875}, {"start": 454.82, "end": 455.08, "word": " leading", "probability": 0.93310546875}, {"start": 455.08, "end": 455.52, "word": " question.", "probability": 0.88427734375}, {"start": 456.38, "end": 456.58, "word": " So", "probability": 0.85986328125}, {"start": 456.58, "end": 457.22, "word": " measurement", "probability": 0.72607421875}, {"start": 457.22, "end": 457.66, "word": " error.", "probability": 0.8798828125}, {"start": 459.04, "end": 459.28, "word": " So", "probability": 0.66162109375}, {"start": 459.28, "end": 459.5, "word": " but", "probability": 0.57080078125}, {"start": 459.5, "end": 459.74, "word": " if", "probability": 0.9521484375}, {"start": 459.74, "end": 
459.86, "word": " we", "probability": 0.93408203125}, {"start": 459.86, "end": 460.0, "word": " have", "probability": 0.9404296875}, {"start": 460.0, "end": 460.16, "word": " good", "probability": 0.79150390625}, {"start": 460.16, "end": 460.58, "word": " questions,", "probability": 0.783203125}, {"start": 460.7, "end": 461.06, "word": " just", "probability": 0.8330078125}, {"start": 461.06, "end": 461.48, "word": " ask", "probability": 0.91064453125}, {"start": 461.48, "end": 462.48, "word": " any", "probability": 0.88232421875}, {"start": 462.48, "end": 462.82, "word": " question", "probability": 0.8994140625}, {"start": 462.82, "end": 463.08, "word": " for", "probability": 0.92919921875}, {"start": 463.08, "end": 463.26, "word": " the", "probability": 0.91650390625}, {"start": 463.26, "end": 463.86, "word": " respondent", "probability": 0.873046875}, {"start": 463.86, "end": 464.16, "word": " and", "probability": 0.88818359375}, {"start": 464.16, "end": 464.74, "word": " let", "probability": 0.95751953125}, {"start": 464.74, "end": 465.08, "word": " him", "probability": 0.923828125}, {"start": 465.08, "end": 466.16, "word": " or", "probability": 0.76904296875}, {"start": 466.16, "end": 466.36, "word": " let", "probability": 0.9404296875}, {"start": 466.36, "end": 466.58, "word": " his", "probability": 0.9375}, {"start": 466.58, "end": 467.02, "word": " answer", "probability": 0.96728515625}, {"start": 467.02, "end": 467.68, "word": " based", "probability": 0.8955078125}, {"start": 467.68, "end": 468.06, "word": " on", "probability": 0.94921875}], "temperature": 1.0}, {"id": 19, "seek": 49693, "start": 468.89, "end": 496.93, "text": " what exactly he thinks about it. So don't force the respondent to answer the question in the direction you want to be. Otherwise you will get something called Measurement Error. Do you think? Give me an example of Measurement Error. Give me an example of Measurement Error. 
Just ask a question in a way that the respondent will answer", "tokens": [437, 2293, 415, 7309, 466, 309, 13, 407, 500, 380, 3464, 264, 4196, 317, 281, 1867, 264, 1168, 294, 264, 3513, 291, 528, 281, 312, 13, 10328, 291, 486, 483, 746, 1219, 41436, 518, 3300, 2874, 13, 1144, 291, 519, 30, 5303, 385, 364, 1365, 295, 41436, 518, 3300, 2874, 13, 5303, 385, 364, 1365, 295, 41436, 518, 3300, 2874, 13, 1449, 1029, 257, 1168, 294, 257, 636, 300, 264, 4196, 317, 486, 1867], "avg_logprob": -0.16135416467984517, "compression_ratio": 1.8010752688172043, "no_speech_prob": 0.0, "words": [{"start": 468.89, "end": 469.19, "word": " what", "probability": 0.56494140625}, {"start": 469.19, "end": 469.91, "word": " exactly", "probability": 0.837890625}, {"start": 469.91, "end": 470.09, "word": " he", "probability": 0.86328125}, {"start": 470.09, "end": 470.41, "word": " thinks", "probability": 0.541015625}, {"start": 470.41, "end": 470.71, "word": " about", "probability": 0.90380859375}, {"start": 470.71, "end": 471.03, "word": " it.", "probability": 0.91064453125}, {"start": 471.87, "end": 472.01, "word": " So", "probability": 0.8818359375}, {"start": 472.01, "end": 472.27, "word": " don't", "probability": 0.811767578125}, {"start": 472.27, "end": 472.85, "word": " force", "probability": 0.9677734375}, {"start": 472.85, "end": 473.47, "word": " the", "probability": 0.90771484375}, {"start": 473.47, "end": 474.01, "word": " respondent", "probability": 0.888427734375}, {"start": 474.01, "end": 474.23, "word": " to", "probability": 0.96533203125}, {"start": 474.23, "end": 474.65, "word": " answer", "probability": 0.95947265625}, {"start": 474.65, "end": 474.93, "word": " the", "probability": 0.89599609375}, {"start": 474.93, "end": 475.67, "word": " question", "probability": 0.9150390625}, {"start": 475.67, "end": 476.33, "word": " in", "probability": 0.9267578125}, {"start": 476.33, "end": 476.49, "word": " the", "probability": 0.92138671875}, {"start": 476.49, "end": 476.85, "word": 
" direction", "probability": 0.96826171875}, {"start": 476.85, "end": 477.07, "word": " you", "probability": 0.734375}, {"start": 477.07, "end": 477.33, "word": " want", "probability": 0.89111328125}, {"start": 477.33, "end": 477.45, "word": " to", "probability": 0.78759765625}, {"start": 477.45, "end": 477.65, "word": " be.", "probability": 0.92919921875}, {"start": 478.23, "end": 478.69, "word": " Otherwise", "probability": 0.89501953125}, {"start": 478.69, "end": 478.97, "word": " you", "probability": 0.572265625}, {"start": 478.97, "end": 479.09, "word": " will", "probability": 0.85888671875}, {"start": 479.09, "end": 479.25, "word": " get", "probability": 0.935546875}, {"start": 479.25, "end": 479.59, "word": " something", "probability": 0.86865234375}, {"start": 479.59, "end": 480.19, "word": " called", "probability": 0.87353515625}, {"start": 480.19, "end": 481.97, "word": " Measurement", "probability": 0.7021484375}, {"start": 481.97, "end": 482.21, "word": " Error.", "probability": 0.94677734375}, {"start": 483.13, "end": 483.31, "word": " Do", "probability": 0.80419921875}, {"start": 483.31, "end": 483.39, "word": " you", "probability": 0.96923828125}, {"start": 483.39, "end": 483.57, "word": " think?", "probability": 0.90283203125}, {"start": 484.91, "end": 485.47, "word": " Give", "probability": 0.83984375}, {"start": 485.47, "end": 485.57, "word": " me", "probability": 0.96044921875}, {"start": 485.57, "end": 485.71, "word": " an", "probability": 0.94970703125}, {"start": 485.71, "end": 485.99, "word": " example", "probability": 0.974609375}, {"start": 485.99, "end": 486.17, "word": " of", "probability": 0.9541015625}, {"start": 486.17, "end": 486.53, "word": " Measurement", "probability": 0.972900390625}, {"start": 486.53, "end": 486.77, "word": " Error.", "probability": 0.970458984375}, {"start": 489.77, "end": 490.07, "word": " Give", "probability": 0.533203125}, {"start": 490.07, "end": 490.21, "word": " me", "probability": 0.9638671875}, {"start": 
490.21, "end": 490.37, "word": " an", "probability": 0.955078125}, {"start": 490.37, "end": 490.73, "word": " example", "probability": 0.97412109375}, {"start": 490.73, "end": 491.07, "word": " of", "probability": 0.9501953125}, {"start": 491.07, "end": 491.47, "word": " Measurement", "probability": 0.98828125}, {"start": 491.47, "end": 491.67, "word": " Error.", "probability": 0.976806640625}, {"start": 491.91, "end": 492.17, "word": " Just", "probability": 0.84326171875}, {"start": 492.17, "end": 492.45, "word": " ask", "probability": 0.9052734375}, {"start": 492.45, "end": 492.61, "word": " a", "probability": 0.556640625}, {"start": 492.61, "end": 493.29, "word": " question", "probability": 0.91943359375}, {"start": 493.29, "end": 494.79, "word": " in", "probability": 0.85302734375}, {"start": 494.79, "end": 494.91, "word": " a", "probability": 0.5}, {"start": 494.91, "end": 495.07, "word": " way", "probability": 0.9521484375}, {"start": 495.07, "end": 495.37, "word": " that", "probability": 0.92919921875}, {"start": 495.37, "end": 495.61, "word": " the", "probability": 0.89697265625}, {"start": 495.61, "end": 496.15, "word": " respondent", "probability": 0.935546875}, {"start": 496.15, "end": 496.37, "word": " will", "probability": 0.861328125}, {"start": 496.37, "end": 496.93, "word": " answer", "probability": 0.96484375}], "temperature": 1.0}, {"id": 20, "seek": 52163, "start": 498.23, "end": 521.63, "text": " I mean his answer will be the same as you think about it. Maybe I like coffee, do you like coffee or tea? So maybe he will go with your answer, in this case it's measurement. 
Another example.", "tokens": [286, 914, 702, 1867, 486, 312, 264, 912, 382, 291, 519, 466, 309, 13, 2704, 286, 411, 4982, 11, 360, 291, 411, 4982, 420, 5817, 30, 407, 1310, 415, 486, 352, 365, 428, 1867, 11, 294, 341, 1389, 309, 311, 13160, 13, 3996, 1365, 13], "avg_logprob": -0.21569292830384296, "compression_ratio": 1.3714285714285714, "no_speech_prob": 0.0, "words": [{"start": 498.23, "end": 498.57, "word": " I", "probability": 0.67822265625}, {"start": 498.57, "end": 498.91, "word": " mean", "probability": 0.9658203125}, {"start": 498.91, "end": 499.37, "word": " his", "probability": 0.6083984375}, {"start": 499.37, "end": 499.71, "word": " answer", "probability": 0.953125}, {"start": 499.71, "end": 499.89, "word": " will", "probability": 0.759765625}, {"start": 499.89, "end": 500.01, "word": " be", "probability": 0.95068359375}, {"start": 500.01, "end": 500.19, "word": " the", "probability": 0.876953125}, {"start": 500.19, "end": 500.39, "word": " same", "probability": 0.90478515625}, {"start": 500.39, "end": 500.61, "word": " as", "probability": 0.96044921875}, {"start": 500.61, "end": 500.75, "word": " you", "probability": 0.93798828125}, {"start": 500.75, "end": 501.03, "word": " think", "probability": 0.90625}, {"start": 501.03, "end": 501.27, "word": " about", "probability": 0.904296875}, {"start": 501.27, "end": 504.77, "word": " it.", "probability": 0.85107421875}, {"start": 510.13, "end": 510.49, "word": " Maybe", "probability": 0.37060546875}, {"start": 510.49, "end": 510.91, "word": " I", "probability": 0.39453125}, {"start": 510.91, "end": 511.17, "word": " like", "probability": 0.93212890625}, {"start": 511.17, "end": 511.49, "word": " coffee,", "probability": 0.89208984375}, {"start": 511.93, "end": 512.13, "word": " do", "probability": 0.87890625}, {"start": 512.13, "end": 512.19, "word": " you", "probability": 0.9619140625}, {"start": 512.19, "end": 512.31, "word": " like", "probability": 0.93798828125}, {"start": 512.31, "end": 512.57, "word": 
" coffee", "probability": 0.86376953125}, {"start": 512.57, "end": 512.79, "word": " or", "probability": 0.95654296875}, {"start": 512.79, "end": 513.03, "word": " tea?", "probability": 0.94775390625}, {"start": 514.67, "end": 515.13, "word": " So", "probability": 0.7958984375}, {"start": 515.13, "end": 515.43, "word": " maybe", "probability": 0.822265625}, {"start": 515.43, "end": 515.73, "word": " he", "probability": 0.93310546875}, {"start": 515.73, "end": 515.87, "word": " will", "probability": 0.8876953125}, {"start": 515.87, "end": 516.05, "word": " go", "probability": 0.96728515625}, {"start": 516.05, "end": 516.23, "word": " with", "probability": 0.8896484375}, {"start": 516.23, "end": 516.41, "word": " your", "probability": 0.88134765625}, {"start": 516.41, "end": 516.81, "word": " answer,", "probability": 0.96044921875}, {"start": 516.89, "end": 516.97, "word": " in", "probability": 0.91748046875}, {"start": 516.97, "end": 517.17, "word": " this", "probability": 0.94091796875}, {"start": 517.17, "end": 517.39, "word": " case", "probability": 0.92236328125}, {"start": 517.39, "end": 519.55, "word": " it's", "probability": 0.855224609375}, {"start": 519.55, "end": 520.51, "word": " measurement.", "probability": 0.34228515625}, {"start": 520.87, "end": 521.23, "word": " Another", "probability": 0.841796875}, {"start": 521.23, "end": 521.63, "word": " example.", "probability": 0.97021484375}], "temperature": 1.0}, {"id": 21, "seek": 55594, "start": 540.26, "end": 555.94, "text": " Exactly. 
So it means that if you design a question in the way that you will get the same answer you think about it,", "tokens": [7587, 13, 407, 309, 1355, 300, 498, 291, 1715, 257, 1168, 294, 264, 636, 300, 291, 486, 483, 264, 912, 1867, 291, 519, 466, 309, 11], "avg_logprob": -0.21122685405943128, "compression_ratio": 1.2210526315789474, "no_speech_prob": 0.0, "words": [{"start": 540.26, "end": 540.86, "word": " Exactly.", "probability": 0.08843994140625}, {"start": 547.96, "end": 548.22, "word": " So", "probability": 0.900390625}, {"start": 548.22, "end": 548.6, "word": " it", "probability": 0.7568359375}, {"start": 548.6, "end": 548.96, "word": " means", "probability": 0.916015625}, {"start": 548.96, "end": 549.3, "word": " that", "probability": 0.91845703125}, {"start": 549.3, "end": 549.74, "word": " if", "probability": 0.9072265625}, {"start": 549.74, "end": 549.96, "word": " you", "probability": 0.96337890625}, {"start": 549.96, "end": 550.38, "word": " design", "probability": 0.96435546875}, {"start": 550.38, "end": 550.54, "word": " a", "probability": 0.98779296875}, {"start": 550.54, "end": 551.3, "word": " question", "probability": 0.92822265625}, {"start": 551.3, "end": 552.26, "word": " in", "probability": 0.69775390625}, {"start": 552.26, "end": 552.42, "word": " the", "probability": 0.81787109375}, {"start": 552.42, "end": 552.62, "word": " way", "probability": 0.9541015625}, {"start": 552.62, "end": 552.9, "word": " that", "probability": 0.9287109375}, {"start": 552.9, "end": 553.48, "word": " you", "probability": 0.9443359375}, {"start": 553.48, "end": 553.64, "word": " will", "probability": 0.8837890625}, {"start": 553.64, "end": 554.04, "word": " get", "probability": 0.94189453125}, {"start": 554.04, "end": 554.34, "word": " the", "probability": 0.91796875}, {"start": 554.34, "end": 554.64, "word": " same", "probability": 0.904296875}, {"start": 554.64, "end": 554.98, "word": " answer", "probability": 0.95703125}, {"start": 554.98, "end": 555.14, "word": 
" you", "probability": 0.73193359375}, {"start": 555.14, "end": 555.42, "word": " think", "probability": 0.88720703125}, {"start": 555.42, "end": 555.7, "word": " about", "probability": 0.9091796875}, {"start": 555.7, "end": 555.94, "word": " it,", "probability": 0.9453125}], "temperature": 1.0}, {"id": 22, "seek": 58409, "start": 556.37, "end": 584.09, "text": " It means that you will have something called measurement error. The last type is sampling error. Sampling error always happens, always exists. For example, suppose you are around 50 students in this class. Suppose I select randomly 20 of you. And I am interested suppose in your age. Maybe for this sample.", "tokens": [467, 1355, 300, 291, 486, 362, 746, 1219, 13160, 6713, 13, 440, 1036, 2010, 307, 21179, 6713, 13, 4832, 11970, 6713, 1009, 2314, 11, 1009, 8198, 13, 1171, 1365, 11, 7297, 291, 366, 926, 2625, 1731, 294, 341, 1508, 13, 21360, 286, 3048, 16979, 945, 295, 291, 13, 400, 286, 669, 3102, 7297, 294, 428, 3205, 13, 2704, 337, 341, 6889, 13], "avg_logprob": -0.17547122779346647, "compression_ratio": 1.5198019801980198, "no_speech_prob": 0.0, "words": [{"start": 556.37, "end": 556.63, "word": " It", "probability": 0.373291015625}, {"start": 556.63, "end": 556.97, "word": " means", "probability": 0.91552734375}, {"start": 556.97, "end": 557.39, "word": " that", "probability": 0.912109375}, {"start": 557.39, "end": 558.01, "word": " you", "probability": 0.88916015625}, {"start": 558.01, "end": 558.19, "word": " will", "probability": 0.85791015625}, {"start": 558.19, "end": 558.55, "word": " have", "probability": 0.951171875}, {"start": 558.55, "end": 558.91, "word": " something", "probability": 0.837890625}, {"start": 558.91, "end": 559.23, "word": " called", "probability": 0.84326171875}, {"start": 559.23, "end": 559.57, "word": " measurement", "probability": 0.61328125}, {"start": 559.57, "end": 559.89, "word": " error.", "probability": 0.8896484375}, {"start": 560.23, "end": 560.39, "word": " The", 
"probability": 0.849609375}, {"start": 560.39, "end": 560.65, "word": " last", "probability": 0.876953125}, {"start": 560.65, "end": 560.95, "word": " type", "probability": 0.8251953125}, {"start": 560.95, "end": 561.31, "word": " is", "probability": 0.83056640625}, {"start": 561.31, "end": 561.55, "word": " sampling", "probability": 0.8779296875}, {"start": 561.55, "end": 561.89, "word": " error.", "probability": 0.90087890625}, {"start": 562.79, "end": 563.21, "word": " Sampling", "probability": 0.943115234375}, {"start": 563.21, "end": 563.47, "word": " error", "probability": 0.87060546875}, {"start": 563.47, "end": 564.39, "word": " always", "probability": 0.87890625}, {"start": 564.39, "end": 565.49, "word": " happens,", "probability": 0.93505859375}, {"start": 565.71, "end": 565.99, "word": " always", "probability": 0.88623046875}, {"start": 565.99, "end": 566.47, "word": " exists.", "probability": 0.79345703125}, {"start": 566.67, "end": 566.79, "word": " For", "probability": 0.94580078125}, {"start": 566.79, "end": 567.13, "word": " example,", "probability": 0.9697265625}, {"start": 567.25, "end": 567.65, "word": " suppose", "probability": 0.90576171875}, {"start": 567.65, "end": 569.11, "word": " you", "probability": 0.8896484375}, {"start": 569.11, "end": 569.39, "word": " are", "probability": 0.93603515625}, {"start": 569.39, "end": 569.99, "word": " around", "probability": 0.93212890625}, {"start": 569.99, "end": 570.53, "word": " 50", "probability": 0.6572265625}, {"start": 570.53, "end": 571.05, "word": " students", "probability": 0.970703125}, {"start": 571.05, "end": 571.29, "word": " in", "probability": 0.93994140625}, {"start": 571.29, "end": 571.43, "word": " this", "probability": 0.94091796875}, {"start": 571.43, "end": 571.79, "word": " class.", "probability": 0.9619140625}, {"start": 572.19, "end": 572.43, "word": " Suppose", "probability": 0.83447265625}, {"start": 572.43, "end": 572.57, "word": " I", "probability": 0.9443359375}, {"start": 
572.57, "end": 573.15, "word": " select", "probability": 0.8681640625}, {"start": 573.15, "end": 574.31, "word": " randomly", "probability": 0.73583984375}, {"start": 574.31, "end": 574.85, "word": " 20", "probability": 0.87451171875}, {"start": 574.85, "end": 575.05, "word": " of", "probability": 0.97119140625}, {"start": 575.05, "end": 575.21, "word": " you.", "probability": 0.96044921875}, {"start": 577.25, "end": 577.65, "word": " And", "probability": 0.91796875}, {"start": 577.65, "end": 577.89, "word": " I", "probability": 0.955078125}, {"start": 577.89, "end": 578.01, "word": " am", "probability": 0.78466796875}, {"start": 578.01, "end": 578.45, "word": " interested", "probability": 0.86572265625}, {"start": 578.45, "end": 579.07, "word": " suppose", "probability": 0.428955078125}, {"start": 579.07, "end": 580.13, "word": " in", "probability": 0.9013671875}, {"start": 580.13, "end": 580.39, "word": " your", "probability": 0.89453125}, {"start": 580.39, "end": 580.79, "word": " age.", "probability": 0.9404296875}, {"start": 582.71, "end": 582.99, "word": " Maybe", "probability": 0.91943359375}, {"start": 582.99, "end": 583.25, "word": " for", "probability": 0.93359375}, {"start": 583.25, "end": 583.57, "word": " this", "probability": 0.94775390625}, {"start": 583.57, "end": 584.09, "word": " sample.", "probability": 0.6376953125}], "temperature": 1.0}, {"id": 23, "seek": 61637, "start": 586.59, "end": 616.37, "text": " I will get an average of your age of 19 years someone select another sample from the same population with the same size maybe the average of your age is not equal to 19 years maybe 19 years 3 months", "tokens": [286, 486, 483, 364, 4274, 295, 428, 3205, 295, 1294, 924, 1580, 3048, 1071, 6889, 490, 264, 912, 4415, 365, 264, 912, 2744, 1310, 264, 4274, 295, 428, 3205, 307, 406, 2681, 281, 1294, 924, 1310, 1294, 924, 805, 2493], "avg_logprob": -0.20160061557118486, "compression_ratio": 1.6178861788617886, "no_speech_prob": 0.0, "words": [{"start": 
586.59, "end": 586.85, "word": " I", "probability": 0.81103515625}, {"start": 586.85, "end": 587.05, "word": " will", "probability": 0.8662109375}, {"start": 587.05, "end": 587.39, "word": " get", "probability": 0.94384765625}, {"start": 587.39, "end": 588.77, "word": " an", "probability": 0.90771484375}, {"start": 588.77, "end": 589.29, "word": " average", "probability": 0.79443359375}, {"start": 589.29, "end": 589.61, "word": " of", "probability": 0.9609375}, {"start": 589.61, "end": 589.83, "word": " your", "probability": 0.8837890625}, {"start": 589.83, "end": 590.31, "word": " age", "probability": 0.94091796875}, {"start": 590.31, "end": 592.87, "word": " of", "probability": 0.8154296875}, {"start": 592.87, "end": 593.23, "word": " 19", "probability": 0.689453125}, {"start": 593.23, "end": 593.61, "word": " years", "probability": 0.908203125}, {"start": 593.61, "end": 597.37, "word": " someone", "probability": 0.30029296875}, {"start": 597.37, "end": 598.89, "word": " select", "probability": 0.8359375}, {"start": 598.89, "end": 599.37, "word": " another", "probability": 0.916015625}, {"start": 599.37, "end": 599.83, "word": " sample", "probability": 0.8291015625}, {"start": 599.83, "end": 601.05, "word": " from", "probability": 0.86767578125}, {"start": 601.05, "end": 601.27, "word": " the", "probability": 0.91748046875}, {"start": 601.27, "end": 601.51, "word": " same", "probability": 0.90625}, {"start": 601.51, "end": 602.05, "word": " population", "probability": 0.9384765625}, {"start": 602.05, "end": 603.73, "word": " with", "probability": 0.873046875}, {"start": 603.73, "end": 603.95, "word": " the", "probability": 0.9052734375}, {"start": 603.95, "end": 604.15, "word": " same", "probability": 0.90185546875}, {"start": 604.15, "end": 604.79, "word": " size", "probability": 0.83740234375}, {"start": 604.79, "end": 608.67, "word": " maybe", "probability": 0.763671875}, {"start": 608.67, "end": 609.77, "word": " the", "probability": 0.86669921875}, {"start": 
609.77, "end": 610.13, "word": " average", "probability": 0.7900390625}, {"start": 610.13, "end": 610.45, "word": " of", "probability": 0.9560546875}, {"start": 610.45, "end": 610.63, "word": " your", "probability": 0.8837890625}, {"start": 610.63, "end": 611.09, "word": " age", "probability": 0.93896484375}, {"start": 611.09, "end": 612.07, "word": " is", "probability": 0.80029296875}, {"start": 612.07, "end": 612.27, "word": " not", "probability": 0.9453125}, {"start": 612.27, "end": 612.69, "word": " equal", "probability": 0.89013671875}, {"start": 612.69, "end": 612.97, "word": " to", "probability": 0.615234375}, {"start": 612.97, "end": 613.25, "word": " 19", "probability": 0.79150390625}, {"start": 613.25, "end": 613.53, "word": " years", "probability": 0.92578125}, {"start": 613.53, "end": 613.77, "word": " maybe", "probability": 0.63525390625}, {"start": 613.77, "end": 614.17, "word": " 19", "probability": 0.892578125}, {"start": 614.17, "end": 614.89, "word": " years", "probability": 0.89501953125}, {"start": 614.89, "end": 616.17, "word": " 3", "probability": 0.421875}, {"start": 616.17, "end": 616.37, "word": " months", "probability": 0.72705078125}], "temperature": 1.0}, {"id": 24, "seek": 64683, "start": 619.33, "end": 646.83, "text": " Someone else maybe also select the same number of students, but the average of the class might be 20 years. So the first one, second tier, each of them has different sample statistics, I mean different sample means. 
This difference or this error actually is called sampling error and always happens.", "tokens": [8734, 1646, 1310, 611, 3048, 264, 912, 1230, 295, 1731, 11, 457, 264, 4274, 295, 264, 1508, 1062, 312, 945, 924, 13, 407, 264, 700, 472, 11, 1150, 12362, 11, 1184, 295, 552, 575, 819, 6889, 12523, 11, 286, 914, 819, 6889, 1355, 13, 639, 2649, 420, 341, 6713, 767, 307, 1219, 21179, 6713, 293, 1009, 2314, 13], "avg_logprob": -0.23450741676960962, "compression_ratio": 1.5957446808510638, "no_speech_prob": 0.0, "words": [{"start": 619.33, "end": 619.73, "word": " Someone", "probability": 0.5029296875}, {"start": 619.73, "end": 620.13, "word": " else", "probability": 0.91162109375}, {"start": 620.13, "end": 620.41, "word": " maybe", "probability": 0.5888671875}, {"start": 620.41, "end": 622.35, "word": " also", "probability": 0.289794921875}, {"start": 622.35, "end": 622.93, "word": " select", "probability": 0.84130859375}, {"start": 622.93, "end": 623.37, "word": " the", "probability": 0.350341796875}, {"start": 623.37, "end": 623.55, "word": " same", "probability": 0.90478515625}, {"start": 623.55, "end": 623.91, "word": " number", "probability": 0.93505859375}, {"start": 623.91, "end": 624.79, "word": " of", "probability": 0.95458984375}, {"start": 624.79, "end": 625.29, "word": " students,", "probability": 0.88525390625}, {"start": 626.15, "end": 626.47, "word": " but", "probability": 0.845703125}, {"start": 626.47, "end": 626.61, "word": " the", "probability": 0.62060546875}, {"start": 626.61, "end": 626.93, "word": " average", "probability": 0.7509765625}, {"start": 626.93, "end": 627.45, "word": " of", "probability": 0.95263671875}, {"start": 627.45, "end": 627.59, "word": " the", "probability": 0.84423828125}, {"start": 627.59, "end": 627.91, "word": " class", "probability": 0.9580078125}, {"start": 627.91, "end": 628.41, "word": " might", "probability": 0.9052734375}, {"start": 628.41, "end": 628.65, "word": " be", "probability": 0.94873046875}, {"start": 628.65, 
"end": 628.91, "word": " 20", "probability": 0.7802734375}, {"start": 628.91, "end": 629.23, "word": " years.", "probability": 0.78369140625}, {"start": 630.27, "end": 630.63, "word": " So", "probability": 0.93994140625}, {"start": 630.63, "end": 630.79, "word": " the", "probability": 0.7587890625}, {"start": 630.79, "end": 631.03, "word": " first", "probability": 0.876953125}, {"start": 631.03, "end": 631.29, "word": " one,", "probability": 0.9345703125}, {"start": 631.69, "end": 632.11, "word": " second", "probability": 0.7197265625}, {"start": 632.11, "end": 632.59, "word": " tier,", "probability": 0.58544921875}, {"start": 633.07, "end": 633.37, "word": " each", "probability": 0.94287109375}, {"start": 633.37, "end": 633.51, "word": " of", "probability": 0.919921875}, {"start": 633.51, "end": 633.69, "word": " them", "probability": 0.89306640625}, {"start": 633.69, "end": 634.15, "word": " has", "probability": 0.9326171875}, {"start": 634.15, "end": 635.45, "word": " different", "probability": 0.87060546875}, {"start": 635.45, "end": 636.13, "word": " sample", "probability": 0.75732421875}, {"start": 636.13, "end": 636.75, "word": " statistics,", "probability": 0.7646484375}, {"start": 637.07, "end": 637.15, "word": " I", "probability": 0.931640625}, {"start": 637.15, "end": 637.25, "word": " mean", "probability": 0.9580078125}, {"start": 637.25, "end": 637.83, "word": " different", "probability": 0.671875}, {"start": 637.83, "end": 638.75, "word": " sample", "probability": 0.87744140625}, {"start": 638.75, "end": 639.09, "word": " means.", "probability": 0.74658203125}, {"start": 639.85, "end": 640.29, "word": " This", "probability": 0.87353515625}, {"start": 640.29, "end": 640.95, "word": " difference", "probability": 0.8896484375}, {"start": 640.95, "end": 642.21, "word": " or", "probability": 0.51513671875}, {"start": 642.21, "end": 642.45, "word": " this", "probability": 0.94775390625}, {"start": 642.45, "end": 642.71, "word": " error", "probability": 
0.86669921875}, {"start": 642.71, "end": 643.21, "word": " actually", "probability": 0.79931640625}, {"start": 643.21, "end": 643.71, "word": " is", "probability": 0.84228515625}, {"start": 643.71, "end": 644.09, "word": " called", "probability": 0.89501953125}, {"start": 644.09, "end": 645.03, "word": " sampling", "probability": 0.7509765625}, {"start": 645.03, "end": 645.33, "word": " error", "probability": 0.94775390625}, {"start": 645.33, "end": 646.01, "word": " and", "probability": 0.50830078125}, {"start": 646.01, "end": 646.47, "word": " always", "probability": 0.8798828125}, {"start": 646.47, "end": 646.83, "word": " happens.", "probability": 0.927734375}], "temperature": 1.0}, {"id": 25, "seek": 66662, "start": 648.86, "end": 666.62, "text": " So now we have five types of errors. One is called coverage error. In this case, you have problem with the frame. The other type is called non-response error. It means you have problem with following up. Measurement error, it means you have", "tokens": [407, 586, 321, 362, 1732, 3467, 295, 13603, 13, 1485, 307, 1219, 9645, 6713, 13, 682, 341, 1389, 11, 291, 362, 1154, 365, 264, 3920, 13, 440, 661, 2010, 307, 1219, 2107, 12, 5667, 3739, 6713, 13, 467, 1355, 291, 362, 1154, 365, 3480, 493, 13, 41436, 518, 6713, 11, 309, 1355, 291, 362], "avg_logprob": -0.1955965909090909, "compression_ratio": 1.6394557823129252, "no_speech_prob": 0.0, "words": [{"start": 648.86, "end": 649.12, "word": " So", "probability": 0.450439453125}, {"start": 649.12, "end": 649.9, "word": " now", "probability": 0.71630859375}, {"start": 649.9, "end": 650.08, "word": " we", "probability": 0.86669921875}, {"start": 650.08, "end": 650.32, "word": " have", "probability": 0.94873046875}, {"start": 650.32, "end": 650.66, "word": " five", "probability": 0.68798828125}, {"start": 650.66, "end": 651.14, "word": " types", "probability": 0.791015625}, {"start": 651.14, "end": 651.46, "word": " of", "probability": 0.96484375}, {"start": 651.46, "end": 
651.72, "word": " errors.", "probability": 0.81201171875}, {"start": 651.92, "end": 652.04, "word": " One", "probability": 0.8662109375}, {"start": 652.04, "end": 652.2, "word": " is", "probability": 0.908203125}, {"start": 652.2, "end": 652.4, "word": " called", "probability": 0.78662109375}, {"start": 652.4, "end": 652.8, "word": " coverage", "probability": 0.270751953125}, {"start": 652.8, "end": 653.1, "word": " error.", "probability": 0.8779296875}, {"start": 653.68, "end": 653.82, "word": " In", "probability": 0.9453125}, {"start": 653.82, "end": 654.02, "word": " this", "probability": 0.94873046875}, {"start": 654.02, "end": 654.28, "word": " case,", "probability": 0.91845703125}, {"start": 654.36, "end": 654.42, "word": " you", "probability": 0.92724609375}, {"start": 654.42, "end": 654.58, "word": " have", "probability": 0.94873046875}, {"start": 654.58, "end": 654.94, "word": " problem", "probability": 0.5078125}, {"start": 654.94, "end": 655.38, "word": " with", "probability": 0.91064453125}, {"start": 655.38, "end": 656.84, "word": " the", "probability": 0.8623046875}, {"start": 656.84, "end": 657.18, "word": " frame.", "probability": 0.91357421875}, {"start": 658.2, "end": 658.54, "word": " The", "probability": 0.87109375}, {"start": 658.54, "end": 658.74, "word": " other", "probability": 0.8916015625}, {"start": 658.74, "end": 659.0, "word": " type", "probability": 0.96435546875}, {"start": 659.0, "end": 659.14, "word": " is", "probability": 0.93994140625}, {"start": 659.14, "end": 659.36, "word": " called", "probability": 0.880859375}, {"start": 659.36, "end": 659.58, "word": " non", "probability": 0.89013671875}, {"start": 659.58, "end": 660.02, "word": "-response", "probability": 0.90869140625}, {"start": 660.02, "end": 660.28, "word": " error.", "probability": 0.88525390625}, {"start": 661.26, "end": 661.48, "word": " It", "probability": 0.88232421875}, {"start": 661.48, "end": 661.84, "word": " means", "probability": 0.93212890625}, {"start": 
661.84, "end": 662.32, "word": " you", "probability": 0.9267578125}, {"start": 662.32, "end": 662.5, "word": " have", "probability": 0.9501953125}, {"start": 662.5, "end": 662.9, "word": " problem", "probability": 0.8203125}, {"start": 662.9, "end": 663.26, "word": " with", "probability": 0.85888671875}, {"start": 663.26, "end": 663.78, "word": " following", "probability": 0.8935546875}, {"start": 663.78, "end": 664.16, "word": " up.", "probability": 0.953125}, {"start": 665.06, "end": 665.5, "word": " Measurement", "probability": 0.954833984375}, {"start": 665.5, "end": 665.7, "word": " error,", "probability": 0.87890625}, {"start": 665.76, "end": 665.88, "word": " it", "probability": 0.92724609375}, {"start": 665.88, "end": 666.1, "word": " means", "probability": 0.931640625}, {"start": 666.1, "end": 666.28, "word": " you", "probability": 0.955078125}, {"start": 666.28, "end": 666.62, "word": " have", "probability": 0.9501953125}], "temperature": 1.0}, {"id": 26, "seek": 69721, "start": 667.81, "end": 697.21, "text": " Bad questionnaire design, the last type is called semi-error and this one always happens and actually we would like to have this error, I mean this semi-error as small as possible. So these are the steps you have to follow up when you design the questionnaire. 
So again, for these types of errors, the first one coverage error or selection bias.", "tokens": [11523, 44702, 1715, 11, 264, 1036, 2010, 307, 1219, 12909, 12, 260, 2874, 293, 341, 472, 1009, 2314, 293, 767, 321, 576, 411, 281, 362, 341, 6713, 11, 286, 914, 341, 12909, 12, 260, 2874, 382, 1359, 382, 1944, 13, 407, 613, 366, 264, 4439, 291, 362, 281, 1524, 493, 562, 291, 1715, 264, 44702, 13, 407, 797, 11, 337, 613, 3467, 295, 13603, 11, 264, 700, 472, 9645, 6713, 420, 9450, 12577, 13], "avg_logprob": -0.20083333492279054, "compression_ratio": 1.6796116504854368, "no_speech_prob": 0.0, "words": [{"start": 667.81, "end": 668.29, "word": " Bad", "probability": 0.2001953125}, {"start": 668.29, "end": 668.81, "word": " questionnaire", "probability": 0.880859375}, {"start": 668.81, "end": 669.33, "word": " design,", "probability": 0.93798828125}, {"start": 670.27, "end": 670.45, "word": " the", "probability": 0.81982421875}, {"start": 670.45, "end": 670.69, "word": " last", "probability": 0.85546875}, {"start": 670.69, "end": 670.97, "word": " type", "probability": 0.83837890625}, {"start": 670.97, "end": 671.13, "word": " is", "probability": 0.880859375}, {"start": 671.13, "end": 671.37, "word": " called", "probability": 0.8837890625}, {"start": 671.37, "end": 671.63, "word": " semi", "probability": 0.431884765625}, {"start": 671.63, "end": 671.93, "word": "-error", "probability": 0.84130859375}, {"start": 671.93, "end": 672.11, "word": " and", "probability": 0.58984375}, {"start": 672.11, "end": 672.29, "word": " this", "probability": 0.92822265625}, {"start": 672.29, "end": 672.49, "word": " one", "probability": 0.92333984375}, {"start": 672.49, "end": 672.95, "word": " always", "probability": 0.89501953125}, {"start": 672.95, "end": 673.41, "word": " happens", "probability": 0.9521484375}, {"start": 673.41, "end": 674.13, "word": " and", "probability": 0.483642578125}, {"start": 674.13, "end": 674.67, "word": " actually", "probability": 0.865234375}, {"start": 
674.67, "end": 675.27, "word": " we", "probability": 0.67041015625}, {"start": 675.27, "end": 675.45, "word": " would", "probability": 0.90771484375}, {"start": 675.45, "end": 675.81, "word": " like", "probability": 0.93896484375}, {"start": 675.81, "end": 676.53, "word": " to", "probability": 0.92724609375}, {"start": 676.53, "end": 676.77, "word": " have", "probability": 0.95263671875}, {"start": 676.77, "end": 677.05, "word": " this", "probability": 0.93701171875}, {"start": 677.05, "end": 677.39, "word": " error,", "probability": 0.71728515625}, {"start": 678.05, "end": 678.25, "word": " I", "probability": 0.87744140625}, {"start": 678.25, "end": 678.39, "word": " mean", "probability": 0.96875}, {"start": 678.39, "end": 678.75, "word": " this", "probability": 0.78662109375}, {"start": 678.75, "end": 678.99, "word": " semi", "probability": 0.8896484375}, {"start": 678.99, "end": 679.17, "word": "-error", "probability": 0.9441731770833334}, {"start": 679.17, "end": 679.47, "word": " as", "probability": 0.73828125}, {"start": 679.47, "end": 679.89, "word": " small", "probability": 0.92822265625}, {"start": 679.89, "end": 680.59, "word": " as", "probability": 0.9462890625}, {"start": 680.59, "end": 680.95, "word": " possible.", "probability": 0.95263671875}, {"start": 681.53, "end": 681.71, "word": " So", "probability": 0.89501953125}, {"start": 681.71, "end": 681.95, "word": " these", "probability": 0.744140625}, {"start": 681.95, "end": 682.25, "word": " are", "probability": 0.9384765625}, {"start": 682.25, "end": 682.79, "word": " the", "probability": 0.92041015625}, {"start": 682.79, "end": 684.37, "word": " steps", "probability": 0.87646484375}, {"start": 684.37, "end": 684.57, "word": " you", "probability": 0.93994140625}, {"start": 684.57, "end": 684.75, "word": " have", "probability": 0.93701171875}, {"start": 684.75, "end": 684.89, "word": " to", "probability": 0.96923828125}, {"start": 684.89, "end": 685.09, "word": " follow", "probability": 0.90625}, 
{"start": 685.09, "end": 685.29, "word": " up", "probability": 0.8828125}, {"start": 685.29, "end": 685.45, "word": " when", "probability": 0.9267578125}, {"start": 685.45, "end": 685.59, "word": " you", "probability": 0.953125}, {"start": 685.59, "end": 685.89, "word": " design", "probability": 0.9697265625}, {"start": 685.89, "end": 686.05, "word": " the", "probability": 0.58447265625}, {"start": 686.05, "end": 686.43, "word": " questionnaire.", "probability": 0.96826171875}, {"start": 689.49, "end": 690.09, "word": " So", "probability": 0.89892578125}, {"start": 690.09, "end": 690.39, "word": " again,", "probability": 0.8896484375}, {"start": 690.73, "end": 691.03, "word": " for", "probability": 0.94775390625}, {"start": 691.03, "end": 691.31, "word": " these", "probability": 0.83349609375}, {"start": 691.31, "end": 691.65, "word": " types", "probability": 0.79638671875}, {"start": 691.65, "end": 691.83, "word": " of", "probability": 0.96826171875}, {"start": 691.83, "end": 692.13, "word": " errors,", "probability": 0.826171875}, {"start": 694.09, "end": 694.27, "word": " the", "probability": 0.8681640625}, {"start": 694.27, "end": 694.47, "word": " first", "probability": 0.88232421875}, {"start": 694.47, "end": 694.71, "word": " one", "probability": 0.9228515625}, {"start": 694.71, "end": 695.15, "word": " coverage", "probability": 0.52001953125}, {"start": 695.15, "end": 695.59, "word": " error", "probability": 0.86572265625}, {"start": 695.59, "end": 696.11, "word": " or", "probability": 0.88037109375}, {"start": 696.11, "end": 696.75, "word": " selection", "probability": 0.88427734375}, {"start": 696.75, "end": 697.21, "word": " bias.", "probability": 0.9638671875}], "temperature": 1.0}, {"id": 27, "seek": 71311, "start": 699.15, "end": 713.11, "text": " This type of error exists if some groups are excluded from the frame and have no chance of being selected. That's the first type of error, coverage error. 
So it means there is a problem.", "tokens": [639, 2010, 295, 6713, 8198, 498, 512, 3935, 366, 29486, 490, 264, 3920, 293, 362, 572, 2931, 295, 885, 8209, 13, 663, 311, 264, 700, 2010, 295, 6713, 11, 9645, 6713, 13, 407, 309, 1355, 456, 307, 257, 1154, 13], "avg_logprob": -0.16968369483947754, "compression_ratio": 1.4384615384615385, "no_speech_prob": 0.0, "words": [{"start": 699.15, "end": 699.61, "word": " This", "probability": 0.31396484375}, {"start": 699.61, "end": 700.01, "word": " type", "probability": 0.9541015625}, {"start": 700.01, "end": 700.17, "word": " of", "probability": 0.9716796875}, {"start": 700.17, "end": 700.39, "word": " error", "probability": 0.87158203125}, {"start": 700.39, "end": 700.97, "word": " exists", "probability": 0.830078125}, {"start": 700.97, "end": 701.31, "word": " if", "probability": 0.9404296875}, {"start": 701.31, "end": 701.65, "word": " some", "probability": 0.82763671875}, {"start": 701.65, "end": 702.07, "word": " groups", "probability": 0.94775390625}, {"start": 702.07, "end": 702.33, "word": " are", "probability": 0.94189453125}, {"start": 702.33, "end": 702.89, "word": " excluded", "probability": 0.880859375}, {"start": 702.89, "end": 703.59, "word": " from", "probability": 0.8955078125}, {"start": 703.59, "end": 703.83, "word": " the", "probability": 0.91552734375}, {"start": 703.83, "end": 704.19, "word": " frame", "probability": 0.92236328125}, {"start": 704.19, "end": 704.77, "word": " and", "probability": 0.8046875}, {"start": 704.77, "end": 704.97, "word": " have", "probability": 0.94189453125}, {"start": 704.97, "end": 705.15, "word": " no", "probability": 0.94921875}, {"start": 705.15, "end": 705.53, "word": " chance", "probability": 0.97119140625}, {"start": 705.53, "end": 705.69, "word": " of", "probability": 0.9638671875}, {"start": 705.69, "end": 705.85, "word": " being", "probability": 0.943359375}, {"start": 705.85, "end": 706.35, "word": " selected.", "probability": 0.8916015625}, {"start": 708.43, 
"end": 708.95, "word": " That's", "probability": 0.805419921875}, {"start": 708.95, "end": 709.09, "word": " the", "probability": 0.90673828125}, {"start": 709.09, "end": 709.35, "word": " first", "probability": 0.8779296875}, {"start": 709.35, "end": 709.63, "word": " type", "probability": 0.98046875}, {"start": 709.63, "end": 709.77, "word": " of", "probability": 0.96484375}, {"start": 709.77, "end": 709.89, "word": " error,", "probability": 0.8828125}, {"start": 710.05, "end": 710.37, "word": " coverage", "probability": 0.456787109375}, {"start": 710.37, "end": 710.63, "word": " error.", "probability": 0.91943359375}, {"start": 711.51, "end": 711.81, "word": " So", "probability": 0.9130859375}, {"start": 711.81, "end": 711.97, "word": " it", "probability": 0.7939453125}, {"start": 711.97, "end": 712.23, "word": " means", "probability": 0.9208984375}, {"start": 712.23, "end": 712.47, "word": " there", "probability": 0.76171875}, {"start": 712.47, "end": 712.63, "word": " is", "probability": 0.7119140625}, {"start": 712.63, "end": 712.75, "word": " a", "probability": 0.99169921875}, {"start": 712.75, "end": 713.11, "word": " problem.", "probability": 0.87353515625}], "temperature": 1.0}, {"id": 28, "seek": 74295, "start": 714.25, "end": 742.95, "text": " on the population frame. Non-response error bias, it means people who don't respond may be different from those who do respond. For example, suppose I have a sample of tennis students. 
And I got responses from number two, number five,", "tokens": [322, 264, 4415, 3920, 13, 8774, 12, 5667, 3739, 6713, 12577, 11, 309, 1355, 561, 567, 500, 380, 4196, 815, 312, 819, 490, 729, 567, 360, 4196, 13, 1171, 1365, 11, 7297, 286, 362, 257, 6889, 295, 18118, 1731, 13, 400, 286, 658, 13019, 490, 1230, 732, 11, 1230, 1732, 11], "avg_logprob": -0.1849459120287345, "compression_ratio": 1.4968152866242037, "no_speech_prob": 0.0, "words": [{"start": 714.25, "end": 714.57, "word": " on", "probability": 0.337890625}, {"start": 714.57, "end": 714.77, "word": " the", "probability": 0.9111328125}, {"start": 714.77, "end": 715.27, "word": " population", "probability": 0.9365234375}, {"start": 715.27, "end": 715.69, "word": " frame.", "probability": 0.6064453125}, {"start": 716.51, "end": 716.83, "word": " Non", "probability": 0.92724609375}, {"start": 716.83, "end": 717.37, "word": "-response", "probability": 0.8640950520833334}, {"start": 717.37, "end": 717.65, "word": " error", "probability": 0.685546875}, {"start": 717.65, "end": 718.17, "word": " bias,", "probability": 0.9609375}, {"start": 718.33, "end": 719.29, "word": " it", "probability": 0.92236328125}, {"start": 719.29, "end": 719.55, "word": " means", "probability": 0.9345703125}, {"start": 719.55, "end": 719.95, "word": " people", "probability": 0.92431640625}, {"start": 719.95, "end": 720.17, "word": " who", "probability": 0.90380859375}, {"start": 720.17, "end": 720.45, "word": " don't", "probability": 0.971435546875}, {"start": 720.45, "end": 721.05, "word": " respond", "probability": 0.8515625}, {"start": 721.05, "end": 721.41, "word": " may", "probability": 0.69873046875}, {"start": 721.41, "end": 721.51, "word": " be", "probability": 0.95849609375}, {"start": 721.51, "end": 721.93, "word": " different", "probability": 0.9072265625}, {"start": 721.93, "end": 722.27, "word": " from", "probability": 0.87548828125}, {"start": 722.27, "end": 722.75, "word": " those", "probability": 0.88232421875}, {"start": 
722.75, "end": 723.01, "word": " who", "probability": 0.904296875}, {"start": 723.01, "end": 723.23, "word": " do", "probability": 0.9169921875}, {"start": 723.23, "end": 723.73, "word": " respond.", "probability": 0.87109375}, {"start": 724.31, "end": 724.63, "word": " For", "probability": 0.94921875}, {"start": 724.63, "end": 724.99, "word": " example,", "probability": 0.9716796875}, {"start": 725.97, "end": 726.45, "word": " suppose", "probability": 0.87939453125}, {"start": 726.45, "end": 727.51, "word": " I", "probability": 0.95263671875}, {"start": 727.51, "end": 727.85, "word": " have", "probability": 0.8603515625}, {"start": 727.85, "end": 728.13, "word": " a", "probability": 0.76220703125}, {"start": 728.13, "end": 728.53, "word": " sample", "probability": 0.89208984375}, {"start": 728.53, "end": 729.73, "word": " of", "probability": 0.9658203125}, {"start": 729.73, "end": 731.03, "word": " tennis", "probability": 0.908203125}, {"start": 731.03, "end": 731.41, "word": " students.", "probability": 0.6796875}, {"start": 735.91, "end": 736.73, "word": " And", "probability": 0.90966796875}, {"start": 736.73, "end": 736.87, "word": " I", "probability": 0.86474609375}, {"start": 736.87, "end": 737.21, "word": " got", "probability": 0.779296875}, {"start": 737.21, "end": 738.87, "word": " responses", "probability": 0.89794921875}, {"start": 738.87, "end": 739.57, "word": " from", "probability": 0.88720703125}, {"start": 739.57, "end": 740.33, "word": " number", "probability": 0.8857421875}, {"start": 740.33, "end": 740.69, "word": " two,", "probability": 0.71435546875}, {"start": 741.31, "end": 742.43, "word": " number", "probability": 0.93017578125}, {"start": 742.43, "end": 742.95, "word": " five,", "probability": 0.8994140625}], "temperature": 1.0}, {"id": 29, "seek": 77152, "start": 744.12, "end": 771.52, "text": " And number 10. So I have these point of views for these three students. Now the other seven students might be they have different opinions. 
So the only thing you have, the opinions of just the three, and maybe the rest have different opinions, it means in this case you will have something called non-responsiveness.", "tokens": [400, 1230, 1266, 13, 407, 286, 362, 613, 935, 295, 6809, 337, 613, 1045, 1731, 13, 823, 264, 661, 3407, 1731, 1062, 312, 436, 362, 819, 11819, 13, 407, 264, 787, 551, 291, 362, 11, 264, 11819, 295, 445, 264, 1045, 11, 293, 1310, 264, 1472, 362, 819, 11819, 11, 309, 1355, 294, 341, 1389, 291, 486, 362, 746, 1219, 2107, 12, 28930, 8477, 13], "avg_logprob": -0.21519886273326297, "compression_ratio": 1.6424870466321244, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 744.12, "end": 744.32, "word": " And", "probability": 0.2347412109375}, {"start": 744.32, "end": 744.54, "word": " number", "probability": 0.8876953125}, {"start": 744.54, "end": 744.86, "word": " 10.", "probability": 0.62109375}, {"start": 746.26, "end": 746.64, "word": " So", "probability": 0.92724609375}, {"start": 746.64, "end": 747.14, "word": " I", "probability": 0.810546875}, {"start": 747.14, "end": 747.42, "word": " have", "probability": 0.93701171875}, {"start": 747.42, "end": 747.88, "word": " these", "probability": 0.7939453125}, {"start": 747.88, "end": 748.42, "word": " point", "probability": 0.92724609375}, {"start": 748.42, "end": 748.62, "word": " of", "probability": 0.46337890625}, {"start": 748.62, "end": 748.94, "word": " views", "probability": 0.74658203125}, {"start": 748.94, "end": 749.42, "word": " for", "probability": 0.88037109375}, {"start": 749.42, "end": 749.8, "word": " these", "probability": 0.8525390625}, {"start": 749.8, "end": 750.56, "word": " three", "probability": 0.75390625}, {"start": 750.56, "end": 751.12, "word": " students.", "probability": 0.97998046875}, {"start": 752.12, "end": 752.78, "word": " Now", "probability": 0.95458984375}, {"start": 752.78, "end": 753.0, "word": " the", "probability": 0.560546875}, {"start": 753.0, "end": 753.36, "word": " other", 
"probability": 0.8955078125}, {"start": 753.36, "end": 754.0, "word": " seven", "probability": 0.8935546875}, {"start": 754.0, "end": 754.86, "word": " students", "probability": 0.97705078125}, {"start": 754.86, "end": 756.02, "word": " might", "probability": 0.85595703125}, {"start": 756.02, "end": 756.38, "word": " be", "probability": 0.74853515625}, {"start": 756.38, "end": 757.26, "word": " they", "probability": 0.517578125}, {"start": 757.26, "end": 757.66, "word": " have", "probability": 0.9482421875}, {"start": 757.66, "end": 758.2, "word": " different", "probability": 0.890625}, {"start": 758.2, "end": 758.92, "word": " opinions.", "probability": 0.93017578125}, {"start": 759.9, "end": 760.38, "word": " So", "probability": 0.9423828125}, {"start": 760.38, "end": 760.78, "word": " the", "probability": 0.8759765625}, {"start": 760.78, "end": 761.1, "word": " only", "probability": 0.93017578125}, {"start": 761.1, "end": 762.54, "word": " thing", "probability": 0.92529296875}, {"start": 762.54, "end": 762.72, "word": " you", "probability": 0.96337890625}, {"start": 762.72, "end": 763.0, "word": " have,", "probability": 0.94677734375}, {"start": 763.12, "end": 763.22, "word": " the", "probability": 0.83056640625}, {"start": 763.22, "end": 763.64, "word": " opinions", "probability": 0.88427734375}, {"start": 763.64, "end": 764.0, "word": " of", "probability": 0.953125}, {"start": 764.0, "end": 764.38, "word": " just", "probability": 0.90283203125}, {"start": 764.38, "end": 764.96, "word": " the", "probability": 0.477783203125}, {"start": 764.96, "end": 765.22, "word": " three,", "probability": 0.9365234375}, {"start": 765.52, "end": 765.7, "word": " and", "probability": 0.9130859375}, {"start": 765.7, "end": 765.86, "word": " maybe", "probability": 0.9208984375}, {"start": 765.86, "end": 766.02, "word": " the", "probability": 0.9130859375}, {"start": 766.02, "end": 766.24, "word": " rest", "probability": 0.90771484375}, {"start": 766.24, "end": 766.46, "word": " 
have", "probability": 0.93310546875}, {"start": 766.46, "end": 766.78, "word": " different", "probability": 0.888671875}, {"start": 766.78, "end": 767.26, "word": " opinions,", "probability": 0.927734375}, {"start": 767.64, "end": 767.8, "word": " it", "probability": 0.8828125}, {"start": 767.8, "end": 768.14, "word": " means", "probability": 0.93310546875}, {"start": 768.14, "end": 768.34, "word": " in", "probability": 0.81298828125}, {"start": 768.34, "end": 768.54, "word": " this", "probability": 0.9482421875}, {"start": 768.54, "end": 768.8, "word": " case", "probability": 0.9228515625}, {"start": 768.8, "end": 769.02, "word": " you", "probability": 0.7138671875}, {"start": 769.02, "end": 769.22, "word": " will", "probability": 0.87939453125}, {"start": 769.22, "end": 769.62, "word": " have", "probability": 0.9462890625}, {"start": 769.62, "end": 770.32, "word": " something", "probability": 0.876953125}, {"start": 770.32, "end": 770.68, "word": " called", "probability": 0.85791015625}, {"start": 770.68, "end": 770.92, "word": " non", "probability": 0.8896484375}, {"start": 770.92, "end": 771.52, "word": "-responsiveness.", "probability": 0.6266276041666666}], "temperature": 1.0}, {"id": 30, "seek": 79713, "start": 772.49, "end": 797.13, "text": " Or the same as we said before, if your question is designed in a correct way. The other type, Sample Error, variations from sample to sample will always exist. As I mentioned, here we select six samples, each one has different sample mean. 
The other type, Measurement Error, due to weakness in question design.", "tokens": [1610, 264, 912, 382, 321, 848, 949, 11, 498, 428, 1168, 307, 4761, 294, 257, 3006, 636, 13, 440, 661, 2010, 11, 4832, 781, 3300, 2874, 11, 17840, 490, 6889, 281, 6889, 486, 1009, 2514, 13, 1018, 286, 2835, 11, 510, 321, 3048, 2309, 10938, 11, 1184, 472, 575, 819, 6889, 914, 13, 440, 661, 2010, 11, 41436, 518, 3300, 2874, 11, 3462, 281, 12772, 294, 1168, 1715, 13], "avg_logprob": -0.207812505534717, "compression_ratio": 1.6030927835051547, "no_speech_prob": 0.0, "words": [{"start": 772.49, "end": 772.77, "word": " Or", "probability": 0.673828125}, {"start": 772.77, "end": 772.93, "word": " the", "probability": 0.7412109375}, {"start": 772.93, "end": 773.11, "word": " same", "probability": 0.89501953125}, {"start": 773.11, "end": 773.25, "word": " as", "probability": 0.9384765625}, {"start": 773.25, "end": 773.41, "word": " we", "probability": 0.91748046875}, {"start": 773.41, "end": 773.83, "word": " said", "probability": 0.9208984375}, {"start": 773.83, "end": 774.27, "word": " before,", "probability": 0.85693359375}, {"start": 774.81, "end": 775.05, "word": " if", "probability": 0.93017578125}, {"start": 775.05, "end": 775.35, "word": " your", "probability": 0.86962890625}, {"start": 775.35, "end": 775.81, "word": " question", "probability": 0.93408203125}, {"start": 775.81, "end": 776.07, "word": " is", "probability": 0.9482421875}, {"start": 776.07, "end": 776.53, "word": " designed", "probability": 0.89697265625}, {"start": 776.53, "end": 777.15, "word": " in", "probability": 0.90869140625}, {"start": 777.15, "end": 777.19, "word": " a", "probability": 0.50537109375}, {"start": 777.19, "end": 777.47, "word": " correct", "probability": 0.9013671875}, {"start": 777.47, "end": 777.73, "word": " way.", "probability": 0.947265625}, {"start": 778.73, "end": 778.87, "word": " The", "probability": 0.85205078125}, {"start": 778.87, "end": 779.03, "word": " other", "probability": 
0.83544921875}, {"start": 779.03, "end": 779.31, "word": " type,", "probability": 0.96630859375}, {"start": 779.45, "end": 779.67, "word": " Sample", "probability": 0.5706787109375}, {"start": 779.67, "end": 780.07, "word": " Error,", "probability": 0.882568359375}, {"start": 780.37, "end": 781.41, "word": " variations", "probability": 0.50048828125}, {"start": 781.41, "end": 781.79, "word": " from", "probability": 0.8759765625}, {"start": 781.79, "end": 782.13, "word": " sample", "probability": 0.78955078125}, {"start": 782.13, "end": 782.37, "word": " to", "probability": 0.96484375}, {"start": 782.37, "end": 782.63, "word": " sample", "probability": 0.89208984375}, {"start": 782.63, "end": 782.79, "word": " will", "probability": 0.70947265625}, {"start": 782.79, "end": 783.07, "word": " always", "probability": 0.90478515625}, {"start": 783.07, "end": 783.47, "word": " exist.", "probability": 0.94677734375}, {"start": 784.21, "end": 784.43, "word": " As", "probability": 0.951171875}, {"start": 784.43, "end": 784.55, "word": " I", "probability": 0.984375}, {"start": 784.55, "end": 784.99, "word": " mentioned,", "probability": 0.80419921875}, {"start": 786.05, "end": 786.23, "word": " here", "probability": 0.67724609375}, {"start": 786.23, "end": 786.41, "word": " we", "probability": 0.72216796875}, {"start": 786.41, "end": 786.79, "word": " select", "probability": 0.68408203125}, {"start": 786.79, "end": 787.63, "word": " six", "probability": 0.32177734375}, {"start": 787.63, "end": 788.55, "word": " samples,", "probability": 0.90087890625}, {"start": 788.71, "end": 788.95, "word": " each", "probability": 0.9228515625}, {"start": 788.95, "end": 789.17, "word": " one", "probability": 0.93115234375}, {"start": 789.17, "end": 789.57, "word": " has", "probability": 0.9306640625}, {"start": 789.57, "end": 790.47, "word": " different", "probability": 0.57421875}, {"start": 790.47, "end": 791.15, "word": " sample", "probability": 0.8701171875}, {"start": 791.15, "end": 
791.41, "word": " mean.", "probability": 0.662109375}, {"start": 792.23, "end": 792.57, "word": " The", "probability": 0.86669921875}, {"start": 792.57, "end": 792.85, "word": " other", "probability": 0.87646484375}, {"start": 792.85, "end": 793.27, "word": " type,", "probability": 0.97412109375}, {"start": 793.51, "end": 794.33, "word": " Measurement", "probability": 0.87744140625}, {"start": 794.33, "end": 794.73, "word": " Error,", "probability": 0.96044921875}, {"start": 795.31, "end": 795.49, "word": " due", "probability": 0.71728515625}, {"start": 795.49, "end": 795.69, "word": " to", "probability": 0.9697265625}, {"start": 795.69, "end": 796.07, "word": " weakness", "probability": 0.92333984375}, {"start": 796.07, "end": 796.41, "word": " in", "probability": 0.943359375}, {"start": 796.41, "end": 796.75, "word": " question", "probability": 0.6708984375}, {"start": 796.75, "end": 797.13, "word": " design.", "probability": 0.96240234375}], "temperature": 1.0}, {"id": 31, "seek": 82652, "start": 798.22, "end": 826.52, "text": " So that's the type of survey errors. So one more time, average error, it means you exclude a group or groups from the frame. So in this case, suppose I excluded these from my frame. 
So I just select the sample from all of these except this portion, or these two groups.", "tokens": [407, 300, 311, 264, 2010, 295, 8984, 13603, 13, 407, 472, 544, 565, 11, 4274, 6713, 11, 309, 1355, 291, 33536, 257, 1594, 420, 3935, 490, 264, 3920, 13, 407, 294, 341, 1389, 11, 7297, 286, 29486, 613, 490, 452, 3920, 13, 407, 286, 445, 3048, 264, 6889, 490, 439, 295, 613, 3993, 341, 8044, 11, 420, 613, 732, 3935, 13], "avg_logprob": -0.21396169138531532, "compression_ratio": 1.6071428571428572, "no_speech_prob": 0.0, "words": [{"start": 798.22, "end": 798.5, "word": " So", "probability": 0.8505859375}, {"start": 798.5, "end": 798.92, "word": " that's", "probability": 0.880615234375}, {"start": 798.92, "end": 799.2, "word": " the", "probability": 0.90771484375}, {"start": 799.2, "end": 799.74, "word": " type", "probability": 0.75048828125}, {"start": 799.74, "end": 800.64, "word": " of", "probability": 0.92529296875}, {"start": 800.64, "end": 801.46, "word": " survey", "probability": 0.80126953125}, {"start": 801.46, "end": 802.56, "word": " errors.", "probability": 0.70751953125}, {"start": 803.7, "end": 803.94, "word": " So", "probability": 0.94140625}, {"start": 803.94, "end": 804.16, "word": " one", "probability": 0.77978515625}, {"start": 804.16, "end": 804.34, "word": " more", "probability": 0.93310546875}, {"start": 804.34, "end": 804.7, "word": " time,", "probability": 0.87548828125}, {"start": 805.18, "end": 805.6, "word": " average", "probability": 0.322021484375}, {"start": 805.6, "end": 805.98, "word": " error,", "probability": 0.79931640625}, {"start": 806.26, "end": 806.48, "word": " it", "probability": 0.7470703125}, {"start": 806.48, "end": 806.82, "word": " means", "probability": 0.93115234375}, {"start": 806.82, "end": 807.16, "word": " you", "probability": 0.9404296875}, {"start": 807.16, "end": 808.34, "word": " exclude", "probability": 0.86865234375}, {"start": 808.34, "end": 809.54, "word": " a", "probability": 0.381103515625}, {"start": 809.54, 
"end": 809.84, "word": " group", "probability": 0.96484375}, {"start": 809.84, "end": 810.04, "word": " or", "probability": 0.939453125}, {"start": 810.04, "end": 810.54, "word": " groups", "probability": 0.9208984375}, {"start": 810.54, "end": 812.12, "word": " from", "probability": 0.8623046875}, {"start": 812.12, "end": 812.32, "word": " the", "probability": 0.7353515625}, {"start": 812.32, "end": 812.6, "word": " frame.", "probability": 0.9013671875}, {"start": 813.06, "end": 813.28, "word": " So", "probability": 0.93994140625}, {"start": 813.28, "end": 813.76, "word": " in", "probability": 0.82763671875}, {"start": 813.76, "end": 813.92, "word": " this", "probability": 0.94482421875}, {"start": 813.92, "end": 814.12, "word": " case,", "probability": 0.91455078125}, {"start": 814.16, "end": 814.34, "word": " suppose", "probability": 0.861328125}, {"start": 814.34, "end": 814.54, "word": " I", "probability": 0.97216796875}, {"start": 814.54, "end": 815.08, "word": " excluded", "probability": 0.89013671875}, {"start": 815.08, "end": 815.52, "word": " these", "probability": 0.466064453125}, {"start": 815.52, "end": 816.28, "word": " from", "probability": 0.88037109375}, {"start": 816.28, "end": 816.48, "word": " my", "probability": 0.96630859375}, {"start": 816.48, "end": 816.82, "word": " frame.", "probability": 0.85009765625}, {"start": 817.6, "end": 817.8, "word": " So", "probability": 0.87451171875}, {"start": 817.8, "end": 817.96, "word": " I", "probability": 0.91357421875}, {"start": 817.96, "end": 819.36, "word": " just", "probability": 0.91943359375}, {"start": 819.36, "end": 819.88, "word": " select", "probability": 0.73779296875}, {"start": 819.88, "end": 820.1, "word": " the", "probability": 0.75244140625}, {"start": 820.1, "end": 820.38, "word": " sample", "probability": 0.64208984375}, {"start": 820.38, "end": 820.84, "word": " from", "probability": 0.8798828125}, {"start": 820.84, "end": 821.98, "word": " all", "probability": 0.9482421875}, {"start": 
821.98, "end": 822.1, "word": " of", "probability": 0.9658203125}, {"start": 822.1, "end": 822.4, "word": " these", "probability": 0.69873046875}, {"start": 822.4, "end": 823.52, "word": " except", "probability": 0.6435546875}, {"start": 823.52, "end": 824.48, "word": " this", "probability": 0.9375}, {"start": 824.48, "end": 824.94, "word": " portion,", "probability": 0.89697265625}, {"start": 825.4, "end": 825.74, "word": " or", "probability": 0.6298828125}, {"start": 825.74, "end": 825.94, "word": " these", "probability": 0.837890625}, {"start": 825.94, "end": 826.16, "word": " two", "probability": 0.9326171875}, {"start": 826.16, "end": 826.52, "word": " groups.", "probability": 0.931640625}], "temperature": 1.0}, {"id": 32, "seek": 85192, "start": 827.06, "end": 851.92, "text": " Non-response error means you don't have follow-up on non-responses. Sampling error, random sample gives different sample statistics. So it means random differences from sample to sample. Final measurement error, bad or leading questions. 
This is one of the most important ones that you have to avoid.", "tokens": [8774, 12, 5667, 3739, 6713, 1355, 291, 500, 380, 362, 1524, 12, 1010, 322, 2107, 12, 28930, 279, 13, 4832, 11970, 6713, 11, 4974, 6889, 2709, 819, 6889, 12523, 13, 407, 309, 1355, 4974, 7300, 490, 6889, 281, 6889, 13, 13443, 13160, 6713, 11, 1578, 420, 5775, 1651, 13, 639, 307, 472, 295, 264, 881, 1021, 2306, 300, 291, 362, 281, 5042, 13], "avg_logprob": -0.20654297294095159, "compression_ratio": 1.601063829787234, "no_speech_prob": 0.0, "words": [{"start": 827.06, "end": 827.36, "word": " Non", "probability": 0.90625}, {"start": 827.36, "end": 827.86, "word": "-response", "probability": 0.8229166666666666}, {"start": 827.86, "end": 828.18, "word": " error", "probability": 0.7783203125}, {"start": 828.18, "end": 828.94, "word": " means", "probability": 0.828125}, {"start": 828.94, "end": 829.12, "word": " you", "probability": 0.9345703125}, {"start": 829.12, "end": 829.3, "word": " don't", "probability": 0.902587890625}, {"start": 829.3, "end": 829.5, "word": " have", "probability": 0.9326171875}, {"start": 829.5, "end": 829.68, "word": " follow", "probability": 0.7978515625}, {"start": 829.68, "end": 829.94, "word": "-up", "probability": 0.747314453125}, {"start": 829.94, "end": 830.28, "word": " on", "probability": 0.93408203125}, {"start": 830.28, "end": 830.62, "word": " non", "probability": 0.95947265625}, {"start": 830.62, "end": 831.28, "word": "-responses.", "probability": 0.8904622395833334}, {"start": 832.18, "end": 832.56, "word": " Sampling", "probability": 0.951171875}, {"start": 832.56, "end": 832.92, "word": " error,", "probability": 0.8583984375}, {"start": 834.06, "end": 834.5, "word": " random", "probability": 0.82177734375}, {"start": 834.5, "end": 835.9, "word": " sample", "probability": 0.6494140625}, {"start": 835.9, "end": 837.0, "word": " gives", "probability": 0.84228515625}, {"start": 837.0, "end": 837.56, "word": " different", "probability": 0.86962890625}, 
{"start": 837.56, "end": 838.0, "word": " sample", "probability": 0.76611328125}, {"start": 838.0, "end": 838.72, "word": " statistics.", "probability": 0.8662109375}, {"start": 839.04, "end": 839.16, "word": " So", "probability": 0.904296875}, {"start": 839.16, "end": 839.26, "word": " it", "probability": 0.82958984375}, {"start": 839.26, "end": 839.44, "word": " means", "probability": 0.9091796875}, {"start": 839.44, "end": 839.86, "word": " random", "probability": 0.84814453125}, {"start": 839.86, "end": 840.42, "word": " differences", "probability": 0.58740234375}, {"start": 840.42, "end": 840.82, "word": " from", "probability": 0.884765625}, {"start": 840.82, "end": 841.18, "word": " sample", "probability": 0.884765625}, {"start": 841.18, "end": 841.4, "word": " to", "probability": 0.9599609375}, {"start": 841.4, "end": 841.66, "word": " sample.", "probability": 0.89404296875}, {"start": 843.12, "end": 843.6, "word": " Final", "probability": 0.71923828125}, {"start": 843.6, "end": 844.0, "word": " measurement", "probability": 0.79150390625}, {"start": 844.0, "end": 844.46, "word": " error,", "probability": 0.88427734375}, {"start": 844.8, "end": 845.06, "word": " bad", "probability": 0.92236328125}, {"start": 845.06, "end": 845.34, "word": " or", "probability": 0.9599609375}, {"start": 845.34, "end": 845.76, "word": " leading", "probability": 0.96044921875}, {"start": 845.76, "end": 846.86, "word": " questions.", "probability": 0.148681640625}, {"start": 847.04, "end": 847.16, "word": " This", "probability": 0.7490234375}, {"start": 847.16, "end": 847.26, "word": " is", "probability": 0.927734375}, {"start": 847.26, "end": 847.5, "word": " one", "probability": 0.720703125}, {"start": 847.5, "end": 847.7, "word": " of", "probability": 0.9111328125}, {"start": 847.7, "end": 847.82, "word": " the", "probability": 0.892578125}, {"start": 847.82, "end": 848.14, "word": " most", "probability": 0.8916015625}, {"start": 848.14, "end": 849.02, "word": " important", 
"probability": 0.88818359375}, {"start": 849.02, "end": 849.26, "word": " ones", "probability": 0.325927734375}, {"start": 849.26, "end": 849.46, "word": " that", "probability": 0.8857421875}, {"start": 849.46, "end": 849.62, "word": " you", "probability": 0.947265625}, {"start": 849.62, "end": 849.92, "word": " have", "probability": 0.93701171875}, {"start": 849.92, "end": 850.98, "word": " to", "probability": 0.9599609375}, {"start": 850.98, "end": 851.92, "word": " avoid.", "probability": 0.9296875}], "temperature": 1.0}, {"id": 33, "seek": 87889, "start": 852.79, "end": 878.89, "text": " So that's the first part of this chapter, assembling techniques. Do you have any questions? Next, we'll talk about assembling distributions. So far, up to this point, I mean at the end of chapter 6, we discussed the probability", "tokens": [407, 300, 311, 264, 700, 644, 295, 341, 7187, 11, 43867, 7512, 13, 1144, 291, 362, 604, 1651, 30, 3087, 11, 321, 603, 751, 466, 43867, 37870, 13, 407, 1400, 11, 493, 281, 341, 935, 11, 286, 914, 412, 264, 917, 295, 7187, 1386, 11, 321, 7152, 264, 8482], "avg_logprob": -0.19749999344348906, "compression_ratio": 1.4074074074074074, "no_speech_prob": 0.0, "words": [{"start": 852.79, "end": 853.11, "word": " So", "probability": 0.8681640625}, {"start": 853.11, "end": 853.49, "word": " that's", "probability": 0.83447265625}, {"start": 853.49, "end": 853.69, "word": " the", "probability": 0.912109375}, {"start": 853.69, "end": 853.95, "word": " first", "probability": 0.8994140625}, {"start": 853.95, "end": 855.31, "word": " part", "probability": 0.67724609375}, {"start": 855.31, "end": 855.79, "word": " of", "probability": 0.95654296875}, {"start": 855.79, "end": 855.99, "word": " this", "probability": 0.92724609375}, {"start": 855.99, "end": 856.37, "word": " chapter,", "probability": 0.85302734375}, {"start": 857.09, "end": 857.39, "word": " assembling", "probability": 0.33935546875}, {"start": 857.39, "end": 858.03, "word": " techniques.", 
"probability": 0.91552734375}, {"start": 860.13, "end": 860.89, "word": " Do", "probability": 0.6865234375}, {"start": 860.89, "end": 860.91, "word": " you", "probability": 0.9677734375}, {"start": 860.91, "end": 861.03, "word": " have", "probability": 0.9541015625}, {"start": 861.03, "end": 861.15, "word": " any", "probability": 0.89892578125}, {"start": 861.15, "end": 861.43, "word": " questions?", "probability": 0.486572265625}, {"start": 863.39, "end": 863.97, "word": " Next,", "probability": 0.85888671875}, {"start": 864.05, "end": 864.19, "word": " we'll", "probability": 0.7568359375}, {"start": 864.19, "end": 864.41, "word": " talk", "probability": 0.8984375}, {"start": 864.41, "end": 864.99, "word": " about", "probability": 0.908203125}, {"start": 864.99, "end": 865.51, "word": " assembling", "probability": 0.8740234375}, {"start": 865.51, "end": 866.29, "word": " distributions.", "probability": 0.96533203125}, {"start": 867.91, "end": 868.33, "word": " So", "probability": 0.9560546875}, {"start": 868.33, "end": 868.73, "word": " far,", "probability": 0.94482421875}, {"start": 869.19, "end": 869.69, "word": " up", "probability": 0.96875}, {"start": 869.69, "end": 869.83, "word": " to", "probability": 0.9609375}, {"start": 869.83, "end": 870.05, "word": " this", "probability": 0.9453125}, {"start": 870.05, "end": 870.53, "word": " point,", "probability": 0.96337890625}, {"start": 872.25, "end": 872.55, "word": " I", "probability": 0.912109375}, {"start": 872.55, "end": 872.75, "word": " mean", "probability": 0.9638671875}, {"start": 872.75, "end": 873.07, "word": " at", "probability": 0.650390625}, {"start": 873.07, "end": 873.25, "word": " the", "probability": 0.9208984375}, {"start": 873.25, "end": 873.51, "word": " end", "probability": 0.892578125}, {"start": 873.51, "end": 873.77, "word": " of", "probability": 0.97119140625}, {"start": 873.77, "end": 874.11, "word": " chapter", "probability": 0.50390625}, {"start": 874.11, "end": 874.57, "word": " 6,", 
"probability": 0.5869140625}, {"start": 875.41, "end": 875.69, "word": " we", "probability": 0.9580078125}, {"start": 875.69, "end": 876.37, "word": " discussed", "probability": 0.86083984375}, {"start": 876.37, "end": 878.43, "word": " the", "probability": 0.765625}, {"start": 878.43, "end": 878.89, "word": " probability", "probability": 0.94873046875}], "temperature": 1.0}, {"id": 34, "seek": 90820, "start": 879.62, "end": 908.2, "text": " For example, of computing X greater than, for example, 7. For example, suppose X represents your score in business statistics course. And suppose we know that X is normally distributed with mean of 80, standard deviation of 10.", "tokens": [1171, 1365, 11, 295, 15866, 1783, 5044, 813, 11, 337, 1365, 11, 1614, 13, 1171, 1365, 11, 7297, 1783, 8855, 428, 6175, 294, 1606, 12523, 1164, 13, 400, 7297, 321, 458, 300, 1783, 307, 5646, 12631, 365, 914, 295, 4688, 11, 3832, 25163, 295, 1266, 13], "avg_logprob": -0.22174202127659576, "compression_ratio": 1.4522292993630572, "no_speech_prob": 0.0, "words": [{"start": 879.62, "end": 879.94, "word": " For", "probability": 0.4375}, {"start": 879.94, "end": 880.42, "word": " example,", "probability": 0.9658203125}, {"start": 880.68, "end": 880.84, "word": " of", "probability": 0.4443359375}, {"start": 880.84, "end": 881.5, "word": " computing", "probability": 0.8486328125}, {"start": 881.5, "end": 882.08, "word": " X", "probability": 0.5078125}, {"start": 882.08, "end": 884.0, "word": " greater", "probability": 0.5263671875}, {"start": 884.0, "end": 884.32, "word": " than,", "probability": 0.943359375}, {"start": 884.38, "end": 884.5, "word": " for", "probability": 0.94384765625}, {"start": 884.5, "end": 884.82, "word": " example,", "probability": 0.97314453125}, {"start": 884.96, "end": 885.22, "word": " 7.", "probability": 0.525390625}, {"start": 886.22, "end": 886.58, "word": " For", "probability": 0.92431640625}, {"start": 886.58, "end": 886.8, "word": " example,", "probability": 
0.970703125}, {"start": 886.88, "end": 887.32, "word": " suppose", "probability": 0.9091796875}, {"start": 887.32, "end": 888.22, "word": " X", "probability": 0.8916015625}, {"start": 888.22, "end": 889.2, "word": " represents", "probability": 0.85400390625}, {"start": 889.2, "end": 891.32, "word": " your", "probability": 0.888671875}, {"start": 891.32, "end": 892.98, "word": " score", "probability": 0.84814453125}, {"start": 892.98, "end": 893.26, "word": " in", "probability": 0.9375}, {"start": 893.26, "end": 893.76, "word": " business", "probability": 0.69384765625}, {"start": 893.76, "end": 894.42, "word": " statistics", "probability": 0.44384765625}, {"start": 894.42, "end": 895.02, "word": " course.", "probability": 0.927734375}, {"start": 897.68, "end": 898.04, "word": " And", "probability": 0.92138671875}, {"start": 898.04, "end": 898.38, "word": " suppose", "probability": 0.8740234375}, {"start": 898.38, "end": 898.54, "word": " we", "probability": 0.6455078125}, {"start": 898.54, "end": 898.68, "word": " know", "probability": 0.8876953125}, {"start": 898.68, "end": 899.12, "word": " that", "probability": 0.931640625}, {"start": 899.12, "end": 900.04, "word": " X", "probability": 0.96630859375}, {"start": 900.04, "end": 901.52, "word": " is", "probability": 0.94580078125}, {"start": 901.52, "end": 901.94, "word": " normally", "probability": 0.9091796875}, {"start": 901.94, "end": 902.68, "word": " distributed", "probability": 0.89794921875}, {"start": 902.68, "end": 903.52, "word": " with", "probability": 0.84765625}, {"start": 903.52, "end": 903.92, "word": " mean", "probability": 0.966796875}, {"start": 903.92, "end": 905.16, "word": " of", "probability": 0.951171875}, {"start": 905.16, "end": 905.76, "word": " 80,", "probability": 0.8935546875}, {"start": 906.86, "end": 907.32, "word": " standard", "probability": 0.82958984375}, {"start": 907.32, "end": 907.7, "word": " deviation", "probability": 0.89697265625}, {"start": 907.7, "end": 907.94, "word": " 
of", "probability": 0.95068359375}, {"start": 907.94, "end": 908.2, "word": " 10.", "probability": 0.94677734375}], "temperature": 1.0}, {"id": 35, "seek": 93350, "start": 910.54, "end": 933.5, "text": " My question was, in chapter 6, what's the probability that the student scores more than 70? Suppose we select randomly one student, and the question is, what's the probability that his score, so just for one individual, for one student, his score is above 70?", "tokens": [1222, 1168, 390, 11, 294, 7187, 1386, 11, 437, 311, 264, 8482, 300, 264, 3107, 13444, 544, 813, 5285, 30, 21360, 321, 3048, 16979, 472, 3107, 11, 293, 264, 1168, 307, 11, 437, 311, 264, 8482, 300, 702, 6175, 11, 370, 445, 337, 472, 2609, 11, 337, 472, 3107, 11, 702, 6175, 307, 3673, 5285, 30], "avg_logprob": -0.1640625, "compression_ratio": 1.6774193548387097, "no_speech_prob": 0.0, "words": [{"start": 910.54, "end": 910.86, "word": " My", "probability": 0.78271484375}, {"start": 910.86, "end": 911.3, "word": " question", "probability": 0.9189453125}, {"start": 911.3, "end": 911.76, "word": " was,", "probability": 0.94140625}, {"start": 912.28, "end": 912.62, "word": " in", "probability": 0.822265625}, {"start": 912.62, "end": 912.82, "word": " chapter", "probability": 0.513671875}, {"start": 912.82, "end": 913.32, "word": " 6,", "probability": 0.66162109375}, {"start": 914.4, "end": 914.82, "word": " what's", "probability": 0.810546875}, {"start": 914.82, "end": 914.96, "word": " the", "probability": 0.90771484375}, {"start": 914.96, "end": 915.36, "word": " probability", "probability": 0.9482421875}, {"start": 915.36, "end": 915.82, "word": " that", "probability": 0.9267578125}, {"start": 915.82, "end": 917.06, "word": " the", "probability": 0.84033203125}, {"start": 917.06, "end": 917.58, "word": " student", "probability": 0.94091796875}, {"start": 917.58, "end": 918.54, "word": " scores", "probability": 0.826171875}, {"start": 918.54, "end": 919.92, "word": " more", "probability": 0.93359375}, 
{"start": 919.92, "end": 920.12, "word": " than", "probability": 0.9541015625}, {"start": 920.12, "end": 920.46, "word": " 70?", "probability": 0.9580078125}, {"start": 922.54, "end": 923.22, "word": " Suppose", "probability": 0.7119140625}, {"start": 923.22, "end": 923.38, "word": " we", "probability": 0.896484375}, {"start": 923.38, "end": 923.74, "word": " select", "probability": 0.85595703125}, {"start": 923.74, "end": 924.28, "word": " randomly", "probability": 0.83154296875}, {"start": 924.28, "end": 924.5, "word": " one", "probability": 0.8857421875}, {"start": 924.5, "end": 924.86, "word": " student,", "probability": 0.96337890625}, {"start": 925.8, "end": 926.06, "word": " and", "probability": 0.9189453125}, {"start": 926.06, "end": 926.18, "word": " the", "probability": 0.921875}, {"start": 926.18, "end": 926.46, "word": " question", "probability": 0.9208984375}, {"start": 926.46, "end": 926.72, "word": " is,", "probability": 0.94921875}, {"start": 926.84, "end": 927.1, "word": " what's", "probability": 0.86328125}, {"start": 927.1, "end": 927.2, "word": " the", "probability": 0.91357421875}, {"start": 927.2, "end": 927.54, "word": " probability", "probability": 0.95361328125}, {"start": 927.54, "end": 928.06, "word": " that", "probability": 0.9365234375}, {"start": 928.06, "end": 928.48, "word": " his", "probability": 0.9375}, {"start": 928.48, "end": 929.04, "word": " score,", "probability": 0.89892578125}, {"start": 929.44, "end": 929.6, "word": " so", "probability": 0.4814453125}, {"start": 929.6, "end": 929.8, "word": " just", "probability": 0.876953125}, {"start": 929.8, "end": 929.98, "word": " for", "probability": 0.9306640625}, {"start": 929.98, "end": 930.16, "word": " one", "probability": 0.92822265625}, {"start": 930.16, "end": 930.62, "word": " individual,", "probability": 0.92138671875}, {"start": 930.72, "end": 930.86, "word": " for", "probability": 0.7998046875}, {"start": 930.86, "end": 931.0, "word": " one", "probability": 
0.92626953125}, {"start": 931.0, "end": 931.38, "word": " student,", "probability": 0.9599609375}, {"start": 932.02, "end": 932.3, "word": " his", "probability": 0.94140625}, {"start": 932.3, "end": 932.6, "word": " score", "probability": 0.90087890625}, {"start": 932.6, "end": 932.76, "word": " is", "probability": 0.9423828125}, {"start": 932.76, "end": 933.04, "word": " above", "probability": 0.96826171875}, {"start": 933.04, "end": 933.5, "word": " 70?", "probability": 0.96435546875}], "temperature": 1.0}, {"id": 36, "seek": 95735, "start": 935.67, "end": 957.35, "text": " In that case, if you remember, we transform from normal distribution to standard normal distribution by using this equation, which is x minus the mean divided by sigma. The mean, this one, it means the mean of x. And sigma is also x.", "tokens": [682, 300, 1389, 11, 498, 291, 1604, 11, 321, 4088, 490, 2710, 7316, 281, 3832, 2710, 7316, 538, 1228, 341, 5367, 11, 597, 307, 2031, 3175, 264, 914, 6666, 538, 12771, 13, 440, 914, 11, 341, 472, 11, 309, 1355, 264, 914, 295, 2031, 13, 400, 12771, 307, 611, 2031, 13], "avg_logprob": -0.2579627501276823, "compression_ratio": 1.56, "no_speech_prob": 0.0, "words": [{"start": 935.67, "end": 935.89, "word": " In", "probability": 0.294677734375}, {"start": 935.89, "end": 936.07, "word": " that", "probability": 0.8515625}, {"start": 936.07, "end": 936.29, "word": " case,", "probability": 0.921875}, {"start": 936.39, "end": 936.47, "word": " if", "probability": 0.64990234375}, {"start": 936.47, "end": 936.51, "word": " you", "probability": 0.90380859375}, {"start": 936.51, "end": 936.77, "word": " remember,", "probability": 0.86181640625}, {"start": 936.87, "end": 936.99, "word": " we", "probability": 0.9423828125}, {"start": 936.99, "end": 937.55, "word": " transform", "probability": 0.537109375}, {"start": 937.55, "end": 937.85, "word": " from", "probability": 0.42919921875}, {"start": 937.85, "end": 938.13, "word": " normal", "probability": 0.4111328125}, 
{"start": 938.13, "end": 938.79, "word": " distribution", "probability": 0.78662109375}, {"start": 938.79, "end": 939.15, "word": " to", "probability": 0.411865234375}, {"start": 939.15, "end": 940.81, "word": " standard", "probability": 0.85791015625}, {"start": 940.81, "end": 941.25, "word": " normal", "probability": 0.76904296875}, {"start": 941.25, "end": 941.87, "word": " distribution", "probability": 0.83154296875}, {"start": 941.87, "end": 942.21, "word": " by", "probability": 0.8740234375}, {"start": 942.21, "end": 942.65, "word": " using", "probability": 0.92578125}, {"start": 942.65, "end": 944.25, "word": " this", "probability": 0.935546875}, {"start": 944.25, "end": 944.77, "word": " equation,", "probability": 0.97607421875}, {"start": 945.03, "end": 945.05, "word": " which", "probability": 0.7841796875}, {"start": 945.05, "end": 945.19, "word": " is", "probability": 0.94775390625}, {"start": 945.19, "end": 945.63, "word": " x", "probability": 0.56982421875}, {"start": 945.63, "end": 946.93, "word": " minus", "probability": 0.94921875}, {"start": 946.93, "end": 947.17, "word": " the", "probability": 0.80029296875}, {"start": 947.17, "end": 947.43, "word": " mean", "probability": 0.982421875}, {"start": 947.43, "end": 948.17, "word": " divided", "probability": 0.638671875}, {"start": 948.17, "end": 948.49, "word": " by", "probability": 0.978515625}, {"start": 948.49, "end": 948.77, "word": " sigma.", "probability": 0.86181640625}, {"start": 950.45, "end": 951.01, "word": " The", "probability": 0.837890625}, {"start": 951.01, "end": 951.29, "word": " mean,", "probability": 0.9580078125}, {"start": 951.43, "end": 951.61, "word": " this", "probability": 0.896484375}, {"start": 951.61, "end": 951.91, "word": " one,", "probability": 0.9189453125}, {"start": 952.51, "end": 952.79, "word": " it", "probability": 0.77099609375}, {"start": 952.79, "end": 953.01, "word": " means", "probability": 0.91259765625}, {"start": 953.01, "end": 953.17, "word": " the", 
"probability": 0.84814453125}, {"start": 953.17, "end": 953.27, "word": " mean", "probability": 0.90771484375}, {"start": 953.27, "end": 953.41, "word": " of", "probability": 0.96875}, {"start": 953.41, "end": 953.77, "word": " x.", "probability": 0.95263671875}, {"start": 955.83, "end": 956.09, "word": " And", "probability": 0.9287109375}, {"start": 956.09, "end": 956.39, "word": " sigma", "probability": 0.91064453125}, {"start": 956.39, "end": 956.63, "word": " is", "probability": 0.95556640625}, {"start": 956.63, "end": 956.93, "word": " also", "probability": 0.87451171875}, {"start": 956.93, "end": 957.35, "word": " x.", "probability": 0.53369140625}], "temperature": 1.0}, {"id": 37, "seek": 98726, "start": 959.06, "end": 987.26, "text": " Now suppose instead of saying what's the probability that a selected student scores more than 70 or above 70, suppose we select a random sample of 20 or whatever it is, 20 students from this class, and I'm interested in the probability that the average score of these 20 students is above 70.", "tokens": [823, 7297, 2602, 295, 1566, 437, 311, 264, 8482, 300, 257, 8209, 3107, 13444, 544, 813, 5285, 420, 3673, 5285, 11, 7297, 321, 3048, 257, 4974, 6889, 295, 945, 420, 2035, 309, 307, 11, 945, 1731, 490, 341, 1508, 11, 293, 286, 478, 3102, 294, 264, 8482, 300, 264, 4274, 6175, 295, 613, 945, 1731, 307, 3673, 5285, 13], "avg_logprob": -0.18841145311792692, "compression_ratio": 1.646067415730337, "no_speech_prob": 0.0, "words": [{"start": 959.06, "end": 959.44, "word": " Now", "probability": 0.88720703125}, {"start": 959.44, "end": 960.02, "word": " suppose", "probability": 0.51904296875}, {"start": 960.02, "end": 960.58, "word": " instead", "probability": 0.6455078125}, {"start": 960.58, "end": 960.88, "word": " of", "probability": 0.96923828125}, {"start": 960.88, "end": 961.36, "word": " saying", "probability": 0.70849609375}, {"start": 961.36, "end": 961.8, "word": " what's", "probability": 0.747314453125}, {"start": 961.8, 
"end": 961.92, "word": " the", "probability": 0.8369140625}, {"start": 961.92, "end": 962.3, "word": " probability", "probability": 0.947265625}, {"start": 962.3, "end": 962.64, "word": " that", "probability": 0.87548828125}, {"start": 962.64, "end": 962.84, "word": " a", "probability": 0.68359375}, {"start": 962.84, "end": 963.2, "word": " selected", "probability": 0.9296875}, {"start": 963.2, "end": 963.82, "word": " student", "probability": 0.94189453125}, {"start": 963.82, "end": 964.48, "word": " scores", "probability": 0.82373046875}, {"start": 964.48, "end": 965.62, "word": " more", "probability": 0.92626953125}, {"start": 965.62, "end": 965.82, "word": " than", "probability": 0.95361328125}, {"start": 965.82, "end": 966.42, "word": " 70", "probability": 0.8740234375}, {"start": 966.42, "end": 966.94, "word": " or", "probability": 0.79296875}, {"start": 966.94, "end": 967.2, "word": " above", "probability": 0.96044921875}, {"start": 967.2, "end": 967.7, "word": " 70,", "probability": 0.96630859375}, {"start": 968.42, "end": 969.18, "word": " suppose", "probability": 0.8837890625}, {"start": 969.18, "end": 969.4, "word": " we", "probability": 0.9443359375}, {"start": 969.4, "end": 969.8, "word": " select", "probability": 0.84619140625}, {"start": 969.8, "end": 969.98, "word": " a", "probability": 0.923828125}, {"start": 969.98, "end": 970.2, "word": " random", "probability": 0.8984375}, {"start": 970.2, "end": 970.64, "word": " sample", "probability": 0.92236328125}, {"start": 970.64, "end": 971.1, "word": " of", "probability": 0.95654296875}, {"start": 971.1, "end": 971.86, "word": " 20", "probability": 0.720703125}, {"start": 971.86, "end": 972.64, "word": " or", "probability": 0.544921875}, {"start": 972.64, "end": 972.98, "word": " whatever", "probability": 0.95166015625}, {"start": 972.98, "end": 973.24, "word": " it", "probability": 0.7607421875}, {"start": 973.24, "end": 973.34, "word": " is,", "probability": 0.9443359375}, {"start": 973.4, "end": 
973.76, "word": " 20", "probability": 0.873046875}, {"start": 973.76, "end": 974.62, "word": " students", "probability": 0.9765625}, {"start": 974.62, "end": 975.22, "word": " from", "probability": 0.87353515625}, {"start": 975.22, "end": 975.5, "word": " this", "probability": 0.93896484375}, {"start": 975.5, "end": 976.06, "word": " class,", "probability": 0.96826171875}, {"start": 977.06, "end": 977.32, "word": " and", "probability": 0.9345703125}, {"start": 977.32, "end": 977.54, "word": " I'm", "probability": 0.7205810546875}, {"start": 977.54, "end": 977.98, "word": " interested", "probability": 0.86572265625}, {"start": 977.98, "end": 979.3, "word": " in", "probability": 0.376220703125}, {"start": 979.3, "end": 979.46, "word": " the", "probability": 0.900390625}, {"start": 979.46, "end": 979.94, "word": " probability", "probability": 0.9482421875}, {"start": 979.94, "end": 980.36, "word": " that", "probability": 0.93017578125}, {"start": 980.36, "end": 981.24, "word": " the", "probability": 0.884765625}, {"start": 981.24, "end": 981.72, "word": " average", "probability": 0.78515625}, {"start": 981.72, "end": 983.24, "word": " score", "probability": 0.88623046875}, {"start": 983.24, "end": 983.72, "word": " of", "probability": 0.95556640625}, {"start": 983.72, "end": 984.02, "word": " these", "probability": 0.77587890625}, {"start": 984.02, "end": 984.6, "word": " 20", "probability": 0.9443359375}, {"start": 984.6, "end": 985.26, "word": " students", "probability": 0.9775390625}, {"start": 985.26, "end": 986.48, "word": " is", "probability": 0.9375}, {"start": 986.48, "end": 986.8, "word": " above", "probability": 0.9619140625}, {"start": 986.8, "end": 987.26, "word": " 70.", "probability": 0.9765625}], "temperature": 1.0}, {"id": 38, "seek": 101301, "start": 989.23, "end": 1013.01, "text": " Now look at the difference between two portions. 
First one, we select a student randomly, and we are asking about what's the probability that this selected student can score above 70. The other one, we select a random sample of size 20.", "tokens": [823, 574, 412, 264, 2649, 1296, 732, 25070, 13, 2386, 472, 11, 321, 3048, 257, 3107, 16979, 11, 293, 321, 366, 3365, 466, 437, 311, 264, 8482, 300, 341, 8209, 3107, 393, 6175, 3673, 5285, 13, 440, 661, 472, 11, 321, 3048, 257, 4974, 6889, 295, 2744, 945, 13], "avg_logprob": -0.26734373569488523, "compression_ratio": 1.4451219512195121, "no_speech_prob": 0.0, "words": [{"start": 989.23, "end": 989.47, "word": " Now", "probability": 0.57275390625}, {"start": 989.47, "end": 989.65, "word": " look", "probability": 0.75390625}, {"start": 989.65, "end": 989.77, "word": " at", "probability": 0.96728515625}, {"start": 989.77, "end": 989.89, "word": " the", "probability": 0.9189453125}, {"start": 989.89, "end": 990.23, "word": " difference", "probability": 0.82861328125}, {"start": 990.23, "end": 990.53, "word": " between", "probability": 0.8828125}, {"start": 990.53, "end": 990.73, "word": " two", "probability": 0.8720703125}, {"start": 990.73, "end": 991.13, "word": " portions.", "probability": 0.368896484375}, {"start": 992.19, "end": 992.71, "word": " First", "probability": 0.8369140625}, {"start": 992.71, "end": 993.39, "word": " one,", "probability": 0.81201171875}, {"start": 993.93, "end": 994.23, "word": " we", "probability": 0.90625}, {"start": 994.23, "end": 994.61, "word": " select", "probability": 0.83447265625}, {"start": 994.61, "end": 994.85, "word": " a", "probability": 0.95654296875}, {"start": 994.85, "end": 995.09, "word": " student", "probability": 0.96142578125}, {"start": 995.09, "end": 995.55, "word": " randomly,", "probability": 0.8642578125}, {"start": 996.17, "end": 996.71, "word": " and", "probability": 0.90625}, {"start": 996.71, "end": 996.85, "word": " we", "probability": 0.93994140625}, {"start": 996.85, "end": 996.99, "word": " are", 
"probability": 0.78125}, {"start": 996.99, "end": 997.37, "word": " asking", "probability": 0.8427734375}, {"start": 997.37, "end": 997.73, "word": " about", "probability": 0.8818359375}, {"start": 997.73, "end": 998.19, "word": " what's", "probability": 0.681396484375}, {"start": 998.19, "end": 998.29, "word": " the", "probability": 0.91650390625}, {"start": 998.29, "end": 998.69, "word": " probability", "probability": 0.94580078125}, {"start": 998.69, "end": 999.09, "word": " that", "probability": 0.923828125}, {"start": 999.09, "end": 999.45, "word": " this", "probability": 0.91748046875}, {"start": 999.45, "end": 999.95, "word": " selected", "probability": 0.876953125}, {"start": 999.95, "end": 1000.51, "word": " student", "probability": 0.95849609375}, {"start": 1000.51, "end": 1001.85, "word": " can", "probability": 0.154296875}, {"start": 1001.85, "end": 1003.49, "word": " score", "probability": 0.880859375}, {"start": 1003.49, "end": 1004.29, "word": " above", "probability": 0.94189453125}, {"start": 1004.29, "end": 1004.89, "word": " 70.", "probability": 0.90673828125}, {"start": 1005.69, "end": 1006.03, "word": " The", "probability": 0.77587890625}, {"start": 1006.03, "end": 1006.25, "word": " other", "probability": 0.8896484375}, {"start": 1006.25, "end": 1006.57, "word": " one,", "probability": 0.9306640625}, {"start": 1008.01, "end": 1011.05, "word": " we", "probability": 0.44140625}, {"start": 1011.05, "end": 1011.43, "word": " select", "probability": 0.798828125}, {"start": 1011.43, "end": 1011.59, "word": " a", "probability": 0.95654296875}, {"start": 1011.59, "end": 1011.75, "word": " random", "probability": 0.86865234375}, {"start": 1011.75, "end": 1012.07, "word": " sample", "probability": 0.85693359375}, {"start": 1012.07, "end": 1012.25, "word": " of", "probability": 0.921875}, {"start": 1012.25, "end": 1012.53, "word": " size", "probability": 0.865234375}, {"start": 1012.53, "end": 1013.01, "word": " 20.", "probability": 0.9013671875}], 
"temperature": 1.0}, {"id": 39, "seek": 104072, "start": 1014.02, "end": 1040.72, "text": " And we are interested on the probability that the average of these scores is above 70. And again, suppose X is normally distributed with mean 80, sigma is 10. It makes sense that we have to transform again from normal distribution, standardized normal distribution, by using exactly the same technique.", "tokens": [400, 321, 366, 3102, 322, 264, 8482, 300, 264, 4274, 295, 613, 13444, 307, 3673, 5285, 13, 400, 797, 11, 7297, 1783, 307, 5646, 12631, 365, 914, 4688, 11, 12771, 307, 1266, 13, 467, 1669, 2020, 300, 321, 362, 281, 4088, 797, 490, 2710, 7316, 11, 31677, 2710, 7316, 11, 538, 1228, 2293, 264, 912, 6532, 13], "avg_logprob": -0.17025861555132374, "compression_ratio": 1.5538461538461539, "no_speech_prob": 0.0, "words": [{"start": 1014.02, "end": 1014.36, "word": " And", "probability": 0.8203125}, {"start": 1014.36, "end": 1014.58, "word": " we", "probability": 0.9296875}, {"start": 1014.58, "end": 1014.76, "word": " are", "probability": 0.92919921875}, {"start": 1014.76, "end": 1015.28, "word": " interested", "probability": 0.86279296875}, {"start": 1015.28, "end": 1015.96, "word": " on", "probability": 0.432373046875}, {"start": 1015.96, "end": 1016.1, "word": " the", "probability": 0.9033203125}, {"start": 1016.1, "end": 1016.52, "word": " probability", "probability": 0.9443359375}, {"start": 1016.52, "end": 1016.88, "word": " that", "probability": 0.92626953125}, {"start": 1016.88, "end": 1017.12, "word": " the", "probability": 0.89697265625}, {"start": 1017.12, "end": 1017.58, "word": " average", "probability": 0.78955078125}, {"start": 1017.58, "end": 1017.94, "word": " of", "probability": 0.958984375}, {"start": 1017.94, "end": 1018.74, "word": " these", "probability": 0.7900390625}, {"start": 1018.74, "end": 1020.44, "word": " scores", "probability": 0.79345703125}, {"start": 1020.44, "end": 1021.4, "word": " is", "probability": 0.93505859375}, {"start": 1021.4, 
"end": 1021.6, "word": " above", "probability": 0.85888671875}, {"start": 1021.6, "end": 1022.0, "word": " 70.", "probability": 0.916015625}, {"start": 1024.9, "end": 1025.62, "word": " And", "probability": 0.9345703125}, {"start": 1025.62, "end": 1025.94, "word": " again,", "probability": 0.91943359375}, {"start": 1026.02, "end": 1026.62, "word": " suppose", "probability": 0.912109375}, {"start": 1026.62, "end": 1027.42, "word": " X", "probability": 0.5478515625}, {"start": 1027.42, "end": 1027.78, "word": " is", "probability": 0.955078125}, {"start": 1027.78, "end": 1028.16, "word": " normally", "probability": 0.89111328125}, {"start": 1028.16, "end": 1028.82, "word": " distributed", "probability": 0.9189453125}, {"start": 1028.82, "end": 1029.08, "word": " with", "probability": 0.82666015625}, {"start": 1029.08, "end": 1029.28, "word": " mean", "probability": 0.5927734375}, {"start": 1029.28, "end": 1029.66, "word": " 80,", "probability": 0.87548828125}, {"start": 1029.84, "end": 1030.1, "word": " sigma", "probability": 0.72607421875}, {"start": 1030.1, "end": 1030.36, "word": " is", "probability": 0.93408203125}, {"start": 1030.36, "end": 1030.72, "word": " 10.", "probability": 0.79736328125}, {"start": 1031.92, "end": 1032.16, "word": " It", "probability": 0.94580078125}, {"start": 1032.16, "end": 1032.44, "word": " makes", "probability": 0.8271484375}, {"start": 1032.44, "end": 1032.7, "word": " sense", "probability": 0.818359375}, {"start": 1032.7, "end": 1032.96, "word": " that", "probability": 0.92919921875}, {"start": 1032.96, "end": 1033.16, "word": " we", "probability": 0.9384765625}, {"start": 1033.16, "end": 1033.36, "word": " have", "probability": 0.92431640625}, {"start": 1033.36, "end": 1033.48, "word": " to", "probability": 0.96728515625}, {"start": 1033.48, "end": 1034.08, "word": " transform", "probability": 0.94189453125}, {"start": 1034.08, "end": 1034.48, "word": " again", "probability": 0.87548828125}, {"start": 1034.48, "end": 1034.88, 
"word": " from", "probability": 0.84423828125}, {"start": 1034.88, "end": 1035.52, "word": " normal", "probability": 0.62109375}, {"start": 1035.52, "end": 1036.18, "word": " distribution,", "probability": 0.8603515625}, {"start": 1037.28, "end": 1037.54, "word": " standardized", "probability": 0.8486328125}, {"start": 1037.54, "end": 1037.98, "word": " normal", "probability": 0.8740234375}, {"start": 1037.98, "end": 1038.58, "word": " distribution,", "probability": 0.87744140625}, {"start": 1038.94, "end": 1039.2, "word": " by", "probability": 0.9619140625}, {"start": 1039.2, "end": 1039.48, "word": " using", "probability": 0.93701171875}, {"start": 1039.48, "end": 1039.88, "word": " exactly", "probability": 0.8857421875}, {"start": 1039.88, "end": 1040.08, "word": " the", "probability": 0.9189453125}, {"start": 1040.08, "end": 1040.28, "word": " same", "probability": 0.91015625}, {"start": 1040.28, "end": 1040.72, "word": " technique.", "probability": 0.94091796875}], "temperature": 1.0}, {"id": 40, "seek": 104436, "start": 1042.11, "end": 1044.37, "text": "The same z-score but", "tokens": [2278, 912, 710, 12, 4417, 418, 457], "avg_logprob": -0.65283203125, "compression_ratio": 0.7142857142857143, "no_speech_prob": 0.0, "words": [{"start": 1042.11, "end": 1042.27, "word": "The", "probability": 0.2294921875}, {"start": 1042.27, "end": 1042.65, "word": " same", "probability": 0.86376953125}, {"start": 1042.65, "end": 1042.99, "word": " z", "probability": 0.366943359375}, {"start": 1042.99, "end": 1043.63, "word": "-score", "probability": 0.6234537760416666}, {"start": 1043.63, "end": 1044.37, "word": " but", "probability": 0.6611328125}], "temperature": 1.0}, {"id": 41, "seek": 107477, "start": 1046.25, "end": 1074.77, "text": " with different statistics. Here we use x, just x, the score of the student. Now we have to use something other called x bar. I'm interested in the average of this. So x bar minus the mean of not x, x bar, then divided by sigma x bar. 
So this is my new, the score.", "tokens": [365, 819, 12523, 13, 1692, 321, 764, 2031, 11, 445, 2031, 11, 264, 6175, 295, 264, 3107, 13, 823, 321, 362, 281, 764, 746, 661, 1219, 2031, 2159, 13, 286, 478, 3102, 294, 264, 4274, 295, 341, 13, 407, 2031, 2159, 3175, 264, 914, 295, 406, 2031, 11, 2031, 2159, 11, 550, 6666, 538, 12771, 2031, 2159, 13, 407, 341, 307, 452, 777, 11, 264, 6175, 13], "avg_logprob": -0.2626378742211005, "compression_ratio": 1.5348837209302326, "no_speech_prob": 0.0, "words": [{"start": 1046.25, "end": 1046.59, "word": " with", "probability": 0.359130859375}, {"start": 1046.59, "end": 1047.09, "word": " different", "probability": 0.81396484375}, {"start": 1047.09, "end": 1047.65, "word": " statistics.", "probability": 0.701171875}, {"start": 1048.63, "end": 1048.87, "word": " Here", "probability": 0.7470703125}, {"start": 1048.87, "end": 1049.03, "word": " we", "probability": 0.76953125}, {"start": 1049.03, "end": 1049.33, "word": " use", "probability": 0.77294921875}, {"start": 1049.33, "end": 1049.69, "word": " x,", "probability": 0.4580078125}, {"start": 1049.75, "end": 1049.95, "word": " just", "probability": 0.82568359375}, {"start": 1049.95, "end": 1050.17, "word": " x,", "probability": 0.97607421875}, {"start": 1050.27, "end": 1050.37, "word": " the", "probability": 0.8603515625}, {"start": 1050.37, "end": 1050.61, "word": " score", "probability": 0.759765625}, {"start": 1050.61, "end": 1050.81, "word": " of", "probability": 0.84326171875}, {"start": 1050.81, "end": 1051.19, "word": " the", "probability": 0.861328125}, {"start": 1051.19, "end": 1051.69, "word": " student.", "probability": 0.82861328125}, {"start": 1052.25, "end": 1052.51, "word": " Now", "probability": 0.9482421875}, {"start": 1052.51, "end": 1052.65, "word": " we", "probability": 0.8291015625}, {"start": 1052.65, "end": 1052.83, "word": " have", "probability": 0.94091796875}, {"start": 1052.83, "end": 1052.97, "word": " to", "probability": 0.96533203125}, {"start": 
1052.97, "end": 1053.27, "word": " use", "probability": 0.8837890625}, {"start": 1053.27, "end": 1053.89, "word": " something", "probability": 0.85009765625}, {"start": 1053.89, "end": 1054.97, "word": " other", "probability": 0.66357421875}, {"start": 1054.97, "end": 1055.55, "word": " called", "probability": 0.188720703125}, {"start": 1055.55, "end": 1055.77, "word": " x", "probability": 0.82177734375}, {"start": 1055.77, "end": 1055.99, "word": " bar.", "probability": 0.67236328125}, {"start": 1057.03, "end": 1057.63, "word": " I'm", "probability": 0.876953125}, {"start": 1057.63, "end": 1057.91, "word": " interested", "probability": 0.8701171875}, {"start": 1057.91, "end": 1058.05, "word": " in", "probability": 0.94140625}, {"start": 1058.05, "end": 1058.17, "word": " the", "probability": 0.9169921875}, {"start": 1058.17, "end": 1058.47, "word": " average", "probability": 0.80126953125}, {"start": 1058.47, "end": 1058.69, "word": " of", "probability": 0.9580078125}, {"start": 1058.69, "end": 1058.99, "word": " this.", "probability": 0.56689453125}, {"start": 1060.03, "end": 1060.29, "word": " So", "probability": 0.89013671875}, {"start": 1060.29, "end": 1061.59, "word": " x", "probability": 0.576171875}, {"start": 1061.59, "end": 1061.87, "word": " bar", "probability": 0.92431640625}, {"start": 1061.87, "end": 1062.33, "word": " minus", "probability": 0.98291015625}, {"start": 1062.33, "end": 1063.51, "word": " the", "probability": 0.85888671875}, {"start": 1063.51, "end": 1063.75, "word": " mean", "probability": 0.9453125}, {"start": 1063.75, "end": 1064.13, "word": " of", "probability": 0.9541015625}, {"start": 1064.13, "end": 1065.31, "word": " not", "probability": 0.76171875}, {"start": 1065.31, "end": 1065.73, "word": " x,", "probability": 0.98876953125}, {"start": 1065.99, "end": 1066.45, "word": " x", "probability": 0.90576171875}, {"start": 1066.45, "end": 1066.77, "word": " bar,", "probability": 0.943359375}, {"start": 1067.55, "end": 1067.83, "word": 
" then", "probability": 0.78125}, {"start": 1067.83, "end": 1068.19, "word": " divided", "probability": 0.475830078125}, {"start": 1068.19, "end": 1068.65, "word": " by", "probability": 0.97216796875}, {"start": 1068.65, "end": 1069.55, "word": " sigma", "probability": 0.86181640625}, {"start": 1069.55, "end": 1069.83, "word": " x", "probability": 0.349609375}, {"start": 1069.83, "end": 1069.95, "word": " bar.", "probability": 0.8798828125}, {"start": 1070.39, "end": 1070.69, "word": " So", "probability": 0.94189453125}, {"start": 1070.69, "end": 1070.97, "word": " this", "probability": 0.92333984375}, {"start": 1070.97, "end": 1071.13, "word": " is", "probability": 0.93896484375}, {"start": 1071.13, "end": 1071.45, "word": " my", "probability": 0.6162109375}, {"start": 1071.45, "end": 1072.55, "word": " new,", "probability": 0.431640625}, {"start": 1073.27, "end": 1074.33, "word": " the", "probability": 0.67236328125}, {"start": 1074.33, "end": 1074.77, "word": " score.", "probability": 0.8427734375}], "temperature": 1.0}, {"id": 42, "seek": 109564, "start": 1077.82, "end": 1095.64, "text": " Here, there are three questions. Number one, what's the shape of the distribution of X bar? So, we are asking about the shape of the distribution. 
It might be normal.", "tokens": [1692, 11, 456, 366, 1045, 1651, 13, 5118, 472, 11, 437, 311, 264, 3909, 295, 264, 7316, 295, 1783, 2159, 30, 407, 11, 321, 366, 3365, 466, 264, 3909, 295, 264, 7316, 13, 467, 1062, 312, 2710, 13], "avg_logprob": -0.22175480387149712, "compression_ratio": 1.4273504273504274, "no_speech_prob": 0.0, "words": [{"start": 1077.82, "end": 1078.2, "word": " Here,", "probability": 0.41650390625}, {"start": 1078.42, "end": 1078.72, "word": " there", "probability": 0.890625}, {"start": 1078.72, "end": 1078.94, "word": " are", "probability": 0.94580078125}, {"start": 1078.94, "end": 1079.2, "word": " three", "probability": 0.80126953125}, {"start": 1079.2, "end": 1079.72, "word": " questions.", "probability": 0.9482421875}, {"start": 1080.2, "end": 1080.36, "word": " Number", "probability": 0.81591796875}, {"start": 1080.36, "end": 1080.68, "word": " one,", "probability": 0.767578125}, {"start": 1083.68, "end": 1084.58, "word": " what's", "probability": 0.774169921875}, {"start": 1084.58, "end": 1084.76, "word": " the", "probability": 0.916015625}, {"start": 1084.76, "end": 1085.14, "word": " shape", "probability": 0.92431640625}, {"start": 1085.14, "end": 1086.22, "word": " of", "probability": 0.970703125}, {"start": 1086.22, "end": 1086.98, "word": " the", "probability": 0.875}, {"start": 1086.98, "end": 1087.58, "word": " distribution", "probability": 0.88818359375}, {"start": 1087.58, "end": 1087.78, "word": " of", "probability": 0.794921875}, {"start": 1087.78, "end": 1087.94, "word": " X", "probability": 0.55615234375}, {"start": 1087.94, "end": 1088.22, "word": " bar?", "probability": 0.7763671875}, {"start": 1090.68, "end": 1091.0, "word": " So,", "probability": 0.6279296875}, {"start": 1091.04, "end": 1091.16, "word": " we", "probability": 0.95703125}, {"start": 1091.16, "end": 1091.32, "word": " are", "probability": 0.8720703125}, {"start": 1091.32, "end": 1091.7, "word": " asking", "probability": 0.87451171875}, {"start": 1091.7, 
"end": 1092.04, "word": " about", "probability": 0.9130859375}, {"start": 1092.04, "end": 1092.32, "word": " the", "probability": 0.9130859375}, {"start": 1092.32, "end": 1092.56, "word": " shape", "probability": 0.90234375}, {"start": 1092.56, "end": 1092.7, "word": " of", "probability": 0.96875}, {"start": 1092.7, "end": 1092.84, "word": " the", "probability": 0.58935546875}, {"start": 1092.84, "end": 1093.34, "word": " distribution.", "probability": 0.884765625}, {"start": 1094.56, "end": 1094.84, "word": " It", "probability": 0.95361328125}, {"start": 1094.84, "end": 1095.1, "word": " might", "probability": 0.89892578125}, {"start": 1095.1, "end": 1095.26, "word": " be", "probability": 0.9541015625}, {"start": 1095.26, "end": 1095.64, "word": " normal.", "probability": 0.86328125}], "temperature": 1.0}, {"id": 43, "seek": 112251, "start": 1096.89, "end": 1122.51, "text": " If the entire population that we select a sample from is normal, I mean if the population is normally distributed, then you select a random sample of that population, it makes sense that the sample is also normal, so any statistic is computed from that sample is also normally distributed, so it makes sense. If the population is normal, then the shape is also normal. 
But if the population is unknown,", "tokens": [759, 264, 2302, 4415, 300, 321, 3048, 257, 6889, 490, 307, 2710, 11, 286, 914, 498, 264, 4415, 307, 5646, 12631, 11, 550, 291, 3048, 257, 4974, 6889, 295, 300, 4415, 11, 309, 1669, 2020, 300, 264, 6889, 307, 611, 2710, 11, 370, 604, 29588, 307, 40610, 490, 300, 6889, 307, 611, 5646, 12631, 11, 370, 309, 1669, 2020, 13, 759, 264, 4415, 307, 2710, 11, 550, 264, 3909, 307, 611, 2710, 13, 583, 498, 264, 4415, 307, 9841, 11], "avg_logprob": -0.16859568195578492, "compression_ratio": 2.202185792349727, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1096.89, "end": 1097.27, "word": " If", "probability": 0.607421875}, {"start": 1097.27, "end": 1097.63, "word": " the", "probability": 0.91162109375}, {"start": 1097.63, "end": 1098.05, "word": " entire", "probability": 0.87158203125}, {"start": 1098.05, "end": 1098.67, "word": " population", "probability": 0.943359375}, {"start": 1098.67, "end": 1099.29, "word": " that", "probability": 0.88037109375}, {"start": 1099.29, "end": 1099.45, "word": " we", "probability": 0.9169921875}, {"start": 1099.45, "end": 1099.79, "word": " select", "probability": 0.78955078125}, {"start": 1099.79, "end": 1100.05, "word": " a", "probability": 0.95849609375}, {"start": 1100.05, "end": 1100.27, "word": " sample", "probability": 0.87548828125}, {"start": 1100.27, "end": 1100.61, "word": " from", "probability": 0.8857421875}, {"start": 1100.61, "end": 1100.81, "word": " is", "probability": 0.9140625}, {"start": 1100.81, "end": 1101.11, "word": " normal,", "probability": 0.79345703125}, {"start": 1101.89, "end": 1102.05, "word": " I", "probability": 0.84423828125}, {"start": 1102.05, "end": 1102.17, "word": " mean", "probability": 0.9697265625}, {"start": 1102.17, "end": 1102.27, "word": " if", "probability": 0.64111328125}, {"start": 1102.27, "end": 1102.39, "word": " the", "probability": 0.8798828125}, {"start": 1102.39, "end": 1102.73, "word": " population", "probability": 
0.93701171875}, {"start": 1102.73, "end": 1102.99, "word": " is", "probability": 0.953125}, {"start": 1102.99, "end": 1103.37, "word": " normally", "probability": 0.88134765625}, {"start": 1103.37, "end": 1103.95, "word": " distributed,", "probability": 0.9267578125}, {"start": 1104.15, "end": 1104.31, "word": " then", "probability": 0.83056640625}, {"start": 1104.31, "end": 1104.45, "word": " you", "probability": 0.73876953125}, {"start": 1104.45, "end": 1104.73, "word": " select", "probability": 0.85205078125}, {"start": 1104.73, "end": 1104.85, "word": " a", "probability": 0.9765625}, {"start": 1104.85, "end": 1105.07, "word": " random", "probability": 0.8681640625}, {"start": 1105.07, "end": 1105.49, "word": " sample", "probability": 0.890625}, {"start": 1105.49, "end": 1106.27, "word": " of", "probability": 0.892578125}, {"start": 1106.27, "end": 1106.49, "word": " that", "probability": 0.9365234375}, {"start": 1106.49, "end": 1106.91, "word": " population,", "probability": 0.92822265625}, {"start": 1107.01, "end": 1107.11, "word": " it", "probability": 0.90625}, {"start": 1107.11, "end": 1107.27, "word": " makes", "probability": 0.8173828125}, {"start": 1107.27, "end": 1107.49, "word": " sense", "probability": 0.8154296875}, {"start": 1107.49, "end": 1107.73, "word": " that", "probability": 0.91357421875}, {"start": 1107.73, "end": 1107.87, "word": " the", "probability": 0.86279296875}, {"start": 1107.87, "end": 1108.67, "word": " sample", "probability": 0.85693359375}, {"start": 1108.67, "end": 1108.87, "word": " is", "probability": 0.94189453125}, {"start": 1108.87, "end": 1109.15, "word": " also", "probability": 0.86328125}, {"start": 1109.15, "end": 1109.51, "word": " normal,", "probability": 0.8759765625}, {"start": 1110.07, "end": 1110.33, "word": " so", "probability": 0.91943359375}, {"start": 1110.33, "end": 1110.59, "word": " any", "probability": 0.8837890625}, {"start": 1110.59, "end": 1110.91, "word": " statistic", "probability": 0.87109375}, 
{"start": 1110.91, "end": 1111.43, "word": " is", "probability": 0.364990234375}, {"start": 1111.43, "end": 1111.77, "word": " computed", "probability": 0.94384765625}, {"start": 1111.77, "end": 1112.05, "word": " from", "probability": 0.87646484375}, {"start": 1112.05, "end": 1112.27, "word": " that", "probability": 0.927734375}, {"start": 1112.27, "end": 1112.59, "word": " sample", "probability": 0.88232421875}, {"start": 1112.59, "end": 1112.79, "word": " is", "probability": 0.880859375}, {"start": 1112.79, "end": 1113.03, "word": " also", "probability": 0.876953125}, {"start": 1113.03, "end": 1113.39, "word": " normally", "probability": 0.8955078125}, {"start": 1113.39, "end": 1113.81, "word": " distributed,", "probability": 0.9267578125}, {"start": 1114.25, "end": 1114.43, "word": " so", "probability": 0.87451171875}, {"start": 1114.43, "end": 1114.53, "word": " it", "probability": 0.9404296875}, {"start": 1114.53, "end": 1114.67, "word": " makes", "probability": 0.82666015625}, {"start": 1114.67, "end": 1114.99, "word": " sense.", "probability": 0.8271484375}, {"start": 1115.49, "end": 1115.69, "word": " If", "probability": 0.9501953125}, {"start": 1115.69, "end": 1115.81, "word": " the", "probability": 0.90673828125}, {"start": 1115.81, "end": 1116.13, "word": " population", "probability": 0.94970703125}, {"start": 1116.13, "end": 1116.35, "word": " is", "probability": 0.9453125}, {"start": 1116.35, "end": 1116.63, "word": " normal,", "probability": 0.876953125}, {"start": 1116.77, "end": 1116.99, "word": " then", "probability": 0.84521484375}, {"start": 1116.99, "end": 1117.85, "word": " the", "probability": 0.8330078125}, {"start": 1117.85, "end": 1118.07, "word": " shape", "probability": 0.9130859375}, {"start": 1118.07, "end": 1118.19, "word": " is", "probability": 0.93994140625}, {"start": 1118.19, "end": 1118.45, "word": " also", "probability": 0.87548828125}, {"start": 1118.45, "end": 1118.75, "word": " normal.", "probability": 0.8828125}, {"start": 
1119.33, "end": 1119.69, "word": " But", "probability": 0.9423828125}, {"start": 1119.69, "end": 1119.97, "word": " if", "probability": 0.90087890625}, {"start": 1119.97, "end": 1120.27, "word": " the", "probability": 0.9130859375}, {"start": 1120.27, "end": 1120.71, "word": " population", "probability": 0.93505859375}, {"start": 1120.71, "end": 1122.15, "word": " is", "probability": 0.94921875}, {"start": 1122.15, "end": 1122.51, "word": " unknown,", "probability": 0.890625}], "temperature": 1.0}, {"id": 44, "seek": 115210, "start": 1123.45, "end": 1152.11, "text": " you don't have any information about the underlying population, then you cannot say it's normal unless you have certain condition that we'll talk about maybe after 30 minutes. So, exactly, if the population is normal, then the shape is also normal, but otherwise, we have to think about it. This is the first question. Now, there are two unknowns in this equation. We have to know the mean,", "tokens": [291, 500, 380, 362, 604, 1589, 466, 264, 14217, 4415, 11, 550, 291, 2644, 584, 309, 311, 2710, 5969, 291, 362, 1629, 4188, 300, 321, 603, 751, 466, 1310, 934, 2217, 2077, 13, 407, 11, 2293, 11, 498, 264, 4415, 307, 2710, 11, 550, 264, 3909, 307, 611, 2710, 11, 457, 5911, 11, 321, 362, 281, 519, 466, 309, 13, 639, 307, 264, 700, 1168, 13, 823, 11, 456, 366, 732, 46048, 294, 341, 5367, 13, 492, 362, 281, 458, 264, 914, 11], "avg_logprob": -0.19512649359447615, "compression_ratio": 1.6853448275862069, "no_speech_prob": 0.0, "words": [{"start": 1123.45, "end": 1123.65, "word": " you", "probability": 0.4267578125}, {"start": 1123.65, "end": 1123.85, "word": " don't", "probability": 0.947265625}, {"start": 1123.85, "end": 1124.07, "word": " have", "probability": 0.94384765625}, {"start": 1124.07, "end": 1124.25, "word": " any", "probability": 0.83447265625}, {"start": 1124.25, "end": 1124.73, "word": " information", "probability": 0.84326171875}, {"start": 1124.73, "end": 1125.07, "word": " about", 
"probability": 0.896484375}, {"start": 1125.07, "end": 1125.39, "word": " the", "probability": 0.75048828125}, {"start": 1125.39, "end": 1126.55, "word": " underlying", "probability": 0.9638671875}, {"start": 1126.55, "end": 1127.03, "word": " population,", "probability": 0.94921875}, {"start": 1127.27, "end": 1127.49, "word": " then", "probability": 0.82373046875}, {"start": 1127.49, "end": 1128.33, "word": " you", "probability": 0.919921875}, {"start": 1128.33, "end": 1128.61, "word": " cannot", "probability": 0.84375}, {"start": 1128.61, "end": 1128.95, "word": " say", "probability": 0.94580078125}, {"start": 1128.95, "end": 1129.83, "word": " it's", "probability": 0.891845703125}, {"start": 1129.83, "end": 1130.23, "word": " normal", "probability": 0.84912109375}, {"start": 1130.23, "end": 1130.53, "word": " unless", "probability": 0.63623046875}, {"start": 1130.53, "end": 1130.85, "word": " you", "probability": 0.9580078125}, {"start": 1130.85, "end": 1131.13, "word": " have", "probability": 0.94482421875}, {"start": 1131.13, "end": 1131.81, "word": " certain", "probability": 0.7841796875}, {"start": 1131.81, "end": 1132.27, "word": " condition", "probability": 0.615234375}, {"start": 1132.27, "end": 1132.95, "word": " that", "probability": 0.8544921875}, {"start": 1132.95, "end": 1133.19, "word": " we'll", "probability": 0.80078125}, {"start": 1133.19, "end": 1133.39, "word": " talk", "probability": 0.90234375}, {"start": 1133.39, "end": 1133.79, "word": " about", "probability": 0.904296875}, {"start": 1133.79, "end": 1134.13, "word": " maybe", "probability": 0.88330078125}, {"start": 1134.13, "end": 1134.45, "word": " after", "probability": 0.830078125}, {"start": 1134.45, "end": 1134.99, "word": " 30", "probability": 0.77978515625}, {"start": 1134.99, "end": 1135.31, "word": " minutes.", "probability": 0.90576171875}, {"start": 1136.21, "end": 1136.51, "word": " So,", "probability": 0.9248046875}, {"start": 1136.85, "end": 1137.17, "word": " exactly,", 
"probability": 0.81689453125}, {"start": 1137.29, "end": 1137.37, "word": " if", "probability": 0.93896484375}, {"start": 1137.37, "end": 1137.51, "word": " the", "probability": 0.8662109375}, {"start": 1137.51, "end": 1137.85, "word": " population", "probability": 0.94775390625}, {"start": 1137.85, "end": 1138.07, "word": " is", "probability": 0.89990234375}, {"start": 1138.07, "end": 1138.35, "word": " normal,", "probability": 0.8837890625}, {"start": 1138.49, "end": 1138.57, "word": " then", "probability": 0.83740234375}, {"start": 1138.57, "end": 1138.77, "word": " the", "probability": 0.90869140625}, {"start": 1138.77, "end": 1139.01, "word": " shape", "probability": 0.69677734375}, {"start": 1139.01, "end": 1139.35, "word": " is", "probability": 0.9453125}, {"start": 1139.35, "end": 1139.61, "word": " also", "probability": 0.87353515625}, {"start": 1139.61, "end": 1139.85, "word": " normal,", "probability": 0.89208984375}, {"start": 1139.91, "end": 1140.07, "word": " but", "probability": 0.77587890625}, {"start": 1140.07, "end": 1140.55, "word": " otherwise,", "probability": 0.8486328125}, {"start": 1140.93, "end": 1141.09, "word": " we", "probability": 0.93310546875}, {"start": 1141.09, "end": 1141.23, "word": " have", "probability": 0.94091796875}, {"start": 1141.23, "end": 1141.33, "word": " to", "probability": 0.96728515625}, {"start": 1141.33, "end": 1141.51, "word": " think", "probability": 0.90673828125}, {"start": 1141.51, "end": 1141.75, "word": " about", "probability": 0.9072265625}, {"start": 1141.75, "end": 1141.91, "word": " it.", "probability": 0.845703125}, {"start": 1142.71, "end": 1142.97, "word": " This", "probability": 0.7880859375}, {"start": 1142.97, "end": 1143.05, "word": " is", "probability": 0.94189453125}, {"start": 1143.05, "end": 1143.15, "word": " the", "probability": 0.9111328125}, {"start": 1143.15, "end": 1143.33, "word": " first", "probability": 0.88818359375}, {"start": 1143.33, "end": 1144.47, "word": " question.", 
"probability": 0.86376953125}, {"start": 1145.63, "end": 1145.93, "word": " Now,", "probability": 0.94677734375}, {"start": 1146.25, "end": 1146.53, "word": " there", "probability": 0.90283203125}, {"start": 1146.53, "end": 1146.71, "word": " are", "probability": 0.9404296875}, {"start": 1146.71, "end": 1146.89, "word": " two", "probability": 0.9033203125}, {"start": 1146.89, "end": 1147.39, "word": " unknowns", "probability": 0.9326171875}, {"start": 1147.39, "end": 1148.81, "word": " in", "probability": 0.90576171875}, {"start": 1148.81, "end": 1149.21, "word": " this", "probability": 0.9443359375}, {"start": 1149.21, "end": 1149.99, "word": " equation.", "probability": 0.40673828125}, {"start": 1151.21, "end": 1151.43, "word": " We", "probability": 0.8466796875}, {"start": 1151.43, "end": 1151.59, "word": " have", "probability": 0.9443359375}, {"start": 1151.59, "end": 1151.69, "word": " to", "probability": 0.336669921875}, {"start": 1151.69, "end": 1151.79, "word": " know", "probability": 0.90673828125}, {"start": 1151.79, "end": 1151.91, "word": " the", "probability": 0.9189453125}, {"start": 1151.91, "end": 1152.11, "word": " mean,", "probability": 0.95166015625}], "temperature": 1.0}, {"id": 45, "seek": 117740, "start": 1153.0, "end": 1177.4, "text": " Or x bar, so the mean of x bar is not given, the mean means the center. So the second question, what's the center of the distribution? In this case, the mean of x bar. So we are looking at what's the mean of x bar. 
The third question is sigma x bar is also unknown, spread.", "tokens": [1610, 2031, 2159, 11, 370, 264, 914, 295, 2031, 2159, 307, 406, 2212, 11, 264, 914, 1355, 264, 3056, 13, 407, 264, 1150, 1168, 11, 437, 311, 264, 3056, 295, 264, 7316, 30, 682, 341, 1389, 11, 264, 914, 295, 2031, 2159, 13, 407, 321, 366, 1237, 412, 437, 311, 264, 914, 295, 2031, 2159, 13, 440, 2636, 1168, 307, 12771, 2031, 2159, 307, 611, 9841, 11, 3974, 13], "avg_logprob": -0.23125000361885342, "compression_ratio": 1.7564102564102564, "no_speech_prob": 0.0, "words": [{"start": 1153.0, "end": 1153.48, "word": " Or", "probability": 0.329345703125}, {"start": 1153.48, "end": 1153.96, "word": " x", "probability": 0.47119140625}, {"start": 1153.96, "end": 1154.32, "word": " bar,", "probability": 0.7939453125}, {"start": 1155.06, "end": 1155.46, "word": " so", "probability": 0.9130859375}, {"start": 1155.46, "end": 1156.3, "word": " the", "probability": 0.865234375}, {"start": 1156.3, "end": 1156.44, "word": " mean", "probability": 0.94580078125}, {"start": 1156.44, "end": 1156.54, "word": " of", "probability": 0.9638671875}, {"start": 1156.54, "end": 1156.74, "word": " x", "probability": 0.9794921875}, {"start": 1156.74, "end": 1157.04, "word": " bar", "probability": 0.9501953125}, {"start": 1157.04, "end": 1157.42, "word": " is", "probability": 0.94580078125}, {"start": 1157.42, "end": 1157.66, "word": " not", "probability": 0.94921875}, {"start": 1157.66, "end": 1157.98, "word": " given,", "probability": 0.90478515625}, {"start": 1158.52, "end": 1158.7, "word": " the", "probability": 0.84521484375}, {"start": 1158.7, "end": 1158.88, "word": " mean", "probability": 0.90771484375}, {"start": 1158.88, "end": 1159.16, "word": " means", "probability": 0.701171875}, {"start": 1159.16, "end": 1159.36, "word": " the", "probability": 0.89697265625}, {"start": 1159.36, "end": 1159.68, "word": " center.", "probability": 0.84423828125}, {"start": 1161.54, "end": 1162.02, "word": " So", "probability": 
0.80419921875}, {"start": 1162.02, "end": 1162.14, "word": " the", "probability": 0.6796875}, {"start": 1162.14, "end": 1162.76, "word": " second", "probability": 0.63134765625}, {"start": 1162.76, "end": 1163.2, "word": " question,", "probability": 0.91943359375}, {"start": 1163.44, "end": 1163.6, "word": " what's", "probability": 0.921875}, {"start": 1163.6, "end": 1163.8, "word": " the", "probability": 0.9228515625}, {"start": 1163.8, "end": 1164.12, "word": " center", "probability": 0.8857421875}, {"start": 1164.12, "end": 1164.3, "word": " of", "probability": 0.962890625}, {"start": 1164.3, "end": 1164.42, "word": " the", "probability": 0.39306640625}, {"start": 1164.42, "end": 1164.9, "word": " distribution?", "probability": 0.84423828125}, {"start": 1166.5, "end": 1166.74, "word": " In", "probability": 0.94677734375}, {"start": 1166.74, "end": 1166.92, "word": " this", "probability": 0.9453125}, {"start": 1166.92, "end": 1167.3, "word": " case,", "probability": 0.91162109375}, {"start": 1168.22, "end": 1168.52, "word": " the", "probability": 0.908203125}, {"start": 1168.52, "end": 1168.68, "word": " mean", "probability": 0.95068359375}, {"start": 1168.68, "end": 1168.8, "word": " of", "probability": 0.9638671875}, {"start": 1168.8, "end": 1169.0, "word": " x", "probability": 0.97900390625}, {"start": 1169.0, "end": 1169.2, "word": " bar.", "probability": 0.86572265625}, {"start": 1169.34, "end": 1169.46, "word": " So", "probability": 0.94677734375}, {"start": 1169.46, "end": 1169.64, "word": " we", "probability": 0.75537109375}, {"start": 1169.64, "end": 1169.74, "word": " are", "probability": 0.85400390625}, {"start": 1169.74, "end": 1169.88, "word": " looking", "probability": 0.42138671875}, {"start": 1169.88, "end": 1169.98, "word": " at", "probability": 0.356201171875}, {"start": 1169.98, "end": 1170.12, "word": " what's", "probability": 0.83349609375}, {"start": 1170.12, "end": 1170.3, "word": " the", "probability": 0.91796875}, {"start": 1170.3, "end": 
1170.5, "word": " mean", "probability": 0.9423828125}, {"start": 1170.5, "end": 1170.64, "word": " of", "probability": 0.9482421875}, {"start": 1170.64, "end": 1170.82, "word": " x", "probability": 0.96923828125}, {"start": 1170.82, "end": 1170.98, "word": " bar.", "probability": 0.8935546875}, {"start": 1171.58, "end": 1171.8, "word": " The", "probability": 0.890625}, {"start": 1171.8, "end": 1172.08, "word": " third", "probability": 0.923828125}, {"start": 1172.08, "end": 1172.5, "word": " question", "probability": 0.91259765625}, {"start": 1172.5, "end": 1172.92, "word": " is", "probability": 0.9423828125}, {"start": 1172.92, "end": 1173.74, "word": " sigma", "probability": 0.486328125}, {"start": 1173.74, "end": 1174.0, "word": " x", "probability": 0.9892578125}, {"start": 1174.0, "end": 1174.24, "word": " bar", "probability": 0.9404296875}, {"start": 1174.24, "end": 1174.38, "word": " is", "probability": 0.94091796875}, {"start": 1174.38, "end": 1174.7, "word": " also", "probability": 0.88427734375}, {"start": 1174.7, "end": 1175.08, "word": " unknown,", "probability": 0.798828125}, {"start": 1176.94, "end": 1177.4, "word": " spread.", "probability": 0.88134765625}], "temperature": 1.0}, {"id": 46, "seek": 120363, "start": 1178.51, "end": 1203.63, "text": " Now shape, center, spread, these are characteristics, these characteristics in this case sampling distribution, exactly which is called sampling distribution. 
So by sampling distribution we mean that, by sampling distribution, we mean that you have to know the center of distribution,", "tokens": [823, 3909, 11, 3056, 11, 3974, 11, 613, 366, 10891, 11, 613, 10891, 294, 341, 1389, 21179, 7316, 11, 2293, 597, 307, 1219, 21179, 7316, 13, 407, 538, 21179, 7316, 321, 914, 300, 11, 538, 21179, 7316, 11, 321, 914, 300, 291, 362, 281, 458, 264, 3056, 295, 7316, 11], "avg_logprob": -0.3014705929101682, "compression_ratio": 1.993006993006993, "no_speech_prob": 0.0, "words": [{"start": 1178.51, "end": 1178.85, "word": " Now", "probability": 0.74365234375}, {"start": 1178.85, "end": 1179.35, "word": " shape,", "probability": 0.5751953125}, {"start": 1179.85, "end": 1180.15, "word": " center,", "probability": 0.57373046875}, {"start": 1180.45, "end": 1180.95, "word": " spread,", "probability": 0.139892578125}, {"start": 1181.53, "end": 1181.85, "word": " these", "probability": 0.8486328125}, {"start": 1181.85, "end": 1182.17, "word": " are", "probability": 0.93115234375}, {"start": 1182.17, "end": 1183.15, "word": " characteristics,", "probability": 0.861328125}, {"start": 1184.29, "end": 1184.59, "word": " these", "probability": 0.6923828125}, {"start": 1184.59, "end": 1185.31, "word": " characteristics", "probability": 0.88671875}, {"start": 1185.31, "end": 1185.79, "word": " in", "probability": 0.75439453125}, {"start": 1185.79, "end": 1185.99, "word": " this", "probability": 0.95068359375}, {"start": 1185.99, "end": 1186.41, "word": " case", "probability": 0.92138671875}, {"start": 1186.41, "end": 1187.77, "word": " sampling", "probability": 0.2498779296875}, {"start": 1187.77, "end": 1188.23, "word": " distribution,", "probability": 0.7822265625}, {"start": 1188.67, "end": 1189.17, "word": " exactly", "probability": 0.8310546875}, {"start": 1189.17, "end": 1189.75, "word": " which", "probability": 0.482177734375}, {"start": 1189.75, "end": 1189.91, "word": " is", "probability": 0.67529296875}, {"start": 1189.91, "end": 1190.09, 
"word": " called", "probability": 0.890625}, {"start": 1190.09, "end": 1190.49, "word": " sampling", "probability": 0.94921875}, {"start": 1190.49, "end": 1191.25, "word": " distribution.", "probability": 0.83935546875}, {"start": 1192.97, "end": 1193.85, "word": " So", "probability": 0.87939453125}, {"start": 1193.85, "end": 1194.67, "word": " by", "probability": 0.80078125}, {"start": 1194.67, "end": 1194.99, "word": " sampling", "probability": 0.9052734375}, {"start": 1194.99, "end": 1195.57, "word": " distribution", "probability": 0.86181640625}, {"start": 1195.57, "end": 1195.89, "word": " we", "probability": 0.58251953125}, {"start": 1195.89, "end": 1196.13, "word": " mean", "probability": 0.962890625}, {"start": 1196.13, "end": 1196.61, "word": " that,", "probability": 0.93212890625}, {"start": 1197.05, "end": 1197.79, "word": " by", "probability": 0.921875}, {"start": 1197.79, "end": 1198.13, "word": " sampling", "probability": 0.96142578125}, {"start": 1198.13, "end": 1198.79, "word": " distribution,", "probability": 0.873046875}, {"start": 1199.41, "end": 1199.55, "word": " we", "probability": 0.84130859375}, {"start": 1199.55, "end": 1199.71, "word": " mean", "probability": 0.962890625}, {"start": 1199.71, "end": 1199.97, "word": " that", "probability": 0.9345703125}, {"start": 1199.97, "end": 1200.11, "word": " you", "probability": 0.9326171875}, {"start": 1200.11, "end": 1200.31, "word": " have", "probability": 0.9501953125}, {"start": 1200.31, "end": 1200.43, "word": " to", "probability": 0.9736328125}, {"start": 1200.43, "end": 1200.65, "word": " know", "probability": 0.88671875}, {"start": 1200.65, "end": 1201.87, "word": " the", "probability": 0.6162109375}, {"start": 1201.87, "end": 1202.85, "word": " center", "probability": 0.8896484375}, {"start": 1202.85, "end": 1203.05, "word": " of", "probability": 0.95751953125}, {"start": 1203.05, "end": 1203.63, "word": " distribution,", "probability": 0.76220703125}], "temperature": 1.0}, {"id": 47, 
"seek": 122940, "start": 1205.46, "end": 1229.4, "text": " I mean the mean of the statistic you are interested in. Second, the spread or the variability of the sample statistic also you are interested in. In addition to that, you have to know the shape of the statistic. So three things we have to know, center, spread and shape.", "tokens": [286, 914, 264, 914, 295, 264, 29588, 291, 366, 3102, 294, 13, 5736, 11, 264, 3974, 420, 264, 35709, 295, 264, 6889, 29588, 611, 291, 366, 3102, 294, 13, 682, 4500, 281, 300, 11, 291, 362, 281, 458, 264, 3909, 295, 264, 29588, 13, 407, 1045, 721, 321, 362, 281, 458, 11, 3056, 11, 3974, 293, 3909, 13], "avg_logprob": -0.17478813963421322, "compression_ratio": 1.8310810810810811, "no_speech_prob": 0.0, "words": [{"start": 1205.46, "end": 1205.7, "word": " I", "probability": 0.77490234375}, {"start": 1205.7, "end": 1205.84, "word": " mean", "probability": 0.96142578125}, {"start": 1205.84, "end": 1206.04, "word": " the", "probability": 0.62744140625}, {"start": 1206.04, "end": 1206.34, "word": " mean", "probability": 0.74560546875}, {"start": 1206.34, "end": 1206.82, "word": " of", "probability": 0.96533203125}, {"start": 1206.82, "end": 1207.02, "word": " the", "probability": 0.91357421875}, {"start": 1207.02, "end": 1207.44, "word": " statistic", "probability": 0.84033203125}, {"start": 1207.44, "end": 1207.68, "word": " you", "probability": 0.943359375}, {"start": 1207.68, "end": 1207.82, "word": " are", "probability": 0.8955078125}, {"start": 1207.82, "end": 1208.32, "word": " interested", "probability": 0.8720703125}, {"start": 1208.32, "end": 1208.76, "word": " in.", "probability": 0.947265625}, {"start": 1209.54, "end": 1209.9, "word": " Second,", "probability": 0.826171875}, {"start": 1210.7, "end": 1211.08, "word": " the", "probability": 0.904296875}, {"start": 1211.08, "end": 1211.56, "word": " spread", "probability": 0.88671875}, {"start": 1211.56, "end": 1212.3, "word": " or", "probability": 0.84033203125}, {"start": 
1212.3, "end": 1212.56, "word": " the", "probability": 0.67333984375}, {"start": 1212.56, "end": 1213.22, "word": " variability", "probability": 0.978515625}, {"start": 1213.22, "end": 1214.4, "word": " of", "probability": 0.9580078125}, {"start": 1214.4, "end": 1214.62, "word": " the", "probability": 0.92236328125}, {"start": 1214.62, "end": 1214.88, "word": " sample", "probability": 0.79736328125}, {"start": 1214.88, "end": 1215.48, "word": " statistic", "probability": 0.765625}, {"start": 1215.48, "end": 1215.92, "word": " also", "probability": 0.60302734375}, {"start": 1215.92, "end": 1216.06, "word": " you", "probability": 0.91845703125}, {"start": 1216.06, "end": 1216.16, "word": " are", "probability": 0.8935546875}, {"start": 1216.16, "end": 1216.6, "word": " interested", "probability": 0.86083984375}, {"start": 1216.6, "end": 1216.94, "word": " in.", "probability": 0.94921875}, {"start": 1217.42, "end": 1217.6, "word": " In", "probability": 0.951171875}, {"start": 1217.6, "end": 1217.9, "word": " addition", "probability": 0.9560546875}, {"start": 1217.9, "end": 1218.1, "word": " to", "probability": 0.96533203125}, {"start": 1218.1, "end": 1218.3, "word": " that,", "probability": 0.93994140625}, {"start": 1218.38, "end": 1218.44, "word": " you", "probability": 0.95947265625}, {"start": 1218.44, "end": 1218.62, "word": " have", "probability": 0.943359375}, {"start": 1218.62, "end": 1218.74, "word": " to", "probability": 0.970703125}, {"start": 1218.74, "end": 1218.96, "word": " know", "probability": 0.89013671875}, {"start": 1218.96, "end": 1220.26, "word": " the", "probability": 0.9072265625}, {"start": 1220.26, "end": 1220.94, "word": " shape", "probability": 0.92919921875}, {"start": 1220.94, "end": 1221.24, "word": " of", "probability": 0.9697265625}, {"start": 1221.24, "end": 1221.4, "word": " the", "probability": 0.89306640625}, {"start": 1221.4, "end": 1221.8, "word": " statistic.", "probability": 0.89453125}, {"start": 1222.74, "end": 1223.0, "word": 
" So", "probability": 0.9560546875}, {"start": 1223.0, "end": 1223.38, "word": " three", "probability": 0.6201171875}, {"start": 1223.38, "end": 1224.46, "word": " things", "probability": 0.857421875}, {"start": 1224.46, "end": 1224.6, "word": " we", "probability": 0.7236328125}, {"start": 1224.6, "end": 1224.76, "word": " have", "probability": 0.94970703125}, {"start": 1224.76, "end": 1224.88, "word": " to", "probability": 0.96923828125}, {"start": 1224.88, "end": 1225.08, "word": " know,", "probability": 0.890625}, {"start": 1225.82, "end": 1226.2, "word": " center,", "probability": 0.62744140625}, {"start": 1227.22, "end": 1227.92, "word": " spread", "probability": 0.9033203125}, {"start": 1227.92, "end": 1229.06, "word": " and", "probability": 0.580078125}, {"start": 1229.06, "end": 1229.4, "word": " shape.", "probability": 0.89892578125}], "temperature": 1.0}, {"id": 48, "seek": 125904, "start": 1230.52, "end": 1259.04, "text": " So that's what we'll talk about now. So now sampling distribution is a distribution of all of the possible values of a sample statistic. 
This sample statistic could be sample mean, could be sample variance, could be sample proportion, because any population has mainly three characteristics, mean, standard deviation, and proportion.", "tokens": [407, 300, 311, 437, 321, 603, 751, 466, 586, 13, 407, 586, 21179, 7316, 307, 257, 7316, 295, 439, 295, 264, 1944, 4190, 295, 257, 6889, 29588, 13, 639, 6889, 29588, 727, 312, 6889, 914, 11, 727, 312, 6889, 21977, 11, 727, 312, 6889, 16068, 11, 570, 604, 4415, 575, 8704, 1045, 10891, 11, 914, 11, 3832, 25163, 11, 293, 16068, 13], "avg_logprob": -0.1686507941238464, "compression_ratio": 1.8054054054054054, "no_speech_prob": 0.0, "words": [{"start": 1230.52, "end": 1230.78, "word": " So", "probability": 0.74267578125}, {"start": 1230.78, "end": 1231.28, "word": " that's", "probability": 0.814697265625}, {"start": 1231.28, "end": 1231.76, "word": " what", "probability": 0.9404296875}, {"start": 1231.76, "end": 1231.98, "word": " we'll", "probability": 0.806640625}, {"start": 1231.98, "end": 1232.18, "word": " talk", "probability": 0.90478515625}, {"start": 1232.18, "end": 1232.6, "word": " about", "probability": 0.90478515625}, {"start": 1232.6, "end": 1234.26, "word": " now.", "probability": 0.90283203125}, {"start": 1234.94, "end": 1235.06, "word": " So", "probability": 0.82373046875}, {"start": 1235.06, "end": 1235.34, "word": " now", "probability": 0.78662109375}, {"start": 1235.34, "end": 1235.7, "word": " sampling", "probability": 0.4482421875}, {"start": 1235.7, "end": 1236.32, "word": " distribution", "probability": 0.8349609375}, {"start": 1236.32, "end": 1237.2, "word": " is", "probability": 0.93603515625}, {"start": 1237.2, "end": 1237.34, "word": " a", "probability": 0.86572265625}, {"start": 1237.34, "end": 1237.84, "word": " distribution", "probability": 0.84375}, {"start": 1237.84, "end": 1238.2, "word": " of", "probability": 0.9677734375}, {"start": 1238.2, "end": 1238.62, "word": " all", "probability": 0.94384765625}, {"start": 1238.62, "end": 
1239.04, "word": " of", "probability": 0.90185546875}, {"start": 1239.04, "end": 1239.16, "word": " the", "probability": 0.9169921875}, {"start": 1239.16, "end": 1239.52, "word": " possible", "probability": 0.96240234375}, {"start": 1239.52, "end": 1240.14, "word": " values", "probability": 0.97021484375}, {"start": 1240.14, "end": 1240.78, "word": " of", "probability": 0.96240234375}, {"start": 1240.78, "end": 1241.64, "word": " a", "probability": 0.947265625}, {"start": 1241.64, "end": 1241.88, "word": " sample", "probability": 0.77197265625}, {"start": 1241.88, "end": 1242.5, "word": " statistic.", "probability": 0.86572265625}, {"start": 1244.0, "end": 1244.32, "word": " This", "probability": 0.85302734375}, {"start": 1244.32, "end": 1244.62, "word": " sample", "probability": 0.896484375}, {"start": 1244.62, "end": 1245.1, "word": " statistic", "probability": 0.86865234375}, {"start": 1245.1, "end": 1245.58, "word": " could", "probability": 0.88720703125}, {"start": 1245.58, "end": 1246.04, "word": " be", "probability": 0.9560546875}, {"start": 1246.04, "end": 1247.38, "word": " sample", "probability": 0.779296875}, {"start": 1247.38, "end": 1247.66, "word": " mean,", "probability": 0.93798828125}, {"start": 1248.14, "end": 1248.36, "word": " could", "probability": 0.89453125}, {"start": 1248.36, "end": 1248.64, "word": " be", "probability": 0.95556640625}, {"start": 1248.64, "end": 1249.18, "word": " sample", "probability": 0.88330078125}, {"start": 1249.18, "end": 1249.72, "word": " variance,", "probability": 0.89404296875}, {"start": 1249.9, "end": 1250.02, "word": " could", "probability": 0.876953125}, {"start": 1250.02, "end": 1250.24, "word": " be", "probability": 0.95703125}, {"start": 1250.24, "end": 1250.6, "word": " sample", "probability": 0.83984375}, {"start": 1250.6, "end": 1251.08, "word": " proportion,", "probability": 0.78759765625}, {"start": 1251.84, "end": 1252.18, "word": " because", "probability": 0.896484375}, {"start": 1252.18, "end": 
1252.6, "word": " any", "probability": 0.8984375}, {"start": 1252.6, "end": 1253.0, "word": " population", "probability": 0.953125}, {"start": 1253.0, "end": 1253.5, "word": " has", "probability": 0.93701171875}, {"start": 1253.5, "end": 1254.12, "word": " mainly", "probability": 0.463623046875}, {"start": 1254.12, "end": 1254.42, "word": " three", "probability": 0.8271484375}, {"start": 1254.42, "end": 1255.26, "word": " characteristics,", "probability": 0.90380859375}, {"start": 1256.16, "end": 1256.46, "word": " mean,", "probability": 0.9013671875}, {"start": 1256.84, "end": 1257.08, "word": " standard", "probability": 0.814453125}, {"start": 1257.08, "end": 1257.48, "word": " deviation,", "probability": 0.9638671875}, {"start": 1257.92, "end": 1258.08, "word": " and", "probability": 0.94091796875}, {"start": 1258.08, "end": 1259.04, "word": " proportion.", "probability": 0.775390625}], "temperature": 1.0}, {"id": 49, "seek": 128854, "start": 1261.52, "end": 1288.54, "text": " So again, a sampling distribution is a distribution of all of the possible values of a sample statistic or a given size sample selected from a population. For example, suppose you sample 50 students from your college regarding their mean GPA. 
GPA means Graduate Point Average.", "tokens": [407, 797, 11, 257, 21179, 7316, 307, 257, 7316, 295, 439, 295, 264, 1944, 4190, 295, 257, 6889, 29588, 420, 257, 2212, 2744, 6889, 8209, 490, 257, 4415, 13, 1171, 1365, 11, 7297, 291, 6889, 2625, 1731, 490, 428, 3859, 8595, 641, 914, 41321, 13, 41321, 1355, 38124, 12387, 316, 3623, 13], "avg_logprob": -0.1914799595778843, "compression_ratio": 1.5054347826086956, "no_speech_prob": 0.0, "words": [{"start": 1261.52, "end": 1261.96, "word": " So", "probability": 0.81884765625}, {"start": 1261.96, "end": 1262.42, "word": " again,", "probability": 0.7314453125}, {"start": 1263.02, "end": 1263.04, "word": " a", "probability": 0.63720703125}, {"start": 1263.04, "end": 1263.24, "word": " sampling", "probability": 0.481201171875}, {"start": 1263.24, "end": 1263.76, "word": " distribution", "probability": 0.85693359375}, {"start": 1263.76, "end": 1264.02, "word": " is", "probability": 0.9296875}, {"start": 1264.02, "end": 1264.26, "word": " a", "probability": 0.81884765625}, {"start": 1264.26, "end": 1264.8, "word": " distribution", "probability": 0.8515625}, {"start": 1264.8, "end": 1265.14, "word": " of", "probability": 0.9697265625}, {"start": 1265.14, "end": 1265.5, "word": " all", "probability": 0.92822265625}, {"start": 1265.5, "end": 1265.74, "word": " of", "probability": 0.861328125}, {"start": 1265.74, "end": 1265.86, "word": " the", "probability": 0.92236328125}, {"start": 1265.86, "end": 1266.22, "word": " possible", "probability": 0.9619140625}, {"start": 1266.22, "end": 1266.64, "word": " values", "probability": 0.96875}, {"start": 1266.64, "end": 1267.0, "word": " of", "probability": 0.96630859375}, {"start": 1267.0, "end": 1267.4, "word": " a", "probability": 0.97607421875}, {"start": 1267.4, "end": 1267.7, "word": " sample", "probability": 0.89892578125}, {"start": 1267.7, "end": 1268.24, "word": " statistic", "probability": 0.64404296875}, {"start": 1268.24, "end": 1269.24, "word": " or", "probability": 
0.35546875}, {"start": 1269.24, "end": 1269.42, "word": " a", "probability": 0.95751953125}, {"start": 1269.42, "end": 1269.6, "word": " given", "probability": 0.927734375}, {"start": 1269.6, "end": 1270.24, "word": " size", "probability": 0.65234375}, {"start": 1270.24, "end": 1271.02, "word": " sample", "probability": 0.86279296875}, {"start": 1271.02, "end": 1271.96, "word": " selected", "probability": 0.828125}, {"start": 1271.96, "end": 1272.54, "word": " from", "probability": 0.88134765625}, {"start": 1272.54, "end": 1273.1, "word": " a", "probability": 0.97265625}, {"start": 1273.1, "end": 1273.68, "word": " population.", "probability": 0.9736328125}, {"start": 1274.7, "end": 1275.3, "word": " For", "probability": 0.9306640625}, {"start": 1275.3, "end": 1275.66, "word": " example,", "probability": 0.95263671875}, {"start": 1277.14, "end": 1278.98, "word": " suppose", "probability": 0.841796875}, {"start": 1278.98, "end": 1279.64, "word": " you", "probability": 0.93798828125}, {"start": 1279.64, "end": 1280.02, "word": " sample", "probability": 0.92041015625}, {"start": 1280.02, "end": 1280.46, "word": " 50", "probability": 0.8251953125}, {"start": 1280.46, "end": 1280.98, "word": " students", "probability": 0.96875}, {"start": 1280.98, "end": 1281.34, "word": " from", "probability": 0.8916015625}, {"start": 1281.34, "end": 1281.6, "word": " your", "probability": 0.89990234375}, {"start": 1281.6, "end": 1282.0, "word": " college", "probability": 0.720703125}, {"start": 1282.0, "end": 1282.66, "word": " regarding", "probability": 0.89892578125}, {"start": 1282.66, "end": 1283.14, "word": " their", "probability": 0.95556640625}, {"start": 1283.14, "end": 1283.42, "word": " mean", "probability": 0.48046875}, {"start": 1283.42, "end": 1283.66, "word": " GPA.", "probability": 0.9697265625}, {"start": 1285.0, "end": 1285.56, "word": " GPA", "probability": 0.97314453125}, {"start": 1285.56, "end": 1286.0, "word": " means", "probability": 0.82666015625}, {"start": 
1286.0, "end": 1286.44, "word": " Graduate", "probability": 0.30078125}, {"start": 1286.44, "end": 1287.94, "word": " Point", "probability": 0.83056640625}, {"start": 1287.94, "end": 1288.54, "word": " Average.", "probability": 0.913330078125}], "temperature": 1.0}, {"id": 50, "seek": 131830, "start": 1289.7, "end": 1318.3, "text": " Now, if you obtain many different samples of size 50, you will compute a different mean for each sample. As I mentioned here, I select a sample the same sizes, but we obtain different sample statistics, I mean different sample means. We are interested in the distribution of all potential mean GBA", "tokens": [823, 11, 498, 291, 12701, 867, 819, 10938, 295, 2744, 2625, 11, 291, 486, 14722, 257, 819, 914, 337, 1184, 6889, 13, 1018, 286, 2835, 510, 11, 286, 3048, 257, 6889, 264, 912, 11602, 11, 457, 321, 12701, 819, 6889, 12523, 11, 286, 914, 819, 6889, 1355, 13, 492, 366, 3102, 294, 264, 7316, 295, 439, 3995, 914, 460, 9295], "avg_logprob": -0.2123463104982845, "compression_ratio": 1.6373626373626373, "no_speech_prob": 0.0, "words": [{"start": 1289.7, "end": 1290.04, "word": " Now,", "probability": 0.70849609375}, {"start": 1290.24, "end": 1290.5, "word": " if", "probability": 0.9267578125}, {"start": 1290.5, "end": 1290.64, "word": " you", "probability": 0.94921875}, {"start": 1290.64, "end": 1291.2, "word": " obtain", "probability": 0.444580078125}, {"start": 1291.2, "end": 1291.72, "word": " many", "probability": 0.8720703125}, {"start": 1291.72, "end": 1292.28, "word": " different", "probability": 0.86083984375}, {"start": 1292.28, "end": 1292.7, "word": " samples", "probability": 0.85791015625}, {"start": 1292.7, "end": 1293.0, "word": " of", "probability": 0.90673828125}, {"start": 1293.0, "end": 1293.28, "word": " size", "probability": 0.83251953125}, {"start": 1293.28, "end": 1293.74, "word": " 50,", "probability": 0.87451171875}, {"start": 1294.54, "end": 1295.1, "word": " you", "probability": 0.9384765625}, {"start": 1295.1, 
"end": 1295.28, "word": " will", "probability": 0.83154296875}, {"start": 1295.28, "end": 1295.7, "word": " compute", "probability": 0.91650390625}, {"start": 1295.7, "end": 1295.94, "word": " a", "probability": 0.96728515625}, {"start": 1295.94, "end": 1296.34, "word": " different", "probability": 0.87646484375}, {"start": 1296.34, "end": 1296.64, "word": " mean", "probability": 0.9443359375}, {"start": 1296.64, "end": 1296.86, "word": " for", "probability": 0.9384765625}, {"start": 1296.86, "end": 1297.1, "word": " each", "probability": 0.93212890625}, {"start": 1297.1, "end": 1297.46, "word": " sample.", "probability": 0.87841796875}, {"start": 1298.4, "end": 1298.64, "word": " As", "probability": 0.9228515625}, {"start": 1298.64, "end": 1298.76, "word": " I", "probability": 0.98388671875}, {"start": 1298.76, "end": 1299.02, "word": " mentioned", "probability": 0.796875}, {"start": 1299.02, "end": 1299.3, "word": " here,", "probability": 0.81201171875}, {"start": 1299.34, "end": 1299.5, "word": " I", "probability": 0.96240234375}, {"start": 1299.5, "end": 1299.76, "word": " select", "probability": 0.48876953125}, {"start": 1299.76, "end": 1300.0, "word": " a", "probability": 0.91064453125}, {"start": 1300.0, "end": 1300.3, "word": " sample", "probability": 0.896484375}, {"start": 1300.3, "end": 1300.98, "word": " the", "probability": 0.259033203125}, {"start": 1300.98, "end": 1302.1, "word": " same", "probability": 0.87548828125}, {"start": 1302.1, "end": 1302.68, "word": " sizes,", "probability": 0.845703125}, {"start": 1303.54, "end": 1304.02, "word": " but", "probability": 0.91552734375}, {"start": 1304.02, "end": 1304.34, "word": " we", "probability": 0.92578125}, {"start": 1304.34, "end": 1305.0, "word": " obtain", "probability": 0.76416015625}, {"start": 1305.0, "end": 1305.78, "word": " different", "probability": 0.89111328125}, {"start": 1305.78, "end": 1306.5, "word": " sample", "probability": 0.8203125}, {"start": 1306.5, "end": 1307.28, "word": " 
statistics,", "probability": 0.80419921875}, {"start": 1307.42, "end": 1307.5, "word": " I", "probability": 0.9345703125}, {"start": 1307.5, "end": 1307.58, "word": " mean", "probability": 0.9716796875}, {"start": 1307.58, "end": 1307.88, "word": " different", "probability": 0.7607421875}, {"start": 1307.88, "end": 1308.26, "word": " sample", "probability": 0.892578125}, {"start": 1308.26, "end": 1308.58, "word": " means.", "probability": 0.9267578125}, {"start": 1309.92, "end": 1310.68, "word": " We", "probability": 0.94189453125}, {"start": 1310.68, "end": 1310.86, "word": " are", "probability": 0.939453125}, {"start": 1310.86, "end": 1311.36, "word": " interested", "probability": 0.86328125}, {"start": 1311.36, "end": 1313.92, "word": " in", "probability": 0.93115234375}, {"start": 1313.92, "end": 1314.26, "word": " the", "probability": 0.92236328125}, {"start": 1314.26, "end": 1315.02, "word": " distribution", "probability": 0.8447265625}, {"start": 1315.02, "end": 1315.98, "word": " of", "probability": 0.96533203125}, {"start": 1315.98, "end": 1316.42, "word": " all", "probability": 0.94384765625}, {"start": 1316.42, "end": 1317.06, "word": " potential", "probability": 0.91357421875}, {"start": 1317.06, "end": 1317.8, "word": " mean", "probability": 0.869140625}, {"start": 1317.8, "end": 1318.3, "word": " GBA", "probability": 0.5799560546875}], "temperature": 1.0}, {"id": 51, "seek": 134764, "start": 1319.08, "end": 1347.64, "text": " We might calculate for any given sample of 50 students. So let's focus into these values. So we have again a random sample of 50 sample means. So we have 1, 2, 3, 4, 5, maybe 50, 6, whatever we have. So select a random sample of size 20. Maybe we repeat this sample 10 times. 
So we end with 10.", "tokens": [492, 1062, 8873, 337, 604, 2212, 6889, 295, 2625, 1731, 13, 407, 718, 311, 1879, 666, 613, 4190, 13, 407, 321, 362, 797, 257, 4974, 6889, 295, 2625, 6889, 1355, 13, 407, 321, 362, 502, 11, 568, 11, 805, 11, 1017, 11, 1025, 11, 1310, 2625, 11, 1386, 11, 2035, 321, 362, 13, 407, 3048, 257, 4974, 6889, 295, 2744, 945, 13, 2704, 321, 7149, 341, 6889, 1266, 1413, 13, 407, 321, 917, 365, 1266, 13], "avg_logprob": -0.15848214130897026, "compression_ratio": 1.6298342541436464, "no_speech_prob": 0.0, "words": [{"start": 1319.08, "end": 1319.38, "word": " We", "probability": 0.312255859375}, {"start": 1319.38, "end": 1319.76, "word": " might", "probability": 0.8662109375}, {"start": 1319.76, "end": 1320.3, "word": " calculate", "probability": 0.880859375}, {"start": 1320.3, "end": 1320.64, "word": " for", "probability": 0.89013671875}, {"start": 1320.64, "end": 1320.9, "word": " any", "probability": 0.89404296875}, {"start": 1320.9, "end": 1321.18, "word": " given", "probability": 0.90087890625}, {"start": 1321.18, "end": 1321.6, "word": " sample", "probability": 0.8740234375}, {"start": 1321.6, "end": 1322.16, "word": " of", "probability": 0.90869140625}, {"start": 1322.16, "end": 1322.48, "word": " 50", "probability": 0.81982421875}, {"start": 1322.48, "end": 1322.98, "word": " students.", "probability": 0.9599609375}, {"start": 1323.82, "end": 1324.04, "word": " So", "probability": 0.8720703125}, {"start": 1324.04, "end": 1324.92, "word": " let's", "probability": 0.801025390625}, {"start": 1324.92, "end": 1325.48, "word": " focus", "probability": 0.9375}, {"start": 1325.48, "end": 1326.48, "word": " into", "probability": 0.68408203125}, {"start": 1326.48, "end": 1327.08, "word": " these", "probability": 0.82421875}, {"start": 1327.08, "end": 1327.74, "word": " values.", "probability": 0.9658203125}, {"start": 1328.3, "end": 1328.64, "word": " So", "probability": 0.90234375}, {"start": 1328.64, "end": 1328.76, "word": " we", "probability": 
0.87158203125}, {"start": 1328.76, "end": 1328.9, "word": " have", "probability": 0.94482421875}, {"start": 1328.9, "end": 1329.2, "word": " again", "probability": 0.8466796875}, {"start": 1329.2, "end": 1329.44, "word": " a", "probability": 0.68798828125}, {"start": 1329.44, "end": 1329.66, "word": " random", "probability": 0.8603515625}, {"start": 1329.66, "end": 1330.22, "word": " sample", "probability": 0.88525390625}, {"start": 1330.22, "end": 1331.02, "word": " of", "probability": 0.955078125}, {"start": 1331.02, "end": 1331.68, "word": " 50", "probability": 0.83837890625}, {"start": 1331.68, "end": 1332.88, "word": " sample", "probability": 0.80224609375}, {"start": 1332.88, "end": 1333.26, "word": " means.", "probability": 0.8896484375}, {"start": 1333.66, "end": 1333.84, "word": " So", "probability": 0.92529296875}, {"start": 1333.84, "end": 1333.94, "word": " we", "probability": 0.8564453125}, {"start": 1333.94, "end": 1334.08, "word": " have", "probability": 0.947265625}, {"start": 1334.08, "end": 1334.38, "word": " 1,", "probability": 0.568359375}, {"start": 1334.46, "end": 1334.58, "word": " 2,", "probability": 0.81640625}, {"start": 1334.7, "end": 1334.86, "word": " 3,", "probability": 0.97900390625}, {"start": 1335.0, "end": 1335.2, "word": " 4,", "probability": 0.9892578125}, {"start": 1335.32, "end": 1335.54, "word": " 5,", "probability": 0.98681640625}, {"start": 1335.6, "end": 1335.76, "word": " maybe", "probability": 0.90625}, {"start": 1335.76, "end": 1336.18, "word": " 50,", "probability": 0.673828125}, {"start": 1336.32, "end": 1336.52, "word": " 6,", "probability": 0.62646484375}, {"start": 1336.6, "end": 1336.82, "word": " whatever", "probability": 0.9443359375}, {"start": 1336.82, "end": 1337.12, "word": " we", "probability": 0.93994140625}, {"start": 1337.12, "end": 1337.28, "word": " have.", "probability": 0.92919921875}, {"start": 1337.86, "end": 1338.08, "word": " So", "probability": 0.94580078125}, {"start": 1338.08, "end": 1338.48, 
"word": " select", "probability": 0.6943359375}, {"start": 1338.48, "end": 1338.64, "word": " a", "probability": 0.9677734375}, {"start": 1338.64, "end": 1338.82, "word": " random", "probability": 0.85205078125}, {"start": 1338.82, "end": 1339.16, "word": " sample", "probability": 0.90185546875}, {"start": 1339.16, "end": 1339.32, "word": " of", "probability": 0.481201171875}, {"start": 1339.32, "end": 1339.5, "word": " size", "probability": 0.8310546875}, {"start": 1339.5, "end": 1339.86, "word": " 20.", "probability": 0.94970703125}, {"start": 1341.32, "end": 1341.74, "word": " Maybe", "probability": 0.93310546875}, {"start": 1341.74, "end": 1342.0, "word": " we", "probability": 0.94482421875}, {"start": 1342.0, "end": 1342.36, "word": " repeat", "probability": 0.92822265625}, {"start": 1342.36, "end": 1342.66, "word": " this", "probability": 0.92626953125}, {"start": 1342.66, "end": 1343.04, "word": " sample", "probability": 0.87890625}, {"start": 1343.04, "end": 1343.34, "word": " 10", "probability": 0.796875}, {"start": 1343.34, "end": 1343.82, "word": " times.", "probability": 0.9326171875}, {"start": 1345.56, "end": 1346.16, "word": " So", "probability": 0.95751953125}, {"start": 1346.16, "end": 1346.4, "word": " we", "probability": 0.947265625}, {"start": 1346.4, "end": 1346.66, "word": " end", "probability": 0.900390625}, {"start": 1346.66, "end": 1347.0, "word": " with", "probability": 0.900390625}, {"start": 1347.0, "end": 1347.64, "word": " 10.", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 52, "seek": 137031, "start": 1348.01, "end": 1370.31, "text": " different values of the simple means. Now we have new ten means. Now the question is, what's the center of these values, I mean for the means? What's the spread of the means? And what's the shape of the means? 
So these are the mainly three questions.", "tokens": [819, 4190, 295, 264, 2199, 1355, 13, 823, 321, 362, 777, 2064, 1355, 13, 823, 264, 1168, 307, 11, 437, 311, 264, 3056, 295, 613, 4190, 11, 286, 914, 337, 264, 1355, 30, 708, 311, 264, 3974, 295, 264, 1355, 30, 400, 437, 311, 264, 3909, 295, 264, 1355, 30, 407, 613, 366, 264, 8704, 1045, 1651, 13], "avg_logprob": -0.21623410966436743, "compression_ratio": 1.7310344827586206, "no_speech_prob": 0.0, "words": [{"start": 1348.01, "end": 1348.59, "word": " different", "probability": 0.423828125}, {"start": 1348.59, "end": 1349.19, "word": " values", "probability": 0.96044921875}, {"start": 1349.19, "end": 1349.47, "word": " of", "probability": 0.94970703125}, {"start": 1349.47, "end": 1349.65, "word": " the", "probability": 0.83837890625}, {"start": 1349.65, "end": 1349.91, "word": " simple", "probability": 0.35791015625}, {"start": 1349.91, "end": 1350.17, "word": " means.", "probability": 0.58740234375}, {"start": 1350.39, "end": 1350.57, "word": " Now", "probability": 0.890625}, {"start": 1350.57, "end": 1350.69, "word": " we", "probability": 0.685546875}, {"start": 1350.69, "end": 1350.93, "word": " have", "probability": 0.951171875}, {"start": 1350.93, "end": 1351.35, "word": " new", "probability": 0.5634765625}, {"start": 1351.35, "end": 1351.65, "word": " ten", "probability": 0.461181640625}, {"start": 1351.65, "end": 1352.19, "word": " means.", "probability": 0.91748046875}, {"start": 1353.21, "end": 1353.47, "word": " Now", "probability": 0.8994140625}, {"start": 1353.47, "end": 1353.65, "word": " the", "probability": 0.8310546875}, {"start": 1353.65, "end": 1354.01, "word": " question", "probability": 0.91748046875}, {"start": 1354.01, "end": 1354.41, "word": " is,", "probability": 0.951171875}, {"start": 1355.55, "end": 1355.79, "word": " what's", "probability": 0.839111328125}, {"start": 1355.79, "end": 1355.95, "word": " the", "probability": 0.92431640625}, {"start": 1355.95, "end": 1356.25, "word": " 
center", "probability": 0.6943359375}, {"start": 1356.25, "end": 1358.13, "word": " of", "probability": 0.92919921875}, {"start": 1358.13, "end": 1358.43, "word": " these", "probability": 0.8359375}, {"start": 1358.43, "end": 1358.97, "word": " values,", "probability": 0.97705078125}, {"start": 1359.63, "end": 1360.13, "word": " I", "probability": 0.95849609375}, {"start": 1360.13, "end": 1360.27, "word": " mean", "probability": 0.96875}, {"start": 1360.27, "end": 1360.53, "word": " for", "probability": 0.80712890625}, {"start": 1360.53, "end": 1360.71, "word": " the", "probability": 0.91943359375}, {"start": 1360.71, "end": 1361.05, "word": " means?", "probability": 0.9375}, {"start": 1361.81, "end": 1362.41, "word": " What's", "probability": 0.920166015625}, {"start": 1362.41, "end": 1362.59, "word": " the", "probability": 0.9169921875}, {"start": 1362.59, "end": 1362.97, "word": " spread", "probability": 0.89501953125}, {"start": 1362.97, "end": 1363.43, "word": " of", "probability": 0.97216796875}, {"start": 1363.43, "end": 1363.61, "word": " the", "probability": 0.9208984375}, {"start": 1363.61, "end": 1363.93, "word": " means?", "probability": 0.9306640625}, {"start": 1364.85, "end": 1365.11, "word": " And", "probability": 0.9033203125}, {"start": 1365.11, "end": 1365.47, "word": " what's", "probability": 0.93115234375}, {"start": 1365.47, "end": 1365.63, "word": " the", "probability": 0.91748046875}, {"start": 1365.63, "end": 1365.95, "word": " shape", "probability": 0.9130859375}, {"start": 1365.95, "end": 1366.13, "word": " of", "probability": 0.96484375}, {"start": 1366.13, "end": 1366.25, "word": " the", "probability": 0.85302734375}, {"start": 1366.25, "end": 1366.51, "word": " means?", "probability": 0.91015625}, {"start": 1367.11, "end": 1367.35, "word": " So", "probability": 0.88232421875}, {"start": 1367.35, "end": 1367.97, "word": " these", "probability": 0.6708984375}, {"start": 1367.97, "end": 1368.21, "word": " are", "probability": 0.943359375}, 
{"start": 1368.21, "end": 1368.35, "word": " the", "probability": 0.68798828125}, {"start": 1368.35, "end": 1368.67, "word": " mainly", "probability": 0.95849609375}, {"start": 1368.67, "end": 1369.79, "word": " three", "probability": 0.896484375}, {"start": 1369.79, "end": 1370.31, "word": " questions.", "probability": 0.9521484375}], "temperature": 1.0}, {"id": 53, "seek": 140295, "start": 1373.51, "end": 1402.95, "text": " For example, let's get just simple example and that we have only population of size 4. In the real life, the population size is much bigger than 4, but just for illustration. Because size 4, I mean if the population is 4, it's a small population.", "tokens": [1171, 1365, 11, 718, 311, 483, 445, 2199, 1365, 293, 300, 321, 362, 787, 4415, 295, 2744, 1017, 13, 682, 264, 957, 993, 11, 264, 4415, 2744, 307, 709, 3801, 813, 1017, 11, 457, 445, 337, 22645, 13, 1436, 2744, 1017, 11, 286, 914, 498, 264, 4415, 307, 1017, 11, 309, 311, 257, 1359, 4415, 13], "avg_logprob": -0.23327850458914773, "compression_ratio": 1.5732484076433122, "no_speech_prob": 0.0, "words": [{"start": 1373.51, "end": 1373.83, "word": " For", "probability": 0.70751953125}, {"start": 1373.83, "end": 1374.21, "word": " example,", "probability": 0.951171875}, {"start": 1374.71, "end": 1375.13, "word": " let's", "probability": 0.9130859375}, {"start": 1375.13, "end": 1375.45, "word": " get", "probability": 0.5830078125}, {"start": 1375.45, "end": 1375.79, "word": " just", "probability": 0.798828125}, {"start": 1375.79, "end": 1376.17, "word": " simple", "probability": 0.63037109375}, {"start": 1376.17, "end": 1376.57, "word": " example", "probability": 0.95166015625}, {"start": 1376.57, "end": 1376.81, "word": " and", "probability": 0.3037109375}, {"start": 1376.81, "end": 1376.99, "word": " that", "probability": 0.86279296875}, {"start": 1376.99, "end": 1377.17, "word": " we", "probability": 0.89794921875}, {"start": 1377.17, "end": 1377.39, "word": " have", "probability": 
0.92578125}, {"start": 1377.39, "end": 1377.87, "word": " only", "probability": 0.90380859375}, {"start": 1377.87, "end": 1378.79, "word": " population", "probability": 0.873046875}, {"start": 1378.79, "end": 1379.27, "word": " of", "probability": 0.9658203125}, {"start": 1379.27, "end": 1379.63, "word": " size", "probability": 0.828125}, {"start": 1379.63, "end": 1380.01, "word": " 4.", "probability": 0.57421875}, {"start": 1382.45, "end": 1382.71, "word": " In", "probability": 0.91796875}, {"start": 1382.71, "end": 1384.37, "word": " the", "probability": 0.67431640625}, {"start": 1384.37, "end": 1384.73, "word": " real", "probability": 0.8486328125}, {"start": 1384.73, "end": 1385.15, "word": " life,", "probability": 0.9033203125}, {"start": 1386.47, "end": 1386.61, "word": " the", "probability": 0.88037109375}, {"start": 1386.61, "end": 1387.43, "word": " population", "probability": 0.9130859375}, {"start": 1387.43, "end": 1387.91, "word": " size", "probability": 0.84619140625}, {"start": 1387.91, "end": 1388.07, "word": " is", "probability": 0.92578125}, {"start": 1388.07, "end": 1388.39, "word": " much", "probability": 0.89794921875}, {"start": 1388.39, "end": 1389.63, "word": " bigger", "probability": 0.94482421875}, {"start": 1389.63, "end": 1389.95, "word": " than", "probability": 0.94287109375}, {"start": 1389.95, "end": 1390.21, "word": " 4,", "probability": 0.83203125}, {"start": 1390.31, "end": 1390.41, "word": " but", "probability": 0.849609375}, {"start": 1390.41, "end": 1390.69, "word": " just", "probability": 0.7353515625}, {"start": 1390.69, "end": 1390.99, "word": " for", "probability": 0.26025390625}, {"start": 1390.99, "end": 1394.97, "word": " illustration.", "probability": 0.583984375}, {"start": 1397.29, "end": 1397.93, "word": " Because", "probability": 0.8232421875}, {"start": 1397.93, "end": 1398.25, "word": " size", "probability": 0.5849609375}, {"start": 1398.25, "end": 1398.71, "word": " 4,", "probability": 0.876953125}, {"start": 
1398.85, "end": 1398.95, "word": " I", "probability": 0.8837890625}, {"start": 1398.95, "end": 1399.07, "word": " mean", "probability": 0.96826171875}, {"start": 1399.07, "end": 1399.21, "word": " if", "probability": 0.734375}, {"start": 1399.21, "end": 1399.33, "word": " the", "probability": 0.77392578125}, {"start": 1399.33, "end": 1399.63, "word": " population", "probability": 0.9482421875}, {"start": 1399.63, "end": 1399.87, "word": " is", "probability": 0.9384765625}, {"start": 1399.87, "end": 1400.19, "word": " 4,", "probability": 0.92578125}, {"start": 1401.49, "end": 1402.21, "word": " it's", "probability": 0.791015625}, {"start": 1402.21, "end": 1402.25, "word": " a", "probability": 0.5556640625}, {"start": 1402.25, "end": 1402.49, "word": " small", "probability": 0.916015625}, {"start": 1402.49, "end": 1402.95, "word": " population.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 54, "seek": 142985, "start": 1403.41, "end": 1429.85, "text": " So we can take all the values and find the mean and standard deviation. But in reality, we have more than that. So this one just for as example. So let's suppose that we have a population of size 4. So n equals 4. 
And we are interested in the ages.", "tokens": [407, 321, 393, 747, 439, 264, 4190, 293, 915, 264, 914, 293, 3832, 25163, 13, 583, 294, 4103, 11, 321, 362, 544, 813, 300, 13, 407, 341, 472, 445, 337, 382, 1365, 13, 407, 718, 311, 7297, 300, 321, 362, 257, 4415, 295, 2744, 1017, 13, 407, 297, 6915, 1017, 13, 400, 321, 366, 3102, 294, 264, 12357, 13], "avg_logprob": -0.18750000447034837, "compression_ratio": 1.464705882352941, "no_speech_prob": 0.0, "words": [{"start": 1403.41, "end": 1403.73, "word": " So", "probability": 0.6337890625}, {"start": 1403.73, "end": 1403.83, "word": " we", "probability": 0.462158203125}, {"start": 1403.83, "end": 1403.99, "word": " can", "probability": 0.93212890625}, {"start": 1403.99, "end": 1404.27, "word": " take", "probability": 0.865234375}, {"start": 1404.27, "end": 1404.67, "word": " all", "probability": 0.953125}, {"start": 1404.67, "end": 1404.95, "word": " the", "probability": 0.83203125}, {"start": 1404.95, "end": 1405.91, "word": " values", "probability": 0.95751953125}, {"start": 1405.91, "end": 1406.47, "word": " and", "probability": 0.85498046875}, {"start": 1406.47, "end": 1406.69, "word": " find", "probability": 0.88916015625}, {"start": 1406.69, "end": 1406.85, "word": " the", "probability": 0.8994140625}, {"start": 1406.85, "end": 1406.99, "word": " mean", "probability": 0.87548828125}, {"start": 1406.99, "end": 1407.11, "word": " and", "probability": 0.896484375}, {"start": 1407.11, "end": 1407.25, "word": " standard", "probability": 0.41357421875}, {"start": 1407.25, "end": 1407.61, "word": " deviation.", "probability": 0.9423828125}, {"start": 1408.29, "end": 1408.57, "word": " But", "probability": 0.90185546875}, {"start": 1408.57, "end": 1409.07, "word": " in", "probability": 0.83447265625}, {"start": 1409.07, "end": 1409.45, "word": " reality,", "probability": 0.97509765625}, {"start": 1409.53, "end": 1409.65, "word": " we", "probability": 0.95947265625}, {"start": 1409.65, "end": 1409.91, "word": " have", 
"probability": 0.94580078125}, {"start": 1409.91, "end": 1410.23, "word": " more", "probability": 0.9384765625}, {"start": 1410.23, "end": 1410.39, "word": " than", "probability": 0.953125}, {"start": 1410.39, "end": 1410.63, "word": " that.", "probability": 0.87451171875}, {"start": 1410.93, "end": 1411.07, "word": " So", "probability": 0.66796875}, {"start": 1411.07, "end": 1411.43, "word": " this", "probability": 0.359375}, {"start": 1411.43, "end": 1412.27, "word": " one", "probability": 0.8671875}, {"start": 1412.27, "end": 1412.47, "word": " just", "probability": 0.41162109375}, {"start": 1412.47, "end": 1412.79, "word": " for", "probability": 0.7998046875}, {"start": 1412.79, "end": 1414.09, "word": " as", "probability": 0.5556640625}, {"start": 1414.09, "end": 1414.43, "word": " example.", "probability": 0.91162109375}, {"start": 1414.99, "end": 1415.25, "word": " So", "probability": 0.94482421875}, {"start": 1415.25, "end": 1415.75, "word": " let's", "probability": 0.953857421875}, {"start": 1415.75, "end": 1416.99, "word": " suppose", "probability": 0.89892578125}, {"start": 1416.99, "end": 1417.23, "word": " that", "probability": 0.9345703125}, {"start": 1417.23, "end": 1417.39, "word": " we", "probability": 0.953125}, {"start": 1417.39, "end": 1417.53, "word": " have", "probability": 0.947265625}, {"start": 1417.53, "end": 1417.67, "word": " a", "probability": 0.9482421875}, {"start": 1417.67, "end": 1418.37, "word": " population", "probability": 0.92919921875}, {"start": 1418.37, "end": 1418.65, "word": " of", "probability": 0.94580078125}, {"start": 1418.65, "end": 1418.91, "word": " size", "probability": 0.83935546875}, {"start": 1418.91, "end": 1419.27, "word": " 4.", "probability": 0.517578125}, {"start": 1421.09, "end": 1421.43, "word": " So", "probability": 0.947265625}, {"start": 1421.43, "end": 1421.63, "word": " n", "probability": 0.662109375}, {"start": 1421.63, "end": 1422.27, "word": " equals", "probability": 0.8369140625}, {"start": 
1422.27, "end": 1422.79, "word": " 4.", "probability": 0.9326171875}, {"start": 1426.53, "end": 1427.17, "word": " And", "probability": 0.85302734375}, {"start": 1427.17, "end": 1427.39, "word": " we", "probability": 0.95263671875}, {"start": 1427.39, "end": 1427.73, "word": " are", "probability": 0.93994140625}, {"start": 1427.73, "end": 1428.37, "word": " interested", "probability": 0.869140625}, {"start": 1428.37, "end": 1429.39, "word": " in", "probability": 0.93798828125}, {"start": 1429.39, "end": 1429.53, "word": " the", "probability": 0.86279296875}, {"start": 1429.53, "end": 1429.85, "word": " ages.", "probability": 0.7431640625}], "temperature": 1.0}, {"id": 55, "seek": 145991, "start": 1432.01, "end": 1459.91, "text": " And suppose the values of X, X again represents H, and the values we have. So these are the four values we have. Now simple calculation will give you the mean, the population mean.", "tokens": [400, 7297, 264, 4190, 295, 1783, 11, 1783, 797, 8855, 389, 11, 293, 264, 4190, 321, 362, 13, 407, 613, 366, 264, 1451, 4190, 321, 362, 13, 823, 2199, 17108, 486, 976, 291, 264, 914, 11, 264, 4415, 914, 13], "avg_logprob": -0.2734375, "compression_ratio": 1.4365079365079365, "no_speech_prob": 0.0, "words": [{"start": 1432.01, "end": 1432.33, "word": " And", "probability": 0.76513671875}, {"start": 1432.33, "end": 1432.79, "word": " suppose", "probability": 0.86083984375}, {"start": 1432.79, "end": 1434.03, "word": " the", "probability": 0.7177734375}, {"start": 1434.03, "end": 1434.37, "word": " values", "probability": 0.84619140625}, {"start": 1434.37, "end": 1434.53, "word": " of", "probability": 0.962890625}, {"start": 1434.53, "end": 1434.87, "word": " X,", "probability": 0.429443359375}, {"start": 1435.15, "end": 1435.49, "word": " X", "probability": 0.88232421875}, {"start": 1435.49, "end": 1435.75, "word": " again", "probability": 0.859375}, {"start": 1435.75, "end": 1436.29, "word": " represents", "probability": 0.60595703125}, {"start": 
1436.29, "end": 1438.93, "word": " H,", "probability": 0.2822265625}, {"start": 1440.69, "end": 1440.89, "word": " and", "probability": 0.89111328125}, {"start": 1440.89, "end": 1441.03, "word": " the", "probability": 0.8740234375}, {"start": 1441.03, "end": 1441.33, "word": " values", "probability": 0.953125}, {"start": 1441.33, "end": 1441.51, "word": " we", "probability": 0.89697265625}, {"start": 1441.51, "end": 1441.81, "word": " have.", "probability": 0.94580078125}, {"start": 1446.09, "end": 1446.75, "word": " So", "probability": 0.7255859375}, {"start": 1446.75, "end": 1446.99, "word": " these", "probability": 0.7587890625}, {"start": 1446.99, "end": 1447.21, "word": " are", "probability": 0.9482421875}, {"start": 1447.21, "end": 1447.39, "word": " the", "probability": 0.88134765625}, {"start": 1447.39, "end": 1448.11, "word": " four", "probability": 0.828125}, {"start": 1448.11, "end": 1448.47, "word": " values", "probability": 0.97509765625}, {"start": 1448.47, "end": 1448.65, "word": " we", "probability": 0.935546875}, {"start": 1448.65, "end": 1448.93, "word": " have.", "probability": 0.9462890625}, {"start": 1452.05, "end": 1452.71, "word": " Now", "probability": 0.9287109375}, {"start": 1452.71, "end": 1453.15, "word": " simple", "probability": 0.525390625}, {"start": 1453.15, "end": 1453.79, "word": " calculation", "probability": 0.873046875}, {"start": 1453.79, "end": 1456.91, "word": " will", "probability": 0.82373046875}, {"start": 1456.91, "end": 1457.25, "word": " give", "probability": 0.875}, {"start": 1457.25, "end": 1457.47, "word": " you", "probability": 0.953125}, {"start": 1457.47, "end": 1457.73, "word": " the", "probability": 0.91357421875}, {"start": 1457.73, "end": 1457.97, "word": " mean,", "probability": 0.494384765625}, {"start": 1458.89, "end": 1459.07, "word": " the", "probability": 0.90478515625}, {"start": 1459.07, "end": 1459.35, "word": " population", "probability": 0.95068359375}, {"start": 1459.35, "end": 1459.91, "word": " 
mean.", "probability": 0.94873046875}], "temperature": 1.0}, {"id": 56, "seek": 149509, "start": 1465.93, "end": 1495.09, "text": " Just add these values and divide by the operation size, we'll get 21 years. And sigma, as we mentioned in chapter three, square root of this quantity will give 2.236 years. So simple calculation will give these results. Now if you look at distribution of these values,", "tokens": [1449, 909, 613, 4190, 293, 9845, 538, 264, 6916, 2744, 11, 321, 603, 483, 5080, 924, 13, 400, 12771, 11, 382, 321, 2835, 294, 7187, 1045, 11, 3732, 5593, 295, 341, 11275, 486, 976, 568, 13, 9356, 21, 924, 13, 407, 2199, 17108, 486, 976, 613, 3542, 13, 823, 498, 291, 574, 412, 7316, 295, 613, 4190, 11], "avg_logprob": -0.235963985576468, "compression_ratio": 1.4619565217391304, "no_speech_prob": 0.0, "words": [{"start": 1465.93, "end": 1466.35, "word": " Just", "probability": 0.5224609375}, {"start": 1466.35, "end": 1466.83, "word": " add", "probability": 0.88330078125}, {"start": 1466.83, "end": 1467.09, "word": " these", "probability": 0.83447265625}, {"start": 1467.09, "end": 1467.49, "word": " values", "probability": 0.96435546875}, {"start": 1467.49, "end": 1467.71, "word": " and", "probability": 0.76318359375}, {"start": 1467.71, "end": 1468.01, "word": " divide", "probability": 0.8876953125}, {"start": 1468.01, "end": 1468.41, "word": " by", "probability": 0.94287109375}, {"start": 1468.41, "end": 1469.79, "word": " the", "probability": 0.54052734375}, {"start": 1469.79, "end": 1470.41, "word": " operation", "probability": 0.70556640625}, {"start": 1470.41, "end": 1470.93, "word": " size,", "probability": 0.81201171875}, {"start": 1471.01, "end": 1471.17, "word": " we'll", "probability": 0.711181640625}, {"start": 1471.17, "end": 1471.27, "word": " get", "probability": 0.71484375}, {"start": 1471.27, "end": 1471.57, "word": " 21", "probability": 0.6982421875}, {"start": 1471.57, "end": 1472.13, "word": " years.", "probability": 0.90673828125}, {"start": 
1473.95, "end": 1474.45, "word": " And", "probability": 0.947265625}, {"start": 1474.45, "end": 1474.87, "word": " sigma,", "probability": 0.71484375}, {"start": 1475.09, "end": 1475.27, "word": " as", "probability": 0.96240234375}, {"start": 1475.27, "end": 1475.41, "word": " we", "probability": 0.92333984375}, {"start": 1475.41, "end": 1475.71, "word": " mentioned", "probability": 0.82470703125}, {"start": 1475.71, "end": 1475.93, "word": " in", "probability": 0.42578125}, {"start": 1475.93, "end": 1476.15, "word": " chapter", "probability": 0.5693359375}, {"start": 1476.15, "end": 1476.59, "word": " three,", "probability": 0.7529296875}, {"start": 1476.93, "end": 1477.49, "word": " square", "probability": 0.62548828125}, {"start": 1477.49, "end": 1477.85, "word": " root", "probability": 0.9306640625}, {"start": 1477.85, "end": 1479.23, "word": " of", "probability": 0.857421875}, {"start": 1479.23, "end": 1479.45, "word": " this", "probability": 0.94384765625}, {"start": 1479.45, "end": 1480.05, "word": " quantity", "probability": 0.98486328125}, {"start": 1480.05, "end": 1480.57, "word": " will", "probability": 0.69921875}, {"start": 1480.57, "end": 1480.89, "word": " give", "probability": 0.86376953125}, {"start": 1480.89, "end": 1481.23, "word": " 2", "probability": 0.89892578125}, {"start": 1481.23, "end": 1484.55, "word": ".236", "probability": 0.8069661458333334}, {"start": 1484.55, "end": 1484.83, "word": " years.", "probability": 0.81005859375}, {"start": 1487.33, "end": 1488.01, "word": " So", "probability": 0.92724609375}, {"start": 1488.01, "end": 1488.33, "word": " simple", "probability": 0.56201171875}, {"start": 1488.33, "end": 1488.83, "word": " calculation", "probability": 0.85107421875}, {"start": 1488.83, "end": 1489.07, "word": " will", "probability": 0.86474609375}, {"start": 1489.07, "end": 1489.25, "word": " give", "probability": 0.82861328125}, {"start": 1489.25, "end": 1489.45, "word": " these", "probability": 0.56494140625}, {"start": 
1489.45, "end": 1489.85, "word": " results.", "probability": 0.8544921875}, {"start": 1491.71, "end": 1492.47, "word": " Now", "probability": 0.95166015625}, {"start": 1492.47, "end": 1492.63, "word": " if", "probability": 0.5947265625}, {"start": 1492.63, "end": 1492.69, "word": " you", "probability": 0.8388671875}, {"start": 1492.69, "end": 1492.83, "word": " look", "probability": 0.95458984375}, {"start": 1492.83, "end": 1492.95, "word": " at", "probability": 0.95703125}, {"start": 1492.95, "end": 1493.57, "word": " distribution", "probability": 0.68701171875}, {"start": 1493.57, "end": 1494.09, "word": " of", "probability": 0.9658203125}, {"start": 1494.09, "end": 1494.47, "word": " these", "probability": 0.83984375}, {"start": 1494.47, "end": 1495.09, "word": " values,", "probability": 0.96875}], "temperature": 1.0}, {"id": 57, "seek": 152373, "start": 1496.05, "end": 1523.73, "text": " Because as I mentioned, we are looking for center, spread, and shape. The center is 21 of the exact population. The variation is around 2.2. Now, the shape of distribution. Now, 18 represents once. 
I mean, we have only one 18, so 18 divided one time over 425.", "tokens": [1436, 382, 286, 2835, 11, 321, 366, 1237, 337, 3056, 11, 3974, 11, 293, 3909, 13, 440, 3056, 307, 5080, 295, 264, 1900, 4415, 13, 440, 12990, 307, 926, 568, 13, 17, 13, 823, 11, 264, 3909, 295, 7316, 13, 823, 11, 2443, 8855, 1564, 13, 286, 914, 11, 321, 362, 787, 472, 2443, 11, 370, 2443, 6666, 472, 565, 670, 1017, 6074, 13], "avg_logprob": -0.22932692307692307, "compression_ratio": 1.4606741573033708, "no_speech_prob": 0.0, "words": [{"start": 1496.05, "end": 1496.33, "word": " Because", "probability": 0.591796875}, {"start": 1496.33, "end": 1496.55, "word": " as", "probability": 0.7431640625}, {"start": 1496.55, "end": 1496.93, "word": " I", "probability": 0.798828125}, {"start": 1496.93, "end": 1497.29, "word": " mentioned,", "probability": 0.79443359375}, {"start": 1497.59, "end": 1497.71, "word": " we", "probability": 0.94873046875}, {"start": 1497.71, "end": 1497.85, "word": " are", "probability": 0.8779296875}, {"start": 1497.85, "end": 1498.05, "word": " looking", "probability": 0.91455078125}, {"start": 1498.05, "end": 1498.43, "word": " for", "probability": 0.95458984375}, {"start": 1498.43, "end": 1499.15, "word": " center,", "probability": 0.494873046875}, {"start": 1499.99, "end": 1500.41, "word": " spread,", "probability": 0.6943359375}, {"start": 1500.59, "end": 1500.77, "word": " and", "probability": 0.93505859375}, {"start": 1500.77, "end": 1501.03, "word": " shape.", "probability": 0.385986328125}, {"start": 1501.81, "end": 1502.01, "word": " The", "probability": 0.88330078125}, {"start": 1502.01, "end": 1502.25, "word": " center", "probability": 0.89013671875}, {"start": 1502.25, "end": 1502.43, "word": " is", "probability": 0.90966796875}, {"start": 1502.43, "end": 1502.87, "word": " 21", "probability": 0.88720703125}, {"start": 1502.87, "end": 1503.57, "word": " of", "probability": 0.873046875}, {"start": 1503.57, "end": 1503.81, "word": " the", "probability": 
0.9189453125}, {"start": 1503.81, "end": 1504.27, "word": " exact", "probability": 0.9228515625}, {"start": 1504.27, "end": 1504.95, "word": " population.", "probability": 0.94775390625}, {"start": 1506.33, "end": 1506.69, "word": " The", "probability": 0.7861328125}, {"start": 1506.69, "end": 1508.33, "word": " variation", "probability": 0.939453125}, {"start": 1508.33, "end": 1508.59, "word": " is", "probability": 0.94580078125}, {"start": 1508.59, "end": 1508.77, "word": " around", "probability": 0.91845703125}, {"start": 1508.77, "end": 1508.95, "word": " 2", "probability": 0.98388671875}, {"start": 1508.95, "end": 1509.43, "word": ".2.", "probability": 0.990966796875}, {"start": 1510.05, "end": 1510.31, "word": " Now,", "probability": 0.92724609375}, {"start": 1510.41, "end": 1510.49, "word": " the", "probability": 0.88916015625}, {"start": 1510.49, "end": 1510.85, "word": " shape", "probability": 0.9072265625}, {"start": 1510.85, "end": 1511.13, "word": " of", "probability": 0.356689453125}, {"start": 1511.13, "end": 1511.61, "word": " distribution.", "probability": 0.703125}, {"start": 1512.49, "end": 1512.67, "word": " Now,", "probability": 0.908203125}, {"start": 1513.11, "end": 1513.75, "word": " 18", "probability": 0.939453125}, {"start": 1513.75, "end": 1514.77, "word": " represents", "probability": 0.8798828125}, {"start": 1514.77, "end": 1515.25, "word": " once.", "probability": 0.85107421875}, {"start": 1517.93, "end": 1518.21, "word": " I", "probability": 0.9384765625}, {"start": 1518.21, "end": 1518.35, "word": " mean,", "probability": 0.9638671875}, {"start": 1518.39, "end": 1518.51, "word": " we", "probability": 0.95703125}, {"start": 1518.51, "end": 1518.67, "word": " have", "probability": 0.916015625}, {"start": 1518.67, "end": 1518.91, "word": " only", "probability": 0.91650390625}, {"start": 1518.91, "end": 1519.13, "word": " one", "probability": 0.87744140625}, {"start": 1519.13, "end": 1519.47, "word": " 18,", "probability": 0.94873046875}, 
{"start": 1519.63, "end": 1519.85, "word": " so", "probability": 0.9384765625}, {"start": 1519.85, "end": 1520.81, "word": " 18", "probability": 0.7421875}, {"start": 1520.81, "end": 1521.27, "word": " divided", "probability": 0.3486328125}, {"start": 1521.27, "end": 1522.35, "word": " one", "probability": 0.5498046875}, {"start": 1522.35, "end": 1522.69, "word": " time", "probability": 0.75537109375}, {"start": 1522.69, "end": 1523.05, "word": " over", "probability": 0.82568359375}, {"start": 1523.05, "end": 1523.73, "word": " 425.", "probability": 0.886474609375}], "temperature": 1.0}, {"id": 58, "seek": 155375, "start": 1525.13, "end": 1553.75, "text": " 20% represent also 25%, the same as for 22 or 24. In this case, we have something called uniform distribution. In this case, the proportions are the same. So, the mean, not normal, it's uniform distribution. The mean is 21, standard deviation is 2.236, and the distribution is uniform.", "tokens": [945, 4, 2906, 611, 3552, 8923, 264, 912, 382, 337, 5853, 420, 4022, 13, 682, 341, 1389, 11, 321, 362, 746, 1219, 9452, 7316, 13, 682, 341, 1389, 11, 264, 32482, 366, 264, 912, 13, 407, 11, 264, 914, 11, 406, 2710, 11, 309, 311, 9452, 7316, 13, 440, 914, 307, 5080, 11, 3832, 25163, 307, 568, 13, 9356, 21, 11, 293, 264, 7316, 307, 9452, 13], "avg_logprob": -0.23081342196639845, "compression_ratio": 1.672514619883041, "no_speech_prob": 0.0, "words": [{"start": 1525.13, "end": 1525.81, "word": " 20", "probability": 0.2440185546875}, {"start": 1525.81, "end": 1525.81, "word": "%", "probability": 0.5185546875}, {"start": 1525.81, "end": 1527.73, "word": " represent", "probability": 0.31201171875}, {"start": 1527.73, "end": 1528.37, "word": " also", "probability": 0.79150390625}, {"start": 1528.37, "end": 1529.29, "word": " 25%,", "probability": 0.696044921875}, {"start": 1529.29, "end": 1529.51, "word": " the", "probability": 0.7841796875}, {"start": 1529.51, "end": 1529.67, "word": " same", "probability": 0.89990234375}, 
{"start": 1529.67, "end": 1529.83, "word": " as", "probability": 0.89990234375}, {"start": 1529.83, "end": 1529.99, "word": " for", "probability": 0.814453125}, {"start": 1529.99, "end": 1530.37, "word": " 22", "probability": 0.86767578125}, {"start": 1530.37, "end": 1530.55, "word": " or", "probability": 0.611328125}, {"start": 1530.55, "end": 1530.89, "word": " 24.", "probability": 0.97412109375}, {"start": 1531.39, "end": 1531.81, "word": " In", "probability": 0.95068359375}, {"start": 1531.81, "end": 1532.05, "word": " this", "probability": 0.947265625}, {"start": 1532.05, "end": 1532.33, "word": " case,", "probability": 0.91455078125}, {"start": 1532.39, "end": 1532.51, "word": " we", "probability": 0.9599609375}, {"start": 1532.51, "end": 1532.67, "word": " have", "probability": 0.9482421875}, {"start": 1532.67, "end": 1533.03, "word": " something", "probability": 0.88232421875}, {"start": 1533.03, "end": 1533.61, "word": " called", "probability": 0.89501953125}, {"start": 1533.61, "end": 1535.63, "word": " uniform", "probability": 0.740234375}, {"start": 1535.63, "end": 1536.39, "word": " distribution.", "probability": 0.83203125}, {"start": 1536.63, "end": 1536.73, "word": " In", "probability": 0.85009765625}, {"start": 1536.73, "end": 1536.89, "word": " this", "probability": 0.94580078125}, {"start": 1536.89, "end": 1537.07, "word": " case,", "probability": 0.923828125}, {"start": 1537.19, "end": 1537.53, "word": " the", "probability": 0.6748046875}, {"start": 1537.53, "end": 1538.15, "word": " proportions", "probability": 0.830078125}, {"start": 1538.15, "end": 1539.01, "word": " are", "probability": 0.93896484375}, {"start": 1539.01, "end": 1540.85, "word": " the", "probability": 0.67236328125}, {"start": 1540.85, "end": 1541.13, "word": " same.", "probability": 0.91162109375}, {"start": 1541.67, "end": 1542.11, "word": " So,", "probability": 0.9091796875}, {"start": 1542.29, "end": 1542.43, "word": " the", "probability": 0.2978515625}, {"start": 
1542.43, "end": 1542.67, "word": " mean,", "probability": 0.966796875}, {"start": 1543.07, "end": 1543.33, "word": " not", "probability": 0.92041015625}, {"start": 1543.33, "end": 1543.81, "word": " normal,", "probability": 0.85595703125}, {"start": 1544.99, "end": 1545.15, "word": " it's", "probability": 0.8271484375}, {"start": 1545.15, "end": 1545.51, "word": " uniform", "probability": 0.890625}, {"start": 1545.51, "end": 1545.97, "word": " distribution.", "probability": 0.8359375}, {"start": 1547.03, "end": 1547.27, "word": " The", "probability": 0.6875}, {"start": 1547.27, "end": 1547.49, "word": " mean", "probability": 0.96240234375}, {"start": 1547.49, "end": 1547.65, "word": " is", "probability": 0.9443359375}, {"start": 1547.65, "end": 1548.03, "word": " 21,", "probability": 0.9541015625}, {"start": 1548.69, "end": 1549.29, "word": " standard", "probability": 0.9052734375}, {"start": 1549.29, "end": 1549.69, "word": " deviation", "probability": 0.919921875}, {"start": 1549.69, "end": 1549.99, "word": " is", "probability": 0.9423828125}, {"start": 1549.99, "end": 1550.17, "word": " 2", "probability": 0.98291015625}, {"start": 1550.17, "end": 1551.19, "word": ".236,", "probability": 0.9765625}, {"start": 1551.65, "end": 1551.91, "word": " and", "probability": 0.94140625}, {"start": 1551.91, "end": 1552.03, "word": " the", "probability": 0.66259765625}, {"start": 1552.03, "end": 1552.49, "word": " distribution", "probability": 0.86328125}, {"start": 1552.49, "end": 1553.37, "word": " is", "probability": 0.94482421875}, {"start": 1553.37, "end": 1553.75, "word": " uniform.", "probability": 0.92626953125}], "temperature": 1.0}, {"id": 59, "seek": 158284, "start": 1554.96, "end": 1582.84, "text": " Okay, so that's center, spread and shape of the true population we have. Now suppose for example, we select a random sample of size 2 from this population. So we select a sample of size 2. 
We have 18, 20, 22, 24 years.", "tokens": [1033, 11, 370, 300, 311, 3056, 11, 3974, 293, 3909, 295, 264, 2074, 4415, 321, 362, 13, 823, 7297, 337, 1365, 11, 321, 3048, 257, 4974, 6889, 295, 2744, 568, 490, 341, 4415, 13, 407, 321, 3048, 257, 6889, 295, 2744, 568, 13, 492, 362, 2443, 11, 945, 11, 5853, 11, 4022, 924, 13], "avg_logprob": -0.1865056872367859, "compression_ratio": 1.4797297297297298, "no_speech_prob": 0.0, "words": [{"start": 1554.96, "end": 1555.28, "word": " Okay,", "probability": 0.2481689453125}, {"start": 1555.42, "end": 1555.64, "word": " so", "probability": 0.890625}, {"start": 1555.64, "end": 1556.82, "word": " that's", "probability": 0.893798828125}, {"start": 1556.82, "end": 1557.8, "word": " center,", "probability": 0.495849609375}, {"start": 1558.08, "end": 1558.52, "word": " spread", "probability": 0.74267578125}, {"start": 1558.52, "end": 1558.84, "word": " and", "probability": 0.56201171875}, {"start": 1558.84, "end": 1559.3, "word": " shape", "probability": 0.90966796875}, {"start": 1559.3, "end": 1560.02, "word": " of", "probability": 0.93701171875}, {"start": 1560.02, "end": 1560.2, "word": " the", "probability": 0.916015625}, {"start": 1560.2, "end": 1560.4, "word": " true", "probability": 0.951171875}, {"start": 1560.4, "end": 1560.88, "word": " population", "probability": 0.939453125}, {"start": 1560.88, "end": 1561.14, "word": " we", "probability": 0.92236328125}, {"start": 1561.14, "end": 1561.4, "word": " have.", "probability": 0.9521484375}, {"start": 1562.26, "end": 1562.54, "word": " Now", "probability": 0.9267578125}, {"start": 1562.54, "end": 1562.92, "word": " suppose", "probability": 0.615234375}, {"start": 1562.92, "end": 1563.1, "word": " for", "probability": 0.68115234375}, {"start": 1563.1, "end": 1563.52, "word": " example,", "probability": 0.97607421875}, {"start": 1566.6, "end": 1566.82, "word": " we", "probability": 0.88671875}, {"start": 1566.82, "end": 1567.16, "word": " select", "probability": 0.8486328125}, 
{"start": 1567.16, "end": 1567.34, "word": " a", "probability": 0.97021484375}, {"start": 1567.34, "end": 1567.56, "word": " random", "probability": 0.8662109375}, {"start": 1567.56, "end": 1568.04, "word": " sample", "probability": 0.87841796875}, {"start": 1568.04, "end": 1569.12, "word": " of", "probability": 0.95458984375}, {"start": 1569.12, "end": 1569.52, "word": " size", "probability": 0.85107421875}, {"start": 1569.52, "end": 1569.92, "word": " 2", "probability": 0.7041015625}, {"start": 1569.92, "end": 1571.78, "word": " from", "probability": 0.5126953125}, {"start": 1571.78, "end": 1572.1, "word": " this", "probability": 0.94921875}, {"start": 1572.1, "end": 1572.62, "word": " population.", "probability": 0.93310546875}, {"start": 1575.74, "end": 1576.38, "word": " So", "probability": 0.912109375}, {"start": 1576.38, "end": 1576.58, "word": " we", "probability": 0.767578125}, {"start": 1576.58, "end": 1576.96, "word": " select", "probability": 0.85302734375}, {"start": 1576.96, "end": 1577.32, "word": " a", "probability": 0.9091796875}, {"start": 1577.32, "end": 1577.62, "word": " sample", "probability": 0.89404296875}, {"start": 1577.62, "end": 1577.82, "word": " of", "probability": 0.9482421875}, {"start": 1577.82, "end": 1578.06, "word": " size", "probability": 0.8388671875}, {"start": 1578.06, "end": 1578.34, "word": " 2.", "probability": 0.95458984375}, {"start": 1580.1, "end": 1580.44, "word": " We", "probability": 0.9423828125}, {"start": 1580.44, "end": 1580.62, "word": " have", "probability": 0.94482421875}, {"start": 1580.62, "end": 1581.02, "word": " 18,", "probability": 0.9404296875}, {"start": 1581.2, "end": 1581.5, "word": " 20,", "probability": 0.9482421875}, {"start": 1581.6, "end": 1581.94, "word": " 22,", "probability": 0.96630859375}, {"start": 1582.08, "end": 1582.38, "word": " 24", "probability": 0.970703125}, {"start": 1582.38, "end": 1582.84, "word": " years.", "probability": 0.88916015625}], "temperature": 1.0}, {"id": 60, "seek": 
161200, "start": 1583.9, "end": 1612.0, "text": " We have four students, for example. And we select a sample of size two. So the first one could be 18 and 18, 18 and 20, 18 and 22. So we have 16 different samples. So number of samples in this case is 16. Imagine that we have five.", "tokens": [492, 362, 1451, 1731, 11, 337, 1365, 13, 400, 321, 3048, 257, 6889, 295, 2744, 732, 13, 407, 264, 700, 472, 727, 312, 2443, 293, 2443, 11, 2443, 293, 945, 11, 2443, 293, 5853, 13, 407, 321, 362, 3165, 819, 10938, 13, 407, 1230, 295, 10938, 294, 341, 1389, 307, 3165, 13, 11739, 300, 321, 362, 1732, 13], "avg_logprob": -0.14896715647083217, "compression_ratio": 1.4591194968553458, "no_speech_prob": 0.0, "words": [{"start": 1583.9, "end": 1584.12, "word": " We", "probability": 0.3818359375}, {"start": 1584.12, "end": 1584.3, "word": " have", "probability": 0.91259765625}, {"start": 1584.3, "end": 1584.64, "word": " four", "probability": 0.69677734375}, {"start": 1584.64, "end": 1585.32, "word": " students,", "probability": 0.97265625}, {"start": 1585.46, "end": 1585.52, "word": " for", "probability": 0.95068359375}, {"start": 1585.52, "end": 1585.86, "word": " example.", "probability": 0.97265625}, {"start": 1587.76, "end": 1588.4, "word": " And", "probability": 0.935546875}, {"start": 1588.4, "end": 1588.58, "word": " we", "probability": 0.8701171875}, {"start": 1588.58, "end": 1588.94, "word": " select", "probability": 0.81494140625}, {"start": 1588.94, "end": 1589.5, "word": " a", "probability": 0.6259765625}, {"start": 1589.5, "end": 1589.74, "word": " sample", "probability": 0.88720703125}, {"start": 1589.74, "end": 1589.96, "word": " of", "probability": 0.955078125}, {"start": 1589.96, "end": 1590.2, "word": " size", "probability": 0.84326171875}, {"start": 1590.2, "end": 1590.44, "word": " two.", "probability": 0.57861328125}, {"start": 1590.56, "end": 1590.66, "word": " So", "probability": 0.95263671875}, {"start": 1590.66, "end": 1590.84, "word": " the", "probability": 
0.873046875}, {"start": 1590.84, "end": 1591.14, "word": " first", "probability": 0.8828125}, {"start": 1591.14, "end": 1591.58, "word": " one", "probability": 0.93408203125}, {"start": 1591.58, "end": 1592.06, "word": " could", "probability": 0.873046875}, {"start": 1592.06, "end": 1592.8, "word": " be", "probability": 0.95654296875}, {"start": 1592.8, "end": 1594.1, "word": " 18", "probability": 0.93994140625}, {"start": 1594.1, "end": 1595.04, "word": " and", "probability": 0.81298828125}, {"start": 1595.04, "end": 1595.5, "word": " 18,", "probability": 0.96435546875}, {"start": 1596.44, "end": 1597.1, "word": " 18", "probability": 0.9794921875}, {"start": 1597.1, "end": 1597.24, "word": " and", "probability": 0.39208984375}, {"start": 1597.24, "end": 1597.54, "word": " 20,", "probability": 0.96826171875}, {"start": 1598.28, "end": 1598.6, "word": " 18", "probability": 0.966796875}, {"start": 1598.6, "end": 1598.76, "word": " and", "probability": 0.90869140625}, {"start": 1598.76, "end": 1599.16, "word": " 22.", "probability": 0.97705078125}, {"start": 1600.5, "end": 1600.82, "word": " So", "probability": 0.9443359375}, {"start": 1600.82, "end": 1601.0, "word": " we", "probability": 0.9384765625}, {"start": 1601.0, "end": 1601.3, "word": " have", "probability": 0.94677734375}, {"start": 1601.3, "end": 1602.68, "word": " 16", "probability": 0.94287109375}, {"start": 1602.68, "end": 1603.44, "word": " different", "probability": 0.8876953125}, {"start": 1603.44, "end": 1604.66, "word": " samples.", "probability": 0.91162109375}, {"start": 1606.12, "end": 1606.76, "word": " So", "probability": 0.92333984375}, {"start": 1606.76, "end": 1606.98, "word": " number", "probability": 0.74267578125}, {"start": 1606.98, "end": 1607.16, "word": " of", "probability": 0.96630859375}, {"start": 1607.16, "end": 1607.4, "word": " samples", "probability": 0.875}, {"start": 1607.4, "end": 1607.58, "word": " in", "probability": 0.8642578125}, {"start": 1607.58, "end": 1607.76, 
"word": " this", "probability": 0.947265625}, {"start": 1607.76, "end": 1608.04, "word": " case", "probability": 0.92041015625}, {"start": 1608.04, "end": 1608.58, "word": " is", "probability": 0.94091796875}, {"start": 1608.58, "end": 1609.96, "word": " 16.", "probability": 0.97314453125}, {"start": 1610.54, "end": 1610.88, "word": " Imagine", "probability": 0.849609375}, {"start": 1610.88, "end": 1611.14, "word": " that", "probability": 0.93505859375}, {"start": 1611.14, "end": 1611.3, "word": " we", "probability": 0.95458984375}, {"start": 1611.3, "end": 1611.54, "word": " have", "probability": 0.94921875}, {"start": 1611.54, "end": 1612.0, "word": " five.", "probability": 0.8662109375}], "temperature": 1.0}, {"id": 61, "seek": 164124, "start": 1614.26, "end": 1641.24, "text": " I mean the population size is 5 and so on. So the rule is number of samples in this case and the volume is million. Because we have four, four squared is sixteen, that's all.", "tokens": [286, 914, 264, 4415, 2744, 307, 1025, 293, 370, 322, 13, 407, 264, 4978, 307, 1230, 295, 10938, 294, 341, 1389, 293, 264, 5523, 307, 2459, 13, 1436, 321, 362, 1451, 11, 1451, 8889, 307, 27847, 11, 300, 311, 439, 13], "avg_logprob": -0.5465029932203747, "compression_ratio": 1.3461538461538463, "no_speech_prob": 0.0, "words": [{"start": 1614.26, "end": 1614.5, "word": " I", "probability": 0.8212890625}, {"start": 1614.5, "end": 1614.62, "word": " mean", "probability": 0.958984375}, {"start": 1614.62, "end": 1614.76, "word": " the", "probability": 0.72705078125}, {"start": 1614.76, "end": 1615.14, "word": " population", "probability": 0.9462890625}, {"start": 1615.14, "end": 1615.46, "word": " size", "probability": 0.81689453125}, {"start": 1615.46, "end": 1615.62, "word": " is", "probability": 0.2607421875}, {"start": 1615.62, "end": 1615.82, "word": " 5", "probability": 0.6025390625}, {"start": 1615.82, "end": 1617.26, "word": " and", "probability": 0.2666015625}, {"start": 1617.26, "end": 1618.62, 
"word": " so", "probability": 0.83056640625}, {"start": 1618.62, "end": 1618.9, "word": " on.", "probability": 0.93115234375}, {"start": 1619.68, "end": 1619.88, "word": " So", "probability": 0.837890625}, {"start": 1619.88, "end": 1620.22, "word": " the", "probability": 0.7314453125}, {"start": 1620.22, "end": 1620.62, "word": " rule", "probability": 0.67724609375}, {"start": 1620.62, "end": 1621.0, "word": " is", "probability": 0.953125}, {"start": 1621.0, "end": 1626.0, "word": " number", "probability": 0.548828125}, {"start": 1626.0, "end": 1628.1, "word": " of", "probability": 0.96923828125}, {"start": 1628.1, "end": 1628.7, "word": " samples", "probability": 0.8330078125}, {"start": 1628.7, "end": 1630.26, "word": " in", "probability": 0.68505859375}, {"start": 1630.26, "end": 1630.48, "word": " this", "probability": 0.95166015625}, {"start": 1630.48, "end": 1630.84, "word": " case", "probability": 0.91650390625}, {"start": 1630.84, "end": 1631.54, "word": " and", "probability": 0.1317138671875}, {"start": 1631.54, "end": 1631.76, "word": " the", "probability": 0.496826171875}, {"start": 1631.76, "end": 1632.1, "word": " volume", "probability": 0.06903076171875}, {"start": 1632.1, "end": 1632.52, "word": " is", "probability": 0.269287109375}, {"start": 1632.52, "end": 1633.02, "word": " million.", "probability": 0.77685546875}, {"start": 1634.7, "end": 1635.06, "word": " Because", "probability": 0.57763671875}, {"start": 1635.06, "end": 1635.26, "word": " we", "probability": 0.90478515625}, {"start": 1635.26, "end": 1635.6, "word": " have", "probability": 0.93017578125}, {"start": 1635.6, "end": 1636.36, "word": " four,", "probability": 0.42529296875}, {"start": 1636.5, "end": 1637.38, "word": " four", "probability": 0.83837890625}, {"start": 1637.38, "end": 1637.74, "word": " squared", "probability": 0.3466796875}, {"start": 1637.74, "end": 1638.18, "word": " is", "probability": 0.88916015625}, {"start": 1638.18, "end": 1639.14, "word": " sixteen,", 
"probability": 0.61474609375}, {"start": 1639.44, "end": 1641.0, "word": " that's", "probability": 0.5120849609375}, {"start": 1641.0, "end": 1641.24, "word": " all.", "probability": 0.62353515625}], "temperature": 1.0}, {"id": 62, "seek": 167004, "start": 1642.54, "end": 1670.04, "text": " 5 squared, 25, and so on. Now, we have 16 different samples. For sure, we will have different sample means. Now, for the first sample, 18, 18, the average is also 18. The next one, 18, 20, the average is 19.", "tokens": [1025, 8889, 11, 3552, 11, 293, 370, 322, 13, 823, 11, 321, 362, 3165, 819, 10938, 13, 1171, 988, 11, 321, 486, 362, 819, 6889, 1355, 13, 823, 11, 337, 264, 700, 6889, 11, 2443, 11, 2443, 11, 264, 4274, 307, 611, 2443, 13, 440, 958, 472, 11, 2443, 11, 945, 11, 264, 4274, 307, 1294, 13], "avg_logprob": -0.2064924599795506, "compression_ratio": 1.4964028776978417, "no_speech_prob": 0.0, "words": [{"start": 1642.54, "end": 1643.1, "word": " 5", "probability": 0.453369140625}, {"start": 1643.1, "end": 1643.44, "word": " squared,", "probability": 0.5146484375}, {"start": 1643.58, "end": 1644.04, "word": " 25,", "probability": 0.90869140625}, {"start": 1644.46, "end": 1644.6, "word": " and", "probability": 0.77197265625}, {"start": 1644.6, "end": 1644.8, "word": " so", "probability": 0.95361328125}, {"start": 1644.8, "end": 1645.06, "word": " on.", "probability": 0.94189453125}, {"start": 1646.1, "end": 1646.36, "word": " Now,", "probability": 0.90966796875}, {"start": 1646.44, "end": 1646.66, "word": " we", "probability": 0.92822265625}, {"start": 1646.66, "end": 1646.94, "word": " have", "probability": 0.94580078125}, {"start": 1646.94, "end": 1647.7, "word": " 16", "probability": 0.8662109375}, {"start": 1647.7, "end": 1648.2, "word": " different", "probability": 0.89892578125}, {"start": 1648.2, "end": 1648.7, "word": " samples.", "probability": 0.86328125}, {"start": 1649.46, "end": 1650.1, "word": " For", "probability": 0.927734375}, {"start": 1650.1, "end": 
1650.54, "word": " sure,", "probability": 0.91796875}, {"start": 1650.66, "end": 1650.88, "word": " we", "probability": 0.9130859375}, {"start": 1650.88, "end": 1651.14, "word": " will", "probability": 0.638671875}, {"start": 1651.14, "end": 1651.74, "word": " have", "probability": 0.9462890625}, {"start": 1651.74, "end": 1653.68, "word": " different", "probability": 0.88671875}, {"start": 1653.68, "end": 1654.26, "word": " sample", "probability": 0.87744140625}, {"start": 1654.26, "end": 1654.68, "word": " means.", "probability": 0.9287109375}, {"start": 1656.4, "end": 1656.68, "word": " Now,", "probability": 0.9326171875}, {"start": 1656.74, "end": 1656.92, "word": " for", "probability": 0.95068359375}, {"start": 1656.92, "end": 1657.08, "word": " the", "probability": 0.92041015625}, {"start": 1657.08, "end": 1657.4, "word": " first", "probability": 0.830078125}, {"start": 1657.4, "end": 1657.94, "word": " sample,", "probability": 0.892578125}, {"start": 1659.56, "end": 1659.92, "word": " 18,", "probability": 0.94482421875}, {"start": 1660.02, "end": 1660.3, "word": " 18,", "probability": 0.76318359375}, {"start": 1660.42, "end": 1660.56, "word": " the", "probability": 0.89501953125}, {"start": 1660.56, "end": 1660.88, "word": " average", "probability": 0.7978515625}, {"start": 1660.88, "end": 1662.58, "word": " is", "probability": 0.8564453125}, {"start": 1662.58, "end": 1662.94, "word": " also", "probability": 0.87255859375}, {"start": 1662.94, "end": 1663.28, "word": " 18.", "probability": 0.91650390625}, {"start": 1664.98, "end": 1665.66, "word": " The", "probability": 0.875}, {"start": 1665.66, "end": 1665.86, "word": " next", "probability": 0.94921875}, {"start": 1665.86, "end": 1666.14, "word": " one,", "probability": 0.94189453125}, {"start": 1666.8, "end": 1667.2, "word": " 18,", "probability": 0.98681640625}, {"start": 1667.28, "end": 1667.62, "word": " 20,", "probability": 0.93701171875}, {"start": 1668.06, "end": 1669.3, "word": " the", "probability": 
0.8876953125}, {"start": 1669.3, "end": 1669.56, "word": " average", "probability": 0.796875}, {"start": 1669.56, "end": 1669.72, "word": " is", "probability": 0.86328125}, {"start": 1669.72, "end": 1670.04, "word": " 19.", "probability": 0.9658203125}], "temperature": 1.0}, {"id": 63, "seek": 170403, "start": 1674.79, "end": 1704.03, "text": " 20, 18, 24, the average is 21, and so on. So now we have 16 sample means. Now this is my new values. It's my sample. This sample has different sample means. Now let's take these values and compute average, sigma, and the shape of the distribution.", "tokens": [945, 11, 2443, 11, 4022, 11, 264, 4274, 307, 5080, 11, 293, 370, 322, 13, 407, 586, 321, 362, 3165, 6889, 1355, 13, 823, 341, 307, 452, 777, 4190, 13, 467, 311, 452, 6889, 13, 639, 6889, 575, 819, 6889, 1355, 13, 823, 718, 311, 747, 613, 4190, 293, 14722, 4274, 11, 12771, 11, 293, 264, 3909, 295, 264, 7316, 13], "avg_logprob": -0.21018145762143597, "compression_ratio": 1.4939759036144578, "no_speech_prob": 0.0, "words": [{"start": 1674.79, "end": 1675.13, "word": " 20,", "probability": 0.1297607421875}, {"start": 1675.63, "end": 1676.13, "word": " 18,", "probability": 0.8681640625}, {"start": 1676.25, "end": 1676.79, "word": " 24,", "probability": 0.90869140625}, {"start": 1676.95, "end": 1677.01, "word": " the", "probability": 0.58740234375}, {"start": 1677.01, "end": 1677.21, "word": " average", "probability": 0.77294921875}, {"start": 1677.21, "end": 1677.35, "word": " is", "probability": 0.908203125}, {"start": 1677.35, "end": 1677.73, "word": " 21,", "probability": 0.95263671875}, {"start": 1677.85, "end": 1677.95, "word": " and", "probability": 0.9326171875}, {"start": 1677.95, "end": 1678.09, "word": " so", "probability": 0.9501953125}, {"start": 1678.09, "end": 1678.29, "word": " on.", "probability": 0.9443359375}, {"start": 1679.17, "end": 1679.57, "word": " So", "probability": 0.9306640625}, {"start": 1679.57, "end": 1679.77, "word": " now", "probability": 
0.822265625}, {"start": 1679.77, "end": 1679.95, "word": " we", "probability": 0.84765625}, {"start": 1679.95, "end": 1680.27, "word": " have", "probability": 0.9462890625}, {"start": 1680.27, "end": 1680.99, "word": " 16", "probability": 0.880859375}, {"start": 1680.99, "end": 1681.57, "word": " sample", "probability": 0.8212890625}, {"start": 1681.57, "end": 1681.89, "word": " means.", "probability": 0.8125}, {"start": 1683.49, "end": 1684.29, "word": " Now", "probability": 0.939453125}, {"start": 1684.29, "end": 1684.71, "word": " this", "probability": 0.537109375}, {"start": 1684.71, "end": 1684.87, "word": " is", "probability": 0.94873046875}, {"start": 1684.87, "end": 1685.11, "word": " my", "probability": 0.97265625}, {"start": 1685.11, "end": 1685.45, "word": " new", "probability": 0.91162109375}, {"start": 1685.45, "end": 1686.21, "word": " values.", "probability": 0.79150390625}, {"start": 1686.83, "end": 1687.15, "word": " It's", "probability": 0.778564453125}, {"start": 1687.15, "end": 1687.37, "word": " my", "probability": 0.97119140625}, {"start": 1687.37, "end": 1687.77, "word": " sample.", "probability": 0.72216796875}, {"start": 1688.39, "end": 1688.71, "word": " This", "probability": 0.88623046875}, {"start": 1688.71, "end": 1689.01, "word": " sample", "probability": 0.876953125}, {"start": 1689.01, "end": 1689.47, "word": " has", "probability": 0.93603515625}, {"start": 1689.47, "end": 1690.51, "word": " different", "probability": 0.87841796875}, {"start": 1690.51, "end": 1691.07, "word": " sample", "probability": 0.8427734375}, {"start": 1691.07, "end": 1691.45, "word": " means.", "probability": 0.8662109375}, {"start": 1692.27, "end": 1692.63, "word": " Now", "probability": 0.89306640625}, {"start": 1692.63, "end": 1693.17, "word": " let's", "probability": 0.874267578125}, {"start": 1693.17, "end": 1693.57, "word": " take", "probability": 0.869140625}, {"start": 1693.57, "end": 1694.25, "word": " these", "probability": 0.8173828125}, {"start": 
1694.25, "end": 1694.85, "word": " values", "probability": 0.9658203125}, {"start": 1694.85, "end": 1696.05, "word": " and", "probability": 0.87109375}, {"start": 1696.05, "end": 1696.83, "word": " compute", "probability": 0.93408203125}, {"start": 1696.83, "end": 1698.31, "word": " average,", "probability": 0.70068359375}, {"start": 1699.63, "end": 1700.23, "word": " sigma,", "probability": 0.8203125}, {"start": 1700.71, "end": 1701.45, "word": " and", "probability": 0.93896484375}, {"start": 1701.45, "end": 1701.81, "word": " the", "probability": 0.9189453125}, {"start": 1701.81, "end": 1702.51, "word": " shape", "probability": 0.943359375}, {"start": 1702.51, "end": 1702.95, "word": " of", "probability": 0.96728515625}, {"start": 1702.95, "end": 1703.27, "word": " the", "probability": 0.91796875}, {"start": 1703.27, "end": 1704.03, "word": " distribution.", "probability": 0.85009765625}], "temperature": 1.0}, {"id": 64, "seek": 171248, "start": 1706.12, "end": 1712.48, "text": " So again, we have a population of size 4, we select a random cell bone.", "tokens": [407, 797, 11, 321, 362, 257, 4415, 295, 2744, 1017, 11, 321, 3048, 257, 4974, 2815, 9026, 13], "avg_logprob": -0.3511513032411274, "compression_ratio": 0.9863013698630136, "no_speech_prob": 0.0, "words": [{"start": 1706.1200000000001, "end": 1706.74, "word": " So", "probability": 0.270751953125}, {"start": 1706.74, "end": 1707.04, "word": " again,", "probability": 0.7578125}, {"start": 1707.74, "end": 1708.38, "word": " we", "probability": 0.8681640625}, {"start": 1708.38, "end": 1708.58, "word": " have", "probability": 0.93359375}, {"start": 1708.58, "end": 1708.68, "word": " a", "probability": 0.94140625}, {"start": 1708.68, "end": 1709.0, "word": " population", "probability": 0.96826171875}, {"start": 1709.0, "end": 1709.2, "word": " of", "probability": 0.9013671875}, {"start": 1709.2, "end": 1709.42, "word": " size", "probability": 0.8115234375}, {"start": 1709.42, "end": 1709.78, "word": " 4,", 
"probability": 0.7666015625}, {"start": 1710.78, "end": 1711.02, "word": " we", "probability": 0.77099609375}, {"start": 1711.02, "end": 1711.4, "word": " select", "probability": 0.82373046875}, {"start": 1711.4, "end": 1711.56, "word": " a", "probability": 0.787109375}, {"start": 1711.56, "end": 1711.88, "word": " random", "probability": 0.869140625}, {"start": 1711.88, "end": 1712.24, "word": " cell", "probability": 0.78173828125}, {"start": 1712.24, "end": 1712.48, "word": " bone.", "probability": 0.1722412109375}], "temperature": 1.0}, {"id": 65, "seek": 174268, "start": 1714.3, "end": 1742.68, "text": " of size 2 from that population, we end with 16 random samples, and they have different sample means. Might be two of them are the same. I mean, we have 18 just repeated once, but 19 repeated twice, 23 times, 24 times, and so on. 22 three times, 23 twice, 24 once. So it depends on", "tokens": [295, 2744, 568, 490, 300, 4415, 11, 321, 917, 365, 3165, 4974, 10938, 11, 293, 436, 362, 819, 6889, 1355, 13, 23964, 312, 732, 295, 552, 366, 264, 912, 13, 286, 914, 11, 321, 362, 2443, 445, 10477, 1564, 11, 457, 1294, 10477, 6091, 11, 6673, 1413, 11, 4022, 1413, 11, 293, 370, 322, 13, 5853, 1045, 1413, 11, 6673, 6091, 11, 4022, 1564, 13, 407, 309, 5946, 322], "avg_logprob": -0.24107142984867097, "compression_ratio": 1.53551912568306, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1714.3, "end": 1714.58, "word": " of", "probability": 0.344970703125}, {"start": 1714.58, "end": 1714.94, "word": " size", "probability": 0.78369140625}, {"start": 1714.94, "end": 1715.24, "word": " 2", "probability": 0.62841796875}, {"start": 1715.24, "end": 1715.54, "word": " from", "probability": 0.798828125}, {"start": 1715.54, "end": 1715.74, "word": " that", "probability": 0.86083984375}, {"start": 1715.74, "end": 1716.3, "word": " population,", "probability": 0.935546875}, {"start": 1716.94, "end": 1717.2, "word": " we", "probability": 0.9423828125}, {"start": 1717.2, "end": 
1717.54, "word": " end", "probability": 0.91259765625}, {"start": 1717.54, "end": 1717.86, "word": " with", "probability": 0.87060546875}, {"start": 1717.86, "end": 1718.66, "word": " 16", "probability": 0.90673828125}, {"start": 1718.66, "end": 1719.06, "word": " random", "probability": 0.81591796875}, {"start": 1719.06, "end": 1719.52, "word": " samples,", "probability": 0.814453125}, {"start": 1720.8, "end": 1721.08, "word": " and", "probability": 0.931640625}, {"start": 1721.08, "end": 1721.3, "word": " they", "probability": 0.8955078125}, {"start": 1721.3, "end": 1721.56, "word": " have", "probability": 0.94482421875}, {"start": 1721.56, "end": 1722.22, "word": " different", "probability": 0.90087890625}, {"start": 1722.22, "end": 1723.38, "word": " sample", "probability": 0.479248046875}, {"start": 1723.38, "end": 1723.62, "word": " means.", "probability": 0.468994140625}, {"start": 1723.86, "end": 1724.1, "word": " Might", "probability": 0.583984375}, {"start": 1724.1, "end": 1724.32, "word": " be", "probability": 0.955078125}, {"start": 1724.32, "end": 1724.6, "word": " two", "probability": 0.765625}, {"start": 1724.6, "end": 1724.74, "word": " of", "probability": 0.96826171875}, {"start": 1724.74, "end": 1724.92, "word": " them", "probability": 0.89306640625}, {"start": 1724.92, "end": 1725.1, "word": " are", "probability": 0.93359375}, {"start": 1725.1, "end": 1725.32, "word": " the", "probability": 0.89794921875}, {"start": 1725.32, "end": 1725.6, "word": " same.", "probability": 0.91064453125}, {"start": 1725.7, "end": 1725.78, "word": " I", "probability": 0.92041015625}, {"start": 1725.78, "end": 1725.96, "word": " mean,", "probability": 0.9677734375}, {"start": 1726.24, "end": 1726.5, "word": " we", "probability": 0.705078125}, {"start": 1726.5, "end": 1726.7, "word": " have", "probability": 0.92529296875}, {"start": 1726.7, "end": 1727.2, "word": " 18", "probability": 0.9541015625}, {"start": 1727.2, "end": 1728.2, "word": " just", "probability": 
0.75732421875}, {"start": 1728.2, "end": 1728.84, "word": " repeated", "probability": 0.95703125}, {"start": 1728.84, "end": 1729.32, "word": " once,", "probability": 0.89599609375}, {"start": 1729.94, "end": 1730.22, "word": " but", "probability": 0.927734375}, {"start": 1730.22, "end": 1730.6, "word": " 19", "probability": 0.8974609375}, {"start": 1730.6, "end": 1730.92, "word": " repeated", "probability": 0.47705078125}, {"start": 1730.92, "end": 1731.4, "word": " twice,", "probability": 0.91796875}, {"start": 1731.9, "end": 1732.22, "word": " 23", "probability": 0.87353515625}, {"start": 1732.22, "end": 1732.92, "word": " times,", "probability": 0.92431640625}, {"start": 1733.46, "end": 1734.32, "word": " 24", "probability": 0.26318359375}, {"start": 1734.32, "end": 1735.14, "word": " times,", "probability": 0.83984375}, {"start": 1735.28, "end": 1735.38, "word": " and", "probability": 0.9287109375}, {"start": 1735.38, "end": 1735.64, "word": " so", "probability": 0.947265625}, {"start": 1735.64, "end": 1735.92, "word": " on.", "probability": 0.94775390625}, {"start": 1736.38, "end": 1736.92, "word": " 22", "probability": 0.7763671875}, {"start": 1736.92, "end": 1737.38, "word": " three", "probability": 0.425048828125}, {"start": 1737.38, "end": 1737.92, "word": " times,", "probability": 0.89208984375}, {"start": 1738.68, "end": 1739.22, "word": " 23", "probability": 0.931640625}, {"start": 1739.22, "end": 1739.98, "word": " twice,", "probability": 0.89013671875}, {"start": 1740.36, "end": 1740.72, "word": " 24", "probability": 0.9501953125}, {"start": 1740.72, "end": 1741.18, "word": " once.", "probability": 0.8740234375}, {"start": 1741.8, "end": 1741.98, "word": " So", "probability": 0.9453125}, {"start": 1741.98, "end": 1742.08, "word": " it", "probability": 0.7255859375}, {"start": 1742.08, "end": 1742.36, "word": " depends", "probability": 0.8994140625}, {"start": 1742.36, "end": 1742.68, "word": " on", "probability": 0.9521484375}], "temperature": 1.0}, 
{"id": 66, "seek": 176429, "start": 1743.05, "end": 1764.29, "text": " The sample means you have. So we have actually different samples. For example, let's look at 24 and 22. What's the average of these two values? N divided by 2 will give 22.", "tokens": [440, 6889, 1355, 291, 362, 13, 407, 321, 362, 767, 819, 10938, 13, 1171, 1365, 11, 718, 311, 574, 412, 4022, 293, 5853, 13, 708, 311, 264, 4274, 295, 613, 732, 4190, 30, 426, 6666, 538, 568, 486, 976, 5853, 13], "avg_logprob": -0.21819196712403072, "compression_ratio": 1.2627737226277371, "no_speech_prob": 1.7285346984863281e-06, "words": [{"start": 1743.05, "end": 1743.51, "word": " The", "probability": 0.2095947265625}, {"start": 1743.51, "end": 1743.97, "word": " sample", "probability": 0.703125}, {"start": 1743.97, "end": 1744.27, "word": " means", "probability": 0.728515625}, {"start": 1744.27, "end": 1744.45, "word": " you", "probability": 0.83837890625}, {"start": 1744.45, "end": 1744.69, "word": " have.", "probability": 0.92919921875}, {"start": 1744.89, "end": 1745.03, "word": " So", "probability": 0.93408203125}, {"start": 1745.03, "end": 1745.15, "word": " we", "probability": 0.5166015625}, {"start": 1745.15, "end": 1745.35, "word": " have", "probability": 0.92138671875}, {"start": 1745.35, "end": 1745.79, "word": " actually", "probability": 0.849609375}, {"start": 1745.79, "end": 1746.35, "word": " different", "probability": 0.87451171875}, {"start": 1746.35, "end": 1747.21, "word": " samples.", "probability": 0.51025390625}, {"start": 1754.79, "end": 1755.39, "word": " For", "probability": 0.83935546875}, {"start": 1755.39, "end": 1755.75, "word": " example,", "probability": 0.974609375}, {"start": 1756.03, "end": 1756.39, "word": " let's", "probability": 0.963623046875}, {"start": 1756.39, "end": 1756.63, "word": " look", "probability": 0.96533203125}, {"start": 1756.63, "end": 1756.81, "word": " at", "probability": 0.9658203125}, {"start": 1756.81, "end": 1757.25, "word": " 24", "probability": 
0.9521484375}, {"start": 1757.25, "end": 1757.45, "word": " and", "probability": 0.7412109375}, {"start": 1757.45, "end": 1757.81, "word": " 22.", "probability": 0.96728515625}, {"start": 1758.55, "end": 1758.85, "word": " What's", "probability": 0.91748046875}, {"start": 1758.85, "end": 1758.97, "word": " the", "probability": 0.92431640625}, {"start": 1758.97, "end": 1759.23, "word": " average", "probability": 0.818359375}, {"start": 1759.23, "end": 1759.37, "word": " of", "probability": 0.951171875}, {"start": 1759.37, "end": 1759.59, "word": " these", "probability": 0.86279296875}, {"start": 1759.59, "end": 1759.79, "word": " two", "probability": 0.90869140625}, {"start": 1759.79, "end": 1760.17, "word": " values?", "probability": 0.9208984375}, {"start": 1761.43, "end": 1761.73, "word": " N", "probability": 0.281494140625}, {"start": 1761.73, "end": 1761.99, "word": " divided", "probability": 0.681640625}, {"start": 1761.99, "end": 1762.19, "word": " by", "probability": 0.9755859375}, {"start": 1762.19, "end": 1762.49, "word": " 2", "probability": 0.7890625}, {"start": 1762.49, "end": 1762.79, "word": " will", "probability": 0.80859375}, {"start": 1762.79, "end": 1763.07, "word": " give", "probability": 0.87109375}, {"start": 1763.07, "end": 1764.29, "word": " 22.", "probability": 0.9052734375}], "temperature": 1.0}, {"id": 67, "seek": 179795, "start": 1773.39, "end": 1797.95, "text": " So again, we have 16 sample means. Now look first at the shape of the distribution. 18, as I mentioned, repeated once. So 1 over 16. 19 twice. 23 times. 1 four times. 
22 three times.", "tokens": [407, 797, 11, 321, 362, 3165, 6889, 1355, 13, 823, 574, 700, 412, 264, 3909, 295, 264, 7316, 13, 2443, 11, 382, 286, 2835, 11, 10477, 1564, 13, 407, 502, 670, 3165, 13, 1294, 6091, 13, 6673, 1413, 13, 502, 1451, 1413, 13, 5853, 1045, 1413, 13], "avg_logprob": -0.20084635758151612, "compression_ratio": 1.326086956521739, "no_speech_prob": 0.0, "words": [{"start": 1773.39, "end": 1773.65, "word": " So", "probability": 0.8369140625}, {"start": 1773.65, "end": 1773.95, "word": " again,", "probability": 0.81298828125}, {"start": 1774.21, "end": 1774.39, "word": " we", "probability": 0.94921875}, {"start": 1774.39, "end": 1774.53, "word": " have", "probability": 0.943359375}, {"start": 1774.53, "end": 1775.01, "word": " 16", "probability": 0.78173828125}, {"start": 1775.01, "end": 1775.41, "word": " sample", "probability": 0.73779296875}, {"start": 1775.41, "end": 1775.73, "word": " means.", "probability": 0.8115234375}, {"start": 1778.61, "end": 1779.11, "word": " Now", "probability": 0.92333984375}, {"start": 1779.11, "end": 1779.41, "word": " look", "probability": 0.724609375}, {"start": 1779.41, "end": 1779.81, "word": " first", "probability": 0.8427734375}, {"start": 1779.81, "end": 1780.13, "word": " at", "probability": 0.93896484375}, {"start": 1780.13, "end": 1780.33, "word": " the", "probability": 0.91650390625}, {"start": 1780.33, "end": 1780.65, "word": " shape", "probability": 0.93505859375}, {"start": 1780.65, "end": 1780.93, "word": " of", "probability": 0.96240234375}, {"start": 1780.93, "end": 1781.05, "word": " the", "probability": 0.6376953125}, {"start": 1781.05, "end": 1781.55, "word": " distribution.", "probability": 0.861328125}, {"start": 1783.11, "end": 1783.53, "word": " 18,", "probability": 0.81298828125}, {"start": 1783.65, "end": 1783.89, "word": " as", "probability": 0.958984375}, {"start": 1783.89, "end": 1784.01, "word": " I", "probability": 0.9892578125}, {"start": 1784.01, "end": 1784.33, "word": " 
mentioned,", "probability": 0.8193359375}, {"start": 1784.47, "end": 1784.73, "word": " repeated", "probability": 0.95654296875}, {"start": 1784.73, "end": 1785.41, "word": " once.", "probability": 0.90283203125}, {"start": 1786.33, "end": 1786.63, "word": " So", "probability": 0.93603515625}, {"start": 1786.63, "end": 1786.83, "word": " 1", "probability": 0.634765625}, {"start": 1786.83, "end": 1786.99, "word": " over", "probability": 0.65380859375}, {"start": 1786.99, "end": 1787.49, "word": " 16.", "probability": 0.96826171875}, {"start": 1788.43, "end": 1788.89, "word": " 19", "probability": 0.94677734375}, {"start": 1788.89, "end": 1789.37, "word": " twice.", "probability": 0.791015625}, {"start": 1791.23, "end": 1791.73, "word": " 23", "probability": 0.92919921875}, {"start": 1791.73, "end": 1792.47, "word": " times.", "probability": 0.9189453125}, {"start": 1793.91, "end": 1794.65, "word": " 1", "probability": 0.45263671875}, {"start": 1794.65, "end": 1794.97, "word": " four", "probability": 0.1806640625}, {"start": 1794.97, "end": 1795.47, "word": " times.", "probability": 0.859375}, {"start": 1796.47, "end": 1796.99, "word": " 22", "probability": 0.94970703125}, {"start": 1796.99, "end": 1797.49, "word": " three", "probability": 0.767578125}, {"start": 1797.49, "end": 1797.95, "word": " times.", "probability": 0.92236328125}], "temperature": 1.0}, {"id": 68, "seek": 182350, "start": 1798.94, "end": 1823.5, "text": " then twice then once now the distribution was uniform remember now it becomes normal distribution so the first one x1 is normal distribution so it has normal distribution so again the shape of x1 looks like normal distribution we need to compute", "tokens": [550, 6091, 550, 1564, 586, 264, 7316, 390, 9452, 1604, 586, 309, 3643, 2710, 7316, 370, 264, 700, 472, 2031, 16, 307, 2710, 7316, 370, 309, 575, 2710, 7316, 370, 797, 264, 3909, 295, 2031, 16, 1542, 411, 2710, 7316, 321, 643, 281, 14722], "avg_logprob": -0.20954860978656345, 
"compression_ratio": 1.9523809523809523, "no_speech_prob": 0.0, "words": [{"start": 1798.94, "end": 1799.24, "word": " then", "probability": 0.28662109375}, {"start": 1799.24, "end": 1799.6, "word": " twice", "probability": 0.90869140625}, {"start": 1799.6, "end": 1799.94, "word": " then", "probability": 0.27685546875}, {"start": 1799.94, "end": 1800.9, "word": " once", "probability": 0.8955078125}, {"start": 1800.9, "end": 1801.9, "word": " now", "probability": 0.3388671875}, {"start": 1801.9, "end": 1802.08, "word": " the", "probability": 0.8701171875}, {"start": 1802.08, "end": 1802.62, "word": " distribution", "probability": 0.87255859375}, {"start": 1802.62, "end": 1803.34, "word": " was", "probability": 0.947265625}, {"start": 1803.34, "end": 1803.92, "word": " uniform", "probability": 0.9521484375}, {"start": 1803.92, "end": 1804.72, "word": " remember", "probability": 0.6640625}, {"start": 1804.72, "end": 1806.14, "word": " now", "probability": 0.8349609375}, {"start": 1806.14, "end": 1806.32, "word": " it", "probability": 0.94921875}, {"start": 1806.32, "end": 1806.78, "word": " becomes", "probability": 0.8623046875}, {"start": 1806.78, "end": 1807.96, "word": " normal", "probability": 0.84375}, {"start": 1807.96, "end": 1808.5, "word": " distribution", "probability": 0.8828125}, {"start": 1808.5, "end": 1809.14, "word": " so", "probability": 0.7451171875}, {"start": 1809.14, "end": 1809.26, "word": " the", "probability": 0.90869140625}, {"start": 1809.26, "end": 1809.52, "word": " first", "probability": 0.88916015625}, {"start": 1809.52, "end": 1809.8, "word": " one", "probability": 0.92333984375}, {"start": 1809.8, "end": 1810.26, "word": " x1", "probability": 0.76318359375}, {"start": 1810.26, "end": 1810.42, "word": " is", "probability": 0.93359375}, {"start": 1810.42, "end": 1810.78, "word": " normal", "probability": 0.84228515625}, {"start": 1810.78, "end": 1811.34, "word": " distribution", "probability": 0.88818359375}, {"start": 1811.34, "end": 
1812.1, "word": " so", "probability": 0.90625}, {"start": 1812.1, "end": 1812.2, "word": " it", "probability": 0.71142578125}, {"start": 1812.2, "end": 1812.36, "word": " has", "probability": 0.947265625}, {"start": 1812.36, "end": 1812.82, "word": " normal", "probability": 0.88330078125}, {"start": 1812.82, "end": 1814.42, "word": " distribution", "probability": 0.89111328125}, {"start": 1814.42, "end": 1816.34, "word": " so", "probability": 0.79345703125}, {"start": 1816.34, "end": 1816.68, "word": " again", "probability": 0.95458984375}, {"start": 1816.68, "end": 1817.1, "word": " the", "probability": 0.91162109375}, {"start": 1817.1, "end": 1817.86, "word": " shape", "probability": 0.9130859375}, {"start": 1817.86, "end": 1818.1, "word": " of", "probability": 0.96630859375}, {"start": 1818.1, "end": 1818.66, "word": " x1", "probability": 0.924072265625}, {"start": 1818.66, "end": 1819.4, "word": " looks", "probability": 0.73486328125}, {"start": 1819.4, "end": 1819.68, "word": " like", "probability": 0.9375}, {"start": 1819.68, "end": 1820.04, "word": " normal", "probability": 0.8564453125}, {"start": 1820.04, "end": 1820.72, "word": " distribution", "probability": 0.8642578125}, {"start": 1820.72, "end": 1822.78, "word": " we", "probability": 0.7763671875}, {"start": 1822.78, "end": 1823.02, "word": " need", "probability": 0.88330078125}, {"start": 1823.02, "end": 1823.12, "word": " to", "probability": 0.95166015625}, {"start": 1823.12, "end": 1823.5, "word": " compute", "probability": 0.94189453125}], "temperature": 1.0}, {"id": 69, "seek": 185428, "start": 1825.54, "end": 1854.28, "text": " the center of X bar, the mean of X bar. We have to add the values of X bar, the sample mean, then divide by the total number of size, which is 16. 
So in this case, we got 21, which is similar to the one for the entire population.", "tokens": [264, 3056, 295, 1783, 2159, 11, 264, 914, 295, 1783, 2159, 13, 492, 362, 281, 909, 264, 4190, 295, 1783, 2159, 11, 264, 6889, 914, 11, 550, 9845, 538, 264, 3217, 1230, 295, 2744, 11, 597, 307, 3165, 13, 407, 294, 341, 1389, 11, 321, 658, 5080, 11, 597, 307, 2531, 281, 264, 472, 337, 264, 2302, 4415, 13], "avg_logprob": -0.20598957762122155, "compression_ratio": 1.5231788079470199, "no_speech_prob": 0.0, "words": [{"start": 1825.54, "end": 1825.9, "word": " the", "probability": 0.30712890625}, {"start": 1825.9, "end": 1826.32, "word": " center", "probability": 0.712890625}, {"start": 1826.32, "end": 1826.64, "word": " of", "probability": 0.94677734375}, {"start": 1826.64, "end": 1826.8, "word": " X", "probability": 0.650390625}, {"start": 1826.8, "end": 1827.12, "word": " bar,", "probability": 0.85791015625}, {"start": 1827.92, "end": 1828.14, "word": " the", "probability": 0.89111328125}, {"start": 1828.14, "end": 1828.26, "word": " mean", "probability": 0.96484375}, {"start": 1828.26, "end": 1828.36, "word": " of", "probability": 0.96533203125}, {"start": 1828.36, "end": 1828.54, "word": " X", "probability": 0.98193359375}, {"start": 1828.54, "end": 1828.82, "word": " bar.", "probability": 0.9541015625}, {"start": 1829.52, "end": 1830.1, "word": " We", "probability": 0.94921875}, {"start": 1830.1, "end": 1830.32, "word": " have", "probability": 0.93701171875}, {"start": 1830.32, "end": 1830.48, "word": " to", "probability": 0.9677734375}, {"start": 1830.48, "end": 1831.28, "word": " add", "probability": 0.75732421875}, {"start": 1831.28, "end": 1832.48, "word": " the", "probability": 0.88671875}, {"start": 1832.48, "end": 1832.8, "word": " values", "probability": 0.5849609375}, {"start": 1832.8, "end": 1832.96, "word": " of", "probability": 0.900390625}, {"start": 1832.96, "end": 1833.12, "word": " X", "probability": 0.96826171875}, {"start": 1833.12, "end": 1833.44, 
"word": " bar,", "probability": 0.93505859375}, {"start": 1833.58, "end": 1833.72, "word": " the", "probability": 0.6435546875}, {"start": 1833.72, "end": 1833.94, "word": " sample", "probability": 0.446044921875}, {"start": 1833.94, "end": 1834.2, "word": " mean,", "probability": 0.89208984375}, {"start": 1834.28, "end": 1834.38, "word": " then", "probability": 0.27099609375}, {"start": 1834.38, "end": 1834.7, "word": " divide", "probability": 0.84375}, {"start": 1834.7, "end": 1835.08, "word": " by", "probability": 0.92236328125}, {"start": 1835.08, "end": 1836.38, "word": " the", "probability": 0.9072265625}, {"start": 1836.38, "end": 1836.76, "word": " total", "probability": 0.85302734375}, {"start": 1836.76, "end": 1837.26, "word": " number", "probability": 0.92578125}, {"start": 1837.26, "end": 1837.88, "word": " of", "probability": 0.962890625}, {"start": 1837.88, "end": 1840.12, "word": " size,", "probability": 0.7314453125}, {"start": 1840.2, "end": 1840.44, "word": " which", "probability": 0.9169921875}, {"start": 1840.44, "end": 1840.58, "word": " is", "probability": 0.93701171875}, {"start": 1840.58, "end": 1841.16, "word": " 16.", "probability": 0.9111328125}, {"start": 1842.2, "end": 1842.52, "word": " So", "probability": 0.9599609375}, {"start": 1842.52, "end": 1842.64, "word": " in", "probability": 0.415771484375}, {"start": 1842.64, "end": 1842.8, "word": " this", "probability": 0.94677734375}, {"start": 1842.8, "end": 1843.16, "word": " case,", "probability": 0.9169921875}, {"start": 1844.52, "end": 1846.26, "word": " we", "probability": 0.935546875}, {"start": 1846.26, "end": 1846.86, "word": " got", "probability": 0.7744140625}, {"start": 1846.86, "end": 1847.44, "word": " 21,", "probability": 0.95361328125}, {"start": 1849.32, "end": 1849.62, "word": " which", "probability": 0.9453125}, {"start": 1849.62, "end": 1849.8, "word": " is", "probability": 0.94384765625}, {"start": 1849.8, "end": 1850.16, "word": " similar", "probability": 
0.96435546875}, {"start": 1850.16, "end": 1850.4, "word": " to", "probability": 0.96923828125}, {"start": 1850.4, "end": 1850.56, "word": " the", "probability": 0.9072265625}, {"start": 1850.56, "end": 1850.82, "word": " one", "probability": 0.9169921875}, {"start": 1850.82, "end": 1851.72, "word": " for", "probability": 0.9150390625}, {"start": 1851.72, "end": 1853.46, "word": " the", "probability": 0.92236328125}, {"start": 1853.46, "end": 1853.8, "word": " entire", "probability": 0.88623046875}, {"start": 1853.8, "end": 1854.28, "word": " population.", "probability": 0.94873046875}], "temperature": 1.0}, {"id": 70, "seek": 188387, "start": 1854.75, "end": 1883.87, "text": " So this is the first unknown parameter. The mu of x bar is the same as the population mean mu. The second one, the split sigma of x bar by using the same equation we have, sum of x bar in this case minus the mean of x bar squared, then divide this quantity by the capital I which is 16 in this case.", "tokens": [407, 341, 307, 264, 700, 9841, 13075, 13, 440, 2992, 295, 2031, 2159, 307, 264, 912, 382, 264, 4415, 914, 2992, 13, 440, 1150, 472, 11, 264, 7472, 12771, 295, 2031, 2159, 538, 1228, 264, 912, 5367, 321, 362, 11, 2408, 295, 2031, 2159, 294, 341, 1389, 3175, 264, 914, 295, 2031, 2159, 8889, 11, 550, 9845, 341, 11275, 538, 264, 4238, 286, 597, 307, 3165, 294, 341, 1389, 13], "avg_logprob": -0.23195423080887592, "compression_ratio": 1.694915254237288, "no_speech_prob": 0.0, "words": [{"start": 1854.75, "end": 1855.09, "word": " So", "probability": 0.71435546875}, {"start": 1855.09, "end": 1855.37, "word": " this", "probability": 0.7265625}, {"start": 1855.37, "end": 1855.51, "word": " is", "probability": 0.9345703125}, {"start": 1855.51, "end": 1855.63, "word": " the", "probability": 0.90087890625}, {"start": 1855.63, "end": 1855.95, "word": " first", "probability": 0.87353515625}, {"start": 1855.95, "end": 1856.39, "word": " unknown", "probability": 0.87353515625}, {"start": 1856.39, 
"end": 1856.81, "word": " parameter.", "probability": 0.96923828125}, {"start": 1857.81, "end": 1857.99, "word": " The", "probability": 0.259033203125}, {"start": 1857.99, "end": 1858.11, "word": " mu", "probability": 0.43310546875}, {"start": 1858.11, "end": 1858.25, "word": " of", "probability": 0.86572265625}, {"start": 1858.25, "end": 1858.41, "word": " x", "probability": 0.703125}, {"start": 1858.41, "end": 1858.71, "word": " bar", "probability": 0.78466796875}, {"start": 1858.71, "end": 1859.11, "word": " is", "probability": 0.93505859375}, {"start": 1859.11, "end": 1859.23, "word": " the", "probability": 0.91064453125}, {"start": 1859.23, "end": 1859.49, "word": " same", "probability": 0.90771484375}, {"start": 1859.49, "end": 1859.93, "word": " as", "probability": 0.96142578125}, {"start": 1859.93, "end": 1860.77, "word": " the", "probability": 0.89697265625}, {"start": 1860.77, "end": 1861.15, "word": " population", "probability": 0.87255859375}, {"start": 1861.15, "end": 1861.57, "word": " mean", "probability": 0.951171875}, {"start": 1861.57, "end": 1862.25, "word": " mu.", "probability": 0.390625}, {"start": 1862.89, "end": 1863.05, "word": " The", "probability": 0.7900390625}, {"start": 1863.05, "end": 1863.29, "word": " second", "probability": 0.912109375}, {"start": 1863.29, "end": 1863.65, "word": " one,", "probability": 0.91748046875}, {"start": 1864.81, "end": 1865.13, "word": " the", "probability": 0.6494140625}, {"start": 1865.13, "end": 1865.49, "word": " split", "probability": 0.38232421875}, {"start": 1865.49, "end": 1867.61, "word": " sigma", "probability": 0.46875}, {"start": 1867.61, "end": 1867.83, "word": " of", "probability": 0.89404296875}, {"start": 1867.83, "end": 1868.01, "word": " x", "probability": 0.9755859375}, {"start": 1868.01, "end": 1868.31, "word": " bar", "probability": 0.9384765625}, {"start": 1868.31, "end": 1869.55, "word": " by", "probability": 0.8291015625}, {"start": 1869.55, "end": 1869.81, "word": " using", 
"probability": 0.93408203125}, {"start": 1869.81, "end": 1870.01, "word": " the", "probability": 0.9130859375}, {"start": 1870.01, "end": 1870.41, "word": " same", "probability": 0.908203125}, {"start": 1870.41, "end": 1873.45, "word": " equation", "probability": 0.95751953125}, {"start": 1873.45, "end": 1873.65, "word": " we", "probability": 0.6513671875}, {"start": 1873.65, "end": 1873.97, "word": " have,", "probability": 0.888671875}, {"start": 1874.45, "end": 1874.63, "word": " sum", "probability": 0.7275390625}, {"start": 1874.63, "end": 1874.81, "word": " of", "probability": 0.96533203125}, {"start": 1874.81, "end": 1875.07, "word": " x", "probability": 0.98095703125}, {"start": 1875.07, "end": 1875.63, "word": " bar", "probability": 0.95068359375}, {"start": 1875.63, "end": 1875.95, "word": " in", "probability": 0.71142578125}, {"start": 1875.95, "end": 1876.13, "word": " this", "probability": 0.951171875}, {"start": 1876.13, "end": 1876.43, "word": " case", "probability": 0.904296875}, {"start": 1876.43, "end": 1876.79, "word": " minus", "probability": 0.88720703125}, {"start": 1876.79, "end": 1877.05, "word": " the", "probability": 0.919921875}, {"start": 1877.05, "end": 1877.17, "word": " mean", "probability": 0.9755859375}, {"start": 1877.17, "end": 1877.27, "word": " of", "probability": 0.958984375}, {"start": 1877.27, "end": 1877.49, "word": " x", "probability": 0.99365234375}, {"start": 1877.49, "end": 1877.77, "word": " bar", "probability": 0.94287109375}, {"start": 1877.77, "end": 1878.71, "word": " squared,", "probability": 0.7763671875}, {"start": 1879.17, "end": 1879.47, "word": " then", "probability": 0.826171875}, {"start": 1879.47, "end": 1879.83, "word": " divide", "probability": 0.7783203125}, {"start": 1879.83, "end": 1880.03, "word": " this", "probability": 0.92041015625}, {"start": 1880.03, "end": 1880.45, "word": " quantity", "probability": 0.98583984375}, {"start": 1880.45, "end": 1880.85, "word": " by", "probability": 0.96484375}, 
{"start": 1880.85, "end": 1881.43, "word": " the", "probability": 0.88720703125}, {"start": 1881.43, "end": 1882.33, "word": " capital", "probability": 0.91943359375}, {"start": 1882.33, "end": 1882.53, "word": " I", "probability": 0.65185546875}, {"start": 1882.53, "end": 1882.69, "word": " which", "probability": 0.68017578125}, {"start": 1882.69, "end": 1882.85, "word": " is", "probability": 0.94970703125}, {"start": 1882.85, "end": 1883.25, "word": " 16", "probability": 0.765625}, {"start": 1883.25, "end": 1883.43, "word": " in", "probability": 0.9052734375}, {"start": 1883.43, "end": 1883.59, "word": " this", "probability": 0.94775390625}, {"start": 1883.59, "end": 1883.87, "word": " case.", "probability": 0.919921875}], "temperature": 1.0}, {"id": 71, "seek": 191105, "start": 1885.17, "end": 1911.05, "text": " So we will end with 1.58. Now let's compare population standard deviation and the sample standard deviation. First of all, you see that these two values are not the same. The population standard deviation was 2.2, around 2.2.", "tokens": [407, 321, 486, 917, 365, 502, 13, 20419, 13, 823, 718, 311, 6794, 4415, 3832, 25163, 293, 264, 6889, 3832, 25163, 13, 2386, 295, 439, 11, 291, 536, 300, 613, 732, 4190, 366, 406, 264, 912, 13, 440, 4415, 3832, 25163, 390, 568, 13, 17, 11, 926, 568, 13, 17, 13], "avg_logprob": -0.12575119762466505, "compression_ratio": 1.5804195804195804, "no_speech_prob": 0.0, "words": [{"start": 1885.17, "end": 1885.55, "word": " So", "probability": 0.73828125}, {"start": 1885.55, "end": 1885.81, "word": " we", "probability": 0.7099609375}, {"start": 1885.81, "end": 1886.01, "word": " will", "probability": 0.88623046875}, {"start": 1886.01, "end": 1886.27, "word": " end", "probability": 0.8984375}, {"start": 1886.27, "end": 1886.63, "word": " with", "probability": 0.90087890625}, {"start": 1886.63, "end": 1887.73, "word": " 1", "probability": 0.95458984375}, {"start": 1887.73, "end": 1888.51, "word": ".58.", "probability": 
0.97412109375}, {"start": 1891.27, "end": 1891.59, "word": " Now", "probability": 0.9208984375}, {"start": 1891.59, "end": 1891.89, "word": " let's", "probability": 0.834228515625}, {"start": 1891.89, "end": 1892.41, "word": " compare", "probability": 0.9560546875}, {"start": 1892.41, "end": 1895.39, "word": " population", "probability": 0.84326171875}, {"start": 1895.39, "end": 1895.77, "word": " standard", "probability": 0.92626953125}, {"start": 1895.77, "end": 1896.17, "word": " deviation", "probability": 0.9052734375}, {"start": 1896.17, "end": 1897.73, "word": " and", "probability": 0.8466796875}, {"start": 1897.73, "end": 1897.99, "word": " the", "probability": 0.74755859375}, {"start": 1897.99, "end": 1898.29, "word": " sample", "probability": 0.916015625}, {"start": 1898.29, "end": 1898.95, "word": " standard", "probability": 0.947265625}, {"start": 1898.95, "end": 1899.97, "word": " deviation.", "probability": 0.8974609375}, {"start": 1901.23, "end": 1901.87, "word": " First", "probability": 0.900390625}, {"start": 1901.87, "end": 1902.05, "word": " of", "probability": 0.96728515625}, {"start": 1902.05, "end": 1902.21, "word": " all,", "probability": 0.94970703125}, {"start": 1902.25, "end": 1902.31, "word": " you", "probability": 0.93896484375}, {"start": 1902.31, "end": 1902.57, "word": " see", "probability": 0.438720703125}, {"start": 1902.57, "end": 1902.91, "word": " that", "probability": 0.89990234375}, {"start": 1902.91, "end": 1903.79, "word": " these", "probability": 0.84423828125}, {"start": 1903.79, "end": 1903.99, "word": " two", "probability": 0.90625}, {"start": 1903.99, "end": 1904.33, "word": " values", "probability": 0.9013671875}, {"start": 1904.33, "end": 1904.51, "word": " are", "probability": 0.94189453125}, {"start": 1904.51, "end": 1904.65, "word": " not", "probability": 0.94189453125}, {"start": 1904.65, "end": 1904.81, "word": " the", "probability": 0.91796875}, {"start": 1904.81, "end": 1905.05, "word": " same.", "probability": 
0.908203125}, {"start": 1907.53, "end": 1907.99, "word": " The", "probability": 0.8837890625}, {"start": 1907.99, "end": 1908.51, "word": " population", "probability": 0.94482421875}, {"start": 1908.51, "end": 1908.87, "word": " standard", "probability": 0.94091796875}, {"start": 1908.87, "end": 1909.23, "word": " deviation", "probability": 0.888671875}, {"start": 1909.23, "end": 1909.55, "word": " was", "probability": 0.9482421875}, {"start": 1909.55, "end": 1909.75, "word": " 2", "probability": 0.79736328125}, {"start": 1909.75, "end": 1910.13, "word": ".2,", "probability": 0.994873046875}, {"start": 1910.19, "end": 1910.37, "word": " around", "probability": 0.9267578125}, {"start": 1910.37, "end": 1910.61, "word": " 2", "probability": 0.99462890625}, {"start": 1910.61, "end": 1911.05, "word": ".2.", "probability": 0.99853515625}], "temperature": 1.0}, {"id": 72, "seek": 193577, "start": 1913.05, "end": 1935.77, "text": " But for the sample, for the sample mean, it's 1.58, so that means sigma of X bar is smaller than sigma of X. 
It means exactly, the variation of X bar is always smaller than the variation of X, always.", "tokens": [583, 337, 264, 6889, 11, 337, 264, 6889, 914, 11, 309, 311, 502, 13, 20419, 11, 370, 300, 1355, 12771, 295, 1783, 2159, 307, 4356, 813, 12771, 295, 1783, 13, 467, 1355, 2293, 11, 264, 12990, 295, 1783, 2159, 307, 1009, 4356, 813, 264, 12990, 295, 1783, 11, 1009, 13], "avg_logprob": -0.18841912056885513, "compression_ratio": 1.6611570247933884, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1913.05, "end": 1913.37, "word": " But", "probability": 0.578125}, {"start": 1913.37, "end": 1913.57, "word": " for", "probability": 0.84912109375}, {"start": 1913.57, "end": 1913.79, "word": " the", "probability": 0.9013671875}, {"start": 1913.79, "end": 1914.11, "word": " sample,", "probability": 0.66552734375}, {"start": 1915.55, "end": 1915.81, "word": " for", "probability": 0.69775390625}, {"start": 1915.81, "end": 1915.97, "word": " the", "probability": 0.91650390625}, {"start": 1915.97, "end": 1916.37, "word": " sample", "probability": 0.86669921875}, {"start": 1916.37, "end": 1916.75, "word": " mean,", "probability": 0.7490234375}, {"start": 1916.93, "end": 1917.31, "word": " it's", "probability": 0.914794921875}, {"start": 1917.31, "end": 1917.57, "word": " 1", "probability": 0.90283203125}, {"start": 1917.57, "end": 1918.55, "word": ".58,", "probability": 0.973876953125}, {"start": 1919.33, "end": 1919.57, "word": " so", "probability": 0.92333984375}, {"start": 1919.57, "end": 1919.79, "word": " that", "probability": 0.93310546875}, {"start": 1919.79, "end": 1920.19, "word": " means", "probability": 0.93603515625}, {"start": 1920.19, "end": 1920.97, "word": " sigma", "probability": 0.5859375}, {"start": 1920.97, "end": 1921.13, "word": " of", "probability": 0.78662109375}, {"start": 1921.13, "end": 1921.29, "word": " X", "probability": 0.482421875}, {"start": 1921.29, "end": 1921.59, "word": " bar", "probability": 0.90087890625}, {"start": 1921.59, 
"end": 1921.89, "word": " is", "probability": 0.94970703125}, {"start": 1921.89, "end": 1922.25, "word": " smaller", "probability": 0.85693359375}, {"start": 1922.25, "end": 1922.69, "word": " than", "probability": 0.94775390625}, {"start": 1922.69, "end": 1923.29, "word": " sigma", "probability": 0.9208984375}, {"start": 1923.29, "end": 1923.45, "word": " of", "probability": 0.94384765625}, {"start": 1923.45, "end": 1923.71, "word": " X.", "probability": 0.990234375}, {"start": 1927.27, "end": 1927.71, "word": " It", "probability": 0.76318359375}, {"start": 1927.71, "end": 1927.99, "word": " means", "probability": 0.91357421875}, {"start": 1927.99, "end": 1928.37, "word": " exactly,", "probability": 0.87451171875}, {"start": 1928.47, "end": 1928.55, "word": " the", "probability": 0.9091796875}, {"start": 1928.55, "end": 1928.99, "word": " variation", "probability": 0.884765625}, {"start": 1928.99, "end": 1930.37, "word": " of", "probability": 0.96142578125}, {"start": 1930.37, "end": 1930.67, "word": " X", "probability": 0.98876953125}, {"start": 1930.67, "end": 1931.05, "word": " bar", "probability": 0.951171875}, {"start": 1931.05, "end": 1931.43, "word": " is", "probability": 0.9453125}, {"start": 1931.43, "end": 1932.01, "word": " always", "probability": 0.91162109375}, {"start": 1932.01, "end": 1933.05, "word": " smaller", "probability": 0.8486328125}, {"start": 1933.05, "end": 1933.31, "word": " than", "probability": 0.9443359375}, {"start": 1933.31, "end": 1933.47, "word": " the", "probability": 0.9033203125}, {"start": 1933.47, "end": 1933.75, "word": " variation", "probability": 0.82080078125}, {"start": 1933.75, "end": 1933.95, "word": " of", "probability": 0.95947265625}, {"start": 1933.95, "end": 1934.23, "word": " X,", "probability": 0.9921875}, {"start": 1934.51, "end": 1935.77, "word": " always.", "probability": 0.9091796875}], "temperature": 1.0}, {"id": 73, "seek": 196806, "start": 1940.42, "end": 1968.06, "text": " So here is the comparison. 
The distribution was uniform. It's no longer uniform. It looks like a bell shape. The mean of X is 21, which is the same as the mean of X bar. But the standard deviation of the population is larger than the standard deviation of the sample mean or the average.", "tokens": [407, 510, 307, 264, 9660, 13, 440, 7316, 390, 9452, 13, 467, 311, 572, 2854, 9452, 13, 467, 1542, 411, 257, 4549, 3909, 13, 440, 914, 295, 1783, 307, 5080, 11, 597, 307, 264, 912, 382, 264, 914, 295, 1783, 2159, 13, 583, 264, 3832, 25163, 295, 264, 4415, 307, 4833, 813, 264, 3832, 25163, 295, 264, 6889, 914, 420, 264, 4274, 13], "avg_logprob": -0.1955566427204758, "compression_ratio": 1.6842105263157894, "no_speech_prob": 0.0, "words": [{"start": 1940.4199999999998, "end": 1941.08, "word": " So", "probability": 0.849609375}, {"start": 1941.08, "end": 1941.74, "word": " here", "probability": 0.57861328125}, {"start": 1941.74, "end": 1942.1, "word": " is", "probability": 0.72265625}, {"start": 1942.1, "end": 1942.32, "word": " the", "probability": 0.8857421875}, {"start": 1942.32, "end": 1942.72, "word": " comparison.", "probability": 0.86083984375}, {"start": 1945.0, "end": 1945.66, "word": " The", "probability": 0.8583984375}, {"start": 1945.66, "end": 1946.16, "word": " distribution", "probability": 0.8798828125}, {"start": 1946.16, "end": 1946.48, "word": " was", "probability": 0.95166015625}, {"start": 1946.48, "end": 1946.8, "word": " uniform.", "probability": 0.9609375}, {"start": 1949.2, "end": 1949.86, "word": " It's", "probability": 0.8525390625}, {"start": 1949.86, "end": 1950.14, "word": " no", "probability": 0.9296875}, {"start": 1950.14, "end": 1950.48, "word": " longer", "probability": 0.927734375}, {"start": 1950.48, "end": 1950.84, "word": " uniform.", "probability": 0.9658203125}, {"start": 1951.06, "end": 1951.18, "word": " It", "probability": 0.9462890625}, {"start": 1951.18, "end": 1951.54, "word": " looks", "probability": 0.6806640625}, {"start": 1951.54, "end": 1951.9, 
"word": " like", "probability": 0.93505859375}, {"start": 1951.9, "end": 1952.0, "word": " a", "probability": 0.310791015625}, {"start": 1952.0, "end": 1952.12, "word": " bell", "probability": 0.2322998046875}, {"start": 1952.12, "end": 1952.42, "word": " shape.", "probability": 0.75390625}, {"start": 1953.64, "end": 1954.3, "word": " The", "probability": 0.892578125}, {"start": 1954.3, "end": 1954.56, "word": " mean", "probability": 0.97509765625}, {"start": 1954.56, "end": 1954.9, "word": " of", "probability": 0.9658203125}, {"start": 1954.9, "end": 1955.22, "word": " X", "probability": 0.60693359375}, {"start": 1955.22, "end": 1955.38, "word": " is", "probability": 0.475830078125}, {"start": 1955.38, "end": 1955.78, "word": " 21,", "probability": 0.91357421875}, {"start": 1955.98, "end": 1956.06, "word": " which", "probability": 0.5634765625}, {"start": 1956.06, "end": 1956.08, "word": " is", "probability": 0.94580078125}, {"start": 1956.08, "end": 1956.24, "word": " the", "probability": 0.908203125}, {"start": 1956.24, "end": 1956.44, "word": " same", "probability": 0.89697265625}, {"start": 1956.44, "end": 1956.64, "word": " as", "probability": 0.96044921875}, {"start": 1956.64, "end": 1956.82, "word": " the", "probability": 0.9189453125}, {"start": 1956.82, "end": 1957.04, "word": " mean", "probability": 0.95849609375}, {"start": 1957.04, "end": 1957.46, "word": " of", "probability": 0.96630859375}, {"start": 1957.46, "end": 1957.74, "word": " X", "probability": 0.95458984375}, {"start": 1957.74, "end": 1958.02, "word": " bar.", "probability": 0.92431640625}, {"start": 1959.02, "end": 1959.5, "word": " But", "probability": 0.9384765625}, {"start": 1959.5, "end": 1959.76, "word": " the", "probability": 0.890625}, {"start": 1959.76, "end": 1960.0, "word": " standard", "probability": 0.9189453125}, {"start": 1960.0, "end": 1960.44, "word": " deviation", "probability": 0.91162109375}, {"start": 1960.44, "end": 1960.92, "word": " of", "probability": 
0.96142578125}, {"start": 1960.92, "end": 1961.04, "word": " the", "probability": 0.86181640625}, {"start": 1961.04, "end": 1961.38, "word": " population", "probability": 0.95166015625}, {"start": 1961.38, "end": 1961.86, "word": " is", "probability": 0.94970703125}, {"start": 1961.86, "end": 1962.36, "word": " larger", "probability": 0.94921875}, {"start": 1962.36, "end": 1963.6, "word": " than", "probability": 0.92919921875}, {"start": 1963.6, "end": 1963.9, "word": " the", "probability": 0.91845703125}, {"start": 1963.9, "end": 1964.2, "word": " standard", "probability": 0.931640625}, {"start": 1964.2, "end": 1964.74, "word": " deviation", "probability": 0.912109375}, {"start": 1964.74, "end": 1965.44, "word": " of", "probability": 0.96728515625}, {"start": 1965.44, "end": 1965.9, "word": " the", "probability": 0.927734375}, {"start": 1965.9, "end": 1967.28, "word": " sample", "probability": 0.607421875}, {"start": 1967.28, "end": 1967.52, "word": " mean", "probability": 0.96728515625}, {"start": 1967.52, "end": 1967.68, "word": " or", "probability": 0.73779296875}, {"start": 1967.68, "end": 1967.78, "word": " the", "probability": 0.87109375}, {"start": 1967.78, "end": 1968.06, "word": " average.", "probability": 0.775390625}], "temperature": 1.0}, {"id": 74, "seek": 199985, "start": 1973.83, "end": 1999.85, "text": " Different samples of the same sample size from the same population will yield different sample means. We know that. 
If we have a population and from that population, so we have this big population, from this population suppose we selected 10 samples, sample 1 with size 50.", "tokens": [20825, 10938, 295, 264, 912, 6889, 2744, 490, 264, 912, 4415, 486, 11257, 819, 6889, 1355, 13, 492, 458, 300, 13, 759, 321, 362, 257, 4415, 293, 490, 300, 4415, 11, 370, 321, 362, 341, 955, 4415, 11, 490, 341, 4415, 7297, 321, 8209, 1266, 10938, 11, 6889, 502, 365, 2744, 2625, 13], "avg_logprob": -0.20833333278143848, "compression_ratio": 1.7908496732026145, "no_speech_prob": 0.0, "words": [{"start": 1973.83, "end": 1974.37, "word": " Different", "probability": 0.37890625}, {"start": 1974.37, "end": 1974.99, "word": " samples", "probability": 0.8251953125}, {"start": 1974.99, "end": 1975.21, "word": " of", "probability": 0.87841796875}, {"start": 1975.21, "end": 1975.39, "word": " the", "probability": 0.84619140625}, {"start": 1975.39, "end": 1975.67, "word": " same", "probability": 0.85986328125}, {"start": 1975.67, "end": 1976.07, "word": " sample", "probability": 0.869140625}, {"start": 1976.07, "end": 1976.59, "word": " size", "probability": 0.8115234375}, {"start": 1976.59, "end": 1977.85, "word": " from", "probability": 0.69970703125}, {"start": 1977.85, "end": 1978.09, "word": " the", "probability": 0.91748046875}, {"start": 1978.09, "end": 1978.31, "word": " same", "probability": 0.896484375}, {"start": 1978.31, "end": 1978.87, "word": " population", "probability": 0.94140625}, {"start": 1978.87, "end": 1979.35, "word": " will", "probability": 0.82080078125}, {"start": 1979.35, "end": 1979.63, "word": " yield", "probability": 0.92578125}, {"start": 1979.63, "end": 1980.11, "word": " different", "probability": 0.8798828125}, {"start": 1980.11, "end": 1980.49, "word": " sample", "probability": 0.7880859375}, {"start": 1980.49, "end": 1980.79, "word": " means.", "probability": 0.7236328125}, {"start": 1981.45, "end": 1981.59, "word": " We", "probability": 0.75244140625}, {"start": 1981.59, 
"end": 1981.69, "word": " know", "probability": 0.87890625}, {"start": 1981.69, "end": 1981.93, "word": " that.", "probability": 0.9375}, {"start": 1982.93, "end": 1983.31, "word": " If", "probability": 0.9033203125}, {"start": 1983.31, "end": 1983.41, "word": " we", "probability": 0.9306640625}, {"start": 1983.41, "end": 1983.51, "word": " have", "probability": 0.94873046875}, {"start": 1983.51, "end": 1983.61, "word": " a", "probability": 0.95556640625}, {"start": 1983.61, "end": 1984.03, "word": " population", "probability": 0.93701171875}, {"start": 1984.03, "end": 1985.75, "word": " and", "probability": 0.53857421875}, {"start": 1985.75, "end": 1986.05, "word": " from", "probability": 0.8681640625}, {"start": 1986.05, "end": 1986.33, "word": " that", "probability": 0.93505859375}, {"start": 1986.33, "end": 1986.93, "word": " population,", "probability": 0.92822265625}, {"start": 1987.23, "end": 1987.39, "word": " so", "probability": 0.8193359375}, {"start": 1987.39, "end": 1987.53, "word": " we", "probability": 0.88525390625}, {"start": 1987.53, "end": 1987.65, "word": " have", "probability": 0.94873046875}, {"start": 1987.65, "end": 1987.89, "word": " this", "probability": 0.94873046875}, {"start": 1987.89, "end": 1988.11, "word": " big", "probability": 0.89990234375}, {"start": 1988.11, "end": 1988.57, "word": " population,", "probability": 0.93212890625}, {"start": 1990.25, "end": 1990.53, "word": " from", "probability": 0.732421875}, {"start": 1990.53, "end": 1990.81, "word": " this", "probability": 0.94091796875}, {"start": 1990.81, "end": 1991.29, "word": " population", "probability": 0.9267578125}, {"start": 1991.29, "end": 1991.71, "word": " suppose", "probability": 0.63330078125}, {"start": 1991.71, "end": 1992.21, "word": " we", "probability": 0.8681640625}, {"start": 1992.21, "end": 1992.91, "word": " selected", "probability": 0.87890625}, {"start": 1992.91, "end": 1995.01, "word": " 10", "probability": 0.60595703125}, {"start": 1995.01, "end": 
1995.71, "word": " samples,", "probability": 0.86328125}, {"start": 1995.91, "end": 1996.23, "word": " sample", "probability": 0.8955078125}, {"start": 1996.23, "end": 1996.55, "word": " 1", "probability": 0.72021484375}, {"start": 1996.55, "end": 1997.85, "word": " with", "probability": 0.7685546875}, {"start": 1997.85, "end": 1998.47, "word": " size", "probability": 0.849609375}, {"start": 1998.47, "end": 1999.85, "word": " 50.", "probability": 0.91552734375}], "temperature": 1.0}, {"id": 75, "seek": 202852, "start": 2001.54, "end": 2028.52, "text": " Another sample, sample 2 with the same size. All the way, suppose we select 10 samples, sample 10, also with the same sample size. Each one will have different average, different sample. Maybe the first one has 70, 68, suppose the last one has 71.", "tokens": [3996, 6889, 11, 6889, 568, 365, 264, 912, 2744, 13, 1057, 264, 636, 11, 7297, 321, 3048, 1266, 10938, 11, 6889, 1266, 11, 611, 365, 264, 912, 6889, 2744, 13, 6947, 472, 486, 362, 819, 4274, 11, 819, 6889, 13, 2704, 264, 700, 472, 575, 5285, 11, 23317, 11, 7297, 264, 1036, 472, 575, 30942, 13], "avg_logprob": -0.2197094298245614, "compression_ratio": 1.6423841059602649, "no_speech_prob": 0.0, "words": [{"start": 2001.54, "end": 2001.92, "word": " Another", "probability": 0.501953125}, {"start": 2001.92, "end": 2002.38, "word": " sample,", "probability": 0.71435546875}, {"start": 2002.5, "end": 2002.7, "word": " sample", "probability": 0.78173828125}, {"start": 2002.7, "end": 2003.02, "word": " 2", "probability": 0.6171875}, {"start": 2003.02, "end": 2003.24, "word": " with", "probability": 0.58740234375}, {"start": 2003.24, "end": 2003.38, "word": " the", "probability": 0.869140625}, {"start": 2003.38, "end": 2003.58, "word": " same", "probability": 0.9013671875}, {"start": 2003.58, "end": 2004.02, "word": " size.", "probability": 0.83203125}, {"start": 2006.06, "end": 2006.4, "word": " All", "probability": 0.89892578125}, {"start": 2006.4, "end": 2006.56, 
"word": " the", "probability": 0.9150390625}, {"start": 2006.56, "end": 2006.78, "word": " way,", "probability": 0.96142578125}, {"start": 2007.0, "end": 2007.26, "word": " suppose", "probability": 0.8388671875}, {"start": 2007.26, "end": 2007.54, "word": " we", "probability": 0.8525390625}, {"start": 2007.54, "end": 2007.96, "word": " select", "probability": 0.85400390625}, {"start": 2007.96, "end": 2008.46, "word": " 10", "probability": 0.74365234375}, {"start": 2008.46, "end": 2009.04, "word": " samples,", "probability": 0.8388671875}, {"start": 2009.32, "end": 2009.6, "word": " sample", "probability": 0.81591796875}, {"start": 2009.6, "end": 2009.98, "word": " 10,", "probability": 0.9560546875}, {"start": 2010.86, "end": 2011.36, "word": " also", "probability": 0.86181640625}, {"start": 2011.36, "end": 2011.84, "word": " with", "probability": 0.8955078125}, {"start": 2011.84, "end": 2012.0, "word": " the", "probability": 0.91162109375}, {"start": 2012.0, "end": 2012.8, "word": " same", "probability": 0.89501953125}, {"start": 2012.8, "end": 2013.68, "word": " sample", "probability": 0.888671875}, {"start": 2013.68, "end": 2014.16, "word": " size.", "probability": 0.83740234375}, {"start": 2015.42, "end": 2016.08, "word": " Each", "probability": 0.876953125}, {"start": 2016.08, "end": 2016.4, "word": " one", "probability": 0.93603515625}, {"start": 2016.4, "end": 2018.02, "word": " will", "probability": 0.8701171875}, {"start": 2018.02, "end": 2018.46, "word": " have", "probability": 0.9453125}, {"start": 2018.46, "end": 2021.06, "word": " different", "probability": 0.76513671875}, {"start": 2021.06, "end": 2021.46, "word": " average,", "probability": 0.52294921875}, {"start": 2021.62, "end": 2021.9, "word": " different", "probability": 0.77978515625}, {"start": 2021.9, "end": 2022.22, "word": " sample.", "probability": 0.435302734375}, {"start": 2022.34, "end": 2022.66, "word": " Maybe", "probability": 0.90966796875}, {"start": 2022.66, "end": 2022.96, "word": 
" the", "probability": 0.8984375}, {"start": 2022.96, "end": 2023.2, "word": " first", "probability": 0.88916015625}, {"start": 2023.2, "end": 2023.4, "word": " one", "probability": 0.93359375}, {"start": 2023.4, "end": 2023.62, "word": " has", "probability": 0.8681640625}, {"start": 2023.62, "end": 2024.12, "word": " 70,", "probability": 0.96484375}, {"start": 2024.64, "end": 2025.48, "word": " 68,", "probability": 0.95703125}, {"start": 2026.72, "end": 2027.02, "word": " suppose", "probability": 0.728515625}, {"start": 2027.02, "end": 2027.2, "word": " the", "probability": 0.9150390625}, {"start": 2027.2, "end": 2027.44, "word": " last", "probability": 0.88427734375}, {"start": 2027.44, "end": 2027.68, "word": " one", "probability": 0.92626953125}, {"start": 2027.68, "end": 2028.06, "word": " has", "probability": 0.94580078125}, {"start": 2028.06, "end": 2028.52, "word": " 71.", "probability": 0.9755859375}], "temperature": 1.0}, {"id": 76, "seek": 205164, "start": 2029.88, "end": 2051.64, "text": " So again, different samples of the same size, I got size of 15, from the same population will yield different sample means. This is one fact. 
Now, a measure of the variability, which means sigma.", "tokens": [407, 797, 11, 819, 10938, 295, 264, 912, 2744, 11, 286, 658, 2744, 295, 2119, 11, 490, 264, 912, 4415, 486, 11257, 819, 6889, 1355, 13, 639, 307, 472, 1186, 13, 823, 11, 257, 3481, 295, 264, 35709, 11, 597, 1355, 12771, 13], "avg_logprob": -0.20259232785214076, "compression_ratio": 1.410071942446043, "no_speech_prob": 0.0, "words": [{"start": 2029.88, "end": 2030.16, "word": " So", "probability": 0.80029296875}, {"start": 2030.16, "end": 2030.46, "word": " again,", "probability": 0.75537109375}, {"start": 2030.72, "end": 2030.98, "word": " different", "probability": 0.87451171875}, {"start": 2030.98, "end": 2031.62, "word": " samples", "probability": 0.87109375}, {"start": 2031.62, "end": 2034.04, "word": " of", "probability": 0.90966796875}, {"start": 2034.04, "end": 2034.26, "word": " the", "probability": 0.9130859375}, {"start": 2034.26, "end": 2034.54, "word": " same", "probability": 0.91015625}, {"start": 2034.54, "end": 2035.12, "word": " size,", "probability": 0.81494140625}, {"start": 2035.42, "end": 2036.04, "word": " I", "probability": 0.387451171875}, {"start": 2036.04, "end": 2036.2, "word": " got", "probability": 0.908203125}, {"start": 2036.2, "end": 2036.48, "word": " size", "probability": 0.62109375}, {"start": 2036.48, "end": 2036.66, "word": " of", "probability": 0.76904296875}, {"start": 2036.66, "end": 2037.06, "word": " 15,", "probability": 0.85595703125}, {"start": 2037.72, "end": 2038.12, "word": " from", "probability": 0.861328125}, {"start": 2038.12, "end": 2038.34, "word": " the", "probability": 0.92041015625}, {"start": 2038.34, "end": 2038.58, "word": " same", "probability": 0.90478515625}, {"start": 2038.58, "end": 2039.22, "word": " population", "probability": 0.93603515625}, {"start": 2039.22, "end": 2040.46, "word": " will", "probability": 0.6533203125}, {"start": 2040.46, "end": 2040.72, "word": " yield", "probability": 0.923828125}, {"start": 2040.72, "end": 
2041.14, "word": " different", "probability": 0.8876953125}, {"start": 2041.14, "end": 2041.52, "word": " sample", "probability": 0.8125}, {"start": 2041.52, "end": 2041.86, "word": " means.", "probability": 0.8291015625}, {"start": 2043.12, "end": 2043.28, "word": " This", "probability": 0.7568359375}, {"start": 2043.28, "end": 2043.4, "word": " is", "probability": 0.94384765625}, {"start": 2043.4, "end": 2043.68, "word": " one", "probability": 0.921875}, {"start": 2043.68, "end": 2044.94, "word": " fact.", "probability": 0.89697265625}, {"start": 2045.9, "end": 2046.26, "word": " Now,", "probability": 0.94482421875}, {"start": 2046.66, "end": 2046.82, "word": " a", "probability": 0.81103515625}, {"start": 2046.82, "end": 2047.0, "word": " measure", "probability": 0.87939453125}, {"start": 2047.0, "end": 2047.38, "word": " of", "probability": 0.96826171875}, {"start": 2047.38, "end": 2047.54, "word": " the", "probability": 0.62451171875}, {"start": 2047.54, "end": 2048.08, "word": " variability,", "probability": 0.97509765625}, {"start": 2049.48, "end": 2049.92, "word": " which", "probability": 0.95166015625}, {"start": 2049.92, "end": 2050.44, "word": " means", "probability": 0.94384765625}, {"start": 2050.44, "end": 2051.64, "word": " sigma.", "probability": 0.76123046875}], "temperature": 1.0}, {"id": 77, "seek": 208188, "start": 2053.18, "end": 2081.88, "text": " and I'm interested in x bar. So a measure of variability in the mean from sample to sample is given by something called, instead of saying standard deviation of the sample mean, we have this expression, standard error of the mean. So this one is called standard error of the mean, or sigma of x bar. 
So again, it's called standard", "tokens": [293, 286, 478, 3102, 294, 2031, 2159, 13, 407, 257, 3481, 295, 35709, 294, 264, 914, 490, 6889, 281, 6889, 307, 2212, 538, 746, 1219, 11, 2602, 295, 1566, 3832, 25163, 295, 264, 6889, 914, 11, 321, 362, 341, 6114, 11, 3832, 6713, 295, 264, 914, 13, 407, 341, 472, 307, 1219, 3832, 6713, 295, 264, 914, 11, 420, 12771, 295, 2031, 2159, 13, 407, 797, 11, 309, 311, 1219, 3832], "avg_logprob": -0.1763237803760502, "compression_ratio": 1.8087431693989071, "no_speech_prob": 0.0, "words": [{"start": 2053.18, "end": 2053.48, "word": " and", "probability": 0.3154296875}, {"start": 2053.48, "end": 2053.7, "word": " I'm", "probability": 0.7587890625}, {"start": 2053.7, "end": 2054.16, "word": " interested", "probability": 0.88134765625}, {"start": 2054.16, "end": 2055.38, "word": " in", "probability": 0.9345703125}, {"start": 2055.38, "end": 2055.6, "word": " x", "probability": 0.7041015625}, {"start": 2055.6, "end": 2055.92, "word": " bar.", "probability": 0.80810546875}, {"start": 2056.4, "end": 2056.62, "word": " So", "probability": 0.865234375}, {"start": 2056.62, "end": 2056.78, "word": " a", "probability": 0.5703125}, {"start": 2056.78, "end": 2057.06, "word": " measure", "probability": 0.853515625}, {"start": 2057.06, "end": 2057.34, "word": " of", "probability": 0.9521484375}, {"start": 2057.34, "end": 2057.74, "word": " variability", "probability": 0.966796875}, {"start": 2057.74, "end": 2058.0, "word": " in", "probability": 0.8359375}, {"start": 2058.0, "end": 2058.18, "word": " the", "probability": 0.92724609375}, {"start": 2058.18, "end": 2058.44, "word": " mean", "probability": 0.97216796875}, {"start": 2058.44, "end": 2059.28, "word": " from", "probability": 0.77783203125}, {"start": 2059.28, "end": 2059.74, "word": " sample", "probability": 0.91259765625}, {"start": 2059.74, "end": 2060.02, "word": " to", "probability": 0.96044921875}, {"start": 2060.02, "end": 2060.4, "word": " sample", "probability": 0.888671875}, 
{"start": 2060.4, "end": 2060.7, "word": " is", "probability": 0.89599609375}, {"start": 2060.7, "end": 2060.9, "word": " given", "probability": 0.89111328125}, {"start": 2060.9, "end": 2061.1, "word": " by", "probability": 0.96533203125}, {"start": 2061.1, "end": 2061.42, "word": " something", "probability": 0.8671875}, {"start": 2061.42, "end": 2061.86, "word": " called,", "probability": 0.86962890625}, {"start": 2062.6, "end": 2063.08, "word": " instead", "probability": 0.85205078125}, {"start": 2063.08, "end": 2063.28, "word": " of", "probability": 0.970703125}, {"start": 2063.28, "end": 2063.58, "word": " saying", "probability": 0.9130859375}, {"start": 2063.58, "end": 2064.3, "word": " standard", "probability": 0.8408203125}, {"start": 2064.3, "end": 2064.72, "word": " deviation", "probability": 0.9052734375}, {"start": 2064.72, "end": 2065.0, "word": " of", "probability": 0.96435546875}, {"start": 2065.0, "end": 2065.12, "word": " the", "probability": 0.90869140625}, {"start": 2065.12, "end": 2065.36, "word": " sample", "probability": 0.8701171875}, {"start": 2065.36, "end": 2065.64, "word": " mean,", "probability": 0.94482421875}, {"start": 2066.66, "end": 2066.9, "word": " we", "probability": 0.95361328125}, {"start": 2066.9, "end": 2067.1, "word": " have", "probability": 0.9443359375}, {"start": 2067.1, "end": 2067.5, "word": " this", "probability": 0.943359375}, {"start": 2067.5, "end": 2068.66, "word": " expression,", "probability": 0.8525390625}, {"start": 2069.34, "end": 2069.82, "word": " standard", "probability": 0.5615234375}, {"start": 2069.82, "end": 2070.28, "word": " error", "probability": 0.89453125}, {"start": 2070.28, "end": 2070.62, "word": " of", "probability": 0.96728515625}, {"start": 2070.62, "end": 2070.74, "word": " the", "probability": 0.92041015625}, {"start": 2070.74, "end": 2070.9, "word": " mean.", "probability": 0.9697265625}, {"start": 2071.62, "end": 2071.78, "word": " So", "probability": 0.939453125}, {"start": 2071.78, 
"end": 2072.02, "word": " this", "probability": 0.90185546875}, {"start": 2072.02, "end": 2072.26, "word": " one", "probability": 0.91845703125}, {"start": 2072.26, "end": 2072.46, "word": " is", "probability": 0.8115234375}, {"start": 2072.46, "end": 2072.72, "word": " called", "probability": 0.87158203125}, {"start": 2072.72, "end": 2073.6, "word": " standard", "probability": 0.87158203125}, {"start": 2073.6, "end": 2075.12, "word": " error", "probability": 0.88720703125}, {"start": 2075.12, "end": 2076.7, "word": " of", "probability": 0.96533203125}, {"start": 2076.7, "end": 2076.88, "word": " the", "probability": 0.92333984375}, {"start": 2076.88, "end": 2077.1, "word": " mean,", "probability": 0.970703125}, {"start": 2077.46, "end": 2077.92, "word": " or", "probability": 0.95458984375}, {"start": 2077.92, "end": 2078.92, "word": " sigma", "probability": 0.82958984375}, {"start": 2078.92, "end": 2079.06, "word": " of", "probability": 0.88818359375}, {"start": 2079.06, "end": 2079.22, "word": " x", "probability": 0.986328125}, {"start": 2079.22, "end": 2079.42, "word": " bar.", "probability": 0.9267578125}, {"start": 2080.36, "end": 2080.58, "word": " So", "probability": 0.9326171875}, {"start": 2080.58, "end": 2080.76, "word": " again,", "probability": 0.87890625}, {"start": 2080.82, "end": 2080.94, "word": " it's", "probability": 0.95947265625}, {"start": 2080.94, "end": 2081.18, "word": " called", "probability": 0.88818359375}, {"start": 2081.18, "end": 2081.88, "word": " standard", "probability": 0.91845703125}], "temperature": 1.0}, {"id": 78, "seek": 210207, "start": 2083.59, "end": 2102.07, "text": " error of the mean or simply just say standard error. So it's better to distinguish between population standard deviation sigma and sigma of x bar which is the standard error of the mean. 
So we have sigma", "tokens": [6713, 295, 264, 914, 420, 2935, 445, 584, 3832, 6713, 13, 407, 309, 311, 1101, 281, 20206, 1296, 4415, 3832, 25163, 12771, 293, 12771, 295, 2031, 2159, 597, 307, 264, 3832, 6713, 295, 264, 914, 13, 407, 321, 362, 12771], "avg_logprob": -0.17701981998071437, "compression_ratio": 1.5813953488372092, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 2083.59, "end": 2083.93, "word": " error", "probability": 0.46875}, {"start": 2083.93, "end": 2084.15, "word": " of", "probability": 0.947265625}, {"start": 2084.15, "end": 2084.29, "word": " the", "probability": 0.87744140625}, {"start": 2084.29, "end": 2084.47, "word": " mean", "probability": 0.95751953125}, {"start": 2084.47, "end": 2084.87, "word": " or", "probability": 0.54345703125}, {"start": 2084.87, "end": 2085.25, "word": " simply", "probability": 0.8818359375}, {"start": 2085.25, "end": 2085.53, "word": " just", "probability": 0.8681640625}, {"start": 2085.53, "end": 2085.91, "word": " say", "probability": 0.93505859375}, {"start": 2085.91, "end": 2086.59, "word": " standard", "probability": 0.8486328125}, {"start": 2086.59, "end": 2086.89, "word": " error.", "probability": 0.8935546875}, {"start": 2088.33, "end": 2088.63, "word": " So", "probability": 0.85009765625}, {"start": 2088.63, "end": 2088.81, "word": " it's", "probability": 0.75537109375}, {"start": 2088.81, "end": 2089.01, "word": " better", "probability": 0.91259765625}, {"start": 2089.01, "end": 2089.61, "word": " to", "probability": 0.94970703125}, {"start": 2089.61, "end": 2090.25, "word": " distinguish", "probability": 0.88916015625}, {"start": 2090.25, "end": 2090.85, "word": " between", "probability": 0.853515625}, {"start": 2090.85, "end": 2092.29, "word": " population", "probability": 0.81640625}, {"start": 2092.29, "end": 2092.69, "word": " standard", "probability": 0.7353515625}, {"start": 2092.69, "end": 2093.15, "word": " deviation", "probability": 0.908203125}, {"start": 2093.15, "end": 2093.61, 
"word": " sigma", "probability": 0.454833984375}, {"start": 2093.61, "end": 2096.61, "word": " and", "probability": 0.82275390625}, {"start": 2096.61, "end": 2097.05, "word": " sigma", "probability": 0.90576171875}, {"start": 2097.05, "end": 2097.25, "word": " of", "probability": 0.90966796875}, {"start": 2097.25, "end": 2097.51, "word": " x", "probability": 0.8447265625}, {"start": 2097.51, "end": 2097.85, "word": " bar", "probability": 0.85205078125}, {"start": 2097.85, "end": 2098.17, "word": " which", "probability": 0.84033203125}, {"start": 2098.17, "end": 2098.45, "word": " is", "probability": 0.95068359375}, {"start": 2098.45, "end": 2098.71, "word": " the", "probability": 0.916015625}, {"start": 2098.71, "end": 2099.21, "word": " standard", "probability": 0.9375}, {"start": 2099.21, "end": 2099.61, "word": " error", "probability": 0.87255859375}, {"start": 2099.61, "end": 2100.53, "word": " of", "probability": 0.96337890625}, {"start": 2100.53, "end": 2100.67, "word": " the", "probability": 0.908203125}, {"start": 2100.67, "end": 2100.81, "word": " mean.", "probability": 0.96923828125}, {"start": 2101.17, "end": 2101.43, "word": " So", "probability": 0.92919921875}, {"start": 2101.43, "end": 2101.53, "word": " we", "probability": 0.87890625}, {"start": 2101.53, "end": 2101.67, "word": " have", "probability": 0.9501953125}, {"start": 2101.67, "end": 2102.07, "word": " sigma", "probability": 0.9306640625}], "temperature": 1.0}, {"id": 79, "seek": 212960, "start": 2104.02, "end": 2129.6, "text": " And sigma of x bar. This one is standard error of x bar. And always sigma of x bar is smaller than sigma, unless n equals one. Later we'll see why if n is one, then the two quantities are the same. 
Now, sigma of x bar", "tokens": [400, 12771, 295, 2031, 2159, 13, 639, 472, 307, 3832, 6713, 295, 2031, 2159, 13, 400, 1009, 12771, 295, 2031, 2159, 307, 4356, 813, 12771, 11, 5969, 297, 6915, 472, 13, 11965, 321, 603, 536, 983, 498, 297, 307, 472, 11, 550, 264, 732, 22927, 366, 264, 912, 13, 823, 11, 12771, 295, 2031, 2159], "avg_logprob": -0.2053571372692074, "compression_ratio": 1.5571428571428572, "no_speech_prob": 0.0, "words": [{"start": 2104.02, "end": 2104.42, "word": " And", "probability": 0.52587890625}, {"start": 2104.42, "end": 2105.08, "word": " sigma", "probability": 0.58642578125}, {"start": 2105.08, "end": 2105.24, "word": " of", "probability": 0.88330078125}, {"start": 2105.24, "end": 2105.36, "word": " x", "probability": 0.81591796875}, {"start": 2105.36, "end": 2105.54, "word": " bar.", "probability": 0.93310546875}, {"start": 2105.84, "end": 2106.2, "word": " This", "probability": 0.876953125}, {"start": 2106.2, "end": 2106.5, "word": " one", "probability": 0.916015625}, {"start": 2106.5, "end": 2106.9, "word": " is", "probability": 0.947265625}, {"start": 2106.9, "end": 2107.46, "word": " standard", "probability": 0.85595703125}, {"start": 2107.46, "end": 2107.86, "word": " error", "probability": 0.88330078125}, {"start": 2107.86, "end": 2108.46, "word": " of", "probability": 0.96142578125}, {"start": 2108.46, "end": 2108.98, "word": " x", "probability": 0.98046875}, {"start": 2108.98, "end": 2109.24, "word": " bar.", "probability": 0.9306640625}, {"start": 2109.98, "end": 2110.2, "word": " And", "probability": 0.93408203125}, {"start": 2110.2, "end": 2110.66, "word": " always", "probability": 0.85498046875}, {"start": 2110.66, "end": 2111.88, "word": " sigma", "probability": 0.5888671875}, {"start": 2111.88, "end": 2112.04, "word": " of", "probability": 0.9052734375}, {"start": 2112.04, "end": 2112.18, "word": " x", "probability": 0.99609375}, {"start": 2112.18, "end": 2112.38, "word": " bar", "probability": 0.9501953125}, {"start": 2112.38, 
"end": 2112.54, "word": " is", "probability": 0.94921875}, {"start": 2112.54, "end": 2112.86, "word": " smaller", "probability": 0.87158203125}, {"start": 2112.86, "end": 2114.16, "word": " than", "probability": 0.9130859375}, {"start": 2114.16, "end": 2114.6, "word": " sigma,", "probability": 0.88916015625}, {"start": 2114.94, "end": 2115.48, "word": " unless", "probability": 0.83349609375}, {"start": 2115.48, "end": 2115.76, "word": " n", "probability": 0.8544921875}, {"start": 2115.76, "end": 2116.06, "word": " equals", "probability": 0.787109375}, {"start": 2116.06, "end": 2116.3, "word": " one.", "probability": 0.5341796875}, {"start": 2116.96, "end": 2117.3, "word": " Later", "probability": 0.796875}, {"start": 2117.3, "end": 2117.5, "word": " we'll", "probability": 0.5167236328125}, {"start": 2117.5, "end": 2117.64, "word": " see", "probability": 0.9296875}, {"start": 2117.64, "end": 2118.08, "word": " why", "probability": 0.91455078125}, {"start": 2118.08, "end": 2118.66, "word": " if", "probability": 0.78466796875}, {"start": 2118.66, "end": 2118.82, "word": " n", "probability": 0.9375}, {"start": 2118.82, "end": 2118.96, "word": " is", "probability": 0.916015625}, {"start": 2118.96, "end": 2119.22, "word": " one,", "probability": 0.892578125}, {"start": 2119.34, "end": 2119.56, "word": " then", "probability": 0.841796875}, {"start": 2119.56, "end": 2120.84, "word": " the", "probability": 0.90283203125}, {"start": 2120.84, "end": 2121.08, "word": " two", "probability": 0.9384765625}, {"start": 2121.08, "end": 2121.64, "word": " quantities", "probability": 0.98095703125}, {"start": 2121.64, "end": 2122.8, "word": " are", "probability": 0.9169921875}, {"start": 2122.8, "end": 2122.98, "word": " the", "probability": 0.90283203125}, {"start": 2122.98, "end": 2123.18, "word": " same.", "probability": 0.9150390625}, {"start": 2125.76, "end": 2126.3, "word": " Now,", "probability": 0.94775390625}, {"start": 2127.84, "end": 2128.84, "word": " sigma", 
"probability": 0.84716796875}, {"start": 2128.84, "end": 2129.04, "word": " of", "probability": 0.96142578125}, {"start": 2129.04, "end": 2129.26, "word": " x", "probability": 0.99560546875}, {"start": 2129.26, "end": 2129.6, "word": " bar", "probability": 0.962890625}], "temperature": 1.0}, {"id": 80, "seek": 215982, "start": 2132.34, "end": 2159.82, "text": " In this case, it's 158 equals sigma over root n. I mean, for this specific example, if we divide sigma, which is 2.236 divided by n, and n 2, you will get 1.58. So we got mu x bar equal mu.", "tokens": [682, 341, 1389, 11, 309, 311, 2119, 23, 6915, 12771, 670, 5593, 297, 13, 286, 914, 11, 337, 341, 2685, 1365, 11, 498, 321, 9845, 12771, 11, 597, 307, 568, 13, 9356, 21, 6666, 538, 297, 11, 293, 297, 568, 11, 291, 486, 483, 502, 13, 20419, 13, 407, 321, 658, 2992, 2031, 2159, 2681, 2992, 13], "avg_logprob": -0.21093750077074971, "compression_ratio": 1.3013698630136987, "no_speech_prob": 0.0, "words": [{"start": 2132.34, "end": 2132.54, "word": " In", "probability": 0.2919921875}, {"start": 2132.54, "end": 2132.78, "word": " this", "probability": 0.947265625}, {"start": 2132.78, "end": 2133.12, "word": " case,", "probability": 0.9150390625}, {"start": 2133.34, "end": 2133.46, "word": " it's", "probability": 0.76025390625}, {"start": 2133.46, "end": 2134.26, "word": " 158", "probability": 0.8798828125}, {"start": 2134.26, "end": 2135.12, "word": " equals", "probability": 0.712890625}, {"start": 2135.12, "end": 2135.58, "word": " sigma", "probability": 0.833984375}, {"start": 2135.58, "end": 2135.84, "word": " over", "probability": 0.85693359375}, {"start": 2135.84, "end": 2136.1, "word": " root", "probability": 0.90380859375}, {"start": 2136.1, "end": 2136.28, "word": " n.", "probability": 0.56982421875}, {"start": 2137.6, "end": 2137.82, "word": " I", "probability": 0.947265625}, {"start": 2137.82, "end": 2138.02, "word": " mean,", "probability": 0.9619140625}, {"start": 2138.18, "end": 2138.42, "word": " for", 
"probability": 0.94921875}, {"start": 2138.42, "end": 2138.68, "word": " this", "probability": 0.94775390625}, {"start": 2138.68, "end": 2139.34, "word": " specific", "probability": 0.90478515625}, {"start": 2139.34, "end": 2140.6, "word": " example,", "probability": 0.97216796875}, {"start": 2140.78, "end": 2140.86, "word": " if", "probability": 0.91552734375}, {"start": 2140.86, "end": 2141.16, "word": " we", "probability": 0.95751953125}, {"start": 2141.16, "end": 2142.48, "word": " divide", "probability": 0.9169921875}, {"start": 2142.48, "end": 2143.66, "word": " sigma,", "probability": 0.88525390625}, {"start": 2143.82, "end": 2143.9, "word": " which", "probability": 0.94873046875}, {"start": 2143.9, "end": 2144.06, "word": " is", "probability": 0.9482421875}, {"start": 2144.06, "end": 2144.26, "word": " 2", "probability": 0.98388671875}, {"start": 2144.26, "end": 2145.22, "word": ".236", "probability": 0.9466145833333334}, {"start": 2145.22, "end": 2145.56, "word": " divided", "probability": 0.3740234375}, {"start": 2145.56, "end": 2145.84, "word": " by", "probability": 0.97119140625}, {"start": 2145.84, "end": 2146.16, "word": " n,", "probability": 0.92529296875}, {"start": 2146.82, "end": 2147.7, "word": " and", "probability": 0.91796875}, {"start": 2147.7, "end": 2147.98, "word": " n", "probability": 0.80810546875}, {"start": 2147.98, "end": 2150.16, "word": " 2,", "probability": 0.369140625}, {"start": 2150.4, "end": 2150.62, "word": " you", "probability": 0.71435546875}, {"start": 2150.62, "end": 2150.8, "word": " will", "probability": 0.763671875}, {"start": 2150.8, "end": 2151.14, "word": " get", "probability": 0.94189453125}, {"start": 2151.14, "end": 2152.72, "word": " 1", "probability": 0.98974609375}, {"start": 2152.72, "end": 2153.44, "word": ".58.", "probability": 0.984130859375}, {"start": 2157.62, "end": 2158.16, "word": " So", "probability": 0.95263671875}, {"start": 2158.16, "end": 2158.3, "word": " we", "probability": 0.8173828125}, 
{"start": 2158.3, "end": 2158.52, "word": " got", "probability": 0.830078125}, {"start": 2158.52, "end": 2158.74, "word": " mu", "probability": 0.6611328125}, {"start": 2158.74, "end": 2159.02, "word": " x", "probability": 0.904296875}, {"start": 2159.02, "end": 2159.28, "word": " bar", "probability": 0.77099609375}, {"start": 2159.28, "end": 2159.48, "word": " equal", "probability": 0.6669921875}, {"start": 2159.48, "end": 2159.82, "word": " mu.", "probability": 0.64990234375}], "temperature": 1.0}, {"id": 81, "seek": 218917, "start": 2162.14, "end": 2189.18, "text": " The spread is sigma over root. Now, if you compare sigma x bar and sigma, always sigma of x bar is smaller than sigma, unless m equals 1. And in reality, we don't have a sample of size 1. So always the sample size is greater than 1. So always sigma of x bar is smaller than sigma of the standard deviation of normalization.", "tokens": [440, 3974, 307, 12771, 670, 5593, 13, 823, 11, 498, 291, 6794, 12771, 2031, 2159, 293, 12771, 11, 1009, 12771, 295, 2031, 2159, 307, 4356, 813, 12771, 11, 5969, 275, 6915, 502, 13, 400, 294, 4103, 11, 321, 500, 380, 362, 257, 6889, 295, 2744, 502, 13, 407, 1009, 264, 6889, 2744, 307, 5044, 813, 502, 13, 407, 1009, 12771, 295, 2031, 2159, 307, 4356, 813, 12771, 295, 264, 3832, 25163, 295, 2710, 2144, 13], "avg_logprob": -0.2732318973070697, "compression_ratio": 1.7704918032786885, "no_speech_prob": 0.0, "words": [{"start": 2162.14, "end": 2162.42, "word": " The", "probability": 0.404296875}, {"start": 2162.42, "end": 2162.96, "word": " spread", "probability": 0.87451171875}, {"start": 2162.96, "end": 2163.32, "word": " is", "probability": 0.92822265625}, {"start": 2163.32, "end": 2163.64, "word": " sigma", "probability": 0.83642578125}, {"start": 2163.64, "end": 2163.86, "word": " over", "probability": 0.869140625}, {"start": 2163.86, "end": 2164.06, "word": " root.", "probability": 0.83349609375}, {"start": 2165.3, "end": 2165.76, "word": " Now,", "probability": 
0.80908203125}, {"start": 2165.84, "end": 2165.86, "word": " if", "probability": 0.94384765625}, {"start": 2165.86, "end": 2165.98, "word": " you", "probability": 0.802734375}, {"start": 2165.98, "end": 2166.26, "word": " compare", "probability": 0.93701171875}, {"start": 2166.26, "end": 2166.54, "word": " sigma", "probability": 0.8974609375}, {"start": 2166.54, "end": 2166.72, "word": " x", "probability": 0.459716796875}, {"start": 2166.72, "end": 2166.86, "word": " bar", "probability": 0.8173828125}, {"start": 2166.86, "end": 2167.06, "word": " and", "probability": 0.6123046875}, {"start": 2167.06, "end": 2167.32, "word": " sigma,", "probability": 0.9501953125}, {"start": 2167.64, "end": 2168.14, "word": " always", "probability": 0.59814453125}, {"start": 2168.14, "end": 2169.46, "word": " sigma", "probability": 0.78466796875}, {"start": 2169.46, "end": 2169.64, "word": " of", "probability": 0.6748046875}, {"start": 2169.64, "end": 2169.78, "word": " x", "probability": 0.990234375}, {"start": 2169.78, "end": 2170.0, "word": " bar", "probability": 0.95361328125}, {"start": 2170.0, "end": 2170.14, "word": " is", "probability": 0.9140625}, {"start": 2170.14, "end": 2170.44, "word": " smaller", "probability": 0.85009765625}, {"start": 2170.44, "end": 2170.68, "word": " than", "probability": 0.93408203125}, {"start": 2170.68, "end": 2171.08, "word": " sigma,", "probability": 0.93603515625}, {"start": 2171.18, "end": 2171.5, "word": " unless", "probability": 0.7421875}, {"start": 2171.5, "end": 2171.76, "word": " m", "probability": 0.466552734375}, {"start": 2171.76, "end": 2172.06, "word": " equals", "probability": 0.8984375}, {"start": 2172.06, "end": 2172.3, "word": " 1.", "probability": 0.5537109375}, {"start": 2173.28, "end": 2173.68, "word": " And", "probability": 0.8271484375}, {"start": 2173.68, "end": 2173.8, "word": " in", "probability": 0.4189453125}, {"start": 2173.8, "end": 2174.1, "word": " reality,", "probability": 0.9580078125}, {"start": 2174.22, 
"end": 2174.3, "word": " we", "probability": 0.7919921875}, {"start": 2174.3, "end": 2174.46, "word": " don't", "probability": 0.8291015625}, {"start": 2174.46, "end": 2174.64, "word": " have", "probability": 0.94482421875}, {"start": 2174.64, "end": 2174.8, "word": " a", "probability": 0.9384765625}, {"start": 2174.8, "end": 2175.0, "word": " sample", "probability": 0.87158203125}, {"start": 2175.0, "end": 2175.16, "word": " of", "probability": 0.495361328125}, {"start": 2175.16, "end": 2175.34, "word": " size", "probability": 0.8642578125}, {"start": 2175.34, "end": 2175.58, "word": " 1.", "probability": 0.84814453125}, {"start": 2176.18, "end": 2176.6, "word": " So", "probability": 0.95751953125}, {"start": 2176.6, "end": 2176.92, "word": " always", "probability": 0.54248046875}, {"start": 2176.92, "end": 2177.12, "word": " the", "probability": 0.70703125}, {"start": 2177.12, "end": 2177.38, "word": " sample", "probability": 0.900390625}, {"start": 2177.38, "end": 2177.86, "word": " size", "probability": 0.85400390625}, {"start": 2177.86, "end": 2178.12, "word": " is", "probability": 0.94482421875}, {"start": 2178.12, "end": 2178.46, "word": " greater", "probability": 0.8994140625}, {"start": 2178.46, "end": 2178.86, "word": " than", "probability": 0.9423828125}, {"start": 2178.86, "end": 2179.9, "word": " 1.", "probability": 0.92578125}, {"start": 2180.2, "end": 2180.2, "word": " So", "probability": 0.94482421875}, {"start": 2180.2, "end": 2181.52, "word": " always", "probability": 0.63623046875}, {"start": 2181.52, "end": 2181.96, "word": " sigma", "probability": 0.8232421875}, {"start": 2181.96, "end": 2182.14, "word": " of", "probability": 0.8525390625}, {"start": 2182.14, "end": 2182.3, "word": " x", "probability": 0.9970703125}, {"start": 2182.3, "end": 2182.66, "word": " bar", "probability": 0.96044921875}, {"start": 2182.66, "end": 2183.5, "word": " is", "probability": 0.95068359375}, {"start": 2183.5, "end": 2183.96, "word": " smaller", "probability": 
0.873046875}, {"start": 2183.96, "end": 2184.56, "word": " than", "probability": 0.9375}, {"start": 2184.56, "end": 2185.9, "word": " sigma", "probability": 0.75390625}, {"start": 2185.9, "end": 2187.0, "word": " of", "probability": 0.5634765625}, {"start": 2187.0, "end": 2187.28, "word": " the", "probability": 0.5244140625}, {"start": 2187.28, "end": 2187.5, "word": " standard", "probability": 0.90576171875}, {"start": 2187.5, "end": 2187.92, "word": " deviation", "probability": 0.5390625}, {"start": 2187.92, "end": 2188.66, "word": " of", "probability": 0.859375}, {"start": 2188.66, "end": 2189.18, "word": " normalization.", "probability": 0.32891845703125}], "temperature": 1.0}, {"id": 82, "seek": 221902, "start": 2191.2, "end": 2219.02, "text": " Now if you look at the relationship between the standard error of X bar and the sample size, we'll see that as the sample size increases, sigma of X bar decreases. So if we have large sample size, I mean instead of selecting a random sample of size 2, if you select a random sample of size 3 for example, you will get sigma of X bar less than 1.58.", "tokens": [823, 498, 291, 574, 412, 264, 2480, 1296, 264, 3832, 6713, 295, 1783, 2159, 293, 264, 6889, 2744, 11, 321, 603, 536, 300, 382, 264, 6889, 2744, 8637, 11, 12771, 295, 1783, 2159, 24108, 13, 407, 498, 321, 362, 2416, 6889, 2744, 11, 286, 914, 2602, 295, 18182, 257, 4974, 6889, 295, 2744, 568, 11, 498, 291, 3048, 257, 4974, 6889, 295, 2744, 805, 337, 1365, 11, 291, 486, 483, 12771, 295, 1783, 2159, 1570, 813, 502, 13, 20419, 13], "avg_logprob": -0.15538195033132293, "compression_ratio": 1.7626262626262625, "no_speech_prob": 0.0, "words": [{"start": 2191.2, "end": 2191.48, "word": " Now", "probability": 0.9189453125}, {"start": 2191.48, "end": 2191.8, "word": " if", "probability": 0.58447265625}, {"start": 2191.8, "end": 2191.9, "word": " you", "probability": 0.71142578125}, {"start": 2191.9, "end": 2192.06, "word": " look", "probability": 0.966796875}, {"start": 
2192.06, "end": 2192.3, "word": " at", "probability": 0.96826171875}, {"start": 2192.3, "end": 2192.6, "word": " the", "probability": 0.91748046875}, {"start": 2192.6, "end": 2193.18, "word": " relationship", "probability": 0.90966796875}, {"start": 2193.18, "end": 2193.7, "word": " between", "probability": 0.873046875}, {"start": 2193.7, "end": 2194.66, "word": " the", "probability": 0.83935546875}, {"start": 2194.66, "end": 2195.02, "word": " standard", "probability": 0.92578125}, {"start": 2195.02, "end": 2195.24, "word": " error", "probability": 0.86572265625}, {"start": 2195.24, "end": 2195.44, "word": " of", "probability": 0.95751953125}, {"start": 2195.44, "end": 2195.6, "word": " X", "probability": 0.505859375}, {"start": 2195.6, "end": 2195.84, "word": " bar", "probability": 0.88671875}, {"start": 2195.84, "end": 2196.06, "word": " and", "probability": 0.900390625}, {"start": 2196.06, "end": 2196.22, "word": " the", "probability": 0.78173828125}, {"start": 2196.22, "end": 2196.38, "word": " sample", "probability": 0.8828125}, {"start": 2196.38, "end": 2196.8, "word": " size,", "probability": 0.8642578125}, {"start": 2199.06, "end": 2199.34, "word": " we'll", "probability": 0.528076171875}, {"start": 2199.34, "end": 2199.54, "word": " see", "probability": 0.9287109375}, {"start": 2199.54, "end": 2199.82, "word": " that", "probability": 0.89599609375}, {"start": 2199.82, "end": 2200.3, "word": " as", "probability": 0.92529296875}, {"start": 2200.3, "end": 2200.48, "word": " the", "probability": 0.91455078125}, {"start": 2200.48, "end": 2200.78, "word": " sample", "probability": 0.88525390625}, {"start": 2200.78, "end": 2201.24, "word": " size", "probability": 0.86767578125}, {"start": 2201.24, "end": 2201.76, "word": " increases,", "probability": 0.82470703125}, {"start": 2202.5, "end": 2203.16, "word": " sigma", "probability": 0.7763671875}, {"start": 2203.16, "end": 2203.36, "word": " of", "probability": 0.95068359375}, {"start": 2203.36, "end": 2203.54, 
"word": " X", "probability": 0.94580078125}, {"start": 2203.54, "end": 2203.86, "word": " bar", "probability": 0.96142578125}, {"start": 2203.86, "end": 2204.78, "word": " decreases.", "probability": 0.9638671875}, {"start": 2205.28, "end": 2205.58, "word": " So", "probability": 0.9306640625}, {"start": 2205.58, "end": 2205.8, "word": " if", "probability": 0.875}, {"start": 2205.8, "end": 2205.94, "word": " we", "probability": 0.8935546875}, {"start": 2205.94, "end": 2206.08, "word": " have", "probability": 0.9462890625}, {"start": 2206.08, "end": 2206.44, "word": " large", "probability": 0.85302734375}, {"start": 2206.44, "end": 2206.94, "word": " sample", "probability": 0.89501953125}, {"start": 2206.94, "end": 2207.88, "word": " size,", "probability": 0.85693359375}, {"start": 2208.3, "end": 2208.44, "word": " I", "probability": 0.9794921875}, {"start": 2208.44, "end": 2208.58, "word": " mean", "probability": 0.97021484375}, {"start": 2208.58, "end": 2209.04, "word": " instead", "probability": 0.591796875}, {"start": 2209.04, "end": 2210.28, "word": " of", "probability": 0.9599609375}, {"start": 2210.28, "end": 2210.76, "word": " selecting", "probability": 0.88232421875}, {"start": 2210.76, "end": 2210.96, "word": " a", "probability": 0.98681640625}, {"start": 2210.96, "end": 2211.2, "word": " random", "probability": 0.8330078125}, {"start": 2211.2, "end": 2211.54, "word": " sample", "probability": 0.89208984375}, {"start": 2211.54, "end": 2211.72, "word": " of", "probability": 0.9013671875}, {"start": 2211.72, "end": 2211.94, "word": " size", "probability": 0.87451171875}, {"start": 2211.94, "end": 2212.26, "word": " 2,", "probability": 0.61865234375}, {"start": 2212.38, "end": 2212.48, "word": " if", "probability": 0.62109375}, {"start": 2212.48, "end": 2212.6, "word": " you", "probability": 0.85205078125}, {"start": 2212.6, "end": 2212.92, "word": " select", "probability": 0.869140625}, {"start": 2212.92, "end": 2213.04, "word": " a", "probability": 
0.92822265625}, {"start": 2213.04, "end": 2213.2, "word": " random", "probability": 0.8095703125}, {"start": 2213.2, "end": 2213.38, "word": " sample", "probability": 0.87841796875}, {"start": 2213.38, "end": 2213.52, "word": " of", "probability": 0.9033203125}, {"start": 2213.52, "end": 2213.66, "word": " size", "probability": 0.861328125}, {"start": 2213.66, "end": 2213.9, "word": " 3", "probability": 0.98828125}, {"start": 2213.9, "end": 2214.08, "word": " for", "probability": 0.5234375}, {"start": 2214.08, "end": 2214.44, "word": " example,", "probability": 0.97509765625}, {"start": 2215.06, "end": 2215.2, "word": " you", "probability": 0.95849609375}, {"start": 2215.2, "end": 2215.36, "word": " will", "probability": 0.87548828125}, {"start": 2215.36, "end": 2215.64, "word": " get", "probability": 0.93994140625}, {"start": 2215.64, "end": 2216.22, "word": " sigma", "probability": 0.91943359375}, {"start": 2216.22, "end": 2216.4, "word": " of", "probability": 0.94091796875}, {"start": 2216.4, "end": 2216.58, "word": " X", "probability": 0.9560546875}, {"start": 2216.58, "end": 2216.9, "word": " bar", "probability": 0.9541015625}, {"start": 2216.9, "end": 2217.42, "word": " less", "probability": 0.8125}, {"start": 2217.42, "end": 2218.22, "word": " than", "probability": 0.9384765625}, {"start": 2218.22, "end": 2218.36, "word": " 1", "probability": 0.85302734375}, {"start": 2218.36, "end": 2219.02, "word": ".58.", "probability": 0.950927734375}], "temperature": 1.0}, {"id": 83, "seek": 224494, "start": 2219.94, "end": 2244.94, "text": " So note that standard error of the mean decreases as the sample size goes up. So as n increases, sigma of x bar goes down. So there is inverse relationship between the standard error of the mean and the sample size. So now we answered the three questions. 
The shape looks like bell shape.", "tokens": [407, 3637, 300, 3832, 6713, 295, 264, 914, 24108, 382, 264, 6889, 2744, 1709, 493, 13, 407, 382, 297, 8637, 11, 12771, 295, 2031, 2159, 1709, 760, 13, 407, 456, 307, 17340, 2480, 1296, 264, 3832, 6713, 295, 264, 914, 293, 264, 6889, 2744, 13, 407, 586, 321, 10103, 264, 1045, 1651, 13, 440, 3909, 1542, 411, 4549, 3909, 13], "avg_logprob": -0.19454406421692644, "compression_ratio": 1.7515151515151515, "no_speech_prob": 0.0, "words": [{"start": 2219.94, "end": 2220.28, "word": " So", "probability": 0.66943359375}, {"start": 2220.28, "end": 2220.6, "word": " note", "probability": 0.5078125}, {"start": 2220.6, "end": 2220.92, "word": " that", "probability": 0.927734375}, {"start": 2220.92, "end": 2221.32, "word": " standard", "probability": 0.583984375}, {"start": 2221.32, "end": 2221.68, "word": " error", "probability": 0.89501953125}, {"start": 2221.68, "end": 2222.96, "word": " of", "probability": 0.95361328125}, {"start": 2222.96, "end": 2223.14, "word": " the", "probability": 0.92919921875}, {"start": 2223.14, "end": 2223.36, "word": " mean", "probability": 0.9619140625}, {"start": 2223.36, "end": 2224.2, "word": " decreases", "probability": 0.9482421875}, {"start": 2224.2, "end": 2225.9, "word": " as", "probability": 0.93017578125}, {"start": 2225.9, "end": 2226.26, "word": " the", "probability": 0.89599609375}, {"start": 2226.26, "end": 2226.5, "word": " sample", "probability": 0.923828125}, {"start": 2226.5, "end": 2227.02, "word": " size", "probability": 0.85693359375}, {"start": 2227.02, "end": 2227.86, "word": " goes", "probability": 0.9169921875}, {"start": 2227.86, "end": 2228.2, "word": " up.", "probability": 0.9736328125}, {"start": 2228.56, "end": 2228.86, "word": " So", "probability": 0.88916015625}, {"start": 2228.86, "end": 2229.1, "word": " as", "probability": 0.853515625}, {"start": 2229.1, "end": 2229.26, "word": " n", "probability": 0.52294921875}, {"start": 2229.26, "end": 2229.7, "word": " 
increases,", "probability": 0.927734375}, {"start": 2229.96, "end": 2230.2, "word": " sigma", "probability": 0.7021484375}, {"start": 2230.2, "end": 2230.36, "word": " of", "probability": 0.59326171875}, {"start": 2230.36, "end": 2230.5, "word": " x", "probability": 0.89208984375}, {"start": 2230.5, "end": 2230.82, "word": " bar", "probability": 0.880859375}, {"start": 2230.82, "end": 2231.4, "word": " goes", "probability": 0.92138671875}, {"start": 2231.4, "end": 2231.68, "word": " down.", "probability": 0.8583984375}, {"start": 2232.36, "end": 2232.58, "word": " So", "probability": 0.94091796875}, {"start": 2232.58, "end": 2232.82, "word": " there", "probability": 0.86376953125}, {"start": 2232.82, "end": 2233.0, "word": " is", "probability": 0.86474609375}, {"start": 2233.0, "end": 2233.62, "word": " inverse", "probability": 0.6669921875}, {"start": 2233.62, "end": 2234.6, "word": " relationship", "probability": 0.84814453125}, {"start": 2234.6, "end": 2235.14, "word": " between", "probability": 0.876953125}, {"start": 2235.14, "end": 2236.58, "word": " the", "probability": 0.86767578125}, {"start": 2236.58, "end": 2237.04, "word": " standard", "probability": 0.92919921875}, {"start": 2237.04, "end": 2237.28, "word": " error", "probability": 0.87646484375}, {"start": 2237.28, "end": 2237.44, "word": " of", "probability": 0.9658203125}, {"start": 2237.44, "end": 2237.56, "word": " the", "probability": 0.9248046875}, {"start": 2237.56, "end": 2237.78, "word": " mean", "probability": 0.97509765625}, {"start": 2237.78, "end": 2238.38, "word": " and", "probability": 0.92041015625}, {"start": 2238.38, "end": 2239.24, "word": " the", "probability": 0.7548828125}, {"start": 2239.24, "end": 2239.5, "word": " sample", "probability": 0.87451171875}, {"start": 2239.5, "end": 2239.88, "word": " size.", "probability": 0.8330078125}, {"start": 2240.5, "end": 2240.96, "word": " So", "probability": 0.91552734375}, {"start": 2240.96, "end": 2241.12, "word": " now", "probability": 
0.90185546875}, {"start": 2241.12, "end": 2241.32, "word": " we", "probability": 0.884765625}, {"start": 2241.32, "end": 2241.9, "word": " answered", "probability": 0.5224609375}, {"start": 2241.9, "end": 2242.22, "word": " the", "probability": 0.6875}, {"start": 2242.22, "end": 2242.46, "word": " three", "probability": 0.85888671875}, {"start": 2242.46, "end": 2242.94, "word": " questions.", "probability": 0.95166015625}, {"start": 2243.32, "end": 2243.5, "word": " The", "probability": 0.83203125}, {"start": 2243.5, "end": 2243.8, "word": " shape", "probability": 0.9482421875}, {"start": 2243.8, "end": 2244.22, "word": " looks", "probability": 0.82470703125}, {"start": 2244.22, "end": 2244.5, "word": " like", "probability": 0.94140625}, {"start": 2244.5, "end": 2244.66, "word": " bell", "probability": 0.27587890625}, {"start": 2244.66, "end": 2244.94, "word": " shape.", "probability": 0.87548828125}], "temperature": 1.0}, {"id": 84, "seek": 227525, "start": 2247.19, "end": 2275.25, "text": " if we select our sample from normal population with mean equals the population mean and standard deviation of standard error equals sigma over square root of that. 
So now, let's talk about sampling distribution of the sample mean if the population is normal.", "tokens": [498, 321, 3048, 527, 6889, 490, 2710, 4415, 365, 914, 6915, 264, 4415, 914, 293, 3832, 25163, 295, 3832, 6713, 6915, 12771, 670, 3732, 5593, 295, 300, 13, 407, 586, 11, 718, 311, 751, 466, 21179, 7316, 295, 264, 6889, 914, 498, 264, 4415, 307, 2710, 13], "avg_logprob": -0.21647135571887097, "compression_ratio": 1.6818181818181819, "no_speech_prob": 0.0, "words": [{"start": 2247.19, "end": 2247.59, "word": " if", "probability": 0.43896484375}, {"start": 2247.59, "end": 2248.03, "word": " we", "probability": 0.9482421875}, {"start": 2248.03, "end": 2249.79, "word": " select", "probability": 0.802734375}, {"start": 2249.79, "end": 2250.13, "word": " our", "probability": 0.904296875}, {"start": 2250.13, "end": 2250.47, "word": " sample", "probability": 0.87744140625}, {"start": 2250.47, "end": 2250.81, "word": " from", "probability": 0.873046875}, {"start": 2250.81, "end": 2251.29, "word": " normal", "probability": 0.75146484375}, {"start": 2251.29, "end": 2251.73, "word": " population", "probability": 0.96533203125}, {"start": 2251.73, "end": 2253.41, "word": " with", "probability": 0.7060546875}, {"start": 2253.41, "end": 2253.85, "word": " mean", "probability": 0.904296875}, {"start": 2253.85, "end": 2256.03, "word": " equals", "probability": 0.80712890625}, {"start": 2256.03, "end": 2256.45, "word": " the", "probability": 0.7255859375}, {"start": 2256.45, "end": 2257.43, "word": " population", "probability": 0.91162109375}, {"start": 2257.43, "end": 2257.85, "word": " mean", "probability": 0.95556640625}, {"start": 2257.85, "end": 2258.53, "word": " and", "probability": 0.72021484375}, {"start": 2258.53, "end": 2258.89, "word": " standard", "probability": 0.865234375}, {"start": 2258.89, "end": 2259.27, "word": " deviation", "probability": 0.91064453125}, {"start": 2259.27, "end": 2259.51, "word": " of", "probability": 0.43505859375}, {"start": 2259.51, 
"end": 2259.79, "word": " standard", "probability": 0.85205078125}, {"start": 2259.79, "end": 2260.09, "word": " error", "probability": 0.71240234375}, {"start": 2260.09, "end": 2260.53, "word": " equals", "probability": 0.94384765625}, {"start": 2260.53, "end": 2261.37, "word": " sigma", "probability": 0.79833984375}, {"start": 2261.37, "end": 2261.83, "word": " over", "probability": 0.90625}, {"start": 2261.83, "end": 2262.99, "word": " square", "probability": 0.7978515625}, {"start": 2262.99, "end": 2263.23, "word": " root", "probability": 0.92138671875}, {"start": 2263.23, "end": 2263.39, "word": " of", "probability": 0.76708984375}, {"start": 2263.39, "end": 2263.59, "word": " that.", "probability": 0.258544921875}, {"start": 2265.95, "end": 2266.27, "word": " So", "probability": 0.83447265625}, {"start": 2266.27, "end": 2266.51, "word": " now,", "probability": 0.7001953125}, {"start": 2266.99, "end": 2267.93, "word": " let's", "probability": 0.93310546875}, {"start": 2267.93, "end": 2268.17, "word": " talk", "probability": 0.90185546875}, {"start": 2268.17, "end": 2268.85, "word": " about", "probability": 0.90966796875}, {"start": 2268.85, "end": 2269.89, "word": " sampling", "probability": 0.95947265625}, {"start": 2269.89, "end": 2270.97, "word": " distribution", "probability": 0.5751953125}, {"start": 2270.97, "end": 2272.05, "word": " of", "probability": 0.96484375}, {"start": 2272.05, "end": 2272.25, "word": " the", "probability": 0.89892578125}, {"start": 2272.25, "end": 2272.55, "word": " sample", "probability": 0.91259765625}, {"start": 2272.55, "end": 2272.91, "word": " mean", "probability": 0.95361328125}, {"start": 2272.91, "end": 2273.73, "word": " if", "probability": 0.81591796875}, {"start": 2273.73, "end": 2273.95, "word": " the", "probability": 0.9150390625}, {"start": 2273.95, "end": 2274.41, "word": " population", "probability": 0.94140625}, {"start": 2274.41, "end": 2274.89, "word": " is", "probability": 0.9501953125}, {"start": 2274.89, 
"end": 2275.25, "word": " normal.", "probability": 0.8701171875}], "temperature": 1.0}, {"id": 85, "seek": 230087, "start": 2276.47, "end": 2300.87, "text": " So now, my population is normally distributed, and we are interested in the sampling distribution of the sample mean of X bar. If the population is normally distributed with mean mu and standard deviation sigma, in this case, the sampling distribution of X bar is also normally distributed, so this is the shape.", "tokens": [407, 586, 11, 452, 4415, 307, 5646, 12631, 11, 293, 321, 366, 3102, 294, 264, 21179, 7316, 295, 264, 6889, 914, 295, 1783, 2159, 13, 759, 264, 4415, 307, 5646, 12631, 365, 914, 2992, 293, 3832, 25163, 12771, 11, 294, 341, 1389, 11, 264, 21179, 7316, 295, 1783, 2159, 307, 611, 5646, 12631, 11, 370, 341, 307, 264, 3909, 13], "avg_logprob": -0.18557889051124699, "compression_ratio": 2.0193548387096776, "no_speech_prob": 0.0, "words": [{"start": 2276.47, "end": 2276.75, "word": " So", "probability": 0.71728515625}, {"start": 2276.75, "end": 2277.09, "word": " now,", "probability": 0.78564453125}, {"start": 2277.75, "end": 2278.13, "word": " my", "probability": 0.93603515625}, {"start": 2278.13, "end": 2278.69, "word": " population", "probability": 0.9326171875}, {"start": 2278.69, "end": 2279.17, "word": " is", "probability": 0.94775390625}, {"start": 2279.17, "end": 2279.59, "word": " normally", "probability": 0.86474609375}, {"start": 2279.59, "end": 2280.21, "word": " distributed,", "probability": 0.91162109375}, {"start": 2280.95, "end": 2282.47, "word": " and", "probability": 0.908203125}, {"start": 2282.47, "end": 2282.63, "word": " we", "probability": 0.95068359375}, {"start": 2282.63, "end": 2282.77, "word": " are", "probability": 0.9091796875}, {"start": 2282.77, "end": 2283.23, "word": " interested", "probability": 0.8779296875}, {"start": 2283.23, "end": 2283.65, "word": " in", "probability": 0.9453125}, {"start": 2283.65, "end": 2283.83, "word": " the", "probability": 
0.87255859375}, {"start": 2283.83, "end": 2284.05, "word": " sampling", "probability": 0.66650390625}, {"start": 2284.05, "end": 2284.69, "word": " distribution", "probability": 0.84912109375}, {"start": 2284.69, "end": 2285.11, "word": " of", "probability": 0.96337890625}, {"start": 2285.11, "end": 2285.27, "word": " the", "probability": 0.7255859375}, {"start": 2285.27, "end": 2285.55, "word": " sample", "probability": 0.880859375}, {"start": 2285.55, "end": 2285.75, "word": " mean", "probability": 0.9140625}, {"start": 2285.75, "end": 2285.99, "word": " of", "probability": 0.703125}, {"start": 2285.99, "end": 2286.21, "word": " X", "probability": 0.6337890625}, {"start": 2286.21, "end": 2286.43, "word": " bar.", "probability": 0.7578125}, {"start": 2287.63, "end": 2288.33, "word": " If", "probability": 0.94775390625}, {"start": 2288.33, "end": 2288.45, "word": " the", "probability": 0.8642578125}, {"start": 2288.45, "end": 2288.83, "word": " population", "probability": 0.94580078125}, {"start": 2288.83, "end": 2289.07, "word": " is", "probability": 0.94775390625}, {"start": 2289.07, "end": 2289.45, "word": " normally", "probability": 0.88134765625}, {"start": 2289.45, "end": 2290.07, "word": " distributed", "probability": 0.9208984375}, {"start": 2290.07, "end": 2291.33, "word": " with", "probability": 0.71826171875}, {"start": 2291.33, "end": 2291.55, "word": " mean", "probability": 0.8017578125}, {"start": 2291.55, "end": 2291.77, "word": " mu", "probability": 0.54736328125}, {"start": 2291.77, "end": 2292.27, "word": " and", "probability": 0.86279296875}, {"start": 2292.27, "end": 2292.75, "word": " standard", "probability": 0.888671875}, {"start": 2292.75, "end": 2293.07, "word": " deviation", "probability": 0.9169921875}, {"start": 2293.07, "end": 2293.53, "word": " sigma,", "probability": 0.86376953125}, {"start": 2294.43, "end": 2294.61, "word": " in", "probability": 0.90673828125}, {"start": 2294.61, "end": 2294.87, "word": " this", "probability": 
0.947265625}, {"start": 2294.87, "end": 2295.29, "word": " case,", "probability": 0.9130859375}, {"start": 2295.59, "end": 2295.73, "word": " the", "probability": 0.8984375}, {"start": 2295.73, "end": 2296.09, "word": " sampling", "probability": 0.94580078125}, {"start": 2296.09, "end": 2296.71, "word": " distribution", "probability": 0.8466796875}, {"start": 2296.71, "end": 2296.95, "word": " of", "probability": 0.95654296875}, {"start": 2296.95, "end": 2297.11, "word": " X", "probability": 0.955078125}, {"start": 2297.11, "end": 2297.41, "word": " bar", "probability": 0.92529296875}, {"start": 2297.41, "end": 2298.09, "word": " is", "probability": 0.94091796875}, {"start": 2298.09, "end": 2298.59, "word": " also", "probability": 0.890625}, {"start": 2298.59, "end": 2299.01, "word": " normally", "probability": 0.8935546875}, {"start": 2299.01, "end": 2299.73, "word": " distributed,", "probability": 0.90869140625}, {"start": 2300.01, "end": 2300.15, "word": " so", "probability": 0.9140625}, {"start": 2300.15, "end": 2300.37, "word": " this", "probability": 0.904296875}, {"start": 2300.37, "end": 2300.45, "word": " is", "probability": 0.943359375}, {"start": 2300.45, "end": 2300.59, "word": " the", "probability": 0.90673828125}, {"start": 2300.59, "end": 2300.87, "word": " shape.", "probability": 0.923828125}], "temperature": 1.0}, {"id": 86, "seek": 232231, "start": 2302.51, "end": 2322.31, "text": " with mean of X bar equals mu and sigma of X bar equals sigma over 2. 
So again, if we sample from normal population, I mean my sampling technique, I select a random sample from a normal population.", "tokens": [365, 914, 295, 1783, 2159, 6915, 2992, 293, 12771, 295, 1783, 2159, 6915, 12771, 670, 568, 13, 407, 797, 11, 498, 321, 6889, 490, 2710, 4415, 11, 286, 914, 452, 21179, 6532, 11, 286, 3048, 257, 4974, 6889, 490, 257, 2710, 4415, 13], "avg_logprob": -0.21644175899299709, "compression_ratio": 1.5511811023622046, "no_speech_prob": 0.0, "words": [{"start": 2302.51, "end": 2302.87, "word": " with", "probability": 0.640625}, {"start": 2302.87, "end": 2303.23, "word": " mean", "probability": 0.93994140625}, {"start": 2303.23, "end": 2304.09, "word": " of", "probability": 0.91064453125}, {"start": 2304.09, "end": 2304.31, "word": " X", "probability": 0.63134765625}, {"start": 2304.31, "end": 2304.49, "word": " bar", "probability": 0.853515625}, {"start": 2304.49, "end": 2304.83, "word": " equals", "probability": 0.84423828125}, {"start": 2304.83, "end": 2305.15, "word": " mu", "probability": 0.43701171875}, {"start": 2305.15, "end": 2305.75, "word": " and", "probability": 0.66455078125}, {"start": 2305.75, "end": 2306.11, "word": " sigma", "probability": 0.8486328125}, {"start": 2306.11, "end": 2306.27, "word": " of", "probability": 0.89892578125}, {"start": 2306.27, "end": 2306.43, "word": " X", "probability": 0.96728515625}, {"start": 2306.43, "end": 2306.67, "word": " bar", "probability": 0.9599609375}, {"start": 2306.67, "end": 2307.07, "word": " equals", "probability": 0.91259765625}, {"start": 2307.07, "end": 2307.41, "word": " sigma", "probability": 0.9326171875}, {"start": 2307.41, "end": 2307.67, "word": " over", "probability": 0.52294921875}, {"start": 2307.67, "end": 2307.95, "word": " 2.", "probability": 0.159423828125}, {"start": 2309.23, "end": 2309.61, "word": " So", "probability": 0.8447265625}, {"start": 2309.61, "end": 2309.89, "word": " again,", "probability": 0.71826171875}, {"start": 2311.77, "end": 2312.09, 
"word": " if", "probability": 0.95263671875}, {"start": 2312.09, "end": 2312.47, "word": " we", "probability": 0.95849609375}, {"start": 2312.47, "end": 2313.39, "word": " sample", "probability": 0.92138671875}, {"start": 2313.39, "end": 2314.63, "word": " from", "probability": 0.87646484375}, {"start": 2314.63, "end": 2315.47, "word": " normal", "probability": 0.6923828125}, {"start": 2315.47, "end": 2315.99, "word": " population,", "probability": 0.92578125}, {"start": 2316.53, "end": 2316.67, "word": " I", "probability": 0.958984375}, {"start": 2316.67, "end": 2316.81, "word": " mean", "probability": 0.97119140625}, {"start": 2316.81, "end": 2317.17, "word": " my", "probability": 0.6396484375}, {"start": 2317.17, "end": 2317.51, "word": " sampling", "probability": 0.96826171875}, {"start": 2317.51, "end": 2318.07, "word": " technique,", "probability": 0.93994140625}, {"start": 2318.85, "end": 2319.33, "word": " I", "probability": 0.98974609375}, {"start": 2319.33, "end": 2319.65, "word": " select", "probability": 0.8173828125}, {"start": 2319.65, "end": 2319.77, "word": " a", "probability": 0.931640625}, {"start": 2319.77, "end": 2319.97, "word": " random", "probability": 0.8642578125}, {"start": 2319.97, "end": 2320.43, "word": " sample", "probability": 0.90234375}, {"start": 2320.43, "end": 2321.37, "word": " from", "probability": 0.8662109375}, {"start": 2321.37, "end": 2321.61, "word": " a", "probability": 0.984375}, {"start": 2321.61, "end": 2321.85, "word": " normal", "probability": 0.89208984375}, {"start": 2321.85, "end": 2322.31, "word": " population.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 87, "seek": 234288, "start": 2323.58, "end": 2342.88, "text": " Then if we are interested in the standard distribution of X bar, then that distribution is normally distributed with mean equal to mu and standard deviation sigma over mu. So that's the shape. 
It's normal.", "tokens": [1396, 498, 321, 366, 3102, 294, 264, 3832, 7316, 295, 1783, 2159, 11, 550, 300, 7316, 307, 5646, 12631, 365, 914, 2681, 281, 2992, 293, 3832, 25163, 12771, 670, 2992, 13, 407, 300, 311, 264, 3909, 13, 467, 311, 2710, 13], "avg_logprob": -0.2775297590664455, "compression_ratio": 1.5147058823529411, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2323.58, "end": 2324.02, "word": " Then", "probability": 0.360595703125}, {"start": 2324.02, "end": 2324.42, "word": " if", "probability": 0.62939453125}, {"start": 2324.42, "end": 2325.22, "word": " we", "probability": 0.8017578125}, {"start": 2325.22, "end": 2325.46, "word": " are", "probability": 0.7822265625}, {"start": 2325.46, "end": 2325.98, "word": " interested", "probability": 0.87255859375}, {"start": 2325.98, "end": 2326.26, "word": " in", "probability": 0.9462890625}, {"start": 2326.26, "end": 2326.4, "word": " the", "probability": 0.85693359375}, {"start": 2326.4, "end": 2326.82, "word": " standard", "probability": 0.16796875}, {"start": 2326.82, "end": 2327.42, "word": " distribution", "probability": 0.72900390625}, {"start": 2327.42, "end": 2327.64, "word": " of", "probability": 0.94873046875}, {"start": 2327.64, "end": 2327.8, "word": " X", "probability": 0.68505859375}, {"start": 2327.8, "end": 2328.1, "word": " bar,", "probability": 0.86376953125}, {"start": 2328.96, "end": 2329.24, "word": " then", "probability": 0.85595703125}, {"start": 2329.24, "end": 2329.72, "word": " that", "probability": 0.92333984375}, {"start": 2329.72, "end": 2330.34, "word": " distribution", "probability": 0.8427734375}, {"start": 2330.34, "end": 2331.46, "word": " is", "probability": 0.94873046875}, {"start": 2331.46, "end": 2331.96, "word": " normally", "probability": 0.90673828125}, {"start": 2331.96, "end": 2332.64, "word": " distributed", "probability": 0.904296875}, {"start": 2332.64, "end": 2333.76, "word": " with", "probability": 0.86767578125}, {"start": 2333.76, "end": 2334.02, 
"word": " mean", "probability": 0.97802734375}, {"start": 2334.02, "end": 2334.28, "word": " equal", "probability": 0.72509765625}, {"start": 2334.28, "end": 2334.46, "word": " to", "probability": 0.5400390625}, {"start": 2334.46, "end": 2334.66, "word": " mu", "probability": 0.66796875}, {"start": 2334.66, "end": 2335.62, "word": " and", "probability": 0.78076171875}, {"start": 2335.62, "end": 2336.0, "word": " standard", "probability": 0.74365234375}, {"start": 2336.0, "end": 2336.42, "word": " deviation", "probability": 0.96142578125}, {"start": 2336.42, "end": 2337.56, "word": " sigma", "probability": 0.703125}, {"start": 2337.56, "end": 2338.26, "word": " over", "probability": 0.87109375}, {"start": 2338.26, "end": 2338.52, "word": " mu.", "probability": 0.398681640625}, {"start": 2338.76, "end": 2339.04, "word": " So", "probability": 0.9365234375}, {"start": 2339.04, "end": 2339.32, "word": " that's", "probability": 0.7802734375}, {"start": 2339.32, "end": 2339.5, "word": " the", "probability": 0.84814453125}, {"start": 2339.5, "end": 2339.84, "word": " shape.", "probability": 0.931640625}, {"start": 2341.78, "end": 2342.54, "word": " It's", "probability": 0.8505859375}, {"start": 2342.54, "end": 2342.88, "word": " normal.", "probability": 0.873046875}], "temperature": 1.0}, {"id": 88, "seek": 236967, "start": 2344.05, "end": 2369.67, "text": " The mean is the same as the population mean, and the standard deviation of x bar equals sigma over root n. So now let's go back to the z-score we discussed before. 
If you remember, I mentioned before that z-score, generally speaking,", "tokens": [440, 914, 307, 264, 912, 382, 264, 4415, 914, 11, 293, 264, 3832, 25163, 295, 2031, 2159, 6915, 12771, 670, 5593, 297, 13, 407, 586, 718, 311, 352, 646, 281, 264, 710, 12, 4417, 418, 321, 7152, 949, 13, 759, 291, 1604, 11, 286, 2835, 949, 300, 710, 12, 4417, 418, 11, 5101, 4124, 11], "avg_logprob": -0.1808035746216774, "compression_ratio": 1.471698113207547, "no_speech_prob": 0.0, "words": [{"start": 2344.05, "end": 2344.29, "word": " The", "probability": 0.55078125}, {"start": 2344.29, "end": 2344.49, "word": " mean", "probability": 0.92919921875}, {"start": 2344.49, "end": 2344.63, "word": " is", "probability": 0.9462890625}, {"start": 2344.63, "end": 2344.79, "word": " the", "probability": 0.8994140625}, {"start": 2344.79, "end": 2344.99, "word": " same", "probability": 0.90283203125}, {"start": 2344.99, "end": 2345.15, "word": " as", "probability": 0.96630859375}, {"start": 2345.15, "end": 2345.25, "word": " the", "probability": 0.90576171875}, {"start": 2345.25, "end": 2345.67, "word": " population", "probability": 0.9296875}, {"start": 2345.67, "end": 2346.07, "word": " mean,", "probability": 0.95654296875}, {"start": 2346.73, "end": 2346.89, "word": " and", "probability": 0.92724609375}, {"start": 2346.89, "end": 2347.07, "word": " the", "probability": 0.88671875}, {"start": 2347.07, "end": 2347.29, "word": " standard", "probability": 0.34619140625}, {"start": 2347.29, "end": 2347.67, "word": " deviation", "probability": 0.93603515625}, {"start": 2347.67, "end": 2347.89, "word": " of", "probability": 0.958984375}, {"start": 2347.89, "end": 2348.11, "word": " x", "probability": 0.49658203125}, {"start": 2348.11, "end": 2348.43, "word": " bar", "probability": 0.69775390625}, {"start": 2348.43, "end": 2349.03, "word": " equals", "probability": 0.86669921875}, {"start": 2349.03, "end": 2349.49, "word": " sigma", "probability": 0.84423828125}, {"start": 2349.49, "end": 2350.63, 
"word": " over", "probability": 0.89990234375}, {"start": 2350.63, "end": 2351.67, "word": " root", "probability": 0.9013671875}, {"start": 2351.67, "end": 2352.63, "word": " n.", "probability": 0.7041015625}, {"start": 2353.15, "end": 2353.51, "word": " So", "probability": 0.8984375}, {"start": 2353.51, "end": 2353.69, "word": " now", "probability": 0.7392578125}, {"start": 2353.69, "end": 2353.93, "word": " let's", "probability": 0.868408203125}, {"start": 2353.93, "end": 2354.11, "word": " go", "probability": 0.96240234375}, {"start": 2354.11, "end": 2354.55, "word": " back", "probability": 0.873046875}, {"start": 2354.55, "end": 2355.81, "word": " to", "probability": 0.9404296875}, {"start": 2355.81, "end": 2355.95, "word": " the", "probability": 0.91015625}, {"start": 2355.95, "end": 2356.13, "word": " z", "probability": 0.86474609375}, {"start": 2356.13, "end": 2356.47, "word": "-score", "probability": 0.8336588541666666}, {"start": 2356.47, "end": 2356.75, "word": " we", "probability": 0.89501953125}, {"start": 2356.75, "end": 2357.43, "word": " discussed", "probability": 0.875}, {"start": 2357.43, "end": 2357.93, "word": " before.", "probability": 0.85888671875}, {"start": 2359.19, "end": 2359.95, "word": " If", "probability": 0.9404296875}, {"start": 2359.95, "end": 2360.09, "word": " you", "probability": 0.962890625}, {"start": 2360.09, "end": 2360.47, "word": " remember,", "probability": 0.87353515625}, {"start": 2360.99, "end": 2361.13, "word": " I", "probability": 0.99609375}, {"start": 2361.13, "end": 2361.63, "word": " mentioned", "probability": 0.84326171875}, {"start": 2361.63, "end": 2365.15, "word": " before", "probability": 0.78857421875}, {"start": 2365.15, "end": 2365.77, "word": " that", "probability": 0.92138671875}, {"start": 2365.77, "end": 2367.21, "word": " z", "probability": 0.662109375}, {"start": 2367.21, "end": 2367.67, "word": "-score,", "probability": 0.8971354166666666}, {"start": 2368.75, "end": 2369.17, "word": " generally", 
"probability": 0.908203125}, {"start": 2369.17, "end": 2369.67, "word": " speaking,", "probability": 0.85888671875}], "temperature": 1.0}, {"id": 89, "seek": 239748, "start": 2370.92, "end": 2397.48, "text": " X minus the mean of X divided by sigma X. And we know that Z has standard normal distribution with mean zero and variance one. In this case, we are looking for the semi-distribution of X bar. So Z equal X bar.", "tokens": [1783, 3175, 264, 914, 295, 1783, 6666, 538, 12771, 1783, 13, 400, 321, 458, 300, 1176, 575, 3832, 2710, 7316, 365, 914, 4018, 293, 21977, 472, 13, 682, 341, 1389, 11, 321, 366, 1237, 337, 264, 12909, 12, 42649, 30783, 295, 1783, 2159, 13, 407, 1176, 2681, 1783, 2159, 13], "avg_logprob": -0.20450368231418087, "compression_ratio": 1.4093959731543624, "no_speech_prob": 0.0, "words": [{"start": 2370.92, "end": 2371.4, "word": " X", "probability": 0.26171875}, {"start": 2371.4, "end": 2372.26, "word": " minus", "probability": 0.96240234375}, {"start": 2372.26, "end": 2372.58, "word": " the", "probability": 0.853515625}, {"start": 2372.58, "end": 2372.72, "word": " mean", "probability": 0.931640625}, {"start": 2372.72, "end": 2372.86, "word": " of", "probability": 0.955078125}, {"start": 2372.86, "end": 2373.24, "word": " X", "probability": 0.9091796875}, {"start": 2373.24, "end": 2373.82, "word": " divided", "probability": 0.6611328125}, {"start": 2373.82, "end": 2374.08, "word": " by", "probability": 0.9775390625}, {"start": 2374.08, "end": 2374.4, "word": " sigma", "probability": 0.7099609375}, {"start": 2374.4, "end": 2374.74, "word": " X.", "probability": 0.9228515625}, {"start": 2377.64, "end": 2378.38, "word": " And", "probability": 0.93603515625}, {"start": 2378.38, "end": 2378.52, "word": " we", "probability": 0.9150390625}, {"start": 2378.52, "end": 2378.64, "word": " know", "probability": 0.8828125}, {"start": 2378.64, "end": 2378.92, "word": " that", "probability": 0.93359375}, {"start": 2378.92, "end": 2379.2, "word": " Z", 
"probability": 0.88525390625}, {"start": 2379.2, "end": 2380.34, "word": " has", "probability": 0.935546875}, {"start": 2380.34, "end": 2381.2, "word": " standard", "probability": 0.86865234375}, {"start": 2381.2, "end": 2381.62, "word": " normal", "probability": 0.76708984375}, {"start": 2381.62, "end": 2382.3, "word": " distribution", "probability": 0.82470703125}, {"start": 2382.3, "end": 2383.38, "word": " with", "probability": 0.74560546875}, {"start": 2383.38, "end": 2383.58, "word": " mean", "probability": 0.90283203125}, {"start": 2383.58, "end": 2383.86, "word": " zero", "probability": 0.517578125}, {"start": 2383.86, "end": 2384.04, "word": " and", "probability": 0.9296875}, {"start": 2384.04, "end": 2384.36, "word": " variance", "probability": 0.88916015625}, {"start": 2384.36, "end": 2384.6, "word": " one.", "probability": 0.83349609375}, {"start": 2387.28, "end": 2388.02, "word": " In", "probability": 0.9609375}, {"start": 2388.02, "end": 2388.28, "word": " this", "probability": 0.947265625}, {"start": 2388.28, "end": 2388.72, "word": " case,", "probability": 0.908203125}, {"start": 2389.32, "end": 2390.64, "word": " we", "probability": 0.9296875}, {"start": 2390.64, "end": 2390.8, "word": " are", "probability": 0.927734375}, {"start": 2390.8, "end": 2391.16, "word": " looking", "probability": 0.9130859375}, {"start": 2391.16, "end": 2391.72, "word": " for", "probability": 0.951171875}, {"start": 2391.72, "end": 2392.38, "word": " the", "probability": 0.9111328125}, {"start": 2392.38, "end": 2392.86, "word": " semi", "probability": 0.383544921875}, {"start": 2392.86, "end": 2393.4, "word": "-distribution", "probability": 0.8119303385416666}, {"start": 2393.4, "end": 2393.62, "word": " of", "probability": 0.94384765625}, {"start": 2393.62, "end": 2393.78, "word": " X", "probability": 0.8798828125}, {"start": 2393.78, "end": 2394.02, "word": " bar.", "probability": 0.7890625}, {"start": 2395.16, "end": 2395.9, "word": " So", "probability": 0.9609375}, 
{"start": 2395.9, "end": 2396.12, "word": " Z", "probability": 0.69921875}, {"start": 2396.12, "end": 2396.48, "word": " equal", "probability": 0.5390625}, {"start": 2396.48, "end": 2397.06, "word": " X", "probability": 0.67529296875}, {"start": 2397.06, "end": 2397.48, "word": " bar.", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 90, "seek": 242439, "start": 2398.67, "end": 2424.39, "text": " minus the mean of x bar divided by sigma of x bar. So the same equation, but different statistic. In the first one, we have x, for example, represents the score. Here, my sample statistic is the sample mean, which represents the average of the score. So x bar,", "tokens": [3175, 264, 914, 295, 2031, 2159, 6666, 538, 12771, 295, 2031, 2159, 13, 407, 264, 912, 5367, 11, 457, 819, 29588, 13, 682, 264, 700, 472, 11, 321, 362, 2031, 11, 337, 1365, 11, 8855, 264, 6175, 13, 1692, 11, 452, 6889, 29588, 307, 264, 6889, 914, 11, 597, 8855, 264, 4274, 295, 264, 6175, 13, 407, 2031, 2159, 11], "avg_logprob": -0.25909324161341935, "compression_ratio": 1.6211180124223603, "no_speech_prob": 0.0, "words": [{"start": 2398.6700000000005, "end": 2399.3500000000004, "word": " minus", "probability": 0.430908203125}, {"start": 2399.3500000000004, "end": 2400.03, "word": " the", "probability": 0.88427734375}, {"start": 2400.03, "end": 2400.35, "word": " mean", "probability": 0.94921875}, {"start": 2400.35, "end": 2400.77, "word": " of", "probability": 0.95703125}, {"start": 2400.77, "end": 2401.07, "word": " x", "probability": 0.515625}, {"start": 2401.07, "end": 2401.41, "word": " bar", "probability": 0.82373046875}, {"start": 2401.41, "end": 2402.99, "word": " divided", "probability": 0.50830078125}, {"start": 2402.99, "end": 2403.33, "word": " by", "probability": 0.9775390625}, {"start": 2403.33, "end": 2403.77, "word": " sigma", "probability": 0.77001953125}, {"start": 2403.77, "end": 2403.99, "word": " of", "probability": 0.71044921875}, {"start": 2403.99, "end": 2404.15, 
"word": " x", "probability": 0.98779296875}, {"start": 2404.15, "end": 2404.43, "word": " bar.", "probability": 0.91943359375}, {"start": 2405.65, "end": 2406.05, "word": " So", "probability": 0.8232421875}, {"start": 2406.05, "end": 2406.23, "word": " the", "probability": 0.57568359375}, {"start": 2406.23, "end": 2406.45, "word": " same", "probability": 0.8779296875}, {"start": 2406.45, "end": 2406.79, "word": " equation,", "probability": 0.6748046875}, {"start": 2407.95, "end": 2408.25, "word": " but", "probability": 0.92724609375}, {"start": 2408.25, "end": 2408.67, "word": " different", "probability": 0.8525390625}, {"start": 2408.67, "end": 2409.23, "word": " statistic.", "probability": 0.51611328125}, {"start": 2410.39, "end": 2410.67, "word": " In", "probability": 0.34326171875}, {"start": 2410.67, "end": 2410.77, "word": " the", "probability": 0.90283203125}, {"start": 2410.77, "end": 2410.97, "word": " first", "probability": 0.8857421875}, {"start": 2410.97, "end": 2411.17, "word": " one,", "probability": 0.92138671875}, {"start": 2411.33, "end": 2411.33, "word": " we", "probability": 0.94140625}, {"start": 2411.33, "end": 2411.67, "word": " have", "probability": 0.9423828125}, {"start": 2411.67, "end": 2412.73, "word": " x,", "probability": 0.94482421875}, {"start": 2413.31, "end": 2413.75, "word": " for", "probability": 0.94140625}, {"start": 2413.75, "end": 2414.09, "word": " example,", "probability": 0.9775390625}, {"start": 2414.35, "end": 2415.11, "word": " represents", "probability": 0.81884765625}, {"start": 2415.11, "end": 2415.77, "word": " the", "probability": 0.81689453125}, {"start": 2415.77, "end": 2416.21, "word": " score.", "probability": 0.76416015625}, {"start": 2417.73, "end": 2418.13, "word": " Here,", "probability": 0.8486328125}, {"start": 2418.33, "end": 2418.57, "word": " my", "probability": 0.9658203125}, {"start": 2418.57, "end": 2418.87, "word": " sample", "probability": 0.41845703125}, {"start": 2418.87, "end": 2419.43, "word": 
" statistic", "probability": 0.91943359375}, {"start": 2419.43, "end": 2419.93, "word": " is", "probability": 0.923828125}, {"start": 2419.93, "end": 2420.11, "word": " the", "probability": 0.90625}, {"start": 2420.11, "end": 2420.37, "word": " sample", "probability": 0.86474609375}, {"start": 2420.37, "end": 2420.71, "word": " mean,", "probability": 0.90771484375}, {"start": 2420.83, "end": 2420.99, "word": " which", "probability": 0.90771484375}, {"start": 2420.99, "end": 2421.57, "word": " represents", "probability": 0.89892578125}, {"start": 2421.57, "end": 2421.93, "word": " the", "probability": 0.92236328125}, {"start": 2421.93, "end": 2422.29, "word": " average", "probability": 0.787109375}, {"start": 2422.29, "end": 2422.49, "word": " of", "probability": 0.77734375}, {"start": 2422.49, "end": 2422.65, "word": " the", "probability": 0.81494140625}, {"start": 2422.65, "end": 2422.89, "word": " score.", "probability": 0.65380859375}, {"start": 2423.47, "end": 2423.85, "word": " So", "probability": 0.9521484375}, {"start": 2423.85, "end": 2424.09, "word": " x", "probability": 0.8603515625}, {"start": 2424.09, "end": 2424.39, "word": " bar,", "probability": 0.9296875}], "temperature": 1.0}, {"id": 91, "seek": 245260, "start": 2426.34, "end": 2452.6, "text": " minus its mean, I mean the mean of x bar, divided by its standard error. So x bar minus the mean of x bar divided by sigma of x bar. 
By using that mu of x bar equals mu, and sigma of x bar equals sigma over root n, we will end with this mu z square.", "tokens": [3175, 1080, 914, 11, 286, 914, 264, 914, 295, 2031, 2159, 11, 6666, 538, 1080, 3832, 6713, 13, 407, 2031, 2159, 3175, 264, 914, 295, 2031, 2159, 6666, 538, 12771, 295, 2031, 2159, 13, 3146, 1228, 300, 2992, 295, 2031, 2159, 6915, 2992, 11, 293, 12771, 295, 2031, 2159, 6915, 12771, 670, 5593, 297, 11, 321, 486, 917, 365, 341, 2992, 710, 3732, 13], "avg_logprob": -0.19771634615384615, "compression_ratio": 1.7241379310344827, "no_speech_prob": 0.0, "words": [{"start": 2426.34, "end": 2426.94, "word": " minus", "probability": 0.45849609375}, {"start": 2426.94, "end": 2427.54, "word": " its", "probability": 0.6201171875}, {"start": 2427.54, "end": 2427.94, "word": " mean,", "probability": 0.9619140625}, {"start": 2428.58, "end": 2428.78, "word": " I", "probability": 0.92822265625}, {"start": 2428.78, "end": 2428.9, "word": " mean", "probability": 0.966796875}, {"start": 2428.9, "end": 2429.06, "word": " the", "probability": 0.70166015625}, {"start": 2429.06, "end": 2429.16, "word": " mean", "probability": 0.8115234375}, {"start": 2429.16, "end": 2429.28, "word": " of", "probability": 0.9609375}, {"start": 2429.28, "end": 2429.46, "word": " x", "probability": 0.76123046875}, {"start": 2429.46, "end": 2429.74, "word": " bar,", "probability": 0.912109375}, {"start": 2430.2, "end": 2430.88, "word": " divided", "probability": 0.77490234375}, {"start": 2430.88, "end": 2431.3, "word": " by", "probability": 0.9716796875}, {"start": 2431.3, "end": 2431.84, "word": " its", "probability": 0.8056640625}, {"start": 2431.84, "end": 2432.54, "word": " standard", "probability": 0.92724609375}, {"start": 2432.54, "end": 2433.54, "word": " error.", "probability": 0.280517578125}, {"start": 2434.84, "end": 2435.36, "word": " So", "probability": 0.95361328125}, {"start": 2435.36, "end": 2435.56, "word": " x", "probability": 0.875}, {"start": 2435.56, "end": 
2435.88, "word": " bar", "probability": 0.9375}, {"start": 2435.88, "end": 2437.28, "word": " minus", "probability": 0.94580078125}, {"start": 2437.28, "end": 2437.52, "word": " the", "probability": 0.92236328125}, {"start": 2437.52, "end": 2437.66, "word": " mean", "probability": 0.96240234375}, {"start": 2437.66, "end": 2437.78, "word": " of", "probability": 0.9599609375}, {"start": 2437.78, "end": 2437.94, "word": " x", "probability": 0.99462890625}, {"start": 2437.94, "end": 2438.24, "word": " bar", "probability": 0.9453125}, {"start": 2438.24, "end": 2438.6, "word": " divided", "probability": 0.71630859375}, {"start": 2438.6, "end": 2438.8, "word": " by", "probability": 0.9765625}, {"start": 2438.8, "end": 2439.12, "word": " sigma", "probability": 0.87158203125}, {"start": 2439.12, "end": 2439.28, "word": " of", "probability": 0.74609375}, {"start": 2439.28, "end": 2439.4, "word": " x", "probability": 0.99658203125}, {"start": 2439.4, "end": 2439.62, "word": " bar.", "probability": 0.94873046875}, {"start": 2440.4, "end": 2441.0, "word": " By", "probability": 0.97021484375}, {"start": 2441.0, "end": 2441.46, "word": " using", "probability": 0.93115234375}, {"start": 2441.46, "end": 2444.04, "word": " that", "probability": 0.70166015625}, {"start": 2444.04, "end": 2444.6, "word": " mu", "probability": 0.74853515625}, {"start": 2444.6, "end": 2444.78, "word": " of", "probability": 0.82958984375}, {"start": 2444.78, "end": 2445.22, "word": " x", "probability": 0.9892578125}, {"start": 2445.22, "end": 2445.42, "word": " bar", "probability": 0.9580078125}, {"start": 2445.42, "end": 2445.72, "word": " equals", "probability": 0.91796875}, {"start": 2445.72, "end": 2446.06, "word": " mu,", "probability": 0.91015625}, {"start": 2447.2, "end": 2447.46, "word": " and", "probability": 0.83935546875}, {"start": 2447.46, "end": 2447.74, "word": " sigma", "probability": 0.923828125}, {"start": 2447.74, "end": 2447.88, "word": " of", "probability": 0.9267578125}, {"start": 
2447.88, "end": 2448.02, "word": " x", "probability": 0.9970703125}, {"start": 2448.02, "end": 2448.24, "word": " bar", "probability": 0.9609375}, {"start": 2448.24, "end": 2448.54, "word": " equals", "probability": 0.9326171875}, {"start": 2448.54, "end": 2448.84, "word": " sigma", "probability": 0.9453125}, {"start": 2448.84, "end": 2449.1, "word": " over", "probability": 0.72265625}, {"start": 2449.1, "end": 2449.3, "word": " root", "probability": 0.93994140625}, {"start": 2449.3, "end": 2449.58, "word": " n,", "probability": 0.646484375}, {"start": 2450.22, "end": 2450.52, "word": " we", "probability": 0.93896484375}, {"start": 2450.52, "end": 2450.74, "word": " will", "probability": 0.88720703125}, {"start": 2450.74, "end": 2451.0, "word": " end", "probability": 0.8955078125}, {"start": 2451.0, "end": 2451.24, "word": " with", "probability": 0.8857421875}, {"start": 2451.24, "end": 2451.6, "word": " this", "probability": 0.9453125}, {"start": 2451.6, "end": 2452.04, "word": " mu", "probability": 0.257568359375}, {"start": 2452.04, "end": 2452.34, "word": " z", "probability": 0.75244140625}, {"start": 2452.34, "end": 2452.6, "word": " square.", "probability": 0.60791015625}], "temperature": 1.0}, {"id": 92, "seek": 248248, "start": 2456.31, "end": 2482.49, "text": " So this equation will be used instead of using the previous one. So z square equals sigma, I'm sorry, z equals x bar minus the mean divided by sigma bar, where x bar the sample mean, mu the population mean, sigma population standard deviation, and n is the sample size. 
So that's the difference between chapter six,", "tokens": [407, 341, 5367, 486, 312, 1143, 2602, 295, 1228, 264, 3894, 472, 13, 407, 710, 3732, 6915, 12771, 11, 286, 478, 2597, 11, 710, 6915, 2031, 2159, 3175, 264, 914, 6666, 538, 12771, 2159, 11, 689, 2031, 2159, 264, 6889, 914, 11, 2992, 264, 4415, 914, 11, 12771, 4415, 3832, 25163, 11, 293, 297, 307, 264, 6889, 2744, 13, 407, 300, 311, 264, 2649, 1296, 7187, 2309, 11], "avg_logprob": -0.2549818702366041, "compression_ratio": 1.6544502617801047, "no_speech_prob": 0.0, "words": [{"start": 2456.31, "end": 2456.63, "word": " So", "probability": 0.779296875}, {"start": 2456.63, "end": 2457.09, "word": " this", "probability": 0.7392578125}, {"start": 2457.09, "end": 2457.65, "word": " equation", "probability": 0.9697265625}, {"start": 2457.65, "end": 2457.93, "word": " will", "probability": 0.75244140625}, {"start": 2457.93, "end": 2458.09, "word": " be", "probability": 0.9482421875}, {"start": 2458.09, "end": 2458.47, "word": " used", "probability": 0.912109375}, {"start": 2458.47, "end": 2458.97, "word": " instead", "probability": 0.79833984375}, {"start": 2458.97, "end": 2459.29, "word": " of", "probability": 0.95849609375}, {"start": 2459.29, "end": 2459.79, "word": " using", "probability": 0.93017578125}, {"start": 2459.79, "end": 2460.79, "word": " the", "probability": 0.8828125}, {"start": 2460.79, "end": 2461.09, "word": " previous", "probability": 0.7685546875}, {"start": 2461.09, "end": 2461.45, "word": " one.", "probability": 0.91455078125}, {"start": 2461.77, "end": 2461.91, "word": " So", "probability": 0.87744140625}, {"start": 2461.91, "end": 2462.07, "word": " z", "probability": 0.403076171875}, {"start": 2462.07, "end": 2462.37, "word": " square", "probability": 0.268310546875}, {"start": 2462.37, "end": 2462.87, "word": " equals", "probability": 0.82373046875}, {"start": 2462.87, "end": 2463.29, "word": " sigma,", "probability": 0.80908203125}, {"start": 2464.21, "end": 2464.47, "word": " I'm", 
"probability": 0.812744140625}, {"start": 2464.47, "end": 2464.65, "word": " sorry,", "probability": 0.87255859375}, {"start": 2464.77, "end": 2465.03, "word": " z", "probability": 0.84326171875}, {"start": 2465.03, "end": 2465.45, "word": " equals", "probability": 0.8701171875}, {"start": 2465.45, "end": 2465.69, "word": " x", "probability": 0.9228515625}, {"start": 2465.69, "end": 2466.01, "word": " bar", "probability": 0.83935546875}, {"start": 2466.01, "end": 2467.27, "word": " minus", "probability": 0.9111328125}, {"start": 2467.27, "end": 2467.51, "word": " the", "probability": 0.84130859375}, {"start": 2467.51, "end": 2467.65, "word": " mean", "probability": 0.91845703125}, {"start": 2467.65, "end": 2467.97, "word": " divided", "probability": 0.703125}, {"start": 2467.97, "end": 2468.15, "word": " by", "probability": 0.97265625}, {"start": 2468.15, "end": 2468.47, "word": " sigma", "probability": 0.888671875}, {"start": 2468.47, "end": 2468.77, "word": " bar,", "probability": 0.3505859375}, {"start": 2469.13, "end": 2469.51, "word": " where", "probability": 0.921875}, {"start": 2469.51, "end": 2470.47, "word": " x", "probability": 0.98876953125}, {"start": 2470.47, "end": 2470.85, "word": " bar", "probability": 0.9384765625}, {"start": 2470.85, "end": 2471.81, "word": " the", "probability": 0.25830078125}, {"start": 2471.81, "end": 2472.05, "word": " sample", "probability": 0.876953125}, {"start": 2472.05, "end": 2472.37, "word": " mean,", "probability": 0.9365234375}, {"start": 2472.91, "end": 2473.15, "word": " mu", "probability": 0.69775390625}, {"start": 2473.15, "end": 2473.31, "word": " the", "probability": 0.8212890625}, {"start": 2473.31, "end": 2473.65, "word": " population", "probability": 0.970703125}, {"start": 2473.65, "end": 2473.99, "word": " mean,", "probability": 0.9560546875}, {"start": 2474.75, "end": 2475.09, "word": " sigma", "probability": 0.931640625}, {"start": 2475.09, "end": 2475.63, "word": " population", "probability": 
0.90185546875}, {"start": 2475.63, "end": 2475.99, "word": " standard", "probability": 0.441650390625}, {"start": 2475.99, "end": 2476.41, "word": " deviation,", "probability": 0.9736328125}, {"start": 2476.71, "end": 2476.83, "word": " and", "probability": 0.9326171875}, {"start": 2476.83, "end": 2477.09, "word": " n", "probability": 0.71142578125}, {"start": 2477.09, "end": 2477.95, "word": " is", "probability": 0.93994140625}, {"start": 2477.95, "end": 2478.11, "word": " the", "probability": 0.92138671875}, {"start": 2478.11, "end": 2478.39, "word": " sample", "probability": 0.90478515625}, {"start": 2478.39, "end": 2478.85, "word": " size.", "probability": 0.86181640625}, {"start": 2479.15, "end": 2479.35, "word": " So", "probability": 0.95751953125}, {"start": 2479.35, "end": 2479.65, "word": " that's", "probability": 0.949951171875}, {"start": 2479.65, "end": 2479.81, "word": " the", "probability": 0.91796875}, {"start": 2479.81, "end": 2480.17, "word": " difference", "probability": 0.87158203125}, {"start": 2480.17, "end": 2480.65, "word": " between", "probability": 0.8759765625}, {"start": 2480.65, "end": 2482.01, "word": " chapter", "probability": 0.654296875}, {"start": 2482.01, "end": 2482.49, "word": " six,", "probability": 0.53515625}], "temperature": 1.0}, {"id": 93, "seek": 251201, "start": 2485.11, "end": 2512.01, "text": " And that one we have only x minus y by sigma. Here we are interested in x bar minus the mean of x bar which is mu. And sigma of x bar equals sigma of r. 
Now when we are saying that mu of x bar equals mu,", "tokens": [400, 300, 472, 321, 362, 787, 2031, 3175, 288, 538, 12771, 13, 1692, 321, 366, 3102, 294, 2031, 2159, 3175, 264, 914, 295, 2031, 2159, 597, 307, 2992, 13, 400, 12771, 295, 2031, 2159, 6915, 12771, 295, 367, 13, 823, 562, 321, 366, 1566, 300, 2992, 295, 2031, 2159, 6915, 2992, 11], "avg_logprob": -0.2976120305511187, "compression_ratio": 1.5813953488372092, "no_speech_prob": 0.0, "words": [{"start": 2485.11, "end": 2485.63, "word": " And", "probability": 0.5966796875}, {"start": 2485.63, "end": 2486.11, "word": " that", "probability": 0.1939697265625}, {"start": 2486.11, "end": 2487.97, "word": " one", "probability": 0.7734375}, {"start": 2487.97, "end": 2488.13, "word": " we", "probability": 0.6953125}, {"start": 2488.13, "end": 2488.39, "word": " have", "probability": 0.92822265625}, {"start": 2488.39, "end": 2488.73, "word": " only", "probability": 0.88671875}, {"start": 2488.73, "end": 2489.15, "word": " x", "probability": 0.80908203125}, {"start": 2489.15, "end": 2490.89, "word": " minus", "probability": 0.35302734375}, {"start": 2490.89, "end": 2491.19, "word": " y", "probability": 0.289794921875}, {"start": 2491.19, "end": 2491.49, "word": " by", "probability": 0.6279296875}, {"start": 2491.49, "end": 2491.81, "word": " sigma.", "probability": 0.90087890625}, {"start": 2492.33, "end": 2492.75, "word": " Here", "probability": 0.407958984375}, {"start": 2492.75, "end": 2493.05, "word": " we", "probability": 0.67822265625}, {"start": 2493.05, "end": 2493.21, "word": " are", "probability": 0.8125}, {"start": 2493.21, "end": 2493.55, "word": " interested", "probability": 0.7998046875}, {"start": 2493.55, "end": 2493.73, "word": " in", "probability": 0.90966796875}, {"start": 2493.73, "end": 2493.93, "word": " x", "probability": 0.89892578125}, {"start": 2493.93, "end": 2494.25, "word": " bar", "probability": 0.767578125}, {"start": 2494.25, "end": 2495.19, "word": " minus", "probability": 0.96875}, 
{"start": 2495.19, "end": 2495.49, "word": " the", "probability": 0.90234375}, {"start": 2495.49, "end": 2495.73, "word": " mean", "probability": 0.9677734375}, {"start": 2495.73, "end": 2496.05, "word": " of", "probability": 0.958984375}, {"start": 2496.05, "end": 2496.27, "word": " x", "probability": 0.98974609375}, {"start": 2496.27, "end": 2496.45, "word": " bar", "probability": 0.92919921875}, {"start": 2496.45, "end": 2496.69, "word": " which", "probability": 0.650390625}, {"start": 2496.69, "end": 2496.81, "word": " is", "probability": 0.95556640625}, {"start": 2496.81, "end": 2497.05, "word": " mu.", "probability": 0.61328125}, {"start": 2497.93, "end": 2498.53, "word": " And", "probability": 0.93310546875}, {"start": 2498.53, "end": 2498.83, "word": " sigma", "probability": 0.9248046875}, {"start": 2498.83, "end": 2499.01, "word": " of", "probability": 0.70556640625}, {"start": 2499.01, "end": 2499.11, "word": " x", "probability": 0.990234375}, {"start": 2499.11, "end": 2499.31, "word": " bar", "probability": 0.94580078125}, {"start": 2499.31, "end": 2499.57, "word": " equals", "probability": 0.92529296875}, {"start": 2499.57, "end": 2499.89, "word": " sigma", "probability": 0.9541015625}, {"start": 2499.89, "end": 2500.11, "word": " of", "probability": 0.48095703125}, {"start": 2500.11, "end": 2500.29, "word": " r.", "probability": 0.7177734375}, {"start": 2507.97, "end": 2508.57, "word": " Now", "probability": 0.88232421875}, {"start": 2508.57, "end": 2508.89, "word": " when", "probability": 0.57666015625}, {"start": 2508.89, "end": 2509.01, "word": " we", "probability": 0.85595703125}, {"start": 2509.01, "end": 2509.17, "word": " are", "probability": 0.93017578125}, {"start": 2509.17, "end": 2509.51, "word": " saying", "probability": 0.85595703125}, {"start": 2509.51, "end": 2509.99, "word": " that", "probability": 0.9091796875}, {"start": 2509.99, "end": 2510.57, "word": " mu", "probability": 0.40966796875}, {"start": 2510.57, "end": 2510.73, "word": " 
of", "probability": 0.84814453125}, {"start": 2510.73, "end": 2510.95, "word": " x", "probability": 0.9912109375}, {"start": 2510.95, "end": 2511.23, "word": " bar", "probability": 0.94873046875}, {"start": 2511.23, "end": 2511.67, "word": " equals", "probability": 0.92578125}, {"start": 2511.67, "end": 2512.01, "word": " mu,", "probability": 0.93896484375}], "temperature": 1.0}, {"id": 94, "seek": 254398, "start": 2514.53, "end": 2543.99, "text": " That means the expected value of the sample mean equals the population mean. When we are saying mean of X bar equals mu, it means the expected value of X bar equals mu. In other words, the expected of X bar equals mu. If this happens, we say that X bar", "tokens": [663, 1355, 264, 5176, 2158, 295, 264, 6889, 914, 6915, 264, 4415, 914, 13, 1133, 321, 366, 1566, 914, 295, 1783, 2159, 6915, 2992, 11, 309, 1355, 264, 5176, 2158, 295, 1783, 2159, 6915, 2992, 13, 682, 661, 2283, 11, 264, 5176, 295, 1783, 2159, 6915, 2992, 13, 759, 341, 2314, 11, 321, 584, 300, 1783, 2159], "avg_logprob": -0.1641972016157775, "compression_ratio": 1.874074074074074, "no_speech_prob": 0.0, "words": [{"start": 2514.53, "end": 2514.91, "word": " That", "probability": 0.35888671875}, {"start": 2514.91, "end": 2515.29, "word": " means", "probability": 0.89501953125}, {"start": 2515.29, "end": 2516.03, "word": " the", "probability": 0.6396484375}, {"start": 2516.03, "end": 2516.55, "word": " expected", "probability": 0.87744140625}, {"start": 2516.55, "end": 2517.05, "word": " value", "probability": 0.97119140625}, {"start": 2517.05, "end": 2521.69, "word": " of", "probability": 0.857421875}, {"start": 2521.69, "end": 2522.01, "word": " the", "probability": 0.81396484375}, {"start": 2522.01, "end": 2522.29, "word": " sample", "probability": 0.88916015625}, {"start": 2522.29, "end": 2522.65, "word": " mean", "probability": 0.93603515625}, {"start": 2522.65, "end": 2523.29, "word": " equals", "probability": 0.611328125}, {"start": 2523.29, "end": 
2523.81, "word": " the", "probability": 0.83984375}, {"start": 2523.81, "end": 2524.17, "word": " population", "probability": 0.92724609375}, {"start": 2524.17, "end": 2524.51, "word": " mean.", "probability": 0.955078125}, {"start": 2525.07, "end": 2525.59, "word": " When", "probability": 0.822265625}, {"start": 2525.59, "end": 2525.69, "word": " we", "probability": 0.8095703125}, {"start": 2525.69, "end": 2525.81, "word": " are", "probability": 0.765625}, {"start": 2525.81, "end": 2526.15, "word": " saying", "probability": 0.9140625}, {"start": 2526.15, "end": 2526.49, "word": " mean", "probability": 0.62646484375}, {"start": 2526.49, "end": 2526.61, "word": " of", "probability": 0.93359375}, {"start": 2526.61, "end": 2526.77, "word": " X", "probability": 0.55078125}, {"start": 2526.77, "end": 2526.93, "word": " bar", "probability": 0.82958984375}, {"start": 2526.93, "end": 2527.19, "word": " equals", "probability": 0.85498046875}, {"start": 2527.19, "end": 2527.47, "word": " mu,", "probability": 0.6474609375}, {"start": 2528.01, "end": 2528.27, "word": " it", "probability": 0.91943359375}, {"start": 2528.27, "end": 2528.61, "word": " means", "probability": 0.931640625}, {"start": 2528.61, "end": 2529.29, "word": " the", "probability": 0.80712890625}, {"start": 2529.29, "end": 2529.77, "word": " expected", "probability": 0.90380859375}, {"start": 2529.77, "end": 2530.19, "word": " value", "probability": 0.97607421875}, {"start": 2530.19, "end": 2530.39, "word": " of", "probability": 0.96728515625}, {"start": 2530.39, "end": 2530.63, "word": " X", "probability": 0.95166015625}, {"start": 2530.63, "end": 2530.99, "word": " bar", "probability": 0.93212890625}, {"start": 2530.99, "end": 2531.73, "word": " equals", "probability": 0.912109375}, {"start": 2531.73, "end": 2532.03, "word": " mu.", "probability": 0.9267578125}, {"start": 2532.77, "end": 2533.03, "word": " In", "probability": 0.9091796875}, {"start": 2533.03, "end": 2533.27, "word": " other", "probability": 
0.8916015625}, {"start": 2533.27, "end": 2533.75, "word": " words,", "probability": 0.87841796875}, {"start": 2534.51, "end": 2534.67, "word": " the", "probability": 0.8388671875}, {"start": 2534.67, "end": 2535.09, "word": " expected", "probability": 0.91552734375}, {"start": 2535.09, "end": 2536.23, "word": " of", "probability": 0.58154296875}, {"start": 2536.23, "end": 2536.57, "word": " X", "probability": 0.98095703125}, {"start": 2536.57, "end": 2536.93, "word": " bar", "probability": 0.95263671875}, {"start": 2536.93, "end": 2537.71, "word": " equals", "probability": 0.9189453125}, {"start": 2537.71, "end": 2538.03, "word": " mu.", "probability": 0.958984375}, {"start": 2539.69, "end": 2540.41, "word": " If", "probability": 0.9384765625}, {"start": 2540.41, "end": 2540.67, "word": " this", "probability": 0.9443359375}, {"start": 2540.67, "end": 2541.19, "word": " happens,", "probability": 0.95458984375}, {"start": 2541.83, "end": 2542.09, "word": " we", "probability": 0.94677734375}, {"start": 2542.09, "end": 2542.37, "word": " say", "probability": 0.80078125}, {"start": 2542.37, "end": 2542.71, "word": " that", "probability": 0.9375}, {"start": 2542.71, "end": 2543.63, "word": " X", "probability": 0.9453125}, {"start": 2543.63, "end": 2543.99, "word": " bar", "probability": 0.94189453125}], "temperature": 1.0}, {"id": 95, "seek": 256154, "start": 2545.54, "end": 2561.54, "text": " is an unbiased estimator of mu. 
So this is a new definition, unbiased estimator", "tokens": [307, 364, 517, 5614, 1937, 8017, 1639, 295, 2992, 13, 407, 341, 307, 257, 777, 7123, 11, 517, 5614, 1937, 8017, 1639], "avg_logprob": -0.2749660429747208, "compression_ratio": 1.1940298507462686, "no_speech_prob": 0.0, "words": [{"start": 2545.54, "end": 2546.0, "word": " is", "probability": 0.37744140625}, {"start": 2546.0, "end": 2547.2, "word": " an", "probability": 0.85546875}, {"start": 2547.2, "end": 2547.9, "word": " unbiased", "probability": 0.93212890625}, {"start": 2547.9, "end": 2551.42, "word": " estimator", "probability": 0.926025390625}, {"start": 2551.42, "end": 2555.58, "word": " of", "probability": 0.9384765625}, {"start": 2555.58, "end": 2555.82, "word": " mu.", "probability": 0.32470703125}, {"start": 2557.54, "end": 2558.52, "word": " So", "probability": 0.68359375}, {"start": 2558.52, "end": 2558.72, "word": " this", "probability": 0.495361328125}, {"start": 2558.72, "end": 2558.84, "word": " is", "probability": 0.80859375}, {"start": 2558.84, "end": 2558.9, "word": " a", "probability": 0.83251953125}, {"start": 2558.9, "end": 2559.06, "word": " new", "probability": 0.92333984375}, {"start": 2559.06, "end": 2559.66, "word": " definition,", "probability": 0.91845703125}, {"start": 2560.06, "end": 2560.62, "word": " unbiased", "probability": 0.8986002604166666}, {"start": 2560.62, "end": 2561.54, "word": " estimator", "probability": 0.94140625}], "temperature": 1.0}, {"id": 96, "seek": 258244, "start": 2562.53, "end": 2582.45, "text": " X bar is called unbiased estimator if this condition is satisfied. I mean, if the mean of X bar or if the expected value of X bar equals the population mean, in this case, we say that X bar is good estimator of Mu. 
Because on average,", "tokens": [1783, 2159, 307, 1219, 517, 5614, 1937, 8017, 1639, 498, 341, 4188, 307, 11239, 13, 286, 914, 11, 498, 264, 914, 295, 1783, 2159, 420, 498, 264, 5176, 2158, 295, 1783, 2159, 6915, 264, 4415, 914, 11, 294, 341, 1389, 11, 321, 584, 300, 1783, 2159, 307, 665, 8017, 1639, 295, 15601, 13, 1436, 322, 4274, 11], "avg_logprob": -0.17268318991208897, "compression_ratio": 1.5771812080536913, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2562.53, "end": 2562.89, "word": " X", "probability": 0.51513671875}, {"start": 2562.89, "end": 2563.07, "word": " bar", "probability": 0.7744140625}, {"start": 2563.07, "end": 2563.27, "word": " is", "probability": 0.93212890625}, {"start": 2563.27, "end": 2563.63, "word": " called", "probability": 0.88916015625}, {"start": 2563.63, "end": 2564.31, "word": " unbiased", "probability": 0.93603515625}, {"start": 2564.31, "end": 2564.81, "word": " estimator", "probability": 0.949951171875}, {"start": 2564.81, "end": 2565.49, "word": " if", "probability": 0.8291015625}, {"start": 2565.49, "end": 2565.97, "word": " this", "probability": 0.82373046875}, {"start": 2565.97, "end": 2566.35, "word": " condition", "probability": 0.943359375}, {"start": 2566.35, "end": 2566.55, "word": " is", "probability": 0.9541015625}, {"start": 2566.55, "end": 2567.09, "word": " satisfied.", "probability": 0.86083984375}, {"start": 2567.87, "end": 2568.01, "word": " I", "probability": 0.9296875}, {"start": 2568.01, "end": 2568.27, "word": " mean,", "probability": 0.962890625}, {"start": 2568.83, "end": 2569.03, "word": " if", "probability": 0.947265625}, {"start": 2569.03, "end": 2569.25, "word": " the", "probability": 0.916015625}, {"start": 2569.25, "end": 2569.41, "word": " mean", "probability": 0.97607421875}, {"start": 2569.41, "end": 2569.53, "word": " of", "probability": 0.97021484375}, {"start": 2569.53, "end": 2569.75, "word": " X", "probability": 0.73828125}, {"start": 2569.75, "end": 2570.11, "word": " bar", 
"probability": 0.95751953125}, {"start": 2570.11, "end": 2571.39, "word": " or", "probability": 0.69287109375}, {"start": 2571.39, "end": 2571.73, "word": " if", "probability": 0.912109375}, {"start": 2571.73, "end": 2571.93, "word": " the", "probability": 0.90673828125}, {"start": 2571.93, "end": 2572.49, "word": " expected", "probability": 0.8720703125}, {"start": 2572.49, "end": 2572.89, "word": " value", "probability": 0.97509765625}, {"start": 2572.89, "end": 2573.09, "word": " of", "probability": 0.9638671875}, {"start": 2573.09, "end": 2573.27, "word": " X", "probability": 0.9794921875}, {"start": 2573.27, "end": 2573.63, "word": " bar", "probability": 0.96533203125}, {"start": 2573.63, "end": 2574.45, "word": " equals", "probability": 0.916015625}, {"start": 2574.45, "end": 2574.73, "word": " the", "probability": 0.87939453125}, {"start": 2574.73, "end": 2575.13, "word": " population", "probability": 0.95947265625}, {"start": 2575.13, "end": 2575.53, "word": " mean,", "probability": 0.96923828125}, {"start": 2575.99, "end": 2576.15, "word": " in", "probability": 0.923828125}, {"start": 2576.15, "end": 2576.43, "word": " this", "probability": 0.94580078125}, {"start": 2576.43, "end": 2576.75, "word": " case,", "probability": 0.91259765625}, {"start": 2576.81, "end": 2576.93, "word": " we", "probability": 0.943359375}, {"start": 2576.93, "end": 2577.15, "word": " say", "probability": 0.9150390625}, {"start": 2577.15, "end": 2577.47, "word": " that", "probability": 0.9345703125}, {"start": 2577.47, "end": 2577.79, "word": " X", "probability": 0.9619140625}, {"start": 2577.79, "end": 2578.11, "word": " bar", "probability": 0.953125}, {"start": 2578.11, "end": 2579.09, "word": " is", "probability": 0.955078125}, {"start": 2579.09, "end": 2579.33, "word": " good", "probability": 0.56591796875}, {"start": 2579.33, "end": 2579.89, "word": " estimator", "probability": 0.965087890625}, {"start": 2579.89, "end": 2580.09, "word": " of", "probability": 0.9716796875}, 
{"start": 2580.09, "end": 2580.25, "word": " Mu.", "probability": 0.41943359375}, {"start": 2581.33, "end": 2581.81, "word": " Because", "probability": 0.89501953125}, {"start": 2581.81, "end": 2581.97, "word": " on", "probability": 0.72607421875}, {"start": 2581.97, "end": 2582.45, "word": " average,", "probability": 0.7607421875}], "temperature": 1.0}, {"id": 97, "seek": 261055, "start": 2585.43, "end": 2610.55, "text": " Expected value of X bar equals the population mean, so in this case X bar is L by estimator of Mu. Now if you compare the two distributions, normal distribution here with population mean Mu and standard deviation for example sigma.", "tokens": [2111, 10729, 2158, 295, 1783, 2159, 6915, 264, 4415, 914, 11, 370, 294, 341, 1389, 1783, 2159, 307, 441, 538, 8017, 1639, 295, 15601, 13, 823, 498, 291, 6794, 264, 732, 37870, 11, 2710, 7316, 510, 365, 4415, 914, 15601, 293, 3832, 25163, 337, 1365, 12771, 13], "avg_logprob": -0.30371095053851604, "compression_ratio": 1.4871794871794872, "no_speech_prob": 0.0, "words": [{"start": 2585.43, "end": 2585.97, "word": " Expected", "probability": 0.55670166015625}, {"start": 2585.97, "end": 2586.27, "word": " value", "probability": 0.73193359375}, {"start": 2586.27, "end": 2586.41, "word": " of", "probability": 0.8388671875}, {"start": 2586.41, "end": 2586.57, "word": " X", "probability": 0.65966796875}, {"start": 2586.57, "end": 2586.79, "word": " bar", "probability": 0.796875}, {"start": 2586.79, "end": 2587.27, "word": " equals", "probability": 0.7646484375}, {"start": 2587.27, "end": 2587.85, "word": " the", "probability": 0.74169921875}, {"start": 2587.85, "end": 2588.23, "word": " population", "probability": 0.94677734375}, {"start": 2588.23, "end": 2589.65, "word": " mean,", "probability": 0.8974609375}, {"start": 2589.99, "end": 2590.13, "word": " so", "probability": 0.603515625}, {"start": 2590.13, "end": 2590.25, "word": " in", "probability": 0.87548828125}, {"start": 2590.25, "end": 2590.41, "word": " 
this", "probability": 0.951171875}, {"start": 2590.41, "end": 2590.69, "word": " case", "probability": 0.912109375}, {"start": 2590.69, "end": 2591.05, "word": " X", "probability": 0.69140625}, {"start": 2591.05, "end": 2591.41, "word": " bar", "probability": 0.93017578125}, {"start": 2591.41, "end": 2592.25, "word": " is", "probability": 0.8837890625}, {"start": 2592.25, "end": 2592.83, "word": " L", "probability": 0.28662109375}, {"start": 2592.83, "end": 2593.05, "word": " by", "probability": 0.9033203125}, {"start": 2593.05, "end": 2593.91, "word": " estimator", "probability": 0.743408203125}, {"start": 2593.91, "end": 2594.97, "word": " of", "probability": 0.70556640625}, {"start": 2594.97, "end": 2595.21, "word": " Mu.", "probability": 0.44873046875}, {"start": 2596.29, "end": 2596.57, "word": " Now", "probability": 0.89501953125}, {"start": 2596.57, "end": 2596.75, "word": " if", "probability": 0.72802734375}, {"start": 2596.75, "end": 2596.89, "word": " you", "probability": 0.82373046875}, {"start": 2596.89, "end": 2597.47, "word": " compare", "probability": 0.93896484375}, {"start": 2597.47, "end": 2598.25, "word": " the", "probability": 0.875}, {"start": 2598.25, "end": 2598.73, "word": " two", "probability": 0.89599609375}, {"start": 2598.73, "end": 2600.41, "word": " distributions,", "probability": 0.86376953125}, {"start": 2602.03, "end": 2603.69, "word": " normal", "probability": 0.8017578125}, {"start": 2603.69, "end": 2604.33, "word": " distribution", "probability": 0.82763671875}, {"start": 2604.33, "end": 2604.73, "word": " here", "probability": 0.8134765625}, {"start": 2604.73, "end": 2605.99, "word": " with", "probability": 0.708984375}, {"start": 2605.99, "end": 2606.61, "word": " population", "probability": 0.94580078125}, {"start": 2606.61, "end": 2607.17, "word": " mean", "probability": 0.927734375}, {"start": 2607.17, "end": 2607.51, "word": " Mu", "probability": 0.9052734375}, {"start": 2607.51, "end": 2608.31, "word": " and", 
"probability": 0.8193359375}, {"start": 2608.31, "end": 2608.77, "word": " standard", "probability": 0.84765625}, {"start": 2608.77, "end": 2609.23, "word": " deviation", "probability": 0.947265625}, {"start": 2609.23, "end": 2609.53, "word": " for", "probability": 0.71484375}, {"start": 2609.53, "end": 2610.03, "word": " example", "probability": 0.97314453125}, {"start": 2610.03, "end": 2610.55, "word": " sigma.", "probability": 0.43701171875}], "temperature": 1.0}, {"id": 98, "seek": 264057, "start": 2613.19, "end": 2640.57, "text": " That's for the scores, the scores. Now instead of the scores above, we have x bar, the sample mean. Again, the mean of x bar is the same as the population mean. Both means are the same, mu of x bar equals mu. But if you look at the spread of the second distribution, this is more than the other one.", "tokens": [663, 311, 337, 264, 13444, 11, 264, 13444, 13, 823, 2602, 295, 264, 13444, 3673, 11, 321, 362, 2031, 2159, 11, 264, 6889, 914, 13, 3764, 11, 264, 914, 295, 2031, 2159, 307, 264, 912, 382, 264, 4415, 914, 13, 6767, 1355, 366, 264, 912, 11, 2992, 295, 2031, 2159, 6915, 2992, 13, 583, 498, 291, 574, 412, 264, 3974, 295, 264, 1150, 7316, 11, 341, 307, 544, 813, 264, 661, 472, 13], "avg_logprob": -0.19995777188120661, "compression_ratio": 1.7142857142857142, "no_speech_prob": 0.0, "words": [{"start": 2613.19, "end": 2613.65, "word": " That's", "probability": 0.6593017578125}, {"start": 2613.65, "end": 2613.93, "word": " for", "probability": 0.93212890625}, {"start": 2613.93, "end": 2615.17, "word": " the", "probability": 0.8818359375}, {"start": 2615.17, "end": 2616.29, "word": " scores,", "probability": 0.76025390625}, {"start": 2617.31, "end": 2617.85, "word": " the", "probability": 0.81201171875}, {"start": 2617.85, "end": 2618.43, "word": " scores.", "probability": 0.8056640625}, {"start": 2619.67, "end": 2620.07, "word": " Now", "probability": 0.93505859375}, {"start": 2620.07, "end": 2620.43, "word": " instead", 
"probability": 0.63623046875}, {"start": 2620.43, "end": 2620.59, "word": " of", "probability": 0.96875}, {"start": 2620.59, "end": 2620.71, "word": " the", "probability": 0.82275390625}, {"start": 2620.71, "end": 2620.99, "word": " scores", "probability": 0.45166015625}, {"start": 2620.99, "end": 2621.27, "word": " above,", "probability": 0.2132568359375}, {"start": 2621.45, "end": 2621.51, "word": " we", "probability": 0.93798828125}, {"start": 2621.51, "end": 2621.79, "word": " have", "probability": 0.9482421875}, {"start": 2621.79, "end": 2622.33, "word": " x", "probability": 0.421630859375}, {"start": 2622.33, "end": 2622.63, "word": " bar,", "probability": 0.73193359375}, {"start": 2623.05, "end": 2623.17, "word": " the", "probability": 0.87060546875}, {"start": 2623.17, "end": 2623.45, "word": " sample", "probability": 0.89599609375}, {"start": 2623.45, "end": 2623.69, "word": " mean.", "probability": 0.9736328125}, {"start": 2624.67, "end": 2625.23, "word": " Again,", "probability": 0.935546875}, {"start": 2625.69, "end": 2625.89, "word": " the", "probability": 0.92333984375}, {"start": 2625.89, "end": 2626.05, "word": " mean", "probability": 0.97607421875}, {"start": 2626.05, "end": 2626.15, "word": " of", "probability": 0.9697265625}, {"start": 2626.15, "end": 2626.37, "word": " x", "probability": 0.97705078125}, {"start": 2626.37, "end": 2626.69, "word": " bar", "probability": 0.93359375}, {"start": 2626.69, "end": 2627.45, "word": " is", "probability": 0.9384765625}, {"start": 2627.45, "end": 2627.67, "word": " the", "probability": 0.9208984375}, {"start": 2627.67, "end": 2627.95, "word": " same", "probability": 0.9140625}, {"start": 2627.95, "end": 2628.35, "word": " as", "probability": 0.96826171875}, {"start": 2628.35, "end": 2628.59, "word": " the", "probability": 0.91943359375}, {"start": 2628.59, "end": 2628.93, "word": " population", "probability": 0.94091796875}, {"start": 2628.93, "end": 2629.39, "word": " mean.", "probability": 0.96630859375}, 
{"start": 2630.75, "end": 2631.05, "word": " Both", "probability": 0.87939453125}, {"start": 2631.05, "end": 2631.47, "word": " means", "probability": 0.86767578125}, {"start": 2631.47, "end": 2631.93, "word": " are", "probability": 0.9384765625}, {"start": 2631.93, "end": 2632.11, "word": " the", "probability": 0.9140625}, {"start": 2632.11, "end": 2632.37, "word": " same,", "probability": 0.916015625}, {"start": 2632.49, "end": 2632.65, "word": " mu", "probability": 0.61865234375}, {"start": 2632.65, "end": 2632.81, "word": " of", "probability": 0.7744140625}, {"start": 2632.81, "end": 2632.99, "word": " x", "probability": 0.98779296875}, {"start": 2632.99, "end": 2633.23, "word": " bar", "probability": 0.94921875}, {"start": 2633.23, "end": 2633.71, "word": " equals", "probability": 0.861328125}, {"start": 2633.71, "end": 2634.67, "word": " mu.", "probability": 0.84814453125}, {"start": 2635.23, "end": 2635.77, "word": " But", "probability": 0.92919921875}, {"start": 2635.77, "end": 2635.95, "word": " if", "probability": 0.90966796875}, {"start": 2635.95, "end": 2636.01, "word": " you", "probability": 0.9453125}, {"start": 2636.01, "end": 2636.19, "word": " look", "probability": 0.9638671875}, {"start": 2636.19, "end": 2636.37, "word": " at", "probability": 0.96826171875}, {"start": 2636.37, "end": 2636.57, "word": " the", "probability": 0.919921875}, {"start": 2636.57, "end": 2636.95, "word": " spread", "probability": 0.87841796875}, {"start": 2636.95, "end": 2637.13, "word": " of", "probability": 0.96533203125}, {"start": 2637.13, "end": 2637.29, "word": " the", "probability": 0.90966796875}, {"start": 2637.29, "end": 2637.69, "word": " second", "probability": 0.90478515625}, {"start": 2637.69, "end": 2638.57, "word": " distribution,", "probability": 0.9111328125}, {"start": 2639.05, "end": 2639.55, "word": " this", "probability": 0.93505859375}, {"start": 2639.55, "end": 2639.63, "word": " is", "probability": 0.70654296875}, {"start": 2639.63, "end": 2639.73, 
"word": " more", "probability": 0.57421875}, {"start": 2639.73, "end": 2640.01, "word": " than", "probability": 0.94287109375}, {"start": 2640.01, "end": 2640.19, "word": " the", "probability": 0.8974609375}, {"start": 2640.19, "end": 2640.39, "word": " other", "probability": 0.86572265625}, {"start": 2640.39, "end": 2640.57, "word": " one.", "probability": 0.84765625}], "temperature": 1.0}, {"id": 99, "seek": 266887, "start": 2641.19, "end": 2668.87, "text": " So that's the comparison between the two populations. So again, to compare or to figure out the relationship between sigma of x bar and the sample size. Suppose we have this blue normal distribution with sample size say 10 or 30, for example.", "tokens": [407, 300, 311, 264, 9660, 1296, 264, 732, 12822, 13, 407, 797, 11, 281, 6794, 420, 281, 2573, 484, 264, 2480, 1296, 12771, 295, 2031, 2159, 293, 264, 6889, 2744, 13, 21360, 321, 362, 341, 3344, 2710, 7316, 365, 6889, 2744, 584, 1266, 420, 2217, 11, 337, 1365, 13], "avg_logprob": -0.19453124850988388, "compression_ratio": 1.51875, "no_speech_prob": 0.0, "words": [{"start": 2641.19, "end": 2641.51, "word": " So", "probability": 0.86083984375}, {"start": 2641.51, "end": 2641.77, "word": " that's", "probability": 0.865234375}, {"start": 2641.77, "end": 2641.93, "word": " the", "probability": 0.90771484375}, {"start": 2641.93, "end": 2642.33, "word": " comparison", "probability": 0.87353515625}, {"start": 2642.33, "end": 2642.83, "word": " between", "probability": 0.87939453125}, {"start": 2642.83, "end": 2643.35, "word": " the", "probability": 0.89453125}, {"start": 2643.35, "end": 2643.73, "word": " two", "probability": 0.91650390625}, {"start": 2643.73, "end": 2645.53, "word": " populations.", "probability": 0.931640625}, {"start": 2647.05, "end": 2647.31, "word": " So", "probability": 0.9384765625}, {"start": 2647.31, "end": 2647.65, "word": " again,", "probability": 0.857421875}, {"start": 2648.81, "end": 2649.07, "word": " to", "probability": 0.947265625}, 
{"start": 2649.07, "end": 2649.53, "word": " compare", "probability": 0.9560546875}, {"start": 2649.53, "end": 2652.53, "word": " or", "probability": 0.67919921875}, {"start": 2652.53, "end": 2652.75, "word": " to", "probability": 0.90771484375}, {"start": 2652.75, "end": 2652.95, "word": " figure", "probability": 0.9697265625}, {"start": 2652.95, "end": 2653.21, "word": " out", "probability": 0.89306640625}, {"start": 2653.21, "end": 2653.39, "word": " the", "probability": 0.92138671875}, {"start": 2653.39, "end": 2653.87, "word": " relationship", "probability": 0.90673828125}, {"start": 2653.87, "end": 2654.37, "word": " between", "probability": 0.8740234375}, {"start": 2654.37, "end": 2655.29, "word": " sigma", "probability": 0.69580078125}, {"start": 2655.29, "end": 2655.47, "word": " of", "probability": 0.501953125}, {"start": 2655.47, "end": 2655.65, "word": " x", "probability": 0.5810546875}, {"start": 2655.65, "end": 2656.03, "word": " bar", "probability": 0.8388671875}, {"start": 2656.03, "end": 2657.51, "word": " and", "probability": 0.90625}, {"start": 2657.51, "end": 2657.71, "word": " the", "probability": 0.8369140625}, {"start": 2657.71, "end": 2657.91, "word": " sample", "probability": 0.87744140625}, {"start": 2657.91, "end": 2658.33, "word": " size.", "probability": 0.87255859375}, {"start": 2659.49, "end": 2660.05, "word": " Suppose", "probability": 0.80859375}, {"start": 2660.05, "end": 2660.25, "word": " we", "probability": 0.91552734375}, {"start": 2660.25, "end": 2660.43, "word": " have", "probability": 0.94482421875}, {"start": 2660.43, "end": 2660.75, "word": " this", "probability": 0.93798828125}, {"start": 2660.75, "end": 2661.27, "word": " blue", "probability": 0.6201171875}, {"start": 2661.27, "end": 2662.11, "word": " normal", "probability": 0.75537109375}, {"start": 2662.11, "end": 2662.77, "word": " distribution", "probability": 0.83935546875}, {"start": 2662.77, "end": 2664.31, "word": " with", "probability": 0.83349609375}, 
{"start": 2664.31, "end": 2664.71, "word": " sample", "probability": 0.8740234375}, {"start": 2664.71, "end": 2665.15, "word": " size", "probability": 0.87841796875}, {"start": 2665.15, "end": 2665.63, "word": " say", "probability": 0.59423828125}, {"start": 2665.63, "end": 2666.45, "word": " 10", "probability": 0.64599609375}, {"start": 2666.45, "end": 2667.27, "word": " or", "probability": 0.77294921875}, {"start": 2667.27, "end": 2668.01, "word": " 30,", "probability": 0.75244140625}, {"start": 2668.31, "end": 2668.59, "word": " for", "probability": 0.951171875}, {"start": 2668.59, "end": 2668.87, "word": " example.", "probability": 0.97314453125}], "temperature": 1.0}, {"id": 100, "seek": 270062, "start": 2672.22, "end": 2700.62, "text": " As n gets bigger and bigger, sigma of x bar becomes smaller and smaller. If you look at the red one, maybe if the red one has n equal 100, we'll get this spread. But for the other one, we have larger spread. So as n increases, sigma of x bar decreases. 
So this, the blue one for smaller sample size.", "tokens": [1018, 297, 2170, 3801, 293, 3801, 11, 12771, 295, 2031, 2159, 3643, 4356, 293, 4356, 13, 759, 291, 574, 412, 264, 2182, 472, 11, 1310, 498, 264, 2182, 472, 575, 297, 2681, 2319, 11, 321, 603, 483, 341, 3974, 13, 583, 337, 264, 661, 472, 11, 321, 362, 4833, 3974, 13, 407, 382, 297, 8637, 11, 12771, 295, 2031, 2159, 24108, 13, 407, 341, 11, 264, 3344, 472, 337, 4356, 6889, 2744, 13], "avg_logprob": -0.15825591115532694, "compression_ratio": 1.6574585635359116, "no_speech_prob": 0.0, "words": [{"start": 2672.22, "end": 2672.6, "word": " As", "probability": 0.86669921875}, {"start": 2672.6, "end": 2672.84, "word": " n", "probability": 0.552734375}, {"start": 2672.84, "end": 2673.18, "word": " gets", "probability": 0.87158203125}, {"start": 2673.18, "end": 2673.58, "word": " bigger", "probability": 0.94677734375}, {"start": 2673.58, "end": 2673.78, "word": " and", "probability": 0.93701171875}, {"start": 2673.78, "end": 2674.06, "word": " bigger,", "probability": 0.94580078125}, {"start": 2675.32, "end": 2677.16, "word": " sigma", "probability": 0.70654296875}, {"start": 2677.16, "end": 2677.36, "word": " of", "probability": 0.9052734375}, {"start": 2677.36, "end": 2677.5, "word": " x", "probability": 0.84033203125}, {"start": 2677.5, "end": 2677.88, "word": " bar", "probability": 0.8955078125}, {"start": 2677.88, "end": 2678.92, "word": " becomes", "probability": 0.83349609375}, {"start": 2678.92, "end": 2679.36, "word": " smaller", "probability": 0.89404296875}, {"start": 2679.36, "end": 2679.58, "word": " and", "probability": 0.93603515625}, {"start": 2679.58, "end": 2679.92, "word": " smaller.", "probability": 0.88330078125}, {"start": 2680.78, "end": 2681.14, "word": " If", "probability": 0.9130859375}, {"start": 2681.14, "end": 2681.22, "word": " you", "probability": 0.93603515625}, {"start": 2681.22, "end": 2681.38, "word": " look", "probability": 0.9619140625}, {"start": 2681.38, "end": 2681.56, "word": 
" at", "probability": 0.9638671875}, {"start": 2681.56, "end": 2681.8, "word": " the", "probability": 0.89697265625}, {"start": 2681.8, "end": 2681.98, "word": " red", "probability": 0.9189453125}, {"start": 2681.98, "end": 2682.26, "word": " one,", "probability": 0.91455078125}, {"start": 2682.4, "end": 2682.6, "word": " maybe", "probability": 0.89306640625}, {"start": 2682.6, "end": 2682.78, "word": " if", "probability": 0.44677734375}, {"start": 2682.78, "end": 2682.92, "word": " the", "probability": 0.83984375}, {"start": 2682.92, "end": 2683.1, "word": " red", "probability": 0.91064453125}, {"start": 2683.1, "end": 2683.44, "word": " one", "probability": 0.91748046875}, {"start": 2683.44, "end": 2683.82, "word": " has", "probability": 0.6376953125}, {"start": 2683.82, "end": 2684.08, "word": " n", "probability": 0.9208984375}, {"start": 2684.08, "end": 2684.34, "word": " equal", "probability": 0.51611328125}, {"start": 2684.34, "end": 2684.76, "word": " 100,", "probability": 0.8994140625}, {"start": 2685.7, "end": 2686.0, "word": " we'll", "probability": 0.712646484375}, {"start": 2686.0, "end": 2686.2, "word": " get", "probability": 0.93798828125}, {"start": 2686.2, "end": 2686.44, "word": " this", "probability": 0.93701171875}, {"start": 2686.44, "end": 2686.82, "word": " spread.", "probability": 0.7490234375}, {"start": 2687.64, "end": 2687.86, "word": " But", "probability": 0.6474609375}, {"start": 2687.86, "end": 2688.0, "word": " for", "probability": 0.9296875}, {"start": 2688.0, "end": 2688.14, "word": " the", "probability": 0.9189453125}, {"start": 2688.14, "end": 2688.38, "word": " other", "probability": 0.88525390625}, {"start": 2688.38, "end": 2688.6, "word": " one,", "probability": 0.92578125}, {"start": 2688.66, "end": 2688.78, "word": " we", "probability": 0.95166015625}, {"start": 2688.78, "end": 2688.94, "word": " have", "probability": 0.94091796875}, {"start": 2688.94, "end": 2689.32, "word": " larger", "probability": 0.9248046875}, {"start": 
2689.32, "end": 2689.7, "word": " spread.", "probability": 0.91796875}, {"start": 2690.86, "end": 2691.2, "word": " So", "probability": 0.9609375}, {"start": 2691.2, "end": 2691.44, "word": " as", "probability": 0.87841796875}, {"start": 2691.44, "end": 2691.6, "word": " n", "probability": 0.9609375}, {"start": 2691.6, "end": 2692.16, "word": " increases,", "probability": 0.94287109375}, {"start": 2693.46, "end": 2694.92, "word": " sigma", "probability": 0.90478515625}, {"start": 2694.92, "end": 2695.12, "word": " of", "probability": 0.80029296875}, {"start": 2695.12, "end": 2695.24, "word": " x", "probability": 0.990234375}, {"start": 2695.24, "end": 2695.6, "word": " bar", "probability": 0.95263671875}, {"start": 2695.6, "end": 2696.62, "word": " decreases.", "probability": 0.95263671875}, {"start": 2697.54, "end": 2697.78, "word": " So", "probability": 0.953125}, {"start": 2697.78, "end": 2698.08, "word": " this,", "probability": 0.82275390625}, {"start": 2698.5, "end": 2698.84, "word": " the", "probability": 0.91552734375}, {"start": 2698.84, "end": 2699.06, "word": " blue", "probability": 0.970703125}, {"start": 2699.06, "end": 2699.26, "word": " one", "probability": 0.93115234375}, {"start": 2699.26, "end": 2699.48, "word": " for", "probability": 0.6376953125}, {"start": 2699.48, "end": 2699.86, "word": " smaller", "probability": 0.8173828125}, {"start": 2699.86, "end": 2700.16, "word": " sample", "probability": 0.78125}, {"start": 2700.16, "end": 2700.62, "word": " size.", "probability": 0.853515625}], "temperature": 1.0}, {"id": 101, "seek": 273014, "start": 2701.58, "end": 2730.14, "text": " The red one for larger sample size. So again, as n increases, sigma of x bar goes down four degrees. 
Next, let's use this fact to figure out", "tokens": [440, 2182, 472, 337, 4833, 6889, 2744, 13, 407, 797, 11, 382, 297, 8637, 11, 12771, 295, 2031, 2159, 1709, 760, 1451, 5310, 13, 3087, 11, 718, 311, 764, 341, 1186, 281, 2573, 484], "avg_logprob": -0.24732141835348948, "compression_ratio": 1.236842105263158, "no_speech_prob": 0.0, "words": [{"start": 2701.58, "end": 2701.88, "word": " The", "probability": 0.3125}, {"start": 2701.88, "end": 2702.18, "word": " red", "probability": 0.90380859375}, {"start": 2702.18, "end": 2702.54, "word": " one", "probability": 0.91650390625}, {"start": 2702.54, "end": 2703.02, "word": " for", "probability": 0.7919921875}, {"start": 2703.02, "end": 2704.18, "word": " larger", "probability": 0.701171875}, {"start": 2704.18, "end": 2705.86, "word": " sample", "probability": 0.7138671875}, {"start": 2705.86, "end": 2706.24, "word": " size.", "probability": 0.763671875}, {"start": 2706.84, "end": 2707.56, "word": " So", "probability": 0.9560546875}, {"start": 2707.56, "end": 2707.82, "word": " again,", "probability": 0.880859375}, {"start": 2707.98, "end": 2708.06, "word": " as", "probability": 0.9619140625}, {"start": 2708.06, "end": 2708.24, "word": " n", "probability": 0.69140625}, {"start": 2708.24, "end": 2708.68, "word": " increases,", "probability": 0.93115234375}, {"start": 2709.22, "end": 2709.68, "word": " sigma", "probability": 0.83154296875}, {"start": 2709.68, "end": 2709.94, "word": " of", "probability": 0.783203125}, {"start": 2709.94, "end": 2710.12, "word": " x", "probability": 0.8564453125}, {"start": 2710.12, "end": 2710.38, "word": " bar", "probability": 0.90576171875}, {"start": 2710.38, "end": 2710.76, "word": " goes", "probability": 0.93017578125}, {"start": 2710.76, "end": 2711.12, "word": " down", "probability": 0.85009765625}, {"start": 2711.12, "end": 2711.82, "word": " four", "probability": 0.173583984375}, {"start": 2711.82, "end": 2712.04, "word": " degrees.", "probability": 0.927734375}, {"start": 2721.72, 
"end": 2722.56, "word": " Next,", "probability": 0.78759765625}, {"start": 2723.1, "end": 2723.56, "word": " let's", "probability": 0.969970703125}, {"start": 2723.56, "end": 2724.02, "word": " use", "probability": 0.869140625}, {"start": 2724.02, "end": 2725.52, "word": " this", "probability": 0.87548828125}, {"start": 2725.52, "end": 2725.94, "word": " fact", "probability": 0.890625}, {"start": 2725.94, "end": 2729.48, "word": " to", "probability": 0.81982421875}, {"start": 2729.48, "end": 2729.76, "word": " figure", "probability": 0.9716796875}, {"start": 2729.76, "end": 2730.14, "word": " out", "probability": 0.8876953125}], "temperature": 1.0}, {"id": 102, "seek": 275798, "start": 2730.74, "end": 2757.98, "text": " an interval for the sample mean with 90% confidence and suppose the population we have is normal with mean 368 and sigma of 15 and suppose we select a random sample of a 25 and the question is find symmetrically distributed interval around the mean", "tokens": [364, 15035, 337, 264, 6889, 914, 365, 4289, 4, 6687, 293, 7297, 264, 4415, 321, 362, 307, 2710, 365, 914, 8652, 23, 293, 12771, 295, 2119, 293, 7297, 321, 3048, 257, 4974, 6889, 295, 257, 3552, 293, 264, 1168, 307, 915, 14232, 27965, 984, 12631, 15035, 926, 264, 914], "avg_logprob": -0.24812501311302185, "compression_ratio": 1.55625, "no_speech_prob": 0.0, "words": [{"start": 2730.74, "end": 2731.04, "word": " an", "probability": 0.2030029296875}, {"start": 2731.04, "end": 2731.46, "word": " interval", "probability": 0.95849609375}, {"start": 2731.46, "end": 2733.98, "word": " for", "probability": 0.755859375}, {"start": 2733.98, "end": 2734.58, "word": " the", "probability": 0.73876953125}, {"start": 2734.58, "end": 2734.88, "word": " sample", "probability": 0.8212890625}, {"start": 2734.88, "end": 2735.24, "word": " mean", "probability": 0.9404296875}, {"start": 2735.24, "end": 2737.02, "word": " with", "probability": 0.86474609375}, {"start": 2737.02, "end": 2737.44, "word": " 90", 
"probability": 0.892578125}, {"start": 2737.44, "end": 2737.96, "word": "%", "probability": 0.626953125}, {"start": 2737.96, "end": 2738.48, "word": " confidence", "probability": 0.460693359375}, {"start": 2738.48, "end": 2739.88, "word": " and", "probability": 0.242919921875}, {"start": 2739.88, "end": 2740.26, "word": " suppose", "probability": 0.87939453125}, {"start": 2740.26, "end": 2740.62, "word": " the", "probability": 0.88330078125}, {"start": 2740.62, "end": 2741.08, "word": " population", "probability": 0.95556640625}, {"start": 2741.08, "end": 2741.36, "word": " we", "probability": 0.88623046875}, {"start": 2741.36, "end": 2741.68, "word": " have", "probability": 0.94970703125}, {"start": 2741.68, "end": 2742.14, "word": " is", "probability": 0.93310546875}, {"start": 2742.14, "end": 2742.5, "word": " normal", "probability": 0.85693359375}, {"start": 2742.5, "end": 2744.38, "word": " with", "probability": 0.84716796875}, {"start": 2744.38, "end": 2744.74, "word": " mean", "probability": 0.935546875}, {"start": 2744.74, "end": 2746.64, "word": " 368", "probability": 0.922119140625}, {"start": 2746.64, "end": 2747.5, "word": " and", "probability": 0.89599609375}, {"start": 2747.5, "end": 2747.9, "word": " sigma", "probability": 0.748046875}, {"start": 2747.9, "end": 2748.52, "word": " of", "probability": 0.54833984375}, {"start": 2748.52, "end": 2749.02, "word": " 15", "probability": 0.96240234375}, {"start": 2749.02, "end": 2749.28, "word": " and", "probability": 0.76513671875}, {"start": 2749.28, "end": 2749.5, "word": " suppose", "probability": 0.9072265625}, {"start": 2749.5, "end": 2749.66, "word": " we", "probability": 0.83447265625}, {"start": 2749.66, "end": 2749.92, "word": " select", "probability": 0.8427734375}, {"start": 2749.92, "end": 2750.04, "word": " a", "probability": 0.91162109375}, {"start": 2750.04, "end": 2750.24, "word": " random", "probability": 0.86279296875}, {"start": 2750.24, "end": 2750.66, "word": " sample", "probability": 
0.8798828125}, {"start": 2750.66, "end": 2751.34, "word": " of", "probability": 0.8818359375}, {"start": 2751.34, "end": 2751.44, "word": " a", "probability": 0.41796875}, {"start": 2751.44, "end": 2751.74, "word": " 25", "probability": 0.5205078125}, {"start": 2751.74, "end": 2752.52, "word": " and", "probability": 0.66552734375}, {"start": 2752.52, "end": 2752.64, "word": " the", "probability": 0.92626953125}, {"start": 2752.64, "end": 2752.9, "word": " question", "probability": 0.921875}, {"start": 2752.9, "end": 2753.26, "word": " is", "probability": 0.95947265625}, {"start": 2753.26, "end": 2754.04, "word": " find", "probability": 0.728515625}, {"start": 2754.04, "end": 2754.96, "word": " symmetrically", "probability": 0.9278971354166666}, {"start": 2754.96, "end": 2755.9, "word": " distributed", "probability": 0.90283203125}, {"start": 2755.9, "end": 2756.58, "word": " interval", "probability": 0.9130859375}, {"start": 2756.58, "end": 2757.6, "word": " around", "probability": 0.943359375}, {"start": 2757.6, "end": 2757.78, "word": " the", "probability": 0.71240234375}, {"start": 2757.78, "end": 2757.98, "word": " mean", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 103, "seek": 278497, "start": 2758.79, "end": 2784.97, "text": " That will include 95% of the sample means when mu equals 368, sigma is 15, and your sample size is 25. So in this case, we are looking for the estimation of the sample mean. 
And we have this information,", "tokens": [663, 486, 4090, 13420, 4, 295, 264, 6889, 1355, 562, 2992, 6915, 8652, 23, 11, 12771, 307, 2119, 11, 293, 428, 6889, 2744, 307, 3552, 13, 407, 294, 341, 1389, 11, 321, 366, 1237, 337, 264, 35701, 295, 264, 6889, 914, 13, 400, 321, 362, 341, 1589, 11], "avg_logprob": -0.2514349587109624, "compression_ratio": 1.3877551020408163, "no_speech_prob": 0.0, "words": [{"start": 2758.79, "end": 2759.17, "word": " That", "probability": 0.307861328125}, {"start": 2759.17, "end": 2759.47, "word": " will", "probability": 0.8671875}, {"start": 2759.47, "end": 2760.39, "word": " include", "probability": 0.84228515625}, {"start": 2760.39, "end": 2760.97, "word": " 95", "probability": 0.943359375}, {"start": 2760.97, "end": 2761.53, "word": "%", "probability": 0.85888671875}, {"start": 2761.53, "end": 2762.21, "word": " of", "probability": 0.96142578125}, {"start": 2762.21, "end": 2762.43, "word": " the", "probability": 0.87744140625}, {"start": 2762.43, "end": 2762.73, "word": " sample", "probability": 0.8369140625}, {"start": 2762.73, "end": 2763.19, "word": " means", "probability": 0.84716796875}, {"start": 2763.19, "end": 2763.91, "word": " when", "probability": 0.587890625}, {"start": 2763.91, "end": 2764.13, "word": " mu", "probability": 0.429931640625}, {"start": 2764.13, "end": 2764.51, "word": " equals", "probability": 0.537109375}, {"start": 2764.51, "end": 2765.39, "word": " 368,", "probability": 0.84765625}, {"start": 2765.81, "end": 2766.41, "word": " sigma", "probability": 0.6611328125}, {"start": 2766.41, "end": 2766.63, "word": " is", "probability": 0.8623046875}, {"start": 2766.63, "end": 2767.13, "word": " 15,", "probability": 0.95751953125}, {"start": 2767.65, "end": 2767.99, "word": " and", "probability": 0.9228515625}, {"start": 2767.99, "end": 2768.27, "word": " your", "probability": 0.8466796875}, {"start": 2768.27, "end": 2768.61, "word": " sample", "probability": 0.904296875}, {"start": 2768.61, "end": 2769.03, 
"word": " size", "probability": 0.8603515625}, {"start": 2769.03, "end": 2769.17, "word": " is", "probability": 0.382568359375}, {"start": 2769.17, "end": 2769.63, "word": " 25.", "probability": 0.9619140625}, {"start": 2770.37, "end": 2771.03, "word": " So", "probability": 0.9482421875}, {"start": 2771.03, "end": 2771.17, "word": " in", "probability": 0.75048828125}, {"start": 2771.17, "end": 2771.31, "word": " this", "probability": 0.9462890625}, {"start": 2771.31, "end": 2771.57, "word": " case,", "probability": 0.919921875}, {"start": 2771.67, "end": 2771.79, "word": " we", "probability": 0.9580078125}, {"start": 2771.79, "end": 2772.05, "word": " are", "probability": 0.923828125}, {"start": 2772.05, "end": 2773.37, "word": " looking", "probability": 0.90380859375}, {"start": 2773.37, "end": 2773.83, "word": " for", "probability": 0.95263671875}, {"start": 2773.83, "end": 2777.15, "word": " the", "probability": 0.5869140625}, {"start": 2777.15, "end": 2777.75, "word": " estimation", "probability": 0.951171875}, {"start": 2777.75, "end": 2778.35, "word": " of", "probability": 0.96875}, {"start": 2778.35, "end": 2778.51, "word": " the", "probability": 0.89306640625}, {"start": 2778.51, "end": 2778.77, "word": " sample", "probability": 0.7236328125}, {"start": 2778.77, "end": 2779.11, "word": " mean.", "probability": 0.88916015625}, {"start": 2783.13, "end": 2783.97, "word": " And", "probability": 0.86474609375}, {"start": 2783.97, "end": 2784.11, "word": " we", "probability": 0.9013671875}, {"start": 2784.11, "end": 2784.27, "word": " have", "probability": 0.94677734375}, {"start": 2784.27, "end": 2784.45, "word": " this", "probability": 0.9384765625}, {"start": 2784.45, "end": 2784.97, "word": " information,", "probability": 0.83642578125}], "temperature": 1.0}, {"id": 104, "seek": 281649, "start": 2788.91, "end": 2816.49, "text": " Sigma is 15 and N is 25. 
The problem mentioned there, we have symmetric distribution and this area is 95% bisymmetric and we have only 5% out. So that means half to the right and half to the left.", "tokens": [36595, 307, 2119, 293, 426, 307, 3552, 13, 440, 1154, 2835, 456, 11, 321, 362, 32330, 7316, 293, 341, 1859, 307, 13420, 4, 7393, 32497, 17475, 293, 321, 362, 787, 1025, 4, 484, 13, 407, 300, 1355, 1922, 281, 264, 558, 293, 1922, 281, 264, 1411, 13], "avg_logprob": -0.20052083861082792, "compression_ratio": 1.4172661870503598, "no_speech_prob": 0.0, "words": [{"start": 2788.91, "end": 2789.29, "word": " Sigma", "probability": 0.54345703125}, {"start": 2789.29, "end": 2789.47, "word": " is", "probability": 0.81982421875}, {"start": 2789.47, "end": 2789.97, "word": " 15", "probability": 0.86962890625}, {"start": 2789.97, "end": 2791.09, "word": " and", "probability": 0.7099609375}, {"start": 2791.09, "end": 2791.29, "word": " N", "probability": 0.65380859375}, {"start": 2791.29, "end": 2791.43, "word": " is", "probability": 0.92724609375}, {"start": 2791.43, "end": 2791.75, "word": " 25.", "probability": 0.90625}, {"start": 2795.65, "end": 2796.33, "word": " The", "probability": 0.83447265625}, {"start": 2796.33, "end": 2796.67, "word": " problem", "probability": 0.86279296875}, {"start": 2796.67, "end": 2797.13, "word": " mentioned", "probability": 0.8359375}, {"start": 2797.13, "end": 2797.49, "word": " there,", "probability": 0.52099609375}, {"start": 2798.01, "end": 2798.29, "word": " we", "probability": 0.921875}, {"start": 2798.29, "end": 2798.47, "word": " have", "probability": 0.94970703125}, {"start": 2798.47, "end": 2798.89, "word": " symmetric", "probability": 0.77978515625}, {"start": 2798.89, "end": 2799.47, "word": " distribution", "probability": 0.673828125}, {"start": 2799.47, "end": 2801.45, "word": " and", "probability": 0.489990234375}, {"start": 2801.45, "end": 2801.75, "word": " this", "probability": 0.935546875}, {"start": 2801.75, "end": 2802.19, "word": " area", 
"probability": 0.89501953125}, {"start": 2802.19, "end": 2803.41, "word": " is", "probability": 0.94384765625}, {"start": 2803.41, "end": 2803.79, "word": " 95", "probability": 0.974609375}, {"start": 2803.79, "end": 2804.45, "word": "%", "probability": 0.68212890625}, {"start": 2804.45, "end": 2806.41, "word": " bisymmetric", "probability": 0.7611490885416666}, {"start": 2806.41, "end": 2808.49, "word": " and", "probability": 0.6591796875}, {"start": 2808.49, "end": 2808.63, "word": " we", "probability": 0.93505859375}, {"start": 2808.63, "end": 2808.77, "word": " have", "probability": 0.89208984375}, {"start": 2808.77, "end": 2808.97, "word": " only", "probability": 0.9052734375}, {"start": 2808.97, "end": 2809.25, "word": " 5", "probability": 0.98095703125}, {"start": 2809.25, "end": 2809.67, "word": "%", "probability": 0.98388671875}, {"start": 2809.67, "end": 2810.17, "word": " out.", "probability": 0.8798828125}, {"start": 2811.33, "end": 2811.57, "word": " So", "probability": 0.90771484375}, {"start": 2811.57, "end": 2811.79, "word": " that", "probability": 0.81396484375}, {"start": 2811.79, "end": 2812.09, "word": " means", "probability": 0.93408203125}, {"start": 2812.09, "end": 2812.53, "word": " half", "probability": 0.74462890625}, {"start": 2812.53, "end": 2812.75, "word": " to", "probability": 0.95654296875}, {"start": 2812.75, "end": 2812.89, "word": " the", "probability": 0.91259765625}, {"start": 2812.89, "end": 2813.17, "word": " right", "probability": 0.91015625}, {"start": 2813.17, "end": 2815.69, "word": " and", "probability": 0.8896484375}, {"start": 2815.69, "end": 2815.95, "word": " half", "probability": 0.8642578125}, {"start": 2815.95, "end": 2816.13, "word": " to", "probability": 0.958984375}, {"start": 2816.13, "end": 2816.27, "word": " the", "probability": 0.9111328125}, {"start": 2816.27, "end": 2816.49, "word": " left.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 105, "seek": 284718, "start": 2819.74, "end": 2847.18, 
"text": " And let's see how can we compute these two values. The problem says that the average T68 for this data and the standard deviation sigma of 15. He asked about what are the values of x bar.", "tokens": [400, 718, 311, 536, 577, 393, 321, 14722, 613, 732, 4190, 13, 440, 1154, 1619, 300, 264, 4274, 314, 27102, 337, 341, 1412, 293, 264, 3832, 25163, 12771, 295, 2119, 13, 634, 2351, 466, 437, 366, 264, 4190, 295, 2031, 2159, 13], "avg_logprob": -0.22183866071146588, "compression_ratio": 1.3055555555555556, "no_speech_prob": 0.0, "words": [{"start": 2819.74, "end": 2819.98, "word": " And", "probability": 0.7568359375}, {"start": 2819.98, "end": 2820.26, "word": " let's", "probability": 0.945068359375}, {"start": 2820.26, "end": 2820.38, "word": " see", "probability": 0.90087890625}, {"start": 2820.38, "end": 2820.52, "word": " how", "probability": 0.88037109375}, {"start": 2820.52, "end": 2820.68, "word": " can", "probability": 0.779296875}, {"start": 2820.68, "end": 2820.82, "word": " we", "probability": 0.947265625}, {"start": 2820.82, "end": 2821.36, "word": " compute", "probability": 0.9150390625}, {"start": 2821.36, "end": 2822.06, "word": " these", "probability": 0.853515625}, {"start": 2822.06, "end": 2822.28, "word": " two", "probability": 0.91796875}, {"start": 2822.28, "end": 2822.64, "word": " values.", "probability": 0.927734375}, {"start": 2823.82, "end": 2824.2, "word": " The", "probability": 0.728515625}, {"start": 2824.2, "end": 2824.6, "word": " problem", "probability": 0.8818359375}, {"start": 2824.6, "end": 2825.0, "word": " says", "probability": 0.90966796875}, {"start": 2825.0, "end": 2825.46, "word": " that", "probability": 0.9150390625}, {"start": 2825.46, "end": 2826.94, "word": " the", "probability": 0.81298828125}, {"start": 2826.94, "end": 2827.5, "word": " average", "probability": 0.75732421875}, {"start": 2827.5, "end": 2831.44, "word": " T68", "probability": 0.6187744140625}, {"start": 2831.44, "end": 2833.38, "word": " for", 
"probability": 0.478759765625}, {"start": 2833.38, "end": 2833.6, "word": " this", "probability": 0.9404296875}, {"start": 2833.6, "end": 2833.96, "word": " data", "probability": 0.9384765625}, {"start": 2833.96, "end": 2835.46, "word": " and", "probability": 0.6669921875}, {"start": 2835.46, "end": 2835.82, "word": " the", "probability": 0.6865234375}, {"start": 2835.82, "end": 2836.08, "word": " standard", "probability": 0.92529296875}, {"start": 2836.08, "end": 2836.54, "word": " deviation", "probability": 0.92919921875}, {"start": 2836.54, "end": 2837.64, "word": " sigma", "probability": 0.6484375}, {"start": 2837.64, "end": 2838.66, "word": " of", "probability": 0.8916015625}, {"start": 2838.66, "end": 2839.08, "word": " 15.", "probability": 0.5634765625}, {"start": 2841.6, "end": 2842.02, "word": " He", "probability": 0.60693359375}, {"start": 2842.02, "end": 2842.18, "word": " asked", "probability": 0.49609375}, {"start": 2842.18, "end": 2842.48, "word": " about", "probability": 0.85498046875}, {"start": 2842.48, "end": 2842.82, "word": " what", "probability": 0.890625}, {"start": 2842.82, "end": 2843.28, "word": " are", "probability": 0.84765625}, {"start": 2843.28, "end": 2844.26, "word": " the", "probability": 0.9208984375}, {"start": 2844.26, "end": 2844.76, "word": " values", "probability": 0.9619140625}, {"start": 2844.76, "end": 2846.66, "word": " of", "probability": 0.958984375}, {"start": 2846.66, "end": 2846.9, "word": " x", "probability": 0.529296875}, {"start": 2846.9, "end": 2847.18, "word": " bar.", "probability": 0.849609375}], "temperature": 1.0}, {"id": 106, "seek": 287621, "start": 2848.31, "end": 2876.21, "text": " I mean, we have to find the interval of x bar. Let's see. If you remember last time, z score was x minus mu divided by sigma. But now we have x bar. So your z score should be minus mu divided by sigma over root n. 
Now cross multiplication, you will get x bar minus mu equals z sigma over root n.", "tokens": [286, 914, 11, 321, 362, 281, 915, 264, 15035, 295, 2031, 2159, 13, 961, 311, 536, 13, 759, 291, 1604, 1036, 565, 11, 710, 6175, 390, 2031, 3175, 2992, 6666, 538, 12771, 13, 583, 586, 321, 362, 2031, 2159, 13, 407, 428, 710, 6175, 820, 312, 3175, 2992, 6666, 538, 12771, 670, 5593, 297, 13, 823, 3278, 27290, 11, 291, 486, 483, 2031, 2159, 3175, 2992, 6915, 710, 12771, 670, 5593, 297, 13], "avg_logprob": -0.17567568171668696, "compression_ratio": 1.6444444444444444, "no_speech_prob": 0.0, "words": [{"start": 2848.31, "end": 2848.51, "word": " I", "probability": 0.7646484375}, {"start": 2848.51, "end": 2848.69, "word": " mean,", "probability": 0.96533203125}, {"start": 2848.77, "end": 2848.87, "word": " we", "probability": 0.94482421875}, {"start": 2848.87, "end": 2849.37, "word": " have", "probability": 0.908203125}, {"start": 2849.37, "end": 2849.49, "word": " to", "probability": 0.96875}, {"start": 2849.49, "end": 2849.75, "word": " find", "probability": 0.88623046875}, {"start": 2849.75, "end": 2849.91, "word": " the", "probability": 0.9169921875}, {"start": 2849.91, "end": 2850.33, "word": " interval", "probability": 0.76513671875}, {"start": 2850.33, "end": 2851.27, "word": " of", "probability": 0.7568359375}, {"start": 2851.27, "end": 2851.41, "word": " x", "probability": 0.6396484375}, {"start": 2851.41, "end": 2851.65, "word": " bar.", "probability": 0.8486328125}, {"start": 2852.07, "end": 2852.43, "word": " Let's", "probability": 0.88037109375}, {"start": 2852.43, "end": 2852.61, "word": " see.", "probability": 0.896484375}, {"start": 2853.21, "end": 2853.33, "word": " If", "probability": 0.759765625}, {"start": 2853.33, "end": 2853.37, "word": " you", "probability": 0.95849609375}, {"start": 2853.37, "end": 2853.57, "word": " remember", "probability": 0.869140625}, {"start": 2853.57, "end": 2853.85, "word": " last", "probability": 0.78857421875}, {"start": 
2853.85, "end": 2854.21, "word": " time,", "probability": 0.89306640625}, {"start": 2855.01, "end": 2855.21, "word": " z", "probability": 0.25537109375}, {"start": 2855.21, "end": 2855.57, "word": " score", "probability": 0.43701171875}, {"start": 2855.57, "end": 2855.85, "word": " was", "probability": 0.88671875}, {"start": 2855.85, "end": 2856.13, "word": " x", "probability": 0.95703125}, {"start": 2856.13, "end": 2856.45, "word": " minus", "probability": 0.92431640625}, {"start": 2856.45, "end": 2856.63, "word": " mu", "probability": 0.81884765625}, {"start": 2856.63, "end": 2856.83, "word": " divided", "probability": 0.7607421875}, {"start": 2856.83, "end": 2857.05, "word": " by", "probability": 0.9755859375}, {"start": 2857.05, "end": 2857.31, "word": " sigma.", "probability": 0.93505859375}, {"start": 2857.83, "end": 2858.39, "word": " But", "probability": 0.85546875}, {"start": 2858.39, "end": 2858.55, "word": " now", "probability": 0.90234375}, {"start": 2858.55, "end": 2858.73, "word": " we", "probability": 0.79931640625}, {"start": 2858.73, "end": 2859.55, "word": " have", "probability": 0.94677734375}, {"start": 2859.55, "end": 2860.79, "word": " x", "probability": 0.982421875}, {"start": 2860.79, "end": 2861.13, "word": " bar.", "probability": 0.92431640625}, {"start": 2861.89, "end": 2862.39, "word": " So", "probability": 0.95458984375}, {"start": 2862.39, "end": 2862.81, "word": " your", "probability": 0.56982421875}, {"start": 2862.81, "end": 2863.03, "word": " z", "probability": 0.98681640625}, {"start": 2863.03, "end": 2863.35, "word": " score", "probability": 0.81298828125}, {"start": 2863.35, "end": 2863.73, "word": " should", "probability": 0.97412109375}, {"start": 2863.73, "end": 2864.09, "word": " be", "probability": 0.951171875}, {"start": 2864.09, "end": 2864.97, "word": " minus", "probability": 0.98095703125}, {"start": 2864.97, "end": 2865.27, "word": " mu", "probability": 0.943359375}, {"start": 2865.27, "end": 2865.59, "word": " 
divided", "probability": 0.8251953125}, {"start": 2865.59, "end": 2865.85, "word": " by", "probability": 0.9716796875}, {"start": 2865.85, "end": 2866.15, "word": " sigma", "probability": 0.935546875}, {"start": 2866.15, "end": 2866.37, "word": " over", "probability": 0.8701171875}, {"start": 2866.37, "end": 2866.61, "word": " root", "probability": 0.9453125}, {"start": 2866.61, "end": 2866.81, "word": " n.", "probability": 0.853515625}, {"start": 2868.79, "end": 2869.25, "word": " Now", "probability": 0.9609375}, {"start": 2869.25, "end": 2869.61, "word": " cross", "probability": 0.515625}, {"start": 2869.61, "end": 2870.11, "word": " multiplication,", "probability": 0.90087890625}, {"start": 2870.71, "end": 2870.85, "word": " you", "probability": 0.92333984375}, {"start": 2870.85, "end": 2871.03, "word": " will", "probability": 0.82666015625}, {"start": 2871.03, "end": 2871.29, "word": " get", "probability": 0.94287109375}, {"start": 2871.29, "end": 2871.93, "word": " x", "probability": 0.98876953125}, {"start": 2871.93, "end": 2872.33, "word": " bar", "probability": 0.91650390625}, {"start": 2872.33, "end": 2874.13, "word": " minus", "probability": 0.95947265625}, {"start": 2874.13, "end": 2874.41, "word": " mu", "probability": 0.94921875}, {"start": 2874.41, "end": 2874.79, "word": " equals", "probability": 0.9228515625}, {"start": 2874.79, "end": 2875.03, "word": " z", "probability": 0.92724609375}, {"start": 2875.03, "end": 2875.57, "word": " sigma", "probability": 0.83349609375}, {"start": 2875.57, "end": 2875.77, "word": " over", "probability": 0.79736328125}, {"start": 2875.77, "end": 2875.97, "word": " root", "probability": 0.93017578125}, {"start": 2875.97, "end": 2876.21, "word": " n.", "probability": 0.97265625}], "temperature": 1.0}, {"id": 107, "seek": 290558, "start": 2877.24, "end": 2905.58, "text": " That means x bar equals mu plus z sigma over root n. 
Exactly the same equation we got in chapter six, but there, in that one we have x equals mu plus z sigma. Now we have x bar equals mu plus z sigma over root n, because we have different statistics. It's x bar instead of x. Now we are looking for these two values.", "tokens": [663, 1355, 2031, 2159, 6915, 2992, 1804, 710, 12771, 670, 5593, 297, 13, 7587, 264, 912, 5367, 321, 658, 294, 7187, 2309, 11, 457, 456, 11, 294, 300, 472, 321, 362, 2031, 6915, 2992, 1804, 710, 12771, 13, 823, 321, 362, 2031, 2159, 6915, 2992, 1804, 710, 12771, 670, 5593, 297, 11, 570, 321, 362, 819, 12523, 13, 467, 311, 2031, 2159, 2602, 295, 2031, 13, 823, 321, 366, 1237, 337, 613, 732, 4190, 13], "avg_logprob": -0.1942845453557215, "compression_ratio": 1.770949720670391, "no_speech_prob": 0.0, "words": [{"start": 2877.24, "end": 2877.56, "word": " That", "probability": 0.6884765625}, {"start": 2877.56, "end": 2877.88, "word": " means", "probability": 0.91259765625}, {"start": 2877.88, "end": 2878.14, "word": " x", "probability": 0.66064453125}, {"start": 2878.14, "end": 2878.34, "word": " bar", "probability": 0.7822265625}, {"start": 2878.34, "end": 2878.56, "word": " equals", "probability": 0.226806640625}, {"start": 2878.56, "end": 2878.86, "word": " mu", "probability": 0.8046875}, {"start": 2878.86, "end": 2879.28, "word": " plus", "probability": 0.931640625}, {"start": 2879.28, "end": 2880.1, "word": " z", "probability": 0.890625}, {"start": 2880.1, "end": 2880.6, "word": " sigma", "probability": 0.865234375}, {"start": 2880.6, "end": 2881.5, "word": " over", "probability": 0.83984375}, {"start": 2881.5, "end": 2881.7, "word": " root", "probability": 0.94873046875}, {"start": 2881.7, "end": 2881.82, "word": " n.", "probability": 0.8056640625}, {"start": 2882.24, "end": 2882.8, "word": " Exactly", "probability": 0.73828125}, {"start": 2882.8, "end": 2883.08, "word": " the", "probability": 0.912109375}, {"start": 2883.08, "end": 2883.38, "word": " same", "probability": 0.90087890625}, 
{"start": 2883.38, "end": 2883.82, "word": " equation", "probability": 0.94921875}, {"start": 2883.82, "end": 2884.08, "word": " we", "probability": 0.857421875}, {"start": 2884.08, "end": 2884.26, "word": " got", "probability": 0.87646484375}, {"start": 2884.26, "end": 2884.44, "word": " in", "probability": 0.8740234375}, {"start": 2884.44, "end": 2884.68, "word": " chapter", "probability": 0.64453125}, {"start": 2884.68, "end": 2885.16, "word": " six,", "probability": 0.5673828125}, {"start": 2885.68, "end": 2885.96, "word": " but", "probability": 0.87890625}, {"start": 2885.96, "end": 2887.34, "word": " there,", "probability": 0.4111328125}, {"start": 2887.62, "end": 2887.72, "word": " in", "probability": 0.55859375}, {"start": 2887.72, "end": 2888.2, "word": " that", "probability": 0.90673828125}, {"start": 2888.2, "end": 2888.38, "word": " one", "probability": 0.88720703125}, {"start": 2888.38, "end": 2888.52, "word": " we", "probability": 0.53955078125}, {"start": 2888.52, "end": 2888.84, "word": " have", "probability": 0.92529296875}, {"start": 2888.84, "end": 2889.84, "word": " x", "probability": 0.90966796875}, {"start": 2889.84, "end": 2890.08, "word": " equals", "probability": 0.7646484375}, {"start": 2890.08, "end": 2890.3, "word": " mu", "probability": 0.9658203125}, {"start": 2890.3, "end": 2890.52, "word": " plus", "probability": 0.943359375}, {"start": 2890.52, "end": 2890.68, "word": " z", "probability": 0.9775390625}, {"start": 2890.68, "end": 2890.96, "word": " sigma.", "probability": 0.87744140625}, {"start": 2891.88, "end": 2892.44, "word": " Now", "probability": 0.90576171875}, {"start": 2892.44, "end": 2892.64, "word": " we", "probability": 0.81494140625}, {"start": 2892.64, "end": 2892.84, "word": " have", "probability": 0.94775390625}, {"start": 2892.84, "end": 2893.08, "word": " x", "probability": 0.99072265625}, {"start": 2893.08, "end": 2893.32, "word": " bar", "probability": 0.94677734375}, {"start": 2893.32, "end": 2893.7, "word": " 
equals", "probability": 0.93212890625}, {"start": 2893.7, "end": 2893.92, "word": " mu", "probability": 0.97021484375}, {"start": 2893.92, "end": 2894.18, "word": " plus", "probability": 0.9560546875}, {"start": 2894.18, "end": 2894.54, "word": " z", "probability": 0.98828125}, {"start": 2894.54, "end": 2895.7, "word": " sigma", "probability": 0.8525390625}, {"start": 2895.7, "end": 2896.3, "word": " over", "probability": 0.89453125}, {"start": 2896.3, "end": 2896.58, "word": " root", "probability": 0.93701171875}, {"start": 2896.58, "end": 2896.82, "word": " n,", "probability": 0.97607421875}, {"start": 2897.24, "end": 2897.82, "word": " because", "probability": 0.7900390625}, {"start": 2897.82, "end": 2898.04, "word": " we", "probability": 0.9599609375}, {"start": 2898.04, "end": 2898.2, "word": " have", "probability": 0.93994140625}, {"start": 2898.2, "end": 2898.62, "word": " different", "probability": 0.84375}, {"start": 2898.62, "end": 2899.5, "word": " statistics.", "probability": 0.5849609375}, {"start": 2900.04, "end": 2900.26, "word": " It's", "probability": 0.921630859375}, {"start": 2900.26, "end": 2900.46, "word": " x", "probability": 0.99609375}, {"start": 2900.46, "end": 2900.78, "word": " bar", "probability": 0.94482421875}, {"start": 2900.78, "end": 2901.58, "word": " instead", "probability": 0.669921875}, {"start": 2901.58, "end": 2901.76, "word": " of", "probability": 0.9677734375}, {"start": 2901.76, "end": 2902.0, "word": " x.", "probability": 0.99658203125}, {"start": 2902.8, "end": 2903.0, "word": " Now", "probability": 0.95947265625}, {"start": 2903.0, "end": 2903.14, "word": " we", "probability": 0.818359375}, {"start": 2903.14, "end": 2903.26, "word": " are", "probability": 0.919921875}, {"start": 2903.26, "end": 2903.58, "word": " looking", "probability": 0.9111328125}, {"start": 2903.58, "end": 2904.2, "word": " for", "probability": 0.9501953125}, {"start": 2904.2, "end": 2904.96, "word": " these", "probability": 0.845703125}, {"start": 
2904.96, "end": 2905.14, "word": " two", "probability": 0.9296875}, {"start": 2905.14, "end": 2905.58, "word": " values.", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 108, "seek": 293663, "start": 2907.95, "end": 2936.63, "text": " Now let's compute z-score. The z-score for this point, which has area of 2.5% below it, is the same as the z-score, but in the opposite direction. If you remember, we got this value, 1.96. So my z-score is negative 1.96 to the left.", "tokens": [823, 718, 311, 14722, 710, 12, 4417, 418, 13, 440, 710, 12, 4417, 418, 337, 341, 935, 11, 597, 575, 1859, 295, 568, 13, 20, 4, 2507, 309, 11, 307, 264, 912, 382, 264, 710, 12, 4417, 418, 11, 457, 294, 264, 6182, 3513, 13, 759, 291, 1604, 11, 321, 658, 341, 2158, 11, 502, 13, 22962, 13, 407, 452, 710, 12, 4417, 418, 307, 3671, 502, 13, 22962, 281, 264, 1411, 13], "avg_logprob": -0.16575168898782214, "compression_ratio": 1.3952095808383234, "no_speech_prob": 0.0, "words": [{"start": 2907.95, "end": 2908.21, "word": " Now", "probability": 0.8642578125}, {"start": 2908.21, "end": 2908.51, "word": " let's", "probability": 0.801025390625}, {"start": 2908.51, "end": 2908.75, "word": " compute", "probability": 0.7353515625}, {"start": 2908.75, "end": 2909.09, "word": " z", "probability": 0.3720703125}, {"start": 2909.09, "end": 2909.41, "word": "-score.", "probability": 0.737548828125}, {"start": 2912.45, "end": 2913.01, "word": " The", "probability": 0.372314453125}, {"start": 2913.01, "end": 2913.21, "word": " z", "probability": 0.95068359375}, {"start": 2913.21, "end": 2913.39, "word": "-score", "probability": 0.9064127604166666}, {"start": 2913.39, "end": 2913.63, "word": " for", "probability": 0.90234375}, {"start": 2913.63, "end": 2913.89, "word": " this", "probability": 0.947265625}, {"start": 2913.89, "end": 2914.23, "word": " point,", "probability": 0.97265625}, {"start": 2914.47, "end": 2914.75, "word": " which", "probability": 0.9423828125}, {"start": 2914.75, "end": 2915.07, 
"word": " has", "probability": 0.93408203125}, {"start": 2915.07, "end": 2915.53, "word": " area", "probability": 0.60009765625}, {"start": 2915.53, "end": 2915.75, "word": " of", "probability": 0.95458984375}, {"start": 2915.75, "end": 2915.93, "word": " 2", "probability": 0.97900390625}, {"start": 2915.93, "end": 2916.37, "word": ".5", "probability": 0.992431640625}, {"start": 2916.37, "end": 2916.83, "word": "%", "probability": 0.89306640625}, {"start": 2916.83, "end": 2917.63, "word": " below", "probability": 0.8779296875}, {"start": 2917.63, "end": 2918.05, "word": " it,", "probability": 0.9287109375}, {"start": 2919.03, "end": 2919.71, "word": " is", "probability": 0.9267578125}, {"start": 2919.71, "end": 2919.91, "word": " the", "probability": 0.919921875}, {"start": 2919.91, "end": 2920.25, "word": " same", "probability": 0.9130859375}, {"start": 2920.25, "end": 2920.73, "word": " as", "probability": 0.95703125}, {"start": 2920.73, "end": 2920.91, "word": " the", "probability": 0.82421875}, {"start": 2920.91, "end": 2921.09, "word": " z", "probability": 0.98046875}, {"start": 2921.09, "end": 2921.41, "word": "-score,", "probability": 0.9249674479166666}, {"start": 2921.61, "end": 2921.65, "word": " but", "probability": 0.9208984375}, {"start": 2921.65, "end": 2921.81, "word": " in", "probability": 0.89208984375}, {"start": 2921.81, "end": 2921.93, "word": " the", "probability": 0.8046875}, {"start": 2921.93, "end": 2922.19, "word": " opposite", "probability": 0.62109375}, {"start": 2922.19, "end": 2923.09, "word": " direction.", "probability": 0.8515625}, {"start": 2923.57, "end": 2923.83, "word": " If", "probability": 0.73876953125}, {"start": 2923.83, "end": 2923.97, "word": " you", "probability": 0.875}, {"start": 2923.97, "end": 2924.27, "word": " remember,", "probability": 0.87841796875}, {"start": 2924.47, "end": 2924.73, "word": " we", "probability": 0.94921875}, {"start": 2924.73, "end": 2927.27, "word": " got", "probability": 0.88037109375}, 
{"start": 2927.27, "end": 2928.67, "word": " this", "probability": 0.94287109375}, {"start": 2928.67, "end": 2928.91, "word": " value,", "probability": 0.88720703125}, {"start": 2928.99, "end": 2929.15, "word": " 1", "probability": 0.9736328125}, {"start": 2929.15, "end": 2929.63, "word": ".96.", "probability": 0.978515625}, {"start": 2932.79, "end": 2933.35, "word": " So", "probability": 0.93212890625}, {"start": 2933.35, "end": 2933.95, "word": " my", "probability": 0.810546875}, {"start": 2933.95, "end": 2934.29, "word": " z", "probability": 0.9853515625}, {"start": 2934.29, "end": 2934.63, "word": "-score", "probability": 0.91064453125}, {"start": 2934.63, "end": 2934.89, "word": " is", "probability": 0.93505859375}, {"start": 2934.89, "end": 2935.23, "word": " negative", "probability": 0.476318359375}, {"start": 2935.23, "end": 2935.51, "word": " 1", "probability": 0.98876953125}, {"start": 2935.51, "end": 2936.01, "word": ".96", "probability": 0.99365234375}, {"start": 2936.01, "end": 2936.25, "word": " to", "probability": 0.9482421875}, {"start": 2936.25, "end": 2936.41, "word": " the", "probability": 0.91796875}, {"start": 2936.41, "end": 2936.63, "word": " left.", "probability": 0.9462890625}], "temperature": 1.0}, {"id": 109, "seek": 296522, "start": 2937.56, "end": 2965.22, "text": " and 1.9621 so now my x bar in the lower limit in this side in the left side equals mu which is 368 minus 1.96 times sigma which is 15", "tokens": [293, 502, 13, 22962, 4436, 370, 586, 452, 2031, 2159, 294, 264, 3126, 4948, 294, 341, 1252, 294, 264, 1411, 1252, 6915, 2992, 597, 307, 8652, 23, 3175, 502, 13, 22962, 1413, 12771, 597, 307, 2119], "avg_logprob": -0.29265202702702703, "compression_ratio": 1.3137254901960784, "no_speech_prob": 0.0, "words": [{"start": 2937.56, "end": 2937.86, "word": " and", "probability": 0.357666015625}, {"start": 2937.86, "end": 2938.08, "word": " 1", "probability": 0.9013671875}, {"start": 2938.08, "end": 2939.18, "word": ".9621", 
"probability": 0.6515909830729166}, {"start": 2939.18, "end": 2941.0, "word": " so", "probability": 0.26318359375}, {"start": 2941.0, "end": 2941.48, "word": " now", "probability": 0.90087890625}, {"start": 2941.48, "end": 2942.76, "word": " my", "probability": 0.75927734375}, {"start": 2942.76, "end": 2943.3, "word": " x", "probability": 0.8271484375}, {"start": 2943.3, "end": 2943.94, "word": " bar", "probability": 0.57080078125}, {"start": 2943.94, "end": 2944.42, "word": " in", "probability": 0.81787109375}, {"start": 2944.42, "end": 2944.64, "word": " the", "probability": 0.88525390625}, {"start": 2944.64, "end": 2945.0, "word": " lower", "probability": 0.464599609375}, {"start": 2945.0, "end": 2945.68, "word": " limit", "probability": 0.96142578125}, {"start": 2945.68, "end": 2948.2, "word": " in", "probability": 0.68505859375}, {"start": 2948.2, "end": 2948.48, "word": " this", "probability": 0.93505859375}, {"start": 2948.48, "end": 2948.88, "word": " side", "probability": 0.59521484375}, {"start": 2948.88, "end": 2950.3, "word": " in", "probability": 0.5078125}, {"start": 2950.3, "end": 2950.46, "word": " the", "probability": 0.91259765625}, {"start": 2950.46, "end": 2950.68, "word": " left", "probability": 0.95068359375}, {"start": 2950.68, "end": 2951.16, "word": " side", "probability": 0.8876953125}, {"start": 2951.16, "end": 2952.14, "word": " equals", "probability": 0.912109375}, {"start": 2952.14, "end": 2952.88, "word": " mu", "probability": 0.626953125}, {"start": 2952.88, "end": 2953.22, "word": " which", "probability": 0.8740234375}, {"start": 2953.22, "end": 2953.44, "word": " is", "probability": 0.94970703125}, {"start": 2953.44, "end": 2955.04, "word": " 368", "probability": 0.913330078125}, {"start": 2955.04, "end": 2957.98, "word": " minus", "probability": 0.837890625}, {"start": 2957.98, "end": 2959.72, "word": " 1", "probability": 0.98876953125}, {"start": 2959.72, "end": 2960.54, "word": ".96", "probability": 0.99267578125}, {"start": 
2960.54, "end": 2962.8, "word": " times", "probability": 0.890625}, {"start": 2962.8, "end": 2964.4, "word": " sigma", "probability": 0.62451171875}, {"start": 2964.4, "end": 2964.62, "word": " which", "probability": 0.94189453125}, {"start": 2964.62, "end": 2964.74, "word": " is", "probability": 0.9560546875}, {"start": 2964.74, "end": 2965.22, "word": " 15", "probability": 0.90576171875}], "temperature": 1.0}, {"id": 110, "seek": 299644, "start": 2967.16, "end": 2996.44, "text": " Divide by root, 25. So that's the value of the sample mean in the lower limit, or lower bound. On the other hand, expand our limit to our hand equals 316 plus 1.96 sigma over root. Simple calculation will give this result.", "tokens": [9886, 482, 538, 5593, 11, 3552, 13, 407, 300, 311, 264, 2158, 295, 264, 6889, 914, 294, 264, 3126, 4948, 11, 420, 3126, 5472, 13, 1282, 264, 661, 1011, 11, 5268, 527, 4948, 281, 527, 1011, 6915, 805, 6866, 1804, 502, 13, 22962, 12771, 670, 5593, 13, 21532, 17108, 486, 976, 341, 1874, 13], "avg_logprob": -0.3667613690549677, "compression_ratio": 1.39375, "no_speech_prob": 0.0, "words": [{"start": 2967.16, "end": 2967.62, "word": " Divide", "probability": 0.57696533203125}, {"start": 2967.62, "end": 2967.82, "word": " by", "probability": 0.95361328125}, {"start": 2967.82, "end": 2968.24, "word": " root,", "probability": 0.783203125}, {"start": 2969.08, "end": 2969.72, "word": " 25.", "probability": 0.91357421875}, {"start": 2970.34, "end": 2971.12, "word": " So", "probability": 0.9599609375}, {"start": 2971.12, "end": 2971.58, "word": " that's", "probability": 0.900390625}, {"start": 2971.58, "end": 2971.8, "word": " the", "probability": 0.92138671875}, {"start": 2971.8, "end": 2972.22, "word": " value", "probability": 0.97314453125}, {"start": 2972.22, "end": 2973.16, "word": " of", "probability": 0.95166015625}, {"start": 2973.16, "end": 2973.32, "word": " the", "probability": 0.86572265625}, {"start": 2973.32, "end": 2973.58, "word": " sample", 
"probability": 0.201171875}, {"start": 2973.58, "end": 2973.94, "word": " mean", "probability": 0.91357421875}, {"start": 2973.94, "end": 2974.84, "word": " in", "probability": 0.70068359375}, {"start": 2974.84, "end": 2974.98, "word": " the", "probability": 0.91455078125}, {"start": 2974.98, "end": 2975.22, "word": " lower", "probability": 0.85009765625}, {"start": 2975.22, "end": 2975.52, "word": " limit,", "probability": 0.57861328125}, {"start": 2976.02, "end": 2976.22, "word": " or", "probability": 0.313232421875}, {"start": 2976.22, "end": 2976.46, "word": " lower", "probability": 0.82568359375}, {"start": 2976.46, "end": 2976.78, "word": " bound.", "probability": 0.85107421875}, {"start": 2978.16, "end": 2978.9, "word": " On", "probability": 0.94873046875}, {"start": 2978.9, "end": 2979.04, "word": " the", "probability": 0.92138671875}, {"start": 2979.04, "end": 2979.26, "word": " other", "probability": 0.88427734375}, {"start": 2979.26, "end": 2979.74, "word": " hand,", "probability": 0.92138671875}, {"start": 2982.32, "end": 2982.7, "word": " expand", "probability": 0.1112060546875}, {"start": 2982.7, "end": 2983.0, "word": " our", "probability": 0.50244140625}, {"start": 2983.0, "end": 2983.4, "word": " limit", "probability": 0.9248046875}, {"start": 2983.4, "end": 2983.98, "word": " to", "probability": 0.57373046875}, {"start": 2983.98, "end": 2984.28, "word": " our", "probability": 0.62939453125}, {"start": 2984.28, "end": 2984.66, "word": " hand", "probability": 0.70703125}, {"start": 2984.66, "end": 2985.58, "word": " equals", "probability": 0.62841796875}, {"start": 2985.58, "end": 2987.5, "word": " 316", "probability": 0.66357421875}, {"start": 2987.5, "end": 2988.54, "word": " plus", "probability": 0.833984375}, {"start": 2988.54, "end": 2988.94, "word": " 1", "probability": 0.9755859375}, {"start": 2988.94, "end": 2989.72, "word": ".96", "probability": 0.9892578125}, {"start": 2989.72, "end": 2991.26, "word": " sigma", "probability": 
0.50244140625}, {"start": 2991.26, "end": 2991.52, "word": " over", "probability": 0.68115234375}, {"start": 2991.52, "end": 2991.82, "word": " root.", "probability": 0.1087646484375}, {"start": 2994.1, "end": 2994.9, "word": " Simple", "probability": 0.39892578125}, {"start": 2994.9, "end": 2995.44, "word": " calculation", "probability": 0.88720703125}, {"start": 2995.44, "end": 2995.7, "word": " will", "probability": 0.86181640625}, {"start": 2995.7, "end": 2995.9, "word": " give", "probability": 0.87255859375}, {"start": 2995.9, "end": 2996.1, "word": " this", "probability": 0.91357421875}, {"start": 2996.1, "end": 2996.44, "word": " result.", "probability": 0.92138671875}], "temperature": 1.0}, {"id": 111, "seek": 302767, "start": 2999.77, "end": 3027.67, "text": " The first X bar for the lower limit is 362.12, the other 373.1. So again for this data, for this example, the mean was, the population mean was 368, the population has non-degradation of 15, we select a random sample of size 25,", "tokens": [440, 700, 1783, 2159, 337, 264, 3126, 4948, 307, 8652, 17, 13, 4762, 11, 264, 661, 13435, 18, 13, 16, 13, 407, 797, 337, 341, 1412, 11, 337, 341, 1365, 11, 264, 914, 390, 11, 264, 4415, 914, 390, 8652, 23, 11, 264, 4415, 575, 2107, 12, 1479, 7165, 399, 295, 2119, 11, 321, 3048, 257, 4974, 6889, 295, 2744, 3552, 11], "avg_logprob": -0.3025793698098924, "compression_ratio": 1.506578947368421, "no_speech_prob": 0.0, "words": [{"start": 2999.77, "end": 3000.21, "word": " The", "probability": 0.50390625}, {"start": 3000.21, "end": 3000.71, "word": " first", "probability": 0.8349609375}, {"start": 3000.71, "end": 3001.29, "word": " X", "probability": 0.60498046875}, {"start": 3001.29, "end": 3001.63, "word": " bar", "probability": 0.59619140625}, {"start": 3001.63, "end": 3001.99, "word": " for", "probability": 0.81591796875}, {"start": 3001.99, "end": 3002.43, "word": " the", "probability": 0.8759765625}, {"start": 3002.43, "end": 3002.87, "word": " lower", 
"probability": 0.8095703125}, {"start": 3002.87, "end": 3003.15, "word": " limit", "probability": 0.76318359375}, {"start": 3003.15, "end": 3003.35, "word": " is", "probability": 0.67919921875}, {"start": 3003.35, "end": 3005.19, "word": " 362", "probability": 0.69873046875}, {"start": 3005.19, "end": 3005.87, "word": ".12,", "probability": 0.944580078125}, {"start": 3006.73, "end": 3006.87, "word": " the", "probability": 0.4892578125}, {"start": 3006.87, "end": 3007.11, "word": " other", "probability": 0.85498046875}, {"start": 3007.11, "end": 3008.37, "word": " 373", "probability": 0.849853515625}, {"start": 3008.37, "end": 3010.05, "word": ".1.", "probability": 0.53369140625}, {"start": 3011.45, "end": 3012.21, "word": " So", "probability": 0.89013671875}, {"start": 3012.21, "end": 3012.53, "word": " again", "probability": 0.81005859375}, {"start": 3012.53, "end": 3012.73, "word": " for", "probability": 0.611328125}, {"start": 3012.73, "end": 3012.93, "word": " this", "probability": 0.9462890625}, {"start": 3012.93, "end": 3013.27, "word": " data,", "probability": 0.85498046875}, {"start": 3014.71, "end": 3015.35, "word": " for", "probability": 0.8798828125}, {"start": 3015.35, "end": 3015.53, "word": " this", "probability": 0.94677734375}, {"start": 3015.53, "end": 3015.91, "word": " example,", "probability": 0.9765625}, {"start": 3016.75, "end": 3016.99, "word": " the", "probability": 0.91064453125}, {"start": 3016.99, "end": 3017.17, "word": " mean", "probability": 0.86279296875}, {"start": 3017.17, "end": 3017.53, "word": " was,", "probability": 0.93798828125}, {"start": 3017.63, "end": 3017.79, "word": " the", "probability": 0.916015625}, {"start": 3017.79, "end": 3018.03, "word": " population", "probability": 0.953125}, {"start": 3018.03, "end": 3018.45, "word": " mean", "probability": 0.95556640625}, {"start": 3018.45, "end": 3020.03, "word": " was", "probability": 0.70703125}, {"start": 3020.03, "end": 3020.99, "word": " 368,", "probability": 
0.9716796875}, {"start": 3022.23, "end": 3022.65, "word": " the", "probability": 0.85986328125}, {"start": 3022.65, "end": 3023.03, "word": " population", "probability": 0.947265625}, {"start": 3023.03, "end": 3023.35, "word": " has", "probability": 0.8642578125}, {"start": 3023.35, "end": 3023.59, "word": " non", "probability": 0.11077880859375}, {"start": 3023.59, "end": 3023.99, "word": "-degradation", "probability": 0.7181396484375}, {"start": 3023.99, "end": 3024.11, "word": " of", "probability": 0.8857421875}, {"start": 3024.11, "end": 3024.55, "word": " 15,", "probability": 0.9697265625}, {"start": 3025.49, "end": 3025.67, "word": " we", "probability": 0.78857421875}, {"start": 3025.67, "end": 3025.97, "word": " select", "probability": 0.841796875}, {"start": 3025.97, "end": 3026.11, "word": " a", "probability": 0.986328125}, {"start": 3026.11, "end": 3026.31, "word": " random", "probability": 0.8681640625}, {"start": 3026.31, "end": 3026.77, "word": " sample", "probability": 0.87353515625}, {"start": 3026.77, "end": 3026.97, "word": " of", "probability": 0.9306640625}, {"start": 3026.97, "end": 3027.21, "word": " size", "probability": 0.869140625}, {"start": 3027.21, "end": 3027.67, "word": " 25,", "probability": 0.947265625}], "temperature": 1.0}, {"id": 112, "seek": 305379, "start": 3029.57, "end": 3053.79, "text": " Then we end with this result that 95% of all sample means of sample size 25 are between these two values. It means that we have this big population and this population is symmetric, is normal. 
And we know that", "tokens": [1396, 321, 917, 365, 341, 1874, 300, 13420, 4, 295, 439, 6889, 1355, 295, 6889, 2744, 3552, 366, 1296, 613, 732, 4190, 13, 467, 1355, 300, 321, 362, 341, 955, 4415, 293, 341, 4415, 307, 32330, 11, 307, 2710, 13, 400, 321, 458, 300], "avg_logprob": -0.19809027645323013, "compression_ratio": 1.4189189189189189, "no_speech_prob": 0.0, "words": [{"start": 3029.57, "end": 3029.91, "word": " Then", "probability": 0.6064453125}, {"start": 3029.91, "end": 3030.09, "word": " we", "probability": 0.83544921875}, {"start": 3030.09, "end": 3030.31, "word": " end", "probability": 0.8974609375}, {"start": 3030.31, "end": 3030.47, "word": " with", "probability": 0.89208984375}, {"start": 3030.47, "end": 3030.69, "word": " this", "probability": 0.93017578125}, {"start": 3030.69, "end": 3031.07, "word": " result", "probability": 0.92919921875}, {"start": 3031.07, "end": 3031.55, "word": " that", "probability": 0.53857421875}, {"start": 3031.55, "end": 3033.75, "word": " 95", "probability": 0.9306640625}, {"start": 3033.75, "end": 3034.49, "word": "%", "probability": 0.84619140625}, {"start": 3034.49, "end": 3036.59, "word": " of", "probability": 0.96630859375}, {"start": 3036.59, "end": 3036.95, "word": " all", "probability": 0.927734375}, {"start": 3036.95, "end": 3037.45, "word": " sample", "probability": 0.80029296875}, {"start": 3037.45, "end": 3037.87, "word": " means", "probability": 0.84130859375}, {"start": 3037.87, "end": 3038.83, "word": " of", "probability": 0.56005859375}, {"start": 3038.83, "end": 3039.21, "word": " sample", "probability": 0.8544921875}, {"start": 3039.21, "end": 3039.61, "word": " size", "probability": 0.814453125}, {"start": 3039.61, "end": 3040.15, "word": " 25", "probability": 0.87646484375}, {"start": 3040.15, "end": 3041.11, "word": " are", "probability": 0.85693359375}, {"start": 3041.11, "end": 3041.41, "word": " between", "probability": 0.83056640625}, {"start": 3041.41, "end": 3041.71, "word": " these", 
"probability": 0.83837890625}, {"start": 3041.71, "end": 3041.87, "word": " two", "probability": 0.84375}, {"start": 3041.87, "end": 3042.15, "word": " values.", "probability": 0.82373046875}, {"start": 3042.27, "end": 3042.39, "word": " It", "probability": 0.74267578125}, {"start": 3042.39, "end": 3042.63, "word": " means", "probability": 0.92724609375}, {"start": 3042.63, "end": 3042.93, "word": " that", "probability": 0.9326171875}, {"start": 3042.93, "end": 3044.55, "word": " we", "probability": 0.849609375}, {"start": 3044.55, "end": 3044.81, "word": " have", "probability": 0.9482421875}, {"start": 3044.81, "end": 3045.21, "word": " this", "probability": 0.94873046875}, {"start": 3045.21, "end": 3045.67, "word": " big", "probability": 0.908203125}, {"start": 3045.67, "end": 3046.19, "word": " population", "probability": 0.9384765625}, {"start": 3046.19, "end": 3048.63, "word": " and", "probability": 0.43310546875}, {"start": 3048.63, "end": 3048.81, "word": " this", "probability": 0.94580078125}, {"start": 3048.81, "end": 3049.23, "word": " population", "probability": 0.9287109375}, {"start": 3049.23, "end": 3049.53, "word": " is", "probability": 0.95458984375}, {"start": 3049.53, "end": 3049.91, "word": " symmetric,", "probability": 0.65478515625}, {"start": 3050.21, "end": 3050.37, "word": " is", "probability": 0.8173828125}, {"start": 3050.37, "end": 3050.75, "word": " normal.", "probability": 0.8759765625}, {"start": 3052.47, "end": 3053.11, "word": " And", "probability": 0.93017578125}, {"start": 3053.11, "end": 3053.33, "word": " we", "probability": 0.94873046875}, {"start": 3053.33, "end": 3053.55, "word": " know", "probability": 0.8876953125}, {"start": 3053.55, "end": 3053.79, "word": " that", "probability": 0.8798828125}], "temperature": 1.0}, {"id": 113, "seek": 308180, "start": 3054.54, "end": 3081.8, "text": " The mean of this population is 368 with sigma of 15. We select from this population many samples. Each one has size of 25. 
Suppose, for example, we select 100 samples, 100 random samples.", "tokens": [440, 914, 295, 341, 4415, 307, 8652, 23, 365, 12771, 295, 2119, 13, 492, 3048, 490, 341, 4415, 867, 10938, 13, 6947, 472, 575, 2744, 295, 3552, 13, 21360, 11, 337, 1365, 11, 321, 3048, 2319, 10938, 11, 2319, 4974, 10938, 13], "avg_logprob": -0.18595566513926484, "compression_ratio": 1.4029850746268657, "no_speech_prob": 0.0, "words": [{"start": 3054.54, "end": 3054.88, "word": " The", "probability": 0.7861328125}, {"start": 3054.88, "end": 3055.1, "word": " mean", "probability": 0.96142578125}, {"start": 3055.1, "end": 3055.24, "word": " of", "probability": 0.96630859375}, {"start": 3055.24, "end": 3055.44, "word": " this", "probability": 0.93310546875}, {"start": 3055.44, "end": 3055.9, "word": " population", "probability": 0.96484375}, {"start": 3055.9, "end": 3056.1, "word": " is", "probability": 0.7578125}, {"start": 3056.1, "end": 3057.32, "word": " 368", "probability": 0.94775390625}, {"start": 3057.32, "end": 3058.34, "word": " with", "probability": 0.6416015625}, {"start": 3058.34, "end": 3058.74, "word": " sigma", "probability": 0.615234375}, {"start": 3058.74, "end": 3059.26, "word": " of", "probability": 0.66015625}, {"start": 3059.26, "end": 3060.68, "word": " 15.", "probability": 0.94580078125}, {"start": 3062.28, "end": 3063.06, "word": " We", "probability": 0.94921875}, {"start": 3063.06, "end": 3063.48, "word": " select", "probability": 0.66650390625}, {"start": 3063.48, "end": 3063.74, "word": " from", "probability": 0.87158203125}, {"start": 3063.74, "end": 3063.98, "word": " this", "probability": 0.94482421875}, {"start": 3063.98, "end": 3064.6, "word": " population", "probability": 0.94482421875}, {"start": 3064.6, "end": 3065.82, "word": " many", "probability": 0.7734375}, {"start": 3065.82, "end": 3066.32, "word": " samples.", "probability": 0.9072265625}, {"start": 3067.86, "end": 3068.32, "word": " Each", "probability": 0.88671875}, {"start": 3068.32, "end": 
3068.72, "word": " one", "probability": 0.939453125}, {"start": 3068.72, "end": 3070.28, "word": " has", "probability": 0.92724609375}, {"start": 3070.28, "end": 3070.82, "word": " size", "probability": 0.71533203125}, {"start": 3070.82, "end": 3071.08, "word": " of", "probability": 0.96337890625}, {"start": 3071.08, "end": 3071.6, "word": " 25.", "probability": 0.92431640625}, {"start": 3075.88, "end": 3076.48, "word": " Suppose,", "probability": 0.7841796875}, {"start": 3076.6, "end": 3076.72, "word": " for", "probability": 0.9521484375}, {"start": 3076.72, "end": 3077.1, "word": " example,", "probability": 0.97509765625}, {"start": 3078.0, "end": 3078.28, "word": " we", "probability": 0.9404296875}, {"start": 3078.28, "end": 3078.84, "word": " select", "probability": 0.8486328125}, {"start": 3078.84, "end": 3079.36, "word": " 100", "probability": 0.7138671875}, {"start": 3079.36, "end": 3080.02, "word": " samples,", "probability": 0.7958984375}, {"start": 3080.68, "end": 3080.94, "word": " 100", "probability": 0.87939453125}, {"start": 3080.94, "end": 3081.28, "word": " random", "probability": 0.84033203125}, {"start": 3081.28, "end": 3081.8, "word": " samples.", "probability": 0.8525390625}], "temperature": 1.0}, {"id": 114, "seek": 311066, "start": 3082.5, "end": 3110.66, "text": " So we end with different sample means. So we have 100 new sample means. 
In this case, you can say that 95 out of these, 95 out of 100, it means 95, one of these sample means.", "tokens": [407, 321, 917, 365, 819, 6889, 1355, 13, 407, 321, 362, 2319, 777, 6889, 1355, 13, 682, 341, 1389, 11, 291, 393, 584, 300, 13420, 484, 295, 613, 11, 13420, 484, 295, 2319, 11, 309, 1355, 13420, 11, 472, 295, 613, 6889, 1355, 13], "avg_logprob": -0.19270832803514268, "compression_ratio": 1.5086206896551724, "no_speech_prob": 0.0, "words": [{"start": 3082.5, "end": 3082.78, "word": " So", "probability": 0.66455078125}, {"start": 3082.78, "end": 3082.98, "word": " we", "probability": 0.76513671875}, {"start": 3082.98, "end": 3083.22, "word": " end", "probability": 0.890625}, {"start": 3083.22, "end": 3083.6, "word": " with", "probability": 0.8984375}, {"start": 3083.6, "end": 3085.86, "word": " different", "probability": 0.86083984375}, {"start": 3085.86, "end": 3087.26, "word": " sample", "probability": 0.57421875}, {"start": 3087.26, "end": 3087.62, "word": " means.", "probability": 0.89501953125}, {"start": 3093.72, "end": 3094.44, "word": " So", "probability": 0.8798828125}, {"start": 3094.44, "end": 3094.56, "word": " we", "probability": 0.865234375}, {"start": 3094.56, "end": 3094.76, "word": " have", "probability": 0.95556640625}, {"start": 3094.76, "end": 3095.56, "word": " 100", "probability": 0.818359375}, {"start": 3095.56, "end": 3096.28, "word": " new", "probability": 0.83642578125}, {"start": 3096.28, "end": 3097.24, "word": " sample", "probability": 0.88134765625}, {"start": 3097.24, "end": 3098.06, "word": " means.", "probability": 0.94580078125}, {"start": 3098.9, "end": 3099.2, "word": " In", "probability": 0.9326171875}, {"start": 3099.2, "end": 3099.4, "word": " this", "probability": 0.94677734375}, {"start": 3099.4, "end": 3099.66, "word": " case,", "probability": 0.923828125}, {"start": 3099.72, "end": 3099.82, "word": " you", "probability": 0.947265625}, {"start": 3099.82, "end": 3100.02, "word": " can", "probability": 
0.94775390625}, {"start": 3100.02, "end": 3100.24, "word": " say", "probability": 0.873046875}, {"start": 3100.24, "end": 3100.56, "word": " that", "probability": 0.91015625}, {"start": 3100.56, "end": 3101.76, "word": " 95", "probability": 0.9423828125}, {"start": 3101.76, "end": 3102.78, "word": " out", "probability": 0.89013671875}, {"start": 3102.78, "end": 3102.96, "word": " of", "probability": 0.9736328125}, {"start": 3102.96, "end": 3103.3, "word": " these,", "probability": 0.666015625}, {"start": 3103.86, "end": 3104.4, "word": " 95", "probability": 0.958984375}, {"start": 3104.4, "end": 3105.04, "word": " out", "probability": 0.8828125}, {"start": 3105.04, "end": 3105.18, "word": " of", "probability": 0.966796875}, {"start": 3105.18, "end": 3105.54, "word": " 100,", "probability": 0.93408203125}, {"start": 3106.12, "end": 3106.32, "word": " it", "probability": 0.8388671875}, {"start": 3106.32, "end": 3106.54, "word": " means", "probability": 0.9306640625}, {"start": 3106.54, "end": 3107.4, "word": " 95,", "probability": 0.96044921875}, {"start": 3109.12, "end": 3109.48, "word": " one", "probability": 0.8271484375}, {"start": 3109.48, "end": 3109.66, "word": " of", "probability": 0.966796875}, {"start": 3109.66, "end": 3109.88, "word": " these", "probability": 0.83203125}, {"start": 3109.88, "end": 3110.24, "word": " sample", "probability": 0.884765625}, {"start": 3110.24, "end": 3110.66, "word": " means.", "probability": 0.94384765625}], "temperature": 1.0}, {"id": 115, "seek": 313772, "start": 3111.6, "end": 3137.72, "text": " have values between 362.12 and 373.5. And what's remaining? Just five of these sample means would be out of this interval either below 362 or above the upper limit. 
So you are 95% sure that", "tokens": [362, 4190, 1296, 8652, 17, 13, 4762, 293, 13435, 18, 13, 20, 13, 400, 437, 311, 8877, 30, 1449, 1732, 295, 613, 6889, 1355, 576, 312, 484, 295, 341, 15035, 2139, 2507, 8652, 17, 420, 3673, 264, 6597, 4948, 13, 407, 291, 366, 13420, 4, 988, 300], "avg_logprob": -0.18815104477107525, "compression_ratio": 1.2666666666666666, "no_speech_prob": 0.0, "words": [{"start": 3111.6, "end": 3111.98, "word": " have", "probability": 0.493896484375}, {"start": 3111.98, "end": 3112.56, "word": " values", "probability": 0.958984375}, {"start": 3112.56, "end": 3113.24, "word": " between", "probability": 0.8916015625}, {"start": 3113.24, "end": 3115.7, "word": " 362", "probability": 0.920654296875}, {"start": 3115.7, "end": 3116.38, "word": ".12", "probability": 0.965087890625}, {"start": 3116.38, "end": 3116.92, "word": " and", "probability": 0.9248046875}, {"start": 3116.92, "end": 3117.7, "word": " 373", "probability": 0.9482421875}, {"start": 3117.7, "end": 3118.0, "word": ".5.", "probability": 0.54412841796875}, {"start": 3119.44, "end": 3119.98, "word": " And", "probability": 0.92529296875}, {"start": 3119.98, "end": 3121.4, "word": " what's", "probability": 0.885009765625}, {"start": 3121.4, "end": 3121.72, "word": " remaining?", "probability": 0.8466796875}, {"start": 3123.0, "end": 3123.96, "word": " Just", "probability": 0.8740234375}, {"start": 3123.96, "end": 3124.5, "word": " five", "probability": 0.76806640625}, {"start": 3124.5, "end": 3124.9, "word": " of", "probability": 0.96875}, {"start": 3124.9, "end": 3125.14, "word": " these", "probability": 0.83740234375}, {"start": 3125.14, "end": 3125.48, "word": " sample", "probability": 0.86962890625}, {"start": 3125.48, "end": 3125.86, "word": " means", "probability": 0.75439453125}, {"start": 3125.86, "end": 3126.4, "word": " would", "probability": 0.560546875}, {"start": 3126.4, "end": 3126.52, "word": " be", "probability": 0.9541015625}, {"start": 3126.52, "end": 3126.9, "word": " 
out", "probability": 0.89404296875}, {"start": 3126.9, "end": 3127.94, "word": " of", "probability": 0.95458984375}, {"start": 3127.94, "end": 3128.18, "word": " this", "probability": 0.94091796875}, {"start": 3128.18, "end": 3128.56, "word": " interval", "probability": 0.966796875}, {"start": 3128.56, "end": 3129.04, "word": " either", "probability": 0.6337890625}, {"start": 3129.04, "end": 3130.2, "word": " below", "probability": 0.85791015625}, {"start": 3130.2, "end": 3131.8, "word": " 362", "probability": 0.959716796875}, {"start": 3131.8, "end": 3132.38, "word": " or", "probability": 0.6328125}, {"start": 3132.38, "end": 3132.8, "word": " above", "probability": 0.9716796875}, {"start": 3132.8, "end": 3133.02, "word": " the", "probability": 0.9228515625}, {"start": 3133.02, "end": 3133.22, "word": " upper", "probability": 0.82373046875}, {"start": 3133.22, "end": 3133.52, "word": " limit.", "probability": 0.94091796875}, {"start": 3134.98, "end": 3135.28, "word": " So", "probability": 0.95361328125}, {"start": 3135.28, "end": 3135.44, "word": " you", "probability": 0.8173828125}, {"start": 3135.44, "end": 3135.74, "word": " are", "probability": 0.9404296875}, {"start": 3135.74, "end": 3136.44, "word": " 95", "probability": 0.95849609375}, {"start": 3136.44, "end": 3136.82, "word": "%", "probability": 0.7705078125}, {"start": 3136.82, "end": 3137.36, "word": " sure", "probability": 0.90966796875}, {"start": 3137.36, "end": 3137.72, "word": " that", "probability": 0.80322265625}], "temperature": 1.0}, {"id": 116, "seek": 317051, "start": 3141.23, "end": 3170.51, "text": " The sample means lies between these two points. So, 5% of the sample means will be out. Make sense? Imagine that I have selected 200 samples. Now, how many X bar will be between these two values? 
95% of these 200.", "tokens": [440, 6889, 1355, 9134, 1296, 613, 732, 2793, 13, 407, 11, 1025, 4, 295, 264, 6889, 1355, 486, 312, 484, 13, 4387, 2020, 30, 11739, 300, 286, 362, 8209, 2331, 10938, 13, 823, 11, 577, 867, 1783, 2159, 486, 312, 1296, 613, 732, 4190, 30, 13420, 4, 295, 613, 2331, 13], "avg_logprob": -0.2029747607616278, "compression_ratio": 1.4657534246575343, "no_speech_prob": 0.0, "words": [{"start": 3141.23, "end": 3141.63, "word": " The", "probability": 0.5654296875}, {"start": 3141.63, "end": 3141.99, "word": " sample", "probability": 0.303466796875}, {"start": 3141.99, "end": 3142.35, "word": " means", "probability": 0.806640625}, {"start": 3142.35, "end": 3142.85, "word": " lies", "probability": 0.8662109375}, {"start": 3142.85, "end": 3143.59, "word": " between", "probability": 0.859375}, {"start": 3143.59, "end": 3143.83, "word": " these", "probability": 0.8486328125}, {"start": 3143.83, "end": 3143.99, "word": " two", "probability": 0.888671875}, {"start": 3143.99, "end": 3144.35, "word": " points.", "probability": 0.9150390625}, {"start": 3145.41, "end": 3145.55, "word": " So,", "probability": 0.904296875}, {"start": 3145.79, "end": 3145.85, "word": " 5", "probability": 0.6494140625}, {"start": 3145.85, "end": 3146.11, "word": "%", "probability": 0.857421875}, {"start": 3146.11, "end": 3146.43, "word": " of", "probability": 0.94970703125}, {"start": 3146.43, "end": 3146.55, "word": " the", "probability": 0.611328125}, {"start": 3146.55, "end": 3146.77, "word": " sample", "probability": 0.87744140625}, {"start": 3146.77, "end": 3146.99, "word": " means", "probability": 0.837890625}, {"start": 3146.99, "end": 3147.15, "word": " will", "probability": 0.85107421875}, {"start": 3147.15, "end": 3147.31, "word": " be", "probability": 0.95361328125}, {"start": 3147.31, "end": 3147.65, "word": " out.", "probability": 0.8310546875}, {"start": 3149.23, "end": 3149.47, "word": " Make", "probability": 0.8154296875}, {"start": 3149.47, "end": 3149.77, 
"word": " sense?", "probability": 0.8271484375}, {"start": 3151.81, "end": 3152.51, "word": " Imagine", "probability": 0.833984375}, {"start": 3152.51, "end": 3152.87, "word": " that", "probability": 0.896484375}, {"start": 3152.87, "end": 3154.67, "word": " I", "probability": 0.89501953125}, {"start": 3154.67, "end": 3155.31, "word": " have", "probability": 0.64013671875}, {"start": 3155.31, "end": 3155.77, "word": " selected", "probability": 0.88916015625}, {"start": 3155.77, "end": 3157.03, "word": " 200", "probability": 0.8623046875}, {"start": 3157.03, "end": 3157.51, "word": " samples.", "probability": 0.82861328125}, {"start": 3160.27, "end": 3160.73, "word": " Now,", "probability": 0.95068359375}, {"start": 3160.81, "end": 3161.01, "word": " how", "probability": 0.93115234375}, {"start": 3161.01, "end": 3161.45, "word": " many", "probability": 0.89501953125}, {"start": 3161.45, "end": 3163.11, "word": " X", "probability": 0.478271484375}, {"start": 3163.11, "end": 3163.51, "word": " bar", "probability": 0.78564453125}, {"start": 3163.51, "end": 3165.05, "word": " will", "probability": 0.84033203125}, {"start": 3165.05, "end": 3165.47, "word": " be", "probability": 0.95068359375}, {"start": 3165.47, "end": 3165.93, "word": " between", "probability": 0.873046875}, {"start": 3165.93, "end": 3166.17, "word": " these", "probability": 0.857421875}, {"start": 3166.17, "end": 3166.33, "word": " two", "probability": 0.90625}, {"start": 3166.33, "end": 3166.75, "word": " values?", "probability": 0.9697265625}, {"start": 3167.63, "end": 3168.43, "word": " 95", "probability": 0.95947265625}, {"start": 3168.43, "end": 3168.93, "word": "%", "probability": 0.98291015625}, {"start": 3168.93, "end": 3169.09, "word": " of", "probability": 0.96240234375}, {"start": 3169.09, "end": 3170.07, "word": " these", "probability": 0.79150390625}, {"start": 3170.07, "end": 3170.51, "word": " 200.", "probability": 0.85400390625}], "temperature": 1.0}, {"id": 117, "seek": 319316, 
"start": 3171.1, "end": 3193.16, "text": " So how many 95%? How many means in this case? 95% out of 200 is 190. 190. Just multiply. 95 multiplies by 200. It will give 190.", "tokens": [407, 577, 867, 13420, 4, 30, 1012, 867, 1355, 294, 341, 1389, 30, 13420, 4, 484, 295, 2331, 307, 37609, 13, 37609, 13, 1449, 12972, 13, 13420, 12788, 530, 538, 2331, 13, 467, 486, 976, 37609, 13], "avg_logprob": -0.3034539442313345, "compression_ratio": 1.205607476635514, "no_speech_prob": 0.0, "words": [{"start": 3171.1, "end": 3171.36, "word": " So", "probability": 0.728515625}, {"start": 3171.36, "end": 3171.54, "word": " how", "probability": 0.61083984375}, {"start": 3171.54, "end": 3171.84, "word": " many", "probability": 0.90234375}, {"start": 3171.84, "end": 3173.02, "word": " 95", "probability": 0.56103515625}, {"start": 3173.02, "end": 3173.44, "word": "%?", "probability": 0.63330078125}, {"start": 3173.92, "end": 3174.14, "word": " How", "probability": 0.93115234375}, {"start": 3174.14, "end": 3174.38, "word": " many", "probability": 0.904296875}, {"start": 3174.38, "end": 3174.76, "word": " means", "probability": 0.8876953125}, {"start": 3174.76, "end": 3175.56, "word": " in", "probability": 0.662109375}, {"start": 3175.56, "end": 3175.74, "word": " this", "probability": 0.94873046875}, {"start": 3175.74, "end": 3176.06, "word": " case?", "probability": 0.92333984375}, {"start": 3178.9, "end": 3179.8, "word": " 95", "probability": 0.7216796875}, {"start": 3179.8, "end": 3180.32, "word": "%", "probability": 0.93359375}, {"start": 3180.32, "end": 3180.94, "word": " out", "probability": 0.892578125}, {"start": 3180.94, "end": 3181.16, "word": " of", "probability": 0.9736328125}, {"start": 3181.16, "end": 3181.76, "word": " 200", "probability": 0.92578125}, {"start": 3181.76, "end": 3182.26, "word": " is", "probability": 0.92431640625}, {"start": 3182.26, "end": 3184.6, "word": " 190.", "probability": 0.6806640625}, {"start": 3185.48, "end": 3186.38, "word": " 190.", 
"probability": 0.398681640625}, {"start": 3187.52, "end": 3187.96, "word": " Just", "probability": 0.81201171875}, {"start": 3187.96, "end": 3188.52, "word": " multiply.", "probability": 0.908203125}, {"start": 3188.84, "end": 3189.3, "word": " 95", "probability": 0.95947265625}, {"start": 3189.3, "end": 3190.28, "word": " multiplies", "probability": 0.6510009765625}, {"start": 3190.28, "end": 3190.46, "word": " by", "probability": 0.96728515625}, {"start": 3190.46, "end": 3190.86, "word": " 200.", "probability": 0.9140625}, {"start": 3192.02, "end": 3192.12, "word": " It", "probability": 0.345703125}, {"start": 3192.12, "end": 3192.2, "word": " will", "probability": 0.7998046875}, {"start": 3192.2, "end": 3192.44, "word": " give", "probability": 0.87353515625}, {"start": 3192.44, "end": 3193.16, "word": " 190.", "probability": 0.837890625}], "temperature": 1.0}, {"id": 118, "seek": 322220, "start": 3202.74, "end": 3222.2, "text": " values between 362.12 and 373.8. Take any value, will have any value between these two values.", "tokens": [4190, 1296, 8652, 17, 13, 4762, 293, 13435, 18, 13, 23, 13, 3664, 604, 2158, 11, 486, 362, 604, 2158, 1296, 613, 732, 4190, 13], "avg_logprob": -0.33203125573121584, "compression_ratio": 1.2179487179487178, "no_speech_prob": 0.0, "words": [{"start": 3202.74, "end": 3204.14, "word": " values", "probability": 0.1995849609375}, {"start": 3204.14, "end": 3205.54, "word": " between", "probability": 0.283203125}, {"start": 3205.54, "end": 3209.86, "word": " 362", "probability": 0.898193359375}, {"start": 3209.86, "end": 3210.56, "word": ".12", "probability": 0.9267578125}, {"start": 3210.56, "end": 3211.92, "word": " and", "probability": 0.8779296875}, {"start": 3211.92, "end": 3212.88, "word": " 373", "probability": 0.932373046875}, {"start": 3212.88, "end": 3213.66, "word": ".8.", "probability": 0.7130126953125}, {"start": 3214.44, "end": 3215.2, "word": " Take", "probability": 0.5205078125}, {"start": 3215.2, "end": 3215.46, "word": 
" any", "probability": 0.91015625}, {"start": 3215.46, "end": 3215.88, "word": " value,", "probability": 0.97314453125}, {"start": 3217.16, "end": 3217.42, "word": " will", "probability": 0.372802734375}, {"start": 3217.42, "end": 3217.94, "word": " have", "probability": 0.9140625}, {"start": 3217.94, "end": 3218.26, "word": " any", "probability": 0.908203125}, {"start": 3218.26, "end": 3218.68, "word": " value", "probability": 0.9814453125}, {"start": 3218.68, "end": 3220.86, "word": " between", "probability": 0.81396484375}, {"start": 3220.86, "end": 3221.2, "word": " these", "probability": 0.8564453125}, {"start": 3221.2, "end": 3221.48, "word": " two", "probability": 0.87060546875}, {"start": 3221.48, "end": 3222.2, "word": " values.", "probability": 0.619140625}], "temperature": 1.0}, {"id": 119, "seek": 325390, "start": 3228.0, "end": 3253.9, "text": " In the previous one, we assumed that the population is normal distribution. If we go back a little bit here, we assumed the population is normal. If the population is normal, then the standard distribution of X bar is also normal distribution with minimum standard deviation of sigma over R. 
Now, the second case is", "tokens": [682, 264, 3894, 472, 11, 321, 15895, 300, 264, 4415, 307, 2710, 7316, 13, 759, 321, 352, 646, 257, 707, 857, 510, 11, 321, 15895, 264, 4415, 307, 2710, 13, 759, 264, 4415, 307, 2710, 11, 550, 264, 3832, 7316, 295, 1783, 2159, 307, 611, 2710, 7316, 365, 7285, 3832, 25163, 295, 12771, 670, 497, 13, 823, 11, 264, 1150, 1389, 307], "avg_logprob": -0.23189484741952684, "compression_ratio": 1.9268292682926829, "no_speech_prob": 0.0, "words": [{"start": 3228.0, "end": 3228.2, "word": " In", "probability": 0.5146484375}, {"start": 3228.2, "end": 3228.34, "word": " the", "probability": 0.92138671875}, {"start": 3228.34, "end": 3228.66, "word": " previous", "probability": 0.83056640625}, {"start": 3228.66, "end": 3229.24, "word": " one,", "probability": 0.90087890625}, {"start": 3229.6, "end": 3229.76, "word": " we", "probability": 0.95703125}, {"start": 3229.76, "end": 3230.44, "word": " assumed", "probability": 0.83544921875}, {"start": 3230.44, "end": 3230.98, "word": " that", "probability": 0.927734375}, {"start": 3230.98, "end": 3231.18, "word": " the", "probability": 0.8984375}, {"start": 3231.18, "end": 3231.64, "word": " population", "probability": 0.95703125}, {"start": 3231.64, "end": 3231.96, "word": " is", "probability": 0.94970703125}, {"start": 3231.96, "end": 3232.3, "word": " normal", "probability": 0.70849609375}, {"start": 3232.3, "end": 3232.88, "word": " distribution.", "probability": 0.8193359375}, {"start": 3233.74, "end": 3233.94, "word": " If", "probability": 0.92822265625}, {"start": 3233.94, "end": 3234.7, "word": " we", "probability": 0.67431640625}, {"start": 3234.7, "end": 3235.1, "word": " go", "probability": 0.86767578125}, {"start": 3235.1, "end": 3235.3, "word": " back", "probability": 0.8740234375}, {"start": 3235.3, "end": 3235.48, "word": " a", "probability": 0.45849609375}, {"start": 3235.48, "end": 3235.64, "word": " little", "probability": 0.86328125}, {"start": 3235.64, "end": 3236.0, "word": " 
bit", "probability": 0.95361328125}, {"start": 3236.0, "end": 3237.84, "word": " here,", "probability": 0.4228515625}, {"start": 3238.12, "end": 3238.44, "word": " we", "probability": 0.95703125}, {"start": 3238.44, "end": 3239.0, "word": " assumed", "probability": 0.64404296875}, {"start": 3239.0, "end": 3240.0, "word": " the", "probability": 0.73486328125}, {"start": 3240.0, "end": 3240.48, "word": " population", "probability": 0.943359375}, {"start": 3240.48, "end": 3241.06, "word": " is", "probability": 0.9521484375}, {"start": 3241.06, "end": 3242.28, "word": " normal.", "probability": 0.85107421875}, {"start": 3242.82, "end": 3243.24, "word": " If", "probability": 0.9619140625}, {"start": 3243.24, "end": 3243.38, "word": " the", "probability": 0.9033203125}, {"start": 3243.38, "end": 3243.72, "word": " population", "probability": 0.94873046875}, {"start": 3243.72, "end": 3243.98, "word": " is", "probability": 0.95458984375}, {"start": 3243.98, "end": 3244.34, "word": " normal,", "probability": 0.87109375}, {"start": 3245.0, "end": 3245.38, "word": " then", "probability": 0.85693359375}, {"start": 3245.38, "end": 3245.6, "word": " the", "probability": 0.90478515625}, {"start": 3245.6, "end": 3245.82, "word": " standard", "probability": 0.46728515625}, {"start": 3245.82, "end": 3246.42, "word": " distribution", "probability": 0.85791015625}, {"start": 3246.42, "end": 3246.6, "word": " of", "probability": 0.849609375}, {"start": 3246.6, "end": 3246.72, "word": " X", "probability": 0.7880859375}, {"start": 3246.72, "end": 3246.92, "word": " bar", "probability": 0.83984375}, {"start": 3246.92, "end": 3247.1, "word": " is", "probability": 0.953125}, {"start": 3247.1, "end": 3247.5, "word": " also", "probability": 0.884765625}, {"start": 3247.5, "end": 3247.84, "word": " normal", "probability": 0.77001953125}, {"start": 3247.84, "end": 3248.42, "word": " distribution", "probability": 0.86279296875}, {"start": 3248.42, "end": 3249.18, "word": " with", "probability": 
0.6865234375}, {"start": 3249.18, "end": 3249.54, "word": " minimum", "probability": 0.568359375}, {"start": 3249.54, "end": 3250.32, "word": " standard", "probability": 0.935546875}, {"start": 3250.32, "end": 3250.66, "word": " deviation", "probability": 0.8505859375}, {"start": 3250.66, "end": 3250.84, "word": " of", "probability": 0.95751953125}, {"start": 3250.84, "end": 3251.1, "word": " sigma", "probability": 0.7109375}, {"start": 3251.1, "end": 3251.3, "word": " over", "probability": 0.85009765625}, {"start": 3251.3, "end": 3251.6, "word": " R.", "probability": 0.2327880859375}, {"start": 3251.88, "end": 3252.56, "word": " Now,", "probability": 0.9521484375}, {"start": 3252.64, "end": 3252.76, "word": " the", "probability": 0.90673828125}, {"start": 3252.76, "end": 3253.04, "word": " second", "probability": 0.89697265625}, {"start": 3253.04, "end": 3253.48, "word": " case", "probability": 0.91650390625}, {"start": 3253.48, "end": 3253.9, "word": " is", "probability": 0.94287109375}], "temperature": 1.0}, {"id": 120, "seek": 328326, "start": 3255.74, "end": 3283.26, "text": " Suppose we are looking for the sampling distribution of the sample mean if the population is not normal. You don't have any information about your population, and you are looking for the sampling distribution of X bar. In this case, we can apply a new theorem called central limit theorem. 
This theorem says that", "tokens": [21360, 321, 366, 1237, 337, 264, 21179, 7316, 295, 264, 6889, 914, 498, 264, 4415, 307, 406, 2710, 13, 509, 500, 380, 362, 604, 1589, 466, 428, 4415, 11, 293, 291, 366, 1237, 337, 264, 21179, 7316, 295, 1783, 2159, 13, 682, 341, 1389, 11, 321, 393, 3079, 257, 777, 20904, 1219, 5777, 4948, 20904, 13, 639, 20904, 1619, 300], "avg_logprob": -0.18007171447159814, "compression_ratio": 1.7584269662921348, "no_speech_prob": 0.0, "words": [{"start": 3255.74, "end": 3256.16, "word": " Suppose", "probability": 0.47900390625}, {"start": 3256.16, "end": 3256.3, "word": " we", "probability": 0.8681640625}, {"start": 3256.3, "end": 3256.38, "word": " are", "probability": 0.86767578125}, {"start": 3256.38, "end": 3256.62, "word": " looking", "probability": 0.9072265625}, {"start": 3256.62, "end": 3256.86, "word": " for", "probability": 0.95166015625}, {"start": 3256.86, "end": 3257.06, "word": " the", "probability": 0.9140625}, {"start": 3257.06, "end": 3257.28, "word": " sampling", "probability": 0.6689453125}, {"start": 3257.28, "end": 3257.92, "word": " distribution", "probability": 0.84228515625}, {"start": 3257.92, "end": 3258.22, "word": " of", "probability": 0.95556640625}, {"start": 3258.22, "end": 3258.34, "word": " the", "probability": 0.84619140625}, {"start": 3258.34, "end": 3258.66, "word": " sample", "probability": 0.90234375}, {"start": 3258.66, "end": 3258.98, "word": " mean", "probability": 0.88330078125}, {"start": 3258.98, "end": 3259.4, "word": " if", "probability": 0.79248046875}, {"start": 3259.4, "end": 3259.58, "word": " the", "probability": 0.91455078125}, {"start": 3259.58, "end": 3260.0, "word": " population", "probability": 0.95068359375}, {"start": 3260.0, "end": 3260.24, "word": " is", "probability": 0.9580078125}, {"start": 3260.24, "end": 3260.44, "word": " not", "probability": 0.94189453125}, {"start": 3260.44, "end": 3260.82, "word": " normal.", "probability": 0.8505859375}, {"start": 3262.5, "end": 3263.1, 
"word": " You", "probability": 0.6513671875}, {"start": 3263.1, "end": 3263.28, "word": " don't", "probability": 0.870849609375}, {"start": 3263.28, "end": 3263.48, "word": " have", "probability": 0.9521484375}, {"start": 3263.48, "end": 3263.66, "word": " any", "probability": 0.8681640625}, {"start": 3263.66, "end": 3264.14, "word": " information", "probability": 0.8173828125}, {"start": 3264.14, "end": 3264.5, "word": " about", "probability": 0.90478515625}, {"start": 3264.5, "end": 3264.78, "word": " your", "probability": 0.89013671875}, {"start": 3264.78, "end": 3265.32, "word": " population,", "probability": 0.935546875}, {"start": 3266.16, "end": 3268.02, "word": " and", "probability": 0.93505859375}, {"start": 3268.02, "end": 3268.24, "word": " you", "probability": 0.9560546875}, {"start": 3268.24, "end": 3268.36, "word": " are", "probability": 0.88232421875}, {"start": 3268.36, "end": 3268.6, "word": " looking", "probability": 0.89990234375}, {"start": 3268.6, "end": 3268.9, "word": " for", "probability": 0.94970703125}, {"start": 3268.9, "end": 3269.18, "word": " the", "probability": 0.89697265625}, {"start": 3269.18, "end": 3269.42, "word": " sampling", "probability": 0.8212890625}, {"start": 3269.42, "end": 3269.94, "word": " distribution", "probability": 0.8486328125}, {"start": 3269.94, "end": 3270.14, "word": " of", "probability": 0.77001953125}, {"start": 3270.14, "end": 3270.24, "word": " X", "probability": 0.471923828125}, {"start": 3270.24, "end": 3270.52, "word": " bar.", "probability": 0.8134765625}, {"start": 3272.3, "end": 3272.9, "word": " In", "probability": 0.9580078125}, {"start": 3272.9, "end": 3273.14, "word": " this", "probability": 0.9462890625}, {"start": 3273.14, "end": 3273.52, "word": " case,", "probability": 0.90234375}, {"start": 3273.84, "end": 3274.04, "word": " we", "probability": 0.95263671875}, {"start": 3274.04, "end": 3274.26, "word": " can", "probability": 0.9453125}, {"start": 3274.26, "end": 3274.74, "word": " apply", 
"probability": 0.9267578125}, {"start": 3274.74, "end": 3277.8, "word": " a", "probability": 0.75732421875}, {"start": 3277.8, "end": 3278.04, "word": " new", "probability": 0.92431640625}, {"start": 3278.04, "end": 3278.34, "word": " theorem", "probability": 0.81787109375}, {"start": 3278.34, "end": 3278.9, "word": " called", "probability": 0.475341796875}, {"start": 3278.9, "end": 3279.86, "word": " central", "probability": 0.50048828125}, {"start": 3279.86, "end": 3280.14, "word": " limit", "probability": 0.95654296875}, {"start": 3280.14, "end": 3280.54, "word": " theorem.", "probability": 0.8642578125}, {"start": 3281.64, "end": 3282.04, "word": " This", "probability": 0.880859375}, {"start": 3282.04, "end": 3282.54, "word": " theorem", "probability": 0.83837890625}, {"start": 3282.54, "end": 3282.98, "word": " says", "probability": 0.8935546875}, {"start": 3282.98, "end": 3283.26, "word": " that", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 121, "seek": 331260, "start": 3285.04, "end": 3312.6, "text": " Even if the population is not normally distributed. In this case, the sampling distribution of the sample means will be not exactly normal, but approximately normally as long as the sample size is large enough. 
I mean if you select a random sample from unknown population and that population is not symmetric, is not normal.", "tokens": [2754, 498, 264, 4415, 307, 406, 5646, 12631, 13, 682, 341, 1389, 11, 264, 21179, 7316, 295, 264, 6889, 1355, 486, 312, 406, 2293, 2710, 11, 457, 10447, 5646, 382, 938, 382, 264, 6889, 2744, 307, 2416, 1547, 13, 286, 914, 498, 291, 3048, 257, 4974, 6889, 490, 9841, 4415, 293, 300, 4415, 307, 406, 32330, 11, 307, 406, 2710, 13], "avg_logprob": -0.1536038289627721, "compression_ratio": 1.766304347826087, "no_speech_prob": 0.0, "words": [{"start": 3285.04, "end": 3285.44, "word": " Even", "probability": 0.580078125}, {"start": 3285.44, "end": 3285.94, "word": " if", "probability": 0.943359375}, {"start": 3285.94, "end": 3286.1, "word": " the", "probability": 0.9140625}, {"start": 3286.1, "end": 3286.58, "word": " population", "probability": 0.95556640625}, {"start": 3286.58, "end": 3286.88, "word": " is", "probability": 0.9404296875}, {"start": 3286.88, "end": 3287.12, "word": " not", "probability": 0.95166015625}, {"start": 3287.12, "end": 3287.56, "word": " normally", "probability": 0.89990234375}, {"start": 3287.56, "end": 3288.08, "word": " distributed.", "probability": 0.9296875}, {"start": 3290.56, "end": 3291.2, "word": " In", "probability": 0.95703125}, {"start": 3291.2, "end": 3291.48, "word": " this", "probability": 0.94921875}, {"start": 3291.48, "end": 3291.92, "word": " case,", "probability": 0.91552734375}, {"start": 3293.5, "end": 3293.82, "word": " the", "probability": 0.91552734375}, {"start": 3293.82, "end": 3294.24, "word": " sampling", "probability": 0.94580078125}, {"start": 3294.24, "end": 3294.98, "word": " distribution", "probability": 0.85791015625}, {"start": 3294.98, "end": 3295.28, "word": " of", "probability": 0.95263671875}, {"start": 3295.28, "end": 3295.44, "word": " the", "probability": 0.8818359375}, {"start": 3295.44, "end": 3295.72, "word": " sample", "probability": 0.8798828125}, {"start": 3295.72, "end": 
3296.2, "word": " means", "probability": 0.92578125}, {"start": 3296.2, "end": 3297.7, "word": " will", "probability": 0.7509765625}, {"start": 3297.7, "end": 3298.08, "word": " be", "probability": 0.93896484375}, {"start": 3298.08, "end": 3299.4, "word": " not", "probability": 0.89697265625}, {"start": 3299.4, "end": 3299.92, "word": " exactly", "probability": 0.90673828125}, {"start": 3299.92, "end": 3300.38, "word": " normal,", "probability": 0.8681640625}, {"start": 3300.92, "end": 3301.16, "word": " but", "probability": 0.92919921875}, {"start": 3301.16, "end": 3301.92, "word": " approximately", "probability": 0.8681640625}, {"start": 3301.92, "end": 3302.52, "word": " normally", "probability": 0.448974609375}, {"start": 3302.52, "end": 3303.16, "word": " as", "probability": 0.486083984375}, {"start": 3303.16, "end": 3303.48, "word": " long", "probability": 0.923828125}, {"start": 3303.48, "end": 3303.78, "word": " as", "probability": 0.9638671875}, {"start": 3303.78, "end": 3303.96, "word": " the", "probability": 0.91357421875}, {"start": 3303.96, "end": 3304.2, "word": " sample", "probability": 0.89306640625}, {"start": 3304.2, "end": 3304.56, "word": " size", "probability": 0.83740234375}, {"start": 3304.56, "end": 3304.7, "word": " is", "probability": 0.95166015625}, {"start": 3304.7, "end": 3304.94, "word": " large", "probability": 0.96630859375}, {"start": 3304.94, "end": 3305.26, "word": " enough.", "probability": 0.8505859375}, {"start": 3306.66, "end": 3306.88, "word": " I", "probability": 0.99560546875}, {"start": 3306.88, "end": 3307.04, "word": " mean", "probability": 0.96533203125}, {"start": 3307.04, "end": 3307.16, "word": " if", "probability": 0.640625}, {"start": 3307.16, "end": 3307.32, "word": " you", "probability": 0.951171875}, {"start": 3307.32, "end": 3307.6, "word": " select", "probability": 0.8544921875}, {"start": 3307.6, "end": 3307.72, "word": " a", "probability": 0.94873046875}, {"start": 3307.72, "end": 3307.98, "word": " random", 
"probability": 0.86376953125}, {"start": 3307.98, "end": 3308.38, "word": " sample", "probability": 0.8974609375}, {"start": 3308.38, "end": 3308.84, "word": " from", "probability": 0.876953125}, {"start": 3308.84, "end": 3309.3, "word": " unknown", "probability": 0.6962890625}, {"start": 3309.3, "end": 3309.82, "word": " population", "probability": 0.92041015625}, {"start": 3309.82, "end": 3310.1, "word": " and", "probability": 0.48974609375}, {"start": 3310.1, "end": 3310.26, "word": " that", "probability": 0.92578125}, {"start": 3310.26, "end": 3310.68, "word": " population", "probability": 0.93017578125}, {"start": 3310.68, "end": 3310.94, "word": " is", "probability": 0.9501953125}, {"start": 3310.94, "end": 3311.12, "word": " not", "probability": 0.94580078125}, {"start": 3311.12, "end": 3311.58, "word": " symmetric,", "probability": 0.76123046875}, {"start": 3311.8, "end": 3311.96, "word": " is", "probability": 0.56689453125}, {"start": 3311.96, "end": 3312.18, "word": " not", "probability": 0.9443359375}, {"start": 3312.18, "end": 3312.6, "word": " normal.", "probability": 0.8798828125}], "temperature": 1.0}, {"id": 122, "seek": 333924, "start": 3313.88, "end": 3339.24, "text": " In this case, you can say that the sampling distribution of X bar is approximately normal as long as the sample size is large enough, with the same population, with the same mean, population mean is mu, and same standard deviation sigma over root N. 
So now, there is a condition here.", "tokens": [682, 341, 1389, 11, 291, 393, 584, 300, 264, 21179, 7316, 295, 1783, 2159, 307, 10447, 2710, 382, 938, 382, 264, 6889, 2744, 307, 2416, 1547, 11, 365, 264, 912, 4415, 11, 365, 264, 912, 914, 11, 4415, 914, 307, 2992, 11, 293, 912, 3832, 25163, 12771, 670, 5593, 426, 13, 407, 586, 11, 456, 307, 257, 4188, 510, 13], "avg_logprob": -0.17597335772436173, "compression_ratio": 1.6193181818181819, "no_speech_prob": 0.0, "words": [{"start": 3313.88, "end": 3314.12, "word": " In", "probability": 0.77197265625}, {"start": 3314.12, "end": 3314.38, "word": " this", "probability": 0.94580078125}, {"start": 3314.38, "end": 3314.68, "word": " case,", "probability": 0.91845703125}, {"start": 3314.76, "end": 3314.86, "word": " you", "probability": 0.931640625}, {"start": 3314.86, "end": 3315.08, "word": " can", "probability": 0.95068359375}, {"start": 3315.08, "end": 3315.28, "word": " say", "probability": 0.86279296875}, {"start": 3315.28, "end": 3315.62, "word": " that", "probability": 0.9365234375}, {"start": 3315.62, "end": 3317.06, "word": " the", "probability": 0.75439453125}, {"start": 3317.06, "end": 3317.56, "word": " sampling", "probability": 0.91259765625}, {"start": 3317.56, "end": 3318.32, "word": " distribution", "probability": 0.8388671875}, {"start": 3318.32, "end": 3320.32, "word": " of", "probability": 0.9658203125}, {"start": 3320.32, "end": 3320.64, "word": " X", "probability": 0.8876953125}, {"start": 3320.64, "end": 3320.98, "word": " bar", "probability": 0.81103515625}, {"start": 3320.98, "end": 3321.28, "word": " is", "probability": 0.958984375}, {"start": 3321.28, "end": 3322.0, "word": " approximately", "probability": 0.89208984375}, {"start": 3322.0, "end": 3322.52, "word": " normal", "probability": 0.865234375}, {"start": 3322.52, "end": 3324.1, "word": " as", "probability": 0.74853515625}, {"start": 3324.1, "end": 3324.36, "word": " long", "probability": 0.92919921875}, {"start": 3324.36, "end": 3324.74, 
"word": " as", "probability": 0.96630859375}, {"start": 3324.74, "end": 3324.98, "word": " the", "probability": 0.90966796875}, {"start": 3324.98, "end": 3325.2, "word": " sample", "probability": 0.8720703125}, {"start": 3325.2, "end": 3325.56, "word": " size", "probability": 0.85693359375}, {"start": 3325.56, "end": 3325.74, "word": " is", "probability": 0.9560546875}, {"start": 3325.74, "end": 3325.96, "word": " large", "probability": 0.96875}, {"start": 3325.96, "end": 3326.32, "word": " enough,", "probability": 0.85693359375}, {"start": 3327.32, "end": 3327.74, "word": " with", "probability": 0.912109375}, {"start": 3327.74, "end": 3327.94, "word": " the", "probability": 0.919921875}, {"start": 3327.94, "end": 3328.18, "word": " same", "probability": 0.90673828125}, {"start": 3328.18, "end": 3328.7, "word": " population,", "probability": 0.95458984375}, {"start": 3329.18, "end": 3329.38, "word": " with", "probability": 0.89990234375}, {"start": 3329.38, "end": 3329.56, "word": " the", "probability": 0.91748046875}, {"start": 3329.56, "end": 3329.9, "word": " same", "probability": 0.91015625}, {"start": 3329.9, "end": 3330.66, "word": " mean,", "probability": 0.97119140625}, {"start": 3330.92, "end": 3331.28, "word": " population", "probability": 0.8955078125}, {"start": 3331.28, "end": 3331.46, "word": " mean", "probability": 0.775390625}, {"start": 3331.46, "end": 3331.6, "word": " is", "probability": 0.9453125}, {"start": 3331.6, "end": 3331.86, "word": " mu,", "probability": 0.371337890625}, {"start": 3332.38, "end": 3332.66, "word": " and", "probability": 0.93310546875}, {"start": 3332.66, "end": 3332.94, "word": " same", "probability": 0.845703125}, {"start": 3332.94, "end": 3333.3, "word": " standard", "probability": 0.880859375}, {"start": 3333.3, "end": 3333.58, "word": " deviation", "probability": 0.951171875}, {"start": 3333.58, "end": 3333.92, "word": " sigma", "probability": 0.556640625}, {"start": 3333.92, "end": 3334.14, "word": " over", 
"probability": 0.8095703125}, {"start": 3334.14, "end": 3334.38, "word": " root", "probability": 0.89697265625}, {"start": 3334.38, "end": 3334.54, "word": " N.", "probability": 0.61669921875}, {"start": 3336.18, "end": 3336.7, "word": " So", "probability": 0.953125}, {"start": 3336.7, "end": 3337.0, "word": " now,", "probability": 0.67236328125}, {"start": 3337.68, "end": 3338.24, "word": " there", "probability": 0.9072265625}, {"start": 3338.24, "end": 3338.36, "word": " is", "probability": 0.95068359375}, {"start": 3338.36, "end": 3338.48, "word": " a", "probability": 0.99462890625}, {"start": 3338.48, "end": 3338.86, "word": " condition", "probability": 0.93505859375}, {"start": 3338.86, "end": 3339.24, "word": " here.", "probability": 0.8466796875}], "temperature": 1.0}, {"id": 123, "seek": 336824, "start": 3340.46, "end": 3368.24, "text": " If the population is not normal, I mean if you select a random sample from unknown population, the condition is N is large. If N is large enough, then you can apply this theorem, central limit theorem, which says that as the sample size goes larger and larger, or gets larger and larger, then the standard distribution of X bar is approximately normal in this", "tokens": [759, 264, 4415, 307, 406, 2710, 11, 286, 914, 498, 291, 3048, 257, 4974, 6889, 490, 9841, 4415, 11, 264, 4188, 307, 426, 307, 2416, 13, 759, 426, 307, 2416, 1547, 11, 550, 291, 393, 3079, 341, 20904, 11, 5777, 4948, 20904, 11, 597, 1619, 300, 382, 264, 6889, 2744, 1709, 4833, 293, 4833, 11, 420, 2170, 4833, 293, 4833, 11, 550, 264, 3832, 7316, 295, 1783, 2159, 307, 10447, 2710, 294, 341], "avg_logprob": -0.18581080758893811, "compression_ratio": 1.7475728155339805, "no_speech_prob": 0.0, "words": [{"start": 3340.46, "end": 3340.78, "word": " If", "probability": 0.71337890625}, {"start": 3340.78, "end": 3340.96, "word": " the", "probability": 0.89697265625}, {"start": 3340.96, "end": 3341.26, "word": " population", "probability": 0.95751953125}, {"start": 
3341.26, "end": 3341.68, "word": " is", "probability": 0.9541015625}, {"start": 3341.68, "end": 3341.9, "word": " not", "probability": 0.94287109375}, {"start": 3341.9, "end": 3342.34, "word": " normal,", "probability": 0.86572265625}, {"start": 3342.58, "end": 3342.72, "word": " I", "probability": 0.94189453125}, {"start": 3342.72, "end": 3342.82, "word": " mean", "probability": 0.97216796875}, {"start": 3342.82, "end": 3342.98, "word": " if", "probability": 0.72705078125}, {"start": 3342.98, "end": 3343.1, "word": " you", "probability": 0.9189453125}, {"start": 3343.1, "end": 3343.36, "word": " select", "probability": 0.8388671875}, {"start": 3343.36, "end": 3343.52, "word": " a", "probability": 0.970703125}, {"start": 3343.52, "end": 3343.74, "word": " random", "probability": 0.85009765625}, {"start": 3343.74, "end": 3344.06, "word": " sample", "probability": 0.8779296875}, {"start": 3344.06, "end": 3344.32, "word": " from", "probability": 0.83447265625}, {"start": 3344.32, "end": 3344.64, "word": " unknown", "probability": 0.61865234375}, {"start": 3344.64, "end": 3345.14, "word": " population,", "probability": 0.94775390625}, {"start": 3346.74, "end": 3346.96, "word": " the", "probability": 0.9130859375}, {"start": 3346.96, "end": 3347.44, "word": " condition", "probability": 0.943359375}, {"start": 3347.44, "end": 3347.9, "word": " is", "probability": 0.95068359375}, {"start": 3347.9, "end": 3348.06, "word": " N", "probability": 0.4775390625}, {"start": 3348.06, "end": 3349.22, "word": " is", "probability": 0.939453125}, {"start": 3349.22, "end": 3349.62, "word": " large.", "probability": 0.95556640625}, {"start": 3350.0, "end": 3350.4, "word": " If", "probability": 0.92919921875}, {"start": 3350.4, "end": 3350.54, "word": " N", "probability": 0.978515625}, {"start": 3350.54, "end": 3350.7, "word": " is", "probability": 0.95361328125}, {"start": 3350.7, "end": 3351.02, "word": " large", "probability": 0.9677734375}, {"start": 3351.02, "end": 3351.48, "word": 
" enough,", "probability": 0.8583984375}, {"start": 3352.16, "end": 3352.5, "word": " then", "probability": 0.85498046875}, {"start": 3352.5, "end": 3352.7, "word": " you", "probability": 0.96044921875}, {"start": 3352.7, "end": 3352.98, "word": " can", "probability": 0.94775390625}, {"start": 3352.98, "end": 3353.5, "word": " apply", "probability": 0.93701171875}, {"start": 3353.5, "end": 3353.84, "word": " this", "probability": 0.94384765625}, {"start": 3353.84, "end": 3354.28, "word": " theorem,", "probability": 0.8447265625}, {"start": 3355.94, "end": 3356.3, "word": " central", "probability": 0.68701171875}, {"start": 3356.3, "end": 3356.58, "word": " limit", "probability": 0.90869140625}, {"start": 3356.58, "end": 3356.8, "word": " theorem,", "probability": 0.82421875}, {"start": 3356.84, "end": 3357.04, "word": " which", "probability": 0.94482421875}, {"start": 3357.04, "end": 3357.32, "word": " says", "probability": 0.900390625}, {"start": 3357.32, "end": 3357.7, "word": " that", "probability": 0.91259765625}, {"start": 3357.7, "end": 3358.86, "word": " as", "probability": 0.8115234375}, {"start": 3358.86, "end": 3359.06, "word": " the", "probability": 0.91943359375}, {"start": 3359.06, "end": 3359.32, "word": " sample", "probability": 0.89990234375}, {"start": 3359.32, "end": 3359.72, "word": " size", "probability": 0.84375}, {"start": 3359.72, "end": 3360.16, "word": " goes", "probability": 0.6005859375}, {"start": 3360.16, "end": 3360.66, "word": " larger", "probability": 0.955078125}, {"start": 3360.66, "end": 3360.9, "word": " and", "probability": 0.931640625}, {"start": 3360.9, "end": 3361.22, "word": " larger,", "probability": 0.947265625}, {"start": 3361.42, "end": 3361.56, "word": " or", "probability": 0.93212890625}, {"start": 3361.56, "end": 3361.78, "word": " gets", "probability": 0.88916015625}, {"start": 3361.78, "end": 3362.16, "word": " larger", "probability": 0.9326171875}, {"start": 3362.16, "end": 3362.36, "word": " and", "probability": 
0.93212890625}, {"start": 3362.36, "end": 3362.64, "word": " larger,", "probability": 0.93603515625}, {"start": 3363.16, "end": 3363.64, "word": " then", "probability": 0.85302734375}, {"start": 3363.64, "end": 3364.24, "word": " the", "probability": 0.88916015625}, {"start": 3364.24, "end": 3364.52, "word": " standard", "probability": 0.42236328125}, {"start": 3364.52, "end": 3365.1, "word": " distribution", "probability": 0.8798828125}, {"start": 3365.1, "end": 3365.4, "word": " of", "probability": 0.6904296875}, {"start": 3365.4, "end": 3365.56, "word": " X", "probability": 0.85009765625}, {"start": 3365.56, "end": 3365.88, "word": " bar", "probability": 0.80908203125}, {"start": 3365.88, "end": 3366.86, "word": " is", "probability": 0.94580078125}, {"start": 3366.86, "end": 3367.44, "word": " approximately", "probability": 0.8486328125}, {"start": 3367.44, "end": 3367.92, "word": " normal", "probability": 0.798828125}, {"start": 3367.92, "end": 3368.1, "word": " in", "probability": 0.69091796875}, {"start": 3368.1, "end": 3368.24, "word": " this", "probability": 0.62939453125}], "temperature": 1.0}, {"id": 124, "seek": 339881, "start": 3370.43, "end": 3398.81, "text": " Again, look at the blue curve. Now, this one looks like skewed distribution to the right. Now, as the sample gets large enough, then it becomes normal. 
So, the sample distribution becomes almost normal regardless of the shape of the population.", "tokens": [3764, 11, 574, 412, 264, 3344, 7605, 13, 823, 11, 341, 472, 1542, 411, 8756, 26896, 7316, 281, 264, 558, 13, 823, 11, 382, 264, 6889, 2170, 2416, 1547, 11, 550, 309, 3643, 2710, 13, 407, 11, 264, 6889, 7316, 3643, 1920, 2710, 10060, 295, 264, 3909, 295, 264, 4415, 13], "avg_logprob": -0.1388972352903623, "compression_ratio": 1.6013071895424837, "no_speech_prob": 0.0, "words": [{"start": 3370.43, "end": 3371.29, "word": " Again,", "probability": 0.82275390625}, {"start": 3372.43, "end": 3372.93, "word": " look", "probability": 0.84228515625}, {"start": 3372.93, "end": 3373.19, "word": " at", "probability": 0.9677734375}, {"start": 3373.19, "end": 3374.09, "word": " the", "probability": 0.9169921875}, {"start": 3374.09, "end": 3374.69, "word": " blue", "probability": 0.966796875}, {"start": 3374.69, "end": 3375.03, "word": " curve.", "probability": 0.95068359375}, {"start": 3376.29, "end": 3376.55, "word": " Now,", "probability": 0.91357421875}, {"start": 3376.59, "end": 3376.79, "word": " this", "probability": 0.94580078125}, {"start": 3376.79, "end": 3377.01, "word": " one", "probability": 0.919921875}, {"start": 3377.01, "end": 3377.43, "word": " looks", "probability": 0.8603515625}, {"start": 3377.43, "end": 3378.65, "word": " like", "probability": 0.93310546875}, {"start": 3378.65, "end": 3379.63, "word": " skewed", "probability": 0.91015625}, {"start": 3379.63, "end": 3380.27, "word": " distribution", "probability": 0.8310546875}, {"start": 3380.27, "end": 3380.51, "word": " to", "probability": 0.9375}, {"start": 3380.51, "end": 3380.59, "word": " the", "probability": 0.91650390625}, {"start": 3380.59, "end": 3380.85, "word": " right.", "probability": 0.91552734375}, {"start": 3384.53, "end": 3385.17, "word": " Now,", "probability": 0.9423828125}, {"start": 3385.27, "end": 3385.53, "word": " as", "probability": 0.94775390625}, {"start": 3385.53, "end": 
3385.73, "word": " the", "probability": 0.91552734375}, {"start": 3385.73, "end": 3385.97, "word": " sample", "probability": 0.91650390625}, {"start": 3385.97, "end": 3386.47, "word": " gets", "probability": 0.869140625}, {"start": 3386.47, "end": 3387.21, "word": " large", "probability": 0.95654296875}, {"start": 3387.21, "end": 3387.77, "word": " enough,", "probability": 0.8681640625}, {"start": 3388.33, "end": 3388.57, "word": " then", "probability": 0.84814453125}, {"start": 3388.57, "end": 3388.73, "word": " it", "probability": 0.94677734375}, {"start": 3388.73, "end": 3389.31, "word": " becomes", "probability": 0.87451171875}, {"start": 3389.31, "end": 3391.05, "word": " normal.", "probability": 0.8701171875}, {"start": 3391.91, "end": 3392.21, "word": " So,", "probability": 0.9482421875}, {"start": 3392.43, "end": 3392.59, "word": " the", "probability": 0.91162109375}, {"start": 3392.59, "end": 3392.81, "word": " sample", "probability": 0.427490234375}, {"start": 3392.81, "end": 3393.47, "word": " distribution", "probability": 0.83447265625}, {"start": 3393.47, "end": 3393.91, "word": " becomes", "probability": 0.8818359375}, {"start": 3393.91, "end": 3394.41, "word": " almost", "probability": 0.828125}, {"start": 3394.41, "end": 3394.89, "word": " normal", "probability": 0.87841796875}, {"start": 3394.89, "end": 3395.99, "word": " regardless", "probability": 0.583984375}, {"start": 3395.99, "end": 3396.39, "word": " of", "probability": 0.97021484375}, {"start": 3396.39, "end": 3396.57, "word": " the", "probability": 0.921875}, {"start": 3396.57, "end": 3396.91, "word": " shape", "probability": 0.9072265625}, {"start": 3396.91, "end": 3397.35, "word": " of", "probability": 0.96923828125}, {"start": 3397.35, "end": 3397.75, "word": " the", "probability": 0.91845703125}, {"start": 3397.75, "end": 3398.81, "word": " population.", "probability": 0.9599609375}], "temperature": 1.0}, {"id": 125, "seek": 342797, "start": 3399.47, "end": 3427.97, "text": " I mean if 
you sample from unknown population, and that one has either right skewed or left skewed, if the sample size is large, then the sampling distribution of X bar becomes almost normal distribution regardless of the… so that's the central limit theorem. So again, if the population is not normal,", "tokens": [286, 914, 498, 291, 6889, 490, 9841, 4415, 11, 293, 300, 472, 575, 2139, 558, 8756, 26896, 420, 1411, 8756, 26896, 11, 498, 264, 6889, 2744, 307, 2416, 11, 550, 264, 21179, 7316, 295, 1783, 2159, 3643, 1920, 2710, 7316, 10060, 295, 264, 1260, 370, 300, 311, 264, 5777, 4948, 20904, 13, 407, 797, 11, 498, 264, 4415, 307, 406, 2710, 11], "avg_logprob": -0.17075892336784848, "compression_ratio": 1.625668449197861, "no_speech_prob": 0.0, "words": [{"start": 3399.47, "end": 3399.67, "word": " I", "probability": 0.76904296875}, {"start": 3399.67, "end": 3399.85, "word": " mean", "probability": 0.96826171875}, {"start": 3399.85, "end": 3400.01, "word": " if", "probability": 0.568359375}, {"start": 3400.01, "end": 3400.17, "word": " you", "probability": 0.9462890625}, {"start": 3400.17, "end": 3400.47, "word": " sample", "probability": 0.9072265625}, {"start": 3400.47, "end": 3400.89, "word": " from", "probability": 0.853515625}, {"start": 3400.89, "end": 3401.57, "word": " unknown", "probability": 0.8046875}, {"start": 3401.57, "end": 3402.11, "word": " population,", "probability": 0.9384765625}, {"start": 3403.13, "end": 3403.33, "word": " and", "probability": 0.7900390625}, {"start": 3403.33, "end": 3403.49, "word": " that", "probability": 0.93896484375}, {"start": 3403.49, "end": 3403.67, "word": " one", "probability": 0.93505859375}, {"start": 3403.67, "end": 3404.03, "word": " has", "probability": 0.94189453125}, {"start": 3404.03, "end": 3405.53, "word": " either", "probability": 0.9453125}, {"start": 3405.53, "end": 3406.11, "word": " right", "probability": 0.8857421875}, {"start": 3406.11, "end": 3406.59, "word": " skewed", "probability": 0.923828125}, {"start": 
3406.59, "end": 3406.79, "word": " or", "probability": 0.94873046875}, {"start": 3406.79, "end": 3407.05, "word": " left", "probability": 0.9423828125}, {"start": 3407.05, "end": 3407.49, "word": " skewed,", "probability": 0.95849609375}, {"start": 3408.41, "end": 3408.85, "word": " if", "probability": 0.947265625}, {"start": 3408.85, "end": 3409.07, "word": " the", "probability": 0.92578125}, {"start": 3409.07, "end": 3409.35, "word": " sample", "probability": 0.89501953125}, {"start": 3409.35, "end": 3409.73, "word": " size", "probability": 0.86083984375}, {"start": 3409.73, "end": 3409.91, "word": " is", "probability": 0.95361328125}, {"start": 3409.91, "end": 3410.33, "word": " large,", "probability": 0.970703125}, {"start": 3411.45, "end": 3412.13, "word": " then", "probability": 0.8671875}, {"start": 3412.13, "end": 3412.79, "word": " the", "probability": 0.86767578125}, {"start": 3412.79, "end": 3413.05, "word": " sampling", "probability": 0.734375}, {"start": 3413.05, "end": 3413.71, "word": " distribution", "probability": 0.8603515625}, {"start": 3413.71, "end": 3413.95, "word": " of", "probability": 0.9560546875}, {"start": 3413.95, "end": 3414.17, "word": " X", "probability": 0.802734375}, {"start": 3414.17, "end": 3414.41, "word": " bar", "probability": 0.74267578125}, {"start": 3414.41, "end": 3414.99, "word": " becomes", "probability": 0.88427734375}, {"start": 3414.99, "end": 3415.81, "word": " almost", "probability": 0.810546875}, {"start": 3415.81, "end": 3417.01, "word": " normal", "probability": 0.85205078125}, {"start": 3417.01, "end": 3417.73, "word": " distribution", "probability": 0.85107421875}, {"start": 3417.73, "end": 3418.37, "word": " regardless", "probability": 0.54296875}, {"start": 3418.37, "end": 3419.55, "word": " of", "probability": 0.96923828125}, {"start": 3419.55, "end": 3420.47, "word": " the…", "probability": 0.6151123046875}, {"start": 3420.47, "end": 3421.15, "word": " so", "probability": 0.53125}, {"start": 3421.15, "end": 
3421.53, "word": " that's", "probability": 0.857177734375}, {"start": 3421.53, "end": 3421.73, "word": " the", "probability": 0.91455078125}, {"start": 3421.73, "end": 3422.01, "word": " central", "probability": 0.75341796875}, {"start": 3422.01, "end": 3422.29, "word": " limit", "probability": 0.896484375}, {"start": 3422.29, "end": 3422.59, "word": " theorem.", "probability": 0.76611328125}, {"start": 3423.61, "end": 3423.85, "word": " So", "probability": 0.9521484375}, {"start": 3423.85, "end": 3424.13, "word": " again,", "probability": 0.77978515625}, {"start": 3426.39, "end": 3426.69, "word": " if", "probability": 0.95166015625}, {"start": 3426.69, "end": 3426.83, "word": " the", "probability": 0.90087890625}, {"start": 3426.83, "end": 3427.17, "word": " population", "probability": 0.9443359375}, {"start": 3427.17, "end": 3427.41, "word": " is", "probability": 0.947265625}, {"start": 3427.41, "end": 3427.61, "word": " not", "probability": 0.9453125}, {"start": 3427.61, "end": 3427.97, "word": " normal,", "probability": 0.86376953125}], "temperature": 1.0}, {"id": 126, "seek": 345610, "start": 3429.52, "end": 3456.1, "text": " The condition is only you have to select a large sample. In this case, the central tendency mu of X bar is same as mu. The variation is also sigma over root N. So again, standard distribution of X bar becomes normal as N. 
The theorem again says", "tokens": [440, 4188, 307, 787, 291, 362, 281, 3048, 257, 2416, 6889, 13, 682, 341, 1389, 11, 264, 5777, 18187, 2992, 295, 1783, 2159, 307, 912, 382, 2992, 13, 440, 12990, 307, 611, 12771, 670, 5593, 426, 13, 407, 797, 11, 3832, 7316, 295, 1783, 2159, 3643, 2710, 382, 426, 13, 440, 20904, 797, 1619], "avg_logprob": -0.259232945875688, "compression_ratio": 1.4759036144578312, "no_speech_prob": 0.0, "words": [{"start": 3429.52, "end": 3429.78, "word": " The", "probability": 0.6025390625}, {"start": 3429.78, "end": 3430.28, "word": " condition", "probability": 0.93408203125}, {"start": 3430.28, "end": 3430.58, "word": " is", "probability": 0.94580078125}, {"start": 3430.58, "end": 3430.98, "word": " only", "probability": 0.91357421875}, {"start": 3430.98, "end": 3431.3, "word": " you", "probability": 0.7880859375}, {"start": 3431.3, "end": 3431.48, "word": " have", "probability": 0.94873046875}, {"start": 3431.48, "end": 3431.62, "word": " to", "probability": 0.9697265625}, {"start": 3431.62, "end": 3432.18, "word": " select", "probability": 0.865234375}, {"start": 3432.18, "end": 3433.16, "word": " a", "probability": 0.94287109375}, {"start": 3433.16, "end": 3433.48, "word": " large", "probability": 0.96044921875}, {"start": 3433.48, "end": 3433.86, "word": " sample.", "probability": 0.4833984375}, {"start": 3434.58, "end": 3434.76, "word": " In", "probability": 0.92138671875}, {"start": 3434.76, "end": 3434.98, "word": " this", "probability": 0.94970703125}, {"start": 3434.98, "end": 3435.36, "word": " case,", "probability": 0.919921875}, {"start": 3435.96, "end": 3436.2, "word": " the", "probability": 0.8857421875}, {"start": 3436.2, "end": 3436.64, "word": " central", "probability": 0.37744140625}, {"start": 3436.64, "end": 3437.24, "word": " tendency", "probability": 0.87890625}, {"start": 3437.24, "end": 3437.52, "word": " mu", "probability": 0.302978515625}, {"start": 3437.52, "end": 3437.68, "word": " of", "probability": 
0.399169921875}, {"start": 3437.68, "end": 3437.8, "word": " X", "probability": 0.6318359375}, {"start": 3437.8, "end": 3438.0, "word": " bar", "probability": 0.93408203125}, {"start": 3438.0, "end": 3438.26, "word": " is", "probability": 0.94287109375}, {"start": 3438.26, "end": 3438.82, "word": " same", "probability": 0.73046875}, {"start": 3438.82, "end": 3439.04, "word": " as", "probability": 0.9599609375}, {"start": 3439.04, "end": 3439.34, "word": " mu.", "probability": 0.89599609375}, {"start": 3440.0, "end": 3440.26, "word": " The", "probability": 0.8798828125}, {"start": 3440.26, "end": 3440.68, "word": " variation", "probability": 0.84765625}, {"start": 3440.68, "end": 3441.46, "word": " is", "probability": 0.951171875}, {"start": 3441.46, "end": 3442.0, "word": " also", "probability": 0.88623046875}, {"start": 3442.0, "end": 3442.72, "word": " sigma", "probability": 0.8740234375}, {"start": 3442.72, "end": 3443.56, "word": " over", "probability": 0.91064453125}, {"start": 3443.56, "end": 3444.4, "word": " root", "probability": 0.931640625}, {"start": 3444.4, "end": 3444.64, "word": " N.", "probability": 0.40478515625}, {"start": 3448.74, "end": 3449.38, "word": " So", "probability": 0.935546875}, {"start": 3449.38, "end": 3449.7, "word": " again,", "probability": 0.74951171875}, {"start": 3450.0, "end": 3450.26, "word": " standard", "probability": 0.70703125}, {"start": 3450.26, "end": 3450.88, "word": " distribution", "probability": 0.84619140625}, {"start": 3450.88, "end": 3451.08, "word": " of", "probability": 0.94091796875}, {"start": 3451.08, "end": 3451.26, "word": " X", "probability": 0.97607421875}, {"start": 3451.26, "end": 3451.54, "word": " bar", "probability": 0.9462890625}, {"start": 3451.54, "end": 3452.12, "word": " becomes", "probability": 0.88232421875}, {"start": 3452.12, "end": 3453.1, "word": " normal", "probability": 0.8486328125}, {"start": 3453.1, "end": 3453.46, "word": " as", "probability": 0.89453125}, {"start": 3453.46, "end": 
3453.74, "word": " N.", "probability": 0.361572265625}, {"start": 3454.8, "end": 3455.12, "word": " The", "probability": 0.85498046875}, {"start": 3455.12, "end": 3455.44, "word": " theorem", "probability": 0.77587890625}, {"start": 3455.44, "end": 3455.7, "word": " again", "probability": 0.8896484375}, {"start": 3455.7, "end": 3456.1, "word": " says", "probability": 0.8759765625}], "temperature": 1.0}, {"id": 127, "seek": 348442, "start": 3457.72, "end": 3484.42, "text": " If we select a random sample from unknown population, then the standard distribution of X part is approximately normal as long as N gets large enough. Now the question is how large is large enough? There are two cases, or actually three cases.", "tokens": [759, 321, 3048, 257, 4974, 6889, 490, 9841, 4415, 11, 550, 264, 3832, 7316, 295, 1783, 644, 307, 10447, 2710, 382, 938, 382, 426, 2170, 2416, 1547, 13, 823, 264, 1168, 307, 577, 2416, 307, 2416, 1547, 30, 821, 366, 732, 3331, 11, 420, 767, 1045, 3331, 13], "avg_logprob": -0.21779337221262407, "compression_ratio": 1.4787878787878788, "no_speech_prob": 0.0, "words": [{"start": 3457.72, "end": 3458.0, "word": " If", "probability": 0.70166015625}, {"start": 3458.0, "end": 3458.18, "word": " we", "probability": 0.89306640625}, {"start": 3458.18, "end": 3458.46, "word": " select", "probability": 0.8544921875}, {"start": 3458.46, "end": 3458.62, "word": " a", "probability": 0.9833984375}, {"start": 3458.62, "end": 3458.82, "word": " random", "probability": 0.86474609375}, {"start": 3458.82, "end": 3459.22, "word": " sample", "probability": 0.8583984375}, {"start": 3459.22, "end": 3459.78, "word": " from", "probability": 0.87744140625}, {"start": 3459.78, "end": 3460.3, "word": " unknown", "probability": 0.7509765625}, {"start": 3460.3, "end": 3460.9, "word": " population,", "probability": 0.9345703125}, {"start": 3462.14, "end": 3462.28, "word": " then", "probability": 0.849609375}, {"start": 3462.28, "end": 3462.5, "word": " the", "probability": 
0.8916015625}, {"start": 3462.5, "end": 3462.7, "word": " standard", "probability": 0.7705078125}, {"start": 3462.7, "end": 3463.28, "word": " distribution", "probability": 0.83544921875}, {"start": 3463.28, "end": 3463.46, "word": " of", "probability": 0.95361328125}, {"start": 3463.46, "end": 3463.64, "word": " X", "probability": 0.55078125}, {"start": 3463.64, "end": 3463.86, "word": " part", "probability": 0.32080078125}, {"start": 3463.86, "end": 3464.0, "word": " is", "probability": 0.9462890625}, {"start": 3464.0, "end": 3464.56, "word": " approximately", "probability": 0.8564453125}, {"start": 3464.56, "end": 3465.08, "word": " normal", "probability": 0.86376953125}, {"start": 3465.08, "end": 3466.7, "word": " as", "probability": 0.65087890625}, {"start": 3466.7, "end": 3467.06, "word": " long", "probability": 0.9228515625}, {"start": 3467.06, "end": 3467.58, "word": " as", "probability": 0.96142578125}, {"start": 3467.58, "end": 3467.88, "word": " N", "probability": 0.61767578125}, {"start": 3467.88, "end": 3468.9, "word": " gets", "probability": 0.8173828125}, {"start": 3468.9, "end": 3469.96, "word": " large", "probability": 0.9521484375}, {"start": 3469.96, "end": 3470.28, "word": " enough.", "probability": 0.8984375}, {"start": 3472.62, "end": 3473.24, "word": " Now", "probability": 0.85546875}, {"start": 3473.24, "end": 3473.58, "word": " the", "probability": 0.6884765625}, {"start": 3473.58, "end": 3474.1, "word": " question", "probability": 0.91015625}, {"start": 3474.1, "end": 3474.78, "word": " is", "probability": 0.94970703125}, {"start": 3474.78, "end": 3475.1, "word": " how", "probability": 0.57421875}, {"start": 3475.1, "end": 3475.76, "word": " large", "probability": 0.89404296875}, {"start": 3475.76, "end": 3476.16, "word": " is", "probability": 0.927734375}, {"start": 3476.16, "end": 3476.54, "word": " large", "probability": 0.5517578125}, {"start": 3476.54, "end": 3477.1, "word": " enough?", "probability": 0.89111328125}, {"start": 
3480.12, "end": 3480.44, "word": " There", "probability": 0.7333984375}, {"start": 3480.44, "end": 3480.64, "word": " are", "probability": 0.94189453125}, {"start": 3480.64, "end": 3480.92, "word": " two", "probability": 0.8349609375}, {"start": 3480.92, "end": 3481.44, "word": " cases,", "probability": 0.908203125}, {"start": 3482.82, "end": 3483.24, "word": " or", "probability": 0.94677734375}, {"start": 3483.24, "end": 3483.66, "word": " actually", "probability": 0.89404296875}, {"start": 3483.66, "end": 3484.08, "word": " three", "probability": 0.8896484375}, {"start": 3484.08, "end": 3484.42, "word": " cases.", "probability": 0.91650390625}], "temperature": 1.0}, {"id": 128, "seek": 351001, "start": 3486.27, "end": 3510.01, "text": " For most distributions, if you don't know the exact shape, n above 30 is enough to use or to apply that theorem. So if n is greater than 30, it will give a standard distribution that is nearly normal. So if my n is large, it means above 30, or 30 and above this.", "tokens": [1171, 881, 37870, 11, 498, 291, 500, 380, 458, 264, 1900, 3909, 11, 297, 3673, 2217, 307, 1547, 281, 764, 420, 281, 3079, 300, 20904, 13, 407, 498, 297, 307, 5044, 813, 2217, 11, 309, 486, 976, 257, 3832, 7316, 300, 307, 6217, 2710, 13, 407, 498, 452, 297, 307, 2416, 11, 309, 1355, 3673, 2217, 11, 420, 2217, 293, 3673, 341, 13], "avg_logprob": -0.2316894493997097, "compression_ratio": 1.5290697674418605, "no_speech_prob": 0.0, "words": [{"start": 3486.27, "end": 3486.53, "word": " For", "probability": 0.4990234375}, {"start": 3486.53, "end": 3486.95, "word": " most", "probability": 0.91259765625}, {"start": 3486.95, "end": 3487.71, "word": " distributions,", "probability": 0.86328125}, {"start": 3489.53, "end": 3489.99, "word": " if", "probability": 0.89794921875}, {"start": 3489.99, "end": 3490.13, "word": " you", "probability": 0.96044921875}, {"start": 3490.13, "end": 3490.33, "word": " don't", "probability": 0.895751953125}, {"start": 3490.33, "end": 
3490.57, "word": " know", "probability": 0.89453125}, {"start": 3490.57, "end": 3490.79, "word": " the", "probability": 0.91162109375}, {"start": 3490.79, "end": 3491.31, "word": " exact", "probability": 0.9306640625}, {"start": 3491.31, "end": 3492.07, "word": " shape,", "probability": 0.939453125}, {"start": 3493.45, "end": 3493.77, "word": " n", "probability": 0.4306640625}, {"start": 3493.77, "end": 3494.25, "word": " above", "probability": 0.8828125}, {"start": 3494.25, "end": 3494.79, "word": " 30", "probability": 0.865234375}, {"start": 3494.79, "end": 3496.49, "word": " is", "probability": 0.8984375}, {"start": 3496.49, "end": 3496.93, "word": " enough", "probability": 0.8779296875}, {"start": 3496.93, "end": 3497.51, "word": " to", "probability": 0.9609375}, {"start": 3497.51, "end": 3497.89, "word": " use", "probability": 0.80908203125}, {"start": 3497.89, "end": 3498.11, "word": " or", "probability": 0.8701171875}, {"start": 3498.11, "end": 3498.29, "word": " to", "probability": 0.84814453125}, {"start": 3498.29, "end": 3498.67, "word": " apply", "probability": 0.92919921875}, {"start": 3498.67, "end": 3498.93, "word": " that", "probability": 0.501953125}, {"start": 3498.93, "end": 3499.31, "word": " theorem.", "probability": 0.80322265625}, {"start": 3500.05, "end": 3500.21, "word": " So", "probability": 0.9111328125}, {"start": 3500.21, "end": 3500.35, "word": " if", "probability": 0.6845703125}, {"start": 3500.35, "end": 3500.51, "word": " n", "probability": 0.9052734375}, {"start": 3500.51, "end": 3500.65, "word": " is", "probability": 0.8408203125}, {"start": 3500.65, "end": 3500.97, "word": " greater", "probability": 0.90771484375}, {"start": 3500.97, "end": 3501.21, "word": " than", "probability": 0.9404296875}, {"start": 3501.21, "end": 3501.63, "word": " 30,", "probability": 0.94970703125}, {"start": 3502.03, "end": 3502.17, "word": " it", "probability": 0.2705078125}, {"start": 3502.17, "end": 3502.29, "word": " will", "probability": 
0.8408203125}, {"start": 3502.29, "end": 3502.57, "word": " give", "probability": 0.87646484375}, {"start": 3502.57, "end": 3502.83, "word": " a", "probability": 0.76416015625}, {"start": 3502.83, "end": 3503.05, "word": " standard", "probability": 0.48388671875}, {"start": 3503.05, "end": 3503.57, "word": " distribution", "probability": 0.8515625}, {"start": 3503.57, "end": 3504.01, "word": " that", "probability": 0.9111328125}, {"start": 3504.01, "end": 3504.27, "word": " is", "probability": 0.93115234375}, {"start": 3504.27, "end": 3504.65, "word": " nearly", "probability": 0.771484375}, {"start": 3504.65, "end": 3505.07, "word": " normal.", "probability": 0.7353515625}, {"start": 3506.05, "end": 3506.35, "word": " So", "probability": 0.92919921875}, {"start": 3506.35, "end": 3506.43, "word": " if", "probability": 0.53515625}, {"start": 3506.43, "end": 3506.57, "word": " my", "probability": 0.92822265625}, {"start": 3506.57, "end": 3506.73, "word": " n", "probability": 0.8681640625}, {"start": 3506.73, "end": 3506.85, "word": " is", "probability": 0.951171875}, {"start": 3506.85, "end": 3507.27, "word": " large,", "probability": 0.94873046875}, {"start": 3507.53, "end": 3507.87, "word": " it", "probability": 0.90966796875}, {"start": 3507.87, "end": 3508.09, "word": " means", "probability": 0.90625}, {"start": 3508.09, "end": 3508.37, "word": " above", "probability": 0.7783203125}, {"start": 3508.37, "end": 3508.83, "word": " 30,", "probability": 0.951171875}, {"start": 3508.87, "end": 3509.07, "word": " or", "probability": 0.9453125}, {"start": 3509.07, "end": 3509.27, "word": " 30", "probability": 0.6337890625}, {"start": 3509.27, "end": 3509.51, "word": " and", "probability": 0.8466796875}, {"start": 3509.51, "end": 3509.77, "word": " above", "probability": 0.98095703125}, {"start": 3509.77, "end": 3510.01, "word": " this.", "probability": 0.61376953125}], "temperature": 1.0}, {"id": 129, "seek": 353061, "start": 3512.11, "end": 3530.61, "text": " For fairly 
symmetric distribution, I mean for nearly symmetric distribution, the distribution is not exactly normal, but approximately normal. In this case, N to be large enough if it is above 15. So, N greater than 15 will usually have same distribution as almost normal.", "tokens": [1171, 6457, 32330, 7316, 11, 286, 914, 337, 6217, 32330, 7316, 11, 264, 7316, 307, 406, 2293, 2710, 11, 457, 10447, 2710, 13, 682, 341, 1389, 11, 426, 281, 312, 2416, 1547, 498, 309, 307, 3673, 2119, 13, 407, 11, 426, 5044, 813, 2119, 486, 2673, 362, 912, 7316, 382, 1920, 2710, 13], "avg_logprob": -0.28327547289707045, "compression_ratio": 1.625, "no_speech_prob": 0.0, "words": [{"start": 3512.11, "end": 3512.39, "word": " For", "probability": 0.36328125}, {"start": 3512.39, "end": 3512.91, "word": " fairly", "probability": 0.475341796875}, {"start": 3512.91, "end": 3513.45, "word": " symmetric", "probability": 0.77783203125}, {"start": 3513.45, "end": 3514.09, "word": " distribution,", "probability": 0.46337890625}, {"start": 3514.23, "end": 3514.31, "word": " I", "probability": 0.94580078125}, {"start": 3514.31, "end": 3514.45, "word": " mean", "probability": 0.96630859375}, {"start": 3514.45, "end": 3514.75, "word": " for", "probability": 0.78076171875}, {"start": 3514.75, "end": 3515.21, "word": " nearly", "probability": 0.7490234375}, {"start": 3515.21, "end": 3515.79, "word": " symmetric", "probability": 0.82470703125}, {"start": 3515.79, "end": 3516.53, "word": " distribution,", "probability": 0.857421875}, {"start": 3517.11, "end": 3517.33, "word": " the", "probability": 0.79052734375}, {"start": 3517.33, "end": 3517.79, "word": " distribution", "probability": 0.78662109375}, {"start": 3517.79, "end": 3517.99, "word": " is", "probability": 0.8740234375}, {"start": 3517.99, "end": 3518.15, "word": " not", "probability": 0.9404296875}, {"start": 3518.15, "end": 3518.63, "word": " exactly", "probability": 0.89208984375}, {"start": 3518.63, "end": 3518.99, "word": " normal,", "probability": 
0.82666015625}, {"start": 3519.55, "end": 3519.79, "word": " but", "probability": 0.91455078125}, {"start": 3519.79, "end": 3520.45, "word": " approximately", "probability": 0.85546875}, {"start": 3520.45, "end": 3520.89, "word": " normal.", "probability": 0.8525390625}, {"start": 3521.33, "end": 3521.55, "word": " In", "probability": 0.92919921875}, {"start": 3521.55, "end": 3521.75, "word": " this", "probability": 0.94384765625}, {"start": 3521.75, "end": 3522.13, "word": " case,", "probability": 0.91796875}, {"start": 3522.71, "end": 3522.91, "word": " N", "probability": 0.1971435546875}, {"start": 3522.91, "end": 3523.01, "word": " to", "probability": 0.60693359375}, {"start": 3523.01, "end": 3523.15, "word": " be", "probability": 0.88671875}, {"start": 3523.15, "end": 3523.43, "word": " large", "probability": 0.96630859375}, {"start": 3523.43, "end": 3523.81, "word": " enough", "probability": 0.87744140625}, {"start": 3523.81, "end": 3524.25, "word": " if", "probability": 0.82373046875}, {"start": 3524.25, "end": 3524.39, "word": " it", "probability": 0.93798828125}, {"start": 3524.39, "end": 3524.53, "word": " is", "probability": 0.92822265625}, {"start": 3524.53, "end": 3524.89, "word": " above", "probability": 0.900390625}, {"start": 3524.89, "end": 3525.43, "word": " 15.", "probability": 0.857421875}, {"start": 3525.89, "end": 3526.31, "word": " So,", "probability": 0.9130859375}, {"start": 3526.35, "end": 3526.49, "word": " N", "probability": 0.77734375}, {"start": 3526.49, "end": 3526.79, "word": " greater", "probability": 0.80419921875}, {"start": 3526.79, "end": 3527.03, "word": " than", "probability": 0.9501953125}, {"start": 3527.03, "end": 3527.37, "word": " 15", "probability": 0.9521484375}, {"start": 3527.37, "end": 3527.63, "word": " will", "probability": 0.84375}, {"start": 3527.63, "end": 3528.17, "word": " usually", "probability": 0.9091796875}, {"start": 3528.17, "end": 3528.45, "word": " have", "probability": 0.350830078125}, {"start": 
3528.45, "end": 3528.77, "word": " same", "probability": 0.7119140625}, {"start": 3528.77, "end": 3529.23, "word": " distribution", "probability": 0.7275390625}, {"start": 3529.23, "end": 3529.51, "word": " as", "probability": 0.3330078125}, {"start": 3529.51, "end": 3530.01, "word": " almost", "probability": 0.63720703125}, {"start": 3530.01, "end": 3530.61, "word": " normal.", "probability": 0.79931640625}], "temperature": 1.0}, {"id": 130, "seek": 356430, "start": 3535.48, "end": 3564.3, "text": " For normal population, as we mentioned, of distributions, the semantic distribution of the mean is always. Okay, so again, there are three cases. For most distributions, N to be large, above 30. In this case, the distribution is nearly normal. For fairly symmetric distributions, N above 15 gives", "tokens": [1171, 2710, 4415, 11, 382, 321, 2835, 11, 295, 37870, 11, 264, 47982, 7316, 295, 264, 914, 307, 1009, 13, 1033, 11, 370, 797, 11, 456, 366, 1045, 3331, 13, 1171, 881, 37870, 11, 426, 281, 312, 2416, 11, 3673, 2217, 13, 682, 341, 1389, 11, 264, 7316, 307, 6217, 2710, 13, 1171, 6457, 32330, 37870, 11, 426, 3673, 2119, 2709], "avg_logprob": -0.2694052323218315, "compression_ratio": 1.6779661016949152, "no_speech_prob": 0.0, "words": [{"start": 3535.48, "end": 3535.74, "word": " For", "probability": 0.285888671875}, {"start": 3535.74, "end": 3536.26, "word": " normal", "probability": 0.78564453125}, {"start": 3536.26, "end": 3536.78, "word": " population,", "probability": 0.7470703125}, {"start": 3536.92, "end": 3537.0, "word": " as", "probability": 0.9248046875}, {"start": 3537.0, "end": 3537.14, "word": " we", "probability": 0.947265625}, {"start": 3537.14, "end": 3537.52, "word": " mentioned,", "probability": 0.775390625}, {"start": 3537.76, "end": 3537.84, "word": " of", "probability": 0.7275390625}, {"start": 3537.84, "end": 3538.42, "word": " distributions,", "probability": 0.80322265625}, {"start": 3539.16, "end": 3539.38, "word": " the", "probability": 
0.91455078125}, {"start": 3539.38, "end": 3539.64, "word": " semantic", "probability": 0.28125}, {"start": 3539.64, "end": 3540.3, "word": " distribution", "probability": 0.84326171875}, {"start": 3540.3, "end": 3540.58, "word": " of", "probability": 0.958984375}, {"start": 3540.58, "end": 3540.74, "word": " the", "probability": 0.91796875}, {"start": 3540.74, "end": 3540.96, "word": " mean", "probability": 0.92529296875}, {"start": 3540.96, "end": 3541.48, "word": " is", "probability": 0.9384765625}, {"start": 3541.48, "end": 3542.96, "word": " always.", "probability": 0.70458984375}, {"start": 3546.68, "end": 3547.32, "word": " Okay,", "probability": 0.560546875}, {"start": 3547.58, "end": 3547.94, "word": " so", "probability": 0.93798828125}, {"start": 3547.94, "end": 3548.22, "word": " again,", "probability": 0.9013671875}, {"start": 3548.7, "end": 3548.88, "word": " there", "probability": 0.8955078125}, {"start": 3548.88, "end": 3549.02, "word": " are", "probability": 0.9443359375}, {"start": 3549.02, "end": 3549.2, "word": " three", "probability": 0.818359375}, {"start": 3549.2, "end": 3549.68, "word": " cases.", "probability": 0.900390625}, {"start": 3551.44, "end": 3551.98, "word": " For", "probability": 0.9189453125}, {"start": 3551.98, "end": 3552.38, "word": " most", "probability": 0.88818359375}, {"start": 3552.38, "end": 3552.96, "word": " distributions,", "probability": 0.458740234375}, {"start": 3553.46, "end": 3553.56, "word": " N", "probability": 0.53369140625}, {"start": 3553.56, "end": 3553.76, "word": " to", "probability": 0.94189453125}, {"start": 3553.76, "end": 3553.9, "word": " be", "probability": 0.9521484375}, {"start": 3553.9, "end": 3554.26, "word": " large,", "probability": 0.95458984375}, {"start": 3554.6, "end": 3554.9, "word": " above", "probability": 0.95458984375}, {"start": 3554.9, "end": 3555.3, "word": " 30.", "probability": 0.88037109375}, {"start": 3555.8, "end": 3556.06, "word": " In", "probability": 0.95751953125}, {"start": 
3556.06, "end": 3556.28, "word": " this", "probability": 0.943359375}, {"start": 3556.28, "end": 3556.48, "word": " case,", "probability": 0.91943359375}, {"start": 3556.54, "end": 3556.66, "word": " the", "probability": 0.58447265625}, {"start": 3556.66, "end": 3557.0, "word": " distribution", "probability": 0.7431640625}, {"start": 3557.0, "end": 3557.3, "word": " is", "probability": 0.94189453125}, {"start": 3557.3, "end": 3557.56, "word": " nearly", "probability": 0.54345703125}, {"start": 3557.56, "end": 3558.16, "word": " normal.", "probability": 0.63427734375}, {"start": 3559.78, "end": 3560.46, "word": " For", "probability": 0.9345703125}, {"start": 3560.46, "end": 3560.92, "word": " fairly", "probability": 0.80908203125}, {"start": 3560.92, "end": 3561.26, "word": " symmetric", "probability": 0.6611328125}, {"start": 3561.26, "end": 3561.9, "word": " distributions,", "probability": 0.65283203125}, {"start": 3562.14, "end": 3562.3, "word": " N", "probability": 0.98583984375}, {"start": 3562.3, "end": 3562.5, "word": " above", "probability": 0.9111328125}, {"start": 3562.5, "end": 3563.1, "word": " 15", "probability": 0.923828125}, {"start": 3563.1, "end": 3564.3, "word": " gives", "probability": 0.7490234375}], "temperature": 1.0}, {"id": 131, "seek": 359096, "start": 3564.66, "end": 3590.96, "text": " almost normal distribution. But if the population by itself is normally distributed, always the sample mean is normally distributed. So that's the three cases. Now for this example, suppose we have a population. 
It means we don't know the distribution of that population.", "tokens": [1920, 2710, 7316, 13, 583, 498, 264, 4415, 538, 2564, 307, 5646, 12631, 11, 1009, 264, 6889, 914, 307, 5646, 12631, 13, 407, 300, 311, 264, 1045, 3331, 13, 823, 337, 341, 1365, 11, 7297, 321, 362, 257, 4415, 13, 467, 1355, 321, 500, 380, 458, 264, 7316, 295, 300, 4415, 13], "avg_logprob": -0.18101415431724405, "compression_ratio": 1.7106918238993711, "no_speech_prob": 0.0, "words": [{"start": 3564.66, "end": 3565.12, "word": " almost", "probability": 0.239013671875}, {"start": 3565.12, "end": 3565.58, "word": " normal", "probability": 0.85400390625}, {"start": 3565.58, "end": 3566.18, "word": " distribution.", "probability": 0.837890625}, {"start": 3567.04, "end": 3567.26, "word": " But", "probability": 0.93798828125}, {"start": 3567.26, "end": 3567.72, "word": " if", "probability": 0.83544921875}, {"start": 3567.72, "end": 3568.52, "word": " the", "probability": 0.890625}, {"start": 3568.52, "end": 3568.96, "word": " population", "probability": 0.95849609375}, {"start": 3568.96, "end": 3569.3, "word": " by", "probability": 0.90869140625}, {"start": 3569.3, "end": 3569.78, "word": " itself", "probability": 0.83837890625}, {"start": 3569.78, "end": 3570.1, "word": " is", "probability": 0.951171875}, {"start": 3570.1, "end": 3570.5, "word": " normally", "probability": 0.8916015625}, {"start": 3570.5, "end": 3571.1, "word": " distributed,", "probability": 0.90869140625}, {"start": 3571.54, "end": 3572.08, "word": " always", "probability": 0.8544921875}, {"start": 3572.08, "end": 3572.4, "word": " the", "probability": 0.8876953125}, {"start": 3572.4, "end": 3572.64, "word": " sample", "probability": 0.53955078125}, {"start": 3572.64, "end": 3573.02, "word": " mean", "probability": 0.630859375}, {"start": 3573.02, "end": 3573.82, "word": " is", "probability": 0.9462890625}, {"start": 3573.82, "end": 3574.2, "word": " normally", "probability": 0.89599609375}, {"start": 3574.2, "end": 3574.64, 
"word": " distributed.", "probability": 0.92724609375}, {"start": 3575.24, "end": 3575.44, "word": " So", "probability": 0.94677734375}, {"start": 3575.44, "end": 3575.66, "word": " that's", "probability": 0.82373046875}, {"start": 3575.66, "end": 3575.8, "word": " the", "probability": 0.9169921875}, {"start": 3575.8, "end": 3576.12, "word": " three", "probability": 0.90087890625}, {"start": 3576.12, "end": 3577.3, "word": " cases.", "probability": 0.89990234375}, {"start": 3580.04, "end": 3580.86, "word": " Now", "probability": 0.37744140625}, {"start": 3580.86, "end": 3583.44, "word": " for", "probability": 0.6044921875}, {"start": 3583.44, "end": 3583.68, "word": " this", "probability": 0.94482421875}, {"start": 3583.68, "end": 3584.1, "word": " example,", "probability": 0.97900390625}, {"start": 3584.9, "end": 3585.28, "word": " suppose", "probability": 0.82177734375}, {"start": 3585.28, "end": 3586.82, "word": " we", "probability": 0.927734375}, {"start": 3586.82, "end": 3587.22, "word": " have", "probability": 0.94921875}, {"start": 3587.22, "end": 3587.48, "word": " a", "probability": 0.99462890625}, {"start": 3587.48, "end": 3587.9, "word": " population.", "probability": 0.939453125}, {"start": 3588.38, "end": 3588.6, "word": " It", "probability": 0.84326171875}, {"start": 3588.6, "end": 3588.82, "word": " means", "probability": 0.93310546875}, {"start": 3588.82, "end": 3588.96, "word": " we", "probability": 0.9169921875}, {"start": 3588.96, "end": 3589.2, "word": " don't", "probability": 0.973876953125}, {"start": 3589.2, "end": 3589.46, "word": " know", "probability": 0.8955078125}, {"start": 3589.46, "end": 3589.68, "word": " the", "probability": 0.9111328125}, {"start": 3589.68, "end": 3590.14, "word": " distribution", "probability": 0.8486328125}, {"start": 3590.14, "end": 3590.34, "word": " of", "probability": 0.9658203125}, {"start": 3590.34, "end": 3590.5, "word": " that", "probability": 0.93017578125}, {"start": 3590.5, "end": 3590.96, "word": " 
population.", "probability": 0.93017578125}], "temperature": 1.0}, {"id": 132, "seek": 362102, "start": 3592.42, "end": 3621.02, "text": " And that population has mean of 8. Standard deviation of 3. And suppose a random sample of size 36 is selected. In this case, the population is not normal. It says A population, so you don't know the exact distribution. But N is large. It's above 30, so you can apply the central limit theorem. Now we ask about what's the probability that a sample means.", "tokens": [400, 300, 4415, 575, 914, 295, 1649, 13, 21298, 25163, 295, 805, 13, 400, 7297, 257, 4974, 6889, 295, 2744, 8652, 307, 8209, 13, 682, 341, 1389, 11, 264, 4415, 307, 406, 2710, 13, 467, 1619, 316, 4415, 11, 370, 291, 500, 380, 458, 264, 1900, 7316, 13, 583, 426, 307, 2416, 13, 467, 311, 3673, 2217, 11, 370, 291, 393, 3079, 264, 5777, 4948, 20904, 13, 823, 321, 1029, 466, 437, 311, 264, 8482, 300, 257, 6889, 1355, 13], "avg_logprob": -0.17380400940223975, "compression_ratio": 1.5478260869565217, "no_speech_prob": 0.0, "words": [{"start": 3592.42, "end": 3592.68, "word": " And", "probability": 0.7353515625}, {"start": 3592.68, "end": 3592.9, "word": " that", "probability": 0.900390625}, {"start": 3592.9, "end": 3593.34, "word": " population", "probability": 0.9384765625}, {"start": 3593.34, "end": 3593.66, "word": " has", "probability": 0.9345703125}, {"start": 3593.66, "end": 3593.88, "word": " mean", "probability": 0.865234375}, {"start": 3593.88, "end": 3594.18, "word": " of", "probability": 0.9609375}, {"start": 3594.18, "end": 3594.66, "word": " 8.", "probability": 0.55517578125}, {"start": 3595.92, "end": 3596.48, "word": " Standard", "probability": 0.84130859375}, {"start": 3596.48, "end": 3596.84, "word": " deviation", "probability": 0.97265625}, {"start": 3596.84, "end": 3597.02, "word": " of", "probability": 0.90380859375}, {"start": 3597.02, "end": 3597.34, "word": " 3.", "probability": 0.86376953125}, {"start": 3598.2, "end": 3598.34, "word": " And", 
"probability": 0.85791015625}, {"start": 3598.34, "end": 3598.8, "word": " suppose", "probability": 0.89892578125}, {"start": 3598.8, "end": 3599.08, "word": " a", "probability": 0.9091796875}, {"start": 3599.08, "end": 3599.34, "word": " random", "probability": 0.85791015625}, {"start": 3599.34, "end": 3599.74, "word": " sample", "probability": 0.8759765625}, {"start": 3599.74, "end": 3599.98, "word": " of", "probability": 0.94384765625}, {"start": 3599.98, "end": 3600.3, "word": " size", "probability": 0.85009765625}, {"start": 3600.3, "end": 3600.9, "word": " 36", "probability": 0.97216796875}, {"start": 3600.9, "end": 3601.2, "word": " is", "probability": 0.90478515625}, {"start": 3601.2, "end": 3601.54, "word": " selected.", "probability": 0.900390625}, {"start": 3602.78, "end": 3603.16, "word": " In", "probability": 0.9619140625}, {"start": 3603.16, "end": 3603.42, "word": " this", "probability": 0.94921875}, {"start": 3603.42, "end": 3603.74, "word": " case,", "probability": 0.91064453125}, {"start": 3603.86, "end": 3604.0, "word": " the", "probability": 0.90869140625}, {"start": 3604.0, "end": 3604.36, "word": " population", "probability": 0.94091796875}, {"start": 3604.36, "end": 3604.58, "word": " is", "probability": 0.94775390625}, {"start": 3604.58, "end": 3604.78, "word": " not", "probability": 0.94384765625}, {"start": 3604.78, "end": 3605.14, "word": " normal.", "probability": 0.8193359375}, {"start": 3605.3, "end": 3605.38, "word": " It", "probability": 0.94921875}, {"start": 3605.38, "end": 3605.66, "word": " says", "probability": 0.736328125}, {"start": 3605.66, "end": 3606.18, "word": " A", "probability": 0.401611328125}, {"start": 3606.18, "end": 3606.74, "word": " population,", "probability": 0.8349609375}, {"start": 3606.94, "end": 3607.04, "word": " so", "probability": 0.9443359375}, {"start": 3607.04, "end": 3607.24, "word": " you", "probability": 0.900390625}, {"start": 3607.24, "end": 3607.42, "word": " don't", "probability": 
0.9091796875}, {"start": 3607.42, "end": 3607.6, "word": " know", "probability": 0.88720703125}, {"start": 3607.6, "end": 3607.74, "word": " the", "probability": 0.91552734375}, {"start": 3607.74, "end": 3608.1, "word": " exact", "probability": 0.95654296875}, {"start": 3608.1, "end": 3608.68, "word": " distribution.", "probability": 0.8115234375}, {"start": 3610.36, "end": 3610.68, "word": " But", "probability": 0.95068359375}, {"start": 3610.68, "end": 3610.9, "word": " N", "probability": 0.68408203125}, {"start": 3610.9, "end": 3611.06, "word": " is", "probability": 0.95361328125}, {"start": 3611.06, "end": 3611.58, "word": " large.", "probability": 0.9638671875}, {"start": 3611.86, "end": 3612.1, "word": " It's", "probability": 0.86865234375}, {"start": 3612.1, "end": 3612.34, "word": " above", "probability": 0.91552734375}, {"start": 3612.34, "end": 3612.74, "word": " 30,", "probability": 0.91845703125}, {"start": 3612.86, "end": 3613.08, "word": " so", "probability": 0.9453125}, {"start": 3613.08, "end": 3613.38, "word": " you", "probability": 0.96240234375}, {"start": 3613.38, "end": 3613.62, "word": " can", "probability": 0.947265625}, {"start": 3613.62, "end": 3613.98, "word": " apply", "probability": 0.92138671875}, {"start": 3613.98, "end": 3614.24, "word": " the", "probability": 0.89501953125}, {"start": 3614.24, "end": 3614.5, "word": " central", "probability": 0.662109375}, {"start": 3614.5, "end": 3614.74, "word": " limit", "probability": 0.96435546875}, {"start": 3614.74, "end": 3615.06, "word": " theorem.", "probability": 0.8837890625}, {"start": 3615.92, "end": 3616.14, "word": " Now", "probability": 0.94091796875}, {"start": 3616.14, "end": 3616.26, "word": " we", "probability": 0.68115234375}, {"start": 3616.26, "end": 3616.44, "word": " ask", "probability": 0.93310546875}, {"start": 3616.44, "end": 3616.74, "word": " about", "probability": 0.8955078125}, {"start": 3616.74, "end": 3617.62, "word": " what's", "probability": 0.8046875}, {"start": 
3617.62, "end": 3617.76, "word": " the", "probability": 0.90966796875}, {"start": 3617.76, "end": 3618.22, "word": " probability", "probability": 0.96142578125}, {"start": 3618.22, "end": 3618.74, "word": " that", "probability": 0.62109375}, {"start": 3618.74, "end": 3620.38, "word": " a", "probability": 0.8837890625}, {"start": 3620.38, "end": 3620.64, "word": " sample", "probability": 0.9013671875}, {"start": 3620.64, "end": 3621.02, "word": " means.", "probability": 0.56640625}], "temperature": 1.0}, {"id": 133, "seek": 364930, "start": 3622.08, "end": 3649.3, "text": " is between what's the probability that the same element is between these two values. Now, the difference between this lecture and the previous ones was, here we are interested in the exponent of X. Now, even if the population is not normally distributed, the central limit theorem can be abused because N is large enough.", "tokens": [307, 1296, 437, 311, 264, 8482, 300, 264, 912, 4478, 307, 1296, 613, 732, 4190, 13, 823, 11, 264, 2649, 1296, 341, 7991, 293, 264, 3894, 2306, 390, 11, 510, 321, 366, 3102, 294, 264, 1278, 30365, 295, 1783, 13, 823, 11, 754, 498, 264, 4415, 307, 406, 5646, 12631, 11, 264, 5777, 4948, 20904, 393, 312, 27075, 570, 426, 307, 2416, 1547, 13], "avg_logprob": -0.3764423076923077, "compression_ratio": 1.6019900497512438, "no_speech_prob": 0.0, "words": [{"start": 3622.08, "end": 3622.38, "word": " is", "probability": 0.380615234375}, {"start": 3622.38, "end": 3622.96, "word": " between", "probability": 0.8984375}, {"start": 3622.96, "end": 3625.44, "word": " what's", "probability": 0.5567626953125}, {"start": 3625.44, "end": 3625.56, "word": " the", "probability": 0.88818359375}, {"start": 3625.56, "end": 3625.92, "word": " probability", "probability": 0.96142578125}, {"start": 3625.92, "end": 3626.3, "word": " that", "probability": 0.91748046875}, {"start": 3626.3, "end": 3626.48, "word": " the", "probability": 0.12548828125}, {"start": 3626.48, "end": 3626.68, "word": " 
same", "probability": 0.356689453125}, {"start": 3626.68, "end": 3626.88, "word": " element", "probability": 0.515625}, {"start": 3626.88, "end": 3628.04, "word": " is", "probability": 0.86083984375}, {"start": 3628.04, "end": 3628.42, "word": " between", "probability": 0.88916015625}, {"start": 3628.42, "end": 3628.72, "word": " these", "probability": 0.85302734375}, {"start": 3628.72, "end": 3628.86, "word": " two", "probability": 0.90380859375}, {"start": 3628.86, "end": 3629.24, "word": " values.", "probability": 0.81396484375}, {"start": 3632.18, "end": 3632.76, "word": " Now,", "probability": 0.728515625}, {"start": 3633.26, "end": 3633.52, "word": " the", "probability": 0.92041015625}, {"start": 3633.52, "end": 3634.04, "word": " difference", "probability": 0.86865234375}, {"start": 3634.04, "end": 3634.54, "word": " between", "probability": 0.8740234375}, {"start": 3634.54, "end": 3635.4, "word": " this", "probability": 0.810546875}, {"start": 3635.4, "end": 3635.64, "word": " lecture", "probability": 0.7802734375}, {"start": 3635.64, "end": 3635.82, "word": " and", "probability": 0.8837890625}, {"start": 3635.82, "end": 3636.22, "word": " the", "probability": 0.869140625}, {"start": 3636.22, "end": 3636.62, "word": " previous", "probability": 0.8203125}, {"start": 3636.62, "end": 3637.08, "word": " ones", "probability": 0.473876953125}, {"start": 3637.08, "end": 3637.42, "word": " was,", "probability": 0.60546875}, {"start": 3638.24, "end": 3639.0, "word": " here", "probability": 0.76806640625}, {"start": 3639.0, "end": 3639.14, "word": " we", "probability": 0.83740234375}, {"start": 3639.14, "end": 3639.28, "word": " are", "probability": 0.457275390625}, {"start": 3639.28, "end": 3639.52, "word": " interested", "probability": 0.8740234375}, {"start": 3639.52, "end": 3639.7, "word": " in", "probability": 0.94384765625}, {"start": 3639.7, "end": 3639.8, "word": " the", "probability": 0.333740234375}, {"start": 3639.8, "end": 3640.08, "word": " exponent", 
"probability": 0.3817138671875}, {"start": 3640.08, "end": 3640.36, "word": " of", "probability": 0.49072265625}, {"start": 3640.36, "end": 3640.78, "word": " X.", "probability": 0.56982421875}, {"start": 3642.24, "end": 3642.62, "word": " Now,", "probability": 0.9287109375}, {"start": 3643.0, "end": 3643.26, "word": " even", "probability": 0.8720703125}, {"start": 3643.26, "end": 3643.42, "word": " if", "probability": 0.93896484375}, {"start": 3643.42, "end": 3643.52, "word": " the", "probability": 0.85986328125}, {"start": 3643.52, "end": 3643.9, "word": " population", "probability": 0.94921875}, {"start": 3643.9, "end": 3644.18, "word": " is", "probability": 0.9404296875}, {"start": 3644.18, "end": 3644.44, "word": " not", "probability": 0.9453125}, {"start": 3644.44, "end": 3644.84, "word": " normally", "probability": 0.89306640625}, {"start": 3644.84, "end": 3645.44, "word": " distributed,", "probability": 0.91357421875}, {"start": 3646.12, "end": 3646.3, "word": " the", "probability": 0.90380859375}, {"start": 3646.3, "end": 3646.58, "word": " central", "probability": 0.28759765625}, {"start": 3646.58, "end": 3646.84, "word": " limit", "probability": 0.59619140625}, {"start": 3646.84, "end": 3647.08, "word": " theorem", "probability": 0.60009765625}, {"start": 3647.08, "end": 3647.3, "word": " can", "probability": 0.94189453125}, {"start": 3647.3, "end": 3647.48, "word": " be", "probability": 0.93798828125}, {"start": 3647.48, "end": 3647.92, "word": " abused", "probability": 0.8125}, {"start": 3647.92, "end": 3648.44, "word": " because", "probability": 0.5654296875}, {"start": 3648.44, "end": 3648.68, "word": " N", "probability": 0.347412109375}, {"start": 3648.68, "end": 3648.84, "word": " is", "probability": 0.91650390625}, {"start": 3648.84, "end": 3649.02, "word": " large", "probability": 0.52490234375}, {"start": 3649.02, "end": 3649.3, "word": " enough.", "probability": 0.9228515625}], "temperature": 1.0}, {"id": 134, "seek": 367865, "start": 3650.79, 
"end": 3678.65, "text": " So now, the mean of X bar equals mu, which is eight, and sigma of X bar equals sigma over root N, which is three over square root of 36, which is one-half. So now, the probability of X bar greater than 7.8, smaller than 8.2,", "tokens": [407, 586, 11, 264, 914, 295, 1783, 2159, 6915, 2992, 11, 597, 307, 3180, 11, 293, 12771, 295, 1783, 2159, 6915, 12771, 670, 5593, 426, 11, 597, 307, 1045, 670, 3732, 5593, 295, 8652, 11, 597, 307, 472, 12, 25461, 13, 407, 586, 11, 264, 8482, 295, 1783, 2159, 5044, 813, 1614, 13, 23, 11, 4356, 813, 1649, 13, 17, 11], "avg_logprob": -0.16381048651472216, "compression_ratio": 1.5957446808510638, "no_speech_prob": 0.0, "words": [{"start": 3650.79, "end": 3650.99, "word": " So", "probability": 0.88525390625}, {"start": 3650.99, "end": 3651.29, "word": " now,", "probability": 0.83447265625}, {"start": 3651.53, "end": 3651.73, "word": " the", "probability": 0.83837890625}, {"start": 3651.73, "end": 3651.89, "word": " mean", "probability": 0.9541015625}, {"start": 3651.89, "end": 3652.03, "word": " of", "probability": 0.97119140625}, {"start": 3652.03, "end": 3652.25, "word": " X", "probability": 0.712890625}, {"start": 3652.25, "end": 3652.57, "word": " bar", "probability": 0.8505859375}, {"start": 3652.57, "end": 3654.75, "word": " equals", "probability": 0.85791015625}, {"start": 3654.75, "end": 3655.27, "word": " mu,", "probability": 0.56494140625}, {"start": 3655.49, "end": 3655.51, "word": " which", "probability": 0.953125}, {"start": 3655.51, "end": 3655.59, "word": " is", "probability": 0.95361328125}, {"start": 3655.59, "end": 3655.91, "word": " eight,", "probability": 0.4599609375}, {"start": 3656.97, "end": 3657.31, "word": " and", "probability": 0.89794921875}, {"start": 3657.31, "end": 3657.59, "word": " sigma", "probability": 0.85595703125}, {"start": 3657.59, "end": 3657.75, "word": " of", "probability": 0.8955078125}, {"start": 3657.75, "end": 3657.95, "word": " X", "probability": 0.98193359375}, 
{"start": 3657.95, "end": 3658.27, "word": " bar", "probability": 0.9541015625}, {"start": 3658.27, "end": 3658.73, "word": " equals", "probability": 0.93896484375}, {"start": 3658.73, "end": 3659.07, "word": " sigma", "probability": 0.94384765625}, {"start": 3659.07, "end": 3659.27, "word": " over", "probability": 0.8046875}, {"start": 3659.27, "end": 3659.53, "word": " root", "probability": 0.84326171875}, {"start": 3659.53, "end": 3659.81, "word": " N,", "probability": 0.70849609375}, {"start": 3661.69, "end": 3662.03, "word": " which", "probability": 0.9541015625}, {"start": 3662.03, "end": 3662.17, "word": " is", "probability": 0.95361328125}, {"start": 3662.17, "end": 3662.41, "word": " three", "probability": 0.88037109375}, {"start": 3662.41, "end": 3662.77, "word": " over", "probability": 0.919921875}, {"start": 3662.77, "end": 3664.25, "word": " square", "probability": 0.413330078125}, {"start": 3664.25, "end": 3664.37, "word": " root", "probability": 0.892578125}, {"start": 3664.37, "end": 3664.49, "word": " of", "probability": 0.87890625}, {"start": 3664.49, "end": 3664.93, "word": " 36,", "probability": 0.853515625}, {"start": 3665.95, "end": 3666.47, "word": " which", "probability": 0.9541015625}, {"start": 3666.47, "end": 3666.59, "word": " is", "probability": 0.95068359375}, {"start": 3666.59, "end": 3666.79, "word": " one", "probability": 0.89208984375}, {"start": 3666.79, "end": 3667.15, "word": "-half.", "probability": 0.788330078125}, {"start": 3671.15, "end": 3671.75, "word": " So", "probability": 0.9248046875}, {"start": 3671.75, "end": 3672.01, "word": " now,", "probability": 0.90380859375}, {"start": 3673.31, "end": 3673.67, "word": " the", "probability": 0.91650390625}, {"start": 3673.67, "end": 3674.07, "word": " probability", "probability": 0.9560546875}, {"start": 3674.07, "end": 3674.37, "word": " of", "probability": 0.96826171875}, {"start": 3674.37, "end": 3674.61, "word": " X", "probability": 0.9892578125}, {"start": 3674.61, "end": 
3674.95, "word": " bar", "probability": 0.94677734375}, {"start": 3674.95, "end": 3675.97, "word": " greater", "probability": 0.68896484375}, {"start": 3675.97, "end": 3676.41, "word": " than", "probability": 0.9453125}, {"start": 3676.41, "end": 3676.67, "word": " 7", "probability": 0.8720703125}, {"start": 3676.67, "end": 3677.21, "word": ".8,", "probability": 0.998046875}, {"start": 3677.41, "end": 3677.75, "word": " smaller", "probability": 0.85302734375}, {"start": 3677.75, "end": 3678.03, "word": " than", "probability": 0.94970703125}, {"start": 3678.03, "end": 3678.19, "word": " 8", "probability": 0.99462890625}, {"start": 3678.19, "end": 3678.65, "word": ".2,", "probability": 0.998779296875}], "temperature": 1.0}, {"id": 135, "seek": 370609, "start": 3680.29, "end": 3706.09, "text": " Subtracting U, then divide by sigma over root N from both sides, so 7.8 minus 8 divided by sigma over root N. Here we have 8.2 minus 8 divided by sigma over root N. I will end with Z between minus 0.4 and 0.4. Now, up to this step, it's in U, for chapter 7. 
Now, Z between minus 0.4 up to 0.4, you have to go back.", "tokens": [8511, 83, 1897, 278, 624, 11, 550, 9845, 538, 12771, 670, 5593, 426, 490, 1293, 4881, 11, 370, 1614, 13, 23, 3175, 1649, 6666, 538, 12771, 670, 5593, 426, 13, 1692, 321, 362, 1649, 13, 17, 3175, 1649, 6666, 538, 12771, 670, 5593, 426, 13, 286, 486, 917, 365, 1176, 1296, 3175, 1958, 13, 19, 293, 1958, 13, 19, 13, 823, 11, 493, 281, 341, 1823, 11, 309, 311, 294, 624, 11, 337, 7187, 1614, 13, 823, 11, 1176, 1296, 3175, 1958, 13, 19, 493, 281, 1958, 13, 19, 11, 291, 362, 281, 352, 646, 13], "avg_logprob": -0.17413015955502226, "compression_ratio": 1.7403314917127073, "no_speech_prob": 0.0, "words": [{"start": 3680.29, "end": 3680.77, "word": " Subtracting", "probability": 0.75830078125}, {"start": 3680.77, "end": 3680.99, "word": " U,", "probability": 0.51025390625}, {"start": 3681.13, "end": 3681.41, "word": " then", "probability": 0.80712890625}, {"start": 3681.41, "end": 3681.69, "word": " divide", "probability": 0.86474609375}, {"start": 3681.69, "end": 3681.89, "word": " by", "probability": 0.93896484375}, {"start": 3681.89, "end": 3682.11, "word": " sigma", "probability": 0.65087890625}, {"start": 3682.11, "end": 3682.31, "word": " over", "probability": 0.85107421875}, {"start": 3682.31, "end": 3682.49, "word": " root", "probability": 0.9345703125}, {"start": 3682.49, "end": 3682.61, "word": " N", "probability": 0.84326171875}, {"start": 3682.61, "end": 3682.85, "word": " from", "probability": 0.84716796875}, {"start": 3682.85, "end": 3683.17, "word": " both", "probability": 0.89013671875}, {"start": 3683.17, "end": 3683.63, "word": " sides,", "probability": 0.869140625}, {"start": 3684.41, "end": 3684.59, "word": " so", "probability": 0.93359375}, {"start": 3684.59, "end": 3684.87, "word": " 7", "probability": 0.8515625}, {"start": 3684.87, "end": 3685.37, "word": ".8", "probability": 0.9697265625}, {"start": 3685.37, "end": 3685.67, "word": " minus", "probability": 0.95751953125}, {"start": 
3685.67, "end": 3686.21, "word": " 8", "probability": 0.96435546875}, {"start": 3686.21, "end": 3686.81, "word": " divided", "probability": 0.4033203125}, {"start": 3686.81, "end": 3687.05, "word": " by", "probability": 0.97412109375}, {"start": 3687.05, "end": 3687.43, "word": " sigma", "probability": 0.94384765625}, {"start": 3687.43, "end": 3687.67, "word": " over", "probability": 0.91162109375}, {"start": 3687.67, "end": 3687.89, "word": " root", "probability": 0.92138671875}, {"start": 3687.89, "end": 3688.15, "word": " N.", "probability": 0.99365234375}, {"start": 3688.95, "end": 3689.27, "word": " Here", "probability": 0.8251953125}, {"start": 3689.27, "end": 3689.39, "word": " we", "probability": 0.77783203125}, {"start": 3689.39, "end": 3689.59, "word": " have", "probability": 0.9462890625}, {"start": 3689.59, "end": 3689.79, "word": " 8", "probability": 0.98779296875}, {"start": 3689.79, "end": 3690.13, "word": ".2", "probability": 0.99365234375}, {"start": 3690.13, "end": 3690.35, "word": " minus", "probability": 0.98681640625}, {"start": 3690.35, "end": 3690.61, "word": " 8", "probability": 0.98779296875}, {"start": 3690.61, "end": 3690.91, "word": " divided", "probability": 0.83349609375}, {"start": 3690.91, "end": 3691.09, "word": " by", "probability": 0.9716796875}, {"start": 3691.09, "end": 3691.37, "word": " sigma", "probability": 0.94677734375}, {"start": 3691.37, "end": 3691.59, "word": " over", "probability": 0.91357421875}, {"start": 3691.59, "end": 3691.77, "word": " root", "probability": 0.91015625}, {"start": 3691.77, "end": 3691.99, "word": " N.", "probability": 0.99658203125}, {"start": 3692.69, "end": 3692.89, "word": " I", "probability": 0.990234375}, {"start": 3692.89, "end": 3693.05, "word": " will", "probability": 0.88330078125}, {"start": 3693.05, "end": 3693.23, "word": " end", "probability": 0.89013671875}, {"start": 3693.23, "end": 3693.57, "word": " with", "probability": 0.88232421875}, {"start": 3693.57, "end": 3693.89, "word": 
" Z", "probability": 0.853515625}, {"start": 3693.89, "end": 3694.47, "word": " between", "probability": 0.876953125}, {"start": 3694.47, "end": 3694.91, "word": " minus", "probability": 0.97607421875}, {"start": 3694.91, "end": 3695.13, "word": " 0", "probability": 0.53759765625}, {"start": 3695.13, "end": 3695.47, "word": ".4", "probability": 0.998779296875}, {"start": 3695.47, "end": 3695.71, "word": " and", "probability": 0.94091796875}, {"start": 3695.71, "end": 3695.87, "word": " 0", "probability": 0.94189453125}, {"start": 3695.87, "end": 3696.29, "word": ".4.", "probability": 0.999267578125}, {"start": 3696.97, "end": 3697.45, "word": " Now,", "probability": 0.9462890625}, {"start": 3697.55, "end": 3697.81, "word": " up", "probability": 0.9609375}, {"start": 3697.81, "end": 3697.93, "word": " to", "probability": 0.97021484375}, {"start": 3697.93, "end": 3698.15, "word": " this", "probability": 0.9453125}, {"start": 3698.15, "end": 3698.55, "word": " step,", "probability": 0.935546875}, {"start": 3698.69, "end": 3698.89, "word": " it's", "probability": 0.810546875}, {"start": 3698.89, "end": 3699.05, "word": " in", "probability": 0.197998046875}, {"start": 3699.05, "end": 3699.31, "word": " U,", "probability": 0.9853515625}, {"start": 3700.41, "end": 3700.67, "word": " for", "probability": 0.9208984375}, {"start": 3700.67, "end": 3701.05, "word": " chapter", "probability": 0.63037109375}, {"start": 3701.05, "end": 3701.39, "word": " 7.", "probability": 0.53515625}, {"start": 3702.23, "end": 3702.61, "word": " Now,", "probability": 0.95556640625}, {"start": 3702.73, "end": 3702.91, "word": " Z", "probability": 0.9765625}, {"start": 3702.91, "end": 3703.17, "word": " between", "probability": 0.88330078125}, {"start": 3703.17, "end": 3703.57, "word": " minus", "probability": 0.98046875}, {"start": 3703.57, "end": 3703.77, "word": " 0", "probability": 0.9814453125}, {"start": 3703.77, "end": 3703.99, "word": ".4", "probability": 0.99951171875}, {"start": 
3703.99, "end": 3704.19, "word": " up", "probability": 0.85107421875}, {"start": 3704.19, "end": 3704.27, "word": " to", "probability": 0.96728515625}, {"start": 3704.27, "end": 3704.45, "word": " 0", "probability": 0.9677734375}, {"start": 3704.45, "end": 3704.79, "word": ".4,", "probability": 0.999755859375}, {"start": 3705.15, "end": 3705.35, "word": " you", "probability": 0.8818359375}, {"start": 3705.35, "end": 3705.51, "word": " have", "probability": 0.94775390625}, {"start": 3705.51, "end": 3705.63, "word": " to", "probability": 0.96630859375}, {"start": 3705.63, "end": 3705.77, "word": " go", "probability": 0.96142578125}, {"start": 3705.77, "end": 3706.09, "word": " back.", "probability": 0.8623046875}], "temperature": 1.0}, {"id": 136, "seek": 371579, "start": 3706.93, "end": 3715.79, "text": " And use the table in chapter 6, you will end with this result. So the only difference here, you have to use sigma over root N.", "tokens": [400, 764, 264, 3199, 294, 7187, 1386, 11, 291, 486, 917, 365, 341, 1874, 13, 407, 264, 787, 2649, 510, 11, 291, 362, 281, 764, 12771, 670, 5593, 426, 13], "avg_logprob": -0.28049394007652034, "compression_ratio": 1.1759259259259258, "no_speech_prob": 0.0, "words": [{"start": 3706.93, "end": 3707.25, "word": " And", "probability": 0.160400390625}, {"start": 3707.25, "end": 3707.63, "word": " use", "probability": 0.7744140625}, {"start": 3707.63, "end": 3708.05, "word": " the", "probability": 0.9013671875}, {"start": 3708.05, "end": 3709.15, "word": " table", "probability": 0.8544921875}, {"start": 3709.15, "end": 3709.39, "word": " in", "probability": 0.755859375}, {"start": 3709.39, "end": 3709.61, "word": " chapter", "probability": 0.51806640625}, {"start": 3709.61, "end": 3710.03, "word": " 6,", "probability": 0.59619140625}, {"start": 3710.33, "end": 3710.45, "word": " you", "probability": 0.90673828125}, {"start": 3710.45, "end": 3710.59, "word": " will", "probability": 0.88330078125}, {"start": 3710.59, "end": 3710.73, 
"word": " end", "probability": 0.9169921875}, {"start": 3710.73, "end": 3710.85, "word": " with", "probability": 0.734375}, {"start": 3710.85, "end": 3711.03, "word": " this", "probability": 0.91259765625}, {"start": 3711.03, "end": 3711.23, "word": " result.", "probability": 0.8017578125}, {"start": 3712.15, "end": 3712.37, "word": " So", "probability": 0.93310546875}, {"start": 3712.37, "end": 3712.51, "word": " the", "probability": 0.61669921875}, {"start": 3712.51, "end": 3712.73, "word": " only", "probability": 0.93212890625}, {"start": 3712.73, "end": 3713.21, "word": " difference", "probability": 0.8671875}, {"start": 3713.21, "end": 3713.57, "word": " here,", "probability": 0.83984375}, {"start": 3714.05, "end": 3714.19, "word": " you", "probability": 0.93505859375}, {"start": 3714.19, "end": 3714.41, "word": " have", "probability": 0.94580078125}, {"start": 3714.41, "end": 3714.53, "word": " to", "probability": 0.97216796875}, {"start": 3714.53, "end": 3714.77, "word": " use", "probability": 0.87744140625}, {"start": 3714.77, "end": 3715.09, "word": " sigma", "probability": 0.6484375}, {"start": 3715.09, "end": 3715.31, "word": " over", "probability": 0.77099609375}, {"start": 3715.31, "end": 3715.59, "word": " root", "probability": 0.65576171875}, {"start": 3715.59, "end": 3715.79, "word": " N.", "probability": 0.5634765625}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3715.7965, "duration_after_vad": 3527.5212499999834} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM.srt new file mode 100644 index 0000000000000000000000000000000000000000..34d58bd08b6ce4dfb1d93df0484ab9413d0dd6ec --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM.srt @@ -0,0 +1,2354 @@ + +1 +00:00:11,020 --> 00:00:13,920 +The last chapter we are going to talk about in this + +2 +00:00:13,920 --> 00:00:17,820 +semester is correlation and simple 
linear regression. + +3 +00:00:18,380 --> 00:00:23,300 +So we are going to explain two types in chapter + +4 +00:00:23,300 --> 00:00:29,280 +12. One is called correlation. And the other type + +5 +00:00:29,280 --> 00:00:33,500 +is simple linear regression. Maybe this chapter + +6 +00:00:33,500 --> 00:00:40,020 +I'm going to spend about two lectures in order to + +7 +00:00:40,020 --> 00:00:45,000 +cover these objectives. The first objective is to + +8 +00:00:45,000 --> 00:00:48,810 +calculate the coefficient of correlation. The + +9 +00:00:48,810 --> 00:00:51,210 +second objective, the meaning of the regression + +10 +00:00:51,210 --> 00:00:55,590 +coefficients beta 0 and beta 1. And the last + +11 +00:00:55,590 --> 00:00:58,710 +objective is how to use regression analysis to + +12 +00:00:58,710 --> 00:01:03,030 +predict the value of dependent variable based on + +13 +00:01:03,030 --> 00:01:06,010 +an independent variable. It looks like that we + +14 +00:01:06,010 --> 00:01:10,590 +have discussed objective number one in chapter + +15 +00:01:10,590 --> 00:01:16,470 +three. So calculation of the correlation + +16 +00:01:16,470 --> 00:01:20,740 +coefficient is done in chapter three, but here + +17 +00:01:20,740 --> 00:01:26,060 +we'll give some details about correlation also. A + +18 +00:01:26,060 --> 00:01:28,480 +scatter plot can be used to show the relationship + +19 +00:01:28,480 --> 00:01:31,540 +between two variables. For example, imagine that + +20 +00:01:31,540 --> 00:01:35,400 +we have a random sample of 10 children. + +21 +00:01:37,800 --> 00:01:47,940 +And we have data on their weights and ages. And we + +22 +00:01:47,940 --> 00:01:51,640 +are interested to examine the relationship between + +23 +00:01:51,640 --> 00:01:58,400 +weights and age. 
For example, suppose child number + +24 +00:01:58,400 --> 00:02:06,260 +one, his + +25 +00:02:06,260 --> 00:02:12,060 +or her age is two years with weight, for example, + +26 +00:02:12,200 --> 00:02:12,880 +eight kilograms. + +27 +00:02:17,680 --> 00:02:21,880 +His weight or her weight is four years, and his or + +28 +00:02:21,880 --> 00:02:24,500 +her weight is, for example, 15 kilograms, and so + +29 +00:02:24,500 --> 00:02:29,680 +on. And again, we are interested to examine the + +30 +00:02:29,680 --> 00:02:32,640 +relationship between age and weight. Maybe they + +31 +00:02:32,640 --> 00:02:37,400 +exist sometimes. positive relationship between the + +32 +00:02:37,400 --> 00:02:41,100 +two variables that means if one variable increases + +33 +00:02:41,100 --> 00:02:45,260 +the other one also increase if one variable + +34 +00:02:45,260 --> 00:02:47,980 +increases the other will also decrease so they + +35 +00:02:47,980 --> 00:02:52,980 +have the same direction either up or down so we + +36 +00:02:52,980 --> 00:02:58,140 +have to know number one the form of the + +37 +00:02:58,140 --> 00:03:02,140 +relationship this one could be linear here we + +38 +00:03:02,140 --> 00:03:06,890 +focus just on linear relationship between X and Y. + +39 +00:03:08,050 --> 00:03:13,730 +The second, we have to know the direction of the + +40 +00:03:13,730 --> 00:03:21,270 +relationship. This direction might be positive or + +41 +00:03:21,270 --> 00:03:22,350 +negative relationship. 
+ +42 +00:03:25,150 --> 00:03:27,990 +In addition to that, we have to know the strength + +43 +00:03:27,990 --> 00:03:33,760 +of the relationship between the two variables of + +44 +00:03:33,760 --> 00:03:37,320 +interest the strength can be classified into three + +45 +00:03:37,320 --> 00:03:46,480 +categories either strong, moderate or there exists + +46 +00:03:46,480 --> 00:03:50,580 +a weak relationship so it could be positive + +47 +00:03:50,580 --> 00:03:53,320 +-strong, positive-moderate or positive-weak, the + +48 +00:03:53,320 --> 00:03:58,360 +same for negative so by using scatter plot we can + +49 +00:03:58,360 --> 00:04:02,530 +determine the form either linear or non-linear, + +50 +00:04:02,690 --> 00:04:06,130 +but here we are focusing on just linear + +51 +00:04:06,130 --> 00:04:10,310 +relationship. Also, we can determine the direction + +52 +00:04:10,310 --> 00:04:12,870 +of the relationship. We can say there exists + +53 +00:04:12,870 --> 00:04:15,910 +positive or negative based on the scatter plot. + +54 +00:04:16,710 --> 00:04:19,530 +Also, we can know the strength of the + +55 +00:04:19,530 --> 00:04:23,130 +relationship, either strong, moderate or weak. For + +56 +00:04:23,130 --> 00:04:29,810 +example, suppose we have again weights and ages. + +57 +00:04:30,390 --> 00:04:33,590 +And we know that there are two types of variables + +58 +00:04:33,590 --> 00:04:36,710 +in this case. One is called dependent and the + +59 +00:04:36,710 --> 00:04:41,330 +other is independent. So if we, as we explained + +60 +00:04:41,330 --> 00:04:47,890 +before, is the dependent variable and A is + +61 +00:04:47,890 --> 00:04:48,710 +independent variable. 
+ +62 +00:04:52,690 --> 00:04:57,270 +Always dependent + +63 +00:04:57,270 --> 00:04:57,750 +variable + +64 +00:05:00,400 --> 00:05:05,560 +is denoted by Y and always on the vertical axis so + +65 +00:05:05,560 --> 00:05:11,300 +here we have weight and independent variable is + +66 +00:05:11,300 --> 00:05:17,760 +denoted by X and X is in the X axis or horizontal + +67 +00:05:17,760 --> 00:05:26,300 +axis now scatter plot for example here child with + +68 +00:05:26,300 --> 00:05:30,820 +age 2 years his weight is 8 So two years, for + +69 +00:05:30,820 --> 00:05:36,760 +example, this is eight. So this star represents + +70 +00:05:36,760 --> 00:05:42,320 +the first pair of observation, age of two and + +71 +00:05:42,320 --> 00:05:46,820 +weight of eight. The other child, his weight is + +72 +00:05:46,820 --> 00:05:52,860 +four years, and the corresponding weight is 15. + +73 +00:05:53,700 --> 00:05:58,970 +For example, this value is 15. The same for the + +74 +00:05:58,970 --> 00:06:02,430 +other points. Here we can know the direction. + +75 +00:06:04,910 --> 00:06:10,060 +In this case they exist. Positive. Form is linear. + +76 +00:06:12,100 --> 00:06:16,860 +Strong or weak or moderate depends on how these + +77 +00:06:16,860 --> 00:06:20,260 +values are close to the straight line. Closer + +78 +00:06:20,260 --> 00:06:24,380 +means stronger. So if the points are closer to the + +79 +00:06:24,380 --> 00:06:26,620 +straight line, it means there exists stronger + +80 +00:06:26,620 --> 00:06:30,800 +relationship between the two variables. So closer + +81 +00:06:30,800 --> 00:06:34,480 +means stronger, either positive or negative. In + +82 +00:06:34,480 --> 00:06:37,580 +this case, there exists positive. Now for the + +83 +00:06:37,580 --> 00:06:42,360 +negative association or relationship, we have the + +84 +00:06:42,360 --> 00:06:46,060 +other direction, it could be this one. 
So in this + +85 +00:06:46,060 --> 00:06:49,460 +case there exists linear but negative + +86 +00:06:49,460 --> 00:06:51,900 +relationship, and this negative could be positive + +87 +00:06:51,900 --> 00:06:56,100 +or negative, it depends on the points. So it's + +88 +00:06:56,100 --> 00:07:02,660 +positive relationship. The other direction is + +89 +00:07:02,660 --> 00:07:06,460 +negative. So the points, if the points are closed, + +90 +00:07:06,820 --> 00:07:10,160 +then we can say there exists strong negative + +91 +00:07:10,160 --> 00:07:14,440 +relationship. So by using scatter plot, we can + +92 +00:07:14,440 --> 00:07:17,280 +determine all of these. + +93 +00:07:20,840 --> 00:07:24,460 +and direction and strength now here the two + +94 +00:07:24,460 --> 00:07:27,060 +variables we are talking about are numerical + +95 +00:07:27,060 --> 00:07:30,480 +variables so the two variables here are numerical + +96 +00:07:30,480 --> 00:07:35,220 +variables so we are talking about quantitative + +97 +00:07:35,220 --> 00:07:39,850 +variables but remember in chapter 11 We talked + +98 +00:07:39,850 --> 00:07:43,150 +about the relationship between two qualitative + +99 +00:07:43,150 --> 00:07:47,450 +variables. So we use chi-square test. Here we are + +100 +00:07:47,450 --> 00:07:49,630 +talking about something different. We are talking + +101 +00:07:49,630 --> 00:07:52,890 +about numerical variables. So we can use scatter + +102 +00:07:52,890 --> 00:07:58,510 +plot, number one. Next correlation analysis is + +103 +00:07:58,510 --> 00:08:02,090 +used to measure the strength of the association + +104 +00:08:02,090 --> 00:08:05,190 +between two variables. And here again, we are just + +105 +00:08:05,190 --> 00:08:09,560 +talking about linear relationship. So this chapter + +106 +00:08:09,560 --> 00:08:13,340 +just covers the linear relationship between the + +107 +00:08:13,340 --> 00:08:17,040 +two variables. 
Because sometimes there exists non + +108 +00:08:17,040 --> 00:08:23,180 +-linear relationship between the two variables. So + +109 +00:08:23,180 --> 00:08:26,120 +correlation is only concerned with the strength of + +110 +00:08:26,120 --> 00:08:30,500 +the relationship. No causal effect is implied with + +111 +00:08:30,500 --> 00:08:35,220 +correlation. We just say that X affects Y, or X + +112 +00:08:35,220 --> 00:08:39,580 +explains the variation in Y. Scatter plots were + +113 +00:08:39,580 --> 00:08:43,720 +first presented in Chapter 2, and we skipped, if + +114 +00:08:43,720 --> 00:08:48,480 +you remember, Chapter 2. And it's easy to make + +115 +00:08:48,480 --> 00:08:52,620 +scatter plots for Y versus X. In Chapter 3, we + +116 +00:08:52,620 --> 00:08:56,440 +talked about correlation, so correlation was first + +117 +00:08:56,440 --> 00:09:00,060 +presented in Chapter 3. But here I will give just + +118 +00:09:00,060 --> 00:09:07,240 +a review for computation about correlation + +119 +00:09:07,240 --> 00:09:11,460 +coefficient or coefficient of correlation. First, + +120 +00:09:12,800 --> 00:09:15,680 +coefficient of correlation measures the relative + +121 +00:09:15,680 --> 00:09:19,920 +strength of the linear relationship between two + +122 +00:09:19,920 --> 00:09:23,740 +numerical variables. So here, we are talking about + +123 +00:09:23,740 --> 00:09:28,080 +numerical variables. Sample correlation + +124 +00:09:28,080 --> 00:09:31,500 +coefficient is given by this equation. which is + +125 +00:09:31,500 --> 00:09:36,180 +sum of the product of xi minus x bar, yi minus y + +126 +00:09:36,180 --> 00:09:41,100 +bar, divided by n minus 1 times standard deviation + +127 +00:09:41,100 --> 00:09:44,960 +of x times standard deviation of y. We know that x + +128 +00:09:44,960 --> 00:09:47,240 +bar and y bar are the means of x and y + +129 +00:09:47,240 --> 00:09:51,360 +respectively. 
And Sx, Sy are the standard + +130 +00:09:51,360 --> 00:09:55,540 +deviations of x and y values. And we know this + +131 +00:09:55,540 --> 00:09:58,460 +equation before. But there is another equation + +132 +00:09:58,460 --> 00:10:05,330 +that one can be used For computation, which is + +133 +00:10:05,330 --> 00:10:09,290 +called shortcut formula, which is just sum of xy + +134 +00:10:09,290 --> 00:10:15,310 +minus n times x bar y bar divided by square root + +135 +00:10:15,310 --> 00:10:18,690 +of this quantity. And we know this equation from + +136 +00:10:18,690 --> 00:10:23,650 +chapter three. Now again, x bar and y bar are the + +137 +00:10:23,650 --> 00:10:30,060 +means. Now the question is, Do outliers affect the + +138 +00:10:30,060 --> 00:10:36,440 +correlation? For sure, yes. Because this formula + +139 +00:10:36,440 --> 00:10:39,940 +actually based on the means and the standard + +140 +00:10:39,940 --> 00:10:44,300 +deviations, and these two measures are affected by + +141 +00:10:44,300 --> 00:10:47,880 +outliers. So since R is a function of these two + +142 +00:10:47,880 --> 00:10:51,340 +statistics, the means and standard deviations, + +143 +00:10:51,940 --> 00:10:54,280 +then outliers will affect the value of the + +144 +00:10:54,280 --> 00:10:55,940 +correlation coefficient. + +145 +00:10:57,890 --> 00:11:01,170 +Some features about the coefficient of + +146 +00:11:01,170 --> 00:11:09,570 +correlation. Here rho is the population + +147 +00:11:09,570 --> 00:11:13,210 +coefficient of correlation, and R is the sample + +148 +00:11:13,210 --> 00:11:17,730 +coefficient of correlation. Either rho or R have + +149 +00:11:17,730 --> 00:11:21,390 +the following features. Number one, unity free. It + +150 +00:11:21,390 --> 00:11:24,890 +means R has no units. For example, here we are + +151 +00:11:24,890 --> 00:11:28,820 +talking about whales. And weight in kilograms, + +152 +00:11:29,300 --> 00:11:33,700 +ages in years. 
And for example, suppose the + +153 +00:11:33,700 --> 00:11:37,080 +correlation between these two variables is 0.8. + +154 +00:11:38,620 --> 00:11:41,760 +It's unity free, so it's just 0.8. So there is no + +155 +00:11:41,760 --> 00:11:45,640 +unit. You cannot say 0.8 kilogram per year or + +156 +00:11:45,640 --> 00:11:51,040 +whatever it is. So just 0.8. So the first feature + +157 +00:11:51,040 --> 00:11:53,360 +of the correlation coefficient is unity-free. + +158 +00:11:54,180 --> 00:11:56,340 +Number two ranges between negative one and plus + +159 +00:11:56,340 --> 00:12:00,380 +one. So R is always, or rho, is always between + +160 +00:12:00,380 --> 00:12:04,560 +minus one and plus one. So minus one smaller than + +161 +00:12:04,560 --> 00:12:07,340 +or equal to R smaller than or equal to plus one. + +162 +00:12:07,420 --> 00:12:11,420 +So R is always in this range. So R cannot be + +163 +00:12:11,420 --> 00:12:15,260 +smaller than negative one or greater than plus + +164 +00:12:15,260 --> 00:12:20,310 +one. The closer to minus one or negative one, the + +165 +00:12:20,310 --> 00:12:23,130 +stronger negative relationship between or linear + +166 +00:12:23,130 --> 00:12:26,770 +relationship between x and y. So, for example, if + +167 +00:12:26,770 --> 00:12:33,370 +R is negative 0.85 or R is negative 0.8. Now, this + +168 +00:12:33,370 --> 00:12:39,690 +value is closer to minus one than negative 0.8. So + +169 +00:12:39,690 --> 00:12:43,230 +negative 0.85 is stronger than negative 0.8. + +170 +00:12:44,590 --> 00:12:48,470 +Because we are looking for closer to minus 1. + +171 +00:12:49,570 --> 00:12:55,310 +Minus 0.8, the value itself is greater than minus + +172 +00:12:55,310 --> 00:12:59,610 +0.85. But this value is closer to minus 1 than + +173 +00:12:59,610 --> 00:13:03,790 +minus 0.8. So we can say that this relationship is + +174 +00:13:03,790 --> 00:13:05,070 +stronger than the other one. 
+ +175 +00:13:07,870 --> 00:13:11,730 +Also, the closer to plus 1, the stronger the + +176 +00:13:11,730 --> 00:13:16,040 +positive linear relationship. Here, suppose R is 0 + +177 +00:13:16,040 --> 00:13:22,740 +.7 and another R is 0.8. 0.8 is closer to plus one + +178 +00:13:22,740 --> 00:13:26,740 +than 0.7, so 0.8 is stronger. This one makes + +179 +00:13:26,740 --> 00:13:31,800 +sense. The closer to zero, the weaker relationship + +180 +00:13:31,800 --> 00:13:35,420 +between the two variables. For example, suppose R + +181 +00:13:35,420 --> 00:13:40,720 +is plus or minus 0.05. This value is very close to + +182 +00:13:40,720 --> 00:13:44,420 +zero. It means there exists weak. relationship. + +183 +00:13:44,980 --> 00:13:47,960 +Sometimes we can say that there exists moderate + +184 +00:13:47,960 --> 00:13:57,080 +relationship if R is close to 0.5. So it could be + +185 +00:13:57,080 --> 00:14:01,360 +classified into these groups closer to minus 1, + +186 +00:14:01,500 --> 00:14:06,220 +closer to 1, 0.5 or 0. So we can know the + +187 +00:14:06,220 --> 00:14:11,680 +direction by the sign of R negative it means + +188 +00:14:11,680 --> 00:14:14,320 +because here our ranges as we mentioned between + +189 +00:14:14,320 --> 00:14:19,520 +minus one and plus one here zero so this these + +190 +00:14:19,520 --> 00:14:24,560 +values it means there exists negative above zero + +191 +00:14:24,560 --> 00:14:26,760 +all the way up to one it means there exists + +192 +00:14:26,760 --> 00:14:31,020 +positive relationship between the two variables so + +193 +00:14:31,020 --> 00:14:35,520 +the sign gives the direction of the relationship + +194 +00:14:36,720 --> 00:14:40,840 +The absolute value gives the strength of the + +195 +00:14:40,840 --> 00:14:43,500 +relationship between the two variables. So the + +196 +00:14:43,500 --> 00:14:49,260 +same as we had discussed before. 
Now, some types + +197 +00:14:49,260 --> 00:14:51,880 +of scatter plots for different types of + +198 +00:14:51,880 --> 00:14:54,740 +relationship between the two variables is + +199 +00:14:54,740 --> 00:14:59,100 +presented in this + +223 +00:16:53,260 --> 00:16:58,860 +individual and also weights are increased by three + +224 +00:16:58,860 --> 00:17:03,080 +units for each person. In this case there exists + +225 +00:17:03,080 --> 00:17:06,820 +a perfect relationship but that never happened in + +226 +00:17:06,820 --> 00:17:13,300 +real life. So perfect means all points are lie on + +227 +00:17:13,300 --> 00:17:16,260 +the straight line otherwise if the points are + +228 +00:17:16,260 --> 00:17:21,230 +close Then we can say there exists strong. Here if + +229 +00:17:21,230 --> 00:17:24,750 +you look carefully at these points corresponding + +230 +00:17:24,750 --> 00:17:30,150 +to this regression line, it looks like not strong + +231 +00:17:30,150 --> 00:17:32,630 +because some of the points are not close, so you + +232 +00:17:32,630 --> 00:17:35,450 +can say there exists maybe moderate negative + +233 +00:17:35,450 --> 00:17:39,530 +relationship. This one, most of the points are + +234 +00:17:39,530 --> 00:17:42,390 +scattered away from the straight line, so there + +235 +00:17:42,390 --> 00:17:46,930 +exists weak relationship. So by just looking at + +236 +00:17:46,930 --> 00:17:50,290 +the scatter path, sometimes you can, sometimes + +237 +00:17:50,290 --> 00:17:53,290 +it's hard to tell, but most of the time you can + +238 +00:17:53,290 --> 00:17:58,250 +tell at least the direction, positive or negative, + +239 +00:17:59,410 --> 00:18:04,150 +the form, linear or non-linear, or the strength of + +240 +00:18:04,150 --> 00:18:09,100 +the relationship. The last one here, now x + +241 +00:18:09,100 --> 00:18:13,800 +increases, y remains the same. For example, + +242 +00:18:13,880 --> 00:18:18,580 +suppose x is 1, y is 10. 
x increases to 2, y still + +243 +00:18:18,580 --> 00:18:22,220 +is 10. So as x increases, y stays the same + +244 +00:18:22,220 --> 00:18:26,140 +position, it means there is no linear relationship + +245 +00:18:26,140 --> 00:18:28,900 +between the two variables. So based on the scatter + +246 +00:18:28,900 --> 00:18:33,240 +plot you can have an idea about the relationship + +247 +00:18:33,240 --> 00:18:37,800 +between the two variables. Here I will give a + +248 +00:18:37,800 --> 00:18:41,120 +simple example in order to determine the + +249 +00:18:41,120 --> 00:18:45,160 +correlation coefficient. A real estate agent + +250 +00:18:45,160 --> 00:18:50,380 +wishes to examine the relationship between selling + +251 +00:18:50,380 --> 00:18:54,580 +the price of a home and its size measured in + +252 +00:18:54,580 --> 00:18:57,140 +square feet. So in this case, there are two + +253 +00:18:57,140 --> 00:19:02,400 +variables of interest. One is called selling price + +254 +00:19:02,400 --> 00:19:13,720 +of a home. So here, selling price of a home and + +255 +00:19:13,720 --> 00:19:18,020 +its size. Now, selling price in $1,000. + +256 +00:19:25,360 --> 00:19:29,380 +And size in feet squared. Here we have to + +257 +00:19:29,380 --> 00:19:35,640 +distinguish between dependent and independent. So + +258 +00:19:35,640 --> 00:19:39,740 +your dependent variable is house price, sometimes + +259 +00:19:39,740 --> 00:19:41,620 +called response variable. + +260 +00:19:45,750 --> 00:19:49,490 +The independent variable is the size, which is in + +261 +00:19:49,490 --> 00:19:54,570 +square feet, sometimes called sub-planetary + +262 +00:19:54,570 --> 00:19:54,850 +variable. + +263 +00:19:59,570 --> 00:20:06,370 +So my Y is ceiling rise, and size is square feet, + +264 +00:20:07,530 --> 00:20:12,910 +or size of the house. In this case, there are 10. + +265 +00:20:14,290 --> 00:20:17,890 +It's sample size is 10. 
So the first house with + +266 +00:20:17,890 --> 00:20:26,850 +size 1,400 square feet, it's selling price is 245 + +267 +00:20:26,850 --> 00:20:31,670 +multiplied by 1,000. Because these values are in + +268 +00:20:31,670 --> 00:20:37,950 +$1,000. Now based on this data, you can first plot + +269 +00:20:37,950 --> 00:20:46,590 +the scatterplot of house price In Y direction, the + +270 +00:20:46,590 --> 00:20:51,870 +vertical direction. So here is house. And rise. + +271 +00:20:54,230 --> 00:21:01,470 +And size in the X axis. You will get this scatter + +272 +00:21:01,470 --> 00:21:07,370 +plot. Now, the data here is just 10 points, so + +273 +00:21:07,370 --> 00:21:12,590 +sometimes it's hard to tell. the relationship + +274 +00:21:12,590 --> 00:21:15,510 +between the two variables if your data is small. + +275 +00:21:16,510 --> 00:21:21,170 +But just this example for illustration. But at + +276 +00:21:21,170 --> 00:21:25,370 +least you can determine that there exists linear + +277 +00:21:25,370 --> 00:21:28,810 +relationship between the two variables. It is + +278 +00:21:28,810 --> 00:21:35,490 +positive. So the form is linear. Direction is + +279 +00:21:35,490 --> 00:21:41,880 +positive. Weak or strong or moderate. Sometimes + +280 +00:21:41,880 --> 00:21:45,620 +it's not easy to tell if it is strong or moderate. + +281 +00:21:47,720 --> 00:21:50,120 +Now if you look at these points, some of them are + +282 +00:21:50,120 --> 00:21:53,700 +close to the straight line and others are away + +283 +00:21:53,700 --> 00:21:56,700 +from the straight line. So maybe there exists + +284 +00:21:56,700 --> 00:22:02,720 +moderate for example, but you cannot say strong. + +285 +00:22:03,930 --> 00:22:08,210 +Here, strong it means the points are close to the + +286 +00:22:08,210 --> 00:22:11,890 +straight line. 
Sometimes it's hard to tell the + +287 +00:22:11,890 --> 00:22:15,230 +strength of the relationship, but you can know the + +288 +00:22:15,230 --> 00:22:20,990 +form or the direction. But to measure the exact + +289 +00:22:20,990 --> 00:22:24,130 +strength, you have to measure the correlation + +290 +00:22:24,130 --> 00:22:29,810 +coefficient, R. Now, by looking at the data, you + +291 +00:22:29,810 --> 00:22:31,430 +can compute + +292 +00:22:33,850 --> 00:22:42,470 +The sum of x values, y values, sum of x squared, + +293 +00:22:43,290 --> 00:22:48,170 +sum of y squared, also sum of xy. Now plug these + +294 +00:22:48,170 --> 00:22:50,610 +values into the formula we have for the shortcut + +295 +00:22:50,610 --> 00:22:58,210 +formula. You will get R to be 0.76 around 76. + +296 +00:23:04,050 --> 00:23:10,170 +So there exists positive, moderate relationship + +297 +00:23:10,170 --> 00:23:13,770 +between selling + +298 +00:23:13,770 --> 00:23:19,850 +price of a home and its size. So that means if the + +299 +00:23:19,850 --> 00:23:24,670 +size increases, the selling price also increases. + +300 +00:23:25,310 --> 00:23:29,550 +So there exists positive relationship between the + +301 +00:23:29,550 --> 00:23:30,310 +two variables. + +302 +00:23:35,800 --> 00:23:40,300 +Strong it means close to 1, 0.8, 0.85, 0.9, you + +303 +00:23:40,300 --> 00:23:44,400 +can say there exists strong. But fields is not + +304 +00:23:44,400 --> 00:23:47,960 +strong relationship, you can say it's moderate + +305 +00:23:47,960 --> 00:23:53,440 +relationship. Because it's close if now if you + +306 +00:23:53,440 --> 00:23:57,080 +just compare this value and other data gives 9%. + +307 +00:23:58,830 --> 00:24:03,790 +Other one gives 85%. So these values are much + +308 +00:24:03,790 --> 00:24:08,550 +closer to 1 than 0.7, but still this value is + +309 +00:24:08,550 --> 00:24:09,570 +considered to be high. + +310 +00:24:15,710 --> 00:24:16,810 +Any question? 
+ +311 +00:24:19,850 --> 00:24:22,810 +Next, I will give some introduction to regression + +312 +00:24:22,810 --> 00:24:23,390 +analysis. + +313 +00:24:26,970 --> 00:24:32,210 +Regression analysis used to number one, predict + +314 +00:24:32,210 --> 00:24:35,050 +the value of a dependent variable based on the + +315 +00:24:35,050 --> 00:24:39,250 +value of at least one independent variable. So by + +316 +00:24:39,250 --> 00:24:42,490 +using the data we have for selling price of a home + +317 +00:24:42,490 --> 00:24:48,370 +and size, you can predict the selling price by + +318 +00:24:48,370 --> 00:24:51,510 +knowing the value of its size. So suppose for + +319 +00:24:51,510 --> 00:24:54,870 +example, You know that the size of a house is + +320 +00:24:54,870 --> 00:25:03,510 +1450, 1450 square feet. What do you predict its + +321 +00:25:03,510 --> 00:25:10,190 +size, its sale or price? So by using this value, + +322 +00:25:10,310 --> 00:25:16,510 +we can predict the selling price. Next, explain + +323 +00:25:16,510 --> 00:25:19,890 +the impact of changes in independent variable on + +324 +00:25:19,890 --> 00:25:23,270 +the dependent variable. You can say, for example, + +325 +00:25:23,510 --> 00:25:30,650 +90% of the variability in the dependent variable + +326 +00:25:30,650 --> 00:25:36,790 +in selling price is explained by its size. So we + +327 +00:25:36,790 --> 00:25:39,410 +can predict the value of dependent variable based + +328 +00:25:39,410 --> 00:25:42,890 +on a value of one independent variable at least. + +329 +00:25:43,870 --> 00:25:47,090 +Or also explain the impact of changes in + +330 +00:25:47,090 --> 00:25:49,550 +independent variable on the dependent variable. + +331 +00:25:51,420 --> 00:25:53,920 +Sometimes there exists more than one independent + +332 +00:25:53,920 --> 00:25:59,680 +variable. 
For example, maybe there are more than + +333 +00:25:59,680 --> 00:26:04,500 +one variable that affects a price, a selling + +334 +00:26:04,500 --> 00:26:10,300 +price. For example, beside selling + +335 +00:26:10,300 --> 00:26:16,280 +price, beside size, maybe location. + +336 +00:26:19,480 --> 00:26:23,580 +Maybe location is also another factor that affects + +337 +00:26:23,580 --> 00:26:27,360 +the selling price. So in this case there are two + +338 +00:26:27,360 --> 00:26:32,240 +variables. If there exists more than one variable, + +339 +00:26:32,640 --> 00:26:36,080 +in this case we have something called multiple + +340 +00:26:36,080 --> 00:26:38,680 +linear regression. + +341 +00:26:42,030 --> 00:26:46,710 +Here, we just talk about one independent variable. + +342 +00:26:47,030 --> 00:26:51,610 +There is only, in this chapter, there is only one + +343 +00:26:51,610 --> 00:26:58,330 +x. So it's called simple linear + +344 +00:26:58,330 --> 00:26:59,330 +regression. + +345 +00:27:02,190 --> 00:27:07,930 +The calculations for multiple takes time. So we + +346 +00:27:07,930 --> 00:27:11,430 +are going just to cover one independent variable. + +347 +00:27:11,930 --> 00:27:14,290 +But if there exists more than one, in this case + +348 +00:27:14,290 --> 00:27:18,250 +you have to use some statistical software as SPSS. + +349 +00:27:18,470 --> 00:27:23,390 +Because in that case you can just select a + +350 +00:27:23,390 --> 00:27:25,970 +regression analysis from SPSS, then you can run + +351 +00:27:25,970 --> 00:27:28,590 +the multiple regression without doing any + +352 +00:27:28,590 --> 00:27:34,190 +computations. But here we just covered one + +353 +00:27:34,190 --> 00:27:36,820 +independent variable. In this case, it's called + +354 +00:27:36,820 --> 00:27:41,980 +simple linear regression. Again, the dependent + +355 +00:27:41,980 --> 00:27:44,600 +variable is the variable we wish to predict or + +356 +00:27:44,600 --> 00:27:50,020 +explain, the same as weight. 
Independent variable, + +357 +00:27:50,180 --> 00:27:52,440 +the variable used to predict or explain the + +358 +00:27:52,440 --> 00:27:54,000 +dependent variable. + +359 +00:27:57,400 --> 00:28:00,540 +For simple linear regression model, there is only + +360 +00:28:00,540 --> 00:28:01,800 +one independent variable. + +361 +00:28:04,830 --> 00:28:08,450 +Another example for simple linear regression. + +362 +00:28:08,770 --> 00:28:11,590 +Suppose we are talking about your scores. + +363 +00:28:14,210 --> 00:28:17,770 +Scores is the dependent variable can be affected + +364 +00:28:17,770 --> 00:28:21,050 +by number of hours. + +365 +00:28:25,130 --> 00:28:31,030 +Hour of study. Number of studying hours. + +366 +00:28:36,910 --> 00:28:39,810 +Maybe as number of studying hour increases, your + +367 +00:28:39,810 --> 00:28:43,390 +scores also increase. In this case, if there is + +368 +00:28:43,390 --> 00:28:46,330 +only one X, one independent variable, it's called + +369 +00:28:46,330 --> 00:28:51,110 +simple linear regression. Maybe another variable, + +370 +00:28:52,270 --> 00:28:59,730 +number of missing classes or + +371 +00:28:59,730 --> 00:29:03,160 +attendance. As number of missing classes + +372 +00:29:03,160 --> 00:29:06,380 +increases, your score goes down. That means there + +373 +00:29:06,380 --> 00:29:09,400 +exists negative relationship between missing + +374 +00:29:09,400 --> 00:29:13,540 +classes and your score. So sometimes, maybe there + +375 +00:29:13,540 --> 00:29:16,580 +exists positive or negative. It depends on the + +376 +00:29:16,580 --> 00:29:20,040 +variable itself. In this case, if there are more + +377 +00:29:20,040 --> 00:29:23,180 +than one variable, then we are talking about + +378 +00:29:23,180 --> 00:29:28,300 +multiple linear regression model. But here, we + +379 +00:29:28,300 --> 00:29:33,630 +have only one independent variable. 
In addition to + +380 +00:29:33,630 --> 00:29:37,230 +that, a relationship between x and y is described + +381 +00:29:37,230 --> 00:29:40,850 +by a linear function. So there exists a straight + +382 +00:29:40,850 --> 00:29:46,270 +line between the two variables. The changes in y + +383 +00:29:46,270 --> 00:29:50,210 +are assumed to be related to changes in x only. So + +384 +00:29:50,210 --> 00:29:54,270 +any change in y is related only to changes in x. + +385 +00:29:54,730 --> 00:29:57,810 +So that's the simple case we have for regression, + +386 +00:29:58,890 --> 00:30:01,170 +that we have only one independent + +387 +00:30:03,890 --> 00:30:07,070 +Variable. Types of relationships, as we mentioned, + +388 +00:30:07,210 --> 00:30:12,190 +maybe there exist linear, it means there exist + +389 +00:30:12,190 --> 00:30:16,490 +straight line between X and Y, either linear + +390 +00:30:16,490 --> 00:30:22,050 +positive or negative, or sometimes there exist non + +391 +00:30:22,050 --> 00:30:25,830 +-linear relationship, it's called curved linear + +392 +00:30:25,830 --> 00:30:29,290 +relationship. The same as this one, it's parabola. + +393 +00:30:32,570 --> 00:30:35,150 +Now in this case there is no linear relationship + +394 +00:30:35,150 --> 00:30:39,690 +but there exists curved linear or something like + +395 +00:30:39,690 --> 00:30:45,910 +this one. So these types of non-linear + +396 +00:30:45,910 --> 00:30:49,530 +relationship between the two variables. Here we + +397 +00:30:49,530 --> 00:30:54,070 +are covering just the linear relationship between + +398 +00:30:54,070 --> 00:30:56,570 +the two variables. So based on the scatter plot + +399 +00:30:56,570 --> 00:31:00,620 +you can determine the direction. The form, the + +400 +00:31:00,620 --> 00:31:03,860 +strength. Here, the form we are talking about is + +401 +00:31:03,860 --> 00:31:04,720 +just linear. 
+ +402 +00:31:08,700 --> 00:31:13,260 +Now, another type of relationship, the strength of + +403 +00:31:13,260 --> 00:31:16,940 +the relationship. Here, the points, either for + +404 +00:31:16,940 --> 00:31:20,570 +this graph or the other one, These points are + +405 +00:31:20,570 --> 00:31:24,570 +close to the straight line, it means there exists + +406 +00:31:24,570 --> 00:31:28,210 +strong positive relationship or strong negative + +407 +00:31:28,210 --> 00:31:31,230 +relationship. So it depends on the direction. So + +408 +00:31:31,230 --> 00:31:35,710 +strong either positive or strong negative. Here + +409 +00:31:35,710 --> 00:31:38,850 +the points are scattered away from the regression + +410 +00:31:38,850 --> 00:31:41,790 +line, so you can say there exists weak + +411 +00:31:41,790 --> 00:31:45,090 +relationship, either weak positive or weak + +412 +00:31:45,090 --> 00:31:49,650 +negative. It depends on the direction of the + +413 +00:31:49,650 --> 00:31:54,270 +relationship between the two variables. Sometimes + +414 +00:31:54,270 --> 00:31:59,680 +there is no relationship or actually there is no + +415 +00:31:59,680 --> 00:32:02,340 +linear relationship between the two variables. If + +416 +00:32:02,340 --> 00:32:05,660 +the points are scattered away from the regression + +417 +00:32:05,660 --> 00:32:09,800 +line, I mean you cannot determine if it is + +418 +00:32:09,800 --> 00:32:13,160 +positive or negative, then there is no + +419 +00:32:13,160 --> 00:32:16,220 +relationship between the two variables, the same + +420 +00 + +445 +00:34:40,270 --> 00:34:43,850 +could be negative or + +446 +00:34:46,490 --> 00:34:49,350 +Maybe the straight line passes through the origin + +447 +00:34:49,350 --> 00:34:56,990 +point. So in this case, beta zero equals zero. So + +448 +00:34:56,990 --> 00:34:59,890 +it could be positive and negative or equal zero, + +449 +00:35:00,430 --> 00:35:05,510 +but still we have positive relationship. 
That + +450 +00:35:05,510 --> 00:35:09,970 +means the value of beta zero, the sign of beta + +451 +00:35:09,970 --> 00:35:13,310 +zero does not affect the relationship between Y + +452 +00:35:13,310 --> 00:35:17,850 +and X. Because here in the three cases, there + +453 +00:35:17,850 --> 00:35:22,390 +exists positive relationship, but beta zero could + +454 +00:35:22,390 --> 00:35:25,370 +be positive or negative or equal zero, but still + +455 +00:35:25,370 --> 00:35:31,720 +we have positive relationship. I mean, you cannot + +456 +00:35:31,720 --> 00:35:35,060 +determine by looking at beta 0, you cannot + +457 +00:35:35,060 --> 00:35:37,940 +determine if there is a positive or negative + +458 +00:35:37,940 --> 00:35:41,720 +relationship. The other term is beta 1. Beta 1 is + +459 +00:35:41,720 --> 00:35:46,900 +the population slope coefficient. Now, the sign of + +460 +00:35:46,900 --> 00:35:50,010 +the slope determines the direction of the + +461 +00:35:50,010 --> 00:35:54,090 +relationship. That means if the slope has positive + +462 +00:35:54,090 --> 00:35:56,570 +sign, it means there exists positive relationship. + +463 +00:35:57,330 --> 00:35:59,370 +Otherwise if it is negative, then there is + +464 +00:35:59,370 --> 00:36:01,390 +negative relationship between the two variables. + +465 +00:36:02,130 --> 00:36:05,310 +So the sign of the slope determines the direction. + +466 +00:36:06,090 --> 00:36:11,290 +But the sign of beta zero has no meaning about the + +467 +00:36:11,290 --> 00:36:15,470 +relationship between Y and X. 
X is your

+468
+00:36:15,470 --> 00:36:19,630
+independent variable, Y is your dependent

+469
+00:36:19,630 --> 00:36:19,650
+variable.

+470
+00:36:19,650 --> 00:36:21,250
+X is your independent variable,

+471
+00:36:21,250 --> 00:36:24,370
+and Y is your dependent variable.

+472
+00:36:24,370 --> 00:36:24,430
+X is your independent variable,

+473
+00:36:24,430 --> 00:36:24,770
+and Y is your dependent variable.

+474
+00:36:24,770 --> 00:36:27,490
+X is your independent variable,

+475
+00:36:27,490 --> 00:36:30,110
+and Y is your dependent variable. It means

+476
+00:36:30,110 --> 00:36:32,450
+there are some errors you don't know about it

+477
+00:36:32,450 --> 00:36:36,130
+because you ignore some other variables that may

+478
+00:36:36,130 --> 00:36:39,410
+affect the selling price. Maybe you select a

+479
+00:36:39,410 --> 00:36:42,490
+random sample, that sample is small. Maybe there

+480
+00:36:42,490 --> 00:36:46,270
+is a random, I'm sorry, there is sampling error.

+481
+00:36:47,070 --> 00:36:52,980
+So all of these are called random error term. So

+482
+00:36:52,980 --> 00:36:57,420
+all of them are in this term. So epsilon I means

+483
+00:36:57,420 --> 00:37:00,340
+something you don't include in your regression

+484
+00:37:00,340 --> 00:37:03,280
+modeling. For example, you don't include all the

+485
+00:37:03,280 --> 00:37:06,180
+independent variables that affect Y, or your

+486
+00:37:06,180 --> 00:37:09,700
+sample size is not large enough. So all of these

+487
+00:37:09,700 --> 00:37:14,260
+measured in random error term. So epsilon I is

+488
+00:37:14,260 --> 00:37:18,840
+random error component, beta 0 plus beta 1X is

+489
+00:37:18,840 --> 00:37:25,070
+called linear component. So that's the simple

+490
+00:37:25,070 --> 00:37:31,430
+linear regression model. 
Now, the data you have, + +491 +00:37:32,850 --> 00:37:38,210 +the blue circles represent the observed value. So + +492 +00:37:38,210 --> 00:37:47,410 +these blue circles are the observed values. So we + +493 +00:37:47,410 --> 00:37:49,370 +have observed. + +494 +00:37:52,980 --> 00:37:57,940 +Y observed value of Y for each value X. The + +495 +00:37:57,940 --> 00:38:03,360 +regression line is the blue, the red one. It's + +496 +00:38:03,360 --> 00:38:07,560 +called the predicted values. Predicted Y. + +497 +00:38:08,180 --> 00:38:14,760 +Predicted Y is denoted always by Y hat. Now the + +498 +00:38:14,760 --> 00:38:19,740 +difference between Y and Y hat. It's called the + +499 +00:38:19,740 --> 00:38:20,200 +error term. + +500 +00:38:24,680 --> 00:38:28,000 +It's actually the difference between the observed + +501 +00:38:28,000 --> 00:38:31,600 +value and its predicted value. Now, the predicted + +502 +00:38:31,600 --> 00:38:34,720 +value can be determined by using the regression + +503 +00:38:34,720 --> 00:38:39,180 +line. So this line is the predicted value of Y for + +504 +00:38:39,180 --> 00:38:44,480 +XR. Again, beta zero is the intercept. As we + +505 +00:38:44,480 --> 00:38:46,260 +mentioned before, it could be positive or negative + +506 +00:38:46,260 --> 00:38:52,600 +or even equal zero. The slope is changing Y. + +507 +00:38:55,140 --> 00:38:57,580 +Divide by change of x. + +508 +00:39:01,840 --> 00:39:07,140 +So these are the components for the simple linear + +509 +00:39:07,140 --> 00:39:10,840 +regression model. Y again represents the + +510 +00:39:10,840 --> 00:39:14,960 +independent variable. Beta 0 y intercept. Beta 1 + +511 +00:39:14,960 --> 00:39:17,960 +is your slope. And the slope determines the + +512 +00:39:17,960 --> 00:39:20,900 +direction of the relationship. X independent + +513 +00:39:20,900 --> 00:39:25,270 +variable epsilon i is the random error term. Any + +514 +00:39:25,270 --> 00:39:25,650 +question? 
+ +515 +00:39:31,750 --> 00:39:36,610 +The relationship may be positive or negative. It + +516 +00:39:36,610 --> 00:39:37,190 +could be negative. + +517 +00:39:40,950 --> 00:39:42,710 +Now, for negative relationship, + +518 +00:39:57,000 --> 00:40:04,460 +Or negative, where beta zero is negative. + +519 +00:40:04,520 --> 00:40:08,700 +Or beta + +520 +00:40:08,700 --> 00:40:09,740 +zero equals zero. + +521 +00:40:16,680 --> 00:40:20,620 +So here there exists negative relationship, but + +522 +00:40:20,620 --> 00:40:22,060 +beta zero may be positive. + +523 +00:40:25,870 --> 00:40:30,210 +So again, the sign of beta 0 also does not affect + +524 +00:40:30,210 --> 00:40:31,990 +the relationship between the two variables. + +525 +00:40:36,230 --> 00:40:40,590 +Now, we don't actually know the values of beta 0 + +526 +00:40:40,590 --> 00:40:44,510 +and beta 1. We are going to estimate these values + +527 +00:40:44,510 --> 00:40:48,110 +from the sample we have. So the simple linear + +528 +00:40:48,110 --> 00:40:50,970 +regression equation provides an estimate of the + +529 +00:40:50,970 --> 00:40:55,270 +population regression line. So here we have Yi hat + +530 +00:40:55,270 --> 00:41:00,010 +is the estimated or predicted Y value for + +531 +00:41:00,010 --> 00:41:00,850 +observation I. + +532 +00:41:03,530 --> 00:41:08,220 +The estimate of the regression intercept P0. The + +533 +00:41:08,220 --> 00:41:11,360 +estimate of the regression slope is b1, and this + +534 +00:41:11,360 --> 00:41:16,680 +is your x, all independent variable. So here is + +535 +00:41:16,680 --> 00:41:20,340 +the regression equation. Simple linear regression + +536 +00:41:20,340 --> 00:41:24,400 +equation is given by y hat, the predicted value of + +537 +00:41:24,400 --> 00:41:29,380 +y equals b0 plus b1 times x1. + +538 +00:41:31,240 --> 00:41:35,960 +Now these coefficients, b0 and b1 can be computed + +539 +00:41:37,900 --> 00:41:43,040 +by the following equations. 
So the regression + +540 +00:41:43,040 --> 00:41:52,920 +equation is + +541 +00:41:52,920 --> 00:41:57,260 +given by y hat equals b0 plus b1x. + +542 +00:41:59,940 --> 00:42:06,140 +Now the slope, b1, is r times standard deviation + +543 +00:42:06,140 --> 00:42:10,540 +of y Times standard deviation of x. This is the + +544 +00:42:10,540 --> 00:42:13,820 +simplest equation to determine the value of the + +545 +00:42:13,820 --> 00:42:18,980 +star. B1r, r is the correlation coefficient. Sy is + +546 +00:42:18,980 --> 00:42:25,080 +xr, the standard deviations of y and x. Where b0, + +547 +00:42:25,520 --> 00:42:30,880 +which is y intercept, is y bar minus b x bar, or + +548 +00:42:30,880 --> 00:42:38,100 +b1 x bar. Sx, as we know, is the sum of x minus y + +549 +00:42:38,100 --> 00:42:40,460 +squared divided by n minus 1 under square root, + +550 +00:42:40,900 --> 00:42:47,060 +similarly for y values. So this, how can we, these + +551 +00:42:47,060 --> 00:42:52,380 +formulas compute the values of b0 and b1. So we + +552 +00:42:52,380 --> 00:42:54,600 +are going to use these equations in order to + +553 +00:42:54,600 --> 00:42:58,960 +determine the values of b0 and b1. + +554 +00:43:04,670 --> 00:43:07,710 +Now, what's your interpretation about the slope + +555 +00:43:07,710 --> 00:43:13,130 +and the intercept? For example, suppose we are + +556 +00:43:13,130 --> 00:43:18,610 +talking about your score Y and + +557 +00:43:18,610 --> 00:43:22,110 +X number of missing classes. + +558 +00:43:29,210 --> 00:43:35,460 +And suppose, for example, Y hat Equal 95 minus 5x. + +559 +00:43:37,780 --> 00:43:41,420 +Now let's see what's the interpretation of B0. + +560 +00:43:42,300 --> 00:43:45,060 +This is B0. So B0 is 95. + +561 +00:43:47,660 --> 00:43:51,960 +And B1 is 5. Now what's your interpretation about + +562 +00:43:51,960 --> 00:43:57,740 +B0 and B1? B0 is the estimated mean value of Y + +563 +00:43:57,740 --> 00:44:02,560 +when the value of X is 0. 
that means if the

+564
+00:44:02,560 --> 00:44:08,500
+student does not miss any class that means x

+565
+00:44:08,500 --> 00:44:13,260
+equals zero in this case we predict or we estimate

+566
+00:44:13,260 --> 00:44:19,880
+the mean value of his score or her score is 95 so

+567
+00:44:19,880 --> 00:44:27,500
+95 it means when x is zero if x is zero then we

+568
+00:44:27,500 --> 00:44:35,350
+expect his or her score to be 95. So that means

+569
+00:44:35,350 --> 00:44:39,830
+B0 is the estimated mean value of Y when the value

+570
+00:44:39,830 --> 00:44:40,630
+of X is 0.

+571
+00:44:43,370 --> 00:44:46,590
+Now, what's the meaning of the slope? The slope in

+572
+00:44:46,590 --> 00:44:51,290
+this case is negative Y. B1, which is the slope,

+573
+00:44:51,590 --> 00:44:57,610
+is the estimated change in the mean of Y. as a

+574
+00:44:57,610 --> 00:45:03,050
+result of a one unit change in x for example let's

+575
+00:45:03,050 --> 00:45:07,070
+compute y for different values of x suppose x is

+576
+00:45:07,070 --> 00:45:15,510
+one now we predict his score to be 95 minus 5

+577
+00:45:15,510 --> 00:45:25,470
+times 1 which is 90 when x is 2 for example Y hat

+578
+00:45:25,470 --> 00:45:28,570
+is 95 minus 5 times 2, so that's 85.

+579
+00:45:31,950 --> 00:45:39,970
+So for each one unit, there is a drop by five

+580
+00:45:39,970 --> 00:45:43,750
+units in his score. That means if number of

+581
+00:45:43,750 --> 00:45:47,550
+missing classes increases by one unit, then his or

+582
+00:45:47,550 --> 00:45:51,790
+her score is expected to be reduced by five units

+583
+00:45:51,790 --> 00:45:56,150
+because the sign is negative. 
another example

+584
+00:45:56,150 --> 00:46:05,910
+suppose again we are interested in weights and

+585
+00:46:05,910 --> 00:46:16,170
+ages and imagine that just

+586
+00:46:16,170 --> 00:46:21,670
+for example y equal y hat equals three plus four x

+587
+00:46:21,670 --> 00:46:29,830
+now y hat equals 3 if x equals zero. That has no

+588
+00:46:29,830 --> 00:46:34,510
+meaning because you cannot say age of zero. So

+589
+00:46:34,510 --> 00:46:40,450
+sometimes the meaning of y intercept does not make

+590
+00:46:40,450 --> 00:46:46,150
+sense because you cannot say x equals zero. Now

+591
+00:46:46,150 --> 00:46:50,690
+for the slope of four, that means as his or her

+592
+00:46:50,690 --> 00:46:55,550
+age increases by one year, Then we expect his

+593
+00:46:55,550 --> 00:47:00,470
+weight to increase by four kilograms. So as one

+594
+00:47:00,470 --> 00:47:05,130
+unit increase in x, y is our, his weight is

+595
+00:47:05,130 --> 00:47:10,150
+expected to increase by four units. So again,

+596
+00:47:10,370 --> 00:47:16,950
+sometimes we can interpret the y intercept, but in

+597
+00:47:16,950 --> 00:47:18,670
+some cases it has no meaning.

+598
+00:47:24,970 --> 00:47:27,190
+Now for the previous example, for the selling

+599
+00:47:27,190 --> 00:47:32,930
+price of a home and its size, B1rSy divided by Sx,

+600
+00:47:33,790 --> 00:47:43,550
+r is computed, r is found to be 76%, 76%Sy divided

+601
+00:47:43,550 --> 00:47:49,990
+by Sx, that will give 0.109. B0y bar minus B1x

+602
+00:47:49,990 --> 00:47:50,670
+bar,

+603
+00:47:53,610 --> 00:48:00,150
+Y bar for this data is 286 minus B1. So we have to

+604
+00:48:00,150 --> 00:48:03,490
+compute first B1 because we use it in order to

+605
+00:48:03,490 --> 00:48:08,590
+determine B0. And calculation gives 98. 
So that + +606 +00:48:08,590 --> 00:48:16,450 +means based on these equations, Y hat equals 0 + +607 +00:48:16,450 --> 00:48:22,990 +.10977 plus 98.248. + +608 +00:48:24,790 --> 00:48:29,370 +times X. X is the size. + +609 +00:48:32,890 --> 00:48:39,830 +0.1 B1 + +610 +00:48:39,830 --> 00:48:45,310 +is + +611 +00:48:45,310 --> 00:48:56,650 +0.1, B0 is 98, so 98.248 plus B1. So this is your + +612 +00:48:56,650 --> 00:49:03,730 +regression equation. So again, the intercept is + +613 +00:49:03,730 --> 00:49:09,750 +98. So this amount, the segment is 98. Now the + +614 +00:49:09,750 --> 00:49:14,790 +slope is 0.109. So house price, the expected value + +615 +00:49:14,790 --> 00:49:21,270 +of house price equals B098 plus 0.109 square feet. + +616 +00:49:23,150 --> 00:49:27,630 +So that's the prediction line for the house price. + +617 +00:49:28,510 --> 00:49:34,370 +So again, house price equal B0 98 plus 0.10977 + +618 +00:49:34,370 --> 00:49:36,930 +times square root. Now, what's your interpretation + +619 +00:49:36,930 --> 00:49:41,950 +about B0 and B1? B0 is the estimated mean value of + +620 +00:49:41,950 --> 00:49:46,430 +Y when the value of X is 0. So if X is 0, this + +621 +00:49:46,430 --> 00:49:52,980 +range of X observed X values and you have a home + +622 +00:49:52,980 --> 00:49:57,860 +or a house of size zero. So that means this value + +623 +00:49:57,860 --> 00:50:02,680 +has no meaning. Because a house cannot have a + +624 +00:50:02,680 --> 00:50:06,400 +square footage of zero. So B0 has no practical + +625 +00:50:06,400 --> 00:50:10,040 +application in this case. So sometimes it makes + +626 +00:50:10,040 --> 00:50:17,620 +sense, in other cases it doesn't have that. So for + +627 +00:50:17,620 --> 00:50:21,790 +this specific example, B0 has no practical + +628 +00:50:21,790 --> 00:50:28,210 +application in this case. 
But B1 which is 0.1097, + +629 +00:50:28,930 --> 00:50:33,050 +B1 estimates the change in the mean value of Y as + +630 +00:50:33,050 --> 00:50:36,730 +a result of one unit increasing X. So for this + +631 +00:50:36,730 --> 00:50:41,640 +value which is 0.109, it means This fellow tells + +632 +00:50:41,640 --> 00:50:46,420 +us that the mean value of a house can increase by + +633 +00:50:46,420 --> 00:50:52,280 +this amount, increase by 0.1097, but we have to + +634 +00:50:52,280 --> 0 + +667 +00:53:54,190 --> 00:53:57,770 +that's + +668 +00:53:57,770 --> 00:53:57,990 +all diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..b3a14b079ec471d002f60cb057401c1bc8d55b4d --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_postprocess.srt @@ -0,0 +1,2672 @@ +1 +00:00:11,020 --> 00:00:13,920 +The last chapter we are going to talk in this + +2 +00:00:13,920 --> 00:00:17,820 +semester is correlation and simple linearization. + +3 +00:00:18,380 --> 00:00:23,300 +So we are going to explain two types in chapter + +4 +00:00:23,300 --> 00:00:29,280 +12. One is called correlation. And the other type + +5 +00:00:29,280 --> 00:00:33,500 +is simple linear regression. Maybe this chapter + +6 +00:00:33,500 --> 00:00:40,020 +I'm going to spend about two lectures in order to + +7 +00:00:40,020 --> 00:00:45,000 +cover these objectives. The first objective is to + +8 +00:00:45,000 --> 00:00:48,810 +calculate the coefficient of correlation. The + +9 +00:00:48,810 --> 00:00:51,210 +second objective, the meaning of the regression + +10 +00:00:51,210 --> 00:00:55,590 +coefficients beta 0 and beta 1. 
And the last + +11 +00:00:55,590 --> 00:00:58,710 +objective is how to use regression analysis to + +12 +00:00:58,710 --> 00:01:03,030 +predict the value of dependent variable based on + +13 +00:01:03,030 --> 00:01:06,010 +an independent variable. It looks like that we + +14 +00:01:06,010 --> 00:01:10,590 +have discussed objective number one in chapter + +15 +00:01:10,590 --> 00:01:16,470 +three. So calculation of the correlation + +16 +00:01:16,470 --> 00:01:20,740 +coefficient is done in chapter three, but here + +17 +00:01:20,740 --> 00:01:26,060 +we'll give some details about correlation also. A + +18 +00:01:26,060 --> 00:01:28,480 +scatter plot can be used to show the relationship + +19 +00:01:28,480 --> 00:01:31,540 +between two variables. For example, imagine that + +20 +00:01:31,540 --> 00:01:35,400 +we have a random sample of 10 children. + +21 +00:01:37,800 --> 00:01:47,940 +And we have data on their weights and ages. And we + +22 +00:01:47,940 --> 00:01:51,640 +are interested to examine the relationship between + +23 +00:01:51,640 --> 00:01:58,400 +weights and age. For example, suppose child number + +24 +00:01:58,400 --> 00:02:06,260 +one, his + +25 +00:02:06,260 --> 00:02:12,060 +or her age is two years with weight, for example, + +26 +00:02:12,200 --> 00:02:12,880 +eight kilograms. + +27 +00:02:17,680 --> 00:02:21,880 +His weight or her weight is four years, and his or + +28 +00:02:21,880 --> 00:02:24,500 +her weight is, for example, 15 kilograms, and so + +29 +00:02:24,500 --> 00:02:29,680 +on. And again, we are interested to examine the + +30 +00:02:29,680 --> 00:02:32,640 +relationship between age and weight. Maybe they + +31 +00:02:32,640 --> 00:02:37,400 +exist sometimes. 
positive relationship between the + +32 +00:02:37,400 --> 00:02:41,100 +two variables that means if one variable increases + +33 +00:02:41,100 --> 00:02:45,260 +the other one also increase if one variable + +34 +00:02:45,260 --> 00:02:47,980 +increases the other will also decrease so they + +35 +00:02:47,980 --> 00:02:52,980 +have the same direction either up or down so we + +36 +00:02:52,980 --> 00:02:58,140 +have to know number one the form of the + +37 +00:02:58,140 --> 00:03:02,140 +relationship this one could be linear here we + +38 +00:03:02,140 --> 00:03:06,890 +focus just on linear relationship between X and Y. + +39 +00:03:08,050 --> 00:03:13,730 +The second, we have to know the direction of the + +40 +00:03:13,730 --> 00:03:21,270 +relationship. This direction might be positive or + +41 +00:03:21,270 --> 00:03:22,350 +negative relationship. + +42 +00:03:25,150 --> 00:03:27,990 +In addition to that, we have to know the strength + +43 +00:03:27,990 --> 00:03:33,760 +of the relationship between the two variables of + +44 +00:03:33,760 --> 00:03:37,320 +interest the strength can be classified into three + +45 +00:03:37,320 --> 00:03:46,480 +categories either strong, moderate or there exists + +46 +00:03:46,480 --> 00:03:50,580 +a weak relationship so it could be positive + +47 +00:03:50,580 --> 00:03:53,320 +-strong, positive-moderate or positive-weak, the + +48 +00:03:53,320 --> 00:03:58,360 +same for negative so by using scatter plot we can + +49 +00:03:58,360 --> 00:04:02,530 +determine the form either linear or non-linear, + +50 +00:04:02,690 --> 00:04:06,130 +but here we are focusing on just linear + +51 +00:04:06,130 --> 00:04:10,310 +relationship. Also, we can determine the direction + +52 +00:04:10,310 --> 00:04:12,870 +of the relationship. We can say there exists + +53 +00:04:12,870 --> 00:04:15,910 +positive or negative based on the scatter plot. 
+ +54 +00:04:16,710 --> 00:04:19,530 +Also, we can know the strength of the + +55 +00:04:19,530 --> 00:04:23,130 +relationship, either strong, moderate or weak. For + +56 +00:04:23,130 --> 00:04:29,810 +example, suppose we have again weights and ages. + +57 +00:04:30,390 --> 00:04:33,590 +And we know that there are two types of variables + +58 +00:04:33,590 --> 00:04:36,710 +in this case. One is called dependent and the + +59 +00:04:36,710 --> 00:04:41,330 +other is independent. So if we, as we explained + +60 +00:04:41,330 --> 00:04:47,890 +before, is the dependent variable and A is + +61 +00:04:47,890 --> 00:04:48,710 +independent variable. + +62 +00:04:52,690 --> 00:04:57,270 +Always dependent + +63 +00:04:57,270 --> 00:04:57,750 +variable + +64 +00:05:00,400 --> 00:05:05,560 +is denoted by Y and always on the vertical axis so + +65 +00:05:05,560 --> 00:05:11,300 +here we have weight and independent variable is + +66 +00:05:11,300 --> 00:05:17,760 +denoted by X and X is in the X axis or horizontal + +67 +00:05:17,760 --> 00:05:26,300 +axis now scatter plot for example here child with + +68 +00:05:26,300 --> 00:05:30,820 +age 2 years his weight is 8 So two years, for + +69 +00:05:30,820 --> 00:05:36,760 +example, this is eight. So this star represents + +70 +00:05:36,760 --> 00:05:42,320 +the first pair of observation, age of two and + +71 +00:05:42,320 --> 00:05:46,820 +weight of eight. The other child, his weight is + +72 +00:05:46,820 --> 00:05:52,860 +four years, and the corresponding weight is 15. + +73 +00:05:53,700 --> 00:05:58,970 +For example, this value is 15. The same for the + +74 +00:05:58,970 --> 00:06:02,430 +other points. Here we can know the direction. + +75 +00:06:04,910 --> 00:06:10,060 +In this case they exist. Positive. Form is linear. + +76 +00:06:12,100 --> 00:06:16,860 +Strong or weak or moderate depends on how these + +77 +00:06:16,860 --> 00:06:20,260 +values are close to the straight line. 
Closer + +78 +00:06:20,260 --> 00:06:24,380 +means stronger. So if the points are closer to the + +79 +00:06:24,380 --> 00:06:26,620 +straight line, it means there exists stronger + +80 +00:06:26,620 --> 00:06:30,800 +relationship between the two variables. So closer + +81 +00:06:30,800 --> 00:06:34,480 +means stronger, either positive or negative. In + +82 +00:06:34,480 --> 00:06:37,580 +this case, there exists positive. Now for the + +83 +00:06:37,580 --> 00:06:42,360 +negative association or relationship, we have the + +84 +00:06:42,360 --> 00:06:46,060 +other direction, it could be this one. So in this + +85 +00:06:46,060 --> 00:06:49,460 +case there exists linear but negative + +86 +00:06:49,460 --> 00:06:51,900 +relationship, and this negative could be positive + +87 +00:06:51,900 --> 00:06:56,100 +or negative, it depends on the points. So it's + +88 +00:06:56,100 --> 00:07:02,660 +positive relationship. The other direction is + +89 +00:07:02,660 --> 00:07:06,460 +negative. So the points, if the points are closed, + +90 +00:07:06,820 --> 00:07:10,160 +then we can say there exists strong negative + +91 +00:07:10,160 --> 00:07:14,440 +relationship. So by using scatter plot, we can + +92 +00:07:14,440 --> 00:07:17,280 +determine all of these. + +93 +00:07:20,840 --> 00:07:24,460 +and direction and strength now here the two + +94 +00:07:24,460 --> 00:07:27,060 +variables we are talking about are numerical + +95 +00:07:27,060 --> 00:07:30,480 +variables so the two variables here are numerical + +96 +00:07:30,480 --> 00:07:35,220 +variables so we are talking about quantitative + +97 +00:07:35,220 --> 00:07:39,850 +variables but remember in chapter 11 We talked + +98 +00:07:39,850 --> 00:07:43,150 +about the relationship between two qualitative + +99 +00:07:43,150 --> 00:07:47,450 +variables. So we use chi-square test. Here we are + +100 +00:07:47,450 --> 00:07:49,630 +talking about something different. 
We are talking + +101 +00:07:49,630 --> 00:07:52,890 +about numerical variables. So we can use scatter + +102 +00:07:52,890 --> 00:07:58,510 +plot, number one. Next correlation analysis is + +103 +00:07:58,510 --> 00:08:02,090 +used to measure the strength of the association + +104 +00:08:02,090 --> 00:08:05,190 +between two variables. And here again, we are just + +105 +00:08:05,190 --> 00:08:09,560 +talking about linear relationship. So this chapter + +106 +00:08:09,560 --> 00:08:13,340 +just covers the linear relationship between the + +107 +00:08:13,340 --> 00:08:17,040 +two variables. Because sometimes there exists non + +108 +00:08:17,040 --> 00:08:23,180 +-linear relationship between the two variables. So + +109 +00:08:23,180 --> 00:08:26,120 +correlation is only concerned with the strength of + +110 +00:08:26,120 --> 00:08:30,500 +the relationship. No causal effect is implied with + +111 +00:08:30,500 --> 00:08:35,220 +correlation. We just say that X affects Y, or X + +112 +00:08:35,220 --> 00:08:39,580 +explains the variation in Y. Scatter plots were + +113 +00:08:39,580 --> 00:08:43,720 +first presented in Chapter 2, and we skipped, if + +114 +00:08:43,720 --> 00:08:48,480 +you remember, Chapter 2. And it's easy to make + +115 +00:08:48,480 --> 00:08:52,620 +scatter plots for Y versus X. In Chapter 3, we + +116 +00:08:52,620 --> 00:08:56,440 +talked about correlation, so correlation was first + +117 +00:08:56,440 --> 00:09:00,060 +presented in Chapter 3. But here I will give just + +118 +00:09:00,060 --> 00:09:07,240 +a review for computation about correlation + +119 +00:09:07,240 --> 00:09:11,460 +coefficient or coefficient of correlation. First, + +120 +00:09:12,800 --> 00:09:15,680 +coefficient of correlation measures the relative + +121 +00:09:15,680 --> 00:09:19,920 +strength of the linear relationship between two + +122 +00:09:19,920 --> 00:09:23,740 +numerical variables. 
So here, we are talking about + +123 +00:09:23,740 --> 00:09:28,080 +numerical variables. Sample correlation + +124 +00:09:28,080 --> 00:09:31,500 +coefficient is given by this equation. which is + +125 +00:09:31,500 --> 00:09:36,180 +sum of the product of xi minus x bar, yi minus y + +126 +00:09:36,180 --> 00:09:41,100 +bar, divided by n minus 1 times standard deviation + +127 +00:09:41,100 --> 00:09:44,960 +of x times standard deviation of y. We know that x + +128 +00:09:44,960 --> 00:09:47,240 +bar and y bar are the means of x and y + +129 +00:09:47,240 --> 00:09:51,360 +respectively. And Sx, Sy are the standard + +130 +00:09:51,360 --> 00:09:55,540 +deviations of x and y values. And we know this + +131 +00:09:55,540 --> 00:09:58,460 +equation before. But there is another equation + +132 +00:09:58,460 --> 00:10:05,330 +that one can be used For computation, which is + +133 +00:10:05,330 --> 00:10:09,290 +called shortcut formula, which is just sum of xy + +134 +00:10:09,290 --> 00:10:15,310 +minus n times x bar y bar divided by square root + +135 +00:10:15,310 --> 00:10:18,690 +of this quantity. And we know this equation from + +136 +00:10:18,690 --> 00:10:23,650 +chapter three. Now again, x bar and y bar are the + +137 +00:10:23,650 --> 00:10:30,060 +means. Now the question is, Do outliers affect the + +138 +00:10:30,060 --> 00:10:36,440 +correlation? For sure, yes. Because this formula + +139 +00:10:36,440 --> 00:10:39,940 +actually based on the means and the standard + +140 +00:10:39,940 --> 00:10:44,300 +deviations, and these two measures are affected by + +141 +00:10:44,300 --> 00:10:47,880 +outliers. So since R is a function of these two + +142 +00:10:47,880 --> 00:10:51,340 +statistics, the means and standard deviations, + +143 +00:10:51,940 --> 00:10:54,280 +then outliers will affect the value of the + +144 +00:10:54,280 --> 00:10:55,940 +correlation coefficient. 
+ +145 +00:10:57,890 --> 00:11:01,170 +Some features about the coefficient of + +146 +00:11:01,170 --> 00:11:09,570 +correlation. Here rho is the population + +147 +00:11:09,570 --> 00:11:13,210 +coefficient of correlation, and R is the sample + +148 +00:11:13,210 --> 00:11:17,730 +coefficient of correlation. Either rho or R have + +149 +00:11:17,730 --> 00:11:21,390 +the following features. Number one, unity free. It + +150 +00:11:21,390 --> 00:11:24,890 +means R has no units. For example, here we are + +151 +00:11:24,890 --> 00:11:28,820 +talking about whales. And weight in kilograms, + +152 +00:11:29,300 --> 00:11:33,700 +ages in years. And for example, suppose the + +153 +00:11:33,700 --> 00:11:37,080 +correlation between these two variables is 0.8. + +154 +00:11:38,620 --> 00:11:41,760 +It's unity free, so it's just 0.8. So there is no + +155 +00:11:41,760 --> 00:11:45,640 +unit. You cannot say 0.8 kilogram per year or + +156 +00:11:45,640 --> 00:11:51,040 +whatever it is. So just 0.8. So the first feature + +157 +00:11:51,040 --> 00:11:53,360 +of the correlation coefficient is unity-free. + +158 +00:11:54,180 --> 00:11:56,340 +Number two ranges between negative one and plus + +159 +00:11:56,340 --> 00:12:00,380 +one. So R is always, or rho, is always between + +160 +00:12:00,380 --> 00:12:04,560 +minus one and plus one. So minus one smaller than + +161 +00:12:04,560 --> 00:12:07,340 +or equal to R smaller than or equal to plus one. + +162 +00:12:07,420 --> 00:12:11,420 +So R is always in this range. So R cannot be + +163 +00:12:11,420 --> 00:12:15,260 +smaller than negative one or greater than plus + +164 +00:12:15,260 --> 00:12:20,310 +one. The closer to minus one or negative one, the + +165 +00:12:20,310 --> 00:12:23,130 +stronger negative relationship between or linear + +166 +00:12:23,130 --> 00:12:26,770 +relationship between x and y. So, for example, if + +167 +00:12:26,770 --> 00:12:33,370 +R is negative 0.85 or R is negative 0.8. 
Now, this + +168 +00:12:33,370 --> 00:12:39,690 +value is closer to minus one than negative 0.8. So + +169 +00:12:39,690 --> 00:12:43,230 +negative 0.85 is stronger than negative 0.8. + +170 +00:12:44,590 --> 00:12:48,470 +Because we are looking for closer to minus 1. + +171 +00:12:49,570 --> 00:12:55,310 +Minus 0.8, the value itself is greater than minus + +172 +00:12:55,310 --> 00:12:59,610 +0.85. But this value is closer to minus 1 than + +173 +00:12:59,610 --> 00:13:03,790 +minus 0.8. So we can say that this relationship is + +174 +00:13:03,790 --> 00:13:05,070 +stronger than the other one. + +175 +00:13:07,870 --> 00:13:11,730 +Also, the closer to plus 1, the stronger the + +176 +00:13:11,730 --> 00:13:16,040 +positive linear relationship. Here, suppose R is 0 + +177 +00:13:16,040 --> 00:13:22,740 +.7 and another R is 0.8. 0.8 is closer to plus one + +178 +00:13:22,740 --> 00:13:26,740 +than 0.7, so 0.8 is stronger. This one makes + +179 +00:13:26,740 --> 00:13:31,800 +sense. The closer to zero, the weaker relationship + +180 +00:13:31,800 --> 00:13:35,420 +between the two variables. For example, suppose R + +181 +00:13:35,420 --> 00:13:40,720 +is plus or minus 0.05. This value is very close to + +182 +00:13:40,720 --> 00:13:44,420 +zero. It means there exists weak. relationship. + +183 +00:13:44,980 --> 00:13:47,960 +Sometimes we can say that there exists moderate + +184 +00:13:47,960 --> 00:13:57,080 +relationship if R is close to 0.5. So it could be + +185 +00:13:57,080 --> 00:14:01,360 +classified into these groups closer to minus 1, + +186 +00:14:01,500 --> 00:14:06,220 +closer to 1, 0.5 or 0. 
So we can know the + +187 +00:14:06,220 --> 00:14:11,680 +direction by the sign of R negative it means + +188 +00:14:11,680 --> 00:14:14,320 +because here our ranges as we mentioned between + +189 +00:14:14,320 --> 00:14:19,520 +minus one and plus one here zero so this these + +190 +00:14:19,520 --> 00:14:24,560 +values it means there exists negative above zero + +191 +00:14:24,560 --> 00:14:26,760 +all the way up to one it means there exists + +192 +00:14:26,760 --> 00:14:31,020 +positive relationship between the two variables so + +193 +00:14:31,020 --> 00:14:35,520 +the sign gives the direction of the relationship + +194 +00:14:36,720 --> 00:14:40,840 +The absolute value gives the strength of the + +195 +00:14:40,840 --> 00:14:43,500 +relationship between the two variables. So the + +196 +00:14:43,500 --> 00:14:49,260 +same as we had discussed before. Now, some types + +197 +00:14:49,260 --> 00:14:51,880 +of scatter plots for different types of + +198 +00:14:51,880 --> 00:14:54,740 +relationship between the two variables is + +199 +00:14:54,740 --> 00:14:59,100 +presented in this slide. For example, if you look + +200 +00:14:59,100 --> 00:15:03,940 +carefully at figure one here, sharp one, this one, + +201 +00:15:04,720 --> 00:15:13,020 +and the other one, In each one, all points are + +202 +00:15:13,020 --> 00:15:15,820 +on the straight line, it means they exist perfect. + +203 +00:15:16,840 --> 00:15:21,720 +So if all points fall exactly on the straight + +204 +00:15:21,720 --> 00:15:24,220 +line, it means they exist perfect. + +205 +00:15:31,400 --> 00:15:35,160 +Here there exists perfect negative. So this is + +206 +00:15:35,160 --> 00:15:37,740 +perfect negative relationship. The other one + +207 +00:15:37,740 --> 00:15:41,240 +perfect positive relationship. In reality you will + +208 +00:15:41,240 --> 00:15:45,680 +never see something + +209 +00:15:45,680 --> 00:15:49,380 +like perfect positive or perfect negative. 
Maybe + +210 +00:15:49,380 --> 00:15:53,270 +in real situation. In real situation, most of the + +211 +00:15:53,270 --> 00:15:56,730 +time, R is close to 0.9 or 0.85 or something like + +212 +00:15:56,730 --> 00:16:02,070 +that, but it's not exactly equal one. Because + +213 +00:16:02,070 --> 00:16:05,330 +equal one, it means if you know the value of a + +214 +00:16:05,330 --> 00:16:08,630 +child's age, then you can predict the exact + +215 +00:16:08,630 --> 00:16:13,510 +weight. And that never happened. If the data looks + +216 +00:16:13,510 --> 00:16:18,770 +like this table, for example. Suppose here we have + +217 +00:16:18,770 --> 00:16:25,750 +age and weight. H1 for example 3, 5, 7 weight for + +218 +00:16:25,750 --> 00:16:32,450 +example 10, 12, 14, 16 in this case they exist + +219 +00:16:32,450 --> 00:16:37,610 +perfect because x increases by 2 units also + +220 +00:16:37,610 --> 00:16:41,910 +weights increases by 2 units or maybe weights for + +221 +00:16:41,910 --> 00:16:50,180 +example 9, 12, 15, 18 and so on So X or A is + +222 +00:16:50,180 --> 00:16:53,260 +increased by two units for each value for each + +223 +00:16:53,260 --> 00:16:58,860 +individual and also weights are increased by three + +224 +00:16:58,860 --> 00:17:03,080 +units for each person. In this case there exists + +225 +00:17:03,080 --> 00:17:06,820 +perfect relationship but that never happened in + +226 +00:17:06,820 --> 00:17:13,300 +real life. So perfect means all points are lie on + +227 +00:17:13,300 --> 00:17:16,260 +the straight line otherwise if the points are + +228 +00:17:16,260 --> 00:17:21,230 +close Then we can say there exists strong. 
Here if + +229 +00:17:21,230 --> 00:17:24,750 +you look carefully at these points corresponding + +230 +00:17:24,750 --> 00:17:30,150 +to this regression line, it looks like not strong + +231 +00:17:30,150 --> 00:17:32,630 +because some of the points are not closed, so you + +232 +00:17:32,630 --> 00:17:35,450 +can say there exists maybe moderate negative + +233 +00:17:35,450 --> 00:17:39,530 +relationship. This one, most of the points are + +234 +00:17:39,530 --> 00:17:42,390 +scattered away from the straight line, so there + +235 +00:17:42,390 --> 00:17:46,930 +exists weak relationship. So by just looking at + +236 +00:17:46,930 --> 00:17:50,290 +the scatter path, sometimes you can, sometimes + +237 +00:17:50,290 --> 00:17:53,290 +it's hard to tell, but most of the time you can + +238 +00:17:53,290 --> 00:17:58,250 +tell at least the direction, positive or negative, + +239 +00:17:59,410 --> 00:18:04,150 +the form, linear or non-linear, or the strength of + +240 +00:18:04,150 --> 00:18:09,100 +the relationship. The last one here, now x + +241 +00:18:09,100 --> 00:18:13,800 +increases, y remains the same. For example, + +242 +00:18:13,880 --> 00:18:18,580 +suppose x is 1, y is 10. x increases to 2, y still + +243 +00:18:18,580 --> 00:18:22,220 +is 10. So as x increases, y stays the same + +244 +00:18:22,220 --> 00:18:26,140 +position, it means there is no linear relationship + +245 +00:18:26,140 --> 00:18:28,900 +between the two variables. So based on the scatter + +246 +00:18:28,900 --> 00:18:33,240 +plot you can have an idea about the relationship + +247 +00:18:33,240 --> 00:18:37,800 +between the two variables. Here I will give a + +248 +00:18:37,800 --> 00:18:41,120 +simple example in order to determine the + +249 +00:18:41,120 --> 00:18:45,160 +correlation coefficient. 
A real estate agent + +250 +00:18:45,160 --> 00:18:50,380 +wishes to examine the relationship between selling + +251 +00:18:50,380 --> 00:18:54,580 +the price of a home and its size measured in + +252 +00:18:54,580 --> 00:18:57,140 +square feet. So in this case, there are two + +253 +00:18:57,140 --> 00:19:02,400 +variables of interest. One is called selling price + +254 +00:19:02,400 --> 00:19:13,720 +of a home. So here, selling price of a home and + +255 +00:19:13,720 --> 00:19:18,020 +its size. Now, selling price in $1,000. + +256 +00:19:25,360 --> 00:19:29,380 +And size in feet squared. Here we have to + +257 +00:19:29,380 --> 00:19:35,640 +distinguish between dependent and independent. So + +258 +00:19:35,640 --> 00:19:39,740 +your dependent variable is house price, sometimes + +259 +00:19:39,740 --> 00:19:41,620 +called response variable. + +260 +00:19:45,750 --> 00:19:49,490 +The independent variable is the size, which is in + +261 +00:19:49,490 --> 00:19:54,570 +square feet, sometimes called sub-planetary + +262 +00:19:54,570 --> 00:19:54,850 +variable. + +263 +00:19:59,570 --> 00:20:06,370 +So my Y is ceiling rise, and size is square feet, + +264 +00:20:07,530 --> 00:20:12,910 +or size of the house. In this case, there are 10. + +265 +00:20:14,290 --> 00:20:17,890 +It's sample size is 10. So the first house with + +266 +00:20:17,890 --> 00:20:26,850 +size 1,400 square feet, it's selling price is 245 + +267 +00:20:26,850 --> 00:20:31,670 +multiplied by 1,000. Because these values are in + +268 +00:20:31,670 --> 00:20:37,950 +$1,000. Now based on this data, you can first plot + +269 +00:20:37,950 --> 00:20:46,590 +the scatterplot of house price In Y direction, the + +270 +00:20:46,590 --> 00:20:51,870 +vertical direction. So here is house. And rise. + +271 +00:20:54,230 --> 00:21:01,470 +And size in the X axis. You will get this scatter + +272 +00:21:01,470 --> 00:21:07,370 +plot. 
Now, the data here is just 10 points, so + +273 +00:21:07,370 --> 00:21:12,590 +sometimes it's hard to tell. the relationship + +274 +00:21:12,590 --> 00:21:15,510 +between the two variables if your data is small. + +275 +00:21:16,510 --> 00:21:21,170 +But just this example for illustration. But at + +276 +00:21:21,170 --> 00:21:25,370 +least you can determine that there exists linear + +277 +00:21:25,370 --> 00:21:28,810 +relationship between the two variables. It is + +278 +00:21:28,810 --> 00:21:35,490 +positive. So the form is linear. Direction is + +279 +00:21:35,490 --> 00:21:41,880 +positive. Weak or strong or moderate. Sometimes + +280 +00:21:41,880 --> 00:21:45,620 +it's not easy to tell if it is strong or moderate. + +281 +00:21:47,720 --> 00:21:50,120 +Now if you look at these points, some of them are + +282 +00:21:50,120 --> 00:21:53,700 +close to the straight line and others are away + +283 +00:21:53,700 --> 00:21:56,700 +from the straight line. So maybe there exists + +284 +00:21:56,700 --> 00:22:02,720 +moderate for example, but you cannot say strong. + +285 +00:22:03,930 --> 00:22:08,210 +Here, strong it means the points are close to the + +286 +00:22:08,210 --> 00:22:11,890 +straight line. Sometimes it's hard to tell the + +287 +00:22:11,890 --> 00:22:15,230 +strength of the relationship, but you can know the + +288 +00:22:15,230 --> 00:22:20,990 +form or the direction. But to measure the exact + +289 +00:22:20,990 --> 00:22:24,130 +strength, you have to measure the correlation + +290 +00:22:24,130 --> 00:22:29,810 +coefficient, R. Now, by looking at the data, you + +291 +00:22:29,810 --> 00:22:31,430 +can compute + +292 +00:22:33,850 --> 00:22:42,470 +The sum of x values, y values, sum of x squared, + +293 +00:22:43,290 --> 00:22:48,170 +sum of y squared, also sum of xy. Now plug these + +294 +00:22:48,170 --> 00:22:50,610 +values into the formula we have for the shortcut + +295 +00:22:50,610 --> 00:22:58,210 +formula. 
You will get R to be 0.76 around 76. + +296 +00:23:04,050 --> 00:23:10,170 +So there exists positive, moderate relationship + +297 +00:23:10,170 --> 00:23:13,770 +between selling + +298 +00:23:13,770 --> 00:23:19,850 +price of a home and its size. So that means if the + +299 +00:23:19,850 --> 00:23:24,670 +size increases, the selling price also increases. + +300 +00:23:25,310 --> 00:23:29,550 +So there exists positive relationship between the + +301 +00:23:29,550 --> 00:23:30,310 +two variables. + +302 +00:23:35,800 --> 00:23:40,300 +Strong it means close to 1, 0.8, 0.85, 0.9, you + +303 +00:23:40,300 --> 00:23:44,400 +can say there exists strong. But fields is not + +304 +00:23:44,400 --> 00:23:47,960 +strong relationship, you can say it's moderate + +305 +00:23:47,960 --> 00:23:53,440 +relationship. Because it's close if now if you + +306 +00:23:53,440 --> 00:23:57,080 +just compare this value and other data gives 9%. + +307 +00:23:58,830 --> 00:24:03,790 +Other one gives 85%. So these values are much + +308 +00:24:03,790 --> 00:24:08,550 +closer to 1 than 0.7, but still this value is + +309 +00:24:08,550 --> 00:24:09,570 +considered to be high. + +310 +00:24:15,710 --> 00:24:16,810 +Any question? + +311 +00:24:19,850 --> 00:24:22,810 +Next, I will give some introduction to regression + +312 +00:24:22,810 --> 00:24:23,390 +analysis. + +313 +00:24:26,970 --> 00:24:32,210 +regression analysis used to number one, predict + +314 +00:24:32,210 --> 00:24:35,050 +the value of a dependent variable based on the + +315 +00:24:35,050 --> 00:24:39,250 +value of at least one independent variable. So by + +316 +00:24:39,250 --> 00:24:42,490 +using the data we have for selling price of a home + +317 +00:24:42,490 --> 00:24:48,370 +and size, you can predict the selling price by + +318 +00:24:48,370 --> 00:24:51,510 +knowing the value of its size. 
So suppose for + +319 +00:24:51,510 --> 00:24:54,870 +example, You know that the size of a house is + +320 +00:24:54,870 --> 00:25:03,510 +1450, 1450 square feet. What do you predict its + +321 +00:25:03,510 --> 00:25:10,190 +size, its sale or price? So by using this value, + +322 +00:25:10,310 --> 00:25:16,510 +we can predict the selling price. Next, explain + +323 +00:25:16,510 --> 00:25:19,890 +the impact of changes in independent variable on + +324 +00:25:19,890 --> 00:25:23,270 +the dependent variable. You can say, for example, + +325 +00:25:23,510 --> 00:25:30,650 +90% of the variability in the dependent variable + +326 +00:25:30,650 --> 00:25:36,790 +in selling price is explained by its size. So we + +327 +00:25:36,790 --> 00:25:39,410 +can predict the value of dependent variable based + +328 +00:25:39,410 --> 00:25:42,890 +on a value of one independent variable at least. + +329 +00:25:43,870 --> 00:25:47,090 +Or also explain the impact of changes in + +330 +00:25:47,090 --> 00:25:49,550 +independent variable on the dependent variable. + +331 +00:25:51,420 --> 00:25:53,920 +Sometimes there exists more than one independent + +332 +00:25:53,920 --> 00:25:59,680 +variable. For example, maybe there are more than + +333 +00:25:59,680 --> 00:26:04,500 +one variable that affects a price, a selling + +334 +00:26:04,500 --> 00:26:10,300 +price. For example, beside selling + +335 +00:26:10,300 --> 00:26:16,280 +price, beside size, maybe location. + +336 +00:26:19,480 --> 00:26:23,580 +Maybe location is also another factor that affects + +337 +00:26:23,580 --> 00:26:27,360 +the selling price. So in this case there are two + +338 +00:26:27,360 --> 00:26:32,240 +variables. If there exists more than one variable, + +339 +00:26:32,640 --> 00:26:36,080 +in this case we have something called multiple + +340 +00:26:36,080 --> 00:26:38,680 +linear regression. + +341 +00:26:42,030 --> 00:26:46,710 +Here, we just talk about one independent variable. 
+ +342 +00:26:47,030 --> 00:26:51,610 +There is only, in this chapter, there is only one + +343 +00:26:51,610 --> 00:26:58,330 +x. So it's called simple linear + +344 +00:26:58,330 --> 00:26:59,330 +regression. + +345 +00:27:02,190 --> 00:27:07,930 +The calculations for multiple takes time. So we + +346 +00:27:07,930 --> 00:27:11,430 +are going just to cover one independent variable. + +347 +00:27:11,930 --> 00:27:14,290 +But if there exists more than one, in this case + +348 +00:27:14,290 --> 00:27:18,250 +you have to use some statistical software as SPSS. + +349 +00:27:18,470 --> 00:27:23,390 +Because in that case you can just select a + +350 +00:27:23,390 --> 00:27:25,970 +regression analysis from SPSS, then you can run + +351 +00:27:25,970 --> 00:27:28,590 +the multiple regression without doing any + +352 +00:27:28,590 --> 00:27:34,190 +computations. But here we just covered one + +353 +00:27:34,190 --> 00:27:36,820 +independent variable. In this case, it's called + +354 +00:27:36,820 --> 00:27:41,980 +simple linear regression. Again, the dependent + +355 +00:27:41,980 --> 00:27:44,600 +variable is the variable we wish to predict or + +356 +00:27:44,600 --> 00:27:50,020 +explain, the same as weight. Independent variable, + +357 +00:27:50,180 --> 00:27:52,440 +the variable used to predict or explain the + +358 +00:27:52,440 --> 00:27:54,000 +dependent variable. + +359 +00:27:57,400 --> 00:28:00,540 +For simple linear regression model, there is only + +360 +00:28:00,540 --> 00:28:01,800 +one independent variable. + +361 +00:28:04,830 --> 00:28:08,450 +Another example for simple linear regression. + +362 +00:28:08,770 --> 00:28:11,590 +Suppose we are talking about your scores. + +363 +00:28:14,210 --> 00:28:17,770 +Scores is the dependent variable can be affected + +364 +00:28:17,770 --> 00:28:21,050 +by number of hours. + +365 +00:28:25,130 --> 00:28:31,030 +Hour of study. Number of studying hours. 
+ +366 +00:28:36,910 --> 00:28:39,810 +Maybe as number of studying hour increases, your + +367 +00:28:39,810 --> 00:28:43,390 +scores also increase. In this case, if there is + +368 +00:28:43,390 --> 00:28:46,330 +only one X, one independent variable, it's called + +369 +00:28:46,330 --> 00:28:51,110 +simple linear regression. Maybe another variable, + +370 +00:28:52,270 --> 00:28:59,730 +number of missing classes or + +371 +00:28:59,730 --> 00:29:03,160 +attendance. As number of missing classes + +372 +00:29:03,160 --> 00:29:06,380 +increases, your score goes down. That means there + +373 +00:29:06,380 --> 00:29:09,400 +exists negative relationship between missing + +374 +00:29:09,400 --> 00:29:13,540 +classes and your score. So sometimes, maybe there + +375 +00:29:13,540 --> 00:29:16,580 +exists positive or negative. It depends on the + +376 +00:29:16,580 --> 00:29:20,040 +variable itself. In this case, if there are more + +377 +00:29:20,040 --> 00:29:23,180 +than one variable, then we are talking about + +378 +00:29:23,180 --> 00:29:28,300 +multiple linear regression model. But here, we + +379 +00:29:28,300 --> 00:29:33,630 +have only one independent variable. In addition to + +380 +00:29:33,630 --> 00:29:37,230 +that, a relationship between x and y is described + +381 +00:29:37,230 --> 00:29:40,850 +by a linear function. So there exists a straight + +382 +00:29:40,850 --> 00:29:46,270 +line between the two variables. The changes in y + +383 +00:29:46,270 --> 00:29:50,210 +are assumed to be related to changes in x only. So + +384 +00:29:50,210 --> 00:29:54,270 +any change in y is related only to changes in x. + +385 +00:29:54,730 --> 00:29:57,810 +So that's the simple case we have for regression, + +386 +00:29:58,890 --> 00:30:01,170 +that we have only one independent + +387 +00:30:03,890 --> 00:30:07,070 +Variable. 
Types of relationships, as we mentioned, + +388 +00:30:07,210 --> 00:30:12,190 +maybe there exist linear, it means there exist + +389 +00:30:12,190 --> 00:30:16,490 +straight line between X and Y, either linear + +390 +00:30:16,490 --> 00:30:22,050 +positive or negative, or sometimes there exist non + +391 +00:30:22,050 --> 00:30:25,830 +-linear relationship, it's called curved linear + +392 +00:30:25,830 --> 00:30:29,290 +relationship. The same as this one, it's parabola. + +393 +00:30:32,570 --> 00:30:35,150 +Now in this case there is no linear relationship + +394 +00:30:35,150 --> 00:30:39,690 +but there exists curved linear or something like + +395 +00:30:39,690 --> 00:30:45,910 +this one. So these types of non-linear + +396 +00:30:45,910 --> 00:30:49,530 +relationship between the two variables. Here we + +397 +00:30:49,530 --> 00:30:54,070 +are covering just the linear relationship between + +398 +00:30:54,070 --> 00:30:56,570 +the two variables. So based on the scatter plot + +399 +00:30:56,570 --> 00:31:00,620 +you can determine the direction. The form, the + +400 +00:31:00,620 --> 00:31:03,860 +strength. Here, the form we are talking about is + +401 +00:31:03,860 --> 00:31:04,720 +just linear. + +402 +00:31:08,700 --> 00:31:13,260 +Now, another type of relationship, the strength of + +403 +00:31:13,260 --> 00:31:16,940 +the relationship. Here, the points, either for + +404 +00:31:16,940 --> 00:31:20,570 +this graph or the other one, These points are + +405 +00:31:20,570 --> 00:31:24,570 +close to the straight line, it means there exists + +406 +00:31:24,570 --> 00:31:28,210 +strong positive relationship or strong negative + +407 +00:31:28,210 --> 00:31:31,230 +relationship. So it depends on the direction. So + +408 +00:31:31,230 --> 00:31:35,710 +strong either positive or strong negative. 
Here + +409 +00:31:35,710 --> 00:31:38,850 +the points are scattered away from the regression + +410 +00:31:38,850 --> 00:31:41,790 +line, so you can say there exists weak + +411 +00:31:41,790 --> 00:31:45,090 +relationship, either weak positive or weak + +412 +00:31:45,090 --> 00:31:49,650 +negative. It depends on the direction of the + +413 +00:31:49,650 --> 00:31:54,270 +relationship between the two variables. Sometimes + +414 +00:31:54,270 --> 00:31:59,680 +there is no relationship or actually there is no + +415 +00:31:59,680 --> 00:32:02,340 +linear relationship between the two variables. If + +416 +00:32:02,340 --> 00:32:05,660 +the points are scattered away from the regression + +417 +00:32:05,660 --> 00:32:09,800 +line, I mean you cannot determine if it is + +418 +00:32:09,800 --> 00:32:13,160 +positive or negative, then there is no + +419 +00:32:13,160 --> 00:32:16,220 +relationship between the two variables, the same + +420 +00:32:16,220 --> 00:32:20,580 +as this one. X increases, Y stays nearly in the + +421 +00:32:20,580 --> 00:32:24,540 +same position, then there exists no relationship + +422 +00:32:24,540 --> 00:32:29,280 +between the two variables. So, a relationship + +423 +00:32:29,280 --> 00:32:32,740 +could be linear or curvilinear. It could be + +424 +00:32:32,740 --> 00:32:37,280 +positive or negative, strong or weak, or sometimes + +425 +00:32:37,280 --> 00:32:41,680 +there is no relationship between the two + +426 +00:32:41,680 --> 00:32:49,200 +variables. Now the question is, how can we write + +427 +00:32:51,250 --> 00:32:55,290 +Or how can we find the best regression line that + +428 +00:32:55,290 --> 00:32:59,570 +fits the data you have? We know the regression is + +429 +00:32:59,570 --> 00:33:06,270 +the straight line equation is given by this one. Y + +430 +00:33:06,270 --> 00:33:20,130 +equals beta 0 plus beta 1x plus epsilon. This can + +431 +00:33:20,130 --> 00:33:21,670 +be pronounced as epsilon. 
+It's a Greek letter, the same as alpha, beta, mu,
I mean, you cannot + +456 +00:35:31,720 --> 00:35:35,060 +determine by looking at beta 0, you cannot + +457 +00:35:35,060 --> 00:35:37,940 +determine if there is a positive or negative + +458 +00:35:37,940 --> 00:35:41,720 +relationship. The other term is beta 1. Beta 1 is + +459 +00:35:41,720 --> 00:35:46,900 +the population slope coefficient. Now, the sign of + +460 +00:35:46,900 --> 00:35:50,010 +the slope determines the direction of the + +461 +00:35:50,010 --> 00:35:54,090 +relationship. That means if the slope has positive + +462 +00:35:54,090 --> 00:35:56,570 +sign, it means there exists positive relationship. + +463 +00:35:57,330 --> 00:35:59,370 +Otherwise if it is negative, then there is + +464 +00:35:59,370 --> 00:36:01,390 +negative relationship between the two variables. + +465 +00:36:02,130 --> 00:36:05,310 +So the sign of the slope determines the direction. + +466 +00:36:06,090 --> 00:36:11,290 +But the sign of beta zero has no meaning about the + +467 +00:36:11,290 --> 00:36:15,470 +relationship between Y and X. X is your + +468 +00:36:15,470 --> 00:36:19,630 +independent variable, Y is your independent + +469 +00:36:19,630 --> 00:36:19,650 +your independent variable, Y is your independent + +470 +00:36:19,650 --> 00:36:21,250 +variable, Y is your independent variable, Y is + +471 +00:36:21,250 --> 00:36:24,370 +variable, Y is your independent variable, Y is + +472 +00:36:24,370 --> 00:36:24,430 +variable, Y is your independent variable, Y is + +473 +00:36:24,430 --> 00:36:24,770 +your independent variable, Y is your independent + +474 +00:36:24,770 --> 00:36:27,490 +variable, Y is your independent variable, Y is + +475 +00:36:27,490 --> 00:36:30,110 +your independent variable, Y is your It means + +476 +00:36:30,110 --> 00:36:32,450 +there are some errors you don't know about it + +477 +00:36:32,450 --> 00:36:36,130 +because you ignore some other variables that may + +478 +00:36:36,130 --> 00:36:39,410 +affect the selling price. 
Maybe you select a + +479 +00:36:39,410 --> 00:36:42,490 +random sample, that sample is small. Maybe there + +480 +00:36:42,490 --> 00:36:46,270 +is a random, I'm sorry, there is sampling error. + +481 +00:36:47,070 --> 00:36:52,980 +So all of these are called random error term. So + +482 +00:36:52,980 --> 00:36:57,420 +all of them are in this term. So epsilon I means + +483 +00:36:57,420 --> 00:37:00,340 +something you don't include in your regression + +484 +00:37:00,340 --> 00:37:03,280 +modeling. For example, you don't include all the + +485 +00:37:03,280 --> 00:37:06,180 +independent variables that affect Y, or your + +486 +00:37:06,180 --> 00:37:09,700 +sample size is not large enough. So all of these + +487 +00:37:09,700 --> 00:37:14,260 +measured in random error term. So epsilon I is + +488 +00:37:14,260 --> 00:37:18,840 +random error component, beta 0 plus beta 1X is + +489 +00:37:18,840 --> 00:37:25,070 +called linear component. So that's the simple + +490 +00:37:25,070 --> 00:37:31,430 +linear regression model. Now, the data you have, + +491 +00:37:32,850 --> 00:37:38,210 +the blue circles represent the observed value. So + +492 +00:37:38,210 --> 00:37:47,410 +these blue circles are the observed values. So we + +493 +00:37:47,410 --> 00:37:49,370 +have observed. + +494 +00:37:52,980 --> 00:37:57,940 +Y observed value of Y for each value X. The + +495 +00:37:57,940 --> 00:38:03,360 +regression line is the blue, the red one. It's + +496 +00:38:03,360 --> 00:38:07,560 +called the predicted values. Predicted Y. + +497 +00:38:08,180 --> 00:38:14,760 +Predicted Y is denoted always by Y hat. Now the + +498 +00:38:14,760 --> 00:38:19,740 +difference between Y and Y hat. It's called the + +499 +00:38:19,740 --> 00:38:20,200 +error term. + +500 +00:38:24,680 --> 00:38:28,000 +It's actually the difference between the observed + +501 +00:38:28,000 --> 00:38:31,600 +value and its predicted value. 
Now, the predicted + +502 +00:38:31,600 --> 00:38:34,720 +value can be determined by using the regression + +503 +00:38:34,720 --> 00:38:39,180 +line. So this line is the predicted value of Y for + +504 +00:38:39,180 --> 00:38:44,480 +XR. Again, beta zero is the intercept. As we + +505 +00:38:44,480 --> 00:38:46,260 +mentioned before, it could be positive or negative + +506 +00:38:46,260 --> 00:38:52,600 +or even equal zero. The slope is changing Y. + +507 +00:38:55,140 --> 00:38:57,580 +Divide by change of x. + +508 +00:39:01,840 --> 00:39:07,140 +So these are the components for the simple linear + +509 +00:39:07,140 --> 00:39:10,840 +regression model. Y again represents the + +510 +00:39:10,840 --> 00:39:14,960 +independent variable. Beta 0 y intercept. Beta 1 + +511 +00:39:14,960 --> 00:39:17,960 +is your slope. And the slope determines the + +512 +00:39:17,960 --> 00:39:20,900 +direction of the relationship. X independent + +513 +00:39:20,900 --> 00:39:25,270 +variable epsilon i is the random error term. Any + +514 +00:39:25,270 --> 00:39:25,650 +question? + +515 +00:39:31,750 --> 00:39:36,610 +The relationship may be positive or negative. It + +516 +00:39:36,610 --> 00:39:37,190 +could be negative. + +517 +00:39:40,950 --> 00:39:42,710 +Now, for negative relationship, + +518 +00:39:57,000 --> 00:40:04,460 +Or negative, where beta zero is negative. + +519 +00:40:04,520 --> 00:40:08,700 +Or beta + +520 +00:40:08,700 --> 00:40:09,740 +zero equals zero. + +521 +00:40:16,680 --> 00:40:20,620 +So here there exists negative relationship, but + +522 +00:40:20,620 --> 00:40:22,060 +beta zero may be positive. + +523 +00:40:25,870 --> 00:40:30,210 +So again, the sign of beta 0 also does not affect + +524 +00:40:30,210 --> 00:40:31,990 +the relationship between the two variables. + +525 +00:40:36,230 --> 00:40:40,590 +Now, we don't actually know the values of beta 0 + +526 +00:40:40,590 --> 00:40:44,510 +and beta 1. 
We are going to estimate these values + +527 +00:40:44,510 --> 00:40:48,110 +from the sample we have. So the simple linear + +528 +00:40:48,110 --> 00:40:50,970 +regression equation provides an estimate of the + +529 +00:40:50,970 --> 00:40:55,270 +population regression line. So here we have Yi hat + +530 +00:40:55,270 --> 00:41:00,010 +is the estimated or predicted Y value for + +531 +00:41:00,010 --> 00:41:00,850 +observation I. + +532 +00:41:03,530 --> 00:41:08,220 +The estimate of the regression intercept P0. The + +533 +00:41:08,220 --> 00:41:11,360 +estimate of the regression slope is b1, and this + +534 +00:41:11,360 --> 00:41:16,680 +is your x, all independent variable. So here is + +535 +00:41:16,680 --> 00:41:20,340 +the regression equation. Simple linear regression + +536 +00:41:20,340 --> 00:41:24,400 +equation is given by y hat, the predicted value of + +537 +00:41:24,400 --> 00:41:29,380 +y equals b0 plus b1 times x1. + +538 +00:41:31,240 --> 00:41:35,960 +Now these coefficients, b0 and b1 can be computed + +539 +00:41:37,900 --> 00:41:43,040 +by the following equations. So the regression + +540 +00:41:43,040 --> 00:41:52,920 +equation is + +541 +00:41:52,920 --> 00:41:57,260 +given by y hat equals b0 plus b1x. + +542 +00:41:59,940 --> 00:42:06,140 +Now the slope, b1, is r times standard deviation + +543 +00:42:06,140 --> 00:42:10,540 +of y Times standard deviation of x. This is the + +544 +00:42:10,540 --> 00:42:13,820 +simplest equation to determine the value of the + +545 +00:42:13,820 --> 00:42:18,980 +star. B1r, r is the correlation coefficient. Sy is + +546 +00:42:18,980 --> 00:42:25,080 +xr, the standard deviations of y and x. Where b0, + +547 +00:42:25,520 --> 00:42:30,880 +which is y intercept, is y bar minus b x bar, or + +548 +00:42:30,880 --> 00:42:38,100 +b1 x bar. 
Sx, as we know, is the sum of x minus y + +549 +00:42:38,100 --> 00:42:40,460 +squared divided by n minus 1 under square root, + +550 +00:42:40,900 --> 00:42:47,060 +similarly for y values. So this, how can we, these + +551 +00:42:47,060 --> 00:42:52,380 +formulas compute the values of b0 and b1. So we + +552 +00:42:52,380 --> 00:42:54,600 +are going to use these equations in order to + +553 +00:42:54,600 --> 00:42:58,960 +determine the values of b0 and b1. + +554 +00:43:04,670 --> 00:43:07,710 +Now, what's your interpretation about the slope + +555 +00:43:07,710 --> 00:43:13,130 +and the intercept? For example, suppose we are + +556 +00:43:13,130 --> 00:43:18,610 +talking about your score Y and + +557 +00:43:18,610 --> 00:43:22,110 +X number of missing classes. + +558 +00:43:29,210 --> 00:43:35,460 +And suppose, for example, Y hat Equal 95 minus 5x. + +559 +00:43:37,780 --> 00:43:41,420 +Now let's see what's the interpretation of B0. + +560 +00:43:42,300 --> 00:43:45,060 +This is B0. So B0 is 95. + +561 +00:43:47,660 --> 00:43:51,960 +And B1 is 5. Now what's your interpretation about + +562 +00:43:51,960 --> 00:43:57,740 +B0 and B1? B0 is the estimated mean value of Y + +563 +00:43:57,740 --> 00:44:02,560 +when the value of X is 0. that means if the + +564 +00:44:02,560 --> 00:44:08,500 +student does not miss any class that means x + +565 +00:44:08,500 --> 00:44:13,260 +equals zero in this case we predict or we estimate + +566 +00:44:13,260 --> 00:44:19,880 +the mean value of his score or her score is 95 so + +567 +00:44:19,880 --> 00:44:27,500 +95 it means when x is zero if x is zero then we + +568 +00:44:27,500 --> 00:44:35,350 +expect his or Here, the score is 95. So that means + +569 +00:44:35,350 --> 00:44:39,830 +B0 is the estimated mean value of Y when the value + +570 +00:44:39,830 --> 00:44:40,630 +of X is 0. + +571 +00:44:43,370 --> 00:44:46,590 +Now, what's the meaning of the slope? 
The slope in + +572 +00:44:46,590 --> 00:44:51,290 +this case is negative Y. B1, which is the slope, + +573 +00:44:51,590 --> 00:44:57,610 +is the estimated change in the mean of Y. as a + +574 +00:44:57,610 --> 00:45:03,050 +result of a one unit change in x for example let's + +575 +00:45:03,050 --> 00:45:07,070 +compute y for different values of x suppose x is + +576 +00:45:07,070 --> 00:45:15,510 +one now we predict his score to be 95 minus 5 + +577 +00:45:15,510 --> 00:45:25,470 +times 1 which is 90 when x is 2 for example Y hat + +578 +00:45:25,470 --> 00:45:28,570 +is 95 minus 5 times 2, so that's 85. + +579 +00:45:31,950 --> 00:45:39,970 +So for each one unit, there is a drop by five + +580 +00:45:39,970 --> 00:45:43,750 +units in his score. That means if number of + +581 +00:45:43,750 --> 00:45:47,550 +missing classes increases by one unit, then his or + +582 +00:45:47,550 --> 00:45:51,790 +her weight is expected to be reduced by five units + +583 +00:45:51,790 --> 00:45:56,150 +because the sign is negative. another example + +584 +00:45:56,150 --> 00:46:05,910 +suppose again we are interested in whales and + +585 +00:46:05,910 --> 00:46:16,170 +angels and imagine that just + +586 +00:46:16,170 --> 00:46:21,670 +for example y equal y hat equals three plus four x + +587 +00:46:21,670 --> 00:46:29,830 +now y hat equals 3 if x equals zero. That has no + +588 +00:46:29,830 --> 00:46:34,510 +meaning because you cannot say age of zero. So + +589 +00:46:34,510 --> 00:46:40,450 +sometimes the meaning of y intercept does not make + +590 +00:46:40,450 --> 00:46:46,150 +sense because you cannot say x equals zero. Now + +591 +00:46:46,150 --> 00:46:50,690 +for the stock of four, that means as his or her + +592 +00:46:50,690 --> 00:46:55,550 +weight increases by one year, Then we expect his + +593 +00:46:55,550 --> 00:47:00,470 +weight to increase by four kilograms. 
So as one + +594 +00:47:00,470 --> 00:47:05,130 +unit increase in x, y is our, his weight is + +595 +00:47:05,130 --> 00:47:10,150 +expected to increase by four units. So again, + +596 +00:47:10,370 --> 00:47:16,950 +sometimes we can interpret the y intercept, but in + +597 +00:47:16,950 --> 00:47:18,670 +some cases it has no meaning. + +598 +00:47:24,970 --> 00:47:27,190 +Now for the previous example, for the selling + +599 +00:47:27,190 --> 00:47:32,930 +price of a home and its size, B1rSy divided by Sx, + +600 +00:47:33,790 --> 00:47:43,550 +r is computed, r is found to be 76%, 76%Sy divided + +601 +00:47:43,550 --> 00:47:49,990 +by Sx, that will give 0.109. B0y bar minus B1x + +602 +00:47:49,990 --> 00:47:50,670 +bar, + +603 +00:47:53,610 --> 00:48:00,150 +Y bar for this data is 286 minus D1. So we have to + +604 +00:48:00,150 --> 00:48:03,490 +compute first D1 because we use it in order to + +605 +00:48:03,490 --> 00:48:08,590 +determine D0. And calculation gives 98. So that + +606 +00:48:08,590 --> 00:48:16,450 +means based on these equations, Y hat equals 0 + +607 +00:48:16,450 --> 00:48:22,990 +.10977 plus 98.248. + +608 +00:48:24,790 --> 00:48:29,370 +times X. X is the size. + +609 +00:48:32,890 --> 00:48:39,830 +0.1 B1 + +610 +00:48:39,830 --> 00:48:45,310 +is + +611 +00:48:45,310 --> 00:48:56,650 +0.1, B0 is 98, so 98.248 plus B1. So this is your + +612 +00:48:56,650 --> 00:49:03,730 +regression equation. So again, the intercept is + +613 +00:49:03,730 --> 00:49:09,750 +98. So this amount, the segment is 98. Now the + +614 +00:49:09,750 --> 00:49:14,790 +slope is 0.109. So house price, the expected value + +615 +00:49:14,790 --> 00:49:21,270 +of house price equals B098 plus 0.109 square feet. + +616 +00:49:23,150 --> 00:49:27,630 +So that's the prediction line for the house price. + +617 +00:49:28,510 --> 00:49:34,370 +So again, house price equal B0 98 plus 0.10977 + +618 +00:49:34,370 --> 00:49:36,930 +times square root. 
Now, what's your interpretation + +619 +00:49:36,930 --> 00:49:41,950 +about B0 and B1? B0 is the estimated mean value of + +620 +00:49:41,950 --> 00:49:46,430 +Y when the value of X is 0. So if X is 0, this + +621 +00:49:46,430 --> 00:49:52,980 +range of X observed X values and you have a home + +622 +00:49:52,980 --> 00:49:57,860 +or a house of size zero. So that means this value + +623 +00:49:57,860 --> 00:50:02,680 +has no meaning. Because a house cannot have a + +624 +00:50:02,680 --> 00:50:06,400 +square footage of zero. So B0 has no practical + +625 +00:50:06,400 --> 00:50:10,040 +application in this case. So sometimes it makes + +626 +00:50:10,040 --> 00:50:17,620 +sense, in other cases it doesn't have that. So for + +627 +00:50:17,620 --> 00:50:21,790 +this specific example, B0 has no practical + +628 +00:50:21,790 --> 00:50:28,210 +application in this case. But B1 which is 0.1097, + +629 +00:50:28,930 --> 00:50:33,050 +B1 estimates the change in the mean value of Y as + +630 +00:50:33,050 --> 00:50:36,730 +a result of one unit increasing X. So for this + +631 +00:50:36,730 --> 00:50:41,640 +value which is 0.109, it means This fellow tells + +632 +00:50:41,640 --> 00:50:46,420 +us that the mean value of a house can increase by + +633 +00:50:46,420 --> 00:50:52,280 +this amount, increase by 0.1097, but we have to + +634 +00:50:52,280 --> 00:50:55,700 +multiply this value by a thousand because the data + +635 +00:50:55,700 --> 00:51:01,280 +was in thousand dollars, so around 109, on average + +636 +00:51:01,280 --> 00:51:05,160 +for each additional one square foot of a size. So + +637 +00:51:05,160 --> 00:51:09,990 +that means if a house So if house size increased + +638 +00:51:09,990 --> 00:51:14,630 +by one square foot, then the price increased by + +639 +00:51:14,630 --> 00:51:19,530 +around 109 dollars. 
So for each one unit increased + +640 +00:51:19,530 --> 00:51:22,990 +in the size, the selling price of a home increased + +641 +00:51:22,990 --> 00:51:29,590 +by 109. So that means if the size increased by + +642 +00:51:29,590 --> 00:51:35,860 +tenth, It means the selling price increased by + +643 +00:51:35,860 --> 00:51:39,400 +1097 + +644 +00:51:39,400 --> 00:51:46,600 +.7. Make sense? So for each one unit increase in + +645 +00:51:46,600 --> 00:51:50,300 +its size, the house selling price increased by + +646 +00:51:50,300 --> 00:51:55,540 +109. So we have to multiply this value by the unit + +647 +00:51:55,540 --> 00:52:02,280 +we have. Because Y was 8000 dollars. Here if you + +648 +00:52:02,280 --> 00:52:06,600 +go back to the previous data we have, the data was + +649 +00:52:06,600 --> 00:52:11,120 +house price wasn't thousand dollars, so we have to + +650 +00:52:11,120 --> 00:52:15,840 +multiply the slope by a thousand. + +651 +00:52:19,480 --> 00:52:23,720 +Now we + +652 +00:52:23,720 --> 00:52:30,380 +can use also the regression equation line to make + +653 +00:52:30,380 --> 00:52:35,390 +some prediction. For example, we can predict the + +654 +00:52:35,390 --> 00:52:42,290 +price of a house with 2000 square feet. You just + +655 +00:52:42,290 --> 00:52:43,590 +plug this value. + +656 +00:52:46,310 --> 00:52:52,210 +So we have 98.25 plus 0.109 times 2000. That will + +657 +00:52:52,210 --> 00:53:01,600 +give the house price. for 2,000 square feet. So + +658 +00:53:01,600 --> 00:53:05,920 +that means the predicted price for a house with 2 + +659 +00:53:05,920 --> 00:53:10,180 +,000 square feet is this amount multiplied by 1 + +660 +00:53:10,180 --> 00:53:18,260 +,000. So that will give $317,850. So that's how + +661 +00:53:18,260 --> 00:53:24,240 +can we make predictions for why I mean for house + +662 +00:53:24,240 --> 00:53:29,360 +price at any given value of its size. 
So for this + +663 +00:53:29,360 --> 00:53:36,020 +data, we have a house with 2000 square feet. So we + +664 +00:53:36,020 --> 00:53:43,180 +predict its price to be around 317,850. + +665 +00:53:44,220 --> 00:53:50,920 +I will stop at coefficient of correlation. I will + +666 +00:53:50,920 --> 00:53:54,190 +stop at coefficient of determination for next time + +667 +00:53:54,190 --> 00:53:57,770 +that's + +668 +00:53:57,770 --> 00:53:57,990 +all + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..c667b6a58068c6ef2f83b951834193ce30b6b7db --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 2616, "start": 11.02, "end": 26.16, "text": " The last chapter we are going to talk in this semester is correlation and simple linearization. So we are going to explain two types in chapter 12. 
One is called correlation.", "tokens": [440, 1036, 7187, 321, 366, 516, 281, 751, 294, 341, 11894, 307, 20009, 293, 2199, 8213, 2144, 13, 407, 321, 366, 516, 281, 2903, 732, 3467, 294, 7187, 2272, 13, 1485, 307, 1219, 20009, 13], "avg_logprob": -0.2230902844005161, "compression_ratio": 1.4462809917355373, "no_speech_prob": 5.960464477539062e-07, "words": [{"start": 11.019999999999998, "end": 11.74, "word": " The", "probability": 0.162109375}, {"start": 11.74, "end": 12.02, "word": " last", "probability": 0.8623046875}, {"start": 12.02, "end": 12.38, "word": " chapter", "probability": 0.8896484375}, {"start": 12.38, "end": 12.56, "word": " we", "probability": 0.89892578125}, {"start": 12.56, "end": 12.68, "word": " are", "probability": 0.90185546875}, {"start": 12.68, "end": 12.9, "word": " going", "probability": 0.9462890625}, {"start": 12.9, "end": 13.04, "word": " to", "probability": 0.9697265625}, {"start": 13.04, "end": 13.34, "word": " talk", "probability": 0.814453125}, {"start": 13.34, "end": 13.7, "word": " in", "probability": 0.59423828125}, {"start": 13.7, "end": 13.92, "word": " this", "probability": 0.916015625}, {"start": 13.92, "end": 14.34, "word": " semester", "probability": 0.95361328125}, {"start": 14.34, "end": 14.82, "word": " is", "probability": 0.9267578125}, {"start": 14.82, "end": 15.98, "word": " correlation", "probability": 0.6484375}, {"start": 15.98, "end": 16.5, "word": " and", "probability": 0.9267578125}, {"start": 16.5, "end": 16.96, "word": " simple", "probability": 0.80126953125}, {"start": 16.96, "end": 17.82, "word": " linearization.", "probability": 0.421875}, {"start": 18.38, "end": 18.54, "word": " So", "probability": 0.82373046875}, {"start": 18.54, "end": 18.68, "word": " we", "probability": 0.748046875}, {"start": 18.68, "end": 18.8, "word": " are", "probability": 0.91796875}, {"start": 18.8, "end": 19.12, "word": " going", "probability": 0.94287109375}, {"start": 19.12, "end": 19.48, "word": " to", "probability": 
0.9697265625}, {"start": 19.48, "end": 20.22, "word": " explain", "probability": 0.90380859375}, {"start": 20.22, "end": 22.0, "word": " two", "probability": 0.892578125}, {"start": 22.0, "end": 22.5, "word": " types", "probability": 0.83154296875}, {"start": 22.5, "end": 23.0, "word": " in", "probability": 0.892578125}, {"start": 23.0, "end": 23.3, "word": " chapter", "probability": 0.69677734375}, {"start": 23.3, "end": 23.7, "word": " 12.", "probability": 0.66845703125}, {"start": 24.6, "end": 25.12, "word": " One", "probability": 0.9013671875}, {"start": 25.12, "end": 25.36, "word": " is", "probability": 0.92724609375}, {"start": 25.36, "end": 25.62, "word": " called", "probability": 0.90576171875}, {"start": 25.62, "end": 26.16, "word": " correlation.", "probability": 0.8828125}], "temperature": 1.0}, {"id": 2, "seek": 4682, "start": 28.08, "end": 46.82, "text": " And the other type is simple linear regression. Maybe this chapter I'm going to spend about two lectures in order to cover these objectives. 
The first objective is to calculate the coefficient of correlation.", "tokens": [400, 264, 661, 2010, 307, 2199, 8213, 24590, 13, 2704, 341, 7187, 286, 478, 516, 281, 3496, 466, 732, 16564, 294, 1668, 281, 2060, 613, 15961, 13, 440, 700, 10024, 307, 281, 8873, 264, 17619, 295, 20009, 13], "avg_logprob": -0.13882210850715637, "compression_ratio": 1.3841059602649006, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 28.08, "end": 28.5, "word": " And", "probability": 0.63671875}, {"start": 28.5, "end": 28.62, "word": " the", "probability": 0.7197265625}, {"start": 28.62, "end": 28.84, "word": " other", "probability": 0.90087890625}, {"start": 28.84, "end": 29.28, "word": " type", "probability": 0.79443359375}, {"start": 29.28, "end": 29.68, "word": " is", "probability": 0.939453125}, {"start": 29.68, "end": 30.1, "word": " simple", "probability": 0.884765625}, {"start": 30.1, "end": 30.64, "word": " linear", "probability": 0.9013671875}, {"start": 30.64, "end": 31.5, "word": " regression.", "probability": 0.97314453125}, {"start": 32.2, "end": 32.5, "word": " Maybe", "probability": 0.9287109375}, {"start": 32.5, "end": 32.84, "word": " this", "probability": 0.89697265625}, {"start": 32.84, "end": 33.5, "word": " chapter", "probability": 0.89208984375}, {"start": 33.5, "end": 34.82, "word": " I'm", "probability": 0.4970703125}, {"start": 34.82, "end": 35.08, "word": " going", "probability": 0.943359375}, {"start": 35.08, "end": 35.26, "word": " to", "probability": 0.96875}, {"start": 35.26, "end": 35.78, "word": " spend", "probability": 0.8779296875}, {"start": 35.78, "end": 36.38, "word": " about", "probability": 0.89892578125}, {"start": 36.38, "end": 37.94, "word": " two", "probability": 0.9375}, {"start": 37.94, "end": 38.78, "word": " lectures", "probability": 0.90673828125}, {"start": 38.78, "end": 39.7, "word": " in", "probability": 0.75146484375}, {"start": 39.7, "end": 39.84, "word": " order", "probability": 0.9384765625}, {"start": 39.84, "end": 
40.02, "word": " to", "probability": 0.9658203125}, {"start": 40.02, "end": 40.34, "word": " cover", "probability": 0.95361328125}, {"start": 40.34, "end": 41.62, "word": " these", "probability": 0.8583984375}, {"start": 41.62, "end": 42.26, "word": " objectives.", "probability": 0.8544921875}, {"start": 42.88, "end": 43.14, "word": " The", "probability": 0.88330078125}, {"start": 43.14, "end": 43.42, "word": " first", "probability": 0.8994140625}, {"start": 43.42, "end": 43.88, "word": " objective", "probability": 0.9541015625}, {"start": 43.88, "end": 44.46, "word": " is", "probability": 0.9462890625}, {"start": 44.46, "end": 45.0, "word": " to", "probability": 0.96630859375}, {"start": 45.0, "end": 45.42, "word": " calculate", "probability": 0.916015625}, {"start": 45.42, "end": 45.7, "word": " the", "probability": 0.85888671875}, {"start": 45.7, "end": 46.16, "word": " coefficient", "probability": 0.931640625}, {"start": 46.16, "end": 46.42, "word": " of", "probability": 0.9580078125}, {"start": 46.42, "end": 46.82, "word": " correlation.", "probability": 0.9326171875}], "temperature": 1.0}, {"id": 3, "seek": 7719, "start": 48.51, "end": 77.19, "text": " The second objective, the meaning of the regression coefficients beta 0 and beta 1. And the last objective is how to use regression analysis to predict the value of dependent variable based on an independent variable. It looks like that we have discussed objective number one in chapter three. 
So calculation of the correlation coefficient", "tokens": [440, 1150, 10024, 11, 264, 3620, 295, 264, 24590, 31994, 9861, 1958, 293, 9861, 502, 13, 400, 264, 1036, 10024, 307, 577, 281, 764, 24590, 5215, 281, 6069, 264, 2158, 295, 12334, 7006, 2361, 322, 364, 6695, 7006, 13, 467, 1542, 411, 300, 321, 362, 7152, 10024, 1230, 472, 294, 7187, 1045, 13, 407, 17108, 295, 264, 20009, 17619], "avg_logprob": -0.22226562425494195, "compression_ratio": 1.6666666666666667, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 48.51, "end": 48.81, "word": " The", "probability": 0.2225341796875}, {"start": 48.81, "end": 49.03, "word": " second", "probability": 0.7900390625}, {"start": 49.03, "end": 49.51, "word": " objective,", "probability": 0.9228515625}, {"start": 49.69, "end": 49.75, "word": " the", "probability": 0.900390625}, {"start": 49.75, "end": 50.03, "word": " meaning", "probability": 0.8486328125}, {"start": 50.03, "end": 50.73, "word": " of", "probability": 0.94775390625}, {"start": 50.73, "end": 50.89, "word": " the", "probability": 0.89208984375}, {"start": 50.89, "end": 51.21, "word": " regression", "probability": 0.908203125}, {"start": 51.21, "end": 52.03, "word": " coefficients", "probability": 0.828125}, {"start": 52.03, "end": 52.41, "word": " beta", "probability": 0.497802734375}, {"start": 52.41, "end": 52.65, "word": " 0", "probability": 0.1778564453125}, {"start": 52.65, "end": 53.15, "word": " and", "probability": 0.79638671875}, {"start": 53.15, "end": 53.35, "word": " beta", "probability": 0.88525390625}, {"start": 53.35, "end": 53.65, "word": " 1.", "probability": 0.92919921875}, {"start": 54.47, "end": 54.89, "word": " And", "probability": 0.896484375}, {"start": 54.89, "end": 55.35, "word": " the", "probability": 0.88916015625}, {"start": 55.35, "end": 55.59, "word": " last", "probability": 0.87353515625}, {"start": 55.59, "end": 56.07, "word": " objective", "probability": 0.9521484375}, {"start": 56.07, "end": 56.39, "word": " is", 
"probability": 0.9140625}, {"start": 56.39, "end": 56.61, "word": " how", "probability": 0.90625}, {"start": 56.61, "end": 56.77, "word": " to", "probability": 0.96630859375}, {"start": 56.77, "end": 57.09, "word": " use", "probability": 0.88818359375}, {"start": 57.09, "end": 57.55, "word": " regression", "probability": 0.884765625}, {"start": 57.55, "end": 58.17, "word": " analysis", "probability": 0.86279296875}, {"start": 58.17, "end": 58.71, "word": " to", "probability": 0.95751953125}, {"start": 58.71, "end": 59.25, "word": " predict", "probability": 0.9150390625}, {"start": 59.25, "end": 60.13, "word": " the", "probability": 0.8486328125}, {"start": 60.13, "end": 60.57, "word": " value", "probability": 0.97314453125}, {"start": 60.57, "end": 61.17, "word": " of", "probability": 0.95947265625}, {"start": 61.17, "end": 61.79, "word": " dependent", "probability": 0.55810546875}, {"start": 61.79, "end": 62.25, "word": " variable", "probability": 0.82373046875}, {"start": 62.25, "end": 62.65, "word": " based", "probability": 0.83740234375}, {"start": 62.65, "end": 63.03, "word": " on", "probability": 0.94677734375}, {"start": 63.03, "end": 63.37, "word": " an", "probability": 0.56396484375}, {"start": 63.37, "end": 63.69, "word": " independent", "probability": 0.90576171875}, {"start": 63.69, "end": 64.11, "word": " variable.", "probability": 0.8828125}, {"start": 64.63, "end": 64.87, "word": " It", "probability": 0.9560546875}, {"start": 64.87, "end": 65.11, "word": " looks", "probability": 0.8251953125}, {"start": 65.11, "end": 65.41, "word": " like", "probability": 0.92626953125}, {"start": 65.41, "end": 65.69, "word": " that", "probability": 0.90869140625}, {"start": 65.69, "end": 66.01, "word": " we", "probability": 0.95458984375}, {"start": 66.01, "end": 66.43, "word": " have", "probability": 0.94091796875}, {"start": 66.43, "end": 68.19, "word": " discussed", "probability": 0.86669921875}, {"start": 68.19, "end": 69.59, "word": " objective", "probability": 
0.86279296875}, {"start": 69.59, "end": 69.87, "word": " number", "probability": 0.9208984375}, {"start": 69.87, "end": 70.17, "word": " one", "probability": 0.69384765625}, {"start": 70.17, "end": 70.37, "word": " in", "probability": 0.908203125}, {"start": 70.37, "end": 70.59, "word": " chapter", "probability": 0.78515625}, {"start": 70.59, "end": 70.93, "word": " three.", "probability": 0.880859375}, {"start": 72.99, "end": 73.39, "word": " So", "probability": 0.65380859375}, {"start": 73.39, "end": 74.17, "word": " calculation", "probability": 0.623046875}, {"start": 74.17, "end": 74.91, "word": " of", "probability": 0.97021484375}, {"start": 74.91, "end": 75.11, "word": " the", "probability": 0.92138671875}, {"start": 75.11, "end": 76.47, "word": " correlation", "probability": 0.6240234375}, {"start": 76.47, "end": 77.19, "word": " coefficient", "probability": 0.9501953125}], "temperature": 1.0}, {"id": 4, "seek": 10496, "start": 78.3, "end": 104.96, "text": " is done in chapter three, but here we'll give some details about correlation also. A scatter plot can be used to show the relationship between two variables. For example, imagine that we have a random sample of 10 children. 
And we have data on their weights and ages.", "tokens": [307, 1096, 294, 7187, 1045, 11, 457, 510, 321, 603, 976, 512, 4365, 466, 20009, 611, 13, 316, 34951, 7542, 393, 312, 1143, 281, 855, 264, 2480, 1296, 732, 9102, 13, 1171, 1365, 11, 3811, 300, 321, 362, 257, 4974, 6889, 295, 1266, 2227, 13, 400, 321, 362, 1412, 322, 641, 17443, 293, 12357, 13], "avg_logprob": -0.18345423441912448, "compression_ratio": 1.4806629834254144, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 78.3, "end": 78.6, "word": " is", "probability": 0.463134765625}, {"start": 78.6, "end": 78.9, "word": " done", "probability": 0.87890625}, {"start": 78.9, "end": 79.4, "word": " in", "probability": 0.9287109375}, {"start": 79.4, "end": 79.64, "word": " chapter", "probability": 0.59228515625}, {"start": 79.64, "end": 80.06, "word": " three,", "probability": 0.7060546875}, {"start": 80.4, "end": 80.56, "word": " but", "probability": 0.9150390625}, {"start": 80.56, "end": 80.74, "word": " here", "probability": 0.71044921875}, {"start": 80.74, "end": 81.08, "word": " we'll", "probability": 0.734619140625}, {"start": 81.08, "end": 81.52, "word": " give", "probability": 0.70849609375}, {"start": 81.52, "end": 81.96, "word": " some", "probability": 0.900390625}, {"start": 81.96, "end": 83.42, "word": " details", "probability": 0.8583984375}, {"start": 83.42, "end": 83.88, "word": " about", "probability": 0.9111328125}, {"start": 83.88, "end": 84.56, "word": " correlation", "probability": 0.9052734375}, {"start": 84.56, "end": 85.06, "word": " also.", "probability": 0.8134765625}, {"start": 85.92, "end": 86.06, "word": " A", "probability": 0.517578125}, {"start": 86.06, "end": 86.38, "word": " scatter", "probability": 0.81591796875}, {"start": 86.38, "end": 86.62, "word": " plot", "probability": 0.7724609375}, {"start": 86.62, "end": 87.06, "word": " can", "probability": 0.9462890625}, {"start": 87.06, "end": 87.22, "word": " be", "probability": 0.9580078125}, {"start": 87.22, "end": 87.44, 
"word": " used", "probability": 0.9111328125}, {"start": 87.44, "end": 87.62, "word": " to", "probability": 0.96826171875}, {"start": 87.62, "end": 87.86, "word": " show", "probability": 0.9501953125}, {"start": 87.86, "end": 88.04, "word": " the", "probability": 0.91455078125}, {"start": 88.04, "end": 88.48, "word": " relationship", "probability": 0.9169921875}, {"start": 88.48, "end": 88.82, "word": " between", "probability": 0.8740234375}, {"start": 88.82, "end": 89.02, "word": " two", "probability": 0.9326171875}, {"start": 89.02, "end": 89.46, "word": " variables.", "probability": 0.9541015625}, {"start": 90.08, "end": 90.3, "word": " For", "probability": 0.95947265625}, {"start": 90.3, "end": 90.68, "word": " example,", "probability": 0.97509765625}, {"start": 90.78, "end": 91.1, "word": " imagine", "probability": 0.92431640625}, {"start": 91.1, "end": 91.54, "word": " that", "probability": 0.93603515625}, {"start": 91.54, "end": 92.44, "word": " we", "probability": 0.943359375}, {"start": 92.44, "end": 92.8, "word": " have", "probability": 0.9462890625}, {"start": 92.8, "end": 93.06, "word": " a", "probability": 0.97802734375}, {"start": 93.06, "end": 93.34, "word": " random", "probability": 0.83935546875}, {"start": 93.34, "end": 93.8, "word": " sample", "probability": 0.73583984375}, {"start": 93.8, "end": 94.28, "word": " of", "probability": 0.97021484375}, {"start": 94.28, "end": 94.66, "word": " 10", "probability": 0.58837890625}, {"start": 94.66, "end": 95.4, "word": " children.", "probability": 0.8603515625}, {"start": 97.8, "end": 98.3, "word": " And", "probability": 0.94140625}, {"start": 98.3, "end": 98.48, "word": " we", "probability": 0.94677734375}, {"start": 98.48, "end": 98.78, "word": " have", "probability": 0.9404296875}, {"start": 98.78, "end": 100.02, "word": " data", "probability": 0.87890625}, {"start": 100.02, "end": 100.54, "word": " on", "probability": 0.94189453125}, {"start": 100.54, "end": 101.18, "word": " their", "probability": 
0.9609375}, {"start": 101.18, "end": 101.66, "word": " weights", "probability": 0.7021484375}, {"start": 101.66, "end": 104.4, "word": " and", "probability": 0.80419921875}, {"start": 104.4, "end": 104.96, "word": " ages.", "probability": 0.84228515625}], "temperature": 1.0}, {"id": 5, "seek": 13288, "start": 107.26, "end": 132.88, "text": " And we are interested to examine the relationship between weights and age. For example, suppose child number one, his or her age is two years with weight, for example, eight kilograms.", "tokens": [400, 321, 366, 3102, 281, 17496, 264, 2480, 1296, 17443, 293, 3205, 13, 1171, 1365, 11, 7297, 1440, 1230, 472, 11, 702, 420, 720, 3205, 307, 732, 924, 365, 3364, 11, 337, 1365, 11, 3180, 30690, 13], "avg_logprob": -0.16354851973684212, "compression_ratio": 1.4230769230769231, "no_speech_prob": 0.0, "words": [{"start": 107.26, "end": 107.6, "word": " And", "probability": 0.7392578125}, {"start": 107.6, "end": 107.94, "word": " we", "probability": 0.9443359375}, {"start": 107.94, "end": 108.1, "word": " are", "probability": 0.93603515625}, {"start": 108.1, "end": 108.56, "word": " interested", "probability": 0.85546875}, {"start": 108.56, "end": 109.08, "word": " to", "probability": 0.96044921875}, {"start": 109.08, "end": 110.34, "word": " examine", "probability": 0.9599609375}, {"start": 110.34, "end": 110.54, "word": " the", "probability": 0.923828125}, {"start": 110.54, "end": 111.04, "word": " relationship", "probability": 0.90869140625}, {"start": 111.04, "end": 111.64, "word": " between", "probability": 0.89111328125}, {"start": 111.64, "end": 113.3, "word": " weights", "probability": 0.75341796875}, {"start": 113.3, "end": 113.9, "word": " and", "probability": 0.9443359375}, {"start": 113.9, "end": 114.32, "word": " age.", "probability": 0.9501953125}, {"start": 115.52, "end": 115.72, "word": " For", "probability": 0.96484375}, {"start": 115.72, "end": 116.08, "word": " example,", "probability": 0.97412109375}, {"start": 
116.28, "end": 116.72, "word": " suppose", "probability": 0.8798828125}, {"start": 116.72, "end": 118.12, "word": " child", "probability": 0.75146484375}, {"start": 118.12, "end": 118.4, "word": " number", "probability": 0.92529296875}, {"start": 118.4, "end": 118.72, "word": " one,", "probability": 0.80126953125}, {"start": 121.0, "end": 126.26, "word": " his", "probability": 0.95263671875}, {"start": 126.26, "end": 126.58, "word": " or", "probability": 0.93359375}, {"start": 126.58, "end": 126.82, "word": " her", "probability": 0.96826171875}, {"start": 126.82, "end": 127.12, "word": " age", "probability": 0.93359375}, {"start": 127.12, "end": 127.44, "word": " is", "probability": 0.94775390625}, {"start": 127.44, "end": 127.7, "word": " two", "probability": 0.7939453125}, {"start": 127.7, "end": 128.08, "word": " years", "probability": 0.93310546875}, {"start": 128.08, "end": 129.7, "word": " with", "probability": 0.51611328125}, {"start": 129.7, "end": 130.2, "word": " weight,", "probability": 0.89111328125}, {"start": 131.48, "end": 131.68, "word": " for", "probability": 0.95263671875}, {"start": 131.68, "end": 132.06, "word": " example,", "probability": 0.97412109375}, {"start": 132.2, "end": 132.46, "word": " eight", "probability": 0.740234375}, {"start": 132.46, "end": 132.88, "word": " kilograms.", "probability": 0.85888671875}], "temperature": 1.0}, {"id": 6, "seek": 15440, "start": 137.68, "end": 154.4, "text": " His weight or her weight is four years, and his or her weight is, for example, 15 kilograms, and so on. And again, we are interested to examine the relationship between age and weight. 
Maybe they exist sometimes.", "tokens": [2812, 3364, 420, 720, 3364, 307, 1451, 924, 11, 293, 702, 420, 720, 3364, 307, 11, 337, 1365, 11, 2119, 30690, 11, 293, 370, 322, 13, 400, 797, 11, 321, 366, 3102, 281, 17496, 264, 2480, 1296, 3205, 293, 3364, 13, 2704, 436, 2514, 2171, 13], "avg_logprob": -0.20428856890252295, "compression_ratio": 1.4791666666666667, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 137.68, "end": 138.02, "word": " His", "probability": 0.3525390625}, {"start": 138.02, "end": 138.36, "word": " weight", "probability": 0.8935546875}, {"start": 138.36, "end": 138.58, "word": " or", "probability": 0.2724609375}, {"start": 138.58, "end": 138.76, "word": " her", "probability": 0.9267578125}, {"start": 138.76, "end": 139.0, "word": " weight", "probability": 0.93359375}, {"start": 139.0, "end": 139.2, "word": " is", "probability": 0.935546875}, {"start": 139.2, "end": 139.42, "word": " four", "probability": 0.65771484375}, {"start": 139.42, "end": 139.8, "word": " years,", "probability": 0.93994140625}, {"start": 140.58, "end": 141.34, "word": " and", "probability": 0.90576171875}, {"start": 141.34, "end": 141.7, "word": " his", "probability": 0.966796875}, {"start": 141.7, "end": 141.88, "word": " or", "probability": 0.9501953125}, {"start": 141.88, "end": 142.02, "word": " her", "probability": 0.96923828125}, {"start": 142.02, "end": 142.28, "word": " weight", "probability": 0.90380859375}, {"start": 142.28, "end": 142.56, "word": " is,", "probability": 0.93994140625}, {"start": 142.72, "end": 142.74, "word": " for", "probability": 0.95166015625}, {"start": 142.74, "end": 143.1, "word": " example,", "probability": 0.97216796875}, {"start": 143.2, "end": 143.6, "word": " 15", "probability": 0.580078125}, {"start": 143.6, "end": 144.02, "word": " kilograms,", "probability": 0.71533203125}, {"start": 144.22, "end": 144.36, "word": " and", "probability": 0.94140625}, {"start": 144.36, "end": 144.5, "word": " so", "probability": 
0.951171875}, {"start": 144.5, "end": 144.72, "word": " on.", "probability": 0.95068359375}, {"start": 147.34, "end": 147.72, "word": " And", "probability": 0.93798828125}, {"start": 147.72, "end": 147.94, "word": " again,", "probability": 0.845703125}, {"start": 147.96, "end": 148.04, "word": " we", "probability": 0.96044921875}, {"start": 148.04, "end": 148.16, "word": " are", "probability": 0.77783203125}, {"start": 148.16, "end": 148.56, "word": " interested", "probability": 0.8583984375}, {"start": 148.56, "end": 148.94, "word": " to", "probability": 0.96337890625}, {"start": 148.94, "end": 149.32, "word": " examine", "probability": 0.9609375}, {"start": 149.32, "end": 149.68, "word": " the", "probability": 0.91455078125}, {"start": 149.68, "end": 150.2, "word": " relationship", "probability": 0.908203125}, {"start": 150.2, "end": 150.62, "word": " between", "probability": 0.87646484375}, {"start": 150.62, "end": 151.0, "word": " age", "probability": 0.9404296875}, {"start": 151.0, "end": 151.16, "word": " and", "probability": 0.943359375}, {"start": 151.16, "end": 151.48, "word": " weight.", "probability": 0.93603515625}, {"start": 152.14, "end": 152.44, "word": " Maybe", "probability": 0.95263671875}, {"start": 152.44, "end": 152.64, "word": " they", "probability": 0.58349609375}, {"start": 152.64, "end": 153.0, "word": " exist", "probability": 0.95263671875}, {"start": 153.0, "end": 154.4, "word": " sometimes.", "probability": 0.6376953125}], "temperature": 1.0}, {"id": 7, "seek": 18370, "start": 155.48, "end": 183.7, "text": " positive relationship between the two variables that means if one variable increases the other one also increase if one variable increases the other will also decrease so they have the same direction either up or down so we have to know number one the form of the relationship this one could be linear here we focus just on", "tokens": [3353, 2480, 1296, 264, 732, 9102, 300, 1355, 498, 472, 7006, 8637, 264, 661, 472, 611, 3488, 498, 
472, 7006, 8637, 264, 661, 486, 611, 11514, 370, 436, 362, 264, 912, 3513, 2139, 493, 420, 760, 370, 321, 362, 281, 458, 1230, 472, 264, 1254, 295, 264, 2480, 341, 472, 727, 312, 8213, 510, 321, 1879, 445, 322], "avg_logprob": -0.22987287832518755, "compression_ratio": 1.894736842105263, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 155.48000000000002, "end": 156.18, "word": " positive", "probability": 0.200439453125}, {"start": 156.18, "end": 156.88, "word": " relationship", "probability": 0.91064453125}, {"start": 156.88, "end": 157.22, "word": " between", "probability": 0.87158203125}, {"start": 157.22, "end": 157.4, "word": " the", "probability": 0.814453125}, {"start": 157.4, "end": 157.54, "word": " two", "probability": 0.89111328125}, {"start": 157.54, "end": 158.04, "word": " variables", "probability": 0.921875}, {"start": 158.04, "end": 158.5, "word": " that", "probability": 0.471435546875}, {"start": 158.5, "end": 158.96, "word": " means", "probability": 0.9306640625}, {"start": 158.96, "end": 159.96, "word": " if", "probability": 0.77490234375}, {"start": 159.96, "end": 160.24, "word": " one", "probability": 0.90185546875}, {"start": 160.24, "end": 160.54, "word": " variable", "probability": 0.8740234375}, {"start": 160.54, "end": 161.1, "word": " increases", "probability": 0.9130859375}, {"start": 161.1, "end": 161.52, "word": " the", "probability": 0.5830078125}, {"start": 161.52, "end": 161.76, "word": " other", "probability": 0.88134765625}, {"start": 161.76, "end": 162.06, "word": " one", "probability": 0.87451171875}, {"start": 162.06, "end": 162.72, "word": " also", "probability": 0.8115234375}, {"start": 162.72, "end": 163.36, "word": " increase", "probability": 0.357177734375}, {"start": 163.36, "end": 164.54, "word": " if", "probability": 0.6416015625}, {"start": 164.54, "end": 165.0, "word": " one", "probability": 0.89697265625}, {"start": 165.0, "end": 165.26, "word": " variable", "probability": 0.9013671875}, {"start": 165.26, 
"end": 165.72, "word": " increases", "probability": 0.63671875}, {"start": 165.72, "end": 166.08, "word": " the", "probability": 0.78173828125}, {"start": 166.08, "end": 166.3, "word": " other", "probability": 0.8818359375}, {"start": 166.3, "end": 166.52, "word": " will", "probability": 0.5029296875}, {"start": 166.52, "end": 166.8, "word": " also", "probability": 0.85888671875}, {"start": 166.8, "end": 167.28, "word": " decrease", "probability": 0.87890625}, {"start": 167.28, "end": 167.76, "word": " so", "probability": 0.66943359375}, {"start": 167.76, "end": 167.98, "word": " they", "probability": 0.82275390625}, {"start": 167.98, "end": 168.2, "word": " have", "probability": 0.94677734375}, {"start": 168.2, "end": 168.4, "word": " the", "probability": 0.900390625}, {"start": 168.4, "end": 168.7, "word": " same", "probability": 0.92041015625}, {"start": 168.7, "end": 169.22, "word": " direction", "probability": 0.96044921875}, {"start": 169.22, "end": 169.5, "word": " either", "probability": 0.91259765625}, {"start": 169.5, "end": 169.84, "word": " up", "probability": 0.96826171875}, {"start": 169.84, "end": 170.52, "word": " or", "probability": 0.95458984375}, {"start": 170.52, "end": 170.84, "word": " down", "probability": 0.8564453125}, {"start": 170.84, "end": 172.82, "word": " so", "probability": 0.64208984375}, {"start": 172.82, "end": 172.98, "word": " we", "probability": 0.92578125}, {"start": 172.98, "end": 173.14, "word": " have", "probability": 0.951171875}, {"start": 173.14, "end": 173.3, "word": " to", "probability": 0.9677734375}, {"start": 173.3, "end": 173.56, "word": " know", "probability": 0.88134765625}, {"start": 173.56, "end": 174.3, "word": " number", "probability": 0.79736328125}, {"start": 174.3, "end": 174.64, "word": " one", "probability": 0.84130859375}, {"start": 174.64, "end": 175.42, "word": " the", "probability": 0.77880859375}, {"start": 175.42, "end": 175.8, "word": " form", "probability": 0.908203125}, {"start": 175.8, "end": 
177.98, "word": " of", "probability": 0.95947265625}, {"start": 177.98, "end": 178.14, "word": " the", "probability": 0.91357421875}, {"start": 178.14, "end": 178.66, "word": " relationship", "probability": 0.90869140625}, {"start": 178.66, "end": 179.9, "word": " this", "probability": 0.900390625}, {"start": 179.9, "end": 180.16, "word": " one", "probability": 0.67919921875}, {"start": 180.16, "end": 180.4, "word": " could", "probability": 0.8828125}, {"start": 180.4, "end": 180.58, "word": " be", "probability": 0.9501953125}, {"start": 180.58, "end": 180.92, "word": " linear", "probability": 0.91259765625}, {"start": 180.92, "end": 181.98, "word": " here", "probability": 0.74658203125}, {"start": 181.98, "end": 182.14, "word": " we", "probability": 0.8466796875}, {"start": 182.14, "end": 182.52, "word": " focus", "probability": 0.376708984375}, {"start": 182.52, "end": 183.06, "word": " just", "probability": 0.9091796875}, {"start": 183.06, "end": 183.7, "word": " on", "probability": 0.955078125}], "temperature": 1.0}, {"id": 8, "seek": 21033, "start": 184.61, "end": 210.33, "text": " linear relationship between X and Y. The second, we have to know the direction of the relationship. This direction might be positive or negative relationship. 
In addition to that, we have to know the strength of", "tokens": [8213, 2480, 1296, 1783, 293, 398, 13, 440, 1150, 11, 321, 362, 281, 458, 264, 3513, 295, 264, 2480, 13, 639, 3513, 1062, 312, 3353, 420, 3671, 2480, 13, 682, 4500, 281, 300, 11, 321, 362, 281, 458, 264, 3800, 295], "avg_logprob": -0.1995907798409462, "compression_ratio": 1.65625, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 184.61, "end": 185.21, "word": " linear", "probability": 0.138916015625}, {"start": 185.21, "end": 186.03, "word": " relationship", "probability": 0.88623046875}, {"start": 186.03, "end": 186.31, "word": " between", "probability": 0.88671875}, {"start": 186.31, "end": 186.51, "word": " X", "probability": 0.439697265625}, {"start": 186.51, "end": 186.65, "word": " and", "probability": 0.8984375}, {"start": 186.65, "end": 186.89, "word": " Y.", "probability": 0.98291015625}, {"start": 188.05, "end": 188.57, "word": " The", "probability": 0.6240234375}, {"start": 188.57, "end": 189.03, "word": " second,", "probability": 0.884765625}, {"start": 189.25, "end": 189.55, "word": " we", "probability": 0.908203125}, {"start": 189.55, "end": 189.79, "word": " have", "probability": 0.93896484375}, {"start": 189.79, "end": 190.05, "word": " to", "probability": 0.97119140625}, {"start": 190.05, "end": 190.21, "word": " know", "probability": 0.884765625}, {"start": 190.21, "end": 190.51, "word": " the", "probability": 0.9111328125}, {"start": 190.51, "end": 191.09, "word": " direction", "probability": 0.95458984375}, {"start": 191.09, "end": 193.57, "word": " of", "probability": 0.90576171875}, {"start": 193.57, "end": 193.73, "word": " the", "probability": 0.912109375}, {"start": 193.73, "end": 194.25, "word": " relationship.", "probability": 0.8984375}, {"start": 195.17, "end": 195.37, "word": " This", "probability": 0.86376953125}, {"start": 195.37, "end": 195.81, "word": " direction", "probability": 0.97509765625}, {"start": 195.81, "end": 196.47, "word": " might", 
"probability": 0.88818359375}, {"start": 196.47, "end": 197.27, "word": " be", "probability": 0.95458984375}, {"start": 197.27, "end": 198.11, "word": " positive", "probability": 0.8681640625}, {"start": 198.11, "end": 201.27, "word": " or", "probability": 0.8203125}, {"start": 201.27, "end": 201.69, "word": " negative", "probability": 0.9296875}, {"start": 201.69, "end": 202.35, "word": " relationship.", "probability": 0.90673828125}, {"start": 205.15, "end": 205.57, "word": " In", "probability": 0.93505859375}, {"start": 205.57, "end": 205.89, "word": " addition", "probability": 0.9462890625}, {"start": 205.89, "end": 206.11, "word": " to", "probability": 0.96240234375}, {"start": 206.11, "end": 206.29, "word": " that,", "probability": 0.93701171875}, {"start": 206.37, "end": 206.47, "word": " we", "probability": 0.94873046875}, {"start": 206.47, "end": 206.65, "word": " have", "probability": 0.94287109375}, {"start": 206.65, "end": 206.81, "word": " to", "probability": 0.96923828125}, {"start": 206.81, "end": 207.05, "word": " know", "probability": 0.88623046875}, {"start": 207.05, "end": 207.49, "word": " the", "probability": 0.90380859375}, {"start": 207.49, "end": 207.99, "word": " strength", "probability": 0.86962890625}, {"start": 207.99, "end": 210.33, "word": " of", "probability": 0.79833984375}], "temperature": 1.0}, {"id": 9, "seek": 23982, "start": 210.92, "end": 239.82, "text": " the relationship between the two variables of interest the strength can be classified into three categories either strong, moderate or there exists a weak relationship so it could be positive-strong, positive-moderate or positive-weak, the same for negative so by using scatter plot we can determine the form", "tokens": [264, 2480, 1296, 264, 732, 9102, 295, 1179, 264, 3800, 393, 312, 20627, 666, 1045, 10479, 2139, 2068, 11, 18174, 420, 456, 8198, 257, 5336, 2480, 370, 309, 727, 312, 3353, 12, 28063, 11, 3353, 12, 8014, 260, 473, 420, 3353, 12, 826, 514, 11, 264, 912, 337, 
3671, 370, 538, 1228, 34951, 7542, 321, 393, 6997, 264, 1254], "avg_logprob": -0.22916667213042577, "compression_ratio": 1.7359550561797752, "no_speech_prob": 0.0, "words": [{"start": 210.92, "end": 211.38, "word": " the", "probability": 0.300048828125}, {"start": 211.38, "end": 212.28, "word": " relationship", "probability": 0.87744140625}, {"start": 212.28, "end": 212.72, "word": " between", "probability": 0.87646484375}, {"start": 212.72, "end": 212.92, "word": " the", "probability": 0.7109375}, {"start": 212.92, "end": 213.12, "word": " two", "probability": 0.79296875}, {"start": 213.12, "end": 213.5, "word": " variables", "probability": 0.93701171875}, {"start": 213.5, "end": 213.76, "word": " of", "probability": 0.9541015625}, {"start": 213.76, "end": 214.24, "word": " interest", "probability": 0.890625}, {"start": 214.24, "end": 215.2, "word": " the", "probability": 0.320556640625}, {"start": 215.2, "end": 215.66, "word": " strength", "probability": 0.62451171875}, {"start": 215.66, "end": 215.94, "word": " can", "probability": 0.93408203125}, {"start": 215.94, "end": 216.12, "word": " be", "probability": 0.95556640625}, {"start": 216.12, "end": 216.56, "word": " classified", "probability": 0.94189453125}, {"start": 216.56, "end": 216.94, "word": " into", "probability": 0.8173828125}, {"start": 216.94, "end": 217.32, "word": " three", "probability": 0.86865234375}, {"start": 217.32, "end": 218.26, "word": " categories", "probability": 0.92919921875}, {"start": 218.26, "end": 219.18, "word": " either", "probability": 0.82470703125}, {"start": 219.18, "end": 220.02, "word": " strong,", "probability": 0.80322265625}, {"start": 221.62, "end": 222.74, "word": " moderate", "probability": 0.88916015625}, {"start": 222.74, "end": 225.56, "word": " or", "probability": 0.6025390625}, {"start": 225.56, "end": 226.0, "word": " there", "probability": 0.837890625}, {"start": 226.0, "end": 226.48, "word": " exists", "probability": 0.82080078125}, {"start": 226.48, "end": 
227.24, "word": " a", "probability": 0.9716796875}, {"start": 227.24, "end": 227.48, "word": " weak", "probability": 0.98046875}, {"start": 227.48, "end": 228.08, "word": " relationship", "probability": 0.90576171875}, {"start": 228.08, "end": 229.24, "word": " so", "probability": 0.64990234375}, {"start": 229.24, "end": 229.38, "word": " it", "probability": 0.912109375}, {"start": 229.38, "end": 229.54, "word": " could", "probability": 0.875}, {"start": 229.54, "end": 230.02, "word": " be", "probability": 0.9404296875}, {"start": 230.02, "end": 230.58, "word": " positive", "probability": 0.89306640625}, {"start": 230.58, "end": 231.06, "word": "-strong,", "probability": 0.55535888671875}, {"start": 231.24, "end": 231.58, "word": " positive", "probability": 0.9189453125}, {"start": 231.58, "end": 232.0, "word": "-moderate", "probability": 0.95654296875}, {"start": 232.0, "end": 232.14, "word": " or", "probability": 0.576171875}, {"start": 232.14, "end": 232.52, "word": " positive", "probability": 0.91162109375}, {"start": 232.52, "end": 232.94, "word": "-weak,", "probability": 0.9625651041666666}, {"start": 233.2, "end": 233.32, "word": " the", "probability": 0.79150390625}, {"start": 233.32, "end": 233.48, "word": " same", "probability": 0.9033203125}, {"start": 233.48, "end": 233.68, "word": " for", "probability": 0.6904296875}, {"start": 233.68, "end": 234.08, "word": " negative", "probability": 0.93603515625}, {"start": 234.08, "end": 235.2, "word": " so", "probability": 0.6171875}, {"start": 235.2, "end": 235.46, "word": " by", "probability": 0.91796875}, {"start": 235.46, "end": 235.78, "word": " using", "probability": 0.931640625}, {"start": 235.78, "end": 236.18, "word": " scatter", "probability": 0.58154296875}, {"start": 236.18, "end": 236.58, "word": " plot", "probability": 0.8046875}, {"start": 236.58, "end": 238.06, "word": " we", "probability": 0.83447265625}, {"start": 238.06, "end": 238.36, "word": " can", "probability": 0.94580078125}, {"start": 
238.36, "end": 238.82, "word": " determine", "probability": 0.93310546875}, {"start": 238.82, "end": 239.44, "word": " the", "probability": 0.87841796875}, {"start": 239.44, "end": 239.82, "word": " form", "probability": 0.7666015625}], "temperature": 1.0}, {"id": 10, "seek": 26981, "start": 240.59, "end": 269.81, "text": " either linear or non-linear, but here we are focusing on just linear relationship. Also, we can determine the direction of the relationship. We can say there exists positive or negative based on the scatter plot. Also, we can know the strength of the relationship, either strong, moderate or weak. For example, suppose we have again weights and ages.", "tokens": [2139, 8213, 420, 2107, 12, 28263, 11, 457, 510, 321, 366, 8416, 322, 445, 8213, 2480, 13, 2743, 11, 321, 393, 6997, 264, 3513, 295, 264, 2480, 13, 492, 393, 584, 456, 8198, 3353, 420, 3671, 2361, 322, 264, 34951, 7542, 13, 2743, 11, 321, 393, 458, 264, 3800, 295, 264, 2480, 11, 2139, 2068, 11, 18174, 420, 5336, 13, 1171, 1365, 11, 7297, 321, 362, 797, 17443, 293, 12357, 13], "avg_logprob": -0.22960069051219356, "compression_ratio": 1.7121951219512195, "no_speech_prob": 0.0, "words": [{"start": 240.59, "end": 240.99, "word": " either", "probability": 0.47216796875}, {"start": 240.99, "end": 241.45, "word": " linear", "probability": 0.8369140625}, {"start": 241.45, "end": 241.91, "word": " or", "probability": 0.9423828125}, {"start": 241.91, "end": 242.29, "word": " non", "probability": 0.97802734375}, {"start": 242.29, "end": 242.53, "word": "-linear,", "probability": 0.81591796875}, {"start": 242.69, "end": 242.83, "word": " but", "probability": 0.89697265625}, {"start": 242.83, "end": 243.07, "word": " here", "probability": 0.84619140625}, {"start": 243.07, "end": 243.97, "word": " we", "probability": 0.77978515625}, {"start": 243.97, "end": 244.19, "word": " are", "probability": 0.9345703125}, {"start": 244.19, "end": 244.73, "word": " focusing", "probability": 0.85791015625}, {"start": 
244.73, "end": 244.97, "word": " on", "probability": 0.802734375}, {"start": 244.97, "end": 245.39, "word": " just", "probability": 0.88623046875}, {"start": 245.39, "end": 246.13, "word": " linear", "probability": 0.779296875}, {"start": 246.13, "end": 247.17, "word": " relationship.", "probability": 0.8408203125}, {"start": 247.79, "end": 248.29, "word": " Also,", "probability": 0.89794921875}, {"start": 248.35, "end": 248.49, "word": " we", "probability": 0.89208984375}, {"start": 248.49, "end": 248.79, "word": " can", "probability": 0.9482421875}, {"start": 248.79, "end": 249.55, "word": " determine", "probability": 0.9228515625}, {"start": 249.55, "end": 249.91, "word": " the", "probability": 0.916015625}, {"start": 249.91, "end": 250.31, "word": " direction", "probability": 0.9755859375}, {"start": 250.31, "end": 251.03, "word": " of", "probability": 0.96630859375}, {"start": 251.03, "end": 251.17, "word": " the", "probability": 0.8994140625}, {"start": 251.17, "end": 251.57, "word": " relationship.", "probability": 0.89404296875}, {"start": 251.77, "end": 251.85, "word": " We", "probability": 0.94091796875}, {"start": 251.85, "end": 252.03, "word": " can", "probability": 0.94677734375}, {"start": 252.03, "end": 252.29, "word": " say", "probability": 0.71533203125}, {"start": 252.29, "end": 252.53, "word": " there", "probability": 0.58544921875}, {"start": 252.53, "end": 252.87, "word": " exists", "probability": 0.70703125}, {"start": 252.87, "end": 253.27, "word": " positive", "probability": 0.89697265625}, {"start": 253.27, "end": 253.55, "word": " or", "probability": 0.95703125}, {"start": 253.55, "end": 253.93, "word": " negative", "probability": 0.9462890625}, {"start": 253.93, "end": 254.31, "word": " based", "probability": 0.43017578125}, {"start": 254.31, "end": 254.69, "word": " on", "probability": 0.91552734375}, {"start": 254.69, "end": 255.35, "word": " the", "probability": 0.45947265625}, {"start": 255.35, "end": 255.67, "word": " scatter", 
"probability": 0.689453125}, {"start": 255.67, "end": 255.91, "word": " plot.", "probability": 0.375732421875}, {"start": 256.71, "end": 257.13, "word": " Also,", "probability": 0.9365234375}, {"start": 257.15, "end": 257.27, "word": " we", "probability": 0.8984375}, {"start": 257.27, "end": 257.57, "word": " can", "probability": 0.93408203125}, {"start": 257.57, "end": 258.27, "word": " know", "probability": 0.88427734375}, {"start": 258.27, "end": 258.49, "word": " the", "probability": 0.91845703125}, {"start": 258.49, "end": 258.89, "word": " strength", "probability": 0.8408203125}, {"start": 258.89, "end": 259.39, "word": " of", "probability": 0.96826171875}, {"start": 259.39, "end": 259.53, "word": " the", "probability": 0.91064453125}, {"start": 259.53, "end": 259.93, "word": " relationship,", "probability": 0.89990234375}, {"start": 260.05, "end": 260.25, "word": " either", "probability": 0.93603515625}, {"start": 260.25, "end": 261.13, "word": " strong,", "probability": 0.875}, {"start": 261.27, "end": 261.51, "word": " moderate", "probability": 0.9560546875}, {"start": 261.51, "end": 261.77, "word": " or", "probability": 0.61767578125}, {"start": 261.77, "end": 262.07, "word": " weak.", "probability": 0.97119140625}, {"start": 262.79, "end": 263.13, "word": " For", "probability": 0.9521484375}, {"start": 263.13, "end": 263.51, "word": " example,", "probability": 0.97412109375}, {"start": 263.65, "end": 264.07, "word": " suppose", "probability": 0.875}, {"start": 264.07, "end": 264.69, "word": " we", "probability": 0.9013671875}, {"start": 264.69, "end": 265.05, "word": " have", "probability": 0.947265625}, {"start": 265.05, "end": 266.77, "word": " again", "probability": 0.438232421875}, {"start": 266.77, "end": 268.93, "word": " weights", "probability": 0.70166015625}, {"start": 268.93, "end": 269.25, "word": " and", "probability": 0.94091796875}, {"start": 269.25, "end": 269.81, "word": " ages.", "probability": 0.47021484375}], "temperature": 1.0}, 
{"id": 11, "seek": 29775, "start": 270.39, "end": 297.75, "text": " And we know that there are two types of variables in this case. One is called dependent and the other is independent. So if we, as we explained before, is the dependent variable and A is independent variable. Always dependent variable", "tokens": [400, 321, 458, 300, 456, 366, 732, 3467, 295, 9102, 294, 341, 1389, 13, 1485, 307, 1219, 12334, 293, 264, 661, 307, 6695, 13, 407, 498, 321, 11, 382, 321, 8825, 949, 11, 307, 264, 12334, 7006, 293, 316, 307, 6695, 7006, 13, 11270, 12334, 7006], "avg_logprob": -0.28191488473973375, "compression_ratio": 1.6785714285714286, "no_speech_prob": 0.0, "words": [{"start": 270.39, "end": 270.65, "word": " And", "probability": 0.310791015625}, {"start": 270.65, "end": 270.77, "word": " we", "probability": 0.69189453125}, {"start": 270.77, "end": 270.91, "word": " know", "probability": 0.884765625}, {"start": 270.91, "end": 271.19, "word": " that", "probability": 0.884765625}, {"start": 271.19, "end": 272.35, "word": " there", "probability": 0.83154296875}, {"start": 272.35, "end": 272.51, "word": " are", "probability": 0.947265625}, {"start": 272.51, "end": 272.69, "word": " two", "probability": 0.88623046875}, {"start": 272.69, "end": 273.01, "word": " types", "probability": 0.8095703125}, {"start": 273.01, "end": 273.21, "word": " of", "probability": 0.96875}, {"start": 273.21, "end": 273.59, "word": " variables", "probability": 0.9130859375}, {"start": 273.59, "end": 273.81, "word": " in", "probability": 0.904296875}, {"start": 273.81, "end": 274.03, "word": " this", "probability": 0.94775390625}, {"start": 274.03, "end": 274.25, "word": " case.", "probability": 0.92138671875}, {"start": 274.37, "end": 274.49, "word": " One", "probability": 0.89013671875}, {"start": 274.49, "end": 274.63, "word": " is", "probability": 0.9208984375}, {"start": 274.63, "end": 274.91, "word": " called", "probability": 0.84716796875}, {"start": 274.91, "end": 275.35, "word": " 
dependent", "probability": 0.7607421875}, {"start": 275.35, "end": 276.31, "word": " and", "probability": 0.673828125}, {"start": 276.31, "end": 276.71, "word": " the", "probability": 0.75927734375}, {"start": 276.71, "end": 276.91, "word": " other", "probability": 0.8896484375}, {"start": 276.91, "end": 277.13, "word": " is", "probability": 0.91943359375}, {"start": 277.13, "end": 277.55, "word": " independent.", "probability": 0.86474609375}, {"start": 278.51, "end": 279.03, "word": " So", "probability": 0.87158203125}, {"start": 279.03, "end": 279.35, "word": " if", "probability": 0.495361328125}, {"start": 279.35, "end": 279.49, "word": " we,", "probability": 0.8974609375}, {"start": 279.81, "end": 280.19, "word": " as", "probability": 0.953125}, {"start": 280.19, "end": 280.43, "word": " we", "probability": 0.9482421875}, {"start": 280.43, "end": 281.33, "word": " explained", "probability": 0.81396484375}, {"start": 281.33, "end": 281.95, "word": " before,", "probability": 0.87353515625}, {"start": 283.35, "end": 284.13, "word": " is", "probability": 0.451416015625}, {"start": 284.13, "end": 284.29, "word": " the", "probability": 0.90771484375}, {"start": 284.29, "end": 284.67, "word": " dependent", "probability": 0.83544921875}, {"start": 284.67, "end": 285.17, "word": " variable", "probability": 0.9306640625}, {"start": 285.17, "end": 287.31, "word": " and", "probability": 0.5234375}, {"start": 287.31, "end": 287.55, "word": " A", "probability": 0.619140625}, {"start": 287.55, "end": 287.89, "word": " is", "probability": 0.95361328125}, {"start": 287.89, "end": 288.37, "word": " independent", "probability": 0.90185546875}, {"start": 288.37, "end": 288.71, "word": " variable.", "probability": 0.445068359375}, {"start": 292.69, "end": 293.35, "word": " Always", "probability": 0.78955078125}, {"start": 293.35, "end": 297.27, "word": " dependent", "probability": 0.59228515625}, {"start": 297.27, "end": 297.75, "word": " variable", "probability": 0.91748046875}], 
"temperature": 1.0}, {"id": 12, "seek": 32864, "start": 300.4, "end": 328.64, "text": " is denoted by Y and always on the vertical axis so here we have weight and independent variable is denoted by X and X is in the X axis or horizontal axis now scatter plot for example here child with age 2 years his weight is 8", "tokens": [307, 1441, 23325, 538, 398, 293, 1009, 322, 264, 9429, 10298, 370, 510, 321, 362, 3364, 293, 6695, 7006, 307, 1441, 23325, 538, 1783, 293, 1783, 307, 294, 264, 1783, 10298, 420, 12750, 10298, 586, 34951, 7542, 337, 1365, 510, 1440, 365, 3205, 568, 924, 702, 3364, 307, 1649], "avg_logprob": -0.21671874523162843, "compression_ratio": 1.523489932885906, "no_speech_prob": 0.0, "words": [{"start": 300.4, "end": 301.12, "word": " is", "probability": 0.32958984375}, {"start": 301.12, "end": 301.84, "word": " denoted", "probability": 0.936279296875}, {"start": 301.84, "end": 302.14, "word": " by", "probability": 0.97412109375}, {"start": 302.14, "end": 302.48, "word": " Y", "probability": 0.73828125}, {"start": 302.48, "end": 303.22, "word": " and", "probability": 0.6298828125}, {"start": 303.22, "end": 303.52, "word": " always", "probability": 0.80322265625}, {"start": 303.52, "end": 303.76, "word": " on", "probability": 0.90966796875}, {"start": 303.76, "end": 303.9, "word": " the", "probability": 0.9072265625}, {"start": 303.9, "end": 304.26, "word": " vertical", "probability": 0.89599609375}, {"start": 304.26, "end": 304.74, "word": " axis", "probability": 0.95703125}, {"start": 304.74, "end": 305.56, "word": " so", "probability": 0.2529296875}, {"start": 305.56, "end": 305.78, "word": " here", "probability": 0.83642578125}, {"start": 305.78, "end": 305.94, "word": " we", "probability": 0.93359375}, {"start": 305.94, "end": 306.14, "word": " have", "probability": 0.9482421875}, {"start": 306.14, "end": 306.5, "word": " weight", "probability": 0.837890625}, {"start": 306.5, "end": 309.06, "word": " and", "probability": 0.7041015625}, {"start": 
309.06, "end": 309.82, "word": " independent", "probability": 0.80615234375}, {"start": 309.82, "end": 310.26, "word": " variable", "probability": 0.93115234375}, {"start": 310.26, "end": 311.3, "word": " is", "probability": 0.93603515625}, {"start": 311.3, "end": 311.64, "word": " denoted", "probability": 0.9736328125}, {"start": 311.64, "end": 311.94, "word": " by", "probability": 0.97021484375}, {"start": 311.94, "end": 312.32, "word": " X", "probability": 0.8837890625}, {"start": 312.32, "end": 314.02, "word": " and", "probability": 0.85107421875}, {"start": 314.02, "end": 314.34, "word": " X", "probability": 0.9423828125}, {"start": 314.34, "end": 314.78, "word": " is", "probability": 0.92822265625}, {"start": 314.78, "end": 314.98, "word": " in", "probability": 0.75927734375}, {"start": 314.98, "end": 315.18, "word": " the", "probability": 0.8955078125}, {"start": 315.18, "end": 315.58, "word": " X", "probability": 0.7392578125}, {"start": 315.58, "end": 316.22, "word": " axis", "probability": 0.666015625}, {"start": 316.22, "end": 317.14, "word": " or", "probability": 0.830078125}, {"start": 317.14, "end": 317.76, "word": " horizontal", "probability": 0.7705078125}, {"start": 317.76, "end": 318.84, "word": " axis", "probability": 0.958984375}, {"start": 318.84, "end": 321.32, "word": " now", "probability": 0.529296875}, {"start": 321.32, "end": 321.78, "word": " scatter", "probability": 0.6611328125}, {"start": 321.78, "end": 322.1, "word": " plot", "probability": 0.5703125}, {"start": 322.1, "end": 322.4, "word": " for", "probability": 0.85888671875}, {"start": 322.4, "end": 322.76, "word": " example", "probability": 0.974609375}, {"start": 322.76, "end": 323.6, "word": " here", "probability": 0.81640625}, {"start": 323.6, "end": 326.04, "word": " child", "probability": 0.65234375}, {"start": 326.04, "end": 326.3, "word": " with", "probability": 0.87646484375}, {"start": 326.3, "end": 326.58, "word": " age", "probability": 0.93994140625}, {"start": 326.58, 
"end": 326.82, "word": " 2", "probability": 0.56396484375}, {"start": 326.82, "end": 327.2, "word": " years", "probability": 0.912109375}, {"start": 327.2, "end": 327.82, "word": " his", "probability": 0.7744140625}, {"start": 327.82, "end": 328.1, "word": " weight", "probability": 0.90087890625}, {"start": 328.1, "end": 328.32, "word": " is", "probability": 0.94921875}, {"start": 328.32, "end": 328.64, "word": " 8", "probability": 0.91796875}], "temperature": 1.0}, {"id": 13, "seek": 35580, "start": 329.18, "end": 355.8, "text": " So two years, for example, this is eight. So this star represents the first pair of observation, age of two and weight of eight. The other child, his weight is four years, and the corresponding weight is 15. For example, this value is 15.", "tokens": [407, 732, 924, 11, 337, 1365, 11, 341, 307, 3180, 13, 407, 341, 3543, 8855, 264, 700, 6119, 295, 14816, 11, 3205, 295, 732, 293, 3364, 295, 3180, 13, 440, 661, 1440, 11, 702, 3364, 307, 1451, 924, 11, 293, 264, 11760, 3364, 307, 2119, 13, 1171, 1365, 11, 341, 2158, 307, 2119, 13], "avg_logprob": -0.14815341450951316, "compression_ratio": 1.5620915032679739, "no_speech_prob": 0.0, "words": [{"start": 329.18, "end": 329.48, "word": " So", "probability": 0.7197265625}, {"start": 329.48, "end": 329.7, "word": " two", "probability": 0.583984375}, {"start": 329.7, "end": 330.1, "word": " years,", "probability": 0.931640625}, {"start": 330.64, "end": 330.82, "word": " for", "probability": 0.94384765625}, {"start": 330.82, "end": 331.18, "word": " example,", "probability": 0.97509765625}, {"start": 331.88, "end": 332.06, "word": " this", "probability": 0.923828125}, {"start": 332.06, "end": 332.2, "word": " is", "probability": 0.94921875}, {"start": 332.2, "end": 332.58, "word": " eight.", "probability": 0.78369140625}, {"start": 334.46, "end": 335.2, "word": " So", "probability": 0.94189453125}, {"start": 335.2, "end": 335.56, "word": " this", "probability": 0.93212890625}, {"start": 335.56, 
"end": 336.12, "word": " star", "probability": 0.93359375}, {"start": 336.12, "end": 336.76, "word": " represents", "probability": 0.85888671875}, {"start": 336.76, "end": 337.08, "word": " the", "probability": 0.9140625}, {"start": 337.08, "end": 337.48, "word": " first", "probability": 0.87255859375}, {"start": 337.48, "end": 337.98, "word": " pair", "probability": 0.77587890625}, {"start": 337.98, "end": 339.48, "word": " of", "probability": 0.908203125}, {"start": 339.48, "end": 340.0, "word": " observation,", "probability": 0.84912109375}, {"start": 340.88, "end": 341.1, "word": " age", "probability": 0.93798828125}, {"start": 341.1, "end": 341.28, "word": " of", "probability": 0.9677734375}, {"start": 341.28, "end": 341.56, "word": " two", "probability": 0.88232421875}, {"start": 341.56, "end": 342.32, "word": " and", "probability": 0.859375}, {"start": 342.32, "end": 342.58, "word": " weight", "probability": 0.755859375}, {"start": 342.58, "end": 342.76, "word": " of", "probability": 0.96875}, {"start": 342.76, "end": 343.06, "word": " eight.", "probability": 0.904296875}, {"start": 343.98, "end": 344.42, "word": " The", "probability": 0.876953125}, {"start": 344.42, "end": 344.7, "word": " other", "probability": 0.8837890625}, {"start": 344.7, "end": 345.3, "word": " child,", "probability": 0.87255859375}, {"start": 345.66, "end": 345.96, "word": " his", "probability": 0.96337890625}, {"start": 345.96, "end": 346.34, "word": " weight", "probability": 0.65478515625}, {"start": 346.34, "end": 346.82, "word": " is", "probability": 0.5810546875}, {"start": 346.82, "end": 347.18, "word": " four", "probability": 0.923828125}, {"start": 347.18, "end": 347.54, "word": " years,", "probability": 0.92919921875}, {"start": 348.44, "end": 350.46, "word": " and", "probability": 0.9296875}, {"start": 350.46, "end": 350.6, "word": " the", "probability": 0.900390625}, {"start": 350.6, "end": 351.1, "word": " corresponding", "probability": 0.84375}, {"start": 351.1, "end": 
351.72, "word": " weight", "probability": 0.92138671875}, {"start": 351.72, "end": 352.32, "word": " is", "probability": 0.943359375}, {"start": 352.32, "end": 352.86, "word": " 15.", "probability": 0.796875}, {"start": 353.7, "end": 353.96, "word": " For", "probability": 0.958984375}, {"start": 353.96, "end": 354.26, "word": " example,", "probability": 0.9794921875}, {"start": 354.36, "end": 354.62, "word": " this", "probability": 0.875}, {"start": 354.62, "end": 355.24, "word": " value", "probability": 0.9326171875}, {"start": 355.24, "end": 355.44, "word": " is", "probability": 0.93359375}, {"start": 355.44, "end": 355.8, "word": " 15.", "probability": 0.95166015625}], "temperature": 1.0}, {"id": 14, "seek": 36609, "start": 358.19, "end": 366.09, "text": " The same for the other points. Here we can know the direction. In this case they exist.", "tokens": [440, 912, 337, 264, 661, 2793, 13, 1692, 321, 393, 458, 264, 3513, 13, 682, 341, 1389, 436, 2514, 13], "avg_logprob": -0.3087797619047619, "compression_ratio": 1.1, "no_speech_prob": 0.0, "words": [{"start": 358.19, "end": 358.43, "word": " The", "probability": 0.251953125}, {"start": 358.43, "end": 358.63, "word": " same", "probability": 0.85498046875}, {"start": 358.63, "end": 358.81, "word": " for", "probability": 0.89111328125}, {"start": 358.81, "end": 358.97, "word": " the", "probability": 0.83447265625}, {"start": 358.97, "end": 359.21, "word": " other", "probability": 0.89013671875}, {"start": 359.21, "end": 359.67, "word": " points.", "probability": 0.59716796875}, {"start": 360.31, "end": 360.57, "word": " Here", "probability": 0.83447265625}, {"start": 360.57, "end": 360.71, "word": " we", "probability": 0.83447265625}, {"start": 360.71, "end": 360.93, "word": " can", "probability": 0.9189453125}, {"start": 360.93, "end": 361.23, "word": " know", "probability": 0.87890625}, {"start": 361.23, "end": 361.79, "word": " the", "probability": 0.8955078125}, {"start": 361.79, "end": 362.43, "word": " 
direction.", "probability": 0.96044921875}, {"start": 364.91, "end": 365.03, "word": " In", "probability": 0.95556640625}, {"start": 365.03, "end": 365.25, "word": " this", "probability": 0.94921875}, {"start": 365.25, "end": 365.49, "word": " case", "probability": 0.9208984375}, {"start": 365.49, "end": 365.69, "word": " they", "probability": 0.1910400390625}, {"start": 365.69, "end": 366.09, "word": " exist.", "probability": 0.89892578125}], "temperature": 1.0}, {"id": 15, "seek": 39574, "start": 368.14, "end": 395.74, "text": " Positive. Form is linear. Strong or weak or moderate depends on how these values are close to the straight line. Closer means stronger. So if the points are closer to the straight line, it means there exists stronger relationship between the two variables. So closer means stronger, either positive or negative. In this case, there exists positive.", "tokens": [46326, 13, 10126, 307, 8213, 13, 22792, 420, 5336, 420, 18174, 5946, 322, 577, 613, 4190, 366, 1998, 281, 264, 2997, 1622, 13, 2033, 22150, 1355, 7249, 13, 407, 498, 264, 2793, 366, 4966, 281, 264, 2997, 1622, 11, 309, 1355, 456, 8198, 7249, 2480, 1296, 264, 732, 9102, 13, 407, 4966, 1355, 7249, 11, 2139, 3353, 420, 3671, 13, 682, 341, 1389, 11, 456, 8198, 3353, 13], "avg_logprob": -0.2247509127077849, "compression_ratio": 1.7989690721649485, "no_speech_prob": 0.0, "words": [{"start": 368.14, "end": 368.82, "word": " Positive.", "probability": 0.44580078125}, {"start": 368.82, "end": 369.5, "word": " Form", "probability": 0.2841796875}, {"start": 369.5, "end": 369.7, "word": " is", "probability": 0.94140625}, {"start": 369.7, "end": 370.06, "word": " linear.", "probability": 0.7939453125}, {"start": 372.1, "end": 372.78, "word": " Strong", "probability": 0.77685546875}, {"start": 372.78, "end": 373.04, "word": " or", "probability": 0.89990234375}, {"start": 373.04, "end": 373.3, "word": " weak", "probability": 0.94677734375}, {"start": 373.3, "end": 373.46, "word": " or", 
"probability": 0.908203125}, {"start": 373.46, "end": 373.76, "word": " moderate", "probability": 0.953125}, {"start": 373.76, "end": 374.14, "word": " depends", "probability": 0.475830078125}, {"start": 374.14, "end": 374.8, "word": " on", "probability": 0.94482421875}, {"start": 374.8, "end": 376.42, "word": " how", "probability": 0.8564453125}, {"start": 376.42, "end": 376.86, "word": " these", "probability": 0.84912109375}, {"start": 376.86, "end": 377.28, "word": " values", "probability": 0.96240234375}, {"start": 377.28, "end": 377.52, "word": " are", "probability": 0.943359375}, {"start": 377.52, "end": 378.06, "word": " close", "probability": 0.86083984375}, {"start": 378.06, "end": 378.94, "word": " to", "probability": 0.93798828125}, {"start": 378.94, "end": 379.12, "word": " the", "probability": 0.900390625}, {"start": 379.12, "end": 379.42, "word": " straight", "probability": 0.90966796875}, {"start": 379.42, "end": 379.68, "word": " line.", "probability": 0.923828125}, {"start": 379.88, "end": 380.26, "word": " Closer", "probability": 0.898193359375}, {"start": 380.26, "end": 380.8, "word": " means", "probability": 0.62646484375}, {"start": 380.8, "end": 381.58, "word": " stronger.", "probability": 0.85302734375}, {"start": 382.18, "end": 382.5, "word": " So", "probability": 0.6708984375}, {"start": 382.5, "end": 382.72, "word": " if", "probability": 0.80224609375}, {"start": 382.72, "end": 382.96, "word": " the", "probability": 0.900390625}, {"start": 382.96, "end": 383.3, "word": " points", "probability": 0.9228515625}, {"start": 383.3, "end": 383.48, "word": " are", "probability": 0.94189453125}, {"start": 383.48, "end": 383.96, "word": " closer", "probability": 0.888671875}, {"start": 383.96, "end": 384.18, "word": " to", "probability": 0.95703125}, {"start": 384.18, "end": 384.38, "word": " the", "probability": 0.908203125}, {"start": 384.38, "end": 384.68, "word": " straight", "probability": 0.91064453125}, {"start": 384.68, "end": 384.98, 
"word": " line,", "probability": 0.9326171875}, {"start": 385.16, "end": 385.2, "word": " it", "probability": 0.87353515625}, {"start": 385.2, "end": 385.42, "word": " means", "probability": 0.9306640625}, {"start": 385.42, "end": 385.62, "word": " there", "probability": 0.80859375}, {"start": 385.62, "end": 386.04, "word": " exists", "probability": 0.80029296875}, {"start": 386.04, "end": 386.62, "word": " stronger", "probability": 0.56787109375}, {"start": 386.62, "end": 387.76, "word": " relationship", "probability": 0.8857421875}, {"start": 387.76, "end": 388.54, "word": " between", "probability": 0.87451171875}, {"start": 388.54, "end": 389.22, "word": " the", "probability": 0.8349609375}, {"start": 389.22, "end": 389.38, "word": " two", "probability": 0.91552734375}, {"start": 389.38, "end": 389.74, "word": " variables.", "probability": 0.88916015625}, {"start": 390.28, "end": 390.42, "word": " So", "probability": 0.90771484375}, {"start": 390.42, "end": 390.8, "word": " closer", "probability": 0.82861328125}, {"start": 390.8, "end": 391.14, "word": " means", "probability": 0.7333984375}, {"start": 391.14, "end": 391.62, "word": " stronger,", "probability": 0.87255859375}, {"start": 391.72, "end": 391.92, "word": " either", "probability": 0.92529296875}, {"start": 391.92, "end": 392.32, "word": " positive", "probability": 0.93994140625}, {"start": 392.32, "end": 392.54, "word": " or", "probability": 0.95947265625}, {"start": 392.54, "end": 392.88, "word": " negative.", "probability": 0.95068359375}, {"start": 393.92, "end": 394.48, "word": " In", "probability": 0.298583984375}, {"start": 394.48, "end": 394.64, "word": " this", "probability": 0.93505859375}, {"start": 394.64, "end": 394.84, "word": " case,", "probability": 0.927734375}, {"start": 394.92, "end": 395.0, "word": " there", "probability": 0.81494140625}, {"start": 395.0, "end": 395.22, "word": " exists", "probability": 0.84423828125}, {"start": 395.22, "end": 395.74, "word": " positive.", 
"probability": 0.91845703125}], "temperature": 1.0}, {"id": 16, "seek": 41838, "start": 396.86, "end": 418.38, "text": " Now for the negative association or relationship, we have the other direction, it could be this one. So in this case there exists linear but negative relationship, and this negative could be positive or negative, it depends on the points. So it's positive relationship.", "tokens": [823, 337, 264, 3671, 14598, 420, 2480, 11, 321, 362, 264, 661, 3513, 11, 309, 727, 312, 341, 472, 13, 407, 294, 341, 1389, 456, 8198, 8213, 457, 3671, 2480, 11, 293, 341, 3671, 727, 312, 3353, 420, 3671, 11, 309, 5946, 322, 264, 2793, 13, 407, 309, 311, 3353, 2480, 13], "avg_logprob": -0.2296580177433086, "compression_ratio": 1.7880794701986755, "no_speech_prob": 0.0, "words": [{"start": 396.86, "end": 397.18, "word": " Now", "probability": 0.59716796875}, {"start": 397.18, "end": 397.44, "word": " for", "probability": 0.66943359375}, {"start": 397.44, "end": 397.58, "word": " the", "probability": 0.810546875}, {"start": 397.58, "end": 397.84, "word": " negative", "probability": 0.8505859375}, {"start": 397.84, "end": 398.48, "word": " association", "probability": 0.88427734375}, {"start": 398.48, "end": 398.94, "word": " or", "probability": 0.71484375}, {"start": 398.94, "end": 400.54, "word": " relationship,", "probability": 0.7060546875}, {"start": 401.06, "end": 401.78, "word": " we", "probability": 0.8349609375}, {"start": 401.78, "end": 402.0, "word": " have", "probability": 0.9384765625}, {"start": 402.0, "end": 402.36, "word": " the", "probability": 0.8193359375}, {"start": 402.36, "end": 402.6, "word": " other", "probability": 0.890625}, {"start": 402.6, "end": 403.18, "word": " direction,", "probability": 0.9658203125}, {"start": 403.8, "end": 403.96, "word": " it", "probability": 0.861328125}, {"start": 403.96, "end": 404.1, "word": " could", "probability": 0.88330078125}, {"start": 404.1, "end": 404.24, "word": " be", "probability": 0.95458984375}, 
{"start": 404.24, "end": 404.46, "word": " this", "probability": 0.953125}, {"start": 404.46, "end": 404.7, "word": " one.", "probability": 0.90771484375}, {"start": 405.54, "end": 405.76, "word": " So", "probability": 0.837890625}, {"start": 405.76, "end": 405.88, "word": " in", "probability": 0.84716796875}, {"start": 405.88, "end": 406.06, "word": " this", "probability": 0.9462890625}, {"start": 406.06, "end": 406.28, "word": " case", "probability": 0.91845703125}, {"start": 406.28, "end": 406.46, "word": " there", "probability": 0.6494140625}, {"start": 406.46, "end": 406.9, "word": " exists", "probability": 0.7939453125}, {"start": 406.9, "end": 407.58, "word": " linear", "probability": 0.7568359375}, {"start": 407.58, "end": 409.06, "word": " but", "probability": 0.5263671875}, {"start": 409.06, "end": 409.46, "word": " negative", "probability": 0.9482421875}, {"start": 409.46, "end": 410.1, "word": " relationship,", "probability": 0.89501953125}, {"start": 410.56, "end": 410.7, "word": " and", "probability": 0.9208984375}, {"start": 410.7, "end": 410.88, "word": " this", "probability": 0.943359375}, {"start": 410.88, "end": 411.18, "word": " negative", "probability": 0.953125}, {"start": 411.18, "end": 411.38, "word": " could", "probability": 0.84716796875}, {"start": 411.38, "end": 411.58, "word": " be", "probability": 0.94873046875}, {"start": 411.58, "end": 411.9, "word": " positive", "probability": 0.92626953125}, {"start": 411.9, "end": 412.14, "word": " or", "probability": 0.96142578125}, {"start": 412.14, "end": 412.44, "word": " negative,", "probability": 0.95361328125}, {"start": 412.5, "end": 412.6, "word": " it", "probability": 0.8828125}, {"start": 412.6, "end": 412.94, "word": " depends", "probability": 0.91943359375}, {"start": 412.94, "end": 413.2, "word": " on", "probability": 0.9482421875}, {"start": 413.2, "end": 413.34, "word": " the", "probability": 0.9091796875}, {"start": 413.34, "end": 413.76, "word": " points.", "probability": 
0.63623046875}, {"start": 415.22, "end": 415.82, "word": " So", "probability": 0.6015625}, {"start": 415.82, "end": 416.1, "word": " it's", "probability": 0.886474609375}, {"start": 416.1, "end": 417.5, "word": " positive", "probability": 0.865234375}, {"start": 417.5, "end": 418.38, "word": " relationship.", "probability": 0.81787109375}], "temperature": 1.0}, {"id": 17, "seek": 43728, "start": 420.46, "end": 437.28, "text": " The other direction is negative. So the points, if the points are closed, then we can say there exists strong negative relationship. So by using scatter plot, we can determine all of these.", "tokens": [440, 661, 3513, 307, 3671, 13, 407, 264, 2793, 11, 498, 264, 2793, 366, 5395, 11, 550, 321, 393, 584, 456, 8198, 2068, 3671, 2480, 13, 407, 538, 1228, 34951, 7542, 11, 321, 393, 6997, 439, 295, 613, 13], "avg_logprob": -0.3021484471857548, "compression_ratio": 1.4728682170542635, "no_speech_prob": 0.0, "words": [{"start": 420.46, "end": 420.7, "word": " The", "probability": 0.2666015625}, {"start": 420.7, "end": 420.9, "word": " other", "probability": 0.84375}, {"start": 420.9, "end": 421.46, "word": " direction", "probability": 0.9453125}, {"start": 421.46, "end": 422.66, "word": " is", "probability": 0.85546875}, {"start": 422.66, "end": 423.0, "word": " negative.", "probability": 0.904296875}, {"start": 423.64, "end": 423.76, "word": " So", "probability": 0.46728515625}, {"start": 423.76, "end": 423.9, "word": " the", "probability": 0.427001953125}, {"start": 423.9, "end": 424.22, "word": " points,", "probability": 0.89453125}, {"start": 424.9, "end": 425.3, "word": " if", "probability": 0.94677734375}, {"start": 425.3, "end": 425.56, "word": " the", "probability": 0.88330078125}, {"start": 425.56, "end": 425.78, "word": " points", "probability": 0.9169921875}, {"start": 425.78, "end": 425.96, "word": " are", "probability": 0.9462890625}, {"start": 425.96, "end": 426.46, "word": " closed,", "probability": 0.64501953125}, {"start": 426.82, 
"end": 427.22, "word": " then", "probability": 0.82470703125}, {"start": 427.22, "end": 427.4, "word": " we", "probability": 0.73583984375}, {"start": 427.4, "end": 427.6, "word": " can", "probability": 0.93408203125}, {"start": 427.6, "end": 427.86, "word": " say", "probability": 0.8720703125}, {"start": 427.86, "end": 428.04, "word": " there", "probability": 0.74560546875}, {"start": 428.04, "end": 428.62, "word": " exists", "probability": 0.7724609375}, {"start": 428.62, "end": 429.76, "word": " strong", "probability": 0.3974609375}, {"start": 429.76, "end": 430.16, "word": " negative", "probability": 0.85302734375}, {"start": 430.16, "end": 430.7, "word": " relationship.", "probability": 0.865234375}, {"start": 432.18, "end": 432.42, "word": " So", "probability": 0.92431640625}, {"start": 432.42, "end": 432.6, "word": " by", "probability": 0.85107421875}, {"start": 432.6, "end": 432.84, "word": " using", "probability": 0.9169921875}, {"start": 432.84, "end": 433.2, "word": " scatter", "probability": 0.7177734375}, {"start": 433.2, "end": 433.48, "word": " plot,", "probability": 0.65869140625}, {"start": 433.76, "end": 433.98, "word": " we", "probability": 0.95458984375}, {"start": 433.98, "end": 434.44, "word": " can", "probability": 0.94384765625}, {"start": 434.44, "end": 435.68, "word": " determine", "probability": 0.92138671875}, {"start": 435.68, "end": 436.86, "word": " all", "probability": 0.947265625}, {"start": 436.86, "end": 437.0, "word": " of", "probability": 0.96630859375}, {"start": 437.0, "end": 437.28, "word": " these.", "probability": 0.45703125}], "temperature": 1.0}, {"id": 18, "seek": 45860, "start": 440.84, "end": 458.6, "text": " and direction and strength now here the two variables we are talking about are numerical variables so the two variables here are numerical variables so we are talking about quantitative variables but remember in chapter 11", "tokens": [293, 3513, 293, 3800, 586, 510, 264, 732, 9102, 321, 366, 1417, 466, 366, 
29054, 9102, 370, 264, 732, 9102, 510, 366, 29054, 9102, 370, 321, 366, 1417, 466, 27778, 9102, 457, 1604, 294, 7187, 2975], "avg_logprob": -0.29138512546951706, "compression_ratio": 1.9059829059829059, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 440.84, "end": 441.18, "word": " and", "probability": 0.11639404296875}, {"start": 441.18, "end": 441.58, "word": " direction", "probability": 0.51806640625}, {"start": 441.58, "end": 442.12, "word": " and", "probability": 0.7314453125}, {"start": 442.12, "end": 442.74, "word": " strength", "probability": 0.81298828125}, {"start": 442.74, "end": 443.88, "word": " now", "probability": 0.228515625}, {"start": 443.88, "end": 444.14, "word": " here", "probability": 0.818359375}, {"start": 444.14, "end": 444.28, "word": " the", "probability": 0.77734375}, {"start": 444.28, "end": 444.46, "word": " two", "probability": 0.91943359375}, {"start": 444.46, "end": 444.76, "word": " variables", "probability": 0.82373046875}, {"start": 444.76, "end": 444.96, "word": " we", "probability": 0.91162109375}, {"start": 444.96, "end": 445.1, "word": " are", "probability": 0.94677734375}, {"start": 445.1, "end": 445.44, "word": " talking", "probability": 0.8232421875}, {"start": 445.44, "end": 445.9, "word": " about", "probability": 0.89697265625}, {"start": 445.9, "end": 446.58, "word": " are", "probability": 0.9111328125}, {"start": 446.58, "end": 447.06, "word": " numerical", "probability": 0.7001953125}, {"start": 447.06, "end": 447.82, "word": " variables", "probability": 0.89208984375}, {"start": 447.82, "end": 448.58, "word": " so", "probability": 0.556640625}, {"start": 448.58, "end": 448.7, "word": " the", "probability": 0.759765625}, {"start": 448.7, "end": 448.88, "word": " two", "probability": 0.935546875}, {"start": 448.88, "end": 449.32, "word": " variables", "probability": 0.93798828125}, {"start": 449.32, "end": 449.68, "word": " here", "probability": 0.82861328125}, {"start": 449.68, "end": 450.02, "word": " 
are", "probability": 0.9384765625}, {"start": 450.02, "end": 450.48, "word": " numerical", "probability": 0.89794921875}, {"start": 450.48, "end": 451.18, "word": " variables", "probability": 0.90771484375}, {"start": 451.18, "end": 453.32, "word": " so", "probability": 0.708984375}, {"start": 453.32, "end": 453.46, "word": " we", "probability": 0.94873046875}, {"start": 453.46, "end": 453.58, "word": " are", "probability": 0.94384765625}, {"start": 453.58, "end": 453.98, "word": " talking", "probability": 0.84423828125}, {"start": 453.98, "end": 454.46, "word": " about", "probability": 0.896484375}, {"start": 454.46, "end": 455.22, "word": " quantitative", "probability": 0.90283203125}, {"start": 455.22, "end": 456.2, "word": " variables", "probability": 0.86083984375}, {"start": 456.2, "end": 457.34, "word": " but", "probability": 0.475830078125}, {"start": 457.34, "end": 457.74, "word": " remember", "probability": 0.66650390625}, {"start": 457.74, "end": 458.04, "word": " in", "probability": 0.92529296875}, {"start": 458.04, "end": 458.24, "word": " chapter", "probability": 0.87646484375}, {"start": 458.24, "end": 458.6, "word": " 11", "probability": 0.8349609375}], "temperature": 1.0}, {"id": 19, "seek": 48795, "start": 459.37, "end": 487.95, "text": " We talked about the relationship between two qualitative variables. So we use chi-square test. Here we are talking about something different. We are talking about numerical variables. So we can use scatter plot, number one. Next correlation analysis is used to measure the strength of the association between two variables. 
And here again, we are just talking about linear relationship.", "tokens": [492, 2825, 466, 264, 2480, 1296, 732, 31312, 9102, 13, 407, 321, 764, 13228, 12, 33292, 543, 1500, 13, 1692, 321, 366, 1417, 466, 746, 819, 13, 492, 366, 1417, 466, 29054, 9102, 13, 407, 321, 393, 764, 34951, 7542, 11, 1230, 472, 13, 3087, 20009, 5215, 307, 1143, 281, 3481, 264, 3800, 295, 264, 14598, 1296, 732, 9102, 13, 400, 510, 797, 11, 321, 366, 445, 1417, 466, 8213, 2480, 13], "avg_logprob": -0.15710616764956958, "compression_ratio": 1.7916666666666667, "no_speech_prob": 0.0, "words": [{"start": 459.37, "end": 459.61, "word": " We", "probability": 0.7861328125}, {"start": 459.61, "end": 459.85, "word": " talked", "probability": 0.81982421875}, {"start": 459.85, "end": 460.27, "word": " about", "probability": 0.90625}, {"start": 460.27, "end": 460.63, "word": " the", "probability": 0.916015625}, {"start": 460.63, "end": 461.25, "word": " relationship", "probability": 0.91455078125}, {"start": 461.25, "end": 461.69, "word": " between", "probability": 0.88671875}, {"start": 461.69, "end": 462.09, "word": " two", "probability": 0.896484375}, {"start": 462.09, "end": 463.15, "word": " qualitative", "probability": 0.93994140625}, {"start": 463.15, "end": 463.75, "word": " variables.", "probability": 0.94921875}, {"start": 464.13, "end": 464.85, "word": " So", "probability": 0.91162109375}, {"start": 464.85, "end": 465.13, "word": " we", "probability": 0.794921875}, {"start": 465.13, "end": 465.63, "word": " use", "probability": 0.525390625}, {"start": 465.63, "end": 465.87, "word": " chi", "probability": 0.35498046875}, {"start": 465.87, "end": 466.25, "word": "-square", "probability": 0.8561197916666666}, {"start": 466.25, "end": 466.53, "word": " test.", "probability": 0.84423828125}, {"start": 466.95, "end": 467.17, "word": " Here", "probability": 0.83935546875}, {"start": 467.17, "end": 467.31, "word": " we", "probability": 0.626953125}, {"start": 467.31, "end": 467.45, "word": " are", 
"probability": 0.92529296875}, {"start": 467.45, "end": 467.69, "word": " talking", "probability": 0.84375}, {"start": 467.69, "end": 467.95, "word": " about", "probability": 0.89990234375}, {"start": 467.95, "end": 468.33, "word": " something", "probability": 0.88916015625}, {"start": 468.33, "end": 468.85, "word": " different.", "probability": 0.8759765625}, {"start": 469.09, "end": 469.31, "word": " We", "probability": 0.947265625}, {"start": 469.31, "end": 469.41, "word": " are", "probability": 0.8173828125}, {"start": 469.41, "end": 469.63, "word": " talking", "probability": 0.84521484375}, {"start": 469.63, "end": 469.95, "word": " about", "probability": 0.90673828125}, {"start": 469.95, "end": 470.37, "word": " numerical", "probability": 0.845703125}, {"start": 470.37, "end": 471.11, "word": " variables.", "probability": 0.93603515625}, {"start": 471.71, "end": 472.01, "word": " So", "probability": 0.95166015625}, {"start": 472.01, "end": 472.13, "word": " we", "probability": 0.84912109375}, {"start": 472.13, "end": 472.31, "word": " can", "probability": 0.92138671875}, {"start": 472.31, "end": 472.53, "word": " use", "probability": 0.8984375}, {"start": 472.53, "end": 472.89, "word": " scatter", "probability": 0.85205078125}, {"start": 472.89, "end": 473.29, "word": " plot,", "probability": 0.88134765625}, {"start": 474.47, "end": 474.73, "word": " number", "probability": 0.93798828125}, {"start": 474.73, "end": 474.99, "word": " one.", "probability": 0.765625}, {"start": 475.49, "end": 475.91, "word": " Next", "probability": 0.93798828125}, {"start": 475.91, "end": 476.47, "word": " correlation", "probability": 0.7802734375}, {"start": 476.47, "end": 477.15, "word": " analysis", "probability": 0.87646484375}, {"start": 477.15, "end": 478.51, "word": " is", "probability": 0.91015625}, {"start": 478.51, "end": 478.83, "word": " used", "probability": 0.912109375}, {"start": 478.83, "end": 479.09, "word": " to", "probability": 0.96826171875}, {"start": 479.09, 
"end": 479.37, "word": " measure", "probability": 0.89453125}, {"start": 479.37, "end": 479.69, "word": " the", "probability": 0.91162109375}, {"start": 479.69, "end": 480.25, "word": " strength", "probability": 0.84716796875}, {"start": 480.25, "end": 481.01, "word": " of", "probability": 0.95751953125}, {"start": 481.01, "end": 481.33, "word": " the", "probability": 0.92333984375}, {"start": 481.33, "end": 482.09, "word": " association", "probability": 0.8515625}, {"start": 482.09, "end": 482.57, "word": " between", "probability": 0.86669921875}, {"start": 482.57, "end": 482.81, "word": " two", "probability": 0.9296875}, {"start": 482.81, "end": 483.23, "word": " variables.", "probability": 0.93701171875}, {"start": 483.69, "end": 483.91, "word": " And", "probability": 0.95166015625}, {"start": 483.91, "end": 484.07, "word": " here", "probability": 0.8427734375}, {"start": 484.07, "end": 484.27, "word": " again,", "probability": 0.8251953125}, {"start": 484.33, "end": 484.41, "word": " we", "probability": 0.958984375}, {"start": 484.41, "end": 484.81, "word": " are", "probability": 0.912109375}, {"start": 484.81, "end": 485.19, "word": " just", "probability": 0.90380859375}, {"start": 485.19, "end": 485.63, "word": " talking", "probability": 0.845703125}, {"start": 485.63, "end": 486.17, "word": " about", "probability": 0.908203125}, {"start": 486.17, "end": 487.21, "word": " linear", "probability": 0.9345703125}, {"start": 487.21, "end": 487.95, "word": " relationship.", "probability": 0.8955078125}], "temperature": 1.0}, {"id": 20, "seek": 51754, "start": 488.72, "end": 517.54, "text": " So this chapter just covers the linear relationship between the two variables. Because sometimes there exists non-linear relationship between the two variables. So correlation is only concerned with the strength of the relationship. No causal effect is implied with correlation. 
We just say that X affects Y, or X explains the variation in Y.", "tokens": [407, 341, 7187, 445, 10538, 264, 8213, 2480, 1296, 264, 732, 9102, 13, 1436, 2171, 456, 8198, 2107, 12, 28263, 2480, 1296, 264, 732, 9102, 13, 407, 20009, 307, 787, 5922, 365, 264, 3800, 295, 264, 2480, 13, 883, 38755, 1802, 307, 32614, 365, 20009, 13, 492, 445, 584, 300, 1783, 11807, 398, 11, 420, 1783, 13948, 264, 12990, 294, 398, 13], "avg_logprob": -0.19990079365079366, "compression_ratio": 1.8148148148148149, "no_speech_prob": 0.0, "words": [{"start": 488.72, "end": 488.98, "word": " So", "probability": 0.8193359375}, {"start": 488.98, "end": 489.16, "word": " this", "probability": 0.763671875}, {"start": 489.16, "end": 489.56, "word": " chapter", "probability": 0.85107421875}, {"start": 489.56, "end": 489.9, "word": " just", "probability": 0.87255859375}, {"start": 489.9, "end": 490.3, "word": " covers", "probability": 0.38330078125}, {"start": 490.3, "end": 491.72, "word": " the", "probability": 0.75830078125}, {"start": 491.72, "end": 492.18, "word": " linear", "probability": 0.84765625}, {"start": 492.18, "end": 492.68, "word": " relationship", "probability": 0.90283203125}, {"start": 492.68, "end": 493.04, "word": " between", "probability": 0.87060546875}, {"start": 493.04, "end": 493.34, "word": " the", "probability": 0.67041015625}, {"start": 493.34, "end": 493.46, "word": " two", "probability": 0.90625}, {"start": 493.46, "end": 493.78, "word": " variables.", "probability": 0.93017578125}, {"start": 493.94, "end": 494.12, "word": " Because", "probability": 0.6328125}, {"start": 494.12, "end": 494.72, "word": " sometimes", "probability": 0.935546875}, {"start": 494.72, "end": 495.04, "word": " there", "probability": 0.79248046875}, {"start": 495.04, "end": 495.5, "word": " exists", "probability": 0.62060546875}, {"start": 495.5, "end": 497.04, "word": " non", "probability": 0.93310546875}, {"start": 497.04, "end": 497.36, "word": "-linear", "probability": 0.6905517578125}, 
{"start": 497.36, "end": 498.66, "word": " relationship", "probability": 0.86572265625}, {"start": 498.66, "end": 499.8, "word": " between", "probability": 0.87255859375}, {"start": 499.8, "end": 500.48, "word": " the", "probability": 0.87939453125}, {"start": 500.48, "end": 500.64, "word": " two", "probability": 0.9375}, {"start": 500.64, "end": 501.12, "word": " variables.", "probability": 0.9423828125}, {"start": 502.96, "end": 503.18, "word": " So", "probability": 0.94677734375}, {"start": 503.18, "end": 503.68, "word": " correlation", "probability": 0.8671875}, {"start": 503.68, "end": 504.18, "word": " is", "probability": 0.9453125}, {"start": 504.18, "end": 504.52, "word": " only", "probability": 0.93017578125}, {"start": 504.52, "end": 505.0, "word": " concerned", "probability": 0.58740234375}, {"start": 505.0, "end": 505.2, "word": " with", "probability": 0.89306640625}, {"start": 505.2, "end": 505.36, "word": " the", "probability": 0.8779296875}, {"start": 505.36, "end": 505.76, "word": " strength", "probability": 0.845703125}, {"start": 505.76, "end": 506.12, "word": " of", "probability": 0.96630859375}, {"start": 506.12, "end": 506.26, "word": " the", "probability": 0.91455078125}, {"start": 506.26, "end": 506.94, "word": " relationship.", "probability": 0.9033203125}, {"start": 508.02, "end": 508.2, "word": " No", "probability": 0.9228515625}, {"start": 508.2, "end": 508.52, "word": " causal", "probability": 0.87646484375}, {"start": 508.52, "end": 508.94, "word": " effect", "probability": 0.90771484375}, {"start": 508.94, "end": 509.58, "word": " is", "probability": 0.93017578125}, {"start": 509.58, "end": 510.16, "word": " implied", "probability": 0.9423828125}, {"start": 510.16, "end": 510.5, "word": " with", "probability": 0.87548828125}, {"start": 510.5, "end": 511.08, "word": " correlation.", "probability": 0.92138671875}, {"start": 511.28, "end": 511.36, "word": " We", "probability": 0.94677734375}, {"start": 511.36, "end": 511.58, "word": " 
just", "probability": 0.90673828125}, {"start": 511.58, "end": 511.84, "word": " say", "probability": 0.8388671875}, {"start": 511.84, "end": 512.22, "word": " that", "probability": 0.93359375}, {"start": 512.22, "end": 513.08, "word": " X", "probability": 0.49755859375}, {"start": 513.08, "end": 513.7, "word": " affects", "probability": 0.69921875}, {"start": 513.7, "end": 514.18, "word": " Y,", "probability": 0.9873046875}, {"start": 514.6, "end": 514.84, "word": " or", "probability": 0.626953125}, {"start": 514.84, "end": 515.22, "word": " X", "probability": 0.96875}, {"start": 515.22, "end": 516.24, "word": " explains", "probability": 0.876953125}, {"start": 516.24, "end": 516.52, "word": " the", "probability": 0.91259765625}, {"start": 516.52, "end": 517.0, "word": " variation", "probability": 0.88671875}, {"start": 517.0, "end": 517.28, "word": " in", "probability": 0.904296875}, {"start": 517.28, "end": 517.54, "word": " Y.", "probability": 0.994140625}], "temperature": 1.0}, {"id": 21, "seek": 54594, "start": 518.38, "end": 545.94, "text": " Scatter plots were first presented in Chapter 2, and we skipped, if you remember, Chapter 2. And it's easy to make scatter plots for Y versus X. In Chapter 3, we talked about correlation, so correlation was first presented in Chapter 3. 
But here I will give just a review for computation about", "tokens": [2747, 1161, 28609, 645, 700, 8212, 294, 18874, 568, 11, 293, 321, 30193, 11, 498, 291, 1604, 11, 18874, 568, 13, 400, 309, 311, 1858, 281, 652, 34951, 28609, 337, 398, 5717, 1783, 13, 682, 18874, 805, 11, 321, 2825, 466, 20009, 11, 370, 20009, 390, 700, 8212, 294, 18874, 805, 13, 583, 510, 286, 486, 976, 445, 257, 3131, 337, 24903, 466], "avg_logprob": -0.2110595719423145, "compression_ratio": 1.6153846153846154, "no_speech_prob": 0.0, "words": [{"start": 518.38, "end": 518.94, "word": " Scatter", "probability": 0.7314453125}, {"start": 518.94, "end": 519.2, "word": " plots", "probability": 0.86572265625}, {"start": 519.2, "end": 519.58, "word": " were", "probability": 0.86376953125}, {"start": 519.58, "end": 520.1, "word": " first", "probability": 0.87890625}, {"start": 520.1, "end": 520.74, "word": " presented", "probability": 0.75439453125}, {"start": 520.74, "end": 521.06, "word": " in", "probability": 0.94091796875}, {"start": 521.06, "end": 521.4, "word": " Chapter", "probability": 0.3232421875}, {"start": 521.4, "end": 521.8, "word": " 2,", "probability": 0.5947265625}, {"start": 522.12, "end": 522.36, "word": " and", "probability": 0.9111328125}, {"start": 522.36, "end": 522.76, "word": " we", "probability": 0.95556640625}, {"start": 522.76, "end": 523.48, "word": " skipped,", "probability": 0.83544921875}, {"start": 523.6, "end": 523.72, "word": " if", "probability": 0.93603515625}, {"start": 523.72, "end": 523.78, "word": " you", "probability": 0.96435546875}, {"start": 523.78, "end": 524.0, "word": " remember,", "probability": 0.880859375}, {"start": 524.14, "end": 524.3, "word": " Chapter", "probability": 0.87841796875}, {"start": 524.3, "end": 524.58, "word": " 2.", "probability": 0.99609375}, {"start": 525.72, "end": 526.12, "word": " And", "probability": 0.75830078125}, {"start": 526.12, "end": 526.48, "word": " it's", "probability": 0.9541015625}, {"start": 526.48, "end": 526.78, 
"word": " easy", "probability": 0.8720703125}, {"start": 526.78, "end": 527.4, "word": " to", "probability": 0.9697265625}, {"start": 527.4, "end": 528.48, "word": " make", "probability": 0.8466796875}, {"start": 528.48, "end": 528.86, "word": " scatter", "probability": 0.9306640625}, {"start": 528.86, "end": 529.14, "word": " plots", "probability": 0.81591796875}, {"start": 529.14, "end": 529.36, "word": " for", "probability": 0.94482421875}, {"start": 529.36, "end": 529.56, "word": " Y", "probability": 0.62890625}, {"start": 529.56, "end": 529.88, "word": " versus", "probability": 0.8740234375}, {"start": 529.88, "end": 530.28, "word": " X.", "probability": 0.9951171875}, {"start": 531.1, "end": 531.26, "word": " In", "probability": 0.8427734375}, {"start": 531.26, "end": 531.48, "word": " Chapter", "probability": 0.88818359375}, {"start": 531.48, "end": 531.86, "word": " 3,", "probability": 0.98681640625}, {"start": 532.24, "end": 532.62, "word": " we", "probability": 0.9609375}, {"start": 532.62, "end": 533.74, "word": " talked", "probability": 0.87548828125}, {"start": 533.74, "end": 534.1, "word": " about", "probability": 0.90673828125}, {"start": 534.1, "end": 534.6, "word": " correlation,", "probability": 0.572265625}, {"start": 534.76, "end": 534.88, "word": " so", "probability": 0.9521484375}, {"start": 534.88, "end": 535.36, "word": " correlation", "probability": 0.9296875}, {"start": 535.36, "end": 536.14, "word": " was", "probability": 0.94970703125}, {"start": 536.14, "end": 536.44, "word": " first", "probability": 0.8603515625}, {"start": 536.44, "end": 536.9, "word": " presented", "probability": 0.775390625}, {"start": 536.9, "end": 537.12, "word": " in", "probability": 0.93359375}, {"start": 537.12, "end": 537.3, "word": " Chapter", "probability": 0.91357421875}, {"start": 537.3, "end": 537.66, "word": " 3.", "probability": 0.99609375}, {"start": 538.42, "end": 538.62, "word": " But", "probability": 0.9501953125}, {"start": 538.62, "end": 538.76, 
"word": " here", "probability": 0.82421875}, {"start": 538.76, "end": 538.88, "word": " I", "probability": 0.572265625}, {"start": 538.88, "end": 538.96, "word": " will", "probability": 0.853515625}, {"start": 538.96, "end": 539.2, "word": " give", "probability": 0.7666015625}, {"start": 539.2, "end": 540.06, "word": " just", "probability": 0.8798828125}, {"start": 540.06, "end": 542.6, "word": " a", "probability": 0.50244140625}, {"start": 542.6, "end": 543.48, "word": " review", "probability": 0.83642578125}, {"start": 543.48, "end": 544.08, "word": " for", "probability": 0.9453125}, {"start": 544.08, "end": 545.44, "word": " computation", "probability": 0.73486328125}, {"start": 545.44, "end": 545.94, "word": " about", "probability": 0.9033203125}], "temperature": 1.0}, {"id": 22, "seek": 56998, "start": 546.46, "end": 569.98, "text": " correlation coefficient or coefficient of correlation. First, coefficient of correlation measures the relative strength of the linear relationship between two numerical variables. So here, we are talking about numerical variables. 
Sample correlation coefficient is given by this equation.", "tokens": [20009, 17619, 420, 17619, 295, 20009, 13, 2386, 11, 17619, 295, 20009, 8000, 264, 4972, 3800, 295, 264, 8213, 2480, 1296, 732, 29054, 9102, 13, 407, 510, 11, 321, 366, 1417, 466, 29054, 9102, 13, 4832, 781, 20009, 17619, 307, 2212, 538, 341, 5367, 13], "avg_logprob": -0.16601561852123425, "compression_ratio": 1.9266666666666667, "no_speech_prob": 0.0, "words": [{"start": 546.46, "end": 547.24, "word": " correlation", "probability": 0.61328125}, {"start": 547.24, "end": 548.12, "word": " coefficient", "probability": 0.75439453125}, {"start": 548.12, "end": 548.46, "word": " or", "probability": 0.71484375}, {"start": 548.46, "end": 548.96, "word": " coefficient", "probability": 0.91259765625}, {"start": 548.96, "end": 549.18, "word": " of", "probability": 0.93017578125}, {"start": 549.18, "end": 549.64, "word": " correlation.", "probability": 0.912109375}, {"start": 550.82, "end": 551.46, "word": " First,", "probability": 0.70703125}, {"start": 552.8, "end": 553.48, "word": " coefficient", "probability": 0.8310546875}, {"start": 553.48, "end": 553.7, "word": " of", "probability": 0.461669921875}, {"start": 553.7, "end": 554.1, "word": " correlation", "probability": 0.91650390625}, {"start": 554.1, "end": 554.52, "word": " measures", "probability": 0.810546875}, {"start": 554.52, "end": 555.24, "word": " the", "probability": 0.88623046875}, {"start": 555.24, "end": 555.68, "word": " relative", "probability": 0.78466796875}, {"start": 555.68, "end": 556.24, "word": " strength", "probability": 0.849609375}, {"start": 556.24, "end": 557.68, "word": " of", "probability": 0.90869140625}, {"start": 557.68, "end": 558.04, "word": " the", "probability": 0.89306640625}, {"start": 558.04, "end": 558.58, "word": " linear", "probability": 0.89599609375}, {"start": 558.58, "end": 559.32, "word": " relationship", "probability": 0.9150390625}, {"start": 559.32, "end": 559.68, "word": " between", "probability": 
0.8759765625}, {"start": 559.68, "end": 559.92, "word": " two", "probability": 0.90625}, {"start": 559.92, "end": 560.34, "word": " numerical", "probability": 0.9541015625}, {"start": 560.34, "end": 560.82, "word": " variables.", "probability": 0.9384765625}, {"start": 561.0, "end": 561.12, "word": " So", "probability": 0.919921875}, {"start": 561.12, "end": 561.34, "word": " here,", "probability": 0.68310546875}, {"start": 562.3, "end": 562.66, "word": " we", "probability": 0.95166015625}, {"start": 562.66, "end": 562.86, "word": " are", "probability": 0.93359375}, {"start": 562.86, "end": 563.26, "word": " talking", "probability": 0.84521484375}, {"start": 563.26, "end": 563.74, "word": " about", "probability": 0.8994140625}, {"start": 563.74, "end": 564.62, "word": " numerical", "probability": 0.7900390625}, {"start": 564.62, "end": 565.3, "word": " variables.", "probability": 0.92529296875}, {"start": 567.06, "end": 567.56, "word": " Sample", "probability": 0.92236328125}, {"start": 567.56, "end": 568.08, "word": " correlation", "probability": 0.9228515625}, {"start": 568.08, "end": 568.64, "word": " coefficient", "probability": 0.96630859375}, {"start": 568.64, "end": 568.94, "word": " is", "probability": 0.9482421875}, {"start": 568.94, "end": 569.12, "word": " given", "probability": 0.896484375}, {"start": 569.12, "end": 569.32, "word": " by", "probability": 0.9677734375}, {"start": 569.32, "end": 569.52, "word": " this", "probability": 0.94580078125}, {"start": 569.52, "end": 569.98, "word": " equation.", "probability": 0.97900390625}], "temperature": 1.0}, {"id": 23, "seek": 60072, "start": 571.08, "end": 600.72, "text": " which is sum of the product of xi minus x bar, yi minus y bar, divided by n minus 1 times standard deviation of x times standard deviation of y. We know that x bar and y bar are the means of x and y respectively. And Sx, Sy are the standard deviations of x and y values. And we know this equation before. 
But there is another equation that one can be used", "tokens": [597, 307, 2408, 295, 264, 1674, 295, 36800, 3175, 2031, 2159, 11, 288, 72, 3175, 288, 2159, 11, 6666, 538, 297, 3175, 502, 1413, 3832, 25163, 295, 2031, 1413, 3832, 25163, 295, 288, 13, 492, 458, 300, 2031, 2159, 293, 288, 2159, 366, 264, 1355, 295, 2031, 293, 288, 25009, 13, 400, 318, 87, 11, 3902, 366, 264, 3832, 31219, 763, 295, 2031, 293, 288, 4190, 13, 400, 321, 458, 341, 5367, 949, 13, 583, 456, 307, 1071, 5367, 300, 472, 393, 312, 1143], "avg_logprob": -0.150183824931874, "compression_ratio": 1.8835978835978835, "no_speech_prob": 0.0, "words": [{"start": 571.08, "end": 571.34, "word": " which", "probability": 0.401611328125}, {"start": 571.34, "end": 571.5, "word": " is", "probability": 0.94873046875}, {"start": 571.5, "end": 571.86, "word": " sum", "probability": 0.83740234375}, {"start": 571.86, "end": 573.16, "word": " of", "probability": 0.9501953125}, {"start": 573.16, "end": 573.32, "word": " the", "probability": 0.90283203125}, {"start": 573.32, "end": 573.74, "word": " product", "probability": 0.9326171875}, {"start": 573.74, "end": 574.1, "word": " of", "probability": 0.955078125}, {"start": 574.1, "end": 574.4, "word": " xi", "probability": 0.447265625}, {"start": 574.4, "end": 574.76, "word": " minus", "probability": 0.900390625}, {"start": 574.76, "end": 575.0, "word": " x", "probability": 0.9365234375}, {"start": 575.0, "end": 575.24, "word": " bar,", "probability": 0.84716796875}, {"start": 575.32, "end": 575.64, "word": " yi", "probability": 0.90087890625}, {"start": 575.64, "end": 575.96, "word": " minus", "probability": 0.98876953125}, {"start": 575.96, "end": 576.18, "word": " y", "probability": 0.99609375}, {"start": 576.18, "end": 576.48, "word": " bar,", "probability": 0.94873046875}, {"start": 576.98, "end": 577.62, "word": " divided", "probability": 0.86328125}, {"start": 577.62, "end": 578.06, "word": " by", "probability": 0.96923828125}, {"start": 578.06, "end": 578.98, 
"word": " n", "probability": 0.83984375}, {"start": 578.98, "end": 579.26, "word": " minus", "probability": 0.97900390625}, {"start": 579.26, "end": 579.54, "word": " 1", "probability": 0.693359375}, {"start": 579.54, "end": 580.02, "word": " times", "probability": 0.88232421875}, {"start": 580.02, "end": 580.72, "word": " standard", "probability": 0.86181640625}, {"start": 580.72, "end": 581.1, "word": " deviation", "probability": 0.88818359375}, {"start": 581.1, "end": 581.28, "word": " of", "probability": 0.9462890625}, {"start": 581.28, "end": 581.52, "word": " x", "probability": 0.97314453125}, {"start": 581.52, "end": 581.94, "word": " times", "probability": 0.8486328125}, {"start": 581.94, "end": 582.26, "word": " standard", "probability": 0.8935546875}, {"start": 582.26, "end": 582.52, "word": " deviation", "probability": 0.9130859375}, {"start": 582.52, "end": 582.78, "word": " of", "probability": 0.9677734375}, {"start": 582.78, "end": 583.02, "word": " y.", "probability": 0.9970703125}, {"start": 583.86, "end": 584.4, "word": " We", "probability": 0.88916015625}, {"start": 584.4, "end": 584.54, "word": " know", "probability": 0.8818359375}, {"start": 584.54, "end": 584.74, "word": " that", "probability": 0.93359375}, {"start": 584.74, "end": 584.96, "word": " x", "probability": 0.99267578125}, {"start": 584.96, "end": 585.12, "word": " bar", "probability": 0.923828125}, {"start": 585.12, "end": 585.28, "word": " and", "probability": 0.94580078125}, {"start": 585.28, "end": 585.46, "word": " y", "probability": 0.99853515625}, {"start": 585.46, "end": 585.66, "word": " bar", "probability": 0.95068359375}, {"start": 585.66, "end": 585.86, "word": " are", "probability": 0.94091796875}, {"start": 585.86, "end": 586.0, "word": " the", "probability": 0.9267578125}, {"start": 586.0, "end": 586.32, "word": " means", "probability": 0.86474609375}, {"start": 586.32, "end": 586.68, "word": " of", "probability": 0.96337890625}, {"start": 586.68, "end": 586.86, 
"word": " x", "probability": 0.9853515625}, {"start": 586.86, "end": 587.04, "word": " and", "probability": 0.9423828125}, {"start": 587.04, "end": 587.24, "word": " y", "probability": 0.99658203125}, {"start": 587.24, "end": 587.78, "word": " respectively.", "probability": 0.79296875}, {"start": 589.14, "end": 589.58, "word": " And", "probability": 0.9560546875}, {"start": 589.58, "end": 590.24, "word": " Sx,", "probability": 0.586669921875}, {"start": 590.46, "end": 590.58, "word": " Sy", "probability": 0.90966796875}, {"start": 590.58, "end": 590.92, "word": " are", "probability": 0.90673828125}, {"start": 590.92, "end": 591.1, "word": " the", "probability": 0.90234375}, {"start": 591.1, "end": 591.36, "word": " standard", "probability": 0.95068359375}, {"start": 591.36, "end": 592.02, "word": " deviations", "probability": 0.927734375}, {"start": 592.02, "end": 592.66, "word": " of", "probability": 0.96337890625}, {"start": 592.66, "end": 592.96, "word": " x", "probability": 0.98486328125}, {"start": 592.96, "end": 593.2, "word": " and", "probability": 0.9462890625}, {"start": 593.2, "end": 593.4, "word": " y", "probability": 0.998046875}, {"start": 593.4, "end": 593.9, "word": " values.", "probability": 0.91455078125}, {"start": 594.44, "end": 594.84, "word": " And", "probability": 0.94775390625}, {"start": 594.84, "end": 595.0, "word": " we", "probability": 0.94921875}, {"start": 595.0, "end": 595.2, "word": " know", "probability": 0.87841796875}, {"start": 595.2, "end": 595.54, "word": " this", "probability": 0.94287109375}, {"start": 595.54, "end": 596.12, "word": " equation", "probability": 0.95751953125}, {"start": 596.12, "end": 596.52, "word": " before.", "probability": 0.74169921875}, {"start": 596.9, "end": 597.16, "word": " But", "probability": 0.94775390625}, {"start": 597.16, "end": 597.36, "word": " there", "probability": 0.88818359375}, {"start": 597.36, "end": 597.5, "word": " is", "probability": 0.9384765625}, {"start": 597.5, "end": 597.86, 
"word": " another", "probability": 0.91845703125}, {"start": 597.86, "end": 598.46, "word": " equation", "probability": 0.9775390625}, {"start": 598.46, "end": 599.74, "word": " that", "probability": 0.5673828125}, {"start": 599.74, "end": 599.96, "word": " one", "probability": 0.87744140625}, {"start": 599.96, "end": 600.18, "word": " can", "probability": 0.9443359375}, {"start": 600.18, "end": 600.36, "word": " be", "probability": 0.9453125}, {"start": 600.36, "end": 600.72, "word": " used", "probability": 0.9111328125}], "temperature": 1.0}, {"id": 24, "seek": 62630, "start": 602.05, "end": 626.31, "text": " For computation, which is called shortcut formula, which is just sum of xy minus n times x bar y bar divided by square root of this quantity. And we know this equation from chapter three. Now again, x bar and y bar are the means. Now the question is,", "tokens": [1171, 24903, 11, 597, 307, 1219, 24822, 8513, 11, 597, 307, 445, 2408, 295, 2031, 88, 3175, 297, 1413, 2031, 2159, 288, 2159, 6666, 538, 3732, 5593, 295, 341, 11275, 13, 400, 321, 458, 341, 5367, 490, 7187, 1045, 13, 823, 797, 11, 2031, 2159, 293, 288, 2159, 366, 264, 1355, 13, 823, 264, 1168, 307, 11], "avg_logprob": -0.20945582538843155, "compression_ratio": 1.5120481927710843, "no_speech_prob": 0.0, "words": [{"start": 602.0500000000001, "end": 602.69, "word": " For", "probability": 0.466796875}, {"start": 602.69, "end": 603.33, "word": " computation,", "probability": 0.78564453125}, {"start": 604.27, "end": 605.21, "word": " which", "probability": 0.5390625}, {"start": 605.21, "end": 605.33, "word": " is", "probability": 0.9423828125}, {"start": 605.33, "end": 605.55, "word": " called", "probability": 0.84521484375}, {"start": 605.55, "end": 606.01, "word": " shortcut", "probability": 0.69140625}, {"start": 606.01, "end": 606.59, "word": " formula,", "probability": 0.90966796875}, {"start": 607.51, "end": 607.77, "word": " which", "probability": 0.89599609375}, {"start": 607.77, "end": 607.89, 
"word": " is", "probability": 0.95556640625}, {"start": 607.89, "end": 608.21, "word": " just", "probability": 0.8935546875}, {"start": 608.21, "end": 608.59, "word": " sum", "probability": 0.8740234375}, {"start": 608.59, "end": 608.71, "word": " of", "probability": 0.96533203125}, {"start": 608.71, "end": 609.29, "word": " xy", "probability": 0.6435546875}, {"start": 609.29, "end": 610.87, "word": " minus", "probability": 0.78076171875}, {"start": 610.87, "end": 611.25, "word": " n", "probability": 0.78271484375}, {"start": 611.25, "end": 611.63, "word": " times", "probability": 0.89697265625}, {"start": 611.63, "end": 611.95, "word": " x", "probability": 0.98486328125}, {"start": 611.95, "end": 612.23, "word": " bar", "probability": 0.8525390625}, {"start": 612.23, "end": 612.43, "word": " y", "probability": 0.81982421875}, {"start": 612.43, "end": 612.79, "word": " bar", "probability": 0.95263671875}, {"start": 612.79, "end": 614.33, "word": " divided", "probability": 0.381591796875}, {"start": 614.33, "end": 614.67, "word": " by", "probability": 0.97021484375}, {"start": 614.67, "end": 615.03, "word": " square", "probability": 0.77685546875}, {"start": 615.03, "end": 615.31, "word": " root", "probability": 0.9375}, {"start": 615.31, "end": 615.49, "word": " of", "probability": 0.9638671875}, {"start": 615.49, "end": 615.75, "word": " this", "probability": 0.9462890625}, {"start": 615.75, "end": 616.19, "word": " quantity.", "probability": 0.89892578125}, {"start": 617.43, "end": 617.63, "word": " And", "probability": 0.88623046875}, {"start": 617.63, "end": 617.75, "word": " we", "probability": 0.94091796875}, {"start": 617.75, "end": 617.89, "word": " know", "probability": 0.875}, {"start": 617.89, "end": 618.09, "word": " this", "probability": 0.93701171875}, {"start": 618.09, "end": 618.47, "word": " equation", "probability": 0.931640625}, {"start": 618.47, "end": 618.69, "word": " from", "probability": 0.875}, {"start": 618.69, "end": 619.01, "word": " 
chapter", "probability": 0.7255859375}, {"start": 619.01, "end": 619.35, "word": " three.", "probability": 0.65673828125}, {"start": 620.89, "end": 621.53, "word": " Now", "probability": 0.96240234375}, {"start": 621.53, "end": 621.87, "word": " again,", "probability": 0.8134765625}, {"start": 622.33, "end": 622.61, "word": " x", "probability": 0.9921875}, {"start": 622.61, "end": 622.79, "word": " bar", "probability": 0.94677734375}, {"start": 622.79, "end": 622.97, "word": " and", "probability": 0.94677734375}, {"start": 622.97, "end": 623.15, "word": " y", "probability": 0.99755859375}, {"start": 623.15, "end": 623.33, "word": " bar", "probability": 0.94775390625}, {"start": 623.33, "end": 623.49, "word": " are", "probability": 0.94677734375}, {"start": 623.49, "end": 623.65, "word": " the", "probability": 0.931640625}, {"start": 623.65, "end": 623.93, "word": " means.", "probability": 0.83984375}, {"start": 625.25, "end": 625.43, "word": " Now", "probability": 0.9052734375}, {"start": 625.43, "end": 625.59, "word": " the", "probability": 0.720703125}, {"start": 625.59, "end": 625.89, "word": " question", "probability": 0.9208984375}, {"start": 625.89, "end": 626.31, "word": " is,", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 25, "seek": 65594, "start": 627.28, "end": 655.94, "text": " Do outliers affect the correlation? For sure, yes. Because this formula actually based on the means and the standard deviations, and these two measures are affected by outliers. 
So since R is a function of these two statistics, the means and standard deviations, then outliers will affect the value of the correlation coefficient.", "tokens": [1144, 484, 23646, 3345, 264, 20009, 30, 1171, 988, 11, 2086, 13, 1436, 341, 8513, 767, 2361, 322, 264, 1355, 293, 264, 3832, 31219, 763, 11, 293, 613, 732, 8000, 366, 8028, 538, 484, 23646, 13, 407, 1670, 497, 307, 257, 2445, 295, 613, 732, 12523, 11, 264, 1355, 293, 3832, 31219, 763, 11, 550, 484, 23646, 486, 3345, 264, 2158, 295, 264, 20009, 17619, 13], "avg_logprob": -0.1378264858651517, "compression_ratio": 1.8087431693989071, "no_speech_prob": 0.0, "words": [{"start": 627.28, "end": 627.62, "word": " Do", "probability": 0.70458984375}, {"start": 627.62, "end": 628.28, "word": " outliers", "probability": 0.91259765625}, {"start": 628.28, "end": 629.84, "word": " affect", "probability": 0.7958984375}, {"start": 629.84, "end": 630.06, "word": " the", "probability": 0.91455078125}, {"start": 630.06, "end": 630.52, "word": " correlation?", "probability": 0.912109375}, {"start": 631.8, "end": 632.3, "word": " For", "probability": 0.78076171875}, {"start": 632.3, "end": 632.62, "word": " sure,", "probability": 0.91748046875}, {"start": 632.72, "end": 632.98, "word": " yes.", "probability": 0.95751953125}, {"start": 633.46, "end": 634.02, "word": " Because", "probability": 0.8818359375}, {"start": 634.02, "end": 636.06, "word": " this", "probability": 0.86572265625}, {"start": 636.06, "end": 636.44, "word": " formula", "probability": 0.9130859375}, {"start": 636.44, "end": 636.88, "word": " actually", "probability": 0.72216796875}, {"start": 636.88, "end": 637.28, "word": " based", "probability": 0.61328125}, {"start": 637.28, "end": 637.68, "word": " on", "probability": 0.9482421875}, {"start": 637.68, "end": 638.36, "word": " the", "probability": 0.9111328125}, {"start": 638.36, "end": 638.76, "word": " means", "probability": 0.9033203125}, {"start": 638.76, "end": 639.3, "word": " and", "probability": 
0.92626953125}, {"start": 639.3, "end": 639.68, "word": " the", "probability": 0.8486328125}, {"start": 639.68, "end": 639.94, "word": " standard", "probability": 0.94287109375}, {"start": 639.94, "end": 640.62, "word": " deviations,", "probability": 0.938232421875}, {"start": 641.16, "end": 641.46, "word": " and", "probability": 0.9287109375}, {"start": 641.46, "end": 641.76, "word": " these", "probability": 0.849609375}, {"start": 641.76, "end": 641.98, "word": " two", "probability": 0.92626953125}, {"start": 641.98, "end": 642.4, "word": " measures", "probability": 0.81640625}, {"start": 642.4, "end": 643.64, "word": " are", "probability": 0.9189453125}, {"start": 643.64, "end": 644.04, "word": " affected", "probability": 0.85791015625}, {"start": 644.04, "end": 644.3, "word": " by", "probability": 0.9677734375}, {"start": 644.3, "end": 644.78, "word": " outliers.", "probability": 0.9482421875}, {"start": 645.56, "end": 645.74, "word": " So", "probability": 0.93701171875}, {"start": 645.74, "end": 646.1, "word": " since", "probability": 0.662109375}, {"start": 646.1, "end": 646.38, "word": " R", "probability": 0.744140625}, {"start": 646.38, "end": 646.48, "word": " is", "probability": 0.931640625}, {"start": 646.48, "end": 646.6, "word": " a", "probability": 0.94091796875}, {"start": 646.6, "end": 646.96, "word": " function", "probability": 0.96240234375}, {"start": 646.96, "end": 647.28, "word": " of", "probability": 0.9658203125}, {"start": 647.28, "end": 647.54, "word": " these", "probability": 0.8544921875}, {"start": 647.54, "end": 647.88, "word": " two", "probability": 0.92626953125}, {"start": 647.88, "end": 648.72, "word": " statistics,", "probability": 0.77490234375}, {"start": 649.6, "end": 649.84, "word": " the", "probability": 0.91943359375}, {"start": 649.84, "end": 650.16, "word": " means", "probability": 0.9091796875}, {"start": 650.16, "end": 650.44, "word": " and", "probability": 0.9189453125}, {"start": 650.44, "end": 650.78, "word": " 
standard", "probability": 0.84228515625}, {"start": 650.78, "end": 651.34, "word": " deviations,", "probability": 0.94873046875}, {"start": 651.94, "end": 652.08, "word": " then", "probability": 0.8466796875}, {"start": 652.08, "end": 652.64, "word": " outliers", "probability": 0.946044921875}, {"start": 652.64, "end": 652.86, "word": " will", "probability": 0.88134765625}, {"start": 652.86, "end": 653.26, "word": " affect", "probability": 0.87841796875}, {"start": 653.26, "end": 653.62, "word": " the", "probability": 0.916015625}, {"start": 653.62, "end": 653.94, "word": " value", "probability": 0.98046875}, {"start": 653.94, "end": 654.16, "word": " of", "probability": 0.96728515625}, {"start": 654.16, "end": 654.28, "word": " the", "probability": 0.9052734375}, {"start": 654.28, "end": 654.72, "word": " correlation", "probability": 0.9130859375}, {"start": 654.72, "end": 655.94, "word": " coefficient.", "probability": 0.94091796875}], "temperature": 1.0}, {"id": 26, "seek": 68609, "start": 657.89, "end": 686.09, "text": " Some features about the coefficient of correlation. Here rho is the population coefficient of correlation, and R is the sample coefficient of correlation. Either rho or R have the following features. Number one, unity free. It means R has no units. 
For example, here we are talking about whales.", "tokens": [2188, 4122, 466, 264, 17619, 295, 20009, 13, 1692, 20293, 307, 264, 4415, 17619, 295, 20009, 11, 293, 497, 307, 264, 6889, 17619, 295, 20009, 13, 13746, 20293, 420, 497, 362, 264, 3480, 4122, 13, 5118, 472, 11, 18205, 1737, 13, 467, 1355, 497, 575, 572, 6815, 13, 1171, 1365, 11, 510, 321, 366, 1417, 466, 32403, 13], "avg_logprob": -0.19650423375226683, "compression_ratio": 1.7514792899408285, "no_speech_prob": 0.0, "words": [{"start": 657.89, "end": 658.27, "word": " Some", "probability": 0.662109375}, {"start": 658.27, "end": 658.83, "word": " features", "probability": 0.744140625}, {"start": 658.83, "end": 659.33, "word": " about", "probability": 0.90234375}, {"start": 659.33, "end": 660.19, "word": " the", "probability": 0.5634765625}, {"start": 660.19, "end": 660.83, "word": " coefficient", "probability": 0.736328125}, {"start": 660.83, "end": 661.17, "word": " of", "probability": 0.9609375}, {"start": 661.17, "end": 661.71, "word": " correlation.", "probability": 0.9248046875}, {"start": 663.39, "end": 663.77, "word": " Here", "probability": 0.71435546875}, {"start": 663.77, "end": 664.55, "word": " rho", "probability": 0.4912109375}, {"start": 664.55, "end": 667.25, "word": " is", "probability": 0.81396484375}, {"start": 667.25, "end": 669.19, "word": " the", "probability": 0.76904296875}, {"start": 669.19, "end": 669.57, "word": " population", "probability": 0.91845703125}, {"start": 669.57, "end": 670.29, "word": " coefficient", "probability": 0.54345703125}, {"start": 670.29, "end": 670.57, "word": " of", "probability": 0.73291015625}, {"start": 670.57, "end": 671.01, "word": " correlation,", "probability": 0.91064453125}, {"start": 671.89, "end": 672.01, "word": " and", "probability": 0.9345703125}, {"start": 672.01, "end": 672.43, "word": " R", "probability": 0.76220703125}, {"start": 672.43, "end": 672.69, "word": " is", "probability": 0.9521484375}, {"start": 672.69, "end": 672.95, "word": " 
the", "probability": 0.91015625}, {"start": 672.95, "end": 673.21, "word": " sample", "probability": 0.779296875}, {"start": 673.21, "end": 673.81, "word": " coefficient", "probability": 0.9482421875}, {"start": 673.81, "end": 674.09, "word": " of", "probability": 0.93505859375}, {"start": 674.09, "end": 674.49, "word": " correlation.", "probability": 0.93603515625}, {"start": 675.71, "end": 676.11, "word": " Either", "probability": 0.884765625}, {"start": 676.11, "end": 676.61, "word": " rho", "probability": 0.92724609375}, {"start": 676.61, "end": 677.05, "word": " or", "probability": 0.95849609375}, {"start": 677.05, "end": 677.43, "word": " R", "probability": 0.97412109375}, {"start": 677.43, "end": 677.73, "word": " have", "probability": 0.8896484375}, {"start": 677.73, "end": 677.89, "word": " the", "probability": 0.908203125}, {"start": 677.89, "end": 678.29, "word": " following", "probability": 0.89453125}, {"start": 678.29, "end": 679.03, "word": " features.", "probability": 0.740234375}, {"start": 679.51, "end": 679.69, "word": " Number", "probability": 0.841796875}, {"start": 679.69, "end": 679.91, "word": " one,", "probability": 0.818359375}, {"start": 679.97, "end": 680.17, "word": " unity", "probability": 0.90869140625}, {"start": 680.17, "end": 680.45, "word": " free.", "probability": 0.53466796875}, {"start": 681.17, "end": 681.39, "word": " It", "probability": 0.9541015625}, {"start": 681.39, "end": 681.71, "word": " means", "probability": 0.92724609375}, {"start": 681.71, "end": 681.99, "word": " R", "probability": 0.95849609375}, {"start": 681.99, "end": 682.25, "word": " has", "probability": 0.94677734375}, {"start": 682.25, "end": 682.41, "word": " no", "probability": 0.93408203125}, {"start": 682.41, "end": 682.77, "word": " units.", "probability": 0.6484375}, {"start": 683.05, "end": 683.17, "word": " For", "probability": 0.95458984375}, {"start": 683.17, "end": 683.57, "word": " example,", "probability": 0.9736328125}, {"start": 684.19, 
"end": 684.61, "word": " here", "probability": 0.8583984375}, {"start": 684.61, "end": 684.77, "word": " we", "probability": 0.89453125}, {"start": 684.77, "end": 684.89, "word": " are", "probability": 0.92236328125}, {"start": 684.89, "end": 685.21, "word": " talking", "probability": 0.8466796875}, {"start": 685.21, "end": 685.61, "word": " about", "probability": 0.91845703125}, {"start": 685.61, "end": 686.09, "word": " whales.", "probability": 0.72412109375}], "temperature": 1.0}, {"id": 27, "seek": 70764, "start": 686.96, "end": 707.64, "text": " And weight in kilograms, ages in years. And for example, suppose the correlation between these two variables is 0.8. It's unity free, so it's just 0.8. So there is no unit. You cannot say 0.8 kilogram per year or whatever it is. So just 0.8.", "tokens": [400, 3364, 294, 30690, 11, 12357, 294, 924, 13, 400, 337, 1365, 11, 7297, 264, 20009, 1296, 613, 732, 9102, 307, 1958, 13, 23, 13, 467, 311, 18205, 1737, 11, 370, 309, 311, 445, 1958, 13, 23, 13, 407, 456, 307, 572, 4985, 13, 509, 2644, 584, 1958, 13, 23, 21741, 680, 1064, 420, 2035, 309, 307, 13, 407, 445, 1958, 13, 23, 13], "avg_logprob": -0.1860576923076923, "compression_ratio": 1.4846625766871167, "no_speech_prob": 0.0, "words": [{"start": 686.96, "end": 687.32, "word": " And", "probability": 0.457763671875}, {"start": 687.32, "end": 687.92, "word": " weight", "probability": 0.79248046875}, {"start": 687.92, "end": 688.32, "word": " in", "probability": 0.85205078125}, {"start": 688.32, "end": 688.82, "word": " kilograms,", "probability": 0.79443359375}, {"start": 689.3, "end": 689.74, "word": " ages", "probability": 0.56005859375}, {"start": 689.74, "end": 689.96, "word": " in", "probability": 0.45751953125}, {"start": 689.96, "end": 690.3, "word": " years.", "probability": 0.93798828125}, {"start": 692.0, "end": 692.28, "word": " And", "probability": 0.71826171875}, {"start": 692.28, "end": 692.48, "word": " for", "probability": 0.80615234375}, {"start": 692.48, 
"end": 692.86, "word": " example,", "probability": 0.97216796875}, {"start": 693.0, "end": 693.42, "word": " suppose", "probability": 0.83837890625}, {"start": 693.42, "end": 693.7, "word": " the", "probability": 0.81591796875}, {"start": 693.7, "end": 694.16, "word": " correlation", "probability": 0.91455078125}, {"start": 694.16, "end": 694.66, "word": " between", "probability": 0.88720703125}, {"start": 694.66, "end": 695.52, "word": " these", "probability": 0.8544921875}, {"start": 695.52, "end": 695.74, "word": " two", "probability": 0.89990234375}, {"start": 695.74, "end": 696.16, "word": " variables", "probability": 0.9267578125}, {"start": 696.16, "end": 696.44, "word": " is", "probability": 0.9443359375}, {"start": 696.44, "end": 696.7, "word": " 0", "probability": 0.70703125}, {"start": 696.7, "end": 697.08, "word": ".8.", "probability": 0.9931640625}, {"start": 698.62, "end": 699.12, "word": " It's", "probability": 0.91748046875}, {"start": 699.12, "end": 699.32, "word": " unity", "probability": 0.748046875}, {"start": 699.32, "end": 699.58, "word": " free,", "probability": 0.5390625}, {"start": 699.74, "end": 699.9, "word": " so", "probability": 0.93603515625}, {"start": 699.9, "end": 700.14, "word": " it's", "probability": 0.77490234375}, {"start": 700.14, "end": 700.38, "word": " just", "probability": 0.9189453125}, {"start": 700.38, "end": 700.68, "word": " 0", "probability": 0.951171875}, {"start": 700.68, "end": 701.04, "word": ".8.", "probability": 0.99853515625}, {"start": 701.14, "end": 701.28, "word": " So", "probability": 0.9423828125}, {"start": 701.28, "end": 701.44, "word": " there", "probability": 0.81103515625}, {"start": 701.44, "end": 701.58, "word": " is", "probability": 0.8759765625}, {"start": 701.58, "end": 701.76, "word": " no", "probability": 0.943359375}, {"start": 701.76, "end": 702.0, "word": " unit.", "probability": 0.80078125}, {"start": 702.32, "end": 702.46, "word": " You", "probability": 0.92138671875}, {"start": 702.46, 
"end": 702.72, "word": " cannot", "probability": 0.87255859375}, {"start": 702.72, "end": 703.18, "word": " say", "probability": 0.94775390625}, {"start": 703.18, "end": 704.18, "word": " 0", "probability": 0.80615234375}, {"start": 704.18, "end": 704.5, "word": ".8", "probability": 0.998779296875}, {"start": 704.5, "end": 704.98, "word": " kilogram", "probability": 0.60302734375}, {"start": 704.98, "end": 705.24, "word": " per", "probability": 0.8388671875}, {"start": 705.24, "end": 705.5, "word": " year", "probability": 0.92578125}, {"start": 705.5, "end": 705.64, "word": " or", "probability": 0.7392578125}, {"start": 705.64, "end": 705.92, "word": " whatever", "probability": 0.94580078125}, {"start": 705.92, "end": 706.18, "word": " it", "probability": 0.78271484375}, {"start": 706.18, "end": 706.32, "word": " is.", "probability": 0.9404296875}, {"start": 706.48, "end": 706.7, "word": " So", "probability": 0.86083984375}, {"start": 706.7, "end": 706.96, "word": " just", "probability": 0.814453125}, {"start": 706.96, "end": 707.3, "word": " 0", "probability": 0.95654296875}, {"start": 707.3, "end": 707.64, "word": ".8.", "probability": 0.9990234375}], "temperature": 1.0}, {"id": 28, "seek": 73552, "start": 709.42, "end": 735.52, "text": " So the first feature of the correlation coefficient is unity-free. Number two ranges between negative one and plus one. So R is always, or rho, is always between minus one and plus one. So minus one smaller than or equal to R smaller than or equal to plus one. So R is always in this range. 
So R cannot be smaller than negative one or greater than plus one.", "tokens": [407, 264, 700, 4111, 295, 264, 20009, 17619, 307, 18205, 12, 10792, 13, 5118, 732, 22526, 1296, 3671, 472, 293, 1804, 472, 13, 407, 497, 307, 1009, 11, 420, 20293, 11, 307, 1009, 1296, 3175, 472, 293, 1804, 472, 13, 407, 3175, 472, 4356, 813, 420, 2681, 281, 497, 4356, 813, 420, 2681, 281, 1804, 472, 13, 407, 497, 307, 1009, 294, 341, 3613, 13, 407, 497, 2644, 312, 4356, 813, 3671, 472, 420, 5044, 813, 1804, 472, 13], "avg_logprob": -0.16591796465218067, "compression_ratio": 2.022598870056497, "no_speech_prob": 0.0, "words": [{"start": 709.42, "end": 709.68, "word": " So", "probability": 0.94189453125}, {"start": 709.68, "end": 710.06, "word": " the", "probability": 0.7373046875}, {"start": 710.06, "end": 710.64, "word": " first", "probability": 0.8896484375}, {"start": 710.64, "end": 711.04, "word": " feature", "probability": 0.9287109375}, {"start": 711.04, "end": 711.24, "word": " of", "probability": 0.96337890625}, {"start": 711.24, "end": 711.38, "word": " the", "probability": 0.91015625}, {"start": 711.38, "end": 711.76, "word": " correlation", "probability": 0.94873046875}, {"start": 711.76, "end": 712.3, "word": " coefficient", "probability": 0.95263671875}, {"start": 712.3, "end": 712.7, "word": " is", "probability": 0.9384765625}, {"start": 712.7, "end": 713.0, "word": " unity", "probability": 0.7568359375}, {"start": 713.0, "end": 713.36, "word": "-free.", "probability": 0.5941162109375}, {"start": 714.18, "end": 714.42, "word": " Number", "probability": 0.8681640625}, {"start": 714.42, "end": 714.62, "word": " two", "probability": 0.75830078125}, {"start": 714.62, "end": 714.98, "word": " ranges", "probability": 0.8203125}, {"start": 714.98, "end": 715.3, "word": " between", "probability": 0.81884765625}, {"start": 715.3, "end": 715.58, "word": " negative", "probability": 0.75927734375}, {"start": 715.58, "end": 715.86, "word": " one", "probability": 0.6865234375}, {"start": 
715.86, "end": 716.0, "word": " and", "probability": 0.93896484375}, {"start": 716.0, "end": 716.34, "word": " plus", "probability": 0.865234375}, {"start": 716.34, "end": 716.62, "word": " one.", "probability": 0.9248046875}, {"start": 717.3, "end": 717.56, "word": " So", "probability": 0.951171875}, {"start": 717.56, "end": 717.92, "word": " R", "probability": 0.55810546875}, {"start": 717.92, "end": 718.14, "word": " is", "probability": 0.90625}, {"start": 718.14, "end": 718.58, "word": " always,", "probability": 0.896484375}, {"start": 718.64, "end": 718.8, "word": " or", "probability": 0.857421875}, {"start": 718.8, "end": 719.06, "word": " rho,", "probability": 0.619140625}, {"start": 719.22, "end": 719.44, "word": " is", "probability": 0.931640625}, {"start": 719.44, "end": 719.8, "word": " always", "probability": 0.9033203125}, {"start": 719.8, "end": 720.38, "word": " between", "probability": 0.88720703125}, {"start": 720.38, "end": 721.9, "word": " minus", "probability": 0.8876953125}, {"start": 721.9, "end": 722.18, "word": " one", "probability": 0.89404296875}, {"start": 722.18, "end": 722.34, "word": " and", "probability": 0.93017578125}, {"start": 722.34, "end": 722.58, "word": " plus", "probability": 0.95654296875}, {"start": 722.58, "end": 722.8, "word": " one.", "probability": 0.92919921875}, {"start": 722.88, "end": 723.06, "word": " So", "probability": 0.93505859375}, {"start": 723.06, "end": 723.42, "word": " minus", "probability": 0.94873046875}, {"start": 723.42, "end": 723.78, "word": " one", "probability": 0.91748046875}, {"start": 723.78, "end": 724.26, "word": " smaller", "probability": 0.4716796875}, {"start": 724.26, "end": 724.56, "word": " than", "probability": 0.9248046875}, {"start": 724.56, "end": 724.7, "word": " or", "probability": 0.93896484375}, {"start": 724.7, "end": 724.92, "word": " equal", "probability": 0.89306640625}, {"start": 724.92, "end": 725.22, "word": " to", "probability": 0.55419921875}, {"start": 725.22, "end": 
726.02, "word": " R", "probability": 0.89208984375}, {"start": 726.02, "end": 726.4, "word": " smaller", "probability": 0.6416015625}, {"start": 726.4, "end": 726.62, "word": " than", "probability": 0.92431640625}, {"start": 726.62, "end": 726.76, "word": " or", "probability": 0.56103515625}, {"start": 726.76, "end": 726.82, "word": " equal", "probability": 0.90771484375}, {"start": 726.82, "end": 726.88, "word": " to", "probability": 0.95068359375}, {"start": 726.88, "end": 727.14, "word": " plus", "probability": 0.95263671875}, {"start": 727.14, "end": 727.34, "word": " one.", "probability": 0.91796875}, {"start": 727.42, "end": 727.6, "word": " So", "probability": 0.9140625}, {"start": 727.6, "end": 727.84, "word": " R", "probability": 0.9521484375}, {"start": 727.84, "end": 728.0, "word": " is", "probability": 0.935546875}, {"start": 728.0, "end": 728.5, "word": " always", "probability": 0.90673828125}, {"start": 728.5, "end": 729.24, "word": " in", "probability": 0.93798828125}, {"start": 729.24, "end": 729.42, "word": " this", "probability": 0.94091796875}, {"start": 729.42, "end": 729.82, "word": " range.", "probability": 0.85791015625}, {"start": 730.46, "end": 730.62, "word": " So", "probability": 0.95849609375}, {"start": 730.62, "end": 730.82, "word": " R", "probability": 0.94189453125}, {"start": 730.82, "end": 731.08, "word": " cannot", "probability": 0.89892578125}, {"start": 731.08, "end": 731.42, "word": " be", "probability": 0.95703125}, {"start": 731.42, "end": 731.92, "word": " smaller", "probability": 0.86279296875}, {"start": 731.92, "end": 732.16, "word": " than", "probability": 0.94091796875}, {"start": 732.16, "end": 732.38, "word": " negative", "probability": 0.9208984375}, {"start": 732.38, "end": 732.82, "word": " one", "probability": 0.8974609375}, {"start": 732.82, "end": 733.52, "word": " or", "probability": 0.7666015625}, {"start": 733.52, "end": 733.84, "word": " greater", "probability": 0.9052734375}, {"start": 733.84, "end": 
734.34, "word": " than", "probability": 0.93994140625}, {"start": 734.34, "end": 735.26, "word": " plus", "probability": 0.94775390625}, {"start": 735.26, "end": 735.52, "word": " one.", "probability": 0.92724609375}], "temperature": 1.0}, {"id": 29, "seek": 76323, "start": 736.51, "end": 763.23, "text": " The closer to minus one or negative one, the stronger negative relationship between or linear relationship between x and y. So, for example, if R is negative 0.85 or R is negative 0.8. Now, this value is closer to minus one than negative 0.8. So negative 0.85 is stronger than negative 0.8.", "tokens": [440, 4966, 281, 3175, 472, 420, 3671, 472, 11, 264, 7249, 3671, 2480, 1296, 420, 8213, 2480, 1296, 2031, 293, 288, 13, 407, 11, 337, 1365, 11, 498, 497, 307, 3671, 1958, 13, 19287, 420, 497, 307, 3671, 1958, 13, 23, 13, 823, 11, 341, 2158, 307, 4966, 281, 3175, 472, 813, 3671, 1958, 13, 23, 13, 407, 3671, 1958, 13, 19287, 307, 7249, 813, 3671, 1958, 13, 23, 13], "avg_logprob": -0.16087147341647617, "compression_ratio": 1.9931506849315068, "no_speech_prob": 0.0, "words": [{"start": 736.51, "end": 736.77, "word": " The", "probability": 0.53125}, {"start": 736.77, "end": 737.15, "word": " closer", "probability": 0.86376953125}, {"start": 737.15, "end": 737.39, "word": " to", "probability": 0.96826171875}, {"start": 737.39, "end": 737.65, "word": " minus", "probability": 0.54248046875}, {"start": 737.65, "end": 738.03, "word": " one", "probability": 0.58154296875}, {"start": 738.03, "end": 739.05, "word": " or", "probability": 0.666015625}, {"start": 739.05, "end": 739.35, "word": " negative", "probability": 0.92724609375}, {"start": 739.35, "end": 739.75, "word": " one,", "probability": 0.90869140625}, {"start": 740.09, "end": 740.31, "word": " the", "probability": 0.88671875}, {"start": 740.31, "end": 740.83, "word": " stronger", "probability": 0.85693359375}, {"start": 740.83, "end": 741.35, "word": " negative", "probability": 0.89599609375}, {"start": 741.35, 
"end": 741.99, "word": " relationship", "probability": 0.8828125}, {"start": 741.99, "end": 742.39, "word": " between", "probability": 0.83203125}, {"start": 742.39, "end": 742.79, "word": " or", "probability": 0.6748046875}, {"start": 742.79, "end": 743.13, "word": " linear", "probability": 0.8505859375}, {"start": 743.13, "end": 743.53, "word": " relationship", "probability": 0.89697265625}, {"start": 743.53, "end": 743.81, "word": " between", "probability": 0.9013671875}, {"start": 743.81, "end": 743.97, "word": " x", "probability": 0.634765625}, {"start": 743.97, "end": 744.09, "word": " and", "probability": 0.94775390625}, {"start": 744.09, "end": 744.37, "word": " y.", "probability": 0.98095703125}, {"start": 744.83, "end": 745.03, "word": " So,", "probability": 0.92041015625}, {"start": 745.31, "end": 745.41, "word": " for", "probability": 0.9501953125}, {"start": 745.41, "end": 745.79, "word": " example,", "probability": 0.9736328125}, {"start": 746.51, "end": 746.77, "word": " if", "probability": 0.9462890625}, {"start": 746.77, "end": 747.23, "word": " R", "probability": 0.448486328125}, {"start": 747.23, "end": 747.55, "word": " is", "probability": 0.9384765625}, {"start": 747.55, "end": 747.93, "word": " negative", "probability": 0.9111328125}, {"start": 747.93, "end": 748.25, "word": " 0", "probability": 0.6220703125}, {"start": 748.25, "end": 748.67, "word": ".85", "probability": 0.9912109375}, {"start": 748.67, "end": 749.39, "word": " or", "probability": 0.74853515625}, {"start": 749.39, "end": 749.71, "word": " R", "probability": 0.95849609375}, {"start": 749.71, "end": 749.89, "word": " is", "probability": 0.9462890625}, {"start": 749.89, "end": 750.21, "word": " negative", "probability": 0.93798828125}, {"start": 750.21, "end": 750.51, "word": " 0", "probability": 0.96728515625}, {"start": 750.51, "end": 750.95, "word": ".8.", "probability": 0.985595703125}, {"start": 752.11, "end": 752.45, "word": " Now,", "probability": 0.9443359375}, {"start": 
753.01, "end": 753.37, "word": " this", "probability": 0.94140625}, {"start": 753.37, "end": 753.87, "word": " value", "probability": 0.97314453125}, {"start": 753.87, "end": 755.83, "word": " is", "probability": 0.4931640625}, {"start": 755.83, "end": 756.31, "word": " closer", "probability": 0.8857421875}, {"start": 756.31, "end": 756.57, "word": " to", "probability": 0.96484375}, {"start": 756.57, "end": 756.85, "word": " minus", "probability": 0.96875}, {"start": 756.85, "end": 757.15, "word": " one", "probability": 0.73583984375}, {"start": 757.15, "end": 757.45, "word": " than", "probability": 0.91845703125}, {"start": 757.45, "end": 758.09, "word": " negative", "probability": 0.94189453125}, {"start": 758.09, "end": 758.39, "word": " 0", "probability": 0.93408203125}, {"start": 758.39, "end": 758.85, "word": ".8.", "probability": 0.997802734375}, {"start": 759.41, "end": 759.69, "word": " So", "probability": 0.9599609375}, {"start": 759.69, "end": 760.15, "word": " negative", "probability": 0.68310546875}, {"start": 760.15, "end": 760.61, "word": " 0", "probability": 0.673828125}, {"start": 760.61, "end": 760.69, "word": ".85", "probability": 0.991943359375}, {"start": 760.69, "end": 761.41, "word": " is", "probability": 0.9384765625}, {"start": 761.41, "end": 761.85, "word": " stronger", "probability": 0.8359375}, {"start": 761.85, "end": 762.21, "word": " than", "probability": 0.9404296875}, {"start": 762.21, "end": 762.51, "word": " negative", "probability": 0.91845703125}, {"start": 762.51, "end": 762.77, "word": " 0", "probability": 0.98681640625}, {"start": 762.77, "end": 763.23, "word": ".8.", "probability": 0.998291015625}], "temperature": 1.0}, {"id": 30, "seek": 79327, "start": 764.59, "end": 793.27, "text": " Because we are looking for closer to minus 1. Minus 0.8, the value itself is greater than minus 0.85. But this value is closer to minus 1 than minus 0.8. So we can say that this relationship is stronger than the other one. 
Also, the closer to plus 1, the stronger the positive linear relationship.", "tokens": [1436, 321, 366, 1237, 337, 4966, 281, 3175, 502, 13, 2829, 301, 1958, 13, 23, 11, 264, 2158, 2564, 307, 5044, 813, 3175, 1958, 13, 19287, 13, 583, 341, 2158, 307, 4966, 281, 3175, 502, 813, 3175, 1958, 13, 23, 13, 407, 321, 393, 584, 300, 341, 2480, 307, 7249, 813, 264, 661, 472, 13, 2743, 11, 264, 4966, 281, 1804, 502, 11, 264, 7249, 264, 3353, 8213, 2480, 13], "avg_logprob": -0.13622359490730393, "compression_ratio": 1.7426900584795322, "no_speech_prob": 0.0, "words": [{"start": 764.59, "end": 764.91, "word": " Because", "probability": 0.5673828125}, {"start": 764.91, "end": 765.05, "word": " we", "probability": 0.8916015625}, {"start": 765.05, "end": 765.15, "word": " are", "probability": 0.87939453125}, {"start": 765.15, "end": 765.47, "word": " looking", "probability": 0.9033203125}, {"start": 765.47, "end": 766.11, "word": " for", "probability": 0.8427734375}, {"start": 766.11, "end": 766.87, "word": " closer", "probability": 0.794921875}, {"start": 766.87, "end": 767.91, "word": " to", "probability": 0.865234375}, {"start": 767.91, "end": 768.19, "word": " minus", "probability": 0.489501953125}, {"start": 768.19, "end": 768.47, "word": " 1.", "probability": 0.3037109375}, {"start": 769.57, "end": 770.25, "word": " Minus", "probability": 0.885498046875}, {"start": 770.25, "end": 770.47, "word": " 0", "probability": 0.703125}, {"start": 770.47, "end": 770.71, "word": ".8,", "probability": 0.9853515625}, {"start": 771.65, "end": 772.13, "word": " the", "probability": 0.87109375}, {"start": 772.13, "end": 772.51, "word": " value", "probability": 0.97607421875}, {"start": 772.51, "end": 773.31, "word": " itself", "probability": 0.76953125}, {"start": 773.31, "end": 773.69, "word": " is", "probability": 0.916015625}, {"start": 773.69, "end": 774.07, "word": " greater", "probability": 0.8388671875}, {"start": 774.07, "end": 774.55, "word": " than", "probability": 0.93603515625}, 
{"start": 774.55, "end": 775.31, "word": " minus", "probability": 0.93994140625}, {"start": 775.31, "end": 775.53, "word": " 0", "probability": 0.99072265625}, {"start": 775.53, "end": 775.83, "word": ".85.", "probability": 0.995361328125}, {"start": 776.53, "end": 776.87, "word": " But", "probability": 0.939453125}, {"start": 776.87, "end": 777.17, "word": " this", "probability": 0.92578125}, {"start": 777.17, "end": 777.53, "word": " value", "probability": 0.955078125}, {"start": 777.53, "end": 777.79, "word": " is", "probability": 0.94384765625}, {"start": 777.79, "end": 778.33, "word": " closer", "probability": 0.90625}, {"start": 778.33, "end": 778.67, "word": " to", "probability": 0.9658203125}, {"start": 778.67, "end": 778.95, "word": " minus", "probability": 0.96728515625}, {"start": 778.95, "end": 779.27, "word": " 1", "probability": 0.884765625}, {"start": 779.27, "end": 779.61, "word": " than", "probability": 0.8935546875}, {"start": 779.61, "end": 780.19, "word": " minus", "probability": 0.97265625}, {"start": 780.19, "end": 780.45, "word": " 0", "probability": 0.99169921875}, {"start": 780.45, "end": 780.77, "word": ".8.", "probability": 0.996337890625}, {"start": 781.09, "end": 781.29, "word": " So", "probability": 0.9599609375}, {"start": 781.29, "end": 781.43, "word": " we", "probability": 0.7841796875}, {"start": 781.43, "end": 781.61, "word": " can", "probability": 0.94287109375}, {"start": 781.61, "end": 781.81, "word": " say", "probability": 0.8544921875}, {"start": 781.81, "end": 782.09, "word": " that", "probability": 0.90869140625}, {"start": 782.09, "end": 782.79, "word": " this", "probability": 0.8720703125}, {"start": 782.79, "end": 783.37, "word": " relationship", "probability": 0.9169921875}, {"start": 783.37, "end": 783.79, "word": " is", "probability": 0.9453125}, {"start": 783.79, "end": 784.17, "word": " stronger", "probability": 0.8564453125}, {"start": 784.17, "end": 784.45, "word": " than", "probability": 0.9423828125}, {"start": 
784.45, "end": 784.59, "word": " the", "probability": 0.90283203125}, {"start": 784.59, "end": 784.77, "word": " other", "probability": 0.88134765625}, {"start": 784.77, "end": 785.07, "word": " one.", "probability": 0.91064453125}, {"start": 787.87, "end": 788.55, "word": " Also,", "probability": 0.9453125}, {"start": 788.81, "end": 789.27, "word": " the", "probability": 0.89013671875}, {"start": 789.27, "end": 789.65, "word": " closer", "probability": 0.9267578125}, {"start": 789.65, "end": 789.89, "word": " to", "probability": 0.96923828125}, {"start": 789.89, "end": 790.21, "word": " plus", "probability": 0.9326171875}, {"start": 790.21, "end": 790.53, "word": " 1,", "probability": 0.84716796875}, {"start": 790.77, "end": 790.89, "word": " the", "probability": 0.91015625}, {"start": 790.89, "end": 791.39, "word": " stronger", "probability": 0.822265625}, {"start": 791.39, "end": 791.73, "word": " the", "probability": 0.876953125}, {"start": 791.73, "end": 792.29, "word": " positive", "probability": 0.92529296875}, {"start": 792.29, "end": 792.71, "word": " linear", "probability": 0.8369140625}, {"start": 792.71, "end": 793.27, "word": " relationship.", "probability": 0.916015625}], "temperature": 1.0}, {"id": 31, "seek": 82298, "start": 794.64, "end": 822.98, "text": " Here, suppose R is 0.7 and another R is 0.8. 0.8 is closer to plus one than 0.7, so 0.8 is stronger. This one makes sense. The closer to zero, the weaker relationship between the two variables. For example, suppose R is plus or minus 0.05. This value is very close to zero. 
It means there exists weak.", "tokens": [1692, 11, 7297, 497, 307, 1958, 13, 22, 293, 1071, 497, 307, 1958, 13, 23, 13, 1958, 13, 23, 307, 4966, 281, 1804, 472, 813, 1958, 13, 22, 11, 370, 1958, 13, 23, 307, 7249, 13, 639, 472, 1669, 2020, 13, 440, 4966, 281, 4018, 11, 264, 24286, 2480, 1296, 264, 732, 9102, 13, 1171, 1365, 11, 7297, 497, 307, 1804, 420, 3175, 1958, 13, 13328, 13, 639, 2158, 307, 588, 1998, 281, 4018, 13, 467, 1355, 456, 8198, 5336, 13], "avg_logprob": -0.16958842026751217, "compression_ratio": 1.6063829787234043, "no_speech_prob": 0.0, "words": [{"start": 794.64, "end": 794.96, "word": " Here,", "probability": 0.5048828125}, {"start": 795.08, "end": 795.46, "word": " suppose", "probability": 0.89892578125}, {"start": 795.46, "end": 795.72, "word": " R", "probability": 0.493896484375}, {"start": 795.72, "end": 795.84, "word": " is", "probability": 0.857421875}, {"start": 795.84, "end": 796.04, "word": " 0", "probability": 0.6962890625}, {"start": 796.04, "end": 796.38, "word": ".7", "probability": 0.99072265625}, {"start": 796.38, "end": 798.08, "word": " and", "probability": 0.5498046875}, {"start": 798.08, "end": 798.48, "word": " another", "probability": 0.91357421875}, {"start": 798.48, "end": 798.74, "word": " R", "probability": 0.9716796875}, {"start": 798.74, "end": 798.88, "word": " is", "probability": 0.94287109375}, {"start": 798.88, "end": 799.12, "word": " 0", "probability": 0.97900390625}, {"start": 799.12, "end": 799.52, "word": ".8.", "probability": 0.997314453125}, {"start": 800.58, "end": 801.06, "word": " 0", "probability": 0.95654296875}, {"start": 801.06, "end": 801.4, "word": ".8", "probability": 0.996826171875}, {"start": 801.4, "end": 801.58, "word": " is", "probability": 0.9384765625}, {"start": 801.58, "end": 802.0, "word": " closer", "probability": 0.8974609375}, {"start": 802.0, "end": 802.24, "word": " to", "probability": 0.96630859375}, {"start": 802.24, "end": 802.5, "word": " plus", "probability": 0.390869140625}, 
{"start": 802.5, "end": 802.74, "word": " one", "probability": 0.53759765625}, {"start": 802.74, "end": 802.96, "word": " than", "probability": 0.92041015625}, {"start": 802.96, "end": 803.26, "word": " 0", "probability": 0.9404296875}, {"start": 803.26, "end": 803.56, "word": ".7,", "probability": 0.99853515625}, {"start": 804.12, "end": 804.4, "word": " so", "probability": 0.9091796875}, {"start": 804.4, "end": 804.76, "word": " 0", "probability": 0.93212890625}, {"start": 804.76, "end": 805.24, "word": ".8", "probability": 0.99755859375}, {"start": 805.24, "end": 805.5, "word": " is", "probability": 0.9404296875}, {"start": 805.5, "end": 805.88, "word": " stronger.", "probability": 0.56884765625}, {"start": 806.22, "end": 806.38, "word": " This", "probability": 0.86181640625}, {"start": 806.38, "end": 806.52, "word": " one", "probability": 0.8916015625}, {"start": 806.52, "end": 806.74, "word": " makes", "probability": 0.826171875}, {"start": 806.74, "end": 807.06, "word": " sense.", "probability": 0.81787109375}, {"start": 808.74, "end": 809.18, "word": " The", "probability": 0.4677734375}, {"start": 809.18, "end": 809.44, "word": " closer", "probability": 0.8466796875}, {"start": 809.44, "end": 809.7, "word": " to", "probability": 0.9560546875}, {"start": 809.7, "end": 810.0, "word": " zero,", "probability": 0.802734375}, {"start": 810.48, "end": 810.62, "word": " the", "probability": 0.91064453125}, {"start": 810.62, "end": 810.9, "word": " weaker", "probability": 0.9033203125}, {"start": 810.9, "end": 811.8, "word": " relationship", "probability": 0.5009765625}, {"start": 811.8, "end": 812.3, "word": " between", "probability": 0.87060546875}, {"start": 812.3, "end": 812.6, "word": " the", "probability": 0.8818359375}, {"start": 812.6, "end": 812.76, "word": " two", "probability": 0.91650390625}, {"start": 812.76, "end": 813.18, "word": " variables.", "probability": 0.92236328125}, {"start": 814.08, "end": 814.36, "word": " For", "probability": 0.955078125}, 
{"start": 814.36, "end": 814.74, "word": " example,", "probability": 0.97216796875}, {"start": 814.88, "end": 815.2, "word": " suppose", "probability": 0.91162109375}, {"start": 815.2, "end": 815.42, "word": " R", "probability": 0.96435546875}, {"start": 815.42, "end": 815.56, "word": " is", "probability": 0.94287109375}, {"start": 815.56, "end": 815.98, "word": " plus", "probability": 0.953125}, {"start": 815.98, "end": 816.24, "word": " or", "probability": 0.951171875}, {"start": 816.24, "end": 816.62, "word": " minus", "probability": 0.9814453125}, {"start": 816.62, "end": 816.9, "word": " 0", "probability": 0.970703125}, {"start": 816.9, "end": 817.5, "word": ".05.", "probability": 0.9560546875}, {"start": 818.82, "end": 819.42, "word": " This", "probability": 0.77001953125}, {"start": 819.42, "end": 819.74, "word": " value", "probability": 0.95947265625}, {"start": 819.74, "end": 819.92, "word": " is", "probability": 0.9384765625}, {"start": 819.92, "end": 820.14, "word": " very", "probability": 0.85693359375}, {"start": 820.14, "end": 820.54, "word": " close", "probability": 0.8828125}, {"start": 820.54, "end": 820.72, "word": " to", "probability": 0.95849609375}, {"start": 820.72, "end": 821.02, "word": " zero.", "probability": 0.87890625}, {"start": 821.3, "end": 821.44, "word": " It", "probability": 0.92431640625}, {"start": 821.44, "end": 821.8, "word": " means", "probability": 0.9306640625}, {"start": 821.8, "end": 822.04, "word": " there", "probability": 0.75146484375}, {"start": 822.04, "end": 822.5, "word": " exists", "probability": 0.86474609375}, {"start": 822.5, "end": 822.98, "word": " weak.", "probability": 0.869140625}], "temperature": 1.0}, {"id": 32, "seek": 85066, "start": 823.82, "end": 850.66, "text": " relationship. Sometimes we can say that there exists moderate relationship if R is close to 0.5. So it could be classified into these groups closer to minus 1, closer to 1, 0.5 or 0. 
So we can know the direction by the sign of R negative", "tokens": [2480, 13, 4803, 321, 393, 584, 300, 456, 8198, 18174, 2480, 498, 497, 307, 1998, 281, 1958, 13, 20, 13, 407, 309, 727, 312, 20627, 666, 613, 3935, 4966, 281, 3175, 502, 11, 4966, 281, 502, 11, 1958, 13, 20, 420, 1958, 13, 407, 321, 393, 458, 264, 3513, 538, 264, 1465, 295, 497, 3671], "avg_logprob": -0.21205357408949307, "compression_ratio": 1.5256410256410255, "no_speech_prob": 0.0, "words": [{"start": 823.82, "end": 824.42, "word": " relationship.", "probability": 0.3828125}, {"start": 824.98, "end": 825.74, "word": " Sometimes", "probability": 0.84814453125}, {"start": 825.74, "end": 825.98, "word": " we", "probability": 0.76513671875}, {"start": 825.98, "end": 826.18, "word": " can", "probability": 0.9453125}, {"start": 826.18, "end": 826.42, "word": " say", "probability": 0.8798828125}, {"start": 826.42, "end": 826.7, "word": " that", "probability": 0.91015625}, {"start": 826.7, "end": 827.02, "word": " there", "probability": 0.8984375}, {"start": 827.02, "end": 827.48, "word": " exists", "probability": 0.78369140625}, {"start": 827.48, "end": 827.96, "word": " moderate", "probability": 0.8779296875}, {"start": 827.96, "end": 830.48, "word": " relationship", "probability": 0.90087890625}, {"start": 830.48, "end": 830.96, "word": " if", "probability": 0.85888671875}, {"start": 830.96, "end": 831.36, "word": " R", "probability": 0.75537109375}, {"start": 831.36, "end": 831.56, "word": " is", "probability": 0.92724609375}, {"start": 831.56, "end": 832.12, "word": " close", "probability": 0.5703125}, {"start": 832.12, "end": 833.74, "word": " to", "probability": 0.91796875}, {"start": 833.74, "end": 833.96, "word": " 0", "probability": 0.1966552734375}, {"start": 833.96, "end": 834.14, "word": ".5.", "probability": 0.990234375}, {"start": 834.92, "end": 835.54, "word": " So", "probability": 0.890625}, {"start": 835.54, "end": 836.62, "word": " it", "probability": 0.67333984375}, {"start": 836.62, 
"end": 836.86, "word": " could", "probability": 0.87158203125}, {"start": 836.86, "end": 837.08, "word": " be", "probability": 0.951171875}, {"start": 837.08, "end": 837.64, "word": " classified", "probability": 0.92431640625}, {"start": 837.64, "end": 838.9, "word": " into", "probability": 0.82373046875}, {"start": 838.9, "end": 839.16, "word": " these", "probability": 0.6767578125}, {"start": 839.16, "end": 839.62, "word": " groups", "probability": 0.953125}, {"start": 839.62, "end": 840.56, "word": " closer", "probability": 0.78271484375}, {"start": 840.56, "end": 840.76, "word": " to", "probability": 0.97021484375}, {"start": 840.76, "end": 841.02, "word": " minus", "probability": 0.626953125}, {"start": 841.02, "end": 841.36, "word": " 1,", "probability": 0.432861328125}, {"start": 841.5, "end": 841.74, "word": " closer", "probability": 0.91064453125}, {"start": 841.74, "end": 841.98, "word": " to", "probability": 0.9609375}, {"start": 841.98, "end": 842.2, "word": " 1,", "probability": 0.93212890625}, {"start": 842.34, "end": 842.66, "word": " 0", "probability": 0.84326171875}, {"start": 842.66, "end": 843.08, "word": ".5", "probability": 0.99462890625}, {"start": 843.08, "end": 843.34, "word": " or", "probability": 0.5048828125}, {"start": 843.34, "end": 844.0, "word": " 0.", "probability": 0.79052734375}, {"start": 844.44, "end": 845.04, "word": " So", "probability": 0.92822265625}, {"start": 845.04, "end": 845.5, "word": " we", "probability": 0.83251953125}, {"start": 845.5, "end": 845.78, "word": " can", "probability": 0.94091796875}, {"start": 845.78, "end": 846.08, "word": " know", "probability": 0.873046875}, {"start": 846.08, "end": 846.22, "word": " the", "probability": 0.9140625}, {"start": 846.22, "end": 846.74, "word": " direction", "probability": 0.9765625}, {"start": 846.74, "end": 847.68, "word": " by", "probability": 0.9609375}, {"start": 847.68, "end": 847.92, "word": " the", "probability": 0.9248046875}, {"start": 847.92, "end": 848.24, 
"word": " sign", "probability": 0.80859375}, {"start": 848.24, "end": 849.2, "word": " of", "probability": 0.97314453125}, {"start": 849.2, "end": 849.58, "word": " R", "probability": 0.9697265625}, {"start": 849.58, "end": 850.66, "word": " negative", "probability": 0.7265625}], "temperature": 1.0}, {"id": 33, "seek": 87552, "start": 851.14, "end": 875.52, "text": " it means because here our ranges as we mentioned between minus one and plus one here zero so this these values it means there exists negative above zero all the way up to one it means there exists positive relationship between the two variables so the sign gives the direction of the relationship", "tokens": [309, 1355, 570, 510, 527, 22526, 382, 321, 2835, 1296, 3175, 472, 293, 1804, 472, 510, 4018, 370, 341, 613, 4190, 309, 1355, 456, 8198, 3671, 3673, 4018, 439, 264, 636, 493, 281, 472, 309, 1355, 456, 8198, 3353, 2480, 1296, 264, 732, 9102, 370, 264, 1465, 2709, 264, 3513, 295, 264, 2480], "avg_logprob": -0.17288773782827235, "compression_ratio": 1.8109756097560976, "no_speech_prob": 0.0, "words": [{"start": 851.14, "end": 851.42, "word": " it", "probability": 0.27392578125}, {"start": 851.42, "end": 851.68, "word": " means", "probability": 0.82568359375}, {"start": 851.68, "end": 852.06, "word": " because", "probability": 0.71923828125}, {"start": 852.06, "end": 852.36, "word": " here", "probability": 0.81494140625}, {"start": 852.36, "end": 852.82, "word": " our", "probability": 0.341796875}, {"start": 852.82, "end": 853.28, "word": " ranges", "probability": 0.81591796875}, {"start": 853.28, "end": 853.6, "word": " as", "probability": 0.90185546875}, {"start": 853.6, "end": 853.74, "word": " we", "probability": 0.6689453125}, {"start": 853.74, "end": 854.02, "word": " mentioned", "probability": 0.82763671875}, {"start": 854.02, "end": 854.32, "word": " between", "probability": 0.85693359375}, {"start": 854.32, "end": 854.62, "word": " minus", "probability": 0.8427734375}, {"start": 854.62, "end": 
854.9, "word": " one", "probability": 0.7353515625}, {"start": 854.9, "end": 855.04, "word": " and", "probability": 0.90185546875}, {"start": 855.04, "end": 855.32, "word": " plus", "probability": 0.951171875}, {"start": 855.32, "end": 855.66, "word": " one", "probability": 0.9267578125}, {"start": 855.66, "end": 856.88, "word": " here", "probability": 0.6826171875}, {"start": 856.88, "end": 857.26, "word": " zero", "probability": 0.65869140625}, {"start": 857.26, "end": 858.26, "word": " so", "probability": 0.68017578125}, {"start": 858.26, "end": 858.66, "word": " this", "probability": 0.91357421875}, {"start": 858.66, "end": 859.52, "word": " these", "probability": 0.82275390625}, {"start": 859.52, "end": 860.1, "word": " values", "probability": 0.9697265625}, {"start": 860.1, "end": 861.32, "word": " it", "probability": 0.9501953125}, {"start": 861.32, "end": 861.62, "word": " means", "probability": 0.92919921875}, {"start": 861.62, "end": 861.88, "word": " there", "probability": 0.9072265625}, {"start": 861.88, "end": 862.32, "word": " exists", "probability": 0.779296875}, {"start": 862.32, "end": 863.02, "word": " negative", "probability": 0.95263671875}, {"start": 863.02, "end": 864.24, "word": " above", "probability": 0.94189453125}, {"start": 864.24, "end": 864.56, "word": " zero", "probability": 0.90966796875}, {"start": 864.56, "end": 864.9, "word": " all", "probability": 0.95556640625}, {"start": 864.9, "end": 865.06, "word": " the", "probability": 0.919921875}, {"start": 865.06, "end": 865.18, "word": " way", "probability": 0.95703125}, {"start": 865.18, "end": 865.32, "word": " up", "probability": 0.96826171875}, {"start": 865.32, "end": 865.48, "word": " to", "probability": 0.97119140625}, {"start": 865.48, "end": 865.72, "word": " one", "probability": 0.92626953125}, {"start": 865.72, "end": 865.94, "word": " it", "probability": 0.9443359375}, {"start": 865.94, "end": 866.16, "word": " means", "probability": 0.9306640625}, {"start": 866.16, "end": 
866.32, "word": " there", "probability": 0.88818359375}, {"start": 866.32, "end": 866.76, "word": " exists", "probability": 0.77197265625}, {"start": 866.76, "end": 867.96, "word": " positive", "probability": 0.86962890625}, {"start": 867.96, "end": 868.84, "word": " relationship", "probability": 0.91015625}, {"start": 868.84, "end": 869.2, "word": " between", "probability": 0.8701171875}, {"start": 869.2, "end": 869.36, "word": " the", "probability": 0.91845703125}, {"start": 869.36, "end": 869.5, "word": " two", "probability": 0.9404296875}, {"start": 869.5, "end": 869.88, "word": " variables", "probability": 0.958984375}, {"start": 869.88, "end": 871.02, "word": " so", "probability": 0.822265625}, {"start": 871.02, "end": 871.44, "word": " the", "probability": 0.91552734375}, {"start": 871.44, "end": 871.88, "word": " sign", "probability": 0.6767578125}, {"start": 871.88, "end": 873.54, "word": " gives", "probability": 0.91015625}, {"start": 873.54, "end": 873.72, "word": " the", "probability": 0.91650390625}, {"start": 873.72, "end": 874.16, "word": " direction", "probability": 0.97607421875}, {"start": 874.16, "end": 874.9, "word": " of", "probability": 0.96923828125}, {"start": 874.9, "end": 875.04, "word": " the", "probability": 0.91748046875}, {"start": 875.04, "end": 875.52, "word": " relationship", "probability": 0.91455078125}], "temperature": 1.0}, {"id": 34, "seek": 90552, "start": 876.72, "end": 905.52, "text": " The absolute value gives the strength of the relationship between the two variables. So the same as we had discussed before. Now, some types of scatter plots for different types of relationship between the two variables is presented in this slide. 
For example, if you look carefully at figure one here, sharp one, this one, and the other one,", "tokens": [440, 8236, 2158, 2709, 264, 3800, 295, 264, 2480, 1296, 264, 732, 9102, 13, 407, 264, 912, 382, 321, 632, 7152, 949, 13, 823, 11, 512, 3467, 295, 34951, 28609, 337, 819, 3467, 295, 2480, 1296, 264, 732, 9102, 307, 8212, 294, 341, 4137, 13, 1171, 1365, 11, 498, 291, 574, 7500, 412, 2573, 472, 510, 11, 8199, 472, 11, 341, 472, 11, 293, 264, 661, 472, 11], "avg_logprob": -0.1651947515598242, "compression_ratio": 1.698019801980198, "no_speech_prob": 0.0, "words": [{"start": 876.72, "end": 876.96, "word": " The", "probability": 0.39453125}, {"start": 876.96, "end": 877.48, "word": " absolute", "probability": 0.88623046875}, {"start": 877.48, "end": 877.92, "word": " value", "probability": 0.970703125}, {"start": 877.92, "end": 878.26, "word": " gives", "probability": 0.9013671875}, {"start": 878.26, "end": 878.64, "word": " the", "probability": 0.88037109375}, {"start": 878.64, "end": 879.86, "word": " strength", "probability": 0.7998046875}, {"start": 879.86, "end": 880.56, "word": " of", "probability": 0.94287109375}, {"start": 880.56, "end": 880.84, "word": " the", "probability": 0.91796875}, {"start": 880.84, "end": 881.56, "word": " relationship", "probability": 0.90478515625}, {"start": 881.56, "end": 881.94, "word": " between", "probability": 0.87353515625}, {"start": 881.94, "end": 882.1, "word": " the", "probability": 0.8896484375}, {"start": 882.1, "end": 882.24, "word": " two", "probability": 0.91455078125}, {"start": 882.24, "end": 882.62, "word": " variables.", "probability": 0.95458984375}, {"start": 883.12, "end": 883.34, "word": " So", "probability": 0.93408203125}, {"start": 883.34, "end": 883.5, "word": " the", "probability": 0.67919921875}, {"start": 883.5, "end": 883.72, "word": " same", "probability": 0.90625}, {"start": 883.72, "end": 883.94, "word": " as", "probability": 0.958984375}, {"start": 883.94, "end": 884.2, "word": " we", 
"probability": 0.96484375}, {"start": 884.2, "end": 884.62, "word": " had", "probability": 0.697265625}, {"start": 884.62, "end": 885.14, "word": " discussed", "probability": 0.89306640625}, {"start": 885.14, "end": 886.16, "word": " before.", "probability": 0.8515625}, {"start": 887.94, "end": 888.52, "word": " Now,", "probability": 0.95458984375}, {"start": 888.62, "end": 888.88, "word": " some", "probability": 0.8583984375}, {"start": 888.88, "end": 889.26, "word": " types", "probability": 0.82861328125}, {"start": 889.26, "end": 889.64, "word": " of", "probability": 0.9677734375}, {"start": 889.64, "end": 890.04, "word": " scatter", "probability": 0.95703125}, {"start": 890.04, "end": 890.4, "word": " plots", "probability": 0.86376953125}, {"start": 890.4, "end": 890.9, "word": " for", "probability": 0.9404296875}, {"start": 890.9, "end": 891.32, "word": " different", "probability": 0.87744140625}, {"start": 891.32, "end": 891.72, "word": " types", "probability": 0.814453125}, {"start": 891.72, "end": 891.88, "word": " of", "probability": 0.96484375}, {"start": 891.88, "end": 892.38, "word": " relationship", "probability": 0.77685546875}, {"start": 892.38, "end": 892.84, "word": " between", "probability": 0.87353515625}, {"start": 892.84, "end": 893.6, "word": " the", "probability": 0.86865234375}, {"start": 893.6, "end": 893.78, "word": " two", "probability": 0.93798828125}, {"start": 893.78, "end": 894.24, "word": " variables", "probability": 0.95263671875}, {"start": 894.24, "end": 894.74, "word": " is", "probability": 0.58837890625}, {"start": 894.74, "end": 895.36, "word": " presented", "probability": 0.76513671875}, {"start": 895.36, "end": 895.8, "word": " in", "probability": 0.94287109375}, {"start": 895.8, "end": 896.2, "word": " this", "probability": 0.9482421875}, {"start": 896.2, "end": 897.26, "word": " slide.", "probability": 0.95751953125}, {"start": 897.38, "end": 897.52, "word": " For", "probability": 0.96240234375}, {"start": 897.52, "end": 
897.92, "word": " example,", "probability": 0.97509765625}, {"start": 898.6, "end": 898.76, "word": " if", "probability": 0.93603515625}, {"start": 898.76, "end": 898.84, "word": " you", "probability": 0.7890625}, {"start": 898.84, "end": 899.1, "word": " look", "probability": 0.96337890625}, {"start": 899.1, "end": 900.04, "word": " carefully", "probability": 0.8046875}, {"start": 900.04, "end": 900.5, "word": " at", "probability": 0.88818359375}, {"start": 900.5, "end": 901.46, "word": " figure", "probability": 0.861328125}, {"start": 901.46, "end": 901.9, "word": " one", "probability": 0.60205078125}, {"start": 901.9, "end": 902.38, "word": " here,", "probability": 0.83837890625}, {"start": 902.78, "end": 903.22, "word": " sharp", "probability": 0.71826171875}, {"start": 903.22, "end": 903.44, "word": " one,", "probability": 0.92724609375}, {"start": 903.5, "end": 903.64, "word": " this", "probability": 0.94091796875}, {"start": 903.64, "end": 903.94, "word": " one,", "probability": 0.9228515625}, {"start": 904.72, "end": 904.94, "word": " and", "probability": 0.9345703125}, {"start": 904.94, "end": 905.04, "word": " the", "probability": 0.8740234375}, {"start": 905.04, "end": 905.26, "word": " other", "probability": 0.8857421875}, {"start": 905.26, "end": 905.52, "word": " one,", "probability": 0.919921875}], "temperature": 1.0}, {"id": 35, "seek": 92422, "start": 906.76, "end": 924.22, "text": " In each one, all points are on the straight line, it means they exist perfect. 
So if all points fall exactly on the straight line, it means they exist perfect.", "tokens": [682, 1184, 472, 11, 439, 2793, 366, 322, 264, 2997, 1622, 11, 309, 1355, 436, 2514, 2176, 13, 407, 498, 439, 2793, 2100, 2293, 322, 264, 2997, 1622, 11, 309, 1355, 436, 2514, 2176, 13], "avg_logprob": -0.22786458333333334, "compression_ratio": 1.7391304347826086, "no_speech_prob": 0.0, "words": [{"start": 906.76, "end": 907.06, "word": " In", "probability": 0.47412109375}, {"start": 907.06, "end": 907.38, "word": " each", "probability": 0.94677734375}, {"start": 907.38, "end": 907.64, "word": " one,", "probability": 0.921875}, {"start": 908.98, "end": 909.3, "word": " all", "probability": 0.904296875}, {"start": 909.3, "end": 909.72, "word": " points", "probability": 0.94970703125}, {"start": 909.72, "end": 913.02, "word": " are", "probability": 0.9130859375}, {"start": 913.02, "end": 913.28, "word": " on", "probability": 0.927734375}, {"start": 913.28, "end": 913.5, "word": " the", "probability": 0.888671875}, {"start": 913.5, "end": 914.0, "word": " straight", "probability": 0.90234375}, {"start": 914.0, "end": 914.32, "word": " line,", "probability": 0.91650390625}, {"start": 914.44, "end": 914.56, "word": " it", "probability": 0.77294921875}, {"start": 914.56, "end": 914.76, "word": " means", "probability": 0.9287109375}, {"start": 914.76, "end": 914.96, "word": " they", "probability": 0.53564453125}, {"start": 914.96, "end": 915.34, "word": " exist", "probability": 0.9072265625}, {"start": 915.34, "end": 915.82, "word": " perfect.", "probability": 0.5576171875}, {"start": 916.84, "end": 917.16, "word": " So", "probability": 0.896484375}, {"start": 917.16, "end": 917.4, "word": " if", "probability": 0.716796875}, {"start": 917.4, "end": 919.02, "word": " all", "probability": 0.505859375}, {"start": 919.02, "end": 919.56, "word": " points", "probability": 0.908203125}, {"start": 919.56, "end": 920.62, "word": " fall", "probability": 0.297119140625}, {"start": 920.62, 
"end": 921.1, "word": " exactly", "probability": 0.89111328125}, {"start": 921.1, "end": 921.28, "word": " on", "probability": 0.9384765625}, {"start": 921.28, "end": 921.4, "word": " the", "probability": 0.837890625}, {"start": 921.4, "end": 921.72, "word": " straight", "probability": 0.91357421875}, {"start": 921.72, "end": 922.06, "word": " line,", "probability": 0.9345703125}, {"start": 922.54, "end": 922.74, "word": " it", "probability": 0.94189453125}, {"start": 922.74, "end": 923.02, "word": " means", "probability": 0.93115234375}, {"start": 923.02, "end": 923.2, "word": " they", "probability": 0.72607421875}, {"start": 923.2, "end": 923.62, "word": " exist", "probability": 0.955078125}, {"start": 923.62, "end": 924.22, "word": " perfect.", "probability": 0.8974609375}], "temperature": 1.0}, {"id": 36, "seek": 95092, "start": 931.4, "end": 950.92, "text": " Here there exists perfect negative. So this is perfect negative relationship. The other one perfect positive relationship. In reality you will never see something like perfect positive or perfect negative. 
Maybe in real situation.", "tokens": [1692, 456, 8198, 2176, 3671, 13, 407, 341, 307, 2176, 3671, 2480, 13, 440, 661, 472, 2176, 3353, 2480, 13, 682, 4103, 291, 486, 1128, 536, 746, 411, 2176, 3353, 420, 2176, 3671, 13, 2704, 294, 957, 2590, 13], "avg_logprob": -0.2535156235098839, "compression_ratio": 1.7769230769230768, "no_speech_prob": 0.0, "words": [{"start": 931.4, "end": 931.68, "word": " Here", "probability": 0.3642578125}, {"start": 931.68, "end": 931.86, "word": " there", "probability": 0.78271484375}, {"start": 931.86, "end": 932.36, "word": " exists", "probability": 0.6572265625}, {"start": 932.36, "end": 932.86, "word": " perfect", "probability": 0.79736328125}, {"start": 932.86, "end": 933.32, "word": " negative.", "probability": 0.533203125}, {"start": 934.4, "end": 934.72, "word": " So", "probability": 0.79541015625}, {"start": 934.72, "end": 935.02, "word": " this", "probability": 0.78662109375}, {"start": 935.02, "end": 935.16, "word": " is", "probability": 0.7001953125}, {"start": 935.16, "end": 935.44, "word": " perfect", "probability": 0.806640625}, {"start": 935.44, "end": 935.98, "word": " negative", "probability": 0.92431640625}, {"start": 935.98, "end": 936.7, "word": " relationship.", "probability": 0.92333984375}, {"start": 937.16, "end": 937.3, "word": " The", "probability": 0.84033203125}, {"start": 937.3, "end": 937.48, "word": " other", "probability": 0.8916015625}, {"start": 937.48, "end": 937.74, "word": " one", "probability": 0.91455078125}, {"start": 937.74, "end": 938.1, "word": " perfect", "probability": 0.430419921875}, {"start": 938.1, "end": 938.62, "word": " positive", "probability": 0.9326171875}, {"start": 938.62, "end": 939.4, "word": " relationship.", "probability": 0.89990234375}, {"start": 940.0, "end": 940.22, "word": " In", "probability": 0.79296875}, {"start": 940.22, "end": 940.68, "word": " reality", "probability": 0.9638671875}, {"start": 940.68, "end": 941.08, "word": " you", "probability": 0.68505859375}, 
{"start": 941.08, "end": 941.24, "word": " will", "probability": 0.85302734375}, {"start": 941.24, "end": 941.56, "word": " never", "probability": 0.9326171875}, {"start": 941.56, "end": 942.52, "word": " see", "probability": 0.93701171875}, {"start": 942.52, "end": 945.68, "word": " something", "probability": 0.7763671875}, {"start": 945.68, "end": 946.16, "word": " like", "probability": 0.93701171875}, {"start": 946.16, "end": 946.66, "word": " perfect", "probability": 0.91162109375}, {"start": 946.66, "end": 947.26, "word": " positive", "probability": 0.94140625}, {"start": 947.26, "end": 947.52, "word": " or", "probability": 0.95166015625}, {"start": 947.52, "end": 947.88, "word": " perfect", "probability": 0.92041015625}, {"start": 947.88, "end": 948.4, "word": " negative.", "probability": 0.95703125}, {"start": 949.12, "end": 949.38, "word": " Maybe", "probability": 0.52197265625}, {"start": 949.38, "end": 949.6, "word": " in", "probability": 0.83203125}, {"start": 949.6, "end": 950.28, "word": " real", "probability": 0.8193359375}, {"start": 950.28, "end": 950.92, "word": " situation.", "probability": 0.880859375}], "temperature": 1.0}, {"id": 37, "seek": 98121, "start": 951.53, "end": 981.21, "text": " In real situation, most of the time, R is close to 0.9 or 0.85 or something like that, but it's not exactly equal one. Because equal one, it means if you know the value of a child's age, then you can predict the exact weight. And that never happened. If the data looks like this table, for example. 
Suppose here we have age and weight.", "tokens": [682, 957, 2590, 11, 881, 295, 264, 565, 11, 497, 307, 1998, 281, 1958, 13, 24, 420, 1958, 13, 19287, 420, 746, 411, 300, 11, 457, 309, 311, 406, 2293, 2681, 472, 13, 1436, 2681, 472, 11, 309, 1355, 498, 291, 458, 264, 2158, 295, 257, 1440, 311, 3205, 11, 550, 291, 393, 6069, 264, 1900, 3364, 13, 400, 300, 1128, 2011, 13, 759, 264, 1412, 1542, 411, 341, 3199, 11, 337, 1365, 13, 21360, 510, 321, 362, 3205, 293, 3364, 13], "avg_logprob": -0.18401731676365957, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 951.53, "end": 951.75, "word": " In", "probability": 0.479736328125}, {"start": 951.75, "end": 951.93, "word": " real", "probability": 0.80810546875}, {"start": 951.93, "end": 952.39, "word": " situation,", "probability": 0.84130859375}, {"start": 952.81, "end": 952.99, "word": " most", "probability": 0.88818359375}, {"start": 952.99, "end": 953.15, "word": " of", "probability": 0.9677734375}, {"start": 953.15, "end": 953.27, "word": " the", "probability": 0.9130859375}, {"start": 953.27, "end": 953.55, "word": " time,", "probability": 0.7626953125}, {"start": 953.85, "end": 953.99, "word": " R", "probability": 0.59912109375}, {"start": 953.99, "end": 954.13, "word": " is", "probability": 0.93408203125}, {"start": 954.13, "end": 954.45, "word": " close", "probability": 0.87744140625}, {"start": 954.45, "end": 954.59, "word": " to", "probability": 0.95361328125}, {"start": 954.59, "end": 954.75, "word": " 0", "probability": 0.54052734375}, {"start": 954.75, "end": 955.03, "word": ".9", "probability": 0.982666015625}, {"start": 955.03, "end": 955.19, "word": " or", "probability": 0.85400390625}, {"start": 955.19, "end": 955.41, "word": " 0", "probability": 0.97607421875}, {"start": 955.41, "end": 955.75, "word": ".85", "probability": 0.994140625}, {"start": 955.75, "end": 956.15, "word": " or", "probability": 0.76416015625}, {"start": 956.15, "end": 956.47, "word": " something", "probability": 
0.8671875}, {"start": 956.47, "end": 956.73, "word": " like", "probability": 0.93798828125}, {"start": 956.73, "end": 957.05, "word": " that,", "probability": 0.93701171875}, {"start": 957.31, "end": 957.49, "word": " but", "probability": 0.9072265625}, {"start": 957.49, "end": 957.63, "word": " it's", "probability": 0.801513671875}, {"start": 957.63, "end": 957.81, "word": " not", "probability": 0.94580078125}, {"start": 957.81, "end": 959.33, "word": " exactly", "probability": 0.6015625}, {"start": 959.33, "end": 960.51, "word": " equal", "probability": 0.9208984375}, {"start": 960.51, "end": 960.81, "word": " one.", "probability": 0.54541015625}, {"start": 961.57, "end": 962.07, "word": " Because", "probability": 0.92724609375}, {"start": 962.07, "end": 962.77, "word": " equal", "probability": 0.88671875}, {"start": 962.77, "end": 962.99, "word": " one,", "probability": 0.92626953125}, {"start": 963.09, "end": 963.15, "word": " it", "probability": 0.92333984375}, {"start": 963.15, "end": 963.47, "word": " means", "probability": 0.91796875}, {"start": 963.47, "end": 963.81, "word": " if", "probability": 0.75390625}, {"start": 963.81, "end": 963.97, "word": " you", "probability": 0.53173828125}, {"start": 963.97, "end": 964.05, "word": " know", "probability": 0.88525390625}, {"start": 964.05, "end": 964.21, "word": " the", "probability": 0.91015625}, {"start": 964.21, "end": 964.59, "word": " value", "probability": 0.9853515625}, {"start": 964.59, "end": 965.03, "word": " of", "probability": 0.96484375}, {"start": 965.03, "end": 965.33, "word": " a", "probability": 0.69677734375}, {"start": 965.33, "end": 965.93, "word": " child's", "probability": 0.75537109375}, {"start": 965.93, "end": 966.23, "word": " age,", "probability": 0.958984375}, {"start": 966.69, "end": 966.97, "word": " then", "probability": 0.84912109375}, {"start": 966.97, "end": 967.15, "word": " you", "probability": 0.95654296875}, {"start": 967.15, "end": 967.37, "word": " can", "probability": 
0.93798828125}, {"start": 967.37, "end": 967.87, "word": " predict", "probability": 0.90625}, {"start": 967.87, "end": 968.15, "word": " the", "probability": 0.90771484375}, {"start": 968.15, "end": 968.63, "word": " exact", "probability": 0.93212890625}, {"start": 968.63, "end": 969.01, "word": " weight.", "probability": 0.9189453125}, {"start": 969.97, "end": 970.25, "word": " And", "probability": 0.93798828125}, {"start": 970.25, "end": 970.43, "word": " that", "probability": 0.93505859375}, {"start": 970.43, "end": 970.69, "word": " never", "probability": 0.8955078125}, {"start": 970.69, "end": 971.05, "word": " happened.", "probability": 0.61279296875}, {"start": 972.15, "end": 972.41, "word": " If", "probability": 0.90673828125}, {"start": 972.41, "end": 972.61, "word": " the", "probability": 0.9130859375}, {"start": 972.61, "end": 972.97, "word": " data", "probability": 0.93896484375}, {"start": 972.97, "end": 973.51, "word": " looks", "probability": 0.84375}, {"start": 973.51, "end": 974.23, "word": " like", "probability": 0.93896484375}, {"start": 974.23, "end": 975.53, "word": " this", "probability": 0.93359375}, {"start": 975.53, "end": 975.99, "word": " table,", "probability": 0.9150390625}, {"start": 976.09, "end": 976.23, "word": " for", "probability": 0.9482421875}, {"start": 976.23, "end": 976.61, "word": " example.", "probability": 0.9755859375}, {"start": 977.17, "end": 977.65, "word": " Suppose", "probability": 0.8388671875}, {"start": 977.65, "end": 978.07, "word": " here", "probability": 0.75146484375}, {"start": 978.07, "end": 978.35, "word": " we", "probability": 0.79638671875}, {"start": 978.35, "end": 978.77, "word": " have", "probability": 0.93994140625}, {"start": 978.77, "end": 979.21, "word": " age", "probability": 0.88623046875}, {"start": 979.21, "end": 980.83, "word": " and", "probability": 0.8818359375}, {"start": 980.83, "end": 981.21, "word": " weight.", "probability": 0.91796875}], "temperature": 1.0}, {"id": 38, "seek": 100799, 
"start": 982.11, "end": 1007.99, "text": " H1 for example 3, 5, 7 weight for example 10, 12, 14, 16 in this case they exist perfect because x increases by 2 units also weights increases by 2 units or maybe weights for example 9, 12, 15, 18 and so on", "tokens": [389, 16, 337, 1365, 805, 11, 1025, 11, 1614, 3364, 337, 1365, 1266, 11, 2272, 11, 3499, 11, 3165, 294, 341, 1389, 436, 2514, 2176, 570, 2031, 8637, 538, 568, 6815, 611, 17443, 8637, 538, 568, 6815, 420, 1310, 17443, 337, 1365, 1722, 11, 2272, 11, 2119, 11, 2443, 293, 370, 322], "avg_logprob": -0.2387971743097845, "compression_ratio": 1.556390977443609, "no_speech_prob": 0.0, "words": [{"start": 982.11, "end": 982.65, "word": " H1", "probability": 0.49737548828125}, {"start": 982.65, "end": 982.83, "word": " for", "probability": 0.7548828125}, {"start": 982.83, "end": 983.19, "word": " example", "probability": 0.9560546875}, {"start": 983.19, "end": 983.55, "word": " 3,", "probability": 0.5009765625}, {"start": 983.75, "end": 984.23, "word": " 5,", "probability": 0.615234375}, {"start": 984.35, "end": 984.73, "word": " 7", "probability": 0.96533203125}, {"start": 984.73, "end": 985.57, "word": " weight", "probability": 0.3408203125}, {"start": 985.57, "end": 985.75, "word": " for", "probability": 0.8720703125}, {"start": 985.75, "end": 986.13, "word": " example", "probability": 0.96875}, {"start": 986.13, "end": 987.01, "word": " 10,", "probability": 0.875}, {"start": 987.29, "end": 987.73, "word": " 12,", "probability": 0.849609375}, {"start": 987.97, "end": 988.49, "word": " 14,", "probability": 0.8701171875}, {"start": 988.65, "end": 989.15, "word": " 16", "probability": 0.9208984375}, {"start": 989.15, "end": 990.53, "word": " in", "probability": 0.421142578125}, {"start": 990.53, "end": 990.85, "word": " this", "probability": 0.94921875}, {"start": 990.85, "end": 991.35, "word": " case", "probability": 0.91845703125}, {"start": 991.35, "end": 991.95, "word": " they", "probability": 0.427490234375}, 
{"start": 991.95, "end": 992.45, "word": " exist", "probability": 0.82666015625}, {"start": 992.45, "end": 993.03, "word": " perfect", "probability": 0.8955078125}, {"start": 993.03, "end": 994.35, "word": " because", "probability": 0.77685546875}, {"start": 994.35, "end": 994.83, "word": " x", "probability": 0.55908203125}, {"start": 994.83, "end": 995.31, "word": " increases", "probability": 0.83935546875}, {"start": 995.31, "end": 995.55, "word": " by", "probability": 0.96923828125}, {"start": 995.55, "end": 995.77, "word": " 2", "probability": 0.6337890625}, {"start": 995.77, "end": 996.23, "word": " units", "probability": 0.94140625}, {"start": 996.23, "end": 997.61, "word": " also", "probability": 0.67529296875}, {"start": 997.61, "end": 998.07, "word": " weights", "probability": 0.479248046875}, {"start": 998.07, "end": 998.61, "word": " increases", "probability": 0.8212890625}, {"start": 998.61, "end": 998.89, "word": " by", "probability": 0.96630859375}, {"start": 998.89, "end": 999.09, "word": " 2", "probability": 0.92041015625}, {"start": 999.09, "end": 999.43, "word": " units", "probability": 0.94775390625}, {"start": 999.43, "end": 999.93, "word": " or", "probability": 0.83056640625}, {"start": 999.93, "end": 1001.19, "word": " maybe", "probability": 0.9072265625}, {"start": 1001.19, "end": 1001.69, "word": " weights", "probability": 0.82568359375}, {"start": 1001.69, "end": 1001.91, "word": " for", "probability": 0.9267578125}, {"start": 1001.91, "end": 1002.37, "word": " example", "probability": 0.96630859375}, {"start": 1002.37, "end": 1003.47, "word": " 9,", "probability": 0.93603515625}, {"start": 1003.67, "end": 1005.71, "word": " 12,", "probability": 0.8505859375}, {"start": 1005.95, "end": 1006.61, "word": " 15,", "probability": 0.912109375}, {"start": 1007.01, "end": 1007.31, "word": " 18", "probability": 0.9248046875}, {"start": 1007.31, "end": 1007.59, "word": " and", "probability": 0.869140625}, {"start": 1007.59, "end": 1007.77, "word": " 
so", "probability": 0.947265625}, {"start": 1007.77, "end": 1007.99, "word": " on", "probability": 0.9541015625}], "temperature": 1.0}, {"id": 39, "seek": 103678, "start": 1008.72, "end": 1036.78, "text": " So X or A is increased by two units for each value for each individual and also weights are increased by three units for each person. In this case there exists perfect relationship but that never happened in real life. So perfect means all points are lie on the straight line otherwise if the points are close", "tokens": [407, 1783, 420, 316, 307, 6505, 538, 732, 6815, 337, 1184, 2158, 337, 1184, 2609, 293, 611, 17443, 366, 6505, 538, 1045, 6815, 337, 1184, 954, 13, 682, 341, 1389, 456, 8198, 2176, 2480, 457, 300, 1128, 2011, 294, 957, 993, 13, 407, 2176, 1355, 439, 2793, 366, 4544, 322, 264, 2997, 1622, 5911, 498, 264, 2793, 366, 1998], "avg_logprob": -0.2638020724058151, "compression_ratio": 1.6756756756756757, "no_speech_prob": 0.0, "words": [{"start": 1008.72, "end": 1009.0, "word": " So", "probability": 0.495849609375}, {"start": 1009.0, "end": 1009.4, "word": " X", "probability": 0.43798828125}, {"start": 1009.4, "end": 1009.86, "word": " or", "probability": 0.80810546875}, {"start": 1009.86, "end": 1010.06, "word": " A", "probability": 0.363037109375}, {"start": 1010.06, "end": 1010.18, "word": " is", "probability": 0.62646484375}, {"start": 1010.18, "end": 1010.46, "word": " increased", "probability": 0.916015625}, {"start": 1010.46, "end": 1010.64, "word": " by", "probability": 0.9658203125}, {"start": 1010.64, "end": 1010.82, "word": " two", "probability": 0.46484375}, {"start": 1010.82, "end": 1011.26, "word": " units", "probability": 0.94921875}, {"start": 1011.26, "end": 1011.82, "word": " for", "probability": 0.81005859375}, {"start": 1011.82, "end": 1012.3, "word": " each", "probability": 0.9482421875}, {"start": 1012.3, "end": 1012.7, "word": " value", "probability": 0.9453125}, {"start": 1012.7, "end": 1012.94, "word": " for", "probability": 
0.53564453125}, {"start": 1012.94, "end": 1013.26, "word": " each", "probability": 0.94189453125}, {"start": 1013.26, "end": 1013.94, "word": " individual", "probability": 0.89208984375}, {"start": 1013.94, "end": 1014.82, "word": " and", "probability": 0.459228515625}, {"start": 1014.82, "end": 1015.22, "word": " also", "probability": 0.822265625}, {"start": 1015.22, "end": 1015.66, "word": " weights", "probability": 0.81298828125}, {"start": 1015.66, "end": 1016.34, "word": " are", "probability": 0.9091796875}, {"start": 1016.34, "end": 1017.22, "word": " increased", "probability": 0.9306640625}, {"start": 1017.22, "end": 1017.66, "word": " by", "probability": 0.95556640625}, {"start": 1017.66, "end": 1018.86, "word": " three", "probability": 0.822265625}, {"start": 1018.86, "end": 1019.2, "word": " units", "probability": 0.9052734375}, {"start": 1019.2, "end": 1019.4, "word": " for", "probability": 0.93505859375}, {"start": 1019.4, "end": 1019.64, "word": " each", "probability": 0.94921875}, {"start": 1019.64, "end": 1020.12, "word": " person.", "probability": 0.90283203125}, {"start": 1021.0, "end": 1021.58, "word": " In", "probability": 0.8837890625}, {"start": 1021.58, "end": 1021.86, "word": " this", "probability": 0.9462890625}, {"start": 1021.86, "end": 1022.28, "word": " case", "probability": 0.92041015625}, {"start": 1022.28, "end": 1022.62, "word": " there", "probability": 0.576171875}, {"start": 1022.62, "end": 1023.08, "word": " exists", "probability": 0.8154296875}, {"start": 1023.08, "end": 1023.7, "word": " perfect", "probability": 0.779296875}, {"start": 1023.7, "end": 1024.86, "word": " relationship", "probability": 0.91064453125}, {"start": 1024.86, "end": 1025.84, "word": " but", "probability": 0.30029296875}, {"start": 1025.84, "end": 1026.02, "word": " that", "probability": 0.93603515625}, {"start": 1026.02, "end": 1026.3, "word": " never", "probability": 0.92236328125}, {"start": 1026.3, "end": 1026.62, "word": " happened", "probability": 
0.53173828125}, {"start": 1026.62, "end": 1026.82, "word": " in", "probability": 0.84326171875}, {"start": 1026.82, "end": 1026.98, "word": " real", "probability": 0.87060546875}, {"start": 1026.98, "end": 1027.52, "word": " life.", "probability": 0.93310546875}, {"start": 1028.78, "end": 1029.24, "word": " So", "probability": 0.8564453125}, {"start": 1029.24, "end": 1030.5, "word": " perfect", "probability": 0.8037109375}, {"start": 1030.5, "end": 1031.1, "word": " means", "probability": 0.91650390625}, {"start": 1031.1, "end": 1031.5, "word": " all", "probability": 0.9423828125}, {"start": 1031.5, "end": 1031.84, "word": " points", "probability": 0.9423828125}, {"start": 1031.84, "end": 1032.18, "word": " are", "probability": 0.82568359375}, {"start": 1032.18, "end": 1033.06, "word": " lie", "probability": 0.3310546875}, {"start": 1033.06, "end": 1033.3, "word": " on", "probability": 0.94970703125}, {"start": 1033.3, "end": 1033.44, "word": " the", "probability": 0.70458984375}, {"start": 1033.44, "end": 1033.76, "word": " straight", "probability": 0.9072265625}, {"start": 1033.76, "end": 1034.08, "word": " line", "probability": 0.93310546875}, {"start": 1034.08, "end": 1034.96, "word": " otherwise", "probability": 0.59619140625}, {"start": 1034.96, "end": 1035.58, "word": " if", "probability": 0.9189453125}, {"start": 1035.58, "end": 1035.76, "word": " the", "probability": 0.91650390625}, {"start": 1035.76, "end": 1036.06, "word": " points", "probability": 0.9365234375}, {"start": 1036.06, "end": 1036.26, "word": " are", "probability": 0.9462890625}, {"start": 1036.26, "end": 1036.78, "word": " close", "probability": 0.484619140625}], "temperature": 1.0}, {"id": 40, "seek": 105623, "start": 1038.15, "end": 1056.23, "text": " Then we can say there exists strong. 
Here if you look carefully at these points corresponding to this regression line, it looks like not strong because some of the points are not closed, so you can say there exists maybe moderate negative relationship.", "tokens": [1396, 321, 393, 584, 456, 8198, 2068, 13, 1692, 498, 291, 574, 7500, 412, 613, 2793, 11760, 281, 341, 24590, 1622, 11, 309, 1542, 411, 406, 2068, 570, 512, 295, 264, 2793, 366, 406, 5395, 11, 370, 291, 393, 584, 456, 8198, 1310, 18174, 3671, 2480, 13], "avg_logprob": -0.24235025979578495, "compression_ratio": 1.5714285714285714, "no_speech_prob": 0.0, "words": [{"start": 1038.15, "end": 1038.49, "word": " Then", "probability": 0.44091796875}, {"start": 1038.49, "end": 1038.65, "word": " we", "probability": 0.8212890625}, {"start": 1038.65, "end": 1038.85, "word": " can", "probability": 0.9140625}, {"start": 1038.85, "end": 1039.05, "word": " say", "probability": 0.61376953125}, {"start": 1039.05, "end": 1039.23, "word": " there", "probability": 0.650390625}, {"start": 1039.23, "end": 1039.63, "word": " exists", "probability": 0.625}, {"start": 1039.63, "end": 1040.05, "word": " strong.", "probability": 0.65380859375}, {"start": 1040.73, "end": 1041.05, "word": " Here", "probability": 0.80126953125}, {"start": 1041.05, "end": 1041.23, "word": " if", "probability": 0.8134765625}, {"start": 1041.23, "end": 1041.25, "word": " you", "probability": 0.830078125}, {"start": 1041.25, "end": 1041.45, "word": " look", "probability": 0.96630859375}, {"start": 1041.45, "end": 1041.83, "word": " carefully", "probability": 0.78271484375}, {"start": 1041.83, "end": 1042.03, "word": " at", "probability": 0.8203125}, {"start": 1042.03, "end": 1042.21, "word": " these", "probability": 0.7978515625}, {"start": 1042.21, "end": 1042.69, "word": " points", "probability": 0.91845703125}, {"start": 1042.69, "end": 1044.75, "word": " corresponding", "probability": 0.5068359375}, {"start": 1044.75, "end": 1045.45, "word": " to", "probability": 0.96923828125}, 
{"start": 1045.45, "end": 1045.73, "word": " this", "probability": 0.921875}, {"start": 1045.73, "end": 1046.17, "word": " regression", "probability": 0.962890625}, {"start": 1046.17, "end": 1046.67, "word": " line,", "probability": 0.931640625}, {"start": 1047.97, "end": 1048.63, "word": " it", "probability": 0.9326171875}, {"start": 1048.63, "end": 1048.89, "word": " looks", "probability": 0.8232421875}, {"start": 1048.89, "end": 1049.27, "word": " like", "probability": 0.9296875}, {"start": 1049.27, "end": 1049.57, "word": " not", "probability": 0.880859375}, {"start": 1049.57, "end": 1050.15, "word": " strong", "probability": 0.87353515625}, {"start": 1050.15, "end": 1050.61, "word": " because", "probability": 0.62939453125}, {"start": 1050.61, "end": 1050.97, "word": " some", "probability": 0.908203125}, {"start": 1050.97, "end": 1051.11, "word": " of", "probability": 0.96240234375}, {"start": 1051.11, "end": 1051.21, "word": " the", "probability": 0.9140625}, {"start": 1051.21, "end": 1051.49, "word": " points", "probability": 0.9111328125}, {"start": 1051.49, "end": 1051.65, "word": " are", "probability": 0.94140625}, {"start": 1051.65, "end": 1051.85, "word": " not", "probability": 0.94921875}, {"start": 1051.85, "end": 1052.11, "word": " closed,", "probability": 0.67626953125}, {"start": 1052.39, "end": 1052.49, "word": " so", "probability": 0.93017578125}, {"start": 1052.49, "end": 1052.63, "word": " you", "probability": 0.54638671875}, {"start": 1052.63, "end": 1052.81, "word": " can", "probability": 0.94384765625}, {"start": 1052.81, "end": 1053.01, "word": " say", "probability": 0.8681640625}, {"start": 1053.01, "end": 1053.19, "word": " there", "probability": 0.8623046875}, {"start": 1053.19, "end": 1053.67, "word": " exists", "probability": 0.8310546875}, {"start": 1053.67, "end": 1054.37, "word": " maybe", "probability": 0.833984375}, {"start": 1054.37, "end": 1054.83, "word": " moderate", "probability": 0.81494140625}, {"start": 1054.83, "end": 
1055.45, "word": " negative", "probability": 0.52880859375}, {"start": 1055.45, "end": 1056.23, "word": " relationship.", "probability": 0.89892578125}], "temperature": 1.0}, {"id": 41, "seek": 108501, "start": 1057.61, "end": 1085.01, "text": " This one, most of the points are scattered away from the straight line, so there exists weak relationship. So by just looking at the scatter path, sometimes you can, sometimes it's hard to tell, but most of the time you can tell at least the direction, positive or negative, the form, linear or non-linear, or the strength of the relationship.", "tokens": [639, 472, 11, 881, 295, 264, 2793, 366, 21986, 1314, 490, 264, 2997, 1622, 11, 370, 456, 8198, 5336, 2480, 13, 407, 538, 445, 1237, 412, 264, 34951, 3100, 11, 2171, 291, 393, 11, 2171, 309, 311, 1152, 281, 980, 11, 457, 881, 295, 264, 565, 291, 393, 980, 412, 1935, 264, 3513, 11, 3353, 420, 3671, 11, 264, 1254, 11, 8213, 420, 2107, 12, 28263, 11, 420, 264, 3800, 295, 264, 2480, 13], "avg_logprob": -0.18229167103767396, "compression_ratio": 1.7373737373737375, "no_speech_prob": 0.0, "words": [{"start": 1057.61, "end": 1058.23, "word": " This", "probability": 0.357421875}, {"start": 1058.23, "end": 1058.49, "word": " one,", "probability": 0.8984375}, {"start": 1058.67, "end": 1058.85, "word": " most", "probability": 0.88134765625}, {"start": 1058.85, "end": 1058.97, "word": " of", "probability": 0.95361328125}, {"start": 1058.97, "end": 1059.09, "word": " the", "probability": 0.89990234375}, {"start": 1059.09, "end": 1059.33, "word": " points", "probability": 0.91943359375}, {"start": 1059.33, "end": 1059.53, "word": " are", "probability": 0.92236328125}, {"start": 1059.53, "end": 1059.93, "word": " scattered", "probability": 0.79736328125}, {"start": 1059.93, "end": 1060.41, "word": " away", "probability": 0.87353515625}, {"start": 1060.41, "end": 1061.03, "word": " from", "probability": 0.84033203125}, {"start": 1061.03, "end": 1061.33, "word": " the", "probability": 
0.87744140625}, {"start": 1061.33, "end": 1061.71, "word": " straight", "probability": 0.9208984375}, {"start": 1061.71, "end": 1062.01, "word": " line,", "probability": 0.51171875}, {"start": 1062.19, "end": 1062.21, "word": " so", "probability": 0.9091796875}, {"start": 1062.21, "end": 1062.39, "word": " there", "probability": 0.77490234375}, {"start": 1062.39, "end": 1062.71, "word": " exists", "probability": 0.78125}, {"start": 1062.71, "end": 1063.11, "word": " weak", "probability": 0.91845703125}, {"start": 1063.11, "end": 1064.55, "word": " relationship.", "probability": 0.8525390625}, {"start": 1065.39, "end": 1065.61, "word": " So", "probability": 0.88427734375}, {"start": 1065.61, "end": 1065.83, "word": " by", "probability": 0.67138671875}, {"start": 1065.83, "end": 1066.11, "word": " just", "probability": 0.90869140625}, {"start": 1066.11, "end": 1066.51, "word": " looking", "probability": 0.90087890625}, {"start": 1066.51, "end": 1066.93, "word": " at", "probability": 0.9619140625}, {"start": 1066.93, "end": 1067.19, "word": " the", "probability": 0.87646484375}, {"start": 1067.19, "end": 1067.49, "word": " scatter", "probability": 0.68994140625}, {"start": 1067.49, "end": 1067.83, "word": " path,", "probability": 0.46923828125}, {"start": 1068.25, "end": 1068.73, "word": " sometimes", "probability": 0.9423828125}, {"start": 1068.73, "end": 1069.03, "word": " you", "probability": 0.9423828125}, {"start": 1069.03, "end": 1069.37, "word": " can,", "probability": 0.94970703125}, {"start": 1069.77, "end": 1070.29, "word": " sometimes", "probability": 0.931640625}, {"start": 1070.29, "end": 1070.61, "word": " it's", "probability": 0.875244140625}, {"start": 1070.61, "end": 1070.95, "word": " hard", "probability": 0.908203125}, {"start": 1070.95, "end": 1071.17, "word": " to", "probability": 0.94921875}, {"start": 1071.17, "end": 1071.33, "word": " tell,", "probability": 0.89453125}, {"start": 1071.91, "end": 1072.21, "word": " but", "probability": 
0.91357421875}, {"start": 1072.21, "end": 1072.51, "word": " most", "probability": 0.89501953125}, {"start": 1072.51, "end": 1072.63, "word": " of", "probability": 0.96875}, {"start": 1072.63, "end": 1072.73, "word": " the", "probability": 0.92236328125}, {"start": 1072.73, "end": 1072.95, "word": " time", "probability": 0.8779296875}, {"start": 1072.95, "end": 1073.11, "word": " you", "probability": 0.779296875}, {"start": 1073.11, "end": 1073.29, "word": " can", "probability": 0.94677734375}, {"start": 1073.29, "end": 1073.61, "word": " tell", "probability": 0.87646484375}, {"start": 1073.61, "end": 1074.77, "word": " at", "probability": 0.84521484375}, {"start": 1074.77, "end": 1075.15, "word": " least", "probability": 0.9560546875}, {"start": 1075.15, "end": 1076.35, "word": " the", "probability": 0.78759765625}, {"start": 1076.35, "end": 1076.83, "word": " direction,", "probability": 0.96826171875}, {"start": 1077.29, "end": 1077.67, "word": " positive", "probability": 0.9228515625}, {"start": 1077.67, "end": 1077.93, "word": " or", "probability": 0.966796875}, {"start": 1077.93, "end": 1078.25, "word": " negative,", "probability": 0.9443359375}, {"start": 1079.41, "end": 1079.75, "word": " the", "probability": 0.90869140625}, {"start": 1079.75, "end": 1080.17, "word": " form,", "probability": 0.92578125}, {"start": 1080.43, "end": 1080.65, "word": " linear", "probability": 0.88232421875}, {"start": 1080.65, "end": 1081.05, "word": " or", "probability": 0.95947265625}, {"start": 1081.05, "end": 1081.33, "word": " non", "probability": 0.99072265625}, {"start": 1081.33, "end": 1081.59, "word": "-linear,", "probability": 0.76220703125}, {"start": 1082.23, "end": 1082.53, "word": " or", "probability": 0.9638671875}, {"start": 1082.53, "end": 1083.23, "word": " the", "probability": 0.8955078125}, {"start": 1083.23, "end": 1083.67, "word": " strength", "probability": 0.85595703125}, {"start": 1083.67, "end": 1084.15, "word": " of", "probability": 0.96728515625}, 
{"start": 1084.15, "end": 1084.41, "word": " the", "probability": 0.9169921875}, {"start": 1084.41, "end": 1085.01, "word": " relationship.", "probability": 0.90576171875}], "temperature": 1.0}, {"id": 42, "seek": 110966, "start": 1085.84, "end": 1109.66, "text": " The last one here, now x increases, y remains the same. For example, suppose x is 1, y is 10. x increases to 2, y still is 10. So as x increases, y stays the same position, it means there is no linear relationship between the two variables. So based on the scatter plot you can", "tokens": [440, 1036, 472, 510, 11, 586, 2031, 8637, 11, 288, 7023, 264, 912, 13, 1171, 1365, 11, 7297, 2031, 307, 502, 11, 288, 307, 1266, 13, 2031, 8637, 281, 568, 11, 288, 920, 307, 1266, 13, 407, 382, 2031, 8637, 11, 288, 10834, 264, 912, 2535, 11, 309, 1355, 456, 307, 572, 8213, 2480, 1296, 264, 732, 9102, 13, 407, 2361, 322, 264, 34951, 7542, 291, 393], "avg_logprob": -0.17865349681061857, "compression_ratio": 1.606936416184971, "no_speech_prob": 0.0, "words": [{"start": 1085.84, "end": 1086.06, "word": " The", "probability": 0.71142578125}, {"start": 1086.06, "end": 1086.32, "word": " last", "probability": 0.88232421875}, {"start": 1086.32, "end": 1086.58, "word": " one", "probability": 0.92333984375}, {"start": 1086.58, "end": 1086.88, "word": " here,", "probability": 0.84228515625}, {"start": 1088.38, "end": 1088.8, "word": " now", "probability": 0.85205078125}, {"start": 1088.8, "end": 1089.1, "word": " x", "probability": 0.55078125}, {"start": 1089.1, "end": 1089.52, "word": " increases,", "probability": 0.88134765625}, {"start": 1090.2, "end": 1091.04, "word": " y", "probability": 0.93798828125}, {"start": 1091.04, "end": 1091.44, "word": " remains", "probability": 0.63720703125}, {"start": 1091.44, "end": 1091.66, "word": " the", "probability": 0.90625}, {"start": 1091.66, "end": 1091.92, "word": " same.", "probability": 0.91015625}, {"start": 1092.94, "end": 1093.46, "word": " For", "probability": 0.9501953125}, 
{"start": 1093.46, "end": 1093.8, "word": " example,", "probability": 0.97216796875}, {"start": 1093.88, "end": 1094.12, "word": " suppose", "probability": 0.8759765625}, {"start": 1094.12, "end": 1094.38, "word": " x", "probability": 0.9794921875}, {"start": 1094.38, "end": 1094.54, "word": " is", "probability": 0.876953125}, {"start": 1094.54, "end": 1094.82, "word": " 1,", "probability": 0.45947265625}, {"start": 1095.26, "end": 1095.46, "word": " y", "probability": 0.98046875}, {"start": 1095.46, "end": 1095.64, "word": " is", "probability": 0.94580078125}, {"start": 1095.64, "end": 1095.92, "word": " 10.", "probability": 0.94580078125}, {"start": 1096.42, "end": 1096.8, "word": " x", "probability": 0.7568359375}, {"start": 1096.8, "end": 1097.22, "word": " increases", "probability": 0.93115234375}, {"start": 1097.22, "end": 1097.48, "word": " to", "probability": 0.96826171875}, {"start": 1097.48, "end": 1097.76, "word": " 2,", "probability": 0.9248046875}, {"start": 1097.98, "end": 1098.2, "word": " y", "probability": 0.994140625}, {"start": 1098.2, "end": 1098.58, "word": " still", "probability": 0.6533203125}, {"start": 1098.58, "end": 1098.72, "word": " is", "probability": 0.8671875}, {"start": 1098.72, "end": 1098.98, "word": " 10.", "probability": 0.951171875}, {"start": 1099.52, "end": 1099.76, "word": " So", "probability": 0.9423828125}, {"start": 1099.76, "end": 1100.0, "word": " as", "probability": 0.66162109375}, {"start": 1100.0, "end": 1100.26, "word": " x", "probability": 0.99462890625}, {"start": 1100.26, "end": 1100.76, "word": " increases,", "probability": 0.92138671875}, {"start": 1100.96, "end": 1101.2, "word": " y", "probability": 0.9892578125}, {"start": 1101.2, "end": 1101.72, "word": " stays", "probability": 0.81201171875}, {"start": 1101.72, "end": 1102.0, "word": " the", "probability": 0.46875}, {"start": 1102.0, "end": 1102.22, "word": " same", "probability": 0.91357421875}, {"start": 1102.22, "end": 1102.7, "word": " position,", 
"probability": 0.93994140625}, {"start": 1103.16, "end": 1103.44, "word": " it", "probability": 0.8974609375}, {"start": 1103.44, "end": 1103.74, "word": " means", "probability": 0.92138671875}, {"start": 1103.74, "end": 1103.98, "word": " there", "probability": 0.89599609375}, {"start": 1103.98, "end": 1104.16, "word": " is", "probability": 0.9404296875}, {"start": 1104.16, "end": 1104.5, "word": " no", "probability": 0.95068359375}, {"start": 1104.5, "end": 1105.54, "word": " linear", "probability": 0.865234375}, {"start": 1105.54, "end": 1106.14, "word": " relationship", "probability": 0.9189453125}, {"start": 1106.14, "end": 1106.54, "word": " between", "probability": 0.87158203125}, {"start": 1106.54, "end": 1106.74, "word": " the", "probability": 0.91552734375}, {"start": 1106.74, "end": 1106.88, "word": " two", "probability": 0.91650390625}, {"start": 1106.88, "end": 1107.28, "word": " variables.", "probability": 0.87841796875}, {"start": 1107.9, "end": 1108.08, "word": " So", "probability": 0.94873046875}, {"start": 1108.08, "end": 1108.34, "word": " based", "probability": 0.796875}, {"start": 1108.34, "end": 1108.5, "word": " on", "probability": 0.95068359375}, {"start": 1108.5, "end": 1108.62, "word": " the", "probability": 0.90966796875}, {"start": 1108.62, "end": 1108.9, "word": " scatter", "probability": 0.75146484375}, {"start": 1108.9, "end": 1109.12, "word": " plot", "probability": 0.5400390625}, {"start": 1109.12, "end": 1109.34, "word": " you", "probability": 0.51318359375}, {"start": 1109.34, "end": 1109.66, "word": " can", "probability": 0.94287109375}], "temperature": 1.0}, {"id": 43, "seek": 113159, "start": 1110.56, "end": 1131.6, "text": " have an idea about the relationship between the two variables. Here I will give a simple example in order to determine the correlation coefficient. 
A real estate agent wishes to examine the relationship between selling the price of a home", "tokens": [362, 364, 1558, 466, 264, 2480, 1296, 264, 732, 9102, 13, 1692, 286, 486, 976, 257, 2199, 1365, 294, 1668, 281, 6997, 264, 20009, 17619, 13, 316, 957, 9749, 9461, 15065, 281, 17496, 264, 2480, 1296, 6511, 264, 3218, 295, 257, 1280], "avg_logprob": -0.14280522632044415, "compression_ratio": 1.5933333333333333, "no_speech_prob": 0.0, "words": [{"start": 1110.56, "end": 1110.94, "word": " have", "probability": 0.291259765625}, {"start": 1110.94, "end": 1111.18, "word": " an", "probability": 0.94775390625}, {"start": 1111.18, "end": 1111.52, "word": " idea", "probability": 0.92578125}, {"start": 1111.52, "end": 1112.0, "word": " about", "probability": 0.89892578125}, {"start": 1112.0, "end": 1112.5, "word": " the", "probability": 0.9130859375}, {"start": 1112.5, "end": 1113.24, "word": " relationship", "probability": 0.90380859375}, {"start": 1113.24, "end": 1113.8, "word": " between", "probability": 0.8701171875}, {"start": 1113.8, "end": 1114.9, "word": " the", "probability": 0.83251953125}, {"start": 1114.9, "end": 1115.66, "word": " two", "probability": 0.927734375}, {"start": 1115.66, "end": 1116.34, "word": " variables.", "probability": 0.935546875}, {"start": 1116.82, "end": 1117.02, "word": " Here", "probability": 0.798828125}, {"start": 1117.02, "end": 1117.1, "word": " I", "probability": 0.70068359375}, {"start": 1117.1, "end": 1117.22, "word": " will", "probability": 0.8662109375}, {"start": 1117.22, "end": 1117.48, "word": " give", "probability": 0.88134765625}, {"start": 1117.48, "end": 1117.8, "word": " a", "probability": 0.8974609375}, {"start": 1117.8, "end": 1118.04, "word": " simple", "probability": 0.94482421875}, {"start": 1118.04, "end": 1118.46, "word": " example", "probability": 0.97412109375}, {"start": 1118.46, "end": 1118.9, "word": " in", "probability": 0.79052734375}, {"start": 1118.9, "end": 1119.08, "word": " order", "probability": 
0.92626953125}, {"start": 1119.08, "end": 1119.3, "word": " to", "probability": 0.96923828125}, {"start": 1119.3, "end": 1119.78, "word": " determine", "probability": 0.908203125}, {"start": 1119.78, "end": 1121.12, "word": " the", "probability": 0.8876953125}, {"start": 1121.12, "end": 1121.74, "word": " correlation", "probability": 0.935546875}, {"start": 1121.74, "end": 1122.2, "word": " coefficient.", "probability": 0.908203125}, {"start": 1123.58, "end": 1123.82, "word": " A", "probability": 0.93310546875}, {"start": 1123.82, "end": 1124.04, "word": " real", "probability": 0.962890625}, {"start": 1124.04, "end": 1124.48, "word": " estate", "probability": 0.89794921875}, {"start": 1124.48, "end": 1125.16, "word": " agent", "probability": 0.8837890625}, {"start": 1125.16, "end": 1126.0, "word": " wishes", "probability": 0.833984375}, {"start": 1126.0, "end": 1126.36, "word": " to", "probability": 0.970703125}, {"start": 1126.36, "end": 1126.88, "word": " examine", "probability": 0.95458984375}, {"start": 1126.88, "end": 1128.78, "word": " the", "probability": 0.89306640625}, {"start": 1128.78, "end": 1129.5, "word": " relationship", "probability": 0.8984375}, {"start": 1129.5, "end": 1130.02, "word": " between", "probability": 0.8857421875}, {"start": 1130.02, "end": 1130.38, "word": " selling", "probability": 0.841796875}, {"start": 1130.38, "end": 1130.56, "word": " the", "probability": 0.6865234375}, {"start": 1130.56, "end": 1130.96, "word": " price", "probability": 0.9296875}, {"start": 1130.96, "end": 1131.2, "word": " of", "probability": 0.97021484375}, {"start": 1131.2, "end": 1131.32, "word": " a", "probability": 0.98876953125}, {"start": 1131.32, "end": 1131.6, "word": " home", "probability": 0.87744140625}], "temperature": 1.0}, {"id": 44, "seek": 115802, "start": 1132.48, "end": 1158.02, "text": " and its size measured in square feet. So in this case, there are two variables of interest. One is called selling price of a home. 
So here, selling price of a home and its size. Now, selling price in $1,000.", "tokens": [293, 1080, 2744, 12690, 294, 3732, 3521, 13, 407, 294, 341, 1389, 11, 456, 366, 732, 9102, 295, 1179, 13, 1485, 307, 1219, 6511, 3218, 295, 257, 1280, 13, 407, 510, 11, 6511, 3218, 295, 257, 1280, 293, 1080, 2744, 13, 823, 11, 6511, 3218, 294, 1848, 16, 11, 1360, 13], "avg_logprob": -0.2223557669382829, "compression_ratio": 1.5757575757575757, "no_speech_prob": 0.0, "words": [{"start": 1132.48, "end": 1132.88, "word": " and", "probability": 0.461669921875}, {"start": 1132.88, "end": 1133.24, "word": " its", "probability": 0.68017578125}, {"start": 1133.24, "end": 1133.72, "word": " size", "probability": 0.81201171875}, {"start": 1133.72, "end": 1134.28, "word": " measured", "probability": 0.70361328125}, {"start": 1134.28, "end": 1134.58, "word": " in", "probability": 0.93994140625}, {"start": 1134.58, "end": 1134.96, "word": " square", "probability": 0.880859375}, {"start": 1134.96, "end": 1135.28, "word": " feet.", "probability": 0.95361328125}, {"start": 1135.8, "end": 1136.02, "word": " So", "probability": 0.88134765625}, {"start": 1136.02, "end": 1136.24, "word": " in", "probability": 0.76416015625}, {"start": 1136.24, "end": 1136.44, "word": " this", "probability": 0.947265625}, {"start": 1136.44, "end": 1136.66, "word": " case,", "probability": 0.92138671875}, {"start": 1136.74, "end": 1136.82, "word": " there", "probability": 0.908203125}, {"start": 1136.82, "end": 1136.98, "word": " are", "probability": 0.939453125}, {"start": 1136.98, "end": 1137.14, "word": " two", "probability": 0.92626953125}, {"start": 1137.14, "end": 1137.62, "word": " variables", "probability": 0.92041015625}, {"start": 1137.62, "end": 1137.98, "word": " of", "probability": 0.94287109375}, {"start": 1137.98, "end": 1138.46, "word": " interest.", "probability": 0.900390625}, {"start": 1138.88, "end": 1139.6, "word": " One", "probability": 0.88916015625}, {"start": 1139.6, "end": 1139.82, "word": 
" is", "probability": 0.896484375}, {"start": 1139.82, "end": 1140.22, "word": " called", "probability": 0.83837890625}, {"start": 1140.22, "end": 1141.82, "word": " selling", "probability": 0.77734375}, {"start": 1141.82, "end": 1142.4, "word": " price", "probability": 0.9326171875}, {"start": 1142.4, "end": 1143.76, "word": " of", "probability": 0.95654296875}, {"start": 1143.76, "end": 1143.88, "word": " a", "probability": 0.9931640625}, {"start": 1143.88, "end": 1144.14, "word": " home.", "probability": 0.892578125}, {"start": 1144.92, "end": 1145.32, "word": " So", "probability": 0.9326171875}, {"start": 1145.32, "end": 1145.6, "word": " here,", "probability": 0.486572265625}, {"start": 1146.0, "end": 1146.46, "word": " selling", "probability": 0.87060546875}, {"start": 1146.46, "end": 1149.14, "word": " price", "probability": 0.703125}, {"start": 1149.14, "end": 1151.74, "word": " of", "probability": 0.9609375}, {"start": 1151.74, "end": 1151.88, "word": " a", "probability": 0.98779296875}, {"start": 1151.88, "end": 1152.18, "word": " home", "probability": 0.89306640625}, {"start": 1152.18, "end": 1153.72, "word": " and", "probability": 0.4921875}, {"start": 1153.72, "end": 1154.84, "word": " its", "probability": 0.84716796875}, {"start": 1154.84, "end": 1155.32, "word": " size.", "probability": 0.85546875}, {"start": 1156.44, "end": 1156.7, "word": " Now,", "probability": 0.91748046875}, {"start": 1156.76, "end": 1157.0, "word": " selling", "probability": 0.876953125}, {"start": 1157.0, "end": 1157.44, "word": " price", "probability": 0.92138671875}, {"start": 1157.44, "end": 1157.64, "word": " in", "probability": 0.442138671875}, {"start": 1157.64, "end": 1157.88, "word": " $1", "probability": 0.5989990234375}, {"start": 1157.88, "end": 1158.02, "word": ",000.", "probability": 0.9990234375}], "temperature": 1.0}, {"id": 45, "seek": 118161, "start": 1165.36, "end": 1181.62, "text": " And size in feet squared. 
Here we have to distinguish between dependent and independent. So your dependent variable is house price, sometimes called response variable.", "tokens": [400, 2744, 294, 3521, 8889, 13, 1692, 321, 362, 281, 20206, 1296, 12334, 293, 6695, 13, 407, 428, 12334, 7006, 307, 1782, 3218, 11, 2171, 1219, 4134, 7006, 13], "avg_logprob": -0.21041666070620219, "compression_ratio": 1.3658536585365855, "no_speech_prob": 0.0, "words": [{"start": 1165.3600000000001, "end": 1166.2, "word": " And", "probability": 0.410888671875}, {"start": 1166.2, "end": 1166.7, "word": " size", "probability": 0.55908203125}, {"start": 1166.7, "end": 1167.0, "word": " in", "probability": 0.84228515625}, {"start": 1167.0, "end": 1167.2, "word": " feet", "probability": 0.94140625}, {"start": 1167.2, "end": 1167.6, "word": " squared.", "probability": 0.442138671875}, {"start": 1168.64, "end": 1169.02, "word": " Here", "probability": 0.82373046875}, {"start": 1169.02, "end": 1169.12, "word": " we", "probability": 0.736328125}, {"start": 1169.12, "end": 1169.28, "word": " have", "probability": 0.9189453125}, {"start": 1169.28, "end": 1169.38, "word": " to", "probability": 0.951171875}, {"start": 1169.38, "end": 1169.8, "word": " distinguish", "probability": 0.85888671875}, {"start": 1169.8, "end": 1171.08, "word": " between", "probability": 0.81640625}, {"start": 1171.08, "end": 1173.46, "word": " dependent", "probability": 0.544921875}, {"start": 1173.46, "end": 1173.94, "word": " and", "probability": 0.94775390625}, {"start": 1173.94, "end": 1174.4, "word": " independent.", "probability": 0.9052734375}, {"start": 1175.28, "end": 1175.64, "word": " So", "probability": 0.9296875}, {"start": 1175.64, "end": 1175.9, "word": " your", "probability": 0.7333984375}, {"start": 1175.9, "end": 1176.22, "word": " dependent", "probability": 0.87451171875}, {"start": 1176.22, "end": 1176.7, "word": " variable", "probability": 0.9169921875}, {"start": 1176.7, "end": 1176.98, "word": " is", "probability": 
0.947265625}, {"start": 1176.98, "end": 1177.3, "word": " house", "probability": 0.82080078125}, {"start": 1177.3, "end": 1177.9, "word": " price,", "probability": 0.90185546875}, {"start": 1179.34, "end": 1179.74, "word": " sometimes", "probability": 0.939453125}, {"start": 1179.74, "end": 1180.32, "word": " called", "probability": 0.8828125}, {"start": 1180.32, "end": 1181.18, "word": " response", "probability": 0.92578125}, {"start": 1181.18, "end": 1181.62, "word": " variable.", "probability": 0.8994140625}], "temperature": 1.0}, {"id": 46, "seek": 121291, "start": 1185.75, "end": 1212.91, "text": " The independent variable is the size, which is in square feet, sometimes called sub-planetary variable. So my Y is ceiling rise, and size is square feet, or size of the house. In this case, there are 10.", "tokens": [440, 6695, 7006, 307, 264, 2744, 11, 597, 307, 294, 3732, 3521, 11, 2171, 1219, 1422, 12, 16554, 302, 822, 7006, 13, 407, 452, 398, 307, 13655, 6272, 11, 293, 2744, 307, 3732, 3521, 11, 420, 2744, 295, 264, 1782, 13, 682, 341, 1389, 11, 456, 366, 1266, 13], "avg_logprob": -0.2570312625169754, "compression_ratio": 1.4166666666666667, "no_speech_prob": 0.0, "words": [{"start": 1185.75, "end": 1186.07, "word": " The", "probability": 0.74951171875}, {"start": 1186.07, "end": 1186.55, "word": " independent", "probability": 0.80126953125}, {"start": 1186.55, "end": 1186.95, "word": " variable", "probability": 0.86474609375}, {"start": 1186.95, "end": 1187.51, "word": " is", "probability": 0.9326171875}, {"start": 1187.51, "end": 1187.85, "word": " the", "probability": 0.51708984375}, {"start": 1187.85, "end": 1188.87, "word": " size,", "probability": 0.81982421875}, {"start": 1189.01, "end": 1189.21, "word": " which", "probability": 0.939453125}, {"start": 1189.21, "end": 1189.37, "word": " is", "probability": 0.86474609375}, {"start": 1189.37, "end": 1189.49, "word": " in", "probability": 0.9140625}, {"start": 1189.49, "end": 1189.79, "word": " square", 
"probability": 0.88818359375}, {"start": 1189.79, "end": 1190.25, "word": " feet,", "probability": 0.95068359375}, {"start": 1190.87, "end": 1193.29, "word": " sometimes", "probability": 0.93359375}, {"start": 1193.29, "end": 1193.73, "word": " called", "probability": 0.88720703125}, {"start": 1193.73, "end": 1194.13, "word": " sub", "probability": 0.154541015625}, {"start": 1194.13, "end": 1194.57, "word": "-planetary", "probability": 0.7734375}, {"start": 1194.57, "end": 1194.85, "word": " variable.", "probability": 0.8369140625}, {"start": 1199.57, "end": 1200.37, "word": " So", "probability": 0.92724609375}, {"start": 1200.37, "end": 1200.63, "word": " my", "probability": 0.87548828125}, {"start": 1200.63, "end": 1200.95, "word": " Y", "probability": 0.626953125}, {"start": 1200.95, "end": 1201.35, "word": " is", "probability": 0.9375}, {"start": 1201.35, "end": 1201.67, "word": " ceiling", "probability": 0.634765625}, {"start": 1201.67, "end": 1202.15, "word": " rise,", "probability": 0.405517578125}, {"start": 1203.47, "end": 1203.91, "word": " and", "probability": 0.8974609375}, {"start": 1203.91, "end": 1204.43, "word": " size", "probability": 0.814453125}, {"start": 1204.43, "end": 1204.93, "word": " is", "probability": 0.94091796875}, {"start": 1204.93, "end": 1205.35, "word": " square", "probability": 0.8955078125}, {"start": 1205.35, "end": 1206.37, "word": " feet,", "probability": 0.96875}, {"start": 1207.53, "end": 1207.77, "word": " or", "probability": 0.7431640625}, {"start": 1207.77, "end": 1208.13, "word": " size", "probability": 0.70166015625}, {"start": 1208.13, "end": 1208.43, "word": " of", "probability": 0.96728515625}, {"start": 1208.43, "end": 1208.55, "word": " the", "probability": 0.91748046875}, {"start": 1208.55, "end": 1208.85, "word": " house.", "probability": 0.87109375}, {"start": 1211.33, "end": 1211.85, "word": " In", "probability": 0.87841796875}, {"start": 1211.85, "end": 1212.07, "word": " this", "probability": 0.94921875}, 
{"start": 1212.07, "end": 1212.27, "word": " case,", "probability": 0.92138671875}, {"start": 1212.31, "end": 1212.43, "word": " there", "probability": 0.90380859375}, {"start": 1212.43, "end": 1212.61, "word": " are", "probability": 0.94482421875}, {"start": 1212.61, "end": 1212.91, "word": " 10.", "probability": 0.7177734375}], "temperature": 1.0}, {"id": 47, "seek": 124213, "start": 1214.29, "end": 1242.13, "text": " It's sample size is 10. So the first house with size 1,400 square feet, it's selling price is 245 multiplied by 1,000. Because these values are in $1,000. Now based on this data, you can first plot the scatterplot of house price", "tokens": [467, 311, 6889, 2744, 307, 1266, 13, 407, 264, 700, 1782, 365, 2744, 502, 11, 13741, 3732, 3521, 11, 309, 311, 6511, 3218, 307, 4022, 20, 17207, 538, 502, 11, 1360, 13, 1436, 613, 4190, 366, 294, 1848, 16, 11, 1360, 13, 823, 2361, 322, 341, 1412, 11, 291, 393, 700, 7542, 264, 34951, 564, 310, 295, 1782, 3218], "avg_logprob": -0.21406249850988388, "compression_ratio": 1.396341463414634, "no_speech_prob": 0.0, "words": [{"start": 1214.29, "end": 1214.61, "word": " It's", "probability": 0.6973876953125}, {"start": 1214.61, "end": 1214.79, "word": " sample", "probability": 0.33642578125}, {"start": 1214.79, "end": 1215.11, "word": " size", "probability": 0.84521484375}, {"start": 1215.11, "end": 1215.29, "word": " is", "probability": 0.703125}, {"start": 1215.29, "end": 1215.49, "word": " 10.", "probability": 0.830078125}, {"start": 1216.15, "end": 1216.65, "word": " So", "probability": 0.96240234375}, {"start": 1216.65, "end": 1216.83, "word": " the", "probability": 0.8798828125}, {"start": 1216.83, "end": 1217.15, "word": " first", "probability": 0.88427734375}, {"start": 1217.15, "end": 1217.57, "word": " house", "probability": 0.869140625}, {"start": 1217.57, "end": 1217.89, "word": " with", "probability": 0.79150390625}, {"start": 1217.89, "end": 1218.41, "word": " size", "probability": 0.8349609375}, {"start": 
1218.41, "end": 1219.39, "word": " 1", "probability": 0.6328125}, {"start": 1219.39, "end": 1219.71, "word": ",400", "probability": 0.949951171875}, {"start": 1219.71, "end": 1220.35, "word": " square", "probability": 0.89208984375}, {"start": 1220.35, "end": 1220.87, "word": " feet,", "probability": 0.96337890625}, {"start": 1222.35, "end": 1222.83, "word": " it's", "probability": 0.74169921875}, {"start": 1222.83, "end": 1223.07, "word": " selling", "probability": 0.845703125}, {"start": 1223.07, "end": 1223.67, "word": " price", "probability": 0.8203125}, {"start": 1223.67, "end": 1225.03, "word": " is", "probability": 0.86962890625}, {"start": 1225.03, "end": 1226.85, "word": " 245", "probability": 0.7257080078125}, {"start": 1226.85, "end": 1227.69, "word": " multiplied", "probability": 0.62060546875}, {"start": 1227.69, "end": 1228.01, "word": " by", "probability": 0.97509765625}, {"start": 1228.01, "end": 1228.31, "word": " 1", "probability": 0.84326171875}, {"start": 1228.31, "end": 1228.43, "word": ",000.", "probability": 0.999267578125}, {"start": 1230.11, "end": 1230.55, "word": " Because", "probability": 0.86083984375}, {"start": 1230.55, "end": 1230.83, "word": " these", "probability": 0.8623046875}, {"start": 1230.83, "end": 1231.29, "word": " values", "probability": 0.951171875}, {"start": 1231.29, "end": 1231.55, "word": " are", "probability": 0.8955078125}, {"start": 1231.55, "end": 1231.67, "word": " in", "probability": 0.74560546875}, {"start": 1231.67, "end": 1231.91, "word": " $1", "probability": 0.654541015625}, {"start": 1231.91, "end": 1232.05, "word": ",000.", "probability": 0.998779296875}, {"start": 1233.71, "end": 1234.43, "word": " Now", "probability": 0.95751953125}, {"start": 1234.43, "end": 1234.75, "word": " based", "probability": 0.5263671875}, {"start": 1234.75, "end": 1234.91, "word": " on", "probability": 0.94482421875}, {"start": 1234.91, "end": 1235.11, "word": " this", "probability": 0.943359375}, {"start": 1235.11, "end": 
1235.47, "word": " data,", "probability": 0.9306640625}, {"start": 1235.69, "end": 1235.83, "word": " you", "probability": 0.90966796875}, {"start": 1235.83, "end": 1236.13, "word": " can", "probability": 0.94482421875}, {"start": 1236.13, "end": 1237.55, "word": " first", "probability": 0.6591796875}, {"start": 1237.55, "end": 1237.95, "word": " plot", "probability": 0.86279296875}, {"start": 1237.95, "end": 1238.31, "word": " the", "probability": 0.884765625}, {"start": 1238.31, "end": 1239.01, "word": " scatterplot", "probability": 0.7880859375}, {"start": 1239.01, "end": 1239.77, "word": " of", "probability": 0.96728515625}, {"start": 1239.77, "end": 1241.57, "word": " house", "probability": 0.8662109375}, {"start": 1241.57, "end": 1242.13, "word": " price", "probability": 0.91162109375}], "temperature": 1.0}, {"id": 48, "seek": 126993, "start": 1244.55, "end": 1269.93, "text": " In Y direction, the vertical direction. So here is house. And rise. And size in the X axis. You will get this scatter plot. 
Now, the data here is just 10 points, so sometimes it's hard to tell.", "tokens": [682, 398, 3513, 11, 264, 9429, 3513, 13, 407, 510, 307, 1782, 13, 400, 6272, 13, 400, 2744, 294, 264, 1783, 10298, 13, 509, 486, 483, 341, 34951, 7542, 13, 823, 11, 264, 1412, 510, 307, 445, 1266, 2793, 11, 370, 2171, 309, 311, 1152, 281, 980, 13], "avg_logprob": -0.33290816326530615, "compression_ratio": 1.375886524822695, "no_speech_prob": 0.0, "words": [{"start": 1244.55, "end": 1244.83, "word": " In", "probability": 0.301513671875}, {"start": 1244.83, "end": 1245.15, "word": " Y", "probability": 0.517578125}, {"start": 1245.15, "end": 1245.83, "word": " direction,", "probability": 0.74658203125}, {"start": 1246.47, "end": 1246.59, "word": " the", "probability": 0.55859375}, {"start": 1246.59, "end": 1246.93, "word": " vertical", "probability": 0.83837890625}, {"start": 1246.93, "end": 1247.55, "word": " direction.", "probability": 0.9619140625}, {"start": 1247.97, "end": 1248.03, "word": " So", "probability": 0.7490234375}, {"start": 1248.03, "end": 1248.25, "word": " here", "probability": 0.75634765625}, {"start": 1248.25, "end": 1248.49, "word": " is", "probability": 0.7978515625}, {"start": 1248.49, "end": 1248.95, "word": " house.", "probability": 0.6865234375}, {"start": 1250.85, "end": 1251.57, "word": " And", "probability": 0.293701171875}, {"start": 1251.57, "end": 1251.87, "word": " rise.", "probability": 0.2568359375}, {"start": 1254.23, "end": 1255.03, "word": " And", "probability": 0.93701171875}, {"start": 1255.03, "end": 1255.59, "word": " size", "probability": 0.85546875}, {"start": 1255.59, "end": 1256.75, "word": " in", "probability": 0.552734375}, {"start": 1256.75, "end": 1256.91, "word": " the", "probability": 0.90966796875}, {"start": 1256.91, "end": 1257.09, "word": " X", "probability": 0.7734375}, {"start": 1257.09, "end": 1257.49, "word": " axis.", "probability": 0.638671875}, {"start": 1260.05, "end": 1260.29, "word": " You", "probability": 0.8740234375}, 
{"start": 1260.29, "end": 1260.47, "word": " will", "probability": 0.86767578125}, {"start": 1260.47, "end": 1260.65, "word": " get", "probability": 0.94140625}, {"start": 1260.65, "end": 1261.01, "word": " this", "probability": 0.90673828125}, {"start": 1261.01, "end": 1261.47, "word": " scatter", "probability": 0.8017578125}, {"start": 1261.47, "end": 1261.71, "word": " plot.", "probability": 0.89990234375}, {"start": 1262.97, "end": 1263.41, "word": " Now,", "probability": 0.9423828125}, {"start": 1263.83, "end": 1264.27, "word": " the", "probability": 0.67578125}, {"start": 1264.27, "end": 1265.33, "word": " data", "probability": 0.8583984375}, {"start": 1265.33, "end": 1265.69, "word": " here", "probability": 0.84765625}, {"start": 1265.69, "end": 1265.85, "word": " is", "probability": 0.91357421875}, {"start": 1265.85, "end": 1266.15, "word": " just", "probability": 0.916015625}, {"start": 1266.15, "end": 1266.65, "word": " 10", "probability": 0.623046875}, {"start": 1266.65, "end": 1267.07, "word": " points,", "probability": 0.92724609375}, {"start": 1267.23, "end": 1267.37, "word": " so", "probability": 0.94384765625}, {"start": 1267.37, "end": 1268.07, "word": " sometimes", "probability": 0.65234375}, {"start": 1268.07, "end": 1268.99, "word": " it's", "probability": 0.937744140625}, {"start": 1268.99, "end": 1269.33, "word": " hard", "probability": 0.8994140625}, {"start": 1269.33, "end": 1269.65, "word": " to", "probability": 0.966796875}, {"start": 1269.65, "end": 1269.93, "word": " tell.", "probability": 0.900390625}], "temperature": 1.0}, {"id": 49, "seek": 130010, "start": 1271.51, "end": 1300.11, "text": " the relationship between the two variables if your data is small. But just this example for illustration. But at least you can determine that there exists linear relationship between the two variables. It is positive. So the form is linear. Direction is positive. 
Weak or strong or moderate.", "tokens": [264, 2480, 1296, 264, 732, 9102, 498, 428, 1412, 307, 1359, 13, 583, 445, 341, 1365, 337, 22645, 13, 583, 412, 1935, 291, 393, 6997, 300, 456, 8198, 8213, 2480, 1296, 264, 732, 9102, 13, 467, 307, 3353, 13, 407, 264, 1254, 307, 8213, 13, 5822, 882, 307, 3353, 13, 492, 514, 420, 2068, 420, 18174, 13], "avg_logprob": -0.17941810730202445, "compression_ratio": 1.7076023391812865, "no_speech_prob": 0.0, "words": [{"start": 1271.51, "end": 1271.93, "word": " the", "probability": 0.266357421875}, {"start": 1271.93, "end": 1272.59, "word": " relationship", "probability": 0.90478515625}, {"start": 1272.59, "end": 1273.01, "word": " between", "probability": 0.86865234375}, {"start": 1273.01, "end": 1273.23, "word": " the", "probability": 0.85693359375}, {"start": 1273.23, "end": 1273.37, "word": " two", "probability": 0.90966796875}, {"start": 1273.37, "end": 1273.75, "word": " variables", "probability": 0.9296875}, {"start": 1273.75, "end": 1274.09, "word": " if", "probability": 0.6435546875}, {"start": 1274.09, "end": 1274.47, "word": " your", "probability": 0.85302734375}, {"start": 1274.47, "end": 1274.81, "word": " data", "probability": 0.9619140625}, {"start": 1274.81, "end": 1275.03, "word": " is", "probability": 0.9501953125}, {"start": 1275.03, "end": 1275.51, "word": " small.", "probability": 0.880859375}, {"start": 1276.51, "end": 1276.83, "word": " But", "probability": 0.69580078125}, {"start": 1276.83, "end": 1277.13, "word": " just", "probability": 0.5205078125}, {"start": 1277.13, "end": 1278.27, "word": " this", "probability": 0.6376953125}, {"start": 1278.27, "end": 1278.61, "word": " example", "probability": 0.943359375}, {"start": 1278.61, "end": 1279.01, "word": " for", "probability": 0.91796875}, {"start": 1279.01, "end": 1279.73, "word": " illustration.", "probability": 0.93798828125}, {"start": 1280.69, "end": 1280.99, "word": " But", "probability": 0.93994140625}, {"start": 1280.99, "end": 1281.17, "word": " 
at", "probability": 0.927734375}, {"start": 1281.17, "end": 1281.39, "word": " least", "probability": 0.95556640625}, {"start": 1281.39, "end": 1281.57, "word": " you", "probability": 0.8310546875}, {"start": 1281.57, "end": 1281.75, "word": " can", "probability": 0.9462890625}, {"start": 1281.75, "end": 1282.33, "word": " determine", "probability": 0.91015625}, {"start": 1282.33, "end": 1283.21, "word": " that", "probability": 0.9267578125}, {"start": 1283.21, "end": 1284.29, "word": " there", "probability": 0.87353515625}, {"start": 1284.29, "end": 1284.79, "word": " exists", "probability": 0.78759765625}, {"start": 1284.79, "end": 1285.37, "word": " linear", "probability": 0.87158203125}, {"start": 1285.37, "end": 1286.59, "word": " relationship", "probability": 0.9248046875}, {"start": 1286.59, "end": 1287.03, "word": " between", "probability": 0.86962890625}, {"start": 1287.03, "end": 1287.17, "word": " the", "probability": 0.8544921875}, {"start": 1287.17, "end": 1287.31, "word": " two", "probability": 0.9375}, {"start": 1287.31, "end": 1287.81, "word": " variables.", "probability": 0.931640625}, {"start": 1288.39, "end": 1288.65, "word": " It", "probability": 0.91796875}, {"start": 1288.65, "end": 1288.81, "word": " is", "probability": 0.94287109375}, {"start": 1288.81, "end": 1289.19, "word": " positive.", "probability": 0.92236328125}, {"start": 1290.71, "end": 1291.01, "word": " So", "probability": 0.94580078125}, {"start": 1291.01, "end": 1291.53, "word": " the", "probability": 0.63232421875}, {"start": 1291.53, "end": 1291.89, "word": " form", "probability": 0.9150390625}, {"start": 1291.89, "end": 1292.13, "word": " is", "probability": 0.9501953125}, {"start": 1292.13, "end": 1292.47, "word": " linear.", "probability": 0.89501953125}, {"start": 1294.35, "end": 1295.11, "word": " Direction", "probability": 0.941650390625}, {"start": 1295.11, "end": 1295.49, "word": " is", "probability": 0.94677734375}, {"start": 1295.49, "end": 1295.87, "word": " 
positive.", "probability": 0.92578125}, {"start": 1298.05, "end": 1298.81, "word": " Weak", "probability": 0.96142578125}, {"start": 1298.81, "end": 1299.01, "word": " or", "probability": 0.89453125}, {"start": 1299.01, "end": 1299.45, "word": " strong", "probability": 0.8701171875}, {"start": 1299.45, "end": 1299.75, "word": " or", "probability": 0.93310546875}, {"start": 1299.75, "end": 1300.11, "word": " moderate.", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 50, "seek": 132272, "start": 1301.26, "end": 1322.72, "text": " Sometimes it's not easy to tell if it is strong or moderate. Now if you look at these points, some of them are close to the straight line and others are away from the straight line. So maybe there exists moderate for example, but you cannot say strong.", "tokens": [4803, 309, 311, 406, 1858, 281, 980, 498, 309, 307, 2068, 420, 18174, 13, 823, 498, 291, 574, 412, 613, 2793, 11, 512, 295, 552, 366, 1998, 281, 264, 2997, 1622, 293, 2357, 366, 1314, 490, 264, 2997, 1622, 13, 407, 1310, 456, 8198, 18174, 337, 1365, 11, 457, 291, 2644, 584, 2068, 13], "avg_logprob": -0.16022727597843517, "compression_ratio": 1.5617283950617284, "no_speech_prob": 0.0, "words": [{"start": 1301.26, "end": 1301.88, "word": " Sometimes", "probability": 0.56201171875}, {"start": 1301.88, "end": 1302.12, "word": " it's", "probability": 0.72314453125}, {"start": 1302.12, "end": 1302.32, "word": " not", "probability": 0.94873046875}, {"start": 1302.32, "end": 1302.62, "word": " easy", "probability": 0.908203125}, {"start": 1302.62, "end": 1302.96, "word": " to", "probability": 0.97314453125}, {"start": 1302.96, "end": 1303.38, "word": " tell", "probability": 0.87841796875}, {"start": 1303.38, "end": 1304.16, "word": " if", "probability": 0.9208984375}, {"start": 1304.16, "end": 1304.28, "word": " it", "probability": 0.9404296875}, {"start": 1304.28, "end": 1304.4, "word": " is", "probability": 0.73388671875}, {"start": 1304.4, "end": 1304.84, "word": " 
strong", "probability": 0.7822265625}, {"start": 1304.84, "end": 1305.24, "word": " or", "probability": 0.96142578125}, {"start": 1305.24, "end": 1305.62, "word": " moderate.", "probability": 0.9228515625}, {"start": 1307.72, "end": 1308.06, "word": " Now", "probability": 0.859375}, {"start": 1308.06, "end": 1308.22, "word": " if", "probability": 0.60888671875}, {"start": 1308.22, "end": 1308.26, "word": " you", "probability": 0.90771484375}, {"start": 1308.26, "end": 1308.4, "word": " look", "probability": 0.96630859375}, {"start": 1308.4, "end": 1308.52, "word": " at", "probability": 0.96630859375}, {"start": 1308.52, "end": 1308.74, "word": " these", "probability": 0.81005859375}, {"start": 1308.74, "end": 1309.06, "word": " points,", "probability": 0.94677734375}, {"start": 1309.24, "end": 1309.44, "word": " some", "probability": 0.8994140625}, {"start": 1309.44, "end": 1309.6, "word": " of", "probability": 0.9619140625}, {"start": 1309.6, "end": 1309.82, "word": " them", "probability": 0.896484375}, {"start": 1309.82, "end": 1310.12, "word": " are", "probability": 0.943359375}, {"start": 1310.12, "end": 1310.56, "word": " close", "probability": 0.84912109375}, {"start": 1310.56, "end": 1310.7, "word": " to", "probability": 0.96630859375}, {"start": 1310.7, "end": 1310.84, "word": " the", "probability": 0.90673828125}, {"start": 1310.84, "end": 1311.06, "word": " straight", "probability": 0.92578125}, {"start": 1311.06, "end": 1311.4, "word": " line", "probability": 0.9091796875}, {"start": 1311.4, "end": 1311.96, "word": " and", "probability": 0.66357421875}, {"start": 1311.96, "end": 1312.38, "word": " others", "probability": 0.86865234375}, {"start": 1312.38, "end": 1312.96, "word": " are", "probability": 0.93798828125}, {"start": 1312.96, "end": 1313.7, "word": " away", "probability": 0.87158203125}, {"start": 1313.7, "end": 1313.98, "word": " from", "probability": 0.8828125}, {"start": 1313.98, "end": 1314.26, "word": " the", "probability": 0.869140625}, 
{"start": 1314.26, "end": 1314.54, "word": " straight", "probability": 0.90869140625}, {"start": 1314.54, "end": 1314.86, "word": " line.", "probability": 0.9423828125}, {"start": 1315.32, "end": 1315.52, "word": " So", "probability": 0.912109375}, {"start": 1315.52, "end": 1315.92, "word": " maybe", "probability": 0.833984375}, {"start": 1315.92, "end": 1316.18, "word": " there", "probability": 0.74560546875}, {"start": 1316.18, "end": 1316.7, "word": " exists", "probability": 0.76904296875}, {"start": 1316.7, "end": 1317.94, "word": " moderate", "probability": 0.904296875}, {"start": 1317.94, "end": 1318.22, "word": " for", "probability": 0.6708984375}, {"start": 1318.22, "end": 1318.64, "word": " example,", "probability": 0.97509765625}, {"start": 1319.4, "end": 1319.64, "word": " but", "probability": 0.9189453125}, {"start": 1319.64, "end": 1320.0, "word": " you", "probability": 0.9287109375}, {"start": 1320.0, "end": 1320.36, "word": " cannot", "probability": 0.84033203125}, {"start": 1320.36, "end": 1320.78, "word": " say", "probability": 0.93359375}, {"start": 1320.78, "end": 1322.72, "word": " strong.", "probability": 0.7734375}], "temperature": 1.0}, {"id": 51, "seek": 135143, "start": 1323.93, "end": 1351.43, "text": " Here, strong it means the points are close to the straight line. Sometimes it's hard to tell the strength of the relationship, but you can know the form or the direction. But to measure the exact strength, you have to measure the correlation coefficient, R. 
Now, by looking at the data, you can compute", "tokens": [1692, 11, 2068, 309, 1355, 264, 2793, 366, 1998, 281, 264, 2997, 1622, 13, 4803, 309, 311, 1152, 281, 980, 264, 3800, 295, 264, 2480, 11, 457, 291, 393, 458, 264, 1254, 420, 264, 3513, 13, 583, 281, 3481, 264, 1900, 3800, 11, 291, 362, 281, 3481, 264, 20009, 17619, 11, 497, 13, 823, 11, 538, 1237, 412, 264, 1412, 11, 291, 393, 14722], "avg_logprob": -0.17752403846153847, "compression_ratio": 1.5947368421052632, "no_speech_prob": 0.0, "words": [{"start": 1323.93, "end": 1324.29, "word": " Here,", "probability": 0.517578125}, {"start": 1324.51, "end": 1324.79, "word": " strong", "probability": 0.7109375}, {"start": 1324.79, "end": 1324.99, "word": " it", "probability": 0.4658203125}, {"start": 1324.99, "end": 1325.43, "word": " means", "probability": 0.9228515625}, {"start": 1325.43, "end": 1326.45, "word": " the", "probability": 0.54931640625}, {"start": 1326.45, "end": 1326.83, "word": " points", "probability": 0.9208984375}, {"start": 1326.83, "end": 1327.17, "word": " are", "probability": 0.93798828125}, {"start": 1327.17, "end": 1327.87, "word": " close", "probability": 0.826171875}, {"start": 1327.87, "end": 1328.05, "word": " to", "probability": 0.96337890625}, {"start": 1328.05, "end": 1328.21, "word": " the", "probability": 0.88818359375}, {"start": 1328.21, "end": 1328.45, "word": " straight", "probability": 0.93115234375}, {"start": 1328.45, "end": 1328.77, "word": " line.", "probability": 0.88232421875}, {"start": 1329.45, "end": 1330.09, "word": " Sometimes", "probability": 0.4853515625}, {"start": 1330.09, "end": 1330.33, "word": " it's", "probability": 0.701171875}, {"start": 1330.33, "end": 1330.61, "word": " hard", "probability": 0.90673828125}, {"start": 1330.61, "end": 1330.87, "word": " to", "probability": 0.96875}, {"start": 1330.87, "end": 1331.13, "word": " tell", "probability": 0.86181640625}, {"start": 1331.13, "end": 1331.89, "word": " the", "probability": 0.90673828125}, {"start": 
1331.89, "end": 1332.29, "word": " strength", "probability": 0.798828125}, {"start": 1332.29, "end": 1332.61, "word": " of", "probability": 0.96728515625}, {"start": 1332.61, "end": 1332.73, "word": " the", "probability": 0.88427734375}, {"start": 1332.73, "end": 1333.13, "word": " relationship,", "probability": 0.91650390625}, {"start": 1333.49, "end": 1333.81, "word": " but", "probability": 0.9169921875}, {"start": 1333.81, "end": 1334.05, "word": " you", "probability": 0.96044921875}, {"start": 1334.05, "end": 1334.37, "word": " can", "probability": 0.93408203125}, {"start": 1334.37, "end": 1334.85, "word": " know", "probability": 0.88037109375}, {"start": 1334.85, "end": 1335.23, "word": " the", "probability": 0.90869140625}, {"start": 1335.23, "end": 1336.25, "word": " form", "probability": 0.9052734375}, {"start": 1336.25, "end": 1337.35, "word": " or", "probability": 0.86328125}, {"start": 1337.35, "end": 1337.57, "word": " the", "probability": 0.85546875}, {"start": 1337.57, "end": 1338.13, "word": " direction.", "probability": 0.9716796875}, {"start": 1339.13, "end": 1339.45, "word": " But", "probability": 0.916015625}, {"start": 1339.45, "end": 1339.87, "word": " to", "probability": 0.87158203125}, {"start": 1339.87, "end": 1340.13, "word": " measure", "probability": 0.88525390625}, {"start": 1340.13, "end": 1340.45, "word": " the", "probability": 0.90771484375}, {"start": 1340.45, "end": 1340.99, "word": " exact", "probability": 0.95068359375}, {"start": 1340.99, "end": 1341.65, "word": " strength,", "probability": 0.87890625}, {"start": 1342.21, "end": 1342.45, "word": " you", "probability": 0.95947265625}, {"start": 1342.45, "end": 1342.65, "word": " have", "probability": 0.9453125}, {"start": 1342.65, "end": 1342.87, "word": " to", "probability": 0.970703125}, {"start": 1342.87, "end": 1343.25, "word": " measure", "probability": 0.87744140625}, {"start": 1343.25, "end": 1343.63, "word": " the", "probability": 0.9072265625}, {"start": 1343.63, "end": 
1344.13, "word": " correlation", "probability": 0.91943359375}, {"start": 1344.13, "end": 1344.67, "word": " coefficient,", "probability": 0.79736328125}, {"start": 1344.97, "end": 1345.19, "word": " R.", "probability": 0.7978515625}, {"start": 1346.01, "end": 1346.41, "word": " Now,", "probability": 0.923828125}, {"start": 1346.63, "end": 1346.85, "word": " by", "probability": 0.96728515625}, {"start": 1346.85, "end": 1347.25, "word": " looking", "probability": 0.9111328125}, {"start": 1347.25, "end": 1347.93, "word": " at", "probability": 0.96826171875}, {"start": 1347.93, "end": 1348.11, "word": " the", "probability": 0.90478515625}, {"start": 1348.11, "end": 1348.47, "word": " data,", "probability": 0.94677734375}, {"start": 1349.51, "end": 1349.81, "word": " you", "probability": 0.9619140625}, {"start": 1349.81, "end": 1350.25, "word": " can", "probability": 0.94482421875}, {"start": 1350.25, "end": 1351.43, "word": " compute", "probability": 0.884765625}], "temperature": 1.0}, {"id": 52, "seek": 137821, "start": 1353.85, "end": 1378.21, "text": " The sum of x values, y values, sum of x squared, sum of y squared, also sum of xy. Now plug these values into the formula we have for the shortcut formula. 
You will get R to be 0.76 around 76.", "tokens": [440, 2408, 295, 2031, 4190, 11, 288, 4190, 11, 2408, 295, 2031, 8889, 11, 2408, 295, 288, 8889, 11, 611, 2408, 295, 2031, 88, 13, 823, 5452, 613, 4190, 666, 264, 8513, 321, 362, 337, 264, 24822, 8513, 13, 509, 486, 483, 497, 281, 312, 1958, 13, 25026, 926, 24733, 13], "avg_logprob": -0.20387620392900246, "compression_ratio": 1.5196850393700787, "no_speech_prob": 0.0, "words": [{"start": 1353.85, "end": 1354.29, "word": " The", "probability": 0.3544921875}, {"start": 1354.29, "end": 1355.11, "word": " sum", "probability": 0.86669921875}, {"start": 1355.11, "end": 1355.79, "word": " of", "probability": 0.97021484375}, {"start": 1355.79, "end": 1357.55, "word": " x", "probability": 0.453857421875}, {"start": 1357.55, "end": 1358.09, "word": " values,", "probability": 0.7900390625}, {"start": 1358.51, "end": 1359.49, "word": " y", "probability": 0.92578125}, {"start": 1359.49, "end": 1360.07, "word": " values,", "probability": 0.95751953125}, {"start": 1360.81, "end": 1361.29, "word": " sum", "probability": 0.87744140625}, {"start": 1361.29, "end": 1361.83, "word": " of", "probability": 0.96533203125}, {"start": 1361.83, "end": 1362.07, "word": " x", "probability": 0.97265625}, {"start": 1362.07, "end": 1362.47, "word": " squared,", "probability": 0.5546875}, {"start": 1363.29, "end": 1363.63, "word": " sum", "probability": 0.916015625}, {"start": 1363.63, "end": 1363.83, "word": " of", "probability": 0.95947265625}, {"start": 1363.83, "end": 1364.05, "word": " y", "probability": 0.9765625}, {"start": 1364.05, "end": 1364.47, "word": " squared,", "probability": 0.8603515625}, {"start": 1364.73, "end": 1365.05, "word": " also", "probability": 0.751953125}, {"start": 1365.05, "end": 1365.37, "word": " sum", "probability": 0.857421875}, {"start": 1365.37, "end": 1365.51, "word": " of", "probability": 0.96923828125}, {"start": 1365.51, "end": 1366.03, "word": " xy.", "probability": 0.94189453125}, {"start": 1367.03, "end": 
1367.57, "word": " Now", "probability": 0.93505859375}, {"start": 1367.57, "end": 1367.91, "word": " plug", "probability": 0.5849609375}, {"start": 1367.91, "end": 1368.17, "word": " these", "probability": 0.8203125}, {"start": 1368.17, "end": 1368.59, "word": " values", "probability": 0.95751953125}, {"start": 1368.59, "end": 1368.83, "word": " into", "probability": 0.83056640625}, {"start": 1368.83, "end": 1369.03, "word": " the", "probability": 0.9091796875}, {"start": 1369.03, "end": 1369.35, "word": " formula", "probability": 0.916015625}, {"start": 1369.35, "end": 1369.53, "word": " we", "probability": 0.8974609375}, {"start": 1369.53, "end": 1369.83, "word": " have", "probability": 0.9375}, {"start": 1369.83, "end": 1370.11, "word": " for", "probability": 0.8994140625}, {"start": 1370.11, "end": 1370.27, "word": " the", "probability": 0.90966796875}, {"start": 1370.27, "end": 1370.61, "word": " shortcut", "probability": 0.927734375}, {"start": 1370.61, "end": 1371.23, "word": " formula.", "probability": 0.9228515625}, {"start": 1371.95, "end": 1372.09, "word": " You", "probability": 0.9248046875}, {"start": 1372.09, "end": 1372.27, "word": " will", "probability": 0.89404296875}, {"start": 1372.27, "end": 1372.59, "word": " get", "probability": 0.92529296875}, {"start": 1372.59, "end": 1373.11, "word": " R", "probability": 0.67626953125}, {"start": 1373.11, "end": 1373.27, "word": " to", "probability": 0.9443359375}, {"start": 1373.27, "end": 1373.47, "word": " be", "probability": 0.93798828125}, {"start": 1373.47, "end": 1373.73, "word": " 0", "probability": 0.7021484375}, {"start": 1373.73, "end": 1374.41, "word": ".76", "probability": 0.97021484375}, {"start": 1374.41, "end": 1375.71, "word": " around", "probability": 0.426513671875}, {"start": 1375.71, "end": 1378.21, "word": " 76.", "probability": 0.6298828125}], "temperature": 1.0}, {"id": 53, "seek": 141031, "start": 1384.05, "end": 1410.31, "text": " So there exists positive, moderate relationship 
between selling price of a home and its size. So that means if the size increases, the selling price also increases. So there exists positive relationship between the two variables.", "tokens": [407, 456, 8198, 3353, 11, 18174, 2480, 1296, 6511, 3218, 295, 257, 1280, 293, 1080, 2744, 13, 407, 300, 1355, 498, 264, 2744, 8637, 11, 264, 6511, 3218, 611, 8637, 13, 407, 456, 8198, 3353, 2480, 1296, 264, 732, 9102, 13], "avg_logprob": -0.19419643247411364, "compression_ratio": 1.7557251908396947, "no_speech_prob": 0.0, "words": [{"start": 1384.05, "end": 1384.49, "word": " So", "probability": 0.80859375}, {"start": 1384.49, "end": 1384.71, "word": " there", "probability": 0.7314453125}, {"start": 1384.71, "end": 1385.13, "word": " exists", "probability": 0.6298828125}, {"start": 1385.13, "end": 1386.07, "word": " positive,", "probability": 0.845703125}, {"start": 1387.21, "end": 1388.99, "word": " moderate", "probability": 0.91796875}, {"start": 1388.99, "end": 1390.17, "word": " relationship", "probability": 0.8857421875}, {"start": 1390.17, "end": 1390.69, "word": " between", "probability": 0.8916015625}, {"start": 1390.69, "end": 1393.77, "word": " selling", "probability": 0.64990234375}, {"start": 1393.77, "end": 1394.45, "word": " price", "probability": 0.9443359375}, {"start": 1394.45, "end": 1394.99, "word": " of", "probability": 0.67626953125}, {"start": 1394.99, "end": 1395.55, "word": " a", "probability": 0.974609375}, {"start": 1395.55, "end": 1395.89, "word": " home", "probability": 0.88623046875}, {"start": 1395.89, "end": 1397.37, "word": " and", "probability": 0.91845703125}, {"start": 1397.37, "end": 1397.59, "word": " its", "probability": 0.78125}, {"start": 1397.59, "end": 1397.95, "word": " size.", "probability": 0.87109375}, {"start": 1398.29, "end": 1398.57, "word": " So", "probability": 0.935546875}, {"start": 1398.57, "end": 1398.79, "word": " that", "probability": 0.90283203125}, {"start": 1398.79, "end": 1399.05, "word": " means", "probability": 
0.92431640625}, {"start": 1399.05, "end": 1399.49, "word": " if", "probability": 0.84130859375}, {"start": 1399.49, "end": 1399.85, "word": " the", "probability": 0.90869140625}, {"start": 1399.85, "end": 1400.11, "word": " size", "probability": 0.787109375}, {"start": 1400.11, "end": 1400.63, "word": " increases,", "probability": 0.92529296875}, {"start": 1401.77, "end": 1402.13, "word": " the", "probability": 0.8994140625}, {"start": 1402.13, "end": 1402.57, "word": " selling", "probability": 0.8955078125}, {"start": 1402.57, "end": 1403.07, "word": " price", "probability": 0.91943359375}, {"start": 1403.07, "end": 1403.71, "word": " also", "probability": 0.416259765625}, {"start": 1403.71, "end": 1404.67, "word": " increases.", "probability": 0.939453125}, {"start": 1405.31, "end": 1405.55, "word": " So", "probability": 0.9541015625}, {"start": 1405.55, "end": 1405.71, "word": " there", "probability": 0.88427734375}, {"start": 1405.71, "end": 1406.01, "word": " exists", "probability": 0.845703125}, {"start": 1406.01, "end": 1406.63, "word": " positive", "probability": 0.86865234375}, {"start": 1406.63, "end": 1408.19, "word": " relationship", "probability": 0.916015625}, {"start": 1408.19, "end": 1408.77, "word": " between", "probability": 0.8701171875}, {"start": 1408.77, "end": 1409.55, "word": " the", "probability": 0.9140625}, {"start": 1409.55, "end": 1409.75, "word": " two", "probability": 0.93505859375}, {"start": 1409.75, "end": 1410.31, "word": " variables.", "probability": 0.94677734375}], "temperature": 1.0}, {"id": 54, "seek": 143708, "start": 1415.8, "end": 1437.08, "text": " Strong it means close to 1, 0.8, 0.85, 0.9, you can say there exists strong. But fields is not strong relationship, you can say it's moderate relationship. 
Because it's close if now if you just compare this value and other data gives 9%.", "tokens": [22792, 309, 1355, 1998, 281, 502, 11, 1958, 13, 23, 11, 1958, 13, 19287, 11, 1958, 13, 24, 11, 291, 393, 584, 456, 8198, 2068, 13, 583, 7909, 307, 406, 2068, 2480, 11, 291, 393, 584, 309, 311, 18174, 2480, 13, 1436, 309, 311, 1998, 498, 586, 498, 291, 445, 6794, 341, 2158, 293, 661, 1412, 2709, 1722, 6856], "avg_logprob": -0.28828125943740207, "compression_ratio": 1.4875, "no_speech_prob": 0.0, "words": [{"start": 1415.8, "end": 1416.44, "word": " Strong", "probability": 0.1849365234375}, {"start": 1416.44, "end": 1416.66, "word": " it", "probability": 0.272705078125}, {"start": 1416.66, "end": 1416.88, "word": " means", "probability": 0.91357421875}, {"start": 1416.88, "end": 1417.24, "word": " close", "probability": 0.76123046875}, {"start": 1417.24, "end": 1417.44, "word": " to", "probability": 0.95849609375}, {"start": 1417.44, "end": 1417.66, "word": " 1,", "probability": 0.490478515625}, {"start": 1417.78, "end": 1417.96, "word": " 0", "probability": 0.62890625}, {"start": 1417.96, "end": 1418.38, "word": ".8,", "probability": 0.9482421875}, {"start": 1418.48, "end": 1418.66, "word": " 0", "probability": 0.9794921875}, {"start": 1418.66, "end": 1419.32, "word": ".85,", "probability": 0.985107421875}, {"start": 1419.48, "end": 1419.56, "word": " 0", "probability": 0.974609375}, {"start": 1419.56, "end": 1419.96, "word": ".9,", "probability": 0.98828125}, {"start": 1420.1, "end": 1420.3, "word": " you", "probability": 0.8779296875}, {"start": 1420.3, "end": 1420.54, "word": " can", "probability": 0.9453125}, {"start": 1420.54, "end": 1420.74, "word": " say", "probability": 0.57958984375}, {"start": 1420.74, "end": 1420.92, "word": " there", "probability": 0.6494140625}, {"start": 1420.92, "end": 1421.16, "word": " exists", "probability": 0.47265625}, {"start": 1421.16, "end": 1421.64, "word": " strong.", "probability": 0.84033203125}, {"start": 1422.18, "end": 
1422.48, "word": " But", "probability": 0.93994140625}, {"start": 1422.48, "end": 1422.84, "word": " fields", "probability": 0.312255859375}, {"start": 1422.84, "end": 1424.08, "word": " is", "probability": 0.75341796875}, {"start": 1424.08, "end": 1424.4, "word": " not", "probability": 0.9482421875}, {"start": 1424.4, "end": 1425.42, "word": " strong", "probability": 0.69775390625}, {"start": 1425.42, "end": 1426.02, "word": " relationship,", "probability": 0.91455078125}, {"start": 1426.24, "end": 1426.34, "word": " you", "probability": 0.94091796875}, {"start": 1426.34, "end": 1426.54, "word": " can", "probability": 0.94580078125}, {"start": 1426.54, "end": 1426.76, "word": " say", "probability": 0.90673828125}, {"start": 1426.76, "end": 1427.56, "word": " it's", "probability": 0.840087890625}, {"start": 1427.56, "end": 1427.96, "word": " moderate", "probability": 0.91748046875}, {"start": 1427.96, "end": 1428.62, "word": " relationship.", "probability": 0.9052734375}, {"start": 1430.16, "end": 1430.6, "word": " Because", "probability": 0.89990234375}, {"start": 1430.6, "end": 1430.84, "word": " it's", "probability": 0.93212890625}, {"start": 1430.84, "end": 1431.28, "word": " close", "probability": 0.873046875}, {"start": 1431.28, "end": 1432.5, "word": " if", "probability": 0.546875}, {"start": 1432.5, "end": 1433.12, "word": " now", "probability": 0.4326171875}, {"start": 1433.12, "end": 1433.34, "word": " if", "probability": 0.79638671875}, {"start": 1433.34, "end": 1433.44, "word": " you", "probability": 0.958984375}, {"start": 1433.44, "end": 1433.72, "word": " just", "probability": 0.8662109375}, {"start": 1433.72, "end": 1434.06, "word": " compare", "probability": 0.94482421875}, {"start": 1434.06, "end": 1434.28, "word": " this", "probability": 0.9453125}, {"start": 1434.28, "end": 1434.7, "word": " value", "probability": 0.97509765625}, {"start": 1434.7, "end": 1435.3, "word": " and", "probability": 0.685546875}, {"start": 1435.3, "end": 1435.62, 
"word": " other", "probability": 0.88232421875}, {"start": 1435.62, "end": 1435.98, "word": " data", "probability": 0.94482421875}, {"start": 1435.98, "end": 1436.3, "word": " gives", "probability": 0.78076171875}, {"start": 1436.3, "end": 1437.08, "word": " 9%.", "probability": 0.621337890625}], "temperature": 1.0}, {"id": 55, "seek": 146339, "start": 1438.83, "end": 1463.39, "text": " Other one gives 85%. So these values are much closer to 1 than 0.7, but still this value is considered to be high. Any question? Next, I will give some introduction to regression analysis.", "tokens": [5358, 472, 2709, 14695, 6856, 407, 613, 4190, 366, 709, 4966, 281, 502, 813, 1958, 13, 22, 11, 457, 920, 341, 2158, 307, 4888, 281, 312, 1090, 13, 2639, 1168, 30, 3087, 11, 286, 486, 976, 512, 9339, 281, 24590, 5215, 13], "avg_logprob": -0.24781975358031516, "compression_ratio": 1.2857142857142858, "no_speech_prob": 0.0, "words": [{"start": 1438.83, "end": 1439.21, "word": " Other", "probability": 0.2149658203125}, {"start": 1439.21, "end": 1439.47, "word": " one", "probability": 0.90478515625}, {"start": 1439.47, "end": 1439.85, "word": " gives", "probability": 0.83837890625}, {"start": 1439.85, "end": 1441.39, "word": " 85%.", "probability": 0.5093994140625}, {"start": 1441.39, "end": 1442.17, "word": " So", "probability": 0.89892578125}, {"start": 1442.17, "end": 1442.43, "word": " these", "probability": 0.67822265625}, {"start": 1442.43, "end": 1442.93, "word": " values", "probability": 0.9619140625}, {"start": 1442.93, "end": 1443.47, "word": " are", "probability": 0.9423828125}, {"start": 1443.47, "end": 1443.79, "word": " much", "probability": 0.9140625}, {"start": 1443.79, "end": 1444.33, "word": " closer", "probability": 0.919921875}, {"start": 1444.33, "end": 1445.57, "word": " to", "probability": 0.92138671875}, {"start": 1445.57, "end": 1445.85, "word": " 1", "probability": 0.69384765625}, {"start": 1445.85, "end": 1446.57, "word": " than", "probability": 0.8037109375}, 
{"start": 1446.57, "end": 1446.83, "word": " 0", "probability": 0.384521484375}, {"start": 1446.83, "end": 1447.09, "word": ".7,", "probability": 0.991455078125}, {"start": 1447.19, "end": 1447.35, "word": " but", "probability": 0.923828125}, {"start": 1447.35, "end": 1447.75, "word": " still", "probability": 0.94873046875}, {"start": 1447.75, "end": 1448.03, "word": " this", "probability": 0.64453125}, {"start": 1448.03, "end": 1448.31, "word": " value", "probability": 0.9150390625}, {"start": 1448.31, "end": 1448.55, "word": " is", "probability": 0.90478515625}, {"start": 1448.55, "end": 1448.91, "word": " considered", "probability": 0.771484375}, {"start": 1448.91, "end": 1449.13, "word": " to", "probability": 0.96533203125}, {"start": 1449.13, "end": 1449.25, "word": " be", "probability": 0.951171875}, {"start": 1449.25, "end": 1449.57, "word": " high.", "probability": 0.8974609375}, {"start": 1455.71, "end": 1456.43, "word": " Any", "probability": 0.8564453125}, {"start": 1456.43, "end": 1456.81, "word": " question?", "probability": 0.587890625}, {"start": 1459.85, "end": 1460.57, "word": " Next,", "probability": 0.8916015625}, {"start": 1460.73, "end": 1460.87, "word": " I", "probability": 0.99853515625}, {"start": 1460.87, "end": 1460.97, "word": " will", "probability": 0.8701171875}, {"start": 1460.97, "end": 1461.13, "word": " give", "probability": 0.86865234375}, {"start": 1461.13, "end": 1461.47, "word": " some", "probability": 0.88818359375}, {"start": 1461.47, "end": 1462.09, "word": " introduction", "probability": 0.8984375}, {"start": 1462.09, "end": 1462.45, "word": " to", "probability": 0.96630859375}, {"start": 1462.45, "end": 1462.81, "word": " regression", "probability": 0.95263671875}, {"start": 1462.81, "end": 1463.39, "word": " analysis.", "probability": 0.85888671875}], "temperature": 1.0}, {"id": 56, "seek": 149189, "start": 1466.97, "end": 1491.89, "text": " regression analysis used to number one, predict the value of a dependent variable 
based on the value of at least one independent variable. So by using the data we have for selling price of a home and size, you can predict the selling price by knowing the value of its size. So suppose for example,", "tokens": [24590, 5215, 1143, 281, 1230, 472, 11, 6069, 264, 2158, 295, 257, 12334, 7006, 2361, 322, 264, 2158, 295, 412, 1935, 472, 6695, 7006, 13, 407, 538, 1228, 264, 1412, 321, 362, 337, 6511, 3218, 295, 257, 1280, 293, 2744, 11, 291, 393, 6069, 264, 6511, 3218, 538, 5276, 264, 2158, 295, 1080, 2744, 13, 407, 7297, 337, 1365, 11], "avg_logprob": -0.18173667739649288, "compression_ratio": 1.7633136094674555, "no_speech_prob": 0.0, "words": [{"start": 1466.9699999999998, "end": 1467.6699999999998, "word": " regression", "probability": 0.31201171875}, {"start": 1467.6699999999998, "end": 1468.37, "word": " analysis", "probability": 0.76708984375}, {"start": 1468.37, "end": 1468.75, "word": " used", "probability": 0.67724609375}, {"start": 1468.75, "end": 1469.01, "word": " to", "probability": 0.87158203125}, {"start": 1469.01, "end": 1469.29, "word": " number", "probability": 0.6767578125}, {"start": 1469.29, "end": 1469.77, "word": " one,", "probability": 0.740234375}, {"start": 1471.65, "end": 1472.21, "word": " predict", "probability": 0.71142578125}, {"start": 1472.21, "end": 1472.57, "word": " the", "probability": 0.91748046875}, {"start": 1472.57, "end": 1472.95, "word": " value", "probability": 0.9716796875}, {"start": 1472.95, "end": 1473.17, "word": " of", "probability": 0.96240234375}, {"start": 1473.17, "end": 1473.27, "word": " a", "probability": 0.4013671875}, {"start": 1473.27, "end": 1473.55, "word": " dependent", "probability": 0.82373046875}, {"start": 1473.55, "end": 1474.03, "word": " variable", "probability": 0.908203125}, {"start": 1474.03, "end": 1474.47, "word": " based", "probability": 0.87939453125}, {"start": 1474.47, "end": 1474.85, "word": " on", "probability": 0.94873046875}, {"start": 1474.85, "end": 1475.05, "word": " 
the", "probability": 0.9150390625}, {"start": 1475.05, "end": 1475.35, "word": " value", "probability": 0.97412109375}, {"start": 1475.35, "end": 1475.57, "word": " of", "probability": 0.96435546875}, {"start": 1475.57, "end": 1475.79, "word": " at", "probability": 0.95947265625}, {"start": 1475.79, "end": 1476.07, "word": " least", "probability": 0.95263671875}, {"start": 1476.07, "end": 1476.41, "word": " one", "probability": 0.93310546875}, {"start": 1476.41, "end": 1476.81, "word": " independent", "probability": 0.8740234375}, {"start": 1476.81, "end": 1477.31, "word": " variable.", "probability": 0.90869140625}, {"start": 1478.47, "end": 1479.05, "word": " So", "probability": 0.93310546875}, {"start": 1479.05, "end": 1479.25, "word": " by", "probability": 0.80126953125}, {"start": 1479.25, "end": 1479.65, "word": " using", "probability": 0.93115234375}, {"start": 1479.65, "end": 1480.11, "word": " the", "probability": 0.91162109375}, {"start": 1480.11, "end": 1480.39, "word": " data", "probability": 0.94970703125}, {"start": 1480.39, "end": 1480.61, "word": " we", "probability": 0.93505859375}, {"start": 1480.61, "end": 1480.83, "word": " have", "probability": 0.94775390625}, {"start": 1480.83, "end": 1481.19, "word": " for", "probability": 0.93701171875}, {"start": 1481.19, "end": 1481.57, "word": " selling", "probability": 0.826171875}, {"start": 1481.57, "end": 1481.99, "word": " price", "probability": 0.89794921875}, {"start": 1481.99, "end": 1482.19, "word": " of", "probability": 0.95263671875}, {"start": 1482.19, "end": 1482.29, "word": " a", "probability": 0.9482421875}, {"start": 1482.29, "end": 1482.49, "word": " home", "probability": 0.806640625}, {"start": 1482.49, "end": 1482.71, "word": " and", "probability": 0.88427734375}, {"start": 1482.71, "end": 1483.41, "word": " size,", "probability": 0.77001953125}, {"start": 1484.01, "end": 1484.15, "word": " you", "probability": 0.81982421875}, {"start": 1484.15, "end": 1484.37, "word": " can", 
"probability": 0.935546875}, {"start": 1484.37, "end": 1484.85, "word": " predict", "probability": 0.9404296875}, {"start": 1484.85, "end": 1485.93, "word": " the", "probability": 0.85546875}, {"start": 1485.93, "end": 1486.25, "word": " selling", "probability": 0.892578125}, {"start": 1486.25, "end": 1486.77, "word": " price", "probability": 0.91259765625}, {"start": 1486.77, "end": 1488.37, "word": " by", "probability": 0.9345703125}, {"start": 1488.37, "end": 1489.49, "word": " knowing", "probability": 0.8525390625}, {"start": 1489.49, "end": 1489.75, "word": " the", "probability": 0.91552734375}, {"start": 1489.75, "end": 1490.11, "word": " value", "probability": 0.97021484375}, {"start": 1490.11, "end": 1490.35, "word": " of", "probability": 0.96630859375}, {"start": 1490.35, "end": 1490.55, "word": " its", "probability": 0.79931640625}, {"start": 1490.55, "end": 1490.83, "word": " size.", "probability": 0.87255859375}, {"start": 1490.93, "end": 1491.03, "word": " So", "probability": 0.9267578125}, {"start": 1491.03, "end": 1491.35, "word": " suppose", "probability": 0.83203125}, {"start": 1491.35, "end": 1491.51, "word": " for", "probability": 0.61865234375}, {"start": 1491.51, "end": 1491.89, "word": " example,", "probability": 0.9736328125}], "temperature": 1.0}, {"id": 57, "seek": 152075, "start": 1492.59, "end": 1520.75, "text": " You know that the size of a house is 1450, 1450 square feet. What do you predict its size, its sale or price? So by using this value, we can predict the selling price. 
Next, explain the impact of changes in independent variable on the dependent variable.", "tokens": [509, 458, 300, 264, 2744, 295, 257, 1782, 307, 3499, 2803, 11, 3499, 2803, 3732, 3521, 13, 708, 360, 291, 6069, 1080, 2744, 11, 1080, 8680, 420, 3218, 30, 407, 538, 1228, 341, 2158, 11, 321, 393, 6069, 264, 6511, 3218, 13, 3087, 11, 2903, 264, 2712, 295, 2962, 294, 6695, 7006, 322, 264, 12334, 7006, 13], "avg_logprob": -0.2163254258961513, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 1492.59, "end": 1492.83, "word": " You", "probability": 0.51904296875}, {"start": 1492.83, "end": 1492.97, "word": " know", "probability": 0.88427734375}, {"start": 1492.97, "end": 1493.31, "word": " that", "probability": 0.90087890625}, {"start": 1493.31, "end": 1493.69, "word": " the", "probability": 0.802734375}, {"start": 1493.69, "end": 1494.01, "word": " size", "probability": 0.83837890625}, {"start": 1494.01, "end": 1494.13, "word": " of", "probability": 0.96533203125}, {"start": 1494.13, "end": 1494.25, "word": " a", "probability": 0.60205078125}, {"start": 1494.25, "end": 1494.53, "word": " house", "probability": 0.87548828125}, {"start": 1494.53, "end": 1494.87, "word": " is", "probability": 0.9482421875}, {"start": 1494.87, "end": 1496.35, "word": " 1450,", "probability": 0.796630859375}, {"start": 1496.85, "end": 1498.13, "word": " 1450", "probability": 0.93115234375}, {"start": 1498.13, "end": 1498.65, "word": " square", "probability": 0.79052734375}, {"start": 1498.65, "end": 1499.11, "word": " feet.", "probability": 0.953125}, {"start": 1499.97, "end": 1500.33, "word": " What", "probability": 0.88037109375}, {"start": 1500.33, "end": 1500.73, "word": " do", "probability": 0.86572265625}, {"start": 1500.73, "end": 1500.91, "word": " you", "probability": 0.95751953125}, {"start": 1500.91, "end": 1501.49, "word": " predict", "probability": 0.93505859375}, {"start": 1501.49, "end": 1503.51, "word": " its", "probability": 0.50634765625}, {"start": 
1503.51, "end": 1504.07, "word": " size,", "probability": 0.86181640625}, {"start": 1504.27, "end": 1504.77, "word": " its", "probability": 0.84228515625}, {"start": 1504.77, "end": 1507.17, "word": " sale", "probability": 0.155029296875}, {"start": 1507.17, "end": 1507.37, "word": " or", "probability": 0.486328125}, {"start": 1507.37, "end": 1507.69, "word": " price?", "probability": 0.94482421875}, {"start": 1508.63, "end": 1509.11, "word": " So", "probability": 0.869140625}, {"start": 1509.11, "end": 1509.31, "word": " by", "probability": 0.82421875}, {"start": 1509.31, "end": 1509.53, "word": " using", "probability": 0.92529296875}, {"start": 1509.53, "end": 1509.81, "word": " this", "probability": 0.9453125}, {"start": 1509.81, "end": 1510.19, "word": " value,", "probability": 0.966796875}, {"start": 1510.31, "end": 1510.57, "word": " we", "probability": 0.95458984375}, {"start": 1510.57, "end": 1510.83, "word": " can", "probability": 0.935546875}, {"start": 1510.83, "end": 1511.33, "word": " predict", "probability": 0.93359375}, {"start": 1511.33, "end": 1512.29, "word": " the", "probability": 0.9140625}, {"start": 1512.29, "end": 1512.79, "word": " selling", "probability": 0.88134765625}, {"start": 1512.79, "end": 1513.25, "word": " price.", "probability": 0.67236328125}, {"start": 1514.99, "end": 1515.75, "word": " Next,", "probability": 0.90869140625}, {"start": 1516.03, "end": 1516.51, "word": " explain", "probability": 0.8916015625}, {"start": 1516.51, "end": 1516.81, "word": " the", "probability": 0.91455078125}, {"start": 1516.81, "end": 1517.29, "word": " impact", "probability": 0.94677734375}, {"start": 1517.29, "end": 1517.99, "word": " of", "probability": 0.9521484375}, {"start": 1517.99, "end": 1518.47, "word": " changes", "probability": 0.837890625}, {"start": 1518.47, "end": 1518.71, "word": " in", "probability": 0.9326171875}, {"start": 1518.71, "end": 1519.13, "word": " independent", "probability": 0.56494140625}, {"start": 1519.13, "end": 
1519.59, "word": " variable", "probability": 0.85498046875}, {"start": 1519.59, "end": 1519.89, "word": " on", "probability": 0.904296875}, {"start": 1519.89, "end": 1520.01, "word": " the", "probability": 0.66650390625}, {"start": 1520.01, "end": 1520.31, "word": " dependent", "probability": 0.8935546875}, {"start": 1520.31, "end": 1520.75, "word": " variable.", "probability": 0.89599609375}], "temperature": 1.0}, {"id": 58, "seek": 154955, "start": 1521.89, "end": 1549.55, "text": " You can say, for example, 90% of the variability in the dependent variable in selling price is explained by its size. So we can predict the value of dependent variable based on a value of one independent variable at least. Or also explain the impact of changes in independent variable on the dependent variable.", "tokens": [509, 393, 584, 11, 337, 1365, 11, 4289, 4, 295, 264, 35709, 294, 264, 12334, 7006, 294, 6511, 3218, 307, 8825, 538, 1080, 2744, 13, 407, 321, 393, 6069, 264, 2158, 295, 12334, 7006, 2361, 322, 257, 2158, 295, 472, 6695, 7006, 412, 1935, 13, 1610, 611, 2903, 264, 2712, 295, 2962, 294, 6695, 7006, 322, 264, 12334, 7006, 13], "avg_logprob": -0.17008196623598942, "compression_ratio": 1.8682634730538923, "no_speech_prob": 0.0, "words": [{"start": 1521.89, "end": 1522.11, "word": " You", "probability": 0.8193359375}, {"start": 1522.11, "end": 1522.35, "word": " can", "probability": 0.9453125}, {"start": 1522.35, "end": 1522.57, "word": " say,", "probability": 0.81298828125}, {"start": 1522.69, "end": 1522.75, "word": " for", "probability": 0.9482421875}, {"start": 1522.75, "end": 1523.27, "word": " example,", "probability": 0.9736328125}, {"start": 1523.51, "end": 1524.85, "word": " 90", "probability": 0.71630859375}, {"start": 1524.85, "end": 1526.55, "word": "%", "probability": 0.84912109375}, {"start": 1526.55, "end": 1528.45, "word": " of", "probability": 0.96337890625}, {"start": 1528.45, "end": 1528.77, "word": " the", "probability": 0.89111328125}, {"start": 
1528.77, "end": 1529.41, "word": " variability", "probability": 0.98291015625}, {"start": 1529.41, "end": 1529.73, "word": " in", "probability": 0.86181640625}, {"start": 1529.73, "end": 1529.85, "word": " the", "probability": 0.908203125}, {"start": 1529.85, "end": 1530.15, "word": " dependent", "probability": 0.755859375}, {"start": 1530.15, "end": 1530.65, "word": " variable", "probability": 0.92919921875}, {"start": 1530.65, "end": 1530.95, "word": " in", "probability": 0.6513671875}, {"start": 1530.95, "end": 1531.15, "word": " selling", "probability": 0.787109375}, {"start": 1531.15, "end": 1531.65, "word": " price", "probability": 0.92041015625}, {"start": 1531.65, "end": 1532.51, "word": " is", "probability": 0.859375}, {"start": 1532.51, "end": 1532.97, "word": " explained", "probability": 0.84326171875}, {"start": 1532.97, "end": 1533.53, "word": " by", "probability": 0.9677734375}, {"start": 1533.53, "end": 1535.45, "word": " its", "probability": 0.814453125}, {"start": 1535.45, "end": 1535.91, "word": " size.", "probability": 0.8505859375}, {"start": 1536.31, "end": 1536.63, "word": " So", "probability": 0.95166015625}, {"start": 1536.63, "end": 1536.79, "word": " we", "probability": 0.509765625}, {"start": 1536.79, "end": 1537.01, "word": " can", "probability": 0.9423828125}, {"start": 1537.01, "end": 1537.51, "word": " predict", "probability": 0.9169921875}, {"start": 1537.51, "end": 1538.05, "word": " the", "probability": 0.90869140625}, {"start": 1538.05, "end": 1538.31, "word": " value", "probability": 0.97119140625}, {"start": 1538.31, "end": 1538.41, "word": " of", "probability": 0.96044921875}, {"start": 1538.41, "end": 1538.69, "word": " dependent", "probability": 0.4931640625}, {"start": 1538.69, "end": 1539.05, "word": " variable", "probability": 0.90625}, {"start": 1539.05, "end": 1539.41, "word": " based", "probability": 0.8984375}, {"start": 1539.41, "end": 1539.81, "word": " on", "probability": 0.9462890625}, {"start": 1539.81, "end": 
1540.81, "word": " a", "probability": 0.9453125}, {"start": 1540.81, "end": 1541.13, "word": " value", "probability": 0.974609375}, {"start": 1541.13, "end": 1541.39, "word": " of", "probability": 0.9638671875}, {"start": 1541.39, "end": 1541.59, "word": " one", "probability": 0.90283203125}, {"start": 1541.59, "end": 1542.01, "word": " independent", "probability": 0.8720703125}, {"start": 1542.01, "end": 1542.37, "word": " variable", "probability": 0.91259765625}, {"start": 1542.37, "end": 1542.59, "word": " at", "probability": 0.828125}, {"start": 1542.59, "end": 1542.89, "word": " least.", "probability": 0.9521484375}, {"start": 1543.87, "end": 1544.49, "word": " Or", "probability": 0.96826171875}, {"start": 1544.49, "end": 1545.19, "word": " also", "probability": 0.75341796875}, {"start": 1545.19, "end": 1545.75, "word": " explain", "probability": 0.8671875}, {"start": 1545.75, "end": 1546.01, "word": " the", "probability": 0.90380859375}, {"start": 1546.01, "end": 1546.37, "word": " impact", "probability": 0.9521484375}, {"start": 1546.37, "end": 1546.57, "word": " of", "probability": 0.76171875}, {"start": 1546.57, "end": 1546.89, "word": " changes", "probability": 0.8466796875}, {"start": 1546.89, "end": 1547.09, "word": " in", "probability": 0.89990234375}, {"start": 1547.09, "end": 1547.45, "word": " independent", "probability": 0.47119140625}, {"start": 1547.45, "end": 1547.87, "word": " variable", "probability": 0.87109375}, {"start": 1547.87, "end": 1548.51, "word": " on", "probability": 0.93798828125}, {"start": 1548.51, "end": 1548.71, "word": " the", "probability": 0.90185546875}, {"start": 1548.71, "end": 1549.13, "word": " dependent", "probability": 0.86181640625}, {"start": 1549.13, "end": 1549.55, "word": " variable.", "probability": 0.8818359375}], "temperature": 1.0}, {"id": 59, "seek": 157628, "start": 1551.42, "end": 1576.28, "text": " Sometimes there exists more than one independent variable. 
For example, maybe there are more than one variable that affects a price, a selling price. For example, beside selling price, beside size, maybe location.", "tokens": [4803, 456, 8198, 544, 813, 472, 6695, 7006, 13, 1171, 1365, 11, 1310, 456, 366, 544, 813, 472, 7006, 300, 11807, 257, 3218, 11, 257, 6511, 3218, 13, 1171, 1365, 11, 15726, 6511, 3218, 11, 15726, 2744, 11, 1310, 4914, 13], "avg_logprob": -0.26209076387541635, "compression_ratio": 1.68503937007874, "no_speech_prob": 0.0, "words": [{"start": 1551.42, "end": 1552.22, "word": " Sometimes", "probability": 0.37451171875}, {"start": 1552.22, "end": 1552.44, "word": " there", "probability": 0.76708984375}, {"start": 1552.44, "end": 1552.78, "word": " exists", "probability": 0.57568359375}, {"start": 1552.78, "end": 1553.1, "word": " more", "probability": 0.93896484375}, {"start": 1553.1, "end": 1553.3, "word": " than", "probability": 0.9345703125}, {"start": 1553.3, "end": 1553.5, "word": " one", "probability": 0.91455078125}, {"start": 1553.5, "end": 1553.92, "word": " independent", "probability": 0.8173828125}, {"start": 1553.92, "end": 1554.32, "word": " variable.", "probability": 0.9228515625}, {"start": 1554.58, "end": 1554.72, "word": " For", "probability": 0.900390625}, {"start": 1554.72, "end": 1555.14, "word": " example,", "probability": 0.96826171875}, {"start": 1557.2, "end": 1557.44, "word": " maybe", "probability": 0.8232421875}, {"start": 1557.44, "end": 1557.74, "word": " there", "probability": 0.90380859375}, {"start": 1557.74, "end": 1558.14, "word": " are", "probability": 0.82080078125}, {"start": 1558.14, "end": 1559.34, "word": " more", "probability": 0.9248046875}, {"start": 1559.34, "end": 1559.68, "word": " than", "probability": 0.947265625}, {"start": 1559.68, "end": 1560.3, "word": " one", "probability": 0.9375}, {"start": 1560.3, "end": 1560.72, "word": " variable", "probability": 0.93408203125}, {"start": 1560.72, "end": 1561.06, "word": " that", "probability": 0.91650390625}, 
{"start": 1561.06, "end": 1561.78, "word": " affects", "probability": 0.407470703125}, {"start": 1561.78, "end": 1563.66, "word": " a", "probability": 0.30810546875}, {"start": 1563.66, "end": 1563.98, "word": " price,", "probability": 0.69384765625}, {"start": 1564.14, "end": 1564.3, "word": " a", "probability": 0.60986328125}, {"start": 1564.3, "end": 1564.5, "word": " selling", "probability": 0.84033203125}, {"start": 1564.5, "end": 1565.04, "word": " price.", "probability": 0.94580078125}, {"start": 1565.38, "end": 1565.54, "word": " For", "probability": 0.943359375}, {"start": 1565.54, "end": 1565.96, "word": " example,", "probability": 0.9736328125}, {"start": 1566.68, "end": 1567.26, "word": " beside", "probability": 0.578125}, {"start": 1567.26, "end": 1570.3, "word": " selling", "probability": 0.58251953125}, {"start": 1570.3, "end": 1570.78, "word": " price,", "probability": 0.91796875}, {"start": 1572.22, "end": 1573.52, "word": " beside", "probability": 0.7265625}, {"start": 1573.52, "end": 1574.34, "word": " size,", "probability": 0.802734375}, {"start": 1575.38, "end": 1575.76, "word": " maybe", "probability": 0.95068359375}, {"start": 1575.76, "end": 1576.28, "word": " location.", "probability": 0.94921875}], "temperature": 1.0}, {"id": 60, "seek": 159868, "start": 1579.48, "end": 1598.68, "text": " Maybe location is also another factor that affects the selling price. So in this case there are two variables. 
If there exists more than one variable, in this case we have something called multiple linear regression.", "tokens": [2704, 4914, 307, 611, 1071, 5952, 300, 11807, 264, 6511, 3218, 13, 407, 294, 341, 1389, 456, 366, 732, 9102, 13, 759, 456, 8198, 544, 813, 472, 7006, 11, 294, 341, 1389, 321, 362, 746, 1219, 3866, 8213, 24590, 13], "avg_logprob": -0.19721798780487804, "compression_ratio": 1.4466666666666668, "no_speech_prob": 0.0, "words": [{"start": 1579.48, "end": 1579.76, "word": " Maybe", "probability": 0.229736328125}, {"start": 1579.76, "end": 1580.18, "word": " location", "probability": 0.9345703125}, {"start": 1580.18, "end": 1580.46, "word": " is", "probability": 0.93701171875}, {"start": 1580.46, "end": 1580.78, "word": " also", "probability": 0.66650390625}, {"start": 1580.78, "end": 1581.1, "word": " another", "probability": 0.93115234375}, {"start": 1581.1, "end": 1581.66, "word": " factor", "probability": 0.87109375}, {"start": 1581.66, "end": 1582.86, "word": " that", "probability": 0.8896484375}, {"start": 1582.86, "end": 1583.58, "word": " affects", "probability": 0.72802734375}, {"start": 1583.58, "end": 1584.7, "word": " the", "probability": 0.5234375}, {"start": 1584.7, "end": 1584.88, "word": " selling", "probability": 0.89990234375}, {"start": 1584.88, "end": 1585.26, "word": " price.", "probability": 0.84033203125}, {"start": 1585.42, "end": 1585.48, "word": " So", "probability": 0.78125}, {"start": 1585.48, "end": 1585.58, "word": " in", "probability": 0.65234375}, {"start": 1585.58, "end": 1585.72, "word": " this", "probability": 0.9462890625}, {"start": 1585.72, "end": 1585.92, "word": " case", "probability": 0.91552734375}, {"start": 1585.92, "end": 1586.1, "word": " there", "probability": 0.5673828125}, {"start": 1586.1, "end": 1586.36, "word": " are", "probability": 0.94140625}, {"start": 1586.36, "end": 1587.36, "word": " two", "probability": 0.86474609375}, {"start": 1587.36, "end": 1587.82, "word": " variables.", "probability": 
0.97509765625}, {"start": 1589.8, "end": 1590.44, "word": " If", "probability": 0.943359375}, {"start": 1590.44, "end": 1590.68, "word": " there", "probability": 0.90380859375}, {"start": 1590.68, "end": 1591.02, "word": " exists", "probability": 0.787109375}, {"start": 1591.02, "end": 1591.4, "word": " more", "probability": 0.943359375}, {"start": 1591.4, "end": 1591.64, "word": " than", "probability": 0.93505859375}, {"start": 1591.64, "end": 1591.86, "word": " one", "probability": 0.92822265625}, {"start": 1591.86, "end": 1592.24, "word": " variable,", "probability": 0.94775390625}, {"start": 1592.64, "end": 1592.8, "word": " in", "probability": 0.873046875}, {"start": 1592.8, "end": 1593.02, "word": " this", "probability": 0.9462890625}, {"start": 1593.02, "end": 1593.28, "word": " case", "probability": 0.9111328125}, {"start": 1593.28, "end": 1593.48, "word": " we", "probability": 0.8603515625}, {"start": 1593.48, "end": 1593.8, "word": " have", "probability": 0.94482421875}, {"start": 1593.8, "end": 1594.24, "word": " something", "probability": 0.87939453125}, {"start": 1594.24, "end": 1594.76, "word": " called", "probability": 0.89208984375}, {"start": 1594.76, "end": 1596.08, "word": " multiple", "probability": 0.7255859375}, {"start": 1596.08, "end": 1598.06, "word": " linear", "probability": 0.92626953125}, {"start": 1598.06, "end": 1598.68, "word": " regression.", "probability": 0.98583984375}], "temperature": 1.0}, {"id": 61, "seek": 162677, "start": 1602.03, "end": 1626.77, "text": " Here, we just talk about one independent variable. There is only, in this chapter, there is only one x. So it's called simple linear regression. 
The calculations for multiple takes time.", "tokens": [1692, 11, 321, 445, 751, 466, 472, 6695, 7006, 13, 821, 307, 787, 11, 294, 341, 7187, 11, 456, 307, 787, 472, 2031, 13, 407, 309, 311, 1219, 2199, 8213, 24590, 13, 440, 20448, 337, 3866, 2516, 565, 13], "avg_logprob": -0.20605469383299352, "compression_ratio": 1.375, "no_speech_prob": 0.0, "words": [{"start": 1602.03, "end": 1602.43, "word": " Here,", "probability": 0.67529296875}, {"start": 1602.95, "end": 1603.63, "word": " we", "probability": 0.935546875}, {"start": 1603.63, "end": 1603.95, "word": " just", "probability": 0.82275390625}, {"start": 1603.95, "end": 1604.25, "word": " talk", "probability": 0.6884765625}, {"start": 1604.25, "end": 1604.75, "word": " about", "probability": 0.90966796875}, {"start": 1604.75, "end": 1605.81, "word": " one", "probability": 0.904296875}, {"start": 1605.81, "end": 1606.25, "word": " independent", "probability": 0.84033203125}, {"start": 1606.25, "end": 1606.71, "word": " variable.", "probability": 0.9248046875}, {"start": 1607.03, "end": 1607.29, "word": " There", "probability": 0.494384765625}, {"start": 1607.29, "end": 1607.45, "word": " is", "probability": 0.89013671875}, {"start": 1607.45, "end": 1607.87, "word": " only,", "probability": 0.931640625}, {"start": 1608.05, "end": 1608.41, "word": " in", "probability": 0.93505859375}, {"start": 1608.41, "end": 1608.73, "word": " this", "probability": 0.943359375}, {"start": 1608.73, "end": 1609.01, "word": " chapter,", "probability": 0.75830078125}, {"start": 1609.81, "end": 1610.45, "word": " there", "probability": 0.85693359375}, {"start": 1610.45, "end": 1610.63, "word": " is", "probability": 0.9248046875}, {"start": 1610.63, "end": 1611.05, "word": " only", "probability": 0.923828125}, {"start": 1611.05, "end": 1611.61, "word": " one", "probability": 0.8837890625}, {"start": 1611.61, "end": 1612.01, "word": " x.", "probability": 0.64013671875}, {"start": 1612.85, "end": 1613.25, "word": " So", "probability": 
0.9453125}, {"start": 1613.25, "end": 1613.45, "word": " it's", "probability": 0.8662109375}, {"start": 1613.45, "end": 1613.79, "word": " called", "probability": 0.8935546875}, {"start": 1613.79, "end": 1614.37, "word": " simple", "probability": 0.869140625}, {"start": 1614.37, "end": 1618.33, "word": " linear", "probability": 0.900390625}, {"start": 1618.33, "end": 1619.33, "word": " regression.", "probability": 0.935546875}, {"start": 1622.19, "end": 1622.47, "word": " The", "probability": 0.82568359375}, {"start": 1622.47, "end": 1623.15, "word": " calculations", "probability": 0.91552734375}, {"start": 1623.15, "end": 1623.61, "word": " for", "probability": 0.953125}, {"start": 1623.61, "end": 1624.27, "word": " multiple", "probability": 0.912109375}, {"start": 1624.27, "end": 1626.41, "word": " takes", "probability": 0.57958984375}, {"start": 1626.41, "end": 1626.77, "word": " time.", "probability": 0.89453125}], "temperature": 1.0}, {"id": 62, "seek": 165523, "start": 1627.05, "end": 1655.23, "text": " So we are going just to cover one independent variable. But if there exists more than one, in this case you have to use some statistical software as SPSS. Because in that case you can just select a regression analysis from SPSS, then you can run the multiple regression without doing any computations. 
But here we just covered one independent variable.", "tokens": [407, 321, 366, 516, 445, 281, 2060, 472, 6695, 7006, 13, 583, 498, 456, 8198, 544, 813, 472, 11, 294, 341, 1389, 291, 362, 281, 764, 512, 22820, 4722, 382, 318, 6273, 50, 13, 1436, 294, 300, 1389, 291, 393, 445, 3048, 257, 24590, 5215, 490, 318, 6273, 50, 11, 550, 291, 393, 1190, 264, 3866, 24590, 1553, 884, 604, 2807, 763, 13, 583, 510, 321, 445, 5343, 472, 6695, 7006, 13], "avg_logprob": -0.14768835126537166, "compression_ratio": 1.7135922330097086, "no_speech_prob": 0.0, "words": [{"start": 1627.05, "end": 1627.55, "word": " So", "probability": 0.810546875}, {"start": 1627.55, "end": 1627.93, "word": " we", "probability": 0.60107421875}, {"start": 1627.93, "end": 1628.07, "word": " are", "probability": 0.85693359375}, {"start": 1628.07, "end": 1628.29, "word": " going", "probability": 0.880859375}, {"start": 1628.29, "end": 1628.61, "word": " just", "probability": 0.6455078125}, {"start": 1628.61, "end": 1628.87, "word": " to", "probability": 0.94677734375}, {"start": 1628.87, "end": 1629.25, "word": " cover", "probability": 0.9677734375}, {"start": 1629.25, "end": 1630.51, "word": " one", "probability": 0.86962890625}, {"start": 1630.51, "end": 1630.95, "word": " independent", "probability": 0.82861328125}, {"start": 1630.95, "end": 1631.43, "word": " variable.", "probability": 0.9189453125}, {"start": 1631.93, "end": 1632.25, "word": " But", "probability": 0.92724609375}, {"start": 1632.25, "end": 1632.45, "word": " if", "probability": 0.912109375}, {"start": 1632.45, "end": 1632.61, "word": " there", "probability": 0.896484375}, {"start": 1632.61, "end": 1632.91, "word": " exists", "probability": 0.806640625}, {"start": 1632.91, "end": 1633.19, "word": " more", "probability": 0.9404296875}, {"start": 1633.19, "end": 1633.39, "word": " than", "probability": 0.94677734375}, {"start": 1633.39, "end": 1633.65, "word": " one,", "probability": 0.92724609375}, {"start": 1633.71, "end": 1633.89, "word": 
" in", "probability": 0.8515625}, {"start": 1633.89, "end": 1634.11, "word": " this", "probability": 0.9345703125}, {"start": 1634.11, "end": 1634.29, "word": " case", "probability": 0.912109375}, {"start": 1634.29, "end": 1634.41, "word": " you", "probability": 0.6328125}, {"start": 1634.41, "end": 1634.55, "word": " have", "probability": 0.92529296875}, {"start": 1634.55, "end": 1634.67, "word": " to", "probability": 0.96728515625}, {"start": 1634.67, "end": 1634.89, "word": " use", "probability": 0.8701171875}, {"start": 1634.89, "end": 1635.19, "word": " some", "probability": 0.90380859375}, {"start": 1635.19, "end": 1635.73, "word": " statistical", "probability": 0.88916015625}, {"start": 1635.73, "end": 1636.37, "word": " software", "probability": 0.91552734375}, {"start": 1636.37, "end": 1637.49, "word": " as", "probability": 0.78271484375}, {"start": 1637.49, "end": 1638.25, "word": " SPSS.", "probability": 0.9383138020833334}, {"start": 1638.47, "end": 1638.65, "word": " Because", "probability": 0.8681640625}, {"start": 1638.65, "end": 1638.77, "word": " in", "probability": 0.8798828125}, {"start": 1638.77, "end": 1638.95, "word": " that", "probability": 0.90576171875}, {"start": 1638.95, "end": 1639.21, "word": " case", "probability": 0.9091796875}, {"start": 1639.21, "end": 1639.39, "word": " you", "probability": 0.71484375}, {"start": 1639.39, "end": 1639.65, "word": " can", "probability": 0.94873046875}, {"start": 1639.65, "end": 1640.13, "word": " just", "probability": 0.91552734375}, {"start": 1640.13, "end": 1642.67, "word": " select", "probability": 0.80126953125}, {"start": 1642.67, "end": 1643.39, "word": " a", "probability": 0.468017578125}, {"start": 1643.39, "end": 1643.71, "word": " regression", "probability": 0.95849609375}, {"start": 1643.71, "end": 1644.17, "word": " analysis", "probability": 0.87548828125}, {"start": 1644.17, "end": 1644.49, "word": " from", "probability": 0.88427734375}, {"start": 1644.49, "end": 1645.07, "word": " 
SPSS,", "probability": 0.9480794270833334}, {"start": 1645.17, "end": 1645.29, "word": " then", "probability": 0.830078125}, {"start": 1645.29, "end": 1645.45, "word": " you", "probability": 0.9609375}, {"start": 1645.45, "end": 1645.61, "word": " can", "probability": 0.9453125}, {"start": 1645.61, "end": 1645.97, "word": " run", "probability": 0.9296875}, {"start": 1645.97, "end": 1646.73, "word": " the", "probability": 0.88671875}, {"start": 1646.73, "end": 1647.09, "word": " multiple", "probability": 0.9072265625}, {"start": 1647.09, "end": 1647.49, "word": " regression", "probability": 0.94091796875}, {"start": 1647.49, "end": 1647.89, "word": " without", "probability": 0.89599609375}, {"start": 1647.89, "end": 1648.35, "word": " doing", "probability": 0.95849609375}, {"start": 1648.35, "end": 1648.59, "word": " any", "probability": 0.892578125}, {"start": 1648.59, "end": 1649.25, "word": " computations.", "probability": 0.95947265625}, {"start": 1650.01, "end": 1650.29, "word": " But", "probability": 0.94677734375}, {"start": 1650.29, "end": 1650.61, "word": " here", "probability": 0.8505859375}, {"start": 1650.61, "end": 1650.83, "word": " we", "probability": 0.86962890625}, {"start": 1650.83, "end": 1651.41, "word": " just", "probability": 0.90576171875}, {"start": 1651.41, "end": 1653.05, "word": " covered", "probability": 0.81201171875}, {"start": 1653.05, "end": 1654.19, "word": " one", "probability": 0.92724609375}, {"start": 1654.19, "end": 1654.75, "word": " independent", "probability": 0.9111328125}, {"start": 1654.75, "end": 1655.23, "word": " variable.", "probability": 0.890625}], "temperature": 1.0}, {"id": 63, "seek": 168180, "start": 1655.74, "end": 1681.8, "text": " In this case, it's called simple linear regression. Again, the dependent variable is the variable we wish to predict or explain, the same as weight. Independent variable, the variable used to predict or explain the dependent variable. 
For simple linear regression model, there is only one independent variable.", "tokens": [682, 341, 1389, 11, 309, 311, 1219, 2199, 8213, 24590, 13, 3764, 11, 264, 12334, 7006, 307, 264, 7006, 321, 3172, 281, 6069, 420, 2903, 11, 264, 912, 382, 3364, 13, 40310, 7006, 11, 264, 7006, 1143, 281, 6069, 420, 2903, 264, 12334, 7006, 13, 1171, 2199, 8213, 24590, 2316, 11, 456, 307, 787, 472, 6695, 7006, 13], "avg_logprob": -0.16697563155222747, "compression_ratio": 2.0194805194805197, "no_speech_prob": 0.0, "words": [{"start": 1655.74, "end": 1655.96, "word": " In", "probability": 0.55615234375}, {"start": 1655.96, "end": 1656.16, "word": " this", "probability": 0.94384765625}, {"start": 1656.16, "end": 1656.46, "word": " case,", "probability": 0.8994140625}, {"start": 1656.62, "end": 1656.62, "word": " it's", "probability": 0.702392578125}, {"start": 1656.62, "end": 1656.82, "word": " called", "probability": 0.896484375}, {"start": 1656.82, "end": 1657.38, "word": " simple", "probability": 0.462646484375}, {"start": 1657.38, "end": 1658.86, "word": " linear", "probability": 0.912109375}, {"start": 1658.86, "end": 1659.58, "word": " regression.", "probability": 0.95556640625}, {"start": 1660.32, "end": 1661.02, "word": " Again,", "probability": 0.88232421875}, {"start": 1661.4, "end": 1661.58, "word": " the", "probability": 0.90966796875}, {"start": 1661.58, "end": 1661.98, "word": " dependent", "probability": 0.8232421875}, {"start": 1661.98, "end": 1662.46, "word": " variable", "probability": 0.92529296875}, {"start": 1662.46, "end": 1662.9, "word": " is", "probability": 0.9306640625}, {"start": 1662.9, "end": 1663.06, "word": " the", "probability": 0.90771484375}, {"start": 1663.06, "end": 1663.36, "word": " variable", "probability": 0.9306640625}, {"start": 1663.36, "end": 1663.58, "word": " we", "probability": 0.5771484375}, {"start": 1663.58, "end": 1663.82, "word": " wish", "probability": 0.86962890625}, {"start": 1663.82, "end": 1664.02, "word": " to", 
"probability": 0.97119140625}, {"start": 1664.02, "end": 1664.38, "word": " predict", "probability": 0.9013671875}, {"start": 1664.38, "end": 1664.6, "word": " or", "probability": 0.935546875}, {"start": 1664.6, "end": 1664.96, "word": " explain,", "probability": 0.9755859375}, {"start": 1665.66, "end": 1666.98, "word": " the", "probability": 0.8310546875}, {"start": 1666.98, "end": 1667.18, "word": " same", "probability": 0.89501953125}, {"start": 1667.18, "end": 1667.36, "word": " as", "probability": 0.9716796875}, {"start": 1667.36, "end": 1667.7, "word": " weight.", "probability": 0.9130859375}, {"start": 1668.98, "end": 1669.52, "word": " Independent", "probability": 0.740234375}, {"start": 1669.52, "end": 1670.02, "word": " variable,", "probability": 0.896484375}, {"start": 1670.18, "end": 1670.28, "word": " the", "probability": 0.89990234375}, {"start": 1670.28, "end": 1670.54, "word": " variable", "probability": 0.93603515625}, {"start": 1670.54, "end": 1670.82, "word": " used", "probability": 0.90283203125}, {"start": 1670.82, "end": 1671.02, "word": " to", "probability": 0.97216796875}, {"start": 1671.02, "end": 1671.44, "word": " predict", "probability": 0.90283203125}, {"start": 1671.44, "end": 1671.7, "word": " or", "probability": 0.95166015625}, {"start": 1671.7, "end": 1672.18, "word": " explain", "probability": 0.95068359375}, {"start": 1672.18, "end": 1672.44, "word": " the", "probability": 0.8623046875}, {"start": 1672.44, "end": 1672.86, "word": " dependent", "probability": 0.923828125}, {"start": 1672.86, "end": 1674.0, "word": " variable.", "probability": 0.89111328125}, {"start": 1677.4, "end": 1678.1, "word": " For", "probability": 0.9384765625}, {"start": 1678.1, "end": 1678.52, "word": " simple", "probability": 0.89306640625}, {"start": 1678.52, "end": 1678.86, "word": " linear", "probability": 0.9404296875}, {"start": 1678.86, "end": 1679.22, "word": " regression", "probability": 0.958984375}, {"start": 1679.22, "end": 1679.58, "word": " 
model,", "probability": 0.86279296875}, {"start": 1679.74, "end": 1679.94, "word": " there", "probability": 0.9091796875}, {"start": 1679.94, "end": 1680.14, "word": " is", "probability": 0.88330078125}, {"start": 1680.14, "end": 1680.54, "word": " only", "probability": 0.92626953125}, {"start": 1680.54, "end": 1680.88, "word": " one", "probability": 0.92626953125}, {"start": 1680.88, "end": 1681.3, "word": " independent", "probability": 0.908203125}, {"start": 1681.3, "end": 1681.8, "word": " variable.", "probability": 0.892578125}], "temperature": 1.0}, {"id": 64, "seek": 171103, "start": 1684.83, "end": 1711.03, "text": " Another example for simple linear regression. Suppose we are talking about your scores. Scores is the dependent variable can be affected by number of hours. Hour of study. Number of studying hours.", "tokens": [3996, 1365, 337, 2199, 8213, 24590, 13, 21360, 321, 366, 1417, 466, 428, 13444, 13, 2747, 2706, 307, 264, 12334, 7006, 393, 312, 8028, 538, 1230, 295, 2496, 13, 38369, 295, 2979, 13, 5118, 295, 7601, 2496, 13], "avg_logprob": -0.3339342841735253, "compression_ratio": 1.3943661971830985, "no_speech_prob": 0.0, "words": [{"start": 1684.83, "end": 1685.51, "word": " Another", "probability": 0.54150390625}, {"start": 1685.51, "end": 1685.97, "word": " example", "probability": 0.96142578125}, {"start": 1685.97, "end": 1686.47, "word": " for", "probability": 0.81787109375}, {"start": 1686.47, "end": 1687.85, "word": " simple", "probability": 0.54150390625}, {"start": 1687.85, "end": 1688.07, "word": " linear", "probability": 0.96728515625}, {"start": 1688.07, "end": 1688.45, "word": " regression.", "probability": 0.9423828125}, {"start": 1688.77, "end": 1689.07, "word": " Suppose", "probability": 0.7373046875}, {"start": 1689.07, "end": 1689.77, "word": " we", "probability": 0.82275390625}, {"start": 1689.77, "end": 1689.97, "word": " are", "probability": 0.62939453125}, {"start": 1689.97, "end": 1690.37, "word": " talking", "probability": 
0.845703125}, {"start": 1690.37, "end": 1690.81, "word": " about", "probability": 0.91259765625}, {"start": 1690.81, "end": 1691.05, "word": " your", "probability": 0.6103515625}, {"start": 1691.05, "end": 1691.59, "word": " scores.", "probability": 0.73974609375}, {"start": 1694.21, "end": 1695.13, "word": " Scores", "probability": 0.806640625}, {"start": 1695.13, "end": 1695.59, "word": " is", "probability": 0.5029296875}, {"start": 1695.59, "end": 1695.89, "word": " the", "probability": 0.86376953125}, {"start": 1695.89, "end": 1696.27, "word": " dependent", "probability": 0.8125}, {"start": 1696.27, "end": 1696.73, "word": " variable", "probability": 0.92822265625}, {"start": 1696.73, "end": 1697.23, "word": " can", "probability": 0.48583984375}, {"start": 1697.23, "end": 1697.37, "word": " be", "probability": 0.9521484375}, {"start": 1697.37, "end": 1697.77, "word": " affected", "probability": 0.83056640625}, {"start": 1697.77, "end": 1698.27, "word": " by", "probability": 0.97265625}, {"start": 1698.27, "end": 1700.41, "word": " number", "probability": 0.6494140625}, {"start": 1700.41, "end": 1700.67, "word": " of", "probability": 0.9716796875}, {"start": 1700.67, "end": 1701.05, "word": " hours.", "probability": 0.91845703125}, {"start": 1705.13, "end": 1705.65, "word": " Hour", "probability": 0.28857421875}, {"start": 1705.65, "end": 1705.85, "word": " of", "probability": 0.48779296875}, {"start": 1705.85, "end": 1706.13, "word": " study.", "probability": 0.59375}, {"start": 1707.33, "end": 1707.91, "word": " Number", "probability": 0.470703125}, {"start": 1707.91, "end": 1709.25, "word": " of", "probability": 0.953125}, {"start": 1709.25, "end": 1710.69, "word": " studying", "probability": 0.86767578125}, {"start": 1710.69, "end": 1711.03, "word": " hours.", "probability": 0.92333984375}], "temperature": 1.0}, {"id": 65, "seek": 174013, "start": 1716.91, "end": 1740.13, "text": " Maybe as number of studying hour increases, your scores also increase. 
In this case, if there is only one X, one independent variable, it's called simple linear regression. Maybe another variable, number of missing classes or attendance.", "tokens": [2704, 382, 1230, 295, 7601, 1773, 8637, 11, 428, 13444, 611, 3488, 13, 682, 341, 1389, 11, 498, 456, 307, 787, 472, 1783, 11, 472, 6695, 7006, 11, 309, 311, 1219, 2199, 8213, 24590, 13, 2704, 1071, 7006, 11, 1230, 295, 5361, 5359, 420, 24337, 13], "avg_logprob": -0.20246010004205905, "compression_ratio": 1.5256410256410255, "no_speech_prob": 0.0, "words": [{"start": 1716.91, "end": 1717.31, "word": " Maybe", "probability": 0.6728515625}, {"start": 1717.31, "end": 1717.67, "word": " as", "probability": 0.8251953125}, {"start": 1717.67, "end": 1718.01, "word": " number", "probability": 0.7314453125}, {"start": 1718.01, "end": 1718.21, "word": " of", "probability": 0.94091796875}, {"start": 1718.21, "end": 1718.43, "word": " studying", "probability": 0.83154296875}, {"start": 1718.43, "end": 1718.63, "word": " hour", "probability": 0.6298828125}, {"start": 1718.63, "end": 1719.09, "word": " increases,", "probability": 0.91943359375}, {"start": 1719.63, "end": 1719.81, "word": " your", "probability": 0.8837890625}, {"start": 1719.81, "end": 1720.21, "word": " scores", "probability": 0.6904296875}, {"start": 1720.21, "end": 1720.59, "word": " also", "probability": 0.83154296875}, {"start": 1720.59, "end": 1721.11, "word": " increase.", "probability": 0.81494140625}, {"start": 1722.27, "end": 1722.53, "word": " In", "probability": 0.93603515625}, {"start": 1722.53, "end": 1722.75, "word": " this", "probability": 0.94580078125}, {"start": 1722.75, "end": 1722.99, "word": " case,", "probability": 0.91259765625}, {"start": 1723.05, "end": 1723.13, "word": " if", "probability": 0.931640625}, {"start": 1723.13, "end": 1723.25, "word": " there", "probability": 0.91357421875}, {"start": 1723.25, "end": 1723.39, "word": " is", "probability": 0.8896484375}, {"start": 1723.39, "end": 1723.69, "word": " 
only", "probability": 0.92236328125}, {"start": 1723.69, "end": 1723.97, "word": " one", "probability": 0.8974609375}, {"start": 1723.97, "end": 1724.31, "word": " X,", "probability": 0.60888671875}, {"start": 1724.71, "end": 1724.93, "word": " one", "probability": 0.91064453125}, {"start": 1724.93, "end": 1725.35, "word": " independent", "probability": 0.86328125}, {"start": 1725.35, "end": 1725.79, "word": " variable,", "probability": 0.89013671875}, {"start": 1726.01, "end": 1726.03, "word": " it's", "probability": 0.78759765625}, {"start": 1726.03, "end": 1726.33, "word": " called", "probability": 0.91259765625}, {"start": 1726.33, "end": 1726.93, "word": " simple", "probability": 0.77685546875}, {"start": 1726.93, "end": 1728.17, "word": " linear", "probability": 0.859375}, {"start": 1728.17, "end": 1728.65, "word": " regression.", "probability": 0.8486328125}, {"start": 1729.45, "end": 1730.05, "word": " Maybe", "probability": 0.93896484375}, {"start": 1730.05, "end": 1730.53, "word": " another", "probability": 0.9296875}, {"start": 1730.53, "end": 1731.11, "word": " variable,", "probability": 0.9150390625}, {"start": 1732.27, "end": 1732.73, "word": " number", "probability": 0.92724609375}, {"start": 1732.73, "end": 1733.39, "word": " of", "probability": 0.9716796875}, {"start": 1733.39, "end": 1735.39, "word": " missing", "probability": 0.78076171875}, {"start": 1735.39, "end": 1736.05, "word": " classes", "probability": 0.89208984375}, {"start": 1736.05, "end": 1739.73, "word": " or", "probability": 0.412109375}, {"start": 1739.73, "end": 1740.13, "word": " attendance.", "probability": 0.77294921875}], "temperature": 1.0}, {"id": 66, "seek": 177088, "start": 1742.04, "end": 1770.88, "text": " As number of missing classes increases, your score goes down. That means there exists negative relationship between missing classes and your score. So sometimes, maybe there exists positive or negative. It depends on the variable itself. 
In this case, if there are more than one variable, then we are talking about multiple linear regression model. But here, we have only one independent variable.", "tokens": [1018, 1230, 295, 5361, 5359, 8637, 11, 428, 6175, 1709, 760, 13, 663, 1355, 456, 8198, 3671, 2480, 1296, 5361, 5359, 293, 428, 6175, 13, 407, 2171, 11, 1310, 456, 8198, 3353, 420, 3671, 13, 467, 5946, 322, 264, 7006, 2564, 13, 682, 341, 1389, 11, 498, 456, 366, 544, 813, 472, 7006, 11, 550, 321, 366, 1417, 466, 3866, 8213, 24590, 2316, 13, 583, 510, 11, 321, 362, 787, 472, 6695, 7006, 13], "avg_logprob": -0.15208334008852642, "compression_ratio": 1.7155172413793103, "no_speech_prob": 0.0, "words": [{"start": 1742.04, "end": 1742.38, "word": " As", "probability": 0.68798828125}, {"start": 1742.38, "end": 1742.64, "word": " number", "probability": 0.81298828125}, {"start": 1742.64, "end": 1742.78, "word": " of", "probability": 0.9580078125}, {"start": 1742.78, "end": 1742.94, "word": " missing", "probability": 0.76171875}, {"start": 1742.94, "end": 1743.16, "word": " classes", "probability": 0.916015625}, {"start": 1743.16, "end": 1743.66, "word": " increases,", "probability": 0.87060546875}, {"start": 1743.84, "end": 1743.96, "word": " your", "probability": 0.89404296875}, {"start": 1743.96, "end": 1744.28, "word": " score", "probability": 0.86181640625}, {"start": 1744.28, "end": 1744.58, "word": " goes", "probability": 0.92822265625}, {"start": 1744.58, "end": 1744.96, "word": " down.", "probability": 0.85498046875}, {"start": 1745.64, "end": 1745.98, "word": " That", "probability": 0.71337890625}, {"start": 1745.98, "end": 1746.24, "word": " means", "probability": 0.8984375}, {"start": 1746.24, "end": 1746.38, "word": " there", "probability": 0.8056640625}, {"start": 1746.38, "end": 1746.66, "word": " exists", "probability": 0.7333984375}, {"start": 1746.66, "end": 1747.08, "word": " negative", "probability": 0.828125}, {"start": 1747.08, "end": 1747.7, "word": " relationship", "probability": 
0.91796875}, {"start": 1747.7, "end": 1748.2, "word": " between", "probability": 0.8564453125}, {"start": 1748.2, "end": 1749.4, "word": " missing", "probability": 0.85888671875}, {"start": 1749.4, "end": 1749.94, "word": " classes", "probability": 0.89990234375}, {"start": 1749.94, "end": 1750.36, "word": " and", "probability": 0.90869140625}, {"start": 1750.36, "end": 1750.54, "word": " your", "probability": 0.8955078125}, {"start": 1750.54, "end": 1750.96, "word": " score.", "probability": 0.80615234375}, {"start": 1751.78, "end": 1752.04, "word": " So", "probability": 0.93505859375}, {"start": 1752.04, "end": 1752.64, "word": " sometimes,", "probability": 0.82666015625}, {"start": 1752.96, "end": 1753.32, "word": " maybe", "probability": 0.92822265625}, {"start": 1753.32, "end": 1753.54, "word": " there", "probability": 0.73193359375}, {"start": 1753.54, "end": 1753.78, "word": " exists", "probability": 0.53759765625}, {"start": 1753.78, "end": 1754.18, "word": " positive", "probability": 0.9267578125}, {"start": 1754.18, "end": 1754.42, "word": " or", "probability": 0.95556640625}, {"start": 1754.42, "end": 1754.74, "word": " negative.", "probability": 0.9560546875}, {"start": 1754.8, "end": 1754.92, "word": " It", "probability": 0.95654296875}, {"start": 1754.92, "end": 1755.24, "word": " depends", "probability": 0.90380859375}, {"start": 1755.24, "end": 1755.68, "word": " on", "probability": 0.94775390625}, {"start": 1755.68, "end": 1756.58, "word": " the", "probability": 0.90283203125}, {"start": 1756.58, "end": 1756.92, "word": " variable", "probability": 0.9365234375}, {"start": 1756.92, "end": 1757.38, "word": " itself.", "probability": 0.77197265625}, {"start": 1758.26, "end": 1758.6, "word": " In", "probability": 0.958984375}, {"start": 1758.6, "end": 1758.86, "word": " this", "probability": 0.9443359375}, {"start": 1758.86, "end": 1759.18, "word": " case,", "probability": 0.908203125}, {"start": 1759.22, "end": 1759.38, "word": " if", "probability": 
0.93896484375}, {"start": 1759.38, "end": 1759.56, "word": " there", "probability": 0.9052734375}, {"start": 1759.56, "end": 1759.76, "word": " are", "probability": 0.92138671875}, {"start": 1759.76, "end": 1760.04, "word": " more", "probability": 0.9423828125}, {"start": 1760.04, "end": 1760.26, "word": " than", "probability": 0.9482421875}, {"start": 1760.26, "end": 1760.46, "word": " one", "probability": 0.919921875}, {"start": 1760.46, "end": 1760.9, "word": " variable,", "probability": 0.90869140625}, {"start": 1761.26, "end": 1761.54, "word": " then", "probability": 0.83544921875}, {"start": 1761.54, "end": 1762.26, "word": " we", "probability": 0.78173828125}, {"start": 1762.26, "end": 1762.44, "word": " are", "probability": 0.9365234375}, {"start": 1762.44, "end": 1762.78, "word": " talking", "probability": 0.84375}, {"start": 1762.78, "end": 1763.18, "word": " about", "probability": 0.8994140625}, {"start": 1763.18, "end": 1763.82, "word": " multiple", "probability": 0.8759765625}, {"start": 1763.82, "end": 1765.1, "word": " linear", "probability": 0.8671875}, {"start": 1765.1, "end": 1765.82, "word": " regression", "probability": 0.93798828125}, {"start": 1765.82, "end": 1766.14, "word": " model.", "probability": 0.748046875}, {"start": 1766.98, "end": 1767.26, "word": " But", "probability": 0.951171875}, {"start": 1767.26, "end": 1767.56, "word": " here,", "probability": 0.85302734375}, {"start": 1768.1, "end": 1768.3, "word": " we", "probability": 0.9580078125}, {"start": 1768.3, "end": 1768.56, "word": " have", "probability": 0.94287109375}, {"start": 1768.56, "end": 1769.1, "word": " only", "probability": 0.92431640625}, {"start": 1769.1, "end": 1769.7, "word": " one", "probability": 0.92724609375}, {"start": 1769.7, "end": 1770.32, "word": " independent", "probability": 0.91552734375}, {"start": 1770.32, "end": 1770.88, "word": " variable.", "probability": 0.88671875}], "temperature": 1.0}, {"id": 67, "seek": 180117, "start": 1772.87, "end": 1801.17, 
"text": " In addition to that, a relationship between x and y is described by a linear function. So there exists a straight line between the two variables. The changes in y are assumed to be related to changes in x only. So any change in y is related only to changes in x. So that's the simple case we have for regression, that we have only one independent", "tokens": [682, 4500, 281, 300, 11, 257, 2480, 1296, 2031, 293, 288, 307, 7619, 538, 257, 8213, 2445, 13, 407, 456, 8198, 257, 2997, 1622, 1296, 264, 732, 9102, 13, 440, 2962, 294, 288, 366, 15895, 281, 312, 4077, 281, 2962, 294, 2031, 787, 13, 407, 604, 1319, 294, 288, 307, 4077, 787, 281, 2962, 294, 2031, 13, 407, 300, 311, 264, 2199, 1389, 321, 362, 337, 24590, 11, 300, 321, 362, 787, 472, 6695], "avg_logprob": -0.1278125023841858, "compression_ratio": 1.7263681592039801, "no_speech_prob": 0.0, "words": [{"start": 1772.87, "end": 1773.11, "word": " In", "probability": 0.8212890625}, {"start": 1773.11, "end": 1773.43, "word": " addition", "probability": 0.9404296875}, {"start": 1773.43, "end": 1773.63, "word": " to", "probability": 0.9658203125}, {"start": 1773.63, "end": 1773.95, "word": " that,", "probability": 0.9296875}, {"start": 1774.91, "end": 1775.19, "word": " a", "probability": 0.5234375}, {"start": 1775.19, "end": 1775.61, "word": " relationship", "probability": 0.91455078125}, {"start": 1775.61, "end": 1776.05, "word": " between", "probability": 0.90185546875}, {"start": 1776.05, "end": 1776.21, "word": " x", "probability": 0.62255859375}, {"start": 1776.21, "end": 1776.37, "word": " and", "probability": 0.9443359375}, {"start": 1776.37, "end": 1776.57, "word": " y", "probability": 0.99169921875}, {"start": 1776.57, "end": 1776.81, "word": " is", "probability": 0.9287109375}, {"start": 1776.81, "end": 1777.23, "word": " described", "probability": 0.8173828125}, {"start": 1777.23, "end": 1777.51, "word": " by", "probability": 0.96337890625}, {"start": 1777.51, "end": 1777.73, "word": " a", 
"probability": 0.98583984375}, {"start": 1777.73, "end": 1777.95, "word": " linear", "probability": 0.90869140625}, {"start": 1777.95, "end": 1778.41, "word": " function.", "probability": 0.93017578125}, {"start": 1779.49, "end": 1779.81, "word": " So", "probability": 0.931640625}, {"start": 1779.81, "end": 1779.99, "word": " there", "probability": 0.74169921875}, {"start": 1779.99, "end": 1780.33, "word": " exists", "probability": 0.8017578125}, {"start": 1780.33, "end": 1780.67, "word": " a", "probability": 0.74853515625}, {"start": 1780.67, "end": 1780.85, "word": " straight", "probability": 0.89794921875}, {"start": 1780.85, "end": 1781.25, "word": " line", "probability": 0.927734375}, {"start": 1781.25, "end": 1781.71, "word": " between", "probability": 0.8642578125}, {"start": 1781.71, "end": 1781.93, "word": " the", "probability": 0.90673828125}, {"start": 1781.93, "end": 1782.07, "word": " two", "probability": 0.921875}, {"start": 1782.07, "end": 1782.63, "word": " variables.", "probability": 0.94921875}, {"start": 1784.69, "end": 1785.33, "word": " The", "probability": 0.56005859375}, {"start": 1785.33, "end": 1785.71, "word": " changes", "probability": 0.89306640625}, {"start": 1785.71, "end": 1785.99, "word": " in", "probability": 0.94287109375}, {"start": 1785.99, "end": 1786.27, "word": " y", "probability": 0.95556640625}, {"start": 1786.27, "end": 1786.49, "word": " are", "probability": 0.93505859375}, {"start": 1786.49, "end": 1786.85, "word": " assumed", "probability": 0.89013671875}, {"start": 1786.85, "end": 1787.05, "word": " to", "probability": 0.9677734375}, {"start": 1787.05, "end": 1787.15, "word": " be", "probability": 0.943359375}, {"start": 1787.15, "end": 1787.57, "word": " related", "probability": 0.951171875}, {"start": 1787.57, "end": 1787.79, "word": " to", "probability": 0.9580078125}, {"start": 1787.79, "end": 1788.23, "word": " changes", "probability": 0.552734375}, {"start": 1788.23, "end": 1788.49, "word": " in", "probability": 
0.93603515625}, {"start": 1788.49, "end": 1788.73, "word": " x", "probability": 0.99267578125}, {"start": 1788.73, "end": 1789.17, "word": " only.", "probability": 0.9228515625}, {"start": 1790.01, "end": 1790.21, "word": " So", "probability": 0.9482421875}, {"start": 1790.21, "end": 1790.37, "word": " any", "probability": 0.833984375}, {"start": 1790.37, "end": 1790.67, "word": " change", "probability": 0.88037109375}, {"start": 1790.67, "end": 1790.87, "word": " in", "probability": 0.9423828125}, {"start": 1790.87, "end": 1791.17, "word": " y", "probability": 0.9892578125}, {"start": 1791.17, "end": 1791.69, "word": " is", "probability": 0.9287109375}, {"start": 1791.69, "end": 1792.07, "word": " related", "probability": 0.943359375}, {"start": 1792.07, "end": 1792.51, "word": " only", "probability": 0.91259765625}, {"start": 1792.51, "end": 1792.73, "word": " to", "probability": 0.96044921875}, {"start": 1792.73, "end": 1793.19, "word": " changes", "probability": 0.72216796875}, {"start": 1793.19, "end": 1793.59, "word": " in", "probability": 0.93896484375}, {"start": 1793.59, "end": 1794.27, "word": " x.", "probability": 0.99267578125}, {"start": 1794.73, "end": 1794.97, "word": " So", "probability": 0.94921875}, {"start": 1794.97, "end": 1795.27, "word": " that's", "probability": 0.9365234375}, {"start": 1795.27, "end": 1795.45, "word": " the", "probability": 0.88525390625}, {"start": 1795.45, "end": 1795.87, "word": " simple", "probability": 0.935546875}, {"start": 1795.87, "end": 1796.51, "word": " case", "probability": 0.91064453125}, {"start": 1796.51, "end": 1796.73, "word": " we", "probability": 0.9365234375}, {"start": 1796.73, "end": 1797.09, "word": " have", "probability": 0.94482421875}, {"start": 1797.09, "end": 1797.47, "word": " for", "probability": 0.93115234375}, {"start": 1797.47, "end": 1797.81, "word": " regression,", "probability": 0.953125}, {"start": 1798.89, "end": 1799.25, "word": " that", "probability": 0.9375}, {"start": 1799.25, 
"end": 1799.45, "word": " we", "probability": 0.9599609375}, {"start": 1799.45, "end": 1799.67, "word": " have", "probability": 0.9345703125}, {"start": 1799.67, "end": 1799.99, "word": " only", "probability": 0.916015625}, {"start": 1799.99, "end": 1800.37, "word": " one", "probability": 0.9296875}, {"start": 1800.37, "end": 1801.17, "word": " independent", "probability": 0.9091796875}], "temperature": 1.0}, {"id": 68, "seek": 182929, "start": 1803.89, "end": 1829.29, "text": " Variable. Types of relationships, as we mentioned, maybe there exist linear, it means there exist straight line between X and Y, either linear positive or negative, or sometimes there exist non-linear relationship, it's called curved linear relationship. The same as this one, it's parabola.", "tokens": [32511, 712, 13, 5569, 5190, 295, 6159, 11, 382, 321, 2835, 11, 1310, 456, 2514, 8213, 11, 309, 1355, 456, 2514, 2997, 1622, 1296, 1783, 293, 398, 11, 2139, 8213, 3353, 420, 3671, 11, 420, 2171, 456, 2514, 2107, 12, 28263, 2480, 11, 309, 311, 1219, 24991, 8213, 2480, 13, 440, 912, 382, 341, 472, 11, 309, 311, 45729, 4711, 13], "avg_logprob": -0.29737904427513, "compression_ratio": 1.697674418604651, "no_speech_prob": 0.0, "words": [{"start": 1803.8899999999999, "end": 1804.61, "word": " Variable.", "probability": 0.55487060546875}, {"start": 1804.61, "end": 1805.33, "word": " Types", "probability": 0.953125}, {"start": 1805.33, "end": 1805.47, "word": " of", "probability": 0.96826171875}, {"start": 1805.47, "end": 1806.05, "word": " relationships,", "probability": 0.7734375}, {"start": 1806.37, "end": 1806.55, "word": " as", "probability": 0.94970703125}, {"start": 1806.55, "end": 1806.69, "word": " we", "probability": 0.94287109375}, {"start": 1806.69, "end": 1807.07, "word": " mentioned,", "probability": 0.78759765625}, {"start": 1807.21, "end": 1807.37, "word": " maybe", "probability": 0.488037109375}, {"start": 1807.37, "end": 1807.61, "word": " there", "probability": 0.423583984375}, 
{"start": 1807.61, "end": 1808.01, "word": " exist", "probability": 0.63330078125}, {"start": 1808.01, "end": 1809.79, "word": " linear,", "probability": 0.822265625}, {"start": 1810.69, "end": 1811.49, "word": " it", "probability": 0.83642578125}, {"start": 1811.49, "end": 1811.73, "word": " means", "probability": 0.92529296875}, {"start": 1811.73, "end": 1811.89, "word": " there", "probability": 0.873046875}, {"start": 1811.89, "end": 1812.19, "word": " exist", "probability": 0.44873046875}, {"start": 1812.19, "end": 1812.61, "word": " straight", "probability": 0.5322265625}, {"start": 1812.61, "end": 1812.85, "word": " line", "probability": 0.86474609375}, {"start": 1812.85, "end": 1813.11, "word": " between", "probability": 0.89453125}, {"start": 1813.11, "end": 1813.29, "word": " X", "probability": 0.62548828125}, {"start": 1813.29, "end": 1813.43, "word": " and", "probability": 0.921875}, {"start": 1813.43, "end": 1813.77, "word": " Y,", "probability": 0.99609375}, {"start": 1814.97, "end": 1815.45, "word": " either", "probability": 0.9423828125}, {"start": 1815.45, "end": 1816.49, "word": " linear", "probability": 0.88134765625}, {"start": 1816.49, "end": 1816.95, "word": " positive", "probability": 0.7958984375}, {"start": 1816.95, "end": 1818.47, "word": " or", "probability": 0.76806640625}, {"start": 1818.47, "end": 1818.93, "word": " negative,", "probability": 0.94580078125}, {"start": 1819.87, "end": 1820.21, "word": " or", "probability": 0.9560546875}, {"start": 1820.21, "end": 1820.79, "word": " sometimes", "probability": 0.9169921875}, {"start": 1820.79, "end": 1821.01, "word": " there", "probability": 0.8857421875}, {"start": 1821.01, "end": 1821.45, "word": " exist", "probability": 0.7841796875}, {"start": 1821.45, "end": 1822.05, "word": " non", "probability": 0.98046875}, {"start": 1822.05, "end": 1822.39, "word": "-linear", "probability": 0.721923828125}, {"start": 1822.39, "end": 1823.71, "word": " relationship,", "probability": 0.6611328125}, 
{"start": 1824.75, "end": 1824.75, "word": " it's", "probability": 0.812744140625}, {"start": 1824.75, "end": 1824.99, "word": " called", "probability": 0.8974609375}, {"start": 1824.99, "end": 1825.49, "word": " curved", "probability": 0.57275390625}, {"start": 1825.49, "end": 1825.83, "word": " linear", "probability": 0.837890625}, {"start": 1825.83, "end": 1826.49, "word": " relationship.", "probability": 0.91162109375}, {"start": 1827.13, "end": 1827.81, "word": " The", "probability": 0.486328125}, {"start": 1827.81, "end": 1828.01, "word": " same", "probability": 0.9140625}, {"start": 1828.01, "end": 1828.19, "word": " as", "probability": 0.94873046875}, {"start": 1828.19, "end": 1828.45, "word": " this", "probability": 0.94873046875}, {"start": 1828.45, "end": 1828.65, "word": " one,", "probability": 0.89404296875}, {"start": 1828.75, "end": 1828.79, "word": " it's", "probability": 0.78759765625}, {"start": 1828.79, "end": 1829.29, "word": " parabola.", "probability": 0.85205078125}], "temperature": 1.0}, {"id": 69, "seek": 185831, "start": 1832.57, "end": 1858.31, "text": " Now in this case there is no linear relationship but there exists curved linear or something like this one. So these types of non-linear relationship between the two variables. Here we are covering just the linear relationship between the two variables. 
So based on the scatter plot you can determine the direction.", "tokens": [823, 294, 341, 1389, 456, 307, 572, 8213, 2480, 457, 456, 8198, 24991, 8213, 420, 746, 411, 341, 472, 13, 407, 613, 3467, 295, 2107, 12, 28263, 2480, 1296, 264, 732, 9102, 13, 1692, 321, 366, 10322, 445, 264, 8213, 2480, 1296, 264, 732, 9102, 13, 407, 2361, 322, 264, 34951, 7542, 291, 393, 6997, 264, 3513, 13], "avg_logprob": -0.20458157385809947, "compression_ratio": 1.8160919540229885, "no_speech_prob": 0.0, "words": [{"start": 1832.57, "end": 1832.85, "word": " Now", "probability": 0.8427734375}, {"start": 1832.85, "end": 1833.01, "word": " in", "probability": 0.615234375}, {"start": 1833.01, "end": 1833.19, "word": " this", "probability": 0.94970703125}, {"start": 1833.19, "end": 1833.49, "word": " case", "probability": 0.91796875}, {"start": 1833.49, "end": 1833.75, "word": " there", "probability": 0.55712890625}, {"start": 1833.75, "end": 1833.91, "word": " is", "probability": 0.93212890625}, {"start": 1833.91, "end": 1834.17, "word": " no", "probability": 0.9404296875}, {"start": 1834.17, "end": 1834.61, "word": " linear", "probability": 0.88525390625}, {"start": 1834.61, "end": 1835.15, "word": " relationship", "probability": 0.9384765625}, {"start": 1835.15, "end": 1835.65, "word": " but", "probability": 0.4541015625}, {"start": 1835.65, "end": 1835.95, "word": " there", "probability": 0.89599609375}, {"start": 1835.95, "end": 1836.37, "word": " exists", "probability": 0.7646484375}, {"start": 1836.37, "end": 1836.93, "word": " curved", "probability": 0.458251953125}, {"start": 1836.93, "end": 1837.71, "word": " linear", "probability": 0.81494140625}, {"start": 1837.71, "end": 1839.09, "word": " or", "probability": 0.64111328125}, {"start": 1839.09, "end": 1839.41, "word": " something", "probability": 0.841796875}, {"start": 1839.41, "end": 1839.69, "word": " like", "probability": 0.93505859375}, {"start": 1839.69, "end": 1839.91, "word": " this", "probability": 0.95458984375}, 
{"start": 1839.91, "end": 1840.21, "word": " one.", "probability": 0.8759765625}, {"start": 1841.35, "end": 1841.63, "word": " So", "probability": 0.91064453125}, {"start": 1841.63, "end": 1841.89, "word": " these", "probability": 0.734375}, {"start": 1841.89, "end": 1842.39, "word": " types", "probability": 0.76220703125}, {"start": 1842.39, "end": 1842.97, "word": " of", "probability": 0.9599609375}, {"start": 1842.97, "end": 1843.35, "word": " non", "probability": 0.93310546875}, {"start": 1843.35, "end": 1845.91, "word": "-linear", "probability": 0.844970703125}, {"start": 1845.91, "end": 1847.55, "word": " relationship", "probability": 0.80224609375}, {"start": 1847.55, "end": 1848.03, "word": " between", "probability": 0.884765625}, {"start": 1848.03, "end": 1848.19, "word": " the", "probability": 0.74658203125}, {"start": 1848.19, "end": 1848.31, "word": " two", "probability": 0.890625}, {"start": 1848.31, "end": 1848.59, "word": " variables.", "probability": 0.1275634765625}, {"start": 1849.05, "end": 1849.35, "word": " Here", "probability": 0.7900390625}, {"start": 1849.35, "end": 1849.53, "word": " we", "probability": 0.91064453125}, {"start": 1849.53, "end": 1849.67, "word": " are", "probability": 0.94189453125}, {"start": 1849.67, "end": 1850.07, "word": " covering", "probability": 0.921875}, {"start": 1850.07, "end": 1850.61, "word": " just", "probability": 0.91455078125}, {"start": 1850.61, "end": 1851.95, "word": " the", "probability": 0.8662109375}, {"start": 1851.95, "end": 1852.41, "word": " linear", "probability": 0.80029296875}, {"start": 1852.41, "end": 1853.67, "word": " relationship", "probability": 0.93994140625}, {"start": 1853.67, "end": 1854.07, "word": " between", "probability": 0.88037109375}, {"start": 1854.07, "end": 1854.21, "word": " the", "probability": 0.8671875}, {"start": 1854.21, "end": 1854.37, "word": " two", "probability": 0.93359375}, {"start": 1854.37, "end": 1854.67, "word": " variables.", "probability": 0.93994140625}, 
{"start": 1855.29, "end": 1855.51, "word": " So", "probability": 0.92529296875}, {"start": 1855.51, "end": 1855.75, "word": " based", "probability": 0.85693359375}, {"start": 1855.75, "end": 1855.95, "word": " on", "probability": 0.95068359375}, {"start": 1855.95, "end": 1856.11, "word": " the", "probability": 0.822265625}, {"start": 1856.11, "end": 1856.37, "word": " scatter", "probability": 0.9072265625}, {"start": 1856.37, "end": 1856.57, "word": " plot", "probability": 0.70263671875}, {"start": 1856.57, "end": 1856.79, "word": " you", "probability": 0.8251953125}, {"start": 1856.79, "end": 1856.97, "word": " can", "probability": 0.943359375}, {"start": 1856.97, "end": 1857.41, "word": " determine", "probability": 0.931640625}, {"start": 1857.41, "end": 1857.79, "word": " the", "probability": 0.9140625}, {"start": 1857.79, "end": 1858.31, "word": " direction.", "probability": 0.97509765625}], "temperature": 1.0}, {"id": 70, "seek": 187814, "start": 1859.66, "end": 1878.14, "text": " The form, the strength. Here, the form we are talking about is just linear. Now, another type of relationship, the strength of the relationship. 
Here, the points, either for this graph or the other one,", "tokens": [440, 1254, 11, 264, 3800, 13, 1692, 11, 264, 1254, 321, 366, 1417, 466, 307, 445, 8213, 13, 823, 11, 1071, 2010, 295, 2480, 11, 264, 3800, 295, 264, 2480, 13, 1692, 11, 264, 2793, 11, 2139, 337, 341, 4295, 420, 264, 661, 472, 11], "avg_logprob": -0.27377718492694525, "compression_ratio": 1.5615384615384615, "no_speech_prob": 0.0, "words": [{"start": 1859.66, "end": 1859.9, "word": " The", "probability": 0.277099609375}, {"start": 1859.9, "end": 1860.24, "word": " form,", "probability": 0.1961669921875}, {"start": 1860.46, "end": 1860.62, "word": " the", "probability": 0.8349609375}, {"start": 1860.62, "end": 1860.94, "word": " strength.", "probability": 0.85986328125}, {"start": 1861.1, "end": 1861.3, "word": " Here,", "probability": 0.6953125}, {"start": 1861.92, "end": 1862.08, "word": " the", "probability": 0.89404296875}, {"start": 1862.08, "end": 1862.34, "word": " form", "probability": 0.91162109375}, {"start": 1862.34, "end": 1862.5, "word": " we", "probability": 0.89501953125}, {"start": 1862.5, "end": 1862.6, "word": " are", "probability": 0.8837890625}, {"start": 1862.6, "end": 1862.94, "word": " talking", "probability": 0.83544921875}, {"start": 1862.94, "end": 1863.42, "word": " about", "probability": 0.91455078125}, {"start": 1863.42, "end": 1863.86, "word": " is", "probability": 0.88037109375}, {"start": 1863.86, "end": 1864.24, "word": " just", "probability": 0.90869140625}, {"start": 1864.24, "end": 1864.72, "word": " linear.", "probability": 0.88818359375}, {"start": 1868.7, "end": 1869.3, "word": " Now,", "probability": 0.8740234375}, {"start": 1870.1, "end": 1870.44, "word": " another", "probability": 0.8720703125}, {"start": 1870.44, "end": 1870.82, "word": " type", "probability": 0.5830078125}, {"start": 1870.82, "end": 1871.0, "word": " of", "probability": 0.92822265625}, {"start": 1871.0, "end": 1871.5, "word": " relationship,", "probability": 0.8212890625}, {"start": 
1872.0, "end": 1872.4, "word": " the", "probability": 0.802734375}, {"start": 1872.4, "end": 1872.86, "word": " strength", "probability": 0.8525390625}, {"start": 1872.86, "end": 1873.26, "word": " of", "probability": 0.95947265625}, {"start": 1873.26, "end": 1873.38, "word": " the", "probability": 0.88818359375}, {"start": 1873.38, "end": 1873.88, "word": " relationship.", "probability": 0.89501953125}, {"start": 1874.46, "end": 1874.8, "word": " Here,", "probability": 0.84521484375}, {"start": 1875.3, "end": 1875.46, "word": " the", "probability": 0.908203125}, {"start": 1875.46, "end": 1875.98, "word": " points,", "probability": 0.86767578125}, {"start": 1876.44, "end": 1876.66, "word": " either", "probability": 0.919921875}, {"start": 1876.66, "end": 1876.94, "word": " for", "probability": 0.88818359375}, {"start": 1876.94, "end": 1877.12, "word": " this", "probability": 0.90966796875}, {"start": 1877.12, "end": 1877.4, "word": " graph", "probability": 0.96142578125}, {"start": 1877.4, "end": 1877.56, "word": " or", "probability": 0.9189453125}, {"start": 1877.56, "end": 1877.68, "word": " the", "probability": 0.6875}, {"start": 1877.68, "end": 1877.88, "word": " other", "probability": 0.89404296875}, {"start": 1877.88, "end": 1878.14, "word": " one,", "probability": 0.92578125}], "temperature": 1.0}, {"id": 71, "seek": 189397, "start": 1879.47, "end": 1893.97, "text": " These points are close to the straight line, it means there exists strong positive relationship or strong negative relationship. So it depends on the direction. 
So strong either positive or strong negative.", "tokens": [1981, 2793, 366, 1998, 281, 264, 2997, 1622, 11, 309, 1355, 456, 8198, 2068, 3353, 2480, 420, 2068, 3671, 2480, 13, 407, 309, 5946, 322, 264, 3513, 13, 407, 2068, 2139, 3353, 420, 2068, 3671, 13], "avg_logprob": -0.2485219465719687, "compression_ratio": 1.7107438016528926, "no_speech_prob": 0.0, "words": [{"start": 1879.47, "end": 1879.87, "word": " These", "probability": 0.35791015625}, {"start": 1879.87, "end": 1880.29, "word": " points", "probability": 0.90771484375}, {"start": 1880.29, "end": 1880.57, "word": " are", "probability": 0.9365234375}, {"start": 1880.57, "end": 1881.17, "word": " close", "probability": 0.619140625}, {"start": 1881.17, "end": 1881.91, "word": " to", "probability": 0.91748046875}, {"start": 1881.91, "end": 1882.09, "word": " the", "probability": 0.84814453125}, {"start": 1882.09, "end": 1882.41, "word": " straight", "probability": 0.916015625}, {"start": 1882.41, "end": 1882.83, "word": " line,", "probability": 0.85009765625}, {"start": 1883.59, "end": 1883.59, "word": " it", "probability": 0.47119140625}, {"start": 1883.59, "end": 1883.89, "word": " means", "probability": 0.92529296875}, {"start": 1883.89, "end": 1884.19, "word": " there", "probability": 0.73583984375}, {"start": 1884.19, "end": 1884.57, "word": " exists", "probability": 0.68505859375}, {"start": 1884.57, "end": 1884.97, "word": " strong", "probability": 0.6923828125}, {"start": 1884.97, "end": 1885.49, "word": " positive", "probability": 0.8876953125}, {"start": 1885.49, "end": 1886.39, "word": " relationship", "probability": 0.89404296875}, {"start": 1886.39, "end": 1887.15, "word": " or", "probability": 0.71875}, {"start": 1887.15, "end": 1887.59, "word": " strong", "probability": 0.84521484375}, {"start": 1887.59, "end": 1888.21, "word": " negative", "probability": 0.94580078125}, {"start": 1888.21, "end": 1888.97, "word": " relationship.", "probability": 0.908203125}, {"start": 1889.27, "end": 1889.37, 
"word": " So", "probability": 0.82958984375}, {"start": 1889.37, "end": 1889.45, "word": " it", "probability": 0.8037109375}, {"start": 1889.45, "end": 1889.75, "word": " depends", "probability": 0.90478515625}, {"start": 1889.75, "end": 1890.01, "word": " on", "probability": 0.94677734375}, {"start": 1890.01, "end": 1890.15, "word": " the", "probability": 0.88623046875}, {"start": 1890.15, "end": 1890.59, "word": " direction.", "probability": 0.96923828125}, {"start": 1891.13, "end": 1891.23, "word": " So", "probability": 0.80029296875}, {"start": 1891.23, "end": 1891.57, "word": " strong", "probability": 0.78466796875}, {"start": 1891.57, "end": 1891.81, "word": " either", "probability": 0.8818359375}, {"start": 1891.81, "end": 1892.37, "word": " positive", "probability": 0.9423828125}, {"start": 1892.37, "end": 1892.83, "word": " or", "probability": 0.93115234375}, {"start": 1892.83, "end": 1893.25, "word": " strong", "probability": 0.873046875}, {"start": 1893.25, "end": 1893.97, "word": " negative.", "probability": 0.94970703125}], "temperature": 1.0}, {"id": 72, "seek": 191677, "start": 1895.43, "end": 1916.77, "text": " Here the points are scattered away from the regression line, so you can say there exists weak relationship, either weak positive or weak negative. It depends on the direction of the relationship between the two variables. 
Sometimes there is no", "tokens": [1692, 264, 2793, 366, 21986, 1314, 490, 264, 24590, 1622, 11, 370, 291, 393, 584, 456, 8198, 5336, 2480, 11, 2139, 5336, 3353, 420, 5336, 3671, 13, 467, 5946, 322, 264, 3513, 295, 264, 2480, 1296, 264, 732, 9102, 13, 4803, 456, 307, 572], "avg_logprob": -0.15538195106718275, "compression_ratio": 1.564102564102564, "no_speech_prob": 0.0, "words": [{"start": 1895.43, "end": 1895.71, "word": " Here", "probability": 0.75341796875}, {"start": 1895.71, "end": 1895.87, "word": " the", "probability": 0.5625}, {"start": 1895.87, "end": 1896.15, "word": " points", "probability": 0.923828125}, {"start": 1896.15, "end": 1896.55, "word": " are", "probability": 0.93896484375}, {"start": 1896.55, "end": 1897.71, "word": " scattered", "probability": 0.7978515625}, {"start": 1897.71, "end": 1898.09, "word": " away", "probability": 0.8779296875}, {"start": 1898.09, "end": 1898.39, "word": " from", "probability": 0.89111328125}, {"start": 1898.39, "end": 1898.57, "word": " the", "probability": 0.91357421875}, {"start": 1898.57, "end": 1898.85, "word": " regression", "probability": 0.953125}, {"start": 1898.85, "end": 1899.27, "word": " line,", "probability": 0.93115234375}, {"start": 1899.99, "end": 1900.07, "word": " so", "probability": 0.9248046875}, {"start": 1900.07, "end": 1900.23, "word": " you", "probability": 0.87255859375}, {"start": 1900.23, "end": 1900.41, "word": " can", "probability": 0.94921875}, {"start": 1900.41, "end": 1900.67, "word": " say", "probability": 0.78759765625}, {"start": 1900.67, "end": 1900.85, "word": " there", "probability": 0.853515625}, {"start": 1900.85, "end": 1901.31, "word": " exists", "probability": 0.8173828125}, {"start": 1901.31, "end": 1901.79, "word": " weak", "probability": 0.95849609375}, {"start": 1901.79, "end": 1903.37, "word": " relationship,", "probability": 0.904296875}, {"start": 1903.53, "end": 1903.75, "word": " either", "probability": 0.951171875}, {"start": 1903.75, "end": 1904.01, "word": " 
weak", "probability": 0.96875}, {"start": 1904.01, "end": 1904.37, "word": " positive", "probability": 0.7587890625}, {"start": 1904.37, "end": 1904.77, "word": " or", "probability": 0.9384765625}, {"start": 1904.77, "end": 1905.09, "word": " weak", "probability": 0.978515625}, {"start": 1905.09, "end": 1906.17, "word": " negative.", "probability": 0.9423828125}, {"start": 1906.31, "end": 1906.39, "word": " It", "probability": 0.955078125}, {"start": 1906.39, "end": 1906.75, "word": " depends", "probability": 0.90234375}, {"start": 1906.75, "end": 1907.05, "word": " on", "probability": 0.9501953125}, {"start": 1907.05, "end": 1907.37, "word": " the", "probability": 0.92041015625}, {"start": 1907.37, "end": 1908.51, "word": " direction", "probability": 0.97412109375}, {"start": 1908.51, "end": 1909.49, "word": " of", "probability": 0.9658203125}, {"start": 1909.49, "end": 1909.65, "word": " the", "probability": 0.91943359375}, {"start": 1909.65, "end": 1910.17, "word": " relationship", "probability": 0.91845703125}, {"start": 1910.17, "end": 1910.83, "word": " between", "probability": 0.86669921875}, {"start": 1910.83, "end": 1912.05, "word": " the", "probability": 0.916015625}, {"start": 1912.05, "end": 1912.25, "word": " two", "probability": 0.9248046875}, {"start": 1912.25, "end": 1912.75, "word": " variables.", "probability": 0.94873046875}, {"start": 1913.59, "end": 1914.27, "word": " Sometimes", "probability": 0.90625}, {"start": 1914.27, "end": 1916.15, "word": " there", "probability": 0.7109375}, {"start": 1916.15, "end": 1916.37, "word": " is", "probability": 0.93994140625}, {"start": 1916.37, "end": 1916.77, "word": " no", "probability": 0.943359375}], "temperature": 1.0}, {"id": 73, "seek": 194636, "start": 1917.64, "end": 1946.36, "text": " relationship or actually there is no linear relationship between the two variables. 
If the points are scattered away from the regression line, I mean you cannot determine if it is positive or negative, then there is no relationship between the two variables, the same as this one. X increases, Y stays nearly in the same position, then there exists no relationship between the two variables.", "tokens": [2480, 420, 767, 456, 307, 572, 8213, 2480, 1296, 264, 732, 9102, 13, 759, 264, 2793, 366, 21986, 1314, 490, 264, 24590, 1622, 11, 286, 914, 291, 2644, 6997, 498, 309, 307, 3353, 420, 3671, 11, 550, 456, 307, 572, 2480, 1296, 264, 732, 9102, 11, 264, 912, 382, 341, 472, 13, 1783, 8637, 11, 398, 10834, 6217, 294, 264, 912, 2535, 11, 550, 456, 8198, 572, 2480, 1296, 264, 732, 9102, 13], "avg_logprob": -0.1626900305619111, "compression_ratio": 2.0416666666666665, "no_speech_prob": 0.0, "words": [{"start": 1917.6399999999999, "end": 1918.32, "word": " relationship", "probability": 0.46923828125}, {"start": 1918.32, "end": 1918.7, "word": " or", "probability": 0.66455078125}, {"start": 1918.7, "end": 1919.12, "word": " actually", "probability": 0.8369140625}, {"start": 1919.12, "end": 1919.36, "word": " there", "probability": 0.8671875}, {"start": 1919.36, "end": 1919.5, "word": " is", "probability": 0.89794921875}, {"start": 1919.5, "end": 1919.68, "word": " no", "probability": 0.90625}, {"start": 1919.68, "end": 1920.0, "word": " linear", "probability": 0.89501953125}, {"start": 1920.0, "end": 1920.48, "word": " relationship", "probability": 0.9130859375}, {"start": 1920.48, "end": 1920.74, "word": " between", "probability": 0.85400390625}, {"start": 1920.74, "end": 1920.9, "word": " the", "probability": 0.86279296875}, {"start": 1920.9, "end": 1921.04, "word": " two", "probability": 0.90380859375}, {"start": 1921.04, "end": 1921.48, "word": " variables.", "probability": 0.9443359375}, {"start": 1921.96, "end": 1922.34, "word": " If", "probability": 0.93896484375}, {"start": 1922.34, "end": 1922.54, "word": " the", "probability": 0.91650390625}, 
{"start": 1922.54, "end": 1922.8, "word": " points", "probability": 0.9345703125}, {"start": 1922.8, "end": 1923.02, "word": " are", "probability": 0.94677734375}, {"start": 1923.02, "end": 1923.5, "word": " scattered", "probability": 0.80712890625}, {"start": 1923.5, "end": 1924.74, "word": " away", "probability": 0.86669921875}, {"start": 1924.74, "end": 1925.22, "word": " from", "probability": 0.8828125}, {"start": 1925.22, "end": 1925.38, "word": " the", "probability": 0.90380859375}, {"start": 1925.38, "end": 1925.66, "word": " regression", "probability": 0.91015625}, {"start": 1925.66, "end": 1926.12, "word": " line,", "probability": 0.939453125}, {"start": 1926.76, "end": 1926.9, "word": " I", "probability": 0.8759765625}, {"start": 1926.9, "end": 1927.02, "word": " mean", "probability": 0.9658203125}, {"start": 1927.02, "end": 1927.26, "word": " you", "probability": 0.6220703125}, {"start": 1927.26, "end": 1927.86, "word": " cannot", "probability": 0.712890625}, {"start": 1927.86, "end": 1928.46, "word": " determine", "probability": 0.76953125}, {"start": 1928.46, "end": 1929.48, "word": " if", "probability": 0.935546875}, {"start": 1929.48, "end": 1929.62, "word": " it", "probability": 0.94970703125}, {"start": 1929.62, "end": 1929.8, "word": " is", "probability": 0.9375}, {"start": 1929.8, "end": 1930.14, "word": " positive", "probability": 0.93310546875}, {"start": 1930.14, "end": 1930.62, "word": " or", "probability": 0.96240234375}, {"start": 1930.62, "end": 1931.08, "word": " negative,", "probability": 0.9443359375}, {"start": 1931.98, "end": 1932.3, "word": " then", "probability": 0.85107421875}, {"start": 1932.3, "end": 1932.58, "word": " there", "probability": 0.89990234375}, {"start": 1932.58, "end": 1932.8, "word": " is", "probability": 0.943359375}, {"start": 1932.8, "end": 1933.16, "word": " no", "probability": 0.9462890625}, {"start": 1933.16, "end": 1934.1, "word": " relationship", "probability": 0.91015625}, {"start": 1934.1, "end": 1934.6, 
"word": " between", "probability": 0.857421875}, {"start": 1934.6, "end": 1934.78, "word": " the", "probability": 0.91357421875}, {"start": 1934.78, "end": 1934.94, "word": " two", "probability": 0.92431640625}, {"start": 1934.94, "end": 1935.4, "word": " variables,", "probability": 0.93798828125}, {"start": 1935.84, "end": 1936.02, "word": " the", "probability": 0.8466796875}, {"start": 1936.02, "end": 1936.22, "word": " same", "probability": 0.9052734375}, {"start": 1936.22, "end": 1936.38, "word": " as", "probability": 0.94873046875}, {"start": 1936.38, "end": 1936.58, "word": " this", "probability": 0.9462890625}, {"start": 1936.58, "end": 1936.84, "word": " one.", "probability": 0.9248046875}, {"start": 1937.54, "end": 1937.86, "word": " X", "probability": 0.81689453125}, {"start": 1937.86, "end": 1938.4, "word": " increases,", "probability": 0.779296875}, {"start": 1938.86, "end": 1939.2, "word": " Y", "probability": 0.8408203125}, {"start": 1939.2, "end": 1939.62, "word": " stays", "probability": 0.6943359375}, {"start": 1939.62, "end": 1940.18, "word": " nearly", "probability": 0.83642578125}, {"start": 1940.18, "end": 1940.42, "word": " in", "probability": 0.9130859375}, {"start": 1940.42, "end": 1940.58, "word": " the", "probability": 0.91455078125}, {"start": 1940.58, "end": 1940.76, "word": " same", "probability": 0.89794921875}, {"start": 1940.76, "end": 1941.2, "word": " position,", "probability": 0.9326171875}, {"start": 1941.82, "end": 1942.02, "word": " then", "probability": 0.8447265625}, {"start": 1942.02, "end": 1942.78, "word": " there", "probability": 0.89794921875}, {"start": 1942.78, "end": 1943.2, "word": " exists", "probability": 0.78271484375}, {"start": 1943.2, "end": 1943.72, "word": " no", "probability": 0.94287109375}, {"start": 1943.72, "end": 1944.54, "word": " relationship", "probability": 0.90087890625}, {"start": 1944.54, "end": 1945.7, "word": " between", "probability": 0.8662109375}, {"start": 1945.7, "end": 1945.88, "word": " 
the", "probability": 0.91455078125}, {"start": 1945.88, "end": 1946.04, "word": " two", "probability": 0.93212890625}, {"start": 1946.04, "end": 1946.36, "word": " variables.", "probability": 0.931640625}], "temperature": 1.0}, {"id": 74, "seek": 196920, "start": 1946.68, "end": 1969.2, "text": " So, a relationship could be linear or curvilinear. It could be positive or negative, strong or weak, or sometimes there is no relationship between the two variables. Now the question is, how can we write", "tokens": [407, 11, 257, 2480, 727, 312, 8213, 420, 1262, 20202, 533, 289, 13, 467, 727, 312, 3353, 420, 3671, 11, 2068, 420, 5336, 11, 420, 2171, 456, 307, 572, 2480, 1296, 264, 732, 9102, 13, 823, 264, 1168, 307, 11, 577, 393, 321, 2464], "avg_logprob": -0.20868055688010323, "compression_ratio": 1.489051094890511, "no_speech_prob": 0.0, "words": [{"start": 1946.68, "end": 1947.12, "word": " So,", "probability": 0.8251953125}, {"start": 1948.62, "end": 1948.88, "word": " a", "probability": 0.448486328125}, {"start": 1948.88, "end": 1949.28, "word": " relationship", "probability": 0.89990234375}, {"start": 1949.28, "end": 1949.58, "word": " could", "probability": 0.84765625}, {"start": 1949.58, "end": 1949.74, "word": " be", "probability": 0.95556640625}, {"start": 1949.74, "end": 1950.0, "word": " linear", "probability": 0.89501953125}, {"start": 1950.0, "end": 1950.32, "word": " or", "probability": 0.92431640625}, {"start": 1950.32, "end": 1951.18, "word": " curvilinear.", "probability": 0.77191162109375}, {"start": 1952.28, "end": 1952.46, "word": " It", "probability": 0.8779296875}, {"start": 1952.46, "end": 1952.6, "word": " could", "probability": 0.88525390625}, {"start": 1952.6, "end": 1952.74, "word": " be", "probability": 0.94921875}, {"start": 1952.74, "end": 1953.12, "word": " positive", "probability": 0.9150390625}, {"start": 1953.12, "end": 1953.66, "word": " or", "probability": 0.94384765625}, {"start": 1953.66, "end": 1954.06, "word": " negative,", 
"probability": 0.94775390625}, {"start": 1954.5, "end": 1954.96, "word": " strong", "probability": 0.88134765625}, {"start": 1954.96, "end": 1955.42, "word": " or", "probability": 0.96533203125}, {"start": 1955.42, "end": 1955.8, "word": " weak,", "probability": 0.97412109375}, {"start": 1956.3, "end": 1956.58, "word": " or", "probability": 0.93359375}, {"start": 1956.58, "end": 1957.28, "word": " sometimes", "probability": 0.939453125}, {"start": 1957.28, "end": 1958.48, "word": " there", "probability": 0.80322265625}, {"start": 1958.48, "end": 1958.68, "word": " is", "probability": 0.93310546875}, {"start": 1958.68, "end": 1959.0, "word": " no", "probability": 0.95068359375}, {"start": 1959.0, "end": 1959.8, "word": " relationship", "probability": 0.90380859375}, {"start": 1959.8, "end": 1960.36, "word": " between", "probability": 0.8681640625}, {"start": 1960.36, "end": 1961.34, "word": " the", "probability": 0.90478515625}, {"start": 1961.34, "end": 1961.68, "word": " two", "probability": 0.92626953125}, {"start": 1961.68, "end": 1962.64, "word": " variables.", "probability": 0.947265625}, {"start": 1964.3, "end": 1964.48, "word": " Now", "probability": 0.94970703125}, {"start": 1964.48, "end": 1964.62, "word": " the", "probability": 0.638671875}, {"start": 1964.62, "end": 1964.9, "word": " question", "probability": 0.91357421875}, {"start": 1964.9, "end": 1965.22, "word": " is,", "probability": 0.951171875}, {"start": 1965.54, "end": 1965.78, "word": " how", "probability": 0.82861328125}, {"start": 1965.78, "end": 1966.18, "word": " can", "probability": 0.9375}, {"start": 1966.18, "end": 1966.78, "word": " we", "probability": 0.9609375}, {"start": 1966.78, "end": 1969.2, "word": " write", "probability": 0.873046875}], "temperature": 1.0}, {"id": 75, "seek": 199845, "start": 1971.25, "end": 1998.45, "text": " Or how can we find the best regression line that fits the data you have? We know the regression is the straight line equation is given by this one. 
Y equals beta 0 plus beta 1x plus epsilon.", "tokens": [1610, 577, 393, 321, 915, 264, 1151, 24590, 1622, 300, 9001, 264, 1412, 291, 362, 30, 492, 458, 264, 24590, 307, 264, 2997, 1622, 5367, 307, 2212, 538, 341, 472, 13, 398, 6915, 9861, 1958, 1804, 9861, 502, 87, 1804, 17889, 13], "avg_logprob": -0.20367005259491677, "compression_ratio": 1.4360902255639099, "no_speech_prob": 0.0, "words": [{"start": 1971.25, "end": 1971.53, "word": " Or", "probability": 0.85888671875}, {"start": 1971.53, "end": 1971.77, "word": " how", "probability": 0.8544921875}, {"start": 1971.77, "end": 1971.99, "word": " can", "probability": 0.93359375}, {"start": 1971.99, "end": 1972.17, "word": " we", "probability": 0.9423828125}, {"start": 1972.17, "end": 1972.55, "word": " find", "probability": 0.892578125}, {"start": 1972.55, "end": 1972.81, "word": " the", "probability": 0.916015625}, {"start": 1972.81, "end": 1973.17, "word": " best", "probability": 0.91650390625}, {"start": 1973.17, "end": 1973.71, "word": " regression", "probability": 0.93798828125}, {"start": 1973.71, "end": 1974.17, "word": " line", "probability": 0.93408203125}, {"start": 1974.17, "end": 1975.29, "word": " that", "probability": 0.87939453125}, {"start": 1975.29, "end": 1975.73, "word": " fits", "probability": 0.8623046875}, {"start": 1975.73, "end": 1975.97, "word": " the", "probability": 0.9150390625}, {"start": 1975.97, "end": 1976.21, "word": " data", "probability": 0.9267578125}, {"start": 1976.21, "end": 1976.45, "word": " you", "probability": 0.73583984375}, {"start": 1976.45, "end": 1976.67, "word": " have?", "probability": 0.91845703125}, {"start": 1978.21, "end": 1978.57, "word": " We", "probability": 0.92822265625}, {"start": 1978.57, "end": 1978.71, "word": " know", "probability": 0.8779296875}, {"start": 1978.71, "end": 1978.87, "word": " the", "probability": 0.78076171875}, {"start": 1978.87, "end": 1979.19, "word": " regression", "probability": 0.94873046875}, {"start": 1979.19, "end": 1979.57, 
"word": " is", "probability": 0.54443359375}, {"start": 1979.57, "end": 1979.93, "word": " the", "probability": 0.6630859375}, {"start": 1979.93, "end": 1982.49, "word": " straight", "probability": 0.6396484375}, {"start": 1982.49, "end": 1982.79, "word": " line", "probability": 0.873046875}, {"start": 1982.79, "end": 1983.25, "word": " equation", "probability": 0.94775390625}, {"start": 1983.25, "end": 1983.65, "word": " is", "probability": 0.395263671875}, {"start": 1983.65, "end": 1983.83, "word": " given", "probability": 0.904296875}, {"start": 1983.83, "end": 1984.07, "word": " by", "probability": 0.9677734375}, {"start": 1984.07, "end": 1984.35, "word": " this", "probability": 0.95166015625}, {"start": 1984.35, "end": 1984.71, "word": " one.", "probability": 0.9326171875}, {"start": 1985.51, "end": 1986.27, "word": " Y", "probability": 0.9248046875}, {"start": 1986.27, "end": 1989.17, "word": " equals", "probability": 0.802734375}, {"start": 1989.17, "end": 1990.93, "word": " beta", "probability": 0.83740234375}, {"start": 1990.93, "end": 1991.35, "word": " 0", "probability": 0.521484375}, {"start": 1991.35, "end": 1993.03, "word": " plus", "probability": 0.90478515625}, {"start": 1993.03, "end": 1995.59, "word": " beta", "probability": 0.88037109375}, {"start": 1995.59, "end": 1996.21, "word": " 1x", "probability": 0.6492919921875}, {"start": 1996.21, "end": 1997.61, "word": " plus", "probability": 0.94482421875}, {"start": 1997.61, "end": 1998.45, "word": " epsilon.", "probability": 0.88623046875}], "temperature": 1.0}, {"id": 76, "seek": 202697, "start": 1999.35, "end": 2026.97, "text": " This can be pronounced as epsilon. It's a great letter, the same as alpha, beta, mu, sigma, and so on. So it's epsilon. I, it means observation number I. I 1, 2, 3, up to 10, for example, is the same for selling price of a home. 
So I 1, 2, 3, all the way up to the sample size.", "tokens": [639, 393, 312, 23155, 382, 17889, 13, 467, 311, 257, 869, 5063, 11, 264, 912, 382, 8961, 11, 9861, 11, 2992, 11, 12771, 11, 293, 370, 322, 13, 407, 309, 311, 17889, 13, 286, 11, 309, 1355, 14816, 1230, 286, 13, 286, 502, 11, 568, 11, 805, 11, 493, 281, 1266, 11, 337, 1365, 11, 307, 264, 912, 337, 6511, 3218, 295, 257, 1280, 13, 407, 286, 502, 11, 568, 11, 805, 11, 439, 264, 636, 493, 281, 264, 6889, 2744, 13], "avg_logprob": -0.17884036754987326, "compression_ratio": 1.5108695652173914, "no_speech_prob": 0.0, "words": [{"start": 1999.35, "end": 1999.77, "word": " This", "probability": 0.71923828125}, {"start": 1999.77, "end": 2000.13, "word": " can", "probability": 0.134033203125}, {"start": 2000.13, "end": 2000.49, "word": " be", "probability": 0.58740234375}, {"start": 2000.49, "end": 2000.93, "word": " pronounced", "probability": 0.83203125}, {"start": 2000.93, "end": 2001.25, "word": " as", "probability": 0.96875}, {"start": 2001.25, "end": 2001.67, "word": " epsilon.", "probability": 0.6396484375}, {"start": 2004.79, "end": 2005.39, "word": " It's", "probability": 0.963134765625}, {"start": 2005.39, "end": 2005.65, "word": " a", "probability": 0.8564453125}, {"start": 2005.65, "end": 2005.85, "word": " great", "probability": 0.44482421875}, {"start": 2005.85, "end": 2006.23, "word": " letter,", "probability": 0.9130859375}, {"start": 2006.55, "end": 2006.69, "word": " the", "probability": 0.9072265625}, {"start": 2006.69, "end": 2006.87, "word": " same", "probability": 0.92333984375}, {"start": 2006.87, "end": 2007.03, "word": " as", "probability": 0.958984375}, {"start": 2007.03, "end": 2007.37, "word": " alpha,", "probability": 0.8994140625}, {"start": 2007.69, "end": 2008.13, "word": " beta,", "probability": 0.93701171875}, {"start": 2008.93, "end": 2009.27, "word": " mu,", "probability": 0.8212890625}, {"start": 2009.57, "end": 2009.91, "word": " sigma,", "probability": 0.8984375}, {"start": 
2010.05, "end": 2010.13, "word": " and", "probability": 0.9189453125}, {"start": 2010.13, "end": 2010.29, "word": " so", "probability": 0.95703125}, {"start": 2010.29, "end": 2010.55, "word": " on.", "probability": 0.9375}, {"start": 2011.31, "end": 2011.67, "word": " So", "probability": 0.38232421875}, {"start": 2011.67, "end": 2011.83, "word": " it's", "probability": 0.911865234375}, {"start": 2011.83, "end": 2012.37, "word": " epsilon.", "probability": 0.85888671875}, {"start": 2013.61, "end": 2014.21, "word": " I,", "probability": 0.81640625}, {"start": 2014.53, "end": 2014.91, "word": " it", "probability": 0.9404296875}, {"start": 2014.91, "end": 2015.15, "word": " means", "probability": 0.88330078125}, {"start": 2015.15, "end": 2015.67, "word": " observation", "probability": 0.47265625}, {"start": 2015.67, "end": 2016.27, "word": " number", "probability": 0.8916015625}, {"start": 2016.27, "end": 2016.63, "word": " I.", "probability": 0.85498046875}, {"start": 2017.65, "end": 2017.95, "word": " I", "probability": 0.982421875}, {"start": 2017.95, "end": 2018.21, "word": " 1,", "probability": 0.315673828125}, {"start": 2018.25, "end": 2018.37, "word": " 2,", "probability": 0.98388671875}, {"start": 2018.45, "end": 2018.61, "word": " 3,", "probability": 0.98876953125}, {"start": 2018.65, "end": 2018.79, "word": " up", "probability": 0.96826171875}, {"start": 2018.79, "end": 2018.89, "word": " to", "probability": 0.96337890625}, {"start": 2018.89, "end": 2019.09, "word": " 10,", "probability": 0.947265625}, {"start": 2019.11, "end": 2019.25, "word": " for", "probability": 0.95068359375}, {"start": 2019.25, "end": 2019.63, "word": " example,", "probability": 0.97412109375}, {"start": 2019.81, "end": 2019.89, "word": " is", "probability": 0.912109375}, {"start": 2019.89, "end": 2020.07, "word": " the", "probability": 0.8876953125}, {"start": 2020.07, "end": 2020.31, "word": " same", "probability": 0.865234375}, {"start": 2020.31, "end": 2020.79, "word": " for", 
"probability": 0.63232421875}, {"start": 2020.79, "end": 2021.09, "word": " selling", "probability": 0.63720703125}, {"start": 2021.09, "end": 2021.55, "word": " price", "probability": 0.923828125}, {"start": 2021.55, "end": 2022.33, "word": " of", "probability": 0.962890625}, {"start": 2022.33, "end": 2022.45, "word": " a", "probability": 0.98583984375}, {"start": 2022.45, "end": 2022.71, "word": " home.", "probability": 0.87451171875}, {"start": 2023.03, "end": 2023.39, "word": " So", "probability": 0.962890625}, {"start": 2023.39, "end": 2023.73, "word": " I", "probability": 0.96826171875}, {"start": 2023.73, "end": 2024.23, "word": " 1,", "probability": 0.8564453125}, {"start": 2024.31, "end": 2024.59, "word": " 2,", "probability": 0.9921875}, {"start": 2024.77, "end": 2025.17, "word": " 3,", "probability": 0.99658203125}, {"start": 2025.19, "end": 2025.41, "word": " all", "probability": 0.9482421875}, {"start": 2025.41, "end": 2025.55, "word": " the", "probability": 0.9189453125}, {"start": 2025.55, "end": 2025.73, "word": " way", "probability": 0.9521484375}, {"start": 2025.73, "end": 2025.95, "word": " up", "probability": 0.94873046875}, {"start": 2025.95, "end": 2026.13, "word": " to", "probability": 0.96044921875}, {"start": 2026.13, "end": 2026.29, "word": " the", "probability": 0.59423828125}, {"start": 2026.29, "end": 2026.53, "word": " sample", "probability": 0.591796875}, {"start": 2026.53, "end": 2026.97, "word": " size.", "probability": 0.8310546875}], "temperature": 1.0}, {"id": 77, "seek": 205742, "start": 2028.37, "end": 2057.43, "text": " Now, Y is your dependent variable. Beta 0 is population Y intercept. For example, if we have this scatter plot. Now, beta 0 is this one. So this is your beta 0. 
So this segment is beta 0.", "tokens": [823, 11, 398, 307, 428, 12334, 7006, 13, 33286, 1958, 307, 4415, 398, 24700, 13, 1171, 1365, 11, 498, 321, 362, 341, 34951, 7542, 13, 823, 11, 9861, 1958, 307, 341, 472, 13, 407, 341, 307, 428, 9861, 1958, 13, 407, 341, 9469, 307, 9861, 1958, 13], "avg_logprob": -0.21402994884798923, "compression_ratio": 1.4242424242424243, "no_speech_prob": 0.0, "words": [{"start": 2028.37, "end": 2028.73, "word": " Now,", "probability": 0.74755859375}, {"start": 2029.03, "end": 2029.33, "word": " Y", "probability": 0.5537109375}, {"start": 2029.33, "end": 2030.49, "word": " is", "probability": 0.9296875}, {"start": 2030.49, "end": 2030.71, "word": " your", "probability": 0.8056640625}, {"start": 2030.71, "end": 2031.01, "word": " dependent", "probability": 0.86669921875}, {"start": 2031.01, "end": 2031.53, "word": " variable.", "probability": 0.90869140625}, {"start": 2033.63, "end": 2034.09, "word": " Beta", "probability": 0.849609375}, {"start": 2034.09, "end": 2034.51, "word": " 0", "probability": 0.3642578125}, {"start": 2034.51, "end": 2034.83, "word": " is", "probability": 0.9140625}, {"start": 2034.83, "end": 2035.29, "word": " population", "probability": 0.69775390625}, {"start": 2035.29, "end": 2035.53, "word": " Y", "probability": 0.8212890625}, {"start": 2035.53, "end": 2036.01, "word": " intercept.", "probability": 0.67138671875}, {"start": 2036.71, "end": 2036.83, "word": " For", "probability": 0.9208984375}, {"start": 2036.83, "end": 2037.19, "word": " example,", "probability": 0.96923828125}, {"start": 2037.35, "end": 2037.57, "word": " if", "probability": 0.77734375}, {"start": 2037.57, "end": 2039.61, "word": " we", "probability": 0.84619140625}, {"start": 2039.61, "end": 2039.81, "word": " have", "probability": 0.92822265625}, {"start": 2039.81, "end": 2040.05, "word": " this", "probability": 0.9248046875}, {"start": 2040.05, "end": 2040.47, "word": " scatter", "probability": 0.90625}, {"start": 2040.47, "end": 2040.73, 
"word": " plot.", "probability": 0.578125}, {"start": 2044.01, "end": 2044.65, "word": " Now,", "probability": 0.90771484375}, {"start": 2044.75, "end": 2044.97, "word": " beta", "probability": 0.72265625}, {"start": 2044.97, "end": 2045.49, "word": " 0", "probability": 0.8896484375}, {"start": 2045.49, "end": 2050.19, "word": " is", "probability": 0.89208984375}, {"start": 2050.19, "end": 2050.41, "word": " this", "probability": 0.943359375}, {"start": 2050.41, "end": 2050.71, "word": " one.", "probability": 0.89990234375}, {"start": 2051.61, "end": 2051.97, "word": " So", "probability": 0.85205078125}, {"start": 2051.97, "end": 2052.13, "word": " this", "probability": 0.56640625}, {"start": 2052.13, "end": 2052.27, "word": " is", "probability": 0.89697265625}, {"start": 2052.27, "end": 2052.41, "word": " your", "probability": 0.83447265625}, {"start": 2052.41, "end": 2052.65, "word": " beta", "probability": 0.923828125}, {"start": 2052.65, "end": 2052.99, "word": " 0.", "probability": 0.92529296875}, {"start": 2054.37, "end": 2054.73, "word": " So", "probability": 0.9169921875}, {"start": 2054.73, "end": 2054.97, "word": " this", "probability": 0.8203125}, {"start": 2054.97, "end": 2055.37, "word": " segment", "probability": 0.9384765625}, {"start": 2055.37, "end": 2056.85, "word": " is", "probability": 0.9453125}, {"start": 2056.85, "end": 2057.11, "word": " beta", "probability": 0.92138671875}, {"start": 2057.11, "end": 2057.43, "word": " 0.", "probability": 0.9794921875}], "temperature": 1.0}, {"id": 78, "seek": 208385, "start": 2058.21, "end": 2083.85, "text": " it could be above the x-axis I mean beta zero could be positive might be negative now this beta zero fall below the x-axis so beta zero could be negative or", "tokens": [309, 727, 312, 3673, 264, 2031, 12, 24633, 286, 914, 9861, 4018, 727, 312, 3353, 1062, 312, 3671, 586, 341, 9861, 4018, 2100, 2507, 264, 2031, 12, 24633, 370, 9861, 4018, 727, 312, 3671, 420], "avg_logprob": -0.2521701446837849, 
"compression_ratio": 1.6185567010309279, "no_speech_prob": 0.0, "words": [{"start": 2058.21, "end": 2058.47, "word": " it", "probability": 0.284912109375}, {"start": 2058.47, "end": 2058.65, "word": " could", "probability": 0.8369140625}, {"start": 2058.65, "end": 2058.83, "word": " be", "probability": 0.94140625}, {"start": 2058.83, "end": 2059.23, "word": " above", "probability": 0.90869140625}, {"start": 2059.23, "end": 2060.57, "word": " the", "probability": 0.68359375}, {"start": 2060.57, "end": 2060.79, "word": " x", "probability": 0.802734375}, {"start": 2060.79, "end": 2061.13, "word": "-axis", "probability": 0.78076171875}, {"start": 2061.13, "end": 2061.35, "word": " I", "probability": 0.346435546875}, {"start": 2061.35, "end": 2061.55, "word": " mean", "probability": 0.96484375}, {"start": 2061.55, "end": 2062.29, "word": " beta", "probability": 0.48583984375}, {"start": 2062.29, "end": 2062.65, "word": " zero", "probability": 0.58203125}, {"start": 2062.65, "end": 2063.39, "word": " could", "probability": 0.40478515625}, {"start": 2063.39, "end": 2063.55, "word": " be", "probability": 0.947265625}, {"start": 2063.55, "end": 2063.95, "word": " positive", "probability": 0.89794921875}, {"start": 2063.95, "end": 2066.87, "word": " might", "probability": 0.75}, {"start": 2066.87, "end": 2067.11, "word": " be", "probability": 0.9453125}, {"start": 2067.11, "end": 2067.75, "word": " negative", "probability": 0.89697265625}, {"start": 2067.75, "end": 2074.89, "word": " now", "probability": 0.69189453125}, {"start": 2074.89, "end": 2075.13, "word": " this", "probability": 0.93701171875}, {"start": 2075.13, "end": 2075.37, "word": " beta", "probability": 0.7333984375}, {"start": 2075.37, "end": 2075.69, "word": " zero", "probability": 0.8662109375}, {"start": 2075.69, "end": 2076.67, "word": " fall", "probability": 0.61669921875}, {"start": 2076.67, "end": 2076.99, "word": " below", "probability": 0.912109375}, {"start": 2076.99, "end": 2077.23, "word": " the", 
"probability": 0.912109375}, {"start": 2077.23, "end": 2078.27, "word": " x", "probability": 0.99169921875}, {"start": 2078.27, "end": 2078.69, "word": "-axis", "probability": 0.91748046875}, {"start": 2078.69, "end": 2079.47, "word": " so", "probability": 0.875}, {"start": 2079.47, "end": 2079.87, "word": " beta", "probability": 0.89453125}, {"start": 2079.87, "end": 2080.27, "word": " zero", "probability": 0.8857421875}, {"start": 2080.27, "end": 2080.81, "word": " could", "probability": 0.88818359375}, {"start": 2080.81, "end": 2080.97, "word": " be", "probability": 0.95556640625}, {"start": 2080.97, "end": 2081.39, "word": " negative", "probability": 0.955078125}, {"start": 2081.39, "end": 2083.85, "word": " or", "probability": 0.9375}], "temperature": 1.0}, {"id": 79, "seek": 210596, "start": 2086.49, "end": 2105.97, "text": " Maybe the straight line passes through the origin point. So in this case, beta zero equals zero. So it could be positive and negative or equal zero, but still we have positive relationship. 
That means", "tokens": [2704, 264, 2997, 1622, 11335, 807, 264, 4957, 935, 13, 407, 294, 341, 1389, 11, 9861, 4018, 6915, 4018, 13, 407, 309, 727, 312, 3353, 293, 3671, 420, 2681, 4018, 11, 457, 920, 321, 362, 3353, 2480, 13, 663, 1355], "avg_logprob": -0.26009908536585363, "compression_ratio": 1.4565217391304348, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2086.49, "end": 2086.95, "word": " Maybe", "probability": 0.468994140625}, {"start": 2086.95, "end": 2087.23, "word": " the", "probability": 0.662109375}, {"start": 2087.23, "end": 2087.53, "word": " straight", "probability": 0.90234375}, {"start": 2087.53, "end": 2087.89, "word": " line", "probability": 0.8984375}, {"start": 2087.89, "end": 2088.37, "word": " passes", "probability": 0.7978515625}, {"start": 2088.37, "end": 2088.79, "word": " through", "probability": 0.88232421875}, {"start": 2088.79, "end": 2088.99, "word": " the", "probability": 0.876953125}, {"start": 2088.99, "end": 2089.35, "word": " origin", "probability": 0.91796875}, {"start": 2089.35, "end": 2089.73, "word": " point.", "probability": 0.96728515625}, {"start": 2090.81, "end": 2090.91, "word": " So", "probability": 0.8486328125}, {"start": 2090.91, "end": 2091.03, "word": " in", "probability": 0.71728515625}, {"start": 2091.03, "end": 2091.23, "word": " this", "probability": 0.95166015625}, {"start": 2091.23, "end": 2091.81, "word": " case,", "probability": 0.92236328125}, {"start": 2093.03, "end": 2093.73, "word": " beta", "probability": 0.57763671875}, {"start": 2093.73, "end": 2094.17, "word": " zero", "probability": 0.46044921875}, {"start": 2094.17, "end": 2095.13, "word": " equals", "probability": 0.85498046875}, {"start": 2095.13, "end": 2095.53, "word": " zero.", "probability": 0.89208984375}, {"start": 2096.33, "end": 2096.99, "word": " So", "probability": 0.89892578125}, {"start": 2096.99, "end": 2097.09, "word": " it", "probability": 0.8916015625}, {"start": 2097.09, "end": 2097.25, "word": " could", 
"probability": 0.87109375}, {"start": 2097.25, "end": 2097.37, "word": " be", "probability": 0.94482421875}, {"start": 2097.37, "end": 2097.77, "word": " positive", "probability": 0.90087890625}, {"start": 2097.77, "end": 2098.05, "word": " and", "probability": 0.5703125}, {"start": 2098.05, "end": 2098.41, "word": " negative", "probability": 0.939453125}, {"start": 2098.41, "end": 2099.07, "word": " or", "probability": 0.63232421875}, {"start": 2099.07, "end": 2099.43, "word": " equal", "probability": 0.62255859375}, {"start": 2099.43, "end": 2099.89, "word": " zero,", "probability": 0.70361328125}, {"start": 2100.43, "end": 2100.59, "word": " but", "probability": 0.92529296875}, {"start": 2100.59, "end": 2100.99, "word": " still", "probability": 0.9462890625}, {"start": 2100.99, "end": 2101.25, "word": " we", "probability": 0.81640625}, {"start": 2101.25, "end": 2101.59, "word": " have", "probability": 0.9501953125}, {"start": 2101.59, "end": 2102.61, "word": " positive", "probability": 0.751953125}, {"start": 2102.61, "end": 2103.49, "word": " relationship.", "probability": 0.9267578125}, {"start": 2105.07, "end": 2105.51, "word": " That", "probability": 0.8828125}, {"start": 2105.51, "end": 2105.97, "word": " means", "probability": 0.935546875}], "temperature": 1.0}, {"id": 80, "seek": 212589, "start": 2107.23, "end": 2125.89, "text": " The value of beta zero, the sign of beta zero does not affect the relationship between Y and X. 
Because here in the three cases, there exists positive relationship, but beta zero could be positive or negative or equal zero, but still we have", "tokens": [440, 2158, 295, 9861, 4018, 11, 264, 1465, 295, 9861, 4018, 775, 406, 3345, 264, 2480, 1296, 398, 293, 1783, 13, 1436, 510, 294, 264, 1045, 3331, 11, 456, 8198, 3353, 2480, 11, 457, 9861, 4018, 727, 312, 3353, 420, 3671, 420, 2681, 4018, 11, 457, 920, 321, 362], "avg_logprob": -0.2657812583446503, "compression_ratio": 1.6133333333333333, "no_speech_prob": 2.5033950805664062e-06, "words": [{"start": 2107.23, "end": 2107.81, "word": " The", "probability": 0.37060546875}, {"start": 2107.81, "end": 2108.37, "word": " value", "probability": 0.95703125}, {"start": 2108.37, "end": 2108.57, "word": " of", "probability": 0.9375}, {"start": 2108.57, "end": 2108.79, "word": " beta", "probability": 0.5517578125}, {"start": 2108.79, "end": 2109.09, "word": " zero,", "probability": 0.5732421875}, {"start": 2109.23, "end": 2109.35, "word": " the", "probability": 0.76416015625}, {"start": 2109.35, "end": 2109.63, "word": " sign", "probability": 0.465576171875}, {"start": 2109.63, "end": 2109.77, "word": " of", "probability": 0.962890625}, {"start": 2109.77, "end": 2109.97, "word": " beta", "probability": 0.8935546875}, {"start": 2109.97, "end": 2110.25, "word": " zero", "probability": 0.85986328125}, {"start": 2110.25, "end": 2110.49, "word": " does", "probability": 0.60302734375}, {"start": 2110.49, "end": 2110.71, "word": " not", "probability": 0.95166015625}, {"start": 2110.71, "end": 2111.23, "word": " affect", "probability": 0.86328125}, {"start": 2111.23, "end": 2112.27, "word": " the", "probability": 0.7236328125}, {"start": 2112.27, "end": 2112.77, "word": " relationship", "probability": 0.8857421875}, {"start": 2112.77, "end": 2113.13, "word": " between", "probability": 0.9013671875}, {"start": 2113.13, "end": 2113.31, "word": " Y", "probability": 0.66796875}, {"start": 2113.31, "end": 2113.51, "word": " and", 
"probability": 0.94384765625}, {"start": 2113.51, "end": 2113.87, "word": " X.", "probability": 0.99462890625}, {"start": 2114.49, "end": 2114.85, "word": " Because", "probability": 0.87353515625}, {"start": 2114.85, "end": 2115.15, "word": " here", "probability": 0.7958984375}, {"start": 2115.15, "end": 2115.57, "word": " in", "probability": 0.67626953125}, {"start": 2115.57, "end": 2115.79, "word": " the", "probability": 0.88720703125}, {"start": 2115.79, "end": 2116.05, "word": " three", "probability": 0.89013671875}, {"start": 2116.05, "end": 2116.69, "word": " cases,", "probability": 0.916015625}, {"start": 2117.59, "end": 2117.85, "word": " there", "probability": 0.89599609375}, {"start": 2117.85, "end": 2118.39, "word": " exists", "probability": 0.6953125}, {"start": 2118.39, "end": 2119.63, "word": " positive", "probability": 0.83203125}, {"start": 2119.63, "end": 2120.33, "word": " relationship,", "probability": 0.90478515625}, {"start": 2121.07, "end": 2121.23, "word": " but", "probability": 0.90966796875}, {"start": 2121.23, "end": 2121.69, "word": " beta", "probability": 0.8857421875}, {"start": 2121.69, "end": 2122.19, "word": " zero", "probability": 0.8623046875}, {"start": 2122.19, "end": 2122.39, "word": " could", "probability": 0.830078125}, {"start": 2122.39, "end": 2122.51, "word": " be", "probability": 0.94482421875}, {"start": 2122.51, "end": 2122.87, "word": " positive", "probability": 0.9111328125}, {"start": 2122.87, "end": 2123.17, "word": " or", "probability": 0.82421875}, {"start": 2123.17, "end": 2123.53, "word": " negative", "probability": 0.9296875}, {"start": 2123.53, "end": 2123.87, "word": " or", "probability": 0.88525390625}, {"start": 2123.87, "end": 2124.19, "word": " equal", "probability": 0.72119140625}, {"start": 2124.19, "end": 2124.57, "word": " zero,", "probability": 0.78857421875}, {"start": 2124.85, "end": 2125.05, "word": " but", "probability": 0.92138671875}, {"start": 2125.05, "end": 2125.37, "word": " still", 
"probability": 0.955078125}, {"start": 2125.37, "end": 2125.55, "word": " we", "probability": 0.8583984375}, {"start": 2125.55, "end": 2125.89, "word": " have", "probability": 0.94921875}], "temperature": 1.0}, {"id": 81, "seek": 214754, "start": 2127.58, "end": 2147.54, "text": " positive relationship. I mean, you cannot determine by looking at beta 0, you cannot determine if there is a positive or negative relationship. The other term is beta 1. Beta 1 is the population slope coefficient. Now, the sign of the slope", "tokens": [3353, 2480, 13, 286, 914, 11, 291, 2644, 6997, 538, 1237, 412, 9861, 1958, 11, 291, 2644, 6997, 498, 456, 307, 257, 3353, 420, 3671, 2480, 13, 440, 661, 1433, 307, 9861, 502, 13, 33286, 502, 307, 264, 4415, 13525, 17619, 13, 823, 11, 264, 1465, 295, 264, 13525], "avg_logprob": -0.2371874964237213, "compression_ratio": 1.6283783783783783, "no_speech_prob": 0.0, "words": [{"start": 2127.5800000000004, "end": 2128.2200000000003, "word": " positive", "probability": 0.1988525390625}, {"start": 2128.2200000000003, "end": 2128.86, "word": " relationship.", "probability": 0.78076171875}, {"start": 2129.7, "end": 2129.92, "word": " I", "probability": 0.82177734375}, {"start": 2129.92, "end": 2130.12, "word": " mean,", "probability": 0.96435546875}, {"start": 2130.22, "end": 2130.74, "word": " you", "probability": 0.457275390625}, {"start": 2130.74, "end": 2131.72, "word": " cannot", "probability": 0.84423828125}, {"start": 2131.72, "end": 2132.28, "word": " determine", "probability": 0.92333984375}, {"start": 2132.28, "end": 2133.24, "word": " by", "probability": 0.837890625}, {"start": 2133.24, "end": 2133.62, "word": " looking", "probability": 0.890625}, {"start": 2133.62, "end": 2133.86, "word": " at", "probability": 0.96435546875}, {"start": 2133.86, "end": 2134.16, "word": " beta", "probability": 0.64404296875}, {"start": 2134.16, "end": 2134.48, "word": " 0,", "probability": 0.419677734375}, {"start": 2134.62, "end": 2134.76, "word": " you", 
"probability": 0.9091796875}, {"start": 2134.76, "end": 2135.06, "word": " cannot", "probability": 0.8798828125}, {"start": 2135.06, "end": 2135.72, "word": " determine", "probability": 0.91650390625}, {"start": 2135.72, "end": 2136.38, "word": " if", "probability": 0.9228515625}, {"start": 2136.38, "end": 2136.58, "word": " there", "probability": 0.888671875}, {"start": 2136.58, "end": 2136.74, "word": " is", "probability": 0.93798828125}, {"start": 2136.74, "end": 2136.84, "word": " a", "probability": 0.73779296875}, {"start": 2136.84, "end": 2137.18, "word": " positive", "probability": 0.9169921875}, {"start": 2137.18, "end": 2137.6, "word": " or", "probability": 0.9541015625}, {"start": 2137.6, "end": 2137.94, "word": " negative", "probability": 0.919921875}, {"start": 2137.94, "end": 2138.48, "word": " relationship.", "probability": 0.8525390625}, {"start": 2139.6, "end": 2139.86, "word": " The", "probability": 0.87841796875}, {"start": 2139.86, "end": 2140.1, "word": " other", "probability": 0.8916015625}, {"start": 2140.1, "end": 2140.48, "word": " term", "probability": 0.91455078125}, {"start": 2140.48, "end": 2140.68, "word": " is", "probability": 0.94287109375}, {"start": 2140.68, "end": 2140.9, "word": " beta", "probability": 0.818359375}, {"start": 2140.9, "end": 2141.1, "word": " 1.", "probability": 0.87451171875}, {"start": 2141.18, "end": 2141.34, "word": " Beta", "probability": 0.9130859375}, {"start": 2141.34, "end": 2141.58, "word": " 1", "probability": 0.93798828125}, {"start": 2141.58, "end": 2141.72, "word": " is", "probability": 0.94384765625}, {"start": 2141.72, "end": 2141.82, "word": " the", "probability": 0.61474609375}, {"start": 2141.82, "end": 2142.28, "word": " population", "probability": 0.94873046875}, {"start": 2142.28, "end": 2142.82, "word": " slope", "probability": 0.67822265625}, {"start": 2142.82, "end": 2143.32, "word": " coefficient.", "probability": 0.935546875}, {"start": 2144.68, "end": 2145.1, "word": " Now,", 
"probability": 0.943359375}, {"start": 2146.06, "end": 2146.3, "word": " the", "probability": 0.9208984375}, {"start": 2146.3, "end": 2146.7, "word": " sign", "probability": 0.7890625}, {"start": 2146.7, "end": 2146.9, "word": " of", "probability": 0.96728515625}, {"start": 2146.9, "end": 2147.16, "word": " the", "probability": 0.81396484375}, {"start": 2147.16, "end": 2147.54, "word": " slope", "probability": 0.84814453125}], "temperature": 1.0}, {"id": 82, "seek": 216531, "start": 2148.39, "end": 2165.31, "text": " determines the direction of the relationship. That means if the slope has positive sign, it means there exists positive relationship. Otherwise if it is negative, then there is negative relationship between the two variables. So the sign of the slope determines the direction.", "tokens": [24799, 264, 3513, 295, 264, 2480, 13, 663, 1355, 498, 264, 13525, 575, 3353, 1465, 11, 309, 1355, 456, 8198, 3353, 2480, 13, 10328, 498, 309, 307, 3671, 11, 550, 456, 307, 3671, 2480, 1296, 264, 732, 9102, 13, 407, 264, 1465, 295, 264, 13525, 24799, 264, 3513, 13], "avg_logprob": -0.19843750029802323, "compression_ratio": 1.8972602739726028, "no_speech_prob": 0.0, "words": [{"start": 2148.39, "end": 2148.83, "word": " determines", "probability": 0.3837890625}, {"start": 2148.83, "end": 2149.15, "word": " the", "probability": 0.90771484375}, {"start": 2149.15, "end": 2149.59, "word": " direction", "probability": 0.966796875}, {"start": 2149.59, "end": 2149.87, "word": " of", "probability": 0.9619140625}, {"start": 2149.87, "end": 2150.01, "word": " the", "probability": 0.8857421875}, {"start": 2150.01, "end": 2150.45, "word": " relationship.", "probability": 0.8818359375}, {"start": 2150.81, "end": 2150.99, "word": " That", "probability": 0.7998046875}, {"start": 2150.99, "end": 2151.47, "word": " means", "probability": 0.9287109375}, {"start": 2151.47, "end": 2152.51, "word": " if", "probability": 0.7197265625}, {"start": 2152.51, "end": 2152.91, "word": " the", 
"probability": 0.896484375}, {"start": 2152.91, "end": 2153.23, "word": " slope", "probability": 0.798828125}, {"start": 2153.23, "end": 2153.63, "word": " has", "probability": 0.94580078125}, {"start": 2153.63, "end": 2154.09, "word": " positive", "probability": 0.81298828125}, {"start": 2154.09, "end": 2154.49, "word": " sign,", "probability": 0.87255859375}, {"start": 2154.65, "end": 2154.71, "word": " it", "probability": 0.88427734375}, {"start": 2154.71, "end": 2154.91, "word": " means", "probability": 0.931640625}, {"start": 2154.91, "end": 2155.09, "word": " there", "probability": 0.8095703125}, {"start": 2155.09, "end": 2155.39, "word": " exists", "probability": 0.64501953125}, {"start": 2155.39, "end": 2155.87, "word": " positive", "probability": 0.88818359375}, {"start": 2155.87, "end": 2156.57, "word": " relationship.", "probability": 0.91162109375}, {"start": 2157.33, "end": 2157.69, "word": " Otherwise", "probability": 0.91650390625}, {"start": 2157.69, "end": 2158.01, "word": " if", "probability": 0.39208984375}, {"start": 2158.01, "end": 2158.17, "word": " it", "probability": 0.80322265625}, {"start": 2158.17, "end": 2158.17, "word": " is", "probability": 0.59765625}, {"start": 2158.17, "end": 2158.53, "word": " negative,", "probability": 0.94921875}, {"start": 2158.77, "end": 2158.99, "word": " then", "probability": 0.82373046875}, {"start": 2158.99, "end": 2159.13, "word": " there", "probability": 0.90771484375}, {"start": 2159.13, "end": 2159.37, "word": " is", "probability": 0.931640625}, {"start": 2159.37, "end": 2159.99, "word": " negative", "probability": 0.8515625}, {"start": 2159.99, "end": 2160.53, "word": " relationship", "probability": 0.91796875}, {"start": 2160.53, "end": 2160.83, "word": " between", "probability": 0.87841796875}, {"start": 2160.83, "end": 2160.97, "word": " the", "probability": 0.8955078125}, {"start": 2160.97, "end": 2161.11, "word": " two", "probability": 0.89306640625}, {"start": 2161.11, "end": 2161.39, "word": " 
variables.", "probability": 0.6982421875}, {"start": 2162.13, "end": 2162.31, "word": " So", "probability": 0.94140625}, {"start": 2162.31, "end": 2162.51, "word": " the", "probability": 0.7607421875}, {"start": 2162.51, "end": 2162.77, "word": " sign", "probability": 0.90380859375}, {"start": 2162.77, "end": 2162.89, "word": " of", "probability": 0.96435546875}, {"start": 2162.89, "end": 2163.03, "word": " the", "probability": 0.90478515625}, {"start": 2163.03, "end": 2163.33, "word": " slope", "probability": 0.86279296875}, {"start": 2163.33, "end": 2164.11, "word": " determines", "probability": 0.94873046875}, {"start": 2164.11, "end": 2164.61, "word": " the", "probability": 0.91552734375}, {"start": 2164.61, "end": 2165.31, "word": " direction.", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 83, "seek": 218931, "start": 2166.09, "end": 2189.31, "text": " But the sign of beta zero has no meaning about the relationship between Y and X. X is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your independent variable, Y is your 
independent variable, Y is your independent variable, Y is your", "tokens": [583, 264, 1465, 295, 9861, 4018, 575, 572, 3620, 466, 264, 2480, 1296, 398, 293, 1783, 13, 1783, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428, 6695, 7006, 11, 398, 307, 428], "avg_logprob": -0.11736111217074924, "compression_ratio": 10.81651376146789, "no_speech_prob": 0.0, "words": [{"start": 2166.09, "end": 2166.47, "word": " But", "probability": 0.73046875}, {"start": 2166.47, "end": 2166.77, "word": " the", "probability": 0.81298828125}, {"start": 2166.77, "end": 2167.13, "word": " sign", "probability": 0.78125}, {"start": 2167.13, "end": 2168.13, "word": " of", "probability": 0.890625}, {"start": 2168.13, "end": 2168.41, "word": " beta", "probability": 0.59423828125}, {"start": 2168.41, "end": 2168.71, "word": " zero", "probability": 0.56298828125}, {"start": 2168.71, "end": 2169.05, "word": " has", "probability": 0.9287109375}, {"start": 2169.05, "end": 2169.29, "word": " no", "probability": 0.9453125}, {"start": 
2169.29, "end": 2169.59, "word": " meaning", "probability": 0.85498046875}, {"start": 2169.59, "end": 2170.15, "word": " about", "probability": 0.857421875}, {"start": 2170.15, "end": 2171.29, "word": " the", "probability": 0.8056640625}, {"start": 2171.29, "end": 2171.99, "word": " relationship", "probability": 0.90576171875}, {"start": 2171.99, "end": 2172.59, "word": " between", "probability": 0.91064453125}, {"start": 2172.59, "end": 2173.39, "word": " Y", "probability": 0.59765625}, {"start": 2173.39, "end": 2173.59, "word": " and", "probability": 0.931640625}, {"start": 2173.59, "end": 2173.89, "word": " X.", "probability": 0.9931640625}, {"start": 2174.53, "end": 2174.85, "word": " X", "probability": 0.96484375}, {"start": 2174.85, "end": 2175.27, "word": " is", "probability": 0.94921875}, {"start": 2175.27, "end": 2175.47, "word": " your", "probability": 0.78759765625}, {"start": 2175.47, "end": 2175.85, "word": " independent", "probability": 0.86669921875}, {"start": 2175.85, "end": 2176.31, "word": " variable,", "probability": 0.95263671875}, {"start": 2177.49, "end": 2177.73, "word": " Y", "probability": 0.625}, {"start": 2177.73, "end": 2178.55, "word": " is", "probability": 0.6484375}, {"start": 2178.55, "end": 2178.77, "word": " your", "probability": 0.281005859375}, {"start": 2178.77, "end": 2179.63, "word": " independent", "probability": 0.44580078125}, {"start": 2179.63, "end": 2179.63, "word": " variable,", "probability": 0.92138671875}, {"start": 2179.63, "end": 2179.63, "word": " Y", "probability": 0.72119140625}, {"start": 2179.63, "end": 2179.63, "word": " is", "probability": 0.78173828125}, {"start": 2179.63, "end": 2179.63, "word": " your", "probability": 0.7119140625}, {"start": 2179.63, "end": 2179.63, "word": " independent", "probability": 0.84228515625}, {"start": 2179.63, "end": 2179.63, "word": " variable,", "probability": 0.9345703125}, {"start": 2179.63, "end": 2179.63, "word": " Y", "probability": 0.80712890625}, {"start": 2179.63, 
"end": 2179.63, "word": " is", "probability": 0.87646484375}, {"start": 2179.63, "end": 2179.63, "word": " your", "probability": 0.82421875}, {"start": 2179.63, "end": 2179.63, "word": " independent", "probability": 0.900390625}, {"start": 2179.63, "end": 2179.63, "word": " variable,", "probability": 0.9443359375}, {"start": 2179.65, "end": 2179.65, "word": " Y", "probability": 0.87548828125}, {"start": 2179.65, "end": 2179.65, "word": " is", "probability": 0.9072265625}, {"start": 2179.65, "end": 2179.65, "word": " your", "probability": 0.8623046875}, {"start": 2179.65, "end": 2179.65, "word": " independent", "probability": 0.919921875}, {"start": 2179.65, "end": 2179.65, "word": " variable,", "probability": 0.9501953125}, {"start": 2180.21, "end": 2181.25, "word": " Y", "probability": 0.8583984375}, {"start": 2181.25, "end": 2181.25, "word": " is", "probability": 0.92236328125}, {"start": 2181.25, "end": 2181.25, "word": " your", "probability": 0.87451171875}, {"start": 2181.25, "end": 2181.25, "word": " independent", "probability": 0.92578125}, {"start": 2181.25, "end": 2181.25, "word": " variable,", "probability": 0.95458984375}, {"start": 2181.25, "end": 2181.25, "word": " Y", "probability": 0.8291015625}, {"start": 2181.25, "end": 2181.25, "word": " is", "probability": 0.9306640625}, {"start": 2181.25, "end": 2181.25, "word": " your", "probability": 0.87939453125}, {"start": 2181.25, "end": 2181.25, "word": " independent", "probability": 0.927734375}, {"start": 2181.25, "end": 2181.25, "word": " variable,", "probability": 0.9580078125}, {"start": 2181.25, "end": 2181.25, "word": " Y", "probability": 0.82421875}, {"start": 2181.25, "end": 2181.25, "word": " is", "probability": 0.93798828125}, {"start": 2181.25, "end": 2181.25, "word": " your", "probability": 0.8837890625}, {"start": 2181.25, "end": 2181.25, "word": " independent", "probability": 0.92822265625}, {"start": 2181.25, "end": 2181.25, "word": " variable,", "probability": 0.96044921875}, {"start": 
2181.25, "end": 2181.25, "word": " Y", "probability": 0.83544921875}, {"start": 2181.25, "end": 2181.25, "word": " is", "probability": 0.9423828125}, {"start": 2181.25, "end": 2181.25, "word": " your", "probability": 0.88720703125}, {"start": 2181.25, "end": 2181.25, "word": " independent", "probability": 0.92919921875}, {"start": 2181.25, "end": 2181.25, "word": " variable,", "probability": 0.96142578125}, {"start": 2181.25, "end": 2181.25, "word": " Y", "probability": 0.8583984375}, {"start": 2181.25, "end": 2181.25, "word": " is", "probability": 0.94580078125}, {"start": 2181.25, "end": 2181.25, "word": " your", "probability": 0.8896484375}, {"start": 2181.25, "end": 2181.25, "word": " independent", "probability": 0.9296875}, {"start": 2181.25, "end": 2181.25, "word": " variable,", "probability": 0.96337890625}, {"start": 2181.25, "end": 2181.25, "word": " Y", "probability": 0.8779296875}, {"start": 2181.25, "end": 2181.25, "word": " is", "probability": 0.94775390625}, {"start": 2181.25, "end": 2181.25, "word": " your", "probability": 0.8896484375}, {"start": 2181.25, "end": 2181.25, "word": " independent", "probability": 0.9296875}, {"start": 2181.25, "end": 2181.25, "word": " variable,", "probability": 0.96435546875}, {"start": 2181.25, "end": 2181.29, "word": " Y", "probability": 0.89794921875}, {"start": 2181.29, "end": 2181.83, "word": " is", "probability": 0.94873046875}, {"start": 2181.83, "end": 2181.85, "word": " your", "probability": 0.890625}, {"start": 2181.85, "end": 2183.43, "word": " independent", "probability": 0.931640625}, {"start": 2183.43, "end": 2184.37, "word": " variable,", "probability": 0.96533203125}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.91357421875}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.9501953125}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.892578125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93212890625}, {"start": 
2184.37, "end": 2184.37, "word": " variable,", "probability": 0.96533203125}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.9267578125}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.9501953125}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.8916015625}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93408203125}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.9658203125}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.93701171875}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.95068359375}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.8935546875}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93408203125}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.9658203125}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.9453125}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.9501953125}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.892578125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93603515625}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.9658203125}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.94921875}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.95166015625}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.89453125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.9375}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.96630859375}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.955078125}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.95068359375}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.89453125}, {"start": 2184.37, 
"end": 2184.37, "word": " independent", "probability": 0.9384765625}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.966796875}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.9580078125}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.95068359375}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.892578125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.9384765625}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.966796875}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.9609375}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.95068359375}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.8955078125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93896484375}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.966796875}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.96337890625}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.9501953125}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.89599609375}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93896484375}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.9677734375}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.9658203125}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.95068359375}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.896484375}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.9404296875}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.96728515625}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.9677734375}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.94970703125}, {"start": 
2184.37, "end": 2184.37, "word": " your", "probability": 0.896484375}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93994140625}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.96826171875}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.96875}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.94970703125}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.89697265625}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.9404296875}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.96875}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.970703125}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.94921875}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.8955078125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93994140625}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.96875}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.97265625}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.94775390625}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.8984375}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.9404296875}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.9697265625}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.97412109375}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.947265625}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.89990234375}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93994140625}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.970703125}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.97607421875}, {"start": 2184.37, 
"end": 2184.37, "word": " is", "probability": 0.94677734375}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.89892578125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.939453125}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.970703125}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.9775390625}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.9453125}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.89892578125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.93994140625}, {"start": 2184.37, "end": 2184.37, "word": " variable,", "probability": 0.97216796875}, {"start": 2184.37, "end": 2184.37, "word": " Y", "probability": 0.97900390625}, {"start": 2184.37, "end": 2184.37, "word": " is", "probability": 0.94384765625}, {"start": 2184.37, "end": 2184.37, "word": " your", "probability": 0.89892578125}, {"start": 2184.37, "end": 2184.37, "word": " independent", "probability": 0.9404296875}, {"start": 2184.37, "end": 2184.43, "word": " variable,", "probability": 0.97265625}, {"start": 2184.43, "end": 2184.43, "word": " Y", "probability": 0.98095703125}, {"start": 2184.43, "end": 2184.43, "word": " is", "probability": 0.943359375}, {"start": 2184.43, "end": 2184.43, "word": " your", "probability": 0.89892578125}, {"start": 2184.43, "end": 2184.47, "word": " independent", "probability": 0.93896484375}, {"start": 2184.47, "end": 2184.67, "word": " variable,", "probability": 0.97265625}, {"start": 2184.67, "end": 2184.67, "word": " Y", "probability": 0.982421875}, {"start": 2184.67, "end": 2184.67, "word": " is", "probability": 0.9404296875}, {"start": 2184.67, "end": 2184.67, "word": " your", "probability": 0.8974609375}, {"start": 2184.67, "end": 2184.77, "word": " independent", "probability": 0.93994140625}, {"start": 2184.77, "end": 2185.13, "word": " variable,", "probability": 0.97314453125}, {"start": 
2185.13, "end": 2185.65, "word": " Y", "probability": 0.98291015625}, {"start": 2185.65, "end": 2186.05, "word": " is", "probability": 0.93896484375}, {"start": 2186.05, "end": 2186.05, "word": " your", "probability": 0.8984375}, {"start": 2186.05, "end": 2186.11, "word": " independent", "probability": 0.9404296875}, {"start": 2186.11, "end": 2186.73, "word": " variable,", "probability": 0.9736328125}, {"start": 2186.75, "end": 2187.49, "word": " Y", "probability": 0.9833984375}, {"start": 2187.49, "end": 2187.49, "word": " is", "probability": 0.9384765625}, {"start": 2187.49, "end": 2187.59, "word": " your", "probability": 0.89697265625}, {"start": 2187.59, "end": 2187.83, "word": " independent", "probability": 0.94287109375}, {"start": 2187.83, "end": 2188.65, "word": " variable,", "probability": 0.974609375}, {"start": 2188.65, "end": 2189.31, "word": " Y", "probability": 0.98291015625}, {"start": 2189.31, "end": 2189.31, "word": " is", "probability": 0.93896484375}, {"start": 2189.31, "end": 2189.31, "word": " your", "probability": 0.896484375}], "temperature": 1.0}, {"id": 84, "seek": 221039, "start": 2189.55, "end": 2210.39, "text": " It means there are some errors you don't know about it because you ignore some other variables that may affect the selling price. Maybe you select a random sample, that sample is small. Maybe there is a random, I'm sorry, there is sampling error. 
So all of these are called", "tokens": [467, 1355, 456, 366, 512, 13603, 291, 500, 380, 458, 466, 309, 570, 291, 11200, 512, 661, 9102, 300, 815, 3345, 264, 6511, 3218, 13, 2704, 291, 3048, 257, 4974, 6889, 11, 300, 6889, 307, 1359, 13, 2704, 456, 307, 257, 4974, 11, 286, 478, 2597, 11, 456, 307, 21179, 6713, 13, 407, 439, 295, 613, 366, 1219], "avg_logprob": -0.17108050594895574, "compression_ratio": 1.621301775147929, "no_speech_prob": 0.0, "words": [{"start": 2189.55, "end": 2189.81, "word": " It", "probability": 0.60595703125}, {"start": 2189.81, "end": 2190.11, "word": " means", "probability": 0.92041015625}, {"start": 2190.11, "end": 2190.41, "word": " there", "probability": 0.873046875}, {"start": 2190.41, "end": 2190.61, "word": " are", "probability": 0.947265625}, {"start": 2190.61, "end": 2190.93, "word": " some", "probability": 0.90771484375}, {"start": 2190.93, "end": 2191.29, "word": " errors", "probability": 0.8408203125}, {"start": 2191.29, "end": 2191.49, "word": " you", "probability": 0.68017578125}, {"start": 2191.49, "end": 2191.71, "word": " don't", "probability": 0.8759765625}, {"start": 2191.71, "end": 2191.89, "word": " know", "probability": 0.89990234375}, {"start": 2191.89, "end": 2192.19, "word": " about", "probability": 0.90771484375}, {"start": 2192.19, "end": 2192.45, "word": " it", "probability": 0.9130859375}, {"start": 2192.45, "end": 2193.49, "word": " because", "probability": 0.51416015625}, {"start": 2193.49, "end": 2193.75, "word": " you", "probability": 0.9609375}, {"start": 2193.75, "end": 2194.11, "word": " ignore", "probability": 0.82958984375}, {"start": 2194.11, "end": 2194.51, "word": " some", "probability": 0.8955078125}, {"start": 2194.51, "end": 2194.85, "word": " other", "probability": 0.8994140625}, {"start": 2194.85, "end": 2195.35, "word": " variables", "probability": 0.90673828125}, {"start": 2195.35, "end": 2195.83, "word": " that", "probability": 0.92529296875}, {"start": 2195.83, "end": 2196.13, "word": " may", 
"probability": 0.94677734375}, {"start": 2196.13, "end": 2196.63, "word": " affect", "probability": 0.9248046875}, {"start": 2196.63, "end": 2196.83, "word": " the", "probability": 0.91552734375}, {"start": 2196.83, "end": 2197.03, "word": " selling", "probability": 0.88623046875}, {"start": 2197.03, "end": 2197.49, "word": " price.", "probability": 0.90966796875}, {"start": 2198.41, "end": 2198.71, "word": " Maybe", "probability": 0.9501953125}, {"start": 2198.71, "end": 2198.89, "word": " you", "probability": 0.93994140625}, {"start": 2198.89, "end": 2199.27, "word": " select", "probability": 0.86328125}, {"start": 2199.27, "end": 2199.41, "word": " a", "probability": 0.98681640625}, {"start": 2199.41, "end": 2199.63, "word": " random", "probability": 0.87841796875}, {"start": 2199.63, "end": 2200.05, "word": " sample,", "probability": 0.85205078125}, {"start": 2200.21, "end": 2200.39, "word": " that", "probability": 0.87158203125}, {"start": 2200.39, "end": 2200.69, "word": " sample", "probability": 0.8603515625}, {"start": 2200.69, "end": 2200.89, "word": " is", "probability": 0.93994140625}, {"start": 2200.89, "end": 2201.23, "word": " small.", "probability": 0.931640625}, {"start": 2202.01, "end": 2202.21, "word": " Maybe", "probability": 0.94287109375}, {"start": 2202.21, "end": 2202.49, "word": " there", "probability": 0.90234375}, {"start": 2202.49, "end": 2202.77, "word": " is", "probability": 0.94384765625}, {"start": 2202.77, "end": 2203.11, "word": " a", "probability": 0.72412109375}, {"start": 2203.11, "end": 2203.45, "word": " random,", "probability": 0.86572265625}, {"start": 2203.97, "end": 2204.33, "word": " I'm", "probability": 0.961669921875}, {"start": 2204.33, "end": 2204.51, "word": " sorry,", "probability": 0.86181640625}, {"start": 2204.67, "end": 2204.91, "word": " there", "probability": 0.90673828125}, {"start": 2204.91, "end": 2205.19, "word": " is", "probability": 0.9482421875}, {"start": 2205.19, "end": 2205.93, "word": " sampling", 
"probability": 0.89892578125}, {"start": 2205.93, "end": 2206.27, "word": " error.", "probability": 0.9052734375}, {"start": 2207.07, "end": 2207.35, "word": " So", "probability": 0.93994140625}, {"start": 2207.35, "end": 2207.77, "word": " all", "probability": 0.62890625}, {"start": 2207.77, "end": 2207.93, "word": " of", "probability": 0.96923828125}, {"start": 2207.93, "end": 2208.33, "word": " these", "probability": 0.826171875}, {"start": 2208.33, "end": 2209.89, "word": " are", "probability": 0.93505859375}, {"start": 2209.89, "end": 2210.39, "word": " called", "probability": 0.8876953125}], "temperature": 1.0}, {"id": 85, "seek": 224019, "start": 2211.4, "end": 2240.2, "text": " random error term. So all of them are in this term. So epsilon I means something you don't include in your regression modeling. For example, you don't include all the independent variables that affect Y, or your sample size is not large enough. So all of these measured in random error term. So epsilon I is random error component, beta 0 plus beta 1X is called linear component.", "tokens": [4974, 6713, 1433, 13, 407, 439, 295, 552, 366, 294, 341, 1433, 13, 407, 17889, 286, 1355, 746, 291, 500, 380, 4090, 294, 428, 24590, 15983, 13, 1171, 1365, 11, 291, 500, 380, 4090, 439, 264, 6695, 9102, 300, 3345, 398, 11, 420, 428, 6889, 2744, 307, 406, 2416, 1547, 13, 407, 439, 295, 613, 12690, 294, 4974, 6713, 1433, 13, 407, 17889, 286, 307, 4974, 6713, 6542, 11, 9861, 1958, 1804, 9861, 502, 55, 307, 1219, 8213, 6542, 13], "avg_logprob": -0.23128858319035284, "compression_ratio": 1.784037558685446, "no_speech_prob": 0.0, "words": [{"start": 2211.4, "end": 2212.0, "word": " random", "probability": 0.28271484375}, {"start": 2212.0, "end": 2212.28, "word": " error", "probability": 0.8359375}, {"start": 2212.28, "end": 2212.62, "word": " term.", "probability": 0.141357421875}, {"start": 2212.84, "end": 2212.98, "word": " So", "probability": 0.857421875}, {"start": 2212.98, "end": 2213.18, "word": " 
all", "probability": 0.72607421875}, {"start": 2213.18, "end": 2213.32, "word": " of", "probability": 0.966796875}, {"start": 2213.32, "end": 2213.56, "word": " them", "probability": 0.89794921875}, {"start": 2213.56, "end": 2214.18, "word": " are", "probability": 0.91650390625}, {"start": 2214.18, "end": 2214.46, "word": " in", "probability": 0.80615234375}, {"start": 2214.46, "end": 2214.8, "word": " this", "probability": 0.935546875}, {"start": 2214.8, "end": 2215.14, "word": " term.", "probability": 0.78662109375}, {"start": 2215.56, "end": 2215.72, "word": " So", "probability": 0.9296875}, {"start": 2215.72, "end": 2216.16, "word": " epsilon", "probability": 0.498046875}, {"start": 2216.16, "end": 2216.62, "word": " I", "probability": 0.50048828125}, {"start": 2216.62, "end": 2217.42, "word": " means", "probability": 0.87890625}, {"start": 2217.42, "end": 2218.46, "word": " something", "probability": 0.82275390625}, {"start": 2218.46, "end": 2218.76, "word": " you", "probability": 0.94140625}, {"start": 2218.76, "end": 2219.04, "word": " don't", "probability": 0.96044921875}, {"start": 2219.04, "end": 2219.52, "word": " include", "probability": 0.84228515625}, {"start": 2219.52, "end": 2219.74, "word": " in", "probability": 0.935546875}, {"start": 2219.74, "end": 2219.98, "word": " your", "probability": 0.87255859375}, {"start": 2219.98, "end": 2220.34, "word": " regression", "probability": 0.9169921875}, {"start": 2220.34, "end": 2220.7, "word": " modeling.", "probability": 0.60693359375}, {"start": 2221.28, "end": 2221.58, "word": " For", "probability": 0.955078125}, {"start": 2221.58, "end": 2221.86, "word": " example,", "probability": 0.974609375}, {"start": 2221.92, "end": 2222.04, "word": " you", "probability": 0.94580078125}, {"start": 2222.04, "end": 2222.2, "word": " don't", "probability": 0.976318359375}, {"start": 2222.2, "end": 2222.6, "word": " include", "probability": 0.82861328125}, {"start": 2222.6, "end": 2223.04, "word": " all", 
"probability": 0.95068359375}, {"start": 2223.04, "end": 2223.28, "word": " the", "probability": 0.8486328125}, {"start": 2223.28, "end": 2223.8, "word": " independent", "probability": 0.87646484375}, {"start": 2223.8, "end": 2224.28, "word": " variables", "probability": 0.71142578125}, {"start": 2224.28, "end": 2224.6, "word": " that", "probability": 0.93505859375}, {"start": 2224.6, "end": 2224.92, "word": " affect", "probability": 0.80517578125}, {"start": 2224.92, "end": 2225.32, "word": " Y,", "probability": 0.52880859375}, {"start": 2225.8, "end": 2226.0, "word": " or", "probability": 0.94921875}, {"start": 2226.0, "end": 2226.18, "word": " your", "probability": 0.8837890625}, {"start": 2226.18, "end": 2226.5, "word": " sample", "probability": 0.89599609375}, {"start": 2226.5, "end": 2226.86, "word": " size", "probability": 0.83984375}, {"start": 2226.86, "end": 2227.04, "word": " is", "probability": 0.939453125}, {"start": 2227.04, "end": 2227.26, "word": " not", "probability": 0.94384765625}, {"start": 2227.26, "end": 2227.6, "word": " large", "probability": 0.96240234375}, {"start": 2227.6, "end": 2228.02, "word": " enough.", "probability": 0.86328125}, {"start": 2228.64, "end": 2228.98, "word": " So", "probability": 0.95556640625}, {"start": 2228.98, "end": 2229.2, "word": " all", "probability": 0.89306640625}, {"start": 2229.2, "end": 2229.34, "word": " of", "probability": 0.9501953125}, {"start": 2229.34, "end": 2229.7, "word": " these", "probability": 0.5205078125}, {"start": 2229.7, "end": 2230.76, "word": " measured", "probability": 0.4365234375}, {"start": 2230.76, "end": 2231.22, "word": " in", "probability": 0.94775390625}, {"start": 2231.22, "end": 2231.74, "word": " random", "probability": 0.69970703125}, {"start": 2231.74, "end": 2231.98, "word": " error", "probability": 0.87841796875}, {"start": 2231.98, "end": 2232.26, "word": " term.", "probability": 0.888671875}, {"start": 2232.92, "end": 2233.14, "word": " So", "probability": 0.951171875}, 
{"start": 2233.14, "end": 2233.54, "word": " epsilon", "probability": 0.8798828125}, {"start": 2233.54, "end": 2233.98, "word": " I", "probability": 0.92333984375}, {"start": 2233.98, "end": 2234.26, "word": " is", "probability": 0.951171875}, {"start": 2234.26, "end": 2234.68, "word": " random", "probability": 0.6640625}, {"start": 2234.68, "end": 2235.08, "word": " error", "probability": 0.859375}, {"start": 2235.08, "end": 2235.92, "word": " component,", "probability": 0.86572265625}, {"start": 2236.86, "end": 2237.1, "word": " beta", "probability": 0.806640625}, {"start": 2237.1, "end": 2237.42, "word": " 0", "probability": 0.65576171875}, {"start": 2237.42, "end": 2237.66, "word": " plus", "probability": 0.890625}, {"start": 2237.66, "end": 2237.92, "word": " beta", "probability": 0.90185546875}, {"start": 2237.92, "end": 2238.42, "word": " 1X", "probability": 0.69091796875}, {"start": 2238.42, "end": 2238.84, "word": " is", "probability": 0.84375}, {"start": 2238.84, "end": 2239.08, "word": " called", "probability": 0.865234375}, {"start": 2239.08, "end": 2239.48, "word": " linear", "probability": 0.8935546875}, {"start": 2239.48, "end": 2240.2, "word": " component.", "probability": 0.8564453125}], "temperature": 1.0}, {"id": 86, "seek": 226937, "start": 2242.43, "end": 2269.37, "text": " So that's the simple linear regression model. Now, the data you have, the blue circles represent the observed value. So these blue circles are the observed values. 
So we have observed.", "tokens": [407, 300, 311, 264, 2199, 8213, 24590, 2316, 13, 823, 11, 264, 1412, 291, 362, 11, 264, 3344, 13040, 2906, 264, 13095, 2158, 13, 407, 613, 3344, 13040, 366, 264, 13095, 4190, 13, 407, 321, 362, 13095, 13], "avg_logprob": -0.16596554334347063, "compression_ratio": 1.5677966101694916, "no_speech_prob": 0.0, "words": [{"start": 2242.43, "end": 2242.69, "word": " So", "probability": 0.85791015625}, {"start": 2242.69, "end": 2243.33, "word": " that's", "probability": 0.86572265625}, {"start": 2243.33, "end": 2244.29, "word": " the", "probability": 0.875}, {"start": 2244.29, "end": 2245.07, "word": " simple", "probability": 0.9111328125}, {"start": 2245.07, "end": 2245.99, "word": " linear", "probability": 0.86376953125}, {"start": 2245.99, "end": 2246.41, "word": " regression", "probability": 0.9521484375}, {"start": 2246.41, "end": 2246.75, "word": " model.", "probability": 0.951171875}, {"start": 2248.69, "end": 2249.77, "word": " Now,", "probability": 0.94091796875}, {"start": 2250.31, "end": 2250.55, "word": " the", "probability": 0.8916015625}, {"start": 2250.55, "end": 2250.85, "word": " data", "probability": 0.92626953125}, {"start": 2250.85, "end": 2251.09, "word": " you", "probability": 0.95751953125}, {"start": 2251.09, "end": 2251.43, "word": " have,", "probability": 0.94921875}, {"start": 2252.85, "end": 2253.03, "word": " the", "probability": 0.89111328125}, {"start": 2253.03, "end": 2253.25, "word": " blue", "probability": 0.96630859375}, {"start": 2253.25, "end": 2253.85, "word": " circles", "probability": 0.822265625}, {"start": 2253.85, "end": 2255.35, "word": " represent", "probability": 0.57373046875}, {"start": 2255.35, "end": 2256.49, "word": " the", "probability": 0.90771484375}, {"start": 2256.49, "end": 2257.03, "word": " observed", "probability": 0.89501953125}, {"start": 2257.03, "end": 2257.51, "word": " value.", "probability": 0.9130859375}, {"start": 2258.01, "end": 2258.21, "word": " So", "probability": 
0.955078125}, {"start": 2258.21, "end": 2258.59, "word": " these", "probability": 0.5625}, {"start": 2258.59, "end": 2260.29, "word": " blue", "probability": 0.374267578125}, {"start": 2260.29, "end": 2260.83, "word": " circles", "probability": 0.81982421875}, {"start": 2260.83, "end": 2261.35, "word": " are", "probability": 0.939453125}, {"start": 2261.35, "end": 2263.15, "word": " the", "probability": 0.87939453125}, {"start": 2263.15, "end": 2263.65, "word": " observed", "probability": 0.89794921875}, {"start": 2263.65, "end": 2264.15, "word": " values.", "probability": 0.91845703125}, {"start": 2265.65, "end": 2266.39, "word": " So", "probability": 0.94775390625}, {"start": 2266.39, "end": 2267.41, "word": " we", "probability": 0.79296875}, {"start": 2267.41, "end": 2267.71, "word": " have", "probability": 0.94873046875}, {"start": 2267.71, "end": 2269.37, "word": " observed.", "probability": 0.8369140625}], "temperature": 1.0}, {"id": 87, "seek": 230019, "start": 2272.98, "end": 2300.2, "text": " Y observed value of Y for each value X. The regression line is the blue, the red one. It's called the predicted values. Predicted Y. Predicted Y is denoted always by Y hat. Now the difference between Y and Y hat. 
It's called the error term.", "tokens": [398, 13095, 2158, 295, 398, 337, 1184, 2158, 1783, 13, 440, 24590, 1622, 307, 264, 3344, 11, 264, 2182, 472, 13, 467, 311, 1219, 264, 19147, 4190, 13, 32969, 11254, 398, 13, 32969, 11254, 398, 307, 1441, 23325, 1009, 538, 398, 2385, 13, 823, 264, 2649, 1296, 398, 293, 398, 2385, 13, 467, 311, 1219, 264, 6713, 1433, 13], "avg_logprob": -0.25585937102635703, "compression_ratio": 1.5751633986928104, "no_speech_prob": 0.0, "words": [{"start": 2272.98, "end": 2273.42, "word": " Y", "probability": 0.3427734375}, {"start": 2273.42, "end": 2274.34, "word": " observed", "probability": 0.2049560546875}, {"start": 2274.34, "end": 2274.82, "word": " value", "probability": 0.89697265625}, {"start": 2274.82, "end": 2275.02, "word": " of", "probability": 0.87939453125}, {"start": 2275.02, "end": 2275.28, "word": " Y", "probability": 0.8720703125}, {"start": 2275.28, "end": 2275.56, "word": " for", "probability": 0.880859375}, {"start": 2275.56, "end": 2275.86, "word": " each", "probability": 0.9453125}, {"start": 2275.86, "end": 2276.1, "word": " value", "probability": 0.943359375}, {"start": 2276.1, "end": 2276.48, "word": " X.", "probability": 0.5712890625}, {"start": 2277.46, "end": 2277.94, "word": " The", "probability": 0.86865234375}, {"start": 2277.94, "end": 2278.26, "word": " regression", "probability": 0.90625}, {"start": 2278.26, "end": 2278.6, "word": " line", "probability": 0.80224609375}, {"start": 2278.6, "end": 2278.78, "word": " is", "probability": 0.837890625}, {"start": 2278.78, "end": 2278.9, "word": " the", "probability": 0.8798828125}, {"start": 2278.9, "end": 2279.1, "word": " blue,", "probability": 0.8505859375}, {"start": 2279.28, "end": 2279.52, "word": " the", "probability": 0.88134765625}, {"start": 2279.52, "end": 2280.8, "word": " red", "probability": 0.9326171875}, {"start": 2280.8, "end": 2281.18, "word": " one.", "probability": 0.92333984375}, {"start": 2282.6, "end": 2283.36, "word": " It's", 
"probability": 0.90087890625}, {"start": 2283.36, "end": 2283.58, "word": " called", "probability": 0.80615234375}, {"start": 2283.58, "end": 2283.76, "word": " the", "probability": 0.83837890625}, {"start": 2283.76, "end": 2284.14, "word": " predicted", "probability": 0.8095703125}, {"start": 2284.14, "end": 2284.68, "word": " values.", "probability": 0.64453125}, {"start": 2286.6, "end": 2287.08, "word": " Predicted", "probability": 0.82275390625}, {"start": 2287.08, "end": 2287.56, "word": " Y.", "probability": 0.9208984375}, {"start": 2288.18, "end": 2288.7, "word": " Predicted", "probability": 0.6829833984375}, {"start": 2288.7, "end": 2289.12, "word": " Y", "probability": 0.9921875}, {"start": 2289.12, "end": 2289.52, "word": " is", "probability": 0.9267578125}, {"start": 2289.52, "end": 2289.84, "word": " denoted", "probability": 0.83349609375}, {"start": 2289.84, "end": 2290.46, "word": " always", "probability": 0.89111328125}, {"start": 2290.46, "end": 2290.88, "word": " by", "probability": 0.96728515625}, {"start": 2290.88, "end": 2292.34, "word": " Y", "probability": 0.91015625}, {"start": 2292.34, "end": 2292.72, "word": " hat.", "probability": 0.79052734375}, {"start": 2294.06, "end": 2294.58, "word": " Now", "probability": 0.94775390625}, {"start": 2294.58, "end": 2294.76, "word": " the", "probability": 0.61083984375}, {"start": 2294.76, "end": 2295.24, "word": " difference", "probability": 0.85595703125}, {"start": 2295.24, "end": 2295.68, "word": " between", "probability": 0.88720703125}, {"start": 2295.68, "end": 2295.96, "word": " Y", "probability": 0.990234375}, {"start": 2295.96, "end": 2296.16, "word": " and", "probability": 0.93798828125}, {"start": 2296.16, "end": 2296.42, "word": " Y", "probability": 0.994140625}, {"start": 2296.42, "end": 2296.8, "word": " hat.", "probability": 0.9111328125}, {"start": 2298.62, "end": 2299.38, "word": " It's", "probability": 0.864013671875}, {"start": 2299.38, "end": 2299.62, "word": " called", 
"probability": 0.888671875}, {"start": 2299.62, "end": 2299.74, "word": " the", "probability": 0.6083984375}, {"start": 2299.74, "end": 2299.94, "word": " error", "probability": 0.71240234375}, {"start": 2299.94, "end": 2300.2, "word": " term.", "probability": 0.422119140625}], "temperature": 1.0}, {"id": 88, "seek": 233260, "start": 2304.68, "end": 2332.6, "text": " It's actually the difference between the observed value and its predicted value. Now, the predicted value can be determined by using the regression line. So this line is the predicted value of Y for XR. Again, beta zero is the intercept. As we mentioned before, it could be positive or negative or even equal zero. The slope is changing Y.", "tokens": [467, 311, 767, 264, 2649, 1296, 264, 13095, 2158, 293, 1080, 19147, 2158, 13, 823, 11, 264, 19147, 2158, 393, 312, 9540, 538, 1228, 264, 24590, 1622, 13, 407, 341, 1622, 307, 264, 19147, 2158, 295, 398, 337, 1783, 49, 13, 3764, 11, 9861, 4018, 307, 264, 24700, 13, 1018, 321, 2835, 949, 11, 309, 727, 312, 3353, 420, 3671, 420, 754, 2681, 4018, 13, 440, 13525, 307, 4473, 398, 13], "avg_logprob": -0.2043185836325089, "compression_ratio": 1.619047619047619, "no_speech_prob": 0.0, "words": [{"start": 2304.68, "end": 2304.88, "word": " It's", "probability": 0.57830810546875}, {"start": 2304.88, "end": 2305.26, "word": " actually", "probability": 0.88232421875}, {"start": 2305.26, "end": 2305.5, "word": " the", "probability": 0.90478515625}, {"start": 2305.5, "end": 2305.94, "word": " difference", "probability": 0.87353515625}, {"start": 2305.94, "end": 2306.46, "word": " between", "probability": 0.876953125}, {"start": 2306.46, "end": 2307.52, "word": " the", "probability": 0.8779296875}, {"start": 2307.52, "end": 2308.0, "word": " observed", "probability": 0.88134765625}, {"start": 2308.0, "end": 2308.44, "word": " value", "probability": 0.958984375}, {"start": 2308.44, "end": 2308.82, "word": " and", "probability": 0.90087890625}, {"start": 2308.82, "end": 
2309.1, "word": " its", "probability": 0.59130859375}, {"start": 2309.1, "end": 2309.58, "word": " predicted", "probability": 0.7802734375}, {"start": 2309.58, "end": 2310.0, "word": " value.", "probability": 0.96435546875}, {"start": 2310.64, "end": 2310.98, "word": " Now,", "probability": 0.935546875}, {"start": 2311.1, "end": 2311.24, "word": " the", "probability": 0.89990234375}, {"start": 2311.24, "end": 2311.6, "word": " predicted", "probability": 0.80126953125}, {"start": 2311.6, "end": 2311.98, "word": " value", "probability": 0.96826171875}, {"start": 2311.98, "end": 2312.18, "word": " can", "probability": 0.9267578125}, {"start": 2312.18, "end": 2312.38, "word": " be", "probability": 0.9208984375}, {"start": 2312.38, "end": 2312.84, "word": " determined", "probability": 0.947265625}, {"start": 2312.84, "end": 2313.88, "word": " by", "probability": 0.95849609375}, {"start": 2313.88, "end": 2314.24, "word": " using", "probability": 0.9326171875}, {"start": 2314.24, "end": 2314.42, "word": " the", "probability": 0.9130859375}, {"start": 2314.42, "end": 2314.72, "word": " regression", "probability": 0.9443359375}, {"start": 2314.72, "end": 2315.1, "word": " line.", "probability": 0.93994140625}, {"start": 2315.8, "end": 2316.08, "word": " So", "probability": 0.94677734375}, {"start": 2316.08, "end": 2316.46, "word": " this", "probability": 0.71826171875}, {"start": 2316.46, "end": 2316.82, "word": " line", "probability": 0.91552734375}, {"start": 2316.82, "end": 2317.06, "word": " is", "probability": 0.94482421875}, {"start": 2317.06, "end": 2317.18, "word": " the", "probability": 0.48193359375}, {"start": 2317.18, "end": 2317.52, "word": " predicted", "probability": 0.8251953125}, {"start": 2317.52, "end": 2318.02, "word": " value", "probability": 0.96728515625}, {"start": 2318.02, "end": 2318.32, "word": " of", "probability": 0.96533203125}, {"start": 2318.32, "end": 2318.66, "word": " Y", "probability": 0.61865234375}, {"start": 2318.66, "end": 2319.18, 
"word": " for", "probability": 0.732421875}, {"start": 2319.18, "end": 2319.66, "word": " XR.", "probability": 0.60009765625}, {"start": 2320.62, "end": 2321.04, "word": " Again,", "probability": 0.9423828125}, {"start": 2321.26, "end": 2321.44, "word": " beta", "probability": 0.7138671875}, {"start": 2321.44, "end": 2321.82, "word": " zero", "probability": 0.591796875}, {"start": 2321.82, "end": 2322.28, "word": " is", "probability": 0.93994140625}, {"start": 2322.28, "end": 2322.48, "word": " the", "probability": 0.9208984375}, {"start": 2322.48, "end": 2322.94, "word": " intercept.", "probability": 0.96875}, {"start": 2323.96, "end": 2324.36, "word": " As", "probability": 0.9658203125}, {"start": 2324.36, "end": 2324.48, "word": " we", "probability": 0.9296875}, {"start": 2324.48, "end": 2324.74, "word": " mentioned", "probability": 0.82373046875}, {"start": 2324.74, "end": 2325.02, "word": " before,", "probability": 0.86328125}, {"start": 2325.08, "end": 2325.12, "word": " it", "probability": 0.94091796875}, {"start": 2325.12, "end": 2325.26, "word": " could", "probability": 0.85791015625}, {"start": 2325.26, "end": 2325.38, "word": " be", "probability": 0.935546875}, {"start": 2325.38, "end": 2325.7, "word": " positive", "probability": 0.93505859375}, {"start": 2325.7, "end": 2325.96, "word": " or", "probability": 0.892578125}, {"start": 2325.96, "end": 2326.26, "word": " negative", "probability": 0.93505859375}, {"start": 2326.26, "end": 2326.68, "word": " or", "probability": 0.671875}, {"start": 2326.68, "end": 2327.46, "word": " even", "probability": 0.876953125}, {"start": 2327.46, "end": 2327.88, "word": " equal", "probability": 0.7841796875}, {"start": 2327.88, "end": 2328.28, "word": " zero.", "probability": 0.5205078125}, {"start": 2330.2, "end": 2330.46, "word": " The", "probability": 0.82861328125}, {"start": 2330.46, "end": 2330.84, "word": " slope", "probability": 0.5771484375}, {"start": 2330.84, "end": 2331.78, "word": " is", "probability": 
0.92578125}, {"start": 2331.78, "end": 2332.2, "word": " changing", "probability": 0.35498046875}, {"start": 2332.2, "end": 2332.6, "word": " Y.", "probability": 0.85400390625}], "temperature": 1.0}, {"id": 89, "seek": 236358, "start": 2335.14, "end": 2363.58, "text": " Divide by change of x. So these are the components for the simple linear regression model. Y again represents the independent variable. Beta 0 y intercept. Beta 1 is your slope. And the slope determines the direction of the relationship. X independent variable epsilon i is the random error term.", "tokens": [9886, 482, 538, 1319, 295, 2031, 13, 407, 613, 366, 264, 6677, 337, 264, 2199, 8213, 24590, 2316, 13, 398, 797, 8855, 264, 6695, 7006, 13, 33286, 1958, 288, 24700, 13, 33286, 502, 307, 428, 13525, 13, 400, 264, 13525, 24799, 264, 3513, 295, 264, 2480, 13, 1783, 6695, 7006, 17889, 741, 307, 264, 4974, 6713, 1433, 13], "avg_logprob": -0.25926907183760306, "compression_ratio": 1.588235294117647, "no_speech_prob": 0.0, "words": [{"start": 2335.14, "end": 2335.68, "word": " Divide", "probability": 0.5712890625}, {"start": 2335.68, "end": 2336.06, "word": " by", "probability": 0.947265625}, {"start": 2336.06, "end": 2336.8, "word": " change", "probability": 0.468994140625}, {"start": 2336.8, "end": 2336.98, "word": " of", "probability": 0.89013671875}, {"start": 2336.98, "end": 2337.58, "word": " x.", "probability": 0.56494140625}, {"start": 2341.84, "end": 2342.64, "word": " So", "probability": 0.8935546875}, {"start": 2342.64, "end": 2342.96, "word": " these", "probability": 0.71142578125}, {"start": 2342.96, "end": 2343.22, "word": " are", "probability": 0.9423828125}, {"start": 2343.22, "end": 2343.5, "word": " the", "probability": 0.9228515625}, {"start": 2343.5, "end": 2344.32, "word": " components", "probability": 0.9306640625}, {"start": 2344.32, "end": 2344.88, "word": " for", "probability": 0.9306640625}, {"start": 2344.88, "end": 2345.98, "word": " the", "probability": 0.90966796875}, 
{"start": 2345.98, "end": 2346.6, "word": " simple", "probability": 0.837890625}, {"start": 2346.6, "end": 2347.14, "word": " linear", "probability": 0.93115234375}, {"start": 2347.14, "end": 2347.58, "word": " regression", "probability": 0.8642578125}, {"start": 2347.58, "end": 2347.94, "word": " model.", "probability": 0.9228515625}, {"start": 2348.38, "end": 2348.7, "word": " Y", "probability": 0.900390625}, {"start": 2348.7, "end": 2349.16, "word": " again", "probability": 0.828125}, {"start": 2349.16, "end": 2350.54, "word": " represents", "probability": 0.67138671875}, {"start": 2350.54, "end": 2350.84, "word": " the", "probability": 0.91455078125}, {"start": 2350.84, "end": 2351.18, "word": " independent", "probability": 0.447265625}, {"start": 2351.18, "end": 2351.66, "word": " variable.", "probability": 0.91162109375}, {"start": 2352.32, "end": 2352.54, "word": " Beta", "probability": 0.87060546875}, {"start": 2352.54, "end": 2352.82, "word": " 0", "probability": 0.52099609375}, {"start": 2352.82, "end": 2353.02, "word": " y", "probability": 0.35400390625}, {"start": 2353.02, "end": 2353.46, "word": " intercept.", "probability": 0.8916015625}, {"start": 2354.26, "end": 2354.62, "word": " Beta", "probability": 0.8935546875}, {"start": 2354.62, "end": 2354.96, "word": " 1", "probability": 0.91748046875}, {"start": 2354.96, "end": 2355.4, "word": " is", "probability": 0.9248046875}, {"start": 2355.4, "end": 2355.58, "word": " your", "probability": 0.88330078125}, {"start": 2355.58, "end": 2355.98, "word": " slope.", "probability": 0.884765625}, {"start": 2356.44, "end": 2356.88, "word": " And", "probability": 0.94580078125}, {"start": 2356.88, "end": 2357.04, "word": " the", "probability": 0.8623046875}, {"start": 2357.04, "end": 2357.26, "word": " slope", "probability": 0.88134765625}, {"start": 2357.26, "end": 2357.74, "word": " determines", "probability": 0.95703125}, {"start": 2357.74, "end": 2357.96, "word": " the", "probability": 0.912109375}, {"start": 
2357.96, "end": 2358.48, "word": " direction", "probability": 0.97900390625}, {"start": 2358.48, "end": 2358.9, "word": " of", "probability": 0.9638671875}, {"start": 2358.9, "end": 2359.04, "word": " the", "probability": 0.9091796875}, {"start": 2359.04, "end": 2359.58, "word": " relationship.", "probability": 0.90380859375}, {"start": 2360.22, "end": 2360.5, "word": " X", "probability": 0.60498046875}, {"start": 2360.5, "end": 2360.9, "word": " independent", "probability": 0.85595703125}, {"start": 2360.9, "end": 2361.42, "word": " variable", "probability": 0.90966796875}, {"start": 2361.42, "end": 2361.8, "word": " epsilon", "probability": 0.7119140625}, {"start": 2361.8, "end": 2362.18, "word": " i", "probability": 0.5771484375}, {"start": 2362.18, "end": 2362.46, "word": " is", "probability": 0.92626953125}, {"start": 2362.46, "end": 2362.58, "word": " the", "probability": 0.393798828125}, {"start": 2362.58, "end": 2362.88, "word": " random", "probability": 0.85302734375}, {"start": 2362.88, "end": 2363.2, "word": " error", "probability": 0.8837890625}, {"start": 2363.2, "end": 2363.58, "word": " term.", "probability": 0.71923828125}], "temperature": 1.0}, {"id": 90, "seek": 238271, "start": 2365.01, "end": 2382.71, "text": " Any question? The relationship may be positive or negative. It could be negative. 
Now, for negative relationship,", "tokens": [2639, 1168, 30, 440, 2480, 815, 312, 3353, 420, 3671, 13, 467, 727, 312, 3671, 13, 823, 11, 337, 3671, 2480, 11], "avg_logprob": -0.3536005305207294, "compression_ratio": 1.3103448275862069, "no_speech_prob": 0.0, "words": [{"start": 2365.01, "end": 2365.27, "word": " Any", "probability": 0.3095703125}, {"start": 2365.27, "end": 2365.65, "word": " question?", "probability": 0.57958984375}, {"start": 2371.75, "end": 2372.57, "word": " The", "probability": 0.65625}, {"start": 2372.57, "end": 2373.01, "word": " relationship", "probability": 0.38427734375}, {"start": 2373.01, "end": 2373.47, "word": " may", "probability": 0.54248046875}, {"start": 2373.47, "end": 2373.59, "word": " be", "probability": 0.9482421875}, {"start": 2373.59, "end": 2373.89, "word": " positive", "probability": 0.87451171875}, {"start": 2373.89, "end": 2374.79, "word": " or", "probability": 0.91650390625}, {"start": 2374.79, "end": 2375.33, "word": " negative.", "probability": 0.92529296875}, {"start": 2376.57, "end": 2376.61, "word": " It", "probability": 0.6796875}, {"start": 2376.61, "end": 2376.73, "word": " could", "probability": 0.8740234375}, {"start": 2376.73, "end": 2376.87, "word": " be", "probability": 0.95361328125}, {"start": 2376.87, "end": 2377.19, "word": " negative.", "probability": 0.88232421875}, {"start": 2380.95, "end": 2381.41, "word": " Now,", "probability": 0.67724609375}, {"start": 2381.49, "end": 2381.73, "word": " for", "probability": 0.9189453125}, {"start": 2381.73, "end": 2382.05, "word": " negative", "probability": 0.7119140625}, {"start": 2382.05, "end": 2382.71, "word": " relationship,", "probability": 0.865234375}], "temperature": 1.0}, {"id": 91, "seek": 242206, "start": 2397.0, "end": 2422.06, "text": " Or negative, where beta zero is negative. Or beta zero equals zero. 
So here there exists negative relationship, but beta zero may be positive.", "tokens": [1610, 3671, 11, 689, 9861, 4018, 307, 3671, 13, 1610, 9861, 4018, 6915, 4018, 13, 407, 510, 456, 8198, 3671, 2480, 11, 457, 9861, 4018, 815, 312, 3353, 13], "avg_logprob": -0.30286457339922584, "compression_ratio": 1.4895833333333333, "no_speech_prob": 0.0, "words": [{"start": 2397.0, "end": 2397.76, "word": " Or", "probability": 0.29931640625}, {"start": 2397.76, "end": 2398.16, "word": " negative,", "probability": 0.59375}, {"start": 2399.54, "end": 2400.3, "word": " where", "probability": 0.73291015625}, {"start": 2400.3, "end": 2400.56, "word": " beta", "probability": 0.67236328125}, {"start": 2400.56, "end": 2400.9, "word": " zero", "probability": 0.439208984375}, {"start": 2400.9, "end": 2401.5, "word": " is", "probability": 0.9248046875}, {"start": 2401.5, "end": 2404.46, "word": " negative.", "probability": 0.92724609375}, {"start": 2404.52, "end": 2405.24, "word": " Or", "probability": 0.92236328125}, {"start": 2405.24, "end": 2408.7, "word": " beta", "probability": 0.60498046875}, {"start": 2408.7, "end": 2409.08, "word": " zero", "probability": 0.88720703125}, {"start": 2409.08, "end": 2409.38, "word": " equals", "probability": 0.841796875}, {"start": 2409.38, "end": 2409.74, "word": " zero.", "probability": 0.8515625}, {"start": 2416.68, "end": 2417.44, "word": " So", "probability": 0.89599609375}, {"start": 2417.44, "end": 2417.7, "word": " here", "probability": 0.75732421875}, {"start": 2417.7, "end": 2418.0, "word": " there", "probability": 0.56640625}, {"start": 2418.0, "end": 2418.32, "word": " exists", "probability": 0.75927734375}, {"start": 2418.32, "end": 2418.72, "word": " negative", "probability": 0.8203125}, {"start": 2418.72, "end": 2419.32, "word": " relationship,", "probability": 0.916015625}, {"start": 2420.16, "end": 2420.62, "word": " but", "probability": 0.90283203125}, {"start": 2420.62, "end": 2420.96, "word": " beta", "probability": 0.91796875}, 
{"start": 2420.96, "end": 2421.34, "word": " zero", "probability": 0.89599609375}, {"start": 2421.34, "end": 2421.56, "word": " may", "probability": 0.7802734375}, {"start": 2421.56, "end": 2421.68, "word": " be", "probability": 0.9541015625}, {"start": 2421.68, "end": 2422.06, "word": " positive.", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 92, "seek": 245231, "start": 2425.87, "end": 2452.31, "text": " So again, the sign of beta 0 also does not affect the relationship between the two variables. Now, we don't actually know the values of beta 0 and beta 1. We are going to estimate these values from the sample we have. So the simple linear regression equation provides an estimate of the population regression line.", "tokens": [407, 797, 11, 264, 1465, 295, 9861, 1958, 611, 775, 406, 3345, 264, 2480, 1296, 264, 732, 9102, 13, 823, 11, 321, 500, 380, 767, 458, 264, 4190, 295, 9861, 1958, 293, 9861, 502, 13, 492, 366, 516, 281, 12539, 613, 4190, 490, 264, 6889, 321, 362, 13, 407, 264, 2199, 8213, 24590, 5367, 6417, 364, 12539, 295, 264, 4415, 24590, 1622, 13], "avg_logprob": -0.17431640136055648, "compression_ratio": 1.6153846153846154, "no_speech_prob": 0.0, "words": [{"start": 2425.8700000000003, "end": 2426.51, "word": " So", "probability": 0.13330078125}, {"start": 2426.51, "end": 2427.15, "word": " again,", "probability": 0.6865234375}, {"start": 2427.45, "end": 2427.45, "word": " the", "probability": 0.78857421875}, {"start": 2427.45, "end": 2427.73, "word": " sign", "probability": 0.49169921875}, {"start": 2427.73, "end": 2427.89, "word": " of", "probability": 0.92822265625}, {"start": 2427.89, "end": 2428.09, "word": " beta", "probability": 0.76318359375}, {"start": 2428.09, "end": 2428.41, "word": " 0", "probability": 0.59033203125}, {"start": 2428.41, "end": 2428.89, "word": " also", "probability": 0.5712890625}, {"start": 2428.89, "end": 2429.59, "word": " does", "probability": 0.9453125}, {"start": 2429.59, "end": 2429.79, "word": " not", 
"probability": 0.94970703125}, {"start": 2429.79, "end": 2430.21, "word": " affect", "probability": 0.88623046875}, {"start": 2430.21, "end": 2430.49, "word": " the", "probability": 0.90673828125}, {"start": 2430.49, "end": 2430.91, "word": " relationship", "probability": 0.89453125}, {"start": 2430.91, "end": 2431.31, "word": " between", "probability": 0.85986328125}, {"start": 2431.31, "end": 2431.51, "word": " the", "probability": 0.84228515625}, {"start": 2431.51, "end": 2431.65, "word": " two", "probability": 0.88232421875}, {"start": 2431.65, "end": 2431.99, "word": " variables.", "probability": 0.8056640625}, {"start": 2436.23, "end": 2436.87, "word": " Now,", "probability": 0.9228515625}, {"start": 2437.11, "end": 2437.55, "word": " we", "probability": 0.95166015625}, {"start": 2437.55, "end": 2437.81, "word": " don't", "probability": 0.947265625}, {"start": 2437.81, "end": 2438.27, "word": " actually", "probability": 0.875}, {"start": 2438.27, "end": 2438.49, "word": " know", "probability": 0.89013671875}, {"start": 2438.49, "end": 2438.67, "word": " the", "probability": 0.9189453125}, {"start": 2438.67, "end": 2439.17, "word": " values", "probability": 0.9658203125}, {"start": 2439.17, "end": 2440.13, "word": " of", "probability": 0.958984375}, {"start": 2440.13, "end": 2440.33, "word": " beta", "probability": 0.916015625}, {"start": 2440.33, "end": 2440.59, "word": " 0", "probability": 0.94970703125}, {"start": 2440.59, "end": 2440.73, "word": " and", "probability": 0.94482421875}, {"start": 2440.73, "end": 2440.91, "word": " beta", "probability": 0.912109375}, {"start": 2440.91, "end": 2441.19, "word": " 1.", "probability": 0.9765625}, {"start": 2441.57, "end": 2441.87, "word": " We", "probability": 0.9443359375}, {"start": 2441.87, "end": 2442.03, "word": " are", "probability": 0.92041015625}, {"start": 2442.03, "end": 2442.27, "word": " going", "probability": 0.94384765625}, {"start": 2442.27, "end": 2442.47, "word": " to", "probability": 
0.966796875}, {"start": 2442.47, "end": 2442.91, "word": " estimate", "probability": 0.91748046875}, {"start": 2442.91, "end": 2444.03, "word": " these", "probability": 0.85498046875}, {"start": 2444.03, "end": 2444.51, "word": " values", "probability": 0.9638671875}, {"start": 2444.51, "end": 2444.93, "word": " from", "probability": 0.8837890625}, {"start": 2444.93, "end": 2445.39, "word": " the", "probability": 0.9130859375}, {"start": 2445.39, "end": 2445.65, "word": " sample", "probability": 0.5322265625}, {"start": 2445.65, "end": 2445.85, "word": " we", "probability": 0.9267578125}, {"start": 2445.85, "end": 2446.13, "word": " have.", "probability": 0.93701171875}, {"start": 2446.97, "end": 2447.19, "word": " So", "probability": 0.8642578125}, {"start": 2447.19, "end": 2447.35, "word": " the", "probability": 0.68798828125}, {"start": 2447.35, "end": 2447.67, "word": " simple", "probability": 0.630859375}, {"start": 2447.67, "end": 2448.11, "word": " linear", "probability": 0.9169921875}, {"start": 2448.11, "end": 2448.45, "word": " regression", "probability": 0.9736328125}, {"start": 2448.45, "end": 2448.89, "word": " equation", "probability": 0.96484375}, {"start": 2448.89, "end": 2449.61, "word": " provides", "probability": 0.89111328125}, {"start": 2449.61, "end": 2449.93, "word": " an", "probability": 0.9443359375}, {"start": 2449.93, "end": 2450.45, "word": " estimate", "probability": 0.91552734375}, {"start": 2450.45, "end": 2450.85, "word": " of", "probability": 0.96533203125}, {"start": 2450.85, "end": 2450.97, "word": " the", "probability": 0.9111328125}, {"start": 2450.97, "end": 2451.37, "word": " population", "probability": 0.94287109375}, {"start": 2451.37, "end": 2451.81, "word": " regression", "probability": 0.91357421875}, {"start": 2451.81, "end": 2452.31, "word": " line.", "probability": 0.94287109375}], "temperature": 1.0}, {"id": 93, "seek": 246681, "start": 2453.45, "end": 2466.81, "text": " So here we have Yi hat is the estimated or 
predicted Y value for observation I. The estimate of the regression intercept P0.", "tokens": [407, 510, 321, 362, 16747, 2385, 307, 264, 14109, 420, 19147, 398, 2158, 337, 14816, 286, 13, 440, 12539, 295, 264, 24590, 24700, 430, 15, 13], "avg_logprob": -0.22410301036304897, "compression_ratio": 1.2135922330097086, "no_speech_prob": 0.0, "words": [{"start": 2453.45, "end": 2453.71, "word": " So", "probability": 0.86572265625}, {"start": 2453.71, "end": 2453.89, "word": " here", "probability": 0.759765625}, {"start": 2453.89, "end": 2454.05, "word": " we", "probability": 0.8466796875}, {"start": 2454.05, "end": 2454.31, "word": " have", "probability": 0.9423828125}, {"start": 2454.31, "end": 2454.75, "word": " Yi", "probability": 0.364013671875}, {"start": 2454.75, "end": 2455.27, "word": " hat", "probability": 0.80078125}, {"start": 2455.27, "end": 2457.11, "word": " is", "probability": 0.74072265625}, {"start": 2457.11, "end": 2457.43, "word": " the", "probability": 0.9140625}, {"start": 2457.43, "end": 2458.13, "word": " estimated", "probability": 0.900390625}, {"start": 2458.13, "end": 2458.57, "word": " or", "probability": 0.9130859375}, {"start": 2458.57, "end": 2459.09, "word": " predicted", "probability": 0.85888671875}, {"start": 2459.09, "end": 2459.43, "word": " Y", "probability": 0.8515625}, {"start": 2459.43, "end": 2459.79, "word": " value", "probability": 0.85400390625}, {"start": 2459.79, "end": 2460.01, "word": " for", "probability": 0.93212890625}, {"start": 2460.01, "end": 2460.47, "word": " observation", "probability": 0.88330078125}, {"start": 2460.47, "end": 2460.85, "word": " I.", "probability": 0.65576171875}, {"start": 2463.53, "end": 2464.41, "word": " The", "probability": 0.84716796875}, {"start": 2464.41, "end": 2464.93, "word": " estimate", "probability": 0.921875}, {"start": 2464.93, "end": 2465.19, "word": " of", "probability": 0.96044921875}, {"start": 2465.19, "end": 2465.31, "word": " the", "probability": 0.888671875}, {"start": 
2465.31, "end": 2465.59, "word": " regression", "probability": 0.94140625}, {"start": 2465.59, "end": 2466.15, "word": " intercept", "probability": 0.9501953125}, {"start": 2466.15, "end": 2466.81, "word": " P0.", "probability": 0.745849609375}], "temperature": 1.0}, {"id": 94, "seek": 249596, "start": 2468.0, "end": 2495.96, "text": " The estimate of the regression slope is b1, and this is your x, all independent variable. So here is the regression equation. Simple linear regression equation is given by y hat, the predicted value of y equals b0 plus b1 times x1. Now these coefficients, b0 and b1 can be computed", "tokens": [440, 12539, 295, 264, 24590, 13525, 307, 272, 16, 11, 293, 341, 307, 428, 2031, 11, 439, 6695, 7006, 13, 407, 510, 307, 264, 24590, 5367, 13, 21532, 8213, 24590, 5367, 307, 2212, 538, 288, 2385, 11, 264, 19147, 2158, 295, 288, 6915, 272, 15, 1804, 272, 16, 1413, 2031, 16, 13, 823, 613, 31994, 11, 272, 15, 293, 272, 16, 393, 312, 40610], "avg_logprob": -0.18774038461538461, "compression_ratio": 1.5932203389830508, "no_speech_prob": 0.0, "words": [{"start": 2468.0, "end": 2468.22, "word": " The", "probability": 0.7294921875}, {"start": 2468.22, "end": 2468.74, "word": " estimate", "probability": 0.92431640625}, {"start": 2468.74, "end": 2469.02, "word": " of", "probability": 0.9599609375}, {"start": 2469.02, "end": 2469.16, "word": " the", "probability": 0.88232421875}, {"start": 2469.16, "end": 2469.46, "word": " regression", "probability": 0.9208984375}, {"start": 2469.46, "end": 2469.84, "word": " slope", "probability": 0.80419921875}, {"start": 2469.84, "end": 2470.02, "word": " is", "probability": 0.93603515625}, {"start": 2470.02, "end": 2470.42, "word": " b1,", "probability": 0.6295166015625}, {"start": 2470.8, "end": 2471.18, "word": " and", "probability": 0.93310546875}, {"start": 2471.18, "end": 2471.36, "word": " this", "probability": 0.93896484375}, {"start": 2471.36, "end": 2471.52, "word": " is", "probability": 0.93701171875}, 
{"start": 2471.52, "end": 2471.86, "word": " your", "probability": 0.89697265625}, {"start": 2471.86, "end": 2473.36, "word": " x,", "probability": 0.77294921875}, {"start": 2473.72, "end": 2474.1, "word": " all", "probability": 0.7255859375}, {"start": 2474.1, "end": 2474.5, "word": " independent", "probability": 0.84423828125}, {"start": 2474.5, "end": 2474.96, "word": " variable.", "probability": 0.75341796875}, {"start": 2475.68, "end": 2476.1, "word": " So", "probability": 0.9560546875}, {"start": 2476.1, "end": 2476.42, "word": " here", "probability": 0.75341796875}, {"start": 2476.42, "end": 2476.68, "word": " is", "probability": 0.93994140625}, {"start": 2476.68, "end": 2476.82, "word": " the", "probability": 0.91259765625}, {"start": 2476.82, "end": 2477.18, "word": " regression", "probability": 0.9638671875}, {"start": 2477.18, "end": 2478.02, "word": " equation.", "probability": 0.97705078125}, {"start": 2479.0, "end": 2479.52, "word": " Simple", "probability": 0.88330078125}, {"start": 2479.52, "end": 2479.94, "word": " linear", "probability": 0.72998046875}, {"start": 2479.94, "end": 2480.34, "word": " regression", "probability": 0.9677734375}, {"start": 2480.34, "end": 2480.74, "word": " equation", "probability": 0.91748046875}, {"start": 2480.74, "end": 2480.92, "word": " is", "probability": 0.853515625}, {"start": 2480.92, "end": 2481.12, "word": " given", "probability": 0.8974609375}, {"start": 2481.12, "end": 2481.54, "word": " by", "probability": 0.974609375}, {"start": 2481.54, "end": 2482.72, "word": " y", "probability": 0.72021484375}, {"start": 2482.72, "end": 2483.08, "word": " hat,", "probability": 0.83642578125}, {"start": 2483.34, "end": 2483.52, "word": " the", "probability": 0.908203125}, {"start": 2483.52, "end": 2483.88, "word": " predicted", "probability": 0.42822265625}, {"start": 2483.88, "end": 2484.22, "word": " value", "probability": 0.9677734375}, {"start": 2484.22, "end": 2484.4, "word": " of", "probability": 0.96630859375}, 
{"start": 2484.4, "end": 2484.7, "word": " y", "probability": 0.94921875}, {"start": 2484.7, "end": 2485.76, "word": " equals", "probability": 0.50537109375}, {"start": 2485.76, "end": 2486.46, "word": " b0", "probability": 0.902587890625}, {"start": 2486.46, "end": 2486.72, "word": " plus", "probability": 0.92919921875}, {"start": 2486.72, "end": 2487.12, "word": " b1", "probability": 0.98681640625}, {"start": 2487.12, "end": 2487.66, "word": " times", "probability": 0.921875}, {"start": 2487.66, "end": 2489.38, "word": " x1.", "probability": 0.97998046875}, {"start": 2491.24, "end": 2491.74, "word": " Now", "probability": 0.95849609375}, {"start": 2491.74, "end": 2492.1, "word": " these", "probability": 0.50390625}, {"start": 2492.1, "end": 2492.84, "word": " coefficients,", "probability": 0.89111328125}, {"start": 2493.08, "end": 2493.46, "word": " b0", "probability": 0.968505859375}, {"start": 2493.46, "end": 2493.62, "word": " and", "probability": 0.9423828125}, {"start": 2493.62, "end": 2494.02, "word": " b1", "probability": 0.99755859375}, {"start": 2494.02, "end": 2494.26, "word": " can", "probability": 0.5810546875}, {"start": 2494.26, "end": 2495.28, "word": " be", "probability": 0.95458984375}, {"start": 2495.28, "end": 2495.96, "word": " computed", "probability": 0.8828125}], "temperature": 1.0}, {"id": 95, "seek": 252667, "start": 2497.9, "end": 2526.68, "text": " by the following equations. So the regression equation is given by y hat equals b0 plus b1x. 
Now the slope, b1, is r times standard deviation of y", "tokens": [538, 264, 3480, 11787, 13, 407, 264, 24590, 5367, 307, 2212, 538, 288, 2385, 6915, 272, 15, 1804, 272, 16, 87, 13, 823, 264, 13525, 11, 272, 16, 11, 307, 367, 1413, 3832, 25163, 295, 288], "avg_logprob": -0.23205237130861026, "compression_ratio": 1.2894736842105263, "no_speech_prob": 0.0, "words": [{"start": 2497.9, "end": 2498.4, "word": " by", "probability": 0.368896484375}, {"start": 2498.4, "end": 2499.22, "word": " the", "probability": 0.89892578125}, {"start": 2499.22, "end": 2499.48, "word": " following", "probability": 0.859375}, {"start": 2499.48, "end": 2500.04, "word": " equations.", "probability": 0.89013671875}, {"start": 2501.68, "end": 2502.54, "word": " So", "probability": 0.95068359375}, {"start": 2502.54, "end": 2502.72, "word": " the", "probability": 0.7294921875}, {"start": 2502.72, "end": 2503.04, "word": " regression", "probability": 0.95947265625}, {"start": 2503.04, "end": 2503.64, "word": " equation", "probability": 0.9775390625}, {"start": 2503.64, "end": 2512.92, "word": " is", "probability": 0.76318359375}, {"start": 2512.92, "end": 2513.16, "word": " given", "probability": 0.90625}, {"start": 2513.16, "end": 2513.54, "word": " by", "probability": 0.96435546875}, {"start": 2513.54, "end": 2515.12, "word": " y", "probability": 0.6318359375}, {"start": 2515.12, "end": 2515.38, "word": " hat", "probability": 0.89453125}, {"start": 2515.38, "end": 2515.7, "word": " equals", "probability": 0.603515625}, {"start": 2515.7, "end": 2516.18, "word": " b0", "probability": 0.615234375}, {"start": 2516.18, "end": 2516.52, "word": " plus", "probability": 0.9423828125}, {"start": 2516.52, "end": 2517.26, "word": " b1x.", "probability": 0.82470703125}, {"start": 2519.94, "end": 2520.4, "word": " Now", "probability": 0.90576171875}, {"start": 2520.4, "end": 2520.58, "word": " the", "probability": 0.4697265625}, {"start": 2520.58, "end": 2520.94, "word": " slope,", "probability": 
0.94970703125}, {"start": 2521.24, "end": 2521.68, "word": " b1,", "probability": 0.983154296875}, {"start": 2523.2, "end": 2523.66, "word": " is", "probability": 0.93701171875}, {"start": 2523.66, "end": 2524.08, "word": " r", "probability": 0.64599609375}, {"start": 2524.08, "end": 2525.46, "word": " times", "probability": 0.92822265625}, {"start": 2525.46, "end": 2525.82, "word": " standard", "probability": 0.666015625}, {"start": 2525.82, "end": 2526.14, "word": " deviation", "probability": 0.89990234375}, {"start": 2526.14, "end": 2526.36, "word": " of", "probability": 0.9677734375}, {"start": 2526.36, "end": 2526.68, "word": " y", "probability": 0.9873046875}], "temperature": 1.0}, {"id": 96, "seek": 255362, "start": 2527.5, "end": 2553.62, "text": " Times standard deviation of x. This is the simplest equation to determine the value of the star. B1r, r is the correlation coefficient. Sy is xr, the standard deviations of y and x. Where b0, which is y intercept, is y bar minus b x bar, or b1 x bar.", "tokens": [11366, 3832, 25163, 295, 2031, 13, 639, 307, 264, 22811, 5367, 281, 6997, 264, 2158, 295, 264, 3543, 13, 363, 16, 81, 11, 367, 307, 264, 20009, 17619, 13, 3902, 307, 2031, 81, 11, 264, 3832, 31219, 763, 295, 288, 293, 2031, 13, 2305, 272, 15, 11, 597, 307, 288, 24700, 11, 307, 288, 2159, 3175, 272, 2031, 2159, 11, 420, 272, 16, 2031, 2159, 13], "avg_logprob": -0.24277051838476266, "compression_ratio": 1.5493827160493827, "no_speech_prob": 0.0, "words": [{"start": 2527.5, "end": 2527.92, "word": " Times", "probability": 0.19921875}, {"start": 2527.92, "end": 2528.18, "word": " standard", "probability": 0.578125}, {"start": 2528.18, "end": 2528.42, "word": " deviation", "probability": 0.85546875}, {"start": 2528.42, "end": 2528.56, "word": " of", "probability": 0.833984375}, {"start": 2528.56, "end": 2528.74, "word": " x.", "probability": 0.6171875}, {"start": 2529.58, "end": 2530.26, "word": " This", "probability": 0.89111328125}, {"start": 2530.26, 
"end": 2530.38, "word": " is", "probability": 0.95068359375}, {"start": 2530.38, "end": 2530.54, "word": " the", "probability": 0.91552734375}, {"start": 2530.54, "end": 2530.88, "word": " simplest", "probability": 0.8916015625}, {"start": 2530.88, "end": 2531.42, "word": " equation", "probability": 0.9755859375}, {"start": 2531.42, "end": 2531.96, "word": " to", "probability": 0.9541015625}, {"start": 2531.96, "end": 2532.52, "word": " determine", "probability": 0.92919921875}, {"start": 2532.52, "end": 2533.4, "word": " the", "probability": 0.89013671875}, {"start": 2533.4, "end": 2533.58, "word": " value", "probability": 0.84912109375}, {"start": 2533.58, "end": 2533.7, "word": " of", "probability": 0.96484375}, {"start": 2533.7, "end": 2533.82, "word": " the", "probability": 0.7822265625}, {"start": 2533.82, "end": 2534.06, "word": " star.", "probability": 0.66162109375}, {"start": 2534.58, "end": 2535.26, "word": " B1r,", "probability": 0.613037109375}, {"start": 2535.4, "end": 2535.64, "word": " r", "probability": 0.9697265625}, {"start": 2535.64, "end": 2535.8, "word": " is", "probability": 0.93310546875}, {"start": 2535.8, "end": 2535.92, "word": " the", "probability": 0.75}, {"start": 2535.92, "end": 2536.28, "word": " correlation", "probability": 0.919921875}, {"start": 2536.28, "end": 2536.86, "word": " coefficient.", "probability": 0.94091796875}, {"start": 2538.22, "end": 2538.74, "word": " Sy", "probability": 0.86962890625}, {"start": 2538.74, "end": 2538.98, "word": " is", "probability": 0.39794921875}, {"start": 2538.98, "end": 2539.7, "word": " xr,", "probability": 0.786376953125}, {"start": 2539.94, "end": 2540.2, "word": " the", "probability": 0.79443359375}, {"start": 2540.2, "end": 2540.5, "word": " standard", "probability": 0.95458984375}, {"start": 2540.5, "end": 2541.0, "word": " deviations", "probability": 0.918212890625}, {"start": 2541.0, "end": 2541.26, "word": " of", "probability": 0.96484375}, {"start": 2541.26, "end": 2541.54, "word": 
" y", "probability": 0.98828125}, {"start": 2541.54, "end": 2541.72, "word": " and", "probability": 0.943359375}, {"start": 2541.72, "end": 2542.06, "word": " x.", "probability": 0.99755859375}, {"start": 2543.64, "end": 2544.24, "word": " Where", "probability": 0.8427734375}, {"start": 2544.24, "end": 2545.08, "word": " b0,", "probability": 0.6103515625}, {"start": 2545.52, "end": 2545.88, "word": " which", "probability": 0.66357421875}, {"start": 2545.88, "end": 2545.96, "word": " is", "probability": 0.9541015625}, {"start": 2545.96, "end": 2546.22, "word": " y", "probability": 0.98388671875}, {"start": 2546.22, "end": 2546.58, "word": " intercept,", "probability": 0.876953125}, {"start": 2547.44, "end": 2547.9, "word": " is", "probability": 0.931640625}, {"start": 2547.9, "end": 2548.32, "word": " y", "probability": 0.98681640625}, {"start": 2548.32, "end": 2548.56, "word": " bar", "probability": 0.865234375}, {"start": 2548.56, "end": 2548.92, "word": " minus", "probability": 0.95751953125}, {"start": 2548.92, "end": 2549.14, "word": " b", "probability": 0.90283203125}, {"start": 2549.14, "end": 2549.36, "word": " x", "probability": 0.51904296875}, {"start": 2549.36, "end": 2549.68, "word": " bar,", "probability": 0.9404296875}, {"start": 2549.9, "end": 2550.88, "word": " or", "probability": 0.908203125}, {"start": 2550.88, "end": 2551.34, "word": " b1", "probability": 0.949951171875}, {"start": 2551.34, "end": 2553.2, "word": " x", "probability": 0.78173828125}, {"start": 2553.2, "end": 2553.62, "word": " bar.", "probability": 0.93896484375}], "temperature": 1.0}, {"id": 97, "seek": 257896, "start": 2555.12, "end": 2578.96, "text": " Sx, as we know, is the sum of x minus y squared divided by n minus 1 under square root, similarly for y values. So this, how can we, these formulas compute the values of b0 and b1. 
So we are going to use these equations in order to determine the values of b0 and b1.", "tokens": [318, 87, 11, 382, 321, 458, 11, 307, 264, 2408, 295, 2031, 3175, 288, 8889, 6666, 538, 297, 3175, 502, 833, 3732, 5593, 11, 14138, 337, 288, 4190, 13, 407, 341, 11, 577, 393, 321, 11, 613, 30546, 14722, 264, 4190, 295, 272, 15, 293, 272, 16, 13, 407, 321, 366, 516, 281, 764, 613, 11787, 294, 1668, 281, 6997, 264, 4190, 295, 272, 15, 293, 272, 16, 13], "avg_logprob": -0.2294642897588866, "compression_ratio": 1.6181818181818182, "no_speech_prob": 0.0, "words": [{"start": 2555.12, "end": 2555.66, "word": " Sx,", "probability": 0.55242919921875}, {"start": 2555.8, "end": 2555.98, "word": " as", "probability": 0.96533203125}, {"start": 2555.98, "end": 2556.12, "word": " we", "probability": 0.8916015625}, {"start": 2556.12, "end": 2556.34, "word": " know,", "probability": 0.88623046875}, {"start": 2556.48, "end": 2556.58, "word": " is", "probability": 0.65625}, {"start": 2556.58, "end": 2556.84, "word": " the", "probability": 0.373046875}, {"start": 2556.84, "end": 2557.08, "word": " sum", "probability": 0.92919921875}, {"start": 2557.08, "end": 2557.36, "word": " of", "probability": 0.951171875}, {"start": 2557.36, "end": 2557.5, "word": " x", "probability": 0.9345703125}, {"start": 2557.5, "end": 2557.8, "word": " minus", "probability": 0.875}, {"start": 2557.8, "end": 2558.1, "word": " y", "probability": 0.17724609375}, {"start": 2558.1, "end": 2558.52, "word": " squared", "probability": 0.833984375}, {"start": 2558.52, "end": 2558.72, "word": " divided", "probability": 0.57470703125}, {"start": 2558.72, "end": 2558.86, "word": " by", "probability": 0.96875}, {"start": 2558.86, "end": 2559.04, "word": " n", "probability": 0.93408203125}, {"start": 2559.04, "end": 2559.32, "word": " minus", "probability": 0.98291015625}, {"start": 2559.32, "end": 2559.54, "word": " 1", "probability": 0.787109375}, {"start": 2559.54, "end": 2559.84, "word": " under", "probability": 
0.6474609375}, {"start": 2559.84, "end": 2560.18, "word": " square", "probability": 0.68408203125}, {"start": 2560.18, "end": 2560.46, "word": " root,", "probability": 0.92138671875}, {"start": 2560.9, "end": 2561.3, "word": " similarly", "probability": 0.90185546875}, {"start": 2561.3, "end": 2561.94, "word": " for", "probability": 0.86474609375}, {"start": 2561.94, "end": 2562.3, "word": " y", "probability": 0.9619140625}, {"start": 2562.3, "end": 2562.92, "word": " values.", "probability": 0.9482421875}, {"start": 2564.16, "end": 2564.38, "word": " So", "probability": 0.91259765625}, {"start": 2564.38, "end": 2564.96, "word": " this,", "probability": 0.491943359375}, {"start": 2565.08, "end": 2565.2, "word": " how", "probability": 0.92431640625}, {"start": 2565.2, "end": 2565.44, "word": " can", "probability": 0.94189453125}, {"start": 2565.44, "end": 2565.7, "word": " we,", "probability": 0.93408203125}, {"start": 2566.78, "end": 2567.06, "word": " these", "probability": 0.654296875}, {"start": 2567.06, "end": 2567.54, "word": " formulas", "probability": 0.96533203125}, {"start": 2567.54, "end": 2568.76, "word": " compute", "probability": 0.6435546875}, {"start": 2568.76, "end": 2569.14, "word": " the", "probability": 0.91552734375}, {"start": 2569.14, "end": 2569.52, "word": " values", "probability": 0.96435546875}, {"start": 2569.52, "end": 2570.02, "word": " of", "probability": 0.96630859375}, {"start": 2570.02, "end": 2571.46, "word": " b0", "probability": 0.62158203125}, {"start": 2571.46, "end": 2571.6, "word": " and", "probability": 0.93896484375}, {"start": 2571.6, "end": 2571.86, "word": " b1.", "probability": 0.991455078125}, {"start": 2572.08, "end": 2572.28, "word": " So", "probability": 0.958984375}, {"start": 2572.28, "end": 2572.38, "word": " we", "probability": 0.876953125}, {"start": 2572.38, "end": 2572.5, "word": " are", "probability": 0.9111328125}, {"start": 2572.5, "end": 2572.72, "word": " going", "probability": 0.9423828125}, {"start": 
2572.72, "end": 2572.88, "word": " to", "probability": 0.96484375}, {"start": 2572.88, "end": 2573.06, "word": " use", "probability": 0.85693359375}, {"start": 2573.06, "end": 2573.3, "word": " these", "probability": 0.77587890625}, {"start": 2573.3, "end": 2573.88, "word": " equations", "probability": 0.9228515625}, {"start": 2573.88, "end": 2574.16, "word": " in", "probability": 0.92431640625}, {"start": 2574.16, "end": 2574.36, "word": " order", "probability": 0.9140625}, {"start": 2574.36, "end": 2574.6, "word": " to", "probability": 0.96533203125}, {"start": 2574.6, "end": 2575.06, "word": " determine", "probability": 0.9384765625}, {"start": 2575.06, "end": 2576.08, "word": " the", "probability": 0.90625}, {"start": 2576.08, "end": 2576.4, "word": " values", "probability": 0.962890625}, {"start": 2576.4, "end": 2576.54, "word": " of", "probability": 0.96533203125}, {"start": 2576.54, "end": 2577.02, "word": " b0", "probability": 0.94873046875}, {"start": 2577.02, "end": 2578.08, "word": " and", "probability": 0.93115234375}, {"start": 2578.08, "end": 2578.96, "word": " b1.", "probability": 0.991455078125}], "temperature": 1.0}, {"id": 98, "seek": 261211, "start": 2584.67, "end": 2612.11, "text": " Now, what's your interpretation about the slope and the intercept? For example, suppose we are talking about your score Y and X number of missing classes. 
And suppose, for example, Y hat", "tokens": [823, 11, 437, 311, 428, 14174, 466, 264, 13525, 293, 264, 24700, 30, 1171, 1365, 11, 7297, 321, 366, 1417, 466, 428, 6175, 398, 293, 1783, 1230, 295, 5361, 5359, 13, 400, 7297, 11, 337, 1365, 11, 398, 2385], "avg_logprob": -0.2259765576571226, "compression_ratio": 1.385185185185185, "no_speech_prob": 0.0, "words": [{"start": 2584.67, "end": 2584.99, "word": " Now,", "probability": 0.7666015625}, {"start": 2585.17, "end": 2585.43, "word": " what's", "probability": 0.84326171875}, {"start": 2585.43, "end": 2585.57, "word": " your", "probability": 0.8935546875}, {"start": 2585.57, "end": 2586.41, "word": " interpretation", "probability": 0.85595703125}, {"start": 2586.41, "end": 2587.07, "word": " about", "probability": 0.8603515625}, {"start": 2587.07, "end": 2587.37, "word": " the", "probability": 0.83544921875}, {"start": 2587.37, "end": 2587.71, "word": " slope", "probability": 0.80810546875}, {"start": 2587.71, "end": 2588.29, "word": " and", "probability": 0.90771484375}, {"start": 2588.29, "end": 2589.43, "word": " the", "probability": 0.88427734375}, {"start": 2589.43, "end": 2589.77, "word": " intercept?", "probability": 0.5234375}, {"start": 2589.97, "end": 2590.13, "word": " For", "probability": 0.94091796875}, {"start": 2590.13, "end": 2590.51, "word": " example,", "probability": 0.97119140625}, {"start": 2590.65, "end": 2591.09, "word": " suppose", "probability": 0.88671875}, {"start": 2591.09, "end": 2592.89, "word": " we", "probability": 0.734375}, {"start": 2592.89, "end": 2593.13, "word": " are", "probability": 0.8896484375}, {"start": 2593.13, "end": 2593.63, "word": " talking", "probability": 0.830078125}, {"start": 2593.63, "end": 2594.07, "word": " about", "probability": 0.91748046875}, {"start": 2594.07, "end": 2594.33, "word": " your", "probability": 0.67578125}, {"start": 2594.33, "end": 2594.69, "word": " score", "probability": 0.79931640625}, {"start": 2594.69, "end": 2595.07, "word": " Y", 
"probability": 0.3974609375}, {"start": 2595.07, "end": 2598.61, "word": " and", "probability": 0.57763671875}, {"start": 2598.61, "end": 2599.57, "word": " X", "probability": 0.94482421875}, {"start": 2599.57, "end": 2600.99, "word": " number", "probability": 0.9140625}, {"start": 2600.99, "end": 2601.27, "word": " of", "probability": 0.96533203125}, {"start": 2601.27, "end": 2601.55, "word": " missing", "probability": 0.86279296875}, {"start": 2601.55, "end": 2602.11, "word": " classes.", "probability": 0.91845703125}, {"start": 2609.21, "end": 2609.95, "word": " And", "probability": 0.8857421875}, {"start": 2609.95, "end": 2610.25, "word": " suppose,", "probability": 0.88427734375}, {"start": 2610.39, "end": 2610.47, "word": " for", "probability": 0.95361328125}, {"start": 2610.47, "end": 2610.87, "word": " example,", "probability": 0.97265625}, {"start": 2611.09, "end": 2611.67, "word": " Y", "probability": 0.955078125}, {"start": 2611.67, "end": 2612.11, "word": " hat", "probability": 0.896484375}], "temperature": 1.0}, {"id": 99, "seek": 263920, "start": 2612.96, "end": 2639.2, "text": " Equal 95 minus 5x. Now let's see what's the interpretation of B0. This is B0. So B0 is 95. And B1 is 5. Now what's your interpretation about B0 and B1? 
B0 is the estimated mean value of Y when the value of X is 0.", "tokens": [15624, 304, 13420, 3175, 1025, 87, 13, 823, 718, 311, 536, 437, 311, 264, 14174, 295, 363, 15, 13, 639, 307, 363, 15, 13, 407, 363, 15, 307, 13420, 13, 400, 363, 16, 307, 1025, 13, 823, 437, 311, 428, 14174, 466, 363, 15, 293, 363, 16, 30, 363, 15, 307, 264, 14109, 914, 2158, 295, 398, 562, 264, 2158, 295, 1783, 307, 1958, 13], "avg_logprob": -0.18880208446220917, "compression_ratio": 1.445945945945946, "no_speech_prob": 0.0, "words": [{"start": 2612.96, "end": 2613.42, "word": " Equal", "probability": 0.4514312744140625}, {"start": 2613.42, "end": 2613.88, "word": " 95", "probability": 0.89208984375}, {"start": 2613.88, "end": 2614.58, "word": " minus", "probability": 0.83056640625}, {"start": 2614.58, "end": 2615.46, "word": " 5x.", "probability": 0.71337890625}, {"start": 2617.78, "end": 2618.5, "word": " Now", "probability": 0.86328125}, {"start": 2618.5, "end": 2618.74, "word": " let's", "probability": 0.82177734375}, {"start": 2618.74, "end": 2618.88, "word": " see", "probability": 0.92333984375}, {"start": 2618.88, "end": 2619.16, "word": " what's", "probability": 0.859619140625}, {"start": 2619.16, "end": 2619.34, "word": " the", "probability": 0.9150390625}, {"start": 2619.34, "end": 2619.92, "word": " interpretation", "probability": 0.89599609375}, {"start": 2619.92, "end": 2620.74, "word": " of", "probability": 0.9580078125}, {"start": 2620.74, "end": 2621.42, "word": " B0.", "probability": 0.74072265625}, {"start": 2622.3, "end": 2622.7, "word": " This", "probability": 0.80517578125}, {"start": 2622.7, "end": 2622.8, "word": " is", "probability": 0.9365234375}, {"start": 2622.8, "end": 2623.24, "word": " B0.", "probability": 0.979736328125}, {"start": 2623.62, "end": 2623.86, "word": " So", "probability": 0.94140625}, {"start": 2623.86, "end": 2624.26, "word": " B0", "probability": 0.894287109375}, {"start": 2624.26, "end": 2624.48, "word": " is", "probability": 0.94140625}, 
{"start": 2624.48, "end": 2625.06, "word": " 95.", "probability": 0.97314453125}, {"start": 2627.66, "end": 2628.38, "word": " And", "probability": 0.89794921875}, {"start": 2628.38, "end": 2628.74, "word": " B1", "probability": 0.986572265625}, {"start": 2628.74, "end": 2628.96, "word": " is", "probability": 0.95068359375}, {"start": 2628.96, "end": 2629.28, "word": " 5.", "probability": 0.888671875}, {"start": 2630.2, "end": 2630.52, "word": " Now", "probability": 0.9169921875}, {"start": 2630.52, "end": 2630.8, "word": " what's", "probability": 0.852294921875}, {"start": 2630.8, "end": 2630.9, "word": " your", "probability": 0.875}, {"start": 2630.9, "end": 2631.44, "word": " interpretation", "probability": 0.87255859375}, {"start": 2631.44, "end": 2631.96, "word": " about", "probability": 0.85400390625}, {"start": 2631.96, "end": 2632.48, "word": " B0", "probability": 0.93994140625}, {"start": 2632.48, "end": 2632.66, "word": " and", "probability": 0.9423828125}, {"start": 2632.66, "end": 2633.02, "word": " B1?", "probability": 0.997314453125}, {"start": 2635.06, "end": 2635.78, "word": " B0", "probability": 0.98828125}, {"start": 2635.78, "end": 2636.0, "word": " is", "probability": 0.94580078125}, {"start": 2636.0, "end": 2636.14, "word": " the", "probability": 0.89892578125}, {"start": 2636.14, "end": 2636.62, "word": " estimated", "probability": 0.89306640625}, {"start": 2636.62, "end": 2636.9, "word": " mean", "probability": 0.96728515625}, {"start": 2636.9, "end": 2637.22, "word": " value", "probability": 0.9736328125}, {"start": 2637.22, "end": 2637.42, "word": " of", "probability": 0.966796875}, {"start": 2637.42, "end": 2637.74, "word": " Y", "probability": 0.5615234375}, {"start": 2637.74, "end": 2638.18, "word": " when", "probability": 0.76513671875}, {"start": 2638.18, "end": 2638.36, "word": " the", "probability": 0.9072265625}, {"start": 2638.36, "end": 2638.52, "word": " value", "probability": 0.9755859375}, {"start": 2638.52, "end": 2638.72, 
"word": " of", "probability": 0.90478515625}, {"start": 2638.72, "end": 2638.78, "word": " X", "probability": 0.90185546875}, {"start": 2638.78, "end": 2638.94, "word": " is", "probability": 0.9462890625}, {"start": 2638.94, "end": 2639.2, "word": " 0.", "probability": 0.6259765625}], "temperature": 1.0}, {"id": 100, "seek": 266944, "start": 2640.68, "end": 2669.44, "text": " that means if the student does not miss any class that means x equals zero in this case we predict or we estimate the mean value of his score or her score is 95 so 95 it means when x is zero if x is zero then we expect his or", "tokens": [300, 1355, 498, 264, 3107, 775, 406, 1713, 604, 1508, 300, 1355, 2031, 6915, 4018, 294, 341, 1389, 321, 6069, 420, 321, 12539, 264, 914, 2158, 295, 702, 6175, 420, 720, 6175, 307, 13420, 370, 13420, 309, 1355, 562, 2031, 307, 4018, 498, 2031, 307, 4018, 550, 321, 2066, 702, 420], "avg_logprob": -0.19666465859000498, "compression_ratio": 1.6376811594202898, "no_speech_prob": 0.0, "words": [{"start": 2640.68, "end": 2641.04, "word": " that", "probability": 0.22216796875}, {"start": 2641.04, "end": 2641.42, "word": " means", "probability": 0.912109375}, {"start": 2641.42, "end": 2642.28, "word": " if", "probability": 0.79833984375}, {"start": 2642.28, "end": 2642.56, "word": " the", "probability": 0.8427734375}, {"start": 2642.56, "end": 2643.08, "word": " student", "probability": 0.92578125}, {"start": 2643.08, "end": 2645.4, "word": " does", "probability": 0.89990234375}, {"start": 2645.4, "end": 2645.66, "word": " not", "probability": 0.955078125}, {"start": 2645.66, "end": 2645.96, "word": " miss", "probability": 0.89697265625}, {"start": 2645.96, "end": 2646.26, "word": " any", "probability": 0.908203125}, {"start": 2646.26, "end": 2646.8, "word": " class", "probability": 0.93603515625}, {"start": 2646.8, "end": 2648.02, "word": " that", "probability": 0.5634765625}, {"start": 2648.02, "end": 2648.26, "word": " means", "probability": 0.89306640625}, 
{"start": 2648.26, "end": 2648.5, "word": " x", "probability": 0.84130859375}, {"start": 2648.5, "end": 2648.82, "word": " equals", "probability": 0.30419921875}, {"start": 2648.82, "end": 2649.18, "word": " zero", "probability": 0.6357421875}, {"start": 2649.18, "end": 2650.02, "word": " in", "probability": 0.677734375}, {"start": 2650.02, "end": 2650.28, "word": " this", "probability": 0.947265625}, {"start": 2650.28, "end": 2650.64, "word": " case", "probability": 0.9130859375}, {"start": 2650.64, "end": 2650.96, "word": " we", "probability": 0.77587890625}, {"start": 2650.96, "end": 2651.54, "word": " predict", "probability": 0.875}, {"start": 2651.54, "end": 2652.44, "word": " or", "probability": 0.865234375}, {"start": 2652.44, "end": 2652.62, "word": " we", "probability": 0.89599609375}, {"start": 2652.62, "end": 2653.26, "word": " estimate", "probability": 0.9111328125}, {"start": 2653.26, "end": 2654.02, "word": " the", "probability": 0.89501953125}, {"start": 2654.02, "end": 2654.22, "word": " mean", "probability": 0.96337890625}, {"start": 2654.22, "end": 2654.64, "word": " value", "probability": 0.95947265625}, {"start": 2654.64, "end": 2654.88, "word": " of", "probability": 0.94921875}, {"start": 2654.88, "end": 2655.06, "word": " his", "probability": 0.9501953125}, {"start": 2655.06, "end": 2655.62, "word": " score", "probability": 0.841796875}, {"start": 2655.62, "end": 2656.1, "word": " or", "probability": 0.90771484375}, {"start": 2656.1, "end": 2656.3, "word": " her", "probability": 0.91455078125}, {"start": 2656.3, "end": 2656.78, "word": " score", "probability": 0.86083984375}, {"start": 2656.78, "end": 2657.22, "word": " is", "probability": 0.521484375}, {"start": 2657.22, "end": 2657.8, "word": " 95", "probability": 0.66650390625}, {"start": 2657.8, "end": 2659.88, "word": " so", "probability": 0.41455078125}, {"start": 2659.88, "end": 2660.3, "word": " 95", "probability": 0.93408203125}, {"start": 2660.3, "end": 2660.88, "word": " it", 
"probability": 0.71630859375}, {"start": 2660.88, "end": 2661.24, "word": " means", "probability": 0.93359375}, {"start": 2661.24, "end": 2661.54, "word": " when", "probability": 0.90234375}, {"start": 2661.54, "end": 2661.88, "word": " x", "probability": 0.9677734375}, {"start": 2661.88, "end": 2662.06, "word": " is", "probability": 0.92431640625}, {"start": 2662.06, "end": 2662.46, "word": " zero", "probability": 0.810546875}, {"start": 2662.46, "end": 2663.66, "word": " if", "probability": 0.73681640625}, {"start": 2663.66, "end": 2663.98, "word": " x", "probability": 0.990234375}, {"start": 2663.98, "end": 2664.16, "word": " is", "probability": 0.88427734375}, {"start": 2664.16, "end": 2664.5, "word": " zero", "probability": 0.892578125}, {"start": 2664.5, "end": 2667.0, "word": " then", "probability": 0.81396484375}, {"start": 2667.0, "end": 2667.5, "word": " we", "probability": 0.95361328125}, {"start": 2667.5, "end": 2668.06, "word": " expect", "probability": 0.93310546875}, {"start": 2668.06, "end": 2668.96, "word": " his", "probability": 0.9375}, {"start": 2668.96, "end": 2669.44, "word": " or", "probability": 0.9521484375}], "temperature": 1.0}, {"id": 101, "seek": 269579, "start": 2669.89, "end": 2695.79, "text": " Here, the score is 95. So that means B0 is the estimated mean value of Y when the value of X is 0. Now, what's the meaning of the slope? The slope in this case is negative Y. 
B1, which is the slope, is the estimated change in the mean of Y.", "tokens": [1692, 11, 264, 6175, 307, 13420, 13, 407, 300, 1355, 363, 15, 307, 264, 14109, 914, 2158, 295, 398, 562, 264, 2158, 295, 1783, 307, 1958, 13, 823, 11, 437, 311, 264, 3620, 295, 264, 13525, 30, 440, 13525, 294, 341, 1389, 307, 3671, 398, 13, 363, 16, 11, 597, 307, 264, 13525, 11, 307, 264, 14109, 1319, 294, 264, 914, 295, 398, 13], "avg_logprob": -0.21165865384615384, "compression_ratio": 1.5751633986928104, "no_speech_prob": 0.0, "words": [{"start": 2669.89, "end": 2670.35, "word": " Here,", "probability": 0.55859375}, {"start": 2670.59, "end": 2670.67, "word": " the", "probability": 0.367919921875}, {"start": 2670.67, "end": 2671.03, "word": " score", "probability": 0.78466796875}, {"start": 2671.03, "end": 2672.03, "word": " is", "probability": 0.93017578125}, {"start": 2672.03, "end": 2672.43, "word": " 95.", "probability": 0.31201171875}, {"start": 2674.19, "end": 2674.75, "word": " So", "probability": 0.921875}, {"start": 2674.75, "end": 2674.95, "word": " that", "probability": 0.794921875}, {"start": 2674.95, "end": 2675.35, "word": " means", "probability": 0.89892578125}, {"start": 2675.35, "end": 2676.57, "word": " B0", "probability": 0.5513916015625}, {"start": 2676.57, "end": 2676.83, "word": " is", "probability": 0.93701171875}, {"start": 2676.83, "end": 2676.99, "word": " the", "probability": 0.72216796875}, {"start": 2676.99, "end": 2677.47, "word": " estimated", "probability": 0.91162109375}, {"start": 2677.47, "end": 2677.69, "word": " mean", "probability": 0.92236328125}, {"start": 2677.69, "end": 2677.97, "word": " value", "probability": 0.97216796875}, {"start": 2677.97, "end": 2678.17, "word": " of", "probability": 0.96337890625}, {"start": 2678.17, "end": 2678.45, "word": " Y", "probability": 0.6669921875}, {"start": 2678.45, "end": 2679.03, "word": " when", "probability": 0.744140625}, {"start": 2679.03, "end": 2679.59, "word": " the", "probability": 0.89697265625}, 
{"start": 2679.59, "end": 2679.83, "word": " value", "probability": 0.97314453125}, {"start": 2679.83, "end": 2679.99, "word": " of", "probability": 0.921875}, {"start": 2679.99, "end": 2680.17, "word": " X", "probability": 0.93408203125}, {"start": 2680.17, "end": 2680.35, "word": " is", "probability": 0.93896484375}, {"start": 2680.35, "end": 2680.63, "word": " 0.", "probability": 0.60498046875}, {"start": 2683.37, "end": 2683.93, "word": " Now,", "probability": 0.9453125}, {"start": 2683.97, "end": 2684.23, "word": " what's", "probability": 0.938720703125}, {"start": 2684.23, "end": 2684.43, "word": " the", "probability": 0.92138671875}, {"start": 2684.43, "end": 2684.67, "word": " meaning", "probability": 0.8701171875}, {"start": 2684.67, "end": 2685.13, "word": " of", "probability": 0.966796875}, {"start": 2685.13, "end": 2685.71, "word": " the", "probability": 0.82470703125}, {"start": 2685.71, "end": 2686.01, "word": " slope?", "probability": 0.9130859375}, {"start": 2686.11, "end": 2686.27, "word": " The", "probability": 0.88916015625}, {"start": 2686.27, "end": 2686.45, "word": " slope", "probability": 0.9130859375}, {"start": 2686.45, "end": 2686.59, "word": " in", "probability": 0.857421875}, {"start": 2686.59, "end": 2686.77, "word": " this", "probability": 0.94580078125}, {"start": 2686.77, "end": 2686.97, "word": " case", "probability": 0.9189453125}, {"start": 2686.97, "end": 2687.17, "word": " is", "probability": 0.94384765625}, {"start": 2687.17, "end": 2687.45, "word": " negative", "probability": 0.55126953125}, {"start": 2687.45, "end": 2687.89, "word": " Y.", "probability": 0.41943359375}, {"start": 2689.67, "end": 2690.23, "word": " B1,", "probability": 0.966552734375}, {"start": 2690.55, "end": 2690.75, "word": " which", "probability": 0.9306640625}, {"start": 2690.75, "end": 2690.85, "word": " is", "probability": 0.94384765625}, {"start": 2690.85, "end": 2690.99, "word": " the", "probability": 0.90966796875}, {"start": 2690.99, "end": 
2691.29, "word": " slope,", "probability": 0.88916015625}, {"start": 2691.59, "end": 2691.77, "word": " is", "probability": 0.94091796875}, {"start": 2691.77, "end": 2691.89, "word": " the", "probability": 0.87060546875}, {"start": 2691.89, "end": 2692.43, "word": " estimated", "probability": 0.90966796875}, {"start": 2692.43, "end": 2693.11, "word": " change", "probability": 0.900390625}, {"start": 2693.11, "end": 2694.89, "word": " in", "probability": 0.90673828125}, {"start": 2694.89, "end": 2695.07, "word": " the", "probability": 0.927734375}, {"start": 2695.07, "end": 2695.23, "word": " mean", "probability": 0.9677734375}, {"start": 2695.23, "end": 2695.43, "word": " of", "probability": 0.97021484375}, {"start": 2695.43, "end": 2695.79, "word": " Y.", "probability": 0.98583984375}], "temperature": 1.0}, {"id": 102, "seek": 272245, "start": 2696.99, "end": 2722.45, "text": " as a result of a one unit change in x for example let's compute y for different values of x suppose x is one now we predict his score to be 95 minus 5 times 1 which is 90 when x is 2 for example", "tokens": [382, 257, 1874, 295, 257, 472, 4985, 1319, 294, 2031, 337, 1365, 718, 311, 14722, 288, 337, 819, 4190, 295, 2031, 7297, 2031, 307, 472, 586, 321, 6069, 702, 6175, 281, 312, 13420, 3175, 1025, 1413, 502, 597, 307, 4289, 562, 2031, 307, 568, 337, 1365], "avg_logprob": -0.19281915400890595, "compression_ratio": 1.4028776978417266, "no_speech_prob": 0.0, "words": [{"start": 2696.99, "end": 2697.39, "word": " as", "probability": 0.256591796875}, {"start": 2697.39, "end": 2697.61, "word": " a", "probability": 0.97900390625}, {"start": 2697.61, "end": 2697.93, "word": " result", "probability": 0.94775390625}, {"start": 2697.93, "end": 2698.65, "word": " of", "probability": 0.95947265625}, {"start": 2698.65, "end": 2698.93, "word": " a", "probability": 0.63427734375}, {"start": 2698.93, "end": 2699.15, "word": " one", "probability": 0.81884765625}, {"start": 2699.15, "end": 2699.55, "word": " 
unit", "probability": 0.7080078125}, {"start": 2699.55, "end": 2699.97, "word": " change", "probability": 0.8291015625}, {"start": 2699.97, "end": 2700.13, "word": " in", "probability": 0.93701171875}, {"start": 2700.13, "end": 2700.41, "word": " x", "probability": 0.6708984375}, {"start": 2700.41, "end": 2700.75, "word": " for", "probability": 0.254150390625}, {"start": 2700.75, "end": 2701.15, "word": " example", "probability": 0.97314453125}, {"start": 2701.15, "end": 2703.05, "word": " let's", "probability": 0.87890625}, {"start": 2703.05, "end": 2703.39, "word": " compute", "probability": 0.88525390625}, {"start": 2703.39, "end": 2703.85, "word": " y", "probability": 0.92919921875}, {"start": 2703.85, "end": 2704.73, "word": " for", "probability": 0.94384765625}, {"start": 2704.73, "end": 2705.17, "word": " different", "probability": 0.88134765625}, {"start": 2705.17, "end": 2705.53, "word": " values", "probability": 0.8212890625}, {"start": 2705.53, "end": 2705.67, "word": " of", "probability": 0.962890625}, {"start": 2705.67, "end": 2705.97, "word": " x", "probability": 0.99365234375}, {"start": 2705.97, "end": 2706.65, "word": " suppose", "probability": 0.75146484375}, {"start": 2706.65, "end": 2706.93, "word": " x", "probability": 0.99365234375}, {"start": 2706.93, "end": 2707.07, "word": " is", "probability": 0.9169921875}, {"start": 2707.07, "end": 2707.31, "word": " one", "probability": 0.67724609375}, {"start": 2707.31, "end": 2709.23, "word": " now", "probability": 0.8486328125}, {"start": 2709.23, "end": 2709.45, "word": " we", "probability": 0.95556640625}, {"start": 2709.45, "end": 2709.93, "word": " predict", "probability": 0.892578125}, {"start": 2709.93, "end": 2711.01, "word": " his", "probability": 0.95556640625}, {"start": 2711.01, "end": 2711.55, "word": " score", "probability": 0.890625}, {"start": 2711.55, "end": 2712.39, "word": " to", "probability": 0.96435546875}, {"start": 2712.39, "end": 2712.73, "word": " be", "probability": 
0.953125}, {"start": 2712.73, "end": 2713.95, "word": " 95", "probability": 0.8828125}, {"start": 2713.95, "end": 2715.01, "word": " minus", "probability": 0.955078125}, {"start": 2715.01, "end": 2715.51, "word": " 5", "probability": 0.54150390625}, {"start": 2715.51, "end": 2715.85, "word": " times", "probability": 0.9326171875}, {"start": 2715.85, "end": 2716.23, "word": " 1", "probability": 0.6416015625}, {"start": 2716.23, "end": 2718.59, "word": " which", "probability": 0.92822265625}, {"start": 2718.59, "end": 2718.73, "word": " is", "probability": 0.951171875}, {"start": 2718.73, "end": 2719.11, "word": " 90", "probability": 0.9326171875}, {"start": 2719.11, "end": 2721.27, "word": " when", "probability": 0.74365234375}, {"start": 2721.27, "end": 2721.51, "word": " x", "probability": 0.99365234375}, {"start": 2721.51, "end": 2721.65, "word": " is", "probability": 0.9462890625}, {"start": 2721.65, "end": 2721.85, "word": " 2", "probability": 0.673828125}, {"start": 2721.85, "end": 2722.05, "word": " for", "probability": 0.90771484375}, {"start": 2722.05, "end": 2722.45, "word": " example", "probability": 0.978515625}], "temperature": 1.0}, {"id": 103, "seek": 275303, "start": 2724.87, "end": 2753.03, "text": " Y hat is 95 minus 5 times 2, so that's 85. So for each one unit, there is a drop by five units in his score. 
That means if number of missing classes increases by one unit, then his or her weight is expected to be reduced by five units because the sign is negative.", "tokens": [398, 2385, 307, 13420, 3175, 1025, 1413, 568, 11, 370, 300, 311, 14695, 13, 407, 337, 1184, 472, 4985, 11, 456, 307, 257, 3270, 538, 1732, 6815, 294, 702, 6175, 13, 663, 1355, 498, 1230, 295, 5361, 5359, 8637, 538, 472, 4985, 11, 550, 702, 420, 720, 3364, 307, 5176, 281, 312, 9212, 538, 1732, 6815, 570, 264, 1465, 307, 3671, 13], "avg_logprob": -0.15587797051384336, "compression_ratio": 1.5317919075144508, "no_speech_prob": 0.0, "words": [{"start": 2724.87, "end": 2725.23, "word": " Y", "probability": 0.478271484375}, {"start": 2725.23, "end": 2725.47, "word": " hat", "probability": 0.685546875}, {"start": 2725.47, "end": 2725.63, "word": " is", "probability": 0.93017578125}, {"start": 2725.63, "end": 2726.09, "word": " 95", "probability": 0.61328125}, {"start": 2726.09, "end": 2726.49, "word": " minus", "probability": 0.94287109375}, {"start": 2726.49, "end": 2726.93, "word": " 5", "probability": 0.78076171875}, {"start": 2726.93, "end": 2727.27, "word": " times", "probability": 0.90576171875}, {"start": 2727.27, "end": 2727.59, "word": " 2,", "probability": 0.94921875}, {"start": 2727.71, "end": 2727.81, "word": " so", "probability": 0.89697265625}, {"start": 2727.81, "end": 2728.15, "word": " that's", "probability": 0.92529296875}, {"start": 2728.15, "end": 2728.57, "word": " 85.", "probability": 0.96826171875}, {"start": 2731.95, "end": 2732.53, "word": " So", "probability": 0.88037109375}, {"start": 2732.53, "end": 2732.81, "word": " for", "probability": 0.72802734375}, {"start": 2732.81, "end": 2733.17, "word": " each", "probability": 0.94677734375}, {"start": 2733.17, "end": 2733.53, "word": " one", "probability": 0.634765625}, {"start": 2733.53, "end": 2733.89, "word": " unit,", "probability": 0.95166015625}, {"start": 2734.61, "end": 2736.99, "word": " there", "probability": 0.8603515625}, 
{"start": 2736.99, "end": 2737.25, "word": " is", "probability": 0.9326171875}, {"start": 2737.25, "end": 2737.51, "word": " a", "probability": 0.99853515625}, {"start": 2737.51, "end": 2737.81, "word": " drop", "probability": 0.91015625}, {"start": 2737.81, "end": 2739.33, "word": " by", "probability": 0.93701171875}, {"start": 2739.33, "end": 2739.97, "word": " five", "probability": 0.525390625}, {"start": 2739.97, "end": 2740.39, "word": " units", "probability": 0.9521484375}, {"start": 2740.39, "end": 2740.55, "word": " in", "probability": 0.90771484375}, {"start": 2740.55, "end": 2740.69, "word": " his", "probability": 0.5107421875}, {"start": 2740.69, "end": 2741.13, "word": " score.", "probability": 0.89013671875}, {"start": 2741.47, "end": 2741.89, "word": " That", "probability": 0.90087890625}, {"start": 2741.89, "end": 2742.23, "word": " means", "probability": 0.9384765625}, {"start": 2742.23, "end": 2743.13, "word": " if", "probability": 0.75244140625}, {"start": 2743.13, "end": 2743.49, "word": " number", "probability": 0.8447265625}, {"start": 2743.49, "end": 2743.75, "word": " of", "probability": 0.9609375}, {"start": 2743.75, "end": 2744.05, "word": " missing", "probability": 0.8818359375}, {"start": 2744.05, "end": 2744.45, "word": " classes", "probability": 0.90478515625}, {"start": 2744.45, "end": 2744.99, "word": " increases", "probability": 0.904296875}, {"start": 2744.99, "end": 2745.29, "word": " by", "probability": 0.96728515625}, {"start": 2745.29, "end": 2745.57, "word": " one", "probability": 0.89208984375}, {"start": 2745.57, "end": 2745.99, "word": " unit,", "probability": 0.9609375}, {"start": 2746.71, "end": 2746.93, "word": " then", "probability": 0.82373046875}, {"start": 2746.93, "end": 2747.25, "word": " his", "probability": 0.95703125}, {"start": 2747.25, "end": 2747.55, "word": " or", "probability": 0.90966796875}, {"start": 2747.55, "end": 2747.79, "word": " her", "probability": 0.96435546875}, {"start": 2747.79, "end": 2748.11, 
"word": " weight", "probability": 0.92529296875}, {"start": 2748.11, "end": 2748.59, "word": " is", "probability": 0.94140625}, {"start": 2748.59, "end": 2749.15, "word": " expected", "probability": 0.87060546875}, {"start": 2749.15, "end": 2749.49, "word": " to", "probability": 0.966796875}, {"start": 2749.49, "end": 2749.69, "word": " be", "probability": 0.94775390625}, {"start": 2749.69, "end": 2750.15, "word": " reduced", "probability": 0.9443359375}, {"start": 2750.15, "end": 2750.53, "word": " by", "probability": 0.966796875}, {"start": 2750.53, "end": 2751.43, "word": " five", "probability": 0.810546875}, {"start": 2751.43, "end": 2751.79, "word": " units", "probability": 0.9521484375}, {"start": 2751.79, "end": 2752.11, "word": " because", "probability": 0.603515625}, {"start": 2752.11, "end": 2752.37, "word": " the", "probability": 0.91943359375}, {"start": 2752.37, "end": 2752.57, "word": " sign", "probability": 0.89501953125}, {"start": 2752.57, "end": 2752.71, "word": " is", "probability": 0.94287109375}, {"start": 2752.71, "end": 2753.03, "word": " negative.", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 104, "seek": 278319, "start": 2755.33, "end": 2783.19, "text": " another example suppose again we are interested in whales and angels and imagine that just for example y equal y hat equals three plus four x now", "tokens": [1071, 1365, 7297, 797, 321, 366, 3102, 294, 32403, 293, 18175, 293, 3811, 300, 445, 337, 1365, 288, 2681, 288, 2385, 6915, 1045, 1804, 1451, 2031, 586], "avg_logprob": -0.2798549064568111, "compression_ratio": 1.4038461538461537, "no_speech_prob": 0.0, "words": [{"start": 2755.33, "end": 2755.67, "word": " another", "probability": 0.109375}, {"start": 2755.67, "end": 2756.15, "word": " example", "probability": 0.96875}, {"start": 2756.15, "end": 2757.01, "word": " suppose", "probability": 0.45751953125}, {"start": 2757.01, "end": 2757.79, "word": " again", "probability": 0.8876953125}, {"start": 2757.79, "end": 
2759.49, "word": " we", "probability": 0.85205078125}, {"start": 2759.49, "end": 2759.83, "word": " are", "probability": 0.94287109375}, {"start": 2759.83, "end": 2760.39, "word": " interested", "probability": 0.8759765625}, {"start": 2760.39, "end": 2761.11, "word": " in", "probability": 0.90966796875}, {"start": 2761.11, "end": 2762.31, "word": " whales", "probability": 0.91455078125}, {"start": 2762.31, "end": 2765.91, "word": " and", "probability": 0.9033203125}, {"start": 2765.91, "end": 2766.43, "word": " angels", "probability": 0.931640625}, {"start": 2766.43, "end": 2769.25, "word": " and", "probability": 0.80615234375}, {"start": 2769.25, "end": 2769.57, "word": " imagine", "probability": 0.921875}, {"start": 2769.57, "end": 2770.55, "word": " that", "probability": 0.9375}, {"start": 2770.55, "end": 2776.17, "word": " just", "probability": 0.8271484375}, {"start": 2776.17, "end": 2776.35, "word": " for", "probability": 0.95947265625}, {"start": 2776.35, "end": 2776.71, "word": " example", "probability": 0.97509765625}, {"start": 2776.71, "end": 2777.97, "word": " y", "probability": 0.751953125}, {"start": 2777.97, "end": 2778.31, "word": " equal", "probability": 0.296875}, {"start": 2778.31, "end": 2778.65, "word": " y", "probability": 0.73486328125}, {"start": 2778.65, "end": 2778.91, "word": " hat", "probability": 0.853515625}, {"start": 2778.91, "end": 2779.21, "word": " equals", "probability": 0.495361328125}, {"start": 2779.21, "end": 2780.77, "word": " three", "probability": 0.712890625}, {"start": 2780.77, "end": 2781.09, "word": " plus", "probability": 0.94091796875}, {"start": 2781.09, "end": 2781.35, "word": " four", "probability": 0.921875}, {"start": 2781.35, "end": 2781.67, "word": " x", "probability": 0.9541015625}, {"start": 2781.67, "end": 2783.19, "word": " now", "probability": 0.78857421875}], "temperature": 1.0}, {"id": 105, "seek": 281293, "start": 2783.53, "end": 2812.93, "text": " y hat equals 3 if x equals zero. 
That has no meaning because you cannot say age of zero. So sometimes the meaning of y intercept does not make sense because you cannot say x equals zero. Now for the stock of four, that means as his or her weight increases by one year,", "tokens": [288, 2385, 6915, 805, 498, 2031, 6915, 4018, 13, 663, 575, 572, 3620, 570, 291, 2644, 584, 3205, 295, 4018, 13, 407, 2171, 264, 3620, 295, 288, 24700, 775, 406, 652, 2020, 570, 291, 2644, 584, 2031, 6915, 4018, 13, 823, 337, 264, 4127, 295, 1451, 11, 300, 1355, 382, 702, 420, 720, 3364, 8637, 538, 472, 1064, 11], "avg_logprob": -0.27630209028720853, "compression_ratio": 1.6402439024390243, "no_speech_prob": 0.0, "words": [{"start": 2783.53, "end": 2783.91, "word": " y", "probability": 0.0860595703125}, {"start": 2783.91, "end": 2784.73, "word": " hat", "probability": 0.39111328125}, {"start": 2784.73, "end": 2785.05, "word": " equals", "probability": 0.466796875}, {"start": 2785.05, "end": 2785.31, "word": " 3", "probability": 0.265380859375}, {"start": 2785.31, "end": 2785.45, "word": " if", "probability": 0.458740234375}, {"start": 2785.45, "end": 2785.65, "word": " x", "probability": 0.97021484375}, {"start": 2785.65, "end": 2785.91, "word": " equals", "probability": 0.7978515625}, {"start": 2785.91, "end": 2786.27, "word": " zero.", "probability": 0.33984375}, {"start": 2788.49, "end": 2789.21, "word": " That", "probability": 0.8212890625}, {"start": 2789.21, "end": 2789.63, "word": " has", "probability": 0.9443359375}, {"start": 2789.63, "end": 2789.83, "word": " no", "probability": 0.93798828125}, {"start": 2789.83, "end": 2790.09, "word": " meaning", "probability": 0.8642578125}, {"start": 2790.09, "end": 2790.67, "word": " because", "probability": 0.61376953125}, {"start": 2790.67, "end": 2791.31, "word": " you", "probability": 0.88818359375}, {"start": 2791.31, "end": 2791.61, "word": " cannot", "probability": 0.837890625}, {"start": 2791.61, "end": 2791.95, "word": " say", "probability": 0.94140625}, {"start": 
2791.95, "end": 2792.35, "word": " age", "probability": 0.467041015625}, {"start": 2792.35, "end": 2792.53, "word": " of", "probability": 0.931640625}, {"start": 2792.53, "end": 2792.87, "word": " zero.", "probability": 0.83740234375}, {"start": 2794.09, "end": 2794.51, "word": " So", "probability": 0.79248046875}, {"start": 2794.51, "end": 2795.21, "word": " sometimes", "probability": 0.8427734375}, {"start": 2795.21, "end": 2796.05, "word": " the", "probability": 0.76513671875}, {"start": 2796.05, "end": 2796.43, "word": " meaning", "probability": 0.83935546875}, {"start": 2796.43, "end": 2796.81, "word": " of", "probability": 0.970703125}, {"start": 2796.81, "end": 2797.13, "word": " y", "probability": 0.8779296875}, {"start": 2797.13, "end": 2797.59, "word": " intercept", "probability": 0.80712890625}, {"start": 2797.59, "end": 2800.07, "word": " does", "probability": 0.83837890625}, {"start": 2800.07, "end": 2800.25, "word": " not", "probability": 0.951171875}, {"start": 2800.25, "end": 2800.45, "word": " make", "probability": 0.92919921875}, {"start": 2800.45, "end": 2800.85, "word": " sense", "probability": 0.83740234375}, {"start": 2800.85, "end": 2801.47, "word": " because", "probability": 0.81005859375}, {"start": 2801.47, "end": 2801.99, "word": " you", "probability": 0.69580078125}, {"start": 2801.99, "end": 2802.21, "word": " cannot", "probability": 0.8671875}, {"start": 2802.21, "end": 2802.61, "word": " say", "probability": 0.9521484375}, {"start": 2802.61, "end": 2804.39, "word": " x", "probability": 0.931640625}, {"start": 2804.39, "end": 2804.73, "word": " equals", "probability": 0.8984375}, {"start": 2804.73, "end": 2805.09, "word": " zero.", "probability": 0.79931640625}, {"start": 2805.95, "end": 2806.15, "word": " Now", "probability": 0.90087890625}, {"start": 2806.15, "end": 2806.37, "word": " for", "probability": 0.73974609375}, {"start": 2806.37, "end": 2806.55, "word": " the", "probability": 0.87451171875}, {"start": 2806.55, "end": 
2806.91, "word": " stock", "probability": 0.61669921875}, {"start": 2806.91, "end": 2807.49, "word": " of", "probability": 0.94287109375}, {"start": 2807.49, "end": 2807.93, "word": " four,", "probability": 0.751953125}, {"start": 2808.31, "end": 2808.65, "word": " that", "probability": 0.93896484375}, {"start": 2808.65, "end": 2809.03, "word": " means", "probability": 0.93359375}, {"start": 2809.03, "end": 2810.01, "word": " as", "probability": 0.83935546875}, {"start": 2810.01, "end": 2810.31, "word": " his", "probability": 0.95947265625}, {"start": 2810.31, "end": 2810.53, "word": " or", "probability": 0.8681640625}, {"start": 2810.53, "end": 2810.69, "word": " her", "probability": 0.9716796875}, {"start": 2810.69, "end": 2811.01, "word": " weight", "probability": 0.89501953125}, {"start": 2811.01, "end": 2811.61, "word": " increases", "probability": 0.5009765625}, {"start": 2811.61, "end": 2812.01, "word": " by", "probability": 0.95751953125}, {"start": 2812.01, "end": 2812.61, "word": " one", "probability": 0.8876953125}, {"start": 2812.61, "end": 2812.93, "word": " year,", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 106, "seek": 283867, "start": 2813.63, "end": 2838.67, "text": " Then we expect his weight to increase by four kilograms. So as one unit increase in x, y is our, his weight is expected to increase by four units. 
So again, sometimes we can interpret the y intercept, but in some cases it has no meaning.", "tokens": [1396, 321, 2066, 702, 3364, 281, 3488, 538, 1451, 30690, 13, 407, 382, 472, 4985, 3488, 294, 2031, 11, 288, 307, 527, 11, 702, 3364, 307, 5176, 281, 3488, 538, 1451, 6815, 13, 407, 797, 11, 2171, 321, 393, 7302, 264, 288, 24700, 11, 457, 294, 512, 3331, 309, 575, 572, 3620, 13], "avg_logprob": -0.22077547013759613, "compression_ratio": 1.63013698630137, "no_speech_prob": 0.0, "words": [{"start": 2813.63, "end": 2813.93, "word": " Then", "probability": 0.40673828125}, {"start": 2813.93, "end": 2814.17, "word": " we", "probability": 0.81298828125}, {"start": 2814.17, "end": 2814.79, "word": " expect", "probability": 0.92822265625}, {"start": 2814.79, "end": 2815.55, "word": " his", "probability": 0.89599609375}, {"start": 2815.55, "end": 2815.95, "word": " weight", "probability": 0.890625}, {"start": 2815.95, "end": 2816.57, "word": " to", "probability": 0.92041015625}, {"start": 2816.57, "end": 2816.93, "word": " increase", "probability": 0.63134765625}, {"start": 2816.93, "end": 2817.21, "word": " by", "probability": 0.96923828125}, {"start": 2817.21, "end": 2817.71, "word": " four", "probability": 0.386474609375}, {"start": 2817.71, "end": 2818.43, "word": " kilograms.", "probability": 0.48828125}, {"start": 2819.75, "end": 2820.01, "word": " So", "probability": 0.86572265625}, {"start": 2820.01, "end": 2820.23, "word": " as", "probability": 0.77880859375}, {"start": 2820.23, "end": 2820.47, "word": " one", "probability": 0.74755859375}, {"start": 2820.47, "end": 2820.69, "word": " unit", "probability": 0.87451171875}, {"start": 2820.69, "end": 2821.07, "word": " increase", "probability": 0.472900390625}, {"start": 2821.07, "end": 2821.29, "word": " in", "probability": 0.91748046875}, {"start": 2821.29, "end": 2821.61, "word": " x,", "probability": 0.54150390625}, {"start": 2822.59, "end": 2823.05, "word": " y", "probability": 0.74951171875}, {"start": 2823.05, 
"end": 2823.77, "word": " is", "probability": 0.884765625}, {"start": 2823.77, "end": 2824.19, "word": " our,", "probability": 0.77392578125}, {"start": 2824.39, "end": 2824.55, "word": " his", "probability": 0.95947265625}, {"start": 2824.55, "end": 2824.91, "word": " weight", "probability": 0.90380859375}, {"start": 2824.91, "end": 2825.13, "word": " is", "probability": 0.837890625}, {"start": 2825.13, "end": 2825.69, "word": " expected", "probability": 0.880859375}, {"start": 2825.69, "end": 2827.01, "word": " to", "probability": 0.939453125}, {"start": 2827.01, "end": 2827.43, "word": " increase", "probability": 0.83837890625}, {"start": 2827.43, "end": 2827.79, "word": " by", "probability": 0.97119140625}, {"start": 2827.79, "end": 2828.39, "word": " four", "probability": 0.802734375}, {"start": 2828.39, "end": 2828.79, "word": " units.", "probability": 0.90234375}, {"start": 2829.61, "end": 2829.87, "word": " So", "probability": 0.9560546875}, {"start": 2829.87, "end": 2830.15, "word": " again,", "probability": 0.89501953125}, {"start": 2830.37, "end": 2830.99, "word": " sometimes", "probability": 0.9560546875}, {"start": 2830.99, "end": 2831.77, "word": " we", "probability": 0.8857421875}, {"start": 2831.77, "end": 2832.25, "word": " can", "probability": 0.94287109375}, {"start": 2832.25, "end": 2833.23, "word": " interpret", "probability": 0.87255859375}, {"start": 2833.23, "end": 2833.91, "word": " the", "probability": 0.90966796875}, {"start": 2833.91, "end": 2834.31, "word": " y", "probability": 0.91552734375}, {"start": 2834.31, "end": 2834.77, "word": " intercept,", "probability": 0.6572265625}, {"start": 2836.11, "end": 2836.59, "word": " but", "probability": 0.84326171875}, {"start": 2836.59, "end": 2836.95, "word": " in", "probability": 0.92529296875}, {"start": 2836.95, "end": 2837.23, "word": " some", "probability": 0.89501953125}, {"start": 2837.23, "end": 2837.77, "word": " cases", "probability": 0.9267578125}, {"start": 2837.77, "end": 2838.05, 
"word": " it", "probability": 0.68115234375}, {"start": 2838.05, "end": 2838.27, "word": " has", "probability": 0.9462890625}, {"start": 2838.27, "end": 2838.45, "word": " no", "probability": 0.91357421875}, {"start": 2838.45, "end": 2838.67, "word": " meaning.", "probability": 0.89404296875}], "temperature": 1.0}, {"id": 107, "seek": 287067, "start": 2844.97, "end": 2870.67, "text": " Now for the previous example, for the selling price of a home and its size, B1rSy divided by Sx, r is computed, r is found to be 76%, 76%Sy divided by Sx, that will give 0.109. B0y bar minus B1x bar,", "tokens": [823, 337, 264, 3894, 1365, 11, 337, 264, 6511, 3218, 295, 257, 1280, 293, 1080, 2744, 11, 363, 16, 81, 50, 88, 6666, 538, 318, 87, 11, 367, 307, 40610, 11, 367, 307, 1352, 281, 312, 24733, 8923, 24733, 4, 50, 88, 6666, 538, 318, 87, 11, 300, 486, 976, 1958, 13, 3279, 24, 13, 363, 15, 88, 2159, 3175, 363, 16, 87, 2159, 11], "avg_logprob": -0.2923768939393939, "compression_ratio": 1.342281879194631, "no_speech_prob": 0.0, "words": [{"start": 2844.97, "end": 2845.23, "word": " Now", "probability": 0.6982421875}, {"start": 2845.23, "end": 2845.41, "word": " for", "probability": 0.71044921875}, {"start": 2845.41, "end": 2845.55, "word": " the", "probability": 0.9189453125}, {"start": 2845.55, "end": 2845.79, "word": " previous", "probability": 0.75}, {"start": 2845.79, "end": 2846.37, "word": " example,", "probability": 0.96630859375}, {"start": 2846.69, "end": 2846.83, "word": " for", "probability": 0.80029296875}, {"start": 2846.83, "end": 2846.99, "word": " the", "probability": 0.75048828125}, {"start": 2846.99, "end": 2847.19, "word": " selling", "probability": 0.814453125}, {"start": 2847.19, "end": 2847.69, "word": " price", "probability": 0.93359375}, {"start": 2847.69, "end": 2847.87, "word": " of", "probability": 0.465087890625}, {"start": 2847.87, "end": 2847.99, "word": " a", "probability": 0.875}, {"start": 2847.99, "end": 2848.17, "word": " home", "probability": 
0.892578125}, {"start": 2848.17, "end": 2848.43, "word": " and", "probability": 0.81640625}, {"start": 2848.43, "end": 2848.71, "word": " its", "probability": 0.62548828125}, {"start": 2848.71, "end": 2849.13, "word": " size,", "probability": 0.81201171875}, {"start": 2850.09, "end": 2851.43, "word": " B1rSy", "probability": 0.582275390625}, {"start": 2851.43, "end": 2852.19, "word": " divided", "probability": 0.419921875}, {"start": 2852.19, "end": 2852.39, "word": " by", "probability": 0.97509765625}, {"start": 2852.39, "end": 2852.93, "word": " Sx,", "probability": 0.944580078125}, {"start": 2853.79, "end": 2854.15, "word": " r", "probability": 0.58349609375}, {"start": 2854.15, "end": 2854.33, "word": " is", "probability": 0.92626953125}, {"start": 2854.33, "end": 2855.31, "word": " computed,", "probability": 0.935546875}, {"start": 2856.81, "end": 2857.15, "word": " r", "probability": 0.76318359375}, {"start": 2857.15, "end": 2858.09, "word": " is", "probability": 0.9375}, {"start": 2858.09, "end": 2858.31, "word": " found", "probability": 0.873046875}, {"start": 2858.31, "end": 2858.49, "word": " to", "probability": 0.97119140625}, {"start": 2858.49, "end": 2858.63, "word": " be", "probability": 0.94970703125}, {"start": 2858.63, "end": 2859.83, "word": " 76%,", "probability": 0.609375}, {"start": 2859.83, "end": 2860.35, "word": " 76", "probability": 0.728515625}, {"start": 2860.35, "end": 2862.79, "word": "%Sy", "probability": 0.726806640625}, {"start": 2862.79, "end": 2863.55, "word": " divided", "probability": 0.74560546875}, {"start": 2863.55, "end": 2863.79, "word": " by", "probability": 0.97265625}, {"start": 2863.79, "end": 2864.33, "word": " Sx,", "probability": 0.99267578125}, {"start": 2864.91, "end": 2865.13, "word": " that", "probability": 0.9306640625}, {"start": 2865.13, "end": 2865.31, "word": " will", "probability": 0.86181640625}, {"start": 2865.31, "end": 2865.49, "word": " give", "probability": 0.79296875}, {"start": 2865.49, "end": 
2865.67, "word": " 0", "probability": 0.75927734375}, {"start": 2865.67, "end": 2866.55, "word": ".109.", "probability": 0.9840494791666666}, {"start": 2868.11, "end": 2868.77, "word": " B0y", "probability": 0.9099934895833334}, {"start": 2868.77, "end": 2869.03, "word": " bar", "probability": 0.82275390625}, {"start": 2869.03, "end": 2869.33, "word": " minus", "probability": 0.9677734375}, {"start": 2869.33, "end": 2869.99, "word": " B1x", "probability": 0.9025065104166666}, {"start": 2869.99, "end": 2870.67, "word": " bar,", "probability": 0.9501953125}], "temperature": 1.0}, {"id": 108, "seek": 290299, "start": 2873.61, "end": 2902.99, "text": " Y bar for this data is 286 minus D1. So we have to compute first D1 because we use it in order to determine D0. And calculation gives 98. So that means based on these equations, Y hat equals 0.10977 plus 98.248.", "tokens": [398, 2159, 337, 341, 1412, 307, 7562, 21, 3175, 413, 16, 13, 407, 321, 362, 281, 14722, 700, 413, 16, 570, 321, 764, 309, 294, 1668, 281, 6997, 413, 15, 13, 400, 17108, 2709, 20860, 13, 407, 300, 1355, 2361, 322, 613, 11787, 11, 398, 2385, 6915, 1958, 13, 3279, 24, 17512, 1804, 20860, 13, 7911, 23, 13], "avg_logprob": -0.15426376765057193, "compression_ratio": 1.3333333333333333, "no_speech_prob": 0.0, "words": [{"start": 2873.61, "end": 2873.95, "word": " Y", "probability": 0.319580078125}, {"start": 2873.95, "end": 2874.31, "word": " bar", "probability": 0.9267578125}, {"start": 2874.31, "end": 2874.75, "word": " for", "probability": 0.763671875}, {"start": 2874.75, "end": 2875.07, "word": " this", "probability": 0.935546875}, {"start": 2875.07, "end": 2875.41, "word": " data", "probability": 0.80126953125}, {"start": 2875.41, "end": 2875.65, "word": " is", "probability": 0.931640625}, {"start": 2875.65, "end": 2876.61, "word": " 286", "probability": 0.93212890625}, {"start": 2876.61, "end": 2877.71, "word": " minus", "probability": 0.9150390625}, {"start": 2877.71, "end": 2878.37, "word": " D1.", 
"probability": 0.7080078125}, {"start": 2879.21, "end": 2879.55, "word": " So", "probability": 0.95166015625}, {"start": 2879.55, "end": 2879.71, "word": " we", "probability": 0.728515625}, {"start": 2879.71, "end": 2879.93, "word": " have", "probability": 0.9423828125}, {"start": 2879.93, "end": 2880.15, "word": " to", "probability": 0.96728515625}, {"start": 2880.15, "end": 2880.55, "word": " compute", "probability": 0.900390625}, {"start": 2880.55, "end": 2880.87, "word": " first", "probability": 0.7734375}, {"start": 2880.87, "end": 2881.23, "word": " D1", "probability": 0.9609375}, {"start": 2881.23, "end": 2881.55, "word": " because", "probability": 0.68017578125}, {"start": 2881.55, "end": 2881.73, "word": " we", "probability": 0.9482421875}, {"start": 2881.73, "end": 2881.93, "word": " use", "probability": 0.794921875}, {"start": 2881.93, "end": 2882.35, "word": " it", "probability": 0.94873046875}, {"start": 2882.35, "end": 2883.03, "word": " in", "probability": 0.88037109375}, {"start": 2883.03, "end": 2883.23, "word": " order", "probability": 0.9228515625}, {"start": 2883.23, "end": 2883.49, "word": " to", "probability": 0.95556640625}, {"start": 2883.49, "end": 2883.79, "word": " determine", "probability": 0.70556640625}, {"start": 2883.79, "end": 2884.37, "word": " D0.", "probability": 0.83740234375}, {"start": 2885.49, "end": 2886.21, "word": " And", "probability": 0.94091796875}, {"start": 2886.21, "end": 2886.73, "word": " calculation", "probability": 0.7890625}, {"start": 2886.73, "end": 2887.13, "word": " gives", "probability": 0.91357421875}, {"start": 2887.13, "end": 2887.71, "word": " 98.", "probability": 0.95263671875}, {"start": 2888.05, "end": 2888.31, "word": " So", "probability": 0.96533203125}, {"start": 2888.31, "end": 2888.59, "word": " that", "probability": 0.9091796875}, {"start": 2888.59, "end": 2889.01, "word": " means", "probability": 0.9326171875}, {"start": 2889.01, "end": 2891.21, "word": " based", "probability": 0.57958984375}, 
{"start": 2891.21, "end": 2891.53, "word": " on", "probability": 0.94921875}, {"start": 2891.53, "end": 2892.03, "word": " these", "probability": 0.8125}, {"start": 2892.03, "end": 2892.71, "word": " equations,", "probability": 0.9326171875}, {"start": 2893.57, "end": 2893.79, "word": " Y", "probability": 0.732421875}, {"start": 2893.79, "end": 2894.19, "word": " hat", "probability": 0.93603515625}, {"start": 2894.19, "end": 2896.13, "word": " equals", "probability": 0.91845703125}, {"start": 2896.13, "end": 2896.45, "word": " 0", "probability": 0.81884765625}, {"start": 2896.45, "end": 2898.37, "word": ".10977", "probability": 0.9501953125}, {"start": 2898.37, "end": 2899.57, "word": " plus", "probability": 0.91162109375}, {"start": 2899.57, "end": 2900.15, "word": " 98", "probability": 0.98095703125}, {"start": 2900.15, "end": 2902.99, "word": ".248.", "probability": 0.9601236979166666}], "temperature": 1.0}, {"id": 109, "seek": 293271, "start": 2904.79, "end": 2932.71, "text": " times X. X is the size. 
0.1 B1 is 0.1, B0 is 98, so 98.248 plus B1.", "tokens": [1413, 1783, 13, 1783, 307, 264, 2744, 13, 1958, 13, 16, 363, 16, 307, 1958, 13, 16, 11, 363, 15, 307, 20860, 11, 370, 20860, 13, 7911, 23, 1804, 363, 16, 13], "avg_logprob": -0.37523674242424243, "compression_ratio": 1.0, "no_speech_prob": 0.0, "words": [{"start": 2904.79, "end": 2905.21, "word": " times", "probability": 0.1964111328125}, {"start": 2905.21, "end": 2905.61, "word": " X.", "probability": 0.4091796875}, {"start": 2906.19, "end": 2906.63, "word": " X", "probability": 0.966796875}, {"start": 2906.63, "end": 2906.97, "word": " is", "probability": 0.9326171875}, {"start": 2906.97, "end": 2907.69, "word": " the", "probability": 0.9248046875}, {"start": 2907.69, "end": 2909.37, "word": " size.", "probability": 0.73681640625}, {"start": 2912.89, "end": 2913.77, "word": " 0", "probability": 0.480224609375}, {"start": 2913.77, "end": 2914.19, "word": ".1", "probability": 0.967041015625}, {"start": 2914.19, "end": 2919.83, "word": " B1", "probability": 0.40185546875}, {"start": 2919.83, "end": 2925.31, "word": " is", "probability": 0.8837890625}, {"start": 2925.31, "end": 2925.55, "word": " 0", "probability": 0.8818359375}, {"start": 2925.55, "end": 2925.93, "word": ".1,", "probability": 0.9892578125}, {"start": 2926.17, "end": 2926.63, "word": " B0", "probability": 0.945556640625}, {"start": 2926.63, "end": 2926.77, "word": " is", "probability": 0.93408203125}, {"start": 2926.77, "end": 2927.15, "word": " 98,", "probability": 0.2257080078125}, {"start": 2927.35, "end": 2927.51, "word": " so", "probability": 0.814453125}, {"start": 2927.51, "end": 2928.03, "word": " 98", "probability": 0.90087890625}, {"start": 2928.03, "end": 2930.63, "word": ".248", "probability": 0.9153645833333334}, {"start": 2930.63, "end": 2932.17, "word": " plus", "probability": 0.72998046875}, {"start": 2932.17, "end": 2932.71, "word": " B1.", "probability": 0.979248046875}], "temperature": 1.0}, {"id": 110, "seek": 296127, 
"start": 2935.07, "end": 2961.27, "text": " So this is your regression equation. So again, the intercept is 98. So this amount, the segment is 98. Now the slope is 0.109. So house price, the expected value of house price equals B098 plus 0.109 square feet.", "tokens": [407, 341, 307, 428, 24590, 5367, 13, 407, 797, 11, 264, 24700, 307, 20860, 13, 407, 341, 2372, 11, 264, 9469, 307, 20860, 13, 823, 264, 13525, 307, 1958, 13, 3279, 24, 13, 407, 1782, 3218, 11, 264, 5176, 2158, 295, 1782, 3218, 6915, 363, 13811, 23, 1804, 1958, 13, 3279, 24, 3732, 3521, 13], "avg_logprob": -0.17340960114129952, "compression_ratio": 1.4689655172413794, "no_speech_prob": 0.0, "words": [{"start": 2935.0699999999997, "end": 2935.91, "word": " So", "probability": 0.35498046875}, {"start": 2935.91, "end": 2936.21, "word": " this", "probability": 0.79345703125}, {"start": 2936.21, "end": 2936.35, "word": " is", "probability": 0.9208984375}, {"start": 2936.35, "end": 2936.65, "word": " your", "probability": 0.888671875}, {"start": 2936.65, "end": 2937.89, "word": " regression", "probability": 0.818359375}, {"start": 2937.89, "end": 2938.39, "word": " equation.", "probability": 0.97900390625}, {"start": 2940.89, "end": 2941.73, "word": " So", "probability": 0.91455078125}, {"start": 2941.73, "end": 2942.39, "word": " again,", "probability": 0.8203125}, {"start": 2942.81, "end": 2942.99, "word": " the", "probability": 0.833984375}, {"start": 2942.99, "end": 2943.43, "word": " intercept", "probability": 0.9814453125}, {"start": 2943.43, "end": 2943.73, "word": " is", "probability": 0.9326171875}, {"start": 2943.73, "end": 2944.11, "word": " 98.", "probability": 0.95703125}, {"start": 2945.35, "end": 2946.19, "word": " So", "probability": 0.94921875}, {"start": 2946.19, "end": 2946.45, "word": " this", "probability": 0.9189453125}, {"start": 2946.45, "end": 2946.77, "word": " amount,", "probability": 0.79736328125}, {"start": 2946.93, "end": 2947.07, "word": " the", "probability": 0.4560546875}, 
{"start": 2947.07, "end": 2947.39, "word": " segment", "probability": 0.86572265625}, {"start": 2947.39, "end": 2947.93, "word": " is", "probability": 0.71923828125}, {"start": 2947.93, "end": 2948.33, "word": " 98.", "probability": 0.98583984375}, {"start": 2948.99, "end": 2949.45, "word": " Now", "probability": 0.95849609375}, {"start": 2949.45, "end": 2949.75, "word": " the", "probability": 0.6171875}, {"start": 2949.75, "end": 2950.09, "word": " slope", "probability": 0.81884765625}, {"start": 2950.09, "end": 2950.69, "word": " is", "probability": 0.93505859375}, {"start": 2950.69, "end": 2950.93, "word": " 0", "probability": 0.8984375}, {"start": 2950.93, "end": 2952.07, "word": ".109.", "probability": 0.9840494791666666}, {"start": 2952.67, "end": 2952.95, "word": " So", "probability": 0.9609375}, {"start": 2952.95, "end": 2953.29, "word": " house", "probability": 0.56884765625}, {"start": 2953.29, "end": 2953.63, "word": " price,", "probability": 0.9296875}, {"start": 2953.79, "end": 2953.85, "word": " the", "probability": 0.912109375}, {"start": 2953.85, "end": 2954.33, "word": " expected", "probability": 0.88818359375}, {"start": 2954.33, "end": 2954.79, "word": " value", "probability": 0.9638671875}, {"start": 2954.79, "end": 2954.97, "word": " of", "probability": 0.93017578125}, {"start": 2954.97, "end": 2955.25, "word": " house", "probability": 0.853515625}, {"start": 2955.25, "end": 2955.73, "word": " price", "probability": 0.92138671875}, {"start": 2955.73, "end": 2956.29, "word": " equals", "probability": 0.85888671875}, {"start": 2956.29, "end": 2958.23, "word": " B098", "probability": 0.6427408854166666}, {"start": 2958.23, "end": 2959.15, "word": " plus", "probability": 0.81787109375}, {"start": 2959.15, "end": 2959.43, "word": " 0", "probability": 0.97900390625}, {"start": 2959.43, "end": 2960.43, "word": ".109", "probability": 0.9850260416666666}, {"start": 2960.43, "end": 2960.97, "word": " square", "probability": 0.814453125}, {"start": 
2960.97, "end": 2961.27, "word": " feet.", "probability": 0.94921875}], "temperature": 1.0}, {"id": 111, "seek": 298991, "start": 2963.15, "end": 2989.91, "text": " So that's the prediction line for the house price. So again, house price equal B0 98 plus 0.10977 times square root. Now, what's your interpretation about B0 and B1? B0 is the estimated mean value of Y when the value of X is 0. So if X is 0, this range of X observed X values and", "tokens": [407, 300, 311, 264, 17630, 1622, 337, 264, 1782, 3218, 13, 407, 797, 11, 1782, 3218, 2681, 363, 15, 20860, 1804, 1958, 13, 3279, 24, 17512, 1413, 3732, 5593, 13, 823, 11, 437, 311, 428, 14174, 466, 363, 15, 293, 363, 16, 30, 363, 15, 307, 264, 14109, 914, 2158, 295, 398, 562, 264, 2158, 295, 1783, 307, 1958, 13, 407, 498, 1783, 307, 1958, 11, 341, 3613, 295, 1783, 13095, 1783, 4190, 293], "avg_logprob": -0.23208333651224772, "compression_ratio": 1.5384615384615385, "no_speech_prob": 0.0, "words": [{"start": 2963.15, "end": 2963.41, "word": " So", "probability": 0.91064453125}, {"start": 2963.41, "end": 2963.75, "word": " that's", "probability": 0.9033203125}, {"start": 2963.75, "end": 2963.95, "word": " the", "probability": 0.91357421875}, {"start": 2963.95, "end": 2964.37, "word": " prediction", "probability": 0.912109375}, {"start": 2964.37, "end": 2964.83, "word": " line", "probability": 0.943359375}, {"start": 2964.83, "end": 2966.63, "word": " for", "probability": 0.87158203125}, {"start": 2966.63, "end": 2966.85, "word": " the", "probability": 0.8896484375}, {"start": 2966.85, "end": 2967.11, "word": " house", "probability": 0.72900390625}, {"start": 2967.11, "end": 2967.63, "word": " price.", "probability": 0.92529296875}, {"start": 2968.51, "end": 2968.77, "word": " So", "probability": 0.96142578125}, {"start": 2968.77, "end": 2968.99, "word": " again,", "probability": 0.78955078125}, {"start": 2969.07, "end": 2969.23, "word": " house", "probability": 0.77685546875}, {"start": 2969.23, "end": 2969.59, 
"word": " price", "probability": 0.89208984375}, {"start": 2969.59, "end": 2970.03, "word": " equal", "probability": 0.4384765625}, {"start": 2970.03, "end": 2971.15, "word": " B0", "probability": 0.556396484375}, {"start": 2971.15, "end": 2971.49, "word": " 98", "probability": 0.460693359375}, {"start": 2971.49, "end": 2972.07, "word": " plus", "probability": 0.76611328125}, {"start": 2972.07, "end": 2972.39, "word": " 0", "probability": 0.79248046875}, {"start": 2972.39, "end": 2974.37, "word": ".10977", "probability": 0.90087890625}, {"start": 2974.37, "end": 2974.77, "word": " times", "probability": 0.8447265625}, {"start": 2974.77, "end": 2975.09, "word": " square", "probability": 0.47607421875}, {"start": 2975.09, "end": 2975.23, "word": " root.", "probability": 0.82275390625}, {"start": 2975.55, "end": 2975.89, "word": " Now,", "probability": 0.95751953125}, {"start": 2975.97, "end": 2976.19, "word": " what's", "probability": 0.957275390625}, {"start": 2976.19, "end": 2976.33, "word": " your", "probability": 0.8896484375}, {"start": 2976.33, "end": 2976.93, "word": " interpretation", "probability": 0.884765625}, {"start": 2976.93, "end": 2977.43, "word": " about", "probability": 0.86328125}, {"start": 2977.43, "end": 2977.89, "word": " B0", "probability": 0.798095703125}, {"start": 2977.89, "end": 2978.03, "word": " and", "probability": 0.9384765625}, {"start": 2978.03, "end": 2978.33, "word": " B1?", "probability": 0.992431640625}, {"start": 2979.71, "end": 2980.31, "word": " B0", "probability": 0.97607421875}, {"start": 2980.31, "end": 2980.51, "word": " is", "probability": 0.93994140625}, {"start": 2980.51, "end": 2980.71, "word": " the", "probability": 0.51806640625}, {"start": 2980.71, "end": 2981.13, "word": " estimated", "probability": 0.912109375}, {"start": 2981.13, "end": 2981.39, "word": " mean", "probability": 0.93701171875}, {"start": 2981.39, "end": 2981.75, "word": " value", "probability": 0.9697265625}, {"start": 2981.75, "end": 2981.95, 
"word": " of", "probability": 0.9599609375}, {"start": 2981.95, "end": 2982.23, "word": " Y", "probability": 0.69091796875}, {"start": 2982.23, "end": 2982.95, "word": " when", "probability": 0.72119140625}, {"start": 2982.95, "end": 2983.09, "word": " the", "probability": 0.90771484375}, {"start": 2983.09, "end": 2983.29, "word": " value", "probability": 0.9775390625}, {"start": 2983.29, "end": 2983.45, "word": " of", "probability": 0.87744140625}, {"start": 2983.45, "end": 2983.53, "word": " X", "probability": 0.9501953125}, {"start": 2983.53, "end": 2983.69, "word": " is", "probability": 0.943359375}, {"start": 2983.69, "end": 2983.97, "word": " 0.", "probability": 0.56982421875}, {"start": 2984.79, "end": 2985.25, "word": " So", "probability": 0.9658203125}, {"start": 2985.25, "end": 2985.55, "word": " if", "probability": 0.80712890625}, {"start": 2985.55, "end": 2985.77, "word": " X", "probability": 0.9794921875}, {"start": 2985.77, "end": 2985.91, "word": " is", "probability": 0.79443359375}, {"start": 2985.91, "end": 2986.11, "word": " 0,", "probability": 0.9189453125}, {"start": 2986.21, "end": 2986.43, "word": " this", "probability": 0.625}, {"start": 2986.43, "end": 2986.97, "word": " range", "probability": 0.806640625}, {"start": 2986.97, "end": 2987.99, "word": " of", "probability": 0.8056640625}, {"start": 2987.99, "end": 2988.33, "word": " X", "probability": 0.84130859375}, {"start": 2988.33, "end": 2989.09, "word": " observed", "probability": 0.1414794921875}, {"start": 2989.09, "end": 2989.31, "word": " X", "probability": 0.7392578125}, {"start": 2989.31, "end": 2989.67, "word": " values", "probability": 0.90576171875}, {"start": 2989.67, "end": 2989.91, "word": " and", "probability": 0.5654296875}], "temperature": 1.0}, {"id": 112, "seek": 301900, "start": 2991.06, "end": 3019.0, "text": " you have a home or a house of size zero. So that means this value has no meaning. Because a house cannot have a square footage of zero. 
So B0 has no practical application in this case. So sometimes it makes sense, in other cases it doesn't have that. So for this specific example,", "tokens": [291, 362, 257, 1280, 420, 257, 1782, 295, 2744, 4018, 13, 407, 300, 1355, 341, 2158, 575, 572, 3620, 13, 1436, 257, 1782, 2644, 362, 257, 3732, 9556, 295, 4018, 13, 407, 363, 15, 575, 572, 8496, 3861, 294, 341, 1389, 13, 407, 2171, 309, 1669, 2020, 11, 294, 661, 3331, 309, 1177, 380, 362, 300, 13, 407, 337, 341, 2685, 1365, 11], "avg_logprob": -0.183715823572129, "compression_ratio": 1.6149425287356323, "no_speech_prob": 0.0, "words": [{"start": 2991.06, "end": 2991.4, "word": " you", "probability": 0.309326171875}, {"start": 2991.4, "end": 2991.82, "word": " have", "probability": 0.9365234375}, {"start": 2991.82, "end": 2992.58, "word": " a", "probability": 0.953125}, {"start": 2992.58, "end": 2992.98, "word": " home", "probability": 0.87646484375}, {"start": 2992.98, "end": 2993.56, "word": " or", "probability": 0.8388671875}, {"start": 2993.56, "end": 2993.7, "word": " a", "probability": 0.76220703125}, {"start": 2993.7, "end": 2994.12, "word": " house", "probability": 0.8876953125}, {"start": 2994.12, "end": 2994.4, "word": " of", "probability": 0.947265625}, {"start": 2994.4, "end": 2994.7, "word": " size", "probability": 0.8564453125}, {"start": 2994.7, "end": 2995.12, "word": " zero.", "probability": 0.61474609375}, {"start": 2995.62, "end": 2995.92, "word": " So", "probability": 0.9365234375}, {"start": 2995.92, "end": 2996.18, "word": " that", "probability": 0.82666015625}, {"start": 2996.18, "end": 2996.54, "word": " means", "probability": 0.92724609375}, {"start": 2996.54, "end": 2997.42, "word": " this", "probability": 0.8857421875}, {"start": 2997.42, "end": 2997.86, "word": " value", "probability": 0.97998046875}, {"start": 2997.86, "end": 2998.14, "word": " has", "probability": 0.94482421875}, {"start": 2998.14, "end": 2998.32, "word": " no", "probability": 0.94677734375}, {"start": 2998.32, 
"end": 2998.58, "word": " meaning.", "probability": 0.857421875}, {"start": 3000.12, "end": 3000.92, "word": " Because", "probability": 0.92919921875}, {"start": 3000.92, "end": 3001.32, "word": " a", "probability": 0.91455078125}, {"start": 3001.32, "end": 3001.66, "word": " house", "probability": 0.8896484375}, {"start": 3001.66, "end": 3001.94, "word": " cannot", "probability": 0.88916015625}, {"start": 3001.94, "end": 3002.48, "word": " have", "probability": 0.94287109375}, {"start": 3002.48, "end": 3002.68, "word": " a", "probability": 0.978515625}, {"start": 3002.68, "end": 3002.98, "word": " square", "probability": 0.87548828125}, {"start": 3002.98, "end": 3003.44, "word": " footage", "probability": 0.91064453125}, {"start": 3003.44, "end": 3003.82, "word": " of", "probability": 0.97119140625}, {"start": 3003.82, "end": 3004.16, "word": " zero.", "probability": 0.828125}, {"start": 3004.62, "end": 3004.74, "word": " So", "probability": 0.96044921875}, {"start": 3004.74, "end": 3005.2, "word": " B0", "probability": 0.5037841796875}, {"start": 3005.2, "end": 3005.72, "word": " has", "probability": 0.94091796875}, {"start": 3005.72, "end": 3005.94, "word": " no", "probability": 0.94775390625}, {"start": 3005.94, "end": 3006.4, "word": " practical", "probability": 0.90576171875}, {"start": 3006.4, "end": 3007.12, "word": " application", "probability": 0.81884765625}, {"start": 3007.12, "end": 3007.38, "word": " in", "probability": 0.6640625}, {"start": 3007.38, "end": 3007.5, "word": " this", "probability": 0.9365234375}, {"start": 3007.5, "end": 3007.78, "word": " case.", "probability": 0.8857421875}, {"start": 3008.6, "end": 3008.88, "word": " So", "probability": 0.93994140625}, {"start": 3008.88, "end": 3009.42, "word": " sometimes", "probability": 0.91357421875}, {"start": 3009.42, "end": 3009.82, "word": " it", "probability": 0.8154296875}, {"start": 3009.82, "end": 3010.04, "word": " makes", "probability": 0.8134765625}, {"start": 3010.04, "end": 3010.44, 
"word": " sense,", "probability": 0.798828125}, {"start": 3010.94, "end": 3011.06, "word": " in", "probability": 0.73193359375}, {"start": 3011.06, "end": 3011.32, "word": " other", "probability": 0.89013671875}, {"start": 3011.32, "end": 3011.84, "word": " cases", "probability": 0.9130859375}, {"start": 3011.84, "end": 3013.72, "word": " it", "probability": 0.68310546875}, {"start": 3013.72, "end": 3015.36, "word": " doesn't", "probability": 0.780517578125}, {"start": 3015.36, "end": 3015.64, "word": " have", "probability": 0.90966796875}, {"start": 3015.64, "end": 3015.84, "word": " that.", "probability": 0.79443359375}, {"start": 3015.98, "end": 3016.2, "word": " So", "probability": 0.91064453125}, {"start": 3016.2, "end": 3017.62, "word": " for", "probability": 0.78076171875}, {"start": 3017.62, "end": 3017.92, "word": " this", "probability": 0.9453125}, {"start": 3017.92, "end": 3018.46, "word": " specific", "probability": 0.90576171875}, {"start": 3018.46, "end": 3019.0, "word": " example,", "probability": 0.97314453125}], "temperature": 1.0}, {"id": 113, "seek": 303913, "start": 3020.13, "end": 3039.13, "text": " B0 has no practical application in this case. But B1 which is 0.1097, B1 estimates the change in the mean value of Y as a result of one unit increasing X. 
So for this value which is 0.109, it means", "tokens": [363, 15, 575, 572, 8496, 3861, 294, 341, 1389, 13, 583, 363, 16, 597, 307, 1958, 13, 3279, 23247, 11, 363, 16, 20561, 264, 1319, 294, 264, 914, 2158, 295, 398, 382, 257, 1874, 295, 472, 4985, 5662, 1783, 13, 407, 337, 341, 2158, 597, 307, 1958, 13, 3279, 24, 11, 309, 1355], "avg_logprob": -0.1835937453088937, "compression_ratio": 1.4142857142857144, "no_speech_prob": 0.0, "words": [{"start": 3020.13, "end": 3020.73, "word": " B0", "probability": 0.62646484375}, {"start": 3020.73, "end": 3021.09, "word": " has", "probability": 0.93359375}, {"start": 3021.09, "end": 3021.31, "word": " no", "probability": 0.93505859375}, {"start": 3021.31, "end": 3021.79, "word": " practical", "probability": 0.9033203125}, {"start": 3021.79, "end": 3023.07, "word": " application", "probability": 0.92431640625}, {"start": 3023.07, "end": 3023.53, "word": " in", "probability": 0.89990234375}, {"start": 3023.53, "end": 3023.73, "word": " this", "probability": 0.94970703125}, {"start": 3023.73, "end": 3024.09, "word": " case.", "probability": 0.91748046875}, {"start": 3025.11, "end": 3025.63, "word": " But", "probability": 0.9228515625}, {"start": 3025.63, "end": 3026.73, "word": " B1", "probability": 0.822998046875}, {"start": 3026.73, "end": 3026.93, "word": " which", "probability": 0.6845703125}, {"start": 3026.93, "end": 3027.09, "word": " is", "probability": 0.94775390625}, {"start": 3027.09, "end": 3027.27, "word": " 0", "probability": 0.67578125}, {"start": 3027.27, "end": 3028.21, "word": ".1097,", "probability": 0.9773763020833334}, {"start": 3028.93, "end": 3029.35, "word": " B1", "probability": 0.8544921875}, {"start": 3029.35, "end": 3030.07, "word": " estimates", "probability": 0.91552734375}, {"start": 3030.07, "end": 3030.27, "word": " the", "probability": 0.7626953125}, {"start": 3030.27, "end": 3030.75, "word": " change", "probability": 0.90234375}, {"start": 3030.75, "end": 3031.77, "word": " in", "probability": 
0.93212890625}, {"start": 3031.77, "end": 3031.93, "word": " the", "probability": 0.92041015625}, {"start": 3031.93, "end": 3032.09, "word": " mean", "probability": 0.98095703125}, {"start": 3032.09, "end": 3032.39, "word": " value", "probability": 0.97216796875}, {"start": 3032.39, "end": 3032.59, "word": " of", "probability": 0.92724609375}, {"start": 3032.59, "end": 3032.81, "word": " Y", "probability": 0.75390625}, {"start": 3032.81, "end": 3033.05, "word": " as", "probability": 0.94482421875}, {"start": 3033.05, "end": 3033.17, "word": " a", "probability": 0.95458984375}, {"start": 3033.17, "end": 3033.45, "word": " result", "probability": 0.94189453125}, {"start": 3033.45, "end": 3033.71, "word": " of", "probability": 0.9658203125}, {"start": 3033.71, "end": 3033.99, "word": " one", "probability": 0.8515625}, {"start": 3033.99, "end": 3034.53, "word": " unit", "probability": 0.9560546875}, {"start": 3034.53, "end": 3034.97, "word": " increasing", "probability": 0.35009765625}, {"start": 3034.97, "end": 3035.39, "word": " X.", "probability": 0.67822265625}, {"start": 3035.99, "end": 3036.17, "word": " So", "probability": 0.93115234375}, {"start": 3036.17, "end": 3036.43, "word": " for", "probability": 0.70361328125}, {"start": 3036.43, "end": 3036.73, "word": " this", "probability": 0.94921875}, {"start": 3036.73, "end": 3037.13, "word": " value", "probability": 0.970703125}, {"start": 3037.13, "end": 3037.39, "word": " which", "probability": 0.54638671875}, {"start": 3037.39, "end": 3037.51, "word": " is", "probability": 0.9384765625}, {"start": 3037.51, "end": 3037.69, "word": " 0", "probability": 0.93359375}, {"start": 3037.69, "end": 3038.51, "word": ".109,", "probability": 0.9794921875}, {"start": 3038.63, "end": 3038.81, "word": " it", "probability": 0.82470703125}, {"start": 3038.81, "end": 3039.13, "word": " means", "probability": 0.9306640625}], "temperature": 1.0}, {"id": 114, "seek": 306714, "start": 3040.54, "end": 3067.14, "text": " This fellow 
tells us that the mean value of a house can increase by this amount, increase by 0.1097, but we have to multiply this value by a thousand because the data was in thousand dollars, so around 109, on average for each additional one square foot of a size. So that means if a house", "tokens": [639, 7177, 5112, 505, 300, 264, 914, 2158, 295, 257, 1782, 393, 3488, 538, 341, 2372, 11, 3488, 538, 1958, 13, 3279, 23247, 11, 457, 321, 362, 281, 12972, 341, 2158, 538, 257, 4714, 570, 264, 1412, 390, 294, 4714, 3808, 11, 370, 926, 1266, 24, 11, 322, 4274, 337, 1184, 4497, 472, 3732, 2671, 295, 257, 2744, 13, 407, 300, 1355, 498, 257, 1782], "avg_logprob": -0.23437499887112415, "compression_ratio": 1.5591397849462365, "no_speech_prob": 0.0, "words": [{"start": 3040.54, "end": 3040.9, "word": " This", "probability": 0.56884765625}, {"start": 3040.9, "end": 3041.12, "word": " fellow", "probability": 0.08734130859375}, {"start": 3041.12, "end": 3041.64, "word": " tells", "probability": 0.8291015625}, {"start": 3041.64, "end": 3042.04, "word": " us", "probability": 0.9384765625}, {"start": 3042.04, "end": 3042.52, "word": " that", "probability": 0.9326171875}, {"start": 3042.52, "end": 3043.28, "word": " the", "probability": 0.75244140625}, {"start": 3043.28, "end": 3043.5, "word": " mean", "probability": 0.9462890625}, {"start": 3043.5, "end": 3043.84, "word": " value", "probability": 0.97314453125}, {"start": 3043.84, "end": 3043.98, "word": " of", "probability": 0.96240234375}, {"start": 3043.98, "end": 3044.08, "word": " a", "probability": 0.96875}, {"start": 3044.08, "end": 3044.52, "word": " house", "probability": 0.8740234375}, {"start": 3044.52, "end": 3045.56, "word": " can", "probability": 0.74609375}, {"start": 3045.56, "end": 3046.04, "word": " increase", "probability": 0.77783203125}, {"start": 3046.04, "end": 3046.42, "word": " by", "probability": 0.9697265625}, {"start": 3046.42, "end": 3046.78, "word": " this", "probability": 0.9365234375}, {"start": 3046.78, "end": 
3047.4, "word": " amount,", "probability": 0.90234375}, {"start": 3047.86, "end": 3048.5, "word": " increase", "probability": 0.418212890625}, {"start": 3048.5, "end": 3048.68, "word": " by", "probability": 0.97021484375}, {"start": 3048.68, "end": 3048.94, "word": " 0", "probability": 0.6611328125}, {"start": 3048.94, "end": 3050.1, "word": ".1097,", "probability": 0.9747721354166666}, {"start": 3051.64, "end": 3051.86, "word": " but", "probability": 0.8935546875}, {"start": 3051.86, "end": 3051.98, "word": " we", "probability": 0.9443359375}, {"start": 3051.98, "end": 3052.14, "word": " have", "probability": 0.94970703125}, {"start": 3052.14, "end": 3052.28, "word": " to", "probability": 0.96826171875}, {"start": 3052.28, "end": 3052.7, "word": " multiply", "probability": 0.91357421875}, {"start": 3052.7, "end": 3053.14, "word": " this", "probability": 0.94287109375}, {"start": 3053.14, "end": 3053.6, "word": " value", "probability": 0.96630859375}, {"start": 3053.6, "end": 3053.8, "word": " by", "probability": 0.9736328125}, {"start": 3053.8, "end": 3053.98, "word": " a", "probability": 0.54296875}, {"start": 3053.98, "end": 3054.28, "word": " thousand", "probability": 0.6904296875}, {"start": 3054.28, "end": 3055.18, "word": " because", "probability": 0.57958984375}, {"start": 3055.18, "end": 3055.48, "word": " the", "probability": 0.92138671875}, {"start": 3055.48, "end": 3055.7, "word": " data", "probability": 0.94580078125}, {"start": 3055.7, "end": 3055.98, "word": " was", "probability": 0.912109375}, {"start": 3055.98, "end": 3056.16, "word": " in", "probability": 0.87646484375}, {"start": 3056.16, "end": 3056.44, "word": " thousand", "probability": 0.84912109375}, {"start": 3056.44, "end": 3056.82, "word": " dollars,", "probability": 0.953125}, {"start": 3057.62, "end": 3057.78, "word": " so", "probability": 0.93212890625}, {"start": 3057.78, "end": 3058.08, "word": " around", "probability": 0.91845703125}, {"start": 3058.08, "end": 3058.88, "word": " 
109,", "probability": 0.81982421875}, {"start": 3059.38, "end": 3060.38, "word": " on", "probability": 0.95361328125}, {"start": 3060.38, "end": 3061.28, "word": " average", "probability": 0.7822265625}, {"start": 3061.28, "end": 3061.66, "word": " for", "probability": 0.72802734375}, {"start": 3061.66, "end": 3062.04, "word": " each", "probability": 0.9453125}, {"start": 3062.04, "end": 3062.62, "word": " additional", "probability": 0.9091796875}, {"start": 3062.62, "end": 3063.08, "word": " one", "probability": 0.8701171875}, {"start": 3063.08, "end": 3063.42, "word": " square", "probability": 0.83203125}, {"start": 3063.42, "end": 3063.9, "word": " foot", "probability": 0.89794921875}, {"start": 3063.9, "end": 3064.1, "word": " of", "probability": 0.95556640625}, {"start": 3064.1, "end": 3064.24, "word": " a", "probability": 0.7421875}, {"start": 3064.24, "end": 3064.54, "word": " size.", "probability": 0.83837890625}, {"start": 3064.98, "end": 3065.16, "word": " So", "probability": 0.9267578125}, {"start": 3065.16, "end": 3065.5, "word": " that", "probability": 0.77880859375}, {"start": 3065.5, "end": 3065.82, "word": " means", "probability": 0.9326171875}, {"start": 3065.82, "end": 3066.54, "word": " if", "probability": 0.8310546875}, {"start": 3066.54, "end": 3066.76, "word": " a", "probability": 0.9453125}, {"start": 3066.76, "end": 3067.14, "word": " house", "probability": 0.8818359375}], "temperature": 1.0}, {"id": 115, "seek": 308999, "start": 3068.51, "end": 3089.99, "text": " So if house size increased by one square foot, then the price increased by around 109 dollars. So for each one unit increased in the size, the selling price of a home increased by 109. 
So that means if the size increased by tenth,", "tokens": [407, 498, 1782, 2744, 6505, 538, 472, 3732, 2671, 11, 550, 264, 3218, 6505, 538, 926, 1266, 24, 3808, 13, 407, 337, 1184, 472, 4985, 6505, 294, 264, 2744, 11, 264, 6511, 3218, 295, 257, 1280, 6505, 538, 1266, 24, 13, 407, 300, 1355, 498, 264, 2744, 6505, 538, 27269, 11], "avg_logprob": -0.2391826963195434, "compression_ratio": 1.673913043478261, "no_speech_prob": 0.0, "words": [{"start": 3068.51, "end": 3068.77, "word": " So", "probability": 0.2210693359375}, {"start": 3068.77, "end": 3068.93, "word": " if", "probability": 0.8310546875}, {"start": 3068.93, "end": 3069.23, "word": " house", "probability": 0.72265625}, {"start": 3069.23, "end": 3069.53, "word": " size", "probability": 0.80517578125}, {"start": 3069.53, "end": 3069.99, "word": " increased", "probability": 0.425537109375}, {"start": 3069.99, "end": 3070.27, "word": " by", "probability": 0.96484375}, {"start": 3070.27, "end": 3070.69, "word": " one", "probability": 0.59619140625}, {"start": 3070.69, "end": 3072.01, "word": " square", "probability": 0.849609375}, {"start": 3072.01, "end": 3072.45, "word": " foot,", "probability": 0.89306640625}, {"start": 3072.77, "end": 3073.25, "word": " then", "probability": 0.83203125}, {"start": 3073.25, "end": 3073.49, "word": " the", "probability": 0.912109375}, {"start": 3073.49, "end": 3073.89, "word": " price", "probability": 0.9013671875}, {"start": 3073.89, "end": 3074.31, "word": " increased", "probability": 0.80859375}, {"start": 3074.31, "end": 3074.63, "word": " by", "probability": 0.96923828125}, {"start": 3074.63, "end": 3075.13, "word": " around", "probability": 0.9150390625}, {"start": 3075.13, "end": 3076.49, "word": " 109", "probability": 0.7335205078125}, {"start": 3076.49, "end": 3076.85, "word": " dollars.", "probability": 0.568359375}, {"start": 3077.53, "end": 3077.95, "word": " So", "probability": 0.896484375}, {"start": 3077.95, "end": 3078.19, "word": " for", "probability": 
0.86962890625}, {"start": 3078.19, "end": 3078.47, "word": " each", "probability": 0.9462890625}, {"start": 3078.47, "end": 3078.73, "word": " one", "probability": 0.88720703125}, {"start": 3078.73, "end": 3079.01, "word": " unit", "probability": 0.93505859375}, {"start": 3079.01, "end": 3079.53, "word": " increased", "probability": 0.923828125}, {"start": 3079.53, "end": 3079.77, "word": " in", "probability": 0.489013671875}, {"start": 3079.77, "end": 3079.93, "word": " the", "probability": 0.85498046875}, {"start": 3079.93, "end": 3080.31, "word": " size,", "probability": 0.84521484375}, {"start": 3080.99, "end": 3081.37, "word": " the", "probability": 0.8994140625}, {"start": 3081.37, "end": 3081.65, "word": " selling", "probability": 0.8916015625}, {"start": 3081.65, "end": 3082.03, "word": " price", "probability": 0.89453125}, {"start": 3082.03, "end": 3082.21, "word": " of", "probability": 0.9091796875}, {"start": 3082.21, "end": 3082.31, "word": " a", "probability": 0.68408203125}, {"start": 3082.31, "end": 3082.55, "word": " home", "probability": 0.8935546875}, {"start": 3082.55, "end": 3082.99, "word": " increased", "probability": 0.90869140625}, {"start": 3082.99, "end": 3083.37, "word": " by", "probability": 0.9736328125}, {"start": 3083.37, "end": 3085.03, "word": " 109.", "probability": 0.9228515625}, {"start": 3085.79, "end": 3086.51, "word": " So", "probability": 0.9423828125}, {"start": 3086.51, "end": 3086.73, "word": " that", "probability": 0.9208984375}, {"start": 3086.73, "end": 3087.07, "word": " means", "probability": 0.9326171875}, {"start": 3087.07, "end": 3088.53, "word": " if", "probability": 0.76220703125}, {"start": 3088.53, "end": 3088.75, "word": " the", "probability": 0.9150390625}, {"start": 3088.75, "end": 3088.99, "word": " size", "probability": 0.853515625}, {"start": 3088.99, "end": 3089.37, "word": " increased", "probability": 0.75}, {"start": 3089.37, "end": 3089.59, "word": " by", "probability": 0.97216796875}, {"start": 
3089.59, "end": 3089.99, "word": " tenth,", "probability": 0.42626953125}], "temperature": 1.0}, {"id": 116, "seek": 311930, "start": 3091.92, "end": 3119.3, "text": " It means the selling price increased by 1097.7. Make sense? So for each one unit increase in its size, the house selling price increased by 109. So we have to multiply this value by the unit we have. Because Y was 8000 dollars.", "tokens": [467, 1355, 264, 6511, 3218, 6505, 538, 1266, 23247, 13, 22, 13, 4387, 2020, 30, 407, 337, 1184, 472, 4985, 3488, 294, 1080, 2744, 11, 264, 1782, 6511, 3218, 6505, 538, 1266, 24, 13, 407, 321, 362, 281, 12972, 341, 2158, 538, 264, 4985, 321, 362, 13, 1436, 398, 390, 1649, 1360, 3808, 13], "avg_logprob": -0.35994318398562347, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 3091.92, "end": 3092.76, "word": " It", "probability": 0.07073974609375}, {"start": 3092.76, "end": 3093.36, "word": " means", "probability": 0.5625}, {"start": 3093.36, "end": 3094.26, "word": " the", "probability": 0.513671875}, {"start": 3094.26, "end": 3094.5, "word": " selling", "probability": 0.88525390625}, {"start": 3094.5, "end": 3094.92, "word": " price", "probability": 0.89892578125}, {"start": 3094.92, "end": 3095.46, "word": " increased", "probability": 0.5927734375}, {"start": 3095.46, "end": 3095.86, "word": " by", "probability": 0.97412109375}, {"start": 3095.86, "end": 3099.4, "word": " 1097", "probability": 0.6719970703125}, {"start": 3099.4, "end": 3099.98, "word": ".7.", "probability": 0.731201171875}, {"start": 3102.16, "end": 3103.0, "word": " Make", "probability": 0.46240234375}, {"start": 3103.0, "end": 3103.36, "word": " sense?", "probability": 0.81201171875}, {"start": 3103.72, "end": 3104.12, "word": " So", "probability": 0.68603515625}, {"start": 3104.12, "end": 3104.36, "word": " for", "probability": 0.7841796875}, {"start": 3104.36, "end": 3104.6, "word": " each", "probability": 0.9560546875}, {"start": 3104.6, "end": 3104.84, "word": " one", 
"probability": 0.85205078125}, {"start": 3104.84, "end": 3105.22, "word": " unit", "probability": 0.95556640625}, {"start": 3105.22, "end": 3106.3, "word": " increase", "probability": 0.3427734375}, {"start": 3106.3, "end": 3106.6, "word": " in", "probability": 0.9072265625}, {"start": 3106.6, "end": 3106.8, "word": " its", "probability": 0.58984375}, {"start": 3106.8, "end": 3107.24, "word": " size,", "probability": 0.81689453125}, {"start": 3107.5, "end": 3107.64, "word": " the", "probability": 0.81103515625}, {"start": 3107.64, "end": 3108.08, "word": " house", "probability": 0.85302734375}, {"start": 3108.08, "end": 3109.06, "word": " selling", "probability": 0.80810546875}, {"start": 3109.06, "end": 3109.5, "word": " price", "probability": 0.798828125}, {"start": 3109.5, "end": 3109.96, "word": " increased", "probability": 0.395751953125}, {"start": 3109.96, "end": 3110.3, "word": " by", "probability": 0.97021484375}, {"start": 3110.3, "end": 3111.46, "word": " 109.", "probability": 0.946533203125}, {"start": 3111.6, "end": 3111.9, "word": " So", "probability": 0.62646484375}, {"start": 3111.9, "end": 3112.06, "word": " we", "probability": 0.80517578125}, {"start": 3112.06, "end": 3112.18, "word": " have", "probability": 0.935546875}, {"start": 3112.18, "end": 3112.34, "word": " to", "probability": 0.9638671875}, {"start": 3112.34, "end": 3112.76, "word": " multiply", "probability": 0.8818359375}, {"start": 3112.76, "end": 3113.24, "word": " this", "probability": 0.9365234375}, {"start": 3113.24, "end": 3113.72, "word": " value", "probability": 0.95556640625}, {"start": 3113.72, "end": 3114.12, "word": " by", "probability": 0.96240234375}, {"start": 3114.12, "end": 3115.34, "word": " the", "probability": 0.859375}, {"start": 3115.34, "end": 3115.54, "word": " unit", "probability": 0.94775390625}, {"start": 3115.54, "end": 3115.94, "word": " we", "probability": 0.453125}, {"start": 3115.94, "end": 3115.94, "word": " have.", "probability": 0.923828125}, 
{"start": 3117.04, "end": 3117.46, "word": " Because", "probability": 0.8701171875}, {"start": 3117.46, "end": 3117.74, "word": " Y", "probability": 0.83837890625}, {"start": 3117.74, "end": 3118.26, "word": " was", "probability": 0.90869140625}, {"start": 3118.26, "end": 3119.0, "word": " 8000", "probability": 0.54388427734375}, {"start": 3119.0, "end": 3119.3, "word": " dollars.", "probability": 0.61474609375}], "temperature": 1.0}, {"id": 117, "seek": 315112, "start": 3121.74, "end": 3151.12, "text": " Here if you go back to the previous data we have, the data was house price wasn't thousand dollars, so we have to multiply the slope by a thousand. Now we can use also the regression equation line to make some prediction.", "tokens": [1692, 498, 291, 352, 646, 281, 264, 3894, 1412, 321, 362, 11, 264, 1412, 390, 1782, 3218, 2067, 380, 4714, 3808, 11, 370, 321, 362, 281, 12972, 264, 13525, 538, 257, 4714, 13, 823, 321, 393, 764, 611, 264, 24590, 5367, 1622, 281, 652, 512, 17630, 13], "avg_logprob": -0.19612630332509676, "compression_ratio": 1.510204081632653, "no_speech_prob": 0.0, "words": [{"start": 3121.74, "end": 3122.04, "word": " Here", "probability": 0.5625}, {"start": 3122.04, "end": 3122.18, "word": " if", "probability": 0.60107421875}, {"start": 3122.18, "end": 3122.28, "word": " you", "probability": 0.92431640625}, {"start": 3122.28, "end": 3122.48, "word": " go", "probability": 0.93310546875}, {"start": 3122.48, "end": 3122.96, "word": " back", "probability": 0.88330078125}, {"start": 3122.96, "end": 3123.22, "word": " to", "probability": 0.79736328125}, {"start": 3123.22, "end": 3123.3, "word": " the", "probability": 0.88720703125}, {"start": 3123.3, "end": 3123.62, "word": " previous", "probability": 0.86669921875}, {"start": 3123.62, "end": 3123.94, "word": " data", "probability": 0.92578125}, {"start": 3123.94, "end": 3124.12, "word": " we", "probability": 0.86865234375}, {"start": 3124.12, "end": 3124.42, "word": " have,", "probability": 0.8515625}, 
{"start": 3125.64, "end": 3125.8, "word": " the", "probability": 0.87255859375}, {"start": 3125.8, "end": 3126.08, "word": " data", "probability": 0.81591796875}, {"start": 3126.08, "end": 3126.6, "word": " was", "probability": 0.927734375}, {"start": 3126.6, "end": 3128.2, "word": " house", "probability": 0.7626953125}, {"start": 3128.2, "end": 3128.66, "word": " price", "probability": 0.89306640625}, {"start": 3128.66, "end": 3129.08, "word": " wasn't", "probability": 0.756591796875}, {"start": 3129.08, "end": 3129.5, "word": " thousand", "probability": 0.3896484375}, {"start": 3129.5, "end": 3129.94, "word": " dollars,", "probability": 0.9130859375}, {"start": 3130.38, "end": 3130.58, "word": " so", "probability": 0.93115234375}, {"start": 3130.58, "end": 3130.7, "word": " we", "probability": 0.908203125}, {"start": 3130.7, "end": 3130.88, "word": " have", "probability": 0.94140625}, {"start": 3130.88, "end": 3131.12, "word": " to", "probability": 0.96728515625}, {"start": 3131.12, "end": 3131.74, "word": " multiply", "probability": 0.90087890625}, {"start": 3131.74, "end": 3133.28, "word": " the", "probability": 0.87548828125}, {"start": 3133.28, "end": 3133.66, "word": " slope", "probability": 0.9052734375}, {"start": 3133.66, "end": 3134.2, "word": " by", "probability": 0.97216796875}, {"start": 3134.2, "end": 3135.46, "word": " a", "probability": 0.80224609375}, {"start": 3135.46, "end": 3135.84, "word": " thousand.", "probability": 0.84130859375}, {"start": 3139.48, "end": 3139.82, "word": " Now", "probability": 0.34814453125}, {"start": 3139.82, "end": 3143.72, "word": " we", "probability": 0.75244140625}, {"start": 3143.72, "end": 3144.02, "word": " can", "probability": 0.94580078125}, {"start": 3144.02, "end": 3144.28, "word": " use", "probability": 0.8740234375}, {"start": 3144.28, "end": 3144.84, "word": " also", "probability": 0.85986328125}, {"start": 3144.84, "end": 3147.44, "word": " the", "probability": 0.82373046875}, {"start": 3147.44, "end": 
3147.9, "word": " regression", "probability": 0.97119140625}, {"start": 3147.9, "end": 3148.42, "word": " equation", "probability": 0.9814453125}, {"start": 3148.42, "end": 3148.98, "word": " line", "probability": 0.92431640625}, {"start": 3148.98, "end": 3150.12, "word": " to", "probability": 0.95068359375}, {"start": 3150.12, "end": 3150.38, "word": " make", "probability": 0.9404296875}, {"start": 3150.38, "end": 3150.66, "word": " some", "probability": 0.90087890625}, {"start": 3150.66, "end": 3151.12, "word": " prediction.", "probability": 0.78857421875}], "temperature": 1.0}, {"id": 118, "seek": 317589, "start": 3152.75, "end": 3175.89, "text": " For example, we can predict the price of a house with 2000 square feet. You just plug this value. So we have 98.25 plus 0.109 times 2000. That will give the house price.", "tokens": [1171, 1365, 11, 321, 393, 6069, 264, 3218, 295, 257, 1782, 365, 8132, 3732, 3521, 13, 509, 445, 5452, 341, 2158, 13, 407, 321, 362, 20860, 13, 6074, 1804, 1958, 13, 3279, 24, 1413, 8132, 13, 663, 486, 976, 264, 1782, 3218, 13], "avg_logprob": -0.17675781487063927, "compression_ratio": 1.25, "no_speech_prob": 0.0, "words": [{"start": 3152.75, "end": 3152.99, "word": " For", "probability": 0.73583984375}, {"start": 3152.99, "end": 3153.41, "word": " example,", "probability": 0.9580078125}, {"start": 3154.15, "end": 3154.43, "word": " we", "probability": 0.89208984375}, {"start": 3154.43, "end": 3154.69, "word": " can", "probability": 0.93017578125}, {"start": 3154.69, "end": 3155.11, "word": " predict", "probability": 0.76513671875}, {"start": 3155.11, "end": 3155.39, "word": " the", "probability": 0.89892578125}, {"start": 3155.39, "end": 3155.79, "word": " price", "probability": 0.92236328125}, {"start": 3155.79, "end": 3155.95, "word": " of", "probability": 0.96533203125}, {"start": 3155.95, "end": 3156.11, "word": " a", "probability": 0.95166015625}, {"start": 3156.11, "end": 3156.71, "word": " house", "probability": 0.86083984375}, 
{"start": 3156.71, "end": 3157.13, "word": " with", "probability": 0.8876953125}, {"start": 3157.13, "end": 3157.83, "word": " 2000", "probability": 0.64404296875}, {"start": 3157.83, "end": 3159.11, "word": " square", "probability": 0.6484375}, {"start": 3159.11, "end": 3159.49, "word": " feet.", "probability": 0.92431640625}, {"start": 3161.23, "end": 3161.87, "word": " You", "probability": 0.88818359375}, {"start": 3161.87, "end": 3162.29, "word": " just", "probability": 0.89013671875}, {"start": 3162.29, "end": 3162.89, "word": " plug", "probability": 0.86181640625}, {"start": 3162.89, "end": 3163.15, "word": " this", "probability": 0.89599609375}, {"start": 3163.15, "end": 3163.59, "word": " value.", "probability": 0.97119140625}, {"start": 3166.31, "end": 3166.55, "word": " So", "probability": 0.8798828125}, {"start": 3166.55, "end": 3166.67, "word": " we", "probability": 0.63671875}, {"start": 3166.67, "end": 3166.83, "word": " have", "probability": 0.94677734375}, {"start": 3166.83, "end": 3167.13, "word": " 98", "probability": 0.92724609375}, {"start": 3167.13, "end": 3167.95, "word": ".25", "probability": 0.97509765625}, {"start": 3167.95, "end": 3168.27, "word": " plus", "probability": 0.70703125}, {"start": 3168.27, "end": 3168.53, "word": " 0", "probability": 0.72265625}, {"start": 3168.53, "end": 3169.53, "word": ".109", "probability": 0.9881184895833334}, {"start": 3169.53, "end": 3170.51, "word": " times", "probability": 0.82080078125}, {"start": 3170.51, "end": 3171.07, "word": " 2000.", "probability": 0.83154296875}, {"start": 3171.57, "end": 3171.99, "word": " That", "probability": 0.87890625}, {"start": 3171.99, "end": 3172.21, "word": " will", "probability": 0.88134765625}, {"start": 3172.21, "end": 3172.53, "word": " give", "probability": 0.8701171875}, {"start": 3172.53, "end": 3174.91, "word": " the", "probability": 0.84423828125}, {"start": 3174.91, "end": 3175.33, "word": " house", "probability": 0.89013671875}, {"start": 3175.33, "end": 
3175.89, "word": " price.", "probability": 0.8720703125}], "temperature": 1.0}, {"id": 119, "seek": 320184, "start": 3177.92, "end": 3201.84, "text": " for 2,000 square feet. So that means the predicted price for a house with 2,000 square feet is this amount multiplied by 1,000. So that will give $317,850. So that's how can we make predictions for why", "tokens": [337, 568, 11, 1360, 3732, 3521, 13, 407, 300, 1355, 264, 19147, 3218, 337, 257, 1782, 365, 568, 11, 1360, 3732, 3521, 307, 341, 2372, 17207, 538, 502, 11, 1360, 13, 407, 300, 486, 976, 1848, 18, 7773, 11, 23, 2803, 13, 407, 300, 311, 577, 393, 321, 652, 21264, 337, 983], "avg_logprob": -0.19649174190917104, "compression_ratio": 1.4225352112676057, "no_speech_prob": 0.0, "words": [{"start": 3177.92, "end": 3178.4, "word": " for", "probability": 0.59912109375}, {"start": 3178.4, "end": 3179.12, "word": " 2", "probability": 0.2509765625}, {"start": 3179.12, "end": 3179.56, "word": ",000", "probability": 0.931396484375}, {"start": 3179.56, "end": 3180.0, "word": " square", "probability": 0.85205078125}, {"start": 3180.0, "end": 3180.42, "word": " feet.", "probability": 0.94921875}, {"start": 3181.38, "end": 3181.6, "word": " So", "probability": 0.88525390625}, {"start": 3181.6, "end": 3181.84, "word": " that", "probability": 0.81005859375}, {"start": 3181.84, "end": 3182.06, "word": " means", "probability": 0.9345703125}, {"start": 3182.06, "end": 3182.22, "word": " the", "probability": 0.85205078125}, {"start": 3182.22, "end": 3182.66, "word": " predicted", "probability": 0.841796875}, {"start": 3182.66, "end": 3183.32, "word": " price", "probability": 0.91552734375}, {"start": 3183.32, "end": 3183.66, "word": " for", "probability": 0.94287109375}, {"start": 3183.66, "end": 3183.8, "word": " a", "probability": 0.96875}, {"start": 3183.8, "end": 3184.12, "word": " house", "probability": 0.8828125}, {"start": 3184.12, "end": 3184.52, "word": " with", "probability": 0.89892578125}, {"start": 3184.52, "end": 
3185.92, "word": " 2", "probability": 0.91455078125}, {"start": 3185.92, "end": 3186.22, "word": ",000", "probability": 0.99853515625}, {"start": 3186.22, "end": 3186.6, "word": " square", "probability": 0.9033203125}, {"start": 3186.6, "end": 3186.9, "word": " feet", "probability": 0.9599609375}, {"start": 3186.9, "end": 3187.22, "word": " is", "probability": 0.89501953125}, {"start": 3187.22, "end": 3187.52, "word": " this", "probability": 0.94140625}, {"start": 3187.52, "end": 3187.92, "word": " amount", "probability": 0.90087890625}, {"start": 3187.92, "end": 3188.56, "word": " multiplied", "probability": 0.6650390625}, {"start": 3188.56, "end": 3189.12, "word": " by", "probability": 0.974609375}, {"start": 3189.12, "end": 3190.18, "word": " 1", "probability": 0.83251953125}, {"start": 3190.18, "end": 3190.76, "word": ",000.", "probability": 0.986328125}, {"start": 3190.88, "end": 3191.16, "word": " So", "probability": 0.94970703125}, {"start": 3191.16, "end": 3191.38, "word": " that", "probability": 0.87939453125}, {"start": 3191.38, "end": 3191.56, "word": " will", "probability": 0.40771484375}, {"start": 3191.56, "end": 3191.82, "word": " give", "probability": 0.66162109375}, {"start": 3191.82, "end": 3194.08, "word": " $317", "probability": 0.7435709635416666}, {"start": 3194.08, "end": 3196.32, "word": ",850.", "probability": 0.9384765625}, {"start": 3197.04, "end": 3197.82, "word": " So", "probability": 0.95068359375}, {"start": 3197.82, "end": 3198.14, "word": " that's", "probability": 0.952392578125}, {"start": 3198.14, "end": 3198.26, "word": " how", "probability": 0.93701171875}, {"start": 3198.26, "end": 3198.48, "word": " can", "probability": 0.7626953125}, {"start": 3198.48, "end": 3198.64, "word": " we", "probability": 0.95703125}, {"start": 3198.64, "end": 3199.0, "word": " make", "probability": 0.94140625}, {"start": 3199.0, "end": 3199.78, "word": " predictions", "probability": 0.6435546875}, {"start": 3199.78, "end": 3201.18, "word": " for", 
"probability": 0.93603515625}, {"start": 3201.18, "end": 3201.84, "word": " why", "probability": 0.483642578125}], "temperature": 1.0}, {"id": 120, "seek": 323158, "start": 3202.7, "end": 3231.58, "text": " I mean for house price at any given value of its size. So for this data, we have a house with 2000 square feet. So we predict its price to be around 317,850. I will stop at coefficient of correlation. I will stop at", "tokens": [286, 914, 337, 1782, 3218, 412, 604, 2212, 2158, 295, 1080, 2744, 13, 407, 337, 341, 1412, 11, 321, 362, 257, 1782, 365, 8132, 3732, 3521, 13, 407, 321, 6069, 1080, 3218, 281, 312, 926, 805, 7773, 11, 23, 2803, 13, 286, 486, 1590, 412, 17619, 295, 20009, 13, 286, 486, 1590, 412], "avg_logprob": -0.17780670909969895, "compression_ratio": 1.4304635761589404, "no_speech_prob": 0.0, "words": [{"start": 3202.7, "end": 3202.96, "word": " I", "probability": 0.66064453125}, {"start": 3202.96, "end": 3203.12, "word": " mean", "probability": 0.9443359375}, {"start": 3203.12, "end": 3203.42, "word": " for", "probability": 0.80224609375}, {"start": 3203.42, "end": 3204.24, "word": " house", "probability": 0.8037109375}, {"start": 3204.24, "end": 3204.78, "word": " price", "probability": 0.927734375}, {"start": 3204.78, "end": 3205.82, "word": " at", "probability": 0.89599609375}, {"start": 3205.82, "end": 3206.14, "word": " any", "probability": 0.9130859375}, {"start": 3206.14, "end": 3206.36, "word": " given", "probability": 0.89892578125}, {"start": 3206.36, "end": 3206.84, "word": " value", "probability": 0.97021484375}, {"start": 3206.84, "end": 3207.22, "word": " of", "probability": 0.955078125}, {"start": 3207.22, "end": 3207.48, "word": " its", "probability": 0.84228515625}, {"start": 3207.48, "end": 3207.9, "word": " size.", "probability": 0.8486328125}, {"start": 3208.7, "end": 3208.82, "word": " So", "probability": 0.791015625}, {"start": 3208.82, "end": 3209.08, "word": " for", "probability": 0.70458984375}, {"start": 3209.08, "end": 
3209.36, "word": " this", "probability": 0.947265625}, {"start": 3209.36, "end": 3209.8, "word": " data,", "probability": 0.91845703125}, {"start": 3210.1, "end": 3210.26, "word": " we", "probability": 0.9521484375}, {"start": 3210.26, "end": 3210.58, "word": " have", "probability": 0.9521484375}, {"start": 3210.58, "end": 3211.68, "word": " a", "probability": 0.9638671875}, {"start": 3211.68, "end": 3212.02, "word": " house", "probability": 0.8798828125}, {"start": 3212.02, "end": 3212.4, "word": " with", "probability": 0.896484375}, {"start": 3212.4, "end": 3213.76, "word": " 2000", "probability": 0.541015625}, {"start": 3213.76, "end": 3214.34, "word": " square", "probability": 0.765625}, {"start": 3214.34, "end": 3214.72, "word": " feet.", "probability": 0.96044921875}, {"start": 3215.12, "end": 3215.76, "word": " So", "probability": 0.947265625}, {"start": 3215.76, "end": 3216.02, "word": " we", "probability": 0.87890625}, {"start": 3216.02, "end": 3216.5, "word": " predict", "probability": 0.91796875}, {"start": 3216.5, "end": 3217.1, "word": " its", "probability": 0.748046875}, {"start": 3217.1, "end": 3217.96, "word": " price", "probability": 0.9208984375}, {"start": 3217.96, "end": 3218.24, "word": " to", "probability": 0.958984375}, {"start": 3218.24, "end": 3218.5, "word": " be", "probability": 0.955078125}, {"start": 3218.5, "end": 3219.14, "word": " around", "probability": 0.92041015625}, {"start": 3219.14, "end": 3221.14, "word": " 317", "probability": 0.864013671875}, {"start": 3221.14, "end": 3223.18, "word": ",850.", "probability": 0.8680013020833334}, {"start": 3224.22, "end": 3224.92, "word": " I", "probability": 0.962890625}, {"start": 3224.92, "end": 3225.02, "word": " will", "probability": 0.8896484375}, {"start": 3225.02, "end": 3225.4, "word": " stop", "probability": 0.91845703125}, {"start": 3225.4, "end": 3226.5, "word": " at", "probability": 0.93359375}, {"start": 3226.5, "end": 3227.78, "word": " coefficient", "probability": 
0.44580078125}, {"start": 3227.78, "end": 3228.38, "word": " of", "probability": 0.953125}, {"start": 3228.38, "end": 3228.9, "word": " correlation.", "probability": 0.955078125}, {"start": 3229.98, "end": 3230.8, "word": " I", "probability": 0.88720703125}, {"start": 3230.8, "end": 3230.92, "word": " will", "probability": 0.89501953125}, {"start": 3230.92, "end": 3231.26, "word": " stop", "probability": 0.9248046875}, {"start": 3231.26, "end": 3231.58, "word": " at", "probability": 0.93603515625}], "temperature": 1.0}, {"id": 121, "seek": 323799, "start": 3232.03, "end": 3237.99, "text": " coefficient of determination for next time that's all", "tokens": [17619, 295, 18432, 337, 958, 565, 300, 311, 439], "avg_logprob": -0.30976563692092896, "compression_ratio": 0.9473684210526315, "no_speech_prob": 0.0, "words": [{"start": 3232.03, "end": 3232.59, "word": " coefficient", "probability": 0.59326171875}, {"start": 3232.59, "end": 3232.89, "word": " of", "probability": 0.9111328125}, {"start": 3232.89, "end": 3233.39, "word": " determination", "probability": 0.958984375}, {"start": 3233.39, "end": 3233.65, "word": " for", "probability": 0.9111328125}, {"start": 3233.65, "end": 3233.87, "word": " next", "probability": 0.861328125}, {"start": 3233.87, "end": 3234.19, "word": " time", "probability": 0.67724609375}, {"start": 3234.19, "end": 3237.77, "word": " that's", "probability": 0.57611083984375}, {"start": 3237.77, "end": 3237.99, "word": " all", "probability": 0.931640625}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3241.4825, "duration_after_vad": 3089.4459687499866} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..313bd5a76f92758205fe4efb85103947a3233260 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/GyiivmJglvM_raw.srt @@ -0,0 +1,2732 @@ +1 
+00:00:11,020 --> 00:00:13,920 +The last chapter we are going to talk in this + +2 +00:00:13,920 --> 00:00:17,820 +semester is correlation and simple linearization. + +3 +00:00:18,380 --> 00:00:23,300 +So we are going to explain two types in chapter + +4 +00:00:23,300 --> 00:00:29,280 +12. One is called correlation. And the other type + +5 +00:00:29,280 --> 00:00:33,500 +is simple linear regression. Maybe this chapter + +6 +00:00:33,500 --> 00:00:40,020 +I'm going to spend about two lectures in order to + +7 +00:00:40,020 --> 00:00:45,000 +cover these objectives. The first objective is to + +8 +00:00:45,000 --> 00:00:48,810 +calculate the coefficient of correlation. The + +9 +00:00:48,810 --> 00:00:51,210 +second objective, the meaning of the regression + +10 +00:00:51,210 --> 00:00:55,590 +coefficients beta 0 and beta 1. And the last + +11 +00:00:55,590 --> 00:00:58,710 +objective is how to use regression analysis to + +12 +00:00:58,710 --> 00:01:03,030 +predict the value of dependent variable based on + +13 +00:01:03,030 --> 00:01:06,010 +an independent variable. It looks like that we + +14 +00:01:06,010 --> 00:01:10,590 +have discussed objective number one in chapter + +15 +00:01:10,590 --> 00:01:16,470 +three. So calculation of the correlation + +16 +00:01:16,470 --> 00:01:20,740 +coefficient is done in chapter three, but here + +17 +00:01:20,740 --> 00:01:26,060 +we'll give some details about correlation also. A + +18 +00:01:26,060 --> 00:01:28,480 +scatter plot can be used to show the relationship + +19 +00:01:28,480 --> 00:01:31,540 +between two variables. For example, imagine that + +20 +00:01:31,540 --> 00:01:35,400 +we have a random sample of 10 children. + +21 +00:01:37,800 --> 00:01:47,940 +And we have data on their weights and ages. And we + +22 +00:01:47,940 --> 00:01:51,640 +are interested to examine the relationship between + +23 +00:01:51,640 --> 00:01:58,400 +weights and age. 
For example, suppose child number + +24 +00:01:58,400 --> 00:02:06,260 +one, his + +25 +00:02:06,260 --> 00:02:12,060 +or her age is two years with weight, for example, + +26 +00:02:12,200 --> 00:02:12,880 +eight kilograms. + +27 +00:02:17,680 --> 00:02:21,880 +His weight or her weight is four years, and his or + +28 +00:02:21,880 --> 00:02:24,500 +her weight is, for example, 15 kilograms, and so + +29 +00:02:24,500 --> 00:02:29,680 +on. And again, we are interested to examine the + +30 +00:02:29,680 --> 00:02:32,640 +relationship between age and weight. Maybe they + +31 +00:02:32,640 --> 00:02:37,400 +exist sometimes. positive relationship between the + +32 +00:02:37,400 --> 00:02:41,100 +two variables that means if one variable increases + +33 +00:02:41,100 --> 00:02:45,260 +the other one also increase if one variable + +34 +00:02:45,260 --> 00:02:47,980 +increases the other will also decrease so they + +35 +00:02:47,980 --> 00:02:52,980 +have the same direction either up or down so we + +36 +00:02:52,980 --> 00:02:58,140 +have to know number one the form of the + +37 +00:02:58,140 --> 00:03:02,140 +relationship this one could be linear here we + +38 +00:03:02,140 --> 00:03:06,890 +focus just on linear relationship between X and Y. + +39 +00:03:08,050 --> 00:03:13,730 +The second, we have to know the direction of the + +40 +00:03:13,730 --> 00:03:21,270 +relationship. This direction might be positive or + +41 +00:03:21,270 --> 00:03:22,350 +negative relationship. 
+ +42 +00:03:25,150 --> 00:03:27,990 +In addition to that, we have to know the strength + +43 +00:03:27,990 --> 00:03:33,760 +of the relationship between the two variables of + +44 +00:03:33,760 --> 00:03:37,320 +interest the strength can be classified into three + +45 +00:03:37,320 --> 00:03:46,480 +categories either strong, moderate or there exists + +46 +00:03:46,480 --> 00:03:50,580 +a weak relationship so it could be positive + +47 +00:03:50,580 --> 00:03:53,320 +-strong, positive-moderate or positive-weak, the + +48 +00:03:53,320 --> 00:03:58,360 +same for negative so by using scatter plot we can + +49 +00:03:58,360 --> 00:04:02,530 +determine the form either linear or non-linear, + +50 +00:04:02,690 --> 00:04:06,130 +but here we are focusing on just linear + +51 +00:04:06,130 --> 00:04:10,310 +relationship. Also, we can determine the direction + +52 +00:04:10,310 --> 00:04:12,870 +of the relationship. We can say there exists + +53 +00:04:12,870 --> 00:04:15,910 +positive or negative based on the scatter plot. + +54 +00:04:16,710 --> 00:04:19,530 +Also, we can know the strength of the + +55 +00:04:19,530 --> 00:04:23,130 +relationship, either strong, moderate or weak. For + +56 +00:04:23,130 --> 00:04:29,810 +example, suppose we have again weights and ages. + +57 +00:04:30,390 --> 00:04:33,590 +And we know that there are two types of variables + +58 +00:04:33,590 --> 00:04:36,710 +in this case. One is called dependent and the + +59 +00:04:36,710 --> 00:04:41,330 +other is independent. So if we, as we explained + +60 +00:04:41,330 --> 00:04:47,890 +before, is the dependent variable and A is + +61 +00:04:47,890 --> 00:04:48,710 +independent variable. 
+ +62 +00:04:52,690 --> 00:04:57,270 +Always dependent + +63 +00:04:57,270 --> 00:04:57,750 +variable + +64 +00:05:00,400 --> 00:05:05,560 +is denoted by Y and always on the vertical axis so + +65 +00:05:05,560 --> 00:05:11,300 +here we have weight and independent variable is + +66 +00:05:11,300 --> 00:05:17,760 +denoted by X and X is in the X axis or horizontal + +67 +00:05:17,760 --> 00:05:26,300 +axis now scatter plot for example here child with + +68 +00:05:26,300 --> 00:05:30,820 +age 2 years his weight is 8 So two years, for + +69 +00:05:30,820 --> 00:05:36,760 +example, this is eight. So this star represents + +70 +00:05:36,760 --> 00:05:42,320 +the first pair of observation, age of two and + +71 +00:05:42,320 --> 00:05:46,820 +weight of eight. The other child, his weight is + +72 +00:05:46,820 --> 00:05:52,860 +four years, and the corresponding weight is 15. + +73 +00:05:53,700 --> 00:05:58,970 +For example, this value is 15. The same for the + +74 +00:05:58,970 --> 00:06:02,430 +other points. Here we can know the direction. + +75 +00:06:04,910 --> 00:06:10,060 +In this case they exist. Positive. Form is linear. + +76 +00:06:12,100 --> 00:06:16,860 +Strong or weak or moderate depends on how these + +77 +00:06:16,860 --> 00:06:20,260 +values are close to the straight line. Closer + +78 +00:06:20,260 --> 00:06:24,380 +means stronger. So if the points are closer to the + +79 +00:06:24,380 --> 00:06:26,620 +straight line, it means there exists stronger + +80 +00:06:26,620 --> 00:06:30,800 +relationship between the two variables. So closer + +81 +00:06:30,800 --> 00:06:34,480 +means stronger, either positive or negative. In + +82 +00:06:34,480 --> 00:06:37,580 +this case, there exists positive. Now for the + +83 +00:06:37,580 --> 00:06:42,360 +negative association or relationship, we have the + +84 +00:06:42,360 --> 00:06:46,060 +other direction, it could be this one. 
So in this + +85 +00:06:46,060 --> 00:06:49,460 +case there exists linear but negative + +86 +00:06:49,460 --> 00:06:51,900 +relationship, and this negative could be positive + +87 +00:06:51,900 --> 00:06:56,100 +or negative, it depends on the points. So it's + +88 +00:06:56,100 --> 00:07:02,660 +positive relationship. The other direction is + +89 +00:07:02,660 --> 00:07:06,460 +negative. So the points, if the points are closed, + +90 +00:07:06,820 --> 00:07:10,160 +then we can say there exists strong negative + +91 +00:07:10,160 --> 00:07:14,440 +relationship. So by using scatter plot, we can + +92 +00:07:14,440 --> 00:07:17,280 +determine all of these. + +93 +00:07:20,840 --> 00:07:24,460 +and direction and strength now here the two + +94 +00:07:24,460 --> 00:07:27,060 +variables we are talking about are numerical + +95 +00:07:27,060 --> 00:07:30,480 +variables so the two variables here are numerical + +96 +00:07:30,480 --> 00:07:35,220 +variables so we are talking about quantitative + +97 +00:07:35,220 --> 00:07:39,850 +variables but remember in chapter 11 We talked + +98 +00:07:39,850 --> 00:07:43,150 +about the relationship between two qualitative + +99 +00:07:43,150 --> 00:07:47,450 +variables. So we use chi-square test. Here we are + +100 +00:07:47,450 --> 00:07:49,630 +talking about something different. We are talking + +101 +00:07:49,630 --> 00:07:52,890 +about numerical variables. So we can use scatter + +102 +00:07:52,890 --> 00:07:58,510 +plot, number one. Next correlation analysis is + +103 +00:07:58,510 --> 00:08:02,090 +used to measure the strength of the association + +104 +00:08:02,090 --> 00:08:05,190 +between two variables. And here again, we are just + +105 +00:08:05,190 --> 00:08:09,560 +talking about linear relationship. So this chapter + +106 +00:08:09,560 --> 00:08:13,340 +just covers the linear relationship between the + +107 +00:08:13,340 --> 00:08:17,040 +two variables. 
Because sometimes there exists non + +108 +00:08:17,040 --> 00:08:23,180 +-linear relationship between the two variables. So + +109 +00:08:23,180 --> 00:08:26,120 +correlation is only concerned with the strength of + +110 +00:08:26,120 --> 00:08:30,500 +the relationship. No causal effect is implied with + +111 +00:08:30,500 --> 00:08:35,220 +correlation. We just say that X affects Y, or X + +112 +00:08:35,220 --> 00:08:39,580 +explains the variation in Y. Scatter plots were + +113 +00:08:39,580 --> 00:08:43,720 +first presented in Chapter 2, and we skipped, if + +114 +00:08:43,720 --> 00:08:48,480 +you remember, Chapter 2. And it's easy to make + +115 +00:08:48,480 --> 00:08:52,620 +scatter plots for Y versus X. In Chapter 3, we + +116 +00:08:52,620 --> 00:08:56,440 +talked about correlation, so correlation was first + +117 +00:08:56,440 --> 00:09:00,060 +presented in Chapter 3. But here I will give just + +118 +00:09:00,060 --> 00:09:07,240 +a review for computation about correlation + +119 +00:09:07,240 --> 00:09:11,460 +coefficient or coefficient of correlation. First, + +120 +00:09:12,800 --> 00:09:15,680 +coefficient of correlation measures the relative + +121 +00:09:15,680 --> 00:09:19,920 +strength of the linear relationship between two + +122 +00:09:19,920 --> 00:09:23,740 +numerical variables. So here, we are talking about + +123 +00:09:23,740 --> 00:09:28,080 +numerical variables. Sample correlation + +124 +00:09:28,080 --> 00:09:31,500 +coefficient is given by this equation. which is + +125 +00:09:31,500 --> 00:09:36,180 +sum of the product of xi minus x bar, yi minus y + +126 +00:09:36,180 --> 00:09:41,100 +bar, divided by n minus 1 times standard deviation + +127 +00:09:41,100 --> 00:09:44,960 +of x times standard deviation of y. We know that x + +128 +00:09:44,960 --> 00:09:47,240 +bar and y bar are the means of x and y + +129 +00:09:47,240 --> 00:09:51,360 +respectively. 
And Sx, Sy are the standard + +130 +00:09:51,360 --> 00:09:55,540 +deviations of x and y values. And we know this + +131 +00:09:55,540 --> 00:09:58,460 +equation before. But there is another equation + +132 +00:09:58,460 --> 00:10:05,330 +that one can be used For computation, which is + +133 +00:10:05,330 --> 00:10:09,290 +called shortcut formula, which is just sum of xy + +134 +00:10:09,290 --> 00:10:15,310 +minus n times x bar y bar divided by square root + +135 +00:10:15,310 --> 00:10:18,690 +of this quantity. And we know this equation from + +136 +00:10:18,690 --> 00:10:23,650 +chapter three. Now again, x bar and y bar are the + +137 +00:10:23,650 --> 00:10:30,060 +means. Now the question is, Do outliers affect the + +138 +00:10:30,060 --> 00:10:36,440 +correlation? For sure, yes. Because this formula + +139 +00:10:36,440 --> 00:10:39,940 +actually based on the means and the standard + +140 +00:10:39,940 --> 00:10:44,300 +deviations, and these two measures are affected by + +141 +00:10:44,300 --> 00:10:47,880 +outliers. So since R is a function of these two + +142 +00:10:47,880 --> 00:10:51,340 +statistics, the means and standard deviations, + +143 +00:10:51,940 --> 00:10:54,280 +then outliers will affect the value of the + +144 +00:10:54,280 --> 00:10:55,940 +correlation coefficient. + +145 +00:10:57,890 --> 00:11:01,170 +Some features about the coefficient of + +146 +00:11:01,170 --> 00:11:09,570 +correlation. Here rho is the population + +147 +00:11:09,570 --> 00:11:13,210 +coefficient of correlation, and R is the sample + +148 +00:11:13,210 --> 00:11:17,730 +coefficient of correlation. Either rho or R have + +149 +00:11:17,730 --> 00:11:21,390 +the following features. Number one, unity free. It + +150 +00:11:21,390 --> 00:11:24,890 +means R has no units. For example, here we are + +151 +00:11:24,890 --> 00:11:28,820 +talking about whales. And weight in kilograms, + +152 +00:11:29,300 --> 00:11:33,700 +ages in years. 
And for example, suppose the + +153 +00:11:33,700 --> 00:11:37,080 +correlation between these two variables is 0.8. + +154 +00:11:38,620 --> 00:11:41,760 +It's unity free, so it's just 0.8. So there is no + +155 +00:11:41,760 --> 00:11:45,640 +unit. You cannot say 0.8 kilogram per year or + +156 +00:11:45,640 --> 00:11:51,040 +whatever it is. So just 0.8. So the first feature + +157 +00:11:51,040 --> 00:11:53,360 +of the correlation coefficient is unity-free. + +158 +00:11:54,180 --> 00:11:56,340 +Number two ranges between negative one and plus + +159 +00:11:56,340 --> 00:12:00,380 +one. So R is always, or rho, is always between + +160 +00:12:00,380 --> 00:12:04,560 +minus one and plus one. So minus one smaller than + +161 +00:12:04,560 --> 00:12:07,340 +or equal to R smaller than or equal to plus one. + +162 +00:12:07,420 --> 00:12:11,420 +So R is always in this range. So R cannot be + +163 +00:12:11,420 --> 00:12:15,260 +smaller than negative one or greater than plus + +164 +00:12:15,260 --> 00:12:20,310 +one. The closer to minus one or negative one, the + +165 +00:12:20,310 --> 00:12:23,130 +stronger negative relationship between or linear + +166 +00:12:23,130 --> 00:12:26,770 +relationship between x and y. So, for example, if + +167 +00:12:26,770 --> 00:12:33,370 +R is negative 0.85 or R is negative 0.8. Now, this + +168 +00:12:33,370 --> 00:12:39,690 +value is closer to minus one than negative 0.8. So + +169 +00:12:39,690 --> 00:12:43,230 +negative 0.85 is stronger than negative 0.8. + +170 +00:12:44,590 --> 00:12:48,470 +Because we are looking for closer to minus 1. + +171 +00:12:49,570 --> 00:12:55,310 +Minus 0.8, the value itself is greater than minus + +172 +00:12:55,310 --> 00:12:59,610 +0.85. But this value is closer to minus 1 than + +173 +00:12:59,610 --> 00:13:03,790 +minus 0.8. So we can say that this relationship is + +174 +00:13:03,790 --> 00:13:05,070 +stronger than the other one. 
+ +175 +00:13:07,870 --> 00:13:11,730 +Also, the closer to plus 1, the stronger the + +176 +00:13:11,730 --> 00:13:16,040 +positive linear relationship. Here, suppose R is 0 + +177 +00:13:16,040 --> 00:13:22,740 +.7 and another R is 0.8. 0.8 is closer to plus one + +178 +00:13:22,740 --> 00:13:26,740 +than 0.7, so 0.8 is stronger. This one makes + +179 +00:13:26,740 --> 00:13:31,800 +sense. The closer to zero, the weaker relationship + +180 +00:13:31,800 --> 00:13:35,420 +between the two variables. For example, suppose R + +181 +00:13:35,420 --> 00:13:40,720 +is plus or minus 0.05. This value is very close to + +182 +00:13:40,720 --> 00:13:44,420 +zero. It means there exists weak. relationship. + +183 +00:13:44,980 --> 00:13:47,960 +Sometimes we can say that there exists moderate + +184 +00:13:47,960 --> 00:13:57,080 +relationship if R is close to 0.5. So it could be + +185 +00:13:57,080 --> 00:14:01,360 +classified into these groups closer to minus 1, + +186 +00:14:01,500 --> 00:14:06,220 +closer to 1, 0.5 or 0. So we can know the + +187 +00:14:06,220 --> 00:14:11,680 +direction by the sign of R negative it means + +188 +00:14:11,680 --> 00:14:14,320 +because here our ranges as we mentioned between + +189 +00:14:14,320 --> 00:14:19,520 +minus one and plus one here zero so this these + +190 +00:14:19,520 --> 00:14:24,560 +values it means there exists negative above zero + +191 +00:14:24,560 --> 00:14:26,760 +all the way up to one it means there exists + +192 +00:14:26,760 --> 00:14:31,020 +positive relationship between the two variables so + +193 +00:14:31,020 --> 00:14:35,520 +the sign gives the direction of the relationship + +194 +00:14:36,720 --> 00:14:40,840 +The absolute value gives the strength of the + +195 +00:14:40,840 --> 00:14:43,500 +relationship between the two variables. So the + +196 +00:14:43,500 --> 00:14:49,260 +same as we had discussed before. 
Now, some types + +197 +00:14:49,260 --> 00:14:51,880 +of scatter plots for different types of + +198 +00:14:51,880 --> 00:14:54,740 +relationship between the two variables is + +199 +00:14:54,740 --> 00:14:59,100 +presented in this slide. For example, if you look + +200 +00:14:59,100 --> 00:15:03,940 +carefully at figure one here, sharp one, this one, + +201 +00:15:04,720 --> 00:15:13,020 +and the other one, In each one, all points are + +202 +00:15:13,020 --> 00:15:15,820 +on the straight line, it means they exist perfect. + +203 +00:15:16,840 --> 00:15:21,720 +So if all points fall exactly on the straight + +204 +00:15:21,720 --> 00:15:24,220 +line, it means they exist perfect. + +205 +00:15:31,400 --> 00:15:35,160 +Here there exists perfect negative. So this is + +206 +00:15:35,160 --> 00:15:37,740 +perfect negative relationship. The other one + +207 +00:15:37,740 --> 00:15:41,240 +perfect positive relationship. In reality you will + +208 +00:15:41,240 --> 00:15:45,680 +never see something + +209 +00:15:45,680 --> 00:15:49,380 +like perfect positive or perfect negative. Maybe + +210 +00:15:49,380 --> 00:15:53,270 +in real situation. In real situation, most of the + +211 +00:15:53,270 --> 00:15:56,730 +time, R is close to 0.9 or 0.85 or something like + +212 +00:15:56,730 --> 00:16:02,070 +that, but it's not exactly equal one. Because + +213 +00:16:02,070 --> 00:16:05,330 +equal one, it means if you know the value of a + +214 +00:16:05,330 --> 00:16:08,630 +child's age, then you can predict the exact + +215 +00:16:08,630 --> 00:16:13,510 +weight. And that never happened. If the data looks + +216 +00:16:13,510 --> 00:16:18,770 +like this table, for example. Suppose here we have + +217 +00:16:18,770 --> 00:16:25,750 +age and weight. 
H1 for example 3, 5, 7 weight for + +218 +00:16:25,750 --> 00:16:32,450 +example 10, 12, 14, 16 in this case they exist + +219 +00:16:32,450 --> 00:16:37,610 +perfect because x increases by 2 units also + +220 +00:16:37,610 --> 00:16:41,910 +weights increases by 2 units or maybe weights for + +221 +00:16:41,910 --> 00:16:50,180 +example 9, 12, 15, 18 and so on So X or A is + +222 +00:16:50,180 --> 00:16:53,260 +increased by two units for each value for each + +223 +00:16:53,260 --> 00:16:58,860 +individual and also weights are increased by three + +224 +00:16:58,860 --> 00:17:03,080 +units for each person. In this case there exists + +225 +00:17:03,080 --> 00:17:06,820 +perfect relationship but that never happened in + +226 +00:17:06,820 --> 00:17:13,300 +real life. So perfect means all points are lie on + +227 +00:17:13,300 --> 00:17:16,260 +the straight line otherwise if the points are + +228 +00:17:16,260 --> 00:17:21,230 +close Then we can say there exists strong. Here if + +229 +00:17:21,230 --> 00:17:24,750 +you look carefully at these points corresponding + +230 +00:17:24,750 --> 00:17:30,150 +to this regression line, it looks like not strong + +231 +00:17:30,150 --> 00:17:32,630 +because some of the points are not closed, so you + +232 +00:17:32,630 --> 00:17:35,450 +can say there exists maybe moderate negative + +233 +00:17:35,450 --> 00:17:39,530 +relationship. This one, most of the points are + +234 +00:17:39,530 --> 00:17:42,390 +scattered away from the straight line, so there + +235 +00:17:42,390 --> 00:17:46,930 +exists weak relationship. 
So by just looking at + +236 +00:17:46,930 --> 00:17:50,290 +the scatter path, sometimes you can, sometimes + +237 +00:17:50,290 --> 00:17:53,290 +it's hard to tell, but most of the time you can + +238 +00:17:53,290 --> 00:17:58,250 +tell at least the direction, positive or negative, + +239 +00:17:59,410 --> 00:18:04,150 +the form, linear or non-linear, or the strength of + +240 +00:18:04,150 --> 00:18:09,100 +the relationship. The last one here, now x + +241 +00:18:09,100 --> 00:18:13,800 +increases, y remains the same. For example, + +242 +00:18:13,880 --> 00:18:18,580 +suppose x is 1, y is 10. x increases to 2, y still + +243 +00:18:18,580 --> 00:18:22,220 +is 10. So as x increases, y stays the same + +244 +00:18:22,220 --> 00:18:26,140 +position, it means there is no linear relationship + +245 +00:18:26,140 --> 00:18:28,900 +between the two variables. So based on the scatter + +246 +00:18:28,900 --> 00:18:33,240 +plot you can have an idea about the relationship + +247 +00:18:33,240 --> 00:18:37,800 +between the two variables. Here I will give a + +248 +00:18:37,800 --> 00:18:41,120 +simple example in order to determine the + +249 +00:18:41,120 --> 00:18:45,160 +correlation coefficient. A real estate agent + +250 +00:18:45,160 --> 00:18:50,380 +wishes to examine the relationship between selling + +251 +00:18:50,380 --> 00:18:54,580 +the price of a home and its size measured in + +252 +00:18:54,580 --> 00:18:57,140 +square feet. So in this case, there are two + +253 +00:18:57,140 --> 00:19:02,400 +variables of interest. One is called selling price + +254 +00:19:02,400 --> 00:19:13,720 +of a home. So here, selling price of a home and + +255 +00:19:13,720 --> 00:19:18,020 +its size. Now, selling price in $1,000. + +256 +00:19:25,360 --> 00:19:29,380 +And size in feet squared. Here we have to + +257 +00:19:29,380 --> 00:19:35,640 +distinguish between dependent and independent. 
So + +258 +00:19:35,640 --> 00:19:39,740 +your dependent variable is house price, sometimes + +259 +00:19:39,740 --> 00:19:41,620 +called response variable. + +260 +00:19:45,750 --> 00:19:49,490 +The independent variable is the size, which is in + +261 +00:19:49,490 --> 00:19:54,570 +square feet, sometimes called sub-planetary + +262 +00:19:54,570 --> 00:19:54,850 +variable. + +263 +00:19:59,570 --> 00:20:06,370 +So my Y is ceiling rise, and size is square feet, + +264 +00:20:07,530 --> 00:20:12,910 +or size of the house. In this case, there are 10. + +265 +00:20:14,290 --> 00:20:17,890 +It's sample size is 10. So the first house with + +266 +00:20:17,890 --> 00:20:26,850 +size 1,400 square feet, it's selling price is 245 + +267 +00:20:26,850 --> 00:20:31,670 +multiplied by 1,000. Because these values are in + +268 +00:20:31,670 --> 00:20:37,950 +$1,000. Now based on this data, you can first plot + +269 +00:20:37,950 --> 00:20:46,590 +the scatterplot of house price In Y direction, the + +270 +00:20:46,590 --> 00:20:51,870 +vertical direction. So here is house. And rise. + +271 +00:20:54,230 --> 00:21:01,470 +And size in the X axis. You will get this scatter + +272 +00:21:01,470 --> 00:21:07,370 +plot. Now, the data here is just 10 points, so + +273 +00:21:07,370 --> 00:21:12,590 +sometimes it's hard to tell. the relationship + +274 +00:21:12,590 --> 00:21:15,510 +between the two variables if your data is small. + +275 +00:21:16,510 --> 00:21:21,170 +But just this example for illustration. But at + +276 +00:21:21,170 --> 00:21:25,370 +least you can determine that there exists linear + +277 +00:21:25,370 --> 00:21:28,810 +relationship between the two variables. It is + +278 +00:21:28,810 --> 00:21:35,490 +positive. So the form is linear. Direction is + +279 +00:21:35,490 --> 00:21:41,880 +positive. Weak or strong or moderate. Sometimes + +280 +00:21:41,880 --> 00:21:45,620 +it's not easy to tell if it is strong or moderate. 
+ +281 +00:21:47,720 --> 00:21:50,120 +Now if you look at these points, some of them are + +282 +00:21:50,120 --> 00:21:53,700 +close to the straight line and others are away + +283 +00:21:53,700 --> 00:21:56,700 +from the straight line. So maybe there exists + +284 +00:21:56,700 --> 00:22:02,720 +moderate for example, but you cannot say strong. + +285 +00:22:03,930 --> 00:22:08,210 +Here, strong it means the points are close to the + +286 +00:22:08,210 --> 00:22:11,890 +straight line. Sometimes it's hard to tell the + +287 +00:22:11,890 --> 00:22:15,230 +strength of the relationship, but you can know the + +288 +00:22:15,230 --> 00:22:20,990 +form or the direction. But to measure the exact + +289 +00:22:20,990 --> 00:22:24,130 +strength, you have to measure the correlation + +290 +00:22:24,130 --> 00:22:29,810 +coefficient, R. Now, by looking at the data, you + +291 +00:22:29,810 --> 00:22:31,430 +can compute + +292 +00:22:33,850 --> 00:22:42,470 +The sum of x values, y values, sum of x squared, + +293 +00:22:43,290 --> 00:22:48,170 +sum of y squared, also sum of xy. Now plug these + +294 +00:22:48,170 --> 00:22:50,610 +values into the formula we have for the shortcut + +295 +00:22:50,610 --> 00:22:58,210 +formula. You will get R to be 0.76 around 76. + +296 +00:23:04,050 --> 00:23:10,170 +So there exists positive, moderate relationship + +297 +00:23:10,170 --> 00:23:13,770 +between selling + +298 +00:23:13,770 --> 00:23:19,850 +price of a home and its size. So that means if the + +299 +00:23:19,850 --> 00:23:24,670 +size increases, the selling price also increases. + +300 +00:23:25,310 --> 00:23:29,550 +So there exists positive relationship between the + +301 +00:23:29,550 --> 00:23:30,310 +two variables. + +302 +00:23:35,800 --> 00:23:40,300 +Strong it means close to 1, 0.8, 0.85, 0.9, you + +303 +00:23:40,300 --> 00:23:44,400 +can say there exists strong. 
But fields is not + +304 +00:23:44,400 --> 00:23:47,960 +strong relationship, you can say it's moderate + +305 +00:23:47,960 --> 00:23:53,440 +relationship. Because it's close if now if you + +306 +00:23:53,440 --> 00:23:57,080 +just compare this value and other data gives 9%. + +307 +00:23:58,830 --> 00:24:03,790 +Other one gives 85%. So these values are much + +308 +00:24:03,790 --> 00:24:08,550 +closer to 1 than 0.7, but still this value is + +309 +00:24:08,550 --> 00:24:09,570 +considered to be high. + +310 +00:24:15,710 --> 00:24:16,810 +Any question? + +311 +00:24:19,850 --> 00:24:22,810 +Next, I will give some introduction to regression + +312 +00:24:22,810 --> 00:24:23,390 +analysis. + +313 +00:24:26,970 --> 00:24:32,210 +regression analysis used to number one, predict + +314 +00:24:32,210 --> 00:24:35,050 +the value of a dependent variable based on the + +315 +00:24:35,050 --> 00:24:39,250 +value of at least one independent variable. So by + +316 +00:24:39,250 --> 00:24:42,490 +using the data we have for selling price of a home + +317 +00:24:42,490 --> 00:24:48,370 +and size, you can predict the selling price by + +318 +00:24:48,370 --> 00:24:51,510 +knowing the value of its size. So suppose for + +319 +00:24:51,510 --> 00:24:54,870 +example, You know that the size of a house is + +320 +00:24:54,870 --> 00:25:03,510 +1450, 1450 square feet. What do you predict its + +321 +00:25:03,510 --> 00:25:10,190 +size, its sale or price? So by using this value, + +322 +00:25:10,310 --> 00:25:16,510 +we can predict the selling price. Next, explain + +323 +00:25:16,510 --> 00:25:19,890 +the impact of changes in independent variable on + +324 +00:25:19,890 --> 00:25:23,270 +the dependent variable. You can say, for example, + +325 +00:25:23,510 --> 00:25:30,650 +90% of the variability in the dependent variable + +326 +00:25:30,650 --> 00:25:36,790 +in selling price is explained by its size. 
So we + +327 +00:25:36,790 --> 00:25:39,410 +can predict the value of dependent variable based + +328 +00:25:39,410 --> 00:25:42,890 +on a value of one independent variable at least. + +329 +00:25:43,870 --> 00:25:47,090 +Or also explain the impact of changes in + +330 +00:25:47,090 --> 00:25:49,550 +independent variable on the dependent variable. + +331 +00:25:51,420 --> 00:25:53,920 +Sometimes there exists more than one independent + +332 +00:25:53,920 --> 00:25:59,680 +variable. For example, maybe there are more than + +333 +00:25:59,680 --> 00:26:04,500 +one variable that affects a price, a selling + +334 +00:26:04,500 --> 00:26:10,300 +price. For example, beside selling + +335 +00:26:10,300 --> 00:26:16,280 +price, beside size, maybe location. + +336 +00:26:19,480 --> 00:26:23,580 +Maybe location is also another factor that affects + +337 +00:26:23,580 --> 00:26:27,360 +the selling price. So in this case there are two + +338 +00:26:27,360 --> 00:26:32,240 +variables. If there exists more than one variable, + +339 +00:26:32,640 --> 00:26:36,080 +in this case we have something called multiple + +340 +00:26:36,080 --> 00:26:38,680 +linear regression. + +341 +00:26:42,030 --> 00:26:46,710 +Here, we just talk about one independent variable. + +342 +00:26:47,030 --> 00:26:51,610 +There is only, in this chapter, there is only one + +343 +00:26:51,610 --> 00:26:58,330 +x. So it's called simple linear + +344 +00:26:58,330 --> 00:26:59,330 +regression. + +345 +00:27:02,190 --> 00:27:07,930 +The calculations for multiple takes time. So we + +346 +00:27:07,930 --> 00:27:11,430 +are going just to cover one independent variable. + +347 +00:27:11,930 --> 00:27:14,290 +But if there exists more than one, in this case + +348 +00:27:14,290 --> 00:27:18,250 +you have to use some statistical software as SPSS. 
+ +349 +00:27:18,470 --> 00:27:23,390 +Because in that case you can just select a + +350 +00:27:23,390 --> 00:27:25,970 +regression analysis from SPSS, then you can run + +351 +00:27:25,970 --> 00:27:28,590 +the multiple regression without doing any + +352 +00:27:28,590 --> 00:27:34,190 +computations. But here we just covered one + +353 +00:27:34,190 --> 00:27:36,820 +independent variable. In this case, it's called + +354 +00:27:36,820 --> 00:27:41,980 +simple linear regression. Again, the dependent + +355 +00:27:41,980 --> 00:27:44,600 +variable is the variable we wish to predict or + +356 +00:27:44,600 --> 00:27:50,020 +explain, the same as weight. Independent variable, + +357 +00:27:50,180 --> 00:27:52,440 +the variable used to predict or explain the + +358 +00:27:52,440 --> 00:27:54,000 +dependent variable. + +359 +00:27:57,400 --> 00:28:00,540 +For simple linear regression model, there is only + +360 +00:28:00,540 --> 00:28:01,800 +one independent variable. + +361 +00:28:04,830 --> 00:28:08,450 +Another example for simple linear regression. + +362 +00:28:08,770 --> 00:28:11,590 +Suppose we are talking about your scores. + +363 +00:28:14,210 --> 00:28:17,770 +Scores is the dependent variable can be affected + +364 +00:28:17,770 --> 00:28:21,050 +by number of hours. + +365 +00:28:25,130 --> 00:28:31,030 +Hour of study. Number of studying hours. + +366 +00:28:36,910 --> 00:28:39,810 +Maybe as number of studying hour increases, your + +367 +00:28:39,810 --> 00:28:43,390 +scores also increase. In this case, if there is + +368 +00:28:43,390 --> 00:28:46,330 +only one X, one independent variable, it's called + +369 +00:28:46,330 --> 00:28:51,110 +simple linear regression. Maybe another variable, + +370 +00:28:52,270 --> 00:28:59,730 +number of missing classes or + +371 +00:28:59,730 --> 00:29:03,160 +attendance. As number of missing classes + +372 +00:29:03,160 --> 00:29:06,380 +increases, your score goes down. 
That means there + +373 +00:29:06,380 --> 00:29:09,400 +exists negative relationship between missing + +374 +00:29:09,400 --> 00:29:13,540 +classes and your score. So sometimes, maybe there + +375 +00:29:13,540 --> 00:29:16,580 +exists positive or negative. It depends on the + +376 +00:29:16,580 --> 00:29:20,040 +variable itself. In this case, if there are more + +377 +00:29:20,040 --> 00:29:23,180 +than one variable, then we are talking about + +378 +00:29:23,180 --> 00:29:28,300 +multiple linear regression model. But here, we + +379 +00:29:28,300 --> 00:29:33,630 +have only one independent variable. In addition to + +380 +00:29:33,630 --> 00:29:37,230 +that, a relationship between x and y is described + +381 +00:29:37,230 --> 00:29:40,850 +by a linear function. So there exists a straight + +382 +00:29:40,850 --> 00:29:46,270 +line between the two variables. The changes in y + +383 +00:29:46,270 --> 00:29:50,210 +are assumed to be related to changes in x only. So + +384 +00:29:50,210 --> 00:29:54,270 +any change in y is related only to changes in x. + +385 +00:29:54,730 --> 00:29:57,810 +So that's the simple case we have for regression, + +386 +00:29:58,890 --> 00:30:01,170 +that we have only one independent + +387 +00:30:03,890 --> 00:30:07,070 +Variable. Types of relationships, as we mentioned, + +388 +00:30:07,210 --> 00:30:12,190 +maybe there exist linear, it means there exist + +389 +00:30:12,190 --> 00:30:16,490 +straight line between X and Y, either linear + +390 +00:30:16,490 --> 00:30:22,050 +positive or negative, or sometimes there exist non + +391 +00:30:22,050 --> 00:30:25,830 +-linear relationship, it's called curved linear + +392 +00:30:25,830 --> 00:30:29,290 +relationship. The same as this one, it's parabola. + +393 +00:30:32,570 --> 00:30:35,150 +Now in this case there is no linear relationship + +394 +00:30:35,150 --> 00:30:39,690 +but there exists curved linear or something like + +395 +00:30:39,690 --> 00:30:45,910 +this one. 
So these types of non-linear + +396 +00:30:45,910 --> 00:30:49,530 +relationship between the two variables. Here we + +397 +00:30:49,530 --> 00:30:54,070 +are covering just the linear relationship between + +398 +00:30:54,070 --> 00:30:56,570 +the two variables. So based on the scatter plot + +399 +00:30:56,570 --> 00:31:00,620 +you can determine the direction. The form, the + +400 +00:31:00,620 --> 00:31:03,860 +strength. Here, the form we are talking about is + +401 +00:31:03,860 --> 00:31:04,720 +just linear. + +402 +00:31:08,700 --> 00:31:13,260 +Now, another type of relationship, the strength of + +403 +00:31:13,260 --> 00:31:16,940 +the relationship. Here, the points, either for + +404 +00:31:16,940 --> 00:31:20,570 +this graph or the other one, These points are + +405 +00:31:20,570 --> 00:31:24,570 +close to the straight line, it means there exists + +406 +00:31:24,570 --> 00:31:28,210 +strong positive relationship or strong negative + +407 +00:31:28,210 --> 00:31:31,230 +relationship. So it depends on the direction. So + +408 +00:31:31,230 --> 00:31:35,710 +strong either positive or strong negative. Here + +409 +00:31:35,710 --> 00:31:38,850 +the points are scattered away from the regression + +410 +00:31:38,850 --> 00:31:41,790 +line, so you can say there exists weak + +411 +00:31:41,790 --> 00:31:45,090 +relationship, either weak positive or weak + +412 +00:31:45,090 --> 00:31:49,650 +negative. It depends on the direction of the + +413 +00:31:49,650 --> 00:31:54,270 +relationship between the two variables. Sometimes + +414 +00:31:54,270 --> 00:31:59,680 +there is no relationship or actually there is no + +415 +00:31:59,680 --> 00:32:02,340 +linear relationship between the two variables. 
If
+
+416
+00:32:02,340 --> 00:32:05,660
+the points are scattered away from the regression
+
+417
+00:32:05,660 --> 00:32:09,800
+line, I mean you cannot determine if it is
+
+418
+00:32:09,800 --> 00:32:13,160
+positive or negative, then there is no
+
+419
+00:32:13,160 --> 00:32:16,220
+relationship between the two variables, the same
+
+420
+00:32:16,220 --> 00:32:20,580
+as this one. X increases, Y stays nearly in the
+
+421
+00:32:20,580 --> 00:32:24,540
+same position, then there exists no relationship
+
+422
+00:32:24,540 --> 00:32:29,280
+between the two variables. So, a relationship
+
+423
+00:32:29,280 --> 00:32:32,740
+could be linear or curvilinear. It could be
+
+424
+00:32:32,740 --> 00:32:37,280
+positive or negative, strong or weak, or sometimes
+
+425
+00:32:37,280 --> 00:32:41,680
+there is no relationship between the two
+
+426
+00:32:41,680 --> 00:32:49,200
+variables. Now the question is, how can we write
+
+427
+00:32:51,250 --> 00:32:55,290
+Or how can we find the best regression line that
+
+428
+00:32:55,290 --> 00:32:59,570
+fits the data you have? We know the regression is
+
+429
+00:32:59,570 --> 00:33:06,270
+the straight line equation is given by this one. Y
+
+430
+00:33:06,270 --> 00:33:20,130
+equals beta 0 plus beta 1x plus epsilon. This can
+
+431
+00:33:20,130 --> 00:33:21,670
+be pronounced as epsilon.
+
+432
+00:33:24,790 --> 00:33:29,270
+It's a Greek letter, the same as alpha, beta, mu,
+
+433
+00:33:29,570 --> 00:33:35,150
+sigma, and so on. So it's epsilon. I, it means
+
+434
+00:33:35,150 --> 00:33:39,250
+observation number I. I 1, 2, 3, up to 10, for
+
+435
+00:33:39,250 --> 00:33:42,710
+example, is the same for selling price of a home.
+
+436
+00:33:43,030 --> 00:33:46,970
+So I 1, 2, 3, all the way up to the sample size.
+
+437
+00:33:48,370 --> 00:33:54,830
+Now, Y is your dependent variable. Beta 0 is
+
+438
+00:33:54,830 --> 00:33:59,810
+population Y intercept. 
For example, if we have + +439 +00:33:59,810 --> 00:34:00,730 +this scatter plot. + +440 +00:34:04,010 --> 00:34:10,190 +Now, beta 0 is + +441 +00:34:10,190 --> 00:34:15,370 +this one. So this is your beta 0. So this segment + +442 +00:34:15,370 --> 00:34:21,550 +is beta 0. it could be above the x-axis I mean + +443 +00:34:21,550 --> 00:34:34,890 +beta zero could be positive might be negative now + +444 +00:34:34,890 --> 00:34:40,270 +this beta zero fall below the x-axis so beta zero + +445 +00:34:40,270 --> 00:34:43,850 +could be negative or + +446 +00:34:46,490 --> 00:34:49,350 +Maybe the straight line passes through the origin + +447 +00:34:49,350 --> 00:34:56,990 +point. So in this case, beta zero equals zero. So + +448 +00:34:56,990 --> 00:34:59,890 +it could be positive and negative or equal zero, + +449 +00:35:00,430 --> 00:35:05,510 +but still we have positive relationship. That + +450 +00:35:05,510 --> 00:35:09,970 +means The value of beta zero, the sign of beta + +451 +00:35:09,970 --> 00:35:13,310 +zero does not affect the relationship between Y + +452 +00:35:13,310 --> 00:35:17,850 +and X. Because here in the three cases, there + +453 +00:35:17,850 --> 00:35:22,390 +exists positive relationship, but beta zero could + +454 +00:35:22,390 --> 00:35:25,370 +be positive or negative or equal zero, but still + +455 +00:35:25,370 --> 00:35:31,720 +we have positive relationship. I mean, you cannot + +456 +00:35:31,720 --> 00:35:35,060 +determine by looking at beta 0, you cannot + +457 +00:35:35,060 --> 00:35:37,940 +determine if there is a positive or negative + +458 +00:35:37,940 --> 00:35:41,720 +relationship. The other term is beta 1. Beta 1 is + +459 +00:35:41,720 --> 00:35:46,900 +the population slope coefficient. Now, the sign of + +460 +00:35:46,900 --> 00:35:50,010 +the slope determines the direction of the + +461 +00:35:50,010 --> 00:35:54,090 +relationship. 
That means if the slope has positive + +462 +00:35:54,090 --> 00:35:56,570 +sign, it means there exists positive relationship. + +463 +00:35:57,330 --> 00:35:59,370 +Otherwise if it is negative, then there is + +464 +00:35:59,370 --> 00:36:01,390 +negative relationship between the two variables. + +465 +00:36:02,130 --> 00:36:05,310 +So the sign of the slope determines the direction. + +466 +00:36:06,090 --> 00:36:11,290 +But the sign of beta zero has no meaning about the + +467 +00:36:11,290 --> 00:36:15,470 +relationship between Y and X. X is your + +468 +00:36:15,470 --> 00:36:19,630 +independent variable, Y is your independent + +469 +00:36:19,630 --> 00:36:19,630 +variable, Y is your independent variable, Y is + +470 +00:36:19,630 --> 00:36:19,650 +your independent variable, Y is your independent + +471 +00:36:19,650 --> 00:36:21,250 +variable, Y is your independent variable, Y is + +472 +00:36:21,250 --> 00:36:21,250 +your independent variable, Y is your independent + +473 +00:36:21,250 --> 00:36:21,250 +variable, Y is your independent variable, Y is + +474 +00:36:21,250 --> 00:36:21,250 +your independent variable, Y is your independent + +475 +00:36:21,250 --> 00:36:24,370 +variable, Y is your independent variable, Y is + +476 +00:36:24,370 --> 00:36:24,370 +your independent variable, Y is your independent + +477 +00:36:24,370 --> 00:36:24,370 +variable, Y is your independent variable, Y is + +478 +00:36:24,370 --> 00:36:24,370 +your independent variable, Y is your independent + +479 +00:36:24,370 --> 00:36:24,370 +variable, Y is your independent variable, Y is + +480 +00:36:24,370 --> 00:36:24,370 +your independent variable, Y is your independent + +481 +00:36:24,370 --> 00:36:24,370 +variable, Y is your independent variable, Y is + +482 +00:36:24,370 --> 00:36:24,370 +your independent variable, Y is your independent + +483 +00:36:24,370 --> 00:36:24,370 +variable, Y is your independent variable, Y is + +484 +00:36:24,370 --> 00:36:24,370 +your independent 
variable, Y is your independent + +485 +00:36:24,370 --> 00:36:24,370 +variable, Y is your independent variable, Y is + +486 +00:36:24,370 --> 00:36:24,370 +your independent variable, Y is your independent + +487 +00:36:24,370 --> 00:36:24,430 +variable, Y is your independent variable, Y is + +488 +00:36:24,430 --> 00:36:24,770 +your independent variable, Y is your independent + +489 +00:36:24,770 --> 00:36:27,490 +variable, Y is your independent variable, Y is + +490 +00:36:27,490 --> 00:36:30,110 +your independent variable, Y is your It means + +491 +00:36:30,110 --> 00:36:32,450 +there are some errors you don't know about it + +492 +00:36:32,450 --> 00:36:36,130 +because you ignore some other variables that may + +493 +00:36:36,130 --> 00:36:39,410 +affect the selling price. Maybe you select a + +494 +00:36:39,410 --> 00:36:42,490 +random sample, that sample is small. Maybe there + +495 +00:36:42,490 --> 00:36:46,270 +is a random, I'm sorry, there is sampling error. + +496 +00:36:47,070 --> 00:36:52,980 +So all of these are called random error term. So + +497 +00:36:52,980 --> 00:36:57,420 +all of them are in this term. So epsilon I means + +498 +00:36:57,420 --> 00:37:00,340 +something you don't include in your regression + +499 +00:37:00,340 --> 00:37:03,280 +modeling. For example, you don't include all the + +500 +00:37:03,280 --> 00:37:06,180 +independent variables that affect Y, or your + +501 +00:37:06,180 --> 00:37:09,700 +sample size is not large enough. So all of these + +502 +00:37:09,700 --> 00:37:14,260 +measured in random error term. So epsilon I is + +503 +00:37:14,260 --> 00:37:18,840 +random error component, beta 0 plus beta 1X is + +504 +00:37:18,840 --> 00:37:25,070 +called linear component. So that's the simple + +505 +00:37:25,070 --> 00:37:31,430 +linear regression model. Now, the data you have, + +506 +00:37:32,850 --> 00:37:38,210 +the blue circles represent the observed value. 
So + +507 +00:37:38,210 --> 00:37:47,410 +these blue circles are the observed values. So we + +508 +00:37:47,410 --> 00:37:49,370 +have observed. + +509 +00:37:52,980 --> 00:37:57,940 +Y observed value of Y for each value X. The + +510 +00:37:57,940 --> 00:38:03,360 +regression line is the blue, the red one. It's + +511 +00:38:03,360 --> 00:38:07,560 +called the predicted values. Predicted Y. + +512 +00:38:08,180 --> 00:38:14,760 +Predicted Y is denoted always by Y hat. Now the + +513 +00:38:14,760 --> 00:38:19,740 +difference between Y and Y hat. It's called the + +514 +00:38:19,740 --> 00:38:20,200 +error term. + +515 +00:38:24,680 --> 00:38:28,000 +It's actually the difference between the observed + +516 +00:38:28,000 --> 00:38:31,600 +value and its predicted value. Now, the predicted + +517 +00:38:31,600 --> 00:38:34,720 +value can be determined by using the regression + +518 +00:38:34,720 --> 00:38:39,180 +line. So this line is the predicted value of Y for + +519 +00:38:39,180 --> 00:38:44,480 +XR. Again, beta zero is the intercept. As we + +520 +00:38:44,480 --> 00:38:46,260 +mentioned before, it could be positive or negative + +521 +00:38:46,260 --> 00:38:52,600 +or even equal zero. The slope is changing Y. + +522 +00:38:55,140 --> 00:38:57,580 +Divide by change of x. + +523 +00:39:01,840 --> 00:39:07,140 +So these are the components for the simple linear + +524 +00:39:07,140 --> 00:39:10,840 +regression model. Y again represents the + +525 +00:39:10,840 --> 00:39:14,960 +independent variable. Beta 0 y intercept. Beta 1 + +526 +00:39:14,960 --> 00:39:17,960 +is your slope. And the slope determines the + +527 +00:39:17,960 --> 00:39:20,900 +direction of the relationship. X independent + +528 +00:39:20,900 --> 00:39:25,270 +variable epsilon i is the random error term. Any + +529 +00:39:25,270 --> 00:39:25,650 +question? + +530 +00:39:31,750 --> 00:39:36,610 +The relationship may be positive or negative. 
It + +531 +00:39:36,610 --> 00:39:37,190 +could be negative. + +532 +00:39:40,950 --> 00:39:42,710 +Now, for negative relationship, + +533 +00:39:57,000 --> 00:40:04,460 +Or negative, where beta zero is negative. + +534 +00:40:04,520 --> 00:40:08,700 +Or beta + +535 +00:40:08,700 --> 00:40:09,740 +zero equals zero. + +536 +00:40:16,680 --> 00:40:20,620 +So here there exists negative relationship, but + +537 +00:40:20,620 --> 00:40:22,060 +beta zero may be positive. + +538 +00:40:25,870 --> 00:40:30,210 +So again, the sign of beta 0 also does not affect + +539 +00:40:30,210 --> 00:40:31,990 +the relationship between the two variables. + +540 +00:40:36,230 --> 00:40:40,590 +Now, we don't actually know the values of beta 0 + +541 +00:40:40,590 --> 00:40:44,510 +and beta 1. We are going to estimate these values + +542 +00:40:44,510 --> 00:40:48,110 +from the sample we have. So the simple linear + +543 +00:40:48,110 --> 00:40:50,970 +regression equation provides an estimate of the + +544 +00:40:50,970 --> 00:40:55,270 +population regression line. So here we have Yi hat + +545 +00:40:55,270 --> 00:41:00,010 +is the estimated or predicted Y value for + +546 +00:41:00,010 --> 00:41:00,850 +observation I. + +547 +00:41:03,530 --> 00:41:08,220 +The estimate of the regression intercept P0. The + +548 +00:41:08,220 --> 00:41:11,360 +estimate of the regression slope is b1, and this + +549 +00:41:11,360 --> 00:41:16,680 +is your x, all independent variable. So here is + +550 +00:41:16,680 --> 00:41:20,340 +the regression equation. Simple linear regression + +551 +00:41:20,340 --> 00:41:24,400 +equation is given by y hat, the predicted value of + +552 +00:41:24,400 --> 00:41:29,380 +y equals b0 plus b1 times x1. + +553 +00:41:31,240 --> 00:41:35,960 +Now these coefficients, b0 and b1 can be computed + +554 +00:41:37,900 --> 00:41:43,040 +by the following equations. 
So the regression + +555 +00:41:43,040 --> 00:41:52,920 +equation is + +556 +00:41:52,920 --> 00:41:57,260 +given by y hat equals b0 plus b1x. + +557 +00:41:59,940 --> 00:42:06,140 +Now the slope, b1, is r times standard deviation + +558 +00:42:06,140 --> 00:42:10,540 +of y Times standard deviation of x. This is the + +559 +00:42:10,540 --> 00:42:13,820 +simplest equation to determine the value of the + +560 +00:42:13,820 --> 00:42:18,980 +star. B1r, r is the correlation coefficient. Sy is + +561 +00:42:18,980 --> 00:42:25,080 +xr, the standard deviations of y and x. Where b0, + +562 +00:42:25,520 --> 00:42:30,880 +which is y intercept, is y bar minus b x bar, or + +563 +00:42:30,880 --> 00:42:38,100 +b1 x bar. Sx, as we know, is the sum of x minus y + +564 +00:42:38,100 --> 00:42:40,460 +squared divided by n minus 1 under square root, + +565 +00:42:40,900 --> 00:42:47,060 +similarly for y values. So this, how can we, these + +566 +00:42:47,060 --> 00:42:52,380 +formulas compute the values of b0 and b1. So we + +567 +00:42:52,380 --> 00:42:54,600 +are going to use these equations in order to + +568 +00:42:54,600 --> 00:42:58,960 +determine the values of b0 and b1. + +569 +00:43:04,670 --> 00:43:07,710 +Now, what's your interpretation about the slope + +570 +00:43:07,710 --> 00:43:13,130 +and the intercept? For example, suppose we are + +571 +00:43:13,130 --> 00:43:18,610 +talking about your score Y and + +572 +00:43:18,610 --> 00:43:22,110 +X number of missing classes. + +573 +00:43:29,210 --> 00:43:35,460 +And suppose, for example, Y hat Equal 95 minus 5x. + +574 +00:43:37,780 --> 00:43:41,420 +Now let's see what's the interpretation of B0. + +575 +00:43:42,300 --> 00:43:45,060 +This is B0. So B0 is 95. + +576 +00:43:47,660 --> 00:43:51,960 +And B1 is 5. Now what's your interpretation about + +577 +00:43:51,960 --> 00:43:57,740 +B0 and B1? B0 is the estimated mean value of Y + +578 +00:43:57,740 --> 00:44:02,560 +when the value of X is 0. 
that means if the + +579 +00:44:02,560 --> 00:44:08,500 +student does not miss any class that means x + +580 +00:44:08,500 --> 00:44:13,260 +equals zero in this case we predict or we estimate + +581 +00:44:13,260 --> 00:44:19,880 +the mean value of his score or her score is 95 so + +582 +00:44:19,880 --> 00:44:27,500 +95 it means when x is zero if x is zero then we + +583 +00:44:27,500 --> 00:44:35,350 +expect his or Here, the score is 95. So that means + +584 +00:44:35,350 --> 00:44:39,830 +B0 is the estimated mean value of Y when the value + +585 +00:44:39,830 --> 00:44:40,630 +of X is 0. + +586 +00:44:43,370 --> 00:44:46,590 +Now, what's the meaning of the slope? The slope in + +587 +00:44:46,590 --> 00:44:51,290 +this case is negative Y. B1, which is the slope, + +588 +00:44:51,590 --> 00:44:57,610 +is the estimated change in the mean of Y. as a + +589 +00:44:57,610 --> 00:45:03,050 +result of a one unit change in x for example let's + +590 +00:45:03,050 --> 00:45:07,070 +compute y for different values of x suppose x is + +591 +00:45:07,070 --> 00:45:15,510 +one now we predict his score to be 95 minus 5 + +592 +00:45:15,510 --> 00:45:25,470 +times 1 which is 90 when x is 2 for example Y hat + +593 +00:45:25,470 --> 00:45:28,570 +is 95 minus 5 times 2, so that's 85. + +594 +00:45:31,950 --> 00:45:39,970 +So for each one unit, there is a drop by five + +595 +00:45:39,970 --> 00:45:43,750 +units in his score. That means if number of + +596 +00:45:43,750 --> 00:45:47,550 +missing classes increases by one unit, then his or + +597 +00:45:47,550 --> 00:45:51,790 +her weight is expected to be reduced by five units + +598 +00:45:51,790 --> 00:45:56,150 +because the sign is negative. 
another example
+
+599
+00:45:56,150 --> 00:46:05,910
+suppose again we are interested in weights and
+
+600
+00:46:05,910 --> 00:46:16,170
+ages and imagine that just
+
+601
+00:46:16,170 --> 00:46:21,670
+for example y equal y hat equals three plus four x
+
+602
+00:46:21,670 --> 00:46:29,830
+now y hat equals 3 if x equals zero. That has no
+
+603
+00:46:29,830 --> 00:46:34,510
+meaning because you cannot say age of zero. So
+
+604
+00:46:34,510 --> 00:46:40,450
+sometimes the meaning of y intercept does not make
+
+605
+00:46:40,450 --> 00:46:46,150
+sense because you cannot say x equals zero. Now
+
+606
+00:46:46,150 --> 00:46:50,690
+for the slope of four, that means as his or her
+
+607
+00:46:50,690 --> 00:46:55,550
+age increases by one year, Then we expect his
+
+608
+00:46:55,550 --> 00:47:00,470
+weight to increase by four kilograms. So as one
+
+609
+00:47:00,470 --> 00:47:05,130
+unit increase in x, y is our, his weight is
+
+610
+00:47:05,130 --> 00:47:10,150
+expected to increase by four units. So again,
+
+611
+00:47:10,370 --> 00:47:16,950
+sometimes we can interpret the y intercept, but in
+
+612
+00:47:16,950 --> 00:47:18,670
+some cases it has no meaning.
+
+613
+00:47:24,970 --> 00:47:27,190
+Now for the previous example, for the selling
+
+614
+00:47:27,190 --> 00:47:32,930
+price of a home and its size, B1rSy divided by Sx,
+
+615
+00:47:33,790 --> 00:47:43,550
+r is computed, r is found to be 76%, 76%Sy divided
+
+616
+00:47:43,550 --> 00:47:49,990
+by Sx, that will give 0.109. B0y bar minus B1x
+
+617
+00:47:49,990 --> 00:47:50,670
+bar,
+
+618
+00:47:53,610 --> 00:48:00,150
+Y bar for this data is 286 minus B1. So we have to
+
+619
+00:48:00,150 --> 00:48:03,490
+compute first B1 because we use it in order to
+
+620
+00:48:03,490 --> 00:48:08,590
+determine B0. And calculation gives 98. 
So that + +621 +00:48:08,590 --> 00:48:16,450 +means based on these equations, Y hat equals 0 + +622 +00:48:16,450 --> 00:48:22,990 +.10977 plus 98.248. + +623 +00:48:24,790 --> 00:48:29,370 +times X. X is the size. + +624 +00:48:32,890 --> 00:48:39,830 +0.1 B1 + +625 +00:48:39,830 --> 00:48:45,310 +is + +626 +00:48:45,310 --> 00:48:56,650 +0.1, B0 is 98, so 98.248 plus B1. So this is your + +627 +00:48:56,650 --> 00:49:03,730 +regression equation. So again, the intercept is + +628 +00:49:03,730 --> 00:49:09,750 +98. So this amount, the segment is 98. Now the + +629 +00:49:09,750 --> 00:49:14,790 +slope is 0.109. So house price, the expected value + +630 +00:49:14,790 --> 00:49:21,270 +of house price equals B098 plus 0.109 square feet. + +631 +00:49:23,150 --> 00:49:27,630 +So that's the prediction line for the house price. + +632 +00:49:28,510 --> 00:49:34,370 +So again, house price equal B0 98 plus 0.10977 + +633 +00:49:34,370 --> 00:49:36,930 +times square root. Now, what's your interpretation + +634 +00:49:36,930 --> 00:49:41,950 +about B0 and B1? B0 is the estimated mean value of + +635 +00:49:41,950 --> 00:49:46,430 +Y when the value of X is 0. So if X is 0, this + +636 +00:49:46,430 --> 00:49:52,980 +range of X observed X values and you have a home + +637 +00:49:52,980 --> 00:49:57,860 +or a house of size zero. So that means this value + +638 +00:49:57,860 --> 00:50:02,680 +has no meaning. Because a house cannot have a + +639 +00:50:02,680 --> 00:50:06,400 +square footage of zero. So B0 has no practical + +640 +00:50:06,400 --> 00:50:10,040 +application in this case. So sometimes it makes + +641 +00:50:10,040 --> 00:50:17,620 +sense, in other cases it doesn't have that. So for + +642 +00:50:17,620 --> 00:50:21,790 +this specific example, B0 has no practical + +643 +00:50:21,790 --> 00:50:28,210 +application in this case. 
But B1 which is 0.1097, + +644 +00:50:28,930 --> 00:50:33,050 +B1 estimates the change in the mean value of Y as + +645 +00:50:33,050 --> 00:50:36,730 +a result of one unit increasing X. So for this + +646 +00:50:36,730 --> 00:50:41,640 +value which is 0.109, it means This fellow tells + +647 +00:50:41,640 --> 00:50:46,420 +us that the mean value of a house can increase by + +648 +00:50:46,420 --> 00:50:52,280 +this amount, increase by 0.1097, but we have to + +649 +00:50:52,280 --> 00:50:55,700 +multiply this value by a thousand because the data + +650 +00:50:55,700 --> 00:51:01,280 +was in thousand dollars, so around 109, on average + +651 +00:51:01,280 --> 00:51:05,160 +for each additional one square foot of a size. So + +652 +00:51:05,160 --> 00:51:09,990 +that means if a house So if house size increased + +653 +00:51:09,990 --> 00:51:14,630 +by one square foot, then the price increased by + +654 +00:51:14,630 --> 00:51:19,530 +around 109 dollars. So for each one unit increased + +655 +00:51:19,530 --> 00:51:22,990 +in the size, the selling price of a home increased + +656 +00:51:22,990 --> 00:51:29,590 +by 109. So that means if the size increased by + +657 +00:51:29,590 --> 00:51:35,860 +tenth, It means the selling price increased by + +658 +00:51:35,860 --> 00:51:39,400 +1097 + +659 +00:51:39,400 --> 00:51:46,600 +.7. Make sense? So for each one unit increase in + +660 +00:51:46,600 --> 00:51:50,300 +its size, the house selling price increased by + +661 +00:51:50,300 --> 00:51:55,540 +109. So we have to multiply this value by the unit + +662 +00:51:55,540 --> 00:52:02,280 +we have. Because Y was 8000 dollars. Here if you + +663 +00:52:02,280 --> 00:52:06,600 +go back to the previous data we have, the data was + +664 +00:52:06,600 --> 00:52:11,120 +house price wasn't thousand dollars, so we have to + +665 +00:52:11,120 --> 00:52:15,840 +multiply the slope by a thousand. 
+ +666 +00:52:19,480 --> 00:52:23,720 +Now we + +667 +00:52:23,720 --> 00:52:30,380 +can use also the regression equation line to make + +668 +00:52:30,380 --> 00:52:35,390 +some prediction. For example, we can predict the + +669 +00:52:35,390 --> 00:52:42,290 +price of a house with 2000 square feet. You just + +670 +00:52:42,290 --> 00:52:43,590 +plug this value. + +671 +00:52:46,310 --> 00:52:52,210 +So we have 98.25 plus 0.109 times 2000. That will + +672 +00:52:52,210 --> 00:53:01,600 +give the house price. for 2,000 square feet. So + +673 +00:53:01,600 --> 00:53:05,920 +that means the predicted price for a house with 2 + +674 +00:53:05,920 --> 00:53:10,180 +,000 square feet is this amount multiplied by 1 + +675 +00:53:10,180 --> 00:53:18,260 +,000. So that will give $317,850. So that's how + +676 +00:53:18,260 --> 00:53:24,240 +can we make predictions for why I mean for house + +677 +00:53:24,240 --> 00:53:29,360 +price at any given value of its size. So for this + +678 +00:53:29,360 --> 00:53:36,020 +data, we have a house with 2000 square feet. So we + +679 +00:53:36,020 --> 00:53:43,180 +predict its price to be around 317,850. + +680 +00:53:44,220 --> 00:53:50,920 +I will stop at coefficient of correlation. I will + +681 +00:53:50,920 --> 00:53:54,190 +stop at coefficient of determination for next time + +682 +00:53:54,190 --> 00:53:57,770 +that's + +683 +00:53:57,770 --> 00:53:57,990 +all + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/I7SEpdLlzFg_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/I7SEpdLlzFg_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..218039ba8c28c3a67d9988dcd6763a0b850483fe --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/I7SEpdLlzFg_postprocess.srt @@ -0,0 +1,2804 @@ +1 +00:00:06,760 --> 00:00:10,320 +Eventually I will give some practice problem for + +2 +00:00:10,320 --> 00:00:15,040 +chapter eight. 
Generally speaking, there are three + +3 +00:00:15,040 --> 00:00:19,800 +types of questions. The first type, multiple + +4 +00:00:19,800 --> 00:00:22,940 +choice, so MCQ questions. + +5 +00:00:36,250 --> 00:00:41,490 +The other type of problems will be true or false. + +6 +00:00:42,890 --> 00:00:54,870 +Part B, Part C, three response problems. + +7 +00:00:56,510 --> 00:01:00,210 +So three types of questions. Multiple choice, we + +8 +00:01:00,210 --> 00:01:03,130 +have four answers. You have to select correct one. + +9 +00:01:06,060 --> 00:01:11,660 +True or false problems. And the last part, free + +10 +00:01:11,660 --> 00:01:14,800 +response problems. Here we'll talk about one of + +11 +00:01:14,800 --> 00:01:20,740 +these. I will cover multiple choice questions as + +12 +00:01:20,740 --> 00:01:24,200 +well as true and false. Let's start with number + +13 +00:01:24,200 --> 00:01:30,200 +one for multiple choice. The width of a confidence + +14 +00:01:30,200 --> 00:01:36,050 +interval estimate for a proportion will be Here we + +15 +00:01:36,050 --> 00:01:39,690 +are talking about the width of a confidence + +16 +00:01:39,690 --> 00:01:40,230 +interval. + +17 +00:01:43,070 --> 00:01:48,770 +Estimates for a proportion will be narrower for 99 + +18 +00:01:48,770 --> 00:01:56,180 +% confidence than for a 9%. For 95 confidence? No, + +19 +00:01:56,280 --> 00:01:59,120 +because as we know that as the confidence level + +20 +00:01:59,120 --> 00:02:03,120 +increases, the width becomes wider. So A is + +21 +00:02:03,120 --> 00:02:10,440 +incorrect. Is this true? B. Wider for sample size + +22 +00:02:10,440 --> 00:02:14,900 +of 100 than for a sample size of 50? False, + +23 +00:02:15,020 --> 00:02:20,400 +because as sample size increases, The sampling + +24 +00:02:20,400 --> 00:02:23,600 +error goes down. That means the width of the + +25 +00:02:23,600 --> 00:02:28,700 +interval becomes smaller and smaller. Yes, for N. + +26 +00:02:30,300 --> 00:02:37,100 +Part C. 
Normal for 90% confidence, then for 95% + +27 +00:02:37,100 --> 00:02:40,620 +confidence. That's correct. So C is the correct + +28 +00:02:40,620 --> 00:02:43,640 +answer. Part D. Normal when the sampling + +29 +00:02:43,640 --> 00:02:49,100 +proportion is 50%. is incorrect because if we have + +30 +00:02:49,100 --> 00:02:52,920 +smaller than 50%, we'll get smaller confidence, + +31 +00:02:53,320 --> 00:02:56,620 +smaller weight of the confidence. So C is the + +32 +00:02:56,620 --> 00:03:02,720 +correct answer. Any question? So C is the correct + +33 +00:03:02,720 --> 00:03:06,180 +answer because as C level increases, the + +34 +00:03:06,180 --> 00:03:08,760 +confidence interval becomes narrower. + +35 +00:03:11,040 --> 00:03:12,920 +Let's move to the second one. + +36 +00:03:16,540 --> 00:03:19,900 +A 99% confidence interval estimate can be + +37 +00:03:19,900 --> 00:03:23,940 +interpreted to mean that. Let's look at the + +38 +00:03:23,940 --> 00:03:28,040 +interpretation of the 99% confidence interval. + +39 +00:03:28,940 --> 00:03:29,660 +Part eight. + +40 +00:03:33,360 --> 00:03:38,820 +If all possible samples are taken and confidence + +41 +00:03:38,820 --> 00:03:43,070 +interval estimates are developed, 99% of them + +42 +00:03:43,070 --> 00:03:45,750 +would include the true population mean somewhere + +43 +00:03:45,750 --> 00:03:46,790 +within their interval. + +44 +00:03:49,750 --> 00:03:53,210 +Here we are talking about the population mean. It + +45 +00:03:53,210 --> 00:03:57,890 +says that 99% of them of these intervals would + +46 +00:03:57,890 --> 00:04:00,790 +include the true population mean somewhere within + +47 +00:04:00,790 --> 00:04:05,490 +their interval. It's correct. Why false? Why is it + +48 +00:04:05,490 --> 00:04:10,620 +false? 
This is correct answer, because it's + +49 +00:04:10,620 --> 00:04:15,240 +mentioned that 99% of these confidence intervals + +50 +00:04:15,240 --> 00:04:19,600 +will contain the true population mean somewhere + +51 +00:04:19,600 --> 00:04:22,900 +within their interval. So A is correct. Let's look + +52 +00:04:22,900 --> 00:04:30,880 +at B. B says we have 99% confidence that we have + +53 +00:04:30,880 --> 00:04:34,160 +selected a sample whose interval does include the + +54 +00:04:34,160 --> 00:04:39,160 +population mean. Also, this one is correct. Again, + +55 +00:04:39,300 --> 00:04:42,540 +it's mentioned that 99% confidence that we have + +56 +00:04:42,540 --> 00:04:47,080 +selected sample whose interval does include. So + +57 +00:04:47,080 --> 00:04:52,600 +it's correct. So C is both of the above and D none + +58 +00:04:52,600 --> 00:04:54,840 +of the above. So C is the correct answer. So + +59 +00:04:54,840 --> 00:04:59,080 +sometimes maybe there is only one answer. Maybe in + +60 +00:04:59,080 --> 00:05:03,360 +other problems, it might be two answers are + +61 +00:05:03,360 --> 00:05:07,150 +correct. So for this one, B and C. I'm sorry, A + +62 +00:05:07,150 --> 00:05:11,790 +and B are correct, so C is the correct answer. + +63 +00:05:14,270 --> 00:05:17,530 +Number three, which of the following is not true + +64 +00:05:17,530 --> 00:05:20,610 +about the student's T distribution? Here, we are + +65 +00:05:20,610 --> 00:05:25,550 +talking about the not true statement about the + +66 +00:05:25,550 --> 00:05:31,110 +student T distribution, A. It has more data in the + +67 +00:05:31,110 --> 00:05:35,780 +tails. and less in the center than does the normal + +68 +00:05:35,780 --> 00:05:38,580 +distribution. That's correct because we mentioned + +69 +00:05:38,580 --> 00:05:44,160 +last time that T distribution, the tail is fatter + +70 +00:05:44,160 --> 00:05:48,000 +than the Z normal. 
So that means it has more data + +71 +00:05:48,000 --> 00:05:52,100 +in the tails and less data in the center. So + +72 +00:05:52,100 --> 00:05:52,720 +that's correct. + +73 +00:05:58,000 --> 00:06:01,020 +It is used to construct confidence intervals for + +74 +00:06:01,020 --> 00:06:03,220 +the population mean when the population standard + +75 +00:06:03,220 --> 00:06:07,400 +deviation is known. No, we use z instead of t, so + +76 +00:06:07,400 --> 00:06:11,680 +this one is incorrect about t. It is well-shaped + +77 +00:06:11,680 --> 00:06:17,320 +and symmetrical, so that's true, so we are looking + +78 +00:06:17,320 --> 00:06:21,900 +for the incorrect statement. D, as the number of + +79 +00:06:21,900 --> 00:06:23,260 +degrees of freedom increases, + +80 +00:06:25,850 --> 00:06:31,510 +The T distribution approaches the normal. That's + +81 +00:06:31,510 --> 00:06:36,830 +true. So which one? P. So P is incorrect. So + +82 +00:06:36,830 --> 00:06:39,670 +number four. Extra. + +83 +00:06:42,010 --> 00:06:47,090 +Can you explain the average total compensation of + +84 +00:06:47,090 --> 00:06:51,830 +CEOs in the service industry? Data were randomly + +85 +00:06:51,830 --> 00:06:57,480 +collected from 18 CEOs and 19 employees. 97% + +86 +00:06:57,480 --> 00:07:06,040 +confidence interval was calculated to be $281, + +87 +00:07:07,040 --> 00:07:09,020 +$260, + +88 +00:07:10,060 --> 00:07:13,780 +$5836, + +89 +00:07:14,800 --> 00:07:19,300 +and $180. Which of the following interpretations + +90 +00:07:19,300 --> 00:07:27,310 +is correct? Part number A. It says 97% of the + +91 +00:07:27,310 --> 00:07:32,450 +sample data compensation value between these two + +92 +00:07:32,450 --> 00:07:37,310 +values, correct or incorrect statement. Because it + +93 +00:07:37,310 --> 00:07:44,310 +says 97% of the sample data. 
For the confidence + +94 +00:07:44,310 --> 00:07:48,310 +center value, we are looking for the average, not + +95 +00:07:48,310 --> 00:07:51,690 +for the population, not for the sample. So A is + +96 +00:07:51,690 --> 00:07:55,890 +incorrect. Because A, it says here 97% of the + +97 +00:07:55,890 --> 00:07:58,690 +sampling total. Sample total, we are looking for + +98 +00:07:58,690 --> 00:08:02,390 +the average of the population. So A is incorrect + +99 +00:08:02,390 --> 00:08:09,150 +statement. B, we are 97% confident that the mean + +100 +00:08:09,150 --> 00:08:15,890 +of the sample. So it's false. Because the + +101 +00:08:15,890 --> 00:08:18,470 +confidence about the entire population is about + +102 +00:08:18,470 --> 00:08:24,160 +the population mean. So B is incorrect. C. In the + +103 +00:08:24,160 --> 00:08:27,760 +population of the surface industry, here we have + +104 +00:08:27,760 --> 00:08:33,240 +97% of them will have a total death toll. Also, + +105 +00:08:33,360 --> 00:08:37,480 +this one is incorrect because it mentions in the + +106 +00:08:37,480 --> 00:08:39,900 +population. Here we are talking about total, but + +107 +00:08:39,900 --> 00:08:44,000 +we are looking for the average. Now, part D. We + +108 +00:08:44,000 --> 00:08:47,360 +are 97% confident that the average total + +109 +00:08:50,460 --> 00:08:53,440 +So this one is correct statement. So D is the + +110 +00:08:53,440 --> 00:08:55,840 +correct statement. So for the confidence interval, + +111 +00:08:55,840 --> 00:08:59,040 +we are looking for population, number one. Number + +112 +00:08:59,040 --> 00:09:03,520 +two, the average of that population. So D is the + +113 +00:09:03,520 --> 00:09:07,260 +correct answer. Let's go back to part A. In part + +114 +00:09:07,260 --> 00:09:10,420 +A, it says sample total. So this is incorrect. + +115 +00:09:11,380 --> 00:09:15,140 +Next one. The mean of the sample. We are looking + +116 +00:09:15,140 --> 00:09:17,440 +for the mean of the population. 
So B is incorrect. + +117 +00:09:18,040 --> 00:09:25,240 +Part C. It mentions here population, but total. So + +118 +00:09:25,240 --> 00:09:30,300 +this one is incorrect. Finally here, we are 97% + +119 +00:09:30,300 --> 00:09:34,680 +confident that the average total. So this one is + +120 +00:09:34,680 --> 00:09:39,360 +true of all. So here we have population and the + +121 +00:09:39,360 --> 00:09:42,100 +average of that population. So it makes sense that + +122 +00:09:42,100 --> 00:09:43,260 +this is the correct answer. + +123 +00:09:46,520 --> 00:09:47,660 +Number five. + +124 +00:09:59,690 --> 00:10:03,290 +Number five, confidence interval. Confidence + +125 +00:10:03,290 --> 00:10:06,610 +interval was used to estimate the proportion of + +126 +00:10:06,610 --> 00:10:10,170 +statistics students that are females. A random + +127 +00:10:10,170 --> 00:10:16,170 +sample of 72 statistics students generated the + +128 +00:10:16,170 --> 00:10:22,970 +following 90% confidence interval, 0.438 + +129 +00:10:22,970 --> 00:10:28,150 +and 0.640. + +130 +00:10:28,510 --> 00:10:32,890 +42, based on the interval above the population + +131 +00:10:32,890 --> 00:10:38,230 +proportion of females equals to 0.6. So here we + +132 +00:10:38,230 --> 00:10:44,310 +have confidence interval for the female proportion + +133 +00:10:44,310 --> 00:10:52,990 +ranges between 0.438 up to 0.642. Based on this + +134 +00:10:52,990 --> 00:10:57,050 +interval. Is the population proportion of females + +135 +00:10:57,050 --> 00:10:58,770 +equal 60%? + +136 +00:11:03,410 --> 00:11:06,690 +So here we have from this point all the way up to + +137 +00:11:06,690 --> 00:11:10,610 +0.6. Is the population proportion of females equal + +138 +00:11:10,610 --> 00:11:16,250 +to 0.6? No. The answer is no, but know what? + +139 +00:11:16,850 --> 00:11:24,320 +Number A. No, and we are 90% sure of it. No, the + +140 +00:11:24,320 --> 00:11:31,200 +proportion is 54.17. 
See, maybe 60% is a + +141 +00:11:31,200 --> 00:11:33,760 +believable value of population proportion based on + +142 +00:11:33,760 --> 00:11:38,080 +information about it. He said yes, and we are 90% + +143 +00:11:38,080 --> 00:11:44,300 +sure of it. So which one is correct? Farah. Which + +144 +00:11:44,300 --> 00:11:44,900 +one is correct? + +145 +00:11:50,000 --> 00:11:56,760 +B says the proportion is 54. 54 if we take the + +146 +00:11:56,760 --> 00:12:01,120 +average of these two values, the answer is 54. But + +147 +00:12:01,120 --> 00:12:04,960 +the true proportion is not the average of the two + +148 +00:12:04,960 --> 00:12:07,640 +endpoints. + +149 +00:12:08,440 --> 00:12:09,500 +So B is incorrect. + +150 +00:12:12,780 --> 00:12:16,320 +If you look at A, the answer is no. And we + +151 +00:12:16,320 --> 00:12:20,440 +mentioned before that this interval may Or may not + +152 +00:12:20,440 --> 00:12:25,380 +contains the true proportion, so A is incorrect. + +153 +00:12:26,700 --> 00:12:32,640 +Now C, maybe. So C is the correct statement, maybe + +154 +00:12:32,640 --> 00:12:35,820 +60% is believable value of the population + +155 +00:12:35,820 --> 00:12:39,020 +proportion based on the information about. So C is + +156 +00:12:39,020 --> 00:12:44,440 +the correct answer. A6, number six. + +157 +00:12:48,590 --> 00:12:49,550 +Number six. + +158 +00:13:21,280 --> 00:13:23,800 +So up to this point, we have the same information + +159 +00:13:23,800 --> 00:13:27,440 +for the previous problem. Using the information + +160 +00:13:27,440 --> 00:13:31,440 +about what total size sample would be necessary if + +161 +00:13:31,440 --> 00:13:35,460 +we wanted to estimate the true proportion within + +162 +00:13:35,460 --> 00:13:43,620 +minus positive or minus 0.108 using 95% + +163 +00:13:43,620 --> 00:13:46,320 +confidence. 
Now here we are looking for the sample + +164 +00:13:46,320 --> 00:13:49,160 +size that is required to estimate the true + +165 +00:13:49,160 --> 00:13:53,720 +proportion to be within 8% plus or minus 8% using + +166 +00:13:53,720 --> 00:13:54,720 +95% confidence. + +167 +00:13:58,640 --> 00:14:05,360 +The formula first, n equals z squared c plus one. + +168 +00:14:08,740 --> 00:14:14,240 +We have pi, one minus pi divided by e squared. + +169 +00:14:15,740 --> 00:14:21,120 +Now, pi is not given. So in this case either we + +170 +00:14:21,120 --> 00:14:25,880 +use a sinus sample in order to estimate the sample + +171 +00:14:25,880 --> 00:14:30,400 +proportion, Or use Pi to be 0.5. So in this case + +172 +00:14:30,400 --> 00:14:35,900 +we have to use Pi 1 half. If you remember last + +173 +00:14:35,900 --> 00:14:39,720 +time I gave you this equation. Z alpha over 2 + +174 +00:14:39,720 --> 00:14:44,820 +divided by 2 squared. So we have this equation. + +175 +00:14:45,900 --> 00:14:49,280 +Because Pi is not given, just use Pi to be 1 half. + +176 +00:14:50,060 --> 00:14:54,880 +Or you may use this equation. shortcut formula. In + +177 +00:14:54,880 --> 00:15:02,120 +this case, here we are talking about 95%. So + +178 +00:15:02,120 --> 00:15:07,240 +what's the value of Z? 196. 2 times E. + +179 +00:15:10,100 --> 00:15:17,140 +E is 8%. So 196 divided by 2 times E, the quantity + +180 +00:15:17,140 --> 00:15:25,540 +squared. Now the answer of this problem 150. So + +181 +00:15:25,540 --> 00:15:28,720 +approximately 150. + +182 +00:15:29,160 --> 00:15:33,520 +So 150 is the correct answer. So again, here we + +183 +00:15:33,520 --> 00:15:41,460 +used pi to be 1 half because P is not given. And + +184 +00:15:41,460 --> 00:15:46,580 +simple calculation results in 150 for the sample + +185 +00:15:46,580 --> 00:15:49,820 +size. So P is the correct answer, 7. + +186 +00:15:55,220 --> 00:15:56,000 +Number seven. + +187 +00:16:00,480 --> 00:16:03,820 +Number seven. 
When determining the sample size + +188 +00:16:03,820 --> 00:16:05,820 +necessarily for estimating the true population + +189 +00:16:05,820 --> 00:16:10,560 +mean, which factor is not considered when sampling + +190 +00:16:10,560 --> 00:16:14,960 +with replacement? Now here, if you remember the + +191 +00:16:14,960 --> 00:16:17,960 +formula for the sample size. + +192 +00:16:38,820 --> 00:16:43,460 +Now, which factor is not considered when sampling + +193 +00:16:43,460 --> 00:16:47,120 +without weight replacement? Now, the population + +194 +00:16:47,120 --> 00:16:51,460 +size, the population size is not in this quantity, + +195 +00:16:51,600 --> 00:16:54,420 +so A is the correct answer. B, the population + +196 +00:16:54,420 --> 00:16:58,820 +standard deviation, sigma is here. C, the level of + +197 +00:16:58,820 --> 00:17:03,090 +confidence desired in the estimate, we have Z. The + +198 +00:17:03,090 --> 00:17:06,470 +allowable or tolerable seminar, we have it here. + +199 +00:17:07,370 --> 00:17:10,770 +So eight is the correct answer. + +200 +00:17:13,290 --> 00:17:13,630 +Eight. + +201 +00:17:20,020 --> 00:17:22,600 +Supposedly, I'm supposed to focus on the companies + +202 +00:17:22,600 --> 00:17:25,640 +that you're working on now. It turns out to be one + +203 +00:17:25,640 --> 00:17:28,360 +of them. I'm not sure if I'm on the right track. + +204 +00:17:28,420 --> 00:17:31,040 +To make more use of it as a reference for the + +205 +00:17:31,040 --> 00:17:31,780 +update. + +206 +00:17:43,820 --> 00:17:47,240 +Now, which of the following will result in a + +207 +00:17:47,240 --> 00:17:50,100 +reduced interval width? So here we are talking + +208 +00:17:50,100 --> 00:17:55,580 +about reducing the width of the interval. Number + +209 +00:17:55,580 --> 00:17:58,120 +one. Here, if you look carefully at this equation, + +210 +00:17:59,040 --> 00:18:08,140 +increase the sample size, the error Z up over 2 + +211 +00:18:08,140 --> 00:18:17,180 +sigma over n. 
So this is the error state. Now, + +212 +00:18:17,240 --> 00:18:20,360 +based on this equation, if we increase the sample + +213 +00:18:20,360 --> 00:18:25,180 +size, the error will decrease. That means we + +214 +00:18:25,180 --> 00:18:28,360 +reduce the interval with it. So A is the correct + +215 +00:18:28,360 --> 00:18:31,560 +answer. Look at B. Increase the confidence level. + +216 +00:18:34,030 --> 00:18:36,770 +Increasing the confidence level, it means increase + +217 +00:18:36,770 --> 00:18:41,030 +Z, increase E, that means we have wider confidence + +218 +00:18:41,030 --> 00:18:43,910 +interval, so B is incorrect. Increase the + +219 +00:18:43,910 --> 00:18:46,790 +population mean, it doesn't matter actually, so + +220 +00:18:46,790 --> 00:18:50,250 +it's not correct. Increase the sample mean also. + +221 +00:18:50,770 --> 00:18:54,990 +So C and D are incorrect totally, so B is + +222 +00:18:54,990 --> 00:18:57,670 +incorrect, so E is the correct answer. So the + +223 +00:18:57,670 --> 00:19:00,630 +correct answer is A, nine. + +224 +00:19:07,140 --> 00:19:10,500 +A major department store chain is interested in + +225 +00:19:10,500 --> 00:19:13,560 +estimating the average amount each credit and + +226 +00:19:13,560 --> 00:19:16,560 +customers spent on their first visit to the + +227 +00:19:16,560 --> 00:19:21,380 +chain's new store in the mall. 15 credit cards + +228 +00:19:21,380 --> 00:19:26,540 +accounts were randomly sampled and analyzed with + +229 +00:19:26,540 --> 00:19:29,320 +the following results. So here we have this + +230 +00:19:29,320 --> 00:19:34,880 +information about the 15 data points. We have x + +231 +00:19:34,880 --> 00:19:35,220 +bar. + +232 +00:19:38,550 --> 00:19:42,150 +of $50.5. + +233 +00:19:43,470 --> 00:19:47,390 +And S squared, the sample variance is 400. 
+ +234 +00:19:49,890 --> 00:19:52,750 +Construct 95 confidence interval for the average + +235 +00:19:52,750 --> 00:19:55,570 +amount it's credit card customer spent on their + +236 +00:19:55,570 --> 00:20:01,230 +first visit to the chain. It's a new store. It's + +237 +00:20:01,230 --> 00:20:04,310 +in the mall, assuming the amount spent follows a + +238 +00:20:04,310 --> 00:20:05,010 +normal distribution. + +239 +00:20:08,090 --> 00:20:13,150 +In this case, we should use T instead of Z. So the + +240 +00:20:13,150 --> 00:20:16,310 +formula should be X bar plus or minus T, alpha + +241 +00:20:16,310 --> 00:20:17,610 +over 2S over root N. + +242 +00:20:23,110 --> 00:20:29,350 +So X bar is 50.5. T, we should use the T table. In + +243 +00:20:29,350 --> 00:20:34,010 +this case, here we are talking about 95%. + +244 +00:20:36,830 --> 00:20:44,130 +So that means alpha is 5%, alpha over 2, 0, 2, 5. + +245 +00:20:44,770 --> 00:20:48,930 +So now we are looking for 2, 0, 2, 5, and degrees + +246 +00:20:48,930 --> 00:20:55,170 +of freedom. N is 15. It says that 15 credit cards. + +247 +00:20:55,770 --> 00:20:59,110 +So 15 credit cards accounts for random samples. So + +248 +00:20:59,110 --> 00:21:05,470 +N equals 15. So since N is 15, Degrees of freedom + +249 +00:21:05,470 --> 00:21:09,850 +is 14. Now we may use the normal, the T table in + +250 +00:21:09,850 --> 00:21:16,250 +order to find the value of T in + +251 +00:21:16,250 --> 00:21:19,270 +the upper tier actually. So what's the value if + +252 +00:21:19,270 --> 00:21:26,350 +you have the table? So look at degrees of freedom + +253 +00:21:26,350 --> 00:21:33,090 +14 under the probability of 0 to 5. + +254 +00:21:40,190 --> 00:21:45,050 +So again, we are looking for degrees of freedom + +255 +00:21:45,050 --> 00:21:49,170 +equal 14 under 2.5%. + +256 +00:22:04,850 --> 00:22:11,390 +0.5 plus or minus 2 + +257 +00:22:11,390 --> 00:22:18,390 +.1448. S squared is given 400. 
Take square root of + +258 +00:22:18,390 --> 00:22:25,570 +this quantity 20 over root n over root 15. And the + +259 +00:22:25,570 --> 00:22:30,350 +answer, just simple calculation will give + +260 +00:22:34,250 --> 00:22:38,410 +This result, so D is the correct answer. So the + +261 +00:22:38,410 --> 00:22:45,870 +answer should be 50.5 plus or minus 11.08. So D is + +262 +00:22:45,870 --> 00:22:49,170 +the correct answer. So this one is straightforward + +263 +00:22:49,170 --> 00:22:52,990 +calculation, gives part D to be the correct + +264 +00:22:52,990 --> 00:22:55,750 +answer. Any question? + +265 +00:22:58,510 --> 00:23:00,110 +11, 10? + +266 +00:23:03,110 --> 00:23:07,250 +Private colleges and universities rely on money + +267 +00:23:07,250 --> 00:23:12,730 +contributed by individuals and corporations for + +268 +00:23:12,730 --> 00:23:17,950 +their operating expenses. Much of this money is + +269 +00:23:17,950 --> 00:23:24,090 +put into a fund called an endowment, and the + +270 +00:23:24,090 --> 00:23:27,530 +college spends only the interest earned by the + +271 +00:23:27,530 --> 00:23:33,130 +fund. Now, here we have a recent It says that a + +272 +00:23:33,130 --> 00:23:35,310 +recent survey of eight private colleges in the + +273 +00:23:35,310 --> 00:23:39,450 +United States revealed the following endowment in + +274 +00:23:39,450 --> 00:23:44,350 +millions of dollars. So we have this data. So it's + +275 +00:23:44,350 --> 00:23:50,650 +raw data. Summary statistics yield export to be + +276 +00:23:50,650 --> 00:23:53,010 +180. + +277 +00:23:57,010 --> 00:23:57,850 +So export. + +278 +00:24:07,070 --> 00:24:12,130 +Now if this information is not given, you have to + +279 +00:24:12,130 --> 00:24:15,170 +compute the average and standard deviation by the + +280 +00:24:15,170 --> 00:24:19,860 +equations we know. But here, the mean and standard + +281 +00:24:19,860 --> 00:24:23,200 +deviation are given. 
So just use this information + +282 +00:24:23,200 --> 00:24:27,480 +anyway. Calculate 95 confidence interval for the + +283 +00:24:27,480 --> 00:24:30,140 +mean endowment of all private colleges in the + +284 +00:24:30,140 --> 00:24:34,520 +United States, assuming a normal distribution for + +285 +00:24:34,520 --> 00:24:39,300 +the endowment. Here we have 95%. + +286 +00:24:39,300 --> 00:24:42,600 +Now + +287 +00:24:42,600 --> 00:24:48,480 +what's the sample size? It says that eight. So N + +288 +00:24:48,480 --> 00:24:53,900 +is eight. So again, simple calculation. So + +289 +00:24:53,900 --> 00:24:59,680 +explore, plus or minus T, S over root N. So use + +290 +00:24:59,680 --> 00:25:04,200 +the same idea for the previous one. And the answer + +291 +00:25:04,200 --> 00:25:10,420 +for number 10 is part D. So D is the correct + +292 +00:25:10,420 --> 00:25:17,380 +answer. So again, For eleven, D is the correct + +293 +00:25:17,380 --> 00:25:22,680 +answer. For ten, D is the correct answer. Next. So + +294 +00:25:22,680 --> 00:25:26,280 +this one is similar to the one we just did. + +295 +00:25:30,660 --> 00:25:31,260 +Eleven. + +296 +00:25:47,140 --> 00:25:51,140 +Here it says that rather than examine the records + +297 +00:25:51,140 --> 00:25:56,220 +of all students, the dean took a random sample of + +298 +00:25:56,220 --> 00:26:01,340 +size 200. So we have large university. Here we + +299 +00:26:01,340 --> 00:26:04,860 +took representative sample of size 200. + +300 +00:26:26,980 --> 00:26:31,900 +How many students would be to be assembled? It + +301 +00:26:31,900 --> 00:26:34,540 +says that if the dean wanted to estimate the + +302 +00:26:34,540 --> 00:26:38,040 +proportion of all students, The saving financial + +303 +00:26:38,040 --> 00:26:46,100 +aid to within 3% with 99% probability. How many + +304 +00:26:46,100 --> 00:26:51,620 +students would need to be sampled? 
So we have the + +305 +00:26:51,620 --> 00:26:56,920 +formula, if you remember, n equals z y 1 minus y + +306 +00:26:56,920 --> 00:27:00,860 +divided by e. So we have z squared. + +307 +00:27:03,640 --> 00:27:09,200 +Now, y is not given. If Pi is not given, we have + +308 +00:27:09,200 --> 00:27:14,180 +to look at either B or 0.5. Now in this problem, + +309 +00:27:15,000 --> 00:27:18,900 +it says that Dean selected 200 students, and he + +310 +00:27:18,900 --> 00:27:23,800 +finds that out of this number, 118 of them are + +311 +00:27:23,800 --> 00:27:26,480 +receiving financial aid. So based on this + +312 +00:27:26,480 --> 00:27:30,480 +information, we can compute B. So B is x over n. + +313 +00:27:30,700 --> 00:27:34,840 +It's 118 divided by 200. And this one gives? + +314 +00:27:41,090 --> 00:27:46,310 +So in this case, out of 200 students, 118 of them + +315 +00:27:46,310 --> 00:27:49,630 +are receiving financial aid. That means the + +316 +00:27:49,630 --> 00:27:53,730 +proportion, the sample proportion, is 118 divided + +317 +00:27:53,730 --> 00:27:57,690 +by 200, which is 0.59. So we have to use this + +318 +00:27:57,690 --> 00:28:03,830 +information instead of pi. So n equals, + +319 +00:28:08,050 --> 00:28:15,120 +now it's about 99%. 2.85. Exactly, it's one of + +320 +00:28:15,120 --> 00:28:21,380 +these. We have 2.57 and + +321 +00:28:21,380 --> 00:28:30,220 +8. It says 99%. So + +322 +00:28:30,220 --> 00:28:32,720 +here we have 99%. So what's left? + +323 +00:28:42,180 --> 00:28:47,320 +0.5 percent, this area. 0.5 to the right and 0.5 + +324 +00:28:47,320 --> 00:28:52,500 +to the left, so 005. Now if you look at 2.5 under + +325 +00:28:52,500 --> 00:28:57,280 +7, the answer is 0051, the other one 0049. + +326 +00:28:59,840 --> 00:29:04,600 +So either this one or the other value, so 2.57. or + +327 +00:29:04,600 --> 00:29:07,600 +2.58, it's better to take the average of these + +328 +00:29:07,600 --> 00:29:13,320 +two. 
Because 005 lies exactly between these two + +329 +00:29:13,320 --> 00:29:20,780 +values. So the score in this case, either 2.75 or + +330 +00:29:20,780 --> 00:29:30,880 +2.78, or the average. And the exact one, 2.7, I'm + +331 +00:29:30,880 --> 00:29:34,680 +sorry, 2.576. The exact answer. + +332 +00:29:38,000 --> 00:29:40,700 +It's better to use the average if you don't + +333 +00:29:40,700 --> 00:29:46,100 +remember the exact answer. So it's the exact one. + +334 +00:29:47,480 --> 00:29:53,440 +But 2.575 is okay. Now just use this equation, 2 + +335 +00:29:53,440 --> 00:30:02,020 +.575 times square, times 59. + +336 +00:30:03,900 --> 00:30:09,440 +1 minus 59 divided by the error. It's three + +337 +00:30:09,440 --> 00:30:17,800 +percent. So it's 0.0312 squared. So the answer in + +338 +00:30:17,800 --> 00:30:23,420 +this case is part 2 + +339 +00:30:23,420 --> 00:30:30,300 +.57 times 59 times 41 divided by 03 squared. The + +340 +00:30:30,300 --> 00:30:31,140 +answer is part. + +341 +00:30:41,650 --> 00:30:46,530 +You will get the exact answer if you use 2.576. + +342 +00:30:48,190 --> 00:30:51,230 +You will get the exact answer. But anyway, if you + +343 +00:30:51,230 --> 00:30:53,310 +use one of these, you will get approximate answer + +344 +00:30:53,310 --> 00:30:56,430 +to be 1784. + +345 +00:30:58,590 --> 00:31:04,430 +Any question? So in this case, we used the sample + +346 +00:31:04,430 --> 00:31:11,240 +proportion instead of 0.5, because the dean + +347 +00:31:11,240 --> 00:31:14,120 +selected a random sample of size 200, and he finds + +348 +00:31:14,120 --> 00:31:19,200 +that 118 of them are receiving financial aid. That + +349 +00:31:19,200 --> 00:31:24,980 +means the sample proportion is 118 divided by 200, + +350 +00:31:25,360 --> 00:31:30,420 +which gives 0.59. So we have to use 59% as the + +351 +00:31:30,420 --> 00:31:38,360 +sample proportion. Is it clear? Next, number + +352 +00:31:38,360 --> 00:31:38,760 +three. 
+ +353 +00:31:41,700 --> 00:31:45,860 +An economist is interested in studying the incomes + +354 +00:31:45,860 --> 00:31:51,620 +of consumers in a particular region. The + +355 +00:31:51,620 --> 00:31:56,400 +population standard deviation is known to be 1 + +356 +00:31:56,400 --> 00:32:00,560 +,000. A random sample of 50 individuals resulted + +357 +00:32:00,560 --> 00:32:06,460 +in an average income of $15,000. What is the + +358 +00:32:06,460 --> 00:32:11,520 +weight of the 90% confidence interval? So here in + +359 +00:32:11,520 --> 00:32:16,560 +this example, the population standard deviation + +360 +00:32:16,560 --> 00:32:21,480 +sigma is known. So sigma is $1000. + +361 +00:32:24,600 --> 00:32:32,280 +Random sample of size 50 is selected. This sample + +362 +00:32:32,280 --> 00:32:41,430 +gives an average of $15,000 ask + +363 +00:32:41,430 --> 00:32:48,150 +about what is the width of the 90% confidence + +364 +00:32:48,150 --> 00:32:55,630 +interval. So again, here + +365 +00:32:55,630 --> 00:32:58,710 +we are asking about the width of the confidence + +366 +00:32:58,710 --> 00:33:02,570 +interval. If we have a random sample of size 50, + +367 +00:33:03,320 --> 00:33:07,560 +And that sample gives an average of $15,000. And + +368 +00:33:07,560 --> 00:33:10,940 +we know that the population standard deviation is + +369 +00:33:10,940 --> 00:33:17,580 +1,000. Now, what's the width of the 90% confidence + +370 +00:33:17,580 --> 00:33:21,800 +interval? Any idea of this? + +371 +00:33:33,760 --> 00:33:40,020 +So idea number one is fine. You may calculate the + +372 +00:33:40,020 --> 00:33:43,400 +lower limit and upper limit. And the difference + +373 +00:33:43,400 --> 00:33:46,640 +between these two gives the width. So idea number + +374 +00:33:46,640 --> 00:33:51,360 +one, the width equals the distance between upper + +375 +00:33:51,360 --> 00:33:59,070 +limit our limit minus lower limit. 
Now this + +376 +00:33:59,070 --> 00:34:03,270 +distance gives a width, that's correct. Let's see. + +377 +00:34:04,710 --> 00:34:07,910 +So in other words, you have to find the confidence + +378 +00:34:07,910 --> 00:34:12,070 +interval by using this equation x bar plus or + +379 +00:34:12,070 --> 00:34:17,070 +minus z sigma over root n, x bar is given. + +380 +00:34:20,190 --> 00:34:28,690 +Now for 90% we know that z equals 1.645 sigma is + +381 +00:34:28,690 --> 00:34:32,670 +1000 divided + +382 +00:34:32,670 --> 00:34:40,850 +by root 50 plus or minus. By calculator, 1000 + +383 +00:34:40,850 --> 00:34:45,010 +times this number divided by root 50 will give + +384 +00:34:45,010 --> 00:34:49,190 +around + +385 +00:34:49,190 --> 00:34:50,730 +232.6. + +386 +00:34:58,290 --> 00:35:05,790 +So the upper limit is this value and lower limit + +387 +00:35:05,790 --> 00:35:09,650 +147671. + +388 +00:35:11,350 --> 00:35:14,250 +So now the upper limit and lower limit are + +389 +00:35:14,250 --> 00:35:18,590 +computed. Now the difference between these two + +390 +00:35:18,590 --> 00:35:24,010 +values will give the weight. If you subtract these + +391 +00:35:24,010 --> 00:35:26,030 +two values, what equals 15,000? + +392 +00:35:30,670 --> 00:35:37,190 +And the answer is 465.13, around. + +393 +00:35:40,050 --> 00:35:45,550 +Maybe I took two minutes to figure the answer, the + +394 +00:35:45,550 --> 00:35:49,350 +right answer. But there is another one, another + +395 +00:35:49,350 --> 00:35:52,790 +idea, maybe shorter. It'll take shorter time. + +396 +00:35:56,890 --> 00:36:00,230 +It's correct, but straightforward. Just compute + +397 +00:36:00,230 --> 00:36:05,790 +the lower and upper limits. And the width is the + +398 +00:36:05,790 --> 00:36:07,190 +difference between these two values. + +399 +00:36:11,370 --> 00:36:16,050 +If you look carefully at this equation, difference + +400 +00:36:16,050 --> 00:36:21,560 +between these two values gives the width. 
Now + +401 +00:36:21,560 --> 00:36:25,880 +let's imagine that the lower limit equals x bar + +402 +00:36:25,880 --> 00:36:28,920 +minus + +403 +00:36:28,920 --> 00:36:36,340 +the error term. And upper limit is also x bar plus + +404 +00:36:36,340 --> 00:36:37,960 +the error term. + +405 +00:36:41,460 --> 00:36:46,580 +Now if we add this, or if we subtract 2 from 1, + +406 +00:36:47,900 --> 00:36:52,560 +you will get upper limit minus lower limit equals + +407 +00:36:52,560 --> 00:36:55,740 +x + +408 +00:36:55,740 --> 00:37:07,280 +bar cancels with 2x bar. If you subtract, w minus + +409 +00:37:07,280 --> 00:37:10,960 +equals 2e. + +410 +00:37:12,520 --> 00:37:18,060 +Upper limit minus lower limit is the width. So w, + +411 +00:37:18,760 --> 00:37:24,800 +the width is twice the sampling error. So we have + +412 +00:37:24,800 --> 00:37:29,980 +this new information, W equals twice of the margin + +413 +00:37:29,980 --> 00:37:36,400 +of error. If we add 1 and 2, that will give lower + +414 +00:37:36,400 --> 00:37:41,120 +limit plus upper limit equals to x bar. That means + +415 +00:37:41,120 --> 00:37:45,800 +x bar equals lower limit plus upper limit divided + +416 +00:37:45,800 --> 00:37:46,220 +by 2. + +417 +00:37:53,970 --> 00:37:59,790 +the error, and X bar is the average of lower and + +418 +00:37:59,790 --> 00:38:05,310 +upper limits. So by using this idea now, if we + +419 +00:38:05,310 --> 00:38:12,490 +compute the error term, E equals Z sigma over root + +420 +00:38:12,490 --> 00:38:13,630 +N, this quantity. + +421 +00:38:17,350 --> 00:38:25,260 +And again, Z is 1645. Sigma is 1000. Divide by + +422 +00:38:25,260 --> 00:38:34,960 +root 50. This gives 232.6. This is the error tier, + +423 +00:38:36,300 --> 00:38:40,040 +or the margin of error. As we know, that's called + +424 +00:38:40,040 --> 00:38:46,400 +margin of error or sampling error. + +425 +00:38:50,580 --> 00:38:56,190 +So the error is this amount. 
The width is twice + +426 +00:38:56,190 --> 00:39:04,490 +this value. So W equals 2 times the error. And the + +427 +00:39:04,490 --> 00:39:10,830 +answer should be the same as the one we just... So + +428 +00:39:10,830 --> 00:39:13,450 +we end with the same result. Now which one is + +429 +00:39:13,450 --> 00:39:17,370 +shorter? Forget about my explanation up to this + +430 +00:39:17,370 --> 00:39:22,570 +point. We started from this one. We just computed + +431 +00:39:22,570 --> 00:39:27,390 +the error tier. I mean this amount. Then we found + +432 +00:39:27,390 --> 00:39:32,950 +the error to be this 232 multiply this by 2 will + +433 +00:39:32,950 --> 00:39:35,550 +give the sampling error or the, I'm sorry, will + +434 +00:39:35,550 --> 00:39:39,630 +give the weight of the interval. Now imagine for + +435 +00:39:39,630 --> 00:39:43,370 +this problem, the income, the average income is + +436 +00:39:43,370 --> 00:39:43,830 +not given. + +437 +00:39:47,590 --> 00:39:55,450 +Suppose x bar is not given. Now the question is, + +438 +00:39:55,550 --> 00:40:00,030 +can you find the answer by using this idea? But + +439 +00:40:00,030 --> 00:40:04,390 +here, without using x bar, we computed the + +440 +00:40:04,390 --> 00:40:07,810 +sampling error to multiply this value by 2 and get + +441 +00:40:07,810 --> 00:40:08,130 +the answer. + +442 +00:40:11,810 --> 00:40:13,810 +So that's for number 12. + +443 +00:40:16,830 --> 00:40:20,550 +Again, for this particular example, there are two + +444 +00:40:20,550 --> 00:40:25,610 +ways to solve this problem. The first one, you + +445 +00:40:25,610 --> 00:40:28,390 +have to construct the confidence interval, then + +446 +00:40:28,390 --> 00:40:32,910 +subtract upper limit from the lower limit, you + +447 +00:40:32,910 --> 00:40:38,030 +will get the width of the interval. 
The other way, + +448 +00:40:38,610 --> 00:40:42,150 +just compute the error and multiply the answer by + +449 +00:40:42,150 --> 00:40:48,210 +2, you will get the same result. Number 13. + +450 +00:40:56,020 --> 00:41:00,980 +13th says that the head librarian at the Library + +451 +00:41:00,980 --> 00:41:04,780 +of Congress has asked her assistant for an + +452 +00:41:04,780 --> 00:41:07,980 +interval estimate of a mean number of books + +453 +00:41:07,980 --> 00:41:12,720 +checked out each day. The assistant provides the + +454 +00:41:12,720 --> 00:41:23,000 +following interval estimate. From 740 to 920 books + +455 +00:41:23,000 --> 00:41:28,360 +per day. If the head librarian knows that the + +456 +00:41:28,360 --> 00:41:33,880 +population standard deviation is 150 books shipped + +457 +00:41:33,880 --> 00:41:37,420 +outwardly, approximately how large a sample did + +458 +00:41:37,420 --> 00:41:40,200 +her assistant use to determine the interval + +459 +00:41:40,200 --> 00:41:46,540 +estimate? So the information we have is the + +460 +00:41:46,540 --> 00:41:50,860 +following. We have information about the + +461 +00:41:50,860 --> 00:41:51,700 +confidence interval. + +462 +00:42:01,440 --> 00:42:02,800 +920 books. + +463 +00:42:05,940 --> 00:42:08,700 +And sigma is known to be 150. + +464 +00:42:12,980 --> 00:42:17,980 +That's all we have. He asked about how large a + +465 +00:42:17,980 --> 00:42:20,880 +sample did Herelsen's conclusion determine the + +466 +00:42:20,880 --> 00:42:21,820 +interval estimate. + +467 +00:42:26,740 --> 00:42:31,850 +Look at the answers. A is 2. B is 3, C is 12, it + +468 +00:42:31,850 --> 00:42:33,950 +cannot be determined from the information given. + +469 +00:42:37,190 --> 00:42:43,190 +Now, in order to find the number, the sample, we + +470 +00:42:43,190 --> 00:42:48,350 +need Sigma or E squared. Confidence is not given. + +471 +00:42:50,550 --> 00:43:00,140 +Sigma is okay. We can find the error. 
The error is + +472 +00:43:00,140 --> 00:43:07,940 +just W divided by 2. So the error is fine. I mean, + +473 +00:43:08,100 --> 00:43:12,200 +E is fine. E is B minus A or upper limit minus + +474 +00:43:12,200 --> 00:43:17,100 +lower limit divided by 2. So width divided by 2. + +475 +00:43:17,240 --> 00:43:20,740 +So this is fine. But you don't have information + +476 +00:43:20,740 --> 00:43:21,780 +about Z. + +477 +00:43:25,020 --> 00:43:29,550 +We are looking for N. So Z is not I mean, cannot + +478 +00:43:29,550 --> 00:43:32,810 +be computed because the confidence level is not + +479 +00:43:32,810 --> 00:43:39,830 +given. So the information is determined + +480 +00:43:39,830 --> 00:43:46,170 +from the information given. Make sense? So we + +481 +00:43:46,170 --> 00:43:50,790 +cannot compute this score. Z is fine. Z is 150. + +482 +00:43:51,330 --> 00:43:54,310 +The margin of error, we can compute the margin by + +483 +00:43:54,310 --> 00:43:59,090 +using this interval, the width. Divide by two + +484 +00:43:59,090 --> 00:44:05,790 +gives the same result. Now for number 14, we have + +485 +00:44:05,790 --> 00:44:11,330 +the same information. But here, + +486 +00:44:14,450 --> 00:44:22,030 +she asked her assistant to use 25 days. So now, n + +487 +00:44:22,030 --> 00:44:24,990 +is 25. We have the same information about the + +488 +00:44:24,990 --> 00:44:25,310 +interval. + +489 +00:44:32,020 --> 00:44:33,300 +And sigma is 150. + +490 +00:44:36,300 --> 00:44:40,800 +So she asked her assistant to use 25 days of data + +491 +00:44:40,800 --> 00:44:43,860 +to construct the interval estimate. So n is 25. + +492 +00:44:44,980 --> 00:44:48,300 +What confidence level can she attach to the + +493 +00:44:48,300 --> 00:44:53,500 +interval estimate? Now in this case, we are asking + +494 +00:44:53,500 --> 00:44:56,240 +about confidence, not z. + +495 +00:45:00,930 --> 00:45:03,530 +You have to distinguish between confidence level + +496 +00:45:03,530 --> 00:45:08,130 +and z. 
We use z, I'm sorry, we use z level to + +497 +00:45:08,130 --> 00:45:13,350 +compute the z score. Now, which one is correct? 99 + +498 +00:45:13,350 --> 00:45:21,670 +.7, 99, 98, 95.4. Let's see. Now, what's the + +499 +00:45:21,670 --> 00:45:25,070 +average? I'm sorry, the formula is x bar plus or + +500 +00:45:25,070 --> 00:45:29,270 +minus z sigma over root n. What's the average? In + +501 +00:45:29,270 --> 00:45:34,710 +this case, this is the formula we have. We are + +502 +00:45:34,710 --> 00:45:38,770 +looking about this one. Now, also there are two + +503 +00:45:38,770 --> 00:45:43,250 +ways to solve this problem. Either focus on the + +504 +00:45:43,250 --> 00:45:47,950 +aortia, or just find a continuous interval by + +505 +00:45:47,950 --> 00:45:55,830 +itself. So let's focus on this one. Z sigma over + +506 +00:45:55,830 --> 00:45:56,230 +root of. + +507 +00:45:59,620 --> 00:46:05,380 +And we have x bar. What's the value of x bar? x + +508 +00:46:05,380 --> 00:46:15,240 +bar 740 plus x + +509 +00:46:15,240 --> 00:46:16,400 +bar 830. + +510 +00:46:25,380 --> 00:46:31,740 +1660 divided by 2, 830. Now, z equals, I don't + +511 +00:46:31,740 --> 00:46:40,660 +know, sigma, sigma is 150, n is 5. So here we have + +512 +00:46:40,660 --> 00:46:41,600 +30 sigma. + +513 +00:46:44,980 --> 00:46:51,560 +Now, what's the value of sigma? 36, so we have x + +514 +00:46:51,560 --> 00:46:54,900 +bar, now the value of x bar. + +515 +00:47:02,330 --> 00:47:10,530 +So we have x bar 830 plus or minus 30 there. + +516 +00:47:15,290 --> 00:47:18,030 +Now, if you look carefully at this equation, + +517 +00:47:19,550 --> 00:47:24,570 +what's the value of z in order to have this + +518 +00:47:24,570 --> 00:47:29,630 +confidence interval, which is 740 and 920? + +519 +00:47:36,170 --> 00:47:40,730 +So, Z should be... + +520 +00:47:40,730 --> 00:47:46,290 +What's the value of Z? Now, 830 minus 3Z equals + +521 +00:47:46,290 --> 00:47:46,870 +this value. 
+ +522 +00:47:49,830 --> 00:47:53,390 +So, Z equals... + +523 +00:47:53,390 --> 00:47:56,450 +3. + +524 +00:47:56,830 --> 00:48:03,540 +So, Z is 3. That's why. Now, Z is 3. What do you + +525 +00:48:03,540 --> 00:48:05,180 +think the corresponding C level? + +526 +00:48:11,460 --> 00:48:16,560 +99.7% If + +527 +00:48:16,560 --> 00:48:27,080 +you remember for the 68 empirical rule 68, 95, 99 + +528 +00:48:27,080 --> 00:48:33,760 +.7% In chapter 6 we said that 99.7% of the data + +529 +00:48:33,760 --> 00:48:37,220 +falls within three standard deviations of the + +530 +00:48:37,220 --> 00:48:41,980 +mean. So if these three, I am sure that we are + +531 +00:48:41,980 --> 00:48:50,340 +using 99.7% for the confidence level. So for this + +532 +00:48:50,340 --> 00:48:53,280 +particular example here, we have new information + +533 +00:48:53,280 --> 00:48:57,280 +about the sample size. So N is 25. + +534 +00:49:01,630 --> 00:49:06,190 +So just simple calculation x bar as I mentioned + +535 +00:49:06,190 --> 00:49:11,510 +here. The average is lower limit plus upper limit + +536 +00:49:11,510 --> 00:49:18,270 +divided by 2. So x bar equals 830. So now your + +537 +00:49:18,270 --> 00:49:25,130 +confidence interval is x bar plus or minus z sigma + +538 +00:49:25,130 --> 00:49:31,070 +over root n. z sigma over root n, z is unknown, + +539 +00:49:32,190 --> 00:49:37,030 +sigma is 150, n is 25, which is 5, square root of + +540 +00:49:37,030 --> 00:49:48,390 +it, so we'll have 3z. So now x bar 830 minus 3z, + +541 +00:49:49,610 --> 00:49:53,870 +this is the lower limit, upper limit 830 plus 3z. + +542 +00:49:55,480 --> 00:49:59,000 +Now, the confidence interval is given by 740 and + +543 +00:49:59,000 --> 00:50:09,020 +920. Just use the lower limit. 830 minus 3z equals + +544 +00:50:09,020 --> 00:50:10,820 +740. + +545 +00:50:12,300 --> 00:50:18,280 +Simple calculation here. 830 minus 740 is 90, + +546 +00:50:18,660 --> 00:50:22,340 +equals 3z. That means z equals 3. 
+ +547 +00:50:26,070 --> 00:50:29,750 +Now the z value is 3, it means the confidence is + +548 +00:50:29,750 --> 00:50:33,530 +9917, so the correct answer is A. + +549 +00:50:44,690 --> 00:50:49,390 +The other way, you can use that one, by using the + +550 +00:50:53,010 --> 00:50:55,830 +Margin of error, which is the difference between + +551 +00:50:55,830 --> 00:50:58,270 +these two divided by two, you will get the same + +552 +00:50:58,270 --> 00:51:02,630 +result. So there are two methods, one of these + +553 +00:51:02,630 --> 00:51:05,830 +straightforward one. The other one, as you + +554 +00:51:05,830 --> 00:51:13,550 +mentioned, is the error term. It's B minus upper + +555 +00:51:13,550 --> 00:51:19,550 +limit minus lower limit divided by two. Upper + +556 +00:51:19,550 --> 00:51:27,450 +limit is 920. Minus 74 divided by 2. What's the + +557 +00:51:27,450 --> 00:51:28,370 +value for this one? + +558 +00:51:34,570 --> 00:51:40,610 +90. So the margin of error is 90. So E equals E. + +559 +00:51:41,070 --> 00:51:43,790 +Sigma or N equals? + +560 +00:51:47,110 --> 00:51:50,810 +All squared. So by using this equation you can get + +561 +00:51:50,810 --> 00:51:56,860 +your result. So, N is 25, Z is unknown, Sigma is + +562 +00:51:56,860 --> 00:52:05,520 +150, R is 90. This one squared. You will get the + +563 +00:52:05,520 --> 00:52:10,020 +same Z-score. Make sense? + +564 +00:52:17,770 --> 00:52:21,810 +Because if you take z to be three times one-fifth + +565 +00:52:21,810 --> 00:52:25,150 +divided by nine squared, you will get the same + +566 +00:52:25,150 --> 00:52:30,790 +result for z value. So both will give the same + +567 +00:52:30,790 --> 00:52:35,790 +result. So that's for the multiple choice + +568 +00:52:35,790 --> 00:52:42,430 +problems. Any question? Let's move to the section + +569 +00:52:42,430 --> 00:52:46,370 +number two, true or false problems. 
+ +570 +00:52:47,810 --> 00:52:48,790 +Number one, + +571 +00:52:51,850 --> 00:52:57,950 +a race car driver + +572 +00:52:57,950 --> 00:53:03,670 +tested his car for time from 0 to 60 mileage per + +573 +00:53:03,670 --> 00:53:09,390 +hour. And in 20 tests, obtained an average of 4.85 + +574 +00:53:09,390 --> 00:53:16,660 +seconds, with some deviation of 1.47 seconds. 95 + +575 +00:53:16,660 --> 00:53:23,440 +confidence interval for the 0 to 60 time is 4.62 + +576 +00:53:23,440 --> 00:53:29,540 +seconds up to 5.18. I think straightforward. Just + +577 +00:53:29,540 --> 00:53:33,440 +simple calculation, it will give the right answer. + +578 +00:53:36,660 --> 00:53:40,640 +x bar n, + +579 +00:53:41,360 --> 00:53:43,620 +so we have to use this equation. + +580 +00:53:48,220 --> 00:53:54,020 +You can do it. So it says the answer is false. You + +581 +00:53:54,020 --> 00:53:58,340 +have to check this result. So it's 4.5 plus or + +582 +00:53:58,340 --> 00:54:03,460 +minus T. We have to find T. S is given to be 147 + +583 +00:54:03,460 --> 00:54:10,120 +divided by root 20. Now, to find T, we have to use + +584 +00:54:10,120 --> 00:54:18,480 +0 to 5 and 19. By this value here, you'll get the + +585 +00:54:18,480 --> 00:54:22,160 +exact answer. Part number two. + +586 +00:54:24,980 --> 00:54:32,380 +Given a sample mean of 2.1. So x bar is 2.1. + +587 +00:54:33,680 --> 00:54:34,680 +Excuse me? + +588 +00:54:38,500 --> 00:54:45,920 +Because n is small. Now, this sample, This sample + +589 +00:54:45,920 --> 00:54:52,220 +gives an average of 4.85, and standard deviation + +590 +00:54:52,220 --> 00:55:02,420 +based on this sample. So S, so X bar, 4.85, and S + +591 +00:55:02,420 --> 00:55:09,640 +is equal to 1.47. So this is not sigma, because it + +592 +00:55:09,640 --> 00:55:15,210 +says that 20 tests, so random sample is 20. 
This + +593 +00:55:15,210 --> 00:55:19,390 +sample gives an average of this amount and + +594 +00:55:19,390 --> 00:55:21,350 +standard deviation of this amount. + +595 +00:55:29,710 --> 00:55:34,610 +We are looking for the + +596 +00:55:34,610 --> 00:55:40,470 +continence interval, and we have two cases. First + +597 +00:55:40,470 --> 00:55:43,630 +case, if sigma is known, + +598 +00:55:47,220 --> 00:55:48,240 +Sigma is unknown. + +599 +00:55:51,520 --> 00:55:58,440 +Now for this example, sigma is unknown. So since + +600 +00:55:58,440 --> 00:56:05,740 +sigma is unknown, we have to use T distribution if + +601 +00:56:05,740 --> 00:56:09,940 +the distribution is normal or if N is large + +602 +00:56:09,940 --> 00:56:14,380 +enough. Now for this example, N is 20. So we have + +603 +00:56:14,380 --> 00:56:17,860 +to assume that the population is approximately + +604 +00:56:17,860 --> 00:56:23,660 +normal. So we have to use t. So my confidence + +605 +00:56:23,660 --> 00:56:26,100 +interval should be x bar plus or minus 3s over + +606 +00:56:26,100 --> 00:56:32,560 +root n. Now, number two. Given a sample mean of 2 + +607 +00:56:32,560 --> 00:56:36,180 +.1 and a population standard deviation. I + +608 +00:56:36,180 --> 00:56:38,720 +mentioned that population standard deviation is + +609 +00:56:38,720 --> 00:56:43,900 +given. So sigma is 0.7. So sigma is known in this + +610 +00:56:43,900 --> 00:56:49,170 +example. So in part two, sigma is given. Now, from + +611 +00:56:49,170 --> 00:56:50,890 +a sample of 10 data points, + +612 +00:56:53,730 --> 00:56:56,190 +we are looking for 90% confidence interval. + +613 +00:56:58,790 --> 00:57:07,230 +90% confidence interval will have a width of 2.36. + +614 +00:57:16,460 --> 00:57:19,180 +What is two times the assembling error? + +615 +00:57:22,500 --> 00:57:28,040 +So the answer is given. So the error here, error A + +616 +00:57:28,040 --> 00:57:30,160 +equals W. + +617 +00:57:32,060 --> 00:57:34,120 +So the answer is 1.16. 
+ +618 +00:57:40,520 --> 00:57:45,220 +So he asked about given a sample, 90% confidence + +619 +00:57:45,220 --> 00:57:50,540 +interval will have a width of 2.36. Let's see if + +620 +00:57:50,540 --> 00:57:54,780 +the exact width is 2.36 or not. Now we have x bar + +621 +00:57:54,780 --> 00:58:03,240 +plus or minus z, sigma 1.8. x bar is 2.1, plus or + +622 +00:58:03,240 --> 00:58:08,660 +minus. Now what's the error? 1.18. + +623 +00:58:11,230 --> 00:58:16,370 +this amount without calculation or you just use + +624 +00:58:16,370 --> 00:58:19,590 +this straightforward calculation here we are + +625 +00:58:19,590 --> 00:58:23,530 +talking about z about 90 percent so this amount 1 + +626 +00:58:23,530 --> 00:58:30,330 +.645 times sigma divided by root n for sure this + +627 +00:58:30,330 --> 00:58:35,430 +quantity equals 1.18 But you don't need to do that + +628 +00:58:35,430 --> 00:58:40,570 +because the width is given to be 2.36. So E is 1 + +629 +00:58:40,570 --> 00:58:46,430 +.18. So that saves time in order to compute the + +630 +00:58:46,430 --> 00:58:55,190 +error term. So now 2.1 minus 1.8. 2.1 plus 1.8. + +631 +00:58:56,350 --> 00:58:59,070 +That F, the width, is 2.36. + +632 +00:59:02,010 --> 00:59:04,170 +that if the width equals this value. + +633 +00:59:10,410 --> 00:59:15,270 +2.36. So I solved the problem if the width. But he + +634 +00:59:15,270 --> 00:59:18,430 +asked about, do you know this value? I don't know + +635 +00:59:18,430 --> 00:59:21,230 +that one, so we have to compute the exact answer + +636 +00:59:21,230 --> 00:59:28,230 +now. So x bar 2.1 plus 1645 sigma + +637 +00:59:34,480 --> 00:59:38,600 +My calculator can find the error now. What's the + +638 +00:59:38,600 --> 00:59:41,260 +value for this amount? My calculator. + +639 +00:59:50,700 --> 00:59:56,440 +It's 5.75. 5.75. + +640 +00:59:57,640 --> 01:00:01,830 +So this is your error. So E equals this amount. So + +641 +01:00:01,830 --> 01:00:05,370 +W equals 2 plus 4. 
+ +642 +01:00:08,350 --> 01:00:17,050 +So the error is 5.74. So what's the width? The + +643 +01:00:17,050 --> 01:00:20,210 +width equals 2 times E. + +644 +01:00:25,590 --> 01:00:30,340 +Again. This value, 1.645 times 1.7 divided by root + +645 +01:00:30,340 --> 01:00:30,580 +10. + +646 +01:00:37,280 --> 01:00:38,280 +Three point. + +647 +01:00:44,020 --> 01:00:49,700 +So again, arrow is 3.64. So what's the width? + +648 +01:00:51,160 --> 01:00:53,540 +Twice this value, so two times this one. + +649 +01:00:57,880 --> 01:00:59,560 +7.28. + +650 +01:01:02,120 --> 01:01:07,180 +Now it says in the problem here we have width of 2 + +651 +01:01:07,180 --> 01:01:09,200 +.36. So it's incorrect. + +652 +01:01:11,840 --> 01:01:16,320 +So just simple calculation gives width of 7.28, + +653 +01:01:17,180 --> 01:01:23,900 +not 2.36. Number three. + +654 +01:01:27,950 --> 01:01:32,850 +Look at number four. Other things be equal. As the + +655 +01:01:32,850 --> 01:01:35,550 +confidence level for a confidence interval + +656 +01:01:35,550 --> 01:01:41,250 +increases, the width of the interval increases. As + +657 +01:01:41,250 --> 01:01:45,310 +the confidence level increases, confidence + +658 +01:01:45,310 --> 01:01:47,650 +interval increases, the width of the interval + +659 +01:01:47,650 --> 01:01:52,750 +increases. Correct. So that's true. Let's do + +660 +01:01:52,750 --> 01:01:53,790 +number seven. + +661 +01:01:56,840 --> 01:02:02,300 +A point estimate consists + +662 +01:02:02,300 --> 01:02:06,700 +of a single sample statistic that is used to + +663 +01:02:06,700 --> 01:02:11,000 +estimate the true population parameter. That's + +664 +01:02:11,000 --> 01:02:15,940 +correct because any point estimate, for example x + +665 +01:02:15,940 --> 01:02:21,360 +bar, is used to determine the confidence interval + +666 +01:02:21,360 --> 01:02:25,600 +for the unknown parameter mu. 
So a single + +667 +01:02:25,600 --> 01:02:30,580 +statistic can be used to estimate the true + +668 +01:02:30,580 --> 01:02:33,400 +population parameter, either X bar as a point + +669 +01:02:33,400 --> 01:02:34,900 +estimate or P. + +670 +01:02:41,380 --> 01:02:48,000 +So that's true. Number eight. The T distribution + +671 +01:02:48,000 --> 01:02:51,100 +is used to develop a confidence interval estimate + +672 +01:02:51,100 --> 01:02:54,240 +of the population mean when the population + +673 +01:02:54,240 --> 01:02:57,200 +standard deviation is unknown. That's correct + +674 +01:02:57,200 --> 01:03:01,240 +because we are using T distribution if sigma is + +675 +01:03:01,240 --> 01:03:03,740 +not given and here we have to assume the + +676 +01:03:03,740 --> 01:03:07,960 +population is normal. 9. + +677 +01:03:11,540 --> 01:03:15,180 +The standardized normal distribution is used to + +678 +01:03:15,180 --> 01:03:17,340 +develop a confidence interval estimate of the + +679 +01:03:17,340 --> 01:03:20,700 +population proportion when the sample size is + +680 +01:03:20,700 --> 01:03:22,820 +large enough or sufficiently large. + +681 +01:03:28,640 --> 01:03:32,640 +The width + +682 +01:03:32,640 --> 01:03:37,720 +of a confidence interval equals twice the sampling + +683 +01:03:37,720 --> 01:03:42,570 +error. The weight equals twice the sample, so + +684 +01:03:42,570 --> 01:03:46,370 +that's true. A population parameter is used to + +685 +01:03:46,370 --> 01:03:50,650 +estimate a confidence interval? No way. Because we + +686 +01:03:50,650 --> 01:03:53,570 +use statistics to estimate the confidence + +687 +01:03:53,570 --> 01:03:58,130 +interval. These are statistics. So we are using + +688 +01:03:58,130 --> 01:04:02,390 +statistics to construct the confidence interval. + +689 +01:04:04,190 --> 01:04:10,080 +Number 12. Holding the sample size fixed. 
In + +690 +01:04:10,080 --> 01:04:14,560 +increasing level, the level of confidence in a + +691 +01:04:14,560 --> 01:04:17,520 +confidence interval will necessarily lead to wider + +692 +01:04:17,520 --> 01:04:20,500 +confidence interval. That's true. Because as C + +693 +01:04:20,500 --> 01:04:24,840 +level increases, Z becomes large, so we have large + +694 +01:04:24,840 --> 01:04:29,670 +width, so the confidence becomes wider. Last one, + +695 +01:04:30,550 --> 01:04:33,150 +holding the weight of a confidence interval fixed + +696 +01:04:33,150 --> 01:04:36,190 +and increasing the level of confidence can be + +697 +01:04:36,190 --> 01:04:40,090 +achieved with lower sample size with large sample + +698 +01:04:40,090 --> 01:04:44,830 +size. So it's false. So that's for section two. + +699 +01:04:46,230 --> 01:04:49,970 +One section is left, free response problems or + +700 +01:04:49,970 --> 01:04:52,990 +questions, you can do it at home. So next time, + +701 +01:04:53,070 --> 01:04:57,530 +inshallah, we'll start chapter nine. That's all. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/I7SEpdLlzFg_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/I7SEpdLlzFg_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..218039ba8c28c3a67d9988dcd6763a0b850483fe --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/I7SEpdLlzFg_raw.srt @@ -0,0 +1,2804 @@ +1 +00:00:06,760 --> 00:00:10,320 +Eventually I will give some practice problem for + +2 +00:00:10,320 --> 00:00:15,040 +chapter eight. Generally speaking, there are three + +3 +00:00:15,040 --> 00:00:19,800 +types of questions. The first type, multiple + +4 +00:00:19,800 --> 00:00:22,940 +choice, so MCQ questions. + +5 +00:00:36,250 --> 00:00:41,490 +The other type of problems will be true or false. + +6 +00:00:42,890 --> 00:00:54,870 +Part B, Part C, three response problems. + +7 +00:00:56,510 --> 00:01:00,210 +So three types of questions. 
Multiple choice, we + +8 +00:01:00,210 --> 00:01:03,130 +have four answers. You have to select correct one. + +9 +00:01:06,060 --> 00:01:11,660 +True or false problems. And the last part, free + +10 +00:01:11,660 --> 00:01:14,800 +response problems. Here we'll talk about one of + +11 +00:01:14,800 --> 00:01:20,740 +these. I will cover multiple choice questions as + +12 +00:01:20,740 --> 00:01:24,200 +well as true and false. Let's start with number + +13 +00:01:24,200 --> 00:01:30,200 +one for multiple choice. The width of a confidence + +14 +00:01:30,200 --> 00:01:36,050 +interval estimate for a proportion will be Here we + +15 +00:01:36,050 --> 00:01:39,690 +are talking about the width of a confidence + +16 +00:01:39,690 --> 00:01:40,230 +interval. + +17 +00:01:43,070 --> 00:01:48,770 +Estimates for a proportion will be narrower for 99 + +18 +00:01:48,770 --> 00:01:56,180 +% confidence than for a 9%. For 95 confidence? No, + +19 +00:01:56,280 --> 00:01:59,120 +because as we know that as the confidence level + +20 +00:01:59,120 --> 00:02:03,120 +increases, the width becomes wider. So A is + +21 +00:02:03,120 --> 00:02:10,440 +incorrect. Is this true? B. Wider for sample size + +22 +00:02:10,440 --> 00:02:14,900 +of 100 than for a sample size of 50? False, + +23 +00:02:15,020 --> 00:02:20,400 +because as sample size increases, The sampling + +24 +00:02:20,400 --> 00:02:23,600 +error goes down. That means the width of the + +25 +00:02:23,600 --> 00:02:28,700 +interval becomes smaller and smaller. Yes, for N. + +26 +00:02:30,300 --> 00:02:37,100 +Part C. Normal for 90% confidence, then for 95% + +27 +00:02:37,100 --> 00:02:40,620 +confidence. That's correct. So C is the correct + +28 +00:02:40,620 --> 00:02:43,640 +answer. Part D. Normal when the sampling + +29 +00:02:43,640 --> 00:02:49,100 +proportion is 50%. 
is incorrect because if we have + +30 +00:02:49,100 --> 00:02:52,920 +smaller than 50%, we'll get smaller confidence, + +31 +00:02:53,320 --> 00:02:56,620 +smaller weight of the confidence. So C is the + +32 +00:02:56,620 --> 00:03:02,720 +correct answer. Any question? So C is the correct + +33 +00:03:02,720 --> 00:03:06,180 +answer because as C level increases, the + +34 +00:03:06,180 --> 00:03:08,760 +confidence interval becomes narrower. + +35 +00:03:11,040 --> 00:03:12,920 +Let's move to the second one. + +36 +00:03:16,540 --> 00:03:19,900 +A 99% confidence interval estimate can be + +37 +00:03:19,900 --> 00:03:23,940 +interpreted to mean that. Let's look at the + +38 +00:03:23,940 --> 00:03:28,040 +interpretation of the 99% confidence interval. + +39 +00:03:28,940 --> 00:03:29,660 +Part eight. + +40 +00:03:33,360 --> 00:03:38,820 +If all possible samples are taken and confidence + +41 +00:03:38,820 --> 00:03:43,070 +interval estimates are developed, 99% of them + +42 +00:03:43,070 --> 00:03:45,750 +would include the true population mean somewhere + +43 +00:03:45,750 --> 00:03:46,790 +within their interval. + +44 +00:03:49,750 --> 00:03:53,210 +Here we are talking about the population mean. It + +45 +00:03:53,210 --> 00:03:57,890 +says that 99% of them of these intervals would + +46 +00:03:57,890 --> 00:04:00,790 +include the true population mean somewhere within + +47 +00:04:00,790 --> 00:04:05,490 +their interval. It's correct. Why false? Why is it + +48 +00:04:05,490 --> 00:04:10,620 +false? This is correct answer, because it's + +49 +00:04:10,620 --> 00:04:15,240 +mentioned that 99% of these confidence intervals + +50 +00:04:15,240 --> 00:04:19,600 +will contain the true population mean somewhere + +51 +00:04:19,600 --> 00:04:22,900 +within their interval. So A is correct. Let's look + +52 +00:04:22,900 --> 00:04:30,880 +at B. 
B says we have 99% confidence that we have + +53 +00:04:30,880 --> 00:04:34,160 +selected a sample whose interval does include the + +54 +00:04:34,160 --> 00:04:39,160 +population mean. Also, this one is correct. Again, + +55 +00:04:39,300 --> 00:04:42,540 +it's mentioned that 99% confidence that we have + +56 +00:04:42,540 --> 00:04:47,080 +selected sample whose interval does include. So + +57 +00:04:47,080 --> 00:04:52,600 +it's correct. So C is both of the above and D none + +58 +00:04:52,600 --> 00:04:54,840 +of the above. So C is the correct answer. So + +59 +00:04:54,840 --> 00:04:59,080 +sometimes maybe there is only one answer. Maybe in + +60 +00:04:59,080 --> 00:05:03,360 +other problems, it might be two answers are + +61 +00:05:03,360 --> 00:05:07,150 +correct. So for this one, B and C. I'm sorry, A + +62 +00:05:07,150 --> 00:05:11,790 +and B are correct, so C is the correct answer. + +63 +00:05:14,270 --> 00:05:17,530 +Number three, which of the following is not true + +64 +00:05:17,530 --> 00:05:20,610 +about the student's T distribution? Here, we are + +65 +00:05:20,610 --> 00:05:25,550 +talking about the not true statement about the + +66 +00:05:25,550 --> 00:05:31,110 +student T distribution, A. It has more data in the + +67 +00:05:31,110 --> 00:05:35,780 +tails. and less in the center than does the normal + +68 +00:05:35,780 --> 00:05:38,580 +distribution. That's correct because we mentioned + +69 +00:05:38,580 --> 00:05:44,160 +last time that T distribution, the tail is fatter + +70 +00:05:44,160 --> 00:05:48,000 +than the Z normal. So that means it has more data + +71 +00:05:48,000 --> 00:05:52,100 +in the tails and less data in the center. So + +72 +00:05:52,100 --> 00:05:52,720 +that's correct. + +73 +00:05:58,000 --> 00:06:01,020 +It is used to construct confidence intervals for + +74 +00:06:01,020 --> 00:06:03,220 +the population mean when the population standard + +75 +00:06:03,220 --> 00:06:07,400 +deviation is known. 
No, we use z instead of t, so + +76 +00:06:07,400 --> 00:06:11,680 +this one is incorrect about t. It is well-shaped + +77 +00:06:11,680 --> 00:06:17,320 +and symmetrical, so that's true, so we are looking + +78 +00:06:17,320 --> 00:06:21,900 +for the incorrect statement. D, as the number of + +79 +00:06:21,900 --> 00:06:23,260 +degrees of freedom increases, + +80 +00:06:25,850 --> 00:06:31,510 +The T distribution approaches the normal. That's + +81 +00:06:31,510 --> 00:06:36,830 +true. So which one? P. So P is incorrect. So + +82 +00:06:36,830 --> 00:06:39,670 +number four. Extra. + +83 +00:06:42,010 --> 00:06:47,090 +Can you explain the average total compensation of + +84 +00:06:47,090 --> 00:06:51,830 +CEOs in the service industry? Data were randomly + +85 +00:06:51,830 --> 00:06:57,480 +collected from 18 CEOs and 19 employees. 97% + +86 +00:06:57,480 --> 00:07:06,040 +confidence interval was calculated to be $281, + +87 +00:07:07,040 --> 00:07:09,020 +$260, + +88 +00:07:10,060 --> 00:07:13,780 +$5836, + +89 +00:07:14,800 --> 00:07:19,300 +and $180. Which of the following interpretations + +90 +00:07:19,300 --> 00:07:27,310 +is correct? Part number A. It says 97% of the + +91 +00:07:27,310 --> 00:07:32,450 +sample data compensation value between these two + +92 +00:07:32,450 --> 00:07:37,310 +values, correct or incorrect statement. Because it + +93 +00:07:37,310 --> 00:07:44,310 +says 97% of the sample data. For the confidence + +94 +00:07:44,310 --> 00:07:48,310 +center value, we are looking for the average, not + +95 +00:07:48,310 --> 00:07:51,690 +for the population, not for the sample. So A is + +96 +00:07:51,690 --> 00:07:55,890 +incorrect. Because A, it says here 97% of the + +97 +00:07:55,890 --> 00:07:58,690 +sampling total. Sample total, we are looking for + +98 +00:07:58,690 --> 00:08:02,390 +the average of the population. So A is incorrect + +99 +00:08:02,390 --> 00:08:09,150 +statement. 
B, we are 97% confident that the mean + +100 +00:08:09,150 --> 00:08:15,890 +of the sample. So it's false. Because the + +101 +00:08:15,890 --> 00:08:18,470 +confidence about the entire population is about + +102 +00:08:18,470 --> 00:08:24,160 +the population mean. So B is incorrect. C. In the + +103 +00:08:24,160 --> 00:08:27,760 +population of the surface industry, here we have + +104 +00:08:27,760 --> 00:08:33,240 +97% of them will have a total death toll. Also, + +105 +00:08:33,360 --> 00:08:37,480 +this one is incorrect because it mentions in the + +106 +00:08:37,480 --> 00:08:39,900 +population. Here we are talking about total, but + +107 +00:08:39,900 --> 00:08:44,000 +we are looking for the average. Now, part D. We + +108 +00:08:44,000 --> 00:08:47,360 +are 97% confident that the average total + +109 +00:08:50,460 --> 00:08:53,440 +So this one is correct statement. So D is the + +110 +00:08:53,440 --> 00:08:55,840 +correct statement. So for the confidence interval, + +111 +00:08:55,840 --> 00:08:59,040 +we are looking for population, number one. Number + +112 +00:08:59,040 --> 00:09:03,520 +two, the average of that population. So D is the + +113 +00:09:03,520 --> 00:09:07,260 +correct answer. Let's go back to part A. In part + +114 +00:09:07,260 --> 00:09:10,420 +A, it says sample total. So this is incorrect. + +115 +00:09:11,380 --> 00:09:15,140 +Next one. The mean of the sample. We are looking + +116 +00:09:15,140 --> 00:09:17,440 +for the mean of the population. So B is incorrect. + +117 +00:09:18,040 --> 00:09:25,240 +Part C. It mentions here population, but total. So + +118 +00:09:25,240 --> 00:09:30,300 +this one is incorrect. Finally here, we are 97% + +119 +00:09:30,300 --> 00:09:34,680 +confident that the average total. So this one is + +120 +00:09:34,680 --> 00:09:39,360 +true of all. So here we have population and the + +121 +00:09:39,360 --> 00:09:42,100 +average of that population. 
So it makes sense that + +122 +00:09:42,100 --> 00:09:43,260 +this is the correct answer. + +123 +00:09:46,520 --> 00:09:47,660 +Number five. + +124 +00:09:59,690 --> 00:10:03,290 +Number five, confidence interval. Confidence + +125 +00:10:03,290 --> 00:10:06,610 +interval was used to estimate the proportion of + +126 +00:10:06,610 --> 00:10:10,170 +statistics students that are females. A random + +127 +00:10:10,170 --> 00:10:16,170 +sample of 72 statistics students generated the + +128 +00:10:16,170 --> 00:10:22,970 +following 90% confidence interval, 0.438 + +129 +00:10:22,970 --> 00:10:28,150 +and 0.640. + +130 +00:10:28,510 --> 00:10:32,890 +42, based on the interval above the population + +131 +00:10:32,890 --> 00:10:38,230 +proportion of females equals to 0.6. So here we + +132 +00:10:38,230 --> 00:10:44,310 +have confidence interval for the female proportion + +133 +00:10:44,310 --> 00:10:52,990 +ranges between 0.438 up to 0.642. Based on this + +134 +00:10:52,990 --> 00:10:57,050 +interval. Is the population proportion of females + +135 +00:10:57,050 --> 00:10:58,770 +equal 60%? + +136 +00:11:03,410 --> 00:11:06,690 +So here we have from this point all the way up to + +137 +00:11:06,690 --> 00:11:10,610 +0.6. Is the population proportion of females equal + +138 +00:11:10,610 --> 00:11:16,250 +to 0.6? No. The answer is no, but know what? + +139 +00:11:16,850 --> 00:11:24,320 +Number A. No, and we are 90% sure of it. No, the + +140 +00:11:24,320 --> 00:11:31,200 +proportion is 54.17. See, maybe 60% is a + +141 +00:11:31,200 --> 00:11:33,760 +believable value of population proportion based on + +142 +00:11:33,760 --> 00:11:38,080 +information about it. He said yes, and we are 90% + +143 +00:11:38,080 --> 00:11:44,300 +sure of it. So which one is correct? Farah. Which + +144 +00:11:44,300 --> 00:11:44,900 +one is correct? + +145 +00:11:50,000 --> 00:11:56,760 +B says the proportion is 54. 
54 if we take the + +146 +00:11:56,760 --> 00:12:01,120 +average of these two values, the answer is 54. But + +147 +00:12:01,120 --> 00:12:04,960 +the true proportion is not the average of the two + +148 +00:12:04,960 --> 00:12:07,640 +endpoints. + +149 +00:12:08,440 --> 00:12:09,500 +So B is incorrect. + +150 +00:12:12,780 --> 00:12:16,320 +If you look at A, the answer is no. And we + +151 +00:12:16,320 --> 00:12:20,440 +mentioned before that this interval may Or may not + +152 +00:12:20,440 --> 00:12:25,380 +contains the true proportion, so A is incorrect. + +153 +00:12:26,700 --> 00:12:32,640 +Now C, maybe. So C is the correct statement, maybe + +154 +00:12:32,640 --> 00:12:35,820 +60% is believable value of the population + +155 +00:12:35,820 --> 00:12:39,020 +proportion based on the information about. So C is + +156 +00:12:39,020 --> 00:12:44,440 +the correct answer. A6, number six. + +157 +00:12:48,590 --> 00:12:49,550 +Number six. + +158 +00:13:21,280 --> 00:13:23,800 +So up to this point, we have the same information + +159 +00:13:23,800 --> 00:13:27,440 +for the previous problem. Using the information + +160 +00:13:27,440 --> 00:13:31,440 +about what total size sample would be necessary if + +161 +00:13:31,440 --> 00:13:35,460 +we wanted to estimate the true proportion within + +162 +00:13:35,460 --> 00:13:43,620 +minus positive or minus 0.108 using 95% + +163 +00:13:43,620 --> 00:13:46,320 +confidence. Now here we are looking for the sample + +164 +00:13:46,320 --> 00:13:49,160 +size that is required to estimate the true + +165 +00:13:49,160 --> 00:13:53,720 +proportion to be within 8% plus or minus 8% using + +166 +00:13:53,720 --> 00:13:54,720 +95% confidence. + +167 +00:13:58,640 --> 00:14:05,360 +The formula first, n equals z squared c plus one. + +168 +00:14:08,740 --> 00:14:14,240 +We have pi, one minus pi divided by e squared. + +169 +00:14:15,740 --> 00:14:21,120 +Now, pi is not given. 
So in this case either we + +170 +00:14:21,120 --> 00:14:25,880 +use a sinus sample in order to estimate the sample + +171 +00:14:25,880 --> 00:14:30,400 +proportion, Or use Pi to be 0.5. So in this case + +172 +00:14:30,400 --> 00:14:35,900 +we have to use Pi 1 half. If you remember last + +173 +00:14:35,900 --> 00:14:39,720 +time I gave you this equation. Z alpha over 2 + +174 +00:14:39,720 --> 00:14:44,820 +divided by 2 squared. So we have this equation. + +175 +00:14:45,900 --> 00:14:49,280 +Because Pi is not given, just use Pi to be 1 half. + +176 +00:14:50,060 --> 00:14:54,880 +Or you may use this equation. shortcut formula. In + +177 +00:14:54,880 --> 00:15:02,120 +this case, here we are talking about 95%. So + +178 +00:15:02,120 --> 00:15:07,240 +what's the value of Z? 196. 2 times E. + +179 +00:15:10,100 --> 00:15:17,140 +E is 8%. So 196 divided by 2 times E, the quantity + +180 +00:15:17,140 --> 00:15:25,540 +squared. Now the answer of this problem 150. So + +181 +00:15:25,540 --> 00:15:28,720 +approximately 150. + +182 +00:15:29,160 --> 00:15:33,520 +So 150 is the correct answer. So again, here we + +183 +00:15:33,520 --> 00:15:41,460 +used pi to be 1 half because P is not given. And + +184 +00:15:41,460 --> 00:15:46,580 +simple calculation results in 150 for the sample + +185 +00:15:46,580 --> 00:15:49,820 +size. So P is the correct answer, 7. + +186 +00:15:55,220 --> 00:15:56,000 +Number seven. + +187 +00:16:00,480 --> 00:16:03,820 +Number seven. When determining the sample size + +188 +00:16:03,820 --> 00:16:05,820 +necessarily for estimating the true population + +189 +00:16:05,820 --> 00:16:10,560 +mean, which factor is not considered when sampling + +190 +00:16:10,560 --> 00:16:14,960 +with replacement? Now here, if you remember the + +191 +00:16:14,960 --> 00:16:17,960 +formula for the sample size. 
+ 

+192
+00:16:38,820 --> 00:16:43,460
+Now, which factor is not considered when sampling

+193
+00:16:43,460 --> 00:16:47,120
+with replacement? Now, the population

+194
+00:16:47,120 --> 00:16:51,460
+size, the population size is not in this quantity,

+195
+00:16:51,600 --> 00:16:54,420
+so A is the correct answer. B, the population

+196
+00:16:54,420 --> 00:16:58,820
+standard deviation, sigma is here. C, the level of

+197
+00:16:58,820 --> 00:17:03,090
+confidence desired in the estimate, we have Z. The

+198
+00:17:03,090 --> 00:17:06,470
+allowable or tolerable sampling error, we have it here.

+199
+00:17:07,370 --> 00:17:10,770
+So A is the correct answer.

+200
+00:17:13,290 --> 00:17:13,630
+Eight.

+201
+00:17:20,020 --> 00:17:22,600
+Supposedly, I'm supposed to focus on the companies

+202
+00:17:22,600 --> 00:17:25,640
+that you're working on now. It turns out to be one

+203
+00:17:25,640 --> 00:17:28,360
+of them. I'm not sure if I'm on the right track.

+204
+00:17:28,420 --> 00:17:31,040
+To make more use of it as a reference for the

+205
+00:17:31,040 --> 00:17:31,780
+update.

+206
+00:17:43,820 --> 00:17:47,240
+Now, which of the following will result in a

+207
+00:17:47,240 --> 00:17:50,100
+reduced interval width? So here we are talking

+208
+00:17:50,100 --> 00:17:55,580
+about reducing the width of the interval. Number

+209
+00:17:55,580 --> 00:17:58,120
+one. Here, if you look carefully at this equation,

+210
+00:17:59,040 --> 00:18:08,140
+increase the sample size, the error Z alpha over 2

+211
+00:18:08,140 --> 00:18:17,180
+sigma over n. So this is the error term. Now,

+212
+00:18:17,240 --> 00:18:20,360
+based on this equation, if we increase the sample

+213
+00:18:20,360 --> 00:18:25,180
+size, the error will decrease. That means we

+214
+00:18:25,180 --> 00:18:28,360
+reduce the interval width. So A is the correct

+215
+00:18:28,360 --> 00:18:31,560
+answer. Look at B. 
Increase the confidence level. + +216 +00:18:34,030 --> 00:18:36,770 +Increasing the confidence level, it means increase + +217 +00:18:36,770 --> 00:18:41,030 +Z, increase E, that means we have wider confidence + +218 +00:18:41,030 --> 00:18:43,910 +interval, so B is incorrect. Increase the + +219 +00:18:43,910 --> 00:18:46,790 +population mean, it doesn't matter actually, so + +220 +00:18:46,790 --> 00:18:50,250 +it's not correct. Increase the sample mean also. + +221 +00:18:50,770 --> 00:18:54,990 +So C and D are incorrect totally, so B is + +222 +00:18:54,990 --> 00:18:57,670 +incorrect, so E is the correct answer. So the + +223 +00:18:57,670 --> 00:19:00,630 +correct answer is A, nine. + +224 +00:19:07,140 --> 00:19:10,500 +A major department store chain is interested in + +225 +00:19:10,500 --> 00:19:13,560 +estimating the average amount each credit and + +226 +00:19:13,560 --> 00:19:16,560 +customers spent on their first visit to the + +227 +00:19:16,560 --> 00:19:21,380 +chain's new store in the mall. 15 credit cards + +228 +00:19:21,380 --> 00:19:26,540 +accounts were randomly sampled and analyzed with + +229 +00:19:26,540 --> 00:19:29,320 +the following results. So here we have this + +230 +00:19:29,320 --> 00:19:34,880 +information about the 15 data points. We have x + +231 +00:19:34,880 --> 00:19:35,220 +bar. + +232 +00:19:38,550 --> 00:19:42,150 +of $50.5. + +233 +00:19:43,470 --> 00:19:47,390 +And S squared, the sample variance is 400. + +234 +00:19:49,890 --> 00:19:52,750 +Construct 95 confidence interval for the average + +235 +00:19:52,750 --> 00:19:55,570 +amount it's credit card customer spent on their + +236 +00:19:55,570 --> 00:20:01,230 +first visit to the chain. It's a new store. It's + +237 +00:20:01,230 --> 00:20:04,310 +in the mall, assuming the amount spent follows a + +238 +00:20:04,310 --> 00:20:05,010 +normal distribution. + +239 +00:20:08,090 --> 00:20:13,150 +In this case, we should use T instead of Z. 
So the + +240 +00:20:13,150 --> 00:20:16,310 +formula should be X bar plus or minus T, alpha + +241 +00:20:16,310 --> 00:20:17,610 +over 2S over root N. + +242 +00:20:23,110 --> 00:20:29,350 +So X bar is 50.5. T, we should use the T table. In + +243 +00:20:29,350 --> 00:20:34,010 +this case, here we are talking about 95%. + +244 +00:20:36,830 --> 00:20:44,130 +So that means alpha is 5%, alpha over 2, 0, 2, 5. + +245 +00:20:44,770 --> 00:20:48,930 +So now we are looking for 2, 0, 2, 5, and degrees + +246 +00:20:48,930 --> 00:20:55,170 +of freedom. N is 15. It says that 15 credit cards. + +247 +00:20:55,770 --> 00:20:59,110 +So 15 credit cards accounts for random samples. So + +248 +00:20:59,110 --> 00:21:05,470 +N equals 15. So since N is 15, Degrees of freedom + +249 +00:21:05,470 --> 00:21:09,850 +is 14. Now we may use the normal, the T table in + +250 +00:21:09,850 --> 00:21:16,250 +order to find the value of T in + +251 +00:21:16,250 --> 00:21:19,270 +the upper tier actually. So what's the value if + +252 +00:21:19,270 --> 00:21:26,350 +you have the table? So look at degrees of freedom + +253 +00:21:26,350 --> 00:21:33,090 +14 under the probability of 0 to 5. + +254 +00:21:40,190 --> 00:21:45,050 +So again, we are looking for degrees of freedom + +255 +00:21:45,050 --> 00:21:49,170 +equal 14 under 2.5%. + +256 +00:22:04,850 --> 00:22:11,390 +0.5 plus or minus 2 + +257 +00:22:11,390 --> 00:22:18,390 +.1448. S squared is given 400. Take square root of + +258 +00:22:18,390 --> 00:22:25,570 +this quantity 20 over root n over root 15. And the + +259 +00:22:25,570 --> 00:22:30,350 +answer, just simple calculation will give + +260 +00:22:34,250 --> 00:22:38,410 +This result, so D is the correct answer. So the + +261 +00:22:38,410 --> 00:22:45,870 +answer should be 50.5 plus or minus 11.08. So D is + +262 +00:22:45,870 --> 00:22:49,170 +the correct answer. 
So this one is straightforward + +263 +00:22:49,170 --> 00:22:52,990 +calculation, gives part D to be the correct + +264 +00:22:52,990 --> 00:22:55,750 +answer. Any question? + +265 +00:22:58,510 --> 00:23:00,110 +11, 10? + +266 +00:23:03,110 --> 00:23:07,250 +Private colleges and universities rely on money + +267 +00:23:07,250 --> 00:23:12,730 +contributed by individuals and corporations for + +268 +00:23:12,730 --> 00:23:17,950 +their operating expenses. Much of this money is + +269 +00:23:17,950 --> 00:23:24,090 +put into a fund called an endowment, and the + +270 +00:23:24,090 --> 00:23:27,530 +college spends only the interest earned by the + +271 +00:23:27,530 --> 00:23:33,130 +fund. Now, here we have a recent It says that a + +272 +00:23:33,130 --> 00:23:35,310 +recent survey of eight private colleges in the + +273 +00:23:35,310 --> 00:23:39,450 +United States revealed the following endowment in + +274 +00:23:39,450 --> 00:23:44,350 +millions of dollars. So we have this data. So it's + +275 +00:23:44,350 --> 00:23:50,650 +raw data. Summary statistics yield export to be + +276 +00:23:50,650 --> 00:23:53,010 +180. + +277 +00:23:57,010 --> 00:23:57,850 +So export. + +278 +00:24:07,070 --> 00:24:12,130 +Now if this information is not given, you have to + +279 +00:24:12,130 --> 00:24:15,170 +compute the average and standard deviation by the + +280 +00:24:15,170 --> 00:24:19,860 +equations we know. But here, the mean and standard + +281 +00:24:19,860 --> 00:24:23,200 +deviation are given. So just use this information + +282 +00:24:23,200 --> 00:24:27,480 +anyway. Calculate 95 confidence interval for the + +283 +00:24:27,480 --> 00:24:30,140 +mean endowment of all private colleges in the + +284 +00:24:30,140 --> 00:24:34,520 +United States, assuming a normal distribution for + +285 +00:24:34,520 --> 00:24:39,300 +the endowment. Here we have 95%. + +286 +00:24:39,300 --> 00:24:42,600 +Now + +287 +00:24:42,600 --> 00:24:48,480 +what's the sample size? 
It says that eight. So N + +288 +00:24:48,480 --> 00:24:53,900 +is eight. So again, simple calculation. So + +289 +00:24:53,900 --> 00:24:59,680 +explore, plus or minus T, S over root N. So use + +290 +00:24:59,680 --> 00:25:04,200 +the same idea for the previous one. And the answer + +291 +00:25:04,200 --> 00:25:10,420 +for number 10 is part D. So D is the correct + +292 +00:25:10,420 --> 00:25:17,380 +answer. So again, For eleven, D is the correct + +293 +00:25:17,380 --> 00:25:22,680 +answer. For ten, D is the correct answer. Next. So + +294 +00:25:22,680 --> 00:25:26,280 +this one is similar to the one we just did. + +295 +00:25:30,660 --> 00:25:31,260 +Eleven. + +296 +00:25:47,140 --> 00:25:51,140 +Here it says that rather than examine the records + +297 +00:25:51,140 --> 00:25:56,220 +of all students, the dean took a random sample of + +298 +00:25:56,220 --> 00:26:01,340 +size 200. So we have large university. Here we + +299 +00:26:01,340 --> 00:26:04,860 +took representative sample of size 200. + +300 +00:26:26,980 --> 00:26:31,900 +How many students would be to be assembled? It + +301 +00:26:31,900 --> 00:26:34,540 +says that if the dean wanted to estimate the + +302 +00:26:34,540 --> 00:26:38,040 +proportion of all students, The saving financial + +303 +00:26:38,040 --> 00:26:46,100 +aid to within 3% with 99% probability. How many + +304 +00:26:46,100 --> 00:26:51,620 +students would need to be sampled? So we have the + +305 +00:26:51,620 --> 00:26:56,920 +formula, if you remember, n equals z y 1 minus y + +306 +00:26:56,920 --> 00:27:00,860 +divided by e. So we have z squared. + +307 +00:27:03,640 --> 00:27:09,200 +Now, y is not given. If Pi is not given, we have + +308 +00:27:09,200 --> 00:27:14,180 +to look at either B or 0.5. 
Now in this problem, + +309 +00:27:15,000 --> 00:27:18,900 +it says that Dean selected 200 students, and he + +310 +00:27:18,900 --> 00:27:23,800 +finds that out of this number, 118 of them are + +311 +00:27:23,800 --> 00:27:26,480 +receiving financial aid. So based on this + +312 +00:27:26,480 --> 00:27:30,480 +information, we can compute B. So B is x over n. + +313 +00:27:30,700 --> 00:27:34,840 +It's 118 divided by 200. And this one gives? + +314 +00:27:41,090 --> 00:27:46,310 +So in this case, out of 200 students, 118 of them + +315 +00:27:46,310 --> 00:27:49,630 +are receiving financial aid. That means the + +316 +00:27:49,630 --> 00:27:53,730 +proportion, the sample proportion, is 118 divided + +317 +00:27:53,730 --> 00:27:57,690 +by 200, which is 0.59. So we have to use this + +318 +00:27:57,690 --> 00:28:03,830 +information instead of pi. So n equals, + +319 +00:28:08,050 --> 00:28:15,120 +now it's about 99%. 2.85. Exactly, it's one of + +320 +00:28:15,120 --> 00:28:21,380 +these. We have 2.57 and + +321 +00:28:21,380 --> 00:28:30,220 +8. It says 99%. So + +322 +00:28:30,220 --> 00:28:32,720 +here we have 99%. So what's left? + +323 +00:28:42,180 --> 00:28:47,320 +0.5 percent, this area. 0.5 to the right and 0.5 + +324 +00:28:47,320 --> 00:28:52,500 +to the left, so 005. Now if you look at 2.5 under + +325 +00:28:52,500 --> 00:28:57,280 +7, the answer is 0051, the other one 0049. + +326 +00:28:59,840 --> 00:29:04,600 +So either this one or the other value, so 2.57. or + +327 +00:29:04,600 --> 00:29:07,600 +2.58, it's better to take the average of these + +328 +00:29:07,600 --> 00:29:13,320 +two. Because 005 lies exactly between these two + +329 +00:29:13,320 --> 00:29:20,780 +values. So the score in this case, either 2.75 or + +330 +00:29:20,780 --> 00:29:30,880 +2.78, or the average. And the exact one, 2.7, I'm + +331 +00:29:30,880 --> 00:29:34,680 +sorry, 2.576. The exact answer. 
+ +332 +00:29:38,000 --> 00:29:40,700 +It's better to use the average if you don't + +333 +00:29:40,700 --> 00:29:46,100 +remember the exact answer. So it's the exact one. + +334 +00:29:47,480 --> 00:29:53,440 +But 2.575 is okay. Now just use this equation, 2 + +335 +00:29:53,440 --> 00:30:02,020 +.575 times square, times 59. + +336 +00:30:03,900 --> 00:30:09,440 +1 minus 59 divided by the error. It's three + +337 +00:30:09,440 --> 00:30:17,800 +percent. So it's 0.0312 squared. So the answer in + +338 +00:30:17,800 --> 00:30:23,420 +this case is part 2 + +339 +00:30:23,420 --> 00:30:30,300 +.57 times 59 times 41 divided by 03 squared. The + +340 +00:30:30,300 --> 00:30:31,140 +answer is part. + +341 +00:30:41,650 --> 00:30:46,530 +You will get the exact answer if you use 2.576. + +342 +00:30:48,190 --> 00:30:51,230 +You will get the exact answer. But anyway, if you + +343 +00:30:51,230 --> 00:30:53,310 +use one of these, you will get approximate answer + +344 +00:30:53,310 --> 00:30:56,430 +to be 1784. + +345 +00:30:58,590 --> 00:31:04,430 +Any question? So in this case, we used the sample + +346 +00:31:04,430 --> 00:31:11,240 +proportion instead of 0.5, because the dean + +347 +00:31:11,240 --> 00:31:14,120 +selected a random sample of size 200, and he finds + +348 +00:31:14,120 --> 00:31:19,200 +that 118 of them are receiving financial aid. That + +349 +00:31:19,200 --> 00:31:24,980 +means the sample proportion is 118 divided by 200, + +350 +00:31:25,360 --> 00:31:30,420 +which gives 0.59. So we have to use 59% as the + +351 +00:31:30,420 --> 00:31:38,360 +sample proportion. Is it clear? Next, number + +352 +00:31:38,360 --> 00:31:38,760 +three. + +353 +00:31:41,700 --> 00:31:45,860 +An economist is interested in studying the incomes + +354 +00:31:45,860 --> 00:31:51,620 +of consumers in a particular region. The + +355 +00:31:51,620 --> 00:31:56,400 +population standard deviation is known to be 1 + +356 +00:31:56,400 --> 00:32:00,560 +,000. 
A random sample of 50 individuals resulted + +357 +00:32:00,560 --> 00:32:06,460 +in an average income of $15,000. What is the + +358 +00:32:06,460 --> 00:32:11,520 +weight of the 90% confidence interval? So here in + +359 +00:32:11,520 --> 00:32:16,560 +this example, the population standard deviation + +360 +00:32:16,560 --> 00:32:21,480 +sigma is known. So sigma is $1000. + +361 +00:32:24,600 --> 00:32:32,280 +Random sample of size 50 is selected. This sample + +362 +00:32:32,280 --> 00:32:41,430 +gives an average of $15,000 ask + +363 +00:32:41,430 --> 00:32:48,150 +about what is the width of the 90% confidence + +364 +00:32:48,150 --> 00:32:55,630 +interval. So again, here + +365 +00:32:55,630 --> 00:32:58,710 +we are asking about the width of the confidence + +366 +00:32:58,710 --> 00:33:02,570 +interval. If we have a random sample of size 50, + +367 +00:33:03,320 --> 00:33:07,560 +And that sample gives an average of $15,000. And + +368 +00:33:07,560 --> 00:33:10,940 +we know that the population standard deviation is + +369 +00:33:10,940 --> 00:33:17,580 +1,000. Now, what's the width of the 90% confidence + +370 +00:33:17,580 --> 00:33:21,800 +interval? Any idea of this? + +371 +00:33:33,760 --> 00:33:40,020 +So idea number one is fine. You may calculate the + +372 +00:33:40,020 --> 00:33:43,400 +lower limit and upper limit. And the difference + +373 +00:33:43,400 --> 00:33:46,640 +between these two gives the width. So idea number + +374 +00:33:46,640 --> 00:33:51,360 +one, the width equals the distance between upper + +375 +00:33:51,360 --> 00:33:59,070 +limit our limit minus lower limit. Now this + +376 +00:33:59,070 --> 00:34:03,270 +distance gives a width, that's correct. Let's see. + +377 +00:34:04,710 --> 00:34:07,910 +So in other words, you have to find the confidence + +378 +00:34:07,910 --> 00:34:12,070 +interval by using this equation x bar plus or + +379 +00:34:12,070 --> 00:34:17,070 +minus z sigma over root n, x bar is given. 
+ +380 +00:34:20,190 --> 00:34:28,690 +Now for 90% we know that z equals 1.645 sigma is + +381 +00:34:28,690 --> 00:34:32,670 +1000 divided + +382 +00:34:32,670 --> 00:34:40,850 +by root 50 plus or minus. By calculator, 1000 + +383 +00:34:40,850 --> 00:34:45,010 +times this number divided by root 50 will give + +384 +00:34:45,010 --> 00:34:49,190 +around + +385 +00:34:49,190 --> 00:34:50,730 +232.6. + +386 +00:34:58,290 --> 00:35:05,790 +So the upper limit is this value and lower limit + +387 +00:35:05,790 --> 00:35:09,650 +147671. + +388 +00:35:11,350 --> 00:35:14,250 +So now the upper limit and lower limit are + +389 +00:35:14,250 --> 00:35:18,590 +computed. Now the difference between these two + +390 +00:35:18,590 --> 00:35:24,010 +values will give the weight. If you subtract these + +391 +00:35:24,010 --> 00:35:26,030 +two values, what equals 15,000? + +392 +00:35:30,670 --> 00:35:37,190 +And the answer is 465.13, around. + +393 +00:35:40,050 --> 00:35:45,550 +Maybe I took two minutes to figure the answer, the + +394 +00:35:45,550 --> 00:35:49,350 +right answer. But there is another one, another + +395 +00:35:49,350 --> 00:35:52,790 +idea, maybe shorter. It'll take shorter time. + +396 +00:35:56,890 --> 00:36:00,230 +It's correct, but straightforward. Just compute + +397 +00:36:00,230 --> 00:36:05,790 +the lower and upper limits. And the width is the + +398 +00:36:05,790 --> 00:36:07,190 +difference between these two values. + +399 +00:36:11,370 --> 00:36:16,050 +If you look carefully at this equation, difference + +400 +00:36:16,050 --> 00:36:21,560 +between these two values gives the width. Now + +401 +00:36:21,560 --> 00:36:25,880 +let's imagine that the lower limit equals x bar + +402 +00:36:25,880 --> 00:36:28,920 +minus + +403 +00:36:28,920 --> 00:36:36,340 +the error term. And upper limit is also x bar plus + +404 +00:36:36,340 --> 00:36:37,960 +the error term. 
+ +405 +00:36:41,460 --> 00:36:46,580 +Now if we add this, or if we subtract 2 from 1, + +406 +00:36:47,900 --> 00:36:52,560 +you will get upper limit minus lower limit equals + +407 +00:36:52,560 --> 00:36:55,740 +x + +408 +00:36:55,740 --> 00:37:07,280 +bar cancels with 2x bar. If you subtract, w minus + +409 +00:37:07,280 --> 00:37:10,960 +equals 2e. + +410 +00:37:12,520 --> 00:37:18,060 +Upper limit minus lower limit is the width. So w, + +411 +00:37:18,760 --> 00:37:24,800 +the width is twice the sampling error. So we have + +412 +00:37:24,800 --> 00:37:29,980 +this new information, W equals twice of the margin + +413 +00:37:29,980 --> 00:37:36,400 +of error. If we add 1 and 2, that will give lower + +414 +00:37:36,400 --> 00:37:41,120 +limit plus upper limit equals to x bar. That means + +415 +00:37:41,120 --> 00:37:45,800 +x bar equals lower limit plus upper limit divided + +416 +00:37:45,800 --> 00:37:46,220 +by 2. + +417 +00:37:53,970 --> 00:37:59,790 +the error, and X bar is the average of lower and + +418 +00:37:59,790 --> 00:38:05,310 +upper limits. So by using this idea now, if we + +419 +00:38:05,310 --> 00:38:12,490 +compute the error term, E equals Z sigma over root + +420 +00:38:12,490 --> 00:38:13,630 +N, this quantity. + +421 +00:38:17,350 --> 00:38:25,260 +And again, Z is 1645. Sigma is 1000. Divide by + +422 +00:38:25,260 --> 00:38:34,960 +root 50. This gives 232.6. This is the error tier, + +423 +00:38:36,300 --> 00:38:40,040 +or the margin of error. As we know, that's called + +424 +00:38:40,040 --> 00:38:46,400 +margin of error or sampling error. + +425 +00:38:50,580 --> 00:38:56,190 +So the error is this amount. The width is twice + +426 +00:38:56,190 --> 00:39:04,490 +this value. So W equals 2 times the error. And the + +427 +00:39:04,490 --> 00:39:10,830 +answer should be the same as the one we just... So + +428 +00:39:10,830 --> 00:39:13,450 +we end with the same result. Now which one is + +429 +00:39:13,450 --> 00:39:17,370 +shorter? 
Forget about my explanation up to this + +430 +00:39:17,370 --> 00:39:22,570 +point. We started from this one. We just computed + +431 +00:39:22,570 --> 00:39:27,390 +the error tier. I mean this amount. Then we found + +432 +00:39:27,390 --> 00:39:32,950 +the error to be this 232 multiply this by 2 will + +433 +00:39:32,950 --> 00:39:35,550 +give the sampling error or the, I'm sorry, will + +434 +00:39:35,550 --> 00:39:39,630 +give the weight of the interval. Now imagine for + +435 +00:39:39,630 --> 00:39:43,370 +this problem, the income, the average income is + +436 +00:39:43,370 --> 00:39:43,830 +not given. + +437 +00:39:47,590 --> 00:39:55,450 +Suppose x bar is not given. Now the question is, + +438 +00:39:55,550 --> 00:40:00,030 +can you find the answer by using this idea? But + +439 +00:40:00,030 --> 00:40:04,390 +here, without using x bar, we computed the + +440 +00:40:04,390 --> 00:40:07,810 +sampling error to multiply this value by 2 and get + +441 +00:40:07,810 --> 00:40:08,130 +the answer. + +442 +00:40:11,810 --> 00:40:13,810 +So that's for number 12. + +443 +00:40:16,830 --> 00:40:20,550 +Again, for this particular example, there are two + +444 +00:40:20,550 --> 00:40:25,610 +ways to solve this problem. The first one, you + +445 +00:40:25,610 --> 00:40:28,390 +have to construct the confidence interval, then + +446 +00:40:28,390 --> 00:40:32,910 +subtract upper limit from the lower limit, you + +447 +00:40:32,910 --> 00:40:38,030 +will get the width of the interval. The other way, + +448 +00:40:38,610 --> 00:40:42,150 +just compute the error and multiply the answer by + +449 +00:40:42,150 --> 00:40:48,210 +2, you will get the same result. Number 13. 
+ +450 +00:40:56,020 --> 00:41:00,980 +13th says that the head librarian at the Library + +451 +00:41:00,980 --> 00:41:04,780 +of Congress has asked her assistant for an + +452 +00:41:04,780 --> 00:41:07,980 +interval estimate of a mean number of books + +453 +00:41:07,980 --> 00:41:12,720 +checked out each day. The assistant provides the + +454 +00:41:12,720 --> 00:41:23,000 +following interval estimate. From 740 to 920 books + +455 +00:41:23,000 --> 00:41:28,360 +per day. If the head librarian knows that the + +456 +00:41:28,360 --> 00:41:33,880 +population standard deviation is 150 books shipped + +457 +00:41:33,880 --> 00:41:37,420 +outwardly, approximately how large a sample did + +458 +00:41:37,420 --> 00:41:40,200 +her assistant use to determine the interval + +459 +00:41:40,200 --> 00:41:46,540 +estimate? So the information we have is the + +460 +00:41:46,540 --> 00:41:50,860 +following. We have information about the + +461 +00:41:50,860 --> 00:41:51,700 +confidence interval. + +462 +00:42:01,440 --> 00:42:02,800 +920 books. + +463 +00:42:05,940 --> 00:42:08,700 +And sigma is known to be 150. + +464 +00:42:12,980 --> 00:42:17,980 +That's all we have. He asked about how large a + +465 +00:42:17,980 --> 00:42:20,880 +sample did Herelsen's conclusion determine the + +466 +00:42:20,880 --> 00:42:21,820 +interval estimate. + +467 +00:42:26,740 --> 00:42:31,850 +Look at the answers. A is 2. B is 3, C is 12, it + +468 +00:42:31,850 --> 00:42:33,950 +cannot be determined from the information given. + +469 +00:42:37,190 --> 00:42:43,190 +Now, in order to find the number, the sample, we + +470 +00:42:43,190 --> 00:42:48,350 +need Sigma or E squared. Confidence is not given. + +471 +00:42:50,550 --> 00:43:00,140 +Sigma is okay. We can find the error. The error is + +472 +00:43:00,140 --> 00:43:07,940 +just W divided by 2. So the error is fine. I mean, + +473 +00:43:08,100 --> 00:43:12,200 +E is fine. 
E is B minus A or upper limit minus + +474 +00:43:12,200 --> 00:43:17,100 +lower limit divided by 2. So width divided by 2. + +475 +00:43:17,240 --> 00:43:20,740 +So this is fine. But you don't have information + +476 +00:43:20,740 --> 00:43:21,780 +about Z. + +477 +00:43:25,020 --> 00:43:29,550 +We are looking for N. So Z is not I mean, cannot + +478 +00:43:29,550 --> 00:43:32,810 +be computed because the confidence level is not + +479 +00:43:32,810 --> 00:43:39,830 +given. So the information is determined + +480 +00:43:39,830 --> 00:43:46,170 +from the information given. Make sense? So we + +481 +00:43:46,170 --> 00:43:50,790 +cannot compute this score. Z is fine. Z is 150. + +482 +00:43:51,330 --> 00:43:54,310 +The margin of error, we can compute the margin by + +483 +00:43:54,310 --> 00:43:59,090 +using this interval, the width. Divide by two + +484 +00:43:59,090 --> 00:44:05,790 +gives the same result. Now for number 14, we have + +485 +00:44:05,790 --> 00:44:11,330 +the same information. But here, + +486 +00:44:14,450 --> 00:44:22,030 +she asked her assistant to use 25 days. So now, n + +487 +00:44:22,030 --> 00:44:24,990 +is 25. We have the same information about the + +488 +00:44:24,990 --> 00:44:25,310 +interval. + +489 +00:44:32,020 --> 00:44:33,300 +And sigma is 150. + +490 +00:44:36,300 --> 00:44:40,800 +So she asked her assistant to use 25 days of data + +491 +00:44:40,800 --> 00:44:43,860 +to construct the interval estimate. So n is 25. + +492 +00:44:44,980 --> 00:44:48,300 +What confidence level can she attach to the + +493 +00:44:48,300 --> 00:44:53,500 +interval estimate? Now in this case, we are asking + +494 +00:44:53,500 --> 00:44:56,240 +about confidence, not z. + +495 +00:45:00,930 --> 00:45:03,530 +You have to distinguish between confidence level + +496 +00:45:03,530 --> 00:45:08,130 +and z. We use z, I'm sorry, we use z level to + +497 +00:45:08,130 --> 00:45:13,350 +compute the z score. Now, which one is correct? 
99 + +498 +00:45:13,350 --> 00:45:21,670 +.7, 99, 98, 95.4. Let's see. Now, what's the + +499 +00:45:21,670 --> 00:45:25,070 +average? I'm sorry, the formula is x bar plus or + +500 +00:45:25,070 --> 00:45:29,270 +minus z sigma over root n. What's the average? In + +501 +00:45:29,270 --> 00:45:34,710 +this case, this is the formula we have. We are + +502 +00:45:34,710 --> 00:45:38,770 +looking about this one. Now, also there are two + +503 +00:45:38,770 --> 00:45:43,250 +ways to solve this problem. Either focus on the + +504 +00:45:43,250 --> 00:45:47,950 +aortia, or just find a continuous interval by + +505 +00:45:47,950 --> 00:45:55,830 +itself. So let's focus on this one. Z sigma over + +506 +00:45:55,830 --> 00:45:56,230 +root of. + +507 +00:45:59,620 --> 00:46:05,380 +And we have x bar. What's the value of x bar? x + +508 +00:46:05,380 --> 00:46:15,240 +bar 740 plus x + +509 +00:46:15,240 --> 00:46:16,400 +bar 830. + +510 +00:46:25,380 --> 00:46:31,740 +1660 divided by 2, 830. Now, z equals, I don't + +511 +00:46:31,740 --> 00:46:40,660 +know, sigma, sigma is 150, n is 5. So here we have + +512 +00:46:40,660 --> 00:46:41,600 +30 sigma. + +513 +00:46:44,980 --> 00:46:51,560 +Now, what's the value of sigma? 36, so we have x + +514 +00:46:51,560 --> 00:46:54,900 +bar, now the value of x bar. + +515 +00:47:02,330 --> 00:47:10,530 +So we have x bar 830 plus or minus 30 there. + +516 +00:47:15,290 --> 00:47:18,030 +Now, if you look carefully at this equation, + +517 +00:47:19,550 --> 00:47:24,570 +what's the value of z in order to have this + +518 +00:47:24,570 --> 00:47:29,630 +confidence interval, which is 740 and 920? + +519 +00:47:36,170 --> 00:47:40,730 +So, Z should be... + +520 +00:47:40,730 --> 00:47:46,290 +What's the value of Z? Now, 830 minus 3Z equals + +521 +00:47:46,290 --> 00:47:46,870 +this value. + +522 +00:47:49,830 --> 00:47:53,390 +So, Z equals... + +523 +00:47:53,390 --> 00:47:56,450 +3. + +524 +00:47:56,830 --> 00:48:03,540 +So, Z is 3. 
That's why. Now, Z is 3. What do you + +525 +00:48:03,540 --> 00:48:05,180 +think the corresponding C level? + +526 +00:48:11,460 --> 00:48:16,560 +99.7% If + +527 +00:48:16,560 --> 00:48:27,080 +you remember for the 68 empirical rule 68, 95, 99 + +528 +00:48:27,080 --> 00:48:33,760 +.7% In chapter 6 we said that 99.7% of the data + +529 +00:48:33,760 --> 00:48:37,220 +falls within three standard deviations of the + +530 +00:48:37,220 --> 00:48:41,980 +mean. So if these three, I am sure that we are + +531 +00:48:41,980 --> 00:48:50,340 +using 99.7% for the confidence level. So for this + +532 +00:48:50,340 --> 00:48:53,280 +particular example here, we have new information + +533 +00:48:53,280 --> 00:48:57,280 +about the sample size. So N is 25. + +534 +00:49:01,630 --> 00:49:06,190 +So just simple calculation x bar as I mentioned + +535 +00:49:06,190 --> 00:49:11,510 +here. The average is lower limit plus upper limit + +536 +00:49:11,510 --> 00:49:18,270 +divided by 2. So x bar equals 830. So now your + +537 +00:49:18,270 --> 00:49:25,130 +confidence interval is x bar plus or minus z sigma + +538 +00:49:25,130 --> 00:49:31,070 +over root n. z sigma over root n, z is unknown, + +539 +00:49:32,190 --> 00:49:37,030 +sigma is 150, n is 25, which is 5, square root of + +540 +00:49:37,030 --> 00:49:48,390 +it, so we'll have 3z. So now x bar 830 minus 3z, + +541 +00:49:49,610 --> 00:49:53,870 +this is the lower limit, upper limit 830 plus 3z. + +542 +00:49:55,480 --> 00:49:59,000 +Now, the confidence interval is given by 740 and + +543 +00:49:59,000 --> 00:50:09,020 +920. Just use the lower limit. 830 minus 3z equals + +544 +00:50:09,020 --> 00:50:10,820 +740. + +545 +00:50:12,300 --> 00:50:18,280 +Simple calculation here. 830 minus 740 is 90, + +546 +00:50:18,660 --> 00:50:22,340 +equals 3z. That means z equals 3. 
+
+547
+00:50:26,070 --> 00:50:29,750
+Now the z value is 3, it means the confidence is
+
+548
+00:50:29,750 --> 00:50:33,530
+99.7%, so the correct answer is A.
+
+549
+00:50:44,690 --> 00:50:49,390
+The other way, you can use that one, by using the
+
+550
+00:50:53,010 --> 00:50:55,830
+Margin of error, which is the difference between
+
+551
+00:50:55,830 --> 00:50:58,270
+these two divided by two, you will get the same
+
+552
+00:50:58,270 --> 00:51:02,630
+result. So there are two methods, one of these
+
+553
+00:51:02,630 --> 00:51:05,830
+straightforward one. The other one, as you
+
+554
+00:51:05,830 --> 00:51:13,550
+mentioned, is the error term. It's B minus upper
+
+555
+00:51:13,550 --> 00:51:19,550
+limit minus lower limit divided by two. Upper
+
+556
+00:51:19,550 --> 00:51:27,450
+limit is 920. Minus 740 divided by 2. What's the
+
+557
+00:51:27,450 --> 00:51:28,370
+value for this one?
+
+558
+00:51:34,570 --> 00:51:40,610
+90. So the margin of error is 90. So E equals E.
+
+559
+00:51:41,070 --> 00:51:43,790
+Sigma or N equals?
+
+560
+00:51:47,110 --> 00:51:50,810
+All squared. So by using this equation you can get
+
+561
+00:51:50,810 --> 00:51:56,860
+your result. So, N is 25, Z is unknown, Sigma is
+
+562
+00:51:56,860 --> 00:52:05,520
+150, R is 90. This one squared. You will get the
+
+563
+00:52:05,520 --> 00:52:10,020
+same Z-score. Make sense?
+
+564
+00:52:17,770 --> 00:52:21,810
+Because if you take z to be three times one-fifth
+
+565
+00:52:21,810 --> 00:52:25,150
+divided by nine squared, you will get the same
+
+566
+00:52:25,150 --> 00:52:30,790
+result for z value. So both will give the same
+
+567
+00:52:30,790 --> 00:52:35,790
+result. So that's for the multiple choice
+
+568
+00:52:35,790 --> 00:52:42,430
+problems. Any question? Let's move to the section
+
+569
+00:52:42,430 --> 00:52:46,370
+number two, true or false problems. 
+
+570
+00:52:47,810 --> 00:52:48,790
+Number one,
+
+571
+00:52:51,850 --> 00:52:57,950
+a race car driver
+
+572
+00:52:57,950 --> 00:53:03,670
+tested his car for time from 0 to 60 mileage per
+
+573
+00:53:03,670 --> 00:53:09,390
+hour. And in 20 tests, obtained an average of 4.85
+
+574
+00:53:09,390 --> 00:53:16,660
+seconds, with standard deviation of 1.47 seconds. 95
+
+575
+00:53:16,660 --> 00:53:23,440
+confidence interval for the 0 to 60 time is 4.62
+
+576
+00:53:23,440 --> 00:53:29,540
+seconds up to 5.18. I think straightforward. Just
+
+577
+00:53:29,540 --> 00:53:33,440
+simple calculation, it will give the right answer.
+
+578
+00:53:36,660 --> 00:53:40,640
+x bar n,
+
+579
+00:53:41,360 --> 00:53:43,620
+so we have to use this equation.
+
+580
+00:53:48,220 --> 00:53:54,020
+You can do it. So it says the answer is false. You
+
+581
+00:53:54,020 --> 00:53:58,340
+have to check this result. So it's 4.5 plus or
+
+582
+00:53:58,340 --> 00:54:03,460
+minus T. We have to find T. S is given to be 1.47
+
+583
+00:54:03,460 --> 00:54:10,120
+divided by root 20. Now, to find T, we have to use
+
+584
+00:54:10,120 --> 00:54:18,480
+0 to 5 and 19. By this value here, you'll get the
+
+585
+00:54:18,480 --> 00:54:22,160
+exact answer. Part number two.
+
+586
+00:54:24,980 --> 00:54:32,380
+Given a sample mean of 2.1. So x bar is 2.1.
+
+587
+00:54:33,680 --> 00:54:34,680
+Excuse me?
+
+588
+00:54:38,500 --> 00:54:45,920
+Because n is small. Now, this sample, This sample
+
+589
+00:54:45,920 --> 00:54:52,220
+gives an average of 4.85, and standard deviation
+
+590
+00:54:52,220 --> 00:55:02,420
+based on this sample. So S, so X bar, 4.85, and S
+
+591
+00:55:02,420 --> 00:55:09,640
+is equal to 1.47. So this is not sigma, because it
+
+592
+00:55:09,640 --> 00:55:15,210
+says that 20 tests, so random sample is 20. 
This
+
+593
+00:55:15,210 --> 00:55:19,390
+sample gives an average of this amount and
+
+594
+00:55:19,390 --> 00:55:21,350
+standard deviation of this amount.
+
+595
+00:55:29,710 --> 00:55:34,610
+We are looking for the
+
+596
+00:55:34,610 --> 00:55:40,470
+confidence interval, and we have two cases. First
+
+597
+00:55:40,470 --> 00:55:43,630
+case, if sigma is known,
+
+598
+00:55:47,220 --> 00:55:48,240
+Sigma is unknown.
+
+599
+00:55:51,520 --> 00:55:58,440
+Now for this example, sigma is unknown. So since
+
+600
+00:55:58,440 --> 00:56:05,740
+sigma is unknown, we have to use T distribution if
+
+601
+00:56:05,740 --> 00:56:09,940
+the distribution is normal or if N is large
+
+602
+00:56:09,940 --> 00:56:14,380
+enough. Now for this example, N is 20. So we have
+
+603
+00:56:14,380 --> 00:56:17,860
+to assume that the population is approximately
+
+604
+00:56:17,860 --> 00:56:23,660
+normal. So we have to use t. So my confidence
+
+605
+00:56:23,660 --> 00:56:26,100
+interval should be x bar plus or minus 3s over
+
+606
+00:56:26,100 --> 00:56:32,560
+root n. Now, number two. Given a sample mean of 2
+
+607
+00:56:32,560 --> 00:56:36,180
+.1 and a population standard deviation. I
+
+608
+00:56:36,180 --> 00:56:38,720
+mentioned that population standard deviation is
+
+609
+00:56:38,720 --> 00:56:43,900
+given. So sigma is 0.7. So sigma is known in this
+
+610
+00:56:43,900 --> 00:56:49,170
+example. So in part two, sigma is given. Now, from
+
+611
+00:56:49,170 --> 00:56:50,890
+a sample of 10 data points,
+
+612
+00:56:53,730 --> 00:56:56,190
+we are looking for 90% confidence interval.
+
+613
+00:56:58,790 --> 00:57:07,230
+90% confidence interval will have a width of 2.36.
+
+614
+00:57:16,460 --> 00:57:19,180
+What is two times the sampling error?
+
+615
+00:57:22,500 --> 00:57:28,040
+So the answer is given. So the error here, error A
+
+616
+00:57:28,040 --> 00:57:30,160
+equals W.
+
+617
+00:57:32,060 --> 00:57:34,120
+So the answer is 1.18. 
+ +618 +00:57:40,520 --> 00:57:45,220 +So he asked about given a sample, 90% confidence + +619 +00:57:45,220 --> 00:57:50,540 +interval will have a width of 2.36. Let's see if + +620 +00:57:50,540 --> 00:57:54,780 +the exact width is 2.36 or not. Now we have x bar + +621 +00:57:54,780 --> 00:58:03,240 +plus or minus z, sigma 1.8. x bar is 2.1, plus or + +622 +00:58:03,240 --> 00:58:08,660 +minus. Now what's the error? 1.18. + +623 +00:58:11,230 --> 00:58:16,370 +this amount without calculation or you just use + +624 +00:58:16,370 --> 00:58:19,590 +this straightforward calculation here we are + +625 +00:58:19,590 --> 00:58:23,530 +talking about z about 90 percent so this amount 1 + +626 +00:58:23,530 --> 00:58:30,330 +.645 times sigma divided by root n for sure this + +627 +00:58:30,330 --> 00:58:35,430 +quantity equals 1.18 But you don't need to do that + +628 +00:58:35,430 --> 00:58:40,570 +because the width is given to be 2.36. So E is 1 + +629 +00:58:40,570 --> 00:58:46,430 +.18. So that saves time in order to compute the + +630 +00:58:46,430 --> 00:58:55,190 +error term. So now 2.1 minus 1.8. 2.1 plus 1.8. + +631 +00:58:56,350 --> 00:58:59,070 +That F, the width, is 2.36. + +632 +00:59:02,010 --> 00:59:04,170 +that if the width equals this value. + +633 +00:59:10,410 --> 00:59:15,270 +2.36. So I solved the problem if the width. But he + +634 +00:59:15,270 --> 00:59:18,430 +asked about, do you know this value? I don't know + +635 +00:59:18,430 --> 00:59:21,230 +that one, so we have to compute the exact answer + +636 +00:59:21,230 --> 00:59:28,230 +now. So x bar 2.1 plus 1645 sigma + +637 +00:59:34,480 --> 00:59:38,600 +My calculator can find the error now. What's the + +638 +00:59:38,600 --> 00:59:41,260 +value for this amount? My calculator. + +639 +00:59:50,700 --> 00:59:56,440 +It's 5.75. 5.75. + +640 +00:59:57,640 --> 01:00:01,830 +So this is your error. So E equals this amount. So + +641 +01:00:01,830 --> 01:00:05,370 +W equals 2 plus 4. 
+ +642 +01:00:08,350 --> 01:00:17,050 +So the error is 5.74. So what's the width? The + +643 +01:00:17,050 --> 01:00:20,210 +width equals 2 times E. + +644 +01:00:25,590 --> 01:00:30,340 +Again. This value, 1.645 times 1.7 divided by root + +645 +01:00:30,340 --> 01:00:30,580 +10. + +646 +01:00:37,280 --> 01:00:38,280 +Three point. + +647 +01:00:44,020 --> 01:00:49,700 +So again, arrow is 3.64. So what's the width? + +648 +01:00:51,160 --> 01:00:53,540 +Twice this value, so two times this one. + +649 +01:00:57,880 --> 01:00:59,560 +7.28. + +650 +01:01:02,120 --> 01:01:07,180 +Now it says in the problem here we have width of 2 + +651 +01:01:07,180 --> 01:01:09,200 +.36. So it's incorrect. + +652 +01:01:11,840 --> 01:01:16,320 +So just simple calculation gives width of 7.28, + +653 +01:01:17,180 --> 01:01:23,900 +not 2.36. Number three. + +654 +01:01:27,950 --> 01:01:32,850 +Look at number four. Other things be equal. As the + +655 +01:01:32,850 --> 01:01:35,550 +confidence level for a confidence interval + +656 +01:01:35,550 --> 01:01:41,250 +increases, the width of the interval increases. As + +657 +01:01:41,250 --> 01:01:45,310 +the confidence level increases, confidence + +658 +01:01:45,310 --> 01:01:47,650 +interval increases, the width of the interval + +659 +01:01:47,650 --> 01:01:52,750 +increases. Correct. So that's true. Let's do + +660 +01:01:52,750 --> 01:01:53,790 +number seven. + +661 +01:01:56,840 --> 01:02:02,300 +A point estimate consists + +662 +01:02:02,300 --> 01:02:06,700 +of a single sample statistic that is used to + +663 +01:02:06,700 --> 01:02:11,000 +estimate the true population parameter. That's + +664 +01:02:11,000 --> 01:02:15,940 +correct because any point estimate, for example x + +665 +01:02:15,940 --> 01:02:21,360 +bar, is used to determine the confidence interval + +666 +01:02:21,360 --> 01:02:25,600 +for the unknown parameter mu. 
So a single + +667 +01:02:25,600 --> 01:02:30,580 +statistic can be used to estimate the true + +668 +01:02:30,580 --> 01:02:33,400 +population parameter, either X bar as a point + +669 +01:02:33,400 --> 01:02:34,900 +estimate or P. + +670 +01:02:41,380 --> 01:02:48,000 +So that's true. Number eight. The T distribution + +671 +01:02:48,000 --> 01:02:51,100 +is used to develop a confidence interval estimate + +672 +01:02:51,100 --> 01:02:54,240 +of the population mean when the population + +673 +01:02:54,240 --> 01:02:57,200 +standard deviation is unknown. That's correct + +674 +01:02:57,200 --> 01:03:01,240 +because we are using T distribution if sigma is + +675 +01:03:01,240 --> 01:03:03,740 +not given and here we have to assume the + +676 +01:03:03,740 --> 01:03:07,960 +population is normal. 9. + +677 +01:03:11,540 --> 01:03:15,180 +The standardized normal distribution is used to + +678 +01:03:15,180 --> 01:03:17,340 +develop a confidence interval estimate of the + +679 +01:03:17,340 --> 01:03:20,700 +population proportion when the sample size is + +680 +01:03:20,700 --> 01:03:22,820 +large enough or sufficiently large. + +681 +01:03:28,640 --> 01:03:32,640 +The width + +682 +01:03:32,640 --> 01:03:37,720 +of a confidence interval equals twice the sampling + +683 +01:03:37,720 --> 01:03:42,570 +error. The weight equals twice the sample, so + +684 +01:03:42,570 --> 01:03:46,370 +that's true. A population parameter is used to + +685 +01:03:46,370 --> 01:03:50,650 +estimate a confidence interval? No way. Because we + +686 +01:03:50,650 --> 01:03:53,570 +use statistics to estimate the confidence + +687 +01:03:53,570 --> 01:03:58,130 +interval. These are statistics. So we are using + +688 +01:03:58,130 --> 01:04:02,390 +statistics to construct the confidence interval. + +689 +01:04:04,190 --> 01:04:10,080 +Number 12. Holding the sample size fixed. 
In + +690 +01:04:10,080 --> 01:04:14,560 +increasing level, the level of confidence in a + +691 +01:04:14,560 --> 01:04:17,520 +confidence interval will necessarily lead to wider + +692 +01:04:17,520 --> 01:04:20,500 +confidence interval. That's true. Because as C + +693 +01:04:20,500 --> 01:04:24,840 +level increases, Z becomes large, so we have large + +694 +01:04:24,840 --> 01:04:29,670 +width, so the confidence becomes wider. Last one, + +695 +01:04:30,550 --> 01:04:33,150 +holding the weight of a confidence interval fixed + +696 +01:04:33,150 --> 01:04:36,190 +and increasing the level of confidence can be + +697 +01:04:36,190 --> 01:04:40,090 +achieved with lower sample size with large sample + +698 +01:04:40,090 --> 01:04:44,830 +size. So it's false. So that's for section two. + +699 +01:04:46,230 --> 01:04:49,970 +One section is left, free response problems or + +700 +01:04:49,970 --> 01:04:52,990 +questions, you can do it at home. So next time, + +701 +01:04:53,070 --> 01:04:57,530 +inshallah, we'll start chapter nine. That's all. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk.srt new file mode 100644 index 0000000000000000000000000000000000000000..d408a6fdd4e87849d5939349c89134ec3724229d --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk.srt @@ -0,0 +1,2287 @@ + +1 +00:00:04,670 --> 00:00:08,210 +Today, Inshallah, we are going to start Chapter 7. + +2 +00:00:09,830 --> 00:00:14,910 +Chapter 7 talks about sampling and sampling + +3 +00:00:14,910 --> 00:00:22,690 +distributions. The objectives for this chapter are + +4 +00:00:22,690 --> 00:00:27,610 +number one, we have different methods, actually we + +5 +00:00:27,610 --> 00:00:31,330 +have two methods: probability and non-probability + +6 +00:00:31,330 --> 00:00:34,750 +samples, and we are going to distinguish between + +7 +00:00:35,420 --> 00:00:40,700 +these two sampling methods. 
So again, in this + +8 +00:00:40,700 --> 00:00:44,980 +chapter, we will talk about two different sampling + +9 +00:00:44,980 --> 00:00:49,480 +methods. One is called probability sampling and + +10 +00:00:49,480 --> 00:00:52,940 +the other is non-probability sampling. Our goal is + +11 +00:00:52,940 --> 00:00:56,520 +to distinguish between these two different + +12 +00:00:56,520 --> 00:00:59,280 +sampling methods. The other learning objective + +13 +00:00:59,280 --> 00:01:04,400 +will be, We'll talk about the concept of the + +14 +00:01:04,400 --> 00:01:06,700 +sampling distribution. That will be next time, + +15 +00:01:06,800 --> 00:01:09,960 +inshallah. The third objective is compute + +16 +00:01:09,960 --> 00:01:15,480 +probabilities related to sample mean. In addition + +17 +00:01:15,480 --> 00:01:18,160 +to that, we'll talk about how can we compute + +18 +00:01:18,160 --> 00:01:22,920 +probabilities regarding the sample proportion. And + +19 +00:01:22,920 --> 00:01:27,130 +as I mentioned last time, There are two types of + +20 +00:01:27,130 --> 00:01:30,270 +data. One is called the numerical data. In this + +21 +00:01:30,270 --> 00:01:33,470 +case, we can use the sample mean. The other type + +22 +00:01:33,470 --> 00:01:36,630 +is called qualitative data. And in this case, we + +23 +00:01:36,630 --> 00:01:39,330 +have to use the sample proportion. So for this + +24 +00:01:39,330 --> 00:01:41,690 +chapter, we are going to discuss how can we + +25 +00:01:41,690 --> 00:01:46,370 +compute the probabilities for each one, either the + +26 +00:01:46,370 --> 00:01:50,090 +sample mean or the sample proportion. 
The last + +27 +00:01:50,090 --> 00:01:55,770 +objective of this chapter is to use the central + +28 +00:01:55,770 --> 00:01:58,190 +limit theorem which is the famous one of the most + +29 +00:01:58,190 --> 00:02:02,130 +famous theorem in this book which is called again + +30 +00:02:02,130 --> 00:02:05,690 +CLT, central limit theorem, and we are going to show + +31 +00:02:05,690 --> 00:02:09,310 +what are the, what is the importance of this + +32 +00:02:09,310 --> 00:02:11,930 +theorem, so these are the mainly the four + +33 +00:02:11,930 --> 00:02:16,610 +objectives for this chapter. Now let's see why we + +34 +00:02:16,610 --> 00:02:20,270 +are talking about sampling. In other words, most + +35 +00:02:20,270 --> 00:02:23,850 +of the time when we are doing study, we are using + +36 +00:02:23,850 --> 00:02:27,700 +a sample instead of using the entire population. + +37 +00:02:28,640 --> 00:02:32,080 +Now there are many reasons behind that. One of + +38 +00:02:32,080 --> 00:02:37,840 +these reasons is selecting a sample is less time + +39 +00:02:37,840 --> 00:02:40,940 +consuming than selecting every item in the + +40 +00:02:40,940 --> 00:02:44,060 +population. I think it makes sense that suppose we + +41 +00:02:44,060 --> 00:02:46,560 +have a huge population, that population consists + +42 +00:02:46,560 --> 00:02:53,140 +of thousands of items. So that will take more time + +43 +00:02:54,440 --> 00:03:00,220 +If you select 100 of their population. So time + +44 +00:03:00,220 --> 00:03:02,140 +consuming is very important. So number one, + +45 +00:03:03,000 --> 00:03:05,780 +selecting sample is less time consuming than using + +46 +00:03:05,780 --> 00:03:10,280 +all the entire population. The second reason, + +47 +00:03:10,880 --> 00:03:14,640 +selecting samples is less costly than selecting a + +48 +00:03:14,640 --> 00:03:17,280 +variety of population. 
Because if we have large + +49 +00:03:17,280 --> 00:03:19,560 +population, in this case you have to spend more + +50 +00:03:19,560 --> 00:03:23,540 +money in order to get the data or the information + +51 +00:03:23,540 --> 00:03:27,940 +from that population. So it's better to use these + +52 +00:03:27,940 --> 00:03:33,300 +samples. The other reason is the analysis. Our + +53 +00:03:33,300 --> 00:03:37,260 +sample is less cumbersome and more practical than + +54 +00:03:37,260 --> 00:03:40,880 +analysis of all items in the population. For these + +55 +00:03:40,880 --> 00:03:45,820 +reasons, we have to use a sample. For this reason, + +56 +00:03:45,880 --> 00:03:53,080 +we have to talk about sampling methods. Let's + +57 +00:03:53,080 --> 00:03:58,540 +start with sampling process. That begins with a + +58 +00:03:58,540 --> 00:04:05,320 +sampling frame. Now suppose my goal is to know the + +59 +00:04:05,320 --> 00:04:13,960 +opinion of IUG students about a certain subject. + +60 +00:04:16,260 --> 00:04:24,120 +So my population consists of all IUG students. So + +61 +00:04:24,120 --> 00:04:27,370 +that's the entire population. And you know that, + +62 +00:04:27,590 --> 00:04:31,750 +for example, suppose our usual students is around, + +63 +00:04:32,430 --> 00:04:39,890 +for example, 20,000 students. 20,000 students is a + +64 +00:04:39,890 --> 00:04:45,490 +big number. So it's better to select a sample from + +65 +00:04:45,490 --> 00:04:49,270 +that population. Now, the first step in this + +66 +00:04:49,270 --> 00:04:55,700 +process, we have to determine the frame of that + +67 +00:04:55,700 --> 00:05:01,320 +population. So my frame consists of all IU + +68 +00:05:01,320 --> 00:05:04,740 +students, which has maybe males and females. So my + +69 +00:05:04,740 --> 00:05:09,560 +frame in this case is all items, I mean all + +70 +00:05:09,560 --> 00:05:15,380 +students at IUG. So that's the frame. 
So my frame + +71 +00:05:15,380 --> 00:05:18,720 +consists + +72 +00:05:18,720 --> 00:05:22,220 +of all students. + +73 +00:05:27,630 --> 00:05:32,370 +So the definition of + +74 +00:05:32,370 --> 00:05:36,010 +the sampling frame is a listing of items that make + +75 +00:05:36,010 --> 00:05:39,350 +up the population. The items could be individual, + +76 +00:05:40,170 --> 00:05:44,490 +could be students, could be things, animals, and + +77 +00:05:44,490 --> 00:05:49,650 +so on. So frames are data sources such as a + +78 +00:05:49,650 --> 00:05:54,840 +population list. Suppose we have the names of IUDs + +79 +00:05:54,840 --> 00:05:58,840 +humans. So that's my population list. Or + +80 +00:05:58,840 --> 00:06:02,160 +directories, or maps, and so on. So that's the + +81 +00:06:02,160 --> 00:06:05,520 +frame, we have to know about the population we are + +82 +00:06:05,520 --> 00:06:10,900 +interested in. Inaccurate or biased results can + +83 +00:06:10,900 --> 00:06:16,460 +result if frame excludes certain portions of the + +84 +00:06:16,460 --> 00:06:20,620 +population. For example, suppose here, as I + +85 +00:06:20,620 --> 00:06:24,180 +mentioned, we are interested in IUG students, so + +86 +00:06:24,180 --> 00:06:29,280 +my frame and all IU students. And I know there are + +87 +00:06:29,280 --> 00:06:35,900 +students, either males or females. Suppose for + +88 +00:06:35,900 --> 00:06:40,880 +some reasons, we ignore males, and just my sample + +89 +00:06:40,880 --> 00:06:45,080 +focused on females. In this case, females. + +90 +00:06:48,700 --> 00:06:51,900 +don't represent the entire population. For this + +91 +00:06:51,900 --> 00:06:57,720 +reason, you will get inaccurate or biased results + +92 +00:06:57,720 --> 00:07:02,000 +if you ignore a certain portion. Because here + +93 +00:07:02,000 --> 00:07:08,580 +males, for example, maybe consists of 40% of the + +94 +00:07:08,580 --> 00:07:12,960 +IG students. 
So it makes sense that this number or + +95 +00:07:12,960 --> 00:07:16,980 +this percentage is a big number. So ignoring this + +96 +00:07:16,980 --> 00:07:21,160 +portion, may lead to misleading results or + +97 +00:07:21,160 --> 00:07:26,160 +inaccurate results or biased results. So you have + +98 +00:07:26,160 --> 00:07:29,600 +to keep in mind that you have to choose all the + +99 +00:07:29,600 --> 00:07:33,740 +portions of that frame. So inaccurate or biased + +100 +00:07:33,740 --> 00:07:38,700 +results can result if a frame excludes certain + +101 +00:07:38,700 --> 00:07:43,180 +portions of a population. Another example, suppose + +102 +00:07:43,180 --> 00:07:48,680 +we took males and females. But here for females, + +103 +00:07:49,240 --> 00:07:56,020 +females have, for example, four levels: Level one, + +104 +00:07:56,400 --> 00:07:59,980 +level two, level three, and level four. And we + +105 +00:07:59,980 --> 00:08:05,560 +ignored, for example, level one. I mean, the new + +106 +00:08:05,560 --> 00:08:09,520 +students. We ignored this portion. Maybe this + +107 +00:08:09,520 --> 00:08:12,860 +portion is very important one, but by mistake we + +108 +00:08:12,860 --> 00:08:18,690 +ignored this one. The remaining three levels will + +109 +00:08:18,690 --> 00:08:22,430 +not represent the entire female population. For + +110 +00:08:22,430 --> 00:08:25,330 +this reason, you will get inaccurate or biased + +111 +00:08:25,330 --> 00:08:31,290 +results. So you have to select all the portions of + +112 +00:08:31,290 --> 00:08:36,610 +the frames. Using different frames to generate + +113 +00:08:36,610 --> 00:08:40,110 +data can lead to dissimilar conclusions. For + +114 +00:08:40,110 --> 00:08:46,020 +example, Suppose again I am interested in IEG + +115 +00:08:46,020 --> 00:08:46,720 +students. + +116 +00:08:49,440 --> 00:08:59,460 +And I took the frame that has all students at + +117 +00:08:59,460 --> 00:09:04,060 +the University of Gaza, the Universities of Gaza. 
+
+118
+00:09:09,250 --> 00:09:12,110
+And as we know that Gaza has three universities,
+
+119
+00:09:12,350 --> 00:09:15,530
+big universities: Islamic University, Al-Azhar
+
+120
+00:09:15,530 --> 00:09:18,030
+University, and Al-Aqsa University. So we have
+
+121
+00:09:18,030 --> 00:09:23,310
+three universities. And my frame here, suppose I
+
+122
+00:09:23,310 --> 00:09:27,410
+took all students at these universities, but my
+
+123
+00:09:27,410 --> 00:09:32,470
+study focused on IU students. So my frame, the
+
+124
+00:09:32,470 --> 00:09:38,250
+true one, is all students at IUG. But I taught all
+
+125
+00:09:38,250 --> 00:09:42,170
+students at universities in Gaza. So now we have
+
+126
+00:09:42,170 --> 00:09:44,690
+different frames.
+
+127
+00:09:48,610 --> 00:09:54,590
+And you want to know what are the opinions of the
+
+128
+00:09:54,590 --> 00:09:59,910
+smokers about smoking. So my population now is
+
+129
+00:09:59,910 --> 00:10:00,530
+just...
+
+130
+00:10:14,030 --> 00:10:19,390
+So that's my thing.
+
+131
+00:10:21,010 --> 00:10:32,410
+I suppose I talk to a field that has one atom.
+
+132
+00:10:40,780 --> 00:10:46,040
+Oh my goodness. They are very different things.
+
+133
+00:10:47,700 --> 00:10:53,720
+The first one consists of only smokers. They are
+
+134
+00:10:53,720 --> 00:10:58,100
+very interested in you. The other one consists
+
+135
+00:10:58,100 --> 00:11:06,560
+of... anonymous. I thought maybe... smoker or non
+
+136
+00:11:06,560 --> 00:11:10,460
+-smokers. For this reason, you will get...
+
+137
+00:11:17,410 --> 00:11:19,350
+Conclusion, different results.
+
+138
+00:11:22,090 --> 00:11:28,850
+So now,
+
+139
+00:11:29,190 --> 00:11:33,610
+the sampling frame is a listing of items that make
+
+140
+00:11:33,610 --> 00:11:39,510
+up the entire population. Let's move to the types
+
+141
+00:11:39,510 --> 00:11:44,910
+of samples. 
Mainly there are two types of + +142 +00:11:44,910 --> 00:11:49,070 +sampling: One is called non-probability samples. + +143 +00:11:50,370 --> 00:11:54,650 +The other one is called probability samples. The + +144 +00:11:54,650 --> 00:11:59,790 +non-probability samples can be divided into two + +145 +00:11:59,790 --> 00:12:04,030 +segments: One is called judgment, and the other + +146 +00:12:04,030 --> 00:12:08,710 +convenience. So we have judgment and convenience + +147 +00:12:08,710 --> 00:12:13,140 +non-probability samples. The other type which is + +148 +00:12:13,140 --> 00:12:17,560 +random probability samples, has four segments or + +149 +00:12:17,560 --> 00:12:21,680 +four parts: The first one is called simple random + +150 +00:12:21,680 --> 00:12:25,680 +sample. The other one is systematic. The second + +151 +00:12:25,680 --> 00:12:28,680 +one is systematic random sample. The third one is + +152 +00:12:28,680 --> 00:12:32,940 +stratified. The fourth one, cluster random sample. + +153 +00:12:33,460 --> 00:12:37,770 +So there are two types of sampling: probability + +154 +00:12:37,770 --> 00:12:41,490 +and non-probability. Non-probability has four + +155 +00:12:41,490 --> 00:12:45,350 +methods here: simple random samples, systematic, + +156 +00:12:45,530 --> 00:12:48,530 +stratified, and cluster. And the non-probability + +157 +00:12:48,530 --> 00:12:53,090 +samples has two types: judgment and convenience. + +158 +00:12:53,670 --> 00:12:58,490 +Let's see the definition of each type of samples. + +159 +00:12:59,190 --> 00:13:03,720 +Let's start with non-probability sample. In non + +160 +00:13:03,720 --> 00:13:07,000 +-probability sample, items included or chosen + +161 +00:13:07,000 --> 00:13:10,800 +without regard to their probability of occurrence. + +162 +00:13:11,760 --> 00:13:14,740 +So that's the definition of non-probability. For + +163 +00:13:14,740 --> 00:13:15,100 +example. 
+ +164 +00:13:23,660 --> 00:13:26,480 +So again, non-probability sample, it means you + +165 +00:13:26,480 --> 00:13:29,580 +select items without regard to their probability + +166 +00:13:29,580 --> 00:13:34,030 +of occurrence. For example, suppose females + +167 +00:13:34,030 --> 00:13:42,430 +consist of 70% of IUG students and males, the + +168 +00:13:42,430 --> 00:13:49,930 +remaining percent is 30%. And suppose I decided to + +169 +00:13:49,930 --> 00:13:56,610 +select a sample of 100 or 1000 students from IUG. + +170 +00:13:58,620 --> 00:14:07,980 +Suddenly, I have a sample that has 650 males and + +171 +00:14:07,980 --> 00:14:14,780 +350 females. Now, this sample, which has these + +172 +00:14:14,780 --> 00:14:19,260 +numbers, for sure does not represent the entire + +173 +00:14:19,260 --> 00:14:25,240 +population. Because females has 70%, and I took a + +174 +00:14:25,240 --> 00:14:30,890 +random sample or a sample of size 350. So this + +175 +00:14:30,890 --> 00:14:35,830 +sample is chosen without regard to the probability + +176 +00:14:35,830 --> 00:14:40,370 +here. Because in this case, I should choose males + +177 +00:14:40,370 --> 00:14:44,110 +with respect to their probability, which is 30%. + +178 +00:14:44,110 --> 00:14:49,330 +But in this case, I just choose different + +179 +00:14:49,330 --> 00:14:54,990 +proportions. Another example. Suppose + +180 +00:14:57,260 --> 00:14:59,920 +again, I am talking about smoking. + +181 +00:15:05,080 --> 00:15:10,120 +And I know that some people are smoking and I just + +182 +00:15:10,120 --> 00:15:14,040 +took this sample. So I took this sample based on + +183 +00:15:14,040 --> 00:15:18,600 +my knowledge. So it's without regard to their + +184 +00:15:18,600 --> 00:15:23,340 +probability. Maybe suppose I am talking about + +185 +00:15:23,340 --> 00:15:28,330 +political opinions about something. And I just + +186 +00:15:28,330 --> 00:15:36,330 +took the experts of that subject. 
So my sample is + +187 +00:15:36,330 --> 00:15:42,070 +not a probability sample. And this one has, as we + +188 +00:15:42,070 --> 00:15:44,230 +mentioned, has two types: One is called + +189 +00:15:44,230 --> 00:15:49,010 +convenience sampling. In this case, items are + +190 +00:15:49,010 --> 00:15:51,710 +selected based only on the fact that they are + +191 +00:15:51,710 --> 00:15:55,590 +easy. So I choose that sample because it's easy. + +192 +00:15:57,090 --> 00:15:57,690 +Inexpensive, + +193 +00:16:02,190 --> 00:16:09,790 +inexpensive, or convenient to sample. If I choose + +194 +00:16:09,790 --> 00:16:13,430 +my sample because it is easy or inexpensive, I + +195 +00:16:13,430 --> 00:16:18,480 +think it doesn't make any sense, because easy is + +196 +00:16:18,480 --> 00:16:23,780 +not a reason to select that sample + +223 +00:18:17,050 --> 00:18:20,970 +segment and so on. But the convenient sample means + +224 +00:18:20,970 --> 00:18:24,690 +that you select a sample maybe that is easy for + +225 +00:18:24,690 --> 00:18:29,430 +you, or less expensive, or that sample is + +226 +00:18:29,430 --> 00:18:32,980 +convenient. For this reason, it's called non + +227 +00:18:32,980 --> 00:18:36,300 +-probability sample because we choose that sample + +228 +00:18:36,300 --> 00:18:39,540 +without regard to their probability of occurrence. + +229 +00:18:41,080 --> 00:18:48,620 +The other type is called probability samples. In + +230 +00:18:48,620 --> 00:18:54,200 +this case, items are chosen on the basis of non + +231 +00:18:54,200 --> 00:18:58,600 +-probabilities. For example, here, if males + +232 +00:19:02,500 --> 00:19:11,060 +has or represent 30%, and females represent 70%, + +233 +00:19:11,060 --> 00:19:14,840 +and the same size has a thousand. So in this case, + +234 +00:19:14,920 --> 00:19:19,340 +you have to choose females with respect to their + +235 +00:19:19,340 --> 00:19:24,260 +probability. 
Now 70% for females, so I have to + +236 +00:19:24,260 --> 00:19:29,430 +choose 700 for females and the remaining 300 for + +237 +00:19:29,430 --> 00:19:34,010 +males. So in this case, I choose the items, I mean + +238 +00:19:34,010 --> 00:19:37,970 +I choose my samples regarding to their + +239 +00:19:37,970 --> 00:19:39,050 +probability. + +240 +00:19:41,010 --> 00:19:45,190 +So in probability sample items and the sample are + +241 +00:19:45,190 --> 00:19:48,610 +chosen on the basis of known probabilities. And + +242 +00:19:48,610 --> 00:19:52,360 +again, there are two types. of probability + +243 +00:19:52,360 --> 00:19:55,580 +samples, simple random sample, systematic, + +244 +00:19:56,120 --> 00:19:59,660 +stratified, and cluster. Let's talk about each one + +245 +00:19:59,660 --> 00:20:05,040 +in details. The first type is called a probability + +246 +00:20:05,040 --> 00:20:11,720 +sample. Simple random sample. The first type of + +247 +00:20:11,720 --> 00:20:16,200 +probability sample is the easiest one. Simple + +248 +00:20:16,200 --> 00:20:23,780 +random sample. Generally is denoted by SRS, Simple + +249 +00:20:23,780 --> 00:20:30,660 +Random Sample. Let's see how can we choose a + +250 +00:20:30,660 --> 00:20:35,120 +sample that is random. What do you mean by random? + +251 +00:20:36,020 --> 00:20:41,780 +In this case, every individual or item from the + +252 +00:20:41,780 --> 00:20:47,620 +frame has an equal chance of being selected. For + +253 +00:20:47,620 --> 00:20:52,530 +example, suppose number of students in this class + +254 +00:20:52,530 --> 00:21:04,010 +number of students is 52 so + +255 +00:21:04,010 --> 00:21:11,890 +each one, I mean each student from + +256 +00:21:11,890 --> 00:21:17,380 +1 up to 52 has the same probability of being + +257 +00:21:17,380 --> 00:21:23,860 +selected. 1 by 52. 1 by 52. 1 divided by 52. So + +258 +00:21:23,860 --> 00:21:27,980 +each one has this probability. 
So the first one + +259 +00:21:27,980 --> 00:21:31,820 +has the same because if I want to select for + +260 +00:21:31,820 --> 00:21:37,680 +example 10 out of you. So the first one has each + +261 +00:21:37,680 --> 00:21:42,400 +one has probability of 1 out of 52. That's the + +262 +00:21:42,400 --> 00:21:47,160 +meaning of Each item from the frame has an equal + +263 +00:21:47,160 --> 00:21:54,800 +chance of being selected. Selection may be with + +264 +00:21:54,800 --> 00:21:58,800 +replacement. With replacement means selected + +265 +00:21:58,800 --> 00:22:02,040 +individuals is returned to the frame for + +266 +00:22:02,040 --> 00:22:04,880 +possibility selection, or without replacement + +267 +00:22:04,880 --> 00:22:08,600 +means selected individuals or item is not returned + +268 +00:22:08,600 --> 00:22:10,820 +to the frame. So we have two types of selection, + +269 +00:22:11,000 --> 00:22:14,360 +either with... So with replacement means item is + +270 +00:22:14,360 --> 00:22:18,080 +returned back to the frame, or without replacement, + +271 +00:22:18,320 --> 00:22:21,400 +the item is not returned back to the frame. So + +272 +00:22:21,400 --> 00:22:26,490 +that's the two types of selection. Now how can we + +273 +00:22:26,490 --> 00:22:29,810 +obtain the sample? Sample obtained from something + +274 +00:22:29,810 --> 00:22:33,470 +called table of random numbers. In a minute I will + +275 +00:22:33,470 --> 00:22:36,430 +show you the table of random numbers. And other + +276 +00:22:36,430 --> 00:22:40,130 +method of selecting a sample by using computer + +277 +00:22:40,130 --> 00:22:44,890 +random number generators. So there are two methods + +278 +00:22:44,890 --> 00:22:48,310 +for selecting a random number. Either by using the + +279 +00:22:48,310 --> 00:22:51,950 +table that you have at the end of your book or by + +280 +00:22:51,950 --> 00:22:56,550 +using a computer. 
I will show one of these and in + +281 +00:22:56,550 --> 00:22:59,650 +the SPSS course you will see another one which is + +282 +00:22:59,650 --> 00:23:03,690 +by using a computer. So let's see how can we + +283 +00:23:03,690 --> 00:23:11,730 +obtain a sample from table of + +284 +00:23:11,730 --> 00:23:12,590 +random number. + +285 +00:23:16,950 --> 00:23:22,090 +I have maybe different table here. But the same + +286 +00:23:22,090 --> 00:23:28,090 +idea to use that table. Let's see how can we + +287 +00:23:28,090 --> 00:23:34,990 +choose a sample by using a random number. + +288 +00:23:42,490 --> 00:23:47,370 +Now, for example, suppose in this class As I + +289 +00:23:47,370 --> 00:23:51,090 +mentioned, there are 52 students. + +290 +00:23:55,110 --> 00:23:58,650 +So each one has a number, ID number one, two, up + +291 +00:23:58,650 --> 00:24:05,110 +to 52. So the numbers are 01, 02, all the way up + +292 +00:24:05,110 --> 00:24:10,790 +to 52. So the maximum digits here, two, two + +293 +00:24:10,790 --> 00:24:11,110 +digits. + +294 +00:24:15,150 --> 00:24:18,330 +1, 2, 3, up to 5, 2, 2, so you have two digits. + +295 +00:24:19,470 --> 00:24:23,710 +Now suppose I decided to take a random sample of + +296 +00:24:23,710 --> 00:24:28,550 +size, for example, N instead. How can I select N + +297 +00:24:28,550 --> 00:24:32,570 +out of U? In this case, each one has the same + +298 +00:24:32,570 --> 00:24:36,790 +chance of being selected. Now based on this table, + +299 +00:24:37,190 --> 00:24:44,230 +you can pick any row or any column. Randomly. For + +300 +00:24:44,230 --> 00:24:51,630 +example, suppose I select the first row. Now, the + +301 +00:24:51,630 --> 00:24:56,570 +first student will be selected as student number + +302 +00:24:56,570 --> 00:25:03,650 +to take two digits. We have to take how many + +303 +00:25:03,650 --> 00:25:08,770 +digits? Because students have ID card that + +304 +00:25:08,770 --> 00:25:13,930 +consists of two digits, 0102 up to 52. 
So, what's + +305 +00:25:13,930 --> 00:25:17,010 +the first number students will be selected based + +306 +00:25:17,010 --> 00:25:22,130 +on this table? Forget about the line 101. + +307 +00:25:26,270 --> 00:25:27,770 +Start with this number. + +308 +00:25:42,100 --> 00:25:50,900 +So the first one, 19. The second, 22. The third + +309 +00:25:50,900 --> 00:25:51,360 +student, + +310 +00:25:54,960 --> 00:26:04,000 +19, 22. The third, 9. The third, 9. I'm taking the + +311 +00:26:04,000 --> 00:26:16,510 +first row. Then fifth. 34 student + +312 +00:26:16,510 --> 00:26:18,710 +number 05 + +313 +00:26:24,340 --> 00:26:29,500 +Now, what's about seventy-five? Seventy-five is + +314 +00:26:29,500 --> 00:26:33,660 +not selected because the maximum I have is fifty + +315 +00:26:33,660 --> 00:26:46,180 +-two. Next. Sixty-two is not selected. Eighty + +316 +00:26:46,180 --> 00:26:53,000 +-seven. It's not selected. 13. 13. It's okay. + +317 +00:26:53,420 --> 00:27:01,740 +Next. 96. 96. Not selected. 14. 14 is okay. 91. + +318 +00:27:02,140 --> 00:27:12,080 +91. 91. Not selected. 95. 91. 45. 85. 31. 31. + +319 +00:27:15,240 --> 00:27:21,900 +So that's 10. So students numbers are 19, 22, 39, + +320 +00:27:22,140 --> 00:27:26,980 +50, 34, 5, 13, 4, 25 and take one will be + +321 +00:27:26,980 --> 00:27:30,940 +selected. So these are the ID numbers will be + +322 +00:27:30,940 --> 00:27:35,480 +selected in order to get a sample of 10. You + +323 +00:27:35,480 --> 00:27:40,500 +exclude + +324 +00:27:40,500 --> 00:27:43,440 +that one. If the number is repeated, you have to + +325 +00:27:43,440 --> 00:27:44,340 +exclude that one. + +326 +00:27:51,370 --> 00:27:57,270 +is repeated, then excluded. + +327 +00:28:02,370 --> 00:28:07,370 +So the returned number must be excluded from the + +328 +00:28:07,370 --> 00:28:14,030 +sample. Let's imagine that we have not 52 + +329 +00:28:14,030 --> 00:28:19,130 +students. We have 520 students. 
+ +330 +00:28:25,740 --> 00:28:32,520 +Now, I have large number, 52, 520 instead of 52 + +331 +00:28:32,520 --> 00:28:36,080 +students. And again, my goal is to select just 10 + +332 +00:28:36,080 --> 00:28:42,220 +students out of 120. So each one has ID with + +333 +00:28:42,220 --> 00:28:46,220 +number one, two, all the way up to 520. So the + +334 +00:28:46,220 --> 00:28:53,160 +first one, 001. 002 all the way up to 520 now in + +335 +00:28:53,160 --> 00:28:56,480 +this case you have to choose three digits start + +336 +00:28:56,480 --> 00:29:00,060 +for example you don't have actually to start with + +337 +00:29:00,060 --> 00:29:03,060 +row number one maybe column number one or row + +338 +00:29:03,060 --> 00:29:06,140 +number two whatever is fine so let's start with + +339 +00:29:06,140 --> 00:29:10,460 +row number two for example row number 76 + +340 +00:29:14,870 --> 00:29:19,950 +It's not selected. Because the maximum number I + +341 +00:29:19,950 --> 00:29:25,110 +have is 5 to 20. So, 746 shouldn't be selected. + +342 +00:29:26,130 --> 00:29:29,430 +The next one, 764. + +343 +00:29:31,770 --> 00:29:38,750 +Again, it's not selected. 764, 715. Not selected. + +344 +00:29:38,910 --> 00:29:42,310 +Next one is 715. 
+ +345 +00:29:44,880 --> 00:29:52,200 +099 should be 0 that's + +346 +00:29:52,200 --> 00:29:54,940 +the way how can we use the random table for using + +347 +00:29:54,940 --> 00:29:58,800 +or for selecting simple random symbols so in this + +348 +00:29:58,800 --> 00:30:03,480 +case you can choose any row or any column then you + +349 +00:30:03,480 --> 00:30:06,620 +have to decide how many digits you have to select + +350 +00:30:06,620 --> 00:30:10,500 +it depends on the number you have I mean the + +351 +00:30:10,500 --> 00:30:16,510 +population size If for example Suppose I am + +352 +00:30:16,510 --> 00:30:20,270 +talking about IUPUI students and for example, we + +353 +00:30:20,270 --> 00:30:26,530 +have 30,000 students at this school And again, I + +354 +00:30:26,530 --> 00:30:28,570 +want to select a random sample of size 10 for + +355 +00:30:28,570 --> 00:30:35,190 +example So how many digits should I use? 20,000 + +356 +00:30:35,190 --> 00:30:42,620 +Five digits And each one, each student has ID + +357 +00:30:42,620 --> 00:30:51,760 +from, starts from the first one up to twenty + +358 +00:30:51,760 --> 00:30:56,680 +thousand. So now, start with, for example, the + +359 +00:30:56,680 --> 00:30:59,240 +last row you have. + +360 +00:31:03,120 --> 00:31:08,480 +The first number 54000 is not. 81 is not. None of + +361 +00:31:08,480 --> 00:31:08,740 +these. + +362 +00:31:12,420 --> 00:31:17,760 +Look at the next one. 71000 is not selected. Now + +363 +00:31:17,760 --> 00:31:22,180 +9001. So the first number I have to select is + +364 +00:31:22,180 --> 00:31:27,200 +9001. None of the rest. Go back. + +365 +00:31:30,180 --> 00:31:37,790 +Go to the next one. The second number, 12149 + +366 +00:31:37,790 --> 00:31:45,790 +and so on. Next will be 18000 and so on. Next row, + +367 +00:31:46,470 --> 00:31:55,530 +we can select the second one, then 16, then 14000, + +368 +00:31:55,890 --> 00:32:00,850 +6500 and so on. 
So this is the way how can we use + +369 +00:32:00,850 --> 00:32:08,110 +the random table. It seems to be that tons of work + +370 +00:32:08,110 --> 00:32:13,450 +if you have large sample. Because in this case, + +371 +00:32:13,530 --> 00:32:16,430 +you have to choose, for example, suppose I am + +372 +00:32:16,430 --> 00:32:22,390 +interested to take a random sample of 10,000. Now, + +373 +00:32:22,510 --> 00:32:28,370 +to use this table to select 10,000 items takes + +374 +00:32:28,370 --> 00:32:33,030 +time and effort and maybe will never finish. So + +375 +00:32:33,030 --> 00:32:33,950 +it's better to use + +376 +00:32:38,020 --> 00:32:42,100 +better to use computer + +377 +00:32:42,100 --> 00:32:47,140 +random number generators. So that's the way if we, + +378 +00:32:47,580 --> 00:32:51,880 +now we can use the random table only if the sample + +379 +00:32:51,880 --> 00:32:57,780 +size is limited. I mean up to 100 maybe you can + +380 +00:32:57,780 --> 00:33:03,160 +use the random table, but after that I think it's + +381 +00:33:03,160 --> 00:33:08,670 +just you are losing your time. Another example + +382 +00:33:08,670 --> 00:33:14,390 +here. Now suppose my sampling frame for population + +383 +00:33:14,390 --> 00:33:23,230 +has 850 students. So the numbers are 001, 002, all + +384 +00:33:23,230 --> 00:33:28,490 +the way up to 850. And suppose for example we are + +385 +00:33:28,490 --> 00:33:33,610 +going to select five items randomly from that + +386 +00:33:33,610 --> 00:33:39,610 +population. So you have to choose three digits and + +387 +00:33:39,610 --> 00:33:44,990 +imagine that this is my portion of that table. + +388 +00:33:45,850 --> 00:33:51,570 +Now, take three digits. The first three digits are + +389 +00:33:51,570 --> 00:34:00,330 +492. So the first item chosen should be item + +390 +00:34:00,330 --> 00:34:10,540 +number 492. 
should be selected next one 800 808 + +391 +00:34:10,540 --> 00:34:17,020 +doesn't select because the maximum it's much + +392 +00:34:17,020 --> 00:34:21,100 +selected because the maximum here is 850 now next + +393 +00:34:21,100 --> 00:34:26,360 +one 892 this + +394 +00:34:26,360 --> 00:34:32,140 +one is not selected next + +395 +00:34:32,140 --> 00:34:43,030 +item four three five selected now + +396 +00:34:43,030 --> 00:34:50,710 +seven seven nine should be selected finally zeros + +397 +00:34:50,710 --> 00:34:53,130 +two should be selected so these are the five + +398 +00:34:53,130 --> 00:34:58,090 +numbers in my sample by using selected by using + +399 +00:34:58,090 --> 00:35:01,190 +the random sample any questions? + +400 +00:35:04,160 --> 00:35:07,780 +Let's move to another part. + +401 +00:35:17,600 --> 00:35:22,380 +The next type of samples is called systematic + +402 +00:35:22,380 --> 00:35:25,260 +samples. + +403 +00:35:29,120 --> 00:35:35,780 +Now suppose N represents the sample size, capital + +404 +00:35:35,780 --> 00:35:40,520 +N represents + +405 +00:35:40,520 --> 00:35:42,220 +the population size. + +406 +00:35:46,660 --> 00:35:49,900 +And let's see how can we choose a systematic + +407 +00:35:49,900 --> 00:35:54,040 +random sample from that population. For example, + +408 +00:35:55,260 --> 00:35:57,180 +suppose + +409 +00:3 + +445 +00:39:27,800 --> 00:39:31,780 +15, 25, 35, and so on if we have more than that. + +446 +00:39:33,230 --> 00:39:37,730 +Okay, so that's for, in this example, he chose + +447 +00:39:37,730 --> 00:39:42,790 +item number seven. Random selection, number seven. + +448 +00:39:43,230 --> 00:39:50,010 +So next should be 17, 27, 37, and so on. Let's do + +449 +00:39:50,010 --> 00:39:50,710 +another example. + +450 +00:39:58,590 --> 00:40:06,540 +Suppose there are In this class, there are 50 + +451 +00:40:06,540 --> 00:40:12,400 +students. So the total is 50. + +452 +00:40:15,320 --> 00:40:26,780 +10 students out of 50. 
So my sample is 10. Now + +453 +00:40:26,780 --> 00:40:30,260 +still, 50 divided by 10 is 5. + +454 +00:40:33,630 --> 00:40:39,650 +So there are five items or five students in a + +455 +00:40:39,650 --> 00:40:45,370 +group. So we have five in + +456 +00:40:45,370 --> 00:40:51,490 +the first group and then five in the next one and + +457 +00:40:51,490 --> 00:40:56,130 +so on. So we have how many groups? Ten groups. + +458 +00:40:59,530 --> 00:41:04,330 +So first step, you have to find a step. Still it + +459 +00:41:04,330 --> 00:41:07,930 +means number of items or number of students in a + +460 +00:41:07,930 --> 00:41:16,170 +group. Next step, select student at random from + +461 +00:41:16,170 --> 00:41:22,010 +the first group, so random selection. Now, here + +462 +00:41:22,010 --> 00:41:28,610 +there are five students, so 01, I'm sorry, not 01, + +463 +00:41:29,150 --> 00:41:35,080 +1, 2, 3, 4, 5, so one digit. Only one digit. + +464 +00:41:35,800 --> 00:41:39,420 +Because I have maximum number is five. So it's + +465 +00:41:39,420 --> 00:41:42,920 +only one digit. So go again to the random table + +466 +00:41:42,920 --> 00:41:48,220 +and take one digit. One. So my first item, six, + +467 +00:41:48,760 --> 00:41:52,580 +eleven, sixteen, twenty-one, twenty-one, all the + +468 +00:41:52,580 --> 00:41:55,500 +way up to ten items. + +469 +00:42:13,130 --> 00:42:18,170 +So I choose student number one, then skip five, + +470 +00:42:19,050 --> 00:42:22,230 +choose number six, and so on. It's called + +471 +00:42:22,230 --> 00:42:26,130 +systematic. Because if you know the first item, + +472 +00:42:28,550 --> 00:42:32,690 +and the step you can know the rest of these. + +473 +00:42:37,310 --> 00:42:41,150 +Imagine that you want to select 10 students who + +474 +00:42:41,150 --> 00:42:48,010 +entered the cafe shop or restaurant. You can pick + +475 +00:42:48,010 --> 00:42:54,790 +one of them. 
So suppose I'm taking number three + +476 +00:42:54,790 --> 00:43:00,550 +and my step is six. So three, then nine, and so + +477 +00:43:00,550 --> 00:43:00,790 +on. + +478 +00:43:05,830 --> 00:43:13,310 +So that's systematic assembly. Questions? So + +479 +00:43:13,310 --> 00:43:20,710 +that's about random samples and systematic. What + +480 +00:43:20,710 --> 00:43:23,550 +do you mean by stratified groups? + +481 +00:43:28,000 --> 00:43:33,080 +Let's use a definition and an example of a + +482 +00:43:33,080 --> 00:43:34,120 +stratified family. + +483 +00:43:58,810 --> 00:44:05,790 +step one. So again imagine we have IUG population + +484 +00:44:05,790 --> 00:44:11,490 +into two or more subgroups. So there are two or + +485 +00:44:11,490 --> 00:44:16,010 +more. It depends on the characteristic you are + +486 +00:44:16,010 --> 00:44:19,690 +using. So divide population into two or more + +487 +00:44:19,690 --> 00:44:24,210 +subgroups according to some common characteristic. + +488 +00:44:24,730 --> 00:44:30,280 +For example suppose I want to divide the student + +489 +00:44:30,280 --> 00:44:32,080 +into gender. + +490 +00:44:34,100 --> 00:44:38,840 +So males or females. So I have two strata. One is + +491 +00:44:38,840 --> 00:44:43,000 +called males and the other is females. Now suppose + +492 +00:44:43,000 --> 00:44:47,460 +the characteristic I am going to use is the levels + +493 +00:44:47,460 --> 00:44:51,500 +of a student. First level, second, third, fourth, + +494 +00:44:51,800 --> 00:44:56,280 +and so on. So number of strata here depends on + +495 +00:44:56,280 --> 00:45:00,380 +actually the characteristic you are interested in. + +496 +00:45:00,780 --> 00:45:04,860 +Let's use the simple one that is gender. So here + +497 +00:45:04,860 --> 00:45:12,360 +we have females. So IUV students divided into two + +498 +00:45:12,360 --> 00:45:18,560 +types, strata, or two groups, females and males. + +499 +00:45:19,200 --> 00:45:22,870 +So this is the first step. 
So at least you should + +500 +00:45:22,870 --> 00:45:26,750 +have two groups or two subgroups. So we have IUG + +501 +00:45:26,750 --> 00:45:29,630 +student, the entire population, and that + +502 +00:45:29,630 --> 00:45:34,370 +population divided into two subgroups. Next, + +503 +00:45:35,650 --> 00:45:39,730 +assemble random samples. Keep careful here with + +504 +00:45:39,730 --> 00:45:45,770 +sample sizes proportional to strata sizes. That + +505 +00:45:45,770 --> 00:45:57,890 +means suppose I know that Female consists + +506 +00:45:57,890 --> 00:46:02,470 +of + +507 +00:46:02,470 --> 00:46:09,770 +70% of IUG students and + +508 +00:46:09,770 --> 00:46:11,490 +males 30%. + +509 +00:46:15,410 --> 00:46:17,950 +the sample size we are talking about here is for + +510 +00:46:17,950 --> 00:46:21,550 +example is a thousand so I want to select a sample + +511 +00:46:21,550 --> 00:46:24,990 +of a thousand seed from the registration office or + +512 +00:46:24,990 --> 00:46:31,190 +my information about that is males represent 30% + +513 +00:46:31,190 --> 00:46:37,650 +females represent 70% so in this case your sample + +514 +00:46:37,650 --> 00:46:43,650 +structure should be 70% times + +515 +00:46:50,090 --> 00:46:59,090 +So the first + +516 +00:46:59,090 --> 00:47:03,750 +group should have 700 items of students and the + +517 +00:47:03,750 --> 00:47:06,490 +other one is 300. + +518 +00:47:09,230 --> 00:47:11,650 +So this is the second step. + +519 +00:47:14,420 --> 00:47:17,740 +Sample sizes are determined in step number two. + +520 +00:47:18,540 --> 00:47:22,200 +Now, how can you select the 700 females here? + +521 +00:47:23,660 --> 00:47:26,180 +Again, you have to go back to the random table. + +522 +00:47:27,480 --> 00:47:31,660 +Samples from subgroups are compiled into one. Then + +523 +00:47:31,660 --> 00:47:39,600 +you can use simple random sample. So here, 700. I + +524 +00:47:39,600 --> 00:47:45,190 +have, for example, 70% females. 
And I know that I + +525 +00:47:45,190 --> 00:47:51,370 +use student help. I have ideas numbers from 1 up + +526 +00:47:51,370 --> 00:47:59,070 +to 7, 14. Then by using simple random, simple + +527 +00:47:59,070 --> 00:48:01,070 +random table, you can. + +528 +00:48:09,490 --> 00:48:15,190 +So if you go back to the table, the first item, + +529 +00:48:16,650 --> 00:48:23,130 +now look at five digits. Nineteen is not selected. + +530 +00:48:24,830 --> 00:48:27,510 +Nineteen. I have, the maximum is fourteen + +531 +00:48:27,510 --> 00:48:31,890 +thousand. So skip one and two. The first item is + +532 +00:48:31,890 --> 00:48:37,850 +seven hundred and fifty-six. The first item. Next + +533 +00:48:37,850 --> 00:48:43,480 +is not chosen. Next is not chosen. Number six. + +534 +00:48:43,740 --> 00:48:44,580 +Twelve. + +535 +00:48:47,420 --> 00:48:50,620 +Zero. Unsure. + +536 +00:48:52,880 --> 00:48:58,940 +So here we divide the population into two groups + +537 +00:48:58,940 --> 00:49:03,440 +or two subgroups, females and males. And we select + +538 +00:49:03,440 --> 00:49:07,020 +a random sample of size 700 based on the + +539 +00:49:07,020 --> 00:49:10,850 +proportion of this subgroup. Then we are using the + +540 +00:49:10,850 --> 00:49:16,750 +simple random table to take the 700 females. + +541 +00:49:22,090 --> 00:49:29,810 +Now for this example, there are 16 items or 16 + +542 +00:49:29,810 --> 00:49:35,030 +students in each group. And he select randomly + +543 +00:49:35,030 --> 00:49:40,700 +number three, number 9, number 13, and so on. So + +544 +00:49:40,700 --> 00:49:44,140 +it's a random selection. Another example. + +545 +00:49:46,820 --> 00:49:52,420 +Suppose again we are talking about all IUVs. + +546 +00:50:02,780 --> 00:50:09,360 +Here I divided the population according to the + +547 +00:50:09,360 --> 00:50:17,680 +students' levels. Level one, level two, three + +548 +00:50:17,680 --> 00:50:18,240 +levels. 
+ +549 +00:50:25,960 --> 00:50:28,300 +One, two, three and four. + +550 +00:50:32,240 --> 00:50:39,710 +So I divide the population into four subgroups + +551 +00:50:39,710 --> 00:50:43,170 +according to the student levels. So one, two, + +552 +00:50:43,290 --> 00:50:48,030 +three, and four. Now, a simple random sample is + +553 +00:50:48,030 --> 00:50:52,070 +selected from each subgroup with sample sizes + +554 +00:50:52,070 --> 00:50:57,670 +proportional to strata size. Imagine that level + +555 +00:50:57,670 --> 00:51:04,950 +number one represents 40% of the students. Level + +556 +00:51:04,950 --> 00:51:17,630 +2, 20%. Level 3, 30%. Just + +557 +00:51:17,630 --> 00:51:22,850 +an example. To make more sense? + +558 +00:51:34,990 --> 00:51:36,070 +My sample size? + +559 +00:51:38,750 --> 00:51:39,910 +3, + +560 +00:51:41,910 --> 00:51:46,430 +9, 15, 4, sorry. + +561 +00:51:53,290 --> 00:52:00,470 +So here, there are four levels. And the + +562 +00:52:00,470 --> 00:52:04,370 +proportions are 48 + +563 +00:52:06,670 --> 00:52:17,190 +sample size is 500 so the sample for each strata + +564 +00:52:17,190 --> 00:52:31,190 +will be number 1 40% times 500 gives 200 the next + +565 +00:52:31,190 --> 00:52:32,950 +150 + +566 +00:52:36,200 --> 00:52:42,380 +And so on. Now, how can we choose the 200 from + +567 +00:52:42,380 --> 00:52:46,280 +level number one? Again, we have to choose the + +568 +00:52:46,280 --> 00:52:55,540 +random table. Now, 40% from this number, it means + +569 +00:52:55,540 --> 00:52:59,620 +5 + +570 +00:52:59,620 --> 00:53:06,400 +,000. This one has 5,000. 600 females students. + +571 +00:53:07,720 --> 00:53:13,480 +Because 40% of females in level 1. And I know that + +572 +00:53:13,480 --> 00:53:17,780 +the total number of females is 14,000. So number + +573 +00:53:17,780 --> 00:53:23,420 +of females in the first level is 5600. How many + +574 +00:53:23,420 --> 00:53:28,040 +digits we have? Four digits. 
The first one, 001, + +575 +00:53:28,160 --> 00:53:34,460 +all the way up to 560. If you go back, into a + +576 +00:53:34,460 --> 00:53:39,520 +random table, take five, four digits. So the first + +577 +00:53:39,520 --> 00:53:43,340 +number is 1922. + +578 +00:53:43,980 --> 00:53:48,000 +Next is 3950. + +579 +00:53:50,140 --> 00:53:54,760 +And so on. So that's the way how can we choose + +580 +00:53:54,760 --> 00:53:58,640 +stratified samples. + +581 +00:54:02,360 --> 00:54:08,240 +Next, the last one is called clusters. And let's + +582 +00:54:08,240 --> 00:54:11,400 +see now what's the difference between stratified + +583 +00:54:11,400 --> 00:54:16,500 +and cluster. Step one. + +584 +00:54:25,300 --> 00:54:31,720 +Population is divided into some clusters. + +585 +00:54:35,000 --> 00:54:41,160 +Step two, assemble one by assembling clusters + +586 +00:54:41,160 --> 00:54:42,740 +selective. + +587 +00:54:46,100 --> 00:54:48,640 +Here suppose how many clusters? + +588 +00:54:53,560 --> 00:54:58,080 +16 clusters. So there are, so the population has + +589 +00:55:19,310 --> 00:55:25,820 +Step two, you have to choose a simple random + +590 +00:55:25,820 --> 00:55:31,440 +number of clusters out of 16. Suppose I decided to + +591 +00:55:31,440 --> 00:55:38,300 +choose three among these. So we have 16 clusters. + +592 +00:55:45,340 --> 00:55:49,780 +For example, I chose cluster number 411. + +593 +00:55:51,640 --> 00:56:01,030 +So I choose these clusters. Next, all items in the + +594 +00:56:01,030 --> 00:56:02,910 +selected clusters can be used. + +595 +00:56:09,130 --> 00:56:15,770 +Or items + +596 +00:56:15,770 --> 00:56:18,910 +can be chosen from a cluster using another + +597 +00:56:18,910 --> 00:56:21,130 +probability sampling technique. For example, + +598 +00:56:23,190 --> 00:56:28,840 +imagine that We are talking about students who + +599 +00:56:28,840 --> 00:56:31,460 +registered for accounting. 
+ +600 +00:56:45,880 --> 00:56:50,540 +Imagine that we have six sections for accounting. + +601 +00:56:55,850 --> 00:56:56,650 +six sections. + +602 +00:57:00,310 --> 00:57:05,210 +And I just choose two of these, cluster number one + +603 +00:57:05,210 --> 00:57:08,910 +or section number one and the last one. So my + +604 +00:57:08,910 --> 00:57:12,590 +chosen clusters are number one and six, one and + +605 +00:57:12,590 --> 00:57:19,090 +six. Or you can use the one we just talked about, + +606 +00:57:19,590 --> 00:57:23,340 +stratified random sample. instead of using all for + +607 +00:57:23,340 --> 00:57:29,140 +example suppose there are in this section there + +608 +00:57:29,140 --> 00:57:36,180 +are 70 students and the other one there are 80 + +609 +00:57:36,180 --> 00:57:42,300 +students and + +610 +00:57:42,300 --> 00:57:46,720 +the sample size here I am going to use case 20 + +611 +00:57:50,900 --> 00:57:56,520 +So you can use 10 here and 10 in the other one, or + +612 +00:57:56,520 --> 00:58:03,060 +it depends on the proportions. Now, 70 represents + +613 +00:58:03,060 --> 00:58:09,580 +70 out of 150, because there are 150 students in + +614 +00:58:09,580 --> 00:58:14,060 +these two clusters. Now, the entire population is + +615 +00:58:14,060 --> 00:58:17,300 +not the number for each of all of these clusters, + +616 +00:58:17,560 --> 00:58:22,310 +just number one sixth. So there are 150 students + +617 +00:58:22,310 --> 00:58:25,090 +in these two selected clusters. So the population + +618 +00:58:25,090 --> 00:58:30,030 +size is 150. Make sense? Then the proportion here + +619 +00:58:30,030 --> 00:58:33,210 +is 70 divided by 150 times 20. + +620 +00:58:35,970 --> 00:58:41,610 +The other one, 80 divided by 150 times 20. 
+ +621 +00:58:51,680 --> 00:58:55,960 +So again, all items in the selected clusters can + +622 +00:58:55,960 --> 00:58:59,400 +be used or items can be chosen from the cluster + +623 +00:58:59,400 --> 00:59:01,500 +using another probability technique as we + +624 +00:59:01,500 --> 00:59:06,640 +mentioned. Let's see how can we use another + +625 +00:59:06,640 --> 00:59:10,860 +example. Let's talk about again AUG students. + +626 +00:59:28,400 --> 00:59:31,800 +I choose suppose level number 2 and level number + +627 +00:59:31,800 --> 00:59:37,680 +4, two levels, 2 and 4. Then you can take either + +628 +00:59:37,680 --> 00:59:43,380 +all the students here or just assemble size + +629 +00:59:43,380 --> 00:59:46,460 +proportion to the + +630 +00:59:50,310 --> 00:59:54,130 +For example, this one represents 20%, and my + +631 +00:59:54,130 --> 00:59:56,730 +sample size is 1000, so in this case you have to + +632 +00:59:56,730 --> 01:00:00,310 +take 200 and 800 from that one. + +633 +01:00:03,050 --> 01:00:04,050 +Any questions? diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..7dabc3c99ae2d568a27596faf79a8e4be68b2b06 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_postprocess.srt @@ -0,0 +1,2532 @@ +1 +00:00:04,670 --> 00:00:08,210 +Today, Inshallah, we are going to start Chapter 7. + +2 +00:00:09,830 --> 00:00:14,910 +Chapter 7 talks about sampling and sampling + +3 +00:00:14,910 --> 00:00:22,690 +distributions. The objectives for this chapter are + +4 +00:00:22,690 --> 00:00:27,610 +number one we have different methods actually we + +5 +00:00:27,610 --> 00:00:31,330 +have two methods probability and non-probability + +6 +00:00:31,330 --> 00:00:34,750 +samples and we are going to distinguish between + +7 +00:00:35,420 --> 00:00:40,700 +these two sampling methods. 
So again, in this + +8 +00:00:40,700 --> 00:00:44,980 +chapter, we will talk about two different sampling + +9 +00:00:44,980 --> 00:00:49,480 +methods. One is called probability sampling and + +10 +00:00:49,480 --> 00:00:52,940 +the other is non-probability sampling. Our goal is + +11 +00:00:52,940 --> 00:00:56,520 +to distinguish between these two different + +12 +00:00:56,520 --> 00:00:59,280 +sampling methods. The other learning objective + +13 +00:00:59,280 --> 00:01:04,400 +will be We'll talk about the concept of the + +14 +00:01:04,400 --> 00:01:06,700 +sampling distribution. That will be next time, + +15 +00:01:06,800 --> 00:01:09,960 +inshallah. The third objective is compute + +16 +00:01:09,960 --> 00:01:15,480 +probabilities related to sample mean. In addition + +17 +00:01:15,480 --> 00:01:18,160 +to that, we'll talk about how can we compute + +18 +00:01:18,160 --> 00:01:22,920 +probabilities regarding the sample proportion. And + +19 +00:01:22,920 --> 00:01:27,130 +as I mentioned last time, There are two types of + +20 +00:01:27,130 --> 00:01:30,270 +data. One is called the numerical data. In this + +21 +00:01:30,270 --> 00:01:33,470 +case, we can use the sample mean. The other type + +22 +00:01:33,470 --> 00:01:36,630 +is called qualitative data. And in this case, we + +23 +00:01:36,630 --> 00:01:39,330 +have to use the sample proportion. So for this + +24 +00:01:39,330 --> 00:01:41,690 +chapter, we are going to discuss how can we + +25 +00:01:41,690 --> 00:01:46,370 +compute the probabilities for each one, either the + +26 +00:01:46,370 --> 00:01:50,090 +sample mean or the sample proportion. 
The last + +27 +00:01:50,090 --> 00:01:55,770 +objective of this chapter is to use the central + +28 +00:01:55,770 --> 00:01:58,190 +limit theorem which is the famous one of the most + +29 +00:01:58,190 --> 00:02:02,130 +famous theorem in this book which is called again + +30 +00:02:02,130 --> 00:02:05,690 +CLT central limit theorem and we are going to show + +31 +00:02:05,690 --> 00:02:09,310 +what are the what is the importance of this + +32 +00:02:09,310 --> 00:02:11,930 +theorem so these are the mainly the four + +33 +00:02:11,930 --> 00:02:16,610 +objectives for this chapter Now let's see why we + +34 +00:02:16,610 --> 00:02:20,270 +are talking about sampling. In other words, most + +35 +00:02:20,270 --> 00:02:23,850 +of the time when we are doing study, we are using + +36 +00:02:23,850 --> 00:02:27,700 +a sample. instead of using the entire population. + +37 +00:02:28,640 --> 00:02:32,080 +Now there are many reasons behind that. One of + +38 +00:02:32,080 --> 00:02:37,840 +these reasons is selecting a sample is less time + +39 +00:02:37,840 --> 00:02:40,940 +consuming than selecting every item in the + +40 +00:02:40,940 --> 00:02:44,060 +population. I think it makes sense that suppose we + +41 +00:02:44,060 --> 00:02:46,560 +have a huge population, that population consists + +42 +00:02:46,560 --> 00:02:53,140 +of thousands of items. So that will take more time + +43 +00:02:54,440 --> 00:03:00,220 +If you select 100 of their population. So time + +44 +00:03:00,220 --> 00:03:02,140 +consuming is very important. So number one, + +45 +00:03:03,000 --> 00:03:05,780 +selecting sample is less time consuming than using + +46 +00:03:05,780 --> 00:03:10,280 +all the entire population. The second reason, + +47 +00:03:10,880 --> 00:03:14,640 +selecting samples is less costly than selecting a + +48 +00:03:14,640 --> 00:03:17,280 +variety of population. 
Because if we have large + +49 +00:03:17,280 --> 00:03:19,560 +population, in this case you have to spend more + +50 +00:03:19,560 --> 00:03:23,540 +money in order to get the data or the information + +51 +00:03:23,540 --> 00:03:27,940 +from that population. So it's better to use these + +52 +00:03:27,940 --> 00:03:33,300 +samples. The other reason is the analysis. Our + +53 +00:03:33,300 --> 00:03:37,260 +sample is less cumbersome and more practical than + +54 +00:03:37,260 --> 00:03:40,880 +analysis of all items in the population. For these + +55 +00:03:40,880 --> 00:03:45,820 +reasons, we have to use a sample. For this reason, + +56 +00:03:45,880 --> 00:03:53,080 +we have to talk about sampling methods. Let's + +57 +00:03:53,080 --> 00:03:58,540 +start with sampling process. That begins with a + +58 +00:03:58,540 --> 00:04:05,320 +seminal frame. Now suppose my goal is to know the + +59 +00:04:05,320 --> 00:04:13,960 +opinion of IUG students about a certain subject. + +60 +00:04:16,260 --> 00:04:24,120 +So my population consists of all IUG students. So + +61 +00:04:24,120 --> 00:04:27,370 +that's the entire population. And you know that, + +62 +00:04:27,590 --> 00:04:31,750 +for example, suppose our usual students is around, + +63 +00:04:32,430 --> 00:04:39,890 +for example, 20,000 students. 20,000 students is a + +64 +00:04:39,890 --> 00:04:45,490 +big number. So it's better to select a sample from + +65 +00:04:45,490 --> 00:04:49,270 +that population. Now, the first step in this + +66 +00:04:49,270 --> 00:04:55,700 +process, we have to determine the frame. of that + +67 +00:04:55,700 --> 00:05:01,320 +population. So my frame consists of all IU + +68 +00:05:01,320 --> 00:05:04,740 +students, which has maybe males and females. So my + +69 +00:05:04,740 --> 00:05:09,560 +frame in this case is all items, I mean all + +70 +00:05:09,560 --> 00:05:15,380 +students at IUG. So that's the frame. 
So my frame
+
+71
+00:05:15,380 --> 00:05:18,720
+consists
+
+72
+00:05:18,720 --> 00:05:22,220
+of all students.
+
+73
+00:05:27,630 --> 00:05:32,370
+So the definition of
+
+74
+00:05:32,370 --> 00:05:36,010
+the sampling frame is a listing of items that make
+
+75
+00:05:36,010 --> 00:05:39,350
+up the population. The items could be individual,
+
+76
+00:05:40,170 --> 00:05:44,490
+could be students, could be things, animals, and
+
+77
+00:05:44,490 --> 00:05:49,650
+so on. So frames are data sources such as a
+
+78
+00:05:49,650 --> 00:05:54,840
+population list. Suppose we have the names of IUG
+
+79
+00:05:54,840 --> 00:05:58,840
+students. So that's my population list. Or
+
+80
+00:05:58,840 --> 00:06:02,160
+directories, or maps, and so on. So that's the
+
+81
+00:06:02,160 --> 00:06:05,520
+frame we have to know about the population we are
+
+82
+00:06:05,520 --> 00:06:10,900
+interested in. Inaccurate or biased results can
+
+83
+00:06:10,900 --> 00:06:16,460
+result if the frame excludes certain portions of the
+
+84
+00:06:16,460 --> 00:06:20,620
+population. For example, suppose here, as I
+
+85
+00:06:20,620 --> 00:06:24,180
+mentioned, we are interested in IUG students, so
+
+86
+00:06:24,180 --> 00:06:29,280
+my frame is all IUG students. And I know there are
+
+87
+00:06:29,280 --> 00:06:35,900
+students, either males or females. Suppose for
+
+88
+00:06:35,900 --> 00:06:40,880
+some reasons, we ignore males, and just my sample
+
+89
+00:06:40,880 --> 00:06:45,080
+focused on females. In this case, females
+
+90
+00:06:48,700 --> 00:06:51,900
+don't represent the entire population. For this
+
+91
+00:06:51,900 --> 00:06:57,720
+reason, you will get inaccurate or biased results
+
+92
+00:06:57,720 --> 00:07:02,000
+if you ignore a certain portion. Because here
+
+93
+00:07:02,000 --> 00:07:08,580
+males, for example, maybe consists of 40% of the
+
+94
+00:07:08,580 --> 00:07:12,960
+IUG students. 
So it makes sense that this number or + +95 +00:07:12,960 --> 00:07:16,980 +this percentage is a big number. So ignoring this + +96 +00:07:16,980 --> 00:07:21,160 +portion, may lead to misleading results or + +97 +00:07:21,160 --> 00:07:26,160 +inaccurate results or biased results. So you have + +98 +00:07:26,160 --> 00:07:29,600 +to keep in mind that you have to choose all the + +99 +00:07:29,600 --> 00:07:33,740 +portions of that frame. So inaccurate or biased + +100 +00:07:33,740 --> 00:07:38,700 +results can result if a frame excludes certain + +101 +00:07:38,700 --> 00:07:43,180 +portions of a population. Another example, suppose + +102 +00:07:43,180 --> 00:07:48,680 +we took males and females. But here for females, + +103 +00:07:49,240 --> 00:07:56,020 +females have, for example, four levels. Level one, + +104 +00:07:56,400 --> 00:07:59,980 +level two, level three, and level four. And we + +105 +00:07:59,980 --> 00:08:05,560 +ignored, for example, level one. I mean, the new + +106 +00:08:05,560 --> 00:08:09,520 +students. We ignored this portion. Maybe this + +107 +00:08:09,520 --> 00:08:12,860 +portion is very important one, but by mistake we + +108 +00:08:12,860 --> 00:08:18,690 +ignored this one. The remaining three levels will + +109 +00:08:18,690 --> 00:08:22,430 +not represent the entire female population. For + +110 +00:08:22,430 --> 00:08:25,330 +this reason, you will get inaccurate or biased + +111 +00:08:25,330 --> 00:08:31,290 +results. So you have to select all the portions of + +112 +00:08:31,290 --> 00:08:36,610 +the frames. Using different frames to generate + +113 +00:08:36,610 --> 00:08:40,110 +data can lead to dissimilar conclusions. For + +114 +00:08:40,110 --> 00:08:46,020 +example, Suppose again I am interested in IEG + +115 +00:08:46,020 --> 00:08:46,720 +students. + +116 +00:08:49,440 --> 00:08:59,460 +And I took the frame that has all students at + +117 +00:08:59,460 --> 00:09:04,060 +University of Gaza, Universities of Gaza. 
+ +118 +00:09:09,250 --> 00:09:12,110 +And as we know that Gaza has three universities, + +119 +00:09:12,350 --> 00:09:15,530 +big universities, Islamic University, Lazar + +120 +00:09:15,530 --> 00:09:18,030 +University, and Al-Aqsa University. So we have + +121 +00:09:18,030 --> 00:09:23,310 +three universities. And my frame here, suppose I + +122 +00:09:23,310 --> 00:09:27,410 +took all students at these universities, but my + +123 +00:09:27,410 --> 00:09:32,470 +study focused on IU students. So my frame, the + +124 +00:09:32,470 --> 00:09:38,250 +true one, is all students at IUG. But I taught all + +125 +00:09:38,250 --> 00:09:42,170 +students at universities in Gaza. So now we have + +126 +00:09:42,170 --> 00:09:44,690 +different frames. + +127 +00:09:48,610 --> 00:09:54,590 +And you want to know what are the opinions of the + +128 +00:09:54,590 --> 00:09:59,910 +smokers about smoking. So my population now is + +129 +00:09:59,910 --> 00:10:00,530 +just... + +130 +00:10:14,030 --> 00:10:19,390 +So that's my thing. + +131 +00:10:21,010 --> 00:10:32,410 +I suppose I talk to a field that has one atom. + +132 +00:10:40,780 --> 00:10:46,040 +Oh my goodness. They are very different things. + +133 +00:10:47,700 --> 00:10:53,720 +The first one consists of only smokers. They are + +134 +00:10:53,720 --> 00:10:58,100 +very interested in you. The other one consists + +135 +00:10:58,100 --> 00:11:06,560 +of... Anonymous. I thought maybe... Smoker or non + +136 +00:11:06,560 --> 00:11:10,460 +-smokers. For this reason, you will get... + +137 +00:11:17,410 --> 00:11:19,350 +Conclusion, different results. + +138 +00:11:22,090 --> 00:11:28,850 +So now, + +139 +00:11:29,190 --> 00:11:33,610 +the sampling frame is a listing of items that make + +140 +00:11:33,610 --> 00:11:39,510 +up the entire population. Let's move to the types + +141 +00:11:39,510 --> 00:11:44,910 +of samples. Mainly there are two types of + +142 +00:11:44,910 --> 00:11:49,070 +sampling. One is cold. 
Non-probability samples.
+
+143
+00:11:50,370 --> 00:11:54,650
+The other one is called probability samples. The
+
+144
+00:11:54,650 --> 00:11:59,790
+non-probability samples can be divided into two
+
+145
+00:11:59,790 --> 00:12:04,030
+segments. One is called judgment and the other
+
+146
+00:12:04,030 --> 00:12:08,710
+convenience. So we have judgment and convenience
+
+147
+00:12:08,710 --> 00:12:13,140
+non-probability samples. The other type which is
+
+148
+00:12:13,140 --> 00:12:17,560
+random probability samples has four segments or
+
+149
+00:12:17,560 --> 00:12:21,680
+four parts. The first one is called simple random
+
+150
+00:12:21,680 --> 00:12:25,680
+sample. The other one is systematic. The second
+
+151
+00:12:25,680 --> 00:12:28,680
+one is systematic random sample. The third one is
+
+152
+00:12:28,680 --> 00:12:32,940
+stratified. The fourth one cluster random sample.
+
+153
+00:12:33,460 --> 00:12:37,770
+So there are two types of sampling. Probability
+
+154
+00:12:37,770 --> 00:12:41,490
+and non-probability. Probability has four
+
+155
+00:12:41,490 --> 00:12:45,350
+methods here, simple random samples, systematic,
+
+156
+00:12:45,530 --> 00:12:48,530
+stratified, and cluster. And the non-probability
+
+157
+00:12:48,530 --> 00:12:53,090
+samples has two types, judgment and convenience.
+
+158
+00:12:53,670 --> 00:12:58,490
+Let's see the definition of each type of samples.
+
+159
+00:12:59,190 --> 00:13:03,720
+Let's start with non-probability sample. In non
+
+160
+00:13:03,720 --> 00:13:07,000
+-probability sample, items are included or chosen
+
+161
+00:13:07,000 --> 00:13:10,800
+without regard to their probability of occurrence.
+
+162
+00:13:11,760 --> 00:13:14,740
+So that's the definition of non-probability. For
+
+163
+00:13:14,740 --> 00:13:15,100
+example. 
+ +164 +00:13:23,660 --> 00:13:26,480 +So again, non-probability sample, it means you + +165 +00:13:26,480 --> 00:13:29,580 +select items without regard to their probability + +166 +00:13:29,580 --> 00:13:34,030 +of occurrence. For example, suppose females + +167 +00:13:34,030 --> 00:13:42,430 +consist of 70% of IUG students and males, the + +168 +00:13:42,430 --> 00:13:49,930 +remaining percent is 30%. And suppose I decided to + +169 +00:13:49,930 --> 00:13:56,610 +select a sample of 100 or 1000 students from IUG. + +170 +00:13:58,620 --> 00:14:07,980 +Suddenly, I have a sample that has 650 males and + +171 +00:14:07,980 --> 00:14:14,780 +350 females. Now, this sample, which has these + +172 +00:14:14,780 --> 00:14:19,260 +numbers, for sure does not represent the entire + +173 +00:14:19,260 --> 00:14:25,240 +population. Because females has 70%, and I took a + +174 +00:14:25,240 --> 00:14:30,890 +random sample or a sample of size 350. So this + +175 +00:14:30,890 --> 00:14:35,830 +sample is chosen without regard to the probability + +176 +00:14:35,830 --> 00:14:40,370 +here. Because in this case, I should choose males + +177 +00:14:40,370 --> 00:14:44,110 +with respect to their probability, which is 30%. + +178 +00:14:44,110 --> 00:14:49,330 +But in this case, I just choose different + +179 +00:14:49,330 --> 00:14:54,990 +proportions. Another example. Suppose + +180 +00:14:57,260 --> 00:14:59,920 +Again, I am talking about smoking. + +181 +00:15:05,080 --> 00:15:10,120 +And I know that some people are smoking and I just + +182 +00:15:10,120 --> 00:15:14,040 +took this sample. So I took this sample based on + +183 +00:15:14,040 --> 00:15:18,600 +my knowledge. So it's without regard to their + +184 +00:15:18,600 --> 00:15:23,340 +probability. Maybe suppose I am talking about + +185 +00:15:23,340 --> 00:15:28,330 +political opinions about something. And I just + +186 +00:15:28,330 --> 00:15:36,330 +took the experts of that subject. 
So my sample is + +187 +00:15:36,330 --> 00:15:42,070 +not a probability sample. And this one has, as we + +188 +00:15:42,070 --> 00:15:44,230 +mentioned, has two types. One is called + +189 +00:15:44,230 --> 00:15:49,010 +convenience sampling. In this case, items are + +190 +00:15:49,010 --> 00:15:51,710 +selected based only on the fact that they are + +191 +00:15:51,710 --> 00:15:55,590 +easy. So I choose that sample because it's easy. + +192 +00:15:57,090 --> 00:15:57,690 +Inexpensive, + +193 +00:16:02,190 --> 00:16:09,790 +inexpensive, or convenient to sample. If I choose + +194 +00:16:09,790 --> 00:16:13,430 +my sample because it is easy or inexpensive, I + +195 +00:16:13,430 --> 00:16:18,480 +think it doesn't make any sense, because easy. is + +196 +00:16:18,480 --> 00:16:23,780 +not a reason to select that sample. Inexpensive I + +197 +00:16:23,780 --> 00:16:27,080 +think is also is not that big reason. But if you + +198 +00:16:27,080 --> 00:16:30,340 +select a sample because these items are convenient + +199 +00:16:30,340 --> 00:16:33,760 +to assemble, it makes sense. So convenient sample + +200 +00:16:33,760 --> 00:16:38,280 +can be chosen based on easy, inexpensive or + +201 +00:16:38,280 --> 00:16:42,280 +convenient to assemble. On the other hand, In + +202 +00:16:42,280 --> 00:16:45,140 +judgment sample, you get the opinions of pre + +203 +00:16:45,140 --> 00:16:49,360 +-selected experts in the subject matter. For + +204 +00:16:49,360 --> 00:16:52,700 +example, suppose we are talking about the causes + +205 +00:16:52,700 --> 00:16:56,560 +of certain disease. Suppose we are talking about + +206 +00:16:56,560 --> 00:16:57,760 +cancer. 
+ +207 +00:17:01,720 --> 00:17:06,820 +If I know the expert for this type of disease, + +208 +00:17:07,620 --> 00:17:10,340 +that means you have judgment sample because you + +209 +00:17:10,340 --> 00:17:14,720 +decided Before you select a sample that your + +210 +00:17:14,720 --> 00:17:27,500 +sample should contain only the expert in + +211 +00:17:27,500 --> 00:17:31,360 +cancer disease. So that's the judgment sampling. + +212 +00:17:32,260 --> 00:17:36,800 +So in this case, I didn't take all the doctors in + +213 +00:17:36,800 --> 00:17:41,340 +this case, I just taught the expert in cancer + +214 +00:17:41,340 --> 00:17:45,160 +disease. So that's called non-probability samples. + +215 +00:17:45,340 --> 00:17:48,820 +You have to make sense to distinguish between + +216 +00:17:48,820 --> 00:17:54,700 +convenience sampling and judgment sample. So for + +217 +00:17:54,700 --> 00:17:57,980 +judgment, you select a sample based on the prior + +218 +00:17:57,980 --> 00:18:00,740 +information you have about the subject matter. + +219 +00:18:02,870 --> 00:18:05,410 +Suppose I am talking about something related to + +220 +00:18:05,410 --> 00:18:08,830 +psychology, so I have to take the expert in + +221 +00:18:08,830 --> 00:18:12,910 +psychology. Suppose I am talking about expert in + +222 +00:18:12,910 --> 00:18:17,050 +sports, so I have to take a sample from that + +223 +00:18:17,050 --> 00:18:20,970 +segment and so on. But the convenient sample means + +224 +00:18:20,970 --> 00:18:24,690 +that you select a sample maybe that is easy for + +225 +00:18:24,690 --> 00:18:29,430 +you, or less expensive, or that sample is + +226 +00:18:29,430 --> 00:18:32,980 +convenient. For this reason, it's called non + +227 +00:18:32,980 --> 00:18:36,300 +-probability sample because we choose that sample + +228 +00:18:36,300 --> 00:18:39,540 +without regard to their probability of occurrence. + +229 +00:18:41,080 --> 00:18:48,620 +The other type is called probability samples. 
In
+
+230
+00:18:48,620 --> 00:18:54,200
+this case, items are chosen on the basis of known
+
+231
+00:18:54,200 --> 00:18:58,600
+probabilities. For example, here, if males
+
+232
+00:19:02,500 --> 00:19:11,060
+has or represent 30%, and females represent 70%,
+
+233
+00:19:11,060 --> 00:19:14,840
+and the sample size is a thousand. So in this case,
+
+234
+00:19:14,920 --> 00:19:19,340
+you have to choose females with respect to their
+
+235
+00:19:19,340 --> 00:19:24,260
+probability. Now 70% for females, so I have to
+
+236
+00:19:24,260 --> 00:19:29,430
+choose 700 for females and the remaining 300 for
+
+237
+00:19:29,430 --> 00:19:34,010
+males. So in this case, I choose the items, I mean
+
+238
+00:19:34,010 --> 00:19:37,970
+I choose my samples regarding to their
+
+239
+00:19:37,970 --> 00:19:39,050
+probability.
+
+240
+00:19:41,010 --> 00:19:45,190
+So in probability samples, items in the sample are
+
+241
+00:19:45,190 --> 00:19:48,610
+chosen on the basis of known probabilities. And
+
+242
+00:19:48,610 --> 00:19:52,360
+again, there are four types of probability
+
+243
+00:19:52,360 --> 00:19:55,580
+samples, simple random sample, systematic,
+
+244
+00:19:56,120 --> 00:19:59,660
+stratified, and cluster. Let's talk about each one
+
+245
+00:19:59,660 --> 00:20:05,040
+in details. The first type is called a probability
+
+246
+00:20:05,040 --> 00:20:11,720
+sample. Simple random sample. The first type of
+
+247
+00:20:11,720 --> 00:20:16,200
+probability sample is the easiest one. Simple
+
+248
+00:20:16,200 --> 00:20:23,780
+random sample. Generally is denoted by SRS, Simple
+
+249
+00:20:23,780 --> 00:20:30,660
+Random Sample. Let's see how can we choose a
+
+250
+00:20:30,660 --> 00:20:35,120
+sample that is random. What do you mean by random?
+
+251
+00:20:36,020 --> 00:20:41,780
+In this case, every individual or item from the
+
+252
+00:20:41,780 --> 00:20:47,620
+frame has an equal chance of being selected. 
For + +253 +00:20:47,620 --> 00:20:52,530 +example, suppose number of students in this class + +254 +00:20:52,530 --> 00:21:04,010 +number of students is 52 so + +255 +00:21:04,010 --> 00:21:11,890 +each one, I mean each student from + +256 +00:21:11,890 --> 00:21:17,380 +1 up to 52 has the same probability of being + +257 +00:21:17,380 --> 00:21:23,860 +selected. 1 by 52. 1 by 52. 1 divided by 52. So + +258 +00:21:23,860 --> 00:21:27,980 +each one has this probability. So the first one + +259 +00:21:27,980 --> 00:21:31,820 +has the same because if I want to select for + +260 +00:21:31,820 --> 00:21:37,680 +example 10 out of you. So the first one has each + +261 +00:21:37,680 --> 00:21:42,400 +one has probability of 1 out of 52. That's the + +262 +00:21:42,400 --> 00:21:47,160 +meaning ofEach item from the frame has an equal + +263 +00:21:47,160 --> 00:21:54,800 +chance of being selected. Selection may be with + +264 +00:21:54,800 --> 00:21:58,800 +replacement. With replacement means selected + +265 +00:21:58,800 --> 00:22:02,040 +individuals is returned to the frame for + +266 +00:22:02,040 --> 00:22:04,880 +possibility selection, or without replacement + +267 +00:22:04,880 --> 00:22:08,600 +means selected individuals or item is not returned + +268 +00:22:08,600 --> 00:22:10,820 +to the frame. So we have two types of selection, + +269 +00:22:11,000 --> 00:22:14,360 +either with... So with replacement means item is + +270 +00:22:14,360 --> 00:22:18,080 +returned back to the frame, or without population, + +271 +00:22:18,320 --> 00:22:21,400 +the item is not returned back to the frame. So + +272 +00:22:21,400 --> 00:22:26,490 +that's the two types of selection. Now how can we + +273 +00:22:26,490 --> 00:22:29,810 +obtain the sample? Sample obtained from something + +274 +00:22:29,810 --> 00:22:33,470 +called table of random numbers. In a minute I will + +275 +00:22:33,470 --> 00:22:36,430 +show you the table of random numbers. 
And other + +276 +00:22:36,430 --> 00:22:40,130 +method of selecting a sample by using computer + +277 +00:22:40,130 --> 00:22:44,890 +random number generators. So there are two methods + +278 +00:22:44,890 --> 00:22:48,310 +for selecting a random number. Either by using the + +279 +00:22:48,310 --> 00:22:51,950 +table that you have at the end of your book or by + +280 +00:22:51,950 --> 00:22:56,550 +using a computer. I will show one of these and in + +281 +00:22:56,550 --> 00:22:59,650 +the SPSS course you will see another one which is + +282 +00:22:59,650 --> 00:23:03,690 +by using a computer. So let's see how can we + +283 +00:23:03,690 --> 00:23:11,730 +obtain a sample from table of + +284 +00:23:11,730 --> 00:23:12,590 +random number. + +285 +00:23:16,950 --> 00:23:22,090 +I have maybe different table here. But the same + +286 +00:23:22,090 --> 00:23:28,090 +idea to use that table. Let's see how can we + +287 +00:23:28,090 --> 00:23:34,990 +choose a sample by using a random number. + +288 +00:23:42,490 --> 00:23:47,370 +Now, for example, suppose in this class As I + +289 +00:23:47,370 --> 00:23:51,090 +mentioned, there are 52 students. + +290 +00:23:55,110 --> 00:23:58,650 +So each one has a number, ID number one, two, up + +291 +00:23:58,650 --> 00:24:05,110 +to 52. So the numbers are 01, 02, all the way up + +292 +00:24:05,110 --> 00:24:10,790 +to 52. So the maximum digits here, two, two + +293 +00:24:10,790 --> 00:24:11,110 +digits. + +294 +00:24:15,150 --> 00:24:18,330 +1, 2, 3, up to 5, 2, 2, so you have two digits. + +295 +00:24:19,470 --> 00:24:23,710 +Now suppose I decided to take a random sample of + +296 +00:24:23,710 --> 00:24:28,550 +size, for example, N instead. How can I select N + +297 +00:24:28,550 --> 00:24:32,570 +out of U? In this case, each one has the same + +298 +00:24:32,570 --> 00:24:36,790 +chance of being selected. Now based on this table, + +299 +00:24:37,190 --> 00:24:44,230 +you can pick any row or any column. Randomly. 
For + +300 +00:24:44,230 --> 00:24:51,630 +example, suppose I select the first row. Now, the + +301 +00:24:51,630 --> 00:24:56,570 +first student will be selected as student number + +302 +00:24:56,570 --> 00:25:03,650 +to take two digits. We have to take how many + +303 +00:25:03,650 --> 00:25:08,770 +digits? Because students have ID card that + +304 +00:25:08,770 --> 00:25:13,930 +consists of two digits, 0102 up to 52. So, what's + +305 +00:25:13,930 --> 00:25:17,010 +the first number students will be selected based + +306 +00:25:17,010 --> 00:25:22,130 +on this table? Forget about the line 101. + +307 +00:25:26,270 --> 00:25:27,770 +Start with this number. + +308 +00:25:42,100 --> 00:25:50,900 +So the first one, 19. The second, 22. The third + +309 +00:25:50,900 --> 00:25:51,360 +student, + +310 +00:25:54,960 --> 00:26:04,000 +19, 22. The third, 9. The third, 9. I'm taking the + +311 +00:26:04,000 --> 00:26:16,510 +first row. Then fifth. 34 student + +312 +00:26:16,510 --> 00:26:18,710 +number 05 + +313 +00:26:24,340 --> 00:26:29,500 +Now, what's about seventy-five? Seventy-five is + +314 +00:26:29,500 --> 00:26:33,660 +not selected because the maximum I have is fifty + +315 +00:26:33,660 --> 00:26:46,180 +-two. Next. Sixty-two is not selected. Eighty + +316 +00:26:46,180 --> 00:26:53,000 +-seven. It's not selected. 13. 13. It's okay. + +317 +00:26:53,420 --> 00:27:01,740 +Next. 96. 96. Not selected. 14. 14 is okay. 91. + +318 +00:27:02,140 --> 00:27:12,080 +91. 91. Not selected. 95. 91. 45. 85. 31. 31. + +319 +00:27:15,240 --> 00:27:21,900 +So that's 10. So students numbers are 19, 22, 39, + +320 +00:27:22,140 --> 00:27:26,980 +50, 34, 5, 13, 4, 25 and take one will be + +321 +00:27:26,980 --> 00:27:30,940 +selected. So these are the ID numbers will be + +322 +00:27:30,940 --> 00:27:35,480 +selected in order to get a sample of 10. You + +323 +00:27:35,480 --> 00:27:40,500 +exclude + +324 +00:27:40,500 --> 00:27:43,440 +that one. 
If the number is repeated, you have to + +325 +00:27:43,440 --> 00:27:44,340 +exclude that one. + +326 +00:27:51,370 --> 00:27:57,270 +is repeated, then excluded. + +327 +00:28:02,370 --> 00:28:07,370 +So the returned number must be excluded from the + +328 +00:28:07,370 --> 00:28:14,030 +sample. Let's imagine that we have not 52 + +329 +00:28:14,030 --> 00:28:19,130 +students. We have 520 students. + +330 +00:28:25,740 --> 00:28:32,520 +Now, I have large number, 52, 520 instead of 52 + +331 +00:28:32,520 --> 00:28:36,080 +students. And again, my goal is to select just 10 + +332 +00:28:36,080 --> 00:28:42,220 +students out of 120. So each one has ID with + +333 +00:28:42,220 --> 00:28:46,220 +number one, two, all the way up to 520. So the + +334 +00:28:46,220 --> 00:28:53,160 +first one, 001. 002 all the way up to 520 now in + +335 +00:28:53,160 --> 00:28:56,480 +this case you have to choose three digits start + +336 +00:28:56,480 --> 00:29:00,060 +for example you don't have actually to start with + +337 +00:29:00,060 --> 00:29:03,060 +row number one maybe column number one or row + +338 +00:29:03,060 --> 00:29:06,140 +number two whatever is fine so let's start with + +339 +00:29:06,140 --> 00:29:10,460 +row number two for example row number 76 + +340 +00:29:14,870 --> 00:29:19,950 +It's not selected. Because the maximum number I + +341 +00:29:19,950 --> 00:29:25,110 +have is 5 to 20. So, 746 shouldn't be selected. + +342 +00:29:26,130 --> 00:29:29,430 +The next one, 764. + +343 +00:29:31,770 --> 00:29:38,750 +Again, it's not selected. 764, 715. Not selected. + +344 +00:29:38,910 --> 00:29:42,310 +Next one is 715. 
+ +345 +00:29:44,880 --> 00:29:52,200 +099 should be 0 that's + +346 +00:29:52,200 --> 00:29:54,940 +the way how can we use the random table for using + +347 +00:29:54,940 --> 00:29:58,800 +or for selecting simple random symbols so in this + +348 +00:29:58,800 --> 00:30:03,480 +case you can choose any row or any column then you + +349 +00:30:03,480 --> 00:30:06,620 +have to decide how many digits you have to select + +350 +00:30:06,620 --> 00:30:10,500 +it depends on the number you have I mean the + +351 +00:30:10,500 --> 00:30:16,510 +population size If for example Suppose I am + +352 +00:30:16,510 --> 00:30:20,270 +talking about IUPUI students and for example, we + +353 +00:30:20,270 --> 00:30:26,530 +have 30,000 students at this school And again, I + +354 +00:30:26,530 --> 00:30:28,570 +want to select a random sample of size 10 for + +355 +00:30:28,570 --> 00:30:35,190 +example So how many digits should I use? 20,000 + +356 +00:30:35,190 --> 00:30:42,620 +Five digits And each one, each student has ID + +357 +00:30:42,620 --> 00:30:51,760 +from, starts from the first one up to twenty + +358 +00:30:51,760 --> 00:30:56,680 +thousand. So now, start with, for example, the + +359 +00:30:56,680 --> 00:30:59,240 +last row you have. + +360 +00:31:03,120 --> 00:31:08,480 +The first number 54000 is not. 81 is not. None of + +361 +00:31:08,480 --> 00:31:08,740 +these. + +362 +00:31:12,420 --> 00:31:17,760 +Look at the next one. 71000 is not selected. Now + +363 +00:31:17,760 --> 00:31:22,180 +9001. So the first number I have to select is + +364 +00:31:22,180 --> 00:31:27,200 +9001. None of the rest. Go back. + +365 +00:31:30,180 --> 00:31:37,790 +Go to the next one. The second number, 12149 + +366 +00:31:37,790 --> 00:31:45,790 +and so on. Next will be 18000 and so on. Next row, + +367 +00:31:46,470 --> 00:31:55,530 +we can select the second one, then 16, then 14000, + +368 +00:31:55,890 --> 00:32:00,850 +6500 and so on. 
So this is the way how can we use + +369 +00:32:00,850 --> 00:32:08,110 +the random table. It seems to be that tons of work + +370 +00:32:08,110 --> 00:32:13,450 +if you have large sample. Because in this case, + +371 +00:32:13,530 --> 00:32:16,430 +you have to choose, for example, suppose I am + +372 +00:32:16,430 --> 00:32:22,390 +interested to take a random sample of 10,000. Now, + +373 +00:32:22,510 --> 00:32:28,370 +to use this table to select 10,000 items takes + +374 +00:32:28,370 --> 00:32:33,030 +time and effort and maybe will never finish. So + +375 +00:32:33,030 --> 00:32:33,950 +it's better to use + +376 +00:32:38,020 --> 00:32:42,100 +better to use computer + +377 +00:32:42,100 --> 00:32:47,140 +random number generators. So that's the way if we, + +378 +00:32:47,580 --> 00:32:51,880 +now we can use the random table only if the sample + +379 +00:32:51,880 --> 00:32:57,780 +size is limited. I mean up to 100 maybe you can + +380 +00:32:57,780 --> 00:33:03,160 +use the random table, but after that I think it's + +381 +00:33:03,160 --> 00:33:08,670 +just you are losing your time. Another example + +382 +00:33:08,670 --> 00:33:14,390 +here. Now suppose my sampling frame for population + +383 +00:33:14,390 --> 00:33:23,230 +has 850 students. So the numbers are 001, 002, all + +384 +00:33:23,230 --> 00:33:28,490 +the way up to 850. And suppose for example we are + +385 +00:33:28,490 --> 00:33:33,610 +going to select five items randomly from that + +386 +00:33:33,610 --> 00:33:39,610 +population. So you have to choose three digits and + +387 +00:33:39,610 --> 00:33:44,990 +imagine that this is my portion of that table. + +388 +00:33:45,850 --> 00:33:51,570 +Now, take three digits. The first three digits are + +389 +00:33:51,570 --> 00:34:00,330 +492. So the first item chosen should be item + +390 +00:34:00,330 --> 00:34:10,540 +number 492. 
should be selected next one 800 808 + +391 +00:34:10,540 --> 00:34:17,020 +doesn't select because the maximum it's much + +392 +00:34:17,020 --> 00:34:21,100 +selected because the maximum here is 850 now next + +393 +00:34:21,100 --> 00:34:26,360 +one 892 this + +394 +00:34:26,360 --> 00:34:32,140 +one is not selected next + +395 +00:34:32,140 --> 00:34:43,030 +item four three five selected now + +396 +00:34:43,030 --> 00:34:50,710 +seven seven nine should be selected finally zeros + +397 +00:34:50,710 --> 00:34:53,130 +two should be selected so these are the five + +398 +00:34:53,130 --> 00:34:58,090 +numbers in my sample by using selected by using + +399 +00:34:58,090 --> 00:35:01,190 +the random sample any questions? + +400 +00:35:04,160 --> 00:35:07,780 +Let's move to another part. + +401 +00:35:17,600 --> 00:35:22,380 +The next type of samples is called systematic + +402 +00:35:22,380 --> 00:35:25,260 +samples. + +403 +00:35:29,120 --> 00:35:35,780 +Now suppose N represents the sample size, capital + +404 +00:35:35,780 --> 00:35:40,520 +N represents + +405 +00:35:40,520 --> 00:35:42,220 +the population size. + +406 +00:35:46,660 --> 00:35:49,900 +And let's see how can we choose a systematic + +407 +00:35:49,900 --> 00:35:54,040 +random sample from that population. For example, + +408 +00:35:55,260 --> 00:35:57,180 +suppose + +409 +00:35:59,610 --> 00:36:05,010 +For this specific slide, there are 40 items in the + +410 +00:36:05,010 --> 00:36:11,370 +population. And my goal is to select a sample of + +411 +00:36:11,370 --> 00:36:16,210 +size 4 by using systematic random sampling. The + +412 +00:36:16,210 --> 00:36:23,290 +first step is to find how many individuals will be + +413 +00:36:23,290 --> 00:36:28,990 +in any group. Let's use this letter K. + +414 +00:36:31,820 --> 00:36:36,940 +divide N by, divide frame of N individuals into + +415 +00:36:36,940 --> 00:36:42,900 +groups of K individuals. 
So, K equal capital N + +416 +00:36:42,900 --> 00:36:48,840 +over small n, this is number of items in a group. + +417 +00:36:51,570 --> 00:36:56,510 +So K represents number of subjects or number of + +418 +00:36:56,510 --> 00:37:02,750 +elements in a group. So for this example, K equals + +419 +00:37:02,750 --> 00:37:09,710 +40 divided by 4, so 10. So the group, each group + +420 +00:37:09,710 --> 00:37:11,670 +has 10 items. + +421 +00:37:16,630 --> 00:37:23,140 +So each group has 10 items. + +422 +00:37:27,420 --> 00:37:33,860 +So group number 1, 10 items, and others have the + +423 +00:37:33,860 --> 00:37:38,660 +same number. So first step, we have to decide how + +424 +00:37:38,660 --> 00:37:42,110 +many items will be in the group. And that number + +425 +00:37:42,110 --> 00:37:45,330 +equals N divided by small n, capital N divided by + +426 +00:37:45,330 --> 00:37:48,910 +small n. In this case, N is 40, the sample size is + +427 +00:37:48,910 --> 00:37:54,170 +4, so there are 10 items in each individual. Next + +428 +00:37:54,170 --> 00:38:02,850 +step, select randomly the first individual from + +429 +00:38:02,850 --> 00:38:08,620 +the first group. For example, here. Now, how many + +430 +00:38:08,620 --> 00:38:13,360 +we have here? We have 10 items. So, numbers are + +431 +00:38:13,360 --> 00:38:19,060 +01, 02, up to 10. I have to choose one more number + +432 +00:38:19,060 --> 00:38:23,680 +from these numbers, from 1 to 10, by using the + +433 +00:38:23,680 --> 00:38:27,600 +random table again. So, I have to go back to the + +434 +00:38:27,600 --> 00:38:33,730 +random table and I choose two digits. Now the + +435 +00:38:33,730 --> 00:38:36,490 +first one is nineteen, twenty-two, thirty-nine, + +436 +00:38:37,130 --> 00:38:43,450 +fifty, thirty-four, five. So I have to see. So + +437 +00:38:43,450 --> 00:38:46,230 +number one is five. What's the next one? The next + +438 +00:38:46,230 --> 00:38:54,190 +one just add K. K is ten. So next is fifteen. 
Then + +439 +00:38:54,190 --> 00:38:58,010 +twenty-five, then thirty-four. + +440 +00:39:02,900 --> 00:39:08,840 +Number size consists of four items. So the first + +441 +00:39:08,840 --> 00:39:12,740 +number is chosen randomly by using the random + +442 +00:39:12,740 --> 00:39:17,260 +table. The next number just add the step. This is + +443 +00:39:17,260 --> 00:39:24,340 +step. So my step is 10 because number one is five. + +444 +00:39:25,300 --> 00:39:27,800 +The first item I mean is five. Then it should be + +445 +00:39:27,800 --> 00:39:31,780 +15, 25, 35, and so on if we have more than that. + +446 +00:39:33,230 --> 00:39:37,730 +Okay, so that's for, in this example, he choose + +447 +00:39:37,730 --> 00:39:42,790 +item number seven. Random selection, number seven. + +448 +00:39:43,230 --> 00:39:50,010 +So next should be 17, 27, 37, and so on. Let's do + +449 +00:39:50,010 --> 00:39:50,710 +another example. + +450 +00:39:58,590 --> 00:40:06,540 +Suppose there are In this class, there are 50 + +451 +00:40:06,540 --> 00:40:12,400 +students. So the total is 50. + +452 +00:40:15,320 --> 00:40:26,780 +10 students out of 50. So my sample is 10. Now + +453 +00:40:26,780 --> 00:40:30,260 +still, 50 divided by 10 is 50. + +454 +00:40:33,630 --> 00:40:39,650 +So there are five items or five students in a + +455 +00:40:39,650 --> 00:40:45,370 +group. So we have five in + +456 +00:40:45,370 --> 00:40:51,490 +the first group and then five in the next one and + +457 +00:40:51,490 --> 00:40:56,130 +so on. So we have how many groups? Ten groups. + +458 +00:40:59,530 --> 00:41:04,330 +So first step, you have to find a step. Still it + +459 +00:41:04,330 --> 00:41:07,930 +means number of items or number of students in a + +460 +00:41:07,930 --> 00:41:16,170 +group. Next step, select student at random from + +461 +00:41:16,170 --> 00:41:22,010 +the first group, so random selection. 
Now, here + +462 +00:41:22,010 --> 00:41:28,610 +there are five students, so 01, I'm sorry, not 01, + +463 +00:41:29,150 --> 00:41:35,080 +1, 2, 3, 4, 5, so one digit. Only one digit. + +464 +00:41:35,800 --> 00:41:39,420 +Because I have maximum number is five. So it's + +465 +00:41:39,420 --> 00:41:42,920 +only one digit. So go again to the random table + +466 +00:41:42,920 --> 00:41:48,220 +and take one digit. One. So my first item, six, + +467 +00:41:48,760 --> 00:41:52,580 +eleven, sixteen, twenty-one, twenty-one, all the + +468 +00:41:52,580 --> 00:41:55,500 +way up to ten items. + +469 +00:42:13,130 --> 00:42:18,170 +So I choose student number one, then skip five, + +470 +00:42:19,050 --> 00:42:22,230 +choose number six, and so on. It's called + +471 +00:42:22,230 --> 00:42:26,130 +systematic. Because if you know the first item, + +472 +00:42:28,550 --> 00:42:32,690 +and the step you can know the rest of these. + +473 +00:42:37,310 --> 00:42:41,150 +Imagine that you want to select 10 students who + +474 +00:42:41,150 --> 00:42:48,010 +entered the cafe shop or restaurant. You can pick + +475 +00:42:48,010 --> 00:42:54,790 +one of them. So suppose I'm taking number three + +476 +00:42:54,790 --> 00:43:00,550 +and my step is six. So three, then nine, and so + +477 +00:43:00,550 --> 00:43:00,790 +on. + +478 +00:43:05,830 --> 00:43:13,310 +So that's systematic assembly. Questions? So + +479 +00:43:13,310 --> 00:43:20,710 +that's about random samples and systematic. What + +480 +00:43:20,710 --> 00:43:23,550 +do you mean by stratified groups? + +481 +00:43:28,000 --> 00:43:33,080 +Let's use a definition and an example of a + +482 +00:43:33,080 --> 00:43:34,120 +stratified family. + +483 +00:43:58,810 --> 00:44:05,790 +step one. So again imagine we have IUG population + +484 +00:44:05,790 --> 00:44:11,490 +into two or more subgroups. So there are two or + +485 +00:44:11,490 --> 00:44:16,010 +more. 
It depends on the characteristic you are + +486 +00:44:16,010 --> 00:44:19,690 +using. So divide population into two or more + +487 +00:44:19,690 --> 00:44:24,210 +subgroups according to some common characteristic. + +488 +00:44:24,730 --> 00:44:30,280 +For example suppose I want to divide the student + +489 +00:44:30,280 --> 00:44:32,080 +into gender. + +490 +00:44:34,100 --> 00:44:38,840 +So males or females. So I have two strata. One is + +491 +00:44:38,840 --> 00:44:43,000 +called males and the other is females. Now suppose + +492 +00:44:43,000 --> 00:44:47,460 +the characteristic I am going to use is the levels + +493 +00:44:47,460 --> 00:44:51,500 +of a student. First level, second, third, fourth, + +494 +00:44:51,800 --> 00:44:56,280 +and so on. So number of strata here depends on + +495 +00:44:56,280 --> 00:45:00,380 +actually the characteristic you are interested in. + +496 +00:45:00,780 --> 00:45:04,860 +Let's use the simple one that is gender. So here + +497 +00:45:04,860 --> 00:45:12,360 +we have females. So IUV students divided into two + +498 +00:45:12,360 --> 00:45:18,560 +types, strata, or two groups, females and males. + +499 +00:45:19,200 --> 00:45:22,870 +So this is the first step. So at least you should + +500 +00:45:22,870 --> 00:45:26,750 +have two groups or two subgroups. So we have IELTS + +501 +00:45:26,750 --> 00:45:29,630 +student, the entire population, and that + +502 +00:45:29,630 --> 00:45:34,370 +population divided into two subgroups. Next, + +503 +00:45:35,650 --> 00:45:39,730 +assemble random samples. Keep careful here with + +504 +00:45:39,730 --> 00:45:45,770 +sample sizes proportional to strata sizes. That + +505 +00:45:45,770 --> 00:45:57,890 +means suppose I know that Female consists + +506 +00:45:57,890 --> 00:46:02,470 +of + +507 +00:46:02,470 --> 00:46:09,770 +70% of Irish students and + +508 +00:46:09,770 --> 00:46:11,490 +males 30%. 
+ +509 +00:46:15,410 --> 00:46:17,950 +the sample size we are talking about here is for + +510 +00:46:17,950 --> 00:46:21,550 +example is a thousand so I want to select a sample + +511 +00:46:21,550 --> 00:46:24,990 +of a thousand seed from the registration office or + +512 +00:46:24,990 --> 00:46:31,190 +my information about that is males represent 30% + +513 +00:46:31,190 --> 00:46:37,650 +females represent 70% so in this case your sample + +514 +00:46:37,650 --> 00:46:43,650 +structure should be 70% times + +515 +00:46:50,090 --> 00:46:59,090 +So the first + +516 +00:46:59,090 --> 00:47:03,750 +group should have 700 items of students and the + +517 +00:47:03,750 --> 00:47:06,490 +other one is 300,000. + +518 +00:47:09,230 --> 00:47:11,650 +So this is the second step. + +519 +00:47:14,420 --> 00:47:17,740 +Sample sizes are determined in step number two. + +520 +00:47:18,540 --> 00:47:22,200 +Now, how can you select the 700 females here? + +521 +00:47:23,660 --> 00:47:26,180 +Again, you have to go back to the random table. + +522 +00:47:27,480 --> 00:47:31,660 +Samples from subgroups are compiled into one. Then + +523 +00:47:31,660 --> 00:47:39,600 +you can use symbol random sample. So here, 700. I + +524 +00:47:39,600 --> 00:47:45,190 +have, for example, 70% females. And I know that I + +525 +00:47:45,190 --> 00:47:51,370 +use student help. I have ideas numbers from 1 up + +526 +00:47:51,370 --> 00:47:59,070 +to 7, 14. Then by using simple random, simple + +527 +00:47:59,070 --> 00:48:01,070 +random table, you can. + +528 +00:48:09,490 --> 00:48:15,190 +So if you go back to the table, the first item, + +529 +00:48:16,650 --> 00:48:23,130 +now look at five digits. Nineteen is not selected. + +530 +00:48:24,830 --> 00:48:27,510 +Nineteen. I have, the maximum is fourteen + +531 +00:48:27,510 --> 00:48:31,890 +thousand. So skip one and two. The first item is + +532 +00:48:31,890 --> 00:48:37,850 +seven hundred and fifty-six. The first item. 
Next + +533 +00:48:37,850 --> 00:48:43,480 +is not chosen. Next is not chosen. Number six. + +534 +00:48:43,740 --> 00:48:44,580 +Twelve. + +535 +00:48:47,420 --> 00:48:50,620 +Zero. Unsure. + +536 +00:48:52,880 --> 00:48:58,940 +So here we divide the population into two groups + +537 +00:48:58,940 --> 00:49:03,440 +or two subgroups, females and males. And we select + +538 +00:49:03,440 --> 00:49:07,020 +a random sample of size 700 based on the + +539 +00:49:07,020 --> 00:49:10,850 +proportion of this subgroup. Then we are using the + +540 +00:49:10,850 --> 00:49:16,750 +simple random table to take the 700 females. + +541 +00:49:22,090 --> 00:49:29,810 +Now for this example, there are 16 items or 16 + +542 +00:49:29,810 --> 00:49:35,030 +students in each group. And he select randomly + +543 +00:49:35,030 --> 00:49:40,700 +number three, number 9, number 13, and so on. So + +544 +00:49:40,700 --> 00:49:44,140 +it's a random selection. Another example. + +545 +00:49:46,820 --> 00:49:52,420 +Suppose again we are talking about all IUVs. + +546 +00:50:02,780 --> 00:50:09,360 +Here I divided the population according to the + +547 +00:50:09,360 --> 00:50:17,680 +students' levels. Level one, level two, three + +548 +00:50:17,680 --> 00:50:18,240 +levels. + +549 +00:50:25,960 --> 00:50:28,300 +One, two, three and four. + +550 +00:50:32,240 --> 00:50:39,710 +So I divide the population into four subgroups + +551 +00:50:39,710 --> 00:50:43,170 +according to the student levels. So one, two, + +552 +00:50:43,290 --> 00:50:48,030 +three, and four. Now, a simple random sample is + +553 +00:50:48,030 --> 00:50:52,070 +selected from each subgroup with sample sizes + +554 +00:50:52,070 --> 00:50:57,670 +proportional to strata size. Imagine that level + +555 +00:50:57,670 --> 00:51:04,950 +number one represents 40% of the students. Level + +556 +00:51:04,950 --> 00:51:17,630 +2, 20%. Level 3, 30%. Just + +557 +00:51:17,630 --> 00:51:22,850 +an example. To make more sense? 
+ +558 +00:51:34,990 --> 00:51:36,070 +My sample size? + +559 +00:51:38,750 --> 00:51:39,910 +3, + +560 +00:51:41,910 --> 00:51:46,430 +9, 15, 4, sorry. + +561 +00:51:53,290 --> 00:52:00,470 +So here, there are four levels. And the + +562 +00:52:00,470 --> 00:52:04,370 +proportions are 48 + +563 +00:52:06,670 --> 00:52:17,190 +sample size is 500 so the sample for each strata + +564 +00:52:17,190 --> 00:52:31,190 +will be number 1 40% times 500 gives 200 the next + +565 +00:52:31,190 --> 00:52:32,950 +150 + +566 +00:52:36,200 --> 00:52:42,380 +And so on. Now, how can we choose the 200 from + +567 +00:52:42,380 --> 00:52:46,280 +level number one? Again, we have to choose the + +568 +00:52:46,280 --> 00:52:55,540 +random table. Now, 40% from this number, it means + +569 +00:52:55,540 --> 00:52:59,620 +5 + +570 +00:52:59,620 --> 00:53:06,400 +,000. This one has 5,000. 600 females students. + +571 +00:53:07,720 --> 00:53:13,480 +Because 40% of females in level 1. And I know that + +572 +00:53:13,480 --> 00:53:17,780 +the total number of females is 14,000. So number + +573 +00:53:17,780 --> 00:53:23,420 +of females in the first level is 5600. How many + +574 +00:53:23,420 --> 00:53:28,040 +digits we have? Four digits. The first one, 001, + +575 +00:53:28,160 --> 00:53:34,460 +all the way up to 560. If you go back, into a + +576 +00:53:34,460 --> 00:53:39,520 +random table, take five, four digits. So the first + +577 +00:53:39,520 --> 00:53:43,340 +number is 1922. + +578 +00:53:43,980 --> 00:53:48,000 +Next is 3950. + +579 +00:53:50,140 --> 00:53:54,760 +And so on. So that's the way how can we choose + +580 +00:53:54,760 --> 00:53:58,640 +stratified samples. + +581 +00:54:02,360 --> 00:54:08,240 +Next, the last one is called clusters. And let's + +582 +00:54:08,240 --> 00:54:11,400 +see now what's the difference between stratified + +583 +00:54:11,400 --> 00:54:16,500 +and cluster. Step one. + +584 +00:54:25,300 --> 00:54:31,720 +Population is divided into some clusters. 
+ +585 +00:54:35,000 --> 00:54:41,160 +Step two, assemble one by assembling clusters + +586 +00:54:41,160 --> 00:54:42,740 +selective. + +587 +00:54:46,100 --> 00:54:48,640 +Here suppose how many clusters? + +588 +00:54:53,560 --> 00:54:58,080 +16 clusters. So there are, so the population has + +589 +00:55:19,310 --> 00:55:25,820 +Step two, you have to choose a simple random + +590 +00:55:25,820 --> 00:55:31,440 +number of clusters out of 16. Suppose I decided to + +591 +00:55:31,440 --> 00:55:38,300 +choose three among these. So we have 16 clusters. + +592 +00:55:45,340 --> 00:55:49,780 +For example, I chose cluster number 411. + +593 +00:55:51,640 --> 00:56:01,030 +So I choose these clusters. Next, all items in the + +594 +00:56:01,030 --> 00:56:02,910 +selected clusters can be used. + +595 +00:56:09,130 --> 00:56:15,770 +Or items + +596 +00:56:15,770 --> 00:56:18,910 +can be chosen from a cluster using another + +597 +00:56:18,910 --> 00:56:21,130 +probability sampling technique. For example, + +598 +00:56:23,190 --> 00:56:28,840 +imagine that We are talking about students who + +599 +00:56:28,840 --> 00:56:31,460 +registered for accounting. + +600 +00:56:45,880 --> 00:56:50,540 +Imagine that we have six sections for accounting. + +601 +00:56:55,850 --> 00:56:56,650 +six sections. + +602 +00:57:00,310 --> 00:57:05,210 +And I just choose two of these, cluster number one + +603 +00:57:05,210 --> 00:57:08,910 +or section number one and the last one. So my + +604 +00:57:08,910 --> 00:57:12,590 +chosen clusters are number one and six, one and + +605 +00:57:12,590 --> 00:57:19,090 +six. Or you can use the one we just talked about, + +606 +00:57:19,590 --> 00:57:23,340 +stratified random sample. 
instead of using all for + +607 +00:57:23,340 --> 00:57:29,140 +example suppose there are in this section there + +608 +00:57:29,140 --> 00:57:36,180 +are 73 models and the other one there are 80 + +609 +00:57:36,180 --> 00:57:42,300 +models and + +610 +00:57:42,300 --> 00:57:46,720 +the sample size here I am going to use case 20 + +611 +00:57:50,900 --> 00:57:56,520 +So you can use 10 here and 10 in the other one, or + +612 +00:57:56,520 --> 00:58:03,060 +it depends on the proportions. Now, 70 represents + +613 +00:58:03,060 --> 00:58:09,580 +70 out of 150, because there are 150 students in + +614 +00:58:09,580 --> 00:58:14,060 +these two clusters. Now, the entire population is + +615 +00:58:14,060 --> 00:58:17,300 +not the number for each of all of these clusters, + +616 +00:58:17,560 --> 00:58:22,310 +just number one sixth. So there are 150 students + +617 +00:58:22,310 --> 00:58:25,090 +in these two selected clusters. So the population + +618 +00:58:25,090 --> 00:58:30,030 +size is 150. Make sense? Then the proportion here + +619 +00:58:30,030 --> 00:58:33,210 +is 700 divided by 150 times 20. + +620 +00:58:35,970 --> 00:58:41,610 +The other one, 80 divided by 150 times 20. + +621 +00:58:51,680 --> 00:58:55,960 +So again, all items in the selected clusters can + +622 +00:58:55,960 --> 00:58:59,400 +be used or items can be chosen from the cluster + +623 +00:58:59,400 --> 00:59:01,500 +using another probability technique as we + +624 +00:59:01,500 --> 00:59:06,640 +mentioned. Let's see how can we use another + +625 +00:59:06,640 --> 00:59:10,860 +example. Let's talk about again AUG students. + +626 +00:59:28,400 --> 00:59:31,800 +I choose suppose level number 2 and level number + +627 +00:59:31,800 --> 00:59:37,680 +4, two levels, 2 and 4. 
Then you can take either + +628 +00:59:37,680 --> 00:59:43,380 +all the students here or just assemble size + +629 +00:59:43,380 --> 00:59:46,460 +proportion to the + +630 +00:59:50,310 --> 00:59:54,130 +For example, this one represents 20%, and my + +631 +00:59:54,130 --> 00:59:56,730 +sample size is 1000, so in this case you have to + +632 +00:59:56,730 --> 01:00:00,310 +take 200 and 800 from that one. + +633 +01:00:03,050 --> 01:00:04,050 +Any questions? + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..94d185ab488ea759ee504526a3cf7b7317aed34e --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 2111, "start": 4.67, "end": 21.11, "text": " Today, Inshallah, we are going to start Chapter 7. Chapter 7 talks about sampling and sampling distributions. The objectives for this chapter", "tokens": [2692, 11, 682, 2716, 13492, 11, 321, 366, 516, 281, 722, 18874, 1614, 13, 18874, 1614, 6686, 466, 21179, 293, 21179, 37870, 13, 440, 15961, 337, 341, 7187], "avg_logprob": -0.25700432267682305, "compression_ratio": 1.2792792792792793, "no_speech_prob": 7.748603820800781e-07, "words": [{"start": 4.669999999999998, "end": 5.55, "word": " Today,", "probability": 0.56005859375}, {"start": 5.69, "end": 5.81, "word": " Inshallah,", "probability": 0.6656087239583334}, {"start": 5.89, "end": 5.99, "word": " we", "probability": 0.953125}, {"start": 5.99, "end": 6.09, "word": " are", "probability": 0.90234375}, {"start": 6.09, "end": 6.31, "word": " going", "probability": 0.94677734375}, {"start": 6.31, "end": 6.51, "word": " to", "probability": 0.96923828125}, {"start": 6.51, "end": 7.01, "word": " start", "probability": 0.92041015625}, {"start": 7.01, "end": 7.77, "word": " Chapter", "probability": 0.40673828125}, {"start": 7.77, "end": 8.21, "word": " 7.", 
"probability": 0.67333984375}, {"start": 9.83, "end": 10.53, "word": " Chapter", "probability": 0.84765625}, {"start": 10.53, "end": 10.83, "word": " 7", "probability": 0.98828125}, {"start": 10.83, "end": 11.17, "word": " talks", "probability": 0.87744140625}, {"start": 11.17, "end": 12.59, "word": " about", "probability": 0.90966796875}, {"start": 12.59, "end": 13.89, "word": " sampling", "probability": 0.6875}, {"start": 13.89, "end": 14.47, "word": " and", "probability": 0.904296875}, {"start": 14.47, "end": 14.91, "word": " sampling", "probability": 0.8125}, {"start": 14.91, "end": 16.07, "word": " distributions.", "probability": 0.87158203125}, {"start": 17.69, "end": 18.01, "word": " The", "probability": 0.87744140625}, {"start": 18.01, "end": 18.75, "word": " objectives", "probability": 0.81396484375}, {"start": 18.75, "end": 20.39, "word": " for", "probability": 0.9404296875}, {"start": 20.39, "end": 20.71, "word": " this", "probability": 0.939453125}, {"start": 20.71, "end": 21.11, "word": " chapter", "probability": 0.830078125}], "temperature": 1.0}, {"id": 2, "seek": 3475, "start": 22.31, "end": 34.75, "text": " are number one we have different methods actually we have two methods probability and non-probability samples and we are going to distinguish between", "tokens": [366, 1230, 472, 321, 362, 819, 7150, 767, 321, 362, 732, 7150, 8482, 293, 2107, 12, 41990, 2310, 10938, 293, 321, 366, 516, 281, 20206, 1296], "avg_logprob": -0.2388599537037037, "compression_ratio": 1.4285714285714286, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 22.31, "end": 22.69, "word": " are", "probability": 0.306396484375}, {"start": 22.69, "end": 22.93, "word": " number", "probability": 0.65576171875}, {"start": 22.93, "end": 23.23, "word": " one", "probability": 0.84130859375}, {"start": 23.23, "end": 24.59, "word": " we", "probability": 0.4638671875}, {"start": 24.59, "end": 24.85, "word": " have", "probability": 0.95068359375}, {"start": 24.85, "end": 25.31, 
"word": " different", "probability": 0.8857421875}, {"start": 25.31, "end": 26.15, "word": " methods", "probability": 0.40185546875}, {"start": 26.15, "end": 27.37, "word": " actually", "probability": 0.64453125}, {"start": 27.37, "end": 27.61, "word": " we", "probability": 0.92626953125}, {"start": 27.61, "end": 27.79, "word": " have", "probability": 0.95166015625}, {"start": 27.79, "end": 28.53, "word": " two", "probability": 0.93359375}, {"start": 28.53, "end": 28.87, "word": " methods", "probability": 0.89599609375}, {"start": 28.87, "end": 29.39, "word": " probability", "probability": 0.916015625}, {"start": 29.39, "end": 30.71, "word": " and", "probability": 0.93798828125}, {"start": 30.71, "end": 30.91, "word": " non", "probability": 0.794921875}, {"start": 30.91, "end": 31.33, "word": "-probability", "probability": 0.762451171875}, {"start": 31.33, "end": 31.81, "word": " samples", "probability": 0.7978515625}, {"start": 31.81, "end": 32.85, "word": " and", "probability": 0.9013671875}, {"start": 32.85, "end": 32.99, "word": " we", "probability": 0.96435546875}, {"start": 32.99, "end": 33.13, "word": " are", "probability": 0.94384765625}, {"start": 33.13, "end": 33.35, "word": " going", "probability": 0.94580078125}, {"start": 33.35, "end": 33.55, "word": " to", "probability": 0.97412109375}, {"start": 33.55, "end": 33.99, "word": " distinguish", "probability": 0.9052734375}, {"start": 33.99, "end": 34.75, "word": " between", "probability": 0.8779296875}], "temperature": 1.0}, {"id": 3, "seek": 6026, "start": 35.42, "end": 60.26, "text": " these two sampling methods. So again, in this chapter, we will talk about two different sampling methods. One is called probability sampling and the other is non-probability sampling. Our goal is to distinguish between these two different sampling methods. 
The other learning objective will be", "tokens": [613, 732, 21179, 7150, 13, 407, 797, 11, 294, 341, 7187, 11, 321, 486, 751, 466, 732, 819, 21179, 7150, 13, 1485, 307, 1219, 8482, 21179, 293, 264, 661, 307, 2107, 12, 41990, 2310, 21179, 13, 2621, 3387, 307, 281, 20206, 1296, 613, 732, 819, 21179, 7150, 13, 440, 661, 2539, 10024, 486, 312], "avg_logprob": -0.1663352218541232, "compression_ratio": 1.8148148148148149, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 35.42, "end": 36.04, "word": " these", "probability": 0.513671875}, {"start": 36.04, "end": 36.9, "word": " two", "probability": 0.88037109375}, {"start": 36.9, "end": 37.22, "word": " sampling", "probability": 0.9365234375}, {"start": 37.22, "end": 37.74, "word": " methods.", "probability": 0.90869140625}, {"start": 38.28, "end": 38.96, "word": " So", "probability": 0.86474609375}, {"start": 38.96, "end": 39.32, "word": " again,", "probability": 0.81982421875}, {"start": 39.96, "end": 40.32, "word": " in", "probability": 0.93017578125}, {"start": 40.32, "end": 40.7, "word": " this", "probability": 0.94775390625}, {"start": 40.7, "end": 41.08, "word": " chapter,", "probability": 0.85888671875}, {"start": 41.48, "end": 41.58, "word": " we", "probability": 0.958984375}, {"start": 41.58, "end": 42.1, "word": " will", "probability": 0.39404296875}, {"start": 42.1, "end": 42.36, "word": " talk", "probability": 0.890625}, {"start": 42.36, "end": 42.84, "word": " about", "probability": 0.90576171875}, {"start": 42.84, "end": 44.06, "word": " two", "probability": 0.91259765625}, {"start": 44.06, "end": 44.58, "word": " different", "probability": 0.87646484375}, {"start": 44.58, "end": 44.98, "word": " sampling", "probability": 0.93310546875}, {"start": 44.98, "end": 45.4, "word": " methods.", "probability": 0.88427734375}, {"start": 46.56, "end": 46.84, "word": " One", "probability": 0.91015625}, {"start": 46.84, "end": 47.0, "word": " is", "probability": 0.884765625}, {"start": 47.0, "end": 47.34, 
"word": " called", "probability": 0.87646484375}, {"start": 47.34, "end": 47.86, "word": " probability", "probability": 0.798828125}, {"start": 47.86, "end": 49.22, "word": " sampling", "probability": 0.955078125}, {"start": 49.22, "end": 49.48, "word": " and", "probability": 0.642578125}, {"start": 49.48, "end": 49.58, "word": " the", "probability": 0.85107421875}, {"start": 49.58, "end": 49.74, "word": " other", "probability": 0.8896484375}, {"start": 49.74, "end": 49.94, "word": " is", "probability": 0.9296875}, {"start": 49.94, "end": 50.14, "word": " non", "probability": 0.84521484375}, {"start": 50.14, "end": 50.64, "word": "-probability", "probability": 0.8263346354166666}, {"start": 50.64, "end": 51.02, "word": " sampling.", "probability": 0.96728515625}, {"start": 52.14, "end": 52.42, "word": " Our", "probability": 0.95947265625}, {"start": 52.42, "end": 52.66, "word": " goal", "probability": 0.96142578125}, {"start": 52.66, "end": 52.94, "word": " is", "probability": 0.9443359375}, {"start": 52.94, "end": 53.5, "word": " to", "probability": 0.953125}, {"start": 53.5, "end": 54.24, "word": " distinguish", "probability": 0.896484375}, {"start": 54.24, "end": 55.16, "word": " between", "probability": 0.86962890625}, {"start": 55.16, "end": 55.6, "word": " these", "probability": 0.85693359375}, {"start": 55.6, "end": 55.98, "word": " two", "probability": 0.93505859375}, {"start": 55.98, "end": 56.52, "word": " different", "probability": 0.8515625}, {"start": 56.52, "end": 56.9, "word": " sampling", "probability": 0.92236328125}, {"start": 56.9, "end": 57.3, "word": " methods.", "probability": 0.8916015625}, {"start": 57.98, "end": 58.1, "word": " The", "probability": 0.880859375}, {"start": 58.1, "end": 58.38, "word": " other", "probability": 0.89599609375}, {"start": 58.38, "end": 58.86, "word": " learning", "probability": 0.95166015625}, {"start": 58.86, "end": 59.28, "word": " objective", "probability": 0.9462890625}, {"start": 59.28, "end": 59.96, "word": 
" will", "probability": 0.8681640625}, {"start": 59.96, "end": 60.26, "word": " be", "probability": 0.95361328125}], "temperature": 1.0}, {"id": 4, "seek": 8474, "start": 61.44, "end": 84.74, "text": " We'll talk about the concept of the sampling distribution. That will be next time, inshallah. The third objective is compute probabilities related to sample mean. In addition to that, we'll talk about how can we compute probabilities regarding the sample proportion. And as I mentioned last time,", "tokens": [492, 603, 751, 466, 264, 3410, 295, 264, 21179, 7316, 13, 663, 486, 312, 958, 565, 11, 1028, 71, 13492, 13, 440, 2636, 10024, 307, 14722, 33783, 4077, 281, 6889, 914, 13, 682, 4500, 281, 300, 11, 321, 603, 751, 466, 577, 393, 321, 14722, 33783, 8595, 264, 6889, 16068, 13, 400, 382, 286, 2835, 1036, 565, 11], "avg_logprob": -0.1720074107081203, "compression_ratio": 1.6318681318681318, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 61.44, "end": 61.76, "word": " We'll", "probability": 0.705322265625}, {"start": 61.76, "end": 61.92, "word": " talk", "probability": 0.8974609375}, {"start": 61.92, "end": 62.26, "word": " about", "probability": 0.90234375}, {"start": 62.26, "end": 62.94, "word": " the", "probability": 0.91845703125}, {"start": 62.94, "end": 63.54, "word": " concept", "probability": 0.8701171875}, {"start": 63.54, "end": 64.2, "word": " of", "probability": 0.96484375}, {"start": 64.2, "end": 64.4, "word": " the", "probability": 0.8876953125}, {"start": 64.4, "end": 64.62, "word": " sampling", "probability": 0.5771484375}, {"start": 64.62, "end": 65.26, "word": " distribution.", "probability": 0.86083984375}, {"start": 65.8, "end": 66.04, "word": " That", "probability": 0.8916015625}, {"start": 66.04, "end": 66.18, "word": " will", "probability": 0.8779296875}, {"start": 66.18, "end": 66.3, "word": " be", "probability": 0.955078125}, {"start": 66.3, "end": 66.5, "word": " next", "probability": 0.90185546875}, {"start": 66.5, "end": 66.7, 
"word": " time,", "probability": 0.8759765625}, {"start": 66.8, "end": 66.96, "word": " inshallah.", "probability": 0.7039388020833334}, {"start": 67.68, "end": 68.14, "word": " The", "probability": 0.89794921875}, {"start": 68.14, "end": 68.42, "word": " third", "probability": 0.9111328125}, {"start": 68.42, "end": 68.84, "word": " objective", "probability": 0.96630859375}, {"start": 68.84, "end": 69.5, "word": " is", "probability": 0.93505859375}, {"start": 69.5, "end": 69.96, "word": " compute", "probability": 0.293212890625}, {"start": 69.96, "end": 70.58, "word": " probabilities", "probability": 0.92529296875}, {"start": 70.58, "end": 71.16, "word": " related", "probability": 0.9375}, {"start": 71.16, "end": 72.08, "word": " to", "probability": 0.97119140625}, {"start": 72.08, "end": 73.06, "word": " sample", "probability": 0.810546875}, {"start": 73.06, "end": 73.42, "word": " mean.", "probability": 0.8037109375}, {"start": 74.9, "end": 75.12, "word": " In", "probability": 0.966796875}, {"start": 75.12, "end": 75.48, "word": " addition", "probability": 0.958984375}, {"start": 75.48, "end": 75.68, "word": " to", "probability": 0.96923828125}, {"start": 75.68, "end": 76.04, "word": " that,", "probability": 0.9365234375}, {"start": 76.12, "end": 76.36, "word": " we'll", "probability": 0.929931640625}, {"start": 76.36, "end": 76.6, "word": " talk", "probability": 0.90185546875}, {"start": 76.6, "end": 76.98, "word": " about", "probability": 0.8994140625}, {"start": 76.98, "end": 77.24, "word": " how", "probability": 0.888671875}, {"start": 77.24, "end": 77.44, "word": " can", "probability": 0.92138671875}, {"start": 77.44, "end": 77.6, "word": " we", "probability": 0.9609375}, {"start": 77.6, "end": 78.16, "word": " compute", "probability": 0.90673828125}, {"start": 78.16, "end": 79.48, "word": " probabilities", "probability": 0.83984375}, {"start": 79.48, "end": 80.36, "word": " regarding", "probability": 0.90185546875}, {"start": 80.36, "end": 80.68, "word": " 
the", "probability": 0.92138671875}, {"start": 80.68, "end": 80.96, "word": " sample", "probability": 0.7890625}, {"start": 80.96, "end": 81.46, "word": " proportion.", "probability": 0.705078125}, {"start": 82.68, "end": 82.92, "word": " And", "probability": 0.89599609375}, {"start": 82.92, "end": 83.1, "word": " as", "probability": 0.9052734375}, {"start": 83.1, "end": 83.66, "word": " I", "probability": 0.740234375}, {"start": 83.66, "end": 83.98, "word": " mentioned", "probability": 0.826171875}, {"start": 83.98, "end": 84.32, "word": " last", "probability": 0.8603515625}, {"start": 84.32, "end": 84.74, "word": " time,", "probability": 0.88232421875}], "temperature": 1.0}, {"id": 5, "seek": 11365, "start": 86.03, "end": 113.65, "text": " There are two types of data. One is called the numerical data. In this case, we can use the sample mean. The other type is called qualitative data. And in this case, we have to use the sample proportion. So for this chapter, we are going to discuss how can we compute the probabilities for each one, either the sample mean or the sample proportion. 
The last objective of this chapter is to use", "tokens": [821, 366, 732, 3467, 295, 1412, 13, 1485, 307, 1219, 264, 29054, 1412, 13, 682, 341, 1389, 11, 321, 393, 764, 264, 6889, 914, 13, 440, 661, 2010, 307, 1219, 31312, 1412, 13, 400, 294, 341, 1389, 11, 321, 362, 281, 764, 264, 6889, 16068, 13, 407, 337, 341, 7187, 11, 321, 366, 516, 281, 2248, 577, 393, 321, 14722, 264, 33783, 337, 1184, 472, 11, 2139, 264, 6889, 914, 420, 264, 6889, 16068, 13, 440, 1036, 10024, 295, 341, 7187, 307, 281, 764], "avg_logprob": -0.14604778920902925, "compression_ratio": 1.867298578199052, "no_speech_prob": 0.0, "words": [{"start": 86.03, "end": 86.33, "word": " There", "probability": 0.54248046875}, {"start": 86.33, "end": 86.49, "word": " are", "probability": 0.94287109375}, {"start": 86.49, "end": 86.67, "word": " two", "probability": 0.8994140625}, {"start": 86.67, "end": 86.97, "word": " types", "probability": 0.8134765625}, {"start": 86.97, "end": 87.13, "word": " of", "probability": 0.97265625}, {"start": 87.13, "end": 87.39, "word": " data.", "probability": 0.939453125}, {"start": 87.97, "end": 88.15, "word": " One", "probability": 0.91650390625}, {"start": 88.15, "end": 88.29, "word": " is", "probability": 0.939453125}, {"start": 88.29, "end": 88.55, "word": " called", "probability": 0.87646484375}, {"start": 88.55, "end": 88.67, "word": " the", "probability": 0.420654296875}, {"start": 88.67, "end": 88.93, "word": " numerical", "probability": 0.88330078125}, {"start": 88.93, "end": 89.37, "word": " data.", "probability": 0.9462890625}, {"start": 89.85, "end": 90.07, "word": " In", "probability": 0.94677734375}, {"start": 90.07, "end": 90.27, "word": " this", "probability": 0.94775390625}, {"start": 90.27, "end": 90.49, "word": " case,", "probability": 0.916015625}, {"start": 90.53, "end": 90.65, "word": " we", "probability": 0.94775390625}, {"start": 90.65, "end": 90.85, "word": " can", "probability": 0.9453125}, {"start": 90.85, "end": 91.07, "word": " use", 
"probability": 0.88720703125}, {"start": 91.07, "end": 91.25, "word": " the", "probability": 0.9228515625}, {"start": 91.25, "end": 91.47, "word": " sample", "probability": 0.83251953125}, {"start": 91.47, "end": 91.79, "word": " mean.", "probability": 0.94873046875}, {"start": 92.61, "end": 92.97, "word": " The", "probability": 0.87939453125}, {"start": 92.97, "end": 93.19, "word": " other", "probability": 0.89404296875}, {"start": 93.19, "end": 93.47, "word": " type", "probability": 0.9677734375}, {"start": 93.47, "end": 93.63, "word": " is", "probability": 0.94287109375}, {"start": 93.63, "end": 93.85, "word": " called", "probability": 0.8759765625}, {"start": 93.85, "end": 94.33, "word": " qualitative", "probability": 0.8974609375}, {"start": 94.33, "end": 94.91, "word": " data.", "probability": 0.93408203125}, {"start": 95.63, "end": 95.97, "word": " And", "probability": 0.91943359375}, {"start": 95.97, "end": 96.11, "word": " in", "probability": 0.8876953125}, {"start": 96.11, "end": 96.29, "word": " this", "probability": 0.947265625}, {"start": 96.29, "end": 96.49, "word": " case,", "probability": 0.92041015625}, {"start": 96.55, "end": 96.63, "word": " we", "probability": 0.9501953125}, {"start": 96.63, "end": 96.83, "word": " have", "probability": 0.9462890625}, {"start": 96.83, "end": 96.93, "word": " to", "probability": 0.9677734375}, {"start": 96.93, "end": 97.19, "word": " use", "probability": 0.89013671875}, {"start": 97.19, "end": 97.35, "word": " the", "probability": 0.89111328125}, {"start": 97.35, "end": 97.59, "word": " sample", "probability": 0.2841796875}, {"start": 97.59, "end": 98.03, "word": " proportion.", "probability": 0.66845703125}, {"start": 98.61, "end": 98.87, "word": " So", "probability": 0.94921875}, {"start": 98.87, "end": 99.11, "word": " for", "probability": 0.7763671875}, {"start": 99.11, "end": 99.33, "word": " this", "probability": 0.94189453125}, {"start": 99.33, "end": 99.61, "word": " chapter,", "probability": 0.87890625}, 
{"start": 99.67, "end": 99.77, "word": " we", "probability": 0.9599609375}, {"start": 99.77, "end": 99.89, "word": " are", "probability": 0.923828125}, {"start": 99.89, "end": 100.17, "word": " going", "probability": 0.94677734375}, {"start": 100.17, "end": 100.55, "word": " to", "probability": 0.97021484375}, {"start": 100.55, "end": 101.07, "word": " discuss", "probability": 0.9072265625}, {"start": 101.07, "end": 101.31, "word": " how", "probability": 0.8291015625}, {"start": 101.31, "end": 101.53, "word": " can", "probability": 0.90771484375}, {"start": 101.53, "end": 101.69, "word": " we", "probability": 0.95703125}, {"start": 101.69, "end": 102.17, "word": " compute", "probability": 0.890625}, {"start": 102.17, "end": 103.17, "word": " the", "probability": 0.8779296875}, {"start": 103.17, "end": 103.75, "word": " probabilities", "probability": 0.916015625}, {"start": 103.75, "end": 104.25, "word": " for", "probability": 0.94970703125}, {"start": 104.25, "end": 104.55, "word": " each", "probability": 0.93701171875}, {"start": 104.55, "end": 104.85, "word": " one,", "probability": 0.93505859375}, {"start": 105.03, "end": 105.29, "word": " either", "probability": 0.927734375}, {"start": 105.29, "end": 106.37, "word": " the", "probability": 0.908203125}, {"start": 106.37, "end": 106.69, "word": " sample", "probability": 0.86181640625}, {"start": 106.69, "end": 107.03, "word": " mean", "probability": 0.96435546875}, {"start": 107.03, "end": 107.35, "word": " or", "probability": 0.9287109375}, {"start": 107.35, "end": 107.55, "word": " the", "probability": 0.9267578125}, {"start": 107.55, "end": 107.99, "word": " sample", "probability": 0.865234375}, {"start": 107.99, "end": 108.71, "word": " proportion.", "probability": 0.8310546875}, {"start": 109.39, "end": 109.67, "word": " The", "probability": 0.8828125}, {"start": 109.67, "end": 110.09, "word": " last", "probability": 0.89111328125}, {"start": 110.09, "end": 111.49, "word": " objective", "probability": 
0.93310546875}, {"start": 111.49, "end": 112.09, "word": " of", "probability": 0.9677734375}, {"start": 112.09, "end": 112.31, "word": " this", "probability": 0.94091796875}, {"start": 112.31, "end": 112.67, "word": " chapter", "probability": 0.8759765625}, {"start": 112.67, "end": 113.09, "word": " is", "probability": 0.9375}, {"start": 113.09, "end": 113.29, "word": " to", "probability": 0.95751953125}, {"start": 113.29, "end": 113.65, "word": " use", "probability": 0.869140625}], "temperature": 1.0}, {"id": 6, "seek": 13331, "start": 114.67, "end": 133.31, "text": " the central limit theorem which is the famous one of the most famous theorem in this book which is called again CLT central limit theorem and we are going to show what are the what is the importance of this theorem so these are the mainly the four objectives for this chapter", "tokens": [264, 5777, 4948, 20904, 597, 307, 264, 4618, 472, 295, 264, 881, 4618, 20904, 294, 341, 1446, 597, 307, 1219, 797, 12855, 51, 5777, 4948, 20904, 293, 321, 366, 516, 281, 855, 437, 366, 264, 437, 307, 264, 7379, 295, 341, 20904, 370, 613, 366, 264, 8704, 264, 1451, 15961, 337, 341, 7187], "avg_logprob": -0.19053819168497016, "compression_ratio": 1.7922077922077921, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 114.67, "end": 115.27, "word": " the", "probability": 0.218994140625}, {"start": 115.27, "end": 115.77, "word": " central", "probability": 0.61669921875}, {"start": 115.77, "end": 116.05, "word": " limit", "probability": 0.94482421875}, {"start": 116.05, "end": 116.35, "word": " theorem", "probability": 0.8134765625}, {"start": 116.35, "end": 116.55, "word": " which", "probability": 0.490478515625}, {"start": 116.55, "end": 116.63, "word": " is", "probability": 0.93994140625}, {"start": 116.63, "end": 116.77, "word": " the", "probability": 0.5712890625}, {"start": 116.77, "end": 117.15, "word": " famous", "probability": 0.86669921875}, {"start": 117.15, "end": 117.55, "word": " one", 
"probability": 0.54052734375}, {"start": 117.55, "end": 117.73, "word": " of", "probability": 0.95947265625}, {"start": 117.73, "end": 117.93, "word": " the", "probability": 0.92431640625}, {"start": 117.93, "end": 118.19, "word": " most", "probability": 0.9267578125}, {"start": 118.19, "end": 118.63, "word": " famous", "probability": 0.9580078125}, {"start": 118.63, "end": 119.59, "word": " theorem", "probability": 0.7802734375}, {"start": 119.59, "end": 119.77, "word": " in", "probability": 0.91748046875}, {"start": 119.77, "end": 119.97, "word": " this", "probability": 0.94580078125}, {"start": 119.97, "end": 120.31, "word": " book", "probability": 0.95849609375}, {"start": 120.31, "end": 121.37, "word": " which", "probability": 0.75}, {"start": 121.37, "end": 121.51, "word": " is", "probability": 0.95166015625}, {"start": 121.51, "end": 121.73, "word": " called", "probability": 0.912109375}, {"start": 121.73, "end": 122.13, "word": " again", "probability": 0.93798828125}, {"start": 122.13, "end": 123.13, "word": " CLT", "probability": 0.83935546875}, {"start": 123.13, "end": 123.59, "word": " central", "probability": 0.52001953125}, {"start": 123.59, "end": 123.89, "word": " limit", "probability": 0.9482421875}, {"start": 123.89, "end": 124.25, "word": " theorem", "probability": 0.86962890625}, {"start": 124.25, "end": 124.81, "word": " and", "probability": 0.78564453125}, {"start": 124.81, "end": 124.91, "word": " we", "probability": 0.9599609375}, {"start": 124.91, "end": 125.03, "word": " are", "probability": 0.9375}, {"start": 125.03, "end": 125.23, "word": " going", "probability": 0.9453125}, {"start": 125.23, "end": 125.43, "word": " to", "probability": 0.970703125}, {"start": 125.43, "end": 125.69, "word": " show", "probability": 0.9541015625}, {"start": 125.69, "end": 126.07, "word": " what", "probability": 0.935546875}, {"start": 126.07, "end": 126.37, "word": " are", "probability": 0.82275390625}, {"start": 126.37, "end": 126.57, "word": " the", 
"probability": 0.77587890625}, {"start": 126.57, "end": 126.91, "word": " what", "probability": 0.83447265625}, {"start": 126.91, "end": 127.09, "word": " is", "probability": 0.9453125}, {"start": 127.09, "end": 127.27, "word": " the", "probability": 0.92529296875}, {"start": 127.27, "end": 127.71, "word": " importance", "probability": 0.96875}, {"start": 127.71, "end": 128.93, "word": " of", "probability": 0.96337890625}, {"start": 128.93, "end": 129.31, "word": " this", "probability": 0.9482421875}, {"start": 129.31, "end": 129.91, "word": " theorem", "probability": 0.80615234375}, {"start": 129.91, "end": 130.39, "word": " so", "probability": 0.5712890625}, {"start": 130.39, "end": 130.61, "word": " these", "probability": 0.8505859375}, {"start": 130.61, "end": 130.83, "word": " are", "probability": 0.9443359375}, {"start": 130.83, "end": 131.03, "word": " the", "probability": 0.85498046875}, {"start": 131.03, "end": 131.49, "word": " mainly", "probability": 0.9716796875}, {"start": 131.49, "end": 131.75, "word": " the", "probability": 0.85595703125}, {"start": 131.75, "end": 131.93, "word": " four", "probability": 0.93603515625}, {"start": 131.93, "end": 132.31, "word": " objectives", "probability": 0.8671875}, {"start": 132.31, "end": 132.77, "word": " for", "probability": 0.947265625}, {"start": 132.77, "end": 133.01, "word": " this", "probability": 0.9462890625}, {"start": 133.01, "end": 133.31, "word": " chapter", "probability": 0.9052734375}], "temperature": 1.0}, {"id": 7, "seek": 14471, "start": 134.61, "end": 144.71, "text": " Now let's see why we are talking about sampling. 
In other words, most of the time when we are doing study, we are using a sample.", "tokens": [823, 718, 311, 536, 983, 321, 366, 1417, 466, 21179, 13, 682, 661, 2283, 11, 881, 295, 264, 565, 562, 321, 366, 884, 2979, 11, 321, 366, 1228, 257, 6889, 13], "avg_logprob": -0.23974609607830644, "compression_ratio": 1.3, "no_speech_prob": 0.0, "words": [{"start": 134.61, "end": 134.87, "word": " Now", "probability": 0.7314453125}, {"start": 134.87, "end": 135.13, "word": " let's", "probability": 0.783935546875}, {"start": 135.13, "end": 135.35, "word": " see", "probability": 0.67041015625}, {"start": 135.35, "end": 136.47, "word": " why", "probability": 0.8056640625}, {"start": 136.47, "end": 136.61, "word": " we", "probability": 0.81494140625}, {"start": 136.61, "end": 136.75, "word": " are", "probability": 0.86279296875}, {"start": 136.75, "end": 137.09, "word": " talking", "probability": 0.84228515625}, {"start": 137.09, "end": 137.51, "word": " about", "probability": 0.91064453125}, {"start": 137.51, "end": 137.95, "word": " sampling.", "probability": 0.91552734375}, {"start": 138.31, "end": 138.33, "word": " In", "probability": 0.623046875}, {"start": 138.33, "end": 138.49, "word": " other", "probability": 0.88427734375}, {"start": 138.49, "end": 138.91, "word": " words,", "probability": 0.767578125}, {"start": 140.09, "end": 140.27, "word": " most", "probability": 0.89599609375}, {"start": 140.27, "end": 140.39, "word": " of", "probability": 0.96875}, {"start": 140.39, "end": 140.49, "word": " the", "probability": 0.9189453125}, {"start": 140.49, "end": 140.79, "word": " time", "probability": 0.86279296875}, {"start": 140.79, "end": 141.05, "word": " when", "probability": 0.75341796875}, {"start": 141.05, "end": 141.17, "word": " we", "probability": 0.95263671875}, {"start": 141.17, "end": 141.31, "word": " are", "probability": 0.90283203125}, {"start": 141.31, "end": 141.75, "word": " doing", "probability": 0.9267578125}, {"start": 141.75, "end": 142.77, "word": " 
study,", "probability": 0.54931640625}, {"start": 142.97, "end": 143.25, "word": " we", "probability": 0.951171875}, {"start": 143.25, "end": 143.41, "word": " are", "probability": 0.9375}, {"start": 143.41, "end": 143.85, "word": " using", "probability": 0.92724609375}, {"start": 143.85, "end": 144.33, "word": " a", "probability": 0.58056640625}, {"start": 144.33, "end": 144.71, "word": " sample.", "probability": 0.88671875}], "temperature": 1.0}, {"id": 8, "seek": 17314, "start": 145.36, "end": 173.14, "text": " instead of using the entire population. Now there are many reasons behind that. One of these reasons is selecting a sample is less time consuming than selecting every item in the population. I think it makes sense that suppose we have a huge population, that population consists of thousands of items. So that will take more time", "tokens": [2602, 295, 1228, 264, 2302, 4415, 13, 823, 456, 366, 867, 4112, 2261, 300, 13, 1485, 295, 613, 4112, 307, 18182, 257, 6889, 307, 1570, 565, 19867, 813, 18182, 633, 3174, 294, 264, 4415, 13, 286, 519, 309, 1669, 2020, 300, 7297, 321, 362, 257, 2603, 4415, 11, 300, 4415, 14689, 295, 5383, 295, 4754, 13, 407, 300, 486, 747, 544, 565], "avg_logprob": -0.16356646257733543, "compression_ratio": 1.7098445595854923, "no_speech_prob": 0.0, "words": [{"start": 145.36, "end": 145.86, "word": " instead", "probability": 0.77392578125}, {"start": 145.86, "end": 146.36, "word": " of", "probability": 0.96630859375}, {"start": 146.36, "end": 146.82, "word": " using", "probability": 0.93701171875}, {"start": 146.82, "end": 147.06, "word": " the", "probability": 0.87744140625}, {"start": 147.06, "end": 147.42, "word": " entire", "probability": 0.8837890625}, {"start": 147.42, "end": 147.7, "word": " population.", "probability": 0.9697265625}, {"start": 148.64, "end": 148.86, "word": " Now", "probability": 0.87939453125}, {"start": 148.86, "end": 149.02, "word": " there", "probability": 0.5986328125}, {"start": 149.02, "end": 149.24, 
"word": " are", "probability": 0.94091796875}, {"start": 149.24, "end": 149.6, "word": " many", "probability": 0.91357421875}, {"start": 149.6, "end": 150.04, "word": " reasons", "probability": 0.93017578125}, {"start": 150.04, "end": 150.68, "word": " behind", "probability": 0.9521484375}, {"start": 150.68, "end": 151.14, "word": " that.", "probability": 0.904296875}, {"start": 151.76, "end": 151.9, "word": " One", "probability": 0.91748046875}, {"start": 151.9, "end": 152.08, "word": " of", "probability": 0.96875}, {"start": 152.08, "end": 152.26, "word": " these", "probability": 0.83447265625}, {"start": 152.26, "end": 152.94, "word": " reasons", "probability": 0.91943359375}, {"start": 152.94, "end": 153.72, "word": " is", "probability": 0.93798828125}, {"start": 153.72, "end": 156.52, "word": " selecting", "probability": 0.689453125}, {"start": 156.52, "end": 156.82, "word": " a", "probability": 0.38427734375}, {"start": 156.82, "end": 157.04, "word": " sample", "probability": 0.5458984375}, {"start": 157.04, "end": 157.28, "word": " is", "probability": 0.9287109375}, {"start": 157.28, "end": 157.54, "word": " less", "probability": 0.94580078125}, {"start": 157.54, "end": 157.84, "word": " time", "probability": 0.8994140625}, {"start": 157.84, "end": 158.48, "word": " consuming", "probability": 0.63427734375}, {"start": 158.48, "end": 159.4, "word": " than", "probability": 0.8603515625}, {"start": 159.4, "end": 159.98, "word": " selecting", "probability": 0.8935546875}, {"start": 159.98, "end": 160.34, "word": " every", "probability": 0.80517578125}, {"start": 160.34, "end": 160.72, "word": " item", "probability": 0.9521484375}, {"start": 160.72, "end": 160.86, "word": " in", "probability": 0.85400390625}, {"start": 160.86, "end": 160.94, "word": " the", "probability": 0.75048828125}, {"start": 160.94, "end": 161.28, "word": " population.", "probability": 0.7314453125}, {"start": 161.46, "end": 161.56, "word": " I", "probability": 0.9775390625}, {"start": 
161.56, "end": 161.68, "word": " think", "probability": 0.91259765625}, {"start": 161.68, "end": 161.82, "word": " it", "probability": 0.94287109375}, {"start": 161.82, "end": 161.98, "word": " makes", "probability": 0.82373046875}, {"start": 161.98, "end": 162.24, "word": " sense", "probability": 0.82421875}, {"start": 162.24, "end": 162.6, "word": " that", "probability": 0.9208984375}, {"start": 162.6, "end": 163.9, "word": " suppose", "probability": 0.6826171875}, {"start": 163.9, "end": 164.06, "word": " we", "probability": 0.92529296875}, {"start": 164.06, "end": 164.18, "word": " have", "probability": 0.94775390625}, {"start": 164.18, "end": 164.28, "word": " a", "probability": 0.8935546875}, {"start": 164.28, "end": 164.66, "word": " huge", "probability": 0.8935546875}, {"start": 164.66, "end": 165.5, "word": " population,", "probability": 0.9453125}, {"start": 165.64, "end": 165.78, "word": " that", "probability": 0.9345703125}, {"start": 165.78, "end": 166.1, "word": " population", "probability": 0.95166015625}, {"start": 166.1, "end": 166.56, "word": " consists", "probability": 0.8076171875}, {"start": 166.56, "end": 167.2, "word": " of", "probability": 0.97216796875}, {"start": 167.2, "end": 168.38, "word": " thousands", "probability": 0.89892578125}, {"start": 168.38, "end": 169.34, "word": " of", "probability": 0.96923828125}, {"start": 169.34, "end": 170.0, "word": " items.", "probability": 0.82470703125}, {"start": 171.24, "end": 171.46, "word": " So", "probability": 0.9541015625}, {"start": 171.46, "end": 171.64, "word": " that", "probability": 0.88671875}, {"start": 171.64, "end": 171.78, "word": " will", "probability": 0.83544921875}, {"start": 171.78, "end": 172.08, "word": " take", "probability": 0.88525390625}, {"start": 172.08, "end": 172.76, "word": " more", "probability": 0.9384765625}, {"start": 172.76, "end": 173.14, "word": " time", "probability": 0.8935546875}], "temperature": 1.0}, {"id": 9, "seek": 18700, "start": 174.44, "end": 187.0, 
"text": " If you select 100 of their population. So time consuming is very important. So number one, selecting sample is less time consuming than using all the entire population.", "tokens": [759, 291, 3048, 2319, 295, 641, 4415, 13, 407, 565, 19867, 307, 588, 1021, 13, 407, 1230, 472, 11, 18182, 6889, 307, 1570, 565, 19867, 813, 1228, 439, 264, 2302, 4415, 13], "avg_logprob": -0.2509469696969697, "compression_ratio": 1.4322033898305084, "no_speech_prob": 0.0, "words": [{"start": 174.44, "end": 174.72, "word": " If", "probability": 0.564453125}, {"start": 174.72, "end": 174.86, "word": " you", "probability": 0.96435546875}, {"start": 174.86, "end": 175.3, "word": " select", "probability": 0.8623046875}, {"start": 175.3, "end": 177.84, "word": " 100", "probability": 0.344970703125}, {"start": 177.84, "end": 178.16, "word": " of", "probability": 0.89453125}, {"start": 178.16, "end": 178.36, "word": " their", "probability": 0.517578125}, {"start": 178.36, "end": 179.02, "word": " population.", "probability": 0.9169921875}, {"start": 179.86, "end": 179.92, "word": " So", "probability": 0.90625}, {"start": 179.92, "end": 180.22, "word": " time", "probability": 0.568359375}, {"start": 180.22, "end": 180.58, "word": " consuming", "probability": 0.52197265625}, {"start": 180.58, "end": 180.78, "word": " is", "probability": 0.94091796875}, {"start": 180.78, "end": 180.94, "word": " very", "probability": 0.84423828125}, {"start": 180.94, "end": 181.3, "word": " important.", "probability": 0.88037109375}, {"start": 181.5, "end": 181.6, "word": " So", "probability": 0.85595703125}, {"start": 181.6, "end": 181.82, "word": " number", "probability": 0.814453125}, {"start": 181.82, "end": 182.14, "word": " one,", "probability": 0.7060546875}, {"start": 183.0, "end": 183.4, "word": " selecting", "probability": 0.90185546875}, {"start": 183.4, "end": 183.8, "word": " sample", "probability": 0.583984375}, {"start": 183.8, "end": 184.02, "word": " is", "probability": 0.89697265625}, 
{"start": 184.02, "end": 184.22, "word": " less", "probability": 0.92724609375}, {"start": 184.22, "end": 184.54, "word": " time", "probability": 0.892578125}, {"start": 184.54, "end": 185.04, "word": " consuming", "probability": 0.73876953125}, {"start": 185.04, "end": 185.32, "word": " than", "probability": 0.88671875}, {"start": 185.32, "end": 185.78, "word": " using", "probability": 0.94580078125}, {"start": 185.78, "end": 186.1, "word": " all", "probability": 0.826171875}, {"start": 186.1, "end": 186.24, "word": " the", "probability": 0.81787109375}, {"start": 186.24, "end": 186.5, "word": " entire", "probability": 0.91162109375}, {"start": 186.5, "end": 187.0, "word": " population.", "probability": 0.9423828125}], "temperature": 1.0}, {"id": 10, "seek": 21216, "start": 189.0, "end": 212.16, "text": " The second reason, selecting samples is less costly than selecting a variety of population. Because if we have large population, in this case you have to spend more money in order to get the data or the information from that population. So it's better to use these samples. 
The other reason is the analysis.", "tokens": [440, 1150, 1778, 11, 18182, 10938, 307, 1570, 28328, 813, 18182, 257, 5673, 295, 4415, 13, 1436, 498, 321, 362, 2416, 4415, 11, 294, 341, 1389, 291, 362, 281, 3496, 544, 1460, 294, 1668, 281, 483, 264, 1412, 420, 264, 1589, 490, 300, 4415, 13, 407, 309, 311, 1101, 281, 764, 613, 10938, 13, 440, 661, 1778, 307, 264, 5215, 13], "avg_logprob": -0.2029989900127534, "compression_ratio": 1.6296296296296295, "no_speech_prob": 0.0, "words": [{"start": 189.0, "end": 189.3, "word": " The", "probability": 0.81884765625}, {"start": 189.3, "end": 189.74, "word": " second", "probability": 0.8955078125}, {"start": 189.74, "end": 190.28, "word": " reason,", "probability": 0.9765625}, {"start": 190.88, "end": 192.36, "word": " selecting", "probability": 0.8779296875}, {"start": 192.36, "end": 192.78, "word": " samples", "probability": 0.5703125}, {"start": 192.78, "end": 193.06, "word": " is", "probability": 0.60400390625}, {"start": 193.06, "end": 193.3, "word": " less", "probability": 0.4326171875}, {"start": 193.3, "end": 193.62, "word": " costly", "probability": 0.9599609375}, {"start": 193.62, "end": 193.92, "word": " than", "probability": 0.89208984375}, {"start": 193.92, "end": 194.4, "word": " selecting", "probability": 0.88671875}, {"start": 194.4, "end": 194.64, "word": " a", "probability": 0.92919921875}, {"start": 194.64, "end": 194.96, "word": " variety", "probability": 0.69580078125}, {"start": 194.96, "end": 195.12, "word": " of", "probability": 0.943359375}, {"start": 195.12, "end": 195.64, "word": " population.", "probability": 0.734375}, {"start": 196.26, "end": 196.58, "word": " Because", "probability": 0.93212890625}, {"start": 196.58, "end": 196.74, "word": " if", "probability": 0.9306640625}, {"start": 196.74, "end": 196.86, "word": " we", "probability": 0.53173828125}, {"start": 196.86, "end": 197.02, "word": " have", "probability": 0.94677734375}, {"start": 197.02, "end": 197.28, "word": " large", "probability": 
0.89208984375}, {"start": 197.28, "end": 197.7, "word": " population,", "probability": 0.91552734375}, {"start": 197.82, "end": 197.88, "word": " in", "probability": 0.81884765625}, {"start": 197.88, "end": 198.0, "word": " this", "probability": 0.9384765625}, {"start": 198.0, "end": 198.16, "word": " case", "probability": 0.91845703125}, {"start": 198.16, "end": 198.32, "word": " you", "probability": 0.6357421875}, {"start": 198.32, "end": 198.5, "word": " have", "probability": 0.94189453125}, {"start": 198.5, "end": 198.74, "word": " to", "probability": 0.966796875}, {"start": 198.74, "end": 199.16, "word": " spend", "probability": 0.87744140625}, {"start": 199.16, "end": 199.56, "word": " more", "probability": 0.9384765625}, {"start": 199.56, "end": 200.04, "word": " money", "probability": 0.9228515625}, {"start": 200.04, "end": 200.66, "word": " in", "probability": 0.853515625}, {"start": 200.66, "end": 200.86, "word": " order", "probability": 0.9189453125}, {"start": 200.86, "end": 201.1, "word": " to", "probability": 0.9677734375}, {"start": 201.1, "end": 201.46, "word": " get", "probability": 0.94482421875}, {"start": 201.46, "end": 202.32, "word": " the", "probability": 0.9130859375}, {"start": 202.32, "end": 202.68, "word": " data", "probability": 0.91943359375}, {"start": 202.68, "end": 202.96, "word": " or", "probability": 0.8017578125}, {"start": 202.96, "end": 203.08, "word": " the", "probability": 0.86767578125}, {"start": 203.08, "end": 203.54, "word": " information", "probability": 0.853515625}, {"start": 203.54, "end": 203.82, "word": " from", "probability": 0.53466796875}, {"start": 203.82, "end": 204.44, "word": " that", "probability": 0.72900390625}, {"start": 204.44, "end": 204.94, "word": " population.", "probability": 0.9375}, {"start": 205.66, "end": 205.88, "word": " So", "probability": 0.953125}, {"start": 205.88, "end": 206.0, "word": " it's", "probability": 0.9130859375}, {"start": 206.0, "end": 206.22, "word": " better", "probability": 
0.91552734375}, {"start": 206.22, "end": 206.44, "word": " to", "probability": 0.96533203125}, {"start": 206.44, "end": 206.88, "word": " use", "probability": 0.86572265625}, {"start": 206.88, "end": 207.94, "word": " these", "probability": 0.416015625}, {"start": 207.94, "end": 208.34, "word": " samples.", "probability": 0.81494140625}, {"start": 209.2, "end": 209.38, "word": " The", "probability": 0.89306640625}, {"start": 209.38, "end": 209.74, "word": " other", "probability": 0.884765625}, {"start": 209.74, "end": 210.4, "word": " reason", "probability": 0.97216796875}, {"start": 210.4, "end": 210.84, "word": " is", "probability": 0.94677734375}, {"start": 210.84, "end": 211.68, "word": " the", "probability": 0.70849609375}, {"start": 211.68, "end": 212.16, "word": " analysis.", "probability": 0.8564453125}], "temperature": 1.0}, {"id": 11, "seek": 23524, "start": 212.98, "end": 235.24, "text": " Our sample is less cumbersome and more practical than analysis of all items in the population. For these reasons, we have to use a sample. For this reason, we have to talk about sampling methods. 
Let's start with sampling process.", "tokens": [2621, 6889, 307, 1570, 12713, 1616, 423, 293, 544, 8496, 813, 5215, 295, 439, 4754, 294, 264, 4415, 13, 1171, 613, 4112, 11, 321, 362, 281, 764, 257, 6889, 13, 1171, 341, 1778, 11, 321, 362, 281, 751, 466, 21179, 7150, 13, 961, 311, 722, 365, 21179, 1399, 13], "avg_logprob": -0.1303125035762787, "compression_ratio": 1.519736842105263, "no_speech_prob": 0.0, "words": [{"start": 212.98, "end": 213.3, "word": " Our", "probability": 0.65771484375}, {"start": 213.3, "end": 213.76, "word": " sample", "probability": 0.51025390625}, {"start": 213.76, "end": 214.62, "word": " is", "probability": 0.943359375}, {"start": 214.62, "end": 214.98, "word": " less", "probability": 0.9453125}, {"start": 214.98, "end": 215.72, "word": " cumbersome", "probability": 0.9860026041666666}, {"start": 215.72, "end": 216.06, "word": " and", "probability": 0.89697265625}, {"start": 216.06, "end": 216.32, "word": " more", "probability": 0.92529296875}, {"start": 216.32, "end": 216.92, "word": " practical", "probability": 0.92919921875}, {"start": 216.92, "end": 217.26, "word": " than", "probability": 0.92822265625}, {"start": 217.26, "end": 217.66, "word": " analysis", "probability": 0.927734375}, {"start": 217.66, "end": 217.96, "word": " of", "probability": 0.9580078125}, {"start": 217.96, "end": 218.2, "word": " all", "probability": 0.94921875}, {"start": 218.2, "end": 218.66, "word": " items", "probability": 0.84375}, {"start": 218.66, "end": 218.86, "word": " in", "probability": 0.84521484375}, {"start": 218.86, "end": 218.94, "word": " the", "probability": 0.53955078125}, {"start": 218.94, "end": 219.36, "word": " population.", "probability": 0.89697265625}, {"start": 220.36, "end": 220.6, "word": " For", "probability": 0.9384765625}, {"start": 220.6, "end": 220.88, "word": " these", "probability": 0.72412109375}, {"start": 220.88, "end": 221.44, "word": " reasons,", "probability": 0.90966796875}, {"start": 221.84, "end": 223.1, "word": " 
we", "probability": 0.9501953125}, {"start": 223.1, "end": 223.42, "word": " have", "probability": 0.9375}, {"start": 223.42, "end": 223.58, "word": " to", "probability": 0.9697265625}, {"start": 223.58, "end": 223.92, "word": " use", "probability": 0.88818359375}, {"start": 223.92, "end": 224.44, "word": " a", "probability": 0.9638671875}, {"start": 224.44, "end": 224.74, "word": " sample.", "probability": 0.89404296875}, {"start": 225.24, "end": 225.42, "word": " For", "probability": 0.931640625}, {"start": 225.42, "end": 225.62, "word": " this", "probability": 0.71826171875}, {"start": 225.62, "end": 225.82, "word": " reason,", "probability": 0.95556640625}, {"start": 225.88, "end": 225.96, "word": " we", "probability": 0.95654296875}, {"start": 225.96, "end": 226.12, "word": " have", "probability": 0.9287109375}, {"start": 226.12, "end": 226.24, "word": " to", "probability": 0.96630859375}, {"start": 226.24, "end": 226.6, "word": " talk", "probability": 0.890625}, {"start": 226.6, "end": 227.9, "word": " about", "probability": 0.90771484375}, {"start": 227.9, "end": 229.96, "word": " sampling", "probability": 0.94677734375}, {"start": 229.96, "end": 231.96, "word": " methods.", "probability": 0.8671875}, {"start": 232.56, "end": 233.08, "word": " Let's", "probability": 0.933837890625}, {"start": 233.08, "end": 233.46, "word": " start", "probability": 0.92529296875}, {"start": 233.46, "end": 233.84, "word": " with", "probability": 0.8994140625}, {"start": 233.84, "end": 234.54, "word": " sampling", "probability": 0.80615234375}, {"start": 234.54, "end": 235.24, "word": " process.", "probability": 0.9140625}], "temperature": 1.0}, {"id": 12, "seek": 26544, "start": 236.54, "end": 265.44, "text": " That begins with a seminal frame. Now suppose my goal is to know the opinion of IUG students about a certain subject. So my population consists of all IUG students. 
So that's the entire population.", "tokens": [663, 7338, 365, 257, 4361, 2071, 3920, 13, 823, 7297, 452, 3387, 307, 281, 458, 264, 4800, 295, 44218, 38, 1731, 466, 257, 1629, 3983, 13, 407, 452, 4415, 14689, 295, 439, 44218, 38, 1731, 13, 407, 300, 311, 264, 2302, 4415, 13], "avg_logprob": -0.18359374288808217, "compression_ratio": 1.4244604316546763, "no_speech_prob": 0.0, "words": [{"start": 236.54, "end": 236.88, "word": " That", "probability": 0.623046875}, {"start": 236.88, "end": 237.52, "word": " begins", "probability": 0.81884765625}, {"start": 237.52, "end": 238.08, "word": " with", "probability": 0.9033203125}, {"start": 238.08, "end": 238.54, "word": " a", "probability": 0.451171875}, {"start": 238.54, "end": 238.9, "word": " seminal", "probability": 0.5938720703125}, {"start": 238.9, "end": 239.2, "word": " frame.", "probability": 0.68994140625}, {"start": 240.0, "end": 240.3, "word": " Now", "probability": 0.77099609375}, {"start": 240.3, "end": 241.06, "word": " suppose", "probability": 0.416015625}, {"start": 241.06, "end": 242.24, "word": " my", "probability": 0.90478515625}, {"start": 242.24, "end": 242.68, "word": " goal", "probability": 0.939453125}, {"start": 242.68, "end": 243.68, "word": " is", "probability": 0.943359375}, {"start": 243.68, "end": 245.0, "word": " to", "probability": 0.93798828125}, {"start": 245.0, "end": 245.14, "word": " know", "probability": 0.87939453125}, {"start": 245.14, "end": 245.32, "word": " the", "probability": 0.9033203125}, {"start": 245.32, "end": 245.88, "word": " opinion", "probability": 0.91259765625}, {"start": 245.88, "end": 247.28, "word": " of", "probability": 0.9638671875}, {"start": 247.28, "end": 247.92, "word": " IUG", "probability": 0.8359375}, {"start": 247.92, "end": 248.64, "word": " students", "probability": 0.97021484375}, {"start": 248.64, "end": 251.28, "word": " about", "probability": 0.83837890625}, {"start": 251.28, "end": 253.2, "word": " a", "probability": 0.9873046875}, {"start": 253.2, 
"end": 253.52, "word": " certain", "probability": 0.90869140625}, {"start": 253.52, "end": 253.96, "word": " subject.", "probability": 0.95751953125}, {"start": 256.26, "end": 256.86, "word": " So", "probability": 0.9111328125}, {"start": 256.86, "end": 258.0, "word": " my", "probability": 0.76220703125}, {"start": 258.0, "end": 258.52, "word": " population", "probability": 0.91357421875}, {"start": 258.52, "end": 259.32, "word": " consists", "probability": 0.845703125}, {"start": 259.32, "end": 260.08, "word": " of", "probability": 0.96875}, {"start": 260.08, "end": 261.78, "word": " all", "probability": 0.63427734375}, {"start": 261.78, "end": 262.88, "word": " IUG", "probability": 0.945556640625}, {"start": 262.88, "end": 263.58, "word": " students.", "probability": 0.974609375}, {"start": 263.98, "end": 264.12, "word": " So", "probability": 0.9326171875}, {"start": 264.12, "end": 264.4, "word": " that's", "probability": 0.951904296875}, {"start": 264.4, "end": 264.58, "word": " the", "probability": 0.92333984375}, {"start": 264.58, "end": 264.9, "word": " entire", "probability": 0.896484375}, {"start": 264.9, "end": 265.44, "word": " population.", "probability": 0.921875}], "temperature": 1.0}, {"id": 13, "seek": 29463, "start": 266.51, "end": 294.63, "text": " And you know that, for example, suppose our usual students is around, for example, 20,000 students. 20,000 students is a big number. So it's better to select a sample from that population. 
Now, the first step in this process, we have to determine the frame.", "tokens": [400, 291, 458, 300, 11, 337, 1365, 11, 7297, 527, 7713, 1731, 307, 926, 11, 337, 1365, 11, 945, 11, 1360, 1731, 13, 945, 11, 1360, 1731, 307, 257, 955, 1230, 13, 407, 309, 311, 1101, 281, 3048, 257, 6889, 490, 300, 4415, 13, 823, 11, 264, 700, 1823, 294, 341, 1399, 11, 321, 362, 281, 6997, 264, 3920, 13], "avg_logprob": -0.198770492780404, "compression_ratio": 1.5357142857142858, "no_speech_prob": 0.0, "words": [{"start": 266.51, "end": 266.83, "word": " And", "probability": 0.7568359375}, {"start": 266.83, "end": 266.93, "word": " you", "probability": 0.85791015625}, {"start": 266.93, "end": 267.07, "word": " know", "probability": 0.888671875}, {"start": 267.07, "end": 267.37, "word": " that,", "probability": 0.91357421875}, {"start": 267.59, "end": 267.73, "word": " for", "probability": 0.94189453125}, {"start": 267.73, "end": 268.11, "word": " example,", "probability": 0.974609375}, {"start": 268.27, "end": 268.63, "word": " suppose", "probability": 0.88134765625}, {"start": 268.63, "end": 268.83, "word": " our", "probability": 0.2177734375}, {"start": 268.83, "end": 269.03, "word": " usual", "probability": 0.71875}, {"start": 269.03, "end": 269.67, "word": " students", "probability": 0.9306640625}, {"start": 269.67, "end": 271.31, "word": " is", "probability": 0.76513671875}, {"start": 271.31, "end": 271.75, "word": " around,", "probability": 0.92578125}, {"start": 272.43, "end": 273.33, "word": " for", "probability": 0.9423828125}, {"start": 273.33, "end": 273.75, "word": " example,", "probability": 0.97412109375}, {"start": 274.15, "end": 274.49, "word": " 20", "probability": 0.65380859375}, {"start": 274.49, "end": 274.87, "word": ",000", "probability": 0.926513671875}, {"start": 274.87, "end": 275.39, "word": " students.", "probability": 0.58935546875}, {"start": 276.99, "end": 277.75, "word": " 20", "probability": 0.876953125}, {"start": 277.75, "end": 278.19, "word": ",000", 
"probability": 0.997802734375}, {"start": 278.19, "end": 278.95, "word": " students", "probability": 0.97265625}, {"start": 278.95, "end": 279.83, "word": " is", "probability": 0.861328125}, {"start": 279.83, "end": 279.89, "word": " a", "probability": 0.79296875}, {"start": 279.89, "end": 280.01, "word": " big", "probability": 0.9169921875}, {"start": 280.01, "end": 280.31, "word": " number.", "probability": 0.94091796875}, {"start": 281.05, "end": 281.81, "word": " So", "probability": 0.92236328125}, {"start": 281.81, "end": 281.99, "word": " it's", "probability": 0.93994140625}, {"start": 281.99, "end": 282.19, "word": " better", "probability": 0.91162109375}, {"start": 282.19, "end": 284.01, "word": " to", "probability": 0.955078125}, {"start": 284.01, "end": 284.29, "word": " select", "probability": 0.845703125}, {"start": 284.29, "end": 284.63, "word": " a", "probability": 0.11834716796875}, {"start": 284.63, "end": 285.17, "word": " sample", "probability": 0.76806640625}, {"start": 285.17, "end": 285.49, "word": " from", "probability": 0.87841796875}, {"start": 285.49, "end": 285.67, "word": " that", "probability": 0.5087890625}, {"start": 285.67, "end": 286.11, "word": " population.", "probability": 0.9609375}, {"start": 286.61, "end": 286.77, "word": " Now,", "probability": 0.955078125}, {"start": 286.83, "end": 286.95, "word": " the", "probability": 0.9111328125}, {"start": 286.95, "end": 287.35, "word": " first", "probability": 0.8857421875}, {"start": 287.35, "end": 287.83, "word": " step", "probability": 0.90283203125}, {"start": 287.83, "end": 289.03, "word": " in", "probability": 0.77978515625}, {"start": 289.03, "end": 289.27, "word": " this", "probability": 0.947265625}, {"start": 289.27, "end": 289.91, "word": " process,", "probability": 0.9560546875}, {"start": 290.47, "end": 291.49, "word": " we", "probability": 0.9609375}, {"start": 291.49, "end": 291.93, "word": " have", "probability": 0.94677734375}, {"start": 291.93, "end": 293.29, "word": " 
to", "probability": 0.97021484375}, {"start": 293.29, "end": 293.87, "word": " determine", "probability": 0.92626953125}, {"start": 293.87, "end": 294.23, "word": " the", "probability": 0.923828125}, {"start": 294.23, "end": 294.63, "word": " frame.", "probability": 0.9228515625}], "temperature": 1.0}, {"id": 14, "seek": 32222, "start": 295.28, "end": 322.22, "text": " of that population. So my frame consists of all IU students, which has maybe males and females. So my frame in this case is all items, I mean all students at IUG. So that's the frame. So my frame consists of all students.", "tokens": [295, 300, 4415, 13, 407, 452, 3920, 14689, 295, 439, 44218, 1731, 11, 597, 575, 1310, 20776, 293, 21529, 13, 407, 452, 3920, 294, 341, 1389, 307, 439, 4754, 11, 286, 914, 439, 1731, 412, 44218, 38, 13, 407, 300, 311, 264, 3920, 13, 407, 452, 3920, 14689, 295, 439, 1731, 13], "avg_logprob": -0.11880896226415094, "compression_ratio": 1.6444444444444444, "no_speech_prob": 0.0, "words": [{"start": 295.28, "end": 295.48, "word": " of", "probability": 0.69580078125}, {"start": 295.48, "end": 295.7, "word": " that", "probability": 0.9140625}, {"start": 295.7, "end": 296.18, "word": " population.", "probability": 0.93994140625}, {"start": 297.54, "end": 297.74, "word": " So", "probability": 0.9033203125}, {"start": 297.74, "end": 298.0, "word": " my", "probability": 0.85546875}, {"start": 298.0, "end": 298.32, "word": " frame", "probability": 0.92578125}, {"start": 298.32, "end": 299.22, "word": " consists", "probability": 0.8037109375}, {"start": 299.22, "end": 299.9, "word": " of", "probability": 0.9658203125}, {"start": 299.9, "end": 300.68, "word": " all", "probability": 0.9482421875}, {"start": 300.68, "end": 301.32, "word": " IU", "probability": 0.95751953125}, {"start": 301.32, "end": 301.76, "word": " students,", "probability": 0.9267578125}, {"start": 301.92, "end": 302.14, "word": " which", "probability": 0.95068359375}, {"start": 302.14, "end": 302.38, "word": " 
has", "probability": 0.8876953125}, {"start": 302.38, "end": 302.82, "word": " maybe", "probability": 0.90234375}, {"start": 302.82, "end": 303.2, "word": " males", "probability": 0.9453125}, {"start": 303.2, "end": 303.46, "word": " and", "probability": 0.9404296875}, {"start": 303.46, "end": 303.84, "word": " females.", "probability": 0.94287109375}, {"start": 304.34, "end": 304.48, "word": " So", "probability": 0.953125}, {"start": 304.48, "end": 304.74, "word": " my", "probability": 0.94775390625}, {"start": 304.74, "end": 305.08, "word": " frame", "probability": 0.90234375}, {"start": 305.08, "end": 306.32, "word": " in", "probability": 0.65966796875}, {"start": 306.32, "end": 306.58, "word": " this", "probability": 0.9453125}, {"start": 306.58, "end": 306.98, "word": " case", "probability": 0.9130859375}, {"start": 306.98, "end": 307.28, "word": " is", "probability": 0.8232421875}, {"start": 307.28, "end": 307.62, "word": " all", "probability": 0.94970703125}, {"start": 307.62, "end": 308.18, "word": " items,", "probability": 0.68994140625}, {"start": 308.9, "end": 309.04, "word": " I", "probability": 0.97900390625}, {"start": 309.04, "end": 309.18, "word": " mean", "probability": 0.9658203125}, {"start": 309.18, "end": 309.56, "word": " all", "probability": 0.751953125}, {"start": 309.56, "end": 310.6, "word": " students", "probability": 0.88427734375}, {"start": 310.6, "end": 312.4, "word": " at", "probability": 0.88134765625}, {"start": 312.4, "end": 313.0, "word": " IUG.", "probability": 0.926025390625}, {"start": 313.32, "end": 313.48, "word": " So", "probability": 0.95703125}, {"start": 313.48, "end": 313.88, "word": " that's", "probability": 0.961669921875}, {"start": 313.88, "end": 314.16, "word": " the", "probability": 0.91845703125}, {"start": 314.16, "end": 314.58, "word": " frame.", "probability": 0.89111328125}, {"start": 314.7, "end": 314.8, "word": " So", "probability": 0.9501953125}, {"start": 314.8, "end": 315.02, "word": " my", 
"probability": 0.96044921875}, {"start": 315.02, "end": 315.38, "word": " frame", "probability": 0.88671875}, {"start": 315.38, "end": 318.72, "word": " consists", "probability": 0.7783203125}, {"start": 318.72, "end": 319.12, "word": " of", "probability": 0.966796875}, {"start": 319.12, "end": 319.62, "word": " all", "probability": 0.94189453125}, {"start": 319.62, "end": 322.22, "word": " students.", "probability": 0.953125}], "temperature": 1.0}, {"id": 15, "seek": 35053, "start": 327.63, "end": 350.53, "text": " So the definition of the semantic frame is a listing of items that make up the population. The items could be individual, could be students, could be things, animals, and so on. So frames are data sources such as a population list.", "tokens": [407, 264, 7123, 295, 264, 47982, 3920, 307, 257, 22161, 295, 4754, 300, 652, 493, 264, 4415, 13, 440, 4754, 727, 312, 2609, 11, 727, 312, 1731, 11, 727, 312, 721, 11, 4882, 11, 293, 370, 322, 13, 407, 12083, 366, 1412, 7139, 1270, 382, 257, 4415, 1329, 13], "avg_logprob": -0.170468752682209, "compression_ratio": 1.6223776223776223, "no_speech_prob": 0.0, "words": [{"start": 327.63000000000005, "end": 328.27000000000004, "word": " So", "probability": 0.30517578125}, {"start": 328.27000000000004, "end": 328.91, "word": " the", "probability": 0.736328125}, {"start": 328.91, "end": 329.23, "word": " definition", "probability": 0.9345703125}, {"start": 329.23, "end": 332.37, "word": " of", "probability": 0.451904296875}, {"start": 332.37, "end": 332.55, "word": " the", "probability": 0.755859375}, {"start": 332.55, "end": 332.83, "word": " semantic", "probability": 0.259765625}, {"start": 332.83, "end": 333.09, "word": " frame", "probability": 0.91552734375}, {"start": 333.09, "end": 333.41, "word": " is", "probability": 0.9404296875}, {"start": 333.41, "end": 333.75, "word": " a", "probability": 0.90380859375}, {"start": 333.75, "end": 334.09, "word": " listing", "probability": 0.93017578125}, {"start": 334.09, 
"end": 334.27, "word": " of", "probability": 0.970703125}, {"start": 334.27, "end": 334.77, "word": " items", "probability": 0.83740234375}, {"start": 334.77, "end": 335.71, "word": " that", "probability": 0.9169921875}, {"start": 335.71, "end": 336.01, "word": " make", "probability": 0.93603515625}, {"start": 336.01, "end": 336.33, "word": " up", "probability": 0.97265625}, {"start": 336.33, "end": 336.83, "word": " the", "probability": 0.921875}, {"start": 336.83, "end": 337.49, "word": " population.", "probability": 0.95751953125}, {"start": 338.15, "end": 338.25, "word": " The", "probability": 0.86669921875}, {"start": 338.25, "end": 338.55, "word": " items", "probability": 0.76513671875}, {"start": 338.55, "end": 338.81, "word": " could", "probability": 0.87939453125}, {"start": 338.81, "end": 338.93, "word": " be", "probability": 0.95654296875}, {"start": 338.93, "end": 339.35, "word": " individual,", "probability": 0.849609375}, {"start": 340.17, "end": 340.35, "word": " could", "probability": 0.857421875}, {"start": 340.35, "end": 340.93, "word": " be", "probability": 0.953125}, {"start": 340.93, "end": 342.13, "word": " students,", "probability": 0.970703125}, {"start": 342.53, "end": 342.65, "word": " could", "probability": 0.8916015625}, {"start": 342.65, "end": 342.79, "word": " be", "probability": 0.955078125}, {"start": 342.79, "end": 343.17, "word": " things,", "probability": 0.8603515625}, {"start": 343.67, "end": 344.03, "word": " animals,", "probability": 0.94775390625}, {"start": 344.35, "end": 344.49, "word": " and", "probability": 0.87890625}, {"start": 344.49, "end": 344.65, "word": " so", "probability": 0.96240234375}, {"start": 344.65, "end": 344.87, "word": " on.", "probability": 0.93603515625}, {"start": 347.31, "end": 347.57, "word": " So", "probability": 0.9375}, {"start": 347.57, "end": 347.87, "word": " frames", "probability": 0.775390625}, {"start": 347.87, "end": 348.11, "word": " are", "probability": 0.93310546875}, {"start": 
348.11, "end": 348.39, "word": " data", "probability": 0.9375}, {"start": 348.39, "end": 348.81, "word": " sources", "probability": 0.82421875}, {"start": 348.81, "end": 349.23, "word": " such", "probability": 0.83935546875}, {"start": 349.23, "end": 349.51, "word": " as", "probability": 0.96435546875}, {"start": 349.51, "end": 349.65, "word": " a", "probability": 0.8916015625}, {"start": 349.65, "end": 349.93, "word": " population", "probability": 0.94873046875}, {"start": 349.93, "end": 350.53, "word": " list.", "probability": 0.6669921875}], "temperature": 1.0}, {"id": 16, "seek": 38092, "start": 351.88, "end": 380.92, "text": " Suppose we have the names of IUDs humans. So that's my population list. Or directories, or maps, and so on. So that's the frame we have to know about the population we are interested in. Inaccurate or biased results can result if frame excludes certain portions of the population. For example, suppose here, as I mentioned,", "tokens": [21360, 321, 362, 264, 5288, 295, 286, 9438, 82, 6255, 13, 407, 300, 311, 452, 4415, 1329, 13, 1610, 5391, 530, 11, 420, 11317, 11, 293, 370, 322, 13, 407, 300, 311, 264, 3920, 321, 362, 281, 458, 466, 264, 4415, 321, 366, 3102, 294, 13, 682, 8476, 33144, 420, 28035, 3542, 393, 1874, 498, 3920, 16269, 279, 1629, 25070, 295, 264, 4415, 13, 1171, 1365, 11, 7297, 510, 11, 382, 286, 2835, 11], "avg_logprob": -0.1711458392937978, "compression_ratio": 1.62, "no_speech_prob": 0.0, "words": [{"start": 351.88, "end": 352.24, "word": " Suppose", "probability": 0.6083984375}, {"start": 352.24, "end": 352.42, "word": " we", "probability": 0.92724609375}, {"start": 352.42, "end": 352.6, "word": " have", "probability": 0.9443359375}, {"start": 352.6, "end": 352.9, "word": " the", "probability": 0.87744140625}, {"start": 352.9, "end": 354.02, "word": " names", "probability": 0.873046875}, {"start": 354.02, "end": 354.32, "word": " of", "probability": 0.9619140625}, {"start": 354.32, "end": 354.84, "word": " IUDs", 
"probability": 0.4663899739583333}, {"start": 354.84, "end": 355.16, "word": " humans.", "probability": 0.451416015625}, {"start": 355.64, "end": 355.8, "word": " So", "probability": 0.93798828125}, {"start": 355.8, "end": 356.08, "word": " that's", "probability": 0.90869140625}, {"start": 356.08, "end": 356.28, "word": " my", "probability": 0.96728515625}, {"start": 356.28, "end": 356.72, "word": " population", "probability": 0.935546875}, {"start": 356.72, "end": 357.08, "word": " list.", "probability": 0.9169921875}, {"start": 358.36, "end": 358.84, "word": " Or", "probability": 0.94140625}, {"start": 358.84, "end": 359.82, "word": " directories,", "probability": 0.758544921875}, {"start": 359.96, "end": 360.08, "word": " or", "probability": 0.9619140625}, {"start": 360.08, "end": 360.42, "word": " maps,", "probability": 0.92529296875}, {"start": 360.5, "end": 360.64, "word": " and", "probability": 0.92333984375}, {"start": 360.64, "end": 360.82, "word": " so", "probability": 0.953125}, {"start": 360.82, "end": 361.02, "word": " on.", "probability": 0.9453125}, {"start": 361.44, "end": 361.64, "word": " So", "probability": 0.95263671875}, {"start": 361.64, "end": 361.96, "word": " that's", "probability": 0.952392578125}, {"start": 361.96, "end": 362.16, "word": " the", "probability": 0.92041015625}, {"start": 362.16, "end": 362.46, "word": " frame", "probability": 0.92333984375}, {"start": 362.46, "end": 362.66, "word": " we", "probability": 0.9453125}, {"start": 362.66, "end": 362.86, "word": " have", "probability": 0.9501953125}, {"start": 362.86, "end": 363.0, "word": " to", "probability": 0.9677734375}, {"start": 363.0, "end": 363.24, "word": " know", "probability": 0.87255859375}, {"start": 363.24, "end": 364.04, "word": " about", "probability": 0.744140625}, {"start": 364.04, "end": 364.5, "word": " the", "probability": 0.92431640625}, {"start": 364.5, "end": 365.08, "word": " population", "probability": 0.923828125}, {"start": 365.08, "end": 365.36, 
"word": " we", "probability": 0.95703125}, {"start": 365.36, "end": 365.52, "word": " are", "probability": 0.9375}, {"start": 365.52, "end": 366.02, "word": " interested", "probability": 0.84716796875}, {"start": 366.02, "end": 366.4, "word": " in.", "probability": 0.94677734375}, {"start": 368.86, "end": 369.48, "word": " Inaccurate", "probability": 0.9085286458333334}, {"start": 369.48, "end": 369.76, "word": " or", "probability": 0.91259765625}, {"start": 369.76, "end": 370.12, "word": " biased", "probability": 0.947265625}, {"start": 370.12, "end": 370.64, "word": " results", "probability": 0.90283203125}, {"start": 370.64, "end": 370.9, "word": " can", "probability": 0.92041015625}, {"start": 370.9, "end": 371.3, "word": " result", "probability": 0.9775390625}, {"start": 371.3, "end": 371.58, "word": " if", "probability": 0.806640625}, {"start": 371.58, "end": 372.18, "word": " frame", "probability": 0.68310546875}, {"start": 372.18, "end": 373.8, "word": " excludes", "probability": 0.992431640625}, {"start": 373.8, "end": 374.64, "word": " certain", "probability": 0.896484375}, {"start": 374.64, "end": 375.32, "word": " portions", "probability": 0.96044921875}, {"start": 375.32, "end": 376.3, "word": " of", "probability": 0.96826171875}, {"start": 376.3, "end": 376.46, "word": " the", "probability": 0.91943359375}, {"start": 376.46, "end": 376.92, "word": " population.", "probability": 0.9296875}, {"start": 378.66, "end": 378.9, "word": " For", "probability": 0.96240234375}, {"start": 378.9, "end": 379.18, "word": " example,", "probability": 0.97607421875}, {"start": 379.28, "end": 379.72, "word": " suppose", "probability": 0.876953125}, {"start": 379.72, "end": 380.1, "word": " here,", "probability": 0.7841796875}, {"start": 380.18, "end": 380.5, "word": " as", "probability": 0.87890625}, {"start": 380.5, "end": 380.62, "word": " I", "probability": 0.99658203125}, {"start": 380.62, "end": 380.92, "word": " mentioned,", "probability": 0.83349609375}], 
"temperature": 1.0}, {"id": 17, "seek": 40508, "start": 381.96, "end": 405.08, "text": " We are interested in IUG students, so my frame and all IU students. And I know there are students, either males or females. Suppose for some reasons, we ignore males, and just my sample focused on females. In this case, females.", "tokens": [492, 366, 3102, 294, 44218, 38, 1731, 11, 370, 452, 3920, 293, 439, 44218, 1731, 13, 400, 286, 458, 456, 366, 1731, 11, 2139, 20776, 420, 21529, 13, 21360, 337, 512, 4112, 11, 321, 11200, 20776, 11, 293, 445, 452, 6889, 5178, 322, 21529, 13, 682, 341, 1389, 11, 21529, 13], "avg_logprob": -0.2785456610413698, "compression_ratio": 1.5369127516778522, "no_speech_prob": 0.0, "words": [{"start": 381.96, "end": 382.22, "word": " We", "probability": 0.64111328125}, {"start": 382.22, "end": 382.34, "word": " are", "probability": 0.9072265625}, {"start": 382.34, "end": 382.78, "word": " interested", "probability": 0.8798828125}, {"start": 382.78, "end": 383.0, "word": " in", "probability": 0.951171875}, {"start": 383.0, "end": 383.38, "word": " IUG", "probability": 0.61181640625}, {"start": 383.38, "end": 383.9, "word": " students,", "probability": 0.9638671875}, {"start": 384.04, "end": 384.18, "word": " so", "probability": 0.90673828125}, {"start": 384.18, "end": 384.4, "word": " my", "probability": 0.88818359375}, {"start": 384.4, "end": 384.8, "word": " frame", "probability": 0.52587890625}, {"start": 384.8, "end": 385.84, "word": " and", "probability": 0.265869140625}, {"start": 385.84, "end": 386.22, "word": " all", "probability": 0.9423828125}, {"start": 386.22, "end": 386.42, "word": " IU", "probability": 0.966796875}, {"start": 386.42, "end": 386.84, "word": " students.", "probability": 0.8203125}, {"start": 388.28, "end": 388.66, "word": " And", "probability": 0.91357421875}, {"start": 388.66, "end": 388.82, "word": " I", "probability": 0.9853515625}, {"start": 388.82, "end": 388.98, "word": " know", "probability": 0.87890625}, {"start": 
388.98, "end": 389.14, "word": " there", "probability": 0.28564453125}, {"start": 389.14, "end": 389.28, "word": " are", "probability": 0.916015625}, {"start": 389.28, "end": 389.98, "word": " students,", "probability": 0.9443359375}, {"start": 391.36, "end": 391.86, "word": " either", "probability": 0.935546875}, {"start": 391.86, "end": 392.68, "word": " males", "probability": 0.89501953125}, {"start": 392.68, "end": 392.94, "word": " or", "probability": 0.96142578125}, {"start": 392.94, "end": 393.42, "word": " females.", "probability": 0.93701171875}, {"start": 395.1, "end": 395.48, "word": " Suppose", "probability": 0.49267578125}, {"start": 395.48, "end": 395.9, "word": " for", "probability": 0.84375}, {"start": 395.9, "end": 396.48, "word": " some", "probability": 0.9033203125}, {"start": 396.48, "end": 397.02, "word": " reasons,", "probability": 0.88134765625}, {"start": 397.34, "end": 397.56, "word": " we", "probability": 0.962890625}, {"start": 397.56, "end": 398.1, "word": " ignore", "probability": 0.6474609375}, {"start": 398.1, "end": 399.08, "word": " males,", "probability": 0.921875}, {"start": 399.82, "end": 400.04, "word": " and", "probability": 0.91796875}, {"start": 400.04, "end": 400.28, "word": " just", "probability": 0.900390625}, {"start": 400.28, "end": 400.54, "word": " my", "probability": 0.919921875}, {"start": 400.54, "end": 400.88, "word": " sample", "probability": 0.76708984375}, {"start": 400.88, "end": 401.52, "word": " focused", "probability": 0.431640625}, {"start": 401.52, "end": 401.92, "word": " on", "probability": 0.94287109375}, {"start": 401.92, "end": 402.38, "word": " females.", "probability": 0.91357421875}, {"start": 403.56, "end": 403.9, "word": " In", "probability": 0.93505859375}, {"start": 403.9, "end": 404.12, "word": " this", "probability": 0.94580078125}, {"start": 404.12, "end": 404.48, "word": " case,", "probability": 0.9169921875}, {"start": 404.58, "end": 405.08, "word": " females.", "probability": 
0.91015625}], "temperature": 1.0}, {"id": 18, "seek": 43744, "start": 408.7, "end": 437.44, "text": " don't represent the entire population. For this reason, you will get inaccurate or biased results if you ignore a certain portion. Because here males, for example, maybe consists of 40% of the IG students. So it makes sense that this number or this percentage is a big number. So ignoring this portion,", "tokens": [500, 380, 2906, 264, 2302, 4415, 13, 1171, 341, 1778, 11, 291, 486, 483, 46443, 420, 28035, 3542, 498, 291, 11200, 257, 1629, 8044, 13, 1436, 510, 20776, 11, 337, 1365, 11, 1310, 14689, 295, 3356, 4, 295, 264, 26367, 1731, 13, 407, 309, 1669, 2020, 300, 341, 1230, 420, 341, 9668, 307, 257, 955, 1230, 13, 407, 26258, 341, 8044, 11], "avg_logprob": -0.19618055177113367, "compression_ratio": 1.507462686567164, "no_speech_prob": 0.0, "words": [{"start": 408.70000000000005, "end": 409.46000000000004, "word": " don't", "probability": 0.7462158203125}, {"start": 409.46000000000004, "end": 410.22, "word": " represent", "probability": 0.78466796875}, {"start": 410.22, "end": 410.62, "word": " the", "probability": 0.892578125}, {"start": 410.62, "end": 410.94, "word": " entire", "probability": 0.8994140625}, {"start": 410.94, "end": 411.4, "word": " population.", "probability": 0.94140625}, {"start": 411.54, "end": 411.68, "word": " For", "probability": 0.88330078125}, {"start": 411.68, "end": 411.9, "word": " this", "probability": 0.93603515625}, {"start": 411.9, "end": 412.18, "word": " reason,", "probability": 0.96484375}, {"start": 412.26, "end": 412.3, "word": " you", "probability": 0.95556640625}, {"start": 412.3, "end": 412.44, "word": " will", "probability": 0.8701171875}, {"start": 412.44, "end": 412.78, "word": " get", "probability": 0.93408203125}, {"start": 412.78, "end": 414.06, "word": " inaccurate", "probability": 0.8720703125}, {"start": 414.06, "end": 416.4, "word": " or", "probability": 0.88818359375}, {"start": 416.4, "end": 417.08, "word": " 
biased", "probability": 0.919921875}, {"start": 417.08, "end": 417.72, "word": " results", "probability": 0.90185546875}, {"start": 417.72, "end": 418.42, "word": " if", "probability": 0.74072265625}, {"start": 418.42, "end": 418.66, "word": " you", "probability": 0.96337890625}, {"start": 418.66, "end": 419.2, "word": " ignore", "probability": 0.8427734375}, {"start": 419.2, "end": 419.9, "word": " a", "probability": 0.9833984375}, {"start": 419.9, "end": 420.24, "word": " certain", "probability": 0.9052734375}, {"start": 420.24, "end": 420.62, "word": " portion.", "probability": 0.87451171875}, {"start": 420.72, "end": 421.1, "word": " Because", "probability": 0.91015625}, {"start": 421.1, "end": 422.0, "word": " here", "probability": 0.74609375}, {"start": 422.0, "end": 422.66, "word": " males,", "probability": 0.68359375}, {"start": 422.78, "end": 422.92, "word": " for", "probability": 0.95068359375}, {"start": 422.92, "end": 423.28, "word": " example,", "probability": 0.97607421875}, {"start": 423.42, "end": 423.66, "word": " maybe", "probability": 0.84228515625}, {"start": 423.66, "end": 425.14, "word": " consists", "probability": 0.410400390625}, {"start": 425.14, "end": 425.72, "word": " of", "probability": 0.96923828125}, {"start": 425.72, "end": 426.64, "word": " 40", "probability": 0.94287109375}, {"start": 426.64, "end": 427.2, "word": "%", "probability": 0.80908203125}, {"start": 427.2, "end": 427.9, "word": " of", "probability": 0.9384765625}, {"start": 427.9, "end": 428.58, "word": " the", "probability": 0.90576171875}, {"start": 428.58, "end": 428.98, "word": " IG", "probability": 0.45703125}, {"start": 428.98, "end": 429.96, "word": " students.", "probability": 0.83154296875}, {"start": 430.3, "end": 430.34, "word": " So", "probability": 0.92529296875}, {"start": 430.34, "end": 431.08, "word": " it", "probability": 0.8916015625}, {"start": 431.08, "end": 431.36, "word": " makes", "probability": 0.8349609375}, {"start": 431.36, "end": 431.66, 
"word": " sense", "probability": 0.83154296875}, {"start": 431.66, "end": 431.92, "word": " that", "probability": 0.90576171875}, {"start": 431.92, "end": 432.22, "word": " this", "probability": 0.94873046875}, {"start": 432.22, "end": 432.58, "word": " number", "probability": 0.90283203125}, {"start": 432.58, "end": 432.96, "word": " or", "probability": 0.630859375}, {"start": 432.96, "end": 433.38, "word": " this", "probability": 0.88232421875}, {"start": 433.38, "end": 433.86, "word": " percentage", "probability": 0.861328125}, {"start": 433.86, "end": 434.22, "word": " is", "probability": 0.91796875}, {"start": 434.22, "end": 434.52, "word": " a", "probability": 0.486328125}, {"start": 434.52, "end": 434.66, "word": " big", "probability": 0.90478515625}, {"start": 434.66, "end": 435.5, "word": " number.", "probability": 0.837890625}, {"start": 435.92, "end": 436.24, "word": " So", "probability": 0.95849609375}, {"start": 436.24, "end": 436.68, "word": " ignoring", "probability": 0.935546875}, {"start": 436.68, "end": 436.98, "word": " this", "probability": 0.947265625}, {"start": 436.98, "end": 437.44, "word": " portion,", "probability": 0.89892578125}], "temperature": 1.0}, {"id": 19, "seek": 46646, "start": 438.02, "end": 466.46, "text": " may lead to misleading results or inaccurate results or biased results. So you have to keep in mind that you have to choose all the portions of that frame. So inaccurate or biased results can result if a frame excludes certain portions of a population. 
Another example, suppose we took males and females.", "tokens": [815, 1477, 281, 36429, 3542, 420, 46443, 3542, 420, 28035, 3542, 13, 407, 291, 362, 281, 1066, 294, 1575, 300, 291, 362, 281, 2826, 439, 264, 25070, 295, 300, 3920, 13, 407, 46443, 420, 28035, 3542, 393, 1874, 498, 257, 3920, 16269, 279, 1629, 25070, 295, 257, 4415, 13, 3996, 1365, 11, 7297, 321, 1890, 20776, 293, 21529, 13], "avg_logprob": -0.13294270435969036, "compression_ratio": 1.7732558139534884, "no_speech_prob": 0.0, "words": [{"start": 438.02, "end": 438.46, "word": " may", "probability": 0.68017578125}, {"start": 438.46, "end": 439.44, "word": " lead", "probability": 0.69287109375}, {"start": 439.44, "end": 439.74, "word": " to", "probability": 0.96875}, {"start": 439.74, "end": 440.18, "word": " misleading", "probability": 0.94580078125}, {"start": 440.18, "end": 440.78, "word": " results", "probability": 0.87353515625}, {"start": 440.78, "end": 441.16, "word": " or", "probability": 0.595703125}, {"start": 441.16, "end": 442.78, "word": " inaccurate", "probability": 0.78173828125}, {"start": 442.78, "end": 443.58, "word": " results", "probability": 0.8642578125}, {"start": 443.58, "end": 443.86, "word": " or", "probability": 0.74755859375}, {"start": 443.86, "end": 444.18, "word": " biased", "probability": 0.8916015625}, {"start": 444.18, "end": 444.64, "word": " results.", "probability": 0.89013671875}, {"start": 445.04, "end": 445.4, "word": " So", "probability": 0.8837890625}, {"start": 445.4, "end": 445.9, "word": " you", "probability": 0.7822265625}, {"start": 445.9, "end": 446.16, "word": " have", "probability": 0.947265625}, {"start": 446.16, "end": 446.32, "word": " to", "probability": 0.9697265625}, {"start": 446.32, "end": 446.56, "word": " keep", "probability": 0.923828125}, {"start": 446.56, "end": 447.1, "word": " in", "probability": 0.93115234375}, {"start": 447.1, "end": 447.32, "word": " mind", "probability": 0.89013671875}, {"start": 447.32, "end": 447.58, "word": " that", 
"probability": 0.92431640625}, {"start": 447.58, "end": 447.7, "word": " you", "probability": 0.923828125}, {"start": 447.7, "end": 447.86, "word": " have", "probability": 0.9375}, {"start": 447.86, "end": 447.98, "word": " to", "probability": 0.96337890625}, {"start": 447.98, "end": 448.3, "word": " choose", "probability": 0.87548828125}, {"start": 448.3, "end": 448.8, "word": " all", "probability": 0.93994140625}, {"start": 448.8, "end": 449.6, "word": " the", "probability": 0.89404296875}, {"start": 449.6, "end": 450.08, "word": " portions", "probability": 0.95654296875}, {"start": 450.08, "end": 450.36, "word": " of", "probability": 0.9638671875}, {"start": 450.36, "end": 450.74, "word": " that", "probability": 0.86474609375}, {"start": 450.74, "end": 451.3, "word": " frame.", "probability": 0.90478515625}, {"start": 452.12, "end": 452.3, "word": " So", "probability": 0.9189453125}, {"start": 452.3, "end": 452.98, "word": " inaccurate", "probability": 0.80908203125}, {"start": 452.98, "end": 453.42, "word": " or", "probability": 0.9462890625}, {"start": 453.42, "end": 453.74, "word": " biased", "probability": 0.9599609375}, {"start": 453.74, "end": 454.34, "word": " results", "probability": 0.890625}, {"start": 454.34, "end": 454.58, "word": " can", "probability": 0.93359375}, {"start": 454.58, "end": 455.0, "word": " result", "probability": 0.96533203125}, {"start": 455.0, "end": 455.54, "word": " if", "probability": 0.94189453125}, {"start": 455.54, "end": 456.36, "word": " a", "probability": 0.9638671875}, {"start": 456.36, "end": 456.7, "word": " frame", "probability": 0.90087890625}, {"start": 456.7, "end": 457.92, "word": " excludes", "probability": 0.981201171875}, {"start": 457.92, "end": 458.7, "word": " certain", "probability": 0.8896484375}, {"start": 458.7, "end": 459.28, "word": " portions", "probability": 0.94921875}, {"start": 459.28, "end": 459.86, "word": " of", "probability": 0.9677734375}, {"start": 459.86, "end": 460.48, "word": " a", 
"probability": 0.84375}, {"start": 460.48, "end": 460.92, "word": " population.", "probability": 0.9443359375}, {"start": 462.0, "end": 462.34, "word": " Another", "probability": 0.87060546875}, {"start": 462.34, "end": 462.68, "word": " example,", "probability": 0.97412109375}, {"start": 462.82, "end": 463.18, "word": " suppose", "probability": 0.89306640625}, {"start": 463.18, "end": 465.08, "word": " we", "probability": 0.88525390625}, {"start": 465.08, "end": 465.5, "word": " took", "probability": 0.6943359375}, {"start": 465.5, "end": 465.92, "word": " males", "probability": 0.93896484375}, {"start": 465.92, "end": 466.14, "word": " and", "probability": 0.94287109375}, {"start": 466.14, "end": 466.46, "word": " females.", "probability": 0.92626953125}], "temperature": 1.0}, {"id": 20, "seek": 49364, "start": 467.5, "end": 493.64, "text": " But here for females, females have, for example, four levels. Level one, level two, level three, and level four. And we ignored, for example, level one. I mean, the new students. We ignored this portion. 
Maybe this portion is very important one, but by mistake we ignored this one.", "tokens": [583, 510, 337, 21529, 11, 21529, 362, 11, 337, 1365, 11, 1451, 4358, 13, 16872, 472, 11, 1496, 732, 11, 1496, 1045, 11, 293, 1496, 1451, 13, 400, 321, 19735, 11, 337, 1365, 11, 1496, 472, 13, 286, 914, 11, 264, 777, 1731, 13, 492, 19735, 341, 8044, 13, 2704, 341, 8044, 307, 588, 1021, 472, 11, 457, 538, 6146, 321, 19735, 341, 472, 13], "avg_logprob": -0.1558948793646061, "compression_ratio": 1.7195121951219512, "no_speech_prob": 0.0, "words": [{"start": 467.5, "end": 467.84, "word": " But", "probability": 0.83544921875}, {"start": 467.84, "end": 468.04, "word": " here", "probability": 0.76025390625}, {"start": 468.04, "end": 468.24, "word": " for", "probability": 0.669921875}, {"start": 468.24, "end": 468.68, "word": " females,", "probability": 0.94580078125}, {"start": 469.24, "end": 469.78, "word": " females", "probability": 0.939453125}, {"start": 469.78, "end": 470.24, "word": " have,", "probability": 0.654296875}, {"start": 470.86, "end": 472.66, "word": " for", "probability": 0.8251953125}, {"start": 472.66, "end": 473.08, "word": " example,", "probability": 0.9697265625}, {"start": 473.26, "end": 473.64, "word": " four", "probability": 0.92529296875}, {"start": 473.64, "end": 474.04, "word": " levels.", "probability": 0.9189453125}, {"start": 475.56, "end": 475.74, "word": " Level", "probability": 0.91455078125}, {"start": 475.74, "end": 476.02, "word": " one,", "probability": 0.8798828125}, {"start": 476.4, "end": 476.56, "word": " level", "probability": 0.94091796875}, {"start": 476.56, "end": 476.86, "word": " two,", "probability": 0.93896484375}, {"start": 477.02, "end": 477.16, "word": " level", "probability": 0.9443359375}, {"start": 477.16, "end": 477.48, "word": " three,", "probability": 0.9365234375}, {"start": 477.54, "end": 477.64, "word": " and", "probability": 0.91748046875}, {"start": 477.64, "end": 477.8, "word": " level", "probability": 0.9267578125}, 
{"start": 477.8, "end": 478.12, "word": " four.", "probability": 0.94775390625}, {"start": 479.46, "end": 479.84, "word": " And", "probability": 0.94091796875}, {"start": 479.84, "end": 479.98, "word": " we", "probability": 0.95751953125}, {"start": 479.98, "end": 480.44, "word": " ignored,", "probability": 0.84912109375}, {"start": 480.72, "end": 480.9, "word": " for", "probability": 0.955078125}, {"start": 480.9, "end": 481.34, "word": " example,", "probability": 0.9755859375}, {"start": 481.76, "end": 482.12, "word": " level", "probability": 0.94970703125}, {"start": 482.12, "end": 482.54, "word": " one.", "probability": 0.9306640625}, {"start": 484.1, "end": 484.44, "word": " I", "probability": 0.767578125}, {"start": 484.44, "end": 484.7, "word": " mean,", "probability": 0.96484375}, {"start": 484.86, "end": 485.32, "word": " the", "probability": 0.904296875}, {"start": 485.32, "end": 485.56, "word": " new", "probability": 0.9267578125}, {"start": 485.56, "end": 485.96, "word": " students.", "probability": 0.962890625}, {"start": 486.22, "end": 486.28, "word": " We", "probability": 0.95703125}, {"start": 486.28, "end": 486.7, "word": " ignored", "probability": 0.89306640625}, {"start": 486.7, "end": 487.46, "word": " this", "probability": 0.9462890625}, {"start": 487.46, "end": 487.86, "word": " portion.", "probability": 0.88818359375}, {"start": 488.94, "end": 489.22, "word": " Maybe", "probability": 0.9521484375}, {"start": 489.22, "end": 489.52, "word": " this", "probability": 0.94091796875}, {"start": 489.52, "end": 489.82, "word": " portion", "probability": 0.92236328125}, {"start": 489.82, "end": 489.96, "word": " is", "probability": 0.73779296875}, {"start": 489.96, "end": 490.12, "word": " very", "probability": 0.72607421875}, {"start": 490.12, "end": 490.56, "word": " important", "probability": 0.88671875}, {"start": 490.56, "end": 490.84, "word": " one,", "probability": 0.55029296875}, {"start": 490.94, "end": 491.18, "word": " but", "probability": 
0.92236328125}, {"start": 491.18, "end": 492.22, "word": " by", "probability": 0.92529296875}, {"start": 492.22, "end": 492.66, "word": " mistake", "probability": 0.9697265625}, {"start": 492.66, "end": 492.86, "word": " we", "probability": 0.56396484375}, {"start": 492.86, "end": 493.16, "word": " ignored", "probability": 0.67041015625}, {"start": 493.16, "end": 493.42, "word": " this", "probability": 0.9443359375}, {"start": 493.42, "end": 493.64, "word": " one.", "probability": 0.9248046875}], "temperature": 1.0}, {"id": 21, "seek": 52045, "start": 495.67, "end": 520.45, "text": " The remaining three levels will not represent the entire female population. For this reason, you will get inaccurate or biased results. So you have to select all the portions of the frames. Using different frames to generate data can lead to dissimilar conclusions. For example,", "tokens": [440, 8877, 1045, 4358, 486, 406, 2906, 264, 2302, 6556, 4415, 13, 1171, 341, 1778, 11, 291, 486, 483, 46443, 420, 28035, 3542, 13, 407, 291, 362, 281, 3048, 439, 264, 25070, 295, 264, 12083, 13, 11142, 819, 12083, 281, 8460, 1412, 393, 1477, 281, 7802, 332, 2202, 22865, 13, 1171, 1365, 11], "avg_logprob": -0.15682869735691282, "compression_ratio": 1.532967032967033, "no_speech_prob": 0.0, "words": [{"start": 495.67, "end": 495.99, "word": " The", "probability": 0.748046875}, {"start": 495.99, "end": 496.39, "word": " remaining", "probability": 0.90673828125}, {"start": 496.39, "end": 496.75, "word": " three", "probability": 0.9013671875}, {"start": 496.75, "end": 497.29, "word": " levels", "probability": 0.8935546875}, {"start": 497.29, "end": 498.69, "word": " will", "probability": 0.736328125}, {"start": 498.69, "end": 498.89, "word": " not", "probability": 0.95263671875}, {"start": 498.89, "end": 499.39, "word": " represent", "probability": 0.82861328125}, {"start": 499.39, "end": 499.59, "word": " the", "probability": 0.8583984375}, {"start": 499.59, "end": 500.13, "word": " entire", 
"probability": 0.89599609375}, {"start": 500.13, "end": 501.27, "word": " female", "probability": 0.89794921875}, {"start": 501.27, "end": 501.79, "word": " population.", "probability": 0.95068359375}, {"start": 502.27, "end": 502.43, "word": " For", "probability": 0.94873046875}, {"start": 502.43, "end": 502.65, "word": " this", "probability": 0.93603515625}, {"start": 502.65, "end": 502.89, "word": " reason,", "probability": 0.95556640625}, {"start": 502.99, "end": 502.99, "word": " you", "probability": 0.380126953125}, {"start": 502.99, "end": 503.11, "word": " will", "probability": 0.7958984375}, {"start": 503.11, "end": 503.49, "word": " get", "probability": 0.9404296875}, {"start": 503.49, "end": 504.47, "word": " inaccurate", "probability": 0.3544921875}, {"start": 504.47, "end": 505.03, "word": " or", "probability": 0.841796875}, {"start": 505.03, "end": 505.33, "word": " biased", "probability": 0.974609375}, {"start": 505.33, "end": 505.79, "word": " results.", "probability": 0.896484375}, {"start": 506.23, "end": 506.43, "word": " So", "probability": 0.93701171875}, {"start": 506.43, "end": 506.55, "word": " you", "probability": 0.76953125}, {"start": 506.55, "end": 506.85, "word": " have", "probability": 0.9443359375}, {"start": 506.85, "end": 507.27, "word": " to", "probability": 0.94140625}, {"start": 507.27, "end": 507.67, "word": " select", "probability": 0.86767578125}, {"start": 507.67, "end": 508.41, "word": " all", "probability": 0.9521484375}, {"start": 508.41, "end": 509.97, "word": " the", "probability": 0.8984375}, {"start": 509.97, "end": 510.71, "word": " portions", "probability": 0.9482421875}, {"start": 510.71, "end": 511.29, "word": " of", "probability": 0.96728515625}, {"start": 511.29, "end": 511.55, "word": " the", "probability": 0.91943359375}, {"start": 511.55, "end": 512.05, "word": " frames.", "probability": 0.88232421875}, {"start": 513.49, "end": 513.87, "word": " Using", "probability": 0.923828125}, {"start": 513.87, "end": 
514.31, "word": " different", "probability": 0.87939453125}, {"start": 514.31, "end": 514.85, "word": " frames", "probability": 0.8837890625}, {"start": 514.85, "end": 516.27, "word": " to", "probability": 0.95458984375}, {"start": 516.27, "end": 516.61, "word": " generate", "probability": 0.9130859375}, {"start": 516.61, "end": 517.15, "word": " data", "probability": 0.9326171875}, {"start": 517.15, "end": 517.61, "word": " can", "probability": 0.91845703125}, {"start": 517.61, "end": 517.87, "word": " lead", "probability": 0.9404296875}, {"start": 517.87, "end": 518.29, "word": " to", "probability": 0.9677734375}, {"start": 518.29, "end": 519.07, "word": " dissimilar", "probability": 0.90625}, {"start": 519.07, "end": 519.71, "word": " conclusions.", "probability": 0.93505859375}, {"start": 519.97, "end": 520.11, "word": " For", "probability": 0.96630859375}, {"start": 520.11, "end": 520.45, "word": " example,", "probability": 0.974609375}], "temperature": 1.0}, {"id": 22, "seek": 54405, "start": 522.2, "end": 544.06, "text": " Suppose again I am interested in IEG students. 
And I took the frame that has all students at University of Gaza, Universities of Gaza.", "tokens": [21360, 797, 286, 669, 3102, 294, 286, 36, 38, 1731, 13, 400, 286, 1890, 264, 3920, 300, 575, 439, 1731, 412, 3535, 295, 37800, 11, 14052, 1088, 295, 37800, 13], "avg_logprob": -0.21610383545198747, "compression_ratio": 1.2735849056603774, "no_speech_prob": 0.0, "words": [{"start": 522.2, "end": 522.7, "word": " Suppose", "probability": 0.5908203125}, {"start": 522.7, "end": 523.12, "word": " again", "probability": 0.71923828125}, {"start": 523.12, "end": 523.7, "word": " I", "probability": 0.6572265625}, {"start": 523.7, "end": 523.84, "word": " am", "probability": 0.8076171875}, {"start": 523.84, "end": 524.38, "word": " interested", "probability": 0.8720703125}, {"start": 524.38, "end": 525.42, "word": " in", "probability": 0.95849609375}, {"start": 525.42, "end": 526.02, "word": " IEG", "probability": 0.7561848958333334}, {"start": 526.02, "end": 526.72, "word": " students.", "probability": 0.96337890625}, {"start": 529.44, "end": 530.52, "word": " And", "probability": 0.9150390625}, {"start": 530.52, "end": 530.72, "word": " I", "probability": 0.9892578125}, {"start": 530.72, "end": 530.94, "word": " took", "probability": 0.9404296875}, {"start": 530.94, "end": 531.18, "word": " the", "probability": 0.91259765625}, {"start": 531.18, "end": 531.56, "word": " frame", "probability": 0.9052734375}, {"start": 531.56, "end": 531.98, "word": " that", "probability": 0.9404296875}, {"start": 531.98, "end": 532.5, "word": " has", "probability": 0.93310546875}, {"start": 532.5, "end": 533.08, "word": " all", "probability": 0.9462890625}, {"start": 533.08, "end": 535.7, "word": " students", "probability": 0.96826171875}, {"start": 535.7, "end": 539.46, "word": " at", "probability": 0.94580078125}, {"start": 539.46, "end": 541.18, "word": " University", "probability": 0.50146484375}, {"start": 541.18, "end": 541.56, "word": " of", "probability": 0.90478515625}, {"start": 
541.56, "end": 542.32, "word": " Gaza,", "probability": 0.88623046875}, {"start": 543.04, "end": 543.6, "word": " Universities", "probability": 0.7353515625}, {"start": 543.6, "end": 543.78, "word": " of", "probability": 0.96923828125}, {"start": 543.78, "end": 544.06, "word": " Gaza.", "probability": 0.88232421875}], "temperature": 1.0}, {"id": 23, "seek": 57601, "start": 549.25, "end": 576.01, "text": " And as we know that Gaza has three universities, big universities, Islamic University, Lazar University, and Al-Aqsa University. So we have three universities. And my frame here, suppose I took all students at these universities, but my study focused on IU students. So my frame, the true one, is all students at IUG.", "tokens": [400, 382, 321, 458, 300, 37800, 575, 1045, 11779, 11, 955, 11779, 11, 17970, 3535, 11, 49273, 3535, 11, 293, 967, 12, 32, 80, 5790, 3535, 13, 407, 321, 362, 1045, 11779, 13, 400, 452, 3920, 510, 11, 7297, 286, 1890, 439, 1731, 412, 613, 11779, 11, 457, 452, 2979, 5178, 322, 44218, 1731, 13, 407, 452, 3920, 11, 264, 2074, 472, 11, 307, 439, 1731, 412, 44218, 38, 13], "avg_logprob": -0.2012543946924344, "compression_ratio": 1.8171428571428572, "no_speech_prob": 0.0, "words": [{"start": 549.25, "end": 549.55, "word": " And", "probability": 0.7841796875}, {"start": 549.55, "end": 549.75, "word": " as", "probability": 0.90673828125}, {"start": 549.75, "end": 549.89, "word": " we", "probability": 0.84716796875}, {"start": 549.89, "end": 550.03, "word": " know", "probability": 0.8837890625}, {"start": 550.03, "end": 550.33, "word": " that", "probability": 0.6484375}, {"start": 550.33, "end": 550.91, "word": " Gaza", "probability": 0.8798828125}, {"start": 550.91, "end": 551.21, "word": " has", "probability": 0.9443359375}, {"start": 551.21, "end": 551.51, "word": " three", "probability": 0.89697265625}, {"start": 551.51, "end": 552.11, "word": " universities,", "probability": 0.85693359375}, {"start": 552.35, "end": 552.53, "word": " big", 
"probability": 0.85302734375}, {"start": 552.53, "end": 553.03, "word": " universities,", "probability": 0.9287109375}, {"start": 554.43, "end": 554.73, "word": " Islamic", "probability": 0.74951171875}, {"start": 554.73, "end": 555.15, "word": " University,", "probability": 0.5732421875}, {"start": 555.37, "end": 555.53, "word": " Lazar", "probability": 0.493896484375}, {"start": 555.53, "end": 555.79, "word": " University,", "probability": 0.6611328125}, {"start": 555.99, "end": 556.05, "word": " and", "probability": 0.888671875}, {"start": 556.05, "end": 556.19, "word": " Al", "probability": 0.365478515625}, {"start": 556.19, "end": 556.33, "word": "-Aqsa", "probability": 0.9061279296875}, {"start": 556.33, "end": 556.91, "word": " University.", "probability": 0.8369140625}, {"start": 557.43, "end": 557.77, "word": " So", "probability": 0.95654296875}, {"start": 557.77, "end": 557.89, "word": " we", "probability": 0.85498046875}, {"start": 557.89, "end": 558.03, "word": " have", "probability": 0.94677734375}, {"start": 558.03, "end": 558.41, "word": " three", "probability": 0.9228515625}, {"start": 558.41, "end": 559.09, "word": " universities.", "probability": 0.93212890625}, {"start": 560.15, "end": 560.39, "word": " And", "probability": 0.94775390625}, {"start": 560.39, "end": 560.69, "word": " my", "probability": 0.97119140625}, {"start": 560.69, "end": 561.07, "word": " frame", "probability": 0.88818359375}, {"start": 561.07, "end": 561.45, "word": " here,", "probability": 0.85302734375}, {"start": 562.69, "end": 563.01, "word": " suppose", "probability": 0.89453125}, {"start": 563.01, "end": 563.31, "word": " I", "probability": 0.9921875}, {"start": 563.31, "end": 563.65, "word": " took", "probability": 0.82275390625}, {"start": 563.65, "end": 564.09, "word": " all", "probability": 0.947265625}, {"start": 564.09, "end": 564.81, "word": " students", "probability": 0.51513671875}, {"start": 564.81, "end": 565.85, "word": " at", "probability": 0.9599609375}, 
{"start": 565.85, "end": 566.15, "word": " these", "probability": 0.6826171875}, {"start": 566.15, "end": 566.75, "word": " universities,", "probability": 0.927734375}, {"start": 566.93, "end": 567.15, "word": " but", "probability": 0.92626953125}, {"start": 567.15, "end": 567.41, "word": " my", "probability": 0.96533203125}, {"start": 567.41, "end": 567.79, "word": " study", "probability": 0.90673828125}, {"start": 567.79, "end": 568.39, "word": " focused", "probability": 0.82275390625}, {"start": 568.39, "end": 569.19, "word": " on", "probability": 0.9384765625}, {"start": 569.19, "end": 569.93, "word": " IU", "probability": 0.880859375}, {"start": 569.93, "end": 570.33, "word": " students.", "probability": 0.59716796875}, {"start": 570.79, "end": 570.93, "word": " So", "probability": 0.9609375}, {"start": 570.93, "end": 571.19, "word": " my", "probability": 0.93359375}, {"start": 571.19, "end": 571.57, "word": " frame,", "probability": 0.8896484375}, {"start": 572.25, "end": 572.47, "word": " the", "probability": 0.8779296875}, {"start": 572.47, "end": 573.75, "word": " true", "probability": 0.75927734375}, {"start": 573.75, "end": 574.09, "word": " one,", "probability": 0.93115234375}, {"start": 574.25, "end": 574.55, "word": " is", "probability": 0.92529296875}, {"start": 574.55, "end": 574.81, "word": " all", "probability": 0.93798828125}, {"start": 574.81, "end": 575.29, "word": " students", "probability": 0.6982421875}, {"start": 575.29, "end": 575.57, "word": " at", "probability": 0.85302734375}, {"start": 575.57, "end": 576.01, "word": " IUG.", "probability": 0.904541015625}], "temperature": 1.0}, {"id": 24, "seek": 60053, "start": 576.99, "end": 600.53, "text": " But I taught all students at universities in Gaza. So now we have different frames. And you want to know what are the opinions of the smokers about smoking. 
So my population now is just...", "tokens": [583, 286, 5928, 439, 1731, 412, 11779, 294, 37800, 13, 407, 586, 321, 362, 819, 12083, 13, 400, 291, 528, 281, 458, 437, 366, 264, 11819, 295, 264, 32073, 433, 466, 14055, 13, 407, 452, 4415, 586, 307, 445, 485], "avg_logprob": -0.22008384146341464, "compression_ratio": 1.35, "no_speech_prob": 4.76837158203125e-07, "words": [{"start": 576.99, "end": 577.31, "word": " But", "probability": 0.63720703125}, {"start": 577.31, "end": 577.57, "word": " I", "probability": 0.95166015625}, {"start": 577.57, "end": 577.93, "word": " taught", "probability": 0.32275390625}, {"start": 577.93, "end": 578.25, "word": " all", "probability": 0.94287109375}, {"start": 578.25, "end": 578.85, "word": " students", "probability": 0.708984375}, {"start": 578.85, "end": 579.31, "word": " at", "probability": 0.93896484375}, {"start": 579.31, "end": 580.09, "word": " universities", "probability": 0.76318359375}, {"start": 580.09, "end": 580.31, "word": " in", "probability": 0.74658203125}, {"start": 580.31, "end": 580.53, "word": " Gaza.", "probability": 0.94140625}, {"start": 581.15, "end": 581.59, "word": " So", "probability": 0.92333984375}, {"start": 581.59, "end": 581.77, "word": " now", "probability": 0.85888671875}, {"start": 581.77, "end": 581.95, "word": " we", "probability": 0.89404296875}, {"start": 581.95, "end": 582.17, "word": " have", "probability": 0.9482421875}, {"start": 582.17, "end": 583.49, "word": " different", "probability": 0.88232421875}, {"start": 583.49, "end": 584.69, "word": " frames.", "probability": 0.5947265625}, {"start": 588.61, "end": 589.53, "word": " And", "probability": 0.87255859375}, {"start": 589.53, "end": 589.69, "word": " you", "probability": 0.9072265625}, {"start": 589.69, "end": 590.47, "word": " want", "probability": 0.599609375}, {"start": 590.47, "end": 590.61, "word": " to", "probability": 0.9677734375}, {"start": 590.61, "end": 590.91, "word": " know", "probability": 0.89697265625}, {"start": 
590.91, "end": 591.41, "word": " what", "probability": 0.82666015625}, {"start": 591.41, "end": 591.87, "word": " are", "probability": 0.890625}, {"start": 591.87, "end": 592.71, "word": " the", "probability": 0.919921875}, {"start": 592.71, "end": 593.41, "word": " opinions", "probability": 0.9208984375}, {"start": 593.41, "end": 594.37, "word": " of", "probability": 0.96533203125}, {"start": 594.37, "end": 594.59, "word": " the", "probability": 0.90478515625}, {"start": 594.59, "end": 595.13, "word": " smokers", "probability": 0.968505859375}, {"start": 595.13, "end": 595.83, "word": " about", "probability": 0.90576171875}, {"start": 595.83, "end": 596.27, "word": " smoking.", "probability": 0.931640625}, {"start": 597.57, "end": 597.91, "word": " So", "probability": 0.94287109375}, {"start": 597.91, "end": 598.51, "word": " my", "probability": 0.85009765625}, {"start": 598.51, "end": 599.09, "word": " population", "probability": 0.8486328125}, {"start": 599.09, "end": 599.53, "word": " now", "probability": 0.93310546875}, {"start": 599.53, "end": 599.91, "word": " is", "probability": 0.86962890625}, {"start": 599.91, "end": 600.53, "word": " just...", "probability": 0.50970458984375}], "temperature": 1.0}, {"id": 25, "seek": 63241, "start": 614.03, "end": 632.41, "text": " So that's my thing. 
I suppose I talk to a field that has one atom.", "tokens": [407, 300, 311, 452, 551, 13, 286, 7297, 286, 751, 281, 257, 2519, 300, 575, 472, 12018, 13], "avg_logprob": -0.4305098778323123, "compression_ratio": 0.9710144927536232, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 614.0300000000001, "end": 615.4300000000001, "word": " So", "probability": 0.09857177734375}, {"start": 615.4300000000001, "end": 616.83, "word": " that's", "probability": 0.88037109375}, {"start": 616.83, "end": 617.37, "word": " my", "probability": 0.90234375}, {"start": 617.37, "end": 619.39, "word": " thing.", "probability": 0.33935546875}, {"start": 621.01, "end": 621.47, "word": " I", "probability": 0.52587890625}, {"start": 621.47, "end": 622.31, "word": " suppose", "probability": 0.84765625}, {"start": 622.31, "end": 624.63, "word": " I", "probability": 0.80126953125}, {"start": 624.63, "end": 627.09, "word": " talk", "probability": 0.61474609375}, {"start": 627.09, "end": 627.73, "word": " to", "probability": 0.9375}, {"start": 627.73, "end": 628.01, "word": " a", "probability": 0.84375}, {"start": 628.01, "end": 628.45, "word": " field", "probability": 0.560546875}, {"start": 628.45, "end": 628.97, "word": " that", "probability": 0.9365234375}, {"start": 628.97, "end": 629.75, "word": " has", "probability": 0.94677734375}, {"start": 629.75, "end": 631.55, "word": " one", "probability": 0.85009765625}, {"start": 631.55, "end": 632.41, "word": " atom.", "probability": 0.6259765625}], "temperature": 1.0}, {"id": 26, "seek": 67046, "start": 640.78, "end": 670.46, "text": " Oh my goodness. They are very different things. The first one consists of only smokers. They are very interested in you. The other one consists of... Anonymous. I thought maybe... Smoker or non-smokers. 
For this reason, you will get...", "tokens": [876, 452, 8387, 13, 814, 366, 588, 819, 721, 13, 440, 700, 472, 14689, 295, 787, 32073, 433, 13, 814, 366, 588, 3102, 294, 291, 13, 440, 661, 472, 14689, 295, 485, 1107, 18092, 13, 286, 1194, 1310, 485, 3915, 16722, 420, 2107, 12, 10817, 453, 433, 13, 1171, 341, 1778, 11, 291, 486, 483, 485], "avg_logprob": -0.5350877276638097, "compression_ratio": 1.542483660130719, "no_speech_prob": 1.4901161193847656e-06, "words": [{"start": 640.78, "end": 641.2, "word": " Oh", "probability": 0.264892578125}, {"start": 641.2, "end": 641.5, "word": " my", "probability": 0.87158203125}, {"start": 641.5, "end": 642.22, "word": " goodness.", "probability": 0.54443359375}, {"start": 643.48, "end": 643.72, "word": " They", "probability": 0.3232421875}, {"start": 643.72, "end": 643.84, "word": " are", "probability": 0.32470703125}, {"start": 643.84, "end": 644.02, "word": " very", "probability": 0.09442138671875}, {"start": 644.02, "end": 644.96, "word": " different", "probability": 0.66943359375}, {"start": 644.96, "end": 646.04, "word": " things.", "probability": 0.36181640625}, {"start": 647.7, "end": 648.02, "word": " The", "probability": 0.86865234375}, {"start": 648.02, "end": 648.56, "word": " first", "probability": 0.8935546875}, {"start": 648.56, "end": 649.1, "word": " one", "probability": 0.91943359375}, {"start": 649.1, "end": 649.84, "word": " consists", "probability": 0.7392578125}, {"start": 649.84, "end": 650.38, "word": " of", "probability": 0.9609375}, {"start": 650.38, "end": 650.8, "word": " only", "probability": 0.1815185546875}, {"start": 650.8, "end": 652.08, "word": " smokers.", "probability": 0.957275390625}, {"start": 653.18, "end": 653.62, "word": " They", "probability": 0.63720703125}, {"start": 653.62, "end": 653.72, "word": " are", "probability": 0.580078125}, {"start": 653.72, "end": 654.16, "word": " very", "probability": 0.238037109375}, {"start": 654.16, "end": 655.1, "word": " interested", "probability": 
0.286865234375}, {"start": 655.1, "end": 655.74, "word": " in", "probability": 0.87109375}, {"start": 655.74, "end": 655.96, "word": " you.", "probability": 0.72802734375}, {"start": 656.24, "end": 656.44, "word": " The", "probability": 0.8828125}, {"start": 656.44, "end": 656.86, "word": " other", "probability": 0.8857421875}, {"start": 656.86, "end": 657.34, "word": " one", "probability": 0.91796875}, {"start": 657.34, "end": 658.1, "word": " consists", "probability": 0.8330078125}, {"start": 658.1, "end": 659.7, "word": " of...", "probability": 0.52691650390625}, {"start": 659.7, "end": 661.12, "word": " Anonymous.", "probability": 0.315948486328125}, {"start": 662.18, "end": 662.44, "word": " I", "probability": 0.525390625}, {"start": 662.44, "end": 662.72, "word": " thought", "probability": 0.477783203125}, {"start": 662.72, "end": 664.48, "word": " maybe...", "probability": 0.727294921875}, {"start": 664.48, "end": 665.98, "word": " Smoker", "probability": 0.75634765625}, {"start": 665.98, "end": 666.36, "word": " or", "probability": 0.1744384765625}, {"start": 666.36, "end": 666.56, "word": " non", "probability": 0.78369140625}, {"start": 666.56, "end": 667.58, "word": "-smokers.", "probability": 0.8355712890625}, {"start": 667.78, "end": 668.06, "word": " For", "probability": 0.9501953125}, {"start": 668.06, "end": 668.56, "word": " this", "probability": 0.92578125}, {"start": 668.56, "end": 669.14, "word": " reason,", "probability": 0.9736328125}, {"start": 669.66, "end": 669.8, "word": " you", "probability": 0.662109375}, {"start": 669.8, "end": 670.02, "word": " will", "probability": 0.8974609375}, {"start": 670.02, "end": 670.46, "word": " get...", "probability": 0.870361328125}], "temperature": 1.0}, {"id": 27, "seek": 70627, "start": 677.41, "end": 706.27, "text": " Conclusion, different results. So now, the sampling frame is a listing of items that make up the entire population. Let's move to the types of samples. 
Mainly there are two types of sampling. One is cold.", "tokens": [18200, 6485, 11, 819, 3542, 13, 407, 586, 11, 264, 21179, 3920, 307, 257, 22161, 295, 4754, 300, 652, 493, 264, 2302, 4415, 13, 961, 311, 1286, 281, 264, 3467, 295, 10938, 13, 47468, 456, 366, 732, 3467, 295, 21179, 13, 1485, 307, 3554, 13], "avg_logprob": -0.21467390753652738, "compression_ratio": 1.4137931034482758, "no_speech_prob": 0.0, "words": [{"start": 677.41, "end": 678.01, "word": " Conclusion,", "probability": 0.71533203125}, {"start": 678.23, "end": 678.49, "word": " different", "probability": 0.84130859375}, {"start": 678.49, "end": 679.35, "word": " results.", "probability": 0.8828125}, {"start": 682.09, "end": 682.81, "word": " So", "probability": 0.102294921875}, {"start": 682.81, "end": 688.85, "word": " now,", "probability": 0.85546875}, {"start": 689.19, "end": 689.37, "word": " the", "probability": 0.857421875}, {"start": 689.37, "end": 690.07, "word": " sampling", "probability": 0.7958984375}, {"start": 690.07, "end": 690.55, "word": " frame", "probability": 0.8896484375}, {"start": 690.55, "end": 691.31, "word": " is", "probability": 0.92431640625}, {"start": 691.31, "end": 691.45, "word": " a", "probability": 0.97021484375}, {"start": 691.45, "end": 691.69, "word": " listing", "probability": 0.92431640625}, {"start": 691.69, "end": 691.85, "word": " of", "probability": 0.97119140625}, {"start": 691.85, "end": 692.41, "word": " items", "probability": 0.80908203125}, {"start": 692.41, "end": 693.29, "word": " that", "probability": 0.91357421875}, {"start": 693.29, "end": 693.61, "word": " make", "probability": 0.9072265625}, {"start": 693.61, "end": 693.93, "word": " up", "probability": 0.97119140625}, {"start": 693.93, "end": 694.29, "word": " the", "probability": 0.9228515625}, {"start": 694.29, "end": 694.81, "word": " entire", "probability": 0.87109375}, {"start": 694.81, "end": 695.71, "word": " population.", "probability": 0.96435546875}, {"start": 697.23, "end": 697.61, 
"word": " Let's", "probability": 0.9619140625}, {"start": 697.61, "end": 697.91, "word": " move", "probability": 0.94482421875}, {"start": 697.91, "end": 698.27, "word": " to", "probability": 0.96337890625}, {"start": 698.27, "end": 698.61, "word": " the", "probability": 0.9208984375}, {"start": 698.61, "end": 699.51, "word": " types", "probability": 0.8359375}, {"start": 699.51, "end": 700.57, "word": " of", "probability": 0.97021484375}, {"start": 700.57, "end": 701.57, "word": " samples.", "probability": 0.77783203125}, {"start": 703.17, "end": 703.89, "word": " Mainly", "probability": 0.7900390625}, {"start": 703.89, "end": 704.09, "word": " there", "probability": 0.56982421875}, {"start": 704.09, "end": 704.23, "word": " are", "probability": 0.94287109375}, {"start": 704.23, "end": 704.39, "word": " two", "probability": 0.92138671875}, {"start": 704.39, "end": 704.75, "word": " types", "probability": 0.8349609375}, {"start": 704.75, "end": 704.91, "word": " of", "probability": 0.96923828125}, {"start": 704.91, "end": 705.27, "word": " sampling.", "probability": 0.9521484375}, {"start": 705.53, "end": 705.69, "word": " One", "probability": 0.92333984375}, {"start": 705.69, "end": 705.87, "word": " is", "probability": 0.9453125}, {"start": 705.87, "end": 706.27, "word": " cold.", "probability": 0.31396484375}], "temperature": 1.0}, {"id": 28, "seek": 73045, "start": 707.45, "end": 730.45, "text": " Non-probability samples. The other one is called probability samples. The non-probability samples can be divided into two segments. One is called judgment and the other convenience. 
So we have judgment and convenience non-probability samples.", "tokens": [8774, 12, 41990, 2310, 10938, 13, 440, 661, 472, 307, 1219, 8482, 10938, 13, 440, 2107, 12, 41990, 2310, 10938, 393, 312, 6666, 666, 732, 19904, 13, 1485, 307, 1219, 12216, 293, 264, 661, 19283, 13, 407, 321, 362, 12216, 293, 19283, 2107, 12, 41990, 2310, 10938, 13], "avg_logprob": -0.23612882166492696, "compression_ratio": 1.975609756097561, "no_speech_prob": 0.0, "words": [{"start": 707.45, "end": 708.01, "word": " Non", "probability": 0.1455078125}, {"start": 708.01, "end": 708.55, "word": "-probability", "probability": 0.7833658854166666}, {"start": 708.55, "end": 709.07, "word": " samples.", "probability": 0.853515625}, {"start": 710.37, "end": 710.53, "word": " The", "probability": 0.8330078125}, {"start": 710.53, "end": 710.71, "word": " other", "probability": 0.8212890625}, {"start": 710.71, "end": 710.91, "word": " one", "probability": 0.8662109375}, {"start": 710.91, "end": 711.09, "word": " is", "probability": 0.93310546875}, {"start": 711.09, "end": 711.43, "word": " called", "probability": 0.8564453125}, {"start": 711.43, "end": 712.05, "word": " probability", "probability": 0.744140625}, {"start": 712.05, "end": 712.61, "word": " samples.", "probability": 0.759765625}, {"start": 714.19, "end": 714.65, "word": " The", "probability": 0.84375}, {"start": 714.65, "end": 715.07, "word": " non", "probability": 0.9365234375}, {"start": 715.07, "end": 715.55, "word": "-probability", "probability": 0.9454752604166666}, {"start": 715.55, "end": 716.25, "word": " samples", "probability": 0.8662109375}, {"start": 716.25, "end": 718.25, "word": " can", "probability": 0.87060546875}, {"start": 718.25, "end": 718.39, "word": " be", "probability": 0.953125}, {"start": 718.39, "end": 718.77, "word": " divided", "probability": 0.81884765625}, {"start": 718.77, "end": 719.11, "word": " into", "probability": 0.76806640625}, {"start": 719.11, "end": 719.79, "word": " two", "probability": 
0.88623046875}, {"start": 719.79, "end": 721.01, "word": " segments.", "probability": 0.88720703125}, {"start": 721.23, "end": 721.35, "word": " One", "probability": 0.90380859375}, {"start": 721.35, "end": 721.53, "word": " is", "probability": 0.93994140625}, {"start": 721.53, "end": 721.85, "word": " called", "probability": 0.873046875}, {"start": 721.85, "end": 722.73, "word": " judgment", "probability": 0.67578125}, {"start": 722.73, "end": 723.73, "word": " and", "probability": 0.71142578125}, {"start": 723.73, "end": 723.85, "word": " the", "probability": 0.619140625}, {"start": 723.85, "end": 724.03, "word": " other", "probability": 0.88232421875}, {"start": 724.03, "end": 725.09, "word": " convenience.", "probability": 0.537109375}, {"start": 726.55, "end": 726.95, "word": " So", "probability": 0.88916015625}, {"start": 726.95, "end": 727.07, "word": " we", "probability": 0.59375}, {"start": 727.07, "end": 727.23, "word": " have", "probability": 0.9443359375}, {"start": 727.23, "end": 727.59, "word": " judgment", "probability": 0.83984375}, {"start": 727.59, "end": 728.03, "word": " and", "probability": 0.93017578125}, {"start": 728.03, "end": 728.71, "word": " convenience", "probability": 0.9501953125}, {"start": 728.71, "end": 729.49, "word": " non", "probability": 0.85693359375}, {"start": 729.49, "end": 729.93, "word": "-probability", "probability": 0.9365234375}, {"start": 729.93, "end": 730.45, "word": " samples.", "probability": 0.8408203125}], "temperature": 1.0}, {"id": 29, "seek": 75520, "start": 731.68, "end": 755.2, "text": " The other type which is random probability samples has four segments or four parts. The first one is called simple random sample. The other one is systematic. The second one is systematic random sample. The third one is certified. The fourth one cluster random sample. 
So there are two types of sampling.", "tokens": [440, 661, 2010, 597, 307, 4974, 8482, 10938, 575, 1451, 19904, 420, 1451, 3166, 13, 440, 700, 472, 307, 1219, 2199, 4974, 6889, 13, 440, 661, 472, 307, 27249, 13, 440, 1150, 472, 307, 27249, 4974, 6889, 13, 440, 2636, 472, 307, 18580, 13, 440, 6409, 472, 13630, 4974, 6889, 13, 407, 456, 366, 732, 3467, 295, 21179, 13], "avg_logprob": -0.2006510466337204, "compression_ratio": 1.8711656441717792, "no_speech_prob": 0.0, "words": [{"start": 731.68, "end": 731.92, "word": " The", "probability": 0.80517578125}, {"start": 731.92, "end": 732.18, "word": " other", "probability": 0.90234375}, {"start": 732.18, "end": 732.5, "word": " type", "probability": 0.95263671875}, {"start": 732.5, "end": 732.74, "word": " which", "probability": 0.55810546875}, {"start": 732.74, "end": 733.14, "word": " is", "probability": 0.9375}, {"start": 733.14, "end": 733.86, "word": " random", "probability": 0.1865234375}, {"start": 733.86, "end": 734.34, "word": " probability", "probability": 0.93701171875}, {"start": 734.34, "end": 735.06, "word": " samples", "probability": 0.8291015625}, {"start": 735.06, "end": 735.64, "word": " has", "probability": 0.84716796875}, {"start": 735.64, "end": 736.14, "word": " four", "probability": 0.87890625}, {"start": 736.14, "end": 736.66, "word": " segments", "probability": 0.9013671875}, {"start": 736.66, "end": 737.56, "word": " or", "probability": 0.56689453125}, {"start": 737.56, "end": 737.8, "word": " four", "probability": 0.95556640625}, {"start": 737.8, "end": 738.24, "word": " parts.", "probability": 0.80810546875}, {"start": 739.9, "end": 740.12, "word": " The", "probability": 0.74267578125}, {"start": 740.12, "end": 740.4, "word": " first", "probability": 0.88525390625}, {"start": 740.4, "end": 740.58, "word": " one", "probability": 0.92919921875}, {"start": 740.58, "end": 740.72, "word": " is", "probability": 0.8916015625}, {"start": 740.72, "end": 740.98, "word": " called", "probability": 
0.89697265625}, {"start": 740.98, "end": 741.34, "word": " simple", "probability": 0.791015625}, {"start": 741.34, "end": 741.68, "word": " random", "probability": 0.8544921875}, {"start": 741.68, "end": 742.08, "word": " sample.", "probability": 0.76806640625}, {"start": 743.28, "end": 743.96, "word": " The", "probability": 0.84033203125}, {"start": 743.96, "end": 744.2, "word": " other", "probability": 0.86279296875}, {"start": 744.2, "end": 744.42, "word": " one", "probability": 0.90771484375}, {"start": 744.42, "end": 744.54, "word": " is", "probability": 0.480712890625}, {"start": 744.54, "end": 744.98, "word": " systematic.", "probability": 0.9384765625}, {"start": 745.28, "end": 745.46, "word": " The", "probability": 0.8818359375}, {"start": 745.46, "end": 745.68, "word": " second", "probability": 0.85791015625}, {"start": 745.68, "end": 745.92, "word": " one", "probability": 0.921875}, {"start": 745.92, "end": 746.04, "word": " is", "probability": 0.91796875}, {"start": 746.04, "end": 746.44, "word": " systematic", "probability": 0.9033203125}, {"start": 746.44, "end": 746.84, "word": " random", "probability": 0.83203125}, {"start": 746.84, "end": 747.26, "word": " sample.", "probability": 0.826171875}, {"start": 747.94, "end": 748.08, "word": " The", "probability": 0.87841796875}, {"start": 748.08, "end": 748.34, "word": " third", "probability": 0.93798828125}, {"start": 748.34, "end": 748.54, "word": " one", "probability": 0.92236328125}, {"start": 748.54, "end": 748.68, "word": " is", "probability": 0.92578125}, {"start": 748.68, "end": 749.16, "word": " certified.", "probability": 0.953125}, {"start": 750.66, "end": 751.1, "word": " The", "probability": 0.57177734375}, {"start": 751.1, "end": 751.44, "word": " fourth", "probability": 0.91845703125}, {"start": 751.44, "end": 751.74, "word": " one", "probability": 0.92529296875}, {"start": 751.74, "end": 752.24, "word": " cluster", "probability": 0.414794921875}, {"start": 752.24, "end": 752.6, "word": " 
random", "probability": 0.802734375}, {"start": 752.6, "end": 752.94, "word": " sample.", "probability": 0.80078125}, {"start": 753.46, "end": 753.74, "word": " So", "probability": 0.9453125}, {"start": 753.74, "end": 753.98, "word": " there", "probability": 0.73046875}, {"start": 753.98, "end": 754.16, "word": " are", "probability": 0.93896484375}, {"start": 754.16, "end": 754.36, "word": " two", "probability": 0.9365234375}, {"start": 754.36, "end": 754.7, "word": " types", "probability": 0.83154296875}, {"start": 754.7, "end": 754.86, "word": " of", "probability": 0.9638671875}, {"start": 754.86, "end": 755.2, "word": " sampling.", "probability": 0.9580078125}], "temperature": 1.0}, {"id": 30, "seek": 78163, "start": 756.93, "end": 781.63, "text": " Probability and non-probability. Non-probability has four methods here, simple random samples, systematic, stratified, and cluster. And the non-probability samples has two types, judgment and convenience. Let's see the definition of each type of samples. 
Let's start with non-probability sample.", "tokens": [8736, 2310, 293, 2107, 12, 41990, 2310, 13, 8774, 12, 41990, 2310, 575, 1451, 7150, 510, 11, 2199, 4974, 10938, 11, 27249, 11, 23674, 2587, 11, 293, 13630, 13, 400, 264, 2107, 12, 41990, 2310, 10938, 575, 732, 3467, 11, 12216, 293, 19283, 13, 961, 311, 536, 264, 7123, 295, 1184, 2010, 295, 10938, 13, 961, 311, 722, 365, 2107, 12, 41990, 2310, 6889, 13], "avg_logprob": -0.20395359871062366, "compression_ratio": 1.7514792899408285, "no_speech_prob": 0.0, "words": [{"start": 756.93, "end": 757.77, "word": " Probability", "probability": 0.715576171875}, {"start": 757.77, "end": 758.61, "word": " and", "probability": 0.763671875}, {"start": 758.61, "end": 758.85, "word": " non", "probability": 0.497802734375}, {"start": 758.85, "end": 759.41, "word": "-probability.", "probability": 0.8307291666666666}, {"start": 760.07, "end": 760.27, "word": " Non", "probability": 0.904296875}, {"start": 760.27, "end": 760.69, "word": "-probability", "probability": 0.94921875}, {"start": 760.69, "end": 761.07, "word": " has", "probability": 0.92138671875}, {"start": 761.07, "end": 761.49, "word": " four", "probability": 0.7724609375}, {"start": 761.49, "end": 762.37, "word": " methods", "probability": 0.8896484375}, {"start": 762.37, "end": 762.81, "word": " here,", "probability": 0.8193359375}, {"start": 763.45, "end": 763.87, "word": " simple", "probability": 0.5498046875}, {"start": 763.87, "end": 764.21, "word": " random", "probability": 0.7724609375}, {"start": 764.21, "end": 764.65, "word": " samples,", "probability": 0.560546875}, {"start": 764.89, "end": 765.35, "word": " systematic,", "probability": 0.90380859375}, {"start": 765.53, "end": 766.15, "word": " stratified,", "probability": 0.966796875}, {"start": 766.31, "end": 766.33, "word": " and", "probability": 0.92431640625}, {"start": 766.33, "end": 766.73, "word": " cluster.", "probability": 0.6982421875}, {"start": 767.41, "end": 767.77, "word": " And", "probability": 
0.85791015625}, {"start": 767.77, "end": 767.93, "word": " the", "probability": 0.430908203125}, {"start": 767.93, "end": 768.11, "word": " non", "probability": 0.9677734375}, {"start": 768.11, "end": 768.53, "word": "-probability", "probability": 0.96240234375}, {"start": 768.53, "end": 769.03, "word": " samples", "probability": 0.75244140625}, {"start": 769.03, "end": 769.31, "word": " has", "probability": 0.83349609375}, {"start": 769.31, "end": 769.61, "word": " two", "probability": 0.93115234375}, {"start": 769.61, "end": 770.15, "word": " types,", "probability": 0.828125}, {"start": 770.65, "end": 770.95, "word": " judgment", "probability": 0.73681640625}, {"start": 770.95, "end": 772.01, "word": " and", "probability": 0.7255859375}, {"start": 772.01, "end": 773.09, "word": " convenience.", "probability": 0.951171875}, {"start": 773.67, "end": 774.03, "word": " Let's", "probability": 0.9404296875}, {"start": 774.03, "end": 774.19, "word": " see", "probability": 0.921875}, {"start": 774.19, "end": 774.39, "word": " the", "probability": 0.9189453125}, {"start": 774.39, "end": 774.77, "word": " definition", "probability": 0.92822265625}, {"start": 774.77, "end": 775.17, "word": " of", "probability": 0.95751953125}, {"start": 775.17, "end": 775.79, "word": " each", "probability": 0.93408203125}, {"start": 775.79, "end": 776.19, "word": " type", "probability": 0.97802734375}, {"start": 776.19, "end": 778.01, "word": " of", "probability": 0.95166015625}, {"start": 778.01, "end": 778.49, "word": " samples.", "probability": 0.8359375}, {"start": 779.19, "end": 779.71, "word": " Let's", "probability": 0.96533203125}, {"start": 779.71, "end": 780.25, "word": " start", "probability": 0.923828125}, {"start": 780.25, "end": 780.51, "word": " with", "probability": 0.88671875}, {"start": 780.51, "end": 780.83, "word": " non", "probability": 0.8798828125}, {"start": 780.83, "end": 781.23, "word": "-probability", "probability": 0.9697265625}, {"start": 781.23, "end": 781.63, 
"word": " sample.", "probability": 0.70751953125}], "temperature": 1.0}, {"id": 31, "seek": 81030, "start": 782.88, "end": 810.3, "text": " In non-probability sample, items included or chosen without regard to their probability of occurrence. So that's the definition of non-probability. For example. So again, non-probability sample, it means you select items without regard to their probability of occurrence.", "tokens": [682, 2107, 12, 41990, 2310, 6889, 11, 4754, 5556, 420, 8614, 1553, 3843, 281, 641, 8482, 295, 36122, 13, 407, 300, 311, 264, 7123, 295, 2107, 12, 41990, 2310, 13, 1171, 1365, 13, 407, 797, 11, 2107, 12, 41990, 2310, 6889, 11, 309, 1355, 291, 3048, 4754, 1553, 3843, 281, 641, 8482, 295, 36122, 13], "avg_logprob": -0.18373325201017515, "compression_ratio": 1.929078014184397, "no_speech_prob": 0.0, "words": [{"start": 782.88, "end": 783.48, "word": " In", "probability": 0.50341796875}, {"start": 783.48, "end": 783.72, "word": " non", "probability": 0.5693359375}, {"start": 783.72, "end": 784.12, "word": "-probability", "probability": 0.84521484375}, {"start": 784.12, "end": 784.52, "word": " sample,", "probability": 0.501953125}, {"start": 784.84, "end": 785.3, "word": " items", "probability": 0.73291015625}, {"start": 785.3, "end": 785.84, "word": " included", "probability": 0.77978515625}, {"start": 785.84, "end": 786.42, "word": " or", "probability": 0.85009765625}, {"start": 786.42, "end": 787.0, "word": " chosen", "probability": 0.9501953125}, {"start": 787.0, "end": 787.72, "word": " without", "probability": 0.74072265625}, {"start": 787.72, "end": 789.32, "word": " regard", "probability": 0.9765625}, {"start": 789.32, "end": 789.58, "word": " to", "probability": 0.96533203125}, {"start": 789.58, "end": 789.84, "word": " their", "probability": 0.953125}, {"start": 789.84, "end": 790.22, "word": " probability", "probability": 0.92626953125}, {"start": 790.22, "end": 790.46, "word": " of", "probability": 0.96923828125}, {"start": 790.46, "end": 
790.8, "word": " occurrence.", "probability": 0.9228515625}, {"start": 791.76, "end": 792.48, "word": " So", "probability": 0.63671875}, {"start": 792.48, "end": 792.7, "word": " that's", "probability": 0.820556640625}, {"start": 792.7, "end": 792.86, "word": " the", "probability": 0.89990234375}, {"start": 792.86, "end": 793.28, "word": " definition", "probability": 0.94384765625}, {"start": 793.28, "end": 793.64, "word": " of", "probability": 0.9599609375}, {"start": 793.64, "end": 793.84, "word": " non", "probability": 0.92724609375}, {"start": 793.84, "end": 794.36, "word": "-probability.", "probability": 0.9375}, {"start": 794.6, "end": 794.74, "word": " For", "probability": 0.81640625}, {"start": 794.74, "end": 795.1, "word": " example.", "probability": 0.9599609375}, {"start": 803.66, "end": 804.38, "word": " So", "probability": 0.87109375}, {"start": 804.38, "end": 804.68, "word": " again,", "probability": 0.8486328125}, {"start": 804.9, "end": 805.06, "word": " non", "probability": 0.59326171875}, {"start": 805.06, "end": 805.46, "word": "-probability", "probability": 0.9695638020833334}, {"start": 805.46, "end": 805.86, "word": " sample,", "probability": 0.8232421875}, {"start": 805.98, "end": 806.08, "word": " it", "probability": 0.90087890625}, {"start": 806.08, "end": 806.32, "word": " means", "probability": 0.93603515625}, {"start": 806.32, "end": 806.48, "word": " you", "probability": 0.92431640625}, {"start": 806.48, "end": 806.88, "word": " select", "probability": 0.84228515625}, {"start": 806.88, "end": 807.52, "word": " items", "probability": 0.6328125}, {"start": 807.52, "end": 808.3, "word": " without", "probability": 0.89599609375}, {"start": 808.3, "end": 808.76, "word": " regard", "probability": 0.9794921875}, {"start": 808.76, "end": 808.98, "word": " to", "probability": 0.96826171875}, {"start": 808.98, "end": 809.22, "word": " their", "probability": 0.94970703125}, {"start": 809.22, "end": 809.58, "word": " probability", "probability": 
0.931640625}, {"start": 809.58, "end": 809.94, "word": " of", "probability": 0.97119140625}, {"start": 809.94, "end": 810.3, "word": " occurrence.", "probability": 0.9482421875}], "temperature": 1.0}, {"id": 32, "seek": 83661, "start": 811.47, "end": 836.61, "text": " For example, suppose females consist of 70% of IUG students and males, the remaining percent is 30%. And suppose I decided to select a sample of 100 or 1000 students from IUG.", "tokens": [1171, 1365, 11, 7297, 21529, 4603, 295, 5285, 4, 295, 44218, 38, 1731, 293, 20776, 11, 264, 8877, 3043, 307, 2217, 6856, 400, 7297, 286, 3047, 281, 3048, 257, 6889, 295, 2319, 420, 9714, 1731, 490, 44218, 38, 13], "avg_logprob": -0.21269531026482583, "compression_ratio": 1.3134328358208955, "no_speech_prob": 0.0, "words": [{"start": 811.47, "end": 811.71, "word": " For", "probability": 0.8779296875}, {"start": 811.71, "end": 812.05, "word": " example,", "probability": 0.9716796875}, {"start": 812.19, "end": 812.57, "word": " suppose", "probability": 0.84716796875}, {"start": 812.57, "end": 814.03, "word": " females", "probability": 0.464599609375}, {"start": 814.03, "end": 815.99, "word": " consist", "probability": 0.74951171875}, {"start": 815.99, "end": 816.55, "word": " of", "probability": 0.95849609375}, {"start": 816.55, "end": 817.03, "word": " 70", "probability": 0.90283203125}, {"start": 817.03, "end": 817.65, "word": "%", "probability": 0.8642578125}, {"start": 817.65, "end": 818.19, "word": " of", "probability": 0.97119140625}, {"start": 818.19, "end": 818.59, "word": " IUG", "probability": 0.646728515625}, {"start": 818.59, "end": 819.17, "word": " students", "probability": 0.9619140625}, {"start": 819.17, "end": 820.35, "word": " and", "probability": 0.56787109375}, {"start": 820.35, "end": 820.95, "word": " males,", "probability": 0.93017578125}, {"start": 822.15, "end": 822.43, "word": " the", "probability": 0.861328125}, {"start": 822.43, "end": 822.75, "word": " remaining", "probability": 
0.88427734375}, {"start": 822.75, "end": 823.23, "word": " percent", "probability": 0.67724609375}, {"start": 823.23, "end": 823.49, "word": " is", "probability": 0.430419921875}, {"start": 823.49, "end": 824.99, "word": " 30%.", "probability": 0.866455078125}, {"start": 824.99, "end": 827.15, "word": " And", "probability": 0.8828125}, {"start": 827.15, "end": 827.55, "word": " suppose", "probability": 0.91064453125}, {"start": 827.55, "end": 829.23, "word": " I", "probability": 0.84130859375}, {"start": 829.23, "end": 829.69, "word": " decided", "probability": 0.85791015625}, {"start": 829.69, "end": 829.93, "word": " to", "probability": 0.96435546875}, {"start": 829.93, "end": 830.25, "word": " select", "probability": 0.81103515625}, {"start": 830.25, "end": 830.45, "word": " a", "probability": 0.9775390625}, {"start": 830.45, "end": 830.69, "word": " sample", "probability": 0.92431640625}, {"start": 830.69, "end": 831.37, "word": " of", "probability": 0.97021484375}, {"start": 831.37, "end": 832.59, "word": " 100", "probability": 0.853515625}, {"start": 832.59, "end": 833.93, "word": " or", "probability": 0.89404296875}, {"start": 833.93, "end": 834.53, "word": " 1000", "probability": 0.625}, {"start": 834.53, "end": 835.79, "word": " students", "probability": 0.96044921875}, {"start": 835.79, "end": 836.17, "word": " from", "probability": 0.89404296875}, {"start": 836.17, "end": 836.61, "word": " IUG.", "probability": 0.91259765625}], "temperature": 1.0}, {"id": 33, "seek": 86748, "start": 838.62, "end": 867.48, "text": " Suddenly, I have a sample that has 650 males and 350 females. Now, this sample, which has these numbers, for sure does not represent the entire population. 
Because females has 70%, and I took a random sample or a sample", "tokens": [21194, 11, 286, 362, 257, 6889, 300, 575, 38566, 20776, 293, 18065, 21529, 13, 823, 11, 341, 6889, 11, 597, 575, 613, 3547, 11, 337, 988, 775, 406, 2906, 264, 2302, 4415, 13, 1436, 21529, 575, 5285, 8923, 293, 286, 1890, 257, 4974, 6889, 420, 257, 6889], "avg_logprob": -0.17610676990201077, "compression_ratio": 1.4193548387096775, "no_speech_prob": 0.0, "words": [{"start": 838.62, "end": 839.14, "word": " Suddenly,", "probability": 0.61865234375}, {"start": 839.36, "end": 839.48, "word": " I", "probability": 0.9970703125}, {"start": 839.48, "end": 839.68, "word": " have", "probability": 0.93505859375}, {"start": 839.68, "end": 839.88, "word": " a", "probability": 0.982421875}, {"start": 839.88, "end": 840.12, "word": " sample", "probability": 0.89892578125}, {"start": 840.12, "end": 840.9, "word": " that", "probability": 0.93017578125}, {"start": 840.9, "end": 841.52, "word": " has", "probability": 0.9462890625}, {"start": 841.52, "end": 844.48, "word": " 650", "probability": 0.8583984375}, {"start": 844.48, "end": 846.6, "word": " males", "probability": 0.958984375}, {"start": 846.6, "end": 847.98, "word": " and", "probability": 0.880859375}, {"start": 847.98, "end": 848.74, "word": " 350", "probability": 0.9462890625}, {"start": 848.74, "end": 849.84, "word": " females.", "probability": 0.92333984375}, {"start": 851.46, "end": 851.7, "word": " Now,", "probability": 0.8974609375}, {"start": 851.74, "end": 851.94, "word": " this", "probability": 0.9404296875}, {"start": 851.94, "end": 852.46, "word": " sample,", "probability": 0.87548828125}, {"start": 852.9, "end": 853.16, "word": " which", "probability": 0.94921875}, {"start": 853.16, "end": 853.48, "word": " has", "probability": 0.92529296875}, {"start": 853.48, "end": 854.78, "word": " these", "probability": 0.85107421875}, {"start": 854.78, "end": 855.32, "word": " numbers,", "probability": 0.90380859375}, {"start": 857.0, "end": 
857.3, "word": " for", "probability": 0.9287109375}, {"start": 857.3, "end": 857.54, "word": " sure", "probability": 0.9208984375}, {"start": 857.54, "end": 857.8, "word": " does", "probability": 0.81103515625}, {"start": 857.8, "end": 858.08, "word": " not", "probability": 0.94921875}, {"start": 858.08, "end": 858.54, "word": " represent", "probability": 0.85546875}, {"start": 858.54, "end": 858.92, "word": " the", "probability": 0.92138671875}, {"start": 858.92, "end": 859.26, "word": " entire", "probability": 0.90087890625}, {"start": 859.26, "end": 859.76, "word": " population.", "probability": 0.93359375}, {"start": 860.34, "end": 860.84, "word": " Because", "probability": 0.92724609375}, {"start": 860.84, "end": 861.42, "word": " females", "probability": 0.91748046875}, {"start": 861.42, "end": 861.9, "word": " has", "probability": 0.77783203125}, {"start": 861.9, "end": 862.76, "word": " 70%,", "probability": 0.795166015625}, {"start": 862.76, "end": 863.42, "word": " and", "probability": 0.9404296875}, {"start": 863.42, "end": 863.6, "word": " I", "probability": 0.9384765625}, {"start": 863.6, "end": 864.0, "word": " took", "probability": 0.86572265625}, {"start": 864.0, "end": 865.24, "word": " a", "probability": 0.953125}, {"start": 865.24, "end": 865.48, "word": " random", "probability": 0.876953125}, {"start": 865.48, "end": 866.0, "word": " sample", "probability": 0.8828125}, {"start": 866.0, "end": 866.94, "word": " or", "probability": 0.3935546875}, {"start": 866.94, "end": 867.1, "word": " a", "probability": 0.98291015625}, {"start": 867.1, "end": 867.48, "word": " sample", "probability": 0.88916015625}], "temperature": 1.0}, {"id": 34, "seek": 89499, "start": 868.01, "end": 894.99, "text": " of size 350. So this sample is chosen without regard to the probability here. Because in this case, I should choose males with respect to their probability, which is 30%. But in this case, I just choose different proportions. Another example. 
Suppose", "tokens": [295, 2744, 18065, 13, 407, 341, 6889, 307, 8614, 1553, 3843, 281, 264, 8482, 510, 13, 1436, 294, 341, 1389, 11, 286, 820, 2826, 20776, 365, 3104, 281, 641, 8482, 11, 597, 307, 2217, 6856, 583, 294, 341, 1389, 11, 286, 445, 2826, 819, 32482, 13, 3996, 1365, 13, 21360], "avg_logprob": -0.21476716387505626, "compression_ratio": 1.5029940119760479, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 868.01, "end": 868.31, "word": " of", "probability": 0.316162109375}, {"start": 868.31, "end": 868.65, "word": " size", "probability": 0.86962890625}, {"start": 868.65, "end": 869.39, "word": " 350.", "probability": 0.88720703125}, {"start": 870.11, "end": 870.57, "word": " So", "probability": 0.900390625}, {"start": 870.57, "end": 870.89, "word": " this", "probability": 0.77978515625}, {"start": 870.89, "end": 871.31, "word": " sample", "probability": 0.82568359375}, {"start": 871.31, "end": 872.01, "word": " is", "probability": 0.90673828125}, {"start": 872.01, "end": 872.59, "word": " chosen", "probability": 0.91943359375}, {"start": 872.59, "end": 872.95, "word": " without", "probability": 0.89013671875}, {"start": 872.95, "end": 873.73, "word": " regard", "probability": 0.97216796875}, {"start": 873.73, "end": 874.79, "word": " to", "probability": 0.96044921875}, {"start": 874.79, "end": 875.19, "word": " the", "probability": 0.9072265625}, {"start": 875.19, "end": 875.83, "word": " probability", "probability": 0.94482421875}, {"start": 875.83, "end": 876.35, "word": " here.", "probability": 0.791015625}, {"start": 876.81, "end": 877.29, "word": " Because", "probability": 0.876953125}, {"start": 877.29, "end": 877.51, "word": " in", "probability": 0.85888671875}, {"start": 877.51, "end": 877.67, "word": " this", "probability": 0.94775390625}, {"start": 877.67, "end": 877.83, "word": " case,", "probability": 0.9140625}, {"start": 877.89, "end": 877.99, "word": " I", "probability": 0.99169921875}, {"start": 877.99, "end": 878.23, "word": " 
should", "probability": 0.8525390625}, {"start": 878.23, "end": 878.77, "word": " choose", "probability": 0.8310546875}, {"start": 878.77, "end": 880.37, "word": " males", "probability": 0.814453125}, {"start": 880.37, "end": 881.65, "word": " with", "probability": 0.8876953125}, {"start": 881.65, "end": 882.11, "word": " respect", "probability": 0.90869140625}, {"start": 882.11, "end": 882.33, "word": " to", "probability": 0.96728515625}, {"start": 882.33, "end": 882.63, "word": " their", "probability": 0.9111328125}, {"start": 882.63, "end": 883.11, "word": " probability,", "probability": 0.92626953125}, {"start": 883.31, "end": 883.37, "word": " which", "probability": 0.94775390625}, {"start": 883.37, "end": 883.51, "word": " is", "probability": 0.943359375}, {"start": 883.51, "end": 884.11, "word": " 30%.", "probability": 0.814697265625}, {"start": 884.11, "end": 885.29, "word": " But", "probability": 0.9306640625}, {"start": 885.29, "end": 885.69, "word": " in", "probability": 0.92578125}, {"start": 885.69, "end": 885.95, "word": " this", "probability": 0.9482421875}, {"start": 885.95, "end": 886.27, "word": " case,", "probability": 0.908203125}, {"start": 886.33, "end": 886.45, "word": " I", "probability": 0.99755859375}, {"start": 886.45, "end": 886.91, "word": " just", "probability": 0.90087890625}, {"start": 886.91, "end": 887.69, "word": " choose", "probability": 0.7783203125}, {"start": 887.69, "end": 889.33, "word": " different", "probability": 0.86767578125}, {"start": 889.33, "end": 891.15, "word": " proportions.", "probability": 0.77783203125}, {"start": 891.35, "end": 891.57, "word": " Another", "probability": 0.880859375}, {"start": 891.57, "end": 892.09, "word": " example.", "probability": 0.97265625}, {"start": 894.15, "end": 894.99, "word": " Suppose", "probability": 0.64697265625}], "temperature": 1.0}, {"id": 35, "seek": 92660, "start": 897.26, "end": 926.6, "text": " Again, I am talking about smoking. 
And I know that some people are smoking and I just took this sample. So I took this sample based on my knowledge. So it's without regard to their probability. Maybe suppose I am talking about political opinions about something.", "tokens": [3764, 11, 286, 669, 1417, 466, 14055, 13, 400, 286, 458, 300, 512, 561, 366, 14055, 293, 286, 445, 1890, 341, 6889, 13, 407, 286, 1890, 341, 6889, 2361, 322, 452, 3601, 13, 407, 309, 311, 1553, 3843, 281, 641, 8482, 13, 2704, 7297, 286, 669, 1417, 466, 3905, 11819, 466, 746, 13], "avg_logprob": -0.16507522651442774, "compression_ratio": 1.593939393939394, "no_speech_prob": 0.0, "words": [{"start": 897.26, "end": 897.68, "word": " Again,", "probability": 0.83154296875}, {"start": 897.76, "end": 897.86, "word": " I", "probability": 0.9814453125}, {"start": 897.86, "end": 897.96, "word": " am", "probability": 0.599609375}, {"start": 897.96, "end": 898.32, "word": " talking", "probability": 0.8525390625}, {"start": 898.32, "end": 898.84, "word": " about", "probability": 0.9091796875}, {"start": 898.84, "end": 899.92, "word": " smoking.", "probability": 0.921875}, {"start": 905.08, "end": 905.72, "word": " And", "probability": 0.84912109375}, {"start": 905.72, "end": 905.84, "word": " I", "probability": 0.94921875}, {"start": 905.84, "end": 905.98, "word": " know", "probability": 0.88671875}, {"start": 905.98, "end": 906.38, "word": " that", "probability": 0.92919921875}, {"start": 906.38, "end": 907.52, "word": " some", "probability": 0.8759765625}, {"start": 907.52, "end": 907.92, "word": " people", "probability": 0.96875}, {"start": 907.92, "end": 909.24, "word": " are", "probability": 0.9091796875}, {"start": 909.24, "end": 909.62, "word": " smoking", "probability": 0.92724609375}, {"start": 909.62, "end": 909.8, "word": " and", "probability": 0.515625}, {"start": 909.8, "end": 909.94, "word": " I", "probability": 0.421875}, {"start": 909.94, "end": 910.12, "word": " just", "probability": 0.8447265625}, {"start": 910.12, "end": 
910.5, "word": " took", "probability": 0.83251953125}, {"start": 910.5, "end": 910.96, "word": " this", "probability": 0.92333984375}, {"start": 910.96, "end": 911.28, "word": " sample.", "probability": 0.8642578125}, {"start": 912.1, "end": 912.86, "word": " So", "probability": 0.9052734375}, {"start": 912.86, "end": 913.0, "word": " I", "probability": 0.67626953125}, {"start": 913.0, "end": 913.16, "word": " took", "probability": 0.91650390625}, {"start": 913.16, "end": 913.38, "word": " this", "probability": 0.9091796875}, {"start": 913.38, "end": 913.6, "word": " sample", "probability": 0.9150390625}, {"start": 913.6, "end": 913.88, "word": " based", "probability": 0.8994140625}, {"start": 913.88, "end": 914.04, "word": " on", "probability": 0.9501953125}, {"start": 914.04, "end": 914.26, "word": " my", "probability": 0.96923828125}, {"start": 914.26, "end": 914.64, "word": " knowledge.", "probability": 0.91943359375}, {"start": 916.2, "end": 916.62, "word": " So", "probability": 0.9404296875}, {"start": 916.62, "end": 916.98, "word": " it's", "probability": 0.83544921875}, {"start": 916.98, "end": 917.52, "word": " without", "probability": 0.88818359375}, {"start": 917.52, "end": 918.0, "word": " regard", "probability": 0.97216796875}, {"start": 918.0, "end": 918.24, "word": " to", "probability": 0.96630859375}, {"start": 918.24, "end": 918.6, "word": " their", "probability": 0.95849609375}, {"start": 918.6, "end": 919.92, "word": " probability.", "probability": 0.8193359375}, {"start": 921.46, "end": 921.84, "word": " Maybe", "probability": 0.919921875}, {"start": 921.84, "end": 922.26, "word": " suppose", "probability": 0.5517578125}, {"start": 922.26, "end": 922.5, "word": " I", "probability": 0.96484375}, {"start": 922.5, "end": 922.6, "word": " am", "probability": 0.806640625}, {"start": 922.6, "end": 922.86, "word": " talking", "probability": 0.84765625}, {"start": 922.86, "end": 923.34, "word": " about", "probability": 0.90234375}, {"start": 923.34, 
"end": 924.02, "word": " political", "probability": 0.8544921875}, {"start": 924.02, "end": 925.72, "word": " opinions", "probability": 0.91455078125}, {"start": 925.72, "end": 926.1, "word": " about", "probability": 0.6298828125}, {"start": 926.1, "end": 926.6, "word": " something.", "probability": 0.8642578125}], "temperature": 1.0}, {"id": 36, "seek": 95559, "start": 927.53, "end": 955.59, "text": " And I just took the experts of that subject. So my sample is not a probability sample. And this one has, as we mentioned, has two types. One is called convenience sampling. In this case, items are selected based only on the fact that they are easy. So I choose that sample because it's easy.", "tokens": [400, 286, 445, 1890, 264, 8572, 295, 300, 3983, 13, 407, 452, 6889, 307, 406, 257, 8482, 6889, 13, 400, 341, 472, 575, 11, 382, 321, 2835, 11, 575, 732, 3467, 13, 1485, 307, 1219, 19283, 21179, 13, 682, 341, 1389, 11, 4754, 366, 8209, 2361, 787, 322, 264, 1186, 300, 436, 366, 1858, 13, 407, 286, 2826, 300, 6889, 570, 309, 311, 1858, 13], "avg_logprob": -0.17708333378488367, "compression_ratio": 1.5287958115183247, "no_speech_prob": 0.0, "words": [{"start": 927.53, "end": 927.91, "word": " And", "probability": 0.44287109375}, {"start": 927.91, "end": 928.11, "word": " I", "probability": 0.9609375}, {"start": 928.11, "end": 928.33, "word": " just", "probability": 0.8798828125}, {"start": 928.33, "end": 928.67, "word": " took", "probability": 0.5869140625}, {"start": 928.67, "end": 929.79, "word": " the", "probability": 0.8681640625}, {"start": 929.79, "end": 930.67, "word": " experts", "probability": 0.8544921875}, {"start": 930.67, "end": 931.89, "word": " of", "probability": 0.9404296875}, {"start": 931.89, "end": 932.31, "word": " that", "probability": 0.90576171875}, {"start": 932.31, "end": 934.05, "word": " subject.", "probability": 0.96044921875}, {"start": 934.65, "end": 935.29, "word": " So", "probability": 0.94140625}, {"start": 935.29, "end": 935.81, "word": 
" my", "probability": 0.7421875}, {"start": 935.81, "end": 936.09, "word": " sample", "probability": 0.82080078125}, {"start": 936.09, "end": 936.33, "word": " is", "probability": 0.9423828125}, {"start": 936.33, "end": 936.65, "word": " not", "probability": 0.94775390625}, {"start": 936.65, "end": 937.81, "word": " a", "probability": 0.96875}, {"start": 937.81, "end": 938.13, "word": " probability", "probability": 0.9443359375}, {"start": 938.13, "end": 938.61, "word": " sample.", "probability": 0.84228515625}, {"start": 939.55, "end": 939.73, "word": " And", "probability": 0.818359375}, {"start": 939.73, "end": 940.29, "word": " this", "probability": 0.92822265625}, {"start": 940.29, "end": 940.49, "word": " one", "probability": 0.9365234375}, {"start": 940.49, "end": 940.75, "word": " has,", "probability": 0.83984375}, {"start": 941.51, "end": 941.89, "word": " as", "probability": 0.8544921875}, {"start": 941.89, "end": 942.07, "word": " we", "probability": 0.412841796875}, {"start": 942.07, "end": 942.37, "word": " mentioned,", "probability": 0.80810546875}, {"start": 942.45, "end": 942.63, "word": " has", "probability": 0.5556640625}, {"start": 942.63, "end": 942.85, "word": " two", "probability": 0.90234375}, {"start": 942.85, "end": 943.35, "word": " types.", "probability": 0.8232421875}, {"start": 943.61, "end": 943.83, "word": " One", "probability": 0.9150390625}, {"start": 943.83, "end": 943.97, "word": " is", "probability": 0.93115234375}, {"start": 943.97, "end": 944.23, "word": " called", "probability": 0.90673828125}, {"start": 944.23, "end": 944.77, "word": " convenience", "probability": 0.62548828125}, {"start": 944.77, "end": 945.25, "word": " sampling.", "probability": 0.923828125}, {"start": 947.13, "end": 947.77, "word": " In", "probability": 0.96923828125}, {"start": 947.77, "end": 948.01, "word": " this", "probability": 0.94287109375}, {"start": 948.01, "end": 948.31, "word": " case,", "probability": 0.91796875}, {"start": 948.37, "end": 
948.77, "word": " items", "probability": 0.8388671875}, {"start": 948.77, "end": 949.01, "word": " are", "probability": 0.93798828125}, {"start": 949.01, "end": 949.51, "word": " selected", "probability": 0.8779296875}, {"start": 949.51, "end": 949.95, "word": " based", "probability": 0.9228515625}, {"start": 949.95, "end": 950.29, "word": " only", "probability": 0.89208984375}, {"start": 950.29, "end": 950.45, "word": " on", "probability": 0.94921875}, {"start": 950.45, "end": 950.61, "word": " the", "probability": 0.919921875}, {"start": 950.61, "end": 950.87, "word": " fact", "probability": 0.90625}, {"start": 950.87, "end": 951.17, "word": " that", "probability": 0.51220703125}, {"start": 951.17, "end": 951.49, "word": " they", "probability": 0.83056640625}, {"start": 951.49, "end": 951.71, "word": " are", "probability": 0.9130859375}, {"start": 951.71, "end": 952.09, "word": " easy.", "probability": 0.9111328125}, {"start": 953.11, "end": 953.37, "word": " So", "probability": 0.95751953125}, {"start": 953.37, "end": 953.57, "word": " I", "probability": 0.9140625}, {"start": 953.57, "end": 953.91, "word": " choose", "probability": 0.6650390625}, {"start": 953.91, "end": 954.19, "word": " that", "probability": 0.91455078125}, {"start": 954.19, "end": 954.51, "word": " sample", "probability": 0.89501953125}, {"start": 954.51, "end": 954.91, "word": " because", "probability": 0.86376953125}, {"start": 954.91, "end": 955.23, "word": " it's", "probability": 0.942138671875}, {"start": 955.23, "end": 955.59, "word": " easy.", "probability": 0.9140625}], "temperature": 1.0}, {"id": 37, "seek": 97683, "start": 957.09, "end": 976.83, "text": " Inexpensive, inexpensive, or convenient to sample. 
If I choose my sample because it is easy or inexpensive, I think it doesn't make any sense, because easy.", "tokens": [682, 27409, 11, 28382, 11, 420, 10851, 281, 6889, 13, 759, 286, 2826, 452, 6889, 570, 309, 307, 1858, 420, 28382, 11, 286, 519, 309, 1177, 380, 652, 604, 2020, 11, 570, 1858, 13], "avg_logprob": -0.28080357142857143, "compression_ratio": 1.4537037037037037, "no_speech_prob": 0.0, "words": [{"start": 957.09, "end": 957.69, "word": " Inexpensive,", "probability": 0.72998046875}, {"start": 962.19, "end": 962.19, "word": " inexpensive,", "probability": 0.45068359375}, {"start": 964.77, "end": 967.09, "word": " or", "probability": 0.9033203125}, {"start": 967.09, "end": 967.69, "word": " convenient", "probability": 0.9072265625}, {"start": 967.69, "end": 967.97, "word": " to", "probability": 0.96533203125}, {"start": 967.97, "end": 968.23, "word": " sample.", "probability": 0.85791015625}, {"start": 968.75, "end": 969.35, "word": " If", "probability": 0.9619140625}, {"start": 969.35, "end": 969.53, "word": " I", "probability": 0.9921875}, {"start": 969.53, "end": 969.79, "word": " choose", "probability": 0.7890625}, {"start": 969.79, "end": 970.03, "word": " my", "probability": 0.896484375}, {"start": 970.03, "end": 970.33, "word": " sample", "probability": 0.8701171875}, {"start": 970.33, "end": 970.69, "word": " because", "probability": 0.87353515625}, {"start": 970.69, "end": 970.91, "word": " it", "probability": 0.94921875}, {"start": 970.91, "end": 971.25, "word": " is", "probability": 0.74755859375}, {"start": 971.25, "end": 971.57, "word": " easy", "probability": 0.8984375}, {"start": 971.57, "end": 971.77, "word": " or", "probability": 0.93115234375}, {"start": 971.77, "end": 972.85, "word": " inexpensive,", "probability": 0.410888671875}, {"start": 973.31, "end": 973.43, "word": " I", "probability": 0.966796875}, {"start": 973.43, "end": 973.67, "word": " think", "probability": 0.9189453125}, {"start": 973.67, "end": 973.93, "word": " it", 
"probability": 0.85888671875}, {"start": 973.93, "end": 974.21, "word": " doesn't", "probability": 0.8828125}, {"start": 974.21, "end": 974.41, "word": " make", "probability": 0.93701171875}, {"start": 974.41, "end": 974.63, "word": " any", "probability": 0.8857421875}, {"start": 974.63, "end": 974.91, "word": " sense,", "probability": 0.82958984375}, {"start": 976.17, "end": 976.47, "word": " because", "probability": 0.869140625}, {"start": 976.47, "end": 976.83, "word": " easy.", "probability": 0.609375}], "temperature": 1.0}, {"id": 38, "seek": 100150, "start": 978.16, "end": 1001.5, "text": " is not a reason to select that sample. Inexpensive I think is also is not that big reason. But if you select a sample because these items are convenient to assemble, it makes sense. So convenient sample can be chosen based on easy, inexpensive or convenient to assemble. On the other hand,", "tokens": [307, 406, 257, 1778, 281, 3048, 300, 6889, 13, 682, 27409, 286, 519, 307, 611, 307, 406, 300, 955, 1778, 13, 583, 498, 291, 3048, 257, 6889, 570, 613, 4754, 366, 10851, 281, 22364, 11, 309, 1669, 2020, 13, 407, 10851, 6889, 393, 312, 8614, 2361, 322, 1858, 11, 28382, 420, 10851, 281, 22364, 13, 1282, 264, 661, 1011, 11], "avg_logprob": -0.19774590359359492, "compression_ratio": 1.695906432748538, "no_speech_prob": 0.0, "words": [{"start": 978.16, "end": 978.48, "word": " is", "probability": 0.281494140625}, {"start": 978.48, "end": 978.64, "word": " not", "probability": 0.94677734375}, {"start": 978.64, "end": 978.82, "word": " a", "probability": 0.92822265625}, {"start": 978.82, "end": 979.12, "word": " reason", "probability": 0.9765625}, {"start": 979.12, "end": 980.42, "word": " to", "probability": 0.9658203125}, {"start": 980.42, "end": 980.8, "word": " select", "probability": 0.8203125}, {"start": 980.8, "end": 981.04, "word": " that", "probability": 0.6123046875}, {"start": 981.04, "end": 981.38, "word": " sample.", "probability": 0.88916015625}, {"start": 982.68, 
"end": 983.32, "word": " Inexpensive", "probability": 0.892822265625}, {"start": 983.32, "end": 983.78, "word": " I", "probability": 0.441650390625}, {"start": 983.78, "end": 983.98, "word": " think", "probability": 0.92431640625}, {"start": 983.98, "end": 984.14, "word": " is", "probability": 0.765625}, {"start": 984.14, "end": 984.46, "word": " also", "probability": 0.86181640625}, {"start": 984.46, "end": 984.7, "word": " is", "probability": 0.57763671875}, {"start": 984.7, "end": 984.86, "word": " not", "probability": 0.9482421875}, {"start": 984.86, "end": 985.1, "word": " that", "probability": 0.916015625}, {"start": 985.1, "end": 985.76, "word": " big", "probability": 0.9130859375}, {"start": 985.76, "end": 986.1, "word": " reason.", "probability": 0.8603515625}, {"start": 986.38, "end": 986.68, "word": " But", "probability": 0.943359375}, {"start": 986.68, "end": 986.94, "word": " if", "probability": 0.90087890625}, {"start": 986.94, "end": 987.08, "word": " you", "probability": 0.966796875}, {"start": 987.08, "end": 987.4, "word": " select", "probability": 0.85498046875}, {"start": 987.4, "end": 987.58, "word": " a", "probability": 0.96533203125}, {"start": 987.58, "end": 987.82, "word": " sample", "probability": 0.86083984375}, {"start": 987.82, "end": 988.26, "word": " because", "probability": 0.826171875}, {"start": 988.26, "end": 989.04, "word": " these", "probability": 0.8486328125}, {"start": 989.04, "end": 989.54, "word": " items", "probability": 0.81005859375}, {"start": 989.54, "end": 989.84, "word": " are", "probability": 0.94287109375}, {"start": 989.84, "end": 990.34, "word": " convenient", "probability": 0.9404296875}, {"start": 990.34, "end": 990.56, "word": " to", "probability": 0.966796875}, {"start": 990.56, "end": 990.96, "word": " assemble,", "probability": 0.6962890625}, {"start": 991.22, "end": 991.34, "word": " it", "probability": 0.9423828125}, {"start": 991.34, "end": 991.56, "word": " makes", "probability": 0.8154296875}, {"start": 
991.56, "end": 991.88, "word": " sense.", "probability": 0.81787109375}, {"start": 992.64, "end": 992.8, "word": " So", "probability": 0.93896484375}, {"start": 992.8, "end": 993.24, "word": " convenient", "probability": 0.61181640625}, {"start": 993.24, "end": 993.76, "word": " sample", "probability": 0.78076171875}, {"start": 993.76, "end": 994.8, "word": " can", "probability": 0.9287109375}, {"start": 994.8, "end": 995.0, "word": " be", "probability": 0.96044921875}, {"start": 995.0, "end": 995.38, "word": " chosen", "probability": 0.9677734375}, {"start": 995.38, "end": 995.76, "word": " based", "probability": 0.9248046875}, {"start": 995.76, "end": 996.02, "word": " on", "probability": 0.94775390625}, {"start": 996.02, "end": 996.42, "word": " easy,", "probability": 0.81640625}, {"start": 997.02, "end": 997.64, "word": " inexpensive", "probability": 0.806640625}, {"start": 997.64, "end": 998.28, "word": " or", "probability": 0.65625}, {"start": 998.28, "end": 998.76, "word": " convenient", "probability": 0.93359375}, {"start": 998.76, "end": 999.0, "word": " to", "probability": 0.6669921875}, {"start": 999.0, "end": 999.24, "word": " assemble.", "probability": 0.623046875}, {"start": 1000.42, "end": 1000.74, "word": " On", "probability": 0.9501953125}, {"start": 1000.74, "end": 1000.9, "word": " the", "probability": 0.923828125}, {"start": 1000.9, "end": 1001.08, "word": " other", "probability": 0.890625}, {"start": 1001.08, "end": 1001.5, "word": " hand,", "probability": 0.91455078125}], "temperature": 1.0}, {"id": 39, "seek": 103082, "start": 1001.92, "end": 1030.82, "text": " In judgment sample, you get the opinions of pre-selected experts in the subject matter. For example, suppose we are talking about the causes of certain disease. Suppose we are talking about cancer. 
If I know the expert for this type of disease, that means you have judgment sample because you decided", "tokens": [682, 12216, 6889, 11, 291, 483, 264, 11819, 295, 659, 12, 405, 1809, 292, 8572, 294, 264, 3983, 1871, 13, 1171, 1365, 11, 7297, 321, 366, 1417, 466, 264, 7700, 295, 1629, 4752, 13, 21360, 321, 366, 1417, 466, 5592, 13, 759, 286, 458, 264, 5844, 337, 341, 2010, 295, 4752, 11, 300, 1355, 291, 362, 12216, 6889, 570, 291, 3047], "avg_logprob": -0.17729335350375022, "compression_ratio": 1.681564245810056, "no_speech_prob": 0.0, "words": [{"start": 1001.92, "end": 1002.28, "word": " In", "probability": 0.7421875}, {"start": 1002.28, "end": 1002.68, "word": " judgment", "probability": 0.426513671875}, {"start": 1002.68, "end": 1003.14, "word": " sample,", "probability": 0.69482421875}, {"start": 1003.4, "end": 1003.58, "word": " you", "probability": 0.9599609375}, {"start": 1003.58, "end": 1003.88, "word": " get", "probability": 0.919921875}, {"start": 1003.88, "end": 1004.06, "word": " the", "probability": 0.74560546875}, {"start": 1004.06, "end": 1004.58, "word": " opinions", "probability": 0.80224609375}, {"start": 1004.58, "end": 1004.86, "word": " of", "probability": 0.96435546875}, {"start": 1004.86, "end": 1005.14, "word": " pre", "probability": 0.88232421875}, {"start": 1005.14, "end": 1005.66, "word": "-selected", "probability": 0.8973388671875}, {"start": 1005.66, "end": 1006.28, "word": " experts", "probability": 0.916015625}, {"start": 1006.28, "end": 1007.48, "word": " in", "probability": 0.89306640625}, {"start": 1007.48, "end": 1007.64, "word": " the", "probability": 0.916015625}, {"start": 1007.64, "end": 1008.0, "word": " subject", "probability": 0.97021484375}, {"start": 1008.0, "end": 1008.34, "word": " matter.", "probability": 0.9091796875}, {"start": 1009.12, "end": 1009.36, "word": " For", "probability": 0.9423828125}, {"start": 1009.36, "end": 1009.62, "word": " example,", "probability": 0.97021484375}, {"start": 1009.72, "end": 
1010.02, "word": " suppose", "probability": 0.8193359375}, {"start": 1010.02, "end": 1010.14, "word": " we", "probability": 0.91259765625}, {"start": 1010.14, "end": 1010.24, "word": " are", "probability": 0.87158203125}, {"start": 1010.24, "end": 1010.58, "word": " talking", "probability": 0.8515625}, {"start": 1010.58, "end": 1011.06, "word": " about", "probability": 0.912109375}, {"start": 1011.06, "end": 1012.3, "word": " the", "probability": 0.88818359375}, {"start": 1012.3, "end": 1012.7, "word": " causes", "probability": 0.88720703125}, {"start": 1012.7, "end": 1013.48, "word": " of", "probability": 0.96728515625}, {"start": 1013.48, "end": 1014.56, "word": " certain", "probability": 0.62939453125}, {"start": 1014.56, "end": 1015.1, "word": " disease.", "probability": 0.64794921875}, {"start": 1015.3, "end": 1015.58, "word": " Suppose", "probability": 0.79052734375}, {"start": 1015.58, "end": 1015.74, "word": " we", "probability": 0.88916015625}, {"start": 1015.74, "end": 1015.84, "word": " are", "probability": 0.8369140625}, {"start": 1015.84, "end": 1016.06, "word": " talking", "probability": 0.86328125}, {"start": 1016.06, "end": 1016.56, "word": " about", "probability": 0.89794921875}, {"start": 1016.56, "end": 1017.76, "word": " cancer.", "probability": 0.92626953125}, {"start": 1021.72, "end": 1022.4, "word": " If", "probability": 0.9609375}, {"start": 1022.4, "end": 1022.58, "word": " I", "probability": 0.9931640625}, {"start": 1022.58, "end": 1022.72, "word": " know", "probability": 0.90283203125}, {"start": 1022.72, "end": 1022.92, "word": " the", "probability": 0.919921875}, {"start": 1022.92, "end": 1023.48, "word": " expert", "probability": 0.86669921875}, {"start": 1023.48, "end": 1025.7, "word": " for", "probability": 0.9111328125}, {"start": 1025.7, "end": 1025.96, "word": " this", "probability": 0.94775390625}, {"start": 1025.96, "end": 1026.24, "word": " type", "probability": 0.9755859375}, {"start": 1026.24, "end": 1026.38, "word": " of", 
"probability": 0.9677734375}, {"start": 1026.38, "end": 1026.82, "word": " disease,", "probability": 0.91943359375}, {"start": 1027.62, "end": 1028.1, "word": " that", "probability": 0.94140625}, {"start": 1028.1, "end": 1028.5, "word": " means", "probability": 0.93359375}, {"start": 1028.5, "end": 1028.7, "word": " you", "probability": 0.94921875}, {"start": 1028.7, "end": 1029.04, "word": " have", "probability": 0.94775390625}, {"start": 1029.04, "end": 1029.4, "word": " judgment", "probability": 0.66357421875}, {"start": 1029.4, "end": 1029.82, "word": " sample", "probability": 0.8427734375}, {"start": 1029.82, "end": 1030.14, "word": " because", "probability": 0.53857421875}, {"start": 1030.14, "end": 1030.34, "word": " you", "probability": 0.955078125}, {"start": 1030.34, "end": 1030.82, "word": " decided", "probability": 0.68505859375}], "temperature": 1.0}, {"id": 40, "seek": 105480, "start": 1032.24, "end": 1054.8, "text": " Before you select a sample that your sample should contain only the expert in cancer disease. So that's the judgment sampling. 
So in this case, I didn't take all the", "tokens": [4546, 291, 3048, 257, 6889, 300, 428, 6889, 820, 5304, 787, 264, 5844, 294, 5592, 4752, 13, 407, 300, 311, 264, 12216, 21179, 13, 407, 294, 341, 1389, 11, 286, 994, 380, 747, 439, 264], "avg_logprob": -0.23784721891085306, "compression_ratio": 1.360655737704918, "no_speech_prob": 0.0, "words": [{"start": 1032.24, "end": 1032.7, "word": " Before", "probability": 0.5888671875}, {"start": 1032.7, "end": 1032.96, "word": " you", "probability": 0.95458984375}, {"start": 1032.96, "end": 1033.28, "word": " select", "probability": 0.83154296875}, {"start": 1033.28, "end": 1033.48, "word": " a", "probability": 0.91943359375}, {"start": 1033.48, "end": 1033.66, "word": " sample", "probability": 0.8984375}, {"start": 1033.66, "end": 1034.02, "word": " that", "probability": 0.1968994140625}, {"start": 1034.02, "end": 1034.72, "word": " your", "probability": 0.72509765625}, {"start": 1034.72, "end": 1035.32, "word": " sample", "probability": 0.89013671875}, {"start": 1035.32, "end": 1037.82, "word": " should", "probability": 0.96875}, {"start": 1037.82, "end": 1039.82, "word": " contain", "probability": 0.92236328125}, {"start": 1039.82, "end": 1041.04, "word": " only", "probability": 0.91943359375}, {"start": 1041.04, "end": 1043.58, "word": " the", "probability": 0.90087890625}, {"start": 1043.58, "end": 1044.14, "word": " expert", "probability": 0.73828125}, {"start": 1044.14, "end": 1047.5, "word": " in", "probability": 0.9228515625}, {"start": 1047.5, "end": 1047.92, "word": " cancer", "probability": 0.947265625}, {"start": 1047.92, "end": 1048.42, "word": " disease.", "probability": 0.8779296875}, {"start": 1048.96, "end": 1049.44, "word": " So", "probability": 0.93359375}, {"start": 1049.44, "end": 1049.76, "word": " that's", "probability": 0.8544921875}, {"start": 1049.76, "end": 1049.92, "word": " the", "probability": 0.916015625}, {"start": 1049.92, "end": 1050.32, "word": " judgment", "probability": 0.6748046875}, 
{"start": 1050.32, "end": 1051.36, "word": " sampling.", "probability": 0.31103515625}, {"start": 1052.26, "end": 1052.56, "word": " So", "probability": 0.9345703125}, {"start": 1052.56, "end": 1052.7, "word": " in", "probability": 0.77685546875}, {"start": 1052.7, "end": 1052.82, "word": " this", "probability": 0.92529296875}, {"start": 1052.82, "end": 1053.02, "word": " case,", "probability": 0.9130859375}, {"start": 1053.14, "end": 1053.3, "word": " I", "probability": 0.99755859375}, {"start": 1053.3, "end": 1053.64, "word": " didn't", "probability": 0.963623046875}, {"start": 1053.64, "end": 1054.08, "word": " take", "probability": 0.89306640625}, {"start": 1054.08, "end": 1054.48, "word": " all", "probability": 0.953125}, {"start": 1054.48, "end": 1054.8, "word": " the", "probability": 0.85693359375}], "temperature": 1.0}, {"id": 41, "seek": 108074, "start": 1056.02, "end": 1080.74, "text": " doctors in this case, I just taught the expert in cancer disease. So that's called non-probability samples. You have to make sense to distinguish between convenience sampling and judgment sample. 
So for judgment, you select a sample based on the prior information you have about the subject matter.", "tokens": [8778, 294, 341, 1389, 11, 286, 445, 5928, 264, 5844, 294, 5592, 4752, 13, 407, 300, 311, 1219, 2107, 12, 41990, 2310, 10938, 13, 509, 362, 281, 652, 2020, 281, 20206, 1296, 19283, 21179, 293, 12216, 6889, 13, 407, 337, 12216, 11, 291, 3048, 257, 6889, 2361, 322, 264, 4059, 1589, 291, 362, 466, 264, 3983, 1871, 13], "avg_logprob": -0.20153601290815967, "compression_ratio": 1.549222797927461, "no_speech_prob": 0.0, "words": [{"start": 1056.02, "end": 1056.62, "word": " doctors", "probability": 0.3916015625}, {"start": 1056.62, "end": 1056.8, "word": " in", "probability": 0.880859375}, {"start": 1056.8, "end": 1057.02, "word": " this", "probability": 0.9443359375}, {"start": 1057.02, "end": 1057.34, "word": " case,", "probability": 0.9140625}, {"start": 1057.66, "end": 1057.82, "word": " I", "probability": 0.923828125}, {"start": 1057.82, "end": 1058.04, "word": " just", "probability": 0.8974609375}, {"start": 1058.04, "end": 1058.38, "word": " taught", "probability": 0.62353515625}, {"start": 1058.38, "end": 1059.26, "word": " the", "probability": 0.8203125}, {"start": 1059.26, "end": 1059.66, "word": " expert", "probability": 0.75439453125}, {"start": 1059.66, "end": 1060.64, "word": " in", "probability": 0.91455078125}, {"start": 1060.64, "end": 1061.34, "word": " cancer", "probability": 0.9140625}, {"start": 1061.34, "end": 1061.78, "word": " disease.", "probability": 0.72412109375}, {"start": 1062.28, "end": 1062.5, "word": " So", "probability": 0.8916015625}, {"start": 1062.5, "end": 1062.94, "word": " that's", "probability": 0.837158203125}, {"start": 1062.94, "end": 1063.32, "word": " called", "probability": 0.8955078125}, {"start": 1063.32, "end": 1063.58, "word": " non", "probability": 0.8095703125}, {"start": 1063.58, "end": 1064.18, "word": "-probability", "probability": 0.7853190104166666}, {"start": 1064.18, "end": 1065.16, "word": " 
samples.", "probability": 0.379150390625}, {"start": 1065.34, "end": 1065.4, "word": " You", "probability": 0.485107421875}, {"start": 1065.4, "end": 1065.6, "word": " have", "probability": 0.94580078125}, {"start": 1065.6, "end": 1066.18, "word": " to", "probability": 0.9697265625}, {"start": 1066.18, "end": 1066.54, "word": " make", "probability": 0.82666015625}, {"start": 1066.54, "end": 1066.82, "word": " sense", "probability": 0.8408203125}, {"start": 1066.82, "end": 1067.26, "word": " to", "probability": 0.77001953125}, {"start": 1067.26, "end": 1068.12, "word": " distinguish", "probability": 0.908203125}, {"start": 1068.12, "end": 1068.82, "word": " between", "probability": 0.88916015625}, {"start": 1068.82, "end": 1071.04, "word": " convenience", "probability": 0.8037109375}, {"start": 1071.04, "end": 1071.44, "word": " sampling", "probability": 0.720703125}, {"start": 1071.44, "end": 1072.58, "word": " and", "probability": 0.89453125}, {"start": 1072.58, "end": 1073.68, "word": " judgment", "probability": 0.68359375}, {"start": 1073.68, "end": 1074.08, "word": " sample.", "probability": 0.416748046875}, {"start": 1074.4, "end": 1074.52, "word": " So", "probability": 0.9033203125}, {"start": 1074.52, "end": 1074.7, "word": " for", "probability": 0.88037109375}, {"start": 1074.7, "end": 1075.02, "word": " judgment,", "probability": 0.90234375}, {"start": 1075.62, "end": 1075.92, "word": " you", "probability": 0.95947265625}, {"start": 1075.92, "end": 1076.44, "word": " select", "probability": 0.83837890625}, {"start": 1076.44, "end": 1076.66, "word": " a", "probability": 0.97607421875}, {"start": 1076.66, "end": 1076.96, "word": " sample", "probability": 0.8935546875}, {"start": 1076.96, "end": 1077.34, "word": " based", "probability": 0.90478515625}, {"start": 1077.34, "end": 1077.58, "word": " on", "probability": 0.9501953125}, {"start": 1077.58, "end": 1077.74, "word": " the", "probability": 0.91845703125}, {"start": 1077.74, "end": 1077.98, "word": " 
prior", "probability": 0.9541015625}, {"start": 1077.98, "end": 1078.52, "word": " information", "probability": 0.8408203125}, {"start": 1078.52, "end": 1078.74, "word": " you", "probability": 0.92236328125}, {"start": 1078.74, "end": 1078.94, "word": " have", "probability": 0.93701171875}, {"start": 1078.94, "end": 1079.4, "word": " about", "probability": 0.9072265625}, {"start": 1079.4, "end": 1080.08, "word": " the", "probability": 0.91552734375}, {"start": 1080.08, "end": 1080.44, "word": " subject", "probability": 0.96923828125}, {"start": 1080.44, "end": 1080.74, "word": " matter.", "probability": 0.90234375}], "temperature": 1.0}, {"id": 42, "seek": 110981, "start": 1082.87, "end": 1109.81, "text": " Suppose I am talking about something related to psychology, so I have to take the expert in psychology. Suppose I am talking about expert in sports, so I have to take a sample from that segment and so on. But the convenient sample means that you select a sample maybe that is easy for you, or less expensive, or that sample is convenient.", "tokens": [21360, 286, 669, 1417, 466, 746, 4077, 281, 15105, 11, 370, 286, 362, 281, 747, 264, 5844, 294, 15105, 13, 21360, 286, 669, 1417, 466, 5844, 294, 6573, 11, 370, 286, 362, 281, 747, 257, 6889, 490, 300, 9469, 293, 370, 322, 13, 583, 264, 10851, 6889, 1355, 300, 291, 3048, 257, 6889, 1310, 300, 307, 1858, 337, 291, 11, 420, 1570, 5124, 11, 420, 300, 6889, 307, 10851, 13], "avg_logprob": -0.15613996856649157, "compression_ratio": 1.8524590163934427, "no_speech_prob": 0.0, "words": [{"start": 1082.87, "end": 1083.29, "word": " Suppose", "probability": 0.61474609375}, {"start": 1083.29, "end": 1083.43, "word": " I", "probability": 0.94482421875}, {"start": 1083.43, "end": 1083.53, "word": " am", "probability": 0.68701171875}, {"start": 1083.53, "end": 1083.83, "word": " talking", "probability": 0.8427734375}, {"start": 1083.83, "end": 1084.23, "word": " about", "probability": 0.90673828125}, {"start": 1084.23, "end": 
1084.65, "word": " something", "probability": 0.88232421875}, {"start": 1084.65, "end": 1085.19, "word": " related", "probability": 0.95068359375}, {"start": 1085.19, "end": 1085.41, "word": " to", "probability": 0.974609375}, {"start": 1085.41, "end": 1085.99, "word": " psychology,", "probability": 0.77734375}, {"start": 1087.05, "end": 1087.17, "word": " so", "probability": 0.90234375}, {"start": 1087.17, "end": 1087.31, "word": " I", "probability": 0.9794921875}, {"start": 1087.31, "end": 1087.45, "word": " have", "probability": 0.9423828125}, {"start": 1087.45, "end": 1087.57, "word": " to", "probability": 0.96728515625}, {"start": 1087.57, "end": 1087.85, "word": " take", "probability": 0.8349609375}, {"start": 1087.85, "end": 1088.05, "word": " the", "probability": 0.86181640625}, {"start": 1088.05, "end": 1088.45, "word": " expert", "probability": 0.80517578125}, {"start": 1088.45, "end": 1088.83, "word": " in", "probability": 0.94091796875}, {"start": 1088.83, "end": 1089.83, "word": " psychology.", "probability": 0.85986328125}, {"start": 1090.55, "end": 1090.99, "word": " Suppose", "probability": 0.82861328125}, {"start": 1090.99, "end": 1091.17, "word": " I", "probability": 0.98583984375}, {"start": 1091.17, "end": 1091.29, "word": " am", "probability": 0.8173828125}, {"start": 1091.29, "end": 1091.59, "word": " talking", "probability": 0.85498046875}, {"start": 1091.59, "end": 1092.11, "word": " about", "probability": 0.90673828125}, {"start": 1092.11, "end": 1092.61, "word": " expert", "probability": 0.43408203125}, {"start": 1092.61, "end": 1092.91, "word": " in", "probability": 0.9365234375}, {"start": 1092.91, "end": 1093.37, "word": " sports,", "probability": 0.7861328125}, {"start": 1093.71, "end": 1093.83, "word": " so", "probability": 0.90673828125}, {"start": 1093.83, "end": 1093.95, "word": " I", "probability": 0.9951171875}, {"start": 1093.95, "end": 1094.15, "word": " have", "probability": 0.935546875}, {"start": 1094.15, "end": 1094.75, 
"word": " to", "probability": 0.9619140625}, {"start": 1094.75, "end": 1095.07, "word": " take", "probability": 0.8818359375}, {"start": 1095.07, "end": 1095.93, "word": " a", "probability": 0.97021484375}, {"start": 1095.93, "end": 1096.17, "word": " sample", "probability": 0.921875}, {"start": 1096.17, "end": 1096.53, "word": " from", "probability": 0.88916015625}, {"start": 1096.53, "end": 1097.05, "word": " that", "probability": 0.9296875}, {"start": 1097.05, "end": 1097.93, "word": " segment", "probability": 0.9404296875}, {"start": 1097.93, "end": 1098.21, "word": " and", "probability": 0.794921875}, {"start": 1098.21, "end": 1098.35, "word": " so", "probability": 0.95751953125}, {"start": 1098.35, "end": 1098.75, "word": " on.", "probability": 0.94775390625}, {"start": 1099.45, "end": 1099.67, "word": " But", "probability": 0.93017578125}, {"start": 1099.67, "end": 1099.81, "word": " the", "probability": 0.82373046875}, {"start": 1099.81, "end": 1100.17, "word": " convenient", "probability": 0.400634765625}, {"start": 1100.17, "end": 1100.57, "word": " sample", "probability": 0.8935546875}, {"start": 1100.57, "end": 1100.97, "word": " means", "probability": 0.49951171875}, {"start": 1100.97, "end": 1101.31, "word": " that", "probability": 0.87451171875}, {"start": 1101.31, "end": 1101.93, "word": " you", "probability": 0.931640625}, {"start": 1101.93, "end": 1102.33, "word": " select", "probability": 0.8583984375}, {"start": 1102.33, "end": 1102.57, "word": " a", "probability": 0.9921875}, {"start": 1102.57, "end": 1102.97, "word": " sample", "probability": 0.8955078125}, {"start": 1102.97, "end": 1103.51, "word": " maybe", "probability": 0.60400390625}, {"start": 1103.51, "end": 1103.83, "word": " that", "probability": 0.939453125}, {"start": 1103.83, "end": 1104.11, "word": " is", "probability": 0.9296875}, {"start": 1104.11, "end": 1104.43, "word": " easy", "probability": 0.8798828125}, {"start": 1104.43, "end": 1104.69, "word": " for", "probability": 
0.94921875}, {"start": 1104.69, "end": 1104.97, "word": " you,", "probability": 0.962890625}, {"start": 1105.97, "end": 1106.37, "word": " or", "probability": 0.96826171875}, {"start": 1106.37, "end": 1106.95, "word": " less", "probability": 0.9453125}, {"start": 1106.95, "end": 1107.51, "word": " expensive,", "probability": 0.9638671875}, {"start": 1108.43, "end": 1108.71, "word": " or", "probability": 0.9619140625}, {"start": 1108.71, "end": 1108.97, "word": " that", "probability": 0.92724609375}, {"start": 1108.97, "end": 1109.23, "word": " sample", "probability": 0.90478515625}, {"start": 1109.23, "end": 1109.43, "word": " is", "probability": 0.9111328125}, {"start": 1109.43, "end": 1109.81, "word": " convenient.", "probability": 0.94873046875}], "temperature": 1.0}, {"id": 43, "seek": 113859, "start": 1111.1, "end": 1138.6, "text": " For this reason, it's called non-probability sample because we choose that sample without regard to their probability of occurrence. The other type is called probability samples. In this case, items are chosen on the basis of non-probabilities. 
For example, here, if males", "tokens": [1171, 341, 1778, 11, 309, 311, 1219, 2107, 12, 41990, 2310, 6889, 570, 321, 2826, 300, 6889, 1553, 3843, 281, 641, 8482, 295, 36122, 13, 440, 661, 2010, 307, 1219, 8482, 10938, 13, 682, 341, 1389, 11, 4754, 366, 8614, 322, 264, 5143, 295, 2107, 12, 41990, 6167, 13, 1171, 1365, 11, 510, 11, 498, 20776], "avg_logprob": -0.15200109858261912, "compression_ratio": 1.644578313253012, "no_speech_prob": 0.0, "words": [{"start": 1111.1, "end": 1111.36, "word": " For", "probability": 0.73876953125}, {"start": 1111.36, "end": 1111.6, "word": " this", "probability": 0.943359375}, {"start": 1111.6, "end": 1111.94, "word": " reason,", "probability": 0.97509765625}, {"start": 1112.04, "end": 1112.3, "word": " it's", "probability": 0.936767578125}, {"start": 1112.3, "end": 1112.6, "word": " called", "probability": 0.9013671875}, {"start": 1112.6, "end": 1112.98, "word": " non", "probability": 0.8076171875}, {"start": 1112.98, "end": 1113.72, "word": "-probability", "probability": 0.8693033854166666}, {"start": 1113.72, "end": 1114.08, "word": " sample", "probability": 0.69970703125}, {"start": 1114.08, "end": 1114.54, "word": " because", "probability": 0.5419921875}, {"start": 1114.54, "end": 1115.18, "word": " we", "probability": 0.9228515625}, {"start": 1115.18, "end": 1115.62, "word": " choose", "probability": 0.72412109375}, {"start": 1115.62, "end": 1115.9, "word": " that", "probability": 0.93017578125}, {"start": 1115.9, "end": 1116.3, "word": " sample", "probability": 0.86474609375}, {"start": 1116.3, "end": 1116.58, "word": " without", "probability": 0.888671875}, {"start": 1116.58, "end": 1117.08, "word": " regard", "probability": 0.98046875}, {"start": 1117.08, "end": 1117.3, "word": " to", "probability": 0.96533203125}, {"start": 1117.3, "end": 1117.6, "word": " their", "probability": 0.93505859375}, {"start": 1117.6, "end": 1118.8, "word": " probability", "probability": 0.92578125}, {"start": 1118.8, "end": 1119.16, "word": " 
of", "probability": 0.96240234375}, {"start": 1119.16, "end": 1119.54, "word": " occurrence.", "probability": 0.94775390625}, {"start": 1121.08, "end": 1121.9, "word": " The", "probability": 0.89599609375}, {"start": 1121.9, "end": 1122.34, "word": " other", "probability": 0.8955078125}, {"start": 1122.34, "end": 1122.92, "word": " type", "probability": 0.97705078125}, {"start": 1122.92, "end": 1123.9, "word": " is", "probability": 0.9248046875}, {"start": 1123.9, "end": 1124.3, "word": " called", "probability": 0.892578125}, {"start": 1124.3, "end": 1125.92, "word": " probability", "probability": 0.662109375}, {"start": 1125.92, "end": 1126.56, "word": " samples.", "probability": 0.8203125}, {"start": 1128.18, "end": 1128.62, "word": " In", "probability": 0.9658203125}, {"start": 1128.62, "end": 1128.86, "word": " this", "probability": 0.9443359375}, {"start": 1128.86, "end": 1129.24, "word": " case,", "probability": 0.90673828125}, {"start": 1130.14, "end": 1130.62, "word": " items", "probability": 0.84033203125}, {"start": 1130.62, "end": 1131.06, "word": " are", "probability": 0.94482421875}, {"start": 1131.06, "end": 1132.18, "word": " chosen", "probability": 0.97021484375}, {"start": 1132.18, "end": 1132.64, "word": " on", "probability": 0.94677734375}, {"start": 1132.64, "end": 1132.8, "word": " the", "probability": 0.92724609375}, {"start": 1132.8, "end": 1133.24, "word": " basis", "probability": 0.92041015625}, {"start": 1133.24, "end": 1133.88, "word": " of", "probability": 0.96435546875}, {"start": 1133.88, "end": 1134.2, "word": " non", "probability": 0.68896484375}, {"start": 1134.2, "end": 1134.72, "word": "-probabilities.", "probability": 0.8429361979166666}, {"start": 1135.42, "end": 1135.62, "word": " For", "probability": 0.96484375}, {"start": 1135.62, "end": 1135.98, "word": " example,", "probability": 0.97412109375}, {"start": 1136.9, "end": 1137.22, "word": " here,", "probability": 0.796875}, {"start": 1137.88, "end": 1138.16, "word": " if", 
"probability": 0.955078125}, {"start": 1138.16, "end": 1138.6, "word": " males", "probability": 0.71142578125}], "temperature": 1.0}, {"id": 44, "seek": 116459, "start": 1142.5, "end": 1164.6, "text": " has or represent 30%, and females represent 70%, and the same size has a thousand. So in this case, you have to choose females with respect to their probability. Now 70% for females, so I have to choose", "tokens": [575, 420, 2906, 2217, 8923, 293, 21529, 2906, 5285, 8923, 293, 264, 912, 2744, 575, 257, 4714, 13, 407, 294, 341, 1389, 11, 291, 362, 281, 2826, 21529, 365, 3104, 281, 641, 8482, 13, 823, 5285, 4, 337, 21529, 11, 370, 286, 362, 281, 2826], "avg_logprob": -0.30944292636021326, "compression_ratio": 1.460431654676259, "no_speech_prob": 0.0, "words": [{"start": 1142.5, "end": 1143.22, "word": " has", "probability": 0.355712890625}, {"start": 1143.22, "end": 1143.94, "word": " or", "probability": 0.67333984375}, {"start": 1143.94, "end": 1145.06, "word": " represent", "probability": 0.44970703125}, {"start": 1145.06, "end": 1147.04, "word": " 30%,", "probability": 0.5447998046875}, {"start": 1147.04, "end": 1148.26, "word": " and", "probability": 0.70361328125}, {"start": 1148.26, "end": 1148.68, "word": " females", "probability": 0.81005859375}, {"start": 1148.68, "end": 1149.98, "word": " represent", "probability": 0.71826171875}, {"start": 1149.98, "end": 1151.06, "word": " 70%,", "probability": 0.739013671875}, {"start": 1151.06, "end": 1151.88, "word": " and", "probability": 0.9296875}, {"start": 1151.88, "end": 1152.02, "word": " the", "probability": 0.76123046875}, {"start": 1152.02, "end": 1152.18, "word": " same", "probability": 0.404296875}, {"start": 1152.18, "end": 1152.54, "word": " size", "probability": 0.8251953125}, {"start": 1152.54, "end": 1152.76, "word": " has", "probability": 0.56689453125}, {"start": 1152.76, "end": 1152.88, "word": " a", "probability": 0.64111328125}, {"start": 1152.88, "end": 1153.2, "word": " thousand.", 
"probability": 0.7744140625}, {"start": 1154.06, "end": 1154.38, "word": " So", "probability": 0.93359375}, {"start": 1154.38, "end": 1154.48, "word": " in", "probability": 0.734375}, {"start": 1154.48, "end": 1154.64, "word": " this", "probability": 0.94921875}, {"start": 1154.64, "end": 1154.84, "word": " case,", "probability": 0.916015625}, {"start": 1154.92, "end": 1154.96, "word": " you", "probability": 0.94091796875}, {"start": 1154.96, "end": 1155.1, "word": " have", "probability": 0.93505859375}, {"start": 1155.1, "end": 1155.26, "word": " to", "probability": 0.9697265625}, {"start": 1155.26, "end": 1155.66, "word": " choose", "probability": 0.9150390625}, {"start": 1155.66, "end": 1157.68, "word": " females", "probability": 0.85986328125}, {"start": 1157.68, "end": 1158.08, "word": " with", "probability": 0.8974609375}, {"start": 1158.08, "end": 1158.66, "word": " respect", "probability": 0.9140625}, {"start": 1158.66, "end": 1159.14, "word": " to", "probability": 0.96875}, {"start": 1159.14, "end": 1159.34, "word": " their", "probability": 0.9423828125}, {"start": 1159.34, "end": 1159.86, "word": " probability.", "probability": 0.97216796875}, {"start": 1160.62, "end": 1161.3, "word": " Now", "probability": 0.95556640625}, {"start": 1161.3, "end": 1161.74, "word": " 70", "probability": 0.529296875}, {"start": 1161.74, "end": 1162.06, "word": "%", "probability": 0.95703125}, {"start": 1162.06, "end": 1163.4, "word": " for", "probability": 0.8935546875}, {"start": 1163.4, "end": 1163.8, "word": " females,", "probability": 0.9375}, {"start": 1163.9, "end": 1163.98, "word": " so", "probability": 0.63037109375}, {"start": 1163.98, "end": 1164.02, "word": " I", "probability": 0.9521484375}, {"start": 1164.02, "end": 1164.16, "word": " have", "probability": 0.921875}, {"start": 1164.16, "end": 1164.26, "word": " to", "probability": 0.9658203125}, {"start": 1164.26, "end": 1164.6, "word": " choose", "probability": 0.91552734375}], "temperature": 1.0}, {"id": 45, 
"seek": 119036, "start": 1165.35, "end": 1190.37, "text": " 700 for females and the remaining 300 for males. So in this case, I choose the items, I mean I choose my samples regarding to their probability. So in probability sample items and the sample are chosen on the basis of known probabilities. And again, there are two types.", "tokens": [15204, 337, 21529, 293, 264, 8877, 6641, 337, 20776, 13, 407, 294, 341, 1389, 11, 286, 2826, 264, 4754, 11, 286, 914, 286, 2826, 452, 10938, 8595, 281, 641, 8482, 13, 407, 294, 8482, 6889, 4754, 293, 264, 6889, 366, 8614, 322, 264, 5143, 295, 2570, 33783, 13, 400, 797, 11, 456, 366, 732, 3467, 13], "avg_logprob": -0.22313596282088966, "compression_ratio": 1.6832298136645962, "no_speech_prob": 0.0, "words": [{"start": 1165.35, "end": 1166.05, "word": " 700", "probability": 0.190673828125}, {"start": 1166.05, "end": 1166.47, "word": " for", "probability": 0.59619140625}, {"start": 1166.47, "end": 1166.85, "word": " females", "probability": 0.908203125}, {"start": 1166.85, "end": 1167.83, "word": " and", "probability": 0.625}, {"start": 1167.83, "end": 1168.03, "word": " the", "probability": 0.89697265625}, {"start": 1168.03, "end": 1168.35, "word": " remaining", "probability": 0.87451171875}, {"start": 1168.35, "end": 1168.91, "word": " 300", "probability": 0.57177734375}, {"start": 1168.91, "end": 1169.43, "word": " for", "probability": 0.9384765625}, {"start": 1169.43, "end": 1170.09, "word": " males.", "probability": 0.89453125}, {"start": 1170.91, "end": 1171.13, "word": " So", "probability": 0.892578125}, {"start": 1171.13, "end": 1171.25, "word": " in", "probability": 0.81396484375}, {"start": 1171.25, "end": 1171.45, "word": " this", "probability": 0.9453125}, {"start": 1171.45, "end": 1171.85, "word": " case,", "probability": 0.91357421875}, {"start": 1172.17, "end": 1172.39, "word": " I", "probability": 0.99658203125}, {"start": 1172.39, "end": 1172.77, "word": " choose", "probability": 0.80419921875}, {"start": 1172.77, 
"end": 1172.93, "word": " the", "probability": 0.8212890625}, {"start": 1172.93, "end": 1173.47, "word": " items,", "probability": 0.83544921875}, {"start": 1173.73, "end": 1173.85, "word": " I", "probability": 0.99755859375}, {"start": 1173.85, "end": 1174.01, "word": " mean", "probability": 0.89990234375}, {"start": 1174.01, "end": 1174.19, "word": " I", "probability": 0.69677734375}, {"start": 1174.19, "end": 1174.53, "word": " choose", "probability": 0.8388671875}, {"start": 1174.53, "end": 1175.31, "word": " my", "probability": 0.9736328125}, {"start": 1175.31, "end": 1176.17, "word": " samples", "probability": 0.927734375}, {"start": 1176.17, "end": 1177.19, "word": " regarding", "probability": 0.74072265625}, {"start": 1177.19, "end": 1177.59, "word": " to", "probability": 0.78662109375}, {"start": 1177.59, "end": 1177.97, "word": " their", "probability": 0.951171875}, {"start": 1177.97, "end": 1179.05, "word": " probability.", "probability": 0.75732421875}, {"start": 1181.01, "end": 1181.49, "word": " So", "probability": 0.9296875}, {"start": 1181.49, "end": 1182.29, "word": " in", "probability": 0.75146484375}, {"start": 1182.29, "end": 1182.67, "word": " probability", "probability": 0.460205078125}, {"start": 1182.67, "end": 1183.13, "word": " sample", "probability": 0.77685546875}, {"start": 1183.13, "end": 1183.87, "word": " items", "probability": 0.73681640625}, {"start": 1183.87, "end": 1184.31, "word": " and", "probability": 0.4677734375}, {"start": 1184.31, "end": 1184.49, "word": " the", "probability": 0.88623046875}, {"start": 1184.49, "end": 1184.89, "word": " sample", "probability": 0.81103515625}, {"start": 1184.89, "end": 1185.19, "word": " are", "probability": 0.859375}, {"start": 1185.19, "end": 1185.65, "word": " chosen", "probability": 0.9677734375}, {"start": 1185.65, "end": 1185.85, "word": " on", "probability": 0.64404296875}, {"start": 1185.85, "end": 1185.97, "word": " the", "probability": 0.91748046875}, {"start": 1185.97, "end": 
1186.25, "word": " basis", "probability": 0.94189453125}, {"start": 1186.25, "end": 1186.41, "word": " of", "probability": 0.9658203125}, {"start": 1186.41, "end": 1186.63, "word": " known", "probability": 0.64208984375}, {"start": 1186.63, "end": 1187.13, "word": " probabilities.", "probability": 0.86767578125}, {"start": 1188.27, "end": 1188.61, "word": " And", "probability": 0.9443359375}, {"start": 1188.61, "end": 1189.01, "word": " again,", "probability": 0.93505859375}, {"start": 1189.33, "end": 1189.61, "word": " there", "probability": 0.8955078125}, {"start": 1189.61, "end": 1189.77, "word": " are", "probability": 0.94140625}, {"start": 1189.77, "end": 1189.95, "word": " two", "probability": 0.927734375}, {"start": 1189.95, "end": 1190.37, "word": " types.", "probability": 0.82763671875}], "temperature": 1.0}, {"id": 46, "seek": 121994, "start": 1191.46, "end": 1219.94, "text": " of probability samples, simple random sample, systematic, stratified, and cluster. Let's talk about each one in details. The first type is called a probability sample. Simple random sample. The first type of probability sample is the easiest one. Simple random sample. 
Generally is denoted by", "tokens": [295, 8482, 10938, 11, 2199, 4974, 6889, 11, 27249, 11, 23674, 2587, 11, 293, 13630, 13, 961, 311, 751, 466, 1184, 472, 294, 4365, 13, 440, 700, 2010, 307, 1219, 257, 8482, 6889, 13, 21532, 4974, 6889, 13, 440, 700, 2010, 295, 8482, 6889, 307, 264, 12889, 472, 13, 21532, 4974, 6889, 13, 21082, 307, 1441, 23325, 538], "avg_logprob": -0.27595338679976383, "compression_ratio": 1.8782051282051282, "no_speech_prob": 0.0, "words": [{"start": 1191.46, "end": 1191.9, "word": " of", "probability": 0.289794921875}, {"start": 1191.9, "end": 1192.36, "word": " probability", "probability": 0.75634765625}, {"start": 1192.36, "end": 1192.9, "word": " samples,", "probability": 0.78125}, {"start": 1193.16, "end": 1193.36, "word": " simple", "probability": 0.495361328125}, {"start": 1193.36, "end": 1193.64, "word": " random", "probability": 0.85107421875}, {"start": 1193.64, "end": 1194.08, "word": " sample,", "probability": 0.57568359375}, {"start": 1195.08, "end": 1195.58, "word": " systematic,", "probability": 0.89501953125}, {"start": 1196.12, "end": 1196.96, "word": " stratified,", "probability": 0.966796875}, {"start": 1197.22, "end": 1197.24, "word": " and", "probability": 0.904296875}, {"start": 1197.24, "end": 1197.6, "word": " cluster.", "probability": 0.69091796875}, {"start": 1198.26, "end": 1198.54, "word": " Let's", "probability": 0.795654296875}, {"start": 1198.54, "end": 1198.78, "word": " talk", "probability": 0.87548828125}, {"start": 1198.78, "end": 1199.12, "word": " about", "probability": 0.90234375}, {"start": 1199.12, "end": 1199.4, "word": " each", "probability": 0.94091796875}, {"start": 1199.4, "end": 1199.66, "word": " one", "probability": 0.919921875}, {"start": 1199.66, "end": 1200.06, "word": " in", "probability": 0.93408203125}, {"start": 1200.06, "end": 1200.5, "word": " details.", "probability": 0.42919921875}, {"start": 1202.24, "end": 1203.08, "word": " The", "probability": 0.853515625}, {"start": 1203.08, "end": 
1203.4, "word": " first", "probability": 0.8828125}, {"start": 1203.4, "end": 1203.7, "word": " type", "probability": 0.92919921875}, {"start": 1203.7, "end": 1203.9, "word": " is", "probability": 0.92431640625}, {"start": 1203.9, "end": 1204.28, "word": " called", "probability": 0.87841796875}, {"start": 1204.28, "end": 1204.66, "word": " a", "probability": 0.62451171875}, {"start": 1204.66, "end": 1205.04, "word": " probability", "probability": 0.88916015625}, {"start": 1205.04, "end": 1205.6, "word": " sample.", "probability": 0.85791015625}, {"start": 1207.6, "end": 1208.28, "word": " Simple", "probability": 0.53759765625}, {"start": 1208.28, "end": 1209.94, "word": " random", "probability": 0.60009765625}, {"start": 1209.94, "end": 1210.6, "word": " sample.", "probability": 0.845703125}, {"start": 1210.74, "end": 1210.86, "word": " The", "probability": 0.8330078125}, {"start": 1210.86, "end": 1211.14, "word": " first", "probability": 0.880859375}, {"start": 1211.14, "end": 1211.52, "word": " type", "probability": 0.9794921875}, {"start": 1211.52, "end": 1211.72, "word": " of", "probability": 0.962890625}, {"start": 1211.72, "end": 1212.14, "word": " probability", "probability": 0.93701171875}, {"start": 1212.14, "end": 1212.64, "word": " sample", "probability": 0.84619140625}, {"start": 1212.64, "end": 1213.62, "word": " is", "probability": 0.8984375}, {"start": 1213.62, "end": 1213.8, "word": " the", "probability": 0.92236328125}, {"start": 1213.8, "end": 1214.06, "word": " easiest", "probability": 0.92041015625}, {"start": 1214.06, "end": 1214.48, "word": " one.", "probability": 0.931640625}, {"start": 1215.52, "end": 1216.2, "word": " Simple", "probability": 0.93505859375}, {"start": 1216.2, "end": 1217.02, "word": " random", "probability": 0.85595703125}, {"start": 1217.02, "end": 1217.42, "word": " sample.", "probability": 0.87548828125}, {"start": 1218.24, "end": 1218.76, "word": " Generally", "probability": 0.59521484375}, {"start": 1218.76, "end": 
1219.18, "word": " is", "probability": 0.385009765625}, {"start": 1219.18, "end": 1219.5, "word": " denoted", "probability": 0.97119140625}, {"start": 1219.5, "end": 1219.94, "word": " by", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 47, "seek": 124796, "start": 1221.64, "end": 1247.96, "text": " SRS, Simple Random Sample. Let's see how can we choose a sample that is random. What do you mean by random? In this case, every individual or item from the frame has an equal chance of being selected. For example,", "tokens": [20840, 50, 11, 21532, 37603, 4832, 781, 13, 961, 311, 536, 577, 393, 321, 2826, 257, 6889, 300, 307, 4974, 13, 708, 360, 291, 914, 538, 4974, 30, 682, 341, 1389, 11, 633, 2609, 420, 3174, 490, 264, 3920, 575, 364, 2681, 2931, 295, 885, 8209, 13, 1171, 1365, 11], "avg_logprob": -0.18489582865845924, "compression_ratio": 1.3544303797468353, "no_speech_prob": 0.0, "words": [{"start": 1221.64, "end": 1222.4, "word": " SRS,", "probability": 0.837158203125}, {"start": 1223.26, "end": 1223.78, "word": " Simple", "probability": 0.2158203125}, {"start": 1223.78, "end": 1225.54, "word": " Random", "probability": 0.802734375}, {"start": 1225.54, "end": 1226.74, "word": " Sample.", "probability": 0.8896484375}, {"start": 1227.68, "end": 1228.06, "word": " Let's", "probability": 0.924072265625}, {"start": 1228.06, "end": 1228.16, "word": " see", "probability": 0.8740234375}, {"start": 1228.16, "end": 1228.24, "word": " how", "probability": 0.896484375}, {"start": 1228.24, "end": 1228.44, "word": " can", "probability": 0.8193359375}, {"start": 1228.44, "end": 1228.58, "word": " we", "probability": 0.94873046875}, {"start": 1228.58, "end": 1229.04, "word": " choose", "probability": 0.90478515625}, {"start": 1229.04, "end": 1230.66, "word": " a", "probability": 0.9189453125}, {"start": 1230.66, "end": 1231.02, "word": " sample", "probability": 0.79736328125}, {"start": 1231.02, "end": 1232.08, "word": " that", "probability": 0.91650390625}, {"start": 
1232.08, "end": 1232.28, "word": " is", "probability": 0.94140625}, {"start": 1232.28, "end": 1232.66, "word": " random.", "probability": 0.85498046875}, {"start": 1233.86, "end": 1234.32, "word": " What", "probability": 0.6875}, {"start": 1234.32, "end": 1234.46, "word": " do", "probability": 0.87109375}, {"start": 1234.46, "end": 1234.54, "word": " you", "probability": 0.7861328125}, {"start": 1234.54, "end": 1234.62, "word": " mean", "probability": 0.96728515625}, {"start": 1234.62, "end": 1234.78, "word": " by", "probability": 0.9638671875}, {"start": 1234.78, "end": 1235.12, "word": " random?", "probability": 0.87841796875}, {"start": 1236.02, "end": 1236.3, "word": " In", "probability": 0.276611328125}, {"start": 1236.3, "end": 1238.58, "word": " this", "probability": 0.94384765625}, {"start": 1238.58, "end": 1238.94, "word": " case,", "probability": 0.9091796875}, {"start": 1239.12, "end": 1239.36, "word": " every", "probability": 0.8291015625}, {"start": 1239.36, "end": 1239.88, "word": " individual", "probability": 0.91064453125}, {"start": 1239.88, "end": 1240.26, "word": " or", "probability": 0.93310546875}, {"start": 1240.26, "end": 1240.78, "word": " item", "probability": 0.96728515625}, {"start": 1240.78, "end": 1241.48, "word": " from", "probability": 0.86474609375}, {"start": 1241.48, "end": 1241.78, "word": " the", "probability": 0.92333984375}, {"start": 1241.78, "end": 1242.2, "word": " frame", "probability": 0.9169921875}, {"start": 1242.2, "end": 1242.56, "word": " has", "probability": 0.93798828125}, {"start": 1242.56, "end": 1242.76, "word": " an", "probability": 0.95703125}, {"start": 1242.76, "end": 1243.06, "word": " equal", "probability": 0.91162109375}, {"start": 1243.06, "end": 1243.62, "word": " chance", "probability": 0.96923828125}, {"start": 1243.62, "end": 1243.82, "word": " of", "probability": 0.96435546875}, {"start": 1243.82, "end": 1244.02, "word": " being", "probability": 0.9453125}, {"start": 1244.02, "end": 1244.56, "word": 
" selected.", "probability": 0.88720703125}, {"start": 1246.86, "end": 1247.62, "word": " For", "probability": 0.95703125}, {"start": 1247.62, "end": 1247.96, "word": " example,", "probability": 0.97021484375}], "temperature": 1.0}, {"id": 48, "seek": 127301, "start": 1249.81, "end": 1273.01, "text": " suppose number of students in this class number of students is 52 so each one, I mean each student from 1 up to 52", "tokens": [7297, 1230, 295, 1731, 294, 341, 1508, 1230, 295, 1731, 307, 18079, 370, 1184, 472, 11, 286, 914, 1184, 3107, 490, 502, 493, 281, 18079], "avg_logprob": -0.2387319763119404, "compression_ratio": 1.369047619047619, "no_speech_prob": 0.0, "words": [{"start": 1249.81, "end": 1250.41, "word": " suppose", "probability": 0.1907958984375}, {"start": 1250.41, "end": 1251.07, "word": " number", "probability": 0.68994140625}, {"start": 1251.07, "end": 1251.29, "word": " of", "probability": 0.9638671875}, {"start": 1251.29, "end": 1251.63, "word": " students", "probability": 0.9638671875}, {"start": 1251.63, "end": 1251.85, "word": " in", "probability": 0.873046875}, {"start": 1251.85, "end": 1252.01, "word": " this", "probability": 0.9501953125}, {"start": 1252.01, "end": 1252.53, "word": " class", "probability": 0.9765625}, {"start": 1252.53, "end": 1255.31, "word": " number", "probability": 0.41943359375}, {"start": 1255.31, "end": 1255.55, "word": " of", "probability": 0.96337890625}, {"start": 1255.55, "end": 1256.07, "word": " students", "probability": 0.96875}, {"start": 1256.07, "end": 1258.93, "word": " is", "probability": 0.9189453125}, {"start": 1258.93, "end": 1259.41, "word": " 52", "probability": 0.93017578125}, {"start": 1259.41, "end": 1264.01, "word": " so", "probability": 0.499755859375}, {"start": 1264.01, "end": 1264.43, "word": " each", "probability": 0.912109375}, {"start": 1264.43, "end": 1264.75, "word": " one,", "probability": 0.8310546875}, {"start": 1265.19, "end": 1265.37, "word": " I", "probability": 0.828125}, {"start": 
1265.37, "end": 1265.53, "word": " mean", "probability": 0.9677734375}, {"start": 1265.53, "end": 1265.97, "word": " each", "probability": 0.85986328125}, {"start": 1265.97, "end": 1266.69, "word": " student", "probability": 0.9599609375}, {"start": 1266.69, "end": 1271.89, "word": " from", "probability": 0.86328125}, {"start": 1271.89, "end": 1272.15, "word": " 1", "probability": 0.77783203125}, {"start": 1272.15, "end": 1272.39, "word": " up", "probability": 0.94140625}, {"start": 1272.39, "end": 1272.55, "word": " to", "probability": 0.9345703125}, {"start": 1272.55, "end": 1273.01, "word": " 52", "probability": 0.98681640625}], "temperature": 1.0}, {"id": 49, "seek": 130311, "start": 1274.4, "end": 1303.12, "text": " has the same probability of being selected. 1 by 52. 1 by 52. 1 divided by 52. So each one has this probability. So the first one has the same because if I want to select for example 10 out of you. So the first one has each one has probability of 1 out of 52. That's the meaning of", "tokens": [575, 264, 912, 8482, 295, 885, 8209, 13, 502, 538, 18079, 13, 502, 538, 18079, 13, 502, 6666, 538, 18079, 13, 407, 1184, 472, 575, 341, 8482, 13, 407, 264, 700, 472, 575, 264, 912, 570, 498, 286, 528, 281, 3048, 337, 1365, 1266, 484, 295, 291, 13, 407, 264, 700, 472, 575, 1184, 472, 575, 8482, 295, 502, 484, 295, 18079, 13, 663, 311, 264, 3620, 295], "avg_logprob": -0.22441122929255167, "compression_ratio": 1.8431372549019607, "no_speech_prob": 0.0, "words": [{"start": 1274.4, "end": 1274.82, "word": " has", "probability": 0.6083984375}, {"start": 1274.82, "end": 1275.06, "word": " the", "probability": 0.90966796875}, {"start": 1275.06, "end": 1275.52, "word": " same", "probability": 0.89306640625}, {"start": 1275.52, "end": 1276.78, "word": " probability", "probability": 0.9365234375}, {"start": 1276.78, "end": 1277.18, "word": " of", "probability": 0.9453125}, {"start": 1277.18, "end": 1277.38, "word": " being", "probability": 0.9306640625}, {"start": 
1277.38, "end": 1278.0, "word": " selected.", "probability": 0.88427734375}, {"start": 1278.18, "end": 1278.42, "word": " 1", "probability": 0.370849609375}, {"start": 1278.42, "end": 1278.7, "word": " by", "probability": 0.5966796875}, {"start": 1278.7, "end": 1279.72, "word": " 52.", "probability": 0.845703125}, {"start": 1280.18, "end": 1280.6, "word": " 1", "probability": 0.716796875}, {"start": 1280.6, "end": 1281.02, "word": " by", "probability": 0.6142578125}, {"start": 1281.02, "end": 1281.48, "word": " 52.", "probability": 0.87109375}, {"start": 1281.58, "end": 1281.86, "word": " 1", "probability": 0.77294921875}, {"start": 1281.86, "end": 1282.24, "word": " divided", "probability": 0.74365234375}, {"start": 1282.24, "end": 1282.66, "word": " by", "probability": 0.96728515625}, {"start": 1282.66, "end": 1283.26, "word": " 52.", "probability": 0.95556640625}, {"start": 1283.54, "end": 1283.86, "word": " So", "probability": 0.8984375}, {"start": 1283.86, "end": 1284.1, "word": " each", "probability": 0.7705078125}, {"start": 1284.1, "end": 1284.3, "word": " one", "probability": 0.91357421875}, {"start": 1284.3, "end": 1284.56, "word": " has", "probability": 0.94384765625}, {"start": 1284.56, "end": 1284.82, "word": " this", "probability": 0.92138671875}, {"start": 1284.82, "end": 1285.28, "word": " probability.", "probability": 0.94580078125}, {"start": 1286.56, "end": 1287.24, "word": " So", "probability": 0.88330078125}, {"start": 1287.24, "end": 1287.56, "word": " the", "probability": 0.70068359375}, {"start": 1287.56, "end": 1287.8, "word": " first", "probability": 0.89013671875}, {"start": 1287.8, "end": 1287.98, "word": " one", "probability": 0.9306640625}, {"start": 1287.98, "end": 1288.18, "word": " has", "probability": 0.9384765625}, {"start": 1288.18, "end": 1288.32, "word": " the", "probability": 0.90771484375}, {"start": 1288.32, "end": 1288.58, "word": " same", "probability": 0.90234375}, {"start": 1288.58, "end": 1289.04, "word": " because", 
"probability": 0.3271484375}, {"start": 1289.04, "end": 1289.76, "word": " if", "probability": 0.923828125}, {"start": 1289.76, "end": 1289.92, "word": " I", "probability": 0.98193359375}, {"start": 1289.92, "end": 1290.1, "word": " want", "probability": 0.880859375}, {"start": 1290.1, "end": 1290.22, "word": " to", "probability": 0.9658203125}, {"start": 1290.22, "end": 1290.54, "word": " select", "probability": 0.85009765625}, {"start": 1290.54, "end": 1291.82, "word": " for", "probability": 0.69873046875}, {"start": 1291.82, "end": 1292.14, "word": " example", "probability": 0.9716796875}, {"start": 1292.14, "end": 1292.48, "word": " 10", "probability": 0.77685546875}, {"start": 1292.48, "end": 1292.8, "word": " out", "probability": 0.88916015625}, {"start": 1292.8, "end": 1292.98, "word": " of", "probability": 0.97216796875}, {"start": 1292.98, "end": 1293.2, "word": " you.", "probability": 0.95849609375}, {"start": 1295.1, "end": 1295.78, "word": " So", "probability": 0.293212890625}, {"start": 1295.78, "end": 1296.18, "word": " the", "probability": 0.8095703125}, {"start": 1296.18, "end": 1296.48, "word": " first", "probability": 0.89794921875}, {"start": 1296.48, "end": 1296.7, "word": " one", "probability": 0.93115234375}, {"start": 1296.7, "end": 1297.1, "word": " has", "probability": 0.93408203125}, {"start": 1297.1, "end": 1297.68, "word": " each", "probability": 0.473876953125}, {"start": 1297.68, "end": 1298.0, "word": " one", "probability": 0.92431640625}, {"start": 1298.0, "end": 1298.48, "word": " has", "probability": 0.92919921875}, {"start": 1298.48, "end": 1298.84, "word": " probability", "probability": 0.6064453125}, {"start": 1298.84, "end": 1299.2, "word": " of", "probability": 0.93017578125}, {"start": 1299.2, "end": 1299.96, "word": " 1", "probability": 0.6455078125}, {"start": 1299.96, "end": 1300.42, "word": " out", "probability": 0.8837890625}, {"start": 1300.42, "end": 1300.62, "word": " of", "probability": 0.96435546875}, {"start": 
1300.62, "end": 1301.0, "word": " 52.", "probability": 0.98583984375}, {"start": 1301.62, "end": 1302.26, "word": " That's", "probability": 0.761962890625}, {"start": 1302.26, "end": 1302.4, "word": " the", "probability": 0.923828125}, {"start": 1302.4, "end": 1302.64, "word": " meaning", "probability": 0.87451171875}, {"start": 1302.64, "end": 1303.12, "word": " of", "probability": 0.96728515625}], "temperature": 1.0}, {"id": 50, "seek": 132034, "start": 1303.74, "end": 1320.34, "text": "Each item from the frame has an equal chance of being selected. Selection may be with replacement. With replacement means selected individuals is returned", "tokens": [36, 608, 3174, 490, 264, 3920, 575, 364, 2681, 2931, 295, 885, 8209, 13, 1100, 5450, 815, 312, 365, 14419, 13, 2022, 14419, 1355, 8209, 5346, 307, 8752], "avg_logprob": -0.1959859954899755, "compression_ratio": 1.3628318584070795, "no_speech_prob": 0.0, "words": [{"start": 1303.74, "end": 1304.16, "word": "Each", "probability": 0.701171875}, {"start": 1304.16, "end": 1304.68, "word": " item", "probability": 0.94873046875}, {"start": 1304.68, "end": 1305.5, "word": " from", "probability": 0.88427734375}, {"start": 1305.5, "end": 1305.78, "word": " the", "probability": 0.92236328125}, {"start": 1305.78, "end": 1306.14, "word": " frame", "probability": 0.91162109375}, {"start": 1306.14, "end": 1306.76, "word": " has", "probability": 0.9423828125}, {"start": 1306.76, "end": 1306.92, "word": " an", "probability": 0.96337890625}, {"start": 1306.92, "end": 1307.16, "word": " equal", "probability": 0.9072265625}, {"start": 1307.16, "end": 1307.74, "word": " chance", "probability": 0.96923828125}, {"start": 1307.74, "end": 1308.08, "word": " of", "probability": 0.96337890625}, {"start": 1308.08, "end": 1308.34, "word": " being", "probability": 0.9521484375}, {"start": 1308.34, "end": 1308.9, "word": " selected.", "probability": 0.8828125}, {"start": 1310.98, "end": 1312.04, "word": " Selection", "probability": 
0.778076171875}, {"start": 1312.04, "end": 1313.7, "word": " may", "probability": 0.58154296875}, {"start": 1313.7, "end": 1314.3, "word": " be", "probability": 0.9560546875}, {"start": 1314.3, "end": 1314.8, "word": " with", "probability": 0.85400390625}, {"start": 1314.8, "end": 1315.4, "word": " replacement.", "probability": 0.80859375}, {"start": 1316.92, "end": 1317.14, "word": " With", "probability": 0.86865234375}, {"start": 1317.14, "end": 1317.84, "word": " replacement", "probability": 0.81787109375}, {"start": 1317.84, "end": 1318.3, "word": " means", "probability": 0.927734375}, {"start": 1318.3, "end": 1318.8, "word": " selected", "probability": 0.7744140625}, {"start": 1318.8, "end": 1319.48, "word": " individuals", "probability": 0.62646484375}, {"start": 1319.48, "end": 1319.8, "word": " is", "probability": 0.8974609375}, {"start": 1319.8, "end": 1320.34, "word": " returned", "probability": 0.875}], "temperature": 1.0}, {"id": 51, "seek": 134450, "start": 1320.8, "end": 1344.5, "text": " to the frame for possibility selection, or without replacement means selected individuals or item is not returned to the frame. So we have two types of selection, either with... So with replacement means item is returned back to the frame, or without population, the item is not returned back to the frame. 
So that's the two types of selection.", "tokens": [281, 264, 3920, 337, 7959, 9450, 11, 420, 1553, 14419, 1355, 8209, 5346, 420, 3174, 307, 406, 8752, 281, 264, 3920, 13, 407, 321, 362, 732, 3467, 295, 9450, 11, 2139, 365, 485, 407, 365, 14419, 1355, 3174, 307, 8752, 646, 281, 264, 3920, 11, 420, 1553, 4415, 11, 264, 3174, 307, 406, 8752, 646, 281, 264, 3920, 13, 407, 300, 311, 264, 732, 3467, 295, 9450, 13], "avg_logprob": -0.20414402001145956, "compression_ratio": 2.1036585365853657, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1320.8, "end": 1321.08, "word": " to", "probability": 0.51318359375}, {"start": 1321.08, "end": 1321.3, "word": " the", "probability": 0.90576171875}, {"start": 1321.3, "end": 1321.64, "word": " frame", "probability": 0.9169921875}, {"start": 1321.64, "end": 1322.04, "word": " for", "probability": 0.8984375}, {"start": 1322.04, "end": 1322.46, "word": " possibility", "probability": 0.68505859375}, {"start": 1322.46, "end": 1323.06, "word": " selection,", "probability": 0.85888671875}, {"start": 1323.82, "end": 1323.92, "word": " or", "probability": 0.9521484375}, {"start": 1323.92, "end": 1324.4, "word": " without", "probability": 0.8955078125}, {"start": 1324.4, "end": 1324.88, "word": " replacement", "probability": 0.7314453125}, {"start": 1324.88, "end": 1325.46, "word": " means", "probability": 0.409912109375}, {"start": 1325.46, "end": 1326.08, "word": " selected", "probability": 0.85205078125}, {"start": 1326.08, "end": 1326.7, "word": " individuals", "probability": 0.69580078125}, {"start": 1326.7, "end": 1326.98, "word": " or", "probability": 0.7802734375}, {"start": 1326.98, "end": 1327.38, "word": " item", "probability": 0.7607421875}, {"start": 1327.38, "end": 1327.94, "word": " is", "probability": 0.8974609375}, {"start": 1327.94, "end": 1328.2, "word": " not", "probability": 0.94482421875}, {"start": 1328.2, "end": 1328.6, "word": " returned", "probability": 0.79541015625}, {"start": 1328.6, "end": 1328.8, 
"word": " to", "probability": 0.96044921875}, {"start": 1328.8, "end": 1328.94, "word": " the", "probability": 0.9130859375}, {"start": 1328.94, "end": 1329.2, "word": " frame.", "probability": 0.90380859375}, {"start": 1329.56, "end": 1329.74, "word": " So", "probability": 0.9140625}, {"start": 1329.74, "end": 1329.84, "word": " we", "probability": 0.67431640625}, {"start": 1329.84, "end": 1329.98, "word": " have", "probability": 0.94140625}, {"start": 1329.98, "end": 1330.1, "word": " two", "probability": 0.8544921875}, {"start": 1330.1, "end": 1330.32, "word": " types", "probability": 0.65478515625}, {"start": 1330.32, "end": 1330.48, "word": " of", "probability": 0.94482421875}, {"start": 1330.48, "end": 1330.82, "word": " selection,", "probability": 0.869140625}, {"start": 1331.0, "end": 1331.16, "word": " either", "probability": 0.94140625}, {"start": 1331.16, "end": 1332.68, "word": " with...", "probability": 0.54376220703125}, {"start": 1332.68, "end": 1332.9, "word": " So", "probability": 0.45751953125}, {"start": 1332.9, "end": 1333.14, "word": " with", "probability": 0.7890625}, {"start": 1333.14, "end": 1333.48, "word": " replacement", "probability": 0.736328125}, {"start": 1333.48, "end": 1333.8, "word": " means", "probability": 0.833984375}, {"start": 1333.8, "end": 1334.18, "word": " item", "probability": 0.904296875}, {"start": 1334.18, "end": 1334.36, "word": " is", "probability": 0.9345703125}, {"start": 1334.36, "end": 1334.72, "word": " returned", "probability": 0.859375}, {"start": 1334.72, "end": 1335.18, "word": " back", "probability": 0.87060546875}, {"start": 1335.18, "end": 1336.26, "word": " to", "probability": 0.958984375}, {"start": 1336.26, "end": 1336.44, "word": " the", "probability": 0.9169921875}, {"start": 1336.44, "end": 1336.74, "word": " frame,", "probability": 0.90380859375}, {"start": 1336.82, "end": 1337.1, "word": " or", "probability": 0.962890625}, {"start": 1337.1, "end": 1337.52, "word": " without", "probability": 
0.90966796875}, {"start": 1337.52, "end": 1338.08, "word": " population,", "probability": 0.8837890625}, {"start": 1338.32, "end": 1338.4, "word": " the", "probability": 0.86865234375}, {"start": 1338.4, "end": 1338.7, "word": " item", "probability": 0.9619140625}, {"start": 1338.7, "end": 1338.98, "word": " is", "probability": 0.93603515625}, {"start": 1338.98, "end": 1339.2, "word": " not", "probability": 0.93212890625}, {"start": 1339.2, "end": 1339.58, "word": " returned", "probability": 0.8798828125}, {"start": 1339.58, "end": 1339.98, "word": " back", "probability": 0.86865234375}, {"start": 1339.98, "end": 1340.76, "word": " to", "probability": 0.9638671875}, {"start": 1340.76, "end": 1340.92, "word": " the", "probability": 0.91748046875}, {"start": 1340.92, "end": 1341.14, "word": " frame.", "probability": 0.88427734375}, {"start": 1341.26, "end": 1341.4, "word": " So", "probability": 0.953125}, {"start": 1341.4, "end": 1341.64, "word": " that's", "probability": 0.895263671875}, {"start": 1341.64, "end": 1341.72, "word": " the", "probability": 0.9091796875}, {"start": 1341.72, "end": 1341.92, "word": " two", "probability": 0.93359375}, {"start": 1341.92, "end": 1342.32, "word": " types", "probability": 0.82275390625}, {"start": 1342.32, "end": 1343.34, "word": " of", "probability": 0.966796875}, {"start": 1343.34, "end": 1344.5, "word": " selection.", "probability": 0.8916015625}], "temperature": 1.0}, {"id": 52, "seek": 137331, "start": 1345.61, "end": 1373.31, "text": " Now how can we obtain the sample? Sample obtained from something called table of random numbers. In a minute I will show you the table of random numbers. And other method of selecting a sample by using computer random number generators. So there are two methods for selecting a random number. 
Either by using the table that you have at the end of your book or by using a computer.", "tokens": [823, 577, 393, 321, 12701, 264, 6889, 30, 4832, 781, 14879, 490, 746, 1219, 3199, 295, 4974, 3547, 13, 682, 257, 3456, 286, 486, 855, 291, 264, 3199, 295, 4974, 3547, 13, 400, 661, 3170, 295, 18182, 257, 6889, 538, 1228, 3820, 4974, 1230, 38662, 13, 407, 456, 366, 732, 7150, 337, 18182, 257, 4974, 1230, 13, 13746, 538, 1228, 264, 3199, 300, 291, 362, 412, 264, 917, 295, 428, 1446, 420, 538, 1228, 257, 3820, 13], "avg_logprob": -0.20572917220684198, "compression_ratio": 1.886138613861386, "no_speech_prob": 0.0, "words": [{"start": 1345.61, "end": 1345.93, "word": " Now", "probability": 0.72509765625}, {"start": 1345.93, "end": 1346.17, "word": " how", "probability": 0.40869140625}, {"start": 1346.17, "end": 1346.37, "word": " can", "probability": 0.90869140625}, {"start": 1346.37, "end": 1346.49, "word": " we", "probability": 0.87451171875}, {"start": 1346.49, "end": 1346.81, "word": " obtain", "probability": 0.916015625}, {"start": 1346.81, "end": 1347.07, "word": " the", "probability": 0.65673828125}, {"start": 1347.07, "end": 1347.31, "word": " sample?", "probability": 0.8740234375}, {"start": 1347.61, "end": 1348.19, "word": " Sample", "probability": 0.756103515625}, {"start": 1348.19, "end": 1348.61, "word": " obtained", "probability": 0.9130859375}, {"start": 1348.61, "end": 1349.09, "word": " from", "probability": 0.8662109375}, {"start": 1349.09, "end": 1349.81, "word": " something", "probability": 0.80859375}, {"start": 1349.81, "end": 1350.21, "word": " called", "probability": 0.88623046875}, {"start": 1350.21, "end": 1350.59, "word": " table", "probability": 0.369140625}, {"start": 1350.59, "end": 1350.89, "word": " of", "probability": 0.96826171875}, {"start": 1350.89, "end": 1351.17, "word": " random", "probability": 0.82275390625}, {"start": 1351.17, "end": 1351.73, "word": " numbers.", "probability": 0.89892578125}, {"start": 1352.77, "end": 1352.97, 
"word": " In", "probability": 0.89453125}, {"start": 1352.97, "end": 1353.07, "word": " a", "probability": 0.9921875}, {"start": 1353.07, "end": 1353.21, "word": " minute", "probability": 0.92333984375}, {"start": 1353.21, "end": 1353.35, "word": " I", "probability": 0.73974609375}, {"start": 1353.35, "end": 1353.47, "word": " will", "probability": 0.88916015625}, {"start": 1353.47, "end": 1353.69, "word": " show", "probability": 0.9287109375}, {"start": 1353.69, "end": 1353.85, "word": " you", "probability": 0.6708984375}, {"start": 1353.85, "end": 1353.93, "word": " the", "probability": 0.80908203125}, {"start": 1353.93, "end": 1354.25, "word": " table", "probability": 0.875}, {"start": 1354.25, "end": 1354.75, "word": " of", "probability": 0.75048828125}, {"start": 1354.75, "end": 1354.95, "word": " random", "probability": 0.8310546875}, {"start": 1354.95, "end": 1355.29, "word": " numbers.", "probability": 0.587890625}, {"start": 1355.81, "end": 1356.07, "word": " And", "probability": 0.89794921875}, {"start": 1356.07, "end": 1356.43, "word": " other", "probability": 0.51806640625}, {"start": 1356.43, "end": 1356.79, "word": " method", "probability": 0.822265625}, {"start": 1356.79, "end": 1356.99, "word": " of", "probability": 0.93310546875}, {"start": 1356.99, "end": 1357.51, "word": " selecting", "probability": 0.88671875}, {"start": 1357.51, "end": 1357.81, "word": " a", "probability": 0.8671875}, {"start": 1357.81, "end": 1358.09, "word": " sample", "probability": 0.88232421875}, {"start": 1358.09, "end": 1359.19, "word": " by", "probability": 0.658203125}, {"start": 1359.19, "end": 1359.61, "word": " using", "probability": 0.9287109375}, {"start": 1359.61, "end": 1360.13, "word": " computer", "probability": 0.83154296875}, {"start": 1360.13, "end": 1360.47, "word": " random", "probability": 0.8642578125}, {"start": 1360.47, "end": 1360.81, "word": " number", "probability": 0.88134765625}, {"start": 1360.81, "end": 1361.37, "word": " generators.", 
"probability": 0.86572265625}, {"start": 1362.45, "end": 1362.65, "word": " So", "probability": 0.80615234375}, {"start": 1362.65, "end": 1362.93, "word": " there", "probability": 0.7255859375}, {"start": 1362.93, "end": 1363.13, "word": " are", "probability": 0.93212890625}, {"start": 1363.13, "end": 1363.29, "word": " two", "probability": 0.88671875}, {"start": 1363.29, "end": 1364.89, "word": " methods", "probability": 0.66845703125}, {"start": 1364.89, "end": 1365.27, "word": " for", "probability": 0.83984375}, {"start": 1365.27, "end": 1365.79, "word": " selecting", "probability": 0.88916015625}, {"start": 1365.79, "end": 1366.29, "word": " a", "probability": 0.76708984375}, {"start": 1366.29, "end": 1366.49, "word": " random", "probability": 0.84326171875}, {"start": 1366.49, "end": 1366.91, "word": " number.", "probability": 0.93603515625}, {"start": 1367.39, "end": 1367.63, "word": " Either", "probability": 0.8916015625}, {"start": 1367.63, "end": 1367.87, "word": " by", "probability": 0.95361328125}, {"start": 1367.87, "end": 1368.11, "word": " using", "probability": 0.92724609375}, {"start": 1368.11, "end": 1368.31, "word": " the", "probability": 0.86572265625}, {"start": 1368.31, "end": 1368.55, "word": " table", "probability": 0.892578125}, {"start": 1368.55, "end": 1368.79, "word": " that", "probability": 0.86474609375}, {"start": 1368.79, "end": 1368.93, "word": " you", "probability": 0.95751953125}, {"start": 1368.93, "end": 1369.11, "word": " have", "probability": 0.94384765625}, {"start": 1369.11, "end": 1369.33, "word": " at", "probability": 0.9443359375}, {"start": 1369.33, "end": 1369.45, "word": " the", "probability": 0.91748046875}, {"start": 1369.45, "end": 1369.63, "word": " end", "probability": 0.89404296875}, {"start": 1369.63, "end": 1369.75, "word": " of", "probability": 0.95751953125}, {"start": 1369.75, "end": 1369.85, "word": " your", "probability": 0.87255859375}, {"start": 1369.85, "end": 1370.19, "word": " book", "probability": 
0.96142578125}, {"start": 1370.19, "end": 1371.75, "word": " or", "probability": 0.431640625}, {"start": 1371.75, "end": 1371.95, "word": " by", "probability": 0.95751953125}, {"start": 1371.95, "end": 1372.39, "word": " using", "probability": 0.9326171875}, {"start": 1372.39, "end": 1372.91, "word": " a", "probability": 0.880859375}, {"start": 1372.91, "end": 1373.31, "word": " computer.", "probability": 0.8583984375}], "temperature": 1.0}, {"id": 53, "seek": 139995, "start": 1373.79, "end": 1399.95, "text": " I will show one of these and in the SPSS course you will see another one which is by using a computer. So let's see how can we obtain a sample from table of random number. I have maybe different table here.", "tokens": [286, 486, 855, 472, 295, 613, 293, 294, 264, 318, 6273, 50, 1164, 291, 486, 536, 1071, 472, 597, 307, 538, 1228, 257, 3820, 13, 407, 718, 311, 536, 577, 393, 321, 12701, 257, 6889, 490, 3199, 295, 4974, 1230, 13, 286, 362, 1310, 819, 3199, 510, 13], "avg_logprob": -0.24521684646606445, "compression_ratio": 1.417808219178082, "no_speech_prob": 0.0, "words": [{"start": 1373.79, "end": 1374.05, "word": " I", "probability": 0.67041015625}, {"start": 1374.05, "end": 1374.17, "word": " will", "probability": 0.80517578125}, {"start": 1374.17, "end": 1374.43, "word": " show", "probability": 0.93310546875}, {"start": 1374.43, "end": 1374.93, "word": " one", "probability": 0.87646484375}, {"start": 1374.93, "end": 1375.07, "word": " of", "probability": 0.96875}, {"start": 1375.07, "end": 1375.33, "word": " these", "probability": 0.82421875}, {"start": 1375.33, "end": 1376.13, "word": " and", "probability": 0.56689453125}, {"start": 1376.13, "end": 1376.55, "word": " in", "probability": 0.55712890625}, {"start": 1376.55, "end": 1376.71, "word": " the", "probability": 0.8671875}, {"start": 1376.71, "end": 1377.29, "word": " SPSS", "probability": 0.8006998697916666}, {"start": 1377.29, "end": 1377.65, "word": " course", "probability": 0.9453125}, 
{"start": 1377.65, "end": 1377.83, "word": " you", "probability": 0.4951171875}, {"start": 1377.83, "end": 1377.97, "word": " will", "probability": 0.81640625}, {"start": 1377.97, "end": 1378.25, "word": " see", "probability": 0.91943359375}, {"start": 1378.25, "end": 1378.81, "word": " another", "probability": 0.69287109375}, {"start": 1378.81, "end": 1379.23, "word": " one", "probability": 0.921875}, {"start": 1379.23, "end": 1379.49, "word": " which", "probability": 0.72216796875}, {"start": 1379.49, "end": 1379.65, "word": " is", "probability": 0.93896484375}, {"start": 1379.65, "end": 1379.89, "word": " by", "probability": 0.822265625}, {"start": 1379.89, "end": 1380.39, "word": " using", "probability": 0.92578125}, {"start": 1380.39, "end": 1381.85, "word": " a", "probability": 0.6435546875}, {"start": 1381.85, "end": 1382.23, "word": " computer.", "probability": 0.81396484375}, {"start": 1382.59, "end": 1382.93, "word": " So", "probability": 0.88525390625}, {"start": 1382.93, "end": 1383.13, "word": " let's", "probability": 0.848388671875}, {"start": 1383.13, "end": 1383.25, "word": " see", "probability": 0.91259765625}, {"start": 1383.25, "end": 1383.33, "word": " how", "probability": 0.9248046875}, {"start": 1383.33, "end": 1383.51, "word": " can", "probability": 0.71044921875}, {"start": 1383.51, "end": 1383.69, "word": " we", "probability": 0.9482421875}, {"start": 1383.69, "end": 1384.15, "word": " obtain", "probability": 0.9033203125}, {"start": 1384.15, "end": 1385.79, "word": " a", "probability": 0.9541015625}, {"start": 1385.79, "end": 1386.09, "word": " sample", "probability": 0.66845703125}, {"start": 1386.09, "end": 1386.75, "word": " from", "probability": 0.88427734375}, {"start": 1386.75, "end": 1388.45, "word": " table", "probability": 0.5859375}, {"start": 1388.45, "end": 1391.73, "word": " of", "probability": 0.83984375}, {"start": 1391.73, "end": 1392.25, "word": " random", "probability": 0.8330078125}, {"start": 1392.25, "end": 1392.59, 
"word": " number.", "probability": 0.50830078125}, {"start": 1396.95, "end": 1397.67, "word": " I", "probability": 0.99169921875}, {"start": 1397.67, "end": 1398.03, "word": " have", "probability": 0.9423828125}, {"start": 1398.03, "end": 1398.67, "word": " maybe", "probability": 0.63916015625}, {"start": 1398.67, "end": 1399.05, "word": " different", "probability": 0.71484375}, {"start": 1399.05, "end": 1399.51, "word": " table", "probability": 0.890625}, {"start": 1399.51, "end": 1399.95, "word": " here.", "probability": 0.8544921875}], "temperature": 1.0}, {"id": 54, "seek": 142617, "start": 1401.23, "end": 1426.17, "text": " But the same idea to use that table. Let's see how can we choose a sample by using a random number. Now, for example, suppose in this class", "tokens": [583, 264, 912, 1558, 281, 764, 300, 3199, 13, 961, 311, 536, 577, 393, 321, 2826, 257, 6889, 538, 1228, 257, 4974, 1230, 13, 823, 11, 337, 1365, 11, 7297, 294, 341, 1508], "avg_logprob": -0.1891084563206224, "compression_ratio": 1.238938053097345, "no_speech_prob": 0.0, "words": [{"start": 1401.23, "end": 1401.61, "word": " But", "probability": 0.427001953125}, {"start": 1401.61, "end": 1401.81, "word": " the", "probability": 0.712890625}, {"start": 1401.81, "end": 1402.09, "word": " same", "probability": 0.8876953125}, {"start": 1402.09, "end": 1402.47, "word": " idea", "probability": 0.9140625}, {"start": 1402.47, "end": 1402.73, "word": " to", "probability": 0.87646484375}, {"start": 1402.73, "end": 1403.11, "word": " use", "probability": 0.87109375}, {"start": 1403.11, "end": 1404.49, "word": " that", "probability": 0.787109375}, {"start": 1404.49, "end": 1404.81, "word": " table.", "probability": 0.86767578125}, {"start": 1405.29, "end": 1405.77, "word": " Let's", "probability": 0.914306640625}, {"start": 1405.77, "end": 1406.43, "word": " see", "probability": 0.91845703125}, {"start": 1406.43, "end": 1407.39, "word": " how", "probability": 0.85205078125}, {"start": 1407.39, "end": 
1407.75, "word": " can", "probability": 0.810546875}, {"start": 1407.75, "end": 1408.09, "word": " we", "probability": 0.958984375}, {"start": 1408.09, "end": 1409.27, "word": " choose", "probability": 0.89453125}, {"start": 1409.27, "end": 1409.53, "word": " a", "probability": 0.97998046875}, {"start": 1409.53, "end": 1409.87, "word": " sample", "probability": 0.70263671875}, {"start": 1409.87, "end": 1411.15, "word": " by", "probability": 0.91162109375}, {"start": 1411.15, "end": 1411.63, "word": " using", "probability": 0.92822265625}, {"start": 1411.63, "end": 1414.43, "word": " a", "probability": 0.85546875}, {"start": 1414.43, "end": 1414.65, "word": " random", "probability": 0.85693359375}, {"start": 1414.65, "end": 1414.99, "word": " number.", "probability": 0.951171875}, {"start": 1422.49, "end": 1423.25, "word": " Now,", "probability": 0.8193359375}, {"start": 1423.35, "end": 1423.49, "word": " for", "probability": 0.95166015625}, {"start": 1423.49, "end": 1423.83, "word": " example,", "probability": 0.97509765625}, {"start": 1423.97, "end": 1424.45, "word": " suppose", "probability": 0.8740234375}, {"start": 1424.45, "end": 1425.41, "word": " in", "probability": 0.86474609375}, {"start": 1425.41, "end": 1425.65, "word": " this", "probability": 0.95263671875}, {"start": 1425.65, "end": 1426.17, "word": " class", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 55, "seek": 145111, "start": 1426.91, "end": 1451.11, "text": " As I mentioned, there are 52 students. So each one has a number, ID number one, two, up to 52. So the numbers are 01, 02, all the way up to 52. 
So the maximum digits here, two, two digits.", "tokens": [1018, 286, 2835, 11, 456, 366, 18079, 1731, 13, 407, 1184, 472, 575, 257, 1230, 11, 7348, 1230, 472, 11, 732, 11, 493, 281, 18079, 13, 407, 264, 3547, 366, 23185, 11, 37202, 11, 439, 264, 636, 493, 281, 18079, 13, 407, 264, 6674, 27011, 510, 11, 732, 11, 732, 27011, 13], "avg_logprob": -0.19059552224177234, "compression_ratio": 1.4538461538461538, "no_speech_prob": 0.0, "words": [{"start": 1426.91, "end": 1427.25, "word": " As", "probability": 0.861328125}, {"start": 1427.25, "end": 1427.37, "word": " I", "probability": 0.98828125}, {"start": 1427.37, "end": 1427.79, "word": " mentioned,", "probability": 0.80810546875}, {"start": 1428.47, "end": 1428.69, "word": " there", "probability": 0.8544921875}, {"start": 1428.69, "end": 1429.37, "word": " are", "probability": 0.94873046875}, {"start": 1429.37, "end": 1430.19, "word": " 52", "probability": 0.93115234375}, {"start": 1430.19, "end": 1431.09, "word": " students.", "probability": 0.8935546875}, {"start": 1435.11, "end": 1435.75, "word": " So", "probability": 0.876953125}, {"start": 1435.75, "end": 1436.01, "word": " each", "probability": 0.8203125}, {"start": 1436.01, "end": 1436.21, "word": " one", "probability": 0.935546875}, {"start": 1436.21, "end": 1436.47, "word": " has", "probability": 0.95166015625}, {"start": 1436.47, "end": 1436.59, "word": " a", "probability": 0.6943359375}, {"start": 1436.59, "end": 1436.91, "word": " number,", "probability": 0.91357421875}, {"start": 1437.39, "end": 1437.69, "word": " ID", "probability": 0.79052734375}, {"start": 1437.69, "end": 1437.99, "word": " number", "probability": 0.86083984375}, {"start": 1437.99, "end": 1438.21, "word": " one,", "probability": 0.27587890625}, {"start": 1438.31, "end": 1438.45, "word": " two,", "probability": 0.94091796875}, {"start": 1438.49, "end": 1438.65, "word": " up", "probability": 0.95947265625}, {"start": 1438.65, "end": 1438.75, "word": " to", "probability": 0.96923828125}, 
{"start": 1438.75, "end": 1439.13, "word": " 52.", "probability": 0.95263671875}, {"start": 1440.15, "end": 1440.43, "word": " So", "probability": 0.92822265625}, {"start": 1440.43, "end": 1440.61, "word": " the", "probability": 0.87060546875}, {"start": 1440.61, "end": 1441.03, "word": " numbers", "probability": 0.70849609375}, {"start": 1441.03, "end": 1442.39, "word": " are", "probability": 0.93896484375}, {"start": 1442.39, "end": 1442.77, "word": " 01,", "probability": 0.437744140625}, {"start": 1443.67, "end": 1444.13, "word": " 02,", "probability": 0.93603515625}, {"start": 1444.35, "end": 1444.55, "word": " all", "probability": 0.9453125}, {"start": 1444.55, "end": 1444.71, "word": " the", "probability": 0.9150390625}, {"start": 1444.71, "end": 1444.87, "word": " way", "probability": 0.955078125}, {"start": 1444.87, "end": 1445.11, "word": " up", "probability": 0.9580078125}, {"start": 1445.11, "end": 1445.39, "word": " to", "probability": 0.96875}, {"start": 1445.39, "end": 1446.09, "word": " 52.", "probability": 0.4951171875}, {"start": 1446.71, "end": 1447.05, "word": " So", "probability": 0.939453125}, {"start": 1447.05, "end": 1447.19, "word": " the", "probability": 0.8916015625}, {"start": 1447.19, "end": 1447.51, "word": " maximum", "probability": 0.939453125}, {"start": 1447.51, "end": 1447.85, "word": " digits", "probability": 0.955078125}, {"start": 1447.85, "end": 1448.23, "word": " here,", "probability": 0.85546875}, {"start": 1449.73, "end": 1450.01, "word": " two,", "probability": 0.888671875}, {"start": 1450.19, "end": 1450.79, "word": " two", "probability": 0.9384765625}, {"start": 1450.79, "end": 1451.11, "word": " digits.", "probability": 0.9248046875}], "temperature": 1.0}, {"id": 56, "seek": 148303, "start": 1455.15, "end": 1483.03, "text": " 1, 2, 3, up to 5, 2, 2, so you have two digits. Now suppose I decided to take a random sample of size, for example, N instead. How can I select N out of U? 
In this case, each one has the same chance of being selected. Now based on this table, you can pick any row or any column. Randomly.", "tokens": [502, 11, 568, 11, 805, 11, 493, 281, 1025, 11, 568, 11, 568, 11, 370, 291, 362, 732, 27011, 13, 823, 7297, 286, 3047, 281, 747, 257, 4974, 6889, 295, 2744, 11, 337, 1365, 11, 426, 2602, 13, 1012, 393, 286, 3048, 426, 484, 295, 624, 30, 682, 341, 1389, 11, 1184, 472, 575, 264, 912, 2931, 295, 885, 8209, 13, 823, 2361, 322, 341, 3199, 11, 291, 393, 1888, 604, 5386, 420, 604, 7738, 13, 37603, 356, 13], "avg_logprob": -0.2613281186670065, "compression_ratio": 1.4378109452736318, "no_speech_prob": 0.0, "words": [{"start": 1455.15, "end": 1455.71, "word": " 1,", "probability": 0.3134765625}, {"start": 1455.93, "end": 1456.03, "word": " 2,", "probability": 0.61279296875}, {"start": 1456.17, "end": 1456.35, "word": " 3,", "probability": 0.9619140625}, {"start": 1456.43, "end": 1456.59, "word": " up", "probability": 0.841796875}, {"start": 1456.59, "end": 1456.71, "word": " to", "probability": 0.90771484375}, {"start": 1456.71, "end": 1456.85, "word": " 5,", "probability": 0.7197265625}, {"start": 1456.99, "end": 1457.09, "word": " 2,", "probability": 0.57470703125}, {"start": 1457.09, "end": 1457.35, "word": " 2,", "probability": 0.50146484375}, {"start": 1457.41, "end": 1457.45, "word": " so", "probability": 0.59033203125}, {"start": 1457.45, "end": 1457.65, "word": " you", "probability": 0.6728515625}, {"start": 1457.65, "end": 1457.83, "word": " have", "probability": 0.927734375}, {"start": 1457.83, "end": 1458.01, "word": " two", "probability": 0.55322265625}, {"start": 1458.01, "end": 1458.33, "word": " digits.", "probability": 0.5888671875}, {"start": 1459.47, "end": 1459.75, "word": " Now", "probability": 0.89990234375}, {"start": 1459.75, "end": 1460.13, "word": " suppose", "probability": 0.6953125}, {"start": 1460.13, "end": 1460.71, "word": " I", "probability": 0.84228515625}, {"start": 1460.71, "end": 1461.19, "word": 
" decided", "probability": 0.86865234375}, {"start": 1461.19, "end": 1461.45, "word": " to", "probability": 0.97021484375}, {"start": 1461.45, "end": 1461.81, "word": " take", "probability": 0.88671875}, {"start": 1461.81, "end": 1462.15, "word": " a", "probability": 0.98095703125}, {"start": 1462.15, "end": 1462.41, "word": " random", "probability": 0.83984375}, {"start": 1462.41, "end": 1462.93, "word": " sample", "probability": 0.857421875}, {"start": 1462.93, "end": 1463.71, "word": " of", "probability": 0.9296875}, {"start": 1463.71, "end": 1464.29, "word": " size,", "probability": 0.84375}, {"start": 1464.61, "end": 1465.03, "word": " for", "probability": 0.95263671875}, {"start": 1465.03, "end": 1465.41, "word": " example,", "probability": 0.9697265625}, {"start": 1465.53, "end": 1465.65, "word": " N", "probability": 0.326171875}, {"start": 1465.65, "end": 1465.99, "word": " instead.", "probability": 0.187744140625}, {"start": 1466.93, "end": 1467.45, "word": " How", "probability": 0.94970703125}, {"start": 1467.45, "end": 1467.67, "word": " can", "probability": 0.94189453125}, {"start": 1467.67, "end": 1467.85, "word": " I", "probability": 0.99609375}, {"start": 1467.85, "end": 1468.27, "word": " select", "probability": 0.8701171875}, {"start": 1468.27, "end": 1468.55, "word": " N", "probability": 0.453369140625}, {"start": 1468.55, "end": 1468.93, "word": " out", "probability": 0.87158203125}, {"start": 1468.93, "end": 1469.11, "word": " of", "probability": 0.97509765625}, {"start": 1469.11, "end": 1469.29, "word": " U?", "probability": 0.853515625}, {"start": 1470.83, "end": 1471.09, "word": " In", "probability": 0.93603515625}, {"start": 1471.09, "end": 1471.27, "word": " this", "probability": 0.9462890625}, {"start": 1471.27, "end": 1471.53, "word": " case,", "probability": 0.9091796875}, {"start": 1471.63, "end": 1471.79, "word": " each", "probability": 0.93017578125}, {"start": 1471.79, "end": 1471.99, "word": " one", "probability": 0.9306640625}, 
{"start": 1471.99, "end": 1472.23, "word": " has", "probability": 0.9423828125}, {"start": 1472.23, "end": 1472.39, "word": " the", "probability": 0.865234375}, {"start": 1472.39, "end": 1472.57, "word": " same", "probability": 0.83203125}, {"start": 1472.57, "end": 1472.95, "word": " chance", "probability": 0.97412109375}, {"start": 1472.95, "end": 1473.09, "word": " of", "probability": 0.951171875}, {"start": 1473.09, "end": 1473.21, "word": " being", "probability": 0.947265625}, {"start": 1473.21, "end": 1473.69, "word": " selected.", "probability": 0.89404296875}, {"start": 1475.39, "end": 1475.75, "word": " Now", "probability": 0.93212890625}, {"start": 1475.75, "end": 1476.07, "word": " based", "probability": 0.57861328125}, {"start": 1476.07, "end": 1476.25, "word": " on", "probability": 0.94921875}, {"start": 1476.25, "end": 1476.47, "word": " this", "probability": 0.94482421875}, {"start": 1476.47, "end": 1476.79, "word": " table,", "probability": 0.84228515625}, {"start": 1477.19, "end": 1477.49, "word": " you", "probability": 0.958984375}, {"start": 1477.49, "end": 1477.83, "word": " can", "probability": 0.9462890625}, {"start": 1477.83, "end": 1478.33, "word": " pick", "probability": 0.8857421875}, {"start": 1478.33, "end": 1478.77, "word": " any", "probability": 0.91015625}, {"start": 1478.77, "end": 1479.01, "word": " row", "probability": 0.90283203125}, {"start": 1479.01, "end": 1479.99, "word": " or", "probability": 0.79052734375}, {"start": 1479.99, "end": 1480.31, "word": " any", "probability": 0.91015625}, {"start": 1480.31, "end": 1480.65, "word": " column.", "probability": 0.88623046875}, {"start": 1482.47, "end": 1483.03, "word": " Randomly.", "probability": 0.92724609375}], "temperature": 1.0}, {"id": 57, "seek": 151157, "start": 1483.99, "end": 1511.57, "text": " For example, suppose I select the first row. Now, the first student will be selected as student number to take two digits. We have to take how many digits? 
Because students have ID card that consists of two digits, 0102 up to 52.", "tokens": [1171, 1365, 11, 7297, 286, 3048, 264, 700, 5386, 13, 823, 11, 264, 700, 3107, 486, 312, 8209, 382, 3107, 1230, 281, 747, 732, 27011, 13, 492, 362, 281, 747, 577, 867, 27011, 30, 1436, 1731, 362, 7348, 2920, 300, 14689, 295, 732, 27011, 11, 1958, 3279, 17, 493, 281, 18079, 13], "avg_logprob": -0.20563089847564697, "compression_ratio": 1.4838709677419355, "no_speech_prob": 0.0, "words": [{"start": 1483.99, "end": 1484.23, "word": " For", "probability": 0.783203125}, {"start": 1484.23, "end": 1484.49, "word": " example,", "probability": 0.9638671875}, {"start": 1484.61, "end": 1484.87, "word": " suppose", "probability": 0.88671875}, {"start": 1484.87, "end": 1485.05, "word": " I", "probability": 0.94677734375}, {"start": 1485.05, "end": 1486.25, "word": " select", "probability": 0.84130859375}, {"start": 1486.25, "end": 1486.43, "word": " the", "probability": 0.78857421875}, {"start": 1486.43, "end": 1486.75, "word": " first", "probability": 0.83447265625}, {"start": 1486.75, "end": 1487.09, "word": " row.", "probability": 0.87451171875}, {"start": 1489.67, "end": 1489.95, "word": " Now,", "probability": 0.86083984375}, {"start": 1491.35, "end": 1491.63, "word": " the", "probability": 0.8994140625}, {"start": 1491.63, "end": 1492.21, "word": " first", "probability": 0.89208984375}, {"start": 1492.21, "end": 1493.11, "word": " student", "probability": 0.96044921875}, {"start": 1493.11, "end": 1493.81, "word": " will", "probability": 0.8037109375}, {"start": 1493.81, "end": 1493.97, "word": " be", "probability": 0.953125}, {"start": 1493.97, "end": 1494.45, "word": " selected", "probability": 0.76708984375}, {"start": 1494.45, "end": 1495.69, "word": " as", "probability": 0.23876953125}, {"start": 1495.69, "end": 1496.05, "word": " student", "probability": 0.69189453125}, {"start": 1496.05, "end": 1496.57, "word": " number", "probability": 0.91650390625}, {"start": 1496.57, "end": 
1499.07, "word": " to", "probability": 0.65869140625}, {"start": 1499.07, "end": 1499.83, "word": " take", "probability": 0.89404296875}, {"start": 1499.83, "end": 1500.05, "word": " two", "probability": 0.8466796875}, {"start": 1500.05, "end": 1500.45, "word": " digits.", "probability": 0.95166015625}, {"start": 1501.89, "end": 1502.61, "word": " We", "probability": 0.939453125}, {"start": 1502.61, "end": 1502.83, "word": " have", "probability": 0.94580078125}, {"start": 1502.83, "end": 1502.97, "word": " to", "probability": 0.96875}, {"start": 1502.97, "end": 1503.25, "word": " take", "probability": 0.86669921875}, {"start": 1503.25, "end": 1503.47, "word": " how", "probability": 0.91943359375}, {"start": 1503.47, "end": 1503.65, "word": " many", "probability": 0.900390625}, {"start": 1503.65, "end": 1504.07, "word": " digits?", "probability": 0.958984375}, {"start": 1504.59, "end": 1505.05, "word": " Because", "probability": 0.88671875}, {"start": 1505.05, "end": 1505.49, "word": " students", "probability": 0.7490234375}, {"start": 1505.49, "end": 1505.85, "word": " have", "probability": 0.51025390625}, {"start": 1505.85, "end": 1506.59, "word": " ID", "probability": 0.6337890625}, {"start": 1506.59, "end": 1507.23, "word": " card", "probability": 0.732421875}, {"start": 1507.23, "end": 1508.77, "word": " that", "probability": 0.88818359375}, {"start": 1508.77, "end": 1509.21, "word": " consists", "probability": 0.8125}, {"start": 1509.21, "end": 1509.35, "word": " of", "probability": 0.9658203125}, {"start": 1509.35, "end": 1509.47, "word": " two", "probability": 0.890625}, {"start": 1509.47, "end": 1509.69, "word": " digits,", "probability": 0.9599609375}, {"start": 1509.85, "end": 1510.71, "word": " 0102", "probability": 0.810302734375}, {"start": 1510.71, "end": 1510.93, "word": " up", "probability": 0.93505859375}, {"start": 1510.93, "end": 1511.11, "word": " to", "probability": 0.9482421875}, {"start": 1511.11, "end": 1511.57, "word": " 52.", 
"probability": 0.966796875}], "temperature": 1.0}, {"id": 58, "seek": 152777, "start": 1512.89, "end": 1527.77, "text": " So, what's the first number students will be selected based on this table? Forget about the line 101. Start with this number.", "tokens": [407, 11, 437, 311, 264, 700, 1230, 1731, 486, 312, 8209, 2361, 322, 341, 3199, 30, 18675, 466, 264, 1622, 21055, 13, 6481, 365, 341, 1230, 13], "avg_logprob": -0.28599330463579725, "compression_ratio": 1.1886792452830188, "no_speech_prob": 0.0, "words": [{"start": 1512.89, "end": 1513.33, "word": " So,", "probability": 0.463134765625}, {"start": 1513.65, "end": 1513.93, "word": " what's", "probability": 0.856201171875}, {"start": 1513.93, "end": 1514.15, "word": " the", "probability": 0.92333984375}, {"start": 1514.15, "end": 1514.51, "word": " first", "probability": 0.875}, {"start": 1514.51, "end": 1514.87, "word": " number", "probability": 0.943359375}, {"start": 1514.87, "end": 1515.37, "word": " students", "probability": 0.3818359375}, {"start": 1515.37, "end": 1515.59, "word": " will", "probability": 0.85205078125}, {"start": 1515.59, "end": 1515.73, "word": " be", "probability": 0.94091796875}, {"start": 1515.73, "end": 1516.15, "word": " selected", "probability": 0.87939453125}, {"start": 1516.15, "end": 1517.01, "word": " based", "probability": 0.458740234375}, {"start": 1517.01, "end": 1517.15, "word": " on", "probability": 0.9462890625}, {"start": 1517.15, "end": 1517.35, "word": " this", "probability": 0.6015625}, {"start": 1517.35, "end": 1517.69, "word": " table?", "probability": 0.87158203125}, {"start": 1520.17, "end": 1520.87, "word": " Forget", "probability": 0.29736328125}, {"start": 1520.87, "end": 1521.25, "word": " about", "probability": 0.89453125}, {"start": 1521.25, "end": 1521.45, "word": " the", "probability": 0.89208984375}, {"start": 1521.45, "end": 1521.67, "word": " line", "probability": 0.92919921875}, {"start": 1521.67, "end": 1522.13, "word": " 101.", "probability": 
0.73486328125}, {"start": 1526.27, "end": 1526.97, "word": " Start", "probability": 0.92138671875}, {"start": 1526.97, "end": 1527.25, "word": " with", "probability": 0.8994140625}, {"start": 1527.25, "end": 1527.43, "word": " this", "probability": 0.94775390625}, {"start": 1527.43, "end": 1527.77, "word": " number.", "probability": 0.9052734375}], "temperature": 1.0}, {"id": 59, "seek": 156696, "start": 1542.1, "end": 1566.96, "text": " So the first one, 19. The second, 22. The third student, 19, 22. The third, 9. The third, 9. I'm taking the first row. Then fifth.", "tokens": [407, 264, 700, 472, 11, 1294, 13, 440, 1150, 11, 5853, 13, 440, 2636, 3107, 11, 1294, 11, 5853, 13, 440, 2636, 11, 1722, 13, 440, 2636, 11, 1722, 13, 286, 478, 1940, 264, 700, 5386, 13, 1396, 9266, 13], "avg_logprob": -0.2549542741077702, "compression_ratio": 1.4395604395604396, "no_speech_prob": 0.0, "words": [{"start": 1542.1, "end": 1542.32, "word": " So", "probability": 0.59521484375}, {"start": 1542.32, "end": 1542.5, "word": " the", "probability": 0.6455078125}, {"start": 1542.5, "end": 1542.78, "word": " first", "probability": 0.869140625}, {"start": 1542.78, "end": 1543.08, "word": " one,", "probability": 0.93994140625}, {"start": 1544.98, "end": 1545.52, "word": " 19.", "probability": 0.66650390625}, {"start": 1546.64, "end": 1547.32, "word": " The", "probability": 0.7900390625}, {"start": 1547.32, "end": 1547.78, "word": " second,", "probability": 0.89453125}, {"start": 1548.02, "end": 1548.54, "word": " 22.", "probability": 0.96923828125}, {"start": 1549.92, "end": 1550.6, "word": " The", "probability": 0.82275390625}, {"start": 1550.6, "end": 1550.9, "word": " third", "probability": 0.93701171875}, {"start": 1550.9, "end": 1551.36, "word": " student,", "probability": 0.9560546875}, {"start": 1554.96, "end": 1556.52, "word": " 19,", "probability": 0.828125}, {"start": 1556.74, "end": 1557.28, "word": " 22.", "probability": 0.9638671875}, {"start": 1558.64, "end": 1558.78, "word": 
" The", "probability": 0.373779296875}, {"start": 1558.78, "end": 1559.02, "word": " third,", "probability": 0.8603515625}, {"start": 1559.08, "end": 1559.38, "word": " 9.", "probability": 0.69677734375}, {"start": 1561.72, "end": 1561.92, "word": " The", "probability": 0.61474609375}, {"start": 1561.92, "end": 1562.12, "word": " third,", "probability": 0.908203125}, {"start": 1562.22, "end": 1562.48, "word": " 9.", "probability": 0.96923828125}, {"start": 1563.28, "end": 1563.54, "word": " I'm", "probability": 0.876953125}, {"start": 1563.54, "end": 1563.8, "word": " taking", "probability": 0.9052734375}, {"start": 1563.8, "end": 1564.0, "word": " the", "probability": 0.92333984375}, {"start": 1564.0, "end": 1564.34, "word": " first", "probability": 0.8828125}, {"start": 1564.34, "end": 1564.7, "word": " row.", "probability": 0.88623046875}, {"start": 1565.72, "end": 1566.28, "word": " Then", "probability": 0.84375}, {"start": 1566.28, "end": 1566.96, "word": " fifth.", "probability": 0.61962890625}], "temperature": 1.0}, {"id": 60, "seek": 157871, "start": 1567.65, "end": 1578.71, "text": " 34 student number 05", "tokens": [12790, 3107, 1230, 1958, 20], "avg_logprob": -0.8776041865348816, "compression_ratio": 0.7241379310344828, "no_speech_prob": 0.0, "words": [{"start": 1567.65, "end": 1568.73, "word": " 34", "probability": 0.250732421875}, {"start": 1568.73, "end": 1576.51, "word": " student", "probability": 0.1519775390625}, {"start": 1576.51, "end": 1577.19, "word": " number", "probability": 0.87451171875}, {"start": 1577.19, "end": 1578.71, "word": " 05", "probability": 0.7099609375}], "temperature": 1.0}, {"id": 61, "seek": 160650, "start": 1584.34, "end": 1606.5, "text": " Now, what's about seventy-five? Seventy-five is not selected because the maximum I have is fifty-two. Next. Sixty-two is not selected. 
Eighty-seven.", "tokens": [823, 11, 437, 311, 466, 25662, 12, 18621, 30, 1100, 2475, 88, 12, 18621, 307, 406, 8209, 570, 264, 6674, 286, 362, 307, 13442, 12, 20534, 13, 3087, 13, 47374, 88, 12, 20534, 307, 406, 8209, 13, 462, 12452, 12, 44476, 13], "avg_logprob": -0.269803783228231, "compression_ratio": 1.3303571428571428, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 1584.34, "end": 1585.22, "word": " Now,", "probability": 0.378662109375}, {"start": 1585.82, "end": 1586.08, "word": " what's", "probability": 0.648681640625}, {"start": 1586.08, "end": 1586.3, "word": " about", "probability": 0.91455078125}, {"start": 1586.3, "end": 1586.62, "word": " seventy", "probability": 0.33251953125}, {"start": 1586.62, "end": 1587.02, "word": "-five?", "probability": 0.831787109375}, {"start": 1588.24, "end": 1589.12, "word": " Seventy", "probability": 0.8992513020833334}, {"start": 1589.12, "end": 1589.34, "word": "-five", "probability": 0.957763671875}, {"start": 1589.34, "end": 1589.5, "word": " is", "probability": 0.92236328125}, {"start": 1589.5, "end": 1589.66, "word": " not", "probability": 0.9560546875}, {"start": 1589.66, "end": 1590.12, "word": " selected", "probability": 0.896484375}, {"start": 1590.12, "end": 1591.7, "word": " because", "probability": 0.44580078125}, {"start": 1591.7, "end": 1592.52, "word": " the", "probability": 0.89794921875}, {"start": 1592.52, "end": 1592.86, "word": " maximum", "probability": 0.93798828125}, {"start": 1592.86, "end": 1593.06, "word": " I", "probability": 0.92724609375}, {"start": 1593.06, "end": 1593.28, "word": " have", "probability": 0.95068359375}, {"start": 1593.28, "end": 1593.42, "word": " is", "probability": 0.9462890625}, {"start": 1593.42, "end": 1593.66, "word": " fifty", "probability": 0.7587890625}, {"start": 1593.66, "end": 1593.96, "word": "-two.", "probability": 0.941162109375}, {"start": 1595.34, "end": 1595.76, "word": " Next.", "probability": 0.8251953125}, {"start": 1597.96, "end": 1598.84, 
"word": " Sixty", "probability": 0.7188720703125}, {"start": 1598.84, "end": 1599.92, "word": "-two", "probability": 0.95068359375}, {"start": 1599.92, "end": 1600.58, "word": " is", "probability": 0.2413330078125}, {"start": 1600.58, "end": 1603.26, "word": " not", "probability": 0.95751953125}, {"start": 1603.26, "end": 1603.76, "word": " selected.", "probability": 0.8837890625}, {"start": 1605.3, "end": 1606.18, "word": " Eighty", "probability": 0.6956787109375}, {"start": 1606.18, "end": 1606.5, "word": "-seven.", "probability": 0.9599609375}], "temperature": 1.0}, {"id": 62, "seek": 163654, "start": 1608.98, "end": 1636.54, "text": " It's not selected. 13. 13. It's okay. Next. 96. 96. Not selected. 14. 14 is okay. 91. 91. 91. Not selected. 95. 91. 45. 85. 31. 31. So that's 10.", "tokens": [467, 311, 406, 8209, 13, 3705, 13, 3705, 13, 467, 311, 1392, 13, 3087, 13, 24124, 13, 24124, 13, 1726, 8209, 13, 3499, 13, 3499, 307, 1392, 13, 31064, 13, 31064, 13, 31064, 13, 1726, 8209, 13, 13420, 13, 31064, 13, 6905, 13, 14695, 13, 10353, 13, 10353, 13, 407, 300, 311, 1266, 13], "avg_logprob": -0.3048295530405912, "compression_ratio": 1.6781609195402298, "no_speech_prob": 0.0, "words": [{"start": 1608.98, "end": 1609.76, "word": " It's", "probability": 0.4765625}, {"start": 1609.76, "end": 1610.08, "word": " not", "probability": 0.88427734375}, {"start": 1610.08, "end": 1610.74, "word": " selected.", "probability": 0.367431640625}, {"start": 1611.1, "end": 1611.62, "word": " 13.", "probability": 0.54541015625}, {"start": 1611.74, "end": 1611.96, "word": " 13.", "probability": 0.52783203125}, {"start": 1612.52, "end": 1612.76, "word": " It's", "probability": 0.817626953125}, {"start": 1612.76, "end": 1613.0, "word": " okay.", "probability": 0.685546875}, {"start": 1613.42, "end": 1614.2, "word": " Next.", "probability": 0.92236328125}, {"start": 1614.38, "end": 1615.14, "word": " 96.", "probability": 0.90087890625}, {"start": 1615.18, "end": 1615.84, "word": " 96.", 
"probability": 0.9501953125}, {"start": 1617.36, "end": 1617.58, "word": " Not", "probability": 0.744140625}, {"start": 1617.58, "end": 1618.0, "word": " selected.", "probability": 0.8564453125}, {"start": 1618.18, "end": 1618.38, "word": " 14.", "probability": 0.147216796875}, {"start": 1618.86, "end": 1619.38, "word": " 14", "probability": 0.9365234375}, {"start": 1619.38, "end": 1619.58, "word": " is", "probability": 0.85546875}, {"start": 1619.58, "end": 1619.88, "word": " okay.", "probability": 0.9228515625}, {"start": 1620.9, "end": 1621.74, "word": " 91.", "probability": 0.9560546875}, {"start": 1622.14, "end": 1622.68, "word": " 91.", "probability": 0.970703125}, {"start": 1622.9, "end": 1623.56, "word": " 91.", "probability": 0.464599609375}, {"start": 1623.9, "end": 1624.42, "word": " Not", "probability": 0.67236328125}, {"start": 1624.42, "end": 1624.76, "word": " selected.", "probability": 0.884765625}, {"start": 1624.9, "end": 1625.26, "word": " 95.", "probability": 0.421630859375}, {"start": 1626.24, "end": 1627.08, "word": " 91.", "probability": 0.90625}, {"start": 1627.38, "end": 1627.86, "word": " 45.", "probability": 0.494873046875}, {"start": 1628.36, "end": 1629.12, "word": " 85.", "probability": 0.351318359375}, {"start": 1630.0, "end": 1630.58, "word": " 31.", "probability": 0.87548828125}, {"start": 1631.6, "end": 1632.08, "word": " 31.", "probability": 0.9072265625}, {"start": 1635.24, "end": 1636.08, "word": " So", "probability": 0.84716796875}, {"start": 1636.08, "end": 1636.36, "word": " that's", "probability": 0.91796875}, {"start": 1636.36, "end": 1636.54, "word": " 10.", "probability": 0.79248046875}], "temperature": 1.0}, {"id": 63, "seek": 166434, "start": 1638.12, "end": 1664.34, "text": " So students numbers are 19, 22, 39, 50, 34, 5, 13, 4, 25 and take one will be selected. So these are the ID numbers will be selected in order to get a sample of 10. You exclude that one. 
If the number is repeated, you have to exclude that one.", "tokens": [407, 1731, 3547, 366, 1294, 11, 5853, 11, 15238, 11, 2625, 11, 12790, 11, 1025, 11, 3705, 11, 1017, 11, 3552, 293, 747, 472, 486, 312, 8209, 13, 407, 613, 366, 264, 7348, 3547, 486, 312, 8209, 294, 1668, 281, 483, 257, 6889, 295, 1266, 13, 509, 33536, 300, 472, 13, 759, 264, 1230, 307, 10477, 11, 291, 362, 281, 33536, 300, 472, 13], "avg_logprob": -0.15829326923076922, "compression_ratio": 1.525, "no_speech_prob": 0.0, "words": [{"start": 1638.12, "end": 1638.46, "word": " So", "probability": 0.80322265625}, {"start": 1638.46, "end": 1639.02, "word": " students", "probability": 0.65771484375}, {"start": 1639.02, "end": 1639.56, "word": " numbers", "probability": 0.681640625}, {"start": 1639.56, "end": 1640.24, "word": " are", "probability": 0.916015625}, {"start": 1640.24, "end": 1640.96, "word": " 19,", "probability": 0.87255859375}, {"start": 1641.06, "end": 1641.44, "word": " 22,", "probability": 0.91357421875}, {"start": 1641.6, "end": 1641.9, "word": " 39,", "probability": 0.951171875}, {"start": 1642.14, "end": 1642.48, "word": " 50,", "probability": 0.9169921875}, {"start": 1642.74, "end": 1643.2, "word": " 34,", "probability": 0.93896484375}, {"start": 1643.5, "end": 1643.84, "word": " 5,", "probability": 0.9677734375}, {"start": 1644.16, "end": 1645.12, "word": " 13,", "probability": 0.93994140625}, {"start": 1645.3, "end": 1645.5, "word": " 4,", "probability": 0.4521484375}, {"start": 1645.58, "end": 1646.02, "word": " 25", "probability": 0.95751953125}, {"start": 1646.02, "end": 1646.28, "word": " and", "probability": 0.60595703125}, {"start": 1646.28, "end": 1646.46, "word": " take", "probability": 0.68603515625}, {"start": 1646.46, "end": 1646.68, "word": " one", "probability": 0.74267578125}, {"start": 1646.68, "end": 1646.84, "word": " will", "probability": 0.80859375}, {"start": 1646.84, "end": 1646.98, "word": " be", "probability": 0.94873046875}, {"start": 1646.98, "end": 
1647.4, "word": " selected.", "probability": 0.8916015625}, {"start": 1648.16, "end": 1648.32, "word": " So", "probability": 0.8994140625}, {"start": 1648.32, "end": 1648.56, "word": " these", "probability": 0.82080078125}, {"start": 1648.56, "end": 1648.88, "word": " are", "probability": 0.9384765625}, {"start": 1648.88, "end": 1649.28, "word": " the", "probability": 0.90966796875}, {"start": 1649.28, "end": 1649.84, "word": " ID", "probability": 0.87841796875}, {"start": 1649.84, "end": 1650.5, "word": " numbers", "probability": 0.869140625}, {"start": 1650.5, "end": 1650.78, "word": " will", "probability": 0.8154296875}, {"start": 1650.78, "end": 1650.94, "word": " be", "probability": 0.95654296875}, {"start": 1650.94, "end": 1651.38, "word": " selected", "probability": 0.88720703125}, {"start": 1651.38, "end": 1651.68, "word": " in", "probability": 0.93408203125}, {"start": 1651.68, "end": 1651.88, "word": " order", "probability": 0.93017578125}, {"start": 1651.88, "end": 1652.16, "word": " to", "probability": 0.970703125}, {"start": 1652.16, "end": 1652.42, "word": " get", "probability": 0.94091796875}, {"start": 1652.42, "end": 1653.08, "word": " a", "probability": 0.974609375}, {"start": 1653.08, "end": 1653.36, "word": " sample", "probability": 0.9150390625}, {"start": 1653.36, "end": 1653.58, "word": " of", "probability": 0.953125}, {"start": 1653.58, "end": 1653.78, "word": " 10.", "probability": 0.61279296875}, {"start": 1654.92, "end": 1655.48, "word": " You", "probability": 0.33447265625}, {"start": 1655.48, "end": 1660.5, "word": " exclude", "probability": 0.329833984375}, {"start": 1660.5, "end": 1660.82, "word": " that", "probability": 0.92529296875}, {"start": 1660.82, "end": 1661.0, "word": " one.", "probability": 0.91552734375}, {"start": 1661.58, "end": 1661.8, "word": " If", "probability": 0.94970703125}, {"start": 1661.8, "end": 1661.96, "word": " the", "probability": 0.90283203125}, {"start": 1661.96, "end": 1662.2, "word": " number", 
"probability": 0.9140625}, {"start": 1662.2, "end": 1662.38, "word": " is", "probability": 0.93310546875}, {"start": 1662.38, "end": 1662.7, "word": " repeated,", "probability": 0.939453125}, {"start": 1662.94, "end": 1663.06, "word": " you", "probability": 0.958984375}, {"start": 1663.06, "end": 1663.26, "word": " have", "probability": 0.93701171875}, {"start": 1663.26, "end": 1663.44, "word": " to", "probability": 0.9716796875}, {"start": 1663.44, "end": 1663.88, "word": " exclude", "probability": 0.86572265625}, {"start": 1663.88, "end": 1664.14, "word": " that", "probability": 0.9130859375}, {"start": 1664.14, "end": 1664.34, "word": " one.", "probability": 0.89013671875}], "temperature": 1.0}, {"id": 64, "seek": 169913, "start": 1671.37, "end": 1699.13, "text": " is repeated, then excluded. So the returned number must be excluded from the sample. Let's imagine that we have not 52 students. We have 520 students.", "tokens": [307, 10477, 11, 550, 29486, 13, 407, 264, 8752, 1230, 1633, 312, 29486, 490, 264, 6889, 13, 961, 311, 3811, 300, 321, 362, 406, 18079, 1731, 13, 492, 362, 1025, 2009, 1731, 13], "avg_logprob": -0.29204963585909677, "compression_ratio": 1.3245614035087718, "no_speech_prob": 0.0, "words": [{"start": 1671.37, "end": 1671.71, "word": " is", "probability": 0.29248046875}, {"start": 1671.71, "end": 1672.13, "word": " repeated,", "probability": 0.82177734375}, {"start": 1672.93, "end": 1675.41, "word": " then", "probability": 0.828125}, {"start": 1675.41, "end": 1677.27, "word": " excluded.", "probability": 0.63037109375}, {"start": 1682.37, "end": 1683.49, "word": " So", "probability": 0.89306640625}, {"start": 1683.49, "end": 1683.89, "word": " the", "probability": 0.259033203125}, {"start": 1683.89, "end": 1684.27, "word": " returned", "probability": 0.3408203125}, {"start": 1684.27, "end": 1684.81, "word": " number", "probability": 0.92431640625}, {"start": 1684.81, "end": 1685.51, "word": " must", "probability": 0.84765625}, {"start": 
1685.51, "end": 1685.75, "word": " be", "probability": 0.9501953125}, {"start": 1685.75, "end": 1686.21, "word": " excluded", "probability": 0.87744140625}, {"start": 1686.21, "end": 1687.01, "word": " from", "probability": 0.88916015625}, {"start": 1687.01, "end": 1687.37, "word": " the", "probability": 0.8876953125}, {"start": 1687.37, "end": 1687.83, "word": " sample.", "probability": 0.92041015625}, {"start": 1689.61, "end": 1690.73, "word": " Let's", "probability": 0.670166015625}, {"start": 1690.73, "end": 1691.11, "word": " imagine", "probability": 0.9052734375}, {"start": 1691.11, "end": 1691.61, "word": " that", "probability": 0.9248046875}, {"start": 1691.61, "end": 1691.95, "word": " we", "probability": 0.95751953125}, {"start": 1691.95, "end": 1692.51, "word": " have", "probability": 0.94384765625}, {"start": 1692.51, "end": 1693.61, "word": " not", "probability": 0.81494140625}, {"start": 1693.61, "end": 1694.03, "word": " 52", "probability": 0.94921875}, {"start": 1694.03, "end": 1694.85, "word": " students.", "probability": 0.97607421875}, {"start": 1695.67, "end": 1696.79, "word": " We", "probability": 0.95654296875}, {"start": 1696.79, "end": 1697.25, "word": " have", "probability": 0.94970703125}, {"start": 1697.25, "end": 1698.51, "word": " 520", "probability": 0.882080078125}, {"start": 1698.51, "end": 1699.13, "word": " students.", "probability": 0.97412109375}], "temperature": 1.0}, {"id": 65, "seek": 172760, "start": 1705.74, "end": 1727.6, "text": " Now, I have large number, 52, 520 instead of 52 students. And again, my goal is to select just 10 students out of 120. So each one has ID with number one, two, all the way up to 520. 
So the first one, 001.", "tokens": [823, 11, 286, 362, 2416, 1230, 11, 18079, 11, 1025, 2009, 2602, 295, 18079, 1731, 13, 400, 797, 11, 452, 3387, 307, 281, 3048, 445, 1266, 1731, 484, 295, 10411, 13, 407, 1184, 472, 575, 7348, 365, 1230, 472, 11, 732, 11, 439, 264, 636, 493, 281, 1025, 2009, 13, 407, 264, 700, 472, 11, 7143, 16, 13], "avg_logprob": -0.13201800948482448, "compression_ratio": 1.34640522875817, "no_speech_prob": 0.0, "words": [{"start": 1705.7400000000002, "end": 1706.3400000000001, "word": " Now,", "probability": 0.82470703125}, {"start": 1706.3400000000001, "end": 1706.94, "word": " I", "probability": 0.9951171875}, {"start": 1706.94, "end": 1707.28, "word": " have", "probability": 0.94970703125}, {"start": 1707.28, "end": 1707.76, "word": " large", "probability": 0.8349609375}, {"start": 1707.76, "end": 1708.14, "word": " number,", "probability": 0.916015625}, {"start": 1708.88, "end": 1709.28, "word": " 52,", "probability": 0.93310546875}, {"start": 1709.68, "end": 1710.86, "word": " 520", "probability": 0.816162109375}, {"start": 1710.86, "end": 1711.96, "word": " instead", "probability": 0.67333984375}, {"start": 1711.96, "end": 1712.16, "word": " of", "probability": 0.9677734375}, {"start": 1712.16, "end": 1712.52, "word": " 52", "probability": 0.966796875}, {"start": 1712.52, "end": 1713.1, "word": " students.", "probability": 0.9736328125}, {"start": 1713.68, "end": 1713.9, "word": " And", "probability": 0.94189453125}, {"start": 1713.9, "end": 1714.16, "word": " again,", "probability": 0.9072265625}, {"start": 1714.24, "end": 1714.38, "word": " my", "probability": 0.96826171875}, {"start": 1714.38, "end": 1714.6, "word": " goal", "probability": 0.97509765625}, {"start": 1714.6, "end": 1714.8, "word": " is", "probability": 0.94140625}, {"start": 1714.8, "end": 1714.96, "word": " to", "probability": 0.69921875}, {"start": 1714.96, "end": 1715.3, "word": " select", "probability": 0.8525390625}, {"start": 1715.3, "end": 1715.8, "word": " just", 
"probability": 0.91552734375}, {"start": 1715.8, "end": 1716.08, "word": " 10", "probability": 0.86962890625}, {"start": 1716.08, "end": 1716.56, "word": " students", "probability": 0.970703125}, {"start": 1716.56, "end": 1716.86, "word": " out", "probability": 0.87353515625}, {"start": 1716.86, "end": 1717.14, "word": " of", "probability": 0.9736328125}, {"start": 1717.14, "end": 1717.9, "word": " 120.", "probability": 0.88720703125}, {"start": 1719.02, "end": 1719.22, "word": " So", "probability": 0.91796875}, {"start": 1719.22, "end": 1720.26, "word": " each", "probability": 0.7734375}, {"start": 1720.26, "end": 1720.5, "word": " one", "probability": 0.9365234375}, {"start": 1720.5, "end": 1720.92, "word": " has", "probability": 0.95068359375}, {"start": 1720.92, "end": 1721.74, "word": " ID", "probability": 0.9365234375}, {"start": 1721.74, "end": 1722.22, "word": " with", "probability": 0.87744140625}, {"start": 1722.22, "end": 1722.5, "word": " number", "probability": 0.91943359375}, {"start": 1722.5, "end": 1722.78, "word": " one,", "probability": 0.85791015625}, {"start": 1722.92, "end": 1723.08, "word": " two,", "probability": 0.9443359375}, {"start": 1723.22, "end": 1723.42, "word": " all", "probability": 0.9541015625}, {"start": 1723.42, "end": 1723.58, "word": " the", "probability": 0.91796875}, {"start": 1723.58, "end": 1723.72, "word": " way", "probability": 0.95361328125}, {"start": 1723.72, "end": 1723.96, "word": " up", "probability": 0.9599609375}, {"start": 1723.96, "end": 1724.3, "word": " to", "probability": 0.96728515625}, {"start": 1724.3, "end": 1725.32, "word": " 520.", "probability": 0.865234375}, {"start": 1725.88, "end": 1726.06, "word": " So", "probability": 0.955078125}, {"start": 1726.06, "end": 1726.22, "word": " the", "probability": 0.845703125}, {"start": 1726.22, "end": 1726.46, "word": " first", "probability": 0.888671875}, {"start": 1726.46, "end": 1726.76, "word": " one,", "probability": 0.931640625}, {"start": 1726.98, "end": 
1727.6, "word": " 001.", "probability": 0.959228515625}], "temperature": 1.0}, {"id": 66, "seek": 175046, "start": 1729.02, "end": 1750.46, "text": " 002 all the way up to 520 now in this case you have to choose three digits start for example you don't have actually to start with row number one maybe column number one or row number two whatever is fine so let's start with row number two for example row number 76", "tokens": [7143, 17, 439, 264, 636, 493, 281, 1025, 2009, 586, 294, 341, 1389, 291, 362, 281, 2826, 1045, 27011, 722, 337, 1365, 291, 500, 380, 362, 767, 281, 722, 365, 5386, 1230, 472, 1310, 7738, 1230, 472, 420, 5386, 1230, 732, 2035, 307, 2489, 370, 718, 311, 722, 365, 5386, 1230, 732, 337, 1365, 5386, 1230, 24733], "avg_logprob": -0.21322737736948605, "compression_ratio": 1.7272727272727273, "no_speech_prob": 0.0, "words": [{"start": 1729.02, "end": 1729.62, "word": " 002", "probability": 0.6107177734375}, {"start": 1729.62, "end": 1729.84, "word": " all", "probability": 0.6318359375}, {"start": 1729.84, "end": 1730.02, "word": " the", "probability": 0.9150390625}, {"start": 1730.02, "end": 1730.16, "word": " way", "probability": 0.95556640625}, {"start": 1730.16, "end": 1730.38, "word": " up", "probability": 0.94482421875}, {"start": 1730.38, "end": 1730.64, "word": " to", "probability": 0.94189453125}, {"start": 1730.64, "end": 1731.86, "word": " 520", "probability": 0.835693359375}, {"start": 1731.86, "end": 1733.04, "word": " now", "probability": 0.2139892578125}, {"start": 1733.04, "end": 1733.16, "word": " in", "probability": 0.76708984375}, {"start": 1733.16, "end": 1733.32, "word": " this", "probability": 0.9482421875}, {"start": 1733.32, "end": 1733.52, "word": " case", "probability": 0.90673828125}, {"start": 1733.52, "end": 1733.64, "word": " you", "probability": 0.73046875}, {"start": 1733.64, "end": 1733.76, "word": " have", "probability": 0.90673828125}, {"start": 1733.76, "end": 1733.88, "word": " to", "probability": 0.9609375}, 
{"start": 1733.88, "end": 1734.14, "word": " choose", "probability": 0.88232421875}, {"start": 1734.14, "end": 1734.86, "word": " three", "probability": 0.6953125}, {"start": 1734.86, "end": 1735.36, "word": " digits", "probability": 0.9228515625}, {"start": 1735.36, "end": 1736.48, "word": " start", "probability": 0.58154296875}, {"start": 1736.48, "end": 1736.68, "word": " for", "probability": 0.9111328125}, {"start": 1736.68, "end": 1737.1, "word": " example", "probability": 0.9716796875}, {"start": 1737.1, "end": 1738.36, "word": " you", "probability": 0.8251953125}, {"start": 1738.36, "end": 1738.6, "word": " don't", "probability": 0.95751953125}, {"start": 1738.6, "end": 1738.98, "word": " have", "probability": 0.94091796875}, {"start": 1738.98, "end": 1739.36, "word": " actually", "probability": 0.5380859375}, {"start": 1739.36, "end": 1739.56, "word": " to", "probability": 0.671875}, {"start": 1739.56, "end": 1739.76, "word": " start", "probability": 0.9189453125}, {"start": 1739.76, "end": 1740.06, "word": " with", "probability": 0.890625}, {"start": 1740.06, "end": 1740.7, "word": " row", "probability": 0.8359375}, {"start": 1740.7, "end": 1740.9, "word": " number", "probability": 0.93701171875}, {"start": 1740.9, "end": 1741.08, "word": " one", "probability": 0.787109375}, {"start": 1741.08, "end": 1741.3, "word": " maybe", "probability": 0.82177734375}, {"start": 1741.3, "end": 1741.7, "word": " column", "probability": 0.8037109375}, {"start": 1741.7, "end": 1741.94, "word": " number", "probability": 0.9228515625}, {"start": 1741.94, "end": 1742.22, "word": " one", "probability": 0.8828125}, {"start": 1742.22, "end": 1742.8, "word": " or", "probability": 0.90673828125}, {"start": 1742.8, "end": 1743.06, "word": " row", "probability": 0.85302734375}, {"start": 1743.06, "end": 1743.28, "word": " number", "probability": 0.94970703125}, {"start": 1743.28, "end": 1743.58, "word": " two", "probability": 0.916015625}, {"start": 1743.58, "end": 1744.12, "word": 
" whatever", "probability": 0.91552734375}, {"start": 1744.12, "end": 1744.46, "word": " is", "probability": 0.94873046875}, {"start": 1744.46, "end": 1744.78, "word": " fine", "probability": 0.935546875}, {"start": 1744.78, "end": 1745.36, "word": " so", "probability": 0.8388671875}, {"start": 1745.36, "end": 1745.62, "word": " let's", "probability": 0.968017578125}, {"start": 1745.62, "end": 1745.92, "word": " start", "probability": 0.91796875}, {"start": 1745.92, "end": 1746.14, "word": " with", "probability": 0.89990234375}, {"start": 1746.14, "end": 1746.94, "word": " row", "probability": 0.88525390625}, {"start": 1746.94, "end": 1747.2, "word": " number", "probability": 0.94482421875}, {"start": 1747.2, "end": 1747.5, "word": " two", "probability": 0.89990234375}, {"start": 1747.5, "end": 1748.64, "word": " for", "probability": 0.9091796875}, {"start": 1748.64, "end": 1749.02, "word": " example", "probability": 0.96923828125}, {"start": 1749.02, "end": 1749.5, "word": " row", "probability": 0.78759765625}, {"start": 1749.5, "end": 1749.84, "word": " number", "probability": 0.94384765625}, {"start": 1749.84, "end": 1750.46, "word": " 76", "probability": 0.186279296875}], "temperature": 1.0}, {"id": 67, "seek": 178231, "start": 1754.87, "end": 1782.31, "text": " It's not selected. Because the maximum number I have is 5 to 20. So, 746 shouldn't be selected. The next one, 764. Again, it's not selected. 764, 715. Not selected. 
Next one is 715.", "tokens": [467, 311, 406, 8209, 13, 1436, 264, 6674, 1230, 286, 362, 307, 1025, 281, 945, 13, 407, 11, 1614, 16169, 4659, 380, 312, 8209, 13, 440, 958, 472, 11, 1614, 19395, 13, 3764, 11, 309, 311, 406, 8209, 13, 1614, 19395, 11, 1614, 5211, 13, 1726, 8209, 13, 3087, 472, 307, 1614, 5211, 13], "avg_logprob": -0.2859375065023249, "compression_ratio": 1.4, "no_speech_prob": 0.0, "words": [{"start": 1754.8700000000001, "end": 1755.73, "word": " It's", "probability": 0.42987060546875}, {"start": 1755.73, "end": 1755.87, "word": " not", "probability": 0.93408203125}, {"start": 1755.87, "end": 1756.31, "word": " selected.", "probability": 0.8779296875}, {"start": 1757.39, "end": 1758.11, "word": " Because", "probability": 0.86181640625}, {"start": 1758.11, "end": 1758.35, "word": " the", "probability": 0.88330078125}, {"start": 1758.35, "end": 1758.73, "word": " maximum", "probability": 0.9365234375}, {"start": 1758.73, "end": 1759.27, "word": " number", "probability": 0.94921875}, {"start": 1759.27, "end": 1759.95, "word": " I", "probability": 0.9296875}, {"start": 1759.95, "end": 1760.41, "word": " have", "probability": 0.94873046875}, {"start": 1760.41, "end": 1760.85, "word": " is", "probability": 0.896484375}, {"start": 1760.85, "end": 1761.03, "word": " 5", "probability": 0.8662109375}, {"start": 1761.03, "end": 1761.17, "word": " to", "probability": 0.49853515625}, {"start": 1761.17, "end": 1761.47, "word": " 20.", "probability": 0.9560546875}, {"start": 1762.27, "end": 1762.49, "word": " So,", "probability": 0.85986328125}, {"start": 1762.99, "end": 1763.39, "word": " 746", "probability": 0.744384765625}, {"start": 1763.39, "end": 1764.53, "word": " shouldn't", "probability": 0.934814453125}, {"start": 1764.53, "end": 1764.69, "word": " be", "probability": 0.9541015625}, {"start": 1764.69, "end": 1765.11, "word": " selected.", "probability": 0.89697265625}, {"start": 1766.13, "end": 1766.47, "word": " The", "probability": 0.83740234375}, 
{"start": 1766.47, "end": 1766.83, "word": " next", "probability": 0.9453125}, {"start": 1766.83, "end": 1767.11, "word": " one,", "probability": 0.9296875}, {"start": 1768.37, "end": 1769.43, "word": " 764.", "probability": 0.781982421875}, {"start": 1771.77, "end": 1772.63, "word": " Again,", "probability": 0.92724609375}, {"start": 1772.89, "end": 1773.09, "word": " it's", "probability": 0.760009765625}, {"start": 1773.09, "end": 1773.31, "word": " not", "probability": 0.95166015625}, {"start": 1773.31, "end": 1773.65, "word": " selected.", "probability": 0.8994140625}, {"start": 1774.79, "end": 1775.65, "word": " 764,", "probability": 0.8349609375}, {"start": 1775.89, "end": 1776.37, "word": " 715.", "probability": 0.923828125}, {"start": 1777.87, "end": 1778.29, "word": " Not", "probability": 0.90185546875}, {"start": 1778.29, "end": 1778.75, "word": " selected.", "probability": 0.8896484375}, {"start": 1778.91, "end": 1779.09, "word": " Next", "probability": 0.77490234375}, {"start": 1779.09, "end": 1779.29, "word": " one", "probability": 0.53515625}, {"start": 1779.29, "end": 1779.51, "word": " is", "probability": 0.4033203125}, {"start": 1779.51, "end": 1782.31, "word": " 715.", "probability": 0.84130859375}], "temperature": 1.0}, {"id": 68, "seek": 181126, "start": 1784.88, "end": 1811.26, "text": " 099 should be 0 that's the way how can we use the random table for using or for selecting simple random symbols so in this case you can choose any row or any column then you have to decide how many digits you have to select it depends on the number you have I mean the population size", "tokens": [1958, 8494, 820, 312, 1958, 300, 311, 264, 636, 577, 393, 321, 764, 264, 4974, 3199, 337, 1228, 420, 337, 18182, 2199, 4974, 16944, 370, 294, 341, 1389, 291, 393, 2826, 604, 5386, 420, 604, 7738, 550, 291, 362, 281, 4536, 577, 867, 27011, 291, 362, 281, 3048, 309, 5946, 322, 264, 1230, 291, 362, 286, 914, 264, 4415, 2744], "avg_logprob": -0.2686987744003046, 
"compression_ratio": 1.6285714285714286, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1784.88, "end": 1785.5, "word": " 099", "probability": 0.41241455078125}, {"start": 1785.5, "end": 1786.08, "word": " should", "probability": 0.6455078125}, {"start": 1786.08, "end": 1787.9, "word": " be", "probability": 0.9150390625}, {"start": 1787.9, "end": 1788.08, "word": " 0", "probability": 0.127197265625}, {"start": 1788.08, "end": 1792.2, "word": " that's", "probability": 0.49871826171875}, {"start": 1792.2, "end": 1792.32, "word": " the", "probability": 0.76123046875}, {"start": 1792.32, "end": 1792.46, "word": " way", "probability": 0.93896484375}, {"start": 1792.46, "end": 1792.64, "word": " how", "probability": 0.7021484375}, {"start": 1792.64, "end": 1792.84, "word": " can", "probability": 0.65869140625}, {"start": 1792.84, "end": 1792.98, "word": " we", "probability": 0.86083984375}, {"start": 1792.98, "end": 1793.22, "word": " use", "probability": 0.869140625}, {"start": 1793.22, "end": 1793.36, "word": " the", "probability": 0.68994140625}, {"start": 1793.36, "end": 1793.6, "word": " random", "probability": 0.81982421875}, {"start": 1793.6, "end": 1794.0, "word": " table", "probability": 0.80322265625}, {"start": 1794.0, "end": 1794.44, "word": " for", "probability": 0.83251953125}, {"start": 1794.44, "end": 1794.94, "word": " using", "probability": 0.8583984375}, {"start": 1794.94, "end": 1795.68, "word": " or", "probability": 0.79736328125}, {"start": 1795.68, "end": 1795.9, "word": " for", "probability": 0.9013671875}, {"start": 1795.9, "end": 1796.36, "word": " selecting", "probability": 0.90673828125}, {"start": 1796.36, "end": 1797.02, "word": " simple", "probability": 0.736328125}, {"start": 1797.02, "end": 1797.56, "word": " random", "probability": 0.82177734375}, {"start": 1797.56, "end": 1797.94, "word": " symbols", "probability": 0.18310546875}, {"start": 1797.94, "end": 1798.54, "word": " so", "probability": 0.486328125}, {"start": 
1798.54, "end": 1798.66, "word": " in", "probability": 0.76904296875}, {"start": 1798.66, "end": 1798.8, "word": " this", "probability": 0.94921875}, {"start": 1798.8, "end": 1799.02, "word": " case", "probability": 0.916015625}, {"start": 1799.02, "end": 1799.2, "word": " you", "probability": 0.85546875}, {"start": 1799.2, "end": 1799.4, "word": " can", "probability": 0.93701171875}, {"start": 1799.4, "end": 1799.7, "word": " choose", "probability": 0.88916015625}, {"start": 1799.7, "end": 1800.06, "word": " any", "probability": 0.90283203125}, {"start": 1800.06, "end": 1800.36, "word": " row", "probability": 0.90087890625}, {"start": 1800.36, "end": 1801.52, "word": " or", "probability": 0.90673828125}, {"start": 1801.52, "end": 1801.82, "word": " any", "probability": 0.88330078125}, {"start": 1801.82, "end": 1802.22, "word": " column", "probability": 0.859375}, {"start": 1802.22, "end": 1803.34, "word": " then", "probability": 0.763671875}, {"start": 1803.34, "end": 1803.48, "word": " you", "probability": 0.95849609375}, {"start": 1803.48, "end": 1803.66, "word": " have", "probability": 0.93994140625}, {"start": 1803.66, "end": 1803.78, "word": " to", "probability": 0.9638671875}, {"start": 1803.78, "end": 1804.16, "word": " decide", "probability": 0.89208984375}, {"start": 1804.16, "end": 1804.54, "word": " how", "probability": 0.9345703125}, {"start": 1804.54, "end": 1804.86, "word": " many", "probability": 0.8896484375}, {"start": 1804.86, "end": 1805.76, "word": " digits", "probability": 0.9541015625}, {"start": 1805.76, "end": 1805.96, "word": " you", "probability": 0.94873046875}, {"start": 1805.96, "end": 1806.08, "word": " have", "probability": 0.90576171875}, {"start": 1806.08, "end": 1806.2, "word": " to", "probability": 0.96337890625}, {"start": 1806.2, "end": 1806.62, "word": " select", "probability": 0.83935546875}, {"start": 1806.62, "end": 1807.4, "word": " it", "probability": 0.8525390625}, {"start": 1807.4, "end": 1807.76, "word": " depends", 
"probability": 0.89501953125}, {"start": 1807.76, "end": 1808.06, "word": " on", "probability": 0.9453125}, {"start": 1808.06, "end": 1808.38, "word": " the", "probability": 0.91845703125}, {"start": 1808.38, "end": 1809.28, "word": " number", "probability": 0.92333984375}, {"start": 1809.28, "end": 1809.46, "word": " you", "probability": 0.89111328125}, {"start": 1809.46, "end": 1809.72, "word": " have", "probability": 0.943359375}, {"start": 1809.72, "end": 1810.2, "word": " I", "probability": 0.63134765625}, {"start": 1810.2, "end": 1810.34, "word": " mean", "probability": 0.96337890625}, {"start": 1810.34, "end": 1810.5, "word": " the", "probability": 0.8173828125}, {"start": 1810.5, "end": 1810.84, "word": " population", "probability": 0.95458984375}, {"start": 1810.84, "end": 1811.26, "word": " size", "probability": 0.85888671875}], "temperature": 1.0}, {"id": 69, "seek": 183819, "start": 1811.97, "end": 1838.19, "text": " If for example Suppose I am talking about IUPUI students and for example, we have 30,000 students at this school And again, I want to select a random sample of size 10 for example So how many digits should I use? 
20,000 Five digits", "tokens": [759, 337, 1365, 21360, 286, 669, 1417, 466, 44218, 8115, 40, 1731, 293, 337, 1365, 11, 321, 362, 2217, 11, 1360, 1731, 412, 341, 1395, 400, 797, 11, 286, 528, 281, 3048, 257, 4974, 6889, 295, 2744, 1266, 337, 1365, 407, 577, 867, 27011, 820, 286, 764, 30, 945, 11, 1360, 9436, 27011], "avg_logprob": -0.18460648286121864, "compression_ratio": 1.45, "no_speech_prob": 0.0, "words": [{"start": 1811.97, "end": 1812.65, "word": " If", "probability": 0.68408203125}, {"start": 1812.65, "end": 1813.23, "word": " for", "probability": 0.58349609375}, {"start": 1813.23, "end": 1813.63, "word": " example", "probability": 0.9736328125}, {"start": 1813.63, "end": 1815.73, "word": " Suppose", "probability": 0.28955078125}, {"start": 1815.73, "end": 1816.37, "word": " I", "probability": 0.9365234375}, {"start": 1816.37, "end": 1816.51, "word": " am", "probability": 0.90625}, {"start": 1816.51, "end": 1816.95, "word": " talking", "probability": 0.82958984375}, {"start": 1816.95, "end": 1817.67, "word": " about", "probability": 0.9052734375}, {"start": 1817.67, "end": 1818.39, "word": " IUPUI", "probability": 0.7994791666666666}, {"start": 1818.39, "end": 1818.79, "word": " students", "probability": 0.93310546875}, {"start": 1818.79, "end": 1819.53, "word": " and", "probability": 0.74658203125}, {"start": 1819.53, "end": 1819.71, "word": " for", "probability": 0.93408203125}, {"start": 1819.71, "end": 1820.03, "word": " example,", "probability": 0.97607421875}, {"start": 1820.21, "end": 1820.27, "word": " we", "probability": 0.95068359375}, {"start": 1820.27, "end": 1820.63, "word": " have", "probability": 0.9482421875}, {"start": 1820.63, "end": 1822.37, "word": " 30", "probability": 0.7529296875}, {"start": 1822.37, "end": 1822.73, "word": ",000", "probability": 0.91064453125}, {"start": 1822.73, "end": 1823.33, "word": " students", "probability": 0.95751953125}, {"start": 1823.33, "end": 1823.65, "word": " at", "probability": 0.900390625}, {"start": 
1823.65, "end": 1823.99, "word": " this", "probability": 0.9208984375}, {"start": 1823.99, "end": 1824.45, "word": " school", "probability": 0.8056640625}, {"start": 1824.45, "end": 1826.19, "word": " And", "probability": 0.69921875}, {"start": 1826.19, "end": 1826.41, "word": " again,", "probability": 0.95068359375}, {"start": 1826.43, "end": 1826.53, "word": " I", "probability": 0.9990234375}, {"start": 1826.53, "end": 1826.69, "word": " want", "probability": 0.87646484375}, {"start": 1826.69, "end": 1826.83, "word": " to", "probability": 0.96728515625}, {"start": 1826.83, "end": 1827.05, "word": " select", "probability": 0.83740234375}, {"start": 1827.05, "end": 1827.19, "word": " a", "probability": 0.94189453125}, {"start": 1827.19, "end": 1827.39, "word": " random", "probability": 0.83251953125}, {"start": 1827.39, "end": 1827.67, "word": " sample", "probability": 0.86865234375}, {"start": 1827.67, "end": 1827.83, "word": " of", "probability": 0.953125}, {"start": 1827.83, "end": 1828.05, "word": " size", "probability": 0.853515625}, {"start": 1828.05, "end": 1828.33, "word": " 10", "probability": 0.82177734375}, {"start": 1828.33, "end": 1828.57, "word": " for", "probability": 0.638671875}, {"start": 1828.57, "end": 1828.91, "word": " example", "probability": 0.9697265625}, {"start": 1828.91, "end": 1830.71, "word": " So", "probability": 0.8173828125}, {"start": 1830.71, "end": 1830.91, "word": " how", "probability": 0.8056640625}, {"start": 1830.91, "end": 1831.19, "word": " many", "probability": 0.90380859375}, {"start": 1831.19, "end": 1831.89, "word": " digits", "probability": 0.9638671875}, {"start": 1831.89, "end": 1832.23, "word": " should", "probability": 0.96240234375}, {"start": 1832.23, "end": 1832.37, "word": " I", "probability": 0.99267578125}, {"start": 1832.37, "end": 1832.73, "word": " use?", "probability": 0.8798828125}, {"start": 1834.09, "end": 1834.77, "word": " 20", "probability": 0.6728515625}, {"start": 1834.77, "end": 1835.19, "word": 
",000", "probability": 0.951904296875}, {"start": 1835.19, "end": 1837.73, "word": " Five", "probability": 0.453369140625}, {"start": 1837.73, "end": 1838.19, "word": " digits", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 70, "seek": 185924, "start": 1839.88, "end": 1859.24, "text": " And each one, each student has ID from, starts from the first one up to twenty thousand. So now, start with, for example, the last row you have.", "tokens": [400, 1184, 472, 11, 1184, 3107, 575, 7348, 490, 11, 3719, 490, 264, 700, 472, 493, 281, 7699, 4714, 13, 407, 586, 11, 722, 365, 11, 337, 1365, 11, 264, 1036, 5386, 291, 362, 13], "avg_logprob": -0.2808159672551685, "compression_ratio": 1.25, "no_speech_prob": 0.0, "words": [{"start": 1839.88, "end": 1840.3, "word": " And", "probability": 0.623046875}, {"start": 1840.3, "end": 1840.64, "word": " each", "probability": 0.9365234375}, {"start": 1840.64, "end": 1840.9, "word": " one,", "probability": 0.7666015625}, {"start": 1841.02, "end": 1841.22, "word": " each", "probability": 0.94091796875}, {"start": 1841.22, "end": 1841.68, "word": " student", "probability": 0.9462890625}, {"start": 1841.68, "end": 1842.28, "word": " has", "probability": 0.7578125}, {"start": 1842.28, "end": 1842.62, "word": " ID", "probability": 0.451904296875}, {"start": 1842.62, "end": 1843.06, "word": " from,", "probability": 0.744140625}, {"start": 1843.32, "end": 1843.78, "word": " starts", "probability": 0.44580078125}, {"start": 1843.78, "end": 1844.22, "word": " from", "probability": 0.89697265625}, {"start": 1844.22, "end": 1846.76, "word": " the", "probability": 0.7294921875}, {"start": 1846.76, "end": 1847.08, "word": " first", "probability": 0.88330078125}, {"start": 1847.08, "end": 1847.42, "word": " one", "probability": 0.939453125}, {"start": 1847.42, "end": 1849.78, "word": " up", "probability": 0.83642578125}, {"start": 1849.78, "end": 1850.48, "word": " to", "probability": 0.96142578125}, {"start": 1850.48, "end": 1851.76, 
"word": " twenty", "probability": 0.435791015625}, {"start": 1851.76, "end": 1852.06, "word": " thousand.", "probability": 0.8505859375}, {"start": 1853.32, "end": 1853.52, "word": " So", "probability": 0.87646484375}, {"start": 1853.52, "end": 1853.78, "word": " now,", "probability": 0.79833984375}, {"start": 1854.64, "end": 1855.02, "word": " start", "probability": 0.7412109375}, {"start": 1855.02, "end": 1855.26, "word": " with,", "probability": 0.79345703125}, {"start": 1855.36, "end": 1855.52, "word": " for", "probability": 0.95947265625}, {"start": 1855.52, "end": 1855.94, "word": " example,", "probability": 0.9736328125}, {"start": 1856.52, "end": 1856.68, "word": " the", "probability": 0.91845703125}, {"start": 1856.68, "end": 1858.2, "word": " last", "probability": 0.88916015625}, {"start": 1858.2, "end": 1858.92, "word": " row", "probability": 0.8974609375}, {"start": 1858.92, "end": 1859.08, "word": " you", "probability": 0.461181640625}, {"start": 1859.08, "end": 1859.24, "word": " have.", "probability": 0.94677734375}], "temperature": 1.0}, {"id": 71, "seek": 189216, "start": 1863.12, "end": 1892.16, "text": " The first number 54000 is not. 81 is not. None of these. Look at the next one. 71000 is not selected. Now 9001. So the first number I have to select is 9001. None of the rest. Go back. 
Go to the next one.", "tokens": [440, 700, 1230, 20793, 1360, 307, 406, 13, 30827, 307, 406, 13, 14492, 295, 613, 13, 2053, 412, 264, 958, 472, 13, 1614, 21199, 307, 406, 8209, 13, 823, 22016, 16, 13, 407, 264, 700, 1230, 286, 362, 281, 3048, 307, 22016, 16, 13, 14492, 295, 264, 1472, 13, 1037, 646, 13, 1037, 281, 264, 958, 472, 13], "avg_logprob": -0.20338983707508798, "compression_ratio": 1.5648854961832062, "no_speech_prob": 0.0, "words": [{"start": 1863.12, "end": 1863.36, "word": " The", "probability": 0.7021484375}, {"start": 1863.36, "end": 1863.64, "word": " first", "probability": 0.8740234375}, {"start": 1863.64, "end": 1863.96, "word": " number", "probability": 0.9326171875}, {"start": 1863.96, "end": 1865.0, "word": " 54000", "probability": 0.58447265625}, {"start": 1865.0, "end": 1865.18, "word": " is", "probability": 0.82666015625}, {"start": 1865.18, "end": 1865.38, "word": " not.", "probability": 0.88916015625}, {"start": 1865.88, "end": 1866.38, "word": " 81", "probability": 0.331298828125}, {"start": 1866.38, "end": 1866.66, "word": " is", "probability": 0.60498046875}, {"start": 1866.66, "end": 1866.92, "word": " not.", "probability": 0.9443359375}, {"start": 1867.96, "end": 1868.34, "word": " None", "probability": 0.8779296875}, {"start": 1868.34, "end": 1868.48, "word": " of", "probability": 0.97119140625}, {"start": 1868.48, "end": 1868.74, "word": " these.", "probability": 0.76318359375}, {"start": 1872.42, "end": 1873.02, "word": " Look", "probability": 0.783203125}, {"start": 1873.02, "end": 1873.16, "word": " at", "probability": 0.96630859375}, {"start": 1873.16, "end": 1873.28, "word": " the", "probability": 0.91650390625}, {"start": 1873.28, "end": 1873.5, "word": " next", "probability": 0.9404296875}, {"start": 1873.5, "end": 1873.74, "word": " one.", "probability": 0.9306640625}, {"start": 1875.42, "end": 1876.02, "word": " 71000", "probability": 0.682373046875}, {"start": 1876.02, "end": 1876.42, "word": " is", "probability": 
0.859375}, {"start": 1876.42, "end": 1876.54, "word": " not", "probability": 0.95458984375}, {"start": 1876.54, "end": 1876.96, "word": " selected.", "probability": 0.89111328125}, {"start": 1877.44, "end": 1877.76, "word": " Now", "probability": 0.6328125}, {"start": 1877.76, "end": 1879.02, "word": " 9001.", "probability": 0.893310546875}, {"start": 1879.18, "end": 1879.34, "word": " So", "probability": 0.8955078125}, {"start": 1879.34, "end": 1879.5, "word": " the", "probability": 0.67578125}, {"start": 1879.5, "end": 1879.82, "word": " first", "probability": 0.88330078125}, {"start": 1879.82, "end": 1880.1, "word": " number", "probability": 0.93798828125}, {"start": 1880.1, "end": 1880.24, "word": " I", "probability": 0.93017578125}, {"start": 1880.24, "end": 1880.36, "word": " have", "probability": 0.93310546875}, {"start": 1880.36, "end": 1880.46, "word": " to", "probability": 0.96826171875}, {"start": 1880.46, "end": 1880.84, "word": " select", "probability": 0.85595703125}, {"start": 1880.84, "end": 1882.18, "word": " is", "probability": 0.302734375}, {"start": 1882.18, "end": 1883.18, "word": " 9001.", "probability": 0.938232421875}, {"start": 1885.0, "end": 1885.48, "word": " None", "probability": 0.9287109375}, {"start": 1885.48, "end": 1885.68, "word": " of", "probability": 0.96142578125}, {"start": 1885.68, "end": 1885.88, "word": " the", "probability": 0.912109375}, {"start": 1885.88, "end": 1886.18, "word": " rest.", "probability": 0.91650390625}, {"start": 1886.56, "end": 1886.82, "word": " Go", "probability": 0.9541015625}, {"start": 1886.82, "end": 1887.2, "word": " back.", "probability": 0.865234375}, {"start": 1890.18, "end": 1890.78, "word": " Go", "probability": 0.95947265625}, {"start": 1890.78, "end": 1891.02, "word": " to", "probability": 0.958984375}, {"start": 1891.02, "end": 1891.26, "word": " the", "probability": 0.91455078125}, {"start": 1891.26, "end": 1891.86, "word": " next", "probability": 0.939453125}, {"start": 1891.86, "end": 
1892.16, "word": " one.", "probability": 0.9306640625}], "temperature": 1.0}, {"id": 72, "seek": 192275, "start": 1893.19, "end": 1922.75, "text": " The second number, 12149 and so on. Next will be 18000 and so on. Next row, we can select the second one, then 16, then 14000, 6500 and so on. So this is the way how can we use the random table.", "tokens": [440, 1150, 1230, 11, 2272, 7271, 24, 293, 370, 322, 13, 3087, 486, 312, 2443, 1360, 293, 370, 322, 13, 3087, 5386, 11, 321, 393, 3048, 264, 1150, 472, 11, 550, 3165, 11, 550, 3499, 1360, 11, 1386, 7526, 293, 370, 322, 13, 407, 341, 307, 264, 636, 577, 393, 321, 764, 264, 4974, 3199, 13], "avg_logprob": -0.2993421052631579, "compression_ratio": 1.4772727272727273, "no_speech_prob": 0.0, "words": [{"start": 1893.19, "end": 1893.41, "word": " The", "probability": 0.43798828125}, {"start": 1893.41, "end": 1893.69, "word": " second", "probability": 0.84375}, {"start": 1893.69, "end": 1894.05, "word": " number,", "probability": 0.7021484375}, {"start": 1894.17, "end": 1897.79, "word": " 12149", "probability": 0.585205078125}, {"start": 1897.79, "end": 1897.95, "word": " and", "probability": 0.62939453125}, {"start": 1897.95, "end": 1898.05, "word": " so", "probability": 0.94677734375}, {"start": 1898.05, "end": 1898.85, "word": " on.", "probability": 0.94873046875}, {"start": 1899.71, "end": 1900.05, "word": " Next", "probability": 0.884765625}, {"start": 1900.05, "end": 1900.23, "word": " will", "probability": 0.82568359375}, {"start": 1900.23, "end": 1900.61, "word": " be", "probability": 0.95166015625}, {"start": 1900.61, "end": 1903.41, "word": " 18000", "probability": 0.6241455078125}, {"start": 1903.41, "end": 1903.97, "word": " and", "probability": 0.7958984375}, {"start": 1903.97, "end": 1904.05, "word": " so", "probability": 0.94677734375}, {"start": 1904.05, "end": 1904.75, "word": " on.", "probability": 0.947265625}, {"start": 1905.05, "end": 1905.41, "word": " Next", "probability": 0.9013671875}, {"start": 
1905.41, "end": 1905.79, "word": " row,", "probability": 0.88525390625}, {"start": 1906.47, "end": 1906.75, "word": " we", "probability": 0.853515625}, {"start": 1906.75, "end": 1906.93, "word": " can", "probability": 0.93359375}, {"start": 1906.93, "end": 1907.31, "word": " select", "probability": 0.87109375}, {"start": 1907.31, "end": 1907.49, "word": " the", "probability": 0.64111328125}, {"start": 1907.49, "end": 1907.77, "word": " second", "probability": 0.8896484375}, {"start": 1907.77, "end": 1908.17, "word": " one,", "probability": 0.912109375}, {"start": 1910.67, "end": 1910.99, "word": " then", "probability": 0.5859375}, {"start": 1910.99, "end": 1911.95, "word": " 16,", "probability": 0.80615234375}, {"start": 1912.93, "end": 1913.93, "word": " then", "probability": 0.75830078125}, {"start": 1913.93, "end": 1915.53, "word": " 14000,", "probability": 0.6314697265625}, {"start": 1915.89, "end": 1917.05, "word": " 6500", "probability": 0.83056640625}, {"start": 1917.05, "end": 1918.41, "word": " and", "probability": 0.69384765625}, {"start": 1918.41, "end": 1918.61, "word": " so", "probability": 0.951171875}, {"start": 1918.61, "end": 1918.77, "word": " on.", "probability": 0.94775390625}, {"start": 1919.43, "end": 1919.59, "word": " So", "probability": 0.80712890625}, {"start": 1919.59, "end": 1919.75, "word": " this", "probability": 0.83349609375}, {"start": 1919.75, "end": 1919.83, "word": " is", "probability": 0.90625}, {"start": 1919.83, "end": 1919.93, "word": " the", "probability": 0.8076171875}, {"start": 1919.93, "end": 1920.05, "word": " way", "probability": 0.95751953125}, {"start": 1920.05, "end": 1920.19, "word": " how", "probability": 0.693359375}, {"start": 1920.19, "end": 1920.33, "word": " can", "probability": 0.50146484375}, {"start": 1920.33, "end": 1920.47, "word": " we", "probability": 0.92138671875}, {"start": 1920.47, "end": 1920.85, "word": " use", "probability": 0.8720703125}, {"start": 1920.85, "end": 1921.77, "word": " the", 
"probability": 0.8671875}, {"start": 1921.77, "end": 1922.37, "word": " random", "probability": 0.7646484375}, {"start": 1922.37, "end": 1922.75, "word": " table.", "probability": 0.70849609375}], "temperature": 1.0}, {"id": 73, "seek": 195395, "start": 1924.95, "end": 1953.95, "text": " It seems to be that tons of work if you have large sample. Because in this case, you have to choose, for example, suppose I am interested to take a random sample of 10,000. Now, to use this table to select 10,000 items takes time and effort and maybe will never finish. So it's better to use", "tokens": [467, 2544, 281, 312, 300, 9131, 295, 589, 498, 291, 362, 2416, 6889, 13, 1436, 294, 341, 1389, 11, 291, 362, 281, 2826, 11, 337, 1365, 11, 7297, 286, 669, 3102, 281, 747, 257, 4974, 6889, 295, 1266, 11, 1360, 13, 823, 11, 281, 764, 341, 3199, 281, 3048, 1266, 11, 1360, 4754, 2516, 565, 293, 4630, 293, 1310, 486, 1128, 2413, 13, 407, 309, 311, 1101, 281, 764], "avg_logprob": -0.20223214605024883, "compression_ratio": 1.4974358974358974, "no_speech_prob": 0.0, "words": [{"start": 1924.95, "end": 1925.19, "word": " It", "probability": 0.626953125}, {"start": 1925.19, "end": 1925.49, "word": " seems", "probability": 0.80517578125}, {"start": 1925.49, "end": 1925.67, "word": " to", "probability": 0.94384765625}, {"start": 1925.67, "end": 1925.79, "word": " be", "probability": 0.84521484375}, {"start": 1925.79, "end": 1926.15, "word": " that", "probability": 0.72509765625}, {"start": 1926.15, "end": 1927.47, "word": " tons", "probability": 0.63134765625}, {"start": 1927.47, "end": 1927.71, "word": " of", "probability": 0.96923828125}, {"start": 1927.71, "end": 1928.11, "word": " work", "probability": 0.90869140625}, {"start": 1928.11, "end": 1929.31, "word": " if", "probability": 0.705078125}, {"start": 1929.31, "end": 1929.43, "word": " you", "probability": 0.96484375}, {"start": 1929.43, "end": 1929.77, "word": " have", "probability": 0.95556640625}, {"start": 1929.77, "end": 1931.19, 
"word": " large", "probability": 0.7109375}, {"start": 1931.19, "end": 1931.61, "word": " sample.", "probability": 0.72119140625}, {"start": 1932.27, "end": 1932.69, "word": " Because", "probability": 0.8759765625}, {"start": 1932.69, "end": 1932.97, "word": " in", "probability": 0.86279296875}, {"start": 1932.97, "end": 1933.19, "word": " this", "probability": 0.9462890625}, {"start": 1933.19, "end": 1933.45, "word": " case,", "probability": 0.91796875}, {"start": 1933.53, "end": 1933.59, "word": " you", "probability": 0.9248046875}, {"start": 1933.59, "end": 1933.75, "word": " have", "probability": 0.93896484375}, {"start": 1933.75, "end": 1933.87, "word": " to", "probability": 0.96875}, {"start": 1933.87, "end": 1934.25, "word": " choose,", "probability": 0.89892578125}, {"start": 1934.65, "end": 1934.77, "word": " for", "probability": 0.943359375}, {"start": 1934.77, "end": 1935.11, "word": " example,", "probability": 0.974609375}, {"start": 1935.23, "end": 1935.61, "word": " suppose", "probability": 0.88330078125}, {"start": 1935.61, "end": 1936.27, "word": " I", "probability": 0.8818359375}, {"start": 1936.27, "end": 1936.43, "word": " am", "probability": 0.83837890625}, {"start": 1936.43, "end": 1936.85, "word": " interested", "probability": 0.87939453125}, {"start": 1936.85, "end": 1937.99, "word": " to", "probability": 0.9248046875}, {"start": 1937.99, "end": 1938.27, "word": " take", "probability": 0.8828125}, {"start": 1938.27, "end": 1938.45, "word": " a", "probability": 0.9462890625}, {"start": 1938.45, "end": 1938.69, "word": " random", "probability": 0.880859375}, {"start": 1938.69, "end": 1939.15, "word": " sample", "probability": 0.86767578125}, {"start": 1939.15, "end": 1939.47, "word": " of", "probability": 0.96826171875}, {"start": 1939.47, "end": 1939.77, "word": " 10", "probability": 0.7578125}, {"start": 1939.77, "end": 1940.11, "word": ",000.", "probability": 0.943603515625}, {"start": 1942.09, "end": 1942.39, "word": " Now,", "probability": 
0.75048828125}, {"start": 1942.51, "end": 1942.73, "word": " to", "probability": 0.8994140625}, {"start": 1942.73, "end": 1942.95, "word": " use", "probability": 0.87744140625}, {"start": 1942.95, "end": 1943.21, "word": " this", "probability": 0.94677734375}, {"start": 1943.21, "end": 1943.57, "word": " table", "probability": 0.9033203125}, {"start": 1943.57, "end": 1945.13, "word": " to", "probability": 0.83837890625}, {"start": 1945.13, "end": 1945.57, "word": " select", "probability": 0.861328125}, {"start": 1945.57, "end": 1945.83, "word": " 10", "probability": 0.9599609375}, {"start": 1945.83, "end": 1946.17, "word": ",000", "probability": 0.99853515625}, {"start": 1946.17, "end": 1946.91, "word": " items", "probability": 0.8388671875}, {"start": 1946.91, "end": 1948.37, "word": " takes", "probability": 0.681640625}, {"start": 1948.37, "end": 1948.85, "word": " time", "probability": 0.8916015625}, {"start": 1948.85, "end": 1949.41, "word": " and", "probability": 0.939453125}, {"start": 1949.41, "end": 1949.77, "word": " effort", "probability": 0.9345703125}, {"start": 1949.77, "end": 1950.19, "word": " and", "probability": 0.414794921875}, {"start": 1950.19, "end": 1950.41, "word": " maybe", "probability": 0.92041015625}, {"start": 1950.41, "end": 1950.65, "word": " will", "probability": 0.77685546875}, {"start": 1950.65, "end": 1951.61, "word": " never", "probability": 0.435302734375}, {"start": 1951.61, "end": 1952.05, "word": " finish.", "probability": 0.95751953125}, {"start": 1952.37, "end": 1953.03, "word": " So", "probability": 0.9248046875}, {"start": 1953.03, "end": 1953.19, "word": " it's", "probability": 0.890869140625}, {"start": 1953.19, "end": 1953.41, "word": " better", "probability": 0.92333984375}, {"start": 1953.41, "end": 1953.61, "word": " to", "probability": 0.96630859375}, {"start": 1953.61, "end": 1953.95, "word": " use", "probability": 0.86328125}], "temperature": 1.0}, {"id": 74, "seek": 198574, "start": 1958.02, "end": 1985.74, 
"text": " better to use computer random number generators. So that's the way if we, now we can use the random table only if the sample size is limited. I mean up to 100 maybe you can use the random table, but after that I think it's just you are losing your time.", "tokens": [1101, 281, 764, 3820, 4974, 1230, 38662, 13, 407, 300, 311, 264, 636, 498, 321, 11, 586, 321, 393, 764, 264, 4974, 3199, 787, 498, 264, 6889, 2744, 307, 5567, 13, 286, 914, 493, 281, 2319, 1310, 291, 393, 764, 264, 4974, 3199, 11, 457, 934, 300, 286, 519, 309, 311, 445, 291, 366, 7027, 428, 565, 13], "avg_logprob": -0.19186970995644392, "compression_ratio": 1.5582822085889572, "no_speech_prob": 0.0, "words": [{"start": 1958.02, "end": 1958.36, "word": " better", "probability": 0.2548828125}, {"start": 1958.36, "end": 1958.6, "word": " to", "probability": 0.94482421875}, {"start": 1958.6, "end": 1959.04, "word": " use", "probability": 0.87890625}, {"start": 1959.04, "end": 1962.1, "word": " computer", "probability": 0.794921875}, {"start": 1962.1, "end": 1962.52, "word": " random", "probability": 0.869140625}, {"start": 1962.52, "end": 1963.56, "word": " number", "probability": 0.92724609375}, {"start": 1963.56, "end": 1964.78, "word": " generators.", "probability": 0.75634765625}, {"start": 1965.1, "end": 1965.16, "word": " So", "probability": 0.6220703125}, {"start": 1965.16, "end": 1965.64, "word": " that's", "probability": 0.880615234375}, {"start": 1965.64, "end": 1966.3, "word": " the", "probability": 0.91015625}, {"start": 1966.3, "end": 1966.6, "word": " way", "probability": 0.9619140625}, {"start": 1966.6, "end": 1966.94, "word": " if", "probability": 0.744140625}, {"start": 1966.94, "end": 1967.14, "word": " we,", "probability": 0.951171875}, {"start": 1967.58, "end": 1967.76, "word": " now", "probability": 0.904296875}, {"start": 1967.76, "end": 1967.96, "word": " we", "probability": 0.83056640625}, {"start": 1967.96, "end": 1968.18, "word": " can", "probability": 0.9462890625}, 
{"start": 1968.18, "end": 1968.42, "word": " use", "probability": 0.87548828125}, {"start": 1968.42, "end": 1968.7, "word": " the", "probability": 0.88427734375}, {"start": 1968.7, "end": 1969.54, "word": " random", "probability": 0.83837890625}, {"start": 1969.54, "end": 1969.98, "word": " table", "probability": 0.8876953125}, {"start": 1969.98, "end": 1970.6, "word": " only", "probability": 0.890625}, {"start": 1970.6, "end": 1971.4, "word": " if", "probability": 0.9423828125}, {"start": 1971.4, "end": 1971.62, "word": " the", "probability": 0.91845703125}, {"start": 1971.62, "end": 1971.88, "word": " sample", "probability": 0.83740234375}, {"start": 1971.88, "end": 1972.48, "word": " size", "probability": 0.8671875}, {"start": 1972.48, "end": 1972.86, "word": " is", "probability": 0.94970703125}, {"start": 1972.86, "end": 1973.22, "word": " limited.", "probability": 0.939453125}, {"start": 1975.0, "end": 1975.16, "word": " I", "probability": 0.95263671875}, {"start": 1975.16, "end": 1975.34, "word": " mean", "probability": 0.96484375}, {"start": 1975.34, "end": 1976.16, "word": " up", "probability": 0.51416015625}, {"start": 1976.16, "end": 1976.36, "word": " to", "probability": 0.97119140625}, {"start": 1976.36, "end": 1977.04, "word": " 100", "probability": 0.7451171875}, {"start": 1977.04, "end": 1977.4, "word": " maybe", "probability": 0.8505859375}, {"start": 1977.4, "end": 1977.58, "word": " you", "probability": 0.9287109375}, {"start": 1977.58, "end": 1977.78, "word": " can", "probability": 0.94482421875}, {"start": 1977.78, "end": 1978.12, "word": " use", "probability": 0.888671875}, {"start": 1978.12, "end": 1978.52, "word": " the", "probability": 0.884765625}, {"start": 1978.52, "end": 1978.78, "word": " random", "probability": 0.861328125}, {"start": 1978.78, "end": 1979.08, "word": " table,", "probability": 0.89453125}, {"start": 1979.2, "end": 1979.3, "word": " but", "probability": 0.92822265625}, {"start": 1979.3, "end": 1979.62, "word": " after", 
"probability": 0.8330078125}, {"start": 1979.62, "end": 1979.96, "word": " that", "probability": 0.939453125}, {"start": 1979.96, "end": 1980.8, "word": " I", "probability": 0.6748046875}, {"start": 1980.8, "end": 1981.18, "word": " think", "probability": 0.91845703125}, {"start": 1981.18, "end": 1983.16, "word": " it's", "probability": 0.940185546875}, {"start": 1983.16, "end": 1984.36, "word": " just", "probability": 0.89794921875}, {"start": 1984.36, "end": 1984.7, "word": " you", "probability": 0.86962890625}, {"start": 1984.7, "end": 1984.9, "word": " are", "probability": 0.93505859375}, {"start": 1984.9, "end": 1985.22, "word": " losing", "probability": 0.89794921875}, {"start": 1985.22, "end": 1985.46, "word": " your", "probability": 0.88720703125}, {"start": 1985.46, "end": 1985.74, "word": " time.", "probability": 0.8916015625}], "temperature": 1.0}, {"id": 75, "seek": 201447, "start": 1987.81, "end": 2014.47, "text": " Another example here. Now suppose my sampling frame for population has 850 students. So the numbers are 001, 002, all the way up to 850. 
And suppose for example we are going to select five items randomly from that population.", "tokens": [3996, 1365, 510, 13, 823, 7297, 452, 21179, 3920, 337, 4415, 575, 1649, 2803, 1731, 13, 407, 264, 3547, 366, 7143, 16, 11, 7143, 17, 11, 439, 264, 636, 493, 281, 1649, 2803, 13, 400, 7297, 337, 1365, 321, 366, 516, 281, 3048, 1732, 4754, 16979, 490, 300, 4415, 13], "avg_logprob": -0.20833333917692595, "compression_ratio": 1.4125, "no_speech_prob": 0.0, "words": [{"start": 1987.81, "end": 1988.19, "word": " Another", "probability": 0.63330078125}, {"start": 1988.19, "end": 1988.67, "word": " example", "probability": 0.96240234375}, {"start": 1988.67, "end": 1989.03, "word": " here.", "probability": 0.76611328125}, {"start": 1991.25, "end": 1991.57, "word": " Now", "probability": 0.7841796875}, {"start": 1991.57, "end": 1992.11, "word": " suppose", "probability": 0.623046875}, {"start": 1992.11, "end": 1992.53, "word": " my", "probability": 0.91064453125}, {"start": 1992.53, "end": 1992.87, "word": " sampling", "probability": 0.470458984375}, {"start": 1992.87, "end": 1993.41, "word": " frame", "probability": 0.88818359375}, {"start": 1993.41, "end": 1993.89, "word": " for", "probability": 0.92041015625}, {"start": 1993.89, "end": 1994.39, "word": " population", "probability": 0.86669921875}, {"start": 1994.39, "end": 1996.33, "word": " has", "probability": 0.89453125}, {"start": 1996.33, "end": 1997.95, "word": " 850", "probability": 0.871826171875}, {"start": 1997.95, "end": 1998.53, "word": " students.", "probability": 0.97021484375}, {"start": 1998.69, "end": 1998.81, "word": " So", "probability": 0.857421875}, {"start": 1998.81, "end": 1999.05, "word": " the", "probability": 0.76416015625}, {"start": 1999.05, "end": 1999.45, "word": " numbers", "probability": 0.88525390625}, {"start": 1999.45, "end": 2000.41, "word": " are", "probability": 0.9404296875}, {"start": 2000.41, "end": 2001.61, "word": " 001,", "probability": 0.907470703125}, {"start": 2001.99, "end": 
2002.93, "word": " 002,", "probability": 0.945556640625}, {"start": 2003.07, "end": 2003.23, "word": " all", "probability": 0.89697265625}, {"start": 2003.23, "end": 2003.43, "word": " the", "probability": 0.9140625}, {"start": 2003.43, "end": 2003.59, "word": " way", "probability": 0.9541015625}, {"start": 2003.59, "end": 2003.77, "word": " up", "probability": 0.95751953125}, {"start": 2003.77, "end": 2003.99, "word": " to", "probability": 0.9599609375}, {"start": 2003.99, "end": 2004.85, "word": " 850.", "probability": 0.87890625}, {"start": 2005.67, "end": 2005.87, "word": " And", "probability": 0.79736328125}, {"start": 2005.87, "end": 2006.15, "word": " suppose", "probability": 0.92431640625}, {"start": 2006.15, "end": 2006.35, "word": " for", "probability": 0.65625}, {"start": 2006.35, "end": 2006.71, "word": " example", "probability": 0.97509765625}, {"start": 2006.71, "end": 2007.07, "word": " we", "probability": 0.70068359375}, {"start": 2007.07, "end": 2008.49, "word": " are", "probability": 0.92626953125}, {"start": 2008.49, "end": 2008.77, "word": " going", "probability": 0.9423828125}, {"start": 2008.77, "end": 2008.95, "word": " to", "probability": 0.96728515625}, {"start": 2008.95, "end": 2009.37, "word": " select", "probability": 0.83642578125}, {"start": 2009.37, "end": 2010.31, "word": " five", "probability": 0.63525390625}, {"start": 2010.31, "end": 2011.01, "word": " items", "probability": 0.83935546875}, {"start": 2011.01, "end": 2012.25, "word": " randomly", "probability": 0.83447265625}, {"start": 2012.25, "end": 2013.21, "word": " from", "probability": 0.8759765625}, {"start": 2013.21, "end": 2013.61, "word": " that", "probability": 0.70703125}, {"start": 2013.61, "end": 2014.47, "word": " population.", "probability": 0.93798828125}], "temperature": 1.0}, {"id": 76, "seek": 204221, "start": 2015.93, "end": 2042.21, "text": " So you have to choose three digits and imagine that this is my portion of that table. Now, take three digits. 
The first three digits are 492. So the first item chosen should be item number 492.", "tokens": [407, 291, 362, 281, 2826, 1045, 27011, 293, 3811, 300, 341, 307, 452, 8044, 295, 300, 3199, 13, 823, 11, 747, 1045, 27011, 13, 440, 700, 1045, 27011, 366, 16513, 17, 13, 407, 264, 700, 3174, 8614, 820, 312, 3174, 1230, 16513, 17, 13], "avg_logprob": -0.2727430449591743, "compression_ratio": 1.4586466165413534, "no_speech_prob": 0.0, "words": [{"start": 2015.93, "end": 2016.67, "word": " So", "probability": 0.322998046875}, {"start": 2016.67, "end": 2016.79, "word": " you", "probability": 0.68017578125}, {"start": 2016.79, "end": 2016.93, "word": " have", "probability": 0.89306640625}, {"start": 2016.93, "end": 2017.05, "word": " to", "probability": 0.97119140625}, {"start": 2017.05, "end": 2017.39, "word": " choose", "probability": 0.87744140625}, {"start": 2017.39, "end": 2017.93, "word": " three", "probability": 0.55908203125}, {"start": 2017.93, "end": 2018.37, "word": " digits", "probability": 0.91845703125}, {"start": 2018.37, "end": 2019.61, "word": " and", "probability": 0.5234375}, {"start": 2019.61, "end": 2019.95, "word": " imagine", "probability": 0.81787109375}, {"start": 2019.95, "end": 2020.35, "word": " that", "probability": 0.58056640625}, {"start": 2020.35, "end": 2021.55, "word": " this", "probability": 0.8876953125}, {"start": 2021.55, "end": 2021.73, "word": " is", "probability": 0.9365234375}, {"start": 2021.73, "end": 2022.09, "word": " my", "probability": 0.85693359375}, {"start": 2022.09, "end": 2023.93, "word": " portion", "probability": 0.27197265625}, {"start": 2023.93, "end": 2024.47, "word": " of", "probability": 0.87353515625}, {"start": 2024.47, "end": 2024.67, "word": " that", "probability": 0.68896484375}, {"start": 2024.67, "end": 2024.99, "word": " table.", "probability": 0.87890625}, {"start": 2025.85, "end": 2026.23, "word": " Now,", "probability": 0.88525390625}, {"start": 2026.75, "end": 2027.09, "word": " take", "probability": 
0.8310546875}, {"start": 2027.09, "end": 2027.35, "word": " three", "probability": 0.8837890625}, {"start": 2027.35, "end": 2027.75, "word": " digits.", "probability": 0.935546875}, {"start": 2030.25, "end": 2030.53, "word": " The", "probability": 0.84814453125}, {"start": 2030.53, "end": 2030.87, "word": " first", "probability": 0.8818359375}, {"start": 2030.87, "end": 2031.13, "word": " three", "probability": 0.8935546875}, {"start": 2031.13, "end": 2031.39, "word": " digits", "probability": 0.91796875}, {"start": 2031.39, "end": 2031.57, "word": " are", "probability": 0.322021484375}, {"start": 2031.57, "end": 2032.39, "word": " 492.", "probability": 0.878662109375}, {"start": 2033.13, "end": 2033.43, "word": " So", "probability": 0.82177734375}, {"start": 2033.43, "end": 2033.75, "word": " the", "probability": 0.70361328125}, {"start": 2033.75, "end": 2034.07, "word": " first", "probability": 0.8642578125}, {"start": 2034.07, "end": 2034.63, "word": " item", "probability": 0.93115234375}, {"start": 2034.63, "end": 2036.01, "word": " chosen", "probability": 0.9013671875}, {"start": 2036.01, "end": 2036.37, "word": " should", "probability": 0.94873046875}, {"start": 2036.37, "end": 2038.25, "word": " be", "probability": 0.94873046875}, {"start": 2038.25, "end": 2040.33, "word": " item", "probability": 0.578125}, {"start": 2040.33, "end": 2041.33, "word": " number", "probability": 0.8076171875}, {"start": 2041.33, "end": 2042.21, "word": " 492.", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 77, "seek": 207264, "start": 2043.8, "end": 2072.64, "text": " should be selected next one 800 808 doesn't select because the maximum it's much selected because the maximum here is 850 now next one 892 this one is not selected next item", "tokens": [820, 312, 8209, 958, 472, 13083, 4688, 23, 1177, 380, 3048, 570, 264, 6674, 309, 311, 709, 8209, 570, 264, 6674, 510, 307, 1649, 2803, 586, 958, 472, 31877, 17, 341, 472, 307, 406, 8209, 958, 3174], "avg_logprob": 
-0.3190789457998778, "compression_ratio": 1.5818181818181818, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2043.8, "end": 2044.12, "word": " should", "probability": 0.459228515625}, {"start": 2044.12, "end": 2044.24, "word": " be", "probability": 0.9189453125}, {"start": 2044.24, "end": 2044.66, "word": " selected", "probability": 0.62451171875}, {"start": 2044.66, "end": 2045.82, "word": " next", "probability": 0.432373046875}, {"start": 2045.82, "end": 2046.66, "word": " one", "probability": 0.88818359375}, {"start": 2046.66, "end": 2047.98, "word": " 800", "probability": 0.4365234375}, {"start": 2047.98, "end": 2050.54, "word": " 808", "probability": 0.6905517578125}, {"start": 2050.54, "end": 2050.84, "word": " doesn't", "probability": 0.7022705078125}, {"start": 2050.84, "end": 2052.82, "word": " select", "probability": 0.66259765625}, {"start": 2052.82, "end": 2054.96, "word": " because", "probability": 0.74462890625}, {"start": 2054.96, "end": 2055.24, "word": " the", "probability": 0.86474609375}, {"start": 2055.24, "end": 2055.7, "word": " maximum", "probability": 0.916015625}, {"start": 2055.7, "end": 2056.86, "word": " it's", "probability": 0.4593505859375}, {"start": 2056.86, "end": 2057.02, "word": " much", "probability": 0.262451171875}, {"start": 2057.02, "end": 2057.42, "word": " selected", "probability": 0.8564453125}, {"start": 2057.42, "end": 2058.38, "word": " because", "probability": 0.82958984375}, {"start": 2058.38, "end": 2058.52, "word": " the", "probability": 0.9091796875}, {"start": 2058.52, "end": 2058.78, "word": " maximum", "probability": 0.8994140625}, {"start": 2058.78, "end": 2058.96, "word": " here", "probability": 0.61474609375}, {"start": 2058.96, "end": 2059.1, "word": " is", "probability": 0.94189453125}, {"start": 2059.1, "end": 2059.68, "word": " 850", "probability": 0.86376953125}, {"start": 2059.68, "end": 2060.8, "word": " now", "probability": 0.8623046875}, {"start": 2060.8, "end": 2061.1, "word": " next", 
"probability": 0.8955078125}, {"start": 2061.1, "end": 2062.02, "word": " one", "probability": 0.93603515625}, {"start": 2062.02, "end": 2063.32, "word": " 892", "probability": 0.6881103515625}, {"start": 2063.32, "end": 2066.36, "word": " this", "probability": 0.8203125}, {"start": 2066.36, "end": 2066.56, "word": " one", "probability": 0.93212890625}, {"start": 2066.56, "end": 2066.7, "word": " is", "probability": 0.9482421875}, {"start": 2066.7, "end": 2066.84, "word": " not", "probability": 0.90625}, {"start": 2066.84, "end": 2067.28, "word": " selected", "probability": 0.861328125}, {"start": 2067.28, "end": 2072.14, "word": " next", "probability": 0.90234375}, {"start": 2072.14, "end": 2072.64, "word": " item", "probability": 0.95654296875}], "temperature": 1.0}, {"id": 78, "seek": 210119, "start": 2074.63, "end": 2101.19, "text": " four three five selected now seven seven nine should be selected finally zeros two should be selected so these are the five numbers in my sample by using selected by using the random sample any questions?", "tokens": [1451, 1045, 1732, 8209, 586, 3407, 3407, 4949, 820, 312, 8209, 2721, 35193, 732, 820, 312, 8209, 370, 613, 366, 264, 1732, 3547, 294, 452, 6889, 538, 1228, 8209, 538, 1228, 264, 4974, 6889, 604, 1651, 30], "avg_logprob": -0.23972039512897791, "compression_ratio": 1.6666666666666667, "no_speech_prob": 0.0, "words": [{"start": 2074.63, "end": 2074.91, "word": " four", "probability": 0.2374267578125}, {"start": 2074.91, "end": 2075.15, "word": " three", "probability": 0.7431640625}, {"start": 2075.15, "end": 2075.59, "word": " five", "probability": 0.87939453125}, {"start": 2075.59, "end": 2077.45, "word": " selected", "probability": 0.32177734375}, {"start": 2077.45, "end": 2083.03, "word": " now", "probability": 0.5322265625}, {"start": 2083.03, "end": 2083.77, "word": " seven", "probability": 0.77734375}, {"start": 2083.77, "end": 2084.05, "word": " seven", "probability": 0.859375}, {"start": 2084.05, "end": 2084.53, 
"word": " nine", "probability": 0.92138671875}, {"start": 2084.53, "end": 2086.39, "word": " should", "probability": 0.93115234375}, {"start": 2086.39, "end": 2086.57, "word": " be", "probability": 0.95068359375}, {"start": 2086.57, "end": 2087.01, "word": " selected", "probability": 0.900390625}, {"start": 2087.01, "end": 2089.01, "word": " finally", "probability": 0.74560546875}, {"start": 2089.01, "end": 2090.71, "word": " zeros", "probability": 0.4462890625}, {"start": 2090.71, "end": 2091.01, "word": " two", "probability": 0.65380859375}, {"start": 2091.01, "end": 2091.25, "word": " should", "probability": 0.962890625}, {"start": 2091.25, "end": 2091.41, "word": " be", "probability": 0.94580078125}, {"start": 2091.41, "end": 2091.95, "word": " selected", "probability": 0.86767578125}, {"start": 2091.95, "end": 2092.29, "word": " so", "probability": 0.65966796875}, {"start": 2092.29, "end": 2092.51, "word": " these", "probability": 0.8359375}, {"start": 2092.51, "end": 2092.73, "word": " are", "probability": 0.94775390625}, {"start": 2092.73, "end": 2092.87, "word": " the", "probability": 0.853515625}, {"start": 2092.87, "end": 2093.13, "word": " five", "probability": 0.8994140625}, {"start": 2093.13, "end": 2093.71, "word": " numbers", "probability": 0.90576171875}, {"start": 2093.71, "end": 2094.79, "word": " in", "probability": 0.93603515625}, {"start": 2094.79, "end": 2095.07, "word": " my", "probability": 0.97119140625}, {"start": 2095.07, "end": 2095.49, "word": " sample", "probability": 0.87890625}, {"start": 2095.49, "end": 2095.77, "word": " by", "probability": 0.91064453125}, {"start": 2095.77, "end": 2096.29, "word": " using", "probability": 0.95361328125}, {"start": 2096.29, "end": 2097.41, "word": " selected", "probability": 0.82666015625}, {"start": 2097.41, "end": 2097.63, "word": " by", "probability": 0.966796875}, {"start": 2097.63, "end": 2098.09, "word": " using", "probability": 0.9375}, {"start": 2098.09, "end": 2099.23, "word": " the", 
"probability": 0.90869140625}, {"start": 2099.23, "end": 2099.63, "word": " random", "probability": 0.8935546875}, {"start": 2099.63, "end": 2100.23, "word": " sample", "probability": 0.80322265625}, {"start": 2100.23, "end": 2100.73, "word": " any", "probability": 0.7607421875}, {"start": 2100.73, "end": 2101.19, "word": " questions?", "probability": 0.9443359375}], "temperature": 1.0}, {"id": 79, "seek": 212526, "start": 2104.16, "end": 2125.26, "text": " Let's move to another part. The next type of samples is called systematic samples.", "tokens": [961, 311, 1286, 281, 1071, 644, 13, 440, 958, 2010, 295, 10938, 307, 1219, 27249, 10938, 13], "avg_logprob": -0.14518228587177065, "compression_ratio": 1.077922077922078, "no_speech_prob": 0.0, "words": [{"start": 2104.16, "end": 2104.62, "word": " Let's", "probability": 0.810546875}, {"start": 2104.62, "end": 2104.98, "word": " move", "probability": 0.93994140625}, {"start": 2104.98, "end": 2106.52, "word": " to", "probability": 0.90234375}, {"start": 2106.52, "end": 2107.2, "word": " another", "probability": 0.90673828125}, {"start": 2107.2, "end": 2107.78, "word": " part.", "probability": 0.89453125}, {"start": 2117.6, "end": 2118.86, "word": " The", "probability": 0.78076171875}, {"start": 2118.86, "end": 2119.2, "word": " next", "probability": 0.9521484375}, {"start": 2119.2, "end": 2120.12, "word": " type", "probability": 0.97509765625}, {"start": 2120.12, "end": 2120.32, "word": " of", "probability": 0.97119140625}, {"start": 2120.32, "end": 2120.66, "word": " samples", "probability": 0.599609375}, {"start": 2120.66, "end": 2121.04, "word": " is", "probability": 0.90087890625}, {"start": 2121.04, "end": 2121.5, "word": " called", "probability": 0.900390625}, {"start": 2121.5, "end": 2122.38, "word": " systematic", "probability": 0.64697265625}, {"start": 2122.38, "end": 2125.26, "word": " samples.", "probability": 0.87158203125}], "temperature": 1.0}, {"id": 80, "seek": 215717, "start": 2129.12, "end": 2157.18, 
"text": " Now suppose N represents the sample size, capital N represents the population size. And let's see how can we choose a systematic random sample from that population. For example, suppose", "tokens": [823, 7297, 426, 8855, 264, 6889, 2744, 11, 4238, 426, 8855, 264, 4415, 2744, 13, 400, 718, 311, 536, 577, 393, 321, 2826, 257, 27249, 4974, 6889, 490, 300, 4415, 13, 1171, 1365, 11, 7297], "avg_logprob": -0.2131076343357563, "compression_ratio": 1.4645669291338583, "no_speech_prob": 0.0, "words": [{"start": 2129.12, "end": 2129.44, "word": " Now", "probability": 0.79638671875}, {"start": 2129.44, "end": 2130.08, "word": " suppose", "probability": 0.6025390625}, {"start": 2130.08, "end": 2131.2, "word": " N", "probability": 0.5634765625}, {"start": 2131.2, "end": 2132.46, "word": " represents", "probability": 0.84130859375}, {"start": 2132.46, "end": 2132.9, "word": " the", "probability": 0.861328125}, {"start": 2132.9, "end": 2133.14, "word": " sample", "probability": 0.89306640625}, {"start": 2133.14, "end": 2133.7, "word": " size,", "probability": 0.8798828125}, {"start": 2135.44, "end": 2135.78, "word": " capital", "probability": 0.62255859375}, {"start": 2135.78, "end": 2136.14, "word": " N", "probability": 0.9755859375}, {"start": 2136.14, "end": 2140.52, "word": " represents", "probability": 0.86083984375}, {"start": 2140.52, "end": 2141.14, "word": " the", "probability": 0.896484375}, {"start": 2141.14, "end": 2141.66, "word": " population", "probability": 0.9453125}, {"start": 2141.66, "end": 2142.22, "word": " size.", "probability": 0.87353515625}, {"start": 2146.66, "end": 2147.54, "word": " And", "probability": 0.67724609375}, {"start": 2147.54, "end": 2147.76, "word": " let's", "probability": 0.914306640625}, {"start": 2147.76, "end": 2147.88, "word": " see", "probability": 0.916015625}, {"start": 2147.88, "end": 2147.98, "word": " how", "probability": 0.912109375}, {"start": 2147.98, "end": 2148.2, "word": " can", "probability": 0.72216796875}, 
{"start": 2148.2, "end": 2148.36, "word": " we", "probability": 0.95556640625}, {"start": 2148.36, "end": 2148.78, "word": " choose", "probability": 0.916015625}, {"start": 2148.78, "end": 2149.4, "word": " a", "probability": 0.439697265625}, {"start": 2149.4, "end": 2149.9, "word": " systematic", "probability": 0.83984375}, {"start": 2149.9, "end": 2150.44, "word": " random", "probability": 0.810546875}, {"start": 2150.44, "end": 2150.9, "word": " sample", "probability": 0.91552734375}, {"start": 2150.9, "end": 2151.24, "word": " from", "probability": 0.8583984375}, {"start": 2151.24, "end": 2151.52, "word": " that", "probability": 0.87646484375}, {"start": 2151.52, "end": 2152.08, "word": " population.", "probability": 0.9267578125}, {"start": 2153.36, "end": 2153.7, "word": " For", "probability": 0.9423828125}, {"start": 2153.7, "end": 2154.04, "word": " example,", "probability": 0.96142578125}, {"start": 2155.26, "end": 2157.18, "word": " suppose", "probability": 0.861328125}], "temperature": 1.0}, {"id": 81, "seek": 218898, "start": 2159.61, "end": 2188.99, "text": " For this specific slide, there are 40 items in the population. And my goal is to select a sample of size 4 by using systematic random sampling. The first step is to find how many individuals will be in any group. 
Let's use this letter K.", "tokens": [1171, 341, 2685, 4137, 11, 456, 366, 3356, 4754, 294, 264, 4415, 13, 400, 452, 3387, 307, 281, 3048, 257, 6889, 295, 2744, 1017, 538, 1228, 27249, 4974, 21179, 13, 440, 700, 1823, 307, 281, 915, 577, 867, 5346, 486, 312, 294, 604, 1594, 13, 961, 311, 764, 341, 5063, 591, 13], "avg_logprob": -0.16273585580429942, "compression_ratio": 1.3837209302325582, "no_speech_prob": 0.0, "words": [{"start": 2159.61, "end": 2159.85, "word": " For", "probability": 0.544921875}, {"start": 2159.85, "end": 2160.13, "word": " this", "probability": 0.94482421875}, {"start": 2160.13, "end": 2160.77, "word": " specific", "probability": 0.89013671875}, {"start": 2160.77, "end": 2161.69, "word": " slide,", "probability": 0.9462890625}, {"start": 2162.19, "end": 2162.33, "word": " there", "probability": 0.89599609375}, {"start": 2162.33, "end": 2162.63, "word": " are", "probability": 0.94970703125}, {"start": 2162.63, "end": 2163.05, "word": " 40", "probability": 0.86474609375}, {"start": 2163.05, "end": 2163.67, "word": " items", "probability": 0.81787109375}, {"start": 2163.67, "end": 2164.13, "word": " in", "probability": 0.91943359375}, {"start": 2164.13, "end": 2165.01, "word": " the", "probability": 0.88232421875}, {"start": 2165.01, "end": 2165.63, "word": " population.", "probability": 0.91943359375}, {"start": 2167.01, "end": 2167.93, "word": " And", "probability": 0.83642578125}, {"start": 2167.93, "end": 2168.17, "word": " my", "probability": 0.9501953125}, {"start": 2168.17, "end": 2168.37, "word": " goal", "probability": 0.97314453125}, {"start": 2168.37, "end": 2168.59, "word": " is", "probability": 0.94580078125}, {"start": 2168.59, "end": 2168.73, "word": " to", "probability": 0.681640625}, {"start": 2168.73, "end": 2169.21, "word": " select", "probability": 0.81787109375}, {"start": 2169.21, "end": 2170.63, "word": " a", "probability": 0.9453125}, {"start": 2170.63, "end": 2171.01, "word": " sample", "probability": 0.90869140625}, 
{"start": 2171.01, "end": 2171.37, "word": " of", "probability": 0.96875}, {"start": 2171.37, "end": 2171.75, "word": " size", "probability": 0.837890625}, {"start": 2171.75, "end": 2172.25, "word": " 4", "probability": 0.47607421875}, {"start": 2172.25, "end": 2173.59, "word": " by", "probability": 0.751953125}, {"start": 2173.59, "end": 2173.87, "word": " using", "probability": 0.93115234375}, {"start": 2173.87, "end": 2174.37, "word": " systematic", "probability": 0.76171875}, {"start": 2174.37, "end": 2174.71, "word": " random", "probability": 0.93017578125}, {"start": 2174.71, "end": 2175.11, "word": " sampling.", "probability": 0.69873046875}, {"start": 2175.85, "end": 2176.21, "word": " The", "probability": 0.81201171875}, {"start": 2176.21, "end": 2176.53, "word": " first", "probability": 0.87646484375}, {"start": 2176.53, "end": 2176.91, "word": " step", "probability": 0.93408203125}, {"start": 2176.91, "end": 2177.37, "word": " is", "probability": 0.94140625}, {"start": 2177.37, "end": 2179.05, "word": " to", "probability": 0.939453125}, {"start": 2179.05, "end": 2179.53, "word": " find", "probability": 0.888671875}, {"start": 2179.53, "end": 2180.97, "word": " how", "probability": 0.89794921875}, {"start": 2180.97, "end": 2181.31, "word": " many", "probability": 0.8828125}, {"start": 2181.31, "end": 2181.93, "word": " individuals", "probability": 0.6748046875}, {"start": 2181.93, "end": 2182.99, "word": " will", "probability": 0.861328125}, {"start": 2182.99, "end": 2183.29, "word": " be", "probability": 0.95703125}, {"start": 2183.29, "end": 2183.67, "word": " in", "probability": 0.9482421875}, {"start": 2183.67, "end": 2183.93, "word": " any", "probability": 0.88720703125}, {"start": 2183.93, "end": 2184.33, "word": " group.", "probability": 0.96435546875}, {"start": 2185.59, "end": 2186.29, "word": " Let's", "probability": 0.955322265625}, {"start": 2186.29, "end": 2187.63, "word": " use", "probability": 0.8623046875}, {"start": 2187.63, "end": 
2188.07, "word": " this", "probability": 0.93798828125}, {"start": 2188.07, "end": 2188.53, "word": " letter", "probability": 0.93115234375}, {"start": 2188.53, "end": 2188.99, "word": " K.", "probability": 0.7119140625}], "temperature": 1.0}, {"id": 82, "seek": 220884, "start": 2191.82, "end": 2208.84, "text": " divide N by, divide frame of N individuals into groups of K individuals. So, K equal capital N over small n, this is number of items in a group.", "tokens": [9845, 426, 538, 11, 9845, 3920, 295, 426, 5346, 666, 3935, 295, 591, 5346, 13, 407, 11, 591, 2681, 4238, 426, 670, 1359, 297, 11, 341, 307, 1230, 295, 4754, 294, 257, 1594, 13], "avg_logprob": -0.3167410731315613, "compression_ratio": 1.355140186915888, "no_speech_prob": 0.0, "words": [{"start": 2191.82, "end": 2192.44, "word": " divide", "probability": 0.259765625}, {"start": 2192.44, "end": 2192.76, "word": " N", "probability": 0.54248046875}, {"start": 2192.76, "end": 2193.16, "word": " by,", "probability": 0.94873046875}, {"start": 2194.06, "end": 2194.74, "word": " divide", "probability": 0.89892578125}, {"start": 2194.74, "end": 2195.34, "word": " frame", "probability": 0.712890625}, {"start": 2195.34, "end": 2195.64, "word": " of", "probability": 0.9658203125}, {"start": 2195.64, "end": 2195.86, "word": " N", "probability": 0.8642578125}, {"start": 2195.86, "end": 2196.4, "word": " individuals", "probability": 0.787109375}, {"start": 2196.4, "end": 2196.94, "word": " into", "probability": 0.84130859375}, {"start": 2196.94, "end": 2197.66, "word": " groups", "probability": 0.9541015625}, {"start": 2197.66, "end": 2197.9, "word": " of", "probability": 0.9267578125}, {"start": 2197.9, "end": 2198.08, "word": " K", "probability": 0.73828125}, {"start": 2198.08, "end": 2198.74, "word": " individuals.", "probability": 0.82275390625}, {"start": 2199.28, "end": 2199.56, "word": " So,", "probability": 0.9189453125}, {"start": 2199.68, "end": 2199.78, "word": " K", "probability": 0.90625}, {"start": 
2199.78, "end": 2200.22, "word": " equal", "probability": 0.354736328125}, {"start": 2200.22, "end": 2202.68, "word": " capital", "probability": 0.80517578125}, {"start": 2202.68, "end": 2202.9, "word": " N", "probability": 0.9765625}, {"start": 2202.9, "end": 2203.14, "word": " over", "probability": 0.8984375}, {"start": 2203.14, "end": 2203.46, "word": " small", "probability": 0.921875}, {"start": 2203.46, "end": 2203.74, "word": " n,", "probability": 0.4775390625}, {"start": 2204.06, "end": 2204.22, "word": " this", "probability": 0.431884765625}, {"start": 2204.22, "end": 2204.3, "word": " is", "probability": 0.9228515625}, {"start": 2204.3, "end": 2204.66, "word": " number", "probability": 0.87841796875}, {"start": 2204.66, "end": 2206.04, "word": " of", "probability": 0.970703125}, {"start": 2206.04, "end": 2206.72, "word": " items", "probability": 0.85107421875}, {"start": 2206.72, "end": 2208.52, "word": " in", "probability": 0.93896484375}, {"start": 2208.52, "end": 2208.64, "word": " a", "probability": 0.9833984375}, {"start": 2208.64, "end": 2208.84, "word": " group.", "probability": 0.96728515625}], "temperature": 1.0}, {"id": 83, "seek": 223809, "start": 2211.57, "end": 2238.09, "text": " So K represents number of subjects or number of elements in a group. So for this example, K equals 40 divided by 4, so 10. So the group, each group has 10 items. 
So each group", "tokens": [407, 591, 8855, 1230, 295, 13066, 420, 1230, 295, 4959, 294, 257, 1594, 13, 407, 337, 341, 1365, 11, 591, 6915, 3356, 6666, 538, 1017, 11, 370, 1266, 13, 407, 264, 1594, 11, 1184, 1594, 575, 1266, 4754, 13, 407, 1184, 1594], "avg_logprob": -0.17623546234397, "compression_ratio": 1.408, "no_speech_prob": 0.0, "words": [{"start": 2211.57, "end": 2211.85, "word": " So", "probability": 0.78955078125}, {"start": 2211.85, "end": 2212.11, "word": " K", "probability": 0.35205078125}, {"start": 2212.11, "end": 2213.37, "word": " represents", "probability": 0.87158203125}, {"start": 2213.37, "end": 2214.11, "word": " number", "probability": 0.84619140625}, {"start": 2214.11, "end": 2214.69, "word": " of", "probability": 0.9736328125}, {"start": 2214.69, "end": 2215.29, "word": " subjects", "probability": 0.90576171875}, {"start": 2215.29, "end": 2215.97, "word": " or", "probability": 0.70703125}, {"start": 2215.97, "end": 2216.31, "word": " number", "probability": 0.91259765625}, {"start": 2216.31, "end": 2216.51, "word": " of", "probability": 0.96826171875}, {"start": 2216.51, "end": 2217.01, "word": " elements", "probability": 0.8955078125}, {"start": 2217.01, "end": 2218.25, "word": " in", "probability": 0.91162109375}, {"start": 2218.25, "end": 2219.23, "word": " a", "probability": 0.96142578125}, {"start": 2219.23, "end": 2219.59, "word": " group.", "probability": 0.96630859375}, {"start": 2220.15, "end": 2220.39, "word": " So", "probability": 0.89599609375}, {"start": 2220.39, "end": 2220.61, "word": " for", "probability": 0.8349609375}, {"start": 2220.61, "end": 2220.87, "word": " this", "probability": 0.9443359375}, {"start": 2220.87, "end": 2221.31, "word": " example,", "probability": 0.9775390625}, {"start": 2221.85, "end": 2222.27, "word": " K", "probability": 0.85986328125}, {"start": 2222.27, "end": 2222.75, "word": " equals", "probability": 0.73828125}, {"start": 2222.75, "end": 2223.37, "word": " 40", "probability": 0.83447265625}, 
{"start": 2223.37, "end": 2223.95, "word": " divided", "probability": 0.7421875}, {"start": 2223.95, "end": 2224.25, "word": " by", "probability": 0.953125}, {"start": 2224.25, "end": 2224.57, "word": " 4,", "probability": 0.8115234375}, {"start": 2225.35, "end": 2225.61, "word": " so", "probability": 0.77197265625}, {"start": 2225.61, "end": 2225.79, "word": " 10.", "probability": 0.93359375}, {"start": 2227.87, "end": 2228.67, "word": " So", "probability": 0.873046875}, {"start": 2228.67, "end": 2228.85, "word": " the", "probability": 0.7763671875}, {"start": 2228.85, "end": 2229.15, "word": " group,", "probability": 0.95654296875}, {"start": 2229.25, "end": 2229.45, "word": " each", "probability": 0.9423828125}, {"start": 2229.45, "end": 2229.71, "word": " group", "probability": 0.966796875}, {"start": 2229.71, "end": 2229.99, "word": " has", "probability": 0.87939453125}, {"start": 2229.99, "end": 2230.37, "word": " 10", "probability": 0.85107421875}, {"start": 2230.37, "end": 2231.67, "word": " items.", "probability": 0.82763671875}, {"start": 2236.63, "end": 2237.43, "word": " So", "probability": 0.7783203125}, {"start": 2237.43, "end": 2237.67, "word": " each", "probability": 0.9111328125}, {"start": 2237.67, "end": 2238.09, "word": " group", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 84, "seek": 226019, "start": 2240.64, "end": 2260.2, "text": " has 10 items. So group number 1, 10 items, and others have the same number. 
So first step, we have to decide how many items will be in the group.", "tokens": [575, 1266, 4754, 13, 407, 1594, 1230, 502, 11, 1266, 4754, 11, 293, 2357, 362, 264, 912, 1230, 13, 407, 700, 1823, 11, 321, 362, 281, 4536, 577, 867, 4754, 486, 312, 294, 264, 1594, 13], "avg_logprob": -0.26710304376241323, "compression_ratio": 1.3518518518518519, "no_speech_prob": 0.0, "words": [{"start": 2240.64, "end": 2241.04, "word": " has", "probability": 0.2349853515625}, {"start": 2241.04, "end": 2241.5, "word": " 10", "probability": 0.650390625}, {"start": 2241.5, "end": 2243.14, "word": " items.", "probability": 0.81787109375}, {"start": 2247.42, "end": 2247.94, "word": " So", "probability": 0.74755859375}, {"start": 2247.94, "end": 2248.12, "word": " group", "probability": 0.818359375}, {"start": 2248.12, "end": 2248.44, "word": " number", "probability": 0.83935546875}, {"start": 2248.44, "end": 2248.68, "word": " 1,", "probability": 0.53076171875}, {"start": 2248.78, "end": 2248.9, "word": " 10", "probability": 0.9013671875}, {"start": 2248.9, "end": 2249.44, "word": " items,", "probability": 0.81396484375}, {"start": 2251.5, "end": 2252.58, "word": " and", "probability": 0.6572265625}, {"start": 2252.58, "end": 2253.18, "word": " others", "probability": 0.65380859375}, {"start": 2253.18, "end": 2253.68, "word": " have", "probability": 0.9267578125}, {"start": 2253.68, "end": 2253.86, "word": " the", "probability": 0.876953125}, {"start": 2253.86, "end": 2254.02, "word": " same", "probability": 0.91064453125}, {"start": 2254.02, "end": 2254.3, "word": " number.", "probability": 0.9228515625}, {"start": 2255.42, "end": 2255.6, "word": " So", "probability": 0.85986328125}, {"start": 2255.6, "end": 2255.86, "word": " first", "probability": 0.7666015625}, {"start": 2255.86, "end": 2256.1, "word": " step,", "probability": 0.943359375}, {"start": 2256.18, "end": 2256.24, "word": " we", "probability": 0.95654296875}, {"start": 2256.24, "end": 2256.44, "word": " have", 
"probability": 0.9453125}, {"start": 2256.44, "end": 2256.56, "word": " to", "probability": 0.9697265625}, {"start": 2256.56, "end": 2257.04, "word": " decide", "probability": 0.90966796875}, {"start": 2257.04, "end": 2258.66, "word": " how", "probability": 0.88720703125}, {"start": 2258.66, "end": 2258.92, "word": " many", "probability": 0.8876953125}, {"start": 2258.92, "end": 2259.32, "word": " items", "probability": 0.83251953125}, {"start": 2259.32, "end": 2259.52, "word": " will", "probability": 0.87109375}, {"start": 2259.52, "end": 2259.64, "word": " be", "probability": 0.9560546875}, {"start": 2259.64, "end": 2259.74, "word": " in", "probability": 0.95068359375}, {"start": 2259.74, "end": 2259.88, "word": " the", "probability": 0.90283203125}, {"start": 2259.88, "end": 2260.2, "word": " group.", "probability": 0.962890625}], "temperature": 1.0}, {"id": 85, "seek": 228561, "start": 2260.91, "end": 2285.61, "text": " And that number equals N divided by small n, capital N divided by small n. In this case, N is 40, the sample size is 4, so there are 10 items in each individual. Next step, select randomly the first individual from the first group. 
For example, here.", "tokens": [400, 300, 1230, 6915, 426, 6666, 538, 1359, 297, 11, 4238, 426, 6666, 538, 1359, 297, 13, 682, 341, 1389, 11, 426, 307, 3356, 11, 264, 6889, 2744, 307, 1017, 11, 370, 456, 366, 1266, 4754, 294, 1184, 2609, 13, 3087, 1823, 11, 3048, 16979, 264, 700, 2609, 490, 264, 700, 1594, 13, 1171, 1365, 11, 510, 13], "avg_logprob": -0.23040254237288135, "compression_ratio": 1.5212121212121212, "no_speech_prob": 0.0, "words": [{"start": 2260.91, "end": 2261.15, "word": " And", "probability": 0.451416015625}, {"start": 2261.15, "end": 2261.45, "word": " that", "probability": 0.91162109375}, {"start": 2261.45, "end": 2262.11, "word": " number", "probability": 0.94140625}, {"start": 2262.11, "end": 2262.67, "word": " equals", "probability": 0.89404296875}, {"start": 2262.67, "end": 2263.63, "word": " N", "probability": 0.67431640625}, {"start": 2263.63, "end": 2263.87, "word": " divided", "probability": 0.61279296875}, {"start": 2263.87, "end": 2264.07, "word": " by", "probability": 0.97607421875}, {"start": 2264.07, "end": 2264.33, "word": " small", "probability": 0.76806640625}, {"start": 2264.33, "end": 2264.43, "word": " n,", "probability": 0.626953125}, {"start": 2264.55, "end": 2264.73, "word": " capital", "probability": 0.78173828125}, {"start": 2264.73, "end": 2264.93, "word": " N", "probability": 0.97607421875}, {"start": 2264.93, "end": 2265.15, "word": " divided", "probability": 0.81298828125}, {"start": 2265.15, "end": 2265.33, "word": " by", "probability": 0.97021484375}, {"start": 2265.33, "end": 2265.61, "word": " small", "probability": 0.93115234375}, {"start": 2265.61, "end": 2265.75, "word": " n.", "probability": 0.955078125}, {"start": 2265.79, "end": 2265.91, "word": " In", "probability": 0.91845703125}, {"start": 2265.91, "end": 2266.13, "word": " this", "probability": 0.9482421875}, {"start": 2266.13, "end": 2266.47, "word": " case,", "probability": 0.9189453125}, {"start": 2266.65, "end": 2266.75, "word": " N", "probability": 
0.90673828125}, {"start": 2266.75, "end": 2266.91, "word": " is", "probability": 0.9541015625}, {"start": 2266.91, "end": 2267.33, "word": " 40,", "probability": 0.68310546875}, {"start": 2268.01, "end": 2268.19, "word": " the", "probability": 0.8291015625}, {"start": 2268.19, "end": 2268.41, "word": " sample", "probability": 0.912109375}, {"start": 2268.41, "end": 2268.75, "word": " size", "probability": 0.8623046875}, {"start": 2268.75, "end": 2268.91, "word": " is", "probability": 0.9072265625}, {"start": 2268.91, "end": 2269.19, "word": " 4,", "probability": 0.76904296875}, {"start": 2269.65, "end": 2269.97, "word": " so", "probability": 0.89697265625}, {"start": 2269.97, "end": 2270.91, "word": " there", "probability": 0.875}, {"start": 2270.91, "end": 2271.15, "word": " are", "probability": 0.94580078125}, {"start": 2271.15, "end": 2271.51, "word": " 10", "probability": 0.7802734375}, {"start": 2271.51, "end": 2272.11, "word": " items", "probability": 0.83935546875}, {"start": 2272.11, "end": 2272.41, "word": " in", "probability": 0.471923828125}, {"start": 2272.41, "end": 2272.63, "word": " each", "probability": 0.9345703125}, {"start": 2272.63, "end": 2272.87, "word": " individual.", "probability": 0.0958251953125}, {"start": 2273.61, "end": 2274.17, "word": " Next", "probability": 0.90673828125}, {"start": 2274.17, "end": 2274.57, "word": " step,", "probability": 0.9248046875}, {"start": 2275.71, "end": 2276.37, "word": " select", "probability": 0.8642578125}, {"start": 2276.37, "end": 2279.21, "word": " randomly", "probability": 0.78564453125}, {"start": 2279.21, "end": 2280.17, "word": " the", "probability": 0.86376953125}, {"start": 2280.17, "end": 2280.65, "word": " first", "probability": 0.87939453125}, {"start": 2280.65, "end": 2281.55, "word": " individual", "probability": 0.89404296875}, {"start": 2281.55, "end": 2282.85, "word": " from", "probability": 0.81494140625}, {"start": 2282.85, "end": 2283.03, "word": " the", "probability": 0.9208984375}, 
{"start": 2283.03, "end": 2283.27, "word": " first", "probability": 0.90869140625}, {"start": 2283.27, "end": 2283.59, "word": " group.", "probability": 0.9619140625}, {"start": 2284.69, "end": 2284.95, "word": " For", "probability": 0.96240234375}, {"start": 2284.95, "end": 2285.25, "word": " example,", "probability": 0.974609375}, {"start": 2285.31, "end": 2285.61, "word": " here.", "probability": 0.85693359375}], "temperature": 1.0}, {"id": 86, "seek": 231166, "start": 2287.76, "end": 2311.66, "text": " Now, how many we have here? We have 10 items. So, numbers are 01, 02, up to 10. I have to choose one more number from these numbers, from 1 to 10, by using the random table again. So, I have to go back to the random table and I choose two digits.", "tokens": [823, 11, 577, 867, 321, 362, 510, 30, 492, 362, 1266, 4754, 13, 407, 11, 3547, 366, 23185, 11, 37202, 11, 493, 281, 1266, 13, 286, 362, 281, 2826, 472, 544, 1230, 490, 613, 3547, 11, 490, 502, 281, 1266, 11, 538, 1228, 264, 4974, 3199, 797, 13, 407, 11, 286, 362, 281, 352, 646, 281, 264, 4974, 3199, 293, 286, 2826, 732, 27011, 13], "avg_logprob": -0.21957859961372433, "compression_ratio": 1.5632911392405062, "no_speech_prob": 0.0, "words": [{"start": 2287.76, "end": 2288.1, "word": " Now,", "probability": 0.62841796875}, {"start": 2288.18, "end": 2288.34, "word": " how", "probability": 0.83203125}, {"start": 2288.34, "end": 2288.62, "word": " many", "probability": 0.89453125}, {"start": 2288.62, "end": 2289.18, "word": " we", "probability": 0.5791015625}, {"start": 2289.18, "end": 2289.44, "word": " have", "probability": 0.94482421875}, {"start": 2289.44, "end": 2289.7, "word": " here?", "probability": 0.8271484375}, {"start": 2289.82, "end": 2289.92, "word": " We", "probability": 0.8564453125}, {"start": 2289.92, "end": 2290.22, "word": " have", "probability": 0.95166015625}, {"start": 2290.22, "end": 2290.78, "word": " 10", "probability": 0.609375}, {"start": 2290.78, "end": 2291.34, "word": " items.", 
"probability": 0.79736328125}, {"start": 2292.48, "end": 2292.48, "word": " So,", "probability": 0.8876953125}, {"start": 2292.68, "end": 2293.08, "word": " numbers", "probability": 0.8046875}, {"start": 2293.08, "end": 2293.36, "word": " are", "probability": 0.9326171875}, {"start": 2293.36, "end": 2293.74, "word": " 01,", "probability": 0.403076171875}, {"start": 2295.16, "end": 2295.58, "word": " 02,", "probability": 0.85400390625}, {"start": 2295.7, "end": 2295.84, "word": " up", "probability": 0.923828125}, {"start": 2295.84, "end": 2295.98, "word": " to", "probability": 0.95654296875}, {"start": 2295.98, "end": 2296.2, "word": " 10.", "probability": 0.97216796875}, {"start": 2296.76, "end": 2297.0, "word": " I", "probability": 0.98046875}, {"start": 2297.0, "end": 2297.2, "word": " have", "probability": 0.939453125}, {"start": 2297.2, "end": 2297.34, "word": " to", "probability": 0.96435546875}, {"start": 2297.34, "end": 2298.28, "word": " choose", "probability": 0.8955078125}, {"start": 2298.28, "end": 2298.64, "word": " one", "probability": 0.69482421875}, {"start": 2298.64, "end": 2298.86, "word": " more", "probability": 0.7431640625}, {"start": 2298.86, "end": 2299.06, "word": " number", "probability": 0.72998046875}, {"start": 2299.06, "end": 2300.86, "word": " from", "probability": 0.74365234375}, {"start": 2300.86, "end": 2301.12, "word": " these", "probability": 0.79541015625}, {"start": 2301.12, "end": 2301.52, "word": " numbers,", "probability": 0.8642578125}, {"start": 2301.62, "end": 2301.82, "word": " from", "probability": 0.818359375}, {"start": 2301.82, "end": 2302.08, "word": " 1", "probability": 0.828125}, {"start": 2302.08, "end": 2302.3, "word": " to", "probability": 0.97021484375}, {"start": 2302.3, "end": 2302.58, "word": " 10,", "probability": 0.97265625}, {"start": 2302.9, "end": 2303.12, "word": " by", "probability": 0.9521484375}, {"start": 2303.12, "end": 2303.46, "word": " using", "probability": 0.92529296875}, {"start": 2303.46, 
"end": 2303.68, "word": " the", "probability": 0.90087890625}, {"start": 2303.68, "end": 2303.88, "word": " random", "probability": 0.79296875}, {"start": 2303.88, "end": 2304.24, "word": " table", "probability": 0.87744140625}, {"start": 2304.24, "end": 2304.58, "word": " again.", "probability": 0.93896484375}, {"start": 2304.92, "end": 2305.1, "word": " So,", "probability": 0.94873046875}, {"start": 2305.14, "end": 2305.2, "word": " I", "probability": 0.99560546875}, {"start": 2305.2, "end": 2305.34, "word": " have", "probability": 0.9482421875}, {"start": 2305.34, "end": 2305.44, "word": " to", "probability": 0.96826171875}, {"start": 2305.44, "end": 2305.58, "word": " go", "probability": 0.96240234375}, {"start": 2305.58, "end": 2305.9, "word": " back", "probability": 0.87109375}, {"start": 2305.9, "end": 2307.46, "word": " to", "probability": 0.95556640625}, {"start": 2307.46, "end": 2307.6, "word": " the", "probability": 0.9189453125}, {"start": 2307.6, "end": 2307.86, "word": " random", "probability": 0.85693359375}, {"start": 2307.86, "end": 2308.28, "word": " table", "probability": 0.89599609375}, {"start": 2308.28, "end": 2308.7, "word": " and", "probability": 0.67529296875}, {"start": 2308.7, "end": 2309.02, "word": " I", "probability": 0.52099609375}, {"start": 2309.02, "end": 2309.44, "word": " choose", "probability": 0.8974609375}, {"start": 2309.44, "end": 2311.3, "word": " two", "probability": 0.734375}, {"start": 2311.3, "end": 2311.66, "word": " digits.", "probability": 0.95068359375}], "temperature": 1.0}, {"id": 87, "seek": 233801, "start": 2313.29, "end": 2338.01, "text": " Now the first one is nineteen, twenty-two, thirty-nine, fifty, thirty-four, five. So I have to see. So number one is five. What's the next one? The next one just add K. K is ten. So next is fifteen. 
Then twenty-five, then thirty-four.", "tokens": [823, 264, 700, 472, 307, 31555, 11, 7699, 12, 20534, 11, 11790, 12, 46140, 11, 13442, 11, 11790, 12, 23251, 11, 1732, 13, 407, 286, 362, 281, 536, 13, 407, 1230, 472, 307, 1732, 13, 708, 311, 264, 958, 472, 30, 440, 958, 472, 445, 909, 591, 13, 591, 307, 2064, 13, 407, 958, 307, 18126, 13, 1396, 7699, 12, 18621, 11, 550, 11790, 12, 23251, 13], "avg_logprob": -0.27297795563936234, "compression_ratio": 1.598639455782313, "no_speech_prob": 2.5928020477294922e-05, "words": [{"start": 2313.29, "end": 2313.55, "word": " Now", "probability": 0.302978515625}, {"start": 2313.55, "end": 2313.73, "word": " the", "probability": 0.5498046875}, {"start": 2313.73, "end": 2313.97, "word": " first", "probability": 0.8798828125}, {"start": 2313.97, "end": 2314.19, "word": " one", "probability": 0.921875}, {"start": 2314.19, "end": 2314.33, "word": " is", "probability": 0.8984375}, {"start": 2314.33, "end": 2314.77, "word": " nineteen,", "probability": 0.1375732421875}, {"start": 2314.95, "end": 2315.25, "word": " twenty", "probability": 0.80126953125}, {"start": 2315.25, "end": 2315.63, "word": "-two,", "probability": 0.812255859375}, {"start": 2315.89, "end": 2316.11, "word": " thirty", "probability": 0.935546875}, {"start": 2316.11, "end": 2316.49, "word": "-nine,", "probability": 0.939697265625}, {"start": 2317.13, "end": 2317.53, "word": " fifty,", "probability": 0.90869140625}, {"start": 2317.91, "end": 2318.31, "word": " thirty", "probability": 0.93017578125}, {"start": 2318.31, "end": 2318.83, "word": "-four,", "probability": 0.94775390625}, {"start": 2319.41, "end": 2320.03, "word": " five.", "probability": 0.884765625}, {"start": 2321.69, "end": 2322.29, "word": " So", "probability": 0.859375}, {"start": 2322.29, "end": 2322.61, "word": " I", "probability": 0.7392578125}, {"start": 2322.61, "end": 2322.89, "word": " have", "probability": 0.95263671875}, {"start": 2322.89, "end": 2323.03, "word": " to", "probability": 
0.8955078125}, {"start": 2323.03, "end": 2323.11, "word": " see.", "probability": 0.21044921875}, {"start": 2323.29, "end": 2323.45, "word": " So", "probability": 0.8271484375}, {"start": 2323.45, "end": 2323.73, "word": " number", "probability": 0.8623046875}, {"start": 2323.73, "end": 2323.95, "word": " one", "probability": 0.912109375}, {"start": 2323.95, "end": 2324.09, "word": " is", "probability": 0.947265625}, {"start": 2324.09, "end": 2324.33, "word": " five.", "probability": 0.89453125}, {"start": 2324.45, "end": 2324.65, "word": " What's", "probability": 0.84130859375}, {"start": 2324.65, "end": 2324.79, "word": " the", "probability": 0.916015625}, {"start": 2324.79, "end": 2324.99, "word": " next", "probability": 0.93798828125}, {"start": 2324.99, "end": 2325.27, "word": " one?", "probability": 0.921875}, {"start": 2325.69, "end": 2326.01, "word": " The", "probability": 0.88720703125}, {"start": 2326.01, "end": 2326.23, "word": " next", "probability": 0.935546875}, {"start": 2326.23, "end": 2326.49, "word": " one", "probability": 0.923828125}, {"start": 2326.49, "end": 2326.83, "word": " just", "probability": 0.61376953125}, {"start": 2326.83, "end": 2327.35, "word": " add", "probability": 0.7451171875}, {"start": 2327.35, "end": 2327.69, "word": " K.", "probability": 0.429931640625}, {"start": 2329.03, "end": 2329.63, "word": " K", "probability": 0.58154296875}, {"start": 2329.63, "end": 2329.85, "word": " is", "probability": 0.9287109375}, {"start": 2329.85, "end": 2330.11, "word": " ten.", "probability": 0.869140625}, {"start": 2330.63, "end": 2330.83, "word": " So", "probability": 0.92236328125}, {"start": 2330.83, "end": 2331.05, "word": " next", "probability": 0.92333984375}, {"start": 2331.05, "end": 2331.25, "word": " is", "probability": 0.9287109375}, {"start": 2331.25, "end": 2331.71, "word": " fifteen.", "probability": 0.88818359375}, {"start": 2333.67, "end": 2334.19, "word": " Then", "probability": 0.640625}, {"start": 2334.19, "end": 
2334.91, "word": " twenty", "probability": 0.751953125}, {"start": 2334.91, "end": 2335.29, "word": "-five,", "probability": 0.942626953125}, {"start": 2335.69, "end": 2336.73, "word": " then", "probability": 0.8447265625}, {"start": 2336.73, "end": 2337.89, "word": " thirty", "probability": 0.40380859375}, {"start": 2337.89, "end": 2338.01, "word": "-four.", "probability": 0.71044921875}], "temperature": 1.0}, {"id": 88, "seek": 237178, "start": 2342.9, "end": 2371.78, "text": " Number size consists of four items. So the first number is chosen randomly by using the random table. The next number just add the step. This is step. So my step is 10 because number one is five. The first item I mean is five. Then it should be 15, 25, 35, and so on if we have more than that.", "tokens": [5118, 2744, 14689, 295, 1451, 4754, 13, 407, 264, 700, 1230, 307, 8614, 16979, 538, 1228, 264, 4974, 3199, 13, 440, 958, 1230, 445, 909, 264, 1823, 13, 639, 307, 1823, 13, 407, 452, 1823, 307, 1266, 570, 1230, 472, 307, 1732, 13, 440, 700, 3174, 286, 914, 307, 1732, 13, 1396, 309, 820, 312, 2119, 11, 3552, 11, 6976, 11, 293, 370, 322, 498, 321, 362, 544, 813, 300, 13], "avg_logprob": -0.24587673652503225, "compression_ratio": 1.5806451612903225, "no_speech_prob": 0.0, "words": [{"start": 2342.9000000000005, "end": 2343.5800000000004, "word": " Number", "probability": 0.0821533203125}, {"start": 2343.5800000000004, "end": 2344.26, "word": " size", "probability": 0.66748046875}, {"start": 2344.26, "end": 2345.02, "word": " consists", "probability": 0.755859375}, {"start": 2345.02, "end": 2345.42, "word": " of", "probability": 0.96826171875}, {"start": 2345.42, "end": 2345.82, "word": " four", "probability": 0.80908203125}, {"start": 2345.82, "end": 2346.48, "word": " items.", "probability": 0.7802734375}, {"start": 2347.9, "end": 2348.14, "word": " So", "probability": 0.86865234375}, {"start": 2348.14, "end": 2348.38, "word": " the", "probability": 0.767578125}, {"start": 2348.38, "end": 
2348.84, "word": " first", "probability": 0.8740234375}, {"start": 2348.84, "end": 2349.46, "word": " number", "probability": 0.92724609375}, {"start": 2349.46, "end": 2349.82, "word": " is", "probability": 0.93212890625}, {"start": 2349.82, "end": 2350.5, "word": " chosen", "probability": 0.9619140625}, {"start": 2350.5, "end": 2351.24, "word": " randomly", "probability": 0.80322265625}, {"start": 2351.24, "end": 2351.76, "word": " by", "probability": 0.87158203125}, {"start": 2351.76, "end": 2352.2, "word": " using", "probability": 0.93310546875}, {"start": 2352.2, "end": 2352.42, "word": " the", "probability": 0.79443359375}, {"start": 2352.42, "end": 2352.74, "word": " random", "probability": 0.8203125}, {"start": 2352.74, "end": 2353.22, "word": " table.", "probability": 0.87646484375}, {"start": 2353.92, "end": 2354.1, "word": " The", "probability": 0.69482421875}, {"start": 2354.1, "end": 2354.48, "word": " next", "probability": 0.90771484375}, {"start": 2354.48, "end": 2354.86, "word": " number", "probability": 0.9267578125}, {"start": 2354.86, "end": 2355.32, "word": " just", "probability": 0.85302734375}, {"start": 2355.32, "end": 2355.82, "word": " add", "probability": 0.33349609375}, {"start": 2355.82, "end": 2356.16, "word": " the", "probability": 0.56591796875}, {"start": 2356.16, "end": 2356.44, "word": " step.", "probability": 0.716796875}, {"start": 2356.96, "end": 2357.08, "word": " This", "probability": 0.495849609375}, {"start": 2357.08, "end": 2357.26, "word": " is", "probability": 0.84619140625}, {"start": 2357.26, "end": 2357.54, "word": " step.", "probability": 0.8740234375}, {"start": 2360.2, "end": 2360.38, "word": " So", "probability": 0.5244140625}, {"start": 2360.38, "end": 2360.58, "word": " my", "probability": 0.9345703125}, {"start": 2360.58, "end": 2360.9, "word": " step", "probability": 0.9423828125}, {"start": 2360.9, "end": 2361.14, "word": " is", "probability": 0.9423828125}, {"start": 2361.14, "end": 2361.62, "word": " 10", 
"probability": 0.6748046875}, {"start": 2361.62, "end": 2362.32, "word": " because", "probability": 0.50537109375}, {"start": 2362.32, "end": 2363.14, "word": " number", "probability": 0.89013671875}, {"start": 2363.14, "end": 2363.78, "word": " one", "probability": 0.7275390625}, {"start": 2363.78, "end": 2363.96, "word": " is", "probability": 0.9453125}, {"start": 2363.96, "end": 2364.34, "word": " five.", "probability": 0.84619140625}, {"start": 2365.3, "end": 2365.42, "word": " The", "probability": 0.83740234375}, {"start": 2365.42, "end": 2365.68, "word": " first", "probability": 0.8818359375}, {"start": 2365.68, "end": 2366.02, "word": " item", "probability": 0.95947265625}, {"start": 2366.02, "end": 2366.16, "word": " I", "probability": 0.66796875}, {"start": 2366.16, "end": 2366.28, "word": " mean", "probability": 0.7607421875}, {"start": 2366.28, "end": 2366.46, "word": " is", "probability": 0.90478515625}, {"start": 2366.46, "end": 2366.76, "word": " five.", "probability": 0.9033203125}, {"start": 2367.2, "end": 2367.34, "word": " Then", "probability": 0.43603515625}, {"start": 2367.34, "end": 2367.5, "word": " it", "probability": 0.88427734375}, {"start": 2367.5, "end": 2367.66, "word": " should", "probability": 0.96240234375}, {"start": 2367.66, "end": 2367.8, "word": " be", "probability": 0.94482421875}, {"start": 2367.8, "end": 2368.24, "word": " 15,", "probability": 0.896484375}, {"start": 2368.34, "end": 2368.76, "word": " 25,", "probability": 0.97216796875}, {"start": 2369.0, "end": 2369.36, "word": " 35,", "probability": 0.9765625}, {"start": 2369.88, "end": 2370.58, "word": " and", "probability": 0.9345703125}, {"start": 2370.58, "end": 2370.78, "word": " so", "probability": 0.95654296875}, {"start": 2370.78, "end": 2370.9, "word": " on", "probability": 0.9189453125}, {"start": 2370.9, "end": 2371.04, "word": " if", "probability": 0.69287109375}, {"start": 2371.04, "end": 2371.14, "word": " we", "probability": 0.8583984375}, {"start": 2371.14, 
"end": 2371.26, "word": " have", "probability": 0.6494140625}, {"start": 2371.26, "end": 2371.46, "word": " more", "probability": 0.93896484375}, {"start": 2371.46, "end": 2371.6, "word": " than", "probability": 0.94287109375}, {"start": 2371.6, "end": 2371.78, "word": " that.", "probability": 0.9072265625}], "temperature": 1.0}, {"id": 89, "seek": 240053, "start": 2373.23, "end": 2400.53, "text": " Okay, so that's for, in this example, he choose item number seven. Random selection, number seven. So next should be 17, 27, 37, and so on. Let's do another example. Suppose there are", "tokens": [1033, 11, 370, 300, 311, 337, 11, 294, 341, 1365, 11, 415, 2826, 3174, 1230, 3407, 13, 37603, 9450, 11, 1230, 3407, 13, 407, 958, 820, 312, 3282, 11, 7634, 11, 13435, 11, 293, 370, 322, 13, 961, 311, 360, 1071, 1365, 13, 21360, 456, 366], "avg_logprob": -0.15383976634512556, "compression_ratio": 1.3430656934306568, "no_speech_prob": 0.0, "words": [{"start": 2373.23, "end": 2373.47, "word": " Okay,", "probability": 0.81884765625}, {"start": 2373.57, "end": 2373.69, "word": " so", "probability": 0.9091796875}, {"start": 2373.69, "end": 2374.09, "word": " that's", "probability": 0.90673828125}, {"start": 2374.09, "end": 2374.45, "word": " for,", "probability": 0.89013671875}, {"start": 2374.95, "end": 2375.23, "word": " in", "probability": 0.88671875}, {"start": 2375.23, "end": 2376.41, "word": " this", "probability": 0.94140625}, {"start": 2376.41, "end": 2376.89, "word": " example,", "probability": 0.9794921875}, {"start": 2377.11, "end": 2377.31, "word": " he", "probability": 0.939453125}, {"start": 2377.31, "end": 2377.73, "word": " choose", "probability": 0.50634765625}, {"start": 2377.73, "end": 2378.17, "word": " item", "probability": 0.9560546875}, {"start": 2378.17, "end": 2378.53, "word": " number", "probability": 0.93212890625}, {"start": 2378.53, "end": 2378.89, "word": " seven.", "probability": 0.82177734375}, {"start": 2380.61, "end": 2381.03, "word": " Random", 
"probability": 0.8671875}, {"start": 2381.03, "end": 2381.57, "word": " selection,", "probability": 0.67041015625}, {"start": 2382.17, "end": 2382.41, "word": " number", "probability": 0.94482421875}, {"start": 2382.41, "end": 2382.79, "word": " seven.", "probability": 0.90087890625}, {"start": 2383.23, "end": 2383.47, "word": " So", "probability": 0.95263671875}, {"start": 2383.47, "end": 2383.71, "word": " next", "probability": 0.8232421875}, {"start": 2383.71, "end": 2383.91, "word": " should", "probability": 0.955078125}, {"start": 2383.91, "end": 2384.11, "word": " be", "probability": 0.9521484375}, {"start": 2384.11, "end": 2384.71, "word": " 17,", "probability": 0.93310546875}, {"start": 2385.09, "end": 2385.87, "word": " 27,", "probability": 0.97802734375}, {"start": 2386.47, "end": 2387.11, "word": " 37,", "probability": 0.9755859375}, {"start": 2387.23, "end": 2387.37, "word": " and", "probability": 0.92724609375}, {"start": 2387.37, "end": 2387.51, "word": " so", "probability": 0.95166015625}, {"start": 2387.51, "end": 2387.67, "word": " on.", "probability": 0.9453125}, {"start": 2389.11, "end": 2389.87, "word": " Let's", "probability": 0.967041015625}, {"start": 2389.87, "end": 2390.01, "word": " do", "probability": 0.96435546875}, {"start": 2390.01, "end": 2390.29, "word": " another", "probability": 0.919921875}, {"start": 2390.29, "end": 2390.71, "word": " example.", "probability": 0.974609375}, {"start": 2398.59, "end": 2399.35, "word": " Suppose", "probability": 0.78759765625}, {"start": 2399.35, "end": 2400.15, "word": " there", "probability": 0.88720703125}, {"start": 2400.15, "end": 2400.53, "word": " are", "probability": 0.94677734375}], "temperature": 1.0}, {"id": 90, "seek": 243026, "start": 2402.48, "end": 2430.26, "text": " In this class, there are 50 students. So the total is 50. 10 students out of 50. So my sample is 10. 
Now still, 50 divided by 10 is 50.", "tokens": [682, 341, 1508, 11, 456, 366, 2625, 1731, 13, 407, 264, 3217, 307, 2625, 13, 1266, 1731, 484, 295, 2625, 13, 407, 452, 6889, 307, 1266, 13, 823, 920, 11, 2625, 6666, 538, 1266, 307, 2625, 13], "avg_logprob": -0.22779605106303566, "compression_ratio": 1.2710280373831775, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2402.48, "end": 2402.7, "word": " In", "probability": 0.7470703125}, {"start": 2402.7, "end": 2402.94, "word": " this", "probability": 0.9482421875}, {"start": 2402.94, "end": 2403.46, "word": " class,", "probability": 0.96923828125}, {"start": 2405.12, "end": 2405.7, "word": " there", "probability": 0.8984375}, {"start": 2405.7, "end": 2406.0, "word": " are", "probability": 0.9482421875}, {"start": 2406.0, "end": 2406.54, "word": " 50", "probability": 0.7568359375}, {"start": 2406.54, "end": 2408.48, "word": " students.", "probability": 0.9814453125}, {"start": 2409.5, "end": 2410.56, "word": " So", "probability": 0.88525390625}, {"start": 2410.56, "end": 2410.7, "word": " the", "probability": 0.8173828125}, {"start": 2410.7, "end": 2411.12, "word": " total", "probability": 0.8623046875}, {"start": 2411.12, "end": 2412.08, "word": " is", "probability": 0.9345703125}, {"start": 2412.08, "end": 2412.4, "word": " 50.", "probability": 0.9130859375}, {"start": 2415.32, "end": 2416.38, "word": " 10", "probability": 0.5458984375}, {"start": 2416.38, "end": 2417.78, "word": " students", "probability": 0.97216796875}, {"start": 2417.78, "end": 2419.06, "word": " out", "probability": 0.841796875}, {"start": 2419.06, "end": 2420.24, "word": " of", "probability": 0.97119140625}, {"start": 2420.24, "end": 2420.66, "word": " 50.", "probability": 0.947265625}, {"start": 2422.44, "end": 2422.76, "word": " So", "probability": 0.697265625}, {"start": 2422.76, "end": 2423.06, "word": " my", "probability": 0.84912109375}, {"start": 2423.06, "end": 2423.54, "word": " sample", "probability": 0.7080078125}, 
{"start": 2423.54, "end": 2424.74, "word": " is", "probability": 0.83154296875}, {"start": 2424.74, "end": 2425.38, "word": " 10.", "probability": 0.94287109375}, {"start": 2426.52, "end": 2426.78, "word": " Now", "probability": 0.8349609375}, {"start": 2426.78, "end": 2427.24, "word": " still,", "probability": 0.291748046875}, {"start": 2428.02, "end": 2429.2, "word": " 50", "probability": 0.931640625}, {"start": 2429.2, "end": 2429.54, "word": " divided", "probability": 0.78955078125}, {"start": 2429.54, "end": 2429.72, "word": " by", "probability": 0.96728515625}, {"start": 2429.72, "end": 2429.96, "word": " 10", "probability": 0.97021484375}, {"start": 2429.96, "end": 2430.1, "word": " is", "probability": 0.71484375}, {"start": 2430.1, "end": 2430.26, "word": " 50.", "probability": 0.281982421875}], "temperature": 1.0}, {"id": 91, "seek": 246227, "start": 2433.63, "end": 2462.27, "text": " So there are five items or five students in a group. So we have five in the first group and then five in the next one and so on. So we have how many groups? Ten groups. 
So first step, you have to find a step.", "tokens": [407, 456, 366, 1732, 4754, 420, 1732, 1731, 294, 257, 1594, 13, 407, 321, 362, 1732, 294, 264, 700, 1594, 293, 550, 1732, 294, 264, 958, 472, 293, 370, 322, 13, 407, 321, 362, 577, 867, 3935, 30, 9380, 3935, 13, 407, 700, 1823, 11, 291, 362, 281, 915, 257, 1823, 13], "avg_logprob": -0.19545990566037735, "compression_ratio": 1.672, "no_speech_prob": 0.0, "words": [{"start": 2433.63, "end": 2433.95, "word": " So", "probability": 0.389404296875}, {"start": 2433.95, "end": 2434.33, "word": " there", "probability": 0.69140625}, {"start": 2434.33, "end": 2434.71, "word": " are", "probability": 0.94677734375}, {"start": 2434.71, "end": 2435.39, "word": " five", "probability": 0.6513671875}, {"start": 2435.39, "end": 2436.23, "word": " items", "probability": 0.86572265625}, {"start": 2436.23, "end": 2436.61, "word": " or", "probability": 0.7646484375}, {"start": 2436.61, "end": 2437.07, "word": " five", "probability": 0.8837890625}, {"start": 2437.07, "end": 2438.03, "word": " students", "probability": 0.9755859375}, {"start": 2438.03, "end": 2439.49, "word": " in", "probability": 0.8837890625}, {"start": 2439.49, "end": 2439.65, "word": " a", "probability": 0.98974609375}, {"start": 2439.65, "end": 2439.99, "word": " group.", "probability": 0.96923828125}, {"start": 2440.81, "end": 2441.49, "word": " So", "probability": 0.9150390625}, {"start": 2441.49, "end": 2441.65, "word": " we", "probability": 0.35693359375}, {"start": 2441.65, "end": 2441.77, "word": " have", "probability": 0.94677734375}, {"start": 2441.77, "end": 2442.27, "word": " five", "probability": 0.87353515625}, {"start": 2442.27, "end": 2445.37, "word": " in", "probability": 0.8369140625}, {"start": 2445.37, "end": 2445.53, "word": " the", "probability": 0.919921875}, {"start": 2445.53, "end": 2445.87, "word": " first", "probability": 0.8837890625}, {"start": 2445.87, "end": 2446.35, "word": " group", "probability": 0.95751953125}, {"start": 2446.35, 
"end": 2448.33, "word": " and", "probability": 0.308837890625}, {"start": 2448.33, "end": 2448.53, "word": " then", "probability": 0.85205078125}, {"start": 2448.53, "end": 2449.01, "word": " five", "probability": 0.89990234375}, {"start": 2449.01, "end": 2450.67, "word": " in", "probability": 0.919921875}, {"start": 2450.67, "end": 2450.81, "word": " the", "probability": 0.91650390625}, {"start": 2450.81, "end": 2451.03, "word": " next", "probability": 0.935546875}, {"start": 2451.03, "end": 2451.29, "word": " one", "probability": 0.89892578125}, {"start": 2451.29, "end": 2451.49, "word": " and", "probability": 0.8203125}, {"start": 2451.49, "end": 2451.67, "word": " so", "probability": 0.9521484375}, {"start": 2451.67, "end": 2451.89, "word": " on.", "probability": 0.94482421875}, {"start": 2452.95, "end": 2453.17, "word": " So", "probability": 0.93994140625}, {"start": 2453.17, "end": 2453.31, "word": " we", "probability": 0.8388671875}, {"start": 2453.31, "end": 2453.49, "word": " have", "probability": 0.951171875}, {"start": 2453.49, "end": 2453.63, "word": " how", "probability": 0.9345703125}, {"start": 2453.63, "end": 2453.81, "word": " many", "probability": 0.8994140625}, {"start": 2453.81, "end": 2454.17, "word": " groups?", "probability": 0.93994140625}, {"start": 2454.57, "end": 2454.83, "word": " Ten", "probability": 0.685546875}, {"start": 2454.83, "end": 2456.13, "word": " groups.", "probability": 0.59130859375}, {"start": 2459.53, "end": 2459.87, "word": " So", "probability": 0.861328125}, {"start": 2459.87, "end": 2460.17, "word": " first", "probability": 0.748046875}, {"start": 2460.17, "end": 2460.53, "word": " step,", "probability": 0.9384765625}, {"start": 2460.73, "end": 2461.19, "word": " you", "probability": 0.95703125}, {"start": 2461.19, "end": 2461.39, "word": " have", "probability": 0.94970703125}, {"start": 2461.39, "end": 2461.53, "word": " to", "probability": 0.96923828125}, {"start": 2461.53, "end": 2461.79, "word": " find", 
"probability": 0.8984375}, {"start": 2461.79, "end": 2461.99, "word": " a", "probability": 0.84912109375}, {"start": 2461.99, "end": 2462.27, "word": " step.", "probability": 0.89892578125}], "temperature": 1.0}, {"id": 92, "seek": 249165, "start": 2463.83, "end": 2491.65, "text": " Still it means number of items or number of students in a group. Next step, select student at random from the first group, so random selection. Now, here there are five students, so 01, I'm sorry, not 01, 1, 2, 3, 4, 5, so one digit.", "tokens": [8291, 309, 1355, 1230, 295, 4754, 420, 1230, 295, 1731, 294, 257, 1594, 13, 3087, 1823, 11, 3048, 3107, 412, 4974, 490, 264, 700, 1594, 11, 370, 4974, 9450, 13, 823, 11, 510, 456, 366, 1732, 1731, 11, 370, 23185, 11, 286, 478, 2597, 11, 406, 23185, 11, 502, 11, 568, 11, 805, 11, 1017, 11, 1025, 11, 370, 472, 14293, 13], "avg_logprob": -0.21329365268586173, "compression_ratio": 1.5096774193548388, "no_speech_prob": 0.0, "words": [{"start": 2463.83, "end": 2464.17, "word": " Still", "probability": 0.54833984375}, {"start": 2464.17, "end": 2464.33, "word": " it", "probability": 0.66259765625}, {"start": 2464.33, "end": 2464.51, "word": " means", "probability": 0.87109375}, {"start": 2464.51, "end": 2464.89, "word": " number", "probability": 0.85986328125}, {"start": 2464.89, "end": 2465.07, "word": " of", "probability": 0.9638671875}, {"start": 2465.07, "end": 2465.53, "word": " items", "probability": 0.85791015625}, {"start": 2465.53, "end": 2465.71, "word": " or", "probability": 0.70654296875}, {"start": 2465.71, "end": 2465.97, "word": " number", "probability": 0.916015625}, {"start": 2465.97, "end": 2466.15, "word": " of", "probability": 0.96142578125}, {"start": 2466.15, "end": 2466.71, "word": " students", "probability": 0.96923828125}, {"start": 2466.71, "end": 2467.77, "word": " in", "probability": 0.91357421875}, {"start": 2467.77, "end": 2467.93, "word": " a", "probability": 0.97705078125}, {"start": 2467.93, "end": 2468.17, "word": " 
group.", "probability": 0.96142578125}, {"start": 2470.09, "end": 2470.77, "word": " Next", "probability": 0.9248046875}, {"start": 2470.77, "end": 2471.23, "word": " step,", "probability": 0.91943359375}, {"start": 2472.31, "end": 2473.09, "word": " select", "probability": 0.85498046875}, {"start": 2473.09, "end": 2474.53, "word": " student", "probability": 0.7744140625}, {"start": 2474.53, "end": 2474.91, "word": " at", "probability": 0.94677734375}, {"start": 2474.91, "end": 2475.25, "word": " random", "probability": 0.8623046875}, {"start": 2475.25, "end": 2476.17, "word": " from", "probability": 0.77734375}, {"start": 2476.17, "end": 2476.39, "word": " the", "probability": 0.9228515625}, {"start": 2476.39, "end": 2476.73, "word": " first", "probability": 0.89208984375}, {"start": 2476.73, "end": 2477.21, "word": " group,", "probability": 0.95947265625}, {"start": 2477.81, "end": 2477.93, "word": " so", "probability": 0.8564453125}, {"start": 2477.93, "end": 2478.19, "word": " random", "probability": 0.61572265625}, {"start": 2478.19, "end": 2478.57, "word": " selection.", "probability": 0.88818359375}, {"start": 2480.61, "end": 2480.99, "word": " Now,", "probability": 0.95166015625}, {"start": 2481.69, "end": 2482.01, "word": " here", "probability": 0.81640625}, {"start": 2482.01, "end": 2482.41, "word": " there", "probability": 0.77978515625}, {"start": 2482.41, "end": 2482.57, "word": " are", "probability": 0.94677734375}, {"start": 2482.57, "end": 2482.83, "word": " five", "probability": 0.82080078125}, {"start": 2482.83, "end": 2483.47, "word": " students,", "probability": 0.97119140625}, {"start": 2483.97, "end": 2483.97, "word": " so", "probability": 0.890625}, {"start": 2483.97, "end": 2485.15, "word": " 01,", "probability": 0.26025390625}, {"start": 2485.43, "end": 2485.85, "word": " I'm", "probability": 0.7236328125}, {"start": 2485.85, "end": 2486.05, "word": " sorry,", "probability": 0.85888671875}, {"start": 2487.97, "end": 2488.31, "word": " not", 
"probability": 0.86474609375}, {"start": 2488.31, "end": 2488.61, "word": " 01,", "probability": 0.9462890625}, {"start": 2489.15, "end": 2489.91, "word": " 1,", "probability": 0.42431640625}, {"start": 2489.95, "end": 2490.11, "word": " 2,", "probability": 0.76953125}, {"start": 2490.21, "end": 2490.43, "word": " 3,", "probability": 0.9794921875}, {"start": 2490.51, "end": 2490.71, "word": " 4,", "probability": 0.9931640625}, {"start": 2490.81, "end": 2491.03, "word": " 5,", "probability": 0.9912109375}, {"start": 2491.09, "end": 2491.21, "word": " so", "probability": 0.921875}, {"start": 2491.21, "end": 2491.41, "word": " one", "probability": 0.88720703125}, {"start": 2491.41, "end": 2491.65, "word": " digit.", "probability": 0.90966796875}], "temperature": 1.0}, {"id": 93, "seek": 251550, "start": 2493.96, "end": 2515.5, "text": " Only one digit. Because I have maximum number is five. So it's only one digit. So go again to the random table and take one digit. One. So my first item, six, eleven, sixteen, twenty-one, twenty-one, all the way up to ten items.", "tokens": [5686, 472, 14293, 13, 1436, 286, 362, 6674, 1230, 307, 1732, 13, 407, 309, 311, 787, 472, 14293, 13, 407, 352, 797, 281, 264, 4974, 3199, 293, 747, 472, 14293, 13, 1485, 13, 407, 452, 700, 3174, 11, 2309, 11, 21090, 11, 27847, 11, 7699, 12, 546, 11, 7699, 12, 546, 11, 439, 264, 636, 493, 281, 2064, 4754, 13], "avg_logprob": -0.2797131264796022, "compression_ratio": 1.5578231292517006, "no_speech_prob": 1.7285346984863281e-06, "words": [{"start": 2493.96, "end": 2494.64, "word": " Only", "probability": 0.1297607421875}, {"start": 2494.64, "end": 2494.84, "word": " one", "probability": 0.783203125}, {"start": 2494.84, "end": 2495.08, "word": " digit.", "probability": 0.93798828125}, {"start": 2495.8, "end": 2496.24, "word": " Because", "probability": 0.75439453125}, {"start": 2496.24, "end": 2496.36, "word": " I", "probability": 0.859375}, {"start": 2496.36, "end": 2496.56, "word": " have", 
"probability": 0.94384765625}, {"start": 2496.56, "end": 2497.18, "word": " maximum", "probability": 0.89111328125}, {"start": 2497.18, "end": 2497.74, "word": " number", "probability": 0.93408203125}, {"start": 2497.74, "end": 2498.24, "word": " is", "probability": 0.52490234375}, {"start": 2498.24, "end": 2498.6, "word": " five.", "probability": 0.63232421875}, {"start": 2499.16, "end": 2499.3, "word": " So", "probability": 0.76953125}, {"start": 2499.3, "end": 2499.42, "word": " it's", "probability": 0.83251953125}, {"start": 2499.42, "end": 2499.62, "word": " only", "probability": 0.92578125}, {"start": 2499.62, "end": 2499.82, "word": " one", "probability": 0.9169921875}, {"start": 2499.82, "end": 2500.08, "word": " digit.", "probability": 0.92919921875}, {"start": 2500.68, "end": 2500.86, "word": " So", "probability": 0.9189453125}, {"start": 2500.86, "end": 2501.24, "word": " go", "probability": 0.896484375}, {"start": 2501.24, "end": 2501.66, "word": " again", "probability": 0.9453125}, {"start": 2501.66, "end": 2502.08, "word": " to", "probability": 0.54833984375}, {"start": 2502.08, "end": 2502.22, "word": " the", "probability": 0.89306640625}, {"start": 2502.22, "end": 2502.5, "word": " random", "probability": 0.865234375}, {"start": 2502.5, "end": 2502.92, "word": " table", "probability": 0.8203125}, {"start": 2502.92, "end": 2503.6, "word": " and", "probability": 0.7587890625}, {"start": 2503.6, "end": 2503.94, "word": " take", "probability": 0.72509765625}, {"start": 2503.94, "end": 2504.18, "word": " one", "probability": 0.9208984375}, {"start": 2504.18, "end": 2504.48, "word": " digit.", "probability": 0.9521484375}, {"start": 2504.8, "end": 2505.46, "word": " One.", "probability": 0.49853515625}, {"start": 2505.84, "end": 2506.14, "word": " So", "probability": 0.87060546875}, {"start": 2506.14, "end": 2506.42, "word": " my", "probability": 0.9072265625}, {"start": 2506.42, "end": 2506.94, "word": " first", "probability": 0.88427734375}, {"start": 
2506.94, "end": 2507.2, "word": " item,", "probability": 0.83056640625}, {"start": 2507.74, "end": 2508.22, "word": " six,", "probability": 0.418212890625}, {"start": 2508.76, "end": 2509.18, "word": " eleven,", "probability": 0.771484375}, {"start": 2509.48, "end": 2510.06, "word": " sixteen,", "probability": 0.91796875}, {"start": 2510.44, "end": 2510.94, "word": " twenty", "probability": 0.8876953125}, {"start": 2510.94, "end": 2511.24, "word": "-one,", "probability": 0.798095703125}, {"start": 2511.58, "end": 2511.86, "word": " twenty", "probability": 0.798828125}, {"start": 2511.86, "end": 2512.12, "word": "-one,", "probability": 0.952880859375}, {"start": 2512.2, "end": 2512.42, "word": " all", "probability": 0.91845703125}, {"start": 2512.42, "end": 2512.58, "word": " the", "probability": 0.923828125}, {"start": 2512.58, "end": 2512.88, "word": " way", "probability": 0.955078125}, {"start": 2512.88, "end": 2514.06, "word": " up", "probability": 0.90087890625}, {"start": 2514.06, "end": 2514.36, "word": " to", "probability": 0.96337890625}, {"start": 2514.36, "end": 2514.9, "word": " ten", "probability": 0.89111328125}, {"start": 2514.9, "end": 2515.5, "word": " items.", "probability": 0.806640625}], "temperature": 1.0}, {"id": 94, "seek": 254613, "start": 2533.13, "end": 2546.13, "text": " So I choose student number one, then skip five, choose number six, and so on. It's called systematic. 
Because if you know the first item,", "tokens": [407, 286, 2826, 3107, 1230, 472, 11, 550, 10023, 1732, 11, 2826, 1230, 2309, 11, 293, 370, 322, 13, 467, 311, 1219, 27249, 13, 1436, 498, 291, 458, 264, 700, 3174, 11], "avg_logprob": -0.27296401515151514, "compression_ratio": 1.2432432432432432, "no_speech_prob": 0.0, "words": [{"start": 2533.1299999999997, "end": 2533.97, "word": " So", "probability": 0.07794189453125}, {"start": 2533.97, "end": 2534.81, "word": " I", "probability": 0.7216796875}, {"start": 2534.81, "end": 2535.35, "word": " choose", "probability": 0.6044921875}, {"start": 2535.35, "end": 2535.77, "word": " student", "probability": 0.8798828125}, {"start": 2535.77, "end": 2536.09, "word": " number", "probability": 0.8896484375}, {"start": 2536.09, "end": 2536.49, "word": " one,", "probability": 0.6796875}, {"start": 2536.81, "end": 2537.09, "word": " then", "probability": 0.8193359375}, {"start": 2537.09, "end": 2537.73, "word": " skip", "probability": 0.7587890625}, {"start": 2537.73, "end": 2538.17, "word": " five,", "probability": 0.8125}, {"start": 2539.05, "end": 2539.55, "word": " choose", "probability": 0.8916015625}, {"start": 2539.55, "end": 2539.81, "word": " number", "probability": 0.94482421875}, {"start": 2539.81, "end": 2540.21, "word": " six,", "probability": 0.93408203125}, {"start": 2540.33, "end": 2540.43, "word": " and", "probability": 0.9365234375}, {"start": 2540.43, "end": 2540.65, "word": " so", "probability": 0.9580078125}, {"start": 2540.65, "end": 2541.17, "word": " on.", "probability": 0.89501953125}, {"start": 2541.69, "end": 2541.97, "word": " It's", "probability": 0.851318359375}, {"start": 2541.97, "end": 2542.23, "word": " called", "probability": 0.86865234375}, {"start": 2542.23, "end": 2542.85, "word": " systematic.", "probability": 0.875}, {"start": 2543.71, "end": 2544.17, "word": " Because", "probability": 0.923828125}, {"start": 2544.17, "end": 2544.55, "word": " if", "probability": 0.9130859375}, {"start": 
2544.55, "end": 2544.71, "word": " you", "probability": 0.95703125}, {"start": 2544.71, "end": 2544.87, "word": " know", "probability": 0.80712890625}, {"start": 2544.87, "end": 2545.07, "word": " the", "probability": 0.904296875}, {"start": 2545.07, "end": 2545.49, "word": " first", "probability": 0.86865234375}, {"start": 2545.49, "end": 2546.13, "word": " item,", "probability": 0.96435546875}], "temperature": 1.0}, {"id": 95, "seek": 257721, "start": 2548.55, "end": 2577.21, "text": " and the step you can know the rest of these. Imagine that you want to select 10 students who entered the cafe shop or restaurant. You can pick one of them. So suppose I'm taking number three and my step is six.", "tokens": [293, 264, 1823, 291, 393, 458, 264, 1472, 295, 613, 13, 11739, 300, 291, 528, 281, 3048, 1266, 1731, 567, 9065, 264, 17773, 3945, 420, 6383, 13, 509, 393, 1888, 472, 295, 552, 13, 407, 7297, 286, 478, 1940, 1230, 1045, 293, 452, 1823, 307, 2309, 13], "avg_logprob": -0.3128255108992259, "compression_ratio": 1.4256756756756757, "no_speech_prob": 0.0, "words": [{"start": 2548.55, "end": 2548.99, "word": " and", "probability": 0.56689453125}, {"start": 2548.99, "end": 2549.27, "word": " the", "probability": 0.72265625}, {"start": 2549.27, "end": 2549.59, "word": " step", "probability": 0.76123046875}, {"start": 2549.59, "end": 2549.97, "word": " you", "probability": 0.70849609375}, {"start": 2549.97, "end": 2550.37, "word": " can", "probability": 0.93798828125}, {"start": 2550.37, "end": 2550.99, "word": " know", "probability": 0.8134765625}, {"start": 2550.99, "end": 2551.19, "word": " the", "probability": 0.90673828125}, {"start": 2551.19, "end": 2551.57, "word": " rest", "probability": 0.94091796875}, {"start": 2551.57, "end": 2552.23, "word": " of", "probability": 0.9482421875}, {"start": 2552.23, "end": 2552.69, "word": " these.", "probability": 0.3310546875}, {"start": 2557.31, "end": 2558.09, "word": " Imagine", "probability": 0.73095703125}, {"start": 
2558.09, "end": 2558.45, "word": " that", "probability": 0.9296875}, {"start": 2558.45, "end": 2559.43, "word": " you", "probability": 0.9013671875}, {"start": 2559.43, "end": 2559.63, "word": " want", "probability": 0.86572265625}, {"start": 2559.63, "end": 2559.75, "word": " to", "probability": 0.96923828125}, {"start": 2559.75, "end": 2560.07, "word": " select", "probability": 0.85400390625}, {"start": 2560.07, "end": 2560.39, "word": " 10", "probability": 0.5703125}, {"start": 2560.39, "end": 2560.89, "word": " students", "probability": 0.96875}, {"start": 2560.89, "end": 2561.15, "word": " who", "probability": 0.8876953125}, {"start": 2561.15, "end": 2561.69, "word": " entered", "probability": 0.76220703125}, {"start": 2561.69, "end": 2563.57, "word": " the", "probability": 0.1912841796875}, {"start": 2563.57, "end": 2564.91, "word": " cafe", "probability": 0.3642578125}, {"start": 2564.91, "end": 2565.25, "word": " shop", "probability": 0.94091796875}, {"start": 2565.25, "end": 2566.63, "word": " or", "probability": 0.81396484375}, {"start": 2566.63, "end": 2567.13, "word": " restaurant.", "probability": 0.85400390625}, {"start": 2567.59, "end": 2567.65, "word": " You", "probability": 0.85498046875}, {"start": 2567.65, "end": 2567.83, "word": " can", "probability": 0.9345703125}, {"start": 2567.83, "end": 2568.01, "word": " pick", "probability": 0.83642578125}, {"start": 2568.01, "end": 2568.23, "word": " one", "probability": 0.88818359375}, {"start": 2568.23, "end": 2568.33, "word": " of", "probability": 0.3583984375}, {"start": 2568.33, "end": 2568.35, "word": " them.", "probability": 0.7431640625}, {"start": 2569.75, "end": 2570.01, "word": " So", "probability": 0.6416015625}, {"start": 2570.01, "end": 2570.51, "word": " suppose", "probability": 0.654296875}, {"start": 2570.51, "end": 2572.75, "word": " I'm", "probability": 0.822265625}, {"start": 2572.75, "end": 2573.27, "word": " taking", "probability": 0.75048828125}, {"start": 2573.27, "end": 2574.37, 
"word": " number", "probability": 0.89892578125}, {"start": 2574.37, "end": 2574.79, "word": " three", "probability": 0.5849609375}, {"start": 2574.79, "end": 2575.93, "word": " and", "probability": 0.65576171875}, {"start": 2575.93, "end": 2576.17, "word": " my", "probability": 0.9638671875}, {"start": 2576.17, "end": 2576.49, "word": " step", "probability": 0.75634765625}, {"start": 2576.49, "end": 2576.71, "word": " is", "probability": 0.94921875}, {"start": 2576.71, "end": 2577.21, "word": " six.", "probability": 0.935546875}], "temperature": 1.0}, {"id": 96, "seek": 260355, "start": 2578.03, "end": 2603.55, "text": " So three, then nine, and so on. So that's systematic assembly. Questions? So that's about random samples and systematic. What do you mean by stratified groups?", "tokens": [407, 1045, 11, 550, 4949, 11, 293, 370, 322, 13, 407, 300, 311, 27249, 12103, 13, 27738, 30, 407, 300, 311, 466, 4974, 10938, 293, 27249, 13, 708, 360, 291, 914, 538, 23674, 2587, 3935, 30], "avg_logprob": -0.2761824227668144, "compression_ratio": 1.3445378151260505, "no_speech_prob": 0.0, "words": [{"start": 2578.03, "end": 2578.31, "word": " So", "probability": 0.6142578125}, {"start": 2578.31, "end": 2578.75, "word": " three,", "probability": 0.681640625}, {"start": 2579.15, "end": 2579.29, "word": " then", "probability": 0.6552734375}, {"start": 2579.29, "end": 2579.69, "word": " nine,", "probability": 0.493408203125}, {"start": 2580.07, "end": 2580.37, "word": " and", "probability": 0.62841796875}, {"start": 2580.37, "end": 2580.55, "word": " so", "probability": 0.734375}, {"start": 2580.55, "end": 2580.79, "word": " on.", "probability": 0.947265625}, {"start": 2585.83, "end": 2586.79, "word": " So", "probability": 0.5185546875}, {"start": 2586.79, "end": 2587.19, "word": " that's", "probability": 0.5953369140625}, {"start": 2587.19, "end": 2587.73, "word": " systematic", "probability": 0.88525390625}, {"start": 2587.73, "end": 2588.83, "word": " assembly.", "probability": 
0.505859375}, {"start": 2590.13, "end": 2590.85, "word": " Questions?", "probability": 0.26611328125}, {"start": 2592.95, "end": 2593.31, "word": " So", "probability": 0.93896484375}, {"start": 2593.31, "end": 2593.79, "word": " that's", "probability": 0.95458984375}, {"start": 2593.79, "end": 2594.33, "word": " about", "probability": 0.90771484375}, {"start": 2594.33, "end": 2595.91, "word": " random", "probability": 0.86328125}, {"start": 2595.91, "end": 2597.93, "word": " samples", "probability": 0.5947265625}, {"start": 2597.93, "end": 2598.35, "word": " and", "probability": 0.9150390625}, {"start": 2598.35, "end": 2598.91, "word": " systematic.", "probability": 0.7177734375}, {"start": 2599.93, "end": 2600.71, "word": " What", "probability": 0.8759765625}, {"start": 2600.71, "end": 2600.91, "word": " do", "probability": 0.92041015625}, {"start": 2600.91, "end": 2601.01, "word": " you", "probability": 0.759765625}, {"start": 2601.01, "end": 2601.09, "word": " mean", "probability": 0.96630859375}, {"start": 2601.09, "end": 2601.27, "word": " by", "probability": 0.96875}, {"start": 2601.27, "end": 2601.99, "word": " stratified", "probability": 0.726806640625}, {"start": 2601.99, "end": 2603.55, "word": " groups?", "probability": 0.87060546875}], "temperature": 1.0}, {"id": 97, "seek": 261412, "start": 2608.0, "end": 2614.12, "text": " Let's use a definition and an example of a stratified family.", "tokens": [961, 311, 764, 257, 7123, 293, 364, 1365, 295, 257, 23674, 2587, 1605, 13], "avg_logprob": -0.48385414282480876, "compression_ratio": 0.9393939393939394, "no_speech_prob": 0.0, "words": [{"start": 2608.0, "end": 2608.8, "word": " Let's", "probability": 0.5537109375}, {"start": 2608.8, "end": 2608.9, "word": " use", "probability": 0.403076171875}, {"start": 2608.9, "end": 2609.02, "word": " a", "probability": 0.6494140625}, {"start": 2609.02, "end": 2609.44, "word": " definition", "probability": 0.9375}, {"start": 2609.44, "end": 2610.44, "word": " and", 
"probability": 0.88623046875}, {"start": 2610.44, "end": 2610.6, "word": " an", "probability": 0.71875}, {"start": 2610.6, "end": 2611.06, "word": " example", "probability": 0.9775390625}, {"start": 2611.06, "end": 2612.86, "word": " of", "probability": 0.93212890625}, {"start": 2612.86, "end": 2613.08, "word": " a", "probability": 0.2236328125}, {"start": 2613.08, "end": 2613.74, "word": " stratified", "probability": 0.6588134765625}, {"start": 2613.74, "end": 2614.12, "word": " family.", "probability": 0.3251953125}], "temperature": 1.0}, {"id": 98, "seek": 266597, "start": 2638.81, "end": 2665.97, "text": " step one. So again imagine we have IUG population into two or more subgroups. So there are two or more. It depends on the characteristic you are using. So divide population into two or more subgroups according to some common characteristic. For example suppose", "tokens": [1823, 472, 13, 407, 797, 3811, 321, 362, 286, 52, 38, 4415, 666, 732, 420, 544, 1422, 17377, 82, 13, 407, 456, 366, 732, 420, 544, 13, 467, 5946, 322, 264, 16282, 291, 366, 1228, 13, 407, 9845, 4415, 666, 732, 420, 544, 1422, 17377, 82, 4650, 281, 512, 2689, 16282, 13, 1171, 1365, 7297], "avg_logprob": -0.24483816299055303, "compression_ratio": 1.6948051948051948, "no_speech_prob": 0.0, "words": [{"start": 2638.81, "end": 2639.23, "word": " step", "probability": 0.1715087890625}, {"start": 2639.23, "end": 2639.57, "word": " one.", "probability": 0.52587890625}, {"start": 2640.69, "end": 2640.95, "word": " So", "probability": 0.7578125}, {"start": 2640.95, "end": 2641.21, "word": " again", "probability": 0.81982421875}, {"start": 2641.21, "end": 2641.61, "word": " imagine", "probability": 0.66650390625}, {"start": 2641.61, "end": 2643.31, "word": " we", "probability": 0.791015625}, {"start": 2643.31, "end": 2643.75, "word": " have", "probability": 0.9541015625}, {"start": 2643.75, "end": 2644.57, "word": " IUG", "probability": 0.5850830078125}, {"start": 2644.57, "end": 2645.79, "word": " 
population", "probability": 0.9404296875}, {"start": 2645.79, "end": 2646.31, "word": " into", "probability": 0.8369140625}, {"start": 2646.31, "end": 2646.69, "word": " two", "probability": 0.88720703125}, {"start": 2646.69, "end": 2646.99, "word": " or", "probability": 0.962890625}, {"start": 2646.99, "end": 2647.27, "word": " more", "probability": 0.9404296875}, {"start": 2647.27, "end": 2648.31, "word": " subgroups.", "probability": 0.8997395833333334}, {"start": 2649.41, "end": 2650.25, "word": " So", "probability": 0.81494140625}, {"start": 2650.25, "end": 2650.69, "word": " there", "probability": 0.759765625}, {"start": 2650.69, "end": 2650.89, "word": " are", "probability": 0.947265625}, {"start": 2650.89, "end": 2651.17, "word": " two", "probability": 0.93896484375}, {"start": 2651.17, "end": 2651.49, "word": " or", "probability": 0.96142578125}, {"start": 2651.49, "end": 2651.81, "word": " more.", "probability": 0.93408203125}, {"start": 2652.27, "end": 2652.63, "word": " It", "probability": 0.9296875}, {"start": 2652.63, "end": 2652.95, "word": " depends", "probability": 0.908203125}, {"start": 2652.95, "end": 2653.25, "word": " on", "probability": 0.9482421875}, {"start": 2653.25, "end": 2653.57, "word": " the", "probability": 0.92333984375}, {"start": 2653.57, "end": 2654.67, "word": " characteristic", "probability": 0.869140625}, {"start": 2654.67, "end": 2655.85, "word": " you", "probability": 0.9423828125}, {"start": 2655.85, "end": 2656.01, "word": " are", "probability": 0.896484375}, {"start": 2656.01, "end": 2656.41, "word": " using.", "probability": 0.9306640625}, {"start": 2657.17, "end": 2657.37, "word": " So", "probability": 0.92041015625}, {"start": 2657.37, "end": 2658.49, "word": " divide", "probability": 0.814453125}, {"start": 2658.49, "end": 2658.97, "word": " population", "probability": 0.86962890625}, {"start": 2658.97, "end": 2659.23, "word": " into", "probability": 0.52978515625}, {"start": 2659.23, "end": 2659.37, "word": " two", 
"probability": 0.912109375}, {"start": 2659.37, "end": 2659.53, "word": " or", "probability": 0.94384765625}, {"start": 2659.53, "end": 2659.69, "word": " more", "probability": 0.9375}, {"start": 2659.69, "end": 2661.31, "word": " subgroups", "probability": 0.9122721354166666}, {"start": 2661.31, "end": 2662.01, "word": " according", "probability": 0.79296875}, {"start": 2662.01, "end": 2662.51, "word": " to", "probability": 0.966796875}, {"start": 2662.51, "end": 2662.87, "word": " some", "probability": 0.8935546875}, {"start": 2662.87, "end": 2663.39, "word": " common", "probability": 0.8681640625}, {"start": 2663.39, "end": 2664.21, "word": " characteristic.", "probability": 0.84326171875}, {"start": 2664.73, "end": 2664.89, "word": " For", "probability": 0.9541015625}, {"start": 2664.89, "end": 2665.31, "word": " example", "probability": 0.974609375}, {"start": 2665.31, "end": 2665.97, "word": " suppose", "probability": 0.74462890625}], "temperature": 1.0}, {"id": 99, "seek": 269254, "start": 2666.92, "end": 2692.54, "text": " I want to divide the student into gender. So males or females. So I have two strata. One is called males and the other is females. Now suppose the characteristic I am going to use is the levels of a student. 
First level, second, third, fourth, and so on.", "tokens": [286, 528, 281, 9845, 264, 3107, 666, 7898, 13, 407, 20776, 420, 21529, 13, 407, 286, 362, 732, 23674, 64, 13, 1485, 307, 1219, 20776, 293, 264, 661, 307, 21529, 13, 823, 7297, 264, 16282, 286, 669, 516, 281, 764, 307, 264, 4358, 295, 257, 3107, 13, 2386, 1496, 11, 1150, 11, 2636, 11, 6409, 11, 293, 370, 322, 13], "avg_logprob": -0.21144979312771656, "compression_ratio": 1.536144578313253, "no_speech_prob": 0.0, "words": [{"start": 2666.92, "end": 2667.18, "word": " I", "probability": 0.85888671875}, {"start": 2667.18, "end": 2667.56, "word": " want", "probability": 0.869140625}, {"start": 2667.56, "end": 2667.74, "word": " to", "probability": 0.96630859375}, {"start": 2667.74, "end": 2669.02, "word": " divide", "probability": 0.57080078125}, {"start": 2669.02, "end": 2669.76, "word": " the", "probability": 0.448486328125}, {"start": 2669.76, "end": 2670.28, "word": " student", "probability": 0.6240234375}, {"start": 2670.28, "end": 2671.0, "word": " into", "probability": 0.84716796875}, {"start": 2671.0, "end": 2672.08, "word": " gender.", "probability": 0.86279296875}, {"start": 2674.1, "end": 2674.36, "word": " So", "probability": 0.70556640625}, {"start": 2674.36, "end": 2674.62, "word": " males", "probability": 0.77783203125}, {"start": 2674.62, "end": 2674.84, "word": " or", "probability": 0.876953125}, {"start": 2674.84, "end": 2675.14, "word": " females.", "probability": 0.89404296875}, {"start": 2676.1, "end": 2676.44, "word": " So", "probability": 0.83447265625}, {"start": 2676.44, "end": 2676.8, "word": " I", "probability": 0.8984375}, {"start": 2676.8, "end": 2677.02, "word": " have", "probability": 0.94287109375}, {"start": 2677.02, "end": 2677.2, "word": " two", "probability": 0.830078125}, {"start": 2677.2, "end": 2677.78, "word": " strata.", "probability": 0.5093994140625}, {"start": 2678.46, "end": 2678.64, "word": " One", "probability": 0.91357421875}, {"start": 2678.64, "end": 2678.84, 
"word": " is", "probability": 0.869140625}, {"start": 2678.84, "end": 2679.2, "word": " called", "probability": 0.86865234375}, {"start": 2679.2, "end": 2679.84, "word": " males", "probability": 0.865234375}, {"start": 2679.84, "end": 2680.18, "word": " and", "probability": 0.77880859375}, {"start": 2680.18, "end": 2680.3, "word": " the", "probability": 0.5634765625}, {"start": 2680.3, "end": 2680.46, "word": " other", "probability": 0.87744140625}, {"start": 2680.46, "end": 2680.64, "word": " is", "probability": 0.86376953125}, {"start": 2680.64, "end": 2680.98, "word": " females.", "probability": 0.9599609375}, {"start": 2682.28, "end": 2682.46, "word": " Now", "probability": 0.93505859375}, {"start": 2682.46, "end": 2683.0, "word": " suppose", "probability": 0.6943359375}, {"start": 2683.0, "end": 2683.38, "word": " the", "probability": 0.89111328125}, {"start": 2683.38, "end": 2684.24, "word": " characteristic", "probability": 0.6689453125}, {"start": 2684.24, "end": 2684.52, "word": " I", "probability": 0.96923828125}, {"start": 2684.52, "end": 2684.66, "word": " am", "probability": 0.80859375}, {"start": 2684.66, "end": 2684.96, "word": " going", "probability": 0.9443359375}, {"start": 2684.96, "end": 2685.16, "word": " to", "probability": 0.96923828125}, {"start": 2685.16, "end": 2685.58, "word": " use", "probability": 0.87451171875}, {"start": 2685.58, "end": 2686.06, "word": " is", "probability": 0.88525390625}, {"start": 2686.06, "end": 2686.58, "word": " the", "probability": 0.9169921875}, {"start": 2686.58, "end": 2687.46, "word": " levels", "probability": 0.87060546875}, {"start": 2687.46, "end": 2687.64, "word": " of", "probability": 0.9658203125}, {"start": 2687.64, "end": 2687.8, "word": " a", "probability": 0.2919921875}, {"start": 2687.8, "end": 2688.1, "word": " student.", "probability": 0.9560546875}, {"start": 2689.02, "end": 2689.62, "word": " First", "probability": 0.84619140625}, {"start": 2689.62, "end": 2689.92, "word": " level,", 
"probability": 0.92724609375}, {"start": 2690.12, "end": 2690.34, "word": " second,", "probability": 0.90087890625}, {"start": 2690.6, "end": 2690.9, "word": " third,", "probability": 0.853515625}, {"start": 2691.18, "end": 2691.5, "word": " fourth,", "probability": 0.931640625}, {"start": 2691.8, "end": 2692.14, "word": " and", "probability": 0.9384765625}, {"start": 2692.14, "end": 2692.34, "word": " so", "probability": 0.9580078125}, {"start": 2692.34, "end": 2692.54, "word": " on.", "probability": 0.94775390625}], "temperature": 1.0}, {"id": 100, "seek": 272026, "start": 2693.5, "end": 2720.26, "text": " So number of strata here depends on actually the characteristic you are interested in. Let's use the simple one that is gender. So here we have females. So IUV students divided into two types, strata, or two groups, females and males. So this is the first step.", "tokens": [407, 1230, 295, 1056, 3274, 510, 5946, 322, 767, 264, 16282, 291, 366, 3102, 294, 13, 961, 311, 764, 264, 2199, 472, 300, 307, 7898, 13, 407, 510, 321, 362, 21529, 13, 407, 44218, 53, 1731, 6666, 666, 732, 3467, 11, 1056, 3274, 11, 420, 732, 3935, 11, 21529, 293, 20776, 13, 407, 341, 307, 264, 700, 1823, 13], "avg_logprob": -0.1850260389347871, "compression_ratio": 1.497142857142857, "no_speech_prob": 0.0, "words": [{"start": 2693.5, "end": 2693.94, "word": " So", "probability": 0.84814453125}, {"start": 2693.94, "end": 2694.5, "word": " number", "probability": 0.583984375}, {"start": 2694.5, "end": 2694.76, "word": " of", "probability": 0.94775390625}, {"start": 2694.76, "end": 2695.36, "word": " strata", "probability": 0.829345703125}, {"start": 2695.36, "end": 2695.6, "word": " here", "probability": 0.8232421875}, {"start": 2695.6, "end": 2696.04, "word": " depends", "probability": 0.86962890625}, {"start": 2696.04, "end": 2696.28, "word": " on", "probability": 0.931640625}, {"start": 2696.28, "end": 2696.84, "word": " actually", "probability": 0.83544921875}, {"start": 2696.84, "end": 
2698.1, "word": " the", "probability": 0.84912109375}, {"start": 2698.1, "end": 2698.84, "word": " characteristic", "probability": 0.8408203125}, {"start": 2698.84, "end": 2699.26, "word": " you", "probability": 0.9453125}, {"start": 2699.26, "end": 2699.48, "word": " are", "probability": 0.8603515625}, {"start": 2699.48, "end": 2700.1, "word": " interested", "probability": 0.85888671875}, {"start": 2700.1, "end": 2700.38, "word": " in.", "probability": 0.9267578125}, {"start": 2700.78, "end": 2701.36, "word": " Let's", "probability": 0.947265625}, {"start": 2701.36, "end": 2701.56, "word": " use", "probability": 0.88525390625}, {"start": 2701.56, "end": 2701.74, "word": " the", "probability": 0.65625}, {"start": 2701.74, "end": 2702.02, "word": " simple", "probability": 0.8662109375}, {"start": 2702.02, "end": 2702.3, "word": " one", "probability": 0.92626953125}, {"start": 2702.3, "end": 2702.58, "word": " that", "probability": 0.7412109375}, {"start": 2702.58, "end": 2702.98, "word": " is", "probability": 0.931640625}, {"start": 2702.98, "end": 2703.44, "word": " gender.", "probability": 0.8037109375}, {"start": 2704.5, "end": 2704.7, "word": " So", "probability": 0.9287109375}, {"start": 2704.7, "end": 2704.86, "word": " here", "probability": 0.8203125}, {"start": 2704.86, "end": 2705.02, "word": " we", "probability": 0.93017578125}, {"start": 2705.02, "end": 2705.34, "word": " have", "probability": 0.9501953125}, {"start": 2705.34, "end": 2706.16, "word": " females.", "probability": 0.94873046875}, {"start": 2707.44, "end": 2707.66, "word": " So", "probability": 0.9404296875}, {"start": 2707.66, "end": 2708.22, "word": " IUV", "probability": 0.4639892578125}, {"start": 2708.22, "end": 2708.82, "word": " students", "probability": 0.93115234375}, {"start": 2708.82, "end": 2711.2, "word": " divided", "probability": 0.61962890625}, {"start": 2711.2, "end": 2711.94, "word": " into", "probability": 0.853515625}, {"start": 2711.94, "end": 2712.36, "word": " two", 
"probability": 0.86279296875}, {"start": 2712.36, "end": 2714.08, "word": " types,", "probability": 0.7548828125}, {"start": 2714.36, "end": 2714.84, "word": " strata,", "probability": 0.9130859375}, {"start": 2715.94, "end": 2716.02, "word": " or", "probability": 0.95068359375}, {"start": 2716.02, "end": 2716.22, "word": " two", "probability": 0.93017578125}, {"start": 2716.22, "end": 2716.56, "word": " groups,", "probability": 0.94287109375}, {"start": 2716.68, "end": 2717.12, "word": " females", "probability": 0.95556640625}, {"start": 2717.12, "end": 2718.24, "word": " and", "probability": 0.89599609375}, {"start": 2718.24, "end": 2718.56, "word": " males.", "probability": 0.89501953125}, {"start": 2719.2, "end": 2719.44, "word": " So", "probability": 0.8681640625}, {"start": 2719.44, "end": 2719.6, "word": " this", "probability": 0.5869140625}, {"start": 2719.6, "end": 2719.66, "word": " is", "probability": 0.9287109375}, {"start": 2719.66, "end": 2719.72, "word": " the", "probability": 0.85205078125}, {"start": 2719.72, "end": 2719.94, "word": " first", "probability": 0.8828125}, {"start": 2719.94, "end": 2720.26, "word": " step.", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 101, "seek": 275051, "start": 2721.75, "end": 2750.51, "text": " So at least you should have two groups or two subgroups. So we have IELTS student, the entire population, and that population divided into two subgroups. Next, assemble random samples. Keep careful here with sample sizes proportional to strata sizes. 
That means suppose I know that", "tokens": [407, 412, 1935, 291, 820, 362, 732, 3935, 420, 732, 1422, 17377, 82, 13, 407, 321, 362, 286, 3158, 7327, 3107, 11, 264, 2302, 4415, 11, 293, 300, 4415, 6666, 666, 732, 1422, 17377, 82, 13, 3087, 11, 22364, 4974, 10938, 13, 5527, 5026, 510, 365, 6889, 11602, 24969, 281, 1056, 3274, 11602, 13, 663, 1355, 7297, 286, 458, 300], "avg_logprob": -0.23770491119290962, "compression_ratio": 1.5243243243243243, "no_speech_prob": 0.0, "words": [{"start": 2721.75, "end": 2721.99, "word": " So", "probability": 0.77783203125}, {"start": 2721.99, "end": 2722.21, "word": " at", "probability": 0.73681640625}, {"start": 2722.21, "end": 2722.47, "word": " least", "probability": 0.95947265625}, {"start": 2722.47, "end": 2722.67, "word": " you", "probability": 0.8837890625}, {"start": 2722.67, "end": 2722.87, "word": " should", "probability": 0.95947265625}, {"start": 2722.87, "end": 2723.11, "word": " have", "probability": 0.9501953125}, {"start": 2723.11, "end": 2723.43, "word": " two", "probability": 0.8310546875}, {"start": 2723.43, "end": 2724.01, "word": " groups", "probability": 0.9453125}, {"start": 2724.01, "end": 2724.63, "word": " or", "probability": 0.6640625}, {"start": 2724.63, "end": 2724.81, "word": " two", "probability": 0.9306640625}, {"start": 2724.81, "end": 2725.45, "word": " subgroups.", "probability": 0.8953450520833334}, {"start": 2725.59, "end": 2725.83, "word": " So", "probability": 0.92333984375}, {"start": 2725.83, "end": 2725.99, "word": " we", "probability": 0.87060546875}, {"start": 2725.99, "end": 2726.29, "word": " have", "probability": 0.94384765625}, {"start": 2726.29, "end": 2726.75, "word": " IELTS", "probability": 0.5463053385416666}, {"start": 2726.75, "end": 2727.19, "word": " student,", "probability": 0.72998046875}, {"start": 2727.67, "end": 2727.85, "word": " the", "probability": 0.89794921875}, {"start": 2727.85, "end": 2728.15, "word": " entire", "probability": 0.89599609375}, {"start": 2728.15, 
"end": 2728.69, "word": " population,", "probability": 0.9375}, {"start": 2729.27, "end": 2729.41, "word": " and", "probability": 0.92578125}, {"start": 2729.41, "end": 2729.63, "word": " that", "probability": 0.93408203125}, {"start": 2729.63, "end": 2730.05, "word": " population", "probability": 0.9375}, {"start": 2730.05, "end": 2730.59, "word": " divided", "probability": 0.6181640625}, {"start": 2730.59, "end": 2731.17, "word": " into", "probability": 0.85400390625}, {"start": 2731.17, "end": 2731.81, "word": " two", "probability": 0.931640625}, {"start": 2731.81, "end": 2733.37, "word": " subgroups.", "probability": 0.9596354166666666}, {"start": 2734.17, "end": 2734.37, "word": " Next,", "probability": 0.92138671875}, {"start": 2735.65, "end": 2736.05, "word": " assemble", "probability": 0.285400390625}, {"start": 2736.05, "end": 2736.47, "word": " random", "probability": 0.84619140625}, {"start": 2736.47, "end": 2736.99, "word": " samples.", "probability": 0.8203125}, {"start": 2737.75, "end": 2738.09, "word": " Keep", "probability": 0.85205078125}, {"start": 2738.09, "end": 2738.49, "word": " careful", "probability": 0.93017578125}, {"start": 2738.49, "end": 2738.81, "word": " here", "probability": 0.84912109375}, {"start": 2738.81, "end": 2739.73, "word": " with", "probability": 0.54541015625}, {"start": 2739.73, "end": 2740.21, "word": " sample", "probability": 0.8056640625}, {"start": 2740.21, "end": 2740.85, "word": " sizes", "probability": 0.8955078125}, {"start": 2740.85, "end": 2741.83, "word": " proportional", "probability": 0.289306640625}, {"start": 2741.83, "end": 2742.61, "word": " to", "probability": 0.95703125}, {"start": 2742.61, "end": 2743.15, "word": " strata", "probability": 0.843994140625}, {"start": 2743.15, "end": 2743.75, "word": " sizes.", "probability": 0.91015625}, {"start": 2745.19, "end": 2745.77, "word": " That", "probability": 0.896484375}, {"start": 2745.77, "end": 2746.17, "word": " means", "probability": 0.92822265625}, 
{"start": 2746.17, "end": 2748.09, "word": " suppose", "probability": 0.56103515625}, {"start": 2748.09, "end": 2750.01, "word": " I", "probability": 0.73388671875}, {"start": 2750.01, "end": 2750.27, "word": " know", "probability": 0.87060546875}, {"start": 2750.27, "end": 2750.51, "word": " that", "probability": 0.8251953125}], "temperature": 1.0}, {"id": 102, "seek": 277149, "start": 2751.45, "end": 2771.49, "text": " Female consists of 70% of Irish students and males 30%.", "tokens": [27288, 14689, 295, 5285, 4, 295, 16801, 1731, 293, 20776, 2217, 6856], "avg_logprob": -0.45222354852236235, "compression_ratio": 0.9655172413793104, "no_speech_prob": 0.0, "words": [{"start": 2751.45, "end": 2752.05, "word": " Female", "probability": 0.413818359375}, {"start": 2752.05, "end": 2757.89, "word": " consists", "probability": 0.3291015625}, {"start": 2757.89, "end": 2762.47, "word": " of", "probability": 0.9599609375}, {"start": 2762.47, "end": 2763.33, "word": " 70", "probability": 0.9091796875}, {"start": 2763.33, "end": 2764.23, "word": "%", "probability": 0.72314453125}, {"start": 2764.23, "end": 2764.67, "word": " of", "probability": 0.765625}, {"start": 2764.67, "end": 2765.11, "word": " Irish", "probability": 0.28271484375}, {"start": 2765.11, "end": 2765.95, "word": " students", "probability": 0.447998046875}, {"start": 2765.95, "end": 2769.77, "word": " and", "probability": 0.73583984375}, {"start": 2769.77, "end": 2770.43, "word": " males", "probability": 0.6328125}, {"start": 2770.43, "end": 2771.49, "word": " 30%.", "probability": 0.84326171875}], "temperature": 1.0}, {"id": 103, "seek": 280365, "start": 2775.41, "end": 2803.65, "text": " the sample size we are talking about here is for example is a thousand so I want to select a sample of a thousand seed from the registration office or my information about that is males represent 30% females represent 70% so in this case your sample structure should be 70% times", "tokens": [264, 6889, 2744, 321, 366, 1417, 
466, 510, 307, 337, 1365, 307, 257, 4714, 370, 286, 528, 281, 3048, 257, 6889, 295, 257, 4714, 8871, 490, 264, 16847, 3398, 420, 452, 1589, 466, 300, 307, 20776, 2906, 2217, 4, 21529, 2906, 5285, 4, 370, 294, 341, 1389, 428, 6889, 3877, 820, 312, 5285, 4, 1413], "avg_logprob": -0.2769252152315208, "compression_ratio": 1.6091954022988506, "no_speech_prob": 0.0, "words": [{"start": 2775.41, "end": 2775.65, "word": " the", "probability": 0.1519775390625}, {"start": 2775.65, "end": 2775.83, "word": " sample", "probability": 0.896484375}, {"start": 2775.83, "end": 2776.15, "word": " size", "probability": 0.83349609375}, {"start": 2776.15, "end": 2776.31, "word": " we", "probability": 0.73046875}, {"start": 2776.31, "end": 2776.41, "word": " are", "probability": 0.84228515625}, {"start": 2776.41, "end": 2776.79, "word": " talking", "probability": 0.8486328125}, {"start": 2776.79, "end": 2777.13, "word": " about", "probability": 0.89990234375}, {"start": 2777.13, "end": 2777.43, "word": " here", "probability": 0.84228515625}, {"start": 2777.43, "end": 2777.69, "word": " is", "probability": 0.67041015625}, {"start": 2777.69, "end": 2777.95, "word": " for", "probability": 0.7373046875}, {"start": 2777.95, "end": 2778.35, "word": " example", "probability": 0.97216796875}, {"start": 2778.35, "end": 2778.61, "word": " is", "probability": 0.466796875}, {"start": 2778.61, "end": 2778.75, "word": " a", "probability": 0.54150390625}, {"start": 2778.75, "end": 2779.07, "word": " thousand", "probability": 0.68359375}, {"start": 2779.07, "end": 2779.75, "word": " so", "probability": 0.6484375}, {"start": 2779.75, "end": 2780.23, "word": " I", "probability": 0.5234375}, {"start": 2780.23, "end": 2780.41, "word": " want", "probability": 0.88916015625}, {"start": 2780.41, "end": 2780.55, "word": " to", "probability": 0.96875}, {"start": 2780.55, "end": 2780.89, "word": " select", "probability": 0.837890625}, {"start": 2780.89, "end": 2781.27, "word": " a", "probability": 0.98779296875}, 
{"start": 2781.27, "end": 2781.55, "word": " sample", "probability": 0.90185546875}, {"start": 2781.55, "end": 2781.73, "word": " of", "probability": 0.95849609375}, {"start": 2781.73, "end": 2781.87, "word": " a", "probability": 0.8134765625}, {"start": 2781.87, "end": 2782.21, "word": " thousand", "probability": 0.79150390625}, {"start": 2782.21, "end": 2782.59, "word": " seed", "probability": 0.456787109375}, {"start": 2782.59, "end": 2782.95, "word": " from", "probability": 0.5751953125}, {"start": 2782.95, "end": 2783.33, "word": " the", "probability": 0.9130859375}, {"start": 2783.33, "end": 2783.93, "word": " registration", "probability": 0.95361328125}, {"start": 2783.93, "end": 2784.53, "word": " office", "probability": 0.89794921875}, {"start": 2784.53, "end": 2784.99, "word": " or", "probability": 0.494140625}, {"start": 2784.99, "end": 2785.73, "word": " my", "probability": 0.86962890625}, {"start": 2785.73, "end": 2786.17, "word": " information", "probability": 0.8408203125}, {"start": 2786.17, "end": 2786.45, "word": " about", "probability": 0.90625}, {"start": 2786.45, "end": 2786.71, "word": " that", "probability": 0.93115234375}, {"start": 2786.71, "end": 2787.15, "word": " is", "probability": 0.9462890625}, {"start": 2787.15, "end": 2788.53, "word": " males", "probability": 0.8974609375}, {"start": 2788.53, "end": 2789.51, "word": " represent", "probability": 0.81689453125}, {"start": 2789.51, "end": 2790.59, "word": " 30", "probability": 0.63525390625}, {"start": 2790.59, "end": 2791.19, "word": "%", "probability": 0.4873046875}, {"start": 2791.19, "end": 2792.99, "word": " females", "probability": 0.8935546875}, {"start": 2792.99, "end": 2793.87, "word": " represent", "probability": 0.68701171875}, {"start": 2793.87, "end": 2794.55, "word": " 70", "probability": 0.96630859375}, {"start": 2794.55, "end": 2794.89, "word": "%", "probability": 0.9677734375}, {"start": 2794.89, "end": 2796.01, "word": " so", "probability": 0.88232421875}, {"start": 
2796.01, "end": 2796.17, "word": " in", "probability": 0.70654296875}, {"start": 2796.17, "end": 2796.37, "word": " this", "probability": 0.9501953125}, {"start": 2796.37, "end": 2796.75, "word": " case", "probability": 0.912109375}, {"start": 2796.75, "end": 2797.07, "word": " your", "probability": 0.81396484375}, {"start": 2797.07, "end": 2797.65, "word": " sample", "probability": 0.88720703125}, {"start": 2797.65, "end": 2798.33, "word": " structure", "probability": 0.2529296875}, {"start": 2798.33, "end": 2800.79, "word": " should", "probability": 0.95751953125}, {"start": 2800.79, "end": 2801.21, "word": " be", "probability": 0.94873046875}, {"start": 2801.21, "end": 2801.89, "word": " 70", "probability": 0.87158203125}, {"start": 2801.89, "end": 2802.43, "word": "%", "probability": 0.87841796875}, {"start": 2802.43, "end": 2803.65, "word": " times", "probability": 0.72216796875}], "temperature": 1.0}, {"id": 104, "seek": 283165, "start": 2810.09, "end": 2831.65, "text": " So the first group should have 700 items of students and the other one is 300,000. 
So this is the second step.", "tokens": [407, 264, 700, 1594, 820, 362, 15204, 4754, 295, 1731, 293, 264, 661, 472, 307, 6641, 11, 1360, 13, 407, 341, 307, 264, 1150, 1823, 13], "avg_logprob": -0.3480902711550395, "compression_ratio": 1.168421052631579, "no_speech_prob": 0.0, "words": [{"start": 2810.09, "end": 2811.17, "word": " So", "probability": 0.1282958984375}, {"start": 2811.17, "end": 2812.25, "word": " the", "probability": 0.25244140625}, {"start": 2812.25, "end": 2819.09, "word": " first", "probability": 0.72412109375}, {"start": 2819.09, "end": 2819.67, "word": " group", "probability": 0.95751953125}, {"start": 2819.67, "end": 2820.03, "word": " should", "probability": 0.90869140625}, {"start": 2820.03, "end": 2820.41, "word": " have", "probability": 0.818359375}, {"start": 2820.41, "end": 2821.43, "word": " 700", "probability": 0.83642578125}, {"start": 2821.43, "end": 2822.21, "word": " items", "probability": 0.8310546875}, {"start": 2822.21, "end": 2822.57, "word": " of", "probability": 0.59130859375}, {"start": 2822.57, "end": 2823.01, "word": " students", "probability": 0.8583984375}, {"start": 2823.01, "end": 2823.63, "word": " and", "probability": 0.63427734375}, {"start": 2823.63, "end": 2823.75, "word": " the", "probability": 0.77880859375}, {"start": 2823.75, "end": 2823.97, "word": " other", "probability": 0.8662109375}, {"start": 2823.97, "end": 2824.35, "word": " one", "probability": 0.81640625}, {"start": 2824.35, "end": 2825.57, "word": " is", "probability": 0.61376953125}, {"start": 2825.57, "end": 2825.95, "word": " 300", "probability": 0.921875}, {"start": 2825.95, "end": 2826.49, "word": ",000.", "probability": 0.6744384765625}, {"start": 2829.23, "end": 2830.31, "word": " So", "probability": 0.90625}, {"start": 2830.31, "end": 2830.53, "word": " this", "probability": 0.880859375}, {"start": 2830.53, "end": 2830.65, "word": " is", "probability": 0.9423828125}, {"start": 2830.65, "end": 2830.77, "word": " the", "probability": 
0.69189453125}, {"start": 2830.77, "end": 2831.11, "word": " second", "probability": 0.90283203125}, {"start": 2831.11, "end": 2831.65, "word": " step.", "probability": 0.939453125}], "temperature": 1.0}, {"id": 105, "seek": 286296, "start": 2834.42, "end": 2862.96, "text": " Sample sizes are determined in step number two. Now, how can you select the 700 females here? Again, you have to go back to the random table. Samples from subgroups are compiled into one. Then you can use symbol random sample. So here, 700. I have, for example, 70% females.", "tokens": [4832, 781, 11602, 366, 9540, 294, 1823, 1230, 732, 13, 823, 11, 577, 393, 291, 3048, 264, 15204, 21529, 510, 30, 3764, 11, 291, 362, 281, 352, 646, 281, 264, 4974, 3199, 13, 4832, 2622, 490, 1422, 17377, 82, 366, 36548, 666, 472, 13, 1396, 291, 393, 764, 5986, 4974, 6889, 13, 407, 510, 11, 15204, 13, 286, 362, 11, 337, 1365, 11, 5285, 4, 21529, 13], "avg_logprob": -0.1700367622954004, "compression_ratio": 1.4705882352941178, "no_speech_prob": 0.0, "words": [{"start": 2834.42, "end": 2834.96, "word": " Sample", "probability": 0.714111328125}, {"start": 2834.96, "end": 2835.32, "word": " sizes", "probability": 0.89892578125}, {"start": 2835.32, "end": 2835.7, "word": " are", "probability": 0.947265625}, {"start": 2835.7, "end": 2836.22, "word": " determined", "probability": 0.91845703125}, {"start": 2836.22, "end": 2836.78, "word": " in", "probability": 0.86328125}, {"start": 2836.78, "end": 2837.1, "word": " step", "probability": 0.85498046875}, {"start": 2837.1, "end": 2837.42, "word": " number", "probability": 0.89208984375}, {"start": 2837.42, "end": 2837.74, "word": " two.", "probability": 0.6318359375}, {"start": 2838.54, "end": 2838.78, "word": " Now,", "probability": 0.9296875}, {"start": 2838.86, "end": 2839.04, "word": " how", "probability": 0.93603515625}, {"start": 2839.04, "end": 2839.28, "word": " can", "probability": 0.939453125}, {"start": 2839.28, "end": 2839.46, "word": " you", "probability": 
0.66259765625}, {"start": 2839.46, "end": 2839.88, "word": " select", "probability": 0.85107421875}, {"start": 2839.88, "end": 2840.12, "word": " the", "probability": 0.8623046875}, {"start": 2840.12, "end": 2840.68, "word": " 700", "probability": 0.90966796875}, {"start": 2840.68, "end": 2841.46, "word": " females", "probability": 0.98291015625}, {"start": 2841.46, "end": 2842.2, "word": " here?", "probability": 0.48828125}, {"start": 2843.66, "end": 2844.34, "word": " Again,", "probability": 0.943359375}, {"start": 2844.42, "end": 2844.54, "word": " you", "probability": 0.95556640625}, {"start": 2844.54, "end": 2844.7, "word": " have", "probability": 0.94482421875}, {"start": 2844.7, "end": 2844.8, "word": " to", "probability": 0.9697265625}, {"start": 2844.8, "end": 2844.96, "word": " go", "probability": 0.962890625}, {"start": 2844.96, "end": 2845.2, "word": " back", "probability": 0.875}, {"start": 2845.2, "end": 2845.34, "word": " to", "probability": 0.96533203125}, {"start": 2845.34, "end": 2845.48, "word": " the", "probability": 0.91796875}, {"start": 2845.48, "end": 2845.76, "word": " random", "probability": 0.79541015625}, {"start": 2845.76, "end": 2846.18, "word": " table.", "probability": 0.86181640625}, {"start": 2847.48, "end": 2847.88, "word": " Samples", "probability": 0.92138671875}, {"start": 2847.88, "end": 2848.24, "word": " from", "probability": 0.8828125}, {"start": 2848.24, "end": 2849.06, "word": " subgroups", "probability": 0.9436848958333334}, {"start": 2849.06, "end": 2849.32, "word": " are", "probability": 0.9453125}, {"start": 2849.32, "end": 2849.76, "word": " compiled", "probability": 0.7607421875}, {"start": 2849.76, "end": 2850.04, "word": " into", "probability": 0.8134765625}, {"start": 2850.04, "end": 2850.34, "word": " one.", "probability": 0.91748046875}, {"start": 2851.38, "end": 2851.66, "word": " Then", "probability": 0.7548828125}, {"start": 2851.66, "end": 2851.82, "word": " you", "probability": 0.76416015625}, {"start": 
2851.82, "end": 2852.06, "word": " can", "probability": 0.9443359375}, {"start": 2852.06, "end": 2852.52, "word": " use", "probability": 0.90380859375}, {"start": 2852.52, "end": 2853.54, "word": " symbol", "probability": 0.3916015625}, {"start": 2853.54, "end": 2853.98, "word": " random", "probability": 0.73095703125}, {"start": 2853.98, "end": 2854.32, "word": " sample.", "probability": 0.496337890625}, {"start": 2854.56, "end": 2854.7, "word": " So", "probability": 0.9560546875}, {"start": 2854.7, "end": 2855.02, "word": " here,", "probability": 0.76025390625}, {"start": 2856.76, "end": 2857.3, "word": " 700.", "probability": 0.958984375}, {"start": 2858.92, "end": 2859.6, "word": " I", "probability": 0.99609375}, {"start": 2859.6, "end": 2860.02, "word": " have,", "probability": 0.94921875}, {"start": 2860.34, "end": 2860.52, "word": " for", "probability": 0.9541015625}, {"start": 2860.52, "end": 2860.98, "word": " example,", "probability": 0.9755859375}, {"start": 2861.24, "end": 2861.72, "word": " 70", "probability": 0.97021484375}, {"start": 2861.72, "end": 2862.06, "word": "%", "probability": 0.86083984375}, {"start": 2862.06, "end": 2862.96, "word": " females.", "probability": 0.94140625}], "temperature": 1.0}, {"id": 106, "seek": 288107, "start": 2863.55, "end": 2881.07, "text": " And I know that I use student help. I have ideas numbers from 1 up to 7, 14. 
Then by using simple random, simple random table, you can.", "tokens": [400, 286, 458, 300, 286, 764, 3107, 854, 13, 286, 362, 3487, 3547, 490, 502, 493, 281, 1614, 11, 3499, 13, 1396, 538, 1228, 2199, 4974, 11, 2199, 4974, 3199, 11, 291, 393, 13], "avg_logprob": -0.4968750034059797, "compression_ratio": 1.2142857142857142, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2863.55, "end": 2863.87, "word": " And", "probability": 0.396240234375}, {"start": 2863.87, "end": 2864.01, "word": " I", "probability": 0.93408203125}, {"start": 2864.01, "end": 2864.17, "word": " know", "probability": 0.87548828125}, {"start": 2864.17, "end": 2864.63, "word": " that", "probability": 0.91650390625}, {"start": 2864.63, "end": 2865.19, "word": " I", "probability": 0.54052734375}, {"start": 2865.19, "end": 2865.45, "word": " use", "probability": 0.305419921875}, {"start": 2865.45, "end": 2865.85, "word": " student", "probability": 0.6181640625}, {"start": 2865.85, "end": 2866.07, "word": " help.", "probability": 0.2440185546875}, {"start": 2866.11, "end": 2866.17, "word": " I", "probability": 0.85205078125}, {"start": 2866.17, "end": 2866.39, "word": " have", "probability": 0.8798828125}, {"start": 2866.39, "end": 2866.77, "word": " ideas", "probability": 0.48095703125}, {"start": 2866.77, "end": 2867.23, "word": " numbers", "probability": 0.64404296875}, {"start": 2867.23, "end": 2867.77, "word": " from", "probability": 0.8701171875}, {"start": 2867.77, "end": 2868.69, "word": " 1", "probability": 0.560546875}, {"start": 2868.69, "end": 2871.37, "word": " up", "probability": 0.77099609375}, {"start": 2871.37, "end": 2872.23, "word": " to", "probability": 0.9541015625}, {"start": 2872.23, "end": 2873.89, "word": " 7,", "probability": 0.291748046875}, {"start": 2874.09, "end": 2874.55, "word": " 14.", "probability": 0.72900390625}, {"start": 2876.21, "end": 2876.89, "word": " Then", "probability": 0.83984375}, {"start": 2876.89, "end": 2877.11, "word": " by", 
"probability": 0.8251953125}, {"start": 2877.11, "end": 2877.49, "word": " using", "probability": 0.9404296875}, {"start": 2877.49, "end": 2878.07, "word": " simple", "probability": 0.8115234375}, {"start": 2878.07, "end": 2878.39, "word": " random,", "probability": 0.8271484375}, {"start": 2878.57, "end": 2879.07, "word": " simple", "probability": 0.8349609375}, {"start": 2879.07, "end": 2879.47, "word": " random", "probability": 0.853515625}, {"start": 2879.47, "end": 2879.81, "word": " table,", "probability": 0.822265625}, {"start": 2880.69, "end": 2880.85, "word": " you", "probability": 0.95751953125}, {"start": 2880.85, "end": 2881.07, "word": " can.", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 107, "seek": 291899, "start": 2889.49, "end": 2918.99, "text": " So if you go back to the table, the first item, now look at five digits. Nineteen is not selected. Nineteen. I have, the maximum is fourteen thousand. So skip one and two. The first item is seven hundred and fifty-six. The first item. 
Next is not chosen.", "tokens": [407, 498, 291, 352, 646, 281, 264, 3199, 11, 264, 700, 3174, 11, 586, 574, 412, 1732, 27011, 13, 16093, 3498, 268, 307, 406, 8209, 13, 16093, 3498, 268, 13, 286, 362, 11, 264, 6674, 307, 32253, 4714, 13, 407, 10023, 472, 293, 732, 13, 440, 700, 3174, 307, 3407, 3262, 293, 13442, 12, 35066, 13, 440, 700, 3174, 13, 3087, 307, 406, 8614, 13], "avg_logprob": -0.2566287982644457, "compression_ratio": 1.5740740740740742, "no_speech_prob": 0.0, "words": [{"start": 2889.49, "end": 2889.85, "word": " So", "probability": 0.42919921875}, {"start": 2889.85, "end": 2890.33, "word": " if", "probability": 0.75390625}, {"start": 2890.33, "end": 2890.53, "word": " you", "probability": 0.896484375}, {"start": 2890.53, "end": 2890.79, "word": " go", "probability": 0.919921875}, {"start": 2890.79, "end": 2891.19, "word": " back", "probability": 0.8603515625}, {"start": 2891.19, "end": 2892.15, "word": " to", "probability": 0.90283203125}, {"start": 2892.15, "end": 2892.39, "word": " the", "probability": 0.8984375}, {"start": 2892.39, "end": 2892.75, "word": " table,", "probability": 0.9013671875}, {"start": 2893.93, "end": 2894.11, "word": " the", "probability": 0.80859375}, {"start": 2894.11, "end": 2894.51, "word": " first", "probability": 0.87890625}, {"start": 2894.51, "end": 2895.19, "word": " item,", "probability": 0.9658203125}, {"start": 2896.65, "end": 2897.09, "word": " now", "probability": 0.861328125}, {"start": 2897.09, "end": 2897.37, "word": " look", "probability": 0.93115234375}, {"start": 2897.37, "end": 2897.53, "word": " at", "probability": 0.96142578125}, {"start": 2897.53, "end": 2897.85, "word": " five", "probability": 0.60986328125}, {"start": 2897.85, "end": 2898.37, "word": " digits.", "probability": 0.9248046875}, {"start": 2900.21, "end": 2900.93, "word": " Nineteen", "probability": 0.7604166666666666}, {"start": 2900.93, "end": 2902.37, "word": " is", "probability": 0.76513671875}, {"start": 2902.37, "end": 2902.57, "word": 
" not", "probability": 0.95361328125}, {"start": 2902.57, "end": 2903.13, "word": " selected.", "probability": 0.88134765625}, {"start": 2904.83, "end": 2905.55, "word": " Nineteen.", "probability": 0.8260091145833334}, {"start": 2906.11, "end": 2906.27, "word": " I", "probability": 0.84228515625}, {"start": 2906.27, "end": 2906.53, "word": " have,", "probability": 0.955078125}, {"start": 2906.63, "end": 2906.73, "word": " the", "probability": 0.89697265625}, {"start": 2906.73, "end": 2906.99, "word": " maximum", "probability": 0.84814453125}, {"start": 2906.99, "end": 2907.21, "word": " is", "probability": 0.84619140625}, {"start": 2907.21, "end": 2907.51, "word": " fourteen", "probability": 0.69677734375}, {"start": 2907.51, "end": 2907.95, "word": " thousand.", "probability": 0.88720703125}, {"start": 2908.53, "end": 2908.83, "word": " So", "probability": 0.91552734375}, {"start": 2908.83, "end": 2909.39, "word": " skip", "probability": 0.70361328125}, {"start": 2909.39, "end": 2909.67, "word": " one", "probability": 0.91650390625}, {"start": 2909.67, "end": 2909.91, "word": " and", "probability": 0.92041015625}, {"start": 2909.91, "end": 2910.13, "word": " two.", "probability": 0.92822265625}, {"start": 2910.53, "end": 2910.71, "word": " The", "probability": 0.892578125}, {"start": 2910.71, "end": 2911.11, "word": " first", "probability": 0.88720703125}, {"start": 2911.11, "end": 2911.51, "word": " item", "probability": 0.9091796875}, {"start": 2911.51, "end": 2911.89, "word": " is", "probability": 0.369384765625}, {"start": 2911.89, "end": 2913.69, "word": " seven", "probability": 0.62548828125}, {"start": 2913.69, "end": 2914.13, "word": " hundred", "probability": 0.90625}, {"start": 2914.13, "end": 2914.25, "word": " and", "probability": 0.7763671875}, {"start": 2914.25, "end": 2914.49, "word": " fifty", "probability": 0.56689453125}, {"start": 2914.49, "end": 2914.83, "word": "-six.", "probability": 0.7685546875}, {"start": 2915.31, "end": 2915.63, "word": 
" The", "probability": 0.5126953125}, {"start": 2915.63, "end": 2915.95, "word": " first", "probability": 0.87841796875}, {"start": 2915.95, "end": 2916.39, "word": " item.", "probability": 0.96923828125}, {"start": 2917.13, "end": 2917.85, "word": " Next", "probability": 0.859375}, {"start": 2917.85, "end": 2918.43, "word": " is", "probability": 0.78759765625}, {"start": 2918.43, "end": 2918.63, "word": " not", "probability": 0.931640625}, {"start": 2918.63, "end": 2918.99, "word": " chosen.", "probability": 0.919921875}], "temperature": 1.0}, {"id": 108, "seek": 294882, "start": 2920.44, "end": 2948.82, "text": " Next is not chosen. Number six. Twelve. Zero. Unsure. So here we divide the population into two groups or two subgroups, females and males. And we select a random sample of size 700 based on the proportion of this subgroup.", "tokens": [3087, 307, 406, 8614, 13, 5118, 2309, 13, 48063, 13, 17182, 13, 1156, 15091, 265, 13, 407, 510, 321, 9845, 264, 4415, 666, 732, 3935, 420, 732, 1422, 17377, 82, 11, 21529, 293, 20776, 13, 400, 321, 3048, 257, 4974, 6889, 295, 2744, 15204, 2361, 322, 264, 16068, 295, 341, 1422, 17377, 13], "avg_logprob": -0.31944444554823415, "compression_ratio": 1.382716049382716, "no_speech_prob": 0.0, "words": [{"start": 2920.44, "end": 2920.82, "word": " Next", "probability": 0.66552734375}, {"start": 2920.82, "end": 2921.0, "word": " is", "probability": 0.888671875}, {"start": 2921.0, "end": 2921.16, "word": " not", "probability": 0.81494140625}, {"start": 2921.16, "end": 2921.54, "word": " chosen.", "probability": 0.89892578125}, {"start": 2922.12, "end": 2922.96, "word": " Number", "probability": 0.69580078125}, {"start": 2922.96, "end": 2923.48, "word": " six.", "probability": 0.5458984375}, {"start": 2923.74, "end": 2924.58, "word": " Twelve.", "probability": 0.724609375}, {"start": 2927.42, "end": 2928.14, "word": " Zero.", "probability": 0.266845703125}, {"start": 2929.78, "end": 2930.62, "word": " Unsure.", "probability": 
0.6119791666666666}, {"start": 2932.88, "end": 2933.22, "word": " So", "probability": 0.89990234375}, {"start": 2933.22, "end": 2933.52, "word": " here", "probability": 0.755859375}, {"start": 2933.52, "end": 2933.8, "word": " we", "probability": 0.78173828125}, {"start": 2933.8, "end": 2934.48, "word": " divide", "probability": 0.355712890625}, {"start": 2934.48, "end": 2936.06, "word": " the", "probability": 0.7578125}, {"start": 2936.06, "end": 2937.1, "word": " population", "probability": 0.97998046875}, {"start": 2937.1, "end": 2937.56, "word": " into", "probability": 0.83349609375}, {"start": 2937.56, "end": 2938.12, "word": " two", "probability": 0.90087890625}, {"start": 2938.12, "end": 2938.94, "word": " groups", "probability": 0.9453125}, {"start": 2938.94, "end": 2939.7, "word": " or", "probability": 0.471923828125}, {"start": 2939.7, "end": 2939.9, "word": " two", "probability": 0.9228515625}, {"start": 2939.9, "end": 2940.72, "word": " subgroups,", "probability": 0.9070638020833334}, {"start": 2940.8, "end": 2941.1, "word": " females", "probability": 0.888671875}, {"start": 2941.1, "end": 2941.7, "word": " and", "probability": 0.9453125}, {"start": 2941.7, "end": 2942.12, "word": " males.", "probability": 0.95947265625}, {"start": 2942.72, "end": 2942.96, "word": " And", "probability": 0.88427734375}, {"start": 2942.96, "end": 2943.1, "word": " we", "probability": 0.677734375}, {"start": 2943.1, "end": 2943.44, "word": " select", "probability": 0.8359375}, {"start": 2943.44, "end": 2943.54, "word": " a", "probability": 0.55859375}, {"start": 2943.54, "end": 2943.84, "word": " random", "probability": 0.77099609375}, {"start": 2943.84, "end": 2944.24, "word": " sample", "probability": 0.91748046875}, {"start": 2944.24, "end": 2944.46, "word": " of", "probability": 0.9443359375}, {"start": 2944.46, "end": 2944.72, "word": " size", "probability": 0.85546875}, {"start": 2944.72, "end": 2945.32, "word": " 700", "probability": 0.80322265625}, {"start": 
2945.32, "end": 2945.82, "word": " based", "probability": 0.8837890625}, {"start": 2945.82, "end": 2946.24, "word": " on", "probability": 0.951171875}, {"start": 2946.24, "end": 2947.02, "word": " the", "probability": 0.921875}, {"start": 2947.02, "end": 2947.6, "word": " proportion", "probability": 0.8193359375}, {"start": 2947.6, "end": 2947.9, "word": " of", "probability": 0.970703125}, {"start": 2947.9, "end": 2948.18, "word": " this", "probability": 0.81103515625}, {"start": 2948.18, "end": 2948.82, "word": " subgroup.", "probability": 0.96142578125}], "temperature": 1.0}, {"id": 109, "seek": 297657, "start": 2949.51, "end": 2976.57, "text": " Then we are using the simple random table to take the 700 females. Now for this example, there are 16 items or 16 students in each group. And he select randomly number three,", "tokens": [1396, 321, 366, 1228, 264, 2199, 4974, 3199, 281, 747, 264, 15204, 21529, 13, 823, 337, 341, 1365, 11, 456, 366, 3165, 4754, 420, 3165, 1731, 294, 1184, 1594, 13, 400, 415, 3048, 16979, 1230, 1045, 11], "avg_logprob": -0.22574013667671303, "compression_ratio": 1.3059701492537314, "no_speech_prob": 0.0, "words": [{"start": 2949.51, "end": 2949.87, "word": " Then", "probability": 0.70458984375}, {"start": 2949.87, "end": 2950.03, "word": " we", "probability": 0.7783203125}, {"start": 2950.03, "end": 2950.19, "word": " are", "probability": 0.91943359375}, {"start": 2950.19, "end": 2950.57, "word": " using", "probability": 0.93896484375}, {"start": 2950.57, "end": 2950.85, "word": " the", "probability": 0.83837890625}, {"start": 2950.85, "end": 2951.19, "word": " simple", "probability": 0.84716796875}, {"start": 2951.19, "end": 2951.61, "word": " random", "probability": 0.8115234375}, {"start": 2951.61, "end": 2952.33, "word": " table", "probability": 0.8330078125}, {"start": 2952.33, "end": 2952.77, "word": " to", "probability": 0.9375}, {"start": 2952.77, "end": 2953.21, "word": " take", "probability": 0.8740234375}, {"start": 2953.21, 
"end": 2954.13, "word": " the", "probability": 0.83447265625}, {"start": 2954.13, "end": 2954.67, "word": " 700", "probability": 0.9208984375}, {"start": 2954.67, "end": 2956.75, "word": " females.", "probability": 0.9677734375}, {"start": 2962.09, "end": 2962.97, "word": " Now", "probability": 0.9130859375}, {"start": 2962.97, "end": 2963.19, "word": " for", "probability": 0.67578125}, {"start": 2963.19, "end": 2963.51, "word": " this", "probability": 0.94873046875}, {"start": 2963.51, "end": 2964.59, "word": " example,", "probability": 0.97705078125}, {"start": 2966.15, "end": 2967.07, "word": " there", "probability": 0.88671875}, {"start": 2967.07, "end": 2967.45, "word": " are", "probability": 0.9443359375}, {"start": 2967.45, "end": 2968.31, "word": " 16", "probability": 0.90087890625}, {"start": 2968.31, "end": 2969.17, "word": " items", "probability": 0.89013671875}, {"start": 2969.17, "end": 2969.43, "word": " or", "probability": 0.64697265625}, {"start": 2969.43, "end": 2969.81, "word": " 16", "probability": 0.97119140625}, {"start": 2969.81, "end": 2970.55, "word": " students", "probability": 0.984375}, {"start": 2970.55, "end": 2971.49, "word": " in", "probability": 0.9189453125}, {"start": 2971.49, "end": 2971.93, "word": " each", "probability": 0.94091796875}, {"start": 2971.93, "end": 2973.09, "word": " group.", "probability": 0.962890625}, {"start": 2973.65, "end": 2974.15, "word": " And", "probability": 0.9384765625}, {"start": 2974.15, "end": 2974.31, "word": " he", "probability": 0.62744140625}, {"start": 2974.31, "end": 2974.63, "word": " select", "probability": 0.481201171875}, {"start": 2974.63, "end": 2975.03, "word": " randomly", "probability": 0.8564453125}, {"start": 2975.03, "end": 2976.09, "word": " number", "probability": 0.57861328125}, {"start": 2976.09, "end": 2976.57, "word": " three,", "probability": 0.66015625}], "temperature": 1.0}, {"id": 110, "seek": 300676, "start": 2978.12, "end": 3006.76, "text": " number 9, number 13, and so 
on. So it's a random selection. Another example. Suppose again we are talking about all IUVs. Here I divided the population", "tokens": [1230, 1722, 11, 1230, 3705, 11, 293, 370, 322, 13, 407, 309, 311, 257, 4974, 9450, 13, 3996, 1365, 13, 21360, 797, 321, 366, 1417, 466, 439, 44218, 53, 82, 13, 1692, 286, 6666, 264, 4415], "avg_logprob": -0.32221284105971054, "compression_ratio": 1.216, "no_speech_prob": 0.0, "words": [{"start": 2978.12, "end": 2978.48, "word": " number", "probability": 0.411865234375}, {"start": 2978.48, "end": 2978.88, "word": " 9,", "probability": 0.60400390625}, {"start": 2979.12, "end": 2979.32, "word": " number", "probability": 0.91357421875}, {"start": 2979.32, "end": 2979.64, "word": " 13,", "probability": 0.591796875}, {"start": 2979.76, "end": 2979.82, "word": " and", "probability": 0.865234375}, {"start": 2979.82, "end": 2980.0, "word": " so", "probability": 0.947265625}, {"start": 2980.0, "end": 2980.22, "word": " on.", "probability": 0.94677734375}, {"start": 2980.56, "end": 2980.7, "word": " So", "probability": 0.54296875}, {"start": 2980.7, "end": 2980.9, "word": " it's", "probability": 0.7890625}, {"start": 2980.9, "end": 2981.0, "word": " a", "probability": 0.432861328125}, {"start": 2981.0, "end": 2981.16, "word": " random", "probability": 0.6298828125}, {"start": 2981.16, "end": 2981.6, "word": " selection.", "probability": 0.81591796875}, {"start": 2983.04, "end": 2983.58, "word": " Another", "probability": 0.79638671875}, {"start": 2983.58, "end": 2984.14, "word": " example.", "probability": 0.96728515625}, {"start": 2986.82, "end": 2987.14, "word": " Suppose", "probability": 0.62890625}, {"start": 2987.14, "end": 2987.46, "word": " again", "probability": 0.81689453125}, {"start": 2987.46, "end": 2987.66, "word": " we", "probability": 0.70703125}, {"start": 2987.66, "end": 2987.86, "word": " are", "probability": 0.8955078125}, {"start": 2987.86, "end": 2988.36, "word": " talking", "probability": 0.849609375}, {"start": 
2988.36, "end": 2989.5, "word": " about", "probability": 0.9072265625}, {"start": 2989.5, "end": 2991.66, "word": " all", "probability": 0.8583984375}, {"start": 2991.66, "end": 2992.42, "word": " IUVs.", "probability": 0.6315104166666666}, {"start": 3002.78, "end": 3003.46, "word": " Here", "probability": 0.8037109375}, {"start": 3003.46, "end": 3003.64, "word": " I", "probability": 0.8984375}, {"start": 3003.64, "end": 3004.06, "word": " divided", "probability": 0.76123046875}, {"start": 3004.06, "end": 3006.2, "word": " the", "probability": 0.90234375}, {"start": 3006.2, "end": 3006.76, "word": " population", "probability": 0.96240234375}], "temperature": 1.0}, {"id": 111, "seek": 303370, "start": 3007.76, "end": 3033.7, "text": " according to the students' levels. Level one, level two, three levels. One, two, three and four. So I divide", "tokens": [4650, 281, 264, 1731, 6, 4358, 13, 16872, 472, 11, 1496, 732, 11, 1045, 4358, 13, 1485, 11, 732, 11, 1045, 293, 1451, 13, 407, 286, 9845], "avg_logprob": -0.3666294738650322, "compression_ratio": 1.2386363636363635, "no_speech_prob": 0.0, "words": [{"start": 3007.76, "end": 3008.38, "word": " according", "probability": 0.65185546875}, {"start": 3008.38, "end": 3008.8, "word": " to", "probability": 0.96923828125}, {"start": 3008.8, "end": 3009.36, "word": " the", "probability": 0.89208984375}, {"start": 3009.36, "end": 3011.28, "word": " students'", "probability": 0.619140625}, {"start": 3011.52, "end": 3011.8, "word": " levels.", "probability": 0.845703125}, {"start": 3012.88, "end": 3013.26, "word": " Level", "probability": 0.78125}, {"start": 3013.26, "end": 3013.62, "word": " one,", "probability": 0.357177734375}, {"start": 3014.44, "end": 3016.44, "word": " level", "probability": 0.82275390625}, {"start": 3016.44, "end": 3016.86, "word": " two,", "probability": 0.931640625}, {"start": 3017.44, "end": 3017.68, "word": " three", "probability": 0.35595703125}, {"start": 3017.68, "end": 3018.24, "word": " levels.", 
"probability": 0.81298828125}, {"start": 3025.96, "end": 3026.38, "word": " One,", "probability": 0.607421875}, {"start": 3026.58, "end": 3026.82, "word": " two,", "probability": 0.93994140625}, {"start": 3027.26, "end": 3027.68, "word": " three", "probability": 0.94091796875}, {"start": 3027.68, "end": 3028.02, "word": " and", "probability": 0.62890625}, {"start": 3028.02, "end": 3028.3, "word": " four.", "probability": 0.93994140625}, {"start": 3032.24, "end": 3033.08, "word": " So", "probability": 0.93408203125}, {"start": 3033.08, "end": 3033.3, "word": " I", "probability": 0.8046875}, {"start": 3033.3, "end": 3033.7, "word": " divide", "probability": 0.85546875}], "temperature": 1.0}, {"id": 112, "seek": 306313, "start": 3035.79, "end": 3063.13, "text": " the population into four subgroups according to the student levels. So one, two, three, and four. Now, a simple random sample is selected from each subgroup with sample sizes proportional to strata size. Imagine that level number one represents 40% of the students.", "tokens": [264, 4415, 666, 1451, 1422, 17377, 82, 4650, 281, 264, 3107, 4358, 13, 407, 472, 11, 732, 11, 1045, 11, 293, 1451, 13, 823, 11, 257, 2199, 4974, 6889, 307, 8209, 490, 1184, 1422, 17377, 365, 6889, 11602, 24969, 281, 1056, 3274, 2744, 13, 11739, 300, 1496, 1230, 472, 8855, 3356, 4, 295, 264, 1731, 13], "avg_logprob": -0.18750000209139103, "compression_ratio": 1.5113636363636365, "no_speech_prob": 0.0, "words": [{"start": 3035.79, "end": 3036.05, "word": " the", "probability": 0.467041015625}, {"start": 3036.05, "end": 3036.79, "word": " population", "probability": 0.9541015625}, {"start": 3036.79, "end": 3037.59, "word": " into", "probability": 0.8369140625}, {"start": 3037.59, "end": 3038.49, "word": " four", "probability": 0.80908203125}, {"start": 3038.49, "end": 3039.71, "word": " subgroups", "probability": 0.9111328125}, {"start": 3039.71, "end": 3040.49, "word": " according", "probability": 0.66650390625}, {"start": 3040.49, 
"end": 3040.77, "word": " to", "probability": 0.96240234375}, {"start": 3040.77, "end": 3040.97, "word": " the", "probability": 0.87548828125}, {"start": 3040.97, "end": 3041.47, "word": " student", "probability": 0.91943359375}, {"start": 3041.47, "end": 3041.97, "word": " levels.", "probability": 0.87548828125}, {"start": 3042.65, "end": 3042.79, "word": " So", "probability": 0.83984375}, {"start": 3042.79, "end": 3043.01, "word": " one,", "probability": 0.55322265625}, {"start": 3043.09, "end": 3043.17, "word": " two,", "probability": 0.94384765625}, {"start": 3043.29, "end": 3043.45, "word": " three,", "probability": 0.935546875}, {"start": 3043.47, "end": 3043.63, "word": " and", "probability": 0.91552734375}, {"start": 3043.63, "end": 3043.91, "word": " four.", "probability": 0.94091796875}, {"start": 3045.45, "end": 3045.87, "word": " Now,", "probability": 0.94775390625}, {"start": 3046.13, "end": 3046.31, "word": " a", "probability": 0.80078125}, {"start": 3046.31, "end": 3046.57, "word": " simple", "probability": 0.376708984375}, {"start": 3046.57, "end": 3046.95, "word": " random", "probability": 0.7841796875}, {"start": 3046.95, "end": 3047.47, "word": " sample", "probability": 0.86962890625}, {"start": 3047.47, "end": 3048.03, "word": " is", "probability": 0.923828125}, {"start": 3048.03, "end": 3048.45, "word": " selected", "probability": 0.87939453125}, {"start": 3048.45, "end": 3048.83, "word": " from", "probability": 0.88916015625}, {"start": 3048.83, "end": 3049.05, "word": " each", "probability": 0.947265625}, {"start": 3049.05, "end": 3049.75, "word": " subgroup", "probability": 0.946044921875}, {"start": 3049.75, "end": 3051.13, "word": " with", "probability": 0.8115234375}, {"start": 3051.13, "end": 3051.55, "word": " sample", "probability": 0.884765625}, {"start": 3051.55, "end": 3052.07, "word": " sizes", "probability": 0.8818359375}, {"start": 3052.07, "end": 3052.71, "word": " proportional", "probability": 0.8759765625}, {"start": 3052.71, 
"end": 3053.13, "word": " to", "probability": 0.9658203125}, {"start": 3053.13, "end": 3053.51, "word": " strata", "probability": 0.680908203125}, {"start": 3053.51, "end": 3053.95, "word": " size.", "probability": 0.86669921875}, {"start": 3055.63, "end": 3056.05, "word": " Imagine", "probability": 0.8525390625}, {"start": 3056.05, "end": 3056.57, "word": " that", "probability": 0.93310546875}, {"start": 3056.57, "end": 3057.67, "word": " level", "probability": 0.576171875}, {"start": 3057.67, "end": 3058.03, "word": " number", "probability": 0.9130859375}, {"start": 3058.03, "end": 3058.47, "word": " one", "probability": 0.85498046875}, {"start": 3058.47, "end": 3060.31, "word": " represents", "probability": 0.84814453125}, {"start": 3060.31, "end": 3060.75, "word": " 40", "probability": 0.95703125}, {"start": 3060.75, "end": 3061.05, "word": "%", "probability": 0.89453125}, {"start": 3061.05, "end": 3062.51, "word": " of", "probability": 0.9638671875}, {"start": 3062.51, "end": 3062.69, "word": " the", "probability": 0.91455078125}, {"start": 3062.69, "end": 3063.13, "word": " students.", "probability": 0.9638671875}], "temperature": 1.0}, {"id": 113, "seek": 308285, "start": 3064.55, "end": 3082.85, "text": " Level 2, 20%. Level 3, 30%. Just an example. 
To make more sense?", "tokens": [16872, 568, 11, 945, 6856, 16872, 805, 11, 2217, 6856, 1449, 364, 1365, 13, 1407, 652, 544, 2020, 30], "avg_logprob": -0.33984375596046446, "compression_ratio": 0.9848484848484849, "no_speech_prob": 0.0, "words": [{"start": 3064.55, "end": 3064.95, "word": " Level", "probability": 0.44287109375}, {"start": 3064.95, "end": 3065.35, "word": " 2,", "probability": 0.6220703125}, {"start": 3065.85, "end": 3066.95, "word": " 20%.", "probability": 0.579833984375}, {"start": 3066.95, "end": 3068.21, "word": " Level", "probability": 0.88671875}, {"start": 3068.21, "end": 3068.67, "word": " 3,", "probability": 0.98974609375}, {"start": 3071.07, "end": 3073.75, "word": " 30%.", "probability": 0.840087890625}, {"start": 3073.75, "end": 3077.63, "word": " Just", "probability": 0.5966796875}, {"start": 3077.63, "end": 3078.05, "word": " an", "probability": 0.853515625}, {"start": 3078.05, "end": 3078.45, "word": " example.", "probability": 0.97607421875}, {"start": 3081.05, "end": 3082.05, "word": " To", "probability": 0.93701171875}, {"start": 3082.05, "end": 3082.19, "word": " make", "probability": 0.94580078125}, {"start": 3082.19, "end": 3082.43, "word": " more", "probability": 0.9306640625}, {"start": 3082.43, "end": 3082.85, "word": " sense?", "probability": 0.83544921875}], "temperature": 1.0}, {"id": 114, "seek": 312437, "start": 3094.99, "end": 3124.37, "text": " My sample size? 3, 9, 15, 4, sorry. So here, there are four levels. 
And the proportions are 48", "tokens": [1222, 6889, 2744, 30, 805, 11, 1722, 11, 2119, 11, 1017, 11, 2597, 13, 407, 510, 11, 456, 366, 1451, 4358, 13, 400, 264, 32482, 366, 11174], "avg_logprob": -0.5881696407284055, "compression_ratio": 1.0326086956521738, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3094.99, "end": 3095.27, "word": " My", "probability": 0.57421875}, {"start": 3095.27, "end": 3095.59, "word": " sample", "probability": 0.85693359375}, {"start": 3095.59, "end": 3096.07, "word": " size?", "probability": 0.8466796875}, {"start": 3098.75, "end": 3099.91, "word": " 3,", "probability": 0.13525390625}, {"start": 3101.91, "end": 3103.21, "word": " 9,", "probability": 0.845703125}, {"start": 3103.43, "end": 3104.01, "word": " 15,", "probability": 0.88916015625}, {"start": 3104.45, "end": 3104.95, "word": " 4,", "probability": 0.8828125}, {"start": 3105.05, "end": 3106.43, "word": " sorry.", "probability": 0.56591796875}, {"start": 3113.29, "end": 3114.45, "word": " So", "probability": 0.371337890625}, {"start": 3114.45, "end": 3114.97, "word": " here,", "probability": 0.5634765625}, {"start": 3115.61, "end": 3115.95, "word": " there", "probability": 0.828125}, {"start": 3115.95, "end": 3116.25, "word": " are", "probability": 0.943359375}, {"start": 3116.25, "end": 3116.69, "word": " four", "probability": 0.45849609375}, {"start": 3116.69, "end": 3117.09, "word": " levels.", "probability": 0.58837890625}, {"start": 3119.49, "end": 3120.29, "word": " And", "probability": 0.9111328125}, {"start": 3120.29, "end": 3120.47, "word": " the", "probability": 0.9072265625}, {"start": 3120.47, "end": 3121.19, "word": " proportions", "probability": 0.841796875}, {"start": 3121.19, "end": 3122.15, "word": " are", "probability": 0.94677734375}, {"start": 3122.15, "end": 3124.37, "word": " 48", "probability": 0.431884765625}], "temperature": 1.0}, {"id": 115, "seek": 315295, "start": 3126.67, "end": 3152.95, "text": " sample size is 500 so the sample for 
each strata will be number 1 40% times 500 gives 200 the next 150", "tokens": [6889, 2744, 307, 5923, 370, 264, 6889, 337, 1184, 1056, 3274, 486, 312, 1230, 502, 3356, 4, 1413, 5923, 2709, 2331, 264, 958, 8451], "avg_logprob": -0.36031250953674315, "compression_ratio": 1.1573033707865168, "no_speech_prob": 0.0, "words": [{"start": 3126.6699999999996, "end": 3127.99, "word": " sample", "probability": 0.052764892578125}, {"start": 3127.99, "end": 3128.59, "word": " size", "probability": 0.7958984375}, {"start": 3128.59, "end": 3130.23, "word": " is", "probability": 0.85107421875}, {"start": 3130.23, "end": 3130.87, "word": " 500", "probability": 0.88916015625}, {"start": 3130.87, "end": 3132.93, "word": " so", "probability": 0.4990234375}, {"start": 3132.93, "end": 3135.53, "word": " the", "probability": 0.67431640625}, {"start": 3135.53, "end": 3135.99, "word": " sample", "probability": 0.89111328125}, {"start": 3135.99, "end": 3136.41, "word": " for", "probability": 0.904296875}, {"start": 3136.41, "end": 3136.67, "word": " each", "probability": 0.95361328125}, {"start": 3136.67, "end": 3137.19, "word": " strata", "probability": 0.859130859375}, {"start": 3137.19, "end": 3137.43, "word": " will", "probability": 0.85888671875}, {"start": 3137.43, "end": 3137.85, "word": " be", "probability": 0.955078125}, {"start": 3137.85, "end": 3139.15, "word": " number", "probability": 0.8193359375}, {"start": 3139.15, "end": 3139.55, "word": " 1", "probability": 0.424072265625}, {"start": 3139.55, "end": 3142.07, "word": " 40", "probability": 0.732421875}, {"start": 3142.07, "end": 3143.85, "word": "%", "probability": 0.53662109375}, {"start": 3143.85, "end": 3145.41, "word": " times", "probability": 0.54638671875}, {"start": 3145.41, "end": 3146.07, "word": " 500", "probability": 0.94775390625}, {"start": 3146.07, "end": 3146.57, "word": " gives", "probability": 0.7509765625}, {"start": 3146.57, "end": 3148.65, "word": " 200", "probability": 0.8818359375}, {"start": 3148.65, 
"end": 3150.87, "word": " the", "probability": 0.72265625}, {"start": 3150.87, "end": 3151.19, "word": " next", "probability": 0.9345703125}, {"start": 3151.19, "end": 3152.95, "word": " 150", "probability": 0.810546875}], "temperature": 1.0}, {"id": 116, "seek": 318324, "start": 3156.2, "end": 3183.24, "text": " And so on. Now, how can we choose the 200 from level number one? Again, we have to choose the random table. Now, 40% from this number, it means 5,000. This one has 5,000.", "tokens": [400, 370, 322, 13, 823, 11, 577, 393, 321, 2826, 264, 2331, 490, 1496, 1230, 472, 30, 3764, 11, 321, 362, 281, 2826, 264, 4974, 3199, 13, 823, 11, 3356, 4, 490, 341, 1230, 11, 309, 1355, 1025, 11, 1360, 13, 639, 472, 575, 1025, 11, 1360, 13], "avg_logprob": -0.16597576530612246, "compression_ratio": 1.3464566929133859, "no_speech_prob": 0.0, "words": [{"start": 3156.2, "end": 3156.46, "word": " And", "probability": 0.2291259765625}, {"start": 3156.46, "end": 3156.62, "word": " so", "probability": 0.87109375}, {"start": 3156.62, "end": 3156.82, "word": " on.", "probability": 0.927734375}, {"start": 3158.68, "end": 3159.36, "word": " Now,", "probability": 0.92919921875}, {"start": 3159.6, "end": 3159.86, "word": " how", "probability": 0.9404296875}, {"start": 3159.86, "end": 3160.1, "word": " can", "probability": 0.94091796875}, {"start": 3160.1, "end": 3160.38, "word": " we", "probability": 0.955078125}, {"start": 3160.38, "end": 3161.36, "word": " choose", "probability": 0.90283203125}, {"start": 3161.36, "end": 3161.56, "word": " the", "probability": 0.7724609375}, {"start": 3161.56, "end": 3161.86, "word": " 200", "probability": 0.89306640625}, {"start": 3161.86, "end": 3162.38, "word": " from", "probability": 0.8876953125}, {"start": 3162.38, "end": 3162.66, "word": " level", "probability": 0.7890625}, {"start": 3162.66, "end": 3162.86, "word": " number", "probability": 0.81884765625}, {"start": 3162.86, "end": 3163.14, "word": " one?", "probability": 0.5546875}, {"start": 
3163.22, "end": 3163.54, "word": " Again,", "probability": 0.947265625}, {"start": 3165.12, "end": 3165.34, "word": " we", "probability": 0.73095703125}, {"start": 3165.34, "end": 3165.5, "word": " have", "probability": 0.9228515625}, {"start": 3165.5, "end": 3165.64, "word": " to", "probability": 0.95654296875}, {"start": 3165.64, "end": 3165.9, "word": " choose", "probability": 0.9150390625}, {"start": 3165.9, "end": 3166.28, "word": " the", "probability": 0.873046875}, {"start": 3166.28, "end": 3166.96, "word": " random", "probability": 0.80029296875}, {"start": 3166.96, "end": 3167.32, "word": " table.", "probability": 0.87841796875}, {"start": 3169.52, "end": 3170.2, "word": " Now,", "probability": 0.9619140625}, {"start": 3170.28, "end": 3170.56, "word": " 40", "probability": 0.966796875}, {"start": 3170.56, "end": 3170.98, "word": "%", "probability": 0.8251953125}, {"start": 3170.98, "end": 3172.76, "word": " from", "probability": 0.8916015625}, {"start": 3172.76, "end": 3173.12, "word": " this", "probability": 0.9501953125}, {"start": 3173.12, "end": 3173.52, "word": " number,", "probability": 0.93359375}, {"start": 3174.8, "end": 3175.06, "word": " it", "probability": 0.92236328125}, {"start": 3175.06, "end": 3175.54, "word": " means", "probability": 0.93505859375}, {"start": 3175.54, "end": 3179.62, "word": " 5", "probability": 0.619140625}, {"start": 3179.62, "end": 3180.12, "word": ",000.", "probability": 0.984619140625}, {"start": 3181.14, "end": 3181.82, "word": " This", "probability": 0.86572265625}, {"start": 3181.82, "end": 3182.06, "word": " one", "probability": 0.92333984375}, {"start": 3182.06, "end": 3182.42, "word": " has", "probability": 0.9404296875}, {"start": 3182.42, "end": 3182.78, "word": " 5", "probability": 0.990234375}, {"start": 3182.78, "end": 3183.24, "word": ",000.", "probability": 0.99853515625}], "temperature": 1.0}, {"id": 117, "seek": 321360, "start": 3183.96, "end": 3213.6, "text": " 600 females students. 
Because 40% of females in level 1. And I know that the total number of females is 14,000. So number of females in the first level is 5600. How many digits we have? Four digits. The first one, 001, all the way up to 560. If you go back,", "tokens": [11849, 21529, 1731, 13, 1436, 3356, 4, 295, 21529, 294, 1496, 502, 13, 400, 286, 458, 300, 264, 3217, 1230, 295, 21529, 307, 3499, 11, 1360, 13, 407, 1230, 295, 21529, 294, 264, 700, 1496, 307, 1025, 15707, 13, 1012, 867, 27011, 321, 362, 30, 7451, 27011, 13, 440, 700, 472, 11, 7143, 16, 11, 439, 264, 636, 493, 281, 1025, 4550, 13, 759, 291, 352, 646, 11], "avg_logprob": -0.20142662611560544, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 3183.96, "end": 3184.56, "word": " 600", "probability": 0.367431640625}, {"start": 3184.56, "end": 3185.4, "word": " females", "probability": 0.67626953125}, {"start": 3185.4, "end": 3186.4, "word": " students.", "probability": 0.74609375}, {"start": 3187.72, "end": 3188.42, "word": " Because", "probability": 0.77978515625}, {"start": 3188.42, "end": 3189.24, "word": " 40", "probability": 0.9267578125}, {"start": 3189.24, "end": 3189.8, "word": "%", "probability": 0.837890625}, {"start": 3189.8, "end": 3190.3, "word": " of", "probability": 0.9404296875}, {"start": 3190.3, "end": 3190.82, "word": " females", "probability": 0.92822265625}, {"start": 3190.82, "end": 3191.18, "word": " in", "probability": 0.6005859375}, {"start": 3191.18, "end": 3191.4, "word": " level", "probability": 0.79736328125}, {"start": 3191.4, "end": 3191.68, "word": " 1.", "probability": 0.5419921875}, {"start": 3192.76, "end": 3193.0, "word": " And", "probability": 0.8544921875}, {"start": 3193.0, "end": 3193.16, "word": " I", "probability": 0.92138671875}, {"start": 3193.16, "end": 3193.3, "word": " know", "probability": 0.8916015625}, {"start": 3193.3, "end": 3193.48, "word": " that", "probability": 0.8681640625}, {"start": 3193.48, "end": 3193.64, "word": " the", "probability": 
0.90478515625}, {"start": 3193.64, "end": 3193.98, "word": " total", "probability": 0.84912109375}, {"start": 3193.98, "end": 3194.46, "word": " number", "probability": 0.9306640625}, {"start": 3194.46, "end": 3194.66, "word": " of", "probability": 0.966796875}, {"start": 3194.66, "end": 3194.98, "word": " females", "probability": 0.94970703125}, {"start": 3194.98, "end": 3195.2, "word": " is", "probability": 0.77978515625}, {"start": 3195.2, "end": 3195.56, "word": " 14", "probability": 0.93212890625}, {"start": 3195.56, "end": 3195.96, "word": ",000.", "probability": 0.840087890625}, {"start": 3196.64, "end": 3197.34, "word": " So", "probability": 0.94873046875}, {"start": 3197.34, "end": 3197.78, "word": " number", "probability": 0.68896484375}, {"start": 3197.78, "end": 3198.0, "word": " of", "probability": 0.96337890625}, {"start": 3198.0, "end": 3198.32, "word": " females", "probability": 0.94091796875}, {"start": 3198.32, "end": 3198.5, "word": " in", "probability": 0.82275390625}, {"start": 3198.5, "end": 3198.62, "word": " the", "probability": 0.90185546875}, {"start": 3198.62, "end": 3198.88, "word": " first", "probability": 0.88623046875}, {"start": 3198.88, "end": 3199.32, "word": " level", "probability": 0.95166015625}, {"start": 3199.32, "end": 3200.24, "word": " is", "probability": 0.935546875}, {"start": 3200.24, "end": 3201.68, "word": " 5600.", "probability": 0.723388671875}, {"start": 3202.66, "end": 3203.06, "word": " How", "probability": 0.9541015625}, {"start": 3203.06, "end": 3203.42, "word": " many", "probability": 0.89892578125}, {"start": 3203.42, "end": 3203.76, "word": " digits", "probability": 0.9169921875}, {"start": 3203.76, "end": 3203.96, "word": " we", "probability": 0.74755859375}, {"start": 3203.96, "end": 3204.28, "word": " have?", "probability": 0.9482421875}, {"start": 3204.48, "end": 3204.94, "word": " Four", "probability": 0.80517578125}, {"start": 3204.94, "end": 3205.44, "word": " digits.", "probability": 0.93896484375}, 
{"start": 3206.44, "end": 3206.68, "word": " The", "probability": 0.8603515625}, {"start": 3206.68, "end": 3206.98, "word": " first", "probability": 0.88671875}, {"start": 3206.98, "end": 3207.18, "word": " one,", "probability": 0.904296875}, {"start": 3207.32, "end": 3208.04, "word": " 001,", "probability": 0.92626953125}, {"start": 3208.16, "end": 3208.44, "word": " all", "probability": 0.9375}, {"start": 3208.44, "end": 3208.6, "word": " the", "probability": 0.91748046875}, {"start": 3208.6, "end": 3208.86, "word": " way", "probability": 0.95166015625}, {"start": 3208.86, "end": 3209.3, "word": " up", "probability": 0.92578125}, {"start": 3209.3, "end": 3210.16, "word": " to", "probability": 0.96875}, {"start": 3210.16, "end": 3211.68, "word": " 560.", "probability": 0.89453125}, {"start": 3212.32, "end": 3213.02, "word": " If", "probability": 0.75732421875}, {"start": 3213.02, "end": 3213.12, "word": " you", "probability": 0.9599609375}, {"start": 3213.12, "end": 3213.3, "word": " go", "probability": 0.97021484375}, {"start": 3213.3, "end": 3213.6, "word": " back,", "probability": 0.8583984375}], "temperature": 1.0}, {"id": 118, "seek": 323864, "start": 3213.96, "end": 3238.64, "text": " into a random table, take five, four digits. So the first number is 1922. Next is 3950. And so on. 
So that's the way how can we choose stratified samples.", "tokens": [666, 257, 4974, 3199, 11, 747, 1732, 11, 1451, 27011, 13, 407, 264, 700, 1230, 307, 1294, 7490, 13, 3087, 307, 15238, 2803, 13, 400, 370, 322, 13, 407, 300, 311, 264, 636, 577, 393, 321, 2826, 23674, 2587, 10938, 13], "avg_logprob": -0.2511160792339416, "compression_ratio": 1.2015503875968991, "no_speech_prob": 0.0, "words": [{"start": 3213.96, "end": 3214.32, "word": " into", "probability": 0.1279296875}, {"start": 3214.32, "end": 3214.46, "word": " a", "probability": 0.6748046875}, {"start": 3214.46, "end": 3214.7, "word": " random", "probability": 0.853515625}, {"start": 3214.7, "end": 3215.18, "word": " table,", "probability": 0.8349609375}, {"start": 3215.88, "end": 3217.24, "word": " take", "probability": 0.75537109375}, {"start": 3217.24, "end": 3217.56, "word": " five,", "probability": 0.401611328125}, {"start": 3217.78, "end": 3217.92, "word": " four", "probability": 0.9326171875}, {"start": 3217.92, "end": 3218.38, "word": " digits.", "probability": 0.96484375}, {"start": 3218.92, "end": 3219.06, "word": " So", "probability": 0.9169921875}, {"start": 3219.06, "end": 3219.22, "word": " the", "probability": 0.783203125}, {"start": 3219.22, "end": 3219.52, "word": " first", "probability": 0.8798828125}, {"start": 3219.52, "end": 3219.9, "word": " number", "probability": 0.93603515625}, {"start": 3219.9, "end": 3220.34, "word": " is", "probability": 0.943359375}, {"start": 3220.34, "end": 3223.34, "word": " 1922.", "probability": 0.79833984375}, {"start": 3223.98, "end": 3224.7, "word": " Next", "probability": 0.91796875}, {"start": 3224.7, "end": 3225.26, "word": " is", "probability": 0.94140625}, {"start": 3225.26, "end": 3228.0, "word": " 3950.", "probability": 0.8935546875}, {"start": 3230.14, "end": 3230.48, "word": " And", "probability": 0.74169921875}, {"start": 3230.48, "end": 3230.66, "word": " so", "probability": 0.951171875}, {"start": 3230.66, "end": 3230.94, "word": " on.", 
"probability": 0.8642578125}, {"start": 3232.76, "end": 3233.26, "word": " So", "probability": 0.94091796875}, {"start": 3233.26, "end": 3233.56, "word": " that's", "probability": 0.951171875}, {"start": 3233.56, "end": 3233.68, "word": " the", "probability": 0.8876953125}, {"start": 3233.68, "end": 3233.8, "word": " way", "probability": 0.94677734375}, {"start": 3233.8, "end": 3233.94, "word": " how", "probability": 0.6904296875}, {"start": 3233.94, "end": 3234.1, "word": " can", "probability": 0.88037109375}, {"start": 3234.1, "end": 3234.24, "word": " we", "probability": 0.91650390625}, {"start": 3234.24, "end": 3234.76, "word": " choose", "probability": 0.8759765625}, {"start": 3234.76, "end": 3237.46, "word": " stratified", "probability": 0.962158203125}, {"start": 3237.46, "end": 3238.64, "word": " samples.", "probability": 0.83544921875}], "temperature": 1.0}, {"id": 119, "seek": 327172, "start": 3242.36, "end": 3271.72, "text": " Next, the last one is called clusters. And let's see now what's the difference between stratified and cluster. Step one. 
Population is divided into some clusters.", "tokens": [3087, 11, 264, 1036, 472, 307, 1219, 23313, 13, 400, 718, 311, 536, 586, 437, 311, 264, 2649, 1296, 23674, 2587, 293, 13630, 13, 5470, 472, 13, 10215, 2776, 307, 6666, 666, 512, 23313, 13], "avg_logprob": -0.17578124586078855, "compression_ratio": 1.3471074380165289, "no_speech_prob": 0.0, "words": [{"start": 3242.36, "end": 3243.02, "word": " Next,", "probability": 0.78369140625}, {"start": 3243.54, "end": 3243.74, "word": " the", "probability": 0.908203125}, {"start": 3243.74, "end": 3243.94, "word": " last", "probability": 0.876953125}, {"start": 3243.94, "end": 3244.26, "word": " one", "probability": 0.93017578125}, {"start": 3244.26, "end": 3244.6, "word": " is", "probability": 0.80224609375}, {"start": 3244.6, "end": 3244.88, "word": " called", "probability": 0.9033203125}, {"start": 3244.88, "end": 3245.56, "word": " clusters.", "probability": 0.52294921875}, {"start": 3247.1, "end": 3247.98, "word": " And", "probability": 0.85595703125}, {"start": 3247.98, "end": 3248.24, "word": " let's", "probability": 0.931396484375}, {"start": 3248.24, "end": 3248.42, "word": " see", "probability": 0.927734375}, {"start": 3248.42, "end": 3248.7, "word": " now", "probability": 0.90478515625}, {"start": 3248.7, "end": 3249.4, "word": " what's", "probability": 0.79150390625}, {"start": 3249.4, "end": 3249.56, "word": " the", "probability": 0.9248046875}, {"start": 3249.56, "end": 3250.0, "word": " difference", "probability": 0.85888671875}, {"start": 3250.0, "end": 3250.44, "word": " between", "probability": 0.87841796875}, {"start": 3250.44, "end": 3251.4, "word": " stratified", "probability": 0.876708984375}, {"start": 3251.4, "end": 3252.74, "word": " and", "probability": 0.9404296875}, {"start": 3252.74, "end": 3253.12, "word": " cluster.", "probability": 0.51025390625}, {"start": 3253.78, "end": 3254.1, "word": " Step", "probability": 0.955078125}, {"start": 3254.1, "end": 3256.5, "word": " one.", "probability": 
0.6083984375}, {"start": 3265.3, "end": 3266.18, "word": " Population", "probability": 0.947021484375}, {"start": 3266.18, "end": 3268.1, "word": " is", "probability": 0.92724609375}, {"start": 3268.1, "end": 3268.54, "word": " divided", "probability": 0.845703125}, {"start": 3268.54, "end": 3271.0, "word": " into", "probability": 0.83544921875}, {"start": 3271.0, "end": 3271.26, "word": " some", "probability": 0.7822265625}, {"start": 3271.26, "end": 3271.72, "word": " clusters.", "probability": 0.9287109375}], "temperature": 1.0}, {"id": 120, "seek": 329808, "start": 3275.0, "end": 3298.08, "text": " Step two, assemble one by assembling clusters selective. Here suppose how many clusters? 16 clusters. So there are, so the population has", "tokens": [5470, 732, 11, 22364, 472, 538, 43867, 23313, 33930, 13, 634, 265, 7297, 577, 867, 23313, 30, 3165, 23313, 13, 407, 456, 366, 11, 370, 264, 4415, 575], "avg_logprob": -0.4919181116696062, "compression_ratio": 1.3018867924528301, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3275.0, "end": 3275.42, "word": " Step", "probability": 0.58056640625}, {"start": 3275.42, "end": 3276.12, "word": " two,", "probability": 0.388427734375}, {"start": 3277.3, "end": 3278.14, "word": " assemble", "probability": 0.7646484375}, {"start": 3278.14, "end": 3278.94, "word": " one", "probability": 0.14990234375}, {"start": 3278.94, "end": 3279.2, "word": " by", "probability": 0.86474609375}, {"start": 3279.2, "end": 3280.18, "word": " assembling", "probability": 0.7939453125}, {"start": 3280.18, "end": 3281.16, "word": " clusters", "probability": 0.362060546875}, {"start": 3281.16, "end": 3282.74, "word": " selective.", "probability": 0.50537109375}, {"start": 3286.1, "end": 3286.52, "word": " Here", "probability": 0.4674072265625}, {"start": 3286.52, "end": 3287.26, "word": " suppose", "probability": 0.42138671875}, {"start": 3287.26, "end": 3287.96, "word": " how", "probability": 0.435302734375}, {"start": 3287.96, "end": 
3288.18, "word": " many", "probability": 0.9013671875}, {"start": 3288.18, "end": 3288.64, "word": " clusters?", "probability": 0.9404296875}, {"start": 3293.56, "end": 3294.6, "word": " 16", "probability": 0.474365234375}, {"start": 3294.6, "end": 3295.06, "word": " clusters.", "probability": 0.97119140625}, {"start": 3295.7, "end": 3295.96, "word": " So", "probability": 0.8994140625}, {"start": 3295.96, "end": 3296.16, "word": " there", "probability": 0.7080078125}, {"start": 3296.16, "end": 3296.52, "word": " are,", "probability": 0.94580078125}, {"start": 3296.8, "end": 3297.02, "word": " so", "probability": 0.84619140625}, {"start": 3297.02, "end": 3297.14, "word": " the", "probability": 0.89697265625}, {"start": 3297.14, "end": 3297.56, "word": " population", "probability": 0.89501953125}, {"start": 3297.56, "end": 3298.08, "word": " has", "probability": 0.75048828125}], "temperature": 1.0}, {"id": 121, "seek": 332349, "start": 3319.31, "end": 3323.49, "text": " Step two, you have to choose", "tokens": [5470, 732, 11, 291, 362, 281, 2826], "avg_logprob": -0.4877929463982582, "compression_ratio": 0.7837837837837838, "no_speech_prob": 0.0, "words": [{"start": 3319.3099999999995, "end": 3320.0699999999997, "word": " Step", "probability": 0.51123046875}, {"start": 3320.0699999999997, "end": 3320.83, "word": " two,", "probability": 0.387451171875}, {"start": 3322.65, "end": 3322.73, "word": " you", "probability": 0.92724609375}, {"start": 3322.73, "end": 3322.93, "word": " have", "probability": 0.93701171875}, {"start": 3322.93, "end": 3323.11, "word": " to", "probability": 0.880859375}, {"start": 3323.11, "end": 3323.49, "word": " choose", "probability": 0.8603515625}], "temperature": 1.0}, {"id": 122, "seek": 335440, "start": 3324.66, "end": 3354.4, "text": " a simple random number of clusters out of 16. Suppose I decided to choose three among these. So we have 16 clusters. For example, I chose cluster number 411. 
So I choose these clusters.", "tokens": [257, 2199, 4974, 1230, 295, 23313, 484, 295, 3165, 13, 21360, 286, 3047, 281, 2826, 1045, 3654, 613, 13, 407, 321, 362, 3165, 23313, 13, 1171, 1365, 11, 286, 5111, 13630, 1230, 1017, 5348, 13, 407, 286, 2826, 613, 23313, 13], "avg_logprob": -0.23288690085921968, "compression_ratio": 1.441860465116279, "no_speech_prob": 0.0, "words": [{"start": 3324.66, "end": 3324.94, "word": " a", "probability": 0.370361328125}, {"start": 3324.94, "end": 3325.32, "word": " simple", "probability": 0.853515625}, {"start": 3325.32, "end": 3325.82, "word": " random", "probability": 0.615234375}, {"start": 3325.82, "end": 3326.8, "word": " number", "probability": 0.9345703125}, {"start": 3326.8, "end": 3327.02, "word": " of", "probability": 0.94921875}, {"start": 3327.02, "end": 3327.44, "word": " clusters", "probability": 0.90966796875}, {"start": 3327.44, "end": 3327.84, "word": " out", "probability": 0.86767578125}, {"start": 3327.84, "end": 3328.02, "word": " of", "probability": 0.9736328125}, {"start": 3328.02, "end": 3328.54, "word": " 16.", "probability": 0.68603515625}, {"start": 3329.34, "end": 3329.86, "word": " Suppose", "probability": 0.7578125}, {"start": 3329.86, "end": 3330.32, "word": " I", "probability": 0.92236328125}, {"start": 3330.32, "end": 3330.98, "word": " decided", "probability": 0.85400390625}, {"start": 3330.98, "end": 3331.44, "word": " to", "probability": 0.9658203125}, {"start": 3331.44, "end": 3331.96, "word": " choose", "probability": 0.88232421875}, {"start": 3331.96, "end": 3332.64, "word": " three", "probability": 0.493896484375}, {"start": 3332.64, "end": 3333.06, "word": " among", "probability": 0.9111328125}, {"start": 3333.06, "end": 3333.6, "word": " these.", "probability": 0.79052734375}, {"start": 3335.08, "end": 3335.5, "word": " So", "probability": 0.86572265625}, {"start": 3335.5, "end": 3336.64, "word": " we", "probability": 0.50634765625}, {"start": 3336.64, "end": 3336.9, "word": " have", 
"probability": 0.94921875}, {"start": 3336.9, "end": 3337.66, "word": " 16", "probability": 0.826171875}, {"start": 3337.66, "end": 3338.3, "word": " clusters.", "probability": 0.93701171875}, {"start": 3345.34, "end": 3346.28, "word": " For", "probability": 0.94580078125}, {"start": 3346.28, "end": 3346.62, "word": " example,", "probability": 0.97705078125}, {"start": 3346.7, "end": 3346.86, "word": " I", "probability": 0.9921875}, {"start": 3346.86, "end": 3347.18, "word": " chose", "probability": 0.52001953125}, {"start": 3347.18, "end": 3347.6, "word": " cluster", "probability": 0.92578125}, {"start": 3347.6, "end": 3347.96, "word": " number", "probability": 0.9140625}, {"start": 3347.96, "end": 3349.78, "word": " 411.", "probability": 0.4737548828125}, {"start": 3351.64, "end": 3352.02, "word": " So", "probability": 0.86669921875}, {"start": 3352.02, "end": 3352.14, "word": " I", "probability": 0.96044921875}, {"start": 3352.14, "end": 3352.62, "word": " choose", "probability": 0.64697265625}, {"start": 3352.62, "end": 3353.92, "word": " these", "probability": 0.65185546875}, {"start": 3353.92, "end": 3354.4, "word": " clusters.", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 123, "seek": 338411, "start": 3355.95, "end": 3384.11, "text": " Next, all items in the selected clusters can be used. Or items can be chosen from a cluster using another probability sampling technique. 
For example, imagine that", "tokens": [3087, 11, 439, 4754, 294, 264, 8209, 23313, 393, 312, 1143, 13, 1610, 4754, 393, 312, 8614, 490, 257, 13630, 1228, 1071, 8482, 21179, 6532, 13, 1171, 1365, 11, 3811, 300], "avg_logprob": -0.2546386672183871, "compression_ratio": 1.3442622950819672, "no_speech_prob": 0.0, "words": [{"start": 3355.95, "end": 3356.87, "word": " Next,", "probability": 0.654296875}, {"start": 3358.29, "end": 3358.47, "word": " all", "probability": 0.8466796875}, {"start": 3358.47, "end": 3359.05, "word": " items", "probability": 0.81787109375}, {"start": 3359.05, "end": 3360.89, "word": " in", "probability": 0.86572265625}, {"start": 3360.89, "end": 3361.03, "word": " the", "probability": 0.87890625}, {"start": 3361.03, "end": 3361.49, "word": " selected", "probability": 0.888671875}, {"start": 3361.49, "end": 3362.01, "word": " clusters", "probability": 0.84375}, {"start": 3362.01, "end": 3362.37, "word": " can", "probability": 0.9404296875}, {"start": 3362.37, "end": 3362.51, "word": " be", "probability": 0.9599609375}, {"start": 3362.51, "end": 3362.91, "word": " used.", "probability": 0.9296875}, {"start": 3369.13, "end": 3369.17, "word": " Or", "probability": 0.341064453125}, {"start": 3369.17, "end": 3375.77, "word": " items", "probability": 0.521484375}, {"start": 3375.77, "end": 3376.09, "word": " can", "probability": 0.94384765625}, {"start": 3376.09, "end": 3376.27, "word": " be", "probability": 0.95458984375}, {"start": 3376.27, "end": 3376.75, "word": " chosen", "probability": 0.9580078125}, {"start": 3376.75, "end": 3377.23, "word": " from", "probability": 0.86572265625}, {"start": 3377.23, "end": 3377.45, "word": " a", "probability": 0.9072265625}, {"start": 3377.45, "end": 3377.77, "word": " cluster", "probability": 0.89306640625}, {"start": 3377.77, "end": 3378.37, "word": " using", "probability": 0.9130859375}, {"start": 3378.37, "end": 3378.91, "word": " another", "probability": 0.939453125}, {"start": 3378.91, "end": 3379.49, 
"word": " probability", "probability": 0.93212890625}, {"start": 3379.49, "end": 3379.89, "word": " sampling", "probability": 0.4443359375}, {"start": 3379.89, "end": 3380.41, "word": " technique.", "probability": 0.935546875}, {"start": 3380.57, "end": 3380.71, "word": " For", "probability": 0.88232421875}, {"start": 3380.71, "end": 3381.13, "word": " example,", "probability": 0.97216796875}, {"start": 3383.19, "end": 3383.69, "word": " imagine", "probability": 0.87890625}, {"start": 3383.69, "end": 3384.11, "word": " that", "probability": 0.86279296875}], "temperature": 1.0}, {"id": 124, "seek": 341054, "start": 3384.9, "end": 3410.54, "text": " We are talking about students who registered for accounting. Imagine that we have six sections for accounting.", "tokens": [492, 366, 1417, 466, 1731, 567, 13968, 337, 19163, 13, 11739, 300, 321, 362, 2309, 10863, 337, 19163, 13], "avg_logprob": -0.17871093526482582, "compression_ratio": 1.247191011235955, "no_speech_prob": 0.0, "words": [{"start": 3384.9, "end": 3385.2, "word": " We", "probability": 0.4453125}, {"start": 3385.2, "end": 3385.4, "word": " are", "probability": 0.9052734375}, {"start": 3385.4, "end": 3385.78, "word": " talking", "probability": 0.84619140625}, {"start": 3385.78, "end": 3386.36, "word": " about", "probability": 0.91455078125}, {"start": 3386.36, "end": 3387.28, "word": " students", "probability": 0.921875}, {"start": 3387.28, "end": 3388.84, "word": " who", "probability": 0.841796875}, {"start": 3388.84, "end": 3389.38, "word": " registered", "probability": 0.71142578125}, {"start": 3389.38, "end": 3390.32, "word": " for", "probability": 0.94921875}, {"start": 3390.32, "end": 3391.46, "word": " accounting.", "probability": 0.91796875}, {"start": 3405.88, "end": 3406.96, "word": " Imagine", "probability": 0.8115234375}, {"start": 3406.96, "end": 3407.22, "word": " that", "probability": 0.93408203125}, {"start": 3407.22, "end": 3407.4, "word": " we", "probability": 0.94580078125}, {"start": 
3407.4, "end": 3407.74, "word": " have", "probability": 0.93798828125}, {"start": 3407.74, "end": 3409.3, "word": " six", "probability": 0.93359375}, {"start": 3409.3, "end": 3409.82, "word": " sections", "probability": 0.92724609375}, {"start": 3409.82, "end": 3410.12, "word": " for", "probability": 0.947265625}, {"start": 3410.12, "end": 3410.54, "word": " accounting.", "probability": 0.93603515625}], "temperature": 1.0}, {"id": 125, "seek": 344081, "start": 3415.85, "end": 3440.81, "text": " six sections. And I just choose two of these, cluster number one or section number one and the last one. So my chosen clusters are number one and six, one and six. Or you can use the one we just talked about, stratified random sample.", "tokens": [2309, 10863, 13, 400, 286, 445, 2826, 732, 295, 613, 11, 13630, 1230, 472, 420, 3541, 1230, 472, 293, 264, 1036, 472, 13, 407, 452, 8614, 23313, 366, 1230, 472, 293, 2309, 11, 472, 293, 2309, 13, 1610, 291, 393, 764, 264, 472, 321, 445, 2825, 466, 11, 23674, 2587, 4974, 6889, 13], "avg_logprob": -0.21310764606352206, "compression_ratio": 1.587837837837838, "no_speech_prob": 0.0, "words": [{"start": 3415.85, "end": 3416.23, "word": " six", "probability": 0.1959228515625}, {"start": 3416.23, "end": 3416.65, "word": " sections.", "probability": 0.904296875}, {"start": 3420.31, "end": 3421.01, "word": " And", "probability": 0.86962890625}, {"start": 3421.01, "end": 3421.15, "word": " I", "probability": 0.9521484375}, {"start": 3421.15, "end": 3421.41, "word": " just", "probability": 0.90234375}, {"start": 3421.41, "end": 3421.97, "word": " choose", "probability": 0.4521484375}, {"start": 3421.97, "end": 3422.65, "word": " two", "probability": 0.89794921875}, {"start": 3422.65, "end": 3422.87, "word": " of", "probability": 0.83447265625}, {"start": 3422.87, "end": 3423.13, "word": " these,", "probability": 0.82958984375}, {"start": 3424.07, "end": 3424.43, "word": " cluster", "probability": 0.82080078125}, {"start": 3424.43, "end": 
3424.85, "word": " number", "probability": 0.91064453125}, {"start": 3424.85, "end": 3425.21, "word": " one", "probability": 0.8662109375}, {"start": 3425.21, "end": 3425.93, "word": " or", "probability": 0.583984375}, {"start": 3425.93, "end": 3426.25, "word": " section", "probability": 0.90283203125}, {"start": 3426.25, "end": 3426.51, "word": " number", "probability": 0.927734375}, {"start": 3426.51, "end": 3426.87, "word": " one", "probability": 0.92333984375}, {"start": 3426.87, "end": 3427.17, "word": " and", "probability": 0.6552734375}, {"start": 3427.17, "end": 3427.27, "word": " the", "probability": 0.90673828125}, {"start": 3427.27, "end": 3427.47, "word": " last", "probability": 0.880859375}, {"start": 3427.47, "end": 3427.67, "word": " one.", "probability": 0.9208984375}, {"start": 3428.49, "end": 3428.71, "word": " So", "probability": 0.9423828125}, {"start": 3428.71, "end": 3428.91, "word": " my", "probability": 0.84326171875}, {"start": 3428.91, "end": 3429.41, "word": " chosen", "probability": 0.95703125}, {"start": 3429.41, "end": 3430.31, "word": " clusters", "probability": 0.921875}, {"start": 3430.31, "end": 3430.67, "word": " are", "probability": 0.93603515625}, {"start": 3430.67, "end": 3430.93, "word": " number", "probability": 0.9130859375}, {"start": 3430.93, "end": 3431.17, "word": " one", "probability": 0.87158203125}, {"start": 3431.17, "end": 3431.31, "word": " and", "probability": 0.9287109375}, {"start": 3431.31, "end": 3431.63, "word": " six,", "probability": 0.94580078125}, {"start": 3432.19, "end": 3432.37, "word": " one", "probability": 0.90185546875}, {"start": 3432.37, "end": 3432.59, "word": " and", "probability": 0.91650390625}, {"start": 3432.59, "end": 3432.93, "word": " six.", "probability": 0.95263671875}, {"start": 3433.35, "end": 3433.67, "word": " Or", "probability": 0.95947265625}, {"start": 3433.67, "end": 3435.15, "word": " you", "probability": 0.8505859375}, {"start": 3435.15, "end": 3435.43, "word": " can", 
"probability": 0.9462890625}, {"start": 3435.43, "end": 3435.87, "word": " use", "probability": 0.8828125}, {"start": 3435.87, "end": 3437.67, "word": " the", "probability": 0.88134765625}, {"start": 3437.67, "end": 3437.89, "word": " one", "probability": 0.9287109375}, {"start": 3437.89, "end": 3438.09, "word": " we", "probability": 0.95654296875}, {"start": 3438.09, "end": 3438.39, "word": " just", "probability": 0.890625}, {"start": 3438.39, "end": 3438.69, "word": " talked", "probability": 0.62890625}, {"start": 3438.69, "end": 3439.09, "word": " about,", "probability": 0.89990234375}, {"start": 3439.59, "end": 3440.07, "word": " stratified", "probability": 0.9208984375}, {"start": 3440.07, "end": 3440.37, "word": " random", "probability": 0.8955078125}, {"start": 3440.37, "end": 3440.81, "word": " sample.", "probability": 0.8447265625}], "temperature": 1.0}, {"id": 126, "seek": 346672, "start": 3441.4, "end": 3466.72, "text": " instead of using all for example suppose there are in this section there are 73 models and the other one there are 80 models and the sample size here I am going to use case 20", "tokens": [2602, 295, 1228, 439, 337, 1365, 7297, 456, 366, 294, 341, 3541, 456, 366, 28387, 5245, 293, 264, 661, 472, 456, 366, 4688, 5245, 293, 264, 6889, 2744, 510, 286, 669, 516, 281, 764, 1389, 945], "avg_logprob": -0.37774491954494166, "compression_ratio": 1.4915254237288136, "no_speech_prob": 0.0, "words": [{"start": 3441.4, "end": 3441.84, "word": " instead", "probability": 0.449462890625}, {"start": 3441.84, "end": 3441.98, "word": " of", "probability": 0.95947265625}, {"start": 3441.98, "end": 3442.28, "word": " using", "probability": 0.9267578125}, {"start": 3442.28, "end": 3442.66, "word": " all", "probability": 0.76123046875}, {"start": 3442.66, "end": 3443.34, "word": " for", "probability": 0.423828125}, {"start": 3443.34, "end": 3443.66, "word": " example", "probability": 0.97216796875}, {"start": 3443.66, "end": 3444.22, "word": " suppose", 
"probability": 0.6650390625}, {"start": 3444.22, "end": 3444.56, "word": " there", "probability": 0.83447265625}, {"start": 3444.56, "end": 3444.92, "word": " are", "probability": 0.93505859375}, {"start": 3444.92, "end": 3446.0, "word": " in", "probability": 0.6923828125}, {"start": 3446.0, "end": 3446.2, "word": " this", "probability": 0.9521484375}, {"start": 3446.2, "end": 3446.74, "word": " section", "probability": 0.896484375}, {"start": 3446.74, "end": 3449.14, "word": " there", "probability": 0.45654296875}, {"start": 3449.14, "end": 3449.48, "word": " are", "probability": 0.94091796875}, {"start": 3449.48, "end": 3450.42, "word": " 73", "probability": 0.1644287109375}, {"start": 3450.42, "end": 3451.34, "word": " models", "probability": 0.343505859375}, {"start": 3451.34, "end": 3453.0, "word": " and", "probability": 0.4150390625}, {"start": 3453.0, "end": 3453.18, "word": " the", "probability": 0.421630859375}, {"start": 3453.18, "end": 3453.54, "word": " other", "probability": 0.8779296875}, {"start": 3453.54, "end": 3454.0, "word": " one", "probability": 0.8896484375}, {"start": 3454.0, "end": 3454.7, "word": " there", "probability": 0.7958984375}, {"start": 3454.7, "end": 3455.32, "word": " are", "probability": 0.95166015625}, {"start": 3455.32, "end": 3456.18, "word": " 80", "probability": 0.52197265625}, {"start": 3456.18, "end": 3457.08, "word": " models", "probability": 0.34521484375}, {"start": 3457.08, "end": 3462.3, "word": " and", "probability": 0.81689453125}, {"start": 3462.3, "end": 3462.56, "word": " the", "probability": 0.56591796875}, {"start": 3462.56, "end": 3462.82, "word": " sample", "probability": 0.88037109375}, {"start": 3462.82, "end": 3463.52, "word": " size", "probability": 0.85302734375}, {"start": 3463.52, "end": 3464.34, "word": " here", "probability": 0.814453125}, {"start": 3464.34, "end": 3464.64, "word": " I", "probability": 0.77197265625}, {"start": 3464.64, "end": 3464.8, "word": " am", "probability": 0.6162109375}, 
{"start": 3464.8, "end": 3465.16, "word": " going", "probability": 0.94482421875}, {"start": 3465.16, "end": 3465.4, "word": " to", "probability": 0.96875}, {"start": 3465.4, "end": 3465.86, "word": " use", "probability": 0.8857421875}, {"start": 3465.86, "end": 3466.28, "word": " case", "probability": 0.48779296875}, {"start": 3466.28, "end": 3466.72, "word": " 20", "probability": 0.8955078125}], "temperature": 1.0}, {"id": 127, "seek": 349900, "start": 3470.9, "end": 3499.0, "text": " So you can use 10 here and 10 in the other one, or it depends on the proportions. Now, 70 represents 70 out of 150, because there are 150 students in these two clusters. Now, the entire population is not the number for each of all of these clusters, just number one sixth.", "tokens": [407, 291, 393, 764, 1266, 510, 293, 1266, 294, 264, 661, 472, 11, 420, 309, 5946, 322, 264, 32482, 13, 823, 11, 5285, 8855, 5285, 484, 295, 8451, 11, 570, 456, 366, 8451, 1731, 294, 613, 732, 23313, 13, 823, 11, 264, 2302, 4415, 307, 406, 264, 1230, 337, 1184, 295, 439, 295, 613, 23313, 11, 445, 1230, 472, 15102, 13], "avg_logprob": -0.17389113431976688, "compression_ratio": 1.56, "no_speech_prob": 0.0, "words": [{"start": 3470.9, "end": 3471.22, "word": " So", "probability": 0.8583984375}, {"start": 3471.22, "end": 3471.44, "word": " you", "probability": 0.8349609375}, {"start": 3471.44, "end": 3471.7, "word": " can", "probability": 0.94482421875}, {"start": 3471.7, "end": 3472.16, "word": " use", "probability": 0.8876953125}, {"start": 3472.16, "end": 3472.74, "word": " 10", "probability": 0.8212890625}, {"start": 3472.74, "end": 3473.8, "word": " here", "probability": 0.8359375}, {"start": 3473.8, "end": 3474.12, "word": " and", "probability": 0.8125}, {"start": 3474.12, "end": 3474.36, "word": " 10", "probability": 0.9560546875}, {"start": 3474.36, "end": 3474.58, "word": " in", "probability": 0.60546875}, {"start": 3474.58, "end": 3474.66, "word": " the", "probability": 0.89208984375}, {"start": 
3474.66, "end": 3474.84, "word": " other", "probability": 0.88720703125}, {"start": 3474.84, "end": 3475.18, "word": " one,", "probability": 0.92431640625}, {"start": 3476.14, "end": 3476.52, "word": " or", "probability": 0.92919921875}, {"start": 3476.52, "end": 3476.8, "word": " it", "probability": 0.93017578125}, {"start": 3476.8, "end": 3477.22, "word": " depends", "probability": 0.91796875}, {"start": 3477.22, "end": 3477.64, "word": " on", "probability": 0.94921875}, {"start": 3477.64, "end": 3478.74, "word": " the", "probability": 0.9150390625}, {"start": 3478.74, "end": 3479.26, "word": " proportions.", "probability": 0.367919921875}, {"start": 3480.28, "end": 3480.72, "word": " Now,", "probability": 0.953125}, {"start": 3481.06, "end": 3481.68, "word": " 70", "probability": 0.97216796875}, {"start": 3481.68, "end": 3483.06, "word": " represents", "probability": 0.86572265625}, {"start": 3483.06, "end": 3483.68, "word": " 70", "probability": 0.97021484375}, {"start": 3483.68, "end": 3484.02, "word": " out", "probability": 0.8681640625}, {"start": 3484.02, "end": 3484.7, "word": " of", "probability": 0.96923828125}, {"start": 3484.7, "end": 3486.66, "word": " 150,", "probability": 0.8583984375}, {"start": 3487.38, "end": 3487.76, "word": " because", "probability": 0.8271484375}, {"start": 3487.76, "end": 3487.92, "word": " there", "probability": 0.90478515625}, {"start": 3487.92, "end": 3488.1, "word": " are", "probability": 0.9453125}, {"start": 3488.1, "end": 3488.66, "word": " 150", "probability": 0.8994140625}, {"start": 3488.66, "end": 3489.22, "word": " students", "probability": 0.97802734375}, {"start": 3489.22, "end": 3489.58, "word": " in", "probability": 0.94140625}, {"start": 3489.58, "end": 3489.76, "word": " these", "probability": 0.84521484375}, {"start": 3489.76, "end": 3489.94, "word": " two", "probability": 0.91748046875}, {"start": 3489.94, "end": 3490.32, "word": " clusters.", "probability": 0.92138671875}, {"start": 3491.26, "end": 
3491.54, "word": " Now,", "probability": 0.96044921875}, {"start": 3491.84, "end": 3491.98, "word": " the", "probability": 0.9150390625}, {"start": 3491.98, "end": 3492.32, "word": " entire", "probability": 0.84765625}, {"start": 3492.32, "end": 3492.88, "word": " population", "probability": 0.95068359375}, {"start": 3492.88, "end": 3494.06, "word": " is", "probability": 0.939453125}, {"start": 3494.06, "end": 3494.3, "word": " not", "probability": 0.94677734375}, {"start": 3494.3, "end": 3494.48, "word": " the", "probability": 0.90869140625}, {"start": 3494.48, "end": 3494.86, "word": " number", "probability": 0.9365234375}, {"start": 3494.86, "end": 3495.4, "word": " for", "probability": 0.94384765625}, {"start": 3495.4, "end": 3495.78, "word": " each", "probability": 0.93115234375}, {"start": 3495.78, "end": 3496.04, "word": " of", "probability": 0.8974609375}, {"start": 3496.04, "end": 3496.46, "word": " all", "probability": 0.8544921875}, {"start": 3496.46, "end": 3496.6, "word": " of", "probability": 0.90771484375}, {"start": 3496.6, "end": 3496.8, "word": " these", "probability": 0.8486328125}, {"start": 3496.8, "end": 3497.3, "word": " clusters,", "probability": 0.9248046875}, {"start": 3497.56, "end": 3497.98, "word": " just", "probability": 0.888671875}, {"start": 3497.98, "end": 3498.42, "word": " number", "probability": 0.83984375}, {"start": 3498.42, "end": 3498.62, "word": " one", "probability": 0.5908203125}, {"start": 3498.62, "end": 3499.0, "word": " sixth.", "probability": 0.376220703125}], "temperature": 1.0}, {"id": 128, "seek": 352161, "start": 3499.55, "end": 3521.61, "text": " So there are 150 students in these two selected clusters. So the population size is 150. Make sense? Then the proportion here is 700 divided by 150 times 20. 
The other one, 80 divided by 150 times 20.", "tokens": [407, 456, 366, 8451, 1731, 294, 613, 732, 8209, 23313, 13, 407, 264, 4415, 2744, 307, 8451, 13, 4387, 2020, 30, 1396, 264, 16068, 510, 307, 15204, 6666, 538, 8451, 1413, 945, 13, 440, 661, 472, 11, 4688, 6666, 538, 8451, 1413, 945, 13], "avg_logprob": -0.15763889153798422, "compression_ratio": 1.467153284671533, "no_speech_prob": 0.0, "words": [{"start": 3499.55, "end": 3499.93, "word": " So", "probability": 0.8330078125}, {"start": 3499.93, "end": 3500.21, "word": " there", "probability": 0.779296875}, {"start": 3500.21, "end": 3500.49, "word": " are", "probability": 0.94775390625}, {"start": 3500.49, "end": 3501.53, "word": " 150", "probability": 0.88232421875}, {"start": 3501.53, "end": 3502.31, "word": " students", "probability": 0.97607421875}, {"start": 3502.31, "end": 3502.53, "word": " in", "probability": 0.828125}, {"start": 3502.53, "end": 3502.69, "word": " these", "probability": 0.552734375}, {"start": 3502.69, "end": 3502.87, "word": " two", "probability": 0.7412109375}, {"start": 3502.87, "end": 3503.27, "word": " selected", "probability": 0.85888671875}, {"start": 3503.27, "end": 3503.81, "word": " clusters.", "probability": 0.8505859375}, {"start": 3504.29, "end": 3504.61, "word": " So", "probability": 0.93994140625}, {"start": 3504.61, "end": 3504.73, "word": " the", "probability": 0.8623046875}, {"start": 3504.73, "end": 3505.09, "word": " population", "probability": 0.9365234375}, {"start": 3505.09, "end": 3505.53, "word": " size", "probability": 0.85205078125}, {"start": 3505.53, "end": 3505.65, "word": " is", "probability": 0.9033203125}, {"start": 3505.65, "end": 3506.41, "word": " 150.", "probability": 0.8896484375}, {"start": 3506.91, "end": 3507.39, "word": " Make", "probability": 0.85791015625}, {"start": 3507.39, "end": 3507.71, "word": " sense?", "probability": 0.84033203125}, {"start": 3508.49, "end": 3508.85, "word": " Then", "probability": 0.84130859375}, {"start": 3508.85, 
"end": 3509.27, "word": " the", "probability": 0.73193359375}, {"start": 3509.27, "end": 3509.75, "word": " proportion", "probability": 0.81689453125}, {"start": 3509.75, "end": 3510.03, "word": " here", "probability": 0.84521484375}, {"start": 3510.03, "end": 3510.23, "word": " is", "probability": 0.943359375}, {"start": 3510.23, "end": 3510.73, "word": " 700", "probability": 0.95703125}, {"start": 3510.73, "end": 3511.45, "word": " divided", "probability": 0.68408203125}, {"start": 3511.45, "end": 3511.65, "word": " by", "probability": 0.97021484375}, {"start": 3511.65, "end": 3512.17, "word": " 150", "probability": 0.92822265625}, {"start": 3512.17, "end": 3512.79, "word": " times", "probability": 0.83935546875}, {"start": 3512.79, "end": 3513.21, "word": " 20.", "probability": 0.966796875}, {"start": 3515.97, "end": 3516.81, "word": " The", "probability": 0.87353515625}, {"start": 3516.81, "end": 3517.05, "word": " other", "probability": 0.87841796875}, {"start": 3517.05, "end": 3517.39, "word": " one,", "probability": 0.9189453125}, {"start": 3519.03, "end": 3519.39, "word": " 80", "probability": 0.9580078125}, {"start": 3519.39, "end": 3519.71, "word": " divided", "probability": 0.81005859375}, {"start": 3519.71, "end": 3519.89, "word": " by", "probability": 0.9443359375}, {"start": 3519.89, "end": 3520.47, "word": " 150", "probability": 0.9345703125}, {"start": 3520.47, "end": 3521.19, "word": " times", "probability": 0.90673828125}, {"start": 3521.19, "end": 3521.61, "word": " 20.", "probability": 0.9560546875}], "temperature": 1.0}, {"id": 129, "seek": 355086, "start": 3531.68, "end": 3550.86, "text": " So again, all items in the selected clusters can be used or items can be chosen from the cluster using another probability technique as we mentioned. Let's see how can we use another example. 
Let's talk about again AUG students.", "tokens": [407, 797, 11, 439, 4754, 294, 264, 8209, 23313, 393, 312, 1143, 420, 4754, 393, 312, 8614, 490, 264, 13630, 1228, 1071, 8482, 6532, 382, 321, 2835, 13, 961, 311, 536, 577, 393, 321, 764, 1071, 1365, 13, 961, 311, 751, 466, 797, 7171, 38, 1731, 13], "avg_logprob": -0.15364582960804304, "compression_ratio": 1.4679487179487178, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3531.68, "end": 3531.84, "word": " So", "probability": 0.837890625}, {"start": 3531.84, "end": 3532.14, "word": " again,", "probability": 0.8603515625}, {"start": 3533.54, "end": 3534.12, "word": " all", "probability": 0.92529296875}, {"start": 3534.12, "end": 3534.48, "word": " items", "probability": 0.84228515625}, {"start": 3534.48, "end": 3534.7, "word": " in", "probability": 0.93115234375}, {"start": 3534.7, "end": 3534.82, "word": " the", "probability": 0.8212890625}, {"start": 3534.82, "end": 3535.16, "word": " selected", "probability": 0.7802734375}, {"start": 3535.16, "end": 3535.66, "word": " clusters", "probability": 0.79638671875}, {"start": 3535.66, "end": 3535.96, "word": " can", "probability": 0.94091796875}, {"start": 3535.96, "end": 3536.12, "word": " be", "probability": 0.95458984375}, {"start": 3536.12, "end": 3536.42, "word": " used", "probability": 0.92529296875}, {"start": 3536.42, "end": 3536.64, "word": " or", "probability": 0.5927734375}, {"start": 3536.64, "end": 3537.08, "word": " items", "probability": 0.70361328125}, {"start": 3537.08, "end": 3537.3, "word": " can", "probability": 0.92919921875}, {"start": 3537.3, "end": 3537.44, "word": " be", "probability": 0.95458984375}, {"start": 3537.44, "end": 3537.9, "word": " chosen", "probability": 0.966796875}, {"start": 3537.9, "end": 3538.84, "word": " from", "probability": 0.86474609375}, {"start": 3538.84, "end": 3539.04, "word": " the", "probability": 0.87744140625}, {"start": 3539.04, "end": 3539.4, "word": " cluster", "probability": 0.79248046875}, 
{"start": 3539.4, "end": 3539.8, "word": " using", "probability": 0.9140625}, {"start": 3539.8, "end": 3540.16, "word": " another", "probability": 0.94140625}, {"start": 3540.16, "end": 3540.64, "word": " probability", "probability": 0.93310546875}, {"start": 3540.64, "end": 3541.1, "word": " technique", "probability": 0.91552734375}, {"start": 3541.1, "end": 3541.36, "word": " as", "probability": 0.70849609375}, {"start": 3541.36, "end": 3541.5, "word": " we", "probability": 0.94384765625}, {"start": 3541.5, "end": 3541.78, "word": " mentioned.", "probability": 0.8212890625}, {"start": 3544.4, "end": 3545.1, "word": " Let's", "probability": 0.922607421875}, {"start": 3545.1, "end": 3545.28, "word": " see", "probability": 0.9208984375}, {"start": 3545.28, "end": 3545.5, "word": " how", "probability": 0.91015625}, {"start": 3545.5, "end": 3545.72, "word": " can", "probability": 0.80126953125}, {"start": 3545.72, "end": 3545.92, "word": " we", "probability": 0.95703125}, {"start": 3545.92, "end": 3546.34, "word": " use", "probability": 0.880859375}, {"start": 3546.34, "end": 3546.64, "word": " another", "probability": 0.9169921875}, {"start": 3546.64, "end": 3547.08, "word": " example.", "probability": 0.97021484375}, {"start": 3548.18, "end": 3548.72, "word": " Let's", "probability": 0.96826171875}, {"start": 3548.72, "end": 3548.94, "word": " talk", "probability": 0.88134765625}, {"start": 3548.94, "end": 3549.28, "word": " about", "probability": 0.8505859375}, {"start": 3549.28, "end": 3549.72, "word": " again", "probability": 0.857421875}, {"start": 3549.72, "end": 3550.46, "word": " AUG", "probability": 0.6226806640625}, {"start": 3550.46, "end": 3550.86, "word": " students.", "probability": 0.9189453125}], "temperature": 1.0}, {"id": 130, "seek": 358646, "start": 3568.4, "end": 3586.46, "text": " I choose suppose level number 2 and level number 4, two levels, 2 and 4. 
Then you can take either all the students here or just assemble size proportion to the", "tokens": [286, 2826, 7297, 1496, 1230, 568, 293, 1496, 1230, 1017, 11, 732, 4358, 11, 568, 293, 1017, 13, 1396, 291, 393, 747, 2139, 439, 264, 1731, 510, 420, 445, 22364, 2744, 16068, 281, 264], "avg_logprob": -0.27276786395481656, "compression_ratio": 1.3445378151260505, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3568.4, "end": 3569.12, "word": " I", "probability": 0.623046875}, {"start": 3569.12, "end": 3569.64, "word": " choose", "probability": 0.708984375}, {"start": 3569.64, "end": 3570.24, "word": " suppose", "probability": 0.61083984375}, {"start": 3570.24, "end": 3570.52, "word": " level", "probability": 0.8671875}, {"start": 3570.52, "end": 3570.8, "word": " number", "probability": 0.8759765625}, {"start": 3570.8, "end": 3571.1, "word": " 2", "probability": 0.455078125}, {"start": 3571.1, "end": 3571.32, "word": " and", "probability": 0.90869140625}, {"start": 3571.32, "end": 3571.56, "word": " level", "probability": 0.93408203125}, {"start": 3571.56, "end": 3571.8, "word": " number", "probability": 0.90380859375}, {"start": 3571.8, "end": 3572.18, "word": " 4,", "probability": 0.95849609375}, {"start": 3572.6, "end": 3572.84, "word": " two", "probability": 0.7177734375}, {"start": 3572.84, "end": 3573.2, "word": " levels,", "probability": 0.91943359375}, {"start": 3573.76, "end": 3573.9, "word": " 2", "probability": 0.77294921875}, {"start": 3573.9, "end": 3574.1, "word": " and", "probability": 0.9296875}, {"start": 3574.1, "end": 3574.4, "word": " 4.", "probability": 0.99462890625}, {"start": 3575.24, "end": 3575.54, "word": " Then", "probability": 0.84033203125}, {"start": 3575.54, "end": 3575.7, "word": " you", "probability": 0.88427734375}, {"start": 3575.7, "end": 3575.92, "word": " can", "probability": 0.93701171875}, {"start": 3575.92, "end": 3576.28, "word": " take", "probability": 0.8583984375}, {"start": 3576.28, "end": 3577.68, "word": " 
either", "probability": 0.90673828125}, {"start": 3577.68, "end": 3578.08, "word": " all", "probability": 0.9453125}, {"start": 3578.08, "end": 3578.22, "word": " the", "probability": 0.86962890625}, {"start": 3578.22, "end": 3578.58, "word": " students", "probability": 0.97314453125}, {"start": 3578.58, "end": 3578.92, "word": " here", "probability": 0.85205078125}, {"start": 3578.92, "end": 3580.0, "word": " or", "probability": 0.70263671875}, {"start": 3580.0, "end": 3580.52, "word": " just", "probability": 0.91259765625}, {"start": 3580.52, "end": 3582.56, "word": " assemble", "probability": 0.673828125}, {"start": 3582.56, "end": 3583.38, "word": " size", "probability": 0.43896484375}, {"start": 3583.38, "end": 3585.82, "word": " proportion", "probability": 0.46923828125}, {"start": 3585.82, "end": 3586.18, "word": " to", "probability": 0.93359375}, {"start": 3586.18, "end": 3586.46, "word": " the", "probability": 0.90625}], "temperature": 1.0}, {"id": 131, "seek": 360405, "start": 3590.31, "end": 3604.05, "text": " For example, this one represents 20%, and my sample size is 1000, so in this case you have to take 200 and 800 from that one. 
Any questions?", "tokens": [1171, 1365, 11, 341, 472, 8855, 945, 8923, 293, 452, 6889, 2744, 307, 9714, 11, 370, 294, 341, 1389, 291, 362, 281, 747, 2331, 293, 13083, 490, 300, 472, 13, 2639, 1651, 30], "avg_logprob": -0.278722428223666, "compression_ratio": 1.2155172413793103, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 3590.31, "end": 3590.49, "word": " For", "probability": 0.1990966796875}, {"start": 3590.49, "end": 3590.49, "word": " example,", "probability": 0.9501953125}, {"start": 3590.77, "end": 3590.99, "word": " this", "probability": 0.9208984375}, {"start": 3590.99, "end": 3591.19, "word": " one", "probability": 0.88916015625}, {"start": 3591.19, "end": 3591.53, "word": " represents", "probability": 0.69873046875}, {"start": 3591.53, "end": 3592.65, "word": " 20%,", "probability": 0.5728759765625}, {"start": 3592.65, "end": 3593.13, "word": " and", "probability": 0.8955078125}, {"start": 3593.13, "end": 3594.13, "word": " my", "probability": 0.5576171875}, {"start": 3594.13, "end": 3594.47, "word": " sample", "probability": 0.869140625}, {"start": 3594.47, "end": 3594.79, "word": " size", "probability": 0.73779296875}, {"start": 3594.79, "end": 3594.91, "word": " is", "probability": 0.72509765625}, {"start": 3594.91, "end": 3595.37, "word": " 1000,", "probability": 0.765625}, {"start": 3595.73, "end": 3595.91, "word": " so", "probability": 0.9169921875}, {"start": 3595.91, "end": 3596.05, "word": " in", "probability": 0.8984375}, {"start": 3596.05, "end": 3596.21, "word": " this", "probability": 0.9443359375}, {"start": 3596.21, "end": 3596.39, "word": " case", "probability": 0.90966796875}, {"start": 3596.39, "end": 3596.49, "word": " you", "probability": 0.49169921875}, {"start": 3596.49, "end": 3596.63, "word": " have", "probability": 0.880859375}, {"start": 3596.63, "end": 3596.73, "word": " to", "probability": 0.96923828125}, {"start": 3596.73, "end": 3596.97, "word": " take", "probability": 0.88720703125}, {"start": 3596.97, "end": 
3598.01, "word": " 200", "probability": 0.783203125}, {"start": 3598.01, "end": 3598.85, "word": " and", "probability": 0.814453125}, {"start": 3598.85, "end": 3599.41, "word": " 800", "probability": 0.95068359375}, {"start": 3599.41, "end": 3599.85, "word": " from", "probability": 0.83251953125}, {"start": 3599.85, "end": 3600.01, "word": " that", "probability": 0.90673828125}, {"start": 3600.01, "end": 3600.31, "word": " one.", "probability": 0.9111328125}, {"start": 3603.05, "end": 3603.69, "word": " Any", "probability": 0.8974609375}, {"start": 3603.69, "end": 3604.05, "word": " questions?", "probability": 0.95068359375}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3618.43525, "duration_after_vad": 3381.596249999989} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..7dabc3c99ae2d568a27596faf79a8e4be68b2b06 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/JEIWb3FC-Sk_raw.srt @@ -0,0 +1,2532 @@ +1 +00:00:04,670 --> 00:00:08,210 +Today, Inshallah, we are going to start Chapter 7. + +2 +00:00:09,830 --> 00:00:14,910 +Chapter 7 talks about sampling and sampling + +3 +00:00:14,910 --> 00:00:22,690 +distributions. The objectives for this chapter are + +4 +00:00:22,690 --> 00:00:27,610 +number one we have different methods actually we + +5 +00:00:27,610 --> 00:00:31,330 +have two methods probability and non-probability + +6 +00:00:31,330 --> 00:00:34,750 +samples and we are going to distinguish between + +7 +00:00:35,420 --> 00:00:40,700 +these two sampling methods. So again, in this + +8 +00:00:40,700 --> 00:00:44,980 +chapter, we will talk about two different sampling + +9 +00:00:44,980 --> 00:00:49,480 +methods. One is called probability sampling and + +10 +00:00:49,480 --> 00:00:52,940 +the other is non-probability sampling. 
Our goal is + +11 +00:00:52,940 --> 00:00:56,520 +to distinguish between these two different + +12 +00:00:56,520 --> 00:00:59,280 +sampling methods. The other learning objective + +13 +00:00:59,280 --> 00:01:04,400 +will be We'll talk about the concept of the + +14 +00:01:04,400 --> 00:01:06,700 +sampling distribution. That will be next time, + +15 +00:01:06,800 --> 00:01:09,960 +inshallah. The third objective is compute + +16 +00:01:09,960 --> 00:01:15,480 +probabilities related to sample mean. In addition + +17 +00:01:15,480 --> 00:01:18,160 +to that, we'll talk about how can we compute + +18 +00:01:18,160 --> 00:01:22,920 +probabilities regarding the sample proportion. And + +19 +00:01:22,920 --> 00:01:27,130 +as I mentioned last time, There are two types of + +20 +00:01:27,130 --> 00:01:30,270 +data. One is called the numerical data. In this + +21 +00:01:30,270 --> 00:01:33,470 +case, we can use the sample mean. The other type + +22 +00:01:33,470 --> 00:01:36,630 +is called qualitative data. And in this case, we + +23 +00:01:36,630 --> 00:01:39,330 +have to use the sample proportion. So for this + +24 +00:01:39,330 --> 00:01:41,690 +chapter, we are going to discuss how can we + +25 +00:01:41,690 --> 00:01:46,370 +compute the probabilities for each one, either the + +26 +00:01:46,370 --> 00:01:50,090 +sample mean or the sample proportion. 
The last + +27 +00:01:50,090 --> 00:01:55,770 +objective of this chapter is to use the central + +28 +00:01:55,770 --> 00:01:58,190 +limit theorem which is the famous one of the most + +29 +00:01:58,190 --> 00:02:02,130 +famous theorem in this book which is called again + +30 +00:02:02,130 --> 00:02:05,690 +CLT central limit theorem and we are going to show + +31 +00:02:05,690 --> 00:02:09,310 +what are the what is the importance of this + +32 +00:02:09,310 --> 00:02:11,930 +theorem so these are the mainly the four + +33 +00:02:11,930 --> 00:02:16,610 +objectives for this chapter Now let's see why we + +34 +00:02:16,610 --> 00:02:20,270 +are talking about sampling. In other words, most + +35 +00:02:20,270 --> 00:02:23,850 +of the time when we are doing study, we are using + +36 +00:02:23,850 --> 00:02:27,700 +a sample. instead of using the entire population. + +37 +00:02:28,640 --> 00:02:32,080 +Now there are many reasons behind that. One of + +38 +00:02:32,080 --> 00:02:37,840 +these reasons is selecting a sample is less time + +39 +00:02:37,840 --> 00:02:40,940 +consuming than selecting every item in the + +40 +00:02:40,940 --> 00:02:44,060 +population. I think it makes sense that suppose we + +41 +00:02:44,060 --> 00:02:46,560 +have a huge population, that population consists + +42 +00:02:46,560 --> 00:02:53,140 +of thousands of items. So that will take more time + +43 +00:02:54,440 --> 00:03:00,220 +If you select 100 of their population. So time + +44 +00:03:00,220 --> 00:03:02,140 +consuming is very important. So number one, + +45 +00:03:03,000 --> 00:03:05,780 +selecting sample is less time consuming than using + +46 +00:03:05,780 --> 00:03:10,280 +all the entire population. The second reason, + +47 +00:03:10,880 --> 00:03:14,640 +selecting samples is less costly than selecting a + +48 +00:03:14,640 --> 00:03:17,280 +variety of population. 
Because if we have large
+
+49
+00:03:17,280 --> 00:03:19,560
+population, in this case you have to spend more
+
+50
+00:03:19,560 --> 00:03:23,540
+money in order to get the data or the information
+
+51
+00:03:23,540 --> 00:03:27,940
+from that population. So it's better to use these
+
+52
+00:03:27,940 --> 00:03:33,300
+samples. The other reason is the analysis. Our
+
+53
+00:03:33,300 --> 00:03:37,260
+sample is less cumbersome and more practical than
+
+54
+00:03:37,260 --> 00:03:40,880
+analysis of all items in the population. For these
+
+55
+00:03:40,880 --> 00:03:45,820
+reasons, we have to use a sample. For this reason,
+
+56
+00:03:45,880 --> 00:03:53,080
+we have to talk about sampling methods. Let's
+
+57
+00:03:53,080 --> 00:03:58,540
+start with sampling process. That begins with a
+
+58
+00:03:58,540 --> 00:04:05,320
+sampling frame. Now suppose my goal is to know the
+
+59
+00:04:05,320 --> 00:04:13,960
+opinion of IUG students about a certain subject.
+
+60
+00:04:16,260 --> 00:04:24,120
+So my population consists of all IUG students. So
+
+61
+00:04:24,120 --> 00:04:27,370
+that's the entire population. And you know that,
+
+62
+00:04:27,590 --> 00:04:31,750
+for example, suppose our IUG students is around,
+
+63
+00:04:32,430 --> 00:04:39,890
+for example, 20,000 students. 20,000 students is a
+
+64
+00:04:39,890 --> 00:04:45,490
+big number. So it's better to select a sample from
+
+65
+00:04:45,490 --> 00:04:49,270
+that population. Now, the first step in this
+
+66
+00:04:49,270 --> 00:04:55,700
+process, we have to determine the frame. of that
+
+67
+00:04:55,700 --> 00:05:01,320
+population. So my frame consists of all IU
+
+68
+00:05:01,320 --> 00:05:04,740
+students, which has maybe males and females. So my
+
+69
+00:05:04,740 --> 00:05:09,560
+frame in this case is all items, I mean all
+
+70
+00:05:09,560 --> 00:05:15,380
+students at IUG. So that's the frame.
So my frame
+
+71
+00:05:15,380 --> 00:05:18,720
+consists
+
+72
+00:05:18,720 --> 00:05:22,220
+of all students.
+
+73
+00:05:27,630 --> 00:05:32,370
+So the definition of
+
+74
+00:05:32,370 --> 00:05:36,010
+the sampling frame is a listing of items that make
+
+75
+00:05:36,010 --> 00:05:39,350
+up the population. The items could be individual,
+
+76
+00:05:40,170 --> 00:05:44,490
+could be students, could be things, animals, and
+
+77
+00:05:44,490 --> 00:05:49,650
+so on. So frames are data sources such as a
+
+78
+00:05:49,650 --> 00:05:54,840
+population list. Suppose we have the names of IUG
+
+79
+00:05:54,840 --> 00:05:58,840
+students. So that's my population list. Or
+
+80
+00:05:58,840 --> 00:06:02,160
+directories, or maps, and so on. So that's the
+
+81
+00:06:02,160 --> 00:06:05,520
+frame we have to know about the population we are
+
+82
+00:06:05,520 --> 00:06:10,900
+interested in. Inaccurate or biased results can
+
+83
+00:06:10,900 --> 00:06:16,460
+result if frame excludes certain portions of the
+
+84
+00:06:16,460 --> 00:06:20,620
+population. For example, suppose here, as I
+
+85
+00:06:20,620 --> 00:06:24,180
+mentioned, We are interested in IUG students, so
+
+86
+00:06:24,180 --> 00:06:29,280
+my frame is all IUG students. And I know there are
+
+87
+00:06:29,280 --> 00:06:35,900
+students, either males or females. Suppose for
+
+88
+00:06:35,900 --> 00:06:40,880
+some reasons, we ignore males, and just my sample
+
+89
+00:06:40,880 --> 00:06:45,080
+focused on females. In this case, females.
+
+90
+00:06:48,700 --> 00:06:51,900
+don't represent the entire population. For this
+
+91
+00:06:51,900 --> 00:06:57,720
+reason, you will get inaccurate or biased results
+
+92
+00:06:57,720 --> 00:07:02,000
+if you ignore a certain portion. Because here
+
+93
+00:07:02,000 --> 00:07:08,580
+males, for example, maybe consists of 40% of the
+
+94
+00:07:08,580 --> 00:07:12,960
+IUG students.
So it makes sense that this number or + +95 +00:07:12,960 --> 00:07:16,980 +this percentage is a big number. So ignoring this + +96 +00:07:16,980 --> 00:07:21,160 +portion, may lead to misleading results or + +97 +00:07:21,160 --> 00:07:26,160 +inaccurate results or biased results. So you have + +98 +00:07:26,160 --> 00:07:29,600 +to keep in mind that you have to choose all the + +99 +00:07:29,600 --> 00:07:33,740 +portions of that frame. So inaccurate or biased + +100 +00:07:33,740 --> 00:07:38,700 +results can result if a frame excludes certain + +101 +00:07:38,700 --> 00:07:43,180 +portions of a population. Another example, suppose + +102 +00:07:43,180 --> 00:07:48,680 +we took males and females. But here for females, + +103 +00:07:49,240 --> 00:07:56,020 +females have, for example, four levels. Level one, + +104 +00:07:56,400 --> 00:07:59,980 +level two, level three, and level four. And we + +105 +00:07:59,980 --> 00:08:05,560 +ignored, for example, level one. I mean, the new + +106 +00:08:05,560 --> 00:08:09,520 +students. We ignored this portion. Maybe this + +107 +00:08:09,520 --> 00:08:12,860 +portion is very important one, but by mistake we + +108 +00:08:12,860 --> 00:08:18,690 +ignored this one. The remaining three levels will + +109 +00:08:18,690 --> 00:08:22,430 +not represent the entire female population. For + +110 +00:08:22,430 --> 00:08:25,330 +this reason, you will get inaccurate or biased + +111 +00:08:25,330 --> 00:08:31,290 +results. So you have to select all the portions of + +112 +00:08:31,290 --> 00:08:36,610 +the frames. Using different frames to generate + +113 +00:08:36,610 --> 00:08:40,110 +data can lead to dissimilar conclusions. For + +114 +00:08:40,110 --> 00:08:46,020 +example, Suppose again I am interested in IEG + +115 +00:08:46,020 --> 00:08:46,720 +students. + +116 +00:08:49,440 --> 00:08:59,460 +And I took the frame that has all students at + +117 +00:08:59,460 --> 00:09:04,060 +University of Gaza, Universities of Gaza. 
+ +118 +00:09:09,250 --> 00:09:12,110 +And as we know that Gaza has three universities, + +119 +00:09:12,350 --> 00:09:15,530 +big universities, Islamic University, Lazar + +120 +00:09:15,530 --> 00:09:18,030 +University, and Al-Aqsa University. So we have + +121 +00:09:18,030 --> 00:09:23,310 +three universities. And my frame here, suppose I + +122 +00:09:23,310 --> 00:09:27,410 +took all students at these universities, but my + +123 +00:09:27,410 --> 00:09:32,470 +study focused on IU students. So my frame, the + +124 +00:09:32,470 --> 00:09:38,250 +true one, is all students at IUG. But I taught all + +125 +00:09:38,250 --> 00:09:42,170 +students at universities in Gaza. So now we have + +126 +00:09:42,170 --> 00:09:44,690 +different frames. + +127 +00:09:48,610 --> 00:09:54,590 +And you want to know what are the opinions of the + +128 +00:09:54,590 --> 00:09:59,910 +smokers about smoking. So my population now is + +129 +00:09:59,910 --> 00:10:00,530 +just... + +130 +00:10:14,030 --> 00:10:19,390 +So that's my thing. + +131 +00:10:21,010 --> 00:10:32,410 +I suppose I talk to a field that has one atom. + +132 +00:10:40,780 --> 00:10:46,040 +Oh my goodness. They are very different things. + +133 +00:10:47,700 --> 00:10:53,720 +The first one consists of only smokers. They are + +134 +00:10:53,720 --> 00:10:58,100 +very interested in you. The other one consists + +135 +00:10:58,100 --> 00:11:06,560 +of... Anonymous. I thought maybe... Smoker or non + +136 +00:11:06,560 --> 00:11:10,460 +-smokers. For this reason, you will get... + +137 +00:11:17,410 --> 00:11:19,350 +Conclusion, different results. + +138 +00:11:22,090 --> 00:11:28,850 +So now, + +139 +00:11:29,190 --> 00:11:33,610 +the sampling frame is a listing of items that make + +140 +00:11:33,610 --> 00:11:39,510 +up the entire population. Let's move to the types + +141 +00:11:39,510 --> 00:11:44,910 +of samples. Mainly there are two types of + +142 +00:11:44,910 --> 00:11:49,070 +sampling. One is cold. 
Non-probability samples.
+
+143
+00:11:50,370 --> 00:11:54,650
+The other one is called probability samples. The
+
+144
+00:11:54,650 --> 00:11:59,790
+non-probability samples can be divided into two
+
+145
+00:11:59,790 --> 00:12:04,030
+segments. One is called judgment and the other
+
+146
+00:12:04,030 --> 00:12:08,710
+convenience. So we have judgment and convenience
+
+147
+00:12:08,710 --> 00:12:13,140
+non-probability samples. The other type which is
+
+148
+00:12:13,140 --> 00:12:17,560
+random probability samples has four segments or
+
+149
+00:12:17,560 --> 00:12:21,680
+four parts. The first one is called simple random
+
+150
+00:12:21,680 --> 00:12:25,680
+sample. The other one is systematic. The second
+
+151
+00:12:25,680 --> 00:12:28,680
+one is systematic random sample. The third one is
+
+152
+00:12:28,680 --> 00:12:32,940
+stratified. The fourth one cluster random sample.
+
+153
+00:12:33,460 --> 00:12:37,770
+So there are two types of sampling. Probability
+
+154
+00:12:37,770 --> 00:12:41,490
+and non-probability. Probability has four
+
+155
+00:12:41,490 --> 00:12:45,350
+methods here, simple random samples, systematic,
+
+156
+00:12:45,530 --> 00:12:48,530
+stratified, and cluster. And the non-probability
+
+157
+00:12:48,530 --> 00:12:53,090
+samples has two types, judgment and convenience.
+
+158
+00:12:53,670 --> 00:12:58,490
+Let's see the definition of each type of samples.
+
+159
+00:12:59,190 --> 00:13:03,720
+Let's start with non-probability sample. In non
+
+160
+00:13:03,720 --> 00:13:07,000
+-probability sample, items included or chosen
+
+161
+00:13:07,000 --> 00:13:10,800
+without regard to their probability of occurrence.
+
+162
+00:13:11,760 --> 00:13:14,740
+So that's the definition of non-probability. For
+
+163
+00:13:14,740 --> 00:13:15,100
+example.
+ +164 +00:13:23,660 --> 00:13:26,480 +So again, non-probability sample, it means you + +165 +00:13:26,480 --> 00:13:29,580 +select items without regard to their probability + +166 +00:13:29,580 --> 00:13:34,030 +of occurrence. For example, suppose females + +167 +00:13:34,030 --> 00:13:42,430 +consist of 70% of IUG students and males, the + +168 +00:13:42,430 --> 00:13:49,930 +remaining percent is 30%. And suppose I decided to + +169 +00:13:49,930 --> 00:13:56,610 +select a sample of 100 or 1000 students from IUG. + +170 +00:13:58,620 --> 00:14:07,980 +Suddenly, I have a sample that has 650 males and + +171 +00:14:07,980 --> 00:14:14,780 +350 females. Now, this sample, which has these + +172 +00:14:14,780 --> 00:14:19,260 +numbers, for sure does not represent the entire + +173 +00:14:19,260 --> 00:14:25,240 +population. Because females has 70%, and I took a + +174 +00:14:25,240 --> 00:14:30,890 +random sample or a sample of size 350. So this + +175 +00:14:30,890 --> 00:14:35,830 +sample is chosen without regard to the probability + +176 +00:14:35,830 --> 00:14:40,370 +here. Because in this case, I should choose males + +177 +00:14:40,370 --> 00:14:44,110 +with respect to their probability, which is 30%. + +178 +00:14:44,110 --> 00:14:49,330 +But in this case, I just choose different + +179 +00:14:49,330 --> 00:14:54,990 +proportions. Another example. Suppose + +180 +00:14:57,260 --> 00:14:59,920 +Again, I am talking about smoking. + +181 +00:15:05,080 --> 00:15:10,120 +And I know that some people are smoking and I just + +182 +00:15:10,120 --> 00:15:14,040 +took this sample. So I took this sample based on + +183 +00:15:14,040 --> 00:15:18,600 +my knowledge. So it's without regard to their + +184 +00:15:18,600 --> 00:15:23,340 +probability. Maybe suppose I am talking about + +185 +00:15:23,340 --> 00:15:28,330 +political opinions about something. And I just + +186 +00:15:28,330 --> 00:15:36,330 +took the experts of that subject. 
So my sample is + +187 +00:15:36,330 --> 00:15:42,070 +not a probability sample. And this one has, as we + +188 +00:15:42,070 --> 00:15:44,230 +mentioned, has two types. One is called + +189 +00:15:44,230 --> 00:15:49,010 +convenience sampling. In this case, items are + +190 +00:15:49,010 --> 00:15:51,710 +selected based only on the fact that they are + +191 +00:15:51,710 --> 00:15:55,590 +easy. So I choose that sample because it's easy. + +192 +00:15:57,090 --> 00:15:57,690 +Inexpensive, + +193 +00:16:02,190 --> 00:16:09,790 +inexpensive, or convenient to sample. If I choose + +194 +00:16:09,790 --> 00:16:13,430 +my sample because it is easy or inexpensive, I + +195 +00:16:13,430 --> 00:16:18,480 +think it doesn't make any sense, because easy. is + +196 +00:16:18,480 --> 00:16:23,780 +not a reason to select that sample. Inexpensive I + +197 +00:16:23,780 --> 00:16:27,080 +think is also is not that big reason. But if you + +198 +00:16:27,080 --> 00:16:30,340 +select a sample because these items are convenient + +199 +00:16:30,340 --> 00:16:33,760 +to assemble, it makes sense. So convenient sample + +200 +00:16:33,760 --> 00:16:38,280 +can be chosen based on easy, inexpensive or + +201 +00:16:38,280 --> 00:16:42,280 +convenient to assemble. On the other hand, In + +202 +00:16:42,280 --> 00:16:45,140 +judgment sample, you get the opinions of pre + +203 +00:16:45,140 --> 00:16:49,360 +-selected experts in the subject matter. For + +204 +00:16:49,360 --> 00:16:52,700 +example, suppose we are talking about the causes + +205 +00:16:52,700 --> 00:16:56,560 +of certain disease. Suppose we are talking about + +206 +00:16:56,560 --> 00:16:57,760 +cancer. 
+ +207 +00:17:01,720 --> 00:17:06,820 +If I know the expert for this type of disease, + +208 +00:17:07,620 --> 00:17:10,340 +that means you have judgment sample because you + +209 +00:17:10,340 --> 00:17:14,720 +decided Before you select a sample that your + +210 +00:17:14,720 --> 00:17:27,500 +sample should contain only the expert in + +211 +00:17:27,500 --> 00:17:31,360 +cancer disease. So that's the judgment sampling. + +212 +00:17:32,260 --> 00:17:36,800 +So in this case, I didn't take all the doctors in + +213 +00:17:36,800 --> 00:17:41,340 +this case, I just taught the expert in cancer + +214 +00:17:41,340 --> 00:17:45,160 +disease. So that's called non-probability samples. + +215 +00:17:45,340 --> 00:17:48,820 +You have to make sense to distinguish between + +216 +00:17:48,820 --> 00:17:54,700 +convenience sampling and judgment sample. So for + +217 +00:17:54,700 --> 00:17:57,980 +judgment, you select a sample based on the prior + +218 +00:17:57,980 --> 00:18:00,740 +information you have about the subject matter. + +219 +00:18:02,870 --> 00:18:05,410 +Suppose I am talking about something related to + +220 +00:18:05,410 --> 00:18:08,830 +psychology, so I have to take the expert in + +221 +00:18:08,830 --> 00:18:12,910 +psychology. Suppose I am talking about expert in + +222 +00:18:12,910 --> 00:18:17,050 +sports, so I have to take a sample from that + +223 +00:18:17,050 --> 00:18:20,970 +segment and so on. But the convenient sample means + +224 +00:18:20,970 --> 00:18:24,690 +that you select a sample maybe that is easy for + +225 +00:18:24,690 --> 00:18:29,430 +you, or less expensive, or that sample is + +226 +00:18:29,430 --> 00:18:32,980 +convenient. For this reason, it's called non + +227 +00:18:32,980 --> 00:18:36,300 +-probability sample because we choose that sample + +228 +00:18:36,300 --> 00:18:39,540 +without regard to their probability of occurrence. + +229 +00:18:41,080 --> 00:18:48,620 +The other type is called probability samples. 
In + +230 +00:18:48,620 --> 00:18:54,200 +this case, items are chosen on the basis of non + +231 +00:18:54,200 --> 00:18:58,600 +-probabilities. For example, here, if males + +232 +00:19:02,500 --> 00:19:11,060 +has or represent 30%, and females represent 70%, + +233 +00:19:11,060 --> 00:19:14,840 +and the same size has a thousand. So in this case, + +234 +00:19:14,920 --> 00:19:19,340 +you have to choose females with respect to their + +235 +00:19:19,340 --> 00:19:24,260 +probability. Now 70% for females, so I have to + +236 +00:19:24,260 --> 00:19:29,430 +choose 700 for females and the remaining 300 for + +237 +00:19:29,430 --> 00:19:34,010 +males. So in this case, I choose the items, I mean + +238 +00:19:34,010 --> 00:19:37,970 +I choose my samples regarding to their + +239 +00:19:37,970 --> 00:19:39,050 +probability. + +240 +00:19:41,010 --> 00:19:45,190 +So in probability sample items and the sample are + +241 +00:19:45,190 --> 00:19:48,610 +chosen on the basis of known probabilities. And + +242 +00:19:48,610 --> 00:19:52,360 +again, there are two types. of probability + +243 +00:19:52,360 --> 00:19:55,580 +samples, simple random sample, systematic, + +244 +00:19:56,120 --> 00:19:59,660 +stratified, and cluster. Let's talk about each one + +245 +00:19:59,660 --> 00:20:05,040 +in details. The first type is called a probability + +246 +00:20:05,040 --> 00:20:11,720 +sample. Simple random sample. The first type of + +247 +00:20:11,720 --> 00:20:16,200 +probability sample is the easiest one. Simple + +248 +00:20:16,200 --> 00:20:23,780 +random sample. Generally is denoted by SRS, Simple + +249 +00:20:23,780 --> 00:20:30,660 +Random Sample. Let's see how can we choose a + +250 +00:20:30,660 --> 00:20:35,120 +sample that is random. What do you mean by random? + +251 +00:20:36,020 --> 00:20:41,780 +In this case, every individual or item from the + +252 +00:20:41,780 --> 00:20:47,620 +frame has an equal chance of being selected. 
For + +253 +00:20:47,620 --> 00:20:52,530 +example, suppose number of students in this class + +254 +00:20:52,530 --> 00:21:04,010 +number of students is 52 so + +255 +00:21:04,010 --> 00:21:11,890 +each one, I mean each student from + +256 +00:21:11,890 --> 00:21:17,380 +1 up to 52 has the same probability of being + +257 +00:21:17,380 --> 00:21:23,860 +selected. 1 by 52. 1 by 52. 1 divided by 52. So + +258 +00:21:23,860 --> 00:21:27,980 +each one has this probability. So the first one + +259 +00:21:27,980 --> 00:21:31,820 +has the same because if I want to select for + +260 +00:21:31,820 --> 00:21:37,680 +example 10 out of you. So the first one has each + +261 +00:21:37,680 --> 00:21:42,400 +one has probability of 1 out of 52. That's the + +262 +00:21:42,400 --> 00:21:47,160 +meaning ofEach item from the frame has an equal + +263 +00:21:47,160 --> 00:21:54,800 +chance of being selected. Selection may be with + +264 +00:21:54,800 --> 00:21:58,800 +replacement. With replacement means selected + +265 +00:21:58,800 --> 00:22:02,040 +individuals is returned to the frame for + +266 +00:22:02,040 --> 00:22:04,880 +possibility selection, or without replacement + +267 +00:22:04,880 --> 00:22:08,600 +means selected individuals or item is not returned + +268 +00:22:08,600 --> 00:22:10,820 +to the frame. So we have two types of selection, + +269 +00:22:11,000 --> 00:22:14,360 +either with... So with replacement means item is + +270 +00:22:14,360 --> 00:22:18,080 +returned back to the frame, or without population, + +271 +00:22:18,320 --> 00:22:21,400 +the item is not returned back to the frame. So + +272 +00:22:21,400 --> 00:22:26,490 +that's the two types of selection. Now how can we + +273 +00:22:26,490 --> 00:22:29,810 +obtain the sample? Sample obtained from something + +274 +00:22:29,810 --> 00:22:33,470 +called table of random numbers. In a minute I will + +275 +00:22:33,470 --> 00:22:36,430 +show you the table of random numbers. 
And other + +276 +00:22:36,430 --> 00:22:40,130 +method of selecting a sample by using computer + +277 +00:22:40,130 --> 00:22:44,890 +random number generators. So there are two methods + +278 +00:22:44,890 --> 00:22:48,310 +for selecting a random number. Either by using the + +279 +00:22:48,310 --> 00:22:51,950 +table that you have at the end of your book or by + +280 +00:22:51,950 --> 00:22:56,550 +using a computer. I will show one of these and in + +281 +00:22:56,550 --> 00:22:59,650 +the SPSS course you will see another one which is + +282 +00:22:59,650 --> 00:23:03,690 +by using a computer. So let's see how can we + +283 +00:23:03,690 --> 00:23:11,730 +obtain a sample from table of + +284 +00:23:11,730 --> 00:23:12,590 +random number. + +285 +00:23:16,950 --> 00:23:22,090 +I have maybe different table here. But the same + +286 +00:23:22,090 --> 00:23:28,090 +idea to use that table. Let's see how can we + +287 +00:23:28,090 --> 00:23:34,990 +choose a sample by using a random number. + +288 +00:23:42,490 --> 00:23:47,370 +Now, for example, suppose in this class As I + +289 +00:23:47,370 --> 00:23:51,090 +mentioned, there are 52 students. + +290 +00:23:55,110 --> 00:23:58,650 +So each one has a number, ID number one, two, up + +291 +00:23:58,650 --> 00:24:05,110 +to 52. So the numbers are 01, 02, all the way up + +292 +00:24:05,110 --> 00:24:10,790 +to 52. So the maximum digits here, two, two + +293 +00:24:10,790 --> 00:24:11,110 +digits. + +294 +00:24:15,150 --> 00:24:18,330 +1, 2, 3, up to 5, 2, 2, so you have two digits. + +295 +00:24:19,470 --> 00:24:23,710 +Now suppose I decided to take a random sample of + +296 +00:24:23,710 --> 00:24:28,550 +size, for example, N instead. How can I select N + +297 +00:24:28,550 --> 00:24:32,570 +out of U? In this case, each one has the same + +298 +00:24:32,570 --> 00:24:36,790 +chance of being selected. Now based on this table, + +299 +00:24:37,190 --> 00:24:44,230 +you can pick any row or any column. Randomly. 
For + +300 +00:24:44,230 --> 00:24:51,630 +example, suppose I select the first row. Now, the + +301 +00:24:51,630 --> 00:24:56,570 +first student will be selected as student number + +302 +00:24:56,570 --> 00:25:03,650 +to take two digits. We have to take how many + +303 +00:25:03,650 --> 00:25:08,770 +digits? Because students have ID card that + +304 +00:25:08,770 --> 00:25:13,930 +consists of two digits, 0102 up to 52. So, what's + +305 +00:25:13,930 --> 00:25:17,010 +the first number students will be selected based + +306 +00:25:17,010 --> 00:25:22,130 +on this table? Forget about the line 101. + +307 +00:25:26,270 --> 00:25:27,770 +Start with this number. + +308 +00:25:42,100 --> 00:25:50,900 +So the first one, 19. The second, 22. The third + +309 +00:25:50,900 --> 00:25:51,360 +student, + +310 +00:25:54,960 --> 00:26:04,000 +19, 22. The third, 9. The third, 9. I'm taking the + +311 +00:26:04,000 --> 00:26:16,510 +first row. Then fifth. 34 student + +312 +00:26:16,510 --> 00:26:18,710 +number 05 + +313 +00:26:24,340 --> 00:26:29,500 +Now, what's about seventy-five? Seventy-five is + +314 +00:26:29,500 --> 00:26:33,660 +not selected because the maximum I have is fifty + +315 +00:26:33,660 --> 00:26:46,180 +-two. Next. Sixty-two is not selected. Eighty + +316 +00:26:46,180 --> 00:26:53,000 +-seven. It's not selected. 13. 13. It's okay. + +317 +00:26:53,420 --> 00:27:01,740 +Next. 96. 96. Not selected. 14. 14 is okay. 91. + +318 +00:27:02,140 --> 00:27:12,080 +91. 91. Not selected. 95. 91. 45. 85. 31. 31. + +319 +00:27:15,240 --> 00:27:21,900 +So that's 10. So students numbers are 19, 22, 39, + +320 +00:27:22,140 --> 00:27:26,980 +50, 34, 5, 13, 4, 25 and take one will be + +321 +00:27:26,980 --> 00:27:30,940 +selected. So these are the ID numbers will be + +322 +00:27:30,940 --> 00:27:35,480 +selected in order to get a sample of 10. You + +323 +00:27:35,480 --> 00:27:40,500 +exclude + +324 +00:27:40,500 --> 00:27:43,440 +that one. 
If the number is repeated, you have to + +325 +00:27:43,440 --> 00:27:44,340 +exclude that one. + +326 +00:27:51,370 --> 00:27:57,270 +is repeated, then excluded. + +327 +00:28:02,370 --> 00:28:07,370 +So the returned number must be excluded from the + +328 +00:28:07,370 --> 00:28:14,030 +sample. Let's imagine that we have not 52 + +329 +00:28:14,030 --> 00:28:19,130 +students. We have 520 students. + +330 +00:28:25,740 --> 00:28:32,520 +Now, I have large number, 52, 520 instead of 52 + +331 +00:28:32,520 --> 00:28:36,080 +students. And again, my goal is to select just 10 + +332 +00:28:36,080 --> 00:28:42,220 +students out of 120. So each one has ID with + +333 +00:28:42,220 --> 00:28:46,220 +number one, two, all the way up to 520. So the + +334 +00:28:46,220 --> 00:28:53,160 +first one, 001. 002 all the way up to 520 now in + +335 +00:28:53,160 --> 00:28:56,480 +this case you have to choose three digits start + +336 +00:28:56,480 --> 00:29:00,060 +for example you don't have actually to start with + +337 +00:29:00,060 --> 00:29:03,060 +row number one maybe column number one or row + +338 +00:29:03,060 --> 00:29:06,140 +number two whatever is fine so let's start with + +339 +00:29:06,140 --> 00:29:10,460 +row number two for example row number 76 + +340 +00:29:14,870 --> 00:29:19,950 +It's not selected. Because the maximum number I + +341 +00:29:19,950 --> 00:29:25,110 +have is 5 to 20. So, 746 shouldn't be selected. + +342 +00:29:26,130 --> 00:29:29,430 +The next one, 764. + +343 +00:29:31,770 --> 00:29:38,750 +Again, it's not selected. 764, 715. Not selected. + +344 +00:29:38,910 --> 00:29:42,310 +Next one is 715. 
+ +345 +00:29:44,880 --> 00:29:52,200 +099 should be 0 that's + +346 +00:29:52,200 --> 00:29:54,940 +the way how can we use the random table for using + +347 +00:29:54,940 --> 00:29:58,800 +or for selecting simple random symbols so in this + +348 +00:29:58,800 --> 00:30:03,480 +case you can choose any row or any column then you + +349 +00:30:03,480 --> 00:30:06,620 +have to decide how many digits you have to select + +350 +00:30:06,620 --> 00:30:10,500 +it depends on the number you have I mean the + +351 +00:30:10,500 --> 00:30:16,510 +population size If for example Suppose I am + +352 +00:30:16,510 --> 00:30:20,270 +talking about IUPUI students and for example, we + +353 +00:30:20,270 --> 00:30:26,530 +have 30,000 students at this school And again, I + +354 +00:30:26,530 --> 00:30:28,570 +want to select a random sample of size 10 for + +355 +00:30:28,570 --> 00:30:35,190 +example So how many digits should I use? 20,000 + +356 +00:30:35,190 --> 00:30:42,620 +Five digits And each one, each student has ID + +357 +00:30:42,620 --> 00:30:51,760 +from, starts from the first one up to twenty + +358 +00:30:51,760 --> 00:30:56,680 +thousand. So now, start with, for example, the + +359 +00:30:56,680 --> 00:30:59,240 +last row you have. + +360 +00:31:03,120 --> 00:31:08,480 +The first number 54000 is not. 81 is not. None of + +361 +00:31:08,480 --> 00:31:08,740 +these. + +362 +00:31:12,420 --> 00:31:17,760 +Look at the next one. 71000 is not selected. Now + +363 +00:31:17,760 --> 00:31:22,180 +9001. So the first number I have to select is + +364 +00:31:22,180 --> 00:31:27,200 +9001. None of the rest. Go back. + +365 +00:31:30,180 --> 00:31:37,790 +Go to the next one. The second number, 12149 + +366 +00:31:37,790 --> 00:31:45,790 +and so on. Next will be 18000 and so on. Next row, + +367 +00:31:46,470 --> 00:31:55,530 +we can select the second one, then 16, then 14000, + +368 +00:31:55,890 --> 00:32:00,850 +6500 and so on. 
So this is the way how can we use + +369 +00:32:00,850 --> 00:32:08,110 +the random table. It seems to be that tons of work + +370 +00:32:08,110 --> 00:32:13,450 +if you have large sample. Because in this case, + +371 +00:32:13,530 --> 00:32:16,430 +you have to choose, for example, suppose I am + +372 +00:32:16,430 --> 00:32:22,390 +interested to take a random sample of 10,000. Now, + +373 +00:32:22,510 --> 00:32:28,370 +to use this table to select 10,000 items takes + +374 +00:32:28,370 --> 00:32:33,030 +time and effort and maybe will never finish. So + +375 +00:32:33,030 --> 00:32:33,950 +it's better to use + +376 +00:32:38,020 --> 00:32:42,100 +better to use computer + +377 +00:32:42,100 --> 00:32:47,140 +random number generators. So that's the way if we, + +378 +00:32:47,580 --> 00:32:51,880 +now we can use the random table only if the sample + +379 +00:32:51,880 --> 00:32:57,780 +size is limited. I mean up to 100 maybe you can + +380 +00:32:57,780 --> 00:33:03,160 +use the random table, but after that I think it's + +381 +00:33:03,160 --> 00:33:08,670 +just you are losing your time. Another example + +382 +00:33:08,670 --> 00:33:14,390 +here. Now suppose my sampling frame for population + +383 +00:33:14,390 --> 00:33:23,230 +has 850 students. So the numbers are 001, 002, all + +384 +00:33:23,230 --> 00:33:28,490 +the way up to 850. And suppose for example we are + +385 +00:33:28,490 --> 00:33:33,610 +going to select five items randomly from that + +386 +00:33:33,610 --> 00:33:39,610 +population. So you have to choose three digits and + +387 +00:33:39,610 --> 00:33:44,990 +imagine that this is my portion of that table. + +388 +00:33:45,850 --> 00:33:51,570 +Now, take three digits. The first three digits are + +389 +00:33:51,570 --> 00:34:00,330 +492. So the first item chosen should be item + +390 +00:34:00,330 --> 00:34:10,540 +number 492. 
should be selected next one 800 808 + +391 +00:34:10,540 --> 00:34:17,020 +doesn't select because the maximum it's much + +392 +00:34:17,020 --> 00:34:21,100 +selected because the maximum here is 850 now next + +393 +00:34:21,100 --> 00:34:26,360 +one 892 this + +394 +00:34:26,360 --> 00:34:32,140 +one is not selected next + +395 +00:34:32,140 --> 00:34:43,030 +item four three five selected now + +396 +00:34:43,030 --> 00:34:50,710 +seven seven nine should be selected finally zeros + +397 +00:34:50,710 --> 00:34:53,130 +two should be selected so these are the five + +398 +00:34:53,130 --> 00:34:58,090 +numbers in my sample by using selected by using + +399 +00:34:58,090 --> 00:35:01,190 +the random sample any questions? + +400 +00:35:04,160 --> 00:35:07,780 +Let's move to another part. + +401 +00:35:17,600 --> 00:35:22,380 +The next type of samples is called systematic + +402 +00:35:22,380 --> 00:35:25,260 +samples. + +403 +00:35:29,120 --> 00:35:35,780 +Now suppose N represents the sample size, capital + +404 +00:35:35,780 --> 00:35:40,520 +N represents + +405 +00:35:40,520 --> 00:35:42,220 +the population size. + +406 +00:35:46,660 --> 00:35:49,900 +And let's see how can we choose a systematic + +407 +00:35:49,900 --> 00:35:54,040 +random sample from that population. For example, + +408 +00:35:55,260 --> 00:35:57,180 +suppose + +409 +00:35:59,610 --> 00:36:05,010 +For this specific slide, there are 40 items in the + +410 +00:36:05,010 --> 00:36:11,370 +population. And my goal is to select a sample of + +411 +00:36:11,370 --> 00:36:16,210 +size 4 by using systematic random sampling. The + +412 +00:36:16,210 --> 00:36:23,290 +first step is to find how many individuals will be + +413 +00:36:23,290 --> 00:36:28,990 +in any group. Let's use this letter K. + +414 +00:36:31,820 --> 00:36:36,940 +divide N by, divide frame of N individuals into + +415 +00:36:36,940 --> 00:36:42,900 +groups of K individuals. 
So, K equal capital N + +416 +00:36:42,900 --> 00:36:48,840 +over small n, this is number of items in a group. + +417 +00:36:51,570 --> 00:36:56,510 +So K represents number of subjects or number of + +418 +00:36:56,510 --> 00:37:02,750 +elements in a group. So for this example, K equals + +419 +00:37:02,750 --> 00:37:09,710 +40 divided by 4, so 10. So the group, each group + +420 +00:37:09,710 --> 00:37:11,670 +has 10 items. + +421 +00:37:16,630 --> 00:37:23,140 +So each group has 10 items. + +422 +00:37:27,420 --> 00:37:33,860 +So group number 1, 10 items, and others have the + +423 +00:37:33,860 --> 00:37:38,660 +same number. So first step, we have to decide how + +424 +00:37:38,660 --> 00:37:42,110 +many items will be in the group. And that number + +425 +00:37:42,110 --> 00:37:45,330 +equals N divided by small n, capital N divided by + +426 +00:37:45,330 --> 00:37:48,910 +small n. In this case, N is 40, the sample size is + +427 +00:37:48,910 --> 00:37:54,170 +4, so there are 10 items in each individual. Next + +428 +00:37:54,170 --> 00:38:02,850 +step, select randomly the first individual from + +429 +00:38:02,850 --> 00:38:08,620 +the first group. For example, here. Now, how many + +430 +00:38:08,620 --> 00:38:13,360 +we have here? We have 10 items. So, numbers are + +431 +00:38:13,360 --> 00:38:19,060 +01, 02, up to 10. I have to choose one more number + +432 +00:38:19,060 --> 00:38:23,680 +from these numbers, from 1 to 10, by using the + +433 +00:38:23,680 --> 00:38:27,600 +random table again. So, I have to go back to the + +434 +00:38:27,600 --> 00:38:33,730 +random table and I choose two digits. Now the + +435 +00:38:33,730 --> 00:38:36,490 +first one is nineteen, twenty-two, thirty-nine, + +436 +00:38:37,130 --> 00:38:43,450 +fifty, thirty-four, five. So I have to see. So + +437 +00:38:43,450 --> 00:38:46,230 +number one is five. What's the next one? The next + +438 +00:38:46,230 --> 00:38:54,190 +one just add K. K is ten. So next is fifteen. 
Then
+
+439
+00:38:54,190 --> 00:38:58,010
+twenty-five, then thirty-four.
+
+440
+00:39:02,900 --> 00:39:08,840
+Sample size consists of four items. So the first
+
+441
+00:39:08,840 --> 00:39:12,740
+number is chosen randomly by using the random
+
+442
+00:39:12,740 --> 00:39:17,260
+table. The next number just add the step. This is
+
+443
+00:39:17,260 --> 00:39:24,340
+step. So my step is 10 because number one is five.
+
+444
+00:39:25,300 --> 00:39:27,800
+The first item I mean is five. Then it should be
+
+445
+00:39:27,800 --> 00:39:31,780
+15, 25, 35, and so on if we have more than that.
+
+446
+00:39:33,230 --> 00:39:37,730
+Okay, so that's for, in this example, he choose
+
+447
+00:39:37,730 --> 00:39:42,790
+item number seven. Random selection, number seven.
+
+448
+00:39:43,230 --> 00:39:50,010
+So next should be 17, 27, 37, and so on. Let's do
+
+449
+00:39:50,010 --> 00:39:50,710
+another example.
+
+450
+00:39:58,590 --> 00:40:06,540
+Suppose there are In this class, there are 50
+
+451
+00:40:06,540 --> 00:40:12,400
+students. So the total is 50.
+
+452
+00:40:15,320 --> 00:40:26,780
+10 students out of 50. So my sample is 10. Now
+
+453
+00:40:26,780 --> 00:40:30,260
+still, 50 divided by 10 is 5.
+
+454
+00:40:33,630 --> 00:40:39,650
+So there are five items or five students in a
+
+455
+00:40:39,650 --> 00:40:45,370
+group. So we have five in
+
+456
+00:40:45,370 --> 00:40:51,490
+the first group and then five in the next one and
+
+457
+00:40:51,490 --> 00:40:56,130
+so on. So we have how many groups? Ten groups.
+
+458
+00:40:59,530 --> 00:41:04,330
+So first step, you have to find a step. Still it
+
+459
+00:41:04,330 --> 00:41:07,930
+means number of items or number of students in a
+
+460
+00:41:07,930 --> 00:41:16,170
+group. Next step, select student at random from
+
+461
+00:41:16,170 --> 00:41:22,010
+the first group, so random selection. 
Now, here
+
+462
+00:41:22,010 --> 00:41:28,610
+there are five students, so 01, I'm sorry, not 01,
+
+463
+00:41:29,150 --> 00:41:35,080
+1, 2, 3, 4, 5, so one digit. Only one digit.
+
+464
+00:41:35,800 --> 00:41:39,420
+Because I have maximum number is five. So it's
+
+465
+00:41:39,420 --> 00:41:42,920
+only one digit. So go again to the random table
+
+466
+00:41:42,920 --> 00:41:48,220
+and take one digit. One. So my first item, six,
+
+467
+00:41:48,760 --> 00:41:52,580
+eleven, sixteen, twenty-one, twenty-one, all the
+
+468
+00:41:52,580 --> 00:41:55,500
+way up to ten items.
+
+469
+00:42:13,130 --> 00:42:18,170
+So I choose student number one, then skip five,
+
+470
+00:42:19,050 --> 00:42:22,230
+choose number six, and so on. It's called
+
+471
+00:42:22,230 --> 00:42:26,130
+systematic. Because if you know the first item,
+
+472
+00:42:28,550 --> 00:42:32,690
+and the step you can know the rest of these.
+
+473
+00:42:37,310 --> 00:42:41,150
+Imagine that you want to select 10 students who
+
+474
+00:42:41,150 --> 00:42:48,010
+entered the cafe shop or restaurant. You can pick
+
+475
+00:42:48,010 --> 00:42:54,790
+one of them. So suppose I'm taking number three
+
+476
+00:42:54,790 --> 00:43:00,550
+and my step is six. So three, then nine, and so
+
+477
+00:43:00,550 --> 00:43:00,790
+on.
+
+478
+00:43:05,830 --> 00:43:13,310
+So that's systematic sampling. Questions? So
+
+479
+00:43:13,310 --> 00:43:20,710
+that's about random samples and systematic. What
+
+480
+00:43:20,710 --> 00:43:23,550
+do you mean by stratified groups?
+
+481
+00:43:28,000 --> 00:43:33,080
+Let's use a definition and an example of a
+
+482
+00:43:33,080 --> 00:43:34,120
+stratified sample.
+
+483
+00:43:58,810 --> 00:44:05,790
+step one. So again imagine we divide the IUG population
+
+484
+00:44:05,790 --> 00:44:11,490
+into two or more subgroups. So there are two or
+
+485
+00:44:11,490 --> 00:44:16,010
+more. 
It depends on the characteristic you are + +486 +00:44:16,010 --> 00:44:19,690 +using. So divide population into two or more + +487 +00:44:19,690 --> 00:44:24,210 +subgroups according to some common characteristic. + +488 +00:44:24,730 --> 00:44:30,280 +For example suppose I want to divide the student + +489 +00:44:30,280 --> 00:44:32,080 +into gender. + +490 +00:44:34,100 --> 00:44:38,840 +So males or females. So I have two strata. One is + +491 +00:44:38,840 --> 00:44:43,000 +called males and the other is females. Now suppose + +492 +00:44:43,000 --> 00:44:47,460 +the characteristic I am going to use is the levels + +493 +00:44:47,460 --> 00:44:51,500 +of a student. First level, second, third, fourth, + +494 +00:44:51,800 --> 00:44:56,280 +and so on. So number of strata here depends on + +495 +00:44:56,280 --> 00:45:00,380 +actually the characteristic you are interested in. + +496 +00:45:00,780 --> 00:45:04,860 +Let's use the simple one that is gender. So here + +497 +00:45:04,860 --> 00:45:12,360 +we have females. So IUV students divided into two + +498 +00:45:12,360 --> 00:45:18,560 +types, strata, or two groups, females and males. + +499 +00:45:19,200 --> 00:45:22,870 +So this is the first step. So at least you should + +500 +00:45:22,870 --> 00:45:26,750 +have two groups or two subgroups. So we have IELTS + +501 +00:45:26,750 --> 00:45:29,630 +student, the entire population, and that + +502 +00:45:29,630 --> 00:45:34,370 +population divided into two subgroups. Next, + +503 +00:45:35,650 --> 00:45:39,730 +assemble random samples. Keep careful here with + +504 +00:45:39,730 --> 00:45:45,770 +sample sizes proportional to strata sizes. That + +505 +00:45:45,770 --> 00:45:57,890 +means suppose I know that Female consists + +506 +00:45:57,890 --> 00:46:02,470 +of + +507 +00:46:02,470 --> 00:46:09,770 +70% of Irish students and + +508 +00:46:09,770 --> 00:46:11,490 +males 30%. 
+ +509 +00:46:15,410 --> 00:46:17,950 +the sample size we are talking about here is for + +510 +00:46:17,950 --> 00:46:21,550 +example is a thousand so I want to select a sample + +511 +00:46:21,550 --> 00:46:24,990 +of a thousand seed from the registration office or + +512 +00:46:24,990 --> 00:46:31,190 +my information about that is males represent 30% + +513 +00:46:31,190 --> 00:46:37,650 +females represent 70% so in this case your sample + +514 +00:46:37,650 --> 00:46:43,650 +structure should be 70% times + +515 +00:46:50,090 --> 00:46:59,090 +So the first + +516 +00:46:59,090 --> 00:47:03,750 +group should have 700 items of students and the + +517 +00:47:03,750 --> 00:47:06,490 +other one is 300,000. + +518 +00:47:09,230 --> 00:47:11,650 +So this is the second step. + +519 +00:47:14,420 --> 00:47:17,740 +Sample sizes are determined in step number two. + +520 +00:47:18,540 --> 00:47:22,200 +Now, how can you select the 700 females here? + +521 +00:47:23,660 --> 00:47:26,180 +Again, you have to go back to the random table. + +522 +00:47:27,480 --> 00:47:31,660 +Samples from subgroups are compiled into one. Then + +523 +00:47:31,660 --> 00:47:39,600 +you can use symbol random sample. So here, 700. I + +524 +00:47:39,600 --> 00:47:45,190 +have, for example, 70% females. And I know that I + +525 +00:47:45,190 --> 00:47:51,370 +use student help. I have ideas numbers from 1 up + +526 +00:47:51,370 --> 00:47:59,070 +to 7, 14. Then by using simple random, simple + +527 +00:47:59,070 --> 00:48:01,070 +random table, you can. + +528 +00:48:09,490 --> 00:48:15,190 +So if you go back to the table, the first item, + +529 +00:48:16,650 --> 00:48:23,130 +now look at five digits. Nineteen is not selected. + +530 +00:48:24,830 --> 00:48:27,510 +Nineteen. I have, the maximum is fourteen + +531 +00:48:27,510 --> 00:48:31,890 +thousand. So skip one and two. The first item is + +532 +00:48:31,890 --> 00:48:37,850 +seven hundred and fifty-six. The first item. 
Next + +533 +00:48:37,850 --> 00:48:43,480 +is not chosen. Next is not chosen. Number six. + +534 +00:48:43,740 --> 00:48:44,580 +Twelve. + +535 +00:48:47,420 --> 00:48:50,620 +Zero. Unsure. + +536 +00:48:52,880 --> 00:48:58,940 +So here we divide the population into two groups + +537 +00:48:58,940 --> 00:49:03,440 +or two subgroups, females and males. And we select + +538 +00:49:03,440 --> 00:49:07,020 +a random sample of size 700 based on the + +539 +00:49:07,020 --> 00:49:10,850 +proportion of this subgroup. Then we are using the + +540 +00:49:10,850 --> 00:49:16,750 +simple random table to take the 700 females. + +541 +00:49:22,090 --> 00:49:29,810 +Now for this example, there are 16 items or 16 + +542 +00:49:29,810 --> 00:49:35,030 +students in each group. And he select randomly + +543 +00:49:35,030 --> 00:49:40,700 +number three, number 9, number 13, and so on. So + +544 +00:49:40,700 --> 00:49:44,140 +it's a random selection. Another example. + +545 +00:49:46,820 --> 00:49:52,420 +Suppose again we are talking about all IUVs. + +546 +00:50:02,780 --> 00:50:09,360 +Here I divided the population according to the + +547 +00:50:09,360 --> 00:50:17,680 +students' levels. Level one, level two, three + +548 +00:50:17,680 --> 00:50:18,240 +levels. + +549 +00:50:25,960 --> 00:50:28,300 +One, two, three and four. + +550 +00:50:32,240 --> 00:50:39,710 +So I divide the population into four subgroups + +551 +00:50:39,710 --> 00:50:43,170 +according to the student levels. So one, two, + +552 +00:50:43,290 --> 00:50:48,030 +three, and four. Now, a simple random sample is + +553 +00:50:48,030 --> 00:50:52,070 +selected from each subgroup with sample sizes + +554 +00:50:52,070 --> 00:50:57,670 +proportional to strata size. Imagine that level + +555 +00:50:57,670 --> 00:51:04,950 +number one represents 40% of the students. Level + +556 +00:51:04,950 --> 00:51:17,630 +2, 20%. Level 3, 30%. Just + +557 +00:51:17,630 --> 00:51:22,850 +an example. To make more sense? 
+ +558 +00:51:34,990 --> 00:51:36,070 +My sample size? + +559 +00:51:38,750 --> 00:51:39,910 +3, + +560 +00:51:41,910 --> 00:51:46,430 +9, 15, 4, sorry. + +561 +00:51:53,290 --> 00:52:00,470 +So here, there are four levels. And the + +562 +00:52:00,470 --> 00:52:04,370 +proportions are 48 + +563 +00:52:06,670 --> 00:52:17,190 +sample size is 500 so the sample for each strata + +564 +00:52:17,190 --> 00:52:31,190 +will be number 1 40% times 500 gives 200 the next + +565 +00:52:31,190 --> 00:52:32,950 +150 + +566 +00:52:36,200 --> 00:52:42,380 +And so on. Now, how can we choose the 200 from + +567 +00:52:42,380 --> 00:52:46,280 +level number one? Again, we have to choose the + +568 +00:52:46,280 --> 00:52:55,540 +random table. Now, 40% from this number, it means + +569 +00:52:55,540 --> 00:52:59,620 +5 + +570 +00:52:59,620 --> 00:53:06,400 +,000. This one has 5,000. 600 females students. + +571 +00:53:07,720 --> 00:53:13,480 +Because 40% of females in level 1. And I know that + +572 +00:53:13,480 --> 00:53:17,780 +the total number of females is 14,000. So number + +573 +00:53:17,780 --> 00:53:23,420 +of females in the first level is 5600. How many + +574 +00:53:23,420 --> 00:53:28,040 +digits we have? Four digits. The first one, 001, + +575 +00:53:28,160 --> 00:53:34,460 +all the way up to 560. If you go back, into a + +576 +00:53:34,460 --> 00:53:39,520 +random table, take five, four digits. So the first + +577 +00:53:39,520 --> 00:53:43,340 +number is 1922. + +578 +00:53:43,980 --> 00:53:48,000 +Next is 3950. + +579 +00:53:50,140 --> 00:53:54,760 +And so on. So that's the way how can we choose + +580 +00:53:54,760 --> 00:53:58,640 +stratified samples. + +581 +00:54:02,360 --> 00:54:08,240 +Next, the last one is called clusters. And let's + +582 +00:54:08,240 --> 00:54:11,400 +see now what's the difference between stratified + +583 +00:54:11,400 --> 00:54:16,500 +and cluster. Step one. + +584 +00:54:25,300 --> 00:54:31,720 +Population is divided into some clusters. 
+ +585 +00:54:35,000 --> 00:54:41,160 +Step two, assemble one by assembling clusters + +586 +00:54:41,160 --> 00:54:42,740 +selective. + +587 +00:54:46,100 --> 00:54:48,640 +Here suppose how many clusters? + +588 +00:54:53,560 --> 00:54:58,080 +16 clusters. So there are, so the population has + +589 +00:55:19,310 --> 00:55:25,820 +Step two, you have to choose a simple random + +590 +00:55:25,820 --> 00:55:31,440 +number of clusters out of 16. Suppose I decided to + +591 +00:55:31,440 --> 00:55:38,300 +choose three among these. So we have 16 clusters. + +592 +00:55:45,340 --> 00:55:49,780 +For example, I chose cluster number 411. + +593 +00:55:51,640 --> 00:56:01,030 +So I choose these clusters. Next, all items in the + +594 +00:56:01,030 --> 00:56:02,910 +selected clusters can be used. + +595 +00:56:09,130 --> 00:56:15,770 +Or items + +596 +00:56:15,770 --> 00:56:18,910 +can be chosen from a cluster using another + +597 +00:56:18,910 --> 00:56:21,130 +probability sampling technique. For example, + +598 +00:56:23,190 --> 00:56:28,840 +imagine that We are talking about students who + +599 +00:56:28,840 --> 00:56:31,460 +registered for accounting. + +600 +00:56:45,880 --> 00:56:50,540 +Imagine that we have six sections for accounting. + +601 +00:56:55,850 --> 00:56:56,650 +six sections. + +602 +00:57:00,310 --> 00:57:05,210 +And I just choose two of these, cluster number one + +603 +00:57:05,210 --> 00:57:08,910 +or section number one and the last one. So my + +604 +00:57:08,910 --> 00:57:12,590 +chosen clusters are number one and six, one and + +605 +00:57:12,590 --> 00:57:19,090 +six. Or you can use the one we just talked about, + +606 +00:57:19,590 --> 00:57:23,340 +stratified random sample. 
instead of using all for + +607 +00:57:23,340 --> 00:57:29,140 +example suppose there are in this section there + +608 +00:57:29,140 --> 00:57:36,180 +are 73 models and the other one there are 80 + +609 +00:57:36,180 --> 00:57:42,300 +models and + +610 +00:57:42,300 --> 00:57:46,720 +the sample size here I am going to use case 20 + +611 +00:57:50,900 --> 00:57:56,520 +So you can use 10 here and 10 in the other one, or + +612 +00:57:56,520 --> 00:58:03,060 +it depends on the proportions. Now, 70 represents + +613 +00:58:03,060 --> 00:58:09,580 +70 out of 150, because there are 150 students in + +614 +00:58:09,580 --> 00:58:14,060 +these two clusters. Now, the entire population is + +615 +00:58:14,060 --> 00:58:17,300 +not the number for each of all of these clusters, + +616 +00:58:17,560 --> 00:58:22,310 +just number one sixth. So there are 150 students + +617 +00:58:22,310 --> 00:58:25,090 +in these two selected clusters. So the population + +618 +00:58:25,090 --> 00:58:30,030 +size is 150. Make sense? Then the proportion here + +619 +00:58:30,030 --> 00:58:33,210 +is 700 divided by 150 times 20. + +620 +00:58:35,970 --> 00:58:41,610 +The other one, 80 divided by 150 times 20. + +621 +00:58:51,680 --> 00:58:55,960 +So again, all items in the selected clusters can + +622 +00:58:55,960 --> 00:58:59,400 +be used or items can be chosen from the cluster + +623 +00:58:59,400 --> 00:59:01,500 +using another probability technique as we + +624 +00:59:01,500 --> 00:59:06,640 +mentioned. Let's see how can we use another + +625 +00:59:06,640 --> 00:59:10,860 +example. Let's talk about again AUG students. + +626 +00:59:28,400 --> 00:59:31,800 +I choose suppose level number 2 and level number + +627 +00:59:31,800 --> 00:59:37,680 +4, two levels, 2 and 4. 
Then you can take either + +628 +00:59:37,680 --> 00:59:43,380 +all the students here or just assemble size + +629 +00:59:43,380 --> 00:59:46,460 +proportion to the + +630 +00:59:50,310 --> 00:59:54,130 +For example, this one represents 20%, and my + +631 +00:59:54,130 --> 00:59:56,730 +sample size is 1000, so in this case you have to + +632 +00:59:56,730 --> 01:00:00,310 +take 200 and 800 from that one. + +633 +01:00:03,050 --> 01:00:04,050 +Any questions? + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/QKT3u32x4wE_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/QKT3u32x4wE_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..443df1b5abd5b791382eb1670ccd9acbf768050e --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/QKT3u32x4wE_postprocess.srt @@ -0,0 +1,2420 @@ +1 +00:00:08,370 --> 00:00:13,950 +Today, inshallah, we'll start chapter six. Chapter + +2 +00:00:13,950 --> 00:00:20,350 +six talks about the normal distribution. In this + +3 +00:00:20,350 --> 00:00:24,810 +chapter, there are mainly two objectives. The + +4 +00:00:24,810 --> 00:00:30,470 +first objective is to compute probabilities from + +5 +00:00:30,470 --> 00:00:34,530 +normal distribution. And mainly we'll focus on + +6 +00:00:34,530 --> 00:00:37,270 +objective number one. So we are going to use + +7 +00:00:37,270 --> 00:00:40,290 +normal distribution in this chapter. And we'll + +8 +00:00:40,290 --> 00:00:43,830 +know how can we compute probabilities if the data + +9 +00:00:43,830 --> 00:00:46,810 +set is normally distributed. You know many times + +10 +00:00:46,810 --> 00:00:50,690 +you talked about extreme points or outliers. So + +11 +00:00:50,690 --> 00:00:54,490 +that means if the data has outliers, that is the + +12 +00:00:54,490 --> 00:00:57,290 +distribution is not normally distributed. 
Now in + +13 +00:00:57,290 --> 00:01:01,090 +this case, If the distribution is normal, how can + +14 +00:01:01,090 --> 00:01:04,350 +we compute probabilities underneath the normal + +15 +00:01:04,350 --> 00:01:10,030 +curve? The second objective is to use the normal + +16 +00:01:10,030 --> 00:01:13,210 +probability plot to determine whether a set of + +17 +00:01:13,210 --> 00:01:18,150 +data is approximately normally distributed. I mean + +18 +00:01:18,150 --> 00:01:25,550 +beside box plots we discussed before. Beside this + +19 +00:01:25,550 --> 00:01:30,190 +score, how can we tell if the data point or + +20 +00:01:30,190 --> 00:01:35,350 +actually the entire distribution is approximately + +21 +00:01:35,350 --> 00:01:39,410 +normally distributed or not. Before we learn if + +22 +00:01:39,410 --> 00:01:44,110 +the point is outlier by using backsplot and this + +23 +00:01:44,110 --> 00:01:46,750 +score. In this chapter we'll know how can we + +24 +00:01:46,750 --> 00:01:51,630 +determine if the entire distribution is + +25 +00:01:51,630 --> 00:01:54,770 +approximately normal distributed. So there are two + +26 +00:01:54,770 --> 00:01:56,710 +objectives. One is to compute probabilities + +27 +00:01:56,710 --> 00:01:59,370 +underneath the normal curve. The other, how can we + +28 +00:01:59,370 --> 00:02:05,310 +tell if the data set is out or not? If you + +29 +00:02:05,310 --> 00:02:09,330 +remember, first class, we mentioned something + +30 +00:02:09,330 --> 00:02:13,130 +about data types. And we said data has mainly two + +31 +00:02:13,130 --> 00:02:17,930 +types. Numerical data, I mean quantitative data. + +32 +00:02:18,690 --> 00:02:22,630 +and categorical data, qualitative. For numerical + +33 +00:02:22,630 --> 00:02:26,190 +data also it has two types, continuous and + +34 +00:02:26,190 --> 00:02:30,430 +discrete. 
And discrete takes only integers such as + +35 +00:02:30,430 --> 00:02:35,310 +number of students who take this class or number + +36 +00:02:35,310 --> 00:02:40,190 +of accidents and so on. But if you are talking + +37 +00:02:40,190 --> 00:02:45,320 +about Age, weight, scores, temperature, and so on. + +38 +00:02:45,560 --> 00:02:49,260 +It's continuous distribution. For this type of + +39 +00:02:49,260 --> 00:02:53,320 +variable, I mean for continuous distribution, how + +40 +00:02:53,320 --> 00:02:56,300 +can we compute the probabilities underneath the + +41 +00:02:56,300 --> 00:02:59,640 +normal? So normal distribution maybe is the most + +42 +00:02:59,640 --> 00:03:02,380 +common distribution in statistics, and it's type + +43 +00:03:02,380 --> 00:03:07,820 +of continuous distribution. So first, let's define + +44 +00:03:07,820 --> 00:03:12,010 +continuous random variable. maybe because for + +45 +00:03:12,010 --> 00:03:15,230 +multiple choice problem you should know the + +46 +00:03:15,230 --> 00:03:19,110 +definition of continuous random variable is a + +47 +00:03:19,110 --> 00:03:22,070 +variable that can assume any value on a continuous + +48 +00:03:23,380 --> 00:03:27,020 +it can assume any uncountable number of values. So + +49 +00:03:27,020 --> 00:03:31,080 +it could be any number in an interval. For + +50 +00:03:31,080 --> 00:03:35,720 +example, suppose your ages range between 18 years + +51 +00:03:35,720 --> 00:03:39,580 +and 20 years. So maybe someone of you, their age + +52 +00:03:39,580 --> 00:03:44,000 +is about 18 years, three months. Or maybe your + +53 +00:03:44,000 --> 00:03:47,580 +weight is 70 kilogram point five, and so on. So + +54 +00:03:47,580 --> 00:03:49,780 +it's continuous on the variable. Other examples + +55 +00:03:49,780 --> 00:03:53,140 +for continuous, thickness of an item. For example, + +56 +00:03:53,740 --> 00:03:54,440 +the thickness. + +57 +00:03:58,260 --> 00:04:02,490 +This one is called thickness. 
Now, the thickness + +58 +00:04:02,490 --> 00:04:05,930 +may be 2 centimeters or 3 centimeters and so on, + +59 +00:04:06,210 --> 00:04:09,730 +but it might be 2.5 centimeters. For example, for + +60 +00:04:09,730 --> 00:04:13,030 +this remote, the thickness is 2.5 centimeters or 2 + +61 +00:04:13,030 --> 00:04:16,510 +.6, not exactly 2 or 3. So it could be any value. + +62 +00:04:16,650 --> 00:04:19,450 +Range is, for example, between 2 centimeters and 3 + +63 +00:04:19,450 --> 00:04:23,010 +centimeters. So from 2 to 3 is a big range because + +64 +00:04:23,010 --> 00:04:25,670 +it can take anywhere from 2.1 to 2.15 and so on. + +65 +00:04:26,130 --> 00:04:28,810 +So thickness is an example of continuous random + +66 +00:04:28,810 --> 00:04:31,190 +variable. Another example, time required to + +67 +00:04:31,190 --> 00:04:36,010 +complete a task. Now suppose you want to do an + +68 +00:04:36,010 --> 00:04:39,710 +exercise. Now the time required to finish or to + +69 +00:04:39,710 --> 00:04:45,150 +complete this task may be any value between 2 + +70 +00:04:45,150 --> 00:04:48,730 +minutes up to 3 minutes. So maybe 2 minutes 30 + +71 +00:04:48,730 --> 00:04:52,150 +seconds, 2 minutes 40 seconds and so on. So it's + +72 +00:04:52,150 --> 00:04:55,550 +continuous random variable. Temperature of a + +73 +00:04:55,550 --> 00:05:00,140 +solution. height, weight, ages, and so on. These + +74 +00:05:00,140 --> 00:05:03,720 +are examples of continuous random variable. So + +75 +00:05:03,720 --> 00:05:08,040 +these variables can potentially take on any value + +76 +00:05:08,040 --> 00:05:11,340 +depending only on the ability to precisely and + +77 +00:05:11,340 --> 00:05:14,020 +accurately measure. So that's the definition of + +78 +00:05:14,020 --> 00:05:17,320 +continuous random variable. Now, if you look at + +79 +00:05:17,320 --> 00:05:21,810 +the normal distribution, It looks like bell + +80 +00:05:21,810 --> 00:05:25,990 +-shaped, as we discussed before. 
So it's bell + +81 +00:05:25,990 --> 00:05:31,270 +-shaped, symmetrical. Symmetrical means the area + +82 +00:05:31,270 --> 00:05:34,390 +to the right of the mean equals the area to the + +83 +00:05:34,390 --> 00:05:37,950 +left of the mean. I mean 50% of the area above and + +84 +00:05:37,950 --> 00:05:41,770 +50% below. So that's the meaning of symmetrical. + +85 +00:05:42,490 --> 00:05:46,370 +The other feature of normal distribution, the + +86 +00:05:46,370 --> 00:05:49,510 +measures of center tendency are equal or + +87 +00:05:49,510 --> 00:05:53,170 +approximately equal. Mean, median, and mode are + +88 +00:05:53,170 --> 00:05:55,530 +roughly equal. In reality, they are not equal, + +89 +00:05:55,650 --> 00:05:58,210 +exactly equal, but you can say they are + +90 +00:05:58,210 --> 00:06:01,850 +approximately equal. Now, there are two parameters + +91 +00:06:01,850 --> 00:06:05,750 +describing the normal distribution. One is called + +92 +00:06:05,750 --> 00:06:10,820 +the location parameter. location, or central + +93 +00:06:10,820 --> 00:06:13,800 +tendency, as we discussed before, location is + +94 +00:06:13,800 --> 00:06:17,160 +determined by the mean mu. So the first parameter + +95 +00:06:17,160 --> 00:06:20,340 +for the normal distribution is the mean mu. The + +96 +00:06:20,340 --> 00:06:24,240 +other parameter measures the spread of the data, + +97 +00:06:24,280 --> 00:06:27,680 +or the variability of the data, and the spread is + +98 +00:06:27,680 --> 00:06:31,860 +sigma, or the variation. So we have two + +99 +00:06:31,860 --> 00:06:36,770 +parameters, mu and sigma. The random variable in + +100 +00:06:36,770 --> 00:06:39,930 +this case can take any value from minus infinity + +101 +00:06:39,930 --> 00:06:44,270 +up to infinity. So random variable in this case + +102 +00:06:44,270 --> 00:06:50,310 +continuous ranges from minus infinity all the way + +103 +00:06:50,310 --> 00:06:55,100 +up to infinity. 
I mean from this point here up to + +104 +00:06:55,100 --> 00:06:58,380 +infinity. So the values range from minus infinity + +105 +00:06:58,380 --> 00:07:02,080 +up to infinity. And if you look here, the mean is + +106 +00:07:02,080 --> 00:07:05,600 +located nearly in the middle. And mean and median + +107 +00:07:05,600 --> 00:07:10,820 +are all approximately equal. That's the features + +108 +00:07:10,820 --> 00:07:14,740 +or the characteristics of the normal distribution. + +109 +00:07:16,460 --> 00:07:20,360 +Now, how can we compute the probabilities under + +110 +00:07:20,360 --> 00:07:25,840 +the normal killer? The formula that is used to + +111 +00:07:25,840 --> 00:07:29,220 +compute the probabilities is given by this one. It + +112 +00:07:29,220 --> 00:07:33,560 +looks complicated formula because we have to use + +113 +00:07:33,560 --> 00:07:36,040 +calculus in order to determine the area underneath + +114 +00:07:36,040 --> 00:07:40,120 +the cube. So we are looking for something else. So + +115 +00:07:40,120 --> 00:07:45,300 +this formula is it seems to be complicated. It's + +116 +00:07:45,300 --> 00:07:49,600 +not hard but it's complicated one, but we can use + +117 +00:07:49,600 --> 00:07:52,380 +it. If we know calculus very well, we can use + +118 +00:07:52,380 --> 00:07:55,240 +integration to create the probabilities underneath + +119 +00:07:55,240 --> 00:07:58,900 +the curve. But for our course, we are going to + +120 +00:07:58,900 --> 00:08:04,460 +skip this formula because this + +121 +00:08:04,460 --> 00:08:09,340 +formula depends actually on mu and sigma. A mu can + +122 +00:08:09,340 --> 00:08:13,110 +take any value. Sigma also can take any value. + +123 +00:08:13,930 --> 00:08:17,310 +That means we have different normal distributions. + +124 +00:08:18,470 --> 00:08:23,830 +Because the distribution actually depends on these + +125 +00:08:23,830 --> 00:08:27,610 +two parameters. 
So by varying the parameters mu + +126 +00:08:27,610 --> 00:08:29,790 +and sigma, we obtain different normal + +127 +00:08:29,790 --> 00:08:32,710 +distributions. Since we have different mu and + +128 +00:08:32,710 --> 00:08:36,310 +sigma, it means we should have different normal + +129 +00:08:36,310 --> 00:08:38,770 +distributions. For this reason, it's very + +130 +00:08:38,770 --> 00:08:43,430 +complicated to have tables or probability tables + +131 +00:08:43,430 --> 00:08:46,010 +in order to determine these probabilities because + +132 +00:08:46,010 --> 00:08:50,130 +there are infinite values of mu and sigma maybe + +133 +00:08:50,130 --> 00:08:57,750 +your edges the mean is 19. Sigma is, for example, + +134 +00:08:57,910 --> 00:09:01,990 +5. For weights, maybe the mean is 70 kilograms, + +135 +00:09:02,250 --> 00:09:04,990 +the average is 10. For scores, maybe the average + +136 +00:09:04,990 --> 00:09:08,710 +is 65, the mean is 20, sigma is 20, and so on. So + +137 +00:09:08,710 --> 00:09:11,090 +we have different values of mu and sigma. For this + +138 +00:09:11,090 --> 00:09:13,650 +reason, we have different normal distributions. + +139 +00:09:18,490 --> 00:09:25,740 +Because changing mu shifts the distribution either + +140 +00:09:25,740 --> 00:09:29,640 +left or to the right. So maybe the mean is shifted + +141 +00:09:29,640 --> 00:09:32,440 +to the right side, or the mean maybe shifted to + +142 +00:09:32,440 --> 00:09:37,140 +the left side. Also, changing sigma, sigma is the + +143 +00:09:37,140 --> 00:09:40,660 +distance between the mu and the curve. The curve + +144 +00:09:40,660 --> 00:09:45,220 +is the points, or the data values. Now this sigma + +145 +00:09:45,220 --> 00:09:48,380 +can be increases or decreases. So if sigma + +146 +00:09:48,380 --> 00:09:52,860 +increases, it means the spread also increases. Or + +147 +00:09:52,860 --> 00:09:55,780 +if sigma decreases, also the spread will decrease. 
+ +148 +00:09:56,200 --> 00:09:59,660 +So the distribution or the normal distribution + +149 +00:09:59,660 --> 00:10:02,820 +depends actually on these two values. For this + +150 +00:10:02,820 --> 00:10:05,120 +reason, since we have too many values or infinite + +151 +00:10:05,120 --> 00:10:07,600 +values of mu and sigma, then in this case we have + +152 +00:10:07,600 --> 00:10:14,500 +different normal distributions. There is another + +153 +00:10:14,500 --> 00:10:16,940 +distribution. It's called standardized normal. + +154 +00:10:20,330 --> 00:10:26,070 +Now, we have normal distribution X, and how can we + +155 +00:10:26,070 --> 00:10:31,930 +transform from normal distribution to standardized + +156 +00:10:31,930 --> 00:10:35,310 +normal distribution? The reason is that the mean + +157 +00:10:35,310 --> 00:10:40,310 +of Z, I mean, Z is used for standardized normal. + +158 +00:10:40,850 --> 00:10:44,490 +The mean of Z is always zero, and sigma is one. + +159 +00:10:45,770 --> 00:10:48,150 +Now it's a big difference. The first one has + +160 +00:10:48,150 --> 00:10:53,160 +infinite values of Mu and Sigma. Now, for the + +161 +00:10:53,160 --> 00:10:56,200 +standardized normal distribution, the mean is + +162 +00:10:56,200 --> 00:11:01,540 +fixed value. The mean is zero, Sigma is one. So, + +163 +00:11:01,620 --> 00:11:04,340 +the question is, how can we actually transform + +164 +00:11:04,340 --> 00:11:09,720 +from X, which has normal distribution, to Z, which + +165 +00:11:09,720 --> 00:11:13,160 +has standardized normal with mean zero and Sigma + +166 +00:11:13,160 --> 00:11:23,330 +of one. Let's see. How can we translate x which + +167 +00:11:23,330 --> 00:11:27,510 +has normal distribution to z that has standardized + +168 +00:11:27,510 --> 00:11:32,190 +normal distribution? The idea is you have just to + +169 +00:11:32,190 --> 00:11:39,170 +subtract mu of x, x minus mu, then divide this + +170 +00:11:39,170 --> 00:11:43,150 +result by sigma. 
So we just subtract the mean of + +171 +00:11:43,150 --> 00:11:49,660 +x. and dividing by its standard deviation now so + +172 +00:11:49,660 --> 00:11:52,360 +if we have x which has normal distribution with + +173 +00:11:52,360 --> 00:11:55,940 +mean mu and standard deviation sigma to transform + +174 +00:11:55,940 --> 00:12:00,960 +or to convert to z score use this formula x minus + +175 +00:12:00,960 --> 00:12:05,220 +the mean then divide by its standard deviation now + +176 +00:12:05,220 --> 00:12:09,090 +all of the time we are going to use z for + +177 +00:12:09,090 --> 00:12:12,230 +standardized normal distribution and always z has + +178 +00:12:12,230 --> 00:12:15,370 +mean zero and all and sigma or standard deviation. + +179 +00:12:16,250 --> 00:12:20,170 +So the z distribution always has mean of zero and + +180 +00:12:20,170 --> 00:12:25,490 +sigma of one. So that's the story of standardizing + +181 +00:12:25,490 --> 00:12:33,070 +the normal value. Now the Formula for this score + +182 +00:12:33,070 --> 00:12:37,570 +becomes better than the first one, but still we + +183 +00:12:37,570 --> 00:12:40,570 +have to use calculus in order to determine the + +184 +00:12:40,570 --> 00:12:45,710 +probabilities under the standardized normal k. But + +185 +00:12:45,710 --> 00:12:49,470 +this distribution has mean of zero and sigma of + +186 +00:12:49,470 --> 00:12:56,910 +one. So we have a table on page 570. Look at page + +187 +00:12:56,910 --> 00:13:00,910 +570. We have table or actually there are two + +188 +00:13:00,910 --> 00:13:05,010 +tables. One for negative value of Z and the other + +189 +00:13:05,010 --> 00:13:08,830 +for positive value of Z. So we have two tables for + +190 +00:13:08,830 --> 00:13:14,730 +positive and negative values of Z on page 570 and + +191 +00:13:14,730 --> 00:13:15,470 +571. + +192 +00:13:17,870 --> 00:13:22,770 +Now the table on page 570 looks like this one. 
The + +193 +00:13:22,770 --> 00:13:26,610 +table you have starts from minus 6, then minus 5, + +194 +00:13:26,750 --> 00:13:32,510 +minus 4.5, and so on. Here we start from minus 3.4 + +195 +00:13:32,510 --> 00:13:38,850 +all the way down up to 0. Look here, all the way + +196 +00:13:38,850 --> 00:13:44,490 +up to 0. So these scores here. Also we have 0.00, + +197 +00:13:44,610 --> 00:13:51,880 +0.01, up to 0.09. Also, the other page, page 571, + +198 +00:13:52,140 --> 00:13:56,940 +gives the area for positive z values. Here we have + +199 +00:13:56,940 --> 00:14:01,760 +0.0, 0.1, 0.2, all the way down up to 3.4 and you + +200 +00:14:01,760 --> 00:14:05,920 +have up to 6. Now let's see how can we use this + +201 +00:14:05,920 --> 00:14:11,020 +table to compute the probabilities underneath the + +202 +00:14:11,020 --> 00:14:12,460 +normal curve. + +203 +00:14:14,940 --> 00:14:19,190 +First of all, you have to know that Z has mean + +204 +00:14:19,190 --> 00:14:23,750 +zero, standard deviation of one. And the values + +205 +00:14:23,750 --> 00:14:26,610 +could be positive or negative. Values above the + +206 +00:14:26,610 --> 00:14:32,850 +mean, zero, have positive Z values. The other one, + +207 +00:14:32,910 --> 00:14:36,690 +values below the mean, have negative Z values. So + +208 +00:14:36,690 --> 00:14:42,770 +Z score can be negative or positive. Now this is + +209 +00:14:42,770 --> 00:14:46,530 +the formula we have, z equals x minus mu divided + +210 +00:14:46,530 --> 00:14:46,990 +by six. + +211 +00:14:52,810 --> 00:15:01,170 +Now this value could be positive if x is above the + +212 +00:15:01,170 --> 00:15:04,810 +mean, as we mentioned before. It could be a + +213 +00:15:04,810 --> 00:15:09,870 +negative if x is smaller than the mean or zero. 
+ +214 +00:15:13,120 --> 00:15:18,140 +Now the table we have gives the area to the right, + +215 +00:15:18,420 --> 00:15:21,240 +to the left, I'm sorry, to the left, for positive + +216 +00:15:21,240 --> 00:15:26,220 +and negative values of z. Okay, so we have two + +217 +00:15:26,220 --> 00:15:32,160 +tables actually, one for negative on page 570, and + +218 +00:15:32,160 --> 00:15:38,260 +the other one for positive values of z. I think we + +219 +00:15:38,260 --> 00:15:41,060 +discussed that before when we talked about these + +220 +00:15:41,060 --> 00:15:44,080 +scores. We have the same formula. + +221 +00:15:47,120 --> 00:15:53,700 +Now let's look at this, the next slide. Suppose x + +222 +00:15:53,700 --> 00:16:01,880 +is distributed normally with mean of 100. So the + +223 +00:16:01,880 --> 00:16:06,470 +mean of x is 100. and the standard deviation of + +224 +00:16:06,470 --> 00:16:11,110 +50. So sigma is 50. Now let's see how can we + +225 +00:16:11,110 --> 00:16:17,750 +compute the z-score for x equals 200. Again the + +226 +00:16:17,750 --> 00:16:22,790 +formula is just x minus mu divided by sigma x 200 + +227 +00:16:22,790 --> 00:16:28,330 +minus 100 divided by 50 that will give 2. Now the + +228 +00:16:28,330 --> 00:16:33,910 +sign of this value is positive That means x is + +229 +00:16:33,910 --> 00:16:37,950 +greater than the mean, because x is 200. Now, + +230 +00:16:37,990 --> 00:16:42,270 +what's the meaning of 2? What does this value tell + +231 +00:16:42,270 --> 00:16:42,410 +you? + +232 +00:16:48,230 --> 00:16:55,430 +Yeah, exactly. x equals 200 is two standard + +233 +00:16:55,430 --> 00:16:58,690 +deviations above the mean. Because if you look at + +234 +00:16:58,690 --> 00:17:05,210 +200, the x value, The mean is 100, sigma is 50. 
+ +235 +00:17:05,730 --> 00:17:09,690 +Now the difference between the score, which is + +236 +00:17:09,690 --> 00:17:16,810 +200, and the mu, which is 100, is equal to + +237 +00:17:16,810 --> 00:17:18,690 +standard deviations, because the difference is + +238 +00:17:18,690 --> 00:17:24,230 +100. 2 times 50 is 100. So this says that x equals + +239 +00:17:24,230 --> 00:17:29,070 +200 is 2 standard deviations above the mean. If z + +240 +00:17:29,070 --> 00:17:34,330 +is negative, you can say that x is two standard + +241 +00:17:34,330 --> 00:17:38,710 +deviations below them. Make sense? So that's how + +242 +00:17:38,710 --> 00:17:42,670 +can we compute the z square. Now, when we + +243 +00:17:42,670 --> 00:17:45,970 +transform from normal distribution to + +244 +00:17:45,970 --> 00:17:49,490 +standardized, still we will have the same shape. I + +245 +00:17:49,490 --> 00:17:51,350 +mean the distribution is still normally + +246 +00:17:51,350 --> 00:17:55,800 +distributed. So note, the shape of the + +247 +00:17:55,800 --> 00:17:58,840 +distribution is the same, only the scale has + +248 +00:17:58,840 --> 00:18:04,500 +changed. So we can express the problem in original + +249 +00:18:04,500 --> 00:18:10,640 +units, X, or in a standardized unit, Z. So when we + +250 +00:18:10,640 --> 00:18:16,620 +have X, just use this equation to transform to + +251 +00:18:16,620 --> 00:18:17,160 +this form. + +252 +00:18:21,360 --> 00:18:23,200 +Now, for example, suppose we have normal + +253 +00:18:23,200 --> 00:18:26,040 +distribution and we are interested in the area + +254 +00:18:26,040 --> 00:18:32,660 +between A and B. Now, the area between A and B, it + +255 +00:18:32,660 --> 00:18:34,700 +means the probability between them. So + +256 +00:18:34,700 --> 00:18:39,140 +statistically speaking, area means probability. 
So + +257 +00:18:39,140 --> 00:18:42,700 +probability between A and B, I mean probability of + +258 +00:18:42,700 --> 00:18:45,380 +X greater than or equal A and less than or equal B + +259 +00:18:45,380 --> 00:18:49,420 +is the same as X greater than A or less than B. + +260 +00:18:50,450 --> 00:18:57,210 +that means the probability of X equals A this + +261 +00:18:57,210 --> 00:19:02,510 +probability is zero or probability of X equals B + +262 +00:19:02,510 --> 00:19:06,930 +is also zero so in continuous distribution the + +263 +00:19:06,930 --> 00:19:10,630 +equal sign does not matter I mean if we have equal + +264 +00:19:10,630 --> 00:19:15,130 +sign or we don't have these probabilities are the + +265 +00:19:15,130 --> 00:19:19,390 +same so I mean for example if we are interested + +266 +00:19:20,310 --> 00:19:23,450 +for probability of X smaller than or equal to E. + +267 +00:19:24,850 --> 00:19:30,370 +This probability is the same as X smaller than E. + +268 +00:19:31,330 --> 00:19:33,730 +Or on the other hand, if you are interested in the + +269 +00:19:33,730 --> 00:19:39,010 +area above B greater than or equal to B, it's the + +270 +00:19:39,010 --> 00:19:44,770 +same as X smaller than E. So don't worry about the + +271 +00:19:44,770 --> 00:19:48,660 +equal sign. Or continuous distribution, exactly. + +272 +00:19:49,120 --> 00:19:53,820 +But for discrete, it does matter. Now, since we + +273 +00:19:53,820 --> 00:19:58,200 +are talking about normal distribution, and as we + +274 +00:19:58,200 --> 00:20:01,320 +mentioned, normal distribution is symmetric around + +275 +00:20:01,320 --> 00:20:05,900 +the mean, that means the area to the right equals + +276 +00:20:05,900 --> 00:20:09,340 +the area to the left. Now the entire area + +277 +00:20:09,340 --> 00:20:12,940 +underneath the normal curve equals one. I mean + +278 +00:20:12,940 --> 00:20:16,500 +probability of X ranges from minus infinity up to + +279 +00:20:16,500 --> 00:20:21,500 +infinity equals one. 
So probability of X greater + +280 +00:20:21,500 --> 00:20:26,920 +than minus infinity up to infinity is one. The + +281 +00:20:26,920 --> 00:20:31,480 +total area is one. So the area from minus infinity + +282 +00:20:31,480 --> 00:20:38,080 +up to the mean mu is one-half. The same as the + +283 +00:20:38,080 --> 00:20:42,600 +area from mu up to infinity is also one-half. That + +284 +00:20:42,600 --> 00:20:44,760 +means the probability of X greater than minus + +285 +00:20:44,760 --> 00:20:48,300 +infinity up to mu equals the probability from mu + +286 +00:20:48,300 --> 00:20:52,120 +up to infinity because of symmetry. I mean you + +287 +00:20:52,120 --> 00:20:56,160 +cannot say that for any distribution. Just for + +288 +00:20:56,160 --> 00:20:59,000 +symmetric distribution, the area below the mean + +289 +00:20:59,000 --> 00:21:03,780 +equals one-half, which is the same as the area to + +290 +00:21:03,780 --> 00:21:07,110 +the right of the mean. So the entire Probability + +291 +00:21:07,110 --> 00:21:11,330 +is one. And also you have to keep in mind that the + +292 +00:21:11,330 --> 00:21:17,570 +probability always ranges between zero and one. So + +293 +00:21:17,570 --> 00:21:20,030 +that means the probability couldn't be negative. + +294 +00:21:22,870 --> 00:21:27,730 +It should be positive. It shouldn't be greater + +295 +00:21:27,730 --> 00:21:31,710 +than one. So it's between zero and one. So always + +296 +00:21:31,710 --> 00:21:39,020 +the probability lies between zero and one. The + +297 +00:21:39,020 --> 00:21:44,500 +tables we have on page 570 and 571 give the area + +298 +00:21:44,500 --> 00:21:46,040 +to the left side. + +299 +00:21:49,420 --> 00:21:54,660 +For negative or positive z's. Now for example, + +300 +00:21:54,940 --> 00:22:03,060 +suppose we are looking for probability of z less + +301 +00:22:03,060 --> 00:22:08,750 +than 2. How can we find this probability by using + +302 +00:22:08,750 --> 00:22:12,210 +the normal curve? 
Let's go back to this normal + +303 +00:22:12,210 --> 00:22:16,410 +distribution. In the second page, we have positive + +304 +00:22:16,410 --> 00:22:17,070 +z-scores. + +305 +00:22:23,850 --> 00:22:33,390 +So we ask about the probability of z less than. So + +306 +00:22:33,390 --> 00:22:40,690 +the second page, gives positive values of z. And + +307 +00:22:40,690 --> 00:22:44,590 +the table gives the area below. And he asked about + +308 +00:22:44,590 --> 00:22:49,550 +here, B of z is smaller than 2. Now 2, if you + +309 +00:22:49,550 --> 00:22:54,910 +hear, up all the way down here, 2, 0, 0. So the + +310 +00:22:54,910 --> 00:23:00,530 +answer is 9772. So this value, so the probability + +311 +00:23:00,530 --> 00:23:02,130 +is 9772. + +312 +00:23:03,990 --> 00:23:05,390 +Because it's 2. + +313 +00:23:09,510 --> 00:23:14,650 +It's 2, 0, 0. But if you ask about what's the + +314 +00:23:14,650 --> 00:23:20,590 +probability of Z less than 2.05? So this is 2. + +315 +00:23:23,810 --> 00:23:30,370 +Now under 5, 9, 7, 9, 8. So the answer is 9, 7. + +316 +00:23:34,360 --> 00:23:38,900 +Because this is two, and we need five decimal + +317 +00:23:38,900 --> 00:23:44,820 +places. So all the way up to 9798. So this value + +318 +00:23:44,820 --> 00:23:54,380 +is 2.05. Now it's about, it's more than 1.5, + +319 +00:23:55,600 --> 00:23:56,880 +exactly 1.5. + +320 +00:24:02,140 --> 00:24:04,880 +1.5. This is 1.5. + +321 +00:24:08,800 --> 00:24:09,720 +9332. + +322 +00:24:12,440 --> 00:24:16,300 +1.5. Exactly 1.5. So 9332. + +323 +00:24:18,780 --> 00:24:27,990 +What's about probability less than 1.35? 1.3 all + +324 +00:24:27,990 --> 00:24:35,250 +the way to 9.115. 9.115. 9.115. 9.115. 9.115. 9 + +325 +00:24:35,250 --> 00:24:35,650 +.115. 9.115. + +326 +00:24:41,170 --> 00:24:42,430 +9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9 + +327 +00:24:42,430 --> 00:24:42,450 +.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9 + +328 +00:24:42,450 --> 00:24:44,050 +.115. 9.115. 9.115. 
9.115. 9.115. 9.115. 9.115. 9 + +329 +00:24:44,050 --> 00:24:50,530 +.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9 + +330 +00:24:50,530 --> 00:24:54,980 +.115. 9. But here we are looking for the area to + +331 +00:24:54,980 --> 00:25:01,280 +the right. One minus one. Now this area equals + +332 +00:25:01,280 --> 00:25:05,660 +one minus because + +333 +00:25:05,660 --> 00:25:11,420 +since suppose + +334 +00:25:11,420 --> 00:25:18,760 +this is the 1.35 and we are interested in the area + +335 +00:25:18,760 --> 00:25:24,030 +to the right or above 1.35. The table gives the + +336 +00:25:24,030 --> 00:25:28,230 +area below. So the area above equals the total + +337 +00:25:28,230 --> 00:25:31,970 +area underneath the curve is 1. So 1 minus this + +338 +00:25:31,970 --> 00:25:39,050 +value, so equals 0.0885, + +339 +00:25:39,350 --> 00:25:42,250 +and so on. So this is the way how can we compute + +340 +00:25:42,250 --> 00:25:47,850 +the probabilities underneath the normal curve. if + +341 +00:25:47,850 --> 00:25:51,090 +it's probability of z is smaller than then just + +342 +00:25:51,090 --> 00:25:55,910 +use the table directly otherwise if we are talking + +343 +00:25:55,910 --> 00:26:00,390 +about z greater than subtract from one to get the + +344 +00:26:00,390 --> 00:26:04,870 +result that's how can we compute the probability + +345 +00:26:04,870 --> 00:26:13,750 +of z less than or equal now + +346 +00:26:13,750 --> 00:26:18,890 +let's see if we have x and x that has normal + +347 +00:26:18,890 --> 00:26:22,070 +distribution with mean mu and standard deviation + +348 +00:26:22,070 --> 00:26:26,250 +of sigma and let's see how can we compute the + +349 +00:26:26,250 --> 00:26:33,790 +value of the probability mainly + +350 +00:26:33,790 --> 00:26:38,190 +there are three steps to find the probability of x + +351 +00:26:38,190 --> 00:26:42,490 +greater than a and less than b when x is + +352 +00:26:42,490 --> 00:26:47,000 +distributed normally first step Draw normal 
curve + +353 +00:26:47,000 --> 00:26:54,880 +for the problem in terms of x. So draw the normal + +354 +00:26:54,880 --> 00:26:58,140 +curve first. Second, translate x values to z + +355 +00:26:58,140 --> 00:27:03,040 +values by using the formula we have. z x minus mu + +356 +00:27:03,040 --> 00:27:06,440 +divided by sigma. Then use the standardized normal + +357 +00:27:06,440 --> 00:27:15,140 +table on page 570 and 571. For example, Let's see + +358 +00:27:15,140 --> 00:27:18,420 +how can we find normal probabilities. Let's assume + +359 +00:27:18,420 --> 00:27:23,760 +that X represents the time it takes to download an + +360 +00:27:23,760 --> 00:27:28,580 +image from the internet. So suppose X, time + +361 +00:27:28,580 --> 00:27:33,760 +required to download an image file from the + +362 +00:27:33,760 --> 00:27:38,460 +internet. And suppose we know that the time is + +363 +00:27:38,460 --> 00:27:42,060 +normally distributed for with mean of eight + +364 +00:27:42,060 --> 00:27:46,130 +minutes. And standard deviation of five minutes. + +365 +00:27:46,490 --> 00:27:47,510 +So we know the mean. + +366 +00:27:50,610 --> 00:27:59,670 +Eight. Eight. And sigma of five minutes. And they + +367 +00:27:59,670 --> 00:28:03,410 +ask about what's the probability of X smaller than + +368 +00:28:03,410 --> 00:28:07,990 +eight one six. So first thing we have to compute, + +369 +00:28:08,170 --> 00:28:12,190 +to draw the normal curve. The mean lies in the + +370 +00:28:12,190 --> 00:28:18,060 +center. which is 8. He asked about probability of + +371 +00:28:18,060 --> 00:28:22,580 +X smaller than 8.6. So we are interested in the + +372 +00:28:22,580 --> 00:28:27,920 +area below 8.6. So it matched the table we have. 
+ +373 +00:28:29,980 --> 00:28:34,900 +Second step, we have to transform from normal + +374 +00:28:34,900 --> 00:28:37,280 +distribution to standardized normal distribution + +375 +00:28:37,280 --> 00:28:42,120 +by using this form, which is X minus mu divided by + +376 +00:28:42,120 --> 00:28:51,430 +sigma. So x is 8.6 minus the mean, 8, divided by + +377 +00:28:51,430 --> 00:28:57,130 +sigma, gives 0.12. So just straightforward + +378 +00:28:57,130 --> 00:29:02,890 +calculation, 8.6 is your value of x. The mean is + +379 +00:29:02,890 --> 00:29:12,810 +8, sigma is 5, so that gives 0.12. So now, the + +380 +00:29:12,810 --> 00:29:17,210 +problem becomes, instead of asking x smaller than + +381 +00:29:17,210 --> 00:29:25,110 +8.6, it's similar to z less than 0.12. Still, we + +382 +00:29:25,110 --> 00:29:26,310 +have the same normal curve. + +383 +00:29:29,450 --> 00:29:32,990 +8, the mean. Now, the mean of z is 0, as we + +384 +00:29:32,990 --> 00:29:39,230 +mentioned. Instead of x, 8.6, the corresponding z + +385 +00:29:39,230 --> 00:29:43,000 +value is 0.12. So instead of finding probability + +386 +00:29:43,000 --> 00:29:48,580 +of X smaller than 8.6, smaller than 1.12, so they + +387 +00:29:48,580 --> 00:29:53,760 +are equivalent. So we transform here from normal + +388 +00:29:53,760 --> 00:29:56,980 +distribution to standardized normal distribution + +389 +00:29:56,980 --> 00:29:59,980 +in order to compute the probability we are looking + +390 +00:29:59,980 --> 00:30:05,820 +for. Now, this is just a portion of the table we + +391 +00:30:05,820 --> 00:30:06,100 +have. + +392 +00:30:10,530 --> 00:30:18,530 +So for positive z values. Now 0.1 is 0.1. Because + +393 +00:30:18,530 --> 00:30:25,670 +here we are looking for z less than 0.1. So 0.1. + +394 +00:30:27,210 --> 00:30:32,950 +Also, we have two. So move up to two decimal + +395 +00:30:32,950 --> 00:30:38,190 +places, we get this value. So the answer is point. 
+ +396 +00:30:42,120 --> 00:30:45,860 +I think it's straightforward to compute the + +397 +00:30:45,860 --> 00:30:49,460 +probability underneath the normal curve if X has + +398 +00:30:49,460 --> 00:30:53,160 +normal distribution. So B of X is smaller than 8.6 + +399 +00:30:53,160 --> 00:30:56,740 +is the same as B of Z less than 0.12, which is + +400 +00:30:56,740 --> 00:31:02,680 +around 55%. Makes sense because the area to the + +401 +00:31:02,680 --> 00:31:07,080 +left of 0 equals 1 half. But we are looking for + +402 +00:31:07,080 --> 00:31:12,440 +the area below 0.12. So greater than zero. So this + +403 +00:31:12,440 --> 00:31:16,600 +area actually is greater than 0.5. So it makes + +404 +00:31:16,600 --> 00:31:20,440 +sense that your result is greater than 0.5. + +405 +00:31:22,320 --> 00:31:22,960 +Questions? + +406 +00:31:25,480 --> 00:31:30,780 +Next, suppose we are interested of probability of + +407 +00:31:30,780 --> 00:31:35,380 +X greater than. So that's how can we find normal + +408 +00:31:35,380 --> 00:31:41,980 +upper tail probabilities. Again, the table we have + +409 +00:31:41,980 --> 00:31:46,580 +gives the area to the left. In order to compute + +410 +00:31:46,580 --> 00:31:50,880 +the area in the upper tail probabilities, I mean + +411 +00:31:50,880 --> 00:31:55,620 +this area, since the normal distribution is + +412 +00:31:55,620 --> 00:32:00,160 +symmetric and The total area underneath the curve + +413 +00:32:00,160 --> 00:32:04,680 +is 1. So the probability of X greater than 8.6 is + +414 +00:32:04,680 --> 00:32:11,640 +the same as 1 minus B of X less than 8.6. So first + +415 +00:32:11,640 --> 00:32:17,020 +step, just find the probability we just have and + +416 +00:32:17,020 --> 00:32:21,680 +subtract from 1. So B of X greater than 8.6, the + +417 +00:32:21,680 --> 00:32:25,930 +same as B of Z greater than 0.12. which is the + +418 +00:32:25,930 --> 00:32:30,370 +same as 1 minus B of Z less than 0.5. 
It's 1 minus + +419 +00:32:30,370 --> 00:32:36,230 +the result we got from previous one. So this value + +420 +00:32:36,230 --> 00:32:39,410 +1 minus this value gives 0.452. + +421 +00:32:41,610 --> 00:32:45,090 +So for the other tail probability, just subtract 1 + +422 +00:32:45,090 --> 00:32:47,690 +from the lower tail probabilities. + +423 +00:32:51,930 --> 00:32:55,750 +Now let's see how can we find Normal probability + +424 +00:32:55,750 --> 00:33:01,750 +between two values. I mean if X, for example, for + +425 +00:33:01,750 --> 00:33:06,610 +the same data we have, suppose X between 8 and 8 + +426 +00:33:06,610 --> 00:33:13,360 +.6. Now what's the area between these two? Here we + +427 +00:33:13,360 --> 00:33:17,220 +have two values of x, x is 8 and x is 8.6. + +428 +00:33:24,280 --> 00:33:33,780 +Exactly, so below 8.6 minus below 8 and below 8 is + +429 +00:33:33,780 --> 00:33:40,840 +1 half. So the probability of x between 8 + +430 +00:33:40,840 --> 00:33:47,340 +and And 8.2 and 8.6. You can find z-score for the + +431 +00:33:47,340 --> 00:33:52,480 +first value, which is zero. Also compute the z + +432 +00:33:52,480 --> 00:33:55,540 +-score for the other value, which as we computed + +433 +00:33:55,540 --> 00:34:01,580 +before, 0.12. Now this problem becomes z between + +434 +00:34:01,580 --> 00:34:04,540 +zero and 0.5. + +435 +00:34:07,480 --> 00:34:15,120 +So B of x. Greater than 8 and smaller than 8.6 is + +436 +00:34:15,120 --> 00:34:20,800 +the same as z between 0 and 0.12. Now this area + +437 +00:34:20,800 --> 00:34:25,320 +equals b of z smaller than 0.12 minus the area + +438 +00:34:25,320 --> 00:34:26,520 +below z which is 1.5. + +439 +00:34:31,100 --> 00:34:37,380 +So again, b of z between 0 and 1.5 equal b of z + +440 +00:34:37,380 --> 00:34:42,840 +small. larger than 0.12 minus b of z less than + +441 +00:34:42,840 --> 00:34:46,520 +zero. Now, b of z less than 0.12 gives this + +442 +00:34:46,520 --> 00:34:53,060 +result, 0.5478. 
The probability below zero is one
+
+443
+00:34:53,060 --> 00:34:56,160
+-half because we know that the area to the left is
+
+444
+00:34:56,160 --> 00:34:59,320
+zero, same as to the right is one-half. So the
+
+445
+00:34:59,320 --> 00:35:04,240
+answer is going to be 0.0478. So that's how can we
+
+446
+00:35:04,240 --> 00:35:07,540
+compute the probabilities for lower tail directly
+
+447
+00:35:07,540 --> 00:35:12,230
+from the table. upper tail is just one minus lower
+
+448
+00:35:12,230 --> 00:35:18,990
+tail and between two values just subtracts the
+
+449
+00:35:18,990 --> 00:35:21,970
+larger one minus smaller one because he was
+
+450
+00:35:21,970 --> 00:35:26,310
+subtracted bz less than point one minus bz less
+
+451
+00:35:26,310 --> 00:35:29,430
+than or equal to zero that will give the normal
+
+452
+00:35:29,430 --> 00:35:36,850
+probability another example suppose we are looking
+
+453
+00:35:36,850 --> 00:35:49,350
+for X between 7.4 and 8. Now, 7.4 lies below the
+
+454
+00:35:49,350 --> 00:35:55,270
+mean. So here, this value, we have to compute the
+
+455
+00:35:55,270 --> 00:36:00,130
+z-score for 7.4 and also the z-score for 8, which
+
+456
+00:36:00,130 --> 00:36:04,090
+is zero. And that will give, again,
+
+457
+00:36:07,050 --> 00:36:13,710
+7.4, if you just use this equation, minus
+
+458
+00:36:13,710 --> 00:36:17,690
+the mean, divided by sigma, negative 0.6 divided
+
+459
+00:36:17,690 --> 00:36:21,150
+by 5, which is negative 0.12.
+
+460
+00:36:22,730 --> 00:36:31,410
+So it gives B of z between minus 0.12 and 0. And
+
+461
+00:36:31,410 --> 00:36:35,700
+that again is B of z less than 0. minus P of Z
+
+462
+00:36:35,700 --> 00:36:40,140
+less than negative 0.12. Is it clear? Now here we
+
+463
+00:36:40,140 --> 00:36:42,260
+converted or we transformed from normal
+
+464
+00:36:42,260 --> 00:36:45,960
+distribution to standardized. 
So instead of X + +465 +00:36:45,960 --> 00:36:52,100 +between 7.4 and 8, we have now Z between minus 0 + +466 +00:36:52,100 --> 00:36:57,480 +.12 and 0. So this area actually is the red one, + +467 +00:36:57,620 --> 00:37:03,740 +the red area is one-half. Total area below z is + +468 +00:37:03,740 --> 00:37:10,700 +one-half, below zero, and minus z below minus 0 + +469 +00:37:10,700 --> 00:37:17,820 +.12. So B of z less than zero minus negative 0.12. + +470 +00:37:18,340 --> 00:37:21,940 +That will give the area between minus 0.12 and + +471 +00:37:21,940 --> 00:37:28,860 +zero. This is one-half. Now, B of z less than + +472 +00:37:28,860 --> 00:37:33,270 +negative 0.12. look you go back to the normal + +473 +00:37:33,270 --> 00:37:37,650 +curve to the normal table but for the negative + +474 +00:37:37,650 --> 00:37:42,310 +values of z negative point one two negative point + +475 +00:37:42,310 --> 00:37:53,290 +one two four five two two it's four five point + +476 +00:37:53,290 --> 00:37:56,630 +five minus point four five two two will give the + +477 +00:37:56,630 --> 00:37:58,370 +result we are looking for + +478 +00:38:01,570 --> 00:38:06,370 +So B of Z less than 0 is 0.5. B of Z less than + +479 +00:38:06,370 --> 00:38:12,650 +negative 0.12 equals minus 0.4522. That will give + +480 +00:38:12,650 --> 00:38:14,290 +0 forcibility. + +481 +00:38:16,790 --> 00:38:23,590 +Now, by symmetric, you can see that this + +482 +00:38:23,590 --> 00:38:28,470 +probability between + +483 +00:38:28,470 --> 00:38:38,300 +Z between minus 0.12 and 0 is the same as the + +484 +00:38:38,300 --> 00:38:43,340 +other side from 0.12 I mean this area the red one + +485 +00:38:43,340 --> 00:38:46,200 +is the same up to 8.6 + +486 +00:38:55,600 --> 00:38:58,840 +So the area between minus 0.12 up to 0 is the same + +487 +00:38:58,840 --> 00:39:04,920 +as from 0 up to 0.12. Because of symmetric, since + +488 +00:39:04,920 --> 00:39:09,680 +this area equals the same for the other part. 
So + +489 +00:39:09,680 --> 00:39:15,660 +from 0 up to 0.12 is the same as minus 0.12 up to + +490 +00:39:15,660 --> 00:39:19,100 +0. So equal, so the normal distribution is + +491 +00:39:19,100 --> 00:39:23,200 +symmetric. So this probability is the same as B of + +492 +00:39:23,200 --> 00:39:27,980 +Z between 0 and 0.12. Any question? + +493 +00:39:34,520 --> 00:39:36,620 +Again, the equal sign does not matter. + +494 +00:39:42,120 --> 00:39:45,000 +Because here we have the complement. The + +495 +00:39:45,000 --> 00:39:49,250 +complement. If this one, I mean, complement of z + +496 +00:39:49,250 --> 00:39:53,350 +less than, greater than 0.12, the complement is B + +497 +00:39:53,350 --> 00:39:56,350 +of z less than or equal to minus 0.12. So we + +498 +00:39:56,350 --> 00:40:00,070 +should have just permutation, the equality. But it + +499 +00:40:00,070 --> 00:40:04,830 +doesn't matter. If in the problem we don't have + +500 +00:40:04,830 --> 00:40:07,470 +equal sign in the complement, we should have equal + +501 +00:40:07,470 --> 00:40:11,430 +sign. But it doesn't matter actually if we have + +502 +00:40:11,430 --> 00:40:14,510 +equal sign or not. For example, if we are looking + +503 +00:40:14,510 --> 00:40:19,430 +for B of X greater than A. Now what's the + +504 +00:40:19,430 --> 00:40:25,950 +complement of that? 1 minus less + +505 +00:40:25,950 --> 00:40:32,450 +than or equal to A. But if X is greater than or + +506 +00:40:32,450 --> 00:40:37,870 +equal to A, the complement is without equal sign. + +507 +00:40:38,310 --> 00:40:40,970 +But in continuous distribution, the equal sign + +508 +00:40:40,970 --> 00:40:44,990 +does not matter. Any question? + +509 +00:40:52,190 --> 00:40:58,130 +comments. Let's move to the next topic which talks + +510 +00:40:58,130 --> 00:41:05,510 +about the empirical rule. If you remember before + +511 +00:41:05,510 --> 00:41:16,750 +we said there is an empirical rule for 68, 95, 95, + +512 +00:41:17,420 --> 00:41:23,060 +99.71. 
Now let's see the exact meaning of this + +513 +00:41:23,060 --> 00:41:23,320 +rule. + +514 +00:41:37,580 --> 00:41:40,460 +Now we have to apply the empirical rule not to + +515 +00:41:40,460 --> 00:41:43,020 +Chebyshev's inequality because the distribution is + +516 +00:41:43,020 --> 00:41:48,670 +normal. Chebyshev's is applied for skewed + +517 +00:41:48,670 --> 00:41:52,630 +distributions. For symmetric, we have to apply the + +518 +00:41:52,630 --> 00:41:55,630 +empirical rule. Here, we assume the distribution + +519 +00:41:55,630 --> 00:41:58,390 +is normal. And today, we are talking about normal + +520 +00:41:58,390 --> 00:42:01,330 +distribution. So we have to use the empirical + +521 +00:42:01,330 --> 00:42:02,410 +rules. + +522 +00:42:07,910 --> 00:42:13,530 +Now, the mean is the value in the middle. Suppose + +523 +00:42:13,530 --> 00:42:16,900 +we are far away. from the mean by one standard + +524 +00:42:16,900 --> 00:42:22,720 +deviation either below or above and we are + +525 +00:42:22,720 --> 00:42:27,040 +interested in the area between this value which is + +526 +00:42:27,040 --> 00:42:33,040 +mu minus sigma so we are looking for mu minus + +527 +00:42:33,040 --> 00:42:36,360 +sigma and mu plus sigma + +528 +00:42:53,270 --> 00:42:59,890 +Last time we said there's a rule 68% of the data + +529 +00:42:59,890 --> 00:43:06,790 +lies one standard deviation within the mean. Now + +530 +00:43:06,790 --> 00:43:10,550 +let's see how can we compute the exact area, area + +531 +00:43:10,550 --> 00:43:15,250 +not just say 68%. Now X has normal distribution + +532 +00:43:15,250 --> 00:43:18,390 +with mean mu and standard deviation sigma. So + +533 +00:43:18,390 --> 00:43:25,280 +let's compare it from normal distribution to + +534 +00:43:25,280 --> 00:43:29,700 +standardized. So this is the first value here. Now + +535 +00:43:29,700 --> 00:43:34,940 +the z-score, the general formula is x minus the + +536 +00:43:34,940 --> 00:43:40,120 +mean divided by sigma. 
Now the first quantity is + +537 +00:43:40,120 --> 00:43:45,660 +mu minus sigma. So instead of x here, so first z + +538 +00:43:45,660 --> 00:43:49,820 +is, now this x should be replaced by mu minus + +539 +00:43:49,820 --> 00:43:55,040 +sigma. So mu minus sigma. So that's my x value, + +540 +00:43:55,560 --> 00:44:00,240 +minus the mean of that, which is mu, divided by + +541 +00:44:00,240 --> 00:44:07,900 +sigma. Mu minus sigma minus mu mu cancels, so plus + +542 +00:44:07,900 --> 00:44:13,520 +one. And let's see how can we compute that area. I + +543 +00:44:13,520 --> 00:44:16,980 +mean between minus one and plus one. In this case, + +544 +00:44:17,040 --> 00:44:23,180 +we are interested or we are looking for the area + +545 +00:44:23,180 --> 00:44:28,300 +between minus one and plus one this area now the + +546 +00:44:28,300 --> 00:44:31,360 +dashed area i mean the area between minus one and + +547 +00:44:31,360 --> 00:44:39,460 +plus one equals the area below one this area minus + +548 +00:44:39,460 --> 00:44:44,980 +the area below minus one that will give the area + +549 +00:44:44,980 --> 00:44:48,200 +between minus one and plus one now go back to the + +550 +00:44:48,200 --> 00:44:52,500 +normal table you have and look at the value of one + +551 +00:44:52,500 --> 00:45:02,620 +z and one under zero what's your answer one point + +552 +00:45:02,620 --> 00:45:11,520 +one point now without using the table can you tell + +553 +00:45:11,520 --> 00:45:17,360 +the area below minus one one minus this one + +554 +00:45:17,360 --> 00:45:17,840 +because + +555 +00:45:23,430 --> 00:45:29,870 +Now the area below, this is 1. The area below 1 is + +556 +00:45:29,870 --> 00:45:31,310 +0.3413. + +557 +00:45:34,430 --> 00:45:37,590 +Okay, now the area below minus 1. + +558 +00:45:40,770 --> 00:45:42,050 +This is minus 1. + +559 +00:45:46,810 --> 00:45:49,550 +Now, the area below minus 1 is the same as above + +560 +00:45:49,550 --> 00:45:50,510 +1. 
+
+561
+00:45:54,310 --> 00:45:58,810
+These are the two areas here are equal. So the
+
+562
+00:45:58,810 --> 00:46:03,110
+area below minus 1, I mean b of z less than minus
+
+563
+00:46:03,110 --> 00:46:09,130
+1 is the same as b of z greater than 1. And b of z
+
+564
+00:46:09,130 --> 00:46:12,650
+greater than 1 is the same as 1 minus b of z
+
+565
+00:46:12,650 --> 00:46:17,310
+smaller than 1. So b of z less than 1 here. You
+
+566
+00:46:17,310 --> 00:46:19,710
+shouldn't need to look again to the table. Just
+
+567
+00:46:19,710 --> 00:46:26,770
+subtract 1 from this value. Make sense? Here we
+
+568
+00:46:26,770 --> 00:46:30,490
+compute the value of B of Z less than 1, which is
+
+569
+00:46:30,490 --> 00:46:35,430
+0.8413. We are looking for B of Z less than minus
+
+570
+00:46:35,430 --> 00:46:39,770
+1, which is the same as B of Z greater than 1.
+
+571
+00:46:40,750 --> 00:46:43,850
+Now, greater than means our tail. It's 1 minus the
+
+572
+00:46:43,850 --> 00:46:48,700
+lower tail probability. So this is 1 minus. So the
+
+573
+00:46:48,700 --> 00:46:52,240
+answer again is 1 minus 0.8413.
+
+574
+00:46:54,280 --> 00:47:00,040
+So 8413 minus 0.1587.
+
+575
+00:47:11,380 --> 00:47:17,030
+So 8413. minus 0.1587.
+
+576
+00:47:21,630 --> 00:47:27,570
+Okay, so that gives 0.6826.
+
+577
+00:47:29,090 --> 00:47:37,550
+Multiply this one by 100, we get 68.26.
+
+578
+00:47:38,750 --> 00:47:44,010
+So roughly 68% of the observations lie between
+
+579
+00:47:44,010 --> 00:47:50,470
+one standard deviation around the mean. So this is
+
+580
+00:47:50,470 --> 00:47:53,850
+the way how can we compute the area below one
+
+581
+00:47:53,850 --> 00:47:57,250
+standard deviation or above one standard deviation
+
+582
+00:47:57,250 --> 00:48:03,790
+of the mean. Do the same for not mu minus sigma,
+
+583
+00:48:05,230 --> 00:48:11,540
+mu plus minus two sigma and mu plus two sigma. 
The + +584 +00:48:11,540 --> 00:48:14,600 +only difference is that this one is going to be + +585 +00:48:14,600 --> 00:48:17,280 +minus 2 and do the same. + +586 +00:48:20,620 --> 00:48:23,080 +That's the empirical rule we discussed in chapter + +587 +00:48:23,080 --> 00:48:28,980 +3. So here we can find any probability, not just + +588 +00:48:28,980 --> 00:48:33,660 +95 or 68 or 99.7. We can use the normal table to + +589 +00:48:33,660 --> 00:48:36,900 +give or to find or to compute any probability. + +590 +00:48:48,270 --> 00:48:53,090 +So again, for the other one, mu plus or minus two + +591 +00:48:53,090 --> 00:49:00,190 +sigma, it covers about 95% of the axis. For mu + +592 +00:49:00,190 --> 00:49:03,750 +plus or minus three sigma, it covers around all + +593 +00:49:03,750 --> 00:49:08,450 +the data, 99.7. So just do it at home, you will + +594 +00:49:08,450 --> 00:49:14,210 +see that the exact area is 95.44 instead of 95. + +595 +00:49:14,840 --> 00:49:18,520 +And the other one is 99.73. So that's the + +596 +00:49:18,520 --> 00:49:23,520 +empirical rule we discussed in chapter three. I'm + +597 +00:49:23,520 --> 00:49:32,560 +going to stop at this point, which is the x value + +598 +00:49:32,560 --> 00:49:38,400 +for the normal probability. Now, what we discussed + +599 +00:49:38,400 --> 00:49:43,560 +so far, we computed the probability. I mean, + +600 +00:49:43,740 --> 00:49:49,120 +what's the probability of X smaller than E? Now, + +601 +00:49:49,200 --> 00:49:56,240 +suppose this probability is known. How can we + +602 +00:49:56,240 --> 00:50:01,500 +compute this value? Later, we'll talk about that. + +603 +00:50:06,300 --> 00:50:09,820 +It's backward calculations. It's inverse or + +604 +00:50:09,820 --> 00:50:11,420 +backward calculation. + +605 +00:50:13,300 --> 00:50:14,460 +for next time inshallah. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/QKT3u32x4wE_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/QKT3u32x4wE_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..7fb6cfc6f0a9065d556cff61d35f2351cf3d4c7a --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/QKT3u32x4wE_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 3131, "start": 8.37, "end": 31.31, "text": " Today, inshallah, we'll start chapter six. Chapter six talks about the normal distribution. In this chapter, there are mainly two objectives. The first objective is to compute probabilities from normal distribution.", "tokens": [2692, 11, 1028, 71, 13492, 11, 321, 603, 722, 7187, 2309, 13, 18874, 2309, 6686, 466, 264, 2710, 7316, 13, 682, 341, 7187, 11, 456, 366, 8704, 732, 15961, 13, 440, 700, 10024, 307, 281, 14722, 33783, 490, 2710, 7316, 13], "avg_logprob": -0.18591889242331186, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 8.37, "end": 8.71, "word": " Today,", "probability": 0.7255859375}, {"start": 8.89, "end": 8.99, "word": " inshallah,", "probability": 0.6405436197916666}, {"start": 9.09, "end": 9.23, "word": " we'll", "probability": 0.763916015625}, {"start": 9.23, "end": 9.69, "word": " start", "probability": 0.927734375}, {"start": 9.69, "end": 10.89, "word": " chapter", "probability": 0.59423828125}, {"start": 10.89, "end": 11.37, "word": " six.", "probability": 0.6064453125}, {"start": 13.49, "end": 13.95, "word": " Chapter", "probability": 0.9404296875}, {"start": 13.95, "end": 14.25, "word": " six", "probability": 0.93359375}, {"start": 14.25, "end": 14.57, "word": " talks", "probability": 0.8740234375}, {"start": 14.57, "end": 15.03, "word": " about", "probability": 0.90185546875}, {"start": 15.03, "end": 15.89, "word": " the", "probability": 0.89404296875}, {"start": 15.89, "end": 16.51, "word": " normal", "probability": 0.837890625}, {"start": 16.51, "end": 17.19, "word": " distribution.", "probability": 0.85546875}, 
{"start": 18.93, "end": 19.21, "word": " In", "probability": 0.951171875}, {"start": 19.21, "end": 20.35, "word": " this", "probability": 0.94384765625}, {"start": 20.35, "end": 20.81, "word": " chapter,", "probability": 0.8681640625}, {"start": 21.19, "end": 21.39, "word": " there", "probability": 0.9140625}, {"start": 21.39, "end": 21.59, "word": " are", "probability": 0.9453125}, {"start": 21.59, "end": 21.89, "word": " mainly", "probability": 0.94970703125}, {"start": 21.89, "end": 22.31, "word": " two", "probability": 0.943359375}, {"start": 22.31, "end": 22.77, "word": " objectives.", "probability": 0.87255859375}, {"start": 24.33, "end": 24.81, "word": " The", "probability": 0.89111328125}, {"start": 24.81, "end": 25.25, "word": " first", "probability": 0.89453125}, {"start": 25.25, "end": 25.69, "word": " objective", "probability": 0.96142578125}, {"start": 25.69, "end": 26.15, "word": " is", "probability": 0.9423828125}, {"start": 26.15, "end": 28.87, "word": " to", "probability": 0.55126953125}, {"start": 28.87, "end": 29.43, "word": " compute", "probability": 0.92138671875}, {"start": 29.43, "end": 30.03, "word": " probabilities", "probability": 0.9658203125}, {"start": 30.03, "end": 30.47, "word": " from", "probability": 0.86181640625}, {"start": 30.47, "end": 30.81, "word": " normal", "probability": 0.85693359375}, {"start": 30.81, "end": 31.31, "word": " distribution.", "probability": 0.88134765625}], "temperature": 1.0}, {"id": 2, "seek": 5801, "start": 32.77, "end": 58.01, "text": " And mainly we'll focus on objective number one. So we are going to use normal distribution in this chapter. And we'll know how can we compute probabilities if the data set is normally distributed. You know many times you talked about extreme points or outliers. So that means if the data has outliers, that is the distribution is not normally distributed. 
Now in this case,", "tokens": [400, 8704, 321, 603, 1879, 322, 10024, 1230, 472, 13, 407, 321, 366, 516, 281, 764, 2710, 7316, 294, 341, 7187, 13, 400, 321, 603, 458, 577, 393, 321, 14722, 33783, 498, 264, 1412, 992, 307, 5646, 12631, 13, 509, 458, 867, 1413, 291, 2825, 466, 8084, 2793, 420, 484, 23646, 13, 407, 300, 1355, 498, 264, 1412, 575, 484, 23646, 11, 300, 307, 264, 7316, 307, 406, 5646, 12631, 13, 823, 294, 341, 1389, 11], "avg_logprob": -0.19348620594321908, "compression_ratio": 1.7155963302752293, "no_speech_prob": 0.0, "words": [{"start": 32.77, "end": 33.11, "word": " And", "probability": 0.47509765625}, {"start": 33.11, "end": 33.43, "word": " mainly", "probability": 0.935546875}, {"start": 33.43, "end": 33.73, "word": " we'll", "probability": 0.4166259765625}, {"start": 33.73, "end": 34.23, "word": " focus", "probability": 0.94580078125}, {"start": 34.23, "end": 34.53, "word": " on", "probability": 0.9404296875}, {"start": 34.53, "end": 34.89, "word": " objective", "probability": 0.84814453125}, {"start": 34.89, "end": 35.13, "word": " number", "probability": 0.91357421875}, {"start": 35.13, "end": 35.39, "word": " one.", "probability": 0.7880859375}, {"start": 36.03, "end": 36.25, "word": " So", "probability": 0.951171875}, {"start": 36.25, "end": 36.39, "word": " we", "probability": 0.818359375}, {"start": 36.39, "end": 36.51, "word": " are", "probability": 0.86669921875}, {"start": 36.51, "end": 36.73, "word": " going", "probability": 0.95068359375}, {"start": 36.73, "end": 36.89, "word": " to", "probability": 0.96728515625}, {"start": 36.89, "end": 37.27, "word": " use", "probability": 0.88525390625}, {"start": 37.27, "end": 37.95, "word": " normal", "probability": 0.830078125}, {"start": 37.95, "end": 38.49, "word": " distribution", "probability": 0.83740234375}, {"start": 38.49, "end": 38.73, "word": " in", "probability": 0.89794921875}, {"start": 38.73, "end": 38.85, "word": " this", "probability": 0.94140625}, {"start": 38.85, "end": 39.21, 
"word": " chapter.", "probability": 0.87060546875}, {"start": 39.69, "end": 40.11, "word": " And", "probability": 0.94189453125}, {"start": 40.11, "end": 40.29, "word": " we'll", "probability": 0.722900390625}, {"start": 40.29, "end": 40.45, "word": " know", "probability": 0.88427734375}, {"start": 40.45, "end": 40.61, "word": " how", "probability": 0.89892578125}, {"start": 40.61, "end": 40.83, "word": " can", "probability": 0.8720703125}, {"start": 40.83, "end": 40.97, "word": " we", "probability": 0.955078125}, {"start": 40.97, "end": 41.43, "word": " compute", "probability": 0.908203125}, {"start": 41.43, "end": 42.13, "word": " probabilities", "probability": 0.9072265625}, {"start": 42.13, "end": 43.39, "word": " if", "probability": 0.7265625}, {"start": 43.39, "end": 43.57, "word": " the", "probability": 0.91943359375}, {"start": 43.57, "end": 43.83, "word": " data", "probability": 0.5556640625}, {"start": 43.83, "end": 44.21, "word": " set", "probability": 0.927734375}, {"start": 44.21, "end": 44.75, "word": " is", "probability": 0.94384765625}, {"start": 44.75, "end": 45.15, "word": " normally", "probability": 0.8896484375}, {"start": 45.15, "end": 45.63, "word": " distributed.", "probability": 0.9306640625}, {"start": 46.17, "end": 46.27, "word": " You", "probability": 0.62744140625}, {"start": 46.27, "end": 46.35, "word": " know", "probability": 0.908203125}, {"start": 46.35, "end": 46.51, "word": " many", "probability": 0.49609375}, {"start": 46.51, "end": 46.81, "word": " times", "probability": 0.90087890625}, {"start": 46.81, "end": 46.93, "word": " you", "probability": 0.55126953125}, {"start": 46.93, "end": 47.15, "word": " talked", "probability": 0.76953125}, {"start": 47.15, "end": 47.47, "word": " about", "probability": 0.8828125}, {"start": 47.47, "end": 47.91, "word": " extreme", "probability": 0.87548828125}, {"start": 47.91, "end": 48.91, "word": " points", "probability": 0.599609375}, {"start": 48.91, "end": 49.15, "word": " or", 
"probability": 0.90380859375}, {"start": 49.15, "end": 49.63, "word": " outliers.", "probability": 0.853515625}, {"start": 50.57, "end": 50.69, "word": " So", "probability": 0.52490234375}, {"start": 50.69, "end": 50.89, "word": " that", "probability": 0.92724609375}, {"start": 50.89, "end": 51.13, "word": " means", "probability": 0.9365234375}, {"start": 51.13, "end": 51.37, "word": " if", "probability": 0.8623046875}, {"start": 51.37, "end": 51.61, "word": " the", "probability": 0.91552734375}, {"start": 51.61, "end": 51.85, "word": " data", "probability": 0.93994140625}, {"start": 51.85, "end": 52.09, "word": " has", "probability": 0.93505859375}, {"start": 52.09, "end": 52.59, "word": " outliers,", "probability": 0.95751953125}, {"start": 53.75, "end": 54.07, "word": " that", "probability": 0.92919921875}, {"start": 54.07, "end": 54.33, "word": " is", "probability": 0.88525390625}, {"start": 54.33, "end": 54.49, "word": " the", "probability": 0.68017578125}, {"start": 54.49, "end": 54.91, "word": " distribution", "probability": 0.86328125}, {"start": 54.91, "end": 55.13, "word": " is", "probability": 0.90234375}, {"start": 55.13, "end": 55.29, "word": " not", "probability": 0.94384765625}, {"start": 55.29, "end": 55.71, "word": " normally", "probability": 0.9091796875}, {"start": 55.71, "end": 56.23, "word": " distributed.", "probability": 0.93115234375}, {"start": 56.77, "end": 57.09, "word": " Now", "probability": 0.9619140625}, {"start": 57.09, "end": 57.29, "word": " in", "probability": 0.72021484375}, {"start": 57.29, "end": 57.55, "word": " this", "probability": 0.9482421875}, {"start": 57.55, "end": 58.01, "word": " case,", "probability": 0.9072265625}], "temperature": 1.0}, {"id": 3, "seek": 8755, "start": 58.57, "end": 87.55, "text": " If the distribution is normal, how can we compute probabilities underneath the normal curve? 
The second objective is to use the normal probability plot to determine whether a set of data is approximately normally distributed. I mean beside box plots we discussed before. Beside this score, how can we tell", "tokens": [759, 264, 7316, 307, 2710, 11, 577, 393, 321, 14722, 33783, 7223, 264, 2710, 7605, 30, 440, 1150, 10024, 307, 281, 764, 264, 2710, 8482, 7542, 281, 6997, 1968, 257, 992, 295, 1412, 307, 10447, 5646, 12631, 13, 286, 914, 15726, 2424, 28609, 321, 7152, 949, 13, 8190, 482, 341, 6175, 11, 577, 393, 321, 980], "avg_logprob": -0.1621436403508772, "compression_ratio": 1.6020942408376964, "no_speech_prob": 0.0, "words": [{"start": 58.57, "end": 58.97, "word": " If", "probability": 0.8193359375}, {"start": 58.97, "end": 59.19, "word": " the", "probability": 0.84423828125}, {"start": 59.19, "end": 59.63, "word": " distribution", "probability": 0.83935546875}, {"start": 59.63, "end": 59.91, "word": " is", "probability": 0.93798828125}, {"start": 59.91, "end": 60.27, "word": " normal,", "probability": 0.85791015625}, {"start": 60.63, "end": 60.85, "word": " how", "probability": 0.9345703125}, {"start": 60.85, "end": 61.09, "word": " can", "probability": 0.943359375}, {"start": 61.09, "end": 61.25, "word": " we", "probability": 0.95166015625}, {"start": 61.25, "end": 61.65, "word": " compute", "probability": 0.9033203125}, {"start": 61.65, "end": 62.35, "word": " probabilities", "probability": 0.875}, {"start": 62.35, "end": 63.01, "word": " underneath", "probability": 0.9462890625}, {"start": 63.01, "end": 63.89, "word": " the", "probability": 0.9111328125}, {"start": 63.89, "end": 64.35, "word": " normal", "probability": 0.86083984375}, {"start": 64.35, "end": 65.05, "word": " curve?", "probability": 0.7548828125}, {"start": 67.13, "end": 67.67, "word": " The", "probability": 0.8916015625}, {"start": 67.67, "end": 68.09, "word": " second", "probability": 0.90087890625}, {"start": 68.09, "end": 68.49, "word": " objective", "probability": 0.94873046875}, 
{"start": 68.49, "end": 68.91, "word": " is", "probability": 0.9375}, {"start": 68.91, "end": 69.11, "word": " to", "probability": 0.85400390625}, {"start": 69.11, "end": 69.45, "word": " use", "probability": 0.892578125}, {"start": 69.45, "end": 69.67, "word": " the", "probability": 0.8623046875}, {"start": 69.67, "end": 70.03, "word": " normal", "probability": 0.72900390625}, {"start": 70.03, "end": 70.59, "word": " probability", "probability": 0.9638671875}, {"start": 70.59, "end": 70.91, "word": " plot", "probability": 0.94384765625}, {"start": 70.91, "end": 71.13, "word": " to", "probability": 0.96337890625}, {"start": 71.13, "end": 71.65, "word": " determine", "probability": 0.91015625}, {"start": 71.65, "end": 72.55, "word": " whether", "probability": 0.921875}, {"start": 72.55, "end": 72.81, "word": " a", "probability": 0.986328125}, {"start": 72.81, "end": 73.03, "word": " set", "probability": 0.927734375}, {"start": 73.03, "end": 73.21, "word": " of", "probability": 0.97021484375}, {"start": 73.21, "end": 73.59, "word": " data", "probability": 0.93994140625}, {"start": 73.59, "end": 74.35, "word": " is", "probability": 0.951171875}, {"start": 74.35, "end": 74.97, "word": " approximately", "probability": 0.7919921875}, {"start": 74.97, "end": 75.47, "word": " normally", "probability": 0.8857421875}, {"start": 75.47, "end": 76.13, "word": " distributed.", "probability": 0.92529296875}, {"start": 77.83, "end": 78.03, "word": " I", "probability": 0.95654296875}, {"start": 78.03, "end": 78.15, "word": " mean", "probability": 0.9638671875}, {"start": 78.15, "end": 78.75, "word": " beside", "probability": 0.269775390625}, {"start": 78.75, "end": 80.03, "word": " box", "probability": 0.54345703125}, {"start": 80.03, "end": 80.39, "word": " plots", "probability": 0.94091796875}, {"start": 80.39, "end": 80.77, "word": " we", "probability": 0.75927734375}, {"start": 80.77, "end": 81.31, "word": " discussed", "probability": 0.87353515625}, {"start": 81.31, "end": 
82.33, "word": " before.", "probability": 0.861328125}, {"start": 84.31, "end": 85.09, "word": " Beside", "probability": 0.806884765625}, {"start": 85.09, "end": 85.55, "word": " this", "probability": 0.465576171875}, {"start": 85.55, "end": 85.97, "word": " score,", "probability": 0.7197265625}, {"start": 86.59, "end": 86.83, "word": " how", "probability": 0.93603515625}, {"start": 86.83, "end": 87.07, "word": " can", "probability": 0.9404296875}, {"start": 87.07, "end": 87.27, "word": " we", "probability": 0.951171875}, {"start": 87.27, "end": 87.55, "word": " tell", "probability": 0.890625}], "temperature": 1.0}, {"id": 4, "seek": 11001, "start": 88.27, "end": 110.01, "text": " if the data point or actually the entire distribution is approximately normally distributed or not. Before we learn if the point is outlier by using backsplot and this score. In this chapter we'll know how can we determine if the entire distribution", "tokens": [498, 264, 1412, 935, 420, 767, 264, 2302, 7316, 307, 10447, 5646, 12631, 420, 406, 13, 4546, 321, 1466, 498, 264, 935, 307, 484, 2753, 538, 1228, 646, 46535, 310, 293, 341, 6175, 13, 682, 341, 7187, 321, 603, 458, 577, 393, 321, 6997, 498, 264, 2302, 7316], "avg_logprob": -0.2273596987432363, "compression_ratio": 1.5822784810126582, "no_speech_prob": 0.0, "words": [{"start": 88.27, "end": 88.55, "word": " if", "probability": 0.74853515625}, {"start": 88.55, "end": 88.71, "word": " the", "probability": 0.9287109375}, {"start": 88.71, "end": 88.95, "word": " data", "probability": 0.94775390625}, {"start": 88.95, "end": 89.43, "word": " point", "probability": 0.96826171875}, {"start": 89.43, "end": 90.19, "word": " or", "probability": 0.74267578125}, {"start": 90.19, "end": 90.63, "word": " actually", "probability": 0.873046875}, {"start": 90.63, "end": 90.85, "word": " the", "probability": 0.90185546875}, {"start": 90.85, "end": 91.21, "word": " entire", "probability": 0.9140625}, {"start": 91.21, "end": 91.85, "word": " 
distribution", "probability": 0.865234375}, {"start": 91.85, "end": 94.01, "word": " is", "probability": 0.87353515625}, {"start": 94.01, "end": 95.35, "word": " approximately", "probability": 0.84912109375}, {"start": 95.35, "end": 95.83, "word": " normally", "probability": 0.865234375}, {"start": 95.83, "end": 96.41, "word": " distributed", "probability": 0.921875}, {"start": 96.41, "end": 96.63, "word": " or", "probability": 0.90478515625}, {"start": 96.63, "end": 96.99, "word": " not.", "probability": 0.93994140625}, {"start": 97.69, "end": 98.41, "word": " Before", "probability": 0.8125}, {"start": 98.41, "end": 98.61, "word": " we", "probability": 0.82568359375}, {"start": 98.61, "end": 98.89, "word": " learn", "probability": 0.76171875}, {"start": 98.89, "end": 99.41, "word": " if", "probability": 0.896484375}, {"start": 99.41, "end": 99.57, "word": " the", "probability": 0.91796875}, {"start": 99.57, "end": 99.83, "word": " point", "probability": 0.9697265625}, {"start": 99.83, "end": 100.01, "word": " is", "probability": 0.951171875}, {"start": 100.01, "end": 100.45, "word": " outlier", "probability": 0.945068359375}, {"start": 100.45, "end": 101.97, "word": " by", "probability": 0.76025390625}, {"start": 101.97, "end": 102.23, "word": " using", "probability": 0.9296875}, {"start": 102.23, "end": 102.81, "word": " backsplot", "probability": 0.4561360677083333}, {"start": 102.81, "end": 103.29, "word": " and", "probability": 0.93359375}, {"start": 103.29, "end": 104.11, "word": " this", "probability": 0.58984375}, {"start": 104.11, "end": 104.43, "word": " score.", "probability": 0.794921875}, {"start": 105.09, "end": 105.47, "word": " In", "probability": 0.9267578125}, {"start": 105.47, "end": 105.65, "word": " this", "probability": 0.9443359375}, {"start": 105.65, "end": 105.91, "word": " chapter", "probability": 0.86767578125}, {"start": 105.91, "end": 106.13, "word": " we'll", "probability": 0.53125}, {"start": 106.13, "end": 106.27, "word": " know", 
"probability": 0.88623046875}, {"start": 106.27, "end": 106.39, "word": " how", "probability": 0.92138671875}, {"start": 106.39, "end": 106.61, "word": " can", "probability": 0.8427734375}, {"start": 106.61, "end": 106.75, "word": " we", "probability": 0.9599609375}, {"start": 106.75, "end": 107.21, "word": " determine", "probability": 0.91650390625}, {"start": 107.21, "end": 108.61, "word": " if", "probability": 0.9443359375}, {"start": 108.61, "end": 108.93, "word": " the", "probability": 0.91943359375}, {"start": 108.93, "end": 109.29, "word": " entire", "probability": 0.88818359375}, {"start": 109.29, "end": 110.01, "word": " distribution", "probability": 0.87060546875}], "temperature": 1.0}, {"id": 5, "seek": 13793, "start": 111.19, "end": 137.93, "text": " is approximately normal distributed. So there are two objectives. One is to compute probabilities underneath the normal curve. The other, how can we tell if the data set is out or not? If you remember, first class, we mentioned something about data types. And we said data has mainly two types. 
Numerical data, I mean quantitative data.", "tokens": [307, 10447, 2710, 12631, 13, 407, 456, 366, 732, 15961, 13, 1485, 307, 281, 14722, 33783, 7223, 264, 2710, 7605, 13, 440, 661, 11, 577, 393, 321, 980, 498, 264, 1412, 992, 307, 484, 420, 406, 30, 759, 291, 1604, 11, 700, 1508, 11, 321, 2835, 746, 466, 1412, 3467, 13, 400, 321, 848, 1412, 575, 8704, 732, 3467, 13, 426, 15583, 804, 1412, 11, 286, 914, 27778, 1412, 13], "avg_logprob": -0.21963028420864697, "compression_ratio": 1.518018018018018, "no_speech_prob": 0.0, "words": [{"start": 111.19, "end": 111.63, "word": " is", "probability": 0.7705078125}, {"start": 111.63, "end": 112.27, "word": " approximately", "probability": 0.82421875}, {"start": 112.27, "end": 112.67, "word": " normal", "probability": 0.78076171875}, {"start": 112.67, "end": 113.15, "word": " distributed.", "probability": 0.173095703125}, {"start": 113.87, "end": 114.31, "word": " So", "probability": 0.93896484375}, {"start": 114.31, "end": 114.51, "word": " there", "probability": 0.5869140625}, {"start": 114.51, "end": 114.63, "word": " are", "probability": 0.9365234375}, {"start": 114.63, "end": 114.77, "word": " two", "probability": 0.921875}, {"start": 114.77, "end": 115.17, "word": " objectives.", "probability": 0.82470703125}, {"start": 115.37, "end": 115.49, "word": " One", "probability": 0.91943359375}, {"start": 115.49, "end": 115.65, "word": " is", "probability": 0.9345703125}, {"start": 115.65, "end": 115.79, "word": " to", "probability": 0.951171875}, {"start": 115.79, "end": 116.11, "word": " compute", "probability": 0.9326171875}, {"start": 116.11, "end": 116.71, "word": " probabilities", "probability": 0.9462890625}, {"start": 116.71, "end": 117.21, "word": " underneath", "probability": 0.943359375}, {"start": 117.21, "end": 117.59, "word": " the", "probability": 0.91650390625}, {"start": 117.59, "end": 117.85, "word": " normal", "probability": 0.85888671875}, {"start": 117.85, "end": 118.19, "word": " curve.", "probability": 
0.8095703125}, {"start": 118.61, "end": 118.71, "word": " The", "probability": 0.81201171875}, {"start": 118.71, "end": 118.91, "word": " other,", "probability": 0.89306640625}, {"start": 119.01, "end": 119.09, "word": " how", "probability": 0.921875}, {"start": 119.09, "end": 119.23, "word": " can", "probability": 0.94091796875}, {"start": 119.23, "end": 119.37, "word": " we", "probability": 0.9384765625}, {"start": 119.37, "end": 119.57, "word": " tell", "probability": 0.875}, {"start": 119.57, "end": 119.69, "word": " if", "probability": 0.9453125}, {"start": 119.69, "end": 119.83, "word": " the", "probability": 0.884765625}, {"start": 119.83, "end": 120.03, "word": " data", "probability": 0.50537109375}, {"start": 120.03, "end": 120.39, "word": " set", "probability": 0.9423828125}, {"start": 120.39, "end": 121.87, "word": " is", "probability": 0.9423828125}, {"start": 121.87, "end": 122.67, "word": " out", "probability": 0.5263671875}, {"start": 122.67, "end": 122.87, "word": " or", "probability": 0.3486328125}, {"start": 122.87, "end": 123.09, "word": " not?", "probability": 0.6826171875}, {"start": 124.57, "end": 125.21, "word": " If", "probability": 0.9658203125}, {"start": 125.21, "end": 125.31, "word": " you", "probability": 0.9638671875}, {"start": 125.31, "end": 125.59, "word": " remember,", "probability": 0.880859375}, {"start": 125.81, "end": 126.17, "word": " first", "probability": 0.54296875}, {"start": 126.17, "end": 126.73, "word": " class,", "probability": 0.9677734375}, {"start": 128.31, "end": 128.53, "word": " we", "probability": 0.95458984375}, {"start": 128.53, "end": 128.91, "word": " mentioned", "probability": 0.8173828125}, {"start": 128.91, "end": 129.33, "word": " something", "probability": 0.86767578125}, {"start": 129.33, "end": 129.77, "word": " about", "probability": 0.89501953125}, {"start": 129.77, "end": 130.41, "word": " data", "probability": 0.541015625}, {"start": 130.41, "end": 130.97, "word": " types.", "probability": 
0.85595703125}, {"start": 131.53, "end": 131.75, "word": " And", "probability": 0.9501953125}, {"start": 131.75, "end": 131.91, "word": " we", "probability": 0.892578125}, {"start": 131.91, "end": 132.13, "word": " said", "probability": 0.94677734375}, {"start": 132.13, "end": 132.45, "word": " data", "probability": 0.8154296875}, {"start": 132.45, "end": 132.73, "word": " has", "probability": 0.9423828125}, {"start": 132.73, "end": 132.93, "word": " mainly", "probability": 0.79833984375}, {"start": 132.93, "end": 133.13, "word": " two", "probability": 0.939453125}, {"start": 133.13, "end": 133.51, "word": " types.", "probability": 0.8310546875}, {"start": 134.21, "end": 134.85, "word": " Numerical", "probability": 0.8824869791666666}, {"start": 134.85, "end": 136.09, "word": " data,", "probability": 0.9287109375}, {"start": 136.41, "end": 136.65, "word": " I", "probability": 0.9921875}, {"start": 136.65, "end": 136.81, "word": " mean", "probability": 0.9697265625}, {"start": 136.81, "end": 137.31, "word": " quantitative", "probability": 0.8310546875}, {"start": 137.31, "end": 137.93, "word": " data.", "probability": 0.92724609375}], "temperature": 1.0}, {"id": 6, "seek": 16057, "start": 138.69, "end": 160.57, "text": " and categorical data, qualitative. For numerical data also it has two types, continuous and discrete. And discrete takes only integers such as number of students who take this class or number of accidents and so on. 
But if you are talking about", "tokens": [293, 19250, 804, 1412, 11, 31312, 13, 1171, 29054, 1412, 611, 309, 575, 732, 3467, 11, 10957, 293, 27706, 13, 400, 27706, 2516, 787, 41674, 1270, 382, 1230, 295, 1731, 567, 747, 341, 1508, 420, 1230, 295, 23875, 293, 370, 322, 13, 583, 498, 291, 366, 1417, 466], "avg_logprob": -0.1831951530612245, "compression_ratio": 1.5506329113924051, "no_speech_prob": 0.0, "words": [{"start": 138.69, "end": 139.09, "word": " and", "probability": 0.689453125}, {"start": 139.09, "end": 139.77, "word": " categorical", "probability": 0.807373046875}, {"start": 139.77, "end": 140.19, "word": " data,", "probability": 0.93994140625}, {"start": 140.43, "end": 140.89, "word": " qualitative.", "probability": 0.78271484375}, {"start": 141.87, "end": 142.25, "word": " For", "probability": 0.95263671875}, {"start": 142.25, "end": 142.63, "word": " numerical", "probability": 0.93603515625}, {"start": 142.63, "end": 143.09, "word": " data", "probability": 0.93505859375}, {"start": 143.09, "end": 143.49, "word": " also", "probability": 0.5478515625}, {"start": 143.49, "end": 143.65, "word": " it", "probability": 0.8154296875}, {"start": 143.65, "end": 143.83, "word": " has", "probability": 0.9423828125}, {"start": 143.83, "end": 144.01, "word": " two", "probability": 0.89306640625}, {"start": 144.01, "end": 144.45, "word": " types,", "probability": 0.84130859375}, {"start": 144.89, "end": 145.61, "word": " continuous", "probability": 0.8984375}, {"start": 145.61, "end": 146.19, "word": " and", "probability": 0.9326171875}, {"start": 146.19, "end": 146.75, "word": " discrete.", "probability": 0.89111328125}, {"start": 147.01, "end": 147.15, "word": " And", "probability": 0.9072265625}, {"start": 147.15, "end": 147.47, "word": " discrete", "probability": 0.873046875}, {"start": 147.47, "end": 148.07, "word": " takes", "probability": 0.79736328125}, {"start": 148.07, "end": 148.37, "word": " only", "probability": 0.92919921875}, {"start": 148.37, "end": 
148.79, "word": " integers", "probability": 0.9287109375}, {"start": 148.79, "end": 149.87, "word": " such", "probability": 0.364990234375}, {"start": 149.87, "end": 150.43, "word": " as", "probability": 0.96728515625}, {"start": 150.43, "end": 151.33, "word": " number", "probability": 0.67236328125}, {"start": 151.33, "end": 151.55, "word": " of", "probability": 0.97021484375}, {"start": 151.55, "end": 152.07, "word": " students", "probability": 0.97509765625}, {"start": 152.07, "end": 152.81, "word": " who", "probability": 0.890625}, {"start": 152.81, "end": 153.17, "word": " take", "probability": 0.86181640625}, {"start": 153.17, "end": 153.41, "word": " this", "probability": 0.9462890625}, {"start": 153.41, "end": 153.77, "word": " class", "probability": 0.96044921875}, {"start": 153.77, "end": 155.01, "word": " or", "probability": 0.77490234375}, {"start": 155.01, "end": 155.31, "word": " number", "probability": 0.9345703125}, {"start": 155.31, "end": 155.49, "word": " of", "probability": 0.9736328125}, {"start": 155.49, "end": 156.05, "word": " accidents", "probability": 0.9189453125}, {"start": 156.05, "end": 157.89, "word": " and", "probability": 0.75048828125}, {"start": 157.89, "end": 158.15, "word": " so", "probability": 0.953125}, {"start": 158.15, "end": 158.37, "word": " on.", "probability": 0.9501953125}, {"start": 159.21, "end": 159.47, "word": " But", "probability": 0.943359375}, {"start": 159.47, "end": 159.61, "word": " if", "probability": 0.92822265625}, {"start": 159.61, "end": 159.71, "word": " you", "probability": 0.8798828125}, {"start": 159.71, "end": 159.83, "word": " are", "probability": 0.9306640625}, {"start": 159.83, "end": 160.19, "word": " talking", "probability": 0.859375}, {"start": 160.19, "end": 160.57, "word": " about", "probability": 0.90234375}], "temperature": 1.0}, {"id": 7, "seek": 18906, "start": 161.54, "end": 189.06, "text": " Age, weight, scores, temperature, and so on. It's continuous distribution. 
For this type of variable, I mean for continuous distribution, how can we compute the probabilities underneath the normal? So normal distribution maybe is the most common distribution in statistics, and it's type of continuous distribution. So first, let's define continuous random variable.", "tokens": [16280, 11, 3364, 11, 13444, 11, 4292, 11, 293, 370, 322, 13, 467, 311, 10957, 7316, 13, 1171, 341, 2010, 295, 7006, 11, 286, 914, 337, 10957, 7316, 11, 577, 393, 321, 14722, 264, 33783, 7223, 264, 2710, 30, 407, 2710, 7316, 1310, 307, 264, 881, 2689, 7316, 294, 12523, 11, 293, 309, 311, 2010, 295, 10957, 7316, 13, 407, 700, 11, 718, 311, 6964, 10957, 4974, 7006, 13], "avg_logprob": -0.17176339413438524, "compression_ratio": 1.7902439024390244, "no_speech_prob": 0.0, "words": [{"start": 161.54, "end": 162.04, "word": " Age,", "probability": 0.443359375}, {"start": 162.24, "end": 162.68, "word": " weight,", "probability": 0.91064453125}, {"start": 163.46, "end": 164.04, "word": " scores,", "probability": 0.82080078125}, {"start": 164.22, "end": 164.64, "word": " temperature,", "probability": 0.916015625}, {"start": 164.78, "end": 164.86, "word": " and", "probability": 0.94287109375}, {"start": 164.86, "end": 165.08, "word": " so", "probability": 0.9560546875}, {"start": 165.08, "end": 165.32, "word": " on.", "probability": 0.943359375}, {"start": 165.56, "end": 166.3, "word": " It's", "probability": 0.91650390625}, {"start": 166.3, "end": 166.72, "word": " continuous", "probability": 0.82568359375}, {"start": 166.72, "end": 167.24, "word": " distribution.", "probability": 0.84326171875}, {"start": 167.74, "end": 168.1, "word": " For", "probability": 0.95361328125}, {"start": 168.1, "end": 168.38, "word": " this", "probability": 0.94384765625}, {"start": 168.38, "end": 168.74, "word": " type", "probability": 0.97998046875}, {"start": 168.74, "end": 169.26, "word": " of", "probability": 0.96875}, {"start": 169.26, "end": 169.7, "word": " variable,", "probability": 
0.89990234375}, {"start": 170.48, "end": 170.62, "word": " I", "probability": 0.95654296875}, {"start": 170.62, "end": 170.74, "word": " mean", "probability": 0.9697265625}, {"start": 170.74, "end": 170.92, "word": " for", "probability": 0.6279296875}, {"start": 170.92, "end": 171.38, "word": " continuous", "probability": 0.84716796875}, {"start": 171.38, "end": 172.0, "word": " distribution,", "probability": 0.87744140625}, {"start": 172.96, "end": 173.32, "word": " how", "probability": 0.93408203125}, {"start": 173.32, "end": 173.58, "word": " can", "probability": 0.94384765625}, {"start": 173.58, "end": 173.74, "word": " we", "probability": 0.95849609375}, {"start": 173.74, "end": 174.24, "word": " compute", "probability": 0.89794921875}, {"start": 174.24, "end": 174.68, "word": " the", "probability": 0.91943359375}, {"start": 174.68, "end": 175.3, "word": " probabilities", "probability": 0.89501953125}, {"start": 175.3, "end": 176.08, "word": " underneath", "probability": 0.951171875}, {"start": 176.08, "end": 176.3, "word": " the", "probability": 0.89794921875}, {"start": 176.3, "end": 176.54, "word": " normal?", "probability": 0.7626953125}, {"start": 176.9, "end": 177.48, "word": " So", "probability": 0.9423828125}, {"start": 177.48, "end": 177.78, "word": " normal", "probability": 0.81298828125}, {"start": 177.78, "end": 178.38, "word": " distribution", "probability": 0.88818359375}, {"start": 178.38, "end": 179.08, "word": " maybe", "probability": 0.78515625}, {"start": 179.08, "end": 179.26, "word": " is", "probability": 0.90283203125}, {"start": 179.26, "end": 179.4, "word": " the", "probability": 0.91455078125}, {"start": 179.4, "end": 179.64, "word": " most", "probability": 0.9052734375}, {"start": 179.64, "end": 180.02, "word": " common", "probability": 0.90234375}, {"start": 180.02, "end": 180.54, "word": " distribution", "probability": 0.87255859375}, {"start": 180.54, "end": 180.74, "word": " in", "probability": 0.84130859375}, {"start": 180.74, 
"end": 181.22, "word": " statistics,", "probability": 0.86572265625}, {"start": 181.76, "end": 181.98, "word": " and", "probability": 0.92041015625}, {"start": 181.98, "end": 182.14, "word": " it's", "probability": 0.829345703125}, {"start": 182.14, "end": 182.38, "word": " type", "probability": 0.58837890625}, {"start": 182.38, "end": 182.56, "word": " of", "probability": 0.90283203125}, {"start": 182.56, "end": 183.0, "word": " continuous", "probability": 0.86181640625}, {"start": 183.0, "end": 183.48, "word": " distribution.", "probability": 0.91015625}, {"start": 184.84, "end": 185.18, "word": " So", "probability": 0.955078125}, {"start": 185.18, "end": 185.52, "word": " first,", "probability": 0.7314453125}, {"start": 185.64, "end": 186.26, "word": " let's", "probability": 0.968505859375}, {"start": 186.26, "end": 187.82, "word": " define", "probability": 0.97119140625}, {"start": 187.82, "end": 188.34, "word": " continuous", "probability": 0.439208984375}, {"start": 188.34, "end": 188.64, "word": " random", "probability": 0.9560546875}, {"start": 188.64, "end": 189.06, "word": " variable.", "probability": 0.90087890625}], "temperature": 1.0}, {"id": 8, "seek": 20207, "start": 190.65, "end": 202.07, "text": " maybe because for multiple choice problem you should know the definition of continuous random variable is a variable that can assume any value on a continuous", "tokens": [1310, 570, 337, 3866, 3922, 1154, 291, 820, 458, 264, 7123, 295, 10957, 4974, 7006, 307, 257, 7006, 300, 393, 6552, 604, 2158, 322, 257, 10957], "avg_logprob": -0.2909432848294576, "compression_ratio": 1.4587155963302751, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 190.65, "end": 191.25, "word": " maybe", "probability": 0.09619140625}, {"start": 191.25, "end": 191.65, "word": " because", "probability": 0.708984375}, {"start": 191.65, "end": 192.01, "word": " for", "probability": 0.79833984375}, {"start": 192.01, "end": 192.41, "word": " multiple", "probability": 
0.8251953125}, {"start": 192.41, "end": 192.67, "word": " choice", "probability": 0.8916015625}, {"start": 192.67, "end": 193.09, "word": " problem", "probability": 0.787109375}, {"start": 193.09, "end": 193.59, "word": " you", "probability": 0.73681640625}, {"start": 193.59, "end": 193.89, "word": " should", "probability": 0.96142578125}, {"start": 193.89, "end": 194.23, "word": " know", "probability": 0.88330078125}, {"start": 194.23, "end": 195.23, "word": " the", "probability": 0.84912109375}, {"start": 195.23, "end": 195.85, "word": " definition", "probability": 0.9345703125}, {"start": 195.85, "end": 196.37, "word": " of", "probability": 0.93798828125}, {"start": 196.37, "end": 196.93, "word": " continuous", "probability": 0.767578125}, {"start": 196.93, "end": 197.21, "word": " random", "probability": 0.89453125}, {"start": 197.21, "end": 197.83, "word": " variable", "probability": 0.87841796875}, {"start": 197.83, "end": 198.91, "word": " is", "probability": 0.56201171875}, {"start": 198.91, "end": 199.11, "word": " a", "probability": 0.97119140625}, {"start": 199.11, "end": 199.39, "word": " variable", "probability": 0.89111328125}, {"start": 199.39, "end": 199.75, "word": " that", "probability": 0.9052734375}, {"start": 199.75, "end": 200.21, "word": " can", "probability": 0.91259765625}, {"start": 200.21, "end": 200.59, "word": " assume", "probability": 0.9453125}, {"start": 200.59, "end": 201.03, "word": " any", "probability": 0.91650390625}, {"start": 201.03, "end": 201.41, "word": " value", "probability": 0.98095703125}, {"start": 201.41, "end": 201.55, "word": " on", "probability": 0.44873046875}, {"start": 201.55, "end": 201.63, "word": " a", "probability": 0.66259765625}, {"start": 201.63, "end": 202.07, "word": " continuous", "probability": 0.79443359375}], "temperature": 1.0}, {"id": 9, "seek": 22226, "start": 203.38, "end": 222.26, "text": " it can assume any uncountable number of values. So it could be any number in an interval. 
For example, suppose your ages range between 18 years and 20 years. So maybe someone of you, their age is about 18 years, three months.", "tokens": [309, 393, 6552, 604, 6219, 792, 712, 1230, 295, 4190, 13, 407, 309, 727, 312, 604, 1230, 294, 364, 15035, 13, 1171, 1365, 11, 7297, 428, 12357, 3613, 1296, 2443, 924, 293, 945, 924, 13, 407, 1310, 1580, 295, 291, 11, 641, 3205, 307, 466, 2443, 924, 11, 1045, 2493, 13], "avg_logprob": -0.22160456186303726, "compression_ratio": 1.4394904458598725, "no_speech_prob": 0.0, "words": [{"start": 203.38, "end": 203.54, "word": " it", "probability": 0.11224365234375}, {"start": 203.54, "end": 203.68, "word": " can", "probability": 0.90576171875}, {"start": 203.68, "end": 203.96, "word": " assume", "probability": 0.931640625}, {"start": 203.96, "end": 204.18, "word": " any", "probability": 0.78271484375}, {"start": 204.18, "end": 204.86, "word": " uncountable", "probability": 0.8458658854166666}, {"start": 204.86, "end": 205.38, "word": " number", "probability": 0.89990234375}, {"start": 205.38, "end": 205.64, "word": " of", "probability": 0.93603515625}, {"start": 205.64, "end": 206.1, "word": " values.", "probability": 0.93310546875}, {"start": 206.92, "end": 207.02, "word": " So", "probability": 0.83837890625}, {"start": 207.02, "end": 208.0, "word": " it", "probability": 0.6650390625}, {"start": 208.0, "end": 208.2, "word": " could", "probability": 0.89111328125}, {"start": 208.2, "end": 208.48, "word": " be", "probability": 0.95849609375}, {"start": 208.48, "end": 209.52, "word": " any", "probability": 0.8828125}, {"start": 209.52, "end": 209.96, "word": " number", "probability": 0.935546875}, {"start": 209.96, "end": 210.38, "word": " in", "probability": 0.919921875}, {"start": 210.38, "end": 210.52, "word": " an", "probability": 0.9453125}, {"start": 210.52, "end": 210.84, "word": " interval.", "probability": 0.9677734375}, {"start": 210.96, "end": 211.08, "word": " For", "probability": 0.95556640625}, {"start": 211.08, 
"end": 211.44, "word": " example,", "probability": 0.97021484375}, {"start": 211.88, "end": 212.18, "word": " suppose", "probability": 0.91552734375}, {"start": 212.18, "end": 212.7, "word": " your", "probability": 0.88671875}, {"start": 212.7, "end": 213.18, "word": " ages", "probability": 0.88427734375}, {"start": 213.18, "end": 214.28, "word": " range", "probability": 0.787109375}, {"start": 214.28, "end": 214.8, "word": " between", "probability": 0.888671875}, {"start": 214.8, "end": 215.34, "word": " 18", "probability": 0.86083984375}, {"start": 215.34, "end": 215.72, "word": " years", "probability": 0.916015625}, {"start": 215.72, "end": 215.88, "word": " and", "probability": 0.93798828125}, {"start": 215.88, "end": 216.2, "word": " 20", "probability": 0.9521484375}, {"start": 216.2, "end": 216.6, "word": " years.", "probability": 0.9267578125}, {"start": 217.14, "end": 217.3, "word": " So", "probability": 0.955078125}, {"start": 217.3, "end": 217.52, "word": " maybe", "probability": 0.923828125}, {"start": 217.52, "end": 218.0, "word": " someone", "probability": 0.693359375}, {"start": 218.0, "end": 218.34, "word": " of", "probability": 0.92626953125}, {"start": 218.34, "end": 218.58, "word": " you,", "probability": 0.740234375}, {"start": 219.12, "end": 219.24, "word": " their", "probability": 0.257568359375}, {"start": 219.24, "end": 219.58, "word": " age", "probability": 0.95166015625}, {"start": 219.58, "end": 219.84, "word": " is", "probability": 0.93994140625}, {"start": 219.84, "end": 220.22, "word": " about", "probability": 0.912109375}, {"start": 220.22, "end": 221.28, "word": " 18", "probability": 0.9580078125}, {"start": 221.28, "end": 221.68, "word": " years,", "probability": 0.92822265625}, {"start": 221.82, "end": 222.04, "word": " three", "probability": 0.6181640625}, {"start": 222.04, "end": 222.26, "word": " months.", "probability": 0.73193359375}], "temperature": 1.0}, {"id": 10, "seek": 23994, "start": 223.34, "end": 239.94, "text": " Or 
maybe your weight is 70 kilogram point five, and so on. So it's continuous on the variable. Other examples for continuous, thickness of an item. For example, the thickness. This one is called thickness.", "tokens": [1610, 1310, 428, 3364, 307, 5285, 21741, 935, 1732, 11, 293, 370, 322, 13, 407, 309, 311, 10957, 322, 264, 7006, 13, 5358, 5110, 337, 10957, 11, 14855, 295, 364, 3174, 13, 1171, 1365, 11, 264, 14855, 13, 639, 472, 307, 1219, 14855, 13], "avg_logprob": -0.2522569497426351, "compression_ratio": 1.4206896551724137, "no_speech_prob": 0.0, "words": [{"start": 223.34, "end": 223.58, "word": " Or", "probability": 0.5654296875}, {"start": 223.58, "end": 223.78, "word": " maybe", "probability": 0.90380859375}, {"start": 223.78, "end": 224.0, "word": " your", "probability": 0.884765625}, {"start": 224.0, "end": 224.26, "word": " weight", "probability": 0.9228515625}, {"start": 224.26, "end": 224.88, "word": " is", "probability": 0.86572265625}, {"start": 224.88, "end": 225.32, "word": " 70", "probability": 0.89453125}, {"start": 225.32, "end": 225.7, "word": " kilogram", "probability": 0.53955078125}, {"start": 225.7, "end": 225.98, "word": " point", "probability": 0.6953125}, {"start": 225.98, "end": 226.44, "word": " five,", "probability": 0.642578125}, {"start": 227.04, "end": 227.2, "word": " and", "probability": 0.88720703125}, {"start": 227.2, "end": 227.34, "word": " so", "probability": 0.9541015625}, {"start": 227.34, "end": 227.46, "word": " on.", "probability": 0.76953125}, {"start": 227.48, "end": 227.58, "word": " So", "probability": 0.876953125}, {"start": 227.58, "end": 227.68, "word": " it's", "probability": 0.87939453125}, {"start": 227.68, "end": 228.12, "word": " continuous", "probability": 0.873046875}, {"start": 228.12, "end": 228.34, "word": " on", "probability": 0.810546875}, {"start": 228.34, "end": 228.46, "word": " the", "probability": 0.8759765625}, {"start": 228.46, "end": 228.64, "word": " variable.", "probability": 0.295654296875}, 
{"start": 229.12, "end": 229.3, "word": " Other", "probability": 0.9169921875}, {"start": 229.3, "end": 229.78, "word": " examples", "probability": 0.83935546875}, {"start": 229.78, "end": 230.0, "word": " for", "probability": 0.90625}, {"start": 230.0, "end": 230.44, "word": " continuous,", "probability": 0.8203125}, {"start": 231.1, "end": 231.44, "word": " thickness", "probability": 0.9404296875}, {"start": 231.44, "end": 231.76, "word": " of", "probability": 0.96728515625}, {"start": 231.76, "end": 231.92, "word": " an", "probability": 0.95751953125}, {"start": 231.92, "end": 232.28, "word": " item.", "probability": 0.9638671875}, {"start": 232.64, "end": 232.76, "word": " For", "probability": 0.9580078125}, {"start": 232.76, "end": 233.14, "word": " example,", "probability": 0.970703125}, {"start": 233.74, "end": 233.9, "word": " the", "probability": 0.912109375}, {"start": 233.9, "end": 234.44, "word": " thickness.", "probability": 0.9619140625}, {"start": 238.26, "end": 238.78, "word": " This", "probability": 0.7421875}, {"start": 238.78, "end": 239.24, "word": " one", "probability": 0.849609375}, {"start": 239.24, "end": 239.4, "word": " is", "probability": 0.8359375}, {"start": 239.4, "end": 239.56, "word": " called", "probability": 0.63330078125}, {"start": 239.56, "end": 239.94, "word": " thickness.", "probability": 0.71728515625}], "temperature": 1.0}, {"id": 11, "seek": 26913, "start": 241.75, "end": 269.13, "text": " Now, the thickness may be 2 centimeters or 3 centimeters and so on, but it might be 2.5 centimeters. For example, for this remote, the thickness is 2.5 centimeters or 2.6, not exactly 2 or 3. So it could be any value. Range is, for example, between 2 centimeters and 3 centimeters. So from 2 to 3 is a big range because it can take anywhere from 2.1 to 2.15 and so on. 
So thickness is an example of continuous random variable.", "tokens": [823, 11, 264, 14855, 815, 312, 568, 23300, 420, 805, 23300, 293, 370, 322, 11, 457, 309, 1062, 312, 568, 13, 20, 23300, 13, 1171, 1365, 11, 337, 341, 8607, 11, 264, 14855, 307, 568, 13, 20, 23300, 420, 568, 13, 21, 11, 406, 2293, 568, 420, 805, 13, 407, 309, 727, 312, 604, 2158, 13, 33778, 307, 11, 337, 1365, 11, 1296, 568, 23300, 293, 805, 23300, 13, 407, 490, 568, 281, 805, 307, 257, 955, 3613, 570, 309, 393, 747, 604, 17098, 265, 490, 568, 13, 16, 281, 568, 13, 5211, 293, 370, 322, 13, 407, 14855, 307, 364, 1365, 295, 10957, 4974, 7006, 13], "avg_logprob": -0.19285300912128556, "compression_ratio": 1.8484848484848484, "no_speech_prob": 0.0, "words": [{"start": 241.75, "end": 241.99, "word": " Now,", "probability": 0.71630859375}, {"start": 242.03, "end": 242.13, "word": " the", "probability": 0.90576171875}, {"start": 242.13, "end": 242.49, "word": " thickness", "probability": 0.966796875}, {"start": 242.49, "end": 243.33, "word": " may", "probability": 0.59716796875}, {"start": 243.33, "end": 243.69, "word": " be", "probability": 0.951171875}, {"start": 243.69, "end": 244.11, "word": " 2", "probability": 0.33251953125}, {"start": 244.11, "end": 244.63, "word": " centimeters", "probability": 0.41259765625}, {"start": 244.63, "end": 244.81, "word": " or", "probability": 0.80517578125}, {"start": 244.81, "end": 245.01, "word": " 3", "probability": 0.96875}, {"start": 245.01, "end": 245.45, "word": " centimeters", "probability": 0.892578125}, {"start": 245.45, "end": 245.65, "word": " and", "probability": 0.76220703125}, {"start": 245.65, "end": 245.77, "word": " so", "probability": 0.95263671875}, {"start": 245.77, "end": 245.93, "word": " on,", "probability": 0.94140625}, {"start": 246.21, "end": 246.35, "word": " but", "probability": 0.91455078125}, {"start": 246.35, "end": 246.49, "word": " it", "probability": 0.5537109375}, {"start": 246.49, "end": 246.69, "word": " might", "probability": 
0.89599609375}, {"start": 246.69, "end": 246.87, "word": " be", "probability": 0.9443359375}, {"start": 246.87, "end": 247.07, "word": " 2", "probability": 0.994140625}, {"start": 247.07, "end": 247.47, "word": ".5", "probability": 0.9970703125}, {"start": 247.47, "end": 247.85, "word": " centimeters.", "probability": 0.876953125}, {"start": 248.49, "end": 248.93, "word": " For", "probability": 0.389404296875}, {"start": 248.93, "end": 249.51, "word": " example,", "probability": 0.95703125}, {"start": 249.61, "end": 249.73, "word": " for", "probability": 0.884765625}, {"start": 249.73, "end": 249.95, "word": " this", "probability": 0.9462890625}, {"start": 249.95, "end": 250.29, "word": " remote,", "probability": 0.9296875}, {"start": 250.83, "end": 250.99, "word": " the", "probability": 0.91259765625}, {"start": 250.99, "end": 251.23, "word": " thickness", "probability": 0.95556640625}, {"start": 251.23, "end": 251.41, "word": " is", "probability": 0.93115234375}, {"start": 251.41, "end": 251.55, "word": " 2", "probability": 0.994140625}, {"start": 251.55, "end": 252.01, "word": ".5", "probability": 0.975341796875}, {"start": 252.01, "end": 252.41, "word": " centimeters", "probability": 0.83642578125}, {"start": 252.41, "end": 252.79, "word": " or", "probability": 0.8212890625}, {"start": 252.79, "end": 253.03, "word": " 2", "probability": 0.9951171875}, {"start": 253.03, "end": 253.57, "word": ".6,", "probability": 0.99462890625}, {"start": 253.81, "end": 254.11, "word": " not", "probability": 0.8642578125}, {"start": 254.11, "end": 254.47, "word": " exactly", "probability": 0.8984375}, {"start": 254.47, "end": 254.67, "word": " 2", "probability": 0.7978515625}, {"start": 254.67, "end": 254.79, "word": " or", "probability": 0.9599609375}, {"start": 254.79, "end": 255.01, "word": " 3.", "probability": 0.9951171875}, {"start": 255.31, "end": 255.53, "word": " So", "probability": 0.765625}, {"start": 255.53, "end": 255.59, "word": " it", "probability": 
0.68115234375}, {"start": 255.59, "end": 255.75, "word": " could", "probability": 0.8759765625}, {"start": 255.75, "end": 255.89, "word": " be", "probability": 0.9541015625}, {"start": 255.89, "end": 256.11, "word": " any", "probability": 0.9150390625}, {"start": 256.11, "end": 256.51, "word": " value.", "probability": 0.97900390625}, {"start": 256.65, "end": 256.89, "word": " Range", "probability": 0.646484375}, {"start": 256.89, "end": 257.01, "word": " is,", "probability": 0.8125}, {"start": 257.13, "end": 257.13, "word": " for", "probability": 0.9462890625}, {"start": 257.13, "end": 257.51, "word": " example,", "probability": 0.974609375}, {"start": 257.91, "end": 258.23, "word": " between", "probability": 0.873046875}, {"start": 258.23, "end": 258.47, "word": " 2", "probability": 0.9375}, {"start": 258.47, "end": 258.99, "word": " centimeters", "probability": 0.89501953125}, {"start": 258.99, "end": 259.23, "word": " and", "probability": 0.93408203125}, {"start": 259.23, "end": 259.45, "word": " 3", "probability": 0.96435546875}, {"start": 259.45, "end": 259.81, "word": " centimeters.", "probability": 0.85791015625}, {"start": 259.97, "end": 260.13, "word": " So", "probability": 0.53466796875}, {"start": 260.13, "end": 260.33, "word": " from", "probability": 0.83837890625}, {"start": 260.33, "end": 260.55, "word": " 2", "probability": 0.86279296875}, {"start": 260.55, "end": 260.69, "word": " to", "probability": 0.9677734375}, {"start": 260.69, "end": 260.89, "word": " 3", "probability": 0.9990234375}, {"start": 260.89, "end": 261.07, "word": " is", "probability": 0.9404296875}, {"start": 261.07, "end": 261.17, "word": " a", "probability": 0.99462890625}, {"start": 261.17, "end": 261.41, "word": " big", "probability": 0.92333984375}, {"start": 261.41, "end": 262.43, "word": " range", "probability": 0.873046875}, {"start": 262.43, "end": 263.01, "word": " because", "probability": 0.475341796875}, {"start": 263.01, "end": 263.13, "word": " it", "probability": 
0.80712890625}, {"start": 263.13, "end": 263.27, "word": " can", "probability": 0.92919921875}, {"start": 263.27, "end": 263.49, "word": " take", "probability": 0.8974609375}, {"start": 263.49, "end": 263.81, "word": " anywhere", "probability": 0.6407063802083334}, {"start": 263.81, "end": 263.89, "word": " from", "probability": 0.43896484375}, {"start": 263.89, "end": 264.01, "word": " 2", "probability": 0.98828125}, {"start": 264.01, "end": 264.37, "word": ".1", "probability": 0.952392578125}, {"start": 264.37, "end": 264.51, "word": " to", "probability": 0.63916015625}, {"start": 264.51, "end": 264.69, "word": " 2", "probability": 0.9921875}, {"start": 264.69, "end": 265.13, "word": ".15", "probability": 0.960205078125}, {"start": 265.13, "end": 265.33, "word": " and", "probability": 0.84716796875}, {"start": 265.33, "end": 265.51, "word": " so", "probability": 0.9501953125}, {"start": 265.51, "end": 265.67, "word": " on.", "probability": 0.943359375}, {"start": 266.13, "end": 266.29, "word": " So", "probability": 0.95947265625}, {"start": 266.29, "end": 266.59, "word": " thickness", "probability": 0.8779296875}, {"start": 266.59, "end": 266.97, "word": " is", "probability": 0.94482421875}, {"start": 266.97, "end": 267.13, "word": " an", "probability": 0.9609375}, {"start": 267.13, "end": 267.55, "word": " example", "probability": 0.9765625}, {"start": 267.55, "end": 268.01, "word": " of", "probability": 0.96435546875}, {"start": 268.01, "end": 268.47, "word": " continuous", "probability": 0.77978515625}, {"start": 268.47, "end": 268.81, "word": " random", "probability": 0.81640625}, {"start": 268.81, "end": 269.13, "word": " variable.", "probability": 0.52587890625}], "temperature": 1.0}, {"id": 12, "seek": 29595, "start": 269.49, "end": 295.95, "text": " Another example, time required to complete a task. Now suppose you want to do an exercise. Now the time required to finish or to complete this task may be any value between 2 minutes up to 3 minutes. 
So maybe 2 minutes 30 seconds, 2 minutes 40 seconds and so on. So it's continuous random variable. Temperature of a solution.", "tokens": [3996, 1365, 11, 565, 4739, 281, 3566, 257, 5633, 13, 823, 7297, 291, 528, 281, 360, 364, 5380, 13, 823, 264, 565, 4739, 281, 2413, 420, 281, 3566, 341, 5633, 815, 312, 604, 2158, 1296, 568, 2077, 493, 281, 805, 2077, 13, 407, 1310, 568, 2077, 2217, 3949, 11, 568, 2077, 3356, 3949, 293, 370, 322, 13, 407, 309, 311, 10957, 4974, 7006, 13, 34864, 1503, 295, 257, 3827, 13], "avg_logprob": -0.21908011025106403, "compression_ratio": 1.63, "no_speech_prob": 0.0, "words": [{"start": 269.49, "end": 269.81, "word": " Another", "probability": 0.3037109375}, {"start": 269.81, "end": 270.25, "word": " example,", "probability": 0.9609375}, {"start": 270.39, "end": 270.61, "word": " time", "probability": 0.60986328125}, {"start": 270.61, "end": 271.01, "word": " required", "probability": 0.81201171875}, {"start": 271.01, "end": 271.19, "word": " to", "probability": 0.96728515625}, {"start": 271.19, "end": 271.45, "word": " complete", "probability": 0.77734375}, {"start": 271.45, "end": 271.61, "word": " a", "probability": 0.62451171875}, {"start": 271.61, "end": 271.97, "word": " task.", "probability": 0.93408203125}, {"start": 273.11, "end": 273.31, "word": " Now", "probability": 0.82470703125}, {"start": 273.31, "end": 273.79, "word": " suppose", "probability": 0.58837890625}, {"start": 273.79, "end": 275.41, "word": " you", "probability": 0.7705078125}, {"start": 275.41, "end": 275.61, "word": " want", "probability": 0.89306640625}, {"start": 275.61, "end": 275.71, "word": " to", "probability": 0.97021484375}, {"start": 275.71, "end": 275.83, "word": " do", "probability": 0.9619140625}, {"start": 275.83, "end": 276.01, "word": " an", "probability": 0.9453125}, {"start": 276.01, "end": 276.43, "word": " exercise.", "probability": 0.95556640625}, {"start": 277.63, "end": 277.83, "word": " Now", "probability": 0.8427734375}, {"start": 277.83, 
"end": 277.99, "word": " the", "probability": 0.67333984375}, {"start": 277.99, "end": 278.21, "word": " time", "probability": 0.88037109375}, {"start": 278.21, "end": 278.61, "word": " required", "probability": 0.82373046875}, {"start": 278.61, "end": 279.05, "word": " to", "probability": 0.96923828125}, {"start": 279.05, "end": 279.37, "word": " finish", "probability": 0.921875}, {"start": 279.37, "end": 279.57, "word": " or", "probability": 0.88525390625}, {"start": 279.57, "end": 279.71, "word": " to", "probability": 0.8818359375}, {"start": 279.71, "end": 279.95, "word": " complete", "probability": 0.7880859375}, {"start": 279.95, "end": 280.19, "word": " this", "probability": 0.94482421875}, {"start": 280.19, "end": 280.65, "word": " task", "probability": 0.92626953125}, {"start": 280.65, "end": 281.65, "word": " may", "probability": 0.491455078125}, {"start": 281.65, "end": 281.87, "word": " be", "probability": 0.96240234375}, {"start": 281.87, "end": 283.99, "word": " any", "probability": 0.8759765625}, {"start": 283.99, "end": 284.27, "word": " value", "probability": 0.9765625}, {"start": 284.27, "end": 284.61, "word": " between", "probability": 0.8818359375}, {"start": 284.61, "end": 285.15, "word": " 2", "probability": 0.408935546875}, {"start": 285.15, "end": 285.53, "word": " minutes", "probability": 0.9052734375}, {"start": 285.53, "end": 286.09, "word": " up", "probability": 0.9052734375}, {"start": 286.09, "end": 286.27, "word": " to", "probability": 0.96240234375}, {"start": 286.27, "end": 286.49, "word": " 3", "probability": 0.93701171875}, {"start": 286.49, "end": 286.93, "word": " minutes.", "probability": 0.931640625}, {"start": 287.41, "end": 287.65, "word": " So", "probability": 0.91796875}, {"start": 287.65, "end": 287.85, "word": " maybe", "probability": 0.810546875}, {"start": 287.85, "end": 288.05, "word": " 2", "probability": 0.8740234375}, {"start": 288.05, "end": 288.33, "word": " minutes", "probability": 0.91259765625}, {"start": 
288.33, "end": 288.73, "word": " 30", "probability": 0.60400390625}, {"start": 288.73, "end": 289.11, "word": " seconds,", "probability": 0.49169921875}, {"start": 289.83, "end": 290.05, "word": " 2", "probability": 0.9794921875}, {"start": 290.05, "end": 290.33, "word": " minutes", "probability": 0.923828125}, {"start": 290.33, "end": 290.59, "word": " 40", "probability": 0.96435546875}, {"start": 290.59, "end": 290.95, "word": " seconds", "probability": 0.78466796875}, {"start": 290.95, "end": 291.15, "word": " and", "probability": 0.68603515625}, {"start": 291.15, "end": 291.31, "word": " so", "probability": 0.94970703125}, {"start": 291.31, "end": 291.51, "word": " on.", "probability": 0.9482421875}, {"start": 291.87, "end": 291.95, "word": " So", "probability": 0.93603515625}, {"start": 291.95, "end": 292.15, "word": " it's", "probability": 0.871826171875}, {"start": 292.15, "end": 292.63, "word": " continuous", "probability": 0.85595703125}, {"start": 292.63, "end": 292.95, "word": " random", "probability": 0.27587890625}, {"start": 292.95, "end": 293.33, "word": " variable.", "probability": 0.86181640625}, {"start": 294.47, "end": 295.07, "word": " Temperature", "probability": 0.947021484375}, {"start": 295.07, "end": 295.43, "word": " of", "probability": 0.9375}, {"start": 295.43, "end": 295.55, "word": " a", "probability": 0.5966796875}, {"start": 295.55, "end": 295.95, "word": " solution.", "probability": 0.98193359375}], "temperature": 1.0}, {"id": 13, "seek": 31894, "start": 297.62, "end": 318.94, "text": " height, weight, ages, and so on. These are examples of continuous random variable. So these variables can potentially take on any value depending only on the ability to precisely and accurately measure. So that's the definition of continuous random variable. 
Now, if you look at the normal distribution,", "tokens": [6681, 11, 3364, 11, 12357, 11, 293, 370, 322, 13, 1981, 366, 5110, 295, 10957, 4974, 7006, 13, 407, 613, 9102, 393, 7263, 747, 322, 604, 2158, 5413, 787, 322, 264, 3485, 281, 13402, 293, 20095, 3481, 13, 407, 300, 311, 264, 7123, 295, 10957, 4974, 7006, 13, 823, 11, 498, 291, 574, 412, 264, 2710, 7316, 11], "avg_logprob": -0.16975636047832035, "compression_ratio": 1.6432432432432433, "no_speech_prob": 0.0, "words": [{"start": 297.62, "end": 298.1, "word": " height,", "probability": 0.5029296875}, {"start": 298.32, "end": 298.66, "word": " weight,", "probability": 0.890625}, {"start": 298.92, "end": 299.16, "word": " ages,", "probability": 0.484130859375}, {"start": 299.26, "end": 299.34, "word": " and", "probability": 0.93408203125}, {"start": 299.34, "end": 299.5, "word": " so", "probability": 0.9541015625}, {"start": 299.5, "end": 299.66, "word": " on.", "probability": 0.94970703125}, {"start": 299.84, "end": 300.14, "word": " These", "probability": 0.89111328125}, {"start": 300.14, "end": 300.34, "word": " are", "probability": 0.93701171875}, {"start": 300.34, "end": 300.78, "word": " examples", "probability": 0.85546875}, {"start": 300.78, "end": 301.04, "word": " of", "probability": 0.96533203125}, {"start": 301.04, "end": 301.46, "word": " continuous", "probability": 0.77587890625}, {"start": 301.46, "end": 301.92, "word": " random", "probability": 0.869140625}, {"start": 301.92, "end": 302.8, "word": " variable.", "probability": 0.46337890625}, {"start": 303.6, "end": 303.72, "word": " So", "probability": 0.90869140625}, {"start": 303.72, "end": 304.16, "word": " these", "probability": 0.7265625}, {"start": 304.16, "end": 305.16, "word": " variables", "probability": 0.921875}, {"start": 305.16, "end": 305.96, "word": " can", "probability": 0.94384765625}, {"start": 305.96, "end": 306.54, "word": " potentially", "probability": 0.73291015625}, {"start": 306.54, "end": 306.92, "word": " take", 
"probability": 0.896484375}, {"start": 306.92, "end": 307.32, "word": " on", "probability": 0.93798828125}, {"start": 307.32, "end": 307.66, "word": " any", "probability": 0.90771484375}, {"start": 307.66, "end": 308.04, "word": " value", "probability": 0.97900390625}, {"start": 308.04, "end": 308.44, "word": " depending", "probability": 0.7646484375}, {"start": 308.44, "end": 309.28, "word": " only", "probability": 0.7939453125}, {"start": 309.28, "end": 309.54, "word": " on", "probability": 0.94873046875}, {"start": 309.54, "end": 309.64, "word": " the", "probability": 0.86767578125}, {"start": 309.64, "end": 309.94, "word": " ability", "probability": 0.943359375}, {"start": 309.94, "end": 310.14, "word": " to", "probability": 0.92724609375}, {"start": 310.14, "end": 310.72, "word": " precisely", "probability": 0.82373046875}, {"start": 310.72, "end": 311.34, "word": " and", "probability": 0.87158203125}, {"start": 311.34, "end": 311.76, "word": " accurately", "probability": 0.77587890625}, {"start": 311.76, "end": 312.1, "word": " measure.", "probability": 0.7724609375}, {"start": 312.6, "end": 312.86, "word": " So", "probability": 0.93701171875}, {"start": 312.86, "end": 313.14, "word": " that's", "probability": 0.91796875}, {"start": 313.14, "end": 313.24, "word": " the", "probability": 0.86474609375}, {"start": 313.24, "end": 313.62, "word": " definition", "probability": 0.94287109375}, {"start": 313.62, "end": 314.02, "word": " of", "probability": 0.96728515625}, {"start": 314.02, "end": 314.48, "word": " continuous", "probability": 0.83447265625}, {"start": 314.48, "end": 315.32, "word": " random", "probability": 0.85693359375}, {"start": 315.32, "end": 315.74, "word": " variable.", "probability": 0.87841796875}, {"start": 316.1, "end": 316.42, "word": " Now,", "probability": 0.9482421875}, {"start": 316.9, "end": 316.98, "word": " if", "probability": 0.9453125}, {"start": 316.98, "end": 317.06, "word": " you", "probability": 0.95458984375}, {"start": 
317.06, "end": 317.22, "word": " look", "probability": 0.9638671875}, {"start": 317.22, "end": 317.32, "word": " at", "probability": 0.96240234375}, {"start": 317.32, "end": 317.44, "word": " the", "probability": 0.86572265625}, {"start": 317.44, "end": 317.7, "word": " normal", "probability": 0.87939453125}, {"start": 317.7, "end": 318.94, "word": " distribution,", "probability": 0.86181640625}], "temperature": 1.0}, {"id": 14, "seek": 34177, "start": 320.67, "end": 341.77, "text": " It looks like bell-shaped, as we discussed before. So it's bell-shaped, symmetrical. Symmetrical means the area to the right of the mean equals the area to the left of the mean. I mean 50% of the area above and 50% below. So that's the meaning of symmetrical.", "tokens": [467, 1542, 411, 4549, 12, 23103, 11, 382, 321, 7152, 949, 13, 407, 309, 311, 4549, 12, 23103, 11, 40360, 13, 3902, 2174, 32283, 1355, 264, 1859, 281, 264, 558, 295, 264, 914, 6915, 264, 1859, 281, 264, 1411, 295, 264, 914, 13, 286, 914, 2625, 4, 295, 264, 1859, 3673, 293, 2625, 4, 2507, 13, 407, 300, 311, 264, 3620, 295, 40360, 13], "avg_logprob": -0.14819711538461539, "compression_ratio": 1.7333333333333334, "no_speech_prob": 0.0, "words": [{"start": 320.67, "end": 320.91, "word": " It", "probability": 0.87353515625}, {"start": 320.91, "end": 321.15, "word": " looks", "probability": 0.849609375}, {"start": 321.15, "end": 321.45, "word": " like", "probability": 0.94287109375}, {"start": 321.45, "end": 321.81, "word": " bell", "probability": 0.72216796875}, {"start": 321.81, "end": 322.09, "word": "-shaped,", "probability": 0.570068359375}, {"start": 322.81, "end": 323.53, "word": " as", "probability": 0.94775390625}, {"start": 323.53, "end": 323.73, "word": " we", "probability": 0.9658203125}, {"start": 323.73, "end": 324.25, "word": " discussed", "probability": 0.8603515625}, {"start": 324.25, "end": 324.73, "word": " before.", "probability": 0.85791015625}, {"start": 325.13, "end": 325.21, "word": " So", 
"probability": 0.8759765625}, {"start": 325.21, "end": 325.49, "word": " it's", "probability": 0.882568359375}, {"start": 325.49, "end": 325.99, "word": " bell", "probability": 0.91845703125}, {"start": 325.99, "end": 326.65, "word": "-shaped,", "probability": 0.867919921875}, {"start": 327.43, "end": 327.99, "word": " symmetrical.", "probability": 0.82275390625}, {"start": 328.31, "end": 328.75, "word": " Symmetrical", "probability": 0.9641927083333334}, {"start": 328.75, "end": 329.33, "word": " means", "probability": 0.9375}, {"start": 329.33, "end": 330.89, "word": " the", "probability": 0.7607421875}, {"start": 330.89, "end": 331.27, "word": " area", "probability": 0.8955078125}, {"start": 331.27, "end": 331.47, "word": " to", "probability": 0.96630859375}, {"start": 331.47, "end": 331.63, "word": " the", "probability": 0.9150390625}, {"start": 331.63, "end": 331.81, "word": " right", "probability": 0.91455078125}, {"start": 331.81, "end": 331.97, "word": " of", "probability": 0.96533203125}, {"start": 331.97, "end": 332.11, "word": " the", "probability": 0.9296875}, {"start": 332.11, "end": 332.29, "word": " mean", "probability": 0.853515625}, {"start": 332.29, "end": 333.75, "word": " equals", "probability": 0.525390625}, {"start": 333.75, "end": 333.95, "word": " the", "probability": 0.83056640625}, {"start": 333.95, "end": 334.11, "word": " area", "probability": 0.896484375}, {"start": 334.11, "end": 334.27, "word": " to", "probability": 0.95751953125}, {"start": 334.27, "end": 334.39, "word": " the", "probability": 0.9140625}, {"start": 334.39, "end": 334.57, "word": " left", "probability": 0.94775390625}, {"start": 334.57, "end": 334.73, "word": " of", "probability": 0.95556640625}, {"start": 334.73, "end": 334.85, "word": " the", "probability": 0.90380859375}, {"start": 334.85, "end": 334.93, "word": " mean.", "probability": 0.966796875}, {"start": 335.01, "end": 335.07, "word": " I", "probability": 0.98681640625}, {"start": 335.07, "end": 335.23, 
"word": " mean", "probability": 0.966796875}, {"start": 335.23, "end": 335.59, "word": " 50", "probability": 0.65966796875}, {"start": 335.59, "end": 335.89, "word": "%", "probability": 0.6904296875}, {"start": 335.89, "end": 336.25, "word": " of", "probability": 0.96435546875}, {"start": 336.25, "end": 336.35, "word": " the", "probability": 0.919921875}, {"start": 336.35, "end": 336.67, "word": " area", "probability": 0.89697265625}, {"start": 336.67, "end": 337.17, "word": " above", "probability": 0.9658203125}, {"start": 337.17, "end": 337.95, "word": " and", "probability": 0.796875}, {"start": 337.95, "end": 338.25, "word": " 50", "probability": 0.96875}, {"start": 338.25, "end": 338.71, "word": "%", "probability": 0.99560546875}, {"start": 338.71, "end": 339.67, "word": " below.", "probability": 0.9033203125}, {"start": 340.13, "end": 340.33, "word": " So", "probability": 0.9482421875}, {"start": 340.33, "end": 340.59, "word": " that's", "probability": 0.9521484375}, {"start": 340.59, "end": 340.71, "word": " the", "probability": 0.92041015625}, {"start": 340.71, "end": 340.99, "word": " meaning", "probability": 0.87158203125}, {"start": 340.99, "end": 341.33, "word": " of", "probability": 0.96875}, {"start": 341.33, "end": 341.77, "word": " symmetrical.", "probability": 0.7861328125}], "temperature": 1.0}, {"id": 15, "seek": 36743, "start": 342.49, "end": 367.43, "text": " The other feature of normal distribution, the measures of center tendency are equal or approximately equal. Mean, median, and mode are roughly equal. In reality, they are not equal, exactly equal, but you can say they are approximately equal. Now, there are two parameters describing the normal distribution. 
One is called the location parameter.", "tokens": [440, 661, 4111, 295, 2710, 7316, 11, 264, 8000, 295, 3056, 18187, 366, 2681, 420, 10447, 2681, 13, 12302, 11, 26779, 11, 293, 4391, 366, 9810, 2681, 13, 682, 4103, 11, 436, 366, 406, 2681, 11, 2293, 2681, 11, 457, 291, 393, 584, 436, 366, 10447, 2681, 13, 823, 11, 456, 366, 732, 9834, 16141, 264, 2710, 7316, 13, 1485, 307, 1219, 264, 4914, 13075, 13], "avg_logprob": -0.14517257195800098, "compression_ratio": 1.7704081632653061, "no_speech_prob": 0.0, "words": [{"start": 342.49, "end": 342.73, "word": " The", "probability": 0.82373046875}, {"start": 342.73, "end": 343.11, "word": " other", "probability": 0.8818359375}, {"start": 343.11, "end": 344.27, "word": " feature", "probability": 0.93212890625}, {"start": 344.27, "end": 344.53, "word": " of", "probability": 0.9677734375}, {"start": 344.53, "end": 344.83, "word": " normal", "probability": 0.80029296875}, {"start": 344.83, "end": 345.41, "word": " distribution,", "probability": 0.85009765625}, {"start": 346.25, "end": 346.37, "word": " the", "probability": 0.91748046875}, {"start": 346.37, "end": 346.77, "word": " measures", "probability": 0.796875}, {"start": 346.77, "end": 346.97, "word": " of", "probability": 0.962890625}, {"start": 346.97, "end": 347.25, "word": " center", "probability": 0.8466796875}, {"start": 347.25, "end": 347.81, "word": " tendency", "probability": 0.84228515625}, {"start": 347.81, "end": 348.83, "word": " are", "probability": 0.9013671875}, {"start": 348.83, "end": 349.19, "word": " equal", "probability": 0.91748046875}, {"start": 349.19, "end": 349.51, "word": " or", "probability": 0.763671875}, {"start": 349.51, "end": 350.13, "word": " approximately", "probability": 0.8798828125}, {"start": 350.13, "end": 350.53, "word": " equal.", "probability": 0.89013671875}, {"start": 351.15, "end": 351.59, "word": " Mean,", "probability": 0.90625}, {"start": 351.75, "end": 352.11, "word": " median,", "probability": 0.919921875}, {"start": 
352.27, "end": 352.43, "word": " and", "probability": 0.9404296875}, {"start": 352.43, "end": 352.77, "word": " mode", "probability": 0.7939453125}, {"start": 352.77, "end": 353.17, "word": " are", "probability": 0.91845703125}, {"start": 353.17, "end": 353.47, "word": " roughly", "probability": 0.85595703125}, {"start": 353.47, "end": 353.85, "word": " equal.", "probability": 0.7978515625}, {"start": 354.25, "end": 354.41, "word": " In", "probability": 0.591796875}, {"start": 354.41, "end": 354.71, "word": " reality,", "probability": 0.97509765625}, {"start": 354.81, "end": 354.91, "word": " they", "probability": 0.88818359375}, {"start": 354.91, "end": 355.03, "word": " are", "probability": 0.9306640625}, {"start": 355.03, "end": 355.21, "word": " not", "probability": 0.94677734375}, {"start": 355.21, "end": 355.53, "word": " equal,", "probability": 0.68212890625}, {"start": 355.65, "end": 355.93, "word": " exactly", "probability": 0.8935546875}, {"start": 355.93, "end": 356.25, "word": " equal,", "probability": 0.86865234375}, {"start": 356.25, "end": 356.55, "word": " but", "probability": 0.923828125}, {"start": 356.55, "end": 356.77, "word": " you", "probability": 0.95947265625}, {"start": 356.77, "end": 356.97, "word": " can", "probability": 0.9453125}, {"start": 356.97, "end": 357.27, "word": " say", "probability": 0.76904296875}, {"start": 357.27, "end": 357.85, "word": " they", "probability": 0.86767578125}, {"start": 357.85, "end": 358.21, "word": " are", "probability": 0.9443359375}, {"start": 358.21, "end": 359.09, "word": " approximately", "probability": 0.8798828125}, {"start": 359.09, "end": 359.75, "word": " equal.", "probability": 0.8837890625}, {"start": 360.57, "end": 360.83, "word": " Now,", "probability": 0.94970703125}, {"start": 360.93, "end": 361.09, "word": " there", "probability": 0.9072265625}, {"start": 361.09, "end": 361.23, "word": " are", "probability": 0.9365234375}, {"start": 361.23, "end": 361.37, "word": " two", "probability": 
0.9287109375}, {"start": 361.37, "end": 361.85, "word": " parameters", "probability": 0.96337890625}, {"start": 361.85, "end": 363.13, "word": " describing", "probability": 0.8701171875}, {"start": 363.13, "end": 363.65, "word": " the", "probability": 0.91162109375}, {"start": 363.65, "end": 364.15, "word": " normal", "probability": 0.85498046875}, {"start": 364.15, "end": 364.71, "word": " distribution.", "probability": 0.87158203125}, {"start": 364.99, "end": 365.15, "word": " One", "probability": 0.9287109375}, {"start": 365.15, "end": 365.35, "word": " is", "probability": 0.94775390625}, {"start": 365.35, "end": 365.75, "word": " called", "probability": 0.88427734375}, {"start": 365.75, "end": 366.51, "word": " the", "probability": 0.91455078125}, {"start": 366.51, "end": 367.01, "word": " location", "probability": 0.8935546875}, {"start": 367.01, "end": 367.43, "word": " parameter.", "probability": 0.97119140625}], "temperature": 1.0}, {"id": 16, "seek": 39456, "start": 369.04, "end": 394.56, "text": " location, or central tendency, as we discussed before, location is determined by the mean mu. So the first parameter for the normal distribution is the mean mu. The other parameter measures the spread of the data, or the variability of the data, and the spread is sigma, or the variation. 
So we have two parameters, mu and sigma.", "tokens": [4914, 11, 420, 5777, 18187, 11, 382, 321, 7152, 949, 11, 4914, 307, 9540, 538, 264, 914, 2992, 13, 407, 264, 700, 13075, 337, 264, 2710, 7316, 307, 264, 914, 2992, 13, 440, 661, 13075, 8000, 264, 3974, 295, 264, 1412, 11, 420, 264, 35709, 295, 264, 1412, 11, 293, 264, 3974, 307, 12771, 11, 420, 264, 12990, 13, 407, 321, 362, 732, 9834, 11, 2992, 293, 12771, 13], "avg_logprob": -0.19408481738397054, "compression_ratio": 1.875, "no_speech_prob": 0.0, "words": [{"start": 369.04, "end": 369.58, "word": " location,", "probability": 0.6748046875}, {"start": 370.04, "end": 370.4, "word": " or", "probability": 0.91552734375}, {"start": 370.4, "end": 370.82, "word": " central", "probability": 0.875}, {"start": 370.82, "end": 371.32, "word": " tendency,", "probability": 0.93115234375}, {"start": 371.58, "end": 371.74, "word": " as", "probability": 0.9580078125}, {"start": 371.74, "end": 371.92, "word": " we", "probability": 0.9560546875}, {"start": 371.92, "end": 372.36, "word": " discussed", "probability": 0.8759765625}, {"start": 372.36, "end": 372.82, "word": " before,", "probability": 0.86865234375}, {"start": 373.18, "end": 373.52, "word": " location", "probability": 0.90478515625}, {"start": 373.52, "end": 373.8, "word": " is", "probability": 0.9296875}, {"start": 373.8, "end": 374.2, "word": " determined", "probability": 0.90625}, {"start": 374.2, "end": 374.64, "word": " by", "probability": 0.9677734375}, {"start": 374.64, "end": 374.86, "word": " the", "probability": 0.9033203125}, {"start": 374.86, "end": 375.0, "word": " mean", "probability": 0.8251953125}, {"start": 375.0, "end": 375.16, "word": " mu.", "probability": 0.402587890625}, {"start": 375.82, "end": 376.38, "word": " So", "probability": 0.8056640625}, {"start": 376.38, "end": 376.5, "word": " the", "probability": 0.7392578125}, {"start": 376.5, "end": 376.76, "word": " first", "probability": 0.8701171875}, {"start": 376.76, "end": 377.16, "word": " 
parameter", "probability": 0.97509765625}, {"start": 377.16, "end": 377.52, "word": " for", "probability": 0.9384765625}, {"start": 377.52, "end": 377.74, "word": " the", "probability": 0.90380859375}, {"start": 377.74, "end": 378.1, "word": " normal", "probability": 0.88720703125}, {"start": 378.1, "end": 378.64, "word": " distribution", "probability": 0.85400390625}, {"start": 378.64, "end": 378.94, "word": " is", "probability": 0.9404296875}, {"start": 378.94, "end": 379.08, "word": " the", "probability": 0.900390625}, {"start": 379.08, "end": 379.22, "word": " mean", "probability": 0.9580078125}, {"start": 379.22, "end": 379.42, "word": " mu.", "probability": 0.87646484375}, {"start": 379.98, "end": 380.34, "word": " The", "probability": 0.888671875}, {"start": 380.34, "end": 380.58, "word": " other", "probability": 0.88671875}, {"start": 380.58, "end": 381.06, "word": " parameter", "probability": 0.96337890625}, {"start": 381.06, "end": 382.6, "word": " measures", "probability": 0.81640625}, {"start": 382.6, "end": 382.92, "word": " the", "probability": 0.904296875}, {"start": 382.92, "end": 383.4, "word": " spread", "probability": 0.9072265625}, {"start": 383.4, "end": 383.96, "word": " of", "probability": 0.6005859375}, {"start": 383.96, "end": 384.08, "word": " the", "probability": 0.8017578125}, {"start": 384.08, "end": 384.24, "word": " data,", "probability": 0.7080078125}, {"start": 384.28, "end": 384.8, "word": " or", "probability": 0.3076171875}, {"start": 384.8, "end": 385.08, "word": " the", "probability": 0.90966796875}, {"start": 385.08, "end": 385.46, "word": " variability", "probability": 0.95703125}, {"start": 385.46, "end": 385.82, "word": " of", "probability": 0.9609375}, {"start": 385.82, "end": 385.98, "word": " the", "probability": 0.9111328125}, {"start": 385.98, "end": 386.3, "word": " data,", "probability": 0.93701171875}, {"start": 386.82, "end": 386.92, "word": " and", "probability": 0.92138671875}, {"start": 386.92, "end": 387.08, 
"word": " the", "probability": 0.70849609375}, {"start": 387.08, "end": 387.32, "word": " spread", "probability": 0.9091796875}, {"start": 387.32, "end": 387.68, "word": " is", "probability": 0.92919921875}, {"start": 387.68, "end": 388.22, "word": " sigma,", "probability": 0.75732421875}, {"start": 388.82, "end": 389.3, "word": " or", "probability": 0.9306640625}, {"start": 389.3, "end": 389.42, "word": " the", "probability": 0.90380859375}, {"start": 389.42, "end": 389.78, "word": " variation.", "probability": 0.88916015625}, {"start": 390.84, "end": 391.16, "word": " So", "probability": 0.9453125}, {"start": 391.16, "end": 391.3, "word": " we", "probability": 0.91845703125}, {"start": 391.3, "end": 391.54, "word": " have", "probability": 0.943359375}, {"start": 391.54, "end": 391.86, "word": " two", "probability": 0.9306640625}, {"start": 391.86, "end": 392.36, "word": " parameters,", "probability": 0.97021484375}, {"start": 392.88, "end": 393.14, "word": " mu", "probability": 0.73583984375}, {"start": 393.14, "end": 393.94, "word": " and", "probability": 0.91552734375}, {"start": 393.94, "end": 394.56, "word": " sigma.", "probability": 0.92578125}], "temperature": 1.0}, {"id": 17, "seek": 41305, "start": 395.57, "end": 413.05, "text": " The random variable in this case can take any value from minus infinity up to infinity. So random variable in this case continuous ranges from minus infinity all the way up to infinity. 
I mean from this point here", "tokens": [440, 4974, 7006, 294, 341, 1389, 393, 747, 604, 2158, 490, 3175, 13202, 493, 281, 13202, 13, 407, 4974, 7006, 294, 341, 1389, 10957, 22526, 490, 3175, 13202, 439, 264, 636, 493, 281, 13202, 13, 286, 914, 490, 341, 935, 510], "avg_logprob": -0.20293899093355453, "compression_ratio": 1.7833333333333334, "no_speech_prob": 0.0, "words": [{"start": 395.57, "end": 395.91, "word": " The", "probability": 0.6181640625}, {"start": 395.91, "end": 396.15, "word": " random", "probability": 0.7470703125}, {"start": 396.15, "end": 396.57, "word": " variable", "probability": 0.8837890625}, {"start": 396.57, "end": 396.77, "word": " in", "probability": 0.8310546875}, {"start": 396.77, "end": 396.99, "word": " this", "probability": 0.95068359375}, {"start": 396.99, "end": 397.37, "word": " case", "probability": 0.90576171875}, {"start": 397.37, "end": 397.97, "word": " can", "probability": 0.8564453125}, {"start": 397.97, "end": 398.27, "word": " take", "probability": 0.876953125}, {"start": 398.27, "end": 398.55, "word": " any", "probability": 0.9140625}, {"start": 398.55, "end": 398.85, "word": " value", "probability": 0.9736328125}, {"start": 398.85, "end": 399.09, "word": " from", "probability": 0.90234375}, {"start": 399.09, "end": 399.45, "word": " minus", "probability": 0.87548828125}, {"start": 399.45, "end": 399.93, "word": " infinity", "probability": 0.89599609375}, {"start": 399.93, "end": 400.49, "word": " up", "probability": 0.9228515625}, {"start": 400.49, "end": 400.65, "word": " to", "probability": 0.96435546875}, {"start": 400.65, "end": 401.17, "word": " infinity.", "probability": 0.88134765625}, {"start": 402.19, "end": 402.79, "word": " So", "probability": 0.71435546875}, {"start": 402.79, "end": 403.01, "word": " random", "probability": 0.658203125}, {"start": 403.01, "end": 403.51, "word": " variable", "probability": 0.88427734375}, {"start": 403.51, "end": 403.87, "word": " in", "probability": 0.66796875}, {"start": 
403.87, "end": 404.05, "word": " this", "probability": 0.94775390625}, {"start": 404.05, "end": 404.27, "word": " case", "probability": 0.9208984375}, {"start": 404.27, "end": 404.95, "word": " continuous", "probability": 0.369140625}, {"start": 404.95, "end": 406.83, "word": " ranges", "probability": 0.363037109375}, {"start": 406.83, "end": 408.33, "word": " from", "probability": 0.88671875}, {"start": 408.33, "end": 408.73, "word": " minus", "probability": 0.9833984375}, {"start": 408.73, "end": 409.29, "word": " infinity", "probability": 0.90966796875}, {"start": 409.29, "end": 409.87, "word": " all", "probability": 0.9443359375}, {"start": 409.87, "end": 410.05, "word": " the", "probability": 0.9228515625}, {"start": 410.05, "end": 410.31, "word": " way", "probability": 0.9521484375}, {"start": 410.31, "end": 410.67, "word": " up", "probability": 0.9619140625}, {"start": 410.67, "end": 410.83, "word": " to", "probability": 0.97021484375}, {"start": 410.83, "end": 411.23, "word": " infinity.", "probability": 0.86865234375}, {"start": 411.33, "end": 411.41, "word": " I", "probability": 0.9736328125}, {"start": 411.41, "end": 411.59, "word": " mean", "probability": 0.9580078125}, {"start": 411.59, "end": 411.87, "word": " from", "probability": 0.81689453125}, {"start": 411.87, "end": 412.17, "word": " this", "probability": 0.94873046875}, {"start": 412.17, "end": 412.51, "word": " point", "probability": 0.96533203125}, {"start": 412.51, "end": 413.05, "word": " here", "probability": 0.8115234375}], "temperature": 1.0}, {"id": 18, "seek": 44122, "start": 414.6, "end": 441.22, "text": " up to infinity. So the values range from minus infinity up to infinity. And if you look here, the mean is located nearly in the middle. And mean and median are all approximately equal. That's the features or the characteristics of the normal distribution. 
Now, how can we compute the probabilities under the normal killer?", "tokens": [493, 281, 13202, 13, 407, 264, 4190, 3613, 490, 3175, 13202, 493, 281, 13202, 13, 400, 498, 291, 574, 510, 11, 264, 914, 307, 6870, 6217, 294, 264, 2808, 13, 400, 914, 293, 26779, 366, 439, 10447, 2681, 13, 663, 311, 264, 4122, 420, 264, 10891, 295, 264, 2710, 7316, 13, 823, 11, 577, 393, 321, 14722, 264, 33783, 833, 264, 2710, 13364, 30], "avg_logprob": -0.19242788461538463, "compression_ratio": 1.599009900990099, "no_speech_prob": 0.0, "words": [{"start": 414.6, "end": 414.94, "word": " up", "probability": 0.38623046875}, {"start": 414.94, "end": 415.1, "word": " to", "probability": 0.96044921875}, {"start": 415.1, "end": 415.54, "word": " infinity.", "probability": 0.7744140625}, {"start": 415.76, "end": 415.86, "word": " So", "probability": 0.89453125}, {"start": 415.86, "end": 416.16, "word": " the", "probability": 0.62646484375}, {"start": 416.16, "end": 416.98, "word": " values", "probability": 0.85595703125}, {"start": 416.98, "end": 417.38, "word": " range", "probability": 0.5419921875}, {"start": 417.38, "end": 417.66, "word": " from", "probability": 0.8818359375}, {"start": 417.66, "end": 417.96, "word": " minus", "probability": 0.9541015625}, {"start": 417.96, "end": 418.38, "word": " infinity", "probability": 0.8857421875}, {"start": 418.38, "end": 419.14, "word": " up", "probability": 0.86865234375}, {"start": 419.14, "end": 419.28, "word": " to", "probability": 0.96826171875}, {"start": 419.28, "end": 419.7, "word": " infinity.", "probability": 0.875}, {"start": 420.08, "end": 420.2, "word": " And", "probability": 0.90283203125}, {"start": 420.2, "end": 420.34, "word": " if", "probability": 0.413330078125}, {"start": 420.34, "end": 420.34, "word": " you", "probability": 0.93994140625}, {"start": 420.34, "end": 420.5, "word": " look", "probability": 0.96533203125}, {"start": 420.5, "end": 420.7, "word": " here,", "probability": 0.8564453125}, {"start": 420.8, "end": 
420.96, "word": " the", "probability": 0.90771484375}, {"start": 420.96, "end": 421.2, "word": " mean", "probability": 0.96044921875}, {"start": 421.2, "end": 422.08, "word": " is", "probability": 0.943359375}, {"start": 422.08, "end": 422.58, "word": " located", "probability": 0.96875}, {"start": 422.58, "end": 423.14, "word": " nearly", "probability": 0.85986328125}, {"start": 423.14, "end": 423.66, "word": " in", "probability": 0.947265625}, {"start": 423.66, "end": 423.78, "word": " the", "probability": 0.9169921875}, {"start": 423.78, "end": 423.98, "word": " middle.", "probability": 0.95166015625}, {"start": 424.6, "end": 424.98, "word": " And", "probability": 0.9443359375}, {"start": 424.98, "end": 425.22, "word": " mean", "probability": 0.9052734375}, {"start": 425.22, "end": 425.36, "word": " and", "probability": 0.61767578125}, {"start": 425.36, "end": 425.6, "word": " median", "probability": 0.962890625}, {"start": 425.6, "end": 425.9, "word": " are", "probability": 0.9365234375}, {"start": 425.9, "end": 426.46, "word": " all", "probability": 0.939453125}, {"start": 426.46, "end": 428.1, "word": " approximately", "probability": 0.5791015625}, {"start": 428.1, "end": 429.02, "word": " equal.", "probability": 0.89453125}, {"start": 429.32, "end": 429.86, "word": " That's", "probability": 0.94873046875}, {"start": 429.86, "end": 430.14, "word": " the", "probability": 0.9169921875}, {"start": 430.14, "end": 430.82, "word": " features", "probability": 0.7470703125}, {"start": 430.82, "end": 431.48, "word": " or", "probability": 0.72412109375}, {"start": 431.48, "end": 431.64, "word": " the", "probability": 0.9150390625}, {"start": 431.64, "end": 432.26, "word": " characteristics", "probability": 0.89453125}, {"start": 432.26, "end": 433.56, "word": " of", "probability": 0.95166015625}, {"start": 433.56, "end": 433.8, "word": " the", "probability": 0.84521484375}, {"start": 433.8, "end": 434.18, "word": " normal", "probability": 0.88037109375}, {"start": 
434.18, "end": 434.74, "word": " distribution.", "probability": 0.87548828125}, {"start": 436.46, "end": 436.82, "word": " Now,", "probability": 0.95556640625}, {"start": 436.94, "end": 437.16, "word": " how", "probability": 0.9365234375}, {"start": 437.16, "end": 437.4, "word": " can", "probability": 0.9404296875}, {"start": 437.4, "end": 437.66, "word": " we", "probability": 0.9033203125}, {"start": 437.66, "end": 438.38, "word": " compute", "probability": 0.89892578125}, {"start": 438.38, "end": 439.44, "word": " the", "probability": 0.91845703125}, {"start": 439.44, "end": 439.94, "word": " probabilities", "probability": 0.9033203125}, {"start": 439.94, "end": 440.36, "word": " under", "probability": 0.89892578125}, {"start": 440.36, "end": 440.6, "word": " the", "probability": 0.9091796875}, {"start": 440.6, "end": 440.86, "word": " normal", "probability": 0.86328125}, {"start": 440.86, "end": 441.22, "word": " killer?", "probability": 0.202392578125}], "temperature": 1.0}, {"id": 19, "seek": 46670, "start": 443.46, "end": 466.7, "text": " The formula that is used to compute the probabilities is given by this one. It looks complicated formula because we have to use calculus in order to determine the area underneath the cube. So we are looking for something else. So this formula is it seems to be complicated. 
It's not hard but it's", "tokens": [440, 8513, 300, 307, 1143, 281, 14722, 264, 33783, 307, 2212, 538, 341, 472, 13, 467, 1542, 6179, 8513, 570, 321, 362, 281, 764, 33400, 294, 1668, 281, 6997, 264, 1859, 7223, 264, 13728, 13, 407, 321, 366, 1237, 337, 746, 1646, 13, 407, 341, 8513, 307, 309, 2544, 281, 312, 6179, 13, 467, 311, 406, 1152, 457, 309, 311], "avg_logprob": -0.1880122989904685, "compression_ratio": 1.6141304347826086, "no_speech_prob": 0.0, "words": [{"start": 443.46, "end": 443.74, "word": " The", "probability": 0.72705078125}, {"start": 443.74, "end": 444.16, "word": " formula", "probability": 0.92529296875}, {"start": 444.16, "end": 445.1, "word": " that", "probability": 0.9140625}, {"start": 445.1, "end": 445.32, "word": " is", "probability": 0.94140625}, {"start": 445.32, "end": 445.62, "word": " used", "probability": 0.9111328125}, {"start": 445.62, "end": 445.84, "word": " to", "probability": 0.97021484375}, {"start": 445.84, "end": 446.08, "word": " compute", "probability": 0.919921875}, {"start": 446.08, "end": 446.24, "word": " the", "probability": 0.87646484375}, {"start": 446.24, "end": 446.74, "word": " probabilities", "probability": 0.8681640625}, {"start": 446.74, "end": 447.5, "word": " is", "probability": 0.576171875}, {"start": 447.5, "end": 448.06, "word": " given", "probability": 0.87939453125}, {"start": 448.06, "end": 448.32, "word": " by", "probability": 0.97021484375}, {"start": 448.32, "end": 448.58, "word": " this", "probability": 0.9521484375}, {"start": 448.58, "end": 448.82, "word": " one.", "probability": 0.84814453125}, {"start": 449.06, "end": 449.22, "word": " It", "probability": 0.90380859375}, {"start": 449.22, "end": 449.5, "word": " looks", "probability": 0.429931640625}, {"start": 449.5, "end": 450.44, "word": " complicated", "probability": 0.8427734375}, {"start": 450.44, "end": 450.9, "word": " formula", "probability": 0.7578125}, {"start": 450.9, "end": 452.66, "word": " because", "probability": 0.53466796875}, 
{"start": 452.66, "end": 452.96, "word": " we", "probability": 0.94970703125}, {"start": 452.96, "end": 453.18, "word": " have", "probability": 0.94873046875}, {"start": 453.18, "end": 453.32, "word": " to", "probability": 0.9677734375}, {"start": 453.32, "end": 453.56, "word": " use", "probability": 0.8798828125}, {"start": 453.56, "end": 453.94, "word": " calculus", "probability": 0.94287109375}, {"start": 453.94, "end": 454.28, "word": " in", "probability": 0.93994140625}, {"start": 454.28, "end": 454.46, "word": " order", "probability": 0.91845703125}, {"start": 454.46, "end": 454.68, "word": " to", "probability": 0.96435546875}, {"start": 454.68, "end": 455.08, "word": " determine", "probability": 0.9033203125}, {"start": 455.08, "end": 455.34, "word": " the", "probability": 0.91357421875}, {"start": 455.34, "end": 455.62, "word": " area", "probability": 0.90283203125}, {"start": 455.62, "end": 456.04, "word": " underneath", "probability": 0.921875}, {"start": 456.04, "end": 456.4, "word": " the", "probability": 0.916015625}, {"start": 456.4, "end": 456.66, "word": " cube.", "probability": 0.248779296875}, {"start": 457.3, "end": 457.82, "word": " So", "probability": 0.9453125}, {"start": 457.82, "end": 458.04, "word": " we", "probability": 0.640625}, {"start": 458.04, "end": 458.2, "word": " are", "probability": 0.93798828125}, {"start": 458.2, "end": 458.46, "word": " looking", "probability": 0.9140625}, {"start": 458.46, "end": 458.7, "word": " for", "probability": 0.95166015625}, {"start": 458.7, "end": 459.02, "word": " something", "probability": 0.86376953125}, {"start": 459.02, "end": 459.46, "word": " else.", "probability": 0.908203125}, {"start": 459.92, "end": 460.12, "word": " So", "probability": 0.919921875}, {"start": 460.12, "end": 460.4, "word": " this", "probability": 0.91064453125}, {"start": 460.4, "end": 460.92, "word": " formula", "probability": 0.91162109375}, {"start": 460.92, "end": 461.2, "word": " is", "probability": 0.7412109375}, 
{"start": 461.2, "end": 462.62, "word": " it", "probability": 0.3291015625}, {"start": 462.62, "end": 462.98, "word": " seems", "probability": 0.791015625}, {"start": 462.98, "end": 463.16, "word": " to", "probability": 0.9677734375}, {"start": 463.16, "end": 463.34, "word": " be", "probability": 0.95166015625}, {"start": 463.34, "end": 464.04, "word": " complicated.", "probability": 0.91015625}, {"start": 464.98, "end": 465.3, "word": " It's", "probability": 0.95556640625}, {"start": 465.3, "end": 465.52, "word": " not", "probability": 0.9482421875}, {"start": 465.52, "end": 465.88, "word": " hard", "probability": 0.89794921875}, {"start": 465.88, "end": 466.18, "word": " but", "probability": 0.525390625}, {"start": 466.18, "end": 466.7, "word": " it's", "probability": 0.954833984375}], "temperature": 1.0}, {"id": 20, "seek": 49016, "start": 468.04, "end": 490.16, "text": " complicated one, but we can use it. If we know calculus very well, we can use integration to create the probabilities underneath the curve. But for our course, we are going to skip this formula because this formula depends actually on mu and sigma. 
A mu can take any value.", "tokens": [6179, 472, 11, 457, 321, 393, 764, 309, 13, 759, 321, 458, 33400, 588, 731, 11, 321, 393, 764, 10980, 281, 1884, 264, 33783, 7223, 264, 7605, 13, 583, 337, 527, 1164, 11, 321, 366, 516, 281, 10023, 341, 8513, 570, 341, 8513, 5946, 767, 322, 2992, 293, 12771, 13, 316, 2992, 393, 747, 604, 2158, 13], "avg_logprob": -0.20245151247443824, "compression_ratio": 1.5307262569832403, "no_speech_prob": 0.0, "words": [{"start": 468.03999999999996, "end": 468.52, "word": " complicated", "probability": 0.72216796875}, {"start": 468.52, "end": 468.76, "word": " one,", "probability": 0.8701171875}, {"start": 468.84, "end": 468.92, "word": " but", "probability": 0.91455078125}, {"start": 468.92, "end": 469.12, "word": " we", "probability": 0.953125}, {"start": 469.12, "end": 469.36, "word": " can", "probability": 0.94677734375}, {"start": 469.36, "end": 469.6, "word": " use", "probability": 0.8828125}, {"start": 469.6, "end": 469.92, "word": " it.", "probability": 0.94384765625}, {"start": 470.12, "end": 470.46, "word": " If", "probability": 0.9541015625}, {"start": 470.46, "end": 470.62, "word": " we", "probability": 0.89111328125}, {"start": 470.62, "end": 470.86, "word": " know", "probability": 0.54541015625}, {"start": 470.86, "end": 471.36, "word": " calculus", "probability": 0.943359375}, {"start": 471.36, "end": 471.64, "word": " very", "probability": 0.8427734375}, {"start": 471.64, "end": 471.82, "word": " well,", "probability": 0.9482421875}, {"start": 471.86, "end": 471.96, "word": " we", "probability": 0.9560546875}, {"start": 471.96, "end": 472.16, "word": " can", "probability": 0.9482421875}, {"start": 472.16, "end": 472.38, "word": " use", "probability": 0.87841796875}, {"start": 472.38, "end": 473.22, "word": " integration", "probability": 0.93896484375}, {"start": 473.22, "end": 473.96, "word": " to", "probability": 0.90380859375}, {"start": 473.96, "end": 474.2, "word": " create", "probability": 0.41064453125}, {"start": 
474.2, "end": 474.36, "word": " the", "probability": 0.468994140625}, {"start": 474.36, "end": 474.72, "word": " probabilities", "probability": 0.90283203125}, {"start": 474.72, "end": 475.24, "word": " underneath", "probability": 0.9267578125}, {"start": 475.24, "end": 475.6, "word": " the", "probability": 0.9013671875}, {"start": 475.6, "end": 475.84, "word": " curve.", "probability": 0.75439453125}, {"start": 476.08, "end": 476.18, "word": " But", "probability": 0.91357421875}, {"start": 476.18, "end": 476.96, "word": " for", "probability": 0.84521484375}, {"start": 476.96, "end": 477.82, "word": " our", "probability": 0.79638671875}, {"start": 477.82, "end": 478.24, "word": " course,", "probability": 0.958984375}, {"start": 478.32, "end": 478.38, "word": " we", "probability": 0.9609375}, {"start": 478.38, "end": 478.52, "word": " are", "probability": 0.923828125}, {"start": 478.52, "end": 478.72, "word": " going", "probability": 0.94775390625}, {"start": 478.72, "end": 478.9, "word": " to", "probability": 0.96826171875}, {"start": 478.9, "end": 479.2, "word": " skip", "probability": 0.97998046875}, {"start": 479.2, "end": 479.54, "word": " this", "probability": 0.93896484375}, {"start": 479.54, "end": 479.96, "word": " formula", "probability": 0.9013671875}, {"start": 479.96, "end": 481.06, "word": " because", "probability": 0.62939453125}, {"start": 481.06, "end": 484.46, "word": " this", "probability": 0.607421875}, {"start": 484.46, "end": 485.34, "word": " formula", "probability": 0.90478515625}, {"start": 485.34, "end": 485.8, "word": " depends", "probability": 0.904296875}, {"start": 485.8, "end": 486.38, "word": " actually", "probability": 0.85302734375}, {"start": 486.38, "end": 487.06, "word": " on", "probability": 0.80322265625}, {"start": 487.06, "end": 487.3, "word": " mu", "probability": 0.442626953125}, {"start": 487.3, "end": 487.5, "word": " and", "probability": 0.94970703125}, {"start": 487.5, "end": 487.82, "word": " sigma.", "probability": 
0.9306640625}, {"start": 488.78, "end": 488.94, "word": " A", "probability": 0.324462890625}, {"start": 488.94, "end": 489.1, "word": " mu", "probability": 0.95458984375}, {"start": 489.1, "end": 489.34, "word": " can", "probability": 0.94873046875}, {"start": 489.34, "end": 489.62, "word": " take", "probability": 0.89208984375}, {"start": 489.62, "end": 489.86, "word": " any", "probability": 0.9072265625}, {"start": 489.86, "end": 490.16, "word": " value.", "probability": 0.97998046875}], "temperature": 1.0}, {"id": 21, "seek": 52077, "start": 491.25, "end": 520.77, "text": " Sigma also can take any value. That means we have different normal distributions. Because the distribution actually depends on these two parameters. So by varying the parameters mu and sigma, we obtain different normal distributions. Since we have different mu and sigma, it means we should have different normal distributions. For this reason, it's very complicated to have tables", "tokens": [36595, 611, 393, 747, 604, 2158, 13, 663, 1355, 321, 362, 819, 2710, 37870, 13, 1436, 264, 7316, 767, 5946, 322, 613, 732, 9834, 13, 407, 538, 22984, 264, 9834, 2992, 293, 12771, 11, 321, 12701, 819, 2710, 37870, 13, 4162, 321, 362, 819, 2992, 293, 12771, 11, 309, 1355, 321, 820, 362, 819, 2710, 37870, 13, 1171, 341, 1778, 11, 309, 311, 588, 6179, 281, 362, 8020], "avg_logprob": -0.12398098085237585, "compression_ratio": 1.958974358974359, "no_speech_prob": 0.0, "words": [{"start": 491.25, "end": 491.73, "word": " Sigma", "probability": 0.69873046875}, {"start": 491.73, "end": 492.07, "word": " also", "probability": 0.81201171875}, {"start": 492.07, "end": 492.31, "word": " can", "probability": 0.94091796875}, {"start": 492.31, "end": 492.53, "word": " take", "probability": 0.87939453125}, {"start": 492.53, "end": 492.77, "word": " any", "probability": 0.9091796875}, {"start": 492.77, "end": 493.11, "word": " value.", "probability": 0.98291015625}, {"start": 493.93, "end": 494.23, "word": " That", 
"probability": 0.9072265625}, {"start": 494.23, "end": 494.53, "word": " means", "probability": 0.9296875}, {"start": 494.53, "end": 494.71, "word": " we", "probability": 0.91796875}, {"start": 494.71, "end": 494.87, "word": " have", "probability": 0.947265625}, {"start": 494.87, "end": 495.51, "word": " different", "probability": 0.880859375}, {"start": 495.51, "end": 496.69, "word": " normal", "probability": 0.84716796875}, {"start": 496.69, "end": 497.31, "word": " distributions.", "probability": 0.91796875}, {"start": 498.47, "end": 499.11, "word": " Because", "probability": 0.93505859375}, {"start": 499.11, "end": 499.97, "word": " the", "probability": 0.73193359375}, {"start": 499.97, "end": 500.55, "word": " distribution", "probability": 0.859375}, {"start": 500.55, "end": 501.53, "word": " actually", "probability": 0.85595703125}, {"start": 501.53, "end": 503.35, "word": " depends", "probability": 0.8798828125}, {"start": 503.35, "end": 503.55, "word": " on", "probability": 0.94189453125}, {"start": 503.55, "end": 503.83, "word": " these", "probability": 0.80029296875}, {"start": 503.83, "end": 504.11, "word": " two", "probability": 0.91796875}, {"start": 504.11, "end": 504.53, "word": " parameters.", "probability": 0.96484375}, {"start": 505.21, "end": 505.39, "word": " So", "probability": 0.95361328125}, {"start": 505.39, "end": 505.83, "word": " by", "probability": 0.857421875}, {"start": 505.83, "end": 506.39, "word": " varying", "probability": 0.94384765625}, {"start": 506.39, "end": 506.81, "word": " the", "probability": 0.92041015625}, {"start": 506.81, "end": 507.39, "word": " parameters", "probability": 0.9384765625}, {"start": 507.39, "end": 507.61, "word": " mu", "probability": 0.38916015625}, {"start": 507.61, "end": 507.77, "word": " and", "probability": 0.94921875}, {"start": 507.77, "end": 508.07, "word": " sigma,", "probability": 0.9306640625}, {"start": 508.27, "end": 508.39, "word": " we", "probability": 0.95703125}, {"start": 508.39, 
"end": 508.85, "word": " obtain", "probability": 0.88720703125}, {"start": 508.85, "end": 509.37, "word": " different", "probability": 0.87451171875}, {"start": 509.37, "end": 509.79, "word": " normal", "probability": 0.8623046875}, {"start": 509.79, "end": 510.35, "word": " distributions.", "probability": 0.9091796875}, {"start": 511.07, "end": 511.47, "word": " Since", "probability": 0.8330078125}, {"start": 511.47, "end": 511.69, "word": " we", "probability": 0.95654296875}, {"start": 511.69, "end": 511.89, "word": " have", "probability": 0.9453125}, {"start": 511.89, "end": 512.25, "word": " different", "probability": 0.8935546875}, {"start": 512.25, "end": 512.53, "word": " mu", "probability": 0.94580078125}, {"start": 512.53, "end": 512.71, "word": " and", "probability": 0.9443359375}, {"start": 512.71, "end": 513.03, "word": " sigma,", "probability": 0.93359375}, {"start": 513.43, "end": 513.69, "word": " it", "probability": 0.94482421875}, {"start": 513.69, "end": 514.01, "word": " means", "probability": 0.92919921875}, {"start": 514.01, "end": 514.29, "word": " we", "probability": 0.95166015625}, {"start": 514.29, "end": 514.65, "word": " should", "probability": 0.96533203125}, {"start": 514.65, "end": 515.29, "word": " have", "probability": 0.94140625}, {"start": 515.29, "end": 515.95, "word": " different", "probability": 0.869140625}, {"start": 515.95, "end": 516.31, "word": " normal", "probability": 0.87548828125}, {"start": 516.31, "end": 516.83, "word": " distributions.", "probability": 0.92529296875}, {"start": 517.25, "end": 517.53, "word": " For", "probability": 0.96337890625}, {"start": 517.53, "end": 517.79, "word": " this", "probability": 0.94091796875}, {"start": 517.79, "end": 518.17, "word": " reason,", "probability": 0.97119140625}, {"start": 518.27, "end": 518.53, "word": " it's", "probability": 0.965087890625}, {"start": 518.53, "end": 518.77, "word": " very", "probability": 0.85302734375}, {"start": 518.77, "end": 519.35, "word": " 
complicated", "probability": 0.92431640625}, {"start": 519.35, "end": 520.05, "word": " to", "probability": 0.9677734375}, {"start": 520.05, "end": 520.25, "word": " have", "probability": 0.94921875}, {"start": 520.25, "end": 520.77, "word": " tables", "probability": 0.82275390625}], "temperature": 1.0}, {"id": 22, "seek": 53342, "start": 522.09, "end": 533.43, "text": " or probability tables in order to determine these probabilities because there are infinite values of mu and sigma maybe your edges the mean is", "tokens": [420, 8482, 8020, 294, 1668, 281, 6997, 613, 33783, 570, 456, 366, 13785, 4190, 295, 2992, 293, 12771, 1310, 428, 8819, 264, 914, 307], "avg_logprob": -0.21890625, "compression_ratio": 1.3883495145631068, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 522.09, "end": 522.43, "word": " or", "probability": 0.27685546875}, {"start": 522.43, "end": 522.97, "word": " probability", "probability": 0.87939453125}, {"start": 522.97, "end": 523.43, "word": " tables", "probability": 0.7568359375}, {"start": 523.43, "end": 523.63, "word": " in", "probability": 0.87109375}, {"start": 523.63, "end": 523.83, "word": " order", "probability": 0.93408203125}, {"start": 523.83, "end": 524.09, "word": " to", "probability": 0.9736328125}, {"start": 524.09, "end": 524.43, "word": " determine", "probability": 0.8984375}, {"start": 524.43, "end": 524.69, "word": " these", "probability": 0.77734375}, {"start": 524.69, "end": 525.17, "word": " probabilities", "probability": 0.95751953125}, {"start": 525.17, "end": 526.01, "word": " because", "probability": 0.64013671875}, {"start": 526.01, "end": 526.19, "word": " there", "probability": 0.87060546875}, {"start": 526.19, "end": 526.39, "word": " are", "probability": 0.9443359375}, {"start": 526.39, "end": 527.33, "word": " infinite", "probability": 0.91650390625}, {"start": 527.33, "end": 528.57, "word": " values", "probability": 0.96044921875}, {"start": 528.57, "end": 528.79, "word": " of", "probability": 
0.86376953125}, {"start": 528.79, "end": 529.01, "word": " mu", "probability": 0.63818359375}, {"start": 529.01, "end": 529.27, "word": " and", "probability": 0.9501953125}, {"start": 529.27, "end": 529.57, "word": " sigma", "probability": 0.9345703125}, {"start": 529.57, "end": 530.13, "word": " maybe", "probability": 0.71337890625}, {"start": 530.13, "end": 530.55, "word": " your", "probability": 0.8193359375}, {"start": 530.55, "end": 530.89, "word": " edges", "probability": 0.44482421875}, {"start": 530.89, "end": 531.85, "word": " the", "probability": 0.78173828125}, {"start": 531.85, "end": 532.31, "word": " mean", "probability": 0.96142578125}, {"start": 532.31, "end": 533.43, "word": " is", "probability": 0.95458984375}], "temperature": 1.0}, {"id": 23, "seek": 56183, "start": 535.05, "end": 561.83, "text": " 19. Sigma is, for example, 5. For weights, maybe the mean is 70 kilograms, the average is 10. For scores, maybe the average is 65, the mean is 20, sigma is 20, and so on. So we have different values of mu and sigma. For this reason, we have different normal distributions. 
Because changing mu", "tokens": [1294, 13, 36595, 307, 11, 337, 1365, 11, 1025, 13, 1171, 17443, 11, 1310, 264, 914, 307, 5285, 30690, 11, 264, 4274, 307, 1266, 13, 1171, 13444, 11, 1310, 264, 4274, 307, 11624, 11, 264, 914, 307, 945, 11, 12771, 307, 945, 11, 293, 370, 322, 13, 407, 321, 362, 819, 4190, 295, 2992, 293, 12771, 13, 1171, 341, 1778, 11, 321, 362, 819, 2710, 37870, 13, 1436, 4473, 2992], "avg_logprob": -0.1966329275722235, "compression_ratio": 1.5923913043478262, "no_speech_prob": 0.0, "words": [{"start": 535.05, "end": 535.63, "word": " 19.", "probability": 0.462890625}, {"start": 535.63, "end": 536.21, "word": " Sigma", "probability": 0.88916015625}, {"start": 536.21, "end": 536.81, "word": " is,", "probability": 0.947265625}, {"start": 537.29, "end": 537.37, "word": " for", "probability": 0.9521484375}, {"start": 537.37, "end": 537.75, "word": " example,", "probability": 0.97021484375}, {"start": 537.91, "end": 538.23, "word": " 5.", "probability": 0.5234375}, {"start": 538.81, "end": 539.07, "word": " For", "probability": 0.9306640625}, {"start": 539.07, "end": 539.39, "word": " weights,", "probability": 0.77734375}, {"start": 539.57, "end": 539.67, "word": " maybe", "probability": 0.9306640625}, {"start": 539.67, "end": 539.97, "word": " the", "probability": 0.88818359375}, {"start": 539.97, "end": 540.33, "word": " mean", "probability": 0.96826171875}, {"start": 540.33, "end": 540.71, "word": " is", "probability": 0.95361328125}, {"start": 540.71, "end": 541.61, "word": " 70", "probability": 0.9365234375}, {"start": 541.61, "end": 541.99, "word": " kilograms,", "probability": 0.467529296875}, {"start": 542.25, "end": 542.33, "word": " the", "probability": 0.85498046875}, {"start": 542.33, "end": 542.59, "word": " average", "probability": 0.794921875}, {"start": 542.59, "end": 542.83, "word": " is", "probability": 0.94580078125}, {"start": 542.83, "end": 543.07, "word": " 10.", "probability": 0.94140625}, {"start": 543.63, "end": 543.95, "word": " 
For", "probability": 0.92333984375}, {"start": 543.95, "end": 544.37, "word": " scores,", "probability": 0.80859375}, {"start": 544.49, "end": 544.57, "word": " maybe", "probability": 0.9404296875}, {"start": 544.57, "end": 544.75, "word": " the", "probability": 0.91259765625}, {"start": 544.75, "end": 544.99, "word": " average", "probability": 0.79150390625}, {"start": 544.99, "end": 545.19, "word": " is", "probability": 0.9169921875}, {"start": 545.19, "end": 545.59, "word": " 65,", "probability": 0.966796875}, {"start": 545.89, "end": 545.97, "word": " the", "probability": 0.7333984375}, {"start": 545.97, "end": 546.11, "word": " mean", "probability": 0.86767578125}, {"start": 546.11, "end": 546.27, "word": " is", "probability": 0.9404296875}, {"start": 546.27, "end": 546.59, "word": " 20,", "probability": 0.91943359375}, {"start": 546.89, "end": 547.25, "word": " sigma", "probability": 0.68896484375}, {"start": 547.25, "end": 547.41, "word": " is", "probability": 0.87353515625}, {"start": 547.41, "end": 547.75, "word": " 20,", "probability": 0.9580078125}, {"start": 547.87, "end": 548.05, "word": " and", "probability": 0.939453125}, {"start": 548.05, "end": 548.19, "word": " so", "probability": 0.953125}, {"start": 548.19, "end": 548.31, "word": " on.", "probability": 0.9482421875}, {"start": 548.53, "end": 548.71, "word": " So", "probability": 0.95166015625}, {"start": 548.71, "end": 548.81, "word": " we", "probability": 0.69482421875}, {"start": 548.81, "end": 548.93, "word": " have", "probability": 0.943359375}, {"start": 548.93, "end": 549.25, "word": " different", "probability": 0.87841796875}, {"start": 549.25, "end": 549.61, "word": " values", "probability": 0.96826171875}, {"start": 549.61, "end": 549.75, "word": " of", "probability": 0.67236328125}, {"start": 549.75, "end": 549.81, "word": " mu", "probability": 0.7529296875}, {"start": 549.81, "end": 549.93, "word": " and", "probability": 0.94384765625}, {"start": 549.93, "end": 550.27, "word": " 
sigma.", "probability": 0.939453125}, {"start": 550.61, "end": 550.87, "word": " For", "probability": 0.9599609375}, {"start": 550.87, "end": 551.09, "word": " this", "probability": 0.93603515625}, {"start": 551.09, "end": 551.37, "word": " reason,", "probability": 0.9697265625}, {"start": 551.43, "end": 551.59, "word": " we", "probability": 0.95703125}, {"start": 551.59, "end": 551.91, "word": " have", "probability": 0.92529296875}, {"start": 551.91, "end": 552.71, "word": " different", "probability": 0.87060546875}, {"start": 552.71, "end": 553.07, "word": " normal", "probability": 0.8642578125}, {"start": 553.07, "end": 553.65, "word": " distributions.", "probability": 0.92529296875}, {"start": 558.49, "end": 559.07, "word": " Because", "probability": 0.708984375}, {"start": 559.07, "end": 561.37, "word": " changing", "probability": 0.87548828125}, {"start": 561.37, "end": 561.83, "word": " mu", "probability": 0.85400390625}], "temperature": 1.0}, {"id": 24, "seek": 59116, "start": 563.78, "end": 591.16, "text": " shifts the distribution either left or to the right. So maybe the mean is shifted to the right side, or the mean maybe shifted to the left side. Also, changing sigma, sigma is the distance between the mu and the curve. The curve is the points, or the data values. Now this sigma can be increases or decreases. 
So if sigma increases, it means the spread also increases.", "tokens": [19201, 264, 7316, 2139, 1411, 420, 281, 264, 558, 13, 407, 1310, 264, 914, 307, 18892, 281, 264, 558, 1252, 11, 420, 264, 914, 1310, 18892, 281, 264, 1411, 1252, 13, 2743, 11, 4473, 12771, 11, 12771, 307, 264, 4560, 1296, 264, 2992, 293, 264, 7605, 13, 440, 7605, 307, 264, 2793, 11, 420, 264, 1412, 4190, 13, 823, 341, 12771, 393, 312, 8637, 420, 24108, 13, 407, 498, 12771, 8637, 11, 309, 1355, 264, 3974, 611, 8637, 13], "avg_logprob": -0.2027343712747097, "compression_ratio": 1.8636363636363635, "no_speech_prob": 0.0, "words": [{"start": 563.78, "end": 564.38, "word": " shifts", "probability": 0.65576171875}, {"start": 564.38, "end": 564.76, "word": " the", "probability": 0.88916015625}, {"start": 564.76, "end": 565.36, "word": " distribution", "probability": 0.81640625}, {"start": 565.36, "end": 565.74, "word": " either", "probability": 0.892578125}, {"start": 565.74, "end": 566.16, "word": " left", "probability": 0.9296875}, {"start": 566.16, "end": 566.48, "word": " or", "probability": 0.9404296875}, {"start": 566.48, "end": 566.64, "word": " to", "probability": 0.92431640625}, {"start": 566.64, "end": 566.76, "word": " the", "probability": 0.90478515625}, {"start": 566.76, "end": 566.96, "word": " right.", "probability": 0.9111328125}, {"start": 567.42, "end": 567.64, "word": " So", "probability": 0.94873046875}, {"start": 567.64, "end": 567.9, "word": " maybe", "probability": 0.81787109375}, {"start": 567.9, "end": 568.14, "word": " the", "probability": 0.80126953125}, {"start": 568.14, "end": 568.38, "word": " mean", "probability": 0.96630859375}, {"start": 568.38, "end": 569.3, "word": " is", "probability": 0.74609375}, {"start": 569.3, "end": 569.64, "word": " shifted", "probability": 0.9248046875}, {"start": 569.64, "end": 569.88, "word": " to", "probability": 0.96435546875}, {"start": 569.88, "end": 570.02, "word": " the", "probability": 0.908203125}, {"start": 570.02, "end": 570.18, 
"word": " right", "probability": 0.91650390625}, {"start": 570.18, "end": 570.54, "word": " side,", "probability": 0.85546875}, {"start": 571.0, "end": 571.24, "word": " or", "probability": 0.9580078125}, {"start": 571.24, "end": 571.4, "word": " the", "probability": 0.8759765625}, {"start": 571.4, "end": 571.62, "word": " mean", "probability": 0.96142578125}, {"start": 571.62, "end": 571.98, "word": " maybe", "probability": 0.4248046875}, {"start": 571.98, "end": 572.26, "word": " shifted", "probability": 0.74658203125}, {"start": 572.26, "end": 572.44, "word": " to", "probability": 0.9541015625}, {"start": 572.44, "end": 572.54, "word": " the", "probability": 0.8671875}, {"start": 572.54, "end": 572.74, "word": " left", "probability": 0.94287109375}, {"start": 572.74, "end": 573.04, "word": " side.", "probability": 0.85986328125}, {"start": 573.78, "end": 574.28, "word": " Also,", "probability": 0.9482421875}, {"start": 574.5, "end": 574.94, "word": " changing", "probability": 0.38623046875}, {"start": 574.94, "end": 575.48, "word": " sigma,", "probability": 0.8212890625}, {"start": 576.38, "end": 576.66, "word": " sigma", "probability": 0.8994140625}, {"start": 576.66, "end": 576.98, "word": " is", "probability": 0.947265625}, {"start": 576.98, "end": 577.14, "word": " the", "probability": 0.919921875}, {"start": 577.14, "end": 577.56, "word": " distance", "probability": 0.9345703125}, {"start": 577.56, "end": 577.96, "word": " between", "probability": 0.85595703125}, {"start": 577.96, "end": 578.22, "word": " the", "probability": 0.9296875}, {"start": 578.22, "end": 578.46, "word": " mu", "probability": 0.7724609375}, {"start": 578.46, "end": 579.42, "word": " and", "probability": 0.857421875}, {"start": 579.42, "end": 580.02, "word": " the", "probability": 0.90625}, {"start": 580.02, "end": 580.2, "word": " curve.", "probability": 0.72705078125}, {"start": 580.28, "end": 580.44, "word": " The", "probability": 0.81884765625}, {"start": 580.44, "end": 580.66, 
"word": " curve", "probability": 0.908203125}, {"start": 580.66, "end": 580.94, "word": " is", "probability": 0.2236328125}, {"start": 580.94, "end": 581.26, "word": " the", "probability": 0.82763671875}, {"start": 581.26, "end": 581.66, "word": " points,", "probability": 0.759765625}, {"start": 582.7, "end": 582.98, "word": " or", "probability": 0.84619140625}, {"start": 582.98, "end": 583.14, "word": " the", "probability": 0.91064453125}, {"start": 583.14, "end": 583.32, "word": " data", "probability": 0.75341796875}, {"start": 583.32, "end": 583.74, "word": " values.", "probability": 0.9228515625}, {"start": 584.28, "end": 584.44, "word": " Now", "probability": 0.95068359375}, {"start": 584.44, "end": 584.8, "word": " this", "probability": 0.57421875}, {"start": 584.8, "end": 585.22, "word": " sigma", "probability": 0.93212890625}, {"start": 585.22, "end": 585.78, "word": " can", "probability": 0.896484375}, {"start": 585.78, "end": 585.94, "word": " be", "probability": 0.93115234375}, {"start": 585.94, "end": 586.38, "word": " increases", "probability": 0.890625}, {"start": 586.38, "end": 586.8, "word": " or", "probability": 0.955078125}, {"start": 586.8, "end": 587.2, "word": " decreases.", "probability": 0.97802734375}, {"start": 587.5, "end": 587.74, "word": " So", "probability": 0.9462890625}, {"start": 587.74, "end": 588.1, "word": " if", "probability": 0.8505859375}, {"start": 588.1, "end": 588.38, "word": " sigma", "probability": 0.9169921875}, {"start": 588.38, "end": 588.82, "word": " increases,", "probability": 0.93408203125}, {"start": 589.3, "end": 589.5, "word": " it", "probability": 0.94287109375}, {"start": 589.5, "end": 589.74, "word": " means", "probability": 0.93603515625}, {"start": 589.74, "end": 589.94, "word": " the", "probability": 0.896484375}, {"start": 589.94, "end": 590.22, "word": " spread", "probability": 0.86328125}, {"start": 590.22, "end": 590.62, "word": " also", "probability": 0.85498046875}, {"start": 590.62, "end": 591.16, 
"word": " increases.", "probability": 0.38330078125}], "temperature": 1.0}, {"id": 25, "seek": 61694, "start": 592.48, "end": 616.94, "text": " Or if sigma decreases, also the spread will decrease. So the distribution or the normal distribution depends actually on these two values. For this reason, since we have too many values or infinite values of mu and sigma, then in this case we have different normal distributions. There is another distribution. It's called standardized normal.", "tokens": [1610, 498, 12771, 24108, 11, 611, 264, 3974, 486, 11514, 13, 407, 264, 7316, 420, 264, 2710, 7316, 5946, 767, 322, 613, 732, 4190, 13, 1171, 341, 1778, 11, 1670, 321, 362, 886, 867, 4190, 420, 13785, 4190, 295, 2992, 293, 12771, 11, 550, 294, 341, 1389, 321, 362, 819, 2710, 37870, 13, 821, 307, 1071, 7316, 13, 467, 311, 1219, 31677, 2710, 13], "avg_logprob": -0.18221153846153845, "compression_ratio": 1.7373737373737375, "no_speech_prob": 0.0, "words": [{"start": 592.48, "end": 592.86, "word": " Or", "probability": 0.669921875}, {"start": 592.86, "end": 593.14, "word": " if", "probability": 0.84130859375}, {"start": 593.14, "end": 593.42, "word": " sigma", "probability": 0.57470703125}, {"start": 593.42, "end": 593.94, "word": " decreases,", "probability": 0.9560546875}, {"start": 594.18, "end": 594.46, "word": " also", "probability": 0.8603515625}, {"start": 594.46, "end": 594.72, "word": " the", "probability": 0.87255859375}, {"start": 594.72, "end": 594.94, "word": " spread", "probability": 0.896484375}, {"start": 594.94, "end": 595.26, "word": " will", "probability": 0.8720703125}, {"start": 595.26, "end": 595.78, "word": " decrease.", "probability": 0.896484375}, {"start": 596.2, "end": 596.48, "word": " So", "probability": 0.939453125}, {"start": 596.48, "end": 596.86, "word": " the", "probability": 0.69482421875}, {"start": 596.86, "end": 597.5, "word": " distribution", "probability": 0.84619140625}, {"start": 597.5, "end": 598.78, "word": " or", "probability": 
0.51416015625}, {"start": 598.78, "end": 598.92, "word": " the", "probability": 0.9072265625}, {"start": 598.92, "end": 599.18, "word": " normal", "probability": 0.865234375}, {"start": 599.18, "end": 599.66, "word": " distribution", "probability": 0.86572265625}, {"start": 599.66, "end": 600.06, "word": " depends", "probability": 0.85888671875}, {"start": 600.06, "end": 600.54, "word": " actually", "probability": 0.85009765625}, {"start": 600.54, "end": 601.06, "word": " on", "probability": 0.935546875}, {"start": 601.06, "end": 601.34, "word": " these", "probability": 0.84814453125}, {"start": 601.34, "end": 601.54, "word": " two", "probability": 0.89794921875}, {"start": 601.54, "end": 601.96, "word": " values.", "probability": 0.9697265625}, {"start": 602.2, "end": 602.58, "word": " For", "probability": 0.958984375}, {"start": 602.58, "end": 602.82, "word": " this", "probability": 0.91845703125}, {"start": 602.82, "end": 603.12, "word": " reason,", "probability": 0.96240234375}, {"start": 603.22, "end": 603.42, "word": " since", "probability": 0.85693359375}, {"start": 603.42, "end": 603.62, "word": " we", "probability": 0.96044921875}, {"start": 603.62, "end": 603.8, "word": " have", "probability": 0.947265625}, {"start": 603.8, "end": 604.02, "word": " too", "probability": 0.92919921875}, {"start": 604.02, "end": 604.22, "word": " many", "probability": 0.9091796875}, {"start": 604.22, "end": 604.6, "word": " values", "probability": 0.9619140625}, {"start": 604.6, "end": 604.78, "word": " or", "probability": 0.75634765625}, {"start": 604.78, "end": 605.12, "word": " infinite", "probability": 0.9052734375}, {"start": 605.12, "end": 605.48, "word": " values", "probability": 0.95849609375}, {"start": 605.48, "end": 605.6, "word": " of", "probability": 0.459716796875}, {"start": 605.6, "end": 605.66, "word": " mu", "probability": 0.59912109375}, {"start": 605.66, "end": 605.78, "word": " and", "probability": 0.94482421875}, {"start": 605.78, "end": 606.12, "word": 
" sigma,", "probability": 0.923828125}, {"start": 606.66, "end": 606.84, "word": " then", "probability": 0.74462890625}, {"start": 606.84, "end": 606.96, "word": " in", "probability": 0.7822265625}, {"start": 606.96, "end": 607.12, "word": " this", "probability": 0.9404296875}, {"start": 607.12, "end": 607.3, "word": " case", "probability": 0.876953125}, {"start": 607.3, "end": 607.44, "word": " we", "probability": 0.6435546875}, {"start": 607.44, "end": 607.6, "word": " have", "probability": 0.94482421875}, {"start": 607.6, "end": 609.04, "word": " different", "probability": 0.734375}, {"start": 609.04, "end": 610.58, "word": " normal", "probability": 0.8662109375}, {"start": 610.58, "end": 611.78, "word": " distributions.", "probability": 0.6904296875}, {"start": 613.48, "end": 614.06, "word": " There", "probability": 0.7919921875}, {"start": 614.06, "end": 614.2, "word": " is", "probability": 0.94091796875}, {"start": 614.2, "end": 614.5, "word": " another", "probability": 0.92236328125}, {"start": 614.5, "end": 615.08, "word": " distribution.", "probability": 0.8408203125}, {"start": 615.08, "end": 615.38, "word": " It's", "probability": 0.890625}, {"start": 615.38, "end": 615.7, "word": " called", "probability": 0.8896484375}, {"start": 615.7, "end": 616.26, "word": " standardized", "probability": 0.8056640625}, {"start": 616.26, "end": 616.94, "word": " normal.", "probability": 0.81494140625}], "temperature": 1.0}, {"id": 26, "seek": 64871, "start": 620.33, "end": 648.71, "text": " Now, we have normal distribution X, and how can we transform from normal distribution to standardized normal distribution? The reason is that the mean of Z, I mean, Z is used for standardized normal. The mean of Z is always zero, and sigma is one. Now it's a big difference. 
The first one has infinite", "tokens": [823, 11, 321, 362, 2710, 7316, 1783, 11, 293, 577, 393, 321, 4088, 490, 2710, 7316, 281, 31677, 2710, 7316, 30, 440, 1778, 307, 300, 264, 914, 295, 1176, 11, 286, 914, 11, 1176, 307, 1143, 337, 31677, 2710, 13, 440, 914, 295, 1176, 307, 1009, 4018, 11, 293, 12771, 307, 472, 13, 823, 309, 311, 257, 955, 2649, 13, 440, 700, 472, 575, 13785], "avg_logprob": -0.21010889587077228, "compression_ratio": 1.7257142857142858, "no_speech_prob": 0.0, "words": [{"start": 620.3299999999999, "end": 620.93, "word": " Now,", "probability": 0.8896484375}, {"start": 620.93, "end": 621.53, "word": " we", "probability": 0.9580078125}, {"start": 621.53, "end": 621.87, "word": " have", "probability": 0.9501953125}, {"start": 621.87, "end": 622.31, "word": " normal", "probability": 0.75390625}, {"start": 622.31, "end": 622.89, "word": " distribution", "probability": 0.8212890625}, {"start": 622.89, "end": 623.25, "word": " X,", "probability": 0.5751953125}, {"start": 624.03, "end": 625.35, "word": " and", "probability": 0.9248046875}, {"start": 625.35, "end": 625.53, "word": " how", "probability": 0.93408203125}, {"start": 625.53, "end": 625.77, "word": " can", "probability": 0.93896484375}, {"start": 625.77, "end": 626.07, "word": " we", "probability": 0.962890625}, {"start": 626.07, "end": 627.87, "word": " transform", "probability": 0.89404296875}, {"start": 627.87, "end": 629.19, "word": " from", "probability": 0.83154296875}, {"start": 629.19, "end": 629.55, "word": " normal", "probability": 0.853515625}, {"start": 629.55, "end": 630.15, "word": " distribution", "probability": 0.8359375}, {"start": 630.15, "end": 631.31, "word": " to", "probability": 0.93017578125}, {"start": 631.31, "end": 631.93, "word": " standardized", "probability": 0.82958984375}, {"start": 631.93, "end": 632.25, "word": " normal", "probability": 0.85888671875}, {"start": 632.25, "end": 632.67, "word": " distribution?", "probability": 0.876953125}, {"start": 633.35, 
"end": 633.55, "word": " The", "probability": 0.8818359375}, {"start": 633.55, "end": 633.81, "word": " reason", "probability": 0.974609375}, {"start": 633.81, "end": 634.01, "word": " is", "probability": 0.94287109375}, {"start": 634.01, "end": 634.31, "word": " that", "probability": 0.80126953125}, {"start": 634.31, "end": 635.11, "word": " the", "probability": 0.71337890625}, {"start": 635.11, "end": 635.31, "word": " mean", "probability": 0.9619140625}, {"start": 635.31, "end": 635.47, "word": " of", "probability": 0.96337890625}, {"start": 635.47, "end": 635.69, "word": " Z,", "probability": 0.66064453125}, {"start": 636.75, "end": 637.03, "word": " I", "probability": 0.94482421875}, {"start": 637.03, "end": 637.23, "word": " mean,", "probability": 0.96533203125}, {"start": 637.37, "end": 637.55, "word": " Z", "probability": 0.978515625}, {"start": 637.55, "end": 637.79, "word": " is", "probability": 0.94580078125}, {"start": 637.79, "end": 638.07, "word": " used", "probability": 0.89892578125}, {"start": 638.07, "end": 638.51, "word": " for", "probability": 0.9453125}, {"start": 638.51, "end": 639.75, "word": " standardized", "probability": 0.322998046875}, {"start": 639.75, "end": 640.31, "word": " normal.", "probability": 0.86083984375}, {"start": 640.85, "end": 641.09, "word": " The", "probability": 0.7763671875}, {"start": 641.09, "end": 641.29, "word": " mean", "probability": 0.9560546875}, {"start": 641.29, "end": 641.45, "word": " of", "probability": 0.9638671875}, {"start": 641.45, "end": 641.69, "word": " Z", "probability": 0.984375}, {"start": 641.69, "end": 641.95, "word": " is", "probability": 0.947265625}, {"start": 641.95, "end": 642.39, "word": " always", "probability": 0.9189453125}, {"start": 642.39, "end": 642.77, "word": " zero,", "probability": 0.5556640625}, {"start": 643.55, "end": 643.83, "word": " and", "probability": 0.9404296875}, {"start": 643.83, "end": 644.07, "word": " sigma", "probability": 0.77294921875}, {"start": 644.07, 
"end": 644.27, "word": " is", "probability": 0.95068359375}, {"start": 644.27, "end": 644.49, "word": " one.", "probability": 0.88671875}, {"start": 645.77, "end": 646.17, "word": " Now", "probability": 0.93359375}, {"start": 646.17, "end": 646.37, "word": " it's", "probability": 0.78271484375}, {"start": 646.37, "end": 646.45, "word": " a", "probability": 0.525390625}, {"start": 646.45, "end": 646.55, "word": " big", "probability": 0.91259765625}, {"start": 646.55, "end": 647.01, "word": " difference.", "probability": 0.8642578125}, {"start": 647.27, "end": 647.41, "word": " The", "probability": 0.8828125}, {"start": 647.41, "end": 647.67, "word": " first", "probability": 0.8896484375}, {"start": 647.67, "end": 647.87, "word": " one", "probability": 0.92919921875}, {"start": 647.87, "end": 648.15, "word": " has", "probability": 0.93896484375}, {"start": 648.15, "end": 648.71, "word": " infinite", "probability": 0.89697265625}], "temperature": 1.0}, {"id": 27, "seek": 67756, "start": 649.84, "end": 677.56, "text": " values of Mu and Sigma. Now, for the standardized normal distribution, the mean is fixed value. The mean is zero, Sigma is one. So, the question is, how can we actually transform from X, which has normal distribution, to Z, which has standardized normal with mean zero and Sigma of one. 
Let's see.", "tokens": [4190, 295, 15601, 293, 36595, 13, 823, 11, 337, 264, 31677, 2710, 7316, 11, 264, 914, 307, 6806, 2158, 13, 440, 914, 307, 4018, 11, 36595, 307, 472, 13, 407, 11, 264, 1168, 307, 11, 577, 393, 321, 767, 4088, 490, 1783, 11, 597, 575, 2710, 7316, 11, 281, 1176, 11, 597, 575, 31677, 2710, 365, 914, 4018, 293, 36595, 295, 472, 13, 961, 311, 536, 13], "avg_logprob": -0.20863970062311957, "compression_ratio": 1.7126436781609196, "no_speech_prob": 0.0, "words": [{"start": 649.84, "end": 650.44, "word": " values", "probability": 0.66650390625}, {"start": 650.44, "end": 650.84, "word": " of", "probability": 0.951171875}, {"start": 650.84, "end": 651.1, "word": " Mu", "probability": 0.4384765625}, {"start": 651.1, "end": 651.38, "word": " and", "probability": 0.9384765625}, {"start": 651.38, "end": 651.7, "word": " Sigma.", "probability": 0.76318359375}, {"start": 652.1, "end": 652.5, "word": " Now,", "probability": 0.88330078125}, {"start": 652.68, "end": 652.86, "word": " for", "probability": 0.943359375}, {"start": 652.86, "end": 653.16, "word": " the", "probability": 0.8544921875}, {"start": 653.16, "end": 653.62, "word": " standardized", "probability": 0.86865234375}, {"start": 653.62, "end": 654.18, "word": " normal", "probability": 0.7568359375}, {"start": 654.18, "end": 654.88, "word": " distribution,", "probability": 0.84228515625}, {"start": 655.68, "end": 655.82, "word": " the", "probability": 0.8876953125}, {"start": 655.82, "end": 656.02, "word": " mean", "probability": 0.92333984375}, {"start": 656.02, "end": 656.2, "word": " is", "probability": 0.9423828125}, {"start": 656.2, "end": 656.48, "word": " fixed", "probability": 0.58935546875}, {"start": 656.48, "end": 656.86, "word": " value.", "probability": 0.89111328125}, {"start": 657.2, "end": 657.62, "word": " The", "probability": 0.9033203125}, {"start": 657.62, "end": 657.76, "word": " mean", "probability": 0.91748046875}, {"start": 657.76, "end": 657.9, "word": " is", "probability": 
0.935546875}, {"start": 657.9, "end": 658.2, "word": " zero,", "probability": 0.7265625}, {"start": 658.9, "end": 659.16, "word": " Sigma", "probability": 0.62109375}, {"start": 659.16, "end": 659.4, "word": " is", "probability": 0.95361328125}, {"start": 659.4, "end": 659.64, "word": " one.", "probability": 0.88916015625}, {"start": 661.02, "end": 661.54, "word": " So,", "probability": 0.93896484375}, {"start": 661.62, "end": 661.76, "word": " the", "probability": 0.921875}, {"start": 661.76, "end": 662.1, "word": " question", "probability": 0.9169921875}, {"start": 662.1, "end": 662.42, "word": " is,", "probability": 0.94970703125}, {"start": 662.58, "end": 662.72, "word": " how", "probability": 0.79296875}, {"start": 662.72, "end": 663.0, "word": " can", "probability": 0.9306640625}, {"start": 663.0, "end": 663.2, "word": " we", "probability": 0.9423828125}, {"start": 663.2, "end": 663.6, "word": " actually", "probability": 0.873046875}, {"start": 663.6, "end": 664.34, "word": " transform", "probability": 0.91845703125}, {"start": 664.34, "end": 664.68, "word": " from", "probability": 0.87548828125}, {"start": 664.68, "end": 665.0, "word": " X,", "probability": 0.88037109375}, {"start": 665.84, "end": 666.3, "word": " which", "probability": 0.9443359375}, {"start": 666.3, "end": 666.62, "word": " has", "probability": 0.93701171875}, {"start": 666.62, "end": 667.0, "word": " normal", "probability": 0.7978515625}, {"start": 667.0, "end": 667.6, "word": " distribution,", "probability": 0.84130859375}, {"start": 668.22, "end": 669.18, "word": " to", "probability": 0.9609375}, {"start": 669.18, "end": 669.42, "word": " Z,", "probability": 0.97607421875}, {"start": 669.54, "end": 669.72, "word": " which", "probability": 0.94482421875}, {"start": 669.72, "end": 670.02, "word": " has", "probability": 0.9052734375}, {"start": 670.02, "end": 670.46, "word": " standardized", "probability": 0.9228515625}, {"start": 670.46, "end": 671.04, "word": " normal", "probability": 
0.87109375}, {"start": 671.04, "end": 671.74, "word": " with", "probability": 0.673828125}, {"start": 671.74, "end": 671.96, "word": " mean", "probability": 0.8662109375}, {"start": 671.96, "end": 672.34, "word": " zero", "probability": 0.8642578125}, {"start": 672.34, "end": 672.74, "word": " and", "probability": 0.89404296875}, {"start": 672.74, "end": 673.16, "word": " Sigma", "probability": 0.7431640625}, {"start": 673.16, "end": 674.34, "word": " of", "probability": 0.958984375}, {"start": 674.34, "end": 674.58, "word": " one.", "probability": 0.90283203125}, {"start": 676.74, "end": 677.4, "word": " Let's", "probability": 0.948486328125}, {"start": 677.4, "end": 677.56, "word": " see.", "probability": 0.583984375}], "temperature": 1.0}, {"id": 28, "seek": 70353, "start": 678.37, "end": 703.53, "text": " How can we translate x which has normal distribution to z that has standardized normal distribution? The idea is you have just to subtract mu of x, x minus mu, then divide this result by sigma. 
So we just subtract the mean of x.", "tokens": [1012, 393, 321, 13799, 2031, 597, 575, 2710, 7316, 281, 710, 300, 575, 31677, 2710, 7316, 30, 440, 1558, 307, 291, 362, 445, 281, 16390, 2992, 295, 2031, 11, 2031, 3175, 2992, 11, 550, 9845, 341, 1874, 538, 12771, 13, 407, 321, 445, 16390, 264, 914, 295, 2031, 13], "avg_logprob": -0.1935937562584877, "compression_ratio": 1.4774193548387098, "no_speech_prob": 0.0, "words": [{"start": 678.37, "end": 678.63, "word": " How", "probability": 0.67236328125}, {"start": 678.63, "end": 678.87, "word": " can", "probability": 0.943359375}, {"start": 678.87, "end": 679.05, "word": " we", "probability": 0.94189453125}, {"start": 679.05, "end": 679.63, "word": " translate", "probability": 0.81298828125}, {"start": 679.63, "end": 681.01, "word": " x", "probability": 0.39794921875}, {"start": 681.01, "end": 683.33, "word": " which", "probability": 0.404541015625}, {"start": 683.33, "end": 683.71, "word": " has", "probability": 0.9345703125}, {"start": 683.71, "end": 684.33, "word": " normal", "probability": 0.845703125}, {"start": 684.33, "end": 684.97, "word": " distribution", "probability": 0.81982421875}, {"start": 684.97, "end": 685.91, "word": " to", "probability": 0.8505859375}, {"start": 685.91, "end": 686.17, "word": " z", "probability": 0.9091796875}, {"start": 686.17, "end": 686.49, "word": " that", "probability": 0.88427734375}, {"start": 686.49, "end": 686.87, "word": " has", "probability": 0.9384765625}, {"start": 686.87, "end": 687.51, "word": " standardized", "probability": 0.7353515625}, {"start": 687.51, "end": 687.97, "word": " normal", "probability": 0.8671875}, {"start": 687.97, "end": 688.53, "word": " distribution?", "probability": 0.869140625}, {"start": 689.41, "end": 689.97, "word": " The", "probability": 0.8515625}, {"start": 689.97, "end": 690.29, "word": " idea", "probability": 0.91748046875}, {"start": 690.29, "end": 690.67, "word": " is", "probability": 0.9462890625}, {"start": 690.67, "end": 691.03, 
"word": " you", "probability": 0.8212890625}, {"start": 691.03, "end": 691.23, "word": " have", "probability": 0.916015625}, {"start": 691.23, "end": 691.57, "word": " just", "probability": 0.84521484375}, {"start": 691.57, "end": 692.19, "word": " to", "probability": 0.9375}, {"start": 692.19, "end": 692.89, "word": " subtract", "probability": 0.85791015625}, {"start": 692.89, "end": 693.49, "word": " mu", "probability": 0.587890625}, {"start": 693.49, "end": 694.61, "word": " of", "probability": 0.9326171875}, {"start": 694.61, "end": 694.99, "word": " x,", "probability": 0.97265625}, {"start": 695.85, "end": 696.61, "word": " x", "probability": 0.67431640625}, {"start": 696.61, "end": 696.97, "word": " minus", "probability": 0.95751953125}, {"start": 696.97, "end": 697.35, "word": " mu,", "probability": 0.93310546875}, {"start": 697.81, "end": 698.29, "word": " then", "probability": 0.8544921875}, {"start": 698.29, "end": 698.75, "word": " divide", "probability": 0.912109375}, {"start": 698.75, "end": 699.17, "word": " this", "probability": 0.94140625}, {"start": 699.17, "end": 699.57, "word": " result", "probability": 0.93408203125}, {"start": 699.57, "end": 699.81, "word": " by", "probability": 0.97216796875}, {"start": 699.81, "end": 700.11, "word": " sigma.", "probability": 0.91259765625}, {"start": 700.81, "end": 701.11, "word": " So", "probability": 0.94970703125}, {"start": 701.11, "end": 701.37, "word": " we", "probability": 0.751953125}, {"start": 701.37, "end": 701.63, "word": " just", "probability": 0.90380859375}, {"start": 701.63, "end": 702.15, "word": " subtract", "probability": 0.86767578125}, {"start": 702.15, "end": 702.41, "word": " the", "probability": 0.92626953125}, {"start": 702.41, "end": 702.65, "word": " mean", "probability": 0.97900390625}, {"start": 702.65, "end": 703.15, "word": " of", "probability": 0.9697265625}, {"start": 703.15, "end": 703.53, "word": " x.", "probability": 0.98583984375}], "temperature": 1.0}, {"id": 29, "seek": 
72808, "start": 705.0, "end": 728.08, "text": " and dividing by its standard deviation now so if we have x which has normal distribution with mean mu and standard deviation sigma to transform or to convert to z score use this formula x minus the mean then divide by its standard deviation now all of the time we are going to use z", "tokens": [293, 26764, 538, 1080, 3832, 25163, 586, 370, 498, 321, 362, 2031, 597, 575, 2710, 7316, 365, 914, 2992, 293, 3832, 25163, 12771, 281, 4088, 420, 281, 7620, 281, 710, 6175, 764, 341, 8513, 2031, 3175, 264, 914, 550, 9845, 538, 1080, 3832, 25163, 586, 439, 295, 264, 565, 321, 366, 516, 281, 764, 710], "avg_logprob": -0.13699776892151153, "compression_ratio": 1.7911392405063291, "no_speech_prob": 0.0, "words": [{"start": 705.0, "end": 705.3, "word": " and", "probability": 0.82177734375}, {"start": 705.3, "end": 705.7, "word": " dividing", "probability": 0.87841796875}, {"start": 705.7, "end": 706.62, "word": " by", "probability": 0.958984375}, {"start": 706.62, "end": 706.94, "word": " its", "probability": 0.82568359375}, {"start": 706.94, "end": 707.4, "word": " standard", "probability": 0.919921875}, {"start": 707.4, "end": 708.16, "word": " deviation", "probability": 0.92138671875}, {"start": 708.16, "end": 709.24, "word": " now", "probability": 0.415771484375}, {"start": 709.24, "end": 709.66, "word": " so", "probability": 0.87060546875}, {"start": 709.66, "end": 709.86, "word": " if", "probability": 0.96337890625}, {"start": 709.86, "end": 709.98, "word": " we", "probability": 0.96630859375}, {"start": 709.98, "end": 710.14, "word": " have", "probability": 0.94775390625}, {"start": 710.14, "end": 710.48, "word": " x", "probability": 0.97216796875}, {"start": 710.48, "end": 710.96, "word": " which", "probability": 0.9501953125}, {"start": 710.96, "end": 711.2, "word": " has", "probability": 0.9462890625}, {"start": 711.2, "end": 711.54, "word": " normal", "probability": 0.87353515625}, {"start": 711.54, "end": 712.06, "word": 
" distribution", "probability": 0.8662109375}, {"start": 712.06, "end": 712.36, "word": " with", "probability": 0.90869140625}, {"start": 712.36, "end": 712.54, "word": " mean", "probability": 0.7001953125}, {"start": 712.54, "end": 712.72, "word": " mu", "probability": 0.4970703125}, {"start": 712.72, "end": 712.96, "word": " and", "probability": 0.94384765625}, {"start": 712.96, "end": 713.52, "word": " standard", "probability": 0.93603515625}, {"start": 713.52, "end": 713.82, "word": " deviation", "probability": 0.88623046875}, {"start": 713.82, "end": 714.22, "word": " sigma", "probability": 0.90966796875}, {"start": 714.22, "end": 715.3, "word": " to", "probability": 0.9453125}, {"start": 715.3, "end": 715.94, "word": " transform", "probability": 0.97119140625}, {"start": 715.94, "end": 716.24, "word": " or", "probability": 0.91845703125}, {"start": 716.24, "end": 716.38, "word": " to", "probability": 0.9521484375}, {"start": 716.38, "end": 716.88, "word": " convert", "probability": 0.9169921875}, {"start": 716.88, "end": 717.26, "word": " to", "probability": 0.9443359375}, {"start": 717.26, "end": 717.46, "word": " z", "probability": 0.83935546875}, {"start": 717.46, "end": 717.84, "word": " score", "probability": 0.509765625}, {"start": 717.84, "end": 719.3, "word": " use", "probability": 0.83642578125}, {"start": 719.3, "end": 719.52, "word": " this", "probability": 0.9287109375}, {"start": 719.52, "end": 719.86, "word": " formula", "probability": 0.77099609375}, {"start": 719.86, "end": 720.42, "word": " x", "probability": 0.6025390625}, {"start": 720.42, "end": 720.96, "word": " minus", "probability": 0.98193359375}, {"start": 720.96, "end": 721.42, "word": " the", "probability": 0.93115234375}, {"start": 721.42, "end": 721.64, "word": " mean", "probability": 0.9736328125}, {"start": 721.64, "end": 722.1, "word": " then", "probability": 0.8271484375}, {"start": 722.1, "end": 722.42, "word": " divide", "probability": 0.79638671875}, {"start": 722.42, 
"end": 722.72, "word": " by", "probability": 0.9677734375}, {"start": 722.72, "end": 723.06, "word": " its", "probability": 0.8759765625}, {"start": 723.06, "end": 723.5, "word": " standard", "probability": 0.93701171875}, {"start": 723.5, "end": 724.56, "word": " deviation", "probability": 0.8720703125}, {"start": 724.56, "end": 725.22, "word": " now", "probability": 0.89306640625}, {"start": 725.22, "end": 726.32, "word": " all", "probability": 0.95068359375}, {"start": 726.32, "end": 726.42, "word": " of", "probability": 0.9423828125}, {"start": 726.42, "end": 726.52, "word": " the", "probability": 0.92333984375}, {"start": 726.52, "end": 726.8, "word": " time", "probability": 0.8916015625}, {"start": 726.8, "end": 727.02, "word": " we", "probability": 0.96044921875}, {"start": 727.02, "end": 727.12, "word": " are", "probability": 0.94140625}, {"start": 727.12, "end": 727.38, "word": " going", "probability": 0.94384765625}, {"start": 727.38, "end": 727.54, "word": " to", "probability": 0.97021484375}, {"start": 727.54, "end": 727.82, "word": " use", "probability": 0.88427734375}, {"start": 727.82, "end": 728.08, "word": " z", "probability": 0.98779296875}], "temperature": 1.0}, {"id": 30, "seek": 75093, "start": 728.73, "end": 750.93, "text": " for standardized normal distribution and always z has mean zero and all and sigma or standard deviation. So the z distribution always has mean of zero and sigma of one. So that's the story of standardizing the normal value. 
Now the", "tokens": [337, 31677, 2710, 7316, 293, 1009, 710, 575, 914, 4018, 293, 439, 293, 12771, 420, 3832, 25163, 13, 407, 264, 710, 7316, 1009, 575, 914, 295, 4018, 293, 12771, 295, 472, 13, 407, 300, 311, 264, 1657, 295, 3832, 3319, 264, 2710, 2158, 13, 823, 264], "avg_logprob": -0.1883311113144489, "compression_ratio": 1.7185185185185186, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 728.73, "end": 729.09, "word": " for", "probability": 0.466552734375}, {"start": 729.09, "end": 729.47, "word": " standardized", "probability": 0.74755859375}, {"start": 729.47, "end": 730.01, "word": " normal", "probability": 0.83447265625}, {"start": 730.01, "end": 730.65, "word": " distribution", "probability": 0.85302734375}, {"start": 730.65, "end": 731.41, "word": " and", "probability": 0.69775390625}, {"start": 731.41, "end": 731.77, "word": " always", "probability": 0.89208984375}, {"start": 731.77, "end": 731.99, "word": " z", "probability": 0.69140625}, {"start": 731.99, "end": 732.23, "word": " has", "probability": 0.93017578125}, {"start": 732.23, "end": 732.43, "word": " mean", "probability": 0.90380859375}, {"start": 732.43, "end": 732.77, "word": " zero", "probability": 0.6708984375}, {"start": 732.77, "end": 733.01, "word": " and", "probability": 0.9306640625}, {"start": 733.01, "end": 733.31, "word": " all", "probability": 0.76220703125}, {"start": 733.31, "end": 733.73, "word": " and", "probability": 0.880859375}, {"start": 733.73, "end": 734.09, "word": " sigma", "probability": 0.85595703125}, {"start": 734.09, "end": 734.75, "word": " or", "probability": 0.5234375}, {"start": 734.75, "end": 735.07, "word": " standard", "probability": 0.90966796875}, {"start": 735.07, "end": 735.37, "word": " deviation.", "probability": 0.8388671875}, {"start": 736.25, "end": 736.97, "word": " So", "probability": 0.84228515625}, {"start": 736.97, "end": 737.19, "word": " the", "probability": 0.73974609375}, {"start": 737.19, "end": 737.37, "word": " z", 
"probability": 0.953125}, {"start": 737.37, "end": 737.95, "word": " distribution", "probability": 0.81005859375}, {"start": 737.95, "end": 738.69, "word": " always", "probability": 0.900390625}, {"start": 738.69, "end": 739.09, "word": " has", "probability": 0.9423828125}, {"start": 739.09, "end": 739.37, "word": " mean", "probability": 0.94775390625}, {"start": 739.37, "end": 739.53, "word": " of", "probability": 0.970703125}, {"start": 739.53, "end": 739.81, "word": " zero", "probability": 0.88427734375}, {"start": 739.81, "end": 740.17, "word": " and", "probability": 0.94677734375}, {"start": 740.17, "end": 740.57, "word": " sigma", "probability": 0.91357421875}, {"start": 740.57, "end": 741.23, "word": " of", "probability": 0.87939453125}, {"start": 741.23, "end": 741.43, "word": " one.", "probability": 0.90673828125}, {"start": 741.97, "end": 742.21, "word": " So", "probability": 0.93212890625}, {"start": 742.21, "end": 742.51, "word": " that's", "probability": 0.937744140625}, {"start": 742.51, "end": 742.75, "word": " the", "probability": 0.9228515625}, {"start": 742.75, "end": 743.09, "word": " story", "probability": 0.93359375}, {"start": 743.09, "end": 743.67, "word": " of", "probability": 0.96923828125}, {"start": 743.67, "end": 745.49, "word": " standardizing", "probability": 0.955078125}, {"start": 745.49, "end": 746.83, "word": " the", "probability": 0.88916015625}, {"start": 746.83, "end": 747.23, "word": " normal", "probability": 0.85986328125}, {"start": 747.23, "end": 748.31, "word": " value.", "probability": 0.96337890625}, {"start": 749.81, "end": 750.47, "word": " Now", "probability": 0.94140625}, {"start": 750.47, "end": 750.93, "word": " the", "probability": 0.7900390625}], "temperature": 1.0}, {"id": 31, "seek": 76979, "start": 751.51, "end": 769.79, "text": " Formula for this score becomes better than the first one, but still we have to use calculus in order to determine the probabilities under the standardized normal k. 
But this distribution has mean of zero and sigma of one.", "tokens": [35872, 337, 341, 6175, 3643, 1101, 813, 264, 700, 472, 11, 457, 920, 321, 362, 281, 764, 33400, 294, 1668, 281, 6997, 264, 33783, 833, 264, 31677, 2710, 350, 13, 583, 341, 7316, 575, 914, 295, 4018, 293, 12771, 295, 472, 13], "avg_logprob": -0.2570857502693354, "compression_ratio": 1.4322580645161291, "no_speech_prob": 0.0, "words": [{"start": 751.51, "end": 752.13, "word": " Formula", "probability": 0.331298828125}, {"start": 752.13, "end": 752.47, "word": " for", "probability": 0.9091796875}, {"start": 752.47, "end": 752.71, "word": " this", "probability": 0.39501953125}, {"start": 752.71, "end": 753.07, "word": " score", "probability": 0.85400390625}, {"start": 753.07, "end": 753.81, "word": " becomes", "probability": 0.8408203125}, {"start": 753.81, "end": 754.61, "word": " better", "probability": 0.88232421875}, {"start": 754.61, "end": 754.91, "word": " than", "probability": 0.93994140625}, {"start": 754.91, "end": 755.13, "word": " the", "probability": 0.90087890625}, {"start": 755.13, "end": 755.45, "word": " first", "probability": 0.84716796875}, {"start": 755.45, "end": 755.77, "word": " one,", "probability": 0.9208984375}, {"start": 756.13, "end": 756.21, "word": " but", "probability": 0.88720703125}, {"start": 756.21, "end": 756.61, "word": " still", "probability": 0.93359375}, {"start": 756.61, "end": 757.57, "word": " we", "probability": 0.5576171875}, {"start": 757.57, "end": 757.77, "word": " have", "probability": 0.9365234375}, {"start": 757.77, "end": 757.91, "word": " to", "probability": 0.97021484375}, {"start": 757.91, "end": 758.09, "word": " use", "probability": 0.87744140625}, {"start": 758.09, "end": 758.45, "word": " calculus", "probability": 0.939453125}, {"start": 758.45, "end": 758.85, "word": " in", "probability": 0.91259765625}, {"start": 758.85, "end": 759.11, "word": " order", "probability": 0.93017578125}, {"start": 759.11, "end": 759.35, "word": " to", 
"probability": 0.9638671875}, {"start": 759.35, "end": 759.77, "word": " determine", "probability": 0.9140625}, {"start": 759.77, "end": 760.57, "word": " the", "probability": 0.78857421875}, {"start": 760.57, "end": 761.13, "word": " probabilities", "probability": 0.8603515625}, {"start": 761.13, "end": 761.53, "word": " under", "probability": 0.9072265625}, {"start": 761.53, "end": 761.79, "word": " the", "probability": 0.82080078125}, {"start": 761.79, "end": 762.23, "word": " standardized", "probability": 0.861328125}, {"start": 762.23, "end": 762.53, "word": " normal", "probability": 0.7080078125}, {"start": 762.53, "end": 762.81, "word": " k.", "probability": 0.1971435546875}, {"start": 765.05, "end": 765.71, "word": " But", "probability": 0.90673828125}, {"start": 765.71, "end": 766.21, "word": " this", "probability": 0.82666015625}, {"start": 766.21, "end": 766.85, "word": " distribution", "probability": 0.8564453125}, {"start": 766.85, "end": 768.21, "word": " has", "probability": 0.88623046875}, {"start": 768.21, "end": 768.37, "word": " mean", "probability": 0.904296875}, {"start": 768.37, "end": 768.51, "word": " of", "probability": 0.9580078125}, {"start": 768.51, "end": 768.73, "word": " zero", "probability": 0.51123046875}, {"start": 768.73, "end": 768.89, "word": " and", "probability": 0.9345703125}, {"start": 768.89, "end": 769.13, "word": " sigma", "probability": 0.87890625}, {"start": 769.13, "end": 769.47, "word": " of", "probability": 0.9111328125}, {"start": 769.47, "end": 769.79, "word": " one.", "probability": 0.77099609375}], "temperature": 1.0}, {"id": 32, "seek": 79547, "start": 770.87, "end": 795.47, "text": " So we have a table on page 570. Look at page 570. We have table or actually there are two tables. One for negative value of Z and the other for positive value of Z. 
So we have two tables for positive and negative values of Z on page 570 and 571.", "tokens": [407, 321, 362, 257, 3199, 322, 3028, 1025, 5867, 13, 2053, 412, 3028, 1025, 5867, 13, 492, 362, 3199, 420, 767, 456, 366, 732, 8020, 13, 1485, 337, 3671, 2158, 295, 1176, 293, 264, 661, 337, 3353, 2158, 295, 1176, 13, 407, 321, 362, 732, 8020, 337, 3353, 293, 3671, 4190, 295, 1176, 322, 3028, 1025, 5867, 293, 21423, 16, 13], "avg_logprob": -0.15612399457923828, "compression_ratio": 1.7697841726618706, "no_speech_prob": 0.0, "words": [{"start": 770.87, "end": 771.19, "word": " So", "probability": 0.92431640625}, {"start": 771.19, "end": 771.39, "word": " we", "probability": 0.7548828125}, {"start": 771.39, "end": 771.65, "word": " have", "probability": 0.9453125}, {"start": 771.65, "end": 771.93, "word": " a", "probability": 0.9853515625}, {"start": 771.93, "end": 772.23, "word": " table", "probability": 0.89501953125}, {"start": 772.23, "end": 773.17, "word": " on", "probability": 0.90966796875}, {"start": 773.17, "end": 773.45, "word": " page", "probability": 0.896484375}, {"start": 773.45, "end": 774.95, "word": " 570.", "probability": 0.924072265625}, {"start": 775.89, "end": 776.35, "word": " Look", "probability": 0.7822265625}, {"start": 776.35, "end": 776.59, "word": " at", "probability": 0.93798828125}, {"start": 776.59, "end": 776.91, "word": " page", "probability": 0.8974609375}, {"start": 776.91, "end": 777.65, "word": " 570.", "probability": 0.944091796875}, {"start": 778.25, "end": 778.53, "word": " We", "probability": 0.927734375}, {"start": 778.53, "end": 778.69, "word": " have", "probability": 0.9482421875}, {"start": 778.69, "end": 779.15, "word": " table", "probability": 0.55859375}, {"start": 779.15, "end": 779.87, "word": " or", "probability": 0.3955078125}, {"start": 779.87, "end": 780.43, "word": " actually", "probability": 0.84765625}, {"start": 780.43, "end": 780.63, "word": " there", "probability": 0.71875}, {"start": 780.63, "end": 780.75, "word": 
" are", "probability": 0.94189453125}, {"start": 780.75, "end": 780.91, "word": " two", "probability": 0.8857421875}, {"start": 780.91, "end": 781.37, "word": " tables.", "probability": 0.83349609375}, {"start": 781.71, "end": 781.99, "word": " One", "probability": 0.9287109375}, {"start": 781.99, "end": 782.29, "word": " for", "probability": 0.94677734375}, {"start": 782.29, "end": 782.87, "word": " negative", "probability": 0.93505859375}, {"start": 782.87, "end": 783.23, "word": " value", "probability": 0.81689453125}, {"start": 783.23, "end": 783.39, "word": " of", "probability": 0.94287109375}, {"start": 783.39, "end": 783.51, "word": " Z", "probability": 0.5078125}, {"start": 783.51, "end": 784.65, "word": " and", "probability": 0.71435546875}, {"start": 784.65, "end": 784.81, "word": " the", "probability": 0.7939453125}, {"start": 784.81, "end": 785.01, "word": " other", "probability": 0.8935546875}, {"start": 785.01, "end": 785.27, "word": " for", "probability": 0.94140625}, {"start": 785.27, "end": 785.67, "word": " positive", "probability": 0.92724609375}, {"start": 785.67, "end": 786.11, "word": " value", "probability": 0.73388671875}, {"start": 786.11, "end": 786.31, "word": " of", "probability": 0.89404296875}, {"start": 786.31, "end": 786.43, "word": " Z.", "probability": 0.98486328125}, {"start": 787.39, "end": 787.61, "word": " So", "probability": 0.947265625}, {"start": 787.61, "end": 787.73, "word": " we", "probability": 0.84619140625}, {"start": 787.73, "end": 787.81, "word": " have", "probability": 0.94677734375}, {"start": 787.81, "end": 787.99, "word": " two", "probability": 0.93115234375}, {"start": 787.99, "end": 788.45, "word": " tables", "probability": 0.837890625}, {"start": 788.45, "end": 788.83, "word": " for", "probability": 0.63671875}, {"start": 788.83, "end": 789.19, "word": " positive", "probability": 0.9287109375}, {"start": 789.19, "end": 789.99, "word": " and", "probability": 0.93408203125}, {"start": 789.99, "end": 790.97, 
"word": " negative", "probability": 0.93701171875}, {"start": 790.97, "end": 791.61, "word": " values", "probability": 0.95947265625}, {"start": 791.61, "end": 792.63, "word": " of", "probability": 0.96044921875}, {"start": 792.63, "end": 792.91, "word": " Z", "probability": 0.9814453125}, {"start": 792.91, "end": 793.25, "word": " on", "probability": 0.58056640625}, {"start": 793.25, "end": 793.47, "word": " page", "probability": 0.85205078125}, {"start": 793.47, "end": 794.55, "word": " 570", "probability": 0.906494140625}, {"start": 794.55, "end": 794.73, "word": " and", "probability": 0.93505859375}, {"start": 794.73, "end": 795.47, "word": " 571.", "probability": 0.8515625}], "temperature": 1.0}, {"id": 33, "seek": 82601, "start": 797.87, "end": 826.01, "text": " Now the table on page 570 looks like this one. The table you have starts from minus 6, then minus 5, minus 4.5, and so on. Here we start from minus 3.4 all the way down up to 0. Look here, all the way up to 0. So these scores here. 
Also we have 0.00, 0.01, up to 0.09.", "tokens": [823, 264, 3199, 322, 3028, 1025, 5867, 1542, 411, 341, 472, 13, 440, 3199, 291, 362, 3719, 490, 3175, 1386, 11, 550, 3175, 1025, 11, 3175, 1017, 13, 20, 11, 293, 370, 322, 13, 1692, 321, 722, 490, 3175, 805, 13, 19, 439, 264, 636, 760, 493, 281, 1958, 13, 2053, 510, 11, 439, 264, 636, 493, 281, 1958, 13, 407, 613, 13444, 510, 13, 2743, 321, 362, 1958, 13, 628, 11, 1958, 13, 10607, 11, 493, 281, 1958, 13, 13811, 13], "avg_logprob": -0.17912274311824017, "compression_ratio": 1.5823529411764705, "no_speech_prob": 0.0, "words": [{"start": 797.87, "end": 798.15, "word": " Now", "probability": 0.47900390625}, {"start": 798.15, "end": 798.31, "word": " the", "probability": 0.70703125}, {"start": 798.31, "end": 798.63, "word": " table", "probability": 0.88916015625}, {"start": 798.63, "end": 798.99, "word": " on", "probability": 0.8115234375}, {"start": 798.99, "end": 799.49, "word": " page", "probability": 0.8984375}, {"start": 799.49, "end": 800.49, "word": " 570", "probability": 0.84033203125}, {"start": 800.49, "end": 800.91, "word": " looks", "probability": 0.7138671875}, {"start": 800.91, "end": 801.27, "word": " like", "probability": 0.93798828125}, {"start": 801.27, "end": 801.99, "word": " this", "probability": 0.9306640625}, {"start": 801.99, "end": 802.25, "word": " one.", "probability": 0.91064453125}, {"start": 802.53, "end": 802.77, "word": " The", "probability": 0.876953125}, {"start": 802.77, "end": 802.95, "word": " table", "probability": 0.90673828125}, {"start": 802.95, "end": 803.11, "word": " you", "probability": 0.499267578125}, {"start": 803.11, "end": 803.31, "word": " have", "probability": 0.94482421875}, {"start": 803.31, "end": 803.75, "word": " starts", "probability": 0.86572265625}, {"start": 803.75, "end": 804.07, "word": " from", "probability": 0.896484375}, {"start": 804.07, "end": 804.41, "word": " minus", "probability": 0.54833984375}, {"start": 804.41, "end": 804.85, "word": " 6,", 
"probability": 0.64892578125}, {"start": 805.53, "end": 805.91, "word": " then", "probability": 0.8427734375}, {"start": 805.91, "end": 806.23, "word": " minus", "probability": 0.97314453125}, {"start": 806.23, "end": 806.61, "word": " 5,", "probability": 0.9765625}, {"start": 806.75, "end": 806.95, "word": " minus", "probability": 0.91748046875}, {"start": 806.95, "end": 807.21, "word": " 4", "probability": 0.9892578125}, {"start": 807.21, "end": 807.63, "word": ".5,", "probability": 0.959228515625}, {"start": 807.67, "end": 807.79, "word": " and", "probability": 0.9384765625}, {"start": 807.79, "end": 807.91, "word": " so", "probability": 0.9501953125}, {"start": 807.91, "end": 808.11, "word": " on.", "probability": 0.9482421875}, {"start": 808.49, "end": 808.73, "word": " Here", "probability": 0.845703125}, {"start": 808.73, "end": 808.89, "word": " we", "probability": 0.802734375}, {"start": 808.89, "end": 809.41, "word": " start", "probability": 0.93017578125}, {"start": 809.41, "end": 810.55, "word": " from", "probability": 0.89794921875}, {"start": 810.55, "end": 811.77, "word": " minus", "probability": 0.9775390625}, {"start": 811.77, "end": 812.01, "word": " 3", "probability": 0.994140625}, {"start": 812.01, "end": 812.51, "word": ".4", "probability": 0.998046875}, {"start": 812.51, "end": 813.87, "word": " all", "probability": 0.57568359375}, {"start": 813.87, "end": 814.05, "word": " the", "probability": 0.919921875}, {"start": 814.05, "end": 814.19, "word": " way", "probability": 0.95263671875}, {"start": 814.19, "end": 814.49, "word": " down", "probability": 0.84228515625}, {"start": 814.49, "end": 814.81, "word": " up", "probability": 0.9306640625}, {"start": 814.81, "end": 815.05, "word": " to", "probability": 0.97119140625}, {"start": 815.05, "end": 815.45, "word": " 0.", "probability": 0.442626953125}, {"start": 816.95, "end": 817.55, "word": " Look", "probability": 0.7783203125}, {"start": 817.55, "end": 817.83, "word": " here,", "probability": 
0.85107421875}, {"start": 818.13, "end": 818.53, "word": " all", "probability": 0.947265625}, {"start": 818.53, "end": 818.67, "word": " the", "probability": 0.91943359375}, {"start": 818.67, "end": 818.85, "word": " way", "probability": 0.95166015625}, {"start": 818.85, "end": 819.15, "word": " up", "probability": 0.9609375}, {"start": 819.15, "end": 819.33, "word": " to", "probability": 0.9677734375}, {"start": 819.33, "end": 819.55, "word": " 0.", "probability": 0.90185546875}, {"start": 820.23, "end": 820.53, "word": " So", "probability": 0.9453125}, {"start": 820.53, "end": 820.97, "word": " these", "probability": 0.51416015625}, {"start": 820.97, "end": 821.39, "word": " scores", "probability": 0.6630859375}, {"start": 821.39, "end": 821.89, "word": " here.", "probability": 0.8095703125}, {"start": 822.97, "end": 823.27, "word": " Also", "probability": 0.783203125}, {"start": 823.27, "end": 823.43, "word": " we", "probability": 0.56884765625}, {"start": 823.43, "end": 823.59, "word": " have", "probability": 0.947265625}, {"start": 823.59, "end": 823.97, "word": " 0", "probability": 0.67626953125}, {"start": 823.97, "end": 824.49, "word": ".00,", "probability": 0.9228515625}, {"start": 824.61, "end": 824.71, "word": " 0", "probability": 0.91162109375}, {"start": 824.71, "end": 825.05, "word": ".01,", "probability": 0.975341796875}, {"start": 825.05, "end": 825.27, "word": " up", "probability": 0.93212890625}, {"start": 825.27, "end": 825.39, "word": " to", "probability": 0.97021484375}, {"start": 825.39, "end": 825.59, "word": " 0", "probability": 0.93994140625}, {"start": 825.59, "end": 826.01, "word": ".09.", "probability": 0.953857421875}], "temperature": 1.0}, {"id": 34, "seek": 85646, "start": 828.42, "end": 856.46, "text": " Also, the other page, page 571, gives the area for positive z values. Here we have 0.0, 0.1, 0.2, all the way down up to 3.4 and you have up to 6. 
Now let's see how can we use this table to compute the probabilities underneath the normal curve. First of all, you have to know that", "tokens": [2743, 11, 264, 661, 3028, 11, 3028, 21423, 16, 11, 2709, 264, 1859, 337, 3353, 710, 4190, 13, 1692, 321, 362, 1958, 13, 15, 11, 1958, 13, 16, 11, 1958, 13, 17, 11, 439, 264, 636, 760, 493, 281, 805, 13, 19, 293, 291, 362, 493, 281, 1386, 13, 823, 718, 311, 536, 577, 393, 321, 764, 341, 3199, 281, 14722, 264, 33783, 7223, 264, 2710, 7605, 13, 2386, 295, 439, 11, 291, 362, 281, 458, 300], "avg_logprob": -0.16726763470050615, "compression_ratio": 1.441025641025641, "no_speech_prob": 0.0, "words": [{"start": 828.4200000000001, "end": 828.94, "word": " Also,", "probability": 0.8076171875}, {"start": 829.46, "end": 829.5, "word": " the", "probability": 0.8525390625}, {"start": 829.5, "end": 830.28, "word": " other", "probability": 0.8681640625}, {"start": 830.28, "end": 830.62, "word": " page,", "probability": 0.91748046875}, {"start": 830.74, "end": 830.98, "word": " page", "probability": 0.91064453125}, {"start": 830.98, "end": 831.88, "word": " 571,", "probability": 0.92333984375}, {"start": 832.14, "end": 832.48, "word": " gives", "probability": 0.91064453125}, {"start": 832.48, "end": 832.66, "word": " the", "probability": 0.890625}, {"start": 832.66, "end": 832.96, "word": " area", "probability": 0.830078125}, {"start": 832.96, "end": 834.22, "word": " for", "probability": 0.90673828125}, {"start": 834.22, "end": 834.68, "word": " positive", "probability": 0.86181640625}, {"start": 834.68, "end": 835.0, "word": " z", "probability": 0.6025390625}, {"start": 835.0, "end": 835.58, "word": " values.", "probability": 0.7607421875}, {"start": 836.26, "end": 836.6, "word": " Here", "probability": 0.80810546875}, {"start": 836.6, "end": 836.72, "word": " we", "probability": 0.7529296875}, {"start": 836.72, "end": 836.94, "word": " have", "probability": 0.94970703125}, {"start": 836.94, "end": 837.28, "word": " 0", "probability": 
0.40185546875}, {"start": 837.28, "end": 838.2, "word": ".0,", "probability": 0.783935546875}, {"start": 838.32, "end": 838.48, "word": " 0", "probability": 0.93701171875}, {"start": 838.48, "end": 838.78, "word": ".1,", "probability": 0.993896484375}, {"start": 838.88, "end": 838.94, "word": " 0", "probability": 0.970703125}, {"start": 838.94, "end": 839.22, "word": ".2,", "probability": 0.996826171875}, {"start": 839.26, "end": 839.38, "word": " all", "probability": 0.79931640625}, {"start": 839.38, "end": 839.52, "word": " the", "probability": 0.91259765625}, {"start": 839.52, "end": 839.64, "word": " way", "probability": 0.9560546875}, {"start": 839.64, "end": 839.9, "word": " down", "probability": 0.8212890625}, {"start": 839.9, "end": 840.12, "word": " up", "probability": 0.6943359375}, {"start": 840.12, "end": 840.24, "word": " to", "probability": 0.958984375}, {"start": 840.24, "end": 840.46, "word": " 3", "probability": 0.98193359375}, {"start": 840.46, "end": 841.42, "word": ".4", "probability": 0.995849609375}, {"start": 841.42, "end": 841.62, "word": " and", "probability": 0.5185546875}, {"start": 841.62, "end": 841.76, "word": " you", "probability": 0.8193359375}, {"start": 841.76, "end": 841.92, "word": " have", "probability": 0.95166015625}, {"start": 841.92, "end": 842.12, "word": " up", "probability": 0.96337890625}, {"start": 842.12, "end": 842.26, "word": " to", "probability": 0.96630859375}, {"start": 842.26, "end": 842.56, "word": " 6.", "probability": 0.90869140625}, {"start": 843.6, "end": 844.12, "word": " Now", "probability": 0.9306640625}, {"start": 844.12, "end": 844.38, "word": " let's", "probability": 0.7919921875}, {"start": 844.38, "end": 844.52, "word": " see", "probability": 0.92138671875}, {"start": 844.52, "end": 844.64, "word": " how", "probability": 0.89794921875}, {"start": 844.64, "end": 844.86, "word": " can", "probability": 0.703125}, {"start": 844.86, "end": 845.02, "word": " we", "probability": 0.94677734375}, {"start": 
845.02, "end": 845.34, "word": " use", "probability": 0.8720703125}, {"start": 845.34, "end": 845.92, "word": " this", "probability": 0.9462890625}, {"start": 845.92, "end": 846.28, "word": " table", "probability": 0.90234375}, {"start": 846.28, "end": 847.38, "word": " to", "probability": 0.927734375}, {"start": 847.38, "end": 847.76, "word": " compute", "probability": 0.8896484375}, {"start": 847.76, "end": 849.24, "word": " the", "probability": 0.89013671875}, {"start": 849.24, "end": 849.9, "word": " probabilities", "probability": 0.90478515625}, {"start": 849.9, "end": 850.52, "word": " underneath", "probability": 0.93603515625}, {"start": 850.52, "end": 851.02, "word": " the", "probability": 0.90234375}, {"start": 851.02, "end": 851.4, "word": " normal", "probability": 0.86328125}, {"start": 851.4, "end": 852.46, "word": " curve.", "probability": 0.90576171875}, {"start": 854.94, "end": 855.46, "word": " First", "probability": 0.91162109375}, {"start": 855.46, "end": 855.6, "word": " of", "probability": 0.96630859375}, {"start": 855.6, "end": 855.76, "word": " all,", "probability": 0.9501953125}, {"start": 855.78, "end": 855.86, "word": " you", "probability": 0.95263671875}, {"start": 855.86, "end": 856.02, "word": " have", "probability": 0.94140625}, {"start": 856.02, "end": 856.14, "word": " to", "probability": 0.96728515625}, {"start": 856.14, "end": 856.26, "word": " know", "probability": 0.8974609375}, {"start": 856.26, "end": 856.46, "word": " that", "probability": 0.91259765625}], "temperature": 1.0}, {"id": 35, "seek": 88061, "start": 858.01, "end": 880.61, "text": " Z has mean zero, standard deviation of one. And the values could be positive or negative. Values above the mean, zero, have positive Z values. The other one, values below the mean, have negative Z values. 
So Z score can be negative or positive.", "tokens": [1176, 575, 914, 4018, 11, 3832, 25163, 295, 472, 13, 400, 264, 4190, 727, 312, 3353, 420, 3671, 13, 7188, 1247, 3673, 264, 914, 11, 4018, 11, 362, 3353, 1176, 4190, 13, 440, 661, 472, 11, 4190, 2507, 264, 914, 11, 362, 3671, 1176, 4190, 13, 407, 1176, 6175, 393, 312, 3671, 420, 3353, 13], "avg_logprob": -0.19266182955886638, "compression_ratio": 1.6896551724137931, "no_speech_prob": 0.0, "words": [{"start": 858.01, "end": 858.39, "word": " Z", "probability": 0.6044921875}, {"start": 858.39, "end": 858.95, "word": " has", "probability": 0.92724609375}, {"start": 858.95, "end": 859.19, "word": " mean", "probability": 0.82861328125}, {"start": 859.19, "end": 859.55, "word": " zero,", "probability": 0.56982421875}, {"start": 859.85, "end": 860.25, "word": " standard", "probability": 0.900390625}, {"start": 860.25, "end": 860.61, "word": " deviation", "probability": 0.9560546875}, {"start": 860.61, "end": 860.79, "word": " of", "probability": 0.96240234375}, {"start": 860.79, "end": 861.03, "word": " one.", "probability": 0.75048828125}, {"start": 862.55, "end": 863.15, "word": " And", "probability": 0.822265625}, {"start": 863.15, "end": 863.45, "word": " the", "probability": 0.86767578125}, {"start": 863.45, "end": 863.75, "word": " values", "probability": 0.94970703125}, {"start": 863.75, "end": 863.99, "word": " could", "probability": 0.89599609375}, {"start": 863.99, "end": 864.13, "word": " be", "probability": 0.94482421875}, {"start": 864.13, "end": 864.41, "word": " positive", "probability": 0.92041015625}, {"start": 864.41, "end": 864.69, "word": " or", "probability": 0.9599609375}, {"start": 864.69, "end": 865.05, "word": " negative.", "probability": 0.9443359375}, {"start": 865.75, "end": 866.05, "word": " Values", "probability": 0.896240234375}, {"start": 866.05, "end": 866.39, "word": " above", "probability": 0.94970703125}, {"start": 866.39, "end": 866.61, "word": " the", "probability": 0.92724609375}, 
{"start": 866.61, "end": 866.81, "word": " mean,", "probability": 0.9521484375}, {"start": 866.93, "end": 867.17, "word": " zero,", "probability": 0.83447265625}, {"start": 867.37, "end": 867.73, "word": " have", "probability": 0.9267578125}, {"start": 867.73, "end": 868.87, "word": " positive", "probability": 0.900390625}, {"start": 868.87, "end": 869.13, "word": " Z", "probability": 0.623046875}, {"start": 869.13, "end": 869.61, "word": " values.", "probability": 0.77880859375}, {"start": 871.45, "end": 871.59, "word": " The", "probability": 0.87548828125}, {"start": 871.59, "end": 872.61, "word": " other", "probability": 0.880859375}, {"start": 872.61, "end": 872.85, "word": " one,", "probability": 0.84521484375}, {"start": 872.91, "end": 873.23, "word": " values", "probability": 0.95166015625}, {"start": 873.23, "end": 873.53, "word": " below", "probability": 0.90283203125}, {"start": 873.53, "end": 873.77, "word": " the", "probability": 0.91455078125}, {"start": 873.77, "end": 873.97, "word": " mean,", "probability": 0.96484375}, {"start": 874.67, "end": 875.05, "word": " have", "probability": 0.935546875}, {"start": 875.05, "end": 875.75, "word": " negative", "probability": 0.943359375}, {"start": 875.75, "end": 876.01, "word": " Z", "probability": 0.9814453125}, {"start": 876.01, "end": 876.35, "word": " values.", "probability": 0.90869140625}, {"start": 876.49, "end": 876.69, "word": " So", "probability": 0.921875}, {"start": 876.69, "end": 877.61, "word": " Z", "probability": 0.458740234375}, {"start": 877.61, "end": 878.09, "word": " score", "probability": 0.60107421875}, {"start": 878.09, "end": 878.51, "word": " can", "probability": 0.93603515625}, {"start": 878.51, "end": 878.69, "word": " be", "probability": 0.94970703125}, {"start": 878.69, "end": 879.15, "word": " negative", "probability": 0.89306640625}, {"start": 879.15, "end": 880.21, "word": " or", "probability": 0.9375}, {"start": 880.21, "end": 880.61, "word": " positive.", "probability": 
0.93408203125}], "temperature": 1.0}, {"id": 36, "seek": 90987, "start": 881.97, "end": 909.87, "text": " Now this is the formula we have, z equals x minus mu divided by six. Now this value could be positive if x is above the mean, as we mentioned before. It could be a negative if x is smaller than the mean or zero.", "tokens": [823, 341, 307, 264, 8513, 321, 362, 11, 710, 6915, 2031, 3175, 2992, 6666, 538, 2309, 13, 823, 341, 2158, 727, 312, 3353, 498, 2031, 307, 3673, 264, 914, 11, 382, 321, 2835, 949, 13, 467, 727, 312, 257, 3671, 498, 2031, 307, 4356, 813, 264, 914, 420, 4018, 13], "avg_logprob": -0.2182904411764706, "compression_ratio": 1.50354609929078, "no_speech_prob": 0.0, "words": [{"start": 881.97, "end": 882.33, "word": " Now", "probability": 0.72314453125}, {"start": 882.33, "end": 882.67, "word": " this", "probability": 0.58056640625}, {"start": 882.67, "end": 882.77, "word": " is", "probability": 0.9375}, {"start": 882.77, "end": 882.89, "word": " the", "probability": 0.90185546875}, {"start": 882.89, "end": 883.23, "word": " formula", "probability": 0.92919921875}, {"start": 883.23, "end": 883.43, "word": " we", "probability": 0.837890625}, {"start": 883.43, "end": 883.65, "word": " have,", "probability": 0.9130859375}, {"start": 884.05, "end": 884.17, "word": " z", "probability": 0.5703125}, {"start": 884.17, "end": 884.65, "word": " equals", "probability": 0.5419921875}, {"start": 884.65, "end": 885.65, "word": " x", "probability": 0.892578125}, {"start": 885.65, "end": 886.01, "word": " minus", "probability": 0.978515625}, {"start": 886.01, "end": 886.27, "word": " mu", "probability": 0.83154296875}, {"start": 886.27, "end": 886.53, "word": " divided", "probability": 0.7412109375}, {"start": 886.53, "end": 886.73, "word": " by", "probability": 0.974609375}, {"start": 886.73, "end": 886.99, "word": " six.", "probability": 0.4765625}, {"start": 892.81, "end": 893.35, "word": " Now", "probability": 0.916015625}, {"start": 893.35, "end": 893.61, 
"word": " this", "probability": 0.88427734375}, {"start": 893.61, "end": 893.99, "word": " value", "probability": 0.9658203125}, {"start": 893.99, "end": 895.07, "word": " could", "probability": 0.876953125}, {"start": 895.07, "end": 895.23, "word": " be", "probability": 0.9541015625}, {"start": 895.23, "end": 895.65, "word": " positive", "probability": 0.92333984375}, {"start": 895.65, "end": 897.57, "word": " if", "probability": 0.8388671875}, {"start": 897.57, "end": 899.11, "word": " x", "probability": 0.98486328125}, {"start": 899.11, "end": 900.71, "word": " is", "probability": 0.94873046875}, {"start": 900.71, "end": 900.97, "word": " above", "probability": 0.9541015625}, {"start": 900.97, "end": 901.17, "word": " the", "probability": 0.9326171875}, {"start": 901.17, "end": 901.33, "word": " mean,", "probability": 0.984375}, {"start": 901.95, "end": 902.89, "word": " as", "probability": 0.95263671875}, {"start": 902.89, "end": 903.01, "word": " we", "probability": 0.94677734375}, {"start": 903.01, "end": 903.31, "word": " mentioned", "probability": 0.82568359375}, {"start": 903.31, "end": 903.75, "word": " before.", "probability": 0.875}, {"start": 904.29, "end": 904.45, "word": " It", "probability": 0.6025390625}, {"start": 904.45, "end": 904.57, "word": " could", "probability": 0.88720703125}, {"start": 904.57, "end": 904.71, "word": " be", "probability": 0.95068359375}, {"start": 904.71, "end": 904.81, "word": " a", "probability": 0.59814453125}, {"start": 904.81, "end": 905.07, "word": " negative", "probability": 0.94580078125}, {"start": 905.07, "end": 907.55, "word": " if", "probability": 0.74853515625}, {"start": 907.55, "end": 907.89, "word": " x", "probability": 0.99267578125}, {"start": 907.89, "end": 908.07, "word": " is", "probability": 0.935546875}, {"start": 908.07, "end": 908.35, "word": " smaller", "probability": 0.88134765625}, {"start": 908.35, "end": 908.57, "word": " than", "probability": 0.947265625}, {"start": 908.57, "end": 908.73, 
"word": " the", "probability": 0.92578125}, {"start": 908.73, "end": 908.95, "word": " mean", "probability": 0.97412109375}, {"start": 908.95, "end": 909.51, "word": " or", "probability": 0.55517578125}, {"start": 909.51, "end": 909.87, "word": " zero.", "probability": 0.8203125}], "temperature": 1.0}, {"id": 37, "seek": 93678, "start": 913.12, "end": 936.78, "text": " Now the table we have gives the area to the right, to the left, I'm sorry, to the left, for positive and negative values of z. Okay, so we have two tables actually, one for negative on page 570, and the other one for positive values of z.", "tokens": [823, 264, 3199, 321, 362, 2709, 264, 1859, 281, 264, 558, 11, 281, 264, 1411, 11, 286, 478, 2597, 11, 281, 264, 1411, 11, 337, 3353, 293, 3671, 4190, 295, 710, 13, 1033, 11, 370, 321, 362, 732, 8020, 767, 11, 472, 337, 3671, 322, 3028, 1025, 5867, 11, 293, 264, 661, 472, 337, 3353, 4190, 295, 710, 13], "avg_logprob": -0.1996093784769376, "compression_ratio": 1.6482758620689655, "no_speech_prob": 0.0, "words": [{"start": 913.12, "end": 913.64, "word": " Now", "probability": 0.485107421875}, {"start": 913.64, "end": 914.16, "word": " the", "probability": 0.595703125}, {"start": 914.16, "end": 914.44, "word": " table", "probability": 0.82421875}, {"start": 914.44, "end": 914.62, "word": " we", "probability": 0.82861328125}, {"start": 914.62, "end": 915.02, "word": " have", "probability": 0.9462890625}, {"start": 915.02, "end": 917.06, "word": " gives", "probability": 0.71435546875}, {"start": 917.06, "end": 917.26, "word": " the", "probability": 0.8994140625}, {"start": 917.26, "end": 917.54, "word": " area", "probability": 0.798828125}, {"start": 917.54, "end": 917.78, "word": " to", "probability": 0.93212890625}, {"start": 917.78, "end": 917.92, "word": " the", "probability": 0.91943359375}, {"start": 917.92, "end": 918.14, "word": " right,", "probability": 0.802734375}, {"start": 918.42, "end": 919.02, "word": " to", "probability": 0.6083984375}, 
{"start": 919.02, "end": 919.12, "word": " the", "probability": 0.919921875}, {"start": 919.12, "end": 919.26, "word": " left,", "probability": 0.892578125}, {"start": 919.32, "end": 919.42, "word": " I'm", "probability": 0.90478515625}, {"start": 919.42, "end": 919.6, "word": " sorry,", "probability": 0.86865234375}, {"start": 919.72, "end": 919.82, "word": " to", "probability": 0.91015625}, {"start": 919.82, "end": 919.96, "word": " the", "probability": 0.92041015625}, {"start": 919.96, "end": 920.18, "word": " left,", "probability": 0.935546875}, {"start": 920.68, "end": 920.84, "word": " for", "probability": 0.94091796875}, {"start": 920.84, "end": 921.24, "word": " positive", "probability": 0.9169921875}, {"start": 921.24, "end": 921.5, "word": " and", "probability": 0.9384765625}, {"start": 921.5, "end": 921.74, "word": " negative", "probability": 0.93994140625}, {"start": 921.74, "end": 922.34, "word": " values", "probability": 0.96435546875}, {"start": 922.34, "end": 923.32, "word": " of", "probability": 0.96240234375}, {"start": 923.32, "end": 924.02, "word": " z.", "probability": 0.4736328125}, {"start": 924.86, "end": 925.38, "word": " Okay,", "probability": 0.65576171875}, {"start": 925.42, "end": 925.64, "word": " so", "probability": 0.94921875}, {"start": 925.64, "end": 925.86, "word": " we", "probability": 0.91845703125}, {"start": 925.86, "end": 926.04, "word": " have", "probability": 0.94482421875}, {"start": 926.04, "end": 926.22, "word": " two", "probability": 0.91845703125}, {"start": 926.22, "end": 926.56, "word": " tables", "probability": 0.81005859375}, {"start": 926.56, "end": 927.0, "word": " actually,", "probability": 0.79736328125}, {"start": 927.16, "end": 927.32, "word": " one", "probability": 0.923828125}, {"start": 927.32, "end": 927.56, "word": " for", "probability": 0.947265625}, {"start": 927.56, "end": 928.0, "word": " negative", "probability": 0.9296875}, {"start": 928.0, "end": 929.44, "word": " on", "probability": 0.673828125}, 
{"start": 929.44, "end": 929.66, "word": " page", "probability": 0.74560546875}, {"start": 929.66, "end": 930.38, "word": " 570,", "probability": 0.897705078125}, {"start": 931.04, "end": 932.16, "word": " and", "probability": 0.93310546875}, {"start": 932.16, "end": 932.34, "word": " the", "probability": 0.77587890625}, {"start": 932.34, "end": 932.58, "word": " other", "probability": 0.8896484375}, {"start": 932.58, "end": 932.9, "word": " one", "probability": 0.919921875}, {"start": 932.9, "end": 934.92, "word": " for", "probability": 0.8828125}, {"start": 934.92, "end": 935.28, "word": " positive", "probability": 0.93798828125}, {"start": 935.28, "end": 935.88, "word": " values", "probability": 0.966796875}, {"start": 935.88, "end": 936.54, "word": " of", "probability": 0.96044921875}, {"start": 936.54, "end": 936.78, "word": " z.", "probability": 0.97802734375}], "temperature": 1.0}, {"id": 38, "seek": 96424, "start": 937.74, "end": 964.24, "text": " I think we discussed that before when we talked about these scores. We have the same formula. Now let's look at this, the next slide. Suppose x is distributed normally with mean of 100. 
So the mean of x is 100.", "tokens": [286, 519, 321, 7152, 300, 949, 562, 321, 2825, 466, 613, 13444, 13, 492, 362, 264, 912, 8513, 13, 823, 718, 311, 574, 412, 341, 11, 264, 958, 4137, 13, 21360, 2031, 307, 12631, 5646, 365, 914, 295, 2319, 13, 407, 264, 914, 295, 2031, 307, 2319, 13], "avg_logprob": -0.17378827017180773, "compression_ratio": 1.3973509933774835, "no_speech_prob": 0.0, "words": [{"start": 937.74, "end": 937.98, "word": " I", "probability": 0.86376953125}, {"start": 937.98, "end": 938.12, "word": " think", "probability": 0.9140625}, {"start": 938.12, "end": 938.26, "word": " we", "probability": 0.9140625}, {"start": 938.26, "end": 938.72, "word": " discussed", "probability": 0.8359375}, {"start": 938.72, "end": 939.04, "word": " that", "probability": 0.87109375}, {"start": 939.04, "end": 939.46, "word": " before", "probability": 0.78955078125}, {"start": 939.46, "end": 939.86, "word": " when", "probability": 0.475341796875}, {"start": 939.86, "end": 940.28, "word": " we", "probability": 0.8525390625}, {"start": 940.28, "end": 940.5, "word": " talked", "probability": 0.8642578125}, {"start": 940.5, "end": 940.78, "word": " about", "probability": 0.91064453125}, {"start": 940.78, "end": 941.06, "word": " these", "probability": 0.82763671875}, {"start": 941.06, "end": 941.46, "word": " scores.", "probability": 0.79296875}, {"start": 941.8, "end": 941.96, "word": " We", "probability": 0.900390625}, {"start": 941.96, "end": 942.16, "word": " have", "probability": 0.89111328125}, {"start": 942.16, "end": 942.32, "word": " the", "probability": 0.91162109375}, {"start": 942.32, "end": 942.64, "word": " same", "probability": 0.90576171875}, {"start": 942.64, "end": 944.08, "word": " formula.", "probability": 0.8544921875}, {"start": 947.12, "end": 947.92, "word": " Now", "probability": 0.93212890625}, {"start": 947.92, "end": 948.36, "word": " let's", "probability": 0.869140625}, {"start": 948.36, "end": 949.22, "word": " look", "probability": 
0.95654296875}, {"start": 949.22, "end": 949.76, "word": " at", "probability": 0.96630859375}, {"start": 949.76, "end": 950.46, "word": " this,", "probability": 0.595703125}, {"start": 950.6, "end": 950.74, "word": " the", "probability": 0.916015625}, {"start": 950.74, "end": 951.02, "word": " next", "probability": 0.94140625}, {"start": 951.02, "end": 951.5, "word": " slide.", "probability": 0.96875}, {"start": 952.88, "end": 953.32, "word": " Suppose", "probability": 0.76513671875}, {"start": 953.32, "end": 953.7, "word": " x", "probability": 0.49560546875}, {"start": 953.7, "end": 955.84, "word": " is", "probability": 0.9130859375}, {"start": 955.84, "end": 956.5, "word": " distributed", "probability": 0.8984375}, {"start": 956.5, "end": 957.04, "word": " normally", "probability": 0.92333984375}, {"start": 957.04, "end": 957.56, "word": " with", "probability": 0.890625}, {"start": 957.56, "end": 957.94, "word": " mean", "probability": 0.8896484375}, {"start": 957.94, "end": 958.6, "word": " of", "probability": 0.96337890625}, {"start": 958.6, "end": 959.14, "word": " 100.", "probability": 0.767578125}, {"start": 960.94, "end": 961.74, "word": " So", "probability": 0.94921875}, {"start": 961.74, "end": 961.88, "word": " the", "probability": 0.7978515625}, {"start": 961.88, "end": 962.02, "word": " mean", "probability": 0.9599609375}, {"start": 962.02, "end": 962.14, "word": " of", "probability": 0.9677734375}, {"start": 962.14, "end": 962.5, "word": " x", "probability": 0.98291015625}, {"start": 962.5, "end": 963.86, "word": " is", "probability": 0.94970703125}, {"start": 963.86, "end": 964.24, "word": " 100.", "probability": 0.87744140625}], "temperature": 1.0}, {"id": 39, "seek": 99029, "start": 965.13, "end": 990.29, "text": " and the standard deviation of 50. So sigma is 50. Now let's see how can we compute the z-score for x equals 200. Again the formula is just x minus mu divided by sigma x 200 minus 100 divided by 50 that will give 2. 
Now the sign of this value is positive", "tokens": [293, 264, 3832, 25163, 295, 2625, 13, 407, 12771, 307, 2625, 13, 823, 718, 311, 536, 577, 393, 321, 14722, 264, 710, 12, 4417, 418, 337, 2031, 6915, 2331, 13, 3764, 264, 8513, 307, 445, 2031, 3175, 2992, 6666, 538, 12771, 2031, 2331, 3175, 2319, 6666, 538, 2625, 300, 486, 976, 568, 13, 823, 264, 1465, 295, 341, 2158, 307, 3353], "avg_logprob": -0.1909022155788637, "compression_ratio": 1.4853801169590644, "no_speech_prob": 0.0, "words": [{"start": 965.13, "end": 965.43, "word": " and", "probability": 0.5263671875}, {"start": 965.43, "end": 965.61, "word": " the", "probability": 0.5322265625}, {"start": 965.61, "end": 965.85, "word": " standard", "probability": 0.91015625}, {"start": 965.85, "end": 966.25, "word": " deviation", "probability": 0.9140625}, {"start": 966.25, "end": 966.47, "word": " of", "probability": 0.92333984375}, {"start": 966.47, "end": 966.87, "word": " 50.", "probability": 0.89111328125}, {"start": 967.19, "end": 967.31, "word": " So", "probability": 0.8095703125}, {"start": 967.31, "end": 967.59, "word": " sigma", "probability": 0.441650390625}, {"start": 967.59, "end": 967.81, "word": " is", "probability": 0.92333984375}, {"start": 967.81, "end": 968.07, "word": " 50.", "probability": 0.45947265625}, {"start": 969.59, "end": 970.13, "word": " Now", "probability": 0.9072265625}, {"start": 970.13, "end": 970.45, "word": " let's", "probability": 0.883056640625}, {"start": 970.45, "end": 970.59, "word": " see", "probability": 0.9189453125}, {"start": 970.59, "end": 970.73, "word": " how", "probability": 0.8759765625}, {"start": 970.73, "end": 970.95, "word": " can", "probability": 0.7470703125}, {"start": 970.95, "end": 971.11, "word": " we", "probability": 0.95947265625}, {"start": 971.11, "end": 971.63, "word": " compute", "probability": 0.86962890625}, {"start": 971.63, "end": 971.97, "word": " the", "probability": 0.88427734375}, {"start": 971.97, "end": 972.13, "word": " z", "probability": 
0.8779296875}, {"start": 972.13, "end": 972.55, "word": "-score", "probability": 0.8170572916666666}, {"start": 972.55, "end": 973.93, "word": " for", "probability": 0.8916015625}, {"start": 973.93, "end": 974.31, "word": " x", "probability": 0.92724609375}, {"start": 974.31, "end": 974.87, "word": " equals", "probability": 0.82177734375}, {"start": 974.87, "end": 975.65, "word": " 200.", "probability": 0.89013671875}, {"start": 976.95, "end": 977.55, "word": " Again", "probability": 0.91845703125}, {"start": 977.55, "end": 977.75, "word": " the", "probability": 0.5810546875}, {"start": 977.75, "end": 978.01, "word": " formula", "probability": 0.91162109375}, {"start": 978.01, "end": 978.31, "word": " is", "probability": 0.9501953125}, {"start": 978.31, "end": 978.61, "word": " just", "probability": 0.9228515625}, {"start": 978.61, "end": 978.93, "word": " x", "probability": 0.97412109375}, {"start": 978.93, "end": 979.23, "word": " minus", "probability": 0.98291015625}, {"start": 979.23, "end": 979.47, "word": " mu", "probability": 0.916015625}, {"start": 979.47, "end": 979.67, "word": " divided", "probability": 0.75927734375}, {"start": 979.67, "end": 979.87, "word": " by", "probability": 0.97802734375}, {"start": 979.87, "end": 980.17, "word": " sigma", "probability": 0.93115234375}, {"start": 980.17, "end": 981.81, "word": " x", "probability": 0.6484375}, {"start": 981.81, "end": 982.79, "word": " 200", "probability": 0.8603515625}, {"start": 982.79, "end": 983.27, "word": " minus", "probability": 0.984375}, {"start": 983.27, "end": 983.95, "word": " 100", "probability": 0.8994140625}, {"start": 983.95, "end": 984.71, "word": " divided", "probability": 0.78076171875}, {"start": 984.71, "end": 984.95, "word": " by", "probability": 0.96923828125}, {"start": 984.95, "end": 985.45, "word": " 50", "probability": 0.9638671875}, {"start": 985.45, "end": 986.43, "word": " that", "probability": 0.43798828125}, {"start": 986.43, "end": 986.61, "word": " will", 
"probability": 0.88525390625}, {"start": 986.61, "end": 986.81, "word": " give", "probability": 0.89501953125}, {"start": 986.81, "end": 987.03, "word": " 2.", "probability": 0.78173828125}, {"start": 987.69, "end": 988.07, "word": " Now", "probability": 0.93212890625}, {"start": 988.07, "end": 988.33, "word": " the", "probability": 0.88720703125}, {"start": 988.33, "end": 988.65, "word": " sign", "probability": 0.89453125}, {"start": 988.65, "end": 989.11, "word": " of", "probability": 0.97021484375}, {"start": 989.11, "end": 989.39, "word": " this", "probability": 0.94921875}, {"start": 989.39, "end": 989.75, "word": " value", "probability": 0.97265625}, {"start": 989.75, "end": 989.93, "word": " is", "probability": 0.94677734375}, {"start": 989.93, "end": 990.29, "word": " positive", "probability": 0.9296875}], "temperature": 1.0}, {"id": 40, "seek": 102061, "start": 991.39, "end": 1020.61, "text": " That means x is greater than the mean, because x is 200. Now, what's the meaning of 2? What does this value tell you? Yeah, exactly. x equals 200 is two standard deviations above the mean. 
Because if you look at 200, the x value,", "tokens": [663, 1355, 2031, 307, 5044, 813, 264, 914, 11, 570, 2031, 307, 2331, 13, 823, 11, 437, 311, 264, 3620, 295, 568, 30, 708, 775, 341, 2158, 980, 291, 30, 865, 11, 2293, 13, 2031, 6915, 2331, 307, 732, 3832, 31219, 763, 3673, 264, 914, 13, 1436, 498, 291, 574, 412, 2331, 11, 264, 2031, 2158, 11], "avg_logprob": -0.19544719956044493, "compression_ratio": 1.4197530864197532, "no_speech_prob": 0.0, "words": [{"start": 991.39, "end": 991.77, "word": " That", "probability": 0.65625}, {"start": 991.77, "end": 992.21, "word": " means", "probability": 0.92822265625}, {"start": 992.21, "end": 993.47, "word": " x", "probability": 0.517578125}, {"start": 993.47, "end": 993.91, "word": " is", "probability": 0.904296875}, {"start": 993.91, "end": 994.25, "word": " greater", "probability": 0.82763671875}, {"start": 994.25, "end": 994.77, "word": " than", "probability": 0.953125}, {"start": 994.77, "end": 995.73, "word": " the", "probability": 0.79345703125}, {"start": 995.73, "end": 995.87, "word": " mean,", "probability": 0.97412109375}, {"start": 995.97, "end": 996.27, "word": " because", "probability": 0.89404296875}, {"start": 996.27, "end": 996.51, "word": " x", "probability": 0.88818359375}, {"start": 996.51, "end": 996.59, "word": " is", "probability": 0.84521484375}, {"start": 996.59, "end": 996.89, "word": " 200.", "probability": 0.779296875}, {"start": 997.35, "end": 997.95, "word": " Now,", "probability": 0.91015625}, {"start": 997.99, "end": 998.13, "word": " what's", "probability": 0.8984375}, {"start": 998.13, "end": 998.25, "word": " the", "probability": 0.9208984375}, {"start": 998.25, "end": 998.43, "word": " meaning", "probability": 0.85400390625}, {"start": 998.43, "end": 998.63, "word": " of", "probability": 0.96826171875}, {"start": 998.63, "end": 998.83, "word": " 2?", "probability": 0.58447265625}, {"start": 1000.53, "end": 1001.21, "word": " What", "probability": 0.8798828125}, {"start": 1001.21, "end": 
1001.47, "word": " does", "probability": 0.97119140625}, {"start": 1001.47, "end": 1001.73, "word": " this", "probability": 0.94873046875}, {"start": 1001.73, "end": 1002.07, "word": " value", "probability": 0.9716796875}, {"start": 1002.07, "end": 1002.27, "word": " tell", "probability": 0.88525390625}, {"start": 1002.27, "end": 1002.41, "word": " you?", "probability": 0.9609375}, {"start": 1008.23, "end": 1008.91, "word": " Yeah,", "probability": 0.075439453125}, {"start": 1010.91, "end": 1011.39, "word": " exactly.", "probability": 0.87451171875}, {"start": 1011.51, "end": 1011.79, "word": " x", "probability": 0.79443359375}, {"start": 1011.79, "end": 1012.49, "word": " equals", "probability": 0.7626953125}, {"start": 1012.49, "end": 1013.09, "word": " 200", "probability": 0.91064453125}, {"start": 1013.09, "end": 1014.85, "word": " is", "probability": 0.740234375}, {"start": 1014.85, "end": 1015.07, "word": " two", "probability": 0.62939453125}, {"start": 1015.07, "end": 1015.43, "word": " standard", "probability": 0.93505859375}, {"start": 1015.43, "end": 1016.09, "word": " deviations", "probability": 0.9345703125}, {"start": 1016.09, "end": 1016.85, "word": " above", "probability": 0.943359375}, {"start": 1016.85, "end": 1017.05, "word": " the", "probability": 0.9287109375}, {"start": 1017.05, "end": 1017.23, "word": " mean.", "probability": 0.9677734375}, {"start": 1017.85, "end": 1018.19, "word": " Because", "probability": 0.92822265625}, {"start": 1018.19, "end": 1018.35, "word": " if", "probability": 0.87548828125}, {"start": 1018.35, "end": 1018.41, "word": " you", "probability": 0.9130859375}, {"start": 1018.41, "end": 1018.55, "word": " look", "probability": 0.962890625}, {"start": 1018.55, "end": 1018.69, "word": " at", "probability": 0.962890625}, {"start": 1018.69, "end": 1019.07, "word": " 200,", "probability": 0.9248046875}, {"start": 1019.65, "end": 1019.91, "word": " the", "probability": 0.8955078125}, {"start": 1019.91, "end": 1020.27, "word": 
" x", "probability": 0.98779296875}, {"start": 1020.27, "end": 1020.61, "word": " value,", "probability": 0.82861328125}], "temperature": 1.0}, {"id": 41, "seek": 104765, "start": 1022.03, "end": 1047.65, "text": " The mean is 100, sigma is 50. Now the difference between the score, which is 200, and the mu, which is 100, is equal to standard deviations, because the difference is 100. 2 times 50 is 100. So this says that x equals 200 is 2 standard deviations above the mean.", "tokens": [440, 914, 307, 2319, 11, 12771, 307, 2625, 13, 823, 264, 2649, 1296, 264, 6175, 11, 597, 307, 2331, 11, 293, 264, 2992, 11, 597, 307, 2319, 11, 307, 2681, 281, 3832, 31219, 763, 11, 570, 264, 2649, 307, 2319, 13, 568, 1413, 2625, 307, 2319, 13, 407, 341, 1619, 300, 2031, 6915, 2331, 307, 568, 3832, 31219, 763, 3673, 264, 914, 13], "avg_logprob": -0.22143555292859674, "compression_ratio": 1.6645569620253164, "no_speech_prob": 0.0, "words": [{"start": 1022.03, "end": 1022.33, "word": " The", "probability": 0.587890625}, {"start": 1022.33, "end": 1022.63, "word": " mean", "probability": 0.921875}, {"start": 1022.63, "end": 1023.41, "word": " is", "probability": 0.912109375}, {"start": 1023.41, "end": 1024.03, "word": " 100,", "probability": 0.407470703125}, {"start": 1024.45, "end": 1024.63, "word": " sigma", "probability": 0.62255859375}, {"start": 1024.63, "end": 1024.83, "word": " is", "probability": 0.93603515625}, {"start": 1024.83, "end": 1025.21, "word": " 50.", "probability": 0.9541015625}, {"start": 1025.73, "end": 1025.95, "word": " Now", "probability": 0.90966796875}, {"start": 1025.95, "end": 1026.11, "word": " the", "probability": 0.63427734375}, {"start": 1026.11, "end": 1026.49, "word": " difference", "probability": 0.8681640625}, {"start": 1026.49, "end": 1026.93, "word": " between", "probability": 0.87548828125}, {"start": 1026.93, "end": 1028.27, "word": " the", "probability": 0.7939453125}, {"start": 1028.27, "end": 1028.65, "word": " score,", "probability": 
0.81201171875}, {"start": 1029.21, "end": 1029.49, "word": " which", "probability": 0.95361328125}, {"start": 1029.49, "end": 1029.69, "word": " is", "probability": 0.95068359375}, {"start": 1029.69, "end": 1030.09, "word": " 200,", "probability": 0.91015625}, {"start": 1030.37, "end": 1031.51, "word": " and", "probability": 0.9375}, {"start": 1031.51, "end": 1031.67, "word": " the", "probability": 0.8466796875}, {"start": 1031.67, "end": 1031.83, "word": " mu,", "probability": 0.5673828125}, {"start": 1032.07, "end": 1032.11, "word": " which", "probability": 0.955078125}, {"start": 1032.11, "end": 1032.49, "word": " is", "probability": 0.95361328125}, {"start": 1032.49, "end": 1033.25, "word": " 100,", "probability": 0.89697265625}, {"start": 1034.01, "end": 1035.13, "word": " is", "probability": 0.87646484375}, {"start": 1035.13, "end": 1036.15, "word": " equal", "probability": 0.89013671875}, {"start": 1036.15, "end": 1036.81, "word": " to", "probability": 0.7685546875}, {"start": 1036.81, "end": 1037.19, "word": " standard", "probability": 0.5546875}, {"start": 1037.19, "end": 1037.63, "word": " deviations,", "probability": 0.902587890625}, {"start": 1037.81, "end": 1038.07, "word": " because", "probability": 0.89697265625}, {"start": 1038.07, "end": 1038.33, "word": " the", "probability": 0.65576171875}, {"start": 1038.33, "end": 1038.57, "word": " difference", "probability": 0.8818359375}, {"start": 1038.57, "end": 1038.69, "word": " is", "probability": 0.7470703125}, {"start": 1038.69, "end": 1039.03, "word": " 100.", "probability": 0.900390625}, {"start": 1039.67, "end": 1039.91, "word": " 2", "probability": 0.611328125}, {"start": 1039.91, "end": 1040.17, "word": " times", "probability": 0.82958984375}, {"start": 1040.17, "end": 1040.57, "word": " 50", "probability": 0.96826171875}, {"start": 1040.57, "end": 1040.75, "word": " is", "probability": 0.93896484375}, {"start": 1040.75, "end": 1041.03, "word": " 100.", "probability": 0.92041015625}, {"start": 
1042.15, "end": 1042.73, "word": " So", "probability": 0.9345703125}, {"start": 1042.73, "end": 1042.97, "word": " this", "probability": 0.8564453125}, {"start": 1042.97, "end": 1043.29, "word": " says", "probability": 0.86865234375}, {"start": 1043.29, "end": 1043.63, "word": " that", "probability": 0.92724609375}, {"start": 1043.63, "end": 1043.93, "word": " x", "probability": 0.77197265625}, {"start": 1043.93, "end": 1044.23, "word": " equals", "probability": 0.80712890625}, {"start": 1044.23, "end": 1044.63, "word": " 200", "probability": 0.7119140625}, {"start": 1044.63, "end": 1044.91, "word": " is", "probability": 0.88037109375}, {"start": 1044.91, "end": 1045.09, "word": " 2", "probability": 0.5576171875}, {"start": 1045.09, "end": 1045.43, "word": " standard", "probability": 0.93212890625}, {"start": 1045.43, "end": 1046.07, "word": " deviations", "probability": 0.958740234375}, {"start": 1046.07, "end": 1047.25, "word": " above", "probability": 0.93115234375}, {"start": 1047.25, "end": 1047.49, "word": " the", "probability": 0.93359375}, {"start": 1047.49, "end": 1047.65, "word": " mean.", "probability": 0.97705078125}], "temperature": 1.0}, {"id": 42, "seek": 107245, "start": 1048.53, "end": 1072.45, "text": " If z is negative, you can say that x is two standard deviations below them. Make sense? So that's how can we compute the z square. Now, when we transform from normal distribution to standardized, still we will have the same shape. 
I mean the distribution is still normally distributed.", "tokens": [759, 710, 307, 3671, 11, 291, 393, 584, 300, 2031, 307, 732, 3832, 31219, 763, 2507, 552, 13, 4387, 2020, 30, 407, 300, 311, 577, 393, 321, 14722, 264, 710, 3732, 13, 823, 11, 562, 321, 4088, 490, 2710, 7316, 281, 31677, 11, 920, 321, 486, 362, 264, 912, 3909, 13, 286, 914, 264, 7316, 307, 920, 5646, 12631, 13], "avg_logprob": -0.18378585577011108, "compression_ratio": 1.5376344086021505, "no_speech_prob": 0.0, "words": [{"start": 1048.53, "end": 1048.89, "word": " If", "probability": 0.8212890625}, {"start": 1048.89, "end": 1049.07, "word": " z", "probability": 0.6025390625}, {"start": 1049.07, "end": 1049.21, "word": " is", "probability": 0.9482421875}, {"start": 1049.21, "end": 1049.55, "word": " negative,", "probability": 0.94189453125}, {"start": 1049.93, "end": 1050.05, "word": " you", "probability": 0.9423828125}, {"start": 1050.05, "end": 1050.27, "word": " can", "probability": 0.94384765625}, {"start": 1050.27, "end": 1050.53, "word": " say", "probability": 0.9169921875}, {"start": 1050.53, "end": 1051.11, "word": " that", "probability": 0.92529296875}, {"start": 1051.11, "end": 1052.89, "word": " x", "probability": 0.81396484375}, {"start": 1052.89, "end": 1053.33, "word": " is", "probability": 0.79052734375}, {"start": 1053.33, "end": 1053.97, "word": " two", "probability": 0.806640625}, {"start": 1053.97, "end": 1054.33, "word": " standard", "probability": 0.95458984375}, {"start": 1054.33, "end": 1054.87, "word": " deviations", "probability": 0.93701171875}, {"start": 1054.87, "end": 1055.63, "word": " below", "probability": 0.85693359375}, {"start": 1055.63, "end": 1056.19, "word": " them.", "probability": 0.61328125}, {"start": 1056.71, "end": 1056.97, "word": " Make", "probability": 0.65185546875}, {"start": 1056.97, "end": 1057.29, "word": " sense?", "probability": 0.82373046875}, {"start": 1057.65, "end": 1058.31, "word": " So", "probability": 0.92529296875}, {"start": 1058.31, 
"end": 1058.59, "word": " that's", "probability": 0.873046875}, {"start": 1058.59, "end": 1058.71, "word": " how", "probability": 0.9365234375}, {"start": 1058.71, "end": 1058.89, "word": " can", "probability": 0.5107421875}, {"start": 1058.89, "end": 1059.03, "word": " we", "probability": 0.93603515625}, {"start": 1059.03, "end": 1059.55, "word": " compute", "probability": 0.9052734375}, {"start": 1059.55, "end": 1060.17, "word": " the", "probability": 0.6162109375}, {"start": 1060.17, "end": 1060.35, "word": " z", "probability": 0.439453125}, {"start": 1060.35, "end": 1060.63, "word": " square.", "probability": 0.32763671875}, {"start": 1061.35, "end": 1061.71, "word": " Now,", "probability": 0.93896484375}, {"start": 1062.17, "end": 1062.45, "word": " when", "probability": 0.94091796875}, {"start": 1062.45, "end": 1062.67, "word": " we", "probability": 0.9638671875}, {"start": 1062.67, "end": 1063.35, "word": " transform", "probability": 0.9072265625}, {"start": 1063.35, "end": 1063.67, "word": " from", "probability": 0.8701171875}, {"start": 1063.67, "end": 1064.07, "word": " normal", "probability": 0.85546875}, {"start": 1064.07, "end": 1064.79, "word": " distribution", "probability": 0.83447265625}, {"start": 1064.79, "end": 1065.97, "word": " to", "probability": 0.900390625}, {"start": 1065.97, "end": 1066.39, "word": " standardized,", "probability": 0.74853515625}, {"start": 1067.27, "end": 1067.69, "word": " still", "probability": 0.9267578125}, {"start": 1067.69, "end": 1067.89, "word": " we", "probability": 0.83203125}, {"start": 1067.89, "end": 1068.05, "word": " will", "probability": 0.88037109375}, {"start": 1068.05, "end": 1068.31, "word": " have", "probability": 0.94482421875}, {"start": 1068.31, "end": 1068.49, "word": " the", "probability": 0.91357421875}, {"start": 1068.49, "end": 1068.79, "word": " same", "probability": 0.900390625}, {"start": 1068.79, "end": 1069.17, "word": " shape.", "probability": 0.919921875}, {"start": 1069.47, "end": 
1069.49, "word": " I", "probability": 0.982421875}, {"start": 1069.49, "end": 1069.65, "word": " mean", "probability": 0.96728515625}, {"start": 1069.65, "end": 1069.87, "word": " the", "probability": 0.440673828125}, {"start": 1069.87, "end": 1070.45, "word": " distribution", "probability": 0.8662109375}, {"start": 1070.45, "end": 1070.69, "word": " is", "probability": 0.91455078125}, {"start": 1070.69, "end": 1070.91, "word": " still", "probability": 0.95556640625}, {"start": 1070.91, "end": 1071.35, "word": " normally", "probability": 0.90234375}, {"start": 1071.35, "end": 1072.45, "word": " distributed.", "probability": 0.912109375}], "temperature": 1.0}, {"id": 43, "seek": 109716, "start": 1073.92, "end": 1097.16, "text": " So note, the shape of the distribution is the same, only the scale has changed. So we can express the problem in original units, X, or in a standardized unit, Z. So when we have X, just use this equation to transform to this form.", "tokens": [407, 3637, 11, 264, 3909, 295, 264, 7316, 307, 264, 912, 11, 787, 264, 4373, 575, 3105, 13, 407, 321, 393, 5109, 264, 1154, 294, 3380, 6815, 11, 1783, 11, 420, 294, 257, 31677, 4985, 11, 1176, 13, 407, 562, 321, 362, 1783, 11, 445, 764, 341, 5367, 281, 4088, 281, 341, 1254, 13], "avg_logprob": -0.19090909416025334, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 1073.92, "end": 1074.18, "word": " So", "probability": 0.88232421875}, {"start": 1074.18, "end": 1074.42, "word": " note,", "probability": 0.53125}, {"start": 1075.06, "end": 1075.2, "word": " the", "probability": 0.90869140625}, {"start": 1075.2, "end": 1075.52, "word": " shape", "probability": 0.90625}, {"start": 1075.52, "end": 1075.7, "word": " of", "probability": 0.9658203125}, {"start": 1075.7, "end": 1075.8, "word": " the", "probability": 0.779296875}, {"start": 1075.8, "end": 1076.3, "word": " distribution", "probability": 0.8115234375}, {"start": 1076.3, "end": 1076.52, "word": " is", "probability": 
0.88720703125}, {"start": 1076.52, "end": 1076.7, "word": " the", "probability": 0.9140625}, {"start": 1076.7, "end": 1077.0, "word": " same,", "probability": 0.9091796875}, {"start": 1077.6, "end": 1077.9, "word": " only", "probability": 0.9248046875}, {"start": 1077.9, "end": 1078.16, "word": " the", "probability": 0.9169921875}, {"start": 1078.16, "end": 1078.5, "word": " scale", "probability": 0.828125}, {"start": 1078.5, "end": 1078.84, "word": " has", "probability": 0.93017578125}, {"start": 1078.84, "end": 1079.3, "word": " changed.", "probability": 0.90478515625}, {"start": 1081.06, "end": 1081.76, "word": " So", "probability": 0.94677734375}, {"start": 1081.76, "end": 1081.9, "word": " we", "probability": 0.8623046875}, {"start": 1081.9, "end": 1082.08, "word": " can", "probability": 0.939453125}, {"start": 1082.08, "end": 1082.38, "word": " express", "probability": 0.90869140625}, {"start": 1082.38, "end": 1082.74, "word": " the", "probability": 0.91552734375}, {"start": 1082.74, "end": 1083.1, "word": " problem", "probability": 0.87646484375}, {"start": 1083.1, "end": 1083.5, "word": " in", "probability": 0.92626953125}, {"start": 1083.5, "end": 1084.5, "word": " original", "probability": 0.7890625}, {"start": 1084.5, "end": 1085.04, "word": " units,", "probability": 0.87060546875}, {"start": 1085.24, "end": 1085.48, "word": " X,", "probability": 0.56298828125}, {"start": 1086.22, "end": 1086.6, "word": " or", "probability": 0.962890625}, {"start": 1086.6, "end": 1086.82, "word": " in", "probability": 0.9306640625}, {"start": 1086.82, "end": 1086.94, "word": " a", "probability": 0.46826171875}, {"start": 1086.94, "end": 1087.28, "word": " standardized", "probability": 0.91748046875}, {"start": 1087.28, "end": 1088.5, "word": " unit,", "probability": 0.93408203125}, {"start": 1088.94, "end": 1089.22, "word": " Z.", "probability": 0.90771484375}, {"start": 1089.62, "end": 1090.0, "word": " So", "probability": 0.96044921875}, {"start": 1090.0, "end": 
1090.36, "word": " when", "probability": 0.84033203125}, {"start": 1090.36, "end": 1090.64, "word": " we", "probability": 0.95947265625}, {"start": 1090.64, "end": 1091.04, "word": " have", "probability": 0.94873046875}, {"start": 1091.04, "end": 1091.44, "word": " X,", "probability": 0.9443359375}, {"start": 1092.06, "end": 1092.46, "word": " just", "probability": 0.912109375}, {"start": 1092.46, "end": 1092.8, "word": " use", "probability": 0.86669921875}, {"start": 1092.8, "end": 1093.12, "word": " this", "probability": 0.94873046875}, {"start": 1093.12, "end": 1093.68, "word": " equation", "probability": 0.966796875}, {"start": 1093.68, "end": 1095.02, "word": " to", "probability": 0.68115234375}, {"start": 1095.02, "end": 1096.38, "word": " transform", "probability": 0.91552734375}, {"start": 1096.38, "end": 1096.62, "word": " to", "probability": 0.75439453125}, {"start": 1096.62, "end": 1096.8, "word": " this", "probability": 0.3095703125}, {"start": 1096.8, "end": 1097.16, "word": " form.", "probability": 0.84814453125}], "temperature": 1.0}, {"id": 44, "seek": 112942, "start": 1101.36, "end": 1129.42, "text": " Now, for example, suppose we have normal distribution and we are interested in the area between A and B. Now, the area between A and B, it means the probability between them. So statistically speaking, area means probability. 
So probability between A and B, I mean probability of X greater than or equal A and less than or equal B is the same as X greater than A or less than B.", "tokens": [823, 11, 337, 1365, 11, 7297, 321, 362, 2710, 7316, 293, 321, 366, 3102, 294, 264, 1859, 1296, 316, 293, 363, 13, 823, 11, 264, 1859, 1296, 316, 293, 363, 11, 309, 1355, 264, 8482, 1296, 552, 13, 407, 36478, 4124, 11, 1859, 1355, 8482, 13, 407, 8482, 1296, 316, 293, 363, 11, 286, 914, 8482, 295, 1783, 5044, 813, 420, 2681, 316, 293, 1570, 813, 420, 2681, 363, 307, 264, 912, 382, 1783, 5044, 813, 316, 420, 1570, 813, 363, 13], "avg_logprob": -0.13911898308489695, "compression_ratio": 1.9637305699481866, "no_speech_prob": 0.0, "words": [{"start": 1101.36, "end": 1101.62, "word": " Now,", "probability": 0.77392578125}, {"start": 1101.74, "end": 1101.84, "word": " for", "probability": 0.93017578125}, {"start": 1101.84, "end": 1102.16, "word": " example,", "probability": 0.97314453125}, {"start": 1102.26, "end": 1102.54, "word": " suppose", "probability": 0.8955078125}, {"start": 1102.54, "end": 1102.7, "word": " we", "probability": 0.91650390625}, {"start": 1102.7, "end": 1102.88, "word": " have", "probability": 0.94775390625}, {"start": 1102.88, "end": 1103.2, "word": " normal", "probability": 0.78515625}, {"start": 1103.2, "end": 1103.76, "word": " distribution", "probability": 0.8251953125}, {"start": 1103.76, "end": 1104.5, "word": " and", "probability": 0.57177734375}, {"start": 1104.5, "end": 1104.64, "word": " we", "probability": 0.939453125}, {"start": 1104.64, "end": 1104.8, "word": " are", "probability": 0.91748046875}, {"start": 1104.8, "end": 1105.28, "word": " interested", "probability": 0.85595703125}, {"start": 1105.28, "end": 1105.64, "word": " in", "probability": 0.9482421875}, {"start": 1105.64, "end": 1105.78, "word": " the", "probability": 0.912109375}, {"start": 1105.78, "end": 1106.04, "word": " area", "probability": 0.90771484375}, {"start": 1106.04, "end": 1106.38, "word": " 
between", "probability": 0.890625}, {"start": 1106.38, "end": 1106.56, "word": " A", "probability": 0.73681640625}, {"start": 1106.56, "end": 1106.72, "word": " and", "probability": 0.9521484375}, {"start": 1106.72, "end": 1106.94, "word": " B.", "probability": 0.9873046875}, {"start": 1108.52, "end": 1109.06, "word": " Now,", "probability": 0.88134765625}, {"start": 1110.14, "end": 1111.16, "word": " the", "probability": 0.798828125}, {"start": 1111.16, "end": 1111.46, "word": " area", "probability": 0.90771484375}, {"start": 1111.46, "end": 1111.82, "word": " between", "probability": 0.865234375}, {"start": 1111.82, "end": 1112.12, "word": " A", "probability": 0.9873046875}, {"start": 1112.12, "end": 1112.28, "word": " and", "probability": 0.94775390625}, {"start": 1112.28, "end": 1112.46, "word": " B,", "probability": 0.99560546875}, {"start": 1112.58, "end": 1112.66, "word": " it", "probability": 0.89501953125}, {"start": 1112.66, "end": 1112.9, "word": " means", "probability": 0.927734375}, {"start": 1112.9, "end": 1113.08, "word": " the", "probability": 0.89501953125}, {"start": 1113.08, "end": 1113.42, "word": " probability", "probability": 0.939453125}, {"start": 1113.42, "end": 1113.8, "word": " between", "probability": 0.89990234375}, {"start": 1113.8, "end": 1114.02, "word": " them.", "probability": 0.8623046875}, {"start": 1114.46, "end": 1114.7, "word": " So", "probability": 0.94873046875}, {"start": 1114.7, "end": 1115.36, "word": " statistically", "probability": 0.68115234375}, {"start": 1115.36, "end": 1115.98, "word": " speaking,", "probability": 0.88720703125}, {"start": 1116.14, "end": 1116.48, "word": " area", "probability": 0.85595703125}, {"start": 1116.48, "end": 1117.08, "word": " means", "probability": 0.927734375}, {"start": 1117.08, "end": 1117.5, "word": " probability.", "probability": 0.9423828125}, {"start": 1118.62, "end": 1119.14, "word": " So", "probability": 0.95361328125}, {"start": 1119.14, "end": 1119.46, "word": " probability", 
"probability": 0.814453125}, {"start": 1119.46, "end": 1119.98, "word": " between", "probability": 0.87255859375}, {"start": 1119.98, "end": 1120.18, "word": " A", "probability": 0.9814453125}, {"start": 1120.18, "end": 1120.36, "word": " and", "probability": 0.94384765625}, {"start": 1120.36, "end": 1120.62, "word": " B,", "probability": 0.998046875}, {"start": 1121.44, "end": 1121.58, "word": " I", "probability": 0.970703125}, {"start": 1121.58, "end": 1121.8, "word": " mean", "probability": 0.96484375}, {"start": 1121.8, "end": 1122.42, "word": " probability", "probability": 0.580078125}, {"start": 1122.42, "end": 1122.7, "word": " of", "probability": 0.94482421875}, {"start": 1122.7, "end": 1122.9, "word": " X", "probability": 0.8681640625}, {"start": 1122.9, "end": 1123.2, "word": " greater", "probability": 0.861328125}, {"start": 1123.2, "end": 1123.5, "word": " than", "probability": 0.93505859375}, {"start": 1123.5, "end": 1123.64, "word": " or", "probability": 0.95556640625}, {"start": 1123.64, "end": 1123.88, "word": " equal", "probability": 0.900390625}, {"start": 1123.88, "end": 1124.14, "word": " A", "probability": 0.61767578125}, {"start": 1124.14, "end": 1124.36, "word": " and", "probability": 0.75927734375}, {"start": 1124.36, "end": 1124.56, "word": " less", "probability": 0.90966796875}, {"start": 1124.56, "end": 1124.72, "word": " than", "probability": 0.943359375}, {"start": 1124.72, "end": 1124.84, "word": " or", "probability": 0.9599609375}, {"start": 1124.84, "end": 1125.06, "word": " equal", "probability": 0.9130859375}, {"start": 1125.06, "end": 1125.38, "word": " B", "probability": 0.98876953125}, {"start": 1125.38, "end": 1126.24, "word": " is", "probability": 0.57861328125}, {"start": 1126.24, "end": 1126.42, "word": " the", "probability": 0.9267578125}, {"start": 1126.42, "end": 1126.64, "word": " same", "probability": 0.90869140625}, {"start": 1126.64, "end": 1126.9, "word": " as", "probability": 0.94384765625}, {"start": 1126.9, "end": 
1127.26, "word": " X", "probability": 0.9775390625}, {"start": 1127.26, "end": 1127.62, "word": " greater", "probability": 0.90771484375}, {"start": 1127.62, "end": 1127.94, "word": " than", "probability": 0.955078125}, {"start": 1127.94, "end": 1128.18, "word": " A", "probability": 0.96923828125}, {"start": 1128.18, "end": 1128.6, "word": " or", "probability": 0.92822265625}, {"start": 1128.6, "end": 1128.98, "word": " less", "probability": 0.9384765625}, {"start": 1128.98, "end": 1129.28, "word": " than", "probability": 0.9521484375}, {"start": 1129.28, "end": 1129.42, "word": " B.", "probability": 0.912109375}], "temperature": 1.0}, {"id": 45, "seek": 115939, "start": 1130.45, "end": 1159.39, "text": " that means the probability of X equals A this probability is zero or probability of X equals B is also zero so in continuous distribution the equal sign does not matter I mean if we have equal sign or we don't have these probabilities are the same so I mean for example if we are interested", "tokens": [300, 1355, 264, 8482, 295, 1783, 6915, 316, 341, 8482, 307, 4018, 420, 8482, 295, 1783, 6915, 363, 307, 611, 4018, 370, 294, 10957, 7316, 264, 2681, 1465, 775, 406, 1871, 286, 914, 498, 321, 362, 2681, 1465, 420, 321, 500, 380, 362, 613, 33783, 366, 264, 912, 370, 286, 914, 337, 1365, 498, 321, 366, 3102], "avg_logprob": -0.20406788561878533, "compression_ratio": 1.81875, "no_speech_prob": 0.0, "words": [{"start": 1130.45, "end": 1131.19, "word": " that", "probability": 0.212646484375}, {"start": 1131.19, "end": 1131.65, "word": " means", "probability": 0.91796875}, {"start": 1131.65, "end": 1132.95, "word": " the", "probability": 0.5859375}, {"start": 1132.95, "end": 1133.49, "word": " probability", "probability": 0.8974609375}, {"start": 1133.49, "end": 1133.91, "word": " of", "probability": 0.931640625}, {"start": 1133.91, "end": 1134.27, "word": " X", "probability": 0.6044921875}, {"start": 1134.27, "end": 1134.89, "word": " equals", "probability": 
0.61376953125}, {"start": 1134.89, "end": 1135.29, "word": " A", "probability": 0.298095703125}, {"start": 1135.29, "end": 1137.21, "word": " this", "probability": 0.546875}, {"start": 1137.21, "end": 1137.59, "word": " probability", "probability": 0.91162109375}, {"start": 1137.59, "end": 1137.89, "word": " is", "probability": 0.9208984375}, {"start": 1137.89, "end": 1138.17, "word": " zero", "probability": 0.72998046875}, {"start": 1138.17, "end": 1140.91, "word": " or", "probability": 0.64599609375}, {"start": 1140.91, "end": 1141.41, "word": " probability", "probability": 0.78759765625}, {"start": 1141.41, "end": 1141.63, "word": " of", "probability": 0.92431640625}, {"start": 1141.63, "end": 1141.81, "word": " X", "probability": 0.96435546875}, {"start": 1141.81, "end": 1142.27, "word": " equals", "probability": 0.92431640625}, {"start": 1142.27, "end": 1142.51, "word": " B", "probability": 0.9736328125}, {"start": 1142.51, "end": 1143.29, "word": " is", "probability": 0.91552734375}, {"start": 1143.29, "end": 1143.57, "word": " also", "probability": 0.8818359375}, {"start": 1143.57, "end": 1143.91, "word": " zero", "probability": 0.89990234375}, {"start": 1143.91, "end": 1145.09, "word": " so", "probability": 0.468994140625}, {"start": 1145.09, "end": 1145.47, "word": " in", "probability": 0.84814453125}, {"start": 1145.47, "end": 1145.95, "word": " continuous", "probability": 0.8134765625}, {"start": 1145.95, "end": 1146.49, "word": " distribution", "probability": 0.84033203125}, {"start": 1146.49, "end": 1146.93, "word": " the", "probability": 0.787109375}, {"start": 1146.93, "end": 1147.25, "word": " equal", "probability": 0.68798828125}, {"start": 1147.25, "end": 1147.59, "word": " sign", "probability": 0.892578125}, {"start": 1147.59, "end": 1147.83, "word": " does", "probability": 0.95751953125}, {"start": 1147.83, "end": 1148.07, "word": " not", "probability": 0.9541015625}, {"start": 1148.07, "end": 1148.39, "word": " matter", "probability": 
0.86328125}, {"start": 1148.39, "end": 1149.23, "word": " I", "probability": 0.74853515625}, {"start": 1149.23, "end": 1149.39, "word": " mean", "probability": 0.9677734375}, {"start": 1149.39, "end": 1149.63, "word": " if", "probability": 0.89892578125}, {"start": 1149.63, "end": 1150.15, "word": " we", "probability": 0.9384765625}, {"start": 1150.15, "end": 1150.33, "word": " have", "probability": 0.94873046875}, {"start": 1150.33, "end": 1150.63, "word": " equal", "probability": 0.86376953125}, {"start": 1150.63, "end": 1151.05, "word": " sign", "probability": 0.9052734375}, {"start": 1151.05, "end": 1151.29, "word": " or", "probability": 0.92919921875}, {"start": 1151.29, "end": 1151.47, "word": " we", "probability": 0.84765625}, {"start": 1151.47, "end": 1151.71, "word": " don't", "probability": 0.95751953125}, {"start": 1151.71, "end": 1152.05, "word": " have", "probability": 0.9404296875}, {"start": 1152.05, "end": 1152.71, "word": " these", "probability": 0.755859375}, {"start": 1152.71, "end": 1153.17, "word": " probabilities", "probability": 0.9248046875}, {"start": 1153.17, "end": 1154.03, "word": " are", "probability": 0.93896484375}, {"start": 1154.03, "end": 1155.13, "word": " the", "probability": 0.88330078125}, {"start": 1155.13, "end": 1155.35, "word": " same", "probability": 0.91259765625}, {"start": 1155.35, "end": 1157.43, "word": " so", "probability": 0.57421875}, {"start": 1157.43, "end": 1157.61, "word": " I", "probability": 0.92333984375}, {"start": 1157.61, "end": 1157.81, "word": " mean", "probability": 0.95751953125}, {"start": 1157.81, "end": 1158.19, "word": " for", "probability": 0.90478515625}, {"start": 1158.19, "end": 1158.49, "word": " example", "probability": 0.97705078125}, {"start": 1158.49, "end": 1158.71, "word": " if", "probability": 0.92236328125}, {"start": 1158.71, "end": 1158.83, "word": " we", "probability": 0.9365234375}, {"start": 1158.83, "end": 1158.97, "word": " are", "probability": 0.93994140625}, {"start": 
1158.97, "end": 1159.39, "word": " interested", "probability": 0.904296875}], "temperature": 1.0}, {"id": 46, "seek": 118543, "start": 1160.31, "end": 1185.43, "text": " for probability of X smaller than or equal to E. This probability is the same as X smaller than E. Or on the other hand, if you are interested in the area above B greater than or equal to B, it's the same as X smaller than E. So don't worry about the equal sign.", "tokens": [337, 8482, 295, 1783, 4356, 813, 420, 2681, 281, 462, 13, 639, 8482, 307, 264, 912, 382, 1783, 4356, 813, 462, 13, 1610, 322, 264, 661, 1011, 11, 498, 291, 366, 3102, 294, 264, 1859, 3673, 363, 5044, 813, 420, 2681, 281, 363, 11, 309, 311, 264, 912, 382, 1783, 4356, 813, 462, 13, 407, 500, 380, 3292, 466, 264, 2681, 1465, 13], "avg_logprob": -0.21362305339425802, "compression_ratio": 1.7533333333333334, "no_speech_prob": 0.0, "words": [{"start": 1160.31, "end": 1160.63, "word": " for", "probability": 0.346923828125}, {"start": 1160.63, "end": 1161.11, "word": " probability", "probability": 0.521484375}, {"start": 1161.11, "end": 1161.33, "word": " of", "probability": 0.81640625}, {"start": 1161.33, "end": 1161.57, "word": " X", "probability": 0.5830078125}, {"start": 1161.57, "end": 1162.51, "word": " smaller", "probability": 0.4892578125}, {"start": 1162.51, "end": 1162.81, "word": " than", "probability": 0.9248046875}, {"start": 1162.81, "end": 1162.93, "word": " or", "probability": 0.95849609375}, {"start": 1162.93, "end": 1163.13, "word": " equal", "probability": 0.88134765625}, {"start": 1163.13, "end": 1163.25, "word": " to", "probability": 0.6953125}, {"start": 1163.25, "end": 1163.45, "word": " E.", "probability": 0.3076171875}, {"start": 1164.85, "end": 1165.37, "word": " This", "probability": 0.8515625}, {"start": 1165.37, "end": 1165.87, "word": " probability", "probability": 0.94189453125}, {"start": 1165.87, "end": 1166.15, "word": " is", "probability": 0.93994140625}, {"start": 1166.15, "end": 1166.33, "word": " 
the", "probability": 0.88330078125}, {"start": 1166.33, "end": 1166.53, "word": " same", "probability": 0.92431640625}, {"start": 1166.53, "end": 1166.99, "word": " as", "probability": 0.962890625}, {"start": 1166.99, "end": 1169.19, "word": " X", "probability": 0.849609375}, {"start": 1169.19, "end": 1169.85, "word": " smaller", "probability": 0.44189453125}, {"start": 1169.85, "end": 1170.17, "word": " than", "probability": 0.95068359375}, {"start": 1170.17, "end": 1170.37, "word": " E.", "probability": 0.66015625}, {"start": 1171.33, "end": 1171.79, "word": " Or", "probability": 0.90185546875}, {"start": 1171.79, "end": 1171.99, "word": " on", "probability": 0.65380859375}, {"start": 1171.99, "end": 1172.11, "word": " the", "probability": 0.90966796875}, {"start": 1172.11, "end": 1172.31, "word": " other", "probability": 0.892578125}, {"start": 1172.31, "end": 1172.55, "word": " hand,", "probability": 0.923828125}, {"start": 1172.63, "end": 1172.69, "word": " if", "probability": 0.95068359375}, {"start": 1172.69, "end": 1172.79, "word": " you", "probability": 0.74072265625}, {"start": 1172.79, "end": 1172.91, "word": " are", "probability": 0.68505859375}, {"start": 1172.91, "end": 1173.35, "word": " interested", "probability": 0.857421875}, {"start": 1173.35, "end": 1173.63, "word": " in", "probability": 0.853515625}, {"start": 1173.63, "end": 1173.73, "word": " the", "probability": 0.736328125}, {"start": 1173.73, "end": 1174.07, "word": " area", "probability": 0.908203125}, {"start": 1174.07, "end": 1175.11, "word": " above", "probability": 0.91796875}, {"start": 1175.11, "end": 1176.03, "word": " B", "probability": 0.8994140625}, {"start": 1176.03, "end": 1177.21, "word": " greater", "probability": 0.7294921875}, {"start": 1177.21, "end": 1177.47, "word": " than", "probability": 0.93310546875}, {"start": 1177.47, "end": 1177.59, "word": " or", "probability": 0.955078125}, {"start": 1177.59, "end": 1177.79, "word": " equal", "probability": 0.90966796875}, 
{"start": 1177.79, "end": 1177.93, "word": " to", "probability": 0.84228515625}, {"start": 1177.93, "end": 1178.15, "word": " B,", "probability": 0.9619140625}, {"start": 1178.61, "end": 1178.87, "word": " it's", "probability": 0.691650390625}, {"start": 1178.87, "end": 1179.01, "word": " the", "probability": 0.91845703125}, {"start": 1179.01, "end": 1179.23, "word": " same", "probability": 0.9150390625}, {"start": 1179.23, "end": 1179.73, "word": " as", "probability": 0.962890625}, {"start": 1179.73, "end": 1181.37, "word": " X", "probability": 0.96826171875}, {"start": 1181.37, "end": 1182.35, "word": " smaller", "probability": 0.822265625}, {"start": 1182.35, "end": 1182.63, "word": " than", "probability": 0.947265625}, {"start": 1182.63, "end": 1182.79, "word": " E.", "probability": 0.7939453125}, {"start": 1183.49, "end": 1184.01, "word": " So", "probability": 0.9375}, {"start": 1184.01, "end": 1184.19, "word": " don't", "probability": 0.884765625}, {"start": 1184.19, "end": 1184.35, "word": " worry", "probability": 0.9306640625}, {"start": 1184.35, "end": 1184.59, "word": " about", "probability": 0.90380859375}, {"start": 1184.59, "end": 1184.77, "word": " the", "probability": 0.9150390625}, {"start": 1184.77, "end": 1185.01, "word": " equal", "probability": 0.8125}, {"start": 1185.01, "end": 1185.43, "word": " sign.", "probability": 0.89697265625}], "temperature": 1.0}, {"id": 47, "seek": 120674, "start": 1187.02, "end": 1206.74, "text": " Or continuous distribution, exactly. But for discrete, it does matter. 
Now, since we are talking about normal distribution, and as we mentioned, normal distribution is symmetric around the mean, that means the area to the right equals the area to the left.", "tokens": [1610, 1421, 12549, 7316, 11, 2293, 13, 583, 337, 27706, 11, 309, 775, 1871, 13, 823, 11, 1670, 321, 366, 1417, 466, 2710, 7316, 11, 293, 382, 321, 2835, 11, 2710, 7316, 307, 32330, 926, 264, 914, 11, 300, 1355, 264, 1859, 281, 264, 558, 6915, 264, 1859, 281, 264, 1411, 13], "avg_logprob": -0.19840801549407672, "compression_ratio": 1.6688311688311688, "no_speech_prob": 0.0, "words": [{"start": 1187.02, "end": 1187.28, "word": " Or", "probability": 0.385009765625}, {"start": 1187.28, "end": 1187.74, "word": " continuous", "probability": 0.54931640625}, {"start": 1187.74, "end": 1188.24, "word": " distribution,", "probability": 0.830078125}, {"start": 1188.4, "end": 1188.66, "word": " exactly.", "probability": 0.8671875}, {"start": 1189.12, "end": 1189.28, "word": " But", "probability": 0.92333984375}, {"start": 1189.28, "end": 1189.48, "word": " for", "probability": 0.88916015625}, {"start": 1189.48, "end": 1189.88, "word": " discrete,", "probability": 0.8623046875}, {"start": 1190.18, "end": 1190.46, "word": " it", "probability": 0.94970703125}, {"start": 1190.46, "end": 1190.64, "word": " does", "probability": 0.74853515625}, {"start": 1190.64, "end": 1190.88, "word": " matter.", "probability": 0.8515625}, {"start": 1192.64, "end": 1193.2, "word": " Now,", "probability": 0.953125}, {"start": 1193.26, "end": 1193.54, "word": " since", "probability": 0.86279296875}, {"start": 1193.54, "end": 1193.82, "word": " we", "probability": 0.96240234375}, {"start": 1193.82, "end": 1194.28, "word": " are", "probability": 0.93701171875}, {"start": 1194.28, "end": 1194.7, "word": " talking", "probability": 0.85107421875}, {"start": 1194.7, "end": 1195.22, "word": " about", "probability": 0.90185546875}, {"start": 1195.22, "end": 1196.38, "word": " normal", "probability": 
0.82568359375}, {"start": 1196.38, "end": 1196.94, "word": " distribution,", "probability": 0.84912109375}, {"start": 1197.7, "end": 1197.9, "word": " and", "probability": 0.92529296875}, {"start": 1197.9, "end": 1198.06, "word": " as", "probability": 0.8974609375}, {"start": 1198.06, "end": 1198.2, "word": " we", "probability": 0.9228515625}, {"start": 1198.2, "end": 1198.62, "word": " mentioned,", "probability": 0.8095703125}, {"start": 1199.18, "end": 1199.8, "word": " normal", "probability": 0.78369140625}, {"start": 1199.8, "end": 1200.24, "word": " distribution", "probability": 0.81298828125}, {"start": 1200.24, "end": 1200.46, "word": " is", "probability": 0.8603515625}, {"start": 1200.46, "end": 1200.8, "word": " symmetric", "probability": 0.83935546875}, {"start": 1200.8, "end": 1201.32, "word": " around", "probability": 0.9267578125}, {"start": 1201.32, "end": 1201.56, "word": " the", "probability": 0.8974609375}, {"start": 1201.56, "end": 1201.72, "word": " mean,", "probability": 0.9765625}, {"start": 1203.1, "end": 1203.38, "word": " that", "probability": 0.9287109375}, {"start": 1203.38, "end": 1203.62, "word": " means", "probability": 0.92822265625}, {"start": 1203.62, "end": 1203.76, "word": " the", "probability": 0.857421875}, {"start": 1203.76, "end": 1203.94, "word": " area", "probability": 0.8671875}, {"start": 1203.94, "end": 1204.1, "word": " to", "probability": 0.96484375}, {"start": 1204.1, "end": 1204.24, "word": " the", "probability": 0.91357421875}, {"start": 1204.24, "end": 1204.78, "word": " right", "probability": 0.923828125}, {"start": 1204.78, "end": 1205.9, "word": " equals", "probability": 0.85595703125}, {"start": 1205.9, "end": 1206.1, "word": " the", "probability": 0.884765625}, {"start": 1206.1, "end": 1206.24, "word": " area", "probability": 0.873046875}, {"start": 1206.24, "end": 1206.42, "word": " to", "probability": 0.9599609375}, {"start": 1206.42, "end": 1206.54, "word": " the", "probability": 0.9140625}, {"start": 
1206.54, "end": 1206.74, "word": " left.", "probability": 0.95263671875}], "temperature": 1.0}, {"id": 48, "seek": 123550, "start": 1207.84, "end": 1235.5, "text": " Now the entire area underneath the normal curve equals one. I mean probability of X ranges from minus infinity up to infinity equals one. So probability of X greater than minus infinity up to infinity is one. The total area is one. So the area from minus infinity up to the mean mu is one-half.", "tokens": [823, 264, 2302, 1859, 7223, 264, 2710, 7605, 6915, 472, 13, 286, 914, 8482, 295, 1783, 22526, 490, 3175, 13202, 493, 281, 13202, 6915, 472, 13, 407, 8482, 295, 1783, 5044, 813, 3175, 13202, 493, 281, 13202, 307, 472, 13, 440, 3217, 1859, 307, 472, 13, 407, 264, 1859, 490, 3175, 13202, 493, 281, 264, 914, 2992, 307, 472, 12, 25461, 13], "avg_logprob": -0.16294642715227037, "compression_ratio": 1.9407894736842106, "no_speech_prob": 0.0, "words": [{"start": 1207.84, "end": 1208.2, "word": " Now", "probability": 0.73291015625}, {"start": 1208.2, "end": 1208.4, "word": " the", "probability": 0.60107421875}, {"start": 1208.4, "end": 1208.82, "word": " entire", "probability": 0.884765625}, {"start": 1208.82, "end": 1209.34, "word": " area", "probability": 0.90185546875}, {"start": 1209.34, "end": 1209.86, "word": " underneath", "probability": 0.8291015625}, {"start": 1209.86, "end": 1210.26, "word": " the", "probability": 0.89404296875}, {"start": 1210.26, "end": 1210.54, "word": " normal", "probability": 0.83544921875}, {"start": 1210.54, "end": 1210.9, "word": " curve", "probability": 0.9208984375}, {"start": 1210.9, "end": 1211.66, "word": " equals", "probability": 0.8056640625}, {"start": 1211.66, "end": 1211.92, "word": " one.", "probability": 0.486083984375}, {"start": 1212.54, "end": 1212.78, "word": " I", "probability": 0.93701171875}, {"start": 1212.78, "end": 1212.94, "word": " mean", "probability": 0.9697265625}, {"start": 1212.94, "end": 1213.34, "word": " probability", "probability": 
0.427978515625}, {"start": 1213.34, "end": 1213.6, "word": " of", "probability": 0.95947265625}, {"start": 1213.6, "end": 1213.88, "word": " X", "probability": 0.73828125}, {"start": 1213.88, "end": 1214.36, "word": " ranges", "probability": 0.77001953125}, {"start": 1214.36, "end": 1214.64, "word": " from", "probability": 0.8984375}, {"start": 1214.64, "end": 1214.94, "word": " minus", "probability": 0.9677734375}, {"start": 1214.94, "end": 1215.44, "word": " infinity", "probability": 0.876953125}, {"start": 1215.44, "end": 1216.34, "word": " up", "probability": 0.89111328125}, {"start": 1216.34, "end": 1216.5, "word": " to", "probability": 0.97021484375}, {"start": 1216.5, "end": 1216.98, "word": " infinity", "probability": 0.86669921875}, {"start": 1216.98, "end": 1218.54, "word": " equals", "probability": 0.64501953125}, {"start": 1218.54, "end": 1218.8, "word": " one.", "probability": 0.89892578125}, {"start": 1219.32, "end": 1219.54, "word": " So", "probability": 0.90478515625}, {"start": 1219.54, "end": 1219.84, "word": " probability", "probability": 0.8232421875}, {"start": 1219.84, "end": 1220.12, "word": " of", "probability": 0.95751953125}, {"start": 1220.12, "end": 1220.42, "word": " X", "probability": 0.97509765625}, {"start": 1220.42, "end": 1221.5, "word": " greater", "probability": 0.78125}, {"start": 1221.5, "end": 1221.78, "word": " than", "probability": 0.935546875}, {"start": 1221.78, "end": 1222.1, "word": " minus", "probability": 0.9697265625}, {"start": 1222.1, "end": 1222.56, "word": " infinity", "probability": 0.89990234375}, {"start": 1222.56, "end": 1224.62, "word": " up", "probability": 0.76171875}, {"start": 1224.62, "end": 1224.84, "word": " to", "probability": 0.97216796875}, {"start": 1224.84, "end": 1225.28, "word": " infinity", "probability": 0.88134765625}, {"start": 1225.28, "end": 1225.62, "word": " is", "probability": 0.93505859375}, {"start": 1225.62, "end": 1225.84, "word": " one.", "probability": 0.9248046875}, {"start": 
1226.76, "end": 1226.92, "word": " The", "probability": 0.81298828125}, {"start": 1226.92, "end": 1227.2, "word": " total", "probability": 0.88818359375}, {"start": 1227.2, "end": 1227.54, "word": " area", "probability": 0.888671875}, {"start": 1227.54, "end": 1227.76, "word": " is", "probability": 0.943359375}, {"start": 1227.76, "end": 1227.96, "word": " one.", "probability": 0.92431640625}, {"start": 1229.32, "end": 1229.7, "word": " So", "probability": 0.92041015625}, {"start": 1229.7, "end": 1229.9, "word": " the", "probability": 0.88427734375}, {"start": 1229.9, "end": 1230.2, "word": " area", "probability": 0.8876953125}, {"start": 1230.2, "end": 1230.52, "word": " from", "probability": 0.89306640625}, {"start": 1230.52, "end": 1230.92, "word": " minus", "probability": 0.98681640625}, {"start": 1230.92, "end": 1231.48, "word": " infinity", "probability": 0.88720703125}, {"start": 1231.48, "end": 1232.3, "word": " up", "probability": 0.95263671875}, {"start": 1232.3, "end": 1232.48, "word": " to", "probability": 0.9697265625}, {"start": 1232.48, "end": 1232.72, "word": " the", "probability": 0.92626953125}, {"start": 1232.72, "end": 1232.92, "word": " mean", "probability": 0.91162109375}, {"start": 1232.92, "end": 1233.18, "word": " mu", "probability": 0.50341796875}, {"start": 1233.18, "end": 1235.02, "word": " is", "probability": 0.88916015625}, {"start": 1235.02, "end": 1235.2, "word": " one", "probability": 0.8994140625}, {"start": 1235.2, "end": 1235.5, "word": "-half.", "probability": 0.714599609375}], "temperature": 1.0}, {"id": 49, "seek": 126542, "start": 1237.0, "end": 1265.42, "text": " The same as the area from mu up to infinity is also one-half. That means the probability of X greater than minus infinity up to mu equals the probability from mu up to infinity because of symmetry. I mean you cannot say that for any distribution. 
Just for symmetric distribution, the area below the mean equals one-half, which is the same as the area to the right of the mean. So the entire", "tokens": [440, 912, 382, 264, 1859, 490, 2992, 493, 281, 13202, 307, 611, 472, 12, 25461, 13, 663, 1355, 264, 8482, 295, 1783, 5044, 813, 3175, 13202, 493, 281, 2992, 6915, 264, 8482, 490, 2992, 493, 281, 13202, 570, 295, 25440, 13, 286, 914, 291, 2644, 584, 300, 337, 604, 7316, 13, 1449, 337, 32330, 7316, 11, 264, 1859, 2507, 264, 914, 6915, 472, 12, 25461, 11, 597, 307, 264, 912, 382, 264, 1859, 281, 264, 558, 295, 264, 914, 13, 407, 264, 2302], "avg_logprob": -0.2021949468624024, "compression_ratio": 1.8980582524271845, "no_speech_prob": 0.0, "words": [{"start": 1237.0, "end": 1237.24, "word": " The", "probability": 0.435302734375}, {"start": 1237.24, "end": 1237.58, "word": " same", "probability": 0.81884765625}, {"start": 1237.58, "end": 1237.94, "word": " as", "probability": 0.8671875}, {"start": 1237.94, "end": 1238.08, "word": " the", "probability": 0.76953125}, {"start": 1238.08, "end": 1238.38, "word": " area", "probability": 0.888671875}, {"start": 1238.38, "end": 1238.66, "word": " from", "probability": 0.87744140625}, {"start": 1238.66, "end": 1239.02, "word": " mu", "probability": 0.257568359375}, {"start": 1239.02, "end": 1239.64, "word": " up", "probability": 0.8505859375}, {"start": 1239.64, "end": 1239.98, "word": " to", "probability": 0.95361328125}, {"start": 1239.98, "end": 1240.74, "word": " infinity", "probability": 0.84912109375}, {"start": 1240.74, "end": 1241.04, "word": " is", "probability": 0.79638671875}, {"start": 1241.04, "end": 1241.34, "word": " also", "probability": 0.8037109375}, {"start": 1241.34, "end": 1241.52, "word": " one", "probability": 0.53466796875}, {"start": 1241.52, "end": 1241.76, "word": "-half.", "probability": 0.6209716796875}, {"start": 1242.24, "end": 1242.6, "word": " That", "probability": 0.880859375}, {"start": 1242.6, "end": 1242.82, "word": " means", 
"probability": 0.92578125}, {"start": 1242.82, "end": 1242.94, "word": " the", "probability": 0.556640625}, {"start": 1242.94, "end": 1243.2, "word": " probability", "probability": 0.9287109375}, {"start": 1243.2, "end": 1243.4, "word": " of", "probability": 0.94580078125}, {"start": 1243.4, "end": 1243.6, "word": " X", "probability": 0.53271484375}, {"start": 1243.6, "end": 1244.04, "word": " greater", "probability": 0.8291015625}, {"start": 1244.04, "end": 1244.38, "word": " than", "probability": 0.939453125}, {"start": 1244.38, "end": 1244.76, "word": " minus", "probability": 0.953125}, {"start": 1244.76, "end": 1245.14, "word": " infinity", "probability": 0.82958984375}, {"start": 1245.14, "end": 1245.4, "word": " up", "probability": 0.8798828125}, {"start": 1245.4, "end": 1245.54, "word": " to", "probability": 0.97021484375}, {"start": 1245.54, "end": 1245.8, "word": " mu", "probability": 0.90966796875}, {"start": 1245.8, "end": 1247.08, "word": " equals", "probability": 0.65673828125}, {"start": 1247.08, "end": 1247.42, "word": " the", "probability": 0.8759765625}, {"start": 1247.42, "end": 1247.72, "word": " probability", "probability": 0.947265625}, {"start": 1247.72, "end": 1248.02, "word": " from", "probability": 0.80126953125}, {"start": 1248.02, "end": 1248.3, "word": " mu", "probability": 0.86474609375}, {"start": 1248.3, "end": 1248.54, "word": " up", "probability": 0.921875}, {"start": 1248.54, "end": 1248.74, "word": " to", "probability": 0.9697265625}, {"start": 1248.74, "end": 1249.2, "word": " infinity", "probability": 0.8779296875}, {"start": 1249.2, "end": 1250.02, "word": " because", "probability": 0.72900390625}, {"start": 1250.02, "end": 1250.2, "word": " of", "probability": 0.884765625}, {"start": 1250.2, "end": 1250.54, "word": " symmetry.", "probability": 0.7333984375}, {"start": 1251.68, "end": 1251.82, "word": " I", "probability": 0.80078125}, {"start": 1251.82, "end": 1251.96, "word": " mean", "probability": 0.9638671875}, {"start": 
1251.96, "end": 1252.12, "word": " you", "probability": 0.58154296875}, {"start": 1252.12, "end": 1252.4, "word": " cannot", "probability": 0.83984375}, {"start": 1252.4, "end": 1253.54, "word": " say", "probability": 0.9130859375}, {"start": 1253.54, "end": 1253.84, "word": " that", "probability": 0.92578125}, {"start": 1253.84, "end": 1254.2, "word": " for", "probability": 0.94287109375}, {"start": 1254.2, "end": 1254.54, "word": " any", "probability": 0.8994140625}, {"start": 1254.54, "end": 1255.24, "word": " distribution.", "probability": 0.81884765625}, {"start": 1255.68, "end": 1255.88, "word": " Just", "probability": 0.8583984375}, {"start": 1255.88, "end": 1256.16, "word": " for", "probability": 0.9462890625}, {"start": 1256.16, "end": 1256.52, "word": " symmetric", "probability": 0.732421875}, {"start": 1256.52, "end": 1257.14, "word": " distribution,", "probability": 0.84423828125}, {"start": 1257.94, "end": 1258.1, "word": " the", "probability": 0.9091796875}, {"start": 1258.1, "end": 1258.34, "word": " area", "probability": 0.8798828125}, {"start": 1258.34, "end": 1258.6, "word": " below", "probability": 0.89892578125}, {"start": 1258.6, "end": 1258.84, "word": " the", "probability": 0.9267578125}, {"start": 1258.84, "end": 1259.0, "word": " mean", "probability": 0.9716796875}, {"start": 1259.0, "end": 1260.68, "word": " equals", "probability": 0.8486328125}, {"start": 1260.68, "end": 1261.16, "word": " one", "probability": 0.91796875}, {"start": 1261.16, "end": 1261.48, "word": "-half,", "probability": 0.925048828125}, {"start": 1261.92, "end": 1262.12, "word": " which", "probability": 0.95458984375}, {"start": 1262.12, "end": 1262.26, "word": " is", "probability": 0.94677734375}, {"start": 1262.26, "end": 1262.42, "word": " the", "probability": 0.91796875}, {"start": 1262.42, "end": 1262.62, "word": " same", "probability": 0.89990234375}, {"start": 1262.62, "end": 1262.88, "word": " as", "probability": 0.95263671875}, {"start": 1262.88, "end": 
1263.0, "word": " the", "probability": 0.91064453125}, {"start": 1263.0, "end": 1263.32, "word": " area", "probability": 0.89013671875}, {"start": 1263.32, "end": 1263.78, "word": " to", "probability": 0.96044921875}, {"start": 1263.78, "end": 1263.94, "word": " the", "probability": 0.912109375}, {"start": 1263.94, "end": 1264.08, "word": " right", "probability": 0.921875}, {"start": 1264.08, "end": 1264.24, "word": " of", "probability": 0.9482421875}, {"start": 1264.24, "end": 1264.34, "word": " the", "probability": 0.91552734375}, {"start": 1264.34, "end": 1264.46, "word": " mean.", "probability": 0.9697265625}, {"start": 1264.7, "end": 1264.86, "word": " So", "probability": 0.943359375}, {"start": 1264.86, "end": 1265.0, "word": " the", "probability": 0.74755859375}, {"start": 1265.0, "end": 1265.42, "word": " entire", "probability": 0.890625}], "temperature": 1.0}, {"id": 50, "seek": 129619, "start": 1266.59, "end": 1296.19, "text": " Probability is one. And also you have to keep in mind that the probability always ranges between zero and one. So that means the probability couldn't be negative. It should be positive. It shouldn't be greater than one. So it's between zero and one. 
So always the probability lies between zero and one.", "tokens": [8736, 2310, 307, 472, 13, 400, 611, 291, 362, 281, 1066, 294, 1575, 300, 264, 8482, 1009, 22526, 1296, 4018, 293, 472, 13, 407, 300, 1355, 264, 8482, 2809, 380, 312, 3671, 13, 467, 820, 312, 3353, 13, 467, 4659, 380, 312, 5044, 813, 472, 13, 407, 309, 311, 1296, 4018, 293, 472, 13, 407, 1009, 264, 8482, 9134, 1296, 4018, 293, 472, 13], "avg_logprob": -0.1421875, "compression_ratio": 1.9803921568627452, "no_speech_prob": 0.0, "words": [{"start": 1266.59, "end": 1267.11, "word": " Probability", "probability": 0.802001953125}, {"start": 1267.11, "end": 1267.37, "word": " is", "probability": 0.89111328125}, {"start": 1267.37, "end": 1267.59, "word": " one.", "probability": 0.5009765625}, {"start": 1268.57, "end": 1268.89, "word": " And", "probability": 0.75341796875}, {"start": 1268.89, "end": 1269.15, "word": " also", "probability": 0.775390625}, {"start": 1269.15, "end": 1269.33, "word": " you", "probability": 0.4814453125}, {"start": 1269.33, "end": 1269.33, "word": " have", "probability": 0.92333984375}, {"start": 1269.33, "end": 1269.41, "word": " to", "probability": 0.96533203125}, {"start": 1269.41, "end": 1269.97, "word": " keep", "probability": 0.90966796875}, {"start": 1269.97, "end": 1270.13, "word": " in", "probability": 0.88134765625}, {"start": 1270.13, "end": 1270.39, "word": " mind", "probability": 0.89990234375}, {"start": 1270.39, "end": 1270.75, "word": " that", "probability": 0.89697265625}, {"start": 1270.75, "end": 1271.33, "word": " the", "probability": 0.60791015625}, {"start": 1271.33, "end": 1271.79, "word": " probability", "probability": 0.9189453125}, {"start": 1271.79, "end": 1274.25, "word": " always", "probability": 0.75048828125}, {"start": 1274.25, "end": 1274.73, "word": " ranges", "probability": 0.89453125}, {"start": 1274.73, "end": 1275.11, "word": " between", "probability": 0.87060546875}, {"start": 1275.11, "end": 1275.37, "word": " zero", "probability": 
0.642578125}, {"start": 1275.37, "end": 1275.51, "word": " and", "probability": 0.947265625}, {"start": 1275.51, "end": 1275.77, "word": " one.", "probability": 0.92724609375}, {"start": 1276.85, "end": 1277.57, "word": " So", "probability": 0.93310546875}, {"start": 1277.57, "end": 1277.79, "word": " that", "probability": 0.873046875}, {"start": 1277.79, "end": 1278.15, "word": " means", "probability": 0.93701171875}, {"start": 1278.15, "end": 1278.65, "word": " the", "probability": 0.830078125}, {"start": 1278.65, "end": 1279.01, "word": " probability", "probability": 0.943359375}, {"start": 1279.01, "end": 1279.49, "word": " couldn't", "probability": 0.89501953125}, {"start": 1279.49, "end": 1279.69, "word": " be", "probability": 0.95068359375}, {"start": 1279.69, "end": 1280.03, "word": " negative.", "probability": 0.94970703125}, {"start": 1282.87, "end": 1283.27, "word": " It", "probability": 0.88232421875}, {"start": 1283.27, "end": 1283.43, "word": " should", "probability": 0.9443359375}, {"start": 1283.43, "end": 1283.57, "word": " be", "probability": 0.94873046875}, {"start": 1283.57, "end": 1283.81, "word": " positive.", "probability": 0.89501953125}, {"start": 1285.29, "end": 1285.57, "word": " It", "probability": 0.951171875}, {"start": 1285.57, "end": 1286.01, "word": " shouldn't", "probability": 0.96875}, {"start": 1286.01, "end": 1286.41, "word": " be", "probability": 0.9521484375}, {"start": 1286.41, "end": 1287.73, "word": " greater", "probability": 0.8818359375}, {"start": 1287.73, "end": 1288.01, "word": " than", "probability": 0.9443359375}, {"start": 1288.01, "end": 1288.21, "word": " one.", "probability": 0.9150390625}, {"start": 1288.73, "end": 1288.97, "word": " So", "probability": 0.939453125}, {"start": 1288.97, "end": 1289.17, "word": " it's", "probability": 0.932861328125}, {"start": 1289.17, "end": 1289.61, "word": " between", "probability": 0.8896484375}, {"start": 1289.61, "end": 1290.35, "word": " zero", "probability": 0.865234375}, 
{"start": 1290.35, "end": 1290.51, "word": " and", "probability": 0.943359375}, {"start": 1290.51, "end": 1290.71, "word": " one.", "probability": 0.927734375}, {"start": 1291.13, "end": 1291.35, "word": " So", "probability": 0.943359375}, {"start": 1291.35, "end": 1291.71, "word": " always", "probability": 0.86376953125}, {"start": 1291.71, "end": 1291.89, "word": " the", "probability": 0.83447265625}, {"start": 1291.89, "end": 1292.27, "word": " probability", "probability": 0.947265625}, {"start": 1292.27, "end": 1293.31, "word": " lies", "probability": 0.939453125}, {"start": 1293.31, "end": 1293.83, "word": " between", "probability": 0.8720703125}, {"start": 1293.83, "end": 1295.21, "word": " zero", "probability": 0.87548828125}, {"start": 1295.21, "end": 1295.97, "word": " and", "probability": 0.9423828125}, {"start": 1295.97, "end": 1296.19, "word": " one.", "probability": 0.92626953125}], "temperature": 1.0}, {"id": 51, "seek": 132356, "start": 1298.66, "end": 1323.56, "text": " The tables we have on page 570 and 571 give the area to the left side. For negative or positive z's. 
Now for example, suppose we are looking for probability of z less than 2.", "tokens": [440, 8020, 321, 362, 322, 3028, 1025, 5867, 293, 21423, 16, 976, 264, 1859, 281, 264, 1411, 1252, 13, 1171, 3671, 420, 3353, 710, 311, 13, 823, 337, 1365, 11, 7297, 321, 366, 1237, 337, 8482, 295, 710, 1570, 813, 568, 13], "avg_logprob": -0.22656249445538187, "compression_ratio": 1.286764705882353, "no_speech_prob": 0.0, "words": [{"start": 1298.66, "end": 1299.02, "word": " The", "probability": 0.7041015625}, {"start": 1299.02, "end": 1299.58, "word": " tables", "probability": 0.8046875}, {"start": 1299.58, "end": 1299.82, "word": " we", "probability": 0.90380859375}, {"start": 1299.82, "end": 1300.14, "word": " have", "probability": 0.93505859375}, {"start": 1300.14, "end": 1301.02, "word": " on", "probability": 0.8720703125}, {"start": 1301.02, "end": 1301.36, "word": " page", "probability": 0.78125}, {"start": 1301.36, "end": 1302.28, "word": " 570", "probability": 0.553466796875}, {"start": 1302.28, "end": 1302.46, "word": " and", "probability": 0.88671875}, {"start": 1302.46, "end": 1303.24, "word": " 571", "probability": 0.86669921875}, {"start": 1303.24, "end": 1304.18, "word": " give", "probability": 0.60986328125}, {"start": 1304.18, "end": 1304.32, "word": " the", "probability": 0.8984375}, {"start": 1304.32, "end": 1304.5, "word": " area", "probability": 0.7802734375}, {"start": 1304.5, "end": 1304.7, "word": " to", "probability": 0.953125}, {"start": 1304.7, "end": 1304.98, "word": " the", "probability": 0.91748046875}, {"start": 1304.98, "end": 1305.56, "word": " left", "probability": 0.943359375}, {"start": 1305.56, "end": 1306.04, "word": " side.", "probability": 0.8349609375}, {"start": 1309.42, "end": 1310.18, "word": " For", "probability": 0.81103515625}, {"start": 1310.18, "end": 1310.58, "word": " negative", "probability": 0.9111328125}, {"start": 1310.58, "end": 1310.86, "word": " or", "probability": 0.92724609375}, {"start": 1310.86, "end": 1311.4, "word": 
" positive", "probability": 0.93701171875}, {"start": 1311.4, "end": 1311.82, "word": " z's.", "probability": 0.68896484375}, {"start": 1313.82, "end": 1314.08, "word": " Now", "probability": 0.8779296875}, {"start": 1314.08, "end": 1314.3, "word": " for", "probability": 0.57568359375}, {"start": 1314.3, "end": 1314.66, "word": " example,", "probability": 0.97705078125}, {"start": 1314.94, "end": 1315.44, "word": " suppose", "probability": 0.859375}, {"start": 1315.44, "end": 1318.04, "word": " we", "probability": 0.75146484375}, {"start": 1318.04, "end": 1318.22, "word": " are", "probability": 0.93359375}, {"start": 1318.22, "end": 1318.66, "word": " looking", "probability": 0.90771484375}, {"start": 1318.66, "end": 1321.06, "word": " for", "probability": 0.9375}, {"start": 1321.06, "end": 1321.64, "word": " probability", "probability": 0.59912109375}, {"start": 1321.64, "end": 1322.02, "word": " of", "probability": 0.96337890625}, {"start": 1322.02, "end": 1322.3, "word": " z", "probability": 0.818359375}, {"start": 1322.3, "end": 1323.06, "word": " less", "probability": 0.802734375}, {"start": 1323.06, "end": 1323.34, "word": " than", "probability": 0.94384765625}, {"start": 1323.34, "end": 1323.56, "word": " 2.", "probability": 0.347900390625}], "temperature": 1.0}, {"id": 52, "seek": 135427, "start": 1324.77, "end": 1354.27, "text": " How can we find this probability by using the normal curve? Let's go back to this normal distribution. In the second page, we have positive z-scores. So we ask about the probability of z less than. 
So the second page,", "tokens": [1012, 393, 321, 915, 341, 8482, 538, 1228, 264, 2710, 7605, 30, 961, 311, 352, 646, 281, 341, 2710, 7316, 13, 682, 264, 1150, 3028, 11, 321, 362, 3353, 710, 12, 4417, 2706, 13, 407, 321, 1029, 466, 264, 8482, 295, 710, 1570, 813, 13, 407, 264, 1150, 3028, 11], "avg_logprob": -0.21675857258777992, "compression_ratio": 1.4630872483221478, "no_speech_prob": 0.0, "words": [{"start": 1324.77, "end": 1325.03, "word": " How", "probability": 0.467529296875}, {"start": 1325.03, "end": 1325.29, "word": " can", "probability": 0.91943359375}, {"start": 1325.29, "end": 1325.51, "word": " we", "probability": 0.9482421875}, {"start": 1325.51, "end": 1326.39, "word": " find", "probability": 0.865234375}, {"start": 1326.39, "end": 1326.77, "word": " this", "probability": 0.89599609375}, {"start": 1326.77, "end": 1327.15, "word": " probability", "probability": 0.91943359375}, {"start": 1327.15, "end": 1328.25, "word": " by", "probability": 0.74267578125}, {"start": 1328.25, "end": 1328.75, "word": " using", "probability": 0.9267578125}, {"start": 1328.75, "end": 1329.21, "word": " the", "probability": 0.88037109375}, {"start": 1329.21, "end": 1329.71, "word": " normal", "probability": 0.736328125}, {"start": 1329.71, "end": 1330.03, "word": " curve?", "probability": 0.87353515625}, {"start": 1330.47, "end": 1330.77, "word": " Let's", "probability": 0.920654296875}, {"start": 1330.77, "end": 1330.89, "word": " go", "probability": 0.96240234375}, {"start": 1330.89, "end": 1331.19, "word": " back", "probability": 0.87890625}, {"start": 1331.19, "end": 1331.53, "word": " to", "probability": 0.9619140625}, {"start": 1331.53, "end": 1331.83, "word": " this", "probability": 0.9267578125}, {"start": 1331.83, "end": 1332.21, "word": " normal", "probability": 0.8720703125}, {"start": 1332.21, "end": 1332.79, "word": " distribution.", "probability": 0.8671875}, {"start": 1334.13, "end": 1334.85, "word": " In", "probability": 0.72119140625}, {"start": 1334.85, 
"end": 1334.99, "word": " the", "probability": 0.91650390625}, {"start": 1334.99, "end": 1335.31, "word": " second", "probability": 0.89404296875}, {"start": 1335.31, "end": 1335.57, "word": " page,", "probability": 0.8935546875}, {"start": 1335.57, "end": 1335.79, "word": " we", "probability": 0.95361328125}, {"start": 1335.79, "end": 1336.01, "word": " have", "probability": 0.94873046875}, {"start": 1336.01, "end": 1336.41, "word": " positive", "probability": 0.9150390625}, {"start": 1336.41, "end": 1336.65, "word": " z", "probability": 0.8251953125}, {"start": 1336.65, "end": 1337.07, "word": "-scores.", "probability": 0.80712890625}, {"start": 1343.85, "end": 1344.57, "word": " So", "probability": 0.912109375}, {"start": 1344.57, "end": 1344.69, "word": " we", "probability": 0.2227783203125}, {"start": 1344.69, "end": 1344.89, "word": " ask", "probability": 0.923828125}, {"start": 1344.89, "end": 1345.21, "word": " about", "probability": 0.91015625}, {"start": 1345.21, "end": 1345.77, "word": " the", "probability": 0.6162109375}, {"start": 1345.77, "end": 1346.19, "word": " probability", "probability": 0.939453125}, {"start": 1346.19, "end": 1348.21, "word": " of", "probability": 0.94091796875}, {"start": 1348.21, "end": 1348.51, "word": " z", "probability": 0.904296875}, {"start": 1348.51, "end": 1349.93, "word": " less", "probability": 0.7744140625}, {"start": 1349.93, "end": 1350.15, "word": " than.", "probability": 0.95361328125}, {"start": 1352.67, "end": 1353.39, "word": " So", "probability": 0.88916015625}, {"start": 1353.39, "end": 1353.57, "word": " the", "probability": 0.59814453125}, {"start": 1353.57, "end": 1353.87, "word": " second", "probability": 0.90185546875}, {"start": 1353.87, "end": 1354.27, "word": " page,", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 53, "seek": 138213, "start": 1355.99, "end": 1382.13, "text": " gives positive values of z. And the table gives the area below. 
And he asked about here, B of z is smaller than 2. Now 2, if you hear, up all the way down here, 2, 0, 0. So the answer is 9772. So this value, so the probability is 9772.", "tokens": [2709, 3353, 4190, 295, 710, 13, 400, 264, 3199, 2709, 264, 1859, 2507, 13, 400, 415, 2351, 466, 510, 11, 363, 295, 710, 307, 4356, 813, 568, 13, 823, 568, 11, 498, 291, 1568, 11, 493, 439, 264, 636, 760, 510, 11, 568, 11, 1958, 11, 1958, 13, 407, 264, 1867, 307, 1722, 17512, 17, 13, 407, 341, 2158, 11, 370, 264, 8482, 307, 1722, 17512, 17, 13], "avg_logprob": -0.27558876293292944, "compression_ratio": 1.475, "no_speech_prob": 0.0, "words": [{"start": 1355.99, "end": 1356.55, "word": " gives", "probability": 0.325927734375}, {"start": 1356.55, "end": 1357.11, "word": " positive", "probability": 0.89306640625}, {"start": 1357.11, "end": 1357.47, "word": " values", "probability": 0.8857421875}, {"start": 1357.47, "end": 1357.65, "word": " of", "probability": 0.927734375}, {"start": 1357.65, "end": 1357.79, "word": " z.", "probability": 0.71044921875}, {"start": 1360.13, "end": 1360.69, "word": " And", "probability": 0.9189453125}, {"start": 1360.69, "end": 1360.85, "word": " the", "probability": 0.88037109375}, {"start": 1360.85, "end": 1361.11, "word": " table", "probability": 0.830078125}, {"start": 1361.11, "end": 1361.39, "word": " gives", "probability": 0.91748046875}, {"start": 1361.39, "end": 1361.53, "word": " the", "probability": 0.900390625}, {"start": 1361.53, "end": 1361.77, "word": " area", "probability": 0.88671875}, {"start": 1361.77, "end": 1362.13, "word": " below.", "probability": 0.888671875}, {"start": 1363.49, "end": 1363.85, "word": " And", "probability": 0.91162109375}, {"start": 1363.85, "end": 1363.99, "word": " he", "probability": 0.84375}, {"start": 1363.99, "end": 1364.23, "word": " asked", "probability": 0.4853515625}, {"start": 1364.23, "end": 1364.59, "word": " about", "probability": 0.8837890625}, {"start": 1364.59, "end": 1364.99, "word": " here,", 
"probability": 0.69140625}, {"start": 1365.29, "end": 1365.51, "word": " B", "probability": 0.46923828125}, {"start": 1365.51, "end": 1365.67, "word": " of", "probability": 0.90283203125}, {"start": 1365.67, "end": 1365.83, "word": " z", "probability": 0.83544921875}, {"start": 1365.83, "end": 1366.07, "word": " is", "probability": 0.86083984375}, {"start": 1366.07, "end": 1366.37, "word": " smaller", "probability": 0.87744140625}, {"start": 1366.37, "end": 1366.65, "word": " than", "probability": 0.93896484375}, {"start": 1366.65, "end": 1366.89, "word": " 2.", "probability": 0.7275390625}, {"start": 1368.43, "end": 1368.85, "word": " Now", "probability": 0.923828125}, {"start": 1368.85, "end": 1369.21, "word": " 2,", "probability": 0.50927734375}, {"start": 1369.31, "end": 1369.43, "word": " if", "probability": 0.8544921875}, {"start": 1369.43, "end": 1369.55, "word": " you", "probability": 0.65478515625}, {"start": 1369.55, "end": 1369.89, "word": " hear,", "probability": 0.31298828125}, {"start": 1370.17, "end": 1370.51, "word": " up", "probability": 0.77978515625}, {"start": 1370.51, "end": 1370.81, "word": " all", "probability": 0.91748046875}, {"start": 1370.81, "end": 1370.95, "word": " the", "probability": 0.91650390625}, {"start": 1370.95, "end": 1371.11, "word": " way", "probability": 0.95263671875}, {"start": 1371.11, "end": 1371.41, "word": " down", "probability": 0.8408203125}, {"start": 1371.41, "end": 1371.65, "word": " here,", "probability": 0.82763671875}, {"start": 1371.77, "end": 1371.99, "word": " 2,", "probability": 0.78955078125}, {"start": 1372.33, "end": 1373.31, "word": " 0,", "probability": 0.83984375}, {"start": 1373.39, "end": 1373.67, "word": " 0.", "probability": 0.923828125}, {"start": 1374.31, "end": 1374.77, "word": " So", "probability": 0.951171875}, {"start": 1374.77, "end": 1374.91, "word": " the", "probability": 0.8583984375}, {"start": 1374.91, "end": 1375.17, "word": " answer", "probability": 0.95849609375}, {"start": 
1375.17, "end": 1375.37, "word": " is", "probability": 0.94091796875}, {"start": 1375.37, "end": 1376.53, "word": " 9772.", "probability": 0.78564453125}, {"start": 1377.17, "end": 1377.37, "word": " So", "probability": 0.87255859375}, {"start": 1377.37, "end": 1377.61, "word": " this", "probability": 0.81787109375}, {"start": 1377.61, "end": 1377.99, "word": " value,", "probability": 0.9541015625}, {"start": 1379.09, "end": 1379.97, "word": " so", "probability": 0.68603515625}, {"start": 1379.97, "end": 1380.13, "word": " the", "probability": 0.9150390625}, {"start": 1380.13, "end": 1380.53, "word": " probability", "probability": 0.96337890625}, {"start": 1380.53, "end": 1380.95, "word": " is", "probability": 0.94091796875}, {"start": 1380.95, "end": 1382.13, "word": " 9772.", "probability": 0.8429361979166666}], "temperature": 1.0}, {"id": 54, "seek": 141037, "start": 1383.99, "end": 1410.37, "text": " Because it's 2. It's 2, 0, 0. But if you ask about what's the probability of Z less than 2.05? So this is 2. Now under 5, 9, 7, 9, 8. 
So the answer is 9, 7.", "tokens": [1436, 309, 311, 568, 13, 467, 311, 568, 11, 1958, 11, 1958, 13, 583, 498, 291, 1029, 466, 437, 311, 264, 8482, 295, 1176, 1570, 813, 568, 13, 13328, 30, 407, 341, 307, 568, 13, 823, 833, 1025, 11, 1722, 11, 1614, 11, 1722, 11, 1649, 13, 407, 264, 1867, 307, 1722, 11, 1614, 13], "avg_logprob": -0.29101562553218435, "compression_ratio": 1.2265625, "no_speech_prob": 0.0, "words": [{"start": 1383.99, "end": 1384.47, "word": " Because", "probability": 0.3837890625}, {"start": 1384.47, "end": 1384.95, "word": " it's", "probability": 0.897705078125}, {"start": 1384.95, "end": 1385.39, "word": " 2.", "probability": 0.4619140625}, {"start": 1389.51, "end": 1389.99, "word": " It's", "probability": 0.667236328125}, {"start": 1389.99, "end": 1390.19, "word": " 2,", "probability": 0.61474609375}, {"start": 1390.27, "end": 1390.49, "word": " 0,", "probability": 0.7578125}, {"start": 1390.55, "end": 1390.79, "word": " 0.", "probability": 0.98388671875}, {"start": 1391.75, "end": 1392.07, "word": " But", "probability": 0.92041015625}, {"start": 1392.07, "end": 1392.19, "word": " if", "probability": 0.83984375}, {"start": 1392.19, "end": 1392.23, "word": " you", "probability": 0.90869140625}, {"start": 1392.23, "end": 1392.45, "word": " ask", "probability": 0.9287109375}, {"start": 1392.45, "end": 1392.87, "word": " about", "probability": 0.90673828125}, {"start": 1392.87, "end": 1394.59, "word": " what's", "probability": 0.84326171875}, {"start": 1394.59, "end": 1394.65, "word": " the", "probability": 0.560546875}, {"start": 1394.65, "end": 1394.89, "word": " probability", "probability": 0.97802734375}, {"start": 1394.89, "end": 1395.21, "word": " of", "probability": 0.93896484375}, {"start": 1395.21, "end": 1395.45, "word": " Z", "probability": 0.64404296875}, {"start": 1395.45, "end": 1396.33, "word": " less", "probability": 0.86474609375}, {"start": 1396.33, "end": 1396.55, "word": " than", "probability": 0.94580078125}, {"start": 1396.55, 
"end": 1396.79, "word": " 2", "probability": 0.98046875}, {"start": 1396.79, "end": 1397.47, "word": ".05?", "probability": 0.965576171875}, {"start": 1399.47, "end": 1399.95, "word": " So", "probability": 0.2352294921875}, {"start": 1399.95, "end": 1400.19, "word": " this", "probability": 0.80712890625}, {"start": 1400.19, "end": 1400.39, "word": " is", "probability": 0.94677734375}, {"start": 1400.39, "end": 1400.59, "word": " 2.", "probability": 0.77783203125}, {"start": 1403.81, "end": 1404.29, "word": " Now", "probability": 0.236083984375}, {"start": 1404.29, "end": 1404.65, "word": " under", "probability": 0.5791015625}, {"start": 1404.65, "end": 1405.07, "word": " 5,", "probability": 0.87890625}, {"start": 1407.43, "end": 1407.69, "word": " 9,", "probability": 0.3798828125}, {"start": 1407.81, "end": 1408.09, "word": " 7,", "probability": 0.8076171875}, {"start": 1408.27, "end": 1408.71, "word": " 9,", "probability": 0.994140625}, {"start": 1408.83, "end": 1408.99, "word": " 8.", "probability": 0.9873046875}, {"start": 1409.15, "end": 1409.27, "word": " So", "probability": 0.91845703125}, {"start": 1409.27, "end": 1409.43, "word": " the", "probability": 0.89111328125}, {"start": 1409.43, "end": 1409.65, "word": " answer", "probability": 0.95361328125}, {"start": 1409.65, "end": 1409.87, "word": " is", "probability": 0.94580078125}, {"start": 1409.87, "end": 1410.09, "word": " 9,", "probability": 0.9423828125}, {"start": 1410.17, "end": 1410.37, "word": " 7.", "probability": 0.89697265625}], "temperature": 1.0}, {"id": 55, "seek": 143688, "start": 1414.36, "end": 1436.88, "text": " Because this is two, and we need five decimal places. So all the way up to 9798. So this value is 2.05. 
Now it's about, it's more than 1.5, exactly 1.5.", "tokens": [1436, 341, 307, 732, 11, 293, 321, 643, 1732, 26601, 3190, 13, 407, 439, 264, 636, 493, 281, 23399, 22516, 13, 407, 341, 2158, 307, 568, 13, 13328, 13, 823, 309, 311, 466, 11, 309, 311, 544, 813, 502, 13, 20, 11, 2293, 502, 13, 20, 13], "avg_logprob": -0.23681640811264515, "compression_ratio": 1.1953125, "no_speech_prob": 0.0, "words": [{"start": 1414.3600000000001, "end": 1415.16, "word": " Because", "probability": 0.2059326171875}, {"start": 1415.16, "end": 1415.96, "word": " this", "probability": 0.9267578125}, {"start": 1415.96, "end": 1416.1, "word": " is", "probability": 0.9453125}, {"start": 1416.1, "end": 1416.3, "word": " two,", "probability": 0.51708984375}, {"start": 1417.2, "end": 1417.68, "word": " and", "probability": 0.9296875}, {"start": 1417.68, "end": 1417.86, "word": " we", "probability": 0.955078125}, {"start": 1417.86, "end": 1418.14, "word": " need", "probability": 0.92041015625}, {"start": 1418.14, "end": 1418.56, "word": " five", "probability": 0.8896484375}, {"start": 1418.56, "end": 1418.9, "word": " decimal", "probability": 0.55029296875}, {"start": 1418.9, "end": 1419.32, "word": " places.", "probability": 0.96337890625}, {"start": 1420.74, "end": 1421.54, "word": " So", "probability": 0.919921875}, {"start": 1421.54, "end": 1421.8, "word": " all", "probability": 0.744140625}, {"start": 1421.8, "end": 1421.9, "word": " the", "probability": 0.91748046875}, {"start": 1421.9, "end": 1422.04, "word": " way", "probability": 0.95849609375}, {"start": 1422.04, "end": 1422.2, "word": " up", "probability": 0.9541015625}, {"start": 1422.2, "end": 1422.36, "word": " to", "probability": 0.96826171875}, {"start": 1422.36, "end": 1423.26, "word": " 9798.", "probability": 0.47412109375}, {"start": 1423.92, "end": 1424.18, "word": " So", "probability": 0.95068359375}, {"start": 1424.18, "end": 1424.42, "word": " this", "probability": 0.90771484375}, {"start": 1424.42, "end": 1424.82, "word": 
" value", "probability": 0.97607421875}, {"start": 1424.82, "end": 1427.24, "word": " is", "probability": 0.873046875}, {"start": 1427.24, "end": 1427.48, "word": " 2", "probability": 0.984375}, {"start": 1427.48, "end": 1428.96, "word": ".05.", "probability": 0.993408203125}, {"start": 1429.62, "end": 1429.92, "word": " Now", "probability": 0.5556640625}, {"start": 1429.92, "end": 1430.1, "word": " it's", "probability": 0.7288818359375}, {"start": 1430.1, "end": 1430.4, "word": " about,", "probability": 0.8828125}, {"start": 1431.98, "end": 1433.08, "word": " it's", "probability": 0.8642578125}, {"start": 1433.08, "end": 1433.32, "word": " more", "probability": 0.91162109375}, {"start": 1433.32, "end": 1433.6, "word": " than", "probability": 0.9482421875}, {"start": 1433.6, "end": 1433.82, "word": " 1", "probability": 0.9912109375}, {"start": 1433.82, "end": 1434.38, "word": ".5,", "probability": 0.991455078125}, {"start": 1435.6, "end": 1436.14, "word": " exactly", "probability": 0.90380859375}, {"start": 1436.14, "end": 1436.38, "word": " 1", "probability": 0.990234375}, {"start": 1436.38, "end": 1436.88, "word": ".5.", "probability": 0.9990234375}], "temperature": 1.0}, {"id": 56, "seek": 146320, "start": 1442.14, "end": 1463.2, "text": " 1.5. This is 1.5. 9332. 1.5. Exactly 1.5. So 9332. 
What's about probability less than 1.35?", "tokens": [502, 13, 20, 13, 639, 307, 502, 13, 20, 13, 1722, 10191, 17, 13, 502, 13, 20, 13, 7587, 502, 13, 20, 13, 407, 1722, 10191, 17, 13, 708, 311, 466, 8482, 1570, 813, 502, 13, 8794, 30], "avg_logprob": -0.24699519383601654, "compression_ratio": 1.15, "no_speech_prob": 0.0, "words": [{"start": 1442.1399999999999, "end": 1443.06, "word": " 1", "probability": 0.245849609375}, {"start": 1443.06, "end": 1443.68, "word": ".5.", "probability": 0.953125}, {"start": 1443.82, "end": 1444.08, "word": " This", "probability": 0.274169921875}, {"start": 1444.08, "end": 1444.2, "word": " is", "probability": 0.93408203125}, {"start": 1444.2, "end": 1444.38, "word": " 1", "probability": 0.978515625}, {"start": 1444.38, "end": 1444.88, "word": ".5.", "probability": 0.995849609375}, {"start": 1448.8, "end": 1449.72, "word": " 9332.", "probability": 0.90087890625}, {"start": 1452.44, "end": 1453.36, "word": " 1", "probability": 0.798828125}, {"start": 1453.36, "end": 1453.96, "word": ".5.", "probability": 0.996337890625}, {"start": 1454.12, "end": 1454.5, "word": " Exactly", "probability": 0.72314453125}, {"start": 1454.5, "end": 1454.76, "word": " 1", "probability": 0.9375}, {"start": 1454.76, "end": 1455.14, "word": ".5.", "probability": 0.99853515625}, {"start": 1455.24, "end": 1455.34, "word": " So", "probability": 0.7998046875}, {"start": 1455.34, "end": 1456.3, "word": " 9332.", "probability": 0.92626953125}, {"start": 1458.78, "end": 1459.62, "word": " What's", "probability": 0.820556640625}, {"start": 1459.62, "end": 1459.96, "word": " about", "probability": 0.91748046875}, {"start": 1459.96, "end": 1460.5, "word": " probability", "probability": 0.79638671875}, {"start": 1460.5, "end": 1462.0, "word": " less", "probability": 0.2958984375}, {"start": 1462.0, "end": 1462.2, "word": " than", "probability": 0.93798828125}, {"start": 1462.2, "end": 1462.48, "word": " 1", "probability": 0.99267578125}, {"start": 1462.48, "end": 
1463.2, "word": ".35?", "probability": 0.994873046875}], "temperature": 1.0}, {"id": 57, "seek": 149189, "start": 1464.21, "end": 1491.89, "text": " 1.3 all the way to 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.115. 9.", "tokens": [502, 13, 18, 439, 264, 636, 281, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13, 16, 5211, 13, 1722, 13], "avg_logprob": -0.11145833651224772, "compression_ratio": 8.72972972972973, "no_speech_prob": 0.0, "words": [{"start": 1464.21, "end": 1464.69, "word": " 1", "probability": 0.1385498046875}, {"start": 1464.69, "end": 1465.37, "word": ".3", "probability": 0.724609375}, {"start": 1465.37, "end": 1467.99, "word": " all", "probability": 0.205322265625}, {"start": 1467.99, "end": 1468.23, "word": " the", "probability": 0.91259765625}, 
{"start": 1468.23, "end": 1468.49, "word": " way", "probability": 0.95751953125}, {"start": 1468.49, "end": 1469.17, "word": " to", "probability": 0.341064453125}, {"start": 1469.17, "end": 1470.43, "word": " 9", "probability": 0.74853515625}, {"start": 1470.43, "end": 1470.43, "word": ".115.", "probability": 0.7003580729166666}, {"start": 1470.43, "end": 1470.43, "word": " 9", "probability": 0.33349609375}, {"start": 1470.43, "end": 1470.83, "word": ".115.", "probability": 0.9537760416666666}, {"start": 1471.39, "end": 1471.73, "word": " 9", "probability": 0.4609375}, {"start": 1471.73, "end": 1472.33, "word": ".115.", "probability": 0.97802734375}, {"start": 1473.67, "end": 1473.67, "word": " 9", "probability": 0.489501953125}, {"start": 1473.67, "end": 1474.27, "word": ".115.", "probability": 0.9850260416666666}, {"start": 1474.53, "end": 1474.79, "word": " 9", "probability": 0.4990234375}, {"start": 1474.79, "end": 1475.25, "word": ".115.", "probability": 0.9879557291666666}, {"start": 1475.25, "end": 1475.25, "word": " 9", "probability": 0.339599609375}, {"start": 1475.25, "end": 1475.47, "word": ".115.", "probability": 0.9871419270833334}, {"start": 1475.65, "end": 1475.65, "word": " 9", "probability": 0.1962890625}, {"start": 1475.65, "end": 1475.65, "word": ".115.", "probability": 0.9845377604166666}, {"start": 1481.17, "end": 1481.87, "word": " 9", "probability": 0.254150390625}, {"start": 1481.87, "end": 1481.87, "word": ".115.", "probability": 0.9820963541666666}, {"start": 1481.87, "end": 1481.93, "word": " 9", "probability": 0.445556640625}, {"start": 1481.93, "end": 1481.93, "word": ".115.", "probability": 0.9812825520833334}, {"start": 1481.93, "end": 1481.95, "word": " 9", "probability": 0.57763671875}, {"start": 1481.95, "end": 1481.95, "word": ".115.", "probability": 0.9807942708333334}, {"start": 1482.05, "end": 1482.05, "word": " 9", "probability": 0.658203125}, {"start": 1482.05, "end": 1482.05, "word": ".115.", "probability": 
0.9807942708333334}, {"start": 1482.05, "end": 1482.05, "word": " 9", "probability": 0.71435546875}, {"start": 1482.05, "end": 1482.05, "word": ".115.", "probability": 0.9807942708333334}, {"start": 1482.05, "end": 1482.05, "word": " 9", "probability": 0.7587890625}, {"start": 1482.05, "end": 1482.05, "word": ".115.", "probability": 0.98095703125}, {"start": 1482.05, "end": 1482.05, "word": " 9", "probability": 0.7861328125}, {"start": 1482.05, "end": 1482.05, "word": ".115.", "probability": 0.9814453125}, {"start": 1482.41, "end": 1482.43, "word": " 9", "probability": 0.8115234375}, {"start": 1482.43, "end": 1482.45, "word": ".115.", "probability": 0.98193359375}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.82568359375}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9820963541666666}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.8349609375}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9825846354166666}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.841796875}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9832356770833334}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.8466796875}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9833984375}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.85205078125}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9837239583333334}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.85595703125}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.984375}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.8603515625}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9842122395833334}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.86328125}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.98486328125}, {"start": 
1482.45, "end": 1482.45, "word": " 9", "probability": 0.8681640625}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9850260416666666}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.8720703125}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.98583984375}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.87890625}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.98583984375}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.8837890625}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9861653645833334}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.8876953125}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.986328125}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.89013671875}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9861653645833334}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.89501953125}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.9861653645833334}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.90087890625}, {"start": 1482.45, "end": 1482.45, "word": ".115.", "probability": 0.986328125}, {"start": 1482.45, "end": 1482.45, "word": " 9", "probability": 0.90380859375}, {"start": 1482.45, "end": 1482.55, "word": ".115.", "probability": 0.9861653645833334}, {"start": 1482.57, "end": 1482.77, "word": " 9", "probability": 0.9072265625}, {"start": 1482.77, "end": 1483.31, "word": ".115.", "probability": 0.98583984375}, {"start": 1484.05, "end": 1484.05, "word": " 9", "probability": 0.9111328125}, {"start": 1484.05, "end": 1484.05, "word": ".115.", "probability": 0.98583984375}, {"start": 1484.05, "end": 1484.05, "word": " 9", "probability": 0.912109375}, {"start": 1484.05, "end": 1484.05, "word": ".115.", "probability": 0.9855143229166666}, {"start": 1484.05, "end": 1484.05, 
"word": " 9", "probability": 0.9150390625}, {"start": 1484.05, "end": 1484.05, "word": ".115.", "probability": 0.9847005208333334}, {"start": 1484.05, "end": 1484.05, "word": " 9", "probability": 0.91796875}, {"start": 1484.05, "end": 1484.05, "word": ".115.", "probability": 0.9845377604166666}, {"start": 1484.05, "end": 1484.05, "word": " 9", "probability": 0.919921875}, {"start": 1484.05, "end": 1484.31, "word": ".115.", "probability": 0.984375}, {"start": 1484.45, "end": 1484.99, "word": " 9", "probability": 0.921875}, {"start": 1484.99, "end": 1486.53, "word": ".115.", "probability": 0.9832356770833334}, {"start": 1486.55, "end": 1486.69, "word": " 9", "probability": 0.92333984375}, {"start": 1486.69, "end": 1487.27, "word": ".115.", "probability": 0.98193359375}, {"start": 1488.09, "end": 1488.33, "word": " 9", "probability": 0.92529296875}, {"start": 1488.33, "end": 1488.71, "word": ".115.", "probability": 0.9814453125}, {"start": 1488.73, "end": 1489.43, "word": " 9", "probability": 0.9267578125}, {"start": 1489.43, "end": 1489.59, "word": ".115.", "probability": 0.9811197916666666}, {"start": 1490.09, "end": 1490.53, "word": " 9", "probability": 0.927734375}, {"start": 1490.53, "end": 1491.45, "word": ".115.", "probability": 0.9812825520833334}, {"start": 1491.63, "end": 1491.89, "word": " 9.", "probability": 0.93017578125}], "temperature": 1.0}, {"id": 58, "seek": 152172, "start": 1492.98, "end": 1521.72, "text": " But here we are looking for the area to the right. One minus one. 
Now this area equals one minus because since suppose this is the 1.35 and we are interested in the area to the right or above 1.35.", "tokens": [583, 510, 321, 366, 1237, 337, 264, 1859, 281, 264, 558, 13, 1485, 3175, 472, 13, 823, 341, 1859, 6915, 472, 3175, 570, 1670, 7297, 341, 307, 264, 502, 13, 8794, 293, 321, 366, 3102, 294, 264, 1859, 281, 264, 558, 420, 3673, 502, 13, 8794, 13], "avg_logprob": -0.2898762921492259, "compression_ratio": 1.5348837209302326, "no_speech_prob": 0.0, "words": [{"start": 1492.98, "end": 1493.3, "word": " But", "probability": 0.7373046875}, {"start": 1493.3, "end": 1493.56, "word": " here", "probability": 0.8193359375}, {"start": 1493.56, "end": 1493.74, "word": " we", "probability": 0.60693359375}, {"start": 1493.74, "end": 1493.86, "word": " are", "probability": 0.89208984375}, {"start": 1493.86, "end": 1494.12, "word": " looking", "probability": 0.90869140625}, {"start": 1494.12, "end": 1494.42, "word": " for", "probability": 0.9345703125}, {"start": 1494.42, "end": 1494.6, "word": " the", "probability": 0.84228515625}, {"start": 1494.6, "end": 1494.8, "word": " area", "probability": 0.85986328125}, {"start": 1494.8, "end": 1494.98, "word": " to", "probability": 0.95458984375}, {"start": 1494.98, "end": 1495.14, "word": " the", "probability": 0.92041015625}, {"start": 1495.14, "end": 1495.4, "word": " right.", "probability": 0.9208984375}, {"start": 1495.6, "end": 1495.72, "word": " One", "probability": 0.22607421875}, {"start": 1495.72, "end": 1495.98, "word": " minus", "probability": 0.97216796875}, {"start": 1495.98, "end": 1496.18, "word": " one.", "probability": 0.3447265625}, {"start": 1497.06, "end": 1497.24, "word": " Now", "probability": 0.59716796875}, {"start": 1497.24, "end": 1497.66, "word": " this", "probability": 0.51611328125}, {"start": 1497.66, "end": 1498.24, "word": " area", "probability": 0.875}, {"start": 1498.24, "end": 1501.28, "word": " equals", "probability": 0.80810546875}, {"start": 1501.28, "end": 
1501.58, "word": " one", "probability": 0.85400390625}, {"start": 1501.58, "end": 1502.1, "word": " minus", "probability": 0.98486328125}, {"start": 1502.1, "end": 1505.66, "word": " because", "probability": 0.07733154296875}, {"start": 1505.66, "end": 1508.02, "word": " since", "probability": 0.74951171875}, {"start": 1508.02, "end": 1511.42, "word": " suppose", "probability": 0.50537109375}, {"start": 1511.42, "end": 1511.8, "word": " this", "probability": 0.884765625}, {"start": 1511.8, "end": 1511.96, "word": " is", "probability": 0.92822265625}, {"start": 1511.96, "end": 1512.18, "word": " the", "probability": 0.52490234375}, {"start": 1512.18, "end": 1513.18, "word": " 1", "probability": 0.6591796875}, {"start": 1513.18, "end": 1513.72, "word": ".35", "probability": 0.984375}, {"start": 1513.72, "end": 1515.48, "word": " and", "probability": 0.6533203125}, {"start": 1515.48, "end": 1515.64, "word": " we", "probability": 0.93115234375}, {"start": 1515.64, "end": 1515.78, "word": " are", "probability": 0.90283203125}, {"start": 1515.78, "end": 1516.22, "word": " interested", "probability": 0.85791015625}, {"start": 1516.22, "end": 1518.46, "word": " in", "probability": 0.73193359375}, {"start": 1518.46, "end": 1518.52, "word": " the", "probability": 0.794921875}, {"start": 1518.52, "end": 1518.76, "word": " area", "probability": 0.87451171875}, {"start": 1518.76, "end": 1518.94, "word": " to", "probability": 0.94677734375}, {"start": 1518.94, "end": 1519.12, "word": " the", "probability": 0.9130859375}, {"start": 1519.12, "end": 1519.44, "word": " right", "probability": 0.91552734375}, {"start": 1519.44, "end": 1520.7, "word": " or", "probability": 0.58349609375}, {"start": 1520.7, "end": 1520.98, "word": " above", "probability": 0.95068359375}, {"start": 1520.98, "end": 1521.24, "word": " 1", "probability": 0.93701171875}, {"start": 1521.24, "end": 1521.72, "word": ".35.", "probability": 0.99658203125}], "temperature": 1.0}, {"id": 59, "seek": 154675, "start": 
1523.09, "end": 1546.75, "text": " The table gives the area below. So the area above equals the total area underneath the curve is 1. So 1 minus this value, so equals 0.0885, and so on. So this is the way how can we compute the probabilities underneath the normal curve.", "tokens": [440, 3199, 2709, 264, 1859, 2507, 13, 407, 264, 1859, 3673, 6915, 264, 3217, 1859, 7223, 264, 7605, 307, 502, 13, 407, 502, 3175, 341, 2158, 11, 370, 6915, 1958, 13, 16133, 19287, 11, 293, 370, 322, 13, 407, 341, 307, 264, 636, 577, 393, 321, 14722, 264, 33783, 7223, 264, 2710, 7605, 13], "avg_logprob": -0.1784090979532762, "compression_ratio": 1.542483660130719, "no_speech_prob": 0.0, "words": [{"start": 1523.09, "end": 1523.37, "word": " The", "probability": 0.54345703125}, {"start": 1523.37, "end": 1523.65, "word": " table", "probability": 0.80712890625}, {"start": 1523.65, "end": 1523.93, "word": " gives", "probability": 0.873046875}, {"start": 1523.93, "end": 1524.03, "word": " the", "probability": 0.72802734375}, {"start": 1524.03, "end": 1524.21, "word": " area", "probability": 0.85791015625}, {"start": 1524.21, "end": 1524.49, "word": " below.", "probability": 0.927734375}, {"start": 1525.31, "end": 1525.55, "word": " So", "probability": 0.90771484375}, {"start": 1525.55, "end": 1525.79, "word": " the", "probability": 0.75341796875}, {"start": 1525.79, "end": 1526.15, "word": " area", "probability": 0.8896484375}, {"start": 1526.15, "end": 1526.63, "word": " above", "probability": 0.95556640625}, {"start": 1526.63, "end": 1527.25, "word": " equals", "probability": 0.94189453125}, {"start": 1527.25, "end": 1527.89, "word": " the", "probability": 0.71533203125}, {"start": 1527.89, "end": 1528.23, "word": " total", "probability": 0.86962890625}, {"start": 1528.23, "end": 1528.65, "word": " area", "probability": 0.91943359375}, {"start": 1528.65, "end": 1529.29, "word": " underneath", "probability": 0.91064453125}, {"start": 1529.29, "end": 1529.53, "word": " the", "probability": 
0.89404296875}, {"start": 1529.53, "end": 1529.73, "word": " curve", "probability": 0.89892578125}, {"start": 1529.73, "end": 1529.87, "word": " is", "probability": 0.849609375}, {"start": 1529.87, "end": 1530.09, "word": " 1.", "probability": 0.50634765625}, {"start": 1530.49, "end": 1531.07, "word": " So", "probability": 0.84326171875}, {"start": 1531.07, "end": 1531.33, "word": " 1", "probability": 0.90869140625}, {"start": 1531.33, "end": 1531.67, "word": " minus", "probability": 0.97900390625}, {"start": 1531.67, "end": 1531.97, "word": " this", "probability": 0.951171875}, {"start": 1531.97, "end": 1532.39, "word": " value,", "probability": 0.9794921875}, {"start": 1532.95, "end": 1533.31, "word": " so", "probability": 0.8212890625}, {"start": 1533.31, "end": 1533.81, "word": " equals", "probability": 0.73193359375}, {"start": 1533.81, "end": 1534.95, "word": " 0", "probability": 0.6943359375}, {"start": 1534.95, "end": 1539.05, "word": ".0885,", "probability": 0.833984375}, {"start": 1539.35, "end": 1539.59, "word": " and", "probability": 0.92333984375}, {"start": 1539.59, "end": 1539.75, "word": " so", "probability": 0.95166015625}, {"start": 1539.75, "end": 1539.93, "word": " on.", "probability": 0.91748046875}, {"start": 1540.35, "end": 1540.65, "word": " So", "probability": 0.9462890625}, {"start": 1540.65, "end": 1540.87, "word": " this", "probability": 0.9404296875}, {"start": 1540.87, "end": 1540.99, "word": " is", "probability": 0.94140625}, {"start": 1540.99, "end": 1541.11, "word": " the", "probability": 0.8994140625}, {"start": 1541.11, "end": 1541.21, "word": " way", "probability": 0.962890625}, {"start": 1541.21, "end": 1541.35, "word": " how", "probability": 0.79052734375}, {"start": 1541.35, "end": 1541.53, "word": " can", "probability": 0.8505859375}, {"start": 1541.53, "end": 1541.67, "word": " we", "probability": 0.94677734375}, {"start": 1541.67, "end": 1542.25, "word": " compute", "probability": 0.90087890625}, {"start": 1542.25, "end": 
1543.53, "word": " the", "probability": 0.91162109375}, {"start": 1543.53, "end": 1544.15, "word": " probabilities", "probability": 0.9150390625}, {"start": 1544.15, "end": 1545.13, "word": " underneath", "probability": 0.9599609375}, {"start": 1545.13, "end": 1546.15, "word": " the", "probability": 0.9140625}, {"start": 1546.15, "end": 1546.43, "word": " normal", "probability": 0.89599609375}, {"start": 1546.43, "end": 1546.75, "word": " curve.", "probability": 0.89599609375}], "temperature": 1.0}, {"id": 60, "seek": 157429, "start": 1547.49, "end": 1574.29, "text": " if it's probability of z is smaller than then just use the table directly otherwise if we are talking about z greater than subtract from one to get the result that's how can we compute the probability of z less than or equal now let's see", "tokens": [498, 309, 311, 8482, 295, 710, 307, 4356, 813, 550, 445, 764, 264, 3199, 3838, 5911, 498, 321, 366, 1417, 466, 710, 5044, 813, 16390, 490, 472, 281, 483, 264, 1874, 300, 311, 577, 393, 321, 14722, 264, 8482, 295, 710, 1570, 813, 420, 2681, 586, 718, 311, 536], "avg_logprob": -0.158749997317791, "compression_ratio": 1.5933333333333333, "no_speech_prob": 0.0, "words": [{"start": 1547.49, "end": 1547.85, "word": " if", "probability": 0.357666015625}, {"start": 1547.85, "end": 1548.55, "word": " it's", "probability": 0.793701171875}, {"start": 1548.55, "end": 1548.99, "word": " probability", "probability": 0.9208984375}, {"start": 1548.99, "end": 1549.17, "word": " of", "probability": 0.9072265625}, {"start": 1549.17, "end": 1549.27, "word": " z", "probability": 0.58935546875}, {"start": 1549.27, "end": 1549.39, "word": " is", "probability": 0.80419921875}, {"start": 1549.39, "end": 1549.67, "word": " smaller", "probability": 0.85400390625}, {"start": 1549.67, "end": 1549.93, "word": " than", "probability": 0.83740234375}, {"start": 1549.93, "end": 1550.31, "word": " then", "probability": 0.50634765625}, {"start": 1550.31, "end": 1551.09, "word": " just", 
"probability": 0.7841796875}, {"start": 1551.09, "end": 1551.41, "word": " use", "probability": 0.87548828125}, {"start": 1551.41, "end": 1551.61, "word": " the", "probability": 0.9208984375}, {"start": 1551.61, "end": 1551.91, "word": " table", "probability": 0.89404296875}, {"start": 1551.91, "end": 1552.51, "word": " directly", "probability": 0.8828125}, {"start": 1552.51, "end": 1553.43, "word": " otherwise", "probability": 0.63134765625}, {"start": 1553.43, "end": 1554.47, "word": " if", "probability": 0.91015625}, {"start": 1554.47, "end": 1554.67, "word": " we", "probability": 0.96484375}, {"start": 1554.67, "end": 1554.97, "word": " are", "probability": 0.94384765625}, {"start": 1554.97, "end": 1555.91, "word": " talking", "probability": 0.82470703125}, {"start": 1555.91, "end": 1556.37, "word": " about", "probability": 0.90771484375}, {"start": 1556.37, "end": 1556.71, "word": " z", "probability": 0.94970703125}, {"start": 1556.71, "end": 1557.07, "word": " greater", "probability": 0.89794921875}, {"start": 1557.07, "end": 1557.49, "word": " than", "probability": 0.93212890625}, {"start": 1557.49, "end": 1558.91, "word": " subtract", "probability": 0.80712890625}, {"start": 1558.91, "end": 1559.33, "word": " from", "probability": 0.79248046875}, {"start": 1559.33, "end": 1559.57, "word": " one", "probability": 0.7470703125}, {"start": 1559.57, "end": 1559.75, "word": " to", "probability": 0.9697265625}, {"start": 1559.75, "end": 1559.99, "word": " get", "probability": 0.94775390625}, {"start": 1559.99, "end": 1560.39, "word": " the", "probability": 0.91552734375}, {"start": 1560.39, "end": 1560.97, "word": " result", "probability": 0.9580078125}, {"start": 1560.97, "end": 1563.31, "word": " that's", "probability": 0.864013671875}, {"start": 1563.31, "end": 1563.41, "word": " how", "probability": 0.94189453125}, {"start": 1563.41, "end": 1563.59, "word": " can", "probability": 0.8251953125}, {"start": 1563.59, "end": 1563.75, "word": " we", "probability": 
0.96044921875}, {"start": 1563.75, "end": 1564.21, "word": " compute", "probability": 0.89453125}, {"start": 1564.21, "end": 1564.53, "word": " the", "probability": 0.91943359375}, {"start": 1564.53, "end": 1564.87, "word": " probability", "probability": 0.94677734375}, {"start": 1564.87, "end": 1565.29, "word": " of", "probability": 0.96728515625}, {"start": 1565.29, "end": 1565.61, "word": " z", "probability": 0.95556640625}, {"start": 1565.61, "end": 1568.55, "word": " less", "probability": 0.9169921875}, {"start": 1568.55, "end": 1568.81, "word": " than", "probability": 0.94677734375}, {"start": 1568.81, "end": 1569.05, "word": " or", "probability": 0.96630859375}, {"start": 1569.05, "end": 1569.35, "word": " equal", "probability": 0.87255859375}, {"start": 1569.35, "end": 1573.75, "word": " now", "probability": 0.71826171875}, {"start": 1573.75, "end": 1574.07, "word": " let's", "probability": 0.966796875}, {"start": 1574.07, "end": 1574.29, "word": " see", "probability": 0.92236328125}], "temperature": 1.0}, {"id": 61, "seek": 160469, "start": 1575.45, "end": 1604.69, "text": " if we have x and x that has normal distribution with mean mu and standard deviation of sigma and let's see how can we compute the value of the probability mainly there are three steps to find the probability of x greater than a and less than b when x is distributed normally first step", "tokens": [498, 321, 362, 2031, 293, 2031, 300, 575, 2710, 7316, 365, 914, 2992, 293, 3832, 25163, 295, 12771, 293, 718, 311, 536, 577, 393, 321, 14722, 264, 2158, 295, 264, 8482, 8704, 456, 366, 1045, 4439, 281, 915, 264, 8482, 295, 2031, 5044, 813, 257, 293, 1570, 813, 272, 562, 2031, 307, 12631, 5646, 700, 1823], "avg_logprob": -0.14350328738229318, "compression_ratio": 1.672514619883041, "no_speech_prob": 0.0, "words": [{"start": 1575.45, "end": 1575.87, "word": " if", "probability": 0.357666015625}, {"start": 1575.87, "end": 1576.13, "word": " we", "probability": 0.9501953125}, {"start": 1576.13, 
"end": 1576.39, "word": " have", "probability": 0.94775390625}, {"start": 1576.39, "end": 1576.81, "word": " x", "probability": 0.55615234375}, {"start": 1576.81, "end": 1577.15, "word": " and", "probability": 0.5751953125}, {"start": 1577.15, "end": 1577.37, "word": " x", "probability": 0.98583984375}, {"start": 1577.37, "end": 1577.67, "word": " that", "probability": 0.89306640625}, {"start": 1577.67, "end": 1578.11, "word": " has", "probability": 0.94287109375}, {"start": 1578.11, "end": 1578.89, "word": " normal", "probability": 0.81591796875}, {"start": 1578.89, "end": 1579.51, "word": " distribution", "probability": 0.85498046875}, {"start": 1579.51, "end": 1580.37, "word": " with", "probability": 0.884765625}, {"start": 1580.37, "end": 1580.59, "word": " mean", "probability": 0.7275390625}, {"start": 1580.59, "end": 1580.87, "word": " mu", "probability": 0.58642578125}, {"start": 1580.87, "end": 1581.27, "word": " and", "probability": 0.9306640625}, {"start": 1581.27, "end": 1581.65, "word": " standard", "probability": 0.7998046875}, {"start": 1581.65, "end": 1582.07, "word": " deviation", "probability": 0.90771484375}, {"start": 1582.07, "end": 1582.53, "word": " of", "probability": 0.96630859375}, {"start": 1582.53, "end": 1583.63, "word": " sigma", "probability": 0.8388671875}, {"start": 1583.63, "end": 1584.17, "word": " and", "probability": 0.517578125}, {"start": 1584.17, "end": 1584.37, "word": " let's", "probability": 0.9619140625}, {"start": 1584.37, "end": 1584.47, "word": " see", "probability": 0.91064453125}, {"start": 1584.47, "end": 1584.57, "word": " how", "probability": 0.9228515625}, {"start": 1584.57, "end": 1584.79, "word": " can", "probability": 0.8681640625}, {"start": 1584.79, "end": 1584.93, "word": " we", "probability": 0.95458984375}, {"start": 1584.93, "end": 1585.43, "word": " compute", "probability": 0.88916015625}, {"start": 1585.43, "end": 1586.25, "word": " the", "probability": 0.912109375}, {"start": 1586.25, "end": 1586.69, 
"word": " value", "probability": 0.97314453125}, {"start": 1586.69, "end": 1587.89, "word": " of", "probability": 0.96533203125}, {"start": 1587.89, "end": 1588.19, "word": " the", "probability": 0.89892578125}, {"start": 1588.19, "end": 1588.71, "word": " probability", "probability": 0.9501953125}, {"start": 1588.71, "end": 1593.79, "word": " mainly", "probability": 0.779296875}, {"start": 1593.79, "end": 1594.21, "word": " there", "probability": 0.83642578125}, {"start": 1594.21, "end": 1594.39, "word": " are", "probability": 0.94384765625}, {"start": 1594.39, "end": 1594.69, "word": " three", "probability": 0.92578125}, {"start": 1594.69, "end": 1595.13, "word": " steps", "probability": 0.8720703125}, {"start": 1595.13, "end": 1596.89, "word": " to", "probability": 0.94287109375}, {"start": 1596.89, "end": 1597.17, "word": " find", "probability": 0.89208984375}, {"start": 1597.17, "end": 1597.29, "word": " the", "probability": 0.8759765625}, {"start": 1597.29, "end": 1597.55, "word": " probability", "probability": 0.9541015625}, {"start": 1597.55, "end": 1597.83, "word": " of", "probability": 0.96630859375}, {"start": 1597.83, "end": 1598.19, "word": " x", "probability": 0.97314453125}, {"start": 1598.19, "end": 1598.89, "word": " greater", "probability": 0.90185546875}, {"start": 1598.89, "end": 1599.17, "word": " than", "probability": 0.9541015625}, {"start": 1599.17, "end": 1599.33, "word": " a", "probability": 0.91650390625}, {"start": 1599.33, "end": 1599.51, "word": " and", "probability": 0.93798828125}, {"start": 1599.51, "end": 1599.71, "word": " less", "probability": 0.93603515625}, {"start": 1599.71, "end": 1599.89, "word": " than", "probability": 0.94384765625}, {"start": 1599.89, "end": 1600.11, "word": " b", "probability": 0.98388671875}, {"start": 1600.11, "end": 1601.77, "word": " when", "probability": 0.91943359375}, {"start": 1601.77, "end": 1602.09, "word": " x", "probability": 0.9951171875}, {"start": 1602.09, "end": 1602.49, "word": " is", 
"probability": 0.95361328125}, {"start": 1602.49, "end": 1603.17, "word": " distributed", "probability": 0.91943359375}, {"start": 1603.17, "end": 1603.75, "word": " normally", "probability": 0.91796875}, {"start": 1603.75, "end": 1604.31, "word": " first", "probability": 0.7138671875}, {"start": 1604.31, "end": 1604.69, "word": " step", "probability": 0.92578125}], "temperature": 1.0}, {"id": 62, "seek": 163210, "start": 1605.64, "end": 1632.1, "text": " Draw normal curve for the problem in terms of x. So draw the normal curve first. Second, translate x values to z values by using the formula we have. z x minus mu divided by sigma. Then use the standardized normal table on page 570 and 571. For example,", "tokens": [20386, 2710, 7605, 337, 264, 1154, 294, 2115, 295, 2031, 13, 407, 2642, 264, 2710, 7605, 700, 13, 5736, 11, 13799, 2031, 4190, 281, 710, 4190, 538, 1228, 264, 8513, 321, 362, 13, 710, 2031, 3175, 2992, 6666, 538, 12771, 13, 1396, 764, 264, 31677, 2710, 3199, 322, 3028, 1025, 5867, 293, 21423, 16, 13, 1171, 1365, 11], "avg_logprob": -0.21729343775975501, "compression_ratio": 1.4350282485875707, "no_speech_prob": 0.0, "words": [{"start": 1605.64, "end": 1606.1, "word": " Draw", "probability": 0.5107421875}, {"start": 1606.1, "end": 1606.62, "word": " normal", "probability": 0.420166015625}, {"start": 1606.62, "end": 1607.0, "word": " curve", "probability": 0.9296875}, {"start": 1607.0, "end": 1609.94, "word": " for", "probability": 0.5029296875}, {"start": 1609.94, "end": 1610.1, "word": " the", "probability": 0.849609375}, {"start": 1610.1, "end": 1610.48, "word": " problem", "probability": 0.86767578125}, {"start": 1610.48, "end": 1611.06, "word": " in", "probability": 0.91796875}, {"start": 1611.06, "end": 1611.34, "word": " terms", "probability": 0.896484375}, {"start": 1611.34, "end": 1611.48, "word": " of", "probability": 0.96875}, {"start": 1611.48, "end": 1611.82, "word": " x.", "probability": 0.5185546875}, {"start": 1613.98, "end": 1614.22, 
"word": " So", "probability": 0.73828125}, {"start": 1614.22, "end": 1614.42, "word": " draw", "probability": 0.5537109375}, {"start": 1614.42, "end": 1614.64, "word": " the", "probability": 0.85400390625}, {"start": 1614.64, "end": 1614.88, "word": " normal", "probability": 0.87841796875}, {"start": 1614.88, "end": 1615.22, "word": " curve", "probability": 0.900390625}, {"start": 1615.22, "end": 1615.54, "word": " first.", "probability": 0.7763671875}, {"start": 1615.94, "end": 1616.34, "word": " Second,", "probability": 0.70947265625}, {"start": 1616.5, "end": 1617.06, "word": " translate", "probability": 0.8154296875}, {"start": 1617.06, "end": 1617.38, "word": " x", "probability": 0.94287109375}, {"start": 1617.38, "end": 1617.78, "word": " values", "probability": 0.642578125}, {"start": 1617.78, "end": 1617.96, "word": " to", "probability": 0.93017578125}, {"start": 1617.96, "end": 1618.14, "word": " z", "probability": 0.97509765625}, {"start": 1618.14, "end": 1618.56, "word": " values", "probability": 0.9619140625}, {"start": 1618.56, "end": 1619.2, "word": " by", "probability": 0.85302734375}, {"start": 1619.2, "end": 1619.54, "word": " using", "probability": 0.927734375}, {"start": 1619.54, "end": 1619.76, "word": " the", "probability": 0.91455078125}, {"start": 1619.76, "end": 1620.12, "word": " formula", "probability": 0.91064453125}, {"start": 1620.12, "end": 1620.32, "word": " we", "probability": 0.92041015625}, {"start": 1620.32, "end": 1620.62, "word": " have.", "probability": 0.9375}, {"start": 1621.94, "end": 1622.22, "word": " z", "probability": 0.64306640625}, {"start": 1622.22, "end": 1622.48, "word": " x", "probability": 0.55126953125}, {"start": 1622.48, "end": 1622.82, "word": " minus", "probability": 0.94970703125}, {"start": 1622.82, "end": 1623.04, "word": " mu", "probability": 0.85107421875}, {"start": 1623.04, "end": 1623.24, "word": " divided", "probability": 0.77001953125}, {"start": 1623.24, "end": 1623.44, "word": " by", 
"probability": 0.9755859375}, {"start": 1623.44, "end": 1623.68, "word": " sigma.", "probability": 0.8994140625}, {"start": 1624.54, "end": 1624.96, "word": " Then", "probability": 0.8603515625}, {"start": 1624.96, "end": 1625.28, "word": " use", "probability": 0.72900390625}, {"start": 1625.28, "end": 1625.5, "word": " the", "probability": 0.89892578125}, {"start": 1625.5, "end": 1626.08, "word": " standardized", "probability": 0.84033203125}, {"start": 1626.08, "end": 1626.44, "word": " normal", "probability": 0.87939453125}, {"start": 1626.44, "end": 1626.92, "word": " table", "probability": 0.84228515625}, {"start": 1626.92, "end": 1627.58, "word": " on", "probability": 0.92333984375}, {"start": 1627.58, "end": 1627.9, "word": " page", "probability": 0.8486328125}, {"start": 1627.9, "end": 1628.6, "word": " 570", "probability": 0.8955078125}, {"start": 1628.6, "end": 1628.9, "word": " and", "probability": 0.93115234375}, {"start": 1628.9, "end": 1630.58, "word": " 571.", "probability": 0.830810546875}, {"start": 1631.4, "end": 1631.76, "word": " For", "probability": 0.9482421875}, {"start": 1631.76, "end": 1632.1, "word": " example,", "probability": 0.96533203125}], "temperature": 1.0}, {"id": 63, "seek": 166310, "start": 1634.7, "end": 1663.1, "text": " Let's see how can we find normal probabilities. Let's assume that X represents the time it takes to download an image from the internet. So suppose X, time required to download an image file from the internet. 
And suppose we know that the time is normally distributed for with mean of eight minutes.", "tokens": [961, 311, 536, 577, 393, 321, 915, 2710, 33783, 13, 961, 311, 6552, 300, 1783, 8855, 264, 565, 309, 2516, 281, 5484, 364, 3256, 490, 264, 4705, 13, 407, 7297, 1783, 11, 565, 4739, 281, 5484, 364, 3256, 3991, 490, 264, 4705, 13, 400, 7297, 321, 458, 300, 264, 565, 307, 5646, 12631, 337, 365, 914, 295, 3180, 2077, 13], "avg_logprob": -0.17802253609797994, "compression_ratio": 1.675977653631285, "no_speech_prob": 0.0, "words": [{"start": 1634.7, "end": 1635.04, "word": " Let's", "probability": 0.821533203125}, {"start": 1635.04, "end": 1635.14, "word": " see", "probability": 0.9033203125}, {"start": 1635.14, "end": 1635.24, "word": " how", "probability": 0.90576171875}, {"start": 1635.24, "end": 1635.44, "word": " can", "probability": 0.6318359375}, {"start": 1635.44, "end": 1635.56, "word": " we", "probability": 0.94580078125}, {"start": 1635.56, "end": 1635.92, "word": " find", "probability": 0.88818359375}, {"start": 1635.92, "end": 1636.44, "word": " normal", "probability": 0.8203125}, {"start": 1636.44, "end": 1636.98, "word": " probabilities.", "probability": 0.90625}, {"start": 1637.68, "end": 1638.04, "word": " Let's", "probability": 0.9443359375}, {"start": 1638.04, "end": 1638.42, "word": " assume", "probability": 0.9140625}, {"start": 1638.42, "end": 1638.8, "word": " that", "probability": 0.92724609375}, {"start": 1638.8, "end": 1639.3, "word": " X", "probability": 0.5078125}, {"start": 1639.3, "end": 1639.92, "word": " represents", "probability": 0.5849609375}, {"start": 1639.92, "end": 1641.52, "word": " the", "probability": 0.8603515625}, {"start": 1641.52, "end": 1641.94, "word": " time", "probability": 0.89404296875}, {"start": 1641.94, "end": 1642.34, "word": " it", "probability": 0.939453125}, {"start": 1642.34, "end": 1642.78, "word": " takes", "probability": 0.80029296875}, {"start": 1642.78, "end": 1643.02, "word": " to", "probability": 
0.96142578125}, {"start": 1643.02, "end": 1643.4, "word": " download", "probability": 0.9521484375}, {"start": 1643.4, "end": 1643.76, "word": " an", "probability": 0.92919921875}, {"start": 1643.76, "end": 1644.02, "word": " image", "probability": 0.921875}, {"start": 1644.02, "end": 1644.44, "word": " from", "probability": 0.88623046875}, {"start": 1644.44, "end": 1645.26, "word": " the", "probability": 0.91064453125}, {"start": 1645.26, "end": 1645.62, "word": " internet.", "probability": 0.705078125}, {"start": 1646.86, "end": 1647.1, "word": " So", "probability": 0.65478515625}, {"start": 1647.1, "end": 1647.38, "word": " suppose", "probability": 0.69189453125}, {"start": 1647.38, "end": 1647.74, "word": " X,", "probability": 0.91455078125}, {"start": 1648.18, "end": 1648.58, "word": " time", "probability": 0.75146484375}, {"start": 1648.58, "end": 1649.2, "word": " required", "probability": 0.8173828125}, {"start": 1649.2, "end": 1650.04, "word": " to", "probability": 0.95654296875}, {"start": 1650.04, "end": 1650.58, "word": " download", "probability": 0.95458984375}, {"start": 1650.58, "end": 1651.5, "word": " an", "probability": 0.93359375}, {"start": 1651.5, "end": 1651.92, "word": " image", "probability": 0.93505859375}, {"start": 1651.92, "end": 1653.22, "word": " file", "probability": 0.87548828125}, {"start": 1653.22, "end": 1653.6, "word": " from", "probability": 0.88330078125}, {"start": 1653.6, "end": 1653.76, "word": " the", "probability": 0.91455078125}, {"start": 1653.76, "end": 1654.12, "word": " internet.", "probability": 0.8984375}, {"start": 1655.5, "end": 1655.7, "word": " And", "probability": 0.84619140625}, {"start": 1655.7, "end": 1656.0, "word": " suppose", "probability": 0.9072265625}, {"start": 1656.0, "end": 1656.18, "word": " we", "probability": 0.916015625}, {"start": 1656.18, "end": 1656.32, "word": " know", "probability": 0.89013671875}, {"start": 1656.32, "end": 1656.72, "word": " that", "probability": 0.9384765625}, {"start": 
1656.72, "end": 1657.78, "word": " the", "probability": 0.84326171875}, {"start": 1657.78, "end": 1658.18, "word": " time", "probability": 0.89697265625}, {"start": 1658.18, "end": 1658.46, "word": " is", "probability": 0.94970703125}, {"start": 1658.46, "end": 1658.9, "word": " normally", "probability": 0.91455078125}, {"start": 1658.9, "end": 1659.6, "word": " distributed", "probability": 0.91064453125}, {"start": 1659.6, "end": 1660.62, "word": " for", "probability": 0.478271484375}, {"start": 1660.62, "end": 1660.88, "word": " with", "probability": 0.642578125}, {"start": 1660.88, "end": 1661.18, "word": " mean", "probability": 0.8994140625}, {"start": 1661.18, "end": 1661.66, "word": " of", "probability": 0.955078125}, {"start": 1661.66, "end": 1662.06, "word": " eight", "probability": 0.638671875}, {"start": 1662.06, "end": 1663.1, "word": " minutes.", "probability": 0.9208984375}], "temperature": 1.0}, {"id": 64, "seek": 169251, "start": 1663.79, "end": 1692.51, "text": " And standard deviation of five minutes. So we know the mean. Eight. Eight. And sigma of five minutes. And they ask about what's the probability of X smaller than eight one six. So first thing we have to compute, to draw the normal curve. 
The mean lies in the center.", "tokens": [400, 3832, 25163, 295, 1732, 2077, 13, 407, 321, 458, 264, 914, 13, 17708, 13, 17708, 13, 400, 12771, 295, 1732, 2077, 13, 400, 436, 1029, 466, 437, 311, 264, 8482, 295, 1783, 4356, 813, 3180, 472, 2309, 13, 407, 700, 551, 321, 362, 281, 14722, 11, 281, 2642, 264, 2710, 7605, 13, 440, 914, 9134, 294, 264, 3056, 13], "avg_logprob": -0.3078893364452925, "compression_ratio": 1.5344827586206897, "no_speech_prob": 0.0, "words": [{"start": 1663.79, "end": 1664.27, "word": " And", "probability": 0.36474609375}, {"start": 1664.27, "end": 1664.79, "word": " standard", "probability": 0.859375}, {"start": 1664.79, "end": 1665.21, "word": " deviation", "probability": 0.91064453125}, {"start": 1665.21, "end": 1665.45, "word": " of", "probability": 0.93701171875}, {"start": 1665.45, "end": 1665.75, "word": " five", "probability": 0.646484375}, {"start": 1665.75, "end": 1666.13, "word": " minutes.", "probability": 0.7890625}, {"start": 1666.49, "end": 1666.77, "word": " So", "probability": 0.95068359375}, {"start": 1666.77, "end": 1666.97, "word": " we", "probability": 0.8369140625}, {"start": 1666.97, "end": 1667.13, "word": " know", "probability": 0.88427734375}, {"start": 1667.13, "end": 1667.27, "word": " the", "probability": 0.76025390625}, {"start": 1667.27, "end": 1667.51, "word": " mean.", "probability": 0.96240234375}, {"start": 1670.61, "end": 1671.23, "word": " Eight.", "probability": 0.345947265625}, {"start": 1671.39, "end": 1672.01, "word": " Eight.", "probability": 0.64697265625}, {"start": 1674.11, "end": 1674.73, "word": " And", "probability": 0.79052734375}, {"start": 1674.73, "end": 1675.19, "word": " sigma", "probability": 0.765625}, {"start": 1675.19, "end": 1676.11, "word": " of", "probability": 0.8369140625}, {"start": 1676.11, "end": 1676.37, "word": " five", "probability": 0.87109375}, {"start": 1676.37, "end": 1676.57, "word": " minutes.", "probability": 0.828125}, {"start": 1678.93, "end": 1679.55, "word": " 
And", "probability": 0.8095703125}, {"start": 1679.55, "end": 1679.67, "word": " they", "probability": 0.248291015625}, {"start": 1679.67, "end": 1679.85, "word": " ask", "probability": 0.8818359375}, {"start": 1679.85, "end": 1680.25, "word": " about", "probability": 0.90234375}, {"start": 1680.25, "end": 1680.87, "word": " what's", "probability": 0.82666015625}, {"start": 1680.87, "end": 1680.95, "word": " the", "probability": 0.78271484375}, {"start": 1680.95, "end": 1681.31, "word": " probability", "probability": 0.96240234375}, {"start": 1681.31, "end": 1682.13, "word": " of", "probability": 0.83837890625}, {"start": 1682.13, "end": 1682.43, "word": " X", "probability": 0.6083984375}, {"start": 1682.43, "end": 1682.93, "word": " smaller", "probability": 0.7529296875}, {"start": 1682.93, "end": 1683.41, "word": " than", "probability": 0.947265625}, {"start": 1683.41, "end": 1684.99, "word": " eight", "probability": 0.70263671875}, {"start": 1684.99, "end": 1685.21, "word": " one", "probability": 0.1551513671875}, {"start": 1685.21, "end": 1685.45, "word": " six.", "probability": 0.4013671875}, {"start": 1686.45, "end": 1686.95, "word": " So", "probability": 0.78662109375}, {"start": 1686.95, "end": 1687.21, "word": " first", "probability": 0.74658203125}, {"start": 1687.21, "end": 1687.33, "word": " thing", "probability": 0.5244140625}, {"start": 1687.33, "end": 1687.47, "word": " we", "probability": 0.69921875}, {"start": 1687.47, "end": 1687.61, "word": " have", "probability": 0.9423828125}, {"start": 1687.61, "end": 1687.71, "word": " to", "probability": 0.966796875}, {"start": 1687.71, "end": 1687.99, "word": " compute,", "probability": 0.493408203125}, {"start": 1688.17, "end": 1688.29, "word": " to", "probability": 0.9189453125}, {"start": 1688.29, "end": 1688.67, "word": " draw", "probability": 0.888671875}, {"start": 1688.67, "end": 1688.97, "word": " the", "probability": 0.84521484375}, {"start": 1688.97, "end": 1689.25, "word": " normal", 
"probability": 0.87744140625}, {"start": 1689.25, "end": 1689.59, "word": " curve.", "probability": 0.430419921875}, {"start": 1690.55, "end": 1690.75, "word": " The", "probability": 0.83984375}, {"start": 1690.75, "end": 1691.01, "word": " mean", "probability": 0.97998046875}, {"start": 1691.01, "end": 1691.87, "word": " lies", "probability": 0.92138671875}, {"start": 1691.87, "end": 1692.05, "word": " in", "probability": 0.94677734375}, {"start": 1692.05, "end": 1692.19, "word": " the", "probability": 0.9130859375}, {"start": 1692.19, "end": 1692.51, "word": " center.", "probability": 0.8876953125}], "temperature": 1.0}, {"id": 65, "seek": 172242, "start": 1693.34, "end": 1722.42, "text": " which is 8. He asked about probability of X smaller than 8.6. So we are interested in the area below 8.6. So it matched the table we have. Second step, we have to transform from normal distribution to standardized normal distribution by using this form, which is X minus mu divided by sigma.", "tokens": [597, 307, 1649, 13, 634, 2351, 466, 8482, 295, 1783, 4356, 813, 1649, 13, 21, 13, 407, 321, 366, 3102, 294, 264, 1859, 2507, 1649, 13, 21, 13, 407, 309, 21447, 264, 3199, 321, 362, 13, 5736, 1823, 11, 321, 362, 281, 4088, 490, 2710, 7316, 281, 31677, 2710, 7316, 538, 1228, 341, 1254, 11, 597, 307, 1783, 3175, 2992, 6666, 538, 12771, 13], "avg_logprob": -0.2075721153846154, "compression_ratio": 1.5614973262032086, "no_speech_prob": 0.0, "words": [{"start": 1693.34, "end": 1693.58, "word": " which", "probability": 0.35546875}, {"start": 1693.58, "end": 1693.7, "word": " is", "probability": 0.953125}, {"start": 1693.7, "end": 1694.18, "word": " 8.", "probability": 0.43994140625}, {"start": 1695.04, "end": 1695.6, "word": " He", "probability": 0.8701171875}, {"start": 1695.6, "end": 1695.8, "word": " asked", "probability": 0.5888671875}, {"start": 1695.8, "end": 1696.3, "word": " about", "probability": 0.90869140625}, {"start": 1696.3, "end": 1697.8, "word": " probability", 
"probability": 0.60595703125}, {"start": 1697.8, "end": 1698.06, "word": " of", "probability": 0.9326171875}, {"start": 1698.06, "end": 1698.32, "word": " X", "probability": 0.662109375}, {"start": 1698.32, "end": 1699.06, "word": " smaller", "probability": 0.79833984375}, {"start": 1699.06, "end": 1699.38, "word": " than", "probability": 0.9482421875}, {"start": 1699.38, "end": 1699.6, "word": " 8", "probability": 0.984375}, {"start": 1699.6, "end": 1700.16, "word": ".6.", "probability": 0.9951171875}, {"start": 1700.54, "end": 1701.0, "word": " So", "probability": 0.96044921875}, {"start": 1701.0, "end": 1701.4, "word": " we", "probability": 0.72265625}, {"start": 1701.4, "end": 1701.56, "word": " are", "probability": 0.935546875}, {"start": 1701.56, "end": 1702.02, "word": " interested", "probability": 0.87646484375}, {"start": 1702.02, "end": 1702.48, "word": " in", "probability": 0.74072265625}, {"start": 1702.48, "end": 1702.58, "word": " the", "probability": 0.85498046875}, {"start": 1702.58, "end": 1702.88, "word": " area", "probability": 0.88037109375}, {"start": 1702.88, "end": 1703.38, "word": " below", "probability": 0.861328125}, {"start": 1703.38, "end": 1703.72, "word": " 8", "probability": 0.9755859375}, {"start": 1703.72, "end": 1704.3, "word": ".6.", "probability": 0.998291015625}, {"start": 1705.16, "end": 1705.36, "word": " So", "probability": 0.77392578125}, {"start": 1705.36, "end": 1705.54, "word": " it", "probability": 0.91064453125}, {"start": 1705.54, "end": 1705.94, "word": " matched", "probability": 0.369140625}, {"start": 1705.94, "end": 1706.94, "word": " the", "probability": 0.9111328125}, {"start": 1706.94, "end": 1707.44, "word": " table", "probability": 0.8623046875}, {"start": 1707.44, "end": 1707.64, "word": " we", "probability": 0.90283203125}, {"start": 1707.64, "end": 1707.92, "word": " have.", "probability": 0.67724609375}, {"start": 1709.98, "end": 1710.78, "word": " Second", "probability": 0.8154296875}, {"start": 1710.78, 
"end": 1711.24, "word": " step,", "probability": 0.9287109375}, {"start": 1712.4, "end": 1713.28, "word": " we", "probability": 0.951171875}, {"start": 1713.28, "end": 1713.46, "word": " have", "probability": 0.94287109375}, {"start": 1713.46, "end": 1713.58, "word": " to", "probability": 0.96875}, {"start": 1713.58, "end": 1714.1, "word": " transform", "probability": 0.9453125}, {"start": 1714.1, "end": 1714.5, "word": " from", "probability": 0.85888671875}, {"start": 1714.5, "end": 1714.9, "word": " normal", "probability": 0.8310546875}, {"start": 1714.9, "end": 1715.44, "word": " distribution", "probability": 0.85400390625}, {"start": 1715.44, "end": 1715.86, "word": " to", "probability": 0.90771484375}, {"start": 1715.86, "end": 1716.4, "word": " standardized", "probability": 0.734375}, {"start": 1716.4, "end": 1716.72, "word": " normal", "probability": 0.853515625}, {"start": 1716.72, "end": 1717.28, "word": " distribution", "probability": 0.90185546875}, {"start": 1717.28, "end": 1718.12, "word": " by", "probability": 0.80615234375}, {"start": 1718.12, "end": 1718.42, "word": " using", "probability": 0.93359375}, {"start": 1718.42, "end": 1718.64, "word": " this", "probability": 0.9462890625}, {"start": 1718.64, "end": 1718.96, "word": " form,", "probability": 0.60400390625}, {"start": 1719.68, "end": 1719.84, "word": " which", "probability": 0.9033203125}, {"start": 1719.84, "end": 1720.06, "word": " is", "probability": 0.947265625}, {"start": 1720.06, "end": 1721.12, "word": " X", "probability": 0.853515625}, {"start": 1721.12, "end": 1721.46, "word": " minus", "probability": 0.9677734375}, {"start": 1721.46, "end": 1721.7, "word": " mu", "probability": 0.368408203125}, {"start": 1721.7, "end": 1721.94, "word": " divided", "probability": 0.77099609375}, {"start": 1721.94, "end": 1722.12, "word": " by", "probability": 0.9716796875}, {"start": 1722.12, "end": 1722.42, "word": " sigma.", "probability": 0.89306640625}], "temperature": 1.0}, {"id": 66, "seek": 
174933, "start": 1723.85, "end": 1749.33, "text": " So x is 8.6 minus the mean, 8, divided by sigma, gives 0.12. So just straightforward calculation, 8.6 is your value of x. The mean is 8, sigma is 5, so that gives 0.12.", "tokens": [407, 2031, 307, 1649, 13, 21, 3175, 264, 914, 11, 1649, 11, 6666, 538, 12771, 11, 2709, 1958, 13, 4762, 13, 407, 445, 15325, 17108, 11, 1649, 13, 21, 307, 428, 2158, 295, 2031, 13, 440, 914, 307, 1649, 11, 12771, 307, 1025, 11, 370, 300, 2709, 1958, 13, 4762, 13], "avg_logprob": -0.1887019227903623, "compression_ratio": 1.310077519379845, "no_speech_prob": 0.0, "words": [{"start": 1723.85, "end": 1724.23, "word": " So", "probability": 0.869140625}, {"start": 1724.23, "end": 1724.83, "word": " x", "probability": 0.515625}, {"start": 1724.83, "end": 1725.37, "word": " is", "probability": 0.90185546875}, {"start": 1725.37, "end": 1725.61, "word": " 8", "probability": 0.89306640625}, {"start": 1725.61, "end": 1726.17, "word": ".6", "probability": 0.992431640625}, {"start": 1726.17, "end": 1728.85, "word": " minus", "probability": 0.8212890625}, {"start": 1728.85, "end": 1729.13, "word": " the", "probability": 0.88330078125}, {"start": 1729.13, "end": 1729.33, "word": " mean,", "probability": 0.96044921875}, {"start": 1729.45, "end": 1729.97, "word": " 8,", "probability": 0.77587890625}, {"start": 1730.17, "end": 1731.23, "word": " divided", "probability": 0.71044921875}, {"start": 1731.23, "end": 1731.43, "word": " by", "probability": 0.978515625}, {"start": 1731.43, "end": 1731.79, "word": " sigma,", "probability": 0.82373046875}, {"start": 1732.21, "end": 1732.71, "word": " gives", "probability": 0.8525390625}, {"start": 1732.71, "end": 1733.05, "word": " 0", "probability": 0.8623046875}, {"start": 1733.05, "end": 1733.47, "word": ".12.", "probability": 0.996826171875}, {"start": 1734.43, "end": 1734.97, "word": " So", "probability": 0.96044921875}, {"start": 1734.97, "end": 1735.31, "word": " just", "probability": 0.82861328125}, 
{"start": 1735.31, "end": 1737.13, "word": " straightforward", "probability": 0.6796875}, {"start": 1737.13, "end": 1737.87, "word": " calculation,", "probability": 0.92724609375}, {"start": 1739.01, "end": 1739.37, "word": " 8", "probability": 0.99267578125}, {"start": 1739.37, "end": 1739.95, "word": ".6", "probability": 0.999267578125}, {"start": 1739.95, "end": 1740.29, "word": " is", "probability": 0.93994140625}, {"start": 1740.29, "end": 1740.47, "word": " your", "probability": 0.8779296875}, {"start": 1740.47, "end": 1740.83, "word": " value", "probability": 0.95751953125}, {"start": 1740.83, "end": 1741.39, "word": " of", "probability": 0.71337890625}, {"start": 1741.39, "end": 1741.55, "word": " x.", "probability": 0.96826171875}, {"start": 1742.31, "end": 1742.47, "word": " The", "probability": 0.89208984375}, {"start": 1742.47, "end": 1742.65, "word": " mean", "probability": 0.966796875}, {"start": 1742.65, "end": 1742.89, "word": " is", "probability": 0.95361328125}, {"start": 1742.89, "end": 1743.27, "word": " 8,", "probability": 0.28662109375}, {"start": 1745.49, "end": 1745.73, "word": " sigma", "probability": 0.90869140625}, {"start": 1745.73, "end": 1745.91, "word": " is", "probability": 0.94677734375}, {"start": 1745.91, "end": 1746.21, "word": " 5,", "probability": 0.9677734375}, {"start": 1746.71, "end": 1746.85, "word": " so", "probability": 0.94873046875}, {"start": 1746.85, "end": 1747.05, "word": " that", "probability": 0.943359375}, {"start": 1747.05, "end": 1747.39, "word": " gives", "probability": 0.90576171875}, {"start": 1747.39, "end": 1748.93, "word": " 0", "probability": 0.791015625}, {"start": 1748.93, "end": 1749.33, "word": ".12.", "probability": 0.993408203125}], "temperature": 1.0}, {"id": 67, "seek": 178037, "start": 1751.89, "end": 1780.37, "text": " So now, the problem becomes, instead of asking x smaller than 8.6, it's similar to z less than 0.12. Still, we have the same normal curve. 8, the mean. 
Now, the mean of z is 0, as we mentioned. Instead of x, 8.6, the corresponding z value is 0.12.", "tokens": [407, 586, 11, 264, 1154, 3643, 11, 2602, 295, 3365, 2031, 4356, 813, 1649, 13, 21, 11, 309, 311, 2531, 281, 710, 1570, 813, 1958, 13, 4762, 13, 8291, 11, 321, 362, 264, 912, 2710, 7605, 13, 1649, 11, 264, 914, 13, 823, 11, 264, 914, 295, 710, 307, 1958, 11, 382, 321, 2835, 13, 7156, 295, 2031, 11, 1649, 13, 21, 11, 264, 11760, 710, 2158, 307, 1958, 13, 4762, 13], "avg_logprob": -0.14843750326600794, "compression_ratio": 1.4335260115606936, "no_speech_prob": 0.0, "words": [{"start": 1751.89, "end": 1752.19, "word": " So", "probability": 0.873046875}, {"start": 1752.19, "end": 1752.45, "word": " now,", "probability": 0.861328125}, {"start": 1752.69, "end": 1752.81, "word": " the", "probability": 0.91064453125}, {"start": 1752.81, "end": 1753.13, "word": " problem", "probability": 0.8642578125}, {"start": 1753.13, "end": 1753.63, "word": " becomes,", "probability": 0.873046875}, {"start": 1753.93, "end": 1754.17, "word": " instead", "probability": 0.86328125}, {"start": 1754.17, "end": 1754.95, "word": " of", "probability": 0.97119140625}, {"start": 1754.95, "end": 1755.83, "word": " asking", "probability": 0.849609375}, {"start": 1755.83, "end": 1756.33, "word": " x", "probability": 0.5234375}, {"start": 1756.33, "end": 1756.79, "word": " smaller", "probability": 0.50341796875}, {"start": 1756.79, "end": 1757.21, "word": " than", "probability": 0.9501953125}, {"start": 1757.21, "end": 1757.65, "word": " 8", "probability": 0.9111328125}, {"start": 1757.65, "end": 1758.25, "word": ".6,", "probability": 0.993408203125}, {"start": 1758.59, "end": 1759.37, "word": " it's", "probability": 0.912109375}, {"start": 1759.37, "end": 1759.77, "word": " similar", "probability": 0.9248046875}, {"start": 1759.77, "end": 1760.41, "word": " to", "probability": 0.96484375}, {"start": 1760.41, "end": 1760.73, "word": " z", "probability": 0.93798828125}, {"start": 1760.73, "end": 
1761.37, "word": " less", "probability": 0.91162109375}, {"start": 1761.37, "end": 1761.63, "word": " than", "probability": 0.93701171875}, {"start": 1761.63, "end": 1761.89, "word": " 0", "probability": 0.84716796875}, {"start": 1761.89, "end": 1762.27, "word": ".12.", "probability": 0.992919921875}, {"start": 1764.39, "end": 1764.99, "word": " Still,", "probability": 0.8349609375}, {"start": 1765.05, "end": 1765.11, "word": " we", "probability": 0.96044921875}, {"start": 1765.11, "end": 1765.25, "word": " have", "probability": 0.9482421875}, {"start": 1765.25, "end": 1765.43, "word": " the", "probability": 0.91357421875}, {"start": 1765.43, "end": 1765.65, "word": " same", "probability": 0.89306640625}, {"start": 1765.65, "end": 1765.95, "word": " normal", "probability": 0.84033203125}, {"start": 1765.95, "end": 1766.31, "word": " curve.", "probability": 0.921875}, {"start": 1769.45, "end": 1769.91, "word": " 8,", "probability": 0.59130859375}, {"start": 1770.61, "end": 1770.85, "word": " the", "probability": 0.9140625}, {"start": 1770.85, "end": 1771.05, "word": " mean.", "probability": 0.96728515625}, {"start": 1771.33, "end": 1771.57, "word": " Now,", "probability": 0.94775390625}, {"start": 1771.63, "end": 1771.75, "word": " the", "probability": 0.921875}, {"start": 1771.75, "end": 1771.91, "word": " mean", "probability": 0.95751953125}, {"start": 1771.91, "end": 1772.07, "word": " of", "probability": 0.96923828125}, {"start": 1772.07, "end": 1772.27, "word": " z", "probability": 0.9638671875}, {"start": 1772.27, "end": 1772.45, "word": " is", "probability": 0.94677734375}, {"start": 1772.45, "end": 1772.67, "word": " 0,", "probability": 0.60546875}, {"start": 1772.69, "end": 1772.85, "word": " as", "probability": 0.9609375}, {"start": 1772.85, "end": 1772.99, "word": " we", "probability": 0.9580078125}, {"start": 1772.99, "end": 1773.31, "word": " mentioned.", "probability": 0.83056640625}, {"start": 1774.37, "end": 1774.87, "word": " Instead", 
"probability": 0.69287109375}, {"start": 1774.87, "end": 1775.07, "word": " of", "probability": 0.96630859375}, {"start": 1775.07, "end": 1775.39, "word": " x,", "probability": 0.9951171875}, {"start": 1775.75, "end": 1776.13, "word": " 8", "probability": 0.994140625}, {"start": 1776.13, "end": 1776.75, "word": ".6,", "probability": 0.998779296875}, {"start": 1777.83, "end": 1778.37, "word": " the", "probability": 0.88427734375}, {"start": 1778.37, "end": 1778.97, "word": " corresponding", "probability": 0.82470703125}, {"start": 1778.97, "end": 1779.23, "word": " z", "probability": 0.96044921875}, {"start": 1779.23, "end": 1779.51, "word": " value", "probability": 0.75537109375}, {"start": 1779.51, "end": 1779.69, "word": " is", "probability": 0.93896484375}, {"start": 1779.69, "end": 1779.95, "word": " 0", "probability": 0.98291015625}, {"start": 1779.95, "end": 1780.37, "word": ".12.", "probability": 0.998779296875}], "temperature": 1.0}, {"id": 68, "seek": 180610, "start": 1781.14, "end": 1806.1, "text": " So instead of finding probability of X smaller than 8.6, smaller than 1.12, so they are equivalent. So we transform here from normal distribution to standardized normal distribution in order to compute the probability we are looking for. 
Now, this is just a portion of the table we have.", "tokens": [407, 2602, 295, 5006, 8482, 295, 1783, 4356, 813, 1649, 13, 21, 11, 4356, 813, 502, 13, 4762, 11, 370, 436, 366, 10344, 13, 407, 321, 4088, 510, 490, 2710, 7316, 281, 31677, 2710, 7316, 294, 1668, 281, 14722, 264, 8482, 321, 366, 1237, 337, 13, 823, 11, 341, 307, 445, 257, 8044, 295, 264, 3199, 321, 362, 13], "avg_logprob": -0.16145832985639572, "compression_ratio": 1.5567567567567568, "no_speech_prob": 0.0, "words": [{"start": 1781.14, "end": 1781.46, "word": " So", "probability": 0.90380859375}, {"start": 1781.46, "end": 1781.88, "word": " instead", "probability": 0.7392578125}, {"start": 1781.88, "end": 1782.18, "word": " of", "probability": 0.96728515625}, {"start": 1782.18, "end": 1782.58, "word": " finding", "probability": 0.8642578125}, {"start": 1782.58, "end": 1783.0, "word": " probability", "probability": 0.61962890625}, {"start": 1783.0, "end": 1783.3, "word": " of", "probability": 0.91455078125}, {"start": 1783.3, "end": 1783.56, "word": " X", "probability": 0.775390625}, {"start": 1783.56, "end": 1784.1, "word": " smaller", "probability": 0.78369140625}, {"start": 1784.1, "end": 1784.38, "word": " than", "probability": 0.9423828125}, {"start": 1784.38, "end": 1784.58, "word": " 8", "probability": 0.923828125}, {"start": 1784.58, "end": 1785.2, "word": ".6,", "probability": 0.994140625}, {"start": 1785.72, "end": 1786.74, "word": " smaller", "probability": 0.52001953125}, {"start": 1786.74, "end": 1787.22, "word": " than", "probability": 0.92724609375}, {"start": 1787.22, "end": 1787.56, "word": " 1", "probability": 0.70849609375}, {"start": 1787.56, "end": 1788.06, "word": ".12,", "probability": 0.77197265625}, {"start": 1788.24, "end": 1788.44, "word": " so", "probability": 0.9140625}, {"start": 1788.44, "end": 1788.58, "word": " they", "probability": 0.880859375}, {"start": 1788.58, "end": 1788.72, "word": " are", "probability": 0.93310546875}, {"start": 1788.72, "end": 1789.06, "word": " 
equivalent.", "probability": 0.912109375}, {"start": 1789.88, "end": 1790.08, "word": " So", "probability": 0.95654296875}, {"start": 1790.08, "end": 1790.24, "word": " we", "probability": 0.73583984375}, {"start": 1790.24, "end": 1791.0, "word": " transform", "probability": 0.767578125}, {"start": 1791.0, "end": 1792.22, "word": " here", "probability": 0.80126953125}, {"start": 1792.22, "end": 1792.8, "word": " from", "probability": 0.8359375}, {"start": 1792.8, "end": 1793.76, "word": " normal", "probability": 0.84326171875}, {"start": 1793.76, "end": 1794.42, "word": " distribution", "probability": 0.845703125}, {"start": 1794.42, "end": 1795.5, "word": " to", "probability": 0.8876953125}, {"start": 1795.5, "end": 1796.02, "word": " standardized", "probability": 0.6337890625}, {"start": 1796.02, "end": 1796.38, "word": " normal", "probability": 0.87890625}, {"start": 1796.38, "end": 1796.98, "word": " distribution", "probability": 0.8671875}, {"start": 1796.98, "end": 1797.2, "word": " in", "probability": 0.7724609375}, {"start": 1797.2, "end": 1797.42, "word": " order", "probability": 0.91796875}, {"start": 1797.42, "end": 1797.64, "word": " to", "probability": 0.97021484375}, {"start": 1797.64, "end": 1798.04, "word": " compute", "probability": 0.9111328125}, {"start": 1798.04, "end": 1798.72, "word": " the", "probability": 0.9130859375}, {"start": 1798.72, "end": 1799.1, "word": " probability", "probability": 0.9638671875}, {"start": 1799.1, "end": 1799.58, "word": " we", "probability": 0.9482421875}, {"start": 1799.58, "end": 1799.74, "word": " are", "probability": 0.9345703125}, {"start": 1799.74, "end": 1799.98, "word": " looking", "probability": 0.916015625}, {"start": 1799.98, "end": 1800.34, "word": " for.", "probability": 0.95166015625}, {"start": 1801.28, "end": 1801.58, "word": " Now,", "probability": 0.95556640625}, {"start": 1801.76, "end": 1802.02, "word": " this", "probability": 0.943359375}, {"start": 1802.02, "end": 1802.22, "word": " is", 
"probability": 0.94873046875}, {"start": 1802.22, "end": 1802.54, "word": " just", "probability": 0.9189453125}, {"start": 1802.54, "end": 1802.76, "word": " a", "probability": 0.9560546875}, {"start": 1802.76, "end": 1803.12, "word": " portion", "probability": 0.85791015625}, {"start": 1803.12, "end": 1804.88, "word": " of", "probability": 0.9560546875}, {"start": 1804.88, "end": 1805.16, "word": " the", "probability": 0.91552734375}, {"start": 1805.16, "end": 1805.6, "word": " table", "probability": 0.8828125}, {"start": 1805.6, "end": 1805.82, "word": " we", "probability": 0.84619140625}, {"start": 1805.82, "end": 1806.1, "word": " have.", "probability": 0.94091796875}], "temperature": 1.0}, {"id": 69, "seek": 183819, "start": 1810.53, "end": 1838.19, "text": " So for positive z values. Now 0.1 is 0.1. Because here we are looking for z less than 0.1. So 0.1. Also, we have two. So move up to two decimal places, we get this value. So the answer is point.", "tokens": [407, 337, 3353, 710, 4190, 13, 823, 1958, 13, 16, 307, 1958, 13, 16, 13, 1436, 510, 321, 366, 1237, 337, 710, 1570, 813, 1958, 13, 16, 13, 407, 1958, 13, 16, 13, 2743, 11, 321, 362, 732, 13, 407, 1286, 493, 281, 732, 26601, 3190, 11, 321, 483, 341, 2158, 13, 407, 264, 1867, 307, 935, 13], "avg_logprob": -0.24218750151537233, "compression_ratio": 1.3928571428571428, "no_speech_prob": 0.0, "words": [{"start": 1810.53, "end": 1810.83, "word": " So", "probability": 0.63427734375}, {"start": 1810.83, "end": 1811.05, "word": " for", "probability": 0.69482421875}, {"start": 1811.05, "end": 1811.39, "word": " positive", "probability": 0.6318359375}, {"start": 1811.39, "end": 1811.59, "word": " z", "probability": 0.611328125}, {"start": 1811.59, "end": 1811.97, "word": " values.", "probability": 0.7041015625}, {"start": 1812.97, "end": 1813.27, "word": " Now", "probability": 0.87939453125}, {"start": 1813.27, "end": 1813.53, "word": " 0", "probability": 0.260009765625}, {"start": 1813.53, "end": 1813.93, 
"word": ".1", "probability": 0.99462890625}, {"start": 1813.93, "end": 1815.47, "word": " is", "probability": 0.32666015625}, {"start": 1815.47, "end": 1815.67, "word": " 0", "probability": 0.9052734375}, {"start": 1815.67, "end": 1816.03, "word": ".1.", "probability": 0.998046875}, {"start": 1817.85, "end": 1818.53, "word": " Because", "probability": 0.87890625}, {"start": 1818.53, "end": 1818.89, "word": " here", "probability": 0.841796875}, {"start": 1818.89, "end": 1819.03, "word": " we", "probability": 0.8095703125}, {"start": 1819.03, "end": 1819.17, "word": " are", "probability": 0.92919921875}, {"start": 1819.17, "end": 1819.45, "word": " looking", "probability": 0.91455078125}, {"start": 1819.45, "end": 1819.81, "word": " for", "probability": 0.95166015625}, {"start": 1819.81, "end": 1821.23, "word": " z", "probability": 0.935546875}, {"start": 1821.23, "end": 1821.51, "word": " less", "probability": 0.88623046875}, {"start": 1821.51, "end": 1821.69, "word": " than", "probability": 0.92578125}, {"start": 1821.69, "end": 1821.95, "word": " 0", "probability": 0.90869140625}, {"start": 1821.95, "end": 1822.19, "word": ".1.", "probability": 0.988525390625}, {"start": 1824.43, "end": 1825.07, "word": " So", "probability": 0.92041015625}, {"start": 1825.07, "end": 1825.33, "word": " 0", "probability": 0.88916015625}, {"start": 1825.33, "end": 1825.67, "word": ".1.", "probability": 0.9990234375}, {"start": 1827.21, "end": 1827.89, "word": " Also,", "probability": 0.90185546875}, {"start": 1827.99, "end": 1828.05, "word": " we", "probability": 0.8701171875}, {"start": 1828.05, "end": 1828.25, "word": " have", "probability": 0.94873046875}, {"start": 1828.25, "end": 1828.59, "word": " two.", "probability": 0.5576171875}, {"start": 1829.37, "end": 1829.61, "word": " So", "probability": 0.9052734375}, {"start": 1829.61, "end": 1831.15, "word": " move", "probability": 0.720703125}, {"start": 1831.15, "end": 1831.39, "word": " up", "probability": 0.96142578125}, 
{"start": 1831.39, "end": 1831.51, "word": " to", "probability": 0.9169921875}, {"start": 1831.51, "end": 1832.57, "word": " two", "probability": 0.73876953125}, {"start": 1832.57, "end": 1832.95, "word": " decimal", "probability": 0.80810546875}, {"start": 1832.95, "end": 1833.33, "word": " places,", "probability": 0.560546875}, {"start": 1833.93, "end": 1834.29, "word": " we", "probability": 0.93505859375}, {"start": 1834.29, "end": 1834.65, "word": " get", "probability": 0.9287109375}, {"start": 1834.65, "end": 1835.71, "word": " this", "probability": 0.9365234375}, {"start": 1835.71, "end": 1835.99, "word": " value.", "probability": 0.9423828125}, {"start": 1836.79, "end": 1837.09, "word": " So", "probability": 0.95654296875}, {"start": 1837.09, "end": 1837.25, "word": " the", "probability": 0.916015625}, {"start": 1837.25, "end": 1837.55, "word": " answer", "probability": 0.95703125}, {"start": 1837.55, "end": 1837.81, "word": " is", "probability": 0.94384765625}, {"start": 1837.81, "end": 1838.19, "word": " point.", "probability": 0.5634765625}], "temperature": 1.0}, {"id": 70, "seek": 186502, "start": 1842.12, "end": 1865.02, "text": " I think it's straightforward to compute the probability underneath the normal curve if X has normal distribution. So B of X is smaller than 8.6 is the same as B of Z less than 0.12, which is around 55%. 
Makes sense because the area to the left of 0 equals 1 half.", "tokens": [286, 519, 309, 311, 15325, 281, 14722, 264, 8482, 7223, 264, 2710, 7605, 498, 1783, 575, 2710, 7316, 13, 407, 363, 295, 1783, 307, 4356, 813, 1649, 13, 21, 307, 264, 912, 382, 363, 295, 1176, 1570, 813, 1958, 13, 4762, 11, 597, 307, 926, 12330, 6856, 25245, 2020, 570, 264, 1859, 281, 264, 1411, 295, 1958, 6915, 502, 1922, 13], "avg_logprob": -0.23538306980363785, "compression_ratio": 1.382198952879581, "no_speech_prob": 0.0, "words": [{"start": 1842.1200000000001, "end": 1842.72, "word": " I", "probability": 0.23779296875}, {"start": 1842.72, "end": 1843.32, "word": " think", "probability": 0.90478515625}, {"start": 1843.32, "end": 1843.5, "word": " it's", "probability": 0.632568359375}, {"start": 1843.5, "end": 1843.98, "word": " straightforward", "probability": 0.83154296875}, {"start": 1843.98, "end": 1845.2, "word": " to", "probability": 0.75537109375}, {"start": 1845.2, "end": 1845.56, "word": " compute", "probability": 0.89794921875}, {"start": 1845.56, "end": 1845.86, "word": " the", "probability": 0.8515625}, {"start": 1845.86, "end": 1846.28, "word": " probability", "probability": 0.9541015625}, {"start": 1846.28, "end": 1846.84, "word": " underneath", "probability": 0.888671875}, {"start": 1846.84, "end": 1847.06, "word": " the", "probability": 0.60888671875}, {"start": 1847.06, "end": 1847.38, "word": " normal", "probability": 0.865234375}, {"start": 1847.38, "end": 1847.72, "word": " curve", "probability": 0.482177734375}, {"start": 1847.72, "end": 1848.78, "word": " if", "probability": 0.71484375}, {"start": 1848.78, "end": 1849.1, "word": " X", "probability": 0.5380859375}, {"start": 1849.1, "end": 1849.46, "word": " has", "probability": 0.94384765625}, {"start": 1849.46, "end": 1849.86, "word": " normal", "probability": 0.85546875}, {"start": 1849.86, "end": 1850.44, "word": " distribution.", "probability": 0.85009765625}, {"start": 1851.04, "end": 1851.34, "word": " So", 
"probability": 0.95849609375}, {"start": 1851.34, "end": 1851.58, "word": " B", "probability": 0.36181640625}, {"start": 1851.58, "end": 1851.72, "word": " of", "probability": 0.93212890625}, {"start": 1851.72, "end": 1851.96, "word": " X", "probability": 0.96630859375}, {"start": 1851.96, "end": 1852.14, "word": " is", "probability": 0.6279296875}, {"start": 1852.14, "end": 1852.34, "word": " smaller", "probability": 0.78759765625}, {"start": 1852.34, "end": 1852.54, "word": " than", "probability": 0.95166015625}, {"start": 1852.54, "end": 1852.7, "word": " 8", "probability": 0.94677734375}, {"start": 1852.7, "end": 1853.16, "word": ".6", "probability": 0.994140625}, {"start": 1853.16, "end": 1853.32, "word": " is", "probability": 0.7890625}, {"start": 1853.32, "end": 1853.5, "word": " the", "probability": 0.880859375}, {"start": 1853.5, "end": 1853.66, "word": " same", "probability": 0.90966796875}, {"start": 1853.66, "end": 1853.84, "word": " as", "probability": 0.9501953125}, {"start": 1853.84, "end": 1854.0, "word": " B", "probability": 0.841796875}, {"start": 1854.0, "end": 1854.16, "word": " of", "probability": 0.96533203125}, {"start": 1854.16, "end": 1854.34, "word": " Z", "probability": 0.96728515625}, {"start": 1854.34, "end": 1854.62, "word": " less", "probability": 0.712890625}, {"start": 1854.62, "end": 1854.82, "word": " than", "probability": 0.9306640625}, {"start": 1854.82, "end": 1855.06, "word": " 0", "probability": 0.5703125}, {"start": 1855.06, "end": 1856.18, "word": ".12,", "probability": 0.985595703125}, {"start": 1856.28, "end": 1856.46, "word": " which", "probability": 0.94873046875}, {"start": 1856.46, "end": 1856.74, "word": " is", "probability": 0.94921875}, {"start": 1856.74, "end": 1857.26, "word": " around", "probability": 0.93115234375}, {"start": 1857.26, "end": 1859.74, "word": " 55%.", "probability": 0.8740234375}, {"start": 1859.74, "end": 1860.64, "word": " Makes", "probability": 0.461669921875}, {"start": 1860.64, "end": 
1860.88, "word": " sense", "probability": 0.8271484375}, {"start": 1860.88, "end": 1861.32, "word": " because", "probability": 0.55029296875}, {"start": 1861.32, "end": 1862.04, "word": " the", "probability": 0.90380859375}, {"start": 1862.04, "end": 1862.34, "word": " area", "probability": 0.90771484375}, {"start": 1862.34, "end": 1862.54, "word": " to", "probability": 0.966796875}, {"start": 1862.54, "end": 1862.68, "word": " the", "probability": 0.91796875}, {"start": 1862.68, "end": 1862.92, "word": " left", "probability": 0.9404296875}, {"start": 1862.92, "end": 1863.12, "word": " of", "probability": 0.9560546875}, {"start": 1863.12, "end": 1863.44, "word": " 0", "probability": 0.52685546875}, {"start": 1863.44, "end": 1864.44, "word": " equals", "probability": 0.90625}, {"start": 1864.44, "end": 1864.76, "word": " 1", "probability": 0.56689453125}, {"start": 1864.76, "end": 1865.02, "word": " half.", "probability": 0.013763427734375}], "temperature": 1.0}, {"id": 71, "seek": 189254, "start": 1866.26, "end": 1892.54, "text": " But we are looking for the area below 0.12. So greater than zero. So this area actually is greater than 0.5. So it makes sense that your result is greater than 0.5. Questions? 
Next, suppose we are interested of probability of X greater than.", "tokens": [583, 321, 366, 1237, 337, 264, 1859, 2507, 1958, 13, 4762, 13, 407, 5044, 813, 4018, 13, 407, 341, 1859, 767, 307, 5044, 813, 1958, 13, 20, 13, 407, 309, 1669, 2020, 300, 428, 1874, 307, 5044, 813, 1958, 13, 20, 13, 27738, 30, 3087, 11, 7297, 321, 366, 3102, 295, 8482, 295, 1783, 5044, 813, 13], "avg_logprob": -0.1926185319135929, "compression_ratio": 1.5612903225806452, "no_speech_prob": 0.0, "words": [{"start": 1866.26, "end": 1866.42, "word": " But", "probability": 0.3984375}, {"start": 1866.42, "end": 1866.52, "word": " we", "probability": 0.80078125}, {"start": 1866.52, "end": 1866.62, "word": " are", "probability": 0.88525390625}, {"start": 1866.62, "end": 1866.84, "word": " looking", "probability": 0.91357421875}, {"start": 1866.84, "end": 1867.08, "word": " for", "probability": 0.94775390625}, {"start": 1867.08, "end": 1867.2, "word": " the", "probability": 0.904296875}, {"start": 1867.2, "end": 1867.58, "word": " area", "probability": 0.89501953125}, {"start": 1867.58, "end": 1868.76, "word": " below", "probability": 0.84521484375}, {"start": 1868.76, "end": 1869.26, "word": " 0", "probability": 0.2308349609375}, {"start": 1869.26, "end": 1869.84, "word": ".12.", "probability": 0.984130859375}, {"start": 1870.2, "end": 1870.84, "word": " So", "probability": 0.51171875}, {"start": 1870.84, "end": 1871.1, "word": " greater", "probability": 0.81005859375}, {"start": 1871.1, "end": 1871.42, "word": " than", "probability": 0.9423828125}, {"start": 1871.42, "end": 1871.72, "word": " zero.", "probability": 0.61865234375}, {"start": 1872.04, "end": 1872.18, "word": " So", "probability": 0.9404296875}, {"start": 1872.18, "end": 1872.44, "word": " this", "probability": 0.89453125}, {"start": 1872.44, "end": 1872.82, "word": " area", "probability": 0.890625}, {"start": 1872.82, "end": 1873.7, "word": " actually", "probability": 0.83154296875}, {"start": 1873.7, "end": 1873.94, "word": " 
is", "probability": 0.93994140625}, {"start": 1873.94, "end": 1874.26, "word": " greater", "probability": 0.83642578125}, {"start": 1874.26, "end": 1874.76, "word": " than", "probability": 0.93896484375}, {"start": 1874.76, "end": 1875.5, "word": " 0", "probability": 0.9306640625}, {"start": 1875.5, "end": 1875.84, "word": ".5.", "probability": 0.989990234375}, {"start": 1876.04, "end": 1876.24, "word": " So", "probability": 0.95263671875}, {"start": 1876.24, "end": 1876.36, "word": " it", "probability": 0.90966796875}, {"start": 1876.36, "end": 1876.6, "word": " makes", "probability": 0.81787109375}, {"start": 1876.6, "end": 1876.84, "word": " sense", "probability": 0.81494140625}, {"start": 1876.84, "end": 1877.1, "word": " that", "probability": 0.93017578125}, {"start": 1877.1, "end": 1877.3, "word": " your", "probability": 0.8154296875}, {"start": 1877.3, "end": 1877.7, "word": " result", "probability": 0.93408203125}, {"start": 1877.7, "end": 1879.54, "word": " is", "probability": 0.8896484375}, {"start": 1879.54, "end": 1879.8, "word": " greater", "probability": 0.8876953125}, {"start": 1879.8, "end": 1880.04, "word": " than", "probability": 0.93701171875}, {"start": 1880.04, "end": 1880.26, "word": " 0", "probability": 0.6845703125}, {"start": 1880.26, "end": 1880.44, "word": ".5.", "probability": 0.989990234375}, {"start": 1882.32, "end": 1882.96, "word": " Questions?", "probability": 0.79248046875}, {"start": 1885.48, "end": 1886.12, "word": " Next,", "probability": 0.88134765625}, {"start": 1887.16, "end": 1887.66, "word": " suppose", "probability": 0.9072265625}, {"start": 1887.66, "end": 1888.46, "word": " we", "probability": 0.94287109375}, {"start": 1888.46, "end": 1888.74, "word": " are", "probability": 0.94091796875}, {"start": 1888.74, "end": 1889.3, "word": " interested", "probability": 0.84814453125}, {"start": 1889.3, "end": 1890.12, "word": " of", "probability": 0.58154296875}, {"start": 1890.12, "end": 1890.5, "word": " probability", 
"probability": 0.6279296875}, {"start": 1890.5, "end": 1890.78, "word": " of", "probability": 0.92822265625}, {"start": 1890.78, "end": 1891.1, "word": " X", "probability": 0.88623046875}, {"start": 1891.1, "end": 1892.22, "word": " greater", "probability": 0.8798828125}, {"start": 1892.22, "end": 1892.54, "word": " than.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 72, "seek": 191722, "start": 1893.14, "end": 1917.22, "text": " So that's how can we find normal upper tail probabilities. Again, the table we have gives the area to the left. In order to compute the area in the upper tail probabilities, I mean this area, since the normal distribution is symmetric and", "tokens": [407, 300, 311, 577, 393, 321, 915, 2710, 6597, 6838, 33783, 13, 3764, 11, 264, 3199, 321, 362, 2709, 264, 1859, 281, 264, 1411, 13, 682, 1668, 281, 14722, 264, 1859, 294, 264, 6597, 6838, 33783, 11, 286, 914, 341, 1859, 11, 1670, 264, 2710, 7316, 307, 32330, 293], "avg_logprob": -0.1706250062584877, "compression_ratio": 1.5620915032679739, "no_speech_prob": 0.0, "words": [{"start": 1893.14, "end": 1893.38, "word": " So", "probability": 0.9052734375}, {"start": 1893.38, "end": 1893.78, "word": " that's", "probability": 0.89697265625}, {"start": 1893.78, "end": 1893.96, "word": " how", "probability": 0.88720703125}, {"start": 1893.96, "end": 1894.18, "word": " can", "probability": 0.81884765625}, {"start": 1894.18, "end": 1894.34, "word": " we", "probability": 0.9326171875}, {"start": 1894.34, "end": 1894.68, "word": " find", "probability": 0.88134765625}, {"start": 1894.68, "end": 1895.38, "word": " normal", "probability": 0.82666015625}, {"start": 1895.38, "end": 1895.78, "word": " upper", "probability": 0.75048828125}, {"start": 1895.78, "end": 1896.12, "word": " tail", "probability": 0.475830078125}, {"start": 1896.12, "end": 1896.66, "word": " probabilities.", "probability": 0.8740234375}, {"start": 1898.24, "end": 1898.8, "word": " Again,", "probability": 0.9287109375}, 
{"start": 1900.88, "end": 1901.1, "word": " the", "probability": 0.8330078125}, {"start": 1901.1, "end": 1901.42, "word": " table", "probability": 0.86328125}, {"start": 1901.42, "end": 1901.64, "word": " we", "probability": 0.91845703125}, {"start": 1901.64, "end": 1901.98, "word": " have", "probability": 0.94580078125}, {"start": 1901.98, "end": 1902.52, "word": " gives", "probability": 0.7421875}, {"start": 1902.52, "end": 1902.66, "word": " the", "probability": 0.669921875}, {"start": 1902.66, "end": 1902.8, "word": " area", "probability": 0.85107421875}, {"start": 1902.8, "end": 1902.98, "word": " to", "probability": 0.962890625}, {"start": 1902.98, "end": 1903.1, "word": " the", "probability": 0.9140625}, {"start": 1903.1, "end": 1903.32, "word": " left.", "probability": 0.953125}, {"start": 1904.9, "end": 1905.42, "word": " In", "probability": 0.9677734375}, {"start": 1905.42, "end": 1905.62, "word": " order", "probability": 0.92138671875}, {"start": 1905.62, "end": 1905.96, "word": " to", "probability": 0.9716796875}, {"start": 1905.96, "end": 1906.58, "word": " compute", "probability": 0.91015625}, {"start": 1906.58, "end": 1906.78, "word": " the", "probability": 0.91357421875}, {"start": 1906.78, "end": 1907.1, "word": " area", "probability": 0.89697265625}, {"start": 1907.1, "end": 1907.8, "word": " in", "probability": 0.9130859375}, {"start": 1907.8, "end": 1907.96, "word": " the", "probability": 0.9267578125}, {"start": 1907.96, "end": 1908.24, "word": " upper", "probability": 0.80908203125}, {"start": 1908.24, "end": 1908.6, "word": " tail", "probability": 0.75146484375}, {"start": 1908.6, "end": 1909.4, "word": " probabilities,", "probability": 0.91357421875}, {"start": 1910.44, "end": 1910.78, "word": " I", "probability": 0.9833984375}, {"start": 1910.78, "end": 1910.88, "word": " mean", "probability": 0.70703125}, {"start": 1910.88, "end": 1911.14, "word": " this", "probability": 0.91162109375}, {"start": 1911.14, "end": 1911.48, "word": " area,", 
"probability": 0.88671875}, {"start": 1912.86, "end": 1913.28, "word": " since", "probability": 0.7138671875}, {"start": 1913.28, "end": 1913.48, "word": " the", "probability": 0.91064453125}, {"start": 1913.48, "end": 1913.76, "word": " normal", "probability": 0.87060546875}, {"start": 1913.76, "end": 1914.4, "word": " distribution", "probability": 0.8603515625}, {"start": 1914.4, "end": 1915.62, "word": " is", "probability": 0.92041015625}, {"start": 1915.62, "end": 1916.02, "word": " symmetric", "probability": 0.8173828125}, {"start": 1916.02, "end": 1917.22, "word": " and", "probability": 0.441162109375}], "temperature": 1.0}, {"id": 73, "seek": 194382, "start": 1917.74, "end": 1943.82, "text": " The total area underneath the curve is 1. So the probability of X greater than 8.6 is the same as 1 minus B of X less than 8.6. So first step, just find the probability we just have and subtract from 1. So B of X greater than 8.6, the same as B of Z greater than 0.12.", "tokens": [440, 3217, 1859, 7223, 264, 7605, 307, 502, 13, 407, 264, 8482, 295, 1783, 5044, 813, 1649, 13, 21, 307, 264, 912, 382, 502, 3175, 363, 295, 1783, 1570, 813, 1649, 13, 21, 13, 407, 700, 1823, 11, 445, 915, 264, 8482, 321, 445, 362, 293, 16390, 490, 502, 13, 407, 363, 295, 1783, 5044, 813, 1649, 13, 21, 11, 264, 912, 382, 363, 295, 1176, 5044, 813, 1958, 13, 4762, 13], "avg_logprob": -0.14629708577508796, "compression_ratio": 1.7025316455696202, "no_speech_prob": 0.0, "words": [{"start": 1917.74, "end": 1918.0, "word": " The", "probability": 0.572265625}, {"start": 1918.0, "end": 1918.34, "word": " total", "probability": 0.8720703125}, {"start": 1918.34, "end": 1918.84, "word": " area", "probability": 0.89990234375}, {"start": 1918.84, "end": 1919.66, "word": " underneath", "probability": 0.90087890625}, {"start": 1919.66, "end": 1919.96, "word": " the", "probability": 0.9072265625}, {"start": 1919.96, "end": 1920.16, "word": " curve", "probability": 0.912109375}, {"start": 1920.16, "end": 
1920.3, "word": " is", "probability": 0.94091796875}, {"start": 1920.3, "end": 1920.56, "word": " 1.", "probability": 0.60595703125}, {"start": 1921.36, "end": 1921.78, "word": " So", "probability": 0.86083984375}, {"start": 1921.78, "end": 1922.0, "word": " the", "probability": 0.6611328125}, {"start": 1922.0, "end": 1922.42, "word": " probability", "probability": 0.93115234375}, {"start": 1922.42, "end": 1922.74, "word": " of", "probability": 0.95166015625}, {"start": 1922.74, "end": 1922.98, "word": " X", "probability": 0.78369140625}, {"start": 1922.98, "end": 1923.34, "word": " greater", "probability": 0.80126953125}, {"start": 1923.34, "end": 1923.66, "word": " than", "probability": 0.95263671875}, {"start": 1923.66, "end": 1923.9, "word": " 8", "probability": 0.935546875}, {"start": 1923.9, "end": 1924.44, "word": ".6", "probability": 0.990966796875}, {"start": 1924.44, "end": 1924.68, "word": " is", "probability": 0.9287109375}, {"start": 1924.68, "end": 1924.88, "word": " the", "probability": 0.8935546875}, {"start": 1924.88, "end": 1925.12, "word": " same", "probability": 0.91162109375}, {"start": 1925.12, "end": 1925.56, "word": " as", "probability": 0.9619140625}, {"start": 1925.56, "end": 1927.04, "word": " 1", "probability": 0.83935546875}, {"start": 1927.04, "end": 1927.68, "word": " minus", "probability": 0.90869140625}, {"start": 1927.68, "end": 1928.2, "word": " B", "probability": 0.449951171875}, {"start": 1928.2, "end": 1928.36, "word": " of", "probability": 0.92236328125}, {"start": 1928.36, "end": 1928.66, "word": " X", "probability": 0.96240234375}, {"start": 1928.66, "end": 1929.2, "word": " less", "probability": 0.91455078125}, {"start": 1929.2, "end": 1929.5, "word": " than", "probability": 0.9404296875}, {"start": 1929.5, "end": 1929.78, "word": " 8", "probability": 0.98583984375}, {"start": 1929.78, "end": 1930.28, "word": ".6.", "probability": 0.99755859375}, {"start": 1931.1, "end": 1931.38, "word": " So", "probability": 
0.92529296875}, {"start": 1931.38, "end": 1931.64, "word": " first", "probability": 0.73681640625}, {"start": 1931.64, "end": 1931.86, "word": " step,", "probability": 0.92333984375}, {"start": 1931.96, "end": 1932.16, "word": " just", "probability": 0.8544921875}, {"start": 1932.16, "end": 1932.52, "word": " find", "probability": 0.876953125}, {"start": 1932.52, "end": 1932.74, "word": " the", "probability": 0.912109375}, {"start": 1932.74, "end": 1933.18, "word": " probability", "probability": 0.9541015625}, {"start": 1933.18, "end": 1933.46, "word": " we", "probability": 0.85302734375}, {"start": 1933.46, "end": 1933.9, "word": " just", "probability": 0.88525390625}, {"start": 1933.9, "end": 1935.94, "word": " have", "probability": 0.9248046875}, {"start": 1935.94, "end": 1937.02, "word": " and", "probability": 0.72900390625}, {"start": 1937.02, "end": 1937.38, "word": " subtract", "probability": 0.865234375}, {"start": 1937.38, "end": 1937.76, "word": " from", "probability": 0.884765625}, {"start": 1937.76, "end": 1938.0, "word": " 1.", "probability": 0.81201171875}, {"start": 1938.74, "end": 1939.24, "word": " So", "probability": 0.94921875}, {"start": 1939.24, "end": 1939.44, "word": " B", "probability": 0.86181640625}, {"start": 1939.44, "end": 1939.56, "word": " of", "probability": 0.96826171875}, {"start": 1939.56, "end": 1939.82, "word": " X", "probability": 0.99365234375}, {"start": 1939.82, "end": 1940.26, "word": " greater", "probability": 0.71875}, {"start": 1940.26, "end": 1940.54, "word": " than", "probability": 0.94482421875}, {"start": 1940.54, "end": 1940.74, "word": " 8", "probability": 0.9912109375}, {"start": 1940.74, "end": 1941.34, "word": ".6,", "probability": 0.9990234375}, {"start": 1941.48, "end": 1941.68, "word": " the", "probability": 0.7939453125}, {"start": 1941.68, "end": 1941.84, "word": " same", "probability": 0.9130859375}, {"start": 1941.84, "end": 1942.06, "word": " as", "probability": 0.95849609375}, {"start": 1942.06, "end": 
1942.22, "word": " B", "probability": 0.98193359375}, {"start": 1942.22, "end": 1942.38, "word": " of", "probability": 0.9716796875}, {"start": 1942.38, "end": 1942.52, "word": " Z", "probability": 0.9833984375}, {"start": 1942.52, "end": 1942.9, "word": " greater", "probability": 0.90966796875}, {"start": 1942.9, "end": 1943.18, "word": " than", "probability": 0.935546875}, {"start": 1943.18, "end": 1943.34, "word": " 0", "probability": 0.81982421875}, {"start": 1943.34, "end": 1943.82, "word": ".12.", "probability": 0.996337890625}], "temperature": 1.0}, {"id": 74, "seek": 197443, "start": 1945.23, "end": 1974.43, "text": " which is the same as 1 minus B of Z less than 0.5. It's 1 minus the result we got from previous one. So this value 1 minus this value gives 0.452. So for the other tail probability, just subtract 1 from the lower tail probabilities. Now let's see how can we find", "tokens": [597, 307, 264, 912, 382, 502, 3175, 363, 295, 1176, 1570, 813, 1958, 13, 20, 13, 467, 311, 502, 3175, 264, 1874, 321, 658, 490, 3894, 472, 13, 407, 341, 2158, 502, 3175, 341, 2158, 2709, 1958, 13, 8465, 17, 13, 407, 337, 264, 661, 6838, 8482, 11, 445, 16390, 502, 490, 264, 3126, 6838, 33783, 13, 823, 718, 311, 536, 577, 393, 321, 915], "avg_logprob": -0.21969697127739587, "compression_ratio": 1.5202312138728324, "no_speech_prob": 0.0, "words": [{"start": 1945.23, "end": 1945.67, "word": " which", "probability": 0.51123046875}, {"start": 1945.67, "end": 1945.79, "word": " is", "probability": 0.94970703125}, {"start": 1945.79, "end": 1945.93, "word": " the", "probability": 0.87353515625}, {"start": 1945.93, "end": 1946.11, "word": " same", "probability": 0.91796875}, {"start": 1946.11, "end": 1946.35, "word": " as", "probability": 0.958984375}, {"start": 1946.35, "end": 1946.61, "word": " 1", "probability": 0.5029296875}, {"start": 1946.61, "end": 1947.11, "word": " minus", "probability": 0.912109375}, {"start": 1947.11, "end": 1947.79, "word": " B", "probability": 
0.53759765625}, {"start": 1947.79, "end": 1947.97, "word": " of", "probability": 0.9228515625}, {"start": 1947.97, "end": 1948.13, "word": " Z", "probability": 0.7763671875}, {"start": 1948.13, "end": 1948.37, "word": " less", "probability": 0.86083984375}, {"start": 1948.37, "end": 1948.55, "word": " than", "probability": 0.935546875}, {"start": 1948.55, "end": 1948.73, "word": " 0", "probability": 0.5986328125}, {"start": 1948.73, "end": 1949.09, "word": ".5.", "probability": 0.93603515625}, {"start": 1949.53, "end": 1949.79, "word": " It's", "probability": 0.6971435546875}, {"start": 1949.79, "end": 1950.01, "word": " 1", "probability": 0.775390625}, {"start": 1950.01, "end": 1950.37, "word": " minus", "probability": 0.98681640625}, {"start": 1950.37, "end": 1950.73, "word": " the", "probability": 0.89892578125}, {"start": 1950.73, "end": 1951.25, "word": " result", "probability": 0.94384765625}, {"start": 1951.25, "end": 1951.61, "word": " we", "probability": 0.85693359375}, {"start": 1951.61, "end": 1951.85, "word": " got", "probability": 0.8837890625}, {"start": 1951.85, "end": 1952.65, "word": " from", "probability": 0.87255859375}, {"start": 1952.65, "end": 1953.87, "word": " previous", "probability": 0.407470703125}, {"start": 1953.87, "end": 1954.31, "word": " one.", "probability": 0.87890625}, {"start": 1955.07, "end": 1955.35, "word": " So", "probability": 0.9404296875}, {"start": 1955.35, "end": 1955.61, "word": " this", "probability": 0.83251953125}, {"start": 1955.61, "end": 1956.23, "word": " value", "probability": 0.8408203125}, {"start": 1956.23, "end": 1956.77, "word": " 1", "probability": 0.48046875}, {"start": 1956.77, "end": 1957.17, "word": " minus", "probability": 0.98046875}, {"start": 1957.17, "end": 1957.39, "word": " this", "probability": 0.91552734375}, {"start": 1957.39, "end": 1957.65, "word": " value", "probability": 0.96630859375}, {"start": 1957.65, "end": 1957.95, "word": " gives", "probability": 0.779296875}, {"start": 1957.95, 
"end": 1958.29, "word": " 0", "probability": 0.908203125}, {"start": 1958.29, "end": 1959.41, "word": ".452.", "probability": 0.76611328125}, {"start": 1961.61, "end": 1962.25, "word": " So", "probability": 0.90625}, {"start": 1962.25, "end": 1962.91, "word": " for", "probability": 0.8095703125}, {"start": 1962.91, "end": 1963.05, "word": " the", "probability": 0.91650390625}, {"start": 1963.05, "end": 1963.25, "word": " other", "probability": 0.45751953125}, {"start": 1963.25, "end": 1963.47, "word": " tail", "probability": 0.74365234375}, {"start": 1963.47, "end": 1963.79, "word": " probability,", "probability": 0.78955078125}, {"start": 1964.11, "end": 1964.33, "word": " just", "probability": 0.90380859375}, {"start": 1964.33, "end": 1964.77, "word": " subtract", "probability": 0.8671875}, {"start": 1964.77, "end": 1965.09, "word": " 1", "probability": 0.6337890625}, {"start": 1965.09, "end": 1965.39, "word": " from", "probability": 0.890625}, {"start": 1965.39, "end": 1965.69, "word": " the", "probability": 0.912109375}, {"start": 1965.69, "end": 1966.03, "word": " lower", "probability": 0.88037109375}, {"start": 1966.03, "end": 1966.43, "word": " tail", "probability": 0.85498046875}, {"start": 1966.43, "end": 1967.69, "word": " probabilities.", "probability": 0.8369140625}, {"start": 1971.93, "end": 1972.57, "word": " Now", "probability": 0.9443359375}, {"start": 1972.57, "end": 1973.23, "word": " let's", "probability": 0.760986328125}, {"start": 1973.23, "end": 1973.45, "word": " see", "probability": 0.87158203125}, {"start": 1973.45, "end": 1973.67, "word": " how", "probability": 0.8251953125}, {"start": 1973.67, "end": 1973.89, "word": " can", "probability": 0.88623046875}, {"start": 1973.89, "end": 1974.03, "word": " we", "probability": 0.943359375}, {"start": 1974.03, "end": 1974.43, "word": " find", "probability": 0.89599609375}], "temperature": 1.0}, {"id": 75, "seek": 198997, "start": 1974.83, "end": 1989.97, "text": " Normal probability between two 
values. I mean if X, for example, for the same data we have, suppose X between 8 and 8.6. Now what's the area between these two?", "tokens": [21277, 8482, 1296, 732, 4190, 13, 286, 914, 498, 1783, 11, 337, 1365, 11, 337, 264, 912, 1412, 321, 362, 11, 7297, 1783, 1296, 1649, 293, 1649, 13, 21, 13, 823, 437, 311, 264, 1859, 1296, 613, 732, 30], "avg_logprob": -0.19511719085276127, "compression_ratio": 1.3114754098360655, "no_speech_prob": 0.0, "words": [{"start": 1974.83, "end": 1975.33, "word": " Normal", "probability": 0.41015625}, {"start": 1975.33, "end": 1975.75, "word": " probability", "probability": 0.939453125}, {"start": 1975.75, "end": 1976.73, "word": " between", "probability": 0.8818359375}, {"start": 1976.73, "end": 1976.99, "word": " two", "probability": 0.91845703125}, {"start": 1976.99, "end": 1977.39, "word": " values.", "probability": 0.97119140625}, {"start": 1979.11, "end": 1979.17, "word": " I", "probability": 0.88818359375}, {"start": 1979.17, "end": 1979.35, "word": " mean", "probability": 0.96923828125}, {"start": 1979.35, "end": 1979.55, "word": " if", "probability": 0.59033203125}, {"start": 1979.55, "end": 1980.07, "word": " X,", "probability": 0.60498046875}, {"start": 1980.77, "end": 1981.09, "word": " for", "probability": 0.95556640625}, {"start": 1981.09, "end": 1981.47, "word": " example,", "probability": 0.953125}, {"start": 1981.63, "end": 1981.75, "word": " for", "probability": 0.92333984375}, {"start": 1981.75, "end": 1981.93, "word": " the", "probability": 0.921875}, {"start": 1981.93, "end": 1982.25, "word": " same", "probability": 0.900390625}, {"start": 1982.25, "end": 1982.65, "word": " data", "probability": 0.95654296875}, {"start": 1982.65, "end": 1982.81, "word": " we", "probability": 0.9208984375}, {"start": 1982.81, "end": 1983.09, "word": " have,", "probability": 0.9453125}, {"start": 1983.85, "end": 1984.19, "word": " suppose", "probability": 0.884765625}, {"start": 1984.19, "end": 1984.65, "word": " X", "probability": 
0.96533203125}, {"start": 1984.65, "end": 1985.77, "word": " between", "probability": 0.8212890625}, {"start": 1985.77, "end": 1986.11, "word": " 8", "probability": 0.59814453125}, {"start": 1986.11, "end": 1986.39, "word": " and", "probability": 0.93310546875}, {"start": 1986.39, "end": 1986.61, "word": " 8", "probability": 0.9951171875}, {"start": 1986.61, "end": 1987.21, "word": ".6.", "probability": 0.9892578125}, {"start": 1988.27, "end": 1988.61, "word": " Now", "probability": 0.9130859375}, {"start": 1988.61, "end": 1988.91, "word": " what's", "probability": 0.789794921875}, {"start": 1988.91, "end": 1989.05, "word": " the", "probability": 0.92041015625}, {"start": 1989.05, "end": 1989.31, "word": " area", "probability": 0.93212890625}, {"start": 1989.31, "end": 1989.61, "word": " between", "probability": 0.86865234375}, {"start": 1989.61, "end": 1989.83, "word": " these", "probability": 0.75927734375}, {"start": 1989.83, "end": 1989.97, "word": " two?", "probability": 0.6357421875}], "temperature": 1.0}, {"id": 76, "seek": 202098, "start": 1992.68, "end": 2020.98, "text": " Here we have two values of x, x is 8 and x is 8.6. Exactly, so below 8.6 minus below 8 and below 8 is 1 half. 
So the probability of x between 8 and", "tokens": [1692, 321, 362, 732, 4190, 295, 2031, 11, 2031, 307, 1649, 293, 2031, 307, 1649, 13, 21, 13, 7587, 11, 370, 2507, 1649, 13, 21, 3175, 2507, 1649, 293, 2507, 1649, 307, 502, 1922, 13, 407, 264, 8482, 295, 2031, 1296, 1649, 293], "avg_logprob": -0.3110795481638475, "compression_ratio": 1.2982456140350878, "no_speech_prob": 0.0, "words": [{"start": 1992.68, "end": 1993.22, "word": " Here", "probability": 0.434326171875}, {"start": 1993.22, "end": 1993.36, "word": " we", "probability": 0.767578125}, {"start": 1993.36, "end": 1993.48, "word": " have", "probability": 0.943359375}, {"start": 1993.48, "end": 1993.66, "word": " two", "probability": 0.82568359375}, {"start": 1993.66, "end": 1993.92, "word": " values", "probability": 0.94970703125}, {"start": 1993.92, "end": 1994.08, "word": " of", "probability": 0.94140625}, {"start": 1994.08, "end": 1994.3, "word": " x,", "probability": 0.48046875}, {"start": 1994.48, "end": 1994.64, "word": " x", "probability": 0.96435546875}, {"start": 1994.64, "end": 1994.76, "word": " is", "probability": 0.77880859375}, {"start": 1994.76, "end": 1995.08, "word": " 8", "probability": 0.47509765625}, {"start": 1995.08, "end": 1996.06, "word": " and", "probability": 0.72021484375}, {"start": 1996.06, "end": 1996.28, "word": " x", "probability": 0.9384765625}, {"start": 1996.28, "end": 1996.44, "word": " is", "probability": 0.94482421875}, {"start": 1996.44, "end": 1996.64, "word": " 8", "probability": 0.96240234375}, {"start": 1996.64, "end": 1997.22, "word": ".6.", "probability": 0.96875}, {"start": 2004.28, "end": 2004.88, "word": " Exactly,", "probability": 0.1279296875}, {"start": 2007.16, "end": 2007.26, "word": " so", "probability": 0.84423828125}, {"start": 2007.26, "end": 2007.66, "word": " below", "probability": 0.861328125}, {"start": 2007.66, "end": 2009.34, "word": " 8", "probability": 0.853515625}, {"start": 2009.34, "end": 2009.98, "word": ".6", "probability": 
0.995361328125}, {"start": 2009.98, "end": 2011.26, "word": " minus", "probability": 0.71533203125}, {"start": 2011.26, "end": 2012.0, "word": " below", "probability": 0.873046875}, {"start": 2012.0, "end": 2012.42, "word": " 8", "probability": 0.86767578125}, {"start": 2012.42, "end": 2012.98, "word": " and", "probability": 0.544921875}, {"start": 2012.98, "end": 2013.24, "word": " below", "probability": 0.8818359375}, {"start": 2013.24, "end": 2013.54, "word": " 8", "probability": 0.90771484375}, {"start": 2013.54, "end": 2013.78, "word": " is", "probability": 0.93212890625}, {"start": 2013.78, "end": 2014.02, "word": " 1", "probability": 0.389404296875}, {"start": 2014.02, "end": 2014.32, "word": " half.", "probability": 0.047454833984375}, {"start": 2014.88, "end": 2015.22, "word": " So", "probability": 0.919921875}, {"start": 2015.22, "end": 2015.42, "word": " the", "probability": 0.77685546875}, {"start": 2015.42, "end": 2015.74, "word": " probability", "probability": 0.9345703125}, {"start": 2015.74, "end": 2016.2, "word": " of", "probability": 0.9619140625}, {"start": 2016.2, "end": 2017.06, "word": " x", "probability": 0.86328125}, {"start": 2017.06, "end": 2017.6, "word": " between", "probability": 0.8984375}, {"start": 2017.6, "end": 2020.84, "word": " 8", "probability": 0.8046875}, {"start": 2020.84, "end": 2020.98, "word": " and", "probability": 0.66796875}], "temperature": 1.0}, {"id": 77, "seek": 204967, "start": 2021.08, "end": 2049.68, "text": " And 8.2 and 8.6. You can find z-score for the first value, which is zero. Also compute the z-score for the other value, which as we computed before, 0.12. Now this problem becomes z between zero and 0.5. 
So B of x.", "tokens": [400, 1649, 13, 17, 293, 1649, 13, 21, 13, 509, 393, 915, 710, 12, 4417, 418, 337, 264, 700, 2158, 11, 597, 307, 4018, 13, 2743, 14722, 264, 710, 12, 4417, 418, 337, 264, 661, 2158, 11, 597, 382, 321, 40610, 949, 11, 1958, 13, 4762, 13, 823, 341, 1154, 3643, 710, 1296, 4018, 293, 1958, 13, 20, 13, 407, 363, 295, 2031, 13], "avg_logprob": -0.23569711538461538, "compression_ratio": 1.4429530201342282, "no_speech_prob": 0.0, "words": [{"start": 2021.08, "end": 2021.36, "word": " And", "probability": 0.431884765625}, {"start": 2021.36, "end": 2021.62, "word": " 8", "probability": 0.912109375}, {"start": 2021.62, "end": 2022.12, "word": ".2", "probability": 0.97412109375}, {"start": 2022.12, "end": 2022.52, "word": " and", "probability": 0.7685546875}, {"start": 2022.52, "end": 2022.72, "word": " 8", "probability": 0.99462890625}, {"start": 2022.72, "end": 2023.3, "word": ".6.", "probability": 0.998291015625}, {"start": 2024.64, "end": 2025.2, "word": " You", "probability": 0.953125}, {"start": 2025.2, "end": 2025.48, "word": " can", "probability": 0.94921875}, {"start": 2025.48, "end": 2025.92, "word": " find", "probability": 0.87841796875}, {"start": 2025.92, "end": 2026.38, "word": " z", "probability": 0.2890625}, {"start": 2026.38, "end": 2026.74, "word": "-score", "probability": 0.730224609375}, {"start": 2026.74, "end": 2027.16, "word": " for", "probability": 0.93994140625}, {"start": 2027.16, "end": 2027.34, "word": " the", "probability": 0.9150390625}, {"start": 2027.34, "end": 2027.58, "word": " first", "probability": 0.87548828125}, {"start": 2027.58, "end": 2027.94, "word": " value,", "probability": 0.974609375}, {"start": 2028.32, "end": 2028.5, "word": " which", "probability": 0.943359375}, {"start": 2028.5, "end": 2028.62, "word": " is", "probability": 0.94873046875}, {"start": 2028.62, "end": 2028.86, "word": " zero.", "probability": 0.59521484375}, {"start": 2030.92, "end": 2031.56, "word": " Also", "probability": 0.92724609375}, 
{"start": 2031.56, "end": 2032.06, "word": " compute", "probability": 0.8232421875}, {"start": 2032.06, "end": 2032.28, "word": " the", "probability": 0.88037109375}, {"start": 2032.28, "end": 2032.48, "word": " z", "probability": 0.98583984375}, {"start": 2032.48, "end": 2032.8, "word": "-score", "probability": 0.9187825520833334}, {"start": 2032.8, "end": 2033.04, "word": " for", "probability": 0.94482421875}, {"start": 2033.04, "end": 2033.18, "word": " the", "probability": 0.9091796875}, {"start": 2033.18, "end": 2033.42, "word": " other", "probability": 0.88134765625}, {"start": 2033.42, "end": 2033.74, "word": " value,", "probability": 0.962890625}, {"start": 2033.84, "end": 2033.94, "word": " which", "probability": 0.943359375}, {"start": 2033.94, "end": 2034.12, "word": " as", "probability": 0.486328125}, {"start": 2034.12, "end": 2034.38, "word": " we", "probability": 0.96337890625}, {"start": 2034.38, "end": 2035.54, "word": " computed", "probability": 0.90869140625}, {"start": 2035.54, "end": 2035.9, "word": " before,", "probability": 0.873046875}, {"start": 2036.02, "end": 2036.26, "word": " 0", "probability": 0.451416015625}, {"start": 2036.26, "end": 2036.58, "word": ".12.", "probability": 0.990234375}, {"start": 2037.28, "end": 2037.56, "word": " Now", "probability": 0.93603515625}, {"start": 2037.56, "end": 2037.86, "word": " this", "probability": 0.7021484375}, {"start": 2037.86, "end": 2038.26, "word": " problem", "probability": 0.8544921875}, {"start": 2038.26, "end": 2038.98, "word": " becomes", "probability": 0.85546875}, {"start": 2038.98, "end": 2041.3, "word": " z", "probability": 0.87060546875}, {"start": 2041.3, "end": 2041.58, "word": " between", "probability": 0.8828125}, {"start": 2041.58, "end": 2042.1, "word": " zero", "probability": 0.6533203125}, {"start": 2042.1, "end": 2043.98, "word": " and", "probability": 0.89111328125}, {"start": 2043.98, "end": 2044.24, "word": " 0", "probability": 0.625}, {"start": 2044.24, "end": 2044.54, 
"word": ".5.", "probability": 0.826171875}, {"start": 2047.48, "end": 2047.9, "word": " So", "probability": 0.953125}, {"start": 2047.9, "end": 2049.18, "word": " B", "probability": 0.09246826171875}, {"start": 2049.18, "end": 2049.32, "word": " of", "probability": 0.6484375}, {"start": 2049.32, "end": 2049.68, "word": " x.", "probability": 0.56005859375}], "temperature": 1.0}, {"id": 78, "seek": 207900, "start": 2050.56, "end": 2079.0, "text": " Greater than 8 and smaller than 8.6 is the same as z between 0 and 0.12. Now this area equals b of z smaller than 0.12 minus the area below z which is 1.5. So again, b of z between 0 and 1.5 equal b of z small.", "tokens": [38410, 813, 1649, 293, 4356, 813, 1649, 13, 21, 307, 264, 912, 382, 710, 1296, 1958, 293, 1958, 13, 4762, 13, 823, 341, 1859, 6915, 272, 295, 710, 4356, 813, 1958, 13, 4762, 3175, 264, 1859, 2507, 710, 597, 307, 502, 13, 20, 13, 407, 797, 11, 272, 295, 710, 1296, 1958, 293, 502, 13, 20, 2681, 272, 295, 710, 1359, 13], "avg_logprob": -0.22743055129808093, "compression_ratio": 1.5746268656716418, "no_speech_prob": 0.0, "words": [{"start": 2050.56, "end": 2051.0, "word": " Greater", "probability": 0.269287109375}, {"start": 2051.0, "end": 2051.38, "word": " than", "probability": 0.9443359375}, {"start": 2051.38, "end": 2051.72, "word": " 8", "probability": 0.556640625}, {"start": 2051.72, "end": 2052.74, "word": " and", "probability": 0.5439453125}, {"start": 2052.74, "end": 2053.12, "word": " smaller", "probability": 0.85888671875}, {"start": 2053.12, "end": 2053.36, "word": " than", "probability": 0.943359375}, {"start": 2053.36, "end": 2053.52, "word": " 8", "probability": 0.98388671875}, {"start": 2053.52, "end": 2054.14, "word": ".6", "probability": 0.986572265625}, {"start": 2054.14, "end": 2055.12, "word": " is", "probability": 0.767578125}, {"start": 2055.12, "end": 2055.28, "word": " the", "probability": 0.91650390625}, {"start": 2055.28, "end": 2055.52, "word": " same", "probability": 
0.91650390625}, {"start": 2055.52, "end": 2055.9, "word": " as", "probability": 0.94921875}, {"start": 2055.9, "end": 2056.24, "word": " z", "probability": 0.65283203125}, {"start": 2056.24, "end": 2056.58, "word": " between", "probability": 0.89990234375}, {"start": 2056.58, "end": 2057.04, "word": " 0", "probability": 0.79541015625}, {"start": 2057.04, "end": 2057.74, "word": " and", "probability": 0.92626953125}, {"start": 2057.74, "end": 2058.0, "word": " 0", "probability": 0.52099609375}, {"start": 2058.0, "end": 2058.26, "word": ".12.", "probability": 0.830322265625}, {"start": 2059.26, "end": 2059.94, "word": " Now", "probability": 0.943359375}, {"start": 2059.94, "end": 2060.34, "word": " this", "probability": 0.64013671875}, {"start": 2060.34, "end": 2060.8, "word": " area", "probability": 0.900390625}, {"start": 2060.8, "end": 2061.6, "word": " equals", "probability": 0.90234375}, {"start": 2061.6, "end": 2062.08, "word": " b", "probability": 0.427734375}, {"start": 2062.08, "end": 2062.24, "word": " of", "probability": 0.951171875}, {"start": 2062.24, "end": 2062.38, "word": " z", "probability": 0.9755859375}, {"start": 2062.38, "end": 2062.78, "word": " smaller", "probability": 0.748046875}, {"start": 2062.78, "end": 2063.06, "word": " than", "probability": 0.9365234375}, {"start": 2063.06, "end": 2063.32, "word": " 0", "probability": 0.880859375}, {"start": 2063.32, "end": 2063.66, "word": ".12", "probability": 0.98388671875}, {"start": 2063.66, "end": 2064.72, "word": " minus", "probability": 0.931640625}, {"start": 2064.72, "end": 2065.1, "word": " the", "probability": 0.9091796875}, {"start": 2065.1, "end": 2065.32, "word": " area", "probability": 0.91455078125}, {"start": 2065.32, "end": 2065.56, "word": " below", "probability": 0.86767578125}, {"start": 2065.56, "end": 2065.8, "word": " z", "probability": 0.9306640625}, {"start": 2065.8, "end": 2065.98, "word": " which", "probability": 0.64306640625}, {"start": 2065.98, "end": 2066.08, "word": " 
is", "probability": 0.9501953125}, {"start": 2066.08, "end": 2066.28, "word": " 1", "probability": 0.58740234375}, {"start": 2066.28, "end": 2066.52, "word": ".5.", "probability": 0.336212158203125}, {"start": 2071.1, "end": 2071.78, "word": " So", "probability": 0.955078125}, {"start": 2071.78, "end": 2072.12, "word": " again,", "probability": 0.90869140625}, {"start": 2073.28, "end": 2074.24, "word": " b", "probability": 0.81396484375}, {"start": 2074.24, "end": 2074.4, "word": " of", "probability": 0.9677734375}, {"start": 2074.4, "end": 2074.76, "word": " z", "probability": 0.994140625}, {"start": 2074.76, "end": 2075.16, "word": " between", "probability": 0.87060546875}, {"start": 2075.16, "end": 2075.46, "word": " 0", "probability": 0.91162109375}, {"start": 2075.46, "end": 2075.58, "word": " and", "probability": 0.93359375}, {"start": 2075.58, "end": 2075.74, "word": " 1", "probability": 0.7900390625}, {"start": 2075.74, "end": 2075.96, "word": ".5", "probability": 0.8779296875}, {"start": 2075.96, "end": 2076.38, "word": " equal", "probability": 0.455078125}, {"start": 2076.38, "end": 2076.92, "word": " b", "probability": 0.91845703125}, {"start": 2076.92, "end": 2077.1, "word": " of", "probability": 0.97607421875}, {"start": 2077.1, "end": 2077.38, "word": " z", "probability": 0.99560546875}, {"start": 2077.38, "end": 2079.0, "word": " small.", "probability": 0.55029296875}], "temperature": 1.0}, {"id": 79, "seek": 210838, "start": 2079.74, "end": 2108.38, "text": " larger than 0.12 minus b of z less than zero. Now, b of z less than 0.12 gives this result, 0.5478. The probability below zero is one-half because we know that the area to the left is zero, same as to the right is one-half. So the answer is going to be 0.478. 
So that's how can we compute the probabilities for lower 10 directly from the table.", "tokens": [4833, 813, 1958, 13, 4762, 3175, 272, 295, 710, 1570, 813, 4018, 13, 823, 11, 272, 295, 710, 1570, 813, 1958, 13, 4762, 2709, 341, 1874, 11, 1958, 13, 20, 14060, 23, 13, 440, 8482, 2507, 4018, 307, 472, 12, 25461, 570, 321, 458, 300, 264, 1859, 281, 264, 1411, 307, 4018, 11, 912, 382, 281, 264, 558, 307, 472, 12, 25461, 13, 407, 264, 1867, 307, 516, 281, 312, 1958, 13, 14060, 23, 13, 407, 300, 311, 577, 393, 321, 14722, 264, 33783, 337, 3126, 1266, 3838, 490, 264, 3199, 13], "avg_logprob": -0.24596773488547213, "compression_ratio": 1.6121495327102804, "no_speech_prob": 0.0, "words": [{"start": 2079.74, "end": 2080.12, "word": " larger", "probability": 0.31640625}, {"start": 2080.12, "end": 2080.38, "word": " than", "probability": 0.91064453125}, {"start": 2080.38, "end": 2080.54, "word": " 0", "probability": 0.587890625}, {"start": 2080.54, "end": 2080.94, "word": ".12", "probability": 0.98095703125}, {"start": 2080.94, "end": 2081.52, "word": " minus", "probability": 0.88623046875}, {"start": 2081.52, "end": 2082.24, "word": " b", "probability": 0.30322265625}, {"start": 2082.24, "end": 2082.38, "word": " of", "probability": 0.876953125}, {"start": 2082.38, "end": 2082.48, "word": " z", "probability": 0.91259765625}, {"start": 2082.48, "end": 2082.68, "word": " less", "probability": 0.89111328125}, {"start": 2082.68, "end": 2082.84, "word": " than", "probability": 0.92822265625}, {"start": 2082.84, "end": 2083.16, "word": " zero.", "probability": 0.3974609375}, {"start": 2083.84, "end": 2084.14, "word": " Now,", "probability": 0.9013671875}, {"start": 2084.28, "end": 2084.4, "word": " b", "probability": 0.91455078125}, {"start": 2084.4, "end": 2084.52, "word": " of", "probability": 0.96240234375}, {"start": 2084.52, "end": 2084.66, "word": " z", "probability": 0.98828125}, {"start": 2084.66, "end": 2084.88, "word": " less", "probability": 0.91748046875}, {"start": 
2084.88, "end": 2085.08, "word": " than", "probability": 0.93505859375}, {"start": 2085.08, "end": 2085.3, "word": " 0", "probability": 0.9189453125}, {"start": 2085.3, "end": 2085.66, "word": ".12", "probability": 0.996826171875}, {"start": 2085.66, "end": 2086.18, "word": " gives", "probability": 0.884765625}, {"start": 2086.18, "end": 2086.52, "word": " this", "probability": 0.84130859375}, {"start": 2086.52, "end": 2086.96, "word": " result,", "probability": 0.91455078125}, {"start": 2089.38, "end": 2089.56, "word": " 0", "probability": 0.81201171875}, {"start": 2089.56, "end": 2090.82, "word": ".5478.", "probability": 0.8983154296875}, {"start": 2091.28, "end": 2091.56, "word": " The", "probability": 0.72216796875}, {"start": 2091.56, "end": 2092.06, "word": " probability", "probability": 0.953125}, {"start": 2092.06, "end": 2092.4, "word": " below", "probability": 0.88623046875}, {"start": 2092.4, "end": 2092.76, "word": " zero", "probability": 0.77978515625}, {"start": 2092.76, "end": 2092.9, "word": " is", "probability": 0.9033203125}, {"start": 2092.9, "end": 2093.06, "word": " one", "probability": 0.73779296875}, {"start": 2093.06, "end": 2093.3, "word": "-half", "probability": 0.736572265625}, {"start": 2093.3, "end": 2094.08, "word": " because", "probability": 0.462646484375}, {"start": 2094.08, "end": 2094.22, "word": " we", "probability": 0.90576171875}, {"start": 2094.22, "end": 2094.34, "word": " know", "probability": 0.884765625}, {"start": 2094.34, "end": 2094.64, "word": " that", "probability": 0.9140625}, {"start": 2094.64, "end": 2094.8, "word": " the", "probability": 0.75634765625}, {"start": 2094.8, "end": 2094.96, "word": " area", "probability": 0.3203125}, {"start": 2094.96, "end": 2095.46, "word": " to", "probability": 0.79833984375}, {"start": 2095.46, "end": 2095.6, "word": " the", "probability": 0.9208984375}, {"start": 2095.6, "end": 2095.88, "word": " left", "probability": 0.9462890625}, {"start": 2095.88, "end": 2096.16, "word": " 
is", "probability": 0.336181640625}, {"start": 2096.16, "end": 2096.48, "word": " zero,", "probability": 0.88623046875}, {"start": 2096.82, "end": 2097.14, "word": " same", "probability": 0.7216796875}, {"start": 2097.14, "end": 2097.36, "word": " as", "probability": 0.94580078125}, {"start": 2097.36, "end": 2097.56, "word": " to", "probability": 0.468994140625}, {"start": 2097.56, "end": 2097.74, "word": " the", "probability": 0.90185546875}, {"start": 2097.74, "end": 2097.94, "word": " right", "probability": 0.91455078125}, {"start": 2097.94, "end": 2098.14, "word": " is", "probability": 0.7841796875}, {"start": 2098.14, "end": 2098.3, "word": " one", "probability": 0.923828125}, {"start": 2098.3, "end": 2098.52, "word": "-half.", "probability": 0.94384765625}, {"start": 2099.02, "end": 2099.2, "word": " So", "probability": 0.89697265625}, {"start": 2099.2, "end": 2099.32, "word": " the", "probability": 0.65380859375}, {"start": 2099.32, "end": 2099.52, "word": " answer", "probability": 0.95654296875}, {"start": 2099.52, "end": 2099.68, "word": " is", "probability": 0.4697265625}, {"start": 2099.68, "end": 2099.76, "word": " going", "probability": 0.82373046875}, {"start": 2099.76, "end": 2099.8, "word": " to", "probability": 0.97021484375}, {"start": 2099.8, "end": 2099.96, "word": " be", "probability": 0.94970703125}, {"start": 2099.96, "end": 2100.36, "word": " 0", "probability": 0.8271484375}, {"start": 2100.36, "end": 2101.96, "word": ".478.", "probability": 0.8253580729166666}, {"start": 2102.96, "end": 2103.5, "word": " So", "probability": 0.93212890625}, {"start": 2103.5, "end": 2103.78, "word": " that's", "probability": 0.885009765625}, {"start": 2103.78, "end": 2103.9, "word": " how", "probability": 0.9326171875}, {"start": 2103.9, "end": 2104.08, "word": " can", "probability": 0.68896484375}, {"start": 2104.08, "end": 2104.24, "word": " we", "probability": 0.94921875}, {"start": 2104.24, "end": 2104.68, "word": " compute", "probability": 0.9033203125}, 
{"start": 2104.68, "end": 2105.28, "word": " the", "probability": 0.88037109375}, {"start": 2105.28, "end": 2105.68, "word": " probabilities", "probability": 0.8955078125}, {"start": 2105.68, "end": 2106.0, "word": " for", "probability": 0.93115234375}, {"start": 2106.0, "end": 2106.46, "word": " lower", "probability": 0.77685546875}, {"start": 2106.46, "end": 2106.84, "word": " 10", "probability": 0.243408203125}, {"start": 2106.84, "end": 2107.54, "word": " directly", "probability": 0.849609375}, {"start": 2107.54, "end": 2107.96, "word": " from", "probability": 0.880859375}, {"start": 2107.96, "end": 2108.14, "word": " the", "probability": 0.92529296875}, {"start": 2108.14, "end": 2108.38, "word": " table.", "probability": 0.8408203125}], "temperature": 1.0}, {"id": 80, "seek": 213733, "start": 2109.19, "end": 2137.33, "text": " upper tail is just one minus lower tail and between two values just subtracts the larger one minus smaller one because he was subtracted bz less than point one minus bz less than or equal to zero that will give the normal probability another example suppose we are looking for", "tokens": [6597, 6838, 307, 445, 472, 3175, 3126, 6838, 293, 1296, 732, 4190, 445, 16390, 82, 264, 4833, 472, 3175, 4356, 472, 570, 415, 390, 16390, 292, 272, 89, 1570, 813, 935, 472, 3175, 272, 89, 1570, 813, 420, 2681, 281, 4018, 300, 486, 976, 264, 2710, 8482, 1071, 1365, 7297, 321, 366, 1237, 337], "avg_logprob": -0.1940340968695554, "compression_ratio": 1.7098765432098766, "no_speech_prob": 0.0, "words": [{"start": 2109.19, "end": 2109.57, "word": " upper", "probability": 0.18408203125}, {"start": 2109.57, "end": 2109.97, "word": " tail", "probability": 0.88427734375}, {"start": 2109.97, "end": 2110.89, "word": " is", "probability": 0.8232421875}, {"start": 2110.89, "end": 2111.11, "word": " just", "probability": 0.90576171875}, {"start": 2111.11, "end": 2111.35, "word": " one", "probability": 0.66455078125}, {"start": 2111.35, "end": 2111.81, "word": " 
minus", "probability": 0.98388671875}, {"start": 2111.81, "end": 2112.23, "word": " lower", "probability": 0.8701171875}, {"start": 2112.23, "end": 2112.59, "word": " tail", "probability": 0.875}, {"start": 2112.59, "end": 2114.09, "word": " and", "probability": 0.78955078125}, {"start": 2114.09, "end": 2114.97, "word": " between", "probability": 0.89501953125}, {"start": 2114.97, "end": 2115.23, "word": " two", "probability": 0.93115234375}, {"start": 2115.23, "end": 2115.75, "word": " values", "probability": 0.96728515625}, {"start": 2115.75, "end": 2116.85, "word": " just", "probability": 0.7734375}, {"start": 2116.85, "end": 2118.23, "word": " subtracts", "probability": 0.80615234375}, {"start": 2118.23, "end": 2118.99, "word": " the", "probability": 0.89306640625}, {"start": 2118.99, "end": 2119.39, "word": " larger", "probability": 0.9443359375}, {"start": 2119.39, "end": 2119.83, "word": " one", "probability": 0.921875}, {"start": 2119.83, "end": 2120.25, "word": " minus", "probability": 0.982421875}, {"start": 2120.25, "end": 2120.63, "word": " smaller", "probability": 0.82666015625}, {"start": 2120.63, "end": 2120.95, "word": " one", "probability": 0.927734375}, {"start": 2120.95, "end": 2121.37, "word": " because", "probability": 0.75244140625}, {"start": 2121.37, "end": 2121.53, "word": " he", "probability": 0.7373046875}, {"start": 2121.53, "end": 2121.97, "word": " was", "probability": 0.6240234375}, {"start": 2121.97, "end": 2122.97, "word": " subtracted", "probability": 0.8994140625}, {"start": 2122.97, "end": 2123.73, "word": " bz", "probability": 0.734619140625}, {"start": 2123.73, "end": 2124.19, "word": " less", "probability": 0.8642578125}, {"start": 2124.19, "end": 2124.33, "word": " than", "probability": 0.923828125}, {"start": 2124.33, "end": 2124.57, "word": " point", "probability": 0.467529296875}, {"start": 2124.57, "end": 2124.81, "word": " one", "probability": 0.465576171875}, {"start": 2124.81, "end": 2125.37, "word": " minus", 
"probability": 0.9658203125}, {"start": 2125.37, "end": 2126.05, "word": " bz", "probability": 0.923828125}, {"start": 2126.05, "end": 2126.31, "word": " less", "probability": 0.90966796875}, {"start": 2126.31, "end": 2126.51, "word": " than", "probability": 0.931640625}, {"start": 2126.51, "end": 2126.67, "word": " or", "probability": 0.912109375}, {"start": 2126.67, "end": 2126.85, "word": " equal", "probability": 0.927734375}, {"start": 2126.85, "end": 2127.47, "word": " to", "probability": 0.9462890625}, {"start": 2127.47, "end": 2127.73, "word": " zero", "probability": 0.81640625}, {"start": 2127.73, "end": 2128.55, "word": " that", "probability": 0.68798828125}, {"start": 2128.55, "end": 2128.73, "word": " will", "probability": 0.87548828125}, {"start": 2128.73, "end": 2128.95, "word": " give", "probability": 0.88525390625}, {"start": 2128.95, "end": 2129.13, "word": " the", "probability": 0.9130859375}, {"start": 2129.13, "end": 2129.43, "word": " normal", "probability": 0.8759765625}, {"start": 2129.43, "end": 2129.95, "word": " probability", "probability": 0.7958984375}, {"start": 2129.95, "end": 2132.07, "word": " another", "probability": 0.7431640625}, {"start": 2132.07, "end": 2132.57, "word": " example", "probability": 0.97021484375}, {"start": 2132.57, "end": 2134.55, "word": " suppose", "probability": 0.892578125}, {"start": 2134.55, "end": 2136.29, "word": " we", "probability": 0.95947265625}, {"start": 2136.29, "end": 2136.51, "word": " are", "probability": 0.9423828125}, {"start": 2136.51, "end": 2136.85, "word": " looking", "probability": 0.91357421875}, {"start": 2136.85, "end": 2137.33, "word": " for", "probability": 0.95458984375}], "temperature": 1.0}, {"id": 81, "seek": 216409, "start": 2138.75, "end": 2164.09, "text": " X between 7.4 and 8. Now, 7.4 lies below the mean. So here, this value, we have to compute the z-score for 7.4 and also the z-score for 8, which is zero. 
And that will give, again,", "tokens": [1783, 1296, 1614, 13, 19, 293, 1649, 13, 823, 11, 1614, 13, 19, 9134, 2507, 264, 914, 13, 407, 510, 11, 341, 2158, 11, 321, 362, 281, 14722, 264, 710, 12, 4417, 418, 337, 1614, 13, 19, 293, 611, 264, 710, 12, 4417, 418, 337, 1649, 11, 597, 307, 4018, 13, 400, 300, 486, 976, 11, 797, 11], "avg_logprob": -0.21438030014603826, "compression_ratio": 1.371212121212121, "no_speech_prob": 0.0, "words": [{"start": 2138.75, "end": 2139.33, "word": " X", "probability": 0.346923828125}, {"start": 2139.33, "end": 2140.01, "word": " between", "probability": 0.84716796875}, {"start": 2140.01, "end": 2142.37, "word": " 7", "probability": 0.759765625}, {"start": 2142.37, "end": 2143.03, "word": ".4", "probability": 0.984130859375}, {"start": 2143.03, "end": 2145.13, "word": " and", "probability": 0.89306640625}, {"start": 2145.13, "end": 2145.55, "word": " 8.", "probability": 0.75927734375}, {"start": 2146.57, "end": 2147.25, "word": " Now,", "probability": 0.84619140625}, {"start": 2147.31, "end": 2147.53, "word": " 7", "probability": 0.974609375}, {"start": 2147.53, "end": 2148.05, "word": ".4", "probability": 0.998779296875}, {"start": 2148.05, "end": 2148.59, "word": " lies", "probability": 0.9384765625}, {"start": 2148.59, "end": 2149.03, "word": " below", "probability": 0.8984375}, {"start": 2149.03, "end": 2149.35, "word": " the", "probability": 0.91748046875}, {"start": 2149.35, "end": 2149.53, "word": " mean.", "probability": 0.97314453125}, {"start": 2150.87, "end": 2151.43, "word": " So", "probability": 0.591796875}, {"start": 2151.43, "end": 2151.71, "word": " here,", "probability": 0.66162109375}, {"start": 2152.03, "end": 2152.99, "word": " this", "probability": 0.9228515625}, {"start": 2152.99, "end": 2153.41, "word": " value,", "probability": 0.9775390625}, {"start": 2153.85, "end": 2154.01, "word": " we", "probability": 0.94677734375}, {"start": 2154.01, "end": 2154.19, "word": " have", "probability": 0.9423828125}, {"start": 
2154.19, "end": 2154.31, "word": " to", "probability": 0.96875}, {"start": 2154.31, "end": 2154.75, "word": " compute", "probability": 0.89306640625}, {"start": 2154.75, "end": 2155.27, "word": " the", "probability": 0.57861328125}, {"start": 2155.27, "end": 2155.41, "word": " z", "probability": 0.50341796875}, {"start": 2155.41, "end": 2155.75, "word": "-score", "probability": 0.8152669270833334}, {"start": 2155.75, "end": 2156.05, "word": " for", "probability": 0.93408203125}, {"start": 2156.05, "end": 2156.29, "word": " 7", "probability": 0.99169921875}, {"start": 2156.29, "end": 2156.81, "word": ".4", "probability": 0.9990234375}, {"start": 2156.81, "end": 2158.23, "word": " and", "probability": 0.4248046875}, {"start": 2158.23, "end": 2158.71, "word": " also", "probability": 0.87451171875}, {"start": 2158.71, "end": 2158.97, "word": " the", "probability": 0.82470703125}, {"start": 2158.97, "end": 2159.15, "word": " z", "probability": 0.990234375}, {"start": 2159.15, "end": 2159.35, "word": "-score", "probability": 0.9031575520833334}, {"start": 2159.35, "end": 2159.63, "word": " for", "probability": 0.943359375}, {"start": 2159.63, "end": 2159.93, "word": " 8,", "probability": 0.90771484375}, {"start": 2160.03, "end": 2160.13, "word": " which", "probability": 0.94775390625}, {"start": 2160.13, "end": 2160.29, "word": " is", "probability": 0.9501953125}, {"start": 2160.29, "end": 2160.59, "word": " zero.", "probability": 0.53857421875}, {"start": 2161.61, "end": 2162.17, "word": " And", "probability": 0.93701171875}, {"start": 2162.17, "end": 2162.37, "word": " that", "probability": 0.91357421875}, {"start": 2162.37, "end": 2162.53, "word": " will", "probability": 0.73388671875}, {"start": 2162.53, "end": 2162.77, "word": " give,", "probability": 0.6953125}, {"start": 2163.63, "end": 2164.09, "word": " again,", "probability": 0.9580078125}], "temperature": 1.0}, {"id": 82, "seek": 219392, "start": 2167.05, "end": 2193.93, "text": " 7.4, if you just use this 
equation, minus the mean, divided by sigma, negative 0.6 divided by 5, which is negative 0.12. So it gives B of z between minus 0.12 and 0. And that again is B of z less than 0.", "tokens": [1614, 13, 19, 11, 498, 291, 445, 764, 341, 5367, 11, 3175, 264, 914, 11, 6666, 538, 12771, 11, 3671, 1958, 13, 21, 6666, 538, 1025, 11, 597, 307, 3671, 1958, 13, 4762, 13, 407, 309, 2709, 363, 295, 710, 1296, 3175, 1958, 13, 4762, 293, 1958, 13, 400, 300, 797, 307, 363, 295, 710, 1570, 813, 1958, 13], "avg_logprob": -0.184375, "compression_ratio": 1.4166666666666667, "no_speech_prob": 0.0, "words": [{"start": 2167.05, "end": 2167.49, "word": " 7", "probability": 0.43505859375}, {"start": 2167.49, "end": 2168.15, "word": ".4,", "probability": 0.980712890625}, {"start": 2168.25, "end": 2168.41, "word": " if", "probability": 0.90625}, {"start": 2168.41, "end": 2168.55, "word": " you", "probability": 0.6845703125}, {"start": 2168.55, "end": 2168.87, "word": " just", "probability": 0.88818359375}, {"start": 2168.87, "end": 2169.39, "word": " use", "probability": 0.853515625}, {"start": 2169.39, "end": 2169.63, "word": " this", "probability": 0.94873046875}, {"start": 2169.63, "end": 2170.17, "word": " equation,", "probability": 0.93798828125}, {"start": 2170.69, "end": 2173.71, "word": " minus", "probability": 0.92041015625}, {"start": 2173.71, "end": 2173.99, "word": " the", "probability": 0.912109375}, {"start": 2173.99, "end": 2174.19, "word": " mean,", "probability": 0.96923828125}, {"start": 2174.53, "end": 2174.87, "word": " divided", "probability": 0.71630859375}, {"start": 2174.87, "end": 2175.07, "word": " by", "probability": 0.9755859375}, {"start": 2175.07, "end": 2175.47, "word": " sigma,", "probability": 0.7490234375}, {"start": 2176.31, "end": 2176.75, "word": " negative", "probability": 0.81982421875}, {"start": 2176.75, "end": 2177.07, "word": " 0", "probability": 0.611328125}, {"start": 2177.07, "end": 2177.43, "word": ".6", "probability": 0.98828125}, {"start": 
2177.43, "end": 2177.69, "word": " divided", "probability": 0.71142578125}, {"start": 2177.69, "end": 2177.93, "word": " by", "probability": 0.9677734375}, {"start": 2177.93, "end": 2178.33, "word": " 5,", "probability": 0.810546875}, {"start": 2178.61, "end": 2178.81, "word": " which", "probability": 0.94482421875}, {"start": 2178.81, "end": 2178.95, "word": " is", "probability": 0.94482421875}, {"start": 2178.95, "end": 2179.17, "word": " negative", "probability": 0.9091796875}, {"start": 2179.17, "end": 2179.49, "word": " 0", "probability": 0.92529296875}, {"start": 2179.49, "end": 2181.15, "word": ".12.", "probability": 0.966064453125}, {"start": 2182.73, "end": 2183.41, "word": " So", "probability": 0.9580078125}, {"start": 2183.41, "end": 2183.65, "word": " it", "probability": 0.81201171875}, {"start": 2183.65, "end": 2184.03, "word": " gives", "probability": 0.90625}, {"start": 2184.03, "end": 2184.49, "word": " B", "probability": 0.83154296875}, {"start": 2184.49, "end": 2185.45, "word": " of", "probability": 0.92333984375}, {"start": 2185.45, "end": 2185.77, "word": " z", "probability": 0.65673828125}, {"start": 2185.77, "end": 2186.91, "word": " between", "probability": 0.837890625}, {"start": 2186.91, "end": 2187.37, "word": " minus", "probability": 0.92919921875}, {"start": 2187.37, "end": 2187.65, "word": " 0", "probability": 0.96240234375}, {"start": 2187.65, "end": 2188.13, "word": ".12", "probability": 0.997314453125}, {"start": 2188.13, "end": 2189.63, "word": " and", "probability": 0.861328125}, {"start": 2189.63, "end": 2189.99, "word": " 0.", "probability": 0.6142578125}, {"start": 2190.73, "end": 2191.41, "word": " And", "probability": 0.88525390625}, {"start": 2191.41, "end": 2191.59, "word": " that", "probability": 0.93212890625}, {"start": 2191.59, "end": 2191.89, "word": " again", "probability": 0.875}, {"start": 2191.89, "end": 2192.29, "word": " is", "probability": 0.90283203125}, {"start": 2192.29, "end": 2192.89, "word": " B", 
"probability": 0.88671875}, {"start": 2192.89, "end": 2193.05, "word": " of", "probability": 0.95751953125}, {"start": 2193.05, "end": 2193.17, "word": " z", "probability": 0.98486328125}, {"start": 2193.17, "end": 2193.39, "word": " less", "probability": 0.93310546875}, {"start": 2193.39, "end": 2193.59, "word": " than", "probability": 0.94140625}, {"start": 2193.59, "end": 2193.93, "word": " 0.", "probability": 0.76171875}], "temperature": 1.0}, {"id": 83, "seek": 222054, "start": 2194.64, "end": 2220.54, "text": " minus P of Z less than negative 0.12. Is it clear? Now here we converted or we transformed from normal distribution to standardized. So instead of X between 7.4 and 8, we have now Z between minus 0.12 and 0. So this area actually is the red one, the red area is one-half.", "tokens": [3175, 430, 295, 1176, 1570, 813, 3671, 1958, 13, 4762, 13, 1119, 309, 1850, 30, 823, 510, 321, 16424, 420, 321, 16894, 490, 2710, 7316, 281, 31677, 13, 407, 2602, 295, 1783, 1296, 1614, 13, 19, 293, 1649, 11, 321, 362, 586, 1176, 1296, 3175, 1958, 13, 4762, 293, 1958, 13, 407, 341, 1859, 767, 307, 264, 2182, 472, 11, 264, 2182, 1859, 307, 472, 12, 25461, 13], "avg_logprob": -0.20697463681732398, "compression_ratio": 1.431578947368421, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 2194.64, "end": 2195.18, "word": " minus", "probability": 0.24560546875}, {"start": 2195.18, "end": 2195.42, "word": " P", "probability": 0.5634765625}, {"start": 2195.42, "end": 2195.58, "word": " of", "probability": 0.88818359375}, {"start": 2195.58, "end": 2195.7, "word": " Z", "probability": 0.560546875}, {"start": 2195.7, "end": 2195.92, "word": " less", "probability": 0.88720703125}, {"start": 2195.92, "end": 2196.08, "word": " than", "probability": 0.92822265625}, {"start": 2196.08, "end": 2196.38, "word": " negative", "probability": 0.8603515625}, {"start": 2196.38, "end": 2196.64, "word": " 0", "probability": 0.65966796875}, {"start": 2196.64, "end": 2196.88, "word": 
".12.", "probability": 0.9443359375}, {"start": 2197.72, "end": 2198.34, "word": " Is", "probability": 0.93701171875}, {"start": 2198.34, "end": 2198.44, "word": " it", "probability": 0.56884765625}, {"start": 2198.44, "end": 2198.72, "word": " clear?", "probability": 0.88330078125}, {"start": 2199.18, "end": 2199.8, "word": " Now", "probability": 0.63525390625}, {"start": 2199.8, "end": 2199.98, "word": " here", "probability": 0.6748046875}, {"start": 2199.98, "end": 2200.14, "word": " we", "probability": 0.7568359375}, {"start": 2200.14, "end": 2200.6, "word": " converted", "probability": 0.4208984375}, {"start": 2200.6, "end": 2200.98, "word": " or", "probability": 0.6298828125}, {"start": 2200.98, "end": 2201.2, "word": " we", "probability": 0.89306640625}, {"start": 2201.2, "end": 2201.7, "word": " transformed", "probability": 0.7822265625}, {"start": 2201.7, "end": 2201.98, "word": " from", "probability": 0.7822265625}, {"start": 2201.98, "end": 2202.26, "word": " normal", "probability": 0.83056640625}, {"start": 2202.26, "end": 2202.8, "word": " distribution", "probability": 0.7177734375}, {"start": 2202.8, "end": 2203.16, "word": " to", "probability": 0.89599609375}, {"start": 2203.16, "end": 2203.88, "word": " standardized.", "probability": 0.70703125}, {"start": 2204.84, "end": 2205.18, "word": " So", "probability": 0.9541015625}, {"start": 2205.18, "end": 2205.52, "word": " instead", "probability": 0.83349609375}, {"start": 2205.52, "end": 2205.74, "word": " of", "probability": 0.9658203125}, {"start": 2205.74, "end": 2205.96, "word": " X", "probability": 0.8486328125}, {"start": 2205.96, "end": 2206.4, "word": " between", "probability": 0.87158203125}, {"start": 2206.4, "end": 2206.98, "word": " 7", "probability": 0.90673828125}, {"start": 2206.98, "end": 2207.44, "word": ".4", "probability": 0.988037109375}, {"start": 2207.44, "end": 2207.64, "word": " and", "probability": 0.9326171875}, {"start": 2207.64, "end": 2208.06, "word": " 8,", "probability": 
0.74462890625}, {"start": 2208.86, "end": 2209.1, "word": " we", "probability": 0.94873046875}, {"start": 2209.1, "end": 2209.34, "word": " have", "probability": 0.94140625}, {"start": 2209.34, "end": 2209.6, "word": " now", "probability": 0.91748046875}, {"start": 2209.6, "end": 2209.88, "word": " Z", "probability": 0.919921875}, {"start": 2209.88, "end": 2210.42, "word": " between", "probability": 0.87548828125}, {"start": 2210.42, "end": 2211.84, "word": " minus", "probability": 0.9267578125}, {"start": 2211.84, "end": 2212.1, "word": " 0", "probability": 0.96923828125}, {"start": 2212.1, "end": 2212.46, "word": ".12", "probability": 0.99755859375}, {"start": 2212.46, "end": 2212.68, "word": " and", "probability": 0.9345703125}, {"start": 2212.68, "end": 2213.0, "word": " 0.", "probability": 0.73974609375}, {"start": 2213.62, "end": 2213.76, "word": " So", "probability": 0.9130859375}, {"start": 2213.76, "end": 2214.26, "word": " this", "probability": 0.76025390625}, {"start": 2214.26, "end": 2214.76, "word": " area", "probability": 0.88037109375}, {"start": 2214.76, "end": 2215.4, "word": " actually", "probability": 0.81591796875}, {"start": 2215.4, "end": 2215.86, "word": " is", "probability": 0.85546875}, {"start": 2215.86, "end": 2217.0, "word": " the", "probability": 0.76416015625}, {"start": 2217.0, "end": 2217.18, "word": " red", "probability": 0.951171875}, {"start": 2217.18, "end": 2217.48, "word": " one,", "probability": 0.869140625}, {"start": 2217.62, "end": 2217.76, "word": " the", "probability": 0.87939453125}, {"start": 2217.76, "end": 2217.96, "word": " red", "probability": 0.93798828125}, {"start": 2217.96, "end": 2218.36, "word": " area", "probability": 0.87841796875}, {"start": 2218.36, "end": 2219.92, "word": " is", "probability": 0.66357421875}, {"start": 2219.92, "end": 2220.16, "word": " one", "probability": 0.76171875}, {"start": 2220.16, "end": 2220.54, "word": "-half.", "probability": 0.761474609375}], "temperature": 1.0}, {"id": 84, 
"seek": 225006, "start": 2221.98, "end": 2250.06, "text": " Total area below z is one-half, below zero, and minus z below minus 0.12. So B of z less than zero minus negative 0.12. That will give the area between minus 0.12 and zero. This is one-half. Now, B of z less than negative 0.12.", "tokens": [23170, 1859, 2507, 710, 307, 472, 12, 25461, 11, 2507, 4018, 11, 293, 3175, 710, 2507, 3175, 1958, 13, 4762, 13, 407, 363, 295, 710, 1570, 813, 4018, 3175, 3671, 1958, 13, 4762, 13, 663, 486, 976, 264, 1859, 1296, 3175, 1958, 13, 4762, 293, 4018, 13, 639, 307, 472, 12, 25461, 13, 823, 11, 363, 295, 710, 1570, 813, 3671, 1958, 13, 4762, 13], "avg_logprob": -0.2438447026140762, "compression_ratio": 1.7014925373134329, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2221.98, "end": 2222.54, "word": " Total", "probability": 0.07318115234375}, {"start": 2222.54, "end": 2223.1, "word": " area", "probability": 0.87353515625}, {"start": 2223.1, "end": 2223.36, "word": " below", "probability": 0.83984375}, {"start": 2223.36, "end": 2223.58, "word": " z", "probability": 0.59375}, {"start": 2223.58, "end": 2223.74, "word": " is", "probability": 0.90966796875}, {"start": 2223.74, "end": 2223.94, "word": " one", "probability": 0.556640625}, {"start": 2223.94, "end": 2224.22, "word": "-half,", "probability": 0.678955078125}, {"start": 2224.44, "end": 2224.44, "word": " below", "probability": 0.71630859375}, {"start": 2224.44, "end": 2225.06, "word": " zero,", "probability": 0.80029296875}, {"start": 2225.62, "end": 2225.84, "word": " and", "probability": 0.92919921875}, {"start": 2225.84, "end": 2226.44, "word": " minus", "probability": 0.91748046875}, {"start": 2226.44, "end": 2228.98, "word": " z", "probability": 0.4482421875}, {"start": 2228.98, "end": 2229.32, "word": " below", "probability": 0.9296875}, {"start": 2229.32, "end": 2230.44, "word": " minus", "probability": 0.80810546875}, {"start": 2230.44, "end": 2230.7, "word": " 0", "probability": 0.71533203125}, 
{"start": 2230.7, "end": 2230.96, "word": ".12.", "probability": 0.871337890625}, {"start": 2231.5, "end": 2231.8, "word": " So", "probability": 0.94287109375}, {"start": 2231.8, "end": 2232.3, "word": " B", "probability": 0.33447265625}, {"start": 2232.3, "end": 2232.48, "word": " of", "probability": 0.9365234375}, {"start": 2232.48, "end": 2232.74, "word": " z", "probability": 0.90625}, {"start": 2232.74, "end": 2234.34, "word": " less", "probability": 0.8916015625}, {"start": 2234.34, "end": 2234.56, "word": " than", "probability": 0.93212890625}, {"start": 2234.56, "end": 2234.84, "word": " zero", "probability": 0.71044921875}, {"start": 2234.84, "end": 2235.36, "word": " minus", "probability": 0.95361328125}, {"start": 2235.36, "end": 2237.24, "word": " negative", "probability": 0.81201171875}, {"start": 2237.24, "end": 2237.48, "word": " 0", "probability": 0.93359375}, {"start": 2237.48, "end": 2237.82, "word": ".12.", "probability": 0.995361328125}, {"start": 2238.34, "end": 2238.72, "word": " That", "probability": 0.90576171875}, {"start": 2238.72, "end": 2238.88, "word": " will", "probability": 0.8642578125}, {"start": 2238.88, "end": 2239.08, "word": " give", "probability": 0.86181640625}, {"start": 2239.08, "end": 2239.28, "word": " the", "probability": 0.90869140625}, {"start": 2239.28, "end": 2239.56, "word": " area", "probability": 0.87548828125}, {"start": 2239.56, "end": 2240.06, "word": " between", "probability": 0.8662109375}, {"start": 2240.06, "end": 2241.22, "word": " minus", "probability": 0.96875}, {"start": 2241.22, "end": 2241.46, "word": " 0", "probability": 0.978515625}, {"start": 2241.46, "end": 2241.72, "word": ".12", "probability": 0.997314453125}, {"start": 2241.72, "end": 2241.94, "word": " and", "probability": 0.92236328125}, {"start": 2241.94, "end": 2242.24, "word": " zero.", "probability": 0.77880859375}, {"start": 2243.9, "end": 2244.46, "word": " This", "probability": 0.252197265625}, {"start": 2244.46, "end": 2244.52, "word": 
" is", "probability": 0.9228515625}, {"start": 2244.52, "end": 2244.68, "word": " one", "probability": 0.9345703125}, {"start": 2244.68, "end": 2245.0, "word": "-half.", "probability": 0.8935546875}, {"start": 2247.2, "end": 2247.76, "word": " Now,", "probability": 0.9453125}, {"start": 2248.02, "end": 2248.16, "word": " B", "probability": 0.9541015625}, {"start": 2248.16, "end": 2248.32, "word": " of", "probability": 0.97216796875}, {"start": 2248.32, "end": 2248.46, "word": " z", "probability": 0.97314453125}, {"start": 2248.46, "end": 2248.68, "word": " less", "probability": 0.92236328125}, {"start": 2248.68, "end": 2248.86, "word": " than", "probability": 0.9267578125}, {"start": 2248.86, "end": 2249.18, "word": " negative", "probability": 0.9404296875}, {"start": 2249.18, "end": 2249.46, "word": " 0", "probability": 0.95654296875}, {"start": 2249.46, "end": 2250.06, "word": ".12.", "probability": 0.998291015625}], "temperature": 1.0}, {"id": 85, "seek": 227837, "start": 2251.71, "end": 2278.37, "text": " look you go back to the normal curve to the normal table but for the negative values of z negative point one two negative point one two four five two two it's four five point five minus point four five two two will give the result we are looking for", "tokens": [574, 291, 352, 646, 281, 264, 2710, 7605, 281, 264, 2710, 3199, 457, 337, 264, 3671, 4190, 295, 710, 3671, 935, 472, 732, 3671, 935, 472, 732, 1451, 1732, 732, 732, 309, 311, 1451, 1732, 935, 1732, 3175, 935, 1451, 1732, 732, 732, 486, 976, 264, 1874, 321, 366, 1237, 337], "avg_logprob": -0.1989182738157419, "compression_ratio": 1.893939393939394, "no_speech_prob": 0.0, "words": [{"start": 2251.71, "end": 2252.07, "word": " look", "probability": 0.1431884765625}, {"start": 2252.07, "end": 2252.29, "word": " you", "probability": 0.5322265625}, {"start": 2252.29, "end": 2252.45, "word": " go", "probability": 0.94970703125}, {"start": 2252.45, "end": 2252.67, "word": " back", "probability": 0.8876953125}, 
{"start": 2252.67, "end": 2252.83, "word": " to", "probability": 0.966796875}, {"start": 2252.83, "end": 2252.97, "word": " the", "probability": 0.90234375}, {"start": 2252.97, "end": 2253.27, "word": " normal", "probability": 0.88134765625}, {"start": 2253.27, "end": 2253.63, "word": " curve", "probability": 0.8779296875}, {"start": 2253.63, "end": 2254.37, "word": " to", "probability": 0.81005859375}, {"start": 2254.37, "end": 2254.57, "word": " the", "probability": 0.9208984375}, {"start": 2254.57, "end": 2254.87, "word": " normal", "probability": 0.896484375}, {"start": 2254.87, "end": 2255.25, "word": " table", "probability": 0.85400390625}, {"start": 2255.25, "end": 2256.83, "word": " but", "probability": 0.79052734375}, {"start": 2256.83, "end": 2257.05, "word": " for", "probability": 0.94775390625}, {"start": 2257.05, "end": 2257.29, "word": " the", "probability": 0.92724609375}, {"start": 2257.29, "end": 2257.65, "word": " negative", "probability": 0.93994140625}, {"start": 2257.65, "end": 2258.99, "word": " values", "probability": 0.96435546875}, {"start": 2258.99, "end": 2259.17, "word": " of", "probability": 0.9658203125}, {"start": 2259.17, "end": 2259.39, "word": " z", "probability": 0.77783203125}, {"start": 2259.39, "end": 2260.73, "word": " negative", "probability": 0.8271484375}, {"start": 2260.73, "end": 2261.03, "word": " point", "probability": 0.53564453125}, {"start": 2261.03, "end": 2261.33, "word": " one", "probability": 0.8720703125}, {"start": 2261.33, "end": 2261.57, "word": " two", "probability": 0.74560546875}, {"start": 2261.57, "end": 2262.03, "word": " negative", "probability": 0.373291015625}, {"start": 2262.03, "end": 2262.31, "word": " point", "probability": 0.86083984375}, {"start": 2262.31, "end": 2262.75, "word": " one", "probability": 0.927734375}, {"start": 2262.75, "end": 2263.89, "word": " two", "probability": 0.8701171875}, {"start": 2263.89, "end": 2264.93, "word": " four", "probability": 0.89501953125}, {"start": 
2264.93, "end": 2265.43, "word": " five", "probability": 0.90283203125}, {"start": 2265.43, "end": 2265.83, "word": " two", "probability": 0.94384765625}, {"start": 2265.83, "end": 2266.17, "word": " two", "probability": 0.8740234375}, {"start": 2266.17, "end": 2267.37, "word": " it's", "probability": 0.75146484375}, {"start": 2267.37, "end": 2267.59, "word": " four", "probability": 0.92431640625}, {"start": 2267.59, "end": 2268.05, "word": " five", "probability": 0.9072265625}, {"start": 2268.05, "end": 2273.29, "word": " point", "probability": 0.80615234375}, {"start": 2273.29, "end": 2273.65, "word": " five", "probability": 0.89599609375}, {"start": 2273.65, "end": 2274.15, "word": " minus", "probability": 0.9814453125}, {"start": 2274.15, "end": 2274.69, "word": " point", "probability": 0.82958984375}, {"start": 2274.69, "end": 2274.99, "word": " four", "probability": 0.6611328125}, {"start": 2274.99, "end": 2275.73, "word": " five", "probability": 0.76513671875}, {"start": 2275.73, "end": 2275.93, "word": " two", "probability": 0.93701171875}, {"start": 2275.93, "end": 2276.07, "word": " two", "probability": 0.92724609375}, {"start": 2276.07, "end": 2276.23, "word": " will", "probability": 0.87353515625}, {"start": 2276.23, "end": 2276.43, "word": " give", "probability": 0.892578125}, {"start": 2276.43, "end": 2276.63, "word": " the", "probability": 0.9208984375}, {"start": 2276.63, "end": 2276.99, "word": " result", "probability": 0.931640625}, {"start": 2276.99, "end": 2277.67, "word": " we", "probability": 0.95751953125}, {"start": 2277.67, "end": 2277.81, "word": " are", "probability": 0.94287109375}, {"start": 2277.81, "end": 2278.03, "word": " looking", "probability": 0.92138671875}, {"start": 2278.03, "end": 2278.37, "word": " for", "probability": 0.953125}], "temperature": 1.0}, {"id": 86, "seek": 231075, "start": 2281.57, "end": 2310.75, "text": " So B of Z less than 0 is 0.5. B of Z less than negative 0.12 equals minus 0.4522. 
That will give 0 forcibility. Now, by symmetric, you can see that this probability between Z between minus 0.12 and 0", "tokens": [407, 363, 295, 1176, 1570, 813, 1958, 307, 1958, 13, 20, 13, 363, 295, 1176, 1570, 813, 3671, 1958, 13, 4762, 6915, 3175, 1958, 13, 8465, 7490, 13, 663, 486, 976, 1958, 337, 537, 39802, 13, 823, 11, 538, 32330, 11, 291, 393, 536, 300, 341, 8482, 1296, 1176, 1296, 3175, 1958, 13, 4762, 293, 1958], "avg_logprob": -0.23766446741003738, "compression_ratio": 1.36986301369863, "no_speech_prob": 0.0, "words": [{"start": 2281.57, "end": 2281.85, "word": " So", "probability": 0.7705078125}, {"start": 2281.85, "end": 2282.01, "word": " B", "probability": 0.362060546875}, {"start": 2282.01, "end": 2282.17, "word": " of", "probability": 0.90625}, {"start": 2282.17, "end": 2282.27, "word": " Z", "probability": 0.666015625}, {"start": 2282.27, "end": 2282.47, "word": " less", "probability": 0.888671875}, {"start": 2282.47, "end": 2282.65, "word": " than", "probability": 0.93603515625}, {"start": 2282.65, "end": 2282.89, "word": " 0", "probability": 0.4267578125}, {"start": 2282.89, "end": 2283.07, "word": " is", "probability": 0.88671875}, {"start": 2283.07, "end": 2283.27, "word": " 0", "probability": 0.84375}, {"start": 2283.27, "end": 2283.73, "word": ".5.", "probability": 0.991943359375}, {"start": 2284.89, "end": 2285.49, "word": " B", "probability": 0.953125}, {"start": 2285.49, "end": 2285.67, "word": " of", "probability": 0.9697265625}, {"start": 2285.67, "end": 2285.87, "word": " Z", "probability": 0.958984375}, {"start": 2285.87, "end": 2286.17, "word": " less", "probability": 0.92138671875}, {"start": 2286.17, "end": 2286.37, "word": " than", "probability": 0.93701171875}, {"start": 2286.37, "end": 2286.69, "word": " negative", "probability": 0.73046875}, {"start": 2286.69, "end": 2286.99, "word": " 0", "probability": 0.93310546875}, {"start": 2286.99, "end": 2287.33, "word": ".12", "probability": 0.98681640625}, {"start": 2287.33, "end": 
2288.47, "word": " equals", "probability": 0.87158203125}, {"start": 2288.47, "end": 2289.05, "word": " minus", "probability": 0.91552734375}, {"start": 2289.05, "end": 2289.33, "word": " 0", "probability": 0.9677734375}, {"start": 2289.33, "end": 2291.77, "word": ".4522.", "probability": 0.9591471354166666}, {"start": 2291.97, "end": 2292.23, "word": " That", "probability": 0.8994140625}, {"start": 2292.23, "end": 2292.41, "word": " will", "probability": 0.87451171875}, {"start": 2292.41, "end": 2292.65, "word": " give", "probability": 0.86181640625}, {"start": 2292.65, "end": 2292.99, "word": " 0", "probability": 0.5390625}, {"start": 2292.99, "end": 2294.29, "word": " forcibility.", "probability": 0.4251302083333333}, {"start": 2296.79, "end": 2297.39, "word": " Now,", "probability": 0.94873046875}, {"start": 2297.41, "end": 2297.61, "word": " by", "probability": 0.896484375}, {"start": 2297.61, "end": 2298.05, "word": " symmetric,", "probability": 0.76171875}, {"start": 2300.15, "end": 2301.29, "word": " you", "probability": 0.9453125}, {"start": 2301.29, "end": 2301.55, "word": " can", "probability": 0.943359375}, {"start": 2301.55, "end": 2301.79, "word": " see", "probability": 0.92578125}, {"start": 2301.79, "end": 2302.19, "word": " that", "probability": 0.93701171875}, {"start": 2302.19, "end": 2303.59, "word": " this", "probability": 0.92041015625}, {"start": 2303.59, "end": 2304.15, "word": " probability", "probability": 0.9541015625}, {"start": 2304.15, "end": 2308.47, "word": " between", "probability": 0.450439453125}, {"start": 2308.47, "end": 2308.89, "word": " Z", "probability": 0.54248046875}, {"start": 2308.89, "end": 2309.19, "word": " between", "probability": 0.41552734375}, {"start": 2309.19, "end": 2309.57, "word": " minus", "probability": 0.97119140625}, {"start": 2309.57, "end": 2309.79, "word": " 0", "probability": 0.9736328125}, {"start": 2309.79, "end": 2310.15, "word": ".12", "probability": 0.995361328125}, {"start": 2310.15, "end": 
2310.43, "word": " and", "probability": 0.93603515625}, {"start": 2310.43, "end": 2310.75, "word": " 0", "probability": 0.86083984375}], "temperature": 1.0}, {"id": 87, "seek": 232619, "start": 2311.92, "end": 2326.2, "text": " is the same as the other side from 0.12 I mean this area the red one is the same up to 8.6", "tokens": [307, 264, 912, 382, 264, 661, 1252, 490, 1958, 13, 4762, 286, 914, 341, 1859, 264, 2182, 472, 307, 264, 912, 493, 281, 1649, 13, 21], "avg_logprob": -0.2617187367545234, "compression_ratio": 1.1666666666666667, "no_speech_prob": 0.0, "words": [{"start": 2311.92, "end": 2312.2, "word": " is", "probability": 0.388427734375}, {"start": 2312.2, "end": 2312.36, "word": " the", "probability": 0.89501953125}, {"start": 2312.36, "end": 2312.62, "word": " same", "probability": 0.90478515625}, {"start": 2312.62, "end": 2313.1, "word": " as", "probability": 0.9580078125}, {"start": 2313.1, "end": 2318.3, "word": " the", "probability": 0.68017578125}, {"start": 2318.3, "end": 2318.52, "word": " other", "probability": 0.89306640625}, {"start": 2318.52, "end": 2318.96, "word": " side", "probability": 0.86376953125}, {"start": 2318.96, "end": 2319.5, "word": " from", "probability": 0.6962890625}, {"start": 2319.5, "end": 2319.78, "word": " 0", "probability": 0.65966796875}, {"start": 2319.78, "end": 2320.6, "word": ".12", "probability": 0.6431884765625}, {"start": 2320.6, "end": 2321.16, "word": " I", "probability": 0.334228515625}, {"start": 2321.16, "end": 2321.3, "word": " mean", "probability": 0.96923828125}, {"start": 2321.3, "end": 2321.56, "word": " this", "probability": 0.9052734375}, {"start": 2321.56, "end": 2321.92, "word": " area", "probability": 0.8984375}, {"start": 2321.92, "end": 2322.82, "word": " the", "probability": 0.75390625}, {"start": 2322.82, "end": 2323.0, "word": " red", "probability": 0.931640625}, {"start": 2323.0, "end": 2323.34, "word": " one", "probability": 0.9248046875}, {"start": 2323.34, "end": 2323.9, "word": " is", 
"probability": 0.93212890625}, {"start": 2323.9, "end": 2324.04, "word": " the", "probability": 0.91943359375}, {"start": 2324.04, "end": 2324.36, "word": " same", "probability": 0.89892578125}, {"start": 2324.36, "end": 2324.98, "word": " up", "probability": 0.96337890625}, {"start": 2324.98, "end": 2325.26, "word": " to", "probability": 0.962890625}, {"start": 2325.26, "end": 2325.58, "word": " 8", "probability": 0.7177734375}, {"start": 2325.58, "end": 2326.2, "word": ".6", "probability": 0.9677734375}], "temperature": 1.0}, {"id": 88, "seek": 236536, "start": 2335.6, "end": 2365.36, "text": " So the area between minus 0.12 up to 0 is the same as from 0 up to 0.12. Because of symmetric, since this area equals the same for the other part. So from 0 up to 0.12 is the same as minus 0.12 up to 0. So equal, so the normal distribution is symmetric. So this probability is the same as B of Z between 0 and 0.12.", "tokens": [407, 264, 1859, 1296, 3175, 1958, 13, 4762, 493, 281, 1958, 307, 264, 912, 382, 490, 1958, 493, 281, 1958, 13, 4762, 13, 1436, 295, 32330, 11, 1670, 341, 1859, 6915, 264, 912, 337, 264, 661, 644, 13, 407, 490, 1958, 493, 281, 1958, 13, 4762, 307, 264, 912, 382, 3175, 1958, 13, 4762, 493, 281, 1958, 13, 407, 2681, 11, 370, 264, 2710, 7316, 307, 32330, 13, 407, 341, 8482, 307, 264, 912, 382, 363, 295, 1176, 1296, 1958, 293, 1958, 13, 4762, 13], "avg_logprob": -0.14098837105340736, "compression_ratio": 1.847953216374269, "no_speech_prob": 0.0, "words": [{"start": 2335.6, "end": 2335.88, "word": " So", "probability": 0.853515625}, {"start": 2335.88, "end": 2336.08, "word": " the", "probability": 0.79150390625}, {"start": 2336.08, "end": 2336.34, "word": " area", "probability": 0.90234375}, {"start": 2336.34, "end": 2336.68, "word": " between", "probability": 0.88671875}, {"start": 2336.68, "end": 2337.1, "word": " minus", "probability": 0.794921875}, {"start": 2337.1, "end": 2337.32, "word": " 0", "probability": 0.67919921875}, {"start": 2337.32, "end": 
2337.6, "word": ".12", "probability": 0.982421875}, {"start": 2337.6, "end": 2337.84, "word": " up", "probability": 0.95263671875}, {"start": 2337.84, "end": 2337.98, "word": " to", "probability": 0.96337890625}, {"start": 2337.98, "end": 2338.24, "word": " 0", "probability": 0.765625}, {"start": 2338.24, "end": 2338.46, "word": " is", "probability": 0.89208984375}, {"start": 2338.46, "end": 2338.62, "word": " the", "probability": 0.91455078125}, {"start": 2338.62, "end": 2338.84, "word": " same", "probability": 0.91357421875}, {"start": 2338.84, "end": 2339.26, "word": " as", "probability": 0.9248046875}, {"start": 2339.26, "end": 2340.02, "word": " from", "probability": 0.81884765625}, {"start": 2340.02, "end": 2340.28, "word": " 0", "probability": 0.9384765625}, {"start": 2340.28, "end": 2340.56, "word": " up", "probability": 0.95166015625}, {"start": 2340.56, "end": 2340.9, "word": " to", "probability": 0.958984375}, {"start": 2340.9, "end": 2341.4, "word": " 0", "probability": 0.94775390625}, {"start": 2341.4, "end": 2341.68, "word": ".12.", "probability": 0.974609375}, {"start": 2343.1, "end": 2343.66, "word": " Because", "probability": 0.9248046875}, {"start": 2343.66, "end": 2343.82, "word": " of", "probability": 0.7744140625}, {"start": 2343.82, "end": 2344.2, "word": " symmetric,", "probability": 0.62841796875}, {"start": 2344.54, "end": 2344.92, "word": " since", "probability": 0.86474609375}, {"start": 2344.92, "end": 2346.82, "word": " this", "probability": 0.9130859375}, {"start": 2346.82, "end": 2347.18, "word": " area", "probability": 0.923828125}, {"start": 2347.18, "end": 2347.56, "word": " equals", "probability": 0.7177734375}, {"start": 2347.56, "end": 2347.78, "word": " the", "probability": 0.80517578125}, {"start": 2347.78, "end": 2348.04, "word": " same", "probability": 0.89697265625}, {"start": 2348.04, "end": 2348.32, "word": " for", "probability": 0.92578125}, {"start": 2348.32, "end": 2348.48, "word": " the", "probability": 0.9140625}, 
{"start": 2348.48, "end": 2348.7, "word": " other", "probability": 0.8857421875}, {"start": 2348.7, "end": 2349.12, "word": " part.", "probability": 0.87744140625}, {"start": 2349.54, "end": 2349.68, "word": " So", "probability": 0.9638671875}, {"start": 2349.68, "end": 2349.94, "word": " from", "probability": 0.8447265625}, {"start": 2349.94, "end": 2350.3, "word": " 0", "probability": 0.92041015625}, {"start": 2350.3, "end": 2351.34, "word": " up", "probability": 0.931640625}, {"start": 2351.34, "end": 2351.46, "word": " to", "probability": 0.9638671875}, {"start": 2351.46, "end": 2351.7, "word": " 0", "probability": 0.943359375}, {"start": 2351.7, "end": 2352.0, "word": ".12", "probability": 0.99853515625}, {"start": 2352.0, "end": 2352.98, "word": " is", "probability": 0.83837890625}, {"start": 2352.98, "end": 2353.12, "word": " the", "probability": 0.9169921875}, {"start": 2353.12, "end": 2353.32, "word": " same", "probability": 0.91064453125}, {"start": 2353.32, "end": 2353.72, "word": " as", "probability": 0.9619140625}, {"start": 2353.72, "end": 2354.62, "word": " minus", "probability": 0.93798828125}, {"start": 2354.62, "end": 2354.9, "word": " 0", "probability": 0.98681640625}, {"start": 2354.9, "end": 2355.1, "word": ".12", "probability": 0.998046875}, {"start": 2355.1, "end": 2355.5, "word": " up", "probability": 0.9423828125}, {"start": 2355.5, "end": 2355.66, "word": " to", "probability": 0.95166015625}, {"start": 2355.66, "end": 2355.9, "word": " 0.", "probability": 0.9638671875}, {"start": 2356.12, "end": 2356.14, "word": " So", "probability": 0.96240234375}, {"start": 2356.14, "end": 2356.48, "word": " equal,", "probability": 0.66552734375}, {"start": 2357.5, "end": 2357.92, "word": " so", "probability": 0.93505859375}, {"start": 2357.92, "end": 2358.08, "word": " the", "probability": 0.9111328125}, {"start": 2358.08, "end": 2358.38, "word": " normal", "probability": 0.88671875}, {"start": 2358.38, "end": 2358.88, "word": " distribution", 
"probability": 0.8046875}, {"start": 2358.88, "end": 2359.1, "word": " is", "probability": 0.94189453125}, {"start": 2359.1, "end": 2359.4, "word": " symmetric.", "probability": 0.8173828125}, {"start": 2360.34, "end": 2360.58, "word": " So", "probability": 0.96240234375}, {"start": 2360.58, "end": 2360.86, "word": " this", "probability": 0.896484375}, {"start": 2360.86, "end": 2361.88, "word": " probability", "probability": 0.822265625}, {"start": 2361.88, "end": 2362.16, "word": " is", "probability": 0.76953125}, {"start": 2362.16, "end": 2362.32, "word": " the", "probability": 0.908203125}, {"start": 2362.32, "end": 2362.48, "word": " same", "probability": 0.91015625}, {"start": 2362.48, "end": 2362.82, "word": " as", "probability": 0.9599609375}, {"start": 2362.82, "end": 2363.02, "word": " B", "probability": 0.525390625}, {"start": 2363.02, "end": 2363.2, "word": " of", "probability": 0.91162109375}, {"start": 2363.2, "end": 2363.44, "word": " Z", "probability": 0.64794921875}, {"start": 2363.44, "end": 2364.36, "word": " between", "probability": 0.8642578125}, {"start": 2364.36, "end": 2364.64, "word": " 0", "probability": 0.93359375}, {"start": 2364.64, "end": 2364.82, "word": " and", "probability": 0.93994140625}, {"start": 2364.82, "end": 2365.04, "word": " 0", "probability": 0.9560546875}, {"start": 2365.04, "end": 2365.36, "word": ".12.", "probability": 0.98046875}], "temperature": 1.0}, {"id": 89, "seek": 238534, "start": 2367.32, "end": 2385.34, "text": " Any question? Again, the equal sign does not matter. Because here we have the complement. 
The complement.", "tokens": [2639, 1168, 30, 3764, 11, 264, 2681, 1465, 775, 406, 1871, 13, 1436, 510, 321, 362, 264, 17103, 13, 440, 17103, 13], "avg_logprob": -0.21908967520879663, "compression_ratio": 1.1777777777777778, "no_speech_prob": 0.0, "words": [{"start": 2367.32, "end": 2367.58, "word": " Any", "probability": 0.75439453125}, {"start": 2367.58, "end": 2367.98, "word": " question?", "probability": 0.56982421875}, {"start": 2374.52, "end": 2375.12, "word": " Again,", "probability": 0.74169921875}, {"start": 2375.32, "end": 2375.32, "word": " the", "probability": 0.89013671875}, {"start": 2375.32, "end": 2375.56, "word": " equal", "probability": 0.69287109375}, {"start": 2375.56, "end": 2375.9, "word": " sign", "probability": 0.83935546875}, {"start": 2375.9, "end": 2376.12, "word": " does", "probability": 0.9384765625}, {"start": 2376.12, "end": 2376.34, "word": " not", "probability": 0.95751953125}, {"start": 2376.34, "end": 2376.62, "word": " matter.", "probability": 0.8759765625}, {"start": 2382.12, "end": 2382.72, "word": " Because", "probability": 0.87255859375}, {"start": 2382.72, "end": 2382.92, "word": " here", "probability": 0.8349609375}, {"start": 2382.92, "end": 2383.06, "word": " we", "probability": 0.8310546875}, {"start": 2383.06, "end": 2383.26, "word": " have", "probability": 0.94287109375}, {"start": 2383.26, "end": 2383.5, "word": " the", "probability": 0.81396484375}, {"start": 2383.5, "end": 2383.86, "word": " complement.", "probability": 0.83837890625}, {"start": 2384.7, "end": 2385.0, "word": " The", "probability": 0.875}, {"start": 2385.0, "end": 2385.34, "word": " complement.", "probability": 0.912109375}], "temperature": 1.0}, {"id": 90, "seek": 241489, "start": 2386.65, "end": 2414.89, "text": " If this one, I mean, complement of z less than, greater than 0.12, the complement is B of z less than or equal to minus 0.12. So we should have just permutation, the equality. But it doesn't matter. 
If in the problem we don't have equal sign in the complement, we should have equal sign. But it doesn't matter actually if we have equal sign or not. For example, if we are looking for", "tokens": [759, 341, 472, 11, 286, 914, 11, 17103, 295, 710, 1570, 813, 11, 5044, 813, 1958, 13, 4762, 11, 264, 17103, 307, 363, 295, 710, 1570, 813, 420, 2681, 281, 3175, 1958, 13, 4762, 13, 407, 321, 820, 362, 445, 4784, 11380, 11, 264, 14949, 13, 583, 309, 1177, 380, 1871, 13, 759, 294, 264, 1154, 321, 500, 380, 362, 2681, 1465, 294, 264, 17103, 11, 321, 820, 362, 2681, 1465, 13, 583, 309, 1177, 380, 1871, 767, 498, 321, 362, 2681, 1465, 420, 406, 13, 1171, 1365, 11, 498, 321, 366, 1237, 337], "avg_logprob": -0.19374999717662209, "compression_ratio": 1.855072463768116, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2386.65, "end": 2386.93, "word": " If", "probability": 0.236572265625}, {"start": 2386.93, "end": 2387.17, "word": " this", "probability": 0.9248046875}, {"start": 2387.17, "end": 2387.45, "word": " one,", "probability": 0.92236328125}, {"start": 2387.87, "end": 2387.95, "word": " I", "probability": 0.95556640625}, {"start": 2387.95, "end": 2388.13, "word": " mean,", "probability": 0.96630859375}, {"start": 2388.39, "end": 2388.71, "word": " complement", "probability": 0.5703125}, {"start": 2388.71, "end": 2389.01, "word": " of", "probability": 0.9619140625}, {"start": 2389.01, "end": 2389.25, "word": " z", "probability": 0.56640625}, {"start": 2389.25, "end": 2389.85, "word": " less", "probability": 0.7373046875}, {"start": 2389.85, "end": 2390.15, "word": " than,", "probability": 0.9345703125}, {"start": 2390.51, "end": 2390.85, "word": " greater", "probability": 0.88671875}, {"start": 2390.85, "end": 2391.15, "word": " than", "probability": 0.94140625}, {"start": 2391.15, "end": 2391.33, "word": " 0", "probability": 0.5166015625}, {"start": 2391.33, "end": 2391.65, "word": ".12,", "probability": 0.963623046875}, {"start": 2392.01, "end": 2392.25, 
"word": " the", "probability": 0.91064453125}, {"start": 2392.25, "end": 2392.59, "word": " complement", "probability": 0.94775390625}, {"start": 2392.59, "end": 2393.07, "word": " is", "probability": 0.9423828125}, {"start": 2393.07, "end": 2393.35, "word": " B", "probability": 0.55322265625}, {"start": 2393.35, "end": 2393.51, "word": " of", "probability": 0.9189453125}, {"start": 2393.51, "end": 2393.73, "word": " z", "probability": 0.875}, {"start": 2393.73, "end": 2394.31, "word": " less", "probability": 0.912109375}, {"start": 2394.31, "end": 2394.51, "word": " than", "probability": 0.935546875}, {"start": 2394.51, "end": 2394.67, "word": " or", "probability": 0.95654296875}, {"start": 2394.67, "end": 2394.83, "word": " equal", "probability": 0.9169921875}, {"start": 2394.83, "end": 2394.95, "word": " to", "probability": 0.463623046875}, {"start": 2394.95, "end": 2395.21, "word": " minus", "probability": 0.93408203125}, {"start": 2395.21, "end": 2395.43, "word": " 0", "probability": 0.96875}, {"start": 2395.43, "end": 2395.61, "word": ".12.", "probability": 0.97509765625}, {"start": 2395.99, "end": 2396.21, "word": " So", "probability": 0.93994140625}, {"start": 2396.21, "end": 2396.35, "word": " we", "probability": 0.7353515625}, {"start": 2396.35, "end": 2396.55, "word": " should", "probability": 0.96240234375}, {"start": 2396.55, "end": 2396.79, "word": " have", "probability": 0.93994140625}, {"start": 2396.79, "end": 2397.13, "word": " just", "probability": 0.91650390625}, {"start": 2397.13, "end": 2397.85, "word": " permutation,", "probability": 0.49407958984375}, {"start": 2398.03, "end": 2398.15, "word": " the", "probability": 0.791015625}, {"start": 2398.15, "end": 2398.47, "word": " equality.", "probability": 0.9453125}, {"start": 2399.41, "end": 2399.79, "word": " But", "probability": 0.95166015625}, {"start": 2399.79, "end": 2400.07, "word": " it", "probability": 0.474853515625}, {"start": 2400.07, "end": 2400.33, "word": " doesn't", "probability": 
0.95068359375}, {"start": 2400.33, "end": 2400.65, "word": " matter.", "probability": 0.86279296875}, {"start": 2403.19, "end": 2403.67, "word": " If", "probability": 0.92919921875}, {"start": 2403.67, "end": 2403.81, "word": " in", "probability": 0.88232421875}, {"start": 2403.81, "end": 2403.93, "word": " the", "probability": 0.91748046875}, {"start": 2403.93, "end": 2404.21, "word": " problem", "probability": 0.88232421875}, {"start": 2404.21, "end": 2404.43, "word": " we", "probability": 0.63623046875}, {"start": 2404.43, "end": 2404.63, "word": " don't", "probability": 0.9794921875}, {"start": 2404.63, "end": 2404.83, "word": " have", "probability": 0.94482421875}, {"start": 2404.83, "end": 2405.03, "word": " equal", "probability": 0.8681640625}, {"start": 2405.03, "end": 2405.43, "word": " sign", "probability": 0.91064453125}, {"start": 2405.43, "end": 2405.61, "word": " in", "probability": 0.267578125}, {"start": 2405.61, "end": 2405.73, "word": " the", "probability": 0.916015625}, {"start": 2405.73, "end": 2406.05, "word": " complement,", "probability": 0.94482421875}, {"start": 2406.55, "end": 2406.85, "word": " we", "probability": 0.95263671875}, {"start": 2406.85, "end": 2407.03, "word": " should", "probability": 0.96875}, {"start": 2407.03, "end": 2407.25, "word": " have", "probability": 0.9443359375}, {"start": 2407.25, "end": 2407.47, "word": " equal", "probability": 0.89990234375}, {"start": 2407.47, "end": 2407.77, "word": " sign.", "probability": 0.92236328125}, {"start": 2409.51, "end": 2409.99, "word": " But", "probability": 0.951171875}, {"start": 2409.99, "end": 2410.11, "word": " it", "probability": 0.94580078125}, {"start": 2410.11, "end": 2410.33, "word": " doesn't", "probability": 0.957275390625}, {"start": 2410.33, "end": 2410.55, "word": " matter", "probability": 0.859375}, {"start": 2410.55, "end": 2410.85, "word": " actually", "probability": 0.73095703125}, {"start": 2410.85, "end": 2411.07, "word": " if", "probability": 0.90380859375}, 
{"start": 2411.07, "end": 2411.21, "word": " we", "probability": 0.94384765625}, {"start": 2411.21, "end": 2411.43, "word": " have", "probability": 0.9453125}, {"start": 2411.43, "end": 2411.67, "word": " equal", "probability": 0.896484375}, {"start": 2411.67, "end": 2411.95, "word": " sign", "probability": 0.90673828125}, {"start": 2411.95, "end": 2412.09, "word": " or", "probability": 0.95556640625}, {"start": 2412.09, "end": 2412.25, "word": " not.", "probability": 0.94140625}, {"start": 2413.21, "end": 2413.53, "word": " For", "probability": 0.9638671875}, {"start": 2413.53, "end": 2413.83, "word": " example,", "probability": 0.9765625}, {"start": 2413.91, "end": 2414.07, "word": " if", "probability": 0.94482421875}, {"start": 2414.07, "end": 2414.17, "word": " we", "probability": 0.91259765625}, {"start": 2414.17, "end": 2414.27, "word": " are", "probability": 0.9296875}, {"start": 2414.27, "end": 2414.51, "word": " looking", "probability": 0.908203125}, {"start": 2414.51, "end": 2414.89, "word": " for", "probability": 0.9501953125}], "temperature": 1.0}, {"id": 91, "seek": 244498, "start": 2416.23, "end": 2444.99, "text": " B of X greater than A. Now what's the complement of that? 1 minus less than or equal to A. But if X is greater than or equal to A, the complement is without equal sign. But in continuous distribution, the equal sign does not matter. 
Any question?", "tokens": [363, 295, 1783, 5044, 813, 316, 13, 823, 437, 311, 264, 17103, 295, 300, 30, 502, 3175, 1570, 813, 420, 2681, 281, 316, 13, 583, 498, 1783, 307, 5044, 813, 420, 2681, 281, 316, 11, 264, 17103, 307, 1553, 2681, 1465, 13, 583, 294, 10957, 7316, 11, 264, 2681, 1465, 775, 406, 1871, 13, 2639, 1168, 30], "avg_logprob": -0.23679957359001555, "compression_ratio": 1.6466666666666667, "no_speech_prob": 0.0, "words": [{"start": 2416.23, "end": 2416.45, "word": " B", "probability": 0.07550048828125}, {"start": 2416.45, "end": 2416.61, "word": " of", "probability": 0.9140625}, {"start": 2416.61, "end": 2416.81, "word": " X", "probability": 0.70947265625}, {"start": 2416.81, "end": 2417.15, "word": " greater", "probability": 0.7138671875}, {"start": 2417.15, "end": 2417.45, "word": " than", "probability": 0.9541015625}, {"start": 2417.45, "end": 2417.65, "word": " A.", "probability": 0.681640625}, {"start": 2418.57, "end": 2419.05, "word": " Now", "probability": 0.861328125}, {"start": 2419.05, "end": 2419.31, "word": " what's", "probability": 0.701416015625}, {"start": 2419.31, "end": 2419.43, "word": " the", "probability": 0.91259765625}, {"start": 2419.43, "end": 2419.81, "word": " complement", "probability": 0.79638671875}, {"start": 2419.81, "end": 2420.07, "word": " of", "probability": 0.93603515625}, {"start": 2420.07, "end": 2420.33, "word": " that?", "probability": 0.92578125}, {"start": 2421.03, "end": 2421.61, "word": " 1", "probability": 0.45263671875}, {"start": 2421.61, "end": 2422.25, "word": " minus", "probability": 0.9638671875}, {"start": 2422.25, "end": 2425.95, "word": " less", "probability": 0.82275390625}, {"start": 2425.95, "end": 2426.21, "word": " than", "probability": 0.947265625}, {"start": 2426.21, "end": 2426.35, "word": " or", "probability": 0.95263671875}, {"start": 2426.35, "end": 2426.55, "word": " equal", "probability": 0.9052734375}, {"start": 2426.55, "end": 2426.65, "word": " to", "probability": 0.5380859375}, 
{"start": 2426.65, "end": 2426.81, "word": " A.", "probability": 0.94091796875}, {"start": 2428.51, "end": 2429.09, "word": " But", "probability": 0.90576171875}, {"start": 2429.09, "end": 2429.77, "word": " if", "probability": 0.8212890625}, {"start": 2429.77, "end": 2431.37, "word": " X", "probability": 0.888671875}, {"start": 2431.37, "end": 2431.71, "word": " is", "probability": 0.473876953125}, {"start": 2431.71, "end": 2431.99, "word": " greater", "probability": 0.9091796875}, {"start": 2431.99, "end": 2432.27, "word": " than", "probability": 0.9306640625}, {"start": 2432.27, "end": 2432.45, "word": " or", "probability": 0.955078125}, {"start": 2432.45, "end": 2432.63, "word": " equal", "probability": 0.90673828125}, {"start": 2432.63, "end": 2432.79, "word": " to", "probability": 0.91064453125}, {"start": 2432.79, "end": 2432.87, "word": " A,", "probability": 0.970703125}, {"start": 2432.93, "end": 2433.03, "word": " the", "probability": 0.81591796875}, {"start": 2433.03, "end": 2433.35, "word": " complement", "probability": 0.9072265625}, {"start": 2433.35, "end": 2433.93, "word": " is", "probability": 0.91357421875}, {"start": 2433.93, "end": 2436.43, "word": " without", "probability": 0.1600341796875}, {"start": 2436.43, "end": 2437.51, "word": " equal", "probability": 0.79443359375}, {"start": 2437.51, "end": 2437.87, "word": " sign.", "probability": 0.88916015625}, {"start": 2438.31, "end": 2438.75, "word": " But", "probability": 0.9404296875}, {"start": 2438.75, "end": 2439.19, "word": " in", "probability": 0.90283203125}, {"start": 2439.19, "end": 2439.65, "word": " continuous", "probability": 0.828125}, {"start": 2439.65, "end": 2440.15, "word": " distribution,", "probability": 0.83056640625}, {"start": 2440.39, "end": 2440.49, "word": " the", "probability": 0.9169921875}, {"start": 2440.49, "end": 2440.71, "word": " equal", "probability": 0.8798828125}, {"start": 2440.71, "end": 2440.97, "word": " sign", "probability": 0.896484375}, {"start": 
2440.97, "end": 2441.15, "word": " does", "probability": 0.9169921875}, {"start": 2441.15, "end": 2441.33, "word": " not", "probability": 0.94482421875}, {"start": 2441.33, "end": 2441.59, "word": " matter.", "probability": 0.85791015625}, {"start": 2444.05, "end": 2444.63, "word": " Any", "probability": 0.90869140625}, {"start": 2444.63, "end": 2444.99, "word": " question?", "probability": 0.61865234375}], "temperature": 1.0}, {"id": 92, "seek": 247675, "start": 2452.19, "end": 2476.75, "text": " comments. Let's move to the next topic which talks about the empirical rule. If you remember before we said there is an empirical rule for 68, 95, 95,", "tokens": [3053, 13, 961, 311, 1286, 281, 264, 958, 4829, 597, 6686, 466, 264, 31886, 4978, 13, 759, 291, 1604, 949, 321, 848, 456, 307, 364, 31886, 4978, 337, 23317, 11, 13420, 11, 13420, 11], "avg_logprob": -0.23325893027441844, "compression_ratio": 1.2796610169491525, "no_speech_prob": 0.0, "words": [{"start": 2452.1900000000005, "end": 2453.4300000000003, "word": " comments.", "probability": 0.46435546875}, {"start": 2453.4300000000003, "end": 2454.67, "word": " Let's", "probability": 0.91796875}, {"start": 2454.67, "end": 2454.99, "word": " move", "probability": 0.94482421875}, {"start": 2454.99, "end": 2455.33, "word": " to", "probability": 0.96337890625}, {"start": 2455.33, "end": 2455.61, "word": " the", "probability": 0.91650390625}, {"start": 2455.61, "end": 2455.83, "word": " next", "probability": 0.9404296875}, {"start": 2455.83, "end": 2456.29, "word": " topic", "probability": 0.9658203125}, {"start": 2456.29, "end": 2457.81, "word": " which", "probability": 0.546875}, {"start": 2457.81, "end": 2458.13, "word": " talks", "probability": 0.87158203125}, {"start": 2458.13, "end": 2458.75, "word": " about", "probability": 0.91259765625}, {"start": 2458.75, "end": 2459.95, "word": " the", "probability": 0.85888671875}, {"start": 2459.95, "end": 2460.51, "word": " empirical", "probability": 0.80078125}, {"start": 
2460.51, "end": 2460.89, "word": " rule.", "probability": 0.79638671875}, {"start": 2463.21, "end": 2464.45, "word": " If", "probability": 0.94677734375}, {"start": 2464.45, "end": 2464.57, "word": " you", "probability": 0.9619140625}, {"start": 2464.57, "end": 2464.87, "word": " remember", "probability": 0.875}, {"start": 2464.87, "end": 2465.51, "word": " before", "probability": 0.76513671875}, {"start": 2465.51, "end": 2466.51, "word": " we", "probability": 0.662109375}, {"start": 2466.51, "end": 2466.87, "word": " said", "probability": 0.94970703125}, {"start": 2466.87, "end": 2467.21, "word": " there", "probability": 0.86669921875}, {"start": 2467.21, "end": 2467.87, "word": " is", "probability": 0.82177734375}, {"start": 2467.87, "end": 2468.49, "word": " an", "probability": 0.9189453125}, {"start": 2468.49, "end": 2468.79, "word": " empirical", "probability": 0.9033203125}, {"start": 2468.79, "end": 2469.29, "word": " rule", "probability": 0.91748046875}, {"start": 2469.29, "end": 2470.53, "word": " for", "probability": 0.9306640625}, {"start": 2470.53, "end": 2471.19, "word": " 68,", "probability": 0.7666015625}, {"start": 2472.49, "end": 2473.71, "word": " 95,", "probability": 0.95361328125}, {"start": 2474.99, "end": 2476.75, "word": " 95,", "probability": 0.638671875}], "temperature": 1.0}, {"id": 93, "seek": 250562, "start": 2477.42, "end": 2505.62, "text": " 99.71. Now let's see the exact meaning of this rule. Now we have to apply the empirical rule not to Chebyshev's inequality because the distribution is normal. 
Chebyshev's is applied", "tokens": [11803, 13, 29985, 13, 823, 718, 311, 536, 264, 1900, 3620, 295, 341, 4978, 13, 823, 321, 362, 281, 3079, 264, 31886, 4978, 406, 281, 3351, 65, 749, 675, 85, 311, 16970, 570, 264, 7316, 307, 2710, 13, 3351, 65, 749, 675, 85, 311, 307, 6456], "avg_logprob": -0.2470079799915882, "compression_ratio": 1.378787878787879, "no_speech_prob": 0.0, "words": [{"start": 2477.42, "end": 2477.94, "word": " 99", "probability": 0.294189453125}, {"start": 2477.94, "end": 2478.6, "word": ".71.", "probability": 0.4783935546875}, {"start": 2480.2, "end": 2480.8, "word": " Now", "probability": 0.7666015625}, {"start": 2480.8, "end": 2481.1, "word": " let's", "probability": 0.799072265625}, {"start": 2481.1, "end": 2481.3, "word": " see", "probability": 0.92333984375}, {"start": 2481.3, "end": 2481.56, "word": " the", "probability": 0.90576171875}, {"start": 2481.56, "end": 2481.96, "word": " exact", "probability": 0.951171875}, {"start": 2481.96, "end": 2482.36, "word": " meaning", "probability": 0.85693359375}, {"start": 2482.36, "end": 2482.84, "word": " of", "probability": 0.958984375}, {"start": 2482.84, "end": 2483.06, "word": " this", "probability": 0.947265625}, {"start": 2483.06, "end": 2483.32, "word": " rule.", "probability": 0.94677734375}, {"start": 2497.58, "end": 2498.18, "word": " Now", "probability": 0.85205078125}, {"start": 2498.18, "end": 2499.04, "word": " we", "probability": 0.57861328125}, {"start": 2499.04, "end": 2499.2, "word": " have", "probability": 0.9404296875}, {"start": 2499.2, "end": 2499.32, "word": " to", "probability": 0.97216796875}, {"start": 2499.32, "end": 2499.56, "word": " apply", "probability": 0.94677734375}, {"start": 2499.56, "end": 2499.7, "word": " the", "probability": 0.880859375}, {"start": 2499.7, "end": 2500.0, "word": " empirical", "probability": 0.92919921875}, {"start": 2500.0, "end": 2500.18, "word": " rule", "probability": 0.72265625}, {"start": 2500.18, "end": 2500.36, "word": " not", 
"probability": 0.77685546875}, {"start": 2500.36, "end": 2500.46, "word": " to", "probability": 0.54052734375}, {"start": 2500.46, "end": 2501.0, "word": " Chebyshev's", "probability": 0.7605387369791666}, {"start": 2501.0, "end": 2501.24, "word": " inequality", "probability": 0.80517578125}, {"start": 2501.24, "end": 2502.34, "word": " because", "probability": 0.5224609375}, {"start": 2502.34, "end": 2502.52, "word": " the", "probability": 0.83544921875}, {"start": 2502.52, "end": 2502.82, "word": " distribution", "probability": 0.82373046875}, {"start": 2502.82, "end": 2503.02, "word": " is", "probability": 0.93798828125}, {"start": 2503.02, "end": 2503.36, "word": " normal.", "probability": 0.8388671875}, {"start": 2504.62, "end": 2505.22, "word": " Chebyshev's", "probability": 0.9517415364583334}, {"start": 2505.22, "end": 2505.28, "word": " is", "probability": 0.828125}, {"start": 2505.28, "end": 2505.62, "word": " applied", "probability": 0.81005859375}], "temperature": 1.0}, {"id": 94, "seek": 253453, "start": 2507.43, "end": 2534.53, "text": " for skewed distributions. For symmetric, we have to apply the empirical rule. Here, we assume the distribution is normal. And today, we are talking about normal distribution. So we have to use the empirical rules. Now, the mean is the value in the middle. 
Suppose we are far away.", "tokens": [337, 8756, 26896, 37870, 13, 1171, 32330, 11, 321, 362, 281, 3079, 264, 31886, 4978, 13, 1692, 11, 321, 6552, 264, 7316, 307, 2710, 13, 400, 965, 11, 321, 366, 1417, 466, 2710, 7316, 13, 407, 321, 362, 281, 764, 264, 31886, 4474, 13, 823, 11, 264, 914, 307, 264, 2158, 294, 264, 2808, 13, 21360, 321, 366, 1400, 1314, 13], "avg_logprob": -0.1499495902849782, "compression_ratio": 1.6726190476190477, "no_speech_prob": 0.0, "words": [{"start": 2507.4300000000003, "end": 2508.05, "word": " for", "probability": 0.3681640625}, {"start": 2508.05, "end": 2508.67, "word": " skewed", "probability": 0.953125}, {"start": 2508.67, "end": 2509.33, "word": " distributions.", "probability": 0.84375}, {"start": 2510.49, "end": 2511.11, "word": " For", "probability": 0.82080078125}, {"start": 2511.11, "end": 2511.51, "word": " symmetric,", "probability": 0.5546875}, {"start": 2511.69, "end": 2511.77, "word": " we", "probability": 0.6083984375}, {"start": 2511.77, "end": 2511.95, "word": " have", "probability": 0.9443359375}, {"start": 2511.95, "end": 2512.09, "word": " to", "probability": 0.97216796875}, {"start": 2512.09, "end": 2512.43, "word": " apply", "probability": 0.93212890625}, {"start": 2512.43, "end": 2512.63, "word": " the", "probability": 0.89990234375}, {"start": 2512.63, "end": 2512.97, "word": " empirical", "probability": 0.91943359375}, {"start": 2512.97, "end": 2513.33, "word": " rule.", "probability": 0.896484375}, {"start": 2514.35, "end": 2514.61, "word": " Here,", "probability": 0.85400390625}, {"start": 2514.65, "end": 2514.75, "word": " we", "probability": 0.96337890625}, {"start": 2514.75, "end": 2515.15, "word": " assume", "probability": 0.9013671875}, {"start": 2515.15, "end": 2515.31, "word": " the", "probability": 0.5146484375}, {"start": 2515.31, "end": 2515.63, "word": " distribution", "probability": 0.8544921875}, {"start": 2515.63, "end": 2515.87, "word": " is", "probability": 0.9052734375}, {"start": 2515.87, 
"end": 2516.19, "word": " normal.", "probability": 0.86865234375}, {"start": 2516.63, "end": 2517.01, "word": " And", "probability": 0.9560546875}, {"start": 2517.01, "end": 2517.23, "word": " today,", "probability": 0.79931640625}, {"start": 2517.27, "end": 2517.37, "word": " we", "probability": 0.96533203125}, {"start": 2517.37, "end": 2517.47, "word": " are", "probability": 0.91845703125}, {"start": 2517.47, "end": 2517.79, "word": " talking", "probability": 0.84912109375}, {"start": 2517.79, "end": 2518.11, "word": " about", "probability": 0.89501953125}, {"start": 2518.11, "end": 2518.39, "word": " normal", "probability": 0.84765625}, {"start": 2518.39, "end": 2518.79, "word": " distribution.", "probability": 0.80810546875}, {"start": 2519.21, "end": 2519.37, "word": " So", "probability": 0.9638671875}, {"start": 2519.37, "end": 2519.49, "word": " we", "probability": 0.93212890625}, {"start": 2519.49, "end": 2519.63, "word": " have", "probability": 0.94580078125}, {"start": 2519.63, "end": 2519.73, "word": " to", "probability": 0.9619140625}, {"start": 2519.73, "end": 2520.05, "word": " use", "probability": 0.873046875}, {"start": 2520.05, "end": 2520.83, "word": " the", "probability": 0.9091796875}, {"start": 2520.83, "end": 2521.33, "word": " empirical", "probability": 0.91162109375}, {"start": 2521.33, "end": 2522.41, "word": " rules.", "probability": 0.84619140625}, {"start": 2527.91, "end": 2528.53, "word": " Now,", "probability": 0.9560546875}, {"start": 2530.53, "end": 2530.79, "word": " the", "probability": 0.80419921875}, {"start": 2530.79, "end": 2531.05, "word": " mean", "probability": 0.97998046875}, {"start": 2531.05, "end": 2531.51, "word": " is", "probability": 0.94287109375}, {"start": 2531.51, "end": 2531.65, "word": " the", "probability": 0.9208984375}, {"start": 2531.65, "end": 2531.81, "word": " value", "probability": 0.970703125}, {"start": 2531.81, "end": 2531.95, "word": " in", "probability": 0.87255859375}, {"start": 2531.95, "end": 
2532.03, "word": " the", "probability": 0.91552734375}, {"start": 2532.03, "end": 2532.27, "word": " middle.", "probability": 0.9462890625}, {"start": 2533.19, "end": 2533.53, "word": " Suppose", "probability": 0.818359375}, {"start": 2533.53, "end": 2533.71, "word": " we", "probability": 0.94189453125}, {"start": 2533.71, "end": 2533.89, "word": " are", "probability": 0.94140625}, {"start": 2533.89, "end": 2534.23, "word": " far", "probability": 0.94873046875}, {"start": 2534.23, "end": 2534.53, "word": " away.", "probability": 0.87890625}], "temperature": 1.0}, {"id": 95, "seek": 255636, "start": 2535.3, "end": 2556.36, "text": " from the mean by one standard deviation either below or above and we are interested in the area between this value which is mu minus sigma so we are looking for mu minus sigma and mu plus sigma", "tokens": [490, 264, 914, 538, 472, 3832, 25163, 2139, 2507, 420, 3673, 293, 321, 366, 3102, 294, 264, 1859, 1296, 341, 2158, 597, 307, 2992, 3175, 12771, 370, 321, 366, 1237, 337, 2992, 3175, 12771, 293, 2992, 1804, 12771], "avg_logprob": -0.1535456753694094, "compression_ratio": 1.564516129032258, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2535.3, "end": 2535.7, "word": " from", "probability": 0.393798828125}, {"start": 2535.7, "end": 2535.92, "word": " the", "probability": 0.85302734375}, {"start": 2535.92, "end": 2536.1, "word": " mean", "probability": 0.94873046875}, {"start": 2536.1, "end": 2536.38, "word": " by", "probability": 0.93896484375}, {"start": 2536.38, "end": 2536.6, "word": " one", "probability": 0.87060546875}, {"start": 2536.6, "end": 2536.9, "word": " standard", "probability": 0.96484375}, {"start": 2536.9, "end": 2537.3, "word": " deviation", "probability": 0.88916015625}, {"start": 2537.3, "end": 2537.64, "word": " either", "probability": 0.64501953125}, {"start": 2537.64, "end": 2538.08, "word": " below", "probability": 0.861328125}, {"start": 2538.08, "end": 2539.76, "word": " or", "probability": 
0.86181640625}, {"start": 2539.76, "end": 2540.12, "word": " above", "probability": 0.962890625}, {"start": 2540.12, "end": 2541.68, "word": " and", "probability": 0.44873046875}, {"start": 2541.68, "end": 2542.54, "word": " we", "probability": 0.90966796875}, {"start": 2542.54, "end": 2542.72, "word": " are", "probability": 0.94580078125}, {"start": 2542.72, "end": 2543.18, "word": " interested", "probability": 0.85205078125}, {"start": 2543.18, "end": 2543.56, "word": " in", "probability": 0.94775390625}, {"start": 2543.56, "end": 2543.72, "word": " the", "probability": 0.92236328125}, {"start": 2543.72, "end": 2544.08, "word": " area", "probability": 0.90234375}, {"start": 2544.08, "end": 2544.62, "word": " between", "probability": 0.89501953125}, {"start": 2544.62, "end": 2546.34, "word": " this", "probability": 0.93505859375}, {"start": 2546.34, "end": 2546.66, "word": " value", "probability": 0.9755859375}, {"start": 2546.66, "end": 2546.9, "word": " which", "probability": 0.9150390625}, {"start": 2546.9, "end": 2547.04, "word": " is", "probability": 0.9609375}, {"start": 2547.04, "end": 2547.24, "word": " mu", "probability": 0.55224609375}, {"start": 2547.24, "end": 2547.52, "word": " minus", "probability": 0.96728515625}, {"start": 2547.52, "end": 2547.86, "word": " sigma", "probability": 0.87548828125}, {"start": 2547.86, "end": 2549.7, "word": " so", "probability": 0.6611328125}, {"start": 2549.7, "end": 2549.88, "word": " we", "probability": 0.93994140625}, {"start": 2549.88, "end": 2550.0, "word": " are", "probability": 0.94140625}, {"start": 2550.0, "end": 2550.3, "word": " looking", "probability": 0.919921875}, {"start": 2550.3, "end": 2550.74, "word": " for", "probability": 0.9619140625}, {"start": 2550.74, "end": 2552.72, "word": " mu", "probability": 0.884765625}, {"start": 2552.72, "end": 2553.04, "word": " minus", "probability": 0.9814453125}, {"start": 2553.04, "end": 2553.44, "word": " sigma", "probability": 0.927734375}, {"start": 2553.44, 
"end": 2555.62, "word": " and", "probability": 0.943359375}, {"start": 2555.62, "end": 2555.82, "word": " mu", "probability": 0.884765625}, {"start": 2555.82, "end": 2556.04, "word": " plus", "probability": 0.94482421875}, {"start": 2556.04, "end": 2556.36, "word": " sigma", "probability": 0.93017578125}], "temperature": 1.0}, {"id": 96, "seek": 260161, "start": 2573.27, "end": 2601.61, "text": " Last time we said there's a rule 68% of the data lies one standard deviation within the mean. Now let's see how can we compute the exact area, area not just say 68%. Now X has normal distribution with mean mu and standard deviation sigma. So let's compare it from", "tokens": [5264, 565, 321, 848, 456, 311, 257, 4978, 23317, 4, 295, 264, 1412, 9134, 472, 3832, 25163, 1951, 264, 914, 13, 823, 718, 311, 536, 577, 393, 321, 14722, 264, 1900, 1859, 11, 1859, 406, 445, 584, 23317, 6856, 823, 1783, 575, 2710, 7316, 365, 914, 2992, 293, 3832, 25163, 12771, 13, 407, 718, 311, 6794, 309, 490], "avg_logprob": -0.2635063549219552, "compression_ratio": 1.543859649122807, "no_speech_prob": 0.0, "words": [{"start": 2573.2700000000004, "end": 2573.9500000000003, "word": " Last", "probability": 0.2493896484375}, {"start": 2573.9500000000003, "end": 2574.63, "word": " time", "probability": 0.8525390625}, {"start": 2574.63, "end": 2574.77, "word": " we", "probability": 0.7099609375}, {"start": 2574.77, "end": 2574.95, "word": " said", "probability": 0.85009765625}, {"start": 2574.95, "end": 2575.21, "word": " there's", "probability": 0.5938720703125}, {"start": 2575.21, "end": 2575.33, "word": " a", "probability": 0.6455078125}, {"start": 2575.33, "end": 2575.67, "word": " rule", "probability": 0.63037109375}, {"start": 2575.67, "end": 2577.63, "word": " 68", "probability": 0.80029296875}, {"start": 2577.63, "end": 2578.03, "word": "%", "probability": 0.810546875}, {"start": 2578.03, "end": 2579.29, "word": " of", "probability": 0.93310546875}, {"start": 2579.29, "end": 2579.45, "word": " 
the", "probability": 0.900390625}, {"start": 2579.45, "end": 2579.89, "word": " data", "probability": 0.94921875}, {"start": 2579.89, "end": 2581.09, "word": " lies", "probability": 0.8623046875}, {"start": 2581.09, "end": 2581.73, "word": " one", "probability": 0.72265625}, {"start": 2581.73, "end": 2582.05, "word": " standard", "probability": 0.9658203125}, {"start": 2582.05, "end": 2582.45, "word": " deviation", "probability": 0.9326171875}, {"start": 2582.45, "end": 2584.47, "word": " within", "probability": 0.8857421875}, {"start": 2584.47, "end": 2584.73, "word": " the", "probability": 0.90478515625}, {"start": 2584.73, "end": 2584.85, "word": " mean.", "probability": 0.9541015625}, {"start": 2586.11, "end": 2586.79, "word": " Now", "probability": 0.90478515625}, {"start": 2586.79, "end": 2587.07, "word": " let's", "probability": 0.877685546875}, {"start": 2587.07, "end": 2587.31, "word": " see", "probability": 0.9150390625}, {"start": 2587.31, "end": 2588.03, "word": " how", "probability": 0.853515625}, {"start": 2588.03, "end": 2588.27, "word": " can", "probability": 0.83544921875}, {"start": 2588.27, "end": 2588.43, "word": " we", "probability": 0.95703125}, {"start": 2588.43, "end": 2588.79, "word": " compute", "probability": 0.923828125}, {"start": 2588.79, "end": 2588.99, "word": " the", "probability": 0.9091796875}, {"start": 2588.99, "end": 2589.47, "word": " exact", "probability": 0.95703125}, {"start": 2589.47, "end": 2590.15, "word": " area,", "probability": 0.88671875}, {"start": 2590.31, "end": 2590.55, "word": " area", "probability": 0.78564453125}, {"start": 2590.55, "end": 2590.79, "word": " not", "probability": 0.85107421875}, {"start": 2590.79, "end": 2591.13, "word": " just", "probability": 0.88330078125}, {"start": 2591.13, "end": 2591.43, "word": " say", "probability": 0.5634765625}, {"start": 2591.43, "end": 2592.41, "word": " 68%.", "probability": 0.909423828125}, {"start": 2592.41, "end": 2593.83, "word": " Now", "probability": 
0.9521484375}, {"start": 2593.83, "end": 2594.13, "word": " X", "probability": 0.5439453125}, {"start": 2594.13, "end": 2594.33, "word": " has", "probability": 0.9453125}, {"start": 2594.33, "end": 2594.65, "word": " normal", "probability": 0.8583984375}, {"start": 2594.65, "end": 2595.25, "word": " distribution", "probability": 0.85888671875}, {"start": 2595.25, "end": 2595.59, "word": " with", "probability": 0.7646484375}, {"start": 2595.59, "end": 2595.73, "word": " mean", "probability": 0.8271484375}, {"start": 2595.73, "end": 2595.93, "word": " mu", "probability": 0.3662109375}, {"start": 2595.93, "end": 2596.11, "word": " and", "probability": 0.9306640625}, {"start": 2596.11, "end": 2596.37, "word": " standard", "probability": 0.95947265625}, {"start": 2596.37, "end": 2596.63, "word": " deviation", "probability": 0.9169921875}, {"start": 2596.63, "end": 2597.01, "word": " sigma.", "probability": 0.87158203125}, {"start": 2598.05, "end": 2598.39, "word": " So", "probability": 0.9443359375}, {"start": 2598.39, "end": 2598.69, "word": " let's", "probability": 0.929443359375}, {"start": 2598.69, "end": 2599.21, "word": " compare", "probability": 0.53662109375}, {"start": 2599.21, "end": 2601.23, "word": " it", "probability": 0.317138671875}, {"start": 2601.23, "end": 2601.61, "word": " from", "probability": 0.8291015625}], "temperature": 1.0}, {"id": 97, "seek": 263214, "start": 2603.74, "end": 2632.14, "text": " normal distribution to standardized. So this is the first value here. Now the z-score, the general formula is x minus the mean divided by sigma. Now the first quantity is mu minus sigma. So instead of x here, so first z is, now this x should be replaced by mu minus sigma. 
So mu minus sigma.", "tokens": [2710, 7316, 281, 31677, 13, 407, 341, 307, 264, 700, 2158, 510, 13, 823, 264, 710, 12, 4417, 418, 11, 264, 2674, 8513, 307, 2031, 3175, 264, 914, 6666, 538, 12771, 13, 823, 264, 700, 11275, 307, 2992, 3175, 12771, 13, 407, 2602, 295, 2031, 510, 11, 370, 700, 710, 307, 11, 586, 341, 2031, 820, 312, 10772, 538, 2992, 3175, 12771, 13, 407, 2992, 3175, 12771, 13], "avg_logprob": -0.2085597869278728, "compression_ratio": 1.6781609195402298, "no_speech_prob": 0.0, "words": [{"start": 2603.74, "end": 2604.18, "word": " normal", "probability": 0.384765625}, {"start": 2604.18, "end": 2604.84, "word": " distribution", "probability": 0.826171875}, {"start": 2604.84, "end": 2605.28, "word": " to", "probability": 0.82470703125}, {"start": 2605.28, "end": 2605.92, "word": " standardized.", "probability": 0.408935546875}, {"start": 2606.36, "end": 2606.62, "word": " So", "probability": 0.85546875}, {"start": 2606.62, "end": 2606.84, "word": " this", "probability": 0.779296875}, {"start": 2606.84, "end": 2606.96, "word": " is", "probability": 0.951171875}, {"start": 2606.96, "end": 2607.1, "word": " the", "probability": 0.92041015625}, {"start": 2607.1, "end": 2607.4, "word": " first", "probability": 0.89111328125}, {"start": 2607.4, "end": 2607.78, "word": " value", "probability": 0.9609375}, {"start": 2607.78, "end": 2608.06, "word": " here.", "probability": 0.43505859375}, {"start": 2609.44, "end": 2609.7, "word": " Now", "probability": 0.9228515625}, {"start": 2609.7, "end": 2609.92, "word": " the", "probability": 0.73876953125}, {"start": 2609.92, "end": 2610.1, "word": " z", "probability": 0.771484375}, {"start": 2610.1, "end": 2610.6, "word": "-score,", "probability": 0.7918294270833334}, {"start": 2611.52, "end": 2611.74, "word": " the", "probability": 0.84130859375}, {"start": 2611.74, "end": 2612.24, "word": " general", "probability": 0.888671875}, {"start": 2612.24, "end": 2612.72, "word": " formula", "probability": 0.90771484375}, 
{"start": 2612.72, "end": 2613.08, "word": " is", "probability": 0.83837890625}, {"start": 2613.08, "end": 2614.3, "word": " x", "probability": 0.7431640625}, {"start": 2614.3, "end": 2614.7, "word": " minus", "probability": 0.9677734375}, {"start": 2614.7, "end": 2614.94, "word": " the", "probability": 0.59619140625}, {"start": 2614.94, "end": 2615.08, "word": " mean", "probability": 0.62451171875}, {"start": 2615.08, "end": 2615.32, "word": " divided", "probability": 0.73486328125}, {"start": 2615.32, "end": 2615.5, "word": " by", "probability": 0.9765625}, {"start": 2615.5, "end": 2615.76, "word": " sigma.", "probability": 0.869140625}, {"start": 2617.18, "end": 2617.84, "word": " Now", "probability": 0.94091796875}, {"start": 2617.84, "end": 2618.96, "word": " the", "probability": 0.6455078125}, {"start": 2618.96, "end": 2619.26, "word": " first", "probability": 0.87451171875}, {"start": 2619.26, "end": 2619.74, "word": " quantity", "probability": 0.98095703125}, {"start": 2619.74, "end": 2620.12, "word": " is", "probability": 0.95166015625}, {"start": 2620.12, "end": 2620.32, "word": " mu", "probability": 0.77294921875}, {"start": 2620.32, "end": 2620.56, "word": " minus", "probability": 0.98681640625}, {"start": 2620.56, "end": 2620.88, "word": " sigma.", "probability": 0.9296875}, {"start": 2622.12, "end": 2622.46, "word": " So", "probability": 0.95068359375}, {"start": 2622.46, "end": 2622.82, "word": " instead", "probability": 0.8779296875}, {"start": 2622.82, "end": 2623.0, "word": " of", "probability": 0.9677734375}, {"start": 2623.0, "end": 2623.3, "word": " x", "probability": 0.98486328125}, {"start": 2623.3, "end": 2623.68, "word": " here,", "probability": 0.84814453125}, {"start": 2624.5, "end": 2624.68, "word": " so", "probability": 0.59765625}, {"start": 2624.68, "end": 2625.4, "word": " first", "probability": 0.70654296875}, {"start": 2625.4, "end": 2625.66, "word": " z", "probability": 0.87109375}, {"start": 2625.66, "end": 2626.0, "word": " 
is,", "probability": 0.947265625}, {"start": 2626.52, "end": 2626.76, "word": " now", "probability": 0.93017578125}, {"start": 2626.76, "end": 2627.06, "word": " this", "probability": 0.9365234375}, {"start": 2627.06, "end": 2627.42, "word": " x", "probability": 0.99365234375}, {"start": 2627.42, "end": 2628.44, "word": " should", "probability": 0.9453125}, {"start": 2628.44, "end": 2628.6, "word": " be", "probability": 0.9541015625}, {"start": 2628.6, "end": 2629.04, "word": " replaced", "probability": 0.94140625}, {"start": 2629.04, "end": 2629.34, "word": " by", "probability": 0.97412109375}, {"start": 2629.34, "end": 2629.6, "word": " mu", "probability": 0.95751953125}, {"start": 2629.6, "end": 2629.82, "word": " minus", "probability": 0.9873046875}, {"start": 2629.82, "end": 2630.18, "word": " sigma.", "probability": 0.921875}, {"start": 2631.04, "end": 2631.26, "word": " So", "probability": 0.958984375}, {"start": 2631.26, "end": 2631.5, "word": " mu", "probability": 0.91015625}, {"start": 2631.5, "end": 2631.78, "word": " minus", "probability": 0.984375}, {"start": 2631.78, "end": 2632.14, "word": " sigma.", "probability": 0.91455078125}], "temperature": 1.0}, {"id": 98, "seek": 266132, "start": 2633.58, "end": 2661.32, "text": " So that's my x value, minus the mean of that, which is mu, divided by sigma. Mu minus sigma minus mu mu cancels, so plus one. And let's see how can we compute that area. I mean between minus one and plus one. 
In this case, we are interested or we are looking for", "tokens": [407, 300, 311, 452, 2031, 2158, 11, 3175, 264, 914, 295, 300, 11, 597, 307, 2992, 11, 6666, 538, 12771, 13, 15601, 3175, 12771, 3175, 2992, 2992, 393, 66, 1625, 11, 370, 1804, 472, 13, 400, 718, 311, 536, 577, 393, 321, 14722, 300, 1859, 13, 286, 914, 1296, 3175, 472, 293, 1804, 472, 13, 682, 341, 1389, 11, 321, 366, 3102, 420, 321, 366, 1237, 337], "avg_logprob": -0.2105928359224516, "compression_ratio": 1.5114942528735633, "no_speech_prob": 0.0, "words": [{"start": 2633.58, "end": 2633.86, "word": " So", "probability": 0.84912109375}, {"start": 2633.86, "end": 2634.18, "word": " that's", "probability": 0.901123046875}, {"start": 2634.18, "end": 2634.38, "word": " my", "probability": 0.9658203125}, {"start": 2634.38, "end": 2634.66, "word": " x", "probability": 0.7470703125}, {"start": 2634.66, "end": 2635.04, "word": " value,", "probability": 0.77783203125}, {"start": 2635.56, "end": 2636.66, "word": " minus", "probability": 0.9833984375}, {"start": 2636.66, "end": 2636.92, "word": " the", "probability": 0.9287109375}, {"start": 2636.92, "end": 2637.08, "word": " mean", "probability": 0.96533203125}, {"start": 2637.08, "end": 2637.24, "word": " of", "probability": 0.9677734375}, {"start": 2637.24, "end": 2637.54, "word": " that,", "probability": 0.93505859375}, {"start": 2637.74, "end": 2637.84, "word": " which", "probability": 0.9501953125}, {"start": 2637.84, "end": 2638.06, "word": " is", "probability": 0.95703125}, {"start": 2638.06, "end": 2638.36, "word": " mu,", "probability": 0.55712890625}, {"start": 2639.22, "end": 2640.0, "word": " divided", "probability": 0.6630859375}, {"start": 2640.0, "end": 2640.24, "word": " by", "probability": 0.97021484375}, {"start": 2640.24, "end": 2640.46, "word": " sigma.", "probability": 0.8623046875}, {"start": 2642.44, "end": 2643.04, "word": " Mu", "probability": 0.468505859375}, {"start": 2643.04, "end": 2643.34, "word": " minus", "probability": 
0.9638671875}, {"start": 2643.34, "end": 2643.6, "word": " sigma", "probability": 0.94482421875}, {"start": 2643.6, "end": 2643.94, "word": " minus", "probability": 0.9599609375}, {"start": 2643.94, "end": 2644.22, "word": " mu", "probability": 0.85400390625}, {"start": 2644.22, "end": 2644.4, "word": " mu", "probability": 0.445068359375}, {"start": 2644.4, "end": 2645.08, "word": " cancels,", "probability": 0.890625}, {"start": 2646.28, "end": 2646.7, "word": " so", "probability": 0.81640625}, {"start": 2646.7, "end": 2647.9, "word": " plus", "probability": 0.50439453125}, {"start": 2647.9, "end": 2648.26, "word": " one.", "probability": 0.57763671875}, {"start": 2649.46, "end": 2649.76, "word": " And", "probability": 0.9287109375}, {"start": 2649.76, "end": 2649.98, "word": " let's", "probability": 0.925048828125}, {"start": 2649.98, "end": 2650.1, "word": " see", "probability": 0.91650390625}, {"start": 2650.1, "end": 2650.22, "word": " how", "probability": 0.89404296875}, {"start": 2650.22, "end": 2650.4, "word": " can", "probability": 0.8349609375}, {"start": 2650.4, "end": 2650.54, "word": " we", "probability": 0.95263671875}, {"start": 2650.54, "end": 2650.9, "word": " compute", "probability": 0.8994140625}, {"start": 2650.9, "end": 2651.2, "word": " that", "probability": 0.93408203125}, {"start": 2651.2, "end": 2651.6, "word": " area.", "probability": 0.90478515625}, {"start": 2653.12, "end": 2653.52, "word": " I", "probability": 0.98388671875}, {"start": 2653.52, "end": 2653.66, "word": " mean", "probability": 0.96484375}, {"start": 2653.66, "end": 2653.96, "word": " between", "probability": 0.64990234375}, {"start": 2653.96, "end": 2654.3, "word": " minus", "probability": 0.97705078125}, {"start": 2654.3, "end": 2654.48, "word": " one", "probability": 0.86376953125}, {"start": 2654.48, "end": 2654.6, "word": " and", "probability": 0.93505859375}, {"start": 2654.6, "end": 2654.86, "word": " plus", "probability": 0.95947265625}, {"start": 2654.86, "end": 
2655.1, "word": " one.", "probability": 0.92529296875}, {"start": 2656.24, "end": 2656.58, "word": " In", "probability": 0.95361328125}, {"start": 2656.58, "end": 2656.76, "word": " this", "probability": 0.94677734375}, {"start": 2656.76, "end": 2656.98, "word": " case,", "probability": 0.9130859375}, {"start": 2657.04, "end": 2657.12, "word": " we", "probability": 0.94921875}, {"start": 2657.12, "end": 2657.24, "word": " are", "probability": 0.93359375}, {"start": 2657.24, "end": 2657.68, "word": " interested", "probability": 0.83984375}, {"start": 2657.68, "end": 2660.12, "word": " or", "probability": 0.431640625}, {"start": 2660.12, "end": 2660.48, "word": " we", "probability": 0.94287109375}, {"start": 2660.48, "end": 2660.6, "word": " are", "probability": 0.92431640625}, {"start": 2660.6, "end": 2660.88, "word": " looking", "probability": 0.9091796875}, {"start": 2660.88, "end": 2661.32, "word": " for", "probability": 0.95068359375}], "temperature": 1.0}, {"id": 99, "seek": 269148, "start": 2662.64, "end": 2691.48, "text": " the area between minus one and plus one this area now the dashed area i mean the area between minus one and plus one equals the area below one this area minus the area below minus one that will give the area between minus one and plus one now go back to the normal table you have and look at", "tokens": [264, 1859, 1296, 3175, 472, 293, 1804, 472, 341, 1859, 586, 264, 8240, 292, 1859, 741, 914, 264, 1859, 1296, 3175, 472, 293, 1804, 472, 6915, 264, 1859, 2507, 472, 341, 1859, 3175, 264, 1859, 2507, 3175, 472, 300, 486, 976, 264, 1859, 1296, 3175, 472, 293, 1804, 472, 586, 352, 646, 281, 264, 2710, 3199, 291, 362, 293, 574, 412], "avg_logprob": -0.16481854982914462, "compression_ratio": 2.336, "no_speech_prob": 0.0, "words": [{"start": 2662.64, "end": 2662.9, "word": " the", "probability": 0.1920166015625}, {"start": 2662.9, "end": 2663.18, "word": " area", "probability": 0.8076171875}, {"start": 2663.18, "end": 2663.4, "word": " between", 
"probability": 0.8828125}, {"start": 2663.4, "end": 2663.72, "word": " minus", "probability": 0.59033203125}, {"start": 2663.72, "end": 2663.9, "word": " one", "probability": 0.62744140625}, {"start": 2663.9, "end": 2664.06, "word": " and", "probability": 0.92236328125}, {"start": 2664.06, "end": 2664.38, "word": " plus", "probability": 0.916015625}, {"start": 2664.38, "end": 2664.62, "word": " one", "probability": 0.9267578125}, {"start": 2664.62, "end": 2665.08, "word": " this", "probability": 0.358642578125}, {"start": 2665.08, "end": 2665.36, "word": " area", "probability": 0.77294921875}, {"start": 2665.36, "end": 2668.0, "word": " now", "probability": 0.50830078125}, {"start": 2668.0, "end": 2668.3, "word": " the", "probability": 0.87744140625}, {"start": 2668.3, "end": 2668.78, "word": " dashed", "probability": 0.797607421875}, {"start": 2668.78, "end": 2669.18, "word": " area", "probability": 0.86669921875}, {"start": 2669.18, "end": 2670.16, "word": " i", "probability": 0.61962890625}, {"start": 2670.16, "end": 2670.26, "word": " mean", "probability": 0.96923828125}, {"start": 2670.26, "end": 2670.38, "word": " the", "probability": 0.92578125}, {"start": 2670.38, "end": 2670.54, "word": " area", "probability": 0.8955078125}, {"start": 2670.54, "end": 2670.74, "word": " between", "probability": 0.89013671875}, {"start": 2670.74, "end": 2671.06, "word": " minus", "probability": 0.97998046875}, {"start": 2671.06, "end": 2671.26, "word": " one", "probability": 0.92578125}, {"start": 2671.26, "end": 2671.36, "word": " and", "probability": 0.880859375}, {"start": 2671.36, "end": 2671.62, "word": " plus", "probability": 0.8154296875}, {"start": 2671.62, "end": 2671.96, "word": " one", "probability": 0.927734375}, {"start": 2671.96, "end": 2673.54, "word": " equals", "probability": 0.9599609375}, {"start": 2673.54, "end": 2675.08, "word": " the", "probability": 0.9052734375}, {"start": 2675.08, "end": 2675.38, "word": " area", "probability": 0.86865234375}, 
{"start": 2675.38, "end": 2675.62, "word": " below", "probability": 0.90380859375}, {"start": 2675.62, "end": 2675.92, "word": " one", "probability": 0.91064453125}, {"start": 2675.92, "end": 2677.74, "word": " this", "probability": 0.787109375}, {"start": 2677.74, "end": 2678.04, "word": " area", "probability": 0.86669921875}, {"start": 2678.04, "end": 2679.46, "word": " minus", "probability": 0.96533203125}, {"start": 2679.46, "end": 2681.74, "word": " the", "probability": 0.82373046875}, {"start": 2681.74, "end": 2682.04, "word": " area", "probability": 0.86328125}, {"start": 2682.04, "end": 2682.24, "word": " below", "probability": 0.92236328125}, {"start": 2682.24, "end": 2682.54, "word": " minus", "probability": 0.9833984375}, {"start": 2682.54, "end": 2682.82, "word": " one", "probability": 0.9267578125}, {"start": 2682.82, "end": 2683.38, "word": " that", "probability": 0.9091796875}, {"start": 2683.38, "end": 2683.56, "word": " will", "probability": 0.8828125}, {"start": 2683.56, "end": 2683.82, "word": " give", "probability": 0.8896484375}, {"start": 2683.82, "end": 2684.14, "word": " the", "probability": 0.921875}, {"start": 2684.14, "end": 2684.98, "word": " area", "probability": 0.8623046875}, {"start": 2684.98, "end": 2685.22, "word": " between", "probability": 0.8759765625}, {"start": 2685.22, "end": 2685.5, "word": " minus", "probability": 0.97998046875}, {"start": 2685.5, "end": 2685.68, "word": " one", "probability": 0.9287109375}, {"start": 2685.68, "end": 2685.8, "word": " and", "probability": 0.8408203125}, {"start": 2685.8, "end": 2686.02, "word": " plus", "probability": 0.86962890625}, {"start": 2686.02, "end": 2686.3, "word": " one", "probability": 0.92578125}, {"start": 2686.3, "end": 2687.4, "word": " now", "probability": 0.83837890625}, {"start": 2687.4, "end": 2687.62, "word": " go", "probability": 0.95947265625}, {"start": 2687.62, "end": 2687.88, "word": " back", "probability": 0.88037109375}, {"start": 2687.88, "end": 2688.06, "word": 
" to", "probability": 0.96923828125}, {"start": 2688.06, "end": 2688.2, "word": " the", "probability": 0.91748046875}, {"start": 2688.2, "end": 2688.52, "word": " normal", "probability": 0.8916015625}, {"start": 2688.52, "end": 2689.0, "word": " table", "probability": 0.89013671875}, {"start": 2689.0, "end": 2689.2, "word": " you", "probability": 0.958984375}, {"start": 2689.2, "end": 2689.58, "word": " have", "probability": 0.9501953125}, {"start": 2689.58, "end": 2690.88, "word": " and", "probability": 0.93115234375}, {"start": 2690.88, "end": 2691.14, "word": " look", "probability": 0.9697265625}, {"start": 2691.14, "end": 2691.48, "word": " at", "probability": 0.96923828125}], "temperature": 1.0}, {"id": 100, "seek": 271784, "start": 2691.68, "end": 2717.84, "text": " the value of one z and one under zero what's your answer one point one point now without using the table can you tell the area below minus one one minus this one because", "tokens": [264, 2158, 295, 472, 710, 293, 472, 833, 4018, 437, 311, 428, 1867, 472, 935, 472, 935, 586, 1553, 1228, 264, 3199, 393, 291, 980, 264, 1859, 2507, 3175, 472, 472, 3175, 341, 472, 570], "avg_logprob": -0.3146701264712546, "compression_ratio": 1.4782608695652173, "no_speech_prob": 0.0, "words": [{"start": 2691.68, "end": 2691.94, "word": " the", "probability": 0.277099609375}, {"start": 2691.94, "end": 2692.14, "word": " value", "probability": 0.95068359375}, {"start": 2692.14, "end": 2692.26, "word": " of", "probability": 0.6943359375}, {"start": 2692.26, "end": 2692.5, "word": " one", "probability": 0.292236328125}, {"start": 2692.5, "end": 2693.78, "word": " z", "probability": 0.360107421875}, {"start": 2693.78, "end": 2694.32, "word": " and", "probability": 0.8798828125}, {"start": 2694.32, "end": 2694.58, "word": " one", "probability": 0.8935546875}, {"start": 2694.58, "end": 2696.6, "word": " under", "probability": 0.677734375}, {"start": 2696.6, "end": 2696.9, "word": " zero", "probability": 0.83837890625}, 
{"start": 2696.9, "end": 2698.72, "word": " what's", "probability": 0.816162109375}, {"start": 2698.72, "end": 2698.86, "word": " your", "probability": 0.90478515625}, {"start": 2698.86, "end": 2699.56, "word": " answer", "probability": 0.9541015625}, {"start": 2699.56, "end": 2702.48, "word": " one", "probability": 0.3916015625}, {"start": 2702.48, "end": 2702.62, "word": " point", "probability": 0.50537109375}, {"start": 2702.62, "end": 2705.48, "word": " one", "probability": 0.68994140625}, {"start": 2705.48, "end": 2705.68, "word": " point", "probability": 0.22998046875}, {"start": 2705.68, "end": 2708.22, "word": " now", "probability": 0.517578125}, {"start": 2708.22, "end": 2709.54, "word": " without", "probability": 0.89501953125}, {"start": 2709.54, "end": 2709.92, "word": " using", "probability": 0.93359375}, {"start": 2709.92, "end": 2710.1, "word": " the", "probability": 0.9296875}, {"start": 2710.1, "end": 2710.44, "word": " table", "probability": 0.88330078125}, {"start": 2710.44, "end": 2711.02, "word": " can", "probability": 0.919921875}, {"start": 2711.02, "end": 2711.18, "word": " you", "probability": 0.970703125}, {"start": 2711.18, "end": 2711.52, "word": " tell", "probability": 0.892578125}, {"start": 2711.52, "end": 2712.38, "word": " the", "probability": 0.8896484375}, {"start": 2712.38, "end": 2712.78, "word": " area", "probability": 0.8720703125}, {"start": 2712.78, "end": 2713.08, "word": " below", "probability": 0.89111328125}, {"start": 2713.08, "end": 2713.38, "word": " minus", "probability": 0.97216796875}, {"start": 2713.38, "end": 2713.72, "word": " one", "probability": 0.91064453125}, {"start": 2713.72, "end": 2715.52, "word": " one", "probability": 0.59228515625}, {"start": 2715.52, "end": 2716.88, "word": " minus", "probability": 0.962890625}, {"start": 2716.88, "end": 2717.14, "word": " this", "probability": 0.93408203125}, {"start": 2717.14, "end": 2717.36, "word": " one", "probability": 0.90576171875}, {"start": 2717.36, "end": 
2717.84, "word": " because", "probability": 0.87939453125}], "temperature": 1.0}, {"id": 101, "seek": 274205, "start": 2723.43, "end": 2742.05, "text": " Now the area below, this is 1. The area below 1 is 0.3413. Okay, now the area below minus 1. This is minus 1.", "tokens": [823, 264, 1859, 2507, 11, 341, 307, 502, 13, 440, 1859, 2507, 502, 307, 1958, 13, 12249, 7668, 13, 1033, 11, 586, 264, 1859, 2507, 3175, 502, 13, 639, 307, 3175, 502, 13], "avg_logprob": -0.3177849343594383, "compression_ratio": 1.5277777777777777, "no_speech_prob": 1.1324882507324219e-06, "words": [{"start": 2723.4300000000003, "end": 2724.11, "word": " Now", "probability": 0.463134765625}, {"start": 2724.11, "end": 2724.29, "word": " the", "probability": 0.607421875}, {"start": 2724.29, "end": 2724.63, "word": " area", "probability": 0.8828125}, {"start": 2724.63, "end": 2725.41, "word": " below,", "probability": 0.869140625}, {"start": 2725.55, "end": 2725.71, "word": " this", "probability": 0.91259765625}, {"start": 2725.71, "end": 2725.85, "word": " is", "probability": 0.9365234375}, {"start": 2725.85, "end": 2726.05, "word": " 1.", "probability": 0.4130859375}, {"start": 2727.31, "end": 2727.99, "word": " The", "probability": 0.78466796875}, {"start": 2727.99, "end": 2728.23, "word": " area", "probability": 0.8837890625}, {"start": 2728.23, "end": 2728.45, "word": " below", "probability": 0.9072265625}, {"start": 2728.45, "end": 2728.77, "word": " 1", "probability": 0.91455078125}, {"start": 2728.77, "end": 2729.87, "word": " is", "probability": 0.796875}, {"start": 2729.87, "end": 2730.13, "word": " 0", "probability": 0.486572265625}, {"start": 2730.13, "end": 2731.31, "word": ".3413.", "probability": 0.73486328125}, {"start": 2734.43, "end": 2734.77, "word": " Okay,", "probability": 0.4677734375}, {"start": 2735.23, "end": 2735.61, "word": " now", "probability": 0.9384765625}, {"start": 2735.61, "end": 2736.25, "word": " the", "probability": 0.716796875}, {"start": 2736.25, "end": 
2736.57, "word": " area", "probability": 0.8876953125}, {"start": 2736.57, "end": 2736.89, "word": " below", "probability": 0.919921875}, {"start": 2736.89, "end": 2737.21, "word": " minus", "probability": 0.8076171875}, {"start": 2737.21, "end": 2737.59, "word": " 1.", "probability": 0.91552734375}, {"start": 2740.77, "end": 2741.45, "word": " This", "probability": 0.6904296875}, {"start": 2741.45, "end": 2741.53, "word": " is", "probability": 0.9072265625}, {"start": 2741.53, "end": 2741.77, "word": " minus", "probability": 0.9736328125}, {"start": 2741.77, "end": 2742.05, "word": " 1.", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 102, "seek": 277609, "start": 2746.81, "end": 2776.09, "text": " Now, the area below minus 1 is the same as above 1. These are the two areas here are equal. So the area below minus 1, I mean b of z less than minus 1 is the same as b of z greater than 1. And b of z greater than 1 is the same as 1 minus b of z smaller than 1. So b of z less than 1 here.", "tokens": [823, 11, 264, 1859, 2507, 3175, 502, 307, 264, 912, 382, 3673, 502, 13, 1981, 366, 264, 732, 3179, 510, 366, 2681, 13, 407, 264, 1859, 2507, 3175, 502, 11, 286, 914, 272, 295, 710, 1570, 813, 3175, 502, 307, 264, 912, 382, 272, 295, 710, 5044, 813, 502, 13, 400, 272, 295, 710, 5044, 813, 502, 307, 264, 912, 382, 502, 3175, 272, 295, 710, 4356, 813, 502, 13, 407, 272, 295, 710, 1570, 813, 502, 510, 13], "avg_logprob": -0.1511718723922968, "compression_ratio": 2.049645390070922, "no_speech_prob": 0.0, "words": [{"start": 2746.81, "end": 2747.05, "word": " Now,", "probability": 0.7119140625}, {"start": 2747.15, "end": 2747.21, "word": " the", "probability": 0.90380859375}, {"start": 2747.21, "end": 2747.43, "word": " area", "probability": 0.8916015625}, {"start": 2747.43, "end": 2747.61, "word": " below", "probability": 0.900390625}, {"start": 2747.61, "end": 2747.93, "word": " minus", "probability": 0.88720703125}, {"start": 2747.93, "end": 2748.15, "word": " 1", 
"probability": 0.5859375}, {"start": 2748.15, "end": 2748.29, "word": " is", "probability": 0.94775390625}, {"start": 2748.29, "end": 2748.43, "word": " the", "probability": 0.9072265625}, {"start": 2748.43, "end": 2748.65, "word": " same", "probability": 0.9228515625}, {"start": 2748.65, "end": 2749.09, "word": " as", "probability": 0.9609375}, {"start": 2749.09, "end": 2749.55, "word": " above", "probability": 0.51220703125}, {"start": 2749.55, "end": 2750.51, "word": " 1.", "probability": 0.912109375}, {"start": 2754.31, "end": 2754.75, "word": " These", "probability": 0.70703125}, {"start": 2754.75, "end": 2755.01, "word": " are", "probability": 0.8935546875}, {"start": 2755.01, "end": 2755.73, "word": " the", "probability": 0.47119140625}, {"start": 2755.73, "end": 2755.93, "word": " two", "probability": 0.81396484375}, {"start": 2755.93, "end": 2756.23, "word": " areas", "probability": 0.958984375}, {"start": 2756.23, "end": 2756.49, "word": " here", "probability": 0.84326171875}, {"start": 2756.49, "end": 2756.97, "word": " are", "probability": 0.89697265625}, {"start": 2756.97, "end": 2757.67, "word": " equal.", "probability": 0.904296875}, {"start": 2758.39, "end": 2758.67, "word": " So", "probability": 0.95068359375}, {"start": 2758.67, "end": 2758.81, "word": " the", "probability": 0.71435546875}, {"start": 2758.81, "end": 2759.03, "word": " area", "probability": 0.89111328125}, {"start": 2759.03, "end": 2759.23, "word": " below", "probability": 0.931640625}, {"start": 2759.23, "end": 2759.57, "word": " minus", "probability": 0.986328125}, {"start": 2759.57, "end": 2759.79, "word": " 1,", "probability": 0.97216796875}, {"start": 2759.83, "end": 2759.93, "word": " I", "probability": 0.99072265625}, {"start": 2759.93, "end": 2760.11, "word": " mean", "probability": 0.96728515625}, {"start": 2760.11, "end": 2760.47, "word": " b", "probability": 0.2578125}, {"start": 2760.47, "end": 2760.65, "word": " of", "probability": 0.962890625}, {"start": 2760.65, 
"end": 2760.83, "word": " z", "probability": 0.970703125}, {"start": 2760.83, "end": 2762.63, "word": " less", "probability": 0.5166015625}, {"start": 2762.63, "end": 2762.81, "word": " than", "probability": 0.94677734375}, {"start": 2762.81, "end": 2763.11, "word": " minus", "probability": 0.85546875}, {"start": 2763.11, "end": 2763.43, "word": " 1", "probability": 0.9775390625}, {"start": 2763.43, "end": 2763.73, "word": " is", "probability": 0.7998046875}, {"start": 2763.73, "end": 2763.89, "word": " the", "probability": 0.91796875}, {"start": 2763.89, "end": 2764.13, "word": " same", "probability": 0.91650390625}, {"start": 2764.13, "end": 2764.47, "word": " as", "probability": 0.95849609375}, {"start": 2764.47, "end": 2764.63, "word": " b", "probability": 0.9541015625}, {"start": 2764.63, "end": 2764.81, "word": " of", "probability": 0.96728515625}, {"start": 2764.81, "end": 2765.05, "word": " z", "probability": 0.99609375}, {"start": 2765.05, "end": 2766.51, "word": " greater", "probability": 0.8701171875}, {"start": 2766.51, "end": 2766.79, "word": " than", "probability": 0.94482421875}, {"start": 2766.79, "end": 2767.01, "word": " 1.", "probability": 0.97216796875}, {"start": 2768.31, "end": 2768.75, "word": " And", "probability": 0.9287109375}, {"start": 2768.75, "end": 2768.89, "word": " b", "probability": 0.97265625}, {"start": 2768.89, "end": 2769.03, "word": " of", "probability": 0.97314453125}, {"start": 2769.03, "end": 2769.13, "word": " z", "probability": 0.9970703125}, {"start": 2769.13, "end": 2769.41, "word": " greater", "probability": 0.92041015625}, {"start": 2769.41, "end": 2769.67, "word": " than", "probability": 0.94189453125}, {"start": 2769.67, "end": 2769.85, "word": " 1", "probability": 0.97998046875}, {"start": 2769.85, "end": 2769.99, "word": " is", "probability": 0.935546875}, {"start": 2769.99, "end": 2770.13, "word": " the", "probability": 0.90087890625}, {"start": 2770.13, "end": 2770.31, "word": " same", "probability": 
0.91162109375}, {"start": 2770.31, "end": 2770.75, "word": " as", "probability": 0.96435546875}, {"start": 2770.75, "end": 2771.45, "word": " 1", "probability": 0.99072265625}, {"start": 2771.45, "end": 2771.97, "word": " minus", "probability": 0.98681640625}, {"start": 2771.97, "end": 2772.35, "word": " b", "probability": 0.95849609375}, {"start": 2772.35, "end": 2772.53, "word": " of", "probability": 0.96875}, {"start": 2772.53, "end": 2772.65, "word": " z", "probability": 0.99658203125}, {"start": 2772.65, "end": 2773.07, "word": " smaller", "probability": 0.84716796875}, {"start": 2773.07, "end": 2773.29, "word": " than", "probability": 0.638671875}, {"start": 2773.29, "end": 2773.45, "word": " 1.", "probability": 0.953125}, {"start": 2774.33, "end": 2774.77, "word": " So", "probability": 0.9619140625}, {"start": 2774.77, "end": 2774.97, "word": " b", "probability": 0.82861328125}, {"start": 2774.97, "end": 2775.11, "word": " of", "probability": 0.97412109375}, {"start": 2775.11, "end": 2775.21, "word": " z", "probability": 0.99755859375}, {"start": 2775.21, "end": 2775.45, "word": " less", "probability": 0.873046875}, {"start": 2775.45, "end": 2775.63, "word": " than", "probability": 0.943359375}, {"start": 2775.63, "end": 2775.79, "word": " 1", "probability": 0.98291015625}, {"start": 2775.79, "end": 2776.09, "word": " here.", "probability": 0.845703125}], "temperature": 1.0}, {"id": 103, "seek": 280497, "start": 2777.07, "end": 2804.97, "text": " You shouldn't need to look again to the table. Just subtract 1 from this value. Make sense? Here we compute the value of B of Z less than 1, which is 0.8413. We are looking for B of Z less than minus 1, which is the same as B of Z greater than 1. Now, greater than means our tail. 
It's 1 minus the lower tail probability.", "tokens": [509, 4659, 380, 643, 281, 574, 797, 281, 264, 3199, 13, 1449, 16390, 502, 490, 341, 2158, 13, 4387, 2020, 30, 1692, 321, 14722, 264, 2158, 295, 363, 295, 1176, 1570, 813, 502, 11, 597, 307, 1958, 13, 23, 17344, 18, 13, 492, 366, 1237, 337, 363, 295, 1176, 1570, 813, 3175, 502, 11, 597, 307, 264, 912, 382, 363, 295, 1176, 5044, 813, 502, 13, 823, 11, 5044, 813, 1355, 527, 6838, 13, 467, 311, 502, 3175, 264, 3126, 6838, 8482, 13], "avg_logprob": -0.17559524075615973, "compression_ratio": 1.5631067961165048, "no_speech_prob": 0.0, "words": [{"start": 2777.07, "end": 2777.31, "word": " You", "probability": 0.73681640625}, {"start": 2777.31, "end": 2777.71, "word": " shouldn't", "probability": 0.964599609375}, {"start": 2777.71, "end": 2778.11, "word": " need", "probability": 0.8857421875}, {"start": 2778.11, "end": 2778.25, "word": " to", "probability": 0.96435546875}, {"start": 2778.25, "end": 2778.41, "word": " look", "probability": 0.9658203125}, {"start": 2778.41, "end": 2778.75, "word": " again", "probability": 0.94287109375}, {"start": 2778.75, "end": 2778.93, "word": " to", "probability": 0.7265625}, {"start": 2778.93, "end": 2779.07, "word": " the", "probability": 0.9228515625}, {"start": 2779.07, "end": 2779.33, "word": " table.", "probability": 0.87548828125}, {"start": 2779.45, "end": 2779.71, "word": " Just", "probability": 0.8671875}, {"start": 2779.71, "end": 2780.21, "word": " subtract", "probability": 0.859375}, {"start": 2780.21, "end": 2780.45, "word": " 1", "probability": 0.406005859375}, {"start": 2780.45, "end": 2780.69, "word": " from", "probability": 0.8876953125}, {"start": 2780.69, "end": 2780.97, "word": " this", "probability": 0.9287109375}, {"start": 2780.97, "end": 2781.29, "word": " value.", "probability": 0.95458984375}, {"start": 2783.19, "end": 2783.75, "word": " Make", "probability": 0.364990234375}, {"start": 2783.75, "end": 2784.07, "word": " sense?", "probability": 0.8349609375}, 
{"start": 2786.03, "end": 2786.59, "word": " Here", "probability": 0.84619140625}, {"start": 2786.59, "end": 2786.77, "word": " we", "probability": 0.61181640625}, {"start": 2786.77, "end": 2787.19, "word": " compute", "probability": 0.8896484375}, {"start": 2787.19, "end": 2787.55, "word": " the", "probability": 0.9169921875}, {"start": 2787.55, "end": 2787.95, "word": " value", "probability": 0.97314453125}, {"start": 2787.95, "end": 2788.31, "word": " of", "probability": 0.9560546875}, {"start": 2788.31, "end": 2788.69, "word": " B", "probability": 0.51123046875}, {"start": 2788.69, "end": 2788.87, "word": " of", "probability": 0.951171875}, {"start": 2788.87, "end": 2788.97, "word": " Z", "probability": 0.615234375}, {"start": 2788.97, "end": 2789.21, "word": " less", "probability": 0.921875}, {"start": 2789.21, "end": 2789.37, "word": " than", "probability": 0.94140625}, {"start": 2789.37, "end": 2789.61, "word": " 1,", "probability": 0.90576171875}, {"start": 2789.91, "end": 2790.23, "word": " which", "probability": 0.9462890625}, {"start": 2790.23, "end": 2790.49, "word": " is", "probability": 0.94091796875}, {"start": 2790.49, "end": 2790.77, "word": " 0", "probability": 0.9072265625}, {"start": 2790.77, "end": 2791.67, "word": ".8413.", "probability": 0.70355224609375}, {"start": 2792.61, "end": 2792.95, "word": " We", "probability": 0.96240234375}, {"start": 2792.95, "end": 2793.09, "word": " are", "probability": 0.9375}, {"start": 2793.09, "end": 2793.37, "word": " looking", "probability": 0.91845703125}, {"start": 2793.37, "end": 2793.75, "word": " for", "probability": 0.95068359375}, {"start": 2793.75, "end": 2794.29, "word": " B", "probability": 0.9931640625}, {"start": 2794.29, "end": 2794.45, "word": " of", "probability": 0.97509765625}, {"start": 2794.45, "end": 2794.61, "word": " Z", "probability": 0.9873046875}, {"start": 2794.61, "end": 2794.89, "word": " less", "probability": 0.94775390625}, {"start": 2794.89, "end": 2795.09, "word": " than", 
"probability": 0.93701171875}, {"start": 2795.09, "end": 2795.43, "word": " minus", "probability": 0.953125}, {"start": 2795.43, "end": 2795.79, "word": " 1,", "probability": 0.96142578125}, {"start": 2796.93, "end": 2797.25, "word": " which", "probability": 0.9482421875}, {"start": 2797.25, "end": 2797.39, "word": " is", "probability": 0.9453125}, {"start": 2797.39, "end": 2797.57, "word": " the", "probability": 0.91796875}, {"start": 2797.57, "end": 2797.85, "word": " same", "probability": 0.91162109375}, {"start": 2797.85, "end": 2798.37, "word": " as", "probability": 0.95849609375}, {"start": 2798.37, "end": 2798.57, "word": " B", "probability": 0.99365234375}, {"start": 2798.57, "end": 2798.73, "word": " of", "probability": 0.96875}, {"start": 2798.73, "end": 2798.87, "word": " Z", "probability": 0.9912109375}, {"start": 2798.87, "end": 2799.25, "word": " greater", "probability": 0.90966796875}, {"start": 2799.25, "end": 2799.53, "word": " than", "probability": 0.9462890625}, {"start": 2799.53, "end": 2799.77, "word": " 1.", "probability": 0.9619140625}, {"start": 2800.75, "end": 2801.01, "word": " Now,", "probability": 0.85595703125}, {"start": 2801.07, "end": 2801.27, "word": " greater", "probability": 0.81689453125}, {"start": 2801.27, "end": 2801.53, "word": " than", "probability": 0.93798828125}, {"start": 2801.53, "end": 2801.79, "word": " means", "probability": 0.40234375}, {"start": 2801.79, "end": 2802.15, "word": " our", "probability": 0.6533203125}, {"start": 2802.15, "end": 2802.45, "word": " tail.", "probability": 0.38134765625}, {"start": 2802.63, "end": 2802.77, "word": " It's", "probability": 0.869140625}, {"start": 2802.77, "end": 2803.01, "word": " 1", "probability": 0.85595703125}, {"start": 2803.01, "end": 2803.47, "word": " minus", "probability": 0.98779296875}, {"start": 2803.47, "end": 2803.85, "word": " the", "probability": 0.90283203125}, {"start": 2803.85, "end": 2804.05, "word": " lower", "probability": 0.87646484375}, {"start": 
2804.05, "end": 2804.41, "word": " tail", "probability": 0.87939453125}, {"start": 2804.41, "end": 2804.97, "word": " probability.", "probability": 0.88720703125}], "temperature": 1.0}, {"id": 104, "seek": 283298, "start": 2806.26, "end": 2832.98, "text": " So this is 1 minus. So the answer again is 1 minus 0.8413. So 8413 minus 0.1587. So 8413.", "tokens": [407, 341, 307, 502, 3175, 13, 407, 264, 1867, 797, 307, 502, 3175, 1958, 13, 25494, 7668, 13, 407, 29018, 7668, 3175, 1958, 13, 5211, 23853, 13, 407, 29018, 7668, 13], "avg_logprob": -0.2462158128619194, "compression_ratio": 1.3846153846153846, "no_speech_prob": 0.0, "words": [{"start": 2806.26, "end": 2806.62, "word": " So", "probability": 0.70751953125}, {"start": 2806.62, "end": 2806.88, "word": " this", "probability": 0.57763671875}, {"start": 2806.88, "end": 2806.98, "word": " is", "probability": 0.943359375}, {"start": 2806.98, "end": 2807.2, "word": " 1", "probability": 0.43896484375}, {"start": 2807.2, "end": 2807.62, "word": " minus.", "probability": 0.85791015625}, {"start": 2808.0, "end": 2808.36, "word": " So", "probability": 0.87109375}, {"start": 2808.36, "end": 2808.7, "word": " the", "probability": 0.7646484375}, {"start": 2808.7, "end": 2809.0, "word": " answer", "probability": 0.96044921875}, {"start": 2809.0, "end": 2809.62, "word": " again", "probability": 0.6689453125}, {"start": 2809.62, "end": 2809.84, "word": " is", "probability": 0.90283203125}, {"start": 2809.84, "end": 2810.06, "word": " 1", "probability": 0.88525390625}, {"start": 2810.06, "end": 2810.56, "word": " minus", "probability": 0.96630859375}, {"start": 2810.56, "end": 2810.98, "word": " 0", "probability": 0.478271484375}, {"start": 2810.98, "end": 2812.24, "word": ".8413.", "probability": 0.869140625}, {"start": 2814.28, "end": 2815.16, "word": " So", "probability": 0.317626953125}, {"start": 2815.16, "end": 2816.76, "word": " 8413", "probability": 0.775634765625}, {"start": 2816.76, "end": 2817.76, "word": " minus", 
"probability": 0.982421875}, {"start": 2817.76, "end": 2818.62, "word": " 0", "probability": 0.97705078125}, {"start": 2818.62, "end": 2820.04, "word": ".1587.", "probability": 0.9879557291666666}, {"start": 2831.38, "end": 2832.26, "word": " So", "probability": 0.91259765625}, {"start": 2832.26, "end": 2832.98, "word": " 8413.", "probability": 0.862060546875}], "temperature": 1.0}, {"id": 105, "seek": 286059, "start": 2833.61, "end": 2860.59, "text": " minus 1.1587. Okay, so that gives 0.6826. Multiply this one by 100, we get 68.1826. So roughly 60-80%", "tokens": [3175, 502, 13, 5211, 23853, 13, 1033, 11, 370, 300, 2709, 1958, 13, 27102, 10880, 13, 31150, 356, 341, 472, 538, 2319, 11, 321, 483, 23317, 13, 6494, 10880, 13, 407, 9810, 4060, 12, 4702, 4], "avg_logprob": -0.3236908687127603, "compression_ratio": 1.0515463917525774, "no_speech_prob": 0.0, "words": [{"start": 2833.61, "end": 2834.05, "word": " minus", "probability": 0.1898193359375}, {"start": 2834.05, "end": 2834.45, "word": " 1", "probability": 0.67919921875}, {"start": 2834.45, "end": 2837.03, "word": ".1587.", "probability": 0.91015625}, {"start": 2841.63, "end": 2841.93, "word": " Okay,", "probability": 0.5888671875}, {"start": 2842.15, "end": 2842.35, "word": " so", "probability": 0.85693359375}, {"start": 2842.35, "end": 2842.67, "word": " that", "probability": 0.91357421875}, {"start": 2842.67, "end": 2843.33, "word": " gives", "probability": 0.64013671875}, {"start": 2843.33, "end": 2843.67, "word": " 0", "probability": 0.83740234375}, {"start": 2843.67, "end": 2847.57, "word": ".6826.", "probability": 0.9326171875}, {"start": 2849.09, "end": 2849.83, "word": " Multiply", "probability": 0.91455078125}, {"start": 2849.83, "end": 2850.05, "word": " this", "probability": 0.93603515625}, {"start": 2850.05, "end": 2850.25, "word": " one", "probability": 0.8779296875}, {"start": 2850.25, "end": 2850.41, "word": " by", "probability": 0.9736328125}, {"start": 2850.41, "end": 2850.93, "word": " 100,", 
"probability": 0.75634765625}, {"start": 2852.61, "end": 2852.95, "word": " we", "probability": 0.5341796875}, {"start": 2852.95, "end": 2853.25, "word": " get", "probability": 0.5078125}, {"start": 2853.25, "end": 2854.15, "word": " 68", "probability": 0.97021484375}, {"start": 2854.15, "end": 2857.55, "word": ".1826.", "probability": 0.78662109375}, {"start": 2858.75, "end": 2859.01, "word": " So", "probability": 0.9111328125}, {"start": 2859.01, "end": 2859.31, "word": " roughly", "probability": 0.73681640625}, {"start": 2859.31, "end": 2860.03, "word": " 60", "probability": 0.5390625}, {"start": 2860.03, "end": 2860.27, "word": "-80", "probability": 0.68212890625}, {"start": 2860.27, "end": 2860.59, "word": "%", "probability": 0.548828125}], "temperature": 1.0}, {"id": 106, "seek": 288963, "start": 2861.59, "end": 2889.63, "text": " of the observations lie between one standard deviation around the mean. So this is the way how can we compute the area below one standard deviation or above one standard deviation of the mean. 
Do the same for not mu minus sigma, mu plus minus two sigma and mu plus two sigma.", "tokens": [295, 264, 18163, 4544, 1296, 472, 3832, 25163, 926, 264, 914, 13, 407, 341, 307, 264, 636, 577, 393, 321, 14722, 264, 1859, 2507, 472, 3832, 25163, 420, 3673, 472, 3832, 25163, 295, 264, 914, 13, 1144, 264, 912, 337, 406, 2992, 3175, 12771, 11, 2992, 1804, 3175, 732, 12771, 293, 2992, 1804, 732, 12771, 13], "avg_logprob": -0.1414473705124437, "compression_ratio": 1.84, "no_speech_prob": 0.0, "words": [{"start": 2861.59, "end": 2861.91, "word": " of", "probability": 0.42138671875}, {"start": 2861.91, "end": 2862.05, "word": " the", "probability": 0.91552734375}, {"start": 2862.05, "end": 2862.63, "word": " observations", "probability": 0.76123046875}, {"start": 2862.63, "end": 2863.43, "word": " lie", "probability": 0.82275390625}, {"start": 2863.43, "end": 2864.01, "word": " between", "probability": 0.86083984375}, {"start": 2864.01, "end": 2865.29, "word": " one", "probability": 0.771484375}, {"start": 2865.29, "end": 2865.57, "word": " standard", "probability": 0.728515625}, {"start": 2865.57, "end": 2865.97, "word": " deviation", "probability": 0.9208984375}, {"start": 2865.97, "end": 2867.15, "word": " around", "probability": 0.873046875}, {"start": 2867.15, "end": 2867.41, "word": " the", "probability": 0.4990234375}, {"start": 2867.41, "end": 2867.53, "word": " mean.", "probability": 0.94921875}, {"start": 2869.45, "end": 2870.09, "word": " So", "probability": 0.9072265625}, {"start": 2870.09, "end": 2870.33, "word": " this", "probability": 0.841796875}, {"start": 2870.33, "end": 2870.47, "word": " is", "probability": 0.947265625}, {"start": 2870.47, "end": 2870.59, "word": " the", "probability": 0.8759765625}, {"start": 2870.59, "end": 2870.75, "word": " way", "probability": 0.96337890625}, {"start": 2870.75, "end": 2870.91, "word": " how", "probability": 0.82958984375}, {"start": 2870.91, "end": 2871.15, "word": " can", "probability": 0.83642578125}, {"start": 
2871.15, "end": 2871.45, "word": " we", "probability": 0.95556640625}, {"start": 2871.45, "end": 2872.05, "word": " compute", "probability": 0.896484375}, {"start": 2872.05, "end": 2872.51, "word": " the", "probability": 0.90673828125}, {"start": 2872.51, "end": 2872.87, "word": " area", "probability": 0.88818359375}, {"start": 2872.87, "end": 2873.59, "word": " below", "probability": 0.888671875}, {"start": 2873.59, "end": 2873.85, "word": " one", "probability": 0.92236328125}, {"start": 2873.85, "end": 2874.07, "word": " standard", "probability": 0.96875}, {"start": 2874.07, "end": 2874.43, "word": " deviation", "probability": 0.92431640625}, {"start": 2874.43, "end": 2875.19, "word": " or", "probability": 0.80419921875}, {"start": 2875.19, "end": 2875.55, "word": " above", "probability": 0.96435546875}, {"start": 2875.55, "end": 2876.67, "word": " one", "probability": 0.9169921875}, {"start": 2876.67, "end": 2876.89, "word": " standard", "probability": 0.96728515625}, {"start": 2876.89, "end": 2877.25, "word": " deviation", "probability": 0.9365234375}, {"start": 2877.25, "end": 2877.49, "word": " of", "probability": 0.96337890625}, {"start": 2877.49, "end": 2877.61, "word": " the", "probability": 0.92724609375}, {"start": 2877.61, "end": 2877.77, "word": " mean.", "probability": 0.9716796875}, {"start": 2879.53, "end": 2879.71, "word": " Do", "probability": 0.9482421875}, {"start": 2879.71, "end": 2879.93, "word": " the", "probability": 0.91650390625}, {"start": 2879.93, "end": 2880.27, "word": " same", "probability": 0.90478515625}, {"start": 2880.27, "end": 2882.15, "word": " for", "probability": 0.93310546875}, {"start": 2882.15, "end": 2882.87, "word": " not", "probability": 0.85205078125}, {"start": 2882.87, "end": 2883.05, "word": " mu", "probability": 0.6884765625}, {"start": 2883.05, "end": 2883.33, "word": " minus", "probability": 0.97509765625}, {"start": 2883.33, "end": 2883.79, "word": " sigma,", "probability": 0.908203125}, {"start": 2885.23, 
"end": 2885.47, "word": " mu", "probability": 0.91552734375}, {"start": 2885.47, "end": 2885.83, "word": " plus", "probability": 0.9169921875}, {"start": 2885.83, "end": 2887.37, "word": " minus", "probability": 0.9609375}, {"start": 2887.37, "end": 2887.91, "word": " two", "probability": 0.615234375}, {"start": 2887.91, "end": 2888.55, "word": " sigma", "probability": 0.9140625}, {"start": 2888.55, "end": 2888.71, "word": " and", "probability": 0.79150390625}, {"start": 2888.71, "end": 2888.87, "word": " mu", "probability": 0.94287109375}, {"start": 2888.87, "end": 2889.11, "word": " plus", "probability": 0.955078125}, {"start": 2889.11, "end": 2889.31, "word": " two", "probability": 0.92822265625}, {"start": 2889.31, "end": 2889.63, "word": " sigma.", "probability": 0.9228515625}], "temperature": 1.0}, {"id": 107, "seek": 291690, "start": 2890.94, "end": 2916.9, "text": " The only difference is that this one is going to be minus 2 and do the same. That's the empirical rule we discussed in chapter 3. So here we can find any probability, not just 95 or 68 or 99.7. 
We can use the normal table to give or to find or to compute any probability.", "tokens": [440, 787, 2649, 307, 300, 341, 472, 307, 516, 281, 312, 3175, 568, 293, 360, 264, 912, 13, 663, 311, 264, 31886, 4978, 321, 7152, 294, 7187, 805, 13, 407, 510, 321, 393, 915, 604, 8482, 11, 406, 445, 13420, 420, 23317, 420, 11803, 13, 22, 13, 492, 393, 764, 264, 2710, 3199, 281, 976, 420, 281, 915, 420, 281, 14722, 604, 8482, 13], "avg_logprob": -0.20120192307692308, "compression_ratio": 1.4702702702702704, "no_speech_prob": 0.0, "words": [{"start": 2890.94, "end": 2891.54, "word": " The", "probability": 0.6552734375}, {"start": 2891.54, "end": 2892.14, "word": " only", "probability": 0.9248046875}, {"start": 2892.14, "end": 2892.68, "word": " difference", "probability": 0.8671875}, {"start": 2892.68, "end": 2893.04, "word": " is", "probability": 0.93017578125}, {"start": 2893.04, "end": 2893.38, "word": " that", "probability": 0.8837890625}, {"start": 2893.38, "end": 2894.02, "word": " this", "probability": 0.849609375}, {"start": 2894.02, "end": 2894.24, "word": " one", "probability": 0.91552734375}, {"start": 2894.24, "end": 2894.36, "word": " is", "probability": 0.343505859375}, {"start": 2894.36, "end": 2894.42, "word": " going", "probability": 0.7412109375}, {"start": 2894.42, "end": 2894.46, "word": " to", "probability": 0.97509765625}, {"start": 2894.46, "end": 2894.6, "word": " be", "probability": 0.92333984375}, {"start": 2894.6, "end": 2894.88, "word": " minus", "probability": 0.6181640625}, {"start": 2894.88, "end": 2895.26, "word": " 2", "probability": 0.451904296875}, {"start": 2895.26, "end": 2896.26, "word": " and", "probability": 0.7041015625}, {"start": 2896.26, "end": 2896.84, "word": " do", "probability": 0.7587890625}, {"start": 2896.84, "end": 2897.0, "word": " the", "probability": 0.92236328125}, {"start": 2897.0, "end": 2897.28, "word": " same.", "probability": 0.892578125}, {"start": 2900.62, "end": 2901.22, "word": " That's", "probability": 
0.616455078125}, {"start": 2901.22, "end": 2901.4, "word": " the", "probability": 0.9140625}, {"start": 2901.4, "end": 2901.74, "word": " empirical", "probability": 0.88232421875}, {"start": 2901.74, "end": 2902.02, "word": " rule", "probability": 0.9208984375}, {"start": 2902.02, "end": 2902.18, "word": " we", "probability": 0.8837890625}, {"start": 2902.18, "end": 2902.58, "word": " discussed", "probability": 0.865234375}, {"start": 2902.58, "end": 2902.78, "word": " in", "probability": 0.9365234375}, {"start": 2902.78, "end": 2903.08, "word": " chapter", "probability": 0.51708984375}, {"start": 2903.08, "end": 2904.08, "word": " 3.", "probability": 0.57666015625}, {"start": 2904.68, "end": 2904.92, "word": " So", "probability": 0.912109375}, {"start": 2904.92, "end": 2905.06, "word": " here", "probability": 0.7958984375}, {"start": 2905.06, "end": 2905.2, "word": " we", "probability": 0.85107421875}, {"start": 2905.2, "end": 2905.48, "word": " can", "probability": 0.9482421875}, {"start": 2905.48, "end": 2906.54, "word": " find", "probability": 0.88623046875}, {"start": 2906.54, "end": 2906.92, "word": " any", "probability": 0.89306640625}, {"start": 2906.92, "end": 2907.38, "word": " probability,", "probability": 0.96142578125}, {"start": 2908.48, "end": 2908.72, "word": " not", "probability": 0.9443359375}, {"start": 2908.72, "end": 2908.98, "word": " just", "probability": 0.92236328125}, {"start": 2908.98, "end": 2909.36, "word": " 95", "probability": 0.98095703125}, {"start": 2909.36, "end": 2909.68, "word": " or", "probability": 0.82568359375}, {"start": 2909.68, "end": 2910.06, "word": " 68", "probability": 0.96728515625}, {"start": 2910.06, "end": 2910.42, "word": " or", "probability": 0.93994140625}, {"start": 2910.42, "end": 2910.7, "word": " 99", "probability": 0.9716796875}, {"start": 2910.7, "end": 2911.24, "word": ".7.", "probability": 0.981201171875}, {"start": 2912.0, "end": 2912.3, "word": " We", "probability": 0.93603515625}, {"start": 2912.3, 
"end": 2912.54, "word": " can", "probability": 0.947265625}, {"start": 2912.54, "end": 2912.74, "word": " use", "probability": 0.88232421875}, {"start": 2912.74, "end": 2912.86, "word": " the", "probability": 0.6767578125}, {"start": 2912.86, "end": 2913.08, "word": " normal", "probability": 0.88671875}, {"start": 2913.08, "end": 2913.38, "word": " table", "probability": 0.85546875}, {"start": 2913.38, "end": 2913.66, "word": " to", "probability": 0.94384765625}, {"start": 2913.66, "end": 2913.96, "word": " give", "probability": 0.5595703125}, {"start": 2913.96, "end": 2914.58, "word": " or", "probability": 0.6953125}, {"start": 2914.58, "end": 2914.76, "word": " to", "probability": 0.93798828125}, {"start": 2914.76, "end": 2915.06, "word": " find", "probability": 0.88623046875}, {"start": 2915.06, "end": 2915.3, "word": " or", "probability": 0.9091796875}, {"start": 2915.3, "end": 2915.4, "word": " to", "probability": 0.74560546875}, {"start": 2915.4, "end": 2915.72, "word": " compute", "probability": 0.9072265625}, {"start": 2915.72, "end": 2916.1, "word": " any", "probability": 0.8876953125}, {"start": 2916.1, "end": 2916.9, "word": " probability.", "probability": 0.548828125}], "temperature": 1.0}, {"id": 108, "seek": 295421, "start": 2928.27, "end": 2954.21, "text": " So again, for the other one, mu plus or minus two sigma, it covers about 95% of the axis. For mu plus or minus three sigma, it covers around all the data, 99.7. 
So just do it at home, you will see that the exact area is 95.44 instead of 95.", "tokens": [407, 797, 11, 337, 264, 661, 472, 11, 2992, 1804, 420, 3175, 732, 12771, 11, 309, 10538, 466, 13420, 4, 295, 264, 10298, 13, 1171, 2992, 1804, 420, 3175, 1045, 12771, 11, 309, 10538, 926, 439, 264, 1412, 11, 11803, 13, 22, 13, 407, 445, 360, 309, 412, 1280, 11, 291, 486, 536, 300, 264, 1900, 1859, 307, 13420, 13, 13912, 2602, 295, 13420, 13], "avg_logprob": -0.1667850430716168, "compression_ratio": 1.50625, "no_speech_prob": 0.0, "words": [{"start": 2928.27, "end": 2928.47, "word": " So", "probability": 0.921875}, {"start": 2928.47, "end": 2928.75, "word": " again,", "probability": 0.8046875}, {"start": 2928.87, "end": 2929.05, "word": " for", "probability": 0.95556640625}, {"start": 2929.05, "end": 2929.35, "word": " the", "probability": 0.9267578125}, {"start": 2929.35, "end": 2929.97, "word": " other", "probability": 0.8994140625}, {"start": 2929.97, "end": 2930.29, "word": " one,", "probability": 0.93359375}, {"start": 2931.27, "end": 2931.41, "word": " mu", "probability": 0.49267578125}, {"start": 2931.41, "end": 2932.41, "word": " plus", "probability": 0.822265625}, {"start": 2932.41, "end": 2932.63, "word": " or", "probability": 0.80712890625}, {"start": 2932.63, "end": 2932.85, "word": " minus", "probability": 0.99072265625}, {"start": 2932.85, "end": 2933.09, "word": " two", "probability": 0.59521484375}, {"start": 2933.09, "end": 2933.43, "word": " sigma,", "probability": 0.8701171875}, {"start": 2933.75, "end": 2933.99, "word": " it", "probability": 0.9013671875}, {"start": 2933.99, "end": 2934.27, "word": " covers", "probability": 0.8642578125}, {"start": 2934.27, "end": 2934.71, "word": " about", "probability": 0.90283203125}, {"start": 2934.71, "end": 2936.61, "word": " 95", "probability": 0.85302734375}, {"start": 2936.61, "end": 2937.13, "word": "%", "probability": 0.7490234375}, {"start": 2937.13, "end": 2938.01, "word": " of", "probability": 
0.96923828125}, {"start": 2938.01, "end": 2938.17, "word": " the", "probability": 0.92529296875}, {"start": 2938.17, "end": 2938.43, "word": " axis.", "probability": 0.50146484375}, {"start": 2939.47, "end": 2939.95, "word": " For", "probability": 0.9267578125}, {"start": 2939.95, "end": 2940.19, "word": " mu", "probability": 0.93798828125}, {"start": 2940.19, "end": 2940.51, "word": " plus", "probability": 0.95947265625}, {"start": 2940.51, "end": 2940.71, "word": " or", "probability": 0.9267578125}, {"start": 2940.71, "end": 2940.99, "word": " minus", "probability": 0.9892578125}, {"start": 2940.99, "end": 2941.21, "word": " three", "probability": 0.9169921875}, {"start": 2941.21, "end": 2941.51, "word": " sigma,", "probability": 0.89453125}, {"start": 2941.65, "end": 2941.75, "word": " it", "probability": 0.93603515625}, {"start": 2941.75, "end": 2942.03, "word": " covers", "probability": 0.853515625}, {"start": 2942.03, "end": 2942.47, "word": " around", "probability": 0.89892578125}, {"start": 2942.47, "end": 2943.75, "word": " all", "probability": 0.7197265625}, {"start": 2943.75, "end": 2943.87, "word": " the", "probability": 0.78564453125}, {"start": 2943.87, "end": 2944.11, "word": " data,", "probability": 0.78369140625}, {"start": 2944.53, "end": 2944.83, "word": " 99", "probability": 0.78564453125}, {"start": 2944.83, "end": 2945.79, "word": ".7.", "probability": 0.99560546875}, {"start": 2946.53, "end": 2946.75, "word": " So", "probability": 0.95166015625}, {"start": 2946.75, "end": 2947.11, "word": " just", "probability": 0.87158203125}, {"start": 2947.11, "end": 2947.37, "word": " do", "probability": 0.9345703125}, {"start": 2947.37, "end": 2947.59, "word": " it", "probability": 0.69580078125}, {"start": 2947.59, "end": 2947.79, "word": " at", "probability": 0.322998046875}, {"start": 2947.79, "end": 2948.03, "word": " home,", "probability": 0.77783203125}, {"start": 2948.17, "end": 2948.29, "word": " you", "probability": 0.94091796875}, {"start": 
2948.29, "end": 2948.45, "word": " will", "probability": 0.76416015625}, {"start": 2948.45, "end": 2948.69, "word": " see", "probability": 0.927734375}, {"start": 2948.69, "end": 2949.01, "word": " that", "probability": 0.93310546875}, {"start": 2949.01, "end": 2949.75, "word": " the", "probability": 0.9111328125}, {"start": 2949.75, "end": 2950.17, "word": " exact", "probability": 0.92919921875}, {"start": 2950.17, "end": 2950.55, "word": " area", "probability": 0.8916015625}, {"start": 2950.55, "end": 2950.91, "word": " is", "probability": 0.9482421875}, {"start": 2950.91, "end": 2951.91, "word": " 95", "probability": 0.98291015625}, {"start": 2951.91, "end": 2952.71, "word": ".44", "probability": 0.99462890625}, {"start": 2952.71, "end": 2953.63, "word": " instead", "probability": 0.6982421875}, {"start": 2953.63, "end": 2953.81, "word": " of", "probability": 0.96630859375}, {"start": 2953.81, "end": 2954.21, "word": " 95.", "probability": 0.982421875}], "temperature": 1.0}, {"id": 109, "seek": 298206, "start": 2954.84, "end": 2982.06, "text": " And the other one is 99.73. So that's the empirical rule we discussed in chapter three. I'm going to stop at this point, which is the x value for the normal probability. 
Now, what we discussed so far, we computed the probability.", "tokens": [400, 264, 661, 472, 307, 11803, 13, 33396, 13, 407, 300, 311, 264, 31886, 4978, 321, 7152, 294, 7187, 1045, 13, 286, 478, 516, 281, 1590, 412, 341, 935, 11, 597, 307, 264, 2031, 2158, 337, 264, 2710, 8482, 13, 823, 11, 437, 321, 7152, 370, 1400, 11, 321, 40610, 264, 8482, 13], "avg_logprob": -0.1624710609515508, "compression_ratio": 1.4197530864197532, "no_speech_prob": 0.0, "words": [{"start": 2954.84, "end": 2955.12, "word": " And", "probability": 0.53173828125}, {"start": 2955.12, "end": 2955.2, "word": " the", "probability": 0.7802734375}, {"start": 2955.2, "end": 2955.4, "word": " other", "probability": 0.88671875}, {"start": 2955.4, "end": 2955.58, "word": " one", "probability": 0.9150390625}, {"start": 2955.58, "end": 2955.74, "word": " is", "probability": 0.94677734375}, {"start": 2955.74, "end": 2956.1, "word": " 99", "probability": 0.9091796875}, {"start": 2956.1, "end": 2956.98, "word": ".73.", "probability": 0.99072265625}, {"start": 2957.5, "end": 2958.08, "word": " So", "probability": 0.947265625}, {"start": 2958.08, "end": 2958.38, "word": " that's", "probability": 0.91748046875}, {"start": 2958.38, "end": 2958.52, "word": " the", "probability": 0.9033203125}, {"start": 2958.52, "end": 2958.9, "word": " empirical", "probability": 0.904296875}, {"start": 2958.9, "end": 2959.22, "word": " rule", "probability": 0.88232421875}, {"start": 2959.22, "end": 2959.44, "word": " we", "probability": 0.94140625}, {"start": 2959.44, "end": 2959.98, "word": " discussed", "probability": 0.880859375}, {"start": 2959.98, "end": 2961.58, "word": " in", "probability": 0.908203125}, {"start": 2961.58, "end": 2961.84, "word": " chapter", "probability": 0.40673828125}, {"start": 2961.84, "end": 2962.26, "word": " three.", "probability": 0.671875}, {"start": 2963.1, "end": 2963.52, "word": " I'm", "probability": 0.9521484375}, {"start": 2963.52, "end": 2963.68, "word": " going", "probability": 
0.94677734375}, {"start": 2963.68, "end": 2963.88, "word": " to", "probability": 0.96826171875}, {"start": 2963.88, "end": 2964.16, "word": " stop", "probability": 0.916015625}, {"start": 2964.16, "end": 2965.44, "word": " at", "probability": 0.96044921875}, {"start": 2965.44, "end": 2965.76, "word": " this", "probability": 0.9482421875}, {"start": 2965.76, "end": 2966.22, "word": " point,", "probability": 0.9697265625}, {"start": 2967.26, "end": 2967.86, "word": " which", "probability": 0.947265625}, {"start": 2967.86, "end": 2968.26, "word": " is", "probability": 0.92431640625}, {"start": 2968.26, "end": 2970.46, "word": " the", "probability": 0.1947021484375}, {"start": 2970.46, "end": 2972.18, "word": " x", "probability": 0.5830078125}, {"start": 2972.18, "end": 2972.56, "word": " value", "probability": 0.86328125}, {"start": 2972.56, "end": 2974.34, "word": " for", "probability": 0.91455078125}, {"start": 2974.34, "end": 2974.64, "word": " the", "probability": 0.58154296875}, {"start": 2974.64, "end": 2975.02, "word": " normal", "probability": 0.87060546875}, {"start": 2975.02, "end": 2975.5, "word": " probability.", "probability": 0.966796875}, {"start": 2976.98, "end": 2977.48, "word": " Now,", "probability": 0.95654296875}, {"start": 2977.66, "end": 2977.98, "word": " what", "probability": 0.93798828125}, {"start": 2977.98, "end": 2978.12, "word": " we", "probability": 0.95849609375}, {"start": 2978.12, "end": 2978.4, "word": " discussed", "probability": 0.71826171875}, {"start": 2978.4, "end": 2978.7, "word": " so", "probability": 0.9501953125}, {"start": 2978.7, "end": 2978.98, "word": " far,", "probability": 0.9404296875}, {"start": 2980.72, "end": 2981.0, "word": " we", "probability": 0.95947265625}, {"start": 2981.0, "end": 2981.5, "word": " computed", "probability": 0.908203125}, {"start": 2981.5, "end": 2981.7, "word": " the", "probability": 0.87451171875}, {"start": 2981.7, "end": 2982.06, "word": " probability.", "probability": 0.95947265625}], 
"temperature": 1.0}, {"id": 110, "seek": 301142, "start": 2983.16, "end": 3011.42, "text": " I mean, what's the probability of X smaller than E? Now, suppose this probability is known. How can we compute this value? Later, we'll talk about that. It's backward calculations. It's inverse or backward calculation.", "tokens": [286, 914, 11, 437, 311, 264, 8482, 295, 1783, 4356, 813, 462, 30, 823, 11, 7297, 341, 8482, 307, 2570, 13, 1012, 393, 321, 14722, 341, 2158, 30, 11965, 11, 321, 603, 751, 466, 300, 13, 467, 311, 23897, 20448, 13, 467, 311, 17340, 420, 23897, 17108, 13], "avg_logprob": -0.1843112293554812, "compression_ratio": 1.4797297297297298, "no_speech_prob": 0.0, "words": [{"start": 2983.16, "end": 2983.4, "word": " I", "probability": 0.83544921875}, {"start": 2983.4, "end": 2983.56, "word": " mean,", "probability": 0.966796875}, {"start": 2983.74, "end": 2983.96, "word": " what's", "probability": 0.938720703125}, {"start": 2983.96, "end": 2984.04, "word": " the", "probability": 0.88720703125}, {"start": 2984.04, "end": 2984.3, "word": " probability", "probability": 0.958984375}, {"start": 2984.3, "end": 2984.58, "word": " of", "probability": 0.94970703125}, {"start": 2984.58, "end": 2984.94, "word": " X", "probability": 0.8251953125}, {"start": 2984.94, "end": 2986.0, "word": " smaller", "probability": 0.7724609375}, {"start": 2986.0, "end": 2986.3, "word": " than", "probability": 0.95263671875}, {"start": 2986.3, "end": 2986.48, "word": " E?", "probability": 0.63623046875}, {"start": 2988.86, "end": 2989.12, "word": " Now,", "probability": 0.88916015625}, {"start": 2989.2, "end": 2989.74, "word": " suppose", "probability": 0.89453125}, {"start": 2989.74, "end": 2991.64, "word": " this", "probability": 0.52392578125}, {"start": 2991.64, "end": 2992.2, "word": " probability", "probability": 0.95361328125}, {"start": 2992.2, "end": 2992.56, "word": " is", "probability": 0.9453125}, {"start": 2992.56, "end": 2992.84, "word": " known.", "probability": 
0.716796875}, {"start": 2995.12, "end": 2995.76, "word": " How", "probability": 0.95703125}, {"start": 2995.76, "end": 2996.06, "word": " can", "probability": 0.9404296875}, {"start": 2996.06, "end": 2996.24, "word": " we", "probability": 0.943359375}, {"start": 2996.24, "end": 2996.62, "word": " compute", "probability": 0.8984375}, {"start": 2996.62, "end": 2996.86, "word": " this", "probability": 0.6748046875}, {"start": 2996.86, "end": 2997.08, "word": " value?", "probability": 0.7861328125}, {"start": 2999.48, "end": 3000.12, "word": " Later,", "probability": 0.82666015625}, {"start": 3000.36, "end": 3000.78, "word": " we'll", "probability": 0.907470703125}, {"start": 3000.78, "end": 3001.02, "word": " talk", "probability": 0.90185546875}, {"start": 3001.02, "end": 3001.28, "word": " about", "probability": 0.90478515625}, {"start": 3001.28, "end": 3001.5, "word": " that.", "probability": 0.787109375}, {"start": 3006.3, "end": 3006.94, "word": " It's", "probability": 0.963623046875}, {"start": 3006.94, "end": 3007.52, "word": " backward", "probability": 0.80078125}, {"start": 3007.52, "end": 3008.24, "word": " calculations.", "probability": 0.7841796875}, {"start": 3008.9, "end": 3009.18, "word": " It's", "probability": 0.9638671875}, {"start": 3009.18, "end": 3009.56, "word": " inverse", "probability": 0.7880859375}, {"start": 3009.56, "end": 3009.82, "word": " or", "probability": 0.87744140625}, {"start": 3009.82, "end": 3010.16, "word": " backward", "probability": 0.9482421875}, {"start": 3010.16, "end": 3011.42, "word": " calculation.", "probability": 0.57373046875}], "temperature": 1.0}, {"id": 111, "seek": 301446, "start": 3013.3, "end": 3014.46, "text": " for next time inshallah.", "tokens": [337, 958, 565, 1028, 71, 13492, 13], "avg_logprob": -0.3422851674258709, "compression_ratio": 0.7575757575757576, "no_speech_prob": 0.0, "words": [{"start": 3013.3, "end": 3013.58, "word": " for", "probability": 0.56640625}, {"start": 3013.58, "end": 3013.82, "word": 
" next", "probability": 0.92333984375}, {"start": 3013.82, "end": 3014.1, "word": " time", "probability": 0.89697265625}, {"start": 3014.1, "end": 3014.46, "word": " inshallah.", "probability": 0.6897786458333334}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3021.0555, "duration_after_vad": 2871.3306249999905} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/R6shw6IZsm8_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/R6shw6IZsm8_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..a65b03746bb14c5d563bd4ecd7b1db95272a5ca9 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/R6shw6IZsm8_postprocess.srt @@ -0,0 +1,2108 @@ +1 +00:00:11,500 --> 00:00:17,380 +Last time, we talked about chi-square tests. And + +2 +00:00:17,380 --> 00:00:21,580 +we mentioned that there are two objectives in this + +3 +00:00:21,580 --> 00:00:25,220 +chapter. The first one is when to use chi-square + +4 +00:00:25,220 --> 00:00:28,630 +tests for contingency tables. And the other + +5 +00:00:28,630 --> 00:00:31,070 +objective is how to use chi-square tests for + +6 +00:00:31,070 --> 00:00:35,410 +contingency tables. And we did one chi-square test + +7 +00:00:35,410 --> 00:00:42,050 +for the difference between two proportions. In the + +8 +00:00:42,050 --> 00:00:44,630 +null hypothesis, the two proportions are equal. I + +9 +00:00:44,630 --> 00:00:47,970 +mean, proportion for population 1 equals + +10 +00:00:47,970 --> 00:00:52,970 +population proportion 2 against the alternative + +11 +00:00:52,970 --> 00:00:58,470 +here is two-sided test. Pi 1 does not equal pi 2. + +12 +00:00:59,310 --> 00:01:04,210 +In this case, we can use either this statistic. 
So + +13 +00:01:04,210 --> 00:01:05,150 +you may + +14 +00:01:07,680 --> 00:01:15,520 +Z statistic, which is b1 minus b2 minus y1 minus + +15 +00:01:15,520 --> 00:01:21,840 +y2 divided by b + +16 +00:01:21,840 --> 00:01:27,500 +dash times 1 minus b dash multiplied by 1 over n1 + +17 +00:01:27,500 --> 00:01:31,200 +plus 1 over n2. This quantity under the square + +18 +00:01:31,200 --> 00:01:33,080 +root, where b dash + +19 +00:01:42,180 --> 00:01:48,580 +Or proportionally, where P dash equals X1 plus X2 + +20 +00:01:48,580 --> 00:01:55,560 +divided by N1 plus N2. Or, + +21 +00:01:58,700 --> 00:02:00,720 +in this chapter, we are going to use chi-square + +22 +00:02:00,720 --> 00:02:04,520 +statistic, which is given by this equation. Chi + +23 +00:02:04,520 --> 00:02:09,620 +-square statistic is just sum of observed + +24 +00:02:09,620 --> 00:02:10,940 +frequency, FO. + +25 +00:02:15,530 --> 00:02:20,070 +minus expected frequency squared divided by + +26 +00:02:20,070 --> 00:02:22,490 +expected frequency for all cells. + +27 +00:02:25,210 --> 00:02:29,070 +Chi squared, this statistic is given by this + +28 +00:02:29,070 --> 00:02:34,190 +equation. If there are two by two rows and + +29 +00:02:34,190 --> 00:02:36,290 +columns, I mean there are two rows and two + +30 +00:02:36,290 --> 00:02:40,270 +columns. So in this case, my table is two by two. + +31 +00:02:42,120 --> 00:02:44,360 +In this case, you have only one degree of freedom. + +32 +00:02:44,640 --> 00:02:50,440 +Always degrees of freedom equals number of rows + +33 +00:02:50,440 --> 00:03:00,320 +minus one multiplied by number of columns minus + +34 +00:03:00,320 --> 00:03:06,140 +one. So for two by two tables, there are two rows + +35 +00:03:06,140 --> 00:03:11,560 +and two columns, so two minus one. times 2 minus + +36 +00:03:11,560 --> 00:03:15,420 +1, so your degrees of freedom in this case is 1. 
+ +37 +00:03:16,440 --> 00:03:19,320 +Here the assumption is we assume that the expected + +38 +00:03:19,320 --> 00:03:22,940 +frequency is at least 5, in order to use Chi + +39 +00:03:22,940 --> 00:03:27,200 +-square statistic. Chi-square is always positive, + +40 +00:03:27,680 --> 00:03:32,260 +I mean, Chi-square value is always greater than 0. + +41 +00:03:34,040 --> 00:03:38,890 +It's one TLTS to the right one. We reject F0 if + +42 +00:03:38,890 --> 00:03:42,430 +your chi-square statistic falls in the rejection + +43 +00:03:42,430 --> 00:03:45,850 +region. That means we reject the null hypothesis + +44 +00:03:45,850 --> 00:03:49,470 +if chi-square statistic greater than chi-square + +45 +00:03:49,470 --> 00:03:53,130 +alpha. Alpha can be determined by using chi-square + +46 +00:03:53,130 --> 00:03:56,490 +table. So we reject in this case F0, otherwise, + +47 +00:03:56,890 --> 00:04:02,050 +sorry, we don't reject F0. So again, if the value + +48 +00:04:02,050 --> 00:04:05,350 +of chi-square statistic falls in this rejection + +49 +00:04:05,350 --> 00:04:10,280 +region, the yellow one, then we reject. Otherwise, + +50 +00:04:11,100 --> 00:04:13,900 +if this value, I mean if the value of the + +51 +00:04:13,900 --> 00:04:17,060 +statistic falls in non-rejection region, we don't + +52 +00:04:17,060 --> 00:04:21,680 +reject the null hypothesis. So the same concept as + +53 +00:04:21,680 --> 00:04:27,680 +we did in the previous chapters. If we go back to + +54 +00:04:27,680 --> 00:04:32,060 +the previous example we had discussed before, when + +55 +00:04:32,060 --> 00:04:36,620 +we are testing about gender and left and right + +56 +00:04:36,620 --> 00:04:41,340 +handers, So hand preference either left or right. + +57 +00:04:42,960 --> 00:04:49,320 +And the question is test to see whether hand + +58 +00:04:49,320 --> 00:04:53,100 +preference and gender are related or not. 
In this + +59 +00:04:53,100 --> 00:04:56,960 +case, your null hypothesis could be written as + +60 +00:04:56,960 --> 00:05:00,620 +either X0. + +61 +00:05:04,220 --> 00:05:07,160 +So the proportion of left-handers for female + +62 +00:05:07,160 --> 00:05:12,260 +equals the proportion of males left-handers. So by + +63 +00:05:12,260 --> 00:05:16,600 +one equals by two or H zero later we'll see that + +64 +00:05:16,600 --> 00:05:22,680 +the two variables of interest are independent. + +65 +00:05:32,810 --> 00:05:37,830 +Now, your B dash is + +66 +00:05:37,830 --> 00:05:42,250 +given by X1 plus X2 divided by N1 plus N2. X1 is + +67 +00:05:42,250 --> 00:05:51,930 +12, this 12, plus 24 divided by 300. That will + +68 +00:05:51,930 --> 00:05:57,310 +give 12%. So let me just write this notation, B + +69 +00:05:57,310 --> 00:05:57,710 +dash. + +70 +00:06:05,560 --> 00:06:13,740 +equals 36 by 300, so that's 12%. So the expected + +71 +00:06:13,740 --> 00:06:19,500 +frequency in this case for female, 0.12 times 120, + +72 +00:06:19,680 --> 00:06:22,140 +because there are 120 females in the data you + +73 +00:06:22,140 --> 00:06:25,520 +have, so that will give 14.4. So the expected + +74 +00:06:25,520 --> 00:06:30,680 +frequency is 0.12 times 180, 120, I'm sorry, + +75 +00:06:34,810 --> 00:06:39,590 +That will give 14.4. Similarly, for male to be + +76 +00:06:39,590 --> 00:06:43,390 +left-handed is 0.12 times number of females in the + +77 +00:06:43,390 --> 00:06:46,690 +sample, which is 180, and that will give 21.6. + +78 +00:06:48,670 --> 00:06:53,190 +Now, since you compute the expected for the first + +79 +00:06:53,190 --> 00:06:57,590 +cell, the second one direct is just the complement + +80 +00:06:57,590 --> 00:07:03,020 +120. 120 is sample size for the Rome. I mean + +81 +00:07:03,020 --> 00:07:12,200 +female total 120 minus 14.4 will give 105.6. Or 0 + +82 +00:07:12,200 --> 00:07:18,050 +.88 times 120 will give the same value. 
Here, the + +83 +00:07:18,050 --> 00:07:21,730 +expected is 21.6, so the compliment is the, I'm + +84 +00:07:21,730 --> 00:07:25,130 +sorry, the expected is just the compliment, which + +85 +00:07:25,130 --> 00:07:32,010 +is 180 minus 21.6 will give 158.4. Or 0.88 is the + +86 +00:07:32,010 --> 00:07:35,090 +compliment of that one multiplied by 180 will give + +87 +00:07:35,090 --> 00:07:39,070 +the same value. So that's the one we had discussed + +88 +00:07:39,070 --> 00:07:39,670 +before. + +89 +00:07:42,410 --> 00:07:46,550 +On this result, you can determine the value of chi + +90 +00:07:46,550 --> 00:07:50,750 +-square statistic by using this equation. Sum of F + +91 +00:07:50,750 --> 00:07:53,810 +observed minus F expected squared divided by F + +92 +00:07:53,810 --> 00:07:57,450 +expected for each cell. You have to compute the + +93 +00:07:57,450 --> 00:08:00,870 +value of chi-square for each cell. In this case, + +94 +00:08:01,070 --> 00:08:04,250 +the simplest case is just 2 by 2 table. So 12 + +95 +00:08:04,250 --> 00:08:09,980 +minus 14.4 squared divided by 14.4. Plus the + +96 +00:08:09,980 --> 00:08:15,720 +second one 108 minus 105 squared divided by 105 up + +97 +00:08:15,720 --> 00:08:19,780 +to the last one, you will get this result. Now my + +98 +00:08:19,780 --> 00:08:21,900 +chi-square value is 0.7576. + +99 +00:08:24,240 --> 00:08:28,140 +And in this case, if chi-square value is very + +100 +00:08:28,140 --> 00:08:31,180 +small, I mean it's close to zero, then we don't + +101 +00:08:31,180 --> 00:08:34,140 +reject the null hypothesis. Because the smallest + +102 +00:08:34,140 --> 00:08:37,500 +value of chi-square is zero, and zero happens only + +103 +00:08:37,500 --> 00:08:43,580 +if f observed is close to f expected. 
So here if + +104 +00:08:43,580 --> 00:08:46,920 +you look carefully for the observed and expected + +105 +00:08:46,920 --> 00:08:50,520 +frequencies, you can tell if you can reject or + +106 +00:08:50,520 --> 00:08:53,700 +don't reject the number. Now the difference + +107 +00:08:53,700 --> 00:08:58,960 +between these values looks small, so that's lead + +108 +00:08:58,960 --> 00:09:05,110 +to small chi-square. So without doing the critical + +109 +00:09:05,110 --> 00:09:08,210 +value, computer critical value, you can determine + +110 +00:09:08,210 --> 00:09:11,890 +that we don't reject the null hypothesis. Because + +111 +00:09:11,890 --> 00:09:16,070 +your chi-square value is very small. So we don't + +112 +00:09:16,070 --> 00:09:18,670 +reject the null hypothesis. Or if you look + +113 +00:09:18,670 --> 00:09:22,790 +carefully at the table, for the table we have + +114 +00:09:22,790 --> 00:09:26,410 +here, for chi-square table. By the way, the + +115 +00:09:26,410 --> 00:09:31,480 +smallest value of chi-square is 1.3. under 1 + +116 +00:09:31,480 --> 00:09:36,180 +degrees of freedom. So the smallest value 1.32. So + +117 +00:09:36,180 --> 00:09:39,360 +if your chi-square value is greater than 1, it + +118 +00:09:39,360 --> 00:09:41,920 +means maybe you reject or don't reject. It depends + +119 +00:09:41,920 --> 00:09:45,920 +on v value and alpha you have or degrees of + +120 +00:09:45,920 --> 00:09:50,280 +freedom. But in the worst scenario, if your chi + +121 +00:09:50,280 --> 00:09:53,780 +-square is smaller than this value, it means you + +122 +00:09:53,780 --> 00:09:57,600 +don't reject the null hypothesis. So generally + +123 +00:09:57,600 --> 00:10:02,120 +speaking, if Chi-square is statistical. It's + +124 +00:10:02,120 --> 00:10:06,420 +smaller than 1.32. 1.32 is a very small value. + +125 +00:10:06,940 --> 00:10:15,560 +Then we don't reject. Then we don't reject x0. + +126 +00:10:15,780 --> 00:10:24,220 +That's always, always true. 
Regardless of degrees + +127 +00:10:24,220 --> 00:10:31,050 +of freedom and alpha. My chi-square is close to + +128 +00:10:31,050 --> 00:10:35,710 +zero, or smaller than 1.32, because the minimum + +129 +00:10:35,710 --> 00:10:40,990 +value of critical value is 1.32. Imagine that we + +130 +00:10:40,990 --> 00:10:46,050 +are talking about alpha is 5%. So alpha is 5, so + +131 +00:10:46,050 --> 00:10:48,750 +your critical value, the smallest one for 1 + +132 +00:10:48,750 --> 00:10:53,850 +degrees of freedom, is 3.84. So that's my + +133 +00:10:53,850 --> 00:10:55,490 +smallest, if alpha + +134 +00:11:03,740 --> 00:11:08,680 +Last time we mentioned that this value is just 1 + +135 +00:11:08,680 --> 00:11:17,760 +.96 squared. And that's only true, only true for 2 + +136 +00:11:17,760 --> 00:11:24,180 +by 2 table. That means this square is just Chi + +137 +00:11:24,180 --> 00:11:29,470 +square 1. For this reason, we can test by one + +138 +00:11:29,470 --> 00:11:33,330 +equal by two, by two methods, either this + +139 +00:11:33,330 --> 00:11:37,750 +statistic or chi-square statistic. Both of them + +140 +00:11:37,750 --> 00:11:41,970 +will give the same result. So let's go back to the + +141 +00:11:41,970 --> 00:11:49,670 +question we have. My chi-square value is 0.77576. + +142 +00:11:52,160 --> 00:11:56,940 +So that's your chi-square statistic. Again, + +143 +00:11:57,500 --> 00:12:00,240 +degrees of freedom 1 to chi-square, the critical + +144 +00:12:00,240 --> 00:12:08,500 +value is 3.841. So my decision is we don't reject + +145 +00:12:08,500 --> 00:12:11,780 +the null hypothesis. My conclusion is there is not + +146 +00:12:11,780 --> 00:12:14,380 +sufficient evidence that two proportions are + +147 +00:12:14,380 --> 00:12:17,480 +different. So you don't have sufficient evidence + +148 +00:12:17,480 --> 00:12:21,900 +in order to support that the two proportions are + +149 +00:12:21,900 --> 00:12:27,720 +different at 5% level of significance. 
We stopped + +150 +00:12:27,720 --> 00:12:32,700 +last time at this point. Now suppose we are + +151 +00:12:32,700 --> 00:12:36,670 +testing The difference among more than two + +152 +00:12:36,670 --> 00:12:42,930 +proportions. The same steps, we have to extend in + +153 +00:12:42,930 --> 00:12:47,830 +this case chi-square. Your null hypothesis, by one + +154 +00:12:47,830 --> 00:12:50,990 +equal by two, all the way up to by C. So in this + +155 +00:12:50,990 --> 00:13:00,110 +case, there are C columns. C columns and + +156 +00:13:00,110 --> 00:13:05,420 +two rows. So number of columns equals C, and there + +157 +00:13:05,420 --> 00:13:10,520 +are only two rows. So pi 1 equals pi 2, all the + +158 +00:13:10,520 --> 00:13:13,840 +way up to pi C. So null hypothesis for the columns + +159 +00:13:13,840 --> 00:13:17,040 +we have. There are C columns. Again, it's the + +160 +00:13:17,040 --> 00:13:19,840 +alternative, not all of the pi J are equal, and J + +161 +00:13:19,840 --> 00:13:23,840 +equals 1 up to C. Now, the only difference here, + +162 +00:13:26,520 --> 00:13:27,500 +the degrees of freedom. + +163 +00:13:31,370 --> 00:13:32,850 +For 2 by c table, + +164 +00:13:35,710 --> 00:13:42,010 +2 by c, degrees of freedom equals number + +165 +00:13:42,010 --> 00:13:45,890 +of rows minus 1. There are two rows, so 2 minus 1 + +166 +00:13:45,890 --> 00:13:50,810 +times number of columns minus 1. 2 minus 1 is 1, c + +167 +00:13:50,810 --> 00:13:54,610 +minus 1, 1 times c minus 1, c minus 1. So your + +168 +00:13:54,610 --> 00:13:57,130 +degrees of freedom in this case is c minus 1. + +169 +00:14:00,070 --> 00:14:03,190 +So that's the only difference. For two by two + +170 +00:14:03,190 --> 00:14:07,130 +table, degrees of freedom is just one. If there + +171 +00:14:07,130 --> 00:14:10,670 +are C columns and we have the same number of rows, + +172 +00:14:11,450 --> 00:14:14,810 +degrees of freedom is C minus one. 
And we have the + +173 +00:14:14,810 --> 00:14:19,190 +same chi squared statistic, the same equation I + +174 +00:14:19,190 --> 00:14:23,890 +mean. And we have to extend also the overall + +175 +00:14:23,890 --> 00:14:27,330 +proportion instead of x1 plus x2 divided by n1 + +176 +00:14:27,330 --> 00:14:32,610 +plus n2. It becomes x1 plus x2 plus x3 all the way + +177 +00:14:32,610 --> 00:14:38,330 +up to xc because there are c columns divided by n1 + +178 +00:14:38,330 --> 00:14:41,910 +plus n2 all the way up to nc. So that's x over n. + +179 +00:14:43,540 --> 00:14:48,400 +So similarly we can reject the null hypothesis if + +180 +00:14:48,400 --> 00:14:52,260 +the value of chi-square statistic lies or falls in + +181 +00:14:52,260 --> 00:14:54,160 +the rejection region. + +182 +00:14:58,120 --> 00:15:01,980 +Other type of chi-square test is called chi-square + +183 +00:15:01,980 --> 00:15:07,380 +test of independence. Generally speaking, most of + +184 +00:15:07,380 --> 00:15:10,440 +the time there are more than two columns or more + +185 +00:15:10,440 --> 00:15:16,490 +than two rows. Now, suppose we have contingency + +186 +00:15:16,490 --> 00:15:22,370 +table that has R rows and C columns. And we are + +187 +00:15:22,370 --> 00:15:26,990 +interested to test to see whether the two + +188 +00:15:26,990 --> 00:15:31,390 +categorical variables are independent. That means + +189 +00:15:31,390 --> 00:15:35,600 +there is no relationship between them. Against the + +190 +00:15:35,600 --> 00:15:38,800 +alternative hypothesis, the two variables are + +191 +00:15:38,800 --> 00:15:42,040 +dependent. That means there is a relationship + +192 +00:15:42,040 --> 00:15:45,140 +between them. So test of independence. + +193 +00:15:47,780 --> 00:15:52,220 +Null hypothesis is always the two variables, I + +194 +00:15:52,220 --> 00:15:55,240 +mean, the two categorical variables are + +195 +00:15:55,240 --> 00:15:59,860 +independent. So it's zero. 
Always x and y, for + +196 +00:15:59,860 --> 00:16:02,860 +example, are independent. + +197 +00:16:06,330 --> 00:16:11,790 +This means there is no difference between them. I + +198 +00:16:11,790 --> 00:16:17,490 +mean, Y1 equals Y. Similarly, X and Y are + +199 +00:16:17,490 --> 00:16:19,850 +independent. So there is no difference between the + +200 +00:16:19,850 --> 00:16:23,030 +two populations of this notion. Against the + +201 +00:16:23,030 --> 00:16:27,010 +alternative hypothesis, either X and Y, you may + +202 +00:16:27,010 --> 00:16:29,150 +say that they are dependent. + +203 +00:16:31,630 --> 00:16:34,470 +So that means there exists a relationship between + +204 +00:16:34,470 --> 00:16:38,060 +them or They are related. + +205 +00:16:40,920 --> 00:16:45,300 +So tests of independence for chi-square test to + +206 +00:16:45,300 --> 00:16:47,480 +see whether or not the two variables are + +207 +00:16:47,480 --> 00:16:50,640 +independent. So your null, two variables are + +208 +00:16:50,640 --> 00:16:55,740 +independent against they are not independent. So + +209 +00:16:55,740 --> 00:16:58,060 +similar to the chi-square test for equality of + +210 +00:16:58,060 --> 00:17:02,640 +more than two proportions. So, in order to test to + +211 +00:17:02,640 --> 00:17:06,020 +see if more than two proportions are equal, you + +212 +00:17:06,020 --> 00:17:15,700 +cannot use this statistic. So, this statistic is + +213 +00:17:15,700 --> 00:17:27,600 +no longer appropriate or valid for more than two + +214 +00:17:27,600 --> 00:17:31,770 +proportions. In this case, you have to use chi + +215 +00:17:31,770 --> 00:17:37,310 +-square test. So this statistic can be used only + +216 +00:17:37,310 --> 00:17:40,970 +to test the difference between two proportions. + +217 +00:17:41,110 --> 00:17:44,110 +But for more than two, you have to use chi-square + +218 +00:17:44,110 --> 00:17:47,690 +test. 
So similar, chi-square test of independence + +219 +00:17:47,690 --> 00:17:52,470 +is similar to chi-square test for equality of more + +220 +00:17:52,470 --> 00:17:57,360 +than two proportions. But extend the concept. The + +221 +00:17:57,360 --> 00:18:02,100 +previous one was two rows and C columns, so two by + +222 +00:18:02,100 --> 00:18:05,940 +C. But here we extend the concept to contingency + +223 +00:18:05,940 --> 00:18:11,560 +tables with R rows and C columns. So we have the + +224 +00:18:11,560 --> 00:18:15,660 +case R by C. So that's in general, there are R + +225 +00:18:15,660 --> 00:18:23,060 +rows and C columns. And the question is this C, if + +226 +00:18:23,060 --> 00:18:27,480 +the two variables are independent or not. So in + +227 +00:18:27,480 --> 00:18:30,700 +this case, you cannot use this statistic. So one + +228 +00:18:30,700 --> 00:18:34,320 +more time, this statistic is valid only for two by + +229 +00:18:34,320 --> 00:18:38,020 +two tables. So that means we can use z or chi + +230 +00:18:38,020 --> 00:18:41,200 +-square to test if there is no difference between + +231 +00:18:41,200 --> 00:18:43,960 +two population proportions. But for more than + +232 +00:18:43,960 --> 00:18:46,700 +that, you have to use chi-square. + +233 +00:18:49,950 --> 00:18:53,310 +Now still we have the same equation, Chi-square + +234 +00:18:53,310 --> 00:18:57,870 +statistic is just sum F observed minus F expected + +235 +00:18:57,870 --> 00:19:00,690 +quantity squared divided by F expected. + +236 +00:19:03,490 --> 00:19:07,550 +In this case, Chi-square statistic for R by C case + +237 +00:19:07,550 --> 00:19:15,430 +has degrees of freedom R minus 1 multiplied by C + +238 +00:19:15,430 --> 00:19:18,570 +minus 1. In this case, each cell in the + +239 +00:19:18,570 --> 00:19:21,230 +contingency table has expected frequency at least + +240 +00:19:21,230 --> 00:19:26,910 +one instead of five. 
Now let's see how can we + +241 +00:19:26,910 --> 00:19:31,690 +compute the expected cell frequency for each cell. + +242 +00:19:32,950 --> 00:19:37,530 +The expected frequency is given by row total + +243 +00:19:37,530 --> 00:19:42,950 +multiplied by colon total divided by n. So that's + +244 +00:19:42,950 --> 00:19:50,700 +my new equation to determine I've expected it. So + +245 +00:19:50,700 --> 00:19:56,440 +the expected value for each cell is given by Rho + +246 +00:19:56,440 --> 00:20:03,380 +total multiplied by Kono, total divided by N. + +247 +00:20:05,160 --> 00:20:09,540 +Also, this equation is true for the previous + +248 +00:20:09,540 --> 00:20:15,560 +example. If you go back a little bit here, now the + +249 +00:20:16,650 --> 00:20:21,650 +Expected for this cell was 40.4. Now let's see how + +250 +00:20:21,650 --> 00:20:25,470 +can we compute the same value by using this + +251 +00:20:25,470 --> 00:20:30,250 +equation. So it's equal to row total 120 + +252 +00:20:30,250 --> 00:20:40,310 +multiplied by column total 36 divided by 300. + +253 +00:20:43,580 --> 00:20:46,500 +Now before we compute this value by using B dash + +254 +00:20:46,500 --> 00:20:50,900 +first, 300 divided by, I'm sorry, 36 divided by + +255 +00:20:50,900 --> 00:20:58,520 +300. So that's your B dash. Then we multiply this + +256 +00:20:58,520 --> 00:21:03,540 +B dash by N, and this is your N. So it's similar + +257 +00:21:03,540 --> 00:21:08,540 +equation. So either you use row total multiplied + +258 +00:21:08,540 --> 00:21:14,060 +by column total. 
then divide by overall sample + +259 +00:21:14,060 --> 00:21:18,880 +size you will get the same result by using the + +260 +00:21:18,880 --> 00:21:25,520 +overall proportion 12% times 120 so each one will + +261 +00:21:25,520 --> 00:21:29,860 +give the same answer so from now we are going to + +262 +00:21:29,860 --> 00:21:33,900 +use this equation in order to compute the expected + +263 +00:21:33,900 --> 00:21:37,960 +frequency for each cell so again expected + +264 +00:21:37,960 --> 00:21:42,920 +frequency is rho total times Column total divided + +265 +00:21:42,920 --> 00:21:48,620 +by N, N is the sample size. So row total it means + +266 +00:21:48,620 --> 00:21:52,220 +sum of all frequencies in the row. Similarly + +267 +00:21:52,220 --> 00:21:56,160 +column total is the sum of all frequencies in the + +268 +00:21:56,160 --> 00:22:00,180 +column and N is over all sample size. + +269 +00:22:03,030 --> 00:22:06,630 +Again, we reject the null hypothesis if your chi + +270 +00:22:06,630 --> 00:22:10,430 +-square statistic greater than chi-square alpha. + +271 +00:22:10,590 --> 00:22:13,370 +Otherwise, you don't reject it. And keep in mind, + +272 +00:22:14,270 --> 00:22:18,390 +chi-square statistic has degrees of freedom R + +273 +00:22:18,390 --> 00:22:23,730 +minus 1 times C minus 1. That's all for chi-square + +274 +00:22:23,730 --> 00:22:27,590 +as test of independence. Any question? + +275 +00:22:31,220 --> 00:22:36,300 +Here there is an example for applying chi-square + +276 +00:22:36,300 --> 00:22:42,200 +test of independence. Meal plan selected + +277 +00:22:42,200 --> 00:22:46,700 +by 200 students is shown in this table. So there + +278 +00:22:46,700 --> 00:22:50,960 +are two variables of interest. The first one is + +279 +00:22:50,960 --> 00:22:56,230 +number of meals per week. And there are three + +280 +00:22:56,230 --> 00:23:00,550 +types of number of meals, either 20 meals per + +281 +00:23:00,550 --> 00:23:07,870 +week, or 10 meals per week, or none. 
So that's, so + +282 +00:23:07,870 --> 00:23:12,150 +number of meals is classified into three groups. + +283 +00:23:13,210 --> 00:23:17,650 +So three columns, 20 per week, 10 per week, or + +284 +00:23:17,650 --> 00:23:23,270 +none. Class standing, students are classified into + +285 +00:23:23,270 --> 00:23:28,860 +four levels. A freshman, it means students like + +286 +00:23:28,860 --> 00:23:33,620 +you, first year. Sophomore, it means second year. + +287 +00:23:34,440 --> 00:23:38,400 +Junior, third level. Senior, fourth level. So that + +288 +00:23:38,400 --> 00:23:42,100 +means first, second, third, and fourth level. And + +289 +00:23:42,100 --> 00:23:46,140 +we have this number, these numbers for, I mean, + +290 +00:23:47,040 --> 00:23:53,660 +there are 24 A freshman who have meals for 20 per + +291 +00:23:53,660 --> 00:23:59,880 +week. So there are 24 freshmen have 20 meals per + +292 +00:23:59,880 --> 00:24:04,160 +week. 22 sophomores, the same, 10 for junior and + +293 +00:24:04,160 --> 00:24:10,220 +14 for senior. And the question is just to see if + +294 +00:24:10,220 --> 00:24:13,740 +number of meals per week is independent of class + +295 +00:24:13,740 --> 00:24:17,270 +standing. to see if there is a relationship + +296 +00:24:17,270 --> 00:24:21,890 +between these two variables. In this case, there + +297 +00:24:21,890 --> 00:24:26,850 +are four rows because the class standing is + +298 +00:24:26,850 --> 00:24:29,190 +classified into four groups. So there are four + +299 +00:24:29,190 --> 00:24:34,230 +rows and three columns. So this table actually is + +300 +00:24:34,230 --> 00:24:40,200 +four by three. And there are twelve cells in this + +301 +00:24:40,200 --> 00:24:46,660 +case. Now it takes time to compute the expected + +302 +00:24:46,660 --> 00:24:49,760 +frequencies because in this case we have to + +303 +00:24:49,760 --> 00:24:55,120 +compute the expected frequency for each cell. 
And + +304 +00:24:55,120 --> 00:25:01,320 +we are going to use this formula for only six of + +305 +00:25:01,320 --> 00:25:06,260 +them. I mean, we can apply this formula for only + +306 +00:25:06,260 --> 00:25:09,880 +six of them. And the others can be computed by the + +307 +00:25:09,880 --> 00:25:14,300 +complement by using either column total or row + +308 +00:25:14,300 --> 00:25:19,940 +total. So because degrees of freedom is six, that + +309 +00:25:19,940 --> 00:25:23,880 +means you may use this rule six times only. The + +310 +00:25:23,880 --> 00:25:28,420 +others can be computed by using the complement. So + +311 +00:25:28,420 --> 00:25:34,070 +here again, the hypothesis to be tested is, Mean + +312 +00:25:34,070 --> 00:25:36,550 +plan and class standing are independent, that + +313 +00:25:36,550 --> 00:25:38,670 +means there is no relationship between them. + +314 +00:25:39,150 --> 00:25:41,650 +Against alternative hypothesis, mean plan and + +315 +00:25:41,650 --> 00:25:44,630 +class standing are dependent, that means there + +316 +00:25:44,630 --> 00:25:49,950 +exists significant relationship between them. Now + +317 +00:25:49,950 --> 00:25:54,390 +let's see how can we compute the expected cell, + +318 +00:25:55,990 --> 00:26:00,470 +the expected frequency for each cell. For example, + +319 +00:26:02,250 --> 00:26:07,790 +The first observed frequency is 24. Now the + +320 +00:26:07,790 --> 00:26:15,990 +expected should be 70 times 70 divided by 200. So + +321 +00:26:15,990 --> 00:26:25,050 +for cell 11, the first cell. If expected, we can + +322 +00:26:25,050 --> 00:26:32,450 +use this notation, 11. Means first row. First + +323 +00:26:32,450 --> 00:26:40,110 +column. That should be 70. It is 70. Multiplied by + +324 +00:26:40,110 --> 00:26:43,990 +column totals. Again, in this case, 70. Multiplied + +325 +00:26:43,990 --> 00:26:47,270 +by 200. That will give 24.5. + +326 +00:26:50,150 --> 00:26:53,730 +Similarly, for the second cell, for 32. 
+ +327 +00:26:56,350 --> 00:27:00,090 +70 times 88 divided by 200. + +328 +00:27:02,820 --> 00:27:12,620 +So for F22, again it's 70 times 88 divided by 200, + +329 +00:27:12,800 --> 00:27:22,060 +that will get 30.8. So 70 times 88, that will give + +330 +00:27:22,060 --> 00:27:32,780 +30.8. F21, rule two first, one third. rho 1 second + +331 +00:27:32,780 --> 00:27:37,600 +one the third one now either you can use the same + +332 +00:27:37,600 --> 00:27:44,320 +equation which is 70 times 42 so you can use 70 + +333 +00:27:44,320 --> 00:27:54,360 +times 42 divided by 200 that will give 14.7 or + +334 +00:27:54,360 --> 00:27:59,000 +it's just the complement which is 70 minus + +335 +00:28:03,390 --> 00:28:14,510 +24.5 plus 30.8. So either use 70 multiplied by 40 + +336 +00:28:14,510 --> 00:28:19,390 +divided by 200 or just the complement, 70 minus. + +337 +00:28:20,800 --> 00:28:28,400 +24.5 plus 30.8 will give the same value. So I just + +338 +00:28:28,400 --> 00:28:32,740 +compute the expected cell for 1 and 2, and the + +339 +00:28:32,740 --> 00:28:36,120 +third one is just the complement. Similarly, for + +340 +00:28:36,120 --> 00:28:42,560 +the second row, I mean cell 21, then 22, and 23. + +341 +00:28:43,680 --> 00:28:47,940 +By using the same method, he will get these two + +342 +00:28:47,940 --> 00:28:51,880 +values, and the other one is the complement, which + +343 +00:28:51,880 --> 00:28:54,880 +is 60 minus these, the sum of these two values, + +344 +00:28:55,300 --> 00:28:55,960 +will give 12. + +345 +00:28:58,720 --> 00:29:01,920 +Similarly, for the third cell, I'm sorry, the + +346 +00:29:01,920 --> 00:29:07,460 +third row, for this value, For 10, it's 30 times + +347 +00:29:07,460 --> 00:29:12,660 +70 divided by 200 will give this result. And the + +348 +00:29:12,660 --> 00:29:16,060 +other one is just 30 multiplied by 88 divided by + +349 +00:29:16,060 --> 00:29:20,200 +200. 
The other one is just the complement, 30 + +350 +00:29:20,200 --> 00:29:25,180 +minus the sum of these. Now, for the last column, + +351 +00:29:26,660 --> 00:29:35,220 +either 70 multiplied by 70 divided by 200, or 70 + +352 +00:29:35,220 --> 00:29:41,780 +this 70 minus the sum of these. 70 this one equals + +353 +00:29:41,780 --> 00:29:51,740 +70 minus the sum of 24 plus 21 plus 10. That will + +354 +00:29:51,740 --> 00:30:01,120 +give 14. Now for the other expected cell, 88. + +355 +00:30:02,370 --> 00:30:05,530 +minus the sum of these three expected frequencies. + +356 +00:30:07,290 --> 00:30:12,810 +Now for the last one, last one is either by 42 + +357 +00:30:12,810 --> 00:30:17,770 +minus the sum of these three, or 40 minus the sum + +358 +00:30:17,770 --> 00:30:20,090 +of 14 plus 6, 17.6. + +359 +00:30:22,810 --> 00:30:27,940 +Or 40 multiplied by 42 divided by 400. So let's + +360 +00:30:27,940 --> 00:30:35,180 +say we use that formula six times. For this + +361 +00:30:35,180 --> 00:30:39,100 +reason, degrees of freedom is six. The other six + +362 +00:30:39,100 --> 00:30:46,480 +are computed by the complement as we mentioned. So + +363 +00:30:46,480 --> 00:30:50,240 +these are the expected frequencies. It takes time + +364 +00:30:50,240 --> 00:30:56,010 +to compute these. But if you have only two by two + +365 +00:30:56,010 --> 00:31:01,170 +table, it's easier. Now based on that, we can + +366 +00:31:01,170 --> 00:31:07,430 +compute chi-square statistic value by using this + +367 +00:31:07,430 --> 00:31:12,390 +equation for each cell. I mean, the first one, if + +368 +00:31:12,390 --> 00:31:14,370 +you go back a little bit to the previous table, + +369 +00:31:15,150 --> 00:31:18,130 +here, in order to compute chi-square, + +370 +00:31:22,640 --> 00:31:27,760 +value, we have to use this equation, pi squared, + +371 +00:31:28,860 --> 00:31:36,080 +sum F observed minus F expected squared, divided + +372 +00:31:36,080 --> 00:31:41,980 +by F expected for all C's. 
So the first one is 24 + +373 +00:31:41,980 --> 00:31:44,780 +minus squared, + +374 +00:31:46,560 --> 00:31:55,350 +24 plus. The second cell is 32 squared + +375 +00:31:55,350 --> 00:31:58,990 +plus + +376 +00:31:58,990 --> 00:32:02,930 +all the way up to the last cell, which is 10. + +377 +00:32:11,090 --> 00:32:14,430 +So it takes time. But again, for two by two, it's + +378 +00:32:14,430 --> 00:32:18,890 +straightforward. Anyway, now if you compare the + +379 +00:32:18,890 --> 00:32:23,650 +expected and observed cells, you can have an idea + +380 +00:32:23,650 --> 00:32:25,650 +either to reject or fail to reject without + +381 +00:32:25,650 --> 00:32:31,470 +computing the value itself. Now, 24, 24.5. The + +382 +00:32:31,470 --> 00:32:32,430 +difference is small. + +383 +00:32:35,730 --> 00:32:39,070 +for about 7 and so on. So the difference between + +384 +00:32:39,070 --> 00:32:44,450 +observed and expected looks small. In this case, + +385 +00:32:44,590 --> 00:32:50,530 +chi-square value is close to zero. So it's 709. + +386 +00:32:51,190 --> 00:32:55,370 +Now, without looking at the table we have, we have + +387 +00:32:55,370 --> 00:33:02,710 +to don't reject. So we don't reject Because as we + +388 +00:33:02,710 --> 00:33:05,450 +mentioned, the minimum k squared value is 1132. + +389 +00:33:06,350 --> 00:33:09,670 +That's for one degrees of freedom and the alpha is + +390 +00:33:09,670 --> 00:33:14,390 +25%. So + +391 +00:33:14,390 --> 00:33:19,250 +I expect my decision is don't reject the null + +392 +00:33:19,250 --> 00:33:24,530 +hypothesis. Now by looking at k squared 5% and + +393 +00:33:24,530 --> 00:33:28,870 +degrees of freedom 6 by using k squared theorem. + +394 +00:33:30,200 --> 00:33:36,260 +Now degrees of freedom 6. Now the minimum value of + +395 +00:33:36,260 --> 00:33:40,520 +Chi-square is 7.84. I mean critical value. But + +396 +00:33:40,520 --> 00:33:48,290 +under 5% is 12.59. So this value is 12.59. 
So + +397 +00:33:48,290 --> 00:33:54,470 +critical value is 12.59. So my rejection region is + +398 +00:33:54,470 --> 00:33:59,890 +above this value. Now, my chi-square value falls + +399 +00:33:59,890 --> 00:34:06,250 +in the non-rejection regions. It's very small + +400 +00:34:06,250 --> 00:34:13,850 +value. So chi-square statistic is 0.709. + +401 +00:34:14,230 --> 00:34:20,620 +It's much smaller. Not even smaller than π²α, it's + +402 +00:34:20,620 --> 00:34:23,580 +much smaller than this value, so it means we don't + +403 +00:34:23,580 --> 00:34:26,440 +have sufficient evidence to support the + +404 +00:34:26,440 --> 00:34:32,010 +alternative hypothesis. So my decision is, don't + +405 +00:34:32,010 --> 00:34:36,350 +reject the null hypothesis. So conclusion, there + +406 +00:34:36,350 --> 00:34:41,150 +is not sufficient evidence that Mealy Plan, which + +407 +00:34:41,150 --> 00:34:45,310 +was classified into three groups, 20 per week or + +408 +00:34:45,310 --> 00:34:50,310 +10 per week or none, and class standing. which is + +409 +00:34:50,310 --> 00:34:54,750 +classified into four groups, freshman, sophomore, + +410 +00:34:55,010 --> 00:34:58,030 +junior, and senior are related. So you don't have + +411 +00:34:58,030 --> 00:35:00,690 +sufficient evidence that they are related. It + +412 +00:35:00,690 --> 00:35:05,630 +means they are independent. So the two variables + +413 +00:35:05,630 --> 00:35:13,590 +in this case are independent. + +414 +00:35:18,420 --> 00:35:21,520 +It means there is no relationship between number + +415 +00:35:21,520 --> 00:35:25,000 +of meals and class standing. It means the + +416 +00:35:25,000 --> 00:35:30,320 +proportions are equal. So this means pi 1 equals + +417 +00:35:30,320 --> 00:35:34,560 +pi 2 equals pi 3. So the three proportions are + +418 +00:35:34,560 --> 00:35:40,100 +equal. Pi 1 for 20 meals per week is the same as + +419 +00:35:40,100 --> 00:35:46,960 +10 or none according to class standing. 
Any + +420 +00:35:46,960 --> 00:35:52,600 +question? I think it's straightforward test, maybe + +421 +00:35:52,600 --> 00:35:59,140 +even easier than using a T statistic. And that's + +422 +00:35:59,140 --> 00:36:05,840 +all for this chapter. Any questions? I will do + +423 +00:36:05,840 --> 00:36:12,360 +some practice problems for chapter 11. These + +424 +00:36:12,360 --> 00:36:16,160 +problems will be posted in the course website this + +425 +00:36:16,160 --> 00:36:19,220 +week, sometime this week, maybe tomorrow or after + +426 +00:36:19,220 --> 00:36:22,840 +tomorrow. So Monday or Tuesday I'm going to post + +427 +00:36:22,840 --> 00:36:27,280 +the practice problems and solutions for chapter + +428 +00:36:27,280 --> 00:36:31,700 +11. So let's do some of these problems. + +429 +00:36:40,160 --> 00:36:43,260 +Let's do some of multiple choice problems. + +430 +00:36:55,000 --> 00:36:59,420 +When testing for independence in contingency table + +431 +00:36:59,420 --> 00:37:03,840 +with three rows and + +432 +00:37:03,840 --> 00:37:10,250 +four columns. So there are three rows, four + +433 +00:37:10,250 --> 00:37:18,150 +columns. There are degrees of freedom. So degrees + +434 +00:37:18,150 --> 00:37:23,310 +of freedom. R minus one multiplied by C minus one. + +435 +00:37:24,090 --> 00:37:28,630 +Two times three is six. So there are six degrees + +436 +00:37:28,630 --> 00:37:32,130 +of freedom. Second question. + +437 +00:37:36,710 --> 00:37:43,150 +If we wish to determine whether there is evidence + +438 +00:37:43,150 --> 00:37:46,890 +that the proportion of items of interest is the + +439 +00:37:46,890 --> 00:37:51,510 +same in group 1 as in group 2, the appropriate + +440 +00:37:51,510 --> 00:37:57,700 +test to use is. So here we are testing Pi 1 equals + +441 +00:37:57,700 --> 00:38:01,040 +Pi 2, so there are two populations. + +442 +00:38:02,480 --> 00:38:08,720 +The answer is A. 
Z statistic, Z test, Chi squared, + +443 +00:38:09,740 --> 00:38:13,840 +both A and B, neither A, neither of A nor B. + +444 +00:38:16,320 --> 00:38:19,540 +Exactly, the answer is C because we can use either + +445 +00:38:19,540 --> 00:38:25,080 +Z statistic or Chi squared. So Z or Chi. can be + +446 +00:38:25,080 --> 00:38:28,920 +used for testing difference between two population + +447 +00:38:28,920 --> 00:38:34,360 +proportions. And again, chi-square can be extended + +448 +00:38:34,360 --> 00:38:40,140 +to use for more than two. So in this case, the + +449 +00:38:40,140 --> 00:38:43,220 +correct answer is C, because we can use either Z + +450 +00:38:43,220 --> 00:38:52,090 +or chi-square test. Next, in testing, hypothesis + +451 +00:38:52,090 --> 00:38:58,350 +using chi-square test. The theoretical frequencies + +452 +00:38:58,350 --> 00:39:03,190 +are based on null hypothesis, alternative, normal + +453 +00:39:03,190 --> 00:39:06,490 +distribution, none of the above. Always when we + +454 +00:39:06,490 --> 00:39:10,450 +are using chi-square test, we assume the null is + +455 +00:39:10,450 --> 00:39:14,630 +true. So the theoretical frequencies are based on + +456 +00:39:14,630 --> 00:39:20,060 +the null hypothesis. So always any statistic can + +457 +00:39:20,060 --> 00:39:25,300 +be computed if we assume x0 is correct. So the + +458 +00:39:25,300 --> 00:39:26,400 +correct answer is A. + +459 +00:39:34,060 --> 00:39:37,040 +Let's look at table 11-2. + +460 +00:39:44,280 --> 00:39:49,000 +Many companies use well-known celebrities as + +461 +00:39:49,000 --> 00:39:54,420 +spokespersons in their TV advertisements. A study + +462 +00:39:54,420 --> 00:39:57,760 +was conducted to determine whether brand awareness + +463 +00:39:57,760 --> 00:40:02,140 +of female TV viewers and the gender of the + +464 +00:40:02,140 --> 00:40:05,860 +spokesperson are independent. 
So there are two + +465 +00:40:05,860 --> 00:40:09,820 +variables, whether a brand awareness of female TV + +466 +00:40:09,820 --> 00:40:13,740 +and gender of the spokesperson are independent. + +467 +00:40:14,820 --> 00:40:19,540 +Each and a sample of 300 female TV viewers was + +468 +00:40:19,540 --> 00:40:24,000 +asked to identify a product advertised by a + +469 +00:40:24,000 --> 00:40:27,000 +celebrity spokesperson, the gender of the + +470 +00:40:27,000 --> 00:40:32,280 +spokesperson, and whether or not the viewer could + +471 +00:40:32,280 --> 00:40:36,460 +identify the product was recorded. The number in + +472 +00:40:36,460 --> 00:40:40,080 +each category are given below. Now, the questions + +473 +00:40:40,080 --> 00:40:45,520 +are, number one, he asked about the calculated + +474 +00:40:45,520 --> 00:40:49,120 +this statistic is. We have to find Chi-square + +475 +00:40:49,120 --> 00:40:54,020 +statistic. It's two by two tables, easy one. So, + +476 +00:40:54,460 --> 00:40:59,460 +for example, to find the F expected is, + +477 +00:41:00,420 --> 00:41:13,130 +rho total is one over two. And one line here. And + +478 +00:41:13,130 --> 00:41:13,810 +this 150. + +479 +00:41:16,430 --> 00:41:22,510 +And also 150. So the expected frequency for the + +480 +00:41:22,510 --> 00:41:31,010 +first one is 102 times 150 divided by 300. + +481 +00:41:35,680 --> 00:41:39,640 +So the answer is 51. + +482 +00:41:42,880 --> 00:41:51,560 +So the first expected is 51. The other one is just + +483 +00:41:51,560 --> 00:41:54,360 +102 minus 51 is also 51. + +484 +00:41:57,320 --> 00:42:01,020 +Now here is 99. + +485 +00:42:09,080 --> 00:42:15,180 +So the second + +486 +00:42:15,180 --> 00:42:18,800 +one are the expected frequencies. So my chi-square + +487 +00:42:18,800 --> 00:42:22,400 +statistic is + +488 +00:42:22,400 --> 00:42:32,260 +41 minus 51 squared divided by 51 plus 61 minus 51 + +489 +00:42:32,260 --> 00:42:44,160 +squared. 
divided by 51 plus 109 minus 99 squared divided by 99 plus 89
Since we reject, + +515 +00:44:53,380 --> 00:44:58,330 +then they are not. Because it's a complement. So, + +516 +00:44:58,430 --> 00:45:02,810 +B is the correct answer. Now, C. A brand awareness + +517 +00:45:02,810 --> 00:45:05,450 +of female TV viewers and the gender of the + +518 +00:45:05,450 --> 00:45:10,550 +spokesperson are related. The same meaning. They + +519 +00:45:10,550 --> 00:45:15,470 +are either, you say, not independent, related or + +520 +00:45:15,470 --> 00:45:15,950 +dependent. + +521 +00:45:19,490 --> 00:45:24,930 +Either is the same, so C is correct. D both B and + +522 +00:45:24,930 --> 00:45:28,970 +C, so D is the correct answer. So again, if we + +523 +00:45:28,970 --> 00:45:31,650 +reject the null hypothesis, it means the two + +524 +00:45:31,650 --> 00:45:36,990 +variables either not independent or related or + +525 +00:45:36,990 --> 00:45:38,290 +dependent. + +526 +00:45:40,550 --> 00:45:46,630 +Any question? I will stop at this point. Next + +527 +00:45:46,630 --> 00:45:47,750 +time, inshallah, we'll start. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/R6shw6IZsm8_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/R6shw6IZsm8_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..a65b03746bb14c5d563bd4ecd7b1db95272a5ca9 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/R6shw6IZsm8_raw.srt @@ -0,0 +1,2108 @@ +1 +00:00:11,500 --> 00:00:17,380 +Last time, we talked about chi-square tests. And + +2 +00:00:17,380 --> 00:00:21,580 +we mentioned that there are two objectives in this + +3 +00:00:21,580 --> 00:00:25,220 +chapter. The first one is when to use chi-square + +4 +00:00:25,220 --> 00:00:28,630 +tests for contingency tables. And the other + +5 +00:00:28,630 --> 00:00:31,070 +objective is how to use chi-square tests for + +6 +00:00:31,070 --> 00:00:35,410 +contingency tables. And we did one chi-square test + +7 +00:00:35,410 --> 00:00:42,050 +for the difference between two proportions. 
In the + +8 +00:00:42,050 --> 00:00:44,630 +null hypothesis, the two proportions are equal. I + +9 +00:00:44,630 --> 00:00:47,970 +mean, proportion for population 1 equals + +10 +00:00:47,970 --> 00:00:52,970 +population proportion 2 against the alternative + +11 +00:00:52,970 --> 00:00:58,470 +here is two-sided test. Pi 1 does not equal pi 2. + +12 +00:00:59,310 --> 00:01:04,210 +In this case, we can use either this statistic. So + +13 +00:01:04,210 --> 00:01:05,150 +you may + +14 +00:01:07,680 --> 00:01:15,520 +Z statistic, which is b1 minus b2 minus y1 minus + +15 +00:01:15,520 --> 00:01:21,840 +y2 divided by b + +16 +00:01:21,840 --> 00:01:27,500 +dash times 1 minus b dash multiplied by 1 over n1 + +17 +00:01:27,500 --> 00:01:31,200 +plus 1 over n2. This quantity under the square + +18 +00:01:31,200 --> 00:01:33,080 +root, where b dash + +19 +00:01:42,180 --> 00:01:48,580 +Or proportionally, where P dash equals X1 plus X2 + +20 +00:01:48,580 --> 00:01:55,560 +divided by N1 plus N2. Or, + +21 +00:01:58,700 --> 00:02:00,720 +in this chapter, we are going to use chi-square + +22 +00:02:00,720 --> 00:02:04,520 +statistic, which is given by this equation. Chi + +23 +00:02:04,520 --> 00:02:09,620 +-square statistic is just sum of observed + +24 +00:02:09,620 --> 00:02:10,940 +frequency, FO. + +25 +00:02:15,530 --> 00:02:20,070 +minus expected frequency squared divided by + +26 +00:02:20,070 --> 00:02:22,490 +expected frequency for all cells. + +27 +00:02:25,210 --> 00:02:29,070 +Chi squared, this statistic is given by this + +28 +00:02:29,070 --> 00:02:34,190 +equation. If there are two by two rows and + +29 +00:02:34,190 --> 00:02:36,290 +columns, I mean there are two rows and two + +30 +00:02:36,290 --> 00:02:40,270 +columns. So in this case, my table is two by two. + +31 +00:02:42,120 --> 00:02:44,360 +In this case, you have only one degree of freedom. 
+ +32 +00:02:44,640 --> 00:02:50,440 +Always degrees of freedom equals number of rows + +33 +00:02:50,440 --> 00:03:00,320 +minus one multiplied by number of columns minus + +34 +00:03:00,320 --> 00:03:06,140 +one. So for two by two tables, there are two rows + +35 +00:03:06,140 --> 00:03:11,560 +and two columns, so two minus one. times 2 minus + +36 +00:03:11,560 --> 00:03:15,420 +1, so your degrees of freedom in this case is 1. + +37 +00:03:16,440 --> 00:03:19,320 +Here the assumption is we assume that the expected + +38 +00:03:19,320 --> 00:03:22,940 +frequency is at least 5, in order to use Chi + +39 +00:03:22,940 --> 00:03:27,200 +-square statistic. Chi-square is always positive, + +40 +00:03:27,680 --> 00:03:32,260 +I mean, Chi-square value is always greater than 0. + +41 +00:03:34,040 --> 00:03:38,890 +It's one TLTS to the right one. We reject F0 if + +42 +00:03:38,890 --> 00:03:42,430 +your chi-square statistic falls in the rejection + +43 +00:03:42,430 --> 00:03:45,850 +region. That means we reject the null hypothesis + +44 +00:03:45,850 --> 00:03:49,470 +if chi-square statistic greater than chi-square + +45 +00:03:49,470 --> 00:03:53,130 +alpha. Alpha can be determined by using chi-square + +46 +00:03:53,130 --> 00:03:56,490 +table. So we reject in this case F0, otherwise, + +47 +00:03:56,890 --> 00:04:02,050 +sorry, we don't reject F0. So again, if the value + +48 +00:04:02,050 --> 00:04:05,350 +of chi-square statistic falls in this rejection + +49 +00:04:05,350 --> 00:04:10,280 +region, the yellow one, then we reject. Otherwise, + +50 +00:04:11,100 --> 00:04:13,900 +if this value, I mean if the value of the + +51 +00:04:13,900 --> 00:04:17,060 +statistic falls in non-rejection region, we don't + +52 +00:04:17,060 --> 00:04:21,680 +reject the null hypothesis. So the same concept as + +53 +00:04:21,680 --> 00:04:27,680 +we did in the previous chapters. 
If we go back to + +54 +00:04:27,680 --> 00:04:32,060 +the previous example we had discussed before, when + +55 +00:04:32,060 --> 00:04:36,620 +we are testing about gender and left and right + +56 +00:04:36,620 --> 00:04:41,340 +handers, So hand preference either left or right. + +57 +00:04:42,960 --> 00:04:49,320 +And the question is test to see whether hand + +58 +00:04:49,320 --> 00:04:53,100 +preference and gender are related or not. In this + +59 +00:04:53,100 --> 00:04:56,960 +case, your null hypothesis could be written as + +60 +00:04:56,960 --> 00:05:00,620 +either X0. + +61 +00:05:04,220 --> 00:05:07,160 +So the proportion of left-handers for female + +62 +00:05:07,160 --> 00:05:12,260 +equals the proportion of males left-handers. So by + +63 +00:05:12,260 --> 00:05:16,600 +one equals by two or H zero later we'll see that + +64 +00:05:16,600 --> 00:05:22,680 +the two variables of interest are independent. + +65 +00:05:32,810 --> 00:05:37,830 +Now, your B dash is + +66 +00:05:37,830 --> 00:05:42,250 +given by X1 plus X2 divided by N1 plus N2. X1 is + +67 +00:05:42,250 --> 00:05:51,930 +12, this 12, plus 24 divided by 300. That will + +68 +00:05:51,930 --> 00:05:57,310 +give 12%. So let me just write this notation, B + +69 +00:05:57,310 --> 00:05:57,710 +dash. + +70 +00:06:05,560 --> 00:06:13,740 +equals 36 by 300, so that's 12%. So the expected + +71 +00:06:13,740 --> 00:06:19,500 +frequency in this case for female, 0.12 times 120, + +72 +00:06:19,680 --> 00:06:22,140 +because there are 120 females in the data you + +73 +00:06:22,140 --> 00:06:25,520 +have, so that will give 14.4. So the expected + +74 +00:06:25,520 --> 00:06:30,680 +frequency is 0.12 times 180, 120, I'm sorry, + +75 +00:06:34,810 --> 00:06:39,590 +That will give 14.4. Similarly, for male to be + +76 +00:06:39,590 --> 00:06:43,390 +left-handed is 0.12 times number of females in the + +77 +00:06:43,390 --> 00:06:46,690 +sample, which is 180, and that will give 21.6. 
+ +78 +00:06:48,670 --> 00:06:53,190 +Now, since you compute the expected for the first + +79 +00:06:53,190 --> 00:06:57,590 +cell, the second one direct is just the complement + +80 +00:06:57,590 --> 00:07:03,020 +120. 120 is sample size for the Rome. I mean + +81 +00:07:03,020 --> 00:07:12,200 +female total 120 minus 14.4 will give 105.6. Or 0 + +82 +00:07:12,200 --> 00:07:18,050 +.88 times 120 will give the same value. Here, the + +83 +00:07:18,050 --> 00:07:21,730 +expected is 21.6, so the compliment is the, I'm + +84 +00:07:21,730 --> 00:07:25,130 +sorry, the expected is just the compliment, which + +85 +00:07:25,130 --> 00:07:32,010 +is 180 minus 21.6 will give 158.4. Or 0.88 is the + +86 +00:07:32,010 --> 00:07:35,090 +compliment of that one multiplied by 180 will give + +87 +00:07:35,090 --> 00:07:39,070 +the same value. So that's the one we had discussed + +88 +00:07:39,070 --> 00:07:39,670 +before. + +89 +00:07:42,410 --> 00:07:46,550 +On this result, you can determine the value of chi + +90 +00:07:46,550 --> 00:07:50,750 +-square statistic by using this equation. Sum of F + +91 +00:07:50,750 --> 00:07:53,810 +observed minus F expected squared divided by F + +92 +00:07:53,810 --> 00:07:57,450 +expected for each cell. You have to compute the + +93 +00:07:57,450 --> 00:08:00,870 +value of chi-square for each cell. In this case, + +94 +00:08:01,070 --> 00:08:04,250 +the simplest case is just 2 by 2 table. So 12 + +95 +00:08:04,250 --> 00:08:09,980 +minus 14.4 squared divided by 14.4. Plus the + +96 +00:08:09,980 --> 00:08:15,720 +second one 108 minus 105 squared divided by 105 up + +97 +00:08:15,720 --> 00:08:19,780 +to the last one, you will get this result. Now my + +98 +00:08:19,780 --> 00:08:21,900 +chi-square value is 0.7576. 
+ +99 +00:08:24,240 --> 00:08:28,140 +And in this case, if chi-square value is very + +100 +00:08:28,140 --> 00:08:31,180 +small, I mean it's close to zero, then we don't + +101 +00:08:31,180 --> 00:08:34,140 +reject the null hypothesis. Because the smallest + +102 +00:08:34,140 --> 00:08:37,500 +value of chi-square is zero, and zero happens only + +103 +00:08:37,500 --> 00:08:43,580 +if f observed is close to f expected. So here if + +104 +00:08:43,580 --> 00:08:46,920 +you look carefully for the observed and expected + +105 +00:08:46,920 --> 00:08:50,520 +frequencies, you can tell if you can reject or + +106 +00:08:50,520 --> 00:08:53,700 +don't reject the number. Now the difference + +107 +00:08:53,700 --> 00:08:58,960 +between these values looks small, so that's lead + +108 +00:08:58,960 --> 00:09:05,110 +to small chi-square. So without doing the critical + +109 +00:09:05,110 --> 00:09:08,210 +value, computer critical value, you can determine + +110 +00:09:08,210 --> 00:09:11,890 +that we don't reject the null hypothesis. Because + +111 +00:09:11,890 --> 00:09:16,070 +your chi-square value is very small. So we don't + +112 +00:09:16,070 --> 00:09:18,670 +reject the null hypothesis. Or if you look + +113 +00:09:18,670 --> 00:09:22,790 +carefully at the table, for the table we have + +114 +00:09:22,790 --> 00:09:26,410 +here, for chi-square table. By the way, the + +115 +00:09:26,410 --> 00:09:31,480 +smallest value of chi-square is 1.3. under 1 + +116 +00:09:31,480 --> 00:09:36,180 +degrees of freedom. So the smallest value 1.32. So + +117 +00:09:36,180 --> 00:09:39,360 +if your chi-square value is greater than 1, it + +118 +00:09:39,360 --> 00:09:41,920 +means maybe you reject or don't reject. It depends + +119 +00:09:41,920 --> 00:09:45,920 +on v value and alpha you have or degrees of + +120 +00:09:45,920 --> 00:09:50,280 +freedom. 
But in the worst scenario, if your chi + +121 +00:09:50,280 --> 00:09:53,780 +-square is smaller than this value, it means you + +122 +00:09:53,780 --> 00:09:57,600 +don't reject the null hypothesis. So generally + +123 +00:09:57,600 --> 00:10:02,120 +speaking, if Chi-square is statistical. It's + +124 +00:10:02,120 --> 00:10:06,420 +smaller than 1.32. 1.32 is a very small value. + +125 +00:10:06,940 --> 00:10:15,560 +Then we don't reject. Then we don't reject x0. + +126 +00:10:15,780 --> 00:10:24,220 +That's always, always true. Regardless of degrees + +127 +00:10:24,220 --> 00:10:31,050 +of freedom and alpha. My chi-square is close to + +128 +00:10:31,050 --> 00:10:35,710 +zero, or smaller than 1.32, because the minimum + +129 +00:10:35,710 --> 00:10:40,990 +value of critical value is 1.32. Imagine that we + +130 +00:10:40,990 --> 00:10:46,050 +are talking about alpha is 5%. So alpha is 5, so + +131 +00:10:46,050 --> 00:10:48,750 +your critical value, the smallest one for 1 + +132 +00:10:48,750 --> 00:10:53,850 +degrees of freedom, is 3.84. So that's my + +133 +00:10:53,850 --> 00:10:55,490 +smallest, if alpha + +134 +00:11:03,740 --> 00:11:08,680 +Last time we mentioned that this value is just 1 + +135 +00:11:08,680 --> 00:11:17,760 +.96 squared. And that's only true, only true for 2 + +136 +00:11:17,760 --> 00:11:24,180 +by 2 table. That means this square is just Chi + +137 +00:11:24,180 --> 00:11:29,470 +square 1. For this reason, we can test by one + +138 +00:11:29,470 --> 00:11:33,330 +equal by two, by two methods, either this + +139 +00:11:33,330 --> 00:11:37,750 +statistic or chi-square statistic. Both of them + +140 +00:11:37,750 --> 00:11:41,970 +will give the same result. So let's go back to the + +141 +00:11:41,970 --> 00:11:49,670 +question we have. My chi-square value is 0.77576. + +142 +00:11:52,160 --> 00:11:56,940 +So that's your chi-square statistic. 
Again, + +143 +00:11:57,500 --> 00:12:00,240 +degrees of freedom 1 to chi-square, the critical + +144 +00:12:00,240 --> 00:12:08,500 +value is 3.841. So my decision is we don't reject + +145 +00:12:08,500 --> 00:12:11,780 +the null hypothesis. My conclusion is there is not + +146 +00:12:11,780 --> 00:12:14,380 +sufficient evidence that two proportions are + +147 +00:12:14,380 --> 00:12:17,480 +different. So you don't have sufficient evidence + +148 +00:12:17,480 --> 00:12:21,900 +in order to support that the two proportions are + +149 +00:12:21,900 --> 00:12:27,720 +different at 5% level of significance. We stopped + +150 +00:12:27,720 --> 00:12:32,700 +last time at this point. Now suppose we are + +151 +00:12:32,700 --> 00:12:36,670 +testing The difference among more than two + +152 +00:12:36,670 --> 00:12:42,930 +proportions. The same steps, we have to extend in + +153 +00:12:42,930 --> 00:12:47,830 +this case chi-square. Your null hypothesis, by one + +154 +00:12:47,830 --> 00:12:50,990 +equal by two, all the way up to by C. So in this + +155 +00:12:50,990 --> 00:13:00,110 +case, there are C columns. C columns and + +156 +00:13:00,110 --> 00:13:05,420 +two rows. So number of columns equals C, and there + +157 +00:13:05,420 --> 00:13:10,520 +are only two rows. So pi 1 equals pi 2, all the + +158 +00:13:10,520 --> 00:13:13,840 +way up to pi C. So null hypothesis for the columns + +159 +00:13:13,840 --> 00:13:17,040 +we have. There are C columns. Again, it's the + +160 +00:13:17,040 --> 00:13:19,840 +alternative, not all of the pi J are equal, and J + +161 +00:13:19,840 --> 00:13:23,840 +equals 1 up to C. Now, the only difference here, + +162 +00:13:26,520 --> 00:13:27,500 +the degrees of freedom. + +163 +00:13:31,370 --> 00:13:32,850 +For 2 by c table, + +164 +00:13:35,710 --> 00:13:42,010 +2 by c, degrees of freedom equals number + +165 +00:13:42,010 --> 00:13:45,890 +of rows minus 1. 
There are two rows, so 2 minus 1 + +166 +00:13:45,890 --> 00:13:50,810 +times number of columns minus 1. 2 minus 1 is 1, c + +167 +00:13:50,810 --> 00:13:54,610 +minus 1, 1 times c minus 1, c minus 1. So your + +168 +00:13:54,610 --> 00:13:57,130 +degrees of freedom in this case is c minus 1. + +169 +00:14:00,070 --> 00:14:03,190 +So that's the only difference. For two by two + +170 +00:14:03,190 --> 00:14:07,130 +table, degrees of freedom is just one. If there + +171 +00:14:07,130 --> 00:14:10,670 +are C columns and we have the same number of rows, + +172 +00:14:11,450 --> 00:14:14,810 +degrees of freedom is C minus one. And we have the + +173 +00:14:14,810 --> 00:14:19,190 +same chi squared statistic, the same equation I + +174 +00:14:19,190 --> 00:14:23,890 +mean. And we have to extend also the overall + +175 +00:14:23,890 --> 00:14:27,330 +proportion instead of x1 plus x2 divided by n1 + +176 +00:14:27,330 --> 00:14:32,610 +plus n2. It becomes x1 plus x2 plus x3 all the way + +177 +00:14:32,610 --> 00:14:38,330 +up to xc because there are c columns divided by n1 + +178 +00:14:38,330 --> 00:14:41,910 +plus n2 all the way up to nc. So that's x over n. + +179 +00:14:43,540 --> 00:14:48,400 +So similarly we can reject the null hypothesis if + +180 +00:14:48,400 --> 00:14:52,260 +the value of chi-square statistic lies or falls in + +181 +00:14:52,260 --> 00:14:54,160 +the rejection region. + +182 +00:14:58,120 --> 00:15:01,980 +Other type of chi-square test is called chi-square + +183 +00:15:01,980 --> 00:15:07,380 +test of independence. Generally speaking, most of + +184 +00:15:07,380 --> 00:15:10,440 +the time there are more than two columns or more + +185 +00:15:10,440 --> 00:15:16,490 +than two rows. Now, suppose we have contingency + +186 +00:15:16,490 --> 00:15:22,370 +table that has R rows and C columns. 
And we are + +187 +00:15:22,370 --> 00:15:26,990 +interested to test to see whether the two + +188 +00:15:26,990 --> 00:15:31,390 +categorical variables are independent. That means + +189 +00:15:31,390 --> 00:15:35,600 +there is no relationship between them. Against the + +190 +00:15:35,600 --> 00:15:38,800 +alternative hypothesis, the two variables are + +191 +00:15:38,800 --> 00:15:42,040 +dependent. That means there is a relationship + +192 +00:15:42,040 --> 00:15:45,140 +between them. So test of independence. + +193 +00:15:47,780 --> 00:15:52,220 +Null hypothesis is always the two variables, I + +194 +00:15:52,220 --> 00:15:55,240 +mean, the two categorical variables are + +195 +00:15:55,240 --> 00:15:59,860 +independent. So it's zero. Always x and y, for + +196 +00:15:59,860 --> 00:16:02,860 +example, are independent. + +197 +00:16:06,330 --> 00:16:11,790 +This means there is no difference between them. I + +198 +00:16:11,790 --> 00:16:17,490 +mean, Y1 equals Y. Similarly, X and Y are + +199 +00:16:17,490 --> 00:16:19,850 +independent. So there is no difference between the + +200 +00:16:19,850 --> 00:16:23,030 +two populations of this notion. Against the + +201 +00:16:23,030 --> 00:16:27,010 +alternative hypothesis, either X and Y, you may + +202 +00:16:27,010 --> 00:16:29,150 +say that they are dependent. + +203 +00:16:31,630 --> 00:16:34,470 +So that means there exists a relationship between + +204 +00:16:34,470 --> 00:16:38,060 +them or They are related. + +205 +00:16:40,920 --> 00:16:45,300 +So tests of independence for chi-square test to + +206 +00:16:45,300 --> 00:16:47,480 +see whether or not the two variables are + +207 +00:16:47,480 --> 00:16:50,640 +independent. So your null, two variables are + +208 +00:16:50,640 --> 00:16:55,740 +independent against they are not independent. So + +209 +00:16:55,740 --> 00:16:58,060 +similar to the chi-square test for equality of + +210 +00:16:58,060 --> 00:17:02,640 +more than two proportions. 
So, in order to test to + +211 +00:17:02,640 --> 00:17:06,020 +see if more than two proportions are equal, you + +212 +00:17:06,020 --> 00:17:15,700 +cannot use this statistic. So, this statistic is + +213 +00:17:15,700 --> 00:17:27,600 +no longer appropriate or valid for more than two + +214 +00:17:27,600 --> 00:17:31,770 +proportions. In this case, you have to use chi + +215 +00:17:31,770 --> 00:17:37,310 +-square test. So this statistic can be used only + +216 +00:17:37,310 --> 00:17:40,970 +to test the difference between two proportions. + +217 +00:17:41,110 --> 00:17:44,110 +But for more than two, you have to use chi-square + +218 +00:17:44,110 --> 00:17:47,690 +test. So similar, chi-square test of independence + +219 +00:17:47,690 --> 00:17:52,470 +is similar to chi-square test for equality of more + +220 +00:17:52,470 --> 00:17:57,360 +than two proportions. But extend the concept. The + +221 +00:17:57,360 --> 00:18:02,100 +previous one was two rows and C columns, so two by + +222 +00:18:02,100 --> 00:18:05,940 +C. But here we extend the concept to contingency + +223 +00:18:05,940 --> 00:18:11,560 +tables with R rows and C columns. So we have the + +224 +00:18:11,560 --> 00:18:15,660 +case R by C. So that's in general, there are R + +225 +00:18:15,660 --> 00:18:23,060 +rows and C columns. And the question is this C, if + +226 +00:18:23,060 --> 00:18:27,480 +the two variables are independent or not. So in + +227 +00:18:27,480 --> 00:18:30,700 +this case, you cannot use this statistic. So one + +228 +00:18:30,700 --> 00:18:34,320 +more time, this statistic is valid only for two by + +229 +00:18:34,320 --> 00:18:38,020 +two tables. So that means we can use z or chi + +230 +00:18:38,020 --> 00:18:41,200 +-square to test if there is no difference between + +231 +00:18:41,200 --> 00:18:43,960 +two population proportions. But for more than + +232 +00:18:43,960 --> 00:18:46,700 +that, you have to use chi-square. 
+ +233 +00:18:49,950 --> 00:18:53,310 +Now still we have the same equation, Chi-square + +234 +00:18:53,310 --> 00:18:57,870 +statistic is just sum F observed minus F expected + +235 +00:18:57,870 --> 00:19:00,690 +quantity squared divided by F expected. + +236 +00:19:03,490 --> 00:19:07,550 +In this case, Chi-square statistic for R by C case + +237 +00:19:07,550 --> 00:19:15,430 +has degrees of freedom R minus 1 multiplied by C + +238 +00:19:15,430 --> 00:19:18,570 +minus 1. In this case, each cell in the + +239 +00:19:18,570 --> 00:19:21,230 +contingency table has expected frequency at least + +240 +00:19:21,230 --> 00:19:26,910 +one instead of five. Now let's see how can we + +241 +00:19:26,910 --> 00:19:31,690 +compute the expected cell frequency for each cell. + +242 +00:19:32,950 --> 00:19:37,530 +The expected frequency is given by row total + +243 +00:19:37,530 --> 00:19:42,950 +multiplied by colon total divided by n. So that's + +244 +00:19:42,950 --> 00:19:50,700 +my new equation to determine I've expected it. So + +245 +00:19:50,700 --> 00:19:56,440 +the expected value for each cell is given by Rho + +246 +00:19:56,440 --> 00:20:03,380 +total multiplied by Kono, total divided by N. + +247 +00:20:05,160 --> 00:20:09,540 +Also, this equation is true for the previous + +248 +00:20:09,540 --> 00:20:15,560 +example. If you go back a little bit here, now the + +249 +00:20:16,650 --> 00:20:21,650 +Expected for this cell was 40.4. Now let's see how + +250 +00:20:21,650 --> 00:20:25,470 +can we compute the same value by using this + +251 +00:20:25,470 --> 00:20:30,250 +equation. So it's equal to row total 120 + +252 +00:20:30,250 --> 00:20:40,310 +multiplied by column total 36 divided by 300. + +253 +00:20:43,580 --> 00:20:46,500 +Now before we compute this value by using B dash + +254 +00:20:46,500 --> 00:20:50,900 +first, 300 divided by, I'm sorry, 36 divided by + +255 +00:20:50,900 --> 00:20:58,520 +300. So that's your B dash. 
Then we multiply this + +256 +00:20:58,520 --> 00:21:03,540 +B dash by N, and this is your N. So it's similar + +257 +00:21:03,540 --> 00:21:08,540 +equation. So either you use row total multiplied + +258 +00:21:08,540 --> 00:21:14,060 +by column total. then divide by overall sample + +259 +00:21:14,060 --> 00:21:18,880 +size you will get the same result by using the + +260 +00:21:18,880 --> 00:21:25,520 +overall proportion 12% times 120 so each one will + +261 +00:21:25,520 --> 00:21:29,860 +give the same answer so from now we are going to + +262 +00:21:29,860 --> 00:21:33,900 +use this equation in order to compute the expected + +263 +00:21:33,900 --> 00:21:37,960 +frequency for each cell so again expected + +264 +00:21:37,960 --> 00:21:42,920 +frequency is rho total times Column total divided + +265 +00:21:42,920 --> 00:21:48,620 +by N, N is the sample size. So row total it means + +266 +00:21:48,620 --> 00:21:52,220 +sum of all frequencies in the row. Similarly + +267 +00:21:52,220 --> 00:21:56,160 +column total is the sum of all frequencies in the + +268 +00:21:56,160 --> 00:22:00,180 +column and N is over all sample size. + +269 +00:22:03,030 --> 00:22:06,630 +Again, we reject the null hypothesis if your chi + +270 +00:22:06,630 --> 00:22:10,430 +-square statistic greater than chi-square alpha. + +271 +00:22:10,590 --> 00:22:13,370 +Otherwise, you don't reject it. And keep in mind, + +272 +00:22:14,270 --> 00:22:18,390 +chi-square statistic has degrees of freedom R + +273 +00:22:18,390 --> 00:22:23,730 +minus 1 times C minus 1. That's all for chi-square + +274 +00:22:23,730 --> 00:22:27,590 +as test of independence. Any question? + +275 +00:22:31,220 --> 00:22:36,300 +Here there is an example for applying chi-square + +276 +00:22:36,300 --> 00:22:42,200 +test of independence. Meal plan selected + +277 +00:22:42,200 --> 00:22:46,700 +by 200 students is shown in this table. So there + +278 +00:22:46,700 --> 00:22:50,960 +are two variables of interest. 
The first one is + +279 +00:22:50,960 --> 00:22:56,230 +number of meals per week. And there are three + +280 +00:22:56,230 --> 00:23:00,550 +types of number of meals, either 20 meals per + +281 +00:23:00,550 --> 00:23:07,870 +week, or 10 meals per week, or none. So that's, so + +282 +00:23:07,870 --> 00:23:12,150 +number of meals is classified into three groups. + +283 +00:23:13,210 --> 00:23:17,650 +So three columns, 20 per week, 10 per week, or + +284 +00:23:17,650 --> 00:23:23,270 +none. Class standing, students are classified into + +285 +00:23:23,270 --> 00:23:28,860 +four levels. A freshman, it means students like + +286 +00:23:28,860 --> 00:23:33,620 +you, first year. Sophomore, it means second year. + +287 +00:23:34,440 --> 00:23:38,400 +Junior, third level. Senior, fourth level. So that + +288 +00:23:38,400 --> 00:23:42,100 +means first, second, third, and fourth level. And + +289 +00:23:42,100 --> 00:23:46,140 +we have this number, these numbers for, I mean, + +290 +00:23:47,040 --> 00:23:53,660 +there are 24 A freshman who have meals for 20 per + +291 +00:23:53,660 --> 00:23:59,880 +week. So there are 24 freshmen have 20 meals per + +292 +00:23:59,880 --> 00:24:04,160 +week. 22 sophomores, the same, 10 for junior and + +293 +00:24:04,160 --> 00:24:10,220 +14 for senior. And the question is just to see if + +294 +00:24:10,220 --> 00:24:13,740 +number of meals per week is independent of class + +295 +00:24:13,740 --> 00:24:17,270 +standing. to see if there is a relationship + +296 +00:24:17,270 --> 00:24:21,890 +between these two variables. In this case, there + +297 +00:24:21,890 --> 00:24:26,850 +are four rows because the class standing is + +298 +00:24:26,850 --> 00:24:29,190 +classified into four groups. So there are four + +299 +00:24:29,190 --> 00:24:34,230 +rows and three columns. So this table actually is + +300 +00:24:34,230 --> 00:24:40,200 +four by three. And there are twelve cells in this + +301 +00:24:40,200 --> 00:24:46,660 +case. 
Now it takes time to compute the expected
+
+302
+00:24:46,660 --> 00:24:49,760
+frequencies because in this case we have to
+
+303
+00:24:49,760 --> 00:24:55,120
+compute the expected frequency for each cell. And
+
+304
+00:24:55,120 --> 00:25:01,320
+we are going to use this formula for only six of
+
+305
+00:25:01,320 --> 00:25:06,260
+them. I mean, we can apply this formula for only
+
+306
+00:25:06,260 --> 00:25:09,880
+six of them. And the others can be computed by the
+
+307
+00:25:09,880 --> 00:25:14,300
+complement by using either column total or row
+
+308
+00:25:14,300 --> 00:25:19,940
+total. So because degrees of freedom is six, that
+
+309
+00:25:19,940 --> 00:25:23,880
+means you may use this rule six times only. The
+
+310
+00:25:23,880 --> 00:25:28,420
+others can be computed by using the complement. So
+
+311
+00:25:28,420 --> 00:25:34,070
+here again, the hypothesis to be tested is, Meal
+
+312
+00:25:34,070 --> 00:25:36,550
+plan and class standing are independent, that
+
+313
+00:25:36,550 --> 00:25:38,670
+means there is no relationship between them.
+
+314
+00:25:39,150 --> 00:25:41,650
+Against alternative hypothesis, meal plan and
+
+315
+00:25:41,650 --> 00:25:44,630
+class standing are dependent, that means there
+
+316
+00:25:44,630 --> 00:25:49,950
+exists significant relationship between them. Now
+
+317
+00:25:49,950 --> 00:25:54,390
+let's see how can we compute the expected cell,
+
+318
+00:25:55,990 --> 00:26:00,470
+the expected frequency for each cell. For example,
+
+319
+00:26:02,250 --> 00:26:07,790
+The first observed frequency is 24. Now the
+
+320
+00:26:07,790 --> 00:26:15,990
+expected should be 70 times 70 divided by 200. So
+
+321
+00:26:15,990 --> 00:26:25,050
+for cell 11, the first cell. If expected, we can
+
+322
+00:26:25,050 --> 00:26:32,450
+use this notation, 11. Means first row. First
+
+323
+00:26:32,450 --> 00:26:40,110
+column. That should be 70. It is 70. 
Multiplied by + +324 +00:26:40,110 --> 00:26:43,990 +column totals. Again, in this case, 70. Multiplied + +325 +00:26:43,990 --> 00:26:47,270 +by 200. That will give 24.5. + +326 +00:26:50,150 --> 00:26:53,730 +Similarly, for the second cell, for 32. + +327 +00:26:56,350 --> 00:27:00,090 +70 times 88 divided by 200. + +328 +00:27:02,820 --> 00:27:12,620 +So for F22, again it's 70 times 88 divided by 200, + +329 +00:27:12,800 --> 00:27:22,060 +that will get 30.8. So 70 times 88, that will give + +330 +00:27:22,060 --> 00:27:32,780 +30.8. F21, rule two first, one third. rho 1 second + +331 +00:27:32,780 --> 00:27:37,600 +one the third one now either you can use the same + +332 +00:27:37,600 --> 00:27:44,320 +equation which is 70 times 42 so you can use 70 + +333 +00:27:44,320 --> 00:27:54,360 +times 42 divided by 200 that will give 14.7 or + +334 +00:27:54,360 --> 00:27:59,000 +it's just the complement which is 70 minus + +335 +00:28:03,390 --> 00:28:14,510 +24.5 plus 30.8. So either use 70 multiplied by 40 + +336 +00:28:14,510 --> 00:28:19,390 +divided by 200 or just the complement, 70 minus. + +337 +00:28:20,800 --> 00:28:28,400 +24.5 plus 30.8 will give the same value. So I just + +338 +00:28:28,400 --> 00:28:32,740 +compute the expected cell for 1 and 2, and the + +339 +00:28:32,740 --> 00:28:36,120 +third one is just the complement. Similarly, for + +340 +00:28:36,120 --> 00:28:42,560 +the second row, I mean cell 21, then 22, and 23. + +341 +00:28:43,680 --> 00:28:47,940 +By using the same method, he will get these two + +342 +00:28:47,940 --> 00:28:51,880 +values, and the other one is the complement, which + +343 +00:28:51,880 --> 00:28:54,880 +is 60 minus these, the sum of these two values, + +344 +00:28:55,300 --> 00:28:55,960 +will give 12. 
+ +345 +00:28:58,720 --> 00:29:01,920 +Similarly, for the third cell, I'm sorry, the + +346 +00:29:01,920 --> 00:29:07,460 +third row, for this value, For 10, it's 30 times + +347 +00:29:07,460 --> 00:29:12,660 +70 divided by 200 will give this result. And the + +348 +00:29:12,660 --> 00:29:16,060 +other one is just 30 multiplied by 88 divided by + +349 +00:29:16,060 --> 00:29:20,200 +200. The other one is just the complement, 30 + +350 +00:29:20,200 --> 00:29:25,180 +minus the sum of these. Now, for the last column, + +351 +00:29:26,660 --> 00:29:35,220 +either 70 multiplied by 70 divided by 200, or 70 + +352 +00:29:35,220 --> 00:29:41,780 +this 70 minus the sum of these. 70 this one equals + +353 +00:29:41,780 --> 00:29:51,740 +70 minus the sum of 24 plus 21 plus 10. That will + +354 +00:29:51,740 --> 00:30:01,120 +give 14. Now for the other expected cell, 88. + +355 +00:30:02,370 --> 00:30:05,530 +minus the sum of these three expected frequencies. + +356 +00:30:07,290 --> 00:30:12,810 +Now for the last one, last one is either by 42 + +357 +00:30:12,810 --> 00:30:17,770 +minus the sum of these three, or 40 minus the sum + +358 +00:30:17,770 --> 00:30:20,090 +of 14 plus 6, 17.6. + +359 +00:30:22,810 --> 00:30:27,940 +Or 40 multiplied by 42 divided by 400. So let's + +360 +00:30:27,940 --> 00:30:35,180 +say we use that formula six times. For this + +361 +00:30:35,180 --> 00:30:39,100 +reason, degrees of freedom is six. The other six + +362 +00:30:39,100 --> 00:30:46,480 +are computed by the complement as we mentioned. So + +363 +00:30:46,480 --> 00:30:50,240 +these are the expected frequencies. It takes time + +364 +00:30:50,240 --> 00:30:56,010 +to compute these. But if you have only two by two + +365 +00:30:56,010 --> 00:31:01,170 +table, it's easier. Now based on that, we can + +366 +00:31:01,170 --> 00:31:07,430 +compute chi-square statistic value by using this + +367 +00:31:07,430 --> 00:31:12,390 +equation for each cell. 
I mean, the first one, if + +368 +00:31:12,390 --> 00:31:14,370 +you go back a little bit to the previous table, + +369 +00:31:15,150 --> 00:31:18,130 +here, in order to compute chi-square, + +370 +00:31:22,640 --> 00:31:27,760 +value, we have to use this equation, pi squared, + +371 +00:31:28,860 --> 00:31:36,080 +sum F observed minus F expected squared, divided + +372 +00:31:36,080 --> 00:31:41,980 +by F expected for all C's. So the first one is 24 + +373 +00:31:41,980 --> 00:31:44,780 +minus squared, + +374 +00:31:46,560 --> 00:31:55,350 +24 plus. The second cell is 32 squared + +375 +00:31:55,350 --> 00:31:58,990 +plus + +376 +00:31:58,990 --> 00:32:02,930 +all the way up to the last cell, which is 10. + +377 +00:32:11,090 --> 00:32:14,430 +So it takes time. But again, for two by two, it's + +378 +00:32:14,430 --> 00:32:18,890 +straightforward. Anyway, now if you compare the + +379 +00:32:18,890 --> 00:32:23,650 +expected and observed cells, you can have an idea + +380 +00:32:23,650 --> 00:32:25,650 +either to reject or fail to reject without + +381 +00:32:25,650 --> 00:32:31,470 +computing the value itself. Now, 24, 24.5. The + +382 +00:32:31,470 --> 00:32:32,430 +difference is small. + +383 +00:32:35,730 --> 00:32:39,070 +for about 7 and so on. So the difference between + +384 +00:32:39,070 --> 00:32:44,450 +observed and expected looks small. In this case, + +385 +00:32:44,590 --> 00:32:50,530 +chi-square value is close to zero. So it's 709. + +386 +00:32:51,190 --> 00:32:55,370 +Now, without looking at the table we have, we have + +387 +00:32:55,370 --> 00:33:02,710 +to don't reject. So we don't reject Because as we + +388 +00:33:02,710 --> 00:33:05,450 +mentioned, the minimum k squared value is 1132. + +389 +00:33:06,350 --> 00:33:09,670 +That's for one degrees of freedom and the alpha is + +390 +00:33:09,670 --> 00:33:14,390 +25%. 
So + +391 +00:33:14,390 --> 00:33:19,250 +I expect my decision is don't reject the null + +392 +00:33:19,250 --> 00:33:24,530 +hypothesis. Now by looking at k squared 5% and + +393 +00:33:24,530 --> 00:33:28,870 +degrees of freedom 6 by using k squared theorem. + +394 +00:33:30,200 --> 00:33:36,260 +Now degrees of freedom 6. Now the minimum value of + +395 +00:33:36,260 --> 00:33:40,520 +Chi-square is 7.84. I mean critical value. But + +396 +00:33:40,520 --> 00:33:48,290 +under 5% is 12.59. So this value is 12.59. So + +397 +00:33:48,290 --> 00:33:54,470 +critical value is 12.59. So my rejection region is + +398 +00:33:54,470 --> 00:33:59,890 +above this value. Now, my chi-square value falls + +399 +00:33:59,890 --> 00:34:06,250 +in the non-rejection regions. It's very small + +400 +00:34:06,250 --> 00:34:13,850 +value. So chi-square statistic is 0.709. + +401 +00:34:14,230 --> 00:34:20,620 +It's much smaller. Not even smaller than π²α, it's + +402 +00:34:20,620 --> 00:34:23,580 +much smaller than this value, so it means we don't + +403 +00:34:23,580 --> 00:34:26,440 +have sufficient evidence to support the + +404 +00:34:26,440 --> 00:34:32,010 +alternative hypothesis. So my decision is, don't + +405 +00:34:32,010 --> 00:34:36,350 +reject the null hypothesis. So conclusion, there + +406 +00:34:36,350 --> 00:34:41,150 +is not sufficient evidence that Mealy Plan, which + +407 +00:34:41,150 --> 00:34:45,310 +was classified into three groups, 20 per week or + +408 +00:34:45,310 --> 00:34:50,310 +10 per week or none, and class standing. which is + +409 +00:34:50,310 --> 00:34:54,750 +classified into four groups, freshman, sophomore, + +410 +00:34:55,010 --> 00:34:58,030 +junior, and senior are related. So you don't have + +411 +00:34:58,030 --> 00:35:00,690 +sufficient evidence that they are related. It + +412 +00:35:00,690 --> 00:35:05,630 +means they are independent. So the two variables + +413 +00:35:05,630 --> 00:35:13,590 +in this case are independent. 
+ +414 +00:35:18,420 --> 00:35:21,520 +It means there is no relationship between number + +415 +00:35:21,520 --> 00:35:25,000 +of meals and class standing. It means the + +416 +00:35:25,000 --> 00:35:30,320 +proportions are equal. So this means pi 1 equals + +417 +00:35:30,320 --> 00:35:34,560 +pi 2 equals pi 3. So the three proportions are + +418 +00:35:34,560 --> 00:35:40,100 +equal. Pi 1 for 20 meals per week is the same as + +419 +00:35:40,100 --> 00:35:46,960 +10 or none according to class standing. Any + +420 +00:35:46,960 --> 00:35:52,600 +question? I think it's straightforward test, maybe + +421 +00:35:52,600 --> 00:35:59,140 +even easier than using a T statistic. And that's + +422 +00:35:59,140 --> 00:36:05,840 +all for this chapter. Any questions? I will do + +423 +00:36:05,840 --> 00:36:12,360 +some practice problems for chapter 11. These + +424 +00:36:12,360 --> 00:36:16,160 +problems will be posted in the course website this + +425 +00:36:16,160 --> 00:36:19,220 +week, sometime this week, maybe tomorrow or after + +426 +00:36:19,220 --> 00:36:22,840 +tomorrow. So Monday or Tuesday I'm going to post + +427 +00:36:22,840 --> 00:36:27,280 +the practice problems and solutions for chapter + +428 +00:36:27,280 --> 00:36:31,700 +11. So let's do some of these problems. + +429 +00:36:40,160 --> 00:36:43,260 +Let's do some of multiple choice problems. + +430 +00:36:55,000 --> 00:36:59,420 +When testing for independence in contingency table + +431 +00:36:59,420 --> 00:37:03,840 +with three rows and + +432 +00:37:03,840 --> 00:37:10,250 +four columns. So there are three rows, four + +433 +00:37:10,250 --> 00:37:18,150 +columns. There are degrees of freedom. So degrees + +434 +00:37:18,150 --> 00:37:23,310 +of freedom. R minus one multiplied by C minus one. + +435 +00:37:24,090 --> 00:37:28,630 +Two times three is six. So there are six degrees + +436 +00:37:28,630 --> 00:37:32,130 +of freedom. Second question. 
+
+437
+00:37:36,710 --> 00:37:43,150
+If we wish to determine whether there is evidence
+
+438
+00:37:43,150 --> 00:37:46,890
+that the proportion of items of interest is the
+
+439
+00:37:46,890 --> 00:37:51,510
+same in group 1 as in group 2, the appropriate
+
+440
+00:37:51,510 --> 00:37:57,700
+test to use is. So here we are testing Pi 1 equals
+
+441
+00:37:57,700 --> 00:38:01,040
+Pi 2, so there are two populations.
+
+442
+00:38:02,480 --> 00:38:08,720
+The answer is A. Z statistic, Z test, Chi squared,
+
+443
+00:38:09,740 --> 00:38:13,840
+both A and B, neither A, neither of A nor B.
+
+444
+00:38:16,320 --> 00:38:19,540
+Exactly, the answer is C because we can use either
+
+445
+00:38:19,540 --> 00:38:25,080
+Z statistic or Chi squared. So Z or Chi. can be
+
+446
+00:38:25,080 --> 00:38:28,920
+used for testing difference between two population
+
+447
+00:38:28,920 --> 00:38:34,360
+proportions. And again, chi-square can be extended
+
+448
+00:38:34,360 --> 00:38:40,140
+to use for more than two. So in this case, the
+
+449
+00:38:40,140 --> 00:38:43,220
+correct answer is C, because we can use either Z
+
+450
+00:38:43,220 --> 00:38:52,090
+or chi-square test. Next, in testing, hypothesis
+
+451
+00:38:52,090 --> 00:38:58,350
+using chi-square test. The theoretical frequencies
+
+452
+00:38:58,350 --> 00:39:03,190
+are based on null hypothesis, alternative, normal
+
+453
+00:39:03,190 --> 00:39:06,490
+distribution, none of the above. Always when we
+
+454
+00:39:06,490 --> 00:39:10,450
+are using chi-square test, we assume the null is
+
+455
+00:39:10,450 --> 00:39:14,630
+true. So the theoretical frequencies are based on
+
+456
+00:39:14,630 --> 00:39:20,060
+the null hypothesis. So always any statistic can
+
+457
+00:39:20,060 --> 00:39:25,300
+be computed if we assume H0 is correct. So the
+
+458
+00:39:25,300 --> 00:39:26,400
+correct answer is A.
+
+459
+00:39:34,060 --> 00:39:37,040
+Let's look at table 11-2. 
+ +460 +00:39:44,280 --> 00:39:49,000 +Many companies use well-known celebrities as + +461 +00:39:49,000 --> 00:39:54,420 +spokespersons in their TV advertisements. A study + +462 +00:39:54,420 --> 00:39:57,760 +was conducted to determine whether brand awareness + +463 +00:39:57,760 --> 00:40:02,140 +of female TV viewers and the gender of the + +464 +00:40:02,140 --> 00:40:05,860 +spokesperson are independent. So there are two + +465 +00:40:05,860 --> 00:40:09,820 +variables, whether a brand awareness of female TV + +466 +00:40:09,820 --> 00:40:13,740 +and gender of the spokesperson are independent. + +467 +00:40:14,820 --> 00:40:19,540 +Each and a sample of 300 female TV viewers was + +468 +00:40:19,540 --> 00:40:24,000 +asked to identify a product advertised by a + +469 +00:40:24,000 --> 00:40:27,000 +celebrity spokesperson, the gender of the + +470 +00:40:27,000 --> 00:40:32,280 +spokesperson, and whether or not the viewer could + +471 +00:40:32,280 --> 00:40:36,460 +identify the product was recorded. The number in + +472 +00:40:36,460 --> 00:40:40,080 +each category are given below. Now, the questions + +473 +00:40:40,080 --> 00:40:45,520 +are, number one, he asked about the calculated + +474 +00:40:45,520 --> 00:40:49,120 +this statistic is. We have to find Chi-square + +475 +00:40:49,120 --> 00:40:54,020 +statistic. It's two by two tables, easy one. So, + +476 +00:40:54,460 --> 00:40:59,460 +for example, to find the F expected is, + +477 +00:41:00,420 --> 00:41:13,130 +rho total is one over two. And one line here. And + +478 +00:41:13,130 --> 00:41:13,810 +this 150. + +479 +00:41:16,430 --> 00:41:22,510 +And also 150. So the expected frequency for the + +480 +00:41:22,510 --> 00:41:31,010 +first one is 102 times 150 divided by 300. + +481 +00:41:35,680 --> 00:41:39,640 +So the answer is 51. + +482 +00:41:42,880 --> 00:41:51,560 +So the first expected is 51. The other one is just + +483 +00:41:51,560 --> 00:41:54,360 +102 minus 51 is also 51. 
+ +484 +00:41:57,320 --> 00:42:01,020 +Now here is 99. + +485 +00:42:09,080 --> 00:42:15,180 +So the second + +486 +00:42:15,180 --> 00:42:18,800 +one are the expected frequencies. So my chi-square + +487 +00:42:18,800 --> 00:42:22,400 +statistic is + +488 +00:42:22,400 --> 00:42:32,260 +41 minus 51 squared divided by 51 plus 61 minus 51 + +489 +00:42:32,260 --> 00:42:44,160 +squared. 561 plus 109 minus 99 squared 99 plus 89 + +490 +00:42:44,160 --> 00:42:47,040 +minus 99 squared. + +491 +00:42:49,080 --> 00:42:53,140 +That will give 5 point. + +492 +00:42:57,260 --> 00:43:01,760 +So the answer is 5.9418. + +493 +00:43:03,410 --> 00:43:06,210 +So simple calculation will give this result. Now, + +494 +00:43:06,450 --> 00:43:10,370 +next one, referring to the same information we + +495 +00:43:10,370 --> 00:43:15,890 +have at 5% level of significance, the critical + +496 +00:43:15,890 --> 00:43:18,510 +value of that statistic. In this case, we are + +497 +00:43:18,510 --> 00:43:22,690 +talking about 2 by 2 table, and alpha is 5. So + +498 +00:43:22,690 --> 00:43:28,130 +your critical value is 3 point. So chi squared + +499 +00:43:28,130 --> 00:43:31,610 +alpha, 5% and 1 degrees of freedom. + +500 +00:43:35,000 --> 00:43:39,220 +This is the smallest value when alpha is 5%, so 3 + +501 +00:43:39,220 --> 00:43:41,440 +.8415. + +502 +00:43:46,160 --> 00:43:51,760 +Again, degrees of freedom of this statistic are 1, + +503 +00:43:52,500 --> 00:43:53,800 +2 by 2 is 1. + +504 +00:43:56,380 --> 00:44:01,620 +Now at 5% level of significance, the conclusion is + +505 +00:44:01,620 --> 00:44:01,980 +that + +506 +00:44:06,840 --> 00:44:16,380 +In this case, we reject H0. And H0 says the two + +507 +00:44:16,380 --> 00:44:20,800 +variables are independent. X and Y are + +508 +00:44:20,800 --> 00:44:25,860 +independent. We reject that they are independent. + +509 +00:44:27,380 --> 00:44:33,200 +That means they are dependent or related. 
So, A, + +510 +00:44:33,520 --> 00:44:36,680 +brand awareness of female TV viewers and the + +511 +00:44:36,680 --> 00:44:41,380 +gender of the spokesperson are independent. No, + +512 +00:44:41,580 --> 00:44:45,200 +because we reject the null hypothesis. B, brand + +513 +00:44:45,200 --> 00:44:48,340 +awareness of female TV viewers and the gender of + +514 +00:44:48,340 --> 00:44:53,140 +spokesperson are not independent. Since we reject, + +515 +00:44:53,380 --> 00:44:58,330 +then they are not. Because it's a complement. So, + +516 +00:44:58,430 --> 00:45:02,810 +B is the correct answer. Now, C. A brand awareness + +517 +00:45:02,810 --> 00:45:05,450 +of female TV viewers and the gender of the + +518 +00:45:05,450 --> 00:45:10,550 +spokesperson are related. The same meaning. They + +519 +00:45:10,550 --> 00:45:15,470 +are either, you say, not independent, related or + +520 +00:45:15,470 --> 00:45:15,950 +dependent. + +521 +00:45:19,490 --> 00:45:24,930 +Either is the same, so C is correct. D both B and + +522 +00:45:24,930 --> 00:45:28,970 +C, so D is the correct answer. So again, if we + +523 +00:45:28,970 --> 00:45:31,650 +reject the null hypothesis, it means the two + +524 +00:45:31,650 --> 00:45:36,990 +variables either not independent or related or + +525 +00:45:36,990 --> 00:45:38,290 +dependent. + +526 +00:45:40,550 --> 00:45:46,630 +Any question? I will stop at this point. Next + +527 +00:45:46,630 --> 00:45:47,750 +time, inshallah, we'll start. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4.srt new file mode 100644 index 0000000000000000000000000000000000000000..72f5582888c3d63a2d430f6a1b04c225c03b19d4 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4.srt @@ -0,0 +1,1346 @@ + +1 +00:00:09,320 --> 00:00:15,210 +The second material exam. Question number one. 
a + +2 +00:00:15,210 --> 00:00:20,170 +corporation randomly selected or selects 150 + +3 +00:00:20,170 --> 00:00:25,430 +salespeople and finds that 66% who have never + +4 +00:00:25,430 --> 00:00:29,930 +taken self-improvement course would like such a + +5 +00:00:29,930 --> 00:00:35,350 +course. So in this case, currently, they select + +6 +00:00:35,350 --> 00:00:52,170 +150 salespeople and find that 66% would + +7 +00:00:52,170 --> 00:00:59,130 +like or who have never taken this course. The firm + +8 +00:00:59,130 --> 00:01:05,110 +did a similar study 10 years ago in which 60% of a + +9 +00:01:05,110 --> 00:01:10,290 +random sample of 160 salespeople wanted a self + +10 +00:01:10,290 --> 00:01:11,830 +-improvement course. + +11 +00:01:15,620 --> 00:01:21,520 +They select a random sample of 160 and tell that + +12 +00:01:21,520 --> 00:01:30,260 +60% would like to take this course. So we have + +13 +00:01:30,260 --> 00:01:34,120 +here two information about previous study and + +14 +00:01:34,120 --> 00:01:37,440 +currently. So currently we have this information. + +15 +00:01:39,660 --> 00:01:44,860 +The sample size was 150, with a proportion 66% for + +16 +00:01:44,860 --> 00:01:47,920 +the people who would like to attend or take this + +17 +00:01:47,920 --> 00:01:51,280 +course. Mid-Paiwan and Pai Tu represent the true + +18 +00:01:51,280 --> 00:01:55,260 +proportion, it means the population proportion, of + +19 +00:01:55,260 --> 00:01:57,800 +workers who would like to attend a self + +20 +00:01:57,800 --> 00:02:01,140 +-improvement course in the recent study and the + +21 +00:02:01,140 --> 00:02:05,400 +past studies in Taiwan. So recent, Paiwan. + +22 +00:02:07,740 --> 00:02:12,100 +And Pi 2 is the previous study. This weather, this + +23 +00:02:12,100 --> 00:02:17,100 +proportion has changed from the previous study by + +24 +00:02:17,100 --> 00:02:21,580 +using two approaches. Critical value approach and + +25 +00:02:21,580 --> 00:02:26,920 +B value approach. 
So here we are talking about Pi + +26 +00:02:26,920 --> 00:02:32,650 +1 equals Pi 2. Since the problem says that The + +27 +00:02:32,650 --> 00:02:35,870 +proportion has changed. You don't know the exact + +28 +00:02:35,870 --> 00:02:39,610 +direction, either greater than or smaller than. So + +29 +00:02:39,610 --> 00:02:45,810 +this one should be Y1 does not equal Y2. So step + +30 +00:02:45,810 --> 00:02:49,050 +one, you have to state the appropriate null and + +31 +00:02:49,050 --> 00:02:50,930 +alternative hypothesis. + +32 +00:02:53,330 --> 00:02:58,150 +Second step, compute the value of the test + +33 +00:02:58,150 --> 00:03:01,510 +statistic. In this case, your Z statistic should + +34 +00:03:01,510 --> 00:03:09,910 +be P1 minus P2 minus Pi 1 minus Pi 2, under the + +35 +00:03:09,910 --> 00:03:17,310 +square root of P dash 1 minus P dash times 1 over + +36 +00:03:17,310 --> 00:03:24,550 +N1 plus 1 over N1. Now, P1 and P2 are given under + +37 +00:03:24,550 --> 00:03:29,730 +the null hypothesis Pi 1 minus Pi 2 is 0. So here + +38 +00:03:29,730 --> 00:03:32,770 +we have to compute P dash, which is the overall + +39 +00:03:35,350 --> 00:03:40,710 +B dash equals x1 plus x2 divided by n1 plus n2. + +40 +00:03:42,170 --> 00:03:45,150 +Now these x's, I mean the number of successes are + +41 +00:03:45,150 --> 00:03:49,450 +not given directly in this problem, but we can + +42 +00:03:49,450 --> 00:03:54,050 +figure out the values of x1 and x2 by using this + +43 +00:03:54,050 --> 00:03:58,610 +information, which is n1 equals 150 and b1 equals + +44 +00:03:58,610 --> 00:04:03,210 +66%. Because we know that b1 equals x1 over n1. + +45 +00:04:06,860 --> 00:04:14,360 +So, by using this equation, X1 equals N1 times V1. + +46 +00:04:16,920 --> 00:04:28,100 +N1 150 times 66 percent, that will give 150 times + +47 +00:04:28,100 --> 00:04:35,940 +66, so that's 99. So 150 times, it's 99. + +48 +00:04:41,690 --> 00:04:49,670 +Similarly, X2 equals N2 times V2. 
N2 is given by + +49 +00:04:49,670 --> 00:04:53,770 +160, so 160 times 60 percent, + +50 +00:04:55,750 --> 00:05:02,550 +96. So the number of successes are 96 for the + +51 +00:05:02,550 --> 00:05:06,070 +second, for the previous. Nine nine. + +52 +00:05:11,270 --> 00:05:16,410 +So B dash equals x1 99 + +53 +00:05:16,410 --> 00:05:28,330 +plus 96 divided by n1 plus n2, 350. And that will + +54 +00:05:28,330 --> 00:05:34,850 +give the overall proportions divided by 310, 0 + +55 +00:05:34,850 --> 00:05:35,510 +.629. + +56 +00:05:40,870 --> 00:05:44,570 +So, this is the value of the overall proportion. + +57 +00:05:45,390 --> 00:05:50,650 +Now, B dash equals 1.629. So, 1 times 1 minus B + +58 +00:05:50,650 --> 00:05:54,970 +dash is 1 minus this value times 1 over N1, 1 over + +59 +00:05:54,970 --> 00:06:00,550 +150 plus 1 over 160. Simple calculation will give + +60 +00:06:01,460 --> 00:06:07,280 +The value of z, which is in this case 1.093. + +61 +00:06:07,780 --> 00:06:10,620 +So just plug this information into this equation, + +62 +00:06:11,340 --> 00:06:19,320 +you will get z value, which is 1.093. He asked to + +63 +00:06:19,320 --> 00:06:21,780 +do this problem by using two approaches, critical + +64 +00:06:21,780 --> 00:06:25,180 +value and b value. Let's start with the first one, + +65 +00:06:26,780 --> 00:06:27,780 +b value approach. + +66 +00:06:32,710 --> 00:06:36,330 +Now your B value or critical value, start with + +67 +00:06:36,330 --> 00:06:37,050 +critical value. + +68 +00:06:40,850 --> 00:06:46,490 +Now since we are taking about a two-sided test, so + +69 +00:06:46,490 --> 00:06:50,170 +there are two critical values which are plus or + +70 +00:06:50,170 --> 00:06:54,670 +minus Z alpha over. Alpha is given by five + +71 +00:06:54,670 --> 00:06:56,990 +percent, so in this case + +72 +00:06:59,630 --> 00:07:03,370 +is equal to plus or minus 1.96. 
+ +73 +00:07:05,930 --> 00:07:10,010 +Now, does this value, I mean does the value of + +74 +00:07:10,010 --> 00:07:14,910 +this statistic which is 1.093 fall in the critical + +75 +00:07:14,910 --> 00:07:22,730 +region? Now, my critical regions are above 196 or + +76 +00:07:22,730 --> 00:07:28,130 +below negative 1.96. Now this value actually falls + +77 +00:07:29,300 --> 00:07:32,420 +In the non-rejection region, so we don't reject + +78 +00:07:32,420 --> 00:07:36,160 +the null hypothesis. So my decision, don't reject + +79 +00:07:36,160 --> 00:07:39,980 +the null hypothesis. That means there is not + +80 +00:07:39,980 --> 00:07:43,420 +sufficient evidence to support the alternative + +81 +00:07:43,420 --> 00:07:46,960 +which states that the proportion has changed from + +82 +00:07:46,960 --> 00:07:51,290 +the previous study. So we don't reject the null + +83 +00:07:51,290 --> 00:07:54,010 +hypothesis. It means there is not sufficient + +84 +00:07:54,010 --> 00:07:58,050 +evidence to support the alternative hypothesis. + +85 +00:07:58,270 --> 00:08:02,010 +That means you cannot say that the proportion has + +86 +00:08:02,010 --> 00:08:05,530 +changed from the previous study. That by using + +87 +00:08:05,530 --> 00:08:09,650 +critical value approach. Now what's about p-value? + +88 +00:08:11,830 --> 00:08:16,170 +In order to determine the p-value, + +89 +00:08:19,460 --> 00:08:23,320 +We have to find the probability that the Z + +90 +00:08:23,320 --> 00:08:28,060 +statistic fall in the rejection regions. So that + +91 +00:08:28,060 --> 00:08:36,260 +means Z greater than my values 1093 or + +92 +00:08:36,260 --> 00:08:41,060 +Z smaller than negative 1.093. + +93 +00:08:45,450 --> 00:08:49,730 +1093 is the same as the left of negative, so they + +94 +00:08:49,730 --> 00:08:52,810 +are the same because of symmetry. So just take 1 + +95 +00:08:52,810 --> 00:08:54,050 +and multiply by 2. 
+ +96 +00:08:58,430 --> 00:09:03,070 +Now simple calculation will give the value of 0 + +97 +00:09:03,070 --> 00:09:09,950 +.276 in chapter 6. So go back to chapter 6 to + +98 +00:09:09,950 --> 00:09:13,290 +figure out how can we calculate the probability of + +99 +00:09:13,290 --> 00:09:19,830 +Z greater than 1.0938. Now my B value is 0.276, + +100 +00:09:20,030 --> 00:09:25,190 +always we reject the null hypothesis if my B value + +101 +00:09:25,190 --> 00:09:29,050 +is smaller than alpha. Now this value is much much + +102 +00:09:29,050 --> 00:09:31,210 +bigger than alpha, so we don't reject the null + +103 +00:09:31,210 --> 00:09:36,710 +hypothesis. So since my B value is much greater + +104 +00:09:36,710 --> 00:09:42,650 +than alpha, that means we don't reject the null + +105 +00:09:42,650 --> 00:09:46,810 +hypothesis, so we reach the same conclusion, that + +106 +00:09:46,810 --> 00:09:49,270 +there is not sufficient evidence to support the + +107 +00:09:49,270 --> 00:09:55,270 +alternative. Also, we can perform the test by + +108 +00:09:55,270 --> 00:09:59,810 +using confidence interval approach, because here + +109 +00:09:59,810 --> 00:10:02,850 +we are talking about two-tailed test. Your + +110 +00:10:02,850 --> 00:10:06,670 +confidence interval is given by + +111 +00:10:10,620 --> 00:10:17,280 +B1 minus B2 plus + +112 +00:10:17,280 --> 00:10:23,720 +or minus Z alpha over 2 times B + +113 +00:10:23,720 --> 00:10:30,120 +dash 1 minus B dash multiplied by 1 over N1 plus 1 + +114 +00:10:30,120 --> 00:10:37,520 +over N2. By the way, this one + +115 +00:10:37,520 --> 00:10:43,320 +called the margin of error. So z times square root + +116 +00:10:43,320 --> 00:10:45,940 +of this sequence is called the margin of error, + +117 +00:10:46,940 --> 00:10:52,280 +and the square root itself is called the standard + +118 +00:10:52,280 --> 00:10:59,560 +error of the point estimate of pi 1 minus pi 2, + +119 +00:10:59,720 --> 00:11:04,430 +which is P1 minus P2. 
So square root of b dash 1 + +120 +00:11:04,430 --> 00:11:07,650 +minus b dash multiplied by 1 over n1 plus 1 over + +121 +00:11:07,650 --> 00:11:12,270 +n2 is called the standard error of the estimate of + +122 +00:11:12,270 --> 00:11:15,910 +pi 1 minus pi 2. So this is standard estimate of + +123 +00:11:15,910 --> 00:11:21,750 +b1 minus b2. Simply, you will get the confidence + +124 +00:11:21,750 --> 00:11:26,470 +interval to be between pi 1 minus the difference + +125 +00:11:26,470 --> 00:11:32,620 +between the two proportions, 4 between negative. 0 + +126 +00:11:32,620 --> 00:11:37,160 +.5 and + +127 +00:11:37,160 --> 00:11:38,940 +0.7. + +128 +00:11:44,060 --> 00:11:48,400 +Now this interval actually contains + +129 +00:11:50,230 --> 00:11:54,250 +The value of 0, that means we don't reject the + +130 +00:11:54,250 --> 00:11:57,570 +null hypothesis. So since this interval starts + +131 +00:11:57,570 --> 00:12:01,870 +from negative, lower bound is negative 0.5, upper + +132 +00:12:01,870 --> 00:12:06,190 +bound is 0.17, that means 0 inside this interval, + +133 +00:12:06,750 --> 00:12:09,130 +I mean the confidence captures the value of 0, + +134 +00:12:09,610 --> 00:12:13,810 +that means we don't reject the null hypothesis. So + +135 +00:12:13,810 --> 00:12:17,110 +by using three different approaches, we end with + +136 +00:12:17,110 --> 00:12:20,930 +the same decision and conclusion. That is, we + +137 +00:12:20,930 --> 00:12:25,370 +don't reject null hypotheses. That's all for + +138 +00:12:25,370 --> 00:12:26,110 +number one. + +139 +00:12:31,450 --> 00:12:32,910 +Question number two. + +140 +00:12:36,170 --> 00:12:40,450 +The excellent drug company claims its aspirin + +141 +00:12:40,450 --> 00:12:43,610 +tablets will relieve headaches faster than any + +142 +00:12:43,610 --> 00:12:47,470 +other aspirin on the market. 
So they believe that + +143 +00:12:48,440 --> 00:12:52,220 +Their drug is better than the other drug in the + +144 +00:12:52,220 --> 00:12:57,180 +market. To determine whether Excellence claim is + +145 +00:12:57,180 --> 00:13:04,260 +valid, random samples of size 15 are chosen from + +146 +00:13:04,260 --> 00:13:07,080 +aspirins made by Excellence and the sample drug + +147 +00:13:07,080 --> 00:13:12,300 +combined. So sample sizes of 15 are chosen from + +148 +00:13:12,300 --> 00:13:16,260 +each. So that means N1 equals 15 and N2 also + +149 +00:13:16,260 --> 00:13:21,160 +equals 15. And aspirin is given to each of the 30 + +150 +00:13:21,160 --> 00:13:23,520 +randomly selected persons suffering from + +151 +00:13:23,520 --> 00:13:27,220 +headaches. So the total sample size is 30, because + +152 +00:13:27,220 --> 00:13:30,780 +15 from the first company, and the second for the + +153 +00:13:30,780 --> 00:13:36,860 +simple company. So they are 30 selected persons + +154 +00:13:36,860 --> 00:13:40,280 +who are suffering from headaches. So we have + +155 +00:13:40,280 --> 00:13:43,380 +information about number of minutes required for + +156 +00:13:43,380 --> 00:13:47,720 +each to recover from the headache. is recorded, + +157 +00:13:48,200 --> 00:13:51,500 +the sample results are. So here we have two + +158 +00:13:51,500 --> 00:13:56,260 +groups, two populations. Company is called + +159 +00:13:56,260 --> 00:13:58,420 +excellent company and other one simple company. + +160 +00:13:59,120 --> 00:14:04,320 +The information we have, the sample means are 8.4 + +161 +00:14:04,320 --> 00:14:08,260 +for the excellent and 8.9 for the simple company. + +162 +00:14:09,040 --> 00:14:13,280 +With the standard deviations for the sample are 2 + +163 +00:14:13,280 --> 00:14:18,340 +.05 and 2.14 respectively for excellent and simple + +164 +00:14:18,340 --> 00:14:21,480 +and as we mentioned the sample sizes are the same + +165 +00:14:21,480 --> 00:14:26,380 +are equal 15 and 15. 
Now we are going to test at + +166 +00:14:26,380 --> 00:14:32,540 +five percent level of significance test whether to + +167 +00:14:32,540 --> 00:14:35,560 +determine whether excellence aspirin cure + +168 +00:14:35,560 --> 00:14:39,140 +headaches significantly faster than simple + +169 +00:14:39,140 --> 00:14:46,420 +aspirin. Now faster it means Better. Better it + +170 +00:14:46,420 --> 00:14:49,480 +means the time required to relieve headache is + +171 +00:14:49,480 --> 00:14:53,920 +smaller there. So you have to be careful in this + +172 +00:14:53,920 --> 00:15:00,800 +case. If we assume that Mu1 is the mean time + +173 +00:15:00,800 --> 00:15:05,120 +required for excellent aspirin. So Mu1 for + +174 +00:15:05,120 --> 00:15:05,500 +excellent. + +175 +00:15:17,260 --> 00:15:21,540 +So Me1, mean time required for excellence aspirin, + +176 +00:15:22,780 --> 00:15:28,860 +and Me2, mean time required for simple aspirin. So + +177 +00:15:28,860 --> 00:15:32,760 +each one, Me1, is smaller than Me3. + +178 +00:15:41,140 --> 00:15:45,960 +Since Me1 represents the time required to relieve + +179 +00:15:45,960 --> 00:15:51,500 +headache by using excellent aspirin and this one + +180 +00:15:51,500 --> 00:15:55,460 +is faster faster it means it takes less time in + +181 +00:15:55,460 --> 00:15:59,620 +order to recover from headache so mu1 should be + +182 +00:15:59,620 --> 00:16:06,400 +smaller than mu2 we are going to use T T is x1 bar + +183 +00:16:06,400 --> 00:16:11,380 +minus x2 bar minus the difference between the two + +184 +00:16:11,380 --> 00:16:14,720 +population proportions divided by + +185 +00:16:17,550 --> 00:16:22,070 +S squared B times 1 over N1 plus 1 over N2. + +186 +00:16:25,130 --> 00:16:30,470 +S squared B N1 + +187 +00:16:30,470 --> 00:16:35,330 +minus 1 S1 squared plus N2 minus 1 S2 squared + +188 +00:16:35,330 --> 00:16:41,990 +divided by N1 plus N2 minus 1. 
Now, a simple + +189 +00:16:41,990 --> 00:16:44,030 +calculation will give the following results. + +190 +00:16:59,660 --> 00:17:03,080 +So again, we have this data. Just plug this + +191 +00:17:03,080 --> 00:17:06,620 +information here to get the value + +223 +00:20:44,130 --> 00:20:48,930 +Or you maybe use the B-value approach. + +224 +00:20:53,070 --> 00:20:56,850 +Now, since the alternative is µ1 smaller than µ2, + +225 +00:20:57,640 --> 00:21:03,260 +So B value is probability of T smaller than + +226 +00:21:03,260 --> 00:21:08,820 +negative 0 + +227 +00:21:08,820 --> 00:21:12,400 +.653. + +228 +00:21:14,300 --> 00:21:18,420 +So we are looking for this probability B of Z + +229 +00:21:18,420 --> 00:21:21,340 +smaller than negative 0.653. + +230 +00:21:23,210 --> 00:21:27,050 +The table you have gives the area in the upper + +231 +00:21:27,050 --> 00:21:33,190 +tail. So this is the same as beauty greater than. + +232 +00:21:37,790 --> 00:21:44,350 +Because the area to the right of 0.653 is the same + +233 +00:21:44,350 --> 00:21:48,070 +as the area to the left of negative 0.75. Because + +234 +00:21:48,070 --> 00:21:52,970 +of symmetry. Just look at the tea table. Now, + +235 +00:21:53,070 --> 00:22:00,810 +smaller than negative, means this area is actually + +236 +00:22:00,810 --> 00:22:02,690 +the same as the area to the right of the same + +237 +00:22:02,690 --> 00:22:07,330 +value, but on the other side. So these two areas + +238 +00:22:07,330 --> 00:22:11,890 +are the same. So it's the same as D of T greater + +239 +00:22:11,890 --> 00:22:17,710 +than 0.653. If you look at the table for 28 + +240 +00:22:17,710 --> 00:22:19,150 +degrees of freedom, + +241 +00:22:22,300 --> 00:22:23,520 +That's your 28. + +242 +00:22:27,580 --> 00:22:32,720 +I am looking for the value of 0.653. The first + +243 +00:22:32,720 --> 00:22:38,420 +value here is 0.683. The other one is 0.8. It + +244 +00:22:38,420 --> 00:22:43,600 +means my value is below this one. 
If you go back + +245 +00:22:43,600 --> 00:22:46,600 +here, + +246 +00:22:46,700 --> 00:22:52,610 +so it should be to the left of this value. Now + +247 +00:22:52,610 --> 00:22:57,170 +here 25, then 20, 20, 15 and so on. So it should + +248 +00:22:57,170 --> 00:23:01,930 +be greater than 25. So your B value actually is + +249 +00:23:01,930 --> 00:23:08,570 +greater than 25%. As we mentioned before, T table + +250 +00:23:08,570 --> 00:23:12,010 +does not give the exact B value. So approximately + +251 +00:23:12,010 --> 00:23:17,290 +my B value is greater than 25%. This value + +252 +00:23:17,290 --> 00:23:22,400 +actually is much bigger than 5%. So again, we + +253 +00:23:22,400 --> 00:23:27,480 +reject, we don't reject the null hypothesis. So + +254 +00:23:27,480 --> 00:23:30,600 +again, to compute the B value, it's probability of + +255 +00:23:30,600 --> 00:23:37,320 +T smaller than the value of the statistic, which + +256 +00:23:37,320 --> 00:23:42,040 +is negative 0.653. The table you have gives the + +257 +00:23:42,040 --> 00:23:43,040 +area to the right. + +258 +00:23:46,980 --> 00:23:50,700 +So this probability is the same as B of T greater + +259 +00:23:50,700 --> 00:23:55,920 +than 0.653. So by using this table, you will get + +260 +00:23:55,920 --> 00:24:00,100 +approximate value of B, which is greater than 25%. + +261 +00:24:00,100 --> 00:24:02,960 +Always, as we mentioned, we reject the null + +262 +00:24:02,960 --> 00:24:06,660 +hypothesis if my B value is smaller than alpha. In + +263 +00:24:06,660 --> 00:24:08,920 +this case, this value is greater than alpha, so we + +264 +00:24:08,920 --> 00:24:11,480 +don't reject the null. So we reach the same + +265 +00:24:11,480 --> 00:24:15,640 +decision as by using the critical value approach. + +266 +00:24:17,040 --> 00:24:23,360 +Any question? So that's for number two. Question + +267 +00:24:23,360 --> 00:24:24,040 +number three. 
+ +268 +00:24:32,120 --> 00:24:35,820 +To test the effectiveness of a business school + +269 +00:24:35,820 --> 00:24:41,640 +preparation course, eight students took a general + +270 +00:24:41,640 --> 00:24:47,210 +business test before and after the course. Let X1 + +271 +00:24:47,210 --> 00:24:50,330 +denote before, + +272 +00:24:53,010 --> 00:24:55,450 +and X2 after. + +273 +00:24:59,630 --> 00:25:04,630 +And the difference is X2 minus X1. + +274 +00:25:14,780 --> 00:25:19,540 +The mean of the difference equals 50. And the + +275 +00:25:19,540 --> 00:25:25,540 +standard deviation of the difference is 65.03. So + +276 +00:25:25,540 --> 00:25:28,900 +sample statistics are sample mean for the + +277 +00:25:28,900 --> 00:25:32,040 +difference and sample standard deviation of the + +278 +00:25:32,040 --> 00:25:36,860 +difference. So these two values are given. Test to + +279 +00:25:36,860 --> 00:25:40,200 +determine the effectiveness of a business school + +280 +00:25:40,200 --> 00:25:45,960 +preparation course. So what's your goal? An + +281 +00:25:45,960 --> 00:25:48,120 +alternative, null equals zero. An alternative + +282 +00:25:48,120 --> 00:25:52,340 +should + +283 +00:25:52,340 --> 00:25:58,360 +be greater than zero. Because D is X2 minus X1. So + +284 +00:25:58,360 --> 00:26:02,840 +effective, it means after is better than before. + +285 +00:26:03,680 --> 00:26:08,420 +So my score after taking the course is better than + +286 +00:26:08,420 --> 00:26:12,080 +before taking the course. So X in UD is positive. + +287 +00:26:19,090 --> 00:26:27,510 +T is D bar minus 0 divided by SD over square root + +288 +00:26:27,510 --> 00:26:41,090 +of A. D bar is 50 divided by 65 divided + +289 +00:26:41,090 --> 00:26:54,490 +by Square root of 8. So 50 divided by square + +290 +00:26:54,490 --> 00:26:57,910 +root of 8, 2.17. + +291 +00:27:04,070 --> 00:27:09,570 +Now Yumi used the critical value approach. So my + +292 +00:27:09,570 --> 00:27:10,930 +critical value is T alpha. 
+ +293 +00:27:13,680 --> 00:27:20,140 +And degrees of freedom is 7. It's upper 10. So + +294 +00:27:20,140 --> 00:27:27,300 +it's plus. So it's T alpha 0, 5. And DF is 7, + +295 +00:27:27,320 --> 00:27:33,820 +because N equals 8. Now by using the table, at 7 + +296 +00:27:33,820 --> 00:27:34,680 +degrees of freedom, + +297 +00:27:38,220 --> 00:27:39,340 +so at 7, + +298 +00:27:53,560 --> 00:28:03,380 +So my T value is greater than the + +299 +00:28:03,380 --> 00:28:07,020 +critical region, so we reject the null hypothesis. + +300 +00:28:10,740 --> 00:28:17,700 +The rejection region starts from 1.9895 and this + +301 +00:28:17,700 --> 00:28:24,800 +value actually greater than 1.8. So since it falls + +302 +00:28:24,800 --> 00:28:30,320 +in the rejection region, then we reject the null + +303 +00:28:30,320 --> 00:28:35,060 +hypothesis. It means that taking the course, + +304 +00:28:36,370 --> 00:28:39,690 +improves your score. So we have sufficient + +305 +00:28:39,690 --> 00:28:43,010 +evidence to support the alternative hypothesis. + +306 +00:28:44,330 --> 00:28:50,650 +That's for number three. The other part, the other + +307 +00:28:50,650 --> 00:28:51,130 +part. + +308 +00:28:54,290 --> 00:28:58,550 +A statistician selected a sample of 16 receivable + +309 +00:28:58,550 --> 00:29:03,530 +accounts. He reported that the sample information + +310 +00:29:04,690 --> 00:29:07,790 +indicated the mean of the population ranges from + +311 +00:29:07,790 --> 00:29:12,730 +these two values. So we have lower and upper + +312 +00:29:12,730 --> 00:29:21,910 +limits, which are given by 4739. + +313 +00:29:36,500 --> 00:29:42,400 +So the mean of the population ranges between these + +314 +00:29:42,400 --> 00:29:47,880 +two values. And in addition to that, we have + +315 +00:29:47,880 --> 00:29:55,920 +information about the sample standard deviation is + +316 +00:29:55,920 --> 00:29:56,340 +400. 
+ +317 +00:29:59,500 --> 00:30:03,260 +The statistician neglected to report what + +318 +00:30:03,260 --> 00:30:07,440 +confidence level he had used. So we don't know C + +319 +00:30:07,440 --> 00:30:14,180 +level. So C level is unknown, which actually is 1 + +320 +00:30:14,180 --> 00:30:14,760 +minus alpha. + +321 +00:30:20,980 --> 00:30:25,360 +Based on the above information, what's the + +322 +00:30:25,360 --> 00:30:28,380 +confidence level? So we are looking for C level. + +323 +00:30:29,380 --> 00:30:34,160 +Now just keep in mind the confidence interval is + +324 +00:30:34,160 --> 00:30:38,200 +given and we are looking for C level. + +325 +00:30:42,920 --> 00:30:46,600 +So this area actually is alpha over 2 and other + +326 +00:30:46,600 --> 00:30:49,940 +one is alpha over 2, so the area between is 1 + +327 +00:30:49,940 --> 00:30:50,440 +minus alpha. + +328 +00:30:53,340 --> 00:30:58,620 +Now since the sample size equal + +329 +00:31:01,950 --> 00:31:10,010 +16, N equals 16, so N equals 16, so your + +330 +00:31:10,010 --> 00:31:12,490 +confidence interval should be X bar plus or minus + +331 +00:31:12,490 --> 00:31:14,610 +T, S over root N. + +332 +00:31:19,350 --> 00:31:26,390 +Now, C level can be determined by T, and we know + +333 +00:31:26,390 --> 00:31:28,130 +that this quantity, + +334 +00:31:30,730 --> 00:31:36,970 +represents the margin of error. So, E equals TS + +335 +00:31:36,970 --> 00:31:42,950 +over root N. Now, since the confidence interval is + +336 +00:31:42,950 --> 00:31:50,270 +given, we know from previous chapters that the + +337 +00:31:50,270 --> 00:31:53,970 +margin equals the difference between upper and + +338 +00:31:53,970 --> 00:31:59,560 +lower divided by two. So, half distance of lower + +339 +00:31:59,560 --> 00:32:06,320 +and upper gives the margin. So that will give 260 + +340 +00:32:06,320 --> 00:32:17,620 +.2. So that's E. 
So now E is known to be 260.2 
+
+341
+00:32:17,620 --> 00:32:24,320
+equals to S is given by 400 and N is 16.
+
+342
+00:32:26,800 --> 00:32:29,420
+Now, simple calculation will give the value of T,
+
+343
+00:32:30,060 --> 00:32:31,340
+which is the critical value.
+
+344
+00:32:35,280 --> 00:32:38,160
+So, my T equals 2.60.
+
+345
+00:32:41,960 --> 00:32:47,220
+Actually, this is T alpha over 2. Now, the value
+
+346
+00:32:47,220 --> 00:32:52,400
+of the critical value is known to be 2.602. What's
+
+347
+00:32:52,400 --> 00:32:56,520
+the corresponding alpha over 2? Now look at the
+
+348
+00:32:56,520 --> 00:32:59,660
+table, at 15 degrees of freedom,
+
+349
+00:33:02,720 --> 00:33:10,680
+look at 15, at this value 2.602, at this value.
+
+350
+00:33:12,640 --> 00:33:19,880
+So, 15 degrees of freedom, 2.602, so the
+
+351
+00:33:19,880 --> 00:33:21,940
+corresponding alpha over 2, not alpha.
+
+352
+00:33:24,610 --> 00:33:31,830
+it's 1% so my alpha over 2 is
+
+353
+00:33:31,830 --> 00:33:43,110
+1% so alpha is 2% so the confidence level is 1
+
+354
+00:33:43,110 --> 00:33:50,510
+minus alpha so 1 minus alpha is 98% so c level is
+
+355
+00:33:50,510 --> 00:33:59,410
+98% so that's level or the confidence level. So
+
+356
+00:33:59,410 --> 00:34:03,990
+again, maybe this is a tricky question.
+
+357
+00:34:07,330 --> 00:34:10,530
+But at least you know that if the confidence
+
+358
+00:34:10,530 --> 00:34:15,270
+interval is given, you can determine the margin of
+
+359
+00:34:15,270 --> 00:34:18,930
+error by the difference between lower and upper
+
+360
+00:34:18,930 --> 00:34:23,310
+divided by two. Then we know this term represents
+
+361
+00:34:23,310 --> 00:34:27,150
+this margin. So by using this equation, we can
+
+362
+00:34:27,150 --> 00:34:29,770
+compute the value of T, I mean the critical value. 
+ +363 +00:34:30,670 --> 00:34:35,290 +So since the critical value is given or is + +364 +00:34:35,290 --> 00:34:38,590 +computed, we can determine the corresponding alpha + +365 +00:34:38,590 --> 00:34:45,390 +over 2. So alpha over 2 is 1%. So your alpha is + +366 +00:34:45,390 --> 00:34:51,710 +2%. So my C level is 98%. That's + +367 +00:34:51,710 --> 00:34:56,180 +all. Any questions? We're done, Muhammad. + + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..409ec1c09e50907c3fb8bc3aed26ac9a7c74f630 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4_postprocess.srt @@ -0,0 +1,1468 @@ +1 +00:00:09,320 --> 00:00:15,210 +The second material exam. Question number one. a + +2 +00:00:15,210 --> 00:00:20,170 +corporation randomly selected or selects 150 + +3 +00:00:20,170 --> 00:00:25,430 +salespeople and finds that 66% who have never + +4 +00:00:25,430 --> 00:00:29,930 +taken self-improvement course would like such a + +5 +00:00:29,930 --> 00:00:35,350 +course. So in this case, currently, they select + +6 +00:00:35,350 --> 00:00:52,170 +150 salespeople and find that 66% would + +7 +00:00:52,170 --> 00:00:59,130 +like or who have never taken this course. The firm + +8 +00:00:59,130 --> 00:01:05,110 +did a similar study 10 years ago in which 60% of a + +9 +00:01:05,110 --> 00:01:10,290 +random sample of 160 salespeople wanted a self + +10 +00:01:10,290 --> 00:01:11,830 +-improvement course. + +11 +00:01:15,620 --> 00:01:21,520 +They select a random sample of 160 and tell that + +12 +00:01:21,520 --> 00:01:30,260 +60% would like to take this course. So we have + +13 +00:01:30,260 --> 00:01:34,120 +here two information about previous study and + +14 +00:01:34,120 --> 00:01:37,440 +currently. So currently we have this information. 
+ +15 +00:01:39,660 --> 00:01:44,860 +The sample size was 150, with a proportion 66% for + +16 +00:01:44,860 --> 00:01:47,920 +the people who would like to attend or take this + +17 +00:01:47,920 --> 00:01:51,280 +course. Mid-Paiwan and Pai Tu represent the true + +18 +00:01:51,280 --> 00:01:55,260 +proportion, it means the population proportion, of + +19 +00:01:55,260 --> 00:01:57,800 +workers who would like to attend a self + +20 +00:01:57,800 --> 00:02:01,140 +-improvement course in the recent study and the + +21 +00:02:01,140 --> 00:02:05,400 +past studies in Taiwan. So recent, Paiwan. + +22 +00:02:07,740 --> 00:02:12,100 +And Pi 2 is the previous study. This weather, this + +23 +00:02:12,100 --> 00:02:17,100 +proportion has changed from the previous study by + +24 +00:02:17,100 --> 00:02:21,580 +using two approaches. Critical value approach and + +25 +00:02:21,580 --> 00:02:26,920 +B value approach. So here we are talking about Pi + +26 +00:02:26,920 --> 00:02:32,650 +1 equals Pi 2. Since the problem says that The + +27 +00:02:32,650 --> 00:02:35,870 +proportion has changed. You don't know the exact + +28 +00:02:35,870 --> 00:02:39,610 +direction, either greater than or smaller than. So + +29 +00:02:39,610 --> 00:02:45,810 +this one should be Y1 does not equal Y2. So step + +30 +00:02:45,810 --> 00:02:49,050 +one, you have to state the appropriate null and + +31 +00:02:49,050 --> 00:02:50,930 +alternative hypothesis. + +32 +00:02:53,330 --> 00:02:58,150 +Second step, compute the value of the test + +33 +00:02:58,150 --> 00:03:01,510 +statistic. In this case, your Z statistic should + +34 +00:03:01,510 --> 00:03:09,910 +be P1 minus P2 minus Pi 1 minus Pi 2, under the + +35 +00:03:09,910 --> 00:03:17,310 +square root of P dash 1 minus P dash times 1 over + +36 +00:03:17,310 --> 00:03:24,550 +N1 plus 1 over N1. Now, P1 and P2 are given under + +37 +00:03:24,550 --> 00:03:29,730 +the null hypothesis Pi 1 minus Pi 2 is 0. 
So here + +38 +00:03:29,730 --> 00:03:32,770 +we have to compute P dash, which is the overall + +39 +00:03:35,350 --> 00:03:40,710 +B dash equals x1 plus x2 divided by n1 plus n2. + +40 +00:03:42,170 --> 00:03:45,150 +Now these x's, I mean the number of successes are + +41 +00:03:45,150 --> 00:03:49,450 +not given directly in this problem, but we can + +42 +00:03:49,450 --> 00:03:54,050 +figure out the values of x1 and x2 by using this + +43 +00:03:54,050 --> 00:03:58,610 +information, which is n1 equals 150 and b1 equals + +44 +00:03:58,610 --> 00:04:03,210 +66%. Because we know that b1 equals x1 over n1. + +45 +00:04:06,860 --> 00:04:14,360 +So, by using this equation, X1 equals N1 times V1. + +46 +00:04:16,920 --> 00:04:28,100 +N1 150 times 66 percent, that will give 150 times + +47 +00:04:28,100 --> 00:04:35,940 +66, so that's 99. So 150 times, it's 99. + +48 +00:04:41,690 --> 00:04:49,670 +Similarly, X2 equals N2 times V2. N2 is given by + +49 +00:04:49,670 --> 00:04:53,770 +160, so 160 times 60 percent, + +50 +00:04:55,750 --> 00:05:02,550 +96. So the number of successes are 96 for the + +51 +00:05:02,550 --> 00:05:06,070 +second, for the previous. Nine nine. + +52 +00:05:11,270 --> 00:05:16,410 +So B dash equals x199 + +53 +00:05:16,410 --> 00:05:28,330 +plus 96 divided by n1 plus n2, 350. And that will + +54 +00:05:28,330 --> 00:05:34,850 +give the overall proportions divided by 310, 0 + +55 +00:05:34,850 --> 00:05:35,510 +.629. + +56 +00:05:40,870 --> 00:05:44,570 +So, this is the value of the overall proportion. + +57 +00:05:45,390 --> 00:05:50,650 +Now, B dash equals 1.629. So, 1 times 1 minus B + +58 +00:05:50,650 --> 00:05:54,970 +dash is 1 minus this value times 1 over N1, 1 over + +59 +00:05:54,970 --> 00:06:00,550 +150 plus 1 over 160. Simple calculation will give + +60 +00:06:01,460 --> 00:06:07,280 +The value of z, which is in this case 1.093. 
+ +61 +00:06:07,780 --> 00:06:10,620 +So just plug this information into this equation, + +62 +00:06:11,340 --> 00:06:19,320 +you will get z value, which is 1.093. He asked to + +63 +00:06:19,320 --> 00:06:21,780 +do this problem by using two approaches, critical + +64 +00:06:21,780 --> 00:06:25,180 +value and b value. Let's start with the first one, + +65 +00:06:26,780 --> 00:06:27,780 +b value approach. + +66 +00:06:32,710 --> 00:06:36,330 +Now your B value or critical value, start with + +67 +00:06:36,330 --> 00:06:37,050 +critical value. + +68 +00:06:40,850 --> 00:06:46,490 +Now since we are taking about a two-sided test, so + +69 +00:06:46,490 --> 00:06:50,170 +there are two critical values which are plus or + +70 +00:06:50,170 --> 00:06:54,670 +minus Z alpha over. Alpha is given by five + +71 +00:06:54,670 --> 00:06:56,990 +percent, so in this case + +72 +00:06:59,630 --> 00:07:03,370 +is equal to plus or minus 1.96. + +73 +00:07:05,930 --> 00:07:10,010 +Now, does this value, I mean does the value of + +74 +00:07:10,010 --> 00:07:14,910 +this statistic which is 1.093 fall in the critical + +75 +00:07:14,910 --> 00:07:22,730 +region? Now, my critical regions are above 196 or + +76 +00:07:22,730 --> 00:07:28,130 +below negative 1.96. Now this value actually falls + +77 +00:07:29,300 --> 00:07:32,420 +In the non-rejection region, so we don't reject + +78 +00:07:32,420 --> 00:07:36,160 +the null hypothesis. So my decision, don't reject + +79 +00:07:36,160 --> 00:07:39,980 +the null hypothesis. That means there is not + +80 +00:07:39,980 --> 00:07:43,420 +sufficient evidence to support the alternative + +81 +00:07:43,420 --> 00:07:46,960 +which states that the proportion has changed from + +82 +00:07:46,960 --> 00:07:51,290 +the previous study. So we don't reject the null + +83 +00:07:51,290 --> 00:07:54,010 +hypothesis. It means there is not sufficient + +84 +00:07:54,010 --> 00:07:58,050 +evidence to support the alternative hypothesis. 
+ +85 +00:07:58,270 --> 00:08:02,010 +That means you cannot say that the proportion has + +86 +00:08:02,010 --> 00:08:05,530 +changed from the previous study. That by using + +87 +00:08:05,530 --> 00:08:09,650 +critical value approach. Now what's about p-value? + +88 +00:08:11,830 --> 00:08:16,170 +In order to determine the p-value, + +89 +00:08:19,460 --> 00:08:23,320 +We have to find the probability that the Z + +90 +00:08:23,320 --> 00:08:28,060 +statistic fall in the rejection regions. So that + +91 +00:08:28,060 --> 00:08:36,260 +means Z greater than my values 1093 or + +92 +00:08:36,260 --> 00:08:41,060 +Z smaller than negative 1.093. + +93 +00:08:45,450 --> 00:08:49,730 +1093 is the same as the left of negative, so they + +94 +00:08:49,730 --> 00:08:52,810 +are the same because of symmetry. So just take 1 + +95 +00:08:52,810 --> 00:08:54,050 +and multiply by 2. + +96 +00:08:58,430 --> 00:09:03,070 +Now simple calculation will give the value of 0 + +97 +00:09:03,070 --> 00:09:09,950 +.276 in chapter 6. So go back to chapter 6 to + +98 +00:09:09,950 --> 00:09:13,290 +figure out how can we calculate the probability of + +99 +00:09:13,290 --> 00:09:19,830 +Z greater than 1.0938. Now my B value is 0.276, + +100 +00:09:20,030 --> 00:09:25,190 +always we reject the null hypothesis if my B value + +101 +00:09:25,190 --> 00:09:29,050 +is smaller than alpha. Now this value is much much + +102 +00:09:29,050 --> 00:09:31,210 +bigger than alpha, so we don't reject the null + +103 +00:09:31,210 --> 00:09:36,710 +hypothesis. So since my B value is much greater + +104 +00:09:36,710 --> 00:09:42,650 +than alpha, that means we don't reject the null + +105 +00:09:42,650 --> 00:09:46,810 +hypothesis, so we reach the same conclusion, that + +106 +00:09:46,810 --> 00:09:49,270 +there is not sufficient evidence to support the + +107 +00:09:49,270 --> 00:09:55,270 +alternative. 
Also, we can perform the test by + +108 +00:09:55,270 --> 00:09:59,810 +using confidence interval approach, because here + +109 +00:09:59,810 --> 00:10:02,850 +we are talking about two-tailed test. Your + +110 +00:10:02,850 --> 00:10:06,670 +confidence interval is given by + +111 +00:10:10,620 --> 00:10:17,280 +B1 minus B2 plus + +112 +00:10:17,280 --> 00:10:23,720 +or minus Z alpha over 2 times B + +113 +00:10:23,720 --> 00:10:30,120 +dash 1 minus B dash multiplied by 1 over N1 plus 1 + +114 +00:10:30,120 --> 00:10:37,520 +over N2. By the way, this one + +115 +00:10:37,520 --> 00:10:43,320 +called the margin of error. So z times square root + +116 +00:10:43,320 --> 00:10:45,940 +of this sequence is called the margin of error, + +117 +00:10:46,940 --> 00:10:52,280 +and the square root itself is called the standard + +118 +00:10:52,280 --> 00:10:59,560 +error of the point estimate of pi 1 minus pi 2, + +119 +00:10:59,720 --> 00:11:04,430 +which is P1 minus P2. So square root of b dash 1 + +120 +00:11:04,430 --> 00:11:07,650 +minus b dash multiplied by 1 over n1 plus 1 over + +121 +00:11:07,650 --> 00:11:12,270 +n2 is called the standard error of the estimate of + +122 +00:11:12,270 --> 00:11:15,910 +pi 1 minus pi 2. So this is standard estimate of + +123 +00:11:15,910 --> 00:11:21,750 +b1 minus b2. Simply, you will get the confidence + +124 +00:11:21,750 --> 00:11:26,470 +interval to be between pi 1 minus the difference + +125 +00:11:26,470 --> 00:11:32,620 +between the two proportions, 4 between negative. 0 + +126 +00:11:32,620 --> 00:11:37,160 +.5 and + +127 +00:11:37,160 --> 00:11:38,940 +0.7. + +128 +00:11:44,060 --> 00:11:48,400 +Now this interval actually contains + +129 +00:11:50,230 --> 00:11:54,250 +The value of 0, that means we don't reject the + +130 +00:11:54,250 --> 00:11:57,570 +null hypothesis. 
So since this interval starts + +131 +00:11:57,570 --> 00:12:01,870 +from negative, lower bound is negative 0.5, upper + +132 +00:12:01,870 --> 00:12:06,190 +bound is 0.17, that means 0 inside this interval, + +133 +00:12:06,750 --> 00:12:09,130 +I mean the confidence captures the value of 0, + +134 +00:12:09,610 --> 00:12:13,810 +that means we don't reject the null hypothesis. So + +135 +00:12:13,810 --> 00:12:17,110 +by using three different approaches, we end with + +136 +00:12:17,110 --> 00:12:20,930 +the same decision and conclusion. That is, we + +137 +00:12:20,930 --> 00:12:25,370 +don't reject null hypotheses. That's all for + +138 +00:12:25,370 --> 00:12:26,110 +number one. + +139 +00:12:31,450 --> 00:12:32,910 +Question number two. + +140 +00:12:36,170 --> 00:12:40,450 +The excellent drug company claims its aspirin + +141 +00:12:40,450 --> 00:12:43,610 +tablets will relieve headaches faster than any + +142 +00:12:43,610 --> 00:12:47,470 +other aspirin on the market. So they believe that + +143 +00:12:48,440 --> 00:12:52,220 +Their drug is better than the other drug in the + +144 +00:12:52,220 --> 00:12:57,180 +market. To determine whether Excellence claim is + +145 +00:12:57,180 --> 00:13:04,260 +valid, random samples of size 15 are chosen from + +146 +00:13:04,260 --> 00:13:07,080 +aspirins made by Excellence and the sample drug + +147 +00:13:07,080 --> 00:13:12,300 +combined. So sample sizes of 15 are chosen from + +148 +00:13:12,300 --> 00:13:16,260 +each. So that means N1 equals 15 and N2 also + +149 +00:13:16,260 --> 00:13:21,160 +equals 15. And aspirin is given to each of the 30 + +150 +00:13:21,160 --> 00:13:23,520 +randomly selected persons suffering from + +151 +00:13:23,520 --> 00:13:27,220 +headaches. So the total sample size is 30, because + +152 +00:13:27,220 --> 00:13:30,780 +15 from the first company, and the second for the + +153 +00:13:30,780 --> 00:13:36,860 +simple company. 
So they are 30 selected persons + +154 +00:13:36,860 --> 00:13:40,280 +who are suffering from headaches. So we have + +155 +00:13:40,280 --> 00:13:43,380 +information about number of minutes required for + +156 +00:13:43,380 --> 00:13:47,720 +each to recover from the headache. is recorded, + +157 +00:13:48,200 --> 00:13:51,500 +the sample results are. So here we have two + +158 +00:13:51,500 --> 00:13:56,260 +groups, two populations. Company is called + +159 +00:13:56,260 --> 00:13:58,420 +excellent company and other one simple company. + +160 +00:13:59,120 --> 00:14:04,320 +The information we have, the sample means are 8.4 + +161 +00:14:04,320 --> 00:14:08,260 +for the excellent and 8.9 for the simple company. + +162 +00:14:09,040 --> 00:14:13,280 +With the standard deviations for the sample are 2 + +163 +00:14:13,280 --> 00:14:18,340 +.05 and 2.14 respectively for excellent and simple + +164 +00:14:18,340 --> 00:14:21,480 +and as we mentioned the sample sizes are the same + +165 +00:14:21,480 --> 00:14:26,380 +are equal 15 and 15. Now we are going to test at + +166 +00:14:26,380 --> 00:14:32,540 +five percent level of significance test whether to + +167 +00:14:32,540 --> 00:14:35,560 +determine whether excellence aspirin cure + +168 +00:14:35,560 --> 00:14:39,140 +headaches significantly faster than simple + +169 +00:14:39,140 --> 00:14:46,420 +aspirin. Now faster it means Better. Better it + +170 +00:14:46,420 --> 00:14:49,480 +means the time required to relieve headache is + +171 +00:14:49,480 --> 00:14:53,920 +smaller there. So you have to be careful in this + +172 +00:14:53,920 --> 00:15:00,800 +case. If we assume that Mu1 is the mean time + +173 +00:15:00,800 --> 00:15:05,120 +required for excellent aspirin. So Mu1 for + +174 +00:15:05,120 --> 00:15:05,500 +excellent. + +175 +00:15:17,260 --> 00:15:21,540 +So Me1, mean time required for excellence aspirin, + +176 +00:15:22,780 --> 00:15:28,860 +and Me2, mean time required for simple aspirin. 
So + +177 +00:15:28,860 --> 00:15:32,760 +each one, Me1, is smaller than Me3. + +178 +00:15:41,140 --> 00:15:45,960 +Since Me1 represents the time required to relieve + +179 +00:15:45,960 --> 00:15:51,500 +headache by using excellent aspirin and this one + +180 +00:15:51,500 --> 00:15:55,460 +is faster faster it means it takes less time in + +181 +00:15:55,460 --> 00:15:59,620 +order to recover from headache so mu1 should be + +182 +00:15:59,620 --> 00:16:06,400 +smaller than mu2 we are going to use T T is x1 bar + +183 +00:16:06,400 --> 00:16:11,380 +minus x2 bar minus the difference between the two + +184 +00:16:11,380 --> 00:16:14,720 +population proportions divided by + +185 +00:16:17,550 --> 00:16:22,070 +S squared B times 1 over N1 plus 1 over N2. + +186 +00:16:25,130 --> 00:16:30,470 +S squared B N1 + +187 +00:16:30,470 --> 00:16:35,330 +minus 1 S1 squared plus N2 minus 1 S2 squared + +188 +00:16:35,330 --> 00:16:41,990 +divided by N1 plus N2 minus 1. Now, a simple + +189 +00:16:41,990 --> 00:16:44,030 +calculation will give the following results. + +190 +00:16:59,660 --> 00:17:03,080 +So again, we have this data. Just plug this + +191 +00:17:03,080 --> 00:17:06,620 +information here to get the value of S square B. + +192 +00:17:07,740 --> 00:17:13,120 +And finally, you will end with this result. + +193 +00:17:18,220 --> 00:17:24,920 +S squared B equals 2 + +194 +00:17:24,920 --> 00:17:27,240 +.095 squared. + +195 +00:17:30,140 --> 00:17:35,920 +Your T statistic equals negative + +196 +00:17:42,790 --> 00:17:48,370 +So that's your T-statistic value. So just plug the + +197 +00:17:48,370 --> 00:17:51,210 +values in 1 and 2, this 1 squared and this 2 + +198 +00:17:51,210 --> 00:17:53,350 +squared into this equation, you will get this + +199 +00:17:53,350 --> 00:18:02,970 +value. So 2.059 squared, that is 4.239. + +200 +00:18:07,670 --> 00:18:10,690 +Here you can use either the critical value + +201 +00:18:10,690 --> 00:18:17,200 +approach, Or B value. 
Let's do a critical value. + +202 +00:18:21,920 --> 00:18:27,460 +Since the alternative is the lower tail, one-sided + +203 +00:18:27,460 --> 00:18:31,820 +lower tail, so your B value, your critical value + +204 +00:18:31,820 --> 00:18:37,630 +is negative, T alpha, and there is a freedom. So + +205 +00:18:37,630 --> 00:18:47,630 +this is equal to negative T, 5% with 28 degrees of + +206 +00:18:47,630 --> 00:18:55,270 +freedom. By using the table you have 28, + +207 +00:18:56,030 --> 00:19:00,070 +28 + +208 +00:19:00,070 --> 00:19:12,790 +under 5%, so 28 under 5%, so + +209 +00:19:12,790 --> 00:19:20,870 +1.701, negative 1.701. + +210 +00:19:23,750 --> 00:19:28,290 +Now, we reject the null hypothesis if + +211 +00:19:33,770 --> 00:19:42,890 +region. Now again, since it's lower TL, so your + +212 +00:19:42,890 --> 00:19:48,830 +rejection region is below negative 1.701. + +213 +00:19:51,230 --> 00:19:55,630 +Now, does this value fall in the rejection region? + +214 +00:19:56,510 --> 00:20:02,350 +It falls in the non-rejection region. So the + +215 +00:20:02,350 --> 00:20:08,040 +answer is Don't reject the null hypothesis. That + +216 +00:20:08,040 --> 00:20:11,380 +means we don't have sufficient evidence to support + +217 +00:20:11,380 --> 00:20:16,300 +the excellent drug company claim which states that + +218 +00:20:16,300 --> 00:20:21,380 +their aspirin tablets relieve headaches faster + +219 +00:20:21,380 --> 00:20:28,540 +than the simple one. So that's by using a critical + +220 +00:20:28,540 --> 00:20:33,230 +value approach because this value falls in the non + +221 +00:20:33,230 --> 00:20:36,450 +-rejection region, so we don't reject the null + +222 +00:20:36,450 --> 00:20:36,890 +hypothesis. + +223 +00:20:44,130 --> 00:20:48,930 +Or you maybe use the B-value approach. 
+ +224 +00:20:53,070 --> 00:20:56,850 +Now, since the alternative is µ1 smaller than µ2, + +225 +00:20:57,640 --> 00:21:03,260 +So B value is probability of T smaller than + +226 +00:21:03,260 --> 00:21:08,820 +negative 0 + +227 +00:21:08,820 --> 00:21:12,400 +.653. + +228 +00:21:14,300 --> 00:21:18,420 +So we are looking for this probability B of Z + +229 +00:21:18,420 --> 00:21:21,340 +smaller than negative 0.653. + +230 +00:21:23,210 --> 00:21:27,050 +The table you have gives the area in the upper + +231 +00:21:27,050 --> 00:21:33,190 +tail. So this is the same as beauty greater than. + +232 +00:21:37,790 --> 00:21:44,350 +Because the area to the right of 0.653 is the same + +233 +00:21:44,350 --> 00:21:48,070 +as the area to the left of negative 0.75. Because + +234 +00:21:48,070 --> 00:21:52,970 +of symmetry. Just look at the tea table. Now, + +235 +00:21:53,070 --> 00:22:00,810 +smaller than negative, means this area is actually + +236 +00:22:00,810 --> 00:22:02,690 +the same as the area to the right of the same + +237 +00:22:02,690 --> 00:22:07,330 +value, but on the other side. So these two areas + +238 +00:22:07,330 --> 00:22:11,890 +are the same. So it's the same as D of T greater + +239 +00:22:11,890 --> 00:22:17,710 +than 0.653. If you look at the table for 28 + +240 +00:22:17,710 --> 00:22:19,150 +degrees of freedom, + +241 +00:22:22,300 --> 00:22:23,520 +That's your 28. + +242 +00:22:27,580 --> 00:22:32,720 +I am looking for the value of 0.653. The first + +243 +00:22:32,720 --> 00:22:38,420 +value here is 0.683. The other one is 0.8. It + +244 +00:22:38,420 --> 00:22:43,600 +means my value is below this one. If you go back + +245 +00:22:43,600 --> 00:22:46,600 +here, + +246 +00:22:46,700 --> 00:22:52,610 +so it should be to the left of this value. Now + +247 +00:22:52,610 --> 00:22:57,170 +here 25, then 20, 20, 15 and so on. So it should + +248 +00:22:57,170 --> 00:23:01,930 +be greater than 25. 
So your B value actually is + +249 +00:23:01,930 --> 00:23:08,570 +greater than 25%. As we mentioned before, T table + +250 +00:23:08,570 --> 00:23:12,010 +does not give the exact B value. So approximately + +251 +00:23:12,010 --> 00:23:17,290 +my B value is greater than 25%. This value + +252 +00:23:17,290 --> 00:23:22,400 +actually is much bigger than 5%. So again, we + +253 +00:23:22,400 --> 00:23:27,480 +reject, we don't reject the null hypothesis. So + +254 +00:23:27,480 --> 00:23:30,600 +again, to compute the B value, it's probability of + +255 +00:23:30,600 --> 00:23:37,320 +T smaller than the value of the statistic, which + +256 +00:23:37,320 --> 00:23:42,040 +is negative 0.653. The table you have gives the + +257 +00:23:42,040 --> 00:23:43,040 +area to the right. + +258 +00:23:46,980 --> 00:23:50,700 +So this probability is the same as B of T greater + +259 +00:23:50,700 --> 00:23:55,920 +than 0.653. So by using this table, you will get + +260 +00:23:55,920 --> 00:24:00,100 +approximate value of B, which is greater than 25%. + +261 +00:24:00,100 --> 00:24:02,960 +Always, as we mentioned, we reject the null + +262 +00:24:02,960 --> 00:24:06,660 +hypothesis if my B value is smaller than alpha. In + +263 +00:24:06,660 --> 00:24:08,920 +this case, this value is greater than alpha, so we + +264 +00:24:08,920 --> 00:24:11,480 +don't reject the null. So we reach the same + +265 +00:24:11,480 --> 00:24:15,640 +decision as by using the critical value approach. + +266 +00:24:17,040 --> 00:24:23,360 +Any question? So that's for number two. Question + +267 +00:24:23,360 --> 00:24:24,040 +number three. + +268 +00:24:32,120 --> 00:24:35,820 +To test the effectiveness of a business school + +269 +00:24:35,820 --> 00:24:41,640 +preparation course, eight students took a general + +270 +00:24:41,640 --> 00:24:47,210 +business test before and after the course. Let X1 + +271 +00:24:47,210 --> 00:24:50,330 +denote before, + +272 +00:24:53,010 --> 00:24:55,450 +and X2 after. 
+ +273 +00:24:59,630 --> 00:25:04,630 +And the difference is X2 minus X1. + +274 +00:25:14,780 --> 00:25:19,540 +The mean of the difference equals 50. And the + +275 +00:25:19,540 --> 00:25:25,540 +standard deviation of the difference is 65.03. So + +276 +00:25:25,540 --> 00:25:28,900 +sample statistics are sample mean for the + +277 +00:25:28,900 --> 00:25:32,040 +difference and sample standard deviation of the + +278 +00:25:32,040 --> 00:25:36,860 +difference. So these two values are given. Test to + +279 +00:25:36,860 --> 00:25:40,200 +determine the effectiveness of a business school + +280 +00:25:40,200 --> 00:25:45,960 +preparation course. So what's your goal? An + +281 +00:25:45,960 --> 00:25:48,120 +alternative, null equals zero. An alternative + +282 +00:25:48,120 --> 00:25:52,340 +should + +283 +00:25:52,340 --> 00:25:58,360 +be greater than zero. Because D is X2 minus X1. So + +284 +00:25:58,360 --> 00:26:02,840 +effective, it means after is better than before. + +285 +00:26:03,680 --> 00:26:08,420 +So my score after taking the course is better than + +286 +00:26:08,420 --> 00:26:12,080 +before taking the course. So X in UD is positive. + +287 +00:26:19,090 --> 00:26:27,510 +T is D bar minus 0 divided by SD over square root + +288 +00:26:27,510 --> 00:26:41,090 +of A. D bar is 50 divided by 65 divided + +289 +00:26:41,090 --> 00:26:54,490 +by Square root of 8. So 50 divided by square + +290 +00:26:54,490 --> 00:26:57,910 +root of 8, 2.17. + +291 +00:27:04,070 --> 00:27:09,570 +Now Yumi used the critical value approach. So my + +292 +00:27:09,570 --> 00:27:10,930 +critical value is T alpha. + +293 +00:27:13,680 --> 00:27:20,140 +And degrees of freedom is 7. It's upper 10. So + +294 +00:27:20,140 --> 00:27:27,300 +it's plus. So it's T alpha 0, 5. And DF is 7, + +295 +00:27:27,320 --> 00:27:33,820 +because N equals 8. 
Now by using the table, at 7 + +296 +00:27:33,820 --> 00:27:34,680 +degrees of freedom, + +297 +00:27:38,220 --> 00:27:39,340 +so at 7, + +298 +00:27:53,560 --> 00:28:03,380 +So my T value is greater than the + +299 +00:28:03,380 --> 00:28:07,020 +critical region, so we reject the null hypothesis. + +300 +00:28:10,740 --> 00:28:17,700 +The rejection region starts from 1.9895 and this + +301 +00:28:17,700 --> 00:28:24,800 +value actually greater than 1.8. So since it falls + +302 +00:28:24,800 --> 00:28:30,320 +in the rejection region, then we reject the null + +303 +00:28:30,320 --> 00:28:35,060 +hypothesis. It means that taking the course, + +304 +00:28:36,370 --> 00:28:39,690 +improves your score. So we have sufficient + +305 +00:28:39,690 --> 00:28:43,010 +evidence to support the alternative hypothesis. + +306 +00:28:44,330 --> 00:28:50,650 +That's for number three. The other part, the other + +307 +00:28:50,650 --> 00:28:51,130 +part. + +308 +00:28:54,290 --> 00:28:58,550 +A statistician selected a sample of 16 receivable + +309 +00:28:58,550 --> 00:29:03,530 +accounts. He reported that the sample information + +310 +00:29:04,690 --> 00:29:07,790 +indicated the mean of the population ranges from + +311 +00:29:07,790 --> 00:29:12,730 +these two values. So we have lower and upper + +312 +00:29:12,730 --> 00:29:21,910 +limits, which are given by 4739. + +313 +00:29:36,500 --> 00:29:42,400 +So the mean of the population ranges between these + +314 +00:29:42,400 --> 00:29:47,880 +two values. And in addition to that, we have + +315 +00:29:47,880 --> 00:29:55,920 +information about the sample standard deviation is + +316 +00:29:55,920 --> 00:29:56,340 +400. + +317 +00:29:59,500 --> 00:30:03,260 +The statistician neglected to report what + +318 +00:30:03,260 --> 00:30:07,440 +confidence level he had used. So we don't know C + +319 +00:30:07,440 --> 00:30:14,180 +level. So C level is unknown, which actually is 1 + +320 +00:30:14,180 --> 00:30:14,760 +minus alpha. 
+ +321 +00:30:20,980 --> 00:30:25,360 +Based on the above information, what's the + +322 +00:30:25,360 --> 00:30:28,380 +confidence level? So we are looking for C level. + +323 +00:30:29,380 --> 00:30:34,160 +Now just keep in mind the confidence interval is + +324 +00:30:34,160 --> 00:30:38,200 +given and we are looking for C level. + +325 +00:30:42,920 --> 00:30:46,600 +So this area actually is alpha over 2 and other + +326 +00:30:46,600 --> 00:30:49,940 +one is alpha over 2, so the area between is 1 + +327 +00:30:49,940 --> 00:30:50,440 +minus alpha. + +328 +00:30:53,340 --> 00:30:58,620 +Now since the sample size equal + +329 +00:31:01,950 --> 00:31:10,010 +16, N equals 16, so N equals 16, so your + +330 +00:31:10,010 --> 00:31:12,490 +confidence interval should be X bar plus or minus + +331 +00:31:12,490 --> 00:31:14,610 +T, S over root N. + +332 +00:31:19,350 --> 00:31:26,390 +Now, C level can be determined by T, and we know + +333 +00:31:26,390 --> 00:31:28,130 +that this quantity, + +334 +00:31:30,730 --> 00:31:36,970 +represents the margin of error. So, E equals TS + +335 +00:31:36,970 --> 00:31:42,950 +over root N. Now, since the confidence interval is + +336 +00:31:42,950 --> 00:31:50,270 +given, we know from previous chapters that the + +337 +00:31:50,270 --> 00:31:53,970 +margin equals the difference between upper and + +338 +00:31:53,970 --> 00:31:59,560 +lower divided by two. So, half distance of lower + +339 +00:31:59,560 --> 00:32:06,320 +and upper gives the margin. So that will give 260 + +340 +00:32:06,320 --> 00:32:17,620 +.2. So that's E. So now E is known to be 260.2 + +341 +00:32:17,620 --> 00:32:24,320 +equals to S is given by 400 and N is 16. + +342 +00:32:26,800 --> 00:32:29,420 +Now, simple calculation will give the value of T, + +343 +00:32:30,060 --> 00:32:31,340 +which is the critical value. + +344 +00:32:35,280 --> 00:32:38,160 +So, my T equals 2.60. + +345 +00:32:41,960 --> 00:32:47,220 +Actually, this is T alpha over 2. 
Now, the value + +346 +00:32:47,220 --> 00:32:52,400 +of the critical value is known to be 2.602. What's + +347 +00:32:52,400 --> 00:32:56,520 +the corresponding alpha over 2? Now look at the + +348 +00:32:56,520 --> 00:32:59,660 +table, at 15 degrees of freedom, + +349 +00:33:02,720 --> 00:33:10,680 +look at 15, at this value 2.602, at this value. + +350 +00:33:12,640 --> 00:33:19,880 +So, 15 degrees of freedom, 2.602, so the + +351 +00:33:19,880 --> 00:33:21,940 +corresponding alpha over 2, not alpha. + +352 +00:33:24,610 --> 00:33:31,830 +it's 1% so my alpha over 2 is + +353 +00:33:31,830 --> 00:33:43,110 +1% so alpha is 2% so the confidence level is 1 + +354 +00:33:43,110 --> 00:33:50,510 +minus alpha so 1 minus alpha is 90% so c level is + +355 +00:33:50,510 --> 00:33:59,410 +98% so that's level or the confidence level. So + +356 +00:33:59,410 --> 00:34:03,990 +again, maybe this is a tricky question. + +357 +00:34:07,330 --> 00:34:10,530 +But at least you know that if the confidence + +358 +00:34:10,530 --> 00:34:15,270 +interval is given, you can determine the margin of + +359 +00:34:15,270 --> 00:34:18,930 +error by the difference between lower and upper + +360 +00:34:18,930 --> 00:34:23,310 +divided by two. Then we know this term represents + +361 +00:34:23,310 --> 00:34:27,150 +this margin. So by using this equation, we can + +362 +00:34:27,150 --> 00:34:29,770 +compute the value of T, I mean the critical value. + +363 +00:34:30,670 --> 00:34:35,290 +So since the critical value is given or is + +364 +00:34:35,290 --> 00:34:38,590 +computed, we can determine the corresponding alpha + +365 +00:34:38,590 --> 00:34:45,390 +over 2. So alpha over 2 is 1%. So your alpha is + +366 +00:34:45,390 --> 00:34:51,710 +2%. So my C level is 98%. That's + +367 +00:34:51,710 --> 00:34:56,180 +all. Any questions? We're done, Muhammad. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..1e0ed36e5dfb3bb2272b868e4a2aaac32c81a0fd --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/S5DZZsmjnq4_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 1354, "start": 9.32, "end": 13.54, "text": " The second material exam. Question number one.", "tokens": [440, 1150, 2527, 1139, 13, 14464, 1230, 472, 13], "avg_logprob": -0.348437511920929, "compression_ratio": 0.8545454545454545, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 9.319999999999999, "end": 10.24, "word": " The", "probability": 0.6279296875}, {"start": 10.24, "end": 10.58, "word": " second", "probability": 0.86083984375}, {"start": 10.58, "end": 11.04, "word": " material", "probability": 0.4921875}, {"start": 11.04, "end": 11.5, "word": " exam.", "probability": 0.82861328125}, {"start": 12.1, "end": 12.64, "word": " Question", "probability": 0.6962890625}, {"start": 12.64, "end": 13.24, "word": " number", "probability": 0.90380859375}, {"start": 13.24, "end": 13.54, "word": " one.", "probability": 0.724609375}], "temperature": 1.0}, {"id": 2, "seek": 4115, "start": 14.81, "end": 41.15, "text": " a corporation randomly selected or selects 150 salespeople and finds that 66% who have never taken self-improvement course would like such a course. 
So in this case, currently, they select 150 salespeople and find that", "tokens": [257, 22197, 16979, 8209, 420, 3048, 82, 8451, 5763, 21123, 293, 10704, 300, 21126, 4, 567, 362, 1128, 2726, 2698, 12, 332, 46955, 518, 1164, 576, 411, 1270, 257, 1164, 13, 407, 294, 341, 1389, 11, 4362, 11, 436, 3048, 8451, 5763, 21123, 293, 915, 300], "avg_logprob": -0.2521608979143995, "compression_ratio": 1.5, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 14.81, "end": 15.21, "word": " a", "probability": 0.06640625}, {"start": 15.21, "end": 16.03, "word": " corporation", "probability": 0.9541015625}, {"start": 16.03, "end": 16.65, "word": " randomly", "probability": 0.5625}, {"start": 16.65, "end": 17.35, "word": " selected", "probability": 0.892578125}, {"start": 17.35, "end": 18.25, "word": " or", "probability": 0.68896484375}, {"start": 18.25, "end": 19.31, "word": " selects", "probability": 0.905517578125}, {"start": 19.31, "end": 20.17, "word": " 150", "probability": 0.72705078125}, {"start": 20.17, "end": 21.25, "word": " salespeople", "probability": 0.6800537109375}, {"start": 21.25, "end": 21.85, "word": " and", "probability": 0.87548828125}, {"start": 21.85, "end": 22.19, "word": " finds", "probability": 0.60498046875}, {"start": 22.19, "end": 23.09, "word": " that", "probability": 0.86376953125}, {"start": 23.09, "end": 24.01, "word": " 66", "probability": 0.87060546875}, {"start": 24.01, "end": 24.69, "word": "%", "probability": 0.89111328125}, {"start": 24.69, "end": 24.93, "word": " who", "probability": 0.87646484375}, {"start": 24.93, "end": 25.13, "word": " have", "probability": 0.927734375}, {"start": 25.13, "end": 25.43, "word": " never", "probability": 0.921875}, {"start": 25.43, "end": 26.07, "word": " taken", "probability": 0.85107421875}, {"start": 26.07, "end": 26.91, "word": " self", "probability": 0.837890625}, {"start": 26.91, "end": 28.05, "word": "-improvement", "probability": 0.8687744140625}, {"start": 28.05, "end": 28.41, "word": " 
course", "probability": 0.9384765625}, {"start": 28.41, "end": 28.61, "word": " would", "probability": 0.857421875}, {"start": 28.61, "end": 28.93, "word": " like", "probability": 0.57373046875}, {"start": 28.93, "end": 29.77, "word": " such", "probability": 0.9560546875}, {"start": 29.77, "end": 29.93, "word": " a", "probability": 0.9755859375}, {"start": 29.93, "end": 30.23, "word": " course.", "probability": 0.95849609375}, {"start": 31.07, "end": 31.29, "word": " So", "probability": 0.9326171875}, {"start": 31.29, "end": 31.41, "word": " in", "probability": 0.65185546875}, {"start": 31.41, "end": 31.59, "word": " this", "probability": 0.94580078125}, {"start": 31.59, "end": 32.07, "word": " case,", "probability": 0.91259765625}, {"start": 32.95, "end": 33.65, "word": " currently,", "probability": 0.72412109375}, {"start": 34.55, "end": 34.87, "word": " they", "probability": 0.87255859375}, {"start": 34.87, "end": 35.35, "word": " select", "probability": 0.75048828125}, {"start": 35.35, "end": 37.59, "word": " 150", "probability": 0.8564453125}, {"start": 37.59, "end": 39.03, "word": " salespeople", "probability": 0.947509765625}, {"start": 39.03, "end": 40.19, "word": " and", "probability": 0.8603515625}, {"start": 40.19, "end": 40.67, "word": " find", "probability": 0.8671875}, {"start": 40.67, "end": 41.15, "word": " that", "probability": 0.93505859375}], "temperature": 1.0}, {"id": 3, "seek": 7183, "start": 43.23, "end": 71.83, "text": " 66% would like or who have never taken this course. 
The firm did a similar study 10 years ago in which 60% of a random sample of 160 salespeople wanted a self-improvement course.", "tokens": [21126, 4, 576, 411, 420, 567, 362, 1128, 2726, 341, 1164, 13, 440, 6174, 630, 257, 2531, 2979, 1266, 924, 2057, 294, 597, 4060, 4, 295, 257, 4974, 6889, 295, 21243, 5763, 21123, 1415, 257, 2698, 12, 332, 46955, 518, 1164, 13], "avg_logprob": -0.14798328072525735, "compression_ratio": 1.3065693430656935, "no_speech_prob": 0.0, "words": [{"start": 43.23, "end": 43.93, "word": " 66", "probability": 0.740234375}, {"start": 43.93, "end": 44.81, "word": "%", "probability": 0.88671875}, {"start": 44.81, "end": 52.17, "word": " would", "probability": 0.8193359375}, {"start": 52.17, "end": 52.67, "word": " like", "probability": 0.9208984375}, {"start": 52.67, "end": 53.81, "word": " or", "probability": 0.52685546875}, {"start": 53.81, "end": 54.53, "word": " who", "probability": 0.798828125}, {"start": 54.53, "end": 54.89, "word": " have", "probability": 0.8876953125}, {"start": 54.89, "end": 55.19, "word": " never", "probability": 0.9287109375}, {"start": 55.19, "end": 55.69, "word": " taken", "probability": 0.84375}, {"start": 55.69, "end": 56.97, "word": " this", "probability": 0.83251953125}, {"start": 56.97, "end": 57.37, "word": " course.", "probability": 0.9599609375}, {"start": 58.53, "end": 58.79, "word": " The", "probability": 0.8837890625}, {"start": 58.79, "end": 59.13, "word": " firm", "probability": 0.83935546875}, {"start": 59.13, "end": 59.49, "word": " did", "probability": 0.95849609375}, {"start": 59.49, "end": 59.89, "word": " a", "probability": 0.98779296875}, {"start": 59.89, "end": 60.19, "word": " similar", "probability": 0.97216796875}, {"start": 60.19, "end": 60.65, "word": " study", "probability": 0.9443359375}, {"start": 60.65, "end": 60.95, "word": " 10", "probability": 0.57080078125}, {"start": 60.95, "end": 61.19, "word": " years", "probability": 0.92529296875}, {"start": 61.19, "end": 61.53, "word": " 
ago", "probability": 0.88525390625}, {"start": 61.53, "end": 62.73, "word": " in", "probability": 0.544921875}, {"start": 62.73, "end": 63.03, "word": " which", "probability": 0.9541015625}, {"start": 63.03, "end": 63.57, "word": " 60", "probability": 0.91455078125}, {"start": 63.57, "end": 64.17, "word": "%", "probability": 0.9814453125}, {"start": 64.17, "end": 64.99, "word": " of", "probability": 0.95947265625}, {"start": 64.99, "end": 65.11, "word": " a", "probability": 0.79052734375}, {"start": 65.11, "end": 65.35, "word": " random", "probability": 0.89306640625}, {"start": 65.35, "end": 65.87, "word": " sample", "probability": 0.923828125}, {"start": 65.87, "end": 66.69, "word": " of", "probability": 0.951171875}, {"start": 66.69, "end": 67.43, "word": " 160", "probability": 0.65380859375}, {"start": 67.43, "end": 68.55, "word": " salespeople", "probability": 0.848388671875}, {"start": 68.55, "end": 69.79, "word": " wanted", "probability": 0.87451171875}, {"start": 69.79, "end": 70.03, "word": " a", "probability": 0.873046875}, {"start": 70.03, "end": 70.29, "word": " self", "probability": 0.87060546875}, {"start": 70.29, "end": 71.55, "word": "-improvement", "probability": 0.8984375}, {"start": 71.55, "end": 71.83, "word": " course.", "probability": 0.9580078125}], "temperature": 1.0}, {"id": 4, "seek": 9744, "start": 75.62, "end": 97.44, "text": " They select a random sample of 160 and tell that 60% would like to take this course. So we have here two information about previous study and currently. 
So currently we have this information.", "tokens": [814, 3048, 257, 4974, 6889, 295, 21243, 293, 980, 300, 4060, 4, 576, 411, 281, 747, 341, 1164, 13, 407, 321, 362, 510, 732, 1589, 466, 3894, 2979, 293, 4362, 13, 407, 4362, 321, 362, 341, 1589, 13], "avg_logprob": -0.2375801220918313, "compression_ratio": 1.4222222222222223, "no_speech_prob": 0.0, "words": [{"start": 75.62, "end": 75.92, "word": " They", "probability": 0.287353515625}, {"start": 75.92, "end": 76.22, "word": " select", "probability": 0.67626953125}, {"start": 76.22, "end": 76.4, "word": " a", "probability": 0.931640625}, {"start": 76.4, "end": 76.6, "word": " random", "probability": 0.85888671875}, {"start": 76.6, "end": 77.08, "word": " sample", "probability": 0.9384765625}, {"start": 77.08, "end": 77.6, "word": " of", "probability": 0.95361328125}, {"start": 77.6, "end": 78.44, "word": " 160", "probability": 0.89404296875}, {"start": 78.44, "end": 80.86, "word": " and", "probability": 0.353759765625}, {"start": 80.86, "end": 81.18, "word": " tell", "probability": 0.7880859375}, {"start": 81.18, "end": 81.52, "word": " that", "probability": 0.48046875}, {"start": 81.52, "end": 83.16, "word": " 60", "probability": 0.92431640625}, {"start": 83.16, "end": 83.52, "word": "%", "probability": 0.7880859375}, {"start": 83.52, "end": 85.22, "word": " would", "probability": 0.9375}, {"start": 85.22, "end": 85.52, "word": " like", "probability": 0.9365234375}, {"start": 85.52, "end": 85.68, "word": " to", "probability": 0.9638671875}, {"start": 85.68, "end": 85.98, "word": " take", "probability": 0.88623046875}, {"start": 85.98, "end": 86.74, "word": " this", "probability": 0.94384765625}, {"start": 86.74, "end": 87.14, "word": " course.", "probability": 0.962890625}, {"start": 89.06, "end": 89.9, "word": " So", "probability": 0.87451171875}, {"start": 89.9, "end": 90.08, "word": " we", "probability": 0.365966796875}, {"start": 90.08, "end": 90.26, "word": " have", "probability": 0.93408203125}, {"start": 
90.26, "end": 90.52, "word": " here", "probability": 0.84326171875}, {"start": 90.52, "end": 90.8, "word": " two", "probability": 0.74560546875}, {"start": 90.8, "end": 91.32, "word": " information", "probability": 0.76611328125}, {"start": 91.32, "end": 91.76, "word": " about", "probability": 0.8798828125}, {"start": 91.76, "end": 92.82, "word": " previous", "probability": 0.81201171875}, {"start": 92.82, "end": 93.3, "word": " study", "probability": 0.88916015625}, {"start": 93.3, "end": 94.12, "word": " and", "probability": 0.90673828125}, {"start": 94.12, "end": 94.56, "word": " currently.", "probability": 0.66455078125}, {"start": 95.42, "end": 95.68, "word": " So", "probability": 0.900390625}, {"start": 95.68, "end": 96.08, "word": " currently", "probability": 0.78076171875}, {"start": 96.08, "end": 96.56, "word": " we", "probability": 0.65185546875}, {"start": 96.56, "end": 96.74, "word": " have", "probability": 0.947265625}, {"start": 96.74, "end": 96.94, "word": " this", "probability": 0.9423828125}, {"start": 96.94, "end": 97.44, "word": " information.", "probability": 0.8388671875}], "temperature": 1.0}, {"id": 5, "seek": 12540, "start": 99.66, "end": 125.4, "text": " The sample size was 150, with a proportion 66% for the people who would like to attend or take this course. Mid-Paiwan and Pai Tu represent the true proportion, it means the population proportion, of workers who would like to attend a self-improvement course in the recent study and the past studies in Taiwan. 
So recent, Paiwan.", "tokens": [440, 6889, 2744, 390, 8451, 11, 365, 257, 16068, 21126, 4, 337, 264, 561, 567, 576, 411, 281, 6888, 420, 747, 341, 1164, 13, 7033, 12, 47, 1301, 7916, 293, 430, 1301, 7836, 2906, 264, 2074, 16068, 11, 309, 1355, 264, 4415, 16068, 11, 295, 5600, 567, 576, 411, 281, 6888, 257, 2698, 12, 332, 46955, 518, 1164, 294, 264, 5162, 2979, 293, 264, 1791, 5313, 294, 12296, 13, 407, 5162, 11, 430, 1301, 7916, 13], "avg_logprob": -0.25365260049894256, "compression_ratio": 1.6666666666666667, "no_speech_prob": 0.0, "words": [{"start": 99.66, "end": 99.9, "word": " The", "probability": 0.6865234375}, {"start": 99.9, "end": 100.14, "word": " sample", "probability": 0.9287109375}, {"start": 100.14, "end": 100.48, "word": " size", "probability": 0.8544921875}, {"start": 100.48, "end": 100.72, "word": " was", "probability": 0.9521484375}, {"start": 100.72, "end": 101.58, "word": " 150,", "probability": 0.89111328125}, {"start": 102.32, "end": 102.58, "word": " with", "probability": 0.88525390625}, {"start": 102.58, "end": 102.72, "word": " a", "probability": 0.65283203125}, {"start": 102.72, "end": 103.1, "word": " proportion", "probability": 0.7919921875}, {"start": 103.1, "end": 103.66, "word": " 66", "probability": 0.318603515625}, {"start": 103.66, "end": 104.32, "word": "%", "probability": 0.7763671875}, {"start": 104.32, "end": 104.86, "word": " for", "probability": 0.939453125}, {"start": 104.86, "end": 105.02, "word": " the", "probability": 0.91748046875}, {"start": 105.02, "end": 105.24, "word": " people", "probability": 0.9697265625}, {"start": 105.24, "end": 105.46, "word": " who", "probability": 0.90380859375}, {"start": 105.46, "end": 105.68, "word": " would", "probability": 0.9248046875}, {"start": 105.68, "end": 105.98, "word": " like", "probability": 0.9462890625}, {"start": 105.98, "end": 106.16, "word": " to", "probability": 0.97119140625}, {"start": 106.16, "end": 106.66, "word": " attend", "probability": 0.90771484375}, {"start": 
106.66, "end": 107.1, "word": " or", "probability": 0.90185546875}, {"start": 107.1, "end": 107.6, "word": " take", "probability": 0.666015625}, {"start": 107.6, "end": 107.92, "word": " this", "probability": 0.8984375}, {"start": 107.92, "end": 108.3, "word": " course.", "probability": 0.96044921875}, {"start": 109.28, "end": 109.46, "word": " Mid", "probability": 0.441650390625}, {"start": 109.46, "end": 109.8, "word": "-Paiwan", "probability": 0.7491455078125}, {"start": 109.8, "end": 110.0, "word": " and", "probability": 0.91650390625}, {"start": 110.0, "end": 110.18, "word": " Pai", "probability": 0.74072265625}, {"start": 110.18, "end": 110.3, "word": " Tu", "probability": 0.300537109375}, {"start": 110.3, "end": 110.86, "word": " represent", "probability": 0.6640625}, {"start": 110.86, "end": 111.08, "word": " the", "probability": 0.81298828125}, {"start": 111.08, "end": 111.28, "word": " true", "probability": 0.9658203125}, {"start": 111.28, "end": 111.84, "word": " proportion,", "probability": 0.86865234375}, {"start": 112.6, "end": 112.76, "word": " it", "probability": 0.826171875}, {"start": 112.76, "end": 113.0, "word": " means", "probability": 0.91748046875}, {"start": 113.0, "end": 113.12, "word": " the", "probability": 0.90869140625}, {"start": 113.12, "end": 113.58, "word": " population", "probability": 0.9248046875}, {"start": 113.58, "end": 114.12, "word": " proportion,", "probability": 0.69921875}, {"start": 115.16, "end": 115.26, "word": " of", "probability": 0.9521484375}, {"start": 115.26, "end": 115.84, "word": " workers", "probability": 0.8984375}, {"start": 115.84, "end": 116.14, "word": " who", "probability": 0.908203125}, {"start": 116.14, "end": 116.32, "word": " would", "probability": 0.923828125}, {"start": 116.32, "end": 116.62, "word": " like", "probability": 0.9462890625}, {"start": 116.62, "end": 116.78, "word": " to", "probability": 0.966796875}, {"start": 116.78, "end": 117.26, "word": " attend", "probability": 0.88232421875}, 
{"start": 117.26, "end": 117.6, "word": " a", "probability": 0.97998046875}, {"start": 117.6, "end": 117.8, "word": " self", "probability": 0.83935546875}, {"start": 117.8, "end": 118.38, "word": "-improvement", "probability": 0.95263671875}, {"start": 118.38, "end": 118.72, "word": " course", "probability": 0.95751953125}, {"start": 118.72, "end": 118.92, "word": " in", "probability": 0.609375}, {"start": 118.92, "end": 119.06, "word": " the", "probability": 0.900390625}, {"start": 119.06, "end": 119.4, "word": " recent", "probability": 0.90771484375}, {"start": 119.4, "end": 119.84, "word": " study", "probability": 0.6806640625}, {"start": 119.84, "end": 120.68, "word": " and", "probability": 0.83935546875}, {"start": 120.68, "end": 121.14, "word": " the", "probability": 0.859375}, {"start": 121.14, "end": 121.44, "word": " past", "probability": 0.8662109375}, {"start": 121.44, "end": 121.92, "word": " studies", "probability": 0.7412109375}, {"start": 121.92, "end": 122.1, "word": " in", "probability": 0.271240234375}, {"start": 122.1, "end": 122.38, "word": " Taiwan.", "probability": 0.2362060546875}, {"start": 123.66, "end": 123.88, "word": " So", "probability": 0.755859375}, {"start": 123.88, "end": 124.36, "word": " recent,", "probability": 0.60546875}, {"start": 124.7, "end": 125.4, "word": " Paiwan.", "probability": 0.7996419270833334}], "temperature": 1.0}, {"id": 6, "seek": 15104, "start": 127.74, "end": 151.04, "text": " And Pi 2 is the previous study. This weather, this proportion has changed from the previous study by using two approaches. Critical value approach and B value approach. So here we are talking about Pi 1 equals Pi 2. 
Since the problem says that", "tokens": [400, 17741, 568, 307, 264, 3894, 2979, 13, 639, 5503, 11, 341, 16068, 575, 3105, 490, 264, 3894, 2979, 538, 1228, 732, 11587, 13, 39482, 2158, 3109, 293, 363, 2158, 3109, 13, 407, 510, 321, 366, 1417, 466, 17741, 502, 6915, 17741, 568, 13, 4162, 264, 1154, 1619, 300], "avg_logprob": -0.23375000536441803, "compression_ratio": 1.5061728395061729, "no_speech_prob": 0.0, "words": [{"start": 127.74, "end": 128.1, "word": " And", "probability": 0.47216796875}, {"start": 128.1, "end": 128.38, "word": " Pi", "probability": 0.453857421875}, {"start": 128.38, "end": 128.66, "word": " 2", "probability": 0.6240234375}, {"start": 128.66, "end": 129.04, "word": " is", "probability": 0.93212890625}, {"start": 129.04, "end": 129.18, "word": " the", "probability": 0.88525390625}, {"start": 129.18, "end": 129.44, "word": " previous", "probability": 0.8310546875}, {"start": 129.44, "end": 129.78, "word": " study.", "probability": 0.7294921875}, {"start": 130.76, "end": 131.48, "word": " This", "probability": 0.798828125}, {"start": 131.48, "end": 131.74, "word": " weather,", "probability": 0.4912109375}, {"start": 131.92, "end": 132.1, "word": " this", "probability": 0.94140625}, {"start": 132.1, "end": 132.66, "word": " proportion", "probability": 0.849609375}, {"start": 132.66, "end": 133.0, "word": " has", "probability": 0.85009765625}, {"start": 133.0, "end": 133.66, "word": " changed", "probability": 0.8720703125}, {"start": 133.66, "end": 135.98, "word": " from", "probability": 0.72265625}, {"start": 135.98, "end": 136.24, "word": " the", "probability": 0.91796875}, {"start": 136.24, "end": 136.54, "word": " previous", "probability": 0.8720703125}, {"start": 136.54, "end": 136.86, "word": " study", "probability": 0.92333984375}, {"start": 136.86, "end": 137.1, "word": " by", "probability": 0.9306640625}, {"start": 137.1, "end": 137.54, "word": " using", "probability": 0.9345703125}, {"start": 137.54, "end": 138.56, "word": " two", 
"probability": 0.892578125}, {"start": 138.56, "end": 139.02, "word": " approaches.", "probability": 0.7529296875}, {"start": 139.56, "end": 140.02, "word": " Critical", "probability": 0.55615234375}, {"start": 140.02, "end": 140.38, "word": " value", "probability": 0.8359375}, {"start": 140.38, "end": 140.9, "word": " approach", "probability": 0.90673828125}, {"start": 140.9, "end": 141.58, "word": " and", "probability": 0.86962890625}, {"start": 141.58, "end": 141.86, "word": " B", "probability": 0.77294921875}, {"start": 141.86, "end": 142.16, "word": " value", "probability": 0.6669921875}, {"start": 142.16, "end": 142.54, "word": " approach.", "probability": 0.91796875}, {"start": 143.74, "end": 144.12, "word": " So", "probability": 0.92578125}, {"start": 144.12, "end": 144.28, "word": " here", "probability": 0.71240234375}, {"start": 144.28, "end": 144.38, "word": " we", "probability": 0.8759765625}, {"start": 144.38, "end": 144.52, "word": " are", "probability": 0.92431640625}, {"start": 144.52, "end": 144.88, "word": " talking", "probability": 0.85546875}, {"start": 144.88, "end": 145.42, "word": " about", "probability": 0.91552734375}, {"start": 145.42, "end": 146.92, "word": " Pi", "probability": 0.8720703125}, {"start": 146.92, "end": 147.16, "word": " 1", "probability": 0.97216796875}, {"start": 147.16, "end": 147.54, "word": " equals", "probability": 0.76416015625}, {"start": 147.54, "end": 147.8, "word": " Pi", "probability": 0.9658203125}, {"start": 147.8, "end": 148.14, "word": " 2.", "probability": 0.99462890625}, {"start": 149.16, "end": 149.64, "word": " Since", "probability": 0.76416015625}, {"start": 149.64, "end": 149.84, "word": " the", "probability": 0.90576171875}, {"start": 149.84, "end": 150.18, "word": " problem", "probability": 0.8515625}, {"start": 150.18, "end": 150.68, "word": " says", "probability": 0.8994140625}, {"start": 150.68, "end": 151.04, "word": " that", "probability": 0.88623046875}], "temperature": 1.0}, {"id": 7, "seek": 
18181, "start": 152.41, "end": 181.81, "text": " The proportion has changed. You don't know the exact direction, either greater than or smaller than. So this one should be Y1 does not equal Y2. So step one, you have to state the appropriate null and alternative hypothesis. Second step, compute the value of the test statistic. In this case, your Z statistic should be", "tokens": [440, 16068, 575, 3105, 13, 509, 500, 380, 458, 264, 1900, 3513, 11, 2139, 5044, 813, 420, 4356, 813, 13, 407, 341, 472, 820, 312, 398, 16, 775, 406, 2681, 398, 17, 13, 407, 1823, 472, 11, 291, 362, 281, 1785, 264, 6854, 18184, 293, 8535, 17291, 13, 5736, 1823, 11, 14722, 264, 2158, 295, 264, 1500, 29588, 13, 682, 341, 1389, 11, 428, 1176, 29588, 820, 312], "avg_logprob": -0.15692934696225153, "compression_ratio": 1.5763546798029557, "no_speech_prob": 0.0, "words": [{"start": 152.41, "end": 152.65, "word": " The", "probability": 0.595703125}, {"start": 152.65, "end": 153.21, "word": " proportion", "probability": 0.83740234375}, {"start": 153.21, "end": 153.61, "word": " has", "probability": 0.78271484375}, {"start": 153.61, "end": 154.23, "word": " changed.", "probability": 0.9033203125}, {"start": 154.73, "end": 154.89, "word": " You", "probability": 0.873046875}, {"start": 154.89, "end": 155.11, "word": " don't", "probability": 0.97216796875}, {"start": 155.11, "end": 155.29, "word": " know", "probability": 0.884765625}, {"start": 155.29, "end": 155.43, "word": " the", "probability": 0.9169921875}, {"start": 155.43, "end": 155.87, "word": " exact", "probability": 0.93701171875}, {"start": 155.87, "end": 156.53, "word": " direction,", "probability": 0.97998046875}, {"start": 156.67, "end": 156.91, "word": " either", "probability": 0.93603515625}, {"start": 156.91, "end": 157.63, "word": " greater", "probability": 0.89697265625}, {"start": 157.63, "end": 157.89, "word": " than", "probability": 0.951171875}, {"start": 157.89, "end": 158.09, "word": " or", "probability": 0.9501953125}, 
{"start": 158.09, "end": 158.45, "word": " smaller", "probability": 0.87255859375}, {"start": 158.45, "end": 158.75, "word": " than.", "probability": 0.94970703125}, {"start": 159.29, "end": 159.61, "word": " So", "probability": 0.95556640625}, {"start": 159.61, "end": 159.93, "word": " this", "probability": 0.88916015625}, {"start": 159.93, "end": 160.13, "word": " one", "probability": 0.916015625}, {"start": 160.13, "end": 160.37, "word": " should", "probability": 0.970703125}, {"start": 160.37, "end": 161.73, "word": " be", "probability": 0.943359375}, {"start": 161.73, "end": 162.37, "word": " Y1", "probability": 0.531890869140625}, {"start": 162.37, "end": 163.37, "word": " does", "probability": 0.81494140625}, {"start": 163.37, "end": 163.65, "word": " not", "probability": 0.9462890625}, {"start": 163.65, "end": 164.05, "word": " equal", "probability": 0.93017578125}, {"start": 164.05, "end": 164.51, "word": " Y2.", "probability": 0.85107421875}, {"start": 165.15, "end": 165.49, "word": " So", "probability": 0.93310546875}, {"start": 165.49, "end": 165.81, "word": " step", "probability": 0.8427734375}, {"start": 165.81, "end": 166.05, "word": " one,", "probability": 0.83203125}, {"start": 166.21, "end": 166.37, "word": " you", "probability": 0.95947265625}, {"start": 166.37, "end": 166.59, "word": " have", "probability": 0.94482421875}, {"start": 166.59, "end": 166.75, "word": " to", "probability": 0.96875}, {"start": 166.75, "end": 167.15, "word": " state", "probability": 0.9296875}, {"start": 167.15, "end": 167.85, "word": " the", "probability": 0.88330078125}, {"start": 167.85, "end": 168.37, "word": " appropriate", "probability": 0.82861328125}, {"start": 168.37, "end": 168.83, "word": " null", "probability": 0.9140625}, {"start": 168.83, "end": 169.05, "word": " and", "probability": 0.9189453125}, {"start": 169.05, "end": 169.55, "word": " alternative", "probability": 0.90771484375}, {"start": 169.55, "end": 170.93, "word": " hypothesis.", "probability": 
0.75048828125}, {"start": 173.33, "end": 174.01, "word": " Second", "probability": 0.82470703125}, {"start": 174.01, "end": 174.49, "word": " step,", "probability": 0.91552734375}, {"start": 175.79, "end": 176.17, "word": " compute", "probability": 0.869140625}, {"start": 176.17, "end": 177.03, "word": " the", "probability": 0.91748046875}, {"start": 177.03, "end": 177.37, "word": " value", "probability": 0.98095703125}, {"start": 177.37, "end": 177.61, "word": " of", "probability": 0.966796875}, {"start": 177.61, "end": 177.79, "word": " the", "probability": 0.78759765625}, {"start": 177.79, "end": 178.15, "word": " test", "probability": 0.7265625}, {"start": 178.15, "end": 178.85, "word": " statistic.", "probability": 0.92333984375}, {"start": 179.23, "end": 179.53, "word": " In", "probability": 0.9609375}, {"start": 179.53, "end": 179.73, "word": " this", "probability": 0.94482421875}, {"start": 179.73, "end": 180.07, "word": " case,", "probability": 0.912109375}, {"start": 180.23, "end": 180.47, "word": " your", "probability": 0.86279296875}, {"start": 180.47, "end": 180.71, "word": " Z", "probability": 0.6455078125}, {"start": 180.71, "end": 181.11, "word": " statistic", "probability": 0.6962890625}, {"start": 181.11, "end": 181.51, "word": " should", "probability": 0.966796875}, {"start": 181.51, "end": 181.81, "word": " be", "probability": 0.95263671875}], "temperature": 1.0}, {"id": 8, "seek": 21277, "start": 184.39, "end": 212.77, "text": " P1 minus P2 minus Pi 1 minus Pi 2, under the square root of P dash 1 minus P dash times 1 over N1 plus 1 over N1. Now, P1 and P2 are given under the null hypothesis Pi 1 minus Pi 2 is 0. 
So here we have to compute P dash, which is the overall", "tokens": [430, 16, 3175, 430, 17, 3175, 17741, 502, 3175, 17741, 568, 11, 833, 264, 3732, 5593, 295, 430, 8240, 502, 3175, 430, 8240, 1413, 502, 670, 426, 16, 1804, 502, 670, 426, 16, 13, 823, 11, 430, 16, 293, 430, 17, 366, 2212, 833, 264, 18184, 17291, 17741, 502, 3175, 17741, 568, 307, 1958, 13, 407, 510, 321, 362, 281, 14722, 430, 8240, 11, 597, 307, 264, 4787], "avg_logprob": -0.21727808316548666, "compression_ratio": 1.6643835616438356, "no_speech_prob": 0.0, "words": [{"start": 184.39, "end": 184.77, "word": " P1", "probability": 0.5382080078125}, {"start": 184.77, "end": 185.13, "word": " minus", "probability": 0.7265625}, {"start": 185.13, "end": 185.53, "word": " P2", "probability": 0.969970703125}, {"start": 185.53, "end": 185.99, "word": " minus", "probability": 0.9814453125}, {"start": 185.99, "end": 186.33, "word": " Pi", "probability": 0.432373046875}, {"start": 186.33, "end": 186.55, "word": " 1", "probability": 0.69970703125}, {"start": 186.55, "end": 186.89, "word": " minus", "probability": 0.9873046875}, {"start": 186.89, "end": 187.11, "word": " Pi", "probability": 0.986328125}, {"start": 187.11, "end": 187.41, "word": " 2,", "probability": 0.99365234375}, {"start": 189.29, "end": 189.73, "word": " under", "probability": 0.6640625}, {"start": 189.73, "end": 189.91, "word": " the", "probability": 0.52880859375}, {"start": 189.91, "end": 190.15, "word": " square", "probability": 0.63720703125}, {"start": 190.15, "end": 190.41, "word": " root", "probability": 0.93408203125}, {"start": 190.41, "end": 190.89, "word": " of", "probability": 0.9697265625}, {"start": 190.89, "end": 191.11, "word": " P", "probability": 0.8447265625}, {"start": 191.11, "end": 191.47, "word": " dash", "probability": 0.73974609375}, {"start": 191.47, "end": 193.67, "word": " 1", "probability": 0.7626953125}, {"start": 193.67, "end": 194.05, "word": " minus", "probability": 0.9873046875}, {"start": 194.05, "end": 
194.25, "word": " P", "probability": 0.95263671875}, {"start": 194.25, "end": 194.55, "word": " dash", "probability": 0.90234375}, {"start": 194.55, "end": 195.09, "word": " times", "probability": 0.87548828125}, {"start": 195.09, "end": 197.05, "word": " 1", "probability": 0.89697265625}, {"start": 197.05, "end": 197.31, "word": " over", "probability": 0.91015625}, {"start": 197.31, "end": 197.63, "word": " N1", "probability": 0.750244140625}, {"start": 197.63, "end": 197.91, "word": " plus", "probability": 0.9345703125}, {"start": 197.91, "end": 198.15, "word": " 1", "probability": 0.97509765625}, {"start": 198.15, "end": 198.35, "word": " over", "probability": 0.89501953125}, {"start": 198.35, "end": 198.77, "word": " N1.", "probability": 0.8662109375}, {"start": 200.49, "end": 201.11, "word": " Now,", "probability": 0.939453125}, {"start": 201.19, "end": 201.51, "word": " P1", "probability": 0.982177734375}, {"start": 201.51, "end": 201.67, "word": " and", "probability": 0.94677734375}, {"start": 201.67, "end": 201.99, "word": " P2", "probability": 0.996337890625}, {"start": 201.99, "end": 202.19, "word": " are", "probability": 0.94873046875}, {"start": 202.19, "end": 202.51, "word": " given", "probability": 0.90625}, {"start": 202.51, "end": 204.55, "word": " under", "probability": 0.50732421875}, {"start": 204.55, "end": 204.79, "word": " the", "probability": 0.861328125}, {"start": 204.79, "end": 204.93, "word": " null", "probability": 0.96044921875}, {"start": 204.93, "end": 205.47, "word": " hypothesis", "probability": 0.84423828125}, {"start": 205.47, "end": 206.49, "word": " Pi", "probability": 0.4619140625}, {"start": 206.49, "end": 206.71, "word": " 1", "probability": 0.98046875}, {"start": 206.71, "end": 207.07, "word": " minus", "probability": 0.986328125}, {"start": 207.07, "end": 207.29, "word": " Pi", "probability": 0.98095703125}, {"start": 207.29, "end": 207.49, "word": " 2", "probability": 0.99072265625}, {"start": 207.49, "end": 207.73, 
"word": " is", "probability": 0.947265625}, {"start": 207.73, "end": 208.03, "word": " 0.", "probability": 0.6669921875}, {"start": 209.19, "end": 209.57, "word": " So", "probability": 0.947265625}, {"start": 209.57, "end": 209.73, "word": " here", "probability": 0.56591796875}, {"start": 209.73, "end": 209.83, "word": " we", "probability": 0.611328125}, {"start": 209.83, "end": 209.97, "word": " have", "probability": 0.94287109375}, {"start": 209.97, "end": 210.09, "word": " to", "probability": 0.96826171875}, {"start": 210.09, "end": 210.61, "word": " compute", "probability": 0.908203125}, {"start": 210.61, "end": 211.27, "word": " P", "probability": 0.7529296875}, {"start": 211.27, "end": 211.61, "word": " dash,", "probability": 0.92431640625}, {"start": 211.79, "end": 211.97, "word": " which", "probability": 0.951171875}, {"start": 211.97, "end": 212.11, "word": " is", "probability": 0.9501953125}, {"start": 212.11, "end": 212.31, "word": " the", "probability": 0.9208984375}, {"start": 212.31, "end": 212.77, "word": " overall", "probability": 0.8759765625}], "temperature": 1.0}, {"id": 9, "seek": 24321, "start": 215.35, "end": 243.21, "text": " B dash equals x1 plus x2 divided by n1 plus n2. Now these x's, I mean the number of successes are not given directly in this problem, but we can figure out the values of x1 and x2 by using this information, which is n1 equals 150 and b1 equals 66%. 
Because we know that b1 equals x1 over n1.", "tokens": [363, 8240, 6915, 2031, 16, 1804, 2031, 17, 6666, 538, 297, 16, 1804, 297, 17, 13, 823, 613, 2031, 311, 11, 286, 914, 264, 1230, 295, 26101, 366, 406, 2212, 3838, 294, 341, 1154, 11, 457, 321, 393, 2573, 484, 264, 4190, 295, 2031, 16, 293, 2031, 17, 538, 1228, 341, 1589, 11, 597, 307, 297, 16, 6915, 8451, 293, 272, 16, 6915, 21126, 6856, 1436, 321, 458, 300, 272, 16, 6915, 2031, 16, 670, 297, 16, 13], "avg_logprob": -0.1968947743313222, "compression_ratio": 1.4527363184079602, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 215.35, "end": 216.03, "word": " B", "probability": 0.181396484375}, {"start": 216.03, "end": 216.37, "word": " dash", "probability": 0.465087890625}, {"start": 216.37, "end": 216.75, "word": " equals", "probability": 0.7568359375}, {"start": 216.75, "end": 217.33, "word": " x1", "probability": 0.816162109375}, {"start": 217.33, "end": 217.61, "word": " plus", "probability": 0.904296875}, {"start": 217.61, "end": 218.17, "word": " x2", "probability": 0.994140625}, {"start": 218.17, "end": 219.47, "word": " divided", "probability": 0.5205078125}, {"start": 219.47, "end": 219.71, "word": " by", "probability": 0.9697265625}, {"start": 219.71, "end": 220.05, "word": " n1", "probability": 0.879638671875}, {"start": 220.05, "end": 220.35, "word": " plus", "probability": 0.94921875}, {"start": 220.35, "end": 220.71, "word": " n2.", "probability": 0.846435546875}, {"start": 222.17, "end": 222.79, "word": " Now", "probability": 0.89990234375}, {"start": 222.79, "end": 223.19, "word": " these", "probability": 0.380615234375}, {"start": 223.19, "end": 223.67, "word": " x's,", "probability": 0.880859375}, {"start": 223.75, "end": 223.85, "word": " I", "probability": 0.93359375}, {"start": 223.85, "end": 223.95, "word": " mean", "probability": 0.97021484375}, {"start": 223.95, "end": 224.07, "word": " the", "probability": 0.669921875}, {"start": 224.07, "end": 224.29, "word": " number", 
"probability": 0.91650390625}, {"start": 224.29, "end": 224.45, "word": " of", "probability": 0.96923828125}, {"start": 224.45, "end": 224.81, "word": " successes", "probability": 0.4541015625}, {"start": 224.81, "end": 225.15, "word": " are", "probability": 0.80126953125}, {"start": 225.15, "end": 225.35, "word": " not", "probability": 0.94287109375}, {"start": 225.35, "end": 225.67, "word": " given", "probability": 0.84423828125}, {"start": 225.67, "end": 226.65, "word": " directly", "probability": 0.85498046875}, {"start": 226.65, "end": 227.47, "word": " in", "probability": 0.8671875}, {"start": 227.47, "end": 227.79, "word": " this", "probability": 0.94677734375}, {"start": 227.79, "end": 228.77, "word": " problem,", "probability": 0.88134765625}, {"start": 228.87, "end": 229.01, "word": " but", "probability": 0.9267578125}, {"start": 229.01, "end": 229.17, "word": " we", "probability": 0.95654296875}, {"start": 229.17, "end": 229.45, "word": " can", "probability": 0.9443359375}, {"start": 229.45, "end": 230.17, "word": " figure", "probability": 0.96630859375}, {"start": 230.17, "end": 230.41, "word": " out", "probability": 0.8818359375}, {"start": 230.41, "end": 230.57, "word": " the", "probability": 0.9111328125}, {"start": 230.57, "end": 230.91, "word": " values", "probability": 0.880859375}, {"start": 230.91, "end": 231.01, "word": " of", "probability": 0.94189453125}, {"start": 231.01, "end": 231.31, "word": " x1", "probability": 0.990478515625}, {"start": 231.31, "end": 231.43, "word": " and", "probability": 0.94140625}, {"start": 231.43, "end": 231.87, "word": " x2", "probability": 0.998779296875}, {"start": 231.87, "end": 232.97, "word": " by", "probability": 0.849609375}, {"start": 232.97, "end": 233.53, "word": " using", "probability": 0.92236328125}, {"start": 233.53, "end": 234.05, "word": " this", "probability": 0.94775390625}, {"start": 234.05, "end": 234.55, "word": " information,", "probability": 0.8388671875}, {"start": 234.97, "end": 235.45, 
"word": " which", "probability": 0.9541015625}, {"start": 235.45, "end": 235.65, "word": " is", "probability": 0.9482421875}, {"start": 235.65, "end": 236.09, "word": " n1", "probability": 0.957275390625}, {"start": 236.09, "end": 236.35, "word": " equals", "probability": 0.7119140625}, {"start": 236.35, "end": 236.81, "word": " 150", "probability": 0.85498046875}, {"start": 236.81, "end": 237.93, "word": " and", "probability": 0.59716796875}, {"start": 237.93, "end": 238.31, "word": " b1", "probability": 0.739501953125}, {"start": 238.31, "end": 238.61, "word": " equals", "probability": 0.92529296875}, {"start": 238.61, "end": 239.43, "word": " 66%.", "probability": 0.5802001953125}, {"start": 239.43, "end": 240.07, "word": " Because", "probability": 0.9140625}, {"start": 240.07, "end": 240.99, "word": " we", "probability": 0.8857421875}, {"start": 240.99, "end": 241.19, "word": " know", "probability": 0.88134765625}, {"start": 241.19, "end": 241.43, "word": " that", "probability": 0.9404296875}, {"start": 241.43, "end": 241.81, "word": " b1", "probability": 0.935546875}, {"start": 241.81, "end": 242.15, "word": " equals", "probability": 0.9345703125}, {"start": 242.15, "end": 242.59, "word": " x1", "probability": 0.99267578125}, {"start": 242.59, "end": 242.79, "word": " over", "probability": 0.90576171875}, {"start": 242.79, "end": 243.21, "word": " n1.", "probability": 0.993408203125}], "temperature": 1.0}, {"id": 10, "seek": 27594, "start": 246.86, "end": 275.94, "text": " So, by using this equation, X1 equals N1 times V1. N1 150 times 66 percent, that will give 150 times 66, so that's 99. 
So 150 times, it's 99.", "tokens": [407, 11, 538, 1228, 341, 5367, 11, 1783, 16, 6915, 426, 16, 1413, 691, 16, 13, 426, 16, 8451, 1413, 21126, 3043, 11, 300, 486, 976, 8451, 1413, 21126, 11, 370, 300, 311, 11803, 13, 407, 8451, 1413, 11, 309, 311, 11803, 13], "avg_logprob": -0.23899148201400583, "compression_ratio": 1.3148148148148149, "no_speech_prob": 0.0, "words": [{"start": 246.86, "end": 247.52, "word": " So,", "probability": 0.8583984375}, {"start": 247.86, "end": 248.54, "word": " by", "probability": 0.96337890625}, {"start": 248.54, "end": 248.84, "word": " using", "probability": 0.93115234375}, {"start": 248.84, "end": 249.14, "word": " this", "probability": 0.94775390625}, {"start": 249.14, "end": 249.76, "word": " equation,", "probability": 0.97216796875}, {"start": 250.48, "end": 251.26, "word": " X1", "probability": 0.646484375}, {"start": 251.26, "end": 253.06, "word": " equals", "probability": 0.7275390625}, {"start": 253.06, "end": 253.48, "word": " N1", "probability": 0.9345703125}, {"start": 253.48, "end": 253.9, "word": " times", "probability": 0.90966796875}, {"start": 253.9, "end": 254.36, "word": " V1.", "probability": 0.814453125}, {"start": 256.92, "end": 258.16, "word": " N1", "probability": 0.964599609375}, {"start": 258.16, "end": 259.38, "word": " 150", "probability": 0.33447265625}, {"start": 259.38, "end": 262.1, "word": " times", "probability": 0.85986328125}, {"start": 262.1, "end": 263.08, "word": " 66", "probability": 0.90771484375}, {"start": 263.08, "end": 263.78, "word": " percent,", "probability": 0.430419921875}, {"start": 264.12, "end": 264.4, "word": " that", "probability": 0.93212890625}, {"start": 264.4, "end": 264.58, "word": " will", "probability": 0.865234375}, {"start": 264.58, "end": 264.9, "word": " give", "probability": 0.763671875}, {"start": 264.9, "end": 267.4, "word": " 150", "probability": 0.7724609375}, {"start": 267.4, "end": 268.1, "word": " times", "probability": 0.92529296875}, {"start": 268.1, "end": 
268.68, "word": " 66,", "probability": 0.91650390625}, {"start": 269.54, "end": 270.16, "word": " so", "probability": 0.84814453125}, {"start": 270.16, "end": 270.52, "word": " that's", "probability": 0.9306640625}, {"start": 270.52, "end": 270.98, "word": " 99.", "probability": 0.9443359375}, {"start": 272.28, "end": 272.6, "word": " So", "probability": 0.70751953125}, {"start": 272.6, "end": 273.08, "word": " 150", "probability": 0.6162109375}, {"start": 273.08, "end": 273.92, "word": " times,", "probability": 0.93701171875}, {"start": 275.26, "end": 275.7, "word": " it's", "probability": 0.9228515625}, {"start": 275.7, "end": 275.94, "word": " 99.", "probability": 0.95849609375}], "temperature": 1.0}, {"id": 11, "seek": 30607, "start": 281.69, "end": 306.07, "text": " Similarly, X2 equals N2 times V2. N2 is given by 160, so 160 times 60 percent, 96. So the number of successes are 96 for the second, for the previous. Nine nine.", "tokens": [13157, 11, 1783, 17, 6915, 426, 17, 1413, 691, 17, 13, 426, 17, 307, 2212, 538, 21243, 11, 370, 21243, 1413, 4060, 3043, 11, 24124, 13, 407, 264, 1230, 295, 26101, 366, 24124, 337, 264, 1150, 11, 337, 264, 3894, 13, 18939, 4949, 13], "avg_logprob": -0.28142360316382514, "compression_ratio": 1.255813953488372, "no_speech_prob": 0.0, "words": [{"start": 281.69, "end": 282.51, "word": " Similarly,", "probability": 0.80322265625}, {"start": 283.21, "end": 283.99, "word": " X2", "probability": 0.6822509765625}, {"start": 283.99, "end": 286.05, "word": " equals", "probability": 0.8935546875}, {"start": 286.05, "end": 286.47, "word": " N2", "probability": 0.97412109375}, {"start": 286.47, "end": 286.89, "word": " times", "probability": 0.919921875}, {"start": 286.89, "end": 287.27, "word": " V2.", "probability": 0.862060546875}, {"start": 288.33, "end": 289.15, "word": " N2", "probability": 0.984619140625}, {"start": 289.15, "end": 289.29, "word": " is", "probability": 0.9384765625}, {"start": 289.29, "end": 289.47, "word": " given", 
"probability": 0.88037109375}, {"start": 289.47, "end": 289.67, "word": " by", "probability": 0.9697265625}, {"start": 289.67, "end": 290.55, "word": " 160,", "probability": 0.5927734375}, {"start": 290.83, "end": 290.95, "word": " so", "probability": 0.931640625}, {"start": 290.95, "end": 291.43, "word": " 160", "probability": 0.85986328125}, {"start": 291.43, "end": 291.81, "word": " times", "probability": 0.93017578125}, {"start": 291.81, "end": 292.25, "word": " 60", "probability": 0.65966796875}, {"start": 292.25, "end": 293.77, "word": " percent,", "probability": 0.384521484375}, {"start": 295.75, "end": 296.09, "word": " 96.", "probability": 0.62548828125}, {"start": 298.67, "end": 299.49, "word": " So", "probability": 0.93408203125}, {"start": 299.49, "end": 299.65, "word": " the", "probability": 0.44873046875}, {"start": 299.65, "end": 299.87, "word": " number", "probability": 0.93212890625}, {"start": 299.87, "end": 300.03, "word": " of", "probability": 0.9765625}, {"start": 300.03, "end": 300.37, "word": " successes", "probability": 0.54541015625}, {"start": 300.37, "end": 300.87, "word": " are", "probability": 0.873046875}, {"start": 300.87, "end": 301.31, "word": " 96", "probability": 0.958984375}, {"start": 301.31, "end": 302.33, "word": " for", "probability": 0.86083984375}, {"start": 302.33, "end": 302.55, "word": " the", "probability": 0.91259765625}, {"start": 302.55, "end": 302.85, "word": " second,", "probability": 0.8505859375}, {"start": 303.01, "end": 303.13, "word": " for", "probability": 0.94287109375}, {"start": 303.13, "end": 303.25, "word": " the", "probability": 0.92529296875}, {"start": 303.25, "end": 303.59, "word": " previous.", "probability": 0.86328125}, {"start": 305.33, "end": 305.67, "word": " Nine", "probability": 0.369873046875}, {"start": 305.67, "end": 306.07, "word": " nine.", "probability": 0.4140625}], "temperature": 1.0}, {"id": 12, "seek": 33551, "start": 311.27, "end": 335.51, "text": " So B dash equals x199 plus 96 
divided by n1 plus n2, 350. And that will give the overall proportions divided by 310, 0.629.", "tokens": [407, 363, 8240, 6915, 2031, 3405, 24, 1804, 24124, 6666, 538, 297, 16, 1804, 297, 17, 11, 18065, 13, 400, 300, 486, 976, 264, 4787, 32482, 6666, 538, 805, 3279, 11, 1958, 13, 21, 11871, 13], "avg_logprob": -0.2453547361734751, "compression_ratio": 1.1272727272727272, "no_speech_prob": 0.0, "words": [{"start": 311.27, "end": 311.51, "word": " So", "probability": 0.89208984375}, {"start": 311.51, "end": 311.77, "word": " B", "probability": 0.5078125}, {"start": 311.77, "end": 312.05, "word": " dash", "probability": 0.7314453125}, {"start": 312.05, "end": 312.59, "word": " equals", "probability": 0.74267578125}, {"start": 312.59, "end": 316.41, "word": " x199", "probability": 0.669677734375}, {"start": 316.41, "end": 318.27, "word": " plus", "probability": 0.8896484375}, {"start": 318.27, "end": 319.01, "word": " 96", "probability": 0.96484375}, {"start": 319.01, "end": 320.65, "word": " divided", "probability": 0.62255859375}, {"start": 320.65, "end": 321.13, "word": " by", "probability": 0.97412109375}, {"start": 321.13, "end": 322.75, "word": " n1", "probability": 0.82861328125}, {"start": 322.75, "end": 323.05, "word": " plus", "probability": 0.95751953125}, {"start": 323.05, "end": 323.59, "word": " n2,", "probability": 0.991455078125}, {"start": 324.59, "end": 325.13, "word": " 350.", "probability": 0.78857421875}, {"start": 327.31, "end": 327.97, "word": " And", "probability": 0.9384765625}, {"start": 327.97, "end": 328.17, "word": " that", "probability": 0.9326171875}, {"start": 328.17, "end": 328.33, "word": " will", "probability": 0.8515625}, {"start": 328.33, "end": 328.53, "word": " give", "probability": 0.85791015625}, {"start": 328.53, "end": 328.81, "word": " the", "probability": 0.91552734375}, {"start": 328.81, "end": 329.31, "word": " overall", "probability": 0.8369140625}, {"start": 329.31, "end": 330.19, "word": " proportions", "probability": 
0.471923828125}, {"start": 330.19, "end": 332.81, "word": " divided", "probability": 0.6279296875}, {"start": 332.81, "end": 333.03, "word": " by", "probability": 0.97705078125}, {"start": 333.03, "end": 333.63, "word": " 310,", "probability": 0.6607666015625}, {"start": 334.71, "end": 334.85, "word": " 0", "probability": 0.6640625}, {"start": 334.85, "end": 335.51, "word": ".629.", "probability": 0.9807942708333334}], "temperature": 1.0}, {"id": 13, "seek": 36055, "start": 340.87, "end": 360.55, "text": " So, this is the value of the overall proportion. Now, B dash equals 1.629. So, 1 times 1 minus B dash is 1 minus this value times 1 over N1, 1 over 150 plus 1 over 160. Simple calculation will give", "tokens": [407, 11, 341, 307, 264, 2158, 295, 264, 4787, 16068, 13, 823, 11, 363, 8240, 6915, 502, 13, 21, 11871, 13, 407, 11, 502, 1413, 502, 3175, 363, 8240, 307, 502, 3175, 341, 2158, 1413, 502, 670, 426, 16, 11, 502, 670, 8451, 1804, 502, 670, 21243, 13, 21532, 17108, 486, 976], "avg_logprob": -0.25073704179727807, "compression_ratio": 1.4244604316546763, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 340.87, "end": 341.19, "word": " So,", "probability": 0.77685546875}, {"start": 341.39, "end": 341.41, "word": " this", "probability": 0.90380859375}, {"start": 341.41, "end": 341.51, "word": " is", "probability": 0.9384765625}, {"start": 341.51, "end": 341.63, "word": " the", "probability": 0.9052734375}, {"start": 341.63, "end": 341.97, "word": " value", "probability": 0.9716796875}, {"start": 341.97, "end": 342.35, "word": " of", "probability": 0.96142578125}, {"start": 342.35, "end": 342.71, "word": " the", "probability": 0.90185546875}, {"start": 342.71, "end": 343.89, "word": " overall", "probability": 0.783203125}, {"start": 343.89, "end": 344.57, "word": " proportion.", "probability": 0.77490234375}, {"start": 345.39, "end": 345.55, "word": " Now,", "probability": 0.50634765625}, {"start": 345.61, "end": 345.71, "word": " B", "probability": 
0.324951171875}, {"start": 345.71, "end": 346.13, "word": " dash", "probability": 0.54052734375}, {"start": 346.13, "end": 348.17, "word": " equals", "probability": 0.80419921875}, {"start": 348.17, "end": 348.45, "word": " 1", "probability": 0.2529296875}, {"start": 348.45, "end": 348.95, "word": ".629.", "probability": 0.745361328125}, {"start": 349.15, "end": 349.31, "word": " So,", "probability": 0.94677734375}, {"start": 349.37, "end": 349.53, "word": " 1", "probability": 0.84912109375}, {"start": 349.53, "end": 349.83, "word": " times", "probability": 0.8681640625}, {"start": 349.83, "end": 350.11, "word": " 1", "probability": 0.955078125}, {"start": 350.11, "end": 350.43, "word": " minus", "probability": 0.97998046875}, {"start": 350.43, "end": 350.65, "word": " B", "probability": 0.9814453125}, {"start": 350.65, "end": 350.95, "word": " dash", "probability": 0.8818359375}, {"start": 350.95, "end": 352.13, "word": " is", "probability": 0.83203125}, {"start": 352.13, "end": 352.29, "word": " 1", "probability": 0.96533203125}, {"start": 352.29, "end": 352.57, "word": " minus", "probability": 0.97412109375}, {"start": 352.57, "end": 352.81, "word": " this", "probability": 0.94482421875}, {"start": 352.81, "end": 353.15, "word": " value", "probability": 0.97216796875}, {"start": 353.15, "end": 353.47, "word": " times", "probability": 0.71875}, {"start": 353.47, "end": 353.73, "word": " 1", "probability": 0.96142578125}, {"start": 353.73, "end": 353.95, "word": " over", "probability": 0.89794921875}, {"start": 353.95, "end": 354.29, "word": " N1,", "probability": 0.803955078125}, {"start": 354.39, "end": 354.55, "word": " 1", "probability": 0.9375}, {"start": 354.55, "end": 354.97, "word": " over", "probability": 0.8955078125}, {"start": 354.97, "end": 355.89, "word": " 150", "probability": 0.849609375}, {"start": 355.89, "end": 356.89, "word": " plus", "probability": 0.7548828125}, {"start": 356.89, "end": 357.21, "word": " 1", "probability": 0.9921875}, 
{"start": 357.21, "end": 357.49, "word": " over", "probability": 0.92138671875}, {"start": 357.49, "end": 358.07, "word": " 160.", "probability": 0.93701171875}, {"start": 358.75, "end": 359.15, "word": " Simple", "probability": 0.9365234375}, {"start": 359.15, "end": 359.75, "word": " calculation", "probability": 0.92529296875}, {"start": 359.75, "end": 360.23, "word": " will", "probability": 0.8798828125}, {"start": 360.23, "end": 360.55, "word": " give", "probability": 0.88671875}], "temperature": 1.0}, {"id": 14, "seek": 38778, "start": 361.46, "end": 387.78, "text": " The value of z, which is in this case 1.093. So just plug this information into this equation, you will get z value, which is 1.093. He asked to do this problem by using two approaches, critical value and b value. Let's start with the first one, b value approach.", "tokens": [440, 2158, 295, 710, 11, 597, 307, 294, 341, 1389, 502, 13, 13811, 18, 13, 407, 445, 5452, 341, 1589, 666, 341, 5367, 11, 291, 486, 483, 710, 2158, 11, 597, 307, 502, 13, 13811, 18, 13, 634, 2351, 281, 360, 341, 1154, 538, 1228, 732, 11587, 11, 4924, 2158, 293, 272, 2158, 13, 961, 311, 722, 365, 264, 700, 472, 11, 272, 2158, 3109, 13], "avg_logprob": -0.14482276252846218, "compression_ratio": 1.4915254237288136, "no_speech_prob": 0.0, "words": [{"start": 361.46, "end": 361.68, "word": " The", "probability": 0.412841796875}, {"start": 361.68, "end": 362.0, "word": " value", "probability": 0.96728515625}, {"start": 362.0, "end": 362.2, "word": " of", "probability": 0.9501953125}, {"start": 362.2, "end": 362.38, "word": " z,", "probability": 0.5986328125}, {"start": 362.7, "end": 363.18, "word": " which", "probability": 0.94775390625}, {"start": 363.18, "end": 363.52, "word": " is", "probability": 0.9248046875}, {"start": 363.52, "end": 363.8, "word": " in", "probability": 0.71826171875}, {"start": 363.8, "end": 364.04, "word": " this", "probability": 0.94970703125}, {"start": 364.04, "end": 364.38, "word": " case", 
"probability": 0.9208984375}, {"start": 364.38, "end": 364.66, "word": " 1", "probability": 0.73388671875}, {"start": 364.66, "end": 367.28, "word": ".093.", "probability": 0.9625651041666666}, {"start": 367.78, "end": 368.1, "word": " So", "probability": 0.94482421875}, {"start": 368.1, "end": 368.36, "word": " just", "probability": 0.73291015625}, {"start": 368.36, "end": 368.66, "word": " plug", "probability": 0.771484375}, {"start": 368.66, "end": 369.0, "word": " this", "probability": 0.9072265625}, {"start": 369.0, "end": 369.56, "word": " information", "probability": 0.8388671875}, {"start": 369.56, "end": 369.88, "word": " into", "probability": 0.83349609375}, {"start": 369.88, "end": 370.12, "word": " this", "probability": 0.94580078125}, {"start": 370.12, "end": 370.62, "word": " equation,", "probability": 0.9814453125}, {"start": 371.34, "end": 371.5, "word": " you", "probability": 0.9326171875}, {"start": 371.5, "end": 371.68, "word": " will", "probability": 0.89794921875}, {"start": 371.68, "end": 372.42, "word": " get", "probability": 0.9453125}, {"start": 372.42, "end": 374.12, "word": " z", "probability": 0.873046875}, {"start": 374.12, "end": 374.86, "word": " value,", "probability": 0.95556640625}, {"start": 374.9, "end": 375.1, "word": " which", "probability": 0.95068359375}, {"start": 375.1, "end": 375.26, "word": " is", "probability": 0.94580078125}, {"start": 375.26, "end": 375.5, "word": " 1", "probability": 0.9951171875}, {"start": 375.5, "end": 376.5, "word": ".093.", "probability": 0.9830729166666666}, {"start": 378.02, "end": 378.66, "word": " He", "probability": 0.9404296875}, {"start": 378.66, "end": 379.0, "word": " asked", "probability": 0.7939453125}, {"start": 379.0, "end": 379.32, "word": " to", "probability": 0.94970703125}, {"start": 379.32, "end": 379.46, "word": " do", "probability": 0.95556640625}, {"start": 379.46, "end": 379.64, "word": " this", "probability": 0.9482421875}, {"start": 379.64, "end": 379.98, "word": " 
problem", "probability": 0.8876953125}, {"start": 379.98, "end": 380.22, "word": " by", "probability": 0.96142578125}, {"start": 380.22, "end": 380.48, "word": " using", "probability": 0.93798828125}, {"start": 380.48, "end": 380.68, "word": " two", "probability": 0.8916015625}, {"start": 380.68, "end": 381.12, "word": " approaches,", "probability": 0.76708984375}, {"start": 381.44, "end": 381.78, "word": " critical", "probability": 0.87646484375}, {"start": 381.78, "end": 382.3, "word": " value", "probability": 0.96630859375}, {"start": 382.3, "end": 382.7, "word": " and", "probability": 0.91552734375}, {"start": 382.7, "end": 382.86, "word": " b", "probability": 0.458740234375}, {"start": 382.86, "end": 383.12, "word": " value.", "probability": 0.84765625}, {"start": 383.24, "end": 383.48, "word": " Let's", "probability": 0.959716796875}, {"start": 383.48, "end": 383.82, "word": " start", "probability": 0.91845703125}, {"start": 383.82, "end": 384.08, "word": " with", "probability": 0.89990234375}, {"start": 384.08, "end": 384.44, "word": " the", "probability": 0.9091796875}, {"start": 384.44, "end": 384.84, "word": " first", "probability": 0.88818359375}, {"start": 384.84, "end": 385.18, "word": " one,", "probability": 0.92578125}, {"start": 386.78, "end": 387.0, "word": " b", "probability": 0.8818359375}, {"start": 387.0, "end": 387.28, "word": " value", "probability": 0.92431640625}, {"start": 387.28, "end": 387.78, "word": " approach.", "probability": 0.8759765625}], "temperature": 1.0}, {"id": 15, "seek": 41699, "start": 392.71, "end": 416.99, "text": " Now your B value or critical value, start with critical value. Now since we are taking about a two-sided test, so there are two critical values which are plus or minus Z alpha over. 
Alpha is given by five percent, so in this case", "tokens": [823, 428, 363, 2158, 420, 4924, 2158, 11, 722, 365, 4924, 2158, 13, 823, 1670, 321, 366, 1940, 466, 257, 732, 12, 30941, 1500, 11, 370, 456, 366, 732, 4924, 4190, 597, 366, 1804, 420, 3175, 1176, 8961, 670, 13, 20588, 307, 2212, 538, 1732, 3043, 11, 370, 294, 341, 1389], "avg_logprob": -0.24849760417754835, "compression_ratio": 1.5436241610738255, "no_speech_prob": 0.0, "words": [{"start": 392.71, "end": 392.99, "word": " Now", "probability": 0.8466796875}, {"start": 392.99, "end": 393.25, "word": " your", "probability": 0.630859375}, {"start": 393.25, "end": 393.43, "word": " B", "probability": 0.4853515625}, {"start": 393.43, "end": 393.77, "word": " value", "probability": 0.74853515625}, {"start": 393.77, "end": 394.39, "word": " or", "probability": 0.8017578125}, {"start": 394.39, "end": 394.83, "word": " critical", "probability": 0.833984375}, {"start": 394.83, "end": 395.27, "word": " value,", "probability": 0.97607421875}, {"start": 395.55, "end": 396.09, "word": " start", "probability": 0.89501953125}, {"start": 396.09, "end": 396.33, "word": " with", "probability": 0.8984375}, {"start": 396.33, "end": 396.75, "word": " critical", "probability": 0.78466796875}, {"start": 396.75, "end": 397.05, "word": " value.", "probability": 0.78955078125}, {"start": 400.85, "end": 401.47, "word": " Now", "probability": 0.8759765625}, {"start": 401.47, "end": 401.73, "word": " since", "probability": 0.65380859375}, {"start": 401.73, "end": 401.87, "word": " we", "probability": 0.93603515625}, {"start": 401.87, "end": 402.01, "word": " are", "probability": 0.9130859375}, {"start": 402.01, "end": 402.31, "word": " taking", "probability": 0.7646484375}, {"start": 402.31, "end": 403.15, "word": " about", "probability": 0.89697265625}, {"start": 403.15, "end": 405.23, "word": " a", "probability": 0.34765625}, {"start": 405.23, "end": 405.41, "word": " two", "probability": 0.90869140625}, {"start": 405.41, "end": 
405.67, "word": "-sided", "probability": 0.746337890625}, {"start": 405.67, "end": 406.13, "word": " test,", "probability": 0.8642578125}, {"start": 406.35, "end": 406.49, "word": " so", "probability": 0.8837890625}, {"start": 406.49, "end": 406.65, "word": " there", "probability": 0.904296875}, {"start": 406.65, "end": 406.79, "word": " are", "probability": 0.943359375}, {"start": 406.79, "end": 407.01, "word": " two", "probability": 0.931640625}, {"start": 407.01, "end": 407.45, "word": " critical", "probability": 0.93505859375}, {"start": 407.45, "end": 407.97, "word": " values", "probability": 0.97119140625}, {"start": 407.97, "end": 408.29, "word": " which", "probability": 0.58251953125}, {"start": 408.29, "end": 408.79, "word": " are", "probability": 0.9501953125}, {"start": 408.79, "end": 409.87, "word": " plus", "probability": 0.865234375}, {"start": 409.87, "end": 410.17, "word": " or", "probability": 0.958984375}, {"start": 410.17, "end": 410.59, "word": " minus", "probability": 0.98681640625}, {"start": 410.59, "end": 410.87, "word": " Z", "probability": 0.57861328125}, {"start": 410.87, "end": 411.15, "word": " alpha", "probability": 0.81298828125}, {"start": 411.15, "end": 411.45, "word": " over.", "probability": 0.82763671875}, {"start": 412.33, "end": 412.77, "word": " Alpha", "probability": 0.98046875}, {"start": 412.77, "end": 413.01, "word": " is", "probability": 0.958984375}, {"start": 413.01, "end": 413.35, "word": " given", "probability": 0.89990234375}, {"start": 413.35, "end": 414.09, "word": " by", "probability": 0.96826171875}, {"start": 414.09, "end": 414.67, "word": " five", "probability": 0.406005859375}, {"start": 414.67, "end": 415.23, "word": " percent,", "probability": 0.9599609375}, {"start": 416.07, "end": 416.25, "word": " so", "probability": 0.93017578125}, {"start": 416.25, "end": 416.37, "word": " in", "probability": 0.931640625}, {"start": 416.37, "end": 416.57, "word": " this", "probability": 0.9501953125}, {"start": 416.57, 
"end": 416.99, "word": " case", "probability": 0.91357421875}], "temperature": 1.0}, {"id": 16, "seek": 44813, "start": 419.63, "end": 448.13, "text": " is equal to plus or minus 1.96. Now, does this value, I mean does the value of this statistic which is 1.093 fall in the critical region? Now, my critical regions are above 196 or below negative 1.96. Now this value actually falls", "tokens": [307, 2681, 281, 1804, 420, 3175, 502, 13, 22962, 13, 823, 11, 775, 341, 2158, 11, 286, 914, 775, 264, 2158, 295, 341, 29588, 597, 307, 502, 13, 13811, 18, 2100, 294, 264, 4924, 4458, 30, 823, 11, 452, 4924, 10682, 366, 3673, 7998, 420, 2507, 3671, 502, 13, 22962, 13, 823, 341, 2158, 767, 8804], "avg_logprob": -0.2020285066805388, "compression_ratio": 1.5098039215686274, "no_speech_prob": 0.0, "words": [{"start": 419.63, "end": 419.71, "word": " is", "probability": 0.0758056640625}, {"start": 419.71, "end": 420.05, "word": " equal", "probability": 0.7587890625}, {"start": 420.05, "end": 420.37, "word": " to", "probability": 0.72705078125}, {"start": 420.37, "end": 420.75, "word": " plus", "probability": 0.84130859375}, {"start": 420.75, "end": 421.05, "word": " or", "probability": 0.94921875}, {"start": 421.05, "end": 421.61, "word": " minus", "probability": 0.9853515625}, {"start": 421.61, "end": 422.23, "word": " 1", "probability": 0.91015625}, {"start": 422.23, "end": 423.37, "word": ".96.", "probability": 0.96435546875}, {"start": 425.93, "end": 426.55, "word": " Now,", "probability": 0.8564453125}, {"start": 427.47, "end": 427.83, "word": " does", "probability": 0.9375}, {"start": 427.83, "end": 428.11, "word": " this", "probability": 0.93212890625}, {"start": 428.11, "end": 428.53, "word": " value,", "probability": 0.98193359375}, {"start": 428.79, "end": 428.91, "word": " I", "probability": 0.90234375}, {"start": 428.91, "end": 429.05, "word": " mean", "probability": 0.9697265625}, {"start": 429.05, "end": 429.35, "word": " does", "probability": 0.62841796875}, 
{"start": 429.35, "end": 429.55, "word": " the", "probability": 0.83935546875}, {"start": 429.55, "end": 429.83, "word": " value", "probability": 0.98388671875}, {"start": 429.83, "end": 430.01, "word": " of", "probability": 0.94580078125}, {"start": 430.01, "end": 430.17, "word": " this", "probability": 0.46484375}, {"start": 430.17, "end": 430.79, "word": " statistic", "probability": 0.60400390625}, {"start": 430.79, "end": 431.17, "word": " which", "probability": 0.6396484375}, {"start": 431.17, "end": 431.33, "word": " is", "probability": 0.94775390625}, {"start": 431.33, "end": 431.59, "word": " 1", "probability": 0.98291015625}, {"start": 431.59, "end": 433.03, "word": ".093", "probability": 0.9705403645833334}, {"start": 433.03, "end": 434.21, "word": " fall", "probability": 0.58642578125}, {"start": 434.21, "end": 434.43, "word": " in", "probability": 0.9248046875}, {"start": 434.43, "end": 434.59, "word": " the", "probability": 0.91064453125}, {"start": 434.59, "end": 434.91, "word": " critical", "probability": 0.94677734375}, {"start": 434.91, "end": 435.31, "word": " region?", "probability": 0.93115234375}, {"start": 437.41, "end": 438.17, "word": " Now,", "probability": 0.890625}, {"start": 438.37, "end": 438.67, "word": " my", "probability": 0.9716796875}, {"start": 438.67, "end": 439.07, "word": " critical", "probability": 0.9443359375}, {"start": 439.07, "end": 439.71, "word": " regions", "probability": 0.97265625}, {"start": 439.71, "end": 440.43, "word": " are", "probability": 0.93994140625}, {"start": 440.43, "end": 441.53, "word": " above", "probability": 0.95361328125}, {"start": 441.53, "end": 442.09, "word": " 196", "probability": 0.7763671875}, {"start": 442.09, "end": 442.73, "word": " or", "probability": 0.92578125}, {"start": 442.73, "end": 443.13, "word": " below", "probability": 0.89892578125}, {"start": 443.13, "end": 443.83, "word": " negative", "probability": 0.66796875}, {"start": 443.83, "end": 444.09, "word": " 1", "probability": 
0.9814453125}, {"start": 444.09, "end": 444.67, "word": ".96.", "probability": 0.97998046875}, {"start": 445.33, "end": 445.49, "word": " Now", "probability": 0.84375}, {"start": 445.49, "end": 445.77, "word": " this", "probability": 0.53076171875}, {"start": 445.77, "end": 446.09, "word": " value", "probability": 0.97021484375}, {"start": 446.09, "end": 446.61, "word": " actually", "probability": 0.888671875}, {"start": 446.61, "end": 448.13, "word": " falls", "probability": 0.83056640625}], "temperature": 1.0}, {"id": 17, "seek": 46806, "start": 449.3, "end": 468.06, "text": " In the non-rejection region, so we don't reject the null hypothesis. So my decision, don't reject the null hypothesis. That means there is not sufficient evidence to support the alternative which states that the proportion has changed from the previous study.", "tokens": [682, 264, 2107, 12, 265, 1020, 313, 4458, 11, 370, 321, 500, 380, 8248, 264, 18184, 17291, 13, 407, 452, 3537, 11, 500, 380, 8248, 264, 18184, 17291, 13, 663, 1355, 456, 307, 406, 11563, 4467, 281, 1406, 264, 8535, 597, 4368, 300, 264, 16068, 575, 3105, 490, 264, 3894, 2979, 13], "avg_logprob": -0.14445754267134756, "compression_ratio": 1.6352201257861636, "no_speech_prob": 0.0, "words": [{"start": 449.3, "end": 449.84, "word": " In", "probability": 0.56494140625}, {"start": 449.84, "end": 450.08, "word": " the", "probability": 0.90966796875}, {"start": 450.08, "end": 450.34, "word": " non", "probability": 0.94140625}, {"start": 450.34, "end": 450.86, "word": "-rejection", "probability": 0.872802734375}, {"start": 450.86, "end": 451.22, "word": " region,", "probability": 0.8017578125}, {"start": 451.4, "end": 451.52, "word": " so", "probability": 0.61669921875}, {"start": 451.52, "end": 451.72, "word": " we", "probability": 0.9248046875}, {"start": 451.72, "end": 452.02, "word": " don't", "probability": 0.920166015625}, {"start": 452.02, "end": 452.42, "word": " reject", "probability": 0.91015625}, {"start": 452.42, "end": 
452.58, "word": " the", "probability": 0.8095703125}, {"start": 452.58, "end": 452.68, "word": " null", "probability": 0.95947265625}, {"start": 452.68, "end": 453.12, "word": " hypothesis.", "probability": 0.84326171875}, {"start": 454.34, "end": 454.48, "word": " So", "probability": 0.921875}, {"start": 454.48, "end": 454.66, "word": " my", "probability": 0.80712890625}, {"start": 454.66, "end": 455.02, "word": " decision,", "probability": 0.9150390625}, {"start": 455.28, "end": 455.66, "word": " don't", "probability": 0.967041015625}, {"start": 455.66, "end": 456.16, "word": " reject", "probability": 0.9287109375}, {"start": 456.16, "end": 457.06, "word": " the", "probability": 0.90771484375}, {"start": 457.06, "end": 457.22, "word": " null", "probability": 0.93798828125}, {"start": 457.22, "end": 457.66, "word": " hypothesis.", "probability": 0.88720703125}, {"start": 457.86, "end": 458.04, "word": " That", "probability": 0.90185546875}, {"start": 458.04, "end": 458.46, "word": " means", "probability": 0.93359375}, {"start": 458.46, "end": 459.58, "word": " there", "probability": 0.76953125}, {"start": 459.58, "end": 459.76, "word": " is", "probability": 0.93408203125}, {"start": 459.76, "end": 459.98, "word": " not", "probability": 0.9404296875}, {"start": 459.98, "end": 460.42, "word": " sufficient", "probability": 0.8916015625}, {"start": 460.42, "end": 461.08, "word": " evidence", "probability": 0.951171875}, {"start": 461.08, "end": 462.06, "word": " to", "probability": 0.93798828125}, {"start": 462.06, "end": 462.62, "word": " support", "probability": 0.9892578125}, {"start": 462.62, "end": 462.8, "word": " the", "probability": 0.90625}, {"start": 462.8, "end": 463.42, "word": " alternative", "probability": 0.94091796875}, {"start": 463.42, "end": 464.02, "word": " which", "probability": 0.52294921875}, {"start": 464.02, "end": 464.38, "word": " states", "probability": 0.64306640625}, {"start": 464.38, "end": 464.74, "word": " that", "probability": 
0.93115234375}, {"start": 464.74, "end": 465.5, "word": " the", "probability": 0.8916015625}, {"start": 465.5, "end": 465.9, "word": " proportion", "probability": 0.859375}, {"start": 465.9, "end": 466.18, "word": " has", "probability": 0.94189453125}, {"start": 466.18, "end": 466.7, "word": " changed", "probability": 0.91552734375}, {"start": 466.7, "end": 466.96, "word": " from", "probability": 0.88427734375}, {"start": 466.96, "end": 467.12, "word": " the", "probability": 0.919921875}, {"start": 467.12, "end": 467.54, "word": " previous", "probability": 0.86181640625}, {"start": 467.54, "end": 468.06, "word": " study.", "probability": 0.9296875}], "temperature": 1.0}, {"id": 18, "seek": 49617, "start": 469.11, "end": 496.17, "text": " So we don't reject the null hypothesis. It means there is not sufficient evidence to support the alternative hypothesis. That means you cannot say that the proportion has changed from the previous study. That by using critical value approach. Now what's about p-value? 
In order to determine the p-value,", "tokens": [407, 321, 500, 380, 8248, 264, 18184, 17291, 13, 467, 1355, 456, 307, 406, 11563, 4467, 281, 1406, 264, 8535, 17291, 13, 663, 1355, 291, 2644, 584, 300, 264, 16068, 575, 3105, 490, 264, 3894, 2979, 13, 663, 538, 1228, 4924, 2158, 3109, 13, 823, 437, 311, 466, 280, 12, 29155, 30, 682, 1668, 281, 6997, 264, 280, 12, 29155, 11], "avg_logprob": -0.15675402865294488, "compression_ratio": 1.5510204081632653, "no_speech_prob": 0.0, "words": [{"start": 469.11, "end": 469.33, "word": " So", "probability": 0.8740234375}, {"start": 469.33, "end": 469.51, "word": " we", "probability": 0.71484375}, {"start": 469.51, "end": 469.89, "word": " don't", "probability": 0.942626953125}, {"start": 469.89, "end": 470.53, "word": " reject", "probability": 0.892578125}, {"start": 470.53, "end": 471.11, "word": " the", "probability": 0.8203125}, {"start": 471.11, "end": 471.29, "word": " null", "probability": 0.96484375}, {"start": 471.29, "end": 471.77, "word": " hypothesis.", "probability": 0.826171875}, {"start": 472.43, "end": 472.65, "word": " It", "probability": 0.94482421875}, {"start": 472.65, "end": 472.95, "word": " means", "probability": 0.92822265625}, {"start": 472.95, "end": 473.23, "word": " there", "probability": 0.833984375}, {"start": 473.23, "end": 473.39, "word": " is", "probability": 0.919921875}, {"start": 473.39, "end": 473.57, "word": " not", "probability": 0.9267578125}, {"start": 473.57, "end": 474.01, "word": " sufficient", "probability": 0.8896484375}, {"start": 474.01, "end": 474.63, "word": " evidence", "probability": 0.95849609375}, {"start": 474.63, "end": 475.65, "word": " to", "probability": 0.9345703125}, {"start": 475.65, "end": 476.39, "word": " support", "probability": 0.98486328125}, {"start": 476.39, "end": 476.95, "word": " the", "probability": 0.88671875}, {"start": 476.95, "end": 477.47, "word": " alternative", "probability": 0.94775390625}, {"start": 477.47, "end": 478.05, "word": " hypothesis.", 
"probability": 0.86767578125}, {"start": 478.27, "end": 478.45, "word": " That", "probability": 0.908203125}, {"start": 478.45, "end": 478.85, "word": " means", "probability": 0.93359375}, {"start": 478.85, "end": 479.55, "word": " you", "probability": 0.81005859375}, {"start": 479.55, "end": 479.85, "word": " cannot", "probability": 0.87158203125}, {"start": 479.85, "end": 480.17, "word": " say", "probability": 0.94677734375}, {"start": 480.17, "end": 480.53, "word": " that", "probability": 0.93310546875}, {"start": 480.53, "end": 481.19, "word": " the", "probability": 0.9033203125}, {"start": 481.19, "end": 481.61, "word": " proportion", "probability": 0.8330078125}, {"start": 481.61, "end": 482.01, "word": " has", "probability": 0.9453125}, {"start": 482.01, "end": 482.49, "word": " changed", "probability": 0.90771484375}, {"start": 482.49, "end": 482.89, "word": " from", "probability": 0.88623046875}, {"start": 482.89, "end": 483.25, "word": " the", "probability": 0.91455078125}, {"start": 483.25, "end": 483.53, "word": " previous", "probability": 0.8486328125}, {"start": 483.53, "end": 483.95, "word": " study.", "probability": 0.919921875}, {"start": 484.45, "end": 484.89, "word": " That", "probability": 0.8974609375}, {"start": 484.89, "end": 485.09, "word": " by", "probability": 0.58984375}, {"start": 485.09, "end": 485.53, "word": " using", "probability": 0.93408203125}, {"start": 485.53, "end": 486.07, "word": " critical", "probability": 0.88037109375}, {"start": 486.07, "end": 486.43, "word": " value", "probability": 0.8271484375}, {"start": 486.43, "end": 486.77, "word": " approach.", "probability": 0.79248046875}, {"start": 488.31, "end": 488.65, "word": " Now", "probability": 0.9365234375}, {"start": 488.65, "end": 488.93, "word": " what's", "probability": 0.718017578125}, {"start": 488.93, "end": 489.19, "word": " about", "probability": 0.9111328125}, {"start": 489.19, "end": 489.37, "word": " p", "probability": 0.261962890625}, {"start": 489.37, 
"end": 489.65, "word": "-value?", "probability": 0.882080078125}, {"start": 491.83, "end": 492.59, "word": " In", "probability": 0.94921875}, {"start": 492.59, "end": 492.83, "word": " order", "probability": 0.9287109375}, {"start": 492.83, "end": 493.05, "word": " to", "probability": 0.96875}, {"start": 493.05, "end": 493.59, "word": " determine", "probability": 0.9326171875}, {"start": 493.59, "end": 495.55, "word": " the", "probability": 0.89599609375}, {"start": 495.55, "end": 495.81, "word": " p", "probability": 0.9541015625}, {"start": 495.81, "end": 496.17, "word": "-value,", "probability": 0.95703125}], "temperature": 1.0}, {"id": 19, "seek": 52105, "start": 499.46, "end": 521.06, "text": " We have to find the probability that the Z statistic fall in the rejection regions. So that means Z greater than my values 1093 or Z smaller than negative 1.093.", "tokens": [492, 362, 281, 915, 264, 8482, 300, 264, 1176, 29588, 2100, 294, 264, 26044, 10682, 13, 407, 300, 1355, 1176, 5044, 813, 452, 4190, 1266, 26372, 420, 1176, 4356, 813, 3671, 502, 13, 13811, 18, 13], "avg_logprob": -0.3013091248434943, "compression_ratio": 1.296, "no_speech_prob": 0.0, "words": [{"start": 499.46, "end": 499.68, "word": " We", "probability": 0.45263671875}, {"start": 499.68, "end": 499.86, "word": " have", "probability": 0.88623046875}, {"start": 499.86, "end": 500.0, "word": " to", "probability": 0.97216796875}, {"start": 500.0, "end": 500.28, "word": " find", "probability": 0.89306640625}, {"start": 500.28, "end": 501.06, "word": " the", "probability": 0.85888671875}, {"start": 501.06, "end": 501.48, "word": " probability", "probability": 0.93408203125}, {"start": 501.48, "end": 502.02, "word": " that", "probability": 0.931640625}, {"start": 502.02, "end": 503.16, "word": " the", "probability": 0.46142578125}, {"start": 503.16, "end": 503.32, "word": " Z", "probability": 0.56884765625}, {"start": 503.32, "end": 503.94, "word": " statistic", "probability": 0.56103515625}, {"start": 
503.94, "end": 505.14, "word": " fall", "probability": 0.5830078125}, {"start": 505.14, "end": 505.6, "word": " in", "probability": 0.92529296875}, {"start": 505.6, "end": 505.82, "word": " the", "probability": 0.83544921875}, {"start": 505.82, "end": 506.12, "word": " rejection", "probability": 0.92236328125}, {"start": 506.12, "end": 506.66, "word": " regions.", "probability": 0.8935546875}, {"start": 507.42, "end": 507.62, "word": " So", "probability": 0.89501953125}, {"start": 507.62, "end": 508.06, "word": " that", "probability": 0.7314453125}, {"start": 508.06, "end": 508.4, "word": " means", "probability": 0.9248046875}, {"start": 508.4, "end": 509.28, "word": " Z", "probability": 0.2484130859375}, {"start": 509.28, "end": 510.26, "word": " greater", "probability": 0.66796875}, {"start": 510.26, "end": 510.74, "word": " than", "probability": 0.94482421875}, {"start": 510.74, "end": 512.02, "word": " my", "probability": 0.767578125}, {"start": 512.02, "end": 512.32, "word": " values", "probability": 0.4716796875}, {"start": 512.32, "end": 513.18, "word": " 1093", "probability": 0.67919921875}, {"start": 513.18, "end": 516.26, "word": " or", "probability": 0.58984375}, {"start": 516.26, "end": 517.22, "word": " Z", "probability": 0.93212890625}, {"start": 517.22, "end": 517.78, "word": " smaller", "probability": 0.76025390625}, {"start": 517.78, "end": 518.1, "word": " than", "probability": 0.943359375}, {"start": 518.1, "end": 518.66, "word": " negative", "probability": 0.89892578125}, {"start": 518.66, "end": 519.9, "word": " 1", "probability": 0.9404296875}, {"start": 519.9, "end": 521.06, "word": ".093.", "probability": 0.98291015625}], "temperature": 1.0}, {"id": 20, "seek": 55425, "start": 525.45, "end": 554.25, "text": " 1093 is the same as the left of negative, so they are the same because of symmetry. So just take 1 and multiply by 2. Now simple calculation will give the value of 0.276 in chapter 6. 
So go back to chapter 6 to figure out how can we calculate the probability of Z greater than", "tokens": [1266, 26372, 307, 264, 912, 382, 264, 1411, 295, 3671, 11, 370, 436, 366, 264, 912, 570, 295, 25440, 13, 407, 445, 747, 502, 293, 12972, 538, 568, 13, 823, 2199, 17108, 486, 976, 264, 2158, 295, 1958, 13, 10076, 21, 294, 7187, 1386, 13, 407, 352, 646, 281, 7187, 1386, 281, 2573, 484, 577, 393, 321, 8873, 264, 8482, 295, 1176, 5044, 813], "avg_logprob": -0.21850961538461539, "compression_ratio": 1.489247311827957, "no_speech_prob": 0.0, "words": [{"start": 525.4499999999999, "end": 526.05, "word": " 1093", "probability": 0.5721435546875}, {"start": 526.05, "end": 526.33, "word": " is", "probability": 0.90673828125}, {"start": 526.33, "end": 526.49, "word": " the", "probability": 0.84814453125}, {"start": 526.49, "end": 526.71, "word": " same", "probability": 0.9013671875}, {"start": 526.71, "end": 527.01, "word": " as", "probability": 0.94384765625}, {"start": 527.01, "end": 527.17, "word": " the", "probability": 0.583984375}, {"start": 527.17, "end": 527.89, "word": " left", "probability": 0.53759765625}, {"start": 527.89, "end": 528.15, "word": " of", "probability": 0.8544921875}, {"start": 528.15, "end": 528.57, "word": " negative,", "probability": 0.85791015625}, {"start": 529.27, "end": 529.57, "word": " so", "probability": 0.8291015625}, {"start": 529.57, "end": 529.73, "word": " they", "probability": 0.8544921875}, {"start": 529.73, "end": 529.87, "word": " are", "probability": 0.91748046875}, {"start": 529.87, "end": 530.03, "word": " the", "probability": 0.88916015625}, {"start": 530.03, "end": 530.25, "word": " same", "probability": 0.912109375}, {"start": 530.25, "end": 530.53, "word": " because", "probability": 0.86279296875}, {"start": 530.53, "end": 530.69, "word": " of", "probability": 0.94384765625}, {"start": 530.69, "end": 531.15, "word": " symmetry.", "probability": 0.8740234375}, {"start": 531.91, "end": 532.07, "word": " So", "probability": 
0.75830078125}, {"start": 532.07, "end": 532.29, "word": " just", "probability": 0.7802734375}, {"start": 532.29, "end": 532.57, "word": " take", "probability": 0.89306640625}, {"start": 532.57, "end": 532.81, "word": " 1", "probability": 0.400634765625}, {"start": 532.81, "end": 533.05, "word": " and", "probability": 0.92138671875}, {"start": 533.05, "end": 533.43, "word": " multiply", "probability": 0.8623046875}, {"start": 533.43, "end": 533.73, "word": " by", "probability": 0.89599609375}, {"start": 533.73, "end": 534.05, "word": " 2.", "probability": 0.970703125}, {"start": 538.43, "end": 539.03, "word": " Now", "probability": 0.923828125}, {"start": 539.03, "end": 539.37, "word": " simple", "probability": 0.499755859375}, {"start": 539.37, "end": 539.95, "word": " calculation", "probability": 0.916015625}, {"start": 539.95, "end": 540.31, "word": " will", "probability": 0.8798828125}, {"start": 540.31, "end": 540.65, "word": " give", "probability": 0.88427734375}, {"start": 540.65, "end": 541.79, "word": " the", "probability": 0.86376953125}, {"start": 541.79, "end": 542.07, "word": " value", "probability": 0.9814453125}, {"start": 542.07, "end": 542.29, "word": " of", "probability": 0.95458984375}, {"start": 542.29, "end": 543.07, "word": " 0", "probability": 0.708984375}, {"start": 543.07, "end": 544.05, "word": ".276", "probability": 0.9845377604166666}, {"start": 544.05, "end": 546.67, "word": " in", "probability": 0.59716796875}, {"start": 546.67, "end": 546.95, "word": " chapter", "probability": 0.64111328125}, {"start": 546.95, "end": 547.41, "word": " 6.", "probability": 0.8349609375}, {"start": 548.17, "end": 548.51, "word": " So", "probability": 0.90283203125}, {"start": 548.51, "end": 548.69, "word": " go", "probability": 0.8818359375}, {"start": 548.69, "end": 548.93, "word": " back", "probability": 0.8798828125}, {"start": 548.93, "end": 549.07, "word": " to", "probability": 0.78271484375}, {"start": 549.07, "end": 549.29, "word": " chapter", 
"probability": 0.86181640625}, {"start": 549.29, "end": 549.71, "word": " 6", "probability": 0.98583984375}, {"start": 549.71, "end": 549.95, "word": " to", "probability": 0.88037109375}, {"start": 549.95, "end": 550.15, "word": " figure", "probability": 0.96337890625}, {"start": 550.15, "end": 550.45, "word": " out", "probability": 0.87353515625}, {"start": 550.45, "end": 550.59, "word": " how", "probability": 0.890625}, {"start": 550.59, "end": 550.79, "word": " can", "probability": 0.7568359375}, {"start": 550.79, "end": 550.93, "word": " we", "probability": 0.94140625}, {"start": 550.93, "end": 551.49, "word": " calculate", "probability": 0.9140625}, {"start": 551.49, "end": 552.43, "word": " the", "probability": 0.8759765625}, {"start": 552.43, "end": 553.03, "word": " probability", "probability": 0.9150390625}, {"start": 553.03, "end": 553.29, "word": " of", "probability": 0.95361328125}, {"start": 553.29, "end": 553.45, "word": " Z", "probability": 0.66455078125}, {"start": 553.45, "end": 553.81, "word": " greater", "probability": 0.89501953125}, {"start": 553.81, "end": 554.25, "word": " than", "probability": 0.94921875}], "temperature": 1.0}, {"id": 21, "seek": 58042, "start": 554.49, "end": 580.43, "text": " 1.0938. Now my B value is 0.276, always we reject the null hypothesis if my B value is smaller than alpha. Now this value is much much bigger than alpha, so we don't reject the null hypothesis. 
So since my B value is much greater than alpha, that means we don't", "tokens": [502, 13, 13811, 12625, 13, 823, 452, 363, 2158, 307, 1958, 13, 10076, 21, 11, 1009, 321, 8248, 264, 18184, 17291, 498, 452, 363, 2158, 307, 4356, 813, 8961, 13, 823, 341, 2158, 307, 709, 709, 3801, 813, 8961, 11, 370, 321, 500, 380, 8248, 264, 18184, 17291, 13, 407, 1670, 452, 363, 2158, 307, 709, 5044, 813, 8961, 11, 300, 1355, 321, 500, 380], "avg_logprob": -0.18335700260870386, "compression_ratio": 1.7350993377483444, "no_speech_prob": 0.0, "words": [{"start": 554.49, "end": 554.81, "word": " 1", "probability": 0.21484375}, {"start": 554.81, "end": 555.81, "word": ".0938.", "probability": 0.861328125}, {"start": 557.17, "end": 557.77, "word": " Now", "probability": 0.92724609375}, {"start": 557.77, "end": 558.09, "word": " my", "probability": 0.6904296875}, {"start": 558.09, "end": 558.27, "word": " B", "probability": 0.4765625}, {"start": 558.27, "end": 558.53, "word": " value", "probability": 0.7705078125}, {"start": 558.53, "end": 558.73, "word": " is", "probability": 0.9443359375}, {"start": 558.73, "end": 558.89, "word": " 0", "probability": 0.81201171875}, {"start": 558.89, "end": 559.83, "word": ".276,", "probability": 0.978515625}, {"start": 560.03, "end": 560.61, "word": " always", "probability": 0.890625}, {"start": 560.61, "end": 562.37, "word": " we", "probability": 0.76318359375}, {"start": 562.37, "end": 562.85, "word": " reject", "probability": 0.8896484375}, {"start": 562.85, "end": 563.03, "word": " the", "probability": 0.63134765625}, {"start": 563.03, "end": 563.13, "word": " null", "probability": 0.98828125}, {"start": 563.13, "end": 563.51, "word": " hypothesis", "probability": 0.884765625}, {"start": 563.51, "end": 564.45, "word": " if", "probability": 0.82421875}, {"start": 564.45, "end": 564.73, "word": " my", "probability": 0.96728515625}, {"start": 564.73, "end": 564.91, "word": " B", "probability": 0.9521484375}, {"start": 564.91, "end": 565.19, "word": " 
value", "probability": 0.93994140625}, {"start": 565.19, "end": 565.31, "word": " is", "probability": 0.9052734375}, {"start": 565.31, "end": 565.57, "word": " smaller", "probability": 0.85791015625}, {"start": 565.57, "end": 565.81, "word": " than", "probability": 0.94677734375}, {"start": 565.81, "end": 566.09, "word": " alpha.", "probability": 0.68701171875}, {"start": 567.19, "end": 567.57, "word": " Now", "probability": 0.927734375}, {"start": 567.57, "end": 567.87, "word": " this", "probability": 0.82421875}, {"start": 567.87, "end": 568.19, "word": " value", "probability": 0.9833984375}, {"start": 568.19, "end": 568.45, "word": " is", "probability": 0.95703125}, {"start": 568.45, "end": 568.75, "word": " much", "probability": 0.9189453125}, {"start": 568.75, "end": 569.05, "word": " much", "probability": 0.650390625}, {"start": 569.05, "end": 569.35, "word": " bigger", "probability": 0.9599609375}, {"start": 569.35, "end": 569.65, "word": " than", "probability": 0.94677734375}, {"start": 569.65, "end": 569.89, "word": " alpha,", "probability": 0.9033203125}, {"start": 569.99, "end": 570.17, "word": " so", "probability": 0.94384765625}, {"start": 570.17, "end": 570.33, "word": " we", "probability": 0.94189453125}, {"start": 570.33, "end": 570.53, "word": " don't", "probability": 0.937255859375}, {"start": 570.53, "end": 570.87, "word": " reject", "probability": 0.92724609375}, {"start": 570.87, "end": 571.07, "word": " the", "probability": 0.84130859375}, {"start": 571.07, "end": 571.21, "word": " null", "probability": 0.96826171875}, {"start": 571.21, "end": 571.57, "word": " hypothesis.", "probability": 0.91845703125}, {"start": 572.39, "end": 572.59, "word": " So", "probability": 0.86669921875}, {"start": 572.59, "end": 573.07, "word": " since", "probability": 0.50048828125}, {"start": 573.07, "end": 573.51, "word": " my", "probability": 0.96240234375}, {"start": 573.51, "end": 573.75, "word": " B", "probability": 0.9765625}, {"start": 573.75, "end": 
574.15, "word": " value", "probability": 0.9619140625}, {"start": 574.15, "end": 575.95, "word": " is", "probability": 0.94580078125}, {"start": 575.95, "end": 576.25, "word": " much", "probability": 0.9150390625}, {"start": 576.25, "end": 576.71, "word": " greater", "probability": 0.9111328125}, {"start": 576.71, "end": 577.05, "word": " than", "probability": 0.95166015625}, {"start": 577.05, "end": 577.37, "word": " alpha,", "probability": 0.91162109375}, {"start": 578.77, "end": 579.47, "word": " that", "probability": 0.939453125}, {"start": 579.47, "end": 579.81, "word": " means", "probability": 0.9375}, {"start": 579.81, "end": 580.05, "word": " we", "probability": 0.93408203125}, {"start": 580.05, "end": 580.43, "word": " don't", "probability": 0.9755859375}], "temperature": 1.0}, {"id": 22, "seek": 60666, "start": 581.07, "end": 606.67, "text": " reject the null hypothesis, so we reach the same conclusion, that there is not sufficient evidence to support the alternative. Also, we can perform the test by using confidence interval approach, because here we are talking about two-tailed test. 
Your confidence interval is given by", "tokens": [8248, 264, 18184, 17291, 11, 370, 321, 2524, 264, 912, 10063, 11, 300, 456, 307, 406, 11563, 4467, 281, 1406, 264, 8535, 13, 2743, 11, 321, 393, 2042, 264, 1500, 538, 1228, 6687, 15035, 3109, 11, 570, 510, 321, 366, 1417, 466, 732, 12, 14430, 292, 1500, 13, 2260, 6687, 15035, 307, 2212, 538], "avg_logprob": -0.1916193187236786, "compression_ratio": 1.569060773480663, "no_speech_prob": 0.0, "words": [{"start": 581.07, "end": 581.75, "word": " reject", "probability": 0.24755859375}, {"start": 581.75, "end": 582.47, "word": " the", "probability": 0.89794921875}, {"start": 582.47, "end": 582.65, "word": " null", "probability": 0.93701171875}, {"start": 582.65, "end": 583.09, "word": " hypothesis,", "probability": 0.810546875}, {"start": 583.67, "end": 583.87, "word": " so", "probability": 0.94775390625}, {"start": 583.87, "end": 584.07, "word": " we", "probability": 0.9375}, {"start": 584.07, "end": 584.35, "word": " reach", "probability": 0.9072265625}, {"start": 584.35, "end": 584.59, "word": " the", "probability": 0.9111328125}, {"start": 584.59, "end": 584.91, "word": " same", "probability": 0.90283203125}, {"start": 584.91, "end": 585.61, "word": " conclusion,", "probability": 0.88134765625}, {"start": 586.47, "end": 586.81, "word": " that", "probability": 0.93310546875}, {"start": 586.81, "end": 587.45, "word": " there", "probability": 0.8955078125}, {"start": 587.45, "end": 587.55, "word": " is", "probability": 0.91552734375}, {"start": 587.55, "end": 587.71, "word": " not", "probability": 0.9189453125}, {"start": 587.71, "end": 588.11, "word": " sufficient", "probability": 0.90478515625}, {"start": 588.11, "end": 588.53, "word": " evidence", "probability": 0.95166015625}, {"start": 588.53, "end": 588.71, "word": " to", "probability": 0.8369140625}, {"start": 588.71, "end": 589.11, "word": " support", "probability": 0.98388671875}, {"start": 589.11, "end": 589.27, "word": " the", "probability": 0.7724609375}, 
{"start": 589.27, "end": 589.57, "word": " alternative.", "probability": 0.83056640625}, {"start": 592.07, "end": 592.51, "word": " Also,", "probability": 0.94775390625}, {"start": 592.59, "end": 592.69, "word": " we", "probability": 0.88818359375}, {"start": 592.69, "end": 593.05, "word": " can", "probability": 0.947265625}, {"start": 593.05, "end": 594.33, "word": " perform", "probability": 0.7900390625}, {"start": 594.33, "end": 594.57, "word": " the", "probability": 0.9150390625}, {"start": 594.57, "end": 594.89, "word": " test", "probability": 0.86279296875}, {"start": 594.89, "end": 595.27, "word": " by", "probability": 0.96044921875}, {"start": 595.27, "end": 595.75, "word": " using", "probability": 0.935546875}, {"start": 595.75, "end": 597.29, "word": " confidence", "probability": 0.96630859375}, {"start": 597.29, "end": 597.93, "word": " interval", "probability": 0.95751953125}, {"start": 597.93, "end": 598.55, "word": " approach,", "probability": 0.89892578125}, {"start": 598.73, "end": 599.61, "word": " because", "probability": 0.8955078125}, {"start": 599.61, "end": 599.81, "word": " here", "probability": 0.84228515625}, {"start": 599.81, "end": 599.91, "word": " we", "probability": 0.8212890625}, {"start": 599.91, "end": 600.01, "word": " are", "probability": 0.87548828125}, {"start": 600.01, "end": 600.29, "word": " talking", "probability": 0.85302734375}, {"start": 600.29, "end": 600.55, "word": " about", "probability": 0.90478515625}, {"start": 600.55, "end": 600.75, "word": " two", "probability": 0.8896484375}, {"start": 600.75, "end": 601.03, "word": "-tailed", "probability": 0.8019205729166666}, {"start": 601.03, "end": 601.31, "word": " test.", "probability": 0.73583984375}, {"start": 602.49, "end": 602.85, "word": " Your", "probability": 0.86376953125}, {"start": 602.85, "end": 603.35, "word": " confidence", "probability": 0.9794921875}, {"start": 603.35, "end": 603.79, "word": " interval", "probability": 0.97119140625}, {"start": 603.79, 
"end": 605.97, "word": " is", "probability": 0.92626953125}, {"start": 605.97, "end": 606.23, "word": " given", "probability": 0.8994140625}, {"start": 606.23, "end": 606.67, "word": " by", "probability": 0.970703125}], "temperature": 1.0}, {"id": 23, "seek": 63948, "start": 610.62, "end": 639.48, "text": " B1 minus B2 plus or minus Z alpha over 2 times B dash 1 minus B dash multiplied by 1 over N1 plus 1 over N2. By the way, this one called the margin of error.", "tokens": [363, 16, 3175, 363, 17, 1804, 420, 3175, 1176, 8961, 670, 568, 1413, 363, 8240, 502, 3175, 363, 8240, 17207, 538, 502, 670, 426, 16, 1804, 502, 670, 426, 17, 13, 3146, 264, 636, 11, 341, 472, 1219, 264, 10270, 295, 6713, 13], "avg_logprob": -0.19975141740658067, "compression_ratio": 1.3389830508474576, "no_speech_prob": 0.0, "words": [{"start": 610.62, "end": 611.18, "word": " B1", "probability": 0.6173095703125}, {"start": 611.18, "end": 611.62, "word": " minus", "probability": 0.79150390625}, {"start": 611.62, "end": 612.16, "word": " B2", "probability": 0.97802734375}, {"start": 612.16, "end": 617.28, "word": " plus", "probability": 0.5361328125}, {"start": 617.28, "end": 617.64, "word": " or", "probability": 0.93798828125}, {"start": 617.64, "end": 617.94, "word": " minus", "probability": 0.98974609375}, {"start": 617.94, "end": 618.16, "word": " Z", "probability": 0.43896484375}, {"start": 618.16, "end": 618.4, "word": " alpha", "probability": 0.732421875}, {"start": 618.4, "end": 618.68, "word": " over", "probability": 0.84521484375}, {"start": 618.68, "end": 618.92, "word": " 2", "probability": 0.79296875}, {"start": 618.92, "end": 619.94, "word": " times", "probability": 0.873046875}, {"start": 619.94, "end": 623.72, "word": " B", "probability": 0.880859375}, {"start": 623.72, "end": 624.04, "word": " dash", "probability": 0.771484375}, {"start": 624.04, "end": 624.26, "word": " 1", "probability": 0.7578125}, {"start": 624.26, "end": 624.58, "word": " minus", "probability": 
0.98681640625}, {"start": 624.58, "end": 624.8, "word": " B", "probability": 0.99462890625}, {"start": 624.8, "end": 625.08, "word": " dash", "probability": 0.8486328125}, {"start": 625.08, "end": 626.06, "word": " multiplied", "probability": 0.646484375}, {"start": 626.06, "end": 626.52, "word": " by", "probability": 0.9736328125}, {"start": 626.52, "end": 629.02, "word": " 1", "probability": 0.9150390625}, {"start": 629.02, "end": 629.28, "word": " over", "probability": 0.91162109375}, {"start": 629.28, "end": 629.62, "word": " N1", "probability": 0.828369140625}, {"start": 629.62, "end": 629.92, "word": " plus", "probability": 0.95361328125}, {"start": 629.92, "end": 630.12, "word": " 1", "probability": 0.9716796875}, {"start": 630.12, "end": 630.3, "word": " over", "probability": 0.9111328125}, {"start": 630.3, "end": 630.74, "word": " N2.", "probability": 0.9892578125}, {"start": 631.62, "end": 631.9, "word": " By", "probability": 0.9453125}, {"start": 631.9, "end": 632.02, "word": " the", "probability": 0.92822265625}, {"start": 632.02, "end": 632.22, "word": " way,", "probability": 0.96240234375}, {"start": 632.32, "end": 632.58, "word": " this", "probability": 0.93017578125}, {"start": 632.58, "end": 637.52, "word": " one", "probability": 0.70458984375}, {"start": 637.52, "end": 638.14, "word": " called", "probability": 0.54541015625}, {"start": 638.14, "end": 638.86, "word": " the", "probability": 0.8662109375}, {"start": 638.86, "end": 639.16, "word": " margin", "probability": 0.64794921875}, {"start": 639.16, "end": 639.36, "word": " of", "probability": 0.908203125}, {"start": 639.36, "end": 639.48, "word": " error.", "probability": 0.92724609375}], "temperature": 1.0}, {"id": 24, "seek": 66156, "start": 641.3, "end": 661.56, "text": " So z times square root of this sequence is called the margin of error, and the square root itself is called the standard error of the point estimate of pi 1 minus pi 2, which is P1 minus P2.", "tokens": [407, 710, 1413, 
3732, 5593, 295, 341, 8310, 307, 1219, 264, 10270, 295, 6713, 11, 293, 264, 3732, 5593, 2564, 307, 1219, 264, 3832, 6713, 295, 264, 935, 12539, 295, 3895, 502, 3175, 3895, 568, 11, 597, 307, 430, 16, 3175, 430, 17, 13], "avg_logprob": -0.24027778175142075, "compression_ratio": 1.5039370078740157, "no_speech_prob": 0.0, "words": [{"start": 641.3, "end": 641.54, "word": " So", "probability": 0.8525390625}, {"start": 641.54, "end": 641.78, "word": " z", "probability": 0.420654296875}, {"start": 641.78, "end": 642.2, "word": " times", "probability": 0.8857421875}, {"start": 642.2, "end": 642.94, "word": " square", "probability": 0.4560546875}, {"start": 642.94, "end": 643.32, "word": " root", "probability": 0.92578125}, {"start": 643.32, "end": 644.24, "word": " of", "probability": 0.9609375}, {"start": 644.24, "end": 644.46, "word": " this", "probability": 0.91552734375}, {"start": 644.46, "end": 644.76, "word": " sequence", "probability": 0.30419921875}, {"start": 644.76, "end": 645.04, "word": " is", "probability": 0.931640625}, {"start": 645.04, "end": 645.22, "word": " called", "probability": 0.89404296875}, {"start": 645.22, "end": 645.42, "word": " the", "probability": 0.8876953125}, {"start": 645.42, "end": 645.64, "word": " margin", "probability": 0.5419921875}, {"start": 645.64, "end": 645.76, "word": " of", "probability": 0.9150390625}, {"start": 645.76, "end": 645.94, "word": " error,", "probability": 0.9140625}, {"start": 646.94, "end": 647.32, "word": " and", "probability": 0.93310546875}, {"start": 647.32, "end": 647.52, "word": " the", "probability": 0.912109375}, {"start": 647.52, "end": 647.78, "word": " square", "probability": 0.8828125}, {"start": 647.78, "end": 648.08, "word": " root", "probability": 0.90673828125}, {"start": 648.08, "end": 648.72, "word": " itself", "probability": 0.74560546875}, {"start": 648.72, "end": 649.0, "word": " is", "probability": 0.94970703125}, {"start": 649.0, "end": 649.38, "word": " called", "probability": 
0.89111328125}, {"start": 649.38, "end": 651.84, "word": " the", "probability": 0.446044921875}, {"start": 651.84, "end": 652.28, "word": " standard", "probability": 0.9150390625}, {"start": 652.28, "end": 652.82, "word": " error", "probability": 0.88818359375}, {"start": 652.82, "end": 653.4, "word": " of", "probability": 0.951171875}, {"start": 653.4, "end": 655.56, "word": " the", "probability": 0.404052734375}, {"start": 655.56, "end": 656.54, "word": " point", "probability": 0.8720703125}, {"start": 656.54, "end": 657.1, "word": " estimate", "probability": 0.818359375}, {"start": 657.1, "end": 658.3, "word": " of", "probability": 0.87646484375}, {"start": 658.3, "end": 658.62, "word": " pi", "probability": 0.76025390625}, {"start": 658.62, "end": 658.82, "word": " 1", "probability": 0.77880859375}, {"start": 658.82, "end": 659.14, "word": " minus", "probability": 0.9794921875}, {"start": 659.14, "end": 659.36, "word": " pi", "probability": 0.9453125}, {"start": 659.36, "end": 659.56, "word": " 2,", "probability": 0.98779296875}, {"start": 659.72, "end": 659.72, "word": " which", "probability": 0.9599609375}, {"start": 659.72, "end": 660.12, "word": " is", "probability": 0.95849609375}, {"start": 660.12, "end": 661.04, "word": " P1", "probability": 0.552978515625}, {"start": 661.04, "end": 661.3, "word": " minus", "probability": 0.986328125}, {"start": 661.3, "end": 661.56, "word": " P2.", "probability": 0.992919921875}], "temperature": 1.0}, {"id": 25, "seek": 68995, "start": 662.75, "end": 689.95, "text": " So square root of b dash 1 minus b dash multiplied by 1 over n1 plus 1 over n2 is called the standard error of the estimate of pi 1 minus pi 2. So this is standard estimate of b1 minus b2. 
Simply, you will get the confidence interval to be between pi 1 minus the difference between the two proportions, 4 between negative.", "tokens": [407, 3732, 5593, 295, 272, 8240, 502, 3175, 272, 8240, 17207, 538, 502, 670, 297, 16, 1804, 502, 670, 297, 17, 307, 1219, 264, 3832, 6713, 295, 264, 12539, 295, 3895, 502, 3175, 3895, 568, 13, 407, 341, 307, 3832, 12539, 295, 272, 16, 3175, 272, 17, 13, 19596, 11, 291, 486, 483, 264, 6687, 15035, 281, 312, 1296, 3895, 502, 3175, 264, 2649, 1296, 264, 732, 32482, 11, 1017, 1296, 3671, 13], "avg_logprob": -0.17271959721236615, "compression_ratio": 1.7272727272727273, "no_speech_prob": 0.0, "words": [{"start": 662.75, "end": 663.09, "word": " So", "probability": 0.93994140625}, {"start": 663.09, "end": 663.51, "word": " square", "probability": 0.603515625}, {"start": 663.51, "end": 663.73, "word": " root", "probability": 0.93701171875}, {"start": 663.73, "end": 663.89, "word": " of", "probability": 0.95703125}, {"start": 663.89, "end": 664.01, "word": " b", "probability": 0.447265625}, {"start": 664.01, "end": 664.25, "word": " dash", "probability": 0.568359375}, {"start": 664.25, "end": 664.43, "word": " 1", "probability": 0.82177734375}, {"start": 664.43, "end": 664.69, "word": " minus", "probability": 0.92626953125}, {"start": 664.69, "end": 664.87, "word": " b", "probability": 0.95654296875}, {"start": 664.87, "end": 665.11, "word": " dash", "probability": 0.82861328125}, {"start": 665.11, "end": 665.87, "word": " multiplied", "probability": 0.6513671875}, {"start": 665.87, "end": 666.15, "word": " by", "probability": 0.91162109375}, {"start": 666.15, "end": 666.47, "word": " 1", "probability": 0.94384765625}, {"start": 666.47, "end": 666.67, "word": " over", "probability": 0.8984375}, {"start": 666.67, "end": 666.95, "word": " n1", "probability": 0.5625}, {"start": 666.95, "end": 667.23, "word": " plus", "probability": 0.92138671875}, {"start": 667.23, "end": 667.47, "word": " 1", "probability": 0.98876953125}, 
{"start": 667.47, "end": 667.65, "word": " over", "probability": 0.9140625}, {"start": 667.65, "end": 667.95, "word": " n2", "probability": 0.974853515625}, {"start": 667.95, "end": 668.15, "word": " is", "probability": 0.9052734375}, {"start": 668.15, "end": 668.57, "word": " called", "probability": 0.8837890625}, {"start": 668.57, "end": 669.61, "word": " the", "probability": 0.8662109375}, {"start": 669.61, "end": 670.07, "word": " standard", "probability": 0.921875}, {"start": 670.07, "end": 670.37, "word": " error", "probability": 0.8671875}, {"start": 670.37, "end": 670.55, "word": " of", "probability": 0.96142578125}, {"start": 670.55, "end": 670.67, "word": " the", "probability": 0.91943359375}, {"start": 670.67, "end": 671.19, "word": " estimate", "probability": 0.89892578125}, {"start": 671.19, "end": 672.27, "word": " of", "probability": 0.923828125}, {"start": 672.27, "end": 672.61, "word": " pi", "probability": 0.8740234375}, {"start": 672.61, "end": 672.79, "word": " 1", "probability": 0.8076171875}, {"start": 672.79, "end": 673.11, "word": " minus", "probability": 0.9833984375}, {"start": 673.11, "end": 673.29, "word": " pi", "probability": 0.95263671875}, {"start": 673.29, "end": 673.53, "word": " 2.", "probability": 0.98876953125}, {"start": 674.15, "end": 674.49, "word": " So", "probability": 0.904296875}, {"start": 674.49, "end": 674.65, "word": " this", "probability": 0.85498046875}, {"start": 674.65, "end": 674.79, "word": " is", "probability": 0.94091796875}, {"start": 674.79, "end": 675.45, "word": " standard", "probability": 0.53955078125}, {"start": 675.45, "end": 675.73, "word": " estimate", "probability": 0.630859375}, {"start": 675.73, "end": 675.91, "word": " of", "probability": 0.90625}, {"start": 675.91, "end": 676.23, "word": " b1", "probability": 0.7177734375}, {"start": 676.23, "end": 676.65, "word": " minus", "probability": 0.9833984375}, {"start": 676.65, "end": 677.03, "word": " b2.", "probability": 0.96923828125}, {"start": 
678.85, "end": 679.45, "word": " Simply,", "probability": 0.8994140625}, {"start": 679.71, "end": 679.83, "word": " you", "probability": 0.93896484375}, {"start": 679.83, "end": 679.99, "word": " will", "probability": 0.88330078125}, {"start": 679.99, "end": 680.39, "word": " get", "probability": 0.94287109375}, {"start": 680.39, "end": 681.29, "word": " the", "probability": 0.8955078125}, {"start": 681.29, "end": 681.75, "word": " confidence", "probability": 0.97412109375}, {"start": 681.75, "end": 682.25, "word": " interval", "probability": 0.9423828125}, {"start": 682.25, "end": 682.49, "word": " to", "probability": 0.9716796875}, {"start": 682.49, "end": 682.79, "word": " be", "probability": 0.9033203125}, {"start": 682.79, "end": 684.19, "word": " between", "probability": 0.90625}, {"start": 684.19, "end": 684.87, "word": " pi", "probability": 0.94091796875}, {"start": 684.87, "end": 685.13, "word": " 1", "probability": 0.97021484375}, {"start": 685.13, "end": 685.53, "word": " minus", "probability": 0.990234375}, {"start": 685.53, "end": 686.05, "word": " the", "probability": 0.57568359375}, {"start": 686.05, "end": 686.47, "word": " difference", "probability": 0.8701171875}, {"start": 686.47, "end": 686.83, "word": " between", "probability": 0.873046875}, {"start": 686.83, "end": 687.01, "word": " the", "probability": 0.89892578125}, {"start": 687.01, "end": 687.17, "word": " two", "probability": 0.92041015625}, {"start": 687.17, "end": 687.81, "word": " proportions,", "probability": 0.8046875}, {"start": 688.77, "end": 689.03, "word": " 4", "probability": 0.80419921875}, {"start": 689.03, "end": 689.39, "word": " between", "probability": 0.8515625}, {"start": 689.39, "end": 689.95, "word": " negative.", "probability": 0.94189453125}], "temperature": 1.0}, {"id": 26, "seek": 70840, "start": 692.26, "end": 708.4, "text": " 0.5 and 0.7. 
Now this interval actually contains", "tokens": [1958, 13, 20, 293, 1958, 13, 22, 13, 823, 341, 15035, 767, 8306], "avg_logprob": -0.2554408609867096, "compression_ratio": 0.8596491228070176, "no_speech_prob": 0.0, "words": [{"start": 692.26, "end": 692.62, "word": " 0", "probability": 0.431396484375}, {"start": 692.62, "end": 693.1, "word": ".5", "probability": 0.768310546875}, {"start": 693.1, "end": 697.16, "word": " and", "probability": 0.7666015625}, {"start": 697.16, "end": 698.4, "word": " 0", "probability": 0.87890625}, {"start": 698.4, "end": 698.94, "word": ".7.", "probability": 0.975830078125}, {"start": 704.06, "end": 704.98, "word": " Now", "probability": 0.80859375}, {"start": 704.98, "end": 705.24, "word": " this", "probability": 0.6494140625}, {"start": 705.24, "end": 705.6, "word": " interval", "probability": 0.94580078125}, {"start": 705.6, "end": 706.18, "word": " actually", "probability": 0.8486328125}, {"start": 706.18, "end": 708.4, "word": " contains", "probability": 0.8603515625}], "temperature": 1.0}, {"id": 27, "seek": 73973, "start": 710.23, "end": 739.73, "text": " The value of 0, that means we don't reject the null hypothesis. So since this interval starts from negative, lower bound is negative 0.5, upper bound is 0.17, that means 0 inside this interval, I mean the confidence captures the value of 0, that means we don't reject the null hypothesis. 
So by using three different approaches, we end with the same decision and conclusion.", "tokens": [440, 2158, 295, 1958, 11, 300, 1355, 321, 500, 380, 8248, 264, 18184, 17291, 13, 407, 1670, 341, 15035, 3719, 490, 3671, 11, 3126, 5472, 307, 3671, 1958, 13, 20, 11, 6597, 5472, 307, 1958, 13, 7773, 11, 300, 1355, 1958, 1854, 341, 15035, 11, 286, 914, 264, 6687, 27986, 264, 2158, 295, 1958, 11, 300, 1355, 321, 500, 380, 8248, 264, 18184, 17291, 13, 407, 538, 1228, 1045, 819, 11587, 11, 321, 917, 365, 264, 912, 3537, 293, 10063, 13], "avg_logprob": -0.18969130370675064, "compression_ratio": 1.8472906403940887, "no_speech_prob": 0.0, "words": [{"start": 710.23, "end": 710.45, "word": " The", "probability": 0.27392578125}, {"start": 710.45, "end": 710.83, "word": " value", "probability": 0.9775390625}, {"start": 710.83, "end": 711.03, "word": " of", "probability": 0.876953125}, {"start": 711.03, "end": 711.35, "word": " 0,", "probability": 0.4228515625}, {"start": 711.65, "end": 712.19, "word": " that", "probability": 0.9169921875}, {"start": 712.19, "end": 712.57, "word": " means", "probability": 0.93798828125}, {"start": 712.57, "end": 712.93, "word": " we", "probability": 0.845703125}, {"start": 712.93, "end": 713.39, "word": " don't", "probability": 0.955322265625}, {"start": 713.39, "end": 714.03, "word": " reject", "probability": 0.86083984375}, {"start": 714.03, "end": 714.25, "word": " the", "probability": 0.2587890625}, {"start": 714.25, "end": 714.39, "word": " null", "probability": 0.9853515625}, {"start": 714.39, "end": 714.79, "word": " hypothesis.", "probability": 0.81591796875}, {"start": 715.79, "end": 715.97, "word": " So", "probability": 0.7890625}, {"start": 715.97, "end": 716.35, "word": " since", "probability": 0.6669921875}, {"start": 716.35, "end": 716.69, "word": " this", "probability": 0.921875}, {"start": 716.69, "end": 717.07, "word": " interval", "probability": 0.95361328125}, {"start": 717.07, "end": 717.57, "word": " starts", "probability": 
0.85546875}, {"start": 717.57, "end": 717.77, "word": " from", "probability": 0.880859375}, {"start": 717.77, "end": 718.23, "word": " negative,", "probability": 0.9384765625}, {"start": 718.89, "end": 719.15, "word": " lower", "probability": 0.85107421875}, {"start": 719.15, "end": 719.41, "word": " bound", "probability": 0.91845703125}, {"start": 719.41, "end": 719.63, "word": " is", "probability": 0.93701171875}, {"start": 719.63, "end": 719.91, "word": " negative", "probability": 0.435546875}, {"start": 719.91, "end": 720.19, "word": " 0", "probability": 0.76416015625}, {"start": 720.19, "end": 720.61, "word": ".5,", "probability": 0.74072265625}, {"start": 721.09, "end": 721.87, "word": " upper", "probability": 0.74365234375}, {"start": 721.87, "end": 722.27, "word": " bound", "probability": 0.90625}, {"start": 722.27, "end": 722.55, "word": " is", "probability": 0.9384765625}, {"start": 722.55, "end": 722.75, "word": " 0", "probability": 0.83984375}, {"start": 722.75, "end": 723.37, "word": ".17,", "probability": 0.99267578125}, {"start": 723.65, "end": 723.91, "word": " that", "probability": 0.92529296875}, {"start": 723.91, "end": 724.19, "word": " means", "probability": 0.93798828125}, {"start": 724.19, "end": 724.55, "word": " 0", "probability": 0.65576171875}, {"start": 724.55, "end": 725.13, "word": " inside", "probability": 0.9287109375}, {"start": 725.13, "end": 725.73, "word": " this", "probability": 0.93408203125}, {"start": 725.73, "end": 726.19, "word": " interval,", "probability": 0.9521484375}, {"start": 726.75, "end": 726.89, "word": " I", "probability": 0.93017578125}, {"start": 726.89, "end": 727.05, "word": " mean", "probability": 0.9580078125}, {"start": 727.05, "end": 727.25, "word": " the", "probability": 0.61328125}, {"start": 727.25, "end": 727.61, "word": " confidence", "probability": 0.8681640625}, {"start": 727.61, "end": 728.29, "word": " captures", "probability": 0.8017578125}, {"start": 728.29, "end": 728.51, "word": " the", 
"probability": 0.90625}, {"start": 728.51, "end": 728.73, "word": " value", "probability": 0.97900390625}, {"start": 728.73, "end": 728.87, "word": " of", "probability": 0.91650390625}, {"start": 728.87, "end": 729.13, "word": " 0,", "probability": 0.87548828125}, {"start": 729.61, "end": 729.91, "word": " that", "probability": 0.923828125}, {"start": 729.91, "end": 730.29, "word": " means", "probability": 0.9375}, {"start": 730.29, "end": 730.79, "word": " we", "probability": 0.92333984375}, {"start": 730.79, "end": 731.15, "word": " don't", "probability": 0.97607421875}, {"start": 731.15, "end": 731.55, "word": " reject", "probability": 0.9111328125}, {"start": 731.55, "end": 731.75, "word": " the", "probability": 0.6845703125}, {"start": 731.75, "end": 731.89, "word": " null", "probability": 0.9619140625}, {"start": 731.89, "end": 732.29, "word": " hypothesis.", "probability": 0.85595703125}, {"start": 733.47, "end": 733.81, "word": " So", "probability": 0.95068359375}, {"start": 733.81, "end": 734.23, "word": " by", "probability": 0.77783203125}, {"start": 734.23, "end": 734.65, "word": " using", "probability": 0.93798828125}, {"start": 734.65, "end": 735.09, "word": " three", "probability": 0.82177734375}, {"start": 735.09, "end": 735.61, "word": " different", "probability": 0.88134765625}, {"start": 735.61, "end": 736.17, "word": " approaches,", "probability": 0.74365234375}, {"start": 736.35, "end": 736.49, "word": " we", "probability": 0.9541015625}, {"start": 736.49, "end": 736.91, "word": " end", "probability": 0.91015625}, {"start": 736.91, "end": 737.11, "word": " with", "probability": 0.89501953125}, {"start": 737.11, "end": 737.31, "word": " the", "probability": 0.91455078125}, {"start": 737.31, "end": 737.61, "word": " same", "probability": 0.8984375}, {"start": 737.61, "end": 738.73, "word": " decision", "probability": 0.93896484375}, {"start": 738.73, "end": 739.19, "word": " and", "probability": 0.93896484375}, {"start": 739.19, "end": 739.73, 
"word": " conclusion.", "probability": 0.91162109375}], "temperature": 1.0}, {"id": 28, "seek": 76747, "start": 740.19, "end": 767.47, "text": " That is, we don't reject null hypotheses. That's all for number one. Question number two. The excellent drug company claims its aspirin tablets will relieve headaches faster than any other aspirin on the market. So they believe that", "tokens": [663, 307, 11, 321, 500, 380, 8248, 18184, 49969, 13, 663, 311, 439, 337, 1230, 472, 13, 14464, 1230, 732, 13, 440, 7103, 4110, 2237, 9441, 1080, 20003, 259, 27622, 486, 30450, 35046, 4663, 813, 604, 661, 20003, 259, 322, 264, 2142, 13, 407, 436, 1697, 300], "avg_logprob": -0.18701171378294626, "compression_ratio": 1.4746835443037976, "no_speech_prob": 0.0, "words": [{"start": 740.19, "end": 740.53, "word": " That", "probability": 0.70458984375}, {"start": 740.53, "end": 740.77, "word": " is,", "probability": 0.9150390625}, {"start": 740.83, "end": 740.93, "word": " we", "probability": 0.9208984375}, {"start": 740.93, "end": 741.17, "word": " don't", "probability": 0.894287109375}, {"start": 741.17, "end": 741.59, "word": " reject", "probability": 0.8935546875}, {"start": 741.59, "end": 741.93, "word": " null", "probability": 0.67431640625}, {"start": 741.93, "end": 742.39, "word": " hypotheses.", "probability": 0.380615234375}, {"start": 744.17, "end": 744.61, "word": " That's", "probability": 0.927734375}, {"start": 744.61, "end": 744.93, "word": " all", "probability": 0.95263671875}, {"start": 744.93, "end": 745.37, "word": " for", "probability": 0.94775390625}, {"start": 745.37, "end": 745.83, "word": " number", "probability": 0.88134765625}, {"start": 745.83, "end": 746.11, "word": " one.", "probability": 0.7685546875}, {"start": 751.45, "end": 752.29, "word": " Question", "probability": 0.68212890625}, {"start": 752.29, "end": 752.57, "word": " number", "probability": 0.93994140625}, {"start": 752.57, "end": 752.91, "word": " two.", "probability": 0.92236328125}, {"start": 
756.17, "end": 756.45, "word": " The", "probability": 0.8681640625}, {"start": 756.45, "end": 756.87, "word": " excellent", "probability": 0.379150390625}, {"start": 756.87, "end": 757.37, "word": " drug", "probability": 0.8759765625}, {"start": 757.37, "end": 757.89, "word": " company", "probability": 0.8935546875}, {"start": 757.89, "end": 758.51, "word": " claims", "probability": 0.7900390625}, {"start": 758.51, "end": 759.89, "word": " its", "probability": 0.486572265625}, {"start": 759.89, "end": 760.45, "word": " aspirin", "probability": 0.946044921875}, {"start": 760.45, "end": 761.03, "word": " tablets", "probability": 0.76708984375}, {"start": 761.03, "end": 761.35, "word": " will", "probability": 0.8701171875}, {"start": 761.35, "end": 761.75, "word": " relieve", "probability": 0.90869140625}, {"start": 761.75, "end": 762.19, "word": " headaches", "probability": 0.953125}, {"start": 762.19, "end": 762.75, "word": " faster", "probability": 0.89404296875}, {"start": 762.75, "end": 763.27, "word": " than", "probability": 0.94775390625}, {"start": 763.27, "end": 763.61, "word": " any", "probability": 0.91015625}, {"start": 763.61, "end": 763.89, "word": " other", "probability": 0.8876953125}, {"start": 763.89, "end": 764.41, "word": " aspirin", "probability": 0.962646484375}, {"start": 764.41, "end": 764.69, "word": " on", "probability": 0.91748046875}, {"start": 764.69, "end": 764.83, "word": " the", "probability": 0.92041015625}, {"start": 764.83, "end": 765.09, "word": " market.", "probability": 0.8984375}, {"start": 765.95, "end": 766.43, "word": " So", "probability": 0.92431640625}, {"start": 766.43, "end": 766.69, "word": " they", "probability": 0.73291015625}, {"start": 766.69, "end": 767.03, "word": " believe", "probability": 0.87158203125}, {"start": 767.03, "end": 767.47, "word": " that", "probability": 0.94189453125}], "temperature": 1.0}, {"id": 29, "seek": 79692, "start": 768.44, "end": 796.92, "text": " Their drug is better than the other drug 
in the market. To determine whether Excellence claim is valid, random samples of size 15 are chosen from aspirins made by Excellence and the sample drug combined. So sample sizes of 15 are chosen from each. So that means N1 equals 15 and N2 also equals 15.", "tokens": [6710, 4110, 307, 1101, 813, 264, 661, 4110, 294, 264, 2142, 13, 1407, 6997, 1968, 44684, 3932, 307, 7363, 11, 4974, 10938, 295, 2744, 2119, 366, 8614, 490, 20003, 1292, 1027, 538, 44684, 293, 264, 6889, 4110, 9354, 13, 407, 6889, 11602, 295, 2119, 366, 8614, 490, 1184, 13, 407, 300, 1355, 426, 16, 6915, 2119, 293, 426, 17, 611, 6915, 2119, 13], "avg_logprob": -0.17797851143404841, "compression_ratio": 1.6464088397790055, "no_speech_prob": 0.0, "words": [{"start": 768.44, "end": 768.84, "word": " Their", "probability": 0.55419921875}, {"start": 768.84, "end": 769.26, "word": " drug", "probability": 0.8974609375}, {"start": 769.26, "end": 769.6, "word": " is", "probability": 0.95361328125}, {"start": 769.6, "end": 769.9, "word": " better", "probability": 0.833984375}, {"start": 769.9, "end": 770.42, "word": " than", "probability": 0.9462890625}, {"start": 770.42, "end": 771.28, "word": " the", "probability": 0.80517578125}, {"start": 771.28, "end": 771.54, "word": " other", "probability": 0.88671875}, {"start": 771.54, "end": 771.88, "word": " drug", "probability": 0.81494140625}, {"start": 771.88, "end": 772.08, "word": " in", "probability": 0.92236328125}, {"start": 772.08, "end": 772.22, "word": " the", "probability": 0.92236328125}, {"start": 772.22, "end": 772.54, "word": " market.", "probability": 0.8916015625}, {"start": 774.32, "end": 774.72, "word": " To", "probability": 0.92626953125}, {"start": 774.72, "end": 775.12, "word": " determine", "probability": 0.91943359375}, {"start": 775.12, "end": 775.46, "word": " whether", "probability": 0.8955078125}, {"start": 775.46, "end": 775.86, "word": " Excellence", "probability": 0.426513671875}, {"start": 775.86, "end": 776.8, "word": " claim", 
"probability": 0.640625}, {"start": 776.8, "end": 777.18, "word": " is", "probability": 0.955078125}, {"start": 777.18, "end": 777.66, "word": " valid,", "probability": 0.95361328125}, {"start": 779.4, "end": 779.7, "word": " random", "probability": 0.78173828125}, {"start": 779.7, "end": 780.24, "word": " samples", "probability": 0.87451171875}, {"start": 780.24, "end": 781.68, "word": " of", "probability": 0.9521484375}, {"start": 781.68, "end": 782.12, "word": " size", "probability": 0.83642578125}, {"start": 782.12, "end": 782.7, "word": " 15", "probability": 0.87744140625}, {"start": 782.7, "end": 783.06, "word": " are", "probability": 0.9404296875}, {"start": 783.06, "end": 783.54, "word": " chosen", "probability": 0.96484375}, {"start": 783.54, "end": 784.26, "word": " from", "probability": 0.8857421875}, {"start": 784.26, "end": 784.92, "word": " aspirins", "probability": 0.942626953125}, {"start": 784.92, "end": 785.22, "word": " made", "probability": 0.869140625}, {"start": 785.22, "end": 785.46, "word": " by", "probability": 0.96923828125}, {"start": 785.46, "end": 785.84, "word": " Excellence", "probability": 0.6767578125}, {"start": 785.84, "end": 786.38, "word": " and", "probability": 0.91162109375}, {"start": 786.38, "end": 786.56, "word": " the", "probability": 0.4921875}, {"start": 786.56, "end": 786.8, "word": " sample", "probability": 0.64453125}, {"start": 786.8, "end": 787.08, "word": " drug", "probability": 0.7939453125}, {"start": 787.08, "end": 787.48, "word": " combined.", "probability": 0.6240234375}, {"start": 788.78, "end": 789.06, "word": " So", "probability": 0.9375}, {"start": 789.06, "end": 789.52, "word": " sample", "probability": 0.5556640625}, {"start": 789.52, "end": 789.92, "word": " sizes", "probability": 0.45703125}, {"start": 789.92, "end": 790.14, "word": " of", "probability": 0.947265625}, {"start": 790.14, "end": 790.64, "word": " 15", "probability": 0.94384765625}, {"start": 790.64, "end": 791.3, "word": " are", 
"probability": 0.94482421875}, {"start": 791.3, "end": 791.78, "word": " chosen", "probability": 0.95556640625}, {"start": 791.78, "end": 792.3, "word": " from", "probability": 0.888671875}, {"start": 792.3, "end": 792.66, "word": " each.", "probability": 0.9541015625}, {"start": 793.6, "end": 793.86, "word": " So", "probability": 0.9140625}, {"start": 793.86, "end": 794.1, "word": " that", "probability": 0.8759765625}, {"start": 794.1, "end": 794.42, "word": " means", "probability": 0.92431640625}, {"start": 794.42, "end": 794.72, "word": " N1", "probability": 0.857666015625}, {"start": 794.72, "end": 794.94, "word": " equals", "probability": 0.426513671875}, {"start": 794.94, "end": 795.4, "word": " 15", "probability": 0.91650390625}, {"start": 795.4, "end": 795.62, "word": " and", "probability": 0.7890625}, {"start": 795.62, "end": 795.96, "word": " N2", "probability": 0.9951171875}, {"start": 795.96, "end": 796.26, "word": " also", "probability": 0.7666015625}, {"start": 796.26, "end": 796.54, "word": " equals", "probability": 0.80029296875}, {"start": 796.54, "end": 796.92, "word": " 15.", "probability": 0.96484375}], "temperature": 1.0}, {"id": 30, "seek": 82628, "start": 798.44, "end": 826.28, "text": " And aspirin is given to each of the 30 randomly selected persons suffering from headaches. So the total sample size is 30, because 15 from the first company, and the second for the simple company. So they are 30 selected persons who are suffering from headaches. 
So we have information about number of minutes required for each to recover from the headache.", "tokens": [400, 20003, 259, 307, 2212, 281, 1184, 295, 264, 2217, 16979, 8209, 14453, 7755, 490, 35046, 13, 407, 264, 3217, 6889, 2744, 307, 2217, 11, 570, 2119, 490, 264, 700, 2237, 11, 293, 264, 1150, 337, 264, 2199, 2237, 13, 407, 436, 366, 2217, 8209, 14453, 567, 366, 7755, 490, 35046, 13, 407, 321, 362, 1589, 466, 1230, 295, 2077, 4739, 337, 1184, 281, 8114, 490, 264, 23520, 13], "avg_logprob": -0.18191963987691062, "compression_ratio": 1.7211538461538463, "no_speech_prob": 0.0, "words": [{"start": 798.44, "end": 798.74, "word": " And", "probability": 0.564453125}, {"start": 798.74, "end": 799.08, "word": " aspirin", "probability": 0.73291015625}, {"start": 799.08, "end": 799.28, "word": " is", "probability": 0.94189453125}, {"start": 799.28, "end": 799.54, "word": " given", "probability": 0.89794921875}, {"start": 799.54, "end": 799.82, "word": " to", "probability": 0.96435546875}, {"start": 799.82, "end": 800.14, "word": " each", "probability": 0.9501953125}, {"start": 800.14, "end": 800.5, "word": " of", "probability": 0.96728515625}, {"start": 800.5, "end": 800.7, "word": " the", "probability": 0.9228515625}, {"start": 800.7, "end": 801.16, "word": " 30", "probability": 0.904296875}, {"start": 801.16, "end": 801.58, "word": " randomly", "probability": 0.67041015625}, {"start": 801.58, "end": 802.16, "word": " selected", "probability": 0.8349609375}, {"start": 802.16, "end": 802.72, "word": " persons", "probability": 0.8974609375}, {"start": 802.72, "end": 803.12, "word": " suffering", "probability": 0.87548828125}, {"start": 803.12, "end": 803.52, "word": " from", "probability": 0.8955078125}, {"start": 803.52, "end": 803.84, "word": " headaches.", "probability": 0.92041015625}, {"start": 804.54, "end": 804.96, "word": " So", "probability": 0.9365234375}, {"start": 804.96, "end": 805.08, "word": " the", "probability": 0.71044921875}, {"start": 805.08, "end": 
805.34, "word": " total", "probability": 0.84228515625}, {"start": 805.34, "end": 805.66, "word": " sample", "probability": 0.94189453125}, {"start": 805.66, "end": 805.94, "word": " size", "probability": 0.85400390625}, {"start": 805.94, "end": 806.08, "word": " is", "probability": 0.837890625}, {"start": 806.08, "end": 806.48, "word": " 30,", "probability": 0.94873046875}, {"start": 806.7, "end": 807.22, "word": " because", "probability": 0.91796875}, {"start": 807.22, "end": 807.74, "word": " 15", "probability": 0.962890625}, {"start": 807.74, "end": 808.0, "word": " from", "probability": 0.6904296875}, {"start": 808.0, "end": 808.2, "word": " the", "probability": 0.919921875}, {"start": 808.2, "end": 808.5, "word": " first", "probability": 0.88330078125}, {"start": 808.5, "end": 809.16, "word": " company,", "probability": 0.9267578125}, {"start": 809.74, "end": 809.96, "word": " and", "probability": 0.93505859375}, {"start": 809.96, "end": 810.1, "word": " the", "probability": 0.55078125}, {"start": 810.1, "end": 810.32, "word": " second", "probability": 0.89306640625}, {"start": 810.32, "end": 810.6, "word": " for", "probability": 0.434814453125}, {"start": 810.6, "end": 810.78, "word": " the", "probability": 0.93115234375}, {"start": 810.78, "end": 811.04, "word": " simple", "probability": 0.310791015625}, {"start": 811.04, "end": 811.48, "word": " company.", "probability": 0.9130859375}, {"start": 811.86, "end": 812.06, "word": " So", "probability": 0.95947265625}, {"start": 812.06, "end": 812.26, "word": " they", "probability": 0.52392578125}, {"start": 812.26, "end": 812.64, "word": " are", "probability": 0.94140625}, {"start": 812.64, "end": 814.2, "word": " 30", "probability": 0.9013671875}, {"start": 814.2, "end": 815.24, "word": " selected", "probability": 0.86572265625}, {"start": 815.24, "end": 816.86, "word": " persons", "probability": 0.95849609375}, {"start": 816.86, "end": 817.2, "word": " who", "probability": 0.8837890625}, {"start": 817.2, 
"end": 817.46, "word": " are", "probability": 0.93798828125}, {"start": 817.46, "end": 817.84, "word": " suffering", "probability": 0.90185546875}, {"start": 817.84, "end": 818.22, "word": " from", "probability": 0.8896484375}, {"start": 818.22, "end": 818.5, "word": " headaches.", "probability": 0.931640625}, {"start": 819.58, "end": 819.96, "word": " So", "probability": 0.95556640625}, {"start": 819.96, "end": 820.12, "word": " we", "probability": 0.9306640625}, {"start": 820.12, "end": 820.28, "word": " have", "probability": 0.9482421875}, {"start": 820.28, "end": 820.76, "word": " information", "probability": 0.84814453125}, {"start": 820.76, "end": 821.08, "word": " about", "probability": 0.8974609375}, {"start": 821.08, "end": 821.42, "word": " number", "probability": 0.87744140625}, {"start": 821.42, "end": 821.6, "word": " of", "probability": 0.970703125}, {"start": 821.6, "end": 822.08, "word": " minutes", "probability": 0.93017578125}, {"start": 822.08, "end": 822.9, "word": " required", "probability": 0.8193359375}, {"start": 822.9, "end": 823.38, "word": " for", "probability": 0.951171875}, {"start": 823.38, "end": 823.82, "word": " each", "probability": 0.95166015625}, {"start": 823.82, "end": 824.84, "word": " to", "probability": 0.966796875}, {"start": 824.84, "end": 825.24, "word": " recover", "probability": 0.87841796875}, {"start": 825.24, "end": 825.74, "word": " from", "probability": 0.890625}, {"start": 825.74, "end": 825.94, "word": " the", "probability": 0.923828125}, {"start": 825.94, "end": 826.28, "word": " headache.", "probability": 0.853515625}], "temperature": 1.0}, {"id": 31, "seek": 85570, "start": 826.94, "end": 855.7, "text": " is recorded, the sample results are. So here we have two groups, two populations. Company is called excellent company and other one simple company. The information we have, the sample means are 8.4 for the excellent and 8.9 for the simple company. 
With the standard deviations for the sample are 2.05 and 2.14 respectively for", "tokens": [307, 8287, 11, 264, 6889, 3542, 366, 13, 407, 510, 321, 362, 732, 3935, 11, 732, 12822, 13, 13918, 307, 1219, 7103, 2237, 293, 661, 472, 2199, 2237, 13, 440, 1589, 321, 362, 11, 264, 6889, 1355, 366, 1649, 13, 19, 337, 264, 7103, 293, 1649, 13, 24, 337, 264, 2199, 2237, 13, 2022, 264, 3832, 31219, 763, 337, 264, 6889, 366, 568, 13, 13328, 293, 568, 13, 7271, 25009, 337], "avg_logprob": -0.22829860800670254, "compression_ratio": 1.703125, "no_speech_prob": 0.0, "words": [{"start": 826.94, "end": 827.22, "word": " is", "probability": 0.132568359375}, {"start": 827.22, "end": 827.72, "word": " recorded,", "probability": 0.86376953125}, {"start": 828.2, "end": 828.48, "word": " the", "probability": 0.505859375}, {"start": 828.48, "end": 828.88, "word": " sample", "probability": 0.8017578125}, {"start": 828.88, "end": 829.26, "word": " results", "probability": 0.9052734375}, {"start": 829.26, "end": 829.68, "word": " are.", "probability": 0.9013671875}, {"start": 830.32, "end": 830.54, "word": " So", "probability": 0.9228515625}, {"start": 830.54, "end": 830.7, "word": " here", "probability": 0.701171875}, {"start": 830.7, "end": 830.86, "word": " we", "probability": 0.87841796875}, {"start": 830.86, "end": 831.14, "word": " have", "probability": 0.9501953125}, {"start": 831.14, "end": 831.5, "word": " two", "probability": 0.8505859375}, {"start": 831.5, "end": 832.82, "word": " groups,", "probability": 0.92626953125}, {"start": 833.26, "end": 833.36, "word": " two", "probability": 0.93310546875}, {"start": 833.36, "end": 833.84, "word": " populations.", "probability": 0.9580078125}, {"start": 835.32, "end": 835.78, "word": " Company", "probability": 0.77197265625}, {"start": 835.78, "end": 836.02, "word": " is", "probability": 0.67578125}, {"start": 836.02, "end": 836.26, "word": " called", "probability": 0.8701171875}, {"start": 836.26, "end": 836.58, "word": " excellent", 
"probability": 0.5908203125}, {"start": 836.58, "end": 837.02, "word": " company", "probability": 0.9033203125}, {"start": 837.02, "end": 837.22, "word": " and", "probability": 0.82470703125}, {"start": 837.22, "end": 837.46, "word": " other", "probability": 0.392822265625}, {"start": 837.46, "end": 837.66, "word": " one", "probability": 0.86376953125}, {"start": 837.66, "end": 837.94, "word": " simple", "probability": 0.2496337890625}, {"start": 837.94, "end": 838.42, "word": " company.", "probability": 0.9169921875}, {"start": 839.12, "end": 839.38, "word": " The", "probability": 0.87109375}, {"start": 839.38, "end": 839.94, "word": " information", "probability": 0.8291015625}, {"start": 839.94, "end": 840.3, "word": " we", "probability": 0.94970703125}, {"start": 840.3, "end": 840.64, "word": " have,", "probability": 0.93408203125}, {"start": 841.6, "end": 841.76, "word": " the", "probability": 0.900390625}, {"start": 841.76, "end": 842.08, "word": " sample", "probability": 0.88623046875}, {"start": 842.08, "end": 842.5, "word": " means", "probability": 0.912109375}, {"start": 842.5, "end": 843.28, "word": " are", "probability": 0.8544921875}, {"start": 843.28, "end": 843.76, "word": " 8", "probability": 0.94287109375}, {"start": 843.76, "end": 844.32, "word": ".4", "probability": 0.99365234375}, {"start": 844.32, "end": 844.68, "word": " for", "probability": 0.91162109375}, {"start": 844.68, "end": 844.84, "word": " the", "probability": 0.9013671875}, {"start": 844.84, "end": 845.18, "word": " excellent", "probability": 0.845703125}, {"start": 845.18, "end": 846.44, "word": " and", "probability": 0.7470703125}, {"start": 846.44, "end": 846.74, "word": " 8", "probability": 0.99462890625}, {"start": 846.74, "end": 847.18, "word": ".9", "probability": 0.99462890625}, {"start": 847.18, "end": 847.38, "word": " for", "probability": 0.94921875}, {"start": 847.38, "end": 847.54, "word": " the", "probability": 0.921875}, {"start": 847.54, "end": 847.8, "word": " 
simple", "probability": 0.86376953125}, {"start": 847.8, "end": 848.26, "word": " company.", "probability": 0.91015625}, {"start": 849.04, "end": 849.28, "word": " With", "probability": 0.79052734375}, {"start": 849.28, "end": 849.44, "word": " the", "probability": 0.41748046875}, {"start": 849.44, "end": 849.7, "word": " standard", "probability": 0.943359375}, {"start": 849.7, "end": 850.4, "word": " deviations", "probability": 0.918212890625}, {"start": 850.4, "end": 851.36, "word": " for", "probability": 0.86181640625}, {"start": 851.36, "end": 851.54, "word": " the", "probability": 0.92578125}, {"start": 851.54, "end": 851.9, "word": " sample", "probability": 0.859375}, {"start": 851.9, "end": 852.92, "word": " are", "probability": 0.86376953125}, {"start": 852.92, "end": 853.28, "word": " 2", "probability": 0.99072265625}, {"start": 853.28, "end": 853.8, "word": ".05", "probability": 0.985595703125}, {"start": 853.8, "end": 854.12, "word": " and", "probability": 0.93896484375}, {"start": 854.12, "end": 854.3, "word": " 2", "probability": 0.9970703125}, {"start": 854.3, "end": 854.8, "word": ".14", "probability": 0.99267578125}, {"start": 854.8, "end": 855.38, "word": " respectively", "probability": 0.86083984375}, {"start": 855.38, "end": 855.7, "word": " for", "probability": 0.82177734375}], "temperature": 1.0}, {"id": 32, "seek": 88300, "start": 857.2, "end": 883.0, "text": " excellent and simple and as we mentioned the sample sizes are the same are equal 15 and 15. Now we are going to test at five percent level of significance test whether to determine whether excellence aspirin cure headaches significantly faster than simple aspirin. 
Now faster it means", "tokens": [7103, 293, 2199, 293, 382, 321, 2835, 264, 6889, 11602, 366, 264, 912, 366, 2681, 2119, 293, 2119, 13, 823, 321, 366, 516, 281, 1500, 412, 1732, 3043, 1496, 295, 17687, 1500, 1968, 281, 6997, 1968, 21268, 20003, 259, 13698, 35046, 10591, 4663, 813, 2199, 20003, 259, 13, 823, 4663, 309, 1355], "avg_logprob": -0.21521226640017527, "compression_ratio": 1.6666666666666667, "no_speech_prob": 0.0, "words": [{"start": 857.2, "end": 857.74, "word": " excellent", "probability": 0.35546875}, {"start": 857.74, "end": 858.04, "word": " and", "probability": 0.79541015625}, {"start": 858.04, "end": 858.34, "word": " simple", "probability": 0.93603515625}, {"start": 858.34, "end": 859.04, "word": " and", "probability": 0.447509765625}, {"start": 859.04, "end": 859.26, "word": " as", "probability": 0.90478515625}, {"start": 859.26, "end": 859.4, "word": " we", "probability": 0.87646484375}, {"start": 859.4, "end": 859.84, "word": " mentioned", "probability": 0.61328125}, {"start": 859.84, "end": 860.08, "word": " the", "probability": 0.76123046875}, {"start": 860.08, "end": 860.3, "word": " sample", "probability": 0.693359375}, {"start": 860.3, "end": 860.72, "word": " sizes", "probability": 0.9140625}, {"start": 860.72, "end": 861.06, "word": " are", "probability": 0.93701171875}, {"start": 861.06, "end": 861.22, "word": " the", "probability": 0.876953125}, {"start": 861.22, "end": 861.48, "word": " same", "probability": 0.91357421875}, {"start": 861.48, "end": 861.7, "word": " are", "probability": 0.43994140625}, {"start": 861.7, "end": 862.04, "word": " equal", "probability": 0.8994140625}, {"start": 862.04, "end": 862.5, "word": " 15", "probability": 0.75}, {"start": 862.5, "end": 862.66, "word": " and", "probability": 0.8779296875}, {"start": 862.66, "end": 863.04, "word": " 15.", "probability": 0.94580078125}, {"start": 864.5, "end": 864.92, "word": " Now", "probability": 0.82666015625}, {"start": 864.92, "end": 865.1, "word": " we", 
"probability": 0.85693359375}, {"start": 865.1, "end": 865.24, "word": " are", "probability": 0.94140625}, {"start": 865.24, "end": 865.54, "word": " going", "probability": 0.94287109375}, {"start": 865.54, "end": 865.74, "word": " to", "probability": 0.96826171875}, {"start": 865.74, "end": 866.08, "word": " test", "probability": 0.888671875}, {"start": 866.08, "end": 866.38, "word": " at", "probability": 0.8359375}, {"start": 866.38, "end": 866.7, "word": " five", "probability": 0.40234375}, {"start": 866.7, "end": 867.34, "word": " percent", "probability": 0.93603515625}, {"start": 867.34, "end": 867.62, "word": " level", "probability": 0.94384765625}, {"start": 867.62, "end": 867.8, "word": " of", "probability": 0.970703125}, {"start": 867.8, "end": 868.44, "word": " significance", "probability": 0.96240234375}, {"start": 868.44, "end": 869.7, "word": " test", "probability": 0.609375}, {"start": 869.7, "end": 871.54, "word": " whether", "probability": 0.73095703125}, {"start": 871.54, "end": 872.54, "word": " to", "probability": 0.70947265625}, {"start": 872.54, "end": 872.94, "word": " determine", "probability": 0.90869140625}, {"start": 872.94, "end": 873.26, "word": " whether", "probability": 0.8525390625}, {"start": 873.26, "end": 874.02, "word": " excellence", "probability": 0.802734375}, {"start": 874.02, "end": 874.84, "word": " aspirin", "probability": 0.943115234375}, {"start": 874.84, "end": 875.56, "word": " cure", "probability": 0.84130859375}, {"start": 875.56, "end": 876.0, "word": " headaches", "probability": 0.93408203125}, {"start": 876.0, "end": 877.02, "word": " significantly", "probability": 0.92333984375}, {"start": 877.02, "end": 877.68, "word": " faster", "probability": 0.88720703125}, {"start": 877.68, "end": 878.22, "word": " than", "probability": 0.947265625}, {"start": 878.22, "end": 879.14, "word": " simple", "probability": 0.720703125}, {"start": 879.14, "end": 880.48, "word": " aspirin.", "probability": 0.91357421875}, {"start": 
881.32, "end": 881.62, "word": " Now", "probability": 0.83837890625}, {"start": 881.62, "end": 882.26, "word": " faster", "probability": 0.88525390625}, {"start": 882.26, "end": 882.52, "word": " it", "probability": 0.8984375}, {"start": 882.52, "end": 883.0, "word": " means", "probability": 0.93408203125}], "temperature": 1.0}, {"id": 33, "seek": 90550, "start": 883.54, "end": 905.5, "text": " Better. Better it means the time required to relieve headache is smaller there. So you have to be careful in this case. If we assume that Mu1 is the mean time required for excellent aspirin. So Mu1 for excellent.", "tokens": [15753, 13, 15753, 309, 1355, 264, 565, 4739, 281, 30450, 23520, 307, 4356, 456, 13, 407, 291, 362, 281, 312, 5026, 294, 341, 1389, 13, 759, 321, 6552, 300, 15601, 16, 307, 264, 914, 565, 4739, 337, 7103, 20003, 259, 13, 407, 15601, 16, 337, 7103, 13], "avg_logprob": -0.2405598908662796, "compression_ratio": 1.5106382978723405, "no_speech_prob": 0.0, "words": [{"start": 883.54, "end": 884.04, "word": " Better.", "probability": 0.34619140625}, {"start": 885.5, "end": 886.26, "word": " Better", "probability": 0.68798828125}, {"start": 886.26, "end": 886.42, "word": " it", "probability": 0.53466796875}, {"start": 886.42, "end": 886.7, "word": " means", "probability": 0.92578125}, {"start": 886.7, "end": 886.9, "word": " the", "probability": 0.7919921875}, {"start": 886.9, "end": 887.16, "word": " time", "probability": 0.88525390625}, {"start": 887.16, "end": 887.62, "word": " required", "probability": 0.7763671875}, {"start": 887.62, "end": 887.96, "word": " to", "probability": 0.91943359375}, {"start": 887.96, "end": 888.76, "word": " relieve", "probability": 0.86181640625}, {"start": 888.76, "end": 889.18, "word": " headache", "probability": 0.87060546875}, {"start": 889.18, "end": 889.48, "word": " is", "probability": 0.95751953125}, {"start": 889.48, "end": 890.04, "word": " smaller", "probability": 0.87060546875}, {"start": 890.04, "end": 890.34, 
"word": " there.", "probability": 0.406494140625}, {"start": 891.62, "end": 892.38, "word": " So", "probability": 0.923828125}, {"start": 892.38, "end": 892.6, "word": " you", "probability": 0.64013671875}, {"start": 892.6, "end": 892.74, "word": " have", "probability": 0.9501953125}, {"start": 892.74, "end": 892.82, "word": " to", "probability": 0.97265625}, {"start": 892.82, "end": 892.96, "word": " be", "probability": 0.95947265625}, {"start": 892.96, "end": 893.32, "word": " careful", "probability": 0.9658203125}, {"start": 893.32, "end": 893.64, "word": " in", "probability": 0.93798828125}, {"start": 893.64, "end": 893.92, "word": " this", "probability": 0.9501953125}, {"start": 893.92, "end": 894.28, "word": " case.", "probability": 0.91845703125}, {"start": 895.14, "end": 895.5, "word": " If", "probability": 0.9599609375}, {"start": 895.5, "end": 895.68, "word": " we", "probability": 0.95556640625}, {"start": 895.68, "end": 896.12, "word": " assume", "probability": 0.89990234375}, {"start": 896.12, "end": 896.56, "word": " that", "probability": 0.9384765625}, {"start": 896.56, "end": 897.9, "word": " Mu1", "probability": 0.3778076171875}, {"start": 897.9, "end": 900.22, "word": " is", "probability": 0.93017578125}, {"start": 900.22, "end": 900.4, "word": " the", "probability": 0.92626953125}, {"start": 900.4, "end": 900.54, "word": " mean", "probability": 0.939453125}, {"start": 900.54, "end": 900.8, "word": " time", "probability": 0.86181640625}, {"start": 900.8, "end": 901.18, "word": " required", "probability": 0.8232421875}, {"start": 901.18, "end": 901.6, "word": " for", "probability": 0.95166015625}, {"start": 901.6, "end": 902.68, "word": " excellent", "probability": 0.888671875}, {"start": 902.68, "end": 903.36, "word": " aspirin.", "probability": 0.968505859375}, {"start": 903.88, "end": 904.42, "word": " So", "probability": 0.94482421875}, {"start": 904.42, "end": 904.84, "word": " Mu1", "probability": 0.901611328125}, {"start": 904.84, "end": 
905.12, "word": " for", "probability": 0.9052734375}, {"start": 905.12, "end": 905.5, "word": " excellent.", "probability": 0.783203125}], "temperature": 1.0}, {"id": 34, "seek": 94416, "start": 917.26, "end": 944.16, "text": " So Me1, mean time required for excellence aspirin, and Me2, mean time required for simple aspirin. So each one, Me1, is smaller than Me3. Since Me1 represents the time required", "tokens": [407, 1923, 16, 11, 914, 565, 4739, 337, 21268, 20003, 259, 11, 293, 1923, 17, 11, 914, 565, 4739, 337, 2199, 20003, 259, 13, 407, 1184, 472, 11, 1923, 16, 11, 307, 4356, 813, 1923, 18, 13, 4162, 1923, 16, 8855, 264, 565, 4739], "avg_logprob": -0.22465277115503948, "compression_ratio": 1.5391304347826087, "no_speech_prob": 0.0, "words": [{"start": 917.26, "end": 917.5, "word": " So", "probability": 0.9228515625}, {"start": 917.5, "end": 917.92, "word": " Me1,", "probability": 0.55316162109375}, {"start": 918.54, "end": 918.84, "word": " mean", "probability": 0.7568359375}, {"start": 918.84, "end": 919.12, "word": " time", "probability": 0.82421875}, {"start": 919.12, "end": 919.54, "word": " required", "probability": 0.8173828125}, {"start": 919.54, "end": 919.82, "word": " for", "probability": 0.94873046875}, {"start": 919.82, "end": 920.32, "word": " excellence", "probability": 0.3564453125}, {"start": 920.32, "end": 921.54, "word": " aspirin,", "probability": 0.927734375}, {"start": 922.78, "end": 923.22, "word": " and", "probability": 0.92431640625}, {"start": 923.22, "end": 923.6, "word": " Me2,", "probability": 0.98388671875}, {"start": 923.68, "end": 923.82, "word": " mean", "probability": 0.9208984375}, {"start": 923.82, "end": 924.06, "word": " time", "probability": 0.87548828125}, {"start": 924.06, "end": 924.48, "word": " required", "probability": 0.830078125}, {"start": 924.48, "end": 924.94, "word": " for", "probability": 0.951171875}, {"start": 924.94, "end": 925.66, "word": " simple", "probability": 0.794921875}, {"start": 925.66, "end": 
926.2, "word": " aspirin.", "probability": 0.96142578125}, {"start": 928.5, "end": 928.86, "word": " So", "probability": 0.9404296875}, {"start": 928.86, "end": 929.42, "word": " each", "probability": 0.794921875}, {"start": 929.42, "end": 929.76, "word": " one,", "probability": 0.8994140625}, {"start": 930.58, "end": 931.08, "word": " Me1,", "probability": 0.97314453125}, {"start": 931.34, "end": 931.7, "word": " is", "probability": 0.9482421875}, {"start": 931.7, "end": 932.08, "word": " smaller", "probability": 0.86669921875}, {"start": 932.08, "end": 932.38, "word": " than", "probability": 0.943359375}, {"start": 932.38, "end": 932.76, "word": " Me3.", "probability": 0.75}, {"start": 941.14, "end": 941.98, "word": " Since", "probability": 0.78466796875}, {"start": 941.98, "end": 942.74, "word": " Me1", "probability": 0.970703125}, {"start": 942.74, "end": 943.3, "word": " represents", "probability": 0.88671875}, {"start": 943.3, "end": 943.52, "word": " the", "probability": 0.92529296875}, {"start": 943.52, "end": 943.72, "word": " time", "probability": 0.888671875}, {"start": 943.72, "end": 944.16, "word": " required", "probability": 0.83837890625}], "temperature": 1.0}, {"id": 35, "seek": 97472, "start": 945.32, "end": 974.72, "text": " to relieve headache by using excellent aspirin and this one is faster faster it means it takes less time in order to recover from headache so mu1 should be smaller than mu2 we are going to use T T is x1 bar minus x2 bar minus the difference between the two population proportions divided by", "tokens": [281, 30450, 23520, 538, 1228, 7103, 20003, 259, 293, 341, 472, 307, 4663, 4663, 309, 1355, 309, 2516, 1570, 565, 294, 1668, 281, 8114, 490, 23520, 370, 2992, 16, 820, 312, 4356, 813, 2992, 17, 321, 366, 516, 281, 764, 314, 314, 307, 2031, 16, 2159, 3175, 2031, 17, 2159, 3175, 264, 2649, 1296, 264, 732, 4415, 32482, 6666, 538], "avg_logprob": -0.23091700233396936, "compression_ratio": 1.5815217391304348, "no_speech_prob": 0.0, 
"words": [{"start": 945.32, "end": 945.56, "word": " to", "probability": 0.308349609375}, {"start": 945.56, "end": 945.96, "word": " relieve", "probability": 0.681640625}, {"start": 945.96, "end": 946.52, "word": " headache", "probability": 0.841796875}, {"start": 946.52, "end": 947.78, "word": " by", "probability": 0.82177734375}, {"start": 947.78, "end": 948.16, "word": " using", "probability": 0.9091796875}, {"start": 948.16, "end": 948.62, "word": " excellent", "probability": 0.85595703125}, {"start": 948.62, "end": 949.1, "word": " aspirin", "probability": 0.82080078125}, {"start": 949.1, "end": 951.1, "word": " and", "probability": 0.50732421875}, {"start": 951.1, "end": 951.32, "word": " this", "probability": 0.947265625}, {"start": 951.32, "end": 951.5, "word": " one", "probability": 0.9228515625}, {"start": 951.5, "end": 951.68, "word": " is", "probability": 0.939453125}, {"start": 951.68, "end": 952.22, "word": " faster", "probability": 0.87353515625}, {"start": 952.22, "end": 952.96, "word": " faster", "probability": 0.44482421875}, {"start": 952.96, "end": 953.4, "word": " it", "probability": 0.8349609375}, {"start": 953.4, "end": 953.8, "word": " means", "probability": 0.8984375}, {"start": 953.8, "end": 953.98, "word": " it", "probability": 0.8740234375}, {"start": 953.98, "end": 954.26, "word": " takes", "probability": 0.791015625}, {"start": 954.26, "end": 954.6, "word": " less", "probability": 0.931640625}, {"start": 954.6, "end": 955.06, "word": " time", "probability": 0.8876953125}, {"start": 955.06, "end": 955.46, "word": " in", "probability": 0.923828125}, {"start": 955.46, "end": 955.62, "word": " order", "probability": 0.9296875}, {"start": 955.62, "end": 955.84, "word": " to", "probability": 0.966796875}, {"start": 955.84, "end": 956.18, "word": " recover", "probability": 0.89306640625}, {"start": 956.18, "end": 957.3, "word": " from", "probability": 0.892578125}, {"start": 957.3, "end": 957.68, "word": " headache", "probability": 
0.83642578125}, {"start": 957.68, "end": 958.82, "word": " so", "probability": 0.66552734375}, {"start": 958.82, "end": 959.2, "word": " mu1", "probability": 0.4068603515625}, {"start": 959.2, "end": 959.48, "word": " should", "probability": 0.95068359375}, {"start": 959.48, "end": 959.62, "word": " be", "probability": 0.95751953125}, {"start": 959.62, "end": 960.02, "word": " smaller", "probability": 0.84716796875}, {"start": 960.02, "end": 960.3, "word": " than", "probability": 0.90576171875}, {"start": 960.3, "end": 960.84, "word": " mu2", "probability": 0.702392578125}, {"start": 960.84, "end": 963.18, "word": " we", "probability": 0.65380859375}, {"start": 963.18, "end": 963.34, "word": " are", "probability": 0.9228515625}, {"start": 963.34, "end": 963.54, "word": " going", "probability": 0.94677734375}, {"start": 963.54, "end": 963.66, "word": " to", "probability": 0.96630859375}, {"start": 963.66, "end": 963.92, "word": " use", "probability": 0.8798828125}, {"start": 963.92, "end": 964.16, "word": " T", "probability": 0.69677734375}, {"start": 964.16, "end": 965.56, "word": " T", "probability": 0.55908203125}, {"start": 965.56, "end": 965.72, "word": " is", "probability": 0.87353515625}, {"start": 965.72, "end": 966.2, "word": " x1", "probability": 0.68359375}, {"start": 966.2, "end": 966.4, "word": " bar", "probability": 0.9326171875}, {"start": 966.4, "end": 966.7, "word": " minus", "probability": 0.94091796875}, {"start": 966.7, "end": 967.14, "word": " x2", "probability": 0.986328125}, {"start": 967.14, "end": 967.44, "word": " bar", "probability": 0.9375}, {"start": 967.44, "end": 969.48, "word": " minus", "probability": 0.9833984375}, {"start": 969.48, "end": 970.2, "word": " the", "probability": 0.91259765625}, {"start": 970.2, "end": 970.68, "word": " difference", "probability": 0.857421875}, {"start": 970.68, "end": 971.06, "word": " between", "probability": 0.884765625}, {"start": 971.06, "end": 971.22, "word": " the", "probability": 
0.88134765625}, {"start": 971.22, "end": 971.38, "word": " two", "probability": 0.9013671875}, {"start": 971.38, "end": 971.82, "word": " population", "probability": 0.6845703125}, {"start": 971.82, "end": 972.56, "word": " proportions", "probability": 0.681640625}, {"start": 972.56, "end": 974.32, "word": " divided", "probability": 0.69677734375}, {"start": 974.32, "end": 974.72, "word": " by", "probability": 0.97216796875}], "temperature": 1.0}, {"id": 36, "seek": 100403, "start": 977.55, "end": 1004.03, "text": " S squared B times 1 over N1 plus 1 over N2. S squared B N1 minus 1 S1 squared plus N2 minus 1 S2 squared divided by N1 plus N2 minus 1. Now, a simple calculation will give the following results.", "tokens": [318, 8889, 363, 1413, 502, 670, 426, 16, 1804, 502, 670, 426, 17, 13, 318, 8889, 363, 426, 16, 3175, 502, 318, 16, 8889, 1804, 426, 17, 3175, 502, 318, 17, 8889, 6666, 538, 426, 16, 1804, 426, 17, 3175, 502, 13, 823, 11, 257, 2199, 17108, 486, 976, 264, 3480, 3542, 13], "avg_logprob": -0.19097222415385423, "compression_ratio": 1.5853658536585367, "no_speech_prob": 0.0, "words": [{"start": 977.55, "end": 977.93, "word": " S", "probability": 0.379638671875}, {"start": 977.93, "end": 978.39, "word": " squared", "probability": 0.41455078125}, {"start": 978.39, "end": 979.07, "word": " B", "probability": 0.54638671875}, {"start": 979.07, "end": 979.47, "word": " times", "probability": 0.8974609375}, {"start": 979.47, "end": 979.87, "word": " 1", "probability": 0.71630859375}, {"start": 979.87, "end": 980.63, "word": " over", "probability": 0.86181640625}, {"start": 980.63, "end": 980.95, "word": " N1", "probability": 0.754150390625}, {"start": 980.95, "end": 981.23, "word": " plus", "probability": 0.91943359375}, {"start": 981.23, "end": 981.49, "word": " 1", "probability": 0.9306640625}, {"start": 981.49, "end": 981.73, "word": " over", "probability": 0.8740234375}, {"start": 981.73, "end": 982.07, "word": " N2.", "probability": 0.843017578125}, 
{"start": 985.13, "end": 985.89, "word": " S", "probability": 0.9140625}, {"start": 985.89, "end": 986.21, "word": " squared", "probability": 0.7919921875}, {"start": 986.21, "end": 986.53, "word": " B", "probability": 0.98046875}, {"start": 986.53, "end": 990.47, "word": " N1", "probability": 0.767333984375}, {"start": 990.47, "end": 990.79, "word": " minus", "probability": 0.97705078125}, {"start": 990.79, "end": 991.17, "word": " 1", "probability": 0.958984375}, {"start": 991.17, "end": 991.89, "word": " S1", "probability": 0.6566162109375}, {"start": 991.89, "end": 992.27, "word": " squared", "probability": 0.81982421875}, {"start": 992.27, "end": 992.71, "word": " plus", "probability": 0.94921875}, {"start": 992.71, "end": 993.67, "word": " N2", "probability": 0.98876953125}, {"start": 993.67, "end": 993.95, "word": " minus", "probability": 0.9833984375}, {"start": 993.95, "end": 994.33, "word": " 1", "probability": 0.97021484375}, {"start": 994.33, "end": 994.89, "word": " S2", "probability": 0.962646484375}, {"start": 994.89, "end": 995.33, "word": " squared", "probability": 0.83056640625}, {"start": 995.33, "end": 996.67, "word": " divided", "probability": 0.494384765625}, {"start": 996.67, "end": 996.93, "word": " by", "probability": 0.962890625}, {"start": 996.93, "end": 997.35, "word": " N1", "probability": 0.99169921875}, {"start": 997.35, "end": 997.65, "word": " plus", "probability": 0.96044921875}, {"start": 997.65, "end": 998.07, "word": " N2", "probability": 0.99755859375}, {"start": 998.07, "end": 998.35, "word": " minus", "probability": 0.98046875}, {"start": 998.35, "end": 998.57, "word": " 1.", "probability": 0.5390625}, {"start": 1000.29, "end": 1001.05, "word": " Now,", "probability": 0.9306640625}, {"start": 1001.63, "end": 1001.77, "word": " a", "probability": 0.64990234375}, {"start": 1001.77, "end": 1001.99, "word": " simple", "probability": 0.94140625}, {"start": 1001.99, "end": 1002.57, "word": " calculation", "probability": 
0.92919921875}, {"start": 1002.57, "end": 1002.83, "word": " will", "probability": 0.8759765625}, {"start": 1002.83, "end": 1003.07, "word": " give", "probability": 0.88818359375}, {"start": 1003.07, "end": 1003.25, "word": " the", "probability": 0.91845703125}, {"start": 1003.25, "end": 1003.49, "word": " following", "probability": 0.89697265625}, {"start": 1003.49, "end": 1004.03, "word": " results.", "probability": 0.8505859375}], "temperature": 1.0}, {"id": 37, "seek": 102959, "start": 1019.66, "end": 1029.6, "text": " So again, we have this data. Just plug this information here to get the value of S square B. And finally, you will end", "tokens": [407, 797, 11, 321, 362, 341, 1412, 13, 1449, 5452, 341, 1589, 510, 281, 483, 264, 2158, 295, 318, 3732, 363, 13, 400, 2721, 11, 291, 486, 917], "avg_logprob": -0.32650861246832485, "compression_ratio": 1.1553398058252426, "no_speech_prob": 0.0, "words": [{"start": 1019.66, "end": 1020.26, "word": " So", "probability": 0.5625}, {"start": 1020.26, "end": 1020.54, "word": " again,", "probability": 0.7060546875}, {"start": 1020.6, "end": 1020.7, "word": " we", "probability": 0.94921875}, {"start": 1020.7, "end": 1021.0, "word": " have", "probability": 0.9501953125}, {"start": 1021.0, "end": 1021.54, "word": " this", "probability": 0.916015625}, {"start": 1021.54, "end": 1021.92, "word": " data.", "probability": 0.95166015625}, {"start": 1022.26, "end": 1022.48, "word": " Just", "probability": 0.8671875}, {"start": 1022.48, "end": 1022.82, "word": " plug", "probability": 0.77880859375}, {"start": 1022.82, "end": 1023.08, "word": " this", "probability": 0.9267578125}, {"start": 1023.08, "end": 1023.56, "word": " information", "probability": 0.8408203125}, {"start": 1023.56, "end": 1023.84, "word": " here", "probability": 0.69482421875}, {"start": 1023.84, "end": 1024.02, "word": " to", "probability": 0.94287109375}, {"start": 1024.02, "end": 1024.32, "word": " get", "probability": 0.94580078125}, {"start": 1024.32, "end": 
1025.4, "word": " the", "probability": 0.916015625}, {"start": 1025.4, "end": 1025.72, "word": " value", "probability": 0.97021484375}, {"start": 1025.72, "end": 1025.88, "word": " of", "probability": 0.46923828125}, {"start": 1025.88, "end": 1026.06, "word": " S", "probability": 0.640625}, {"start": 1026.06, "end": 1026.36, "word": " square", "probability": 0.332275390625}, {"start": 1026.36, "end": 1026.62, "word": " B.", "probability": 0.6591796875}, {"start": 1027.74, "end": 1028.1, "word": " And", "probability": 0.94287109375}, {"start": 1028.1, "end": 1028.62, "word": " finally,", "probability": 0.84130859375}, {"start": 1028.82, "end": 1028.96, "word": " you", "probability": 0.8408203125}, {"start": 1028.96, "end": 1029.16, "word": " will", "probability": 0.896484375}, {"start": 1029.16, "end": 1029.6, "word": " end", "probability": 0.91357421875}], "temperature": 1.0}, {"id": 38, "seek": 105592, "start": 1030.72, "end": 1055.92, "text": " with this result. S squared B equals 2.095 squared. 
Your T statistic equals negative", "tokens": [365, 341, 1874, 13, 318, 8889, 363, 6915, 568, 13, 13811, 20, 8889, 13, 2260, 314, 29588, 6915, 3671], "avg_logprob": -0.3142578169703484, "compression_ratio": 1.0897435897435896, "no_speech_prob": 0.0, "words": [{"start": 1030.72, "end": 1031.22, "word": " with", "probability": 0.324951171875}, {"start": 1031.22, "end": 1032.68, "word": " this", "probability": 0.9208984375}, {"start": 1032.68, "end": 1033.12, "word": " result.", "probability": 0.79736328125}, {"start": 1038.22, "end": 1039.54, "word": " S", "probability": 0.72021484375}, {"start": 1039.54, "end": 1039.88, "word": " squared", "probability": 0.39501953125}, {"start": 1039.88, "end": 1040.1, "word": " B", "probability": 0.5634765625}, {"start": 1040.1, "end": 1040.62, "word": " equals", "probability": 0.86474609375}, {"start": 1040.62, "end": 1044.92, "word": " 2", "probability": 0.8310546875}, {"start": 1044.92, "end": 1046.58, "word": ".095", "probability": 0.90185546875}, {"start": 1046.58, "end": 1047.24, "word": " squared.", "probability": 0.87353515625}, {"start": 1050.14, "end": 1050.56, "word": " Your", "probability": 0.87939453125}, {"start": 1050.56, "end": 1050.72, "word": " T", "probability": 0.7900390625}, {"start": 1050.72, "end": 1051.24, "word": " statistic", "probability": 0.64208984375}, {"start": 1051.24, "end": 1054.06, "word": " equals", "probability": 0.89794921875}, {"start": 1054.06, "end": 1055.92, "word": " negative", "probability": 0.9267578125}], "temperature": 1.0}, {"id": 39, "seek": 109121, "start": 1062.79, "end": 1091.21, "text": " So that's your T-statistic value. So just plug the values in 1 and 2, this 1 squared and this 2 squared into this equation, you will get this value. So 2.059 squared, that is 4.239. 
Here you can use either the critical value approach,", "tokens": [407, 300, 311, 428, 314, 12, 19435, 3142, 2158, 13, 407, 445, 5452, 264, 4190, 294, 502, 293, 568, 11, 341, 502, 8889, 293, 341, 568, 8889, 666, 341, 5367, 11, 291, 486, 483, 341, 2158, 13, 407, 568, 13, 13328, 24, 8889, 11, 300, 307, 1017, 13, 9356, 24, 13, 1692, 291, 393, 764, 2139, 264, 4924, 2158, 3109, 11], "avg_logprob": -0.21685988624249736, "compression_ratio": 1.5161290322580645, "no_speech_prob": 0.0, "words": [{"start": 1062.79, "end": 1063.07, "word": " So", "probability": 0.71630859375}, {"start": 1063.07, "end": 1063.31, "word": " that's", "probability": 0.84375}, {"start": 1063.31, "end": 1063.55, "word": " your", "probability": 0.8896484375}, {"start": 1063.55, "end": 1063.71, "word": " T", "probability": 0.48583984375}, {"start": 1063.71, "end": 1064.21, "word": "-statistic", "probability": 0.67822265625}, {"start": 1064.21, "end": 1064.49, "word": " value.", "probability": 0.85400390625}, {"start": 1067.11, "end": 1067.69, "word": " So", "probability": 0.9169921875}, {"start": 1067.69, "end": 1067.95, "word": " just", "probability": 0.75732421875}, {"start": 1067.95, "end": 1068.21, "word": " plug", "probability": 0.81640625}, {"start": 1068.21, "end": 1068.37, "word": " the", "probability": 0.86962890625}, {"start": 1068.37, "end": 1068.69, "word": " values", "probability": 0.91552734375}, {"start": 1068.69, "end": 1068.89, "word": " in", "probability": 0.63330078125}, {"start": 1068.89, "end": 1069.09, "word": " 1", "probability": 0.3955078125}, {"start": 1069.09, "end": 1069.33, "word": " and", "probability": 0.91064453125}, {"start": 1069.33, "end": 1069.61, "word": " 2,", "probability": 0.990234375}, {"start": 1069.91, "end": 1070.15, "word": " this", "probability": 0.4013671875}, {"start": 1070.15, "end": 1070.37, "word": " 1", "probability": 0.59423828125}, {"start": 1070.37, "end": 1070.71, "word": " squared", "probability": 0.77099609375}, {"start": 1070.71, "end": 1070.87, 
"word": " and", "probability": 0.71533203125}, {"start": 1070.87, "end": 1071.03, "word": " this", "probability": 0.90380859375}, {"start": 1071.03, "end": 1071.21, "word": " 2", "probability": 0.98486328125}, {"start": 1071.21, "end": 1071.45, "word": " squared", "probability": 0.86572265625}, {"start": 1071.45, "end": 1071.63, "word": " into", "probability": 0.48291015625}, {"start": 1071.63, "end": 1071.83, "word": " this", "probability": 0.943359375}, {"start": 1071.83, "end": 1072.31, "word": " equation,", "probability": 0.982421875}, {"start": 1072.63, "end": 1072.81, "word": " you", "probability": 0.92822265625}, {"start": 1072.81, "end": 1072.93, "word": " will", "probability": 0.88427734375}, {"start": 1072.93, "end": 1073.13, "word": " get", "probability": 0.9404296875}, {"start": 1073.13, "end": 1073.35, "word": " this", "probability": 0.94580078125}, {"start": 1073.35, "end": 1073.67, "word": " value.", "probability": 0.9716796875}, {"start": 1074.49, "end": 1075.07, "word": " So", "probability": 0.9580078125}, {"start": 1075.07, "end": 1075.71, "word": " 2", "probability": 0.8076171875}, {"start": 1075.71, "end": 1077.87, "word": ".059", "probability": 0.9666341145833334}, {"start": 1077.87, "end": 1078.49, "word": " squared,", "probability": 0.82373046875}, {"start": 1079.67, "end": 1080.23, "word": " that", "probability": 0.90380859375}, {"start": 1080.23, "end": 1080.61, "word": " is", "probability": 0.943359375}, {"start": 1080.61, "end": 1081.85, "word": " 4", "probability": 0.9853515625}, {"start": 1081.85, "end": 1082.97, "word": ".239.", "probability": 0.9851888020833334}, {"start": 1087.67, "end": 1088.25, "word": " Here", "probability": 0.833984375}, {"start": 1088.25, "end": 1088.37, "word": " you", "probability": 0.7490234375}, {"start": 1088.37, "end": 1088.55, "word": " can", "probability": 0.94482421875}, {"start": 1088.55, "end": 1088.79, "word": " use", "probability": 0.8603515625}, {"start": 1088.79, "end": 1089.11, "word": " either", 
"probability": 0.93994140625}, {"start": 1089.11, "end": 1089.91, "word": " the", "probability": 0.89013671875}, {"start": 1089.91, "end": 1090.33, "word": " critical", "probability": 0.91796875}, {"start": 1090.33, "end": 1090.69, "word": " value", "probability": 0.9482421875}, {"start": 1090.69, "end": 1091.21, "word": " approach,", "probability": 0.91455078125}], "temperature": 1.0}, {"id": 40, "seek": 111570, "start": 1091.86, "end": 1115.7, "text": " Or B value. Let's do a critical value. Since the alternative is the lower tail, one-sided lower tail, so your B value, your critical value is negative, T alpha, and there is a freedom.", "tokens": [1610, 363, 2158, 13, 961, 311, 360, 257, 4924, 2158, 13, 4162, 264, 8535, 307, 264, 3126, 6838, 11, 472, 12, 30941, 3126, 6838, 11, 370, 428, 363, 2158, 11, 428, 4924, 2158, 307, 3671, 11, 314, 8961, 11, 293, 456, 307, 257, 5645, 13], "avg_logprob": -0.3075747386268947, "compression_ratio": 1.5040650406504066, "no_speech_prob": 0.0, "words": [{"start": 1091.86, "end": 1092.2, "word": " Or", "probability": 0.181884765625}, {"start": 1092.2, "end": 1092.38, "word": " B", "probability": 0.58837890625}, {"start": 1092.38, "end": 1092.72, "word": " value.", "probability": 0.5791015625}, {"start": 1094.6, "end": 1095.24, "word": " Let's", "probability": 0.8974609375}, {"start": 1095.24, "end": 1095.58, "word": " do", "probability": 0.82568359375}, {"start": 1095.58, "end": 1096.5, "word": " a", "probability": 0.369873046875}, {"start": 1096.5, "end": 1096.8, "word": " critical", "probability": 0.8798828125}, {"start": 1096.8, "end": 1097.2, "word": " value.", "probability": 0.9619140625}, {"start": 1101.92, "end": 1102.64, "word": " Since", "probability": 0.7490234375}, {"start": 1102.64, "end": 1103.48, "word": " the", "probability": 0.82421875}, {"start": 1103.48, "end": 1103.96, "word": " alternative", "probability": 0.91064453125}, {"start": 1103.96, "end": 1104.3, "word": " is", "probability": 0.9375}, {"start": 1104.3, 
"end": 1104.56, "word": " the", "probability": 0.7861328125}, {"start": 1104.56, "end": 1105.02, "word": " lower", "probability": 0.83203125}, {"start": 1105.02, "end": 1105.48, "word": " tail,", "probability": 0.85986328125}, {"start": 1106.74, "end": 1107.14, "word": " one", "probability": 0.90771484375}, {"start": 1107.14, "end": 1107.46, "word": "-sided", "probability": 0.850341796875}, {"start": 1107.46, "end": 1108.02, "word": " lower", "probability": 0.78955078125}, {"start": 1108.02, "end": 1108.4, "word": " tail,", "probability": 0.8642578125}, {"start": 1108.96, "end": 1109.26, "word": " so", "probability": 0.8515625}, {"start": 1109.26, "end": 1109.62, "word": " your", "probability": 0.83349609375}, {"start": 1109.62, "end": 1109.8, "word": " B", "probability": 0.60302734375}, {"start": 1109.8, "end": 1110.18, "word": " value,", "probability": 0.9560546875}, {"start": 1110.5, "end": 1110.74, "word": " your", "probability": 0.8525390625}, {"start": 1110.74, "end": 1111.52, "word": " critical", "probability": 0.92822265625}, {"start": 1111.52, "end": 1111.82, "word": " value", "probability": 0.97119140625}, {"start": 1111.82, "end": 1112.0, "word": " is", "probability": 0.90966796875}, {"start": 1112.0, "end": 1112.5, "word": " negative,", "probability": 0.80908203125}, {"start": 1113.42, "end": 1113.6, "word": " T", "probability": 0.313232421875}, {"start": 1113.6, "end": 1114.0, "word": " alpha,", "probability": 0.68115234375}, {"start": 1115.0, "end": 1115.12, "word": " and", "probability": 0.89990234375}, {"start": 1115.12, "end": 1115.28, "word": " there", "probability": 0.4140625}, {"start": 1115.28, "end": 1115.36, "word": " is", "probability": 0.82177734375}, {"start": 1115.36, "end": 1115.46, "word": " a", "probability": 0.63525390625}, {"start": 1115.46, "end": 1115.7, "word": " freedom.", "probability": 0.91552734375}], "temperature": 1.0}, {"id": 41, "seek": 114067, "start": 1117.33, "end": 1140.67, "text": " So this is equal to negative T, 5% 
with 28 degrees of freedom. By using the table you have 28, 28 under", "tokens": [407, 341, 307, 2681, 281, 3671, 314, 11, 1025, 4, 365, 7562, 5310, 295, 5645, 13, 3146, 1228, 264, 3199, 291, 362, 7562, 11, 7562, 833], "avg_logprob": -0.29195602734883624, "compression_ratio": 1.0505050505050506, "no_speech_prob": 0.0, "words": [{"start": 1117.33, "end": 1117.63, "word": " So", "probability": 0.56689453125}, {"start": 1117.63, "end": 1117.93, "word": " this", "probability": 0.60888671875}, {"start": 1117.93, "end": 1118.07, "word": " is", "probability": 0.919921875}, {"start": 1118.07, "end": 1118.35, "word": " equal", "probability": 0.88232421875}, {"start": 1118.35, "end": 1118.75, "word": " to", "probability": 0.97802734375}, {"start": 1118.75, "end": 1119.37, "word": " negative", "probability": 0.5859375}, {"start": 1119.37, "end": 1119.83, "word": " T,", "probability": 0.59814453125}, {"start": 1120.29, "end": 1121.35, "word": " 5", "probability": 0.65966796875}, {"start": 1121.35, "end": 1121.95, "word": "%", "probability": 0.31982421875}, {"start": 1121.95, "end": 1123.41, "word": " with", "probability": 0.8955078125}, {"start": 1123.41, "end": 1124.29, "word": " 28", "probability": 0.9521484375}, {"start": 1124.29, "end": 1126.57, "word": " degrees", "probability": 0.9267578125}, {"start": 1126.57, "end": 1127.63, "word": " of", "probability": 0.96923828125}, {"start": 1127.63, "end": 1128.03, "word": " freedom.", "probability": 0.9423828125}, {"start": 1128.99, "end": 1129.49, "word": " By", "probability": 0.9501953125}, {"start": 1129.49, "end": 1129.79, "word": " using", "probability": 0.93896484375}, {"start": 1129.79, "end": 1129.99, "word": " the", "probability": 0.91552734375}, {"start": 1129.99, "end": 1130.27, "word": " table", "probability": 0.8583984375}, {"start": 1130.27, "end": 1130.47, "word": " you", "probability": 0.75390625}, {"start": 1130.47, "end": 1130.77, "word": " have", "probability": 0.9375}, {"start": 1130.77, "end": 1135.27, 
"word": " 28,", "probability": 0.7568359375}, {"start": 1136.03, "end": 1140.07, "word": " 28", "probability": 0.92138671875}, {"start": 1140.07, "end": 1140.67, "word": " under", "probability": 0.87744140625}], "temperature": 1.0}, {"id": 42, "seek": 116829, "start": 1142.17, "end": 1168.29, "text": " 5%, so 28 under 5%, so 1.701, negative 1.701. Now, we reject the null hypothesis if", "tokens": [1025, 8923, 370, 7562, 833, 1025, 8923, 370, 502, 13, 5867, 16, 11, 3671, 502, 13, 5867, 16, 13, 823, 11, 321, 8248, 264, 18184, 17291, 498], "avg_logprob": -0.2755301317998341, "compression_ratio": 1.037037037037037, "no_speech_prob": 0.0, "words": [{"start": 1142.17, "end": 1143.23, "word": " 5%,", "probability": 0.26580810546875}, {"start": 1143.23, "end": 1145.37, "word": " so", "probability": 0.81689453125}, {"start": 1145.37, "end": 1146.01, "word": " 28", "probability": 0.89306640625}, {"start": 1146.01, "end": 1146.35, "word": " under", "probability": 0.830078125}, {"start": 1146.35, "end": 1147.83, "word": " 5%,", "probability": 0.7242431640625}, {"start": 1147.83, "end": 1152.79, "word": " so", "probability": 0.468994140625}, {"start": 1152.79, "end": 1153.73, "word": " 1", "probability": 0.80712890625}, {"start": 1153.73, "end": 1154.91, "word": ".701,", "probability": 0.9698893229166666}, {"start": 1155.65, "end": 1157.67, "word": " negative", "probability": 0.498291015625}, {"start": 1157.67, "end": 1158.09, "word": " 1", "probability": 0.9853515625}, {"start": 1158.09, "end": 1160.87, "word": ".701.", "probability": 0.9763997395833334}, {"start": 1163.75, "end": 1164.61, "word": " Now,", "probability": 0.9453125}, {"start": 1164.93, "end": 1165.45, "word": " we", "probability": 0.95849609375}, {"start": 1165.45, "end": 1165.91, "word": " reject", "probability": 0.89794921875}, {"start": 1165.91, "end": 1166.09, "word": " the", "probability": 0.8505859375}, {"start": 1166.09, "end": 1166.23, "word": " null", "probability": 0.9541015625}, {"start": 1166.23, 
"end": 1166.71, "word": " hypothesis", "probability": 0.94384765625}, {"start": 1166.71, "end": 1168.29, "word": " if", "probability": 0.7099609375}], "temperature": 1.0}, {"id": 43, "seek": 120277, "start": 1173.874, "end": 1202.77, "text": " region. Now again, since it's lower TL, so your rejection region is below negative 1.701. Now, does this value fall in the rejection region? It falls in the non-rejection region. So the answer is", "tokens": [4458, 13, 823, 797, 11, 1670, 309, 311, 3126, 40277, 11, 370, 428, 26044, 4458, 307, 2507, 3671, 502, 13, 5867, 16, 13, 823, 11, 775, 341, 2158, 2100, 294, 264, 26044, 4458, 30, 467, 8804, 294, 264, 2107, 12, 265, 1020, 313, 4458, 13, 407, 264, 1867, 307], "avg_logprob": -0.1968749937415123, "compression_ratio": 1.4736842105263157, "no_speech_prob": 0.0, "words": [{"start": 1173.77, "end": 1174.19, "word": " region.", "probability": 0.3525390625}, {"start": 1176.33, "end": 1176.85, "word": " Now", "probability": 0.765625}, {"start": 1176.85, "end": 1177.39, "word": " again,", "probability": 0.650390625}, {"start": 1178.27, "end": 1179.81, "word": " since", "probability": 0.84619140625}, {"start": 1179.81, "end": 1180.01, "word": " it's", "probability": 0.830078125}, {"start": 1180.01, "end": 1180.33, "word": " lower", "probability": 0.59423828125}, {"start": 1180.33, "end": 1180.79, "word": " TL,", "probability": 0.87109375}, {"start": 1181.41, "end": 1181.73, "word": " so", "probability": 0.80810546875}, {"start": 1181.73, "end": 1182.89, "word": " your", "probability": 0.8408203125}, {"start": 1182.89, "end": 1183.93, "word": " rejection", "probability": 0.95703125}, {"start": 1183.93, "end": 1184.47, "word": " region", "probability": 0.9521484375}, {"start": 1184.47, "end": 1185.55, "word": " is", "probability": 0.94140625}, {"start": 1185.55, "end": 1185.85, "word": " below", "probability": 0.86767578125}, {"start": 1185.85, "end": 1186.25, "word": " negative", "probability": 0.642578125}, {"start": 1186.25, "end": 
1187.67, "word": " 1", "probability": 0.91162109375}, {"start": 1187.67, "end": 1188.83, "word": ".701.", "probability": 0.9641927083333334}, {"start": 1191.23, "end": 1191.63, "word": " Now,", "probability": 0.91650390625}, {"start": 1192.23, "end": 1192.49, "word": " does", "probability": 0.95263671875}, {"start": 1192.49, "end": 1192.77, "word": " this", "probability": 0.947265625}, {"start": 1192.77, "end": 1193.19, "word": " value", "probability": 0.97412109375}, {"start": 1193.19, "end": 1194.69, "word": " fall", "probability": 0.80859375}, {"start": 1194.69, "end": 1194.83, "word": " in", "probability": 0.91259765625}, {"start": 1194.83, "end": 1194.95, "word": " the", "probability": 0.8955078125}, {"start": 1194.95, "end": 1195.25, "word": " rejection", "probability": 0.9501953125}, {"start": 1195.25, "end": 1195.63, "word": " region?", "probability": 0.94287109375}, {"start": 1196.51, "end": 1196.89, "word": " It", "probability": 0.9453125}, {"start": 1196.89, "end": 1197.43, "word": " falls", "probability": 0.82470703125}, {"start": 1197.43, "end": 1199.01, "word": " in", "probability": 0.943359375}, {"start": 1199.01, "end": 1199.21, "word": " the", "probability": 0.91357421875}, {"start": 1199.21, "end": 1199.43, "word": " non", "probability": 0.951171875}, {"start": 1199.43, "end": 1199.93, "word": "-rejection", "probability": 0.8807373046875}, {"start": 1199.93, "end": 1200.27, "word": " region.", "probability": 0.93896484375}, {"start": 1201.87, "end": 1202.21, "word": " So", "probability": 0.9267578125}, {"start": 1202.21, "end": 1202.35, "word": " the", "probability": 0.6845703125}, {"start": 1202.35, "end": 1202.53, "word": " answer", "probability": 0.955078125}, {"start": 1202.53, "end": 1202.77, "word": " is", "probability": 0.9443359375}], "temperature": 1.0}, {"id": 44, "seek": 123083, "start": 1203.3, "end": 1230.84, "text": " Don't reject the null hypothesis. 
That means we don't have sufficient evidence to support the excellent drug company claim which states that their aspirin tablets relieve headaches faster than the simple one. So that's by using a critical value approach because this value", "tokens": [1468, 380, 8248, 264, 18184, 17291, 13, 663, 1355, 321, 500, 380, 362, 11563, 4467, 281, 1406, 264, 7103, 4110, 2237, 3932, 597, 4368, 300, 641, 20003, 259, 27622, 30450, 35046, 4663, 813, 264, 2199, 472, 13, 407, 300, 311, 538, 1228, 257, 4924, 2158, 3109, 570, 341, 2158], "avg_logprob": -0.20187500596046448, "compression_ratio": 1.483695652173913, "no_speech_prob": 0.0, "words": [{"start": 1203.3, "end": 1204.4, "word": " Don't", "probability": 0.63720703125}, {"start": 1204.4, "end": 1205.96, "word": " reject", "probability": 0.64404296875}, {"start": 1205.96, "end": 1206.98, "word": " the", "probability": 0.380126953125}, {"start": 1206.98, "end": 1207.2, "word": " null", "probability": 0.92041015625}, {"start": 1207.2, "end": 1207.72, "word": " hypothesis.", "probability": 0.9140625}, {"start": 1207.88, "end": 1208.04, "word": " That", "probability": 0.86669921875}, {"start": 1208.04, "end": 1208.3, "word": " means", "probability": 0.92919921875}, {"start": 1208.3, "end": 1208.48, "word": " we", "probability": 0.81787109375}, {"start": 1208.48, "end": 1208.68, "word": " don't", "probability": 0.96142578125}, {"start": 1208.68, "end": 1208.92, "word": " have", "probability": 0.94921875}, {"start": 1208.92, "end": 1209.42, "word": " sufficient", "probability": 0.87841796875}, {"start": 1209.42, "end": 1209.96, "word": " evidence", "probability": 0.94921875}, {"start": 1209.96, "end": 1210.68, "word": " to", "probability": 0.88037109375}, {"start": 1210.68, "end": 1211.38, "word": " support", "probability": 0.978515625}, {"start": 1211.38, "end": 1212.48, "word": " the", "probability": 0.826171875}, {"start": 1212.48, "end": 1213.02, "word": " excellent", "probability": 0.85498046875}, {"start": 1213.02, "end": 
1213.64, "word": " drug", "probability": 0.859375}, {"start": 1213.64, "end": 1214.14, "word": " company", "probability": 0.92236328125}, {"start": 1214.14, "end": 1214.62, "word": " claim", "probability": 0.7177734375}, {"start": 1214.62, "end": 1215.16, "word": " which", "probability": 0.51904296875}, {"start": 1215.16, "end": 1215.82, "word": " states", "probability": 0.67626953125}, {"start": 1215.82, "end": 1216.3, "word": " that", "probability": 0.9189453125}, {"start": 1216.3, "end": 1217.12, "word": " their", "probability": 0.8330078125}, {"start": 1217.12, "end": 1218.86, "word": " aspirin", "probability": 0.925537109375}, {"start": 1218.86, "end": 1219.84, "word": " tablets", "probability": 0.8994140625}, {"start": 1219.84, "end": 1220.4, "word": " relieve", "probability": 0.853515625}, {"start": 1220.4, "end": 1220.84, "word": " headaches", "probability": 0.9482421875}, {"start": 1220.84, "end": 1221.38, "word": " faster", "probability": 0.892578125}, {"start": 1221.38, "end": 1221.84, "word": " than", "probability": 0.94580078125}, {"start": 1221.84, "end": 1222.58, "word": " the", "probability": 0.833984375}, {"start": 1222.58, "end": 1222.84, "word": " simple", "probability": 0.845703125}, {"start": 1222.84, "end": 1223.06, "word": " one.", "probability": 0.83203125}, {"start": 1225.18, "end": 1225.72, "word": " So", "probability": 0.87939453125}, {"start": 1225.72, "end": 1225.94, "word": " that's", "probability": 0.865966796875}, {"start": 1225.94, "end": 1226.1, "word": " by", "probability": 0.8955078125}, {"start": 1226.1, "end": 1226.58, "word": " using", "probability": 0.93603515625}, {"start": 1226.58, "end": 1228.2, "word": " a", "probability": 0.72900390625}, {"start": 1228.2, "end": 1228.54, "word": " critical", "probability": 0.93310546875}, {"start": 1228.54, "end": 1228.98, "word": " value", "probability": 0.9677734375}, {"start": 1228.98, "end": 1229.36, "word": " approach", "probability": 0.9736328125}, {"start": 1229.36, "end": 1230.1, 
"word": " because", "probability": 0.383056640625}, {"start": 1230.1, "end": 1230.42, "word": " this", "probability": 0.92919921875}, {"start": 1230.42, "end": 1230.84, "word": " value", "probability": 0.96923828125}], "temperature": 1.0}, {"id": 45, "seek": 125684, "start": 1231.89, "end": 1256.85, "text": " falls in the non-rejection region, so we don't reject the null hypothesis. Or you maybe use the B-value approach. Now, since the alternative is µ1 smaller than µ2,", "tokens": [8804, 294, 264, 2107, 12, 265, 1020, 313, 4458, 11, 370, 321, 500, 380, 8248, 264, 18184, 17291, 13, 1610, 291, 1310, 764, 264, 363, 12, 29155, 3109, 13, 823, 11, 1670, 264, 8535, 307, 1815, 113, 16, 4356, 813, 1815, 113, 17, 11], "avg_logprob": -0.2987847195731269, "compression_ratio": 1.2388059701492538, "no_speech_prob": 0.0, "words": [{"start": 1231.89, "end": 1232.35, "word": " falls", "probability": 0.2364501953125}, {"start": 1232.35, "end": 1232.73, "word": " in", "probability": 0.8916015625}, {"start": 1232.73, "end": 1232.97, "word": " the", "probability": 0.857421875}, {"start": 1232.97, "end": 1233.23, "word": " non", "probability": 0.9189453125}, {"start": 1233.23, "end": 1233.71, "word": "-rejection", "probability": 0.88232421875}, {"start": 1233.71, "end": 1234.07, "word": " region,", "probability": 0.83837890625}, {"start": 1235.07, "end": 1235.17, "word": " so", "probability": 0.921875}, {"start": 1235.17, "end": 1235.35, "word": " we", "probability": 0.85986328125}, {"start": 1235.35, "end": 1235.63, "word": " don't", "probability": 0.8955078125}, {"start": 1235.63, "end": 1236.07, "word": " reject", "probability": 0.91357421875}, {"start": 1236.07, "end": 1236.33, "word": " the", "probability": 0.215576171875}, {"start": 1236.33, "end": 1236.45, "word": " null", "probability": 0.9599609375}, {"start": 1236.45, "end": 1236.89, "word": " hypothesis.", "probability": 0.93017578125}, {"start": 1244.13, "end": 1244.93, "word": " Or", "probability": 0.94921875}, {"start": 
1244.93, "end": 1245.87, "word": " you", "probability": 0.58642578125}, {"start": 1245.87, "end": 1246.29, "word": " maybe", "probability": 0.4794921875}, {"start": 1246.29, "end": 1246.87, "word": " use", "probability": 0.859375}, {"start": 1246.87, "end": 1247.37, "word": " the", "probability": 0.89599609375}, {"start": 1247.37, "end": 1248.19, "word": " B", "probability": 0.373779296875}, {"start": 1248.19, "end": 1248.51, "word": "-value", "probability": 0.757080078125}, {"start": 1248.51, "end": 1248.93, "word": " approach.", "probability": 0.96044921875}, {"start": 1253.07, "end": 1253.43, "word": " Now,", "probability": 0.90869140625}, {"start": 1253.49, "end": 1253.87, "word": " since", "probability": 0.888671875}, {"start": 1253.87, "end": 1254.73, "word": " the", "probability": 0.88671875}, {"start": 1254.73, "end": 1255.21, "word": " alternative", "probability": 0.62548828125}, {"start": 1255.21, "end": 1255.41, "word": " is", "probability": 0.499755859375}, {"start": 1255.41, "end": 1255.71, "word": " µ1", "probability": 0.7248942057291666}, {"start": 1255.71, "end": 1256.11, "word": " smaller", "probability": 0.69677734375}, {"start": 1256.11, "end": 1256.37, "word": " than", "probability": 0.95068359375}, {"start": 1256.37, "end": 1256.85, "word": " µ2,", "probability": 0.9881184895833334}], "temperature": 1.0}, {"id": 46, "seek": 128133, "start": 1257.64, "end": 1281.34, "text": " So B value is probability of T smaller than negative 0.653. 
So we are looking for this probability B of Z smaller than negative 0.653.", "tokens": [407, 363, 2158, 307, 8482, 295, 314, 4356, 813, 3671, 1958, 13, 16824, 18, 13, 407, 321, 366, 1237, 337, 341, 8482, 363, 295, 1176, 4356, 813, 3671, 1958, 13, 16824, 18, 13], "avg_logprob": -0.1883042327621404, "compression_ratio": 1.4361702127659575, "no_speech_prob": 0.0, "words": [{"start": 1257.64, "end": 1258.5, "word": " So", "probability": 0.77197265625}, {"start": 1258.5, "end": 1258.74, "word": " B", "probability": 0.2066650390625}, {"start": 1258.74, "end": 1259.18, "word": " value", "probability": 0.88720703125}, {"start": 1259.18, "end": 1259.68, "word": " is", "probability": 0.91015625}, {"start": 1259.68, "end": 1260.08, "word": " probability", "probability": 0.80859375}, {"start": 1260.08, "end": 1260.36, "word": " of", "probability": 0.94921875}, {"start": 1260.36, "end": 1260.7, "word": " T", "probability": 0.88427734375}, {"start": 1260.7, "end": 1262.54, "word": " smaller", "probability": 0.6396484375}, {"start": 1262.54, "end": 1263.26, "word": " than", "probability": 0.93994140625}, {"start": 1263.26, "end": 1265.52, "word": " negative", "probability": 0.66064453125}, {"start": 1265.52, "end": 1268.82, "word": " 0", "probability": 0.4150390625}, {"start": 1268.82, "end": 1272.4, "word": ".653.", "probability": 0.95849609375}, {"start": 1274.3, "end": 1274.74, "word": " So", "probability": 0.93115234375}, {"start": 1274.74, "end": 1274.86, "word": " we", "probability": 0.7724609375}, {"start": 1274.86, "end": 1274.96, "word": " are", "probability": 0.87109375}, {"start": 1274.96, "end": 1275.28, "word": " looking", "probability": 0.91015625}, {"start": 1275.28, "end": 1275.62, "word": " for", "probability": 0.94873046875}, {"start": 1275.62, "end": 1275.86, "word": " this", "probability": 0.87841796875}, {"start": 1275.86, "end": 1276.28, "word": " probability", "probability": 0.96826171875}, {"start": 1276.28, "end": 1276.86, "word": " B", "probability": 
0.5478515625}, {"start": 1276.86, "end": 1278.12, "word": " of", "probability": 0.93310546875}, {"start": 1278.12, "end": 1278.42, "word": " Z", "probability": 0.8798828125}, {"start": 1278.42, "end": 1279.08, "word": " smaller", "probability": 0.7119140625}, {"start": 1279.08, "end": 1279.38, "word": " than", "probability": 0.9404296875}, {"start": 1279.38, "end": 1279.76, "word": " negative", "probability": 0.91015625}, {"start": 1279.76, "end": 1280.18, "word": " 0", "probability": 0.958984375}, {"start": 1280.18, "end": 1281.34, "word": ".653.", "probability": 0.9807942708333334}], "temperature": 1.0}, {"id": 47, "seek": 131071, "start": 1283.21, "end": 1310.71, "text": " The table you have gives the area in the upper tail. So this is the same as beauty greater than. Because the area to the right of 0.653 is the same as the area to the left of negative 0.75. Because of symmetry. Just look at the tea table.", "tokens": [440, 3199, 291, 362, 2709, 264, 1859, 294, 264, 6597, 6838, 13, 407, 341, 307, 264, 912, 382, 6643, 5044, 813, 13, 1436, 264, 1859, 281, 264, 558, 295, 1958, 13, 16824, 18, 307, 264, 912, 382, 264, 1859, 281, 264, 1411, 295, 3671, 1958, 13, 11901, 13, 1436, 295, 25440, 13, 1449, 574, 412, 264, 5817, 3199, 13], "avg_logprob": -0.2203124927977721, "compression_ratio": 1.5933333333333333, "no_speech_prob": 0.0, "words": [{"start": 1283.21, "end": 1283.51, "word": " The", "probability": 0.425048828125}, {"start": 1283.51, "end": 1283.83, "word": " table", "probability": 0.7978515625}, {"start": 1283.83, "end": 1283.99, "word": " you", "probability": 0.8994140625}, {"start": 1283.99, "end": 1284.35, "word": " have", "probability": 0.9423828125}, {"start": 1284.35, "end": 1285.59, "word": " gives", "probability": 0.7314453125}, {"start": 1285.59, "end": 1285.99, "word": " the", "probability": 0.86572265625}, {"start": 1285.99, "end": 1286.35, "word": " area", "probability": 0.89306640625}, {"start": 1286.35, "end": 1286.59, "word": " in", 
"probability": 0.9072265625}, {"start": 1286.59, "end": 1286.75, "word": " the", "probability": 0.85986328125}, {"start": 1286.75, "end": 1287.05, "word": " upper", "probability": 0.76708984375}, {"start": 1287.05, "end": 1287.45, "word": " tail.", "probability": 0.1903076171875}, {"start": 1289.49, "end": 1289.87, "word": " So", "probability": 0.74072265625}, {"start": 1289.87, "end": 1290.47, "word": " this", "probability": 0.379638671875}, {"start": 1290.47, "end": 1290.59, "word": " is", "probability": 0.94580078125}, {"start": 1290.59, "end": 1290.73, "word": " the", "probability": 0.833984375}, {"start": 1290.73, "end": 1290.97, "word": " same", "probability": 0.919921875}, {"start": 1290.97, "end": 1291.69, "word": " as", "probability": 0.95849609375}, {"start": 1291.69, "end": 1292.23, "word": " beauty", "probability": 0.261474609375}, {"start": 1292.23, "end": 1292.75, "word": " greater", "probability": 0.81005859375}, {"start": 1292.75, "end": 1293.19, "word": " than.", "probability": 0.9521484375}, {"start": 1297.79, "end": 1298.43, "word": " Because", "probability": 0.73681640625}, {"start": 1298.43, "end": 1298.55, "word": " the", "probability": 0.822265625}, {"start": 1298.55, "end": 1298.71, "word": " area", "probability": 0.86865234375}, {"start": 1298.71, "end": 1298.93, "word": " to", "probability": 0.9609375}, {"start": 1298.93, "end": 1299.09, "word": " the", "probability": 0.9208984375}, {"start": 1299.09, "end": 1299.43, "word": " right", "probability": 0.9169921875}, {"start": 1299.43, "end": 1300.67, "word": " of", "probability": 0.94384765625}, {"start": 1300.67, "end": 1300.99, "word": " 0", "probability": 0.60302734375}, {"start": 1300.99, "end": 1302.61, "word": ".653", "probability": 0.9462890625}, {"start": 1302.61, "end": 1303.87, "word": " is", "probability": 0.896484375}, {"start": 1303.87, "end": 1304.03, "word": " the", "probability": 0.9130859375}, {"start": 1304.03, "end": 1304.35, "word": " same", "probability": 0.91064453125}, 
{"start": 1304.35, "end": 1304.63, "word": " as", "probability": 0.904296875}, {"start": 1304.63, "end": 1304.77, "word": " the", "probability": 0.85986328125}, {"start": 1304.77, "end": 1304.89, "word": " area", "probability": 0.83154296875}, {"start": 1304.89, "end": 1305.09, "word": " to", "probability": 0.94970703125}, {"start": 1305.09, "end": 1305.25, "word": " the", "probability": 0.91845703125}, {"start": 1305.25, "end": 1305.47, "word": " left", "probability": 0.93994140625}, {"start": 1305.47, "end": 1305.63, "word": " of", "probability": 0.97021484375}, {"start": 1305.63, "end": 1305.97, "word": " negative", "probability": 0.6015625}, {"start": 1305.97, "end": 1306.29, "word": " 0", "probability": 0.9482421875}, {"start": 1306.29, "end": 1306.65, "word": ".75.", "probability": 0.92822265625}, {"start": 1307.43, "end": 1308.07, "word": " Because", "probability": 0.8515625}, {"start": 1308.07, "end": 1308.21, "word": " of", "probability": 0.95654296875}, {"start": 1308.21, "end": 1308.55, "word": " symmetry.", "probability": 0.81982421875}, {"start": 1309.29, "end": 1309.55, "word": " Just", "probability": 0.84814453125}, {"start": 1309.55, "end": 1309.79, "word": " look", "probability": 0.96142578125}, {"start": 1309.79, "end": 1309.99, "word": " at", "probability": 0.9638671875}, {"start": 1309.99, "end": 1310.21, "word": " the", "probability": 0.91162109375}, {"start": 1310.21, "end": 1310.35, "word": " tea", "probability": 0.60888671875}, {"start": 1310.35, "end": 1310.71, "word": " table.", "probability": 0.86572265625}], "temperature": 1.0}, {"id": 48, "seek": 133915, "start": 1312.69, "end": 1339.15, "text": " Now, smaller than negative, means this area is actually the same as the area to the right of the same value, but on the other side. So these two areas are the same. So it's the same as D of T greater than 0.653. 
If you look at the table for 28 degrees of freedom,", "tokens": [823, 11, 4356, 813, 3671, 11, 1355, 341, 1859, 307, 767, 264, 912, 382, 264, 1859, 281, 264, 558, 295, 264, 912, 2158, 11, 457, 322, 264, 661, 1252, 13, 407, 613, 732, 3179, 366, 264, 912, 13, 407, 309, 311, 264, 912, 382, 413, 295, 314, 5044, 813, 1958, 13, 16824, 18, 13, 759, 291, 574, 412, 264, 3199, 337, 7562, 5310, 295, 5645, 11], "avg_logprob": -0.21641790955813964, "compression_ratio": 1.5621301775147929, "no_speech_prob": 0.0, "words": [{"start": 1312.69, "end": 1312.97, "word": " Now,", "probability": 0.6435546875}, {"start": 1313.07, "end": 1313.37, "word": " smaller", "probability": 0.66845703125}, {"start": 1313.37, "end": 1313.63, "word": " than", "probability": 0.943359375}, {"start": 1313.63, "end": 1314.03, "word": " negative,", "probability": 0.87353515625}, {"start": 1314.47, "end": 1316.89, "word": " means", "probability": 0.56884765625}, {"start": 1316.89, "end": 1317.17, "word": " this", "probability": 0.9169921875}, {"start": 1317.17, "end": 1317.57, "word": " area", "probability": 0.896484375}, {"start": 1317.57, "end": 1320.37, "word": " is", "probability": 0.43115234375}, {"start": 1320.37, "end": 1320.81, "word": " actually", "probability": 0.8798828125}, {"start": 1320.81, "end": 1321.01, "word": " the", "probability": 0.85302734375}, {"start": 1321.01, "end": 1321.23, "word": " same", "probability": 0.88720703125}, {"start": 1321.23, "end": 1321.45, "word": " as", "probability": 0.654296875}, {"start": 1321.45, "end": 1321.57, "word": " the", "probability": 0.7109375}, {"start": 1321.57, "end": 1321.67, "word": " area", "probability": 0.72900390625}, {"start": 1321.67, "end": 1321.87, "word": " to", "probability": 0.919921875}, {"start": 1321.87, "end": 1322.01, "word": " the", "probability": 0.91796875}, {"start": 1322.01, "end": 1322.21, "word": " right", "probability": 0.91796875}, {"start": 1322.21, "end": 1322.35, "word": " of", "probability": 0.93359375}, {"start": 
1322.35, "end": 1322.47, "word": " the", "probability": 0.87841796875}, {"start": 1322.47, "end": 1322.69, "word": " same", "probability": 0.90380859375}, {"start": 1322.69, "end": 1322.95, "word": " value,", "probability": 0.8935546875}, {"start": 1323.17, "end": 1323.19, "word": " but", "probability": 0.92138671875}, {"start": 1323.19, "end": 1323.45, "word": " on", "probability": 0.56884765625}, {"start": 1323.45, "end": 1323.59, "word": " the", "probability": 0.923828125}, {"start": 1323.59, "end": 1323.81, "word": " other", "probability": 0.869140625}, {"start": 1323.81, "end": 1324.27, "word": " side.", "probability": 0.87255859375}, {"start": 1326.19, "end": 1326.63, "word": " So", "probability": 0.90966796875}, {"start": 1326.63, "end": 1326.85, "word": " these", "probability": 0.646484375}, {"start": 1326.85, "end": 1327.07, "word": " two", "probability": 0.87890625}, {"start": 1327.07, "end": 1327.33, "word": " areas", "probability": 0.94482421875}, {"start": 1327.33, "end": 1327.53, "word": " are", "probability": 0.94677734375}, {"start": 1327.53, "end": 1327.71, "word": " the", "probability": 0.92041015625}, {"start": 1327.71, "end": 1327.97, "word": " same.", "probability": 0.91259765625}, {"start": 1329.55, "end": 1329.89, "word": " So", "probability": 0.83837890625}, {"start": 1329.89, "end": 1330.15, "word": " it's", "probability": 0.80615234375}, {"start": 1330.15, "end": 1330.39, "word": " the", "probability": 0.91064453125}, {"start": 1330.39, "end": 1330.59, "word": " same", "probability": 0.91943359375}, {"start": 1330.59, "end": 1330.81, "word": " as", "probability": 0.9560546875}, {"start": 1330.81, "end": 1331.05, "word": " D", "probability": 0.415283203125}, {"start": 1331.05, "end": 1331.41, "word": " of", "probability": 0.72021484375}, {"start": 1331.41, "end": 1331.55, "word": " T", "probability": 0.83203125}, {"start": 1331.55, "end": 1331.89, "word": " greater", "probability": 0.7890625}, {"start": 1331.89, "end": 1332.17, "word": " 
than", "probability": 0.94482421875}, {"start": 1332.17, "end": 1332.37, "word": " 0", "probability": 0.346435546875}, {"start": 1332.37, "end": 1333.23, "word": ".653.", "probability": 0.9669596354166666}, {"start": 1334.01, "end": 1334.53, "word": " If", "probability": 0.94677734375}, {"start": 1334.53, "end": 1334.61, "word": " you", "probability": 0.9365234375}, {"start": 1334.61, "end": 1334.75, "word": " look", "probability": 0.9619140625}, {"start": 1334.75, "end": 1334.87, "word": " at", "probability": 0.96728515625}, {"start": 1334.87, "end": 1334.99, "word": " the", "probability": 0.92236328125}, {"start": 1334.99, "end": 1335.31, "word": " table", "probability": 0.8515625}, {"start": 1335.31, "end": 1335.91, "word": " for", "probability": 0.90625}, {"start": 1335.91, "end": 1337.71, "word": " 28", "probability": 0.83203125}, {"start": 1337.71, "end": 1338.65, "word": " degrees", "probability": 0.94677734375}, {"start": 1338.65, "end": 1338.83, "word": " of", "probability": 0.97265625}, {"start": 1338.83, "end": 1339.15, "word": " freedom,", "probability": 0.9404296875}], "temperature": 1.0}, {"id": 49, "seek": 137028, "start": 1342.3, "end": 1370.28, "text": " That's your 28. I am looking for the value of 0.653. The first value here is 0.683. The other one is 0.8. It means my value is below this one. 
If you go back here, so it should be to the left of this value.", "tokens": [663, 311, 428, 7562, 13, 286, 669, 1237, 337, 264, 2158, 295, 1958, 13, 16824, 18, 13, 440, 700, 2158, 510, 307, 1958, 13, 27102, 18, 13, 440, 661, 472, 307, 1958, 13, 23, 13, 467, 1355, 452, 2158, 307, 2507, 341, 472, 13, 759, 291, 352, 646, 510, 11, 370, 309, 820, 312, 281, 264, 1411, 295, 341, 2158, 13], "avg_logprob": -0.13911289865932158, "compression_ratio": 1.4375, "no_speech_prob": 0.0, "words": [{"start": 1342.3, "end": 1342.78, "word": " That's", "probability": 0.67236328125}, {"start": 1342.78, "end": 1343.0, "word": " your", "probability": 0.853515625}, {"start": 1343.0, "end": 1343.52, "word": " 28.", "probability": 0.8623046875}, {"start": 1347.58, "end": 1348.18, "word": " I", "probability": 0.966796875}, {"start": 1348.18, "end": 1348.3, "word": " am", "probability": 0.61181640625}, {"start": 1348.3, "end": 1348.5, "word": " looking", "probability": 0.91259765625}, {"start": 1348.5, "end": 1348.72, "word": " for", "probability": 0.95361328125}, {"start": 1348.72, "end": 1348.86, "word": " the", "probability": 0.90087890625}, {"start": 1348.86, "end": 1349.16, "word": " value", "probability": 0.974609375}, {"start": 1349.16, "end": 1349.56, "word": " of", "probability": 0.947265625}, {"start": 1349.56, "end": 1350.02, "word": " 0", "probability": 0.56982421875}, {"start": 1350.02, "end": 1351.0, "word": ".653.", "probability": 0.98193359375}, {"start": 1351.82, "end": 1352.42, "word": " The", "probability": 0.865234375}, {"start": 1352.42, "end": 1352.72, "word": " first", "probability": 0.8525390625}, {"start": 1352.72, "end": 1353.02, "word": " value", "probability": 0.97607421875}, {"start": 1353.02, "end": 1353.2, "word": " here", "probability": 0.75341796875}, {"start": 1353.2, "end": 1353.32, "word": " is", "probability": 0.9345703125}, {"start": 1353.32, "end": 1353.48, "word": " 0", "probability": 0.97607421875}, {"start": 1353.48, "end": 1354.44, "word": ".683.", 
"probability": 0.9563802083333334}, {"start": 1356.38, "end": 1356.98, "word": " The", "probability": 0.81201171875}, {"start": 1356.98, "end": 1357.26, "word": " other", "probability": 0.88330078125}, {"start": 1357.26, "end": 1357.46, "word": " one", "probability": 0.91357421875}, {"start": 1357.46, "end": 1357.6, "word": " is", "probability": 0.9111328125}, {"start": 1357.6, "end": 1357.78, "word": " 0", "probability": 0.95361328125}, {"start": 1357.78, "end": 1358.2, "word": ".8.", "probability": 0.9833984375}, {"start": 1358.26, "end": 1358.42, "word": " It", "probability": 0.8369140625}, {"start": 1358.42, "end": 1358.66, "word": " means", "probability": 0.92919921875}, {"start": 1358.66, "end": 1359.02, "word": " my", "probability": 0.90087890625}, {"start": 1359.02, "end": 1359.48, "word": " value", "probability": 0.9755859375}, {"start": 1359.48, "end": 1359.94, "word": " is", "probability": 0.94677734375}, {"start": 1359.94, "end": 1360.56, "word": " below", "probability": 0.87646484375}, {"start": 1360.56, "end": 1360.86, "word": " this", "probability": 0.94921875}, {"start": 1360.86, "end": 1361.08, "word": " one.", "probability": 0.9189453125}, {"start": 1362.62, "end": 1362.86, "word": " If", "probability": 0.92919921875}, {"start": 1362.86, "end": 1362.98, "word": " you", "probability": 0.96044921875}, {"start": 1362.98, "end": 1363.2, "word": " go", "probability": 0.9599609375}, {"start": 1363.2, "end": 1363.6, "word": " back", "probability": 0.87255859375}, {"start": 1363.6, "end": 1366.6, "word": " here,", "probability": 0.6162109375}, {"start": 1366.7, "end": 1366.84, "word": " so", "probability": 0.79638671875}, {"start": 1366.84, "end": 1367.0, "word": " it", "probability": 0.916015625}, {"start": 1367.0, "end": 1367.44, "word": " should", "probability": 0.8564453125}, {"start": 1367.44, "end": 1367.88, "word": " be", "probability": 0.94775390625}, {"start": 1367.88, "end": 1369.16, "word": " to", "probability": 0.953125}, {"start": 1369.16, 
"end": 1369.34, "word": " the", "probability": 0.91796875}, {"start": 1369.34, "end": 1369.56, "word": " left", "probability": 0.9423828125}, {"start": 1369.56, "end": 1369.74, "word": " of", "probability": 0.96337890625}, {"start": 1369.74, "end": 1369.92, "word": " this", "probability": 0.943359375}, {"start": 1369.92, "end": 1370.28, "word": " value.", "probability": 0.978515625}], "temperature": 1.0}, {"id": 50, "seek": 140079, "start": 1372.31, "end": 1400.79, "text": " Now here 25, then 20, 20, 15 and so on. So it should be greater than 25. So your B value actually is greater than 25%. As we mentioned before, T table does not give the exact B value. So approximately my B value is greater than 25%. This value actually is much bigger than 5%.", "tokens": [823, 510, 3552, 11, 550, 945, 11, 945, 11, 2119, 293, 370, 322, 13, 407, 309, 820, 312, 5044, 813, 3552, 13, 407, 428, 363, 2158, 767, 307, 5044, 813, 3552, 6856, 1018, 321, 2835, 949, 11, 314, 3199, 775, 406, 976, 264, 1900, 363, 2158, 13, 407, 10447, 452, 363, 2158, 307, 5044, 813, 3552, 6856, 639, 2158, 767, 307, 709, 3801, 813, 1025, 6856], "avg_logprob": -0.19309702159753486, "compression_ratio": 1.6390532544378698, "no_speech_prob": 0.0, "words": [{"start": 1372.31, "end": 1372.61, "word": " Now", "probability": 0.658203125}, {"start": 1372.61, "end": 1372.85, "word": " here", "probability": 0.70703125}, {"start": 1372.85, "end": 1373.39, "word": " 25,", "probability": 0.599609375}, {"start": 1373.83, "end": 1374.03, "word": " then", "probability": 0.859375}, {"start": 1374.03, "end": 1374.89, "word": " 20,", "probability": 0.7880859375}, {"start": 1374.97, "end": 1375.37, "word": " 20,", "probability": 0.2265625}, {"start": 1375.49, "end": 1375.79, "word": " 15", "probability": 0.5068359375}, {"start": 1375.79, "end": 1376.01, "word": " and", "probability": 0.66357421875}, {"start": 1376.01, "end": 1376.15, "word": " so", "probability": 0.9423828125}, {"start": 1376.15, "end": 1376.39, "word": " on.", 
"probability": 0.9443359375}, {"start": 1376.85, "end": 1376.95, "word": " So", "probability": 0.9091796875}, {"start": 1376.95, "end": 1377.05, "word": " it", "probability": 0.82275390625}, {"start": 1377.05, "end": 1377.17, "word": " should", "probability": 0.96142578125}, {"start": 1377.17, "end": 1377.33, "word": " be", "probability": 0.9560546875}, {"start": 1377.33, "end": 1377.63, "word": " greater", "probability": 0.908203125}, {"start": 1377.63, "end": 1377.93, "word": " than", "probability": 0.951171875}, {"start": 1377.93, "end": 1378.27, "word": " 25.", "probability": 0.89208984375}, {"start": 1379.11, "end": 1379.45, "word": " So", "probability": 0.94775390625}, {"start": 1379.45, "end": 1379.71, "word": " your", "probability": 0.83203125}, {"start": 1379.71, "end": 1379.83, "word": " B", "probability": 0.634765625}, {"start": 1379.83, "end": 1380.21, "word": " value", "probability": 0.89404296875}, {"start": 1380.21, "end": 1380.83, "word": " actually", "probability": 0.81884765625}, {"start": 1380.83, "end": 1381.93, "word": " is", "probability": 0.8642578125}, {"start": 1381.93, "end": 1382.27, "word": " greater", "probability": 0.794921875}, {"start": 1382.27, "end": 1382.79, "word": " than", "probability": 0.94873046875}, {"start": 1382.79, "end": 1385.13, "word": " 25%.", "probability": 0.864990234375}, {"start": 1385.13, "end": 1385.97, "word": " As", "probability": 0.96533203125}, {"start": 1385.97, "end": 1386.09, "word": " we", "probability": 0.92333984375}, {"start": 1386.09, "end": 1386.35, "word": " mentioned", "probability": 0.84765625}, {"start": 1386.35, "end": 1386.87, "word": " before,", "probability": 0.86572265625}, {"start": 1388.07, "end": 1388.23, "word": " T", "probability": 0.791015625}, {"start": 1388.23, "end": 1388.57, "word": " table", "probability": 0.5419921875}, {"start": 1388.57, "end": 1389.21, "word": " does", "probability": 0.96826171875}, {"start": 1389.21, "end": 1389.43, "word": " not", "probability": 0.94921875}, 
{"start": 1389.43, "end": 1389.63, "word": " give", "probability": 0.88818359375}, {"start": 1389.63, "end": 1389.79, "word": " the", "probability": 0.916015625}, {"start": 1389.79, "end": 1390.21, "word": " exact", "probability": 0.93505859375}, {"start": 1390.21, "end": 1390.43, "word": " B", "probability": 0.8017578125}, {"start": 1390.43, "end": 1390.75, "word": " value.", "probability": 0.9580078125}, {"start": 1391.27, "end": 1391.43, "word": " So", "probability": 0.96630859375}, {"start": 1391.43, "end": 1392.01, "word": " approximately", "probability": 0.77734375}, {"start": 1392.01, "end": 1392.25, "word": " my", "probability": 0.7265625}, {"start": 1392.25, "end": 1392.45, "word": " B", "probability": 0.98046875}, {"start": 1392.45, "end": 1392.73, "word": " value", "probability": 0.93896484375}, {"start": 1392.73, "end": 1392.87, "word": " is", "probability": 0.91064453125}, {"start": 1392.87, "end": 1393.17, "word": " greater", "probability": 0.9130859375}, {"start": 1393.17, "end": 1393.53, "word": " than", "probability": 0.9482421875}, {"start": 1393.53, "end": 1394.51, "word": " 25%.", "probability": 0.9697265625}, {"start": 1394.51, "end": 1396.95, "word": " This", "probability": 0.8935546875}, {"start": 1396.95, "end": 1397.29, "word": " value", "probability": 0.98095703125}, {"start": 1397.29, "end": 1397.71, "word": " actually", "probability": 0.85888671875}, {"start": 1397.71, "end": 1398.03, "word": " is", "probability": 0.93505859375}, {"start": 1398.03, "end": 1398.51, "word": " much", "probability": 0.916015625}, {"start": 1398.51, "end": 1399.03, "word": " bigger", "probability": 0.94384765625}, {"start": 1399.03, "end": 1399.55, "word": " than", "probability": 0.9462890625}, {"start": 1399.55, "end": 1400.79, "word": " 5%.", "probability": 0.867431640625}], "temperature": 1.0}, {"id": 51, "seek": 142304, "start": 1401.2, "end": 1423.04, "text": " So again, we reject, we don't reject the null hypothesis. 
So again, to compute the B value, it's probability of T smaller than the value of the statistic, which is negative 0.653. The table you have gives the area to the right.", "tokens": [407, 797, 11, 321, 8248, 11, 321, 500, 380, 8248, 264, 18184, 17291, 13, 407, 797, 11, 281, 14722, 264, 363, 2158, 11, 309, 311, 8482, 295, 314, 4356, 813, 264, 2158, 295, 264, 29588, 11, 597, 307, 3671, 1958, 13, 16824, 18, 13, 440, 3199, 291, 362, 2709, 264, 1859, 281, 264, 558, 13], "avg_logprob": -0.15945870695369585, "compression_ratio": 1.4339622641509433, "no_speech_prob": 0.0, "words": [{"start": 1401.2, "end": 1401.5, "word": " So", "probability": 0.8583984375}, {"start": 1401.5, "end": 1401.92, "word": " again,", "probability": 0.8388671875}, {"start": 1402.18, "end": 1402.4, "word": " we", "probability": 0.8203125}, {"start": 1402.4, "end": 1402.86, "word": " reject,", "probability": 0.53955078125}, {"start": 1403.08, "end": 1403.22, "word": " we", "probability": 0.95849609375}, {"start": 1403.22, "end": 1403.5, "word": " don't", "probability": 0.9521484375}, {"start": 1403.5, "end": 1403.94, "word": " reject", "probability": 0.9150390625}, {"start": 1403.94, "end": 1404.16, "word": " the", "probability": 0.58056640625}, {"start": 1404.16, "end": 1404.26, "word": " null", "probability": 0.80126953125}, {"start": 1404.26, "end": 1404.7, "word": " hypothesis.", "probability": 0.89501953125}, {"start": 1406.88, "end": 1407.48, "word": " So", "probability": 0.935546875}, {"start": 1407.48, "end": 1407.84, "word": " again,", "probability": 0.8935546875}, {"start": 1408.64, "end": 1408.76, "word": " to", "probability": 0.90625}, {"start": 1408.76, "end": 1409.04, "word": " compute", "probability": 0.92822265625}, {"start": 1409.04, "end": 1409.2, "word": " the", "probability": 0.91943359375}, {"start": 1409.2, "end": 1409.32, "word": " B", "probability": 0.38671875}, {"start": 1409.32, "end": 1409.66, "word": " value,", "probability": 0.716796875}, {"start": 1409.88, "end": 
1409.98, "word": " it's", "probability": 0.771484375}, {"start": 1409.98, "end": 1410.32, "word": " probability", "probability": 0.76416015625}, {"start": 1410.32, "end": 1410.6, "word": " of", "probability": 0.95751953125}, {"start": 1410.6, "end": 1410.9, "word": " T", "probability": 0.95751953125}, {"start": 1410.9, "end": 1412.82, "word": " smaller", "probability": 0.74853515625}, {"start": 1412.82, "end": 1413.3, "word": " than", "probability": 0.94580078125}, {"start": 1413.3, "end": 1413.78, "word": " the", "probability": 0.91455078125}, {"start": 1413.78, "end": 1413.98, "word": " value", "probability": 0.9833984375}, {"start": 1413.98, "end": 1414.12, "word": " of", "probability": 0.9638671875}, {"start": 1414.12, "end": 1414.24, "word": " the", "probability": 0.90771484375}, {"start": 1414.24, "end": 1414.66, "word": " statistic,", "probability": 0.88525390625}, {"start": 1416.42, "end": 1417.32, "word": " which", "probability": 0.94384765625}, {"start": 1417.32, "end": 1417.48, "word": " is", "probability": 0.95263671875}, {"start": 1417.48, "end": 1417.9, "word": " negative", "probability": 0.72607421875}, {"start": 1417.9, "end": 1418.54, "word": " 0", "probability": 0.72265625}, {"start": 1418.54, "end": 1419.46, "word": ".653.", "probability": 0.9676106770833334}, {"start": 1420.58, "end": 1420.94, "word": " The", "probability": 0.8798828125}, {"start": 1420.94, "end": 1421.26, "word": " table", "probability": 0.8857421875}, {"start": 1421.26, "end": 1421.4, "word": " you", "probability": 0.6826171875}, {"start": 1421.4, "end": 1421.54, "word": " have", "probability": 0.94970703125}, {"start": 1421.54, "end": 1421.8, "word": " gives", "probability": 0.89892578125}, {"start": 1421.8, "end": 1422.04, "word": " the", "probability": 0.9091796875}, {"start": 1422.04, "end": 1422.34, "word": " area", "probability": 0.880859375}, {"start": 1422.34, "end": 1422.54, "word": " to", "probability": 0.9677734375}, {"start": 1422.54, "end": 1422.7, "word": " the", 
"probability": 0.91943359375}, {"start": 1422.7, "end": 1423.04, "word": " right.", "probability": 0.92822265625}], "temperature": 1.0}, {"id": 52, "seek": 145564, "start": 1426.98, "end": 1455.64, "text": " So this probability is the same as B of T greater than 0.653. So by using this table, you will get approximate value of B, which is greater than 25%. Always, as we mentioned, we reject the null hypothesis if my B value is smaller than alpha. In this case, this value is greater than alpha, so we don't reject the null. So we reach the same decision as by using the critical value approach.", "tokens": [407, 341, 8482, 307, 264, 912, 382, 363, 295, 314, 5044, 813, 1958, 13, 16824, 18, 13, 407, 538, 1228, 341, 3199, 11, 291, 486, 483, 30874, 2158, 295, 363, 11, 597, 307, 5044, 813, 3552, 6856, 11270, 11, 382, 321, 2835, 11, 321, 8248, 264, 18184, 17291, 498, 452, 363, 2158, 307, 4356, 813, 8961, 13, 682, 341, 1389, 11, 341, 2158, 307, 5044, 813, 8961, 11, 370, 321, 500, 380, 8248, 264, 18184, 13, 407, 321, 2524, 264, 912, 3537, 382, 538, 1228, 264, 4924, 2158, 3109, 13], "avg_logprob": -0.11426854428354201, "compression_ratio": 1.703056768558952, "no_speech_prob": 0.0, "words": [{"start": 1426.98, "end": 1427.4, "word": " So", "probability": 0.94091796875}, {"start": 1427.4, "end": 1428.0, "word": " this", "probability": 0.78271484375}, {"start": 1428.0, "end": 1428.7, "word": " probability", "probability": 0.91162109375}, {"start": 1428.7, "end": 1428.98, "word": " is", "probability": 0.85888671875}, {"start": 1428.98, "end": 1429.12, "word": " the", "probability": 0.8974609375}, {"start": 1429.12, "end": 1429.32, "word": " same", "probability": 0.91259765625}, {"start": 1429.32, "end": 1429.56, "word": " as", "probability": 0.95068359375}, {"start": 1429.56, "end": 1430.06, "word": " B", "probability": 0.474609375}, {"start": 1430.06, "end": 1430.22, "word": " of", "probability": 0.8583984375}, {"start": 1430.22, "end": 1430.36, "word": " T", "probability": 
0.7724609375}, {"start": 1430.36, "end": 1430.7, "word": " greater", "probability": 0.8837890625}, {"start": 1430.7, "end": 1430.98, "word": " than", "probability": 0.939453125}, {"start": 1430.98, "end": 1431.22, "word": " 0", "probability": 0.724609375}, {"start": 1431.22, "end": 1432.1, "word": ".653.", "probability": 0.9786783854166666}, {"start": 1432.96, "end": 1433.18, "word": " So", "probability": 0.966796875}, {"start": 1433.18, "end": 1433.38, "word": " by", "probability": 0.91064453125}, {"start": 1433.38, "end": 1433.66, "word": " using", "probability": 0.92919921875}, {"start": 1433.66, "end": 1433.94, "word": " this", "probability": 0.94384765625}, {"start": 1433.94, "end": 1434.28, "word": " table,", "probability": 0.87255859375}, {"start": 1435.24, "end": 1435.52, "word": " you", "probability": 0.92529296875}, {"start": 1435.52, "end": 1435.66, "word": " will", "probability": 0.8935546875}, {"start": 1435.66, "end": 1435.92, "word": " get", "probability": 0.9365234375}, {"start": 1435.92, "end": 1436.5, "word": " approximate", "probability": 0.82080078125}, {"start": 1436.5, "end": 1437.14, "word": " value", "probability": 0.9736328125}, {"start": 1437.14, "end": 1437.9, "word": " of", "probability": 0.9580078125}, {"start": 1437.9, "end": 1438.12, "word": " B,", "probability": 0.92919921875}, {"start": 1438.22, "end": 1438.4, "word": " which", "probability": 0.94970703125}, {"start": 1438.4, "end": 1438.56, "word": " is", "probability": 0.94970703125}, {"start": 1438.56, "end": 1438.92, "word": " greater", "probability": 0.90087890625}, {"start": 1438.92, "end": 1439.18, "word": " than", "probability": 0.943359375}, {"start": 1439.18, "end": 1440.1, "word": " 25%.", "probability": 0.972412109375}, {"start": 1440.1, "end": 1441.3, "word": " Always,", "probability": 0.89111328125}, {"start": 1441.42, "end": 1441.52, "word": " as", "probability": 0.962890625}, {"start": 1441.52, "end": 1441.64, "word": " we", "probability": 0.740234375}, {"start": 
1441.64, "end": 1441.94, "word": " mentioned,", "probability": 0.82373046875}, {"start": 1442.06, "end": 1442.18, "word": " we", "probability": 0.955078125}, {"start": 1442.18, "end": 1442.64, "word": " reject", "probability": 0.87646484375}, {"start": 1442.64, "end": 1442.82, "word": " the", "probability": 0.86669921875}, {"start": 1442.82, "end": 1442.96, "word": " null", "probability": 0.943359375}, {"start": 1442.96, "end": 1443.42, "word": " hypothesis", "probability": 0.74951171875}, {"start": 1443.42, "end": 1443.84, "word": " if", "probability": 0.8623046875}, {"start": 1443.84, "end": 1444.14, "word": " my", "probability": 0.93408203125}, {"start": 1444.14, "end": 1444.32, "word": " B", "probability": 0.892578125}, {"start": 1444.32, "end": 1444.58, "word": " value", "probability": 0.904296875}, {"start": 1444.58, "end": 1444.68, "word": " is", "probability": 0.759765625}, {"start": 1444.68, "end": 1444.92, "word": " smaller", "probability": 0.86474609375}, {"start": 1444.92, "end": 1445.16, "word": " than", "probability": 0.94677734375}, {"start": 1445.16, "end": 1445.42, "word": " alpha.", "probability": 0.8740234375}, {"start": 1446.36, "end": 1446.66, "word": " In", "probability": 0.9560546875}, {"start": 1446.66, "end": 1446.88, "word": " this", "probability": 0.94482421875}, {"start": 1446.88, "end": 1447.14, "word": " case,", "probability": 0.8955078125}, {"start": 1447.22, "end": 1447.4, "word": " this", "probability": 0.94482421875}, {"start": 1447.4, "end": 1447.66, "word": " value", "probability": 0.9755859375}, {"start": 1447.66, "end": 1447.78, "word": " is", "probability": 0.841796875}, {"start": 1447.78, "end": 1448.1, "word": " greater", "probability": 0.90771484375}, {"start": 1448.1, "end": 1448.36, "word": " than", "probability": 0.951171875}, {"start": 1448.36, "end": 1448.56, "word": " alpha,", "probability": 0.9052734375}, {"start": 1448.56, "end": 1448.76, "word": " so", "probability": 0.951171875}, {"start": 1448.76, "end": 1448.92, 
"word": " we", "probability": 0.95556640625}, {"start": 1448.92, "end": 1449.14, "word": " don't", "probability": 0.9775390625}, {"start": 1449.14, "end": 1449.48, "word": " reject", "probability": 0.8994140625}, {"start": 1449.48, "end": 1449.68, "word": " the", "probability": 0.9150390625}, {"start": 1449.68, "end": 1449.88, "word": " null.", "probability": 0.96826171875}, {"start": 1450.26, "end": 1450.46, "word": " So", "probability": 0.9658203125}, {"start": 1450.46, "end": 1450.66, "word": " we", "probability": 0.912109375}, {"start": 1450.66, "end": 1450.92, "word": " reach", "probability": 0.80322265625}, {"start": 1450.92, "end": 1451.18, "word": " the", "probability": 0.9150390625}, {"start": 1451.18, "end": 1451.48, "word": " same", "probability": 0.90380859375}, {"start": 1451.48, "end": 1452.0, "word": " decision", "probability": 0.9130859375}, {"start": 1452.0, "end": 1452.46, "word": " as", "probability": 0.94921875}, {"start": 1452.46, "end": 1452.68, "word": " by", "probability": 0.97216796875}, {"start": 1452.68, "end": 1453.12, "word": " using", "probability": 0.93212890625}, {"start": 1453.12, "end": 1454.16, "word": " the", "probability": 0.90869140625}, {"start": 1454.16, "end": 1454.74, "word": " critical", "probability": 0.92724609375}, {"start": 1454.74, "end": 1455.18, "word": " value", "probability": 0.9599609375}, {"start": 1455.18, "end": 1455.64, "word": " approach.", "probability": 0.9111328125}], "temperature": 1.0}, {"id": 53, "seek": 148456, "start": 1457.04, "end": 1484.56, "text": " Any question? So that's for number two. Question number three. 
To test the effectiveness of a business school preparation course, eight students took a general business test before and after the course.", "tokens": [2639, 1168, 30, 407, 300, 311, 337, 1230, 732, 13, 14464, 1230, 1045, 13, 1407, 1500, 264, 21208, 295, 257, 1606, 1395, 13081, 1164, 11, 3180, 1731, 1890, 257, 2674, 1606, 1500, 949, 293, 934, 264, 1164, 13], "avg_logprob": -0.15815304410763276, "compression_ratio": 1.45, "no_speech_prob": 0.0, "words": [{"start": 1457.04, "end": 1457.32, "word": " Any", "probability": 0.5458984375}, {"start": 1457.32, "end": 1457.66, "word": " question?", "probability": 0.703125}, {"start": 1458.64, "end": 1458.94, "word": " So", "probability": 0.89453125}, {"start": 1458.94, "end": 1459.3, "word": " that's", "probability": 0.8740234375}, {"start": 1459.3, "end": 1459.62, "word": " for", "probability": 0.9208984375}, {"start": 1459.62, "end": 1460.02, "word": " number", "probability": 0.908203125}, {"start": 1460.02, "end": 1460.34, "word": " two.", "probability": 0.73779296875}, {"start": 1462.6, "end": 1463.36, "word": " Question", "probability": 0.765625}, {"start": 1463.36, "end": 1463.66, "word": " number", "probability": 0.93408203125}, {"start": 1463.66, "end": 1464.04, "word": " three.", "probability": 0.92041015625}, {"start": 1472.12, "end": 1472.88, "word": " To", "probability": 0.460693359375}, {"start": 1472.88, "end": 1473.2, "word": " test", "probability": 0.8583984375}, {"start": 1473.2, "end": 1473.44, "word": " the", "probability": 0.90185546875}, {"start": 1473.44, "end": 1474.0, "word": " effectiveness", "probability": 0.9072265625}, {"start": 1474.0, "end": 1474.72, "word": " of", "probability": 0.96630859375}, {"start": 1474.72, "end": 1474.92, "word": " a", "probability": 0.978515625}, {"start": 1474.92, "end": 1475.26, "word": " business", "probability": 0.943359375}, {"start": 1475.26, "end": 1475.82, "word": " school", "probability": 0.95263671875}, {"start": 1475.82, "end": 1477.24, "word": " 
preparation", "probability": 0.85498046875}, {"start": 1477.24, "end": 1478.18, "word": " course,", "probability": 0.9501953125}, {"start": 1479.6, "end": 1479.88, "word": " eight", "probability": 0.56689453125}, {"start": 1479.88, "end": 1480.52, "word": " students", "probability": 0.97509765625}, {"start": 1480.52, "end": 1481.04, "word": " took", "probability": 0.927734375}, {"start": 1481.04, "end": 1481.34, "word": " a", "probability": 0.9921875}, {"start": 1481.34, "end": 1481.64, "word": " general", "probability": 0.86181640625}, {"start": 1481.64, "end": 1482.14, "word": " business", "probability": 0.92333984375}, {"start": 1482.14, "end": 1482.52, "word": " test", "probability": 0.8740234375}, {"start": 1482.52, "end": 1483.12, "word": " before", "probability": 0.86572265625}, {"start": 1483.12, "end": 1483.52, "word": " and", "probability": 0.9404296875}, {"start": 1483.52, "end": 1483.96, "word": " after", "probability": 0.82861328125}, {"start": 1483.96, "end": 1484.18, "word": " the", "probability": 0.921875}, {"start": 1484.18, "end": 1484.56, "word": " course.", "probability": 0.96142578125}], "temperature": 1.0}, {"id": 54, "seek": 150463, "start": 1486.03, "end": 1504.63, "text": " Let X1 denote before, and X2 after. 
And the difference is X2 minus X1.", "tokens": [961, 1783, 16, 45708, 949, 11, 293, 1783, 17, 934, 13, 400, 264, 2649, 307, 1783, 17, 3175, 1783, 16, 13], "avg_logprob": -0.38707386634566565, "compression_ratio": 0.9594594594594594, "no_speech_prob": 0.0, "words": [{"start": 1486.03, "end": 1486.49, "word": " Let", "probability": 0.316650390625}, {"start": 1486.49, "end": 1487.21, "word": " X1", "probability": 0.688232421875}, {"start": 1487.21, "end": 1489.75, "word": " denote", "probability": 0.79931640625}, {"start": 1489.75, "end": 1490.33, "word": " before,", "probability": 0.65283203125}, {"start": 1493.01, "end": 1494.51, "word": " and", "probability": 0.8671875}, {"start": 1494.51, "end": 1495.05, "word": " X2", "probability": 0.98681640625}, {"start": 1495.05, "end": 1495.45, "word": " after.", "probability": 0.7841796875}, {"start": 1499.63, "end": 1500.13, "word": " And", "probability": 0.84423828125}, {"start": 1500.13, "end": 1500.27, "word": " the", "probability": 0.833984375}, {"start": 1500.27, "end": 1500.83, "word": " difference", "probability": 0.85205078125}, {"start": 1500.83, "end": 1503.35, "word": " is", "probability": 0.89013671875}, {"start": 1503.35, "end": 1503.81, "word": " X2", "probability": 0.970703125}, {"start": 1503.81, "end": 1504.09, "word": " minus", "probability": 0.85546875}, {"start": 1504.09, "end": 1504.63, "word": " X1.", "probability": 0.99560546875}], "temperature": 1.0}, {"id": 55, "seek": 154400, "start": 1514.78, "end": 1544.0, "text": " The mean of the difference equals 50. And the standard deviation of the difference is 65.03. So sample statistics are sample mean for the difference and sample standard deviation of the difference. So these two values are given. Test to determine the effectiveness of a business school preparation course. 
So what's your goal?", "tokens": [440, 914, 295, 264, 2649, 6915, 2625, 13, 400, 264, 3832, 25163, 295, 264, 2649, 307, 11624, 13, 11592, 13, 407, 6889, 12523, 366, 6889, 914, 337, 264, 2649, 293, 6889, 3832, 25163, 295, 264, 2649, 13, 407, 613, 732, 4190, 366, 2212, 13, 9279, 281, 6997, 264, 21208, 295, 257, 1606, 1395, 13081, 1164, 13, 407, 437, 311, 428, 3387, 30], "avg_logprob": -0.1924603245561085, "compression_ratio": 1.8066298342541436, "no_speech_prob": 0.0, "words": [{"start": 1514.78, "end": 1515.1, "word": " The", "probability": 0.54541015625}, {"start": 1515.1, "end": 1515.34, "word": " mean", "probability": 0.8935546875}, {"start": 1515.34, "end": 1515.52, "word": " of", "probability": 0.93359375}, {"start": 1515.52, "end": 1515.66, "word": " the", "probability": 0.7470703125}, {"start": 1515.66, "end": 1516.1, "word": " difference", "probability": 0.87451171875}, {"start": 1516.1, "end": 1516.48, "word": " equals", "probability": 0.60009765625}, {"start": 1516.48, "end": 1517.02, "word": " 50.", "probability": 0.48486328125}, {"start": 1518.6, "end": 1519.34, "word": " And", "probability": 0.77001953125}, {"start": 1519.34, "end": 1519.54, "word": " the", "probability": 0.80615234375}, {"start": 1519.54, "end": 1519.84, "word": " standard", "probability": 0.9130859375}, {"start": 1519.84, "end": 1520.28, "word": " deviation", "probability": 0.89794921875}, {"start": 1520.28, "end": 1520.52, "word": " of", "probability": 0.93798828125}, {"start": 1520.52, "end": 1520.64, "word": " the", "probability": 0.8916015625}, {"start": 1520.64, "end": 1521.12, "word": " difference", "probability": 0.86865234375}, {"start": 1521.12, "end": 1521.36, "word": " is", "probability": 0.90966796875}, {"start": 1521.36, "end": 1522.04, "word": " 65", "probability": 0.96240234375}, {"start": 1522.04, "end": 1523.9, "word": ".03.", "probability": 0.96923828125}, {"start": 1524.92, "end": 1525.54, "word": " So", "probability": 0.85107421875}, {"start": 1525.54, "end": 
1525.84, "word": " sample", "probability": 0.4794921875}, {"start": 1525.84, "end": 1526.42, "word": " statistics", "probability": 0.82275390625}, {"start": 1526.42, "end": 1527.04, "word": " are", "probability": 0.93896484375}, {"start": 1527.04, "end": 1528.1, "word": " sample", "probability": 0.64013671875}, {"start": 1528.1, "end": 1528.48, "word": " mean", "probability": 0.9482421875}, {"start": 1528.48, "end": 1528.74, "word": " for", "probability": 0.90966796875}, {"start": 1528.74, "end": 1528.9, "word": " the", "probability": 0.908203125}, {"start": 1528.9, "end": 1529.38, "word": " difference", "probability": 0.86328125}, {"start": 1529.38, "end": 1529.66, "word": " and", "probability": 0.86279296875}, {"start": 1529.66, "end": 1530.0, "word": " sample", "probability": 0.84716796875}, {"start": 1530.0, "end": 1530.36, "word": " standard", "probability": 0.91650390625}, {"start": 1530.36, "end": 1530.78, "word": " deviation", "probability": 0.91796875}, {"start": 1530.78, "end": 1531.92, "word": " of", "probability": 0.90234375}, {"start": 1531.92, "end": 1532.04, "word": " the", "probability": 0.90625}, {"start": 1532.04, "end": 1532.32, "word": " difference.", "probability": 0.88818359375}, {"start": 1532.48, "end": 1532.6, "word": " So", "probability": 0.87255859375}, {"start": 1532.6, "end": 1532.82, "word": " these", "probability": 0.8115234375}, {"start": 1532.82, "end": 1533.0, "word": " two", "probability": 0.87060546875}, {"start": 1533.0, "end": 1533.26, "word": " values", "probability": 0.9658203125}, {"start": 1533.26, "end": 1533.44, "word": " are", "probability": 0.9462890625}, {"start": 1533.44, "end": 1533.74, "word": " given.", "probability": 0.89599609375}, {"start": 1535.78, "end": 1536.46, "word": " Test", "probability": 0.70849609375}, {"start": 1536.46, "end": 1536.86, "word": " to", "probability": 0.93359375}, {"start": 1536.86, "end": 1537.36, "word": " determine", "probability": 0.88623046875}, {"start": 1537.36, "end": 1538.32, 
"word": " the", "probability": 0.89892578125}, {"start": 1538.32, "end": 1538.82, "word": " effectiveness", "probability": 0.89990234375}, {"start": 1538.82, "end": 1539.36, "word": " of", "probability": 0.966796875}, {"start": 1539.36, "end": 1539.5, "word": " a", "probability": 0.9658203125}, {"start": 1539.5, "end": 1539.8, "word": " business", "probability": 0.91748046875}, {"start": 1539.8, "end": 1540.2, "word": " school", "probability": 0.90673828125}, {"start": 1540.2, "end": 1540.66, "word": " preparation", "probability": 0.95458984375}, {"start": 1540.66, "end": 1541.16, "word": " course.", "probability": 0.95361328125}, {"start": 1542.64, "end": 1543.3, "word": " So", "probability": 0.93994140625}, {"start": 1543.3, "end": 1543.66, "word": " what's", "probability": 0.8095703125}, {"start": 1543.66, "end": 1543.84, "word": " your", "probability": 0.89111328125}, {"start": 1543.84, "end": 1544.0, "word": " goal?", "probability": 0.2266845703125}], "temperature": 1.0}, {"id": 56, "seek": 157208, "start": 1545.68, "end": 1572.08, "text": " An alternative, null equals zero. An alternative should be greater than zero. Because D is X2 minus X1. So effective, it means after is better than before. So my score after taking the course is better than before taking the course. 
So X in UD is positive.", "tokens": [1107, 8535, 11, 18184, 6915, 4018, 13, 1107, 8535, 820, 312, 5044, 813, 4018, 13, 1436, 413, 307, 1783, 17, 3175, 1783, 16, 13, 407, 4942, 11, 309, 1355, 934, 307, 1101, 813, 949, 13, 407, 452, 6175, 934, 1940, 264, 1164, 307, 1101, 813, 949, 1940, 264, 1164, 13, 407, 1783, 294, 624, 35, 307, 3353, 13], "avg_logprob": -0.27171610371541166, "compression_ratio": 1.6688311688311688, "no_speech_prob": 0.0, "words": [{"start": 1545.68, "end": 1545.96, "word": " An", "probability": 0.262939453125}, {"start": 1545.96, "end": 1546.42, "word": " alternative,", "probability": 0.65625}, {"start": 1546.62, "end": 1546.84, "word": " null", "probability": 0.4013671875}, {"start": 1546.84, "end": 1547.2, "word": " equals", "probability": 0.53271484375}, {"start": 1547.2, "end": 1547.5, "word": " zero.", "probability": 0.74365234375}, {"start": 1547.58, "end": 1547.66, "word": " An", "probability": 0.8896484375}, {"start": 1547.66, "end": 1548.12, "word": " alternative", "probability": 0.93896484375}, {"start": 1548.12, "end": 1552.34, "word": " should", "probability": 0.8505859375}, {"start": 1552.34, "end": 1552.52, "word": " be", "probability": 0.95654296875}, {"start": 1552.52, "end": 1552.86, "word": " greater", "probability": 0.9140625}, {"start": 1552.86, "end": 1553.16, "word": " than", "probability": 0.94873046875}, {"start": 1553.16, "end": 1553.46, "word": " zero.", "probability": 0.86962890625}, {"start": 1554.62, "end": 1555.26, "word": " Because", "probability": 0.9189453125}, {"start": 1555.26, "end": 1556.42, "word": " D", "probability": 0.222900390625}, {"start": 1556.42, "end": 1556.7, "word": " is", "probability": 0.9326171875}, {"start": 1556.7, "end": 1557.08, "word": " X2", "probability": 0.78271484375}, {"start": 1557.08, "end": 1557.34, "word": " minus", "probability": 0.95947265625}, {"start": 1557.34, "end": 1557.9, "word": " X1.", "probability": 0.99365234375}, {"start": 1558.22, "end": 1558.36, "word": " So", 
"probability": 0.95361328125}, {"start": 1558.36, "end": 1558.78, "word": " effective,", "probability": 0.5654296875}, {"start": 1559.04, "end": 1559.14, "word": " it", "probability": 0.9375}, {"start": 1559.14, "end": 1559.52, "word": " means", "probability": 0.93359375}, {"start": 1559.52, "end": 1560.98, "word": " after", "probability": 0.67578125}, {"start": 1560.98, "end": 1561.88, "word": " is", "probability": 0.47412109375}, {"start": 1561.88, "end": 1562.18, "word": " better", "probability": 0.90869140625}, {"start": 1562.18, "end": 1562.44, "word": " than", "probability": 0.95263671875}, {"start": 1562.44, "end": 1562.84, "word": " before.", "probability": 0.86083984375}, {"start": 1563.68, "end": 1563.88, "word": " So", "probability": 0.9609375}, {"start": 1563.88, "end": 1564.14, "word": " my", "probability": 0.89892578125}, {"start": 1564.14, "end": 1564.78, "word": " score", "probability": 0.87060546875}, {"start": 1564.78, "end": 1565.84, "word": " after", "probability": 0.7099609375}, {"start": 1565.84, "end": 1566.28, "word": " taking", "probability": 0.9072265625}, {"start": 1566.28, "end": 1566.5, "word": " the", "probability": 0.9189453125}, {"start": 1566.5, "end": 1566.88, "word": " course", "probability": 0.96533203125}, {"start": 1566.88, "end": 1567.64, "word": " is", "probability": 0.8359375}, {"start": 1567.64, "end": 1567.96, "word": " better", "probability": 0.89453125}, {"start": 1567.96, "end": 1568.42, "word": " than", "probability": 0.947265625}, {"start": 1568.42, "end": 1568.86, "word": " before", "probability": 0.8701171875}, {"start": 1568.86, "end": 1569.18, "word": " taking", "probability": 0.89990234375}, {"start": 1569.18, "end": 1569.4, "word": " the", "probability": 0.91162109375}, {"start": 1569.4, "end": 1569.68, "word": " course.", "probability": 0.95703125}, {"start": 1570.14, "end": 1570.4, "word": " So", "probability": 0.96337890625}, {"start": 1570.4, "end": 1570.74, "word": " X", "probability": 0.79833984375}, 
{"start": 1570.74, "end": 1571.1, "word": " in", "probability": 0.2025146484375}, {"start": 1571.1, "end": 1571.32, "word": " UD", "probability": 0.828125}, {"start": 1571.32, "end": 1571.52, "word": " is", "probability": 0.94580078125}, {"start": 1571.52, "end": 1572.08, "word": " positive.", "probability": 0.94189453125}], "temperature": 1.0}, {"id": 57, "seek": 160159, "start": 1579.09, "end": 1601.59, "text": " T is D bar minus 0 divided by SD over square root of A. D bar is 50 divided by 65 divided by", "tokens": [314, 307, 413, 2159, 3175, 1958, 6666, 538, 14638, 670, 3732, 5593, 295, 316, 13, 413, 2159, 307, 2625, 6666, 538, 11624, 6666, 538], "avg_logprob": -0.3440625, "compression_ratio": 1.24, "no_speech_prob": 0.0, "words": [{"start": 1579.09, "end": 1580.09, "word": " T", "probability": 0.16748046875}, {"start": 1580.09, "end": 1581.09, "word": " is", "probability": 0.671875}, {"start": 1581.09, "end": 1581.33, "word": " D", "probability": 0.69580078125}, {"start": 1581.33, "end": 1581.61, "word": " bar", "probability": 0.69873046875}, {"start": 1581.61, "end": 1583.01, "word": " minus", "probability": 0.92431640625}, {"start": 1583.01, "end": 1583.47, "word": " 0", "probability": 0.374267578125}, {"start": 1583.47, "end": 1584.07, "word": " divided", "probability": 0.73583984375}, {"start": 1584.07, "end": 1584.31, "word": " by", "probability": 0.97412109375}, {"start": 1584.31, "end": 1584.81, "word": " SD", "probability": 0.74267578125}, {"start": 1584.81, "end": 1586.57, "word": " over", "probability": 0.869140625}, {"start": 1586.57, "end": 1587.29, "word": " square", "probability": 0.75537109375}, {"start": 1587.29, "end": 1587.51, "word": " root", "probability": 0.93701171875}, {"start": 1587.51, "end": 1587.61, "word": " of", "probability": 0.560546875}, {"start": 1587.61, "end": 1587.83, "word": " A.", "probability": 0.54833984375}, {"start": 1590.15, "end": 1590.69, "word": " D", "probability": 0.8857421875}, {"start": 1590.69, "end": 1590.91, 
"word": " bar", "probability": 0.88916015625}, {"start": 1590.91, "end": 1591.09, "word": " is", "probability": 0.91845703125}, {"start": 1591.09, "end": 1591.47, "word": " 50", "probability": 0.634765625}, {"start": 1591.47, "end": 1594.03, "word": " divided", "probability": 0.66162109375}, {"start": 1594.03, "end": 1594.47, "word": " by", "probability": 0.96826171875}, {"start": 1594.47, "end": 1596.07, "word": " 65", "probability": 0.93603515625}, {"start": 1596.07, "end": 1601.09, "word": " divided", "probability": 0.73681640625}, {"start": 1601.09, "end": 1601.59, "word": " by", "probability": 0.97216796875}], "temperature": 1.0}, {"id": 58, "seek": 163093, "start": 1602.57, "end": 1630.93, "text": " Square root of 8. So 50 divided by square root of 8, 2.17. Now Yumi used the critical value approach. So my critical value is T alpha.", "tokens": [16463, 5593, 295, 1649, 13, 407, 2625, 6666, 538, 3732, 5593, 295, 1649, 11, 568, 13, 7773, 13, 823, 398, 17800, 1143, 264, 4924, 2158, 3109, 13, 407, 452, 4924, 2158, 307, 314, 8961, 13], "avg_logprob": -0.2810329861111111, "compression_ratio": 1.2857142857142858, "no_speech_prob": 0.0, "words": [{"start": 1602.57, "end": 1603.09, "word": " Square", "probability": 0.1951904296875}, {"start": 1603.09, "end": 1603.33, "word": " root", "probability": 0.8935546875}, {"start": 1603.33, "end": 1603.51, "word": " of", "probability": 0.9541015625}, {"start": 1603.51, "end": 1603.81, "word": " 8.", "probability": 0.64111328125}, {"start": 1606.05, "end": 1606.87, "word": " So", "probability": 0.6689453125}, {"start": 1606.87, "end": 1607.43, "word": " 50", "probability": 0.63916015625}, {"start": 1607.43, "end": 1608.13, "word": " divided", "probability": 0.65283203125}, {"start": 1608.13, "end": 1608.81, "word": " by", "probability": 0.974609375}, {"start": 1608.81, "end": 1614.49, "word": " square", "probability": 0.467041015625}, {"start": 1614.49, "end": 1614.97, "word": " root", "probability": 0.935546875}, {"start": 
1614.97, "end": 1616.15, "word": " of", "probability": 0.8154296875}, {"start": 1616.15, "end": 1616.43, "word": " 8,", "probability": 0.6103515625}, {"start": 1616.99, "end": 1617.25, "word": " 2", "probability": 0.97802734375}, {"start": 1617.25, "end": 1617.91, "word": ".17.", "probability": 0.97314453125}, {"start": 1624.07, "end": 1624.91, "word": " Now", "probability": 0.921875}, {"start": 1624.91, "end": 1625.49, "word": " Yumi", "probability": 0.6558837890625}, {"start": 1625.49, "end": 1625.91, "word": " used", "probability": 0.83837890625}, {"start": 1625.91, "end": 1626.21, "word": " the", "probability": 0.8310546875}, {"start": 1626.21, "end": 1626.55, "word": " critical", "probability": 0.896484375}, {"start": 1626.55, "end": 1626.89, "word": " value", "probability": 0.95947265625}, {"start": 1626.89, "end": 1627.29, "word": " approach.", "probability": 0.876953125}, {"start": 1628.43, "end": 1629.27, "word": " So", "probability": 0.9365234375}, {"start": 1629.27, "end": 1629.57, "word": " my", "probability": 0.90869140625}, {"start": 1629.57, "end": 1629.91, "word": " critical", "probability": 0.9267578125}, {"start": 1629.91, "end": 1630.21, "word": " value", "probability": 0.966796875}, {"start": 1630.21, "end": 1630.45, "word": " is", "probability": 0.93017578125}, {"start": 1630.45, "end": 1630.57, "word": " T", "probability": 0.859375}, {"start": 1630.57, "end": 1630.93, "word": " alpha.", "probability": 0.68310546875}], "temperature": 1.0}, {"id": 59, "seek": 165934, "start": 1633.68, "end": 1659.34, "text": " And degrees of freedom is 7. It's upper 10. So it's plus. So it's T alpha 0, 5. And DF is 7, because N equals 8. 
Now by using the table, at 7 degrees of freedom, so at 7,", "tokens": [400, 5310, 295, 5645, 307, 1614, 13, 467, 311, 6597, 1266, 13, 407, 309, 311, 1804, 13, 407, 309, 311, 314, 8961, 1958, 11, 1025, 13, 400, 48336, 307, 1614, 11, 570, 426, 6915, 1649, 13, 823, 538, 1228, 264, 3199, 11, 412, 1614, 5310, 295, 5645, 11, 370, 412, 1614, 11], "avg_logprob": -0.2995283041360243, "compression_ratio": 1.3153846153846154, "no_speech_prob": 0.0, "words": [{"start": 1633.68, "end": 1633.98, "word": " And", "probability": 0.294921875}, {"start": 1633.98, "end": 1634.22, "word": " degrees", "probability": 0.84228515625}, {"start": 1634.22, "end": 1634.4, "word": " of", "probability": 0.9560546875}, {"start": 1634.4, "end": 1634.68, "word": " freedom", "probability": 0.93701171875}, {"start": 1634.68, "end": 1634.9, "word": " is", "probability": 0.89306640625}, {"start": 1634.9, "end": 1635.18, "word": " 7.", "probability": 0.53173828125}, {"start": 1637.04, "end": 1637.7, "word": " It's", "probability": 0.8740234375}, {"start": 1637.7, "end": 1638.0, "word": " upper", "probability": 0.3310546875}, {"start": 1638.0, "end": 1638.38, "word": " 10.", "probability": 0.51708984375}, {"start": 1639.86, "end": 1640.14, "word": " So", "probability": 0.74462890625}, {"start": 1640.14, "end": 1640.32, "word": " it's", "probability": 0.7861328125}, {"start": 1640.32, "end": 1640.74, "word": " plus.", "probability": 0.8662109375}, {"start": 1641.12, "end": 1641.12, "word": " So", "probability": 0.8916015625}, {"start": 1641.12, "end": 1641.22, "word": " it's", "probability": 0.916259765625}, {"start": 1641.22, "end": 1641.62, "word": " T", "probability": 0.599609375}, {"start": 1641.62, "end": 1642.42, "word": " alpha", "probability": 0.6923828125}, {"start": 1642.42, "end": 1644.44, "word": " 0,", "probability": 0.703125}, {"start": 1644.52, "end": 1644.84, "word": " 5.", "probability": 0.56298828125}, {"start": 1645.32, "end": 1645.74, "word": " And", "probability": 0.95751953125}, 
{"start": 1645.74, "end": 1646.54, "word": " DF", "probability": 0.55126953125}, {"start": 1646.54, "end": 1647.02, "word": " is", "probability": 0.9443359375}, {"start": 1647.02, "end": 1647.3, "word": " 7,", "probability": 0.9541015625}, {"start": 1647.32, "end": 1647.7, "word": " because", "probability": 0.90478515625}, {"start": 1647.7, "end": 1647.94, "word": " N", "probability": 0.7626953125}, {"start": 1647.94, "end": 1648.26, "word": " equals", "probability": 0.935546875}, {"start": 1648.26, "end": 1648.52, "word": " 8.", "probability": 0.80859375}, {"start": 1649.84, "end": 1650.3, "word": " Now", "probability": 0.96484375}, {"start": 1650.3, "end": 1650.5, "word": " by", "probability": 0.5244140625}, {"start": 1650.5, "end": 1650.74, "word": " using", "probability": 0.93408203125}, {"start": 1650.74, "end": 1650.94, "word": " the", "probability": 0.88134765625}, {"start": 1650.94, "end": 1651.28, "word": " table,", "probability": 0.876953125}, {"start": 1652.74, "end": 1653.12, "word": " at", "probability": 0.90478515625}, {"start": 1653.12, "end": 1653.82, "word": " 7", "probability": 0.77734375}, {"start": 1653.82, "end": 1654.12, "word": " degrees", "probability": 0.9521484375}, {"start": 1654.12, "end": 1654.34, "word": " of", "probability": 0.97119140625}, {"start": 1654.34, "end": 1654.68, "word": " freedom,", "probability": 0.93310546875}, {"start": 1658.22, "end": 1658.86, "word": " so", "probability": 0.923828125}, {"start": 1658.86, "end": 1659.02, "word": " at", "probability": 0.8935546875}, {"start": 1659.02, "end": 1659.34, "word": " 7,", "probability": 0.96044921875}], "temperature": 1.0}, {"id": 60, "seek": 168702, "start": 1673.56, "end": 1687.02, "text": " So my T value is greater than the critical region, so we reject the null hypothesis.", "tokens": [407, 452, 314, 2158, 307, 5044, 813, 264, 4924, 4458, 11, 370, 321, 8248, 264, 18184, 17291, 13], "avg_logprob": -0.34107729322031927, "compression_ratio": 1.0625, "no_speech_prob": 0.0, 
"words": [{"start": 1673.5600000000002, "end": 1674.38, "word": " So", "probability": 0.201171875}, {"start": 1674.38, "end": 1675.2, "word": " my", "probability": 0.56884765625}, {"start": 1675.2, "end": 1675.4, "word": " T", "probability": 0.7041015625}, {"start": 1675.4, "end": 1675.78, "word": " value", "probability": 0.87060546875}, {"start": 1675.78, "end": 1676.22, "word": " is", "probability": 0.91845703125}, {"start": 1676.22, "end": 1679.16, "word": " greater", "probability": 0.413330078125}, {"start": 1679.16, "end": 1679.66, "word": " than", "probability": 0.93212890625}, {"start": 1679.66, "end": 1683.38, "word": " the", "probability": 0.802734375}, {"start": 1683.38, "end": 1683.74, "word": " critical", "probability": 0.908203125}, {"start": 1683.74, "end": 1684.12, "word": " region,", "probability": 0.89794921875}, {"start": 1684.22, "end": 1684.34, "word": " so", "probability": 0.904296875}, {"start": 1684.34, "end": 1684.48, "word": " we", "probability": 0.89111328125}, {"start": 1684.48, "end": 1684.94, "word": " reject", "probability": 0.88330078125}, {"start": 1684.94, "end": 1686.24, "word": " the", "probability": 0.86328125}, {"start": 1686.24, "end": 1686.42, "word": " null", "probability": 0.88720703125}, {"start": 1686.42, "end": 1687.02, "word": " hypothesis.", "probability": 0.85595703125}], "temperature": 1.0}, {"id": 61, "seek": 171506, "start": 1690.74, "end": 1715.06, "text": " The rejection region starts from 1.9895 and this value actually greater than 1.8. So since it falls in the rejection region, then we reject the null hypothesis. 
It means that taking the course,", "tokens": [440, 26044, 4458, 3719, 490, 502, 13, 22516, 15718, 293, 341, 2158, 767, 5044, 813, 502, 13, 23, 13, 407, 1670, 309, 8804, 294, 264, 26044, 4458, 11, 550, 321, 8248, 264, 18184, 17291, 13, 467, 1355, 300, 1940, 264, 1164, 11], "avg_logprob": -0.3068677408750667, "compression_ratio": 1.4057971014492754, "no_speech_prob": 0.0, "words": [{"start": 1690.74, "end": 1691.0, "word": " The", "probability": 0.2386474609375}, {"start": 1691.0, "end": 1691.3, "word": " rejection", "probability": 0.8564453125}, {"start": 1691.3, "end": 1691.66, "word": " region", "probability": 0.88623046875}, {"start": 1691.66, "end": 1692.32, "word": " starts", "probability": 0.81884765625}, {"start": 1692.32, "end": 1694.48, "word": " from", "probability": 0.8291015625}, {"start": 1694.48, "end": 1694.74, "word": " 1", "probability": 0.9208984375}, {"start": 1694.74, "end": 1696.0, "word": ".9895", "probability": 0.7246907552083334}, {"start": 1696.0, "end": 1697.44, "word": " and", "probability": 0.59765625}, {"start": 1697.44, "end": 1697.7, "word": " this", "probability": 0.90869140625}, {"start": 1697.7, "end": 1698.04, "word": " value", "probability": 0.97412109375}, {"start": 1698.04, "end": 1698.68, "word": " actually", "probability": 0.568359375}, {"start": 1698.68, "end": 1700.78, "word": " greater", "probability": 0.439453125}, {"start": 1700.78, "end": 1701.16, "word": " than", "probability": 0.9267578125}, {"start": 1701.16, "end": 1701.36, "word": " 1", "probability": 0.974609375}, {"start": 1701.36, "end": 1701.92, "word": ".8.", "probability": 0.938232421875}, {"start": 1703.22, "end": 1703.76, "word": " So", "probability": 0.87451171875}, {"start": 1703.76, "end": 1704.12, "word": " since", "probability": 0.654296875}, {"start": 1704.12, "end": 1704.32, "word": " it", "probability": 0.6025390625}, {"start": 1704.32, "end": 1704.8, "word": " falls", "probability": 0.58447265625}, {"start": 1704.8, "end": 1705.64, "word": " in", 
"probability": 0.91455078125}, {"start": 1705.64, "end": 1705.82, "word": " the", "probability": 0.9111328125}, {"start": 1705.82, "end": 1706.16, "word": " rejection", "probability": 0.94189453125}, {"start": 1706.16, "end": 1706.7, "word": " region,", "probability": 0.94384765625}, {"start": 1707.42, "end": 1707.78, "word": " then", "probability": 0.84814453125}, {"start": 1707.78, "end": 1708.28, "word": " we", "probability": 0.94580078125}, {"start": 1708.28, "end": 1709.18, "word": " reject", "probability": 0.90234375}, {"start": 1709.18, "end": 1710.12, "word": " the", "probability": 0.45947265625}, {"start": 1710.12, "end": 1710.32, "word": " null", "probability": 0.32177734375}, {"start": 1710.32, "end": 1710.84, "word": " hypothesis.", "probability": 0.89111328125}, {"start": 1711.8, "end": 1712.22, "word": " It", "probability": 0.9306640625}, {"start": 1712.22, "end": 1712.78, "word": " means", "probability": 0.923828125}, {"start": 1712.78, "end": 1713.16, "word": " that", "probability": 0.91259765625}, {"start": 1713.16, "end": 1714.4, "word": " taking", "probability": 0.794921875}, {"start": 1714.4, "end": 1714.68, "word": " the", "probability": 0.91064453125}, {"start": 1714.68, "end": 1715.06, "word": " course,", "probability": 0.9609375}], "temperature": 1.0}, {"id": 62, "seek": 174353, "start": 1716.37, "end": 1743.53, "text": " improves your score. So we have sufficient evidence to support the alternative hypothesis. That's for number three. The other part, the other part. A statistician selected a sample of 16 receivable accounts. 
He reported that the sample information", "tokens": [24771, 428, 6175, 13, 407, 321, 362, 11563, 4467, 281, 1406, 264, 8535, 17291, 13, 663, 311, 337, 1230, 1045, 13, 440, 661, 644, 11, 264, 661, 644, 13, 316, 29588, 952, 8209, 257, 6889, 295, 3165, 2268, 34376, 9402, 13, 634, 7055, 300, 264, 6889, 1589], "avg_logprob": -0.17399088200181723, "compression_ratio": 1.4588235294117646, "no_speech_prob": 0.0, "words": [{"start": 1716.37, "end": 1717.01, "word": " improves", "probability": 0.68017578125}, {"start": 1717.01, "end": 1717.37, "word": " your", "probability": 0.896484375}, {"start": 1717.37, "end": 1717.93, "word": " score.", "probability": 0.86865234375}, {"start": 1718.85, "end": 1718.99, "word": " So", "probability": 0.77001953125}, {"start": 1718.99, "end": 1719.09, "word": " we", "probability": 0.51025390625}, {"start": 1719.09, "end": 1719.21, "word": " have", "probability": 0.9443359375}, {"start": 1719.21, "end": 1719.69, "word": " sufficient", "probability": 0.90576171875}, {"start": 1719.69, "end": 1720.05, "word": " evidence", "probability": 0.939453125}, {"start": 1720.05, "end": 1720.27, "word": " to", "probability": 0.859375}, {"start": 1720.27, "end": 1720.75, "word": " support", "probability": 0.9873046875}, {"start": 1720.75, "end": 1721.55, "word": " the", "probability": 0.87841796875}, {"start": 1721.55, "end": 1722.13, "word": " alternative", "probability": 0.9326171875}, {"start": 1722.13, "end": 1723.01, "word": " hypothesis.", "probability": 0.85302734375}, {"start": 1724.33, "end": 1724.73, "word": " That's", "probability": 0.8388671875}, {"start": 1724.73, "end": 1725.05, "word": " for", "probability": 0.921875}, {"start": 1725.05, "end": 1725.51, "word": " number", "probability": 0.8876953125}, {"start": 1725.51, "end": 1725.93, "word": " three.", "probability": 0.796875}, {"start": 1727.55, "end": 1727.79, "word": " The", "probability": 0.90087890625}, {"start": 1727.79, "end": 1728.05, "word": " other", "probability": 
0.8955078125}, {"start": 1728.05, "end": 1728.51, "word": " part,", "probability": 0.9033203125}, {"start": 1730.19, "end": 1730.35, "word": " the", "probability": 0.5751953125}, {"start": 1730.35, "end": 1730.65, "word": " other", "probability": 0.90478515625}, {"start": 1730.65, "end": 1731.13, "word": " part.", "probability": 0.90966796875}, {"start": 1734.29, "end": 1734.81, "word": " A", "probability": 0.499267578125}, {"start": 1734.81, "end": 1735.51, "word": " statistician", "probability": 0.926513671875}, {"start": 1735.51, "end": 1736.13, "word": " selected", "probability": 0.87890625}, {"start": 1736.13, "end": 1736.59, "word": " a", "probability": 0.96630859375}, {"start": 1736.59, "end": 1736.85, "word": " sample", "probability": 0.91015625}, {"start": 1736.85, "end": 1737.03, "word": " of", "probability": 0.97021484375}, {"start": 1737.03, "end": 1737.69, "word": " 16", "probability": 0.85400390625}, {"start": 1737.69, "end": 1738.55, "word": " receivable", "probability": 0.943603515625}, {"start": 1738.55, "end": 1739.25, "word": " accounts.", "probability": 0.8779296875}, {"start": 1740.43, "end": 1740.65, "word": " He", "probability": 0.9580078125}, {"start": 1740.65, "end": 1741.07, "word": " reported", "probability": 0.8857421875}, {"start": 1741.07, "end": 1741.51, "word": " that", "probability": 0.91796875}, {"start": 1741.51, "end": 1742.35, "word": " the", "probability": 0.8720703125}, {"start": 1742.35, "end": 1742.75, "word": " sample", "probability": 0.79736328125}, {"start": 1742.75, "end": 1743.53, "word": " information", "probability": 0.84765625}], "temperature": 1.0}, {"id": 63, "seek": 176191, "start": 1744.69, "end": 1761.91, "text": " indicated the mean of the population ranges from these two values. 
So we have lower and upper limits, which are given by 4739.", "tokens": [16176, 264, 914, 295, 264, 4415, 22526, 490, 613, 732, 4190, 13, 407, 321, 362, 3126, 293, 6597, 10406, 11, 597, 366, 2212, 538, 16953, 12493, 13], "avg_logprob": -0.1862444132566452, "compression_ratio": 1.1869158878504673, "no_speech_prob": 0.0, "words": [{"start": 1744.69, "end": 1745.35, "word": " indicated", "probability": 0.34130859375}, {"start": 1745.35, "end": 1745.65, "word": " the", "probability": 0.7900390625}, {"start": 1745.65, "end": 1745.95, "word": " mean", "probability": 0.9052734375}, {"start": 1745.95, "end": 1746.27, "word": " of", "probability": 0.95263671875}, {"start": 1746.27, "end": 1746.41, "word": " the", "probability": 0.80126953125}, {"start": 1746.41, "end": 1746.81, "word": " population", "probability": 0.970703125}, {"start": 1746.81, "end": 1747.31, "word": " ranges", "probability": 0.916015625}, {"start": 1747.31, "end": 1747.79, "word": " from", "probability": 0.8935546875}, {"start": 1747.79, "end": 1749.01, "word": " these", "probability": 0.8837890625}, {"start": 1749.01, "end": 1749.23, "word": " two", "probability": 0.8369140625}, {"start": 1749.23, "end": 1749.69, "word": " values.", "probability": 0.96875}, {"start": 1751.11, "end": 1751.51, "word": " So", "probability": 0.916015625}, {"start": 1751.51, "end": 1751.63, "word": " we", "probability": 0.488525390625}, {"start": 1751.63, "end": 1751.75, "word": " have", "probability": 0.94873046875}, {"start": 1751.75, "end": 1752.07, "word": " lower", "probability": 0.92529296875}, {"start": 1752.07, "end": 1752.33, "word": " and", "probability": 0.94384765625}, {"start": 1752.33, "end": 1752.73, "word": " upper", "probability": 0.86474609375}, {"start": 1752.73, "end": 1754.19, "word": " limits,", "probability": 0.970703125}, {"start": 1754.67, "end": 1756.47, "word": " which", "probability": 0.9404296875}, {"start": 1756.47, "end": 1756.93, "word": " are", "probability": 0.94580078125}, {"start": 
1756.93, "end": 1758.47, "word": " given", "probability": 0.89013671875}, {"start": 1758.47, "end": 1758.89, "word": " by", "probability": 0.97509765625}, {"start": 1758.89, "end": 1761.91, "word": " 4739.", "probability": 0.956298828125}], "temperature": 1.0}, {"id": 64, "seek": 179634, "start": 1776.5, "end": 1796.34, "text": " So the mean of the population ranges between these two values. And in addition to that, we have information about the sample standard deviation is 400.", "tokens": [407, 264, 914, 295, 264, 4415, 22526, 1296, 613, 732, 4190, 13, 400, 294, 4500, 281, 300, 11, 321, 362, 1589, 466, 264, 6889, 3832, 25163, 307, 8423, 13], "avg_logprob": -0.20937500248352686, "compression_ratio": 1.2991452991452992, "no_speech_prob": 0.0, "words": [{"start": 1776.5, "end": 1777.46, "word": " So", "probability": 0.492919921875}, {"start": 1777.46, "end": 1778.42, "word": " the", "probability": 0.492431640625}, {"start": 1778.42, "end": 1778.54, "word": " mean", "probability": 0.8828125}, {"start": 1778.54, "end": 1778.66, "word": " of", "probability": 0.95361328125}, {"start": 1778.66, "end": 1778.76, "word": " the", "probability": 0.8525390625}, {"start": 1778.76, "end": 1779.24, "word": " population", "probability": 0.9794921875}, {"start": 1779.24, "end": 1781.08, "word": " ranges", "probability": 0.88232421875}, {"start": 1781.08, "end": 1781.66, "word": " between", "probability": 0.87451171875}, {"start": 1781.66, "end": 1782.4, "word": " these", "probability": 0.87744140625}, {"start": 1782.4, "end": 1782.58, "word": " two", "probability": 0.89013671875}, {"start": 1782.58, "end": 1783.02, "word": " values.", "probability": 0.96630859375}, {"start": 1784.96, "end": 1785.92, "word": " And", "probability": 0.87646484375}, {"start": 1785.92, "end": 1786.7, "word": " in", "probability": 0.73681640625}, {"start": 1786.7, "end": 1787.0, "word": " addition", "probability": 0.94775390625}, {"start": 1787.0, "end": 1787.22, "word": " to", "probability": 
0.96630859375}, {"start": 1787.22, "end": 1787.42, "word": " that,", "probability": 0.93994140625}, {"start": 1787.48, "end": 1787.58, "word": " we", "probability": 0.955078125}, {"start": 1787.58, "end": 1787.88, "word": " have", "probability": 0.9482421875}, {"start": 1787.88, "end": 1788.54, "word": " information", "probability": 0.83056640625}, {"start": 1788.54, "end": 1789.02, "word": " about", "probability": 0.89208984375}, {"start": 1789.02, "end": 1790.1, "word": " the", "probability": 0.861328125}, {"start": 1790.1, "end": 1790.44, "word": " sample", "probability": 0.87158203125}, {"start": 1790.44, "end": 1790.88, "word": " standard", "probability": 0.85791015625}, {"start": 1790.88, "end": 1791.38, "word": " deviation", "probability": 0.9423828125}, {"start": 1791.38, "end": 1795.92, "word": " is", "probability": 0.413330078125}, {"start": 1795.92, "end": 1796.34, "word": " 400.", "probability": 0.87109375}], "temperature": 1.0}, {"id": 65, "seek": 182838, "start": 1799.5, "end": 1828.38, "text": " The statistician neglected to report what confidence level he had used. So we don't know C level. So C level is unknown, which actually is 1 minus alpha. Based on the above information, what's the confidence level? 
So we are looking for C level.", "tokens": [440, 29588, 952, 32701, 281, 2275, 437, 6687, 1496, 415, 632, 1143, 13, 407, 321, 500, 380, 458, 383, 1496, 13, 407, 383, 1496, 307, 9841, 11, 597, 767, 307, 502, 3175, 8961, 13, 18785, 322, 264, 3673, 1589, 11, 437, 311, 264, 6687, 1496, 30, 407, 321, 366, 1237, 337, 383, 1496, 13], "avg_logprob": -0.17159091288393194, "compression_ratio": 1.50920245398773, "no_speech_prob": 0.0, "words": [{"start": 1799.5, "end": 1799.74, "word": " The", "probability": 0.337158203125}, {"start": 1799.74, "end": 1800.72, "word": " statistician", "probability": 0.904296875}, {"start": 1800.72, "end": 1801.4, "word": " neglected", "probability": 0.91015625}, {"start": 1801.4, "end": 1801.78, "word": " to", "probability": 0.94970703125}, {"start": 1801.78, "end": 1802.34, "word": " report", "probability": 0.953125}, {"start": 1802.34, "end": 1803.26, "word": " what", "probability": 0.9033203125}, {"start": 1803.26, "end": 1803.94, "word": " confidence", "probability": 0.97705078125}, {"start": 1803.94, "end": 1804.38, "word": " level", "probability": 0.95263671875}, {"start": 1804.38, "end": 1804.72, "word": " he", "probability": 0.95263671875}, {"start": 1804.72, "end": 1804.92, "word": " had", "probability": 0.87744140625}, {"start": 1804.92, "end": 1805.28, "word": " used.", "probability": 0.921875}, {"start": 1806.22, "end": 1806.64, "word": " So", "probability": 0.94189453125}, {"start": 1806.64, "end": 1806.8, "word": " we", "probability": 0.476806640625}, {"start": 1806.8, "end": 1807.02, "word": " don't", "probability": 0.87255859375}, {"start": 1807.02, "end": 1807.26, "word": " know", "probability": 0.89794921875}, {"start": 1807.26, "end": 1807.44, "word": " C", "probability": 0.82861328125}, {"start": 1807.44, "end": 1807.7, "word": " level.", "probability": 0.76220703125}, {"start": 1809.14, "end": 1809.46, "word": " So", "probability": 0.55615234375}, {"start": 1809.46, "end": 1809.66, "word": " C", "probability": 
0.8095703125}, {"start": 1809.66, "end": 1810.04, "word": " level", "probability": 0.9091796875}, {"start": 1810.04, "end": 1812.52, "word": " is", "probability": 0.89453125}, {"start": 1812.52, "end": 1812.94, "word": " unknown,", "probability": 0.8095703125}, {"start": 1813.12, "end": 1813.2, "word": " which", "probability": 0.90771484375}, {"start": 1813.2, "end": 1813.74, "word": " actually", "probability": 0.81787109375}, {"start": 1813.74, "end": 1814.0, "word": " is", "probability": 0.9296875}, {"start": 1814.0, "end": 1814.18, "word": " 1", "probability": 0.55908203125}, {"start": 1814.18, "end": 1814.44, "word": " minus", "probability": 0.8916015625}, {"start": 1814.44, "end": 1814.76, "word": " alpha.", "probability": 0.89892578125}, {"start": 1820.98, "end": 1821.48, "word": " Based", "probability": 0.65576171875}, {"start": 1821.48, "end": 1823.3, "word": " on", "probability": 0.9482421875}, {"start": 1823.3, "end": 1823.56, "word": " the", "probability": 0.91259765625}, {"start": 1823.56, "end": 1823.9, "word": " above", "probability": 0.95751953125}, {"start": 1823.9, "end": 1824.5, "word": " information,", "probability": 0.8369140625}, {"start": 1824.84, "end": 1825.2, "word": " what's", "probability": 0.922607421875}, {"start": 1825.2, "end": 1825.36, "word": " the", "probability": 0.92236328125}, {"start": 1825.36, "end": 1825.86, "word": " confidence", "probability": 0.984375}, {"start": 1825.86, "end": 1826.2, "word": " level?", "probability": 0.9453125}, {"start": 1826.36, "end": 1826.52, "word": " So", "probability": 0.95361328125}, {"start": 1826.52, "end": 1826.64, "word": " we", "probability": 0.81103515625}, {"start": 1826.64, "end": 1826.66, "word": " are", "probability": 0.8115234375}, {"start": 1826.66, "end": 1826.96, "word": " looking", "probability": 0.9150390625}, {"start": 1826.96, "end": 1827.38, "word": " for", "probability": 0.95361328125}, {"start": 1827.38, "end": 1828.16, "word": " C", "probability": 0.97900390625}, {"start": 
1828.16, "end": 1828.38, "word": " level.", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 66, "seek": 185862, "start": 1829.38, "end": 1858.62, "text": " Now just keep in mind the confidence interval is given and we are looking for C level. So this area actually is alpha over 2 and other one is alpha over 2, so the area between is 1 minus alpha. Now since the sample size equal", "tokens": [823, 445, 1066, 294, 1575, 264, 6687, 15035, 307, 2212, 293, 321, 366, 1237, 337, 383, 1496, 13, 407, 341, 1859, 767, 307, 8961, 670, 568, 293, 661, 472, 307, 8961, 670, 568, 11, 370, 264, 1859, 1296, 307, 502, 3175, 8961, 13, 823, 1670, 264, 6889, 2744, 2681], "avg_logprob": -0.21749999850988389, "compression_ratio": 1.4580645161290322, "no_speech_prob": 0.0, "words": [{"start": 1829.38, "end": 1829.7, "word": " Now", "probability": 0.52392578125}, {"start": 1829.7, "end": 1830.14, "word": " just", "probability": 0.5224609375}, {"start": 1830.14, "end": 1830.86, "word": " keep", "probability": 0.90966796875}, {"start": 1830.86, "end": 1830.98, "word": " in", "probability": 0.94189453125}, {"start": 1830.98, "end": 1831.38, "word": " mind", "probability": 0.9033203125}, {"start": 1831.38, "end": 1832.92, "word": " the", "probability": 0.475341796875}, {"start": 1832.92, "end": 1833.42, "word": " confidence", "probability": 0.96826171875}, {"start": 1833.42, "end": 1833.9, "word": " interval", "probability": 0.97265625}, {"start": 1833.9, "end": 1834.16, "word": " is", "probability": 0.92236328125}, {"start": 1834.16, "end": 1834.38, "word": " given", "probability": 0.90380859375}, {"start": 1834.38, "end": 1836.12, "word": " and", "probability": 0.560546875}, {"start": 1836.12, "end": 1836.52, "word": " we", "probability": 0.94091796875}, {"start": 1836.52, "end": 1836.66, "word": " are", "probability": 0.91650390625}, {"start": 1836.66, "end": 1837.0, "word": " looking", "probability": 0.91455078125}, {"start": 1837.0, "end": 1837.46, "word": " for", "probability": 
0.95458984375}, {"start": 1837.46, "end": 1837.84, "word": " C", "probability": 0.59375}, {"start": 1837.84, "end": 1838.2, "word": " level.", "probability": 0.7890625}, {"start": 1842.92, "end": 1843.56, "word": " So", "probability": 0.884765625}, {"start": 1843.56, "end": 1843.82, "word": " this", "probability": 0.7490234375}, {"start": 1843.82, "end": 1844.18, "word": " area", "probability": 0.8798828125}, {"start": 1844.18, "end": 1844.62, "word": " actually", "probability": 0.81396484375}, {"start": 1844.62, "end": 1844.88, "word": " is", "probability": 0.95556640625}, {"start": 1844.88, "end": 1845.1, "word": " alpha", "probability": 0.71923828125}, {"start": 1845.1, "end": 1845.38, "word": " over", "probability": 0.9033203125}, {"start": 1845.38, "end": 1845.7, "word": " 2", "probability": 0.73486328125}, {"start": 1845.7, "end": 1846.3, "word": " and", "probability": 0.86328125}, {"start": 1846.3, "end": 1846.6, "word": " other", "probability": 0.62255859375}, {"start": 1846.6, "end": 1846.8, "word": " one", "probability": 0.91455078125}, {"start": 1846.8, "end": 1846.96, "word": " is", "probability": 0.95458984375}, {"start": 1846.96, "end": 1847.18, "word": " alpha", "probability": 0.8984375}, {"start": 1847.18, "end": 1847.44, "word": " over", "probability": 0.93310546875}, {"start": 1847.44, "end": 1847.72, "word": " 2,", "probability": 0.97802734375}, {"start": 1847.9, "end": 1848.0, "word": " so", "probability": 0.92919921875}, {"start": 1848.0, "end": 1848.16, "word": " the", "probability": 0.91162109375}, {"start": 1848.16, "end": 1848.38, "word": " area", "probability": 0.87890625}, {"start": 1848.38, "end": 1848.8, "word": " between", "probability": 0.88818359375}, {"start": 1848.8, "end": 1849.78, "word": " is", "probability": 0.93212890625}, {"start": 1849.78, "end": 1849.94, "word": " 1", "probability": 0.7705078125}, {"start": 1849.94, "end": 1850.2, "word": " minus", "probability": 0.94873046875}, {"start": 1850.2, "end": 1850.44, "word": " 
alpha.", "probability": 0.93359375}, {"start": 1853.34, "end": 1853.98, "word": " Now", "probability": 0.935546875}, {"start": 1853.98, "end": 1854.32, "word": " since", "probability": 0.76318359375}, {"start": 1854.32, "end": 1854.54, "word": " the", "probability": 0.9208984375}, {"start": 1854.54, "end": 1854.8, "word": " sample", "probability": 0.90869140625}, {"start": 1854.8, "end": 1855.4, "word": " size", "probability": 0.884765625}, {"start": 1855.4, "end": 1858.62, "word": " equal", "probability": 0.48193359375}], "temperature": 1.0}, {"id": 67, "seek": 188813, "start": 1861.95, "end": 1888.13, "text": " 16, N equals 16, so N equals 16, so your confidence interval should be X bar plus or minus T, S over root N. Now, C level can be determined by T, and we know that this quantity,", "tokens": [3165, 11, 426, 6915, 3165, 11, 370, 426, 6915, 3165, 11, 370, 428, 6687, 15035, 820, 312, 1783, 2159, 1804, 420, 3175, 314, 11, 318, 670, 5593, 426, 13, 823, 11, 383, 1496, 393, 312, 9540, 538, 314, 11, 293, 321, 458, 300, 341, 11275, 11], "avg_logprob": -0.23836436867713928, "compression_ratio": 1.3383458646616542, "no_speech_prob": 0.0, "words": [{"start": 1861.95, "end": 1862.71, "word": " 16,", "probability": 0.337890625}, {"start": 1862.71, "end": 1863.47, "word": " N", "probability": 0.2249755859375}, {"start": 1863.47, "end": 1863.89, "word": " equals", "probability": 0.412353515625}, {"start": 1863.89, "end": 1865.53, "word": " 16,", "probability": 0.90478515625}, {"start": 1865.91, "end": 1866.07, "word": " so", "probability": 0.81787109375}, {"start": 1866.07, "end": 1866.35, "word": " N", "probability": 0.78955078125}, {"start": 1866.35, "end": 1867.97, "word": " equals", "probability": 0.892578125}, {"start": 1867.97, "end": 1868.51, "word": " 16,", "probability": 0.9423828125}, {"start": 1869.39, "end": 1869.81, "word": " so", "probability": 0.90087890625}, {"start": 1869.81, "end": 1870.01, "word": " your", "probability": 0.845703125}, {"start": 1870.01, 
"end": 1870.41, "word": " confidence", "probability": 0.96533203125}, {"start": 1870.41, "end": 1870.81, "word": " interval", "probability": 0.95166015625}, {"start": 1870.81, "end": 1871.11, "word": " should", "probability": 0.95361328125}, {"start": 1871.11, "end": 1871.27, "word": " be", "probability": 0.9541015625}, {"start": 1871.27, "end": 1871.49, "word": " X", "probability": 0.685546875}, {"start": 1871.49, "end": 1871.65, "word": " bar", "probability": 0.888671875}, {"start": 1871.65, "end": 1871.93, "word": " plus", "probability": 0.95654296875}, {"start": 1871.93, "end": 1872.17, "word": " or", "probability": 0.95361328125}, {"start": 1872.17, "end": 1872.49, "word": " minus", "probability": 0.98779296875}, {"start": 1872.49, "end": 1872.85, "word": " T,", "probability": 0.9619140625}, {"start": 1873.43, "end": 1873.83, "word": " S", "probability": 0.876953125}, {"start": 1873.83, "end": 1874.11, "word": " over", "probability": 0.92431640625}, {"start": 1874.11, "end": 1874.37, "word": " root", "probability": 0.9560546875}, {"start": 1874.37, "end": 1874.61, "word": " N.", "probability": 0.978515625}, {"start": 1879.35, "end": 1879.73, "word": " Now,", "probability": 0.93994140625}, {"start": 1880.07, "end": 1880.23, "word": " C", "probability": 0.91064453125}, {"start": 1880.23, "end": 1880.55, "word": " level", "probability": 0.74267578125}, {"start": 1880.55, "end": 1880.93, "word": " can", "probability": 0.94580078125}, {"start": 1880.93, "end": 1881.17, "word": " be", "probability": 0.9521484375}, {"start": 1881.17, "end": 1882.21, "word": " determined", "probability": 0.92236328125}, {"start": 1882.21, "end": 1882.79, "word": " by", "probability": 0.9736328125}, {"start": 1882.79, "end": 1883.77, "word": " T,", "probability": 0.98486328125}, {"start": 1885.33, "end": 1886.05, "word": " and", "probability": 0.9443359375}, {"start": 1886.05, "end": 1886.23, "word": " we", "probability": 0.953125}, {"start": 1886.23, "end": 1886.39, "word": " know", 
"probability": 0.88623046875}, {"start": 1886.39, "end": 1886.75, "word": " that", "probability": 0.9404296875}, {"start": 1886.75, "end": 1887.55, "word": " this", "probability": 0.9111328125}, {"start": 1887.55, "end": 1888.13, "word": " quantity,", "probability": 0.986328125}], "temperature": 1.0}, {"id": 68, "seek": 191819, "start": 1890.73, "end": 1918.19, "text": " represents the margin of error. So, E equals TS over root N. Now, since the confidence interval is given, we know from previous chapters that the margin equals the difference between upper and lower divided by two. So, half distance", "tokens": [8855, 264, 10270, 295, 6713, 13, 407, 11, 462, 6915, 37645, 670, 5593, 426, 13, 823, 11, 1670, 264, 6687, 15035, 307, 2212, 11, 321, 458, 490, 3894, 20013, 300, 264, 10270, 6915, 264, 2649, 1296, 6597, 293, 3126, 6666, 538, 732, 13, 407, 11, 1922, 4560], "avg_logprob": -0.25683592694501084, "compression_ratio": 1.4472049689440993, "no_speech_prob": 0.0, "words": [{"start": 1890.73, "end": 1891.53, "word": " represents", "probability": 0.364013671875}, {"start": 1891.53, "end": 1892.33, "word": " the", "probability": 0.8076171875}, {"start": 1892.33, "end": 1892.65, "word": " margin", "probability": 0.90478515625}, {"start": 1892.65, "end": 1892.85, "word": " of", "probability": 0.9501953125}, {"start": 1892.85, "end": 1893.03, "word": " error.", "probability": 0.86767578125}, {"start": 1894.71, "end": 1895.29, "word": " So,", "probability": 0.54296875}, {"start": 1895.47, "end": 1895.55, "word": " E", "probability": 0.84814453125}, {"start": 1895.55, "end": 1896.09, "word": " equals", "probability": 0.376220703125}, {"start": 1896.09, "end": 1896.97, "word": " TS", "probability": 0.308837890625}, {"start": 1896.97, "end": 1897.39, "word": " over", "probability": 0.8916015625}, {"start": 1897.39, "end": 1897.67, "word": " root", "probability": 0.875}, {"start": 1897.67, "end": 1897.89, "word": " N.", "probability": 0.82568359375}, {"start": 1899.09, "end": 
1899.33, "word": " Now,", "probability": 0.82080078125}, {"start": 1899.43, "end": 1899.63, "word": " since", "probability": 0.84765625}, {"start": 1899.63, "end": 1899.83, "word": " the", "probability": 0.900390625}, {"start": 1899.83, "end": 1900.17, "word": " confidence", "probability": 0.97802734375}, {"start": 1900.17, "end": 1900.57, "word": " interval", "probability": 0.5341796875}, {"start": 1900.57, "end": 1902.95, "word": " is", "probability": 0.91650390625}, {"start": 1902.95, "end": 1903.23, "word": " given,", "probability": 0.90478515625}, {"start": 1904.57, "end": 1904.79, "word": " we", "probability": 0.9189453125}, {"start": 1904.79, "end": 1905.11, "word": " know", "probability": 0.8818359375}, {"start": 1905.11, "end": 1905.53, "word": " from", "probability": 0.8740234375}, {"start": 1905.53, "end": 1906.55, "word": " previous", "probability": 0.78515625}, {"start": 1906.55, "end": 1908.37, "word": " chapters", "probability": 0.91162109375}, {"start": 1908.37, "end": 1908.79, "word": " that", "probability": 0.89453125}, {"start": 1908.79, "end": 1910.27, "word": " the", "probability": 0.818359375}, {"start": 1910.27, "end": 1910.63, "word": " margin", "probability": 0.94677734375}, {"start": 1910.63, "end": 1911.09, "word": " equals", "probability": 0.92529296875}, {"start": 1911.09, "end": 1911.39, "word": " the", "probability": 0.89404296875}, {"start": 1911.39, "end": 1911.87, "word": " difference", "probability": 0.833984375}, {"start": 1911.87, "end": 1912.47, "word": " between", "probability": 0.87890625}, {"start": 1912.47, "end": 1913.31, "word": " upper", "probability": 0.66357421875}, {"start": 1913.31, "end": 1913.97, "word": " and", "probability": 0.94921875}, {"start": 1913.97, "end": 1914.35, "word": " lower", "probability": 0.86083984375}, {"start": 1914.35, "end": 1915.39, "word": " divided", "probability": 0.6962890625}, {"start": 1915.39, "end": 1915.59, "word": " by", "probability": 0.97265625}, {"start": 1915.59, "end": 
1915.87, "word": " two.", "probability": 0.4921875}, {"start": 1917.13, "end": 1917.37, "word": " So,", "probability": 0.93505859375}, {"start": 1917.45, "end": 1917.65, "word": " half", "probability": 0.7138671875}, {"start": 1917.65, "end": 1918.19, "word": " distance", "probability": 0.74462890625}], "temperature": 1.0}, {"id": 69, "seek": 194432, "start": 1918.94, "end": 1944.32, "text": " of lower and upper gives the margin. So that will give 260.2. So that's E. So now E is known to be 260.2 equals to S is given by 400 and N is 16.", "tokens": [295, 3126, 293, 6597, 2709, 264, 10270, 13, 407, 300, 486, 976, 44624, 13, 17, 13, 407, 300, 311, 462, 13, 407, 586, 462, 307, 2570, 281, 312, 44624, 13, 17, 6915, 281, 318, 307, 2212, 538, 8423, 293, 426, 307, 3165, 13], "avg_logprob": -0.2299360836094076, "compression_ratio": 1.2695652173913043, "no_speech_prob": 0.0, "words": [{"start": 1918.94, "end": 1919.3, "word": " of", "probability": 0.30126953125}, {"start": 1919.3, "end": 1919.56, "word": " lower", "probability": 0.837890625}, {"start": 1919.56, "end": 1919.74, "word": " and", "probability": 0.88232421875}, {"start": 1919.74, "end": 1919.96, "word": " upper", "probability": 0.77392578125}, {"start": 1919.96, "end": 1920.3, "word": " gives", "probability": 0.828125}, {"start": 1920.3, "end": 1920.74, "word": " the", "probability": 0.87744140625}, {"start": 1920.74, "end": 1921.32, "word": " margin.", "probability": 0.401611328125}, {"start": 1923.0, "end": 1923.9, "word": " So", "probability": 0.85498046875}, {"start": 1923.9, "end": 1924.14, "word": " that", "probability": 0.81787109375}, {"start": 1924.14, "end": 1924.32, "word": " will", "probability": 0.86572265625}, {"start": 1924.32, "end": 1924.62, "word": " give", "probability": 0.857421875}, {"start": 1924.62, "end": 1926.32, "word": " 260", "probability": 0.80908203125}, {"start": 1926.32, "end": 1926.9, "word": ".2.", "probability": 0.9404296875}, {"start": 1927.82, "end": 1928.14, "word": " So", 
"probability": 0.9033203125}, {"start": 1928.14, "end": 1928.9, "word": " that's", "probability": 0.90966796875}, {"start": 1928.9, "end": 1929.38, "word": " E.", "probability": 0.8056640625}, {"start": 1931.28, "end": 1932.18, "word": " So", "probability": 0.87939453125}, {"start": 1932.18, "end": 1932.46, "word": " now", "probability": 0.869140625}, {"start": 1932.46, "end": 1933.86, "word": " E", "probability": 0.59326171875}, {"start": 1933.86, "end": 1934.06, "word": " is", "probability": 0.94091796875}, {"start": 1934.06, "end": 1934.32, "word": " known", "probability": 0.73291015625}, {"start": 1934.32, "end": 1936.3, "word": " to", "probability": 0.9453125}, {"start": 1936.3, "end": 1936.44, "word": " be", "probability": 0.95556640625}, {"start": 1936.44, "end": 1936.98, "word": " 260", "probability": 0.85009765625}, {"start": 1936.98, "end": 1937.62, "word": ".2", "probability": 0.99609375}, {"start": 1937.62, "end": 1938.34, "word": " equals", "probability": 0.74755859375}, {"start": 1938.34, "end": 1938.62, "word": " to", "probability": 0.62158203125}, {"start": 1938.62, "end": 1939.94, "word": " S", "probability": 0.64404296875}, {"start": 1939.94, "end": 1940.26, "word": " is", "probability": 0.7626953125}, {"start": 1940.26, "end": 1940.54, "word": " given", "probability": 0.8984375}, {"start": 1940.54, "end": 1940.78, "word": " by", "probability": 0.9677734375}, {"start": 1940.78, "end": 1941.34, "word": " 400", "probability": 0.95703125}, {"start": 1941.34, "end": 1943.36, "word": " and", "probability": 0.580078125}, {"start": 1943.36, "end": 1943.62, "word": " N", "probability": 0.85986328125}, {"start": 1943.62, "end": 1943.78, "word": " is", "probability": 0.9404296875}, {"start": 1943.78, "end": 1944.32, "word": " 16.", "probability": 0.943359375}], "temperature": 1.0}, {"id": 70, "seek": 197416, "start": 1946.8, "end": 1974.16, "text": " Now, simple calculation will give the value of T, which is the critical value. So, my T equals 2.60. 
Actually, this is T alpha over 2. Now, the value of the critical value is known to be 2.602. What's the corresponding alpha over 2?", "tokens": [823, 11, 2199, 17108, 486, 976, 264, 2158, 295, 314, 11, 597, 307, 264, 4924, 2158, 13, 407, 11, 452, 314, 6915, 568, 13, 4550, 13, 5135, 11, 341, 307, 314, 8961, 670, 568, 13, 823, 11, 264, 2158, 295, 264, 4924, 2158, 307, 2570, 281, 312, 568, 13, 4550, 17, 13, 708, 311, 264, 11760, 8961, 670, 568, 30], "avg_logprob": -0.19159835381586043, "compression_ratio": 1.4935897435897436, "no_speech_prob": 0.0, "words": [{"start": 1946.8, "end": 1947.08, "word": " Now,", "probability": 0.6728515625}, {"start": 1947.16, "end": 1947.36, "word": " simple", "probability": 0.595703125}, {"start": 1947.36, "end": 1947.96, "word": " calculation", "probability": 0.890625}, {"start": 1947.96, "end": 1948.22, "word": " will", "probability": 0.8603515625}, {"start": 1948.22, "end": 1948.5, "word": " give", "probability": 0.8828125}, {"start": 1948.5, "end": 1948.7, "word": " the", "probability": 0.83203125}, {"start": 1948.7, "end": 1949.0, "word": " value", "probability": 0.98291015625}, {"start": 1949.0, "end": 1949.24, "word": " of", "probability": 0.96728515625}, {"start": 1949.24, "end": 1949.42, "word": " T,", "probability": 0.71875}, {"start": 1950.06, "end": 1950.38, "word": " which", "probability": 0.62890625}, {"start": 1950.38, "end": 1950.46, "word": " is", "probability": 0.9560546875}, {"start": 1950.46, "end": 1950.6, "word": " the", "probability": 0.86328125}, {"start": 1950.6, "end": 1950.9, "word": " critical", "probability": 0.927734375}, {"start": 1950.9, "end": 1951.34, "word": " value.", "probability": 0.9794921875}, {"start": 1955.28, "end": 1955.84, "word": " So,", "probability": 0.85693359375}, {"start": 1955.92, "end": 1956.08, "word": " my", "probability": 0.90771484375}, {"start": 1956.08, "end": 1956.28, "word": " T", "probability": 0.94970703125}, {"start": 1956.28, "end": 1956.8, "word": " equals", 
"probability": 0.6416015625}, {"start": 1956.8, "end": 1957.06, "word": " 2", "probability": 0.89794921875}, {"start": 1957.06, "end": 1958.16, "word": ".60.", "probability": 0.984619140625}, {"start": 1961.96, "end": 1962.52, "word": " Actually,", "probability": 0.8544921875}, {"start": 1962.76, "end": 1963.04, "word": " this", "probability": 0.489013671875}, {"start": 1963.04, "end": 1963.18, "word": " is", "probability": 0.91796875}, {"start": 1963.18, "end": 1963.36, "word": " T", "probability": 0.88037109375}, {"start": 1963.36, "end": 1963.6, "word": " alpha", "probability": 0.469970703125}, {"start": 1963.6, "end": 1963.9, "word": " over", "probability": 0.91162109375}, {"start": 1963.9, "end": 1964.18, "word": " 2.", "probability": 0.8232421875}, {"start": 1966.22, "end": 1966.68, "word": " Now,", "probability": 0.94140625}, {"start": 1966.78, "end": 1966.9, "word": " the", "probability": 0.826171875}, {"start": 1966.9, "end": 1967.22, "word": " value", "probability": 0.8125}, {"start": 1967.22, "end": 1967.46, "word": " of", "probability": 0.96044921875}, {"start": 1967.46, "end": 1967.62, "word": " the", "probability": 0.86767578125}, {"start": 1967.62, "end": 1967.96, "word": " critical", "probability": 0.8515625}, {"start": 1967.96, "end": 1969.18, "word": " value", "probability": 0.497314453125}, {"start": 1969.18, "end": 1969.38, "word": " is", "probability": 0.7939453125}, {"start": 1969.38, "end": 1969.66, "word": " known", "probability": 0.72998046875}, {"start": 1969.66, "end": 1969.86, "word": " to", "probability": 0.9599609375}, {"start": 1969.86, "end": 1970.02, "word": " be", "probability": 0.94775390625}, {"start": 1970.02, "end": 1970.16, "word": " 2", "probability": 0.99365234375}, {"start": 1970.16, "end": 1971.16, "word": ".602.", "probability": 0.9794921875}, {"start": 1971.98, "end": 1972.4, "word": " What's", "probability": 0.8720703125}, {"start": 1972.4, "end": 1972.6, "word": " the", "probability": 0.9208984375}, {"start": 1972.6, 
"end": 1973.1, "word": " corresponding", "probability": 0.78466796875}, {"start": 1973.1, "end": 1973.62, "word": " alpha", "probability": 0.8798828125}, {"start": 1973.62, "end": 1973.94, "word": " over", "probability": 0.91552734375}, {"start": 1973.94, "end": 1974.16, "word": " 2?", "probability": 0.90625}], "temperature": 1.0}, {"id": 71, "seek": 200194, "start": 1975.64, "end": 2001.94, "text": " Now look at the table, at 15 degrees of freedom, look at 15, at this value 2.602, at this value. So, 15 degrees of freedom, 2.602, so the corresponding alpha over 2, not alpha.", "tokens": [823, 574, 412, 264, 3199, 11, 412, 2119, 5310, 295, 5645, 11, 574, 412, 2119, 11, 412, 341, 2158, 568, 13, 4550, 17, 11, 412, 341, 2158, 13, 407, 11, 2119, 5310, 295, 5645, 11, 568, 13, 4550, 17, 11, 370, 264, 11760, 8961, 670, 568, 11, 406, 8961, 13], "avg_logprob": -0.22993260154537126, "compression_ratio": 1.5258620689655173, "no_speech_prob": 0.0, "words": [{"start": 1975.64, "end": 1975.94, "word": " Now", "probability": 0.85498046875}, {"start": 1975.94, "end": 1976.2, "word": " look", "probability": 0.66259765625}, {"start": 1976.2, "end": 1976.38, "word": " at", "probability": 0.96484375}, {"start": 1976.38, "end": 1976.52, "word": " the", "probability": 0.81982421875}, {"start": 1976.52, "end": 1976.86, "word": " table,", "probability": 0.8505859375}, {"start": 1977.52, "end": 1977.76, "word": " at", "probability": 0.767578125}, {"start": 1977.76, "end": 1978.8, "word": " 15", "probability": 0.91162109375}, {"start": 1978.8, "end": 1979.1, "word": " degrees", "probability": 0.90185546875}, {"start": 1979.1, "end": 1979.3, "word": " of", "probability": 0.958984375}, {"start": 1979.3, "end": 1979.66, "word": " freedom,", "probability": 0.9404296875}, {"start": 1982.72, "end": 1983.84, "word": " look", "probability": 0.75537109375}, {"start": 1983.84, "end": 1984.04, "word": " at", "probability": 0.96484375}, {"start": 1984.04, "end": 1984.52, "word": " 15,", "probability": 
0.896484375}, {"start": 1985.4, "end": 1987.44, "word": " at", "probability": 0.88916015625}, {"start": 1987.44, "end": 1987.74, "word": " this", "probability": 0.9443359375}, {"start": 1987.74, "end": 1988.0, "word": " value", "probability": 0.97412109375}, {"start": 1988.0, "end": 1988.22, "word": " 2", "probability": 0.650390625}, {"start": 1988.22, "end": 1989.62, "word": ".602,", "probability": 0.9630533854166666}, {"start": 1989.92, "end": 1990.18, "word": " at", "probability": 0.794921875}, {"start": 1990.18, "end": 1990.38, "word": " this", "probability": 0.94775390625}, {"start": 1990.38, "end": 1990.68, "word": " value.", "probability": 0.97265625}, {"start": 1992.64, "end": 1992.98, "word": " So,", "probability": 0.90283203125}, {"start": 1993.24, "end": 1993.84, "word": " 15", "probability": 0.92138671875}, {"start": 1993.84, "end": 1994.1, "word": " degrees", "probability": 0.94140625}, {"start": 1994.1, "end": 1994.3, "word": " of", "probability": 0.96630859375}, {"start": 1994.3, "end": 1994.6, "word": " freedom,", "probability": 0.9365234375}, {"start": 1997.0, "end": 1997.7, "word": " 2", "probability": 0.9736328125}, {"start": 1997.7, "end": 1998.72, "word": ".602,", "probability": 0.97412109375}, {"start": 1999.44, "end": 1999.74, "word": " so", "probability": 0.89404296875}, {"start": 1999.74, "end": 1999.88, "word": " the", "probability": 0.7646484375}, {"start": 1999.88, "end": 2000.38, "word": " corresponding", "probability": 0.70849609375}, {"start": 2000.38, "end": 2000.94, "word": " alpha", "probability": 0.406982421875}, {"start": 2000.94, "end": 2001.2, "word": " over", "probability": 0.876953125}, {"start": 2001.2, "end": 2001.42, "word": " 2,", "probability": 0.8212890625}, {"start": 2001.44, "end": 2001.58, "word": " not", "probability": 0.81396484375}, {"start": 2001.58, "end": 2001.94, "word": " alpha.", "probability": 0.8837890625}], "temperature": 1.0}, {"id": 72, "seek": 203393, "start": 2004.61, "end": 2033.93, "text": " it's 1% 
so my alpha over 2 is 1% so alpha is 2% so the confidence level is 1 minus alpha so 1 minus alpha is 90% so c level is 98% so that's", "tokens": [309, 311, 502, 4, 370, 452, 8961, 670, 568, 307, 502, 4, 370, 8961, 307, 568, 4, 370, 264, 6687, 1496, 307, 502, 3175, 8961, 370, 502, 3175, 8961, 307, 4289, 4, 370, 269, 1496, 307, 20860, 4, 370, 300, 311], "avg_logprob": -0.20331101438828877, "compression_ratio": 1.4842105263157894, "no_speech_prob": 0.0, "words": [{"start": 2004.61, "end": 2005.01, "word": " it's", "probability": 0.531005859375}, {"start": 2005.01, "end": 2005.29, "word": " 1", "probability": 0.6328125}, {"start": 2005.29, "end": 2005.69, "word": "%", "probability": 0.395263671875}, {"start": 2005.69, "end": 2006.99, "word": " so", "probability": 0.68896484375}, {"start": 2006.99, "end": 2007.23, "word": " my", "probability": 0.91357421875}, {"start": 2007.23, "end": 2007.59, "word": " alpha", "probability": 0.66748046875}, {"start": 2007.59, "end": 2007.83, "word": " over", "probability": 0.89208984375}, {"start": 2007.83, "end": 2008.21, "word": " 2", "probability": 0.80615234375}, {"start": 2008.21, "end": 2011.83, "word": " is", "probability": 0.814453125}, {"start": 2011.83, "end": 2012.03, "word": " 1", "probability": 0.91650390625}, {"start": 2012.03, "end": 2012.37, "word": "%", "probability": 0.83984375}, {"start": 2012.37, "end": 2014.53, "word": " so", "probability": 0.87548828125}, {"start": 2014.53, "end": 2014.95, "word": " alpha", "probability": 0.87890625}, {"start": 2014.95, "end": 2015.39, "word": " is", "probability": 0.955078125}, {"start": 2015.39, "end": 2017.61, "word": " 2", "probability": 0.96484375}, {"start": 2017.61, "end": 2018.09, "word": "%", "probability": 0.88525390625}, {"start": 2018.09, "end": 2020.55, "word": " so", "probability": 0.91015625}, {"start": 2020.55, "end": 2020.73, "word": " the", "probability": 0.900390625}, {"start": 2020.73, "end": 2021.33, "word": " confidence", "probability": 0.98486328125}, 
{"start": 2021.33, "end": 2022.57, "word": " level", "probability": 0.943359375}, {"start": 2022.57, "end": 2022.89, "word": " is", "probability": 0.95068359375}, {"start": 2022.89, "end": 2023.11, "word": " 1", "probability": 0.8544921875}, {"start": 2023.11, "end": 2023.41, "word": " minus", "probability": 0.759765625}, {"start": 2023.41, "end": 2023.85, "word": " alpha", "probability": 0.90625}, {"start": 2023.85, "end": 2024.61, "word": " so", "probability": 0.63720703125}, {"start": 2024.61, "end": 2025.07, "word": " 1", "probability": 0.8837890625}, {"start": 2025.07, "end": 2025.39, "word": " minus", "probability": 0.9599609375}, {"start": 2025.39, "end": 2025.83, "word": " alpha", "probability": 0.912109375}, {"start": 2025.83, "end": 2027.75, "word": " is", "probability": 0.92626953125}, {"start": 2027.75, "end": 2028.11, "word": " 90", "probability": 0.97265625}, {"start": 2028.11, "end": 2028.55, "word": "%", "probability": 0.8701171875}, {"start": 2028.55, "end": 2029.81, "word": " so", "probability": 0.921875}, {"start": 2029.81, "end": 2030.01, "word": " c", "probability": 0.54931640625}, {"start": 2030.01, "end": 2030.27, "word": " level", "probability": 0.8564453125}, {"start": 2030.27, "end": 2030.51, "word": " is", "probability": 0.9443359375}, {"start": 2030.51, "end": 2030.87, "word": " 98", "probability": 0.97705078125}, {"start": 2030.87, "end": 2031.27, "word": "%", "probability": 0.9150390625}, {"start": 2031.27, "end": 2033.49, "word": " so", "probability": 0.92236328125}, {"start": 2033.49, "end": 2033.93, "word": " that's", "probability": 0.937255859375}], "temperature": 1.0}, {"id": 73, "seek": 206425, "start": 2036.43, "end": 2064.25, "text": " level or the confidence level. So again, maybe this is a tricky question. But at least you know that if the confidence interval is given, you can determine the margin of error by the difference between lower and upper divided by two. 
Then we know this term represents this margin.", "tokens": [1496, 420, 264, 6687, 1496, 13, 407, 797, 11, 1310, 341, 307, 257, 12414, 1168, 13, 583, 412, 1935, 291, 458, 300, 498, 264, 6687, 15035, 307, 2212, 11, 291, 393, 6997, 264, 10270, 295, 6713, 538, 264, 2649, 1296, 3126, 293, 6597, 6666, 538, 732, 13, 1396, 321, 458, 341, 1433, 8855, 341, 10270, 13], "avg_logprob": -0.22587719507384718, "compression_ratio": 1.5611111111111111, "no_speech_prob": 0.0, "words": [{"start": 2036.4299999999998, "end": 2037.11, "word": " level", "probability": 0.052490234375}, {"start": 2037.11, "end": 2037.41, "word": " or", "probability": 0.62548828125}, {"start": 2037.41, "end": 2037.51, "word": " the", "probability": 0.716796875}, {"start": 2037.51, "end": 2037.81, "word": " confidence", "probability": 0.93212890625}, {"start": 2037.81, "end": 2038.21, "word": " level.", "probability": 0.8681640625}, {"start": 2039.21, "end": 2039.41, "word": " So", "probability": 0.81884765625}, {"start": 2039.41, "end": 2039.79, "word": " again,", "probability": 0.775390625}, {"start": 2041.27, "end": 2042.73, "word": " maybe", "probability": 0.9091796875}, {"start": 2042.73, "end": 2042.99, "word": " this", "probability": 0.9443359375}, {"start": 2042.99, "end": 2043.13, "word": " is", "probability": 0.9365234375}, {"start": 2043.13, "end": 2043.21, "word": " a", "probability": 0.52001953125}, {"start": 2043.21, "end": 2043.43, "word": " tricky", "probability": 0.89208984375}, {"start": 2043.43, "end": 2043.99, "word": " question.", "probability": 0.9169921875}, {"start": 2047.33, "end": 2047.85, "word": " But", "probability": 0.90185546875}, {"start": 2047.85, "end": 2048.05, "word": " at", "probability": 0.9150390625}, {"start": 2048.05, "end": 2048.29, "word": " least", "probability": 0.95556640625}, {"start": 2048.29, "end": 2048.47, "word": " you", "probability": 0.86669921875}, {"start": 2048.47, "end": 2048.61, "word": " know", "probability": 0.88134765625}, {"start": 2048.61, "end": 
2049.01, "word": " that", "probability": 0.9169921875}, {"start": 2049.01, "end": 2049.99, "word": " if", "probability": 0.806640625}, {"start": 2049.99, "end": 2050.17, "word": " the", "probability": 0.90869140625}, {"start": 2050.17, "end": 2050.53, "word": " confidence", "probability": 0.984375}, {"start": 2050.53, "end": 2051.03, "word": " interval", "probability": 0.98095703125}, {"start": 2051.03, "end": 2051.31, "word": " is", "probability": 0.947265625}, {"start": 2051.31, "end": 2051.59, "word": " given,", "probability": 0.90771484375}, {"start": 2052.53, "end": 2052.73, "word": " you", "probability": 0.95068359375}, {"start": 2052.73, "end": 2052.97, "word": " can", "probability": 0.94580078125}, {"start": 2052.97, "end": 2053.59, "word": " determine", "probability": 0.908203125}, {"start": 2053.59, "end": 2054.73, "word": " the", "probability": 0.89794921875}, {"start": 2054.73, "end": 2055.09, "word": " margin", "probability": 0.95654296875}, {"start": 2055.09, "end": 2055.27, "word": " of", "probability": 0.96630859375}, {"start": 2055.27, "end": 2055.53, "word": " error", "probability": 0.87646484375}, {"start": 2055.53, "end": 2057.19, "word": " by", "probability": 0.8984375}, {"start": 2057.19, "end": 2057.51, "word": " the", "probability": 0.9140625}, {"start": 2057.51, "end": 2057.91, "word": " difference", "probability": 0.841796875}, {"start": 2057.91, "end": 2058.27, "word": " between", "probability": 0.8623046875}, {"start": 2058.27, "end": 2058.53, "word": " lower", "probability": 0.8720703125}, {"start": 2058.53, "end": 2058.71, "word": " and", "probability": 0.9482421875}, {"start": 2058.71, "end": 2058.93, "word": " upper", "probability": 0.83251953125}, {"start": 2058.93, "end": 2059.23, "word": " divided", "probability": 0.736328125}, {"start": 2059.23, "end": 2059.39, "word": " by", "probability": 0.97216796875}, {"start": 2059.39, "end": 2059.69, "word": " two.", "probability": 0.54345703125}, {"start": 2060.91, "end": 2061.33, "word": 
" Then", "probability": 0.8466796875}, {"start": 2061.33, "end": 2061.63, "word": " we", "probability": 0.78662109375}, {"start": 2061.63, "end": 2061.89, "word": " know", "probability": 0.88623046875}, {"start": 2061.89, "end": 2062.39, "word": " this", "probability": 0.9130859375}, {"start": 2062.39, "end": 2062.83, "word": " term", "probability": 0.9423828125}, {"start": 2062.83, "end": 2063.31, "word": " represents", "probability": 0.68115234375}, {"start": 2063.31, "end": 2063.81, "word": " this", "probability": 0.94677734375}, {"start": 2063.81, "end": 2064.25, "word": " margin.", "probability": 0.95166015625}], "temperature": 1.0}, {"id": 74, "seek": 209329, "start": 2065.35, "end": 2093.29, "text": " So by using this equation, we can compute the value of T, I mean the critical value. So since the critical value is given or is computed, we can determine the corresponding alpha over 2. So alpha over 2 is 1%. So your alpha is 2%. So my C level is 98%. That's all. Any questions?", "tokens": [407, 538, 1228, 341, 5367, 11, 321, 393, 14722, 264, 2158, 295, 314, 11, 286, 914, 264, 4924, 2158, 13, 407, 1670, 264, 4924, 2158, 307, 2212, 420, 307, 40610, 11, 321, 393, 6997, 264, 11760, 8961, 670, 568, 13, 407, 8961, 670, 568, 307, 502, 6856, 407, 428, 8961, 307, 568, 6856, 407, 452, 383, 1496, 307, 20860, 6856, 663, 311, 439, 13, 2639, 1651, 30], "avg_logprob": -0.13074448814286904, "compression_ratio": 1.5819209039548023, "no_speech_prob": 0.0, "words": [{"start": 2065.35, "end": 2065.61, "word": " So", "probability": 0.8505859375}, {"start": 2065.61, "end": 2065.77, "word": " by", "probability": 0.73876953125}, {"start": 2065.77, "end": 2066.01, "word": " using", "probability": 0.93115234375}, {"start": 2066.01, "end": 2066.27, "word": " this", "probability": 0.94140625}, {"start": 2066.27, "end": 2066.67, "word": " equation,", "probability": 0.96337890625}, {"start": 2066.79, "end": 2066.89, "word": " we", "probability": 0.9345703125}, {"start": 2066.89, "end": 
2067.15, "word": " can", "probability": 0.93798828125}, {"start": 2067.15, "end": 2067.53, "word": " compute", "probability": 0.93115234375}, {"start": 2067.53, "end": 2067.69, "word": " the", "probability": 0.91064453125}, {"start": 2067.69, "end": 2067.93, "word": " value", "probability": 0.98095703125}, {"start": 2067.93, "end": 2068.11, "word": " of", "probability": 0.95654296875}, {"start": 2068.11, "end": 2068.33, "word": " T,", "probability": 0.77978515625}, {"start": 2068.75, "end": 2068.87, "word": " I", "probability": 0.95654296875}, {"start": 2068.87, "end": 2068.97, "word": " mean", "probability": 0.96533203125}, {"start": 2068.97, "end": 2069.09, "word": " the", "probability": 0.73876953125}, {"start": 2069.09, "end": 2069.37, "word": " critical", "probability": 0.93359375}, {"start": 2069.37, "end": 2069.77, "word": " value.", "probability": 0.97216796875}, {"start": 2070.67, "end": 2070.99, "word": " So", "probability": 0.951171875}, {"start": 2070.99, "end": 2071.45, "word": " since", "probability": 0.748046875}, {"start": 2071.45, "end": 2072.11, "word": " the", "probability": 0.85791015625}, {"start": 2072.11, "end": 2072.51, "word": " critical", "probability": 0.94189453125}, {"start": 2072.51, "end": 2072.93, "word": " value", "probability": 0.96826171875}, {"start": 2072.93, "end": 2073.15, "word": " is", "probability": 0.9072265625}, {"start": 2073.15, "end": 2073.51, "word": " given", "probability": 0.8974609375}, {"start": 2073.51, "end": 2075.11, "word": " or", "probability": 0.51708984375}, {"start": 2075.11, "end": 2075.29, "word": " is", "probability": 0.845703125}, {"start": 2075.29, "end": 2075.67, "word": " computed,", "probability": 0.92138671875}, {"start": 2076.19, "end": 2076.55, "word": " we", "probability": 0.9580078125}, {"start": 2076.55, "end": 2076.79, "word": " can", "probability": 0.94482421875}, {"start": 2076.79, "end": 2077.33, "word": " determine", "probability": 0.9169921875}, {"start": 2077.33, "end": 2077.63, 
"word": " the", "probability": 0.92236328125}, {"start": 2077.63, "end": 2078.25, "word": " corresponding", "probability": 0.85009765625}, {"start": 2078.25, "end": 2078.59, "word": " alpha", "probability": 0.8125}, {"start": 2078.59, "end": 2078.85, "word": " over", "probability": 0.8935546875}, {"start": 2078.85, "end": 2079.11, "word": " 2.", "probability": 0.654296875}, {"start": 2080.43, "end": 2080.99, "word": " So", "probability": 0.96240234375}, {"start": 2080.99, "end": 2081.29, "word": " alpha", "probability": 0.88916015625}, {"start": 2081.29, "end": 2081.51, "word": " over", "probability": 0.919921875}, {"start": 2081.51, "end": 2081.69, "word": " 2", "probability": 0.95556640625}, {"start": 2081.69, "end": 2081.95, "word": " is", "probability": 0.94970703125}, {"start": 2081.95, "end": 2083.05, "word": " 1%.", "probability": 0.6932373046875}, {"start": 2083.05, "end": 2084.47, "word": " So", "probability": 0.95947265625}, {"start": 2084.47, "end": 2084.71, "word": " your", "probability": 0.86669921875}, {"start": 2084.71, "end": 2085.07, "word": " alpha", "probability": 0.91552734375}, {"start": 2085.07, "end": 2085.39, "word": " is", "probability": 0.94775390625}, {"start": 2085.39, "end": 2086.13, "word": " 2%.", "probability": 0.84326171875}, {"start": 2086.13, "end": 2086.85, "word": " So", "probability": 0.935546875}, {"start": 2086.85, "end": 2087.05, "word": " my", "probability": 0.95849609375}, {"start": 2087.05, "end": 2087.21, "word": " C", "probability": 0.79541015625}, {"start": 2087.21, "end": 2087.43, "word": " level", "probability": 0.8779296875}, {"start": 2087.43, "end": 2087.63, "word": " is", "probability": 0.9453125}, {"start": 2087.63, "end": 2088.33, "word": " 98%.", "probability": 0.970458984375}, {"start": 2088.33, "end": 2091.71, "word": " That's", "probability": 0.9287109375}, {"start": 2091.71, "end": 2092.21, "word": " all.", "probability": 0.92431640625}, {"start": 2092.53, "end": 2092.85, "word": " Any", "probability": 
0.9208984375}, {"start": 2092.85, "end": 2093.29, "word": " questions?", "probability": 0.95751953125}], "temperature": 1.0}, {"id": 75, "seek": 209617, "start": 2095.32, "end": 2096.18, "text": " We're done, Muhammad.", "tokens": [492, 434, 1096, 11, 19360, 13], "avg_logprob": -0.5438058206013271, "compression_ratio": 0.7333333333333333, "no_speech_prob": 0.0, "words": [{"start": 2095.32, "end": 2095.54, "word": " We're", "probability": 0.56640625}, {"start": 2095.54, "end": 2095.78, "word": " done,", "probability": 0.80712890625}, {"start": 2095.9, "end": 2096.18, "word": " Muhammad.", "probability": 0.1964111328125}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 2099.1535, "duration_after_vad": 1962.1215937499935} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc.srt new file mode 100644 index 0000000000000000000000000000000000000000..85bb20316ba90b21dd642a3a219af5f595c6b32b --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc.srt @@ -0,0 +1,2638 @@ +1 +00:00:06,000 --> 00:00:10,400 +Today, I will start chapter 10. + +2 +00:00:12,160 --> 00:00:16,940 +Chapter 10 talks about two sample tests and one + +3 +00:00:16,940 --> 00:00:24,720 +analysis of variance. In the last chapters, we had + +4 +00:00:24,720 --> 00:00:28,380 +already talked about one sample. + +5 +00:00:31,700 --> 00:00:34,240 +And also, we introduced how + +6 +00:00:41,910 --> 00:00:51,590 +In addition to that, we perform tests about Mu and + +7 +00:00:51,590 --> 00:00:59,310 +Y. In this chapter, we are going to generalize our + +8 +00:00:59,310 --> 00:01:03,870 +confidence interval and testing about two-sample + +9 +00:01:03,870 --> 00:01:08,600 +tests or more than two-sample tests. 
For two + +10 +00:01:08,600 --> 00:01:14,900 +samples, we are interested in the difference + +11 +00:01:14,900 --> 00:01:19,220 +between either two population means or two + +12 +00:01:19,220 --> 00:01:23,580 +population proportions. For example, suppose we + +13 +00:01:23,580 --> 00:01:29,440 +are teaching students and we have males and + +14 +00:01:29,440 --> 00:01:36,280 +females. And our goal is to test to see whether or + +15 +00:01:36,280 --> 00:01:40,470 +not there exists a significant difference between + +16 +00:01:40,470 --> 00:01:45,090 +scores of males and females. So in this case, we + +17 +00:01:45,090 --> 00:01:52,510 +have two populations, population A and population + +18 +00:01:52,510 --> 00:01:58,930 +B. And for example, suppose we select a random + +19 +00:01:58,930 --> 00:02:02,910 +sample from + +20 +00:02:02,910 --> 00:02:06,660 +the first population. And select another sample + +21 +00:02:06,660 --> 00:02:11,800 +from the second population. And the goal is how + +22 +00:02:11,800 --> 00:02:15,000 +can we construct a confidence interval for the + +23 +00:02:15,000 --> 00:02:17,920 +difference between mu1 and mu2. So we are talking + +24 +00:02:17,920 --> 00:02:21,700 +about mu1 minus mu2, or the difference between two + +25 +00:02:21,700 --> 00:02:26,240 +population proportions, pi1 minus pi2. + +26 +00:02:29,620 --> 00:02:33,280 +Sometimes there exists more than two populations. + +27 +00:02:34,490 --> 00:02:39,970 +And also, we can figure out if the means, in + +28 +00:02:39,970 --> 00:02:46,890 +general, are different or not. In this chapter, + +29 +00:02:47,030 --> 00:02:50,790 +we'll just talk about two sample tests. And we are + +30 +00:02:50,790 --> 00:02:54,370 +going to skip one-way analysis of variance. So + +31 +00:02:54,370 --> 00:03:00,510 +we'll explain in details confidence intervals and + +32 +00:03:00,510 --> 00:03:05,830 +hypothesis testing about two samples. 
For one-way + +33 +00:03:05,830 --> 00:03:08,830 +analysis of variance, that one will be discussed + +34 +00:03:08,830 --> 00:03:15,610 +in the SPSS course. The objectives for this + +35 +00:03:15,610 --> 00:03:20,910 +chapter are how to use hypothesis testing for + +36 +00:03:20,910 --> 00:03:25,930 +comparing the difference between the means of two + +37 +00:03:25,930 --> 00:03:29,890 +independent populations, the means of two related + +38 +00:03:29,890 --> 00:03:33,390 +populations. Let's see the difference between two + +39 +00:03:33,390 --> 00:03:41,160 +independent and two related populations. So the + +40 +00:03:41,160 --> 00:03:47,800 +two populations might be considered independent or + +41 +00:03:47,800 --> 00:03:52,040 +related. Related means dependent population. Now, + +42 +00:03:52,120 --> 00:03:56,520 +for independent populations, + +43 +00:03:57,560 --> 00:04:02,600 +in this case, sample one, does not affect the + +44 +00:04:02,600 --> 00:04:06,660 +results of sample two. For this reason, we call it as + +45 +00:04:06,660 --> 00:04:09,660 +independent sample. So sample one does not affect + +46 +00:04:09,660 --> 00:04:13,240 +the results of sample two. For example, suppose as + +47 +00:04:13,240 --> 00:04:18,900 +we mentioned, we have females and + +48 +00:04:18,900 --> 00:04:23,230 +males. So here, the two groups or the two samples + +49 +00:04:23,230 --> 00:04:27,750 +are independent because a student's scores for + +50 +00:04:27,750 --> 00:04:31,650 +females, for example, does not affect score for + +51 +00:04:31,650 --> 00:04:36,370 +males. For this reason, they are independent. So + +52 +00:04:36,370 --> 00:04:40,950 +since we have two different groups, + +53 +00:04:42,150 --> 00:04:44,830 +Or two different samples. In this case, we have + +54 +00:04:44,830 --> 00:04:47,450 +independent populations or independent samples. 
+ +55 +00:04:48,310 --> 00:04:52,550 +Now, for the two related populations, for example, + +56 +00:04:55,150 --> 00:05:00,970 +in this case, suppose we have 10 persons. + +57 +00:05:02,550 --> 00:05:10,110 +Person 1, suppose his or her weight 120 kilograms. + +58 +00:05:11,480 --> 00:05:15,780 +for the first item. The second item or second + +59 +00:05:15,780 --> 00:05:21,480 +person, his or her weight is 135 kilograms. The + +60 +00:05:21,480 --> 00:05:25,380 +third one, for example, 110. And suppose we have + +61 +00:05:25,380 --> 00:05:28,380 +10 persons. + +62 +00:05:29,700 --> 00:05:35,040 +Weight? Suppose these are weights in kilograms. + +63 +00:05:38,120 --> 00:05:41,540 +Now, suppose these people or these students or + +64 +00:05:41,540 --> 00:05:46,720 +whatever they are have + +65 +00:05:46,720 --> 00:05:51,640 +diet for a period of time. For example, suppose + +66 +00:05:51,640 --> 00:05:56,900 +they have diet for three months. So, currently + +67 +00:05:56,900 --> 00:06:02,560 +their weights are given by 120, 135 and so on. + +68 +00:06:06,560 --> 00:06:09,860 +They have diet for three months after that. Then + +69 +00:06:09,860 --> 00:06:13,760 +we measure their weights after three months. Now + +70 +00:06:13,760 --> 00:06:18,640 +suppose the first person, his weight was 120. For + +71 +00:06:18,640 --> 00:06:21,900 +example, his weight was in January 120. Now in + +72 +00:06:21,900 --> 00:06:22,220 +April. + +73 +00:06:25,260 --> 00:06:30,060 +For example, suppose 105, 105 kilograms. The + +74 +00:06:30,060 --> 00:06:34,440 +second person suppose 120. The third one suppose + +75 +00:06:34,440 --> 00:06:35,900 +95, and so on. + +76 +00:06:40,310 --> 00:06:45,890 +Now, in two cases, we have the same individuals, + +77 +00:06:46,330 --> 00:06:50,110 +the same persons. Person one, his weight was 120, + +78 +00:06:50,690 --> 00:06:54,950 +and after a specific period of time, his weight + +79 +00:06:54,950 --> 00:07:01,610 +becomes 105. 
So we have the same people, the same + +80 +00:07:01,610 --> 00:07:09,260 +individuals for both In this case, these two + +81 +00:07:09,260 --> 00:07:13,380 +samples are called related samples. + +82 +00:07:15,040 --> 00:07:20,220 +So for related samples, we have the same group. + +83 +00:07:21,720 --> 00:07:24,000 +But we have different, or I'm sorry, we have + +84 +00:07:24,000 --> 00:07:26,800 +repeated measures or repeated measurements. + +85 +00:07:28,520 --> 00:07:33,320 +Another example for related samples. Suppose some + +86 +00:07:33,320 --> 00:07:38,180 +patients have high blood pressure. And they are + +87 +00:07:38,180 --> 00:07:44,720 +using drug A. And we have some information about + +88 +00:07:44,720 --> 00:07:48,100 +their blood pressure. Suppose first person, his or + +89 +00:07:48,100 --> 00:07:53,140 +her weight, blood pressure 145. The second one + +90 +00:07:53,140 --> 00:07:54,520 +suppose 160 and so on. + +91 +00:07:59,900 --> 00:08:05,500 +So suppose these people, these patients, use + +92 +00:08:05,500 --> 00:08:09,900 +different drug, for example, drug B. And our goal + +93 +00:08:09,900 --> 00:08:14,240 +is to see if drug B is more effective to reduce + +94 +00:08:14,240 --> 00:08:18,040 +the blood pressure than using drug A. Suppose by + +95 +00:08:18,040 --> 00:08:21,560 +using drug B after, for example, three months, the + +96 +00:08:21,560 --> 00:08:24,320 +first person with high blood pressure, 145, + +97 +00:08:24,440 --> 00:08:27,460 +becomes, for example, 130. The other one, suppose + +98 +00:08:27,460 --> 00:08:33,700 +145, and so on. So here we have the same patients. + +99 +00:08:36,010 --> 00:08:42,110 +Patients took drug A, and after taking drug B for + +100 +00:08:42,110 --> 00:08:45,230 +three months, for example, their new measures are + +101 +00:08:45,230 --> 00:08:47,750 +given by one theory, one photograph, and so on. 
So + +102 +00:08:47,750 --> 00:08:50,710 +each person in this case has two values, or two + +103 +00:08:50,710 --> 00:08:55,550 +observations. One before using drug B, and the + +104 +00:08:55,550 --> 00:08:59,530 +other after. So if you have before and after, it + +105 +00:08:59,530 --> 00:09:05,070 +means we have related samples. So any problem has + +106 +00:09:05,070 --> 00:09:07,690 +before and after for the same people, the same + +107 +00:09:07,690 --> 00:09:10,830 +individual, in this case we have related samples. + +108 +00:09:11,710 --> 00:09:15,290 +So we have to distinguish between two independent + +109 +00:09:15,290 --> 00:09:21,510 +populations and two related samples. The other + +110 +00:09:21,510 --> 00:09:24,730 +objective, we are going to use hypothesis testing + +111 +00:09:24,730 --> 00:09:27,970 +for comparing the proportions of two independent + +112 +00:09:27,970 --> 00:09:32,270 +populations. That's all for this chapter. We are + +113 +00:09:32,270 --> 00:09:35,510 +going to skip the variances of two independent + +114 +00:09:35,510 --> 00:09:41,610 +populations. So this one will be skipped. How to + +115 +00:09:41,610 --> 00:09:45,250 +use one-way analysis of variance ANOVA to test for + +116 +00:09:45,250 --> 00:09:50,130 +differences among. Among this case means between + +117 +00:09:50,130 --> 00:09:54,310 +more than two populations. So among means more + +118 +00:09:54,310 --> 00:09:58,000 +than two. So here we are going also to skip + +119 +00:09:58,000 --> 00:10:01,620 +analysis of variance as well as how to perform + +120 +00:10:01,620 --> 00:10:04,040 +multiple comparisons and when we analysis of + +121 +00:10:04,040 --> 00:10:08,500 +variance will be skipped. 
So mainly we are + +122 +00:10:08,500 --> 00:10:14,160 +focusing on the difference between two means and + +123 +00:10:14,160 --> 00:10:18,440 +two proportions, two means for independent and + +124 +00:10:18,440 --> 00:10:23,340 +related, and also we are going to cover hypothesis + +125 +00:10:23,340 --> 00:10:26,380 +testing for the difference between to population + +126 +00:10:26,380 --> 00:10:29,640 +proportions. That's all for this chapter. + +127 +00:10:33,220 --> 00:10:38,880 +So again, we are going to explain how can we + +128 +00:10:38,880 --> 00:10:42,440 +perform testing for two sample tests. In this + +129 +00:10:42,440 --> 00:10:47,420 +case, there are four cases. First, we'll discuss + +130 +00:10:47,420 --> 00:10:51,260 +hypothesis testing for the population means for + +131 +00:10:51,260 --> 00:10:54,870 +independent samples. So in this case, we have mean + +132 +00:10:54,870 --> 00:10:58,590 +one versus mean two. So that's for independent + +133 +00:10:58,590 --> 00:11:01,450 +samples. The other one also for the population + +134 +00:11:01,450 --> 00:11:05,050 +means, but for related samples. By the way, + +135 +00:11:05,150 --> 00:11:07,090 +sometimes it's called a pair. + +136 +00:11:11,810 --> 00:11:18,230 +Samples. The word pair comes from we have two + +137 +00:11:18,230 --> 00:11:19,570 +values for the same independent + +138 +00:11:23,970 --> 00:11:28,350 +After that, it becomes 110, for example. So it's + +139 +00:11:28,350 --> 00:11:29,610 +called pair. + +140 +00:11:35,150 --> 00:11:40,450 +So in this case, we have semigroup before versus + +141 +00:11:40,450 --> 00:11:41,750 +after treatment. + +142 +00:11:44,410 --> 00:11:47,110 +The other type of two-sample test is population + +143 +00:11:47,110 --> 00:11:49,530 +proportions. In this case, we are going to test to + +144 +00:11:49,530 --> 00:11:52,270 +see if there is a difference between two + +145 +00:11:52,270 --> 00:11:57,050 +population proportions. 
The other type of two + +146 +00:11:57,050 --> 00:12:00,030 +-sample test, population variances, variance one + +147 +00:12:00,030 --> 00:12:04,010 +against variance two. As we mentioned, we are + +148 +00:12:04,010 --> 00:12:07,450 +going to cover population means for independent + +149 +00:12:07,450 --> 00:12:11,110 +and related samples, as well as the population + +150 +00:12:11,110 --> 00:12:16,100 +proportion. That's all, again, for this chapter. + +151 +00:12:18,300 --> 00:12:22,260 +We have to start with the sampling distribution + +152 +00:12:22,260 --> 00:12:26,220 +actually of the difference between two population + +153 +00:12:26,220 --> 00:12:32,580 +means. In chapter seven, we talked about sampling + +154 +00:12:32,580 --> 00:12:39,760 +distribution for + +155 +00:12:39,760 --> 00:12:41,620 +X bar. + +156 +00:12:45,430 --> 00:12:54,310 +And we said that X bar is a point estimate for Mu. + +157 +00:12:55,730 --> 00:13:00,130 +Now in this chapter we are interested for the + +158 +00:13:00,130 --> 00:13:03,090 +difference between two population means. That + +159 +00:13:03,090 --> 00:13:08,770 +means if we have Mu1 minus Mu2, in this case the + +160 +00:13:08,770 --> 00:13:12,850 +point estimate of this difference should be X1 + +161 +00:13:12,850 --> 00:13:17,970 +bar. minus x2. So x1 bar minus x2 bar is a point + +162 +00:13:17,970 --> 00:13:22,570 +estimate for the difference mu1 minus mu2. So this + +163 +00:13:22,570 --> 00:13:28,150 +one is a point estimate for + +164 +00:13:28,150 --> 00:13:36,910 +mu1 minus mu2. It's clear that x2 bar minus x1 bar + +165 +00:13:36,910 --> 00:13:45,310 +is a point estimate of or for mu2 minus mu1. So + +166 +00:13:45,310 --> 00:13:51,170 +that's the point estimate of mu1 minus mu2 or mu2 + +167 +00:13:51,170 --> 00:13:55,510 +minus mu1 just x1 minus x2 bar is the point + +168 +00:13:55,510 --> 00:14:00,690 +estimate for mu1 minus mu2. 
So again, our goal is + +169 +00:14:00,690 --> 00:14:05,090 +to test hypotheses of form or construct confidence + +170 +00:14:05,090 --> 00:14:07,650 +interval for the difference between two + +171 +00:14:07,650 --> 00:14:10,710 +populations means mu1 minus mu2. And the point + +172 +00:14:10,710 --> 00:14:16,070 +estimate for this difference is X1 bar minus X2 + +173 +00:14:16,070 --> 00:14:16,330 +bar. + +174 +00:14:20,010 --> 00:14:24,930 +In this case, we have two cases. One is called + +175 +00:14:24,930 --> 00:14:27,910 +independent or unrelated, the same meaning. + +176 +00:14:28,090 --> 00:14:32,130 +Unrelated means independent populations. In this + +177 +00:14:32,130 --> 00:14:34,670 +case, as we mentioned, samples selected from one + +178 +00:14:34,670 --> 00:14:38,870 +population. has no effect actually on the sample + +179 +00:14:38,870 --> 00:14:41,850 +selected from the other population. As we + +180 +00:14:41,850 --> 00:14:45,050 +mentioned, there are two groups, males and + +181 +00:14:45,050 --> 00:14:50,390 +females. So group one does not affect group two. I + +182 +00:14:50,390 --> 00:14:54,370 +mean males population does not affect the + +183 +00:14:54,370 --> 00:14:56,910 +population of the female. So in this case, we have + +184 +00:14:56,910 --> 00:15:00,250 +unrelated or independent populations. + +185 +00:15:01,610 --> 00:15:06,570 +In this case, there are two scenarios. Sigma is + +186 +00:15:06,570 --> 00:15:12,390 +unknown, but we assume they are equal. So sigma 1 + +187 +00:15:12,390 --> 00:15:14,750 +and sigma 2 are unknown, but we assume they are + +188 +00:15:14,750 --> 00:15:19,830 +equal. In this case, we are going to use something + +189 +00:15:19,830 --> 00:15:25,010 +called pooled variance test. The other scenario, + +190 +00:15:25,410 --> 00:15:29,990 +if the two sigma is unknown, but They are not + +191 +00:15:29,990 --> 00:15:31,890 +equal. 
In this case, we are going to use something

192
00:15:31,890 --> 00:15:37,870
called separate variance t-test. So in this

193
00:15:37,870 --> 00:15:41,710
chapter, we focus just on unknown sigmas. Because

194
00:15:41,710 --> 00:15:46,150
in real life, population variances are unknown. So

195
00:15:46,150 --> 00:15:50,610
we have to focus on this case. I mean, sigmas are

196
00:15:50,610 --> 00:15:54,530
unknown, but maybe we assume they are equal, or we

197
00:15:54,530 --> 00:15:56,750
assume they are not equal.

198
00:16:00,020 --> 00:16:04,000
Now, the hypothesis test in this case, if we are

199


223
00:17:40,030 --> 00:17:42,790
here mu1 is smaller than mu2, it means the

224
00:17:42,790 --> 00:17:46,490
difference between these two is negative. And as

225
00:17:46,490 --> 00:17:49,990
we mentioned before, H1 is the opposite of H0. So

226
00:17:49,990 --> 00:17:55,300
if H1 Mu1 is smaller than Mu2, it means under the

227
00:17:55,300 --> 00:17:59,200
null hypothesis, mu1 is greater than or equal to

228
00:17:59,200 --> 00:18:02,340
mu2. And as we mentioned before, the equal sign

229
00:18:02,340 --> 00:18:07,360
appears only under the null hypothesis. So here

230
00:18:07,360 --> 00:18:11,280
for the two-sided test, the equality here appears

231
00:18:11,280 --> 00:18:15,060
just on the null hypothesis, as well as for lower

232
00:18:15,060 --> 00:18:18,760
and upper tail test. For the upper tail test,

233
00:18:20,150 --> 00:18:22,970
Again here, we have U1 is greater than U2. It

234
00:18:22,970 --> 00:18:26,890
means the difference between these two populations

235
00:18:26,890 --> 00:18:30,570
is above zero, greater than zero. So that's the

236
00:18:30,570 --> 00:18:35,610
new scheme for formulating or stating null and

237
00:18:35,610 --> 00:18:39,510
alternative hypotheses. 
It's quite similar to the

238
00:18:39,510 --> 00:18:42,490
one we had discussed in chapter nine. Any

239
00:18:42,490 --> 00:18:47,300
question? So this is step number one for doing or

240
00:18:47,300 --> 00:18:52,900
performing statistical hypothesis testing. So

241
00:18:52,900 --> 00:18:56,020
again, there are two types of tests. One is two

242
00:18:56,020 --> 00:19:00,500
-tailed. in this case there is no direction you

243
00:19:00,500 --> 00:19:02,660
don't know the exact direction of the two

244
00:19:02,660 --> 00:19:05,480
population means you just say there is a

245
00:19:05,480 --> 00:19:08,620
difference between the two population means in the

246
00:19:08,620 --> 00:19:11,580
other two cases you know the exact direction you

247
00:19:11,580 --> 00:19:18,140
may say that population mean for a it's smaller or

248
00:19:18,140 --> 00:19:22,620
less than or decrease from the other one, here

249
00:19:22,620 --> 00:19:27,280
population A is larger or increased or whatever it

250
00:19:27,280 --> 00:19:31,180
is. So we have null hypothesis, alternative

251
00:19:31,180 --> 00:19:34,620
hypothesis, maybe two-tailed or one-tailed test.

252
00:19:34,680 --> 00:19:38,760
It depends on the nature of the problem itself.

253
00:19:40,880 --> 00:19:44,440
Now what's about the rejection regions? Similar as

254
00:19:44,440 --> 00:19:47,760
we discussed before, if we are talking about two

255
00:19:47,760 --> 00:19:50,360
-tailed test, in this case, there are two

256
00:19:50,360 --> 00:19:54,340
rejection regions, one to the right of alpha over

257
00:19:54,340 --> 00:19:56,940
2 and the other to the left of the other side of

258
00:19:56,940 --> 00:20:01,340
alpha over 2. But here we have T alpha over 2 and

259
00:20:01,340 --> 00:20:04,420
minus T alpha over 2. And again, we are focusing

260
00:20:04,420 --> 00:20:08,280
on unknown sigmas. 
So we have to use T critical

261
00:20:08,280 --> 00:20:12,100
values. So we reject the null hypothesis the same

262
00:20:12,100 --> 00:20:17,160
as we mentioned before if the test statistic falls

263
00:20:17,160 --> 00:20:19,920
in the rejection regions. In this case, if this

264
00:20:19,920 --> 00:20:25,060
statistic lies in this region or the other one, we

265
00:20:25,060 --> 00:20:29,070
have to reject them. That means if we reject the

266
00:20:29,070 --> 00:20:32,910
hypothesis, if T stat is less than negative T

267
00:20:32,910 --> 00:20:37,610
alpha over 2, or if T stat is above or greater

268
00:20:37,610 --> 00:20:41,070
than T alpha over 2. So the same as we discussed

269
00:20:41,070 --> 00:20:44,350
before. That's for two-tailed test. Now for lower

270
00:20:44,350 --> 00:20:47,270
-tailed test, in this case, there is only one

271
00:20:47,270 --> 00:20:51,370
rejection region to the left side. It's minus T

272
00:20:51,370 --> 00:20:54,260
alpha. In this case, we reject the null hypothesis

273
00:20:54,260 --> 00:20:57,580
if the value of the statistic or the test

274
00:20:57,580 --> 00:21:01,140
statistic is smaller than negative T alpha. So we

275
00:21:01,140 --> 00:21:05,740
reject if T stat is smaller than minus T alpha. On

276
00:21:05,740 --> 00:21:08,380
the other side, if we are talking about an upper

277
00:21:08,380 --> 00:21:12,380
-tailed test. So your null hypothesis, I'm sorry, your

278
00:21:12,380 --> 00:21:16,610
alternative hypothesis always look at the

279
00:21:16,610 --> 00:21:20,250
alternative hypothesis in order to determine the

280
00:21:20,250 --> 00:21:24,070
rejection region. So if it is greater than, it

281
00:21:24,070 --> 00:21:26,810
means you have the area to the right. I mean the

282
00:21:26,810 --> 00:21:29,870
rejection region should be to the right. 
If the

283
00:21:29,870 --> 00:21:34,230
alternative hypothesis is negative, I mean smaller

284
00:21:34,230 --> 00:21:37,570
than zero, it means the rejection region should be

285
00:21:37,570 --> 00:21:41,680
to the left side. So here, the alternative

286
00:21:41,680 --> 00:21:44,480
hypothesis, mu1 minus mu2, the difference is

287
00:21:44,480 --> 00:21:47,580
positive. That means the rejection region is to

288
00:21:47,580 --> 00:21:50,280
the right side. So we reject the null hypothesis

289
00:21:50,280 --> 00:21:54,340
if T statistic is greater than T alpha. But here,

290
00:21:54,400 --> 00:21:58,700
for the two-sided test or two-tailed test, they

291
00:21:58,700 --> 00:22:01,000
are two regions. I mean, they are two rejection

292
00:22:01,000 --> 00:22:04,060
regions because there is no direction under the

293
00:22:04,060 --> 00:22:06,940
alternative hypothesis. So alpha should be split

294
00:22:06,940 --> 00:22:10,420
in half. So alpha over two to the right and alpha

295
00:22:10,420 --> 00:22:13,680
over two to the left side. So this scheme actually

296
00:22:13,680 --> 00:22:20,380
mimics the same or similar to what we have

297
00:22:20,380 --> 00:22:25,880
discussed in chapter nine. Any questions? So again,

298
00:22:26,880 --> 00:22:32,680
we have to formulate or state carefully null and

299
00:22:32,680 --> 00:22:36,420
alternate hypothesis for both cases two and one

300
00:22:36,420 --> 00:22:41,890
-tailed test. And the rejection regions, I think,

301
00:22:42,630 --> 00:22:46,270
is straightforward. Now let's see what are the

302
00:22:46,270 --> 00:22:50,250
assumptions in this case. If the two sigmas are

303
00:22:50,250 --> 00:22:54,710
unknown, and we assume they are equal. So we

304
00:22:54,710 --> 00:22:59,790
assume both sigmas are unknown. 
I mean, both

305
00:22:59,790 --> 00:23:02,810
population standard deviations are unknown. And we

306
00:23:02,810 --> 00:23:06,490
assume they are equal. The assumptions are. First,

307
00:23:06,650 --> 00:23:10,130
samples should be drawn randomly and

308
00:23:10,130 --> 00:23:13,330
independently. So samples are randomly and

309
00:23:13,330 --> 00:23:15,850
independently drawn. So we have to select random

310
00:23:15,850 --> 00:23:20,250
samples and they are independent. Assumption

311
00:23:20,250 --> 00:23:23,130
number one. The second one, populations are

312
00:23:23,130 --> 00:23:27,070
normally distributed. So we have to assume the

313
00:23:27,070 --> 00:23:31,650
population is normal or both sample sizes are at

314
00:23:31,650 --> 00:23:36,250
least 30. So, in order to apply the central

315
00:23:36,250 --> 00:23:39,270
limit theorem, so similar to the one we had discussed,

316
00:23:40,210 --> 00:23:43,830
so here either the populations, I mean both of

317
00:23:43,830 --> 00:23:46,510
them, normally distributed, or approximately normally

318
00:23:46,510 --> 00:23:51,030
distributed, or both n's, or both sample sizes,

319
00:23:51,370 --> 00:23:57,290
greater than 30, greater than or equal to, so at

320
00:23:57,290 --> 00:24:02,740
least 30. In addition to that, we have to assume that

321
00:24:02,740 --> 00:24:06,660
population variances are unknown, but we assume

322
00:24:06,660 --> 00:24:11,460
they are equal. So the assumptions are samples are

323
00:24:11,460 --> 00:24:16,400
randomly selected and independent, populations are

324
00:24:16,400 --> 00:24:19,760
normally distributed, or the sample sizes are

325
00:24:19,760 --> 00:24:22,960
large enough in order to apply the central limit

326
00:24:22,960 --> 00:24:26,320
theorem. In addition to that, population variances

327
00:24:26,320 --> 00:24:30,540
are unknown, but we assume to be equal. 
These are

328
00:24:30,540 --> 00:24:36,560
the classical assumptions for performing a t-test,

329
00:24:37,120 --> 00:24:39,480
when sigma 1 and sigma 2 are unknown, but we

330
00:24:39,480 --> 00:24:43,860
assume they are equal. Any questions?

331
00:24:46,820 --> 00:24:51,060
Next, let's see how can we state the test

332
00:24:51,060 --> 00:24:55,690
statistic. Again, we are talking about testing for

333
00:24:55,690 --> 00:25:00,010
the difference between mu1 and mu2, so hypothesis

334
00:25:00,010 --> 00:25:04,590
for mu1 minus mu2 with both sigmas, sigma1 and

335
00:25:04,590 --> 00:25:06,830
sigma2 unknown and assumed equal.

336
00:25:11,050 --> 00:25:15,910
The test statistic in this case is similar to the

337
00:25:15,910 --> 00:25:19,150
one we discussed, but there is a little difference

338
00:25:19,150 --> 00:25:24,510
in these two. The first one was this statistic. It

339
00:25:24,510 --> 00:25:27,870
was x bar minus the mean divided by s over root n.

340
00:25:30,330 --> 00:25:35,390
That's okay if we are testing for H0, mu equal,

341
00:25:35,610 --> 00:25:39,550
for example, any value. Three or four equivalents.

342
00:25:41,190 --> 00:25:43,590
Here we are talking about the difference

343
00:25:43,590 --> 00:25:49,400
confidence, sorry, testing or test for Mu 1 minus

344
00:25:49,400 --> 00:25:49,600
Mu.

345
00:25:52,500 --> 00:25:58,440
So my T stat equals.

346
00:26:02,860 --> 00:26:06,020
For one sample, we have only one point estimate.

347
00:26:06,120 --> 00:26:10,420
X bar is a point estimate for Mu. But for when we

348
00:26:10,420 --> 00:26:13,900
are talking about the difference between two

349
00:26:13,900 --> 00:26:18,160
populations means the point estimate is x1 bar

350
00:26:18,160 --> 00:26:22,780
minus x2 bar. So here I should have x1 bar minus

351
00:26:22,780 --> 00:26:28,500
x2 bar. 
So this is the first term in this formula,

352
00:26:28,680 --> 00:26:34,840
minus. Here we have minus mu. But for the new

353
00:26:34,840 --> 00:26:41,780
scenario, we have H0: Mu1 minus Mu2 equals zero.

354
00:26:42,280 --> 00:26:48,040
So here, Mu1 minus Mu2. In most cases, we assume

355
00:26:48,040 --> 00:26:53,260
the population means are equal under H0. There is no

356
00:26:53,260 --> 00:26:55,680
difference between these two population means. So

357
00:26:55,680 --> 00:26:59,300
we are assuming Mu1 minus Mu2 equals zero. So it

358
00:26:59,300 --> 00:27:04,540
means this term cancels. If we assume there is no

359
00:27:04,540 --> 00:27:08,300
difference between these two population means, in

360
00:27:08,300 --> 00:27:14,200
some cases, might be the difference between these

361
00:27:14,200 --> 00:27:17,760
two equal, for example, A, and A is just a

362
00:27:17,760 --> 00:27:24,100
constant. In this case, you have to plug A instead

363
00:27:24,100 --> 00:27:30,480
of mu1 minus mu2. But most of the cases will have

364
00:27:30,480 --> 00:27:35,520
this classical one, the difference is zero. Divide

365
00:27:35,520 --> 00:27:40,740
by, this is the new term in this chapter, divide

366
00:27:40,740 --> 00:27:43,840
by the standard error of the estimate.

367
00:27:48,000 --> 00:27:50,540
Because here, if we go back a little bit to the T

368
00:27:50,540 --> 00:27:56,080
statistic, it's X bar minus mu divided by S over

369
00:27:56,080 --> 00:27:58,980
square root of N is the standard error of X bar.

370
00:28:02,990 --> 00:28:06,550
The same here, we have, sorry, standard error of

371
00:28:06,550 --> 00:28:10,750
this estimate. So the new term is how can we find

372
00:28:10,750 --> 00:28:15,430
the standard error X1 bar minus X2 bar. 
This one

373
00:28:15,430 --> 00:28:21,350
is given by square root of S square B multiplied

374
00:28:21,350 --> 00:28:25,480
by 1 over N1 plus 1 over N2. S squared B is called

375
00:28:25,480 --> 00:28:28,740
the pooled variance. And the pooled variance is given

376
00:28:28,740 --> 00:28:34,340
by the following equation. So first of all, we have to

377
00:28:34,340 --> 00:28:42,060
compute the pooled variance by using this equation,

378
00:28:42,240 --> 00:28:50,300
S squared B equals N1 minus 1 S1 squared plus N2 minus

379
00:28:50,300 --> 00:28:56,470
1 S2 squared divided by N1 minus 1 plus N2 minus

380
00:28:56,470 --> 00:29:02,050
1. Now let's see if this makes sense or not, the

381
00:29:02,050 --> 00:29:04,910
pooled variance. Now, as we mentioned, there are two

382
00:29:04,910 --> 00:29:11,250
samples. The first one has sample size of N1. The

383
00:29:11,250 --> 00:29:16,130
other one has sample size of N2 with variances of

384
00:29:16,130 --> 00:29:19,650
S1 squared and S2 squared respectively. So we have

385
00:29:19,650 --> 00:29:24,900
two samples with sizes N1 and N2. Sigma is

386
00:29:24,900 --> 00:29:29,140
unknown, but we know the sample variance for each.

387
00:29:30,260 --> 00:29:35,320
Now suppose the two samples are mixed. Let's see

388
00:29:35,320 --> 00:29:37,600
how can we find the pooled. It's called the

389
00:29:37,600 --> 00:29:43,280
pooled variance. Sometimes called the weighted variance.

390
00:29:45,020 --> 00:29:49,080
Look at this formula. N1 minus 1 S1 squared plus N2

391
00:29:49,080 --> 00:29:53,710
minus 1 S2 squared divided by N1 minus 1 plus

392
00:29:53,710 --> 00:30:00,950
N2 minus one. We know that S squared is the sum

393
00:30:00,950 --> 00:30:06,650
of X minus X bar squared divided by N minus one. That's if

394
00:30:06,650 --> 00:30:10,670
we have only one sample. 
Now just cross

395
00:30:10,670 --> 00:30:16,070
multiplication, we will get N minus one S squared

396
00:30:16,070 --> 00:30:21,230
equals sum of X minus X bar squared. That's for

397
00:30:21,230 --> 00:30:27,080
the first sample. What's about the second one? We

398
00:30:27,080 --> 00:30:32,660
have two samples. So we can write for the first

399
00:30:32,660 --> 00:30:43,540
one, N1 minus 1 S1 squared equals sum of X minus X

400
00:30:43,540 --> 00:30:47,720
bar squared. This is for sample one. For the other sample,

401
00:30:48,060 --> 00:30:51,160
we have the same equation but different data. So

402
00:30:51,160 --> 00:30:55,680
we have S squared equals sum of Y, for example, minus Y

403
00:30:55,680 --> 00:31:00,260
bar squared divided by N2 minus 1. Now cross

404
00:31:00,260 --> 00:31:04,640
multiplication will give N2 minus 1 S2 squared

405
00:31:04,640 --> 00:31:08,780
equals sum of Y minus Y bar squared. That's for

406
00:31:08,780 --> 00:31:09,760
the second sample.

407
00:31:13,000 --> 00:31:19,000
We are looking for standard error of the

408
00:31:19,000 --> 00:31:22,180
difference between these two. So now the standard

409
00:31:22,180 --> 00:31:26,500
error, or let's compute first, S squared B for

410
00:31:26,500 --> 00:31:30,220
both. Now, S squared in general, as we mentioned,

411
00:31:30,820 --> 00:31:34,900
is sum of X minus X bar squared divided by N minus

412
00:31:34,900 --> 00:31:41,200
1. So here, we have the first sum plus the second

413
00:31:41,200 --> 00:31:50,770
one divided by N

445
00:34:38,100 --> 00:34:42,260
multiplied by 1 over N1 plus 1 over N2. 
So now,

446
00:34:42,820 --> 00:34:47,780
the T statistic becomes, again, we have X1 bar minus

447
00:34:47,780 --> 00:34:51,240
X2 bar minus the difference between the two

448
00:34:51,240 --> 00:34:56,650
population means, divided by this term, represents

449
00:34:56,650 --> 00:35:02,890
the standard error of the estimate, this estimate.

450
00:35:04,990 --> 00:35:10,270
Generally speaking, any statistic, for example, T

451
00:35:10,270 --> 00:35:15,150
is estimate minus

452
00:35:15,150 --> 00:35:21,890
hypothesized value divided

453
00:35:21,890 --> 00:35:27,320
by the standard error of this estimate. And it is

454
00:35:27,320 --> 00:35:31,280
statistical. If we are talking about one sample,

455
00:35:32,160 --> 00:35:38,460
in this case, we have only one estimate, so it's X

456
00:35:38,460 --> 00:35:44,400
bar minus a hypothesized value, Mu, standard error

457
00:35:44,400 --> 00:35:50,700
of X bar, which is S over square root of N. That's for

458
00:35:50,700 --> 00:35:55,540
one sample. Now, for two samples, what should we

459
00:35:55,540 --> 00:36:02,440
have? We are talking about two samples. Now the

460
00:36:02,440 --> 00:36:06,800
estimate for the difference. So the difference is

461
00:36:06,800 --> 00:36:13,520
x1 bar minus x2 bar minus the hypothesized value

462
00:36:13,520 --> 00:36:18,920
under H0. We are assuming mu1 minus mu2 equals 0.

463
00:36:19,910 --> 00:36:24,810
That's the general case. I mean this special case.

464
00:36:25,150 --> 00:36:29,150
Sometimes suppose it's equal A or whatever it is.

465
00:36:29,510 --> 00:36:34,970
We have to plug A here. Divide by the standard

466
00:36:34,970 --> 00:36:40,820
error of this estimate. Now the standard of this

467
00:36:40,820 --> 00:36:44,480
estimate equals this one. 
So we have to divide by

468
00:36:44,480 --> 00:36:48,680
S squared B multiplied by 1 over N1 plus 1 over

469
00:36:48,680 --> 00:36:54,820
N2. So this is your test statistic. Any question?

470
00:36:56,300 --> 00:37:02,440
So again, this is the estimate of

471
00:37:02,440 --> 00:37:07,380
the difference between U1 and U2. The other one is

472
00:37:07,380 --> 00:37:12,550
the hypothesized value. In most cases, this

473
00:37:12,550 --> 00:37:17,910
difference is zero. Divide by this amount is the

474
00:37:17,910 --> 00:37:19,710
standard error of this estimate.

475
00:37:23,210 --> 00:37:27,350
And the standard error is given by square root. It

476
00:37:27,350 --> 00:37:32,750
looks like square root of S squared divided by N.

477
00:37:33,290 --> 00:37:37,190
But in this case, we have two standard deviations,

478
00:37:37,670 --> 00:37:42,600
so S1 squared over N1. plus S2 squared over N2.

479
00:37:42,900 --> 00:37:46,400
But we are assuming that both sigmas are unknown and

480
00:37:46,400 --> 00:37:49,580
we assume they are equal. So these two are the

481
00:37:49,580 --> 00:37:55,400
same. So factor out. So here we have S squared

482
00:37:55,400 --> 00:38:00,440
times 1 over N1 plus 1 over N2. And this one is under the

483
00:38:00,440 --> 00:38:06,760
square root. Any question? Basically, we are going

484
00:38:06,760 --> 00:38:11,340
to use this statistic and the formula will be

485
00:38:11,340 --> 00:38:15,460
given either the pooled variance equation or the

486
00:38:15,460 --> 00:38:20,360
other one.

487
00:38:32,240 --> 00:38:35,530
Now what about the confidence interval? 
As we

488
00:38:35,530 --> 00:38:38,350
mentioned before, any confidence interval can be

489
00:38:38,350 --> 00:38:42,350
constructed by using general form, which is

490
00:38:42,350 --> 00:38:47,350
estimate, I mean the point estimate, any

491
00:38:47,350 --> 00:38:53,070
confidence interval. Estimate, plus or minus

492
00:38:53,070 --> 00:39:00,550
critical value times standard error of your

493
00:39:00,550 --> 00:39:06,280
estimate. That's in general. estimate or point

494
00:39:06,280 --> 00:39:10,840
estimate plus or minus critical value times the

495
00:39:10,840 --> 00:39:15,400
standard error of your estimate before we had

496
00:39:15,400 --> 00:39:21,620
talked about confidence interval for mu so in that

497
00:39:21,620 --> 00:39:27,400
case we have x bar plus or minus t times the standard

498
00:39:27,400 --> 00:39:31,420
error of this estimate which is s over root n

499
00:39:31,420 --> 00:39:37,520
that's before Now we are talking about confidence

500
00:39:37,520 --> 00:39:48,560
interval for mu1 minus mu2. Now my point estimate

501
00:39:48,560 --> 00:39:55,580
of this difference is x1 bar minus x2 bar, plus or

502
00:39:55,580 --> 00:40:03,670
minus. Critical value is T alpha over 2. since

503
00:40:03,670 --> 00:40:07,550
sigma's are unknown times the standard error of

504
00:40:07,550 --> 00:40:14,910
the estimate this value square root one over n one

505
00:40:14,910 --> 00:40:20,650
plus one over n two this is your confidence

506
00:40:20,650 --> 00:40:26,390
interval by the way this statistic has T

507
00:40:26,390 --> 00:40:30,910
distribution with degrees of freedom equals N1

508
00:40:30,910 --> 00:40:35,290
plus N2 minus 2. Because for one population, when

509
00:40:35,290 --> 00:40:38,210
we have one sample, your degrees of freedom is N

510
00:40:38,210 --> 00:40:41,730
minus 1. 
If we have two populations and we

511
00:40:41,730 --> 00:40:44,950
selected two random samples, your degrees of

512
00:40:44,950 --> 00:40:51,160
freedom is n1 minus 1 plus n2 minus 1. So it

513
00:40:51,160 --> 00:40:55,260
should be n1 plus n2 minus 2. So this

514
00:40:55,260 --> 00:40:59,780
statistic has T distribution with degrees of

515
00:40:59,780 --> 00:41:04,260
freedom n1 plus n2 minus 2. This is only if we

516
00:41:04,260 --> 00:41:09,860
assume variances unknown, but they are equal. In

517
00:41:09,860 --> 00:41:12,760
this case, your degrees of freedom, n1 plus n2

518
00:41:12,760 --> 00:41:17,790
minus 2. So that's for the testing and the

519
00:41:17,790 --> 00:41:21,370
confidence interval approach. So if sigma's

520
00:41:21,370 --> 00:41:23,370
unknown and they are equal, your confidence

521
00:41:23,370 --> 00:41:27,610
interval is x1 bar minus x2 bar plus or minus t

522
00:41:27,610 --> 00:41:30,590
alpha over two square root. That's going to be

523
00:41:30,590 --> 00:41:34,250
multiplied by one over n1 plus one over n2. If

524
00:41:34,250 --> 00:41:41,610
we are talking about confidence for mu2 minus mu1,

525
00:41:42,650 --> 00:41:48,410
We should have here x2 bar minus x1 bar plus or

526
00:41:48,410 --> 00:41:53,710
minus the same amount. Because this part doesn't

527
00:41:53,710 --> 00:41:57,950
change if we start with 1 over n2 plus 1 over n1.

528
00:41:58,750 --> 00:42:03,430
But this one should be x2 bar minus x1 bar if we

529
00:42:03,430 --> 00:42:06,310
are talking about confidence interval for the

530
00:42:06,310 --> 00:42:12,720
difference mu2 minus mu1. And that's all. Any

531
00:42:12,720 --> 00:42:22,900
question? In general,

532
00:42:23,560 --> 00:42:28,020
x1 bar and x2 bar are not equal. 
Because if you

533
00:42:28,020 --> 00:42:30,140
have two populations and you select two different

534
00:42:30,140 --> 00:42:34,660
samples, it makes sense that the two means are not

535
00:42:34,660 --> 00:42:38,310
equal. But if they are equal, it means your

536
00:42:38,310 --> 00:42:42,570
statistic is zero. And that's never happened in

537
00:42:42,570 --> 00:42:45,610
the real life. Maybe close to zero, but not

538
00:42:45,610 --> 00:42:49,150
exactly zero. Let's look at one example.

539
00:43:00,650 --> 00:43:04,570
A straightforward example. You are a financial

540
00:43:04,570 --> 00:43:09,960
analyst for a brokerage firm. Is there a difference

541
00:43:09,960 --> 00:43:16,760
in dividend yield between stock listed on the New

542
00:43:16,760 --> 00:43:20,840
York Stock Exchange and Nasdaq? You collect the

543
00:43:20,840 --> 00:43:23,640
following data. So we have two data for two

544
00:43:23,640 --> 00:43:28,940
different stocks. One for New York Stock Exchange

545
00:43:28,940 --> 00:43:33,310
and other for Nasdaq. We have a random sample of

546
00:43:33,310 --> 00:43:36,550
size 21 from the first one with standard deviation

547
00:43:36,550 --> 00:43:42,690
1.3 and sample mean 3.17. The other sample gives

548
00:43:42,690 --> 00:43:46,730
the following results. The random sample size

549
00:43:46,730 --> 00:43:51,350
equals 25 with mean 2.53 and standard deviation 1

550
00:43:51,350 --> 00:43:56,880
.16. So this is the information we have. Sample

551
00:43:56,880 --> 00:44:01,180
sizes for both sample means and sample standard

552
00:44:01,180 --> 00:44:05,060
deviations. So that means population variances are

553
00:44:05,060 --> 00:44:10,140
unknown. Assuming both populations are

554
00:44:10,140 --> 00:44:12,920
approximately normal. We have to assume they are

555
00:44:12,920 --> 00:44:18,370
normal. 
Because the sample sizes are less than 30.

556
00:44:18,890 --> 00:44:22,210
In this case, if they are smaller than 30 and the

557
00:44:22,210 --> 00:44:25,090
populations are not normal, we cannot use the T

558
00:44:25,090 --> 00:44:30,370
-statistic. T is used only if populations are

559
00:44:30,370 --> 00:44:34,100
approximately normal, abnormal, or informal. But

560
00:44:34,100 --> 00:44:37,520
in this case, the two sizes are smaller than 30,

561
00:44:37,780 --> 00:44:41,460
so we have to assume both populations are normally

562
00:44:41,460 --> 00:44:44,080
distributed or approximately normally distributed,

563
00:44:44,800 --> 00:44:49,800
or we have to use another test. So we're assuming

564
00:44:49,800 --> 00:44:54,700
both are normal with equal variances. The question

565
00:44:54,700 --> 00:45:00,040
is, is there a difference in means? So there is

566
00:45:00,040 --> 00:45:03,600
no direction. Is there a difference? That means we

567
00:45:03,600 --> 00:45:08,080
are testing mu1 equals mu2 against mu1 does not

568
00:45:08,080 --> 00:45:13,320
equal mu2. So the null hypothesis, the difference

569
00:45:13,320 --> 00:45:16,640
between these two is zero because it asks about is

570
00:45:16,640 --> 00:45:20,740
there a difference here. So we assume there is no

571
00:45:20,740 --> 00:45:23,400
difference. It means mu1 equals mu2 under the null

572
00:45:23,400 --> 00:45:27,960
hypothesis. Against the alternative hypothesis,

573
00:45:28,560 --> 00:45:32,440
mu1 minus mu2 is not zero. That means mu1 does not

574
00:45:32,440 --> 00:45:36,340
equal mu2. So either you state the null by using

575
00:45:36,340 --> 00:45:41,080
this way, mu1 minus mu2 equals zero, or mu1 equals

576
00:45:41,080 --> 00:45:47,360
mu2. Now, before computing the test statistic, we

577
00:45:47,360 --> 00:45:54,020
have to compute S squared B. 
For S squared B, this

578
00:45:54,020 --> 00:46:00,540
is the equation we have. Now, N1 is 21, so 21

579
00:46:00,540 --> 00:46:05,020
minus 1, times S1 squared. We have the sample

580
00:46:05,020 --> 00:46:08,780
standard deviation of 1.3 for the first sample, so

581
00:46:08,780 --> 00:46:14,720
this quantity squared, plus N2 was 25, minus 1

582
00:46:14,720 --> 00:46:19,480
times S2 squared, 1.16 squared, divided by N1

583
00:46:19,480 --> 00:46:21,060
minus 1 plus N2 minus 1.

584
00:46:24,120 --> 00:46:28,260
The sample, I'm sorry, the pooled sample variance,

585
00:46:29,040 --> 00:46:35,020
which is about 1.5. Now, you, in this case, after

586
00:46:35,020 --> 00:46:38,910
computing S squared B, easily you can compute the

587
00:46:38,910 --> 00:46:41,470
value of the test statistic by using this

588
00:46:41,470 --> 00:46:46,210
equation. Now x1 bar minus x2 bar, x1 bar is 3.17

589
00:46:46,210 --> 00:46:52,590
minus x2 bar is 2.53 minus. Here we should have

590
00:46:52,590 --> 00:47:00,070
mu1 minus mu2. Now under H0 because the test

591
00:47:00,070 --> 00:47:06,140
statistic is computed only if H0 is true. So,

592
00:47:06,700 --> 00:47:13,560
always, always, always,

593
00:47:14,540 --> 00:47:26,040
we compute T statistic under H0 is true, always.

594
00:47:27,460 --> 00:47:30,320
Otherwise, we cannot compute T statistic.

595
00:47:33,760 --> 00:47:37,580
The rule is to compute the value of the statistic

596
00:47:37,580 --> 00:47:42,980
if H1 is true. Let's see what will happen. Now, if

597
00:47:42,980 --> 00:47:48,640
H1 is true, H1 mu1 minus mu2 does not equal zero.

598
00:47:48,920 --> 00:47:51,880
So what's the value here? You don't know. Because

599
00:47:51,880 --> 00:47:54,200
this difference is not zero. So what's the value?

600
00:47:54,620 --> 00:47:57,900
I don't know. 
So you cannot determine the value of + +601 +00:47:57,900 --> 00:48:02,240 +the statistic under H1. But under H0, here we are + +602 +00:48:02,240 --> 00:48:05,950 +assuming The difference is zero, so this statistic + +603 +00:48:05,950 --> 00:48:10,410 +can be computed only if the null hypothesis is + +604 +00:48:10,410 --> 00:48:12,690 +true, otherwise you cannot compute this value. + +605 +00:48:13,430 --> 00:48:17,310 +Make sense? So maybe true and false problem asks + +606 +00:48:17,310 --> 00:48:21,810 +about we compute this statistic under if zero is + +607 +00:48:21,810 --> 00:48:23,190 +true. It's correct statement. + +608 +00:48:26,210 --> 00:48:29,150 +In this case, we're assuming that the difference + +609 +00:48:29,150 --> 00:48:36,830 +is zero, so minus zero. In real cases, I'm sorry, + +610 +00:48:37,110 --> 00:48:40,790 +in most cases, we assume this difference is zero. + +611 +00:48:42,030 --> 00:48:45,110 +In some cases, it might be, for example, it's + +612 +00:48:45,110 --> 00:48:51,440 +three, for example, or one. One makes sense. So we + +613 +00:48:51,440 --> 00:48:54,600 +have to plug one instead of zero if the difference + +614 +00:48:54,600 --> 00:48:57,840 +is one. But here the difference is zero, so it + +615 +00:48:57,840 --> 00:49:01,880 +should be zero. So minus zero. Divide by S squared + +616 +00:49:01,880 --> 00:49:07,680 +B, this amount, multiplied by one over N1 plus one + +617 +00:49:07,680 --> 00:49:11,080 +over N2. So one over 21 plus one over 25. That + +618 +00:49:11,080 --> 00:49:15,380 +will give 2.04. So your T statistic in this case + +619 +00:49:15,380 --> 00:49:17,860 +is 2.04. 
+
+620
+00:49:20,840 --> 00:49:24,740
+as we mentioned since we are talking about two
+
+621
+00:49:24,740 --> 00:49:26,980
+-tailed tests there are three different approaches
+
+622
+00:49:26,980 --> 00:49:31,080
+for testing one is called a critical value
+
+623
+00:49:31,080 --> 00:49:37,360
+approach the other one confidence interval and the
+
+624
+00:49:37,360 --> 00:49:40,380
+last one is p-value approach so let's see two of
+
+625
+00:49:40,380 --> 00:49:47,260
+these critical value approach keep in mind your
+
+626
+00:49:47,260 --> 00:49:54,730
+test statistic is 2.04 now since it's two tailed
+
+627
+00:49:54,730 --> 00:50:00,970
+test so you have two rejection regions T alpha
+
+628
+00:50:00,970 --> 00:50:06,070
+over 2 with degrees of freedom n1 plus n2 minus 2
+
+629
+00:50:06,070 --> 00:50:14,210
+n1 is 21 n2 is 25 so your degrees of freedom 21
+
+630
+00:50:14,210 --> 00:50:16,730
+plus 25 minus 2 this will give
+
+631
+00:50:20,160 --> 00:50:27,120
+44 degrees of freedom is 44 now look at the t
+
+632
+00:50:27,120 --> 00:50:32,200
+table 25
+
+633
+00:50:32,200 --> 00:50:36,060
+so
+
+634
+00:50:36,060 --> 00:50:41,680
+it's 25
+
+667
+00:53:20,730 --> 00:53:25,410
+statistic. The value is 2.04. As we mentioned,
+
+668
+00:53:25,510 --> 00:53:28,330
+there are two, three approaches for doing this
+
+669
+00:53:28,330 --> 00:53:32,390
+test. One is called critical value approach. Now,
+
+670
+00:53:32,750 --> 00:53:37,530
+critical value is plus or minus T alpha over 2
+
+671
+00:53:37,530 --> 00:53:41,270
+with degrees of freedom 44. By the T table, we got
+
+672
+00:53:41,270 --> 00:53:46,880
+this result. So the critical regions are above 2
+
+673
+00:53:46,880 --> 00:53:52,000
+.01 or below minus 2.01. Now your statistic falls
+
+674
+00:53:52,000 --> 00:53:54,960
+in this rejection region. So we have to reject the
+
+675
+00:53:54,960 --> 00:53:57,860
+null hypothesis.
So my conclusion is there is + +676 +00:53:57,860 --> 00:54:00,920 +sufficient evidence to support the alternative + +677 +00:54:00,920 --> 00:54:01,640 +hypothesis. + +678 +00:54:05,100 --> 00:54:10,280 +The other approach, confidence interval for mu1 + +679 +00:54:10,280 --> 00:54:14,190 +minus mu2. Again, the formula is, he asks about, + +680 +00:54:14,570 --> 00:54:18,450 +since we reject the null hypothesis, so this + +681 +00:54:18,450 --> 00:54:21,310 +hypothesis is false, I mean the difference is not + +682 +00:54:21,310 --> 00:54:27,630 +zero. Can we be 95% confident that the mean of New + +683 +00:54:27,630 --> 00:54:32,050 +York Stock Exchange is greater than or less than? + +684 +00:54:32,810 --> 00:54:37,530 +Let's see. Let's formulate or let's construct a + +685 +00:54:37,530 --> 00:54:40,910 +confidence interval for mu1 minus mu2. This is + +686 +00:54:40,910 --> 00:54:45,790 +your formula. So x1 bar minus x2 bar, if you go + +687 +00:54:45,790 --> 00:54:49,770 +back a little bit to these two values, x1 bar is 3 + +688 +00:54:49,770 --> 00:54:59,000 +.27 minus x2 bar is 2.53. The difference is this + +689 +00:54:59,000 --> 00:55:04,060 +amount, 0.74, plus or minus T alpha over 2, the + +690 +00:55:04,060 --> 00:55:07,580 +critical value we have here, so plus or minus this + +691 +00:55:07,580 --> 00:55:12,940 +amount, times the standard error of this estimate, + +692 +00:55:14,240 --> 00:55:20,420 +you easily can compute this value by 0.3628, and + +693 +00:55:20,420 --> 00:55:24,360 +you will end with this interval. Now, this + +694 +00:55:24,360 --> 00:55:30,860 +interval means that We are 95% confident that the + +695 +00:55:30,860 --> 00:55:34,140 +difference between the two populations means fall + +696 +00:55:34,140 --> 00:55:37,960 +between these two values. Now the question is, + +697 +00:55:38,960 --> 00:55:44,260 +since we are testing mu1 minus mu2 equals zero, + +698 +00:55:46,140 --> 00:55:50,520 +does this interval contain zero or not? 
So the
+
+699
+00:55:50,520 --> 00:55:55,480
+question is, does your interval contain zero?
+
+700
+00:55:56,550 --> 00:56:01,010
+contains zero, zero star, this zero. Maybe it's
+
+701
+00:56:01,010 --> 00:56:05,310
+one, not zero, in this case it's zero. Now, this
+
+702
+00:56:05,310 --> 00:56:07,830
+interval, the lower bound is positive, the upper
+
+703
+00:56:07,830 --> 00:56:11,190
+bound is positive, it's also positive, so zero is
+
+704
+00:56:11,190 --> 00:56:17,510
+not inside the interval. So that means It's never
+
+705
+00:56:17,510 --> 00:56:21,190
+equal zero, so we reject the null hypothesis. So
+
+706
+00:56:21,190 --> 00:56:24,610
+since zero lies outside this interval, I mean the
+
+707
+00:56:24,610 --> 00:56:28,050
+confidence interval does not contain zero. That
+
+708
+00:56:28,050 --> 00:56:30,950
+means we have to reject the null hypothesis. So if
+
+709
+00:56:30,950 --> 00:56:36,750
+the rule of thumb is if the confidence interval,
+
+710
+00:56:37,590 --> 00:56:43,730
+in this case for mu1 minus mu2 contains zero.
+
+711
+00:56:48,110 --> 00:56:56,510
+then we don't reject we don't reject otherwise we
+
+712
+00:56:56,510 --> 00:57:01,810
+have to reject it's zero but be careful not always
+
+713
+00:57:01,810 --> 00:57:04,830
+zero here we are assuming the difference is zero
+
+714
+00:57:04,830 --> 00:57:08,010
+but the difference if the difference is one then
+
+715
+00:57:08,010 --> 00:57:12,310
+ask yourself is this interval contain one one but
+
+716
+00:57:12,310 --> 00:57:16,370
+in this case it's zero so the question is Is the
+
+717
+00:57:16,370 --> 00:57:20,390
+interval contains zero or not? Zero is outside, so we
+
+718
+00:57:20,390 --> 00:57:24,330
+reject the null hypothesis. Now, do you think the mean of
+
+719
+00:57:24,330 --> 00:57:28,850
+New York stock is greater than Nasdaq or not?
+
+720
+00:57:30,990 --> 00:57:36,090
+Since the interval ends, I mean lower and upper
+
+721
+00:57:36,090 --> 00:57:41,030
+bounds, are positive, Positive, positive, so that
+
+722
+00:57:41,030 --> 00:57:45,730
+means the mean 1 is greater than mean 2. So mean 1
+
+723
+00:57:45,730 --> 00:57:49,350
+is the mean for New York is greater than the mean for
+
+724
+00:57:49,350 --> 00:57:52,830
+NASDAQ. If the interval is negative, negative,
+
+725
+00:57:53,310 --> 00:57:57,850
+that means mean 1 is smaller than mean 2. If it's
+
+726
+00:57:57,850 --> 00:58:00,290
+positive, positive, then mean 1 is greater than.
+
+727
+00:58:00,610 --> 00:58:05,420
+If it's negative plus, then If the interval starts
+
+728
+00:58:05,420 --> 00:58:09,380
+from negative to positive, that means zero lies
+
+729
+00:58:09,380 --> 00:58:12,600
+inside the interval. So in this case, we don't
+
+730
+00:58:12,600 --> 00:58:17,400
+reject. So the only time we don't reject is zero.
+
+731
+00:58:18,830 --> 00:58:22,210
+The lower bound is negative and the upper bound is
+
+732
+00:58:22,210 --> 00:58:24,990
+positive. Because if you start for example from
+
+733
+00:58:24,990 --> 00:58:29,650
+minus one to two for example, in this case zero in
+
+734
+00:58:29,650 --> 00:58:32,030
+the interval, I mean the confidence interval
+
+735
+00:58:32,030 --> 00:58:35,990
+contains zero. In this case we don't reject. So
+
+736
+00:58:35,990 --> 00:58:39,770
+again, the only time you have to don't reject is
+
+737
+00:58:39,770 --> 00:58:42,990
+zero if the confidence starts from negative to
+
+738
+00:58:42,990 --> 00:58:46,590
+positive. Otherwise, you reject the null
+
+739
+00:58:46,590 --> 00:58:50,350
+hypothesis. So in this case, zero is less than the
+
+740
+00:58:50,350 --> 00:58:54,890
+entire interval, means outside the entire
+
+741
+00:58:54,890 --> 00:59:01,490
+interval.
We can be 95% confident that the mean of
+
+742
+00:59:01,490 --> 00:59:03,870
+New York Stock Exchange is greater than the mean
+
+743
+00:59:03,870 --> 00:59:10,350
+of NASDAQ. Makes sense? Any questions? Next time we'll talk,
+
+744
+00:59:10,730 --> 00:59:12,810
+I will give the third approach, the p-value
+
+745
+00:59:12,810 --> 00:59:17,270
+approach for conducting the hypothesis testing.
+
+746
+00:59:18,530 --> 00:59:22,210
+Any question? So that's all for today.
diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc_postprocess.srt
new file mode 100644
index 0000000000000000000000000000000000000000..f07dea034830ac1dfe5d0f6623ccaa337ea24f6a
--- /dev/null
+++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc_postprocess.srt
@@ -0,0 +1,2984 @@
+1
+00:00:06,000 --> 00:00:10,400
+Today, I will start chapter 10.
+
+2
+00:00:12,160 --> 00:00:16,940
+Chapter 10 talks about two sample tests and one-way
+
+3
+00:00:16,940 --> 00:00:24,720
+analysis of variance. In the last chapters, we had
+
+4
+00:00:24,720 --> 00:00:28,380
+already talked about one sample.
+
+5
+00:00:31,700 --> 00:00:34,240
+And also, we introduced how
+
+6
+00:00:41,910 --> 00:00:51,590
+In addition to that, we perform tests about Mu and
+
+7
+00:00:51,590 --> 00:00:59,310
+Pi. In this chapter, we are going to generalize our
+
+8
+00:00:59,310 --> 00:01:03,870
+confidence interval and testing about two-sample
+
+9
+00:01:03,870 --> 00:01:08,600
+tests or more than two-sample tests. For two
+
+10
+00:01:08,600 --> 00:01:14,900
+samples, we are interested in the difference
+
+11
+00:01:14,900 --> 00:01:19,220
+between either two population means or two
+
+12
+00:01:19,220 --> 00:01:23,580
+population proportions. For example, suppose we
+
+13
+00:01:23,580 --> 00:01:29,440
+are teaching students and we have males and
+
+14
+00:01:29,440 --> 00:01:36,280
+females.
And our goal is to test to see whether or + +15 +00:01:36,280 --> 00:01:40,470 +not there exists a significant difference between + +16 +00:01:40,470 --> 00:01:45,090 +scores of males and females. So in this case, we + +17 +00:01:45,090 --> 00:01:52,510 +have two populations, population A and population + +18 +00:01:52,510 --> 00:01:58,930 +B. And for example, suppose we select a random + +19 +00:01:58,930 --> 00:02:02,910 +sample from + +20 +00:02:02,910 --> 00:02:06,660 +the first population. And select another sample + +21 +00:02:06,660 --> 00:02:11,800 +from the second population. And the goal is how + +22 +00:02:11,800 --> 00:02:15,000 +can we construct a confidence interval for the + +23 +00:02:15,000 --> 00:02:17,920 +difference between mu1 and mu2. So we are talking + +24 +00:02:17,920 --> 00:02:21,700 +about mu1 minus mu2, or the difference between two + +25 +00:02:21,700 --> 00:02:26,240 +population proportions, pi1 minus pi2. + +26 +00:02:29,620 --> 00:02:33,280 +Sometimes there exists more than two populations. + +27 +00:02:34,490 --> 00:02:39,970 +And also, we can figure out if the means, in + +28 +00:02:39,970 --> 00:02:46,890 +general, are different or not. In this chapter, + +29 +00:02:47,030 --> 00:02:50,790 +we'll just talk about two sample tests. And we are + +30 +00:02:50,790 --> 00:02:54,370 +going to skip one-way analysis of variance. So + +31 +00:02:54,370 --> 00:03:00,510 +we'll explain in details confidence intervals and + +32 +00:03:00,510 --> 00:03:05,830 +hypothesis testing about two samples. For one-way + +33 +00:03:05,830 --> 00:03:08,830 +analysis of variance, that one will be discussed + +34 +00:03:08,830 --> 00:03:15,610 +in the SPSS course. 
The objectives for this + +35 +00:03:15,610 --> 00:03:20,910 +chapter are how to use hypothesis testing for + +36 +00:03:20,910 --> 00:03:25,930 +comparing the difference between the means of two + +37 +00:03:25,930 --> 00:03:29,890 +independent populations, the means of two related + +38 +00:03:29,890 --> 00:03:33,390 +populations. Let's see the difference between two + +39 +00:03:33,390 --> 00:03:41,160 +independent and two related populations. So the + +40 +00:03:41,160 --> 00:03:47,800 +two populations might be considered independent or + +41 +00:03:47,800 --> 00:03:52,040 +related. Related means dependent population. Now, + +42 +00:03:52,120 --> 00:03:56,520 +for independent populations, + +43 +00:03:57,560 --> 00:04:02,600 +in this case, sample one, does not affect the + +44 +00:04:02,600 --> 00:04:06,660 +results of sample. For this reason, we call it as + +45 +00:04:06,660 --> 00:04:09,660 +independent sample. So sample one does not affect + +46 +00:04:09,660 --> 00:04:13,240 +the results of sample two. For example, suppose as + +47 +00:04:13,240 --> 00:04:18,900 +we mentioned, we have females and + +48 +00:04:18,900 --> 00:04:23,230 +males. So here, the two groups or the two samples + +49 +00:04:23,230 --> 00:04:27,750 +are independent because a student's scores for + +50 +00:04:27,750 --> 00:04:31,650 +females, for example, does not affect score for + +51 +00:04:31,650 --> 00:04:36,370 +males. For this reason, they are independent. So + +52 +00:04:36,370 --> 00:04:40,950 +since we have two different groups, + +53 +00:04:42,150 --> 00:04:44,830 +Or two different samples. In this case, we have + +54 +00:04:44,830 --> 00:04:47,450 +independent populations or independent samples. + +55 +00:04:48,310 --> 00:04:52,550 +Now, for the two related populations, for example, + +56 +00:04:55,150 --> 00:05:00,970 +in this case, suppose we have 10 persons. + +57 +00:05:02,550 --> 00:05:10,110 +Person 1, suppose his or her weight 120 kilograms. 
+ +58 +00:05:11,480 --> 00:05:15,780 +for the first item. The second item or second + +59 +00:05:15,780 --> 00:05:21,480 +person, his or her weight is 135 kilograms. The + +60 +00:05:21,480 --> 00:05:25,380 +third one, for example, 110. And suppose we have + +61 +00:05:25,380 --> 00:05:28,380 +10 persons. + +62 +00:05:29,700 --> 00:05:35,040 +Weight? Suppose these are weights in kilograms. + +63 +00:05:38,120 --> 00:05:41,540 +Now, suppose these people or these students or + +64 +00:05:41,540 --> 00:05:46,720 +whatever they are have + +65 +00:05:46,720 --> 00:05:51,640 +diet for a period of time. For example, suppose + +66 +00:05:51,640 --> 00:05:56,900 +they have diet for three months. So, currently + +67 +00:05:56,900 --> 00:06:02,560 +their weights are given by 120, 135 and so on. + +68 +00:06:06,560 --> 00:06:09,860 +They have diet for three months after that. Then + +69 +00:06:09,860 --> 00:06:13,760 +we measure their weights after three months. Now + +70 +00:06:13,760 --> 00:06:18,640 +suppose the first person, his weight was 120. For + +71 +00:06:18,640 --> 00:06:21,900 +example, his weight was in January 120. Now in + +72 +00:06:21,900 --> 00:06:22,220 +April. + +73 +00:06:25,260 --> 00:06:30,060 +For example, suppose 105, 105 kilograms. The + +74 +00:06:30,060 --> 00:06:34,440 +second person suppose 120. The third one suppose + +75 +00:06:34,440 --> 00:06:35,900 +95, and so on. + +76 +00:06:40,310 --> 00:06:45,890 +Now, in two cases, we have the same individuals, + +77 +00:06:46,330 --> 00:06:50,110 +the same persons. Person one, his weight was 120, + +78 +00:06:50,690 --> 00:06:54,950 +and after a specific period of time, his weight + +79 +00:06:54,950 --> 00:07:01,610 +becomes 105. So we have the same people, the same + +80 +00:07:01,610 --> 00:07:09,260 +individuals for both In this case, these two + +81 +00:07:09,260 --> 00:07:13,380 +samples are called related samples. 
+
+82
+00:07:15,040 --> 00:07:20,220
+So for related samples, we have the same group.
+
+83
+00:07:21,720 --> 00:07:24,000
+But we have different, or I'm sorry, we have
+
+84
+00:07:24,000 --> 00:07:26,800
+repeated measures or repeated measurements.
+
+85
+00:07:28,520 --> 00:07:33,320
+Another example for related samples. Suppose some
+
+86
+00:07:33,320 --> 00:07:38,180
+patients have high blood pressure. And they are
+
+87
+00:07:38,180 --> 00:07:44,720
+using drug A. And we have some information about
+
+88
+00:07:44,720 --> 00:07:48,100
+their blood pressure. Suppose first person, his or
+
+89
+00:07:48,100 --> 00:07:53,140
+her blood pressure 145. The second one
+
+90
+00:07:53,140 --> 00:07:54,520
+suppose 160 and so on.
+
+91
+00:07:59,900 --> 00:08:05,500
+So suppose these people, these patients, use
+
+92
+00:08:05,500 --> 00:08:09,900
+different drug, for example, drug B. And our goal
+
+93
+00:08:09,900 --> 00:08:14,240
+is to see if drug B is more effective to reduce
+
+94
+00:08:14,240 --> 00:08:18,040
+the blood pressure than using drug A. Suppose by
+
+95
+00:08:18,040 --> 00:08:21,560
+using drug B after, for example, three months, the
+
+96
+00:08:21,560 --> 00:08:24,320
+first person with high blood pressure, 145,
+
+97
+00:08:24,440 --> 00:08:27,460
+becomes, for example, 130. The other one, suppose
+
+98
+00:08:27,460 --> 00:08:33,700
+145, and so on. So here we have the same patients.
+
+99
+00:08:36,010 --> 00:08:42,110
+Patients took drug A, and after taking drug B for
+
+100
+00:08:42,110 --> 00:08:45,230
+three months, for example, their new measures are
+
+101
+00:08:45,230 --> 00:08:47,750
+given by 130, 145, and so on. So
+
+102
+00:08:47,750 --> 00:08:50,710
+each person in this case has two values, or two
+
+103
+00:08:50,710 --> 00:08:55,550
+observations. One before using drug B, and the
+
+104
+00:08:55,550 --> 00:08:59,530
+other after.
So if you have before and after, it
+
+105
+00:08:59,530 --> 00:09:05,070
+means we have related samples. So any problem has
+
+106
+00:09:05,070 --> 00:09:07,690
+before and after for the same people, the same
+
+107
+00:09:07,690 --> 00:09:10,830
+individual, in this case we have related samples.
+
+108
+00:09:11,710 --> 00:09:15,290
+So we have to distinguish between two independent
+
+109
+00:09:15,290 --> 00:09:21,510
+populations and two related samples. The other
+
+110
+00:09:21,510 --> 00:09:24,730
+objective, we are going to use hypothesis testing
+
+111
+00:09:24,730 --> 00:09:27,970
+for comparing the proportions of two independent
+
+112
+00:09:27,970 --> 00:09:32,270
+populations. That's all for this chapter. We are
+
+113
+00:09:32,270 --> 00:09:35,510
+going to skip the variances of two independent
+
+114
+00:09:35,510 --> 00:09:41,610
+populations. So this one will be skipped. How to
+
+115
+00:09:41,610 --> 00:09:45,250
+use one-way analysis of variance ANOVA to test for
+
+116
+00:09:45,250 --> 00:09:50,130
+differences among. Among this case means between
+
+117
+00:09:50,130 --> 00:09:54,310
+more than two populations. So among means more
+
+118
+00:09:54,310 --> 00:09:58,000
+than two. So here we are going also to skip
+
+119
+00:09:58,000 --> 00:10:01,620
+analysis of variance as well as how to perform
+
+120
+00:10:01,620 --> 00:10:04,040
+multiple comparisons and one-way analysis of
+
+121
+00:10:04,040 --> 00:10:08,500
+variance will be skipped. So mainly we are
+
+122
+00:10:08,500 --> 00:10:14,160
+focusing on the difference between two means and
+
+123
+00:10:14,160 --> 00:10:18,440
+two proportions, two means for independent and
+
+124
+00:10:18,440 --> 00:10:23,340
+related, and also we are going to cover hypothesis
+
+125
+00:10:23,340 --> 00:10:26,380
+testing for the difference between two population
+
+126
+00:10:26,380 --> 00:10:29,640
+proportions. That's all for this chapter.
+ +127 +00:10:33,220 --> 00:10:38,880 +So again, we are going to explain how can we + +128 +00:10:38,880 --> 00:10:42,440 +perform testing for two sample tests. In this + +129 +00:10:42,440 --> 00:10:47,420 +case, there are four cases. First, we'll discuss + +130 +00:10:47,420 --> 00:10:51,260 +hypothesis testing for the population means for + +131 +00:10:51,260 --> 00:10:54,870 +independent samples. So in this case, we have mean + +132 +00:10:54,870 --> 00:10:58,590 +one versus mean two. So that's for independent + +133 +00:10:58,590 --> 00:11:01,450 +samples. The other one also for the population + +134 +00:11:01,450 --> 00:11:05,050 +means, but for related samples. By the way, + +135 +00:11:05,150 --> 00:11:07,090 +sometimes it's called a pair. + +136 +00:11:11,810 --> 00:11:18,230 +Samples. The word pair comes from we have two + +137 +00:11:18,230 --> 00:11:19,570 +values for the same independent + +138 +00:11:23,970 --> 00:11:28,350 +After that, it becomes 110, for example. So it's + +139 +00:11:28,350 --> 00:11:29,610 +called pair. + +140 +00:11:35,150 --> 00:11:40,450 +So in this case, we have semigroup before versus + +141 +00:11:40,450 --> 00:11:41,750 +after treatment. + +142 +00:11:44,410 --> 00:11:47,110 +The other type of two-sample test is population + +143 +00:11:47,110 --> 00:11:49,530 +proportions. In this case, we are going to test to + +144 +00:11:49,530 --> 00:11:52,270 +see if there is a difference between two + +145 +00:11:52,270 --> 00:11:57,050 +population proportions. The other type of two + +146 +00:11:57,050 --> 00:12:00,030 +-sample test, population variances, variance one + +147 +00:12:00,030 --> 00:12:04,010 +against variance two. As we mentioned, we are + +148 +00:12:04,010 --> 00:12:07,450 +going to cover population means for independent + +149 +00:12:07,450 --> 00:12:11,110 +and related samples, as well as the population + +150 +00:12:11,110 --> 00:12:16,100 +proportion. That's all, again, for this chapter. 
+ +151 +00:12:18,300 --> 00:12:22,260 +We have to start with the sampling distribution + +152 +00:12:22,260 --> 00:12:26,220 +actually of the difference between two population + +153 +00:12:26,220 --> 00:12:32,580 +means. In chapter seven, we talked about sampling + +154 +00:12:32,580 --> 00:12:39,760 +distribution for + +155 +00:12:39,760 --> 00:12:41,620 +X bar. + +156 +00:12:45,430 --> 00:12:54,310 +And we said that X bar is a point estimate for Mu. + +157 +00:12:55,730 --> 00:13:00,130 +Now in this chapter we are interested for the + +158 +00:13:00,130 --> 00:13:03,090 +difference between two population means. That + +159 +00:13:03,090 --> 00:13:08,770 +means if we have Mu1 minus Mu2, in this case the + +160 +00:13:08,770 --> 00:13:12,850 +point estimate of this difference should be X1 + +161 +00:13:12,850 --> 00:13:17,970 +bar. minus x2. So x1 bar minus x2 bar is a point + +162 +00:13:17,970 --> 00:13:22,570 +estimate for the difference mu1 minus mu2. So this + +163 +00:13:22,570 --> 00:13:28,150 +one is a point estimate for + +164 +00:13:28,150 --> 00:13:36,910 +mu1 minus mu2. It's clear that x2 bar minus x1 bar + +165 +00:13:36,910 --> 00:13:45,310 +is a point estimate of or for mu2 minus mu1. So + +166 +00:13:45,310 --> 00:13:51,170 +that's the point estimate of mu1 minus mu2 or mu2 + +167 +00:13:51,170 --> 00:13:55,510 +minus mu1 just x1 minus x2 bar is the point + +168 +00:13:55,510 --> 00:14:00,690 +estimate for mu1 minus mu2. So again, our goal is + +169 +00:14:00,690 --> 00:14:05,090 +to test hypotheses of form or construct confidence + +170 +00:14:05,090 --> 00:14:07,650 +interval for the difference between two + +171 +00:14:07,650 --> 00:14:10,710 +populations means mu1 minus mu2. And the point + +172 +00:14:10,710 --> 00:14:16,070 +estimate for this difference is X1 bar minus X2 + +173 +00:14:16,070 --> 00:14:16,330 +bar. + +174 +00:14:20,010 --> 00:14:24,930 +In this case, we have two cases. 
One is called
+
+175
+00:14:24,930 --> 00:14:27,910
+independent or unrelated, the same meaning.
+
+176
+00:14:28,090 --> 00:14:32,130
+Unrelated means independent populations. In this
+
+177
+00:14:32,130 --> 00:14:34,670
+case, as we mentioned, samples selected from one
+
+178
+00:14:34,670 --> 00:14:38,870
+population. has no effect actually on the sample
+
+179
+00:14:38,870 --> 00:14:41,850
+selected from the other population. As we
+
+180
+00:14:41,850 --> 00:14:45,050
+mentioned, there are two groups, males and
+
+181
+00:14:45,050 --> 00:14:50,390
+females. So group one does not affect group two. I
+
+182
+00:14:50,390 --> 00:14:54,370
+mean males population does not affect the
+
+183
+00:14:54,370 --> 00:14:56,910
+population of the female. So in this case, we have
+
+184
+00:14:56,910 --> 00:15:00,250
+unrelated or independent populations.
+
+185
+00:15:01,610 --> 00:15:06,570
+In this case, there are two scenarios. Sigma is
+
+186
+00:15:06,570 --> 00:15:12,390
+unknown, but we assume they are equal. So sigma 1
+
+187
+00:15:12,390 --> 00:15:14,750
+and sigma 2 are unknown, but we assume they are
+
+188
+00:15:14,750 --> 00:15:19,830
+equal. In this case, we are going to use something
+
+189
+00:15:19,830 --> 00:15:25,010
+called pooled variance test. The other scenario,
+
+190
+00:15:25,410 --> 00:15:29,990
+if the two sigma is unknown, but They are not
+
+191
+00:15:29,990 --> 00:15:31,890
+equal. In this case, we are going to use something
+
+192
+00:15:31,890 --> 00:15:37,870
+called separate variance t-test. So in this
+
+193
+00:15:37,870 --> 00:15:41,710
+chapter, we focus just on unknown sigmas. Because
+
+194
+00:15:41,710 --> 00:15:46,150
+in real life, population variances are unknown. So
+
+195
+00:15:46,150 --> 00:15:50,610
+we have to focus on this case. I mean, sigmas are
+
+196
+00:15:50,610 --> 00:15:54,530
+unknown, but maybe we assume they are equal, or we
+
+197
+00:15:54,530 --> 00:15:56,750
+assume they are not equal.
+ +198 +00:16:00,020 --> 00:16:04,000 +Now, the hypothesis test in this case, if we are + +199 +00:16:04,000 --> 00:16:08,280 +talking about two population means, are, again, + +200 +00:16:09,120 --> 00:16:11,620 +two-tailed test, the same as we discussed before, + +201 +00:16:12,080 --> 00:16:15,580 +or one-tailed test, and one tail has two cases, + +202 +00:16:15,780 --> 00:16:19,800 +lower tail and upper tail. For example, if we are + +203 +00:16:19,800 --> 00:16:23,920 +interested in two-tailed test, we have to state + +204 +00:16:27,240 --> 00:16:31,620 +So the null hypothesis should be mu1 equals mu2, + +205 +00:16:31,740 --> 00:16:34,900 +which means there is no difference between the two + +206 +00:16:34,900 --> 00:16:38,680 +variations. In this case, it means that the + +207 +00:16:38,680 --> 00:16:42,440 +difference equals zero. So either you can state + +208 +00:16:42,440 --> 00:16:46,590 +your null hypothesis by using this way. Mi1 equals + +209 +00:16:46,590 --> 00:16:49,610 +Mi2, or the other way, the difference is zero. + +210 +00:16:49,950 --> 00:16:52,110 +They are equivalent. Because if the means are + +211 +00:16:52,110 --> 00:16:55,590 +equal, it means the difference is nothing, is + +212 +00:16:55,590 --> 00:16:59,530 +zero. Against the alternative hypothesis, Mi1 does + +213 +00:16:59,530 --> 00:17:04,490 +not equal Mi2, or the difference is not zero. So + +214 +00:17:04,490 --> 00:17:08,510 +you may use the other format, maybe it's better, + +215 +00:17:08,730 --> 00:17:11,530 +Mi1 minus Mi2 equals zero, against the difference + +216 +00:17:11,530 --> 00:17:14,170 +is not zero, or the other one, either one is + +217 +00:17:14,170 --> 00:17:18,190 +equal. Now, for the one-tailed test, there are two + +218 +00:17:18,190 --> 00:17:22,710 +cases, lower tail or upper tail. Lower tail means + +219 +00:17:22,710 --> 00:17:28,270 +under the alternative hypothesis, each one, mu1 is + +220 +00:17:28,270 --> 00:17:31,830 +smaller than mu2. 
It's called lower test. So if + +221 +00:17:31,830 --> 00:17:36,070 +mu1 is smaller than mu2, that means the difference + +222 +00:17:36,070 --> 00:17:40,030 +between the two populations is negative. Because + +223 +00:17:40,030 --> 00:17:42,790 +here mu1 is smaller than mu2, it means the + +224 +00:17:42,790 --> 00:17:46,490 +difference between these two is negative. And as + +225 +00:17:46,490 --> 00:17:49,990 +we mentioned before, H1 is the opposite of H0. So + +226 +00:17:49,990 --> 00:17:55,300 +if H1 Me1 is smaller than Me2, it means under the + +227 +00:17:55,300 --> 00:17:59,200 +null hypothesis, mu1 is greater than or equal to + +228 +00:17:59,200 --> 00:18:02,340 +mu. And as we mentioned before, the equal sign + +229 +00:18:02,340 --> 00:18:07,360 +appears only under the null hypothesis. So here + +230 +00:18:07,360 --> 00:18:11,280 +for the two-sided test, the equality here appears + +231 +00:18:11,280 --> 00:18:15,060 +just on the null hypothesis, as well as for lower + +232 +00:18:15,060 --> 00:18:18,760 +and upper tail test. For the upper tail test, + +233 +00:18:20,150 --> 00:18:22,970 +Again here, we have U1 is greater than U2. It + +234 +00:18:22,970 --> 00:18:26,890 +means the difference between these two populations + +235 +00:18:26,890 --> 00:18:30,570 +is above zero, greater than zero. So that's the + +236 +00:18:30,570 --> 00:18:35,610 +new scheme for formulating or stating null and + +237 +00:18:35,610 --> 00:18:39,510 +alternative hypotheses. It's quite similar to the + +238 +00:18:39,510 --> 00:18:42,490 +one we had discussed in chapter nine. Any + +239 +00:18:42,490 --> 00:18:47,300 +question? So this is step number one for doing or + +240 +00:18:47,300 --> 00:18:52,900 +performing statistical hypothesis testing. So + +241 +00:18:52,900 --> 00:18:56,020 +again, there are two types of tests. One is two + +242 +00:18:56,020 --> 00:19:00,500 +-tailed. 
in this case there is no direction you + +243 +00:19:00,500 --> 00:19:02,660 +don't know the exact direction of the two + +244 +00:19:02,660 --> 00:19:05,480 +population means you just say there is a + +245 +00:19:05,480 --> 00:19:08,620 +difference between the two population means in the + +246 +00:19:08,620 --> 00:19:11,580 +other two cases you know the exact direction you + +247 +00:19:11,580 --> 00:19:18,140 +may say that population mean for a it's smaller or + +248 +00:19:18,140 --> 00:19:22,620 +less than or decrease from The other one, here + +249 +00:19:22,620 --> 00:19:27,280 +population A is larger or increased or whatever it + +250 +00:19:27,280 --> 00:19:31,180 +is. So we have null hypothesis, informative + +251 +00:19:31,180 --> 00:19:34,620 +hypothesis, maybe two-tailed or one-tailed test. + +252 +00:19:34,680 --> 00:19:38,760 +It depends on the nature of the problem itself. + +253 +00:19:40,880 --> 00:19:44,440 +Now what's about the rejection regions? Similar as + +254 +00:19:44,440 --> 00:19:47,760 +we discussed before, if we are talking about two + +255 +00:19:47,760 --> 00:19:50,360 +-tailed test, In this case, there are two + +256 +00:19:50,360 --> 00:19:54,340 +rejection regions, one to the right of alpha over + +257 +00:19:54,340 --> 00:19:56,940 +2 and the other to the left of the other side of + +258 +00:19:56,940 --> 00:20:01,340 +alpha over 2. But here we have T alpha over 2 and + +259 +00:20:01,340 --> 00:20:04,420 +minus T alpha over 2. And again, we are focusing + +260 +00:20:04,420 --> 00:20:08,280 +on unknown sigmas. So we have to use T critical + +261 +00:20:08,280 --> 00:20:12,100 +values. So we reject the null hypothesis the same + +262 +00:20:12,100 --> 00:20:17,160 +as we mentioned before if the test statistic falls + +263 +00:20:17,160 --> 00:20:19,920 +in the rejection regions. 
In this case, if this + +264 +00:20:19,920 --> 00:20:25,060 +statistic lies in this region or the other one, we + +265 +00:20:25,060 --> 00:20:29,070 +have to reject them. That means if we reject the + +266 +00:20:29,070 --> 00:20:32,910 +hypothesis, if T stat is less than negative T + +267 +00:20:32,910 --> 00:20:37,610 +alpha over 2, or if T stat is above or greater + +268 +00:20:37,610 --> 00:20:41,070 +than T alpha over 2. So the same as we discussed + +269 +00:20:41,070 --> 00:20:44,350 +before. That's for two-tailed test. Now for lower + +270 +00:20:44,350 --> 00:20:47,270 +-tailed test, in this case, there is only one + +271 +00:20:47,270 --> 00:20:51,370 +rejection region to the left side. It's minus T + +272 +00:20:51,370 --> 00:20:54,260 +alpha. In this case, we reject the null hypothesis + +273 +00:20:54,260 --> 00:20:57,580 +if the value of the statistic or the test + +274 +00:20:57,580 --> 00:21:01,140 +statistic is smaller than negative T alpha. So we + +275 +00:21:01,140 --> 00:21:05,740 +reject if T stat is smaller than minus T alpha. On + +276 +00:21:05,740 --> 00:21:08,380 +the other side, if we are talking about a partial + +277 +00:21:08,380 --> 00:21:12,380 +test. So your null hypothesis, I'm sorry, your + +278 +00:21:12,380 --> 00:21:16,610 +alternative hypothesis always Look at the + +279 +00:21:16,610 --> 00:21:20,250 +alternative hypothesis in order to determine the + +280 +00:21:20,250 --> 00:21:24,070 +rejection region. So if it is greater than, it + +281 +00:21:24,070 --> 00:21:26,810 +means you have the area to the right. I mean the + +282 +00:21:26,810 --> 00:21:29,870 +rejection region should be to the right. If the + +283 +00:21:29,870 --> 00:21:34,230 +alternative hypothesis is negative, I mean smaller + +284 +00:21:34,230 --> 00:21:37,570 +than zero, it means the rejection region should be + +285 +00:21:37,570 --> 00:21:41,680 +to the left side. 
So here, the alternative + +286 +00:21:41,680 --> 00:21:44,480 +hypothesis, mu1 minus mu2, the difference is + +287 +00:21:44,480 --> 00:21:47,580 +positive. That means the rejection region is to + +288 +00:21:47,580 --> 00:21:50,280 +the right side. So we reject the null hypothesis + +289 +00:21:50,280 --> 00:21:54,340 +if T statistic is greater than T alpha. But here, + +290 +00:21:54,400 --> 00:21:58,700 +for the two-sided test or two-tailed test, they + +291 +00:21:58,700 --> 00:22:01,000 +are two regions. I mean, they are two rejection + +292 +00:22:01,000 --> 00:22:04,060 +regions because there is no direction under the + +293 +00:22:04,060 --> 00:22:06,940 +alternative hypothesis. So alpha should be split + +294 +00:22:06,940 --> 00:22:10,420 +in half. So alpha over two to the right and alpha + +295 +00:22:10,420 --> 00:22:13,680 +over two to the left side. So this scheme actually + +296 +00:22:13,680 --> 00:22:20,380 +mimics the same or similar to what we have + +297 +00:22:20,380 --> 00:22:25,880 +discussed in chapter one. Any questions? So again, + +298 +00:22:26,880 --> 00:22:32,680 +we have to formulate or state carefully null and + +299 +00:22:32,680 --> 00:22:36,420 +alternate hypothesis for both cases two and one + +300 +00:22:36,420 --> 00:22:41,890 +-tailed test. And the rejection regions, I think, + +301 +00:22:42,630 --> 00:22:46,270 +is straightforward. Now let's see what are the + +302 +00:22:46,270 --> 00:22:50,250 +assumptions in this case. If the two sigmas are + +303 +00:22:50,250 --> 00:22:54,710 +unknown, and we assume they are equal. So we + +304 +00:22:54,710 --> 00:22:59,790 +assume both sigmas are unknown. I mean, both + +305 +00:22:59,790 --> 00:23:02,810 +population standard deviations are unknown. And we + +306 +00:23:02,810 --> 00:23:06,490 +assume they are equal. The assumptions are. First, + +307 +00:23:06,650 --> 00:23:10,130 +samples should be drawn randomly and + +308 +00:23:10,130 --> 00:23:13,330 +independently. 
So samples are randomly and
+
+309
+00:23:13,330 --> 00:23:15,850
+independently drawn. So we have to select random
+
+310
+00:23:15,850 --> 00:23:20,250
+samples and they are independent. Assumption
+
+311
+00:23:20,250 --> 00:23:23,130
+number one. The second one, populations are
+
+312
+00:23:23,130 --> 00:23:27,070
+normally distributed. So we have to assume the
+
+313
+00:23:27,070 --> 00:23:31,650
+population is normal or both sample sizes are at
+
+314
+00:23:31,650 --> 00:23:36,250
+least 30. So, in order to apply the central
+
+315
+00:23:36,250 --> 00:23:39,270
+limit theorem, so similar to the one we had discussed,
+
+316
+00:23:40,210 --> 00:23:43,830
+so here either the populations, I mean both of
+
+317
+00:23:43,830 --> 00:23:46,510
+them, normally distributed, approximately normally
+
+318
+00:23:46,510 --> 00:23:51,030
+distributed, or both Ns, or both sample sizes,
+
+319
+00:23:51,370 --> 00:23:57,290
+greater than 30, greater than or equal to, so at
+
+320
+00:23:57,290 --> 00:24:02,740
+least 30. In addition to that, we have to assume that
+
+321
+00:24:02,740 --> 00:24:06,660
+population variances are unknown, but we assume
+
+322
+00:24:06,660 --> 00:24:11,460
+they are equal. So the assumptions are samples are
+
+323
+00:24:11,460 --> 00:24:16,400
+randomly selected and independent, populations are
+
+324
+00:24:16,400 --> 00:24:19,760
+normally distributed, or the sample sizes are
+
+325
+00:24:19,760 --> 00:24:22,960
+large enough in order to apply the central limit
+
+326
+00:24:22,960 --> 00:24:26,320
+theorem. In addition to that, population variances
+
+327
+00:24:26,320 --> 00:24:30,540
+are unknown, but we assume to be equal. These are
+
+328
+00:24:30,540 --> 00:24:36,560
+the classical assumptions for performing a t-test,
+
+329
+00:24:37,120 --> 00:24:39,480
+when sigma 1 and sigma 2 are unknown, but we
+
+330
+00:24:39,480 --> 00:24:43,860
+assume they are equal. Any questions? 
+
+331
+00:24:46,820 --> 00:24:51,060
+Next, let's see how can we state the test
+
+332
+00:24:51,060 --> 00:24:55,690
+statistic. Again, we are talking about testing for
+
+333
+00:24:55,690 --> 00:25:00,010
+the difference between mu1 and mu2, so hypothesis
+
+334
+00:25:00,010 --> 00:25:04,590
+for mu1 minus mu2 with both sigmas, sigma1 and
+
+335
+00:25:04,590 --> 00:25:06,830
+sigma2 unknown and assumed equal.
+
+336
+00:25:11,050 --> 00:25:15,910
+The test statistic in this case is similar to the
+
+337
+00:25:15,910 --> 00:25:19,150
+one we discussed, but There is a little difference
+
+338
+00:25:19,150 --> 00:25:24,510
+in these two. The first one was this statistic. It
+
+339
+00:25:24,510 --> 00:25:27,870
+was x bar minus the mean divided by s over root n.
+
+340
+00:25:30,330 --> 00:25:35,390
+That's okay if we are testing for H0, mu equal,
+
+341
+00:25:35,610 --> 00:25:39,550
+for example, any value. Three or four, whatever it is.
+
+342
+00:25:41,190 --> 00:25:43,590
+Here we are talking about the difference
+
+343
+00:25:43,590 --> 00:25:49,400
+confidence, sorry, testing or test for Mu1 minus
+
+344
+00:25:49,400 --> 00:25:49,600
+Mu2.
+
+345
+00:25:52,500 --> 00:25:58,440
+So my T statistic equals.
+
+346
+00:26:02,860 --> 00:26:06,020
+For one sample, we have only one point estimate.
+
+347
+00:26:06,120 --> 00:26:10,420
+X bar is a point estimate for Mu. But for when we
+
+348
+00:26:10,420 --> 00:26:13,900
+are talking about The difference between two
+
+349
+00:26:13,900 --> 00:26:18,160
+populations means the point estimate is x1 bar
+
+350
+00:26:18,160 --> 00:26:22,780
+minus x2 bar. So here I should have x1 bar minus
+
+351
+00:26:22,780 --> 00:26:28,500
+x2 bar. So this is the first term in this formula,
+
+352
+00:26:28,680 --> 00:26:34,840
+minus. Here we have minus mu. But for the new
+
+353
+00:26:34,840 --> 00:26:41,780
+scenario, we have, under H0, Mu1 minus Mu2 equals zero.
+
+354
+00:26:42,280 --> 00:26:48,040
+So here, Mu1 minus Mu2. 
In most cases, we assume + +355 +00:26:48,040 --> 00:26:53,260 +the population means are under zero. There is no + +356 +00:26:53,260 --> 00:26:55,680 +difference between these two population means. So + +357 +00:26:55,680 --> 00:26:59,300 +we are assuming Mu1 minus Mu2 equals zero. So it + +358 +00:26:59,300 --> 00:27:04,540 +means this term cancels. If we assume there is no + +359 +00:27:04,540 --> 00:27:08,300 +difference between these two population means, In + +360 +00:27:08,300 --> 00:27:14,200 +some cases, might be the difference between these + +361 +00:27:14,200 --> 00:27:17,760 +two equal, for example, A, and A is just a + +362 +00:27:17,760 --> 00:27:24,100 +constant. In this case, you have to plug A instead + +363 +00:27:24,100 --> 00:27:30,480 +of mu1 minus mu2. But most of the cases will have + +364 +00:27:30,480 --> 00:27:35,520 +this classical one, the difference is zero. Divide + +365 +00:27:35,520 --> 00:27:40,740 +by, this is the new term in this chapter, divide + +366 +00:27:40,740 --> 00:27:43,840 +by the standard error of the estimate. + +367 +00:27:48,000 --> 00:27:50,540 +Because here, if we go back a little bit to the T + +368 +00:27:50,540 --> 00:27:56,080 +statistic, it's X bar minus mu divided by S over + +369 +00:27:56,080 --> 00:27:58,980 +square root of N is the standard error of X bar. + +370 +00:28:02,990 --> 00:28:06,550 +The same here, we have, sorry, standard error of + +371 +00:28:06,550 --> 00:28:10,750 +this estimate. So the new term is how can we find + +372 +00:28:10,750 --> 00:28:15,430 +the standard error X1 bar minus X2 bar. This one + +373 +00:28:15,430 --> 00:28:21,350 +is given by square root of S square B multiplied + +374 +00:28:21,350 --> 00:28:25,480 +by 1 over N1 plus 1 over N2. S squared B is called + +375 +00:28:25,480 --> 00:28:28,740 +the Bolt variance. And the Bolt variance is given + +376 +00:28:28,740 --> 00:28:34,340 +by this four equation. 
So first of all, we have to
+
+377
+00:28:34,340 --> 00:28:42,060
+compute the pooled variance by using this equation,
+
+378
+00:28:42,240 --> 00:28:50,300
+S squared B equals N1 minus 1 S1 squared N2 minus
+
+379
+00:28:50,300 --> 00:28:56,470
+1 S2 squared divided by N1 minus 1 plus N2 minus
+
+380
+00:28:56,470 --> 00:29:02,050
+1. Now let's see if this makes sense or not, the
+
+381
+00:29:02,050 --> 00:29:04,910
+pooled variance. Now, as we mentioned, there are two
+
+382
+00:29:04,910 --> 00:29:11,250
+samples. The first one has sample size of N1. The
+
+383
+00:29:11,250 --> 00:29:16,130
+other one has sample size of N2 with variances of
+
+384
+00:29:16,130 --> 00:29:19,650
+S1 squared and S2 squared respectively. So we have
+
+385
+00:29:19,650 --> 00:29:24,900
+two samples with sizes N1 and N2. Sigma is
+
+386
+00:29:24,900 --> 00:29:29,140
+unknown, but we know the sample variance for each.
+
+387
+00:29:30,260 --> 00:29:35,320
+Now suppose the two samples are mixed. Let's see
+
+388
+00:29:35,320 --> 00:29:37,600
+how can we find the pooled. It's called the
+
+389
+00:29:37,600 --> 00:29:43,280
+pooled. Sometimes called the weighted variance.
+
+390
+00:29:45,020 --> 00:29:49,080
+Look at this formula. N1 minus 1 S1 squared plus N2
+
+391
+00:29:49,080 --> 00:29:53,710
+minus 1 S2 squared divided by N1 minus 1. plus
+
+392
+00:29:53,710 --> 00:30:00,950
+N2 minus one. We know that S squared is the sum
+
+393
+00:30:00,950 --> 00:30:06,650
+of X minus X bar squared divided by N minus one. That's if
+
+394
+00:30:06,650 --> 00:30:10,670
+we have only one sample. Now just cross
+
+395
+00:30:10,670 --> 00:30:16,070
+multiplication, we will get N minus one S squared
+
+396
+00:30:16,070 --> 00:30:21,230
+equals sum of X minus X bar squared. That's for
+
+397
+00:30:21,230 --> 00:30:27,080
+the first sample. What's about the second one? We
+
+398
+00:30:27,080 --> 00:30:32,660
+have two samples. 
So we can write for the first + +399 +00:30:32,660 --> 00:30:43,540 +one, N1 minus 1 S1 squared equals some X minus X + +400 +00:30:43,540 --> 00:30:47,720 +bar. This is for sample one. For the other sample, + +401 +00:30:48,060 --> 00:30:51,160 +we have the same equation but different data. So + +402 +00:30:51,160 --> 00:30:55,680 +we have S squared equals Y, for example, minus Y + +403 +00:30:55,680 --> 00:31:00,260 +bar divided by N2 minus 1. Now cross + +404 +00:31:00,260 --> 00:31:04,640 +multiplication will give N2 minus 1 is 2 squared + +405 +00:31:04,640 --> 00:31:08,780 +equals sum of Y minus Y bar squared. That's for + +406 +00:31:08,780 --> 00:31:09,760 +the second cell. + +407 +00:31:13,000 --> 00:31:19,000 +We are looking for standard error of the + +408 +00:31:19,000 --> 00:31:22,180 +difference between these two. So now the standard + +409 +00:31:22,180 --> 00:31:26,500 +error, or let's compute first, S squared B for + +410 +00:31:26,500 --> 00:31:30,220 +both. Now, S squared in general, as we mentioned, + +411 +00:31:30,820 --> 00:31:34,900 +is sum of X minus X bar squared divided by N minus + +412 +00:31:34,900 --> 00:31:41,200 +1. So here, we have the first sum plus the second + +413 +00:31:41,200 --> 00:31:50,770 +one divided by N minus 1 for the first. plus N2 + +414 +00:31:50,770 --> 00:31:58,450 +minus 1 for the second one. Now this sum equals N1 + +415 +00:31:58,450 --> 00:32:02,370 +minus 1 S1 squared. The second sum is N2 minus 1 + +416 +00:32:02,370 --> 00:32:07,710 +S2 squared divided by N1 minus 1 plus N2 minus 1. + +417 +00:32:09,870 --> 00:32:15,210 +So this is how this equation is formulated. 
So S + +418 +00:32:15,210 --> 00:32:18,210 +squared is called + +419 +00:32:26,330 --> 00:32:29,810 +Because as we mentioned in chapter three, the + +420 +00:32:29,810 --> 00:32:33,790 +variance, the definition of the variance is the + +421 +00:32:33,790 --> 00:32:34,290 +average + +422 +00:32:38,800 --> 00:32:42,040 +S squared is the average of the squared + +423 +00:32:42,040 --> 00:32:46,200 +differences around the mean. And this is the same + +424 +00:32:46,200 --> 00:32:49,820 +because here we have sum of x minus x bar plus the + +425 +00:32:49,820 --> 00:32:53,560 +other sum y minus y bar all divided by n1 minus 1 + +426 +00:32:53,560 --> 00:32:59,820 +plus n2 minus 1. This term can be written as n1 + +427 +00:32:59,820 --> 00:33:06,840 +plus n2 minus 2. So this is your whole variance. + +428 +00:33:10,660 --> 00:33:14,580 +Now, sum x minus x bar squared equal n1 minus 1 is + +429 +00:33:14,580 --> 00:33:15,060 +1 squared. + +430 +00:33:18,980 --> 00:33:23,700 +Yes, because we are looking for the difference or + +431 +00:33:23,700 --> 00:33:27,700 +the variance of this, the variance for x1 bar + +432 +00:33:27,700 --> 00:33:31,480 +minus x2 bar. This variance is variance of x1 bar. + +433 +00:33:32,830 --> 00:33:37,710 +plus variance of X bar. So we have to add this + +434 +00:33:37,710 --> 00:33:43,550 +value. But we are talking about the difference of + +435 +00:33:43,550 --> 00:33:51,530 +the mean standard error of X1 bar minus X bar. Now + +436 +00:33:51,530 --> 00:33:56,790 +what's the standard error of X bar? Square root + +437 +00:33:56,790 --> 00:34:01,120 +sigma over N or S square over N. 
if sigma is + +438 +00:34:01,120 --> 00:34:06,200 +unknown we should have s squared but here we are + +439 +00:34:06,200 --> 00:34:09,500 +looking for standard error of x1 bar minus x2 bar + +440 +00:34:09,500 --> 00:34:17,860 +so this equals and sigmas are unknown so we have s + +441 +00:34:17,860 --> 00:34:23,820 +squared b divided by or multiply this case by one + +442 +00:34:26,440 --> 00:34:29,700 +1 over N1 plus 1 over N2, because we have two + +443 +00:34:29,700 --> 00:34:34,720 +different samples. So now, the standard error of + +444 +00:34:34,720 --> 00:34:38,100 +the difference equals square root S square B + +445 +00:34:38,100 --> 00:34:42,260 +multiplied by 1 over N1 plus 1 over N2. So now, + +446 +00:34:42,820 --> 00:34:47,780 +the T set S6 becomes, again, we have X1 bar minus + +447 +00:34:47,780 --> 00:34:51,240 +X2 bar minus the difference between the two + +448 +00:34:51,240 --> 00:34:56,650 +population means, divided by this term, represents + +449 +00:34:56,650 --> 00:35:02,890 +the standard error of the estimate, this estimate. + +450 +00:35:04,990 --> 00:35:10,270 +Generally speaking, any statistic, for example, T + +451 +00:35:10,270 --> 00:35:15,150 +is estimate minus + +452 +00:35:15,150 --> 00:35:21,890 +hypothesized value divided + +453 +00:35:21,890 --> 00:35:27,320 +by the standard error of this estimate. And it is + +454 +00:35:27,320 --> 00:35:31,280 +statistical. If we are talking about one sample, + +455 +00:35:32,160 --> 00:35:38,460 +in this case, we have only one estimate, so it's X + +456 +00:35:38,460 --> 00:35:44,400 +bar minus a hypothesized value, Mu, standard error + +457 +00:35:44,400 --> 00:35:50,700 +of X bar, which is S over square root. That's for + +458 +00:35:50,700 --> 00:35:55,540 +one sample. Now, for two samples, what should we + +459 +00:35:55,540 --> 00:36:02,440 +have? We are talking about two symbols. Now the + +460 +00:36:02,440 --> 00:36:06,800 +estimate for the difference. 
So the difference is
+
+461
+00:36:06,800 --> 00:36:13,520
+x1 bar minus x2 bar minus the hypothesized value
+
+462
+00:36:13,520 --> 00:36:18,920
+under H0. We are assuming mu1 minus mu2 equals 0.
+
+463
+00:36:19,910 --> 00:36:24,810
+That's the general case. I mean this special case.
+
+464
+00:36:25,150 --> 00:36:29,150
+Sometimes suppose it's equal A or whatever it is.
+
+465
+00:36:29,510 --> 00:36:34,970
+We have to plug A here. Divide by the standard
+
+466
+00:36:34,970 --> 00:36:40,820
+error of this estimate. Now the standard error of this
+
+467
+00:36:40,820 --> 00:36:44,480
+estimate equals this one. So we have to divide by
+
+468
+00:36:44,480 --> 00:36:48,680
+S squared B multiplied by 1 over N1 plus 1 over
+
+469
+00:36:48,680 --> 00:36:54,820
+N2. So this is your test statistic. Any question?
+
+470
+00:36:56,300 --> 00:37:02,440
+So again, this is the estimate of
+
+471
+00:37:02,440 --> 00:37:07,380
+the difference between mu1 and mu2. The other one is
+
+472
+00:37:07,380 --> 00:37:12,550
+the hypothesized value. In most cases, this
+
+473
+00:37:12,550 --> 00:37:17,910
+difference is zero. Divide by this amount is the
+
+474
+00:37:17,910 --> 00:37:19,710
+standard error of this estimate.
+
+475
+00:37:23,210 --> 00:37:27,350
+And the standard error is given by square root. It
+
+476
+00:37:27,350 --> 00:37:32,750
+looks like square root of S squared divided by N.
+
+477
+00:37:33,290 --> 00:37:37,190
+But in this case, we have two standard deviations,
+
+478
+00:37:37,670 --> 00:37:42,600
+so S1 squared over N1. plus S2 squared over N2.
+
+479
+00:37:42,900 --> 00:37:46,400
+But we are assuming that both sigmas are unknown and
+
+480
+00:37:46,400 --> 00:37:49,580
+we assume they are equal. So these two are the
+
+481
+00:37:49,580 --> 00:37:55,400
+same. So factor out. So here we have S squared
+
+482
+00:37:55,400 --> 00:38:00,440
+over N1 plus 1 over N2. And this is all under the
+
+483
+00:38:00,440 --> 00:38:06,760
+square root. Any question? 
Basically, we are going + +484 +00:38:06,760 --> 00:38:11,340 +to use this statistic and the formula will be + +485 +00:38:11,340 --> 00:38:15,460 +given either the whole variance equation or the + +486 +00:38:15,460 --> 00:38:20,360 +other one. + +487 +00:38:32,240 --> 00:38:35,530 +Now what about the confidence interval? As we + +488 +00:38:35,530 --> 00:38:38,350 +mentioned before, any confidence interval can be + +489 +00:38:38,350 --> 00:38:42,350 +constructed by using general form, which is + +490 +00:38:42,350 --> 00:38:47,350 +estimate, I mean the point estimate, any + +491 +00:38:47,350 --> 00:38:53,070 +confidence interval. Estimate, plus or minus + +492 +00:38:53,070 --> 00:39:00,550 +critical value times standard error of your + +493 +00:39:00,550 --> 00:39:06,280 +estimate. That's in general. estimate or point + +494 +00:39:06,280 --> 00:39:10,840 +estimate plus or minus critical value times the + +495 +00:39:10,840 --> 00:39:15,400 +standard error of your estimate before we had + +496 +00:39:15,400 --> 00:39:21,620 +talked about confidence interval for mu so in that + +497 +00:39:21,620 --> 00:39:27,400 +case we have x bar plus or minus t then standard + +498 +00:39:27,400 --> 00:39:31,420 +error of this estimate which is s over root n + +499 +00:39:31,420 --> 00:39:37,520 +that's before Now we are talking about confidence + +500 +00:39:37,520 --> 00:39:48,560 +interval for mu1 minus mu2. Now my point estimate + +501 +00:39:48,560 --> 00:39:55,580 +of this difference is x1 bar minus x2 bar, plus or + +502 +00:39:55,580 --> 00:40:03,670 +minus. Critical value is T alpha over 2. 
since + +503 +00:40:03,670 --> 00:40:07,550 +sigma's are unknown times the standard error of + +504 +00:40:07,550 --> 00:40:14,910 +the estimate this value square root one over n one + +505 +00:40:14,910 --> 00:40:20,650 +plus one over n one this is your confidence + +506 +00:40:20,650 --> 00:40:26,390 +interval by the way this statistic as T + +507 +00:40:26,390 --> 00:40:30,910 +distribution with degrees of freedom equals N1 + +508 +00:40:30,910 --> 00:40:35,290 +plus N2 minus 2. Because for one population, when + +509 +00:40:35,290 --> 00:40:38,210 +we have one sample, your degrees of freedom is N + +510 +00:40:38,210 --> 00:40:41,730 +minus 1. If we have two populations and we + +511 +00:40:41,730 --> 00:40:44,950 +selected two random samples, your degrees of + +512 +00:40:44,950 --> 00:40:51,160 +freedom Is n1 plus minus 1 plus n2 minus 1. So at + +513 +00:40:51,160 --> 00:40:55,260 +least should be n1 plus n2 minus 2. So this + +514 +00:40:55,260 --> 00:40:59,780 +statistic has T distribution with degrees of + +515 +00:40:59,780 --> 00:41:04,260 +freedom n1 plus n2 minus 2. This is only if we + +516 +00:41:04,260 --> 00:41:09,860 +assume variances unknown, but they are equal. In + +517 +00:41:09,860 --> 00:41:12,760 +this case, your degrees of freedom, n1 plus n2 + +518 +00:41:12,760 --> 00:41:17,790 +minus 2. So that's for the testing and the + +519 +00:41:17,790 --> 00:41:21,370 +confidence interval approach. So if sigma's + +520 +00:41:21,370 --> 00:41:23,370 +unknown and they are equal, your confidence + +521 +00:41:23,370 --> 00:41:27,610 +interval is x1 bar minus x2 bar plus or minus z + +522 +00:41:27,610 --> 00:41:30,590 +alpha over two square root. That's going to be + +523 +00:41:30,590 --> 00:41:34,250 +multiplied by one over one plus one over one. 
If + +524 +00:41:34,250 --> 00:41:41,610 +we are talking about confidence for mu2 minus mu1, + +525 +00:41:42,650 --> 00:41:48,410 +We should have here x2 bar minus x1 bar plus or + +526 +00:41:48,410 --> 00:41:53,710 +minus the same amount. Because both plus doesn't + +527 +00:41:53,710 --> 00:41:57,950 +change if we start with 1 over n2 plus 1 over n1. + +528 +00:41:58,750 --> 00:42:03,430 +But this one should be x2 bar minus x1 bar if we + +529 +00:42:03,430 --> 00:42:06,310 +are talking about confidence interval for the + +530 +00:42:06,310 --> 00:42:12,720 +difference mu2 minus mu1. And that's all. Any + +531 +00:42:12,720 --> 00:42:22,900 +question? In general, + +532 +00:42:23,560 --> 00:42:28,020 +x1 bar and x2 bar are not equal. Because if you + +533 +00:42:28,020 --> 00:42:30,140 +have two populations and you select two different + +534 +00:42:30,140 --> 00:42:34,660 +samples, it makes sense that the two means are not + +535 +00:42:34,660 --> 00:42:38,310 +equal. But if they are equal, it means your + +536 +00:42:38,310 --> 00:42:42,570 +statistic is zero. And that's never happened in + +537 +00:42:42,570 --> 00:42:45,610 +the real life. Maybe close to zero, but not + +538 +00:42:45,610 --> 00:42:49,150 +exactly zero. Let's look at one example. + +539 +00:43:00,650 --> 00:43:04,570 +A straightforward example. You are a financial + +540 +00:43:04,570 --> 00:43:09,960 +analyst. for brokerage fair. Is there a difference + +541 +00:43:09,960 --> 00:43:16,760 +in dividend yield between stock listed on the New + +542 +00:43:16,760 --> 00:43:20,840 +York Stock Exchange and Nasdaq? You collect the + +543 +00:43:20,840 --> 00:43:23,640 +following data. So we have two data for two + +544 +00:43:23,640 --> 00:43:28,940 +different stocks. One for New York Stock Exchange + +545 +00:43:28,940 --> 00:43:33,310 +and other for Nasdaq. 
We have a random sample of
+
+546
+00:43:33,310 --> 00:43:36,550
+size 21 from the first one with standard deviation
+
+547
+00:43:36,550 --> 00:43:42,690
+1.3 and sample mean 3.17. The other sample gives
+
+548
+00:43:42,690 --> 00:43:46,730
+the following results. The random sample size
+
+549
+00:43:46,730 --> 00:43:51,350
+equals 25 with mean 2.53 and standard deviation 1
+
+550
+00:43:51,350 --> 00:43:56,880
+.16. So this is the information we have. Sample
+
+551
+00:43:56,880 --> 00:44:01,180
+sizes for both sample means and sample standard
+
+552
+00:44:01,180 --> 00:44:05,060
+deviations. So that means population variances are
+
+553
+00:44:05,060 --> 00:44:10,140
+unknown. Assuming both populations are
+
+554
+00:44:10,140 --> 00:44:12,920
+approximately normal. We have to assume they are
+
+555
+00:44:12,920 --> 00:44:18,370
+normal. Because the sample sizes are less than 30.
+
+556
+00:44:18,890 --> 00:44:22,210
+In this case, if they are smaller than 30 and the
+
+557
+00:44:22,210 --> 00:44:25,090
+populations are not normal, we cannot use the T
+
+558
+00:44:25,090 --> 00:44:30,370
+-statistic. T is used only if populations are
+
+559
+00:44:30,370 --> 00:44:34,100
+approximately normal or normal. But
+
+560
+00:44:34,100 --> 00:44:37,520
+in this case, the two sizes are smaller than 30,
+
+561
+00:44:37,780 --> 00:44:41,460
+so we have to assume both populations are normally
+
+562
+00:44:41,460 --> 00:44:44,080
+distributed or approximately normally distributed,
+
+563
+00:44:44,800 --> 00:44:49,800
+or we have to use another test. So we're assuming
+
+564
+00:44:49,800 --> 00:44:54,700
+both are normal with equal variances. The question
+
+565
+00:44:54,700 --> 00:45:00,040
+is, is there a difference in means? So there is
+
+566
+00:45:00,040 --> 00:45:03,600
+no direction. Is there a difference? 
That means we
+
+567
+00:45:03,600 --> 00:45:08,080
+are testing mu1 equals mu2 against mu1 does not
+
+568
+00:45:08,080 --> 00:45:13,320
+equal mu2. So the null hypothesis, the difference
+
+569
+00:45:13,320 --> 00:45:16,640
+between these two is zero because it asks about is
+
+570
+00:45:16,640 --> 00:45:20,740
+there a difference here. So we assume there is no
+
+571
+00:45:20,740 --> 00:45:23,400
+difference. It means mu1 equals mu2 under the null
+
+572
+00:45:23,400 --> 00:45:27,960
+hypothesis. Against the alternative hypothesis,
+
+573
+00:45:28,560 --> 00:45:32,440
+mu1 minus mu2 is not zero. That means mu1 does not
+
+574
+00:45:32,440 --> 00:45:36,340
+equal mu2. So either you state the null by using
+
+575
+00:45:36,340 --> 00:45:41,080
+this way, mu1 minus mu2 equals zero, or mu1 equals
+
+576
+00:45:41,080 --> 00:45:47,360
+mu2. Now, before computing the test statistic, we
+
+577
+00:45:47,360 --> 00:45:54,020
+have to compute S squared B. For S squared B, this
+
+578
+00:45:54,020 --> 00:46:00,540
+is the equation we have. Now, N1 is 21, so 21
+
+579
+00:46:00,540 --> 00:46:05,020
+minus 1, times S1 squared. We have the sample
+
+580
+00:46:05,020 --> 00:46:08,780
+standard deviation of 1.3 for the first sample, so
+
+581
+00:46:08,780 --> 00:46:14,720
+this quantity squared, plus N2 was 25, minus 1
+
+582
+00:46:14,720 --> 00:46:19,480
+times S2 squared, 1.16 squared, divided by N1
+
+583
+00:46:19,480 --> 00:46:21,060
+minus 1 plus N2 minus 1.
+
+584
+00:46:24,120 --> 00:46:28,260
+The sample, I'm sorry, the pooled sample variance,
+
+585
+00:46:29,040 --> 00:46:35,020
+which is about 1.5. Now, you, in this case, after
+
+586
+00:46:35,020 --> 00:46:38,910
+computing S squared B, easily you can compute the
+
+587
+00:46:38,910 --> 00:46:41,470
+value of the test statistic by using this
+
+588
+00:46:41,470 --> 00:46:46,210
+equation. Now x1 bar minus x2 bar, x1 bar is 3.17
+
+589
+00:46:46,210 --> 00:46:52,590
+minus x2 bar is 2.53 minus. 
Here we should have + +590 +00:46:52,590 --> 00:47:00,070 +mu1 minus mu2. Now under x0 because the test + +591 +00:47:00,070 --> 00:47:06,140 +statistic is computed only if x0 is true. So, + +592 +00:47:06,700 --> 00:47:13,560 +always, always, always, + +593 +00:47:14,540 --> 00:47:26,040 +we compute T statistic under H0 is true, always. + +594 +00:47:27,460 --> 00:47:30,320 +Otherwise, we cannot compute T statistic. + +595 +00:47:33,760 --> 00:47:37,580 +The rule is to compute the value of the statistic + +596 +00:47:37,580 --> 00:47:42,980 +if H1 is true. Let's see what will happen. Now, if + +597 +00:47:42,980 --> 00:47:48,640 +H1 is true, H1 mu1 minus mu does not equal zero. + +598 +00:47:48,920 --> 00:47:51,880 +So what's the value here? You don't know. Because + +599 +00:47:51,880 --> 00:47:54,200 +this difference is not zero. So what's the value? + +600 +00:47:54,620 --> 00:47:57,900 +I don't know. So you cannot determine the value of + +601 +00:47:57,900 --> 00:48:02,240 +the statistic under H1. But under H0, here we are + +602 +00:48:02,240 --> 00:48:05,950 +assuming The difference is zero, so this statistic + +603 +00:48:05,950 --> 00:48:10,410 +can be computed only if the null hypothesis is + +604 +00:48:10,410 --> 00:48:12,690 +true, otherwise you cannot compute this value. + +605 +00:48:13,430 --> 00:48:17,310 +Make sense? So maybe true and false problem asks + +606 +00:48:17,310 --> 00:48:21,810 +about we compute this statistic under if zero is + +607 +00:48:21,810 --> 00:48:23,190 +true. It's correct statement. + +608 +00:48:26,210 --> 00:48:29,150 +In this case, we're assuming that the difference + +609 +00:48:29,150 --> 00:48:36,830 +is zero, so minus zero. In real cases, I'm sorry, + +610 +00:48:37,110 --> 00:48:40,790 +in most cases, we assume this difference is zero. + +611 +00:48:42,030 --> 00:48:45,110 +In some cases, it might be, for example, it's + +612 +00:48:45,110 --> 00:48:51,440 +three, for example, or one. One makes sense. 
So we
+
+613
+00:48:51,440 --> 00:48:54,600
+have to plug one instead of zero if the difference
+
+614
+00:48:54,600 --> 00:48:57,840
+is one. But here the difference is zero, so it
+
+615
+00:48:57,840 --> 00:49:01,880
+should be zero. So minus zero. Divide by S squared
+
+616
+00:49:01,880 --> 00:49:07,680
+B, this amount, multiplied by one over N1 plus one
+
+617
+00:49:07,680 --> 00:49:11,080
+over N2. So one over 21 plus one over 25. That
+
+618
+00:49:11,080 --> 00:49:15,380
+will give 2.04. So your T statistic in this case
+
+619
+00:49:15,380 --> 00:49:17,860
+is 2.04.
+
+620
+00:49:20,840 --> 00:49:24,740
+as we mentioned since we are talking about two
+
+621
+00:49:24,740 --> 00:49:26,980
+-tailed tests there are three different approaches
+
+622
+00:49:26,980 --> 00:49:31,080
+for testing one is called a critical value
+
+623
+00:49:31,080 --> 00:49:37,360
+approach the other one confidence interval and the
+
+624
+00:49:37,360 --> 00:49:40,380
+last one is p-value approach so let's see two of
+
+625
+00:49:40,380 --> 00:49:47,260
+these critical value approach keep in mind your
+
+626
+00:49:47,260 --> 00:49:54,730
+test statistic is 2.04 now since it's two tailed
+
+627
+00:49:54,730 --> 00:50:00,970
+test so you have two rejection regions T alpha
+
+628
+00:50:00,970 --> 00:50:06,070
+over 2 with degrees of freedom n1 plus n2 minus 2
+
+629
+00:50:06,070 --> 00:50:14,210
+n1 is 21 n2 is 25 so your degrees of freedom 21
+
+630
+00:50:14,210 --> 00:50:16,730
+plus 25 minus 2 this will give
+
+631
+00:50:20,160 --> 00:50:27,120
+44 degrees of freedom is 44 now look at the t
+
+632
+00:50:27,120 --> 00:50:32,200
+table 25
+
+633
+00:50:32,200 --> 00:50:36,060
+so
+
+634
+00:50:36,060 --> 00:50:41,680
+it's 25 minus 2 so that will give 44 now look at
+
+635
+00:50:41,680 --> 00:50:46,220
+the normal sorry t table for degrees of freedom 44
+
+636
+00:50:47,300 --> 00:50:50,940
+under area to the right side of since we are
+
+637
+00:50:50,940 --> 
00:50:55,100 +talking about alpha 5% so the area to the left to + +638 +00:50:55,100 --> 00:50:58,340 +the right 0 to 5 the area to the left is the same + +639 +00:50:58,340 --> 00:51:05,340 +so we are looking in the upper tail for 0.025 and + +640 +00:51:05,340 --> 00:51:14,100 +this amount is 2.0154 + +641 +00:51:16,700 --> 00:51:21,980 +So this point, 2.1054, the other is negative 2 + +642 +00:51:21,980 --> 00:51:22,960 +.104. + +643 +00:51:25,500 --> 00:51:29,540 +So we reject the null hypothesis if your T + +644 +00:51:29,540 --> 00:51:33,000 +statistics fall in this registration. I mean, if T + +645 +00:51:33,000 --> 00:51:36,940 +statistic greater than 2.01 or smaller than + +646 +00:51:36,940 --> 00:51:41,620 +negative 2.01. In this case, my statistic value is + +647 +00:51:41,620 --> 00:51:46,890 +2.04. This amount actually, fourth in this + +648 +00:51:46,890 --> 00:51:51,250 +rejection region, so we reject them. So since he's + +649 +00:51:51,250 --> 00:51:57,590 +statistical, he's 10. which is equal to 2.04, is + +650 +00:51:57,590 --> 00:52:04,150 +greater than 2.01, then we reject the null + +651 +00:52:04,150 --> 00:52:09,130 +hypothesis. So the decision is reject zero at 5% + +652 +00:52:09,130 --> 00:52:11,830 +level of confidence of significance. Your + +653 +00:52:11,830 --> 00:52:15,610 +conclusion, there is evidence of a difference in + +654 +00:52:15,610 --> 00:52:21,350 +means. That means the mean of your stock exchange + +655 +00:52:21,350 --> 00:52:22,870 +does not equal + +656 +00:52:27,230 --> 00:52:30,290 +So that's how can we use the critical value + +657 +00:52:30,290 --> 00:52:32,670 +approach in order for testing the null hypothesis. + +658 +00:52:34,610 --> 00:52:38,730 +Any question? So one more time. + +659 +00:52:48,600 --> 00:52:52,880 +In New York State, it's changed with this + +660 +00:52:52,880 --> 00:52:57,780 +information and for NASDAQ. 
And we're assuming + +661 +00:52:57,780 --> 00:53:02,200 +here populations are normally distributed because + +662 +00:53:02,200 --> 00:53:06,460 +the sample sizes are less than 13. And we assume + +663 +00:53:06,460 --> 00:53:11,120 +sigmas are known, but they are equal. So the null + +664 +00:53:11,120 --> 00:53:13,540 +hypothesis is mu 1 minus 2 equals 0. Again, this + +665 +00:53:13,540 --> 00:53:18,150 +is not zero. We have to compute the bold variance + +666 +00:53:18,150 --> 00:53:20,730 +first, then we can compute the value of the test + +667 +00:53:20,730 --> 00:53:25,410 +statistic. The value is 2.04. As we mentioned, + +668 +00:53:25,510 --> 00:53:28,330 +there are two, three approaches for doing this + +669 +00:53:28,330 --> 00:53:32,390 +test. One is called critical value approach. Now, + +670 +00:53:32,750 --> 00:53:37,530 +critical value is plus or minus T alpha over 2 + +671 +00:53:37,530 --> 00:53:41,270 +with degrees of freedom 44. By the T table, we got + +672 +00:53:41,270 --> 00:53:46,880 +this result. So the critical regions are above 2 + +673 +00:53:46,880 --> 00:53:52,000 +.01 or below minus 2.01. Now your statistic falls + +674 +00:53:52,000 --> 00:53:54,960 +in this rejection region. So we have to reject the + +675 +00:53:54,960 --> 00:53:57,860 +null hypothesis. So my conclusion is there is + +676 +00:53:57,860 --> 00:54:00,920 +sufficient evidence to support the alternative + +677 +00:54:00,920 --> 00:54:01,640 +hypothesis. + +678 +00:54:05,100 --> 00:54:10,280 +The other approach, confidence interval for mu1 + +679 +00:54:10,280 --> 00:54:14,190 +minus mu2. Again, the formula is, he asks about, + +680 +00:54:14,570 --> 00:54:18,450 +since we reject the null hypothesis, so this + +681 +00:54:18,450 --> 00:54:21,310 +hypothesis is false, I mean the difference is not + +682 +00:54:21,310 --> 00:54:27,630 +zero. 
Can we be 95% confident that the mean of New + +683 +00:54:27,630 --> 00:54:32,050 +York Stock Exchange is greater than or less than? + +684 +00:54:32,810 --> 00:54:37,530 +Let's see. Let's formulate or let's construct a + +685 +00:54:37,530 --> 00:54:40,910 +confidence interval for mu1 minus mu2. This is + +686 +00:54:40,910 --> 00:54:45,790 +your formula. So x1 bar minus x2 bar, if you go + +687 +00:54:45,790 --> 00:54:49,770 +back a little bit to these two values, x1 bar is 3 + +688 +00:54:49,770 --> 00:54:59,000 +.27 minus x2 bar is 2.53. The difference is this + +689 +00:54:59,000 --> 00:55:04,060 +amount, 0.74, plus or minus T alpha over 2, the + +690 +00:55:04,060 --> 00:55:07,580 +critical value we have here, so plus or minus this + +691 +00:55:07,580 --> 00:55:12,940 +amount, times the standard error of this estimate, + +692 +00:55:14,240 --> 00:55:20,420 +you easily can compute this value by 0.3628, and + +693 +00:55:20,420 --> 00:55:24,360 +you will end with this interval. Now, this + +694 +00:55:24,360 --> 00:55:30,860 +interval means that We are 95% confident that the + +695 +00:55:30,860 --> 00:55:34,140 +difference between the two populations means fall + +696 +00:55:34,140 --> 00:55:37,960 +between these two values. Now the question is, + +697 +00:55:38,960 --> 00:55:44,260 +since we are testing mu1 minus mu2 equals zero, + +698 +00:55:46,140 --> 00:55:50,520 +does this interval contain zero or not? So the + +699 +00:55:50,520 --> 00:55:55,480 +question is, does your interval contain zero? + +700 +00:55:56,550 --> 00:56:01,010 +contains zero, zero star, this zero. Maybe it's + +701 +00:56:01,010 --> 00:56:05,310 +one, not zero, in this case it's zero. Now, this + +702 +00:56:05,310 --> 00:56:07,830 +interval, the lower bound is positive, the upper + +703 +00:56:07,830 --> 00:56:11,190 +bound is positive, it's also positive, so zero is + +704 +00:56:11,190 --> 00:56:17,510 +not inside the interval. 
So that means It's never + +705 +00:56:17,510 --> 00:56:21,190 +equal zero, so we reject the null hypothesis. So + +706 +00:56:21,190 --> 00:56:24,610 +since zero lies outside this interval, I mean the + +707 +00:56:24,610 --> 00:56:28,050 +confidence interval does not contain zero. That + +708 +00:56:28,050 --> 00:56:30,950 +means we have to reject the null hypothesis. So if + +709 +00:56:30,950 --> 00:56:36,750 +the rule of thumb is if the confidence interval, + +710 +00:56:37,590 --> 00:56:43,730 +in this case for mu1 minus mu2 contains zero. + +711 +00:56:48,110 --> 00:56:56,510 +then we don't reject we don't reject otherwise we + +712 +00:56:56,510 --> 00:57:01,810 +have to reject it's zero but be careful not always + +713 +00:57:01,810 --> 00:57:04,830 +zero here we are assuming the difference is zero + +714 +00:57:04,830 --> 00:57:08,010 +but the difference if the difference is one then + +715 +00:57:08,010 --> 00:57:12,310 +ask yourself is this interval contain one one but + +716 +00:57:12,310 --> 00:57:16,370 +in this case it's zero so the question is Is the + +717 +00:57:16,370 --> 00:57:20,390 +icon contains zero or not? Zero is outside, so we + +718 +00:57:20,390 --> 00:57:24,330 +reject analog icons. Now, do you think the mean of + +719 +00:57:24,330 --> 00:57:28,850 +New York stock is greater than Nasdaq or not? + +720 +00:57:30,990 --> 00:57:36,090 +Since the interval ends, I mean lower than other + +721 +00:57:36,090 --> 00:57:41,030 +bounds, are positive, Positive, positive, so that + +722 +00:57:41,030 --> 00:57:45,730 +means the mean 1 is greater than mean 2. So mean 1 + +723 +00:57:45,730 --> 00:57:49,350 +is means for New York is greater than the mean for + +724 +00:57:49,350 --> 00:57:52,830 +Ottawa. If the interval is negative, negative, + +725 +00:57:53,310 --> 00:57:57,850 +that means mean 1 is smaller than mean 2. If it's + +726 +00:57:57,850 --> 00:58:00,290 +positive, positive, then mean 1 is greater than. 
+ +727 +00:58:00,610 --> 00:58:05,420 +If it's negative plus, then If the interval starts + +728 +00:58:05,420 --> 00:58:09,380 +from negative to positive, that means zero lies + +729 +00:58:09,380 --> 00:58:12,600 +inside the interval. So in this case, we don't + +730 +00:58:12,600 --> 00:58:17,400 +reject. So the only time we don't reject is zero. + +731 +00:58:18,830 --> 00:58:22,210 +The lower bound is negative and the upper bound is + +732 +00:58:22,210 --> 00:58:24,990 +positive. Because if you start for example from + +733 +00:58:24,990 --> 00:58:29,650 +minus one to two for example, in this case zero in + +734 +00:58:29,650 --> 00:58:32,030 +the interval, I mean the confidence interval + +735 +00:58:32,030 --> 00:58:35,990 +contains zero. In this case we don't reject. So + +736 +00:58:35,990 --> 00:58:39,770 +again, the only time you have to don't reject is + +737 +00:58:39,770 --> 00:58:42,990 +zero if the confidence starts from negative to + +738 +00:58:42,990 --> 00:58:46,590 +positive. Otherwise, you reject the null + +739 +00:58:46,590 --> 00:58:50,350 +hypothesis. So in this case, zero is less than the + +740 +00:58:50,350 --> 00:58:54,890 +entire interval, means outside the entire + +741 +00:58:54,890 --> 00:59:01,490 +interval. We can be 95% confident that the mean of + +742 +00:59:01,490 --> 00:59:03,870 +New York Stock Exchange is greater than the mean + +743 +00:59:03,870 --> 00:59:10,350 +of Big sense? Any questions? Next time we'll talk, + +744 +00:59:10,730 --> 00:59:12,810 +I will give the third approach, the B value + +745 +00:59:12,810 --> 00:59:17,270 +approach for conducting the hypothesis testing. + +746 +00:59:18,530 --> 00:59:22,210 +Any question? So that's all for today. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..6c09fdb0f0dc4f441c216cdb442913fc52f48563 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/WhD1LW5lZxc_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 3424, "start": 6.0, "end": 34.24, "text": " Today, I will start chapter 10. Chapter 10 talks about two sample tests and one analysis of variance. In the last chapters, we had already talked about one sample. And also, we introduced how", "tokens": [2692, 11, 286, 486, 722, 7187, 1266, 13, 18874, 1266, 6686, 466, 732, 6889, 6921, 293, 472, 5215, 295, 21977, 13, 682, 264, 1036, 20013, 11, 321, 632, 1217, 2825, 466, 472, 6889, 13, 400, 611, 11, 321, 7268, 577], "avg_logprob": -0.3176448112580834, "compression_ratio": 1.4014598540145986, "no_speech_prob": 0.0, "words": [{"start": 6.000000000000001, "end": 7.04, "word": " Today,", "probability": 0.5126953125}, {"start": 7.24, "end": 7.24, "word": " I", "probability": 0.412841796875}, {"start": 7.24, "end": 7.4, "word": " will", "probability": 0.3740234375}, {"start": 7.4, "end": 7.84, "word": " start", "probability": 0.90625}, {"start": 7.84, "end": 8.36, "word": " chapter", "probability": 0.5673828125}, {"start": 8.36, "end": 10.4, "word": " 10.", "probability": 0.438720703125}, {"start": 12.16, "end": 12.78, "word": " Chapter", "probability": 0.89599609375}, {"start": 12.78, "end": 13.08, "word": " 10", "probability": 0.9580078125}, {"start": 13.08, "end": 13.4, "word": " talks", "probability": 0.87353515625}, {"start": 13.4, "end": 14.28, "word": " about", "probability": 0.90869140625}, {"start": 14.28, "end": 15.6, "word": " two", "probability": 0.8740234375}, {"start": 15.6, "end": 15.96, "word": " sample", "probability": 0.67236328125}, {"start": 15.96, "end": 16.42, "word": " tests", "probability": 0.74267578125}, {"start": 16.42, "end": 16.76, "word": " 
and", "probability": 0.7900390625}, {"start": 16.76, "end": 16.94, "word": " one", "probability": 0.73486328125}, {"start": 16.94, "end": 17.56, "word": " analysis", "probability": 0.37158203125}, {"start": 17.56, "end": 18.48, "word": " of", "probability": 0.939453125}, {"start": 18.48, "end": 18.9, "word": " variance.", "probability": 0.412841796875}, {"start": 19.9, "end": 20.7, "word": " In", "probability": 0.9306640625}, {"start": 20.7, "end": 20.86, "word": " the", "probability": 0.92138671875}, {"start": 20.86, "end": 21.3, "word": " last", "probability": 0.87255859375}, {"start": 21.3, "end": 22.82, "word": " chapters,", "probability": 0.81884765625}, {"start": 23.78, "end": 24.3, "word": " we", "probability": 0.95849609375}, {"start": 24.3, "end": 24.72, "word": " had", "probability": 0.37939453125}, {"start": 24.72, "end": 25.16, "word": " already", "probability": 0.68603515625}, {"start": 25.16, "end": 25.46, "word": " talked", "probability": 0.8857421875}, {"start": 25.46, "end": 26.34, "word": " about", "probability": 0.91162109375}, {"start": 26.34, "end": 28.08, "word": " one", "probability": 0.85107421875}, {"start": 28.08, "end": 28.38, "word": " sample.", "probability": 0.857421875}, {"start": 31.7, "end": 32.58, "word": " And", "probability": 0.92578125}, {"start": 32.58, "end": 33.16, "word": " also,", "probability": 0.78857421875}, {"start": 33.26, "end": 33.38, "word": " we", "probability": 0.955078125}, {"start": 33.38, "end": 33.92, "word": " introduced", "probability": 0.76708984375}, {"start": 33.92, "end": 34.24, "word": " how", "probability": 0.85693359375}], "temperature": 1.0}, {"id": 2, "seek": 6606, "start": 41.91, "end": 66.07, "text": " In addition to that, we perform tests about Mu and Y. 
In this chapter, we are going to generalize our confidence interval and testing about two-sample tests or more than two-sample tests.", "tokens": [682, 4500, 281, 300, 11, 321, 2042, 6921, 466, 15601, 293, 398, 13, 682, 341, 7187, 11, 321, 366, 516, 281, 2674, 1125, 527, 6687, 15035, 293, 4997, 466, 732, 12, 19988, 781, 6921, 420, 544, 813, 732, 12, 19988, 781, 6921, 13], "avg_logprob": -0.2409446043047038, "compression_ratio": 1.4351145038167938, "no_speech_prob": 0.0, "words": [{"start": 41.91, "end": 42.73, "word": " In", "probability": 0.20458984375}, {"start": 42.73, "end": 43.55, "word": " addition", "probability": 0.8466796875}, {"start": 43.55, "end": 43.81, "word": " to", "probability": 0.9052734375}, {"start": 43.81, "end": 44.11, "word": " that,", "probability": 0.85009765625}, {"start": 44.31, "end": 44.47, "word": " we", "probability": 0.93017578125}, {"start": 44.47, "end": 45.05, "word": " perform", "probability": 0.436279296875}, {"start": 45.05, "end": 46.67, "word": " tests", "probability": 0.73583984375}, {"start": 46.67, "end": 48.93, "word": " about", "probability": 0.86083984375}, {"start": 48.93, "end": 51.17, "word": " Mu", "probability": 0.654296875}, {"start": 51.17, "end": 51.59, "word": " and", "probability": 0.93896484375}, {"start": 51.59, "end": 51.89, "word": " Y.", "probability": 0.451904296875}, {"start": 53.61, "end": 54.43, "word": " In", "probability": 0.7646484375}, {"start": 54.43, "end": 54.73, "word": " this", "probability": 0.94287109375}, {"start": 54.73, "end": 55.03, "word": " chapter,", "probability": 0.86474609375}, {"start": 55.11, "end": 55.17, "word": " we", "probability": 0.95556640625}, {"start": 55.17, "end": 55.29, "word": " are", "probability": 0.84423828125}, {"start": 55.29, "end": 55.53, "word": " going", "probability": 0.9453125}, {"start": 55.53, "end": 55.93, "word": " to", "probability": 0.970703125}, {"start": 55.93, "end": 56.77, "word": " generalize", "probability": 0.919921875}, {"start": 56.77, 
"end": 59.31, "word": " our", "probability": 0.857421875}, {"start": 59.31, "end": 59.95, "word": " confidence", "probability": 0.96630859375}, {"start": 59.95, "end": 60.45, "word": " interval", "probability": 0.89404296875}, {"start": 60.45, "end": 60.87, "word": " and", "probability": 0.578125}, {"start": 60.87, "end": 61.39, "word": " testing", "probability": 0.771484375}, {"start": 61.39, "end": 61.89, "word": " about", "probability": 0.8828125}, {"start": 61.89, "end": 63.51, "word": " two", "probability": 0.7451171875}, {"start": 63.51, "end": 63.87, "word": "-sample", "probability": 0.7809244791666666}, {"start": 63.87, "end": 64.31, "word": " tests", "probability": 0.7802734375}, {"start": 64.31, "end": 64.75, "word": " or", "probability": 0.796875}, {"start": 64.75, "end": 65.01, "word": " more", "probability": 0.947265625}, {"start": 65.01, "end": 65.21, "word": " than", "probability": 0.9404296875}, {"start": 65.21, "end": 65.43, "word": " two", "probability": 0.93359375}, {"start": 65.43, "end": 65.75, "word": "-sample", "probability": 0.8224283854166666}, {"start": 65.75, "end": 66.07, "word": " tests.", "probability": 0.75390625}], "temperature": 1.0}, {"id": 3, "seek": 9668, "start": 67.84, "end": 96.68, "text": " For two samples, we are interested in the difference between either two population means or two population proportions. For example, suppose we are teaching students and we have males and females. 
And our goal is to test to see whether or not", "tokens": [1171, 732, 10938, 11, 321, 366, 3102, 294, 264, 2649, 1296, 2139, 732, 4415, 1355, 420, 732, 4415, 32482, 13, 1171, 1365, 11, 7297, 321, 366, 4571, 1731, 293, 321, 362, 20776, 293, 21529, 13, 400, 527, 3387, 307, 281, 1500, 281, 536, 1968, 420, 406], "avg_logprob": -0.13472407485576385, "compression_ratio": 1.577922077922078, "no_speech_prob": 0.0, "words": [{"start": 67.84, "end": 68.38, "word": " For", "probability": 0.83203125}, {"start": 68.38, "end": 68.6, "word": " two", "probability": 0.88671875}, {"start": 68.6, "end": 69.04, "word": " samples,", "probability": 0.86572265625}, {"start": 70.38, "end": 73.18, "word": " we", "probability": 0.90380859375}, {"start": 73.18, "end": 73.44, "word": " are", "probability": 0.94140625}, {"start": 73.44, "end": 73.92, "word": " interested", "probability": 0.85888671875}, {"start": 73.92, "end": 74.36, "word": " in", "probability": 0.939453125}, {"start": 74.36, "end": 74.48, "word": " the", "probability": 0.91552734375}, {"start": 74.48, "end": 74.9, "word": " difference", "probability": 0.876953125}, {"start": 74.9, "end": 75.46, "word": " between", "probability": 0.89697265625}, {"start": 75.46, "end": 76.24, "word": " either", "probability": 0.90478515625}, {"start": 76.24, "end": 77.36, "word": " two", "probability": 0.896484375}, {"start": 77.36, "end": 77.84, "word": " population", "probability": 0.85888671875}, {"start": 77.84, "end": 78.28, "word": " means", "probability": 0.947265625}, {"start": 78.28, "end": 79.04, "word": " or", "probability": 0.83642578125}, {"start": 79.04, "end": 79.22, "word": " two", "probability": 0.94384765625}, {"start": 79.22, "end": 79.68, "word": " population", "probability": 0.943359375}, {"start": 79.68, "end": 80.2, "word": " proportions.", "probability": 0.441162109375}, {"start": 80.98, "end": 81.14, "word": " For", "probability": 0.94873046875}, {"start": 81.14, "end": 81.5, "word": " example,", "probability": 
0.97265625}, {"start": 82.06, "end": 82.5, "word": " suppose", "probability": 0.88671875}, {"start": 82.5, "end": 83.58, "word": " we", "probability": 0.90576171875}, {"start": 83.58, "end": 83.74, "word": " are", "probability": 0.93505859375}, {"start": 83.74, "end": 84.22, "word": " teaching", "probability": 0.900390625}, {"start": 84.22, "end": 86.48, "word": " students", "probability": 0.93798828125}, {"start": 86.48, "end": 88.14, "word": " and", "probability": 0.7138671875}, {"start": 88.14, "end": 88.36, "word": " we", "probability": 0.962890625}, {"start": 88.36, "end": 88.76, "word": " have", "probability": 0.94775390625}, {"start": 88.76, "end": 89.2, "word": " males", "probability": 0.9189453125}, {"start": 89.2, "end": 89.44, "word": " and", "probability": 0.94482421875}, {"start": 89.44, "end": 89.84, "word": " females.", "probability": 0.93115234375}, {"start": 91.78, "end": 92.48, "word": " And", "probability": 0.79345703125}, {"start": 92.48, "end": 92.8, "word": " our", "probability": 0.87451171875}, {"start": 92.8, "end": 93.16, "word": " goal", "probability": 0.97314453125}, {"start": 93.16, "end": 93.78, "word": " is", "probability": 0.94287109375}, {"start": 93.78, "end": 93.96, "word": " to", "probability": 0.955078125}, {"start": 93.96, "end": 94.28, "word": " test", "probability": 0.880859375}, {"start": 94.28, "end": 94.54, "word": " to", "probability": 0.79296875}, {"start": 94.54, "end": 94.8, "word": " see", "probability": 0.92578125}, {"start": 94.8, "end": 96.0, "word": " whether", "probability": 0.85205078125}, {"start": 96.0, "end": 96.28, "word": " or", "probability": 0.96142578125}, {"start": 96.28, "end": 96.68, "word": " not", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 4, "seek": 12391, "start": 97.59, "end": 123.91, "text": " there exists a significant difference between scores of males and females. So in this case, we have two populations, population A and population B. 
And for example, suppose we select a random sample from the first population.", "tokens": [456, 8198, 257, 4776, 2649, 1296, 13444, 295, 20776, 293, 21529, 13, 407, 294, 341, 1389, 11, 321, 362, 732, 12822, 11, 4415, 316, 293, 4415, 363, 13, 400, 337, 1365, 11, 7297, 321, 3048, 257, 4974, 6889, 490, 264, 700, 4415, 13], "avg_logprob": -0.16601562770930203, "compression_ratio": 1.5374149659863945, "no_speech_prob": 0.0, "words": [{"start": 97.59, "end": 97.91, "word": " there", "probability": 0.63037109375}, {"start": 97.91, "end": 98.37, "word": " exists", "probability": 0.7568359375}, {"start": 98.37, "end": 98.87, "word": " a", "probability": 0.99169921875}, {"start": 98.87, "end": 99.39, "word": " significant", "probability": 0.87451171875}, {"start": 99.39, "end": 99.95, "word": " difference", "probability": 0.85888671875}, {"start": 99.95, "end": 100.47, "word": " between", "probability": 0.7958984375}, {"start": 100.47, "end": 101.61, "word": " scores", "probability": 0.53369140625}, {"start": 101.61, "end": 101.91, "word": " of", "probability": 0.95654296875}, {"start": 101.91, "end": 102.21, "word": " males", "probability": 0.94287109375}, {"start": 102.21, "end": 102.49, "word": " and", "probability": 0.9453125}, {"start": 102.49, "end": 102.91, "word": " females.", "probability": 0.9345703125}, {"start": 103.53, "end": 104.39, "word": " So", "probability": 0.8994140625}, {"start": 104.39, "end": 104.49, "word": " in", "probability": 0.73046875}, {"start": 104.49, "end": 104.65, "word": " this", "probability": 0.947265625}, {"start": 104.65, "end": 104.89, "word": " case,", "probability": 0.9140625}, {"start": 104.95, "end": 105.09, "word": " we", "probability": 0.96044921875}, {"start": 105.09, "end": 105.43, "word": " have", "probability": 0.94677734375}, {"start": 105.43, "end": 106.05, "word": " two", "probability": 0.89892578125}, {"start": 106.05, "end": 106.59, "word": " populations,", "probability": 0.9521484375}, {"start": 108.59, "end": 109.33, 
"word": " population", "probability": 0.85498046875}, {"start": 109.33, "end": 110.03, "word": " A", "probability": 0.865234375}, {"start": 110.03, "end": 112.01, "word": " and", "probability": 0.85302734375}, {"start": 112.01, "end": 112.51, "word": " population", "probability": 0.92822265625}, {"start": 112.51, "end": 112.83, "word": " B.", "probability": 0.99951171875}, {"start": 115.03, "end": 115.27, "word": " And", "probability": 0.81396484375}, {"start": 115.27, "end": 115.41, "word": " for", "probability": 0.833984375}, {"start": 115.41, "end": 115.71, "word": " example,", "probability": 0.97509765625}, {"start": 115.81, "end": 116.27, "word": " suppose", "probability": 0.92529296875}, {"start": 116.27, "end": 118.03, "word": " we", "probability": 0.8896484375}, {"start": 118.03, "end": 118.51, "word": " select", "probability": 0.84326171875}, {"start": 118.51, "end": 118.69, "word": " a", "probability": 0.99462890625}, {"start": 118.69, "end": 118.93, "word": " random", "probability": 0.8505859375}, {"start": 118.93, "end": 119.37, "word": " sample", "probability": 0.86328125}, {"start": 119.37, "end": 122.91, "word": " from", "probability": 0.84521484375}, {"start": 122.91, "end": 123.09, "word": " the", "probability": 0.92236328125}, {"start": 123.09, "end": 123.33, "word": " first", "probability": 0.8759765625}, {"start": 123.33, "end": 123.91, "word": " population.", "probability": 0.9306640625}], "temperature": 1.0}, {"id": 5, "seek": 15328, "start": 125.14, "end": 153.28, "text": " And select another sample from the second population. And the goal is how can we construct a confidence interval for the difference between mu1 and mu2. So we are talking about mu1 minus mu2, or the difference between two population proportions, pi1 minus pi2. 
Sometimes there exists more than two populations.", "tokens": [400, 3048, 1071, 6889, 490, 264, 1150, 4415, 13, 400, 264, 3387, 307, 577, 393, 321, 7690, 257, 6687, 15035, 337, 264, 2649, 1296, 2992, 16, 293, 2992, 17, 13, 407, 321, 366, 1417, 466, 2992, 16, 3175, 2992, 17, 11, 420, 264, 2649, 1296, 732, 4415, 32482, 11, 3895, 16, 3175, 3895, 17, 13, 4803, 456, 8198, 544, 813, 732, 12822, 13], "avg_logprob": -0.18518066173419356, "compression_ratio": 1.690217391304348, "no_speech_prob": 0.0, "words": [{"start": 125.14, "end": 125.4, "word": " And", "probability": 0.379638671875}, {"start": 125.4, "end": 125.78, "word": " select", "probability": 0.8427734375}, {"start": 125.78, "end": 126.24, "word": " another", "probability": 0.81005859375}, {"start": 126.24, "end": 126.66, "word": " sample", "probability": 0.57373046875}, {"start": 126.66, "end": 126.94, "word": " from", "probability": 0.84765625}, {"start": 126.94, "end": 127.12, "word": " the", "probability": 0.9111328125}, {"start": 127.12, "end": 127.38, "word": " second", "probability": 0.900390625}, {"start": 127.38, "end": 127.78, "word": " population.", "probability": 0.92529296875}, {"start": 130.26, "end": 130.66, "word": " And", "probability": 0.79541015625}, {"start": 130.66, "end": 131.14, "word": " the", "probability": 0.892578125}, {"start": 131.14, "end": 131.38, "word": " goal", "probability": 0.97314453125}, {"start": 131.38, "end": 131.64, "word": " is", "probability": 0.931640625}, {"start": 131.64, "end": 131.8, "word": " how", "probability": 0.57861328125}, {"start": 131.8, "end": 131.98, "word": " can", "probability": 0.92138671875}, {"start": 131.98, "end": 132.12, "word": " we", "probability": 0.94921875}, {"start": 132.12, "end": 132.64, "word": " construct", "probability": 0.96044921875}, {"start": 132.64, "end": 132.8, "word": " a", "probability": 0.5712890625}, {"start": 132.8, "end": 133.14, "word": " confidence", "probability": 0.939453125}, {"start": 133.14, "end": 133.6, "word": " 
interval", "probability": 0.96337890625}, {"start": 133.6, "end": 134.1, "word": " for", "probability": 0.93994140625}, {"start": 134.1, "end": 135.0, "word": " the", "probability": 0.82861328125}, {"start": 135.0, "end": 135.44, "word": " difference", "probability": 0.84375}, {"start": 135.44, "end": 135.82, "word": " between", "probability": 0.89111328125}, {"start": 135.82, "end": 136.16, "word": " mu1", "probability": 0.54443359375}, {"start": 136.16, "end": 136.3, "word": " and", "probability": 0.94921875}, {"start": 136.3, "end": 136.66, "word": " mu2.", "probability": 0.94384765625}, {"start": 136.76, "end": 136.92, "word": " So", "probability": 0.9560546875}, {"start": 136.92, "end": 137.12, "word": " we", "probability": 0.85791015625}, {"start": 137.12, "end": 137.32, "word": " are", "probability": 0.91357421875}, {"start": 137.32, "end": 137.92, "word": " talking", "probability": 0.84130859375}, {"start": 137.92, "end": 138.26, "word": " about", "probability": 0.9287109375}, {"start": 138.26, "end": 138.62, "word": " mu1", "probability": 0.957275390625}, {"start": 138.62, "end": 139.0, "word": " minus", "probability": 0.9091796875}, {"start": 139.0, "end": 139.56, "word": " mu2,", "probability": 0.96533203125}, {"start": 139.92, "end": 140.4, "word": " or", "probability": 0.9326171875}, {"start": 140.4, "end": 140.56, "word": " the", "probability": 0.9248046875}, {"start": 140.56, "end": 140.92, "word": " difference", "probability": 0.865234375}, {"start": 140.92, "end": 141.32, "word": " between", "probability": 0.8671875}, {"start": 141.32, "end": 141.7, "word": " two", "probability": 0.9072265625}, {"start": 141.7, "end": 142.44, "word": " population", "probability": 0.91796875}, {"start": 142.44, "end": 143.22, "word": " proportions,", "probability": 0.607421875}, {"start": 144.06, "end": 144.56, "word": " pi1", "probability": 0.80908203125}, {"start": 144.56, "end": 145.28, "word": " minus", "probability": 0.984375}, {"start": 145.28, "end": 146.24, 
"word": " pi2.", "probability": 0.955810546875}, {"start": 149.62, "end": 150.38, "word": " Sometimes", "probability": 0.833984375}, {"start": 150.38, "end": 151.0, "word": " there", "probability": 0.685546875}, {"start": 151.0, "end": 151.52, "word": " exists", "probability": 0.6513671875}, {"start": 151.52, "end": 152.12, "word": " more", "probability": 0.943359375}, {"start": 152.12, "end": 152.38, "word": " than", "probability": 0.9501953125}, {"start": 152.38, "end": 152.58, "word": " two", "probability": 0.93017578125}, {"start": 152.58, "end": 153.28, "word": " populations.", "probability": 0.94384765625}], "temperature": 1.0}, {"id": 6, "seek": 18331, "start": 154.49, "end": 183.31, "text": " And also, we can figure out if the means, in general, are different or not. In this chapter, we'll just talk about two sample tests. And we are going to skip one-way analysis of variance. So we'll explain in details confidence intervals and hypothesis testing about two samples.", "tokens": [400, 611, 11, 321, 393, 2573, 484, 498, 264, 1355, 11, 294, 2674, 11, 366, 819, 420, 406, 13, 682, 341, 7187, 11, 321, 603, 445, 751, 466, 732, 6889, 6921, 13, 400, 321, 366, 516, 281, 10023, 472, 12, 676, 5215, 295, 21977, 13, 407, 321, 603, 2903, 294, 4365, 6687, 26651, 293, 17291, 4997, 466, 732, 10938, 13], "avg_logprob": -0.198770492780404, "compression_ratio": 1.4919786096256684, "no_speech_prob": 0.0, "words": [{"start": 154.49, "end": 154.81, "word": " And", "probability": 0.8486328125}, {"start": 154.81, "end": 155.35, "word": " also,", "probability": 0.84912109375}, {"start": 155.65, "end": 155.95, "word": " we", "probability": 0.96044921875}, {"start": 155.95, "end": 156.21, "word": " can", "probability": 0.9462890625}, {"start": 156.21, "end": 156.45, "word": " figure", "probability": 0.96923828125}, {"start": 156.45, "end": 156.89, "word": " out", "probability": 0.884765625}, {"start": 156.89, "end": 157.85, "word": " if", "probability": 0.93798828125}, {"start": 
157.85, "end": 158.89, "word": " the", "probability": 0.91552734375}, {"start": 158.89, "end": 159.37, "word": " means,", "probability": 0.9150390625}, {"start": 159.89, "end": 159.97, "word": " in", "probability": 0.92822265625}, {"start": 159.97, "end": 160.25, "word": " general,", "probability": 0.9052734375}, {"start": 160.43, "end": 160.79, "word": " are", "probability": 0.9453125}, {"start": 160.79, "end": 162.11, "word": " different", "probability": 0.89794921875}, {"start": 162.11, "end": 162.89, "word": " or", "probability": 0.89501953125}, {"start": 162.89, "end": 163.23, "word": " not.", "probability": 0.9462890625}, {"start": 165.71, "end": 166.47, "word": " In", "probability": 0.9208984375}, {"start": 166.47, "end": 166.63, "word": " this", "probability": 0.9375}, {"start": 166.63, "end": 166.89, "word": " chapter,", "probability": 0.85009765625}, {"start": 167.03, "end": 167.77, "word": " we'll", "probability": 0.7257080078125}, {"start": 167.77, "end": 168.03, "word": " just", "probability": 0.92138671875}, {"start": 168.03, "end": 168.31, "word": " talk", "probability": 0.68359375}, {"start": 168.31, "end": 168.75, "word": " about", "probability": 0.90478515625}, {"start": 168.75, "end": 169.05, "word": " two", "probability": 0.873046875}, {"start": 169.05, "end": 169.29, "word": " sample", "probability": 0.3408203125}, {"start": 169.29, "end": 169.71, "word": " tests.", "probability": 0.80224609375}, {"start": 170.07, "end": 170.41, "word": " And", "probability": 0.9541015625}, {"start": 170.41, "end": 170.63, "word": " we", "probability": 0.953125}, {"start": 170.63, "end": 170.79, "word": " are", "probability": 0.9228515625}, {"start": 170.79, "end": 170.99, "word": " going", "probability": 0.947265625}, {"start": 170.99, "end": 171.21, "word": " to", "probability": 0.966796875}, {"start": 171.21, "end": 171.61, "word": " skip", "probability": 0.98095703125}, {"start": 171.61, "end": 172.01, "word": " one", "probability": 0.71435546875}, 
{"start": 172.01, "end": 172.15, "word": "-way", "probability": 0.6580810546875}, {"start": 172.15, "end": 172.51, "word": " analysis", "probability": 0.87939453125}, {"start": 172.51, "end": 172.75, "word": " of", "probability": 0.9013671875}, {"start": 172.75, "end": 173.13, "word": " variance.", "probability": 0.83203125}, {"start": 174.15, "end": 174.37, "word": " So", "probability": 0.95556640625}, {"start": 174.37, "end": 175.11, "word": " we'll", "probability": 0.729736328125}, {"start": 175.11, "end": 176.57, "word": " explain", "probability": 0.86767578125}, {"start": 176.57, "end": 176.83, "word": " in", "probability": 0.8779296875}, {"start": 176.83, "end": 177.39, "word": " details", "probability": 0.740234375}, {"start": 177.39, "end": 178.39, "word": " confidence", "probability": 0.93701171875}, {"start": 178.39, "end": 178.95, "word": " intervals", "probability": 0.90185546875}, {"start": 178.95, "end": 180.51, "word": " and", "probability": 0.62890625}, {"start": 180.51, "end": 181.05, "word": " hypothesis", "probability": 0.8486328125}, {"start": 181.05, "end": 181.57, "word": " testing", "probability": 0.86474609375}, {"start": 181.57, "end": 182.03, "word": " about", "probability": 0.9130859375}, {"start": 182.03, "end": 182.77, "word": " two", "probability": 0.93701171875}, {"start": 182.77, "end": 183.31, "word": " samples.", "probability": 0.892578125}], "temperature": 1.0}, {"id": 7, "seek": 21381, "start": 185.13, "end": 213.81, "text": " For one-way analysis of variance, that one will be discussed in the SPSS course. The objectives for this chapter are how to use hypothesis testing for comparing the difference between the means of two independent populations, the means of two related populations. 
Let's see the difference between two independent", "tokens": [1171, 472, 12, 676, 5215, 295, 21977, 11, 300, 472, 486, 312, 7152, 294, 264, 318, 6273, 50, 1164, 13, 440, 15961, 337, 341, 7187, 366, 577, 281, 764, 17291, 4997, 337, 15763, 264, 2649, 1296, 264, 1355, 295, 732, 6695, 12822, 11, 264, 1355, 295, 732, 4077, 12822, 13, 961, 311, 536, 264, 2649, 1296, 732, 6695], "avg_logprob": -0.12824417878005465, "compression_ratio": 1.656084656084656, "no_speech_prob": 0.0, "words": [{"start": 185.13, "end": 185.43, "word": " For", "probability": 0.77490234375}, {"start": 185.43, "end": 185.67, "word": " one", "probability": 0.85693359375}, {"start": 185.67, "end": 185.83, "word": "-way", "probability": 0.773193359375}, {"start": 185.83, "end": 186.21, "word": " analysis", "probability": 0.8798828125}, {"start": 186.21, "end": 186.47, "word": " of", "probability": 0.92919921875}, {"start": 186.47, "end": 186.95, "word": " variance,", "probability": 0.78857421875}, {"start": 187.25, "end": 187.41, "word": " that", "probability": 0.9365234375}, {"start": 187.41, "end": 187.61, "word": " one", "probability": 0.908203125}, {"start": 187.61, "end": 187.81, "word": " will", "probability": 0.87548828125}, {"start": 187.81, "end": 188.13, "word": " be", "probability": 0.95654296875}, {"start": 188.13, "end": 188.83, "word": " discussed", "probability": 0.87255859375}, {"start": 188.83, "end": 190.01, "word": " in", "probability": 0.9208984375}, {"start": 190.01, "end": 190.29, "word": " the", "probability": 0.92626953125}, {"start": 190.29, "end": 191.07, "word": " SPSS", "probability": 0.9635416666666666}, {"start": 191.07, "end": 191.49, "word": " course.", "probability": 0.95166015625}, {"start": 193.07, "end": 193.81, "word": " The", "probability": 0.88232421875}, {"start": 193.81, "end": 194.31, "word": " objectives", "probability": 0.80322265625}, {"start": 194.31, "end": 195.33, "word": " for", "probability": 0.94189453125}, {"start": 195.33, "end": 195.61, "word": " 
this", "probability": 0.9443359375}, {"start": 195.61, "end": 196.07, "word": " chapter", "probability": 0.8583984375}, {"start": 196.07, "end": 196.89, "word": " are", "probability": 0.93701171875}, {"start": 196.89, "end": 199.03, "word": " how", "probability": 0.6005859375}, {"start": 199.03, "end": 199.21, "word": " to", "probability": 0.96484375}, {"start": 199.21, "end": 199.45, "word": " use", "probability": 0.8828125}, {"start": 199.45, "end": 199.93, "word": " hypothesis", "probability": 0.7939453125}, {"start": 199.93, "end": 200.49, "word": " testing", "probability": 0.84716796875}, {"start": 200.49, "end": 200.91, "word": " for", "probability": 0.94580078125}, {"start": 200.91, "end": 201.47, "word": " comparing", "probability": 0.93017578125}, {"start": 201.47, "end": 201.79, "word": " the", "probability": 0.919921875}, {"start": 201.79, "end": 202.25, "word": " difference", "probability": 0.853515625}, {"start": 202.25, "end": 202.67, "word": " between", "probability": 0.87451171875}, {"start": 202.67, "end": 205.25, "word": " the", "probability": 0.89453125}, {"start": 205.25, "end": 205.55, "word": " means", "probability": 0.916015625}, {"start": 205.55, "end": 205.75, "word": " of", "probability": 0.9609375}, {"start": 205.75, "end": 205.93, "word": " two", "probability": 0.9306640625}, {"start": 205.93, "end": 206.35, "word": " independent", "probability": 0.8857421875}, {"start": 206.35, "end": 206.99, "word": " populations,", "probability": 0.9443359375}, {"start": 208.19, "end": 208.61, "word": " the", "probability": 0.7724609375}, {"start": 208.61, "end": 208.95, "word": " means", "probability": 0.9306640625}, {"start": 208.95, "end": 209.19, "word": " of", "probability": 0.96484375}, {"start": 209.19, "end": 209.43, "word": " two", "probability": 0.93359375}, {"start": 209.43, "end": 209.89, "word": " related", "probability": 0.95068359375}, {"start": 209.89, "end": 210.35, "word": " populations.", "probability": 0.93115234375}, {"start": 
210.59, "end": 210.83, "word": " Let's", "probability": 0.962646484375}, {"start": 210.83, "end": 210.97, "word": " see", "probability": 0.89453125}, {"start": 210.97, "end": 211.11, "word": " the", "probability": 0.91796875}, {"start": 211.11, "end": 211.51, "word": " difference", "probability": 0.861328125}, {"start": 211.51, "end": 212.05, "word": " between", "probability": 0.8828125}, {"start": 212.05, "end": 213.39, "word": " two", "probability": 0.80517578125}, {"start": 213.39, "end": 213.81, "word": " independent", "probability": 0.89794921875}], "temperature": 1.0}, {"id": 8, "seek": 23984, "start": 214.94, "end": 239.84, "text": " and two related populations. So the two populations might be considered independent or related. Related means dependent population. Now, for independent populations, in this case, sample one,", "tokens": [293, 732, 4077, 12822, 13, 407, 264, 732, 12822, 1062, 312, 4888, 6695, 420, 4077, 13, 8738, 770, 1355, 12334, 4415, 13, 823, 11, 337, 6695, 12822, 11, 294, 341, 1389, 11, 6889, 472, 11], "avg_logprob": -0.17469618055555555, "compression_ratio": 1.6134453781512605, "no_speech_prob": 0.0, "words": [{"start": 214.94, "end": 215.42, "word": " and", "probability": 0.59521484375}, {"start": 215.42, "end": 216.2, "word": " two", "probability": 0.8720703125}, {"start": 216.2, "end": 216.6, "word": " related", "probability": 0.94091796875}, {"start": 216.6, "end": 217.68, "word": " populations.", "probability": 0.9052734375}, {"start": 219.44, "end": 220.38, "word": " So", "probability": 0.88818359375}, {"start": 220.38, "end": 221.16, "word": " the", "probability": 0.69873046875}, {"start": 221.16, "end": 221.44, "word": " two", "probability": 0.94384765625}, {"start": 221.44, "end": 222.4, "word": " populations", "probability": 0.94384765625}, {"start": 222.4, "end": 223.08, "word": " might", "probability": 0.88134765625}, {"start": 223.08, "end": 223.5, "word": " be", "probability": 0.95556640625}, {"start": 223.5, "end": 224.94, 
"word": " considered", "probability": 0.8115234375}, {"start": 224.94, "end": 226.48, "word": " independent", "probability": 0.76611328125}, {"start": 226.48, "end": 227.8, "word": " or", "probability": 0.87841796875}, {"start": 227.8, "end": 228.46, "word": " related.", "probability": 0.962890625}, {"start": 228.56, "end": 228.92, "word": " Related", "probability": 0.971923828125}, {"start": 228.92, "end": 229.28, "word": " means", "probability": 0.951171875}, {"start": 229.28, "end": 229.7, "word": " dependent", "probability": 0.88916015625}, {"start": 229.7, "end": 230.78, "word": " population.", "probability": 0.59619140625}, {"start": 231.78, "end": 232.04, "word": " Now,", "probability": 0.92578125}, {"start": 232.12, "end": 232.3, "word": " for", "probability": 0.94140625}, {"start": 232.3, "end": 232.76, "word": " independent", "probability": 0.89404296875}, {"start": 232.76, "end": 236.52, "word": " populations,", "probability": 0.931640625}, {"start": 237.56, "end": 237.82, "word": " in", "probability": 0.94140625}, {"start": 237.82, "end": 238.06, "word": " this", "probability": 0.9462890625}, {"start": 238.06, "end": 238.46, "word": " case,", "probability": 0.921875}, {"start": 239.14, "end": 239.52, "word": " sample", "probability": 0.68994140625}, {"start": 239.52, "end": 239.84, "word": " one,", "probability": 0.70703125}], "temperature": 1.0}, {"id": 9, "seek": 25927, "start": 240.7, "end": 259.28, "text": " does not affect the results of sample. For this reason, we call it as independent sample. So sample one does not affect the results of sample two. 
For example, suppose as we mentioned, we have females and males.", "tokens": [775, 406, 3345, 264, 3542, 295, 6889, 13, 1171, 341, 1778, 11, 321, 818, 309, 382, 6695, 6889, 13, 407, 6889, 472, 775, 406, 3345, 264, 3542, 295, 6889, 732, 13, 1171, 1365, 11, 7297, 382, 321, 2835, 11, 321, 362, 21529, 293, 20776, 13], "avg_logprob": -0.2061820629498233, "compression_ratio": 1.669291338582677, "no_speech_prob": 0.0, "words": [{"start": 240.7, "end": 240.98, "word": " does", "probability": 0.53076171875}, {"start": 240.98, "end": 241.2, "word": " not", "probability": 0.95458984375}, {"start": 241.2, "end": 241.7, "word": " affect", "probability": 0.8447265625}, {"start": 241.7, "end": 242.6, "word": " the", "probability": 0.912109375}, {"start": 242.6, "end": 243.04, "word": " results", "probability": 0.85107421875}, {"start": 243.04, "end": 243.32, "word": " of", "probability": 0.966796875}, {"start": 243.32, "end": 243.6, "word": " sample.", "probability": 0.5439453125}, {"start": 243.86, "end": 244.1, "word": " For", "probability": 0.9287109375}, {"start": 244.1, "end": 244.34, "word": " this", "probability": 0.9423828125}, {"start": 244.34, "end": 244.68, "word": " reason,", "probability": 0.9677734375}, {"start": 244.8, "end": 244.9, "word": " we", "probability": 0.9501953125}, {"start": 244.9, "end": 245.88, "word": " call", "probability": 0.82861328125}, {"start": 245.88, "end": 246.14, "word": " it", "probability": 0.94580078125}, {"start": 246.14, "end": 246.66, "word": " as", "probability": 0.67236328125}, {"start": 246.66, "end": 247.32, "word": " independent", "probability": 0.80224609375}, {"start": 247.32, "end": 247.8, "word": " sample.", "probability": 0.461669921875}, {"start": 247.94, "end": 248.08, "word": " So", "probability": 0.880859375}, {"start": 248.08, "end": 248.34, "word": " sample", "probability": 0.63623046875}, {"start": 248.34, "end": 248.58, "word": " one", "probability": 0.47900390625}, {"start": 248.58, "end": 249.1, "word": " does", 
"probability": 0.93359375}, {"start": 249.1, "end": 249.26, "word": " not", "probability": 0.94482421875}, {"start": 249.26, "end": 249.66, "word": " affect", "probability": 0.91552734375}, {"start": 249.66, "end": 250.06, "word": " the", "probability": 0.91650390625}, {"start": 250.06, "end": 250.56, "word": " results", "probability": 0.87353515625}, {"start": 250.56, "end": 250.88, "word": " of", "probability": 0.96142578125}, {"start": 250.88, "end": 251.28, "word": " sample", "probability": 0.86669921875}, {"start": 251.28, "end": 251.92, "word": " two.", "probability": 0.6630859375}, {"start": 252.02, "end": 252.14, "word": " For", "probability": 0.9404296875}, {"start": 252.14, "end": 252.48, "word": " example,", "probability": 0.97314453125}, {"start": 252.6, "end": 252.94, "word": " suppose", "probability": 0.9072265625}, {"start": 252.94, "end": 253.24, "word": " as", "probability": 0.65185546875}, {"start": 253.24, "end": 253.38, "word": " we", "probability": 0.92919921875}, {"start": 253.38, "end": 253.74, "word": " mentioned,", "probability": 0.81396484375}, {"start": 254.76, "end": 254.88, "word": " we", "probability": 0.9580078125}, {"start": 254.88, "end": 255.18, "word": " have", "probability": 0.94482421875}, {"start": 255.18, "end": 255.7, "word": " females", "probability": 0.96484375}, {"start": 255.7, "end": 258.9, "word": " and", "probability": 0.927734375}, {"start": 258.9, "end": 259.28, "word": " males.", "probability": 0.9609375}], "temperature": 1.0}, {"id": 10, "seek": 28095, "start": 260.73, "end": 280.95, "text": " So here, the two groups or the two samples are independent because a student's scores for females, for example, does not affect score for males. For this reason, they are independent. 
So since we have two different groups,", "tokens": [407, 510, 11, 264, 732, 3935, 420, 264, 732, 10938, 366, 6695, 570, 257, 3107, 311, 13444, 337, 21529, 11, 337, 1365, 11, 775, 406, 3345, 6175, 337, 20776, 13, 1171, 341, 1778, 11, 436, 366, 6695, 13, 407, 1670, 321, 362, 732, 819, 3935, 11], "avg_logprob": -0.17952127025482503, "compression_ratio": 1.5594405594405594, "no_speech_prob": 0.0, "words": [{"start": 260.73, "end": 260.93, "word": " So", "probability": 0.9072265625}, {"start": 260.93, "end": 261.21, "word": " here,", "probability": 0.79296875}, {"start": 261.55, "end": 261.69, "word": " the", "probability": 0.91455078125}, {"start": 261.69, "end": 261.87, "word": " two", "probability": 0.93408203125}, {"start": 261.87, "end": 262.27, "word": " groups", "probability": 0.970703125}, {"start": 262.27, "end": 262.53, "word": " or", "probability": 0.469482421875}, {"start": 262.53, "end": 262.67, "word": " the", "probability": 0.91552734375}, {"start": 262.67, "end": 262.83, "word": " two", "probability": 0.94677734375}, {"start": 262.83, "end": 263.23, "word": " samples", "probability": 0.798828125}, {"start": 263.23, "end": 263.75, "word": " are", "probability": 0.9326171875}, {"start": 263.75, "end": 264.47, "word": " independent", "probability": 0.8935546875}, {"start": 264.47, "end": 265.05, "word": " because", "probability": 0.58056640625}, {"start": 265.05, "end": 265.81, "word": " a", "probability": 0.2744140625}, {"start": 265.81, "end": 267.03, "word": " student's", "probability": 0.8056640625}, {"start": 267.03, "end": 267.37, "word": " scores", "probability": 0.71142578125}, {"start": 267.37, "end": 267.75, "word": " for", "probability": 0.91650390625}, {"start": 267.75, "end": 268.25, "word": " females,", "probability": 0.95458984375}, {"start": 268.35, "end": 268.43, "word": " for", "probability": 0.95166015625}, {"start": 268.43, "end": 268.85, "word": " example,", "probability": 0.97509765625}, {"start": 269.23, "end": 270.29, "word": " 
does", "probability": 0.9521484375}, {"start": 270.29, "end": 270.45, "word": " not", "probability": 0.9501953125}, {"start": 270.45, "end": 270.77, "word": " affect", "probability": 0.9033203125}, {"start": 270.77, "end": 271.17, "word": " score", "probability": 0.60693359375}, {"start": 271.17, "end": 271.65, "word": " for", "probability": 0.91796875}, {"start": 271.65, "end": 272.73, "word": " males.", "probability": 0.9462890625}, {"start": 273.21, "end": 273.41, "word": " For", "probability": 0.92626953125}, {"start": 273.41, "end": 273.61, "word": " this", "probability": 0.93408203125}, {"start": 273.61, "end": 273.87, "word": " reason,", "probability": 0.97119140625}, {"start": 273.93, "end": 274.07, "word": " they", "probability": 0.88916015625}, {"start": 274.07, "end": 274.45, "word": " are", "probability": 0.939453125}, {"start": 274.45, "end": 275.29, "word": " independent.", "probability": 0.9091796875}, {"start": 276.17, "end": 276.37, "word": " So", "probability": 0.94482421875}, {"start": 276.37, "end": 276.69, "word": " since", "probability": 0.791015625}, {"start": 276.69, "end": 276.89, "word": " we", "probability": 0.958984375}, {"start": 276.89, "end": 277.27, "word": " have", "probability": 0.94287109375}, {"start": 277.27, "end": 277.63, "word": " two", "probability": 0.93603515625}, {"start": 277.63, "end": 278.15, "word": " different", "probability": 0.90625}, {"start": 278.15, "end": 280.95, "word": " groups,", "probability": 0.6748046875}], "temperature": 1.0}, {"id": 11, "seek": 31011, "start": 282.15, "end": 310.11, "text": " Or two different samples. In this case, we have independent populations or independent samples. Now, for the two related populations, for example, in this case, suppose we have 10 persons. 
Person 1, suppose his or her weight 120 kilograms.", "tokens": [1610, 732, 819, 10938, 13, 682, 341, 1389, 11, 321, 362, 6695, 12822, 420, 6695, 10938, 13, 823, 11, 337, 264, 732, 4077, 12822, 11, 337, 1365, 11, 294, 341, 1389, 11, 7297, 321, 362, 1266, 14453, 13, 8443, 502, 11, 7297, 702, 420, 720, 3364, 10411, 30690, 13], "avg_logprob": -0.22546875566244126, "compression_ratio": 1.6216216216216217, "no_speech_prob": 0.0, "words": [{"start": 282.15, "end": 282.49, "word": " Or", "probability": 0.42626953125}, {"start": 282.49, "end": 282.71, "word": " two", "probability": 0.83203125}, {"start": 282.71, "end": 283.11, "word": " different", "probability": 0.89892578125}, {"start": 283.11, "end": 283.49, "word": " samples.", "probability": 0.9140625}, {"start": 283.71, "end": 283.81, "word": " In", "probability": 0.9306640625}, {"start": 283.81, "end": 283.99, "word": " this", "probability": 0.94580078125}, {"start": 283.99, "end": 284.27, "word": " case,", "probability": 0.91845703125}, {"start": 284.37, "end": 284.49, "word": " we", "probability": 0.943359375}, {"start": 284.49, "end": 284.83, "word": " have", "probability": 0.9443359375}, {"start": 284.83, "end": 285.33, "word": " independent", "probability": 0.78857421875}, {"start": 285.33, "end": 286.37, "word": " populations", "probability": 0.9140625}, {"start": 286.37, "end": 286.61, "word": " or", "probability": 0.61865234375}, {"start": 286.61, "end": 286.95, "word": " independent", "probability": 0.8388671875}, {"start": 286.95, "end": 287.45, "word": " samples.", "probability": 0.90234375}, {"start": 288.31, "end": 288.53, "word": " Now,", "probability": 0.89990234375}, {"start": 288.71, "end": 288.93, "word": " for", "probability": 0.93994140625}, {"start": 288.93, "end": 289.17, "word": " the", "probability": 0.87841796875}, {"start": 289.17, "end": 289.49, "word": " two", "probability": 0.93310546875}, {"start": 289.49, "end": 290.03, "word": " related", "probability": 0.91796875}, {"start": 290.03, 
"end": 291.79, "word": " populations,", "probability": 0.90283203125}, {"start": 292.01, "end": 292.13, "word": " for", "probability": 0.9453125}, {"start": 292.13, "end": 292.55, "word": " example,", "probability": 0.97314453125}, {"start": 295.15, "end": 295.43, "word": " in", "probability": 0.9091796875}, {"start": 295.43, "end": 295.67, "word": " this", "probability": 0.9501953125}, {"start": 295.67, "end": 295.97, "word": " case,", "probability": 0.93017578125}, {"start": 296.17, "end": 296.53, "word": " suppose", "probability": 0.81005859375}, {"start": 296.53, "end": 297.45, "word": " we", "probability": 0.82861328125}, {"start": 297.45, "end": 297.91, "word": " have", "probability": 0.9482421875}, {"start": 297.91, "end": 298.51, "word": " 10", "probability": 0.473388671875}, {"start": 298.51, "end": 300.97, "word": " persons.", "probability": 0.8095703125}, {"start": 302.55, "end": 303.31, "word": " Person", "probability": 0.76953125}, {"start": 303.31, "end": 303.93, "word": " 1,", "probability": 0.489013671875}, {"start": 304.51, "end": 305.37, "word": " suppose", "probability": 0.849609375}, {"start": 305.37, "end": 305.79, "word": " his", "probability": 0.87841796875}, {"start": 305.79, "end": 306.25, "word": " or", "probability": 0.76171875}, {"start": 306.25, "end": 306.55, "word": " her", "probability": 0.96630859375}, {"start": 306.55, "end": 307.75, "word": " weight", "probability": 0.89990234375}, {"start": 307.75, "end": 309.67, "word": " 120", "probability": 0.6064453125}, {"start": 309.67, "end": 310.11, "word": " kilograms.", "probability": 0.6572265625}], "temperature": 1.0}, {"id": 12, "seek": 33504, "start": 311.48, "end": 335.04, "text": " for the first item. The second item or second person, his or her weight is 135 kilograms. The third one, for example, 110. And suppose we have 10 persons. Weight? 
Suppose these are weights in kilograms.", "tokens": [337, 264, 700, 3174, 13, 440, 1150, 3174, 420, 1150, 954, 11, 702, 420, 720, 3364, 307, 42652, 30690, 13, 440, 2636, 472, 11, 337, 1365, 11, 20154, 13, 400, 7297, 321, 362, 1266, 14453, 13, 44464, 30, 21360, 613, 366, 17443, 294, 30690, 13], "avg_logprob": -0.2184103267348331, "compression_ratio": 1.4397163120567376, "no_speech_prob": 0.0, "words": [{"start": 311.48, "end": 311.72, "word": " for", "probability": 0.5361328125}, {"start": 311.72, "end": 311.98, "word": " the", "probability": 0.89501953125}, {"start": 311.98, "end": 312.52, "word": " first", "probability": 0.88818359375}, {"start": 312.52, "end": 312.96, "word": " item.", "probability": 0.966796875}, {"start": 314.32, "end": 314.48, "word": " The", "probability": 0.79345703125}, {"start": 314.48, "end": 314.78, "word": " second", "probability": 0.90380859375}, {"start": 314.78, "end": 315.18, "word": " item", "probability": 0.931640625}, {"start": 315.18, "end": 315.44, "word": " or", "probability": 0.56884765625}, {"start": 315.44, "end": 315.78, "word": " second", "probability": 0.7236328125}, {"start": 315.78, "end": 316.22, "word": " person,", "probability": 0.89599609375}, {"start": 316.88, "end": 317.02, "word": " his", "probability": 0.95849609375}, {"start": 317.02, "end": 317.26, "word": " or", "probability": 0.896484375}, {"start": 317.26, "end": 317.52, "word": " her", "probability": 0.966796875}, {"start": 317.52, "end": 317.88, "word": " weight", "probability": 0.90625}, {"start": 317.88, "end": 318.72, "word": " is", "probability": 0.88720703125}, {"start": 318.72, "end": 319.38, "word": " 135", "probability": 0.93505859375}, {"start": 319.38, "end": 319.92, "word": " kilograms.", "probability": 0.44482421875}, {"start": 321.04, "end": 321.48, "word": " The", "probability": 0.73876953125}, {"start": 321.48, "end": 321.76, "word": " third", "probability": 0.93408203125}, {"start": 321.76, "end": 322.08, "word": " one,", "probability": 
0.93359375}, {"start": 322.44, "end": 322.56, "word": " for", "probability": 0.95361328125}, {"start": 322.56, "end": 322.9, "word": " example,", "probability": 0.97265625}, {"start": 323.04, "end": 323.3, "word": " 110.", "probability": 0.919921875}, {"start": 324.36, "end": 324.66, "word": " And", "probability": 0.9150390625}, {"start": 324.66, "end": 324.94, "word": " suppose", "probability": 0.908203125}, {"start": 324.94, "end": 325.16, "word": " we", "probability": 0.9384765625}, {"start": 325.16, "end": 325.38, "word": " have", "probability": 0.94970703125}, {"start": 325.38, "end": 325.74, "word": " 10", "probability": 0.63232421875}, {"start": 325.74, "end": 328.38, "word": " persons.", "probability": 0.888671875}, {"start": 329.7, "end": 330.14, "word": " Weight?", "probability": 0.2666015625}, {"start": 330.74, "end": 331.28, "word": " Suppose", "probability": 0.76904296875}, {"start": 331.28, "end": 333.02, "word": " these", "probability": 0.8447265625}, {"start": 333.02, "end": 333.28, "word": " are", "probability": 0.939453125}, {"start": 333.28, "end": 333.64, "word": " weights", "probability": 0.85791015625}, {"start": 333.64, "end": 334.68, "word": " in", "probability": 0.734375}, {"start": 334.68, "end": 335.04, "word": " kilograms.", "probability": 0.89697265625}], "temperature": 1.0}, {"id": 13, "seek": 36256, "start": 338.12, "end": 362.56, "text": " Now, suppose these people or these students or whatever they are have diet for a period of time. For example, suppose they have diet for three months. 
So, currently their weights are given by 120, 135 and so on.", "tokens": [823, 11, 7297, 613, 561, 420, 613, 1731, 420, 2035, 436, 366, 362, 6339, 337, 257, 2896, 295, 565, 13, 1171, 1365, 11, 7297, 436, 362, 6339, 337, 1045, 2493, 13, 407, 11, 4362, 641, 17443, 366, 2212, 538, 10411, 11, 42652, 293, 370, 322, 13], "avg_logprob": -0.21476063449332056, "compression_ratio": 1.4228187919463087, "no_speech_prob": 0.0, "words": [{"start": 338.12, "end": 338.46, "word": " Now,", "probability": 0.8408203125}, {"start": 338.5, "end": 338.9, "word": " suppose", "probability": 0.88134765625}, {"start": 338.9, "end": 339.54, "word": " these", "probability": 0.54736328125}, {"start": 339.54, "end": 339.98, "word": " people", "probability": 0.94775390625}, {"start": 339.98, "end": 340.28, "word": " or", "probability": 0.46337890625}, {"start": 340.28, "end": 340.62, "word": " these", "probability": 0.8486328125}, {"start": 340.62, "end": 341.32, "word": " students", "probability": 0.974609375}, {"start": 341.32, "end": 341.54, "word": " or", "probability": 0.7216796875}, {"start": 341.54, "end": 341.82, "word": " whatever", "probability": 0.92919921875}, {"start": 341.82, "end": 342.38, "word": " they", "probability": 0.90478515625}, {"start": 342.38, "end": 342.74, "word": " are", "probability": 0.9375}, {"start": 342.74, "end": 346.72, "word": " have", "probability": 0.5830078125}, {"start": 346.72, "end": 347.14, "word": " diet", "probability": 0.888671875}, {"start": 347.14, "end": 349.5, "word": " for", "probability": 0.921875}, {"start": 349.5, "end": 349.76, "word": " a", "probability": 0.99072265625}, {"start": 349.76, "end": 349.98, "word": " period", "probability": 0.984375}, {"start": 349.98, "end": 350.16, "word": " of", "probability": 0.96875}, {"start": 350.16, "end": 350.46, "word": " time.", "probability": 0.88916015625}, {"start": 350.56, "end": 350.7, "word": " For", "probability": 0.9521484375}, {"start": 350.7, "end": 351.08, "word": " example,", 
"probability": 0.9716796875}, {"start": 351.24, "end": 351.64, "word": " suppose", "probability": 0.8603515625}, {"start": 351.64, "end": 352.26, "word": " they", "probability": 0.8525390625}, {"start": 352.26, "end": 352.44, "word": " have", "probability": 0.951171875}, {"start": 352.44, "end": 352.68, "word": " diet", "probability": 0.9150390625}, {"start": 352.68, "end": 353.0, "word": " for", "probability": 0.94873046875}, {"start": 353.0, "end": 353.28, "word": " three", "probability": 0.6953125}, {"start": 353.28, "end": 353.5, "word": " months.", "probability": 0.79248046875}, {"start": 355.78, "end": 356.04, "word": " So,", "probability": 0.89599609375}, {"start": 356.34, "end": 356.9, "word": " currently", "probability": 0.80615234375}, {"start": 356.9, "end": 358.14, "word": " their", "probability": 0.72314453125}, {"start": 358.14, "end": 358.46, "word": " weights", "probability": 0.80517578125}, {"start": 358.46, "end": 358.96, "word": " are", "probability": 0.943359375}, {"start": 358.96, "end": 360.3, "word": " given", "probability": 0.908203125}, {"start": 360.3, "end": 360.7, "word": " by", "probability": 0.97314453125}, {"start": 360.7, "end": 361.24, "word": " 120,", "probability": 0.445068359375}, {"start": 361.38, "end": 361.72, "word": " 135", "probability": 0.9697265625}, {"start": 361.72, "end": 362.16, "word": " and", "probability": 0.69287109375}, {"start": 362.16, "end": 362.38, "word": " so", "probability": 0.951171875}, {"start": 362.38, "end": 362.56, "word": " on.", "probability": 0.9482421875}], "temperature": 1.0}, {"id": 14, "seek": 39590, "start": 366.56, "end": 395.9, "text": " They have diet for three months after that. Then we measure their weights after three months. Now suppose the first person, his weight was 120. For example, his weight was in January 120. Now in April. For example, suppose 105, 105 kilograms. The second person suppose 120. 
The third one suppose 95, and so on.", "tokens": [814, 362, 6339, 337, 1045, 2493, 934, 300, 13, 1396, 321, 3481, 641, 17443, 934, 1045, 2493, 13, 823, 7297, 264, 700, 954, 11, 702, 3364, 390, 10411, 13, 1171, 1365, 11, 702, 3364, 390, 294, 7061, 10411, 13, 823, 294, 6929, 13, 1171, 1365, 11, 7297, 33705, 11, 33705, 21112, 1342, 82, 13, 440, 1150, 954, 7297, 10411, 13, 440, 2636, 472, 7297, 13420, 11, 293, 370, 322, 13], "avg_logprob": -0.21996038270668244, "compression_ratio": 1.6720430107526882, "no_speech_prob": 0.0, "words": [{"start": 366.56, "end": 366.86, "word": " They", "probability": 0.53076171875}, {"start": 366.86, "end": 367.06, "word": " have", "probability": 0.8720703125}, {"start": 367.06, "end": 367.4, "word": " diet", "probability": 0.7490234375}, {"start": 367.4, "end": 367.68, "word": " for", "probability": 0.93505859375}, {"start": 367.68, "end": 367.94, "word": " three", "probability": 0.6513671875}, {"start": 367.94, "end": 368.22, "word": " months", "probability": 0.71875}, {"start": 368.22, "end": 368.52, "word": " after", "probability": 0.7626953125}, {"start": 368.52, "end": 368.92, "word": " that.", "probability": 0.869140625}, {"start": 369.62, "end": 369.86, "word": " Then", "probability": 0.82568359375}, {"start": 369.86, "end": 370.16, "word": " we", "probability": 0.833984375}, {"start": 370.16, "end": 370.86, "word": " measure", "probability": 0.8564453125}, {"start": 370.86, "end": 371.22, "word": " their", "probability": 0.95751953125}, {"start": 371.22, "end": 371.56, "word": " weights", "probability": 0.78515625}, {"start": 371.56, "end": 372.24, "word": " after", "probability": 0.77880859375}, {"start": 372.24, "end": 372.72, "word": " three", "probability": 0.83544921875}, {"start": 372.72, "end": 372.96, "word": " months.", "probability": 0.82177734375}, {"start": 373.58, "end": 373.76, "word": " Now", "probability": 0.841796875}, {"start": 373.76, "end": 374.18, "word": " suppose", "probability": 0.69091796875}, {"start": 
374.18, "end": 374.42, "word": " the", "probability": 0.85888671875}, {"start": 374.42, "end": 374.8, "word": " first", "probability": 0.896484375}, {"start": 374.8, "end": 375.44, "word": " person,", "probability": 0.8916015625}, {"start": 376.02, "end": 376.66, "word": " his", "probability": 0.95458984375}, {"start": 376.66, "end": 376.96, "word": " weight", "probability": 0.9208984375}, {"start": 376.96, "end": 377.46, "word": " was", "probability": 0.955078125}, {"start": 377.46, "end": 378.04, "word": " 120.", "probability": 0.87744140625}, {"start": 378.36, "end": 378.64, "word": " For", "probability": 0.953125}, {"start": 378.64, "end": 378.92, "word": " example,", "probability": 0.97314453125}, {"start": 379.04, "end": 379.2, "word": " his", "probability": 0.953125}, {"start": 379.2, "end": 379.4, "word": " weight", "probability": 0.923828125}, {"start": 379.4, "end": 379.64, "word": " was", "probability": 0.94482421875}, {"start": 379.64, "end": 379.78, "word": " in", "probability": 0.91455078125}, {"start": 379.78, "end": 380.14, "word": " January", "probability": 0.93359375}, {"start": 380.14, "end": 380.58, "word": " 120.", "probability": 0.8369140625}, {"start": 381.46, "end": 381.72, "word": " Now", "probability": 0.95947265625}, {"start": 381.72, "end": 381.9, "word": " in", "probability": 0.87255859375}, {"start": 381.9, "end": 382.22, "word": " April.", "probability": 0.8271484375}, {"start": 385.26, "end": 385.52, "word": " For", "probability": 0.921875}, {"start": 385.52, "end": 385.84, "word": " example,", "probability": 0.970703125}, {"start": 385.96, "end": 386.38, "word": " suppose", "probability": 0.89501953125}, {"start": 386.38, "end": 386.82, "word": " 105,", "probability": 0.9560546875}, {"start": 387.78, "end": 388.46, "word": " 105", "probability": 0.6787109375}, {"start": 388.46, "end": 389.28, "word": " kilograms.", "probability": 0.6392415364583334}, {"start": 389.88, "end": 390.06, "word": " The", "probability": 0.76416015625}, 
{"start": 390.06, "end": 390.38, "word": " second", "probability": 0.904296875}, {"start": 390.38, "end": 390.72, "word": " person", "probability": 0.8994140625}, {"start": 390.72, "end": 391.14, "word": " suppose", "probability": 0.64453125}, {"start": 391.14, "end": 391.88, "word": " 120.", "probability": 0.9169921875}, {"start": 393.0, "end": 393.32, "word": " The", "probability": 0.62548828125}, {"start": 393.32, "end": 393.62, "word": " third", "probability": 0.9384765625}, {"start": 393.62, "end": 393.98, "word": " one", "probability": 0.9296875}, {"start": 393.98, "end": 394.44, "word": " suppose", "probability": 0.85205078125}, {"start": 394.44, "end": 394.86, "word": " 95,", "probability": 0.99169921875}, {"start": 395.26, "end": 395.5, "word": " and", "probability": 0.9345703125}, {"start": 395.5, "end": 395.68, "word": " so", "probability": 0.95166015625}, {"start": 395.68, "end": 395.9, "word": " on.", "probability": 0.94775390625}], "temperature": 1.0}, {"id": 15, "seek": 42525, "start": 400.31, "end": 425.25, "text": " Now, in two cases, we have the same individuals, the same persons. Person one, his weight was 120, and after a specific period of time, his weight becomes 105. 
So we have the same people, the same individuals for both", "tokens": [823, 11, 294, 732, 3331, 11, 321, 362, 264, 912, 5346, 11, 264, 912, 14453, 13, 8443, 472, 11, 702, 3364, 390, 10411, 11, 293, 934, 257, 2685, 2896, 295, 565, 11, 702, 3364, 3643, 33705, 13, 407, 321, 362, 264, 912, 561, 11, 264, 912, 5346, 337, 1293], "avg_logprob": -0.1448437502980232, "compression_ratio": 1.5912408759124088, "no_speech_prob": 0.0, "words": [{"start": 400.31, "end": 401.07, "word": " Now,", "probability": 0.90087890625}, {"start": 401.07, "end": 401.83, "word": " in", "probability": 0.94384765625}, {"start": 401.83, "end": 402.13, "word": " two", "probability": 0.921875}, {"start": 402.13, "end": 402.65, "word": " cases,", "probability": 0.9453125}, {"start": 402.95, "end": 404.17, "word": " we", "probability": 0.96044921875}, {"start": 404.17, "end": 404.43, "word": " have", "probability": 0.943359375}, {"start": 404.43, "end": 404.63, "word": " the", "probability": 0.9208984375}, {"start": 404.63, "end": 405.11, "word": " same", "probability": 0.89404296875}, {"start": 405.11, "end": 405.89, "word": " individuals,", "probability": 0.87255859375}, {"start": 406.33, "end": 406.49, "word": " the", "probability": 0.923828125}, {"start": 406.49, "end": 406.75, "word": " same", "probability": 0.8994140625}, {"start": 406.75, "end": 407.27, "word": " persons.", "probability": 0.87451171875}, {"start": 407.91, "end": 408.25, "word": " Person", "probability": 0.865234375}, {"start": 408.25, "end": 408.57, "word": " one,", "probability": 0.625}, {"start": 408.97, "end": 409.19, "word": " his", "probability": 0.96533203125}, {"start": 409.19, "end": 409.41, "word": " weight", "probability": 0.94140625}, {"start": 409.41, "end": 409.75, "word": " was", "probability": 0.95654296875}, {"start": 409.75, "end": 410.11, "word": " 120,", "probability": 0.88720703125}, {"start": 410.69, "end": 411.43, "word": " and", "probability": 0.9345703125}, {"start": 411.43, "end": 412.01, "word": " 
after", "probability": 0.8310546875}, {"start": 412.01, "end": 412.99, "word": " a", "probability": 0.94287109375}, {"start": 412.99, "end": 413.43, "word": " specific", "probability": 0.89697265625}, {"start": 413.43, "end": 413.81, "word": " period", "probability": 0.97705078125}, {"start": 413.81, "end": 413.97, "word": " of", "probability": 0.9677734375}, {"start": 413.97, "end": 414.27, "word": " time,", "probability": 0.89111328125}, {"start": 414.39, "end": 414.59, "word": " his", "probability": 0.96484375}, {"start": 414.59, "end": 414.95, "word": " weight", "probability": 0.91796875}, {"start": 414.95, "end": 416.19, "word": " becomes", "probability": 0.8671875}, {"start": 416.19, "end": 416.65, "word": " 105.", "probability": 0.92724609375}, {"start": 418.09, "end": 418.85, "word": " So", "probability": 0.95361328125}, {"start": 418.85, "end": 419.13, "word": " we", "probability": 0.71435546875}, {"start": 419.13, "end": 419.35, "word": " have", "probability": 0.947265625}, {"start": 419.35, "end": 419.57, "word": " the", "probability": 0.91748046875}, {"start": 419.57, "end": 419.87, "word": " same", "probability": 0.9052734375}, {"start": 419.87, "end": 420.29, "word": " people,", "probability": 0.94970703125}, {"start": 421.11, "end": 421.31, "word": " the", "probability": 0.6015625}, {"start": 421.31, "end": 421.61, "word": " same", "probability": 0.8984375}, {"start": 421.61, "end": 422.39, "word": " individuals", "probability": 0.84765625}, {"start": 422.39, "end": 424.73, "word": " for", "probability": 0.48046875}, {"start": 424.73, "end": 425.25, "word": " both", "probability": 0.890625}], "temperature": 1.0}, {"id": 16, "seek": 45512, "start": 426.98, "end": 455.12, "text": " In this case, these two samples are called related samples. So for related samples, we have the same group. But we have different, or I'm sorry, we have repeated measures or repeated measurements. Another example for related samples. 
Suppose some patients have high blood pressure.", "tokens": [682, 341, 1389, 11, 613, 732, 10938, 366, 1219, 4077, 10938, 13, 407, 337, 4077, 10938, 11, 321, 362, 264, 912, 1594, 13, 583, 321, 362, 819, 11, 420, 286, 478, 2597, 11, 321, 362, 10477, 8000, 420, 10477, 15383, 13, 3996, 1365, 337, 4077, 10938, 13, 21360, 512, 4209, 362, 1090, 3390, 3321, 13], "avg_logprob": -0.17354910261929035, "compression_ratio": 1.709090909090909, "no_speech_prob": 0.0, "words": [{"start": 426.97999999999996, "end": 427.64, "word": " In", "probability": 0.78662109375}, {"start": 427.64, "end": 428.3, "word": " this", "probability": 0.9365234375}, {"start": 428.3, "end": 428.64, "word": " case,", "probability": 0.9267578125}, {"start": 428.86, "end": 429.04, "word": " these", "probability": 0.85302734375}, {"start": 429.04, "end": 429.26, "word": " two", "probability": 0.91943359375}, {"start": 429.26, "end": 429.6, "word": " samples", "probability": 0.884765625}, {"start": 429.6, "end": 429.9, "word": " are", "probability": 0.93994140625}, {"start": 429.9, "end": 430.26, "word": " called", "probability": 0.80322265625}, {"start": 430.26, "end": 430.94, "word": " related", "probability": 0.8154296875}, {"start": 430.94, "end": 433.38, "word": " samples.", "probability": 0.80126953125}, {"start": 435.04, "end": 435.34, "word": " So", "probability": 0.81591796875}, {"start": 435.34, "end": 435.58, "word": " for", "probability": 0.65283203125}, {"start": 435.58, "end": 436.08, "word": " related", "probability": 0.92626953125}, {"start": 436.08, "end": 436.5, "word": " samples,", "probability": 0.86865234375}, {"start": 436.66, "end": 436.78, "word": " we", "probability": 0.95458984375}, {"start": 436.78, "end": 437.08, "word": " have", "probability": 0.94775390625}, {"start": 437.08, "end": 437.3, "word": " the", "probability": 0.91748046875}, {"start": 437.3, "end": 438.74, "word": " same", "probability": 0.91455078125}, {"start": 438.74, "end": 440.22, "word": " group.", 
"probability": 0.95703125}, {"start": 441.72, "end": 442.08, "word": " But", "probability": 0.93408203125}, {"start": 442.08, "end": 442.3, "word": " we", "probability": 0.89306640625}, {"start": 442.3, "end": 442.58, "word": " have", "probability": 0.943359375}, {"start": 442.58, "end": 442.9, "word": " different,", "probability": 0.71240234375}, {"start": 443.22, "end": 443.3, "word": " or", "probability": 0.3193359375}, {"start": 443.3, "end": 443.46, "word": " I'm", "probability": 0.891845703125}, {"start": 443.46, "end": 443.62, "word": " sorry,", "probability": 0.84716796875}, {"start": 443.7, "end": 443.8, "word": " we", "probability": 0.93359375}, {"start": 443.8, "end": 444.0, "word": " have", "probability": 0.9423828125}, {"start": 444.0, "end": 444.52, "word": " repeated", "probability": 0.94580078125}, {"start": 444.52, "end": 445.82, "word": " measures", "probability": 0.84716796875}, {"start": 445.82, "end": 446.04, "word": " or", "probability": 0.6298828125}, {"start": 446.04, "end": 446.34, "word": " repeated", "probability": 0.9794921875}, {"start": 446.34, "end": 446.8, "word": " measurements.", "probability": 0.93115234375}, {"start": 448.52, "end": 448.8, "word": " Another", "probability": 0.83447265625}, {"start": 448.8, "end": 449.2, "word": " example", "probability": 0.9716796875}, {"start": 449.2, "end": 449.5, "word": " for", "probability": 0.92724609375}, {"start": 449.5, "end": 450.14, "word": " related", "probability": 0.89404296875}, {"start": 450.14, "end": 450.48, "word": " samples.", "probability": 0.68359375}, {"start": 450.64, "end": 451.02, "word": " Suppose", "probability": 0.82861328125}, {"start": 451.02, "end": 453.32, "word": " some", "probability": 0.83447265625}, {"start": 453.32, "end": 453.74, "word": " patients", "probability": 0.87451171875}, {"start": 453.74, "end": 454.18, "word": " have", "probability": 0.947265625}, {"start": 454.18, "end": 454.54, "word": " high", "probability": 0.90673828125}, {"start": 454.54, 
"end": 454.74, "word": " blood", "probability": 0.93017578125}, {"start": 454.74, "end": 455.12, "word": " pressure.", "probability": 0.89990234375}], "temperature": 1.0}, {"id": 17, "seek": 48340, "start": 457.46, "end": 483.4, "text": " And they are using drug A. And we have some information about their blood pressure. Suppose first person, his or her weight, blood pressure 145. The second one suppose 160 and so on. So suppose these people, these patients,", "tokens": [400, 436, 366, 1228, 4110, 316, 13, 400, 321, 362, 512, 1589, 466, 641, 3390, 3321, 13, 21360, 700, 954, 11, 702, 420, 720, 3364, 11, 3390, 3321, 3499, 20, 13, 440, 1150, 472, 7297, 21243, 293, 370, 322, 13, 407, 7297, 613, 561, 11, 613, 4209, 11], "avg_logprob": -0.19339923955956284, "compression_ratio": 1.4545454545454546, "no_speech_prob": 0.0, "words": [{"start": 457.46, "end": 457.82, "word": " And", "probability": 0.58837890625}, {"start": 457.82, "end": 458.02, "word": " they", "probability": 0.87841796875}, {"start": 458.02, "end": 458.18, "word": " are", "probability": 0.93359375}, {"start": 458.18, "end": 458.7, "word": " using", "probability": 0.9404296875}, {"start": 458.7, "end": 460.46, "word": " drug", "probability": 0.57763671875}, {"start": 460.46, "end": 460.76, "word": " A.", "probability": 0.7021484375}, {"start": 463.04, "end": 463.38, "word": " And", "probability": 0.91357421875}, {"start": 463.38, "end": 463.48, "word": " we", "probability": 0.93896484375}, {"start": 463.48, "end": 463.6, "word": " have", "probability": 0.9462890625}, {"start": 463.6, "end": 463.84, "word": " some", "probability": 0.8955078125}, {"start": 463.84, "end": 464.3, "word": " information", "probability": 0.8349609375}, {"start": 464.3, "end": 464.72, "word": " about", "probability": 0.9033203125}, {"start": 464.72, "end": 465.12, "word": " their", "probability": 0.9423828125}, {"start": 465.12, "end": 465.34, "word": " blood", "probability": 0.93115234375}, {"start": 465.34, "end": 465.62, "word": 
" pressure.", "probability": 0.9560546875}, {"start": 465.84, "end": 466.06, "word": " Suppose", "probability": 0.71826171875}, {"start": 466.06, "end": 466.6, "word": " first", "probability": 0.673828125}, {"start": 466.6, "end": 467.2, "word": " person,", "probability": 0.91748046875}, {"start": 467.72, "end": 467.92, "word": " his", "probability": 0.95751953125}, {"start": 467.92, "end": 468.1, "word": " or", "probability": 0.93701171875}, {"start": 468.1, "end": 468.48, "word": " her", "probability": 0.96875}, {"start": 468.48, "end": 468.86, "word": " weight,", "probability": 0.52587890625}, {"start": 469.3, "end": 469.5, "word": " blood", "probability": 0.91650390625}, {"start": 469.5, "end": 469.86, "word": " pressure", "probability": 0.93017578125}, {"start": 469.86, "end": 471.62, "word": " 145.", "probability": 0.8740234375}, {"start": 472.38, "end": 472.7, "word": " The", "probability": 0.7333984375}, {"start": 472.7, "end": 472.94, "word": " second", "probability": 0.90869140625}, {"start": 472.94, "end": 473.14, "word": " one", "probability": 0.91015625}, {"start": 473.14, "end": 473.38, "word": " suppose", "probability": 0.4951171875}, {"start": 473.38, "end": 473.88, "word": " 160", "probability": 0.90478515625}, {"start": 473.88, "end": 474.18, "word": " and", "probability": 0.740234375}, {"start": 474.18, "end": 474.34, "word": " so", "probability": 0.95556640625}, {"start": 474.34, "end": 474.52, "word": " on.", "probability": 0.92822265625}, {"start": 479.9, "end": 480.26, "word": " So", "probability": 0.58203125}, {"start": 480.26, "end": 480.74, "word": " suppose", "probability": 0.87109375}, {"start": 480.74, "end": 482.1, "word": " these", "probability": 0.77294921875}, {"start": 482.1, "end": 482.44, "word": " people,", "probability": 0.9677734375}, {"start": 482.6, "end": 482.8, "word": " these", "probability": 0.84130859375}, {"start": 482.8, "end": 483.4, "word": " patients,", "probability": 0.8564453125}], "temperature": 1.0}, {"id": 18, 
"seek": 51370, "start": 485.12, "end": 513.7, "text": " use different drug, for example, drug B. And our goal is to see if drug B is more effective to reduce the blood pressure than using drug A. Suppose by using drug B after, for example, three months, the first person with high blood pressure, 145, becomes, for example, 130. The other one, suppose 145, and so on. So here we have the same patients.", "tokens": [764, 819, 4110, 11, 337, 1365, 11, 4110, 363, 13, 400, 527, 3387, 307, 281, 536, 498, 4110, 363, 307, 544, 4942, 281, 5407, 264, 3390, 3321, 813, 1228, 4110, 316, 13, 21360, 538, 1228, 4110, 363, 934, 11, 337, 1365, 11, 1045, 2493, 11, 264, 700, 954, 365, 1090, 3390, 3321, 11, 3499, 20, 11, 3643, 11, 337, 1365, 11, 19966, 13, 440, 661, 472, 11, 7297, 3499, 20, 11, 293, 370, 322, 13, 407, 510, 321, 362, 264, 912, 4209, 13], "avg_logprob": -0.14899553464991705, "compression_ratio": 1.6602870813397128, "no_speech_prob": 0.0, "words": [{"start": 485.12, "end": 485.5, "word": " use", "probability": 0.52099609375}, {"start": 485.5, "end": 485.9, "word": " different", "probability": 0.82421875}, {"start": 485.9, "end": 486.38, "word": " drug,", "probability": 0.7294921875}, {"start": 487.1, "end": 487.42, "word": " for", "probability": 0.95361328125}, {"start": 487.42, "end": 487.74, "word": " example,", "probability": 0.96923828125}, {"start": 487.84, "end": 488.06, "word": " drug", "probability": 0.6591796875}, {"start": 488.06, "end": 488.28, "word": " B.", "probability": 0.9482421875}, {"start": 488.96, "end": 489.36, "word": " And", "probability": 0.90625}, {"start": 489.36, "end": 489.62, "word": " our", "probability": 0.87744140625}, {"start": 489.62, "end": 489.9, "word": " goal", "probability": 0.9755859375}, {"start": 489.9, "end": 490.18, "word": " is", "probability": 0.9453125}, {"start": 490.18, "end": 490.5, "word": " to", "probability": 0.96337890625}, {"start": 490.5, "end": 490.8, "word": " see", "probability": 0.92041015625}, {"start": 490.8, 
"end": 491.26, "word": " if", "probability": 0.94775390625}, {"start": 491.26, "end": 491.58, "word": " drug", "probability": 0.859375}, {"start": 491.58, "end": 491.88, "word": " B", "probability": 0.99609375}, {"start": 491.88, "end": 492.28, "word": " is", "probability": 0.9423828125}, {"start": 492.28, "end": 492.5, "word": " more", "probability": 0.94287109375}, {"start": 492.5, "end": 492.94, "word": " effective", "probability": 0.8720703125}, {"start": 492.94, "end": 493.82, "word": " to", "probability": 0.95166015625}, {"start": 493.82, "end": 494.24, "word": " reduce", "probability": 0.8271484375}, {"start": 494.24, "end": 494.38, "word": " the", "probability": 0.4384765625}, {"start": 494.38, "end": 494.52, "word": " blood", "probability": 0.90771484375}, {"start": 494.52, "end": 494.9, "word": " pressure", "probability": 0.94091796875}, {"start": 494.9, "end": 495.28, "word": " than", "probability": 0.86083984375}, {"start": 495.28, "end": 496.26, "word": " using", "probability": 0.91015625}, {"start": 496.26, "end": 496.58, "word": " drug", "probability": 0.9150390625}, {"start": 496.58, "end": 496.86, "word": " A.", "probability": 0.94091796875}, {"start": 497.4, "end": 497.78, "word": " Suppose", "probability": 0.64599609375}, {"start": 497.78, "end": 498.04, "word": " by", "probability": 0.86328125}, {"start": 498.04, "end": 498.3, "word": " using", "probability": 0.9306640625}, {"start": 498.3, "end": 498.56, "word": " drug", "probability": 0.90673828125}, {"start": 498.56, "end": 498.8, "word": " B", "probability": 0.99365234375}, {"start": 498.8, "end": 499.24, "word": " after,", "probability": 0.75634765625}, {"start": 499.64, "end": 499.8, "word": " for", "probability": 0.9501953125}, {"start": 499.8, "end": 500.1, "word": " example,", "probability": 0.97265625}, {"start": 500.16, "end": 500.34, "word": " three", "probability": 0.87158203125}, {"start": 500.34, "end": 500.7, "word": " months,", "probability": 0.83642578125}, {"start": 501.4, 
"end": 501.56, "word": " the", "probability": 0.91552734375}, {"start": 501.56, "end": 501.86, "word": " first", "probability": 0.87060546875}, {"start": 501.86, "end": 502.4, "word": " person", "probability": 0.9150390625}, {"start": 502.4, "end": 502.6, "word": " with", "probability": 0.8974609375}, {"start": 502.6, "end": 502.86, "word": " high", "probability": 0.9169921875}, {"start": 502.86, "end": 503.08, "word": " blood", "probability": 0.923828125}, {"start": 503.08, "end": 503.5, "word": " pressure,", "probability": 0.93603515625}, {"start": 503.66, "end": 504.32, "word": " 145,", "probability": 0.92333984375}, {"start": 504.44, "end": 504.82, "word": " becomes,", "probability": 0.87158203125}, {"start": 505.0, "end": 505.08, "word": " for", "probability": 0.94873046875}, {"start": 505.08, "end": 505.36, "word": " example,", "probability": 0.978515625}, {"start": 505.46, "end": 505.84, "word": " 130.", "probability": 0.85498046875}, {"start": 506.58, "end": 506.78, "word": " The", "probability": 0.8798828125}, {"start": 506.78, "end": 507.0, "word": " other", "probability": 0.89404296875}, {"start": 507.0, "end": 507.22, "word": " one,", "probability": 0.77099609375}, {"start": 507.28, "end": 507.46, "word": " suppose", "probability": 0.86474609375}, {"start": 507.46, "end": 508.18, "word": " 145,", "probability": 0.86181640625}, {"start": 508.24, "end": 508.38, "word": " and", "probability": 0.927734375}, {"start": 508.38, "end": 508.54, "word": " so", "probability": 0.953125}, {"start": 508.54, "end": 508.72, "word": " on.", "probability": 0.9423828125}, {"start": 511.54, "end": 512.14, "word": " So", "probability": 0.94775390625}, {"start": 512.14, "end": 512.28, "word": " here", "probability": 0.80322265625}, {"start": 512.28, "end": 512.38, "word": " we", "probability": 0.70947265625}, {"start": 512.38, "end": 512.52, "word": " have", "probability": 0.947265625}, {"start": 512.52, "end": 512.72, "word": " the", "probability": 0.9189453125}, {"start": 
512.72, "end": 513.06, "word": " same", "probability": 0.90771484375}, {"start": 513.06, "end": 513.7, "word": " patients.", "probability": 0.921875}], "temperature": 1.0}, {"id": 19, "seek": 53997, "start": 516.01, "end": 539.97, "text": " Patients took drug A, and after taking drug B for three months, for example, their new measures are given by one theory, one photograph, and so on. So each person in this case has two values, or two observations. One before using drug B, and the other after. So if you have before and after, it means", "tokens": [4379, 2448, 1890, 4110, 316, 11, 293, 934, 1940, 4110, 363, 337, 1045, 2493, 11, 337, 1365, 11, 641, 777, 8000, 366, 2212, 538, 472, 5261, 11, 472, 8348, 11, 293, 370, 322, 13, 407, 1184, 954, 294, 341, 1389, 575, 732, 4190, 11, 420, 732, 18163, 13, 1485, 949, 1228, 4110, 363, 11, 293, 264, 661, 934, 13, 407, 498, 291, 362, 949, 293, 934, 11, 309, 1355], "avg_logprob": -0.20491071684019907, "compression_ratio": 1.5595854922279793, "no_speech_prob": 0.0, "words": [{"start": 516.01, "end": 516.53, "word": " Patients", "probability": 0.5828857421875}, {"start": 516.53, "end": 517.05, "word": " took", "probability": 0.8759765625}, {"start": 517.05, "end": 517.39, "word": " drug", "probability": 0.69287109375}, {"start": 517.39, "end": 517.69, "word": " A,", "probability": 0.8740234375}, {"start": 518.59, "end": 518.91, "word": " and", "probability": 0.92333984375}, {"start": 518.91, "end": 519.35, "word": " after", "probability": 0.841796875}, {"start": 519.35, "end": 521.29, "word": " taking", "probability": 0.892578125}, {"start": 521.29, "end": 521.65, "word": " drug", "probability": 0.8896484375}, {"start": 521.65, "end": 521.89, "word": " B", "probability": 0.9921875}, {"start": 521.89, "end": 522.11, "word": " for", "probability": 0.93359375}, {"start": 522.11, "end": 522.37, "word": " three", "probability": 0.78662109375}, {"start": 522.37, "end": 522.59, "word": " months,", "probability": 0.83544921875}, {"start": 
522.63, "end": 522.73, "word": " for", "probability": 0.8935546875}, {"start": 522.73, "end": 523.09, "word": " example,", "probability": 0.97216796875}, {"start": 523.67, "end": 523.93, "word": " their", "probability": 0.93017578125}, {"start": 523.93, "end": 524.55, "word": " new", "probability": 0.86962890625}, {"start": 524.55, "end": 524.85, "word": " measures", "probability": 0.810546875}, {"start": 524.85, "end": 525.23, "word": " are", "probability": 0.9384765625}, {"start": 525.23, "end": 525.51, "word": " given", "probability": 0.89794921875}, {"start": 525.51, "end": 525.77, "word": " by", "probability": 0.9638671875}, {"start": 525.77, "end": 526.01, "word": " one", "probability": 0.509765625}, {"start": 526.01, "end": 526.23, "word": " theory,", "probability": 0.6748046875}, {"start": 526.37, "end": 526.51, "word": " one", "probability": 0.91455078125}, {"start": 526.51, "end": 526.77, "word": " photograph,", "probability": 0.1729736328125}, {"start": 526.93, "end": 527.03, "word": " and", "probability": 0.93603515625}, {"start": 527.03, "end": 527.17, "word": " so", "probability": 0.9521484375}, {"start": 527.17, "end": 527.29, "word": " on.", "probability": 0.943359375}, {"start": 527.53, "end": 527.75, "word": " So", "probability": 0.91796875}, {"start": 527.75, "end": 528.07, "word": " each", "probability": 0.86474609375}, {"start": 528.07, "end": 528.47, "word": " person", "probability": 0.90625}, {"start": 528.47, "end": 528.65, "word": " in", "probability": 0.8955078125}, {"start": 528.65, "end": 528.85, "word": " this", "probability": 0.94482421875}, {"start": 528.85, "end": 529.09, "word": " case", "probability": 0.9228515625}, {"start": 529.09, "end": 529.35, "word": " has", "probability": 0.92919921875}, {"start": 529.35, "end": 529.69, "word": " two", "probability": 0.91064453125}, {"start": 529.69, "end": 530.35, "word": " values,", "probability": 0.951171875}, {"start": 530.41, "end": 530.53, "word": " or", "probability": 0.947265625}, 
{"start": 530.53, "end": 530.71, "word": " two", "probability": 0.93701171875}, {"start": 530.71, "end": 531.21, "word": " observations.", "probability": 0.74853515625}, {"start": 531.71, "end": 532.09, "word": " One", "probability": 0.93408203125}, {"start": 532.09, "end": 534.21, "word": " before", "probability": 0.740234375}, {"start": 534.21, "end": 534.59, "word": " using", "probability": 0.923828125}, {"start": 534.59, "end": 534.83, "word": " drug", "probability": 0.91064453125}, {"start": 534.83, "end": 535.09, "word": " B,", "probability": 0.9970703125}, {"start": 535.21, "end": 535.43, "word": " and", "probability": 0.92919921875}, {"start": 535.43, "end": 535.55, "word": " the", "probability": 0.53369140625}, {"start": 535.55, "end": 535.73, "word": " other", "probability": 0.892578125}, {"start": 535.73, "end": 536.19, "word": " after.", "probability": 0.8291015625}, {"start": 536.63, "end": 537.11, "word": " So", "probability": 0.951171875}, {"start": 537.11, "end": 537.31, "word": " if", "probability": 0.91845703125}, {"start": 537.31, "end": 537.43, "word": " you", "probability": 0.9609375}, {"start": 537.43, "end": 537.63, "word": " have", "probability": 0.9228515625}, {"start": 537.63, "end": 537.97, "word": " before", "probability": 0.84521484375}, {"start": 537.97, "end": 538.21, "word": " and", "probability": 0.9375}, {"start": 538.21, "end": 538.59, "word": " after,", "probability": 0.8427734375}, {"start": 539.27, "end": 539.53, "word": " it", "probability": 0.9443359375}, {"start": 539.53, "end": 539.97, "word": " means", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 20, "seek": 56853, "start": 541.33, "end": 568.53, "text": " we have related samples. So any problem has before and after for the same people, the same individual, in this case we have related samples. So we have to distinguish between two independent populations and two related samples. 
The other objective, we are going to use hypothesis testing for comparing the proportions of two independent populations.", "tokens": [321, 362, 4077, 10938, 13, 407, 604, 1154, 575, 949, 293, 934, 337, 264, 912, 561, 11, 264, 912, 2609, 11, 294, 341, 1389, 321, 362, 4077, 10938, 13, 407, 321, 362, 281, 20206, 1296, 732, 6695, 12822, 293, 732, 4077, 10938, 13, 440, 661, 10024, 11, 321, 366, 516, 281, 764, 17291, 4997, 337, 15763, 264, 32482, 295, 732, 6695, 12822, 13], "avg_logprob": -0.1365966866724193, "compression_ratio": 1.8421052631578947, "no_speech_prob": 0.0, "words": [{"start": 541.33, "end": 541.69, "word": " we", "probability": 0.37451171875}, {"start": 541.69, "end": 542.17, "word": " have", "probability": 0.94140625}, {"start": 542.17, "end": 543.23, "word": " related", "probability": 0.88427734375}, {"start": 543.23, "end": 543.81, "word": " samples.", "probability": 0.69140625}, {"start": 543.95, "end": 544.07, "word": " So", "probability": 0.908203125}, {"start": 544.07, "end": 544.31, "word": " any", "probability": 0.685546875}, {"start": 544.31, "end": 544.75, "word": " problem", "probability": 0.87890625}, {"start": 544.75, "end": 545.07, "word": " has", "probability": 0.66455078125}, {"start": 545.07, "end": 545.41, "word": " before", "probability": 0.859375}, {"start": 545.41, "end": 545.61, "word": " and", "probability": 0.9248046875}, {"start": 545.61, "end": 545.95, "word": " after", "probability": 0.84765625}, {"start": 545.95, "end": 546.53, "word": " for", "probability": 0.681640625}, {"start": 546.53, "end": 546.75, "word": " the", "probability": 0.91650390625}, {"start": 546.75, "end": 546.97, "word": " same", "probability": 0.904296875}, {"start": 546.97, "end": 547.29, "word": " people,", "probability": 0.9423828125}, {"start": 547.39, "end": 547.53, "word": " the", "probability": 0.85986328125}, {"start": 547.53, "end": 547.69, "word": " same", "probability": 0.8994140625}, {"start": 547.69, "end": 548.13, "word": " individual,", 
"probability": 0.86767578125}, {"start": 548.47, "end": 548.77, "word": " in", "probability": 0.9228515625}, {"start": 548.77, "end": 548.99, "word": " this", "probability": 0.94580078125}, {"start": 548.99, "end": 549.25, "word": " case", "probability": 0.9052734375}, {"start": 549.25, "end": 549.43, "word": " we", "probability": 0.6396484375}, {"start": 549.43, "end": 549.75, "word": " have", "probability": 0.93896484375}, {"start": 549.75, "end": 550.39, "word": " related", "probability": 0.947265625}, {"start": 550.39, "end": 550.83, "word": " samples.", "probability": 0.86181640625}, {"start": 551.71, "end": 551.91, "word": " So", "probability": 0.943359375}, {"start": 551.91, "end": 552.01, "word": " we", "probability": 0.8779296875}, {"start": 552.01, "end": 552.15, "word": " have", "probability": 0.9453125}, {"start": 552.15, "end": 552.25, "word": " to", "probability": 0.9677734375}, {"start": 552.25, "end": 552.69, "word": " distinguish", "probability": 0.892578125}, {"start": 552.69, "end": 553.55, "word": " between", "probability": 0.87158203125}, {"start": 553.55, "end": 554.75, "word": " two", "probability": 0.9228515625}, {"start": 554.75, "end": 555.29, "word": " independent", "probability": 0.8935546875}, {"start": 555.29, "end": 555.89, "word": " populations", "probability": 0.9462890625}, {"start": 555.89, "end": 556.93, "word": " and", "probability": 0.8291015625}, {"start": 556.93, "end": 557.27, "word": " two", "probability": 0.9345703125}, {"start": 557.27, "end": 558.39, "word": " related", "probability": 0.955078125}, {"start": 558.39, "end": 559.25, "word": " samples.", "probability": 0.85546875}, {"start": 561.05, "end": 561.27, "word": " The", "probability": 0.8564453125}, {"start": 561.27, "end": 561.51, "word": " other", "probability": 0.90087890625}, {"start": 561.51, "end": 561.93, "word": " objective,", "probability": 0.943359375}, {"start": 562.61, "end": 562.73, "word": " we", "probability": 0.95947265625}, {"start": 562.73, 
"end": 562.87, "word": " are", "probability": 0.92919921875}, {"start": 562.87, "end": 563.15, "word": " going", "probability": 0.94677734375}, {"start": 563.15, "end": 563.35, "word": " to", "probability": 0.96728515625}, {"start": 563.35, "end": 563.57, "word": " use", "probability": 0.87646484375}, {"start": 563.57, "end": 564.09, "word": " hypothesis", "probability": 0.82666015625}, {"start": 564.09, "end": 564.73, "word": " testing", "probability": 0.87548828125}, {"start": 564.73, "end": 565.23, "word": " for", "probability": 0.95458984375}, {"start": 565.23, "end": 566.23, "word": " comparing", "probability": 0.90380859375}, {"start": 566.23, "end": 566.47, "word": " the", "probability": 0.92529296875}, {"start": 566.47, "end": 566.99, "word": " proportions", "probability": 0.85302734375}, {"start": 566.99, "end": 567.35, "word": " of", "probability": 0.9619140625}, {"start": 567.35, "end": 567.55, "word": " two", "probability": 0.9384765625}, {"start": 567.55, "end": 567.97, "word": " independent", "probability": 0.91064453125}, {"start": 567.97, "end": 568.53, "word": " populations.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 21, "seek": 59483, "start": 569.33, "end": 594.83, "text": " That's all for this chapter. We are going to skip the variances of two independent populations. So this one will be skipped. How to use one-way analysis of variance ANOVA to test for differences among. Among this case means between more than two populations. 
So among means more than two.", "tokens": [663, 311, 439, 337, 341, 7187, 13, 492, 366, 516, 281, 10023, 264, 1374, 21518, 295, 732, 6695, 12822, 13, 407, 341, 472, 486, 312, 30193, 13, 1012, 281, 764, 472, 12, 676, 5215, 295, 21977, 5252, 46, 20914, 281, 1500, 337, 7300, 3654, 13, 16119, 341, 1389, 1355, 1296, 544, 813, 732, 12822, 13, 407, 3654, 1355, 544, 813, 732, 13], "avg_logprob": -0.1748511923684014, "compression_ratio": 1.5792349726775956, "no_speech_prob": 0.0, "words": [{"start": 569.33, "end": 569.87, "word": " That's", "probability": 0.745361328125}, {"start": 569.87, "end": 570.13, "word": " all", "probability": 0.92626953125}, {"start": 570.13, "end": 570.35, "word": " for", "probability": 0.916015625}, {"start": 570.35, "end": 570.57, "word": " this", "probability": 0.94140625}, {"start": 570.57, "end": 570.87, "word": " chapter.", "probability": 0.85498046875}, {"start": 571.53, "end": 572.13, "word": " We", "probability": 0.9482421875}, {"start": 572.13, "end": 572.27, "word": " are", "probability": 0.888671875}, {"start": 572.27, "end": 572.45, "word": " going", "probability": 0.94873046875}, {"start": 572.45, "end": 572.63, "word": " to", "probability": 0.96484375}, {"start": 572.63, "end": 572.95, "word": " skip", "probability": 0.98193359375}, {"start": 572.95, "end": 574.05, "word": " the", "probability": 0.70458984375}, {"start": 574.05, "end": 574.49, "word": " variances", "probability": 0.772216796875}, {"start": 574.49, "end": 574.83, "word": " of", "probability": 0.94140625}, {"start": 574.83, "end": 575.07, "word": " two", "probability": 0.88818359375}, {"start": 575.07, "end": 575.51, "word": " independent", "probability": 0.8466796875}, {"start": 575.51, "end": 576.27, "word": " populations.", "probability": 0.947265625}, {"start": 576.97, "end": 577.07, "word": " So", "probability": 0.80126953125}, {"start": 577.07, "end": 577.27, "word": " this", "probability": 0.81884765625}, {"start": 577.27, "end": 577.57, "word": " one", 
"probability": 0.927734375}, {"start": 577.57, "end": 580.09, "word": " will", "probability": 0.5888671875}, {"start": 580.09, "end": 580.25, "word": " be", "probability": 0.90576171875}, {"start": 580.25, "end": 580.79, "word": " skipped.", "probability": 0.8125}, {"start": 581.15, "end": 581.43, "word": " How", "probability": 0.91796875}, {"start": 581.43, "end": 581.61, "word": " to", "probability": 0.955078125}, {"start": 581.61, "end": 581.77, "word": " use", "probability": 0.861328125}, {"start": 581.77, "end": 581.93, "word": " one", "probability": 0.9091796875}, {"start": 581.93, "end": 582.05, "word": "-way", "probability": 0.83154296875}, {"start": 582.05, "end": 582.43, "word": " analysis", "probability": 0.8408203125}, {"start": 582.43, "end": 582.67, "word": " of", "probability": 0.611328125}, {"start": 582.67, "end": 583.11, "word": " variance", "probability": 0.9013671875}, {"start": 583.11, "end": 583.53, "word": " ANOVA", "probability": 0.8673502604166666}, {"start": 583.53, "end": 583.69, "word": " to", "probability": 0.9375}, {"start": 583.69, "end": 584.13, "word": " test", "probability": 0.857421875}, {"start": 584.13, "end": 585.25, "word": " for", "probability": 0.91162109375}, {"start": 585.25, "end": 585.83, "word": " differences", "probability": 0.6640625}, {"start": 585.83, "end": 586.43, "word": " among.", "probability": 0.93359375}, {"start": 587.23, "end": 587.91, "word": " Among", "probability": 0.87646484375}, {"start": 587.91, "end": 588.23, "word": " this", "probability": 0.50830078125}, {"start": 588.23, "end": 588.47, "word": " case", "probability": 0.86376953125}, {"start": 588.47, "end": 588.95, "word": " means", "probability": 0.85205078125}, {"start": 588.95, "end": 590.13, "word": " between", "probability": 0.8349609375}, {"start": 590.13, "end": 591.43, "word": " more", "probability": 0.93310546875}, {"start": 591.43, "end": 591.77, "word": " than", "probability": 0.94921875}, {"start": 591.77, "end": 592.39, "word": " 
two", "probability": 0.916015625}, {"start": 592.39, "end": 592.93, "word": " populations.", "probability": 0.91064453125}, {"start": 593.09, "end": 593.17, "word": " So", "probability": 0.9013671875}, {"start": 593.17, "end": 593.51, "word": " among", "probability": 0.81494140625}, {"start": 593.51, "end": 593.91, "word": " means", "probability": 0.9111328125}, {"start": 593.91, "end": 594.31, "word": " more", "probability": 0.9287109375}, {"start": 594.31, "end": 594.55, "word": " than", "probability": 0.951171875}, {"start": 594.55, "end": 594.83, "word": " two.", "probability": 0.9072265625}], "temperature": 1.0}, {"id": 22, "seek": 62494, "start": 595.38, "end": 624.94, "text": " So here we are going also to skip analysis of variance as well as how to perform multiple comparisons and when we analysis of variance will be skipped. So mainly we are focusing on the difference between two means and two proportions, two means for independent and related, and also we are going to cover hypothesis testing for the difference between", "tokens": [407, 510, 321, 366, 516, 611, 281, 10023, 5215, 295, 21977, 382, 731, 382, 577, 281, 2042, 3866, 33157, 293, 562, 321, 5215, 295, 21977, 486, 312, 30193, 13, 407, 8704, 321, 366, 8416, 322, 264, 2649, 1296, 732, 1355, 293, 732, 32482, 11, 732, 1355, 337, 6695, 293, 4077, 11, 293, 611, 321, 366, 516, 281, 2060, 17291, 4997, 337, 264, 2649, 1296], "avg_logprob": -0.20084134615384616, "compression_ratio": 1.7908163265306123, "no_speech_prob": 0.0, "words": [{"start": 595.38, "end": 595.68, "word": " So", "probability": 0.86376953125}, {"start": 595.68, "end": 595.94, "word": " here", "probability": 0.7470703125}, {"start": 595.94, "end": 596.66, "word": " we", "probability": 0.54736328125}, {"start": 596.66, "end": 596.82, "word": " are", "probability": 0.9169921875}, {"start": 596.82, "end": 597.08, "word": " going", "probability": 0.91015625}, {"start": 597.08, "end": 597.42, "word": " also", "probability": 0.74755859375}, 
{"start": 597.42, "end": 597.62, "word": " to", "probability": 0.92236328125}, {"start": 597.62, "end": 598.0, "word": " skip", "probability": 0.9892578125}, {"start": 598.0, "end": 598.6, "word": " analysis", "probability": 0.86083984375}, {"start": 598.6, "end": 598.9, "word": " of", "probability": 0.9443359375}, {"start": 598.9, "end": 599.26, "word": " variance", "probability": 0.8564453125}, {"start": 599.26, "end": 599.56, "word": " as", "probability": 0.68017578125}, {"start": 599.56, "end": 599.74, "word": " well", "probability": 0.9384765625}, {"start": 599.74, "end": 600.14, "word": " as", "probability": 0.95263671875}, {"start": 600.14, "end": 600.88, "word": " how", "probability": 0.88818359375}, {"start": 600.88, "end": 601.08, "word": " to", "probability": 0.9697265625}, {"start": 601.08, "end": 601.62, "word": " perform", "probability": 0.77392578125}, {"start": 601.62, "end": 602.3, "word": " multiple", "probability": 0.87109375}, {"start": 602.3, "end": 602.82, "word": " comparisons", "probability": 0.892578125}, {"start": 602.82, "end": 603.16, "word": " and", "probability": 0.560546875}, {"start": 603.16, "end": 603.34, "word": " when", "probability": 0.8447265625}, {"start": 603.34, "end": 603.46, "word": " we", "probability": 0.7216796875}, {"start": 603.46, "end": 603.82, "word": " analysis", "probability": 0.207275390625}, {"start": 603.82, "end": 604.04, "word": " of", "probability": 0.481689453125}, {"start": 604.04, "end": 604.42, "word": " variance", "probability": 0.90478515625}, {"start": 604.42, "end": 605.44, "word": " will", "probability": 0.71728515625}, {"start": 605.44, "end": 605.6, "word": " be", "probability": 0.9482421875}, {"start": 605.6, "end": 605.88, "word": " skipped.", "probability": 0.8671875}, {"start": 606.82, "end": 607.26, "word": " So", "probability": 0.9462890625}, {"start": 607.26, "end": 607.6, "word": " mainly", "probability": 0.91845703125}, {"start": 607.6, "end": 608.34, "word": " we", "probability": 
0.689453125}, {"start": 608.34, "end": 608.5, "word": " are", "probability": 0.9384765625}, {"start": 608.5, "end": 608.98, "word": " focusing", "probability": 0.896484375}, {"start": 608.98, "end": 609.64, "word": " on", "probability": 0.9482421875}, {"start": 609.64, "end": 610.1, "word": " the", "probability": 0.92138671875}, {"start": 610.1, "end": 610.58, "word": " difference", "probability": 0.86474609375}, {"start": 610.58, "end": 611.1, "word": " between", "probability": 0.88525390625}, {"start": 611.1, "end": 612.84, "word": " two", "probability": 0.8525390625}, {"start": 612.84, "end": 613.22, "word": " means", "probability": 0.900390625}, {"start": 613.22, "end": 614.16, "word": " and", "probability": 0.88916015625}, {"start": 614.16, "end": 614.34, "word": " two", "probability": 0.943359375}, {"start": 614.34, "end": 614.86, "word": " proportions,", "probability": 0.8662109375}, {"start": 615.76, "end": 615.88, "word": " two", "probability": 0.943359375}, {"start": 615.88, "end": 616.24, "word": " means", "probability": 0.92724609375}, {"start": 616.24, "end": 617.02, "word": " for", "probability": 0.87353515625}, {"start": 617.02, "end": 617.56, "word": " independent", "probability": 0.90234375}, {"start": 617.56, "end": 618.44, "word": " and", "probability": 0.9384765625}, {"start": 618.44, "end": 619.44, "word": " related,", "probability": 0.9560546875}, {"start": 619.9, "end": 620.16, "word": " and", "probability": 0.93310546875}, {"start": 620.16, "end": 620.56, "word": " also", "probability": 0.88134765625}, {"start": 620.56, "end": 621.14, "word": " we", "probability": 0.931640625}, {"start": 621.14, "end": 621.28, "word": " are", "probability": 0.927734375}, {"start": 621.28, "end": 621.54, "word": " going", "probability": 0.94580078125}, {"start": 621.54, "end": 621.72, "word": " to", "probability": 0.9658203125}, {"start": 621.72, "end": 622.06, "word": " cover", "probability": 0.96044921875}, {"start": 622.06, "end": 623.34, "word": " 
hypothesis", "probability": 0.80322265625}, {"start": 623.34, "end": 623.74, "word": " testing", "probability": 0.53857421875}, {"start": 623.74, "end": 623.94, "word": " for", "probability": 0.9482421875}, {"start": 623.94, "end": 624.08, "word": " the", "probability": 0.91162109375}, {"start": 624.08, "end": 624.46, "word": " difference", "probability": 0.8603515625}, {"start": 624.46, "end": 624.94, "word": " between", "probability": 0.87890625}], "temperature": 1.0}, {"id": 23, "seek": 65232, "start": 625.42, "end": 652.32, "text": " to population proportions. That's all for this chapter. So again, we are going to explain how can we perform testing for two sample tests. In this case, there are four cases. First, we'll discuss hypothesis testing for the population means for independent samples.", "tokens": [281, 4415, 32482, 13, 663, 311, 439, 337, 341, 7187, 13, 407, 797, 11, 321, 366, 516, 281, 2903, 577, 393, 321, 2042, 4997, 337, 732, 6889, 6921, 13, 682, 341, 1389, 11, 456, 366, 1451, 3331, 13, 2386, 11, 321, 603, 2248, 17291, 4997, 337, 264, 4415, 1355, 337, 6695, 10938, 13], "avg_logprob": -0.16030092785755792, "compression_ratio": 1.5773809523809523, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 625.42, "end": 625.76, "word": " to", "probability": 0.23876953125}, {"start": 625.76, "end": 626.38, "word": " population", "probability": 0.93994140625}, {"start": 626.38, "end": 627.18, "word": " proportions.", "probability": 0.806640625}, {"start": 627.76, "end": 628.1, "word": " That's", "probability": 0.943115234375}, {"start": 628.1, "end": 628.44, "word": " all", "probability": 0.94970703125}, {"start": 628.44, "end": 628.92, "word": " for", "probability": 0.94775390625}, {"start": 628.92, "end": 629.28, "word": " this", "probability": 0.94482421875}, {"start": 629.28, "end": 629.64, "word": " chapter.", "probability": 0.890625}, {"start": 633.22, "end": 633.94, "word": " So", "probability": 0.5322265625}, {"start": 633.94, "end": 634.26, 
"word": " again,", "probability": 0.82861328125}, {"start": 634.5, "end": 634.74, "word": " we", "probability": 0.962890625}, {"start": 634.74, "end": 634.86, "word": " are", "probability": 0.92919921875}, {"start": 634.86, "end": 635.14, "word": " going", "probability": 0.94384765625}, {"start": 635.14, "end": 635.6, "word": " to", "probability": 0.9697265625}, {"start": 635.6, "end": 638.26, "word": " explain", "probability": 0.82568359375}, {"start": 638.26, "end": 638.52, "word": " how", "probability": 0.91357421875}, {"start": 638.52, "end": 638.7, "word": " can", "probability": 0.80859375}, {"start": 638.7, "end": 638.88, "word": " we", "probability": 0.9619140625}, {"start": 638.88, "end": 639.44, "word": " perform", "probability": 0.81396484375}, {"start": 639.44, "end": 640.14, "word": " testing", "probability": 0.83837890625}, {"start": 640.14, "end": 640.48, "word": " for", "probability": 0.94287109375}, {"start": 640.48, "end": 640.68, "word": " two", "probability": 0.88427734375}, {"start": 640.68, "end": 641.02, "word": " sample", "probability": 0.513671875}, {"start": 641.02, "end": 641.48, "word": " tests.", "probability": 0.72802734375}, {"start": 642.06, "end": 642.24, "word": " In", "probability": 0.939453125}, {"start": 642.24, "end": 642.44, "word": " this", "probability": 0.94580078125}, {"start": 642.44, "end": 642.64, "word": " case,", "probability": 0.91455078125}, {"start": 642.78, "end": 642.86, "word": " there", "probability": 0.9072265625}, {"start": 642.86, "end": 643.16, "word": " are", "probability": 0.943359375}, {"start": 643.16, "end": 643.66, "word": " four", "probability": 0.9365234375}, {"start": 643.66, "end": 644.14, "word": " cases.", "probability": 0.91552734375}, {"start": 645.52, "end": 646.22, "word": " First,", "probability": 0.8955078125}, {"start": 646.28, "end": 646.72, "word": " we'll", "probability": 0.726806640625}, {"start": 646.72, "end": 647.42, "word": " discuss", "probability": 0.9033203125}, {"start": 
647.42, "end": 648.72, "word": " hypothesis", "probability": 0.9091796875}, {"start": 648.72, "end": 649.1, "word": " testing", "probability": 0.849609375}, {"start": 649.1, "end": 649.3, "word": " for", "probability": 0.9365234375}, {"start": 649.3, "end": 649.44, "word": " the", "probability": 0.90771484375}, {"start": 649.44, "end": 649.8, "word": " population", "probability": 0.9541015625}, {"start": 649.8, "end": 650.34, "word": " means", "probability": 0.85400390625}, {"start": 650.34, "end": 651.26, "word": " for", "probability": 0.70849609375}, {"start": 651.26, "end": 651.7, "word": " independent", "probability": 0.87890625}, {"start": 651.7, "end": 652.32, "word": " samples.", "probability": 0.88330078125}], "temperature": 1.0}, {"id": 24, "seek": 67957, "start": 653.15, "end": 679.57, "text": " So in this case, we have mean one versus mean two. So that's for independent samples. The other one also for the population means, but for related samples. By the way, sometimes it's called a pair. Samples. 
The word pair comes from we have two values for the same independent", "tokens": [407, 294, 341, 1389, 11, 321, 362, 914, 472, 5717, 914, 732, 13, 407, 300, 311, 337, 6695, 10938, 13, 440, 661, 472, 611, 337, 264, 4415, 1355, 11, 457, 337, 4077, 10938, 13, 3146, 264, 636, 11, 2171, 309, 311, 1219, 257, 6119, 13, 4832, 2622, 13, 440, 1349, 6119, 1487, 490, 321, 362, 732, 4190, 337, 264, 912, 6695], "avg_logprob": -0.17993951420630178, "compression_ratio": 1.6428571428571428, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 653.15, "end": 653.39, "word": " So", "probability": 0.8134765625}, {"start": 653.39, "end": 653.51, "word": " in", "probability": 0.80126953125}, {"start": 653.51, "end": 653.69, "word": " this", "probability": 0.9453125}, {"start": 653.69, "end": 654.09, "word": " case,", "probability": 0.91845703125}, {"start": 654.43, "end": 654.53, "word": " we", "probability": 0.95947265625}, {"start": 654.53, "end": 654.73, "word": " have", "probability": 0.9345703125}, {"start": 654.73, "end": 654.87, "word": " mean", "probability": 0.8857421875}, {"start": 654.87, "end": 655.15, "word": " one", "probability": 0.7841796875}, {"start": 655.15, "end": 655.89, "word": " versus", "probability": 0.904296875}, {"start": 655.89, "end": 656.21, "word": " mean", "probability": 0.9501953125}, {"start": 656.21, "end": 656.51, "word": " two.", "probability": 0.9404296875}, {"start": 657.31, "end": 657.61, "word": " So", "probability": 0.95068359375}, {"start": 657.61, "end": 657.91, "word": " that's", "probability": 0.9287109375}, {"start": 657.91, "end": 658.15, "word": " for", "probability": 0.8759765625}, {"start": 658.15, "end": 658.59, "word": " independent", "probability": 0.83740234375}, {"start": 658.59, "end": 659.07, "word": " samples.", "probability": 0.8251953125}, {"start": 659.61, "end": 659.87, "word": " The", "probability": 0.88818359375}, {"start": 659.87, "end": 660.09, "word": " other", "probability": 0.8896484375}, {"start": 660.09, 
"end": 660.31, "word": " one", "probability": 0.9267578125}, {"start": 660.31, "end": 660.65, "word": " also", "probability": 0.7197265625}, {"start": 660.65, "end": 660.89, "word": " for", "probability": 0.9267578125}, {"start": 660.89, "end": 661.03, "word": " the", "probability": 0.92138671875}, {"start": 661.03, "end": 661.45, "word": " population", "probability": 0.95166015625}, {"start": 661.45, "end": 661.97, "word": " means,", "probability": 0.923828125}, {"start": 662.27, "end": 662.35, "word": " but", "probability": 0.9208984375}, {"start": 662.35, "end": 662.75, "word": " for", "probability": 0.9443359375}, {"start": 662.75, "end": 663.35, "word": " related", "probability": 0.9052734375}, {"start": 663.35, "end": 663.79, "word": " samples.", "probability": 0.86279296875}, {"start": 664.53, "end": 664.79, "word": " By", "probability": 0.97412109375}, {"start": 664.79, "end": 664.89, "word": " the", "probability": 0.9248046875}, {"start": 664.89, "end": 665.05, "word": " way,", "probability": 0.962890625}, {"start": 665.15, "end": 665.45, "word": " sometimes", "probability": 0.8388671875}, {"start": 665.45, "end": 665.87, "word": " it's", "probability": 0.802490234375}, {"start": 665.87, "end": 666.19, "word": " called", "probability": 0.8994140625}, {"start": 666.19, "end": 666.87, "word": " a", "probability": 0.91064453125}, {"start": 666.87, "end": 667.09, "word": " pair.", "probability": 0.51708984375}, {"start": 671.81, "end": 672.41, "word": " Samples.", "probability": 0.648681640625}, {"start": 673.91, "end": 674.07, "word": " The", "probability": 0.87646484375}, {"start": 674.07, "end": 674.37, "word": " word", "probability": 0.9375}, {"start": 674.37, "end": 674.69, "word": " pair", "probability": 0.73974609375}, {"start": 674.69, "end": 675.47, "word": " comes", "probability": 0.865234375}, {"start": 675.47, "end": 675.99, "word": " from", "probability": 0.88720703125}, {"start": 675.99, "end": 676.49, "word": " we", "probability": 
0.491943359375}, {"start": 676.49, "end": 676.87, "word": " have", "probability": 0.94775390625}, {"start": 676.87, "end": 678.23, "word": " two", "probability": 0.9287109375}, {"start": 678.23, "end": 678.59, "word": " values", "probability": 0.95166015625}, {"start": 678.59, "end": 678.83, "word": " for", "probability": 0.94677734375}, {"start": 678.83, "end": 679.03, "word": " the", "probability": 0.91845703125}, {"start": 679.03, "end": 679.27, "word": " same", "probability": 0.87646484375}, {"start": 679.27, "end": 679.57, "word": " independent", "probability": 0.73779296875}], "temperature": 1.0}, {"id": 25, "seek": 70175, "start": 683.97, "end": 701.75, "text": " After that, it becomes 110, for example. So it's called pair. So in this case, we have semigroup before versus after treatment.", "tokens": [2381, 300, 11, 309, 3643, 20154, 11, 337, 1365, 13, 407, 309, 311, 1219, 6119, 13, 407, 294, 341, 1389, 11, 321, 362, 4361, 328, 81, 1250, 949, 5717, 934, 5032, 13], "avg_logprob": -0.2544981060606061, "compression_ratio": 1.2075471698113207, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 683.97, "end": 684.73, "word": " After", "probability": 0.188232421875}, {"start": 684.73, "end": 685.49, "word": " that,", "probability": 0.91796875}, {"start": 685.61, "end": 685.65, "word": " it", "probability": 0.72705078125}, {"start": 685.65, "end": 685.91, "word": " becomes", "probability": 0.8173828125}, {"start": 685.91, "end": 686.57, "word": " 110,", "probability": 0.5234375}, {"start": 686.71, "end": 686.85, "word": " for", "probability": 0.95458984375}, {"start": 686.85, "end": 687.25, "word": " example.", "probability": 0.9755859375}, {"start": 687.87, "end": 688.05, "word": " So", "probability": 0.94677734375}, {"start": 688.05, "end": 688.35, "word": " it's", "probability": 0.83544921875}, {"start": 688.35, "end": 688.65, "word": " called", "probability": 0.90283203125}, {"start": 688.65, "end": 689.61, "word": " pair.", "probability": 
0.461181640625}, {"start": 695.15, "end": 695.91, "word": " So", "probability": 0.939453125}, {"start": 695.91, "end": 696.15, "word": " in", "probability": 0.85986328125}, {"start": 696.15, "end": 696.37, "word": " this", "probability": 0.94775390625}, {"start": 696.37, "end": 696.61, "word": " case,", "probability": 0.91357421875}, {"start": 696.69, "end": 696.83, "word": " we", "probability": 0.95849609375}, {"start": 696.83, "end": 697.19, "word": " have", "probability": 0.93798828125}, {"start": 697.19, "end": 699.05, "word": " semigroup", "probability": 0.8511962890625}, {"start": 699.05, "end": 699.85, "word": " before", "probability": 0.78173828125}, {"start": 699.85, "end": 700.45, "word": " versus", "probability": 0.8701171875}, {"start": 700.45, "end": 701.11, "word": " after", "probability": 0.85791015625}, {"start": 701.11, "end": 701.75, "word": " treatment.", "probability": 0.85888671875}], "temperature": 1.0}, {"id": 26, "seek": 73359, "start": 704.41, "end": 733.59, "text": " The other type of two-sample test is population proportions. In this case, we are going to test to see if there is a difference between two population proportions. The other type of two-sample test, population variances, variance one against variance two. As we mentioned, we are going to cover population means for independent and related samples, as well as the population proportion. 
That's all, again,", "tokens": [440, 661, 2010, 295, 732, 12, 19988, 781, 1500, 307, 4415, 32482, 13, 682, 341, 1389, 11, 321, 366, 516, 281, 1500, 281, 536, 498, 456, 307, 257, 2649, 1296, 732, 4415, 32482, 13, 440, 661, 2010, 295, 732, 12, 19988, 781, 1500, 11, 4415, 1374, 21518, 11, 21977, 472, 1970, 21977, 732, 13, 1018, 321, 2835, 11, 321, 366, 516, 281, 2060, 4415, 1355, 337, 6695, 293, 4077, 10938, 11, 382, 731, 382, 264, 4415, 16068, 13, 663, 311, 439, 11, 797, 11], "avg_logprob": -0.1392463298404918, "compression_ratio": 2.0, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 704.41, "end": 704.63, "word": " The", "probability": 0.82421875}, {"start": 704.63, "end": 704.89, "word": " other", "probability": 0.89013671875}, {"start": 704.89, "end": 705.23, "word": " type", "probability": 0.96923828125}, {"start": 705.23, "end": 705.37, "word": " of", "probability": 0.9541015625}, {"start": 705.37, "end": 705.53, "word": " two", "probability": 0.8701171875}, {"start": 705.53, "end": 705.83, "word": "-sample", "probability": 0.7955729166666666}, {"start": 705.83, "end": 706.23, "word": " test", "probability": 0.787109375}, {"start": 706.23, "end": 706.61, "word": " is", "probability": 0.935546875}, {"start": 706.61, "end": 707.11, "word": " population", "probability": 0.91552734375}, {"start": 707.11, "end": 707.73, "word": " proportions.", "probability": 0.5615234375}, {"start": 707.93, "end": 708.05, "word": " In", "probability": 0.958984375}, {"start": 708.05, "end": 708.23, "word": " this", "probability": 0.94580078125}, {"start": 708.23, "end": 708.43, "word": " case,", "probability": 0.912109375}, {"start": 708.45, "end": 708.57, "word": " we", "probability": 0.9609375}, {"start": 708.57, "end": 708.67, "word": " are", "probability": 0.927734375}, {"start": 708.67, "end": 708.91, "word": " going", "probability": 0.9443359375}, {"start": 708.91, "end": 709.07, "word": " to", "probability": 0.96435546875}, {"start": 709.07, "end": 709.33, "word": 
" test", "probability": 0.8828125}, {"start": 709.33, "end": 709.53, "word": " to", "probability": 0.92138671875}, {"start": 709.53, "end": 709.77, "word": " see", "probability": 0.92431640625}, {"start": 709.77, "end": 710.47, "word": " if", "probability": 0.94482421875}, {"start": 710.47, "end": 710.65, "word": " there", "probability": 0.90869140625}, {"start": 710.65, "end": 710.79, "word": " is", "probability": 0.9111328125}, {"start": 710.79, "end": 710.89, "word": " a", "probability": 0.9951171875}, {"start": 710.89, "end": 711.27, "word": " difference", "probability": 0.8662109375}, {"start": 711.27, "end": 711.71, "word": " between", "probability": 0.87353515625}, {"start": 711.71, "end": 712.27, "word": " two", "probability": 0.92431640625}, {"start": 712.27, "end": 712.89, "word": " population", "probability": 0.919921875}, {"start": 712.89, "end": 713.45, "word": " proportions.", "probability": 0.751953125}, {"start": 714.25, "end": 714.49, "word": " The", "probability": 0.88818359375}, {"start": 714.49, "end": 714.83, "word": " other", "probability": 0.89111328125}, {"start": 714.83, "end": 715.79, "word": " type", "probability": 0.97900390625}, {"start": 715.79, "end": 716.73, "word": " of", "probability": 0.96875}, {"start": 716.73, "end": 717.05, "word": " two", "probability": 0.93896484375}, {"start": 717.05, "end": 717.31, "word": "-sample", "probability": 0.9554036458333334}, {"start": 717.31, "end": 717.63, "word": " test,", "probability": 0.8330078125}, {"start": 717.79, "end": 718.13, "word": " population", "probability": 0.9384765625}, {"start": 718.13, "end": 718.67, "word": " variances,", "probability": 0.871826171875}, {"start": 719.41, "end": 719.71, "word": " variance", "probability": 0.80029296875}, {"start": 719.71, "end": 720.03, "word": " one", "probability": 0.64501953125}, {"start": 720.03, "end": 720.63, "word": " against", "probability": 0.9091796875}, {"start": 720.63, "end": 721.43, "word": " variance", "probability": 
0.927734375}, {"start": 721.43, "end": 721.73, "word": " two.", "probability": 0.8271484375}, {"start": 722.33, "end": 722.59, "word": " As", "probability": 0.9677734375}, {"start": 722.59, "end": 722.81, "word": " we", "probability": 0.8544921875}, {"start": 722.81, "end": 723.13, "word": " mentioned,", "probability": 0.83837890625}, {"start": 723.77, "end": 723.89, "word": " we", "probability": 0.9619140625}, {"start": 723.89, "end": 724.01, "word": " are", "probability": 0.93115234375}, {"start": 724.01, "end": 724.27, "word": " going", "probability": 0.94384765625}, {"start": 724.27, "end": 724.47, "word": " to", "probability": 0.9697265625}, {"start": 724.47, "end": 724.73, "word": " cover", "probability": 0.96337890625}, {"start": 724.73, "end": 725.51, "word": " population", "probability": 0.951171875}, {"start": 725.51, "end": 725.95, "word": " means", "probability": 0.8779296875}, {"start": 725.95, "end": 726.35, "word": " for", "probability": 0.9443359375}, {"start": 726.35, "end": 727.45, "word": " independent", "probability": 0.8935546875}, {"start": 727.45, "end": 727.81, "word": " and", "probability": 0.9111328125}, {"start": 727.81, "end": 728.19, "word": " related", "probability": 0.9306640625}, {"start": 728.19, "end": 728.67, "word": " samples,", "probability": 0.8896484375}, {"start": 729.09, "end": 729.35, "word": " as", "probability": 0.9658203125}, {"start": 729.35, "end": 729.51, "word": " well", "probability": 0.9296875}, {"start": 729.51, "end": 729.93, "word": " as", "probability": 0.9658203125}, {"start": 729.93, "end": 730.61, "word": " the", "probability": 0.90380859375}, {"start": 730.61, "end": 731.11, "word": " population", "probability": 0.9462890625}, {"start": 731.11, "end": 731.97, "word": " proportion.", "probability": 0.44677734375}, {"start": 732.19, "end": 732.59, "word": " That's", "probability": 0.941162109375}, {"start": 732.59, "end": 733.09, "word": " all,", "probability": 0.94287109375}, {"start": 733.21, "end": 733.59, 
"word": " again,", "probability": 0.962890625}], "temperature": 1.0}, {"id": 27, "seek": 76162, "start": 734.3, "end": 761.62, "text": " for this chapter. We have to start with the sampling distribution actually of the difference between two population means. In chapter seven, we talked about sampling distribution for X bar.", "tokens": [337, 341, 7187, 13, 492, 362, 281, 722, 365, 264, 21179, 7316, 767, 295, 264, 2649, 1296, 732, 4415, 1355, 13, 682, 7187, 3407, 11, 321, 2825, 466, 21179, 7316, 337, 1783, 2159, 13], "avg_logprob": -0.18515624744551523, "compression_ratio": 1.450381679389313, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 734.3, "end": 735.06, "word": " for", "probability": 0.61474609375}, {"start": 735.06, "end": 735.66, "word": " this", "probability": 0.94677734375}, {"start": 735.66, "end": 736.1, "word": " chapter.", "probability": 0.86669921875}, {"start": 738.3, "end": 739.36, "word": " We", "probability": 0.90234375}, {"start": 739.36, "end": 739.58, "word": " have", "probability": 0.95068359375}, {"start": 739.58, "end": 739.76, "word": " to", "probability": 0.96875}, {"start": 739.76, "end": 740.2, "word": " start", "probability": 0.927734375}, {"start": 740.2, "end": 740.92, "word": " with", "probability": 0.9033203125}, {"start": 740.92, "end": 741.3, "word": " the", "probability": 0.91650390625}, {"start": 741.3, "end": 741.6, "word": " sampling", "probability": 0.5263671875}, {"start": 741.6, "end": 742.26, "word": " distribution", "probability": 0.8486328125}, {"start": 742.26, "end": 742.88, "word": " actually", "probability": 0.73046875}, {"start": 742.88, "end": 744.06, "word": " of", "probability": 0.74755859375}, {"start": 744.06, "end": 744.24, "word": " the", "probability": 0.92333984375}, {"start": 744.24, "end": 744.68, "word": " difference", "probability": 0.8564453125}, {"start": 744.68, "end": 745.26, "word": " between", "probability": 0.89013671875}, {"start": 745.26, "end": 745.74, "word": " two", 
"probability": 0.92919921875}, {"start": 745.74, "end": 746.22, "word": " population", "probability": 0.947265625}, {"start": 746.22, "end": 746.58, "word": " means.", "probability": 0.9189453125}, {"start": 747.6, "end": 747.82, "word": " In", "probability": 0.94384765625}, {"start": 747.82, "end": 748.08, "word": " chapter", "probability": 0.71044921875}, {"start": 748.08, "end": 748.46, "word": " seven,", "probability": 0.77783203125}, {"start": 749.74, "end": 750.08, "word": " we", "probability": 0.9609375}, {"start": 750.08, "end": 750.84, "word": " talked", "probability": 0.5224609375}, {"start": 750.84, "end": 751.5, "word": " about", "probability": 0.9013671875}, {"start": 751.5, "end": 752.58, "word": " sampling", "probability": 0.880859375}, {"start": 752.58, "end": 753.4, "word": " distribution", "probability": 0.8623046875}, {"start": 753.4, "end": 759.76, "word": " for", "probability": 0.875}, {"start": 759.76, "end": 761.34, "word": " X", "probability": 0.62060546875}, {"start": 761.34, "end": 761.62, "word": " bar.", "probability": 0.76953125}], "temperature": 1.0}, {"id": 28, "seek": 79317, "start": 765.43, "end": 793.17, "text": " And we said that X bar is a point estimate for Mu. Now in this chapter we are interested for the difference between two population means. 
That means if we have Mu1 minus Mu2, in this case the point estimate of this difference should be X1 bar.", "tokens": [400, 321, 848, 300, 1783, 2159, 307, 257, 935, 12539, 337, 15601, 13, 823, 294, 341, 7187, 321, 366, 3102, 337, 264, 2649, 1296, 732, 4415, 1355, 13, 663, 1355, 498, 321, 362, 15601, 16, 3175, 15601, 17, 11, 294, 341, 1389, 264, 935, 12539, 295, 341, 2649, 820, 312, 1783, 16, 2159, 13], "avg_logprob": -0.17926136742938648, "compression_ratio": 1.525, "no_speech_prob": 0.0, "words": [{"start": 765.43, "end": 765.75, "word": " And", "probability": 0.458251953125}, {"start": 765.75, "end": 765.89, "word": " we", "probability": 0.86083984375}, {"start": 765.89, "end": 766.07, "word": " said", "probability": 0.7822265625}, {"start": 766.07, "end": 766.39, "word": " that", "probability": 0.87744140625}, {"start": 766.39, "end": 766.99, "word": " X", "probability": 0.63671875}, {"start": 766.99, "end": 767.35, "word": " bar", "probability": 0.798828125}, {"start": 767.35, "end": 769.39, "word": " is", "probability": 0.845703125}, {"start": 769.39, "end": 769.55, "word": " a", "probability": 0.638671875}, {"start": 769.55, "end": 769.91, "word": " point", "probability": 0.966796875}, {"start": 769.91, "end": 771.91, "word": " estimate", "probability": 0.9189453125}, {"start": 771.91, "end": 774.05, "word": " for", "probability": 0.9384765625}, {"start": 774.05, "end": 774.31, "word": " Mu.", "probability": 0.60400390625}, {"start": 775.73, "end": 776.39, "word": " Now", "probability": 0.94921875}, {"start": 776.39, "end": 776.51, "word": " in", "probability": 0.6865234375}, {"start": 776.51, "end": 776.67, "word": " this", "probability": 0.9443359375}, {"start": 776.67, "end": 776.95, "word": " chapter", "probability": 0.86376953125}, {"start": 776.95, "end": 777.19, "word": " we", "probability": 0.57666015625}, {"start": 777.19, "end": 777.45, "word": " are", "probability": 0.93994140625}, {"start": 777.45, "end": 778.09, "word": " interested", 
"probability": 0.91455078125}, {"start": 778.09, "end": 779.95, "word": " for", "probability": 0.759765625}, {"start": 779.95, "end": 780.13, "word": " the", "probability": 0.9228515625}, {"start": 780.13, "end": 780.51, "word": " difference", "probability": 0.85498046875}, {"start": 780.51, "end": 780.87, "word": " between", "probability": 0.8935546875}, {"start": 780.87, "end": 781.09, "word": " two", "probability": 0.89501953125}, {"start": 781.09, "end": 781.53, "word": " population", "probability": 0.87451171875}, {"start": 781.53, "end": 781.97, "word": " means.", "probability": 0.8310546875}, {"start": 782.71, "end": 783.09, "word": " That", "probability": 0.89892578125}, {"start": 783.09, "end": 783.45, "word": " means", "probability": 0.9306640625}, {"start": 783.45, "end": 783.67, "word": " if", "probability": 0.884765625}, {"start": 783.67, "end": 783.85, "word": " we", "probability": 0.95751953125}, {"start": 783.85, "end": 784.21, "word": " have", "probability": 0.94140625}, {"start": 784.21, "end": 787.03, "word": " Mu1", "probability": 0.727294921875}, {"start": 787.03, "end": 787.43, "word": " minus", "probability": 0.9111328125}, {"start": 787.43, "end": 787.91, "word": " Mu2,", "probability": 0.97802734375}, {"start": 788.05, "end": 788.23, "word": " in", "probability": 0.912109375}, {"start": 788.23, "end": 788.41, "word": " this", "probability": 0.94580078125}, {"start": 788.41, "end": 788.59, "word": " case", "probability": 0.92822265625}, {"start": 788.59, "end": 788.77, "word": " the", "probability": 0.7333984375}, {"start": 788.77, "end": 789.05, "word": " point", "probability": 0.9765625}, {"start": 789.05, "end": 789.67, "word": " estimate", "probability": 0.94580078125}, {"start": 789.67, "end": 790.57, "word": " of", "probability": 0.9267578125}, {"start": 790.57, "end": 791.17, "word": " this", "probability": 0.94677734375}, {"start": 791.17, "end": 791.77, "word": " difference", "probability": 0.84130859375}, {"start": 791.77, "end": 
792.15, "word": " should", "probability": 0.9599609375}, {"start": 792.15, "end": 792.39, "word": " be", "probability": 0.955078125}, {"start": 792.39, "end": 792.85, "word": " X1", "probability": 0.930908203125}, {"start": 792.85, "end": 793.17, "word": " bar.", "probability": 0.951171875}], "temperature": 1.0}, {"id": 29, "seek": 82375, "start": 794.25, "end": 823.75, "text": " minus x2. So x1 bar minus x2 bar is a point estimate for the difference mu1 minus mu2. So this one is a point estimate for mu1 minus mu2. It's clear that x2 bar minus x1 bar is a point estimate of or for mu2 minus mu1.", "tokens": [3175, 2031, 17, 13, 407, 2031, 16, 2159, 3175, 2031, 17, 2159, 307, 257, 935, 12539, 337, 264, 2649, 2992, 16, 3175, 2992, 17, 13, 407, 341, 472, 307, 257, 935, 12539, 337, 2992, 16, 3175, 2992, 17, 13, 467, 311, 1850, 300, 2031, 17, 2159, 3175, 2031, 16, 2159, 307, 257, 935, 12539, 295, 420, 337, 2992, 17, 3175, 2992, 16, 13], "avg_logprob": -0.1244506782386452, "compression_ratio": 1.9210526315789473, "no_speech_prob": 0.0, "words": [{"start": 794.25, "end": 794.71, "word": " minus", "probability": 0.2398681640625}, {"start": 794.71, "end": 795.17, "word": " x2.", "probability": 0.5428466796875}, {"start": 795.37, "end": 795.87, "word": " So", "probability": 0.89599609375}, {"start": 795.87, "end": 796.31, "word": " x1", "probability": 0.876708984375}, {"start": 796.31, "end": 796.49, "word": " bar", "probability": 0.88525390625}, {"start": 796.49, "end": 796.81, "word": " minus", "probability": 0.978515625}, {"start": 796.81, "end": 797.29, "word": " x2", "probability": 0.990234375}, {"start": 797.29, "end": 797.47, "word": " bar", "probability": 0.943359375}, {"start": 797.47, "end": 797.63, "word": " is", "probability": 0.94580078125}, {"start": 797.63, "end": 797.75, "word": " a", "probability": 0.9736328125}, {"start": 797.75, "end": 797.97, "word": " point", "probability": 0.978515625}, {"start": 797.97, "end": 798.45, "word": " estimate", "probability": 
0.93603515625}, {"start": 798.45, "end": 799.81, "word": " for", "probability": 0.90673828125}, {"start": 799.81, "end": 800.01, "word": " the", "probability": 0.923828125}, {"start": 800.01, "end": 800.59, "word": " difference", "probability": 0.8369140625}, {"start": 800.59, "end": 800.99, "word": " mu1", "probability": 0.841796875}, {"start": 800.99, "end": 801.27, "word": " minus", "probability": 0.98681640625}, {"start": 801.27, "end": 801.67, "word": " mu2.", "probability": 0.967529296875}, {"start": 802.11, "end": 802.35, "word": " So", "probability": 0.9052734375}, {"start": 802.35, "end": 802.57, "word": " this", "probability": 0.88623046875}, {"start": 802.57, "end": 802.77, "word": " one", "probability": 0.86962890625}, {"start": 802.77, "end": 803.01, "word": " is", "probability": 0.947265625}, {"start": 803.01, "end": 803.19, "word": " a", "probability": 0.94677734375}, {"start": 803.19, "end": 803.47, "word": " point", "probability": 0.97998046875}, {"start": 803.47, "end": 804.01, "word": " estimate", "probability": 0.94091796875}, {"start": 804.01, "end": 808.15, "word": " for", "probability": 0.89306640625}, {"start": 808.15, "end": 809.99, "word": " mu1", "probability": 0.966064453125}, {"start": 809.99, "end": 810.31, "word": " minus", "probability": 0.98681640625}, {"start": 810.31, "end": 810.95, "word": " mu2.", "probability": 0.975341796875}, {"start": 812.39, "end": 813.05, "word": " It's", "probability": 0.879638671875}, {"start": 813.05, "end": 813.43, "word": " clear", "probability": 0.9091796875}, {"start": 813.43, "end": 813.81, "word": " that", "probability": 0.9375}, {"start": 813.81, "end": 815.01, "word": " x2", "probability": 0.989501953125}, {"start": 815.01, "end": 815.31, "word": " bar", "probability": 0.94580078125}, {"start": 815.31, "end": 816.05, "word": " minus", "probability": 0.986328125}, {"start": 816.05, "end": 816.61, "word": " x1", "probability": 0.997314453125}, {"start": 816.61, "end": 816.91, "word": " bar", 
"probability": 0.9375}, {"start": 816.91, "end": 819.77, "word": " is", "probability": 0.92919921875}, {"start": 819.77, "end": 819.89, "word": " a", "probability": 0.9921875}, {"start": 819.89, "end": 820.11, "word": " point", "probability": 0.9794921875}, {"start": 820.11, "end": 820.67, "word": " estimate", "probability": 0.939453125}, {"start": 820.67, "end": 821.19, "word": " of", "probability": 0.6884765625}, {"start": 821.19, "end": 821.69, "word": " or", "probability": 0.63134765625}, {"start": 821.69, "end": 822.11, "word": " for", "probability": 0.95654296875}, {"start": 822.11, "end": 823.05, "word": " mu2", "probability": 0.946044921875}, {"start": 823.05, "end": 823.31, "word": " minus", "probability": 0.98779296875}, {"start": 823.31, "end": 823.75, "word": " mu1.", "probability": 0.96044921875}], "temperature": 1.0}, {"id": 30, "seek": 84925, "start": 825.07, "end": 849.25, "text": " So that's the point estimate of mu1 minus mu2 or mu2 minus mu1 just x1 minus x2 bar is the point estimate for mu1 minus mu2. 
So again, our goal is to test hypotheses of form or construct confidence interval for the difference between two populations means mu1 minus mu2.", "tokens": [407, 300, 311, 264, 935, 12539, 295, 2992, 16, 3175, 2992, 17, 420, 2992, 17, 3175, 2992, 16, 445, 2031, 16, 3175, 2031, 17, 2159, 307, 264, 935, 12539, 337, 2992, 16, 3175, 2992, 17, 13, 407, 797, 11, 527, 3387, 307, 281, 1500, 49969, 295, 1254, 420, 7690, 6687, 15035, 337, 264, 2649, 1296, 732, 12822, 1355, 2992, 16, 3175, 2992, 17, 13], "avg_logprob": -0.15120192307692307, "compression_ratio": 1.6728395061728396, "no_speech_prob": 0.0, "words": [{"start": 825.07, "end": 825.31, "word": " So", "probability": 0.91845703125}, {"start": 825.31, "end": 825.57, "word": " that's", "probability": 0.90087890625}, {"start": 825.57, "end": 825.69, "word": " the", "probability": 0.88427734375}, {"start": 825.69, "end": 825.91, "word": " point", "probability": 0.97900390625}, {"start": 825.91, "end": 826.53, "word": " estimate", "probability": 0.93408203125}, {"start": 826.53, "end": 828.01, "word": " of", "probability": 0.93115234375}, {"start": 828.01, "end": 829.99, "word": " mu1", "probability": 0.62939453125}, {"start": 829.99, "end": 830.23, "word": " minus", "probability": 0.92333984375}, {"start": 830.23, "end": 830.63, "word": " mu2", "probability": 0.970458984375}, {"start": 830.63, "end": 830.85, "word": " or", "probability": 0.65966796875}, {"start": 830.85, "end": 831.17, "word": " mu2", "probability": 0.9765625}, {"start": 831.17, "end": 831.49, "word": " minus", "probability": 0.98876953125}, {"start": 831.49, "end": 831.89, "word": " mu1", "probability": 0.9599609375}, {"start": 831.89, "end": 832.23, "word": " just", "probability": 0.5078125}, {"start": 832.23, "end": 833.61, "word": " x1", "probability": 0.744140625}, {"start": 833.61, "end": 833.93, "word": " minus", "probability": 0.98681640625}, {"start": 833.93, "end": 834.37, "word": " x2", "probability": 0.99658203125}, {"start": 834.37, "end": 
834.67, "word": " bar", "probability": 0.93701171875}, {"start": 834.67, "end": 835.13, "word": " is", "probability": 0.8349609375}, {"start": 835.13, "end": 835.27, "word": " the", "probability": 0.9169921875}, {"start": 835.27, "end": 835.51, "word": " point", "probability": 0.974609375}, {"start": 835.51, "end": 835.97, "word": " estimate", "probability": 0.9287109375}, {"start": 835.97, "end": 836.27, "word": " for", "probability": 0.9482421875}, {"start": 836.27, "end": 836.65, "word": " mu1", "probability": 0.9697265625}, {"start": 836.65, "end": 836.91, "word": " minus", "probability": 0.9892578125}, {"start": 836.91, "end": 837.27, "word": " mu2.", "probability": 0.979248046875}, {"start": 838.51, "end": 839.03, "word": " So", "probability": 0.9462890625}, {"start": 839.03, "end": 839.37, "word": " again,", "probability": 0.87939453125}, {"start": 839.65, "end": 839.91, "word": " our", "probability": 0.89453125}, {"start": 839.91, "end": 840.27, "word": " goal", "probability": 0.96630859375}, {"start": 840.27, "end": 840.69, "word": " is", "probability": 0.94482421875}, {"start": 840.69, "end": 840.87, "word": " to", "probability": 0.6298828125}, {"start": 840.87, "end": 841.13, "word": " test", "probability": 0.87890625}, {"start": 841.13, "end": 841.71, "word": " hypotheses", "probability": 0.599609375}, {"start": 841.71, "end": 842.17, "word": " of", "probability": 0.5947265625}, {"start": 842.17, "end": 842.63, "word": " form", "probability": 0.86083984375}, {"start": 842.63, "end": 843.43, "word": " or", "probability": 0.64208984375}, {"start": 843.43, "end": 843.99, "word": " construct", "probability": 0.95751953125}, {"start": 843.99, "end": 845.09, "word": " confidence", "probability": 0.9619140625}, {"start": 845.09, "end": 845.57, "word": " interval", "probability": 0.91064453125}, {"start": 845.57, "end": 846.47, "word": " for", "probability": 0.93310546875}, {"start": 846.47, "end": 846.65, "word": " the", "probability": 0.92236328125}, 
{"start": 846.65, "end": 847.05, "word": " difference", "probability": 0.8564453125}, {"start": 847.05, "end": 847.41, "word": " between", "probability": 0.8779296875}, {"start": 847.41, "end": 847.65, "word": " two", "probability": 0.9091796875}, {"start": 847.65, "end": 848.13, "word": " populations", "probability": 0.8671875}, {"start": 848.13, "end": 848.41, "word": " means", "probability": 0.20849609375}, {"start": 848.41, "end": 848.69, "word": " mu1", "probability": 0.9609375}, {"start": 848.69, "end": 848.91, "word": " minus", "probability": 0.98583984375}, {"start": 848.91, "end": 849.25, "word": " mu2.", "probability": 0.978271484375}], "temperature": 1.0}, {"id": 31, "seek": 87519, "start": 850.09, "end": 875.19, "text": " And the point estimate for this difference is X1 bar minus X2 bar. In this case, we have two cases. One is called independent or unrelated, the same meaning. Unrelated means independent populations. In this case, as we mentioned, samples selected from one population.", "tokens": [400, 264, 935, 12539, 337, 341, 2649, 307, 1783, 16, 2159, 3175, 1783, 17, 2159, 13, 682, 341, 1389, 11, 321, 362, 732, 3331, 13, 1485, 307, 1219, 6695, 420, 38967, 11, 264, 912, 3620, 13, 1156, 12004, 1355, 6695, 12822, 13, 682, 341, 1389, 11, 382, 321, 2835, 11, 10938, 8209, 490, 472, 4415, 13], "avg_logprob": -0.14254385755773177, "compression_ratio": 1.6144578313253013, "no_speech_prob": 0.0, "words": [{"start": 850.09, "end": 850.37, "word": " And", "probability": 0.8974609375}, {"start": 850.37, "end": 850.49, "word": " the", "probability": 0.90185546875}, {"start": 850.49, "end": 850.71, "word": " point", "probability": 0.9580078125}, {"start": 850.71, "end": 851.13, "word": " estimate", "probability": 0.93017578125}, {"start": 851.13, "end": 851.37, "word": " for", "probability": 0.935546875}, {"start": 851.37, "end": 851.79, "word": " this", "probability": 0.90673828125}, {"start": 851.79, "end": 852.31, "word": " difference", "probability": 
0.861328125}, {"start": 852.31, "end": 852.77, "word": " is", "probability": 0.94287109375}, {"start": 852.77, "end": 853.45, "word": " X1", "probability": 0.6826171875}, {"start": 853.45, "end": 853.71, "word": " bar", "probability": 0.86181640625}, {"start": 853.71, "end": 854.21, "word": " minus", "probability": 0.9677734375}, {"start": 854.21, "end": 856.07, "word": " X2", "probability": 0.990966796875}, {"start": 856.07, "end": 856.33, "word": " bar.", "probability": 0.95068359375}, {"start": 860.01, "end": 860.73, "word": " In", "probability": 0.744140625}, {"start": 860.73, "end": 861.05, "word": " this", "probability": 0.947265625}, {"start": 861.05, "end": 861.45, "word": " case,", "probability": 0.9111328125}, {"start": 861.59, "end": 861.85, "word": " we", "probability": 0.9619140625}, {"start": 861.85, "end": 862.21, "word": " have", "probability": 0.94140625}, {"start": 862.21, "end": 863.47, "word": " two", "probability": 0.91943359375}, {"start": 863.47, "end": 863.97, "word": " cases.", "probability": 0.9267578125}, {"start": 864.33, "end": 864.53, "word": " One", "probability": 0.91552734375}, {"start": 864.53, "end": 864.69, "word": " is", "probability": 0.82958984375}, {"start": 864.69, "end": 864.93, "word": " called", "probability": 0.875}, {"start": 864.93, "end": 865.35, "word": " independent", "probability": 0.89111328125}, {"start": 865.35, "end": 865.85, "word": " or", "probability": 0.9052734375}, {"start": 865.85, "end": 866.39, "word": " unrelated,", "probability": 0.96875}, {"start": 867.01, "end": 867.43, "word": " the", "probability": 0.88037109375}, {"start": 867.43, "end": 867.61, "word": " same", "probability": 0.9013671875}, {"start": 867.61, "end": 867.91, "word": " meaning.", "probability": 0.869140625}, {"start": 868.09, "end": 868.63, "word": " Unrelated", "probability": 0.908203125}, {"start": 868.63, "end": 869.15, "word": " means", "probability": 0.935546875}, {"start": 869.15, "end": 869.73, "word": " independent", 
"probability": 0.8916015625}, {"start": 869.73, "end": 871.09, "word": " populations.", "probability": 0.82421875}, {"start": 871.79, "end": 871.95, "word": " In", "probability": 0.93896484375}, {"start": 871.95, "end": 872.13, "word": " this", "probability": 0.9423828125}, {"start": 872.13, "end": 872.37, "word": " case,", "probability": 0.912109375}, {"start": 872.45, "end": 872.53, "word": " as", "probability": 0.9462890625}, {"start": 872.53, "end": 872.65, "word": " we", "probability": 0.9326171875}, {"start": 872.65, "end": 872.91, "word": " mentioned,", "probability": 0.822265625}, {"start": 873.11, "end": 873.43, "word": " samples", "probability": 0.477294921875}, {"start": 873.43, "end": 873.99, "word": " selected", "probability": 0.869140625}, {"start": 873.99, "end": 874.37, "word": " from", "probability": 0.888671875}, {"start": 874.37, "end": 874.67, "word": " one", "probability": 0.9140625}, {"start": 874.67, "end": 875.19, "word": " population.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 32, "seek": 90423, "start": 876.47, "end": 904.23, "text": " has no effect actually on the sample selected from the other population. As we mentioned, there are two groups, males and females. So group one does not affect group two. I mean males population does not affect the population of the female. So in this case, we have unrelated or independent populations. 
In this case, there are two scenarios.", "tokens": [575, 572, 1802, 767, 322, 264, 6889, 8209, 490, 264, 661, 4415, 13, 1018, 321, 2835, 11, 456, 366, 732, 3935, 11, 20776, 293, 21529, 13, 407, 1594, 472, 775, 406, 3345, 1594, 732, 13, 286, 914, 20776, 4415, 775, 406, 3345, 264, 4415, 295, 264, 6556, 13, 407, 294, 341, 1389, 11, 321, 362, 38967, 420, 6695, 12822, 13, 682, 341, 1389, 11, 456, 366, 732, 15077, 13], "avg_logprob": -0.13973214626312255, "compression_ratio": 1.854054054054054, "no_speech_prob": 0.0, "words": [{"start": 876.47, "end": 876.83, "word": " has", "probability": 0.68115234375}, {"start": 876.83, "end": 877.09, "word": " no", "probability": 0.94970703125}, {"start": 877.09, "end": 877.57, "word": " effect", "probability": 0.8779296875}, {"start": 877.57, "end": 878.13, "word": " actually", "probability": 0.7568359375}, {"start": 878.13, "end": 878.33, "word": " on", "probability": 0.9091796875}, {"start": 878.33, "end": 878.53, "word": " the", "probability": 0.85888671875}, {"start": 878.53, "end": 878.87, "word": " sample", "probability": 0.857421875}, {"start": 878.87, "end": 879.85, "word": " selected", "probability": 0.7880859375}, {"start": 879.85, "end": 880.11, "word": " from", "probability": 0.8857421875}, {"start": 880.11, "end": 880.31, "word": " the", "probability": 0.91552734375}, {"start": 880.31, "end": 880.57, "word": " other", "probability": 0.8896484375}, {"start": 880.57, "end": 881.05, "word": " population.", "probability": 0.9287109375}, {"start": 881.47, "end": 881.71, "word": " As", "probability": 0.96728515625}, {"start": 881.71, "end": 881.85, "word": " we", "probability": 0.95263671875}, {"start": 881.85, "end": 882.11, "word": " mentioned,", "probability": 0.8251953125}, {"start": 882.31, "end": 882.33, "word": " there", "probability": 0.79443359375}, {"start": 882.33, "end": 883.09, "word": " are", "probability": 0.943359375}, {"start": 883.09, "end": 883.27, "word": " two", "probability": 0.9228515625}, {"start": 
883.27, "end": 883.69, "word": " groups,", "probability": 0.94921875}, {"start": 884.35, "end": 884.63, "word": " males", "probability": 0.93603515625}, {"start": 884.63, "end": 885.05, "word": " and", "probability": 0.939453125}, {"start": 885.05, "end": 885.37, "word": " females.", "probability": 0.94091796875}, {"start": 886.47, "end": 886.69, "word": " So", "probability": 0.91064453125}, {"start": 886.69, "end": 886.97, "word": " group", "probability": 0.76318359375}, {"start": 886.97, "end": 887.25, "word": " one", "probability": 0.8349609375}, {"start": 887.25, "end": 887.75, "word": " does", "probability": 0.970703125}, {"start": 887.75, "end": 887.95, "word": " not", "probability": 0.94921875}, {"start": 887.95, "end": 888.43, "word": " affect", "probability": 0.865234375}, {"start": 888.43, "end": 889.99, "word": " group", "probability": 0.75048828125}, {"start": 889.99, "end": 890.23, "word": " two.", "probability": 0.9423828125}, {"start": 890.39, "end": 890.39, "word": " I", "probability": 0.9814453125}, {"start": 890.39, "end": 890.53, "word": " mean", "probability": 0.97119140625}, {"start": 890.53, "end": 891.61, "word": " males", "probability": 0.397705078125}, {"start": 891.61, "end": 892.17, "word": " population", "probability": 0.85107421875}, {"start": 892.17, "end": 892.69, "word": " does", "probability": 0.9404296875}, {"start": 892.69, "end": 892.89, "word": " not", "probability": 0.94580078125}, {"start": 892.89, "end": 893.35, "word": " affect", "probability": 0.912109375}, {"start": 893.35, "end": 894.37, "word": " the", "probability": 0.8779296875}, {"start": 894.37, "end": 894.77, "word": " population", "probability": 0.95458984375}, {"start": 894.77, "end": 895.05, "word": " of", "probability": 0.9638671875}, {"start": 895.05, "end": 895.15, "word": " the", "probability": 0.39208984375}, {"start": 895.15, "end": 895.41, "word": " female.", "probability": 0.62451171875}, {"start": 895.75, "end": 895.89, "word": " So", "probability": 
0.9345703125}, {"start": 895.89, "end": 896.03, "word": " in", "probability": 0.86669921875}, {"start": 896.03, "end": 896.19, "word": " this", "probability": 0.943359375}, {"start": 896.19, "end": 896.41, "word": " case,", "probability": 0.90576171875}, {"start": 896.47, "end": 896.61, "word": " we", "probability": 0.9619140625}, {"start": 896.61, "end": 896.91, "word": " have", "probability": 0.94140625}, {"start": 896.91, "end": 897.53, "word": " unrelated", "probability": 0.9677734375}, {"start": 897.53, "end": 897.85, "word": " or", "probability": 0.9453125}, {"start": 897.85, "end": 898.27, "word": " independent", "probability": 0.8984375}, {"start": 898.27, "end": 900.25, "word": " populations.", "probability": 0.9560546875}, {"start": 901.61, "end": 902.01, "word": " In", "probability": 0.95458984375}, {"start": 902.01, "end": 902.27, "word": " this", "probability": 0.9443359375}, {"start": 902.27, "end": 902.67, "word": " case,", "probability": 0.90576171875}, {"start": 902.85, "end": 903.21, "word": " there", "probability": 0.90673828125}, {"start": 903.21, "end": 903.47, "word": " are", "probability": 0.93701171875}, {"start": 903.47, "end": 903.71, "word": " two", "probability": 0.92822265625}, {"start": 903.71, "end": 904.23, "word": " scenarios.", "probability": 0.919921875}], "temperature": 1.0}, {"id": 33, "seek": 92827, "start": 905.73, "end": 928.27, "text": " Sigma is unknown, but we assume they are equal. So sigma 1 and sigma 2 are unknown, but we assume they are equal. In this case, we are going to use something called pooled variance test. 
The other scenario, if the two sigma is unknown, but", "tokens": [36595, 307, 9841, 11, 457, 321, 6552, 436, 366, 2681, 13, 407, 12771, 502, 293, 12771, 568, 366, 9841, 11, 457, 321, 6552, 436, 366, 2681, 13, 682, 341, 1389, 11, 321, 366, 516, 281, 764, 746, 1219, 7005, 292, 21977, 1500, 13, 440, 661, 9005, 11, 498, 264, 732, 12771, 307, 9841, 11, 457], "avg_logprob": -0.24804688724023954, "compression_ratio": 1.6901408450704225, "no_speech_prob": 0.0, "words": [{"start": 905.73, "end": 906.27, "word": " Sigma", "probability": 0.1204833984375}, {"start": 906.27, "end": 906.57, "word": " is", "probability": 0.38671875}, {"start": 906.57, "end": 907.07, "word": " unknown,", "probability": 0.89013671875}, {"start": 907.99, "end": 909.41, "word": " but", "probability": 0.91015625}, {"start": 909.41, "end": 909.83, "word": " we", "probability": 0.92626953125}, {"start": 909.83, "end": 910.67, "word": " assume", "probability": 0.8955078125}, {"start": 910.67, "end": 910.91, "word": " they", "probability": 0.84765625}, {"start": 910.91, "end": 911.07, "word": " are", "probability": 0.9248046875}, {"start": 911.07, "end": 911.43, "word": " equal.", "probability": 0.89697265625}, {"start": 911.85, "end": 911.99, "word": " So", "probability": 0.62060546875}, {"start": 911.99, "end": 912.21, "word": " sigma", "probability": 0.310546875}, {"start": 912.21, "end": 912.39, "word": " 1", "probability": 0.67919921875}, {"start": 912.39, "end": 912.55, "word": " and", "probability": 0.94775390625}, {"start": 912.55, "end": 912.77, "word": " sigma", "probability": 0.91552734375}, {"start": 912.77, "end": 912.95, "word": " 2", "probability": 0.982421875}, {"start": 912.95, "end": 913.15, "word": " are", "probability": 0.93603515625}, {"start": 913.15, "end": 913.43, "word": " unknown,", "probability": 0.8896484375}, {"start": 913.65, "end": 913.81, "word": " but", "probability": 0.92041015625}, {"start": 913.81, "end": 913.97, "word": " we", "probability": 0.95703125}, {"start": 
913.97, "end": 914.41, "word": " assume", "probability": 0.91064453125}, {"start": 914.41, "end": 914.61, "word": " they", "probability": 0.873046875}, {"start": 914.61, "end": 914.75, "word": " are", "probability": 0.93994140625}, {"start": 914.75, "end": 915.05, "word": " equal.", "probability": 0.8955078125}, {"start": 915.97, "end": 916.49, "word": " In", "probability": 0.9423828125}, {"start": 916.49, "end": 916.75, "word": " this", "probability": 0.9501953125}, {"start": 916.75, "end": 917.21, "word": " case,", "probability": 0.90771484375}, {"start": 917.93, "end": 918.57, "word": " we", "probability": 0.955078125}, {"start": 918.57, "end": 918.69, "word": " are", "probability": 0.92333984375}, {"start": 918.69, "end": 918.95, "word": " going", "probability": 0.943359375}, {"start": 918.95, "end": 919.13, "word": " to", "probability": 0.97021484375}, {"start": 919.13, "end": 919.45, "word": " use", "probability": 0.880859375}, {"start": 919.45, "end": 919.83, "word": " something", "probability": 0.82958984375}, {"start": 919.83, "end": 920.33, "word": " called", "probability": 0.89208984375}, {"start": 920.33, "end": 920.77, "word": " pooled", "probability": 0.655029296875}, {"start": 920.77, "end": 921.19, "word": " variance", "probability": 0.89453125}, {"start": 921.19, "end": 921.67, "word": " test.", "probability": 0.87451171875}, {"start": 923.27, "end": 923.51, "word": " The", "probability": 0.865234375}, {"start": 923.51, "end": 923.85, "word": " other", "probability": 0.87451171875}, {"start": 923.85, "end": 925.01, "word": " scenario,", "probability": 0.84375}, {"start": 925.41, "end": 925.69, "word": " if", "probability": 0.9462890625}, {"start": 925.69, "end": 925.89, "word": " the", "probability": 0.919921875}, {"start": 925.89, "end": 926.09, "word": " two", "probability": 0.8671875}, {"start": 926.09, "end": 926.39, "word": " sigma", "probability": 0.6904296875}, {"start": 926.39, "end": 926.61, "word": " is", "probability": 0.31005859375}, 
{"start": 926.61, "end": 926.95, "word": " unknown,", "probability": 0.90380859375}, {"start": 927.81, "end": 928.27, "word": " but", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 34, "seek": 95675, "start": 928.83, "end": 956.75, "text": " They are not equal. In this case, we are going to something called separate variance thickness. So in this chapter, we focus just on unknown sigmas. Because in real life, population variances are unknown. So we have to focus on this case. I mean, sigmas are unknown, but maybe we assume they are equal, or we assume they are not equal.", "tokens": [814, 366, 406, 2681, 13, 682, 341, 1389, 11, 321, 366, 516, 281, 746, 1219, 4994, 21977, 14855, 13, 407, 294, 341, 7187, 11, 321, 1879, 445, 322, 9841, 4556, 3799, 13, 1436, 294, 957, 993, 11, 4415, 1374, 21518, 366, 9841, 13, 407, 321, 362, 281, 1879, 322, 341, 1389, 13, 286, 914, 11, 4556, 3799, 366, 9841, 11, 457, 1310, 321, 6552, 436, 366, 2681, 11, 420, 321, 6552, 436, 366, 406, 2681, 13], "avg_logprob": -0.23579545222319565, "compression_ratio": 1.7409326424870466, "no_speech_prob": 0.0, "words": [{"start": 928.83, "end": 929.13, "word": " They", "probability": 0.351318359375}, {"start": 929.13, "end": 929.65, "word": " are", "probability": 0.9375}, {"start": 929.65, "end": 929.99, "word": " not", "probability": 0.9482421875}, {"start": 929.99, "end": 930.37, "word": " equal.", "probability": 0.90185546875}, {"start": 930.49, "end": 930.55, "word": " In", "probability": 0.92138671875}, {"start": 930.55, "end": 930.71, "word": " this", "probability": 0.94384765625}, {"start": 930.71, "end": 930.93, "word": " case,", "probability": 0.9140625}, {"start": 931.01, "end": 931.07, "word": " we", "probability": 0.93896484375}, {"start": 931.07, "end": 931.19, "word": " are", "probability": 0.88330078125}, {"start": 931.19, "end": 931.41, "word": " going", "probability": 0.94384765625}, {"start": 931.41, "end": 931.57, "word": " to", "probability": 0.54638671875}, {"start": 
931.57, "end": 931.89, "word": " something", "probability": 0.76611328125}, {"start": 931.89, "end": 932.27, "word": " called", "probability": 0.8603515625}, {"start": 932.27, "end": 933.01, "word": " separate", "probability": 0.76904296875}, {"start": 933.01, "end": 934.33, "word": " variance", "probability": 0.8349609375}, {"start": 934.33, "end": 934.87, "word": " thickness.", "probability": 0.1419677734375}, {"start": 936.01, "end": 936.65, "word": " So", "probability": 0.89306640625}, {"start": 936.65, "end": 937.61, "word": " in", "probability": 0.38623046875}, {"start": 937.61, "end": 937.87, "word": " this", "probability": 0.94189453125}, {"start": 937.87, "end": 938.55, "word": " chapter,", "probability": 0.8212890625}, {"start": 938.69, "end": 938.75, "word": " we", "probability": 0.70361328125}, {"start": 938.75, "end": 938.99, "word": " focus", "probability": 0.63916015625}, {"start": 938.99, "end": 939.37, "word": " just", "probability": 0.896484375}, {"start": 939.37, "end": 939.73, "word": " on", "probability": 0.94970703125}, {"start": 939.73, "end": 940.15, "word": " unknown", "probability": 0.88623046875}, {"start": 940.15, "end": 940.63, "word": " sigmas.", "probability": 0.716796875}, {"start": 941.21, "end": 941.71, "word": " Because", "probability": 0.9091796875}, {"start": 941.71, "end": 941.97, "word": " in", "probability": 0.90478515625}, {"start": 941.97, "end": 942.27, "word": " real", "probability": 0.95751953125}, {"start": 942.27, "end": 942.67, "word": " life,", "probability": 0.9189453125}, {"start": 943.73, "end": 944.47, "word": " population", "probability": 0.90576171875}, {"start": 944.47, "end": 944.99, "word": " variances", "probability": 0.908203125}, {"start": 944.99, "end": 945.27, "word": " are", "probability": 0.94140625}, {"start": 945.27, "end": 945.63, "word": " unknown.", "probability": 0.8857421875}, {"start": 945.95, "end": 946.15, "word": " So", "probability": 0.9580078125}, {"start": 946.15, "end": 946.29, "word": 
" we", "probability": 0.919921875}, {"start": 946.29, "end": 946.43, "word": " have", "probability": 0.9453125}, {"start": 946.43, "end": 946.59, "word": " to", "probability": 0.966796875}, {"start": 946.59, "end": 946.93, "word": " focus", "probability": 0.94091796875}, {"start": 946.93, "end": 947.19, "word": " on", "probability": 0.9501953125}, {"start": 947.19, "end": 947.53, "word": " this", "probability": 0.646484375}, {"start": 947.53, "end": 949.15, "word": " case.", "probability": 0.87548828125}, {"start": 949.37, "end": 949.47, "word": " I", "probability": 0.94677734375}, {"start": 949.47, "end": 949.71, "word": " mean,", "probability": 0.96484375}, {"start": 949.83, "end": 950.37, "word": " sigmas", "probability": 0.7021484375}, {"start": 950.37, "end": 950.61, "word": " are", "probability": 0.51171875}, {"start": 950.61, "end": 950.97, "word": " unknown,", "probability": 0.55078125}, {"start": 951.45, "end": 951.61, "word": " but", "probability": 0.91943359375}, {"start": 951.61, "end": 951.97, "word": " maybe", "probability": 0.95166015625}, {"start": 951.97, "end": 952.25, "word": " we", "probability": 0.9287109375}, {"start": 952.25, "end": 952.73, "word": " assume", "probability": 0.908203125}, {"start": 952.73, "end": 952.99, "word": " they", "probability": 0.830078125}, {"start": 952.99, "end": 953.17, "word": " are", "probability": 0.93798828125}, {"start": 953.17, "end": 953.55, "word": " equal,", "probability": 0.892578125}, {"start": 953.95, "end": 954.29, "word": " or", "probability": 0.96142578125}, {"start": 954.29, "end": 954.53, "word": " we", "probability": 0.96484375}, {"start": 954.53, "end": 954.87, "word": " assume", "probability": 0.92431640625}, {"start": 954.87, "end": 955.09, "word": " they", "probability": 0.88720703125}, {"start": 955.09, "end": 955.45, "word": " are", "probability": 0.9404296875}, {"start": 955.45, "end": 956.43, "word": " not", "probability": 0.94970703125}, {"start": 956.43, "end": 956.75, "word": " equal.", 
"probability": 0.88671875}], "temperature": 1.0}, {"id": 35, "seek": 98392, "start": 960.02, "end": 983.92, "text": " Now, the hypothesis test in this case, if we are talking about two population means, are, again, two-tailed test, the same as we discussed before, or one-tailed test, and one tail has two cases, lower tail and upper tail. For example, if we are interested in two-tailed test, we have to state", "tokens": [823, 11, 264, 17291, 1500, 294, 341, 1389, 11, 498, 321, 366, 1417, 466, 732, 4415, 1355, 11, 366, 11, 797, 11, 732, 12, 14430, 292, 1500, 11, 264, 912, 382, 321, 7152, 949, 11, 420, 472, 12, 14430, 292, 1500, 11, 293, 472, 6838, 575, 732, 3331, 11, 3126, 6838, 293, 6597, 6838, 13, 1171, 1365, 11, 498, 321, 366, 3102, 294, 732, 12, 14430, 292, 1500, 11, 321, 362, 281, 1785], "avg_logprob": -0.1983741483575589, "compression_ratio": 1.69364161849711, "no_speech_prob": 0.0, "words": [{"start": 960.02, "end": 960.58, "word": " Now,", "probability": 0.73388671875}, {"start": 961.0, "end": 961.24, "word": " the", "probability": 0.7783203125}, {"start": 961.24, "end": 961.86, "word": " hypothesis", "probability": 0.783203125}, {"start": 961.86, "end": 962.22, "word": " test", "probability": 0.72119140625}, {"start": 962.22, "end": 962.46, "word": " in", "probability": 0.796875}, {"start": 962.46, "end": 962.7, "word": " this", "probability": 0.947265625}, {"start": 962.7, "end": 963.1, "word": " case,", "probability": 0.91650390625}, {"start": 963.52, "end": 963.74, "word": " if", "probability": 0.95068359375}, {"start": 963.74, "end": 963.88, "word": " we", "probability": 0.9521484375}, {"start": 963.88, "end": 964.0, "word": " are", "probability": 0.9169921875}, {"start": 964.0, "end": 964.44, "word": " talking", "probability": 0.84814453125}, {"start": 964.44, "end": 964.88, "word": " about", "probability": 0.89111328125}, {"start": 964.88, "end": 966.32, "word": " two", "probability": 0.8583984375}, {"start": 966.32, "end": 966.72, "word": " 
population", "probability": 0.80615234375}, {"start": 966.72, "end": 967.14, "word": " means,", "probability": 0.78076171875}, {"start": 967.36, "end": 967.7, "word": " are,", "probability": 0.875}, {"start": 968.26, "end": 968.28, "word": " again,", "probability": 0.96142578125}, {"start": 969.12, "end": 969.3, "word": " two", "probability": 0.92578125}, {"start": 969.3, "end": 969.64, "word": "-tailed", "probability": 0.6827799479166666}, {"start": 969.64, "end": 969.86, "word": " test,", "probability": 0.40234375}, {"start": 969.96, "end": 970.1, "word": " the", "probability": 0.8935546875}, {"start": 970.1, "end": 970.28, "word": " same", "probability": 0.90673828125}, {"start": 970.28, "end": 970.48, "word": " as", "probability": 0.9599609375}, {"start": 970.48, "end": 970.64, "word": " we", "probability": 0.953125}, {"start": 970.64, "end": 971.12, "word": " discussed", "probability": 0.85400390625}, {"start": 971.12, "end": 971.62, "word": " before,", "probability": 0.87255859375}, {"start": 972.08, "end": 972.38, "word": " or", "probability": 0.9599609375}, {"start": 972.38, "end": 972.68, "word": " one", "probability": 0.92578125}, {"start": 972.68, "end": 973.04, "word": "-tailed", "probability": 0.8824869791666666}, {"start": 973.04, "end": 973.24, "word": " test,", "probability": 0.8837890625}, {"start": 973.3, "end": 973.42, "word": " and", "probability": 0.9296875}, {"start": 973.42, "end": 973.62, "word": " one", "probability": 0.9326171875}, {"start": 973.62, "end": 973.8, "word": " tail", "probability": 0.537109375}, {"start": 973.8, "end": 974.08, "word": " has", "probability": 0.90576171875}, {"start": 974.08, "end": 974.54, "word": " two", "probability": 0.92919921875}, {"start": 974.54, "end": 975.58, "word": " cases,", "probability": 0.92578125}, {"start": 975.78, "end": 976.02, "word": " lower", "probability": 0.8759765625}, {"start": 976.02, "end": 976.36, "word": " tail", "probability": 0.82275390625}, {"start": 976.36, "end": 976.58, 
"word": " and", "probability": 0.92724609375}, {"start": 976.58, "end": 976.86, "word": " upper", "probability": 0.78515625}, {"start": 976.86, "end": 977.14, "word": " tail.", "probability": 0.87060546875}, {"start": 977.76, "end": 978.16, "word": " For", "probability": 0.9599609375}, {"start": 978.16, "end": 978.5, "word": " example,", "probability": 0.97314453125}, {"start": 979.26, "end": 979.54, "word": " if", "probability": 0.953125}, {"start": 979.54, "end": 979.68, "word": " we", "probability": 0.9033203125}, {"start": 979.68, "end": 979.8, "word": " are", "probability": 0.939453125}, {"start": 979.8, "end": 980.22, "word": " interested", "probability": 0.8603515625}, {"start": 980.22, "end": 980.44, "word": " in", "probability": 0.94189453125}, {"start": 980.44, "end": 980.58, "word": " two", "probability": 0.91845703125}, {"start": 980.58, "end": 980.88, "word": "-tailed", "probability": 0.8896484375}, {"start": 980.88, "end": 981.16, "word": " test,", "probability": 0.86865234375}, {"start": 982.1, "end": 983.08, "word": " we", "probability": 0.955078125}, {"start": 983.08, "end": 983.32, "word": " have", "probability": 0.94580078125}, {"start": 983.32, "end": 983.5, "word": " to", "probability": 0.962890625}, {"start": 983.5, "end": 983.92, "word": " state", "probability": 0.861328125}], "temperature": 1.0}, {"id": 36, "seek": 100466, "start": 987.24, "end": 1004.66, "text": " So the null hypothesis should be mu1 equals mu2, which means there is no difference between the two variations. In this case, it means that the difference equals zero. 
So either you can state your null hypothesis by using this way.", "tokens": [407, 264, 18184, 17291, 820, 312, 2992, 16, 6915, 2992, 17, 11, 597, 1355, 456, 307, 572, 2649, 1296, 264, 732, 17840, 13, 682, 341, 1389, 11, 309, 1355, 300, 264, 2649, 6915, 4018, 13, 407, 2139, 291, 393, 1785, 428, 18184, 17291, 538, 1228, 341, 636, 13], "avg_logprob": -0.30564413265306123, "compression_ratio": 1.5364238410596027, "no_speech_prob": 0.0, "words": [{"start": 987.2400000000001, "end": 987.9200000000001, "word": " So", "probability": 0.1510009765625}, {"start": 987.9200000000001, "end": 988.6, "word": " the", "probability": 0.35498046875}, {"start": 988.6, "end": 988.76, "word": " null", "probability": 0.91845703125}, {"start": 988.76, "end": 989.24, "word": " hypothesis", "probability": 0.72314453125}, {"start": 989.24, "end": 989.7, "word": " should", "probability": 0.91455078125}, {"start": 989.7, "end": 990.04, "word": " be", "probability": 0.92626953125}, {"start": 990.04, "end": 990.58, "word": " mu1", "probability": 0.542236328125}, {"start": 990.58, "end": 991.22, "word": " equals", "probability": 0.428466796875}, {"start": 991.22, "end": 991.62, "word": " mu2,", "probability": 0.8818359375}, {"start": 991.74, "end": 991.78, "word": " which", "probability": 0.73681640625}, {"start": 991.78, "end": 991.98, "word": " means", "probability": 0.9296875}, {"start": 991.98, "end": 992.22, "word": " there", "probability": 0.7744140625}, {"start": 992.22, "end": 992.48, "word": " is", "probability": 0.890625}, {"start": 992.48, "end": 992.96, "word": " no", "probability": 0.94287109375}, {"start": 992.96, "end": 993.44, "word": " difference", "probability": 0.857421875}, {"start": 993.44, "end": 993.88, "word": " between", "probability": 0.8515625}, {"start": 993.88, "end": 994.72, "word": " the", "probability": 0.8720703125}, {"start": 994.72, "end": 994.9, "word": " two", "probability": 0.88037109375}, {"start": 994.9, "end": 995.32, "word": " variations.", "probability": 
0.1343994140625}, {"start": 995.68, "end": 995.8, "word": " In", "probability": 0.68115234375}, {"start": 995.8, "end": 996.02, "word": " this", "probability": 0.94482421875}, {"start": 996.02, "end": 996.28, "word": " case,", "probability": 0.91650390625}, {"start": 996.4, "end": 996.44, "word": " it", "probability": 0.91015625}, {"start": 996.44, "end": 996.88, "word": " means", "probability": 0.9384765625}, {"start": 996.88, "end": 997.78, "word": " that", "probability": 0.896484375}, {"start": 997.78, "end": 998.68, "word": " the", "probability": 0.9140625}, {"start": 998.68, "end": 999.1, "word": " difference", "probability": 0.87109375}, {"start": 999.1, "end": 999.5, "word": " equals", "probability": 0.8505859375}, {"start": 999.5, "end": 999.82, "word": " zero.", "probability": 0.70947265625}, {"start": 1000.28, "end": 1000.54, "word": " So", "probability": 0.880859375}, {"start": 1000.54, "end": 1000.84, "word": " either", "probability": 0.84521484375}, {"start": 1000.84, "end": 1001.14, "word": " you", "probability": 0.9609375}, {"start": 1001.14, "end": 1001.48, "word": " can", "probability": 0.9423828125}, {"start": 1001.48, "end": 1002.44, "word": " state", "probability": 0.9521484375}, {"start": 1002.44, "end": 1002.7, "word": " your", "probability": 0.87109375}, {"start": 1002.7, "end": 1002.88, "word": " null", "probability": 0.9375}, {"start": 1002.88, "end": 1003.4, "word": " hypothesis", "probability": 0.826171875}, {"start": 1003.4, "end": 1003.76, "word": " by", "probability": 0.95068359375}, {"start": 1003.76, "end": 1004.08, "word": " using", "probability": 0.9326171875}, {"start": 1004.08, "end": 1004.38, "word": " this", "probability": 0.927734375}, {"start": 1004.38, "end": 1004.66, "word": " way.", "probability": 0.912109375}], "temperature": 1.0}, {"id": 37, "seek": 103453, "start": 1005.77, "end": 1034.53, "text": " Mi1 equals Mi2, or the other way, the difference is zero. They are equivalent. 
Because if the means are equal, it means the difference is nothing, is zero. Against the alternative hypothesis, Mi1 does not equal Mi2, or the difference is not zero. So you may use the other format, maybe it's better, Mi1 minus Mi2 equals zero, against the difference is not zero, or the other one, either one is equal.", "tokens": [10204, 16, 6915, 10204, 17, 11, 420, 264, 661, 636, 11, 264, 2649, 307, 4018, 13, 814, 366, 10344, 13, 1436, 498, 264, 1355, 366, 2681, 11, 309, 1355, 264, 2649, 307, 1825, 11, 307, 4018, 13, 29995, 264, 8535, 17291, 11, 10204, 16, 775, 406, 2681, 10204, 17, 11, 420, 264, 2649, 307, 406, 4018, 13, 407, 291, 815, 764, 264, 661, 7877, 11, 1310, 309, 311, 1101, 11, 10204, 16, 3175, 10204, 17, 6915, 4018, 11, 1970, 264, 2649, 307, 406, 4018, 11, 420, 264, 661, 472, 11, 2139, 472, 307, 2681, 13], "avg_logprob": -0.20817056919137636, "compression_ratio": 1.9560975609756097, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1005.77, "end": 1006.21, "word": " Mi1", "probability": 0.36962890625}, {"start": 1006.21, "end": 1006.59, "word": " equals", "probability": 0.4697265625}, {"start": 1006.59, "end": 1007.03, "word": " Mi2,", "probability": 0.8212890625}, {"start": 1007.19, "end": 1007.33, "word": " or", "probability": 0.91650390625}, {"start": 1007.33, "end": 1007.47, "word": " the", "probability": 0.83740234375}, {"start": 1007.47, "end": 1007.73, "word": " other", "probability": 0.87109375}, {"start": 1007.73, "end": 1008.01, "word": " way,", "probability": 0.9482421875}, {"start": 1008.59, "end": 1008.71, "word": " the", "probability": 0.88037109375}, {"start": 1008.71, "end": 1009.09, "word": " difference", "probability": 0.8603515625}, {"start": 1009.09, "end": 1009.35, "word": " is", "probability": 0.9423828125}, {"start": 1009.35, "end": 1009.61, "word": " zero.", "probability": 0.69970703125}, {"start": 1009.95, "end": 1010.09, "word": " They", "probability": 0.775390625}, {"start": 1010.09, "end": 1010.25, "word": " 
are", "probability": 0.92822265625}, {"start": 1010.25, "end": 1010.65, "word": " equivalent.", "probability": 0.89111328125}, {"start": 1011.09, "end": 1011.37, "word": " Because", "probability": 0.89306640625}, {"start": 1011.37, "end": 1011.55, "word": " if", "probability": 0.92529296875}, {"start": 1011.55, "end": 1011.73, "word": " the", "probability": 0.90576171875}, {"start": 1011.73, "end": 1011.93, "word": " means", "probability": 0.88916015625}, {"start": 1011.93, "end": 1012.11, "word": " are", "probability": 0.9404296875}, {"start": 1012.11, "end": 1012.45, "word": " equal,", "probability": 0.88134765625}, {"start": 1012.99, "end": 1013.33, "word": " it", "probability": 0.90234375}, {"start": 1013.33, "end": 1013.71, "word": " means", "probability": 0.9306640625}, {"start": 1013.71, "end": 1014.31, "word": " the", "probability": 0.83642578125}, {"start": 1014.31, "end": 1014.79, "word": " difference", "probability": 0.87841796875}, {"start": 1014.79, "end": 1015.07, "word": " is", "probability": 0.9345703125}, {"start": 1015.07, "end": 1015.33, "word": " nothing,", "probability": 0.88427734375}, {"start": 1015.45, "end": 1015.59, "word": " is", "probability": 0.51123046875}, {"start": 1015.59, "end": 1015.85, "word": " zero.", "probability": 0.896484375}, {"start": 1016.61, "end": 1017.05, "word": " Against", "probability": 0.693359375}, {"start": 1017.05, "end": 1017.27, "word": " the", "probability": 0.703125}, {"start": 1017.27, "end": 1017.67, "word": " alternative", "probability": 0.8515625}, {"start": 1017.67, "end": 1018.17, "word": " hypothesis,", "probability": 0.91552734375}, {"start": 1018.91, "end": 1019.33, "word": " Mi1", "probability": 0.940185546875}, {"start": 1019.33, "end": 1019.53, "word": " does", "probability": 0.97314453125}, {"start": 1019.53, "end": 1019.79, "word": " not", "probability": 0.95263671875}, {"start": 1019.79, "end": 1020.21, "word": " equal", "probability": 0.92724609375}, {"start": 1020.21, "end": 1020.67, "word": 
" Mi2,", "probability": 0.973876953125}, {"start": 1021.19, "end": 1021.61, "word": " or", "probability": 0.95751953125}, {"start": 1021.61, "end": 1022.55, "word": " the", "probability": 0.8330078125}, {"start": 1022.55, "end": 1022.97, "word": " difference", "probability": 0.884765625}, {"start": 1022.97, "end": 1023.19, "word": " is", "probability": 0.9482421875}, {"start": 1023.19, "end": 1023.35, "word": " not", "probability": 0.947265625}, {"start": 1023.35, "end": 1023.67, "word": " zero.", "probability": 0.90185546875}, {"start": 1024.09, "end": 1024.49, "word": " So", "probability": 0.94677734375}, {"start": 1024.49, "end": 1024.83, "word": " you", "probability": 0.76318359375}, {"start": 1024.83, "end": 1025.15, "word": " may", "probability": 0.9189453125}, {"start": 1025.15, "end": 1025.49, "word": " use", "probability": 0.87890625}, {"start": 1025.49, "end": 1025.81, "word": " the", "probability": 0.9140625}, {"start": 1025.81, "end": 1026.13, "word": " other", "probability": 0.8896484375}, {"start": 1026.13, "end": 1027.67, "word": " format,", "probability": 0.86279296875}, {"start": 1027.81, "end": 1027.97, "word": " maybe", "probability": 0.92333984375}, {"start": 1027.97, "end": 1028.19, "word": " it's", "probability": 0.816162109375}, {"start": 1028.19, "end": 1028.51, "word": " better,", "probability": 0.912109375}, {"start": 1028.73, "end": 1029.01, "word": " Mi1", "probability": 0.962158203125}, {"start": 1029.01, "end": 1029.25, "word": " minus", "probability": 0.951171875}, {"start": 1029.25, "end": 1029.47, "word": " Mi2", "probability": 0.975830078125}, {"start": 1029.47, "end": 1029.73, "word": " equals", "probability": 0.89892578125}, {"start": 1029.73, "end": 1030.01, "word": " zero,", "probability": 0.87744140625}, {"start": 1030.41, "end": 1030.91, "word": " against", "probability": 0.93017578125}, {"start": 1030.91, "end": 1031.21, "word": " the", "probability": 0.8681640625}, {"start": 1031.21, "end": 1031.53, "word": " difference", 
"probability": 0.87646484375}, {"start": 1031.53, "end": 1031.73, "word": " is", "probability": 0.88916015625}, {"start": 1031.73, "end": 1031.85, "word": " not", "probability": 0.94091796875}, {"start": 1031.85, "end": 1032.11, "word": " zero,", "probability": 0.91552734375}, {"start": 1032.19, "end": 1032.27, "word": " or", "probability": 0.94775390625}, {"start": 1032.27, "end": 1032.37, "word": " the", "probability": 0.755859375}, {"start": 1032.37, "end": 1032.57, "word": " other", "probability": 0.89208984375}, {"start": 1032.57, "end": 1032.83, "word": " one,", "probability": 0.908203125}, {"start": 1033.47, "end": 1033.73, "word": " either", "probability": 0.927734375}, {"start": 1033.73, "end": 1033.95, "word": " one", "probability": 0.90380859375}, {"start": 1033.95, "end": 1034.17, "word": " is", "probability": 0.83740234375}, {"start": 1034.17, "end": 1034.53, "word": " equal.", "probability": 0.447509765625}], "temperature": 1.0}, {"id": 38, "seek": 106433, "start": 1035.45, "end": 1064.33, "text": " Now, for the one-tailed test, there are two cases, lower tail or upper tail. Lower tail means under the alternative hypothesis, each one, mu1 is smaller than mu2. It's called lower test. So if mu1 is smaller than mu2, that means the difference between the two populations is negative. 
Because here mu1 is smaller than mu2, it means the difference between these two is negative.", "tokens": [823, 11, 337, 264, 472, 12, 14430, 292, 1500, 11, 456, 366, 732, 3331, 11, 3126, 6838, 420, 6597, 6838, 13, 25523, 6838, 1355, 833, 264, 8535, 17291, 11, 1184, 472, 11, 2992, 16, 307, 4356, 813, 2992, 17, 13, 467, 311, 1219, 3126, 1500, 13, 407, 498, 2992, 16, 307, 4356, 813, 2992, 17, 11, 300, 1355, 264, 2649, 1296, 264, 732, 12822, 307, 3671, 13, 1436, 510, 2992, 16, 307, 4356, 813, 2992, 17, 11, 309, 1355, 264, 2649, 1296, 613, 732, 307, 3671, 13], "avg_logprob": -0.17755682258443398, "compression_ratio": 1.89, "no_speech_prob": 0.0, "words": [{"start": 1035.45, "end": 1035.79, "word": " Now,", "probability": 0.88916015625}, {"start": 1036.01, "end": 1036.35, "word": " for", "probability": 0.9482421875}, {"start": 1036.35, "end": 1036.63, "word": " the", "probability": 0.9169921875}, {"start": 1036.63, "end": 1037.07, "word": " one", "probability": 0.861328125}, {"start": 1037.07, "end": 1037.37, "word": "-tailed", "probability": 0.8024088541666666}, {"start": 1037.37, "end": 1037.61, "word": " test,", "probability": 0.76123046875}, {"start": 1037.69, "end": 1037.83, "word": " there", "probability": 0.90869140625}, {"start": 1037.83, "end": 1037.99, "word": " are", "probability": 0.93896484375}, {"start": 1037.99, "end": 1038.19, "word": " two", "probability": 0.9296875}, {"start": 1038.19, "end": 1038.71, "word": " cases,", "probability": 0.92529296875}, {"start": 1039.43, "end": 1039.67, "word": " lower", "probability": 0.8466796875}, {"start": 1039.67, "end": 1040.07, "word": " tail", "probability": 0.66845703125}, {"start": 1040.07, "end": 1040.73, "word": " or", "probability": 0.8779296875}, {"start": 1040.73, "end": 1041.01, "word": " upper", "probability": 0.82763671875}, {"start": 1041.01, "end": 1041.29, "word": " tail.", "probability": 0.85546875}, {"start": 1041.65, "end": 1041.99, "word": " Lower", "probability": 0.85986328125}, {"start": 
1041.99, "end": 1042.25, "word": " tail", "probability": 0.84814453125}, {"start": 1042.25, "end": 1042.71, "word": " means", "probability": 0.60205078125}, {"start": 1042.71, "end": 1043.15, "word": " under", "probability": 0.7490234375}, {"start": 1043.15, "end": 1043.55, "word": " the", "probability": 0.90380859375}, {"start": 1043.55, "end": 1044.19, "word": " alternative", "probability": 0.87939453125}, {"start": 1044.19, "end": 1044.81, "word": " hypothesis,", "probability": 0.828125}, {"start": 1046.49, "end": 1046.67, "word": " each", "probability": 0.73046875}, {"start": 1046.67, "end": 1046.97, "word": " one,", "probability": 0.88134765625}, {"start": 1047.67, "end": 1048.03, "word": " mu1", "probability": 0.50439453125}, {"start": 1048.03, "end": 1048.27, "word": " is", "probability": 0.65673828125}, {"start": 1048.27, "end": 1048.63, "word": " smaller", "probability": 0.873046875}, {"start": 1048.63, "end": 1048.87, "word": " than", "probability": 0.92333984375}, {"start": 1048.87, "end": 1049.17, "word": " mu2.", "probability": 0.95068359375}, {"start": 1049.29, "end": 1049.45, "word": " It's", "probability": 0.93701171875}, {"start": 1049.45, "end": 1049.71, "word": " called", "probability": 0.8818359375}, {"start": 1049.71, "end": 1050.23, "word": " lower", "probability": 0.744140625}, {"start": 1050.23, "end": 1051.03, "word": " test.", "probability": 0.7109375}, {"start": 1051.47, "end": 1051.65, "word": " So", "probability": 0.9560546875}, {"start": 1051.65, "end": 1051.83, "word": " if", "probability": 0.88134765625}, {"start": 1051.83, "end": 1052.51, "word": " mu1", "probability": 0.80029296875}, {"start": 1052.51, "end": 1052.69, "word": " is", "probability": 0.9423828125}, {"start": 1052.69, "end": 1053.01, "word": " smaller", "probability": 0.87451171875}, {"start": 1053.01, "end": 1053.27, "word": " than", "probability": 0.92431640625}, {"start": 1053.27, "end": 1053.67, "word": " mu2,", "probability": 0.9677734375}, {"start": 1053.79, 
"end": 1054.03, "word": " that", "probability": 0.92333984375}, {"start": 1054.03, "end": 1054.47, "word": " means", "probability": 0.931640625}, {"start": 1054.47, "end": 1055.61, "word": " the", "probability": 0.8857421875}, {"start": 1055.61, "end": 1056.07, "word": " difference", "probability": 0.88671875}, {"start": 1056.07, "end": 1056.53, "word": " between", "probability": 0.86376953125}, {"start": 1056.53, "end": 1056.91, "word": " the", "probability": 0.91845703125}, {"start": 1056.91, "end": 1057.09, "word": " two", "probability": 0.93603515625}, {"start": 1057.09, "end": 1057.63, "word": " populations", "probability": 0.556640625}, {"start": 1057.63, "end": 1058.19, "word": " is", "probability": 0.673828125}, {"start": 1058.19, "end": 1059.01, "word": " negative.", "probability": 0.94287109375}, {"start": 1059.53, "end": 1060.03, "word": " Because", "probability": 0.89501953125}, {"start": 1060.03, "end": 1060.61, "word": " here", "probability": 0.75146484375}, {"start": 1060.61, "end": 1060.95, "word": " mu1", "probability": 0.7298583984375}, {"start": 1060.95, "end": 1061.11, "word": " is", "probability": 0.94482421875}, {"start": 1061.11, "end": 1061.43, "word": " smaller", "probability": 0.87109375}, {"start": 1061.43, "end": 1061.67, "word": " than", "probability": 0.92919921875}, {"start": 1061.67, "end": 1062.03, "word": " mu2,", "probability": 0.966064453125}, {"start": 1062.11, "end": 1062.23, "word": " it", "probability": 0.79150390625}, {"start": 1062.23, "end": 1062.51, "word": " means", "probability": 0.92724609375}, {"start": 1062.51, "end": 1062.79, "word": " the", "probability": 0.86376953125}, {"start": 1062.79, "end": 1063.17, "word": " difference", "probability": 0.87548828125}, {"start": 1063.17, "end": 1063.51, "word": " between", "probability": 0.87060546875}, {"start": 1063.51, "end": 1063.73, "word": " these", "probability": 0.87451171875}, {"start": 1063.73, "end": 1063.89, "word": " two", "probability": 0.92919921875}, {"start": 
1063.89, "end": 1064.03, "word": " is", "probability": 0.93896484375}, {"start": 1064.03, "end": 1064.33, "word": " negative.", "probability": 0.94140625}], "temperature": 1.0}, {"id": 39, "seek": 107286, "start": 1066.03, "end": 1072.87, "text": " And as we mentioned before, H1 is the opposite of H0. So if H1 Me1 is smaller than Me2,", "tokens": [400, 382, 321, 2835, 949, 11, 389, 16, 307, 264, 6182, 295, 389, 15, 13, 407, 498, 389, 16, 1923, 16, 307, 4356, 813, 1923, 17, 11], "avg_logprob": -0.27399552507059916, "compression_ratio": 1.035294117647059, "no_speech_prob": 0.0, "words": [{"start": 1066.03, "end": 1066.35, "word": " And", "probability": 0.306884765625}, {"start": 1066.35, "end": 1066.49, "word": " as", "probability": 0.8623046875}, {"start": 1066.49, "end": 1066.61, "word": " we", "probability": 0.8916015625}, {"start": 1066.61, "end": 1066.87, "word": " mentioned", "probability": 0.79931640625}, {"start": 1066.87, "end": 1067.27, "word": " before,", "probability": 0.8486328125}, {"start": 1067.39, "end": 1067.69, "word": " H1", "probability": 0.735107421875}, {"start": 1067.69, "end": 1067.83, "word": " is", "probability": 0.919921875}, {"start": 1067.83, "end": 1068.01, "word": " the", "probability": 0.875}, {"start": 1068.01, "end": 1068.41, "word": " opposite", "probability": 0.953125}, {"start": 1068.41, "end": 1069.03, "word": " of", "probability": 0.93798828125}, {"start": 1069.03, "end": 1069.61, "word": " H0.", "probability": 0.977294921875}, {"start": 1069.85, "end": 1069.99, "word": " So", "probability": 0.93359375}, {"start": 1069.99, "end": 1070.29, "word": " if", "probability": 0.85791015625}, {"start": 1070.29, "end": 1070.83, "word": " H1", "probability": 0.952880859375}, {"start": 1070.83, "end": 1071.27, "word": " Me1", "probability": 0.530853271484375}, {"start": 1071.27, "end": 1071.85, "word": " is", "probability": 0.81396484375}, {"start": 1071.85, "end": 1072.19, "word": " smaller", "probability": 0.86474609375}, {"start": 
1072.19, "end": 1072.45, "word": " than", "probability": 0.94970703125}, {"start": 1072.45, "end": 1072.87, "word": " Me2,", "probability": 0.9755859375}], "temperature": 1.0}, {"id": 40, "seek": 109876, "start": 1073.54, "end": 1098.76, "text": " it means under the null hypothesis, mu1 is greater than or equal to mu. And as we mentioned before, the equal sign appears only under the null hypothesis. So here for the two-sided test, the equality here appears just on the null hypothesis, as well as for lower and upper tail test. For the upper tail test,", "tokens": [309, 1355, 833, 264, 18184, 17291, 11, 2992, 16, 307, 5044, 813, 420, 2681, 281, 2992, 13, 400, 382, 321, 2835, 949, 11, 264, 2681, 1465, 7038, 787, 833, 264, 18184, 17291, 13, 407, 510, 337, 264, 732, 12, 30941, 1500, 11, 264, 14949, 510, 7038, 445, 322, 264, 18184, 17291, 11, 382, 731, 382, 337, 3126, 293, 6597, 6838, 1500, 13, 1171, 264, 6597, 6838, 1500, 11], "avg_logprob": -0.20980525362318841, "compression_ratio": 1.8176470588235294, "no_speech_prob": 0.0, "words": [{"start": 1073.54, "end": 1073.88, "word": " it", "probability": 0.422607421875}, {"start": 1073.88, "end": 1074.34, "word": " means", "probability": 0.91552734375}, {"start": 1074.34, "end": 1074.98, "word": " under", "probability": 0.79052734375}, {"start": 1074.98, "end": 1075.3, "word": " the", "probability": 0.9091796875}, {"start": 1075.3, "end": 1075.64, "word": " null", "probability": 0.92236328125}, {"start": 1075.64, "end": 1076.12, "word": " hypothesis,", "probability": 0.77783203125}, {"start": 1076.34, "end": 1076.66, "word": " mu1", "probability": 0.5047607421875}, {"start": 1076.66, "end": 1077.0, "word": " is", "probability": 0.8798828125}, {"start": 1077.0, "end": 1078.0, "word": " greater", "probability": 0.7998046875}, {"start": 1078.0, "end": 1078.36, "word": " than", "probability": 0.9384765625}, {"start": 1078.36, "end": 1078.5, "word": " or", "probability": 0.9521484375}, {"start": 1078.5, "end": 1078.82, "word": " 
equal", "probability": 0.91064453125}, {"start": 1078.82, "end": 1079.2, "word": " to", "probability": 0.947265625}, {"start": 1079.2, "end": 1079.32, "word": " mu.", "probability": 0.8388671875}, {"start": 1079.9, "end": 1080.12, "word": " And", "probability": 0.8046875}, {"start": 1080.12, "end": 1080.44, "word": " as", "probability": 0.822265625}, {"start": 1080.44, "end": 1080.56, "word": " we", "probability": 0.9462890625}, {"start": 1080.56, "end": 1080.8, "word": " mentioned", "probability": 0.81640625}, {"start": 1080.8, "end": 1081.24, "word": " before,", "probability": 0.849609375}, {"start": 1081.42, "end": 1081.54, "word": " the", "probability": 0.91259765625}, {"start": 1081.54, "end": 1081.84, "word": " equal", "probability": 0.765625}, {"start": 1081.84, "end": 1082.34, "word": " sign", "probability": 0.88671875}, {"start": 1082.34, "end": 1083.02, "word": " appears", "probability": 0.87548828125}, {"start": 1083.02, "end": 1083.62, "word": " only", "probability": 0.90771484375}, {"start": 1083.62, "end": 1085.12, "word": " under", "probability": 0.8974609375}, {"start": 1085.12, "end": 1085.54, "word": " the", "probability": 0.90576171875}, {"start": 1085.54, "end": 1085.72, "word": " null", "probability": 0.95263671875}, {"start": 1085.72, "end": 1086.2, "word": " hypothesis.", "probability": 0.78125}, {"start": 1086.9, "end": 1087.08, "word": " So", "probability": 0.93212890625}, {"start": 1087.08, "end": 1087.36, "word": " here", "probability": 0.75830078125}, {"start": 1087.36, "end": 1087.6, "word": " for", "probability": 0.56787109375}, {"start": 1087.6, "end": 1087.78, "word": " the", "probability": 0.912109375}, {"start": 1087.78, "end": 1087.98, "word": " two", "probability": 0.8857421875}, {"start": 1087.98, "end": 1088.2, "word": "-sided", "probability": 0.821533203125}, {"start": 1088.2, "end": 1088.58, "word": " test,", "probability": 0.83203125}, {"start": 1088.96, "end": 1089.16, "word": " the", "probability": 0.9033203125}, {"start": 
1089.16, "end": 1090.02, "word": " equality", "probability": 0.63623046875}, {"start": 1090.02, "end": 1090.52, "word": " here", "probability": 0.8310546875}, {"start": 1090.52, "end": 1091.28, "word": " appears", "probability": 0.82373046875}, {"start": 1091.28, "end": 1091.6, "word": " just", "probability": 0.88427734375}, {"start": 1091.6, "end": 1091.9, "word": " on", "probability": 0.919921875}, {"start": 1091.9, "end": 1092.14, "word": " the", "probability": 0.91796875}, {"start": 1092.14, "end": 1092.54, "word": " null", "probability": 0.94921875}, {"start": 1092.54, "end": 1093.0, "word": " hypothesis,", "probability": 0.86767578125}, {"start": 1093.64, "end": 1094.12, "word": " as", "probability": 0.9609375}, {"start": 1094.12, "end": 1094.26, "word": " well", "probability": 0.927734375}, {"start": 1094.26, "end": 1094.5, "word": " as", "probability": 0.95458984375}, {"start": 1094.5, "end": 1094.74, "word": " for", "probability": 0.93798828125}, {"start": 1094.74, "end": 1095.06, "word": " lower", "probability": 0.77001953125}, {"start": 1095.06, "end": 1095.28, "word": " and", "probability": 0.84619140625}, {"start": 1095.28, "end": 1095.52, "word": " upper", "probability": 0.6416015625}, {"start": 1095.52, "end": 1095.72, "word": " tail", "probability": 0.6396484375}, {"start": 1095.72, "end": 1096.02, "word": " test.", "probability": 0.51953125}, {"start": 1096.94, "end": 1097.56, "word": " For", "probability": 0.95458984375}, {"start": 1097.56, "end": 1097.72, "word": " the", "probability": 0.92578125}, {"start": 1097.72, "end": 1098.02, "word": " upper", "probability": 0.830078125}, {"start": 1098.02, "end": 1098.28, "word": " tail", "probability": 0.8466796875}, {"start": 1098.28, "end": 1098.76, "word": " test,", "probability": 0.88818359375}], "temperature": 1.0}, {"id": 41, "seek": 112281, "start": 1100.15, "end": 1122.81, "text": " Again here, we have U1 is greater than U2. 
It means the difference between these two populations is above zero, greater than zero. So that's the new scheme for formulating or stating null and alternative hypotheses. It's quite similar to the one we had discussed in chapter nine. Any question?", "tokens": [3764, 510, 11, 321, 362, 624, 16, 307, 5044, 813, 624, 17, 13, 467, 1355, 264, 2649, 1296, 613, 732, 12822, 307, 3673, 4018, 11, 5044, 813, 4018, 13, 407, 300, 311, 264, 777, 12232, 337, 1254, 12162, 420, 26688, 18184, 293, 8535, 49969, 13, 467, 311, 1596, 2531, 281, 264, 472, 321, 632, 7152, 294, 7187, 4949, 13, 2639, 1168, 30], "avg_logprob": -0.17286706727648538, "compression_ratio": 1.4923857868020305, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1100.15, "end": 1100.51, "word": " Again", "probability": 0.80322265625}, {"start": 1100.51, "end": 1100.77, "word": " here,", "probability": 0.484375}, {"start": 1100.91, "end": 1101.01, "word": " we", "probability": 0.9501953125}, {"start": 1101.01, "end": 1101.25, "word": " have", "probability": 0.9375}, {"start": 1101.25, "end": 1101.53, "word": " U1", "probability": 0.610107421875}, {"start": 1101.53, "end": 1101.73, "word": " is", "probability": 0.7373046875}, {"start": 1101.73, "end": 1102.09, "word": " greater", "probability": 0.92138671875}, {"start": 1102.09, "end": 1102.31, "word": " than", "probability": 0.94384765625}, {"start": 1102.31, "end": 1102.67, "word": " U2.", "probability": 0.990234375}, {"start": 1102.81, "end": 1102.97, "word": " It", "probability": 0.896484375}, {"start": 1102.97, "end": 1103.31, "word": " means", "probability": 0.9287109375}, {"start": 1103.31, "end": 1104.61, "word": " the", "probability": 0.84033203125}, {"start": 1104.61, "end": 1105.03, "word": " difference", "probability": 0.87109375}, {"start": 1105.03, "end": 1105.39, "word": " between", "probability": 0.87060546875}, {"start": 1105.39, "end": 1105.65, "word": " these", "probability": 0.87060546875}, {"start": 1105.65, "end": 1105.97, "word": " 
two", "probability": 0.93017578125}, {"start": 1105.97, "end": 1106.89, "word": " populations", "probability": 0.60888671875}, {"start": 1106.89, "end": 1107.29, "word": " is", "probability": 0.92822265625}, {"start": 1107.29, "end": 1108.01, "word": " above", "probability": 0.931640625}, {"start": 1108.01, "end": 1108.33, "word": " zero,", "probability": 0.69580078125}, {"start": 1108.45, "end": 1108.67, "word": " greater", "probability": 0.92431640625}, {"start": 1108.67, "end": 1108.95, "word": " than", "probability": 0.9462890625}, {"start": 1108.95, "end": 1109.21, "word": " zero.", "probability": 0.90625}, {"start": 1109.63, "end": 1109.93, "word": " So", "probability": 0.93310546875}, {"start": 1109.93, "end": 1110.35, "word": " that's", "probability": 0.912841796875}, {"start": 1110.35, "end": 1110.57, "word": " the", "probability": 0.91650390625}, {"start": 1110.57, "end": 1110.95, "word": " new", "probability": 0.91552734375}, {"start": 1110.95, "end": 1111.63, "word": " scheme", "probability": 0.9404296875}, {"start": 1111.63, "end": 1112.13, "word": " for", "probability": 0.9501953125}, {"start": 1112.13, "end": 1113.35, "word": " formulating", "probability": 0.969482421875}, {"start": 1113.35, "end": 1113.77, "word": " or", "probability": 0.884765625}, {"start": 1113.77, "end": 1114.47, "word": " stating", "probability": 0.9443359375}, {"start": 1114.47, "end": 1115.41, "word": " null", "probability": 0.9052734375}, {"start": 1115.41, "end": 1115.61, "word": " and", "probability": 0.67041015625}, {"start": 1115.61, "end": 1116.17, "word": " alternative", "probability": 0.86474609375}, {"start": 1116.17, "end": 1116.89, "word": " hypotheses.", "probability": 0.55712890625}, {"start": 1117.33, "end": 1117.63, "word": " It's", "probability": 0.972412109375}, {"start": 1117.63, "end": 1118.01, "word": " quite", "probability": 0.908203125}, {"start": 1118.01, "end": 1118.71, "word": " similar", "probability": 0.96923828125}, {"start": 1118.71, "end": 
1119.39, "word": " to", "probability": 0.96630859375}, {"start": 1119.39, "end": 1119.51, "word": " the", "probability": 0.90234375}, {"start": 1119.51, "end": 1119.65, "word": " one", "probability": 0.9287109375}, {"start": 1119.65, "end": 1119.81, "word": " we", "probability": 0.95751953125}, {"start": 1119.81, "end": 1120.05, "word": " had", "probability": 0.8642578125}, {"start": 1120.05, "end": 1120.61, "word": " discussed", "probability": 0.88330078125}, {"start": 1120.61, "end": 1120.85, "word": " in", "probability": 0.94189453125}, {"start": 1120.85, "end": 1121.05, "word": " chapter", "probability": 0.50634765625}, {"start": 1121.05, "end": 1121.41, "word": " nine.", "probability": 0.6767578125}, {"start": 1122.09, "end": 1122.49, "word": " Any", "probability": 0.927734375}, {"start": 1122.49, "end": 1122.81, "word": " question?", "probability": 0.488525390625}], "temperature": 1.0}, {"id": 42, "seek": 113648, "start": 1124.06, "end": 1136.48, "text": " So this is step number one for doing or performing statistical hypothesis testing. So again, there are two types of tests. 
One is two-tailed.", "tokens": [407, 341, 307, 1823, 1230, 472, 337, 884, 420, 10205, 22820, 17291, 4997, 13, 407, 797, 11, 456, 366, 732, 3467, 295, 6921, 13, 1485, 307, 732, 12, 14430, 292, 13], "avg_logprob": -0.25097655411809683, "compression_ratio": 1.2678571428571428, "no_speech_prob": 0.0, "words": [{"start": 1124.06, "end": 1124.32, "word": " So", "probability": 0.73095703125}, {"start": 1124.32, "end": 1124.52, "word": " this", "probability": 0.755859375}, {"start": 1124.52, "end": 1124.68, "word": " is", "probability": 0.8984375}, {"start": 1124.68, "end": 1125.08, "word": " step", "probability": 0.61376953125}, {"start": 1125.08, "end": 1125.8, "word": " number", "probability": 0.8056640625}, {"start": 1125.8, "end": 1126.12, "word": " one", "probability": 0.767578125}, {"start": 1126.12, "end": 1126.54, "word": " for", "probability": 0.91552734375}, {"start": 1126.54, "end": 1126.96, "word": " doing", "probability": 0.79443359375}, {"start": 1126.96, "end": 1127.3, "word": " or", "probability": 0.76416015625}, {"start": 1127.3, "end": 1127.9, "word": " performing", "probability": 0.86376953125}, {"start": 1127.9, "end": 1128.78, "word": " statistical", "probability": 0.755859375}, {"start": 1128.78, "end": 1129.32, "word": " hypothesis", "probability": 0.734375}, {"start": 1129.32, "end": 1129.84, "word": " testing.", "probability": 0.85009765625}, {"start": 1132.26, "end": 1132.9, "word": " So", "probability": 0.880859375}, {"start": 1132.9, "end": 1133.22, "word": " again,", "probability": 0.8466796875}, {"start": 1133.36, "end": 1133.56, "word": " there", "probability": 0.9013671875}, {"start": 1133.56, "end": 1133.7, "word": " are", "probability": 0.943359375}, {"start": 1133.7, "end": 1133.88, "word": " two", "probability": 0.92578125}, {"start": 1133.88, "end": 1134.24, "word": " types", "probability": 0.8388671875}, {"start": 1134.24, "end": 1134.4, "word": " of", "probability": 0.970703125}, {"start": 1134.4, "end": 1134.72, "word": " tests.", 
"probability": 0.763671875}, {"start": 1135.16, "end": 1135.48, "word": " One", "probability": 0.912109375}, {"start": 1135.48, "end": 1135.78, "word": " is", "probability": 0.9453125}, {"start": 1135.78, "end": 1136.02, "word": " two", "probability": 0.1649169921875}, {"start": 1136.02, "end": 1136.48, "word": "-tailed.", "probability": 0.86083984375}], "temperature": 1.0}, {"id": 43, "seek": 116048, "start": 1137.68, "end": 1160.48, "text": " in this case there is no direction you don't know the exact direction of the two population means you just say there is a difference between the two population means in the other two cases you know the exact direction you may say that population mean for a it's smaller or less than or decrease from", "tokens": [294, 341, 1389, 456, 307, 572, 3513, 291, 500, 380, 458, 264, 1900, 3513, 295, 264, 732, 4415, 1355, 291, 445, 584, 456, 307, 257, 2649, 1296, 264, 732, 4415, 1355, 294, 264, 661, 732, 3331, 291, 458, 264, 1900, 3513, 291, 815, 584, 300, 4415, 914, 337, 257, 309, 311, 4356, 420, 1570, 813, 420, 11514, 490], "avg_logprob": -0.14843750707173753, "compression_ratio": 2.0, "no_speech_prob": 0.0, "words": [{"start": 1137.68, "end": 1137.9, "word": " in", "probability": 0.271484375}, {"start": 1137.9, "end": 1138.1, "word": " this", "probability": 0.94873046875}, {"start": 1138.1, "end": 1138.4, "word": " case", "probability": 0.9208984375}, {"start": 1138.4, "end": 1138.62, "word": " there", "probability": 0.72265625}, {"start": 1138.62, "end": 1138.76, "word": " is", "probability": 0.94775390625}, {"start": 1138.76, "end": 1138.92, "word": " no", "probability": 0.95361328125}, {"start": 1138.92, "end": 1139.4, "word": " direction", "probability": 0.96435546875}, {"start": 1139.4, "end": 1140.5, "word": " you", "probability": 0.58056640625}, {"start": 1140.5, "end": 1140.78, "word": " don't", "probability": 0.97119140625}, {"start": 1140.78, "end": 1141.0, "word": " know", "probability": 0.90185546875}, {"start": 1141.0, 
"end": 1141.26, "word": " the", "probability": 0.8955078125}, {"start": 1141.26, "end": 1141.68, "word": " exact", "probability": 0.94287109375}, {"start": 1141.68, "end": 1142.18, "word": " direction", "probability": 0.9736328125}, {"start": 1142.18, "end": 1142.4, "word": " of", "probability": 0.9658203125}, {"start": 1142.4, "end": 1142.52, "word": " the", "probability": 0.92236328125}, {"start": 1142.52, "end": 1142.66, "word": " two", "probability": 0.94287109375}, {"start": 1142.66, "end": 1143.1, "word": " population", "probability": 0.90234375}, {"start": 1143.1, "end": 1143.4, "word": " means", "probability": 0.93115234375}, {"start": 1143.4, "end": 1144.0, "word": " you", "probability": 0.84130859375}, {"start": 1144.0, "end": 1144.34, "word": " just", "probability": 0.92138671875}, {"start": 1144.34, "end": 1144.72, "word": " say", "probability": 0.92333984375}, {"start": 1144.72, "end": 1145.12, "word": " there", "probability": 0.8984375}, {"start": 1145.12, "end": 1145.28, "word": " is", "probability": 0.94970703125}, {"start": 1145.28, "end": 1145.48, "word": " a", "probability": 0.9970703125}, {"start": 1145.48, "end": 1145.94, "word": " difference", "probability": 0.86962890625}, {"start": 1145.94, "end": 1146.32, "word": " between", "probability": 0.8779296875}, {"start": 1146.32, "end": 1146.56, "word": " the", "probability": 0.91845703125}, {"start": 1146.56, "end": 1146.7, "word": " two", "probability": 0.94140625}, {"start": 1146.7, "end": 1147.14, "word": " population", "probability": 0.91796875}, {"start": 1147.14, "end": 1147.58, "word": " means", "probability": 0.79443359375}, {"start": 1147.58, "end": 1148.5, "word": " in", "probability": 0.4228515625}, {"start": 1148.5, "end": 1148.62, "word": " the", "probability": 0.90673828125}, {"start": 1148.62, "end": 1148.78, "word": " other", "probability": 0.892578125}, {"start": 1148.78, "end": 1149.0, "word": " two", "probability": 0.9462890625}, {"start": 1149.0, "end": 1149.4, "word": " 
cases", "probability": 0.921875}, {"start": 1149.4, "end": 1149.68, "word": " you", "probability": 0.91552734375}, {"start": 1149.68, "end": 1149.8, "word": " know", "probability": 0.8994140625}, {"start": 1149.8, "end": 1149.98, "word": " the", "probability": 0.916015625}, {"start": 1149.98, "end": 1150.32, "word": " exact", "probability": 0.93408203125}, {"start": 1150.32, "end": 1150.88, "word": " direction", "probability": 0.9736328125}, {"start": 1150.88, "end": 1151.58, "word": " you", "probability": 0.93603515625}, {"start": 1151.58, "end": 1151.86, "word": " may", "probability": 0.9462890625}, {"start": 1151.86, "end": 1152.2, "word": " say", "probability": 0.95263671875}, {"start": 1152.2, "end": 1152.56, "word": " that", "probability": 0.931640625}, {"start": 1152.56, "end": 1153.32, "word": " population", "probability": 0.9375}, {"start": 1153.32, "end": 1153.78, "word": " mean", "probability": 0.9228515625}, {"start": 1153.78, "end": 1154.84, "word": " for", "probability": 0.9423828125}, {"start": 1154.84, "end": 1156.32, "word": " a", "probability": 0.38134765625}, {"start": 1156.32, "end": 1157.22, "word": " it's", "probability": 0.864013671875}, {"start": 1157.22, "end": 1157.66, "word": " smaller", "probability": 0.87451171875}, {"start": 1157.66, "end": 1158.14, "word": " or", "probability": 0.96484375}, {"start": 1158.14, "end": 1158.44, "word": " less", "probability": 0.80029296875}, {"start": 1158.44, "end": 1158.72, "word": " than", "probability": 0.92431640625}, {"start": 1158.72, "end": 1159.48, "word": " or", "probability": 0.95849609375}, {"start": 1159.48, "end": 1160.02, "word": " decrease", "probability": 0.56787109375}, {"start": 1160.02, "end": 1160.48, "word": " from", "probability": 0.90283203125}], "temperature": 1.0}, {"id": 44, "seek": 118832, "start": 1161.3, "end": 1188.32, "text": " The other one, here population A is larger or increased or whatever it is. 
So we have null hypothesis, informative hypothesis, maybe two-tailed or one-tailed test. It depends on the nature of the problem itself. Now what's about the rejection regions? Similar as we discussed before, if we are talking about two-tailed test,", "tokens": [440, 661, 472, 11, 510, 4415, 316, 307, 4833, 420, 6505, 420, 2035, 309, 307, 13, 407, 321, 362, 18184, 17291, 11, 27759, 17291, 11, 1310, 732, 12, 14430, 292, 420, 472, 12, 14430, 292, 1500, 13, 467, 5946, 322, 264, 3687, 295, 264, 1154, 2564, 13, 823, 437, 311, 466, 264, 26044, 10682, 30, 10905, 382, 321, 7152, 949, 11, 498, 321, 366, 1417, 466, 732, 12, 14430, 292, 1500, 11], "avg_logprob": -0.1952054843510667, "compression_ratio": 1.5476190476190477, "no_speech_prob": 0.0, "words": [{"start": 1161.3, "end": 1161.52, "word": " The", "probability": 0.2958984375}, {"start": 1161.52, "end": 1161.74, "word": " other", "probability": 0.89208984375}, {"start": 1161.74, "end": 1162.02, "word": " one,", "probability": 0.9169921875}, {"start": 1162.34, "end": 1162.62, "word": " here", "probability": 0.791015625}, {"start": 1162.62, "end": 1163.28, "word": " population", "probability": 0.69140625}, {"start": 1163.28, "end": 1163.6, "word": " A", "probability": 0.85400390625}, {"start": 1163.6, "end": 1163.98, "word": " is", "probability": 0.943359375}, {"start": 1163.98, "end": 1164.76, "word": " larger", "probability": 0.95263671875}, {"start": 1164.76, "end": 1165.78, "word": " or", "probability": 0.77392578125}, {"start": 1165.78, "end": 1166.36, "word": " increased", "probability": 0.9208984375}, {"start": 1166.36, "end": 1166.8, "word": " or", "probability": 0.693359375}, {"start": 1166.8, "end": 1167.04, "word": " whatever", "probability": 0.93896484375}, {"start": 1167.04, "end": 1167.28, "word": " it", "probability": 0.67724609375}, {"start": 1167.28, "end": 1167.4, "word": " is.", "probability": 0.95166015625}, {"start": 1167.82, "end": 1167.9, "word": " So", "probability": 0.9462890625}, {"start": 
1167.9, "end": 1168.04, "word": " we", "probability": 0.8427734375}, {"start": 1168.04, "end": 1168.32, "word": " have", "probability": 0.9306640625}, {"start": 1168.32, "end": 1169.58, "word": " null", "probability": 0.90185546875}, {"start": 1169.58, "end": 1170.72, "word": " hypothesis,", "probability": 0.7265625}, {"start": 1170.84, "end": 1171.18, "word": " informative", "probability": 0.5146484375}, {"start": 1171.18, "end": 1171.62, "word": " hypothesis,", "probability": 0.88525390625}, {"start": 1172.26, "end": 1172.44, "word": " maybe", "probability": 0.91845703125}, {"start": 1172.44, "end": 1172.92, "word": " two", "probability": 0.90869140625}, {"start": 1172.92, "end": 1173.3, "word": "-tailed", "probability": 0.8416341145833334}, {"start": 1173.3, "end": 1173.84, "word": " or", "probability": 0.900390625}, {"start": 1173.84, "end": 1174.1, "word": " one", "probability": 0.93701171875}, {"start": 1174.1, "end": 1174.4, "word": "-tailed", "probability": 0.8972981770833334}, {"start": 1174.4, "end": 1174.62, "word": " test.", "probability": 0.79296875}, {"start": 1174.68, "end": 1174.78, "word": " It", "probability": 0.8291015625}, {"start": 1174.78, "end": 1175.12, "word": " depends", "probability": 0.89404296875}, {"start": 1175.12, "end": 1175.66, "word": " on", "probability": 0.9482421875}, {"start": 1175.66, "end": 1175.88, "word": " the", "probability": 0.92236328125}, {"start": 1175.88, "end": 1176.34, "word": " nature", "probability": 0.9130859375}, {"start": 1176.34, "end": 1177.52, "word": " of", "probability": 0.966796875}, {"start": 1177.52, "end": 1177.68, "word": " the", "probability": 0.9189453125}, {"start": 1177.68, "end": 1178.08, "word": " problem", "probability": 0.873046875}, {"start": 1178.08, "end": 1178.76, "word": " itself.", "probability": 0.8232421875}, {"start": 1180.88, "end": 1181.3, "word": " Now", "probability": 0.962890625}, {"start": 1181.3, "end": 1181.48, "word": " what's", "probability": 0.74560546875}, {"start": 
1181.48, "end": 1181.7, "word": " about", "probability": 0.9111328125}, {"start": 1181.7, "end": 1181.88, "word": " the", "probability": 0.865234375}, {"start": 1181.88, "end": 1182.14, "word": " rejection", "probability": 0.96240234375}, {"start": 1182.14, "end": 1182.64, "word": " regions?", "probability": 0.93115234375}, {"start": 1183.44, "end": 1184.04, "word": " Similar", "probability": 0.916015625}, {"start": 1184.04, "end": 1184.44, "word": " as", "probability": 0.86669921875}, {"start": 1184.44, "end": 1184.66, "word": " we", "probability": 0.96435546875}, {"start": 1184.66, "end": 1185.18, "word": " discussed", "probability": 0.8720703125}, {"start": 1185.18, "end": 1185.66, "word": " before,", "probability": 0.857421875}, {"start": 1186.34, "end": 1186.62, "word": " if", "probability": 0.94970703125}, {"start": 1186.62, "end": 1186.74, "word": " we", "probability": 0.94189453125}, {"start": 1186.74, "end": 1186.86, "word": " are", "probability": 0.92919921875}, {"start": 1186.86, "end": 1187.2, "word": " talking", "probability": 0.8486328125}, {"start": 1187.2, "end": 1187.54, "word": " about", "probability": 0.90576171875}, {"start": 1187.54, "end": 1187.76, "word": " two", "probability": 0.9296875}, {"start": 1187.76, "end": 1188.02, "word": "-tailed", "probability": 0.9078776041666666}, {"start": 1188.02, "end": 1188.32, "word": " test,", "probability": 0.74462890625}], "temperature": 1.0}, {"id": 45, "seek": 121608, "start": 1189.16, "end": 1216.08, "text": " In this case, there are two rejection regions, one to the right of alpha over 2 and the other to the left of the other side of alpha over 2. But here we have T alpha over 2 and minus T alpha over 2. And again, we are focusing on unknown sigmas. So we have to use T critical values. 
So we reject the null hypothesis the same as we mentioned before if the test statistic", "tokens": [682, 341, 1389, 11, 456, 366, 732, 26044, 10682, 11, 472, 281, 264, 558, 295, 8961, 670, 568, 293, 264, 661, 281, 264, 1411, 295, 264, 661, 1252, 295, 8961, 670, 568, 13, 583, 510, 321, 362, 314, 8961, 670, 568, 293, 3175, 314, 8961, 670, 568, 13, 400, 797, 11, 321, 366, 8416, 322, 9841, 4556, 3799, 13, 407, 321, 362, 281, 764, 314, 4924, 4190, 13, 407, 321, 8248, 264, 18184, 17291, 264, 912, 382, 321, 2835, 949, 498, 264, 1500, 29588], "avg_logprob": -0.1659007282818065, "compression_ratio": 1.7571428571428571, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1189.16, "end": 1189.4, "word": " In", "probability": 0.75048828125}, {"start": 1189.4, "end": 1189.6, "word": " this", "probability": 0.943359375}, {"start": 1189.6, "end": 1189.78, "word": " case,", "probability": 0.92041015625}, {"start": 1189.86, "end": 1189.96, "word": " there", "probability": 0.90478515625}, {"start": 1189.96, "end": 1190.12, "word": " are", "probability": 0.9375}, {"start": 1190.12, "end": 1190.36, "word": " two", "probability": 0.892578125}, {"start": 1190.36, "end": 1190.76, "word": " rejection", "probability": 0.94189453125}, {"start": 1190.76, "end": 1191.22, "word": " regions,", "probability": 0.96337890625}, {"start": 1191.38, "end": 1191.62, "word": " one", "probability": 0.91943359375}, {"start": 1191.62, "end": 1192.2, "word": " to", "probability": 0.95703125}, {"start": 1192.2, "end": 1192.36, "word": " the", "probability": 0.91748046875}, {"start": 1192.36, "end": 1192.72, "word": " right", "probability": 0.9033203125}, {"start": 1192.72, "end": 1193.74, "word": " of", "probability": 0.9326171875}, {"start": 1193.74, "end": 1194.08, "word": " alpha", "probability": 0.77783203125}, {"start": 1194.08, "end": 1194.34, "word": " over", "probability": 0.88623046875}, {"start": 1194.34, "end": 1194.52, "word": " 2", "probability": 0.45068359375}, {"start": 1194.52, 
"end": 1194.66, "word": " and", "probability": 0.64453125}, {"start": 1194.66, "end": 1194.82, "word": " the", "probability": 0.80810546875}, {"start": 1194.82, "end": 1195.06, "word": " other", "probability": 0.89111328125}, {"start": 1195.06, "end": 1195.26, "word": " to", "probability": 0.8935546875}, {"start": 1195.26, "end": 1195.42, "word": " the", "probability": 0.91064453125}, {"start": 1195.42, "end": 1195.64, "word": " left", "probability": 0.94775390625}, {"start": 1195.64, "end": 1195.88, "word": " of", "probability": 0.77880859375}, {"start": 1195.88, "end": 1196.14, "word": " the", "probability": 0.5048828125}, {"start": 1196.14, "end": 1196.34, "word": " other", "probability": 0.88134765625}, {"start": 1196.34, "end": 1196.74, "word": " side", "probability": 0.88330078125}, {"start": 1196.74, "end": 1196.94, "word": " of", "probability": 0.9462890625}, {"start": 1196.94, "end": 1197.24, "word": " alpha", "probability": 0.890625}, {"start": 1197.24, "end": 1197.48, "word": " over", "probability": 0.90869140625}, {"start": 1197.48, "end": 1197.74, "word": " 2.", "probability": 0.9677734375}, {"start": 1198.32, "end": 1198.64, "word": " But", "probability": 0.93115234375}, {"start": 1198.64, "end": 1198.8, "word": " here", "probability": 0.83154296875}, {"start": 1198.8, "end": 1198.94, "word": " we", "probability": 0.724609375}, {"start": 1198.94, "end": 1199.24, "word": " have", "probability": 0.9453125}, {"start": 1199.24, "end": 1199.6, "word": " T", "probability": 0.7529296875}, {"start": 1199.6, "end": 1200.16, "word": " alpha", "probability": 0.6044921875}, {"start": 1200.16, "end": 1200.44, "word": " over", "probability": 0.927734375}, {"start": 1200.44, "end": 1200.74, "word": " 2", "probability": 0.97021484375}, {"start": 1200.74, "end": 1201.34, "word": " and", "probability": 0.7099609375}, {"start": 1201.34, "end": 1201.76, "word": " minus", "probability": 0.96533203125}, {"start": 1201.76, "end": 1202.0, "word": " T", "probability": 
0.9873046875}, {"start": 1202.0, "end": 1202.18, "word": " alpha", "probability": 0.87890625}, {"start": 1202.18, "end": 1202.44, "word": " over", "probability": 0.9345703125}, {"start": 1202.44, "end": 1202.62, "word": " 2.", "probability": 0.99365234375}, {"start": 1202.84, "end": 1203.04, "word": " And", "probability": 0.94677734375}, {"start": 1203.04, "end": 1203.32, "word": " again,", "probability": 0.90576171875}, {"start": 1203.46, "end": 1203.66, "word": " we", "probability": 0.9599609375}, {"start": 1203.66, "end": 1203.9, "word": " are", "probability": 0.93359375}, {"start": 1203.9, "end": 1204.42, "word": " focusing", "probability": 0.89453125}, {"start": 1204.42, "end": 1204.94, "word": " on", "probability": 0.95068359375}, {"start": 1204.94, "end": 1205.46, "word": " unknown", "probability": 0.88525390625}, {"start": 1205.46, "end": 1205.9, "word": " sigmas.", "probability": 0.71875}, {"start": 1206.28, "end": 1206.64, "word": " So", "probability": 0.958984375}, {"start": 1206.64, "end": 1206.78, "word": " we", "probability": 0.8779296875}, {"start": 1206.78, "end": 1206.96, "word": " have", "probability": 0.94287109375}, {"start": 1206.96, "end": 1207.1, "word": " to", "probability": 0.9443359375}, {"start": 1207.1, "end": 1207.4, "word": " use", "probability": 0.8515625}, {"start": 1207.4, "end": 1207.66, "word": " T", "probability": 0.68505859375}, {"start": 1207.66, "end": 1208.28, "word": " critical", "probability": 0.85400390625}, {"start": 1208.28, "end": 1208.74, "word": " values.", "probability": 0.9599609375}, {"start": 1209.34, "end": 1209.6, "word": " So", "probability": 0.95458984375}, {"start": 1209.6, "end": 1210.1, "word": " we", "probability": 0.89404296875}, {"start": 1210.1, "end": 1210.52, "word": " reject", "probability": 0.91064453125}, {"start": 1210.52, "end": 1210.74, "word": " the", "probability": 0.9140625}, {"start": 1210.74, "end": 1210.86, "word": " null", "probability": 0.96728515625}, {"start": 1210.86, "end": 1211.38, 
"word": " hypothesis", "probability": 0.7568359375}, {"start": 1211.38, "end": 1211.9, "word": " the", "probability": 0.5849609375}, {"start": 1211.9, "end": 1212.1, "word": " same", "probability": 0.91064453125}, {"start": 1212.1, "end": 1212.3, "word": " as", "probability": 0.95947265625}, {"start": 1212.3, "end": 1212.44, "word": " we", "probability": 0.9619140625}, {"start": 1212.44, "end": 1212.76, "word": " mentioned", "probability": 0.828125}, {"start": 1212.76, "end": 1213.24, "word": " before", "probability": 0.8603515625}, {"start": 1213.24, "end": 1214.44, "word": " if", "probability": 0.478271484375}, {"start": 1214.44, "end": 1214.88, "word": " the", "probability": 0.7373046875}, {"start": 1214.88, "end": 1215.32, "word": " test", "probability": 0.833984375}, {"start": 1215.32, "end": 1216.08, "word": " statistic", "probability": 0.892578125}], "temperature": 1.0}, {"id": 46, "seek": 122594, "start": 1216.7, "end": 1225.94, "text": " falls in the rejection regions. In this case, if this statistic lies in this region or the other one, we have to reject them.", "tokens": [8804, 294, 264, 26044, 10682, 13, 682, 341, 1389, 11, 498, 341, 29588, 9134, 294, 341, 4458, 420, 264, 661, 472, 11, 321, 362, 281, 8248, 552, 13], "avg_logprob": -0.22615840928307895, "compression_ratio": 1.3695652173913044, "no_speech_prob": 0.0, "words": [{"start": 1216.7, "end": 1217.16, "word": " falls", "probability": 0.31982421875}, {"start": 1217.16, "end": 1217.44, "word": " in", "probability": 0.7109375}, {"start": 1217.44, "end": 1217.58, "word": " the", "probability": 0.85498046875}, {"start": 1217.58, "end": 1217.92, "word": " rejection", "probability": 0.92041015625}, {"start": 1217.92, "end": 1218.4, "word": " regions.", "probability": 0.86962890625}, {"start": 1218.56, "end": 1218.62, "word": " In", "probability": 0.8466796875}, {"start": 1218.62, "end": 1218.8, "word": " this", "probability": 0.9462890625}, {"start": 1218.8, "end": 1219.1, "word": " case,", 
"probability": 0.92431640625}, {"start": 1219.6, "end": 1219.72, "word": " if", "probability": 0.9541015625}, {"start": 1219.72, "end": 1219.92, "word": " this", "probability": 0.78662109375}, {"start": 1219.92, "end": 1220.48, "word": " statistic", "probability": 0.861328125}, {"start": 1220.48, "end": 1221.94, "word": " lies", "probability": 0.9140625}, {"start": 1221.94, "end": 1222.14, "word": " in", "probability": 0.93701171875}, {"start": 1222.14, "end": 1222.38, "word": " this", "probability": 0.9345703125}, {"start": 1222.38, "end": 1222.8, "word": " region", "probability": 0.94189453125}, {"start": 1222.8, "end": 1223.04, "word": " or", "probability": 0.759765625}, {"start": 1223.04, "end": 1223.18, "word": " the", "probability": 0.79052734375}, {"start": 1223.18, "end": 1223.46, "word": " other", "probability": 0.880859375}, {"start": 1223.46, "end": 1223.76, "word": " one,", "probability": 0.9189453125}, {"start": 1224.72, "end": 1225.06, "word": " we", "probability": 0.9423828125}, {"start": 1225.06, "end": 1225.28, "word": " have", "probability": 0.9482421875}, {"start": 1225.28, "end": 1225.42, "word": " to", "probability": 0.97265625}, {"start": 1225.42, "end": 1225.72, "word": " reject", "probability": 0.958984375}, {"start": 1225.72, "end": 1225.94, "word": " them.", "probability": 0.63134765625}], "temperature": 1.0}, {"id": 47, "seek": 125157, "start": 1226.59, "end": 1251.57, "text": " That means if we reject the hypothesis, if T stat is less than negative T alpha over 2, or if T stat is above or greater than T alpha over 2. So the same as we discussed before. That's for two-tailed test. Now for lower-tailed test, in this case, there is only one rejection region to the left side. 
It's minus T alpha.", "tokens": [663, 1355, 498, 321, 8248, 264, 17291, 11, 498, 314, 2219, 307, 1570, 813, 3671, 314, 8961, 670, 568, 11, 420, 498, 314, 2219, 307, 3673, 420, 5044, 813, 314, 8961, 670, 568, 13, 407, 264, 912, 382, 321, 7152, 949, 13, 663, 311, 337, 732, 12, 14430, 292, 1500, 13, 823, 337, 3126, 12, 14430, 292, 1500, 11, 294, 341, 1389, 11, 456, 307, 787, 472, 26044, 4458, 281, 264, 1411, 1252, 13, 467, 311, 3175, 314, 8961, 13], "avg_logprob": -0.23939042915532618, "compression_ratio": 1.641025641025641, "no_speech_prob": 0.0, "words": [{"start": 1226.59, "end": 1226.91, "word": " That", "probability": 0.689453125}, {"start": 1226.91, "end": 1227.29, "word": " means", "probability": 0.92822265625}, {"start": 1227.29, "end": 1227.83, "word": " if", "probability": 0.77197265625}, {"start": 1227.83, "end": 1228.05, "word": " we", "probability": 0.1572265625}, {"start": 1228.05, "end": 1228.89, "word": " reject", "probability": 0.87744140625}, {"start": 1228.89, "end": 1229.07, "word": " the", "probability": 0.7880859375}, {"start": 1229.07, "end": 1229.55, "word": " hypothesis,", "probability": 0.2222900390625}, {"start": 1230.19, "end": 1230.41, "word": " if", "probability": 0.8984375}, {"start": 1230.41, "end": 1230.61, "word": " T", "probability": 0.55615234375}, {"start": 1230.61, "end": 1230.95, "word": " stat", "probability": 0.341064453125}, {"start": 1230.95, "end": 1231.75, "word": " is", "probability": 0.9052734375}, {"start": 1231.75, "end": 1232.09, "word": " less", "probability": 0.9248046875}, {"start": 1232.09, "end": 1232.33, "word": " than", "probability": 0.9365234375}, {"start": 1232.33, "end": 1232.69, "word": " negative", "probability": 0.80224609375}, {"start": 1232.69, "end": 1232.91, "word": " T", "probability": 0.857421875}, {"start": 1232.91, "end": 1233.05, "word": " alpha", "probability": 0.7197265625}, {"start": 1233.05, "end": 1233.35, "word": " over", "probability": 0.91650390625}, {"start": 1233.35, "end": 1233.63, 
"word": " 2,", "probability": 0.491455078125}, {"start": 1233.85, "end": 1234.31, "word": " or", "probability": 0.94140625}, {"start": 1234.31, "end": 1234.51, "word": " if", "probability": 0.92138671875}, {"start": 1234.51, "end": 1234.83, "word": " T", "probability": 0.9189453125}, {"start": 1234.83, "end": 1235.25, "word": " stat", "probability": 0.76123046875}, {"start": 1235.25, "end": 1235.81, "word": " is", "probability": 0.6904296875}, {"start": 1235.81, "end": 1236.89, "word": " above", "probability": 0.86279296875}, {"start": 1236.89, "end": 1237.29, "word": " or", "probability": 0.73876953125}, {"start": 1237.29, "end": 1237.61, "word": " greater", "probability": 0.91845703125}, {"start": 1237.61, "end": 1237.89, "word": " than", "probability": 0.94775390625}, {"start": 1237.89, "end": 1238.09, "word": " T", "probability": 0.98291015625}, {"start": 1238.09, "end": 1238.27, "word": " alpha", "probability": 0.8662109375}, {"start": 1238.27, "end": 1238.51, "word": " over", "probability": 0.79150390625}, {"start": 1238.51, "end": 1238.67, "word": " 2.", "probability": 0.9365234375}, {"start": 1238.91, "end": 1239.09, "word": " So", "probability": 0.70068359375}, {"start": 1239.09, "end": 1239.21, "word": " the", "probability": 0.615234375}, {"start": 1239.21, "end": 1239.43, "word": " same", "probability": 0.91357421875}, {"start": 1239.43, "end": 1239.67, "word": " as", "probability": 0.95361328125}, {"start": 1239.67, "end": 1239.89, "word": " we", "probability": 0.95458984375}, {"start": 1239.89, "end": 1241.07, "word": " discussed", "probability": 0.88037109375}, {"start": 1241.07, "end": 1241.43, "word": " before.", "probability": 0.75732421875}, {"start": 1241.53, "end": 1241.73, "word": " That's", "probability": 0.913818359375}, {"start": 1241.73, "end": 1241.93, "word": " for", "probability": 0.7275390625}, {"start": 1241.93, "end": 1242.33, "word": " two", "probability": 0.78515625}, {"start": 1242.33, "end": 1242.63, "word": "-tailed", 
"probability": 0.7578125}, {"start": 1242.63, "end": 1242.85, "word": " test.", "probability": 0.6552734375}, {"start": 1243.43, "end": 1243.59, "word": " Now", "probability": 0.7919921875}, {"start": 1243.59, "end": 1243.85, "word": " for", "probability": 0.68310546875}, {"start": 1243.85, "end": 1244.35, "word": " lower", "probability": 0.84130859375}, {"start": 1244.35, "end": 1244.75, "word": "-tailed", "probability": 0.8258463541666666}, {"start": 1244.75, "end": 1245.15, "word": " test,", "probability": 0.8857421875}, {"start": 1245.83, "end": 1246.11, "word": " in", "probability": 0.86865234375}, {"start": 1246.11, "end": 1246.27, "word": " this", "probability": 0.9462890625}, {"start": 1246.27, "end": 1246.47, "word": " case,", "probability": 0.91357421875}, {"start": 1246.53, "end": 1246.65, "word": " there", "probability": 0.908203125}, {"start": 1246.65, "end": 1246.79, "word": " is", "probability": 0.88671875}, {"start": 1246.79, "end": 1247.01, "word": " only", "probability": 0.921875}, {"start": 1247.01, "end": 1247.27, "word": " one", "probability": 0.90380859375}, {"start": 1247.27, "end": 1247.65, "word": " rejection", "probability": 0.9755859375}, {"start": 1247.65, "end": 1247.95, "word": " region", "probability": 0.6953125}, {"start": 1247.95, "end": 1248.17, "word": " to", "probability": 0.85205078125}, {"start": 1248.17, "end": 1248.29, "word": " the", "probability": 0.9140625}, {"start": 1248.29, "end": 1248.49, "word": " left", "probability": 0.94970703125}, {"start": 1248.49, "end": 1248.87, "word": " side.", "probability": 0.83056640625}, {"start": 1250.29, "end": 1250.73, "word": " It's", "probability": 0.9736328125}, {"start": 1250.73, "end": 1251.11, "word": " minus", "probability": 0.97216796875}, {"start": 1251.11, "end": 1251.37, "word": " T", "probability": 0.94921875}, {"start": 1251.37, "end": 1251.57, "word": " alpha.", "probability": 0.87255859375}], "temperature": 1.0}, {"id": 48, "seek": 127444, "start": 1252.34, "end": 
1274.44, "text": " In this case, we reject the null hypothesis if the value of the statistic or the test statistic is smaller than negative T alpha. So we reject if T stat is smaller than minus T alpha. On the other side, if we are talking about a partial test. So your null hypothesis, I'm sorry, your alternative hypothesis always", "tokens": [682, 341, 1389, 11, 321, 8248, 264, 18184, 17291, 498, 264, 2158, 295, 264, 29588, 420, 264, 1500, 29588, 307, 4356, 813, 3671, 314, 8961, 13, 407, 321, 8248, 498, 314, 2219, 307, 4356, 813, 3175, 314, 8961, 13, 1282, 264, 661, 1252, 11, 498, 321, 366, 1417, 466, 257, 14641, 1500, 13, 407, 428, 18184, 17291, 11, 286, 478, 2597, 11, 428, 8535, 17291, 1009], "avg_logprob": -0.20942164757358495, "compression_ratio": 1.7740112994350283, "no_speech_prob": 0.0, "words": [{"start": 1252.34, "end": 1252.56, "word": " In", "probability": 0.72607421875}, {"start": 1252.56, "end": 1252.78, "word": " this", "probability": 0.947265625}, {"start": 1252.78, "end": 1252.98, "word": " case,", "probability": 0.9150390625}, {"start": 1253.0, "end": 1253.14, "word": " we", "probability": 0.9462890625}, {"start": 1253.14, "end": 1253.52, "word": " reject", "probability": 0.8740234375}, {"start": 1253.52, "end": 1253.7, "word": " the", "probability": 0.82470703125}, {"start": 1253.7, "end": 1253.84, "word": " null", "probability": 0.9296875}, {"start": 1253.84, "end": 1254.26, "word": " hypothesis", "probability": 0.81298828125}, {"start": 1254.26, "end": 1255.06, "word": " if", "probability": 0.82861328125}, {"start": 1255.06, "end": 1255.22, "word": " the", "probability": 0.919921875}, {"start": 1255.22, "end": 1255.5, "word": " value", "probability": 0.97900390625}, {"start": 1255.5, "end": 1255.66, "word": " of", "probability": 0.9638671875}, {"start": 1255.66, "end": 1255.76, "word": " the", "probability": 0.88818359375}, {"start": 1255.76, "end": 1256.26, "word": " statistic", "probability": 0.82861328125}, {"start": 1256.26, "end": 1257.16, 
"word": " or", "probability": 0.74169921875}, {"start": 1257.16, "end": 1257.34, "word": " the", "probability": 0.8251953125}, {"start": 1257.34, "end": 1257.58, "word": " test", "probability": 0.8642578125}, {"start": 1257.58, "end": 1258.28, "word": " statistic", "probability": 0.86083984375}, {"start": 1258.28, "end": 1259.04, "word": " is", "probability": 0.9462890625}, {"start": 1259.04, "end": 1259.46, "word": " smaller", "probability": 0.87255859375}, {"start": 1259.46, "end": 1259.76, "word": " than", "probability": 0.94482421875}, {"start": 1259.76, "end": 1260.06, "word": " negative", "probability": 0.73193359375}, {"start": 1260.06, "end": 1260.28, "word": " T", "probability": 0.47509765625}, {"start": 1260.28, "end": 1260.52, "word": " alpha.", "probability": 0.68896484375}, {"start": 1260.84, "end": 1261.0, "word": " So", "probability": 0.93359375}, {"start": 1261.0, "end": 1261.14, "word": " we", "probability": 0.57177734375}, {"start": 1261.14, "end": 1261.58, "word": " reject", "probability": 0.91455078125}, {"start": 1261.58, "end": 1262.32, "word": " if", "probability": 0.8916015625}, {"start": 1262.32, "end": 1262.5, "word": " T", "probability": 0.9130859375}, {"start": 1262.5, "end": 1262.8, "word": " stat", "probability": 0.2469482421875}, {"start": 1262.8, "end": 1262.94, "word": " is", "probability": 0.94287109375}, {"start": 1262.94, "end": 1263.2, "word": " smaller", "probability": 0.8798828125}, {"start": 1263.2, "end": 1263.58, "word": " than", "probability": 0.943359375}, {"start": 1263.58, "end": 1264.04, "word": " minus", "probability": 0.97216796875}, {"start": 1264.04, "end": 1264.22, "word": " T", "probability": 0.98095703125}, {"start": 1264.22, "end": 1264.52, "word": " alpha.", "probability": 0.896484375}, {"start": 1265.54, "end": 1265.74, "word": " On", "probability": 0.8974609375}, {"start": 1265.74, "end": 1265.84, "word": " the", "probability": 0.9228515625}, {"start": 1265.84, "end": 1266.0, "word": " other", "probability": 
0.89306640625}, {"start": 1266.0, "end": 1266.46, "word": " side,", "probability": 0.87548828125}, {"start": 1266.84, "end": 1267.0, "word": " if", "probability": 0.78369140625}, {"start": 1267.0, "end": 1267.12, "word": " we", "probability": 0.89208984375}, {"start": 1267.12, "end": 1267.22, "word": " are", "probability": 0.94287109375}, {"start": 1267.22, "end": 1267.56, "word": " talking", "probability": 0.84814453125}, {"start": 1267.56, "end": 1267.94, "word": " about", "probability": 0.91015625}, {"start": 1267.94, "end": 1268.12, "word": " a", "probability": 0.6572265625}, {"start": 1268.12, "end": 1268.38, "word": " partial", "probability": 0.51611328125}, {"start": 1268.38, "end": 1269.02, "word": " test.", "probability": 0.91845703125}, {"start": 1270.02, "end": 1270.16, "word": " So", "probability": 0.93701171875}, {"start": 1270.16, "end": 1270.4, "word": " your", "probability": 0.7783203125}, {"start": 1270.4, "end": 1270.7, "word": " null", "probability": 0.564453125}, {"start": 1270.7, "end": 1271.18, "word": " hypothesis,", "probability": 0.80322265625}, {"start": 1271.62, "end": 1271.86, "word": " I'm", "probability": 0.914306640625}, {"start": 1271.86, "end": 1272.06, "word": " sorry,", "probability": 0.8544921875}, {"start": 1272.16, "end": 1272.38, "word": " your", "probability": 0.8515625}, {"start": 1272.38, "end": 1272.88, "word": " alternative", "probability": 0.912109375}, {"start": 1272.88, "end": 1273.4, "word": " hypothesis", "probability": 0.82373046875}, {"start": 1273.4, "end": 1274.44, "word": " always", "probability": 0.70703125}], "temperature": 1.0}, {"id": 49, "seek": 129839, "start": 1275.75, "end": 1298.39, "text": " Look at the alternative hypothesis in order to determine the rejection region. So if it is greater than, it means you have the area to the right. I mean the rejection region should be to the right. 
If the alternative hypothesis is negative, I mean smaller than zero, it means the rejection region should be to the left side.", "tokens": [2053, 412, 264, 8535, 17291, 294, 1668, 281, 6997, 264, 26044, 4458, 13, 407, 498, 309, 307, 5044, 813, 11, 309, 1355, 291, 362, 264, 1859, 281, 264, 558, 13, 286, 914, 264, 26044, 4458, 820, 312, 281, 264, 558, 13, 759, 264, 8535, 17291, 307, 3671, 11, 286, 914, 4356, 813, 4018, 11, 309, 1355, 264, 26044, 4458, 820, 312, 281, 264, 1411, 1252, 13], "avg_logprob": -0.13910914490472026, "compression_ratio": 2.03125, "no_speech_prob": 0.0, "words": [{"start": 1275.75, "end": 1276.15, "word": " Look", "probability": 0.494384765625}, {"start": 1276.15, "end": 1276.43, "word": " at", "probability": 0.96044921875}, {"start": 1276.43, "end": 1276.61, "word": " the", "probability": 0.67236328125}, {"start": 1276.61, "end": 1277.31, "word": " alternative", "probability": 0.88818359375}, {"start": 1277.31, "end": 1277.95, "word": " hypothesis", "probability": 0.734375}, {"start": 1277.95, "end": 1278.29, "word": " in", "probability": 0.87353515625}, {"start": 1278.29, "end": 1278.51, "word": " order", "probability": 0.92919921875}, {"start": 1278.51, "end": 1278.99, "word": " to", "probability": 0.9677734375}, {"start": 1278.99, "end": 1279.61, "word": " determine", "probability": 0.9248046875}, {"start": 1279.61, "end": 1280.25, "word": " the", "probability": 0.900390625}, {"start": 1280.25, "end": 1280.67, "word": " rejection", "probability": 0.9501953125}, {"start": 1280.67, "end": 1281.09, "word": " region.", "probability": 0.93701171875}, {"start": 1281.89, "end": 1282.19, "word": " So", "probability": 0.81640625}, {"start": 1282.19, "end": 1282.37, "word": " if", "probability": 0.77490234375}, {"start": 1282.37, "end": 1282.51, "word": " it", "probability": 0.91015625}, {"start": 1282.51, "end": 1282.67, "word": " is", "probability": 0.845703125}, {"start": 1282.67, "end": 1283.51, "word": " greater", "probability": 0.87451171875}, 
{"start": 1283.51, "end": 1283.89, "word": " than,", "probability": 0.927734375}, {"start": 1283.95, "end": 1284.07, "word": " it", "probability": 0.890625}, {"start": 1284.07, "end": 1284.39, "word": " means", "probability": 0.935546875}, {"start": 1284.39, "end": 1284.61, "word": " you", "probability": 0.8828125}, {"start": 1284.61, "end": 1284.81, "word": " have", "probability": 0.94384765625}, {"start": 1284.81, "end": 1285.01, "word": " the", "probability": 0.8642578125}, {"start": 1285.01, "end": 1285.27, "word": " area", "probability": 0.8759765625}, {"start": 1285.27, "end": 1285.47, "word": " to", "probability": 0.962890625}, {"start": 1285.47, "end": 1285.63, "word": " the", "probability": 0.9189453125}, {"start": 1285.63, "end": 1285.83, "word": " right.", "probability": 0.916015625}, {"start": 1286.31, "end": 1286.47, "word": " I", "probability": 0.92236328125}, {"start": 1286.47, "end": 1286.59, "word": " mean", "probability": 0.96484375}, {"start": 1286.59, "end": 1286.81, "word": " the", "probability": 0.437255859375}, {"start": 1286.81, "end": 1287.19, "word": " rejection", "probability": 0.94287109375}, {"start": 1287.19, "end": 1287.71, "word": " region", "probability": 0.923828125}, {"start": 1287.71, "end": 1287.99, "word": " should", "probability": 0.96435546875}, {"start": 1287.99, "end": 1288.11, "word": " be", "probability": 0.9501953125}, {"start": 1288.11, "end": 1288.25, "word": " to", "probability": 0.95751953125}, {"start": 1288.25, "end": 1288.37, "word": " the", "probability": 0.9072265625}, {"start": 1288.37, "end": 1288.55, "word": " right.", "probability": 0.9130859375}, {"start": 1289.11, "end": 1289.51, "word": " If", "probability": 0.9560546875}, {"start": 1289.51, "end": 1289.87, "word": " the", "probability": 0.88427734375}, {"start": 1289.87, "end": 1291.21, "word": " alternative", "probability": 0.82470703125}, {"start": 1291.21, "end": 1291.85, "word": " hypothesis", "probability": 0.82568359375}, {"start": 1291.85, "end": 
1292.45, "word": " is", "probability": 0.9462890625}, {"start": 1292.45, "end": 1292.95, "word": " negative,", "probability": 0.82568359375}, {"start": 1293.69, "end": 1293.83, "word": " I", "probability": 0.9091796875}, {"start": 1293.83, "end": 1293.91, "word": " mean", "probability": 0.96337890625}, {"start": 1293.91, "end": 1294.23, "word": " smaller", "probability": 0.80517578125}, {"start": 1294.23, "end": 1294.49, "word": " than", "probability": 0.9423828125}, {"start": 1294.49, "end": 1294.79, "word": " zero,", "probability": 0.7041015625}, {"start": 1295.13, "end": 1295.49, "word": " it", "probability": 0.943359375}, {"start": 1295.49, "end": 1295.87, "word": " means", "probability": 0.93701171875}, {"start": 1295.87, "end": 1296.47, "word": " the", "probability": 0.8671875}, {"start": 1296.47, "end": 1296.81, "word": " rejection", "probability": 0.9580078125}, {"start": 1296.81, "end": 1297.15, "word": " region", "probability": 0.89013671875}, {"start": 1297.15, "end": 1297.41, "word": " should", "probability": 0.9677734375}, {"start": 1297.41, "end": 1297.57, "word": " be", "probability": 0.94921875}, {"start": 1297.57, "end": 1297.71, "word": " to", "probability": 0.95458984375}, {"start": 1297.71, "end": 1297.87, "word": " the", "probability": 0.91357421875}, {"start": 1297.87, "end": 1298.05, "word": " left", "probability": 0.951171875}, {"start": 1298.05, "end": 1298.39, "word": " side.", "probability": 0.82958984375}], "temperature": 1.0}, {"id": 50, "seek": 132758, "start": 1299.14, "end": 1327.58, "text": " So here, the alternative hypothesis, mu1 minus mu2, the difference is positive. That means the rejection region is to the right side. So we reject the null hypothesis if T statistic is greater than T alpha. But here, for the two-sided test or two-tailed test, they are two regions. I mean, they are two rejection regions because there is no direction under the alternative hypothesis. 
So alpha should be split in half.", "tokens": [407, 510, 11, 264, 8535, 17291, 11, 2992, 16, 3175, 2992, 17, 11, 264, 2649, 307, 3353, 13, 663, 1355, 264, 26044, 4458, 307, 281, 264, 558, 1252, 13, 407, 321, 8248, 264, 18184, 17291, 498, 314, 29588, 307, 5044, 813, 314, 8961, 13, 583, 510, 11, 337, 264, 732, 12, 30941, 1500, 420, 732, 12, 14430, 292, 1500, 11, 436, 366, 732, 10682, 13, 286, 914, 11, 436, 366, 732, 26044, 10682, 570, 456, 307, 572, 3513, 833, 264, 8535, 17291, 13, 407, 8961, 820, 312, 7472, 294, 1922, 13], "avg_logprob": -0.20023777546442073, "compression_ratio": 1.837719298245614, "no_speech_prob": 0.0, "words": [{"start": 1299.14, "end": 1299.58, "word": " So", "probability": 0.87158203125}, {"start": 1299.58, "end": 1299.88, "word": " here,", "probability": 0.77001953125}, {"start": 1300.82, "end": 1301.16, "word": " the", "probability": 0.755859375}, {"start": 1301.16, "end": 1301.68, "word": " alternative", "probability": 0.7919921875}, {"start": 1301.68, "end": 1302.26, "word": " hypothesis,", "probability": 0.80712890625}, {"start": 1302.8, "end": 1303.14, "word": " mu1", "probability": 0.5338134765625}, {"start": 1303.14, "end": 1303.48, "word": " minus", "probability": 0.947265625}, {"start": 1303.48, "end": 1303.82, "word": " mu2,", "probability": 0.9677734375}, {"start": 1303.9, "end": 1303.98, "word": " the", "probability": 0.90185546875}, {"start": 1303.98, "end": 1304.28, "word": " difference", "probability": 0.87353515625}, {"start": 1304.28, "end": 1304.48, "word": " is", "probability": 0.92529296875}, {"start": 1304.48, "end": 1304.88, "word": " positive.", "probability": 0.9306640625}, {"start": 1305.5, "end": 1305.82, "word": " That", "probability": 0.8515625}, {"start": 1305.82, "end": 1306.22, "word": " means", "probability": 0.93408203125}, {"start": 1306.22, "end": 1306.46, "word": " the", "probability": 0.8125}, {"start": 1306.46, "end": 1306.8, "word": " rejection", "probability": 0.94384765625}, {"start": 1306.8, "end": 
1307.16, "word": " region", "probability": 0.72998046875}, {"start": 1307.16, "end": 1307.42, "word": " is", "probability": 0.919921875}, {"start": 1307.42, "end": 1307.58, "word": " to", "probability": 0.92822265625}, {"start": 1307.58, "end": 1307.72, "word": " the", "probability": 0.9130859375}, {"start": 1307.72, "end": 1307.94, "word": " right", "probability": 0.91650390625}, {"start": 1307.94, "end": 1308.26, "word": " side.", "probability": 0.8095703125}, {"start": 1308.76, "end": 1308.96, "word": " So", "probability": 0.95361328125}, {"start": 1308.96, "end": 1309.14, "word": " we", "probability": 0.85205078125}, {"start": 1309.14, "end": 1309.54, "word": " reject", "probability": 0.8798828125}, {"start": 1309.54, "end": 1309.7, "word": " the", "probability": 0.68798828125}, {"start": 1309.7, "end": 1309.84, "word": " null", "probability": 0.9892578125}, {"start": 1309.84, "end": 1310.28, "word": " hypothesis", "probability": 0.81201171875}, {"start": 1310.28, "end": 1310.64, "word": " if", "probability": 0.83154296875}, {"start": 1310.64, "end": 1310.84, "word": " T", "probability": 0.421630859375}, {"start": 1310.84, "end": 1311.2, "word": " statistic", "probability": 0.6162109375}, {"start": 1311.2, "end": 1311.6, "word": " is", "probability": 0.947265625}, {"start": 1311.6, "end": 1311.94, "word": " greater", "probability": 0.9169921875}, {"start": 1311.94, "end": 1312.24, "word": " than", "probability": 0.9443359375}, {"start": 1312.24, "end": 1312.42, "word": " T", "probability": 0.146240234375}, {"start": 1312.42, "end": 1312.56, "word": " alpha.", "probability": 0.3896484375}, {"start": 1313.54, "end": 1314.12, "word": " But", "probability": 0.9296875}, {"start": 1314.12, "end": 1314.34, "word": " here,", "probability": 0.79638671875}, {"start": 1314.4, "end": 1314.58, "word": " for", "probability": 0.947265625}, {"start": 1314.58, "end": 1314.78, "word": " the", "probability": 0.9140625}, {"start": 1314.78, "end": 1314.96, "word": " two", 
"probability": 0.91259765625}, {"start": 1314.96, "end": 1315.26, "word": "-sided", "probability": 0.815185546875}, {"start": 1315.26, "end": 1315.58, "word": " test", "probability": 0.857421875}, {"start": 1315.58, "end": 1315.8, "word": " or", "probability": 0.68603515625}, {"start": 1315.8, "end": 1316.02, "word": " two", "probability": 0.8994140625}, {"start": 1316.02, "end": 1316.36, "word": "-tailed", "probability": 0.8995768229166666}, {"start": 1316.36, "end": 1316.62, "word": " test,", "probability": 0.89501953125}, {"start": 1318.38, "end": 1318.7, "word": " they", "probability": 0.494873046875}, {"start": 1318.7, "end": 1318.92, "word": " are", "probability": 0.93310546875}, {"start": 1318.92, "end": 1319.12, "word": " two", "probability": 0.9267578125}, {"start": 1319.12, "end": 1319.52, "word": " regions.", "probability": 0.978515625}, {"start": 1319.68, "end": 1319.78, "word": " I", "probability": 0.92138671875}, {"start": 1319.78, "end": 1319.96, "word": " mean,", "probability": 0.966796875}, {"start": 1320.08, "end": 1320.26, "word": " they", "probability": 0.798828125}, {"start": 1320.26, "end": 1320.42, "word": " are", "probability": 0.93505859375}, {"start": 1320.42, "end": 1320.62, "word": " two", "probability": 0.92626953125}, {"start": 1320.62, "end": 1321.0, "word": " rejection", "probability": 0.95263671875}, {"start": 1321.0, "end": 1321.42, "word": " regions", "probability": 0.9677734375}, {"start": 1321.42, "end": 1321.92, "word": " because", "probability": 0.479248046875}, {"start": 1321.92, "end": 1322.68, "word": " there", "probability": 0.90576171875}, {"start": 1322.68, "end": 1322.82, "word": " is", "probability": 0.89892578125}, {"start": 1322.82, "end": 1323.02, "word": " no", "probability": 0.951171875}, {"start": 1323.02, "end": 1323.44, "word": " direction", "probability": 0.9658203125}, {"start": 1323.44, "end": 1323.86, "word": " under", "probability": 0.8779296875}, {"start": 1323.86, "end": 1324.06, "word": " the", 
"probability": 0.8857421875}, {"start": 1324.06, "end": 1324.5, "word": " alternative", "probability": 0.8994140625}, {"start": 1324.5, "end": 1324.96, "word": " hypothesis.", "probability": 0.84033203125}, {"start": 1325.44, "end": 1325.72, "word": " So", "probability": 0.958984375}, {"start": 1325.72, "end": 1326.06, "word": " alpha", "probability": 0.90478515625}, {"start": 1326.06, "end": 1326.4, "word": " should", "probability": 0.96875}, {"start": 1326.4, "end": 1326.6, "word": " be", "probability": 0.94384765625}, {"start": 1326.6, "end": 1326.94, "word": " split", "probability": 0.57568359375}, {"start": 1326.94, "end": 1327.24, "word": " in", "probability": 0.818359375}, {"start": 1327.24, "end": 1327.58, "word": " half.", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 51, "seek": 135702, "start": 1328.06, "end": 1357.02, "text": " So alpha over two to the right and alpha over two to the left side. So this scheme actually mimics the same or similar to what we have discussed in chapter one. Any questions? 
So again, we have to formulate or state carefully null and alternate hypothesis for both cases two and one-tailed test.", "tokens": [407, 8961, 670, 732, 281, 264, 558, 293, 8961, 670, 732, 281, 264, 1411, 1252, 13, 407, 341, 12232, 767, 12247, 1167, 264, 912, 420, 2531, 281, 437, 321, 362, 7152, 294, 7187, 472, 13, 2639, 1651, 30, 407, 797, 11, 321, 362, 281, 47881, 420, 1785, 7500, 18184, 293, 18873, 17291, 337, 1293, 3331, 732, 293, 472, 12, 14430, 292, 1500, 13], "avg_logprob": -0.23864746000617743, "compression_ratio": 1.608695652173913, "no_speech_prob": 0.0, "words": [{"start": 1328.06, "end": 1328.34, "word": " So", "probability": 0.8056640625}, {"start": 1328.34, "end": 1328.58, "word": " alpha", "probability": 0.47900390625}, {"start": 1328.58, "end": 1328.82, "word": " over", "probability": 0.87353515625}, {"start": 1328.82, "end": 1328.96, "word": " two", "probability": 0.447998046875}, {"start": 1328.96, "end": 1329.16, "word": " to", "probability": 0.953125}, {"start": 1329.16, "end": 1329.3, "word": " the", "probability": 0.91455078125}, {"start": 1329.3, "end": 1329.5, "word": " right", "probability": 0.91259765625}, {"start": 1329.5, "end": 1330.12, "word": " and", "probability": 0.62158203125}, {"start": 1330.12, "end": 1330.42, "word": " alpha", "probability": 0.92822265625}, {"start": 1330.42, "end": 1330.72, "word": " over", "probability": 0.92138671875}, {"start": 1330.72, "end": 1330.9, "word": " two", "probability": 0.93017578125}, {"start": 1330.9, "end": 1331.08, "word": " to", "probability": 0.95458984375}, {"start": 1331.08, "end": 1331.2, "word": " the", "probability": 0.9140625}, {"start": 1331.2, "end": 1331.4, "word": " left", "probability": 0.9404296875}, {"start": 1331.4, "end": 1331.74, "word": " side.", "probability": 0.85107421875}, {"start": 1332.38, "end": 1332.62, "word": " So", "probability": 0.9345703125}, {"start": 1332.62, "end": 1332.9, "word": " this", "probability": 0.8828125}, {"start": 1332.9, "end": 1333.24, "word": 
" scheme", "probability": 0.90869140625}, {"start": 1333.24, "end": 1333.68, "word": " actually", "probability": 0.82470703125}, {"start": 1333.68, "end": 1335.2, "word": " mimics", "probability": 0.8818359375}, {"start": 1335.2, "end": 1335.48, "word": " the", "probability": 0.417724609375}, {"start": 1335.48, "end": 1336.38, "word": " same", "probability": 0.61474609375}, {"start": 1336.38, "end": 1336.66, "word": " or", "probability": 0.63232421875}, {"start": 1336.66, "end": 1337.12, "word": " similar", "probability": 0.9521484375}, {"start": 1337.12, "end": 1338.08, "word": " to", "probability": 0.9453125}, {"start": 1338.08, "end": 1339.84, "word": " what", "probability": 0.63818359375}, {"start": 1339.84, "end": 1340.08, "word": " we", "probability": 0.962890625}, {"start": 1340.08, "end": 1340.38, "word": " have", "probability": 0.84228515625}, {"start": 1340.38, "end": 1341.2, "word": " discussed", "probability": 0.8916015625}, {"start": 1341.2, "end": 1341.46, "word": " in", "probability": 0.93212890625}, {"start": 1341.46, "end": 1341.74, "word": " chapter", "probability": 0.54736328125}, {"start": 1341.74, "end": 1342.14, "word": " one.", "probability": 0.5673828125}, {"start": 1342.86, "end": 1343.14, "word": " Any", "probability": 0.90087890625}, {"start": 1343.14, "end": 1343.48, "word": " questions?", "probability": 0.595703125}, {"start": 1344.86, "end": 1345.24, "word": " So", "probability": 0.92822265625}, {"start": 1345.24, "end": 1345.88, "word": " again,", "probability": 0.81103515625}, {"start": 1346.88, "end": 1348.76, "word": " we", "probability": 0.95556640625}, {"start": 1348.76, "end": 1348.98, "word": " have", "probability": 0.9462890625}, {"start": 1348.98, "end": 1349.14, "word": " to", "probability": 0.97216796875}, {"start": 1349.14, "end": 1349.54, "word": " formulate", "probability": 0.982421875}, {"start": 1349.54, "end": 1349.92, "word": " or", "probability": 0.78369140625}, {"start": 1349.92, "end": 1350.3, "word": " state", 
"probability": 0.93359375}, {"start": 1350.3, "end": 1351.02, "word": " carefully", "probability": 0.8408203125}, {"start": 1351.02, "end": 1352.44, "word": " null", "probability": 0.822265625}, {"start": 1352.44, "end": 1352.68, "word": " and", "probability": 0.55908203125}, {"start": 1352.68, "end": 1352.98, "word": " alternate", "probability": 0.397216796875}, {"start": 1352.98, "end": 1353.52, "word": " hypothesis", "probability": 0.58544921875}, {"start": 1353.52, "end": 1354.44, "word": " for", "probability": 0.92822265625}, {"start": 1354.44, "end": 1354.76, "word": " both", "probability": 0.90380859375}, {"start": 1354.76, "end": 1355.18, "word": " cases", "probability": 0.91796875}, {"start": 1355.18, "end": 1355.52, "word": " two", "probability": 0.5234375}, {"start": 1355.52, "end": 1356.1, "word": " and", "probability": 0.9208984375}, {"start": 1356.1, "end": 1356.42, "word": " one", "probability": 0.92724609375}, {"start": 1356.42, "end": 1356.7, "word": "-tailed", "probability": 0.6019694010416666}, {"start": 1356.7, "end": 1357.02, "word": " test.", "probability": 0.83203125}], "temperature": 1.0}, {"id": 52, "seek": 138549, "start": 1359.23, "end": 1385.49, "text": " And the rejection regions, I think, is straightforward. Now let's see what are the assumptions in this case. If the two sigmas are unknown, and we assume they are equal. So we assume both sigmas are unknown. I mean, both population standard deviations are unknown. And we assume they are equal. 
The assumptions are.", "tokens": [400, 264, 26044, 10682, 11, 286, 519, 11, 307, 15325, 13, 823, 718, 311, 536, 437, 366, 264, 17695, 294, 341, 1389, 13, 759, 264, 732, 4556, 3799, 366, 9841, 11, 293, 321, 6552, 436, 366, 2681, 13, 407, 321, 6552, 1293, 4556, 3799, 366, 9841, 13, 286, 914, 11, 1293, 4415, 3832, 31219, 763, 366, 9841, 13, 400, 321, 6552, 436, 366, 2681, 13, 440, 17695, 366, 13], "avg_logprob": -0.21004464051553182, "compression_ratio": 1.8057142857142856, "no_speech_prob": 0.0, "words": [{"start": 1359.2300000000002, "end": 1359.8300000000002, "word": " And", "probability": 0.5849609375}, {"start": 1359.8300000000002, "end": 1360.43, "word": " the", "probability": 0.83740234375}, {"start": 1360.43, "end": 1360.93, "word": " rejection", "probability": 0.93310546875}, {"start": 1360.93, "end": 1361.35, "word": " regions,", "probability": 0.433349609375}, {"start": 1361.53, "end": 1361.55, "word": " I", "probability": 0.9892578125}, {"start": 1361.55, "end": 1361.89, "word": " think,", "probability": 0.9111328125}, {"start": 1362.63, "end": 1362.91, "word": " is", "probability": 0.75732421875}, {"start": 1362.91, "end": 1363.43, "word": " straightforward.", "probability": 0.87353515625}, {"start": 1364.65, "end": 1365.05, "word": " Now", "probability": 0.9541015625}, {"start": 1365.05, "end": 1365.51, "word": " let's", "probability": 0.7470703125}, {"start": 1365.51, "end": 1365.69, "word": " see", "probability": 0.9169921875}, {"start": 1365.69, "end": 1365.95, "word": " what", "probability": 0.9033203125}, {"start": 1365.95, "end": 1366.15, "word": " are", "probability": 0.923828125}, {"start": 1366.15, "end": 1366.27, "word": " the", "probability": 0.912109375}, {"start": 1366.27, "end": 1366.77, "word": " assumptions", "probability": 0.96142578125}, {"start": 1366.77, "end": 1368.43, "word": " in", "probability": 0.7578125}, {"start": 1368.43, "end": 1368.65, "word": " this", "probability": 0.94384765625}, {"start": 1368.65, "end": 1368.97, "word": 
" case.", "probability": 0.9189453125}, {"start": 1369.01, "end": 1369.27, "word": " If", "probability": 0.962890625}, {"start": 1369.27, "end": 1369.47, "word": " the", "probability": 0.9150390625}, {"start": 1369.47, "end": 1369.65, "word": " two", "probability": 0.916015625}, {"start": 1369.65, "end": 1369.99, "word": " sigmas", "probability": 0.7098388671875}, {"start": 1369.99, "end": 1370.25, "word": " are", "probability": 0.94287109375}, {"start": 1370.25, "end": 1370.53, "word": " unknown,", "probability": 0.892578125}, {"start": 1371.33, "end": 1371.95, "word": " and", "probability": 0.8994140625}, {"start": 1371.95, "end": 1372.13, "word": " we", "probability": 0.95947265625}, {"start": 1372.13, "end": 1372.59, "word": " assume", "probability": 0.91357421875}, {"start": 1372.59, "end": 1372.83, "word": " they", "probability": 0.8896484375}, {"start": 1372.83, "end": 1372.99, "word": " are", "probability": 0.9384765625}, {"start": 1372.99, "end": 1373.27, "word": " equal.", "probability": 0.91015625}, {"start": 1374.21, "end": 1374.45, "word": " So", "probability": 0.94921875}, {"start": 1374.45, "end": 1374.71, "word": " we", "probability": 0.77099609375}, {"start": 1374.71, "end": 1375.21, "word": " assume", "probability": 0.91357421875}, {"start": 1375.21, "end": 1376.75, "word": " both", "probability": 0.833984375}, {"start": 1376.75, "end": 1377.33, "word": " sigmas", "probability": 0.774169921875}, {"start": 1377.33, "end": 1377.87, "word": " are", "probability": 0.65625}, {"start": 1377.87, "end": 1378.15, "word": " unknown.", "probability": 0.64892578125}, {"start": 1378.85, "end": 1379.05, "word": " I", "probability": 0.9814453125}, {"start": 1379.05, "end": 1379.23, "word": " mean,", "probability": 0.96533203125}, {"start": 1379.39, "end": 1379.79, "word": " both", "probability": 0.66552734375}, {"start": 1379.79, "end": 1380.53, "word": " population", "probability": 0.6552734375}, {"start": 1380.53, "end": 1380.93, "word": " standard", 
"probability": 0.65087890625}, {"start": 1380.93, "end": 1381.37, "word": " deviations", "probability": 0.951171875}, {"start": 1381.37, "end": 1381.63, "word": " are", "probability": 0.9423828125}, {"start": 1381.63, "end": 1381.95, "word": " unknown.", "probability": 0.90625}, {"start": 1382.39, "end": 1382.63, "word": " And", "probability": 0.95556640625}, {"start": 1382.63, "end": 1382.81, "word": " we", "probability": 0.9599609375}, {"start": 1382.81, "end": 1383.11, "word": " assume", "probability": 0.91259765625}, {"start": 1383.11, "end": 1383.29, "word": " they", "probability": 0.8876953125}, {"start": 1383.29, "end": 1383.45, "word": " are", "probability": 0.94140625}, {"start": 1383.45, "end": 1383.75, "word": " equal.", "probability": 0.90380859375}, {"start": 1384.43, "end": 1384.65, "word": " The", "probability": 0.87451171875}, {"start": 1384.65, "end": 1385.07, "word": " assumptions", "probability": 0.95361328125}, {"start": 1385.07, "end": 1385.49, "word": " are.", "probability": 0.93896484375}], "temperature": 1.0}, {"id": 53, "seek": 141229, "start": 1386.13, "end": 1412.29, "text": " First, samples should be drawn randomly and independently. So samples are randomly and independently drawn. So we have to select random samples and they are independent. Assumption number one. The second one, populations are normally distributed. 
So we have to assume the population is normal or both sample sizes are at least 30.", "tokens": [2386, 11, 10938, 820, 312, 10117, 16979, 293, 21761, 13, 407, 10938, 366, 16979, 293, 21761, 10117, 13, 407, 321, 362, 281, 3048, 4974, 10938, 293, 436, 366, 6695, 13, 6281, 449, 1695, 1230, 472, 13, 440, 1150, 472, 11, 12822, 366, 5646, 12631, 13, 407, 321, 362, 281, 6552, 264, 4415, 307, 2710, 420, 1293, 6889, 11602, 366, 412, 1935, 2217, 13], "avg_logprob": -0.18676758441142738, "compression_ratio": 1.8186813186813187, "no_speech_prob": 0.0, "words": [{"start": 1386.13, "end": 1386.49, "word": " First,", "probability": 0.7626953125}, {"start": 1386.65, "end": 1386.99, "word": " samples", "probability": 0.8427734375}, {"start": 1386.99, "end": 1387.25, "word": " should", "probability": 0.96337890625}, {"start": 1387.25, "end": 1387.91, "word": " be", "probability": 0.95361328125}, {"start": 1387.91, "end": 1388.65, "word": " drawn", "probability": 0.9384765625}, {"start": 1388.65, "end": 1389.15, "word": " randomly", "probability": 0.82177734375}, {"start": 1389.15, "end": 1390.13, "word": " and", "probability": 0.88720703125}, {"start": 1390.13, "end": 1390.55, "word": " independently.", "probability": 0.59326171875}, {"start": 1391.45, "end": 1391.69, "word": " So", "probability": 0.779296875}, {"start": 1391.69, "end": 1392.05, "word": " samples", "probability": 0.5888671875}, {"start": 1392.05, "end": 1392.53, "word": " are", "probability": 0.9423828125}, {"start": 1392.53, "end": 1392.89, "word": " randomly", "probability": 0.8134765625}, {"start": 1392.89, "end": 1393.33, "word": " and", "probability": 0.912109375}, {"start": 1393.33, "end": 1393.89, "word": " independently", "probability": 0.93359375}, {"start": 1393.89, "end": 1394.31, "word": " drawn.", "probability": 0.9560546875}, {"start": 1394.67, "end": 1394.83, "word": " So", "probability": 0.89306640625}, {"start": 1394.83, "end": 1394.91, "word": " we", "probability": 0.58154296875}, {"start": 
1394.91, "end": 1395.01, "word": " have", "probability": 0.94140625}, {"start": 1395.01, "end": 1395.13, "word": " to", "probability": 0.966796875}, {"start": 1395.13, "end": 1395.49, "word": " select", "probability": 0.81884765625}, {"start": 1395.49, "end": 1395.85, "word": " random", "probability": 0.8046875}, {"start": 1395.85, "end": 1396.33, "word": " samples", "probability": 0.86767578125}, {"start": 1396.33, "end": 1397.13, "word": " and", "probability": 0.43994140625}, {"start": 1397.13, "end": 1397.27, "word": " they", "probability": 0.89794921875}, {"start": 1397.27, "end": 1397.45, "word": " are", "probability": 0.9365234375}, {"start": 1397.45, "end": 1397.83, "word": " independent.", "probability": 0.89111328125}, {"start": 1399.53, "end": 1400.25, "word": " Assumption", "probability": 0.7465006510416666}, {"start": 1400.25, "end": 1400.45, "word": " number", "probability": 0.92041015625}, {"start": 1400.45, "end": 1400.69, "word": " one.", "probability": 0.82568359375}, {"start": 1401.45, "end": 1401.65, "word": " The", "probability": 0.83203125}, {"start": 1401.65, "end": 1401.93, "word": " second", "probability": 0.91455078125}, {"start": 1401.93, "end": 1402.19, "word": " one,", "probability": 0.92724609375}, {"start": 1402.23, "end": 1402.73, "word": " populations", "probability": 0.9140625}, {"start": 1402.73, "end": 1403.13, "word": " are", "probability": 0.93212890625}, {"start": 1403.13, "end": 1403.51, "word": " normally", "probability": 0.90283203125}, {"start": 1403.51, "end": 1404.13, "word": " distributed.", "probability": 0.9248046875}, {"start": 1405.09, "end": 1405.37, "word": " So", "probability": 0.9384765625}, {"start": 1405.37, "end": 1405.53, "word": " we", "probability": 0.9267578125}, {"start": 1405.53, "end": 1405.71, "word": " have", "probability": 0.94189453125}, {"start": 1405.71, "end": 1405.85, "word": " to", "probability": 0.9697265625}, {"start": 1405.85, "end": 1406.33, "word": " assume", "probability": 0.90625}, 
{"start": 1406.33, "end": 1407.07, "word": " the", "probability": 0.84375}, {"start": 1407.07, "end": 1407.43, "word": " population", "probability": 0.94140625}, {"start": 1407.43, "end": 1407.67, "word": " is", "probability": 0.94384765625}, {"start": 1407.67, "end": 1408.05, "word": " normal", "probability": 0.87646484375}, {"start": 1408.05, "end": 1408.95, "word": " or", "probability": 0.61865234375}, {"start": 1408.95, "end": 1410.23, "word": " both", "probability": 0.8779296875}, {"start": 1410.23, "end": 1410.65, "word": " sample", "probability": 0.86572265625}, {"start": 1410.65, "end": 1411.09, "word": " sizes", "probability": 0.90966796875}, {"start": 1411.09, "end": 1411.49, "word": " are", "probability": 0.93798828125}, {"start": 1411.49, "end": 1411.65, "word": " at", "probability": 0.966796875}, {"start": 1411.65, "end": 1411.93, "word": " least", "probability": 0.95703125}, {"start": 1411.93, "end": 1412.29, "word": " 30.", "probability": 0.450439453125}], "temperature": 1.0}, {"id": 54, "seek": 143751, "start": 1413.97, "end": 1437.51, "text": " So, in order to apply the central interface, so similar to the one we had discussed, so here either the populations, I mean both of them, normally distributed, abnormally distributed, or both ends, or both sample sizes, greater than 30, greater than or equal to, so at least", "tokens": [407, 11, 294, 1668, 281, 3079, 264, 5777, 728, 11771, 384, 11, 370, 2531, 281, 264, 472, 321, 632, 7152, 11, 370, 510, 2139, 264, 12822, 11, 286, 914, 1293, 295, 552, 11, 5646, 12631, 11, 47104, 379, 12631, 11, 420, 1293, 5314, 11, 420, 1293, 6889, 11602, 11, 5044, 813, 2217, 11, 5044, 813, 420, 2681, 281, 11, 370, 412, 1935], "avg_logprob": -0.3645833229261731, "compression_ratio": 1.6369047619047619, "no_speech_prob": 0.0, "words": [{"start": 1413.97, "end": 1414.43, "word": " So,", "probability": 0.75830078125}, {"start": 1414.73, "end": 1414.97, "word": " in", "probability": 0.92138671875}, {"start": 1414.97, "end": 
1415.23, "word": " order", "probability": 0.92626953125}, {"start": 1415.23, "end": 1415.43, "word": " to", "probability": 0.97119140625}, {"start": 1415.43, "end": 1415.73, "word": " apply", "probability": 0.9375}, {"start": 1415.73, "end": 1415.95, "word": " the", "probability": 0.77001953125}, {"start": 1415.95, "end": 1416.25, "word": " central", "probability": 0.703125}, {"start": 1416.25, "end": 1416.69, "word": " interface,", "probability": 0.5191090901692709}, {"start": 1417.45, "end": 1417.67, "word": " so", "probability": 0.53857421875}, {"start": 1417.67, "end": 1418.13, "word": " similar", "probability": 0.76025390625}, {"start": 1418.13, "end": 1418.37, "word": " to", "probability": 0.96728515625}, {"start": 1418.37, "end": 1418.49, "word": " the", "probability": 0.826171875}, {"start": 1418.49, "end": 1418.65, "word": " one", "probability": 0.9306640625}, {"start": 1418.65, "end": 1418.77, "word": " we", "probability": 0.923828125}, {"start": 1418.77, "end": 1418.91, "word": " had", "probability": 0.74462890625}, {"start": 1418.91, "end": 1419.27, "word": " discussed,", "probability": 0.8994140625}, {"start": 1420.21, "end": 1420.47, "word": " so", "probability": 0.794921875}, {"start": 1420.47, "end": 1420.77, "word": " here", "probability": 0.814453125}, {"start": 1420.77, "end": 1421.27, "word": " either", "probability": 0.72998046875}, {"start": 1421.27, "end": 1421.49, "word": " the", "probability": 0.7919921875}, {"start": 1421.49, "end": 1422.11, "word": " populations,", "probability": 0.9560546875}, {"start": 1423.03, "end": 1423.23, "word": " I", "probability": 0.96533203125}, {"start": 1423.23, "end": 1423.33, "word": " mean", "probability": 0.966796875}, {"start": 1423.33, "end": 1423.67, "word": " both", "probability": 0.75146484375}, {"start": 1423.67, "end": 1423.83, "word": " of", "probability": 0.94580078125}, {"start": 1423.83, "end": 1424.05, "word": " them,", "probability": 0.892578125}, {"start": 1424.89, "end": 1425.31, "word": " 
normally", "probability": 0.7451171875}, {"start": 1425.31, "end": 1425.89, "word": " distributed,", "probability": 0.90478515625}, {"start": 1426.01, "end": 1426.51, "word": " abnormally", "probability": 0.95361328125}, {"start": 1426.51, "end": 1426.97, "word": " distributed,", "probability": 0.93212890625}, {"start": 1427.45, "end": 1427.87, "word": " or", "probability": 0.96044921875}, {"start": 1427.87, "end": 1428.87, "word": " both", "probability": 0.90283203125}, {"start": 1428.87, "end": 1429.27, "word": " ends,", "probability": 0.869140625}, {"start": 1429.47, "end": 1429.57, "word": " or", "probability": 0.94091796875}, {"start": 1429.57, "end": 1429.93, "word": " both", "probability": 0.90625}, {"start": 1429.93, "end": 1430.43, "word": " sample", "probability": 0.8798828125}, {"start": 1430.43, "end": 1431.03, "word": " sizes,", "probability": 0.9033203125}, {"start": 1431.37, "end": 1431.65, "word": " greater", "probability": 0.45751953125}, {"start": 1431.65, "end": 1432.13, "word": " than", "probability": 0.94677734375}, {"start": 1432.13, "end": 1432.61, "word": " 30,", "probability": 0.47021484375}, {"start": 1433.67, "end": 1435.29, "word": " greater", "probability": 0.880859375}, {"start": 1435.29, "end": 1435.79, "word": " than", "probability": 0.93994140625}, {"start": 1435.79, "end": 1436.41, "word": " or", "probability": 0.1492919921875}, {"start": 1436.41, "end": 1436.63, "word": " equal", "probability": 0.87158203125}, {"start": 1436.63, "end": 1436.89, "word": " to,", "probability": 0.94775390625}, {"start": 1436.99, "end": 1437.13, "word": " so", "probability": 0.464111328125}, {"start": 1437.13, "end": 1437.29, "word": " at", "probability": 0.94384765625}, {"start": 1437.29, "end": 1437.51, "word": " least", "probability": 0.953125}], "temperature": 1.0}, {"id": 55, "seek": 146336, "start": 1439.42, "end": 1463.36, "text": " In addition to that, we have to assume that population variances are unknown, but we assume they are equal. 
So the assumptions are samples are randomly selected and independent, populations are normally distributed, or the sample sizes are large enough in order to apply the central limit theorem.", "tokens": [682, 4500, 281, 300, 11, 321, 362, 281, 6552, 300, 4415, 1374, 21518, 366, 9841, 11, 457, 321, 6552, 436, 366, 2681, 13, 407, 264, 17695, 366, 10938, 366, 16979, 8209, 293, 6695, 11, 12822, 366, 5646, 12631, 11, 420, 264, 6889, 11602, 366, 2416, 1547, 294, 1668, 281, 3079, 264, 5777, 4948, 20904, 13], "avg_logprob": -0.13581194781831332, "compression_ratio": 1.6464088397790055, "no_speech_prob": 0.0, "words": [{"start": 1439.42, "end": 1439.7, "word": " In", "probability": 0.85205078125}, {"start": 1439.7, "end": 1440.04, "word": " addition", "probability": 0.9501953125}, {"start": 1440.04, "end": 1440.28, "word": " to", "probability": 0.96484375}, {"start": 1440.28, "end": 1440.6, "word": " that,", "probability": 0.9375}, {"start": 1440.78, "end": 1440.88, "word": " we", "probability": 0.95166015625}, {"start": 1440.88, "end": 1441.08, "word": " have", "probability": 0.92333984375}, {"start": 1441.08, "end": 1441.26, "word": " to", "probability": 0.97119140625}, {"start": 1441.26, "end": 1442.12, "word": " assume", "probability": 0.89990234375}, {"start": 1442.12, "end": 1442.74, "word": " that", "probability": 0.9345703125}, {"start": 1442.74, "end": 1443.76, "word": " population", "probability": 0.9423828125}, {"start": 1443.76, "end": 1444.34, "word": " variances", "probability": 0.910888671875}, {"start": 1444.34, "end": 1444.86, "word": " are", "probability": 0.9423828125}, {"start": 1444.86, "end": 1445.32, "word": " unknown,", "probability": 0.8740234375}, {"start": 1445.68, "end": 1446.06, "word": " but", "probability": 0.92236328125}, {"start": 1446.06, "end": 1446.24, "word": " we", "probability": 0.95068359375}, {"start": 1446.24, "end": 1446.66, "word": " assume", "probability": 0.9013671875}, {"start": 1446.66, "end": 1446.9, "word": " they", 
"probability": 0.87060546875}, {"start": 1446.9, "end": 1447.28, "word": " are", "probability": 0.9365234375}, {"start": 1447.28, "end": 1448.2, "word": " equal.", "probability": 0.9013671875}, {"start": 1448.72, "end": 1448.88, "word": " So", "probability": 0.9287109375}, {"start": 1448.88, "end": 1449.04, "word": " the", "probability": 0.71533203125}, {"start": 1449.04, "end": 1449.46, "word": " assumptions", "probability": 0.9482421875}, {"start": 1449.46, "end": 1450.16, "word": " are", "probability": 0.93994140625}, {"start": 1450.16, "end": 1451.16, "word": " samples", "probability": 0.458251953125}, {"start": 1451.16, "end": 1451.46, "word": " are", "probability": 0.93896484375}, {"start": 1451.46, "end": 1451.82, "word": " randomly", "probability": 0.849609375}, {"start": 1451.82, "end": 1452.7, "word": " selected", "probability": 0.88720703125}, {"start": 1452.7, "end": 1453.84, "word": " and", "probability": 0.8720703125}, {"start": 1453.84, "end": 1454.28, "word": " independent,", "probability": 0.873046875}, {"start": 1455.36, "end": 1455.92, "word": " populations", "probability": 0.95166015625}, {"start": 1455.92, "end": 1456.4, "word": " are", "probability": 0.94287109375}, {"start": 1456.4, "end": 1456.88, "word": " normally", "probability": 0.9052734375}, {"start": 1456.88, "end": 1457.54, "word": " distributed,", "probability": 0.93603515625}, {"start": 1458.2, "end": 1458.52, "word": " or", "probability": 0.96142578125}, {"start": 1458.52, "end": 1458.78, "word": " the", "probability": 0.828125}, {"start": 1458.78, "end": 1459.02, "word": " sample", "probability": 0.86767578125}, {"start": 1459.02, "end": 1459.44, "word": " sizes", "probability": 0.89501953125}, {"start": 1459.44, "end": 1459.76, "word": " are", "probability": 0.95166015625}, {"start": 1459.76, "end": 1460.14, "word": " large", "probability": 0.96337890625}, {"start": 1460.14, "end": 1460.56, "word": " enough", "probability": 0.8603515625}, {"start": 1460.56, "end": 1461.46, 
"word": " in", "probability": 0.8974609375}, {"start": 1461.46, "end": 1461.66, "word": " order", "probability": 0.92431640625}, {"start": 1461.66, "end": 1461.86, "word": " to", "probability": 0.9697265625}, {"start": 1461.86, "end": 1462.22, "word": " apply", "probability": 0.92529296875}, {"start": 1462.22, "end": 1462.46, "word": " the", "probability": 0.89892578125}, {"start": 1462.46, "end": 1462.74, "word": " central", "probability": 0.67333984375}, {"start": 1462.74, "end": 1462.96, "word": " limit", "probability": 0.9541015625}, {"start": 1462.96, "end": 1463.36, "word": " theorem.", "probability": 0.88818359375}], "temperature": 1.0}, {"id": 56, "seek": 149178, "start": 1464.38, "end": 1491.78, "text": " In addition to that, population balances are unknown, but we assume to be equal. These are the classical assumptions for performing a t-test, when sigma 1 and sigma 2 are unknown, but we assume they are equal. Any questions? Next, let's see how can we state the test statistic.", "tokens": [682, 4500, 281, 300, 11, 4415, 33993, 366, 9841, 11, 457, 321, 6552, 281, 312, 2681, 13, 1981, 366, 264, 13735, 17695, 337, 10205, 257, 256, 12, 31636, 11, 562, 12771, 502, 293, 12771, 568, 366, 9841, 11, 457, 321, 6552, 436, 366, 2681, 13, 2639, 1651, 30, 3087, 11, 718, 311, 536, 577, 393, 321, 1785, 264, 1500, 29588, 13], "avg_logprob": -0.17489919763418935, "compression_ratio": 1.5706214689265536, "no_speech_prob": 0.0, "words": [{"start": 1464.38, "end": 1464.7, "word": " In", "probability": 0.81640625}, {"start": 1464.7, "end": 1465.0, "word": " addition", "probability": 0.94921875}, {"start": 1465.0, "end": 1465.2, "word": " to", "probability": 0.958984375}, {"start": 1465.2, "end": 1465.4, "word": " that,", "probability": 0.7255859375}, {"start": 1465.62, "end": 1465.92, "word": " population", "probability": 0.68701171875}, {"start": 1465.92, "end": 1466.32, "word": " balances", "probability": 0.5810546875}, {"start": 1466.32, "end": 1466.62, "word": " are", 
"probability": 0.935546875}, {"start": 1466.62, "end": 1466.9, "word": " unknown,", "probability": 0.892578125}, {"start": 1467.12, "end": 1467.2, "word": " but", "probability": 0.919921875}, {"start": 1467.2, "end": 1467.36, "word": " we", "probability": 0.953125}, {"start": 1467.36, "end": 1467.76, "word": " assume", "probability": 0.86767578125}, {"start": 1467.76, "end": 1468.0, "word": " to", "probability": 0.9384765625}, {"start": 1468.0, "end": 1468.26, "word": " be", "probability": 0.95556640625}, {"start": 1468.26, "end": 1469.3, "word": " equal.", "probability": 0.8974609375}, {"start": 1469.84, "end": 1470.16, "word": " These", "probability": 0.919921875}, {"start": 1470.16, "end": 1470.54, "word": " are", "probability": 0.939453125}, {"start": 1470.54, "end": 1471.88, "word": " the", "probability": 0.90234375}, {"start": 1471.88, "end": 1473.08, "word": " classical", "probability": 0.9697265625}, {"start": 1473.08, "end": 1473.74, "word": " assumptions", "probability": 0.95654296875}, {"start": 1473.74, "end": 1474.34, "word": " for", "probability": 0.94873046875}, {"start": 1474.34, "end": 1475.1, "word": " performing", "probability": 0.79931640625}, {"start": 1475.1, "end": 1476.08, "word": " a", "probability": 0.56787109375}, {"start": 1476.08, "end": 1476.16, "word": " t", "probability": 0.6337890625}, {"start": 1476.16, "end": 1476.56, "word": "-test,", "probability": 0.870849609375}, {"start": 1477.12, "end": 1477.34, "word": " when", "probability": 0.7099609375}, {"start": 1477.34, "end": 1477.72, "word": " sigma", "probability": 0.7001953125}, {"start": 1477.72, "end": 1478.1, "word": " 1", "probability": 0.6181640625}, {"start": 1478.1, "end": 1478.28, "word": " and", "probability": 0.93603515625}, {"start": 1478.28, "end": 1478.46, "word": " sigma", "probability": 0.93408203125}, {"start": 1478.46, "end": 1478.62, "word": " 2", "probability": 0.98828125}, {"start": 1478.62, "end": 1478.78, "word": " are", "probability": 0.93798828125}, 
{"start": 1478.78, "end": 1479.02, "word": " unknown,", "probability": 0.9052734375}, {"start": 1479.2, "end": 1479.32, "word": " but", "probability": 0.92626953125}, {"start": 1479.32, "end": 1479.48, "word": " we", "probability": 0.9638671875}, {"start": 1479.48, "end": 1479.88, "word": " assume", "probability": 0.9091796875}, {"start": 1479.88, "end": 1480.16, "word": " they", "probability": 0.8828125}, {"start": 1480.16, "end": 1481.46, "word": " are", "probability": 0.9453125}, {"start": 1481.46, "end": 1481.78, "word": " equal.", "probability": 0.88818359375}, {"start": 1483.12, "end": 1483.56, "word": " Any", "probability": 0.7744140625}, {"start": 1483.56, "end": 1483.86, "word": " questions?", "probability": 0.60009765625}, {"start": 1486.82, "end": 1487.42, "word": " Next,", "probability": 0.9296875}, {"start": 1487.58, "end": 1487.88, "word": " let's", "probability": 0.958984375}, {"start": 1487.88, "end": 1488.02, "word": " see", "probability": 0.921875}, {"start": 1488.02, "end": 1488.14, "word": " how", "probability": 0.91796875}, {"start": 1488.14, "end": 1488.38, "word": " can", "probability": 0.76123046875}, {"start": 1488.38, "end": 1488.68, "word": " we", "probability": 0.9609375}, {"start": 1488.68, "end": 1489.62, "word": " state", "probability": 0.94189453125}, {"start": 1489.62, "end": 1490.44, "word": " the", "probability": 0.89111328125}, {"start": 1490.44, "end": 1491.06, "word": " test", "probability": 0.82275390625}, {"start": 1491.06, "end": 1491.78, "word": " statistic.", "probability": 0.83740234375}], "temperature": 1.0}, {"id": 57, "seek": 151705, "start": 1492.93, "end": 1517.05, "text": " Again, we are talking about testing for the difference between mu1 and mu2, so hypothesis for mu1 minus mu2 with both sigmas, sigma1 and sigma2 unknown and assumed equal. 
The test statistic in this case is similar to the one we discussed, but", "tokens": [3764, 11, 321, 366, 1417, 466, 4997, 337, 264, 2649, 1296, 2992, 16, 293, 2992, 17, 11, 370, 17291, 337, 2992, 16, 3175, 2992, 17, 365, 1293, 4556, 3799, 11, 12771, 16, 293, 12771, 17, 9841, 293, 15895, 2681, 13, 440, 1500, 29588, 294, 341, 1389, 307, 2531, 281, 264, 472, 321, 7152, 11, 457], "avg_logprob": -0.19977678544819355, "compression_ratio": 1.4727272727272727, "no_speech_prob": 0.0, "words": [{"start": 1492.93, "end": 1493.29, "word": " Again,", "probability": 0.8505859375}, {"start": 1493.35, "end": 1493.43, "word": " we", "probability": 0.96240234375}, {"start": 1493.43, "end": 1493.55, "word": " are", "probability": 0.91650390625}, {"start": 1493.55, "end": 1493.87, "word": " talking", "probability": 0.84716796875}, {"start": 1493.87, "end": 1494.27, "word": " about", "probability": 0.904296875}, {"start": 1494.27, "end": 1494.87, "word": " testing", "probability": 0.85107421875}, {"start": 1494.87, "end": 1495.69, "word": " for", "probability": 0.921875}, {"start": 1495.69, "end": 1495.85, "word": " the", "probability": 0.92431640625}, {"start": 1495.85, "end": 1496.27, "word": " difference", "probability": 0.8583984375}, {"start": 1496.27, "end": 1497.31, "word": " between", "probability": 0.89013671875}, {"start": 1497.31, "end": 1497.85, "word": " mu1", "probability": 0.512451171875}, {"start": 1497.85, "end": 1498.19, "word": " and", "probability": 0.94970703125}, {"start": 1498.19, "end": 1498.65, "word": " mu2,", "probability": 0.95556640625}, {"start": 1498.95, "end": 1499.15, "word": " so", "probability": 0.91455078125}, {"start": 1499.15, "end": 1500.01, "word": " hypothesis", "probability": 0.4482421875}, {"start": 1500.01, "end": 1500.43, "word": " for", "probability": 0.828125}, {"start": 1500.43, "end": 1500.85, "word": " mu1", "probability": 0.955078125}, {"start": 1500.85, "end": 1501.13, "word": " minus", "probability": 0.83544921875}, {"start": 
1501.13, "end": 1501.63, "word": " mu2", "probability": 0.967041015625}, {"start": 1501.63, "end": 1502.35, "word": " with", "probability": 0.703125}, {"start": 1502.35, "end": 1502.83, "word": " both", "probability": 0.9130859375}, {"start": 1502.83, "end": 1503.45, "word": " sigmas,", "probability": 0.740966796875}, {"start": 1504.01, "end": 1504.47, "word": " sigma1", "probability": 0.804931640625}, {"start": 1504.47, "end": 1504.59, "word": " and", "probability": 0.93896484375}, {"start": 1504.59, "end": 1504.95, "word": " sigma2", "probability": 0.963134765625}, {"start": 1504.95, "end": 1505.43, "word": " unknown", "probability": 0.79052734375}, {"start": 1505.43, "end": 1505.91, "word": " and", "probability": 0.74462890625}, {"start": 1505.91, "end": 1506.43, "word": " assumed", "probability": 0.88916015625}, {"start": 1506.43, "end": 1506.83, "word": " equal.", "probability": 0.88232421875}, {"start": 1511.05, "end": 1511.87, "word": " The", "probability": 0.63232421875}, {"start": 1511.87, "end": 1512.51, "word": " test", "probability": 0.6689453125}, {"start": 1512.51, "end": 1512.97, "word": " statistic", "probability": 0.78662109375}, {"start": 1512.97, "end": 1513.15, "word": " in", "probability": 0.9013671875}, {"start": 1513.15, "end": 1513.35, "word": " this", "probability": 0.94384765625}, {"start": 1513.35, "end": 1513.73, "word": " case", "probability": 0.916015625}, {"start": 1513.73, "end": 1515.21, "word": " is", "probability": 0.92578125}, {"start": 1515.21, "end": 1515.59, "word": " similar", "probability": 0.9619140625}, {"start": 1515.59, "end": 1515.77, "word": " to", "probability": 0.96826171875}, {"start": 1515.77, "end": 1515.91, "word": " the", "probability": 0.90673828125}, {"start": 1515.91, "end": 1516.05, "word": " one", "probability": 0.931640625}, {"start": 1516.05, "end": 1516.21, "word": " we", "probability": 0.94482421875}, {"start": 1516.21, "end": 1516.65, "word": " discussed,", "probability": 0.85205078125}, {"start": 
1516.85, "end": 1517.05, "word": " but", "probability": 0.931640625}], "temperature": 1.0}, {"id": 58, "seek": 154639, "start": 1517.65, "end": 1546.39, "text": " There is a little difference in these two. The first one was this statistic. It was x bar minus the mean divided by s over root n. That's okay if we are testing for if 0, mu equal, for example, any value. Three or four equivalents. Here we are talking about the difference confident, sorry, testing or test.", "tokens": [821, 307, 257, 707, 2649, 294, 613, 732, 13, 440, 700, 472, 390, 341, 29588, 13, 467, 390, 2031, 2159, 3175, 264, 914, 6666, 538, 262, 670, 5593, 297, 13, 663, 311, 1392, 498, 321, 366, 4997, 337, 498, 1958, 11, 2992, 2681, 11, 337, 1365, 11, 604, 2158, 13, 6244, 420, 1451, 9052, 791, 13, 1692, 321, 366, 1417, 466, 264, 2649, 6679, 11, 2597, 11, 4997, 420, 1500, 13], "avg_logprob": -0.3335503604676988, "compression_ratio": 1.5555555555555556, "no_speech_prob": 0.0, "words": [{"start": 1517.65, "end": 1517.91, "word": " There", "probability": 0.17529296875}, {"start": 1517.91, "end": 1518.11, "word": " is", "probability": 0.7431640625}, {"start": 1518.11, "end": 1518.29, "word": " a", "probability": 0.85888671875}, {"start": 1518.29, "end": 1518.51, "word": " little", "probability": 0.7197265625}, {"start": 1518.51, "end": 1519.15, "word": " difference", "probability": 0.841796875}, {"start": 1519.15, "end": 1519.93, "word": " in", "probability": 0.5546875}, {"start": 1519.93, "end": 1520.17, "word": " these", "probability": 0.84228515625}, {"start": 1520.17, "end": 1520.43, "word": " two.", "probability": 0.8974609375}, {"start": 1520.87, "end": 1521.11, "word": " The", "probability": 0.8564453125}, {"start": 1521.11, "end": 1521.37, "word": " first", "probability": 0.87451171875}, {"start": 1521.37, "end": 1521.61, "word": " one", "probability": 0.91064453125}, {"start": 1521.61, "end": 1522.01, "word": " was", "probability": 0.8916015625}, {"start": 1522.01, "end": 1522.51, "word": " 
this", "probability": 0.48779296875}, {"start": 1522.51, "end": 1523.13, "word": " statistic.", "probability": 0.71923828125}, {"start": 1523.91, "end": 1524.51, "word": " It", "probability": 0.65576171875}, {"start": 1524.51, "end": 1524.57, "word": " was", "probability": 0.91650390625}, {"start": 1524.57, "end": 1524.79, "word": " x", "probability": 0.55712890625}, {"start": 1524.79, "end": 1525.03, "word": " bar", "probability": 0.609375}, {"start": 1525.03, "end": 1525.37, "word": " minus", "probability": 0.96533203125}, {"start": 1525.37, "end": 1525.55, "word": " the", "probability": 0.59814453125}, {"start": 1525.55, "end": 1525.75, "word": " mean", "probability": 0.96826171875}, {"start": 1525.75, "end": 1526.67, "word": " divided", "probability": 0.53076171875}, {"start": 1526.67, "end": 1526.89, "word": " by", "probability": 0.9697265625}, {"start": 1526.89, "end": 1527.19, "word": " s", "probability": 0.412109375}, {"start": 1527.19, "end": 1527.37, "word": " over", "probability": 0.89404296875}, {"start": 1527.37, "end": 1527.63, "word": " root", "probability": 0.94970703125}, {"start": 1527.63, "end": 1527.87, "word": " n.", "probability": 0.83154296875}, {"start": 1530.33, "end": 1530.93, "word": " That's", "probability": 0.9248046875}, {"start": 1530.93, "end": 1531.27, "word": " okay", "probability": 0.744140625}, {"start": 1531.27, "end": 1531.83, "word": " if", "probability": 0.91943359375}, {"start": 1531.83, "end": 1532.15, "word": " we", "probability": 0.95166015625}, {"start": 1532.15, "end": 1532.33, "word": " are", "probability": 0.93359375}, {"start": 1532.33, "end": 1532.81, "word": " testing", "probability": 0.865234375}, {"start": 1532.81, "end": 1533.35, "word": " for", "probability": 0.92626953125}, {"start": 1533.35, "end": 1534.17, "word": " if", "probability": 0.50830078125}, {"start": 1534.17, "end": 1534.57, "word": " 0,", "probability": 0.568359375}, {"start": 1534.73, "end": 1534.97, "word": " mu", "probability": 0.6796875}, 
{"start": 1534.97, "end": 1535.39, "word": " equal,", "probability": 0.6708984375}, {"start": 1535.61, "end": 1535.75, "word": " for", "probability": 0.9541015625}, {"start": 1535.75, "end": 1536.17, "word": " example,", "probability": 0.970703125}, {"start": 1536.51, "end": 1536.81, "word": " any", "probability": 0.90673828125}, {"start": 1536.81, "end": 1537.15, "word": " value.", "probability": 0.951171875}, {"start": 1538.29, "end": 1538.57, "word": " Three", "probability": 0.68115234375}, {"start": 1538.57, "end": 1538.77, "word": " or", "probability": 0.92724609375}, {"start": 1538.77, "end": 1538.97, "word": " four", "probability": 0.94189453125}, {"start": 1538.97, "end": 1539.55, "word": " equivalents.", "probability": 0.470703125}, {"start": 1541.19, "end": 1541.79, "word": " Here", "probability": 0.83154296875}, {"start": 1541.79, "end": 1541.95, "word": " we", "probability": 0.69921875}, {"start": 1541.95, "end": 1542.13, "word": " are", "probability": 0.93896484375}, {"start": 1542.13, "end": 1542.53, "word": " talking", "probability": 0.86767578125}, {"start": 1542.53, "end": 1542.85, "word": " about", "probability": 0.90234375}, {"start": 1542.85, "end": 1543.05, "word": " the", "probability": 0.865234375}, {"start": 1543.05, "end": 1543.59, "word": " difference", "probability": 0.88671875}, {"start": 1543.59, "end": 1544.71, "word": " confident,", "probability": 0.2486572265625}, {"start": 1544.95, "end": 1545.17, "word": " sorry,", "probability": 0.64453125}, {"start": 1545.35, "end": 1545.77, "word": " testing", "probability": 0.8583984375}, {"start": 1545.77, "end": 1546.03, "word": " or", "probability": 0.54638671875}, {"start": 1546.03, "end": 1546.39, "word": " test.", "probability": 0.6259765625}], "temperature": 1.0}, {"id": 59, "seek": 157140, "start": 1547.56, "end": 1571.4, "text": " for Mu 1 minus Mu. So my D-set step equals. For one sample, we have only point is symmetric. X1 is a point is symmetric for Mu. 
But for when we are talking about", "tokens": [337, 15601, 502, 3175, 15601, 13, 407, 452, 413, 12, 3854, 1823, 6915, 13, 1171, 472, 6889, 11, 321, 362, 787, 935, 307, 32330, 13, 1783, 16, 307, 257, 935, 307, 32330, 337, 15601, 13, 583, 337, 562, 321, 366, 1417, 466], "avg_logprob": -0.4545785008474838, "compression_ratio": 1.3170731707317074, "no_speech_prob": 0.0, "words": [{"start": 1547.5600000000002, "end": 1548.18, "word": " for", "probability": 0.296630859375}, {"start": 1548.18, "end": 1548.8, "word": " Mu", "probability": 0.334228515625}, {"start": 1548.8, "end": 1549.0, "word": " 1", "probability": 0.443603515625}, {"start": 1549.0, "end": 1549.4, "word": " minus", "probability": 0.822265625}, {"start": 1549.4, "end": 1549.6, "word": " Mu.", "probability": 0.7822265625}, {"start": 1552.5, "end": 1553.12, "word": " So", "probability": 0.716796875}, {"start": 1553.12, "end": 1554.62, "word": " my", "probability": 0.5234375}, {"start": 1554.62, "end": 1555.2, "word": " D", "probability": 0.2427978515625}, {"start": 1555.2, "end": 1555.44, "word": "-set", "probability": 0.3883056640625}, {"start": 1555.44, "end": 1555.96, "word": " step", "probability": 0.85693359375}, {"start": 1555.96, "end": 1558.44, "word": " equals.", "probability": 0.87060546875}, {"start": 1562.86, "end": 1563.48, "word": " For", "probability": 0.94091796875}, {"start": 1563.48, "end": 1563.76, "word": " one", "probability": 0.93115234375}, {"start": 1563.76, "end": 1564.02, "word": " sample,", "probability": 0.8857421875}, {"start": 1564.16, "end": 1564.18, "word": " we", "probability": 0.87548828125}, {"start": 1564.18, "end": 1564.36, "word": " have", "probability": 0.90478515625}, {"start": 1564.36, "end": 1564.76, "word": " only", "probability": 0.84423828125}, {"start": 1564.76, "end": 1565.58, "word": " point", "probability": 0.83154296875}, {"start": 1565.58, "end": 1565.8, "word": " is", "probability": 0.248291015625}, {"start": 1565.8, "end": 1566.02, "word": " symmetric.", 
"probability": 0.492919921875}, {"start": 1566.12, "end": 1566.34, "word": " X1", "probability": 0.6357421875}, {"start": 1566.34, "end": 1566.5, "word": " is", "probability": 0.90478515625}, {"start": 1566.5, "end": 1566.6, "word": " a", "probability": 0.76806640625}, {"start": 1566.6, "end": 1566.8, "word": " point", "probability": 0.9765625}, {"start": 1566.8, "end": 1567.04, "word": " is", "probability": 0.326416015625}, {"start": 1567.04, "end": 1567.4, "word": " symmetric", "probability": 0.77783203125}, {"start": 1567.4, "end": 1568.0, "word": " for", "probability": 0.87841796875}, {"start": 1568.0, "end": 1568.26, "word": " Mu.", "probability": 0.96142578125}, {"start": 1569.2, "end": 1569.7, "word": " But", "probability": 0.9384765625}, {"start": 1569.7, "end": 1570.06, "word": " for", "probability": 0.57421875}, {"start": 1570.06, "end": 1570.32, "word": " when", "probability": 0.5732421875}, {"start": 1570.32, "end": 1570.42, "word": " we", "probability": 0.947265625}, {"start": 1570.42, "end": 1570.58, "word": " are", "probability": 0.93603515625}, {"start": 1570.58, "end": 1570.92, "word": " talking", "probability": 0.85693359375}, {"start": 1570.92, "end": 1571.4, "word": " about", "probability": 0.9091796875}], "temperature": 1.0}, {"id": 60, "seek": 159716, "start": 1572.68, "end": 1597.16, "text": " The difference between two populations means the point estimate is x1 bar minus x2 bar. So here I should have x1 bar minus x2 bar. So this is the first term in this formula, minus. Here we have minus mu. 
But for the new scenario, we have x0.", "tokens": [440, 2649, 1296, 732, 12822, 1355, 264, 935, 12539, 307, 2031, 16, 2159, 3175, 2031, 17, 2159, 13, 407, 510, 286, 820, 362, 2031, 16, 2159, 3175, 2031, 17, 2159, 13, 407, 341, 307, 264, 700, 1433, 294, 341, 8513, 11, 3175, 13, 1692, 321, 362, 3175, 2992, 13, 583, 337, 264, 777, 9005, 11, 321, 362, 2031, 15, 13], "avg_logprob": -0.17456455308882918, "compression_ratio": 1.5714285714285714, "no_speech_prob": 0.0, "words": [{"start": 1572.68, "end": 1572.94, "word": " The", "probability": 0.6806640625}, {"start": 1572.94, "end": 1573.34, "word": " difference", "probability": 0.8642578125}, {"start": 1573.34, "end": 1573.68, "word": " between", "probability": 0.87646484375}, {"start": 1573.68, "end": 1573.9, "word": " two", "probability": 0.86572265625}, {"start": 1573.9, "end": 1574.36, "word": " populations", "probability": 0.71142578125}, {"start": 1574.36, "end": 1574.82, "word": " means", "probability": 0.859375}, {"start": 1574.82, "end": 1575.36, "word": " the", "probability": 0.6630859375}, {"start": 1575.36, "end": 1575.64, "word": " point", "probability": 0.953125}, {"start": 1575.64, "end": 1576.18, "word": " estimate", "probability": 0.927734375}, {"start": 1576.18, "end": 1577.28, "word": " is", "probability": 0.9130859375}, {"start": 1577.28, "end": 1577.92, "word": " x1", "probability": 0.599365234375}, {"start": 1577.92, "end": 1578.16, "word": " bar", "probability": 0.8818359375}, {"start": 1578.16, "end": 1578.5, "word": " minus", "probability": 0.94921875}, {"start": 1578.5, "end": 1578.92, "word": " x2", "probability": 0.99169921875}, {"start": 1578.92, "end": 1579.14, "word": " bar.", "probability": 0.94970703125}, {"start": 1579.38, "end": 1579.56, "word": " So", "probability": 0.87646484375}, {"start": 1579.56, "end": 1579.76, "word": " here", "probability": 0.74951171875}, {"start": 1579.76, "end": 1580.22, "word": " I", "probability": 0.75244140625}, {"start": 1580.22, "end": 1580.54, "word": " 
should", "probability": 0.962890625}, {"start": 1580.54, "end": 1580.94, "word": " have", "probability": 0.94873046875}, {"start": 1580.94, "end": 1582.06, "word": " x1", "probability": 0.979736328125}, {"start": 1582.06, "end": 1582.3, "word": " bar", "probability": 0.94482421875}, {"start": 1582.3, "end": 1582.78, "word": " minus", "probability": 0.98388671875}, {"start": 1582.78, "end": 1584.36, "word": " x2", "probability": 0.9951171875}, {"start": 1584.36, "end": 1584.56, "word": " bar.", "probability": 0.939453125}, {"start": 1585.04, "end": 1585.24, "word": " So", "probability": 0.69384765625}, {"start": 1585.24, "end": 1585.4, "word": " this", "probability": 0.7998046875}, {"start": 1585.4, "end": 1585.82, "word": " is", "probability": 0.9296875}, {"start": 1585.82, "end": 1585.94, "word": " the", "probability": 0.87939453125}, {"start": 1585.94, "end": 1586.22, "word": " first", "probability": 0.8828125}, {"start": 1586.22, "end": 1586.6, "word": " term", "probability": 0.8857421875}, {"start": 1586.6, "end": 1587.4, "word": " in", "probability": 0.93212890625}, {"start": 1587.4, "end": 1587.74, "word": " this", "probability": 0.94140625}, {"start": 1587.74, "end": 1588.5, "word": " formula,", "probability": 0.8818359375}, {"start": 1588.68, "end": 1589.02, "word": " minus.", "probability": 0.9677734375}, {"start": 1590.1, "end": 1590.54, "word": " Here", "probability": 0.85009765625}, {"start": 1590.54, "end": 1590.68, "word": " we", "probability": 0.876953125}, {"start": 1590.68, "end": 1590.86, "word": " have", "probability": 0.9501953125}, {"start": 1590.86, "end": 1591.22, "word": " minus", "probability": 0.9814453125}, {"start": 1591.22, "end": 1591.52, "word": " mu.", "probability": 0.77392578125}, {"start": 1593.4, "end": 1594.08, "word": " But", "probability": 0.94287109375}, {"start": 1594.08, "end": 1594.42, "word": " for", "probability": 0.91259765625}, {"start": 1594.42, "end": 1594.64, "word": " the", "probability": 0.9111328125}, {"start": 
1594.64, "end": 1594.84, "word": " new", "probability": 0.91552734375}, {"start": 1594.84, "end": 1595.3, "word": " scenario,", "probability": 0.90283203125}, {"start": 1596.0, "end": 1596.26, "word": " we", "probability": 0.9521484375}, {"start": 1596.26, "end": 1596.6, "word": " have", "probability": 0.94189453125}, {"start": 1596.6, "end": 1597.16, "word": " x0.", "probability": 0.541259765625}], "temperature": 1.0}, {"id": 61, "seek": 162672, "start": 1599.56, "end": 1626.72, "text": " Mu1 minus Mu2 equals zero. So here, Mu1 minus Mu2. In most cases, we assume the population means are under zero. There is no difference between these two population means. So we are assuming Mu1 minus Mu2 equals zero. So it means this term cancels. If we assume there is no difference between these two population means,", "tokens": [15601, 16, 3175, 15601, 17, 6915, 4018, 13, 407, 510, 11, 15601, 16, 3175, 15601, 17, 13, 682, 881, 3331, 11, 321, 6552, 264, 4415, 1355, 366, 833, 4018, 13, 821, 307, 572, 2649, 1296, 613, 732, 4415, 1355, 13, 407, 321, 366, 11926, 15601, 16, 3175, 15601, 17, 6915, 4018, 13, 407, 309, 1355, 341, 1433, 393, 66, 1625, 13, 759, 321, 6552, 456, 307, 572, 2649, 1296, 613, 732, 4415, 1355, 11], "avg_logprob": -0.1812500003973643, "compression_ratio": 2.0709677419354837, "no_speech_prob": 0.0, "words": [{"start": 1599.56, "end": 1600.14, "word": " Mu1", "probability": 0.38580322265625}, {"start": 1600.14, "end": 1600.66, "word": " minus", "probability": 0.87451171875}, {"start": 1600.66, "end": 1601.1, "word": " Mu2", "probability": 0.901123046875}, {"start": 1601.1, "end": 1601.46, "word": " equals", "probability": 0.79736328125}, {"start": 1601.46, "end": 1601.78, "word": " zero.", "probability": 0.5634765625}, {"start": 1602.28, "end": 1602.42, "word": " So", "probability": 0.64453125}, {"start": 1602.42, "end": 1602.6, "word": " here,", "probability": 0.654296875}, {"start": 1602.72, "end": 1603.26, "word": " Mu1", "probability": 0.954345703125}, 
{"start": 1603.26, "end": 1603.96, "word": " minus", "probability": 0.98583984375}, {"start": 1603.96, "end": 1604.36, "word": " Mu2.", "probability": 0.8994140625}, {"start": 1605.52, "end": 1605.98, "word": " In", "probability": 0.92041015625}, {"start": 1605.98, "end": 1606.22, "word": " most", "probability": 0.8896484375}, {"start": 1606.22, "end": 1606.7, "word": " cases,", "probability": 0.92822265625}, {"start": 1607.32, "end": 1607.48, "word": " we", "probability": 0.95751953125}, {"start": 1607.48, "end": 1608.04, "word": " assume", "probability": 0.89990234375}, {"start": 1608.04, "end": 1609.48, "word": " the", "probability": 0.84228515625}, {"start": 1609.48, "end": 1609.96, "word": " population", "probability": 0.9609375}, {"start": 1609.96, "end": 1610.34, "word": " means", "probability": 0.8935546875}, {"start": 1610.34, "end": 1610.72, "word": " are", "probability": 0.93896484375}, {"start": 1610.72, "end": 1612.04, "word": " under", "probability": 0.88330078125}, {"start": 1612.04, "end": 1612.48, "word": " zero.", "probability": 0.8818359375}, {"start": 1612.72, "end": 1612.96, "word": " There", "probability": 0.77099609375}, {"start": 1612.96, "end": 1613.1, "word": " is", "probability": 0.92138671875}, {"start": 1613.1, "end": 1613.26, "word": " no", "probability": 0.95068359375}, {"start": 1613.26, "end": 1613.62, "word": " difference", "probability": 0.8662109375}, {"start": 1613.62, "end": 1613.92, "word": " between", "probability": 0.87255859375}, {"start": 1613.92, "end": 1614.18, "word": " these", "probability": 0.85400390625}, {"start": 1614.18, "end": 1614.34, "word": " two", "probability": 0.92724609375}, {"start": 1614.34, "end": 1614.76, "word": " population", "probability": 0.93505859375}, {"start": 1614.76, "end": 1615.06, "word": " means.", "probability": 0.90673828125}, {"start": 1615.48, "end": 1615.68, "word": " So", "probability": 0.95849609375}, {"start": 1615.68, "end": 1615.84, "word": " we", "probability": 0.767578125}, 
{"start": 1615.84, "end": 1615.96, "word": " are", "probability": 0.93408203125}, {"start": 1615.96, "end": 1616.54, "word": " assuming", "probability": 0.88916015625}, {"start": 1616.54, "end": 1617.36, "word": " Mu1", "probability": 0.85400390625}, {"start": 1617.36, "end": 1617.64, "word": " minus", "probability": 0.986328125}, {"start": 1617.64, "end": 1618.0, "word": " Mu2", "probability": 0.9833984375}, {"start": 1618.0, "end": 1618.32, "word": " equals", "probability": 0.93017578125}, {"start": 1618.32, "end": 1618.62, "word": " zero.", "probability": 0.89794921875}, {"start": 1619.0, "end": 1619.12, "word": " So", "probability": 0.94970703125}, {"start": 1619.12, "end": 1619.3, "word": " it", "probability": 0.87841796875}, {"start": 1619.3, "end": 1619.5, "word": " means", "probability": 0.92822265625}, {"start": 1619.5, "end": 1619.76, "word": " this", "probability": 0.890625}, {"start": 1619.76, "end": 1620.02, "word": " term", "probability": 0.89794921875}, {"start": 1620.02, "end": 1620.46, "word": " cancels.", "probability": 0.6600748697916666}, {"start": 1622.3, "end": 1622.88, "word": " If", "probability": 0.9697265625}, {"start": 1622.88, "end": 1623.1, "word": " we", "probability": 0.96142578125}, {"start": 1623.1, "end": 1623.62, "word": " assume", "probability": 0.90625}, {"start": 1623.62, "end": 1624.18, "word": " there", "probability": 0.85595703125}, {"start": 1624.18, "end": 1624.34, "word": " is", "probability": 0.90185546875}, {"start": 1624.34, "end": 1624.54, "word": " no", "probability": 0.95263671875}, {"start": 1624.54, "end": 1624.98, "word": " difference", "probability": 0.865234375}, {"start": 1624.98, "end": 1625.4, "word": " between", "probability": 0.87255859375}, {"start": 1625.4, "end": 1625.74, "word": " these", "probability": 0.85400390625}, {"start": 1625.74, "end": 1625.92, "word": " two", "probability": 0.92529296875}, {"start": 1625.92, "end": 1626.34, "word": " population", "probability": 0.9453125}, {"start": 1626.34, 
"end": 1626.72, "word": " means,", "probability": 0.927734375}], "temperature": 1.0}, {"id": 62, "seek": 165322, "start": 1627.88, "end": 1653.22, "text": " In some cases, might be the difference between these two equal, for example, A, and A is just a constant. In this case, you have to plug A instead of mu1 minus mu2. But most of the cases will have this classical one, the difference is zero.", "tokens": [682, 512, 3331, 11, 1062, 312, 264, 2649, 1296, 613, 732, 2681, 11, 337, 1365, 11, 316, 11, 293, 316, 307, 445, 257, 5754, 13, 682, 341, 1389, 11, 291, 362, 281, 5452, 316, 2602, 295, 2992, 16, 3175, 2992, 17, 13, 583, 881, 295, 264, 3331, 486, 362, 341, 13735, 472, 11, 264, 2649, 307, 4018, 13], "avg_logprob": -0.19928496267835974, "compression_ratio": 1.478527607361963, "no_speech_prob": 0.0, "words": [{"start": 1627.88, "end": 1628.3, "word": " In", "probability": 0.76025390625}, {"start": 1628.3, "end": 1628.84, "word": " some", "probability": 0.90283203125}, {"start": 1628.84, "end": 1629.42, "word": " cases,", "probability": 0.9208984375}, {"start": 1630.14, "end": 1631.44, "word": " might", "probability": 0.6318359375}, {"start": 1631.44, "end": 1631.72, "word": " be", "probability": 0.95458984375}, {"start": 1631.72, "end": 1632.9, "word": " the", "probability": 0.56787109375}, {"start": 1632.9, "end": 1633.44, "word": " difference", "probability": 0.8486328125}, {"start": 1633.44, "end": 1633.88, "word": " between", "probability": 0.8837890625}, {"start": 1633.88, "end": 1634.2, "word": " these", "probability": 0.8662109375}, {"start": 1634.2, "end": 1634.4, "word": " two", "probability": 0.8779296875}, {"start": 1634.4, "end": 1634.8, "word": " equal,", "probability": 0.57470703125}, {"start": 1635.3, "end": 1635.74, "word": " for", "probability": 0.9443359375}, {"start": 1635.74, "end": 1636.14, "word": " example,", "probability": 0.9677734375}, {"start": 1636.26, "end": 1636.48, "word": " A,", "probability": 0.5791015625}, {"start": 1636.66, "end": 
1636.9, "word": " and", "probability": 0.92529296875}, {"start": 1636.9, "end": 1637.06, "word": " A", "probability": 0.94580078125}, {"start": 1637.06, "end": 1637.24, "word": " is", "probability": 0.951171875}, {"start": 1637.24, "end": 1637.56, "word": " just", "probability": 0.9228515625}, {"start": 1637.56, "end": 1637.76, "word": " a", "probability": 0.97265625}, {"start": 1637.76, "end": 1638.16, "word": " constant.", "probability": 0.939453125}, {"start": 1639.96, "end": 1640.18, "word": " In", "probability": 0.94384765625}, {"start": 1640.18, "end": 1640.44, "word": " this", "probability": 0.9482421875}, {"start": 1640.44, "end": 1640.86, "word": " case,", "probability": 0.9130859375}, {"start": 1641.72, "end": 1641.94, "word": " you", "probability": 0.96240234375}, {"start": 1641.94, "end": 1642.2, "word": " have", "probability": 0.943359375}, {"start": 1642.2, "end": 1642.62, "word": " to", "probability": 0.97265625}, {"start": 1642.62, "end": 1643.22, "word": " plug", "probability": 0.8505859375}, {"start": 1643.22, "end": 1643.58, "word": " A", "probability": 0.90673828125}, {"start": 1643.58, "end": 1644.1, "word": " instead", "probability": 0.841796875}, {"start": 1644.1, "end": 1644.48, "word": " of", "probability": 0.9716796875}, {"start": 1644.48, "end": 1644.86, "word": " mu1", "probability": 0.42626953125}, {"start": 1644.86, "end": 1645.12, "word": " minus", "probability": 0.943359375}, {"start": 1645.12, "end": 1645.54, "word": " mu2.", "probability": 0.89892578125}, {"start": 1647.22, "end": 1648.02, "word": " But", "probability": 0.9326171875}, {"start": 1648.02, "end": 1648.58, "word": " most", "probability": 0.78125}, {"start": 1648.58, "end": 1648.76, "word": " of", "probability": 0.96728515625}, {"start": 1648.76, "end": 1648.92, "word": " the", "probability": 0.91650390625}, {"start": 1648.92, "end": 1649.44, "word": " cases", "probability": 0.92431640625}, {"start": 1649.44, "end": 1650.26, "word": " will", "probability": 
0.7216796875}, {"start": 1650.26, "end": 1650.48, "word": " have", "probability": 0.9423828125}, {"start": 1650.48, "end": 1650.7, "word": " this", "probability": 0.94287109375}, {"start": 1650.7, "end": 1651.14, "word": " classical", "probability": 0.89501953125}, {"start": 1651.14, "end": 1651.46, "word": " one,", "probability": 0.9248046875}, {"start": 1651.86, "end": 1652.04, "word": " the", "probability": 0.89453125}, {"start": 1652.04, "end": 1652.46, "word": " difference", "probability": 0.8681640625}, {"start": 1652.46, "end": 1652.9, "word": " is", "probability": 0.92138671875}, {"start": 1652.9, "end": 1653.22, "word": " zero.", "probability": 0.6787109375}], "temperature": 1.0}, {"id": 63, "seek": 167898, "start": 1655.08, "end": 1678.98, "text": " Divide by, this is the new term in this chapter, divide by the standard error of the estimate. Because here, if we go back a little bit to the T statistic, it's X bar minus mu divided by S over square root of N is the standard error of X bar.", "tokens": [9886, 482, 538, 11, 341, 307, 264, 777, 1433, 294, 341, 7187, 11, 9845, 538, 264, 3832, 6713, 295, 264, 12539, 13, 1436, 510, 11, 498, 321, 352, 646, 257, 707, 857, 281, 264, 314, 29588, 11, 309, 311, 1783, 2159, 3175, 2992, 6666, 538, 318, 670, 3732, 5593, 295, 426, 307, 264, 3832, 6713, 295, 1783, 2159, 13], "avg_logprob": -0.20611979713042577, "compression_ratio": 1.5379746835443038, "no_speech_prob": 0.0, "words": [{"start": 1655.08, "end": 1655.52, "word": " Divide", "probability": 0.6910400390625}, {"start": 1655.52, "end": 1655.86, "word": " by,", "probability": 0.96923828125}, {"start": 1656.08, "end": 1656.72, "word": " this", "probability": 0.92041015625}, {"start": 1656.72, "end": 1656.86, "word": " is", "probability": 0.94384765625}, {"start": 1656.86, "end": 1657.18, "word": " the", "probability": 0.7451171875}, {"start": 1657.18, "end": 1657.66, "word": " new", "probability": 0.92626953125}, {"start": 1657.66, "end": 1658.04, "word": " term", 
"probability": 0.90771484375}, {"start": 1658.04, "end": 1658.26, "word": " in", "probability": 0.90576171875}, {"start": 1658.26, "end": 1658.44, "word": " this", "probability": 0.931640625}, {"start": 1658.44, "end": 1658.8, "word": " chapter,", "probability": 0.8642578125}, {"start": 1660.44, "end": 1660.74, "word": " divide", "probability": 0.736328125}, {"start": 1660.74, "end": 1660.94, "word": " by", "probability": 0.9658203125}, {"start": 1660.94, "end": 1661.16, "word": " the", "probability": 0.904296875}, {"start": 1661.16, "end": 1661.62, "word": " standard", "probability": 0.92138671875}, {"start": 1661.62, "end": 1662.08, "word": " error", "probability": 0.88818359375}, {"start": 1662.08, "end": 1663.2, "word": " of", "probability": 0.9619140625}, {"start": 1663.2, "end": 1663.36, "word": " the", "probability": 0.912109375}, {"start": 1663.36, "end": 1663.84, "word": " estimate.", "probability": 0.8994140625}, {"start": 1668.0, "end": 1668.52, "word": " Because", "probability": 0.89892578125}, {"start": 1668.52, "end": 1668.74, "word": " here,", "probability": 0.81494140625}, {"start": 1668.88, "end": 1668.94, "word": " if", "probability": 0.91259765625}, {"start": 1668.94, "end": 1669.28, "word": " we", "probability": 0.9384765625}, {"start": 1669.28, "end": 1669.42, "word": " go", "probability": 0.95556640625}, {"start": 1669.42, "end": 1669.64, "word": " back", "probability": 0.8740234375}, {"start": 1669.64, "end": 1669.76, "word": " a", "probability": 0.85498046875}, {"start": 1669.76, "end": 1669.9, "word": " little", "probability": 0.85498046875}, {"start": 1669.9, "end": 1670.12, "word": " bit", "probability": 0.9375}, {"start": 1670.12, "end": 1670.28, "word": " to", "probability": 0.92822265625}, {"start": 1670.28, "end": 1670.44, "word": " the", "probability": 0.8564453125}, {"start": 1670.44, "end": 1670.54, "word": " T", "probability": 0.47314453125}, {"start": 1670.54, "end": 1670.98, "word": " statistic,", "probability": 0.84033203125}, 
{"start": 1671.52, "end": 1671.74, "word": " it's", "probability": 0.908935546875}, {"start": 1671.74, "end": 1672.0, "word": " X", "probability": 0.591796875}, {"start": 1672.0, "end": 1672.18, "word": " bar", "probability": 0.76953125}, {"start": 1672.18, "end": 1672.5, "word": " minus", "probability": 0.97509765625}, {"start": 1672.5, "end": 1672.92, "word": " mu", "probability": 0.472900390625}, {"start": 1672.92, "end": 1675.28, "word": " divided", "probability": 0.373291015625}, {"start": 1675.28, "end": 1675.58, "word": " by", "probability": 0.96826171875}, {"start": 1675.58, "end": 1675.88, "word": " S", "probability": 0.88037109375}, {"start": 1675.88, "end": 1676.08, "word": " over", "probability": 0.89453125}, {"start": 1676.08, "end": 1676.44, "word": " square", "probability": 0.732421875}, {"start": 1676.44, "end": 1676.62, "word": " root", "probability": 0.94580078125}, {"start": 1676.62, "end": 1676.78, "word": " of", "probability": 0.9599609375}, {"start": 1676.78, "end": 1677.0, "word": " N", "probability": 0.87060546875}, {"start": 1677.0, "end": 1677.56, "word": " is", "probability": 0.52490234375}, {"start": 1677.56, "end": 1677.76, "word": " the", "probability": 0.9169921875}, {"start": 1677.76, "end": 1678.12, "word": " standard", "probability": 0.9443359375}, {"start": 1678.12, "end": 1678.34, "word": " error", "probability": 0.8828125}, {"start": 1678.34, "end": 1678.5, "word": " of", "probability": 0.95166015625}, {"start": 1678.5, "end": 1678.66, "word": " X", "probability": 0.96875}, {"start": 1678.66, "end": 1678.98, "word": " bar.", "probability": 0.92626953125}], "temperature": 1.0}, {"id": 64, "seek": 170333, "start": 1682.99, "end": 1703.33, "text": " The same here, we have, sorry, standard error of this estimate. So the new term is how can we find the standard error X1 bar minus X2 bar. 
This one is given by square root of S square B multiplied by 1 over N1 plus 1 over N2.", "tokens": [440, 912, 510, 11, 321, 362, 11, 2597, 11, 3832, 6713, 295, 341, 12539, 13, 407, 264, 777, 1433, 307, 577, 393, 321, 915, 264, 3832, 6713, 1783, 16, 2159, 3175, 1783, 17, 2159, 13, 639, 472, 307, 2212, 538, 3732, 5593, 295, 318, 3732, 363, 17207, 538, 502, 670, 426, 16, 1804, 502, 670, 426, 17, 13], "avg_logprob": -0.19902013065451282, "compression_ratio": 1.4580645161290322, "no_speech_prob": 0.0, "words": [{"start": 1682.99, "end": 1683.25, "word": " The", "probability": 0.6865234375}, {"start": 1683.25, "end": 1683.53, "word": " same", "probability": 0.87158203125}, {"start": 1683.53, "end": 1683.79, "word": " here,", "probability": 0.837890625}, {"start": 1683.89, "end": 1683.99, "word": " we", "probability": 0.9501953125}, {"start": 1683.99, "end": 1684.21, "word": " have,", "probability": 0.94140625}, {"start": 1684.33, "end": 1684.85, "word": " sorry,", "probability": 0.75634765625}, {"start": 1685.51, "end": 1685.95, "word": " standard", "probability": 0.671875}, {"start": 1685.95, "end": 1686.19, "word": " error", "probability": 0.841796875}, {"start": 1686.19, "end": 1686.55, "word": " of", "probability": 0.96240234375}, {"start": 1686.55, "end": 1687.31, "word": " this", "probability": 0.9482421875}, {"start": 1687.31, "end": 1687.83, "word": " estimate.", "probability": 0.9228515625}, {"start": 1688.49, "end": 1688.73, "word": " So", "probability": 0.9560546875}, {"start": 1688.73, "end": 1688.91, "word": " the", "probability": 0.67431640625}, {"start": 1688.91, "end": 1689.17, "word": " new", "probability": 0.931640625}, {"start": 1689.17, "end": 1689.43, "word": " term", "probability": 0.91015625}, {"start": 1689.43, "end": 1689.71, "word": " is", "probability": 0.94873046875}, {"start": 1689.71, "end": 1689.91, "word": " how", "probability": 0.5498046875}, {"start": 1689.91, "end": 1690.17, "word": " can", "probability": 0.93798828125}, {"start": 
1690.17, "end": 1690.33, "word": " we", "probability": 0.95849609375}, {"start": 1690.33, "end": 1690.75, "word": " find", "probability": 0.90185546875}, {"start": 1690.75, "end": 1691.41, "word": " the", "probability": 0.91259765625}, {"start": 1691.41, "end": 1691.73, "word": " standard", "probability": 0.93798828125}, {"start": 1691.73, "end": 1692.01, "word": " error", "probability": 0.833984375}, {"start": 1692.01, "end": 1692.53, "word": " X1", "probability": 0.5521240234375}, {"start": 1692.53, "end": 1692.71, "word": " bar", "probability": 0.93115234375}, {"start": 1692.71, "end": 1692.99, "word": " minus", "probability": 0.9619140625}, {"start": 1692.99, "end": 1693.37, "word": " X2", "probability": 0.992919921875}, {"start": 1693.37, "end": 1693.59, "word": " bar.", "probability": 0.95068359375}, {"start": 1694.69, "end": 1695.25, "word": " This", "probability": 0.90283203125}, {"start": 1695.25, "end": 1695.43, "word": " one", "probability": 0.92724609375}, {"start": 1695.43, "end": 1695.61, "word": " is", "probability": 0.95068359375}, {"start": 1695.61, "end": 1695.87, "word": " given", "probability": 0.89697265625}, {"start": 1695.87, "end": 1696.41, "word": " by", "probability": 0.97021484375}, {"start": 1696.41, "end": 1697.73, "word": " square", "probability": 0.76953125}, {"start": 1697.73, "end": 1698.17, "word": " root", "probability": 0.92822265625}, {"start": 1698.17, "end": 1699.47, "word": " of", "probability": 0.96337890625}, {"start": 1699.47, "end": 1700.13, "word": " S", "probability": 0.7861328125}, {"start": 1700.13, "end": 1700.37, "word": " square", "probability": 0.379638671875}, {"start": 1700.37, "end": 1700.67, "word": " B", "probability": 0.80224609375}, {"start": 1700.67, "end": 1701.35, "word": " multiplied", "probability": 0.6806640625}, {"start": 1701.35, "end": 1701.61, "word": " by", "probability": 0.97314453125}, {"start": 1701.61, "end": 1701.83, "word": " 1", "probability": 0.84033203125}, {"start": 1701.83, "end": 
1701.99, "word": " over", "probability": 0.900390625}, {"start": 1701.99, "end": 1702.31, "word": " N1", "probability": 0.934326171875}, {"start": 1702.31, "end": 1702.55, "word": " plus", "probability": 0.94140625}, {"start": 1702.55, "end": 1702.75, "word": " 1", "probability": 0.9775390625}, {"start": 1702.75, "end": 1702.91, "word": " over", "probability": 0.90380859375}, {"start": 1702.91, "end": 1703.33, "word": " N2.", "probability": 0.996826171875}], "temperature": 1.0}, {"id": 65, "seek": 173296, "start": 1704.0, "end": 1732.96, "text": " S squared B is called the Bolt variance. And the Bolt variance is given by this four equation. So first of all, we have to compute the Bolt variance by using this equation, S squared B equals N1 minus 1 S1 squared N2 minus 1 S2 squared divided by", "tokens": [318, 8889, 363, 307, 1219, 264, 37884, 21977, 13, 400, 264, 37884, 21977, 307, 2212, 538, 341, 1451, 5367, 13, 407, 700, 295, 439, 11, 321, 362, 281, 14722, 264, 37884, 21977, 538, 1228, 341, 5367, 11, 318, 8889, 363, 6915, 426, 16, 3175, 502, 318, 16, 8889, 426, 17, 3175, 502, 318, 17, 8889, 6666, 538], "avg_logprob": -0.2825969884107853, "compression_ratio": 1.6917808219178083, "no_speech_prob": 0.0, "words": [{"start": 1704.0, "end": 1704.36, "word": " S", "probability": 0.37451171875}, {"start": 1704.36, "end": 1704.6, "word": " squared", "probability": 0.232177734375}, {"start": 1704.6, "end": 1704.78, "word": " B", "probability": 0.49169921875}, {"start": 1704.78, "end": 1704.98, "word": " is", "probability": 0.923828125}, {"start": 1704.98, "end": 1705.48, "word": " called", "probability": 0.88525390625}, {"start": 1705.48, "end": 1705.94, "word": " the", "probability": 0.87841796875}, {"start": 1705.94, "end": 1706.18, "word": " Bolt", "probability": 0.52490234375}, {"start": 1706.18, "end": 1706.7, "word": " variance.", "probability": 0.345947265625}, {"start": 1707.52, "end": 1707.76, "word": " And", "probability": 0.80517578125}, {"start": 1707.76, "end": 
1707.88, "word": " the", "probability": 0.826171875}, {"start": 1707.88, "end": 1708.08, "word": " Bolt", "probability": 0.8408203125}, {"start": 1708.08, "end": 1708.36, "word": " variance", "probability": 0.841796875}, {"start": 1708.36, "end": 1708.56, "word": " is", "probability": 0.94482421875}, {"start": 1708.56, "end": 1708.74, "word": " given", "probability": 0.888671875}, {"start": 1708.74, "end": 1708.98, "word": " by", "probability": 0.96630859375}, {"start": 1708.98, "end": 1709.38, "word": " this", "probability": 0.94482421875}, {"start": 1709.38, "end": 1710.8, "word": " four", "probability": 0.309326171875}, {"start": 1710.8, "end": 1711.3, "word": " equation.", "probability": 0.76806640625}, {"start": 1712.12, "end": 1712.56, "word": " So", "probability": 0.91650390625}, {"start": 1712.56, "end": 1713.0, "word": " first", "probability": 0.65576171875}, {"start": 1713.0, "end": 1713.18, "word": " of", "probability": 0.96240234375}, {"start": 1713.18, "end": 1713.46, "word": " all,", "probability": 0.94921875}, {"start": 1713.56, "end": 1713.78, "word": " we", "probability": 0.9541015625}, {"start": 1713.78, "end": 1714.06, "word": " have", "probability": 0.9462890625}, {"start": 1714.06, "end": 1714.34, "word": " to", "probability": 0.96630859375}, {"start": 1714.34, "end": 1714.96, "word": " compute", "probability": 0.88330078125}, {"start": 1714.96, "end": 1716.58, "word": " the", "probability": 0.8642578125}, {"start": 1716.58, "end": 1716.82, "word": " Bolt", "probability": 0.84521484375}, {"start": 1716.82, "end": 1717.32, "word": " variance", "probability": 0.8818359375}, {"start": 1717.32, "end": 1719.14, "word": " by", "probability": 0.88134765625}, {"start": 1719.14, "end": 1719.66, "word": " using", "probability": 0.927734375}, {"start": 1719.66, "end": 1721.44, "word": " this", "probability": 0.93408203125}, {"start": 1721.44, "end": 1722.06, "word": " equation,", "probability": 0.97705078125}, {"start": 1722.24, "end": 1722.42, "word": " 
S", "probability": 0.880859375}, {"start": 1722.42, "end": 1722.74, "word": " squared", "probability": 0.771484375}, {"start": 1722.74, "end": 1723.1, "word": " B", "probability": 0.978515625}, {"start": 1723.1, "end": 1725.32, "word": " equals", "probability": 0.58447265625}, {"start": 1725.32, "end": 1725.76, "word": " N1", "probability": 0.767822265625}, {"start": 1725.76, "end": 1726.3, "word": " minus", "probability": 0.95361328125}, {"start": 1726.3, "end": 1726.7, "word": " 1", "probability": 0.775390625}, {"start": 1726.7, "end": 1728.36, "word": " S1", "probability": 0.73583984375}, {"start": 1728.36, "end": 1728.82, "word": " squared", "probability": 0.84326171875}, {"start": 1728.82, "end": 1730.0, "word": " N2", "probability": 0.815673828125}, {"start": 1730.0, "end": 1730.3, "word": " minus", "probability": 0.98193359375}, {"start": 1730.3, "end": 1730.64, "word": " 1", "probability": 0.9404296875}, {"start": 1730.64, "end": 1731.2, "word": " S2", "probability": 0.971923828125}, {"start": 1731.2, "end": 1731.64, "word": " squared", "probability": 0.85107421875}, {"start": 1731.64, "end": 1732.4, "word": " divided", "probability": 0.48681640625}, {"start": 1732.4, "end": 1732.96, "word": " by", "probability": 0.97021484375}], "temperature": 1.0}, {"id": 66, "seek": 176265, "start": 1734.17, "end": 1762.65, "text": " N1 minus 1 plus N2 minus 1. Now let's see if this makes sense or not, the bold variance. Now, as we mentioned, there are two samples. The first one has sample size of N1. The other one has sample size of N2 with variances of S1 squared and S2 squared respectively. 
So we have two samples with sizes N1 and N2.", "tokens": [426, 16, 3175, 502, 1804, 426, 17, 3175, 502, 13, 823, 718, 311, 536, 498, 341, 1669, 2020, 420, 406, 11, 264, 11928, 21977, 13, 823, 11, 382, 321, 2835, 11, 456, 366, 732, 10938, 13, 440, 700, 472, 575, 6889, 2744, 295, 426, 16, 13, 440, 661, 472, 575, 6889, 2744, 295, 426, 17, 365, 1374, 21518, 295, 318, 16, 8889, 293, 318, 17, 8889, 25009, 13, 407, 321, 362, 732, 10938, 365, 11602, 426, 16, 293, 426, 17, 13], "avg_logprob": -0.15034298835004248, "compression_ratio": 1.684782608695652, "no_speech_prob": 0.0, "words": [{"start": 1734.17, "end": 1734.61, "word": " N1", "probability": 0.5654296875}, {"start": 1734.61, "end": 1735.27, "word": " minus", "probability": 0.6708984375}, {"start": 1735.27, "end": 1735.63, "word": " 1", "probability": 0.69140625}, {"start": 1735.63, "end": 1735.91, "word": " plus", "probability": 0.87890625}, {"start": 1735.91, "end": 1736.19, "word": " N2", "probability": 0.96826171875}, {"start": 1736.19, "end": 1736.47, "word": " minus", "probability": 0.97607421875}, {"start": 1736.47, "end": 1736.63, "word": " 1.", "probability": 0.81640625}, {"start": 1737.65, "end": 1737.93, "word": " Now", "probability": 0.93310546875}, {"start": 1737.93, "end": 1738.21, "word": " let's", "probability": 0.800048828125}, {"start": 1738.21, "end": 1738.43, "word": " see", "probability": 0.8984375}, {"start": 1738.43, "end": 1738.69, "word": " if", "probability": 0.9501953125}, {"start": 1738.69, "end": 1739.01, "word": " this", "probability": 0.94287109375}, {"start": 1739.01, "end": 1739.35, "word": " makes", "probability": 0.818359375}, {"start": 1739.35, "end": 1739.91, "word": " sense", "probability": 0.830078125}, {"start": 1739.91, "end": 1740.95, "word": " or", "probability": 0.8896484375}, {"start": 1740.95, "end": 1741.27, "word": " not,", "probability": 0.94873046875}, {"start": 1741.67, "end": 1742.05, "word": " the", "probability": 0.90380859375}, {"start": 1742.05, "end": 1742.35, 
"word": " bold", "probability": 0.744140625}, {"start": 1742.35, "end": 1742.91, "word": " variance.", "probability": 0.88037109375}, {"start": 1743.31, "end": 1743.63, "word": " Now,", "probability": 0.94970703125}, {"start": 1743.79, "end": 1743.97, "word": " as", "probability": 0.9619140625}, {"start": 1743.97, "end": 1744.07, "word": " we", "probability": 0.9345703125}, {"start": 1744.07, "end": 1744.33, "word": " mentioned,", "probability": 0.83056640625}, {"start": 1744.41, "end": 1744.51, "word": " there", "probability": 0.892578125}, {"start": 1744.51, "end": 1744.67, "word": " are", "probability": 0.9384765625}, {"start": 1744.67, "end": 1744.91, "word": " two", "probability": 0.888671875}, {"start": 1744.91, "end": 1745.31, "word": " samples.", "probability": 0.8974609375}, {"start": 1745.95, "end": 1746.19, "word": " The", "probability": 0.88671875}, {"start": 1746.19, "end": 1746.45, "word": " first", "probability": 0.884765625}, {"start": 1746.45, "end": 1746.67, "word": " one", "probability": 0.93212890625}, {"start": 1746.67, "end": 1747.03, "word": " has", "probability": 0.93701171875}, {"start": 1747.03, "end": 1747.47, "word": " sample", "probability": 0.85009765625}, {"start": 1747.47, "end": 1747.99, "word": " size", "probability": 0.8603515625}, {"start": 1747.99, "end": 1748.97, "word": " of", "probability": 0.96044921875}, {"start": 1748.97, "end": 1749.35, "word": " N1.", "probability": 0.992919921875}, {"start": 1750.61, "end": 1751.25, "word": " The", "probability": 0.85205078125}, {"start": 1751.25, "end": 1751.55, "word": " other", "probability": 0.88818359375}, {"start": 1751.55, "end": 1751.87, "word": " one", "probability": 0.9228515625}, {"start": 1751.87, "end": 1752.25, "word": " has", "probability": 0.93798828125}, {"start": 1752.25, "end": 1752.59, "word": " sample", "probability": 0.89013671875}, {"start": 1752.59, "end": 1753.07, "word": " size", "probability": 0.85986328125}, {"start": 1753.07, "end": 1753.35, "word": " of", 
"probability": 0.9365234375}, {"start": 1753.35, "end": 1753.71, "word": " N2", "probability": 0.9970703125}, {"start": 1753.71, "end": 1754.03, "word": " with", "probability": 0.8330078125}, {"start": 1754.03, "end": 1755.41, "word": " variances", "probability": 0.930908203125}, {"start": 1755.41, "end": 1756.13, "word": " of", "probability": 0.943359375}, {"start": 1756.13, "end": 1756.75, "word": " S1", "probability": 0.92822265625}, {"start": 1756.75, "end": 1757.21, "word": " squared", "probability": 0.7958984375}, {"start": 1757.21, "end": 1757.53, "word": " and", "probability": 0.47998046875}, {"start": 1757.53, "end": 1757.69, "word": " S2", "probability": 0.9619140625}, {"start": 1757.69, "end": 1758.03, "word": " squared", "probability": 0.85302734375}, {"start": 1758.03, "end": 1758.47, "word": " respectively.", "probability": 0.6728515625}, {"start": 1759.05, "end": 1759.37, "word": " So", "probability": 0.95849609375}, {"start": 1759.37, "end": 1759.49, "word": " we", "probability": 0.93408203125}, {"start": 1759.49, "end": 1759.65, "word": " have", "probability": 0.94384765625}, {"start": 1759.65, "end": 1759.95, "word": " two", "probability": 0.921875}, {"start": 1759.95, "end": 1760.67, "word": " samples", "probability": 0.8876953125}, {"start": 1760.67, "end": 1761.33, "word": " with", "probability": 0.8896484375}, {"start": 1761.33, "end": 1761.81, "word": " sizes", "probability": 0.90576171875}, {"start": 1761.81, "end": 1762.25, "word": " N1", "probability": 0.9814453125}, {"start": 1762.25, "end": 1762.35, "word": " and", "probability": 0.76416015625}, {"start": 1762.35, "end": 1762.65, "word": " N2.", "probability": 0.97509765625}], "temperature": 1.0}, {"id": 67, "seek": 179172, "start": 1764.26, "end": 1791.72, "text": " Sigma is unknown, but we know the sample variance for each. Now suppose the two samples are mixed. Let's see how can we find the pooled. It's called the pooled. Sometimes called the weighted variance. Look at this formula. 
N1 minus 1 squared plus N2 minus 1 is 2 squared divided by N1 minus 1.", "tokens": [36595, 307, 9841, 11, 457, 321, 458, 264, 6889, 21977, 337, 1184, 13, 823, 7297, 264, 732, 10938, 366, 7467, 13, 961, 311, 536, 577, 393, 321, 915, 264, 7005, 292, 13, 467, 311, 1219, 264, 7005, 292, 13, 4803, 1219, 264, 32807, 21977, 13, 2053, 412, 341, 8513, 13, 426, 16, 3175, 502, 8889, 1804, 426, 17, 3175, 502, 307, 568, 8889, 6666, 538, 426, 16, 3175, 502, 13], "avg_logprob": -0.20697623911038251, "compression_ratio": 1.6153846153846154, "no_speech_prob": 0.0, "words": [{"start": 1764.26, "end": 1764.74, "word": " Sigma", "probability": 0.5556640625}, {"start": 1764.74, "end": 1764.9, "word": " is", "probability": 0.87451171875}, {"start": 1764.9, "end": 1765.22, "word": " unknown,", "probability": 0.88232421875}, {"start": 1765.62, "end": 1767.02, "word": " but", "probability": 0.8916015625}, {"start": 1767.02, "end": 1767.18, "word": " we", "probability": 0.94677734375}, {"start": 1767.18, "end": 1767.32, "word": " know", "probability": 0.8857421875}, {"start": 1767.32, "end": 1767.56, "word": " the", "probability": 0.88671875}, {"start": 1767.56, "end": 1767.98, "word": " sample", "probability": 0.83935546875}, {"start": 1767.98, "end": 1768.58, "word": " variance", "probability": 0.87255859375}, {"start": 1768.58, "end": 1768.82, "word": " for", "probability": 0.916015625}, {"start": 1768.82, "end": 1769.14, "word": " each.", "probability": 0.9482421875}, {"start": 1770.26, "end": 1770.5, "word": " Now", "probability": 0.90234375}, {"start": 1770.5, "end": 1770.94, "word": " suppose", "probability": 0.58544921875}, {"start": 1770.94, "end": 1771.34, "word": " the", "probability": 0.8369140625}, {"start": 1771.34, "end": 1771.52, "word": " two", "probability": 0.90771484375}, {"start": 1771.52, "end": 1771.94, "word": " samples", "probability": 0.88525390625}, {"start": 1771.94, "end": 1772.46, "word": " are", "probability": 0.9482421875}, {"start": 1772.46, "end": 1772.78, 
"word": " mixed.", "probability": 0.90771484375}, {"start": 1774.74, "end": 1775.22, "word": " Let's", "probability": 0.902099609375}, {"start": 1775.22, "end": 1775.32, "word": " see", "probability": 0.91015625}, {"start": 1775.32, "end": 1775.42, "word": " how", "probability": 0.93017578125}, {"start": 1775.42, "end": 1775.6, "word": " can", "probability": 0.6669921875}, {"start": 1775.6, "end": 1775.76, "word": " we", "probability": 0.9404296875}, {"start": 1775.76, "end": 1776.16, "word": " find", "probability": 0.8994140625}, {"start": 1776.16, "end": 1776.54, "word": " the", "probability": 0.90576171875}, {"start": 1776.54, "end": 1776.94, "word": " pooled.", "probability": 0.779052734375}, {"start": 1777.06, "end": 1777.22, "word": " It's", "probability": 0.9013671875}, {"start": 1777.22, "end": 1777.42, "word": " called", "probability": 0.88037109375}, {"start": 1777.42, "end": 1777.6, "word": " the", "probability": 0.66455078125}, {"start": 1777.6, "end": 1777.98, "word": " pooled.", "probability": 0.950439453125}, {"start": 1778.1, "end": 1778.44, "word": " Sometimes", "probability": 0.6787109375}, {"start": 1778.44, "end": 1778.84, "word": " called", "probability": 0.446044921875}, {"start": 1778.84, "end": 1779.04, "word": " the", "probability": 0.88525390625}, {"start": 1779.04, "end": 1779.28, "word": " weighted", "probability": 0.87841796875}, {"start": 1779.28, "end": 1783.28, "word": " variance.", "probability": 0.767578125}, {"start": 1785.02, "end": 1785.62, "word": " Look", "probability": 0.83203125}, {"start": 1785.62, "end": 1785.78, "word": " at", "probability": 0.966796875}, {"start": 1785.78, "end": 1785.98, "word": " this", "probability": 0.94482421875}, {"start": 1785.98, "end": 1786.34, "word": " formula.", "probability": 0.943359375}, {"start": 1786.56, "end": 1786.84, "word": " N1", "probability": 0.7099609375}, {"start": 1786.84, "end": 1787.1, "word": " minus", "probability": 0.7373046875}, {"start": 1787.1, "end": 1787.36, "word": " 
1", "probability": 0.80126953125}, {"start": 1787.36, "end": 1787.8, "word": " squared", "probability": 0.5302734375}, {"start": 1787.8, "end": 1788.7, "word": " plus", "probability": 0.9130859375}, {"start": 1788.7, "end": 1789.08, "word": " N2", "probability": 0.94873046875}, {"start": 1789.08, "end": 1789.34, "word": " minus", "probability": 0.98291015625}, {"start": 1789.34, "end": 1789.6, "word": " 1", "probability": 0.94091796875}, {"start": 1789.6, "end": 1789.76, "word": " is", "probability": 0.6396484375}, {"start": 1789.76, "end": 1789.96, "word": " 2", "probability": 0.90869140625}, {"start": 1789.96, "end": 1790.3, "word": " squared", "probability": 0.83056640625}, {"start": 1790.3, "end": 1790.6, "word": " divided", "probability": 0.63330078125}, {"start": 1790.6, "end": 1790.82, "word": " by", "probability": 0.96435546875}, {"start": 1790.82, "end": 1791.14, "word": " N1", "probability": 0.9892578125}, {"start": 1791.14, "end": 1791.38, "word": " minus", "probability": 0.9853515625}, {"start": 1791.38, "end": 1791.72, "word": " 1.", "probability": 0.984375}], "temperature": 1.0}, {"id": 68, "seek": 182249, "start": 1793.19, "end": 1822.49, "text": " plus into minus one. We know that S squared is the sum of X minus X bar divided by N minus one. That's if we have only one sample. Now just cross multiplication, we will get N minus one S squared equals sum of X minus X bar squared. 
That's for the first sample.", "tokens": [1804, 666, 3175, 472, 13, 492, 458, 300, 318, 8889, 307, 264, 2408, 295, 1783, 3175, 1783, 2159, 6666, 538, 426, 3175, 472, 13, 663, 311, 498, 321, 362, 787, 472, 6889, 13, 823, 445, 3278, 27290, 11, 321, 486, 483, 426, 3175, 472, 318, 8889, 6915, 2408, 295, 1783, 3175, 1783, 2159, 8889, 13, 663, 311, 337, 264, 700, 6889, 13], "avg_logprob": -0.18303571570487248, "compression_ratio": 1.6794871794871795, "no_speech_prob": 0.0, "words": [{"start": 1793.19, "end": 1793.71, "word": " plus", "probability": 0.298095703125}, {"start": 1793.71, "end": 1794.35, "word": " into", "probability": 0.7138671875}, {"start": 1794.35, "end": 1795.73, "word": " minus", "probability": 0.93701171875}, {"start": 1795.73, "end": 1796.13, "word": " one.", "probability": 0.626953125}, {"start": 1796.63, "end": 1797.21, "word": " We", "probability": 0.814453125}, {"start": 1797.21, "end": 1797.41, "word": " know", "probability": 0.8974609375}, {"start": 1797.41, "end": 1797.79, "word": " that", "probability": 0.935546875}, {"start": 1797.79, "end": 1798.75, "word": " S", "probability": 0.473388671875}, {"start": 1798.75, "end": 1799.17, "word": " squared", "probability": 0.5615234375}, {"start": 1799.17, "end": 1800.45, "word": " is", "probability": 0.92041015625}, {"start": 1800.45, "end": 1800.65, "word": " the", "probability": 0.876953125}, {"start": 1800.65, "end": 1800.95, "word": " sum", "probability": 0.9326171875}, {"start": 1800.95, "end": 1801.49, "word": " of", "probability": 0.96435546875}, {"start": 1801.49, "end": 1801.85, "word": " X", "probability": 0.701171875}, {"start": 1801.85, "end": 1802.39, "word": " minus", "probability": 0.98876953125}, {"start": 1802.39, "end": 1802.89, "word": " X", "probability": 0.9912109375}, {"start": 1802.89, "end": 1803.25, "word": " bar", "probability": 0.88427734375}, {"start": 1803.25, "end": 1804.49, "word": " divided", "probability": 0.6875}, {"start": 1804.49, "end": 1805.13, "word": " by", 
"probability": 0.97021484375}, {"start": 1805.13, "end": 1805.55, "word": " N", "probability": 0.8642578125}, {"start": 1805.55, "end": 1805.77, "word": " minus", "probability": 0.98388671875}, {"start": 1805.77, "end": 1805.97, "word": " one.", "probability": 0.87353515625}, {"start": 1806.19, "end": 1806.49, "word": " That's", "probability": 0.863037109375}, {"start": 1806.49, "end": 1806.65, "word": " if", "probability": 0.9443359375}, {"start": 1806.65, "end": 1806.79, "word": " we", "probability": 0.93505859375}, {"start": 1806.79, "end": 1806.97, "word": " have", "probability": 0.91357421875}, {"start": 1806.97, "end": 1807.39, "word": " only", "probability": 0.92333984375}, {"start": 1807.39, "end": 1808.21, "word": " one", "probability": 0.93115234375}, {"start": 1808.21, "end": 1808.59, "word": " sample.", "probability": 0.83837890625}, {"start": 1809.77, "end": 1810.01, "word": " Now", "probability": 0.94580078125}, {"start": 1810.01, "end": 1810.33, "word": " just", "probability": 0.521484375}, {"start": 1810.33, "end": 1810.67, "word": " cross", "probability": 0.79248046875}, {"start": 1810.67, "end": 1811.27, "word": " multiplication,", "probability": 0.84716796875}, {"start": 1812.17, "end": 1812.33, "word": " we", "probability": 0.68701171875}, {"start": 1812.33, "end": 1812.59, "word": " will", "probability": 0.8642578125}, {"start": 1812.59, "end": 1813.05, "word": " get", "probability": 0.94287109375}, {"start": 1813.05, "end": 1814.31, "word": " N", "probability": 0.935546875}, {"start": 1814.31, "end": 1814.61, "word": " minus", "probability": 0.98583984375}, {"start": 1814.61, "end": 1814.99, "word": " one", "probability": 0.90234375}, {"start": 1814.99, "end": 1815.63, "word": " S", "probability": 0.7197265625}, {"start": 1815.63, "end": 1816.07, "word": " squared", "probability": 0.86572265625}, {"start": 1816.07, "end": 1816.67, "word": " equals", "probability": 0.89794921875}, {"start": 1816.67, "end": 1817.17, "word": " sum", 
"probability": 0.77783203125}, {"start": 1817.17, "end": 1817.69, "word": " of", "probability": 0.96630859375}, {"start": 1817.69, "end": 1818.05, "word": " X", "probability": 0.98193359375}, {"start": 1818.05, "end": 1819.21, "word": " minus", "probability": 0.98583984375}, {"start": 1819.21, "end": 1820.03, "word": " X", "probability": 0.990234375}, {"start": 1820.03, "end": 1820.25, "word": " bar", "probability": 0.94287109375}, {"start": 1820.25, "end": 1820.55, "word": " squared.", "probability": 0.77880859375}, {"start": 1820.67, "end": 1820.93, "word": " That's", "probability": 0.9404296875}, {"start": 1820.93, "end": 1821.23, "word": " for", "probability": 0.94921875}, {"start": 1821.23, "end": 1821.61, "word": " the", "probability": 0.9169921875}, {"start": 1821.61, "end": 1822.11, "word": " first", "probability": 0.88427734375}, {"start": 1822.11, "end": 1822.49, "word": " sample.", "probability": 0.8916015625}], "temperature": 1.0}, {"id": 69, "seek": 185242, "start": 1823.8, "end": 1852.42, "text": " What's about the second one? We have two samples. So we can write for the first one, N1 minus 1 S1 squared equals some X minus X bar. This is for sample one. For the other sample, we have the same equation but different data. 
So we have S squared equals", "tokens": [708, 311, 466, 264, 1150, 472, 30, 492, 362, 732, 10938, 13, 407, 321, 393, 2464, 337, 264, 700, 472, 11, 426, 16, 3175, 502, 318, 16, 8889, 6915, 512, 1783, 3175, 1783, 2159, 13, 639, 307, 337, 6889, 472, 13, 1171, 264, 661, 6889, 11, 321, 362, 264, 912, 5367, 457, 819, 1412, 13, 407, 321, 362, 318, 8889, 6915], "avg_logprob": -0.23059475806451613, "compression_ratio": 1.5301204819277108, "no_speech_prob": 0.0, "words": [{"start": 1823.8, "end": 1824.2, "word": " What's", "probability": 0.62548828125}, {"start": 1824.2, "end": 1824.4, "word": " about", "probability": 0.91650390625}, {"start": 1824.4, "end": 1824.6, "word": " the", "probability": 0.89599609375}, {"start": 1824.6, "end": 1824.86, "word": " second", "probability": 0.88427734375}, {"start": 1824.86, "end": 1825.12, "word": " one?", "probability": 0.9169921875}, {"start": 1826.4, "end": 1827.08, "word": " We", "probability": 0.93212890625}, {"start": 1827.08, "end": 1827.34, "word": " have", "probability": 0.94873046875}, {"start": 1827.34, "end": 1827.58, "word": " two", "probability": 0.8818359375}, {"start": 1827.58, "end": 1827.92, "word": " samples.", "probability": 0.49755859375}, {"start": 1828.96, "end": 1829.48, "word": " So", "probability": 0.92626953125}, {"start": 1829.48, "end": 1829.84, "word": " we", "probability": 0.79150390625}, {"start": 1829.84, "end": 1830.22, "word": " can", "probability": 0.9462890625}, {"start": 1830.22, "end": 1830.9, "word": " write", "probability": 0.92041015625}, {"start": 1830.9, "end": 1832.22, "word": " for", "probability": 0.80126953125}, {"start": 1832.22, "end": 1832.38, "word": " the", "probability": 0.91943359375}, {"start": 1832.38, "end": 1832.66, "word": " first", "probability": 0.88037109375}, {"start": 1832.66, "end": 1833.26, "word": " one,", "probability": 0.92822265625}, {"start": 1833.78, "end": 1835.44, "word": " N1", "probability": 0.6707763671875}, {"start": 1835.44, "end": 1836.42, "word": " 
minus", "probability": 0.89404296875}, {"start": 1836.42, "end": 1836.78, "word": " 1", "probability": 0.76611328125}, {"start": 1836.78, "end": 1837.28, "word": " S1", "probability": 0.7529296875}, {"start": 1837.28, "end": 1837.72, "word": " squared", "probability": 0.8212890625}, {"start": 1837.72, "end": 1840.14, "word": " equals", "probability": 0.5029296875}, {"start": 1840.14, "end": 1841.58, "word": " some", "probability": 0.43408203125}, {"start": 1841.58, "end": 1842.12, "word": " X", "probability": 0.39501953125}, {"start": 1842.12, "end": 1843.26, "word": " minus", "probability": 0.9853515625}, {"start": 1843.26, "end": 1843.54, "word": " X", "probability": 0.9775390625}, {"start": 1843.54, "end": 1843.86, "word": " bar.", "probability": 0.84375}, {"start": 1844.18, "end": 1844.54, "word": " This", "probability": 0.845703125}, {"start": 1844.54, "end": 1844.72, "word": " is", "probability": 0.9365234375}, {"start": 1844.72, "end": 1844.9, "word": " for", "probability": 0.94140625}, {"start": 1844.9, "end": 1845.2, "word": " sample", "probability": 0.82763671875}, {"start": 1845.2, "end": 1845.44, "word": " one.", "probability": 0.6259765625}, {"start": 1846.64, "end": 1847.0, "word": " For", "probability": 0.9296875}, {"start": 1847.0, "end": 1847.12, "word": " the", "probability": 0.912109375}, {"start": 1847.12, "end": 1847.34, "word": " other", "probability": 0.89453125}, {"start": 1847.34, "end": 1847.72, "word": " sample,", "probability": 0.90771484375}, {"start": 1848.06, "end": 1848.38, "word": " we", "probability": 0.9560546875}, {"start": 1848.38, "end": 1848.52, "word": " have", "probability": 0.9111328125}, {"start": 1848.52, "end": 1848.7, "word": " the", "probability": 0.9052734375}, {"start": 1848.7, "end": 1848.88, "word": " same", "probability": 0.91259765625}, {"start": 1848.88, "end": 1849.3, "word": " equation", "probability": 0.97021484375}, {"start": 1849.3, "end": 1849.58, "word": " but", "probability": 0.54052734375}, {"start": 
1849.58, "end": 1850.06, "word": " different", "probability": 0.88232421875}, {"start": 1850.06, "end": 1850.42, "word": " data.", "probability": 0.927734375}, {"start": 1850.72, "end": 1851.16, "word": " So", "probability": 0.9609375}, {"start": 1851.16, "end": 1851.3, "word": " we", "probability": 0.93701171875}, {"start": 1851.3, "end": 1851.5, "word": " have", "probability": 0.94189453125}, {"start": 1851.5, "end": 1851.78, "word": " S", "probability": 0.88720703125}, {"start": 1851.78, "end": 1852.08, "word": " squared", "probability": 0.7744140625}, {"start": 1852.08, "end": 1852.42, "word": " equals", "probability": 0.54833984375}], "temperature": 1.0}, {"id": 70, "seek": 188012, "start": 1853.3, "end": 1880.12, "text": " Y, for example, minus Y bar divided by N2 minus 1. Now cross multiplication will give N2 minus 1 is 2 squared equals sum of Y minus Y bar squared. That's for the second cell. We are looking for standard error of the difference between these two.", "tokens": [398, 11, 337, 1365, 11, 3175, 398, 2159, 6666, 538, 426, 17, 3175, 502, 13, 823, 3278, 27290, 486, 976, 426, 17, 3175, 502, 307, 568, 8889, 6915, 2408, 295, 398, 3175, 398, 2159, 8889, 13, 663, 311, 337, 264, 1150, 2815, 13, 492, 366, 1237, 337, 3832, 6713, 295, 264, 2649, 1296, 613, 732, 13], "avg_logprob": -0.2147752234810277, "compression_ratio": 1.4642857142857142, "no_speech_prob": 0.0, "words": [{"start": 1853.3, "end": 1853.76, "word": " Y,", "probability": 0.51904296875}, {"start": 1853.92, "end": 1854.16, "word": " for", "probability": 0.94580078125}, {"start": 1854.16, "end": 1854.52, "word": " example,", "probability": 0.96484375}, {"start": 1855.14, "end": 1855.46, "word": " minus", "probability": 0.9091796875}, {"start": 1855.46, "end": 1855.68, "word": " Y", "probability": 0.9306640625}, {"start": 1855.68, "end": 1856.02, "word": " bar", "probability": 0.89306640625}, {"start": 1856.02, "end": 1856.86, "word": " divided", "probability": 0.64013671875}, {"start": 1856.86, 
"end": 1857.1, "word": " by", "probability": 0.96484375}, {"start": 1857.1, "end": 1857.38, "word": " N2", "probability": 0.829833984375}, {"start": 1857.38, "end": 1857.62, "word": " minus", "probability": 0.97509765625}, {"start": 1857.62, "end": 1857.96, "word": " 1.", "probability": 0.65966796875}, {"start": 1859.34, "end": 1859.98, "word": " Now", "probability": 0.90771484375}, {"start": 1859.98, "end": 1860.26, "word": " cross", "probability": 0.69287109375}, {"start": 1860.26, "end": 1860.74, "word": " multiplication", "probability": 0.787109375}, {"start": 1860.74, "end": 1861.2, "word": " will", "probability": 0.6728515625}, {"start": 1861.2, "end": 1861.48, "word": " give", "probability": 0.5146484375}, {"start": 1861.48, "end": 1861.86, "word": " N2", "probability": 0.958984375}, {"start": 1861.86, "end": 1862.14, "word": " minus", "probability": 0.98095703125}, {"start": 1862.14, "end": 1862.48, "word": " 1", "probability": 0.93408203125}, {"start": 1862.48, "end": 1863.96, "word": " is", "probability": 0.7021484375}, {"start": 1863.96, "end": 1864.2, "word": " 2", "probability": 0.83447265625}, {"start": 1864.2, "end": 1864.64, "word": " squared", "probability": 0.81103515625}, {"start": 1864.64, "end": 1865.52, "word": " equals", "probability": 0.7587890625}, {"start": 1865.52, "end": 1865.84, "word": " sum", "probability": 0.666015625}, {"start": 1865.84, "end": 1866.0, "word": " of", "probability": 0.791015625}, {"start": 1866.0, "end": 1866.4, "word": " Y", "probability": 0.52490234375}, {"start": 1866.4, "end": 1867.46, "word": " minus", "probability": 0.98046875}, {"start": 1867.46, "end": 1867.72, "word": " Y", "probability": 0.98828125}, {"start": 1867.72, "end": 1867.94, "word": " bar", "probability": 0.90869140625}, {"start": 1867.94, "end": 1868.26, "word": " squared.", "probability": 0.76904296875}, {"start": 1868.4, "end": 1868.62, "word": " That's", "probability": 0.895751953125}, {"start": 1868.62, "end": 1868.78, "word": " for", 
"probability": 0.63525390625}, {"start": 1868.78, "end": 1868.98, "word": " the", "probability": 0.90380859375}, {"start": 1868.98, "end": 1869.44, "word": " second", "probability": 0.88525390625}, {"start": 1869.44, "end": 1869.76, "word": " cell.", "probability": 0.34326171875}, {"start": 1873.0, "end": 1873.64, "word": " We", "probability": 0.93994140625}, {"start": 1873.64, "end": 1873.82, "word": " are", "probability": 0.9384765625}, {"start": 1873.82, "end": 1876.54, "word": " looking", "probability": 0.41845703125}, {"start": 1876.54, "end": 1876.98, "word": " for", "probability": 0.94970703125}, {"start": 1876.98, "end": 1878.28, "word": " standard", "probability": 0.80810546875}, {"start": 1878.28, "end": 1878.58, "word": " error", "probability": 0.89013671875}, {"start": 1878.58, "end": 1878.86, "word": " of", "probability": 0.94189453125}, {"start": 1878.86, "end": 1879.0, "word": " the", "probability": 0.8974609375}, {"start": 1879.0, "end": 1879.38, "word": " difference", "probability": 0.85400390625}, {"start": 1879.38, "end": 1879.74, "word": " between", "probability": 0.880859375}, {"start": 1879.74, "end": 1879.96, "word": " these", "probability": 0.8447265625}, {"start": 1879.96, "end": 1880.12, "word": " two.", "probability": 0.85986328125}], "temperature": 1.0}, {"id": 71, "seek": 190702, "start": 1880.82, "end": 1907.02, "text": " So now the standard error, or let's compute first, S squared B for both. Now, S squared in general, as we mentioned, is sum of X minus X bar squared divided by N minus 1. 
So here, we have the first sum plus the second one divided by N minus 1 for the first.", "tokens": [407, 586, 264, 3832, 6713, 11, 420, 718, 311, 14722, 700, 11, 318, 8889, 363, 337, 1293, 13, 823, 11, 318, 8889, 294, 2674, 11, 382, 321, 2835, 11, 307, 2408, 295, 1783, 3175, 1783, 2159, 8889, 6666, 538, 426, 3175, 502, 13, 407, 510, 11, 321, 362, 264, 700, 2408, 1804, 264, 1150, 472, 6666, 538, 426, 3175, 502, 337, 264, 700, 13], "avg_logprob": -0.27355769230769234, "compression_ratio": 1.5731707317073171, "no_speech_prob": 0.0, "words": [{"start": 1880.82, "end": 1881.06, "word": " So", "probability": 0.411865234375}, {"start": 1881.06, "end": 1881.28, "word": " now", "probability": 0.81201171875}, {"start": 1881.28, "end": 1881.74, "word": " the", "probability": 0.2340087890625}, {"start": 1881.74, "end": 1882.18, "word": " standard", "probability": 0.90185546875}, {"start": 1882.18, "end": 1882.64, "word": " error,", "probability": 0.8076171875}, {"start": 1883.56, "end": 1883.74, "word": " or", "probability": 0.3232421875}, {"start": 1883.74, "end": 1884.16, "word": " let's", "probability": 0.777587890625}, {"start": 1884.16, "end": 1884.56, "word": " compute", "probability": 0.9189453125}, {"start": 1884.56, "end": 1885.02, "word": " first,", "probability": 0.865234375}, {"start": 1885.64, "end": 1885.76, "word": " S", "probability": 0.61474609375}, {"start": 1885.76, "end": 1886.02, "word": " squared", "probability": 0.5654296875}, {"start": 1886.02, "end": 1886.24, "word": " B", "probability": 0.468505859375}, {"start": 1886.24, "end": 1886.5, "word": " for", "probability": 0.7060546875}, {"start": 1886.5, "end": 1886.84, "word": " both.", "probability": 0.8798828125}, {"start": 1887.98, "end": 1888.32, "word": " Now,", "probability": 0.9345703125}, {"start": 1888.4, "end": 1888.72, "word": " S", "probability": 0.9814453125}, {"start": 1888.72, "end": 1888.98, "word": " squared", "probability": 0.83056640625}, {"start": 1888.98, "end": 1889.24, "word": " 
in", "probability": 0.650390625}, {"start": 1889.24, "end": 1889.5, "word": " general,", "probability": 0.90625}, {"start": 1889.58, "end": 1889.7, "word": " as", "probability": 0.94873046875}, {"start": 1889.7, "end": 1889.82, "word": " we", "probability": 0.80078125}, {"start": 1889.82, "end": 1890.22, "word": " mentioned,", "probability": 0.81982421875}, {"start": 1890.82, "end": 1891.08, "word": " is", "probability": 0.91259765625}, {"start": 1891.08, "end": 1891.44, "word": " sum", "probability": 0.78857421875}, {"start": 1891.44, "end": 1892.86, "word": " of", "probability": 0.9384765625}, {"start": 1892.86, "end": 1893.06, "word": " X", "probability": 0.68505859375}, {"start": 1893.06, "end": 1893.32, "word": " minus", "probability": 0.98681640625}, {"start": 1893.32, "end": 1893.54, "word": " X", "probability": 0.98291015625}, {"start": 1893.54, "end": 1893.7, "word": " bar", "probability": 0.451904296875}, {"start": 1893.7, "end": 1893.96, "word": " squared", "probability": 0.798828125}, {"start": 1893.96, "end": 1894.24, "word": " divided", "probability": 0.7275390625}, {"start": 1894.24, "end": 1894.48, "word": " by", "probability": 0.97216796875}, {"start": 1894.48, "end": 1894.64, "word": " N", "probability": 0.89208984375}, {"start": 1894.64, "end": 1894.9, "word": " minus", "probability": 0.98779296875}, {"start": 1894.9, "end": 1895.12, "word": " 1.", "probability": 0.5302734375}, {"start": 1895.5, "end": 1895.72, "word": " So", "probability": 0.9560546875}, {"start": 1895.72, "end": 1896.0, "word": " here,", "probability": 0.81298828125}, {"start": 1896.18, "end": 1896.34, "word": " we", "probability": 0.95703125}, {"start": 1896.34, "end": 1896.64, "word": " have", "probability": 0.94189453125}, {"start": 1896.64, "end": 1897.16, "word": " the", "probability": 0.9130859375}, {"start": 1897.16, "end": 1897.4, "word": " first", "probability": 0.8798828125}, {"start": 1897.4, "end": 1897.78, "word": " sum", "probability": 0.958984375}, {"start": 
1897.78, "end": 1900.52, "word": " plus", "probability": 0.7685546875}, {"start": 1900.52, "end": 1900.76, "word": " the", "probability": 0.91845703125}, {"start": 1900.76, "end": 1901.2, "word": " second", "probability": 0.900390625}, {"start": 1901.2, "end": 1901.62, "word": " one", "probability": 0.92626953125}, {"start": 1901.62, "end": 1903.5, "word": " divided", "probability": 0.69775390625}, {"start": 1903.5, "end": 1903.98, "word": " by", "probability": 0.97021484375}, {"start": 1903.98, "end": 1905.76, "word": " N", "probability": 0.98095703125}, {"start": 1905.76, "end": 1906.06, "word": " minus", "probability": 0.9833984375}, {"start": 1906.06, "end": 1906.3, "word": " 1", "probability": 0.90185546875}, {"start": 1906.3, "end": 1906.52, "word": " for", "probability": 0.9453125}, {"start": 1906.52, "end": 1906.68, "word": " the", "probability": 0.91650390625}, {"start": 1906.68, "end": 1907.02, "word": " first.", "probability": 0.88134765625}], "temperature": 1.0}, {"id": 72, "seek": 193821, "start": 1909.37, "end": 1938.21, "text": " plus N2 minus 1 for the second one. Now this sum equals N1 minus 1 S1 squared. The second sum is N2 minus 1 S2 squared divided by N1 minus 1 plus N2 minus 1. So this is how this equation is formulated. 
So S squared is called", "tokens": [1804, 426, 17, 3175, 502, 337, 264, 1150, 472, 13, 823, 341, 2408, 6915, 426, 16, 3175, 502, 318, 16, 8889, 13, 440, 1150, 2408, 307, 426, 17, 3175, 502, 318, 17, 8889, 6666, 538, 426, 16, 3175, 502, 1804, 426, 17, 3175, 502, 13, 407, 341, 307, 577, 341, 5367, 307, 48936, 13, 407, 318, 8889, 307, 1219], "avg_logprob": -0.18528646330038706, "compression_ratio": 1.7045454545454546, "no_speech_prob": 0.0, "words": [{"start": 1909.37, "end": 1909.91, "word": " plus", "probability": 0.314697265625}, {"start": 1909.91, "end": 1910.77, "word": " N2", "probability": 0.6146240234375}, {"start": 1910.77, "end": 1911.07, "word": " minus", "probability": 0.80859375}, {"start": 1911.07, "end": 1911.27, "word": " 1", "probability": 0.65478515625}, {"start": 1911.27, "end": 1911.45, "word": " for", "probability": 0.916015625}, {"start": 1911.45, "end": 1911.61, "word": " the", "probability": 0.9072265625}, {"start": 1911.61, "end": 1911.87, "word": " second", "probability": 0.8828125}, {"start": 1911.87, "end": 1912.17, "word": " one.", "probability": 0.45166015625}, {"start": 1913.57, "end": 1914.27, "word": " Now", "probability": 0.8837890625}, {"start": 1914.27, "end": 1914.61, "word": " this", "probability": 0.6904296875}, {"start": 1914.61, "end": 1915.01, "word": " sum", "probability": 0.94189453125}, {"start": 1915.01, "end": 1915.77, "word": " equals", "probability": 0.9228515625}, {"start": 1915.77, "end": 1918.45, "word": " N1", "probability": 0.906005859375}, {"start": 1918.45, "end": 1918.75, "word": " minus", "probability": 0.9755859375}, {"start": 1918.75, "end": 1919.01, "word": " 1", "probability": 0.884765625}, {"start": 1919.01, "end": 1919.39, "word": " S1", "probability": 0.590087890625}, {"start": 1919.39, "end": 1919.75, "word": " squared.", "probability": 0.68994140625}, {"start": 1920.29, "end": 1920.61, "word": " The", "probability": 0.884765625}, {"start": 1920.61, "end": 1920.91, "word": " second", "probability": 
0.88818359375}, {"start": 1920.91, "end": 1921.23, "word": " sum", "probability": 0.93212890625}, {"start": 1921.23, "end": 1921.47, "word": " is", "probability": 0.9365234375}, {"start": 1921.47, "end": 1921.83, "word": " N2", "probability": 0.990234375}, {"start": 1921.83, "end": 1922.11, "word": " minus", "probability": 0.98388671875}, {"start": 1922.11, "end": 1922.37, "word": " 1", "probability": 0.92578125}, {"start": 1922.37, "end": 1922.73, "word": " S2", "probability": 0.838623046875}, {"start": 1922.73, "end": 1923.15, "word": " squared", "probability": 0.85205078125}, {"start": 1923.15, "end": 1924.59, "word": " divided", "probability": 0.306396484375}, {"start": 1924.59, "end": 1924.79, "word": " by", "probability": 0.9619140625}, {"start": 1924.79, "end": 1925.15, "word": " N1", "probability": 0.98974609375}, {"start": 1925.15, "end": 1925.47, "word": " minus", "probability": 0.98388671875}, {"start": 1925.47, "end": 1925.93, "word": " 1", "probability": 0.96435546875}, {"start": 1925.93, "end": 1926.85, "word": " plus", "probability": 0.9306640625}, {"start": 1926.85, "end": 1927.21, "word": " N2", "probability": 0.99462890625}, {"start": 1927.21, "end": 1927.45, "word": " minus", "probability": 0.984375}, {"start": 1927.45, "end": 1927.71, "word": " 1.", "probability": 0.9765625}, {"start": 1929.87, "end": 1930.59, "word": " So", "probability": 0.92333984375}, {"start": 1930.59, "end": 1930.81, "word": " this", "probability": 0.87255859375}, {"start": 1930.81, "end": 1930.95, "word": " is", "probability": 0.94384765625}, {"start": 1930.95, "end": 1931.29, "word": " how", "probability": 0.9384765625}, {"start": 1931.29, "end": 1932.29, "word": " this", "probability": 0.939453125}, {"start": 1932.29, "end": 1932.83, "word": " equation", "probability": 0.97705078125}, {"start": 1932.83, "end": 1933.17, "word": " is", "probability": 0.94775390625}, {"start": 1933.17, "end": 1933.65, "word": " formulated.", "probability": 0.953125}, {"start": 1934.51, 
"end": 1934.95, "word": " So", "probability": 0.9482421875}, {"start": 1934.95, "end": 1935.21, "word": " S", "probability": 0.91357421875}, {"start": 1935.21, "end": 1935.63, "word": " squared", "probability": 0.7900390625}, {"start": 1935.63, "end": 1937.81, "word": " is", "probability": 0.92578125}, {"start": 1937.81, "end": 1938.21, "word": " called", "probability": 0.869140625}], "temperature": 1.0}, {"id": 73, "seek": 195429, "start": 1946.33, "end": 1954.29, "text": " Because as we mentioned in chapter three, the variance, the definition of the variance is the average", "tokens": [1436, 382, 321, 2835, 294, 7187, 1045, 11, 264, 21977, 11, 264, 7123, 295, 264, 21977, 307, 264, 4274], "avg_logprob": -0.36406249850988387, "compression_ratio": 1.2911392405063291, "no_speech_prob": 0.0, "words": [{"start": 1946.3300000000002, "end": 1947.13, "word": " Because", "probability": 0.2130126953125}, {"start": 1947.13, "end": 1947.93, "word": " as", "probability": 0.71826171875}, {"start": 1947.93, "end": 1948.07, "word": " we", "probability": 0.888671875}, {"start": 1948.07, "end": 1948.33, "word": " mentioned", "probability": 0.76513671875}, {"start": 1948.33, "end": 1948.49, "word": " in", "probability": 0.609375}, {"start": 1948.49, "end": 1948.69, "word": " chapter", "probability": 0.56201171875}, {"start": 1948.69, "end": 1949.13, "word": " three,", "probability": 0.513671875}, {"start": 1949.67, "end": 1949.81, "word": " the", "probability": 0.8125}, {"start": 1949.81, "end": 1950.29, "word": " variance,", "probability": 0.80859375}, {"start": 1951.19, "end": 1951.49, "word": " the", "probability": 0.9140625}, {"start": 1951.49, "end": 1951.89, "word": " definition", "probability": 0.93798828125}, {"start": 1951.89, "end": 1952.09, "word": " of", "probability": 0.96923828125}, {"start": 1952.09, "end": 1952.21, "word": " the", "probability": 0.90185546875}, {"start": 1952.21, "end": 1952.63, "word": " variance", "probability": 0.9140625}, {"start": 1952.63, 
"end": 1953.63, "word": " is", "probability": 0.69970703125}, {"start": 1953.63, "end": 1953.79, "word": " the", "probability": 0.91943359375}, {"start": 1953.79, "end": 1954.29, "word": " average", "probability": 0.8095703125}], "temperature": 1.0}, {"id": 74, "seek": 198210, "start": 1958.8, "end": 1982.1, "text": " S squared is the average of the squared differences around the mean. And this is the same because here we have sum of x minus x bar plus the other sum y minus y bar all divided by n1 minus 1 plus n2 minus 1. This term can be written as n1 plus n2 minus 2.", "tokens": [318, 8889, 307, 264, 4274, 295, 264, 8889, 7300, 926, 264, 914, 13, 400, 341, 307, 264, 912, 570, 510, 321, 362, 2408, 295, 2031, 3175, 2031, 2159, 1804, 264, 661, 2408, 288, 3175, 288, 2159, 439, 6666, 538, 297, 16, 3175, 502, 1804, 297, 17, 3175, 502, 13, 639, 1433, 393, 312, 3720, 382, 297, 16, 1804, 297, 17, 3175, 568, 13], "avg_logprob": -0.15209961589425802, "compression_ratio": 1.641025641025641, "no_speech_prob": 0.0, "words": [{"start": 1958.8, "end": 1959.12, "word": " S", "probability": 0.5341796875}, {"start": 1959.12, "end": 1959.48, "word": " squared", "probability": 0.505859375}, {"start": 1959.48, "end": 1959.8, "word": " is", "probability": 0.9404296875}, {"start": 1959.8, "end": 1959.96, "word": " the", "probability": 0.9189453125}, {"start": 1959.96, "end": 1960.4, "word": " average", "probability": 0.78466796875}, {"start": 1960.4, "end": 1961.34, "word": " of", "probability": 0.93310546875}, {"start": 1961.34, "end": 1961.56, "word": " the", "probability": 0.8994140625}, {"start": 1961.56, "end": 1962.04, "word": " squared", "probability": 0.55712890625}, {"start": 1962.04, "end": 1962.68, "word": " differences", "probability": 0.6357421875}, {"start": 1962.68, "end": 1964.12, "word": " around", "probability": 0.8994140625}, {"start": 1964.12, "end": 1964.32, "word": " the", "probability": 0.8896484375}, {"start": 1964.32, "end": 1964.46, "word": " mean.", 
"probability": 0.97998046875}, {"start": 1965.32, "end": 1965.54, "word": " And", "probability": 0.88818359375}, {"start": 1965.54, "end": 1965.7, "word": " this", "probability": 0.92626953125}, {"start": 1965.7, "end": 1965.78, "word": " is", "probability": 0.94921875}, {"start": 1965.78, "end": 1965.94, "word": " the", "probability": 0.91455078125}, {"start": 1965.94, "end": 1966.2, "word": " same", "probability": 0.90478515625}, {"start": 1966.2, "end": 1966.8, "word": " because", "probability": 0.494873046875}, {"start": 1966.8, "end": 1967.0, "word": " here", "probability": 0.84619140625}, {"start": 1967.0, "end": 1967.14, "word": " we", "probability": 0.91357421875}, {"start": 1967.14, "end": 1967.3, "word": " have", "probability": 0.9443359375}, {"start": 1967.3, "end": 1967.66, "word": " sum", "probability": 0.77197265625}, {"start": 1967.66, "end": 1967.84, "word": " of", "probability": 0.953125}, {"start": 1967.84, "end": 1967.98, "word": " x", "probability": 0.607421875}, {"start": 1967.98, "end": 1968.24, "word": " minus", "probability": 0.97119140625}, {"start": 1968.24, "end": 1968.5, "word": " x", "probability": 0.9951171875}, {"start": 1968.5, "end": 1968.78, "word": " bar", "probability": 0.87255859375}, {"start": 1968.78, "end": 1969.58, "word": " plus", "probability": 0.89794921875}, {"start": 1969.58, "end": 1969.82, "word": " the", "probability": 0.921875}, {"start": 1969.82, "end": 1970.04, "word": " other", "probability": 0.88525390625}, {"start": 1970.04, "end": 1970.34, "word": " sum", "probability": 0.93994140625}, {"start": 1970.34, "end": 1970.54, "word": " y", "probability": 0.6513671875}, {"start": 1970.54, "end": 1970.82, "word": " minus", "probability": 0.9873046875}, {"start": 1970.82, "end": 1971.0, "word": " y", "probability": 0.998046875}, {"start": 1971.0, "end": 1971.32, "word": " bar", "probability": 0.94189453125}, {"start": 1971.32, "end": 1972.0, "word": " all", "probability": 0.56005859375}, {"start": 1972.0, "end": 
1972.4, "word": " divided", "probability": 0.646484375}, {"start": 1972.4, "end": 1972.78, "word": " by", "probability": 0.96728515625}, {"start": 1972.78, "end": 1973.08, "word": " n1", "probability": 0.754150390625}, {"start": 1973.08, "end": 1973.34, "word": " minus", "probability": 0.9814453125}, {"start": 1973.34, "end": 1973.56, "word": " 1", "probability": 0.8564453125}, {"start": 1973.56, "end": 1973.86, "word": " plus", "probability": 0.9541015625}, {"start": 1973.86, "end": 1974.22, "word": " n2", "probability": 0.9775390625}, {"start": 1974.22, "end": 1974.42, "word": " minus", "probability": 0.986328125}, {"start": 1974.42, "end": 1974.7, "word": " 1.", "probability": 0.9716796875}, {"start": 1975.3, "end": 1975.64, "word": " This", "probability": 0.8837890625}, {"start": 1975.64, "end": 1976.04, "word": " term", "probability": 0.8466796875}, {"start": 1976.04, "end": 1976.52, "word": " can", "probability": 0.94482421875}, {"start": 1976.52, "end": 1976.68, "word": " be", "probability": 0.95556640625}, {"start": 1976.68, "end": 1977.06, "word": " written", "probability": 0.9365234375}, {"start": 1977.06, "end": 1977.74, "word": " as", "probability": 0.95703125}, {"start": 1977.74, "end": 1979.82, "word": " n1", "probability": 0.958740234375}, {"start": 1979.82, "end": 1980.34, "word": " plus", "probability": 0.93408203125}, {"start": 1980.34, "end": 1980.8, "word": " n2", "probability": 0.99072265625}, {"start": 1980.8, "end": 1981.28, "word": " minus", "probability": 0.984375}, {"start": 1981.28, "end": 1982.1, "word": " 2.", "probability": 0.91552734375}], "temperature": 1.0}, {"id": 75, "seek": 201148, "start": 1984.26, "end": 2011.48, "text": " So this is your whole variance. Now, sum x minus x bar squared equal n1 minus 1 is 1 squared. Yes, because we are looking for the difference or the variance of this, the variance for x1 bar minus x2 bar. 
This variance is variance of x1 bar.", "tokens": [407, 341, 307, 428, 1379, 21977, 13, 823, 11, 2408, 2031, 3175, 2031, 2159, 8889, 2681, 297, 16, 3175, 502, 307, 502, 8889, 13, 1079, 11, 570, 321, 366, 1237, 337, 264, 2649, 420, 264, 21977, 295, 341, 11, 264, 21977, 337, 2031, 16, 2159, 3175, 2031, 17, 2159, 13, 639, 21977, 307, 21977, 295, 2031, 16, 2159, 13], "avg_logprob": -0.2433593677977721, "compression_ratio": 1.6283783783783783, "no_speech_prob": 0.0, "words": [{"start": 1984.26, "end": 1984.54, "word": " So", "probability": 0.8046875}, {"start": 1984.54, "end": 1984.8, "word": " this", "probability": 0.79443359375}, {"start": 1984.8, "end": 1984.96, "word": " is", "probability": 0.93115234375}, {"start": 1984.96, "end": 1985.34, "word": " your", "probability": 0.87890625}, {"start": 1985.34, "end": 1986.02, "word": " whole", "probability": 0.39501953125}, {"start": 1986.02, "end": 1986.84, "word": " variance.", "probability": 0.9296875}, {"start": 1990.66, "end": 1991.22, "word": " Now,", "probability": 0.53564453125}, {"start": 1991.46, "end": 1991.8, "word": " sum", "probability": 0.54296875}, {"start": 1991.8, "end": 1992.04, "word": " x", "probability": 0.59130859375}, {"start": 1992.04, "end": 1992.3, "word": " minus", "probability": 0.95849609375}, {"start": 1992.3, "end": 1992.52, "word": " x", "probability": 0.98828125}, {"start": 1992.52, "end": 1992.7, "word": " bar", "probability": 0.8935546875}, {"start": 1992.7, "end": 1993.02, "word": " squared", "probability": 0.67236328125}, {"start": 1993.02, "end": 1993.52, "word": " equal", "probability": 0.4384765625}, {"start": 1993.52, "end": 1993.94, "word": " n1", "probability": 0.6962890625}, {"start": 1993.94, "end": 1994.2, "word": " minus", "probability": 0.98291015625}, {"start": 1994.2, "end": 1994.42, "word": " 1", "probability": 0.85107421875}, {"start": 1994.42, "end": 1994.58, "word": " is", "probability": 0.59326171875}, {"start": 1994.58, "end": 1994.76, "word": " 1", "probability": 
0.84423828125}, {"start": 1994.76, "end": 1995.06, "word": " squared.", "probability": 0.82861328125}, {"start": 1998.98, "end": 1999.18, "word": " Yes,", "probability": 0.861328125}, {"start": 1999.42, "end": 1999.68, "word": " because", "probability": 0.89599609375}, {"start": 1999.68, "end": 1999.86, "word": " we", "probability": 0.9609375}, {"start": 1999.86, "end": 1999.94, "word": " are", "probability": 0.9306640625}, {"start": 1999.94, "end": 2000.18, "word": " looking", "probability": 0.9111328125}, {"start": 2000.18, "end": 2000.46, "word": " for", "probability": 0.95166015625}, {"start": 2000.46, "end": 2000.64, "word": " the", "probability": 0.923828125}, {"start": 2000.64, "end": 2001.18, "word": " difference", "probability": 0.86962890625}, {"start": 2001.18, "end": 2003.7, "word": " or", "probability": 0.48681640625}, {"start": 2003.7, "end": 2004.06, "word": " the", "probability": 0.8896484375}, {"start": 2004.06, "end": 2004.42, "word": " variance", "probability": 0.888671875}, {"start": 2004.42, "end": 2004.74, "word": " of", "probability": 0.499755859375}, {"start": 2004.74, "end": 2004.98, "word": " this,", "probability": 0.1944580078125}, {"start": 2005.16, "end": 2005.36, "word": " the", "probability": 0.88037109375}, {"start": 2005.36, "end": 2005.78, "word": " variance", "probability": 0.896484375}, {"start": 2005.78, "end": 2007.06, "word": " for", "probability": 0.8115234375}, {"start": 2007.06, "end": 2007.48, "word": " x1", "probability": 0.916015625}, {"start": 2007.48, "end": 2007.7, "word": " bar", "probability": 0.95361328125}, {"start": 2007.7, "end": 2007.98, "word": " minus", "probability": 0.9873046875}, {"start": 2007.98, "end": 2008.4, "word": " x2", "probability": 0.9619140625}, {"start": 2008.4, "end": 2008.58, "word": " bar.", "probability": 0.939453125}, {"start": 2009.14, "end": 2009.7, "word": " This", "probability": 0.8828125}, {"start": 2009.7, "end": 2010.06, "word": " variance", "probability": 0.8408203125}, {"start": 
2010.06, "end": 2010.26, "word": " is", "probability": 0.91015625}, {"start": 2010.26, "end": 2010.54, "word": " variance", "probability": 0.76611328125}, {"start": 2010.54, "end": 2010.74, "word": " of", "probability": 0.9521484375}, {"start": 2010.74, "end": 2011.2, "word": " x1", "probability": 0.98583984375}, {"start": 2011.2, "end": 2011.48, "word": " bar.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 76, "seek": 203957, "start": 2012.83, "end": 2039.57, "text": " plus variance of X bar. So we have to add this value. But we are talking about the difference of the mean standard error of X1 bar minus X bar. Now what's the standard error of X bar? Square root sigma over N or S square over N.", "tokens": [1804, 21977, 295, 1783, 2159, 13, 407, 321, 362, 281, 909, 341, 2158, 13, 583, 321, 366, 1417, 466, 264, 2649, 295, 264, 914, 3832, 6713, 295, 1783, 16, 2159, 3175, 1783, 2159, 13, 823, 437, 311, 264, 3832, 6713, 295, 1783, 2159, 30, 16463, 5593, 12771, 670, 426, 420, 318, 3732, 670, 426, 13], "avg_logprob": -0.24846539167421205, "compression_ratio": 1.5266666666666666, "no_speech_prob": 0.0, "words": [{"start": 2012.83, "end": 2013.27, "word": " plus", "probability": 0.343505859375}, {"start": 2013.27, "end": 2013.83, "word": " variance", "probability": 0.92822265625}, {"start": 2013.83, "end": 2014.37, "word": " of", "probability": 0.955078125}, {"start": 2014.37, "end": 2014.59, "word": " X", "probability": 0.5673828125}, {"start": 2014.59, "end": 2014.77, "word": " bar.", "probability": 0.33349609375}, {"start": 2016.17, "end": 2016.81, "word": " So", "probability": 0.83447265625}, {"start": 2016.81, "end": 2016.91, "word": " we", "probability": 0.693359375}, {"start": 2016.91, "end": 2017.09, "word": " have", "probability": 0.94921875}, {"start": 2017.09, "end": 2017.25, "word": " to", "probability": 0.96044921875}, {"start": 2017.25, "end": 2017.45, "word": " add", "probability": 0.89990234375}, {"start": 2017.45, "end": 2017.71, "word": " 
this", "probability": 0.78271484375}, {"start": 2017.71, "end": 2017.99, "word": " value.", "probability": 0.1923828125}, {"start": 2020.13, "end": 2020.77, "word": " But", "probability": 0.92578125}, {"start": 2020.77, "end": 2020.91, "word": " we", "probability": 0.9140625}, {"start": 2020.91, "end": 2021.03, "word": " are", "probability": 0.92626953125}, {"start": 2021.03, "end": 2021.37, "word": " talking", "probability": 0.85595703125}, {"start": 2021.37, "end": 2021.87, "word": " about", "probability": 0.90283203125}, {"start": 2021.87, "end": 2022.65, "word": " the", "probability": 0.87060546875}, {"start": 2022.65, "end": 2023.13, "word": " difference", "probability": 0.85888671875}, {"start": 2023.13, "end": 2023.55, "word": " of", "probability": 0.9560546875}, {"start": 2023.55, "end": 2023.93, "word": " the", "probability": 0.92626953125}, {"start": 2023.93, "end": 2025.01, "word": " mean", "probability": 0.95361328125}, {"start": 2025.01, "end": 2026.33, "word": " standard", "probability": 0.5205078125}, {"start": 2026.33, "end": 2026.67, "word": " error", "probability": 0.88427734375}, {"start": 2026.67, "end": 2027.95, "word": " of", "probability": 0.91455078125}, {"start": 2027.95, "end": 2028.41, "word": " X1", "probability": 0.743896484375}, {"start": 2028.41, "end": 2028.63, "word": " bar", "probability": 0.92578125}, {"start": 2028.63, "end": 2028.95, "word": " minus", "probability": 0.97802734375}, {"start": 2028.95, "end": 2029.21, "word": " X", "probability": 0.9892578125}, {"start": 2029.21, "end": 2029.41, "word": " bar.", "probability": 0.52099609375}, {"start": 2030.89, "end": 2031.53, "word": " Now", "probability": 0.94677734375}, {"start": 2031.53, "end": 2031.79, "word": " what's", "probability": 0.746826171875}, {"start": 2031.79, "end": 2031.95, "word": " the", "probability": 0.91748046875}, {"start": 2031.95, "end": 2032.27, "word": " standard", "probability": 0.95263671875}, {"start": 2032.27, "end": 2032.57, "word": " error", 
"probability": 0.87548828125}, {"start": 2032.57, "end": 2032.81, "word": " of", "probability": 0.96044921875}, {"start": 2032.81, "end": 2032.99, "word": " X", "probability": 0.984375}, {"start": 2032.99, "end": 2033.31, "word": " bar?", "probability": 0.943359375}, {"start": 2035.87, "end": 2036.51, "word": " Square", "probability": 0.6591796875}, {"start": 2036.51, "end": 2036.79, "word": " root", "probability": 0.90185546875}, {"start": 2036.79, "end": 2037.19, "word": " sigma", "probability": 0.495361328125}, {"start": 2037.19, "end": 2037.43, "word": " over", "probability": 0.91845703125}, {"start": 2037.43, "end": 2037.67, "word": " N", "probability": 0.623046875}, {"start": 2037.67, "end": 2038.47, "word": " or", "probability": 0.38134765625}, {"start": 2038.47, "end": 2038.83, "word": " S", "probability": 0.87060546875}, {"start": 2038.83, "end": 2039.15, "word": " square", "probability": 0.51025390625}, {"start": 2039.15, "end": 2039.35, "word": " over", "probability": 0.9150390625}, {"start": 2039.35, "end": 2039.57, "word": " N.", "probability": 0.982421875}], "temperature": 1.0}, {"id": 77, "seek": 206382, "start": 2040.44, "end": 2063.82, "text": " if sigma is unknown we should have s squared but here we are looking for standard error of x1 bar minus x2 bar so this equals and sigmas are unknown so we have s squared b divided by or multiply this case by one", "tokens": [498, 12771, 307, 9841, 321, 820, 362, 262, 8889, 457, 510, 321, 366, 1237, 337, 3832, 6713, 295, 2031, 16, 2159, 3175, 2031, 17, 2159, 370, 341, 6915, 293, 4556, 3799, 366, 9841, 370, 321, 362, 262, 8889, 272, 6666, 538, 420, 12972, 341, 1389, 538, 472], "avg_logprob": -0.17545573196063438, "compression_ratio": 1.5474452554744527, "no_speech_prob": 0.0, "words": [{"start": 2040.44, "end": 2040.72, "word": " if", "probability": 0.55517578125}, {"start": 2040.72, "end": 2040.94, "word": " sigma", "probability": 0.93603515625}, {"start": 2040.94, "end": 2041.12, "word": " is", 
"probability": 0.921875}, {"start": 2041.12, "end": 2041.42, "word": " unknown", "probability": 0.8935546875}, {"start": 2041.42, "end": 2042.68, "word": " we", "probability": 0.471435546875}, {"start": 2042.68, "end": 2042.98, "word": " should", "probability": 0.95703125}, {"start": 2042.98, "end": 2043.18, "word": " have", "probability": 0.955078125}, {"start": 2043.18, "end": 2043.58, "word": " s", "probability": 0.634765625}, {"start": 2043.58, "end": 2043.94, "word": " squared", "probability": 0.4287109375}, {"start": 2043.94, "end": 2045.7, "word": " but", "probability": 0.587890625}, {"start": 2045.7, "end": 2045.92, "word": " here", "probability": 0.8564453125}, {"start": 2045.92, "end": 2046.02, "word": " we", "probability": 0.96875}, {"start": 2046.02, "end": 2046.2, "word": " are", "probability": 0.9443359375}, {"start": 2046.2, "end": 2046.46, "word": " looking", "probability": 0.91650390625}, {"start": 2046.46, "end": 2046.88, "word": " for", "probability": 0.953125}, {"start": 2046.88, "end": 2047.48, "word": " standard", "probability": 0.88525390625}, {"start": 2047.48, "end": 2047.72, "word": " error", "probability": 0.67041015625}, {"start": 2047.72, "end": 2047.92, "word": " of", "probability": 0.95068359375}, {"start": 2047.92, "end": 2048.34, "word": " x1", "probability": 0.757568359375}, {"start": 2048.34, "end": 2048.54, "word": " bar", "probability": 0.96435546875}, {"start": 2048.54, "end": 2048.8, "word": " minus", "probability": 0.97705078125}, {"start": 2048.8, "end": 2049.3, "word": " x2", "probability": 0.973876953125}, {"start": 2049.3, "end": 2049.5, "word": " bar", "probability": 0.953125}, {"start": 2049.5, "end": 2050.3, "word": " so", "probability": 0.78271484375}, {"start": 2050.3, "end": 2050.56, "word": " this", "probability": 0.951171875}, {"start": 2050.56, "end": 2051.2, "word": " equals", "probability": 0.9541015625}, {"start": 2051.2, "end": 2054.04, "word": " and", "probability": 0.88232421875}, {"start": 2054.04, "end": 
2054.4, "word": " sigmas", "probability": 0.74658203125}, {"start": 2054.4, "end": 2054.64, "word": " are", "probability": 0.9443359375}, {"start": 2054.64, "end": 2055.02, "word": " unknown", "probability": 0.9091796875}, {"start": 2055.02, "end": 2056.4, "word": " so", "probability": 0.9287109375}, {"start": 2056.4, "end": 2056.56, "word": " we", "probability": 0.9462890625}, {"start": 2056.56, "end": 2056.82, "word": " have", "probability": 0.9462890625}, {"start": 2056.82, "end": 2057.86, "word": " s", "probability": 0.92578125}, {"start": 2057.86, "end": 2058.32, "word": " squared", "probability": 0.71435546875}, {"start": 2058.32, "end": 2059.46, "word": " b", "probability": 0.77685546875}, {"start": 2059.46, "end": 2061.3, "word": " divided", "probability": 0.66357421875}, {"start": 2061.3, "end": 2061.6, "word": " by", "probability": 0.9736328125}, {"start": 2061.6, "end": 2062.28, "word": " or", "probability": 0.9541015625}, {"start": 2062.28, "end": 2062.66, "word": " multiply", "probability": 0.7822265625}, {"start": 2062.66, "end": 2063.0, "word": " this", "probability": 0.94775390625}, {"start": 2063.0, "end": 2063.26, "word": " case", "probability": 0.9306640625}, {"start": 2063.26, "end": 2063.52, "word": " by", "probability": 0.97314453125}, {"start": 2063.52, "end": 2063.82, "word": " one", "probability": 0.828125}], "temperature": 1.0}, {"id": 78, "seek": 209458, "start": 2066.44, "end": 2094.58, "text": " 1 over N1 plus 1 over N2, because we have two different samples. So now, the standard error of the difference equals square root S square B multiplied by 1 over N1 plus 1 over N2. 
So now, the T set S6 becomes, again, we have X1 bar minus X2 bar minus the difference between the two population means, divided by this term,", "tokens": [502, 670, 426, 16, 1804, 502, 670, 426, 17, 11, 570, 321, 362, 732, 819, 10938, 13, 407, 586, 11, 264, 3832, 6713, 295, 264, 2649, 6915, 3732, 5593, 318, 3732, 363, 17207, 538, 502, 670, 426, 16, 1804, 502, 670, 426, 17, 13, 407, 586, 11, 264, 314, 992, 318, 21, 3643, 11, 797, 11, 321, 362, 1783, 16, 2159, 3175, 1783, 17, 2159, 3175, 264, 2649, 1296, 264, 732, 4415, 1355, 11, 6666, 538, 341, 1433, 11], "avg_logprob": -0.23398436792194843, "compression_ratio": 1.694736842105263, "no_speech_prob": 0.0, "words": [{"start": 2066.44, "end": 2066.74, "word": " 1", "probability": 0.4814453125}, {"start": 2066.74, "end": 2066.96, "word": " over", "probability": 0.8095703125}, {"start": 2066.96, "end": 2067.28, "word": " N1", "probability": 0.72314453125}, {"start": 2067.28, "end": 2067.58, "word": " plus", "probability": 0.92431640625}, {"start": 2067.58, "end": 2067.8, "word": " 1", "probability": 0.95849609375}, {"start": 2067.8, "end": 2068.02, "word": " over", "probability": 0.91162109375}, {"start": 2068.02, "end": 2068.4, "word": " N2,", "probability": 0.995361328125}, {"start": 2068.76, "end": 2069.22, "word": " because", "probability": 0.8994140625}, {"start": 2069.22, "end": 2069.36, "word": " we", "probability": 0.9580078125}, {"start": 2069.36, "end": 2069.48, "word": " have", "probability": 0.94482421875}, {"start": 2069.48, "end": 2069.7, "word": " two", "probability": 0.8505859375}, {"start": 2069.7, "end": 2070.26, "word": " different", "probability": 0.8837890625}, {"start": 2070.26, "end": 2071.62, "word": " samples.", "probability": 0.488037109375}, {"start": 2072.22, "end": 2072.42, "word": " So", "probability": 0.9453125}, {"start": 2072.42, "end": 2072.68, "word": " now,", "probability": 0.8837890625}, {"start": 2072.94, "end": 2073.2, "word": " the", "probability": 0.90673828125}, {"start": 2073.2, 
"end": 2074.3, "word": " standard", "probability": 0.92626953125}, {"start": 2074.3, "end": 2074.54, "word": " error", "probability": 0.84814453125}, {"start": 2074.54, "end": 2074.72, "word": " of", "probability": 0.95166015625}, {"start": 2074.72, "end": 2074.84, "word": " the", "probability": 0.9072265625}, {"start": 2074.84, "end": 2075.38, "word": " difference", "probability": 0.8720703125}, {"start": 2075.38, "end": 2076.62, "word": " equals", "probability": 0.84033203125}, {"start": 2076.62, "end": 2076.96, "word": " square", "probability": 0.83935546875}, {"start": 2076.96, "end": 2077.2, "word": " root", "probability": 0.8583984375}, {"start": 2077.2, "end": 2077.62, "word": " S", "probability": 0.340576171875}, {"start": 2077.62, "end": 2077.88, "word": " square", "probability": 0.45751953125}, {"start": 2077.88, "end": 2078.1, "word": " B", "probability": 0.853515625}, {"start": 2078.1, "end": 2078.56, "word": " multiplied", "probability": 0.6611328125}, {"start": 2078.56, "end": 2078.84, "word": " by", "probability": 0.9677734375}, {"start": 2078.84, "end": 2079.04, "word": " 1", "probability": 0.9619140625}, {"start": 2079.04, "end": 2079.24, "word": " over", "probability": 0.90380859375}, {"start": 2079.24, "end": 2079.54, "word": " N1", "probability": 0.995849609375}, {"start": 2079.54, "end": 2079.8, "word": " plus", "probability": 0.94189453125}, {"start": 2079.8, "end": 2080.0, "word": " 1", "probability": 0.98193359375}, {"start": 2080.0, "end": 2080.2, "word": " over", "probability": 0.9150390625}, {"start": 2080.2, "end": 2080.58, "word": " N2.", "probability": 0.996826171875}, {"start": 2081.58, "end": 2081.96, "word": " So", "probability": 0.947265625}, {"start": 2081.96, "end": 2082.26, "word": " now,", "probability": 0.9140625}, {"start": 2082.82, "end": 2083.12, "word": " the", "probability": 0.88232421875}, {"start": 2083.12, "end": 2083.32, "word": " T", "probability": 0.54541015625}, {"start": 2083.32, "end": 2083.54, "word": " set", 
"probability": 0.3583984375}, {"start": 2083.54, "end": 2083.98, "word": " S6", "probability": 0.60302734375}, {"start": 2083.98, "end": 2084.58, "word": " becomes,", "probability": 0.90625}, {"start": 2085.96, "end": 2086.4, "word": " again,", "probability": 0.951171875}, {"start": 2086.48, "end": 2086.56, "word": " we", "probability": 0.935546875}, {"start": 2086.56, "end": 2086.82, "word": " have", "probability": 0.9404296875}, {"start": 2086.82, "end": 2087.32, "word": " X1", "probability": 0.88427734375}, {"start": 2087.32, "end": 2087.52, "word": " bar", "probability": 0.93310546875}, {"start": 2087.52, "end": 2087.78, "word": " minus", "probability": 0.9833984375}, {"start": 2087.78, "end": 2088.16, "word": " X2", "probability": 0.996826171875}, {"start": 2088.16, "end": 2088.48, "word": " bar", "probability": 0.95263671875}, {"start": 2088.48, "end": 2089.58, "word": " minus", "probability": 0.89501953125}, {"start": 2089.58, "end": 2089.88, "word": " the", "probability": 0.8740234375}, {"start": 2089.88, "end": 2090.5, "word": " difference", "probability": 0.59423828125}, {"start": 2090.5, "end": 2090.94, "word": " between", "probability": 0.86767578125}, {"start": 2090.94, "end": 2091.1, "word": " the", "probability": 0.9091796875}, {"start": 2091.1, "end": 2091.24, "word": " two", "probability": 0.9306640625}, {"start": 2091.24, "end": 2091.64, "word": " population", "probability": 0.8125}, {"start": 2091.64, "end": 2092.04, "word": " means,", "probability": 0.626953125}, {"start": 2092.66, "end": 2092.96, "word": " divided", "probability": 0.7587890625}, {"start": 2092.96, "end": 2093.72, "word": " by", "probability": 0.96728515625}, {"start": 2093.72, "end": 2094.24, "word": " this", "probability": 0.947265625}, {"start": 2094.24, "end": 2094.58, "word": " term,", "probability": 0.404541015625}], "temperature": 1.0}, {"id": 79, "seek": 212461, "start": 2096.11, "end": 2124.61, "text": " represents the standard error of the estimate, this estimate. 
Generally speaking, any statistic, for example, T is estimate minus hypothesized value divided by the standard error of this estimate.", "tokens": [8855, 264, 3832, 6713, 295, 264, 12539, 11, 341, 12539, 13, 21082, 4124, 11, 604, 29588, 11, 337, 1365, 11, 314, 307, 12539, 3175, 14276, 1602, 2158, 6666, 538, 264, 3832, 6713, 295, 341, 12539, 13], "avg_logprob": -0.25865710103834, "compression_ratio": 1.5887096774193548, "no_speech_prob": 0.0, "words": [{"start": 2096.11, "end": 2096.65, "word": " represents", "probability": 0.407470703125}, {"start": 2096.65, "end": 2097.79, "word": " the", "probability": 0.86767578125}, {"start": 2097.79, "end": 2098.33, "word": " standard", "probability": 0.88427734375}, {"start": 2098.33, "end": 2098.83, "word": " error", "probability": 0.8828125}, {"start": 2098.83, "end": 2099.43, "word": " of", "probability": 0.95263671875}, {"start": 2099.43, "end": 2100.95, "word": " the", "probability": 0.4833984375}, {"start": 2100.95, "end": 2101.59, "word": " estimate,", "probability": 0.8291015625}, {"start": 2101.91, "end": 2102.15, "word": " this", "probability": 0.86767578125}, {"start": 2102.15, "end": 2102.89, "word": " estimate.", "probability": 0.88720703125}, {"start": 2104.99, "end": 2106.07, "word": " Generally", "probability": 0.452880859375}, {"start": 2106.07, "end": 2106.65, "word": " speaking,", "probability": 0.8466796875}, {"start": 2107.89, "end": 2108.13, "word": " any", "probability": 0.86669921875}, {"start": 2108.13, "end": 2108.87, "word": " statistic,", "probability": 0.88720703125}, {"start": 2109.53, "end": 2109.65, "word": " for", "probability": 0.94580078125}, {"start": 2109.65, "end": 2109.99, "word": " example,", "probability": 0.97119140625}, {"start": 2110.15, "end": 2110.27, "word": " T", "probability": 0.529296875}, {"start": 2110.27, "end": 2111.05, "word": " is", "probability": 0.744140625}, {"start": 2111.05, "end": 2111.77, "word": " estimate", "probability": 0.7958984375}, {"start": 2111.77, "end": 
2115.15, "word": " minus", "probability": 0.8232421875}, {"start": 2115.15, "end": 2117.77, "word": " hypothesized", "probability": 0.8349609375}, {"start": 2117.77, "end": 2118.19, "word": " value", "probability": 0.97216796875}, {"start": 2118.19, "end": 2121.89, "word": " divided", "probability": 0.51123046875}, {"start": 2121.89, "end": 2122.27, "word": " by", "probability": 0.970703125}, {"start": 2122.27, "end": 2122.69, "word": " the", "probability": 0.9189453125}, {"start": 2122.69, "end": 2123.13, "word": " standard", "probability": 0.93408203125}, {"start": 2123.13, "end": 2123.55, "word": " error", "probability": 0.884765625}, {"start": 2123.55, "end": 2123.85, "word": " of", "probability": 0.96728515625}, {"start": 2123.85, "end": 2124.13, "word": " this", "probability": 0.94873046875}, {"start": 2124.13, "end": 2124.61, "word": " estimate.", "probability": 0.9111328125}], "temperature": 1.0}, {"id": 80, "seek": 215578, "start": 2126.74, "end": 2155.78, "text": " And it is statistical. If we are talking about one sample, in this case, we have only one estimate, so it's X bar minus a hypothesized value, Mu, standard error of X bar, which is S over square root. That's for one sample. 
Now, for two samples, what should we have?", "tokens": [400, 309, 307, 2219, 42686, 13, 759, 321, 366, 1417, 466, 472, 6889, 11, 294, 341, 1389, 11, 321, 362, 787, 472, 12539, 11, 370, 309, 311, 1783, 2159, 3175, 257, 14276, 1602, 2158, 11, 15601, 11, 3832, 6713, 295, 1783, 2159, 11, 597, 307, 318, 670, 3732, 5593, 13, 663, 311, 337, 472, 6889, 13, 823, 11, 337, 732, 10938, 11, 437, 820, 321, 362, 30], "avg_logprob": -0.21691175988491843, "compression_ratio": 1.453551912568306, "no_speech_prob": 0.0, "words": [{"start": 2126.74, "end": 2127.04, "word": " And", "probability": 0.548828125}, {"start": 2127.04, "end": 2127.18, "word": " it", "probability": 0.90771484375}, {"start": 2127.18, "end": 2127.32, "word": " is", "probability": 0.90966796875}, {"start": 2127.32, "end": 2127.88, "word": " statistical.", "probability": 0.51123046875}, {"start": 2129.0, "end": 2129.58, "word": " If", "probability": 0.9375}, {"start": 2129.58, "end": 2129.72, "word": " we", "probability": 0.88330078125}, {"start": 2129.72, "end": 2129.84, "word": " are", "probability": 0.916015625}, {"start": 2129.84, "end": 2130.2, "word": " talking", "probability": 0.8369140625}, {"start": 2130.2, "end": 2130.56, "word": " about", "probability": 0.90771484375}, {"start": 2130.56, "end": 2130.94, "word": " one", "probability": 0.91015625}, {"start": 2130.94, "end": 2131.28, "word": " sample,", "probability": 0.7841796875}, {"start": 2132.16, "end": 2134.64, "word": " in", "probability": 0.89111328125}, {"start": 2134.64, "end": 2134.88, "word": " this", "probability": 0.9462890625}, {"start": 2134.88, "end": 2135.24, "word": " case,", "probability": 0.91455078125}, {"start": 2135.52, "end": 2135.92, "word": " we", "probability": 0.95361328125}, {"start": 2135.92, "end": 2136.14, "word": " have", "probability": 0.9296875}, {"start": 2136.14, "end": 2136.4, "word": " only", "probability": 0.91259765625}, {"start": 2136.4, "end": 2136.78, "word": " one", "probability": 0.921875}, {"start": 2136.78, 
"end": 2137.8, "word": " estimate,", "probability": 0.90185546875}, {"start": 2138.0, "end": 2138.0, "word": " so", "probability": 0.91064453125}, {"start": 2138.0, "end": 2138.2, "word": " it's", "probability": 0.922119140625}, {"start": 2138.2, "end": 2138.46, "word": " X", "probability": 0.48486328125}, {"start": 2138.46, "end": 2138.76, "word": " bar", "probability": 0.72265625}, {"start": 2138.76, "end": 2140.82, "word": " minus", "probability": 0.8369140625}, {"start": 2140.82, "end": 2141.1, "word": " a", "probability": 0.74169921875}, {"start": 2141.1, "end": 2141.64, "word": " hypothesized", "probability": 0.78759765625}, {"start": 2141.64, "end": 2141.92, "word": " value,", "probability": 0.974609375}, {"start": 2142.02, "end": 2142.24, "word": " Mu,", "probability": 0.473388671875}, {"start": 2143.3, "end": 2143.94, "word": " standard", "probability": 0.56982421875}, {"start": 2143.94, "end": 2144.4, "word": " error", "probability": 0.90234375}, {"start": 2144.4, "end": 2145.32, "word": " of", "probability": 0.90087890625}, {"start": 2145.32, "end": 2145.56, "word": " X", "probability": 0.9873046875}, {"start": 2145.56, "end": 2145.8, "word": " bar,", "probability": 0.8740234375}, {"start": 2146.08, "end": 2146.92, "word": " which", "probability": 0.9482421875}, {"start": 2146.92, "end": 2147.16, "word": " is", "probability": 0.9453125}, {"start": 2147.16, "end": 2147.48, "word": " S", "probability": 0.923828125}, {"start": 2147.48, "end": 2147.92, "word": " over", "probability": 0.90087890625}, {"start": 2147.92, "end": 2149.54, "word": " square", "probability": 0.67041015625}, {"start": 2149.54, "end": 2149.7, "word": " root.", "probability": 0.93310546875}, {"start": 2150.0, "end": 2150.58, "word": " That's", "probability": 0.93212890625}, {"start": 2150.58, "end": 2150.7, "word": " for", "probability": 0.91162109375}, {"start": 2150.7, "end": 2150.92, "word": " one", "probability": 0.9306640625}, {"start": 2150.92, "end": 2151.24, "word": " sample.", 
"probability": 0.8759765625}, {"start": 2152.62, "end": 2152.82, "word": " Now,", "probability": 0.85888671875}, {"start": 2152.84, "end": 2153.02, "word": " for", "probability": 0.94873046875}, {"start": 2153.02, "end": 2153.22, "word": " two", "probability": 0.93017578125}, {"start": 2153.22, "end": 2153.7, "word": " samples,", "probability": 0.85009765625}, {"start": 2154.74, "end": 2155.24, "word": " what", "probability": 0.94580078125}, {"start": 2155.24, "end": 2155.4, "word": " should", "probability": 0.96923828125}, {"start": 2155.4, "end": 2155.54, "word": " we", "probability": 0.962890625}, {"start": 2155.54, "end": 2155.78, "word": " have?", "probability": 0.94384765625}], "temperature": 1.0}, {"id": 81, "seek": 217892, "start": 2157.94, "end": 2178.92, "text": " We are talking about two symbols. Now the estimate for the difference. So the difference is x1 bar minus x2 bar minus the hypothesized value under x0. We are assuming mu1 minus mu2 equals 0.", "tokens": [492, 366, 1417, 466, 732, 16944, 13, 823, 264, 12539, 337, 264, 2649, 13, 407, 264, 2649, 307, 2031, 16, 2159, 3175, 2031, 17, 2159, 3175, 264, 14276, 1602, 2158, 833, 2031, 15, 13, 492, 366, 11926, 2992, 16, 3175, 2992, 17, 6915, 1958, 13], "avg_logprob": -0.2547554250644601, "compression_ratio": 1.4148148148148147, "no_speech_prob": 0.0, "words": [{"start": 2157.94, "end": 2158.16, "word": " We", "probability": 0.2978515625}, {"start": 2158.16, "end": 2158.32, "word": " are", "probability": 0.90625}, {"start": 2158.32, "end": 2158.68, "word": " talking", "probability": 0.83203125}, {"start": 2158.68, "end": 2159.24, "word": " about", "probability": 0.9072265625}, {"start": 2159.24, "end": 2160.86, "word": " two", "probability": 0.86669921875}, {"start": 2160.86, "end": 2161.3, "word": " symbols.", "probability": 0.55712890625}, {"start": 2162.14, "end": 2162.26, "word": " Now", "probability": 0.85205078125}, {"start": 2162.26, "end": 2162.44, "word": " the", "probability": 0.509765625}, 
{"start": 2162.44, "end": 2163.08, "word": " estimate", "probability": 0.8837890625}, {"start": 2163.08, "end": 2164.14, "word": " for", "probability": 0.91015625}, {"start": 2164.14, "end": 2164.32, "word": " the", "probability": 0.91796875}, {"start": 2164.32, "end": 2164.9, "word": " difference.", "probability": 0.8505859375}, {"start": 2165.52, "end": 2165.78, "word": " So", "probability": 0.83935546875}, {"start": 2165.78, "end": 2165.96, "word": " the", "probability": 0.802734375}, {"start": 2165.96, "end": 2166.38, "word": " difference", "probability": 0.8681640625}, {"start": 2166.38, "end": 2166.8, "word": " is", "probability": 0.95361328125}, {"start": 2166.8, "end": 2168.06, "word": " x1", "probability": 0.566650390625}, {"start": 2168.06, "end": 2168.28, "word": " bar", "probability": 0.81689453125}, {"start": 2168.28, "end": 2168.58, "word": " minus", "probability": 0.92333984375}, {"start": 2168.58, "end": 2169.02, "word": " x2", "probability": 0.986572265625}, {"start": 2169.02, "end": 2169.28, "word": " bar", "probability": 0.92529296875}, {"start": 2169.28, "end": 2170.14, "word": " minus", "probability": 0.80615234375}, {"start": 2170.14, "end": 2172.46, "word": " the", "probability": 0.75390625}, {"start": 2172.46, "end": 2173.14, "word": " hypothesized", "probability": 0.76318359375}, {"start": 2173.14, "end": 2173.52, "word": " value", "probability": 0.970703125}, {"start": 2173.52, "end": 2174.58, "word": " under", "probability": 0.49658203125}, {"start": 2174.58, "end": 2175.14, "word": " x0.", "probability": 0.57421875}, {"start": 2175.34, "end": 2175.64, "word": " We", "probability": 0.935546875}, {"start": 2175.64, "end": 2175.9, "word": " are", "probability": 0.93994140625}, {"start": 2175.9, "end": 2176.48, "word": " assuming", "probability": 0.89794921875}, {"start": 2176.48, "end": 2177.56, "word": " mu1", "probability": 0.747314453125}, {"start": 2177.56, "end": 2177.88, "word": " minus", "probability": 0.984375}, {"start": 2177.88, 
"end": 2178.3, "word": " mu2", "probability": 0.95849609375}, {"start": 2178.3, "end": 2178.62, "word": " equals", "probability": 0.8740234375}, {"start": 2178.62, "end": 2178.92, "word": " 0.", "probability": 0.4755859375}], "temperature": 1.0}, {"id": 82, "seek": 219787, "start": 2179.91, "end": 2197.87, "text": " That's the general case. I mean this special case. Sometimes suppose it's equal A or whatever it is. We have to plug A here. Divide by the standard error of this estimate.", "tokens": [663, 311, 264, 2674, 1389, 13, 286, 914, 341, 2121, 1389, 13, 4803, 7297, 309, 311, 2681, 316, 420, 2035, 309, 307, 13, 492, 362, 281, 5452, 316, 510, 13, 9886, 482, 538, 264, 3832, 6713, 295, 341, 12539, 13], "avg_logprob": -0.2966844454044249, "compression_ratio": 1.3333333333333333, "no_speech_prob": 0.0, "words": [{"start": 2179.91, "end": 2180.49, "word": " That's", "probability": 0.6949462890625}, {"start": 2180.49, "end": 2180.91, "word": " the", "probability": 0.88330078125}, {"start": 2180.91, "end": 2181.73, "word": " general", "probability": 0.86767578125}, {"start": 2181.73, "end": 2182.29, "word": " case.", "probability": 0.9150390625}, {"start": 2183.11, "end": 2183.21, "word": " I", "probability": 0.8994140625}, {"start": 2183.21, "end": 2183.33, "word": " mean", "probability": 0.93310546875}, {"start": 2183.33, "end": 2183.67, "word": " this", "probability": 0.61767578125}, {"start": 2183.67, "end": 2184.47, "word": " special", "probability": 0.9052734375}, {"start": 2184.47, "end": 2184.81, "word": " case.", "probability": 0.9248046875}, {"start": 2185.15, "end": 2185.77, "word": " Sometimes", "probability": 0.625}, {"start": 2185.77, "end": 2186.29, "word": " suppose", "probability": 0.51416015625}, {"start": 2186.29, "end": 2186.49, "word": " it's", "probability": 0.457275390625}, {"start": 2186.49, "end": 2186.85, "word": " equal", "probability": 0.86376953125}, {"start": 2186.85, "end": 2188.19, "word": " A", "probability": 0.325927734375}, {"start": 
2188.19, "end": 2188.49, "word": " or", "probability": 0.53125}, {"start": 2188.49, "end": 2188.75, "word": " whatever", "probability": 0.94189453125}, {"start": 2188.75, "end": 2189.05, "word": " it", "probability": 0.53466796875}, {"start": 2189.05, "end": 2189.15, "word": " is.", "probability": 0.9501953125}, {"start": 2189.51, "end": 2189.61, "word": " We", "probability": 0.8662109375}, {"start": 2189.61, "end": 2189.79, "word": " have", "probability": 0.94384765625}, {"start": 2189.79, "end": 2189.95, "word": " to", "probability": 0.9638671875}, {"start": 2189.95, "end": 2190.21, "word": " plug", "probability": 0.91845703125}, {"start": 2190.21, "end": 2190.47, "word": " A", "probability": 0.58984375}, {"start": 2190.47, "end": 2191.11, "word": " here.", "probability": 0.45166015625}, {"start": 2191.99, "end": 2192.71, "word": " Divide", "probability": 0.802734375}, {"start": 2192.71, "end": 2193.07, "word": " by", "probability": 0.958984375}, {"start": 2193.07, "end": 2194.55, "word": " the", "probability": 0.90283203125}, {"start": 2194.55, "end": 2194.97, "word": " standard", "probability": 0.92041015625}, {"start": 2194.97, "end": 2195.35, "word": " error", "probability": 0.896484375}, {"start": 2195.35, "end": 2196.77, "word": " of", "probability": 0.95947265625}, {"start": 2196.77, "end": 2197.17, "word": " this", "probability": 0.94873046875}, {"start": 2197.17, "end": 2197.87, "word": " estimate.", "probability": 0.91162109375}], "temperature": 1.0}, {"id": 83, "seek": 222848, "start": 2199.64, "end": 2228.48, "text": " Now the standard of this estimate equals this one. So we have to divide by S squared B multiplied by 1 over N1 plus 1 over N2. So this is your test statistic. Any question? So again, this is the estimate of the difference between U1 and U2. 
The other one is the hypothesized value.", "tokens": [823, 264, 3832, 295, 341, 12539, 6915, 341, 472, 13, 407, 321, 362, 281, 9845, 538, 318, 8889, 363, 17207, 538, 502, 670, 426, 16, 1804, 502, 670, 426, 17, 13, 407, 341, 307, 428, 1500, 29588, 13, 2639, 1168, 30, 407, 797, 11, 341, 307, 264, 12539, 295, 264, 2649, 1296, 624, 16, 293, 624, 17, 13, 440, 661, 472, 307, 264, 14276, 1602, 2158, 13], "avg_logprob": -0.15785846179899046, "compression_ratio": 1.492063492063492, "no_speech_prob": 0.0, "words": [{"start": 2199.64, "end": 2199.92, "word": " Now", "probability": 0.8212890625}, {"start": 2199.92, "end": 2200.1, "word": " the", "probability": 0.599609375}, {"start": 2200.1, "end": 2200.38, "word": " standard", "probability": 0.86279296875}, {"start": 2200.38, "end": 2200.6, "word": " of", "probability": 0.91259765625}, {"start": 2200.6, "end": 2200.82, "word": " this", "probability": 0.93603515625}, {"start": 2200.82, "end": 2201.18, "word": " estimate", "probability": 0.88037109375}, {"start": 2201.18, "end": 2201.66, "word": " equals", "probability": 0.8349609375}, {"start": 2201.66, "end": 2201.92, "word": " this", "probability": 0.90234375}, {"start": 2201.92, "end": 2202.16, "word": " one.", "probability": 0.90625}, {"start": 2202.72, "end": 2202.88, "word": " So", "probability": 0.83740234375}, {"start": 2202.88, "end": 2202.96, "word": " we", "probability": 0.73779296875}, {"start": 2202.96, "end": 2203.14, "word": " have", "probability": 0.94140625}, {"start": 2203.14, "end": 2203.38, "word": " to", "probability": 0.96826171875}, {"start": 2203.38, "end": 2204.1, "word": " divide", "probability": 0.92431640625}, {"start": 2204.1, "end": 2204.48, "word": " by", "probability": 0.896484375}, {"start": 2204.48, "end": 2205.26, "word": " S", "probability": 0.55517578125}, {"start": 2205.26, "end": 2205.52, "word": " squared", "probability": 0.61181640625}, {"start": 2205.52, "end": 2205.86, "word": " B", "probability": 0.5703125}, {"start": 2205.86, "end": 
2206.92, "word": " multiplied", "probability": 0.56982421875}, {"start": 2206.92, "end": 2207.28, "word": " by", "probability": 0.97119140625}, {"start": 2207.28, "end": 2207.5, "word": " 1", "probability": 0.76611328125}, {"start": 2207.5, "end": 2207.72, "word": " over", "probability": 0.88330078125}, {"start": 2207.72, "end": 2208.02, "word": " N1", "probability": 0.798583984375}, {"start": 2208.02, "end": 2208.28, "word": " plus", "probability": 0.9111328125}, {"start": 2208.28, "end": 2208.52, "word": " 1", "probability": 0.95703125}, {"start": 2208.52, "end": 2208.68, "word": " over", "probability": 0.90771484375}, {"start": 2208.68, "end": 2209.02, "word": " N2.", "probability": 0.870849609375}, {"start": 2209.6, "end": 2209.88, "word": " So", "probability": 0.92529296875}, {"start": 2209.88, "end": 2210.12, "word": " this", "probability": 0.9296875}, {"start": 2210.12, "end": 2210.26, "word": " is", "probability": 0.93603515625}, {"start": 2210.26, "end": 2210.62, "word": " your", "probability": 0.88818359375}, {"start": 2210.62, "end": 2212.18, "word": " test", "probability": 0.78857421875}, {"start": 2212.18, "end": 2213.04, "word": " statistic.", "probability": 0.88037109375}, {"start": 2214.06, "end": 2214.5, "word": " Any", "probability": 0.892578125}, {"start": 2214.5, "end": 2214.82, "word": " question?", "probability": 0.50341796875}, {"start": 2216.3, "end": 2216.86, "word": " So", "probability": 0.93896484375}, {"start": 2216.86, "end": 2217.14, "word": " again,", "probability": 0.90576171875}, {"start": 2217.32, "end": 2217.54, "word": " this", "probability": 0.94140625}, {"start": 2217.54, "end": 2217.62, "word": " is", "probability": 0.94189453125}, {"start": 2217.62, "end": 2217.78, "word": " the", "probability": 0.9189453125}, {"start": 2217.78, "end": 2218.28, "word": " estimate", "probability": 0.916015625}, {"start": 2218.28, "end": 2222.44, "word": " of", "probability": 0.88330078125}, {"start": 2222.44, "end": 2222.58, "word": " the", 
"probability": 0.9208984375}, {"start": 2222.58, "end": 2223.1, "word": " difference", "probability": 0.8515625}, {"start": 2223.1, "end": 2223.86, "word": " between", "probability": 0.87353515625}, {"start": 2223.86, "end": 2224.32, "word": " U1", "probability": 0.83544921875}, {"start": 2224.32, "end": 2224.48, "word": " and", "probability": 0.94970703125}, {"start": 2224.48, "end": 2224.86, "word": " U2.", "probability": 0.9970703125}, {"start": 2226.3, "end": 2226.86, "word": " The", "probability": 0.84716796875}, {"start": 2226.86, "end": 2227.08, "word": " other", "probability": 0.8876953125}, {"start": 2227.08, "end": 2227.24, "word": " one", "probability": 0.92236328125}, {"start": 2227.24, "end": 2227.38, "word": " is", "probability": 0.94677734375}, {"start": 2227.38, "end": 2227.52, "word": " the", "probability": 0.91796875}, {"start": 2227.52, "end": 2228.14, "word": " hypothesized", "probability": 0.84814453125}, {"start": 2228.14, "end": 2228.48, "word": " value.", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 84, "seek": 225965, "start": 2230.81, "end": 2259.65, "text": " In most cases, this difference is zero. Divide by this amount is the standard error of this estimate. And the standard error is given by square root. It looks like square root of S squared divided by N. 
But in this case, we have two standard deviations, so S1 squared over N1.", "tokens": [682, 881, 3331, 11, 341, 2649, 307, 4018, 13, 9886, 482, 538, 341, 2372, 307, 264, 3832, 6713, 295, 341, 12539, 13, 400, 264, 3832, 6713, 307, 2212, 538, 3732, 5593, 13, 467, 1542, 411, 3732, 5593, 295, 318, 8889, 6666, 538, 426, 13, 583, 294, 341, 1389, 11, 321, 362, 732, 3832, 31219, 763, 11, 370, 318, 16, 8889, 670, 426, 16, 13], "avg_logprob": -0.20084134615384616, "compression_ratio": 1.582857142857143, "no_speech_prob": 0.0, "words": [{"start": 2230.8100000000004, "end": 2231.55, "word": " In", "probability": 0.333251953125}, {"start": 2231.55, "end": 2231.79, "word": " most", "probability": 0.89501953125}, {"start": 2231.79, "end": 2232.19, "word": " cases,", "probability": 0.9072265625}, {"start": 2232.43, "end": 2232.55, "word": " this", "probability": 0.86474609375}, {"start": 2232.55, "end": 2233.05, "word": " difference", "probability": 0.88427734375}, {"start": 2233.05, "end": 2233.33, "word": " is", "probability": 0.9365234375}, {"start": 2233.33, "end": 2233.63, "word": " zero.", "probability": 0.69775390625}, {"start": 2235.05, "end": 2235.79, "word": " Divide", "probability": 0.756103515625}, {"start": 2235.79, "end": 2236.17, "word": " by", "probability": 0.966796875}, {"start": 2236.17, "end": 2236.93, "word": " this", "probability": 0.9130859375}, {"start": 2236.93, "end": 2237.35, "word": " amount", "probability": 0.88671875}, {"start": 2237.35, "end": 2237.67, "word": " is", "probability": 0.73876953125}, {"start": 2237.67, "end": 2237.91, "word": " the", "probability": 0.8974609375}, {"start": 2237.91, "end": 2238.29, "word": " standard", "probability": 0.91796875}, {"start": 2238.29, "end": 2238.67, "word": " error", "probability": 0.8876953125}, {"start": 2238.67, "end": 2238.93, "word": " of", "probability": 0.9443359375}, {"start": 2238.93, "end": 2239.19, "word": " this", "probability": 0.9404296875}, {"start": 2239.19, "end": 2239.71, "word": " 
estimate.", "probability": 0.85986328125}, {"start": 2243.21, "end": 2243.95, "word": " And", "probability": 0.77099609375}, {"start": 2243.95, "end": 2244.09, "word": " the", "probability": 0.8681640625}, {"start": 2244.09, "end": 2244.37, "word": " standard", "probability": 0.955078125}, {"start": 2244.37, "end": 2244.55, "word": " error", "probability": 0.72998046875}, {"start": 2244.55, "end": 2244.67, "word": " is", "probability": 0.873046875}, {"start": 2244.67, "end": 2244.83, "word": " given", "probability": 0.88623046875}, {"start": 2244.83, "end": 2245.07, "word": " by", "probability": 0.96337890625}, {"start": 2245.07, "end": 2245.43, "word": " square", "probability": 0.49755859375}, {"start": 2245.43, "end": 2245.85, "word": " root.", "probability": 0.93017578125}, {"start": 2246.99, "end": 2247.35, "word": " It", "probability": 0.8974609375}, {"start": 2247.35, "end": 2247.63, "word": " looks", "probability": 0.84765625}, {"start": 2247.63, "end": 2248.25, "word": " like", "probability": 0.931640625}, {"start": 2248.25, "end": 2249.91, "word": " square", "probability": 0.59716796875}, {"start": 2249.91, "end": 2250.23, "word": " root", "probability": 0.9365234375}, {"start": 2250.23, "end": 2251.57, "word": " of", "probability": 0.95458984375}, {"start": 2251.57, "end": 2251.81, "word": " S", "probability": 0.6396484375}, {"start": 2251.81, "end": 2252.05, "word": " squared", "probability": 0.60595703125}, {"start": 2252.05, "end": 2252.29, "word": " divided", "probability": 0.73974609375}, {"start": 2252.29, "end": 2252.47, "word": " by", "probability": 0.96728515625}, {"start": 2252.47, "end": 2252.75, "word": " N.", "probability": 0.7431640625}, {"start": 2253.29, "end": 2253.67, "word": " But", "probability": 0.92822265625}, {"start": 2253.67, "end": 2253.81, "word": " in", "probability": 0.8994140625}, {"start": 2253.81, "end": 2254.07, "word": " this", "probability": 0.94677734375}, {"start": 2254.07, "end": 2254.55, "word": " case,", 
"probability": 0.91015625}, {"start": 2255.09, "end": 2255.75, "word": " we", "probability": 0.9501953125}, {"start": 2255.75, "end": 2255.97, "word": " have", "probability": 0.9443359375}, {"start": 2255.97, "end": 2256.25, "word": " two", "probability": 0.9013671875}, {"start": 2256.25, "end": 2256.63, "word": " standard", "probability": 0.9345703125}, {"start": 2256.63, "end": 2257.19, "word": " deviations,", "probability": 0.95458984375}, {"start": 2257.67, "end": 2257.83, "word": " so", "probability": 0.8837890625}, {"start": 2257.83, "end": 2258.25, "word": " S1", "probability": 0.87841796875}, {"start": 2258.25, "end": 2258.67, "word": " squared", "probability": 0.87353515625}, {"start": 2258.67, "end": 2259.21, "word": " over", "probability": 0.91796875}, {"start": 2259.21, "end": 2259.65, "word": " N1.", "probability": 0.994384765625}], "temperature": 1.0}, {"id": 85, "seek": 228728, "start": 2260.24, "end": 2287.28, "text": " plus S2 squared over N2. But we are assuming that both sigmas are known and we assume they are equal. So these two are the same. So factor out. So here we have S squared over N1 plus 1 over 2. And this one is called square root. Any question? 
Basically, we are going to use", "tokens": [1804, 318, 17, 8889, 670, 426, 17, 13, 583, 321, 366, 11926, 300, 1293, 4556, 3799, 366, 2570, 293, 321, 6552, 436, 366, 2681, 13, 407, 613, 732, 366, 264, 912, 13, 407, 5952, 484, 13, 407, 510, 321, 362, 318, 8889, 670, 426, 16, 1804, 502, 670, 568, 13, 400, 341, 472, 307, 1219, 3732, 5593, 13, 2639, 1168, 30, 8537, 11, 321, 366, 516, 281, 764], "avg_logprob": -0.2511322325554447, "compression_ratio": 1.5657142857142856, "no_speech_prob": 0.0, "words": [{"start": 2260.24, "end": 2260.78, "word": " plus", "probability": 0.1982421875}, {"start": 2260.78, "end": 2261.48, "word": " S2", "probability": 0.605224609375}, {"start": 2261.48, "end": 2261.9, "word": " squared", "probability": 0.81298828125}, {"start": 2261.9, "end": 2262.16, "word": " over", "probability": 0.91845703125}, {"start": 2262.16, "end": 2262.6, "word": " N2.", "probability": 0.6829833984375}, {"start": 2262.9, "end": 2263.34, "word": " But", "probability": 0.9033203125}, {"start": 2263.34, "end": 2263.54, "word": " we", "probability": 0.85595703125}, {"start": 2263.54, "end": 2263.7, "word": " are", "probability": 0.9267578125}, {"start": 2263.7, "end": 2264.16, "word": " assuming", "probability": 0.90283203125}, {"start": 2264.16, "end": 2264.6, "word": " that", "probability": 0.9208984375}, {"start": 2264.6, "end": 2265.14, "word": " both", "probability": 0.45654296875}, {"start": 2265.14, "end": 2265.6, "word": " sigmas", "probability": 0.56573486328125}, {"start": 2265.6, "end": 2265.86, "word": " are", "probability": 0.86572265625}, {"start": 2265.86, "end": 2266.16, "word": " known", "probability": 0.58251953125}, {"start": 2266.16, "end": 2266.4, "word": " and", "probability": 0.71044921875}, {"start": 2266.4, "end": 2266.54, "word": " we", "probability": 0.91650390625}, {"start": 2266.54, "end": 2266.94, "word": " assume", "probability": 0.8603515625}, {"start": 2266.94, "end": 2267.12, "word": " they", "probability": 0.845703125}, {"start": 
2267.12, "end": 2267.28, "word": " are", "probability": 0.9296875}, {"start": 2267.28, "end": 2267.62, "word": " equal.", "probability": 0.90576171875}, {"start": 2268.04, "end": 2268.24, "word": " So", "probability": 0.935546875}, {"start": 2268.24, "end": 2268.58, "word": " these", "probability": 0.75439453125}, {"start": 2268.58, "end": 2269.12, "word": " two", "probability": 0.9140625}, {"start": 2269.12, "end": 2269.38, "word": " are", "probability": 0.94287109375}, {"start": 2269.38, "end": 2269.58, "word": " the", "probability": 0.919921875}, {"start": 2269.58, "end": 2269.88, "word": " same.", "probability": 0.91064453125}, {"start": 2271.06, "end": 2271.24, "word": " So", "probability": 0.9228515625}, {"start": 2271.24, "end": 2271.58, "word": " factor", "probability": 0.74755859375}, {"start": 2271.58, "end": 2271.96, "word": " out.", "probability": 0.8623046875}, {"start": 2273.44, "end": 2274.04, "word": " So", "probability": 0.9365234375}, {"start": 2274.04, "end": 2274.24, "word": " here", "probability": 0.83837890625}, {"start": 2274.24, "end": 2274.38, "word": " we", "probability": 0.90380859375}, {"start": 2274.38, "end": 2274.6, "word": " have", "probability": 0.943359375}, {"start": 2274.6, "end": 2274.9, "word": " S", "probability": 0.95166015625}, {"start": 2274.9, "end": 2275.4, "word": " squared", "probability": 0.79931640625}, {"start": 2275.4, "end": 2277.5, "word": " over", "probability": 0.260009765625}, {"start": 2277.5, "end": 2278.42, "word": " N1", "probability": 0.950439453125}, {"start": 2278.42, "end": 2278.68, "word": " plus", "probability": 0.9345703125}, {"start": 2278.68, "end": 2278.88, "word": " 1", "probability": 0.75390625}, {"start": 2278.88, "end": 2279.0, "word": " over", "probability": 0.88818359375}, {"start": 2279.0, "end": 2279.3, "word": " 2.", "probability": 0.84423828125}, {"start": 2279.64, "end": 2279.84, "word": " And", "probability": 0.91552734375}, {"start": 2279.84, "end": 2279.98, "word": " this", 
"probability": 0.9482421875}, {"start": 2279.98, "end": 2280.12, "word": " one", "probability": 0.9033203125}, {"start": 2280.12, "end": 2280.28, "word": " is", "probability": 0.88623046875}, {"start": 2280.28, "end": 2280.44, "word": " called", "probability": 0.81591796875}, {"start": 2280.44, "end": 2280.8, "word": " square", "probability": 0.62255859375}, {"start": 2280.8, "end": 2280.9, "word": " root.", "probability": 0.89404296875}, {"start": 2282.5, "end": 2283.1, "word": " Any", "probability": 0.8916015625}, {"start": 2283.1, "end": 2283.42, "word": " question?", "probability": 0.6279296875}, {"start": 2285.54, "end": 2286.14, "word": " Basically,", "probability": 0.9111328125}, {"start": 2286.16, "end": 2286.36, "word": " we", "probability": 0.9599609375}, {"start": 2286.36, "end": 2286.5, "word": " are", "probability": 0.93896484375}, {"start": 2286.5, "end": 2286.76, "word": " going", "probability": 0.94580078125}, {"start": 2286.76, "end": 2286.92, "word": " to", "probability": 0.96826171875}, {"start": 2286.92, "end": 2287.28, "word": " use", "probability": 0.87646484375}], "temperature": 1.0}, {"id": 86, "seek": 231380, "start": 2287.94, "end": 2313.8, "text": " this statistic and the formula will be given either the whole variance equation or the other one. 
Now what about the confidence interval?", "tokens": [341, 29588, 293, 264, 8513, 486, 312, 2212, 2139, 264, 1379, 21977, 5367, 420, 264, 661, 472, 13, 823, 437, 466, 264, 6687, 15035, 30], "avg_logprob": -0.3497596257008039, "compression_ratio": 1.3142857142857143, "no_speech_prob": 0.0, "words": [{"start": 2287.94, "end": 2288.5, "word": " this", "probability": 0.17431640625}, {"start": 2288.5, "end": 2289.18, "word": " statistic", "probability": 0.611328125}, {"start": 2289.18, "end": 2289.66, "word": " and", "probability": 0.7705078125}, {"start": 2289.66, "end": 2290.04, "word": " the", "probability": 0.7705078125}, {"start": 2290.04, "end": 2290.42, "word": " formula", "probability": 0.76806640625}, {"start": 2290.42, "end": 2291.04, "word": " will", "probability": 0.87939453125}, {"start": 2291.04, "end": 2291.34, "word": " be", "probability": 0.94189453125}, {"start": 2291.34, "end": 2291.7, "word": " given", "probability": 0.89404296875}, {"start": 2291.7, "end": 2292.3, "word": " either", "probability": 0.6845703125}, {"start": 2292.3, "end": 2293.16, "word": " the", "probability": 0.7587890625}, {"start": 2293.16, "end": 2293.42, "word": " whole", "probability": 0.1141357421875}, {"start": 2293.42, "end": 2294.1, "word": " variance", "probability": 0.9169921875}, {"start": 2294.1, "end": 2295.06, "word": " equation", "probability": 0.94775390625}, {"start": 2295.06, "end": 2295.34, "word": " or", "probability": 0.92822265625}, {"start": 2295.34, "end": 2295.46, "word": " the", "probability": 0.86376953125}, {"start": 2295.46, "end": 2295.66, "word": " other", "probability": 0.892578125}, {"start": 2295.66, "end": 2300.36, "word": " one.", "probability": 0.853515625}, {"start": 2312.24, "end": 2312.4, "word": " Now", "probability": 0.80029296875}, {"start": 2312.4, "end": 2312.54, "word": " what", "probability": 0.61328125}, {"start": 2312.54, "end": 2312.72, "word": " about", "probability": 0.49853515625}, {"start": 2312.72, "end": 2312.94, "word": " 
the", "probability": 0.85498046875}, {"start": 2312.94, "end": 2313.36, "word": " confidence", "probability": 0.97021484375}, {"start": 2313.36, "end": 2313.8, "word": " interval?", "probability": 0.96435546875}], "temperature": 1.0}, {"id": 87, "seek": 234303, "start": 2315.03, "end": 2343.03, "text": " As we mentioned before, any confidence interval can be constructed by using general form, which is estimate, I mean the point estimate, any confidence interval. Estimate, plus or minus critical value times standard error of your estimate. That's in general.", "tokens": [1018, 321, 2835, 949, 11, 604, 6687, 15035, 393, 312, 17083, 538, 1228, 2674, 1254, 11, 597, 307, 12539, 11, 286, 914, 264, 935, 12539, 11, 604, 6687, 15035, 13, 4410, 2905, 11, 1804, 420, 3175, 4924, 2158, 1413, 3832, 6713, 295, 428, 12539, 13, 663, 311, 294, 2674, 13], "avg_logprob": -0.22273284255289563, "compression_ratio": 1.6024844720496894, "no_speech_prob": 0.0, "words": [{"start": 2315.03, "end": 2315.37, "word": " As", "probability": 0.74951171875}, {"start": 2315.37, "end": 2315.53, "word": " we", "probability": 0.9150390625}, {"start": 2315.53, "end": 2315.83, "word": " mentioned", "probability": 0.77490234375}, {"start": 2315.83, "end": 2316.35, "word": " before,", "probability": 0.859375}, {"start": 2316.57, "end": 2316.75, "word": " any", "probability": 0.87353515625}, {"start": 2316.75, "end": 2317.27, "word": " confidence", "probability": 0.9658203125}, {"start": 2317.27, "end": 2317.67, "word": " interval", "probability": 0.96923828125}, {"start": 2317.67, "end": 2318.03, "word": " can", "probability": 0.9267578125}, {"start": 2318.03, "end": 2318.35, "word": " be", "probability": 0.95703125}, {"start": 2318.35, "end": 2319.23, "word": " constructed", "probability": 0.91357421875}, {"start": 2319.23, "end": 2319.57, "word": " by", "probability": 0.9345703125}, {"start": 2319.57, "end": 2320.01, "word": " using", "probability": 0.91748046875}, {"start": 2320.01, "end": 2320.93, 
"word": " general", "probability": 0.5615234375}, {"start": 2320.93, "end": 2321.39, "word": " form,", "probability": 0.8759765625}, {"start": 2321.81, "end": 2322.09, "word": " which", "probability": 0.9345703125}, {"start": 2322.09, "end": 2322.35, "word": " is", "probability": 0.923828125}, {"start": 2322.35, "end": 2323.03, "word": " estimate,", "probability": 0.28125}, {"start": 2324.45, "end": 2324.69, "word": " I", "probability": 0.88525390625}, {"start": 2324.69, "end": 2324.83, "word": " mean", "probability": 0.96826171875}, {"start": 2324.83, "end": 2325.01, "word": " the", "probability": 0.6455078125}, {"start": 2325.01, "end": 2325.25, "word": " point", "probability": 0.9658203125}, {"start": 2325.25, "end": 2325.81, "word": " estimate,", "probability": 0.90576171875}, {"start": 2327.03, "end": 2327.35, "word": " any", "probability": 0.564453125}, {"start": 2327.35, "end": 2327.93, "word": " confidence", "probability": 0.97998046875}, {"start": 2327.93, "end": 2328.39, "word": " interval.", "probability": 0.95849609375}, {"start": 2329.73, "end": 2330.35, "word": " Estimate,", "probability": 0.893310546875}, {"start": 2331.17, "end": 2332.35, "word": " plus", "probability": 0.93505859375}, {"start": 2332.35, "end": 2332.63, "word": " or", "probability": 0.95068359375}, {"start": 2332.63, "end": 2333.07, "word": " minus", "probability": 0.98974609375}, {"start": 2333.07, "end": 2335.23, "word": " critical", "probability": 0.492431640625}, {"start": 2335.23, "end": 2335.87, "word": " value", "probability": 0.97265625}, {"start": 2335.87, "end": 2338.73, "word": " times", "probability": 0.60546875}, {"start": 2338.73, "end": 2339.57, "word": " standard", "probability": 0.82080078125}, {"start": 2339.57, "end": 2339.93, "word": " error", "probability": 0.91064453125}, {"start": 2339.93, "end": 2340.37, "word": " of", "probability": 0.96240234375}, {"start": 2340.37, "end": 2340.55, "word": " your", "probability": 0.876953125}, {"start": 2340.55, "end": 
2340.99, "word": " estimate.", "probability": 0.92041015625}, {"start": 2342.05, "end": 2342.63, "word": " That's", "probability": 0.895263671875}, {"start": 2342.63, "end": 2342.77, "word": " in", "probability": 0.90185546875}, {"start": 2342.77, "end": 2343.03, "word": " general.", "probability": 0.91455078125}], "temperature": 1.0}, {"id": 88, "seek": 237378, "start": 2345.3, "end": 2373.78, "text": " estimate or point estimate plus or minus critical value times the standard error of your estimate before we had talked about confidence interval for mu so in that case we have x bar plus or minus t then standard error of this estimate which is s over root n that's before", "tokens": [12539, 420, 935, 12539, 1804, 420, 3175, 4924, 2158, 1413, 264, 3832, 6713, 295, 428, 12539, 949, 321, 632, 2825, 466, 6687, 15035, 337, 2992, 370, 294, 300, 1389, 321, 362, 2031, 2159, 1804, 420, 3175, 256, 550, 3832, 6713, 295, 341, 12539, 597, 307, 262, 670, 5593, 297, 300, 311, 949], "avg_logprob": -0.20459906222685328, "compression_ratio": 1.7777777777777777, "no_speech_prob": 0.0, "words": [{"start": 2345.3, "end": 2345.82, "word": " estimate", "probability": 0.4970703125}, {"start": 2345.82, "end": 2346.04, "word": " or", "probability": 0.533203125}, {"start": 2346.04, "end": 2346.28, "word": " point", "probability": 0.953125}, {"start": 2346.28, "end": 2346.78, "word": " estimate", "probability": 0.87548828125}, {"start": 2346.78, "end": 2347.94, "word": " plus", "probability": 0.498779296875}, {"start": 2347.94, "end": 2348.22, "word": " or", "probability": 0.92041015625}, {"start": 2348.22, "end": 2348.46, "word": " minus", "probability": 0.984375}, {"start": 2348.46, "end": 2348.9, "word": " critical", "probability": 0.7001953125}, {"start": 2348.9, "end": 2349.48, "word": " value", "probability": 0.9755859375}, {"start": 2349.48, "end": 2350.58, "word": " times", "probability": 0.8544921875}, {"start": 2350.58, "end": 2350.84, "word": " the", "probability": 0.76611328125}, 
{"start": 2350.84, "end": 2351.16, "word": " standard", "probability": 0.919921875}, {"start": 2351.16, "end": 2351.5, "word": " error", "probability": 0.86474609375}, {"start": 2351.5, "end": 2351.68, "word": " of", "probability": 0.83740234375}, {"start": 2351.68, "end": 2351.8, "word": " your", "probability": 0.873046875}, {"start": 2351.8, "end": 2352.24, "word": " estimate", "probability": 0.8896484375}, {"start": 2352.24, "end": 2354.78, "word": " before", "probability": 0.240966796875}, {"start": 2354.78, "end": 2355.12, "word": " we", "probability": 0.904296875}, {"start": 2355.12, "end": 2355.4, "word": " had", "probability": 0.86572265625}, {"start": 2355.4, "end": 2355.66, "word": " talked", "probability": 0.89208984375}, {"start": 2355.66, "end": 2356.14, "word": " about", "probability": 0.90771484375}, {"start": 2356.14, "end": 2358.28, "word": " confidence", "probability": 0.93408203125}, {"start": 2358.28, "end": 2358.82, "word": " interval", "probability": 0.95654296875}, {"start": 2358.82, "end": 2359.12, "word": " for", "probability": 0.947265625}, {"start": 2359.12, "end": 2359.4, "word": " mu", "probability": 0.413818359375}, {"start": 2359.4, "end": 2361.02, "word": " so", "probability": 0.61962890625}, {"start": 2361.02, "end": 2361.34, "word": " in", "probability": 0.91259765625}, {"start": 2361.34, "end": 2361.62, "word": " that", "probability": 0.9365234375}, {"start": 2361.62, "end": 2362.12, "word": " case", "probability": 0.9150390625}, {"start": 2362.12, "end": 2362.64, "word": " we", "probability": 0.91748046875}, {"start": 2362.64, "end": 2363.1, "word": " have", "probability": 0.95068359375}, {"start": 2363.1, "end": 2363.68, "word": " x", "probability": 0.916015625}, {"start": 2363.68, "end": 2363.98, "word": " bar", "probability": 0.81396484375}, {"start": 2363.98, "end": 2364.48, "word": " plus", "probability": 0.95751953125}, {"start": 2364.48, "end": 2364.72, "word": " or", "probability": 0.9501953125}, {"start": 2364.72, "end": 
2365.04, "word": " minus", "probability": 0.98583984375}, {"start": 2365.04, "end": 2365.46, "word": " t", "probability": 0.66943359375}, {"start": 2365.46, "end": 2366.62, "word": " then", "probability": 0.666015625}, {"start": 2366.62, "end": 2367.4, "word": " standard", "probability": 0.74951171875}, {"start": 2367.4, "end": 2367.78, "word": " error", "probability": 0.90966796875}, {"start": 2367.78, "end": 2368.06, "word": " of", "probability": 0.96533203125}, {"start": 2368.06, "end": 2368.34, "word": " this", "probability": 0.94921875}, {"start": 2368.34, "end": 2369.0, "word": " estimate", "probability": 0.89306640625}, {"start": 2369.0, "end": 2370.2, "word": " which", "probability": 0.87548828125}, {"start": 2370.2, "end": 2370.34, "word": " is", "probability": 0.951171875}, {"start": 2370.34, "end": 2370.6, "word": " s", "probability": 0.59716796875}, {"start": 2370.6, "end": 2370.88, "word": " over", "probability": 0.90478515625}, {"start": 2370.88, "end": 2371.14, "word": " root", "probability": 0.9404296875}, {"start": 2371.14, "end": 2371.42, "word": " n", "probability": 0.875}, {"start": 2371.42, "end": 2373.48, "word": " that's", "probability": 0.8720703125}, {"start": 2373.48, "end": 2373.78, "word": " before", "probability": 0.85400390625}], "temperature": 1.0}, {"id": 89, "seek": 240052, "start": 2375.22, "end": 2400.52, "text": " Now we are talking about confidence interval for mu1 minus mu2. Now my point estimate of this difference is x1 bar minus x2 bar, plus or minus. 
Critical value is T alpha over 2.", "tokens": [823, 321, 366, 1417, 466, 6687, 15035, 337, 2992, 16, 3175, 2992, 17, 13, 823, 452, 935, 12539, 295, 341, 2649, 307, 2031, 16, 2159, 3175, 2031, 17, 2159, 11, 1804, 420, 3175, 13, 39482, 2158, 307, 314, 8961, 670, 568, 13], "avg_logprob": -0.2014898283537044, "compression_ratio": 1.328358208955224, "no_speech_prob": 0.0, "words": [{"start": 2375.22, "end": 2375.5, "word": " Now", "probability": 0.62548828125}, {"start": 2375.5, "end": 2375.66, "word": " we", "probability": 0.74267578125}, {"start": 2375.66, "end": 2375.78, "word": " are", "probability": 0.939453125}, {"start": 2375.78, "end": 2376.12, "word": " talking", "probability": 0.84228515625}, {"start": 2376.12, "end": 2376.66, "word": " about", "probability": 0.91650390625}, {"start": 2376.66, "end": 2377.52, "word": " confidence", "probability": 0.728515625}, {"start": 2377.52, "end": 2379.82, "word": " interval", "probability": 0.91162109375}, {"start": 2379.82, "end": 2382.54, "word": " for", "probability": 0.88037109375}, {"start": 2382.54, "end": 2384.42, "word": " mu1", "probability": 0.5843505859375}, {"start": 2384.42, "end": 2384.78, "word": " minus", "probability": 0.8984375}, {"start": 2384.78, "end": 2385.8, "word": " mu2.", "probability": 0.8544921875}, {"start": 2386.7, "end": 2387.36, "word": " Now", "probability": 0.85888671875}, {"start": 2387.36, "end": 2387.7, "word": " my", "probability": 0.7412109375}, {"start": 2387.7, "end": 2388.08, "word": " point", "probability": 0.982421875}, {"start": 2388.08, "end": 2388.56, "word": " estimate", "probability": 0.919921875}, {"start": 2388.56, "end": 2389.06, "word": " of", "probability": 0.9609375}, {"start": 2389.06, "end": 2389.34, "word": " this", "probability": 0.94970703125}, {"start": 2389.34, "end": 2390.0, "word": " difference", "probability": 0.875}, {"start": 2390.0, "end": 2390.6, "word": " is", "probability": 0.95068359375}, {"start": 2390.6, "end": 2392.1, "word": " x1", "probability": 
0.8701171875}, {"start": 2392.1, "end": 2392.32, "word": " bar", "probability": 0.939453125}, {"start": 2392.32, "end": 2392.66, "word": " minus", "probability": 0.98388671875}, {"start": 2392.66, "end": 2393.06, "word": " x2", "probability": 0.992431640625}, {"start": 2393.06, "end": 2393.36, "word": " bar,", "probability": 0.9521484375}, {"start": 2394.08, "end": 2395.36, "word": " plus", "probability": 0.95703125}, {"start": 2395.36, "end": 2395.58, "word": " or", "probability": 0.958984375}, {"start": 2395.58, "end": 2396.0, "word": " minus.", "probability": 0.98876953125}, {"start": 2397.98, "end": 2398.7, "word": " Critical", "probability": 0.58544921875}, {"start": 2398.7, "end": 2399.22, "word": " value", "probability": 0.96875}, {"start": 2399.22, "end": 2399.6, "word": " is", "probability": 0.92919921875}, {"start": 2399.6, "end": 2399.76, "word": " T", "probability": 0.59521484375}, {"start": 2399.76, "end": 2400.02, "word": " alpha", "probability": 0.791015625}, {"start": 2400.02, "end": 2400.24, "word": " over", "probability": 0.81201171875}, {"start": 2400.24, "end": 2400.52, "word": " 2.", "probability": 0.406005859375}], "temperature": 1.0}, {"id": 90, "seek": 242383, "start": 2403.13, "end": 2423.83, "text": " since sigma's are unknown times the standard error of the estimate this value square root one over n one plus one over n one this is your confidence interval by the way this statistic", "tokens": [1670, 12771, 311, 366, 9841, 1413, 264, 3832, 6713, 295, 264, 12539, 341, 2158, 3732, 5593, 472, 670, 297, 472, 1804, 472, 670, 297, 472, 341, 307, 428, 6687, 15035, 538, 264, 636, 341, 29588], "avg_logprob": -0.3967013996508386, "compression_ratio": 1.5206611570247934, "no_speech_prob": 0.0, "words": [{"start": 2403.13, "end": 2403.67, "word": " since", "probability": 0.125732421875}, {"start": 2403.67, "end": 2404.23, "word": " sigma's", "probability": 0.30419921875}, {"start": 2404.23, "end": 2404.41, "word": " are", "probability": 0.9072265625}, 
{"start": 2404.41, "end": 2404.75, "word": " unknown", "probability": 0.82861328125}, {"start": 2404.75, "end": 2406.15, "word": " times", "probability": 0.4267578125}, {"start": 2406.15, "end": 2406.65, "word": " the", "probability": 0.818359375}, {"start": 2406.65, "end": 2407.13, "word": " standard", "probability": 0.876953125}, {"start": 2407.13, "end": 2407.39, "word": " error", "probability": 0.865234375}, {"start": 2407.39, "end": 2407.55, "word": " of", "probability": 0.921875}, {"start": 2407.55, "end": 2407.67, "word": " the", "probability": 0.88818359375}, {"start": 2407.67, "end": 2408.31, "word": " estimate", "probability": 0.8681640625}, {"start": 2408.31, "end": 2409.75, "word": " this", "probability": 0.473876953125}, {"start": 2409.75, "end": 2410.15, "word": " value", "probability": 0.8271484375}, {"start": 2410.15, "end": 2411.01, "word": " square", "probability": 0.305908203125}, {"start": 2411.01, "end": 2411.33, "word": " root", "probability": 0.9326171875}, {"start": 2411.33, "end": 2414.21, "word": " one", "probability": 0.314208984375}, {"start": 2414.21, "end": 2414.47, "word": " over", "probability": 0.9296875}, {"start": 2414.47, "end": 2414.67, "word": " n", "probability": 0.48486328125}, {"start": 2414.67, "end": 2414.91, "word": " one", "probability": 0.8134765625}, {"start": 2414.91, "end": 2415.31, "word": " plus", "probability": 0.783203125}, {"start": 2415.31, "end": 2415.51, "word": " one", "probability": 0.91748046875}, {"start": 2415.51, "end": 2415.65, "word": " over", "probability": 0.89892578125}, {"start": 2415.65, "end": 2415.87, "word": " n", "probability": 0.339111328125}, {"start": 2415.87, "end": 2416.87, "word": " one", "probability": 0.7568359375}, {"start": 2416.87, "end": 2419.69, "word": " this", "probability": 0.587890625}, {"start": 2419.69, "end": 2419.89, "word": " is", "probability": 0.93408203125}, {"start": 2419.89, "end": 2420.07, "word": " your", "probability": 0.8759765625}, {"start": 2420.07, "end": 
2420.65, "word": " confidence", "probability": 0.9775390625}, {"start": 2420.65, "end": 2421.75, "word": " interval", "probability": 0.9609375}, {"start": 2421.75, "end": 2422.35, "word": " by", "probability": 0.77734375}, {"start": 2422.35, "end": 2422.49, "word": " the", "probability": 0.92822265625}, {"start": 2422.49, "end": 2422.73, "word": " way", "probability": 0.96044921875}, {"start": 2422.73, "end": 2423.23, "word": " this", "probability": 0.81396484375}, {"start": 2423.23, "end": 2423.83, "word": " statistic", "probability": 0.71484375}], "temperature": 1.0}, {"id": 91, "seek": 244525, "start": 2425.21, "end": 2445.25, "text": " as T distribution with degrees of freedom equals N1 plus N2 minus 2. Because for one population, when we have one sample, your degrees of freedom is N minus 1. If we have two populations and we selected two random samples, your degrees of freedom", "tokens": [382, 314, 7316, 365, 5310, 295, 5645, 6915, 426, 16, 1804, 426, 17, 3175, 568, 13, 1436, 337, 472, 4415, 11, 562, 321, 362, 472, 6889, 11, 428, 5310, 295, 5645, 307, 426, 3175, 502, 13, 759, 321, 362, 732, 12822, 293, 321, 8209, 732, 4974, 10938, 11, 428, 5310, 295, 5645], "avg_logprob": -0.17452829513909682, "compression_ratio": 1.657718120805369, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2425.21, "end": 2425.71, "word": " as", "probability": 0.350830078125}, {"start": 2425.71, "end": 2426.39, "word": " T", "probability": 0.658203125}, {"start": 2426.39, "end": 2427.17, "word": " distribution", "probability": 0.7646484375}, {"start": 2427.17, "end": 2428.31, "word": " with", "probability": 0.85595703125}, {"start": 2428.31, "end": 2428.61, "word": " degrees", "probability": 0.93359375}, {"start": 2428.61, "end": 2428.83, "word": " of", "probability": 0.970703125}, {"start": 2428.83, "end": 2429.17, "word": " freedom", "probability": 0.9296875}, {"start": 2429.17, "end": 2429.75, "word": " equals", "probability": 0.92431640625}, {"start": 2429.75, "end": 
2430.91, "word": " N1", "probability": 0.728759765625}, {"start": 2430.91, "end": 2431.21, "word": " plus", "probability": 0.91845703125}, {"start": 2431.21, "end": 2431.53, "word": " N2", "probability": 0.9912109375}, {"start": 2431.53, "end": 2431.77, "word": " minus", "probability": 0.98046875}, {"start": 2431.77, "end": 2432.05, "word": " 2.", "probability": 0.78076171875}, {"start": 2432.39, "end": 2432.93, "word": " Because", "probability": 0.90283203125}, {"start": 2432.93, "end": 2433.39, "word": " for", "probability": 0.84228515625}, {"start": 2433.39, "end": 2433.63, "word": " one", "probability": 0.90087890625}, {"start": 2433.63, "end": 2434.19, "word": " population,", "probability": 0.93505859375}, {"start": 2434.61, "end": 2435.29, "word": " when", "probability": 0.767578125}, {"start": 2435.29, "end": 2435.41, "word": " we", "probability": 0.75048828125}, {"start": 2435.41, "end": 2435.55, "word": " have", "probability": 0.94384765625}, {"start": 2435.55, "end": 2435.83, "word": " one", "probability": 0.92822265625}, {"start": 2435.83, "end": 2436.19, "word": " sample,", "probability": 0.865234375}, {"start": 2436.93, "end": 2437.15, "word": " your", "probability": 0.76708984375}, {"start": 2437.15, "end": 2437.43, "word": " degrees", "probability": 0.9375}, {"start": 2437.43, "end": 2437.61, "word": " of", "probability": 0.96923828125}, {"start": 2437.61, "end": 2437.83, "word": " freedom", "probability": 0.935546875}, {"start": 2437.83, "end": 2438.07, "word": " is", "probability": 0.9423828125}, {"start": 2438.07, "end": 2438.21, "word": " N", "probability": 0.97900390625}, {"start": 2438.21, "end": 2438.47, "word": " minus", "probability": 0.97705078125}, {"start": 2438.47, "end": 2438.85, "word": " 1.", "probability": 0.8095703125}, {"start": 2439.79, "end": 2440.07, "word": " If", "probability": 0.9541015625}, {"start": 2440.07, "end": 2440.23, "word": " we", "probability": 0.9521484375}, {"start": 2440.23, "end": 2440.43, "word": " have", 
"probability": 0.94287109375}, {"start": 2440.43, "end": 2440.67, "word": " two", "probability": 0.89794921875}, {"start": 2440.67, "end": 2441.31, "word": " populations", "probability": 0.6591796875}, {"start": 2441.31, "end": 2441.57, "word": " and", "probability": 0.72265625}, {"start": 2441.57, "end": 2441.73, "word": " we", "probability": 0.900390625}, {"start": 2441.73, "end": 2442.13, "word": " selected", "probability": 0.69140625}, {"start": 2442.13, "end": 2442.39, "word": " two", "probability": 0.91064453125}, {"start": 2442.39, "end": 2442.79, "word": " random", "probability": 0.80517578125}, {"start": 2442.79, "end": 2443.91, "word": " samples,", "probability": 0.92333984375}, {"start": 2444.39, "end": 2444.53, "word": " your", "probability": 0.64306640625}, {"start": 2444.53, "end": 2444.79, "word": " degrees", "probability": 0.94580078125}, {"start": 2444.79, "end": 2444.95, "word": " of", "probability": 0.966796875}, {"start": 2444.95, "end": 2445.25, "word": " freedom", "probability": 0.93115234375}], "temperature": 1.0}, {"id": 92, "seek": 247334, "start": 2446.7, "end": 2473.34, "text": " Is n1 plus minus 1 plus n2 minus 1. So at least should be n1 plus n2 minus 2. So this statistic has T distribution with degrees of freedom n1 plus n2 minus 2. This is only if we assume variances unknown, but they are equal. 
In this case, your degrees of freedom, n1 plus n2 minus 2.", "tokens": [1119, 297, 16, 1804, 3175, 502, 1804, 297, 17, 3175, 502, 13, 407, 412, 1935, 820, 312, 297, 16, 1804, 297, 17, 3175, 568, 13, 407, 341, 29588, 575, 314, 7316, 365, 5310, 295, 5645, 297, 16, 1804, 297, 17, 3175, 568, 13, 639, 307, 787, 498, 321, 6552, 1374, 21518, 9841, 11, 457, 436, 366, 2681, 13, 682, 341, 1389, 11, 428, 5310, 295, 5645, 11, 297, 16, 1804, 297, 17, 3175, 568, 13], "avg_logprob": -0.2003494993244347, "compression_ratio": 1.7577639751552796, "no_speech_prob": 0.0, "words": [{"start": 2446.7, "end": 2447.0, "word": " Is", "probability": 0.2005615234375}, {"start": 2447.0, "end": 2447.32, "word": " n1", "probability": 0.5592041015625}, {"start": 2447.32, "end": 2447.68, "word": " plus", "probability": 0.452392578125}, {"start": 2447.68, "end": 2448.06, "word": " minus", "probability": 0.471923828125}, {"start": 2448.06, "end": 2448.36, "word": " 1", "probability": 0.81982421875}, {"start": 2448.36, "end": 2448.8, "word": " plus", "probability": 0.92529296875}, {"start": 2448.8, "end": 2449.98, "word": " n2", "probability": 0.836181640625}, {"start": 2449.98, "end": 2450.28, "word": " minus", "probability": 0.98046875}, {"start": 2450.28, "end": 2450.6, "word": " 1.", "probability": 0.93359375}, {"start": 2450.76, "end": 2450.9, "word": " So", "probability": 0.94287109375}, {"start": 2450.9, "end": 2451.16, "word": " at", "probability": 0.1348876953125}, {"start": 2451.16, "end": 2451.52, "word": " least", "probability": 0.939453125}, {"start": 2451.52, "end": 2451.88, "word": " should", "probability": 0.73681640625}, {"start": 2451.88, "end": 2452.4, "word": " be", "probability": 0.95068359375}, {"start": 2452.4, "end": 2453.4, "word": " n1", "probability": 0.856201171875}, {"start": 2453.4, "end": 2453.68, "word": " plus", "probability": 0.953125}, {"start": 2453.68, "end": 2453.98, "word": " n2", "probability": 0.974853515625}, {"start": 2453.98, "end": 2454.24, "word": " 
minus", "probability": 0.9765625}, {"start": 2454.24, "end": 2454.44, "word": " 2.", "probability": 0.8330078125}, {"start": 2454.76, "end": 2455.0, "word": " So", "probability": 0.94580078125}, {"start": 2455.0, "end": 2455.26, "word": " this", "probability": 0.87646484375}, {"start": 2455.26, "end": 2455.94, "word": " statistic", "probability": 0.9169921875}, {"start": 2455.94, "end": 2457.34, "word": " has", "probability": 0.76611328125}, {"start": 2457.34, "end": 2457.62, "word": " T", "probability": 0.39697265625}, {"start": 2457.62, "end": 2458.22, "word": " distribution", "probability": 0.80078125}, {"start": 2458.22, "end": 2459.3, "word": " with", "probability": 0.83349609375}, {"start": 2459.3, "end": 2459.6, "word": " degrees", "probability": 0.9482421875}, {"start": 2459.6, "end": 2459.78, "word": " of", "probability": 0.96923828125}, {"start": 2459.78, "end": 2460.04, "word": " freedom", "probability": 0.94775390625}, {"start": 2460.04, "end": 2460.5, "word": " n1", "probability": 0.841552734375}, {"start": 2460.5, "end": 2460.8, "word": " plus", "probability": 0.9521484375}, {"start": 2460.8, "end": 2461.06, "word": " n2", "probability": 0.98779296875}, {"start": 2461.06, "end": 2461.28, "word": " minus", "probability": 0.98095703125}, {"start": 2461.28, "end": 2461.56, "word": " 2.", "probability": 0.974609375}, {"start": 2462.4, "end": 2462.7, "word": " This", "probability": 0.89697265625}, {"start": 2462.7, "end": 2462.9, "word": " is", "probability": 0.94580078125}, {"start": 2462.9, "end": 2463.42, "word": " only", "probability": 0.92822265625}, {"start": 2463.42, "end": 2463.96, "word": " if", "probability": 0.94287109375}, {"start": 2463.96, "end": 2464.26, "word": " we", "probability": 0.95751953125}, {"start": 2464.26, "end": 2464.8, "word": " assume", "probability": 0.921875}, {"start": 2464.8, "end": 2466.2, "word": " variances", "probability": 0.810791015625}, {"start": 2466.2, "end": 2466.94, "word": " unknown,", "probability": 0.828125}, 
{"start": 2467.56, "end": 2467.94, "word": " but", "probability": 0.92578125}, {"start": 2467.94, "end": 2468.22, "word": " they", "probability": 0.888671875}, {"start": 2468.22, "end": 2468.6, "word": " are", "probability": 0.93896484375}, {"start": 2468.6, "end": 2469.2, "word": " equal.", "probability": 0.8984375}, {"start": 2469.68, "end": 2469.86, "word": " In", "probability": 0.9599609375}, {"start": 2469.86, "end": 2470.06, "word": " this", "probability": 0.9453125}, {"start": 2470.06, "end": 2470.28, "word": " case,", "probability": 0.9111328125}, {"start": 2470.32, "end": 2470.44, "word": " your", "probability": 0.67578125}, {"start": 2470.44, "end": 2470.72, "word": " degrees", "probability": 0.931640625}, {"start": 2470.72, "end": 2470.9, "word": " of", "probability": 0.96533203125}, {"start": 2470.9, "end": 2471.26, "word": " freedom,", "probability": 0.94287109375}, {"start": 2471.92, "end": 2472.18, "word": " n1", "probability": 0.95263671875}, {"start": 2472.18, "end": 2472.48, "word": " plus", "probability": 0.94873046875}, {"start": 2472.48, "end": 2472.76, "word": " n2", "probability": 0.99609375}, {"start": 2472.76, "end": 2473.04, "word": " minus", "probability": 0.9794921875}, {"start": 2473.04, "end": 2473.34, "word": " 2.", "probability": 0.92431640625}], "temperature": 1.0}, {"id": 93, "seek": 250161, "start": 2474.85, "end": 2501.61, "text": " So that's for the testing and the confidence interval approach. So if sigma's unknown and they are equal, your confidence interval is x1 bar minus x2 bar plus or minus z alpha over two square root. That's going to be multiplied by one over one plus one over one. 
If we are talking about confidence for mu2 minus mu1,", "tokens": [407, 300, 311, 337, 264, 4997, 293, 264, 6687, 15035, 3109, 13, 407, 498, 12771, 311, 9841, 293, 436, 366, 2681, 11, 428, 6687, 15035, 307, 2031, 16, 2159, 3175, 2031, 17, 2159, 1804, 420, 3175, 710, 8961, 670, 732, 3732, 5593, 13, 663, 311, 516, 281, 312, 17207, 538, 472, 670, 472, 1804, 472, 670, 472, 13, 759, 321, 366, 1417, 466, 6687, 337, 2992, 17, 3175, 2992, 16, 11], "avg_logprob": -0.19563801400363445, "compression_ratio": 1.6424870466321244, "no_speech_prob": 0.0, "words": [{"start": 2474.85, "end": 2475.13, "word": " So", "probability": 0.90478515625}, {"start": 2475.13, "end": 2475.69, "word": " that's", "probability": 0.8896484375}, {"start": 2475.69, "end": 2476.51, "word": " for", "probability": 0.947265625}, {"start": 2476.51, "end": 2476.69, "word": " the", "probability": 0.91064453125}, {"start": 2476.69, "end": 2477.15, "word": " testing", "probability": 0.87060546875}, {"start": 2477.15, "end": 2477.63, "word": " and", "probability": 0.86474609375}, {"start": 2477.63, "end": 2477.79, "word": " the", "probability": 0.8779296875}, {"start": 2477.79, "end": 2478.35, "word": " confidence", "probability": 0.9716796875}, {"start": 2478.35, "end": 2478.95, "word": " interval", "probability": 0.96826171875}, {"start": 2478.95, "end": 2479.69, "word": " approach.", "probability": 0.90869140625}, {"start": 2480.63, "end": 2480.83, "word": " So", "probability": 0.94189453125}, {"start": 2480.83, "end": 2480.97, "word": " if", "probability": 0.875}, {"start": 2480.97, "end": 2481.37, "word": " sigma's", "probability": 0.5732421875}, {"start": 2481.37, "end": 2481.71, "word": " unknown", "probability": 0.8046875}, {"start": 2481.71, "end": 2481.97, "word": " and", "probability": 0.8076171875}, {"start": 2481.97, "end": 2482.09, "word": " they", "probability": 0.87939453125}, {"start": 2482.09, "end": 2482.23, "word": " are", "probability": 0.91455078125}, {"start": 2482.23, "end": 2482.55, 
"word": " equal,", "probability": 0.90771484375}, {"start": 2482.75, "end": 2482.91, "word": " your", "probability": 0.87109375}, {"start": 2482.91, "end": 2483.37, "word": " confidence", "probability": 0.97705078125}, {"start": 2483.37, "end": 2483.75, "word": " interval", "probability": 0.96533203125}, {"start": 2483.75, "end": 2484.23, "word": " is", "probability": 0.93701171875}, {"start": 2484.23, "end": 2485.43, "word": " x1", "probability": 0.5667724609375}, {"start": 2485.43, "end": 2485.61, "word": " bar", "probability": 0.7958984375}, {"start": 2485.61, "end": 2485.85, "word": " minus", "probability": 0.97119140625}, {"start": 2485.85, "end": 2486.23, "word": " x2", "probability": 0.990234375}, {"start": 2486.23, "end": 2486.49, "word": " bar", "probability": 0.95556640625}, {"start": 2486.49, "end": 2487.01, "word": " plus", "probability": 0.94775390625}, {"start": 2487.01, "end": 2487.17, "word": " or", "probability": 0.91259765625}, {"start": 2487.17, "end": 2487.41, "word": " minus", "probability": 0.98974609375}, {"start": 2487.41, "end": 2487.61, "word": " z", "probability": 0.283447265625}, {"start": 2487.61, "end": 2487.77, "word": " alpha", "probability": 0.93115234375}, {"start": 2487.77, "end": 2488.03, "word": " over", "probability": 0.88037109375}, {"start": 2488.03, "end": 2488.19, "word": " two", "probability": 0.48583984375}, {"start": 2488.19, "end": 2488.53, "word": " square", "probability": 0.7021484375}, {"start": 2488.53, "end": 2488.91, "word": " root.", "probability": 0.94677734375}, {"start": 2489.53, "end": 2490.15, "word": " That's", "probability": 0.6212158203125}, {"start": 2490.15, "end": 2490.35, "word": " going", "probability": 0.5810546875}, {"start": 2490.35, "end": 2490.41, "word": " to", "probability": 0.9716796875}, {"start": 2490.41, "end": 2490.59, "word": " be", "probability": 0.919921875}, {"start": 2490.59, "end": 2491.35, "word": " multiplied", "probability": 0.6767578125}, {"start": 2491.35, "end": 2491.65, 
"word": " by", "probability": 0.970703125}, {"start": 2491.65, "end": 2491.87, "word": " one", "probability": 0.86279296875}, {"start": 2491.87, "end": 2492.03, "word": " over", "probability": 0.9169921875}, {"start": 2492.03, "end": 2492.21, "word": " one", "probability": 0.59423828125}, {"start": 2492.21, "end": 2492.55, "word": " plus", "probability": 0.93359375}, {"start": 2492.55, "end": 2492.81, "word": " one", "probability": 0.9365234375}, {"start": 2492.81, "end": 2492.97, "word": " over", "probability": 0.91259765625}, {"start": 2492.97, "end": 2493.19, "word": " one.", "probability": 0.92333984375}, {"start": 2493.63, "end": 2494.25, "word": " If", "probability": 0.9658203125}, {"start": 2494.25, "end": 2494.39, "word": " we", "probability": 0.90966796875}, {"start": 2494.39, "end": 2494.49, "word": " are", "probability": 0.90576171875}, {"start": 2494.49, "end": 2494.87, "word": " talking", "probability": 0.85888671875}, {"start": 2494.87, "end": 2495.59, "word": " about", "probability": 0.89892578125}, {"start": 2495.59, "end": 2497.17, "word": " confidence", "probability": 0.9755859375}, {"start": 2497.17, "end": 2499.19, "word": " for", "probability": 0.8974609375}, {"start": 2499.19, "end": 2500.59, "word": " mu2", "probability": 0.57763671875}, {"start": 2500.59, "end": 2500.99, "word": " minus", "probability": 0.98486328125}, {"start": 2500.99, "end": 2501.61, "word": " mu1,", "probability": 0.9638671875}], "temperature": 1.0}, {"id": 94, "seek": 253157, "start": 2502.65, "end": 2531.57, "text": " We should have here x2 bar minus x1 bar plus or minus the same amount. Because both plus doesn't change if we start with 1 over n2 plus 1 over n1. But this one should be x2 bar minus x1 bar if we are talking about confidence interval for the difference mu2 minus mu1. 
And that's all.", "tokens": [492, 820, 362, 510, 2031, 17, 2159, 3175, 2031, 16, 2159, 1804, 420, 3175, 264, 912, 2372, 13, 1436, 1293, 1804, 1177, 380, 1319, 498, 321, 722, 365, 502, 670, 297, 17, 1804, 502, 670, 297, 16, 13, 583, 341, 472, 820, 312, 2031, 17, 2159, 3175, 2031, 16, 2159, 498, 321, 366, 1417, 466, 6687, 15035, 337, 264, 2649, 2992, 17, 3175, 2992, 16, 13, 400, 300, 311, 439, 13], "avg_logprob": -0.1673177048150036, "compression_ratio": 1.6136363636363635, "no_speech_prob": 0.0, "words": [{"start": 2502.65, "end": 2502.89, "word": " We", "probability": 0.489501953125}, {"start": 2502.89, "end": 2503.17, "word": " should", "probability": 0.95654296875}, {"start": 2503.17, "end": 2503.41, "word": " have", "probability": 0.9423828125}, {"start": 2503.41, "end": 2503.61, "word": " here", "probability": 0.822265625}, {"start": 2503.61, "end": 2503.99, "word": " x2", "probability": 0.767578125}, {"start": 2503.99, "end": 2504.31, "word": " bar", "probability": 0.80517578125}, {"start": 2504.31, "end": 2504.85, "word": " minus", "probability": 0.9423828125}, {"start": 2504.85, "end": 2505.37, "word": " x1", "probability": 0.993896484375}, {"start": 2505.37, "end": 2505.71, "word": " bar", "probability": 0.95166015625}, {"start": 2505.71, "end": 2508.05, "word": " plus", "probability": 0.626953125}, {"start": 2508.05, "end": 2508.41, "word": " or", "probability": 0.9560546875}, {"start": 2508.41, "end": 2508.71, "word": " minus", "probability": 0.98876953125}, {"start": 2508.71, "end": 2508.93, "word": " the", "probability": 0.91650390625}, {"start": 2508.93, "end": 2509.11, "word": " same", "probability": 0.91357421875}, {"start": 2509.11, "end": 2509.55, "word": " amount.", "probability": 0.8955078125}, {"start": 2511.69, "end": 2512.27, "word": " Because", "probability": 0.52001953125}, {"start": 2512.27, "end": 2512.59, "word": " both", "probability": 0.61865234375}, {"start": 2512.59, "end": 2513.01, "word": " plus", "probability": 0.9580078125}, 
{"start": 2513.01, "end": 2513.71, "word": " doesn't", "probability": 0.786376953125}, {"start": 2513.71, "end": 2514.19, "word": " change", "probability": 0.87939453125}, {"start": 2514.19, "end": 2514.41, "word": " if", "probability": 0.7470703125}, {"start": 2514.41, "end": 2514.69, "word": " we", "probability": 0.90673828125}, {"start": 2514.69, "end": 2516.19, "word": " start", "probability": 0.90380859375}, {"start": 2516.19, "end": 2516.39, "word": " with", "probability": 0.90869140625}, {"start": 2516.39, "end": 2516.57, "word": " 1", "probability": 0.4912109375}, {"start": 2516.57, "end": 2516.73, "word": " over", "probability": 0.81591796875}, {"start": 2516.73, "end": 2517.01, "word": " n2", "probability": 0.6033935546875}, {"start": 2517.01, "end": 2517.23, "word": " plus", "probability": 0.951171875}, {"start": 2517.23, "end": 2517.43, "word": " 1", "probability": 0.97802734375}, {"start": 2517.43, "end": 2517.57, "word": " over", "probability": 0.9130859375}, {"start": 2517.57, "end": 2517.95, "word": " n1.", "probability": 0.995849609375}, {"start": 2518.75, "end": 2518.99, "word": " But", "probability": 0.7333984375}, {"start": 2518.99, "end": 2519.77, "word": " this", "probability": 0.6884765625}, {"start": 2519.77, "end": 2519.95, "word": " one", "probability": 0.88037109375}, {"start": 2519.95, "end": 2520.13, "word": " should", "probability": 0.96728515625}, {"start": 2520.13, "end": 2520.53, "word": " be", "probability": 0.951171875}, {"start": 2520.53, "end": 2521.83, "word": " x2", "probability": 0.9794921875}, {"start": 2521.83, "end": 2522.13, "word": " bar", "probability": 0.9423828125}, {"start": 2522.13, "end": 2522.43, "word": " minus", "probability": 0.98583984375}, {"start": 2522.43, "end": 2522.85, "word": " x1", "probability": 0.994873046875}, {"start": 2522.85, "end": 2523.07, "word": " bar", "probability": 0.93994140625}, {"start": 2523.07, "end": 2523.31, "word": " if", "probability": 0.6591796875}, {"start": 2523.31, "end": 
2523.43, "word": " we", "probability": 0.92236328125}, {"start": 2523.43, "end": 2523.57, "word": " are", "probability": 0.9033203125}, {"start": 2523.57, "end": 2523.87, "word": " talking", "probability": 0.826171875}, {"start": 2523.87, "end": 2524.31, "word": " about", "probability": 0.90283203125}, {"start": 2524.31, "end": 2525.27, "word": " confidence", "probability": 0.9296875}, {"start": 2525.27, "end": 2525.67, "word": " interval", "probability": 0.88818359375}, {"start": 2525.67, "end": 2526.11, "word": " for", "probability": 0.91845703125}, {"start": 2526.11, "end": 2526.31, "word": " the", "probability": 0.72314453125}, {"start": 2526.31, "end": 2526.81, "word": " difference", "probability": 0.8203125}, {"start": 2526.81, "end": 2527.35, "word": " mu2", "probability": 0.755859375}, {"start": 2527.35, "end": 2527.79, "word": " minus", "probability": 0.9873046875}, {"start": 2527.79, "end": 2529.37, "word": " mu1.", "probability": 0.865966796875}, {"start": 2530.63, "end": 2531.03, "word": " And", "probability": 0.71142578125}, {"start": 2531.03, "end": 2531.27, "word": " that's", "probability": 0.963623046875}, {"start": 2531.27, "end": 2531.57, "word": " all.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 95, "seek": 255500, "start": 2532.46, "end": 2555.0, "text": " Any question? In general, x1 bar and x2 bar are not equal. 
Because if you have two populations and you select two different samples, it makes sense that the two means are not equal.", "tokens": [2639, 1168, 30, 682, 2674, 11, 2031, 16, 2159, 293, 2031, 17, 2159, 366, 406, 2681, 13, 1436, 498, 291, 362, 732, 12822, 293, 291, 3048, 732, 819, 10938, 11, 309, 1669, 2020, 300, 264, 732, 1355, 366, 406, 2681, 13], "avg_logprob": -0.2589285572369893, "compression_ratio": 1.378787878787879, "no_speech_prob": 0.0, "words": [{"start": 2532.46, "end": 2532.72, "word": " Any", "probability": 0.237548828125}, {"start": 2532.72, "end": 2533.02, "word": " question?", "probability": 0.5693359375}, {"start": 2534.38, "end": 2534.88, "word": " In", "probability": 0.2359619140625}, {"start": 2534.88, "end": 2542.9, "word": " general,", "probability": 0.9189453125}, {"start": 2543.56, "end": 2544.12, "word": " x1", "probability": 0.640625}, {"start": 2544.12, "end": 2544.32, "word": " bar", "probability": 0.89794921875}, {"start": 2544.32, "end": 2544.54, "word": " and", "probability": 0.93603515625}, {"start": 2544.54, "end": 2545.06, "word": " x2", "probability": 0.99365234375}, {"start": 2545.06, "end": 2545.26, "word": " bar", "probability": 0.92822265625}, {"start": 2545.26, "end": 2545.44, "word": " are", "probability": 0.93359375}, {"start": 2545.44, "end": 2545.62, "word": " not", "probability": 0.955078125}, {"start": 2545.62, "end": 2545.96, "word": " equal.", "probability": 0.884765625}, {"start": 2547.4, "end": 2547.78, "word": " Because", "probability": 0.84765625}, {"start": 2547.78, "end": 2547.92, "word": " if", "probability": 0.90478515625}, {"start": 2547.92, "end": 2548.02, "word": " you", "probability": 0.50146484375}, {"start": 2548.02, "end": 2548.2, "word": " have", "probability": 0.9345703125}, {"start": 2548.2, "end": 2548.4, "word": " two", "probability": 0.865234375}, {"start": 2548.4, "end": 2548.88, "word": " populations", "probability": 0.796875}, {"start": 2548.88, "end": 2549.2, "word": " and", "probability": 
0.79150390625}, {"start": 2549.2, "end": 2549.34, "word": " you", "probability": 0.853515625}, {"start": 2549.34, "end": 2549.66, "word": " select", "probability": 0.8203125}, {"start": 2549.66, "end": 2549.84, "word": " two", "probability": 0.70361328125}, {"start": 2549.84, "end": 2550.14, "word": " different", "probability": 0.8916015625}, {"start": 2550.14, "end": 2550.6, "word": " samples,", "probability": 0.7373046875}, {"start": 2551.34, "end": 2551.48, "word": " it", "probability": 0.92822265625}, {"start": 2551.48, "end": 2551.72, "word": " makes", "probability": 0.79833984375}, {"start": 2551.72, "end": 2551.98, "word": " sense", "probability": 0.8291015625}, {"start": 2551.98, "end": 2552.34, "word": " that", "probability": 0.93359375}, {"start": 2552.34, "end": 2552.68, "word": " the", "probability": 0.896484375}, {"start": 2552.68, "end": 2552.88, "word": " two", "probability": 0.8642578125}, {"start": 2552.88, "end": 2553.92, "word": " means", "probability": 0.6083984375}, {"start": 2553.92, "end": 2554.44, "word": " are", "probability": 0.9375}, {"start": 2554.44, "end": 2554.66, "word": " not", "probability": 0.9462890625}, {"start": 2554.66, "end": 2555.0, "word": " equal.", "probability": 0.8701171875}], "temperature": 1.0}, {"id": 96, "seek": 258503, "start": 2556.05, "end": 2585.03, "text": " But if they are equal, it means your statistic is zero. And that's never happened in the real life. Maybe close to zero, but not exactly zero. Let's look at one example. A straightforward example. 
You are a financial analyst.", "tokens": [583, 498, 436, 366, 2681, 11, 309, 1355, 428, 29588, 307, 4018, 13, 400, 300, 311, 1128, 2011, 294, 264, 957, 993, 13, 2704, 1998, 281, 4018, 11, 457, 406, 2293, 4018, 13, 961, 311, 574, 412, 472, 1365, 13, 316, 15325, 1365, 13, 509, 366, 257, 4669, 19085, 13], "avg_logprob": -0.17202817926219865, "compression_ratio": 1.4037267080745341, "no_speech_prob": 0.0, "words": [{"start": 2556.05, "end": 2556.37, "word": " But", "probability": 0.5908203125}, {"start": 2556.37, "end": 2556.65, "word": " if", "probability": 0.896484375}, {"start": 2556.65, "end": 2556.83, "word": " they", "probability": 0.89501953125}, {"start": 2556.83, "end": 2557.01, "word": " are", "probability": 0.921875}, {"start": 2557.01, "end": 2557.35, "word": " equal,", "probability": 0.88720703125}, {"start": 2557.63, "end": 2557.75, "word": " it", "probability": 0.91455078125}, {"start": 2557.75, "end": 2558.05, "word": " means", "probability": 0.93505859375}, {"start": 2558.05, "end": 2558.31, "word": " your", "probability": 0.82958984375}, {"start": 2558.31, "end": 2558.93, "word": " statistic", "probability": 0.8076171875}, {"start": 2558.93, "end": 2559.51, "word": " is", "probability": 0.947265625}, {"start": 2559.51, "end": 2560.51, "word": " zero.", "probability": 0.391357421875}, {"start": 2561.19, "end": 2561.57, "word": " And", "probability": 0.79443359375}, {"start": 2561.57, "end": 2561.85, "word": " that's", "probability": 0.792236328125}, {"start": 2561.85, "end": 2562.11, "word": " never", "probability": 0.9404296875}, {"start": 2562.11, "end": 2562.41, "word": " happened", "probability": 0.80859375}, {"start": 2562.41, "end": 2562.57, "word": " in", "probability": 0.95166015625}, {"start": 2562.57, "end": 2562.71, "word": " the", "probability": 0.414794921875}, {"start": 2562.71, "end": 2562.85, "word": " real", "probability": 0.95654296875}, {"start": 2562.85, "end": 2563.19, "word": " life.", "probability": 0.92529296875}, {"start": 
2563.95, "end": 2564.19, "word": " Maybe", "probability": 0.90380859375}, {"start": 2564.19, "end": 2564.57, "word": " close", "probability": 0.869140625}, {"start": 2564.57, "end": 2564.75, "word": " to", "probability": 0.970703125}, {"start": 2564.75, "end": 2564.97, "word": " zero,", "probability": 0.89697265625}, {"start": 2565.07, "end": 2565.25, "word": " but", "probability": 0.93115234375}, {"start": 2565.25, "end": 2565.61, "word": " not", "probability": 0.943359375}, {"start": 2565.61, "end": 2566.47, "word": " exactly", "probability": 0.88720703125}, {"start": 2566.47, "end": 2566.81, "word": " zero.", "probability": 0.89599609375}, {"start": 2567.71, "end": 2568.07, "word": " Let's", "probability": 0.965576171875}, {"start": 2568.07, "end": 2568.29, "word": " look", "probability": 0.96728515625}, {"start": 2568.29, "end": 2568.47, "word": " at", "probability": 0.9697265625}, {"start": 2568.47, "end": 2568.69, "word": " one", "probability": 0.93115234375}, {"start": 2568.69, "end": 2569.15, "word": " example.", "probability": 0.97216796875}, {"start": 2580.65, "end": 2581.33, "word": " A", "probability": 0.87744140625}, {"start": 2581.33, "end": 2581.77, "word": " straightforward", "probability": 0.85400390625}, {"start": 2581.77, "end": 2582.51, "word": " example.", "probability": 0.97119140625}, {"start": 2582.95, "end": 2583.27, "word": " You", "probability": 0.9482421875}, {"start": 2583.27, "end": 2583.61, "word": " are", "probability": 0.93505859375}, {"start": 2583.61, "end": 2583.97, "word": " a", "probability": 0.9921875}, {"start": 2583.97, "end": 2584.57, "word": " financial", "probability": 0.8720703125}, {"start": 2584.57, "end": 2585.03, "word": " analyst.", "probability": 0.77001953125}], "temperature": 1.0}, {"id": 97, "seek": 261008, "start": 2586.26, "end": 2610.08, "text": " for brokerage fair. Is there a difference in dividend yield between stock listed on the New York Stock Exchange and Nasdaq? You collect the following data. 
So we have two data for two different stocks. One for New York Stock Exchange and other for Nasdaq.", "tokens": [337, 26502, 609, 3143, 13, 1119, 456, 257, 2649, 294, 29796, 11257, 1296, 4127, 10052, 322, 264, 1873, 3609, 17857, 31169, 293, 16151, 2675, 80, 30, 509, 2500, 264, 3480, 1412, 13, 407, 321, 362, 732, 1412, 337, 732, 819, 12966, 13, 1485, 337, 1873, 3609, 17857, 31169, 293, 661, 337, 16151, 2675, 80, 13], "avg_logprob": -0.24218750319310597, "compression_ratio": 1.6, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2586.26, "end": 2586.64, "word": " for", "probability": 0.317138671875}, {"start": 2586.64, "end": 2587.18, "word": " brokerage", "probability": 0.794921875}, {"start": 2587.18, "end": 2587.48, "word": " fair.", "probability": 0.50341796875}, {"start": 2588.82, "end": 2589.2, "word": " Is", "probability": 0.85400390625}, {"start": 2589.2, "end": 2589.44, "word": " there", "probability": 0.9130859375}, {"start": 2589.44, "end": 2589.58, "word": " a", "probability": 0.9345703125}, {"start": 2589.58, "end": 2589.96, "word": " difference", "probability": 0.328125}, {"start": 2589.96, "end": 2590.18, "word": " in", "probability": 0.9130859375}, {"start": 2590.18, "end": 2591.3, "word": " dividend", "probability": 0.7333984375}, {"start": 2591.3, "end": 2592.16, "word": " yield", "probability": 0.8974609375}, {"start": 2592.16, "end": 2594.32, "word": " between", "probability": 0.8251953125}, {"start": 2594.32, "end": 2594.86, "word": " stock", "probability": 0.33984375}, {"start": 2594.86, "end": 2595.46, "word": " listed", "probability": 0.91162109375}, {"start": 2595.46, "end": 2595.92, "word": " on", "probability": 0.9013671875}, {"start": 2595.92, "end": 2596.26, "word": " the", "probability": 0.87646484375}, {"start": 2596.26, "end": 2596.76, "word": " New", "probability": 0.77001953125}, {"start": 2596.76, "end": 2597.06, "word": " York", "probability": 0.96044921875}, {"start": 2597.06, "end": 2597.66, "word": " Stock", 
"probability": 0.9033203125}, {"start": 2597.66, "end": 2598.32, "word": " Exchange", "probability": 0.91845703125}, {"start": 2598.32, "end": 2598.58, "word": " and", "probability": 0.5458984375}, {"start": 2598.58, "end": 2599.08, "word": " Nasdaq?", "probability": 0.7520345052083334}, {"start": 2600.02, "end": 2600.3, "word": " You", "probability": 0.9287109375}, {"start": 2600.3, "end": 2600.64, "word": " collect", "probability": 0.8408203125}, {"start": 2600.64, "end": 2600.84, "word": " the", "probability": 0.9130859375}, {"start": 2600.84, "end": 2601.06, "word": " following", "probability": 0.8681640625}, {"start": 2601.06, "end": 2601.36, "word": " data.", "probability": 0.9267578125}, {"start": 2602.06, "end": 2602.26, "word": " So", "probability": 0.833984375}, {"start": 2602.26, "end": 2602.4, "word": " we", "probability": 0.697265625}, {"start": 2602.4, "end": 2602.6, "word": " have", "probability": 0.9482421875}, {"start": 2602.6, "end": 2602.86, "word": " two", "probability": 0.80517578125}, {"start": 2602.86, "end": 2603.16, "word": " data", "probability": 0.931640625}, {"start": 2603.16, "end": 2603.42, "word": " for", "probability": 0.94189453125}, {"start": 2603.42, "end": 2603.64, "word": " two", "probability": 0.92333984375}, {"start": 2603.64, "end": 2604.02, "word": " different", "probability": 0.83740234375}, {"start": 2604.02, "end": 2604.42, "word": " stocks.", "probability": 0.85302734375}, {"start": 2605.34, "end": 2605.7, "word": " One", "probability": 0.92138671875}, {"start": 2605.7, "end": 2606.12, "word": " for", "probability": 0.92626953125}, {"start": 2606.12, "end": 2607.16, "word": " New", "probability": 0.759765625}, {"start": 2607.16, "end": 2607.42, "word": " York", "probability": 0.97216796875}, {"start": 2607.42, "end": 2607.86, "word": " Stock", "probability": 0.92431640625}, {"start": 2607.86, "end": 2608.94, "word": " Exchange", "probability": 0.94775390625}, {"start": 2608.94, "end": 2609.18, "word": " and", 
"probability": 0.78515625}, {"start": 2609.18, "end": 2609.4, "word": " other", "probability": 0.442626953125}, {"start": 2609.4, "end": 2609.66, "word": " for", "probability": 0.93408203125}, {"start": 2609.66, "end": 2610.08, "word": " Nasdaq.", "probability": 0.9617513020833334}], "temperature": 1.0}, {"id": 98, "seek": 263461, "start": 2611.67, "end": 2634.61, "text": " We have a random sample of size 21 from the first one with standard deviation 1.3 and sample mean 3.17. The other sample gives the following results. The random sample size equals 25 with mean 2.53 and standard deviation 1.16. So this is the information we have.", "tokens": [492, 362, 257, 4974, 6889, 295, 2744, 5080, 490, 264, 700, 472, 365, 3832, 25163, 502, 13, 18, 293, 6889, 914, 805, 13, 7773, 13, 440, 661, 6889, 2709, 264, 3480, 3542, 13, 440, 4974, 6889, 2744, 6915, 3552, 365, 914, 568, 13, 19584, 293, 3832, 25163, 502, 13, 6866, 13, 407, 341, 307, 264, 1589, 321, 362, 13], "avg_logprob": -0.12122395957509677, "compression_ratio": 1.6540880503144655, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2611.67, "end": 2611.97, "word": " We", "probability": 0.78173828125}, {"start": 2611.97, "end": 2612.29, "word": " have", "probability": 0.94091796875}, {"start": 2612.29, "end": 2612.53, "word": " a", "probability": 0.9736328125}, {"start": 2612.53, "end": 2612.75, "word": " random", "probability": 0.853515625}, {"start": 2612.75, "end": 2613.11, "word": " sample", "probability": 0.896484375}, {"start": 2613.11, "end": 2613.31, "word": " of", "probability": 0.9111328125}, {"start": 2613.31, "end": 2613.55, "word": " size", "probability": 0.8388671875}, {"start": 2613.55, "end": 2614.01, "word": " 21", "probability": 0.95556640625}, {"start": 2614.01, "end": 2614.31, "word": " from", "probability": 0.865234375}, {"start": 2614.31, "end": 2614.47, "word": " the", "probability": 0.9140625}, {"start": 2614.47, "end": 2614.75, "word": " first", "probability": 0.86865234375}, {"start": 
2614.75, "end": 2615.05, "word": " one", "probability": 0.89892578125}, {"start": 2615.05, "end": 2615.41, "word": " with", "probability": 0.7392578125}, {"start": 2615.41, "end": 2616.07, "word": " standard", "probability": 0.88427734375}, {"start": 2616.07, "end": 2616.55, "word": " deviation", "probability": 0.919921875}, {"start": 2616.55, "end": 2616.87, "word": " 1", "probability": 0.9228515625}, {"start": 2616.87, "end": 2617.35, "word": ".3", "probability": 0.986572265625}, {"start": 2617.35, "end": 2617.57, "word": " and", "probability": 0.86962890625}, {"start": 2617.57, "end": 2617.87, "word": " sample", "probability": 0.88623046875}, {"start": 2617.87, "end": 2618.19, "word": " mean", "probability": 0.978515625}, {"start": 2618.19, "end": 2618.89, "word": " 3", "probability": 0.9736328125}, {"start": 2618.89, "end": 2619.53, "word": ".17.", "probability": 0.994384765625}, {"start": 2620.33, "end": 2620.79, "word": " The", "probability": 0.88671875}, {"start": 2620.79, "end": 2621.11, "word": " other", "probability": 0.8857421875}, {"start": 2621.11, "end": 2621.67, "word": " sample", "probability": 0.8935546875}, {"start": 2621.67, "end": 2622.69, "word": " gives", "probability": 0.8720703125}, {"start": 2622.69, "end": 2622.91, "word": " the", "probability": 0.90869140625}, {"start": 2622.91, "end": 2623.19, "word": " following", "probability": 0.8984375}, {"start": 2623.19, "end": 2623.73, "word": " results.", "probability": 0.85986328125}, {"start": 2624.85, "end": 2625.43, "word": " The", "probability": 0.86376953125}, {"start": 2625.43, "end": 2625.91, "word": " random", "probability": 0.86767578125}, {"start": 2625.91, "end": 2626.29, "word": " sample", "probability": 0.88720703125}, {"start": 2626.29, "end": 2626.73, "word": " size", "probability": 0.8515625}, {"start": 2626.73, "end": 2627.07, "word": " equals", "probability": 0.301025390625}, {"start": 2627.07, "end": 2627.51, "word": " 25", "probability": 0.85400390625}, {"start": 2627.51, 
"end": 2628.71, "word": " with", "probability": 0.80859375}, {"start": 2628.71, "end": 2629.03, "word": " mean", "probability": 0.95458984375}, {"start": 2629.03, "end": 2629.51, "word": " 2", "probability": 0.97021484375}, {"start": 2629.51, "end": 2630.09, "word": ".53", "probability": 0.976806640625}, {"start": 2630.09, "end": 2630.51, "word": " and", "probability": 0.91650390625}, {"start": 2630.51, "end": 2630.81, "word": " standard", "probability": 0.94140625}, {"start": 2630.81, "end": 2631.13, "word": " deviation", "probability": 0.93115234375}, {"start": 2631.13, "end": 2631.35, "word": " 1", "probability": 0.84765625}, {"start": 2631.35, "end": 2631.87, "word": ".16.", "probability": 0.9794921875}, {"start": 2633.01, "end": 2633.25, "word": " So", "probability": 0.8603515625}, {"start": 2633.25, "end": 2633.47, "word": " this", "probability": 0.798828125}, {"start": 2633.47, "end": 2633.59, "word": " is", "probability": 0.92431640625}, {"start": 2633.59, "end": 2633.69, "word": " the", "probability": 0.890625}, {"start": 2633.69, "end": 2634.11, "word": " information", "probability": 0.85400390625}, {"start": 2634.11, "end": 2634.39, "word": " we", "probability": 0.9248046875}, {"start": 2634.39, "end": 2634.61, "word": " have.", "probability": 0.9375}], "temperature": 1.0}, {"id": 99, "seek": 265338, "start": 2636.38, "end": 2653.38, "text": " Sample sizes for both sample means and sample standard deviations. So that means population variances are unknown. Assuming both populations are approximately normal. 
We have to assume they are normal.", "tokens": [4832, 781, 11602, 337, 1293, 6889, 1355, 293, 6889, 3832, 31219, 763, 13, 407, 300, 1355, 4415, 1374, 21518, 366, 9841, 13, 6281, 24919, 1293, 12822, 366, 10447, 2710, 13, 492, 362, 281, 6552, 436, 366, 2710, 13], "avg_logprob": -0.21153846765175843, "compression_ratio": 1.4962962962962962, "no_speech_prob": 0.0, "words": [{"start": 2636.38, "end": 2636.88, "word": " Sample", "probability": 0.7130126953125}, {"start": 2636.88, "end": 2637.26, "word": " sizes", "probability": 0.81396484375}, {"start": 2637.26, "end": 2637.58, "word": " for", "probability": 0.8974609375}, {"start": 2637.58, "end": 2638.22, "word": " both", "probability": 0.87939453125}, {"start": 2638.22, "end": 2639.32, "word": " sample", "probability": 0.6572265625}, {"start": 2639.32, "end": 2639.7, "word": " means", "probability": 0.89501953125}, {"start": 2639.7, "end": 2640.06, "word": " and", "probability": 0.9375}, {"start": 2640.06, "end": 2640.46, "word": " sample", "probability": 0.845703125}, {"start": 2640.46, "end": 2641.18, "word": " standard", "probability": 0.90283203125}, {"start": 2641.18, "end": 2641.86, "word": " deviations.", "probability": 0.9140625}, {"start": 2642.28, "end": 2642.36, "word": " So", "probability": 0.865234375}, {"start": 2642.36, "end": 2642.6, "word": " that", "probability": 0.82568359375}, {"start": 2642.6, "end": 2642.92, "word": " means", "probability": 0.9384765625}, {"start": 2642.92, "end": 2644.32, "word": " population", "probability": 0.87353515625}, {"start": 2644.32, "end": 2644.8, "word": " variances", "probability": 0.659423828125}, {"start": 2644.8, "end": 2645.06, "word": " are", "probability": 0.9375}, {"start": 2645.06, "end": 2645.46, "word": " unknown.", "probability": 0.7998046875}, {"start": 2647.12, "end": 2647.62, "word": " Assuming", "probability": 0.9599609375}, {"start": 2647.62, "end": 2648.04, "word": " both", "probability": 0.869140625}, {"start": 2648.04, "end": 2648.64, "word": " 
populations", "probability": 0.560546875}, {"start": 2648.64, "end": 2650.14, "word": " are", "probability": 0.92236328125}, {"start": 2650.14, "end": 2650.78, "word": " approximately", "probability": 0.86083984375}, {"start": 2650.78, "end": 2651.16, "word": " normal.", "probability": 0.84228515625}, {"start": 2651.24, "end": 2651.34, "word": " We", "probability": 0.9345703125}, {"start": 2651.34, "end": 2651.52, "word": " have", "probability": 0.9462890625}, {"start": 2651.52, "end": 2651.66, "word": " to", "probability": 0.9716796875}, {"start": 2651.66, "end": 2652.16, "word": " assume", "probability": 0.9169921875}, {"start": 2652.16, "end": 2652.7, "word": " they", "probability": 0.81103515625}, {"start": 2652.7, "end": 2652.92, "word": " are", "probability": 0.9326171875}, {"start": 2652.92, "end": 2653.38, "word": " normal.", "probability": 0.84716796875}], "temperature": 1.0}, {"id": 100, "seek": 267297, "start": 2654.33, "end": 2672.97, "text": " Because the sample sizes are less than 30. In this case, if they are smaller than 30 and the populations are not normal, we cannot use the T-satisfaction. 
T is used only if populations are approximately normal, abnormal, or informal.", "tokens": [1436, 264, 6889, 11602, 366, 1570, 813, 2217, 13, 682, 341, 1389, 11, 498, 436, 366, 4356, 813, 2217, 293, 264, 12822, 366, 406, 2710, 11, 321, 2644, 764, 264, 314, 12, 82, 25239, 2894, 13, 314, 307, 1143, 787, 498, 12822, 366, 10447, 2710, 11, 32847, 11, 420, 1536, 24440, 13], "avg_logprob": -0.30188680369898957, "compression_ratio": 1.570469798657718, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2654.33, "end": 2654.99, "word": " Because", "probability": 0.50048828125}, {"start": 2654.99, "end": 2655.57, "word": " the", "probability": 0.7939453125}, {"start": 2655.57, "end": 2655.93, "word": " sample", "probability": 0.8681640625}, {"start": 2655.93, "end": 2656.53, "word": " sizes", "probability": 0.88720703125}, {"start": 2656.53, "end": 2657.31, "word": " are", "probability": 0.92333984375}, {"start": 2657.31, "end": 2657.69, "word": " less", "probability": 0.9248046875}, {"start": 2657.69, "end": 2657.91, "word": " than", "probability": 0.9296875}, {"start": 2657.91, "end": 2658.37, "word": " 30.", "probability": 0.87890625}, {"start": 2658.89, "end": 2659.09, "word": " In", "probability": 0.93505859375}, {"start": 2659.09, "end": 2659.33, "word": " this", "probability": 0.9423828125}, {"start": 2659.33, "end": 2659.67, "word": " case,", "probability": 0.92431640625}, {"start": 2659.83, "end": 2659.99, "word": " if", "probability": 0.94873046875}, {"start": 2659.99, "end": 2660.25, "word": " they", "probability": 0.88134765625}, {"start": 2660.25, "end": 2660.57, "word": " are", "probability": 0.939453125}, {"start": 2660.57, "end": 2661.29, "word": " smaller", "probability": 0.86474609375}, {"start": 2661.29, "end": 2661.55, "word": " than", "probability": 0.9462890625}, {"start": 2661.55, "end": 2661.89, "word": " 30", "probability": 0.94677734375}, {"start": 2661.89, "end": 2662.09, "word": " and", "probability": 0.732421875}, {"start": 2662.09, 
"end": 2662.21, "word": " the", "probability": 0.859375}, {"start": 2662.21, "end": 2662.59, "word": " populations", "probability": 0.93603515625}, {"start": 2662.59, "end": 2662.83, "word": " are", "probability": 0.94384765625}, {"start": 2662.83, "end": 2663.03, "word": " not", "probability": 0.93115234375}, {"start": 2663.03, "end": 2663.41, "word": " normal,", "probability": 0.85205078125}, {"start": 2663.71, "end": 2664.01, "word": " we", "probability": 0.93212890625}, {"start": 2664.01, "end": 2664.33, "word": " cannot", "probability": 0.873046875}, {"start": 2664.33, "end": 2664.77, "word": " use", "probability": 0.88623046875}, {"start": 2664.77, "end": 2664.93, "word": " the", "probability": 0.83740234375}, {"start": 2664.93, "end": 2665.09, "word": " T", "probability": 0.384521484375}, {"start": 2665.09, "end": 2665.61, "word": "-satisfaction.", "probability": 0.38519287109375}, {"start": 2666.65, "end": 2667.35, "word": " T", "probability": 0.953125}, {"start": 2667.35, "end": 2667.53, "word": " is", "probability": 0.93115234375}, {"start": 2667.53, "end": 2667.89, "word": " used", "probability": 0.91845703125}, {"start": 2667.89, "end": 2668.37, "word": " only", "probability": 0.88916015625}, {"start": 2668.37, "end": 2668.83, "word": " if", "probability": 0.94580078125}, {"start": 2668.83, "end": 2669.97, "word": " populations", "probability": 0.87548828125}, {"start": 2669.97, "end": 2670.37, "word": " are", "probability": 0.95361328125}, {"start": 2670.37, "end": 2671.05, "word": " approximately", "probability": 0.7041015625}, {"start": 2671.05, "end": 2671.51, "word": " normal,", "probability": 0.81689453125}, {"start": 2671.69, "end": 2672.01, "word": " abnormal,", "probability": 0.578125}, {"start": 2672.33, "end": 2672.51, "word": " or", "probability": 0.95849609375}, {"start": 2672.51, "end": 2672.97, "word": " informal.", "probability": 0.24542236328125}], "temperature": 1.0}, {"id": 101, "seek": 269754, "start": 2673.84, "end": 2697.54, 
"text": " But in this case, the two sizes are smaller than 30, so we have to assume both populations are normally distributed or approximately normally distributed, or we have to use another test. So we're assuming both are normal with equal variances. The question is, is there a difference in meaning?", "tokens": [583, 294, 341, 1389, 11, 264, 732, 11602, 366, 4356, 813, 2217, 11, 370, 321, 362, 281, 6552, 1293, 12822, 366, 5646, 12631, 420, 10447, 5646, 12631, 11, 420, 321, 362, 281, 764, 1071, 1500, 13, 407, 321, 434, 11926, 1293, 366, 2710, 365, 2681, 1374, 21518, 13, 440, 1168, 307, 11, 307, 456, 257, 2649, 294, 3620, 30], "avg_logprob": -0.17695311804612476, "compression_ratio": 1.5638297872340425, "no_speech_prob": 0.0, "words": [{"start": 2673.84, "end": 2674.1, "word": " But", "probability": 0.58642578125}, {"start": 2674.1, "end": 2674.26, "word": " in", "probability": 0.8623046875}, {"start": 2674.26, "end": 2674.5, "word": " this", "probability": 0.93701171875}, {"start": 2674.5, "end": 2674.82, "word": " case,", "probability": 0.92041015625}, {"start": 2674.98, "end": 2675.08, "word": " the", "probability": 0.87939453125}, {"start": 2675.08, "end": 2675.32, "word": " two", "probability": 0.9169921875}, {"start": 2675.32, "end": 2675.68, "word": " sizes", "probability": 0.87109375}, {"start": 2675.68, "end": 2676.24, "word": " are", "probability": 0.9423828125}, {"start": 2676.24, "end": 2676.86, "word": " smaller", "probability": 0.86767578125}, {"start": 2676.86, "end": 2677.12, "word": " than", "probability": 0.94873046875}, {"start": 2677.12, "end": 2677.52, "word": " 30,", "probability": 0.88720703125}, {"start": 2677.78, "end": 2678.16, "word": " so", "probability": 0.92138671875}, {"start": 2678.16, "end": 2678.32, "word": " we", "probability": 0.9482421875}, {"start": 2678.32, "end": 2678.5, "word": " have", "probability": 0.93115234375}, {"start": 2678.5, "end": 2678.66, "word": " to", "probability": 0.97216796875}, {"start": 2678.66, "end": 
2679.2, "word": " assume", "probability": 0.92138671875}, {"start": 2679.2, "end": 2680.16, "word": " both", "probability": 0.7685546875}, {"start": 2680.16, "end": 2680.8, "word": " populations", "probability": 0.95703125}, {"start": 2680.8, "end": 2681.06, "word": " are", "probability": 0.94140625}, {"start": 2681.06, "end": 2681.46, "word": " normally", "probability": 0.72705078125}, {"start": 2681.46, "end": 2682.16, "word": " distributed", "probability": 0.91455078125}, {"start": 2682.16, "end": 2682.52, "word": " or", "probability": 0.488525390625}, {"start": 2682.52, "end": 2683.02, "word": " approximately", "probability": 0.859375}, {"start": 2683.02, "end": 2683.62, "word": " normally", "probability": 0.84619140625}, {"start": 2683.62, "end": 2684.08, "word": " distributed,", "probability": 0.92626953125}, {"start": 2684.8, "end": 2685.68, "word": " or", "probability": 0.9423828125}, {"start": 2685.68, "end": 2685.86, "word": " we", "probability": 0.93603515625}, {"start": 2685.86, "end": 2686.04, "word": " have", "probability": 0.9384765625}, {"start": 2686.04, "end": 2686.14, "word": " to", "probability": 0.970703125}, {"start": 2686.14, "end": 2686.3, "word": " use", "probability": 0.87255859375}, {"start": 2686.3, "end": 2686.62, "word": " another", "probability": 0.92431640625}, {"start": 2686.62, "end": 2687.02, "word": " test.", "probability": 0.8955078125}, {"start": 2688.36, "end": 2688.8, "word": " So", "probability": 0.89306640625}, {"start": 2688.8, "end": 2689.38, "word": " we're", "probability": 0.469970703125}, {"start": 2689.38, "end": 2689.8, "word": " assuming", "probability": 0.8994140625}, {"start": 2689.8, "end": 2690.36, "word": " both", "probability": 0.85693359375}, {"start": 2690.36, "end": 2690.74, "word": " are", "probability": 0.9404296875}, {"start": 2690.74, "end": 2691.38, "word": " normal", "probability": 0.86572265625}, {"start": 2691.38, "end": 2691.82, "word": " with", "probability": 0.81689453125}, {"start": 2691.82, 
"end": 2692.38, "word": " equal", "probability": 0.88232421875}, {"start": 2692.38, "end": 2692.92, "word": " variances.", "probability": 0.93505859375}, {"start": 2694.18, "end": 2694.4, "word": " The", "probability": 0.87890625}, {"start": 2694.4, "end": 2694.7, "word": " question", "probability": 0.9111328125}, {"start": 2694.7, "end": 2695.04, "word": " is,", "probability": 0.94775390625}, {"start": 2695.88, "end": 2696.1, "word": " is", "probability": 0.85986328125}, {"start": 2696.1, "end": 2696.32, "word": " there", "probability": 0.9140625}, {"start": 2696.32, "end": 2696.46, "word": " a", "probability": 0.978515625}, {"start": 2696.46, "end": 2696.94, "word": " difference", "probability": 0.84716796875}, {"start": 2696.94, "end": 2697.2, "word": " in", "probability": 0.947265625}, {"start": 2697.2, "end": 2697.54, "word": " meaning?", "probability": 0.6396484375}], "temperature": 1.0}, {"id": 102, "seek": 272386, "start": 2699.46, "end": 2723.86, "text": " So there is no direction. Is there a difference? That means we are testing mu1 equals mu2 against mu1 does not equal mu2. So the null hypothesis, the difference between these two is zero because it asks about is there a difference here. So we assume there is no difference. 
It means mu1 equals mu2 under the null hypothesis.", "tokens": [407, 456, 307, 572, 3513, 13, 1119, 456, 257, 2649, 30, 663, 1355, 321, 366, 4997, 2992, 16, 6915, 2992, 17, 1970, 2992, 16, 775, 406, 2681, 2992, 17, 13, 407, 264, 18184, 17291, 11, 264, 2649, 1296, 613, 732, 307, 4018, 570, 309, 8962, 466, 307, 456, 257, 2649, 510, 13, 407, 321, 6552, 456, 307, 572, 2649, 13, 467, 1355, 2992, 16, 6915, 2992, 17, 833, 264, 18184, 17291, 13], "avg_logprob": -0.15721319146352272, "compression_ratio": 1.8571428571428572, "no_speech_prob": 0.0, "words": [{"start": 2699.46, "end": 2699.74, "word": " So", "probability": 0.6962890625}, {"start": 2699.74, "end": 2699.9, "word": " there", "probability": 0.6943359375}, {"start": 2699.9, "end": 2700.04, "word": " is", "probability": 0.869140625}, {"start": 2700.04, "end": 2700.18, "word": " no", "probability": 0.94189453125}, {"start": 2700.18, "end": 2700.6, "word": " direction.", "probability": 0.95068359375}, {"start": 2701.22, "end": 2701.3, "word": " Is", "probability": 0.88427734375}, {"start": 2701.3, "end": 2701.5, "word": " there", "probability": 0.921875}, {"start": 2701.5, "end": 2701.62, "word": " a", "probability": 0.97607421875}, {"start": 2701.62, "end": 2702.1, "word": " difference?", "probability": 0.8828125}, {"start": 2702.3, "end": 2702.58, "word": " That", "probability": 0.8955078125}, {"start": 2702.58, "end": 2702.98, "word": " means", "probability": 0.927734375}, {"start": 2702.98, "end": 2703.6, "word": " we", "probability": 0.845703125}, {"start": 2703.6, "end": 2703.76, "word": " are", "probability": 0.90966796875}, {"start": 2703.76, "end": 2704.18, "word": " testing", "probability": 0.8876953125}, {"start": 2704.18, "end": 2704.62, "word": " mu1", "probability": 0.5596923828125}, {"start": 2704.62, "end": 2704.96, "word": " equals", "probability": 0.6162109375}, {"start": 2704.96, "end": 2705.42, "word": " mu2", "probability": 0.929931640625}, {"start": 2705.42, "end": 2706.22, "word": " against", 
"probability": 0.85986328125}, {"start": 2706.22, "end": 2707.56, "word": " mu1", "probability": 0.951904296875}, {"start": 2707.56, "end": 2707.78, "word": " does", "probability": 0.90087890625}, {"start": 2707.78, "end": 2708.08, "word": " not", "probability": 0.95458984375}, {"start": 2708.08, "end": 2708.52, "word": " equal", "probability": 0.9287109375}, {"start": 2708.52, "end": 2708.98, "word": " mu2.", "probability": 0.95166015625}, {"start": 2710.04, "end": 2710.32, "word": " So", "probability": 0.9287109375}, {"start": 2710.32, "end": 2710.48, "word": " the", "probability": 0.78955078125}, {"start": 2710.48, "end": 2710.62, "word": " null", "probability": 0.97802734375}, {"start": 2710.62, "end": 2711.08, "word": " hypothesis,", "probability": 0.85693359375}, {"start": 2711.76, "end": 2712.82, "word": " the", "probability": 0.91796875}, {"start": 2712.82, "end": 2713.32, "word": " difference", "probability": 0.875}, {"start": 2713.32, "end": 2713.72, "word": " between", "probability": 0.86962890625}, {"start": 2713.72, "end": 2714.0, "word": " these", "probability": 0.86279296875}, {"start": 2714.0, "end": 2714.16, "word": " two", "probability": 0.904296875}, {"start": 2714.16, "end": 2714.34, "word": " is", "probability": 0.94580078125}, {"start": 2714.34, "end": 2714.62, "word": " zero", "probability": 0.77294921875}, {"start": 2714.62, "end": 2715.34, "word": " because", "probability": 0.5654296875}, {"start": 2715.34, "end": 2715.56, "word": " it", "probability": 0.93115234375}, {"start": 2715.56, "end": 2715.86, "word": " asks", "probability": 0.5869140625}, {"start": 2715.86, "end": 2716.3, "word": " about", "probability": 0.8759765625}, {"start": 2716.3, "end": 2716.64, "word": " is", "probability": 0.7373046875}, {"start": 2716.64, "end": 2716.84, "word": " there", "probability": 0.9140625}, {"start": 2716.84, "end": 2716.98, "word": " a", "probability": 0.97412109375}, {"start": 2716.98, "end": 2717.48, "word": " difference", "probability": 
0.87841796875}, {"start": 2717.48, "end": 2717.9, "word": " here.", "probability": 0.8154296875}, {"start": 2719.14, "end": 2719.52, "word": " So", "probability": 0.9501953125}, {"start": 2719.52, "end": 2719.72, "word": " we", "probability": 0.92529296875}, {"start": 2719.72, "end": 2720.16, "word": " assume", "probability": 0.89697265625}, {"start": 2720.16, "end": 2720.42, "word": " there", "probability": 0.89208984375}, {"start": 2720.42, "end": 2720.58, "word": " is", "probability": 0.93408203125}, {"start": 2720.58, "end": 2720.74, "word": " no", "probability": 0.95361328125}, {"start": 2720.74, "end": 2721.18, "word": " difference.", "probability": 0.869140625}, {"start": 2721.3, "end": 2721.4, "word": " It", "probability": 0.8662109375}, {"start": 2721.4, "end": 2721.66, "word": " means", "probability": 0.939453125}, {"start": 2721.66, "end": 2722.04, "word": " mu1", "probability": 0.881103515625}, {"start": 2722.04, "end": 2722.38, "word": " equals", "probability": 0.9423828125}, {"start": 2722.38, "end": 2722.78, "word": " mu2", "probability": 0.972412109375}, {"start": 2722.78, "end": 2723.02, "word": " under", "probability": 0.84423828125}, {"start": 2723.02, "end": 2723.22, "word": " the", "probability": 0.88916015625}, {"start": 2723.22, "end": 2723.4, "word": " null", "probability": 0.94482421875}, {"start": 2723.4, "end": 2723.86, "word": " hypothesis.", "probability": 0.84716796875}], "temperature": 1.0}, {"id": 103, "seek": 275502, "start": 2726.0, "end": 2755.02, "text": " Against the alternative hypothesis, mu1 minus mu2 is not zero. That means mu1 does not equal mu2. So either you state the null by using this way, mu1 minus mu2 equals zero, or mu1 equals mu2. Now, before computing the test statistic, we have to compute S squared B. 
For S squared B, this is the equation we have.", "tokens": [29995, 264, 8535, 17291, 11, 2992, 16, 3175, 2992, 17, 307, 406, 4018, 13, 663, 1355, 2992, 16, 775, 406, 2681, 2992, 17, 13, 407, 2139, 291, 1785, 264, 18184, 538, 1228, 341, 636, 11, 2992, 16, 3175, 2992, 17, 6915, 4018, 11, 420, 2992, 16, 6915, 2992, 17, 13, 823, 11, 949, 15866, 264, 1500, 29588, 11, 321, 362, 281, 14722, 318, 8889, 363, 13, 1171, 318, 8889, 363, 11, 341, 307, 264, 5367, 321, 362, 13], "avg_logprob": -0.19214795020562184, "compression_ratio": 1.6134020618556701, "no_speech_prob": 0.0, "words": [{"start": 2726.0, "end": 2726.54, "word": " Against", "probability": 0.19384765625}, {"start": 2726.54, "end": 2726.9, "word": " the", "probability": 0.708984375}, {"start": 2726.9, "end": 2727.4, "word": " alternative", "probability": 0.89697265625}, {"start": 2727.4, "end": 2727.96, "word": " hypothesis,", "probability": 0.7939453125}, {"start": 2728.56, "end": 2728.9, "word": " mu1", "probability": 0.6068115234375}, {"start": 2728.9, "end": 2729.26, "word": " minus", "probability": 0.783203125}, {"start": 2729.26, "end": 2729.64, "word": " mu2", "probability": 0.962890625}, {"start": 2729.64, "end": 2729.8, "word": " is", "probability": 0.923828125}, {"start": 2729.8, "end": 2730.02, "word": " not", "probability": 0.94873046875}, {"start": 2730.02, "end": 2730.36, "word": " zero.", "probability": 0.68408203125}, {"start": 2730.88, "end": 2731.22, "word": " That", "probability": 0.8828125}, {"start": 2731.22, "end": 2731.58, "word": " means", "probability": 0.9365234375}, {"start": 2731.58, "end": 2732.04, "word": " mu1", "probability": 0.908935546875}, {"start": 2732.04, "end": 2732.26, "word": " does", "probability": 0.93017578125}, {"start": 2732.26, "end": 2732.44, "word": " not", "probability": 0.9560546875}, {"start": 2732.44, "end": 2732.72, "word": " equal", "probability": 0.92626953125}, {"start": 2732.72, "end": 2733.1, "word": " mu2.", "probability": 0.90771484375}, {"start": 
2733.62, "end": 2733.84, "word": " So", "probability": 0.90283203125}, {"start": 2733.84, "end": 2734.14, "word": " either", "probability": 0.8408203125}, {"start": 2734.14, "end": 2734.38, "word": " you", "probability": 0.91748046875}, {"start": 2734.38, "end": 2734.78, "word": " state", "probability": 0.89697265625}, {"start": 2734.78, "end": 2735.44, "word": " the", "probability": 0.75}, {"start": 2735.44, "end": 2735.74, "word": " null", "probability": 0.9365234375}, {"start": 2735.74, "end": 2736.06, "word": " by", "probability": 0.947265625}, {"start": 2736.06, "end": 2736.34, "word": " using", "probability": 0.93408203125}, {"start": 2736.34, "end": 2736.66, "word": " this", "probability": 0.94384765625}, {"start": 2736.66, "end": 2736.94, "word": " way,", "probability": 0.92822265625}, {"start": 2737.84, "end": 2738.26, "word": " mu1", "probability": 0.956298828125}, {"start": 2738.26, "end": 2738.66, "word": " minus", "probability": 0.982421875}, {"start": 2738.66, "end": 2739.04, "word": " mu2", "probability": 0.89599609375}, {"start": 2739.04, "end": 2739.34, "word": " equals", "probability": 0.90234375}, {"start": 2739.34, "end": 2739.66, "word": " zero,", "probability": 0.84130859375}, {"start": 2739.84, "end": 2739.94, "word": " or", "probability": 0.953125}, {"start": 2739.94, "end": 2740.46, "word": " mu1", "probability": 0.978271484375}, {"start": 2740.46, "end": 2741.08, "word": " equals", "probability": 0.9560546875}, {"start": 2741.08, "end": 2741.52, "word": " mu2.", "probability": 0.973388671875}, {"start": 2742.98, "end": 2743.44, "word": " Now,", "probability": 0.93408203125}, {"start": 2744.36, "end": 2744.92, "word": " before", "probability": 0.8662109375}, {"start": 2744.92, "end": 2745.82, "word": " computing", "probability": 0.86865234375}, {"start": 2745.82, "end": 2746.06, "word": " the", "probability": 0.81982421875}, {"start": 2746.06, "end": 2746.24, "word": " test", "probability": 0.237548828125}, {"start": 2746.24, "end": 2746.7, 
"word": " statistic,", "probability": 0.90478515625}, {"start": 2747.14, "end": 2747.36, "word": " we", "probability": 0.95556640625}, {"start": 2747.36, "end": 2747.64, "word": " have", "probability": 0.9453125}, {"start": 2747.64, "end": 2747.82, "word": " to", "probability": 0.97119140625}, {"start": 2747.82, "end": 2748.38, "word": " compute", "probability": 0.912109375}, {"start": 2748.38, "end": 2750.26, "word": " S", "probability": 0.69873046875}, {"start": 2750.26, "end": 2750.58, "word": " squared", "probability": 0.5146484375}, {"start": 2750.58, "end": 2750.84, "word": " B.", "probability": 0.5166015625}, {"start": 2752.56, "end": 2753.22, "word": " For", "probability": 0.958984375}, {"start": 2753.22, "end": 2753.42, "word": " S", "probability": 0.6064453125}, {"start": 2753.42, "end": 2753.64, "word": " squared", "probability": 0.8193359375}, {"start": 2753.64, "end": 2753.82, "word": " B,", "probability": 0.978515625}, {"start": 2753.9, "end": 2754.02, "word": " this", "probability": 0.7431640625}, {"start": 2754.02, "end": 2754.1, "word": " is", "probability": 0.91162109375}, {"start": 2754.1, "end": 2754.2, "word": " the", "probability": 0.89697265625}, {"start": 2754.2, "end": 2754.52, "word": " equation", "probability": 0.97021484375}, {"start": 2754.52, "end": 2754.78, "word": " we", "probability": 0.93359375}, {"start": 2754.78, "end": 2755.02, "word": " have.", "probability": 0.9443359375}], "temperature": 1.0}, {"id": 104, "seek": 278106, "start": 2756.06, "end": 2781.06, "text": " Now, N1 is 21, so 21 minus 1, times S1 squared. 
We have the sample standard deviation of 1.3 for the first sample, so this quantity squared, plus N2 was 25, minus 1 times S2 squared, 1.16 squared, divided by N1 minus 1 plus N2 minus 1.", "tokens": [823, 11, 426, 16, 307, 5080, 11, 370, 5080, 3175, 502, 11, 1413, 318, 16, 8889, 13, 492, 362, 264, 6889, 3832, 25163, 295, 502, 13, 18, 337, 264, 700, 6889, 11, 370, 341, 11275, 8889, 11, 1804, 426, 17, 390, 3552, 11, 3175, 502, 1413, 318, 17, 8889, 11, 502, 13, 6866, 8889, 11, 6666, 538, 426, 16, 3175, 502, 1804, 426, 17, 3175, 502, 13], "avg_logprob": -0.1606158050982391, "compression_ratio": 1.5629139072847682, "no_speech_prob": 0.0, "words": [{"start": 2756.06, "end": 2756.34, "word": " Now,", "probability": 0.7958984375}, {"start": 2756.42, "end": 2756.66, "word": " N1", "probability": 0.7939453125}, {"start": 2756.66, "end": 2756.84, "word": " is", "probability": 0.9306640625}, {"start": 2756.84, "end": 2758.92, "word": " 21,", "probability": 0.9296875}, {"start": 2759.58, "end": 2760.26, "word": " so", "probability": 0.9072265625}, {"start": 2760.26, "end": 2760.54, "word": " 21", "probability": 0.94775390625}, {"start": 2760.54, "end": 2760.9, "word": " minus", "probability": 0.9052734375}, {"start": 2760.9, "end": 2761.26, "word": " 1,", "probability": 0.8076171875}, {"start": 2761.82, "end": 2762.74, "word": " times", "probability": 0.92236328125}, {"start": 2762.74, "end": 2763.6, "word": " S1", "probability": 0.93115234375}, {"start": 2763.6, "end": 2764.04, "word": " squared.", "probability": 0.81298828125}, {"start": 2764.18, "end": 2764.34, "word": " We", "probability": 0.955078125}, {"start": 2764.34, "end": 2764.56, "word": " have", "probability": 0.94287109375}, {"start": 2764.56, "end": 2764.74, "word": " the", "probability": 0.85693359375}, {"start": 2764.74, "end": 2765.02, "word": " sample", "probability": 0.83935546875}, {"start": 2765.02, "end": 2765.32, "word": " standard", "probability": 0.82421875}, {"start": 2765.32, "end": 2765.74, "word": 
" deviation", "probability": 0.908203125}, {"start": 2765.74, "end": 2766.12, "word": " of", "probability": 0.96044921875}, {"start": 2766.12, "end": 2766.38, "word": " 1", "probability": 0.98681640625}, {"start": 2766.38, "end": 2766.98, "word": ".3", "probability": 0.9921875}, {"start": 2766.98, "end": 2767.7, "word": " for", "probability": 0.8916015625}, {"start": 2767.7, "end": 2767.88, "word": " the", "probability": 0.90771484375}, {"start": 2767.88, "end": 2768.14, "word": " first", "probability": 0.75927734375}, {"start": 2768.14, "end": 2768.52, "word": " sample,", "probability": 0.865234375}, {"start": 2768.62, "end": 2768.78, "word": " so", "probability": 0.9404296875}, {"start": 2768.78, "end": 2768.96, "word": " this", "probability": 0.71435546875}, {"start": 2768.96, "end": 2769.34, "word": " quantity", "probability": 0.3369140625}, {"start": 2769.34, "end": 2769.84, "word": " squared,", "probability": 0.8740234375}, {"start": 2770.98, "end": 2771.5, "word": " plus", "probability": 0.94970703125}, {"start": 2771.5, "end": 2772.36, "word": " N2", "probability": 0.926025390625}, {"start": 2772.36, "end": 2772.56, "word": " was", "probability": 0.87841796875}, {"start": 2772.56, "end": 2773.02, "word": " 25,", "probability": 0.9697265625}, {"start": 2774.02, "end": 2774.4, "word": " minus", "probability": 0.9833984375}, {"start": 2774.4, "end": 2774.72, "word": " 1", "probability": 0.9375}, {"start": 2774.72, "end": 2775.06, "word": " times", "probability": 0.89501953125}, {"start": 2775.06, "end": 2775.5, "word": " S2", "probability": 0.98828125}, {"start": 2775.5, "end": 2775.94, "word": " squared,", "probability": 0.8466796875}, {"start": 2776.6, "end": 2776.8, "word": " 1", "probability": 0.98388671875}, {"start": 2776.8, "end": 2777.48, "word": ".16", "probability": 0.9892578125}, {"start": 2777.48, "end": 2777.96, "word": " squared,", "probability": 0.81298828125}, {"start": 2778.56, "end": 2778.92, "word": " divided", "probability": 0.7666015625}, 
{"start": 2778.92, "end": 2779.14, "word": " by", "probability": 0.96533203125}, {"start": 2779.14, "end": 2779.48, "word": " N1", "probability": 0.9912109375}, {"start": 2779.48, "end": 2779.78, "word": " minus", "probability": 0.98193359375}, {"start": 2779.78, "end": 2780.02, "word": " 1", "probability": 0.9765625}, {"start": 2780.02, "end": 2780.28, "word": " plus", "probability": 0.76220703125}, {"start": 2780.28, "end": 2780.58, "word": " N2", "probability": 0.994384765625}, {"start": 2780.58, "end": 2780.84, "word": " minus", "probability": 0.98388671875}, {"start": 2780.84, "end": 2781.06, "word": " 1.", "probability": 0.94970703125}], "temperature": 1.0}, {"id": 105, "seek": 279628, "start": 2784.12, "end": 2796.28, "text": " The sample, I'm sorry, the bold sample variance, which is about 1.5. Now, you, in this case, after computing a square B,", "tokens": [440, 6889, 11, 286, 478, 2597, 11, 264, 11928, 6889, 21977, 11, 597, 307, 466, 502, 13, 20, 13, 823, 11, 291, 11, 294, 341, 1389, 11, 934, 15866, 257, 3732, 363, 11], "avg_logprob": -0.42463235294117646, "compression_ratio": 1.1203703703703705, "no_speech_prob": 0.0, "words": [{"start": 2784.1200000000003, "end": 2784.76, "word": " The", "probability": 0.1002197265625}, {"start": 2784.76, "end": 2785.4, "word": " sample,", "probability": 0.264404296875}, {"start": 2786.34, "end": 2786.66, "word": " I'm", "probability": 0.68798828125}, {"start": 2786.66, "end": 2786.86, "word": " sorry,", "probability": 0.86962890625}, {"start": 2786.98, "end": 2787.1, "word": " the", "probability": 0.865234375}, {"start": 2787.1, "end": 2787.34, "word": " bold", "probability": 0.59716796875}, {"start": 2787.34, "end": 2787.72, "word": " sample", "probability": 0.8798828125}, {"start": 2787.72, "end": 2788.26, "word": " variance,", "probability": 0.8359375}, {"start": 2789.04, "end": 2789.46, "word": " which", "probability": 0.94091796875}, {"start": 2789.46, "end": 2789.6, "word": " is", "probability": 0.5205078125}, 
{"start": 2789.6, "end": 2789.88, "word": " about", "probability": 0.91162109375}, {"start": 2789.88, "end": 2790.2, "word": " 1", "probability": 0.94482421875}, {"start": 2790.2, "end": 2790.8, "word": ".5.", "probability": 0.993408203125}, {"start": 2792.26, "end": 2792.66, "word": " Now,", "probability": 0.9072265625}, {"start": 2793.0, "end": 2793.16, "word": " you,", "probability": 0.5810546875}, {"start": 2793.34, "end": 2793.56, "word": " in", "probability": 0.9482421875}, {"start": 2793.56, "end": 2793.78, "word": " this", "probability": 0.9541015625}, {"start": 2793.78, "end": 2794.12, "word": " case,", "probability": 0.92041015625}, {"start": 2794.64, "end": 2795.02, "word": " after", "probability": 0.87353515625}, {"start": 2795.02, "end": 2795.54, "word": " computing", "probability": 0.8681640625}, {"start": 2795.54, "end": 2795.78, "word": " a", "probability": 0.461181640625}, {"start": 2795.78, "end": 2796.04, "word": " square", "probability": 0.7294921875}, {"start": 2796.04, "end": 2796.28, "word": " B,", "probability": 0.4453125}], "temperature": 1.0}, {"id": 106, "seek": 282409, "start": 2797.33, "end": 2824.09, "text": " Easily you can compute the value of the test statistic by using this equation. Now x1 bar minus x2 bar, x1 bar is 3.17 minus x2 bar is 2.53 minus. Here we should have mu1 minus mu2. 
Now under x0 because the test statistic is computed only if x0 is true.", "tokens": [46879, 953, 291, 393, 14722, 264, 2158, 295, 264, 1500, 29588, 538, 1228, 341, 5367, 13, 823, 2031, 16, 2159, 3175, 2031, 17, 2159, 11, 2031, 16, 2159, 307, 805, 13, 7773, 3175, 2031, 17, 2159, 307, 568, 13, 19584, 3175, 13, 1692, 321, 820, 362, 2992, 16, 3175, 2992, 17, 13, 823, 833, 2031, 15, 570, 264, 1500, 29588, 307, 40610, 787, 498, 2031, 15, 307, 2074, 13], "avg_logprob": -0.20424107781478337, "compression_ratio": 1.5393939393939393, "no_speech_prob": 0.0, "words": [{"start": 2797.33, "end": 2797.85, "word": " Easily", "probability": 0.6475830078125}, {"start": 2797.85, "end": 2797.99, "word": " you", "probability": 0.8037109375}, {"start": 2797.99, "end": 2798.19, "word": " can", "probability": 0.9326171875}, {"start": 2798.19, "end": 2798.61, "word": " compute", "probability": 0.94873046875}, {"start": 2798.61, "end": 2798.91, "word": " the", "probability": 0.63427734375}, {"start": 2798.91, "end": 2799.19, "word": " value", "probability": 0.96875}, {"start": 2799.19, "end": 2799.41, "word": " of", "probability": 0.94970703125}, {"start": 2799.41, "end": 2799.45, "word": " the", "probability": 0.58984375}, {"start": 2799.45, "end": 2799.63, "word": " test", "probability": 0.67041015625}, {"start": 2799.63, "end": 2800.05, "word": " statistic", "probability": 0.8828125}, {"start": 2800.05, "end": 2800.91, "word": " by", "probability": 0.8251953125}, {"start": 2800.91, "end": 2801.23, "word": " using", "probability": 0.927734375}, {"start": 2801.23, "end": 2801.47, "word": " this", "probability": 0.93896484375}, {"start": 2801.47, "end": 2801.91, "word": " equation.", "probability": 0.97802734375}, {"start": 2802.75, "end": 2803.01, "word": " Now", "probability": 0.61328125}, {"start": 2803.01, "end": 2803.69, "word": " x1", "probability": 0.5174560546875}, {"start": 2803.69, "end": 2803.87, "word": " bar", "probability": 0.84423828125}, {"start": 2803.87, "end": 2804.15, 
"word": " minus", "probability": 0.83544921875}, {"start": 2804.15, "end": 2804.55, "word": " x2", "probability": 0.9853515625}, {"start": 2804.55, "end": 2804.73, "word": " bar,", "probability": 0.947265625}, {"start": 2804.81, "end": 2805.11, "word": " x1", "probability": 0.973388671875}, {"start": 2805.11, "end": 2805.27, "word": " bar", "probability": 0.923828125}, {"start": 2805.27, "end": 2805.41, "word": " is", "probability": 0.9267578125}, {"start": 2805.41, "end": 2805.61, "word": " 3", "probability": 0.9130859375}, {"start": 2805.61, "end": 2806.21, "word": ".17", "probability": 0.83984375}, {"start": 2806.21, "end": 2807.75, "word": " minus", "probability": 0.888671875}, {"start": 2807.75, "end": 2808.37, "word": " x2", "probability": 0.974853515625}, {"start": 2808.37, "end": 2808.55, "word": " bar", "probability": 0.9453125}, {"start": 2808.55, "end": 2808.73, "word": " is", "probability": 0.9111328125}, {"start": 2808.73, "end": 2808.91, "word": " 2", "probability": 0.98974609375}, {"start": 2808.91, "end": 2809.75, "word": ".53", "probability": 0.986572265625}, {"start": 2809.75, "end": 2811.01, "word": " minus.", "probability": 0.97216796875}, {"start": 2811.33, "end": 2811.65, "word": " Here", "probability": 0.8154296875}, {"start": 2811.65, "end": 2811.81, "word": " we", "probability": 0.88916015625}, {"start": 2811.81, "end": 2812.17, "word": " should", "probability": 0.95361328125}, {"start": 2812.17, "end": 2812.59, "word": " have", "probability": 0.9482421875}, {"start": 2812.59, "end": 2813.41, "word": " mu1", "probability": 0.6085205078125}, {"start": 2813.41, "end": 2813.87, "word": " minus", "probability": 0.98388671875}, {"start": 2813.87, "end": 2814.29, "word": " mu2.", "probability": 0.95751953125}, {"start": 2815.67, "end": 2815.91, "word": " Now", "probability": 0.90673828125}, {"start": 2815.91, "end": 2816.69, "word": " under", "probability": 0.65771484375}, {"start": 2816.69, "end": 2817.21, "word": " x0", "probability": 
0.79296875}, {"start": 2817.21, "end": 2819.49, "word": " because", "probability": 0.431640625}, {"start": 2819.49, "end": 2819.85, "word": " the", "probability": 0.72412109375}, {"start": 2819.85, "end": 2820.07, "word": " test", "probability": 0.8466796875}, {"start": 2820.07, "end": 2820.53, "word": " statistic", "probability": 0.90673828125}, {"start": 2820.53, "end": 2820.77, "word": " is", "probability": 0.93798828125}, {"start": 2820.77, "end": 2821.23, "word": " computed", "probability": 0.92138671875}, {"start": 2821.23, "end": 2821.77, "word": " only", "probability": 0.92041015625}, {"start": 2821.77, "end": 2822.37, "word": " if", "probability": 0.94873046875}, {"start": 2822.37, "end": 2823.67, "word": " x0", "probability": 0.98193359375}, {"start": 2823.67, "end": 2823.83, "word": " is", "probability": 0.93017578125}, {"start": 2823.83, "end": 2824.09, "word": " true.", "probability": 0.89306640625}], "temperature": 1.0}, {"id": 107, "seek": 285032, "start": 2825.82, "end": 2850.32, "text": " So, always, always, always, we compute T statistic under H0 is true, always. 
Otherwise, we cannot compute T statistic.", "tokens": [407, 11, 1009, 11, 1009, 11, 1009, 11, 321, 14722, 314, 29588, 833, 389, 15, 307, 2074, 11, 1009, 13, 10328, 11, 321, 2644, 14722, 314, 29588, 13], "avg_logprob": -0.42429956896551724, "compression_ratio": 1.4337349397590362, "no_speech_prob": 0.0, "words": [{"start": 2825.82, "end": 2826.14, "word": " So,", "probability": 0.298828125}, {"start": 2826.7, "end": 2826.72, "word": " always,", "probability": 0.68212890625}, {"start": 2827.06, "end": 2828.52, "word": " always,", "probability": 0.7216796875}, {"start": 2830.06, "end": 2833.56, "word": " always,", "probability": 0.6162109375}, {"start": 2834.54, "end": 2835.08, "word": " we", "probability": 0.80126953125}, {"start": 2835.08, "end": 2836.5, "word": " compute", "probability": 0.86328125}, {"start": 2836.5, "end": 2838.92, "word": " T", "probability": 0.361328125}, {"start": 2838.92, "end": 2839.48, "word": " statistic", "probability": 0.5302734375}, {"start": 2839.48, "end": 2841.48, "word": " under", "probability": 0.81689453125}, {"start": 2841.48, "end": 2844.04, "word": " H0", "probability": 0.57470703125}, {"start": 2844.04, "end": 2844.8, "word": " is", "probability": 0.58740234375}, {"start": 2844.8, "end": 2845.16, "word": " true,", "probability": 0.62109375}, {"start": 2845.52, "end": 2846.04, "word": " always.", "probability": 0.83740234375}, {"start": 2847.46, "end": 2848.12, "word": " Otherwise,", "probability": 0.9033203125}, {"start": 2848.46, "end": 2848.68, "word": " we", "probability": 0.955078125}, {"start": 2848.68, "end": 2849.02, "word": " cannot", "probability": 0.82421875}, {"start": 2849.02, "end": 2849.54, "word": " compute", "probability": 0.94189453125}, {"start": 2849.54, "end": 2849.92, "word": " T", "probability": 0.83984375}, {"start": 2849.92, "end": 2850.32, "word": " statistic.", "probability": 0.82666015625}], "temperature": 1.0}, {"id": 108, "seek": 288268, "start": 2853.76, "end": 2882.68, "text": " The rule is to 
compute the value of the statistic if H1 is true. Let's see what will happen. Now, if H1 is true, H1 mu1 minus mu does not equal zero. So what's the value here? You don't know. Because this difference is not zero. So what's the value? I don't know. So you cannot determine the value of the statistic under H1. But under H0, here we are assuming", "tokens": [440, 4978, 307, 281, 14722, 264, 2158, 295, 264, 29588, 498, 389, 16, 307, 2074, 13, 961, 311, 536, 437, 486, 1051, 13, 823, 11, 498, 389, 16, 307, 2074, 11, 389, 16, 2992, 16, 3175, 2992, 775, 406, 2681, 4018, 13, 407, 437, 311, 264, 2158, 510, 30, 509, 500, 380, 458, 13, 1436, 341, 2649, 307, 406, 4018, 13, 407, 437, 311, 264, 2158, 30, 286, 500, 380, 458, 13, 407, 291, 2644, 6997, 264, 2158, 295, 264, 29588, 833, 389, 16, 13, 583, 833, 389, 15, 11, 510, 321, 366, 11926], "avg_logprob": -0.19588815758102818, "compression_ratio": 1.7391304347826086, "no_speech_prob": 0.0, "words": [{"start": 2853.76, "end": 2854.28, "word": " The", "probability": 0.220703125}, {"start": 2854.28, "end": 2854.8, "word": " rule", "probability": 0.88720703125}, {"start": 2854.8, "end": 2855.28, "word": " is", "probability": 0.85400390625}, {"start": 2855.28, "end": 2855.64, "word": " to", "probability": 0.389404296875}, {"start": 2855.64, "end": 2856.1, "word": " compute", "probability": 0.9150390625}, {"start": 2856.1, "end": 2856.42, "word": " the", "probability": 0.87060546875}, {"start": 2856.42, "end": 2856.74, "word": " value", "probability": 0.97705078125}, {"start": 2856.74, "end": 2856.88, "word": " of", "probability": 0.94287109375}, {"start": 2856.88, "end": 2856.98, "word": " the", "probability": 0.529296875}, {"start": 2856.98, "end": 2857.58, "word": " statistic", "probability": 0.67333984375}, {"start": 2857.58, "end": 2858.74, "word": " if", "probability": 0.75146484375}, {"start": 2858.74, "end": 2859.14, "word": " H1", "probability": 0.709228515625}, {"start": 2859.14, "end": 2859.36, "word": " is", "probability": 
0.888671875}, {"start": 2859.36, "end": 2859.68, "word": " true.", "probability": 0.87109375}, {"start": 2860.32, "end": 2860.66, "word": " Let's", "probability": 0.755859375}, {"start": 2860.66, "end": 2860.8, "word": " see", "probability": 0.91259765625}, {"start": 2860.8, "end": 2860.92, "word": " what", "probability": 0.94091796875}, {"start": 2860.92, "end": 2861.06, "word": " will", "probability": 0.806640625}, {"start": 2861.06, "end": 2861.3, "word": " happen.", "probability": 0.92138671875}, {"start": 2862.3, "end": 2862.52, "word": " Now,", "probability": 0.66455078125}, {"start": 2862.8, "end": 2862.98, "word": " if", "probability": 0.95068359375}, {"start": 2862.98, "end": 2863.42, "word": " H1", "probability": 0.980712890625}, {"start": 2863.42, "end": 2863.58, "word": " is", "probability": 0.94189453125}, {"start": 2863.58, "end": 2864.0, "word": " true,", "probability": 0.96337890625}, {"start": 2865.96, "end": 2866.52, "word": " H1", "probability": 0.850341796875}, {"start": 2866.52, "end": 2867.12, "word": " mu1", "probability": 0.5220947265625}, {"start": 2867.12, "end": 2867.42, "word": " minus", "probability": 0.81787109375}, {"start": 2867.42, "end": 2867.64, "word": " mu", "probability": 0.9375}, {"start": 2867.64, "end": 2867.84, "word": " does", "probability": 0.5830078125}, {"start": 2867.84, "end": 2868.0, "word": " not", "probability": 0.9482421875}, {"start": 2868.0, "end": 2868.26, "word": " equal", "probability": 0.8984375}, {"start": 2868.26, "end": 2868.64, "word": " zero.", "probability": 0.62939453125}, {"start": 2868.92, "end": 2869.18, "word": " So", "probability": 0.91259765625}, {"start": 2869.18, "end": 2869.38, "word": " what's", "probability": 0.762451171875}, {"start": 2869.38, "end": 2869.52, "word": " the", "probability": 0.919921875}, {"start": 2869.52, "end": 2869.74, "word": " value", "probability": 0.974609375}, {"start": 2869.74, "end": 2870.04, "word": " here?", "probability": 0.845703125}, {"start": 2870.26, "end": 
2870.4, "word": " You", "probability": 0.89501953125}, {"start": 2870.4, "end": 2870.64, "word": " don't", "probability": 0.96435546875}, {"start": 2870.64, "end": 2870.9, "word": " know.", "probability": 0.89892578125}, {"start": 2871.38, "end": 2871.88, "word": " Because", "probability": 0.904296875}, {"start": 2871.88, "end": 2872.26, "word": " this", "probability": 0.92333984375}, {"start": 2872.26, "end": 2872.68, "word": " difference", "probability": 0.90576171875}, {"start": 2872.68, "end": 2872.94, "word": " is", "probability": 0.9287109375}, {"start": 2872.94, "end": 2873.08, "word": " not", "probability": 0.92578125}, {"start": 2873.08, "end": 2873.34, "word": " zero.", "probability": 0.8955078125}, {"start": 2873.54, "end": 2873.72, "word": " So", "probability": 0.91748046875}, {"start": 2873.72, "end": 2873.86, "word": " what's", "probability": 0.924072265625}, {"start": 2873.86, "end": 2873.98, "word": " the", "probability": 0.91552734375}, {"start": 2873.98, "end": 2874.2, "word": " value?", "probability": 0.98193359375}, {"start": 2874.62, "end": 2874.88, "word": " I", "probability": 0.93701171875}, {"start": 2874.88, "end": 2875.06, "word": " don't", "probability": 0.975830078125}, {"start": 2875.06, "end": 2875.3, "word": " know.", "probability": 0.8994140625}, {"start": 2875.5, "end": 2875.64, "word": " So", "probability": 0.91552734375}, {"start": 2875.64, "end": 2875.76, "word": " you", "probability": 0.666015625}, {"start": 2875.76, "end": 2876.0, "word": " cannot", "probability": 0.86572265625}, {"start": 2876.0, "end": 2876.54, "word": " determine", "probability": 0.9189453125}, {"start": 2876.54, "end": 2877.48, "word": " the", "probability": 0.912109375}, {"start": 2877.48, "end": 2877.76, "word": " value", "probability": 0.9765625}, {"start": 2877.76, "end": 2877.9, "word": " of", "probability": 0.9541015625}, {"start": 2877.9, "end": 2878.02, "word": " the", "probability": 0.7685546875}, {"start": 2878.02, "end": 2878.52, "word": " 
statistic", "probability": 0.85498046875}, {"start": 2878.52, "end": 2879.12, "word": " under", "probability": 0.89990234375}, {"start": 2879.12, "end": 2879.56, "word": " H1.", "probability": 0.98046875}, {"start": 2880.24, "end": 2880.68, "word": " But", "probability": 0.91748046875}, {"start": 2880.68, "end": 2881.08, "word": " under", "probability": 0.88720703125}, {"start": 2881.08, "end": 2881.62, "word": " H0,", "probability": 0.91259765625}, {"start": 2881.72, "end": 2881.96, "word": " here", "probability": 0.82275390625}, {"start": 2881.96, "end": 2882.12, "word": " we", "probability": 0.859375}, {"start": 2882.12, "end": 2882.24, "word": " are", "probability": 0.61083984375}, {"start": 2882.24, "end": 2882.68, "word": " assuming", "probability": 0.8994140625}], "temperature": 1.0}, {"id": 109, "seek": 291133, "start": 2883.33, "end": 2911.33, "text": " The difference is zero, so this statistic can be computed only if the null hypothesis is true, otherwise you cannot compute this value. Make sense? So maybe true and false problem asks about we compute this statistic under if zero is true. It's correct statement. 
In this case, we're assuming that the difference is zero, so minus zero.", "tokens": [440, 2649, 307, 4018, 11, 370, 341, 29588, 393, 312, 40610, 787, 498, 264, 18184, 17291, 307, 2074, 11, 5911, 291, 2644, 14722, 341, 2158, 13, 4387, 2020, 30, 407, 1310, 2074, 293, 7908, 1154, 8962, 466, 321, 14722, 341, 29588, 833, 498, 4018, 307, 2074, 13, 467, 311, 3006, 5629, 13, 682, 341, 1389, 11, 321, 434, 11926, 300, 264, 2649, 307, 4018, 11, 370, 3175, 4018, 13], "avg_logprob": -0.2720982044935226, "compression_ratio": 1.6439024390243901, "no_speech_prob": 0.0, "words": [{"start": 2883.33, "end": 2883.55, "word": " The", "probability": 0.37939453125}, {"start": 2883.55, "end": 2883.99, "word": " difference", "probability": 0.841796875}, {"start": 2883.99, "end": 2884.23, "word": " is", "probability": 0.93505859375}, {"start": 2884.23, "end": 2884.55, "word": " zero,", "probability": 0.70068359375}, {"start": 2884.79, "end": 2884.93, "word": " so", "probability": 0.90771484375}, {"start": 2884.93, "end": 2885.23, "word": " this", "probability": 0.8974609375}, {"start": 2885.23, "end": 2885.95, "word": " statistic", "probability": 0.8681640625}, {"start": 2885.95, "end": 2886.89, "word": " can", "probability": 0.9140625}, {"start": 2886.89, "end": 2887.09, "word": " be", "probability": 0.80126953125}, {"start": 2887.09, "end": 2887.61, "word": " computed", "probability": 0.86962890625}, {"start": 2887.61, "end": 2888.17, "word": " only", "probability": 0.87646484375}, {"start": 2888.17, "end": 2888.95, "word": " if", "probability": 0.93603515625}, {"start": 2888.95, "end": 2889.49, "word": " the", "probability": 0.82568359375}, {"start": 2889.49, "end": 2889.69, "word": " null", "probability": 0.982421875}, {"start": 2889.69, "end": 2890.21, "word": " hypothesis", "probability": 0.83349609375}, {"start": 2890.21, "end": 2890.41, "word": " is", "probability": 0.9375}, {"start": 2890.41, "end": 2890.53, "word": " true,", "probability": 0.962890625}, {"start": 2890.65, "end": 
2890.91, "word": " otherwise", "probability": 0.84326171875}, {"start": 2890.91, "end": 2891.17, "word": " you", "probability": 0.77392578125}, {"start": 2891.17, "end": 2891.47, "word": " cannot", "probability": 0.78857421875}, {"start": 2891.47, "end": 2892.13, "word": " compute", "probability": 0.91796875}, {"start": 2892.13, "end": 2892.41, "word": " this", "probability": 0.912109375}, {"start": 2892.41, "end": 2892.69, "word": " value.", "probability": 0.64892578125}, {"start": 2893.43, "end": 2893.63, "word": " Make", "probability": 0.626953125}, {"start": 2893.63, "end": 2893.93, "word": " sense?", "probability": 0.8125}, {"start": 2894.65, "end": 2895.11, "word": " So", "probability": 0.89501953125}, {"start": 2895.11, "end": 2895.55, "word": " maybe", "probability": 0.339111328125}, {"start": 2895.55, "end": 2895.79, "word": " true", "probability": 0.314697265625}, {"start": 2895.79, "end": 2895.99, "word": " and", "probability": 0.55810546875}, {"start": 2895.99, "end": 2896.31, "word": " false", "probability": 0.87255859375}, {"start": 2896.31, "end": 2896.89, "word": " problem", "probability": 0.69580078125}, {"start": 2896.89, "end": 2897.31, "word": " asks", "probability": 0.36572265625}, {"start": 2897.31, "end": 2897.81, "word": " about", "probability": 0.8427734375}, {"start": 2897.81, "end": 2898.73, "word": " we", "probability": 0.282470703125}, {"start": 2898.73, "end": 2899.23, "word": " compute", "probability": 0.8779296875}, {"start": 2899.23, "end": 2899.55, "word": " this", "probability": 0.82177734375}, {"start": 2899.55, "end": 2900.13, "word": " statistic", "probability": 0.87353515625}, {"start": 2900.13, "end": 2901.09, "word": " under", "probability": 0.86279296875}, {"start": 2901.09, "end": 2901.47, "word": " if", "probability": 0.8037109375}, {"start": 2901.47, "end": 2901.67, "word": " zero", "probability": 0.732421875}, {"start": 2901.67, "end": 2901.81, "word": " is", "probability": 0.92724609375}, {"start": 2901.81, "end": 
2902.09, "word": " true.", "probability": 0.95068359375}, {"start": 2902.27, "end": 2902.45, "word": " It's", "probability": 0.6773681640625}, {"start": 2902.45, "end": 2902.71, "word": " correct", "probability": 0.57568359375}, {"start": 2902.71, "end": 2903.19, "word": " statement.", "probability": 0.92578125}, {"start": 2906.21, "end": 2906.85, "word": " In", "probability": 0.9453125}, {"start": 2906.85, "end": 2907.11, "word": " this", "probability": 0.94384765625}, {"start": 2907.11, "end": 2907.47, "word": " case,", "probability": 0.9091796875}, {"start": 2907.55, "end": 2907.83, "word": " we're", "probability": 0.67626953125}, {"start": 2907.83, "end": 2908.25, "word": " assuming", "probability": 0.89892578125}, {"start": 2908.25, "end": 2908.63, "word": " that", "probability": 0.477294921875}, {"start": 2908.63, "end": 2908.77, "word": " the", "probability": 0.90966796875}, {"start": 2908.77, "end": 2909.15, "word": " difference", "probability": 0.8720703125}, {"start": 2909.15, "end": 2909.37, "word": " is", "probability": 0.94873046875}, {"start": 2909.37, "end": 2909.71, "word": " zero,", "probability": 0.88525390625}, {"start": 2910.01, "end": 2910.27, "word": " so", "probability": 0.94140625}, {"start": 2910.27, "end": 2910.93, "word": " minus", "probability": 0.951171875}, {"start": 2910.93, "end": 2911.33, "word": " zero.", "probability": 0.87353515625}], "temperature": 1.0}, {"id": 110, "seek": 292933, "start": 2913.15, "end": 2929.33, "text": " In real cases, I'm sorry, in most cases, we assume this difference is zero. In some cases, it might be, for example, it's three, for example, or one. 
One makes sense.", "tokens": [682, 957, 3331, 11, 286, 478, 2597, 11, 294, 881, 3331, 11, 321, 6552, 341, 2649, 307, 4018, 13, 682, 512, 3331, 11, 309, 1062, 312, 11, 337, 1365, 11, 309, 311, 1045, 11, 337, 1365, 11, 420, 472, 13, 1485, 1669, 2020, 13], "avg_logprob": -0.2789930608537462, "compression_ratio": 1.403361344537815, "no_speech_prob": 0.0, "words": [{"start": 2913.15, "end": 2913.55, "word": " In", "probability": 0.473876953125}, {"start": 2913.55, "end": 2914.07, "word": " real", "probability": 0.430908203125}, {"start": 2914.07, "end": 2914.69, "word": " cases,", "probability": 0.92431640625}, {"start": 2916.23, "end": 2916.55, "word": " I'm", "probability": 0.727783203125}, {"start": 2916.55, "end": 2916.83, "word": " sorry,", "probability": 0.8740234375}, {"start": 2917.11, "end": 2917.11, "word": " in", "probability": 0.9013671875}, {"start": 2917.11, "end": 2918.51, "word": " most", "probability": 0.87939453125}, {"start": 2918.51, "end": 2918.95, "word": " cases,", "probability": 0.93212890625}, {"start": 2919.13, "end": 2919.23, "word": " we", "probability": 0.95068359375}, {"start": 2919.23, "end": 2919.71, "word": " assume", "probability": 0.90087890625}, {"start": 2919.71, "end": 2920.09, "word": " this", "probability": 0.8798828125}, {"start": 2920.09, "end": 2920.37, "word": " difference", "probability": 0.84521484375}, {"start": 2920.37, "end": 2920.53, "word": " is", "probability": 0.6484375}, {"start": 2920.53, "end": 2920.79, "word": " zero.", "probability": 0.7529296875}, {"start": 2922.03, "end": 2922.25, "word": " In", "probability": 0.87646484375}, {"start": 2922.25, "end": 2922.51, "word": " some", "probability": 0.89990234375}, {"start": 2922.51, "end": 2922.99, "word": " cases,", "probability": 0.91552734375}, {"start": 2923.09, "end": 2923.15, "word": " it", "probability": 0.67578125}, {"start": 2923.15, "end": 2923.43, "word": " might", "probability": 0.8916015625}, {"start": 2923.43, "end": 2923.65, "word": " be,", 
"probability": 0.9443359375}, {"start": 2923.87, "end": 2923.87, "word": " for", "probability": 0.95166015625}, {"start": 2923.87, "end": 2924.27, "word": " example,", "probability": 0.9736328125}, {"start": 2924.45, "end": 2925.11, "word": " it's", "probability": 0.6453857421875}, {"start": 2925.11, "end": 2925.41, "word": " three,", "probability": 0.63525390625}, {"start": 2925.85, "end": 2925.99, "word": " for", "probability": 0.677734375}, {"start": 2925.99, "end": 2926.31, "word": " example,", "probability": 0.97802734375}, {"start": 2926.43, "end": 2926.65, "word": " or", "probability": 0.9541015625}, {"start": 2926.65, "end": 2926.95, "word": " one.", "probability": 0.93115234375}, {"start": 2928.39, "end": 2928.81, "word": " One", "probability": 0.892578125}, {"start": 2928.81, "end": 2929.05, "word": " makes", "probability": 0.8369140625}, {"start": 2929.05, "end": 2929.33, "word": " sense.", "probability": 0.82275390625}], "temperature": 1.0}, {"id": 111, "seek": 295786, "start": 2930.7, "end": 2957.86, "text": " So we have to plug one instead of zero if the difference is one. But here the difference is zero, so it should be zero. So minus zero. Divide by S squared B, this amount, multiplied by one over N1 plus one over N2. So one over 21 plus one over 25. That will give 2.04. 
So your T statistic in this case is 2.04.", "tokens": [407, 321, 362, 281, 5452, 472, 2602, 295, 4018, 498, 264, 2649, 307, 472, 13, 583, 510, 264, 2649, 307, 4018, 11, 370, 309, 820, 312, 4018, 13, 407, 3175, 4018, 13, 9886, 482, 538, 318, 8889, 363, 11, 341, 2372, 11, 17207, 538, 472, 670, 426, 16, 1804, 472, 670, 426, 17, 13, 407, 472, 670, 5080, 1804, 472, 670, 3552, 13, 663, 486, 976, 568, 13, 14565, 13, 407, 428, 314, 29588, 294, 341, 1389, 307, 568, 13, 14565, 13], "avg_logprob": -0.2083960843373494, "compression_ratio": 1.594871794871795, "no_speech_prob": 0.0, "words": [{"start": 2930.7, "end": 2931.22, "word": " So", "probability": 0.71240234375}, {"start": 2931.22, "end": 2931.44, "word": " we", "probability": 0.67236328125}, {"start": 2931.44, "end": 2931.66, "word": " have", "probability": 0.9296875}, {"start": 2931.66, "end": 2931.88, "word": " to", "probability": 0.9404296875}, {"start": 2931.88, "end": 2932.12, "word": " plug", "probability": 0.8837890625}, {"start": 2932.12, "end": 2932.42, "word": " one", "probability": 0.57275390625}, {"start": 2932.42, "end": 2932.78, "word": " instead", "probability": 0.84912109375}, {"start": 2932.78, "end": 2932.9, "word": " of", "probability": 0.96435546875}, {"start": 2932.9, "end": 2933.18, "word": " zero", "probability": 0.8623046875}, {"start": 2933.18, "end": 2934.02, "word": " if", "probability": 0.38037109375}, {"start": 2934.02, "end": 2934.24, "word": " the", "probability": 0.9052734375}, {"start": 2934.24, "end": 2934.6, "word": " difference", "probability": 0.8466796875}, {"start": 2934.6, "end": 2934.84, "word": " is", "probability": 0.9130859375}, {"start": 2934.84, "end": 2935.04, "word": " one.", "probability": 0.82080078125}, {"start": 2935.22, "end": 2935.62, "word": " But", "probability": 0.93017578125}, {"start": 2935.62, "end": 2935.8, "word": " here", "probability": 0.82861328125}, {"start": 2935.8, "end": 2935.94, "word": " the", "probability": 0.71630859375}, {"start": 2935.94, "end": 
2936.34, "word": " difference", "probability": 0.8876953125}, {"start": 2936.34, "end": 2936.58, "word": " is", "probability": 0.94482421875}, {"start": 2936.58, "end": 2936.82, "word": " zero,", "probability": 0.8935546875}, {"start": 2936.96, "end": 2937.18, "word": " so", "probability": 0.9423828125}, {"start": 2937.18, "end": 2937.84, "word": " it", "probability": 0.91455078125}, {"start": 2937.84, "end": 2937.98, "word": " should", "probability": 0.97216796875}, {"start": 2937.98, "end": 2938.12, "word": " be", "probability": 0.9501953125}, {"start": 2938.12, "end": 2938.36, "word": " zero.", "probability": 0.8583984375}, {"start": 2938.64, "end": 2938.78, "word": " So", "probability": 0.94873046875}, {"start": 2938.78, "end": 2939.08, "word": " minus", "probability": 0.9658203125}, {"start": 2939.08, "end": 2939.48, "word": " zero.", "probability": 0.578125}, {"start": 2940.12, "end": 2940.64, "word": " Divide", "probability": 0.802734375}, {"start": 2940.64, "end": 2940.96, "word": " by", "probability": 0.95947265625}, {"start": 2940.96, "end": 2941.6, "word": " S", "probability": 0.666015625}, {"start": 2941.6, "end": 2941.88, "word": " squared", "probability": 0.6328125}, {"start": 2941.88, "end": 2942.16, "word": " B,", "probability": 0.75390625}, {"start": 2942.36, "end": 2942.52, "word": " this", "probability": 0.9169921875}, {"start": 2942.52, "end": 2942.8, "word": " amount,", "probability": 0.8388671875}, {"start": 2944.34, "end": 2946.0, "word": " multiplied", "probability": 0.6103515625}, {"start": 2946.0, "end": 2946.42, "word": " by", "probability": 0.9716796875}, {"start": 2946.42, "end": 2946.68, "word": " one", "probability": 0.74853515625}, {"start": 2946.68, "end": 2946.86, "word": " over", "probability": 0.92333984375}, {"start": 2946.86, "end": 2947.16, "word": " N1", "probability": 0.748046875}, {"start": 2947.16, "end": 2947.46, "word": " plus", "probability": 0.90673828125}, {"start": 2947.46, "end": 2947.68, "word": " one", 
"probability": 0.916015625}, {"start": 2947.68, "end": 2947.82, "word": " over", "probability": 0.92919921875}, {"start": 2947.82, "end": 2948.14, "word": " N2.", "probability": 0.99462890625}, {"start": 2948.24, "end": 2948.32, "word": " So", "probability": 0.94482421875}, {"start": 2948.32, "end": 2948.48, "word": " one", "probability": 0.7724609375}, {"start": 2948.48, "end": 2948.6, "word": " over", "probability": 0.9287109375}, {"start": 2948.6, "end": 2948.94, "word": " 21", "probability": 0.444091796875}, {"start": 2948.94, "end": 2949.38, "word": " plus", "probability": 0.93505859375}, {"start": 2949.38, "end": 2949.56, "word": " one", "probability": 0.8359375}, {"start": 2949.56, "end": 2949.72, "word": " over", "probability": 0.931640625}, {"start": 2949.72, "end": 2950.18, "word": " 25.", "probability": 0.962890625}, {"start": 2950.7, "end": 2951.08, "word": " That", "probability": 0.8994140625}, {"start": 2951.08, "end": 2951.26, "word": " will", "probability": 0.853515625}, {"start": 2951.26, "end": 2951.48, "word": " give", "probability": 0.859375}, {"start": 2951.48, "end": 2951.66, "word": " 2", "probability": 0.88671875}, {"start": 2951.66, "end": 2952.08, "word": ".04.", "probability": 0.980224609375}, {"start": 2952.9, "end": 2953.14, "word": " So", "probability": 0.96142578125}, {"start": 2953.14, "end": 2953.46, "word": " your", "probability": 0.8193359375}, {"start": 2953.46, "end": 2953.64, "word": " T", "probability": 0.271240234375}, {"start": 2953.64, "end": 2954.18, "word": " statistic", "probability": 0.89208984375}, {"start": 2954.18, "end": 2954.76, "word": " in", "probability": 0.79736328125}, {"start": 2954.76, "end": 2955.02, "word": " this", "probability": 0.9462890625}, {"start": 2955.02, "end": 2955.38, "word": " case", "probability": 0.9189453125}, {"start": 2955.38, "end": 2955.56, "word": " is", "probability": 0.90673828125}, {"start": 2955.56, "end": 2955.74, "word": " 2", "probability": 0.99365234375}, {"start": 2955.74, 
"end": 2957.86, "word": ".04.", "probability": 0.992919921875}], "temperature": 1.0}, {"id": 112, "seek": 298802, "start": 2960.84, "end": 2988.02, "text": " as we mentioned since we are talking about two-tiered tests there are three different approaches for testing one is school a critical value approach the other one confidence interval and the last one is b value approach so let's see two of these critical value approach keep in mind your test statistic", "tokens": [382, 321, 2835, 1670, 321, 366, 1417, 466, 732, 12, 25402, 292, 6921, 456, 366, 1045, 819, 11587, 337, 4997, 472, 307, 1395, 257, 4924, 2158, 3109, 264, 661, 472, 6687, 15035, 293, 264, 1036, 472, 307, 272, 2158, 3109, 370, 718, 311, 536, 732, 295, 613, 4924, 2158, 3109, 1066, 294, 1575, 428, 1500, 29588], "avg_logprob": -0.24616229325010067, "compression_ratio": 1.7616279069767442, "no_speech_prob": 0.0, "words": [{"start": 2960.84, "end": 2961.2, "word": " as", "probability": 0.2203369140625}, {"start": 2961.2, "end": 2961.36, "word": " we", "probability": 0.9033203125}, {"start": 2961.36, "end": 2961.72, "word": " mentioned", "probability": 0.74755859375}, {"start": 2961.72, "end": 2963.04, "word": " since", "probability": 0.6533203125}, {"start": 2963.04, "end": 2963.22, "word": " we", "probability": 0.943359375}, {"start": 2963.22, "end": 2963.34, "word": " are", "probability": 0.89599609375}, {"start": 2963.34, "end": 2963.66, "word": " talking", "probability": 0.83837890625}, {"start": 2963.66, "end": 2964.04, "word": " about", "probability": 0.90185546875}, {"start": 2964.04, "end": 2964.74, "word": " two", "probability": 0.67041015625}, {"start": 2964.74, "end": 2965.06, "word": "-tiered", "probability": 0.55517578125}, {"start": 2965.06, "end": 2965.34, "word": " tests", "probability": 0.556640625}, {"start": 2965.34, "end": 2965.62, "word": " there", "probability": 0.666015625}, {"start": 2965.62, "end": 2965.8, "word": " are", "probability": 0.94580078125}, {"start": 2965.8, "end": 
2966.04, "word": " three", "probability": 0.904296875}, {"start": 2966.04, "end": 2966.46, "word": " different", "probability": 0.873046875}, {"start": 2966.46, "end": 2966.98, "word": " approaches", "probability": 0.77880859375}, {"start": 2966.98, "end": 2968.16, "word": " for", "probability": 0.9208984375}, {"start": 2968.16, "end": 2968.56, "word": " testing", "probability": 0.84130859375}, {"start": 2968.56, "end": 2968.9, "word": " one", "probability": 0.75537109375}, {"start": 2968.9, "end": 2969.1, "word": " is", "probability": 0.8837890625}, {"start": 2969.1, "end": 2969.4, "word": " school", "probability": 0.481201171875}, {"start": 2969.4, "end": 2970.4, "word": " a", "probability": 0.5390625}, {"start": 2970.4, "end": 2970.76, "word": " critical", "probability": 0.93505859375}, {"start": 2970.76, "end": 2971.08, "word": " value", "probability": 0.939453125}, {"start": 2971.08, "end": 2971.64, "word": " approach", "probability": 0.90673828125}, {"start": 2971.64, "end": 2973.24, "word": " the", "probability": 0.79541015625}, {"start": 2973.24, "end": 2973.5, "word": " other", "probability": 0.8955078125}, {"start": 2973.5, "end": 2973.94, "word": " one", "probability": 0.92919921875}, {"start": 2973.94, "end": 2976.02, "word": " confidence", "probability": 0.6875}, {"start": 2976.02, "end": 2976.56, "word": " interval", "probability": 0.94580078125}, {"start": 2976.56, "end": 2977.22, "word": " and", "probability": 0.91748046875}, {"start": 2977.22, "end": 2977.36, "word": " the", "probability": 0.923828125}, {"start": 2977.36, "end": 2977.58, "word": " last", "probability": 0.87744140625}, {"start": 2977.58, "end": 2977.78, "word": " one", "probability": 0.9228515625}, {"start": 2977.78, "end": 2977.92, "word": " is", "probability": 0.9052734375}, {"start": 2977.92, "end": 2978.02, "word": " b", "probability": 0.41650390625}, {"start": 2978.02, "end": 2978.26, "word": " value", "probability": 0.830078125}, {"start": 2978.26, "end": 2978.64, "word": " 
approach", "probability": 0.89697265625}, {"start": 2978.64, "end": 2979.32, "word": " so", "probability": 0.78515625}, {"start": 2979.32, "end": 2979.58, "word": " let's", "probability": 0.9658203125}, {"start": 2979.58, "end": 2979.82, "word": " see", "probability": 0.90966796875}, {"start": 2979.82, "end": 2980.2, "word": " two", "probability": 0.90869140625}, {"start": 2980.2, "end": 2980.38, "word": " of", "probability": 0.94140625}, {"start": 2980.38, "end": 2980.74, "word": " these", "probability": 0.849609375}, {"start": 2980.74, "end": 2981.6, "word": " critical", "probability": 0.86083984375}, {"start": 2981.6, "end": 2984.38, "word": " value", "probability": 0.9423828125}, {"start": 2984.38, "end": 2984.88, "word": " approach", "probability": 0.8828125}, {"start": 2984.88, "end": 2985.44, "word": " keep", "probability": 0.79833984375}, {"start": 2985.44, "end": 2985.58, "word": " in", "probability": 0.947265625}, {"start": 2985.58, "end": 2985.88, "word": " mind", "probability": 0.892578125}, {"start": 2985.88, "end": 2987.26, "word": " your", "probability": 0.85791015625}, {"start": 2987.26, "end": 2987.5, "word": " test", "probability": 0.62890625}, {"start": 2987.5, "end": 2988.02, "word": " statistic", "probability": 0.64111328125}], "temperature": 1.0}, {"id": 113, "seek": 301673, "start": 2989.47, "end": 3016.73, "text": " is 2.04 now since it's two tailed test so you have two rejection regions T alpha over 2 with degrees of freedom n1 plus n2 minus 2 n1 is 21 n2 is 25 so your degrees of freedom 21 plus 25 minus 2 this will give", "tokens": [307, 568, 13, 14565, 586, 1670, 309, 311, 732, 6838, 292, 1500, 370, 291, 362, 732, 26044, 10682, 314, 8961, 670, 568, 365, 5310, 295, 5645, 297, 16, 1804, 297, 17, 3175, 568, 297, 16, 307, 5080, 297, 17, 307, 3552, 370, 428, 5310, 295, 5645, 5080, 1804, 3552, 3175, 568, 341, 486, 976], "avg_logprob": -0.22713067856701938, "compression_ratio": 1.5107913669064748, "no_speech_prob": 0.0, "words": [{"start": 
2989.47, "end": 2989.81, "word": " is", "probability": 0.28125}, {"start": 2989.81, "end": 2990.03, "word": " 2", "probability": 0.8994140625}, {"start": 2990.03, "end": 2990.67, "word": ".04", "probability": 0.9619140625}, {"start": 2990.67, "end": 2993.07, "word": " now", "probability": 0.261962890625}, {"start": 2993.07, "end": 2993.53, "word": " since", "probability": 0.82421875}, {"start": 2993.53, "end": 2994.09, "word": " it's", "probability": 0.928466796875}, {"start": 2994.09, "end": 2994.35, "word": " two", "probability": 0.69873046875}, {"start": 2994.35, "end": 2994.73, "word": " tailed", "probability": 0.4293212890625}, {"start": 2994.73, "end": 2995.01, "word": " test", "probability": 0.7958984375}, {"start": 2995.01, "end": 2996.63, "word": " so", "probability": 0.560546875}, {"start": 2996.63, "end": 2997.67, "word": " you", "probability": 0.9052734375}, {"start": 2997.67, "end": 2997.99, "word": " have", "probability": 0.94970703125}, {"start": 2997.99, "end": 2998.23, "word": " two", "probability": 0.9033203125}, {"start": 2998.23, "end": 2998.61, "word": " rejection", "probability": 0.97412109375}, {"start": 2998.61, "end": 2999.21, "word": " regions", "probability": 0.9638671875}, {"start": 2999.21, "end": 3000.75, "word": " T", "probability": 0.48046875}, {"start": 3000.75, "end": 3000.97, "word": " alpha", "probability": 0.6240234375}, {"start": 3000.97, "end": 3001.25, "word": " over", "probability": 0.9072265625}, {"start": 3001.25, "end": 3001.53, "word": " 2", "probability": 0.52392578125}, {"start": 3001.53, "end": 3001.89, "word": " with", "probability": 0.82177734375}, {"start": 3001.89, "end": 3002.75, "word": " degrees", "probability": 0.9521484375}, {"start": 3002.75, "end": 3002.97, "word": " of", "probability": 0.970703125}, {"start": 3002.97, "end": 3003.37, "word": " freedom", "probability": 0.93896484375}, {"start": 3003.37, "end": 3004.71, "word": " n1", "probability": 0.81591796875}, {"start": 3004.71, "end": 3005.03, "word": 
" plus", "probability": 0.880859375}, {"start": 3005.03, "end": 3005.37, "word": " n2", "probability": 0.968994140625}, {"start": 3005.37, "end": 3005.67, "word": " minus", "probability": 0.98095703125}, {"start": 3005.67, "end": 3006.07, "word": " 2", "probability": 0.88720703125}, {"start": 3006.07, "end": 3008.79, "word": " n1", "probability": 0.834716796875}, {"start": 3008.79, "end": 3008.93, "word": " is", "probability": 0.9287109375}, {"start": 3008.93, "end": 3009.35, "word": " 21", "probability": 0.931640625}, {"start": 3009.35, "end": 3009.91, "word": " n2", "probability": 0.742431640625}, {"start": 3009.91, "end": 3010.05, "word": " is", "probability": 0.9326171875}, {"start": 3010.05, "end": 3010.47, "word": " 25", "probability": 0.96533203125}, {"start": 3010.47, "end": 3012.05, "word": " so", "probability": 0.87451171875}, {"start": 3012.05, "end": 3012.29, "word": " your", "probability": 0.8740234375}, {"start": 3012.29, "end": 3012.57, "word": " degrees", "probability": 0.9482421875}, {"start": 3012.57, "end": 3012.77, "word": " of", "probability": 0.96923828125}, {"start": 3012.77, "end": 3013.11, "word": " freedom", "probability": 0.94287109375}, {"start": 3013.11, "end": 3014.21, "word": " 21", "probability": 0.93359375}, {"start": 3014.21, "end": 3014.69, "word": " plus", "probability": 0.95068359375}, {"start": 3014.69, "end": 3015.25, "word": " 25", "probability": 0.970703125}, {"start": 3015.25, "end": 3015.55, "word": " minus", "probability": 0.98486328125}, {"start": 3015.55, "end": 3015.85, "word": " 2", "probability": 0.93408203125}, {"start": 3015.85, "end": 3016.13, "word": " this", "probability": 0.3623046875}, {"start": 3016.13, "end": 3016.47, "word": " will", "probability": 0.8935546875}, {"start": 3016.47, "end": 3016.73, "word": " give", "probability": 0.88623046875}], "temperature": 1.0}, {"id": 114, "seek": 304622, "start": 3020.16, "end": 3046.22, "text": " 43 degrees of freedom is 43 now look at the t table 25 so it's 25 minus 
2 so that will give 44 now look at the normal sorry t table for degrees of freedom 44", "tokens": [17914, 5310, 295, 5645, 307, 17914, 586, 574, 412, 264, 256, 3199, 3552, 370, 309, 311, 3552, 3175, 568, 370, 300, 486, 976, 16408, 586, 574, 412, 264, 2710, 2597, 256, 3199, 337, 5310, 295, 5645, 16408], "avg_logprob": -0.324218739020197, "compression_ratio": 1.5047619047619047, "no_speech_prob": 0.0, "words": [{"start": 3020.16, "end": 3020.96, "word": " 43", "probability": 0.326416015625}, {"start": 3020.96, "end": 3021.76, "word": " degrees", "probability": 0.56982421875}, {"start": 3021.76, "end": 3022.0, "word": " of", "probability": 0.7421875}, {"start": 3022.0, "end": 3022.26, "word": " freedom", "probability": 0.93994140625}, {"start": 3022.26, "end": 3023.62, "word": " is", "probability": 0.53955078125}, {"start": 3023.62, "end": 3024.1, "word": " 43", "probability": 0.71728515625}, {"start": 3024.1, "end": 3024.8, "word": " now", "probability": 0.4248046875}, {"start": 3024.8, "end": 3025.16, "word": " look", "probability": 0.9052734375}, {"start": 3025.16, "end": 3025.44, "word": " at", "probability": 0.9560546875}, {"start": 3025.44, "end": 3026.46, "word": " the", "probability": 0.85595703125}, {"start": 3026.46, "end": 3027.12, "word": " t", "probability": 0.41455078125}, {"start": 3027.12, "end": 3027.54, "word": " table", "probability": 0.62158203125}, {"start": 3027.54, "end": 3032.2, "word": " 25", "probability": 0.50244140625}, {"start": 3032.2, "end": 3036.06, "word": " so", "probability": 0.5849609375}, {"start": 3036.06, "end": 3036.32, "word": " it's", "probability": 0.77001953125}, {"start": 3036.32, "end": 3036.82, "word": " 25", "probability": 0.9208984375}, {"start": 3036.82, "end": 3037.18, "word": " minus", "probability": 0.84521484375}, {"start": 3037.18, "end": 3037.56, "word": " 2", "probability": 0.8271484375}, {"start": 3037.56, "end": 3037.8, "word": " so", "probability": 0.8232421875}, {"start": 3037.8, "end": 3038.02, "word": " 
that", "probability": 0.90576171875}, {"start": 3038.02, "end": 3038.32, "word": " will", "probability": 0.480224609375}, {"start": 3038.32, "end": 3038.62, "word": " give", "probability": 0.85498046875}, {"start": 3038.62, "end": 3039.76, "word": " 44", "probability": 0.921875}, {"start": 3039.76, "end": 3040.7, "word": " now", "probability": 0.80126953125}, {"start": 3040.7, "end": 3041.28, "word": " look", "probability": 0.9375}, {"start": 3041.28, "end": 3041.68, "word": " at", "probability": 0.8623046875}, {"start": 3041.68, "end": 3042.04, "word": " the", "probability": 0.88818359375}, {"start": 3042.04, "end": 3042.48, "word": " normal", "probability": 0.78515625}, {"start": 3042.48, "end": 3042.88, "word": " sorry", "probability": 0.383056640625}, {"start": 3042.88, "end": 3043.1, "word": " t", "probability": 0.671875}, {"start": 3043.1, "end": 3043.48, "word": " table", "probability": 0.83984375}, {"start": 3043.48, "end": 3045.02, "word": " for", "probability": 0.44677734375}, {"start": 3045.02, "end": 3045.26, "word": " degrees", "probability": 0.93310546875}, {"start": 3045.26, "end": 3045.42, "word": " of", "probability": 0.9453125}, {"start": 3045.42, "end": 3045.66, "word": " freedom", "probability": 0.9453125}, {"start": 3045.66, "end": 3046.22, "word": " 44", "probability": 0.94677734375}], "temperature": 1.0}, {"id": 115, "seek": 307410, "start": 3047.3, "end": 3074.1, "text": " under area to the right side of since we are talking about alpha 5% so the area to the left to the right 0 to 5 the area to the left is the same so we are looking in the upper tail for 0.025 and this amount is 2.0154", "tokens": [833, 1859, 281, 264, 558, 1252, 295, 1670, 321, 366, 1417, 466, 8961, 1025, 4, 370, 264, 1859, 281, 264, 1411, 281, 264, 558, 1958, 281, 1025, 264, 1859, 281, 264, 1411, 307, 264, 912, 370, 321, 366, 1237, 294, 264, 6597, 6838, 337, 1958, 13, 15, 6074, 293, 341, 2372, 307, 568, 13, 15, 5211, 19], "avg_logprob": -0.2634698183372103, 
"compression_ratio": 1.631578947368421, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 3047.3, "end": 3047.84, "word": " under", "probability": 0.2008056640625}, {"start": 3047.84, "end": 3048.38, "word": " area", "probability": 0.7744140625}, {"start": 3048.38, "end": 3048.62, "word": " to", "probability": 0.91943359375}, {"start": 3048.62, "end": 3048.78, "word": " the", "probability": 0.9111328125}, {"start": 3048.78, "end": 3049.0, "word": " right", "probability": 0.91796875}, {"start": 3049.0, "end": 3049.44, "word": " side", "probability": 0.83837890625}, {"start": 3049.44, "end": 3049.88, "word": " of", "probability": 0.94287109375}, {"start": 3049.88, "end": 3050.66, "word": " since", "probability": 0.323974609375}, {"start": 3050.66, "end": 3050.84, "word": " we", "probability": 0.9375}, {"start": 3050.84, "end": 3050.94, "word": " are", "probability": 0.88232421875}, {"start": 3050.94, "end": 3051.22, "word": " talking", "probability": 0.84423828125}, {"start": 3051.22, "end": 3051.56, "word": " about", "probability": 0.90283203125}, {"start": 3051.56, "end": 3051.9, "word": " alpha", "probability": 0.58447265625}, {"start": 3051.9, "end": 3052.22, "word": " 5", "probability": 0.73876953125}, {"start": 3052.22, "end": 3052.62, "word": "%", "probability": 0.451416015625}, {"start": 3052.62, "end": 3054.1, "word": " so", "probability": 0.58056640625}, {"start": 3054.1, "end": 3054.26, "word": " the", "probability": 0.81494140625}, {"start": 3054.26, "end": 3054.46, "word": " area", "probability": 0.888671875}, {"start": 3054.46, "end": 3054.64, "word": " to", "probability": 0.9423828125}, {"start": 3054.64, "end": 3054.78, "word": " the", "probability": 0.91796875}, {"start": 3054.78, "end": 3054.94, "word": " left", "probability": 0.43701171875}, {"start": 3054.94, "end": 3055.1, "word": " to", "probability": 0.67724609375}, {"start": 3055.1, "end": 3055.2, "word": " the", "probability": 0.9072265625}, {"start": 3055.2, "end": 3055.5, 
"word": " right", "probability": 0.9111328125}, {"start": 3055.5, "end": 3056.42, "word": " 0", "probability": 0.548828125}, {"start": 3056.42, "end": 3056.58, "word": " to", "probability": 0.4873046875}, {"start": 3056.58, "end": 3056.94, "word": " 5", "probability": 0.9775390625}, {"start": 3056.94, "end": 3057.12, "word": " the", "probability": 0.3857421875}, {"start": 3057.12, "end": 3057.3, "word": " area", "probability": 0.888671875}, {"start": 3057.3, "end": 3057.46, "word": " to", "probability": 0.93115234375}, {"start": 3057.46, "end": 3057.6, "word": " the", "probability": 0.8994140625}, {"start": 3057.6, "end": 3057.76, "word": " left", "probability": 0.93701171875}, {"start": 3057.76, "end": 3057.92, "word": " is", "probability": 0.77685546875}, {"start": 3057.92, "end": 3058.08, "word": " the", "probability": 0.849609375}, {"start": 3058.08, "end": 3058.34, "word": " same", "probability": 0.91357421875}, {"start": 3058.34, "end": 3059.44, "word": " so", "probability": 0.68994140625}, {"start": 3059.44, "end": 3059.58, "word": " we", "probability": 0.92529296875}, {"start": 3059.58, "end": 3059.66, "word": " are", "probability": 0.7919921875}, {"start": 3059.66, "end": 3060.06, "word": " looking", "probability": 0.9169921875}, {"start": 3060.06, "end": 3061.0, "word": " in", "probability": 0.7275390625}, {"start": 3061.0, "end": 3061.1, "word": " the", "probability": 0.9072265625}, {"start": 3061.1, "end": 3061.36, "word": " upper", "probability": 0.767578125}, {"start": 3061.36, "end": 3061.76, "word": " tail", "probability": 0.705078125}, {"start": 3061.76, "end": 3062.4, "word": " for", "probability": 0.93212890625}, {"start": 3062.4, "end": 3062.62, "word": " 0", "probability": 0.92724609375}, {"start": 3062.62, "end": 3062.98, "word": ".025", "probability": 0.7657063802083334}, {"start": 3062.98, "end": 3065.34, "word": " and", "probability": 0.85791015625}, {"start": 3065.34, "end": 3066.54, "word": " this", "probability": 0.4931640625}, {"start": 
3066.54, "end": 3066.98, "word": " amount", "probability": 0.8818359375}, {"start": 3066.98, "end": 3067.82, "word": " is", "probability": 0.943359375}, {"start": 3067.82, "end": 3068.62, "word": " 2", "probability": 0.9013671875}, {"start": 3068.62, "end": 3074.1, "word": ".0154", "probability": 0.9564208984375}], "temperature": 1.0}, {"id": 116, "seek": 310476, "start": 3076.7, "end": 3104.76, "text": " So this point, 2.1054, the other is negative 2.104. So we reject the null hypothesis if your T statistics fall in this registration. I mean, if T statistic greater than 2.01 or smaller than negative 2.01. In this case, my statistic value is 2.04. This amount actually,", "tokens": [407, 341, 935, 11, 568, 13, 3279, 19563, 11, 264, 661, 307, 3671, 568, 13, 3279, 19, 13, 407, 321, 8248, 264, 18184, 17291, 498, 428, 314, 12523, 2100, 294, 341, 1121, 468, 2405, 13, 286, 914, 11, 498, 314, 29588, 5044, 813, 568, 13, 10607, 420, 4356, 813, 3671, 568, 13, 10607, 13, 682, 341, 1389, 11, 452, 29588, 2158, 307, 568, 13, 14565, 13, 639, 2372, 767, 11], "avg_logprob": -0.265184846562399, "compression_ratio": 1.5549132947976878, "no_speech_prob": 0.0, "words": [{"start": 3076.7, "end": 3076.92, "word": " So", "probability": 0.52734375}, {"start": 3076.92, "end": 3077.14, "word": " this", "probability": 0.7353515625}, {"start": 3077.14, "end": 3077.54, "word": " point,", "probability": 0.93017578125}, {"start": 3078.32, "end": 3078.48, "word": " 2", "probability": 0.9189453125}, {"start": 3078.48, "end": 3079.58, "word": ".1054,", "probability": 0.9659830729166666}, {"start": 3080.34, "end": 3080.86, "word": " the", "probability": 0.57958984375}, {"start": 3080.86, "end": 3081.08, "word": " other", "probability": 0.8681640625}, {"start": 3081.08, "end": 3081.3, "word": " is", "probability": 0.919921875}, {"start": 3081.3, "end": 3081.66, "word": " negative", "probability": 0.64697265625}, {"start": 3081.66, "end": 3081.98, "word": " 2", "probability": 0.970703125}, {"start": 
3081.98, "end": 3082.96, "word": ".104.", "probability": 0.763916015625}, {"start": 3085.5, "end": 3086.26, "word": " So", "probability": 0.94091796875}, {"start": 3086.26, "end": 3086.46, "word": " we", "probability": 0.84912109375}, {"start": 3086.46, "end": 3086.86, "word": " reject", "probability": 0.86962890625}, {"start": 3086.86, "end": 3087.02, "word": " the", "probability": 0.400634765625}, {"start": 3087.02, "end": 3087.16, "word": " null", "probability": 0.9794921875}, {"start": 3087.16, "end": 3087.54, "word": " hypothesis", "probability": 0.92138671875}, {"start": 3087.54, "end": 3088.2, "word": " if", "probability": 0.7392578125}, {"start": 3088.2, "end": 3089.4, "word": " your", "probability": 0.68798828125}, {"start": 3089.4, "end": 3089.54, "word": " T", "probability": 0.382080078125}, {"start": 3089.54, "end": 3090.2, "word": " statistics", "probability": 0.71044921875}, {"start": 3090.2, "end": 3090.58, "word": " fall", "probability": 0.4287109375}, {"start": 3090.58, "end": 3090.82, "word": " in", "probability": 0.6796875}, {"start": 3090.82, "end": 3091.1, "word": " this", "probability": 0.9130859375}, {"start": 3091.1, "end": 3091.86, "word": " registration.", "probability": 0.6791178385416666}, {"start": 3092.0, "end": 3092.08, "word": " I", "probability": 0.92919921875}, {"start": 3092.08, "end": 3092.32, "word": " mean,", "probability": 0.96142578125}, {"start": 3092.7, "end": 3092.84, "word": " if", "probability": 0.94140625}, {"start": 3092.84, "end": 3093.0, "word": " T", "probability": 0.456298828125}, {"start": 3093.0, "end": 3093.46, "word": " statistic", "probability": 0.50537109375}, {"start": 3093.46, "end": 3094.46, "word": " greater", "probability": 0.52734375}, {"start": 3094.46, "end": 3094.76, "word": " than", "probability": 0.9384765625}, {"start": 3094.76, "end": 3094.94, "word": " 2", "probability": 0.97412109375}, {"start": 3094.94, "end": 3095.4, "word": ".01", "probability": 0.93798828125}, {"start": 3095.4, "end": 
3096.14, "word": " or", "probability": 0.75}, {"start": 3096.14, "end": 3096.64, "word": " smaller", "probability": 0.85009765625}, {"start": 3096.64, "end": 3096.94, "word": " than", "probability": 0.9345703125}, {"start": 3096.94, "end": 3097.24, "word": " negative", "probability": 0.908203125}, {"start": 3097.24, "end": 3097.5, "word": " 2", "probability": 0.99169921875}, {"start": 3097.5, "end": 3098.04, "word": ".01.", "probability": 0.974853515625}, {"start": 3098.76, "end": 3099.12, "word": " In", "probability": 0.94970703125}, {"start": 3099.12, "end": 3099.38, "word": " this", "probability": 0.9453125}, {"start": 3099.38, "end": 3099.74, "word": " case,", "probability": 0.9130859375}, {"start": 3099.9, "end": 3100.1, "word": " my", "probability": 0.96728515625}, {"start": 3100.1, "end": 3100.64, "word": " statistic", "probability": 0.81884765625}, {"start": 3100.64, "end": 3101.16, "word": " value", "probability": 0.96533203125}, {"start": 3101.16, "end": 3101.62, "word": " is", "probability": 0.94287109375}, {"start": 3101.62, "end": 3101.82, "word": " 2", "probability": 0.99609375}, {"start": 3101.82, "end": 3102.34, "word": ".04.", "probability": 0.991943359375}, {"start": 3103.36, "end": 3103.74, "word": " This", "probability": 0.8779296875}, {"start": 3103.74, "end": 3104.14, "word": " amount", "probability": 0.88671875}, {"start": 3104.14, "end": 3104.76, "word": " actually,", "probability": 0.6748046875}], "temperature": 1.0}, {"id": 117, "seek": 311377, "start": 3105.99, "end": 3113.77, "text": " fourth in this rejection region, so we reject them. 
So since he's statistical, he's 10.", "tokens": [6409, 294, 341, 26044, 4458, 11, 370, 321, 8248, 552, 13, 407, 1670, 415, 311, 22820, 11, 415, 311, 1266, 13], "avg_logprob": -0.5170454735105688, "compression_ratio": 1.1139240506329113, "no_speech_prob": 0.0, "words": [{"start": 3105.99, "end": 3106.49, "word": " fourth", "probability": 0.26416015625}, {"start": 3106.49, "end": 3106.69, "word": " in", "probability": 0.875}, {"start": 3106.69, "end": 3106.89, "word": " this", "probability": 0.88916015625}, {"start": 3106.89, "end": 3107.23, "word": " rejection", "probability": 0.80517578125}, {"start": 3107.23, "end": 3107.73, "word": " region,", "probability": 0.89208984375}, {"start": 3108.41, "end": 3108.65, "word": " so", "probability": 0.84814453125}, {"start": 3108.65, "end": 3108.75, "word": " we", "probability": 0.6435546875}, {"start": 3108.75, "end": 3109.01, "word": " reject", "probability": 0.459228515625}, {"start": 3109.01, "end": 3109.19, "word": " them.", "probability": 0.83935546875}, {"start": 3109.83, "end": 3110.51, "word": " So", "probability": 0.60302734375}, {"start": 3110.51, "end": 3110.89, "word": " since", "probability": 0.52685546875}, {"start": 3110.89, "end": 3111.25, "word": " he's", "probability": 0.5166015625}, {"start": 3111.25, "end": 3111.61, "word": " statistical,", "probability": 0.460205078125}, {"start": 3113.23, "end": 3113.55, "word": " he's", "probability": 0.911865234375}, {"start": 3113.55, "end": 3113.77, "word": " 10.", "probability": 0.37109375}], "temperature": 1.0}, {"id": 118, "seek": 314287, "start": 3115.39, "end": 3142.87, "text": " which is equal to 2.04, is greater than 2.01, then we reject the null hypothesis. So the decision is reject zero at 5% level of confidence of significance. Your conclusion, there is evidence of a difference in means. 
That means the mean of your stock exchange does not equal", "tokens": [597, 307, 2681, 281, 568, 13, 14565, 11, 307, 5044, 813, 568, 13, 10607, 11, 550, 321, 8248, 264, 18184, 17291, 13, 407, 264, 3537, 307, 8248, 4018, 412, 1025, 4, 1496, 295, 6687, 295, 17687, 13, 2260, 10063, 11, 456, 307, 4467, 295, 257, 2649, 294, 1355, 13, 663, 1355, 264, 914, 295, 428, 4127, 7742, 775, 406, 2681], "avg_logprob": -0.1946721360331676, "compression_ratio": 1.510989010989011, "no_speech_prob": 0.0, "words": [{"start": 3115.39, "end": 3115.59, "word": " which", "probability": 0.3408203125}, {"start": 3115.59, "end": 3115.69, "word": " is", "probability": 0.92724609375}, {"start": 3115.69, "end": 3115.89, "word": " equal", "probability": 0.88671875}, {"start": 3115.89, "end": 3116.01, "word": " to", "probability": 0.53466796875}, {"start": 3116.01, "end": 3116.17, "word": " 2", "probability": 0.93017578125}, {"start": 3116.17, "end": 3116.71, "word": ".04,", "probability": 0.960205078125}, {"start": 3117.15, "end": 3117.59, "word": " is", "probability": 0.83837890625}, {"start": 3117.59, "end": 3117.95, "word": " greater", "probability": 0.81787109375}, {"start": 3117.95, "end": 3118.23, "word": " than", "probability": 0.93505859375}, {"start": 3118.23, "end": 3118.47, "word": " 2", "probability": 0.99609375}, {"start": 3118.47, "end": 3119.23, "word": ".01,", "probability": 0.990966796875}, {"start": 3119.83, "end": 3120.11, "word": " then", "probability": 0.84716796875}, {"start": 3120.11, "end": 3121.03, "word": " we", "probability": 0.953125}, {"start": 3121.03, "end": 3122.99, "word": " reject", "probability": 0.55810546875}, {"start": 3122.99, "end": 3123.97, "word": " the", "probability": 0.8623046875}, {"start": 3123.97, "end": 3124.15, "word": " null", "probability": 0.88134765625}, {"start": 3124.15, "end": 3124.59, "word": " hypothesis.", "probability": 0.982421875}, {"start": 3125.53, "end": 3125.89, "word": " So", "probability": 0.9384765625}, {"start": 3125.89, 
"end": 3126.47, "word": " the", "probability": 0.78564453125}, {"start": 3126.47, "end": 3126.93, "word": " decision", "probability": 0.86572265625}, {"start": 3126.93, "end": 3127.35, "word": " is", "probability": 0.94140625}, {"start": 3127.35, "end": 3127.89, "word": " reject", "probability": 0.67431640625}, {"start": 3127.89, "end": 3128.31, "word": " zero", "probability": 0.6025390625}, {"start": 3128.31, "end": 3128.49, "word": " at", "probability": 0.9326171875}, {"start": 3128.49, "end": 3128.75, "word": " 5", "probability": 0.71435546875}, {"start": 3128.75, "end": 3129.13, "word": "%", "probability": 0.9091796875}, {"start": 3129.13, "end": 3129.35, "word": " level", "probability": 0.873046875}, {"start": 3129.35, "end": 3129.49, "word": " of", "probability": 0.96142578125}, {"start": 3129.49, "end": 3129.89, "word": " confidence", "probability": 0.9033203125}, {"start": 3129.89, "end": 3130.21, "word": " of", "probability": 0.38916015625}, {"start": 3130.21, "end": 3130.97, "word": " significance.", "probability": 0.97509765625}, {"start": 3131.69, "end": 3131.83, "word": " Your", "probability": 0.896484375}, {"start": 3131.83, "end": 3132.43, "word": " conclusion,", "probability": 0.90283203125}, {"start": 3132.83, "end": 3132.95, "word": " there", "probability": 0.88916015625}, {"start": 3132.95, "end": 3133.15, "word": " is", "probability": 0.8544921875}, {"start": 3133.15, "end": 3133.91, "word": " evidence", "probability": 0.9501953125}, {"start": 3133.91, "end": 3134.81, "word": " of", "probability": 0.96337890625}, {"start": 3134.81, "end": 3134.93, "word": " a", "probability": 0.955078125}, {"start": 3134.93, "end": 3135.37, "word": " difference", "probability": 0.83984375}, {"start": 3135.37, "end": 3135.61, "word": " in", "probability": 0.90625}, {"start": 3135.61, "end": 3135.89, "word": " means.", "probability": 0.87255859375}, {"start": 3136.83, "end": 3137.15, "word": " That", "probability": 0.88525390625}, {"start": 3137.15, "end": 
3137.47, "word": " means", "probability": 0.9326171875}, {"start": 3137.47, "end": 3137.85, "word": " the", "probability": 0.89453125}, {"start": 3137.85, "end": 3138.29, "word": " mean", "probability": 0.9375}, {"start": 3138.29, "end": 3138.51, "word": " of", "probability": 0.8740234375}, {"start": 3138.51, "end": 3139.73, "word": " your", "probability": 0.5439453125}, {"start": 3139.73, "end": 3140.69, "word": " stock", "probability": 0.7900390625}, {"start": 3140.69, "end": 3141.35, "word": " exchange", "probability": 0.8994140625}, {"start": 3141.35, "end": 3142.17, "word": " does", "probability": 0.9677734375}, {"start": 3142.17, "end": 3142.39, "word": " not", "probability": 0.94775390625}, {"start": 3142.39, "end": 3142.87, "word": " equal", "probability": 0.927734375}], "temperature": 1.0}, {"id": 119, "seek": 315873, "start": 3147.23, "end": 3158.73, "text": " So that's how can we use the critical value approach in order for testing the null hypothesis. Any question? So one more time.", "tokens": [407, 300, 311, 577, 393, 321, 764, 264, 4924, 2158, 3109, 294, 1668, 337, 4997, 264, 18184, 17291, 13, 2639, 1168, 30, 407, 472, 544, 565, 13], "avg_logprob": -0.1565290242433548, "compression_ratio": 1.1759259259259258, "no_speech_prob": 0.0, "words": [{"start": 3147.23, "end": 3147.87, "word": " So", "probability": 0.546875}, {"start": 3147.87, "end": 3148.41, "word": " that's", "probability": 0.8037109375}, {"start": 3148.41, "end": 3148.75, "word": " how", "probability": 0.82470703125}, {"start": 3148.75, "end": 3149.03, "word": " can", "probability": 0.85302734375}, {"start": 3149.03, "end": 3149.19, "word": " we", "probability": 0.94775390625}, {"start": 3149.19, "end": 3149.43, "word": " use", "probability": 0.8720703125}, {"start": 3149.43, "end": 3149.63, "word": " the", "probability": 0.9150390625}, {"start": 3149.63, "end": 3149.97, "word": " critical", "probability": 0.90234375}, {"start": 3149.97, "end": 3150.29, "word": " value", "probability": 
0.9375}, {"start": 3150.29, "end": 3150.75, "word": " approach", "probability": 0.91650390625}, {"start": 3150.75, "end": 3150.93, "word": " in", "probability": 0.8427734375}, {"start": 3150.93, "end": 3151.17, "word": " order", "probability": 0.89501953125}, {"start": 3151.17, "end": 3151.47, "word": " for", "probability": 0.93310546875}, {"start": 3151.47, "end": 3151.89, "word": " testing", "probability": 0.8740234375}, {"start": 3151.89, "end": 3152.11, "word": " the", "probability": 0.86181640625}, {"start": 3152.11, "end": 3152.27, "word": " null", "probability": 0.91162109375}, {"start": 3152.27, "end": 3152.67, "word": " hypothesis.", "probability": 0.853515625}, {"start": 3154.61, "end": 3155.25, "word": " Any", "probability": 0.91455078125}, {"start": 3155.25, "end": 3155.63, "word": " question?", "probability": 0.5927734375}, {"start": 3157.71, "end": 3158.05, "word": " So", "probability": 0.87060546875}, {"start": 3158.05, "end": 3158.25, "word": " one", "probability": 0.810546875}, {"start": 3158.25, "end": 3158.41, "word": " more", "probability": 0.9375}, {"start": 3158.41, "end": 3158.73, "word": " time.", "probability": 0.89501953125}], "temperature": 1.0}, {"id": 120, "seek": 319420, "start": 3168.6, "end": 3194.2, "text": " In New York State, it's changed with this information and for NASDAQ. And we're assuming here populations are normally distributed because the sample sizes are less than 13. And we assume sigmas are known, but they are equal. So the null hypothesis is mu 1 minus 2 equals 0. 
Again, this is not zero.", "tokens": [682, 1873, 3609, 4533, 11, 309, 311, 3105, 365, 341, 1589, 293, 337, 10182, 7509, 48, 13, 400, 321, 434, 11926, 510, 12822, 366, 5646, 12631, 570, 264, 6889, 11602, 366, 1570, 813, 3705, 13, 400, 321, 6552, 4556, 3799, 366, 2570, 11, 457, 436, 366, 2681, 13, 407, 264, 18184, 17291, 307, 2992, 502, 3175, 568, 6915, 1958, 13, 3764, 11, 341, 307, 406, 4018, 13], "avg_logprob": -0.3226102818461025, "compression_ratio": 1.4563106796116505, "no_speech_prob": 0.0, "words": [{"start": 3168.6, "end": 3168.84, "word": " In", "probability": 0.4111328125}, {"start": 3168.84, "end": 3169.02, "word": " New", "probability": 0.5556640625}, {"start": 3169.02, "end": 3169.14, "word": " York", "probability": 0.96240234375}, {"start": 3169.14, "end": 3169.52, "word": " State,", "probability": 0.251220703125}, {"start": 3170.14, "end": 3170.98, "word": " it's", "probability": 0.800537109375}, {"start": 3170.98, "end": 3171.32, "word": " changed", "probability": 0.57275390625}, {"start": 3171.32, "end": 3171.78, "word": " with", "probability": 0.86865234375}, {"start": 3171.78, "end": 3172.88, "word": " this", "probability": 0.41796875}, {"start": 3172.88, "end": 3173.52, "word": " information", "probability": 0.83349609375}, {"start": 3173.52, "end": 3174.38, "word": " and", "probability": 0.483642578125}, {"start": 3174.38, "end": 3174.62, "word": " for", "probability": 0.86572265625}, {"start": 3174.62, "end": 3175.08, "word": " NASDAQ.", "probability": 0.82177734375}, {"start": 3176.9, "end": 3177.16, "word": " And", "probability": 0.93408203125}, {"start": 3177.16, "end": 3177.38, "word": " we're", "probability": 0.712890625}, {"start": 3177.38, "end": 3177.78, "word": " assuming", "probability": 0.8759765625}, {"start": 3177.78, "end": 3178.18, "word": " here", "probability": 0.8349609375}, {"start": 3178.18, "end": 3178.84, "word": " populations", "probability": 0.62744140625}, {"start": 3178.84, "end": 3179.96, "word": " are", "probability": 
0.9072265625}, {"start": 3179.96, "end": 3181.16, "word": " normally", "probability": 0.7822265625}, {"start": 3181.16, "end": 3181.7, "word": " distributed", "probability": 0.91357421875}, {"start": 3181.7, "end": 3182.2, "word": " because", "probability": 0.705078125}, {"start": 3182.2, "end": 3183.48, "word": " the", "probability": 0.469482421875}, {"start": 3183.48, "end": 3183.74, "word": " sample", "probability": 0.90087890625}, {"start": 3183.74, "end": 3184.08, "word": " sizes", "probability": 0.56396484375}, {"start": 3184.08, "end": 3184.28, "word": " are", "probability": 0.92041015625}, {"start": 3184.28, "end": 3184.46, "word": " less", "probability": 0.791015625}, {"start": 3184.46, "end": 3184.6, "word": " than", "probability": 0.9375}, {"start": 3184.6, "end": 3184.96, "word": " 13.", "probability": 0.7080078125}, {"start": 3185.54, "end": 3185.92, "word": " And", "probability": 0.9482421875}, {"start": 3185.92, "end": 3186.06, "word": " we", "probability": 0.9580078125}, {"start": 3186.06, "end": 3186.46, "word": " assume", "probability": 0.888671875}, {"start": 3186.46, "end": 3187.0, "word": " sigmas", "probability": 0.61767578125}, {"start": 3187.0, "end": 3187.18, "word": " are", "probability": 0.828125}, {"start": 3187.18, "end": 3187.38, "word": " known,", "probability": 0.603515625}, {"start": 3187.46, "end": 3187.64, "word": " but", "probability": 0.9208984375}, {"start": 3187.64, "end": 3187.82, "word": " they", "probability": 0.88427734375}, {"start": 3187.82, "end": 3187.98, "word": " are", "probability": 0.93310546875}, {"start": 3187.98, "end": 3188.28, "word": " equal.", "probability": 0.9140625}, {"start": 3190.44, "end": 3190.84, "word": " So", "probability": 0.95556640625}, {"start": 3190.84, "end": 3190.98, "word": " the", "probability": 0.6640625}, {"start": 3190.98, "end": 3191.12, "word": " null", "probability": 0.970703125}, {"start": 3191.12, "end": 3191.56, "word": " hypothesis", "probability": 0.8681640625}, {"start": 
3191.56, "end": 3191.78, "word": " is", "probability": 0.336181640625}, {"start": 3191.78, "end": 3191.9, "word": " mu", "probability": 0.52685546875}, {"start": 3191.9, "end": 3192.06, "word": " 1", "probability": 0.414794921875}, {"start": 3192.06, "end": 3192.32, "word": " minus", "probability": 0.9462890625}, {"start": 3192.32, "end": 3192.6, "word": " 2", "probability": 0.8330078125}, {"start": 3192.6, "end": 3192.86, "word": " equals", "probability": 0.9248046875}, {"start": 3192.86, "end": 3193.12, "word": " 0.", "probability": 0.70849609375}, {"start": 3193.2, "end": 3193.38, "word": " Again,", "probability": 0.8642578125}, {"start": 3193.42, "end": 3193.54, "word": " this", "probability": 0.62646484375}, {"start": 3193.54, "end": 3193.7, "word": " is", "probability": 0.91015625}, {"start": 3193.7, "end": 3193.88, "word": " not", "probability": 0.9404296875}, {"start": 3193.88, "end": 3194.2, "word": " zero.", "probability": 0.473388671875}], "temperature": 1.0}, {"id": 121, "seek": 322423, "start": 3195.95, "end": 3224.23, "text": " We have to compute the bold variance first, then we can compute the value of the test statistic. The value is 2.04. As we mentioned, there are two, three approaches for doing this test. One is called critical value approach. Now, critical value is plus or minus T alpha over 2 with degrees of freedom 44. By the T table, we got this result. 
So the critical regions are", "tokens": [492, 362, 281, 14722, 264, 11928, 21977, 700, 11, 550, 321, 393, 14722, 264, 2158, 295, 264, 1500, 29588, 13, 440, 2158, 307, 568, 13, 14565, 13, 1018, 321, 2835, 11, 456, 366, 732, 11, 1045, 11587, 337, 884, 341, 1500, 13, 1485, 307, 1219, 4924, 2158, 3109, 13, 823, 11, 4924, 2158, 307, 1804, 420, 3175, 314, 8961, 670, 568, 365, 5310, 295, 5645, 16408, 13, 3146, 264, 314, 3199, 11, 321, 658, 341, 1874, 13, 407, 264, 4924, 10682, 366], "avg_logprob": -0.22082078600504312, "compression_ratio": 1.6327433628318584, "no_speech_prob": 0.0, "words": [{"start": 3195.95, "end": 3196.17, "word": " We", "probability": 0.51416015625}, {"start": 3196.17, "end": 3196.35, "word": " have", "probability": 0.857421875}, {"start": 3196.35, "end": 3196.49, "word": " to", "probability": 0.9609375}, {"start": 3196.49, "end": 3196.93, "word": " compute", "probability": 0.9189453125}, {"start": 3196.93, "end": 3197.41, "word": " the", "probability": 0.8759765625}, {"start": 3197.41, "end": 3197.67, "word": " bold", "probability": 0.254150390625}, {"start": 3197.67, "end": 3198.15, "word": " variance", "probability": 0.79150390625}, {"start": 3198.15, "end": 3198.63, "word": " first,", "probability": 0.8251953125}, {"start": 3198.89, "end": 3198.97, "word": " then", "probability": 0.82763671875}, {"start": 3198.97, "end": 3199.13, "word": " we", "probability": 0.93017578125}, {"start": 3199.13, "end": 3199.31, "word": " can", "probability": 0.87646484375}, {"start": 3199.31, "end": 3199.63, "word": " compute", "probability": 0.93505859375}, {"start": 3199.63, "end": 3199.81, "word": " the", "probability": 0.845703125}, {"start": 3199.81, "end": 3200.09, "word": " value", "probability": 0.96630859375}, {"start": 3200.09, "end": 3200.25, "word": " of", "probability": 0.95849609375}, {"start": 3200.25, "end": 3200.41, "word": " the", "probability": 0.87744140625}, {"start": 3200.41, "end": 3200.73, "word": " test", "probability": 0.75341796875}, 
{"start": 3200.73, "end": 3201.95, "word": " statistic.", "probability": 0.728515625}, {"start": 3202.51, "end": 3202.63, "word": " The", "probability": 0.8193359375}, {"start": 3202.63, "end": 3202.83, "word": " value", "probability": 0.94384765625}, {"start": 3202.83, "end": 3203.01, "word": " is", "probability": 0.92626953125}, {"start": 3203.01, "end": 3203.15, "word": " 2", "probability": 0.826171875}, {"start": 3203.15, "end": 3203.53, "word": ".04.", "probability": 0.935791015625}, {"start": 3204.45, "end": 3205.01, "word": " As", "probability": 0.955078125}, {"start": 3205.01, "end": 3205.13, "word": " we", "probability": 0.93896484375}, {"start": 3205.13, "end": 3205.41, "word": " mentioned,", "probability": 0.81201171875}, {"start": 3205.51, "end": 3205.57, "word": " there", "probability": 0.89892578125}, {"start": 3205.57, "end": 3205.75, "word": " are", "probability": 0.9384765625}, {"start": 3205.75, "end": 3206.01, "word": " two,", "probability": 0.68408203125}, {"start": 3206.31, "end": 3206.51, "word": " three", "probability": 0.92822265625}, {"start": 3206.51, "end": 3207.39, "word": " approaches", "probability": 0.76123046875}, {"start": 3207.39, "end": 3207.77, "word": " for", "probability": 0.927734375}, {"start": 3207.77, "end": 3208.07, "word": " doing", "probability": 0.95947265625}, {"start": 3208.07, "end": 3208.33, "word": " this", "probability": 0.93359375}, {"start": 3208.33, "end": 3208.65, "word": " test.", "probability": 0.87353515625}, {"start": 3209.17, "end": 3209.35, "word": " One", "probability": 0.921875}, {"start": 3209.35, "end": 3209.49, "word": " is", "probability": 0.93701171875}, {"start": 3209.49, "end": 3209.75, "word": " called", "probability": 0.86083984375}, {"start": 3209.75, "end": 3210.17, "word": " critical", "probability": 0.4013671875}, {"start": 3210.17, "end": 3210.53, "word": " value", "probability": 0.91943359375}, {"start": 3210.53, "end": 3211.05, "word": " approach.", "probability": 0.91259765625}, 
{"start": 3212.07, "end": 3212.39, "word": " Now,", "probability": 0.95947265625}, {"start": 3212.75, "end": 3213.31, "word": " critical", "probability": 0.65673828125}, {"start": 3213.31, "end": 3213.79, "word": " value", "probability": 0.429931640625}, {"start": 3213.79, "end": 3214.07, "word": " is", "probability": 0.81005859375}, {"start": 3214.07, "end": 3214.51, "word": " plus", "probability": 0.9296875}, {"start": 3214.51, "end": 3214.75, "word": " or", "probability": 0.96044921875}, {"start": 3214.75, "end": 3215.23, "word": " minus", "probability": 0.9853515625}, {"start": 3215.23, "end": 3216.71, "word": " T", "probability": 0.783203125}, {"start": 3216.71, "end": 3216.95, "word": " alpha", "probability": 0.70458984375}, {"start": 3216.95, "end": 3217.21, "word": " over", "probability": 0.9228515625}, {"start": 3217.21, "end": 3217.53, "word": " 2", "probability": 0.5380859375}, {"start": 3217.53, "end": 3217.93, "word": " with", "probability": 0.767578125}, {"start": 3217.93, "end": 3218.21, "word": " degrees", "probability": 0.939453125}, {"start": 3218.21, "end": 3218.37, "word": " of", "probability": 0.96337890625}, {"start": 3218.37, "end": 3218.55, "word": " freedom", "probability": 0.93896484375}, {"start": 3218.55, "end": 3218.89, "word": " 44.", "probability": 0.30810546875}, {"start": 3219.77, "end": 3220.33, "word": " By", "probability": 0.98046875}, {"start": 3220.33, "end": 3220.53, "word": " the", "probability": 0.921875}, {"start": 3220.53, "end": 3220.67, "word": " T", "probability": 0.89013671875}, {"start": 3220.67, "end": 3220.93, "word": " table,", "probability": 0.65185546875}, {"start": 3220.95, "end": 3221.11, "word": " we", "probability": 0.88525390625}, {"start": 3221.11, "end": 3221.27, "word": " got", "probability": 0.7509765625}, {"start": 3221.27, "end": 3221.49, "word": " this", "probability": 0.93701171875}, {"start": 3221.49, "end": 3221.83, "word": " result.", "probability": 0.9345703125}, {"start": 3222.49, "end": 
3222.85, "word": " So", "probability": 0.95556640625}, {"start": 3222.85, "end": 3223.09, "word": " the", "probability": 0.52197265625}, {"start": 3223.09, "end": 3223.45, "word": " critical", "probability": 0.94677734375}, {"start": 3223.45, "end": 3223.93, "word": " regions", "probability": 0.94580078125}, {"start": 3223.93, "end": 3224.23, "word": " are", "probability": 0.94091796875}], "temperature": 1.0}, {"id": 122, "seek": 325100, "start": 3226.14, "end": 3251.0, "text": " above 2.01 or below minus 2.01. Now your statistic falls in this rejection region. So we have to reject the null hypothesis. So my conclusion is there is sufficient evidence to support the alternative hypothesis. The other approach, confidence interval for mu1 minus mu2.", "tokens": [3673, 568, 13, 10607, 420, 2507, 3175, 568, 13, 10607, 13, 823, 428, 29588, 8804, 294, 341, 26044, 4458, 13, 407, 321, 362, 281, 8248, 264, 18184, 17291, 13, 407, 452, 10063, 307, 456, 307, 11563, 4467, 281, 1406, 264, 8535, 17291, 13, 440, 661, 3109, 11, 6687, 15035, 337, 2992, 16, 3175, 2992, 17, 13], "avg_logprob": -0.16762609021705493, "compression_ratio": 1.5280898876404494, "no_speech_prob": 0.0, "words": [{"start": 3226.14, "end": 3226.62, "word": " above", "probability": 0.53515625}, {"start": 3226.62, "end": 3226.88, "word": " 2", "probability": 0.9296875}, {"start": 3226.88, "end": 3227.32, "word": ".01", "probability": 0.980224609375}, {"start": 3227.32, "end": 3227.7, "word": " or", "probability": 0.89306640625}, {"start": 3227.7, "end": 3228.0, "word": " below", "probability": 0.908203125}, {"start": 3228.0, "end": 3228.34, "word": " minus", "probability": 0.7919921875}, {"start": 3228.34, "end": 3228.52, "word": " 2", "probability": 0.99365234375}, {"start": 3228.52, "end": 3228.98, "word": ".01.", "probability": 0.98046875}, {"start": 3229.7, "end": 3229.94, "word": " Now", "probability": 0.92041015625}, {"start": 3229.94, "end": 3230.2, "word": " your", "probability": 0.625}, {"start": 3230.2, 
"end": 3230.72, "word": " statistic", "probability": 0.82373046875}, {"start": 3230.72, "end": 3232.0, "word": " falls", "probability": 0.818359375}, {"start": 3232.0, "end": 3232.5, "word": " in", "probability": 0.93505859375}, {"start": 3232.5, "end": 3232.74, "word": " this", "probability": 0.94140625}, {"start": 3232.74, "end": 3233.16, "word": " rejection", "probability": 0.90966796875}, {"start": 3233.16, "end": 3233.52, "word": " region.", "probability": 0.70947265625}, {"start": 3233.96, "end": 3234.14, "word": " So", "probability": 0.94580078125}, {"start": 3234.14, "end": 3234.26, "word": " we", "probability": 0.62890625}, {"start": 3234.26, "end": 3234.4, "word": " have", "probability": 0.94580078125}, {"start": 3234.4, "end": 3234.52, "word": " to", "probability": 0.9658203125}, {"start": 3234.52, "end": 3234.8, "word": " reject", "probability": 0.91552734375}, {"start": 3234.8, "end": 3234.96, "word": " the", "probability": 0.7451171875}, {"start": 3234.96, "end": 3235.08, "word": " null", "probability": 0.9716796875}, {"start": 3235.08, "end": 3235.56, "word": " hypothesis.", "probability": 0.806640625}, {"start": 3236.16, "end": 3236.28, "word": " So", "probability": 0.94287109375}, {"start": 3236.28, "end": 3236.5, "word": " my", "probability": 0.93212890625}, {"start": 3236.5, "end": 3236.92, "word": " conclusion", "probability": 0.9287109375}, {"start": 3236.92, "end": 3237.42, "word": " is", "probability": 0.94482421875}, {"start": 3237.42, "end": 3237.68, "word": " there", "probability": 0.7939453125}, {"start": 3237.68, "end": 3237.86, "word": " is", "probability": 0.943359375}, {"start": 3237.86, "end": 3238.34, "word": " sufficient", "probability": 0.8974609375}, {"start": 3238.34, "end": 3238.86, "word": " evidence", "probability": 0.9560546875}, {"start": 3238.86, "end": 3239.6, "word": " to", "probability": 0.9462890625}, {"start": 3239.6, "end": 3240.04, "word": " support", "probability": 0.98193359375}, {"start": 3240.04, "end": 3240.38, 
"word": " the", "probability": 0.8740234375}, {"start": 3240.38, "end": 3240.92, "word": " alternative", "probability": 0.92724609375}, {"start": 3240.92, "end": 3241.64, "word": " hypothesis.", "probability": 0.83447265625}, {"start": 3245.1, "end": 3245.88, "word": " The", "probability": 0.77001953125}, {"start": 3245.88, "end": 3246.14, "word": " other", "probability": 0.89453125}, {"start": 3246.14, "end": 3246.68, "word": " approach,", "probability": 0.9189453125}, {"start": 3248.14, "end": 3248.52, "word": " confidence", "probability": 0.95361328125}, {"start": 3248.52, "end": 3249.12, "word": " interval", "probability": 0.9658203125}, {"start": 3249.12, "end": 3249.94, "word": " for", "probability": 0.55908203125}, {"start": 3249.94, "end": 3250.28, "word": " mu1", "probability": 0.482421875}, {"start": 3250.28, "end": 3250.6, "word": " minus", "probability": 0.9716796875}, {"start": 3250.6, "end": 3251.0, "word": " mu2.", "probability": 0.86328125}], "temperature": 1.0}, {"id": 123, "seek": 327361, "start": 3252.05, "end": 3273.61, "text": " Again, the formula is, he asks about, since we reject the null hypothesis, so this hypothesis is false, I mean the difference is not zero. Can we be 95% confident that the mean of New York Stock Exchange is greater than or less than? 
Let's see.", "tokens": [3764, 11, 264, 8513, 307, 11, 415, 8962, 466, 11, 1670, 321, 8248, 264, 18184, 17291, 11, 370, 341, 17291, 307, 7908, 11, 286, 914, 264, 2649, 307, 406, 4018, 13, 1664, 321, 312, 13420, 4, 6679, 300, 264, 914, 295, 1873, 3609, 17857, 31169, 307, 5044, 813, 420, 1570, 813, 30, 961, 311, 536, 13], "avg_logprob": -0.24986292604814497, "compression_ratio": 1.4, "no_speech_prob": 0.0, "words": [{"start": 3252.05, "end": 3252.41, "word": " Again,", "probability": 0.7099609375}, {"start": 3252.61, "end": 3252.65, "word": " the", "probability": 0.8828125}, {"start": 3252.65, "end": 3252.99, "word": " formula", "probability": 0.88916015625}, {"start": 3252.99, "end": 3253.33, "word": " is,", "probability": 0.92919921875}, {"start": 3253.45, "end": 3253.63, "word": " he", "probability": 0.296630859375}, {"start": 3253.63, "end": 3253.87, "word": " asks", "probability": 0.4365234375}, {"start": 3253.87, "end": 3254.19, "word": " about,", "probability": 0.89453125}, {"start": 3254.57, "end": 3254.87, "word": " since", "probability": 0.65673828125}, {"start": 3254.87, "end": 3255.05, "word": " we", "probability": 0.95361328125}, {"start": 3255.05, "end": 3255.49, "word": " reject", "probability": 0.7705078125}, {"start": 3255.49, "end": 3255.67, "word": " the", "probability": 0.79248046875}, {"start": 3255.67, "end": 3255.81, "word": " null", "probability": 0.95166015625}, {"start": 3255.81, "end": 3256.31, "word": " hypothesis,", "probability": 0.787109375}, {"start": 3258.01, "end": 3258.23, "word": " so", "probability": 0.7353515625}, {"start": 3258.23, "end": 3258.45, "word": " this", "probability": 0.91552734375}, {"start": 3258.45, "end": 3258.89, "word": " hypothesis", "probability": 0.83935546875}, {"start": 3258.89, "end": 3259.23, "word": " is", "probability": 0.9375}, {"start": 3259.23, "end": 3259.61, "word": " false,", "probability": 0.890625}, {"start": 3260.19, "end": 3260.37, "word": " I", "probability": 0.87451171875}, {"start": 3260.37, 
"end": 3260.47, "word": " mean", "probability": 0.97119140625}, {"start": 3260.47, "end": 3260.61, "word": " the", "probability": 0.61572265625}, {"start": 3260.61, "end": 3260.93, "word": " difference", "probability": 0.865234375}, {"start": 3260.93, "end": 3261.13, "word": " is", "probability": 0.951171875}, {"start": 3261.13, "end": 3261.31, "word": " not", "probability": 0.94482421875}, {"start": 3261.31, "end": 3261.63, "word": " zero.", "probability": 0.8125}, {"start": 3263.17, "end": 3263.61, "word": " Can", "probability": 0.96240234375}, {"start": 3263.61, "end": 3263.83, "word": " we", "probability": 0.95556640625}, {"start": 3263.83, "end": 3264.01, "word": " be", "probability": 0.95068359375}, {"start": 3264.01, "end": 3264.75, "word": " 95", "probability": 0.90283203125}, {"start": 3264.75, "end": 3265.47, "word": "%", "probability": 0.75439453125}, {"start": 3265.47, "end": 3265.99, "word": " confident", "probability": 0.9658203125}, {"start": 3265.99, "end": 3266.47, "word": " that", "probability": 0.93798828125}, {"start": 3266.47, "end": 3266.83, "word": " the", "probability": 0.89208984375}, {"start": 3266.83, "end": 3267.01, "word": " mean", "probability": 0.8818359375}, {"start": 3267.01, "end": 3267.35, "word": " of", "probability": 0.95849609375}, {"start": 3267.35, "end": 3267.63, "word": " New", "probability": 0.66748046875}, {"start": 3267.63, "end": 3267.85, "word": " York", "probability": 0.95458984375}, {"start": 3267.85, "end": 3268.33, "word": " Stock", "probability": 0.8251953125}, {"start": 3268.33, "end": 3270.41, "word": " Exchange", "probability": 0.79443359375}, {"start": 3270.41, "end": 3271.01, "word": " is", "probability": 0.9208984375}, {"start": 3271.01, "end": 3271.33, "word": " greater", "probability": 0.90869140625}, {"start": 3271.33, "end": 3271.61, "word": " than", "probability": 0.94189453125}, {"start": 3271.61, "end": 3271.85, "word": " or", "probability": 0.1405029296875}, {"start": 3271.85, "end": 3271.85, "word": 
" less", "probability": 0.85107421875}, {"start": 3271.85, "end": 3272.05, "word": " than?", "probability": 0.94775390625}, {"start": 3272.81, "end": 3273.45, "word": " Let's", "probability": 0.92919921875}, {"start": 3273.45, "end": 3273.61, "word": " see.", "probability": 0.76318359375}], "temperature": 1.0}, {"id": 124, "seek": 329727, "start": 3274.79, "end": 3297.27, "text": " Let's formulate or let's construct a confidence interval for mu1 minus mu2. This is your formula. So x1 bar minus x2 bar, if you go back a little bit to these two values, x1 bar is 3.27 minus x2 bar is 2.53. The difference is", "tokens": [961, 311, 47881, 420, 718, 311, 7690, 257, 6687, 15035, 337, 2992, 16, 3175, 2992, 17, 13, 639, 307, 428, 8513, 13, 407, 2031, 16, 2159, 3175, 2031, 17, 2159, 11, 498, 291, 352, 646, 257, 707, 857, 281, 613, 732, 4190, 11, 2031, 16, 2159, 307, 805, 13, 10076, 3175, 2031, 17, 2159, 307, 568, 13, 19584, 13, 440, 2649, 307], "avg_logprob": -0.21775793414267283, "compression_ratio": 1.4580645161290322, "no_speech_prob": 0.0, "words": [{"start": 3274.79, "end": 3275.19, "word": " Let's", "probability": 0.645751953125}, {"start": 3275.19, "end": 3275.55, "word": " formulate", "probability": 0.666015625}, {"start": 3275.55, "end": 3276.01, "word": " or", "probability": 0.6357421875}, {"start": 3276.01, "end": 3276.39, "word": " let's", "probability": 0.86865234375}, {"start": 3276.39, "end": 3277.29, "word": " construct", "probability": 0.9521484375}, {"start": 3277.29, "end": 3277.53, "word": " a", "probability": 0.7998046875}, {"start": 3277.53, "end": 3277.91, "word": " confidence", "probability": 0.94384765625}, {"start": 3277.91, "end": 3278.45, "word": " interval", "probability": 0.97021484375}, {"start": 3278.45, "end": 3279.21, "word": " for", "probability": 0.86572265625}, {"start": 3279.21, "end": 3279.57, "word": " mu1", "probability": 0.49951171875}, {"start": 3279.57, "end": 3279.85, "word": " minus", "probability": 0.783203125}, {"start": 
3279.85, "end": 3280.49, "word": " mu2.", "probability": 0.90673828125}, {"start": 3280.59, "end": 3280.83, "word": " This", "probability": 0.5986328125}, {"start": 3280.83, "end": 3280.91, "word": " is", "probability": 0.939453125}, {"start": 3280.91, "end": 3281.13, "word": " your", "probability": 0.6767578125}, {"start": 3281.13, "end": 3281.55, "word": " formula.", "probability": 0.890625}, {"start": 3282.57, "end": 3282.73, "word": " So", "probability": 0.72216796875}, {"start": 3282.73, "end": 3283.17, "word": " x1", "probability": 0.704345703125}, {"start": 3283.17, "end": 3283.37, "word": " bar", "probability": 0.80322265625}, {"start": 3283.37, "end": 3283.95, "word": " minus", "probability": 0.96728515625}, {"start": 3283.95, "end": 3284.41, "word": " x2", "probability": 0.9921875}, {"start": 3284.41, "end": 3284.73, "word": " bar,", "probability": 0.9384765625}, {"start": 3285.29, "end": 3285.45, "word": " if", "probability": 0.91015625}, {"start": 3285.45, "end": 3285.53, "word": " you", "probability": 0.7119140625}, {"start": 3285.53, "end": 3285.79, "word": " go", "probability": 0.9443359375}, {"start": 3285.79, "end": 3286.01, "word": " back", "probability": 0.884765625}, {"start": 3286.01, "end": 3286.13, "word": " a", "probability": 0.62646484375}, {"start": 3286.13, "end": 3286.27, "word": " little", "probability": 0.86328125}, {"start": 3286.27, "end": 3286.59, "word": " bit", "probability": 0.9443359375}, {"start": 3286.59, "end": 3287.05, "word": " to", "probability": 0.861328125}, {"start": 3287.05, "end": 3287.27, "word": " these", "probability": 0.84912109375}, {"start": 3287.27, "end": 3287.45, "word": " two", "probability": 0.87109375}, {"start": 3287.45, "end": 3287.89, "word": " values,", "probability": 0.9609375}, {"start": 3288.75, "end": 3289.25, "word": " x1", "probability": 0.976318359375}, {"start": 3289.25, "end": 3289.45, "word": " bar", "probability": 0.93359375}, {"start": 3289.45, "end": 3289.59, "word": " is", "probability": 
0.91357421875}, {"start": 3289.59, "end": 3289.77, "word": " 3", "probability": 0.90283203125}, {"start": 3289.77, "end": 3290.39, "word": ".27", "probability": 0.772705078125}, {"start": 3290.39, "end": 3291.91, "word": " minus", "probability": 0.9501953125}, {"start": 3291.91, "end": 3293.79, "word": " x2", "probability": 0.867919921875}, {"start": 3293.79, "end": 3294.15, "word": " bar", "probability": 0.93701171875}, {"start": 3294.15, "end": 3294.97, "word": " is", "probability": 0.450439453125}, {"start": 3294.97, "end": 3295.51, "word": " 2", "probability": 0.9931640625}, {"start": 3295.51, "end": 3296.11, "word": ".53.", "probability": 0.989501953125}, {"start": 3296.27, "end": 3296.39, "word": " The", "probability": 0.87353515625}, {"start": 3296.39, "end": 3296.81, "word": " difference", "probability": 0.87353515625}, {"start": 3296.81, "end": 3297.27, "word": " is", "probability": 0.9404296875}], "temperature": 1.0}, {"id": 125, "seek": 332556, "start": 3298.66, "end": 3325.56, "text": " this amount, 0.74, plus or minus T alpha over 2, the critical value we have here, so plus or minus this amount, times the standard error of this estimate, you easily can compute this value by 0.3628, and you will end with this interval. 
Now, this interval means that", "tokens": [341, 2372, 11, 1958, 13, 34026, 11, 1804, 420, 3175, 314, 8961, 670, 568, 11, 264, 4924, 2158, 321, 362, 510, 11, 370, 1804, 420, 3175, 341, 2372, 11, 1413, 264, 3832, 6713, 295, 341, 12539, 11, 291, 3612, 393, 14722, 341, 2158, 538, 1958, 13, 11309, 11205, 11, 293, 291, 486, 917, 365, 341, 15035, 13, 823, 11, 341, 15035, 1355, 300], "avg_logprob": -0.18334961403161287, "compression_ratio": 1.5892857142857142, "no_speech_prob": 0.0, "words": [{"start": 3298.66, "end": 3299.0, "word": " this", "probability": 0.3720703125}, {"start": 3299.0, "end": 3299.28, "word": " amount,", "probability": 0.8759765625}, {"start": 3299.56, "end": 3299.64, "word": " 0", "probability": 0.56640625}, {"start": 3299.64, "end": 3300.6, "word": ".74,", "probability": 0.9736328125}, {"start": 3300.92, "end": 3301.98, "word": " plus", "probability": 0.91796875}, {"start": 3301.98, "end": 3302.22, "word": " or", "probability": 0.955078125}, {"start": 3302.22, "end": 3302.6, "word": " minus", "probability": 0.9853515625}, {"start": 3302.6, "end": 3302.92, "word": " T", "probability": 0.72265625}, {"start": 3302.92, "end": 3303.14, "word": " alpha", "probability": 0.451904296875}, {"start": 3303.14, "end": 3303.44, "word": " over", "probability": 0.921875}, {"start": 3303.44, "end": 3303.74, "word": " 2,", "probability": 0.646484375}, {"start": 3303.92, "end": 3304.06, "word": " the", "probability": 0.89990234375}, {"start": 3304.06, "end": 3304.42, "word": " critical", "probability": 0.9375}, {"start": 3304.42, "end": 3304.78, "word": " value", "probability": 0.97412109375}, {"start": 3304.78, "end": 3304.96, "word": " we", "probability": 0.865234375}, {"start": 3304.96, "end": 3305.28, "word": " have", "probability": 0.95068359375}, {"start": 3305.28, "end": 3306.06, "word": " here,", "probability": 0.833984375}, {"start": 3306.46, "end": 3306.6, "word": " so", "probability": 0.72900390625}, {"start": 3306.6, "end": 3306.84, "word": " plus", 
"probability": 0.90234375}, {"start": 3306.84, "end": 3307.06, "word": " or", "probability": 0.955078125}, {"start": 3307.06, "end": 3307.3, "word": " minus", "probability": 0.9853515625}, {"start": 3307.3, "end": 3307.58, "word": " this", "probability": 0.9404296875}, {"start": 3307.58, "end": 3307.94, "word": " amount,", "probability": 0.90185546875}, {"start": 3308.46, "end": 3308.86, "word": " times", "probability": 0.923828125}, {"start": 3308.86, "end": 3310.1, "word": " the", "probability": 0.90380859375}, {"start": 3310.1, "end": 3310.56, "word": " standard", "probability": 0.92919921875}, {"start": 3310.56, "end": 3310.98, "word": " error", "probability": 0.89111328125}, {"start": 3310.98, "end": 3312.0, "word": " of", "probability": 0.955078125}, {"start": 3312.0, "end": 3312.34, "word": " this", "probability": 0.94677734375}, {"start": 3312.34, "end": 3312.94, "word": " estimate,", "probability": 0.89208984375}, {"start": 3314.24, "end": 3314.58, "word": " you", "probability": 0.9521484375}, {"start": 3314.58, "end": 3315.02, "word": " easily", "probability": 0.85693359375}, {"start": 3315.02, "end": 3315.28, "word": " can", "probability": 0.93798828125}, {"start": 3315.28, "end": 3315.66, "word": " compute", "probability": 0.89892578125}, {"start": 3315.66, "end": 3316.1, "word": " this", "probability": 0.94384765625}, {"start": 3316.1, "end": 3317.28, "word": " value", "probability": 0.9677734375}, {"start": 3317.28, "end": 3317.68, "word": " by", "probability": 0.95947265625}, {"start": 3317.68, "end": 3318.34, "word": " 0", "probability": 0.96240234375}, {"start": 3318.34, "end": 3319.28, "word": ".3628,", "probability": 0.97998046875}, {"start": 3320.08, "end": 3320.42, "word": " and", "probability": 0.93994140625}, {"start": 3320.42, "end": 3320.56, "word": " you", "probability": 0.86474609375}, {"start": 3320.56, "end": 3320.7, "word": " will", "probability": 0.865234375}, {"start": 3320.7, "end": 3320.9, "word": " end", "probability": 
0.91357421875}, {"start": 3320.9, "end": 3321.06, "word": " with", "probability": 0.83935546875}, {"start": 3321.06, "end": 3321.32, "word": " this", "probability": 0.93896484375}, {"start": 3321.32, "end": 3321.7, "word": " interval.", "probability": 0.7861328125}, {"start": 3322.72, "end": 3323.06, "word": " Now,", "probability": 0.9375}, {"start": 3323.26, "end": 3324.36, "word": " this", "probability": 0.94873046875}, {"start": 3324.36, "end": 3324.72, "word": " interval", "probability": 0.9677734375}, {"start": 3324.72, "end": 3325.14, "word": " means", "probability": 0.92333984375}, {"start": 3325.14, "end": 3325.56, "word": " that", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 126, "seek": 335548, "start": 3326.84, "end": 3355.48, "text": " We are 95% confident that the difference between the two populations means fall between these two values. Now the question is, since we are testing mu1 minus mu2 equals zero, does this interval contain zero or not? So the question is, does your interval contain zero?", "tokens": [492, 366, 13420, 4, 6679, 300, 264, 2649, 1296, 264, 732, 12822, 1355, 2100, 1296, 613, 732, 4190, 13, 823, 264, 1168, 307, 11, 1670, 321, 366, 4997, 2992, 16, 3175, 2992, 17, 6915, 4018, 11, 775, 341, 15035, 5304, 4018, 420, 406, 30, 407, 264, 1168, 307, 11, 775, 428, 15035, 5304, 4018, 30], "avg_logprob": -0.2547433094254562, "compression_ratio": 1.6144578313253013, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3326.84, "end": 3327.22, "word": " We", "probability": 0.2152099609375}, {"start": 3327.22, "end": 3328.16, "word": " are", "probability": 0.91650390625}, {"start": 3328.16, "end": 3328.7, "word": " 95", "probability": 0.904296875}, {"start": 3328.7, "end": 3329.38, "word": "%", "probability": 0.83349609375}, {"start": 3329.38, "end": 3329.82, "word": " confident", "probability": 0.8896484375}, {"start": 3329.82, "end": 3330.2, "word": " that", "probability": 0.8115234375}, {"start": 3330.2, "end": 3330.86, 
"word": " the", "probability": 0.79345703125}, {"start": 3330.86, "end": 3331.38, "word": " difference", "probability": 0.79052734375}, {"start": 3331.38, "end": 3331.9, "word": " between", "probability": 0.87158203125}, {"start": 3331.9, "end": 3332.24, "word": " the", "probability": 0.82080078125}, {"start": 3332.24, "end": 3332.4, "word": " two", "probability": 0.8701171875}, {"start": 3332.4, "end": 3332.94, "word": " populations", "probability": 0.68310546875}, {"start": 3332.94, "end": 3333.42, "word": " means", "probability": 0.59765625}, {"start": 3333.42, "end": 3334.14, "word": " fall", "probability": 0.1485595703125}, {"start": 3334.14, "end": 3334.54, "word": " between", "probability": 0.84130859375}, {"start": 3334.54, "end": 3335.48, "word": " these", "probability": 0.875}, {"start": 3335.48, "end": 3335.7, "word": " two", "probability": 0.90576171875}, {"start": 3335.7, "end": 3336.08, "word": " values.", "probability": 0.75048828125}, {"start": 3336.82, "end": 3337.1, "word": " Now", "probability": 0.873046875}, {"start": 3337.1, "end": 3337.26, "word": " the", "probability": 0.724609375}, {"start": 3337.26, "end": 3337.58, "word": " question", "probability": 0.91162109375}, {"start": 3337.58, "end": 3337.96, "word": " is,", "probability": 0.9482421875}, {"start": 3338.96, "end": 3339.5, "word": " since", "probability": 0.8330078125}, {"start": 3339.5, "end": 3339.66, "word": " we", "probability": 0.94921875}, {"start": 3339.66, "end": 3339.8, "word": " are", "probability": 0.9296875}, {"start": 3339.8, "end": 3340.3, "word": " testing", "probability": 0.88330078125}, {"start": 3340.3, "end": 3342.98, "word": " mu1", "probability": 0.6002197265625}, {"start": 3342.98, "end": 3343.26, "word": " minus", "probability": 0.677734375}, {"start": 3343.26, "end": 3343.64, "word": " mu2", "probability": 0.960205078125}, {"start": 3343.64, "end": 3343.98, "word": " equals", "probability": 0.8583984375}, {"start": 3343.98, "end": 3344.26, "word": " zero,", 
"probability": 0.6083984375}, {"start": 3346.14, "end": 3346.52, "word": " does", "probability": 0.9521484375}, {"start": 3346.52, "end": 3346.84, "word": " this", "probability": 0.93994140625}, {"start": 3346.84, "end": 3347.34, "word": " interval", "probability": 0.953125}, {"start": 3347.34, "end": 3348.36, "word": " contain", "probability": 0.86767578125}, {"start": 3348.36, "end": 3348.74, "word": " zero", "probability": 0.90283203125}, {"start": 3348.74, "end": 3349.38, "word": " or", "probability": 0.88134765625}, {"start": 3349.38, "end": 3349.68, "word": " not?", "probability": 0.91162109375}, {"start": 3350.16, "end": 3350.38, "word": " So", "probability": 0.865234375}, {"start": 3350.38, "end": 3350.52, "word": " the", "probability": 0.8076171875}, {"start": 3350.52, "end": 3350.8, "word": " question", "probability": 0.91748046875}, {"start": 3350.8, "end": 3351.06, "word": " is,", "probability": 0.947265625}, {"start": 3351.12, "end": 3351.44, "word": " does", "probability": 0.5615234375}, {"start": 3351.44, "end": 3352.8, "word": " your", "probability": 0.802734375}, {"start": 3352.8, "end": 3353.4, "word": " interval", "probability": 0.96044921875}, {"start": 3353.4, "end": 3355.02, "word": " contain", "probability": 0.91162109375}, {"start": 3355.02, "end": 3355.48, "word": " zero?", "probability": 0.90625}], "temperature": 1.0}, {"id": 127, "seek": 337473, "start": 3356.55, "end": 3374.73, "text": " contains zero, zero star, this zero. Maybe it's one, not zero, in this case it's zero. Now, this interval, the lower bound is positive, the upper bound is positive, it's also positive, so zero is not inside the interval. 
So that means", "tokens": [8306, 4018, 11, 4018, 3543, 11, 341, 4018, 13, 2704, 309, 311, 472, 11, 406, 4018, 11, 294, 341, 1389, 309, 311, 4018, 13, 823, 11, 341, 15035, 11, 264, 3126, 5472, 307, 3353, 11, 264, 6597, 5472, 307, 3353, 11, 309, 311, 611, 3353, 11, 370, 4018, 307, 406, 1854, 264, 15035, 13, 407, 300, 1355], "avg_logprob": -0.29121767446912566, "compression_ratio": 1.7028985507246377, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 3356.55, "end": 3356.97, "word": " contains", "probability": 0.029754638671875}, {"start": 3356.97, "end": 3357.53, "word": " zero,", "probability": 0.374755859375}, {"start": 3357.59, "end": 3357.77, "word": " zero", "probability": 0.75048828125}, {"start": 3357.77, "end": 3358.13, "word": " star,", "probability": 0.609375}, {"start": 3358.25, "end": 3358.41, "word": " this", "probability": 0.30615234375}, {"start": 3358.41, "end": 3358.73, "word": " zero.", "probability": 0.830078125}, {"start": 3360.53, "end": 3360.81, "word": " Maybe", "probability": 0.876953125}, {"start": 3360.81, "end": 3361.01, "word": " it's", "probability": 0.9052734375}, {"start": 3361.01, "end": 3361.25, "word": " one,", "probability": 0.87255859375}, {"start": 3361.35, "end": 3361.49, "word": " not", "probability": 0.91015625}, {"start": 3361.49, "end": 3361.79, "word": " zero,", "probability": 0.92138671875}, {"start": 3362.17, "end": 3362.35, "word": " in", "probability": 0.86083984375}, {"start": 3362.35, "end": 3362.53, "word": " this", "probability": 0.9462890625}, {"start": 3362.53, "end": 3362.75, "word": " case", "probability": 0.92236328125}, {"start": 3362.75, "end": 3362.93, "word": " it's", "probability": 0.860107421875}, {"start": 3362.93, "end": 3363.21, "word": " zero.", "probability": 0.91650390625}, {"start": 3363.91, "end": 3364.21, "word": " Now,", "probability": 0.9404296875}, {"start": 3364.99, "end": 3365.31, "word": " this", "probability": 0.87890625}, {"start": 3365.31, "end": 3365.71, "word": " interval,", 
"probability": 0.95654296875}, {"start": 3365.85, "end": 3365.97, "word": " the", "probability": 0.8935546875}, {"start": 3365.97, "end": 3366.25, "word": " lower", "probability": 0.87353515625}, {"start": 3366.25, "end": 3366.59, "word": " bound", "probability": 0.91943359375}, {"start": 3366.59, "end": 3366.89, "word": " is", "probability": 0.91162109375}, {"start": 3366.89, "end": 3367.29, "word": " positive,", "probability": 0.93310546875}, {"start": 3367.49, "end": 3367.59, "word": " the", "probability": 0.91455078125}, {"start": 3367.59, "end": 3367.83, "word": " upper", "probability": 0.82421875}, {"start": 3367.83, "end": 3368.09, "word": " bound", "probability": 0.90625}, {"start": 3368.09, "end": 3368.31, "word": " is", "probability": 0.947265625}, {"start": 3368.31, "end": 3368.67, "word": " positive,", "probability": 0.94189453125}, {"start": 3369.13, "end": 3369.31, "word": " it's", "probability": 0.67529296875}, {"start": 3369.31, "end": 3369.53, "word": " also", "probability": 0.87548828125}, {"start": 3369.53, "end": 3369.89, "word": " positive,", "probability": 0.947265625}, {"start": 3370.31, "end": 3370.81, "word": " so", "probability": 0.9462890625}, {"start": 3370.81, "end": 3371.05, "word": " zero", "probability": 0.890625}, {"start": 3371.05, "end": 3371.19, "word": " is", "probability": 0.6826171875}, {"start": 3371.19, "end": 3371.45, "word": " not", "probability": 0.94482421875}, {"start": 3371.45, "end": 3371.89, "word": " inside", "probability": 0.91796875}, {"start": 3371.89, "end": 3372.11, "word": " the", "probability": 0.91650390625}, {"start": 3372.11, "end": 3372.47, "word": " interval.", "probability": 0.96435546875}, {"start": 3373.51, "end": 3373.85, "word": " So", "probability": 0.9345703125}, {"start": 3373.85, "end": 3374.27, "word": " that", "probability": 0.72265625}, {"start": 3374.27, "end": 3374.73, "word": " means", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 128, "seek": 340373, "start": 3376.47, "end": 
3403.73, "text": " It's never equal zero, so we reject the null hypothesis. So since zero lies outside this interval, I mean the confidence interval does not contain zero. That means we have to reject the null hypothesis. So if the rule of thumb is if the confidence interval, in this case for mu1 minus mu2 contains zero.", "tokens": [467, 311, 1128, 2681, 4018, 11, 370, 321, 8248, 264, 18184, 17291, 13, 407, 1670, 4018, 9134, 2380, 341, 15035, 11, 286, 914, 264, 6687, 15035, 775, 406, 5304, 4018, 13, 663, 1355, 321, 362, 281, 8248, 264, 18184, 17291, 13, 407, 498, 264, 4978, 295, 9298, 307, 498, 264, 6687, 15035, 11, 294, 341, 1389, 337, 2992, 16, 3175, 2992, 17, 8306, 4018, 13], "avg_logprob": -0.2298768903269912, "compression_ratio": 1.7882352941176471, "no_speech_prob": 0.0, "words": [{"start": 3376.4700000000003, "end": 3377.15, "word": " It's", "probability": 0.650634765625}, {"start": 3377.15, "end": 3377.51, "word": " never", "probability": 0.927734375}, {"start": 3377.51, "end": 3378.59, "word": " equal", "probability": 0.58251953125}, {"start": 3378.59, "end": 3378.99, "word": " zero,", "probability": 0.68701171875}, {"start": 3379.27, "end": 3379.47, "word": " so", "probability": 0.94140625}, {"start": 3379.47, "end": 3379.65, "word": " we", "probability": 0.9375}, {"start": 3379.65, "end": 3380.05, "word": " reject", "probability": 0.87060546875}, {"start": 3380.05, "end": 3380.23, "word": " the", "probability": 0.76025390625}, {"start": 3380.23, "end": 3380.35, "word": " null", "probability": 0.29296875}, {"start": 3380.35, "end": 3380.77, "word": " hypothesis.", "probability": 0.1593017578125}, {"start": 3381.03, "end": 3381.19, "word": " So", "probability": 0.6953125}, {"start": 3381.19, "end": 3382.21, "word": " since", "probability": 0.57373046875}, {"start": 3382.21, "end": 3382.51, "word": " zero", "probability": 0.85498046875}, {"start": 3382.51, "end": 3382.87, "word": " lies", "probability": 0.93798828125}, {"start": 3382.87, "end": 3383.45, 
"word": " outside", "probability": 0.845703125}, {"start": 3383.45, "end": 3383.73, "word": " this", "probability": 0.9267578125}, {"start": 3383.73, "end": 3384.13, "word": " interval,", "probability": 0.95947265625}, {"start": 3384.21, "end": 3384.33, "word": " I", "probability": 0.9765625}, {"start": 3384.33, "end": 3384.45, "word": " mean", "probability": 0.88525390625}, {"start": 3384.45, "end": 3384.61, "word": " the", "probability": 0.7255859375}, {"start": 3384.61, "end": 3385.05, "word": " confidence", "probability": 0.97021484375}, {"start": 3385.05, "end": 3385.67, "word": " interval", "probability": 0.93896484375}, {"start": 3385.67, "end": 3386.03, "word": " does", "probability": 0.9560546875}, {"start": 3386.03, "end": 3386.25, "word": " not", "probability": 0.94775390625}, {"start": 3386.25, "end": 3386.69, "word": " contain", "probability": 0.92529296875}, {"start": 3386.69, "end": 3387.09, "word": " zero.", "probability": 0.89990234375}, {"start": 3387.53, "end": 3388.05, "word": " That", "probability": 0.90625}, {"start": 3388.05, "end": 3388.37, "word": " means", "probability": 0.9375}, {"start": 3388.37, "end": 3388.55, "word": " we", "probability": 0.9287109375}, {"start": 3388.55, "end": 3388.73, "word": " have", "probability": 0.94580078125}, {"start": 3388.73, "end": 3388.85, "word": " to", "probability": 0.97021484375}, {"start": 3388.85, "end": 3389.19, "word": " reject", "probability": 0.9169921875}, {"start": 3389.19, "end": 3389.37, "word": " the", "probability": 0.8642578125}, {"start": 3389.37, "end": 3389.55, "word": " null", "probability": 0.9365234375}, {"start": 3389.55, "end": 3389.97, "word": " hypothesis.", "probability": 0.9111328125}, {"start": 3390.45, "end": 3390.65, "word": " So", "probability": 0.93310546875}, {"start": 3390.65, "end": 3390.95, "word": " if", "probability": 0.8623046875}, {"start": 3390.95, "end": 3392.23, "word": " the", "probability": 0.63232421875}, {"start": 3392.23, "end": 3392.43, "word": " rule", 
"probability": 0.8896484375}, {"start": 3392.43, "end": 3392.61, "word": " of", "probability": 0.96240234375}, {"start": 3392.61, "end": 3392.83, "word": " thumb", "probability": 0.8212890625}, {"start": 3392.83, "end": 3393.17, "word": " is", "probability": 0.94384765625}, {"start": 3393.17, "end": 3393.63, "word": " if", "probability": 0.65185546875}, {"start": 3393.63, "end": 3395.45, "word": " the", "probability": 0.84716796875}, {"start": 3395.45, "end": 3396.23, "word": " confidence", "probability": 0.97900390625}, {"start": 3396.23, "end": 3396.75, "word": " interval,", "probability": 0.96337890625}, {"start": 3397.59, "end": 3397.77, "word": " in", "probability": 0.9375}, {"start": 3397.77, "end": 3397.99, "word": " this", "probability": 0.94677734375}, {"start": 3397.99, "end": 3398.29, "word": " case", "probability": 0.8974609375}, {"start": 3398.29, "end": 3398.65, "word": " for", "probability": 0.81103515625}, {"start": 3398.65, "end": 3399.63, "word": " mu1", "probability": 0.577880859375}, {"start": 3399.63, "end": 3399.97, "word": " minus", "probability": 0.94091796875}, {"start": 3399.97, "end": 3400.59, "word": " mu2", "probability": 0.9736328125}, {"start": 3400.59, "end": 3403.25, "word": " contains", "probability": 0.406494140625}, {"start": 3403.25, "end": 3403.73, "word": " zero.", "probability": 0.8916015625}], "temperature": 1.0}, {"id": 129, "seek": 343521, "start": 3408.11, "end": 3435.21, "text": " then we don't reject we don't reject otherwise we have to reject it's zero but be careful not always zero here we are assuming the difference is zero but the difference if the difference is one then ask yourself is this interval contain one one but in this case it's zero so the question is", "tokens": [550, 321, 500, 380, 8248, 321, 500, 380, 8248, 5911, 321, 362, 281, 8248, 309, 311, 4018, 457, 312, 5026, 406, 1009, 4018, 510, 321, 366, 11926, 264, 2649, 307, 4018, 457, 264, 2649, 498, 264, 2649, 307, 472, 550, 1029, 1803, 307, 341, 15035, 
5304, 472, 472, 457, 294, 341, 1389, 309, 311, 4018, 370, 264, 1168, 307], "avg_logprob": -0.20898437723517418, "compression_ratio": 1.8774193548387097, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3408.1100000000006, "end": 3408.8300000000004, "word": " then", "probability": 0.316650390625}, {"start": 3408.8300000000004, "end": 3409.55, "word": " we", "probability": 0.76708984375}, {"start": 3409.55, "end": 3409.89, "word": " don't", "probability": 0.924072265625}, {"start": 3409.89, "end": 3410.31, "word": " reject", "probability": 0.888671875}, {"start": 3410.31, "end": 3411.59, "word": " we", "probability": 0.1541748046875}, {"start": 3411.59, "end": 3413.45, "word": " don't", "probability": 0.972412109375}, {"start": 3413.45, "end": 3414.75, "word": " reject", "probability": 0.89501953125}, {"start": 3414.75, "end": 3415.57, "word": " otherwise", "probability": 0.6748046875}, {"start": 3415.57, "end": 3416.51, "word": " we", "probability": 0.7919921875}, {"start": 3416.51, "end": 3416.67, "word": " have", "probability": 0.9423828125}, {"start": 3416.67, "end": 3416.77, "word": " to", "probability": 0.96923828125}, {"start": 3416.77, "end": 3417.15, "word": " reject", "probability": 0.91162109375}, {"start": 3417.15, "end": 3418.47, "word": " it's", "probability": 0.712158203125}, {"start": 3418.47, "end": 3418.77, "word": " zero", "probability": 0.69921875}, {"start": 3418.77, "end": 3419.97, "word": " but", "probability": 0.6591796875}, {"start": 3419.97, "end": 3420.27, "word": " be", "probability": 0.95166015625}, {"start": 3420.27, "end": 3420.65, "word": " careful", "probability": 0.96337890625}, {"start": 3420.65, "end": 3421.37, "word": " not", "probability": 0.87451171875}, {"start": 3421.37, "end": 3421.81, "word": " always", "probability": 0.9130859375}, {"start": 3421.81, "end": 3422.23, "word": " zero", "probability": 0.89453125}, {"start": 3422.23, "end": 3423.11, "word": " here", "probability": 0.77978515625}, {"start": 3423.11, 
"end": 3423.27, "word": " we", "probability": 0.94873046875}, {"start": 3423.27, "end": 3423.43, "word": " are", "probability": 0.77001953125}, {"start": 3423.43, "end": 3423.81, "word": " assuming", "probability": 0.919921875}, {"start": 3423.81, "end": 3424.03, "word": " the", "probability": 0.90380859375}, {"start": 3424.03, "end": 3424.37, "word": " difference", "probability": 0.86962890625}, {"start": 3424.37, "end": 3424.59, "word": " is", "probability": 0.94677734375}, {"start": 3424.59, "end": 3424.83, "word": " zero", "probability": 0.92236328125}, {"start": 3424.83, "end": 3425.05, "word": " but", "probability": 0.83056640625}, {"start": 3425.05, "end": 3425.25, "word": " the", "probability": 0.806640625}, {"start": 3425.25, "end": 3425.61, "word": " difference", "probability": 0.865234375}, {"start": 3425.61, "end": 3425.97, "word": " if", "probability": 0.90625}, {"start": 3425.97, "end": 3426.35, "word": " the", "probability": 0.70947265625}, {"start": 3426.35, "end": 3426.69, "word": " difference", "probability": 0.86181640625}, {"start": 3426.69, "end": 3426.89, "word": " is", "probability": 0.94775390625}, {"start": 3426.89, "end": 3427.19, "word": " one", "probability": 0.90966796875}, {"start": 3427.19, "end": 3428.01, "word": " then", "probability": 0.85107421875}, {"start": 3428.01, "end": 3428.33, "word": " ask", "probability": 0.9169921875}, {"start": 3428.33, "end": 3428.77, "word": " yourself", "probability": 0.83447265625}, {"start": 3428.77, "end": 3429.23, "word": " is", "probability": 0.8486328125}, {"start": 3429.23, "end": 3429.51, "word": " this", "probability": 0.9482421875}, {"start": 3429.51, "end": 3429.95, "word": " interval", "probability": 0.97412109375}, {"start": 3429.95, "end": 3430.47, "word": " contain", "probability": 0.70361328125}, {"start": 3430.47, "end": 3430.69, "word": " one", "probability": 0.8271484375}, {"start": 3430.69, "end": 3430.91, "word": " one", "probability": 0.29638671875}, {"start": 3430.91, "end": 
3432.31, "word": " but", "probability": 0.611328125}, {"start": 3432.31, "end": 3432.47, "word": " in", "probability": 0.93359375}, {"start": 3432.47, "end": 3432.73, "word": " this", "probability": 0.94775390625}, {"start": 3432.73, "end": 3433.13, "word": " case", "probability": 0.90673828125}, {"start": 3433.13, "end": 3433.69, "word": " it's", "probability": 0.87939453125}, {"start": 3433.69, "end": 3434.03, "word": " zero", "probability": 0.9228515625}, {"start": 3434.03, "end": 3434.43, "word": " so", "probability": 0.9130859375}, {"start": 3434.43, "end": 3434.61, "word": " the", "probability": 0.92529296875}, {"start": 3434.61, "end": 3434.89, "word": " question", "probability": 0.927734375}, {"start": 3434.89, "end": 3435.21, "word": " is", "probability": 0.95068359375}], "temperature": 1.0}, {"id": 130, "seek": 345791, "start": 3435.83, "end": 3457.91, "text": " Is the icon contains zero or not? Zero is outside, so we reject analog icons. Now, do you think the mean of New York stock is greater than Nasdaq or not? 
Since the interval ends, I mean lower than other bounds, are positive,", "tokens": [1119, 264, 6528, 8306, 4018, 420, 406, 30, 17182, 307, 2380, 11, 370, 321, 8248, 16660, 23308, 13, 823, 11, 360, 291, 519, 264, 914, 295, 1873, 3609, 4127, 307, 5044, 813, 16151, 2675, 80, 420, 406, 30, 4162, 264, 15035, 5314, 11, 286, 914, 3126, 813, 661, 29905, 11, 366, 3353, 11], "avg_logprob": -0.29803240133656395, "compression_ratio": 1.4331210191082802, "no_speech_prob": 0.0, "words": [{"start": 3435.83, "end": 3436.19, "word": " Is", "probability": 0.11187744140625}, {"start": 3436.19, "end": 3436.37, "word": " the", "probability": 0.8017578125}, {"start": 3436.37, "end": 3436.75, "word": " icon", "probability": 0.498291015625}, {"start": 3436.75, "end": 3437.25, "word": " contains", "probability": 0.218505859375}, {"start": 3437.25, "end": 3437.45, "word": " zero", "probability": 0.6484375}, {"start": 3437.45, "end": 3437.61, "word": " or", "probability": 0.89404296875}, {"start": 3437.61, "end": 3437.83, "word": " not?", "probability": 0.90869140625}, {"start": 3438.55, "end": 3438.85, "word": " Zero", "probability": 0.837890625}, {"start": 3438.85, "end": 3438.99, "word": " is", "probability": 0.9287109375}, {"start": 3438.99, "end": 3439.61, "word": " outside,", "probability": 0.81396484375}, {"start": 3440.09, "end": 3440.29, "word": " so", "probability": 0.90771484375}, {"start": 3440.29, "end": 3440.39, "word": " we", "probability": 0.708984375}, {"start": 3440.39, "end": 3440.71, "word": " reject", "probability": 0.95654296875}, {"start": 3440.71, "end": 3441.01, "word": " analog", "probability": 0.2568359375}, {"start": 3441.01, "end": 3441.41, "word": " icons.", "probability": 0.7001953125}, {"start": 3441.87, "end": 3442.49, "word": " Now,", "probability": 0.74365234375}, {"start": 3443.13, "end": 3443.29, "word": " do", "probability": 0.8837890625}, {"start": 3443.29, "end": 3443.43, "word": " you", "probability": 0.96142578125}, {"start": 3443.43, "end": 3443.69, 
"word": " think", "probability": 0.91650390625}, {"start": 3443.69, "end": 3443.99, "word": " the", "probability": 0.845703125}, {"start": 3443.99, "end": 3444.13, "word": " mean", "probability": 0.93896484375}, {"start": 3444.13, "end": 3444.33, "word": " of", "probability": 0.96728515625}, {"start": 3444.33, "end": 3444.69, "word": " New", "probability": 0.5341796875}, {"start": 3444.69, "end": 3445.03, "word": " York", "probability": 0.9501953125}, {"start": 3445.03, "end": 3445.87, "word": " stock", "probability": 0.59375}, {"start": 3445.87, "end": 3447.41, "word": " is", "probability": 0.89599609375}, {"start": 3447.41, "end": 3447.75, "word": " greater", "probability": 0.90576171875}, {"start": 3447.75, "end": 3448.07, "word": " than", "probability": 0.9423828125}, {"start": 3448.07, "end": 3448.41, "word": " Nasdaq", "probability": 0.7938639322916666}, {"start": 3448.41, "end": 3448.61, "word": " or", "probability": 0.85791015625}, {"start": 3448.61, "end": 3448.85, "word": " not?", "probability": 0.9375}, {"start": 3450.99, "end": 3451.67, "word": " Since", "probability": 0.79638671875}, {"start": 3451.67, "end": 3452.03, "word": " the", "probability": 0.91064453125}, {"start": 3452.03, "end": 3452.61, "word": " interval", "probability": 0.94140625}, {"start": 3452.61, "end": 3454.33, "word": " ends,", "probability": 0.81103515625}, {"start": 3455.07, "end": 3455.25, "word": " I", "probability": 0.91162109375}, {"start": 3455.25, "end": 3455.37, "word": " mean", "probability": 0.96875}, {"start": 3455.37, "end": 3455.63, "word": " lower", "probability": 0.89794921875}, {"start": 3455.63, "end": 3455.81, "word": " than", "probability": 0.52587890625}, {"start": 3455.81, "end": 3456.09, "word": " other", "probability": 0.6494140625}, {"start": 3456.09, "end": 3456.57, "word": " bounds,", "probability": 0.74658203125}, {"start": 3457.17, "end": 3457.55, "word": " are", "probability": 0.90283203125}, {"start": 3457.55, "end": 3457.91, "word": " positive,", 
"probability": 0.912109375}], "temperature": 1.0}, {"id": 131, "seek": 348301, "start": 3459.27, "end": 3483.01, "text": " Positive, positive, so that means the mean 1 is greater than mean 2. So mean 1 is means for New York is greater than the mean for Ottawa. If the interval is negative, negative, that means mean 1 is smaller than mean 2. If it's positive, positive, then mean 1 is greater than. If it's negative plus, then", "tokens": [46326, 11, 3353, 11, 370, 300, 1355, 264, 914, 502, 307, 5044, 813, 914, 568, 13, 407, 914, 502, 307, 1355, 337, 1873, 3609, 307, 5044, 813, 264, 914, 337, 40767, 13, 759, 264, 15035, 307, 3671, 11, 3671, 11, 300, 1355, 914, 502, 307, 4356, 813, 914, 568, 13, 759, 309, 311, 3353, 11, 3353, 11, 550, 914, 502, 307, 5044, 813, 13, 759, 309, 311, 3671, 1804, 11, 550], "avg_logprob": -0.2582465203271972, "compression_ratio": 2.125874125874126, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 3459.27, "end": 3459.77, "word": " Positive,", "probability": 0.209716796875}, {"start": 3459.91, "end": 3460.19, "word": " positive,", "probability": 0.8681640625}, {"start": 3460.47, "end": 3460.81, "word": " so", "probability": 0.6650390625}, {"start": 3460.81, "end": 3461.03, "word": " that", "probability": 0.9296875}, {"start": 3461.03, "end": 3461.37, "word": " means", "probability": 0.9287109375}, {"start": 3461.37, "end": 3462.15, "word": " the", "probability": 0.73681640625}, {"start": 3462.15, "end": 3462.37, "word": " mean", "probability": 0.9599609375}, {"start": 3462.37, "end": 3462.71, "word": " 1", "probability": 0.454345703125}, {"start": 3462.71, "end": 3463.83, "word": " is", "probability": 0.90234375}, {"start": 3463.83, "end": 3464.21, "word": " greater", "probability": 0.8662109375}, {"start": 3464.21, "end": 3464.47, "word": " than", "probability": 0.9404296875}, {"start": 3464.47, "end": 3464.65, "word": " mean", "probability": 0.669921875}, {"start": 3464.65, "end": 3464.87, "word": " 2.", "probability": 
0.97509765625}, {"start": 3465.09, "end": 3465.23, "word": " So", "probability": 0.828125}, {"start": 3465.23, "end": 3465.53, "word": " mean", "probability": 0.66064453125}, {"start": 3465.53, "end": 3465.73, "word": " 1", "probability": 0.90283203125}, {"start": 3465.73, "end": 3465.91, "word": " is", "probability": 0.52587890625}, {"start": 3465.91, "end": 3466.39, "word": " means", "probability": 0.71240234375}, {"start": 3466.39, "end": 3467.49, "word": " for", "probability": 0.72412109375}, {"start": 3467.49, "end": 3467.71, "word": " New", "probability": 0.66015625}, {"start": 3467.71, "end": 3467.95, "word": " York", "probability": 0.96923828125}, {"start": 3467.95, "end": 3468.25, "word": " is", "probability": 0.7822265625}, {"start": 3468.25, "end": 3468.55, "word": " greater", "probability": 0.9052734375}, {"start": 3468.55, "end": 3468.87, "word": " than", "probability": 0.93896484375}, {"start": 3468.87, "end": 3469.03, "word": " the", "probability": 0.70849609375}, {"start": 3469.03, "end": 3469.17, "word": " mean", "probability": 0.94482421875}, {"start": 3469.17, "end": 3469.35, "word": " for", "probability": 0.88916015625}, {"start": 3469.35, "end": 3469.67, "word": " Ottawa.", "probability": 0.190673828125}, {"start": 3470.15, "end": 3470.49, "word": " If", "probability": 0.94921875}, {"start": 3470.49, "end": 3471.49, "word": " the", "probability": 0.90576171875}, {"start": 3471.49, "end": 3471.83, "word": " interval", "probability": 0.9697265625}, {"start": 3471.83, "end": 3472.07, "word": " is", "probability": 0.9521484375}, {"start": 3472.07, "end": 3472.43, "word": " negative,", "probability": 0.919921875}, {"start": 3472.55, "end": 3472.83, "word": " negative,", "probability": 0.93896484375}, {"start": 3473.31, "end": 3475.09, "word": " that", "probability": 0.92724609375}, {"start": 3475.09, "end": 3475.41, "word": " means", "probability": 0.9384765625}, {"start": 3475.41, "end": 3475.59, "word": " mean", "probability": 0.60986328125}, 
{"start": 3475.59, "end": 3475.83, "word": " 1", "probability": 0.9208984375}, {"start": 3475.83, "end": 3475.99, "word": " is", "probability": 0.947265625}, {"start": 3475.99, "end": 3476.31, "word": " smaller", "probability": 0.85107421875}, {"start": 3476.31, "end": 3476.55, "word": " than", "probability": 0.94677734375}, {"start": 3476.55, "end": 3476.73, "word": " mean", "probability": 0.95361328125}, {"start": 3476.73, "end": 3476.95, "word": " 2.", "probability": 0.9833984375}, {"start": 3477.53, "end": 3477.69, "word": " If", "probability": 0.7822265625}, {"start": 3477.69, "end": 3477.85, "word": " it's", "probability": 0.66796875}, {"start": 3477.85, "end": 3478.21, "word": " positive,", "probability": 0.93310546875}, {"start": 3478.33, "end": 3478.65, "word": " positive,", "probability": 0.95068359375}, {"start": 3478.91, "end": 3479.11, "word": " then", "probability": 0.82958984375}, {"start": 3479.11, "end": 3479.29, "word": " mean", "probability": 0.95556640625}, {"start": 3479.29, "end": 3479.47, "word": " 1", "probability": 0.958984375}, {"start": 3479.47, "end": 3479.63, "word": " is", "probability": 0.95166015625}, {"start": 3479.63, "end": 3480.01, "word": " greater", "probability": 0.9189453125}, {"start": 3480.01, "end": 3480.29, "word": " than.", "probability": 0.70703125}, {"start": 3480.61, "end": 3480.93, "word": " If", "probability": 0.9296875}, {"start": 3480.93, "end": 3481.09, "word": " it's", "probability": 0.933837890625}, {"start": 3481.09, "end": 3481.39, "word": " negative", "probability": 0.93994140625}, {"start": 3481.39, "end": 3481.89, "word": " plus,", "probability": 0.62646484375}, {"start": 3482.53, "end": 3483.01, "word": " then", "probability": 0.861328125}], "temperature": 1.0}, {"id": 132, "seek": 349740, "start": 3484.04, "end": 3497.4, "text": " If the interval starts from negative to positive, that means zero lies inside the interval. So in this case, we don't reject. 
So the only time we don't reject is zero.", "tokens": [759, 264, 15035, 3719, 490, 3671, 281, 3353, 11, 300, 1355, 4018, 9134, 1854, 264, 15035, 13, 407, 294, 341, 1389, 11, 321, 500, 380, 8248, 13, 407, 264, 787, 565, 321, 500, 380, 8248, 307, 4018, 13], "avg_logprob": -0.19841747329785273, "compression_ratio": 1.4608695652173913, "no_speech_prob": 0.0, "words": [{"start": 3484.04, "end": 3484.36, "word": " If", "probability": 0.48388671875}, {"start": 3484.36, "end": 3484.6, "word": " the", "probability": 0.814453125}, {"start": 3484.6, "end": 3484.92, "word": " interval", "probability": 0.904296875}, {"start": 3484.92, "end": 3485.42, "word": " starts", "probability": 0.56201171875}, {"start": 3485.42, "end": 3485.72, "word": " from", "probability": 0.8837890625}, {"start": 3485.72, "end": 3486.06, "word": " negative", "probability": 0.67578125}, {"start": 3486.06, "end": 3486.52, "word": " to", "probability": 0.953125}, {"start": 3486.52, "end": 3487.26, "word": " positive,", "probability": 0.93408203125}, {"start": 3488.02, "end": 3488.32, "word": " that", "probability": 0.7822265625}, {"start": 3488.32, "end": 3488.6, "word": " means", "probability": 0.9248046875}, {"start": 3488.6, "end": 3488.92, "word": " zero", "probability": 0.71875}, {"start": 3488.92, "end": 3489.38, "word": " lies", "probability": 0.72021484375}, {"start": 3489.38, "end": 3490.2, "word": " inside", "probability": 0.900390625}, {"start": 3490.2, "end": 3490.42, "word": " the", "probability": 0.86181640625}, {"start": 3490.42, "end": 3490.82, "word": " interval.", "probability": 0.9541015625}, {"start": 3491.24, "end": 3491.44, "word": " So", "probability": 0.83544921875}, {"start": 3491.44, "end": 3491.56, "word": " in", "probability": 0.6552734375}, {"start": 3491.56, "end": 3491.78, "word": " this", "probability": 0.94140625}, {"start": 3491.78, "end": 3492.06, "word": " case,", "probability": 0.91259765625}, {"start": 3492.18, "end": 3492.28, "word": " we", "probability": 
0.93603515625}, {"start": 3492.28, "end": 3492.6, "word": " don't", "probability": 0.940673828125}, {"start": 3492.6, "end": 3492.9, "word": " reject.", "probability": 0.91064453125}, {"start": 3494.78, "end": 3495.1, "word": " So", "probability": 0.75634765625}, {"start": 3495.1, "end": 3495.26, "word": " the", "probability": 0.7734375}, {"start": 3495.26, "end": 3495.48, "word": " only", "probability": 0.90966796875}, {"start": 3495.48, "end": 3495.88, "word": " time", "probability": 0.86328125}, {"start": 3495.88, "end": 3496.16, "word": " we", "probability": 0.91748046875}, {"start": 3496.16, "end": 3496.44, "word": " don't", "probability": 0.975341796875}, {"start": 3496.44, "end": 3496.82, "word": " reject", "probability": 0.92333984375}, {"start": 3496.82, "end": 3497.06, "word": " is", "probability": 0.89892578125}, {"start": 3497.06, "end": 3497.4, "word": " zero.", "probability": 0.84912109375}], "temperature": 1.0}, {"id": 133, "seek": 351483, "start": 3498.83, "end": 3514.83, "text": " The lower bound is negative and the upper bound is positive. Because if you start for example from minus one to two for example, in this case zero in the interval, I mean the confidence interval contains zero. 
In this case we don't reject.", "tokens": [440, 3126, 5472, 307, 3671, 293, 264, 6597, 5472, 307, 3353, 13, 1436, 498, 291, 722, 337, 1365, 490, 3175, 472, 281, 732, 337, 1365, 11, 294, 341, 1389, 4018, 294, 264, 15035, 11, 286, 914, 264, 6687, 15035, 8306, 4018, 13, 682, 341, 1389, 321, 500, 380, 8248, 13], "avg_logprob": -0.24341299545531178, "compression_ratio": 1.5894039735099337, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 3498.83, "end": 3499.35, "word": " The", "probability": 0.21826171875}, {"start": 3499.35, "end": 3499.83, "word": " lower", "probability": 0.6396484375}, {"start": 3499.83, "end": 3500.39, "word": " bound", "probability": 0.90771484375}, {"start": 3500.39, "end": 3500.71, "word": " is", "probability": 0.91845703125}, {"start": 3500.71, "end": 3501.07, "word": " negative", "probability": 0.89306640625}, {"start": 3501.07, "end": 3501.43, "word": " and", "probability": 0.79443359375}, {"start": 3501.43, "end": 3501.53, "word": " the", "probability": 0.77490234375}, {"start": 3501.53, "end": 3501.77, "word": " upper", "probability": 0.828125}, {"start": 3501.77, "end": 3502.05, "word": " bound", "probability": 0.89208984375}, {"start": 3502.05, "end": 3502.21, "word": " is", "probability": 0.92822265625}, {"start": 3502.21, "end": 3502.61, "word": " positive.", "probability": 0.93701171875}, {"start": 3503.39, "end": 3503.61, "word": " Because", "probability": 0.720703125}, {"start": 3503.61, "end": 3503.77, "word": " if", "probability": 0.8642578125}, {"start": 3503.77, "end": 3503.89, "word": " you", "probability": 0.52734375}, {"start": 3503.89, "end": 3504.15, "word": " start", "probability": 0.580078125}, {"start": 3504.15, "end": 3504.31, "word": " for", "probability": 0.4423828125}, {"start": 3504.31, "end": 3504.69, "word": " example", "probability": 0.966796875}, {"start": 3504.69, "end": 3504.99, "word": " from", "probability": 0.8330078125}, {"start": 3504.99, "end": 3505.37, "word": " minus", "probability": 
0.57177734375}, {"start": 3505.37, "end": 3505.77, "word": " one", "probability": 0.55810546875}, {"start": 3505.77, "end": 3506.41, "word": " to", "probability": 0.95361328125}, {"start": 3506.41, "end": 3506.73, "word": " two", "probability": 0.927734375}, {"start": 3506.73, "end": 3506.95, "word": " for", "probability": 0.56591796875}, {"start": 3506.95, "end": 3507.37, "word": " example,", "probability": 0.97119140625}, {"start": 3507.97, "end": 3508.09, "word": " in", "probability": 0.88818359375}, {"start": 3508.09, "end": 3508.29, "word": " this", "probability": 0.9453125}, {"start": 3508.29, "end": 3508.59, "word": " case", "probability": 0.92724609375}, {"start": 3508.59, "end": 3508.95, "word": " zero", "probability": 0.7421875}, {"start": 3508.95, "end": 3509.65, "word": " in", "probability": 0.7177734375}, {"start": 3509.65, "end": 3509.81, "word": " the", "probability": 0.92724609375}, {"start": 3509.81, "end": 3510.17, "word": " interval,", "probability": 0.9541015625}, {"start": 3510.55, "end": 3510.67, "word": " I", "probability": 0.939453125}, {"start": 3510.67, "end": 3510.77, "word": " mean", "probability": 0.96728515625}, {"start": 3510.77, "end": 3511.01, "word": " the", "probability": 0.7412109375}, {"start": 3511.01, "end": 3511.43, "word": " confidence", "probability": 0.97509765625}, {"start": 3511.43, "end": 3512.03, "word": " interval", "probability": 0.97509765625}, {"start": 3512.03, "end": 3512.85, "word": " contains", "probability": 0.74658203125}, {"start": 3512.85, "end": 3513.23, "word": " zero.", "probability": 0.88037109375}, {"start": 3513.61, "end": 3513.77, "word": " In", "probability": 0.94091796875}, {"start": 3513.77, "end": 3513.95, "word": " this", "probability": 0.9482421875}, {"start": 3513.95, "end": 3514.15, "word": " case", "probability": 0.92333984375}, {"start": 3514.15, "end": 3514.31, "word": " we", "probability": 0.71630859375}, {"start": 3514.31, "end": 3514.59, "word": " don't", "probability": 0.929931640625}, 
{"start": 3514.59, "end": 3514.83, "word": " reject.", "probability": 0.85302734375}], "temperature": 1.0}, {"id": 134, "seek": 354401, "start": 3515.75, "end": 3544.01, "text": " So again, the only time you have to don't reject is zero if the confidence starts from negative to positive. Otherwise, you reject the null hypothesis. So in this case, zero is less than the entire interval, means outside the entire interval. We can be 95% confident that the mean of New York Stock Exchange is greater than the mean of", "tokens": [407, 797, 11, 264, 787, 565, 291, 362, 281, 500, 380, 8248, 307, 4018, 498, 264, 6687, 3719, 490, 3671, 281, 3353, 13, 10328, 11, 291, 8248, 264, 18184, 17291, 13, 407, 294, 341, 1389, 11, 4018, 307, 1570, 813, 264, 2302, 15035, 11, 1355, 2380, 264, 2302, 15035, 13, 492, 393, 312, 13420, 4, 6679, 300, 264, 914, 295, 1873, 3609, 17857, 31169, 307, 5044, 813, 264, 914, 295], "avg_logprob": -0.21148768319210537, "compression_ratio": 1.6, "no_speech_prob": 0.0, "words": [{"start": 3515.75, "end": 3515.99, "word": " So", "probability": 0.8173828125}, {"start": 3515.99, "end": 3516.27, "word": " again,", "probability": 0.8212890625}, {"start": 3517.01, "end": 3517.11, "word": " the", "probability": 0.90380859375}, {"start": 3517.11, "end": 3517.29, "word": " only", "probability": 0.9228515625}, {"start": 3517.29, "end": 3517.61, "word": " time", "probability": 0.8837890625}, {"start": 3517.61, "end": 3517.77, "word": " you", "probability": 0.95458984375}, {"start": 3517.77, "end": 3517.97, "word": " have", "probability": 0.8818359375}, {"start": 3517.97, "end": 3518.31, "word": " to", "probability": 0.97216796875}, {"start": 3518.31, "end": 3519.15, "word": " don't", "probability": 0.7000732421875}, {"start": 3519.15, "end": 3519.53, "word": " reject", "probability": 0.92333984375}, {"start": 3519.53, "end": 3519.77, "word": " is", "probability": 0.224365234375}, {"start": 3519.77, "end": 3520.07, "word": " zero", "probability": 0.7431640625}, 
{"start": 3520.07, "end": 3521.05, "word": " if", "probability": 0.548828125}, {"start": 3521.05, "end": 3521.25, "word": " the", "probability": 0.9140625}, {"start": 3521.25, "end": 3521.65, "word": " confidence", "probability": 0.97509765625}, {"start": 3521.65, "end": 3522.17, "word": " starts", "probability": 0.76171875}, {"start": 3522.17, "end": 3522.35, "word": " from", "probability": 0.89453125}, {"start": 3522.35, "end": 3522.73, "word": " negative", "probability": 0.94775390625}, {"start": 3522.73, "end": 3522.99, "word": " to", "probability": 0.96630859375}, {"start": 3522.99, "end": 3523.25, "word": " positive.", "probability": 0.9287109375}, {"start": 3523.53, "end": 3523.77, "word": " Otherwise,", "probability": 0.9091796875}, {"start": 3524.13, "end": 3524.25, "word": " you", "probability": 0.96142578125}, {"start": 3524.25, "end": 3524.81, "word": " reject", "probability": 0.728515625}, {"start": 3524.81, "end": 3525.89, "word": " the", "probability": 0.86865234375}, {"start": 3525.89, "end": 3526.59, "word": " null", "probability": 0.615234375}, {"start": 3526.59, "end": 3527.05, "word": " hypothesis.", "probability": 0.98876953125}, {"start": 3527.47, "end": 3527.65, "word": " So", "probability": 0.94091796875}, {"start": 3527.65, "end": 3527.79, "word": " in", "probability": 0.83349609375}, {"start": 3527.79, "end": 3528.01, "word": " this", "probability": 0.94873046875}, {"start": 3528.01, "end": 3528.45, "word": " case,", "probability": 0.91259765625}, {"start": 3529.05, "end": 3529.33, "word": " zero", "probability": 0.85009765625}, {"start": 3529.33, "end": 3529.55, "word": " is", "probability": 0.94677734375}, {"start": 3529.55, "end": 3529.85, "word": " less", "probability": 0.93603515625}, {"start": 3529.85, "end": 3530.15, "word": " than", "probability": 0.94482421875}, {"start": 3530.15, "end": 3530.35, "word": " the", "probability": 0.91943359375}, {"start": 3530.35, "end": 3530.81, "word": " entire", "probability": 0.86865234375}, 
{"start": 3530.81, "end": 3531.43, "word": " interval,", "probability": 0.97900390625}, {"start": 3532.21, "end": 3532.73, "word": " means", "probability": 0.462890625}, {"start": 3532.73, "end": 3533.35, "word": " outside", "probability": 0.83642578125}, {"start": 3533.35, "end": 3534.51, "word": " the", "probability": 0.82080078125}, {"start": 3534.51, "end": 3534.89, "word": " entire", "probability": 0.88427734375}, {"start": 3534.89, "end": 3535.29, "word": " interval.", "probability": 0.96435546875}, {"start": 3535.73, "end": 3536.01, "word": " We", "probability": 0.958984375}, {"start": 3536.01, "end": 3536.29, "word": " can", "probability": 0.94677734375}, {"start": 3536.29, "end": 3536.53, "word": " be", "probability": 0.9482421875}, {"start": 3536.53, "end": 3537.71, "word": " 95", "probability": 0.9716796875}, {"start": 3537.71, "end": 3538.33, "word": "%", "probability": 0.94873046875}, {"start": 3538.33, "end": 3539.09, "word": " confident", "probability": 0.97705078125}, {"start": 3539.09, "end": 3539.49, "word": " that", "probability": 0.93017578125}, {"start": 3539.49, "end": 3540.19, "word": " the", "probability": 0.92626953125}, {"start": 3540.19, "end": 3540.45, "word": " mean", "probability": 0.96142578125}, {"start": 3540.45, "end": 3541.49, "word": " of", "probability": 0.970703125}, {"start": 3541.49, "end": 3541.71, "word": " New", "probability": 0.8544921875}, {"start": 3541.71, "end": 3541.95, "word": " York", "probability": 0.966796875}, {"start": 3541.95, "end": 3542.45, "word": " Stock", "probability": 0.3330078125}, {"start": 3542.45, "end": 3542.97, "word": " Exchange", "probability": 0.9189453125}, {"start": 3542.97, "end": 3543.23, "word": " is", "probability": 0.921875}, {"start": 3543.23, "end": 3543.47, "word": " greater", "probability": 0.9130859375}, {"start": 3543.47, "end": 3543.69, "word": " than", "probability": 0.94970703125}, {"start": 3543.69, "end": 3543.75, "word": " the", "probability": 0.89306640625}, {"start": 
3543.75, "end": 3543.87, "word": " mean", "probability": 0.935546875}, {"start": 3543.87, "end": 3544.01, "word": " of", "probability": 0.9697265625}], "temperature": 1.0}, {"id": 135, "seek": 356221, "start": 3545.69, "end": 3562.21, "text": " Big sense? Any questions? Next time we'll talk, I will give the third approach, the B value approach for conducting the hypothesis testing. Any question? So that's all for today.", "tokens": [5429, 2020, 30, 2639, 1651, 30, 3087, 565, 321, 603, 751, 11, 286, 486, 976, 264, 2636, 3109, 11, 264, 363, 2158, 3109, 337, 21749, 264, 17291, 4997, 13, 2639, 1168, 30, 407, 300, 311, 439, 337, 965, 13], "avg_logprob": -0.21601562574505806, "compression_ratio": 1.325925925925926, "no_speech_prob": 0.0, "words": [{"start": 3545.69, "end": 3545.97, "word": " Big", "probability": 0.41845703125}, {"start": 3545.97, "end": 3546.29, "word": " sense?", "probability": 0.7666015625}, {"start": 3547.91, "end": 3548.47, "word": " Any", "probability": 0.876953125}, {"start": 3548.47, "end": 3548.91, "word": " questions?", "probability": 0.953125}, {"start": 3549.37, "end": 3549.71, "word": " Next", "probability": 0.896484375}, {"start": 3549.71, "end": 3549.93, "word": " time", "probability": 0.86962890625}, {"start": 3549.93, "end": 3550.11, "word": " we'll", "probability": 0.77783203125}, {"start": 3550.11, "end": 3550.35, "word": " talk,", "probability": 0.8544921875}, {"start": 3550.73, "end": 3550.93, "word": " I", "probability": 0.99365234375}, {"start": 3550.93, "end": 3551.05, "word": " will", "probability": 0.849609375}, {"start": 3551.05, "end": 3551.27, "word": " give", "probability": 0.8701171875}, {"start": 3551.27, "end": 3551.47, "word": " the", "probability": 0.8916015625}, {"start": 3551.47, "end": 3551.75, "word": " third", "probability": 0.90283203125}, {"start": 3551.75, "end": 3552.19, "word": " approach,", "probability": 0.912109375}, {"start": 3552.35, "end": 3552.39, "word": " the", "probability": 0.57568359375}, {"start": 
3552.39, "end": 3552.55, "word": " B", "probability": 0.44091796875}, {"start": 3552.55, "end": 3552.81, "word": " value", "probability": 0.50341796875}, {"start": 3552.81, "end": 3553.21, "word": " approach", "probability": 0.89501953125}, {"start": 3553.21, "end": 3553.49, "word": " for", "probability": 0.8056640625}, {"start": 3553.49, "end": 3554.31, "word": " conducting", "probability": 0.87109375}, {"start": 3554.31, "end": 3555.49, "word": " the", "probability": 0.89697265625}, {"start": 3555.49, "end": 3556.11, "word": " hypothesis", "probability": 0.8974609375}, {"start": 3556.11, "end": 3557.27, "word": " testing.", "probability": 0.8125}, {"start": 3558.53, "end": 3558.87, "word": " Any", "probability": 0.92529296875}, {"start": 3558.87, "end": 3559.25, "word": " question?", "probability": 0.529296875}, {"start": 3560.31, "end": 3560.65, "word": " So", "probability": 0.62353515625}, {"start": 3560.65, "end": 3561.07, "word": " that's", "probability": 0.9169921875}, {"start": 3561.07, "end": 3561.49, "word": " all", "probability": 0.953125}, {"start": 3561.49, "end": 3561.89, "word": " for", "probability": 0.95263671875}, {"start": 3561.89, "end": 3562.21, "word": " today.", "probability": 0.84765625}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3564.6345, "duration_after_vad": 3409.6156249999794} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM.srt new file mode 100644 index 0000000000000000000000000000000000000000..84b14262bcdf33aabaa635e7639d48c856ccea84 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM.srt @@ -0,0 +1,2210 @@ + +1 +00:00:07,000 --> 00:00:12,340 +So again a point estimate is just a single number + +2 +00:00:12,340 --> 00:00:18,080 +or just a value. 
For the interval estimation in + +3 +00:00:18,080 --> 00:00:21,400 +this case you have more information than using a + +4 +00:00:21,400 --> 00:00:24,200 +point estimate. So confidence interval provides + +5 +00:00:24,200 --> 00:00:27,800 +additional information about the spread of the or + +6 +00:00:27,800 --> 00:00:31,240 +the variability of the estimate. As I mentioned + +7 +00:00:31,240 --> 00:00:35,210 +before, there are two parameters of interest in + +8 +00:00:35,210 --> 00:00:38,010 +this chapter. The first one is the population mean + +9 +00:00:38,010 --> 00:00:42,110 +mu. In this case, we can estimate the population + +10 +00:00:42,110 --> 00:00:47,850 +mean mu by using the sample mean x bar. The other + +11 +00:00:47,850 --> 00:00:51,650 +parameter is the proportion. The population + +12 +00:00:51,650 --> 00:00:55,850 +proportion is pi. And the point estimates P. So P + +13 +00:00:55,850 --> 00:00:58,410 +is a point estimate of the population proportion + +14 +00:00:58,410 --> 00:01:04,030 +pi. So again, this chapter talks about confidence + +15 +00:01:04,030 --> 00:01:08,630 +interval estimation. And as we mentioned that + +16 +00:01:08,630 --> 00:01:10,790 +there are two types of estimation. One is called + +17 +00:01:10,790 --> 00:01:17,810 +point estimation. And for example, the average or + +18 +00:01:17,810 --> 00:01:22,070 +the mean of age is 18 years. So 18 is the point + +19 +00:01:22,070 --> 00:01:26,750 +estimate. On the other hand, the other type of the + +20 +00:01:26,750 --> 00:01:29,690 +confidence is the confidence interval estimation. + +21 +00:01:30,810 --> 00:01:33,790 +So rather than saying that the average is 18 + +22 +00:01:33,790 --> 00:01:39,270 +years, you could say that we are 95% sure that the + +23 +00:01:39,270 --> 00:01:46,510 +average of student ages range between 17 up to 19 + +24 +00:01:46,510 --> 00:01:49,390 +years. 
And again, the confidence interval gives + +25 +00:01:49,390 --> 00:01:53,550 +more information about the variability of the + +26 +00:01:53,550 --> 00:01:56,710 +data. And again, there are two types of parameters + +27 +00:01:56,710 --> 00:01:58,910 +we are interested in. One is called the population + +28 +00:01:58,910 --> 00:02:02,050 +mean, and the point estimate is the sample mean. + +29 +00:02:03,070 --> 00:02:05,650 +The other parameter is the population proportion + +30 +00:02:05,650 --> 00:02:09,310 +of Y, and the point estimate is P. + +31 +00:02:13,250 --> 00:02:16,830 +So again, An interval estimate provides more + +32 +00:02:16,830 --> 00:02:20,490 +information about a population characteristic than + +33 +00:02:20,490 --> 00:02:23,210 +does a point estimate. Because if you go back a + +34 +00:02:23,210 --> 00:02:25,510 +little bit to the previous example, here we + +35 +00:02:25,510 --> 00:02:28,650 +calculated the sample mean to be with just one + +36 +00:02:28,650 --> 00:02:31,050 +point. So the average of your reagents is 18 + +37 +00:02:31,050 --> 00:02:36,590 +years. But the other one, I am sure that for 95% + +38 +00:02:36,590 --> 00:02:40,710 +that your ages range between, for example, 17 and + +39 +00:02:40,710 --> 00:02:43,230 +19. So in this case, we have more information. + +40 +00:02:43,870 --> 00:02:46,190 +Such interval estimates are called confidence + +41 +00:02:46,190 --> 00:02:49,470 +intervals. And we are going to use this notation, + +42 +00:02:49,670 --> 00:02:56,750 +CI. CI stands for confidence interval estimation. + +43 +00:03:02,350 --> 00:03:05,810 +An interval gives a range of values, because here + +44 +00:03:05,810 --> 00:03:09,910 +we have a range from, for example, 17 up to 19. In + +45 +00:03:09,910 --> 00:03:12,750 +this case, this interval takes into consideration + +46 +00:03:12,750 --> 00:03:15,930 +variation in samples that intersects from sample + +47 +00:03:15,930 --> 00:03:19,610 +to sample. 
For example, suppose we have a huge + +48 +00:03:19,610 --> 00:03:22,330 +population, and from that huge population, we + +49 +00:03:22,330 --> 00:03:27,430 +select many samples of the same size. Maybe the + +50 +00:03:27,430 --> 00:03:32,790 +first sample, for example, of size 15, gives + +51 +00:03:32,790 --> 00:03:37,170 +an average of 17. + +52 +00:03:38,930 --> 00:03:43,470 +Maybe another sample of the same size might give + +53 +00:03:43,470 --> 00:03:49,330 +a different sample mean, 18 and so on. Suppose we + +54 +00:03:49,330 --> 00:03:54,770 +select 100 samples. So here we have different + +55 +00:03:54,770 --> 00:03:59,450 +sample means. For sure we should have different + +56 +00:04:01,140 --> 00:04:06,580 +confidence intervals. Maybe the first one starts + +57 +00:04:06,580 --> 00:04:10,500 +from 16 to 18, the other one maybe 15 to 17, and so + +58 +00:04:10,500 --> 00:04:14,320 +on. So the confidence interval takes into + +59 +00:04:14,320 --> 00:04:17,420 +consideration variation in sample statistics from + +60 +00:04:17,420 --> 00:04:20,820 +sample to sample. But the confidence interval we + +61 +00:04:20,820 --> 00:04:27,020 +have to construct based on observations just from + +62 +00:04:27,020 --> 00:04:30,940 +one sample. I mean, you don't need to select too + +63 +00:04:30,940 --> 00:04:33,920 +many random samples in order to construct a + +64 +00:04:33,920 --> 00:04:36,920 +confidence interval. Just select one sample, one + +65 +00:04:36,920 --> 00:04:39,800 +random sample, and from that sample, we can + +66 +00:04:39,800 --> 00:04:44,580 +construct a confidence interval. So my confidence + +67 +00:04:44,580 --> 00:04:50,120 +interval can be constructed by using only one + +68 +00:04:50,120 --> 00:04:53,860 +simple sample. From that sample, we can compute + +69 +00:04:53,860 --> 00:04:59,770 +the average. Now, this interval gives information + +70 +00:04:59,770 --> 00:05:02,990 +about closeness to unknown population parameters. 
+ +71 +00:05:03,490 --> 00:05:08,530 +For example, when we are saying weights range + +72 +00:05:08,530 --> 00:05:13,650 +between, for example, 60 kilogram up to 80, and + +73 +00:05:13,650 --> 00:05:19,530 +you are sure about 90%. So that means the average + +74 +00:05:19,530 --> 00:05:27,430 +weight range between 60 to 80. That means the true + +75 +00:05:27,430 --> 00:05:34,190 +parameter mu is close to these values. So it's + +76 +00:05:34,190 --> 00:05:36,930 +between 6 to 80. Now suppose in this case, I am + +77 +00:05:36,930 --> 00:05:45,570 +interested in 95%. Now suppose someone else + +78 +00:05:45,570 --> 00:05:50,570 +is interested in 99% confidence. So do you think the + +79 +00:05:50,570 --> 00:05:55,170 +confidence interval for more confident to be wider + +80 +00:05:55,170 --> 00:06:01,770 +or narrower? As the confidence gets larger or + +81 +00:06:01,770 --> 00:06:08,230 +bigger, the interval gets wider. So maybe from 55 + +82 +00:06:08,230 --> 00:06:15,370 +up to 85. Another example, suppose we are + +83 +00:06:15,370 --> 00:06:20,090 +interested in the age of students. + +84 +00:06:23,330 --> 00:06:30,430 +80% confident that your + +85 +00:06:30,430 --> 00:06:32,950 +age is between, or your weights, let's talk about + +86 +00:06:32,950 --> 00:06:41,370 +weights, between 70 up to 75 kilograms, 80%. 90% + +87 +00:06:41,370 --> 00:06:51,410 +could be 60 to 80 kilograms, or 65 to 75 kilograms + +88 +00:06:51,410 --> 00:06:57,950 +to 80 kilograms. 95%, for example, 60 to + +89 +00:06:57,950 --> 00:07:08,130 +example, 60 to 85. 99% from 58 up to 90. Which one + +90 +00:07:08,130 --> 00:07:10,730 +is best? Which is the best confidence interval + +91 +00:07:10,730 --> 00:07:16,470 +among all of these? To say that we are 80% at 70 + +92 +00:07:16,470 --> 00:07:25,760 +to 75, 65 to 80, 60 to 85, 85 to 90. Which one is + +93 +00:07:25,760 --> 00:07:32,160 +better? 
Now when we are saying from 70 to 75 it + +94 +00:07:32,160 --> 00:07:36,400 +means the interval width is smaller or maybe the + +95 +00:07:36,400 --> 00:07:40,040 +smallest among the others. So this one is the best + +96 +00:07:40,040 --> 00:07:42,860 +confidence interval because the error here is just + +97 +00:07:42,860 --> 00:07:47,930 +5. Now here you can expect your weight. But for + +98 +00:07:47,930 --> 00:07:50,950 +the other scenarios here, it's very hard to say. + +99 +00:07:51,070 --> 00:07:54,690 +Now suppose I am saying that your score in + +100 +00:07:54,690 --> 00:07:59,110 +statistics is between 40 up to 100. + +101 +00:08:06,970 --> 00:08:15,170 +It's too large. Suppose I am saying that with, I am + +102 +00:08:15,170 --> 00:08:23,120 +sure, with 99.7% that your score in statistics + +103 +00:08:23,120 --> 00:08:30,180 +lies between 50 to + +104 +00:08:30,180 --> 00:08:34,620 +100. And my question is, can you expect your + +105 +00:08:34,620 --> 00:08:38,660 +score? It's very difficult because the range is + +106 +00:08:38,660 --> 00:08:44,630 +too long from 50 up to 100. Because here, I am + +107 +00:08:44,630 --> 00:08:49,650 +maybe 99.7%, so I'm almost sure that your score is + +108 +00:08:49,650 --> 00:08:54,510 +between 50 and 100. But suppose someone else says + +109 +00:08:54,510 --> 00:08:58,890 +that I am 95% that your score ranges between 60 to + +110 +00:08:58,890 --> 00:09:01,850 +90. It's still large, but it's better than the + +111 +00:09:01,850 --> 00:09:09,430 +previous one. One else says, I am 90% sure that + +112 +00:09:09,430 --> 00:09:15,620 +your score lies between 70 to 85. Another one + +113 +00:09:15,620 --> 00:09:22,000 +might be saying 85% of your score starts from 74 + +114 +00:09:22,000 --> 00:09:29,840 +up to 80. Now here the range is small, so I can + +115 +00:09:29,840 --> 00:09:35,280 +predict my scores just between 74 to 80. 
So we + +116 +00:09:35,280 --> 00:09:38,660 +like to have confidence interval to be small as + +117 +00:09:38,660 --> 00:09:43,380 +much as possible. So that's for the range. So in + +118 +00:09:43,380 --> 00:09:47,980 +this case, The true parameter is very close to one + +119 +00:09:47,980 --> 00:09:49,180 +of these values. + +120 +00:09:51,720 --> 00:09:55,200 +Always, the confidence interval is stated in terms + +121 +00:09:55,200 --> 00:10:00,120 +of level of confidence. The common ones are either + +122 +00:10:00,120 --> 00:10:05,760 +90%, 95 + +123 +00:10:05,760 --> 00:10:11,550 +% or 99%. So these are the common levels of + +124 +00:10:11,550 --> 00:10:12,230 +confidence. + +125 +00:10:21,230 --> 00:10:23,510 +Next slide, Inshallah, will turn to the other + +126 +00:10:23,510 --> 00:10:25,910 +side. Now, for example, + +127 +00:10:38,570 --> 00:10:46,350 +For example, serial example, suppose we know that + +128 +00:10:46,350 --> 00:10:53,130 +the population mean mu is given to be 368 and + +129 +00:10:53,130 --> 00:10:56,610 +sigma is 15. Suppose we know the population mean + +130 +00:10:56,610 --> 00:11:00,030 +and the population summation. In reality, if these + +131 +00:11:00,030 --> 00:11:03,250 +two parameters are unknown, we don't need to + +132 +00:11:03,250 --> 00:11:06,140 +select a random sample. Because we are selecting + +133 +00:11:06,140 --> 00:11:08,800 +the samples in order to estimate these unknown + +134 +00:11:08,800 --> 00:11:11,960 +parameters. But this example is just for + +135 +00:11:11,960 --> 00:11:18,540 +illustration. So again, a mu in reality is not + +136 +00:11:18,540 --> 00:11:22,020 +given, is unknown, as well as the standard + +137 +00:11:22,020 --> 00:11:25,020 +deviation. 
But suppose from previous studies we + +138 +00:11:25,020 --> 00:11:30,450 +know that the population mean is given by 368, and + +139 +00:11:30,450 --> 00:11:33,350 +the standard deviation for the population is about + +140 +00:11:33,350 --> 00:11:37,110 +15. Suppose we know this information from the + +141 +00:11:37,110 --> 00:11:42,350 +history or from the previous studies. Now suppose + +142 +00:11:42,350 --> 00:11:46,190 +we take a random sample of size 25, and this + +143 +00:11:46,190 --> 00:11:52,670 +sample gives the following information. So here, + +144 +00:11:53,150 --> 00:12:00,480 +we have a mean of 68, sigma of 15. And let's see how + +145 +00:12:00,480 --> 00:12:02,800 +we can construct the confidence interval as we + +146 +00:12:02,800 --> 00:12:07,000 +mentioned in the previous lectures. If you + +147 +00:12:07,000 --> 00:12:11,020 +remember the score, the x bar minus mu divided by + +148 +00:12:11,020 --> 00:12:16,620 +sigma over root n. In this case, let's see how we can + +149 +00:12:16,620 --> 00:12:21,120 +compute x bar from this equation. So z equals x + +150 +00:12:21,120 --> 00:12:23,340 +bar minus mu divided by sigma over root n, just + +151 +00:12:23,340 --> 00:12:26,840 +cross multiplication, you will get x bar to be mu + +152 +00:12:26,840 --> 00:12:32,000 +plus z sigma over root n. So this is the value of + +153 +00:12:32,000 --> 00:12:35,180 +x bar, mu plus z sigma over root n, just cross + +154 +00:12:35,180 --> 00:12:38,520 +multiplication. Now, the value of z could be + +155 +00:12:38,520 --> 00:12:40,600 +positive or negative, it depends on the direction + +156 +00:12:40,600 --> 00:12:46,000 +of the z score you have. So, if z score lies in the + +157 +00:12:46,000 --> 00:12:46,680 +left side, + +158 +00:12:50,290 --> 00:12:53,210 +the other one is positive. 
Because here we are + +159 +00:12:53,210 --> 00:12:55,590 +talking about confidence interval, ranges from + +160 +00:12:55,590 --> 00:12:59,430 +smallest value to the largest one. So, z-score is + +161 +00:12:59,430 --> 00:13:04,150 +negative, so x-bar plus equals mu plus or minus z + +162 +00:13:04,150 --> 00:13:08,490 +-sigma over root. Now, let's imagine that the + +163 +00:13:08,490 --> 00:13:14,250 +population mean is 368 and sigma is 15 and we + +164 +00:13:14,250 --> 00:13:18,350 +select a random sample of 15, I'm sorry, of 25, let's + +165 +00:13:18,350 --> 00:13:22,210 +see the range or the values of x bar might be + +166 +00:13:22,210 --> 00:13:28,050 +taken so x bar equals 368 + +167 +00:13:28,050 --> 00:13:35,760 +plus or minus now for 95 percent the corresponding + +168 +00:13:35,760 --> 00:13:42,520 +z value is 1.96, if you remember that. And the + +169 +00:13:42,520 --> 00:13:45,320 +other one is negative or plus, it depends on the + +170 +00:13:45,320 --> 00:13:49,220 +direction of the z-scope. So, plus or minus 1.96. + +171 +00:13:51,440 --> 00:13:56,120 +This value can be computed or found by using the + +172 +00:13:56,120 --> 00:14:02,700 +normal table. Times sigma 15 divided by root 25. + +173 +00:14:05,870 --> 00:14:13,430 +Just simple calculation will give 362, 373.8. Now, + +174 +00:14:14,270 --> 00:14:21,210 +this interval, which ranges from 362 up to 373, + +175 +00:14:21,650 --> 00:14:27,870 +contains 95% of the sample means. Suppose we have + +176 +00:14:27,870 --> 00:14:32,050 +100 sample means with different values, you can + +177 +00:14:32,050 --> 00:14:36,990 +say that 95% out of these 100 will contain the + +178 +00:14:36,990 --> 00:14:42,710 +same meaning, that if a Mu is given. But again, in + +179 +00:14:42,710 --> 00:14:47,490 +real life, you don't know the value of Mu. So when + +180 +00:14:47,490 --> 00:14:53,110 +you don't know Mu, you can use X bar to estimate + +181 +00:14:53,110 --> 00:14:59,110 +Mu. 
Now, suppose on the other hand, here Mu is + +182 +00:14:59,110 --> 00:14:59,490 +unknown. + +183 +00:15:03,180 --> 00:15:08,360 +And we estimate mu by x bar, the sample mean. And + +184 +00:15:08,360 --> 00:15:14,480 +suppose the sample size 25 gives a sample mean of + +185 +00:15:14,480 --> 00:15:21,940 +362.3. So, this sample mean by using a sample size + +186 +00:15:21,940 --> 00:15:27,900 +of 25. Now let's see how we can construct the + +187 +00:15:27,900 --> 00:15:31,550 +confidence interval in this case. Now x bar is + +188 +00:15:31,550 --> 00:15:35,970 +given, so my interval should be mu equals x bar + +189 +00:15:35,970 --> 00:15:41,650 +plus or minus z sigma over root n. Just replace + +190 +00:15:41,650 --> 00:15:47,610 +this mu by x bar, you will get. So, again, a sample + +191 +00:15:47,610 --> 00:15: + +223 +00:18:42,810 --> 00:18:51,270 +356, upper limit 368. I know that from previous + +224 +00:18:51,270 --> 00:18:56,630 +studies that the population means 368. So just ask + +225 +00:18:56,630 --> 00:19:00,530 +this question, does this interval containing + +226 +00:19:02,980 --> 00:19:09,220 +The range from 356 up to 368.13 covers mu. So in + +227 +00:19:09,220 --> 00:19:14,280 +this case, this interval continues. Maybe someone + +228 +00:19:14,280 --> 00:19:19,200 +select another sample of the same size, and that + +229 +00:19:19,200 --> 00:19:25,870 +sample gives sample mean of 369.5. By using this + +230 +00:19:25,870 --> 00:19:29,510 +equation, you can figure out lower limits and + +231 +00:19:29,510 --> 00:19:32,950 +upper limits. Now, again, I ask you still this + +232 +00:19:32,950 --> 00:19:36,670 +question. Does this interval contain a mu? The + +233 +00:19:36,670 --> 00:19:39,890 +answer is yes, because 368 lies between these two + +234 +00:19:39,890 --> 00:19:43,450 +values. So the first one contains a mu. The second + +235 +00:19:43,450 --> 00:19:47,010 +one contains mu. Look at the third symbol. 
Suppose + +236 +00:19:47,010 --> 00:19:51,210 +from my symbol, I got symbol mu to be 360. So + +237 +00:19:51,210 --> 00:19:57,240 +lower and upper limits are? 354 and upper limit + +238 +00:19:57,240 --> 00:20:06,640 +365.88 now this value 368 is outside of this + +239 +00:20:06,640 --> 00:20:11,040 +interval so maybe your interval will continue mu + +240 +00:20:11,040 --> 00:20:14,240 +or will not continue mu but you don't know + +241 +00:20:14,240 --> 00:20:17,980 +actually the value of mu so in this case you + +242 +00:20:17,980 --> 00:20:22,400 +cannot say my interval will continue mu But here, + +243 +00:20:22,460 --> 00:20:25,720 +if we know mu, you can say that this interval + +244 +00:20:25,720 --> 00:20:29,860 +contains mu, or the other one does not contain mu. + +245 +00:20:30,820 --> 00:20:35,120 +But in the real world, you don't know the value of + +246 +00:20:35,120 --> 00:20:40,060 +mu. So mu is unknown. But you can say that I am 95 + +247 +00:20:40,060 --> 00:20:45,220 +% sure that this interval will continue. Make + +248 +00:20:45,220 --> 00:20:48,260 +sense? Based on the x dot? Based on the sample + +249 +00:20:48,260 --> 00:20:54,870 +mean. And the other information we have. sigma and + +250 +00:20:54,870 --> 00:20:55,170 +N. + +251 +00:20:58,610 --> 00:21:03,310 +Again, in practice, you only take one sample of + +252 +00:21:03,310 --> 00:21:05,290 +size N, so you don't need to take many, many + +253 +00:21:05,290 --> 00:21:08,750 +samples, just take one sample. In practice, you + +254 +00:21:08,750 --> 00:21:13,150 +don't know Mu, so you don't know if the interval + +255 +00:21:13,150 --> 00:21:14,690 +actually contains Mu. + +256 +00:21:18,890 --> 00:21:23,640 +However, you do know that 95% of the intervals + +257 +00:21:23,640 --> 00:21:27,460 +formed in this manner will contain mu. So again, + +258 +00:21:28,500 --> 00:21:31,220 +any interval can be constructed in this chapter + +259 +00:21:31,220 --> 00:21:37,500 +one of these. 
You cannot say that this interval + +260 +00:21:37,500 --> 00:21:42,460 +contains mu. You have to say that 95% of the + +261 +00:21:42,460 --> 00:21:46,360 +intervals formed in this way will capture the + +262 +00:21:46,360 --> 00:21:47,780 +population parameter mu. + +263 +00:21:51,490 --> 00:21:57,070 +Another one, thus based on one sample, you + +264 +00:21:57,070 --> 00:22:02,190 +actually selected you can be 95% confident your + +265 +00:22:02,190 --> 00:22:06,240 +interval will continue. So again, this interval + +266 +00:22:06,240 --> 00:22:12,820 +might be or might not cover Mu. Since I don't know + +267 +00:22:12,820 --> 00:22:16,000 +the value of Mu, I cannot say that this interval + +268 +00:22:16,000 --> 00:22:20,440 +will continue Mu. But you can say that 95% of the + +269 +00:22:20,440 --> 00:22:24,880 +intervals constructed in this way will continue + +270 +00:22:24,880 --> 00:22:28,800 +Mu. That's the interpretation of the confidence + +271 +00:22:28,800 --> 00:22:29,300 +interval. + +272 +00:22:32,180 --> 00:22:38,000 +99, 95, whatever it is. Now, the process of the + +273 +00:22:38,000 --> 00:22:43,960 +estimation is, we have a huge population, and + +274 +00:22:43,960 --> 00:22:47,420 +suppose we are interested in the population mean + +275 +00:22:47,420 --> 00:22:54,700 +mu, and mu is not given or is unknown. We may + +276 +00:22:54,700 --> 00:22:58,460 +select a random sample from this population with + +277 +00:22:58,460 --> 00:23:03,140 +any size. From this sample, we can compute the + +278 +00:23:03,140 --> 00:23:07,520 +average or the mean. Then after that, we can + +279 +00:23:07,520 --> 00:23:10,920 +construct the confidence sample. For example, + +280 +00:23:11,080 --> 00:23:15,420 +maybe I'm saying that I am 95% confident that mu + +281 +00:23:15,420 --> 00:23:19,580 +is between 4 and 16. So the process of estimation, + +282 +00:23:20,020 --> 00:23:22,980 +select a random sample from your population. 
This + +283 +00:23:22,980 --> 00:23:25,680 +sample should be representative in order to + +284 +00:23:25,680 --> 00:23:27,880 +generalize your results. Otherwise, you cannot do + +285 +00:23:27,880 --> 00:23:32,630 +that. So this sample should be representative. And + +286 +00:23:32,630 --> 00:23:36,370 +we, in chapter seven, we discussed four types of + +287 +00:23:36,370 --> 00:23:39,210 +probability sampling techniques. Simple random + +288 +00:23:39,210 --> 00:23:46,330 +samples, systematic, cluster, and stratified. So + +289 +00:23:46,330 --> 00:23:48,490 +by using this sample, we can compute, for example, + +290 +00:23:48,610 --> 00:23:51,750 +the sample mean. Then after that, I will show how + +291 +00:23:51,750 --> 00:23:56,470 +can we do or construct or build the confidence + +292 +00:23:56,470 --> 00:24:02,140 +interval. So this is the estimation process. Now, + +293 +00:24:02,620 --> 00:24:06,120 +the general formula for all confidence intervals + +294 +00:24:06,120 --> 00:24:07,340 +is, + +295 +00:24:10,120 --> 00:24:12,940 +the general formula has mainly three components, + +296 +00:24:14,060 --> 00:24:20,980 +point estimate, plus or minus, critical value, + +297 +00:24:22,080 --> 00:24:25,480 +times standard error. Let's see the definition for + +298 +00:24:25,480 --> 00:24:30,960 +each component. The first one, point estimate. is + +299 +00:24:30,960 --> 00:24:33,880 +the sample statistic estimating this population + +300 +00:24:33,880 --> 00:24:36,920 +parameter of interest. For example, if you go back + +301 +00:24:36,920 --> 00:24:45,040 +to the previous age example, and suppose X bar is + +302 +00:24:45,040 --> 00:24:50,940 +18. So you can say that the point estimate, since + +303 +00:24:50,940 --> 00:24:55,820 +18 is the sample mean, and we are interested in + +304 +00:24:55,820 --> 00:25:03,850 +the population mean, so 18 is a point estimate + +305 +00:25:03,850 --> 00:25:10,270 +for mu. 
This is the first one, this is the first + +306 +00:25:10,270 --> 00:25:13,250 +component. So a point estimate is the simplest + +307 +00:25:13,250 --> 00:25:15,370 +statistic, x bar is the simplest statistic. + +308 +00:25:16,570 --> 00:25:19,010 +Estimating the population parameter of interest, I + +309 +00:25:19,010 --> 00:25:22,450 +am interested suppose in the population mean, mu. + +310 +00:25:24,750 --> 00:25:26,070 +So we have estimate. + +311 +00:25:29,450 --> 00:25:32,530 +A plus or minus, a critical value. The critical + +312 +00:25:32,530 --> 00:25:36,590 +value here is a table value. That means we have to + +313 +00:25:36,590 --> 00:25:40,390 +go back to the normal table again, the one we had + +314 +00:25:40,390 --> 00:25:44,190 +discussed in Chapter 6. A critical value is a + +315 +00:25:44,190 --> 00:25:47,470 +table value based on the sampling distribution in + +316 +00:25:47,470 --> 00:25:50,750 +Chapter 7 of the point estimate and the desired + +317 +00:25:50,750 --> 00:25:55,430 +confidence interval. For example, let's talk about + +318 +00:25:55,430 --> 00:25:58,110 +again 90%. + +319 +00:26:02,680 --> 00:26:08,380 +Confidence is in this case. This area represents + +320 +00:26:08,380 --> 00:26:13,460 +90%. The area from lower limit to upper limit in + +321 +00:26:13,460 --> 00:26:18,580 +this area. Now again by symmetric distribution we + +322 +00:26:18,580 --> 00:26:23,720 +know that the remaining 10% is split into two + +323 +00:26:23,720 --> 00:26:29,670 +halves. 5% to the right and 5% to the left. Now + +324 +00:26:29,670 --> 00:26:33,330 +the critical value in this case is the Z-score. + +325 +00:26:35,210 --> 00:26:37,450 +Now by using the normal table, the standardized + +326 +00:26:37,450 --> 00:26:41,290 +normal table, you can figure out Z in this case is + +327 +00:26:41,290 --> 00:26:47,510 +negative. Look at the table. Look at 5%. + +328 +00:26:57,940 --> 00:27:03,560 +The value is minus 1.645, the other one is 1.645. 
+ +329 +00:27:07,060 --> 00:27:11,460 +So that's the critical value, plus or minus a + +330 +00:27:11,460 --> 00:27:15,980 +critical value. Again, the table, if you are + +331 +00:27:15,980 --> 00:27:16,560 +looking at + +332 +00:27:28,430 --> 00:27:38,070 +You will get this value. Minus 1.6 under 4, under + +333 +00:27:38,070 --> 00:27:41,910 +5. Here we have 0045, + +334 +00:27:42,770 --> 00:27:45,750 +0055. + +335 +00:27:47,590 --> 00:27:53,030 +We have the chip. Again look at the angle of this + +336 +00:27:53,030 --> 00:27:53,270 +one. + +337 +00:28:07,280 --> 00:28:09,480 +05, 05. + +338 +00:28:36,870 --> 00:28:40,330 +So again, minus 1.6 under 4. + +339 +00:28:44,250 --> 00:28:45,230 +0505. + +340 +00:29:04,750 --> 00:29:08,970 +So your answer could be either one of these + +341 +00:29:08,970 --> 00:29:10,710 +negative one point + +342 +00:29:16,450 --> 00:29:26,670 +So in this case your z score minus 1.64 or minus 1 + +343 +00:29:26,670 --> 00:29:33,930 +.65 or the average of these two values minus 1 + +344 +00:29:33,930 --> 00:29:40,780 +.645. It's better to use this one. So for 90%, 5% + +345 +00:29:40,780 --> 00:29:43,980 +to the right, 5% to the left, just use the normal + +346 +00:29:43,980 --> 00:29:46,660 +table, you will get these two values. So this is + +347 +00:29:46,660 --> 00:29:48,600 +the critical value. Time. + +348 +00:29:52,820 --> 00:29:55,900 +The standard error means the standard deviation of + +349 +00:29:55,900 --> 00:29:56,740 +the point estimate. + +350 +00:30:00,900 --> 00:30:04,020 +So the general formula for all confidence + +351 +00:30:04,020 --> 00:30:06,840 +intervals is given by this equation. Point + +352 +00:30:06,840 --> 00:30:11,730 +estimate. a plus or minus critical value times + +353 +00:30:11,730 --> 00:30:15,030 +standard error. For example, point estimate is x4, + +354 +00:30:16,050 --> 00:30:20,110 +the critical value is z-score, times the standard + +355 +00:30:20,110 --> 00:30:25,890 +error of the point estimate. 
So times this SE is + +356 +00:30:25,890 --> 00:30:29,470 +the standard error of the estimate. So these are + +357 +00:30:29,470 --> 00:30:33,250 +the three components in order to build any + +358 +00:30:33,250 --> 00:30:38,360 +confidence interval. Another definition here, The + +359 +00:30:38,360 --> 00:30:43,840 +confidence level. The confidence level that the + +360 +00:30:43,840 --> 00:30:46,680 +interval will contain the unknown population + +361 +00:30:46,680 --> 00:30:55,360 +parameter. For example, we can say 90%, 95%, 99%. + +362 +00:30:55,360 --> 00:31:02,620 +You never say 100%. So usually, the confidence + +363 +00:31:02,620 --> 00:31:07,560 +level is a percentage that is less than 100%. + +364 +00:31:13,240 --> 00:31:17,360 +Now, for example, suppose the confidence level is + +365 +00:31:17,360 --> 00:31:31,100 +95. This 95 can be written as 1 minus alpha equals + +366 +00:31:31,100 --> 00:31:36,600 +95 percent. That means the confidence here is 95 + +367 +00:31:36,600 --> 00:31:43,720 +percent, so the error Or alpha is 5%. So if the + +368 +00:31:43,720 --> 00:31:47,820 +confidence level is 90%, it means the error is + +369 +00:31:47,820 --> 00:31:51,200 +10%. It is 90. + +370 +00:31:54,420 --> 00:32:00,780 +So alpha is 10% and so on. So a relative frequency + +371 +00:32:00,780 --> 00:32:05,100 +interpretation in this case, you can see that 95% + +372 +00:32:07,560 --> 00:32:10,200 +Of all the confidence interval that can be + +373 +00:32:10,200 --> 00:32:12,340 +constructed will contain the unknown true + +374 +00:32:12,340 --> 00:32:17,740 +parameter. A specific interval either will contain + +375 +00:32:17,740 --> 00:32:20,140 +or will not contain the true parameter as we + +376 +00:32:20,140 --> 00:32:23,120 +mentioned. So if you have confidence interval with + +377 +00:32:23,120 --> 00:32:26,980 +confidence level 95%, this interval might be or + +378 +00:32:26,980 --> 00:32:31,740 +might not contain the true parameter. 
So you are
+
+379
+00:32:31,740 --> 00:32:37,140
+not sure 100% that the true parameter will be contained
+
+380
+00:32:37,500 --> 00:32:39,860
+in the confidence interval.
+
+381
+00:32:41,720 --> 00:32:44,560
+So for this chapter, we are going to talk about
+
+382
+00:32:44,560 --> 00:32:52,020
+confidence intervals for population mean and
+
+383
+00:32:52,020 --> 00:32:54,940
+population proportion. For population mean, we
+
+384
+00:32:54,940 --> 00:32:58,600
+will have two cases, when sigma is known and sigma
+
+385
+00:32:58,600 --> 00:33:04,720
+is unknown. Now let's start with the confidence
+
+386
+00:33:04,720 --> 00:33:10,350
+interval for mu when sigma is given. But
+
+387
+00:33:10,350 --> 00:33:15,850
+again, if a mu is unknown, here we are talking
+
+388
+00:33:15,850 --> 00:33:19,390
+about confidence interval for mu.
+
+389
+00:33:22,650 --> 00:33:26,770
+Since we are talking about estimating a mu, that
+
+390
+00:33:26,770 --> 00:33:30,110
+means a mu is unknown.
+
+391
+00:33:33,190 --> 00:33:36,270
+Now the first case here we are talking about sigma
+
+392
+00:33:36,270 --> 00:33:37,470
+is unknown, is known.
+
+393
+00:33:41,990 --> 00:33:45,550
+But we know that from chapter three that sigma
+
+394
+00:33:45,550 --> 00:33:50,230
+square root sum x minus mu squared divided by
+
+395
+00:33:50,230 --> 00:33:57,970
+capital N. So in order to compute sigma, we have
+
+396
+00:33:57,970 --> 00:34:02,310
+to know Mu first, otherwise you cannot compute
+
+397
+00:34:02,310 --> 00:34:09,070
+Sigma. So how can you say that Mu is unknown and
+
+398
+00:34:09,070 --> 00:34:14,570
+Sigma is given or is known? So Mu should be
+
+399
+00:34:14,570 --> 00:34:20,790
+unknown. Or Sigma is unknown. So Sigma is known
+
+400
+00:34:20,790 --> 00:34:25,650
+has a big question mark. Because if Sigma is
+
+401
+00:34:25,650 --> 00:34:30,310
+known, that means Mu should be known.
If µ is + +402 +00:34:30,310 --> 00:34:33,310 +known, you don't need to select a random sample to + +403 +00:34:33,310 --> 00:34:38,490 +estimate µ. Make sense? I know the value of µ. So + +404 +00:34:38,490 --> 00:34:42,010 +what's the goal of selecting a random sample in + +405 +00:34:42,010 --> 00:34:46,770 +order to estimate µ? You are saying sigma is + +406 +00:34:46,770 --> 00:34:51,070 +given. That means µ is given or µ is known. + +407 +00:34:54,050 --> 00:34:57,810 +What's the benefit of selecting a random sample in + +408 +00:34:57,810 --> 00:35:01,610 +order to estimate something is known? Always we + +409 +00:35:01,610 --> 00:35:06,810 +estimate unknown parameter. But here, the reason + +410 +00:35:06,810 --> 00:35:09,990 +why we are talking about sigma is given sometimes + +411 +00:35:09,99 + +445 +00:37:26,180 --> 00:37:29,200 +multiplied by the standard error of the estimate. + +446 +00:37:29,420 --> 00:37:32,140 +And we know that, we know that the standard error + +447 +00:37:32,140 --> 00:37:37,180 +of x bar is sigma over root n. Z alpha over 2 + +448 +00:37:37,180 --> 00:37:42,520 +comes from the fact that we have for example here + +449 +00:37:42,520 --> 00:37:49,380 +95% this is 1 minus alpha. 1 minus alpha is 95%. + +450 +00:37:49,380 --> 00:37:56,000 +So 5% remaining to both sides upper tail will have + +451 +00:37:56,000 --> 00:38:00,460 +for example 2.5% and lower tail the same percent 2 + +452 +00:38:00,460 --> 00:38:02,160 +.5%. + +453 +00:38:04,640 --> 00:38:07,920 +Now since 1 minus alpha equals 95%, that means + +454 +00:38:07,920 --> 00:38:13,160 +alpha is 5% for both sides. So this one alpha over + +455 +00:38:13,160 --> 00:38:16,220 +2, the other one is alpha over 2. So this is your + +456 +00:38:16,220 --> 00:38:18,640 +z. So z alpha over 2. + +457 +00:38:21,940 --> 00:38:27,620 +The other side plus z alpha over 2. So x bar is + +458 +00:38:27,620 --> 00:38:32,860 +the point estimate. 
Z alpha over 2 is the normal + +459 +00:38:32,860 --> 00:38:35,460 +distribution in critical value for probability of + +460 +00:38:35,460 --> 00:38:40,520 +alpha over 2 in each tail. And sigma over root n + +461 +00:38:40,520 --> 00:38:43,860 +is a standard error. So again, this is the first + +462 +00:38:43,860 --> 00:38:48,740 +formula we have in this chapter to construct the + +463 +00:38:48,740 --> 00:38:52,620 +confidence interval with 1 minus alpha confidence + +464 +00:38:52,620 --> 00:38:57,220 +level. So 1 minus alpha percent confidence + +465 +00:38:57,220 --> 00:39:05,230 +interval. For mu is x bar plus or minus z alpha + +466 +00:39:05,230 --> 00:39:10,650 +over 2 times plus or minus sigma x bar plus or + +467 +00:39:10,650 --> 00:39:13,650 +minus z alpha over 2 times sigma over square root + +468 +00:39:13,650 --> 00:39:17,990 +of n. So this is a formula can be used in order to + +469 +00:39:17,990 --> 00:39:21,250 +construct confidence interval for the population + +470 +00:39:21,250 --> 00:39:27,390 +mean mu. The lower limit is given by x bar minus + +471 +00:39:27,390 --> 00:39:30,760 +this amount. So the lower limit, + +472 +00:39:34,740 --> 00:39:36,480 +X bar minus, + +473 +00:39:39,560 --> 00:39:41,840 +and the upper limit is X bar plus. + +474 +00:39:44,720 --> 00:39:47,880 +Upper limit. + +475 +00:39:50,880 --> 00:39:54,920 +Now the point estimate is X bar. For the lower + +476 +00:39:54,920 --> 00:40:01,530 +limit, we subtract this amount. from x bar for the + +477 +00:40:01,530 --> 00:40:09,250 +upper limit we add the same amount so + +478 +00:40:09,250 --> 00:40:15,970 +subtracting specific amount and adding the same + +479 +00:40:15,970 --> 00:40:22,650 +amount this amount later will call it margin of + +480 +00:40:22,650 --> 00:40:31,520 +error this will be maybe week after next week. 
So + +481 +00:40:31,520 --> 00:40:36,360 +z alpha bar to sigma bar root n is the margin of + +482 +00:40:36,360 --> 00:40:39,980 +error or the error because we add or subtract the + +483 +00:40:39,980 --> 00:40:43,660 +same value of x bar. + +484 +00:40:46,480 --> 00:40:54,980 +Now this slide just explains how can we compute Z + +485 +00:40:54,980 --> 00:41:00,820 +alpha over 2. And we did that for 90%. Now here + +486 +00:41:00,820 --> 00:41:07,020 +for 95% confidence level, so alpha is 5%. So Z + +487 +00:41:07,020 --> 00:41:12,640 +alpha over 2 in the lower tail is 1.96 minus. The + +488 +00:41:12,640 --> 00:41:16,460 +other one plus 1.96. So the lower confidence limit + +489 +00:41:16,460 --> 00:41:21,120 +minus or negative. Upper confidence limit is plus. + +490 +00:41:21,360 --> 00:41:25,490 +So the same values, but different science. So I + +491 +00:41:25,490 --> 00:41:29,090 +think we talk about how can we find the critical + +492 +00:41:29,090 --> 00:41:35,530 +values of our talk. This table summarizes the + +493 +00:41:35,530 --> 00:41:40,990 +commonly used confidence levels. And most likely, + +494 +00:41:41,070 --> 00:41:48,090 +we are using 90%, 95%, 99%. So for 90%, it's + +495 +00:41:48,090 --> 00:41:57,080 +1.645, as we did. 95% 1.96, better to remember + +496 +00:41:57,080 --> 00:42:01,100 +these values. For 99, it's 2.58. + +497 +00:42:04,660 --> 00:42:05,900 +Now let's see. + +498 +00:42:08,680 --> 00:42:13,500 +As the confidence level increases. Now here the + +499 +00:42:13,500 --> 00:42:16,420 +confidence level. As the confidence level + +500 +00:42:16,420 --> 00:42:22,100 +increases. Look at the corresponding z value. is + +501 +00:42:22,100 --> 00:42:27,060 +also increased. So in this case, the interval + +502 +00:42:27,060 --> 00:42:32,260 +becomes wider or narrower? Wider. Because here we + +503 +00:42:32,260 --> 00:42:36,680 +subtract this value, this amount, z alpha over 2 + +504 +00:42:36,680 --> 00:42:41,520 +will increase. 
This is z alpha over 2. As + +505 +00:42:41,520 --> 00:42:47,140 +confidence level increases, z value increases + +506 +00:42:47,140 --> 00:42:52,700 +also. So that means this amount will go up. will + +507 +00:42:52,700 --> 00:42:56,720 +increase also. That means the confidence interval + +508 +00:42:56,720 --> 00:43:02,940 +becomes wider. So as C + +509 +00:43:02,940 --> 00:43:10,820 +level or confidence level goes up, increases, the + +510 +00:43:10,820 --> 00:43:18,300 +corresponding interval, the interval becomes + +511 +00:43:18,300 --> 00:43:20,740 +wider. + +512 +00:43:23,830 --> 00:43:26,470 +So as C level increases, the confidence becomes + +513 +00:43:26,470 --> 00:43:30,910 +wider. Vice versa, if the C level decreases, + +514 +00:43:32,110 --> 00:43:34,790 +narrower. The confidence interval becomes + +515 +00:43:34,790 --> 00:43:35,230 +narrower. + +516 +00:43:38,070 --> 00:43:41,210 +It's better to have narrower confidence interval. + +517 +00:43:42,970 --> 00:43:46,890 +So again, it's better if you remember these values + +518 +00:43:46,890 --> 00:43:50,690 +for the confidence level and the corresponding Z + +519 +00:43:50,690 --> 00:43:51,070 +value. + +520 +00:43:54,160 --> 00:43:58,980 +So that's the + +521 +00:43:58,980 --> 00:44:01,520 +sea level for different sizes. + +522 +00:44:04,060 --> 00:44:08,580 +This slide shows + +523 +00:44:08,580 --> 00:44:14,900 +that some continents intervals may be containing + +524 +00:44:14,900 --> 00:44:20,160 +mu. So the blue ones is the value of mu. The blue + +525 +00:44:20,160 --> 00:44:25,740 +ones contains mu. The red one does not, because mu + +526 +00:44:25,740 --> 00:44:30,360 +in this case lies outside the confidence interval. + +527 +00:44:30,740 --> 00:44:34,220 +So maybe the confidence interval you have to + +528 +00:44:34,220 --> 00:44:40,620 +construct, it might cover the mu or not. I have to + +529 +00:44:40,620 --> 00:44:43,260 +mention just one point here. 
It's better to say + +530 +00:44:43,260 --> 00:44:48,320 +that my confidence interval covers mu. So you can + +531 +00:44:48,320 --> 00:44:51,220 +say that I am 95% confident. + +532 +00:44:54,140 --> 00:45:03,080 +that the confidence interval contains mu, contains + +533 +00:45:03,080 --> 00:45:11,080 +the true parameter mu, rather than saying mu lies. + +534 +00:45:16,320 --> 00:45:19,140 +Because Mu is unknown. You cannot say Mu lies in + +535 +00:45:19,140 --> 00:45:22,620 +the confidence. But we can say that 95% we are + +536 +00:45:22,620 --> 00:45:29,000 +sure that my interval contains Mu. So don't say Mu + +537 +00:45:29,000 --> 00:45:32,640 +lies in this interval. So it's better to say that + +538 +00:45:32,640 --> 00:45:38,220 +we are 95% sure that my interval covers, contains + +539 +00:45:38,220 --> 00:45:39,480 +Mu. + +540 +00:45:41,240 --> 00:45:43,840 +Let's do one example. + +541 +00:45:50,590 --> 00:45:56,930 +Here we have a sample of 11 circuits from a large + +542 +00:45:56,930 --> 00:45:57,990 +normal population. + +543 +00:46:00,430 --> 00:46:06,110 +So now we have a random sample of 11. This sample + +544 +00:46:06,110 --> 00:46:09,550 +is selected from normal populations. So the first + +545 +00:46:09,550 --> 00:46:14,810 +assumption is okay. So normality is assumed to be + +546 +00:46:14,810 --> 00:46:21,900 +satisfied. Now this sample has a mean resistance + +547 +00:46:21,900 --> 00:46:27,300 +of 2.2 ohms. So again, a sample of 11 circuits + +548 +00:46:27,300 --> 00:46:30,800 +from a large normal population has a mean + +549 +00:46:30,800 --> 00:46:38,040 +resistance of 2.2 ohms. That means X bar is 2.2. + +550 +00:46:41,900 --> 00:46:49,730 +Ohms is the resistance unit. We know that From + +551 +00:46:49,730 --> 00:46:55,310 +past testing, it means from previous studies, we + +552 +00:46:55,310 --> 00:46:58,450 +know that from past testing that the population + +553 +00:46:58,450 --> 00:47:04,890 +standard deviation is 0.35. 
So sigma is 0.35. So + +554 +00:47:04,890 --> 00:47:08,810 +sigma is known. So the second assumption is okay. + +555 +00:47:11,110 --> 00:47:15,110 +But again, as we mentioned, Sigma's reality is not + +556 +00:47:15,110 --> 00:47:19,270 +known. But here from past testing, I mean from + +557 +00:47:19,270 --> 00:47:23,350 +previous knowledge, we know that sigma is 1 to the + +558 +00:47:23,350 --> 00:47:31,890 +35. Now the question is determined 95%. So C level + +559 +00:47:31,890 --> 00:47:41,470 +or confidence level is 95%. Determined 95% + +560 +00:47:41,470 --> 00:47:45,190 +confidence interval. For the true mean, true mean + +561 +00:47:45,190 --> 00:47:51,790 +it means population mean, resistance of the + +562 +00:47:51,790 --> 00:47:55,330 +population. So now the information that we have in + +563 +00:47:55,330 --> 00:47:59,010 +this example, we select a random sample of size 11 + +564 +00:47:59,010 --> 00:48:03,430 +from a normal population, so normality is assumed + +565 +00:48:03,430 --> 00:48:09,330 +to be satisfied. This sample gives mean resistance + +566 +00:48:09,330 --> 00:48:15,110 +of 2.2 and we know that. The standard deviation of + +567 +00:48:15,110 --> 00:48:19,170 +the population is given by 0.35, and the question + +568 +00:48:19,170 --> 00:48:24,250 +is to determine 95% confidence interval for the + +569 +00:48:24,250 --> 00:48:28,590 +true mean resistance of the approximation. So this + +570 +00:48:28,590 --> 00:48:29,690 +is the information we have. + +571 +00:48:32,570 --> 00:48:36,390 +Now straightforward calculations will give this + +572 +00:48:36,390 --> 00:48:39,590 +result. X bar plus or minus Z alpha over 2 sigma + +573 +00:48:39,590 --> 00:48:40,230 +over root N. + +574 +00:48:43,220 --> 00:48:49,200 +2.2 plus or minus 1.96 times sigma, which is 0.35 + +575 +00:48:49,200 --> 00:48:53,420 +divided by root 11. This will give 2.2 plus or + +576 +00:48:53,420 --> 00:48:58,660 +minus this amount. 
And as mentioned before, this + +577 +00:48:58,660 --> 00:49:02,760 +amount is the margin term. So just subtract this + +578 +00:49:02,760 --> 00:49:08,900 +value from 2.2, you will get 1.9932. And add this + +579 +00:49:08,900 --> 00:49:12,630 +value to 2.2, you will get this result. So Mu + +580 +00:49:12,630 --> 00:49:16,770 +greater than or equal to 1.99 all the way up to 2 + +581 +00:49:16,770 --> 00:49:19,650 +.4068. + +582 +00:49:23,950 --> 00:49:29,010 +So that's the calculations we have. So Mu is + +583 +00:49:29,010 --> 00:49:31,390 +between 95 percent. + +584 +00:49:34,830 --> 00:49:39,450 +Now let's see our interpretation for this result. + +585 +00:49:40,680 --> 00:49:43,260 +So again, straightforward calculations will give + +586 +00:49:43,260 --> 00:49:47,380 +this + +587 +00:49:47,380 --> 00:49:53,200 +result. Now, the interpretation, you should write + +588 +00:49:53,200 --> 00:49:58,260 +the following. We are 95% confident that the true + +589 +00:49:58,260 --> 00:50:04,120 +mean resistance is between these two values. Just + +590 +00:50:04,120 --> 00:50:07,940 +saying, we are 95% sure that the true mean + +591 +00:50:07,940 --> 00:50:11,920 +resistance is between these two values. Although + +592 +00:50:11,920 --> 00:50:16,720 +the true mean may or may not be in this interval, + +593 +00:50:17,640 --> 00:50:24,900 +but we are 95% of the intervals from form in this + +594 +00:50:24,900 --> 00:50:29,660 +manner will contain the true mean. So again, you + +595 +00:50:29,660 --> 00:50:34,320 +don't know exactly if the true mean lies in the + +596 +00:50:34,320 --> 00:50:37,980 +interval, but you could say that 95% of the + +597 +00:50:37,980 --> 00:50:43,360 +intervals formed in this way will contain the true + +598 +00:50:43,360 --> 00:50:50,780 +mean. So that's all for confidence estimation for + +599 +00:50:50,780 --> 00:50:54,840 +the population mean immune. Any questions? 
+ +600 +00:50:59,640 --> 00:51:04,520 +Later, next time, inshallah, we'll talk about The + +601 +00:51:04,520 --> 00:51:10,500 +confidence interval when sigma is unknown. I mean, + +602 +00:51:13,740 --> 00:51:19,400 +do you ever truly know sigma? May not. In + +603 +00:51:19,400 --> 00:51:23,840 +virtually all real world business situations, + +604 +00:51:24,440 --> 00:51:31,800 +sigma is not known. If there is a situation, Where + +605 +00:51:31,800 --> 00:51:35,620 +Sigma is known, then Mu is also known. Because + +606 +00:51:35,620 --> 00:51:40,840 +since to calculate Mu, you need to know Mu. In + +607 +00:51:40,840 --> 00:51:44,860 +order to calculate Sigma, you should know the + +608 +00:51:44,860 --> 00:51:45,560 +value of Mu. + +609 +00:51:48,600 --> 00:51:52,640 +Finally, if you truly know Mu, if you know the + +610 +00:51:52,640 --> 00:51:59,680 +value of Mu, there would be no need to gather a + +611 +00:51:59,680 --> 00:52:00,560 +sample to estimate. + +612 +00:52:04,050 --> 00:52:07,610 +The value of the mean is given. You have to stop + +613 +00:52:07,610 --> 00:52:11,530 +because you don't need to select a random sample + +614 +00:52:11,530 --> 00:52:17,690 +in order to estimate the population mean. And + +615 +00:52:17,690 --> 00:52:23,550 +again, in real life, sigma is unknown. So next + +616 +00:52:23,550 --> 00:52:28,190 +time, we'll talk about confidence interval for mu + +617 +00:52:28,190 --> 00:52:34,500 +when sigma is unknown. So that's all. Fourth day. diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..5c97ba7a9205db59ed81d1b33e6fdc8fa8b49877 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM_postprocess.srt @@ -0,0 +1,2468 @@ +1 +00:00:07,000 --> 00:00:12,340 +So again a point estimate is just a single number + +2 +00:00:12,340 --> 00:00:18,080 +or just a value. 
For the interval estimation in + +3 +00:00:18,080 --> 00:00:21,400 +this case you have more information than using a + +4 +00:00:21,400 --> 00:00:24,200 +point estimate. So confidence interval provides + +5 +00:00:24,200 --> 00:00:27,800 +additional information about the spread of the or + +6 +00:00:27,800 --> 00:00:31,240 +the variability of the estimate. As I mentioned + +7 +00:00:31,240 --> 00:00:35,210 +before There are two parameters of interest in + +8 +00:00:35,210 --> 00:00:38,010 +this chapter. The first one is the population mean + +9 +00:00:38,010 --> 00:00:42,110 +mu. In this case, we can estimate the population + +10 +00:00:42,110 --> 00:00:47,850 +mean mu by using the sample mean x bar. The other + +11 +00:00:47,850 --> 00:00:51,650 +parameter is the proportion. The population + +12 +00:00:51,650 --> 00:00:55,850 +proportion is pi. And the point estimates P. So P + +13 +00:00:55,850 --> 00:00:58,410 +is a point estimate of the population proportion + +14 +00:00:58,410 --> 00:01:04,030 +pi. So again, this chapter talks about confidence + +15 +00:01:04,030 --> 00:01:08,630 +interval estimation. And as we mentioned that + +16 +00:01:08,630 --> 00:01:10,790 +there are two types of estimation. One is called + +17 +00:01:10,790 --> 00:01:17,810 +point estimation. And for example, the average or + +18 +00:01:17,810 --> 00:01:22,070 +the mean of age is 18 years. So 18 is the point + +19 +00:01:22,070 --> 00:01:26,750 +estimate. On the other hand, the other type of the + +20 +00:01:26,750 --> 00:01:29,690 +confidence is the confidence interval estimation. + +21 +00:01:30,810 --> 00:01:33,790 +So rather than saying that the average is 18 + +22 +00:01:33,790 --> 00:01:39,270 +years, you could say that we are 95% sure that the + +23 +00:01:39,270 --> 00:01:46,510 +average of student ages range between 17 up to 19 + +24 +00:01:46,510 --> 00:01:49,390 +years. 
And again, the confidence interval gives + +25 +00:01:49,390 --> 00:01:53,550 +more information about the variability of the + +26 +00:01:53,550 --> 00:01:56,710 +data. And again, there are two types of parameters + +27 +00:01:56,710 --> 00:01:58,910 +we are interested in. One is called the population + +28 +00:01:58,910 --> 00:02:02,050 +mean, and the point estimate is the sample mean. + +29 +00:02:03,070 --> 00:02:05,650 +The other parameter is the population of a portion + +30 +00:02:05,650 --> 00:02:09,310 +of Y, and the point estimate is P. + +31 +00:02:13,250 --> 00:02:16,830 +So again, An interval estimate provides more + +32 +00:02:16,830 --> 00:02:20,490 +information about a population characteristic than + +33 +00:02:20,490 --> 00:02:23,210 +does a point estimate. Because if you go back a + +34 +00:02:23,210 --> 00:02:25,510 +little bit to the previous example, here we + +35 +00:02:25,510 --> 00:02:28,650 +calculated the sample mean to be with just one + +36 +00:02:28,650 --> 00:02:31,050 +point. So the average of your reagents is 18 + +37 +00:02:31,050 --> 00:02:36,590 +years. But the other one, I am sure that for 95% + +38 +00:02:36,590 --> 00:02:40,710 +that your ages range between, for example, 17 and + +39 +00:02:40,710 --> 00:02:43,230 +19. So in this case, we have more information. + +40 +00:02:43,870 --> 00:02:46,190 +Such interval estimates are called confidence + +41 +00:02:46,190 --> 00:02:49,470 +intervals. And we are going to use this notation, + +42 +00:02:49,670 --> 00:02:56,750 +CI. CI stands for confidence interval estimation. + +43 +00:03:02,350 --> 00:03:05,810 +An interval gives a range of values, because here + +44 +00:03:05,810 --> 00:03:09,910 +we have a range from, for example, 17 up to 19. In + +45 +00:03:09,910 --> 00:03:12,750 +this case, this interval takes into consideration + +46 +00:03:12,750 --> 00:03:15,930 +variation in samples that intersects from sample + +47 +00:03:15,930 --> 00:03:19,610 +to sample. 
For example, suppose we have a huge + +48 +00:03:19,610 --> 00:03:22,330 +population, and from that huge population, we + +49 +00:03:22,330 --> 00:03:27,430 +select many samples of the same size. Maybe the + +50 +00:03:27,430 --> 00:03:32,790 +first sample, for example, of size 15. gives + +51 +00:03:32,790 --> 00:03:37,170 +average of 17. + +52 +00:03:38,930 --> 00:03:43,470 +Maybe another sample of the same size might give + +53 +00:03:43,470 --> 00:03:49,330 +different sample mean, 18 and so on. Suppose we + +54 +00:03:49,330 --> 00:03:54,770 +select 100 samples. So here we have different + +55 +00:03:54,770 --> 00:03:59,450 +sample means. For sure we should have different + +56 +00:04:01,140 --> 00:04:06,580 +confidence intervals. Maybe the first one starts + +57 +00:04:06,580 --> 00:04:10,500 +from 16 to 18, the other one maybe 15 to 17 and so + +58 +00:04:10,500 --> 00:04:14,320 +on. So the confidence interval takes into + +59 +00:04:14,320 --> 00:04:17,420 +consideration variation in sample statistics from + +60 +00:04:17,420 --> 00:04:20,820 +sample to sample. But the confidence interval we + +61 +00:04:20,820 --> 00:04:27,020 +have to construct based on observations just from + +62 +00:04:27,020 --> 00:04:30,940 +one sample. I mean, you don't need to select too + +63 +00:04:30,940 --> 00:04:33,920 +many random samples in order to construct a + +64 +00:04:33,920 --> 00:04:36,920 +confidence interval. Just select one sample, one + +65 +00:04:36,920 --> 00:04:39,800 +random sample, and from that sample, we can + +66 +00:04:39,800 --> 00:04:44,580 +construct a confidence interval. So my confidence + +67 +00:04:44,580 --> 00:04:50,120 +interval can be constructed by using only one + +68 +00:04:50,120 --> 00:04:53,860 +simple sample. From that sample, we can compute + +69 +00:04:53,860 --> 00:04:59,770 +the average. Now, this interval gives information + +70 +00:04:59,770 --> 00:05:02,990 +about closeness to unknown population parameters. 
+ +71 +00:05:03,490 --> 00:05:08,530 +For example, when we are saying weights range + +72 +00:05:08,530 --> 00:05:13,650 +between, for example, 60 kilogram up to 80, and + +73 +00:05:13,650 --> 00:05:19,530 +you are sure about 90%. So that means the average + +74 +00:05:19,530 --> 00:05:27,430 +weight range between 60 to 80. That means the true + +75 +00:05:27,430 --> 00:05:34,190 +parameter mu is close to these values. So it's + +76 +00:05:34,190 --> 00:05:36,930 +between 6 to 80. Now suppose in this case I am + +77 +00:05:36,930 --> 00:05:45,570 +interested in 95%. Now suppose someone else + +78 +00:05:45,570 --> 00:05:50,570 +interested in 99% confidence. So do you think the + +79 +00:05:50,570 --> 00:05:55,170 +confidence interval for more confident to be wider + +80 +00:05:55,170 --> 00:06:01,770 +or narrower. As the confidence gets larger or + +81 +00:06:01,770 --> 00:06:08,230 +bigger, the interval gets wider. So maybe from 55 + +82 +00:06:08,230 --> 00:06:15,370 +up to 85. Another example, suppose we are + +83 +00:06:15,370 --> 00:06:20,090 +interested in the age of students. + +84 +00:06:23,330 --> 00:06:30,430 +80% confident that your + +85 +00:06:30,430 --> 00:06:32,950 +age is between, or your weights, let's talk about + +86 +00:06:32,950 --> 00:06:41,370 +weights, between 70 up to 75 kilograms, 80%. 90% + +87 +00:06:41,370 --> 00:06:51,410 +could be 60 to 80 kilograms, or 65, 75 kilograms + +88 +00:06:51,410 --> 00:06:57,950 +to 80 kilograms. 95%, for example, 60, for + +89 +00:06:57,950 --> 00:07:08,130 +example, 60 to 85, 99% from 58 up to 90. Which one + +90 +00:07:08,130 --> 00:07:10,730 +is best? Which is the best confidence interval + +91 +00:07:10,730 --> 00:07:16,470 +among all of these? To say that we are 80% at 70 + +92 +00:07:16,470 --> 00:07:25,760 +to 75, 65 to 80, 60 to 85, 85 to 90. Which one is + +93 +00:07:25,760 --> 00:07:32,160 +better? 
Now when we are saying from 70 to 75 it + +94 +00:07:32,160 --> 00:07:36,400 +means the interval width is smaller or maybe the + +95 +00:07:36,400 --> 00:07:40,040 +smallest among the others. So this one is the best + +96 +00:07:40,040 --> 00:07:42,860 +confidence interval because the error here is just + +97 +00:07:42,860 --> 00:07:47,930 +5. Now here you can expect your weight. But for + +98 +00:07:47,930 --> 00:07:50,950 +the other scenarios here, it's very hard to say. + +99 +00:07:51,070 --> 00:07:54,690 +Now suppose I am saying that your score in + +100 +00:07:54,690 --> 00:07:59,110 +statistics is between 40 up to 100. + +101 +00:08:06,970 --> 00:08:15,170 +It's too large. Suppose I am saying that with I am + +102 +00:08:15,170 --> 00:08:23,120 +sure. with 99.7% that your score in statistics + +103 +00:08:23,120 --> 00:08:30,180 +lies between 50 to + +104 +00:08:30,180 --> 00:08:34,620 +100. And my question is, can you expect your + +105 +00:08:34,620 --> 00:08:38,660 +score? It's very difficult because the range is + +106 +00:08:38,660 --> 00:08:44,630 +too long from 50 up to 100. Because here I am + +107 +00:08:44,630 --> 00:08:49,650 +maybe 99.7%, so I'm almost sure that your score is + +108 +00:08:49,650 --> 00:08:54,510 +between 50 and 100. But suppose someone else says + +109 +00:08:54,510 --> 00:08:58,890 +that I am 95% that your score ranges between 60 to + +110 +00:08:58,890 --> 00:09:01,850 +90. It's still large, but it's better than the + +111 +00:09:01,850 --> 00:09:09,430 +previous one. One else says I am 90% sure that + +112 +00:09:09,430 --> 00:09:15,620 +your score lies between 70 to 85. Another one + +113 +00:09:15,620 --> 00:09:22,000 +might be saying 85% of your score starts from 74 + +114 +00:09:22,000 --> 00:09:29,840 +up to 80. Now here the range is small, so I can + +115 +00:09:29,840 --> 00:09:35,280 +predict my scores just between 74 to 80. 
So we + +116 +00:09:35,280 --> 00:09:38,660 +like to have confidence interval to be small as + +117 +00:09:38,660 --> 00:09:43,380 +much as possible. So that's for the range. So in + +118 +00:09:43,380 --> 00:09:47,980 +this case, The true parameter is very close to one + +119 +00:09:47,980 --> 00:09:49,180 +of these values. + +120 +00:09:51,720 --> 00:09:55,200 +Always the confidence interval is stated in terms + +121 +00:09:55,200 --> 00:10:00,120 +of level of confidence. The common ones are either + +122 +00:10:00,120 --> 00:10:05,760 +90%, 95 + +123 +00:10:05,760 --> 00:10:11,550 +% or 99%. So these are the common levels of + +124 +00:10:11,550 --> 00:10:12,230 +confidence. + +125 +00:10:21,230 --> 00:10:23,510 +Next slide, Inshallah, will turn to the other + +126 +00:10:23,510 --> 00:10:25,910 +side. Now, for example, + +127 +00:10:38,570 --> 00:10:46,350 +For example, serial example, suppose we know that + +128 +00:10:46,350 --> 00:10:53,130 +the population mean mu is given to be 368 and + +129 +00:10:53,130 --> 00:10:56,610 +sigma is 15. Suppose we know the population mean + +130 +00:10:56,610 --> 00:11:00,030 +and the population summation. In reality, if these + +131 +00:11:00,030 --> 00:11:03,250 +two parameters are unknown, we don't need to + +132 +00:11:03,250 --> 00:11:06,140 +select a random sample. Because we are selecting + +133 +00:11:06,140 --> 00:11:08,800 +the samples in order to estimate these unknown + +134 +00:11:08,800 --> 00:11:11,960 +parameters. But this example is just for + +135 +00:11:11,960 --> 00:11:18,540 +illustration. So again, a mu in reality is not + +136 +00:11:18,540 --> 00:11:22,020 +given, is unknown, as well as the standard + +137 +00:11:22,020 --> 00:11:25,020 +deviation. 
But suppose from previous studies we + +138 +00:11:25,020 --> 00:11:30,450 +know that The population mean is given by 368, and + +139 +00:11:30,450 --> 00:11:33,350 +the standard deviation for the population is about + +140 +00:11:33,350 --> 00:11:37,110 +15. Suppose we know this information from the + +141 +00:11:37,110 --> 00:11:42,350 +history or from the previous studies. Now suppose + +142 +00:11:42,350 --> 00:11:46,190 +we take a random sample of size 25, and this + +143 +00:11:46,190 --> 00:11:52,670 +sample gives the following information. So here, + +144 +00:11:53,150 --> 00:12:00,480 +we have mean of 68 sigma of 15. And let's see how + +145 +00:12:00,480 --> 00:12:02,800 +can we construct the confidence interval as we + +146 +00:12:02,800 --> 00:12:07,000 +mentioned in the previous lectures. If you + +147 +00:12:07,000 --> 00:12:11,020 +remember the score, the x bar minus mu divided by + +148 +00:12:11,020 --> 00:12:16,620 +sigma over root n. In this case, let's see how can + +149 +00:12:16,620 --> 00:12:21,120 +we compute x bar from this equation. So z equals x + +150 +00:12:21,120 --> 00:12:23,340 +bar minus mu divided by sigma over root n, just + +151 +00:12:23,340 --> 00:12:26,840 +cross multiplication, you will get x bar to be mu + +152 +00:12:26,840 --> 00:12:32,000 +plus z sigma over root n. So this is the value of + +153 +00:12:32,000 --> 00:12:35,180 +x bar, mu plus z sigma over root n, just cross + +154 +00:12:35,180 --> 00:12:38,520 +multiplication. Now the value of z could be + +155 +00:12:38,520 --> 00:12:40,600 +positive or negative, it depends on the direction + +156 +00:12:40,600 --> 00:12:46,000 +of the z score you have. So if z score lies in the + +157 +00:12:46,000 --> 00:12:46,680 +left side, + +158 +00:12:50,290 --> 00:12:53,210 +The other one is positive. Because here we are + +159 +00:12:53,210 --> 00:12:55,590 +talking about confidence interval, ranges from + +160 +00:12:55,590 --> 00:12:59,430 +smallest value to the largest one. 
So z-score is + +161 +00:12:59,430 --> 00:13:04,150 +negative, so x-bar plus equals mu plus or minus z + +162 +00:13:04,150 --> 00:13:08,490 +-sigma over root. Now let's imagine that the + +163 +00:13:08,490 --> 00:13:14,250 +population mean is 368 and sigma is 15 and we + +164 +00:13:14,250 --> 00:13:18,350 +select a random sample of 15 I'm sorry of 25 let's + +165 +00:13:18,350 --> 00:13:22,210 +see the range or the values of x bar might be + +166 +00:13:22,210 --> 00:13:28,050 +taken so x bar equals 368 + +167 +00:13:28,050 --> 00:13:35,760 +plus or minus now for 95 percent The corresponding + +168 +00:13:35,760 --> 00:13:42,520 +z value is 1.96, if you remember that. And the + +169 +00:13:42,520 --> 00:13:45,320 +other one is negative or plus, it depends on the + +170 +00:13:45,320 --> 00:13:49,220 +direction of the z-scope. So plus or minus 1.96. + +171 +00:13:51,440 --> 00:13:56,120 +This value can be computed or found by using the + +172 +00:13:56,120 --> 00:14:02,700 +normal table. Times sigma 15 divided by root 25. + +173 +00:14:05,870 --> 00:14:13,430 +Just simple calculation will give 362, 373.8. Now, + +174 +00:14:14,270 --> 00:14:21,210 +this interval, which ranges from 362 up to 373, + +175 +00:14:21,650 --> 00:14:27,870 +contains 95% of the sample means. Suppose we have + +176 +00:14:27,870 --> 00:14:32,050 +100 sample means with different values, you can + +177 +00:14:32,050 --> 00:14:36,990 +say that 95% out of these 100. will contain the + +178 +00:14:36,990 --> 00:14:42,710 +same meaning. That if a Mu is given. But again, in + +179 +00:14:42,710 --> 00:14:47,490 +real life, you don't know the value of Mu. So when + +180 +00:14:47,490 --> 00:14:53,110 +you don't know Mu, you can use X bar to estimate + +181 +00:14:53,110 --> 00:14:59,110 +Mu. Now suppose on the other hand, here Mu is + +182 +00:14:59,110 --> 00:14:59,490 +unknown. + +183 +00:15:03,180 --> 00:15:08,360 +And we estimate mu by x bar, the sample mean. 
And + +184 +00:15:08,360 --> 00:15:14,480 +suppose the sample size 25 gives sample mean of + +185 +00:15:14,480 --> 00:15:21,940 +362.3. So this sample mean by using a sample size + +186 +00:15:21,940 --> 00:15:27,900 +of 25. Now let's see how can we construct the + +187 +00:15:27,900 --> 00:15:31,550 +confidence interval in this case. Now x bar is + +188 +00:15:31,550 --> 00:15:35,970 +given, so my interval should be mu equals x bar + +189 +00:15:35,970 --> 00:15:41,650 +plus or minus z sigma over root n. Just replace + +190 +00:15:41,650 --> 00:15:47,610 +this mu by x bar, you will get. So again, sample + +191 +00:15:47,610 --> 00:15:56,150 +of size 25 gives sample mean of 362.12. + +192 +00:15:56,610 --> 00:16:02,750 +plus or minus 196, sigma divided by root 25. This + +193 +00:16:02,750 --> 00:16:09,570 +interval could be new ranges between these two + +194 +00:16:09,570 --> 00:16:20,230 +values, 356 approximately, 0.42 up to the 68th + +195 +00:16:20,230 --> 00:16:26,080 +point. Now look at this interval. We know the true + +196 +00:16:26,080 --> 00:16:28,880 +mean equals 368. + +197 +00:16:32,240 --> 00:16:38,580 +We got this interval for x bar. Now if mu is not + +198 +00:16:38,580 --> 00:16:43,000 +given, is unknown, based on the sample we have of + +199 +00:16:43,000 --> 00:16:49,020 +size 25, we got x bar to be 362, and we got the + +200 +00:16:49,020 --> 00:16:56,100 +interval Four mu lies between 356 up to 368. So + +201 +00:16:56,100 --> 00:16:59,220 +this case, you can say that I am 95% sure that + +202 +00:16:59,220 --> 00:17:03,860 +this interval captures or contains the population + +203 +00:17:03,860 --> 00:17:06,980 +mean. But you don't know exactly if this interval + +204 +00:17:06,980 --> 00:17:10,340 +contains or not contain the population mean mu, + +205 +00:17:10,400 --> 00:17:15,100 +because mu is unknown. So the first scenario here, + +206 +00:17:16,060 --> 00:17:20,550 +if mu is given, is unknown. 
You can say that 95% + +207 +00:17:20,550 --> 00:17:28,610 +of the samples we have contain the sample means + +208 +00:17:28,610 --> 00:17:31,530 +that lie between these two values. But the + +209 +00:17:31,530 --> 00:17:35,590 +reality, as I mentioned before, mu is not given. + +210 +00:17:36,250 --> 00:17:40,250 +So we can take a random sample. And from that + +211 +00:17:40,250 --> 00:17:43,310 +sample, we can compute x bar. Then we can figure + +212 +00:17:43,310 --> 00:17:47,050 +out that mu ranges between these two values. But + +213 +00:17:47,050 --> 00:17:50,710 +here, you cannot say that this interval contains + +214 +00:17:50,710 --> 00:17:57,210 +MU 100%. But you can say that 95% of random + +215 +00:17:57,210 --> 00:18:03,910 +samples like this one will contain MU with 95%. So + +216 +00:18:03,910 --> 00:18:09,610 +that's the logic for the confidence interval. But + +217 +00:18:09,610 --> 00:18:14,450 +again, if we select many, many samples, We will + +218 +00:18:14,450 --> 00:18:21,170 +get different sample means, for sure. Now, suppose + +219 +00:18:21,170 --> 00:18:26,750 +I have 1000 students. I select, for example, 50 + +220 +00:18:26,750 --> 00:18:31,870 +samples, each one of size 30. Maybe the first + +221 +00:18:31,870 --> 00:18:38,010 +sample will give average of this value, 362. And I + +222 +00:18:38,010 --> 00:18:42,810 +will end with this confidence limits, lower limit + +223 +00:18:42,810 --> 00:18:51,270 +356, upper limit 368. I know that from previous + +224 +00:18:51,270 --> 00:18:56,630 +studies that the population means 368. So just ask + +225 +00:18:56,630 --> 00:19:00,530 +this question, does this interval containing + +226 +00:19:02,980 --> 00:19:09,220 +The range from 356 up to 368.13 covers mu. So in + +227 +00:19:09,220 --> 00:19:14,280 +this case, this interval continues. 
Maybe someone + +228 +00:19:14,280 --> 00:19:19,200 +select another sample of the same size, and that + +229 +00:19:19,200 --> 00:19:25,870 +sample gives sample mean of 369.5. By using this + +230 +00:19:25,870 --> 00:19:29,510 +equation, you can figure out lower limits and + +231 +00:19:29,510 --> 00:19:32,950 +upper limits. Now, again, I ask you still this + +232 +00:19:32,950 --> 00:19:36,670 +question. Does this interval contain a mu? The + +233 +00:19:36,670 --> 00:19:39,890 +answer is yes, because 368 lies between these two + +234 +00:19:39,890 --> 00:19:43,450 +values. So the first one contains a mu. The second + +235 +00:19:43,450 --> 00:19:47,010 +one contains mu. Look at the third symbol. Suppose + +236 +00:19:47,010 --> 00:19:51,210 +from my symbol, I got symbol mu to be 360. So + +237 +00:19:51,210 --> 00:19:57,240 +lower and upper limits are? 354 and upper limit + +238 +00:19:57,240 --> 00:20:06,640 +365.88 now this value 368 is outside of this + +239 +00:20:06,640 --> 00:20:11,040 +interval so maybe your interval will continue mu + +240 +00:20:11,040 --> 00:20:14,240 +or will not continue mu but you don't know + +241 +00:20:14,240 --> 00:20:17,980 +actually the value of mu so in this case you + +242 +00:20:17,980 --> 00:20:22,400 +cannot say my interval will continue mu But here, + +243 +00:20:22,460 --> 00:20:25,720 +if we know mu, you can say that this interval + +244 +00:20:25,720 --> 00:20:29,860 +contains mu, or the other one does not contain mu. + +245 +00:20:30,820 --> 00:20:35,120 +But in the real world, you don't know the value of + +246 +00:20:35,120 --> 00:20:40,060 +mu. So mu is unknown. But you can say that I am 95 + +247 +00:20:40,060 --> 00:20:45,220 +% sure that this interval will continue. Make + +248 +00:20:45,220 --> 00:20:48,260 +sense? Based on the x dot? Based on the sample + +249 +00:20:48,260 --> 00:20:54,870 +mean. And the other information we have. sigma and + +250 +00:20:54,870 --> 00:20:55,170 +N. 
+ +251 +00:20:58,610 --> 00:21:03,310 +Again, in practice, you only take one sample of + +252 +00:21:03,310 --> 00:21:05,290 +size N, so you don't need to take many, many + +253 +00:21:05,290 --> 00:21:08,750 +samples, just take one sample. In practice, you + +254 +00:21:08,750 --> 00:21:13,150 +don't know Mu, so you don't know if the interval + +255 +00:21:13,150 --> 00:21:14,690 +actually contains Mu. + +256 +00:21:18,890 --> 00:21:23,640 +However, you do know that 95% of the intervals + +257 +00:21:23,640 --> 00:21:27,460 +formed in this manner will contain mu. So again, + +258 +00:21:28,500 --> 00:21:31,220 +any interval can be constructed in this chapter + +259 +00:21:31,220 --> 00:21:37,500 +one of these. You cannot say that this interval + +260 +00:21:37,500 --> 00:21:42,460 +contains mu. You have to say that 95% of the + +261 +00:21:42,460 --> 00:21:46,360 +intervals formed in this way will capture the + +262 +00:21:46,360 --> 00:21:47,780 +population parameter mu. + +263 +00:21:51,490 --> 00:21:57,070 +Another one, thus based on one sample, you + +264 +00:21:57,070 --> 00:22:02,190 +actually selected you can be 95% confident your + +265 +00:22:02,190 --> 00:22:06,240 +interval will continue. So again, this interval + +266 +00:22:06,240 --> 00:22:12,820 +might be or might not cover Mu. Since I don't know + +267 +00:22:12,820 --> 00:22:16,000 +the value of Mu, I cannot say that this interval + +268 +00:22:16,000 --> 00:22:20,440 +will continue Mu. But you can say that 95% of the + +269 +00:22:20,440 --> 00:22:24,880 +intervals constructed in this way will continue + +270 +00:22:24,880 --> 00:22:28,800 +Mu. That's the interpretation of the confidence + +271 +00:22:28,800 --> 00:22:29,300 +interval. + +272 +00:22:32,180 --> 00:22:38,000 +99, 95, whatever it is. 
Now, the process of the + +273 +00:22:38,000 --> 00:22:43,960 +estimation is, we have a huge population, and + +274 +00:22:43,960 --> 00:22:47,420 +suppose we are interested in the population mean + +275 +00:22:47,420 --> 00:22:54,700 +mu, and mu is not given or is unknown. We may + +276 +00:22:54,700 --> 00:22:58,460 +select a random sample from this population with + +277 +00:22:58,460 --> 00:23:03,140 +any size. From this sample, we can compute the + +278 +00:23:03,140 --> 00:23:07,520 +average or the mean. Then after that, we can + +279 +00:23:07,520 --> 00:23:10,920 +construct the confidence sample. For example, + +280 +00:23:11,080 --> 00:23:15,420 +maybe I'm saying that I am 95% confident that mu + +281 +00:23:15,420 --> 00:23:19,580 +is between 4 and 16. So the process of estimation, + +282 +00:23:20,020 --> 00:23:22,980 +select a random sample from your population. This + +283 +00:23:22,980 --> 00:23:25,680 +sample should be representative in order to + +284 +00:23:25,680 --> 00:23:27,880 +generalize your results. Otherwise, you cannot do + +285 +00:23:27,880 --> 00:23:32,630 +that. So this sample should be representative. And + +286 +00:23:32,630 --> 00:23:36,370 +we, in chapter seven, we discussed four types of + +287 +00:23:36,370 --> 00:23:39,210 +probability sampling techniques. Simple random + +288 +00:23:39,210 --> 00:23:46,330 +samples, systematic, cluster, and stratified. So + +289 +00:23:46,330 --> 00:23:48,490 +by using this sample, we can compute, for example, + +290 +00:23:48,610 --> 00:23:51,750 +the sample mean. Then after that, I will show how + +291 +00:23:51,750 --> 00:23:56,470 +can we do or construct or build the confidence + +292 +00:23:56,470 --> 00:24:02,140 +interval. So this is the estimation process. 
Now, + +293 +00:24:02,620 --> 00:24:06,120 +the general formula for all confidence intervals + +294 +00:24:06,120 --> 00:24:07,340 +is, + +295 +00:24:10,120 --> 00:24:12,940 +the general formula has mainly three components, + +296 +00:24:14,060 --> 00:24:20,980 +point estimate, plus or minus, critical value, + +297 +00:24:22,080 --> 00:24:25,480 +times standard error. Let's see the definition for + +298 +00:24:25,480 --> 00:24:30,960 +each component. The first one, point estimate. is + +299 +00:24:30,960 --> 00:24:33,880 +the sample statistic estimating this population + +300 +00:24:33,880 --> 00:24:36,920 +parameter of interest. For example, if you go back + +301 +00:24:36,920 --> 00:24:45,040 +to the previous age example, and suppose X bar is + +302 +00:24:45,040 --> 00:24:50,940 +18. So you can say that the point estimate, since + +303 +00:24:50,940 --> 00:24:55,820 +18 is the sample mean, and we are interested in + +304 +00:24:55,820 --> 00:25:03,850 +the population mean, so 18 is a point estimate + +305 +00:25:03,850 --> 00:25:10,270 +for mu. This is the first one, this is the first + +306 +00:25:10,270 --> 00:25:13,250 +component. So a point estimate is the simplest + +307 +00:25:13,250 --> 00:25:15,370 +statistic, x bar is the simplest statistic. + +308 +00:25:16,570 --> 00:25:19,010 +Estimating the population parameter of interest, I + +309 +00:25:19,010 --> 00:25:22,450 +am interested suppose in the population mean, mu. + +310 +00:25:24,750 --> 00:25:26,070 +So we have estimate. + +311 +00:25:29,450 --> 00:25:32,530 +A plus or minus, a critical value. The critical + +312 +00:25:32,530 --> 00:25:36,590 +value here is a table value. That means we have to + +313 +00:25:36,590 --> 00:25:40,390 +go back to the normal table again, the one we had + +314 +00:25:40,390 --> 00:25:44,190 +discussed in Chapter 6. 
A critical value is a + +315 +00:25:44,190 --> 00:25:47,470 +table value based on the sampling distribution in + +316 +00:25:47,470 --> 00:25:50,750 +Chapter 7 of the point estimate and the desired + +317 +00:25:50,750 --> 00:25:55,430 +confidence interval. For example, let's talk about + +318 +00:25:55,430 --> 00:25:58,110 +again 90%. + +319 +00:26:02,680 --> 00:26:08,380 +Confidence is in this case. This area represents + +320 +00:26:08,380 --> 00:26:13,460 +90%. The area from lower limit to upper limit in + +321 +00:26:13,460 --> 00:26:18,580 +this area. Now again by symmetric distribution we + +322 +00:26:18,580 --> 00:26:23,720 +know that the remaining 10% is split into two + +323 +00:26:23,720 --> 00:26:29,670 +halves. 5% to the right and 5% to the left. Now + +324 +00:26:29,670 --> 00:26:33,330 +the critical value in this case is the Z-score. + +325 +00:26:35,210 --> 00:26:37,450 +Now by using the normal table, the standardized + +326 +00:26:37,450 --> 00:26:41,290 +normal table, you can figure out Z in this case is + +327 +00:26:41,290 --> 00:26:47,510 +negative. Look at the table. Look at 5%. + +328 +00:26:57,940 --> 00:27:03,560 +The value is minus 1.645, the other one is 1.645. + +329 +00:27:07,060 --> 00:27:11,460 +So that's the critical value, plus or minus a + +330 +00:27:11,460 --> 00:27:15,980 +critical value. Again, the table, if you are + +331 +00:27:15,980 --> 00:27:16,560 +looking at + +332 +00:27:28,430 --> 00:27:38,070 +You will get this value. Minus 1.6 under 4, under + +333 +00:27:38,070 --> 00:27:41,910 +5. Here we have 0045, + +334 +00:27:42,770 --> 00:27:45,750 +0055. + +335 +00:27:47,590 --> 00:27:53,030 +We have the chip. Again look at the angle of this + +336 +00:27:53,030 --> 00:27:53,270 +one. + +337 +00:28:07,280 --> 00:28:09,480 +05, 05. + +338 +00:28:36,870 --> 00:28:40,330 +So again, minus 1.6 under 4. + +339 +00:28:44,250 --> 00:28:45,230 +0505. 
+ +340 +00:29:04,750 --> 00:29:08,970 +So your answer could be either one of these + +341 +00:29:08,970 --> 00:29:10,710 +negative one point + +342 +00:29:16,450 --> 00:29:26,670 +So in this case your z score minus 1.64 or minus 1 + +343 +00:29:26,670 --> 00:29:33,930 +.65 or the average of these two values minus 1 + +344 +00:29:33,930 --> 00:29:40,780 +.645. It's better to use this one. So for 90%, 5% + +345 +00:29:40,780 --> 00:29:43,980 +to the right, 5% to the left, just use the normal + +346 +00:29:43,980 --> 00:29:46,660 +table, you will get these two values. So this is + +347 +00:29:46,660 --> 00:29:48,600 +the critical value. Time. + +348 +00:29:52,820 --> 00:29:55,900 +The standard error means the standard deviation of + +349 +00:29:55,900 --> 00:29:56,740 +the point estimate. + +350 +00:30:00,900 --> 00:30:04,020 +So the general formula for all confidence + +351 +00:30:04,020 --> 00:30:06,840 +intervals is given by this equation. Point + +352 +00:30:06,840 --> 00:30:11,730 +estimate. a plus or minus critical value times + +353 +00:30:11,730 --> 00:30:15,030 +standard error. For example, point estimate is x4, + +354 +00:30:16,050 --> 00:30:20,110 +the critical value is z-score, times the standard + +355 +00:30:20,110 --> 00:30:25,890 +error of the point estimate. So times this SE is + +356 +00:30:25,890 --> 00:30:29,470 +the standard error of the estimate. So these are + +357 +00:30:29,470 --> 00:30:33,250 +the three components in order to build any + +358 +00:30:33,250 --> 00:30:38,360 +confidence interval. Another definition here, The + +359 +00:30:38,360 --> 00:30:43,840 +confidence level. The confidence level that the + +360 +00:30:43,840 --> 00:30:46,680 +interval will contain the unknown population + +361 +00:30:46,680 --> 00:30:55,360 +parameter. For example, we can say 90%, 95%, 99%. + +362 +00:30:55,360 --> 00:31:02,620 +You never say 100%. 
So usually, the confidence + +363 +00:31:02,620 --> 00:31:07,560 +level is a percentage that is less than 100%. + +364 +00:31:13,240 --> 00:31:17,360 +Now, for example, suppose the confidence level is + +365 +00:31:17,360 --> 00:31:31,100 +95. This 95 can be written as 1 minus alpha equals + +366 +00:31:31,100 --> 00:31:36,600 +95 percent. That means the confidence here is 95 + +367 +00:31:36,600 --> 00:31:43,720 +percent, so the error Or alpha is 5%. So if the + +368 +00:31:43,720 --> 00:31:47,820 +confidence level is 90%, it means the error is + +369 +00:31:47,820 --> 00:31:51,200 +10%. It is 90. + +370 +00:31:54,420 --> 00:32:00,780 +So alpha is 10% and so on. So a relative frequency + +371 +00:32:00,780 --> 00:32:05,100 +interpretation in this case, you can see that 95% + +372 +00:32:07,560 --> 00:32:10,200 +Of all the confidence interval that can be + +373 +00:32:10,200 --> 00:32:12,340 +constructed will contain the unknown true + +374 +00:32:12,340 --> 00:32:17,740 +parameter. A specific interval either will contain + +375 +00:32:17,740 --> 00:32:20,140 +or will not contain the true parameter as we + +376 +00:32:20,140 --> 00:32:23,120 +mentioned. So if you have confidence interval with + +377 +00:32:23,120 --> 00:32:26,980 +confidence level 95%, this interval might be or + +378 +00:32:26,980 --> 00:32:31,740 +might not contain the true parameter. So you are + +379 +00:32:31,740 --> 00:32:37,140 +not sure 100% that the true parameter will contain + +380 +00:32:37,500 --> 00:32:39,860 +in the confidence interval. + +381 +00:32:41,720 --> 00:32:44,560 +So for this chapter, we are going to talk about + +382 +00:32:44,560 --> 00:32:52,020 +confidence intervals for population mean and + +383 +00:32:52,020 --> 00:32:54,940 +population proportion. For population mean, we + +384 +00:32:54,940 --> 00:32:58,600 +will have two cases, when sigma is known and sigma + +385 +00:32:58,600 --> 00:33:04,720 +is unknown. 
Now let's start with the confidence + +386 +00:33:04,720 --> 00:33:10,350 +interval for immune when sigma is given. But + +387 +00:33:10,350 --> 00:33:15,850 +again, if a mu is unknown, here we are talking + +388 +00:33:15,850 --> 00:33:19,390 +about confidence interval for mu. + +389 +00:33:22,650 --> 00:33:26,770 +Since we are talking about estimating a mu, that + +390 +00:33:26,770 --> 00:33:30,110 +means a mu is unknown. + +391 +00:33:33,190 --> 00:33:36,270 +Now the first case here we are talking about sigma + +392 +00:33:36,270 --> 00:33:37,470 +is unknown, is known. + +393 +00:33:41,990 --> 00:33:45,550 +But we know that from chapter three that sigma + +394 +00:33:45,550 --> 00:33:50,230 +square root sum x minus mu squared divided by + +395 +00:33:50,230 --> 00:33:57,970 +capital N. So in order to compute sigma, we have + +396 +00:33:57,970 --> 00:34:02,310 +to know Mu first, otherwise you cannot compute + +397 +00:34:02,310 --> 00:34:09,070 +Sigma. So how can you say that Mu is unknown and + +398 +00:34:09,070 --> 00:34:14,570 +Sigma is given or is known? So Mu should be + +399 +00:34:14,570 --> 00:34:20,790 +unknown. Or Sigma is unknown. So Sigma is known + +400 +00:34:20,790 --> 00:34:25,650 +has a big question mark. Because if Sigma is + +401 +00:34:25,650 --> 00:34:30,310 +known, that means Mu should be known. If µ is + +402 +00:34:30,310 --> 00:34:33,310 +known, you don't need to select a random sample to + +403 +00:34:33,310 --> 00:34:38,490 +estimate µ. Make sense? I know the value of µ. So + +404 +00:34:38,490 --> 00:34:42,010 +what's the goal of selecting a random sample in + +405 +00:34:42,010 --> 00:34:46,770 +order to estimate µ? You are saying sigma is + +406 +00:34:46,770 --> 00:34:51,070 +given. That means µ is given or µ is known. + +407 +00:34:54,050 --> 00:34:57,810 +What's the benefit of selecting a random sample in + +408 +00:34:57,810 --> 00:35:01,610 +order to estimate something is known? 
Always we + +409 +00:35:01,610 --> 00:35:06,810 +estimate unknown parameter. But here, the reason + +410 +00:35:06,810 --> 00:35:09,990 +why we are talking about sigma is given sometimes + +411 +00:35:09,990 --> 00:35:15,650 +from previous studies. Sigma could be given, but + +412 +00:35:15,650 --> 00:35:19,710 +we don't know the exact value of a mu. So we are + +413 +00:35:19,710 --> 00:35:24,660 +interested to estimate the population mean mu. So + +414 +00:35:24,660 --> 00:35:27,580 +it might be mu is unknown, but from previous + +415 +00:35:27,580 --> 00:35:31,500 +studies or from the history we have, maybe we know + +416 +00:35:31,500 --> 00:35:35,980 +the value of sigma. But in real life, most likely + +417 +00:35:35,980 --> 00:35:41,720 +sigma is unknown. That will be discussed next + +418 +00:35:41,720 --> 00:35:44,940 +time, inshallah. So here we'll discuss just the + +419 +00:35:44,940 --> 00:35:47,900 +confidence interval for mu when sigma is not + +420 +00:35:47,900 --> 00:35:52,380 +known. But again, if Sigma is known, a Mu should + +421 +00:35:52,380 --> 00:35:56,140 +be known. Otherwise, we cannot compute Sigma. But + +422 +00:35:56,140 --> 00:35:58,460 +here, when we are talking about Sigma is known, it + +423 +00:35:58,460 --> 00:36:01,520 +means we know from previous studies that Sigma is + +424 +00:36:01,520 --> 00:36:03,620 +given. Any question? + +425 +00:36:07,200 --> 00:36:12,420 +Okay, so the assumptions for the first case are + +426 +00:36:12,420 --> 00:36:17,460 +mainly there are two assumptions. Number one, + +427 +00:36:17,840 --> 00:36:22,140 +population standard deviation sigma is known, so + +428 +00:36:22,140 --> 00:36:25,720 +we know the value of sigma. The second assumption, + +429 +00:36:26,260 --> 00:36:31,880 +the population is normally distributed, but if the + +430 +00:36:31,880 --> 00:36:34,200 +population is not normal, you can use large + +431 +00:36:34,200 --> 00:36:37,120 +sample. 
In this case, you can apply the central + +432 +00:36:37,120 --> 00:36:39,950 +limit theorem. Then we can say that the sampling + +433 +00:36:39,950 --> 00:36:43,650 +distribution of X bar is approximately normally + +434 +00:36:43,650 --> 00:36:46,730 +distributed with mean mu and standard deviation + +435 +00:36:46,730 --> 00:36:49,890 +sigma over root N. So there are two assumptions + +436 +00:36:49,890 --> 00:36:54,530 +here. One, sigma is known because here we are + +437 +00:36:54,530 --> 00:36:58,370 +talking about sigma is known. The other one, the + +438 +00:36:58,370 --> 00:37:01,670 +population is normally distributed or N is large + +439 +00:37:01,670 --> 00:37:06,800 +if the population is not normal. In this case, The + +440 +00:37:06,800 --> 00:37:09,860 +confidence interval estimate is given by this + +441 +00:37:09,860 --> 00:37:14,480 +equation, x bar. As we mentioned before, any + +442 +00:37:14,480 --> 00:37:17,480 +confidence interval can be built by using three + +443 +00:37:17,480 --> 00:37:22,640 +components. Point estimate x bar, critical value + +444 +00:37:22,640 --> 00:37:25,140 +z, forget about alpha over two for a minute, + +445 +00:37:26,180 --> 00:37:29,200 +multiplied by the standard error of the estimate. + +446 +00:37:29,420 --> 00:37:32,140 +And we know that, we know that the standard error + +447 +00:37:32,140 --> 00:37:37,180 +of x bar is sigma over root n. Z alpha over 2 + +448 +00:37:37,180 --> 00:37:42,520 +comes from the fact that we have for example here + +449 +00:37:42,520 --> 00:37:49,380 +95% this is 1 minus alpha. 1 minus alpha is 95%. + +450 +00:37:49,380 --> 00:37:56,000 +So 5% remaining to both sides upper tail will have + +451 +00:37:56,000 --> 00:38:00,460 +for example 2.5% and lower tail the same percent 2 + +452 +00:38:00,460 --> 00:38:02,160 +.5%. + +453 +00:38:04,640 --> 00:38:07,920 +Now since 1 minus alpha equals 95%, that means + +454 +00:38:07,920 --> 00:38:13,160 +alpha is 5% for both sides. 
So this one alpha over + +455 +00:38:13,160 --> 00:38:16,220 +2, the other one is alpha over 2. So this is your + +456 +00:38:16,220 --> 00:38:18,640 +z. So z alpha over 2. + +457 +00:38:21,940 --> 00:38:27,620 +The other side plus z alpha over 2. So x bar is + +458 +00:38:27,620 --> 00:38:32,860 +the point estimate. Z alpha over 2 is the normal + +459 +00:38:32,860 --> 00:38:35,460 +distribution in critical value for probability of + +460 +00:38:35,460 --> 00:38:40,520 +alpha over 2 in each tail. And sigma over root n + +461 +00:38:40,520 --> 00:38:43,860 +is a standard error. So again, this is the first + +462 +00:38:43,860 --> 00:38:48,740 +formula we have in this chapter to construct the + +463 +00:38:48,740 --> 00:38:52,620 +confidence interval with 1 minus alpha confidence + +464 +00:38:52,620 --> 00:38:57,220 +level. So 1 minus alpha person confidence + +465 +00:38:57,220 --> 00:39:05,230 +interval. For mu is x bar plus or minus z alpha + +466 +00:39:05,230 --> 00:39:10,650 +over 2 times plus or minus sigma x bar plus or + +467 +00:39:10,650 --> 00:39:13,650 +minus z alpha over 2 times sigma over square root + +468 +00:39:13,650 --> 00:39:17,990 +of n. So this is a formula can be used in order to + +469 +00:39:17,990 --> 00:39:21,250 +construct confidence interval for the population + +470 +00:39:21,250 --> 00:39:27,390 +mean mu. The lower limit is given by x bar minus + +471 +00:39:27,390 --> 00:39:30,760 +this amount. So the lower limit, + +472 +00:39:34,740 --> 00:39:36,480 +X bar minus, + +473 +00:39:39,560 --> 00:39:41,840 +and the upper limit is X bar plus. + +474 +00:39:44,720 --> 00:39:47,880 +Upper limit. + +475 +00:39:50,880 --> 00:39:54,920 +Now the point estimate is X bar. For the lower + +476 +00:39:54,920 --> 00:40:01,530 +limit, we subtract this amount. 
from x bar for the
+
+477
+00:40:01,530 --> 00:40:09,250
+upper limit we add the same amount so
+
+478
+00:40:09,250 --> 00:40:15,970
+subtracting specific amount and adding the same
+
+479
+00:40:15,970 --> 00:40:22,650
+amount this amount later will call it margin of
+
+480
+00:40:22,650 --> 00:40:31,520
+error this will be maybe week after next week. So
+
+481
+00:40:31,520 --> 00:40:36,360
+z alpha over 2 sigma over root n is the margin of
+
+482
+00:40:36,360 --> 00:40:39,980
+error or the error because we add or subtract the
+
+483
+00:40:39,980 --> 00:40:43,660
+same value of x bar.
+
+484
+00:40:46,480 --> 00:40:54,980
+Now this slide just explains how can we compute Z
+
+485
+00:40:54,980 --> 00:41:00,820
+alpha over 2. And we did that for 90%. Now here
+
+486
+00:41:00,820 --> 00:41:07,020
+for 95% confidence level, so alpha is 5%. So Z
+
+487
+00:41:07,020 --> 00:41:12,640
+alpha over 2 in the lower tail is 1.96 minus. The
+
+488
+00:41:12,640 --> 00:41:16,460
+other one plus 1.96. So the lower confidence limit
+
+489
+00:41:16,460 --> 00:41:21,120
+minus or negative. Upper confidence limit is plus.
+
+490
+00:41:21,360 --> 00:41:25,490
+So the same values, but different signs. So I
+
+491
+00:41:25,490 --> 00:41:29,090
+think we talk about how can we find the critical
+
+492
+00:41:29,090 --> 00:41:35,530
+values of our talk. This table summarizes the
+
+493
+00:41:35,530 --> 00:41:40,990
+commonly used confidence levels. And most likely,
+
+494
+00:41:41,070 --> 00:41:48,090
+we are using 90%, 95%, 99%. So for 90%, it's
+
+495
+00:41:48,090 --> 00:41:57,080
+1.645, as we did. 95% 1.96, better to remember
+
+496
+00:41:57,080 --> 00:42:01,100
+these values. For 99, it's 2.58.
+
+497
+00:42:04,660 --> 00:42:05,900
+Now let's see.
+
+498
+00:42:08,680 --> 00:42:13,500
+As the confidence level increases. Now here the
+
+499
+00:42:13,500 --> 00:42:16,420
+confidence level. As the confidence level
+
+500
+00:42:16,420 --> 00:42:22,100
+increases. 
Look at the corresponding z value. is + +501 +00:42:22,100 --> 00:42:27,060 +also increased. So in this case, the interval + +502 +00:42:27,060 --> 00:42:32,260 +becomes wider or narrower? Wider. Because here we + +503 +00:42:32,260 --> 00:42:36,680 +subtract this value, this amount, z alpha over 2 + +504 +00:42:36,680 --> 00:42:41,520 +will increase. This is z alpha over 2. As + +505 +00:42:41,520 --> 00:42:47,140 +confidence level increases, z value increases + +506 +00:42:47,140 --> 00:42:52,700 +also. So that means this amount will go up. will + +507 +00:42:52,700 --> 00:42:56,720 +increase also. That means the confidence interval + +508 +00:42:56,720 --> 00:43:02,940 +becomes wider. So as C + +509 +00:43:02,940 --> 00:43:10,820 +level or confidence level goes up, increases, the + +510 +00:43:10,820 --> 00:43:18,300 +corresponding interval, the interval becomes + +511 +00:43:18,300 --> 00:43:20,740 +wider. + +512 +00:43:23,830 --> 00:43:26,470 +So as C level increases, the confidence becomes + +513 +00:43:26,470 --> 00:43:30,910 +wider. Vice versa, if the C level decreases, + +514 +00:43:32,110 --> 00:43:34,790 +narrower. The confidence interval becomes + +515 +00:43:34,790 --> 00:43:35,230 +narrower. + +516 +00:43:38,070 --> 00:43:41,210 +It's better to have narrower confidence interval. + +517 +00:43:42,970 --> 00:43:46,890 +So again, it's better if you remember these values + +518 +00:43:46,890 --> 00:43:50,690 +for the confidence level and the corresponding Z + +519 +00:43:50,690 --> 00:43:51,070 +value. + +520 +00:43:54,160 --> 00:43:58,980 +So that's the + +521 +00:43:58,980 --> 00:44:01,520 +sea level for different sizes. + +522 +00:44:04,060 --> 00:44:08,580 +This slide shows + +523 +00:44:08,580 --> 00:44:14,900 +that some continents intervals may be containing + +524 +00:44:14,900 --> 00:44:20,160 +mu. So the blue ones is the value of mu. The blue + +525 +00:44:20,160 --> 00:44:25,740 +ones contains mu. 
The red one does not, because mu + +526 +00:44:25,740 --> 00:44:30,360 +in this case lies outside the confidence interval. + +527 +00:44:30,740 --> 00:44:34,220 +So maybe the confidence interval you have to + +528 +00:44:34,220 --> 00:44:40,620 +construct, it might cover the mu or not. I have to + +529 +00:44:40,620 --> 00:44:43,260 +mention just one point here. It's better to say + +530 +00:44:43,260 --> 00:44:48,320 +that my confidence interval covers mu. So you can + +531 +00:44:48,320 --> 00:44:51,220 +say that I am 95% confident. + +532 +00:44:54,140 --> 00:45:03,080 +that the confidence interval contains mu, contains + +533 +00:45:03,080 --> 00:45:11,080 +the true parameter mu, rather than saying mu lies. + +534 +00:45:16,320 --> 00:45:19,140 +Because Mu is unknown. You cannot say Mu lies in + +535 +00:45:19,140 --> 00:45:22,620 +the confidence. But we can say that 95% we are + +536 +00:45:22,620 --> 00:45:29,000 +sure that my interval contains Mu. So don't say Mu + +537 +00:45:29,000 --> 00:45:32,640 +lies in this interval. So it's better to say that + +538 +00:45:32,640 --> 00:45:38,220 +we are 95% sure that my interval covers, contains + +539 +00:45:38,220 --> 00:45:39,480 +Mu. + +540 +00:45:41,240 --> 00:45:43,840 +Let's do one example. + +541 +00:45:50,590 --> 00:45:56,930 +Here we have a sample of 11 circuits from a large + +542 +00:45:56,930 --> 00:45:57,990 +normal population. + +543 +00:46:00,430 --> 00:46:06,110 +So now we have a random sample of 11. This sample + +544 +00:46:06,110 --> 00:46:09,550 +is selected from normal populations. So the first + +545 +00:46:09,550 --> 00:46:14,810 +assumption is okay. So normality is assumed to be + +546 +00:46:14,810 --> 00:46:21,900 +satisfied. Now this sample has a mean resistance + +547 +00:46:21,900 --> 00:46:27,300 +of 2.2 ohms. So again, a sample of 11 circuits + +548 +00:46:27,300 --> 00:46:30,800 +from a large normal population has a mean + +549 +00:46:30,800 --> 00:46:38,040 +resistance of 2.2 ohms. 
That means X bar is 2.2.
+
+550
+00:46:41,900 --> 00:46:49,730
+Ohms is the resistance unit. We know that From
+
+551
+00:46:49,730 --> 00:46:55,310
+past testing, it means from previous studies, we
+
+552
+00:46:55,310 --> 00:46:58,450
+know that from past testing that the population
+
+553
+00:46:58,450 --> 00:47:04,890
+standard deviation is 0.35. So sigma is 0.35. So
+
+554
+00:47:04,890 --> 00:47:08,810
+sigma is known. So the second assumption is okay.
+
+555
+00:47:11,110 --> 00:47:15,110
+But again, as we mentioned, Sigma in reality is not
+
+556
+00:47:15,110 --> 00:47:19,270
+known. But here from past testing, I mean from
+
+557
+00:47:19,270 --> 00:47:23,350
+previous knowledge, we know that sigma is 0.35.
+
+558
+00:47:23,350 --> 00:47:31,890
+Now the question is determined 95%. So C level
+
+559
+00:47:31,890 --> 00:47:41,470
+or confidence level is 95%. Determined 95%
+
+560
+00:47:41,470 --> 00:47:45,190
+confidence interval. For the true mean, true mean
+
+561
+00:47:45,190 --> 00:47:51,790
+it means population mean, resistance of the
+
+562
+00:47:51,790 --> 00:47:55,330
+population. So now the information that we have in
+
+563
+00:47:55,330 --> 00:47:59,010
+this example, we select a random sample of size 11
+
+564
+00:47:59,010 --> 00:48:03,430
+from a normal population, so normality is assumed
+
+565
+00:48:03,430 --> 00:48:09,330
+to be satisfied. This sample gives mean resistance
+
+566
+00:48:09,330 --> 00:48:15,110
+of 2.2 and we know that. The standard deviation of
+
+567
+00:48:15,110 --> 00:48:19,170
+the population is given by 0.35, and the question
+
+568
+00:48:19,170 --> 00:48:24,250
+is to determine 95% confidence interval for the
+
+569
+00:48:24,250 --> 00:48:28,590
+true mean resistance of the population. So this
+
+570
+00:48:28,590 --> 00:48:29,690
+is the information we have.
+
+571
+00:48:32,570 --> 00:48:36,390
+Now straightforward calculations will give this
+
+572
+00:48:36,390 --> 00:48:39,590
+result. 
X bar plus or minus Z alpha over 2 sigma + +573 +00:48:39,590 --> 00:48:40,230 +over 4N. + +574 +00:48:43,220 --> 00:48:49,200 +2.2 plus or minus 1.96 times sigma, which is 0.35 + +575 +00:48:49,200 --> 00:48:53,420 +divided by root 11. This will give 2.2 plus or + +576 +00:48:53,420 --> 00:48:58,660 +minus this amount. And as mentioned before, this + +577 +00:48:58,660 --> 00:49:02,760 +amount is the margin term. So just subtract this + +578 +00:49:02,760 --> 00:49:08,900 +value from 2.2, you will get 1.9932. And add this + +579 +00:49:08,900 --> 00:49:12,630 +value to 2.2, you will get this result. So Mu + +580 +00:49:12,630 --> 00:49:16,770 +greater than or equal to 1.99 all the way up to 2 + +581 +00:49:16,770 --> 00:49:19,650 +.4068. + +582 +00:49:23,950 --> 00:49:29,010 +So that's the calculations we have. So Mu is + +583 +00:49:29,010 --> 00:49:31,390 +between 95 percent. + +584 +00:49:34,830 --> 00:49:39,450 +Now let's see our interpretation for this result. + +585 +00:49:40,680 --> 00:49:43,260 +So again, straightforward calculations will give + +586 +00:49:43,260 --> 00:49:47,380 +this + +587 +00:49:47,380 --> 00:49:53,200 +result. Now, the interpretation, you should write + +588 +00:49:53,200 --> 00:49:58,260 +the following. We are 95% confident that the true + +589 +00:49:58,260 --> 00:50:04,120 +mean resistance is between these two values. Just + +590 +00:50:04,120 --> 00:50:07,940 +saying, we are 95% sure that the true mean + +591 +00:50:07,940 --> 00:50:11,920 +resistance is between these two values. Although + +592 +00:50:11,920 --> 00:50:16,720 +the true mean may or may not be in this interval, + +593 +00:50:17,640 --> 00:50:24,900 +but we are 95% of the intervals from form in this + +594 +00:50:24,900 --> 00:50:29,660 +manner will contain the true mean. 
So again, you + +595 +00:50:29,660 --> 00:50:34,320 +don't know exactly if the true mean lies in the + +596 +00:50:34,320 --> 00:50:37,980 +interval, but you could say that 95% of the + +597 +00:50:37,980 --> 00:50:43,360 +intervals formed in this way will contain the true + +598 +00:50:43,360 --> 00:50:50,780 +mean. So that's all for confidence estimation for + +599 +00:50:50,780 --> 00:50:54,840 +the population mean immune. Any questions? + +600 +00:50:59,640 --> 00:51:04,520 +Later, next time, inshallah, we'll talk about The + +601 +00:51:04,520 --> 00:51:10,500 +confidence interval when sigma is unknown. I mean, + +602 +00:51:13,740 --> 00:51:19,400 +do you ever truly know sigma? May not. In + +603 +00:51:19,400 --> 00:51:23,840 +virtually all real world business situations, + +604 +00:51:24,440 --> 00:51:31,800 +sigma is not known. If there is a situation, Where + +605 +00:51:31,800 --> 00:51:35,620 +Sigma is known, then Mu is also known. Because + +606 +00:51:35,620 --> 00:51:40,840 +since to calculate Mu, you need to know Mu. In + +607 +00:51:40,840 --> 00:51:44,860 +order to calculate Sigma, you should know the + +608 +00:51:44,860 --> 00:51:45,560 +value of Mu. + +609 +00:51:48,600 --> 00:51:52,640 +Finally, if you truly know Mu, if you know the + +610 +00:51:52,640 --> 00:51:59,680 +value of Mu, there would be no need to gather a + +611 +00:51:59,680 --> 00:52:00,560 +sample to estimate. + +612 +00:52:04,050 --> 00:52:07,610 +The value of the mean is given. You have to stop + +613 +00:52:07,610 --> 00:52:11,530 +because you don't need to select a random sample + +614 +00:52:11,530 --> 00:52:17,690 +in order to estimate the population mean. And + +615 +00:52:17,690 --> 00:52:23,550 +again, in real life, sigma is unknown. So next + +616 +00:52:23,550 --> 00:52:28,190 +time, we'll talk about confidence interval for mu + +617 +00:52:28,190 --> 00:52:34,500 +when sigma is unknown. So that's all. Fourth day. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..9919d4a847022f489aed48f1f05663a191510faf --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_OR2C5YaRyM_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 3170, "start": 7.0, "end": 31.7, "text": " So again a point estimate is just a single number or just a value. For the interval estimation in this case you have more information than using a point estimate. So confidence interval provides additional information about the spread of the or the variability of the estimate. As I mentioned before", "tokens": [407, 797, 257, 935, 12539, 307, 445, 257, 2167, 1230, 420, 445, 257, 2158, 13, 1171, 264, 15035, 35701, 294, 341, 1389, 291, 362, 544, 1589, 813, 1228, 257, 935, 12539, 13, 407, 6687, 15035, 6417, 4497, 1589, 466, 264, 3974, 295, 264, 420, 264, 35709, 295, 264, 12539, 13, 1018, 286, 2835, 949], "avg_logprob": -0.15085226785052908, "compression_ratio": 1.744186046511628, "no_speech_prob": 0.0, "words": [{"start": 7.0, "end": 7.32, "word": " So", "probability": 0.79833984375}, {"start": 7.32, "end": 7.72, "word": " again", "probability": 0.8330078125}, {"start": 7.72, "end": 8.86, "word": " a", "probability": 0.33251953125}, {"start": 8.86, "end": 9.16, "word": " point", "probability": 0.94580078125}, {"start": 9.16, "end": 9.62, "word": " estimate", "probability": 0.9111328125}, {"start": 9.62, "end": 10.8, "word": " is", "probability": 0.8818359375}, {"start": 10.8, "end": 11.12, "word": " just", "probability": 0.9130859375}, {"start": 11.12, "end": 11.4, "word": " a", "probability": 0.9931640625}, {"start": 11.4, "end": 11.8, "word": " single", "probability": 0.94970703125}, {"start": 11.8, "end": 12.34, "word": " number", "probability": 0.93359375}, {"start": 12.34, "end": 12.62, "word": " or", "probability": 0.8349609375}, {"start": 12.62, "end": 12.9, 
"word": " just", "probability": 0.890625}, {"start": 12.9, "end": 13.1, "word": " a", "probability": 0.990234375}, {"start": 13.1, "end": 13.42, "word": " value.", "probability": 0.9736328125}, {"start": 14.1, "end": 14.22, "word": " For", "probability": 0.935546875}, {"start": 14.22, "end": 14.38, "word": " the", "probability": 0.91259765625}, {"start": 14.38, "end": 14.8, "word": " interval", "probability": 0.9677734375}, {"start": 14.8, "end": 16.48, "word": " estimation", "probability": 0.974609375}, {"start": 16.48, "end": 18.08, "word": " in", "probability": 0.50927734375}, {"start": 18.08, "end": 18.34, "word": " this", "probability": 0.9501953125}, {"start": 18.34, "end": 18.62, "word": " case", "probability": 0.91357421875}, {"start": 18.62, "end": 18.8, "word": " you", "probability": 0.85791015625}, {"start": 18.8, "end": 19.18, "word": " have", "probability": 0.94091796875}, {"start": 19.18, "end": 19.68, "word": " more", "probability": 0.93603515625}, {"start": 19.68, "end": 20.28, "word": " information", "probability": 0.83447265625}, {"start": 20.28, "end": 20.66, "word": " than", "probability": 0.89404296875}, {"start": 20.66, "end": 21.18, "word": " using", "probability": 0.9140625}, {"start": 21.18, "end": 21.4, "word": " a", "probability": 0.9716796875}, {"start": 21.4, "end": 21.64, "word": " point", "probability": 0.97705078125}, {"start": 21.64, "end": 22.12, "word": " estimate.", "probability": 0.90576171875}, {"start": 22.66, "end": 22.78, "word": " So", "probability": 0.92578125}, {"start": 22.78, "end": 23.22, "word": " confidence", "probability": 0.9462890625}, {"start": 23.22, "end": 23.62, "word": " interval", "probability": 0.966796875}, {"start": 23.62, "end": 24.2, "word": " provides", "probability": 0.9208984375}, {"start": 24.2, "end": 24.64, "word": " additional", "probability": 0.9013671875}, {"start": 24.64, "end": 25.14, "word": " information", "probability": 0.85009765625}, {"start": 25.14, "end": 25.6, "word": " about", 
"probability": 0.90185546875}, {"start": 25.6, "end": 26.42, "word": " the", "probability": 0.90576171875}, {"start": 26.42, "end": 26.9, "word": " spread", "probability": 0.92529296875}, {"start": 26.9, "end": 27.3, "word": " of", "probability": 0.77001953125}, {"start": 27.3, "end": 27.54, "word": " the", "probability": 0.7783203125}, {"start": 27.54, "end": 27.8, "word": " or", "probability": 0.70751953125}, {"start": 27.8, "end": 27.98, "word": " the", "probability": 0.84765625}, {"start": 27.98, "end": 28.32, "word": " variability", "probability": 0.97119140625}, {"start": 28.32, "end": 28.82, "word": " of", "probability": 0.9677734375}, {"start": 28.82, "end": 29.06, "word": " the", "probability": 0.91845703125}, {"start": 29.06, "end": 29.6, "word": " estimate.", "probability": 0.89599609375}, {"start": 30.56, "end": 30.88, "word": " As", "probability": 0.95751953125}, {"start": 30.88, "end": 30.98, "word": " I", "probability": 0.93408203125}, {"start": 30.98, "end": 31.24, "word": " mentioned", "probability": 0.8291015625}, {"start": 31.24, "end": 31.7, "word": " before", "probability": 0.86083984375}], "temperature": 1.0}, {"id": 2, "seek": 6211, "start": 33.17, "end": 62.11, "text": " There are two parameters of interest in this chapter. The first one is the population mean mu. In this case, we can estimate the population mean mu by using the sample mean x bar. The other parameter is the proportion. The population proportion is pi. And the point estimates P. So P is a point estimate of the population proportion pi. 
So again, this chapter talks about", "tokens": [821, 366, 732, 9834, 295, 1179, 294, 341, 7187, 13, 440, 700, 472, 307, 264, 4415, 914, 2992, 13, 682, 341, 1389, 11, 321, 393, 12539, 264, 4415, 914, 2992, 538, 1228, 264, 6889, 914, 2031, 2159, 13, 440, 661, 13075, 307, 264, 16068, 13, 440, 4415, 16068, 307, 3895, 13, 400, 264, 935, 20561, 430, 13, 407, 430, 307, 257, 935, 12539, 295, 264, 4415, 16068, 3895, 13, 407, 797, 11, 341, 7187, 6686, 466], "avg_logprob": -0.19541396568347882, "compression_ratio": 1.9893048128342246, "no_speech_prob": 0.0, "words": [{"start": 33.17, "end": 33.57, "word": " There", "probability": 0.4931640625}, {"start": 33.57, "end": 33.73, "word": " are", "probability": 0.93798828125}, {"start": 33.73, "end": 33.89, "word": " two", "probability": 0.9248046875}, {"start": 33.89, "end": 34.29, "word": " parameters", "probability": 0.96875}, {"start": 34.29, "end": 34.59, "word": " of", "probability": 0.96533203125}, {"start": 34.59, "end": 35.01, "word": " interest", "probability": 0.9052734375}, {"start": 35.01, "end": 35.21, "word": " in", "probability": 0.9306640625}, {"start": 35.21, "end": 35.39, "word": " this", "probability": 0.9326171875}, {"start": 35.39, "end": 35.69, "word": " chapter.", "probability": 0.841796875}, {"start": 36.25, "end": 36.55, "word": " The", "probability": 0.88623046875}, {"start": 36.55, "end": 36.83, "word": " first", "probability": 0.888671875}, {"start": 36.83, "end": 37.07, "word": " one", "probability": 0.92578125}, {"start": 37.07, "end": 37.23, "word": " is", "probability": 0.943359375}, {"start": 37.23, "end": 37.35, "word": " the", "probability": 0.89208984375}, {"start": 37.35, "end": 37.77, "word": " population", "probability": 0.94921875}, {"start": 37.77, "end": 38.01, "word": " mean", "probability": 0.62548828125}, {"start": 38.01, "end": 38.25, "word": " mu.", "probability": 0.3525390625}, {"start": 39.17, "end": 39.51, "word": " In", "probability": 0.9287109375}, {"start": 39.51, "end": 39.73, 
"word": " this", "probability": 0.94384765625}, {"start": 39.73, "end": 39.97, "word": " case,", "probability": 0.91259765625}, {"start": 40.03, "end": 40.15, "word": " we", "probability": 0.94775390625}, {"start": 40.15, "end": 40.37, "word": " can", "probability": 0.943359375}, {"start": 40.37, "end": 41.07, "word": " estimate", "probability": 0.92578125}, {"start": 41.07, "end": 41.67, "word": " the", "probability": 0.89892578125}, {"start": 41.67, "end": 42.11, "word": " population", "probability": 0.955078125}, {"start": 42.11, "end": 42.37, "word": " mean", "probability": 0.8994140625}, {"start": 42.37, "end": 42.59, "word": " mu", "probability": 0.60888671875}, {"start": 42.59, "end": 42.83, "word": " by", "probability": 0.96044921875}, {"start": 42.83, "end": 43.31, "word": " using", "probability": 0.9228515625}, {"start": 43.31, "end": 44.47, "word": " the", "probability": 0.47998046875}, {"start": 44.47, "end": 44.81, "word": " sample", "probability": 0.50341796875}, {"start": 44.81, "end": 45.11, "word": " mean", "probability": 0.94970703125}, {"start": 45.11, "end": 45.33, "word": " x", "probability": 0.62841796875}, {"start": 45.33, "end": 45.69, "word": " bar.", "probability": 0.73828125}, {"start": 46.83, "end": 47.51, "word": " The", "probability": 0.89111328125}, {"start": 47.51, "end": 47.85, "word": " other", "probability": 0.89404296875}, {"start": 47.85, "end": 48.71, "word": " parameter", "probability": 0.9287109375}, {"start": 48.71, "end": 49.19, "word": " is", "probability": 0.94775390625}, {"start": 49.19, "end": 49.77, "word": " the", "probability": 0.92333984375}, {"start": 49.77, "end": 50.31, "word": " proportion.", "probability": 0.82568359375}, {"start": 51.03, "end": 51.21, "word": " The", "probability": 0.8828125}, {"start": 51.21, "end": 51.65, "word": " population", "probability": 0.95458984375}, {"start": 51.65, "end": 52.13, "word": " proportion", "probability": 0.82421875}, {"start": 52.13, "end": 52.49, "word": " is", 
"probability": 0.9501953125}, {"start": 52.49, "end": 52.77, "word": " pi.", "probability": 0.3984375}, {"start": 53.39, "end": 53.81, "word": " And", "probability": 0.93798828125}, {"start": 53.81, "end": 53.95, "word": " the", "probability": 0.900390625}, {"start": 53.95, "end": 54.19, "word": " point", "probability": 0.97216796875}, {"start": 54.19, "end": 54.67, "word": " estimates", "probability": 0.70703125}, {"start": 54.67, "end": 55.11, "word": " P.", "probability": 0.38916015625}, {"start": 55.51, "end": 55.67, "word": " So", "probability": 0.951171875}, {"start": 55.67, "end": 55.85, "word": " P", "probability": 0.74072265625}, {"start": 55.85, "end": 56.01, "word": " is", "probability": 0.94775390625}, {"start": 56.01, "end": 56.13, "word": " a", "probability": 0.78076171875}, {"start": 56.13, "end": 56.35, "word": " point", "probability": 0.97705078125}, {"start": 56.35, "end": 56.85, "word": " estimate", "probability": 0.9423828125}, {"start": 56.85, "end": 57.35, "word": " of", "probability": 0.96923828125}, {"start": 57.35, "end": 57.47, "word": " the", "probability": 0.89794921875}, {"start": 57.47, "end": 57.89, "word": " population", "probability": 0.95947265625}, {"start": 57.89, "end": 58.41, "word": " proportion", "probability": 0.83349609375}, {"start": 58.41, "end": 59.27, "word": " pi.", "probability": 0.61328125}, {"start": 59.65, "end": 59.83, "word": " So", "probability": 0.95947265625}, {"start": 59.83, "end": 60.11, "word": " again,", "probability": 0.88671875}, {"start": 60.85, "end": 61.13, "word": " this", "probability": 0.9453125}, {"start": 61.13, "end": 61.41, "word": " chapter", "probability": 0.853515625}, {"start": 61.41, "end": 61.77, "word": " talks", "probability": 0.87158203125}, {"start": 61.77, "end": 62.11, "word": " about", "probability": 0.90234375}], "temperature": 1.0}, {"id": 3, "seek": 8259, "start": 63.41, "end": 82.59, "text": " confidence interval estimation. 
And as we mentioned that there are two types of estimation. One is called point estimation. And for example, the average or the mean of age is 18 years. So 18 is the point estimate.", "tokens": [6687, 15035, 35701, 13, 400, 382, 321, 2835, 300, 456, 366, 732, 3467, 295, 35701, 13, 1485, 307, 1219, 935, 35701, 13, 400, 337, 1365, 11, 264, 4274, 420, 264, 914, 295, 3205, 307, 2443, 924, 13, 407, 2443, 307, 264, 935, 12539, 13], "avg_logprob": -0.16857639021343654, "compression_ratio": 1.562043795620438, "no_speech_prob": 0.0, "words": [{"start": 63.41, "end": 64.03, "word": " confidence", "probability": 0.5458984375}, {"start": 64.03, "end": 64.43, "word": " interval", "probability": 0.93359375}, {"start": 64.43, "end": 65.01, "word": " estimation.", "probability": 0.970703125}, {"start": 66.63, "end": 66.95, "word": " And", "probability": 0.8740234375}, {"start": 66.95, "end": 67.71, "word": " as", "probability": 0.81640625}, {"start": 67.71, "end": 67.91, "word": " we", "probability": 0.962890625}, {"start": 67.91, "end": 68.33, "word": " mentioned", "probability": 0.83251953125}, {"start": 68.33, "end": 68.63, "word": " that", "probability": 0.41748046875}, {"start": 68.63, "end": 68.85, "word": " there", "probability": 0.88330078125}, {"start": 68.85, "end": 69.01, "word": " are", "probability": 0.9453125}, {"start": 69.01, "end": 69.19, "word": " two", "probability": 0.94091796875}, {"start": 69.19, "end": 69.55, "word": " types", "probability": 0.806640625}, {"start": 69.55, "end": 69.71, "word": " of", "probability": 0.9658203125}, {"start": 69.71, "end": 70.09, "word": " estimation.", "probability": 0.9697265625}, {"start": 70.25, "end": 70.35, "word": " One", "probability": 0.91943359375}, {"start": 70.35, "end": 70.51, "word": " is", "probability": 0.93896484375}, {"start": 70.51, "end": 70.79, "word": " called", "probability": 0.87841796875}, {"start": 70.79, "end": 71.13, "word": " point", "probability": 0.94140625}, {"start": 71.13, "end": 71.71, "word": 
" estimation.", "probability": 0.95166015625}, {"start": 73.75, "end": 74.13, "word": " And", "probability": 0.888671875}, {"start": 74.13, "end": 74.47, "word": " for", "probability": 0.783203125}, {"start": 74.47, "end": 74.95, "word": " example,", "probability": 0.97412109375}, {"start": 76.27, "end": 76.51, "word": " the", "probability": 0.89794921875}, {"start": 76.51, "end": 77.29, "word": " average", "probability": 0.845703125}, {"start": 77.29, "end": 77.81, "word": " or", "probability": 0.7705078125}, {"start": 77.81, "end": 78.27, "word": " the", "probability": 0.92578125}, {"start": 78.27, "end": 78.93, "word": " mean", "probability": 0.822265625}, {"start": 78.93, "end": 79.21, "word": " of", "probability": 0.9248046875}, {"start": 79.21, "end": 79.41, "word": " age", "probability": 0.73388671875}, {"start": 79.41, "end": 79.57, "word": " is", "probability": 0.9306640625}, {"start": 79.57, "end": 79.87, "word": " 18", "probability": 0.8994140625}, {"start": 79.87, "end": 80.25, "word": " years.", "probability": 0.92041015625}, {"start": 80.51, "end": 80.71, "word": " So", "probability": 0.962890625}, {"start": 80.71, "end": 80.95, "word": " 18", "probability": 0.7705078125}, {"start": 80.95, "end": 81.19, "word": " is", "probability": 0.93701171875}, {"start": 81.19, "end": 81.53, "word": " the", "probability": 0.9208984375}, {"start": 81.53, "end": 82.07, "word": " point", "probability": 0.97509765625}, {"start": 82.07, "end": 82.59, "word": " estimate.", "probability": 0.935546875}], "temperature": 1.0}, {"id": 4, "seek": 11085, "start": 84.17, "end": 110.85, "text": " On the other hand, the other type of the confidence is the confidence interval estimation. So rather than saying that the average is 18 years, you could say that we are 95% sure that the average of student ages range between 17 up to 19 years. 
And again, the confidence interval gives more information", "tokens": [1282, 264, 661, 1011, 11, 264, 661, 2010, 295, 264, 6687, 307, 264, 6687, 15035, 35701, 13, 407, 2831, 813, 1566, 300, 264, 4274, 307, 2443, 924, 11, 291, 727, 584, 300, 321, 366, 13420, 4, 988, 300, 264, 4274, 295, 3107, 12357, 3613, 1296, 3282, 493, 281, 1294, 924, 13, 400, 797, 11, 264, 6687, 15035, 2709, 544, 1589], "avg_logprob": -0.12448771078078473, "compression_ratio": 1.7159090909090908, "no_speech_prob": 0.0, "words": [{"start": 84.17, "end": 84.43, "word": " On", "probability": 0.87939453125}, {"start": 84.43, "end": 84.53, "word": " the", "probability": 0.912109375}, {"start": 84.53, "end": 84.73, "word": " other", "probability": 0.8857421875}, {"start": 84.73, "end": 85.03, "word": " hand,", "probability": 0.892578125}, {"start": 85.13, "end": 85.17, "word": " the", "probability": 0.88671875}, {"start": 85.17, "end": 85.37, "word": " other", "probability": 0.89306640625}, {"start": 85.37, "end": 85.85, "word": " type", "probability": 0.9775390625}, {"start": 85.85, "end": 86.35, "word": " of", "probability": 0.794921875}, {"start": 86.35, "end": 86.75, "word": " the", "probability": 0.73095703125}, {"start": 86.75, "end": 87.35, "word": " confidence", "probability": 0.97119140625}, {"start": 87.35, "end": 87.91, "word": " is", "probability": 0.77392578125}, {"start": 87.91, "end": 88.17, "word": " the", "probability": 0.91455078125}, {"start": 88.17, "end": 88.55, "word": " confidence", "probability": 0.9423828125}, {"start": 88.55, "end": 89.13, "word": " interval", "probability": 0.96435546875}, {"start": 89.13, "end": 89.69, "word": " estimation.", "probability": 0.96875}, {"start": 90.81, "end": 90.97, "word": " So", "probability": 0.9384765625}, {"start": 90.97, "end": 91.29, "word": " rather", "probability": 0.8798828125}, {"start": 91.29, "end": 91.57, "word": " than", "probability": 0.60498046875}, {"start": 91.57, "end": 91.91, "word": " saying", "probability": 
0.8173828125}, {"start": 91.91, "end": 92.27, "word": " that", "probability": 0.912109375}, {"start": 92.27, "end": 92.47, "word": " the", "probability": 0.88818359375}, {"start": 92.47, "end": 92.87, "word": " average", "probability": 0.79443359375}, {"start": 92.87, "end": 93.43, "word": " is", "probability": 0.939453125}, {"start": 93.43, "end": 93.79, "word": " 18", "probability": 0.8740234375}, {"start": 93.79, "end": 94.13, "word": " years,", "probability": 0.923828125}, {"start": 94.23, "end": 94.39, "word": " you", "probability": 0.95068359375}, {"start": 94.39, "end": 94.63, "word": " could", "probability": 0.89013671875}, {"start": 94.63, "end": 94.89, "word": " say", "probability": 0.94384765625}, {"start": 94.89, "end": 95.23, "word": " that", "probability": 0.92724609375}, {"start": 95.23, "end": 96.83, "word": " we", "probability": 0.73046875}, {"start": 96.83, "end": 97.07, "word": " are", "probability": 0.93896484375}, {"start": 97.07, "end": 97.49, "word": " 95", "probability": 0.974609375}, {"start": 97.49, "end": 97.91, "word": "%", "probability": 0.93896484375}, {"start": 97.91, "end": 98.33, "word": " sure", "probability": 0.9072265625}, {"start": 98.33, "end": 98.69, "word": " that", "probability": 0.93359375}, {"start": 98.69, "end": 99.27, "word": " the", "probability": 0.91552734375}, {"start": 99.27, "end": 99.79, "word": " average", "probability": 0.78271484375}, {"start": 99.79, "end": 101.07, "word": " of", "probability": 0.8427734375}, {"start": 101.07, "end": 101.95, "word": " student", "probability": 0.85888671875}, {"start": 101.95, "end": 102.71, "word": " ages", "probability": 0.90869140625}, {"start": 102.71, "end": 103.53, "word": " range", "probability": 0.83154296875}, {"start": 103.53, "end": 103.93, "word": " between", "probability": 0.87109375}, {"start": 103.93, "end": 104.61, "word": " 17", "probability": 0.95068359375}, {"start": 104.61, "end": 105.65, "word": " up", "probability": 0.94384765625}, {"start": 105.65, 
"end": 105.97, "word": " to", "probability": 0.974609375}, {"start": 105.97, "end": 106.51, "word": " 19", "probability": 0.97216796875}, {"start": 106.51, "end": 106.89, "word": " years.", "probability": 0.92626953125}, {"start": 107.23, "end": 107.43, "word": " And", "probability": 0.94970703125}, {"start": 107.43, "end": 107.73, "word": " again,", "probability": 0.91650390625}, {"start": 107.97, "end": 108.17, "word": " the", "probability": 0.919921875}, {"start": 108.17, "end": 108.55, "word": " confidence", "probability": 0.97607421875}, {"start": 108.55, "end": 109.01, "word": " interval", "probability": 0.9677734375}, {"start": 109.01, "end": 109.39, "word": " gives", "probability": 0.90673828125}, {"start": 109.39, "end": 109.77, "word": " more", "probability": 0.9345703125}, {"start": 109.77, "end": 110.85, "word": " information", "probability": 0.8388671875}], "temperature": 1.0}, {"id": 5, "seek": 13413, "start": 112.03, "end": 134.13, "text": " about the variability of the data. And again, there are two types of parameters we are interested in. One is called the population mean, and the point estimate is the sample mean. The other parameter is the population of a portion of Y, and the point estimate is P. 
So again,", "tokens": [466, 264, 35709, 295, 264, 1412, 13, 400, 797, 11, 456, 366, 732, 3467, 295, 9834, 321, 366, 3102, 294, 13, 1485, 307, 1219, 264, 4415, 914, 11, 293, 264, 935, 12539, 307, 264, 6889, 914, 13, 440, 661, 13075, 307, 264, 4415, 295, 257, 8044, 295, 398, 11, 293, 264, 935, 12539, 307, 430, 13, 407, 797, 11], "avg_logprob": -0.13411457737286886, "compression_ratio": 1.7468354430379747, "no_speech_prob": 0.0, "words": [{"start": 112.03, "end": 112.39, "word": " about", "probability": 0.42578125}, {"start": 112.39, "end": 112.75, "word": " the", "probability": 0.8779296875}, {"start": 112.75, "end": 113.07, "word": " variability", "probability": 0.9677734375}, {"start": 113.07, "end": 113.41, "word": " of", "probability": 0.96337890625}, {"start": 113.41, "end": 113.55, "word": " the", "probability": 0.90576171875}, {"start": 113.55, "end": 113.81, "word": " data.", "probability": 0.9404296875}, {"start": 114.39, "end": 114.53, "word": " And", "probability": 0.9208984375}, {"start": 114.53, "end": 114.73, "word": " again,", "probability": 0.87939453125}, {"start": 114.83, "end": 114.89, "word": " there", "probability": 0.90625}, {"start": 114.89, "end": 115.01, "word": " are", "probability": 0.9443359375}, {"start": 115.01, "end": 115.17, "word": " two", "probability": 0.9287109375}, {"start": 115.17, "end": 115.65, "word": " types", "probability": 0.822265625}, {"start": 115.65, "end": 116.31, "word": " of", "probability": 0.95751953125}, {"start": 116.31, "end": 116.71, "word": " parameters", "probability": 0.9638671875}, {"start": 116.71, "end": 116.97, "word": " we", "probability": 0.93115234375}, {"start": 116.97, "end": 117.11, "word": " are", "probability": 0.9345703125}, {"start": 117.11, "end": 117.55, "word": " interested", "probability": 0.84912109375}, {"start": 117.55, "end": 117.83, "word": " in.", "probability": 0.943359375}, {"start": 117.87, "end": 117.99, "word": " One", "probability": 0.9169921875}, {"start": 117.99, "end": 
118.13, "word": " is", "probability": 0.93212890625}, {"start": 118.13, "end": 118.39, "word": " called", "probability": 0.84033203125}, {"start": 118.39, "end": 118.55, "word": " the", "probability": 0.86083984375}, {"start": 118.55, "end": 118.91, "word": " population", "probability": 0.89013671875}, {"start": 118.91, "end": 119.25, "word": " mean,", "probability": 0.97705078125}, {"start": 119.97, "end": 120.13, "word": " and", "probability": 0.9326171875}, {"start": 120.13, "end": 120.27, "word": " the", "probability": 0.90869140625}, {"start": 120.27, "end": 120.47, "word": " point", "probability": 0.96875}, {"start": 120.47, "end": 120.87, "word": " estimate", "probability": 0.87353515625}, {"start": 120.87, "end": 121.37, "word": " is", "probability": 0.94384765625}, {"start": 121.37, "end": 121.55, "word": " the", "probability": 0.92578125}, {"start": 121.55, "end": 121.79, "word": " sample", "probability": 0.88720703125}, {"start": 121.79, "end": 122.05, "word": " mean.", "probability": 0.9658203125}, {"start": 123.07, "end": 123.33, "word": " The", "probability": 0.85498046875}, {"start": 123.33, "end": 123.61, "word": " other", "probability": 0.89453125}, {"start": 123.61, "end": 124.35, "word": " parameter", "probability": 0.94091796875}, {"start": 124.35, "end": 124.57, "word": " is", "probability": 0.935546875}, {"start": 124.57, "end": 124.69, "word": " the", "probability": 0.8974609375}, {"start": 124.69, "end": 125.05, "word": " population", "probability": 0.955078125}, {"start": 125.05, "end": 125.27, "word": " of", "probability": 0.83251953125}, {"start": 125.27, "end": 125.37, "word": " a", "probability": 0.6474609375}, {"start": 125.37, "end": 125.65, "word": " portion", "probability": 0.90869140625}, {"start": 125.65, "end": 125.87, "word": " of", "probability": 0.818359375}, {"start": 125.87, "end": 126.11, "word": " Y,", "probability": 0.845703125}, {"start": 126.53, "end": 126.75, "word": " and", "probability": 0.93505859375}, {"start": 
126.75, "end": 126.89, "word": " the", "probability": 0.9150390625}, {"start": 126.89, "end": 127.15, "word": " point", "probability": 0.97265625}, {"start": 127.15, "end": 127.61, "word": " estimate", "probability": 0.9423828125}, {"start": 127.61, "end": 128.89, "word": " is", "probability": 0.9443359375}, {"start": 128.89, "end": 129.31, "word": " P.", "probability": 0.900390625}, {"start": 133.25, "end": 133.77, "word": " So", "probability": 0.94775390625}, {"start": 133.77, "end": 134.13, "word": " again,", "probability": 0.81396484375}], "temperature": 1.0}, {"id": 6, "seek": 15141, "start": 134.61, "end": 151.41, "text": " An interval estimate provides more information about a population characteristic than does a point estimate. Because if you go back a little bit to the previous example, here we calculated the sample mean to be with just one point. So the average of your reagents is 18 years.", "tokens": [1107, 15035, 12539, 6417, 544, 1589, 466, 257, 4415, 16282, 813, 775, 257, 935, 12539, 13, 1436, 498, 291, 352, 646, 257, 707, 857, 281, 264, 3894, 1365, 11, 510, 321, 15598, 264, 6889, 914, 281, 312, 365, 445, 472, 935, 13, 407, 264, 4274, 295, 428, 26949, 791, 307, 2443, 924, 13], "avg_logprob": -0.16550925898331184, "compression_ratio": 1.4972972972972973, "no_speech_prob": 0.0, "words": [{"start": 134.61, "end": 134.99, "word": " An", "probability": 0.828125}, {"start": 134.99, "end": 135.37, "word": " interval", "probability": 0.94873046875}, {"start": 135.37, "end": 135.89, "word": " estimate", "probability": 0.85009765625}, {"start": 135.89, "end": 136.51, "word": " provides", "probability": 0.86474609375}, {"start": 136.51, "end": 136.83, "word": " more", "probability": 0.92822265625}, {"start": 136.83, "end": 137.47, "word": " information", "probability": 0.84375}, {"start": 137.47, "end": 138.09, "word": " about", "probability": 0.8935546875}, {"start": 138.09, "end": 138.83, "word": " a", "probability": 0.92138671875}, {"start": 138.83, "end": 
139.35, "word": " population", "probability": 0.97119140625}, {"start": 139.35, "end": 140.15, "word": " characteristic", "probability": 0.90478515625}, {"start": 140.15, "end": 140.49, "word": " than", "probability": 0.93505859375}, {"start": 140.49, "end": 140.75, "word": " does", "probability": 0.908203125}, {"start": 140.75, "end": 140.87, "word": " a", "probability": 0.96923828125}, {"start": 140.87, "end": 141.07, "word": " point", "probability": 0.97705078125}, {"start": 141.07, "end": 141.59, "word": " estimate.", "probability": 0.9072265625}, {"start": 142.13, "end": 142.47, "word": " Because", "probability": 0.861328125}, {"start": 142.47, "end": 142.61, "word": " if", "probability": 0.8154296875}, {"start": 142.61, "end": 142.73, "word": " you", "probability": 0.88720703125}, {"start": 142.73, "end": 142.87, "word": " go", "probability": 0.9443359375}, {"start": 142.87, "end": 143.09, "word": " back", "probability": 0.87841796875}, {"start": 143.09, "end": 143.21, "word": " a", "probability": 0.75927734375}, {"start": 143.21, "end": 143.33, "word": " little", "probability": 0.85107421875}, {"start": 143.33, "end": 143.55, "word": " bit", "probability": 0.93017578125}, {"start": 143.55, "end": 143.65, "word": " to", "probability": 0.9443359375}, {"start": 143.65, "end": 143.79, "word": " the", "probability": 0.9140625}, {"start": 143.79, "end": 144.05, "word": " previous", "probability": 0.84814453125}, {"start": 144.05, "end": 144.61, "word": " example,", "probability": 0.9716796875}, {"start": 145.05, "end": 145.25, "word": " here", "probability": 0.8525390625}, {"start": 145.25, "end": 145.51, "word": " we", "probability": 0.92626953125}, {"start": 145.51, "end": 146.95, "word": " calculated", "probability": 0.92578125}, {"start": 146.95, "end": 147.21, "word": " the", "probability": 0.7841796875}, {"start": 147.21, "end": 147.45, "word": " sample", "probability": 0.92578125}, {"start": 147.45, "end": 147.77, "word": " mean", "probability": 
0.9345703125}, {"start": 147.77, "end": 147.97, "word": " to", "probability": 0.9541015625}, {"start": 147.97, "end": 148.09, "word": " be", "probability": 0.94970703125}, {"start": 148.09, "end": 148.27, "word": " with", "probability": 0.6279296875}, {"start": 148.27, "end": 148.45, "word": " just", "probability": 0.90869140625}, {"start": 148.45, "end": 148.65, "word": " one", "probability": 0.8720703125}, {"start": 148.65, "end": 149.01, "word": " point.", "probability": 0.9765625}, {"start": 149.37, "end": 149.53, "word": " So", "probability": 0.900390625}, {"start": 149.53, "end": 149.65, "word": " the", "probability": 0.7392578125}, {"start": 149.65, "end": 149.93, "word": " average", "probability": 0.79150390625}, {"start": 149.93, "end": 150.09, "word": " of", "probability": 0.25732421875}, {"start": 150.09, "end": 150.19, "word": " your", "probability": 0.78173828125}, {"start": 150.19, "end": 150.57, "word": " reagents", "probability": 0.697509765625}, {"start": 150.57, "end": 150.77, "word": " is", "probability": 0.916015625}, {"start": 150.77, "end": 151.05, "word": " 18", "probability": 0.87548828125}, {"start": 151.05, "end": 151.41, "word": " years.", "probability": 0.9384765625}], "temperature": 1.0}, {"id": 7, "seek": 17675, "start": 152.63, "end": 176.75, "text": " But the other one, I am sure that for 95% that your ages range between, for example, 17 and 19. So in this case, we have more information. Such interval estimates are called confidence intervals. And we are going to use this notation, CI. 
CI stands for confidence interval estimation.", "tokens": [583, 264, 661, 472, 11, 286, 669, 988, 300, 337, 13420, 4, 300, 428, 12357, 3613, 1296, 11, 337, 1365, 11, 3282, 293, 1294, 13, 407, 294, 341, 1389, 11, 321, 362, 544, 1589, 13, 9653, 15035, 20561, 366, 1219, 6687, 26651, 13, 400, 321, 366, 516, 281, 764, 341, 24657, 11, 37777, 13, 37777, 7382, 337, 6687, 15035, 35701, 13], "avg_logprob": -0.1370967720304766, "compression_ratio": 1.5159574468085106, "no_speech_prob": 0.0, "words": [{"start": 152.63, "end": 152.89, "word": " But", "probability": 0.84521484375}, {"start": 152.89, "end": 153.07, "word": " the", "probability": 0.83642578125}, {"start": 153.07, "end": 153.31, "word": " other", "probability": 0.89013671875}, {"start": 153.31, "end": 153.59, "word": " one,", "probability": 0.92626953125}, {"start": 153.73, "end": 153.81, "word": " I", "probability": 0.990234375}, {"start": 153.81, "end": 153.95, "word": " am", "probability": 0.830078125}, {"start": 153.95, "end": 154.15, "word": " sure", "probability": 0.919921875}, {"start": 154.15, "end": 154.41, "word": " that", "probability": 0.89111328125}, {"start": 154.41, "end": 155.69, "word": " for", "probability": 0.71240234375}, {"start": 155.69, "end": 156.13, "word": " 95", "probability": 0.92041015625}, {"start": 156.13, "end": 156.59, "word": "%", "probability": 0.7080078125}, {"start": 156.59, "end": 156.85, "word": " that", "probability": 0.75732421875}, {"start": 156.85, "end": 157.05, "word": " your", "probability": 0.87255859375}, {"start": 157.05, "end": 157.51, "word": " ages", "probability": 0.87548828125}, {"start": 157.51, "end": 158.37, "word": " range", "probability": 0.82421875}, {"start": 158.37, "end": 158.87, "word": " between,", "probability": 0.8603515625}, {"start": 159.67, "end": 159.83, "word": " for", "probability": 0.9384765625}, {"start": 159.83, "end": 160.15, "word": " example,", "probability": 0.97705078125}, {"start": 160.23, "end": 160.55, "word": " 17", "probability": 
0.85205078125}, {"start": 160.55, "end": 160.71, "word": " and", "probability": 0.90966796875}, {"start": 160.71, "end": 161.07, "word": " 19.", "probability": 0.9775390625}, {"start": 161.41, "end": 161.63, "word": " So", "probability": 0.94677734375}, {"start": 161.63, "end": 161.73, "word": " in", "probability": 0.8017578125}, {"start": 161.73, "end": 161.89, "word": " this", "probability": 0.9453125}, {"start": 161.89, "end": 162.11, "word": " case,", "probability": 0.912109375}, {"start": 162.15, "end": 162.27, "word": " we", "probability": 0.9609375}, {"start": 162.27, "end": 162.47, "word": " have", "probability": 0.943359375}, {"start": 162.47, "end": 162.73, "word": " more", "probability": 0.9326171875}, {"start": 162.73, "end": 163.23, "word": " information.", "probability": 0.837890625}, {"start": 163.87, "end": 164.15, "word": " Such", "probability": 0.8798828125}, {"start": 164.15, "end": 164.61, "word": " interval", "probability": 0.87255859375}, {"start": 164.61, "end": 164.99, "word": " estimates", "probability": 0.86865234375}, {"start": 164.99, "end": 165.37, "word": " are", "probability": 0.94091796875}, {"start": 165.37, "end": 165.69, "word": " called", "probability": 0.888671875}, {"start": 165.69, "end": 166.19, "word": " confidence", "probability": 0.9521484375}, {"start": 166.19, "end": 166.87, "word": " intervals.", "probability": 0.8798828125}, {"start": 167.45, "end": 167.77, "word": " And", "probability": 0.9384765625}, {"start": 167.77, "end": 167.87, "word": " we", "probability": 0.93408203125}, {"start": 167.87, "end": 167.99, "word": " are", "probability": 0.9208984375}, {"start": 167.99, "end": 168.27, "word": " going", "probability": 0.943359375}, {"start": 168.27, "end": 168.51, "word": " to", "probability": 0.96728515625}, {"start": 168.51, "end": 168.73, "word": " use", "probability": 0.87158203125}, {"start": 168.73, "end": 169.01, "word": " this", "probability": 0.93408203125}, {"start": 169.01, "end": 169.47, "word": " 
notation,", "probability": 0.9609375}, {"start": 169.67, "end": 169.89, "word": " CI.", "probability": 0.67626953125}, {"start": 171.35, "end": 171.91, "word": " CI", "probability": 0.97802734375}, {"start": 171.91, "end": 172.59, "word": " stands", "probability": 0.7783203125}, {"start": 172.59, "end": 173.09, "word": " for", "probability": 0.95166015625}, {"start": 173.09, "end": 173.65, "word": " confidence", "probability": 0.83056640625}, {"start": 173.65, "end": 174.83, "word": " interval", "probability": 0.958984375}, {"start": 174.83, "end": 176.75, "word": " estimation.", "probability": 0.95458984375}], "temperature": 1.0}, {"id": 8, "seek": 21119, "start": 182.35, "end": 211.19, "text": " An interval gives a range of values, because here we have a range from, for example, 17 up to 19. In this case, this interval takes into consideration variation in samples that intersects from sample to sample. For example, suppose we have a huge population, and from that huge population, we select many samples of the same size. 
Maybe the first sample, for example, of size 15.", "tokens": [1107, 15035, 2709, 257, 3613, 295, 4190, 11, 570, 510, 321, 362, 257, 3613, 490, 11, 337, 1365, 11, 3282, 493, 281, 1294, 13, 682, 341, 1389, 11, 341, 15035, 2516, 666, 12381, 12990, 294, 10938, 300, 27815, 82, 490, 6889, 281, 6889, 13, 1171, 1365, 11, 7297, 321, 362, 257, 2603, 4415, 11, 293, 490, 300, 2603, 4415, 11, 321, 3048, 867, 10938, 295, 264, 912, 2744, 13, 2704, 264, 700, 6889, 11, 337, 1365, 11, 295, 2744, 2119, 13], "avg_logprob": -0.151200454591251, "compression_ratio": 1.7757009345794392, "no_speech_prob": 0.0, "words": [{"start": 182.35, "end": 182.65, "word": " An", "probability": 0.7666015625}, {"start": 182.65, "end": 182.99, "word": " interval", "probability": 0.97314453125}, {"start": 182.99, "end": 183.39, "word": " gives", "probability": 0.89404296875}, {"start": 183.39, "end": 183.57, "word": " a", "probability": 0.96630859375}, {"start": 183.57, "end": 183.81, "word": " range", "probability": 0.89697265625}, {"start": 183.81, "end": 183.95, "word": " of", "probability": 0.96728515625}, {"start": 183.95, "end": 184.37, "word": " values,", "probability": 0.96826171875}, {"start": 185.47, "end": 185.63, "word": " because", "probability": 0.822265625}, {"start": 185.63, "end": 185.81, "word": " here", "probability": 0.84033203125}, {"start": 185.81, "end": 185.93, "word": " we", "probability": 0.83935546875}, {"start": 185.93, "end": 186.09, "word": " have", "probability": 0.9462890625}, {"start": 186.09, "end": 186.15, "word": " a", "probability": 0.93701171875}, {"start": 186.15, "end": 186.33, "word": " range", "probability": 0.9013671875}, {"start": 186.33, "end": 186.67, "word": " from,", "probability": 0.8544921875}, {"start": 187.05, "end": 187.11, "word": " for", "probability": 0.94921875}, {"start": 187.11, "end": 187.43, "word": " example,", "probability": 0.9765625}, {"start": 187.49, "end": 187.91, "word": " 17", "probability": 0.77099609375}, {"start": 187.91, "end": 
188.15, "word": " up", "probability": 0.96533203125}, {"start": 188.15, "end": 188.37, "word": " to", "probability": 0.97216796875}, {"start": 188.37, "end": 188.95, "word": " 19.", "probability": 0.9677734375}, {"start": 189.59, "end": 189.91, "word": " In", "probability": 0.9541015625}, {"start": 189.91, "end": 190.13, "word": " this", "probability": 0.94482421875}, {"start": 190.13, "end": 190.35, "word": " case,", "probability": 0.91162109375}, {"start": 190.47, "end": 190.67, "word": " this", "probability": 0.9423828125}, {"start": 190.67, "end": 191.09, "word": " interval", "probability": 0.97119140625}, {"start": 191.09, "end": 191.59, "word": " takes", "probability": 0.8310546875}, {"start": 191.59, "end": 191.99, "word": " into", "probability": 0.84375}, {"start": 191.99, "end": 192.75, "word": " consideration", "probability": 0.916015625}, {"start": 192.75, "end": 193.85, "word": " variation", "probability": 0.685546875}, {"start": 193.85, "end": 194.21, "word": " in", "probability": 0.87109375}, {"start": 194.21, "end": 194.51, "word": " samples", "probability": 0.77294921875}, {"start": 194.51, "end": 194.79, "word": " that", "probability": 0.81640625}, {"start": 194.79, "end": 195.39, "word": " intersects", "probability": 0.4893798828125}, {"start": 195.39, "end": 195.57, "word": " from", "probability": 0.88671875}, {"start": 195.57, "end": 195.93, "word": " sample", "probability": 0.8759765625}, {"start": 195.93, "end": 196.15, "word": " to", "probability": 0.9599609375}, {"start": 196.15, "end": 196.47, "word": " sample.", "probability": 0.8828125}, {"start": 197.01, "end": 197.27, "word": " For", "probability": 0.958984375}, {"start": 197.27, "end": 197.61, "word": " example,", "probability": 0.97216796875}, {"start": 198.41, "end": 198.77, "word": " suppose", "probability": 0.8916015625}, {"start": 198.77, "end": 198.93, "word": " we", "probability": 0.94580078125}, {"start": 198.93, "end": 199.11, "word": " have", "probability": 0.93115234375}, 
{"start": 199.11, "end": 199.35, "word": " a", "probability": 0.9736328125}, {"start": 199.35, "end": 199.61, "word": " huge", "probability": 0.89404296875}, {"start": 199.61, "end": 199.97, "word": " population,", "probability": 0.97021484375}, {"start": 200.35, "end": 200.51, "word": " and", "probability": 0.93115234375}, {"start": 200.51, "end": 200.77, "word": " from", "probability": 0.87744140625}, {"start": 200.77, "end": 201.07, "word": " that", "probability": 0.92724609375}, {"start": 201.07, "end": 201.59, "word": " huge", "probability": 0.89111328125}, {"start": 201.59, "end": 202.07, "word": " population,", "probability": 0.9521484375}, {"start": 202.19, "end": 202.33, "word": " we", "probability": 0.94970703125}, {"start": 202.33, "end": 202.73, "word": " select", "probability": 0.853515625}, {"start": 202.73, "end": 203.81, "word": " many", "probability": 0.904296875}, {"start": 203.81, "end": 204.31, "word": " samples", "probability": 0.9091796875}, {"start": 204.31, "end": 205.01, "word": " of", "probability": 0.95166015625}, {"start": 205.01, "end": 205.19, "word": " the", "probability": 0.91796875}, {"start": 205.19, "end": 205.43, "word": " same", "probability": 0.90380859375}, {"start": 205.43, "end": 205.87, "word": " size.", "probability": 0.8466796875}, {"start": 206.79, "end": 207.17, "word": " Maybe", "probability": 0.9443359375}, {"start": 207.17, "end": 207.43, "word": " the", "probability": 0.69140625}, {"start": 207.43, "end": 207.95, "word": " first", "probability": 0.83056640625}, {"start": 207.95, "end": 208.45, "word": " sample,", "probability": 0.89501953125}, {"start": 209.27, "end": 209.53, "word": " for", "probability": 0.94970703125}, {"start": 209.53, "end": 209.87, "word": " example,", "probability": 0.97607421875}, {"start": 209.91, "end": 210.05, "word": " of", "probability": 0.9375}, {"start": 210.05, "end": 210.35, "word": " size", "probability": 0.86865234375}, {"start": 210.35, "end": 211.19, "word": " 15.", 
"probability": 0.57080078125}], "temperature": 1.0}, {"id": 9, "seek": 23945, "start": 212.37, "end": 239.45, "text": " gives average of 17. Maybe another sample of the same size might give different sample mean, 18 and so on. Suppose we select 100 samples. So here we have different sample means. For sure we should have different", "tokens": [2709, 4274, 295, 3282, 13, 2704, 1071, 6889, 295, 264, 912, 2744, 1062, 976, 819, 6889, 914, 11, 2443, 293, 370, 322, 13, 21360, 321, 3048, 2319, 10938, 13, 407, 510, 321, 362, 819, 6889, 1355, 13, 1171, 988, 321, 820, 362, 819], "avg_logprob": -0.2668678902766921, "compression_ratio": 1.525179856115108, "no_speech_prob": 0.0, "words": [{"start": 212.37, "end": 212.79, "word": " gives", "probability": 0.52880859375}, {"start": 212.79, "end": 213.41, "word": " average", "probability": 0.560546875}, {"start": 213.41, "end": 215.61, "word": " of", "probability": 0.923828125}, {"start": 215.61, "end": 217.17, "word": " 17.", "probability": 0.74365234375}, {"start": 218.93, "end": 219.73, "word": " Maybe", "probability": 0.73046875}, {"start": 219.73, "end": 220.29, "word": " another", "probability": 0.8857421875}, {"start": 220.29, "end": 220.75, "word": " sample", "probability": 0.83447265625}, {"start": 220.75, "end": 221.03, "word": " of", "probability": 0.9228515625}, {"start": 221.03, "end": 221.27, "word": " the", "probability": 0.57080078125}, {"start": 221.27, "end": 221.49, "word": " same", "probability": 0.90869140625}, {"start": 221.49, "end": 222.05, "word": " size", "probability": 0.84326171875}, {"start": 222.05, "end": 223.15, "word": " might", "probability": 0.869140625}, {"start": 223.15, "end": 223.47, "word": " give", "probability": 0.8486328125}, {"start": 223.47, "end": 224.61, "word": " different", "probability": 0.81591796875}, {"start": 224.61, "end": 226.51, "word": " sample", "probability": 0.84423828125}, {"start": 226.51, "end": 226.85, "word": " mean,", "probability": 0.55126953125}, {"start": 227.17, 
"end": 227.49, "word": " 18", "probability": 0.78759765625}, {"start": 227.49, "end": 227.69, "word": " and", "probability": 0.83203125}, {"start": 227.69, "end": 227.91, "word": " so", "probability": 0.962890625}, {"start": 227.91, "end": 228.09, "word": " on.", "probability": 0.94140625}, {"start": 228.67, "end": 229.03, "word": " Suppose", "probability": 0.77685546875}, {"start": 229.03, "end": 229.33, "word": " we", "probability": 0.93505859375}, {"start": 229.33, "end": 230.01, "word": " select", "probability": 0.8583984375}, {"start": 230.01, "end": 230.43, "word": " 100", "probability": 0.8408203125}, {"start": 230.43, "end": 230.89, "word": " samples.", "probability": 0.50439453125}, {"start": 232.89, "end": 233.13, "word": " So", "probability": 0.78515625}, {"start": 233.13, "end": 233.37, "word": " here", "probability": 0.68896484375}, {"start": 233.37, "end": 233.91, "word": " we", "probability": 0.59912109375}, {"start": 233.91, "end": 234.11, "word": " have", "probability": 0.947265625}, {"start": 234.11, "end": 234.77, "word": " different", "probability": 0.88916015625}, {"start": 234.77, "end": 236.51, "word": " sample", "probability": 0.89404296875}, {"start": 236.51, "end": 236.85, "word": " means.", "probability": 0.900390625}, {"start": 237.03, "end": 237.21, "word": " For", "probability": 0.9580078125}, {"start": 237.21, "end": 237.53, "word": " sure", "probability": 0.91943359375}, {"start": 237.53, "end": 237.77, "word": " we", "probability": 0.56103515625}, {"start": 237.77, "end": 238.03, "word": " should", "probability": 0.96484375}, {"start": 238.03, "end": 238.43, "word": " have", "probability": 0.943359375}, {"start": 238.43, "end": 239.45, "word": " different", "probability": 0.88916015625}], "temperature": 1.0}, {"id": 10, "seek": 26426, "start": 241.14, "end": 264.26, "text": " confidence intervals. Maybe the first one starts from 16 to 18, the other one maybe 15 to 17 and so on. 
So the confidence interval takes into consideration variation in sample statistics from sample to sample. But the confidence interval we have to construct based on", "tokens": [6687, 26651, 13, 2704, 264, 700, 472, 3719, 490, 3165, 281, 2443, 11, 264, 661, 472, 1310, 2119, 281, 3282, 293, 370, 322, 13, 407, 264, 6687, 15035, 2516, 666, 12381, 12990, 294, 6889, 12523, 490, 6889, 281, 6889, 13, 583, 264, 6687, 15035, 321, 362, 281, 7690, 2361, 322], "avg_logprob": -0.16957720120747885, "compression_ratio": 1.6962025316455696, "no_speech_prob": 0.0, "words": [{"start": 241.14, "end": 241.64, "word": " confidence", "probability": 0.429931640625}, {"start": 241.64, "end": 242.2, "word": " intervals.", "probability": 0.6708984375}, {"start": 244.58, "end": 245.32, "word": " Maybe", "probability": 0.9140625}, {"start": 245.32, "end": 245.52, "word": " the", "probability": 0.90625}, {"start": 245.52, "end": 245.78, "word": " first", "probability": 0.8505859375}, {"start": 245.78, "end": 246.02, "word": " one", "probability": 0.93017578125}, {"start": 246.02, "end": 246.58, "word": " starts", "probability": 0.86328125}, {"start": 246.58, "end": 247.28, "word": " from", "probability": 0.89404296875}, {"start": 247.28, "end": 247.76, "word": " 16", "probability": 0.8076171875}, {"start": 247.76, "end": 247.94, "word": " to", "probability": 0.94384765625}, {"start": 247.94, "end": 248.16, "word": " 18,", "probability": 0.92529296875}, {"start": 248.3, "end": 248.4, "word": " the", "probability": 0.80615234375}, {"start": 248.4, "end": 248.62, "word": " other", "probability": 0.8876953125}, {"start": 248.62, "end": 248.82, "word": " one", "probability": 0.89501953125}, {"start": 248.82, "end": 249.02, "word": " maybe", "probability": 0.77490234375}, {"start": 249.02, "end": 249.5, "word": " 15", "probability": 0.9287109375}, {"start": 249.5, "end": 249.68, "word": " to", "probability": 0.96728515625}, {"start": 249.68, "end": 250.04, "word": " 17", "probability": 0.962890625}, 
{"start": 250.04, "end": 250.3, "word": " and", "probability": 0.56005859375}, {"start": 250.3, "end": 250.5, "word": " so", "probability": 0.9482421875}, {"start": 250.5, "end": 250.68, "word": " on.", "probability": 0.94580078125}, {"start": 251.24, "end": 251.54, "word": " So", "probability": 0.92041015625}, {"start": 251.54, "end": 252.46, "word": " the", "probability": 0.61962890625}, {"start": 252.46, "end": 252.94, "word": " confidence", "probability": 0.9755859375}, {"start": 252.94, "end": 253.4, "word": " interval", "probability": 0.9638671875}, {"start": 253.4, "end": 253.98, "word": " takes", "probability": 0.83447265625}, {"start": 253.98, "end": 254.32, "word": " into", "probability": 0.84814453125}, {"start": 254.32, "end": 254.9, "word": " consideration", "probability": 0.93212890625}, {"start": 254.9, "end": 255.52, "word": " variation", "probability": 0.55078125}, {"start": 255.52, "end": 255.86, "word": " in", "probability": 0.5380859375}, {"start": 255.86, "end": 256.14, "word": " sample", "probability": 0.8095703125}, {"start": 256.14, "end": 256.72, "word": " statistics", "probability": 0.89453125}, {"start": 256.72, "end": 257.42, "word": " from", "probability": 0.82763671875}, {"start": 257.42, "end": 257.76, "word": " sample", "probability": 0.8916015625}, {"start": 257.76, "end": 257.98, "word": " to", "probability": 0.962890625}, {"start": 257.98, "end": 258.24, "word": " sample.", "probability": 0.89111328125}, {"start": 259.28, "end": 259.5, "word": " But", "probability": 0.9462890625}, {"start": 259.5, "end": 259.68, "word": " the", "probability": 0.8974609375}, {"start": 259.68, "end": 260.06, "word": " confidence", "probability": 0.9755859375}, {"start": 260.06, "end": 260.56, "word": " interval", "probability": 0.96435546875}, {"start": 260.56, "end": 260.82, "word": " we", "probability": 0.9140625}, {"start": 260.82, "end": 261.28, "word": " have", "probability": 0.94970703125}, {"start": 261.28, "end": 261.92, "word": " to", 
"probability": 0.97119140625}, {"start": 261.92, "end": 262.72, "word": " construct", "probability": 0.978515625}, {"start": 262.72, "end": 263.72, "word": " based", "probability": 0.89794921875}, {"start": 263.72, "end": 264.26, "word": " on", "probability": 0.94873046875}], "temperature": 1.0}, {"id": 11, "seek": 29483, "start": 265.5, "end": 294.84, "text": " observations just from one sample. I mean, you don't need to select too many random samples in order to construct a confidence interval. Just select one sample, one random sample, and from that sample, we can construct a confidence interval. So my confidence interval can be constructed by using only one simple sample. From that sample, we can compute the average.", "tokens": [18163, 445, 490, 472, 6889, 13, 286, 914, 11, 291, 500, 380, 643, 281, 3048, 886, 867, 4974, 10938, 294, 1668, 281, 7690, 257, 6687, 15035, 13, 1449, 3048, 472, 6889, 11, 472, 4974, 6889, 11, 293, 490, 300, 6889, 11, 321, 393, 7690, 257, 6687, 15035, 13, 407, 452, 6687, 15035, 393, 312, 17083, 538, 1228, 787, 472, 2199, 6889, 13, 3358, 300, 6889, 11, 321, 393, 14722, 264, 4274, 13], "avg_logprob": -0.13281249673399206, "compression_ratio": 2.0, "no_speech_prob": 0.0, "words": [{"start": 265.5, "end": 266.22, "word": " observations", "probability": 0.6396484375}, {"start": 266.22, "end": 266.58, "word": " just", "probability": 0.8603515625}, {"start": 266.58, "end": 267.02, "word": " from", "probability": 0.8798828125}, {"start": 267.02, "end": 267.44, "word": " one", "probability": 0.9189453125}, {"start": 267.44, "end": 267.78, "word": " sample.", "probability": 0.7861328125}, {"start": 268.08, "end": 268.28, "word": " I", "probability": 0.919921875}, {"start": 268.28, "end": 268.44, "word": " mean,", "probability": 0.9609375}, {"start": 268.52, "end": 268.6, "word": " you", "probability": 0.900390625}, {"start": 268.6, "end": 268.88, "word": " don't", "probability": 0.966552734375}, {"start": 268.88, "end": 269.36, "word": " need", 
"probability": 0.9267578125}, {"start": 269.36, "end": 269.76, "word": " to", "probability": 0.97119140625}, {"start": 269.76, "end": 270.68, "word": " select", "probability": 0.73291015625}, {"start": 270.68, "end": 270.94, "word": " too", "probability": 0.931640625}, {"start": 270.94, "end": 271.14, "word": " many", "probability": 0.9072265625}, {"start": 271.14, "end": 271.48, "word": " random", "probability": 0.8583984375}, {"start": 271.48, "end": 271.96, "word": " samples", "probability": 0.88720703125}, {"start": 271.96, "end": 272.58, "word": " in", "probability": 0.916015625}, {"start": 272.58, "end": 272.8, "word": " order", "probability": 0.91650390625}, {"start": 272.8, "end": 273.14, "word": " to", "probability": 0.96923828125}, {"start": 273.14, "end": 273.74, "word": " construct", "probability": 0.9775390625}, {"start": 273.74, "end": 273.92, "word": " a", "probability": 0.69287109375}, {"start": 273.92, "end": 274.3, "word": " confidence", "probability": 0.97265625}, {"start": 274.3, "end": 274.72, "word": " interval.", "probability": 0.96875}, {"start": 274.98, "end": 275.52, "word": " Just", "probability": 0.88037109375}, {"start": 275.52, "end": 275.94, "word": " select", "probability": 0.8671875}, {"start": 275.94, "end": 276.2, "word": " one", "probability": 0.9365234375}, {"start": 276.2, "end": 276.58, "word": " sample,", "probability": 0.8955078125}, {"start": 276.76, "end": 276.92, "word": " one", "probability": 0.93310546875}, {"start": 276.92, "end": 277.28, "word": " random", "probability": 0.84765625}, {"start": 277.28, "end": 277.7, "word": " sample,", "probability": 0.880859375}, {"start": 278.12, "end": 278.36, "word": " and", "probability": 0.93603515625}, {"start": 278.36, "end": 278.66, "word": " from", "probability": 0.8759765625}, {"start": 278.66, "end": 278.92, "word": " that", "probability": 0.93359375}, {"start": 278.92, "end": 279.2, "word": " sample,", "probability": 0.88916015625}, {"start": 279.32, "end": 279.44, "word": 
" we", "probability": 0.9501953125}, {"start": 279.44, "end": 279.8, "word": " can", "probability": 0.93896484375}, {"start": 279.8, "end": 281.8, "word": " construct", "probability": 0.97705078125}, {"start": 281.8, "end": 282.0, "word": " a", "probability": 0.85009765625}, {"start": 282.0, "end": 282.44, "word": " confidence", "probability": 0.97607421875}, {"start": 282.44, "end": 282.84, "word": " interval.", "probability": 0.9482421875}, {"start": 283.36, "end": 283.68, "word": " So", "probability": 0.93310546875}, {"start": 283.68, "end": 284.16, "word": " my", "probability": 0.666015625}, {"start": 284.16, "end": 284.58, "word": " confidence", "probability": 0.982421875}, {"start": 284.58, "end": 285.06, "word": " interval", "probability": 0.962890625}, {"start": 285.06, "end": 286.32, "word": " can", "probability": 0.9404296875}, {"start": 286.32, "end": 286.54, "word": " be", "probability": 0.95654296875}, {"start": 286.54, "end": 287.14, "word": " constructed", "probability": 0.93701171875}, {"start": 287.14, "end": 287.5, "word": " by", "probability": 0.966796875}, {"start": 287.5, "end": 287.98, "word": " using", "probability": 0.93701171875}, {"start": 287.98, "end": 289.1, "word": " only", "probability": 0.92529296875}, {"start": 289.1, "end": 290.12, "word": " one", "probability": 0.93603515625}, {"start": 290.12, "end": 291.04, "word": " simple", "probability": 0.474365234375}, {"start": 291.04, "end": 291.86, "word": " sample.", "probability": 0.87841796875}, {"start": 292.32, "end": 292.5, "word": " From", "probability": 0.7763671875}, {"start": 292.5, "end": 292.72, "word": " that", "probability": 0.87744140625}, {"start": 292.72, "end": 292.96, "word": " sample,", "probability": 0.8447265625}, {"start": 293.02, "end": 293.18, "word": " we", "probability": 0.9560546875}, {"start": 293.18, "end": 293.38, "word": " can", "probability": 0.94287109375}, {"start": 293.38, "end": 293.86, "word": " compute", "probability": 0.9296875}, {"start": 293.86, 
"end": 294.5, "word": " the", "probability": 0.91748046875}, {"start": 294.5, "end": 294.84, "word": " average.", "probability": 0.76611328125}], "temperature": 1.0}, {"id": 12, "seek": 32458, "start": 296.73, "end": 324.59, "text": " Now, this interval gives information about closeness to unknown population parameters. For example, when we are saying weights range between, for example, 60 kilogram up to 80, and you are sure about 90%. So that means the average weight range between 60 to 80.", "tokens": [823, 11, 341, 15035, 2709, 1589, 466, 2611, 15264, 281, 9841, 4415, 9834, 13, 1171, 1365, 11, 562, 321, 366, 1566, 17443, 3613, 1296, 11, 337, 1365, 11, 4060, 21741, 493, 281, 4688, 11, 293, 291, 366, 988, 466, 4289, 6856, 407, 300, 1355, 264, 4274, 3364, 3613, 1296, 4060, 281, 4688, 13], "avg_logprob": -0.17534721615137877, "compression_ratio": 1.4802259887005649, "no_speech_prob": 0.0, "words": [{"start": 296.73, "end": 297.07, "word": " Now,", "probability": 0.9208984375}, {"start": 297.33, "end": 297.67, "word": " this", "probability": 0.9375}, {"start": 297.67, "end": 298.09, "word": " interval", "probability": 0.978515625}, {"start": 298.09, "end": 299.13, "word": " gives", "probability": 0.8955078125}, {"start": 299.13, "end": 299.77, "word": " information", "probability": 0.8310546875}, {"start": 299.77, "end": 300.35, "word": " about", "probability": 0.90625}, {"start": 300.35, "end": 301.25, "word": " closeness", "probability": 0.935791015625}, {"start": 301.25, "end": 301.59, "word": " to", "probability": 0.96533203125}, {"start": 301.59, "end": 301.99, "word": " unknown", "probability": 0.88427734375}, {"start": 301.99, "end": 302.45, "word": " population", "probability": 0.97265625}, {"start": 302.45, "end": 302.99, "word": " parameters.", "probability": 0.96923828125}, {"start": 303.49, "end": 303.63, "word": " For", "probability": 0.95947265625}, {"start": 303.63, "end": 304.01, "word": " example,", "probability": 0.97265625}, {"start": 304.45, 
"end": 304.63, "word": " when", "probability": 0.9296875}, {"start": 304.63, "end": 304.75, "word": " we", "probability": 0.91943359375}, {"start": 304.75, "end": 304.89, "word": " are", "probability": 0.93505859375}, {"start": 304.89, "end": 305.33, "word": " saying", "probability": 0.59521484375}, {"start": 305.33, "end": 306.99, "word": " weights", "probability": 0.64404296875}, {"start": 306.99, "end": 308.53, "word": " range", "probability": 0.84423828125}, {"start": 308.53, "end": 309.15, "word": " between,", "probability": 0.84375}, {"start": 309.33, "end": 309.65, "word": " for", "probability": 0.9560546875}, {"start": 309.65, "end": 310.07, "word": " example,", "probability": 0.97314453125}, {"start": 310.33, "end": 310.77, "word": " 60", "probability": 0.90185546875}, {"start": 310.77, "end": 311.15, "word": " kilogram", "probability": 0.37548828125}, {"start": 311.15, "end": 311.51, "word": " up", "probability": 0.95654296875}, {"start": 311.51, "end": 311.67, "word": " to", "probability": 0.96826171875}, {"start": 311.67, "end": 311.97, "word": " 80,", "probability": 0.97265625}, {"start": 312.89, "end": 313.65, "word": " and", "probability": 0.9365234375}, {"start": 313.65, "end": 313.83, "word": " you", "probability": 0.958984375}, {"start": 313.83, "end": 314.09, "word": " are", "probability": 0.9248046875}, {"start": 314.09, "end": 314.47, "word": " sure", "probability": 0.9072265625}, {"start": 314.47, "end": 314.77, "word": " about", "probability": 0.876953125}, {"start": 314.77, "end": 315.73, "word": " 90%.", "probability": 0.70263671875}, {"start": 315.73, "end": 317.27, "word": " So", "probability": 0.9443359375}, {"start": 317.27, "end": 317.53, "word": " that", "probability": 0.75}, {"start": 317.53, "end": 317.95, "word": " means", "probability": 0.93115234375}, {"start": 317.95, "end": 318.99, "word": " the", "probability": 0.77783203125}, {"start": 318.99, "end": 319.53, "word": " average", "probability": 0.77587890625}, {"start": 319.53, 
"end": 320.75, "word": " weight", "probability": 0.896484375}, {"start": 320.75, "end": 323.43, "word": " range", "probability": 0.4990234375}, {"start": 323.43, "end": 323.71, "word": " between", "probability": 0.75}, {"start": 323.71, "end": 324.07, "word": " 60", "probability": 0.79931640625}, {"start": 324.07, "end": 324.29, "word": " to", "probability": 0.90185546875}, {"start": 324.29, "end": 324.59, "word": " 80.", "probability": 0.9814453125}], "temperature": 1.0}, {"id": 13, "seek": 35345, "start": 326.01, "end": 353.45, "text": " That means the true parameter mu is close to these values. So it's between 6 to 80. Now suppose in this case I am interested in 95%. Now suppose someone else interested in 99% confidence. So do you think the confidence interval for more confident", "tokens": [663, 1355, 264, 2074, 13075, 2992, 307, 1998, 281, 613, 4190, 13, 407, 309, 311, 1296, 1386, 281, 4688, 13, 823, 7297, 294, 341, 1389, 286, 669, 3102, 294, 13420, 6856, 823, 7297, 1580, 1646, 3102, 294, 11803, 4, 6687, 13, 407, 360, 291, 519, 264, 6687, 15035, 337, 544, 6679], "avg_logprob": -0.18344351649284363, "compression_ratio": 1.5534591194968554, "no_speech_prob": 0.0, "words": [{"start": 326.01, "end": 326.39, "word": " That", "probability": 0.7392578125}, {"start": 326.39, "end": 326.75, "word": " means", "probability": 0.92578125}, {"start": 326.75, "end": 327.17, "word": " the", "probability": 0.78564453125}, {"start": 327.17, "end": 327.43, "word": " true", "probability": 0.90478515625}, {"start": 327.43, "end": 327.87, "word": " parameter", "probability": 0.974609375}, {"start": 327.87, "end": 328.31, "word": " mu", "probability": 0.39208984375}, {"start": 328.31, "end": 330.07, "word": " is", "probability": 0.853515625}, {"start": 330.07, "end": 330.63, "word": " close", "probability": 0.76611328125}, {"start": 330.63, "end": 332.69, "word": " to", "probability": 0.93994140625}, {"start": 332.69, "end": 332.91, "word": " these", "probability": 0.78369140625}, 
{"start": 332.91, "end": 333.27, "word": " values.", "probability": 0.96142578125}, {"start": 333.65, "end": 333.85, "word": " So", "probability": 0.93798828125}, {"start": 333.85, "end": 334.19, "word": " it's", "probability": 0.87744140625}, {"start": 334.19, "end": 334.55, "word": " between", "probability": 0.88330078125}, {"start": 334.55, "end": 334.91, "word": " 6", "probability": 0.63427734375}, {"start": 334.91, "end": 335.09, "word": " to", "probability": 0.8388671875}, {"start": 335.09, "end": 335.33, "word": " 80.", "probability": 0.55126953125}, {"start": 335.63, "end": 335.85, "word": " Now", "probability": 0.93994140625}, {"start": 335.85, "end": 336.17, "word": " suppose", "probability": 0.6484375}, {"start": 336.17, "end": 336.31, "word": " in", "probability": 0.7490234375}, {"start": 336.31, "end": 336.47, "word": " this", "probability": 0.9462890625}, {"start": 336.47, "end": 336.67, "word": " case", "probability": 0.90869140625}, {"start": 336.67, "end": 336.81, "word": " I", "probability": 0.74365234375}, {"start": 336.81, "end": 336.93, "word": " am", "probability": 0.8095703125}, {"start": 336.93, "end": 337.41, "word": " interested", "probability": 0.8818359375}, {"start": 337.41, "end": 338.05, "word": " in", "probability": 0.953125}, {"start": 338.05, "end": 340.75, "word": " 95%.", "probability": 0.829345703125}, {"start": 340.75, "end": 343.43, "word": " Now", "probability": 0.62890625}, {"start": 343.43, "end": 343.99, "word": " suppose", "probability": 0.81201171875}, {"start": 343.99, "end": 344.55, "word": " someone", "probability": 0.93701171875}, {"start": 344.55, "end": 345.57, "word": " else", "probability": 0.927734375}, {"start": 345.57, "end": 346.39, "word": " interested", "probability": 0.77685546875}, {"start": 346.39, "end": 346.73, "word": " in", "probability": 0.9140625}, {"start": 346.73, "end": 347.23, "word": " 99", "probability": 0.97607421875}, {"start": 347.23, "end": 347.69, "word": "%", "probability": 
0.9599609375}, {"start": 347.69, "end": 348.11, "word": " confidence.", "probability": 0.73291015625}, {"start": 349.37, "end": 349.61, "word": " So", "probability": 0.93603515625}, {"start": 349.61, "end": 349.79, "word": " do", "probability": 0.89599609375}, {"start": 349.79, "end": 349.93, "word": " you", "probability": 0.96240234375}, {"start": 349.93, "end": 350.21, "word": " think", "probability": 0.916015625}, {"start": 350.21, "end": 350.57, "word": " the", "probability": 0.8779296875}, {"start": 350.57, "end": 351.09, "word": " confidence", "probability": 0.98193359375}, {"start": 351.09, "end": 351.63, "word": " interval", "probability": 0.97314453125}, {"start": 351.63, "end": 352.19, "word": " for", "probability": 0.9365234375}, {"start": 352.19, "end": 352.69, "word": " more", "probability": 0.93505859375}, {"start": 352.69, "end": 353.45, "word": " confident", "probability": 0.9423828125}], "temperature": 1.0}, {"id": 14, "seek": 38009, "start": 354.19, "end": 380.09, "text": " to be wider or narrower. As the confidence gets larger or bigger, the interval gets wider. So maybe from 55 up to 85. 
Another example, suppose we are interested in the age of students.", "tokens": [281, 312, 11842, 420, 46751, 13, 1018, 264, 6687, 2170, 4833, 420, 3801, 11, 264, 15035, 2170, 11842, 13, 407, 1310, 490, 12330, 493, 281, 14695, 13, 3996, 1365, 11, 7297, 321, 366, 3102, 294, 264, 3205, 295, 1731, 13], "avg_logprob": -0.18092606125808344, "compression_ratio": 1.3703703703703705, "no_speech_prob": 0.0, "words": [{"start": 354.19, "end": 354.41, "word": " to", "probability": 0.427001953125}, {"start": 354.41, "end": 354.67, "word": " be", "probability": 0.94189453125}, {"start": 354.67, "end": 355.17, "word": " wider", "probability": 0.923828125}, {"start": 355.17, "end": 355.59, "word": " or", "probability": 0.9580078125}, {"start": 355.59, "end": 355.91, "word": " narrower.", "probability": 0.6015625}, {"start": 357.61, "end": 358.11, "word": " As", "probability": 0.89990234375}, {"start": 358.11, "end": 358.81, "word": " the", "probability": 0.6181640625}, {"start": 358.81, "end": 359.43, "word": " confidence", "probability": 0.97802734375}, {"start": 359.43, "end": 360.87, "word": " gets", "probability": 0.8408203125}, {"start": 360.87, "end": 361.39, "word": " larger", "probability": 0.90576171875}, {"start": 361.39, "end": 361.77, "word": " or", "probability": 0.87109375}, {"start": 361.77, "end": 362.09, "word": " bigger,", "probability": 0.94921875}, {"start": 363.21, "end": 363.49, "word": " the", "probability": 0.90966796875}, {"start": 363.49, "end": 363.89, "word": " interval", "probability": 0.970703125}, {"start": 363.89, "end": 364.41, "word": " gets", "probability": 0.87451171875}, {"start": 364.41, "end": 365.41, "word": " wider.", "probability": 0.90283203125}, {"start": 365.79, "end": 366.01, "word": " So", "probability": 0.8984375}, {"start": 366.01, "end": 366.73, "word": " maybe", "probability": 0.6728515625}, {"start": 366.73, "end": 367.67, "word": " from", "probability": 0.857421875}, {"start": 367.67, "end": 368.23, "word": " 55", "probability": 
0.84912109375}, {"start": 368.23, "end": 368.65, "word": " up", "probability": 0.95654296875}, {"start": 368.65, "end": 368.79, "word": " to", "probability": 0.9609375}, {"start": 368.79, "end": 369.13, "word": " 85.", "probability": 0.974609375}, {"start": 371.13, "end": 372.01, "word": " Another", "probability": 0.89453125}, {"start": 372.01, "end": 372.39, "word": " example,", "probability": 0.97216796875}, {"start": 372.51, "end": 372.93, "word": " suppose", "probability": 0.8984375}, {"start": 372.93, "end": 375.01, "word": " we", "probability": 0.8720703125}, {"start": 375.01, "end": 375.37, "word": " are", "probability": 0.94384765625}, {"start": 375.37, "end": 377.29, "word": " interested", "probability": 0.86669921875}, {"start": 377.29, "end": 377.75, "word": " in", "probability": 0.94970703125}, {"start": 377.75, "end": 377.97, "word": " the", "probability": 0.92236328125}, {"start": 377.97, "end": 378.35, "word": " age", "probability": 0.93994140625}, {"start": 378.35, "end": 379.53, "word": " of", "probability": 0.95654296875}, {"start": 379.53, "end": 380.09, "word": " students.", "probability": 0.73876953125}], "temperature": 1.0}, {"id": 15, "seek": 41237, "start": 383.33, "end": 412.37, "text": " 80% confident that your age is between, or your weights, let's talk about weights, between 70 up to 75 kilograms, 80%. 
90% could be 60 to 80 kilograms, or 65, 75 kilograms to 80 kilograms.", "tokens": [4688, 4, 6679, 300, 428, 3205, 307, 1296, 11, 420, 428, 17443, 11, 718, 311, 751, 466, 17443, 11, 1296, 5285, 493, 281, 9562, 30690, 11, 4688, 6856, 4289, 4, 727, 312, 4060, 281, 4688, 30690, 11, 420, 11624, 11, 9562, 30690, 281, 4688, 30690, 13], "avg_logprob": -0.2870678318307755, "compression_ratio": 1.5241935483870968, "no_speech_prob": 0.0, "words": [{"start": 383.33, "end": 383.71, "word": " 80", "probability": 0.68994140625}, {"start": 383.71, "end": 384.13, "word": "%", "probability": 0.84716796875}, {"start": 384.13, "end": 385.77, "word": " confident", "probability": 0.892578125}, {"start": 385.77, "end": 386.33, "word": " that", "probability": 0.92529296875}, {"start": 386.33, "end": 390.43, "word": " your", "probability": 0.75146484375}, {"start": 390.43, "end": 390.77, "word": " age", "probability": 0.344970703125}, {"start": 390.77, "end": 390.99, "word": " is", "probability": 0.50390625}, {"start": 390.99, "end": 391.37, "word": " between,", "probability": 0.88916015625}, {"start": 391.55, "end": 391.69, "word": " or", "probability": 0.8544921875}, {"start": 391.69, "end": 391.95, "word": " your", "probability": 0.86328125}, {"start": 391.95, "end": 392.29, "word": " weights,", "probability": 0.48974609375}, {"start": 392.41, "end": 392.57, "word": " let's", "probability": 0.852783203125}, {"start": 392.57, "end": 392.71, "word": " talk", "probability": 0.87939453125}, {"start": 392.71, "end": 392.95, "word": " about", "probability": 0.90966796875}, {"start": 392.95, "end": 393.31, "word": " weights,", "probability": 0.8408203125}, {"start": 393.75, "end": 394.11, "word": " between", "probability": 0.837890625}, {"start": 394.11, "end": 394.77, "word": " 70", "probability": 0.953125}, {"start": 394.77, "end": 395.73, "word": " up", "probability": 0.71826171875}, {"start": 395.73, "end": 395.91, "word": " to", "probability": 0.935546875}, {"start": 395.91, "end": 
396.59, "word": " 75", "probability": 0.96142578125}, {"start": 396.59, "end": 397.35, "word": " kilograms,", "probability": 0.59814453125}, {"start": 398.61, "end": 399.53, "word": " 80%.", "probability": 0.8544921875}, {"start": 399.53, "end": 400.93, "word": " 90", "probability": 0.92724609375}, {"start": 400.93, "end": 401.37, "word": "%", "probability": 0.9208984375}, {"start": 401.37, "end": 402.61, "word": " could", "probability": 0.57763671875}, {"start": 402.61, "end": 402.95, "word": " be", "probability": 0.95654296875}, {"start": 402.95, "end": 404.55, "word": " 60", "probability": 0.97119140625}, {"start": 404.55, "end": 405.75, "word": " to", "probability": 0.92333984375}, {"start": 405.75, "end": 406.07, "word": " 80", "probability": 0.986328125}, {"start": 406.07, "end": 406.59, "word": " kilograms,", "probability": 0.81591796875}, {"start": 407.59, "end": 407.75, "word": " or", "probability": 0.96435546875}, {"start": 407.75, "end": 408.39, "word": " 65,", "probability": 0.92626953125}, {"start": 409.45, "end": 410.79, "word": " 75", "probability": 0.7373046875}, {"start": 410.79, "end": 411.41, "word": " kilograms", "probability": 0.55322265625}, {"start": 411.41, "end": 411.69, "word": " to", "probability": 0.7392578125}, {"start": 411.69, "end": 412.03, "word": " 80", "probability": 0.98681640625}, {"start": 412.03, "end": 412.37, "word": " kilograms.", "probability": 0.80517578125}], "temperature": 1.0}, {"id": 16, "seek": 43715, "start": 414.27, "end": 437.15, "text": " 95%, for example, 60, for example, 60 to 85, 99% from 58 up to 90. Which one is best? Which is the best confidence interval among all of these? 
To say that we are 80% at 70 to 75,", "tokens": [13420, 8923, 337, 1365, 11, 4060, 11, 337, 1365, 11, 4060, 281, 14695, 11, 11803, 4, 490, 21786, 493, 281, 4289, 13, 3013, 472, 307, 1151, 30, 3013, 307, 264, 1151, 6687, 15035, 3654, 439, 295, 613, 30, 1407, 584, 300, 321, 366, 4688, 4, 412, 5285, 281, 9562, 11], "avg_logprob": -0.207720594663246, "compression_ratio": 1.3432835820895523, "no_speech_prob": 0.0, "words": [{"start": 414.2699999999999, "end": 414.98999999999995, "word": " 95%,", "probability": 0.531005859375}, {"start": 414.98999999999995, "end": 415.71, "word": " for", "probability": 0.9228515625}, {"start": 415.71, "end": 416.05, "word": " example,", "probability": 0.95068359375}, {"start": 416.19, "end": 416.59, "word": " 60,", "probability": 0.8564453125}, {"start": 417.23, "end": 417.95, "word": " for", "probability": 0.912109375}, {"start": 417.95, "end": 418.31, "word": " example,", "probability": 0.88427734375}, {"start": 418.49, "end": 418.81, "word": " 60", "probability": 0.5166015625}, {"start": 418.81, "end": 418.97, "word": " to", "probability": 0.7041015625}, {"start": 418.97, "end": 419.39, "word": " 85,", "probability": 0.96142578125}, {"start": 419.71, "end": 420.39, "word": " 99", "probability": 0.97705078125}, {"start": 420.39, "end": 420.71, "word": "%", "probability": 0.7265625}, {"start": 420.71, "end": 422.65, "word": " from", "probability": 0.86962890625}, {"start": 422.65, "end": 423.29, "word": " 58", "probability": 0.974609375}, {"start": 423.29, "end": 424.39, "word": " up", "probability": 0.8896484375}, {"start": 424.39, "end": 424.55, "word": " to", "probability": 0.966796875}, {"start": 424.55, "end": 424.89, "word": " 90.", "probability": 0.97900390625}, {"start": 427.21, "end": 427.93, "word": " Which", "probability": 0.8935546875}, {"start": 427.93, "end": 428.13, "word": " one", "probability": 0.92578125}, {"start": 428.13, "end": 428.27, "word": " is", "probability": 0.90087890625}, {"start": 428.27, "end": 428.55, 
"word": " best?", "probability": 0.86181640625}, {"start": 429.23, "end": 429.45, "word": " Which", "probability": 0.8916015625}, {"start": 429.45, "end": 429.57, "word": " is", "probability": 0.9267578125}, {"start": 429.57, "end": 429.67, "word": " the", "probability": 0.91650390625}, {"start": 429.67, "end": 429.85, "word": " best", "probability": 0.931640625}, {"start": 429.85, "end": 430.33, "word": " confidence", "probability": 0.958984375}, {"start": 430.33, "end": 430.73, "word": " interval", "probability": 0.98095703125}, {"start": 430.73, "end": 431.15, "word": " among", "probability": 0.9560546875}, {"start": 431.15, "end": 431.63, "word": " all", "probability": 0.94921875}, {"start": 431.63, "end": 431.75, "word": " of", "probability": 0.96484375}, {"start": 431.75, "end": 432.03, "word": " these?", "probability": 0.83935546875}, {"start": 433.53, "end": 433.79, "word": " To", "probability": 0.91015625}, {"start": 433.79, "end": 433.95, "word": " say", "probability": 0.9443359375}, {"start": 433.95, "end": 434.15, "word": " that", "probability": 0.919921875}, {"start": 434.15, "end": 434.55, "word": " we", "probability": 0.91162109375}, {"start": 434.55, "end": 434.81, "word": " are", "probability": 0.93994140625}, {"start": 434.81, "end": 435.41, "word": " 80", "probability": 0.9697265625}, {"start": 435.41, "end": 435.69, "word": "%", "probability": 0.9384765625}, {"start": 435.69, "end": 436.09, "word": " at", "probability": 0.37744140625}, {"start": 436.09, "end": 436.47, "word": " 70", "probability": 0.94921875}, {"start": 436.47, "end": 436.63, "word": " to", "probability": 0.88232421875}, {"start": 436.63, "end": 437.15, "word": " 75,", "probability": 0.97119140625}], "temperature": 1.0}, {"id": 17, "seek": 46632, "start": 439.42, "end": 466.32, "text": " 65 to 80, 60 to 85, 85 to 90. Which one is better? Now when we are saying from 70 to 75 it means the interval width is smaller or maybe the smallest among the others. 
So this one is the best confidence interval because the error here is just 5. Now here you can expect your weight.", "tokens": [11624, 281, 4688, 11, 4060, 281, 14695, 11, 14695, 281, 4289, 13, 3013, 472, 307, 1101, 30, 823, 562, 321, 366, 1566, 490, 5285, 281, 9562, 309, 1355, 264, 15035, 11402, 307, 4356, 420, 1310, 264, 16998, 3654, 264, 2357, 13, 407, 341, 472, 307, 264, 1151, 6687, 15035, 570, 264, 6713, 510, 307, 445, 1025, 13, 823, 510, 291, 393, 2066, 428, 3364, 13], "avg_logprob": -0.17388731354113782, "compression_ratio": 1.4764397905759161, "no_speech_prob": 0.0, "words": [{"start": 439.41999999999996, "end": 440.02, "word": " 65", "probability": 0.63330078125}, {"start": 440.02, "end": 440.32, "word": " to", "probability": 0.68017578125}, {"start": 440.32, "end": 440.7, "word": " 80,", "probability": 0.9462890625}, {"start": 440.86, "end": 441.4, "word": " 60", "probability": 0.89111328125}, {"start": 441.4, "end": 441.5, "word": " to", "probability": 0.86962890625}, {"start": 441.5, "end": 442.06, "word": " 85,", "probability": 0.9755859375}, {"start": 442.24, "end": 442.48, "word": " 85", "probability": 0.919921875}, {"start": 442.48, "end": 442.88, "word": " to", "probability": 0.97119140625}, {"start": 442.88, "end": 443.22, "word": " 90.", "probability": 0.98828125}, {"start": 444.86, "end": 445.46, "word": " Which", "probability": 0.8447265625}, {"start": 445.46, "end": 445.64, "word": " one", "probability": 0.9287109375}, {"start": 445.64, "end": 445.76, "word": " is", "probability": 0.90869140625}, {"start": 445.76, "end": 446.0, "word": " better?", "probability": 0.89453125}, {"start": 447.36, "end": 447.96, "word": " Now", "probability": 0.75634765625}, {"start": 447.96, "end": 448.52, "word": " when", "probability": 0.483154296875}, {"start": 448.52, "end": 448.68, "word": " we", "probability": 0.9580078125}, {"start": 448.68, "end": 448.82, "word": " are", "probability": 0.9296875}, {"start": 448.82, "end": 449.28, "word": " saying", 
"probability": 0.81396484375}, {"start": 449.28, "end": 451.04, "word": " from", "probability": 0.74365234375}, {"start": 451.04, "end": 451.42, "word": " 70", "probability": 0.78076171875}, {"start": 451.42, "end": 451.6, "word": " to", "probability": 0.953125}, {"start": 451.6, "end": 451.96, "word": " 75", "probability": 0.95166015625}, {"start": 451.96, "end": 452.16, "word": " it", "probability": 0.51171875}, {"start": 452.16, "end": 452.38, "word": " means", "probability": 0.93212890625}, {"start": 452.38, "end": 452.56, "word": " the", "probability": 0.8037109375}, {"start": 452.56, "end": 453.02, "word": " interval", "probability": 0.97900390625}, {"start": 453.02, "end": 453.5, "word": " width", "probability": 0.869140625}, {"start": 453.5, "end": 453.92, "word": " is", "probability": 0.9501953125}, {"start": 453.92, "end": 454.38, "word": " smaller", "probability": 0.8525390625}, {"start": 454.38, "end": 456.0, "word": " or", "probability": 0.57861328125}, {"start": 456.0, "end": 456.2, "word": " maybe", "probability": 0.90087890625}, {"start": 456.2, "end": 456.4, "word": " the", "probability": 0.87939453125}, {"start": 456.4, "end": 456.82, "word": " smallest", "probability": 0.94091796875}, {"start": 456.82, "end": 457.38, "word": " among", "probability": 0.81298828125}, {"start": 457.38, "end": 457.6, "word": " the", "probability": 0.83642578125}, {"start": 457.6, "end": 457.96, "word": " others.", "probability": 0.9052734375}, {"start": 458.4, "end": 458.62, "word": " So", "probability": 0.93310546875}, {"start": 458.62, "end": 458.82, "word": " this", "probability": 0.86669921875}, {"start": 458.82, "end": 459.02, "word": " one", "probability": 0.9248046875}, {"start": 459.02, "end": 459.16, "word": " is", "probability": 0.9404296875}, {"start": 459.16, "end": 459.78, "word": " the", "probability": 0.912109375}, {"start": 459.78, "end": 460.04, "word": " best", "probability": 0.9267578125}, {"start": 460.04, "end": 460.58, "word": " confidence", 
"probability": 0.96142578125}, {"start": 460.58, "end": 460.96, "word": " interval", "probability": 0.978515625}, {"start": 460.96, "end": 461.74, "word": " because", "probability": 0.4716796875}, {"start": 461.74, "end": 461.94, "word": " the", "probability": 0.91650390625}, {"start": 461.94, "end": 462.16, "word": " error", "probability": 0.87060546875}, {"start": 462.16, "end": 462.46, "word": " here", "probability": 0.8486328125}, {"start": 462.46, "end": 462.64, "word": " is", "probability": 0.94482421875}, {"start": 462.64, "end": 462.86, "word": " just", "probability": 0.91552734375}, {"start": 462.86, "end": 463.1, "word": " 5.", "probability": 0.7333984375}, {"start": 463.38, "end": 463.64, "word": " Now", "probability": 0.93994140625}, {"start": 463.64, "end": 463.9, "word": " here", "probability": 0.79052734375}, {"start": 463.9, "end": 464.06, "word": " you", "probability": 0.94677734375}, {"start": 464.06, "end": 464.34, "word": " can", "probability": 0.94384765625}, {"start": 464.34, "end": 465.18, "word": " expect", "probability": 0.92578125}, {"start": 465.18, "end": 465.56, "word": " your", "probability": 0.89013671875}, {"start": 465.56, "end": 466.32, "word": " weight.", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 18, "seek": 49549, "start": 467.47, "end": 495.49, "text": " But for the other scenarios here, it's very hard to say. Now suppose I am saying that your score in statistics is between 40 up to 100. It's too large. 
Suppose I am saying that with I am sure.", "tokens": [583, 337, 264, 661, 15077, 510, 11, 309, 311, 588, 1152, 281, 584, 13, 823, 7297, 286, 669, 1566, 300, 428, 6175, 294, 12523, 307, 1296, 3356, 493, 281, 2319, 13, 467, 311, 886, 2416, 13, 21360, 286, 669, 1566, 300, 365, 286, 669, 988, 13], "avg_logprob": -0.22639627913211255, "compression_ratio": 1.4087591240875912, "no_speech_prob": 0.0, "words": [{"start": 467.47, "end": 467.75, "word": " But", "probability": 0.796875}, {"start": 467.75, "end": 467.93, "word": " for", "probability": 0.89404296875}, {"start": 467.93, "end": 468.07, "word": " the", "probability": 0.89501953125}, {"start": 468.07, "end": 468.31, "word": " other", "probability": 0.89306640625}, {"start": 468.31, "end": 468.81, "word": " scenarios", "probability": 0.900390625}, {"start": 468.81, "end": 469.17, "word": " here,", "probability": 0.8486328125}, {"start": 469.81, "end": 470.11, "word": " it's", "probability": 0.939453125}, {"start": 470.11, "end": 470.39, "word": " very", "probability": 0.8642578125}, {"start": 470.39, "end": 470.65, "word": " hard", "probability": 0.833984375}, {"start": 470.65, "end": 470.81, "word": " to", "probability": 0.82080078125}, {"start": 470.81, "end": 470.95, "word": " say.", "probability": 0.291015625}, {"start": 471.07, "end": 471.33, "word": " Now", "probability": 0.83837890625}, {"start": 471.33, "end": 471.79, "word": " suppose", "probability": 0.59033203125}, {"start": 471.79, "end": 472.55, "word": " I", "probability": 0.8212890625}, {"start": 472.55, "end": 472.69, "word": " am", "probability": 0.60498046875}, {"start": 472.69, "end": 472.93, "word": " saying", "probability": 0.90185546875}, {"start": 472.93, "end": 473.25, "word": " that", "probability": 0.92919921875}, {"start": 473.25, "end": 473.83, "word": " your", "probability": 0.8779296875}, {"start": 473.83, "end": 474.43, "word": " score", "probability": 0.8896484375}, {"start": 474.43, "end": 474.69, "word": " in", "probability": 
0.91259765625}, {"start": 474.69, "end": 475.21, "word": " statistics", "probability": 0.86572265625}, {"start": 475.21, "end": 476.59, "word": " is", "probability": 0.93408203125}, {"start": 476.59, "end": 477.15, "word": " between", "probability": 0.89599609375}, {"start": 477.15, "end": 478.01, "word": " 40", "probability": 0.93701171875}, {"start": 478.01, "end": 478.57, "word": " up", "probability": 0.919921875}, {"start": 478.57, "end": 478.69, "word": " to", "probability": 0.96533203125}, {"start": 478.69, "end": 479.11, "word": " 100.", "probability": 0.89990234375}, {"start": 486.97, "end": 487.69, "word": " It's", "probability": 0.94775390625}, {"start": 487.69, "end": 487.95, "word": " too", "probability": 0.93701171875}, {"start": 487.95, "end": 488.39, "word": " large.", "probability": 0.96337890625}, {"start": 490.17, "end": 490.89, "word": " Suppose", "probability": 0.2685546875}, {"start": 490.89, "end": 491.91, "word": " I", "probability": 0.92822265625}, {"start": 491.91, "end": 492.09, "word": " am", "probability": 0.90380859375}, {"start": 492.09, "end": 492.45, "word": " saying", "probability": 0.91748046875}, {"start": 492.45, "end": 492.97, "word": " that", "probability": 0.919921875}, {"start": 492.97, "end": 494.39, "word": " with", "probability": 0.662109375}, {"start": 494.39, "end": 495.03, "word": " I", "probability": 0.233154296875}, {"start": 495.03, "end": 495.17, "word": " am", "probability": 0.88427734375}, {"start": 495.17, "end": 495.49, "word": " sure.", "probability": 0.919921875}], "temperature": 1.0}, {"id": 19, "seek": 52132, "start": 496.12, "end": 521.32, "text": " with 99.7% that your score in statistics lies between 50 to 100. And my question is, can you expect your score? 
It's very difficult because the range is too long from 50 up to 100.", "tokens": [365, 11803, 13, 22, 4, 300, 428, 6175, 294, 12523, 9134, 1296, 2625, 281, 2319, 13, 400, 452, 1168, 307, 11, 393, 291, 2066, 428, 6175, 30, 467, 311, 588, 2252, 570, 264, 3613, 307, 886, 938, 490, 2625, 493, 281, 2319, 13], "avg_logprob": -0.16344105079770088, "compression_ratio": 1.292857142857143, "no_speech_prob": 0.0, "words": [{"start": 496.12, "end": 496.58, "word": " with", "probability": 0.54443359375}, {"start": 496.58, "end": 497.22, "word": " 99", "probability": 0.96875}, {"start": 497.22, "end": 498.98, "word": ".7", "probability": 0.95703125}, {"start": 498.98, "end": 499.52, "word": "%", "probability": 0.775390625}, {"start": 499.52, "end": 500.1, "word": " that", "probability": 0.69921875}, {"start": 500.1, "end": 500.94, "word": " your", "probability": 0.88916015625}, {"start": 500.94, "end": 501.6, "word": " score", "probability": 0.8857421875}, {"start": 501.6, "end": 502.42, "word": " in", "probability": 0.849609375}, {"start": 502.42, "end": 503.12, "word": " statistics", "probability": 0.59765625}, {"start": 503.12, "end": 504.44, "word": " lies", "probability": 0.85986328125}, {"start": 504.44, "end": 505.12, "word": " between", "probability": 0.86767578125}, {"start": 505.12, "end": 507.14, "word": " 50", "probability": 0.8876953125}, {"start": 507.14, "end": 510.18, "word": " to", "probability": 0.80712890625}, {"start": 510.18, "end": 510.58, "word": " 100.", "probability": 0.9013671875}, {"start": 511.74, "end": 511.98, "word": " And", "probability": 0.6826171875}, {"start": 511.98, "end": 512.16, "word": " my", "probability": 0.9541015625}, {"start": 512.16, "end": 512.44, "word": " question", "probability": 0.92333984375}, {"start": 512.44, "end": 512.68, "word": " is,", "probability": 0.94873046875}, {"start": 512.78, "end": 512.94, "word": " can", "probability": 0.88720703125}, {"start": 512.94, "end": 513.62, "word": " you", "probability": 0.966796875}, 
{"start": 513.62, "end": 514.4, "word": " expect", "probability": 0.93994140625}, {"start": 514.4, "end": 514.62, "word": " your", "probability": 0.89111328125}, {"start": 514.62, "end": 514.98, "word": " score?", "probability": 0.8779296875}, {"start": 516.38, "end": 516.66, "word": " It's", "probability": 0.949951171875}, {"start": 516.66, "end": 516.86, "word": " very", "probability": 0.8564453125}, {"start": 516.86, "end": 517.18, "word": " difficult", "probability": 0.931640625}, {"start": 517.18, "end": 517.68, "word": " because", "probability": 0.6064453125}, {"start": 517.68, "end": 518.02, "word": " the", "probability": 0.912109375}, {"start": 518.02, "end": 518.32, "word": " range", "probability": 0.89404296875}, {"start": 518.32, "end": 518.66, "word": " is", "probability": 0.93603515625}, {"start": 518.66, "end": 519.36, "word": " too", "probability": 0.9384765625}, {"start": 519.36, "end": 519.72, "word": " long", "probability": 0.916015625}, {"start": 519.72, "end": 520.24, "word": " from", "probability": 0.5546875}, {"start": 520.24, "end": 520.6, "word": " 50", "probability": 0.95166015625}, {"start": 520.6, "end": 520.78, "word": " up", "probability": 0.95849609375}, {"start": 520.78, "end": 520.9, "word": " to", "probability": 0.97216796875}, {"start": 520.9, "end": 521.32, "word": " 100.", "probability": 0.90869140625}], "temperature": 1.0}, {"id": 20, "seek": 55247, "start": 523.73, "end": 552.47, "text": " Because here I am maybe 99.7%, so I'm almost sure that your score is between 50 and 100. But suppose someone else says that I am 95% that your score ranges between 60 to 90. It's still large, but it's better than the previous one. 
One else says I am 90% sure that your score lies between 70 to 85.", "tokens": [1436, 510, 286, 669, 1310, 11803, 13, 22, 8923, 370, 286, 478, 1920, 988, 300, 428, 6175, 307, 1296, 2625, 293, 2319, 13, 583, 7297, 1580, 1646, 1619, 300, 286, 669, 13420, 4, 300, 428, 6175, 22526, 1296, 4060, 281, 4289, 13, 467, 311, 920, 2416, 11, 457, 309, 311, 1101, 813, 264, 3894, 472, 13, 1485, 1646, 1619, 286, 669, 4289, 4, 988, 300, 428, 6175, 9134, 1296, 5285, 281, 14695, 13], "avg_logprob": -0.18042652288804184, "compression_ratio": 1.6464088397790055, "no_speech_prob": 0.0, "words": [{"start": 523.73, "end": 524.01, "word": " Because", "probability": 0.5703125}, {"start": 524.01, "end": 524.23, "word": " here", "probability": 0.83203125}, {"start": 524.23, "end": 524.41, "word": " I", "probability": 0.83740234375}, {"start": 524.41, "end": 524.63, "word": " am", "probability": 0.908203125}, {"start": 524.63, "end": 525.43, "word": " maybe", "probability": 0.8701171875}, {"start": 525.43, "end": 526.07, "word": " 99", "probability": 0.393798828125}, {"start": 526.07, "end": 526.85, "word": ".7%,", "probability": 0.78271484375}, {"start": 526.85, "end": 527.21, "word": " so", "probability": 0.9375}, {"start": 527.21, "end": 527.53, "word": " I'm", "probability": 0.938232421875}, {"start": 527.53, "end": 527.93, "word": " almost", "probability": 0.818359375}, {"start": 527.93, "end": 528.29, "word": " sure", "probability": 0.91259765625}, {"start": 528.29, "end": 528.69, "word": " that", "probability": 0.92578125}, {"start": 528.69, "end": 529.25, "word": " your", "probability": 0.89013671875}, {"start": 529.25, "end": 529.51, "word": " score", "probability": 0.8740234375}, {"start": 529.51, "end": 529.65, "word": " is", "probability": 0.89453125}, {"start": 529.65, "end": 529.89, "word": " between", "probability": 0.865234375}, {"start": 529.89, "end": 530.37, "word": " 50", "probability": 0.94873046875}, {"start": 530.37, "end": 531.07, "word": " and", "probability": 
0.89404296875}, {"start": 531.07, "end": 531.47, "word": " 100.", "probability": 0.91357421875}, {"start": 531.89, "end": 532.29, "word": " But", "probability": 0.95068359375}, {"start": 532.29, "end": 532.63, "word": " suppose", "probability": 0.9013671875}, {"start": 532.63, "end": 532.95, "word": " someone", "probability": 0.93798828125}, {"start": 532.95, "end": 533.43, "word": " else", "probability": 0.9033203125}, {"start": 533.43, "end": 534.51, "word": " says", "probability": 0.88330078125}, {"start": 534.51, "end": 534.89, "word": " that", "probability": 0.92529296875}, {"start": 534.89, "end": 535.99, "word": " I", "probability": 0.841796875}, {"start": 535.99, "end": 536.11, "word": " am", "probability": 0.400390625}, {"start": 536.11, "end": 536.49, "word": " 95", "probability": 0.98486328125}, {"start": 536.49, "end": 536.85, "word": "%", "probability": 0.7685546875}, {"start": 536.85, "end": 537.15, "word": " that", "probability": 0.5419921875}, {"start": 537.15, "end": 537.37, "word": " your", "probability": 0.88720703125}, {"start": 537.37, "end": 537.65, "word": " score", "probability": 0.86865234375}, {"start": 537.65, "end": 537.97, "word": " ranges", "probability": 0.5732421875}, {"start": 537.97, "end": 538.31, "word": " between", "probability": 0.72509765625}, {"start": 538.31, "end": 538.71, "word": " 60", "probability": 0.87353515625}, {"start": 538.71, "end": 538.89, "word": " to", "probability": 0.9228515625}, {"start": 538.89, "end": 539.21, "word": " 90.", "probability": 0.98779296875}, {"start": 539.73, "end": 540.03, "word": " It's", "probability": 0.9345703125}, {"start": 540.03, "end": 540.29, "word": " still", "probability": 0.9638671875}, {"start": 540.29, "end": 540.81, "word": " large,", "probability": 0.51708984375}, {"start": 540.91, "end": 541.07, "word": " but", "probability": 0.9208984375}, {"start": 541.07, "end": 541.27, "word": " it's", "probability": 0.92626953125}, {"start": 541.27, "end": 541.45, "word": " better", 
"probability": 0.86083984375}, {"start": 541.45, "end": 541.71, "word": " than", "probability": 0.9453125}, {"start": 541.71, "end": 541.85, "word": " the", "probability": 0.91943359375}, {"start": 541.85, "end": 542.13, "word": " previous", "probability": 0.85009765625}, {"start": 542.13, "end": 542.39, "word": " one.", "probability": 0.65771484375}, {"start": 543.81, "end": 544.51, "word": " One", "probability": 0.640625}, {"start": 544.51, "end": 544.91, "word": " else", "probability": 0.92138671875}, {"start": 544.91, "end": 545.41, "word": " says", "probability": 0.88818359375}, {"start": 545.41, "end": 546.09, "word": " I", "probability": 0.64404296875}, {"start": 546.09, "end": 546.21, "word": " am", "probability": 0.88037109375}, {"start": 546.21, "end": 546.49, "word": " 90", "probability": 0.9765625}, {"start": 546.49, "end": 546.97, "word": "%", "probability": 0.98974609375}, {"start": 546.97, "end": 549.17, "word": " sure", "probability": 0.9287109375}, {"start": 549.17, "end": 549.43, "word": " that", "probability": 0.93310546875}, {"start": 549.43, "end": 549.67, "word": " your", "probability": 0.88427734375}, {"start": 549.67, "end": 549.97, "word": " score", "probability": 0.86083984375}, {"start": 549.97, "end": 550.33, "word": " lies", "probability": 0.84130859375}, {"start": 550.33, "end": 550.61, "word": " between", "probability": 0.85546875}, {"start": 550.61, "end": 551.17, "word": " 70", "probability": 0.9853515625}, {"start": 551.17, "end": 551.99, "word": " to", "probability": 0.9677734375}, {"start": 551.99, "end": 552.47, "word": " 85.", "probability": 0.982421875}], "temperature": 1.0}, {"id": 21, "seek": 58390, "start": 554.88, "end": 583.9, "text": " Another one might be saying 85% of your score starts from 74 up to 80. Now here the range is small, so I can predict my scores just between 74 to 80. So we like to have confidence interval to be small as much as possible. So that's for the range. 
So in this case,", "tokens": [3996, 472, 1062, 312, 1566, 14695, 4, 295, 428, 6175, 3719, 490, 28868, 493, 281, 4688, 13, 823, 510, 264, 3613, 307, 1359, 11, 370, 286, 393, 6069, 452, 13444, 445, 1296, 28868, 281, 4688, 13, 407, 321, 411, 281, 362, 6687, 15035, 281, 312, 1359, 382, 709, 382, 1944, 13, 407, 300, 311, 337, 264, 3613, 13, 407, 294, 341, 1389, 11], "avg_logprob": -0.23107910016551614, "compression_ratio": 1.4193548387096775, "no_speech_prob": 0.0, "words": [{"start": 554.88, "end": 555.28, "word": " Another", "probability": 0.50244140625}, {"start": 555.28, "end": 555.62, "word": " one", "probability": 0.90625}, {"start": 555.62, "end": 556.02, "word": " might", "probability": 0.8310546875}, {"start": 556.02, "end": 556.44, "word": " be", "probability": 0.88232421875}, {"start": 556.44, "end": 557.34, "word": " saying", "probability": 0.8212890625}, {"start": 557.34, "end": 558.12, "word": " 85", "probability": 0.21484375}, {"start": 558.12, "end": 558.88, "word": "%", "probability": 0.72216796875}, {"start": 558.88, "end": 559.1, "word": " of", "probability": 0.264404296875}, {"start": 559.1, "end": 559.76, "word": " your", "probability": 0.271484375}, {"start": 559.76, "end": 560.24, "word": " score", "probability": 0.73095703125}, {"start": 560.24, "end": 560.98, "word": " starts", "probability": 0.82275390625}, {"start": 560.98, "end": 561.24, "word": " from", "probability": 0.87939453125}, {"start": 561.24, "end": 562.0, "word": " 74", "probability": 0.9580078125}, {"start": 562.0, "end": 563.04, "word": " up", "probability": 0.67236328125}, {"start": 563.04, "end": 563.18, "word": " to", "probability": 0.96435546875}, {"start": 563.18, "end": 563.48, "word": " 80.", "probability": 0.9814453125}, {"start": 565.6, "end": 566.4, "word": " Now", "probability": 0.9150390625}, {"start": 566.4, "end": 566.62, "word": " here", "probability": 0.68798828125}, {"start": 566.62, "end": 566.84, "word": " the", "probability": 0.587890625}, {"start": 566.84, 
"end": 567.36, "word": " range", "probability": 0.87255859375}, {"start": 567.36, "end": 567.62, "word": " is", "probability": 0.94287109375}, {"start": 567.62, "end": 568.46, "word": " small,", "probability": 0.943359375}, {"start": 568.94, "end": 569.18, "word": " so", "probability": 0.8818359375}, {"start": 569.18, "end": 569.56, "word": " I", "probability": 0.69580078125}, {"start": 569.56, "end": 569.84, "word": " can", "probability": 0.70068359375}, {"start": 569.84, "end": 570.1, "word": " predict", "probability": 0.67236328125}, {"start": 570.1, "end": 570.58, "word": " my", "probability": 0.95166015625}, {"start": 570.58, "end": 571.92, "word": " scores", "probability": 0.6806640625}, {"start": 571.92, "end": 572.36, "word": " just", "probability": 0.70068359375}, {"start": 572.36, "end": 572.64, "word": " between", "probability": 0.8525390625}, {"start": 572.64, "end": 573.12, "word": " 74", "probability": 0.95849609375}, {"start": 573.12, "end": 573.32, "word": " to", "probability": 0.87890625}, {"start": 573.32, "end": 573.6, "word": " 80.", "probability": 0.98388671875}, {"start": 574.18, "end": 574.74, "word": " So", "probability": 0.9130859375}, {"start": 574.74, "end": 575.28, "word": " we", "probability": 0.81591796875}, {"start": 575.28, "end": 575.74, "word": " like", "probability": 0.93359375}, {"start": 575.74, "end": 576.18, "word": " to", "probability": 0.9521484375}, {"start": 576.18, "end": 576.4, "word": " have", "probability": 0.947265625}, {"start": 576.4, "end": 576.9, "word": " confidence", "probability": 0.96435546875}, {"start": 576.9, "end": 577.3, "word": " interval", "probability": 0.80126953125}, {"start": 577.3, "end": 577.52, "word": " to", "probability": 0.9130859375}, {"start": 577.52, "end": 577.68, "word": " be", "probability": 0.947265625}, {"start": 577.68, "end": 578.04, "word": " small", "probability": 0.8330078125}, {"start": 578.04, "end": 578.66, "word": " as", "probability": 0.7021484375}, {"start": 578.66, "end": 
578.88, "word": " much", "probability": 0.90234375}, {"start": 578.88, "end": 579.06, "word": " as", "probability": 0.9580078125}, {"start": 579.06, "end": 579.34, "word": " possible.", "probability": 0.9365234375}, {"start": 580.3, "end": 580.9, "word": " So", "probability": 0.923828125}, {"start": 580.9, "end": 581.2, "word": " that's", "probability": 0.936767578125}, {"start": 581.2, "end": 581.42, "word": " for", "probability": 0.82958984375}, {"start": 581.42, "end": 581.68, "word": " the", "probability": 0.91650390625}, {"start": 581.68, "end": 582.28, "word": " range.", "probability": 0.89990234375}, {"start": 583.02, "end": 583.28, "word": " So", "probability": 0.943359375}, {"start": 583.28, "end": 583.38, "word": " in", "probability": 0.90673828125}, {"start": 583.38, "end": 583.56, "word": " this", "probability": 0.94677734375}, {"start": 583.56, "end": 583.9, "word": " case,", "probability": 0.90966796875}], "temperature": 1.0}, {"id": 22, "seek": 60876, "start": 584.8, "end": 608.76, "text": " The true parameter is very close to one of these values. Always the confidence interval is stated in terms of level of confidence. 
The common ones are either 90%, 95% or 99%.", "tokens": [440, 2074, 13075, 307, 588, 1998, 281, 472, 295, 613, 4190, 13, 11270, 264, 6687, 15035, 307, 11323, 294, 2115, 295, 1496, 295, 6687, 13, 440, 2689, 2306, 366, 2139, 4289, 8923, 13420, 4, 420, 11803, 6856], "avg_logprob": -0.1447368428895348, "compression_ratio": 1.3358778625954197, "no_speech_prob": 0.0, "words": [{"start": 584.8, "end": 585.16, "word": " The", "probability": 0.70947265625}, {"start": 585.16, "end": 585.44, "word": " true", "probability": 0.779296875}, {"start": 585.44, "end": 585.88, "word": " parameter", "probability": 0.93505859375}, {"start": 585.88, "end": 587.02, "word": " is", "probability": 0.91259765625}, {"start": 587.02, "end": 587.3, "word": " very", "probability": 0.82666015625}, {"start": 587.3, "end": 587.68, "word": " close", "probability": 0.8916015625}, {"start": 587.68, "end": 587.84, "word": " to", "probability": 0.95947265625}, {"start": 587.84, "end": 587.98, "word": " one", "probability": 0.9052734375}, {"start": 587.98, "end": 588.12, "word": " of", "probability": 0.96435546875}, {"start": 588.12, "end": 588.42, "word": " these", "probability": 0.86376953125}, {"start": 588.42, "end": 589.18, "word": " values.", "probability": 0.91796875}, {"start": 591.72, "end": 592.6, "word": " Always", "probability": 0.740234375}, {"start": 592.6, "end": 593.16, "word": " the", "probability": 0.56201171875}, {"start": 593.16, "end": 593.6, "word": " confidence", "probability": 0.9599609375}, {"start": 593.6, "end": 594.06, "word": " interval", "probability": 0.93408203125}, {"start": 594.06, "end": 594.32, "word": " is", "probability": 0.91064453125}, {"start": 594.32, "end": 594.58, "word": " stated", "probability": 0.904296875}, {"start": 594.58, "end": 594.86, "word": " in", "probability": 0.9453125}, {"start": 594.86, "end": 595.2, "word": " terms", "probability": 0.90673828125}, {"start": 595.2, "end": 595.36, "word": " of", "probability": 0.96923828125}, {"start": 595.36, 
"end": 595.56, "word": " level", "probability": 0.85986328125}, {"start": 595.56, "end": 595.76, "word": " of", "probability": 0.97119140625}, {"start": 595.76, "end": 596.38, "word": " confidence.", "probability": 0.97900390625}, {"start": 597.56, "end": 597.82, "word": " The", "probability": 0.8857421875}, {"start": 597.82, "end": 598.28, "word": " common", "probability": 0.8603515625}, {"start": 598.28, "end": 598.82, "word": " ones", "probability": 0.9140625}, {"start": 598.82, "end": 599.38, "word": " are", "probability": 0.93701171875}, {"start": 599.38, "end": 600.12, "word": " either", "probability": 0.93212890625}, {"start": 600.12, "end": 601.52, "word": " 90%,", "probability": 0.749755859375}, {"start": 601.52, "end": 605.76, "word": " 95", "probability": 0.8603515625}, {"start": 605.76, "end": 606.58, "word": "%", "probability": 0.4775390625}, {"start": 606.58, "end": 607.78, "word": " or", "probability": 0.927734375}, {"start": 607.78, "end": 608.76, "word": " 99%.", "probability": 0.902099609375}], "temperature": 1.0}, {"id": 23, "seek": 62591, "start": 609.39, "end": 625.91, "text": " So these are the common levels of confidence. Next slide, Inshallah, will turn to the other side. 
Now, for example,", "tokens": [407, 613, 366, 264, 2689, 4358, 295, 6687, 13, 3087, 4137, 11, 682, 2716, 13492, 11, 486, 1261, 281, 264, 661, 1252, 13, 823, 11, 337, 1365, 11], "avg_logprob": -0.32354525451002447, "compression_ratio": 1.16, "no_speech_prob": 0.0, "words": [{"start": 609.39, "end": 609.69, "word": " So", "probability": 0.53466796875}, {"start": 609.69, "end": 609.91, "word": " these", "probability": 0.7412109375}, {"start": 609.91, "end": 610.19, "word": " are", "probability": 0.9462890625}, {"start": 610.19, "end": 610.37, "word": " the", "probability": 0.875}, {"start": 610.37, "end": 610.69, "word": " common", "probability": 0.84326171875}, {"start": 610.69, "end": 611.17, "word": " levels", "probability": 0.48193359375}, {"start": 611.17, "end": 611.55, "word": " of", "probability": 0.974609375}, {"start": 611.55, "end": 612.23, "word": " confidence.", "probability": 0.97021484375}, {"start": 621.23, "end": 621.49, "word": " Next", "probability": 0.7529296875}, {"start": 621.49, "end": 621.83, "word": " slide,", "probability": 0.9580078125}, {"start": 621.93, "end": 622.15, "word": " Inshallah,", "probability": 0.6381022135416666}, {"start": 622.41, "end": 622.41, "word": " will", "probability": 0.65234375}, {"start": 622.41, "end": 622.93, "word": " turn", "probability": 0.693359375}, {"start": 622.93, "end": 623.23, "word": " to", "probability": 0.53271484375}, {"start": 623.23, "end": 623.33, "word": " the", "probability": 0.85302734375}, {"start": 623.33, "end": 623.51, "word": " other", "probability": 0.8828125}, {"start": 623.51, "end": 623.85, "word": " side.", "probability": 0.86083984375}, {"start": 625.11, "end": 625.31, "word": " Now,", "probability": 0.828125}, {"start": 625.43, "end": 625.57, "word": " for", "probability": 0.95947265625}, {"start": 625.57, "end": 625.91, "word": " example,", "probability": 0.97216796875}], "temperature": 1.0}, {"id": 24, "seek": 66423, "start": 638.57, "end": 664.23, "text": " For example, serial 
example, suppose we know that the population mean mu is given to be 368 and sigma is 15. Suppose we know the population mean and the population summation. In reality, if these two parameters are unknown, we don't need to select a random sample.", "tokens": [1171, 1365, 11, 17436, 1365, 11, 7297, 321, 458, 300, 264, 4415, 914, 2992, 307, 2212, 281, 312, 8652, 23, 293, 12771, 307, 2119, 13, 21360, 321, 458, 264, 4415, 914, 293, 264, 4415, 28811, 13, 682, 4103, 11, 498, 613, 732, 9834, 366, 9841, 11, 321, 500, 380, 643, 281, 3048, 257, 4974, 6889, 13], "avg_logprob": -0.20312499372582687, "compression_ratio": 1.5963855421686748, "no_speech_prob": 0.0, "words": [{"start": 638.57, "end": 638.97, "word": " For", "probability": 0.787109375}, {"start": 638.97, "end": 639.43, "word": " example,", "probability": 0.97314453125}, {"start": 641.79, "end": 642.67, "word": " serial", "probability": 0.423095703125}, {"start": 642.67, "end": 643.79, "word": " example,", "probability": 0.6318359375}, {"start": 643.97, "end": 644.47, "word": " suppose", "probability": 0.89990234375}, {"start": 644.47, "end": 645.73, "word": " we", "probability": 0.87158203125}, {"start": 645.73, "end": 645.95, "word": " know", "probability": 0.88916015625}, {"start": 645.95, "end": 646.35, "word": " that", "probability": 0.93212890625}, {"start": 646.35, "end": 647.51, "word": " the", "probability": 0.86962890625}, {"start": 647.51, "end": 647.95, "word": " population", "probability": 0.95556640625}, {"start": 647.95, "end": 648.23, "word": " mean", "probability": 0.89404296875}, {"start": 648.23, "end": 648.49, "word": " mu", "probability": 0.5341796875}, {"start": 648.49, "end": 649.37, "word": " is", "probability": 0.919921875}, {"start": 649.37, "end": 649.63, "word": " given", "probability": 0.90673828125}, {"start": 649.63, "end": 649.85, "word": " to", "probability": 0.97314453125}, {"start": 649.85, "end": 650.03, "word": " be", "probability": 0.9482421875}, {"start": 650.03, "end": 651.19, 
"word": " 368", "probability": 0.921875}, {"start": 651.19, "end": 653.13, "word": " and", "probability": 0.51171875}, {"start": 653.13, "end": 653.45, "word": " sigma", "probability": 0.88232421875}, {"start": 653.45, "end": 653.67, "word": " is", "probability": 0.94921875}, {"start": 653.67, "end": 654.13, "word": " 15.", "probability": 0.87841796875}, {"start": 654.61, "end": 654.93, "word": " Suppose", "probability": 0.75439453125}, {"start": 654.93, "end": 655.19, "word": " we", "probability": 0.92578125}, {"start": 655.19, "end": 655.47, "word": " know", "probability": 0.8828125}, {"start": 655.47, "end": 655.87, "word": " the", "probability": 0.85546875}, {"start": 655.87, "end": 656.27, "word": " population", "probability": 0.95654296875}, {"start": 656.27, "end": 656.61, "word": " mean", "probability": 0.94287109375}, {"start": 656.61, "end": 656.89, "word": " and", "probability": 0.92236328125}, {"start": 656.89, "end": 657.01, "word": " the", "probability": 0.62255859375}, {"start": 657.01, "end": 657.43, "word": " population", "probability": 0.958984375}, {"start": 657.43, "end": 657.83, "word": " summation.", "probability": 0.181884765625}, {"start": 658.65, "end": 658.93, "word": " In", "probability": 0.82861328125}, {"start": 658.93, "end": 659.45, "word": " reality,", "probability": 0.95947265625}, {"start": 659.61, "end": 659.71, "word": " if", "probability": 0.8447265625}, {"start": 659.71, "end": 660.03, "word": " these", "probability": 0.84375}, {"start": 660.03, "end": 660.35, "word": " two", "probability": 0.8642578125}, {"start": 660.35, "end": 660.85, "word": " parameters", "probability": 0.97509765625}, {"start": 660.85, "end": 661.17, "word": " are", "probability": 0.94287109375}, {"start": 661.17, "end": 661.59, "word": " unknown,", "probability": 0.890625}, {"start": 662.21, "end": 662.47, "word": " we", "probability": 0.947265625}, {"start": 662.47, "end": 662.77, "word": " don't", "probability": 0.97119140625}, {"start": 662.77, "end": 
663.05, "word": " need", "probability": 0.92626953125}, {"start": 663.05, "end": 663.25, "word": " to", "probability": 0.966796875}, {"start": 663.25, "end": 663.55, "word": " select", "probability": 0.8388671875}, {"start": 663.55, "end": 663.65, "word": " a", "probability": 0.779296875}, {"start": 663.65, "end": 663.89, "word": " random", "probability": 0.85888671875}, {"start": 663.89, "end": 664.23, "word": " sample.", "probability": 0.84912109375}], "temperature": 1.0}, {"id": 25, "seek": 68552, "start": 665.08, "end": 685.52, "text": " Because we are selecting the samples in order to estimate these unknown parameters. But this example is just for illustration. So again, a mu in reality is not given, is unknown, as well as the standard deviation. But suppose from previous studies we know that", "tokens": [1436, 321, 366, 18182, 264, 10938, 294, 1668, 281, 12539, 613, 9841, 9834, 13, 583, 341, 1365, 307, 445, 337, 22645, 13, 407, 797, 11, 257, 2992, 294, 4103, 307, 406, 2212, 11, 307, 9841, 11, 382, 731, 382, 264, 3832, 25163, 13, 583, 7297, 490, 3894, 5313, 321, 458, 300], "avg_logprob": -0.19486177225525564, "compression_ratio": 1.5263157894736843, "no_speech_prob": 0.0, "words": [{"start": 665.08, "end": 665.42, "word": " Because", "probability": 0.59619140625}, {"start": 665.42, "end": 665.58, "word": " we", "probability": 0.9130859375}, {"start": 665.58, "end": 665.7, "word": " are", "probability": 0.92041015625}, {"start": 665.7, "end": 666.14, "word": " selecting", "probability": 0.8564453125}, {"start": 666.14, "end": 666.38, "word": " the", "probability": 0.857421875}, {"start": 666.38, "end": 666.74, "word": " samples", "probability": 0.8681640625}, {"start": 666.74, "end": 667.02, "word": " in", "probability": 0.88134765625}, {"start": 667.02, "end": 667.24, "word": " order", "probability": 0.92529296875}, {"start": 667.24, "end": 667.48, "word": " to", "probability": 0.966796875}, {"start": 667.48, "end": 667.96, "word": " estimate", "probability": 
0.93994140625}, {"start": 667.96, "end": 668.48, "word": " these", "probability": 0.83251953125}, {"start": 668.48, "end": 668.8, "word": " unknown", "probability": 0.88037109375}, {"start": 668.8, "end": 669.38, "word": " parameters.", "probability": 0.96728515625}, {"start": 670.74, "end": 670.94, "word": " But", "probability": 0.892578125}, {"start": 670.94, "end": 671.14, "word": " this", "probability": 0.91796875}, {"start": 671.14, "end": 671.48, "word": " example", "probability": 0.87841796875}, {"start": 671.48, "end": 671.6, "word": " is", "probability": 0.55419921875}, {"start": 671.6, "end": 671.76, "word": " just", "probability": 0.9052734375}, {"start": 671.76, "end": 671.96, "word": " for", "probability": 0.9189453125}, {"start": 671.96, "end": 672.48, "word": " illustration.", "probability": 0.89208984375}, {"start": 674.44, "end": 674.64, "word": " So", "probability": 0.87060546875}, {"start": 674.64, "end": 674.98, "word": " again,", "probability": 0.85595703125}, {"start": 675.24, "end": 675.38, "word": " a", "probability": 0.1990966796875}, {"start": 675.38, "end": 675.56, "word": " mu", "probability": 0.5107421875}, {"start": 675.56, "end": 676.64, "word": " in", "probability": 0.76904296875}, {"start": 676.64, "end": 677.1, "word": " reality", "probability": 0.9716796875}, {"start": 677.1, "end": 678.34, "word": " is", "probability": 0.904296875}, {"start": 678.34, "end": 678.54, "word": " not", "probability": 0.93701171875}, {"start": 678.54, "end": 678.84, "word": " given,", "probability": 0.865234375}, {"start": 679.0, "end": 679.1, "word": " is", "probability": 0.537109375}, {"start": 679.1, "end": 679.46, "word": " unknown,", "probability": 0.89208984375}, {"start": 680.18, "end": 680.92, "word": " as", "probability": 0.96044921875}, {"start": 680.92, "end": 681.08, "word": " well", "probability": 0.9345703125}, {"start": 681.08, "end": 681.5, "word": " as", "probability": 0.96044921875}, {"start": 681.5, "end": 681.82, "word": " the", 
"probability": 0.82568359375}, {"start": 681.82, "end": 682.02, "word": " standard", "probability": 0.93115234375}, {"start": 682.02, "end": 682.42, "word": " deviation.", "probability": 0.888671875}, {"start": 683.04, "end": 683.26, "word": " But", "probability": 0.94140625}, {"start": 683.26, "end": 683.62, "word": " suppose", "probability": 0.86962890625}, {"start": 683.62, "end": 683.86, "word": " from", "probability": 0.779296875}, {"start": 683.86, "end": 684.34, "word": " previous", "probability": 0.83740234375}, {"start": 684.34, "end": 684.78, "word": " studies", "probability": 0.96923828125}, {"start": 684.78, "end": 685.02, "word": " we", "probability": 0.58056640625}, {"start": 685.02, "end": 685.2, "word": " know", "probability": 0.88525390625}, {"start": 685.2, "end": 685.52, "word": " that", "probability": 0.939453125}], "temperature": 1.0}, {"id": 26, "seek": 71485, "start": 686.39, "end": 714.85, "text": " The population mean is given by 368, and the standard deviation for the population is about 15. Suppose we know this information from the history or from the previous studies. Now suppose we take a random sample of size 25, and this sample gives the following information. 
So here, we have mean of", "tokens": [440, 4415, 914, 307, 2212, 538, 8652, 23, 11, 293, 264, 3832, 25163, 337, 264, 4415, 307, 466, 2119, 13, 21360, 321, 458, 341, 1589, 490, 264, 2503, 420, 490, 264, 3894, 5313, 13, 823, 7297, 321, 747, 257, 4974, 6889, 295, 2744, 3552, 11, 293, 341, 6889, 2709, 264, 3480, 1589, 13, 407, 510, 11, 321, 362, 914, 295], "avg_logprob": -0.15612193306938546, "compression_ratio": 1.6021505376344085, "no_speech_prob": 0.0, "words": [{"start": 686.39, "end": 686.63, "word": " The", "probability": 0.66015625}, {"start": 686.63, "end": 687.03, "word": " population", "probability": 0.96533203125}, {"start": 687.03, "end": 687.35, "word": " mean", "probability": 0.9375}, {"start": 687.35, "end": 687.51, "word": " is", "probability": 0.943359375}, {"start": 687.51, "end": 687.73, "word": " given", "probability": 0.88818359375}, {"start": 687.73, "end": 688.09, "word": " by", "probability": 0.97265625}, {"start": 688.09, "end": 689.35, "word": " 368,", "probability": 0.827392578125}, {"start": 689.69, "end": 690.45, "word": " and", "probability": 0.93115234375}, {"start": 690.45, "end": 690.65, "word": " the", "probability": 0.90576171875}, {"start": 690.65, "end": 690.99, "word": " standard", "probability": 0.93115234375}, {"start": 690.99, "end": 691.49, "word": " deviation", "probability": 0.9150390625}, {"start": 691.49, "end": 692.31, "word": " for", "probability": 0.92626953125}, {"start": 692.31, "end": 692.45, "word": " the", "probability": 0.91845703125}, {"start": 692.45, "end": 692.79, "word": " population", "probability": 0.94873046875}, {"start": 692.79, "end": 693.09, "word": " is", "probability": 0.94580078125}, {"start": 693.09, "end": 693.35, "word": " about", "probability": 0.9150390625}, {"start": 693.35, "end": 693.83, "word": " 15.", "probability": 0.9404296875}, {"start": 694.17, "end": 694.49, "word": " Suppose", "probability": 0.80517578125}, {"start": 694.49, "end": 694.71, "word": " we", "probability": 0.92578125}, 
{"start": 694.71, "end": 694.83, "word": " know", "probability": 0.8623046875}, {"start": 694.83, "end": 695.01, "word": " this", "probability": 0.94482421875}, {"start": 695.01, "end": 695.49, "word": " information", "probability": 0.84375}, {"start": 695.49, "end": 695.93, "word": " from", "probability": 0.89306640625}, {"start": 695.93, "end": 697.11, "word": " the", "probability": 0.89501953125}, {"start": 697.11, "end": 697.57, "word": " history", "probability": 0.91552734375}, {"start": 697.57, "end": 698.23, "word": " or", "probability": 0.529296875}, {"start": 698.23, "end": 698.53, "word": " from", "probability": 0.87744140625}, {"start": 698.53, "end": 698.83, "word": " the", "probability": 0.91259765625}, {"start": 698.83, "end": 699.47, "word": " previous", "probability": 0.86181640625}, {"start": 699.47, "end": 699.85, "word": " studies.", "probability": 0.84521484375}, {"start": 701.17, "end": 701.89, "word": " Now", "probability": 0.9521484375}, {"start": 701.89, "end": 702.35, "word": " suppose", "probability": 0.666015625}, {"start": 702.35, "end": 702.53, "word": " we", "probability": 0.93701171875}, {"start": 702.53, "end": 702.73, "word": " take", "probability": 0.88427734375}, {"start": 702.73, "end": 702.87, "word": " a", "probability": 0.94970703125}, {"start": 702.87, "end": 703.07, "word": " random", "probability": 0.87841796875}, {"start": 703.07, "end": 703.53, "word": " sample", "probability": 0.84521484375}, {"start": 703.53, "end": 703.77, "word": " of", "probability": 0.916015625}, {"start": 703.77, "end": 704.13, "word": " size", "probability": 0.87158203125}, {"start": 704.13, "end": 704.67, "word": " 25,", "probability": 0.9619140625}, {"start": 705.67, "end": 705.93, "word": " and", "probability": 0.935546875}, {"start": 705.93, "end": 706.19, "word": " this", "probability": 0.94921875}, {"start": 706.19, "end": 706.65, "word": " sample", "probability": 0.87841796875}, {"start": 706.65, "end": 708.19, "word": " gives", 
"probability": 0.8984375}, {"start": 708.19, "end": 708.75, "word": " the", "probability": 0.919921875}, {"start": 708.75, "end": 709.01, "word": " following", "probability": 0.888671875}, {"start": 709.01, "end": 709.51, "word": " information.", "probability": 0.85498046875}, {"start": 711.69, "end": 712.41, "word": " So", "probability": 0.95068359375}, {"start": 712.41, "end": 712.67, "word": " here,", "probability": 0.814453125}, {"start": 713.15, "end": 713.75, "word": " we", "probability": 0.9580078125}, {"start": 713.75, "end": 714.03, "word": " have", "probability": 0.9453125}, {"start": 714.03, "end": 714.45, "word": " mean", "probability": 0.94384765625}, {"start": 714.45, "end": 714.85, "word": " of", "probability": 0.96826171875}], "temperature": 1.0}, {"id": 27, "seek": 73940, "start": 715.96, "end": 739.4, "text": " 68 sigma of 15. And let's see how can we construct the confidence interval as we mentioned in the previous lectures. If you remember the score, the x bar minus mu divided by sigma over root n. 
In this case, let's see how can we compute x bar from this equation.", "tokens": [23317, 12771, 295, 2119, 13, 400, 718, 311, 536, 577, 393, 321, 7690, 264, 6687, 15035, 382, 321, 2835, 294, 264, 3894, 16564, 13, 759, 291, 1604, 264, 6175, 11, 264, 2031, 2159, 3175, 2992, 6666, 538, 12771, 670, 5593, 297, 13, 682, 341, 1389, 11, 718, 311, 536, 577, 393, 321, 14722, 2031, 2159, 490, 341, 5367, 13], "avg_logprob": -0.21953124900658924, "compression_ratio": 1.5502958579881656, "no_speech_prob": 0.0, "words": [{"start": 715.96, "end": 716.6, "word": " 68", "probability": 0.1029052734375}, {"start": 716.6, "end": 717.24, "word": " sigma", "probability": 0.56298828125}, {"start": 717.24, "end": 717.62, "word": " of", "probability": 0.740234375}, {"start": 717.62, "end": 718.9, "word": " 15.", "probability": 0.9453125}, {"start": 719.6, "end": 720.02, "word": " And", "probability": 0.88134765625}, {"start": 720.02, "end": 720.26, "word": " let's", "probability": 0.9140625}, {"start": 720.26, "end": 720.38, "word": " see", "probability": 0.908203125}, {"start": 720.38, "end": 720.48, "word": " how", "probability": 0.91796875}, {"start": 720.48, "end": 720.68, "word": " can", "probability": 0.67626953125}, {"start": 720.68, "end": 720.8, "word": " we", "probability": 0.9521484375}, {"start": 720.8, "end": 721.4, "word": " construct", "probability": 0.96337890625}, {"start": 721.4, "end": 721.56, "word": " the", "probability": 0.705078125}, {"start": 721.56, "end": 721.94, "word": " confidence", "probability": 0.98095703125}, {"start": 721.94, "end": 722.42, "word": " interval", "probability": 0.96044921875}, {"start": 722.42, "end": 722.66, "word": " as", "probability": 0.71826171875}, {"start": 722.66, "end": 722.8, "word": " we", "probability": 0.94140625}, {"start": 722.8, "end": 723.22, "word": " mentioned", "probability": 0.81982421875}, {"start": 723.22, "end": 723.84, "word": " in", "probability": 0.93408203125}, {"start": 723.84, "end": 723.98, "word": " the", 
"probability": 0.91259765625}, {"start": 723.98, "end": 724.34, "word": " previous", "probability": 0.86328125}, {"start": 724.34, "end": 724.94, "word": " lectures.", "probability": 0.8759765625}, {"start": 726.48, "end": 726.9, "word": " If", "probability": 0.73486328125}, {"start": 726.9, "end": 727.0, "word": " you", "probability": 0.95751953125}, {"start": 727.0, "end": 727.24, "word": " remember", "probability": 0.87451171875}, {"start": 727.24, "end": 727.46, "word": " the", "probability": 0.47900390625}, {"start": 727.46, "end": 727.86, "word": " score,", "probability": 0.52880859375}, {"start": 729.0, "end": 729.2, "word": " the", "probability": 0.41162109375}, {"start": 729.2, "end": 729.52, "word": " x", "probability": 0.53759765625}, {"start": 729.52, "end": 729.74, "word": " bar", "probability": 0.76806640625}, {"start": 729.74, "end": 730.06, "word": " minus", "probability": 0.9716796875}, {"start": 730.06, "end": 730.38, "word": " mu", "probability": 0.685546875}, {"start": 730.38, "end": 730.8, "word": " divided", "probability": 0.685546875}, {"start": 730.8, "end": 731.02, "word": " by", "probability": 0.9677734375}, {"start": 731.02, "end": 731.36, "word": " sigma", "probability": 0.9345703125}, {"start": 731.36, "end": 731.6, "word": " over", "probability": 0.833984375}, {"start": 731.6, "end": 731.84, "word": " root", "probability": 0.892578125}, {"start": 731.84, "end": 732.04, "word": " n.", "probability": 0.72900390625}, {"start": 733.68, "end": 733.86, "word": " In", "probability": 0.92333984375}, {"start": 733.86, "end": 734.1, "word": " this", "probability": 0.94775390625}, {"start": 734.1, "end": 734.62, "word": " case,", "probability": 0.912109375}, {"start": 735.84, "end": 736.2, "word": " let's", "probability": 0.96240234375}, {"start": 736.2, "end": 736.3, "word": " see", "probability": 0.8955078125}, {"start": 736.3, "end": 736.42, "word": " how", "probability": 0.92919921875}, {"start": 736.42, "end": 736.62, "word": " can", 
"probability": 0.87646484375}, {"start": 736.62, "end": 736.92, "word": " we", "probability": 0.95458984375}, {"start": 736.92, "end": 738.0, "word": " compute", "probability": 0.91259765625}, {"start": 738.0, "end": 738.26, "word": " x", "probability": 0.98046875}, {"start": 738.26, "end": 738.52, "word": " bar", "probability": 0.9033203125}, {"start": 738.52, "end": 738.76, "word": " from", "probability": 0.86669921875}, {"start": 738.76, "end": 738.96, "word": " this", "probability": 0.94091796875}, {"start": 738.96, "end": 739.4, "word": " equation.", "probability": 0.9423828125}], "temperature": 1.0}, {"id": 28, "seek": 76668, "start": 740.24, "end": 766.68, "text": " So z equals x bar minus mu divided by sigma over root n, just cross multiplication, you will get x bar to be mu plus z sigma over root n. So this is the value of x bar, mu plus z sigma over root n, just cross multiplication. Now the value of z could be positive or negative, it depends on the direction of the z score you have. 
So if z score lies in the left side,", "tokens": [407, 710, 6915, 2031, 2159, 3175, 2992, 6666, 538, 12771, 670, 5593, 297, 11, 445, 3278, 27290, 11, 291, 486, 483, 2031, 2159, 281, 312, 2992, 1804, 710, 12771, 670, 5593, 297, 13, 407, 341, 307, 264, 2158, 295, 2031, 2159, 11, 2992, 1804, 710, 12771, 670, 5593, 297, 11, 445, 3278, 27290, 13, 823, 264, 2158, 295, 710, 727, 312, 3353, 420, 3671, 11, 309, 5946, 322, 264, 3513, 295, 264, 710, 6175, 291, 362, 13, 407, 498, 710, 6175, 9134, 294, 264, 1411, 1252, 11], "avg_logprob": -0.13547585159540176, "compression_ratio": 1.8622448979591837, "no_speech_prob": 0.0, "words": [{"start": 740.24, "end": 740.5, "word": " So", "probability": 0.85302734375}, {"start": 740.5, "end": 740.66, "word": " z", "probability": 0.5751953125}, {"start": 740.66, "end": 740.9, "word": " equals", "probability": 0.61328125}, {"start": 740.9, "end": 741.12, "word": " x", "probability": 0.951171875}, {"start": 741.12, "end": 741.28, "word": " bar", "probability": 0.80517578125}, {"start": 741.28, "end": 741.56, "word": " minus", "probability": 0.95263671875}, {"start": 741.56, "end": 741.74, "word": " mu", "probability": 0.7001953125}, {"start": 741.74, "end": 741.96, "word": " divided", "probability": 0.75146484375}, {"start": 741.96, "end": 742.14, "word": " by", "probability": 0.966796875}, {"start": 742.14, "end": 742.42, "word": " sigma", "probability": 0.91259765625}, {"start": 742.42, "end": 742.66, "word": " over", "probability": 0.86962890625}, {"start": 742.66, "end": 742.88, "word": " root", "probability": 0.93603515625}, {"start": 742.88, "end": 743.06, "word": " n,", "probability": 0.90380859375}, {"start": 743.16, "end": 743.34, "word": " just", "probability": 0.86572265625}, {"start": 743.34, "end": 743.7, "word": " cross", "probability": 0.86474609375}, {"start": 743.7, "end": 744.26, "word": " multiplication,", "probability": 0.84033203125}, {"start": 745.02, "end": 745.16, "word": " you", "probability": 0.95361328125}, 
{"start": 745.16, "end": 745.32, "word": " will", "probability": 0.79345703125}, {"start": 745.32, "end": 745.58, "word": " get", "probability": 0.94091796875}, {"start": 745.58, "end": 745.98, "word": " x", "probability": 0.98974609375}, {"start": 745.98, "end": 746.22, "word": " bar", "probability": 0.95166015625}, {"start": 746.22, "end": 746.4, "word": " to", "probability": 0.96435546875}, {"start": 746.4, "end": 746.54, "word": " be", "probability": 0.9521484375}, {"start": 746.54, "end": 746.84, "word": " mu", "probability": 0.95654296875}, {"start": 746.84, "end": 747.5, "word": " plus", "probability": 0.95751953125}, {"start": 747.5, "end": 748.06, "word": " z", "probability": 0.98974609375}, {"start": 748.06, "end": 749.12, "word": " sigma", "probability": 0.91455078125}, {"start": 749.12, "end": 749.74, "word": " over", "probability": 0.9140625}, {"start": 749.74, "end": 750.0, "word": " root", "probability": 0.923828125}, {"start": 750.0, "end": 750.2, "word": " n.", "probability": 0.984375}, {"start": 750.38, "end": 750.54, "word": " So", "probability": 0.95654296875}, {"start": 750.54, "end": 750.78, "word": " this", "probability": 0.89794921875}, {"start": 750.78, "end": 750.96, "word": " is", "probability": 0.94140625}, {"start": 750.96, "end": 751.1, "word": " the", "probability": 0.9189453125}, {"start": 751.1, "end": 751.48, "word": " value", "probability": 0.9716796875}, {"start": 751.48, "end": 752.0, "word": " of", "probability": 0.96240234375}, {"start": 752.0, "end": 752.2, "word": " x", "probability": 0.9912109375}, {"start": 752.2, "end": 752.5, "word": " bar,", "probability": 0.95361328125}, {"start": 753.06, "end": 753.26, "word": " mu", "probability": 0.9189453125}, {"start": 753.26, "end": 753.62, "word": " plus", "probability": 0.9599609375}, {"start": 753.62, "end": 753.9, "word": " z", "probability": 0.9892578125}, {"start": 753.9, "end": 754.2, "word": " sigma", "probability": 0.91845703125}, {"start": 754.2, "end": 754.38, "word": 
" over", "probability": 0.88623046875}, {"start": 754.38, "end": 754.58, "word": " root", "probability": 0.91650390625}, {"start": 754.58, "end": 754.7, "word": " n,", "probability": 0.9482421875}, {"start": 754.76, "end": 754.92, "word": " just", "probability": 0.90283203125}, {"start": 754.92, "end": 755.18, "word": " cross", "probability": 0.8916015625}, {"start": 755.18, "end": 755.64, "word": " multiplication.", "probability": 0.84716796875}, {"start": 757.04, "end": 757.24, "word": " Now", "probability": 0.9619140625}, {"start": 757.24, "end": 757.44, "word": " the", "probability": 0.52099609375}, {"start": 757.44, "end": 757.68, "word": " value", "probability": 0.97314453125}, {"start": 757.68, "end": 757.86, "word": " of", "probability": 0.96630859375}, {"start": 757.86, "end": 758.06, "word": " z", "probability": 0.98681640625}, {"start": 758.06, "end": 758.4, "word": " could", "probability": 0.875}, {"start": 758.4, "end": 758.52, "word": " be", "probability": 0.94287109375}, {"start": 758.52, "end": 758.82, "word": " positive", "probability": 0.88427734375}, {"start": 758.82, "end": 759.1, "word": " or", "probability": 0.96435546875}, {"start": 759.1, "end": 759.44, "word": " negative,", "probability": 0.94140625}, {"start": 759.52, "end": 759.6, "word": " it", "probability": 0.89306640625}, {"start": 759.6, "end": 759.92, "word": " depends", "probability": 0.9033203125}, {"start": 759.92, "end": 760.12, "word": " on", "probability": 0.94677734375}, {"start": 760.12, "end": 760.24, "word": " the", "probability": 0.91015625}, {"start": 760.24, "end": 760.6, "word": " direction", "probability": 0.94970703125}, {"start": 760.6, "end": 760.82, "word": " of", "probability": 0.94482421875}, {"start": 760.82, "end": 760.96, "word": " the", "probability": 0.89404296875}, {"start": 760.96, "end": 761.1, "word": " z", "probability": 0.93359375}, {"start": 761.1, "end": 761.32, "word": " score", "probability": 0.5244140625}, {"start": 761.32, "end": 761.5, "word": 
" you", "probability": 0.9443359375}, {"start": 761.5, "end": 761.74, "word": " have.", "probability": 0.9365234375}, {"start": 763.02, "end": 763.5, "word": " So", "probability": 0.92333984375}, {"start": 763.5, "end": 764.5, "word": " if", "probability": 0.77978515625}, {"start": 764.5, "end": 764.92, "word": " z", "probability": 0.857421875}, {"start": 764.92, "end": 765.26, "word": " score", "probability": 0.876953125}, {"start": 765.26, "end": 765.7, "word": " lies", "probability": 0.94140625}, {"start": 765.7, "end": 765.88, "word": " in", "probability": 0.72998046875}, {"start": 765.88, "end": 766.0, "word": " the", "probability": 0.91455078125}, {"start": 766.0, "end": 766.2, "word": " left", "probability": 0.94091796875}, {"start": 766.2, "end": 766.68, "word": " side,", "probability": 0.86328125}], "temperature": 1.0}, {"id": 29, "seek": 78475, "start": 770.29, "end": 784.75, "text": " The other one is positive. Because here we are talking about confidence interval, ranges from smallest value to the largest one. 
So z-score is negative, so x-bar plus equals mu plus or minus z-sigma over root.", "tokens": [440, 661, 472, 307, 3353, 13, 1436, 510, 321, 366, 1417, 466, 6687, 15035, 11, 22526, 490, 16998, 2158, 281, 264, 6443, 472, 13, 407, 710, 12, 4417, 418, 307, 3671, 11, 370, 2031, 12, 5356, 1804, 6915, 2992, 1804, 420, 3175, 710, 12, 82, 16150, 670, 5593, 13], "avg_logprob": -0.2215625038743019, "compression_ratio": 1.381578947368421, "no_speech_prob": 0.0, "words": [{"start": 770.29, "end": 770.53, "word": " The", "probability": 0.487060546875}, {"start": 770.53, "end": 770.77, "word": " other", "probability": 0.8671875}, {"start": 770.77, "end": 770.97, "word": " one", "probability": 0.88916015625}, {"start": 770.97, "end": 771.11, "word": " is", "probability": 0.88037109375}, {"start": 771.11, "end": 771.41, "word": " positive.", "probability": 0.97412109375}, {"start": 772.17, "end": 772.71, "word": " Because", "probability": 0.9072265625}, {"start": 772.71, "end": 772.91, "word": " here", "probability": 0.796875}, {"start": 772.91, "end": 773.07, "word": " we", "probability": 0.8369140625}, {"start": 773.07, "end": 773.21, "word": " are", "probability": 0.87646484375}, {"start": 773.21, "end": 773.49, "word": " talking", "probability": 0.83837890625}, {"start": 773.49, "end": 773.77, "word": " about", "probability": 0.90478515625}, {"start": 773.77, "end": 774.25, "word": " confidence", "probability": 0.9375}, {"start": 774.25, "end": 774.73, "word": " interval,", "probability": 0.8828125}, {"start": 774.89, "end": 775.23, "word": " ranges", "probability": 0.411865234375}, {"start": 775.23, "end": 775.59, "word": " from", "probability": 0.8896484375}, {"start": 775.59, "end": 776.17, "word": " smallest", "probability": 0.8896484375}, {"start": 776.17, "end": 776.57, "word": " value", "probability": 0.9638671875}, {"start": 776.57, "end": 776.81, "word": " to", "probability": 0.96728515625}, {"start": 776.81, "end": 777.03, "word": " the", "probability": 
0.88623046875}, {"start": 777.03, "end": 777.39, "word": " largest", "probability": 0.919921875}, {"start": 777.39, "end": 777.75, "word": " one.", "probability": 0.87353515625}, {"start": 778.51, "end": 778.77, "word": " So", "probability": 0.9453125}, {"start": 778.77, "end": 778.93, "word": " z", "probability": 0.425537109375}, {"start": 778.93, "end": 779.21, "word": "-score", "probability": 0.7490234375}, {"start": 779.21, "end": 779.43, "word": " is", "probability": 0.77099609375}, {"start": 779.43, "end": 780.19, "word": " negative,", "probability": 0.93505859375}, {"start": 780.91, "end": 781.11, "word": " so", "probability": 0.87158203125}, {"start": 781.11, "end": 781.31, "word": " x", "probability": 0.85693359375}, {"start": 781.31, "end": 781.59, "word": "-bar", "probability": 0.82373046875}, {"start": 781.59, "end": 782.03, "word": " plus", "probability": 0.69482421875}, {"start": 782.03, "end": 782.91, "word": " equals", "probability": 0.8525390625}, {"start": 782.91, "end": 783.17, "word": " mu", "probability": 0.78955078125}, {"start": 783.17, "end": 783.45, "word": " plus", "probability": 0.9072265625}, {"start": 783.45, "end": 783.65, "word": " or", "probability": 0.916015625}, {"start": 783.65, "end": 783.93, "word": " minus", "probability": 0.98828125}, {"start": 783.93, "end": 784.15, "word": " z", "probability": 0.9111328125}, {"start": 784.15, "end": 784.43, "word": "-sigma", "probability": 0.916015625}, {"start": 784.43, "end": 784.61, "word": " over", "probability": 0.7626953125}, {"start": 784.61, "end": 784.75, "word": " root.", "probability": 0.8525390625}], "temperature": 1.0}, {"id": 30, "seek": 81367, "start": 786.15, "end": 813.67, "text": " Now let's imagine that the population mean is 368 and sigma is 15 and we select a random sample of 15 I'm sorry of 25 let's see the range or the values of x bar might be taken so x bar equals 368 plus or minus now for 95 percent", "tokens": [823, 718, 311, 3811, 300, 264, 4415, 914, 307, 8652, 
23, 293, 12771, 307, 2119, 293, 321, 3048, 257, 4974, 6889, 295, 2119, 286, 478, 2597, 295, 3552, 718, 311, 536, 264, 3613, 420, 264, 4190, 295, 2031, 2159, 1062, 312, 2726, 370, 2031, 2159, 6915, 8652, 23, 1804, 420, 3175, 586, 337, 13420, 3043], "avg_logprob": -0.21568080277315207, "compression_ratio": 1.43125, "no_speech_prob": 0.0, "words": [{"start": 786.15, "end": 786.41, "word": " Now", "probability": 0.68115234375}, {"start": 786.41, "end": 786.71, "word": " let's", "probability": 0.781494140625}, {"start": 786.71, "end": 787.11, "word": " imagine", "probability": 0.8740234375}, {"start": 787.11, "end": 787.47, "word": " that", "probability": 0.9150390625}, {"start": 787.47, "end": 788.49, "word": " the", "probability": 0.71826171875}, {"start": 788.49, "end": 788.95, "word": " population", "probability": 0.94677734375}, {"start": 788.95, "end": 789.29, "word": " mean", "probability": 0.88623046875}, {"start": 789.29, "end": 789.47, "word": " is", "probability": 0.93994140625}, {"start": 789.47, "end": 790.69, "word": " 368", "probability": 0.6939697265625}, {"start": 790.69, "end": 791.71, "word": " and", "probability": 0.60009765625}, {"start": 791.71, "end": 792.09, "word": " sigma", "probability": 0.66650390625}, {"start": 792.09, "end": 792.53, "word": " is", "probability": 0.93115234375}, {"start": 792.53, "end": 793.59, "word": " 15", "probability": 0.91943359375}, {"start": 793.59, "end": 794.09, "word": " and", "probability": 0.59521484375}, {"start": 794.09, "end": 794.25, "word": " we", "probability": 0.73486328125}, {"start": 794.25, "end": 794.53, "word": " select", "probability": 0.8232421875}, {"start": 794.53, "end": 794.69, "word": " a", "probability": 0.95263671875}, {"start": 794.69, "end": 794.89, "word": " random", "probability": 0.8154296875}, {"start": 794.89, "end": 795.21, "word": " sample", "probability": 0.92431640625}, {"start": 795.21, "end": 795.39, "word": " of", "probability": 0.96875}, {"start": 795.39, "end": 795.79, 
"word": " 15", "probability": 0.72900390625}, {"start": 795.79, "end": 796.33, "word": " I'm", "probability": 0.6253662109375}, {"start": 796.33, "end": 796.51, "word": " sorry", "probability": 0.8623046875}, {"start": 796.51, "end": 796.73, "word": " of", "probability": 0.828125}, {"start": 796.73, "end": 797.21, "word": " 25", "probability": 0.958984375}, {"start": 797.21, "end": 798.35, "word": " let's", "probability": 0.689453125}, {"start": 798.35, "end": 798.57, "word": " see", "probability": 0.5732421875}, {"start": 798.57, "end": 798.93, "word": " the", "probability": 0.9130859375}, {"start": 798.93, "end": 799.37, "word": " range", "probability": 0.890625}, {"start": 799.37, "end": 799.61, "word": " or", "probability": 0.9443359375}, {"start": 799.61, "end": 799.75, "word": " the", "probability": 0.92041015625}, {"start": 799.75, "end": 800.25, "word": " values", "probability": 0.96728515625}, {"start": 800.25, "end": 801.09, "word": " of", "probability": 0.9560546875}, {"start": 801.09, "end": 801.35, "word": " x", "probability": 0.7158203125}, {"start": 801.35, "end": 801.63, "word": " bar", "probability": 0.783203125}, {"start": 801.63, "end": 802.03, "word": " might", "probability": 0.8916015625}, {"start": 802.03, "end": 802.21, "word": " be", "probability": 0.95361328125}, {"start": 802.21, "end": 802.53, "word": " taken", "probability": 0.8271484375}, {"start": 802.53, "end": 804.03, "word": " so", "probability": 0.5927734375}, {"start": 804.03, "end": 804.27, "word": " x", "probability": 0.98046875}, {"start": 804.27, "end": 804.47, "word": " bar", "probability": 0.90625}, {"start": 804.47, "end": 804.93, "word": " equals", "probability": 0.9189453125}, {"start": 804.93, "end": 808.05, "word": " 368", "probability": 0.8427734375}, {"start": 808.05, "end": 809.33, "word": " plus", "probability": 0.92138671875}, {"start": 809.33, "end": 809.59, "word": " or", "probability": 0.95556640625}, {"start": 809.59, "end": 809.93, "word": " minus", 
"probability": 0.98583984375}, {"start": 809.93, "end": 812.09, "word": " now", "probability": 0.7373046875}, {"start": 812.09, "end": 812.53, "word": " for", "probability": 0.93994140625}, {"start": 812.53, "end": 812.97, "word": " 95", "probability": 0.9580078125}, {"start": 812.97, "end": 813.67, "word": " percent", "probability": 0.6376953125}], "temperature": 1.0}, {"id": 31, "seek": 84270, "start": 814.88, "end": 842.7, "text": " The corresponding z value is 1.96, if you remember that. And the other one is negative or plus, it depends on the direction of the z-scope. So plus or minus 1.96. This value can be computed or found by using the normal table. Times sigma 15 divided by root 25.", "tokens": [440, 11760, 710, 2158, 307, 502, 13, 22962, 11, 498, 291, 1604, 300, 13, 400, 264, 661, 472, 307, 3671, 420, 1804, 11, 309, 5946, 322, 264, 3513, 295, 264, 710, 12, 4417, 1114, 13, 407, 1804, 420, 3175, 502, 13, 22962, 13, 639, 2158, 393, 312, 40610, 420, 1352, 538, 1228, 264, 2710, 3199, 13, 11366, 12771, 2119, 6666, 538, 5593, 3552, 13], "avg_logprob": -0.16694711538461537, "compression_ratio": 1.4184782608695652, "no_speech_prob": 0.0, "words": [{"start": 814.88, "end": 815.16, "word": " The", "probability": 0.7919921875}, {"start": 815.16, "end": 815.76, "word": " corresponding", "probability": 0.8408203125}, {"start": 815.76, "end": 816.04, "word": " z", "probability": 0.59326171875}, {"start": 816.04, "end": 816.38, "word": " value", "probability": 0.77294921875}, {"start": 816.38, "end": 816.64, "word": " is", "probability": 0.9404296875}, {"start": 816.64, "end": 816.84, "word": " 1", "probability": 0.8779296875}, {"start": 816.84, "end": 817.62, "word": ".96,", "probability": 0.986328125}, {"start": 819.42, "end": 819.9, "word": " if", "probability": 0.8857421875}, {"start": 819.9, "end": 819.96, "word": " you", "probability": 0.96240234375}, {"start": 819.96, "end": 820.2, "word": " remember", "probability": 0.873046875}, {"start": 820.2, "end": 820.46, 
"word": " that.", "probability": 0.93359375}, {"start": 821.78, "end": 822.4, "word": " And", "probability": 0.92626953125}, {"start": 822.4, "end": 822.52, "word": " the", "probability": 0.759765625}, {"start": 822.52, "end": 822.68, "word": " other", "probability": 0.880859375}, {"start": 822.68, "end": 822.88, "word": " one", "probability": 0.916015625}, {"start": 822.88, "end": 823.1, "word": " is", "probability": 0.93603515625}, {"start": 823.1, "end": 823.94, "word": " negative", "probability": 0.89990234375}, {"start": 823.94, "end": 824.18, "word": " or", "probability": 0.93994140625}, {"start": 824.18, "end": 824.5, "word": " plus,", "probability": 0.84033203125}, {"start": 824.64, "end": 824.74, "word": " it", "probability": 0.59521484375}, {"start": 824.74, "end": 824.98, "word": " depends", "probability": 0.912109375}, {"start": 824.98, "end": 825.16, "word": " on", "probability": 0.93798828125}, {"start": 825.16, "end": 825.32, "word": " the", "probability": 0.91796875}, {"start": 825.32, "end": 825.82, "word": " direction", "probability": 0.97314453125}, {"start": 825.82, "end": 826.08, "word": " of", "probability": 0.9560546875}, {"start": 826.08, "end": 826.36, "word": " the", "probability": 0.88818359375}, {"start": 826.36, "end": 826.7, "word": " z", "probability": 0.95947265625}, {"start": 826.7, "end": 827.0, "word": "-scope.", "probability": 0.6512044270833334}, {"start": 827.38, "end": 827.6, "word": " So", "probability": 0.93603515625}, {"start": 827.6, "end": 827.88, "word": " plus", "probability": 0.6376953125}, {"start": 827.88, "end": 828.1, "word": " or", "probability": 0.94921875}, {"start": 828.1, "end": 828.44, "word": " minus", "probability": 0.98583984375}, {"start": 828.44, "end": 828.64, "word": " 1", "probability": 0.97021484375}, {"start": 828.64, "end": 829.22, "word": ".96.", "probability": 0.98828125}, {"start": 831.44, "end": 832.06, "word": " This", "probability": 0.88671875}, {"start": 832.06, "end": 832.34, "word": " 
value", "probability": 0.9755859375}, {"start": 832.34, "end": 832.58, "word": " can", "probability": 0.94384765625}, {"start": 832.58, "end": 832.94, "word": " be", "probability": 0.9580078125}, {"start": 832.94, "end": 834.44, "word": " computed", "probability": 0.919921875}, {"start": 834.44, "end": 834.9, "word": " or", "probability": 0.91748046875}, {"start": 834.9, "end": 835.26, "word": " found", "probability": 0.90087890625}, {"start": 835.26, "end": 835.52, "word": " by", "probability": 0.9599609375}, {"start": 835.52, "end": 835.88, "word": " using", "probability": 0.93359375}, {"start": 835.88, "end": 836.12, "word": " the", "probability": 0.916015625}, {"start": 836.12, "end": 836.46, "word": " normal", "probability": 0.8681640625}, {"start": 836.46, "end": 836.8, "word": " table.", "probability": 0.7236328125}, {"start": 838.6, "end": 839.22, "word": " Times", "probability": 0.8955078125}, {"start": 839.22, "end": 839.7, "word": " sigma", "probability": 0.861328125}, {"start": 839.7, "end": 841.24, "word": " 15", "probability": 0.478515625}, {"start": 841.24, "end": 841.86, "word": " divided", "probability": 0.66162109375}, {"start": 841.86, "end": 842.06, "word": " by", "probability": 0.97314453125}, {"start": 842.06, "end": 842.26, "word": " root", "probability": 0.88916015625}, {"start": 842.26, "end": 842.7, "word": " 25.", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 32, "seek": 87493, "start": 845.87, "end": 874.93, "text": " Just simple calculation will give 362, 373.8. Now, this interval, which ranges from 362 up to 373, contains 95% of the sample means. 
Suppose we have 100 sample means with different values, you can say that 95% out of these 100.", "tokens": [1449, 2199, 17108, 486, 976, 8652, 17, 11, 13435, 18, 13, 23, 13, 823, 11, 341, 15035, 11, 597, 22526, 490, 8652, 17, 493, 281, 13435, 18, 11, 8306, 13420, 4, 295, 264, 6889, 1355, 13, 21360, 321, 362, 2319, 6889, 1355, 365, 819, 4190, 11, 291, 393, 584, 300, 13420, 4, 484, 295, 613, 2319, 13], "avg_logprob": -0.21107219853277864, "compression_ratio": 1.3734939759036144, "no_speech_prob": 0.0, "words": [{"start": 845.87, "end": 846.19, "word": " Just", "probability": 0.515625}, {"start": 846.19, "end": 846.53, "word": " simple", "probability": 0.85888671875}, {"start": 846.53, "end": 847.13, "word": " calculation", "probability": 0.84375}, {"start": 847.13, "end": 847.37, "word": " will", "probability": 0.78369140625}, {"start": 847.37, "end": 847.69, "word": " give", "probability": 0.84619140625}, {"start": 847.69, "end": 849.25, "word": " 362,", "probability": 0.78466796875}, {"start": 850.33, "end": 851.47, "word": " 373", "probability": 0.7939453125}, {"start": 851.47, "end": 851.95, "word": ".8.", "probability": 0.724365234375}, {"start": 852.59, "end": 853.43, "word": " Now,", "probability": 0.95703125}, {"start": 854.27, "end": 854.59, "word": " this", "probability": 0.91796875}, {"start": 854.59, "end": 854.99, "word": " interval,", "probability": 0.90087890625}, {"start": 855.77, "end": 857.09, "word": " which", "probability": 0.94287109375}, {"start": 857.09, "end": 857.47, "word": " ranges", "probability": 0.9111328125}, {"start": 857.47, "end": 857.89, "word": " from", "probability": 0.89501953125}, {"start": 857.89, "end": 858.95, "word": " 362", "probability": 0.9638671875}, {"start": 858.95, "end": 859.21, "word": " up", "probability": 0.9560546875}, {"start": 859.21, "end": 859.81, "word": " to", "probability": 0.9716796875}, {"start": 859.81, "end": 861.21, "word": " 373,", "probability": 0.959228515625}, {"start": 861.65, "end": 862.39, 
"word": " contains", "probability": 0.931640625}, {"start": 862.39, "end": 863.41, "word": " 95", "probability": 0.98828125}, {"start": 863.41, "end": 863.95, "word": "%", "probability": 0.892578125}, {"start": 863.95, "end": 865.19, "word": " of", "probability": 0.96923828125}, {"start": 865.19, "end": 865.39, "word": " the", "probability": 0.87158203125}, {"start": 865.39, "end": 865.65, "word": " sample", "probability": 0.75439453125}, {"start": 865.65, "end": 865.97, "word": " means.", "probability": 0.27490234375}, {"start": 867.09, "end": 867.51, "word": " Suppose", "probability": 0.78515625}, {"start": 867.51, "end": 867.69, "word": " we", "probability": 0.931640625}, {"start": 867.69, "end": 867.87, "word": " have", "probability": 0.94091796875}, {"start": 867.87, "end": 868.33, "word": " 100", "probability": 0.91943359375}, {"start": 868.33, "end": 868.75, "word": " sample", "probability": 0.880859375}, {"start": 868.75, "end": 869.15, "word": " means", "probability": 0.8671875}, {"start": 869.15, "end": 870.13, "word": " with", "probability": 0.7265625}, {"start": 870.13, "end": 870.53, "word": " different", "probability": 0.8779296875}, {"start": 870.53, "end": 871.09, "word": " values,", "probability": 0.970703125}, {"start": 871.65, "end": 871.83, "word": " you", "probability": 0.95556640625}, {"start": 871.83, "end": 872.05, "word": " can", "probability": 0.9453125}, {"start": 872.05, "end": 872.29, "word": " say", "probability": 0.6982421875}, {"start": 872.29, "end": 872.57, "word": " that", "probability": 0.90966796875}, {"start": 872.57, "end": 873.13, "word": " 95", "probability": 0.98681640625}, {"start": 873.13, "end": 873.69, "word": "%", "probability": 0.98681640625}, {"start": 873.69, "end": 874.07, "word": " out", "probability": 0.87646484375}, {"start": 874.07, "end": 874.23, "word": " of", "probability": 0.970703125}, {"start": 874.23, "end": 874.45, "word": " these", "probability": 0.6669921875}, {"start": 874.45, "end": 874.93, "word": 
" 100.", "probability": 0.9306640625}], "temperature": 1.0}, {"id": 33, "seek": 89949, "start": 875.49, "end": 899.49, "text": " will contain the same meaning. That if a Mu is given. But again, in real life, you don't know the value of Mu. So when you don't know Mu, you can use X bar to estimate Mu. Now suppose on the other hand, here Mu is unknown.", "tokens": [486, 5304, 264, 912, 3620, 13, 663, 498, 257, 15601, 307, 2212, 13, 583, 797, 11, 294, 957, 993, 11, 291, 500, 380, 458, 264, 2158, 295, 15601, 13, 407, 562, 291, 500, 380, 458, 15601, 11, 291, 393, 764, 1783, 2159, 281, 12539, 15601, 13, 823, 7297, 322, 264, 661, 1011, 11, 510, 15601, 307, 9841, 13], "avg_logprob": -0.24059852402089005, "compression_ratio": 1.457516339869281, "no_speech_prob": 0.0, "words": [{"start": 875.49, "end": 875.83, "word": " will", "probability": 0.39599609375}, {"start": 875.83, "end": 876.37, "word": " contain", "probability": 0.89794921875}, {"start": 876.37, "end": 876.99, "word": " the", "probability": 0.86962890625}, {"start": 876.99, "end": 877.21, "word": " same", "probability": 0.51708984375}, {"start": 877.21, "end": 877.55, "word": " meaning.", "probability": 0.36865234375}, {"start": 878.71, "end": 879.05, "word": " That", "probability": 0.712890625}, {"start": 879.05, "end": 879.59, "word": " if", "probability": 0.5556640625}, {"start": 879.59, "end": 880.15, "word": " a", "probability": 0.71435546875}, {"start": 880.15, "end": 880.21, "word": " Mu", "probability": 0.265869140625}, {"start": 880.21, "end": 880.43, "word": " is", "probability": 0.8818359375}, {"start": 880.43, "end": 880.63, "word": " given.", "probability": 0.9130859375}, {"start": 881.79, "end": 882.07, "word": " But", "probability": 0.89306640625}, {"start": 882.07, "end": 882.43, "word": " again,", "probability": 0.8466796875}, {"start": 882.67, "end": 882.71, "word": " in", "probability": 0.93115234375}, {"start": 882.71, "end": 883.13, "word": " real", "probability": 0.9296875}, {"start": 883.13, 
"end": 883.53, "word": " life,", "probability": 0.89306640625}, {"start": 884.41, "end": 884.97, "word": " you", "probability": 0.9111328125}, {"start": 884.97, "end": 885.23, "word": " don't", "probability": 0.938232421875}, {"start": 885.23, "end": 885.41, "word": " know", "probability": 0.89892578125}, {"start": 885.41, "end": 885.57, "word": " the", "probability": 0.916015625}, {"start": 885.57, "end": 885.87, "word": " value", "probability": 0.98095703125}, {"start": 885.87, "end": 886.07, "word": " of", "probability": 0.96923828125}, {"start": 886.07, "end": 886.29, "word": " Mu.", "probability": 0.90478515625}, {"start": 886.99, "end": 887.25, "word": " So", "probability": 0.88134765625}, {"start": 887.25, "end": 887.49, "word": " when", "probability": 0.6630859375}, {"start": 887.49, "end": 887.65, "word": " you", "probability": 0.96484375}, {"start": 887.65, "end": 887.95, "word": " don't", "probability": 0.973388671875}, {"start": 887.95, "end": 888.21, "word": " know", "probability": 0.8779296875}, {"start": 888.21, "end": 888.41, "word": " Mu,", "probability": 0.9697265625}, {"start": 889.91, "end": 890.89, "word": " you", "probability": 0.93994140625}, {"start": 890.89, "end": 891.17, "word": " can", "probability": 0.93359375}, {"start": 891.17, "end": 891.53, "word": " use", "probability": 0.8837890625}, {"start": 891.53, "end": 891.89, "word": " X", "probability": 0.8134765625}, {"start": 891.89, "end": 892.33, "word": " bar", "probability": 0.55419921875}, {"start": 892.33, "end": 892.79, "word": " to", "probability": 0.9638671875}, {"start": 892.79, "end": 893.11, "word": " estimate", "probability": 0.9052734375}, {"start": 893.11, "end": 893.49, "word": " Mu.", "probability": 0.9580078125}, {"start": 894.71, "end": 894.99, "word": " Now", "probability": 0.92822265625}, {"start": 894.99, "end": 895.57, "word": " suppose", "probability": 0.63916015625}, {"start": 895.57, "end": 895.89, "word": " on", "probability": 0.58349609375}, {"start": 895.89, 
"end": 896.47, "word": " the", "probability": 0.90576171875}, {"start": 896.47, "end": 896.71, "word": " other", "probability": 0.8798828125}, {"start": 896.71, "end": 897.11, "word": " hand,", "probability": 0.9091796875}, {"start": 898.39, "end": 898.61, "word": " here", "probability": 0.8349609375}, {"start": 898.61, "end": 898.89, "word": " Mu", "probability": 0.93798828125}, {"start": 898.89, "end": 899.11, "word": " is", "probability": 0.95263671875}, {"start": 899.11, "end": 899.49, "word": " unknown.", "probability": 0.90576171875}], "temperature": 1.0}, {"id": 34, "seek": 92952, "start": 903.18, "end": 929.52, "text": " And we estimate mu by x bar, the sample mean. And suppose the sample size 25 gives sample mean of 362.3. So this sample mean by using a sample size of 25. Now let's see how can we construct the confidence interval in this case.", "tokens": [400, 321, 12539, 2992, 538, 2031, 2159, 11, 264, 6889, 914, 13, 400, 7297, 264, 6889, 2744, 3552, 2709, 6889, 914, 295, 8652, 17, 13, 18, 13, 407, 341, 6889, 914, 538, 1228, 257, 6889, 2744, 295, 3552, 13, 823, 718, 311, 536, 577, 393, 321, 7690, 264, 6687, 15035, 294, 341, 1389, 13], "avg_logprob": -0.15767045291987333, "compression_ratio": 1.52, "no_speech_prob": 0.0, "words": [{"start": 903.18, "end": 903.48, "word": " And", "probability": 0.82421875}, {"start": 903.48, "end": 903.66, "word": " we", "probability": 0.6640625}, {"start": 903.66, "end": 904.0, "word": " estimate", "probability": 0.8876953125}, {"start": 904.0, "end": 904.52, "word": " mu", "probability": 0.315185546875}, {"start": 904.52, "end": 905.16, "word": " by", "probability": 0.95947265625}, {"start": 905.16, "end": 906.66, "word": " x", "probability": 0.75537109375}, {"start": 906.66, "end": 906.96, "word": " bar,", "probability": 0.7646484375}, {"start": 907.08, "end": 907.16, "word": " the", "probability": 0.814453125}, {"start": 907.16, "end": 907.42, "word": " sample", "probability": 0.89013671875}, {"start": 907.42, "end": 
907.7, "word": " mean.", "probability": 0.9658203125}, {"start": 908.14, "end": 908.36, "word": " And", "probability": 0.9404296875}, {"start": 908.36, "end": 908.82, "word": " suppose", "probability": 0.92041015625}, {"start": 908.82, "end": 909.7, "word": " the", "probability": 0.79736328125}, {"start": 909.7, "end": 910.04, "word": " sample", "probability": 0.89013671875}, {"start": 910.04, "end": 910.6, "word": " size", "probability": 0.880859375}, {"start": 910.6, "end": 911.44, "word": " 25", "probability": 0.59814453125}, {"start": 911.44, "end": 912.12, "word": " gives", "probability": 0.86865234375}, {"start": 912.12, "end": 913.74, "word": " sample", "probability": 0.73779296875}, {"start": 913.74, "end": 914.08, "word": " mean", "probability": 0.9609375}, {"start": 914.08, "end": 914.48, "word": " of", "probability": 0.97021484375}, {"start": 914.48, "end": 915.68, "word": " 362", "probability": 0.951904296875}, {"start": 915.68, "end": 916.32, "word": ".3.", "probability": 0.9931640625}, {"start": 917.16, "end": 917.68, "word": " So", "probability": 0.96923828125}, {"start": 917.68, "end": 917.92, "word": " this", "probability": 0.857421875}, {"start": 917.92, "end": 918.32, "word": " sample", "probability": 0.88037109375}, {"start": 918.32, "end": 919.22, "word": " mean", "probability": 0.6904296875}, {"start": 919.22, "end": 920.36, "word": " by", "probability": 0.73388671875}, {"start": 920.36, "end": 920.84, "word": " using", "probability": 0.939453125}, {"start": 920.84, "end": 921.12, "word": " a", "probability": 0.96435546875}, {"start": 921.12, "end": 921.38, "word": " sample", "probability": 0.87890625}, {"start": 921.38, "end": 921.94, "word": " size", "probability": 0.87109375}, {"start": 921.94, "end": 922.84, "word": " of", "probability": 0.96875}, {"start": 922.84, "end": 923.94, "word": " 25.", "probability": 0.94873046875}, {"start": 925.76, "end": 926.38, "word": " Now", "probability": 0.9033203125}, {"start": 926.38, "end": 926.64, 
"word": " let's", "probability": 0.8134765625}, {"start": 926.64, "end": 926.74, "word": " see", "probability": 0.90673828125}, {"start": 926.74, "end": 926.84, "word": " how", "probability": 0.93212890625}, {"start": 926.84, "end": 927.04, "word": " can", "probability": 0.7705078125}, {"start": 927.04, "end": 927.2, "word": " we", "probability": 0.94921875}, {"start": 927.2, "end": 927.74, "word": " construct", "probability": 0.97216796875}, {"start": 927.74, "end": 927.9, "word": " the", "probability": 0.51708984375}, {"start": 927.9, "end": 928.3, "word": " confidence", "probability": 0.97412109375}, {"start": 928.3, "end": 928.78, "word": " interval", "probability": 0.9775390625}, {"start": 928.78, "end": 929.0, "word": " in", "probability": 0.92333984375}, {"start": 929.0, "end": 929.18, "word": " this", "probability": 0.94580078125}, {"start": 929.18, "end": 929.52, "word": " case.", "probability": 0.9072265625}], "temperature": 1.0}, {"id": 35, "seek": 95615, "start": 930.77, "end": 956.15, "text": " Now x bar is given, so my interval should be mu equals x bar plus or minus z sigma over root n. Just replace this mu by x bar, you will get. 
So again, sample of size 25 gives sample mean of 362.12.", "tokens": [823, 2031, 2159, 307, 2212, 11, 370, 452, 15035, 820, 312, 2992, 6915, 2031, 2159, 1804, 420, 3175, 710, 12771, 670, 5593, 297, 13, 1449, 7406, 341, 2992, 538, 2031, 2159, 11, 291, 486, 483, 13, 407, 797, 11, 6889, 295, 2744, 3552, 2709, 6889, 914, 295, 8652, 17, 13, 4762, 13], "avg_logprob": -0.18912146001491906, "compression_ratio": 1.32, "no_speech_prob": 0.0, "words": [{"start": 930.77, "end": 931.03, "word": " Now", "probability": 0.58544921875}, {"start": 931.03, "end": 931.25, "word": " x", "probability": 0.5859375}, {"start": 931.25, "end": 931.39, "word": " bar", "probability": 0.79833984375}, {"start": 931.39, "end": 931.55, "word": " is", "probability": 0.935546875}, {"start": 931.55, "end": 931.79, "word": " given,", "probability": 0.884765625}, {"start": 932.31, "end": 932.69, "word": " so", "probability": 0.853515625}, {"start": 932.69, "end": 932.93, "word": " my", "probability": 0.9541015625}, {"start": 932.93, "end": 933.37, "word": " interval", "probability": 0.96142578125}, {"start": 933.37, "end": 933.99, "word": " should", "probability": 0.9267578125}, {"start": 933.99, "end": 934.21, "word": " be", "probability": 0.9462890625}, {"start": 934.21, "end": 934.51, "word": " mu", "probability": 0.5703125}, {"start": 934.51, "end": 935.05, "word": " equals", "probability": 0.4755859375}, {"start": 935.05, "end": 935.69, "word": " x", "probability": 0.953125}, {"start": 935.69, "end": 935.97, "word": " bar", "probability": 0.9423828125}, {"start": 935.97, "end": 936.29, "word": " plus", "probability": 0.923828125}, {"start": 936.29, "end": 936.53, "word": " or", "probability": 0.9404296875}, {"start": 936.53, "end": 936.85, "word": " minus", "probability": 0.986328125}, {"start": 936.85, "end": 937.25, "word": " z", "probability": 0.89404296875}, {"start": 937.25, "end": 938.35, "word": " sigma", "probability": 0.681640625}, {"start": 938.35, "end": 938.57, "word": " over", 
"probability": 0.89794921875}, {"start": 938.57, "end": 938.83, "word": " root", "probability": 0.89208984375}, {"start": 938.83, "end": 939.03, "word": " n.", "probability": 0.75146484375}, {"start": 940.51, "end": 941.13, "word": " Just", "probability": 0.810546875}, {"start": 941.13, "end": 941.65, "word": " replace", "probability": 0.9052734375}, {"start": 941.65, "end": 942.01, "word": " this", "probability": 0.9453125}, {"start": 942.01, "end": 942.27, "word": " mu", "probability": 0.88037109375}, {"start": 942.27, "end": 942.45, "word": " by", "probability": 0.943359375}, {"start": 942.45, "end": 942.75, "word": " x", "probability": 0.98681640625}, {"start": 942.75, "end": 943.07, "word": " bar,", "probability": 0.931640625}, {"start": 944.01, "end": 944.17, "word": " you", "probability": 0.919921875}, {"start": 944.17, "end": 944.33, "word": " will", "probability": 0.88525390625}, {"start": 944.33, "end": 944.59, "word": " get.", "probability": 0.93701171875}, {"start": 946.29, "end": 946.73, "word": " So", "probability": 0.90966796875}, {"start": 946.73, "end": 947.03, "word": " again,", "probability": 0.86474609375}, {"start": 947.31, "end": 947.61, "word": " sample", "probability": 0.59716796875}, {"start": 947.61, "end": 947.91, "word": " of", "probability": 0.95849609375}, {"start": 947.91, "end": 948.23, "word": " size", "probability": 0.8759765625}, {"start": 948.23, "end": 948.73, "word": " 25", "probability": 0.88916015625}, {"start": 948.73, "end": 950.35, "word": " gives", "probability": 0.84375}, {"start": 950.35, "end": 950.83, "word": " sample", "probability": 0.86865234375}, {"start": 950.83, "end": 951.13, "word": " mean", "probability": 0.9619140625}, {"start": 951.13, "end": 951.45, "word": " of", "probability": 0.96923828125}, {"start": 951.45, "end": 952.61, "word": " 362", "probability": 0.93017578125}, {"start": 952.61, "end": 956.15, "word": ".12.", "probability": 0.8466796875}], "temperature": 1.0}, {"id": 36, "seek": 98275, "start": 
956.61, "end": 982.75, "text": " plus or minus 196, sigma divided by root 25. This interval could be new ranges between these two values, 356 approximately, 0.42 up to the 68th point. Now look at this interval.", "tokens": [1804, 420, 3175, 7998, 11, 12771, 6666, 538, 5593, 3552, 13, 639, 15035, 727, 312, 777, 22526, 1296, 613, 732, 4190, 11, 6976, 21, 10447, 11, 1958, 13, 15628, 493, 281, 264, 23317, 392, 935, 13, 823, 574, 412, 341, 15035, 13], "avg_logprob": -0.2883357474970263, "compression_ratio": 1.2714285714285714, "no_speech_prob": 0.0, "words": [{"start": 956.61, "end": 957.03, "word": " plus", "probability": 0.35986328125}, {"start": 957.03, "end": 957.29, "word": " or", "probability": 0.9287109375}, {"start": 957.29, "end": 957.63, "word": " minus", "probability": 0.98388671875}, {"start": 957.63, "end": 958.41, "word": " 196,", "probability": 0.84423828125}, {"start": 958.71, "end": 959.69, "word": " sigma", "probability": 0.63232421875}, {"start": 959.69, "end": 960.07, "word": " divided", "probability": 0.71630859375}, {"start": 960.07, "end": 960.27, "word": " by", "probability": 0.97509765625}, {"start": 960.27, "end": 960.49, "word": " root", "probability": 0.82177734375}, {"start": 960.49, "end": 961.05, "word": " 25.", "probability": 0.880859375}, {"start": 962.37, "end": 962.75, "word": " This", "probability": 0.84521484375}, {"start": 962.75, "end": 963.21, "word": " interval", "probability": 0.962890625}, {"start": 963.21, "end": 964.51, "word": " could", "probability": 0.833984375}, {"start": 964.51, "end": 964.85, "word": " be", "probability": 0.92578125}, {"start": 964.85, "end": 965.45, "word": " new", "probability": 0.2626953125}, {"start": 965.45, "end": 966.91, "word": " ranges", "probability": 0.8525390625}, {"start": 966.91, "end": 967.37, "word": " between", "probability": 0.88330078125}, {"start": 967.37, "end": 969.39, "word": " these", "probability": 0.82861328125}, {"start": 969.39, "end": 969.57, "word": " two", "probability": 
0.8857421875}, {"start": 969.57, "end": 969.93, "word": " values,", "probability": 0.966796875}, {"start": 970.09, "end": 971.17, "word": " 356", "probability": 0.883544921875}, {"start": 971.17, "end": 972.05, "word": " approximately,", "probability": 0.80322265625}, {"start": 973.35, "end": 974.23, "word": " 0", "probability": 0.62548828125}, {"start": 974.23, "end": 974.73, "word": ".42", "probability": 0.99072265625}, {"start": 974.73, "end": 976.79, "word": " up", "probability": 0.6357421875}, {"start": 976.79, "end": 977.19, "word": " to", "probability": 0.95361328125}, {"start": 977.19, "end": 977.51, "word": " the", "probability": 0.677734375}, {"start": 977.51, "end": 980.23, "word": " 68th", "probability": 0.642578125}, {"start": 980.23, "end": 981.09, "word": " point.", "probability": 0.640625}, {"start": 981.49, "end": 981.85, "word": " Now", "probability": 0.52392578125}, {"start": 981.85, "end": 982.07, "word": " look", "probability": 0.77197265625}, {"start": 982.07, "end": 982.21, "word": " at", "probability": 0.96484375}, {"start": 982.21, "end": 982.39, "word": " this", "probability": 0.94189453125}, {"start": 982.39, "end": 982.75, "word": " interval.", "probability": 0.94189453125}], "temperature": 1.0}, {"id": 37, "seek": 100948, "start": 985.08, "end": 1009.48, "text": " We know the true mean equals 368. We got this interval for x bar. 
Now if mu is not given, is unknown, based on the sample we have of size 25, we got x bar to be 362, and we got the interval", "tokens": [492, 458, 264, 2074, 914, 6915, 8652, 23, 13, 492, 658, 341, 15035, 337, 2031, 2159, 13, 823, 498, 2992, 307, 406, 2212, 11, 307, 9841, 11, 2361, 322, 264, 6889, 321, 362, 295, 2744, 3552, 11, 321, 658, 2031, 2159, 281, 312, 8652, 17, 11, 293, 321, 658, 264, 15035], "avg_logprob": -0.21619591747338957, "compression_ratio": 1.3571428571428572, "no_speech_prob": 0.0, "words": [{"start": 985.08, "end": 985.34, "word": " We", "probability": 0.7666015625}, {"start": 985.34, "end": 985.62, "word": " know", "probability": 0.87353515625}, {"start": 985.62, "end": 985.84, "word": " the", "probability": 0.248779296875}, {"start": 985.84, "end": 986.08, "word": " true", "probability": 0.9169921875}, {"start": 986.08, "end": 986.38, "word": " mean", "probability": 0.96337890625}, {"start": 986.38, "end": 987.62, "word": " equals", "probability": 0.3984375}, {"start": 987.62, "end": 988.88, "word": " 368.", "probability": 0.925537109375}, {"start": 992.24, "end": 992.8, "word": " We", "probability": 0.9365234375}, {"start": 992.8, "end": 993.04, "word": " got", "probability": 0.884765625}, {"start": 993.04, "end": 993.3, "word": " this", "probability": 0.92529296875}, {"start": 993.3, "end": 993.68, "word": " interval", "probability": 0.81591796875}, {"start": 993.68, "end": 994.9, "word": " for", "probability": 0.82275390625}, {"start": 994.9, "end": 995.28, "word": " x", "probability": 0.59375}, {"start": 995.28, "end": 995.56, "word": " bar.", "probability": 0.8125}, {"start": 997.2, "end": 997.68, "word": " Now", "probability": 0.94287109375}, {"start": 997.68, "end": 997.92, "word": " if", "probability": 0.6220703125}, {"start": 997.92, "end": 998.14, "word": " mu", "probability": 0.3798828125}, {"start": 998.14, "end": 998.34, "word": " is", "probability": 0.94189453125}, {"start": 998.34, "end": 998.58, "word": " not", "probability": 
0.9140625}, {"start": 998.58, "end": 998.9, "word": " given,", "probability": 0.8857421875}, {"start": 999.36, "end": 999.6, "word": " is", "probability": 0.763671875}, {"start": 999.6, "end": 1000.08, "word": " unknown,", "probability": 0.88818359375}, {"start": 1001.18, "end": 1001.46, "word": " based", "probability": 0.88916015625}, {"start": 1001.46, "end": 1001.6, "word": " on", "probability": 0.947265625}, {"start": 1001.6, "end": 1001.76, "word": " the", "probability": 0.91796875}, {"start": 1001.76, "end": 1002.04, "word": " sample", "probability": 0.8203125}, {"start": 1002.04, "end": 1002.28, "word": " we", "probability": 0.90771484375}, {"start": 1002.28, "end": 1002.58, "word": " have", "probability": 0.9345703125}, {"start": 1002.58, "end": 1003.0, "word": " of", "probability": 0.74560546875}, {"start": 1003.0, "end": 1003.24, "word": " size", "probability": 0.87548828125}, {"start": 1003.24, "end": 1003.68, "word": " 25,", "probability": 0.95263671875}, {"start": 1004.52, "end": 1004.8, "word": " we", "probability": 0.94873046875}, {"start": 1004.8, "end": 1005.04, "word": " got", "probability": 0.88623046875}, {"start": 1005.04, "end": 1005.3, "word": " x", "probability": 0.95556640625}, {"start": 1005.3, "end": 1005.56, "word": " bar", "probability": 0.9287109375}, {"start": 1005.56, "end": 1005.72, "word": " to", "probability": 0.96826171875}, {"start": 1005.72, "end": 1005.86, "word": " be", "probability": 0.955078125}, {"start": 1005.86, "end": 1006.84, "word": " 362,", "probability": 0.96044921875}, {"start": 1007.4, "end": 1008.56, "word": " and", "probability": 0.93701171875}, {"start": 1008.56, "end": 1008.72, "word": " we", "probability": 0.955078125}, {"start": 1008.72, "end": 1008.9, "word": " got", "probability": 0.91259765625}, {"start": 1008.9, "end": 1009.02, "word": " the", "probability": 0.85498046875}, {"start": 1009.02, "end": 1009.48, "word": " interval", "probability": 0.96337890625}], "temperature": 1.0}, {"id": 38, "seek": 
103768, "start": 1010.74, "end": 1037.68, "text": " Four mu lies between 356 up to 368. So this case, you can say that I am 95% sure that this interval captures or contains the population mean. But you don't know exactly if this interval contains or not contain the population mean mu, because mu is unknown. So the first scenario here, if mu is given, is unknown.", "tokens": [7451, 2992, 9134, 1296, 6976, 21, 493, 281, 8652, 23, 13, 407, 341, 1389, 11, 291, 393, 584, 300, 286, 669, 13420, 4, 988, 300, 341, 15035, 27986, 420, 8306, 264, 4415, 914, 13, 583, 291, 500, 380, 458, 2293, 498, 341, 15035, 8306, 420, 406, 5304, 264, 4415, 914, 2992, 11, 570, 2992, 307, 9841, 13, 407, 264, 700, 9005, 510, 11, 498, 2992, 307, 2212, 11, 307, 9841, 13], "avg_logprob": -0.1996527781916989, "compression_ratio": 1.6302083333333333, "no_speech_prob": 0.0, "words": [{"start": 1010.74, "end": 1011.1, "word": " Four", "probability": 0.2607421875}, {"start": 1011.1, "end": 1011.28, "word": " mu", "probability": 0.63232421875}, {"start": 1011.28, "end": 1011.72, "word": " lies", "probability": 0.82861328125}, {"start": 1011.72, "end": 1012.96, "word": " between", "probability": 0.84619140625}, {"start": 1012.96, "end": 1014.16, "word": " 356", "probability": 0.75927734375}, {"start": 1014.16, "end": 1014.5, "word": " up", "probability": 0.91650390625}, {"start": 1014.5, "end": 1014.62, "word": " to", "probability": 0.94873046875}, {"start": 1014.62, "end": 1015.34, "word": " 368.", "probability": 0.9580078125}, {"start": 1015.76, "end": 1016.1, "word": " So", "probability": 0.93505859375}, {"start": 1016.1, "end": 1016.34, "word": " this", "probability": 0.390625}, {"start": 1016.34, "end": 1016.56, "word": " case,", "probability": 0.66943359375}, {"start": 1016.64, "end": 1016.7, "word": " you", "probability": 0.92822265625}, {"start": 1016.7, "end": 1016.9, "word": " can", "probability": 0.94482421875}, {"start": 1016.9, "end": 1017.12, "word": " say", "probability": 0.884765625}, 
{"start": 1017.12, "end": 1017.4, "word": " that", "probability": 0.919921875}, {"start": 1017.4, "end": 1017.6, "word": " I", "probability": 0.87255859375}, {"start": 1017.6, "end": 1017.74, "word": " am", "probability": 0.91015625}, {"start": 1017.74, "end": 1018.1, "word": " 95", "probability": 0.96484375}, {"start": 1018.1, "end": 1018.46, "word": "%", "probability": 0.888671875}, {"start": 1018.46, "end": 1018.84, "word": " sure", "probability": 0.91162109375}, {"start": 1018.84, "end": 1019.22, "word": " that", "probability": 0.9345703125}, {"start": 1019.22, "end": 1019.96, "word": " this", "probability": 0.93408203125}, {"start": 1019.96, "end": 1020.58, "word": " interval", "probability": 0.9736328125}, {"start": 1020.58, "end": 1021.64, "word": " captures", "probability": 0.86376953125}, {"start": 1021.64, "end": 1021.9, "word": " or", "probability": 0.88720703125}, {"start": 1021.9, "end": 1022.54, "word": " contains", "probability": 0.91064453125}, {"start": 1022.54, "end": 1023.5, "word": " the", "probability": 0.8984375}, {"start": 1023.5, "end": 1023.86, "word": " population", "probability": 0.9609375}, {"start": 1023.86, "end": 1024.12, "word": " mean.", "probability": 0.9423828125}, {"start": 1024.28, "end": 1024.56, "word": " But", "probability": 0.94482421875}, {"start": 1024.56, "end": 1024.68, "word": " you", "probability": 0.82861328125}, {"start": 1024.68, "end": 1024.92, "word": " don't", "probability": 0.973876953125}, {"start": 1024.92, "end": 1025.16, "word": " know", "probability": 0.89892578125}, {"start": 1025.16, "end": 1025.68, "word": " exactly", "probability": 0.8779296875}, {"start": 1025.68, "end": 1026.34, "word": " if", "probability": 0.9384765625}, {"start": 1026.34, "end": 1026.6, "word": " this", "probability": 0.94189453125}, {"start": 1026.6, "end": 1026.98, "word": " interval", "probability": 0.97119140625}, {"start": 1026.98, "end": 1027.62, "word": " contains", "probability": 0.921875}, {"start": 1027.62, "end": 
1027.86, "word": " or", "probability": 0.94873046875}, {"start": 1027.86, "end": 1028.04, "word": " not", "probability": 0.7626953125}, {"start": 1028.04, "end": 1028.56, "word": " contain", "probability": 0.62744140625}, {"start": 1028.56, "end": 1029.6, "word": " the", "probability": 0.8408203125}, {"start": 1029.6, "end": 1029.98, "word": " population", "probability": 0.951171875}, {"start": 1029.98, "end": 1030.2, "word": " mean", "probability": 0.8466796875}, {"start": 1030.2, "end": 1030.34, "word": " mu,", "probability": 0.34375}, {"start": 1030.4, "end": 1030.72, "word": " because", "probability": 0.89404296875}, {"start": 1030.72, "end": 1030.98, "word": " mu", "probability": 0.8544921875}, {"start": 1030.98, "end": 1031.34, "word": " is", "probability": 0.95361328125}, {"start": 1031.34, "end": 1032.52, "word": " unknown.", "probability": 0.56982421875}, {"start": 1033.22, "end": 1033.82, "word": " So", "probability": 0.93701171875}, {"start": 1033.82, "end": 1034.0, "word": " the", "probability": 0.87060546875}, {"start": 1034.0, "end": 1034.28, "word": " first", "probability": 0.83642578125}, {"start": 1034.28, "end": 1034.72, "word": " scenario", "probability": 0.859375}, {"start": 1034.72, "end": 1035.1, "word": " here,", "probability": 0.83984375}, {"start": 1036.06, "end": 1036.22, "word": " if", "probability": 0.95458984375}, {"start": 1036.22, "end": 1036.44, "word": " mu", "probability": 0.6806640625}, {"start": 1036.44, "end": 1036.6, "word": " is", "probability": 0.8837890625}, {"start": 1036.6, "end": 1036.88, "word": " given,", "probability": 0.86767578125}, {"start": 1037.1, "end": 1037.32, "word": " is", "probability": 0.60791015625}, {"start": 1037.32, "end": 1037.68, "word": " unknown.", "probability": 0.90087890625}], "temperature": 1.0}, {"id": 39, "seek": 106579, "start": 1038.37, "end": 1065.79, "text": " You can say that 95% of the samples we have contain the sample means that lie between these two values. 
But the reality, as I mentioned before, mu is not given. So we can take a random sample. And from that sample, we can compute x bar. Then we can figure out that mu ranges between these two values.", "tokens": [509, 393, 584, 300, 13420, 4, 295, 264, 10938, 321, 362, 5304, 264, 6889, 1355, 300, 4544, 1296, 613, 732, 4190, 13, 583, 264, 4103, 11, 382, 286, 2835, 949, 11, 2992, 307, 406, 2212, 13, 407, 321, 393, 747, 257, 4974, 6889, 13, 400, 490, 300, 6889, 11, 321, 393, 14722, 2031, 2159, 13, 1396, 321, 393, 2573, 484, 300, 2992, 22526, 1296, 613, 732, 4190, 13], "avg_logprob": -0.205955615942029, "compression_ratio": 1.601063829787234, "no_speech_prob": 0.0, "words": [{"start": 1038.37, "end": 1038.59, "word": " You", "probability": 0.55615234375}, {"start": 1038.59, "end": 1038.89, "word": " can", "probability": 0.93994140625}, {"start": 1038.89, "end": 1039.17, "word": " say", "probability": 0.7138671875}, {"start": 1039.17, "end": 1039.47, "word": " that", "probability": 0.9208984375}, {"start": 1039.47, "end": 1040.07, "word": " 95", "probability": 0.9482421875}, {"start": 1040.07, "end": 1040.55, "word": "%", "probability": 0.85107421875}, {"start": 1040.55, "end": 1040.91, "word": " of", "probability": 0.96533203125}, {"start": 1040.91, "end": 1041.11, "word": " the", "probability": 0.92529296875}, {"start": 1041.11, "end": 1041.61, "word": " samples", "probability": 0.76904296875}, {"start": 1041.61, "end": 1043.07, "word": " we", "probability": 0.84033203125}, {"start": 1043.07, "end": 1043.43, "word": " have", "probability": 0.9404296875}, {"start": 1043.43, "end": 1045.49, "word": " contain", "probability": 0.461669921875}, {"start": 1045.49, "end": 1047.11, "word": " the", "probability": 0.1533203125}, {"start": 1047.11, "end": 1048.29, "word": " sample", "probability": 0.44287109375}, {"start": 1048.29, "end": 1048.61, "word": " means", "probability": 0.7314453125}, {"start": 1048.61, "end": 1049.11, "word": " that", "probability": 0.8583984375}, 
{"start": 1049.11, "end": 1049.29, "word": " lie", "probability": 0.82568359375}, {"start": 1049.29, "end": 1049.59, "word": " between", "probability": 0.869140625}, {"start": 1049.59, "end": 1049.81, "word": " these", "probability": 0.8564453125}, {"start": 1049.81, "end": 1050.01, "word": " two", "probability": 0.91748046875}, {"start": 1050.01, "end": 1050.31, "word": " values.", "probability": 0.86669921875}, {"start": 1050.95, "end": 1051.35, "word": " But", "probability": 0.9091796875}, {"start": 1051.35, "end": 1051.53, "word": " the", "probability": 0.533203125}, {"start": 1051.53, "end": 1051.93, "word": " reality,", "probability": 0.97412109375}, {"start": 1052.09, "end": 1052.17, "word": " as", "probability": 0.9638671875}, {"start": 1052.17, "end": 1052.29, "word": " I", "probability": 0.998046875}, {"start": 1052.29, "end": 1052.57, "word": " mentioned", "probability": 0.8310546875}, {"start": 1052.57, "end": 1053.01, "word": " before,", "probability": 0.86181640625}, {"start": 1054.75, "end": 1054.97, "word": " mu", "probability": 0.488525390625}, {"start": 1054.97, "end": 1055.15, "word": " is", "probability": 0.93115234375}, {"start": 1055.15, "end": 1055.37, "word": " not", "probability": 0.951171875}, {"start": 1055.37, "end": 1055.59, "word": " given.", "probability": 0.8837890625}, {"start": 1056.25, "end": 1056.63, "word": " So", "probability": 0.9580078125}, {"start": 1056.63, "end": 1056.79, "word": " we", "probability": 0.8623046875}, {"start": 1056.79, "end": 1056.95, "word": " can", "probability": 0.93505859375}, {"start": 1056.95, "end": 1057.21, "word": " take", "probability": 0.8740234375}, {"start": 1057.21, "end": 1057.37, "word": " a", "probability": 0.958984375}, {"start": 1057.37, "end": 1057.67, "word": " random", "probability": 0.88232421875}, {"start": 1057.67, "end": 1058.09, "word": " sample.", "probability": 0.875}, {"start": 1059.11, "end": 1059.57, "word": " And", "probability": 0.9521484375}, {"start": 1059.57, "end": 
1060.01, "word": " from", "probability": 0.81591796875}, {"start": 1060.01, "end": 1060.25, "word": " that", "probability": 0.91845703125}, {"start": 1060.25, "end": 1060.49, "word": " sample,", "probability": 0.8671875}, {"start": 1060.59, "end": 1060.69, "word": " we", "probability": 0.94140625}, {"start": 1060.69, "end": 1060.87, "word": " can", "probability": 0.92919921875}, {"start": 1060.87, "end": 1061.25, "word": " compute", "probability": 0.93017578125}, {"start": 1061.25, "end": 1061.49, "word": " x", "probability": 0.78369140625}, {"start": 1061.49, "end": 1061.73, "word": " bar.", "probability": 0.68017578125}, {"start": 1062.47, "end": 1062.77, "word": " Then", "probability": 0.69384765625}, {"start": 1062.77, "end": 1062.91, "word": " we", "probability": 0.833984375}, {"start": 1062.91, "end": 1063.11, "word": " can", "probability": 0.94189453125}, {"start": 1063.11, "end": 1063.31, "word": " figure", "probability": 0.9697265625}, {"start": 1063.31, "end": 1063.57, "word": " out", "probability": 0.8828125}, {"start": 1063.57, "end": 1063.87, "word": " that", "probability": 0.93310546875}, {"start": 1063.87, "end": 1064.15, "word": " mu", "probability": 0.5849609375}, {"start": 1064.15, "end": 1064.67, "word": " ranges", "probability": 0.88330078125}, {"start": 1064.67, "end": 1065.03, "word": " between", "probability": 0.86474609375}, {"start": 1065.03, "end": 1065.29, "word": " these", "probability": 0.8583984375}, {"start": 1065.29, "end": 1065.45, "word": " two", "probability": 0.91650390625}, {"start": 1065.45, "end": 1065.79, "word": " values.", "probability": 0.966796875}], "temperature": 1.0}, {"id": 40, "seek": 109331, "start": 1066.75, "end": 1093.31, "text": " But here, you cannot say that this interval contains MU 100%. But you can say that 95% of random samples like this one will contain MU with 95%. So that's the logic for the confidence interval. 
But again, if we select many, many samples,", "tokens": [583, 510, 11, 291, 2644, 584, 300, 341, 15035, 8306, 17935, 2319, 6856, 583, 291, 393, 584, 300, 13420, 4, 295, 4974, 10938, 411, 341, 472, 486, 5304, 17935, 365, 13420, 6856, 407, 300, 311, 264, 9952, 337, 264, 6687, 15035, 13, 583, 797, 11, 498, 321, 3048, 867, 11, 867, 10938, 11], "avg_logprob": -0.1349102989942939, "compression_ratio": 1.4968553459119496, "no_speech_prob": 0.0, "words": [{"start": 1066.75, "end": 1067.05, "word": " But", "probability": 0.80517578125}, {"start": 1067.05, "end": 1067.29, "word": " here,", "probability": 0.8115234375}, {"start": 1067.71, "end": 1067.91, "word": " you", "probability": 0.96044921875}, {"start": 1067.91, "end": 1068.21, "word": " cannot", "probability": 0.8720703125}, {"start": 1068.21, "end": 1068.53, "word": " say", "probability": 0.92138671875}, {"start": 1068.53, "end": 1068.83, "word": " that", "probability": 0.92578125}, {"start": 1068.83, "end": 1069.43, "word": " this", "probability": 0.90869140625}, {"start": 1069.43, "end": 1069.85, "word": " interval", "probability": 0.97119140625}, {"start": 1069.85, "end": 1070.71, "word": " contains", "probability": 0.92529296875}, {"start": 1070.71, "end": 1071.13, "word": " MU", "probability": 0.421142578125}, {"start": 1071.13, "end": 1072.21, "word": " 100%.", "probability": 0.7333984375}, {"start": 1072.21, "end": 1073.77, "word": " But", "probability": 0.94482421875}, {"start": 1073.77, "end": 1074.35, "word": " you", "probability": 0.87841796875}, {"start": 1074.35, "end": 1074.55, "word": " can", "probability": 0.9482421875}, {"start": 1074.55, "end": 1074.73, "word": " say", "probability": 0.84619140625}, {"start": 1074.73, "end": 1074.97, "word": " that", "probability": 0.9287109375}, {"start": 1074.97, "end": 1075.35, "word": " 95", "probability": 0.98486328125}, {"start": 1075.35, "end": 1075.79, "word": "%", "probability": 0.9951171875}, {"start": 1075.79, "end": 1076.63, "word": " of", "probability": 
0.9599609375}, {"start": 1076.63, "end": 1077.21, "word": " random", "probability": 0.75634765625}, {"start": 1077.21, "end": 1077.81, "word": " samples", "probability": 0.861328125}, {"start": 1077.81, "end": 1078.15, "word": " like", "probability": 0.497314453125}, {"start": 1078.15, "end": 1078.43, "word": " this", "probability": 0.951171875}, {"start": 1078.43, "end": 1078.77, "word": " one", "probability": 0.92333984375}, {"start": 1078.77, "end": 1079.99, "word": " will", "probability": 0.77197265625}, {"start": 1079.99, "end": 1080.45, "word": " contain", "probability": 0.78857421875}, {"start": 1080.45, "end": 1080.73, "word": " MU", "probability": 0.88720703125}, {"start": 1080.73, "end": 1081.19, "word": " with", "probability": 0.81005859375}, {"start": 1081.19, "end": 1082.15, "word": " 95%.", "probability": 0.970458984375}, {"start": 1082.15, "end": 1083.91, "word": " So", "probability": 0.96044921875}, {"start": 1083.91, "end": 1084.25, "word": " that's", "probability": 0.927490234375}, {"start": 1084.25, "end": 1084.55, "word": " the", "probability": 0.92138671875}, {"start": 1084.55, "end": 1085.25, "word": " logic", "probability": 0.94970703125}, {"start": 1085.25, "end": 1085.83, "word": " for", "probability": 0.94873046875}, {"start": 1085.83, "end": 1086.03, "word": " the", "probability": 0.923828125}, {"start": 1086.03, "end": 1086.59, "word": " confidence", "probability": 0.97705078125}, {"start": 1086.59, "end": 1087.89, "word": " interval.", "probability": 0.970703125}, {"start": 1089.19, "end": 1089.61, "word": " But", "probability": 0.9521484375}, {"start": 1089.61, "end": 1089.99, "word": " again,", "probability": 0.9140625}, {"start": 1091.11, "end": 1091.35, "word": " if", "probability": 0.955078125}, {"start": 1091.35, "end": 1091.79, "word": " we", "probability": 0.912109375}, {"start": 1091.79, "end": 1092.25, "word": " select", "probability": 0.86865234375}, {"start": 1092.25, "end": 1092.55, "word": " many,", "probability": 
0.90234375}, {"start": 1092.63, "end": 1092.75, "word": " many", "probability": 0.89453125}, {"start": 1092.75, "end": 1093.31, "word": " samples,", "probability": 0.9130859375}], "temperature": 1.0}, {"id": 41, "seek": 111381, "start": 1093.99, "end": 1113.81, "text": " We will get different sample means, for sure. Now, suppose I have 1000 students. I select, for example, 50 samples, each one of size 30. Maybe the first sample will give average of", "tokens": [492, 486, 483, 819, 6889, 1355, 11, 337, 988, 13, 823, 11, 7297, 286, 362, 9714, 1731, 13, 286, 3048, 11, 337, 1365, 11, 2625, 10938, 11, 1184, 472, 295, 2744, 2217, 13, 2704, 264, 700, 6889, 486, 976, 4274, 295], "avg_logprob": -0.23902529513552076, "compression_ratio": 1.3308823529411764, "no_speech_prob": 0.0, "words": [{"start": 1093.99, "end": 1094.25, "word": " We", "probability": 0.2244873046875}, {"start": 1094.25, "end": 1094.45, "word": " will", "probability": 0.85205078125}, {"start": 1094.45, "end": 1094.77, "word": " get", "probability": 0.9140625}, {"start": 1094.77, "end": 1095.55, "word": " different", "probability": 0.84912109375}, {"start": 1095.55, "end": 1096.67, "word": " sample", "probability": 0.8486328125}, {"start": 1096.67, "end": 1097.13, "word": " means,", "probability": 0.84423828125}, {"start": 1098.45, "end": 1098.63, "word": " for", "probability": 0.89404296875}, {"start": 1098.63, "end": 1098.87, "word": " sure.", "probability": 0.94287109375}, {"start": 1099.33, "end": 1100.17, "word": " Now,", "probability": 0.81689453125}, {"start": 1100.85, "end": 1101.17, "word": " suppose", "probability": 0.86865234375}, {"start": 1101.17, "end": 1101.35, "word": " I", "probability": 0.9521484375}, {"start": 1101.35, "end": 1101.65, "word": " have", "probability": 0.95068359375}, {"start": 1101.65, "end": 1102.45, "word": " 1000", "probability": 0.42236328125}, {"start": 1102.45, "end": 1103.29, "word": " students.", "probability": 0.9658203125}, {"start": 1103.97, "end": 1104.31, 
"word": " I", "probability": 0.9814453125}, {"start": 1104.31, "end": 1104.77, "word": " select,", "probability": 0.873046875}, {"start": 1105.17, "end": 1105.29, "word": " for", "probability": 0.95703125}, {"start": 1105.29, "end": 1105.65, "word": " example,", "probability": 0.9736328125}, {"start": 1106.27, "end": 1106.75, "word": " 50", "probability": 0.9189453125}, {"start": 1106.75, "end": 1107.49, "word": " samples,", "probability": 0.87841796875}, {"start": 1107.99, "end": 1108.27, "word": " each", "probability": 0.9521484375}, {"start": 1108.27, "end": 1108.59, "word": " one", "probability": 0.92333984375}, {"start": 1108.59, "end": 1109.25, "word": " of", "probability": 0.9423828125}, {"start": 1109.25, "end": 1109.63, "word": " size", "probability": 0.8623046875}, {"start": 1109.63, "end": 1110.17, "word": " 30.", "probability": 0.951171875}, {"start": 1111.01, "end": 1111.45, "word": " Maybe", "probability": 0.88427734375}, {"start": 1111.45, "end": 1111.65, "word": " the", "probability": 0.8896484375}, {"start": 1111.65, "end": 1111.87, "word": " first", "probability": 0.88134765625}, {"start": 1111.87, "end": 1112.29, "word": " sample", "probability": 0.90869140625}, {"start": 1112.29, "end": 1112.65, "word": " will", "probability": 0.8740234375}, {"start": 1112.65, "end": 1112.91, "word": " give", "probability": 0.85595703125}, {"start": 1112.91, "end": 1113.33, "word": " average", "probability": 0.734375}, {"start": 1113.33, "end": 1113.81, "word": " of", "probability": 0.97021484375}], "temperature": 1.0}, {"id": 42, "seek": 114053, "start": 1115.25, "end": 1140.53, "text": " this value, 362. And I will end with this confidence limits, lower limit 356, upper limit 368. I know that from previous studies that the population means 368. 
So just ask this question, does this interval containing", "tokens": [341, 2158, 11, 8652, 17, 13, 400, 286, 486, 917, 365, 341, 6687, 10406, 11, 3126, 4948, 6976, 21, 11, 6597, 4948, 8652, 23, 13, 286, 458, 300, 490, 3894, 5313, 300, 264, 4415, 1355, 8652, 23, 13, 407, 445, 1029, 341, 1168, 11, 775, 341, 15035, 19273], "avg_logprob": -0.2632334086359764, "compression_ratio": 1.4, "no_speech_prob": 0.0, "words": [{"start": 1115.25, "end": 1115.63, "word": " this", "probability": 0.361572265625}, {"start": 1115.63, "end": 1115.97, "word": " value,", "probability": 0.95849609375}, {"start": 1116.11, "end": 1116.85, "word": " 362.", "probability": 0.892578125}, {"start": 1117.31, "end": 1117.83, "word": " And", "probability": 0.89013671875}, {"start": 1117.83, "end": 1118.01, "word": " I", "probability": 0.96240234375}, {"start": 1118.01, "end": 1118.21, "word": " will", "probability": 0.90380859375}, {"start": 1118.21, "end": 1118.49, "word": " end", "probability": 0.90478515625}, {"start": 1118.49, "end": 1118.85, "word": " with", "probability": 0.89990234375}, {"start": 1118.85, "end": 1119.29, "word": " this", "probability": 0.69970703125}, {"start": 1119.29, "end": 1119.71, "word": " confidence", "probability": 0.85791015625}, {"start": 1119.71, "end": 1121.65, "word": " limits,", "probability": 0.80126953125}, {"start": 1122.37, "end": 1122.61, "word": " lower", "probability": 0.8427734375}, {"start": 1122.61, "end": 1122.81, "word": " limit", "probability": 0.93505859375}, {"start": 1122.81, "end": 1123.89, "word": " 356,", "probability": 0.6275634765625}, {"start": 1124.59, "end": 1125.03, "word": " upper", "probability": 0.85791015625}, {"start": 1125.03, "end": 1125.37, "word": " limit", "probability": 0.97412109375}, {"start": 1125.37, "end": 1126.27, "word": " 368.", "probability": 0.97021484375}, {"start": 1127.25, "end": 1127.79, "word": " I", "probability": 0.986328125}, {"start": 1127.79, "end": 1127.97, "word": " know", "probability": 0.8916015625}, 
{"start": 1127.97, "end": 1128.49, "word": " that", "probability": 0.92529296875}, {"start": 1128.49, "end": 1130.83, "word": " from", "probability": 0.79833984375}, {"start": 1130.83, "end": 1131.27, "word": " previous", "probability": 0.87841796875}, {"start": 1131.27, "end": 1131.77, "word": " studies", "probability": 0.95849609375}, {"start": 1131.77, "end": 1132.77, "word": " that", "probability": 0.7138671875}, {"start": 1132.77, "end": 1133.49, "word": " the", "probability": 0.8974609375}, {"start": 1133.49, "end": 1133.83, "word": " population", "probability": 0.927734375}, {"start": 1133.83, "end": 1134.15, "word": " means", "probability": 0.6318359375}, {"start": 1134.15, "end": 1135.19, "word": " 368.", "probability": 0.840576171875}, {"start": 1135.81, "end": 1136.01, "word": " So", "probability": 0.69775390625}, {"start": 1136.01, "end": 1136.29, "word": " just", "probability": 0.54931640625}, {"start": 1136.29, "end": 1136.63, "word": " ask", "probability": 0.85888671875}, {"start": 1136.63, "end": 1137.39, "word": " this", "probability": 0.61669921875}, {"start": 1137.39, "end": 1137.81, "word": " question,", "probability": 0.916015625}, {"start": 1138.29, "end": 1138.79, "word": " does", "probability": 0.86865234375}, {"start": 1138.79, "end": 1139.11, "word": " this", "probability": 0.94677734375}, {"start": 1139.11, "end": 1139.69, "word": " interval", "probability": 0.96337890625}, {"start": 1139.69, "end": 1140.53, "word": " containing", "probability": 0.470703125}], "temperature": 1.0}, {"id": 43, "seek": 116274, "start": 1142.98, "end": 1162.74, "text": " The range from 356 up to 368.13 covers mu. So in this case, this interval continues. 
Maybe someone select another sample of the same size, and that sample gives sample mean of 369.5.", "tokens": [440, 3613, 490, 6976, 21, 493, 281, 8652, 23, 13, 7668, 10538, 2992, 13, 407, 294, 341, 1389, 11, 341, 15035, 6515, 13, 2704, 1580, 3048, 1071, 6889, 295, 264, 912, 2744, 11, 293, 300, 6889, 2709, 6889, 914, 295, 8652, 24, 13, 20, 13], "avg_logprob": -0.22078804671764374, "compression_ratio": 1.3357664233576643, "no_speech_prob": 0.0, "words": [{"start": 1142.98, "end": 1143.2, "word": " The", "probability": 0.405029296875}, {"start": 1143.2, "end": 1143.6, "word": " range", "probability": 0.8759765625}, {"start": 1143.6, "end": 1143.9, "word": " from", "probability": 0.88427734375}, {"start": 1143.9, "end": 1144.94, "word": " 356", "probability": 0.826171875}, {"start": 1144.94, "end": 1145.12, "word": " up", "probability": 0.8935546875}, {"start": 1145.12, "end": 1145.2, "word": " to", "probability": 0.94482421875}, {"start": 1145.2, "end": 1145.96, "word": " 368", "probability": 0.9619140625}, {"start": 1145.96, "end": 1146.86, "word": ".13", "probability": 0.802001953125}, {"start": 1146.86, "end": 1148.04, "word": " covers", "probability": 0.76318359375}, {"start": 1148.04, "end": 1148.4, "word": " mu.", "probability": 0.1346435546875}, {"start": 1148.92, "end": 1149.1, "word": " So", "probability": 0.85791015625}, {"start": 1149.1, "end": 1149.22, "word": " in", "probability": 0.73193359375}, {"start": 1149.22, "end": 1149.42, "word": " this", "probability": 0.94580078125}, {"start": 1149.42, "end": 1149.66, "word": " case,", "probability": 0.8994140625}, {"start": 1149.78, "end": 1149.98, "word": " this", "probability": 0.92626953125}, {"start": 1149.98, "end": 1150.54, "word": " interval", "probability": 0.97216796875}, {"start": 1150.54, "end": 1151.74, "word": " continues.", "probability": 0.54345703125}, {"start": 1153.04, "end": 1153.74, "word": " Maybe", "probability": 0.9169921875}, {"start": 1153.74, "end": 1154.28, "word": " someone", 
"probability": 0.9462890625}, {"start": 1154.28, "end": 1156.28, "word": " select", "probability": 0.53271484375}, {"start": 1156.28, "end": 1156.62, "word": " another", "probability": 0.53369140625}, {"start": 1156.62, "end": 1157.08, "word": " sample", "probability": 0.853515625}, {"start": 1157.08, "end": 1157.42, "word": " of", "probability": 0.9208984375}, {"start": 1157.42, "end": 1157.58, "word": " the", "probability": 0.91650390625}, {"start": 1157.58, "end": 1157.82, "word": " same", "probability": 0.9013671875}, {"start": 1157.82, "end": 1158.28, "word": " size,", "probability": 0.85498046875}, {"start": 1158.76, "end": 1158.98, "word": " and", "probability": 0.93017578125}, {"start": 1158.98, "end": 1159.2, "word": " that", "probability": 0.9345703125}, {"start": 1159.2, "end": 1159.6, "word": " sample", "probability": 0.88330078125}, {"start": 1159.6, "end": 1159.9, "word": " gives", "probability": 0.79931640625}, {"start": 1159.9, "end": 1160.26, "word": " sample", "probability": 0.80810546875}, {"start": 1160.26, "end": 1160.5, "word": " mean", "probability": 0.95068359375}, {"start": 1160.5, "end": 1160.76, "word": " of", "probability": 0.96826171875}, {"start": 1160.76, "end": 1162.16, "word": " 369", "probability": 0.931640625}, {"start": 1162.16, "end": 1162.74, "word": ".5.", "probability": 0.984619140625}], "temperature": 1.0}, {"id": 44, "seek": 119308, "start": 1164.85, "end": 1193.09, "text": " By using this equation, you can figure out lower limits and upper limits. Now, again, I ask you still this question. Does this interval contain a mu? The answer is yes, because 368 lies between these two values. So the first one contains a mu. The second one contains mu. Look at the third symbol. Suppose from my symbol, I got symbol mu to be 360. 
So lower and upper limits are?", "tokens": [3146, 1228, 341, 5367, 11, 291, 393, 2573, 484, 3126, 10406, 293, 6597, 10406, 13, 823, 11, 797, 11, 286, 1029, 291, 920, 341, 1168, 13, 4402, 341, 15035, 5304, 257, 2992, 30, 440, 1867, 307, 2086, 11, 570, 8652, 23, 9134, 1296, 613, 732, 4190, 13, 407, 264, 700, 472, 8306, 257, 2992, 13, 440, 1150, 472, 8306, 2992, 13, 2053, 412, 264, 2636, 5986, 13, 21360, 490, 452, 5986, 11, 286, 658, 5986, 2992, 281, 312, 13898, 13, 407, 3126, 293, 6597, 10406, 366, 30], "avg_logprob": -0.22318891588259826, "compression_ratio": 1.6309012875536482, "no_speech_prob": 0.0, "words": [{"start": 1164.85, "end": 1165.13, "word": " By", "probability": 0.436767578125}, {"start": 1165.13, "end": 1165.47, "word": " using", "probability": 0.91845703125}, {"start": 1165.47, "end": 1165.87, "word": " this", "probability": 0.9296875}, {"start": 1165.87, "end": 1166.61, "word": " equation,", "probability": 0.9677734375}, {"start": 1166.83, "end": 1166.89, "word": " you", "probability": 0.9287109375}, {"start": 1166.89, "end": 1167.25, "word": " can", "probability": 0.93359375}, {"start": 1167.25, "end": 1167.67, "word": " figure", "probability": 0.9482421875}, {"start": 1167.67, "end": 1168.09, "word": " out", "probability": 0.8759765625}, {"start": 1168.09, "end": 1168.79, "word": " lower", "probability": 0.822265625}, {"start": 1168.79, "end": 1169.27, "word": " limits", "probability": 0.951171875}, {"start": 1169.27, "end": 1169.51, "word": " and", "probability": 0.91796875}, {"start": 1169.51, "end": 1169.75, "word": " upper", "probability": 0.8271484375}, {"start": 1169.75, "end": 1170.13, "word": " limits.", "probability": 0.95556640625}, {"start": 1170.37, "end": 1170.53, "word": " Now,", "probability": 0.841796875}, {"start": 1170.59, "end": 1170.85, "word": " again,", "probability": 0.69873046875}, {"start": 1170.93, "end": 1170.93, "word": " I", "probability": 0.74462890625}, {"start": 1170.93, "end": 1171.19, "word": " ask", "probability": 
0.755859375}, {"start": 1171.19, "end": 1171.43, "word": " you", "probability": 0.94384765625}, {"start": 1171.43, "end": 1172.71, "word": " still", "probability": 0.1275634765625}, {"start": 1172.71, "end": 1172.95, "word": " this", "probability": 0.9189453125}, {"start": 1172.95, "end": 1173.39, "word": " question.", "probability": 0.90869140625}, {"start": 1173.69, "end": 1174.19, "word": " Does", "probability": 0.90869140625}, {"start": 1174.19, "end": 1174.43, "word": " this", "probability": 0.9384765625}, {"start": 1174.43, "end": 1174.75, "word": " interval", "probability": 0.91796875}, {"start": 1174.75, "end": 1175.15, "word": " contain", "probability": 0.52099609375}, {"start": 1175.15, "end": 1175.33, "word": " a", "probability": 0.52978515625}, {"start": 1175.33, "end": 1175.37, "word": " mu?", "probability": 0.62060546875}, {"start": 1176.25, "end": 1176.67, "word": " The", "probability": 0.66064453125}, {"start": 1176.67, "end": 1177.03, "word": " answer", "probability": 0.96484375}, {"start": 1177.03, "end": 1177.33, "word": " is", "probability": 0.94384765625}, {"start": 1177.33, "end": 1177.57, "word": " yes,", "probability": 0.8955078125}, {"start": 1177.61, "end": 1177.93, "word": " because", "probability": 0.8837890625}, {"start": 1177.93, "end": 1178.71, "word": " 368", "probability": 0.921875}, {"start": 1178.71, "end": 1179.13, "word": " lies", "probability": 0.93505859375}, {"start": 1179.13, "end": 1179.49, "word": " between", "probability": 0.8603515625}, {"start": 1179.49, "end": 1179.75, "word": " these", "probability": 0.82861328125}, {"start": 1179.75, "end": 1179.89, "word": " two", "probability": 0.873046875}, {"start": 1179.89, "end": 1180.25, "word": " values.", "probability": 0.9384765625}, {"start": 1180.99, "end": 1181.33, "word": " So", "probability": 0.93798828125}, {"start": 1181.33, "end": 1181.65, "word": " the", "probability": 0.7080078125}, {"start": 1181.65, "end": 1181.93, "word": " first", "probability": 0.8779296875}, 
{"start": 1181.93, "end": 1182.15, "word": " one", "probability": 0.89697265625}, {"start": 1182.15, "end": 1182.57, "word": " contains", "probability": 0.91015625}, {"start": 1182.57, "end": 1182.73, "word": " a", "probability": 0.75146484375}, {"start": 1182.73, "end": 1182.87, "word": " mu.", "probability": 0.95849609375}, {"start": 1183.01, "end": 1183.17, "word": " The", "probability": 0.78076171875}, {"start": 1183.17, "end": 1183.45, "word": " second", "probability": 0.8896484375}, {"start": 1183.45, "end": 1183.69, "word": " one", "probability": 0.90576171875}, {"start": 1183.69, "end": 1184.03, "word": " contains", "probability": 0.91748046875}, {"start": 1184.03, "end": 1184.31, "word": " mu.", "probability": 0.68994140625}, {"start": 1184.61, "end": 1184.81, "word": " Look", "probability": 0.76611328125}, {"start": 1184.81, "end": 1184.93, "word": " at", "probability": 0.9599609375}, {"start": 1184.93, "end": 1185.09, "word": " the", "probability": 0.8662109375}, {"start": 1185.09, "end": 1185.31, "word": " third", "probability": 0.921875}, {"start": 1185.31, "end": 1185.65, "word": " symbol.", "probability": 0.78759765625}, {"start": 1186.55, "end": 1187.01, "word": " Suppose", "probability": 0.74169921875}, {"start": 1187.01, "end": 1187.27, "word": " from", "probability": 0.73291015625}, {"start": 1187.27, "end": 1187.55, "word": " my", "probability": 0.97119140625}, {"start": 1187.55, "end": 1187.83, "word": " symbol,", "probability": 0.79931640625}, {"start": 1187.93, "end": 1188.03, "word": " I", "probability": 0.9970703125}, {"start": 1188.03, "end": 1188.25, "word": " got", "probability": 0.7353515625}, {"start": 1188.25, "end": 1188.57, "word": " symbol", "probability": 0.7509765625}, {"start": 1188.57, "end": 1188.75, "word": " mu", "probability": 0.794921875}, {"start": 1188.75, "end": 1188.91, "word": " to", "probability": 0.96337890625}, {"start": 1188.91, "end": 1189.07, "word": " be", "probability": 0.9453125}, {"start": 1189.07, "end": 
1189.57, "word": " 360.", "probability": 0.88671875}, {"start": 1190.65, "end": 1191.21, "word": " So", "probability": 0.9521484375}, {"start": 1191.21, "end": 1191.83, "word": " lower", "probability": 0.8486328125}, {"start": 1191.83, "end": 1192.09, "word": " and", "probability": 0.9365234375}, {"start": 1192.09, "end": 1192.25, "word": " upper", "probability": 0.85009765625}, {"start": 1192.25, "end": 1192.69, "word": " limits", "probability": 0.970703125}, {"start": 1192.69, "end": 1193.09, "word": " are?", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 45, "seek": 122054, "start": 1195.0, "end": 1220.54, "text": " 354 and upper limit 365.88 now this value 368 is outside of this interval so maybe your interval will continue mu or will not continue mu but you don't know actually the value of mu so in this case you cannot say my interval will continue mu", "tokens": [6976, 19, 293, 6597, 4948, 22046, 13, 16919, 586, 341, 2158, 8652, 23, 307, 2380, 295, 341, 15035, 370, 1310, 428, 15035, 486, 2354, 2992, 420, 486, 406, 2354, 2992, 457, 291, 500, 380, 458, 767, 264, 2158, 295, 2992, 370, 294, 341, 1389, 291, 2644, 584, 452, 15035, 486, 2354, 2992], "avg_logprob": -0.23113207997016186, "compression_ratio": 1.6575342465753424, "no_speech_prob": 0.0, "words": [{"start": 1195.0000000000002, "end": 1195.8200000000002, "word": " 354", "probability": 0.53179931640625}, {"start": 1195.8200000000002, "end": 1196.64, "word": " and", "probability": 0.63427734375}, {"start": 1196.64, "end": 1196.92, "word": " upper", "probability": 0.45751953125}, {"start": 1196.92, "end": 1197.24, "word": " limit", "probability": 0.94970703125}, {"start": 1197.24, "end": 1198.72, "word": " 365", "probability": 0.6416015625}, {"start": 1198.72, "end": 1200.02, "word": ".88", "probability": 0.9296875}, {"start": 1200.02, "end": 1201.0, "word": " now", "probability": 0.256103515625}, {"start": 1201.0, "end": 1202.1, "word": " this", "probability": 0.82373046875}, {"start": 1202.1, 
"end": 1202.7, "word": " value", "probability": 0.96875}, {"start": 1202.7, "end": 1203.76, "word": " 368", "probability": 0.7822265625}, {"start": 1203.76, "end": 1205.12, "word": " is", "probability": 0.83642578125}, {"start": 1205.12, "end": 1205.88, "word": " outside", "probability": 0.84130859375}, {"start": 1205.88, "end": 1206.38, "word": " of", "probability": 0.84423828125}, {"start": 1206.38, "end": 1206.64, "word": " this", "probability": 0.92724609375}, {"start": 1206.64, "end": 1207.04, "word": " interval", "probability": 0.94677734375}, {"start": 1207.04, "end": 1208.72, "word": " so", "probability": 0.61572265625}, {"start": 1208.72, "end": 1209.18, "word": " maybe", "probability": 0.89013671875}, {"start": 1209.18, "end": 1209.66, "word": " your", "probability": 0.8515625}, {"start": 1209.66, "end": 1210.04, "word": " interval", "probability": 0.96337890625}, {"start": 1210.04, "end": 1210.32, "word": " will", "probability": 0.89697265625}, {"start": 1210.32, "end": 1210.76, "word": " continue", "probability": 0.89404296875}, {"start": 1210.76, "end": 1211.04, "word": " mu", "probability": 0.491943359375}, {"start": 1211.04, "end": 1211.64, "word": " or", "probability": 0.88818359375}, {"start": 1211.64, "end": 1211.84, "word": " will", "probability": 0.8037109375}, {"start": 1211.84, "end": 1212.1, "word": " not", "probability": 0.9404296875}, {"start": 1212.1, "end": 1212.52, "word": " continue", "probability": 0.943359375}, {"start": 1212.52, "end": 1212.8, "word": " mu", "probability": 0.92822265625}, {"start": 1212.8, "end": 1213.52, "word": " but", "probability": 0.76318359375}, {"start": 1213.52, "end": 1213.68, "word": " you", "probability": 0.939453125}, {"start": 1213.68, "end": 1213.92, "word": " don't", "probability": 0.91845703125}, {"start": 1213.92, "end": 1214.24, "word": " know", "probability": 0.861328125}, {"start": 1214.24, "end": 1214.82, "word": " actually", "probability": 0.88720703125}, {"start": 1214.82, "end": 1215.8, 
"word": " the", "probability": 0.89111328125}, {"start": 1215.8, "end": 1216.08, "word": " value", "probability": 0.96533203125}, {"start": 1216.08, "end": 1216.24, "word": " of", "probability": 0.796875}, {"start": 1216.24, "end": 1216.46, "word": " mu", "probability": 0.8564453125}, {"start": 1216.46, "end": 1217.1, "word": " so", "probability": 0.7333984375}, {"start": 1217.1, "end": 1217.42, "word": " in", "probability": 0.81298828125}, {"start": 1217.42, "end": 1217.6, "word": " this", "probability": 0.95068359375}, {"start": 1217.6, "end": 1217.82, "word": " case", "probability": 0.91845703125}, {"start": 1217.82, "end": 1217.98, "word": " you", "probability": 0.93798828125}, {"start": 1217.98, "end": 1218.26, "word": " cannot", "probability": 0.78564453125}, {"start": 1218.26, "end": 1218.78, "word": " say", "probability": 0.95703125}, {"start": 1218.78, "end": 1219.28, "word": " my", "probability": 0.86669921875}, {"start": 1219.28, "end": 1219.7, "word": " interval", "probability": 0.97021484375}, {"start": 1219.7, "end": 1220.0, "word": " will", "probability": 0.88623046875}, {"start": 1220.0, "end": 1220.32, "word": " continue", "probability": 0.923828125}, {"start": 1220.32, "end": 1220.54, "word": " mu", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 46, "seek": 125170, "start": 1221.98, "end": 1251.7, "text": " But here, if we know mu, you can say that this interval contains mu, or the other one does not contain mu. But in the real world, you don't know the value of mu. So mu is unknown. But you can say that I am 95% sure that this interval will continue. Make sense? Based on the x dot? Based on the sample mean. 
And the other information we have.", "tokens": [583, 510, 11, 498, 321, 458, 2992, 11, 291, 393, 584, 300, 341, 15035, 8306, 2992, 11, 420, 264, 661, 472, 775, 406, 5304, 2992, 13, 583, 294, 264, 957, 1002, 11, 291, 500, 380, 458, 264, 2158, 295, 2992, 13, 407, 2992, 307, 9841, 13, 583, 291, 393, 584, 300, 286, 669, 13420, 4, 988, 300, 341, 15035, 486, 2354, 13, 4387, 2020, 30, 18785, 322, 264, 2031, 5893, 30, 18785, 322, 264, 6889, 914, 13, 400, 264, 661, 1589, 321, 362, 13], "avg_logprob": -0.20680147689931533, "compression_ratio": 1.6847290640394088, "no_speech_prob": 0.0, "words": [{"start": 1221.98, "end": 1222.22, "word": " But", "probability": 0.74169921875}, {"start": 1222.22, "end": 1222.4, "word": " here,", "probability": 0.57421875}, {"start": 1222.46, "end": 1222.66, "word": " if", "probability": 0.9345703125}, {"start": 1222.66, "end": 1222.88, "word": " we", "probability": 0.88818359375}, {"start": 1222.88, "end": 1223.08, "word": " know", "probability": 0.8740234375}, {"start": 1223.08, "end": 1223.22, "word": " mu,", "probability": 0.40966796875}, {"start": 1223.32, "end": 1223.5, "word": " you", "probability": 0.84912109375}, {"start": 1223.5, "end": 1223.72, "word": " can", "probability": 0.94384765625}, {"start": 1223.72, "end": 1224.04, "word": " say", "probability": 0.9130859375}, {"start": 1224.04, "end": 1224.54, "word": " that", "probability": 0.9072265625}, {"start": 1224.54, "end": 1225.2, "word": " this", "probability": 0.91796875}, {"start": 1225.2, "end": 1225.72, "word": " interval", "probability": 0.97119140625}, {"start": 1225.72, "end": 1226.84, "word": " contains", "probability": 0.931640625}, {"start": 1226.84, "end": 1227.12, "word": " mu,", "probability": 0.84912109375}, {"start": 1227.38, "end": 1228.12, "word": " or", "probability": 0.94580078125}, {"start": 1228.12, "end": 1228.26, "word": " the", "probability": 0.892578125}, {"start": 1228.26, "end": 1228.52, "word": " other", "probability": 0.89208984375}, {"start": 
1228.52, "end": 1228.84, "word": " one", "probability": 0.91748046875}, {"start": 1228.84, "end": 1229.14, "word": " does", "probability": 0.95556640625}, {"start": 1229.14, "end": 1229.32, "word": " not", "probability": 0.94921875}, {"start": 1229.32, "end": 1229.7, "word": " contain", "probability": 0.497802734375}, {"start": 1229.7, "end": 1229.86, "word": " mu.", "probability": 0.8662109375}, {"start": 1230.82, "end": 1231.3, "word": " But", "probability": 0.92724609375}, {"start": 1231.3, "end": 1232.3, "word": " in", "probability": 0.86767578125}, {"start": 1232.3, "end": 1232.62, "word": " the", "probability": 0.45068359375}, {"start": 1232.62, "end": 1232.88, "word": " real", "probability": 0.9599609375}, {"start": 1232.88, "end": 1233.26, "word": " world,", "probability": 0.95703125}, {"start": 1233.7, "end": 1234.0, "word": " you", "probability": 0.80615234375}, {"start": 1234.0, "end": 1234.34, "word": " don't", "probability": 0.961669921875}, {"start": 1234.34, "end": 1234.5, "word": " know", "probability": 0.89794921875}, {"start": 1234.5, "end": 1234.68, "word": " the", "probability": 0.91455078125}, {"start": 1234.68, "end": 1234.92, "word": " value", "probability": 0.98486328125}, {"start": 1234.92, "end": 1235.12, "word": " of", "probability": 0.9658203125}, {"start": 1235.12, "end": 1235.34, "word": " mu.", "probability": 0.9345703125}, {"start": 1235.54, "end": 1235.84, "word": " So", "probability": 0.90966796875}, {"start": 1235.84, "end": 1235.98, "word": " mu", "probability": 0.81787109375}, {"start": 1235.98, "end": 1236.1, "word": " is", "probability": 0.9111328125}, {"start": 1236.1, "end": 1236.6, "word": " unknown.", "probability": 0.90283203125}, {"start": 1237.5, "end": 1237.92, "word": " But", "probability": 0.947265625}, {"start": 1237.92, "end": 1238.22, "word": " you", "probability": 0.95556640625}, {"start": 1238.22, "end": 1238.46, "word": " can", "probability": 0.943359375}, {"start": 1238.46, "end": 1238.7, "word": " say", 
"probability": 0.9169921875}, {"start": 1238.7, "end": 1238.98, "word": " that", "probability": 0.919921875}, {"start": 1238.98, "end": 1239.38, "word": " I", "probability": 0.759765625}, {"start": 1239.38, "end": 1239.56, "word": " am", "probability": 0.927734375}, {"start": 1239.56, "end": 1240.06, "word": " 95", "probability": 0.982421875}, {"start": 1240.06, "end": 1240.62, "word": "%", "probability": 0.95703125}, {"start": 1240.62, "end": 1241.6, "word": " sure", "probability": 0.91259765625}, {"start": 1241.6, "end": 1242.0, "word": " that", "probability": 0.92919921875}, {"start": 1242.0, "end": 1242.84, "word": " this", "probability": 0.94287109375}, {"start": 1242.84, "end": 1243.18, "word": " interval", "probability": 0.95361328125}, {"start": 1243.18, "end": 1243.6, "word": " will", "probability": 0.86865234375}, {"start": 1243.6, "end": 1244.06, "word": " continue.", "probability": 0.88916015625}, {"start": 1244.7, "end": 1245.22, "word": " Make", "probability": 0.55810546875}, {"start": 1245.22, "end": 1245.54, "word": " sense?", "probability": 0.83837890625}, {"start": 1245.84, "end": 1246.18, "word": " Based", "probability": 0.72216796875}, {"start": 1246.18, "end": 1246.36, "word": " on", "probability": 0.94482421875}, {"start": 1246.36, "end": 1246.58, "word": " the", "probability": 0.73486328125}, {"start": 1246.58, "end": 1246.78, "word": " x", "probability": 0.275390625}, {"start": 1246.78, "end": 1247.02, "word": " dot?", "probability": 0.410400390625}, {"start": 1247.18, "end": 1247.72, "word": " Based", "probability": 0.84130859375}, {"start": 1247.72, "end": 1247.9, "word": " on", "probability": 0.94091796875}, {"start": 1247.9, "end": 1248.04, "word": " the", "probability": 0.8671875}, {"start": 1248.04, "end": 1248.26, "word": " sample", "probability": 0.6923828125}, {"start": 1248.26, "end": 1248.48, "word": " mean.", "probability": 0.2047119140625}, {"start": 1249.32, "end": 1249.88, "word": " And", "probability": 0.9482421875}, 
{"start": 1249.88, "end": 1250.52, "word": " the", "probability": 0.66357421875}, {"start": 1250.52, "end": 1250.7, "word": " other", "probability": 0.8876953125}, {"start": 1250.7, "end": 1251.22, "word": " information", "probability": 0.86865234375}, {"start": 1251.22, "end": 1251.46, "word": " we", "probability": 0.9375}, {"start": 1251.46, "end": 1251.7, "word": " have.", "probability": 0.9453125}], "temperature": 1.0}, {"id": 47, "seek": 128079, "start": 1253.75, "end": 1280.79, "text": " sigma and N. Again, in practice, you only take one sample of size N, so you don't need to take many, many samples, just take one sample. In practice, you don't know Mu, so you don't know if the interval actually contains Mu. However, you do know that", "tokens": [12771, 293, 426, 13, 3764, 11, 294, 3124, 11, 291, 787, 747, 472, 6889, 295, 2744, 426, 11, 370, 291, 500, 380, 643, 281, 747, 867, 11, 867, 10938, 11, 445, 747, 472, 6889, 13, 682, 3124, 11, 291, 500, 380, 458, 15601, 11, 370, 291, 500, 380, 458, 498, 264, 15035, 767, 8306, 15601, 13, 2908, 11, 291, 360, 458, 300], "avg_logprob": -0.18402777825083053, "compression_ratio": 1.62987012987013, "no_speech_prob": 0.0, "words": [{"start": 1253.75, "end": 1254.31, "word": " sigma", "probability": 0.1707763671875}, {"start": 1254.31, "end": 1254.87, "word": " and", "probability": 0.6357421875}, {"start": 1254.87, "end": 1255.17, "word": " N.", "probability": 0.41162109375}, {"start": 1258.61, "end": 1259.15, "word": " Again,", "probability": 0.88818359375}, {"start": 1259.35, "end": 1259.43, "word": " in", "probability": 0.9384765625}, {"start": 1259.43, "end": 1259.85, "word": " practice,", "probability": 0.83154296875}, {"start": 1260.43, "end": 1261.83, "word": " you", "probability": 0.9541015625}, {"start": 1261.83, "end": 1262.13, "word": " only", "probability": 0.90869140625}, {"start": 1262.13, "end": 1262.51, "word": " take", "probability": 0.8740234375}, {"start": 1262.51, "end": 1262.81, "word": " one", 
"probability": 0.91650390625}, {"start": 1262.81, "end": 1263.15, "word": " sample", "probability": 0.89013671875}, {"start": 1263.15, "end": 1263.31, "word": " of", "probability": 0.96044921875}, {"start": 1263.31, "end": 1263.53, "word": " size", "probability": 0.85498046875}, {"start": 1263.53, "end": 1263.79, "word": " N,", "probability": 0.95263671875}, {"start": 1263.93, "end": 1264.01, "word": " so", "probability": 0.8994140625}, {"start": 1264.01, "end": 1264.13, "word": " you", "probability": 0.955078125}, {"start": 1264.13, "end": 1264.29, "word": " don't", "probability": 0.954345703125}, {"start": 1264.29, "end": 1264.49, "word": " need", "probability": 0.8828125}, {"start": 1264.49, "end": 1264.65, "word": " to", "probability": 0.95947265625}, {"start": 1264.65, "end": 1264.85, "word": " take", "probability": 0.8779296875}, {"start": 1264.85, "end": 1265.11, "word": " many,", "probability": 0.90380859375}, {"start": 1265.21, "end": 1265.29, "word": " many", "probability": 0.90625}, {"start": 1265.29, "end": 1265.61, "word": " samples,", "probability": 0.8916015625}, {"start": 1265.73, "end": 1265.89, "word": " just", "probability": 0.89404296875}, {"start": 1265.89, "end": 1266.13, "word": " take", "probability": 0.85107421875}, {"start": 1266.13, "end": 1266.35, "word": " one", "probability": 0.9208984375}, {"start": 1266.35, "end": 1266.63, "word": " sample.", "probability": 0.88916015625}, {"start": 1267.21, "end": 1267.77, "word": " In", "probability": 0.9638671875}, {"start": 1267.77, "end": 1268.21, "word": " practice,", "probability": 0.93310546875}, {"start": 1268.55, "end": 1268.75, "word": " you", "probability": 0.96435546875}, {"start": 1268.75, "end": 1269.11, "word": " don't", "probability": 0.9755859375}, {"start": 1269.11, "end": 1269.37, "word": " know", "probability": 0.90234375}, {"start": 1269.37, "end": 1269.63, "word": " Mu,", "probability": 0.619140625}, {"start": 1270.73, "end": 1271.47, "word": " so", "probability": 
0.9482421875}, {"start": 1271.47, "end": 1271.75, "word": " you", "probability": 0.95703125}, {"start": 1271.75, "end": 1272.11, "word": " don't", "probability": 0.97509765625}, {"start": 1272.11, "end": 1272.39, "word": " know", "probability": 0.8818359375}, {"start": 1272.39, "end": 1272.63, "word": " if", "probability": 0.951171875}, {"start": 1272.63, "end": 1272.83, "word": " the", "probability": 0.92431640625}, {"start": 1272.83, "end": 1273.15, "word": " interval", "probability": 0.78173828125}, {"start": 1273.15, "end": 1273.73, "word": " actually", "probability": 0.8876953125}, {"start": 1273.73, "end": 1274.25, "word": " contains", "probability": 0.904296875}, {"start": 1274.25, "end": 1274.69, "word": " Mu.", "probability": 0.97412109375}, {"start": 1278.89, "end": 1279.45, "word": " However,", "probability": 0.86669921875}, {"start": 1279.55, "end": 1279.75, "word": " you", "probability": 0.9599609375}, {"start": 1279.75, "end": 1279.95, "word": " do", "probability": 0.94384765625}, {"start": 1279.95, "end": 1280.35, "word": " know", "probability": 0.88916015625}, {"start": 1280.35, "end": 1280.79, "word": " that", "probability": 0.9326171875}], "temperature": 1.0}, {"id": 48, "seek": 130778, "start": 1281.76, "end": 1307.78, "text": " 95% of the intervals formed in this manner will contain mu. So again, any interval can be constructed in this chapter one of these. You cannot say that this interval contains mu. 
You have to say that 95% of the intervals formed in this way will capture the population parameter mu.", "tokens": [13420, 4, 295, 264, 26651, 8693, 294, 341, 9060, 486, 5304, 2992, 13, 407, 797, 11, 604, 15035, 393, 312, 17083, 294, 341, 7187, 472, 295, 613, 13, 509, 2644, 584, 300, 341, 15035, 8306, 2992, 13, 509, 362, 281, 584, 300, 13420, 4, 295, 264, 26651, 8693, 294, 341, 636, 486, 7983, 264, 4415, 13075, 2992, 13], "avg_logprob": -0.17783368593555385, "compression_ratio": 1.7961783439490446, "no_speech_prob": 0.0, "words": [{"start": 1281.76, "end": 1282.28, "word": " 95", "probability": 0.65869140625}, {"start": 1282.28, "end": 1282.88, "word": "%", "probability": 0.85595703125}, {"start": 1282.88, "end": 1283.06, "word": " of", "probability": 0.95263671875}, {"start": 1283.06, "end": 1283.18, "word": " the", "probability": 0.80419921875}, {"start": 1283.18, "end": 1283.64, "word": " intervals", "probability": 0.8330078125}, {"start": 1283.64, "end": 1284.26, "word": " formed", "probability": 0.85498046875}, {"start": 1284.26, "end": 1284.46, "word": " in", "probability": 0.92822265625}, {"start": 1284.46, "end": 1284.7, "word": " this", "probability": 0.9423828125}, {"start": 1284.7, "end": 1285.04, "word": " manner", "probability": 0.89306640625}, {"start": 1285.04, "end": 1285.78, "word": " will", "probability": 0.8037109375}, {"start": 1285.78, "end": 1286.14, "word": " contain", "probability": 0.30908203125}, {"start": 1286.14, "end": 1286.38, "word": " mu.", "probability": 0.300537109375}, {"start": 1286.98, "end": 1287.16, "word": " So", "probability": 0.6943359375}, {"start": 1287.16, "end": 1287.46, "word": " again,", "probability": 0.73583984375}, {"start": 1288.5, "end": 1288.8, "word": " any", "probability": 0.87255859375}, {"start": 1288.8, "end": 1289.16, "word": " interval", "probability": 0.95703125}, {"start": 1289.16, "end": 1289.44, "word": " can", "probability": 0.837890625}, {"start": 1289.44, "end": 1289.62, "word": " be", 
"probability": 0.95654296875}, {"start": 1289.62, "end": 1290.24, "word": " constructed", "probability": 0.92529296875}, {"start": 1290.24, "end": 1290.52, "word": " in", "probability": 0.92333984375}, {"start": 1290.52, "end": 1290.74, "word": " this", "probability": 0.93212890625}, {"start": 1290.74, "end": 1291.22, "word": " chapter", "probability": 0.81884765625}, {"start": 1291.22, "end": 1293.22, "word": " one", "probability": 0.451171875}, {"start": 1293.22, "end": 1293.38, "word": " of", "probability": 0.9677734375}, {"start": 1293.38, "end": 1293.66, "word": " these.", "probability": 0.8271484375}, {"start": 1294.98, "end": 1295.66, "word": " You", "probability": 0.94189453125}, {"start": 1295.66, "end": 1296.0, "word": " cannot", "probability": 0.84326171875}, {"start": 1296.0, "end": 1296.44, "word": " say", "probability": 0.94970703125}, {"start": 1296.44, "end": 1296.84, "word": " that", "probability": 0.912109375}, {"start": 1296.84, "end": 1297.16, "word": " this", "probability": 0.9248046875}, {"start": 1297.16, "end": 1297.5, "word": " interval", "probability": 0.96630859375}, {"start": 1297.5, "end": 1297.9, "word": " contains", "probability": 0.63623046875}, {"start": 1297.9, "end": 1298.18, "word": " mu.", "probability": 0.92724609375}, {"start": 1299.36, "end": 1299.56, "word": " You", "probability": 0.94140625}, {"start": 1299.56, "end": 1299.76, "word": " have", "probability": 0.94140625}, {"start": 1299.76, "end": 1299.9, "word": " to", "probability": 0.96826171875}, {"start": 1299.9, "end": 1300.08, "word": " say", "probability": 0.95263671875}, {"start": 1300.08, "end": 1300.4, "word": " that", "probability": 0.92431640625}, {"start": 1300.4, "end": 1301.24, "word": " 95", "probability": 0.96875}, {"start": 1301.24, "end": 1301.84, "word": "%", "probability": 0.9892578125}, {"start": 1301.84, "end": 1302.3, "word": " of", "probability": 0.96435546875}, {"start": 1302.3, "end": 1302.46, "word": " the", "probability": 0.9150390625}, 
{"start": 1302.46, "end": 1302.92, "word": " intervals", "probability": 0.88720703125}, {"start": 1302.92, "end": 1303.5, "word": " formed", "probability": 0.89892578125}, {"start": 1303.5, "end": 1303.72, "word": " in", "probability": 0.92138671875}, {"start": 1303.72, "end": 1303.96, "word": " this", "probability": 0.9482421875}, {"start": 1303.96, "end": 1304.26, "word": " way", "probability": 0.96044921875}, {"start": 1304.26, "end": 1305.12, "word": " will", "probability": 0.8603515625}, {"start": 1305.12, "end": 1305.72, "word": " capture", "probability": 0.90771484375}, {"start": 1305.72, "end": 1306.36, "word": " the", "probability": 0.89990234375}, {"start": 1306.36, "end": 1307.0, "word": " population", "probability": 0.97509765625}, {"start": 1307.0, "end": 1307.46, "word": " parameter", "probability": 0.94287109375}, {"start": 1307.46, "end": 1307.78, "word": " mu.", "probability": 0.81640625}], "temperature": 1.0}, {"id": 49, "seek": 132371, "start": 1311.49, "end": 1323.71, "text": " Another one, thus based on one sample, you actually selected you can be 95% confident your interval will continue.", "tokens": [3996, 472, 11, 8807, 2361, 322, 472, 6889, 11, 291, 767, 8209, 291, 393, 312, 13420, 4, 6679, 428, 15035, 486, 2354, 13], "avg_logprob": -0.2568359387417634, "compression_ratio": 1.1855670103092784, "no_speech_prob": 0.0, "words": [{"start": 1311.49, "end": 1311.87, "word": " Another", "probability": 0.2135009765625}, {"start": 1311.87, "end": 1312.33, "word": " one,", "probability": 0.84912109375}, {"start": 1313.05, "end": 1313.39, "word": " thus", "probability": 0.509765625}, {"start": 1313.39, "end": 1313.89, "word": " based", "probability": 0.63916015625}, {"start": 1313.89, "end": 1314.35, "word": " on", "probability": 0.94921875}, {"start": 1314.35, "end": 1314.97, "word": " one", "probability": 0.88525390625}, {"start": 1314.97, "end": 1315.41, "word": " sample,", "probability": 0.88671875}, {"start": 1316.87, "end": 1317.07, "word": " 
you", "probability": 0.92431640625}, {"start": 1317.07, "end": 1317.63, "word": " actually", "probability": 0.77294921875}, {"start": 1317.63, "end": 1318.81, "word": " selected", "probability": 0.87744140625}, {"start": 1318.81, "end": 1319.45, "word": " you", "probability": 0.46875}, {"start": 1319.45, "end": 1319.75, "word": " can", "probability": 0.9443359375}, {"start": 1319.75, "end": 1319.93, "word": " be", "probability": 0.9404296875}, {"start": 1319.93, "end": 1320.49, "word": " 95", "probability": 0.92041015625}, {"start": 1320.49, "end": 1321.07, "word": "%", "probability": 0.908203125}, {"start": 1321.07, "end": 1321.61, "word": " confident", "probability": 0.95849609375}, {"start": 1321.61, "end": 1322.19, "word": " your", "probability": 0.85498046875}, {"start": 1322.19, "end": 1322.77, "word": " interval", "probability": 0.9794921875}, {"start": 1322.77, "end": 1323.25, "word": " will", "probability": 0.896484375}, {"start": 1323.25, "end": 1323.71, "word": " continue.", "probability": 0.91796875}], "temperature": 1.0}, {"id": 50, "seek": 135432, "start": 1325.02, "end": 1354.32, "text": " So again, this interval might be or might not cover Mu. Since I don't know the value of Mu, I cannot say that this interval will continue Mu. But you can say that 95% of the intervals constructed in this way will continue Mu. That's the interpretation of the confidence interval. 
99, 95, whatever it is.", "tokens": [407, 797, 11, 341, 15035, 1062, 312, 420, 1062, 406, 2060, 15601, 13, 4162, 286, 500, 380, 458, 264, 2158, 295, 15601, 11, 286, 2644, 584, 300, 341, 15035, 486, 2354, 15601, 13, 583, 291, 393, 584, 300, 13420, 4, 295, 264, 26651, 17083, 294, 341, 636, 486, 2354, 15601, 13, 663, 311, 264, 14174, 295, 264, 6687, 15035, 13, 11803, 11, 13420, 11, 2035, 309, 307, 13], "avg_logprob": -0.16032609300336975, "compression_ratio": 1.6888888888888889, "no_speech_prob": 0.0, "words": [{"start": 1325.02, "end": 1325.3, "word": " So", "probability": 0.7001953125}, {"start": 1325.3, "end": 1325.56, "word": " again,", "probability": 0.765625}, {"start": 1325.7, "end": 1325.86, "word": " this", "probability": 0.92919921875}, {"start": 1325.86, "end": 1326.24, "word": " interval", "probability": 0.9619140625}, {"start": 1326.24, "end": 1327.32, "word": " might", "probability": 0.88916015625}, {"start": 1327.32, "end": 1327.6, "word": " be", "probability": 0.91650390625}, {"start": 1327.6, "end": 1327.86, "word": " or", "probability": 0.8544921875}, {"start": 1327.86, "end": 1328.14, "word": " might", "probability": 0.90087890625}, {"start": 1328.14, "end": 1328.58, "word": " not", "probability": 0.94091796875}, {"start": 1328.58, "end": 1329.6, "word": " cover", "probability": 0.77783203125}, {"start": 1329.6, "end": 1330.84, "word": " Mu.", "probability": 0.271484375}, {"start": 1331.54, "end": 1332.14, "word": " Since", "probability": 0.79638671875}, {"start": 1332.14, "end": 1332.4, "word": " I", "probability": 0.96826171875}, {"start": 1332.4, "end": 1332.62, "word": " don't", "probability": 0.934814453125}, {"start": 1332.62, "end": 1332.82, "word": " know", "probability": 0.9140625}, {"start": 1332.82, "end": 1332.96, "word": " the", "probability": 0.9150390625}, {"start": 1332.96, "end": 1333.16, "word": " value", "probability": 0.98583984375}, {"start": 1333.16, "end": 1333.32, "word": " of", "probability": 0.76220703125}, {"start": 
1333.32, "end": 1333.52, "word": " Mu,", "probability": 0.978515625}, {"start": 1333.68, "end": 1333.9, "word": " I", "probability": 0.99365234375}, {"start": 1333.9, "end": 1334.24, "word": " cannot", "probability": 0.82421875}, {"start": 1334.24, "end": 1334.62, "word": " say", "probability": 0.943359375}, {"start": 1334.62, "end": 1334.98, "word": " that", "probability": 0.91845703125}, {"start": 1334.98, "end": 1335.6, "word": " this", "probability": 0.93212890625}, {"start": 1335.6, "end": 1336.0, "word": " interval", "probability": 0.974609375}, {"start": 1336.0, "end": 1336.26, "word": " will", "probability": 0.888671875}, {"start": 1336.26, "end": 1336.62, "word": " continue", "probability": 0.8779296875}, {"start": 1336.62, "end": 1336.82, "word": " Mu.", "probability": 0.81787109375}, {"start": 1336.9, "end": 1337.14, "word": " But", "probability": 0.9326171875}, {"start": 1337.14, "end": 1337.42, "word": " you", "probability": 0.78515625}, {"start": 1337.42, "end": 1337.64, "word": " can", "probability": 0.94677734375}, {"start": 1337.64, "end": 1337.92, "word": " say", "probability": 0.87890625}, {"start": 1337.92, "end": 1338.28, "word": " that", "probability": 0.927734375}, {"start": 1338.28, "end": 1339.22, "word": " 95", "probability": 0.93310546875}, {"start": 1339.22, "end": 1339.78, "word": "%", "probability": 0.837890625}, {"start": 1339.78, "end": 1340.24, "word": " of", "probability": 0.96630859375}, {"start": 1340.24, "end": 1340.44, "word": " the", "probability": 0.89208984375}, {"start": 1340.44, "end": 1341.08, "word": " intervals", "probability": 0.8935546875}, {"start": 1341.08, "end": 1342.34, "word": " constructed", "probability": 0.9150390625}, {"start": 1342.34, "end": 1342.64, "word": " in", "probability": 0.927734375}, {"start": 1342.64, "end": 1342.94, "word": " this", "probability": 0.94873046875}, {"start": 1342.94, "end": 1343.32, "word": " way", "probability": 0.9423828125}, {"start": 1343.32, "end": 1344.44, "word": " will", 
"probability": 0.8447265625}, {"start": 1344.44, "end": 1344.88, "word": " continue", "probability": 0.9130859375}, {"start": 1344.88, "end": 1345.14, "word": " Mu.", "probability": 0.95947265625}, {"start": 1345.9, "end": 1346.4, "word": " That's", "probability": 0.897705078125}, {"start": 1346.4, "end": 1346.92, "word": " the", "probability": 0.9189453125}, {"start": 1346.92, "end": 1347.5, "word": " interpretation", "probability": 0.9130859375}, {"start": 1347.5, "end": 1348.18, "word": " of", "probability": 0.96826171875}, {"start": 1348.18, "end": 1348.34, "word": " the", "probability": 0.8681640625}, {"start": 1348.34, "end": 1348.8, "word": " confidence", "probability": 0.94970703125}, {"start": 1348.8, "end": 1349.3, "word": " interval.", "probability": 0.96875}, {"start": 1352.18, "end": 1352.78, "word": " 99,", "probability": 0.39794921875}, {"start": 1353.04, "end": 1353.42, "word": " 95,", "probability": 0.91845703125}, {"start": 1353.6, "end": 1353.86, "word": " whatever", "probability": 0.89990234375}, {"start": 1353.86, "end": 1354.16, "word": " it", "probability": 0.65380859375}, {"start": 1354.16, "end": 1354.32, "word": " is.", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 51, "seek": 138422, "start": 1356.36, "end": 1384.22, "text": " Now, the process of the estimation is, we have a huge population, and suppose we are interested in the population mean mu, and mu is not given or is unknown. We may select a random sample from this population with any size. 
From this sample, we can compute the average or the mean.", "tokens": [823, 11, 264, 1399, 295, 264, 35701, 307, 11, 321, 362, 257, 2603, 4415, 11, 293, 7297, 321, 366, 3102, 294, 264, 4415, 914, 2992, 11, 293, 2992, 307, 406, 2212, 420, 307, 9841, 13, 492, 815, 3048, 257, 4974, 6889, 490, 341, 4415, 365, 604, 2744, 13, 3358, 341, 6889, 11, 321, 393, 14722, 264, 4274, 420, 264, 914, 13], "avg_logprob": -0.18346774313719041, "compression_ratio": 1.5932203389830508, "no_speech_prob": 0.0, "words": [{"start": 1356.36, "end": 1356.72, "word": " Now,", "probability": 0.93310546875}, {"start": 1356.92, "end": 1357.14, "word": " the", "probability": 0.9033203125}, {"start": 1357.14, "end": 1357.64, "word": " process", "probability": 0.94580078125}, {"start": 1357.64, "end": 1357.86, "word": " of", "probability": 0.96728515625}, {"start": 1357.86, "end": 1358.0, "word": " the", "probability": 0.7900390625}, {"start": 1358.0, "end": 1358.48, "word": " estimation", "probability": 0.97314453125}, {"start": 1358.48, "end": 1359.52, "word": " is,", "probability": 0.935546875}, {"start": 1360.92, "end": 1361.32, "word": " we", "probability": 0.93505859375}, {"start": 1361.32, "end": 1361.6, "word": " have", "probability": 0.9482421875}, {"start": 1361.6, "end": 1361.8, "word": " a", "probability": 0.7548828125}, {"start": 1361.8, "end": 1362.0, "word": " huge", "probability": 0.8974609375}, {"start": 1362.0, "end": 1362.52, "word": " population,", "probability": 0.94140625}, {"start": 1363.74, "end": 1363.96, "word": " and", "probability": 0.93017578125}, {"start": 1363.96, "end": 1364.34, "word": " suppose", "probability": 0.91748046875}, {"start": 1364.34, "end": 1364.58, "word": " we", "probability": 0.90380859375}, {"start": 1364.58, "end": 1364.72, "word": " are", "probability": 0.92578125}, {"start": 1364.72, "end": 1365.22, "word": " interested", "probability": 0.8564453125}, {"start": 1365.22, "end": 1366.24, "word": " in", "probability": 0.94775390625}, {"start": 
1366.24, "end": 1366.56, "word": " the", "probability": 0.9169921875}, {"start": 1366.56, "end": 1367.16, "word": " population", "probability": 0.9404296875}, {"start": 1367.16, "end": 1367.42, "word": " mean", "probability": 0.7392578125}, {"start": 1367.42, "end": 1367.64, "word": " mu,", "probability": 0.300537109375}, {"start": 1368.32, "end": 1369.52, "word": " and", "probability": 0.93798828125}, {"start": 1369.52, "end": 1369.94, "word": " mu", "probability": 0.78466796875}, {"start": 1369.94, "end": 1371.3, "word": " is", "probability": 0.9345703125}, {"start": 1371.3, "end": 1371.78, "word": " not", "probability": 0.9248046875}, {"start": 1371.78, "end": 1372.14, "word": " given", "probability": 0.87548828125}, {"start": 1372.14, "end": 1372.46, "word": " or", "probability": 0.7470703125}, {"start": 1372.46, "end": 1372.66, "word": " is", "probability": 0.8857421875}, {"start": 1372.66, "end": 1373.1, "word": " unknown.", "probability": 0.90234375}, {"start": 1374.04, "end": 1374.4, "word": " We", "probability": 0.96240234375}, {"start": 1374.4, "end": 1374.7, "word": " may", "probability": 0.9375}, {"start": 1374.7, "end": 1375.34, "word": " select", "probability": 0.85009765625}, {"start": 1375.34, "end": 1375.56, "word": " a", "probability": 0.98486328125}, {"start": 1375.56, "end": 1375.84, "word": " random", "probability": 0.857421875}, {"start": 1375.84, "end": 1376.22, "word": " sample", "probability": 0.85693359375}, {"start": 1376.22, "end": 1376.56, "word": " from", "probability": 0.87255859375}, {"start": 1376.56, "end": 1376.78, "word": " this", "probability": 0.94384765625}, {"start": 1376.78, "end": 1377.3, "word": " population", "probability": 0.93505859375}, {"start": 1377.3, "end": 1378.46, "word": " with", "probability": 0.728515625}, {"start": 1378.46, "end": 1378.76, "word": " any", "probability": 0.90966796875}, {"start": 1378.76, "end": 1379.18, "word": " size.", "probability": 0.818359375}, {"start": 1379.98, "end": 1380.3, "word": " 
From", "probability": 0.72412109375}, {"start": 1380.3, "end": 1380.6, "word": " this", "probability": 0.93701171875}, {"start": 1380.6, "end": 1380.94, "word": " sample,", "probability": 0.87060546875}, {"start": 1381.06, "end": 1381.18, "word": " we", "probability": 0.95166015625}, {"start": 1381.18, "end": 1381.52, "word": " can", "probability": 0.94287109375}, {"start": 1381.52, "end": 1382.14, "word": " compute", "probability": 0.91748046875}, {"start": 1382.14, "end": 1383.14, "word": " the", "probability": 0.9033203125}, {"start": 1383.14, "end": 1383.6, "word": " average", "probability": 0.814453125}, {"start": 1383.6, "end": 1383.84, "word": " or", "probability": 0.7998046875}, {"start": 1383.84, "end": 1384.0, "word": " the", "probability": 0.9296875}, {"start": 1384.0, "end": 1384.22, "word": " mean.", "probability": 0.98388671875}], "temperature": 1.0}, {"id": 52, "seek": 141128, "start": 1384.9, "end": 1411.28, "text": " Then after that, we can construct the confidence sample. For example, maybe I'm saying that I am 95% confident that mu is between 4 and 16. So the process of estimation, select a random sample from your population. This sample should be representative in order to generalize your results. Otherwise, you cannot do that. 
So this sample should be representative.", "tokens": [1396, 934, 300, 11, 321, 393, 7690, 264, 6687, 6889, 13, 1171, 1365, 11, 1310, 286, 478, 1566, 300, 286, 669, 13420, 4, 6679, 300, 2992, 307, 1296, 1017, 293, 3165, 13, 407, 264, 1399, 295, 35701, 11, 3048, 257, 4974, 6889, 490, 428, 4415, 13, 639, 6889, 820, 312, 12424, 294, 1668, 281, 2674, 1125, 428, 3542, 13, 10328, 11, 291, 2644, 360, 300, 13, 407, 341, 6889, 820, 312, 12424, 13], "avg_logprob": -0.15424408743510376, "compression_ratio": 1.6116071428571428, "no_speech_prob": 0.0, "words": [{"start": 1384.9, "end": 1385.38, "word": " Then", "probability": 0.52099609375}, {"start": 1385.38, "end": 1386.16, "word": " after", "probability": 0.59912109375}, {"start": 1386.16, "end": 1386.52, "word": " that,", "probability": 0.93212890625}, {"start": 1386.6, "end": 1386.74, "word": " we", "probability": 0.95654296875}, {"start": 1386.74, "end": 1387.52, "word": " can", "probability": 0.88916015625}, {"start": 1387.52, "end": 1388.14, "word": " construct", "probability": 0.87255859375}, {"start": 1388.14, "end": 1388.36, "word": " the", "probability": 0.828125}, {"start": 1388.36, "end": 1388.76, "word": " confidence", "probability": 0.83544921875}, {"start": 1388.76, "end": 1389.06, "word": " sample.", "probability": 0.46923828125}, {"start": 1389.96, "end": 1390.56, "word": " For", "probability": 0.9560546875}, {"start": 1390.56, "end": 1390.92, "word": " example,", "probability": 0.97802734375}, {"start": 1391.08, "end": 1391.3, "word": " maybe", "probability": 0.9189453125}, {"start": 1391.3, "end": 1392.44, "word": " I'm", "probability": 0.875732421875}, {"start": 1392.44, "end": 1392.68, "word": " saying", "probability": 0.88134765625}, {"start": 1392.68, "end": 1392.94, "word": " that", "probability": 0.923828125}, {"start": 1392.94, "end": 1393.14, "word": " I", "probability": 0.9794921875}, {"start": 1393.14, "end": 1393.3, "word": " am", "probability": 0.859375}, {"start": 1393.3, "end": 1393.7, "word": 
" 95", "probability": 0.97412109375}, {"start": 1393.7, "end": 1394.16, "word": "%", "probability": 0.96240234375}, {"start": 1394.16, "end": 1394.66, "word": " confident", "probability": 0.96630859375}, {"start": 1394.66, "end": 1395.08, "word": " that", "probability": 0.93994140625}, {"start": 1395.08, "end": 1395.42, "word": " mu", "probability": 0.59912109375}, {"start": 1395.42, "end": 1395.62, "word": " is", "probability": 0.9443359375}, {"start": 1395.62, "end": 1396.08, "word": " between", "probability": 0.88330078125}, {"start": 1396.08, "end": 1397.0, "word": " 4", "probability": 0.73095703125}, {"start": 1397.0, "end": 1397.18, "word": " and", "probability": 0.93408203125}, {"start": 1397.18, "end": 1397.54, "word": " 16.", "probability": 0.50244140625}, {"start": 1398.08, "end": 1398.28, "word": " So", "probability": 0.88720703125}, {"start": 1398.28, "end": 1398.42, "word": " the", "probability": 0.81689453125}, {"start": 1398.42, "end": 1398.92, "word": " process", "probability": 0.953125}, {"start": 1398.92, "end": 1399.14, "word": " of", "probability": 0.95703125}, {"start": 1399.14, "end": 1399.58, "word": " estimation,", "probability": 0.96533203125}, {"start": 1400.02, "end": 1400.34, "word": " select", "probability": 0.8642578125}, {"start": 1400.34, "end": 1400.5, "word": " a", "probability": 0.912109375}, {"start": 1400.5, "end": 1400.7, "word": " random", "probability": 0.8583984375}, {"start": 1400.7, "end": 1401.04, "word": " sample", "probability": 0.865234375}, {"start": 1401.04, "end": 1401.34, "word": " from", "probability": 0.88232421875}, {"start": 1401.34, "end": 1401.54, "word": " your", "probability": 0.8837890625}, {"start": 1401.54, "end": 1402.02, "word": " population.", "probability": 0.94384765625}, {"start": 1402.76, "end": 1402.98, "word": " This", "probability": 0.89599609375}, {"start": 1402.98, "end": 1403.22, "word": " sample", "probability": 0.93017578125}, {"start": 1403.22, "end": 1403.44, "word": " should", 
"probability": 0.95947265625}, {"start": 1403.44, "end": 1403.58, "word": " be", "probability": 0.955078125}, {"start": 1403.58, "end": 1404.1, "word": " representative", "probability": 0.82861328125}, {"start": 1404.1, "end": 1405.1, "word": " in", "probability": 0.73876953125}, {"start": 1405.1, "end": 1405.36, "word": " order", "probability": 0.92041015625}, {"start": 1405.36, "end": 1405.68, "word": " to", "probability": 0.9716796875}, {"start": 1405.68, "end": 1406.34, "word": " generalize", "probability": 0.925537109375}, {"start": 1406.34, "end": 1406.52, "word": " your", "probability": 0.87646484375}, {"start": 1406.52, "end": 1406.78, "word": " results.", "probability": 0.58203125}, {"start": 1406.9, "end": 1407.18, "word": " Otherwise,", "probability": 0.95068359375}, {"start": 1407.4, "end": 1407.52, "word": " you", "probability": 0.9375}, {"start": 1407.52, "end": 1407.7, "word": " cannot", "probability": 0.8701171875}, {"start": 1407.7, "end": 1407.88, "word": " do", "probability": 0.96044921875}, {"start": 1407.88, "end": 1408.08, "word": " that.", "probability": 0.93603515625}, {"start": 1408.72, "end": 1408.92, "word": " So", "probability": 0.962890625}, {"start": 1408.92, "end": 1409.22, "word": " this", "probability": 0.93310546875}, {"start": 1409.22, "end": 1409.46, "word": " sample", "probability": 0.9052734375}, {"start": 1409.46, "end": 1409.78, "word": " should", "probability": 0.96728515625}, {"start": 1409.78, "end": 1410.56, "word": " be", "probability": 0.9541015625}, {"start": 1410.56, "end": 1411.28, "word": " representative.", "probability": 0.91064453125}], "temperature": 1.0}, {"id": 53, "seek": 144079, "start": 1412.01, "end": 1440.79, "text": " And we, in chapter seven, we discussed four types of probability sampling techniques. Simple random samples, systematic, cluster, and stratified. So by using this sample, we can compute, for example, the sample mean. 
Then after that, I will show how can we do or construct or build the confidence interval. So this is the estimation process.", "tokens": [400, 321, 11, 294, 7187, 3407, 11, 321, 7152, 1451, 3467, 295, 8482, 21179, 7512, 13, 21532, 4974, 10938, 11, 27249, 11, 13630, 11, 293, 23674, 2587, 13, 407, 538, 1228, 341, 6889, 11, 321, 393, 14722, 11, 337, 1365, 11, 264, 6889, 914, 13, 1396, 934, 300, 11, 286, 486, 855, 577, 393, 321, 360, 420, 7690, 420, 1322, 264, 6687, 15035, 13, 407, 341, 307, 264, 35701, 1399, 13], "avg_logprob": -0.18337673217886025, "compression_ratio": 1.576036866359447, "no_speech_prob": 0.0, "words": [{"start": 1412.01, "end": 1412.63, "word": " And", "probability": 0.80908203125}, {"start": 1412.63, "end": 1412.83, "word": " we,", "probability": 0.6455078125}, {"start": 1413.11, "end": 1413.29, "word": " in", "probability": 0.90185546875}, {"start": 1413.29, "end": 1413.53, "word": " chapter", "probability": 0.53515625}, {"start": 1413.53, "end": 1413.83, "word": " seven,", "probability": 0.7392578125}, {"start": 1413.95, "end": 1414.03, "word": " we", "probability": 0.83056640625}, {"start": 1414.03, "end": 1414.73, "word": " discussed", "probability": 0.77685546875}, {"start": 1414.73, "end": 1415.33, "word": " four", "probability": 0.931640625}, {"start": 1415.33, "end": 1415.85, "word": " types", "probability": 0.83154296875}, {"start": 1415.85, "end": 1416.37, "word": " of", "probability": 0.96826171875}, {"start": 1416.37, "end": 1416.87, "word": " probability", "probability": 0.93115234375}, {"start": 1416.87, "end": 1417.31, "word": " sampling", "probability": 0.904296875}, {"start": 1417.31, "end": 1417.95, "word": " techniques.", "probability": 0.90380859375}, {"start": 1418.41, "end": 1418.93, "word": " Simple", "probability": 0.87353515625}, {"start": 1418.93, "end": 1419.21, "word": " random", "probability": 0.53271484375}, {"start": 1419.21, "end": 1419.59, "word": " samples,", "probability": 0.533203125}, {"start": 1419.77, "end": 
1420.23, "word": " systematic,", "probability": 0.88525390625}, {"start": 1420.79, "end": 1421.77, "word": " cluster,", "probability": 0.69677734375}, {"start": 1422.75, "end": 1423.25, "word": " and", "probability": 0.9296875}, {"start": 1423.25, "end": 1424.21, "word": " stratified.", "probability": 0.929443359375}, {"start": 1425.67, "end": 1426.33, "word": " So", "probability": 0.94873046875}, {"start": 1426.33, "end": 1426.51, "word": " by", "probability": 0.8291015625}, {"start": 1426.51, "end": 1426.73, "word": " using", "probability": 0.93603515625}, {"start": 1426.73, "end": 1426.95, "word": " this", "probability": 0.90478515625}, {"start": 1426.95, "end": 1427.21, "word": " sample,", "probability": 0.62451171875}, {"start": 1427.33, "end": 1427.43, "word": " we", "probability": 0.94140625}, {"start": 1427.43, "end": 1427.63, "word": " can", "probability": 0.93994140625}, {"start": 1427.63, "end": 1427.97, "word": " compute,", "probability": 0.90185546875}, {"start": 1428.09, "end": 1428.15, "word": " for", "probability": 0.94873046875}, {"start": 1428.15, "end": 1428.49, "word": " example,", "probability": 0.970703125}, {"start": 1428.61, "end": 1428.75, "word": " the", "probability": 0.9130859375}, {"start": 1428.75, "end": 1429.03, "word": " sample", "probability": 0.85888671875}, {"start": 1429.03, "end": 1429.31, "word": " mean.", "probability": 0.95654296875}, {"start": 1430.21, "end": 1430.49, "word": " Then", "probability": 0.85009765625}, {"start": 1430.49, "end": 1430.85, "word": " after", "probability": 0.68359375}, {"start": 1430.85, "end": 1431.17, "word": " that,", "probability": 0.923828125}, {"start": 1431.23, "end": 1431.31, "word": " I", "probability": 0.7646484375}, {"start": 1431.31, "end": 1431.41, "word": " will", "probability": 0.64892578125}, {"start": 1431.41, "end": 1431.59, "word": " show", "probability": 0.93359375}, {"start": 1431.59, "end": 1431.75, "word": " how", "probability": 0.8623046875}, {"start": 1431.75, "end": 
1431.99, "word": " can", "probability": 0.84033203125}, {"start": 1431.99, "end": 1432.31, "word": " we", "probability": 0.95654296875}, {"start": 1432.31, "end": 1433.33, "word": " do", "probability": 0.728515625}, {"start": 1433.33, "end": 1433.67, "word": " or", "probability": 0.8115234375}, {"start": 1433.67, "end": 1434.33, "word": " construct", "probability": 0.7373046875}, {"start": 1434.33, "end": 1435.31, "word": " or", "probability": 0.91064453125}, {"start": 1435.31, "end": 1435.67, "word": " build", "probability": 0.90478515625}, {"start": 1435.67, "end": 1435.95, "word": " the", "probability": 0.8837890625}, {"start": 1435.95, "end": 1436.47, "word": " confidence", "probability": 0.94677734375}, {"start": 1436.47, "end": 1436.87, "word": " interval.", "probability": 0.98095703125}, {"start": 1437.25, "end": 1437.41, "word": " So", "probability": 0.95849609375}, {"start": 1437.41, "end": 1437.65, "word": " this", "probability": 0.9296875}, {"start": 1437.65, "end": 1437.89, "word": " is", "probability": 0.943359375}, {"start": 1437.89, "end": 1438.31, "word": " the", "probability": 0.92724609375}, {"start": 1438.31, "end": 1439.15, "word": " estimation", "probability": 0.96337890625}, {"start": 1439.15, "end": 1440.79, "word": " process.", "probability": 0.9462890625}], "temperature": 1.0}, {"id": 54, "seek": 146874, "start": 1441.82, "end": 1468.74, "text": " Now, the general formula for all confidence intervals is, the general formula has mainly three components, point estimate, plus or minus, critical value, times standard error. Let's see the definition for each component. 
The first one, point estimate.", "tokens": [823, 11, 264, 2674, 8513, 337, 439, 6687, 26651, 307, 11, 264, 2674, 8513, 575, 8704, 1045, 6677, 11, 935, 12539, 11, 1804, 420, 3175, 11, 4924, 2158, 11, 1413, 3832, 6713, 13, 961, 311, 536, 264, 7123, 337, 1184, 6542, 13, 440, 700, 472, 11, 935, 12539, 13], "avg_logprob": -0.2606250071525574, "compression_ratio": 1.6363636363636365, "no_speech_prob": 0.0, "words": [{"start": 1441.82, "end": 1442.14, "word": " Now,", "probability": 0.73095703125}, {"start": 1442.62, "end": 1442.84, "word": " the", "probability": 0.82373046875}, {"start": 1442.84, "end": 1443.24, "word": " general", "probability": 0.8408203125}, {"start": 1443.24, "end": 1443.78, "word": " formula", "probability": 0.90478515625}, {"start": 1443.78, "end": 1444.58, "word": " for", "probability": 0.890625}, {"start": 1444.58, "end": 1444.98, "word": " all", "probability": 0.8193359375}, {"start": 1444.98, "end": 1445.6, "word": " confidence", "probability": 0.818359375}, {"start": 1445.6, "end": 1446.12, "word": " intervals", "probability": 0.52490234375}, {"start": 1446.12, "end": 1447.34, "word": " is,", "probability": 0.82861328125}, {"start": 1450.12, "end": 1450.92, "word": " the", "probability": 0.623046875}, {"start": 1450.92, "end": 1451.16, "word": " general", "probability": 0.84375}, {"start": 1451.16, "end": 1451.52, "word": " formula", "probability": 0.8955078125}, {"start": 1451.52, "end": 1451.78, "word": " has", "probability": 0.8935546875}, {"start": 1451.78, "end": 1452.1, "word": " mainly", "probability": 0.9599609375}, {"start": 1452.1, "end": 1452.34, "word": " three", "probability": 0.77978515625}, {"start": 1452.34, "end": 1452.94, "word": " components,", "probability": 0.91357421875}, {"start": 1454.06, "end": 1454.56, "word": " point", "probability": 0.79736328125}, {"start": 1454.56, "end": 1455.24, "word": " estimate,", "probability": 0.8046875}, {"start": 1457.3, "end": 1457.62, "word": " plus", "probability": 0.78759765625}, 
{"start": 1457.62, "end": 1457.98, "word": " or", "probability": 0.9501953125}, {"start": 1457.98, "end": 1458.5, "word": " minus,", "probability": 0.98828125}, {"start": 1459.86, "end": 1460.36, "word": " critical", "probability": 0.89306640625}, {"start": 1460.36, "end": 1460.98, "word": " value,", "probability": 0.9677734375}, {"start": 1462.08, "end": 1462.84, "word": " times", "probability": 0.875}, {"start": 1462.84, "end": 1463.38, "word": " standard", "probability": 0.433837890625}, {"start": 1463.38, "end": 1463.7, "word": " error.", "probability": 0.541015625}, {"start": 1464.12, "end": 1464.48, "word": " Let's", "probability": 0.92333984375}, {"start": 1464.48, "end": 1464.64, "word": " see", "probability": 0.9189453125}, {"start": 1464.64, "end": 1464.84, "word": " the", "probability": 0.91552734375}, {"start": 1464.84, "end": 1465.18, "word": " definition", "probability": 0.90673828125}, {"start": 1465.18, "end": 1465.48, "word": " for", "probability": 0.890625}, {"start": 1465.48, "end": 1465.72, "word": " each", "probability": 0.9345703125}, {"start": 1465.72, "end": 1466.16, "word": " component.", "probability": 0.85888671875}, {"start": 1466.94, "end": 1467.56, "word": " The", "probability": 0.87548828125}, {"start": 1467.56, "end": 1467.8, "word": " first", "probability": 0.88671875}, {"start": 1467.8, "end": 1468.0, "word": " one,", "probability": 0.896484375}, {"start": 1468.06, "end": 1468.28, "word": " point", "probability": 0.921875}, {"start": 1468.28, "end": 1468.74, "word": " estimate.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 55, "seek": 149884, "start": 1470.64, "end": 1498.84, "text": " is the sample statistic estimating this population parameter of interest. For example, if you go back to the previous age example, and suppose X bar is 18. 
So you can say that the point estimate, since 18 is the sample mean, and we are interested in the population mean, so 18 is", "tokens": [307, 264, 6889, 29588, 8017, 990, 341, 4415, 13075, 295, 1179, 13, 1171, 1365, 11, 498, 291, 352, 646, 281, 264, 3894, 3205, 1365, 11, 293, 7297, 1783, 2159, 307, 2443, 13, 407, 291, 393, 584, 300, 264, 935, 12539, 11, 1670, 2443, 307, 264, 6889, 914, 11, 293, 321, 366, 3102, 294, 264, 4415, 914, 11, 370, 2443, 307], "avg_logprob": -0.2118340232333199, "compression_ratio": 1.6568047337278107, "no_speech_prob": 0.0, "words": [{"start": 1470.64, "end": 1470.96, "word": " is", "probability": 0.412841796875}, {"start": 1470.96, "end": 1471.14, "word": " the", "probability": 0.84619140625}, {"start": 1471.14, "end": 1471.4, "word": " sample", "probability": 0.73291015625}, {"start": 1471.4, "end": 1472.02, "word": " statistic", "probability": 0.7275390625}, {"start": 1472.02, "end": 1472.98, "word": " estimating", "probability": 0.90234375}, {"start": 1472.98, "end": 1473.24, "word": " this", "probability": 0.38916015625}, {"start": 1473.24, "end": 1473.88, "word": " population", "probability": 0.95263671875}, {"start": 1473.88, "end": 1474.42, "word": " parameter", "probability": 0.94580078125}, {"start": 1474.42, "end": 1474.64, "word": " of", "probability": 0.8974609375}, {"start": 1474.64, "end": 1474.96, "word": " interest.", "probability": 0.888671875}, {"start": 1475.16, "end": 1475.28, "word": " For", "probability": 0.9501953125}, {"start": 1475.28, "end": 1475.66, "word": " example,", "probability": 0.974609375}, {"start": 1476.08, "end": 1476.28, "word": " if", "probability": 0.9423828125}, {"start": 1476.28, "end": 1476.38, "word": " you", "probability": 0.94482421875}, {"start": 1476.38, "end": 1476.58, "word": " go", "probability": 0.95947265625}, {"start": 1476.58, "end": 1476.92, "word": " back", "probability": 0.87451171875}, {"start": 1476.92, "end": 1477.18, "word": " to", "probability": 0.96484375}, {"start": 
1477.18, "end": 1477.36, "word": " the", "probability": 0.9208984375}, {"start": 1477.36, "end": 1477.88, "word": " previous", "probability": 0.85595703125}, {"start": 1477.88, "end": 1479.4, "word": " age", "probability": 0.41015625}, {"start": 1479.4, "end": 1480.04, "word": " example,", "probability": 0.849609375}, {"start": 1482.28, "end": 1483.38, "word": " and", "probability": 0.88916015625}, {"start": 1483.38, "end": 1483.86, "word": " suppose", "probability": 0.91650390625}, {"start": 1483.86, "end": 1484.5, "word": " X", "probability": 0.51806640625}, {"start": 1484.5, "end": 1484.84, "word": " bar", "probability": 0.85791015625}, {"start": 1484.84, "end": 1485.04, "word": " is", "probability": 0.94189453125}, {"start": 1485.04, "end": 1485.4, "word": " 18.", "probability": 0.8916015625}, {"start": 1486.68, "end": 1487.14, "word": " So", "probability": 0.9541015625}, {"start": 1487.14, "end": 1487.32, "word": " you", "probability": 0.6572265625}, {"start": 1487.32, "end": 1487.56, "word": " can", "probability": 0.9443359375}, {"start": 1487.56, "end": 1487.78, "word": " say", "probability": 0.6494140625}, {"start": 1487.78, "end": 1488.06, "word": " that", "probability": 0.92236328125}, {"start": 1488.06, "end": 1489.28, "word": " the", "probability": 0.6240234375}, {"start": 1489.28, "end": 1489.56, "word": " point", "probability": 0.9658203125}, {"start": 1489.56, "end": 1490.18, "word": " estimate,", "probability": 0.919921875}, {"start": 1490.64, "end": 1490.94, "word": " since", "probability": 0.869140625}, {"start": 1490.94, "end": 1491.36, "word": " 18", "probability": 0.96337890625}, {"start": 1491.36, "end": 1491.64, "word": " is", "probability": 0.94287109375}, {"start": 1491.64, "end": 1493.3, "word": " the", "probability": 0.89794921875}, {"start": 1493.3, "end": 1493.64, "word": " sample", "probability": 0.875}, {"start": 1493.64, "end": 1493.92, "word": " mean,", "probability": 0.98095703125}, {"start": 1494.18, "end": 1494.44, "word": " 
and", "probability": 0.9384765625}, {"start": 1494.44, "end": 1494.54, "word": " we", "probability": 0.94677734375}, {"start": 1494.54, "end": 1494.68, "word": " are", "probability": 0.9248046875}, {"start": 1494.68, "end": 1495.12, "word": " interested", "probability": 0.85791015625}, {"start": 1495.12, "end": 1495.82, "word": " in", "probability": 0.94775390625}, {"start": 1495.82, "end": 1495.94, "word": " the", "probability": 0.9189453125}, {"start": 1495.94, "end": 1496.34, "word": " population", "probability": 0.94384765625}, {"start": 1496.34, "end": 1496.72, "word": " mean,", "probability": 0.9619140625}, {"start": 1497.22, "end": 1497.52, "word": " so", "probability": 0.9423828125}, {"start": 1497.52, "end": 1498.26, "word": " 18", "probability": 0.93505859375}, {"start": 1498.26, "end": 1498.84, "word": " is", "probability": 0.93310546875}], "temperature": 1.0}, {"id": 56, "seek": 152607, "start": 1500.07, "end": 1526.07, "text": " a point estimate for mu. This is the first one, this is the first component. So a point estimate is the simplest statistic, x bar is the simplest statistic. Estimating the population parameter of interest, I am interested suppose in the population mean, mu. 
So we have estimate.", "tokens": [257, 935, 12539, 337, 2992, 13, 639, 307, 264, 700, 472, 11, 341, 307, 264, 700, 6542, 13, 407, 257, 935, 12539, 307, 264, 22811, 29588, 11, 2031, 2159, 307, 264, 22811, 29588, 13, 4410, 332, 990, 264, 4415, 13075, 295, 1179, 11, 286, 669, 3102, 7297, 294, 264, 4415, 914, 11, 2992, 13, 407, 321, 362, 12539, 13], "avg_logprob": -0.3062499985098839, "compression_ratio": 1.86, "no_speech_prob": 0.0, "words": [{"start": 1500.07, "end": 1500.31, "word": " a", "probability": 0.07574462890625}, {"start": 1500.31, "end": 1500.59, "word": " point", "probability": 0.857421875}, {"start": 1500.59, "end": 1503.85, "word": " estimate", "probability": 0.845703125}, {"start": 1503.85, "end": 1506.21, "word": " for", "probability": 0.7001953125}, {"start": 1506.21, "end": 1506.41, "word": " mu.", "probability": 0.324462890625}, {"start": 1508.51, "end": 1508.99, "word": " This", "probability": 0.80322265625}, {"start": 1508.99, "end": 1509.09, "word": " is", "probability": 0.93896484375}, {"start": 1509.09, "end": 1509.21, "word": " the", "probability": 0.91650390625}, {"start": 1509.21, "end": 1509.41, "word": " first", "probability": 0.8740234375}, {"start": 1509.41, "end": 1509.67, "word": " one,", "probability": 0.85107421875}, {"start": 1509.77, "end": 1509.93, "word": " this", "probability": 0.469482421875}, {"start": 1509.93, "end": 1509.97, "word": " is", "probability": 0.9443359375}, {"start": 1509.97, "end": 1510.05, "word": " the", "probability": 0.92041015625}, {"start": 1510.05, "end": 1510.27, "word": " first", "probability": 0.8896484375}, {"start": 1510.27, "end": 1510.65, "word": " component.", "probability": 0.853515625}, {"start": 1511.49, "end": 1511.67, "word": " So", "probability": 0.9384765625}, {"start": 1511.67, "end": 1511.79, "word": " a", "probability": 0.4970703125}, {"start": 1511.79, "end": 1511.97, "word": " point", "probability": 0.97509765625}, {"start": 1511.97, "end": 1512.37, "word": " estimate", "probability": 
0.92724609375}, {"start": 1512.37, "end": 1512.79, "word": " is", "probability": 0.93505859375}, {"start": 1512.79, "end": 1512.95, "word": " the", "probability": 0.77099609375}, {"start": 1512.95, "end": 1513.25, "word": " simplest", "probability": 0.321044921875}, {"start": 1513.25, "end": 1513.85, "word": " statistic,", "probability": 0.787109375}, {"start": 1514.01, "end": 1514.17, "word": " x", "probability": 0.623046875}, {"start": 1514.17, "end": 1514.37, "word": " bar", "probability": 0.8896484375}, {"start": 1514.37, "end": 1514.55, "word": " is", "probability": 0.939453125}, {"start": 1514.55, "end": 1514.61, "word": " the", "probability": 0.52880859375}, {"start": 1514.61, "end": 1514.83, "word": " simplest", "probability": 0.91796875}, {"start": 1514.83, "end": 1515.37, "word": " statistic.", "probability": 0.8447265625}, {"start": 1516.57, "end": 1517.05, "word": " Estimating", "probability": 0.9822591145833334}, {"start": 1517.05, "end": 1517.25, "word": " the", "probability": 0.9091796875}, {"start": 1517.25, "end": 1517.67, "word": " population", "probability": 0.974609375}, {"start": 1517.67, "end": 1518.13, "word": " parameter", "probability": 0.908203125}, {"start": 1518.13, "end": 1518.35, "word": " of", "probability": 0.9638671875}, {"start": 1518.35, "end": 1518.81, "word": " interest,", "probability": 0.904296875}, {"start": 1518.93, "end": 1519.01, "word": " I", "probability": 0.68115234375}, {"start": 1519.01, "end": 1519.21, "word": " am", "probability": 0.892578125}, {"start": 1519.21, "end": 1519.93, "word": " interested", "probability": 0.9140625}, {"start": 1519.93, "end": 1520.53, "word": " suppose", "probability": 0.3359375}, {"start": 1520.53, "end": 1520.97, "word": " in", "probability": 0.85205078125}, {"start": 1520.97, "end": 1521.13, "word": " the", "probability": 0.8662109375}, {"start": 1521.13, "end": 1521.55, "word": " population", "probability": 0.96240234375}, {"start": 1521.55, "end": 1521.93, "word": " mean,", 
"probability": 0.9619140625}, {"start": 1522.27, "end": 1522.45, "word": " mu.", "probability": 0.68359375}, {"start": 1524.75, "end": 1525.23, "word": " So", "probability": 0.95458984375}, {"start": 1525.23, "end": 1525.37, "word": " we", "probability": 0.796875}, {"start": 1525.37, "end": 1525.49, "word": " have", "probability": 0.94482421875}, {"start": 1525.49, "end": 1526.07, "word": " estimate.", "probability": 0.830078125}], "temperature": 1.0}, {"id": 57, "seek": 155811, "start": 1529.45, "end": 1558.11, "text": " A plus or minus, a critical value. The critical value here is a table value. That means we have to go back to the normal table again, the one we had discussed in Chapter 6. A critical value is a table value based on the sampling distribution in Chapter 7 of the point estimate and the desired confidence interval. For example, let's talk about again 90%.", "tokens": [316, 1804, 420, 3175, 11, 257, 4924, 2158, 13, 440, 4924, 2158, 510, 307, 257, 3199, 2158, 13, 663, 1355, 321, 362, 281, 352, 646, 281, 264, 2710, 3199, 797, 11, 264, 472, 321, 632, 7152, 294, 18874, 1386, 13, 316, 4924, 2158, 307, 257, 3199, 2158, 2361, 322, 264, 21179, 7316, 294, 18874, 1614, 295, 264, 935, 12539, 293, 264, 14721, 6687, 15035, 13, 1171, 1365, 11, 718, 311, 751, 466, 797, 4289, 6856], "avg_logprob": -0.1826685804285501, "compression_ratio": 1.6745283018867925, "no_speech_prob": 0.0, "words": [{"start": 1529.45, "end": 1529.63, "word": " A", "probability": 0.29931640625}, {"start": 1529.63, "end": 1529.87, "word": " plus", "probability": 0.923828125}, {"start": 1529.87, "end": 1530.09, "word": " or", "probability": 0.95458984375}, {"start": 1530.09, "end": 1530.51, "word": " minus,", "probability": 0.96435546875}, {"start": 1531.01, "end": 1531.21, "word": " a", "probability": 0.6171875}, {"start": 1531.21, "end": 1531.51, "word": " critical", "probability": 0.94140625}, {"start": 1531.51, "end": 1531.87, "word": " value.", "probability": 0.97412109375}, {"start": 
1532.01, "end": 1532.17, "word": " The", "probability": 0.491455078125}, {"start": 1532.17, "end": 1532.53, "word": " critical", "probability": 0.94287109375}, {"start": 1532.53, "end": 1532.91, "word": " value", "probability": 0.974609375}, {"start": 1532.91, "end": 1533.25, "word": " here", "probability": 0.84619140625}, {"start": 1533.25, "end": 1534.35, "word": " is", "probability": 0.91650390625}, {"start": 1534.35, "end": 1534.67, "word": " a", "probability": 0.97900390625}, {"start": 1534.67, "end": 1534.91, "word": " table", "probability": 0.81884765625}, {"start": 1534.91, "end": 1535.37, "word": " value.", "probability": 0.9736328125}, {"start": 1535.73, "end": 1535.99, "word": " That", "probability": 0.89794921875}, {"start": 1535.99, "end": 1536.21, "word": " means", "probability": 0.92529296875}, {"start": 1536.21, "end": 1536.33, "word": " we", "probability": 0.89697265625}, {"start": 1536.33, "end": 1536.49, "word": " have", "probability": 0.94677734375}, {"start": 1536.49, "end": 1536.59, "word": " to", "probability": 0.96875}, {"start": 1536.59, "end": 1536.75, "word": " go", "probability": 0.96142578125}, {"start": 1536.75, "end": 1536.99, "word": " back", "probability": 0.8740234375}, {"start": 1536.99, "end": 1537.15, "word": " to", "probability": 0.9638671875}, {"start": 1537.15, "end": 1537.27, "word": " the", "probability": 0.90576171875}, {"start": 1537.27, "end": 1537.57, "word": " normal", "probability": 0.89697265625}, {"start": 1537.57, "end": 1537.85, "word": " table", "probability": 0.87890625}, {"start": 1537.85, "end": 1538.25, "word": " again,", "probability": 0.951171875}, {"start": 1539.15, "end": 1539.27, "word": " the", "probability": 0.90185546875}, {"start": 1539.27, "end": 1539.43, "word": " one", "probability": 0.9306640625}, {"start": 1539.43, "end": 1539.63, "word": " we", "probability": 0.95556640625}, {"start": 1539.63, "end": 1540.39, "word": " had", "probability": 0.68994140625}, {"start": 1540.39, "end": 1540.89, 
"word": " discussed", "probability": 0.8876953125}, {"start": 1540.89, "end": 1541.13, "word": " in", "probability": 0.931640625}, {"start": 1541.13, "end": 1541.47, "word": " Chapter", "probability": 0.33544921875}, {"start": 1541.47, "end": 1542.61, "word": " 6.", "probability": 0.642578125}, {"start": 1543.13, "end": 1543.33, "word": " A", "probability": 0.373046875}, {"start": 1543.33, "end": 1543.57, "word": " critical", "probability": 0.9248046875}, {"start": 1543.57, "end": 1543.93, "word": " value", "probability": 0.9736328125}, {"start": 1543.93, "end": 1544.07, "word": " is", "probability": 0.94189453125}, {"start": 1544.07, "end": 1544.19, "word": " a", "probability": 0.96728515625}, {"start": 1544.19, "end": 1544.41, "word": " table", "probability": 0.91796875}, {"start": 1544.41, "end": 1544.71, "word": " value", "probability": 0.95947265625}, {"start": 1544.71, "end": 1544.99, "word": " based", "probability": 0.90673828125}, {"start": 1544.99, "end": 1545.25, "word": " on", "probability": 0.9482421875}, {"start": 1545.25, "end": 1545.43, "word": " the", "probability": 0.88818359375}, {"start": 1545.43, "end": 1545.79, "word": " sampling", "probability": 0.9677734375}, {"start": 1545.79, "end": 1546.61, "word": " distribution", "probability": 0.87353515625}, {"start": 1546.61, "end": 1547.47, "word": " in", "probability": 0.6005859375}, {"start": 1547.47, "end": 1547.71, "word": " Chapter", "probability": 0.88623046875}, {"start": 1547.71, "end": 1548.17, "word": " 7", "probability": 0.9912109375}, {"start": 1548.17, "end": 1548.95, "word": " of", "probability": 0.8076171875}, {"start": 1548.95, "end": 1549.11, "word": " the", "probability": 0.91015625}, {"start": 1549.11, "end": 1549.37, "word": " point", "probability": 0.93310546875}, {"start": 1549.37, "end": 1549.85, "word": " estimate", "probability": 0.8681640625}, {"start": 1549.85, "end": 1550.25, "word": " and", "probability": 0.8544921875}, {"start": 1550.25, "end": 1550.37, "word": " the", 
"probability": 0.89599609375}, {"start": 1550.37, "end": 1550.75, "word": " desired", "probability": 0.84375}, {"start": 1550.75, "end": 1551.33, "word": " confidence", "probability": 0.974609375}, {"start": 1551.33, "end": 1551.75, "word": " interval.", "probability": 0.984375}, {"start": 1551.95, "end": 1552.11, "word": " For", "probability": 0.92041015625}, {"start": 1552.11, "end": 1552.49, "word": " example,", "probability": 0.97509765625}, {"start": 1553.79, "end": 1554.87, "word": " let's", "probability": 0.95703125}, {"start": 1554.87, "end": 1555.15, "word": " talk", "probability": 0.89013671875}, {"start": 1555.15, "end": 1555.43, "word": " about", "probability": 0.8701171875}, {"start": 1555.43, "end": 1555.85, "word": " again", "probability": 0.88330078125}, {"start": 1555.85, "end": 1558.11, "word": " 90%.", "probability": 0.554443359375}], "temperature": 1.0}, {"id": 58, "seek": 158802, "start": 1562.68, "end": 1588.02, "text": " Confidence is in this case. This area represents 90%. The area from lower limit to upper limit in this area. Now again by symmetric distribution we know that the remaining 10% is split into two halves. 
5% to the right and 5% to the left.", "tokens": [11701, 2778, 307, 294, 341, 1389, 13, 639, 1859, 8855, 4289, 6856, 440, 1859, 490, 3126, 4948, 281, 6597, 4948, 294, 341, 1859, 13, 823, 797, 538, 32330, 7316, 321, 458, 300, 264, 8877, 1266, 4, 307, 7472, 666, 732, 38490, 13, 1025, 4, 281, 264, 558, 293, 1025, 4, 281, 264, 1411, 13], "avg_logprob": -0.22414772077040238, "compression_ratio": 1.4782608695652173, "no_speech_prob": 0.0, "words": [{"start": 1562.6800000000003, "end": 1563.2800000000002, "word": " Confidence", "probability": 0.620361328125}, {"start": 1563.2800000000002, "end": 1563.88, "word": " is", "probability": 0.53515625}, {"start": 1563.88, "end": 1564.0, "word": " in", "probability": 0.84619140625}, {"start": 1564.0, "end": 1564.26, "word": " this", "probability": 0.939453125}, {"start": 1564.26, "end": 1564.68, "word": " case.", "probability": 0.912109375}, {"start": 1567.14, "end": 1567.68, "word": " This", "probability": 0.765625}, {"start": 1567.68, "end": 1568.0, "word": " area", "probability": 0.87353515625}, {"start": 1568.0, "end": 1568.38, "word": " represents", "probability": 0.69970703125}, {"start": 1568.38, "end": 1570.14, "word": " 90%.", "probability": 0.64697265625}, {"start": 1570.14, "end": 1571.4, "word": " The", "probability": 0.7646484375}, {"start": 1571.4, "end": 1571.64, "word": " area", "probability": 0.8955078125}, {"start": 1571.64, "end": 1571.9, "word": " from", "probability": 0.8564453125}, {"start": 1571.9, "end": 1572.18, "word": " lower", "probability": 0.787109375}, {"start": 1572.18, "end": 1572.52, "word": " limit", "probability": 0.9248046875}, {"start": 1572.52, "end": 1572.72, "word": " to", "probability": 0.95849609375}, {"start": 1572.72, "end": 1572.96, "word": " upper", "probability": 0.77734375}, {"start": 1572.96, "end": 1573.24, "word": " limit", "probability": 0.96044921875}, {"start": 1573.24, "end": 1573.46, "word": " in", "probability": 0.190185546875}, {"start": 1573.46, "end": 1573.62, "word": " 
this", "probability": 0.9375}, {"start": 1573.62, "end": 1573.94, "word": " area.", "probability": 0.8828125}, {"start": 1575.52, "end": 1575.8, "word": " Now", "probability": 0.9052734375}, {"start": 1575.8, "end": 1576.0, "word": " again", "probability": 0.7802734375}, {"start": 1576.0, "end": 1576.28, "word": " by", "probability": 0.64990234375}, {"start": 1576.28, "end": 1577.4, "word": " symmetric", "probability": 0.76318359375}, {"start": 1577.4, "end": 1578.34, "word": " distribution", "probability": 0.853515625}, {"start": 1578.34, "end": 1578.58, "word": " we", "probability": 0.5166015625}, {"start": 1578.58, "end": 1578.7, "word": " know", "probability": 0.87744140625}, {"start": 1578.7, "end": 1579.04, "word": " that", "probability": 0.931640625}, {"start": 1579.04, "end": 1579.72, "word": " the", "probability": 0.84326171875}, {"start": 1579.72, "end": 1580.12, "word": " remaining", "probability": 0.8740234375}, {"start": 1580.12, "end": 1580.44, "word": " 10", "probability": 0.9111328125}, {"start": 1580.44, "end": 1581.0, "word": "%", "probability": 0.94189453125}, {"start": 1581.0, "end": 1582.96, "word": " is", "probability": 0.8955078125}, {"start": 1582.96, "end": 1583.26, "word": " split", "probability": 0.84326171875}, {"start": 1583.26, "end": 1583.5, "word": " into", "probability": 0.80615234375}, {"start": 1583.5, "end": 1583.72, "word": " two", "probability": 0.8056640625}, {"start": 1583.72, "end": 1584.1, "word": " halves.", "probability": 0.88330078125}, {"start": 1584.42, "end": 1584.7, "word": " 5", "probability": 0.87939453125}, {"start": 1584.7, "end": 1585.12, "word": "%", "probability": 0.9814453125}, {"start": 1585.12, "end": 1585.34, "word": " to", "probability": 0.9404296875}, {"start": 1585.34, "end": 1585.48, "word": " the", "probability": 0.91259765625}, {"start": 1585.48, "end": 1585.76, "word": " right", "probability": 0.916015625}, {"start": 1585.76, "end": 1586.86, "word": " and", "probability": 0.8154296875}, {"start": 
1586.86, "end": 1587.16, "word": " 5", "probability": 0.994140625}, {"start": 1587.16, "end": 1587.46, "word": "%", "probability": 0.99755859375}, {"start": 1587.46, "end": 1587.68, "word": " to", "probability": 0.96533203125}, {"start": 1587.68, "end": 1587.8, "word": " the", "probability": 0.912109375}, {"start": 1587.8, "end": 1588.02, "word": " left.", "probability": 0.9453125}], "temperature": 1.0}, {"id": 59, "seek": 160751, "start": 1589.39, "end": 1607.51, "text": " Now the critical value in this case is the Z-score. Now by using the normal table, the standardized normal table, you can figure out Z in this case is negative. Look at the table. Look at 5%.", "tokens": [823, 264, 4924, 2158, 294, 341, 1389, 307, 264, 1176, 12, 4417, 418, 13, 823, 538, 1228, 264, 2710, 3199, 11, 264, 31677, 2710, 3199, 11, 291, 393, 2573, 484, 1176, 294, 341, 1389, 307, 3671, 13, 2053, 412, 264, 3199, 13, 2053, 412, 1025, 6856], "avg_logprob": -0.2752659574468085, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 1589.39, "end": 1589.67, "word": " Now", "probability": 0.59423828125}, {"start": 1589.67, "end": 1589.85, "word": " the", "probability": 0.5224609375}, {"start": 1589.85, "end": 1590.17, "word": " critical", "probability": 0.89697265625}, {"start": 1590.17, "end": 1590.67, "word": " value", "probability": 0.93798828125}, {"start": 1590.67, "end": 1591.13, "word": " in", "probability": 0.67724609375}, {"start": 1591.13, "end": 1591.39, "word": " this", "probability": 0.94677734375}, {"start": 1591.39, "end": 1591.79, "word": " case", "probability": 0.91162109375}, {"start": 1591.79, "end": 1592.09, "word": " is", "probability": 0.927734375}, {"start": 1592.09, "end": 1592.33, "word": " the", "probability": 0.6923828125}, {"start": 1592.33, "end": 1592.49, "word": " Z", "probability": 0.56298828125}, {"start": 1592.49, "end": 1593.33, "word": "-score.", "probability": 0.66259765625}, {"start": 1595.21, "end": 1595.45, "word": " Now", "probability": 
0.76123046875}, {"start": 1595.45, "end": 1595.63, "word": " by", "probability": 0.73486328125}, {"start": 1595.63, "end": 1595.97, "word": " using", "probability": 0.9384765625}, {"start": 1595.97, "end": 1596.17, "word": " the", "probability": 0.74560546875}, {"start": 1596.17, "end": 1596.43, "word": " normal", "probability": 0.2998046875}, {"start": 1596.43, "end": 1596.83, "word": " table,", "probability": 0.82763671875}, {"start": 1597.09, "end": 1597.09, "word": " the", "probability": 0.492919921875}, {"start": 1597.09, "end": 1597.45, "word": " standardized", "probability": 0.681640625}, {"start": 1597.45, "end": 1597.83, "word": " normal", "probability": 0.8330078125}, {"start": 1597.83, "end": 1598.15, "word": " table,", "probability": 0.87353515625}, {"start": 1598.27, "end": 1598.39, "word": " you", "probability": 0.837890625}, {"start": 1598.39, "end": 1598.57, "word": " can", "probability": 0.93896484375}, {"start": 1598.57, "end": 1598.79, "word": " figure", "probability": 0.95361328125}, {"start": 1598.79, "end": 1599.11, "word": " out", "probability": 0.88623046875}, {"start": 1599.11, "end": 1599.87, "word": " Z", "probability": 0.53759765625}, {"start": 1599.87, "end": 1600.15, "word": " in", "probability": 0.78466796875}, {"start": 1600.15, "end": 1600.39, "word": " this", "probability": 0.95068359375}, {"start": 1600.39, "end": 1600.81, "word": " case", "probability": 0.9189453125}, {"start": 1600.81, "end": 1601.29, "word": " is", "probability": 0.93115234375}, {"start": 1601.29, "end": 1601.79, "word": " negative.", "probability": 0.91015625}, {"start": 1604.17, "end": 1604.51, "word": " Look", "probability": 0.79931640625}, {"start": 1604.51, "end": 1604.65, "word": " at", "probability": 0.96728515625}, {"start": 1604.65, "end": 1604.81, "word": " the", "probability": 0.9033203125}, {"start": 1604.81, "end": 1605.09, "word": " table.", "probability": 0.90283203125}, {"start": 1606.03, "end": 1606.29, "word": " Look", "probability": 
0.765625}, {"start": 1606.29, "end": 1606.51, "word": " at", "probability": 0.96142578125}, {"start": 1606.51, "end": 1607.51, "word": " 5%.", "probability": 0.725830078125}], "temperature": 1.0}, {"id": 60, "seek": 163656, "start": 1617.94, "end": 1636.56, "text": " The value is minus 1.645, the other one is 1.645. So that's the critical value, plus or minus a critical value. Again, the table, if you are looking at", "tokens": [440, 2158, 307, 3175, 502, 13, 21, 8465, 11, 264, 661, 472, 307, 502, 13, 21, 8465, 13, 407, 300, 311, 264, 4924, 2158, 11, 1804, 420, 3175, 257, 4924, 2158, 13, 3764, 11, 264, 3199, 11, 498, 291, 366, 1237, 412], "avg_logprob": -0.22965116001838862, "compression_ratio": 1.3333333333333333, "no_speech_prob": 0.0, "words": [{"start": 1617.94, "end": 1618.22, "word": " The", "probability": 0.277099609375}, {"start": 1618.22, "end": 1618.5, "word": " value", "probability": 0.798828125}, {"start": 1618.5, "end": 1618.76, "word": " is", "probability": 0.74072265625}, {"start": 1618.76, "end": 1619.18, "word": " minus", "probability": 0.3271484375}, {"start": 1619.18, "end": 1619.46, "word": " 1", "probability": 0.751953125}, {"start": 1619.46, "end": 1620.66, "word": ".645,", "probability": 0.9295247395833334}, {"start": 1621.46, "end": 1621.98, "word": " the", "probability": 0.391845703125}, {"start": 1621.98, "end": 1622.16, "word": " other", "probability": 0.85107421875}, {"start": 1622.16, "end": 1622.34, "word": " one", "probability": 0.78564453125}, {"start": 1622.34, "end": 1622.5, "word": " is", "probability": 0.79541015625}, {"start": 1622.5, "end": 1622.68, "word": " 1", "probability": 0.90673828125}, {"start": 1622.68, "end": 1623.56, "word": ".645.", "probability": 0.9713541666666666}, {"start": 1627.06, "end": 1627.7, "word": " So", "probability": 0.9091796875}, {"start": 1627.7, "end": 1627.98, "word": " that's", "probability": 0.820068359375}, {"start": 1627.98, "end": 1628.16, "word": " the", "probability": 0.87841796875}, 
{"start": 1628.16, "end": 1628.52, "word": " critical", "probability": 0.92529296875}, {"start": 1628.52, "end": 1628.96, "word": " value,", "probability": 0.97998046875}, {"start": 1629.18, "end": 1629.52, "word": " plus", "probability": 0.89501953125}, {"start": 1629.52, "end": 1629.74, "word": " or", "probability": 0.953125}, {"start": 1629.74, "end": 1630.22, "word": " minus", "probability": 0.986328125}, {"start": 1630.22, "end": 1631.46, "word": " a", "probability": 0.51416015625}, {"start": 1631.46, "end": 1631.82, "word": " critical", "probability": 0.927734375}, {"start": 1631.82, "end": 1632.34, "word": " value.", "probability": 0.97509765625}, {"start": 1633.64, "end": 1634.08, "word": " Again,", "probability": 0.93115234375}, {"start": 1634.24, "end": 1634.4, "word": " the", "probability": 0.7001953125}, {"start": 1634.4, "end": 1634.74, "word": " table,", "probability": 0.89453125}, {"start": 1635.52, "end": 1635.8, "word": " if", "probability": 0.94140625}, {"start": 1635.8, "end": 1635.88, "word": " you", "probability": 0.95654296875}, {"start": 1635.88, "end": 1635.98, "word": " are", "probability": 0.74853515625}, {"start": 1635.98, "end": 1636.24, "word": " looking", "probability": 0.91015625}, {"start": 1636.24, "end": 1636.56, "word": " at", "probability": 0.96240234375}], "temperature": 1.0}, {"id": 61, "seek": 167327, "start": 1648.43, "end": 1673.27, "text": " You will get this value. Minus 1.6 under 4, under 5. Here we have 0045, 0055. We have the chip. 
Again look at the angle of this one.", "tokens": [509, 486, 483, 341, 2158, 13, 2829, 301, 502, 13, 21, 833, 1017, 11, 833, 1025, 13, 1692, 321, 362, 7143, 8465, 11, 7143, 13622, 13, 492, 362, 264, 11409, 13, 3764, 574, 412, 264, 5802, 295, 341, 472, 13], "avg_logprob": -0.3435594570345995, "compression_ratio": 1.1875, "no_speech_prob": 0.0, "words": [{"start": 1648.43, "end": 1648.79, "word": " You", "probability": 0.25732421875}, {"start": 1648.79, "end": 1649.05, "word": " will", "probability": 0.76806640625}, {"start": 1649.05, "end": 1649.63, "word": " get", "probability": 0.880859375}, {"start": 1649.63, "end": 1649.91, "word": " this", "probability": 0.9296875}, {"start": 1649.91, "end": 1650.25, "word": " value.", "probability": 0.95947265625}, {"start": 1651.77, "end": 1652.23, "word": " Minus", "probability": 0.728271484375}, {"start": 1652.23, "end": 1652.51, "word": " 1", "probability": 0.70361328125}, {"start": 1652.51, "end": 1655.29, "word": ".6", "probability": 0.80078125}, {"start": 1655.29, "end": 1655.93, "word": " under", "probability": 0.6083984375}, {"start": 1655.93, "end": 1656.43, "word": " 4,", "probability": 0.9111328125}, {"start": 1657.55, "end": 1658.07, "word": " under", "probability": 0.7646484375}, {"start": 1658.07, "end": 1658.63, "word": " 5.", "probability": 0.99072265625}, {"start": 1658.79, "end": 1659.01, "word": " Here", "probability": 0.7978515625}, {"start": 1659.01, "end": 1659.15, "word": " we", "probability": 0.8583984375}, {"start": 1659.15, "end": 1659.47, "word": " have", "probability": 0.9443359375}, {"start": 1659.47, "end": 1661.91, "word": " 0045,", "probability": 0.663818359375}, {"start": 1662.77, "end": 1665.75, "word": " 0055.", "probability": 0.829833984375}, {"start": 1667.59, "end": 1667.85, "word": " We", "probability": 0.190185546875}, {"start": 1667.85, "end": 1667.99, "word": " have", "probability": 0.7412109375}, {"start": 1667.99, "end": 1668.11, "word": " the", "probability": 0.85498046875}, 
{"start": 1668.11, "end": 1668.31, "word": " chip.", "probability": 0.724609375}, {"start": 1670.71, "end": 1671.13, "word": " Again", "probability": 0.70654296875}, {"start": 1671.13, "end": 1671.53, "word": " look", "probability": 0.4716796875}, {"start": 1671.53, "end": 1672.05, "word": " at", "probability": 0.9443359375}, {"start": 1672.05, "end": 1672.55, "word": " the", "probability": 0.9072265625}, {"start": 1672.55, "end": 1672.75, "word": " angle", "probability": 0.59912109375}, {"start": 1672.75, "end": 1672.89, "word": " of", "probability": 0.7275390625}, {"start": 1672.89, "end": 1673.03, "word": " this", "probability": 0.9384765625}, {"start": 1673.03, "end": 1673.27, "word": " one.", "probability": 0.481201171875}], "temperature": 1.0}, {"id": 62, "seek": 168948, "start": 1687.28, "end": 1689.48, "text": " 05, 05.", "tokens": [1958, 20, 11, 1958, 20, 13], "avg_logprob": -0.6205356972558158, "compression_ratio": 0.5, "no_speech_prob": 2.2351741790771484e-05, "words": [{"start": 1687.28, "end": 1688.68, "word": " 05,", "probability": 0.29119873046875}, {"start": 1688.8, "end": 1689.48, "word": " 05.", "probability": 0.80517578125}], "temperature": 1.0}, {"id": 63, "seek": 172523, "start": 1716.87, "end": 1725.23, "text": " So again, minus 1.6 under 4. 
0505.", "tokens": [407, 797, 11, 3175, 502, 13, 21, 833, 1017, 13, 1958, 2803, 20, 13], "avg_logprob": -0.256249996026357, "compression_ratio": 0.813953488372093, "no_speech_prob": 0.0, "words": [{"start": 1716.87, "end": 1717.85, "word": " So", "probability": 0.50830078125}, {"start": 1717.85, "end": 1718.15, "word": " again,", "probability": 0.841796875}, {"start": 1718.71, "end": 1718.71, "word": " minus", "probability": 0.499267578125}, {"start": 1718.71, "end": 1719.03, "word": " 1", "probability": 0.8720703125}, {"start": 1719.03, "end": 1719.65, "word": ".6", "probability": 0.942138671875}, {"start": 1719.65, "end": 1719.91, "word": " under", "probability": 0.80126953125}, {"start": 1719.91, "end": 1720.33, "word": " 4.", "probability": 0.85498046875}, {"start": 1724.25, "end": 1725.23, "word": " 0505.", "probability": 0.7843424479166666}], "temperature": 1.0}, {"id": 64, "seek": 175071, "start": 1744.75, "end": 1750.71, "text": " So your answer could be either one of these negative one point", "tokens": [407, 428, 1867, 727, 312, 2139, 472, 295, 613, 3671, 472, 935], "avg_logprob": -0.27343750916994536, "compression_ratio": 0.984375, "no_speech_prob": 0.0, "words": [{"start": 1744.75, "end": 1745.45, "word": " So", "probability": 0.4208984375}, {"start": 1745.45, "end": 1746.15, "word": " your", "probability": 0.73095703125}, {"start": 1746.15, "end": 1746.55, "word": " answer", "probability": 0.962890625}, {"start": 1746.55, "end": 1746.85, "word": " could", "probability": 0.87158203125}, {"start": 1746.85, "end": 1747.13, "word": " be", "probability": 0.93408203125}, {"start": 1747.13, "end": 1748.25, "word": " either", "probability": 0.81640625}, {"start": 1748.25, "end": 1748.49, "word": " one", "probability": 0.90283203125}, {"start": 1748.49, "end": 1748.65, "word": " of", "probability": 0.96875}, {"start": 1748.65, "end": 1748.97, "word": " these", "probability": 0.83203125}, {"start": 1748.97, "end": 1750.01, "word": " negative", "probability": 
0.370361328125}, {"start": 1750.01, "end": 1750.35, "word": " one", "probability": 0.837890625}, {"start": 1750.35, "end": 1750.71, "word": " point", "probability": 0.96044921875}], "temperature": 1.0}, {"id": 65, "seek": 177707, "start": 1756.45, "end": 1777.07, "text": " So in this case your z score minus 1.64 or minus 1.65 or the average of these two values minus 1.645. It's better to use this one.", "tokens": [407, 294, 341, 1389, 428, 710, 6175, 3175, 502, 13, 19395, 420, 3175, 502, 13, 16824, 420, 264, 4274, 295, 613, 732, 4190, 3175, 502, 13, 21, 8465, 13, 467, 311, 1101, 281, 764, 341, 472, 13], "avg_logprob": -0.17454769579987778, "compression_ratio": 1.297029702970297, "no_speech_prob": 0.0, "words": [{"start": 1756.45, "end": 1757.13, "word": " So", "probability": 0.73828125}, {"start": 1757.13, "end": 1757.27, "word": " in", "probability": 0.7431640625}, {"start": 1757.27, "end": 1757.43, "word": " this", "probability": 0.94970703125}, {"start": 1757.43, "end": 1757.85, "word": " case", "probability": 0.91845703125}, {"start": 1757.85, "end": 1758.17, "word": " your", "probability": 0.501953125}, {"start": 1758.17, "end": 1758.37, "word": " z", "probability": 0.66455078125}, {"start": 1758.37, "end": 1758.75, "word": " score", "probability": 0.458251953125}, {"start": 1758.75, "end": 1761.49, "word": " minus", "probability": 0.453125}, {"start": 1761.49, "end": 1761.77, "word": " 1", "probability": 0.74169921875}, {"start": 1761.77, "end": 1762.33, "word": ".64", "probability": 0.949951171875}, {"start": 1762.33, "end": 1763.85, "word": " or", "probability": 0.84228515625}, {"start": 1763.85, "end": 1766.37, "word": " minus", "probability": 0.9658203125}, {"start": 1766.37, "end": 1766.67, "word": " 1", "probability": 0.9951171875}, {"start": 1766.67, "end": 1767.31, "word": ".65", "probability": 0.99560546875}, {"start": 1767.31, "end": 1770.15, "word": " or", "probability": 0.64453125}, {"start": 1770.15, "end": 1770.33, "word": " the", "probability": 
0.91650390625}, {"start": 1770.33, "end": 1770.83, "word": " average", "probability": 0.80908203125}, {"start": 1770.83, "end": 1771.21, "word": " of", "probability": 0.96435546875}, {"start": 1771.21, "end": 1771.43, "word": " these", "probability": 0.85400390625}, {"start": 1771.43, "end": 1771.61, "word": " two", "probability": 0.8505859375}, {"start": 1771.61, "end": 1772.13, "word": " values", "probability": 0.96630859375}, {"start": 1772.13, "end": 1773.63, "word": " minus", "probability": 0.91845703125}, {"start": 1773.63, "end": 1773.93, "word": " 1", "probability": 0.9931640625}, {"start": 1773.93, "end": 1774.83, "word": ".645.", "probability": 0.9669596354166666}, {"start": 1775.73, "end": 1775.97, "word": " It's", "probability": 0.8642578125}, {"start": 1775.97, "end": 1776.23, "word": " better", "probability": 0.9130859375}, {"start": 1776.23, "end": 1776.43, "word": " to", "probability": 0.96142578125}, {"start": 1776.43, "end": 1776.63, "word": " use", "probability": 0.87646484375}, {"start": 1776.63, "end": 1776.85, "word": " this", "probability": 0.94970703125}, {"start": 1776.85, "end": 1777.07, "word": " one.", "probability": 0.931640625}], "temperature": 1.0}, {"id": 66, "seek": 180726, "start": 1778.1, "end": 1807.26, "text": " So for 90%, 5% to the right, 5% to the left, just use the normal table, you will get these two values. So this is the critical value. Time. The standard error means the standard deviation of the point estimate. So the general formula for all confidence intervals is given by this equation. 
Point estimate.", "tokens": [407, 337, 4289, 8923, 1025, 4, 281, 264, 558, 11, 1025, 4, 281, 264, 1411, 11, 445, 764, 264, 2710, 3199, 11, 291, 486, 483, 613, 732, 4190, 13, 407, 341, 307, 264, 4924, 2158, 13, 6161, 13, 440, 3832, 6713, 1355, 264, 3832, 25163, 295, 264, 935, 12539, 13, 407, 264, 2674, 8513, 337, 439, 6687, 26651, 307, 2212, 538, 341, 5367, 13, 12387, 12539, 13], "avg_logprob": -0.16716451745699434, "compression_ratio": 1.577319587628866, "no_speech_prob": 0.0, "words": [{"start": 1778.1, "end": 1778.42, "word": " So", "probability": 0.623046875}, {"start": 1778.42, "end": 1778.66, "word": " for", "probability": 0.66455078125}, {"start": 1778.66, "end": 1779.58, "word": " 90%,", "probability": 0.6009521484375}, {"start": 1779.58, "end": 1780.52, "word": " 5", "probability": 0.8662109375}, {"start": 1780.52, "end": 1780.78, "word": "%", "probability": 0.97607421875}, {"start": 1780.78, "end": 1781.02, "word": " to", "probability": 0.95458984375}, {"start": 1781.02, "end": 1781.14, "word": " the", "probability": 0.90869140625}, {"start": 1781.14, "end": 1781.32, "word": " right,", "probability": 0.9013671875}, {"start": 1781.42, "end": 1781.6, "word": " 5", "probability": 0.96337890625}, {"start": 1781.6, "end": 1781.82, "word": "%", "probability": 0.998046875}, {"start": 1781.82, "end": 1782.04, "word": " to", "probability": 0.96533203125}, {"start": 1782.04, "end": 1782.16, "word": " the", "probability": 0.91455078125}, {"start": 1782.16, "end": 1782.36, "word": " left,", "probability": 0.9462890625}, {"start": 1782.8, "end": 1783.22, "word": " just", "probability": 0.88525390625}, {"start": 1783.22, "end": 1783.58, "word": " use", "probability": 0.837890625}, {"start": 1783.58, "end": 1783.7, "word": " the", "probability": 0.87744140625}, {"start": 1783.7, "end": 1783.98, "word": " normal", "probability": 0.8740234375}, {"start": 1783.98, "end": 1784.3, "word": " table,", "probability": 0.8583984375}, {"start": 1784.42, "end": 1784.44, "word": " 
you", "probability": 0.91015625}, {"start": 1784.44, "end": 1784.58, "word": " will", "probability": 0.818359375}, {"start": 1784.58, "end": 1784.76, "word": " get", "probability": 0.9345703125}, {"start": 1784.76, "end": 1784.98, "word": " these", "probability": 0.85302734375}, {"start": 1784.98, "end": 1785.16, "word": " two", "probability": 0.83935546875}, {"start": 1785.16, "end": 1785.5, "word": " values.", "probability": 0.958984375}, {"start": 1786.14, "end": 1786.32, "word": " So", "probability": 0.90771484375}, {"start": 1786.32, "end": 1786.54, "word": " this", "probability": 0.8681640625}, {"start": 1786.54, "end": 1786.66, "word": " is", "probability": 0.9453125}, {"start": 1786.66, "end": 1786.88, "word": " the", "probability": 0.88232421875}, {"start": 1786.88, "end": 1787.34, "word": " critical", "probability": 0.9228515625}, {"start": 1787.34, "end": 1787.82, "word": " value.", "probability": 0.974609375}, {"start": 1788.3, "end": 1788.6, "word": " Time.", "probability": 0.73193359375}, {"start": 1792.82, "end": 1793.16, "word": " The", "probability": 0.87939453125}, {"start": 1793.16, "end": 1793.66, "word": " standard", "probability": 0.87060546875}, {"start": 1793.66, "end": 1793.98, "word": " error", "probability": 0.85400390625}, {"start": 1793.98, "end": 1794.44, "word": " means", "probability": 0.51123046875}, {"start": 1794.44, "end": 1794.74, "word": " the", "probability": 0.873046875}, {"start": 1794.74, "end": 1795.08, "word": " standard", "probability": 0.94189453125}, {"start": 1795.08, "end": 1795.58, "word": " deviation", "probability": 0.91455078125}, {"start": 1795.58, "end": 1795.9, "word": " of", "probability": 0.96484375}, {"start": 1795.9, "end": 1796.02, "word": " the", "probability": 0.9072265625}, {"start": 1796.02, "end": 1796.28, "word": " point", "probability": 0.96240234375}, {"start": 1796.28, "end": 1796.74, "word": " estimate.", "probability": 0.88671875}, {"start": 1800.9, "end": 1801.54, "word": " So", "probability": 
0.93994140625}, {"start": 1801.54, "end": 1801.82, "word": " the", "probability": 0.82861328125}, {"start": 1801.82, "end": 1802.16, "word": " general", "probability": 0.8857421875}, {"start": 1802.16, "end": 1802.66, "word": " formula", "probability": 0.9033203125}, {"start": 1802.66, "end": 1803.28, "word": " for", "probability": 0.91162109375}, {"start": 1803.28, "end": 1803.54, "word": " all", "probability": 0.94970703125}, {"start": 1803.54, "end": 1804.02, "word": " confidence", "probability": 0.9873046875}, {"start": 1804.02, "end": 1804.52, "word": " intervals", "probability": 0.88720703125}, {"start": 1804.52, "end": 1804.96, "word": " is", "probability": 0.93896484375}, {"start": 1804.96, "end": 1805.2, "word": " given", "probability": 0.88525390625}, {"start": 1805.2, "end": 1805.42, "word": " by", "probability": 0.96630859375}, {"start": 1805.42, "end": 1805.64, "word": " this", "probability": 0.94970703125}, {"start": 1805.64, "end": 1806.06, "word": " equation.", "probability": 0.982421875}, {"start": 1806.46, "end": 1806.84, "word": " Point", "probability": 0.86083984375}, {"start": 1806.84, "end": 1807.26, "word": " estimate.", "probability": 0.8203125}], "temperature": 1.0}, {"id": 67, "seek": 183711, "start": 1808.39, "end": 1837.11, "text": " a plus or minus critical value times standard error. For example, point estimate is x4, the critical value is z-score, times the standard error of the point estimate. So times this SE is the standard error of the estimate. So these are the three components in order to build any confidence interval. 
Another definition here,", "tokens": [257, 1804, 420, 3175, 4924, 2158, 1413, 3832, 6713, 13, 1171, 1365, 11, 935, 12539, 307, 2031, 19, 11, 264, 4924, 2158, 307, 710, 12, 4417, 418, 11, 1413, 264, 3832, 6713, 295, 264, 935, 12539, 13, 407, 1413, 341, 10269, 307, 264, 3832, 6713, 295, 264, 12539, 13, 407, 613, 366, 264, 1045, 6677, 294, 1668, 281, 1322, 604, 6687, 15035, 13, 3996, 7123, 510, 11], "avg_logprob": -0.21840533307370016, "compression_ratio": 1.7759562841530054, "no_speech_prob": 0.0, "words": [{"start": 1808.39, "end": 1808.65, "word": " a", "probability": 0.277099609375}, {"start": 1808.65, "end": 1808.93, "word": " plus", "probability": 0.82666015625}, {"start": 1808.93, "end": 1809.13, "word": " or", "probability": 0.95654296875}, {"start": 1809.13, "end": 1809.51, "word": " minus", "probability": 0.9716796875}, {"start": 1809.51, "end": 1809.93, "word": " critical", "probability": 0.84130859375}, {"start": 1809.93, "end": 1810.41, "word": " value", "probability": 0.9716796875}, {"start": 1810.41, "end": 1811.73, "word": " times", "probability": 0.78173828125}, {"start": 1811.73, "end": 1812.15, "word": " standard", "probability": 0.76953125}, {"start": 1812.15, "end": 1812.37, "word": " error.", "probability": 0.87646484375}, {"start": 1812.75, "end": 1813.23, "word": " For", "probability": 0.93212890625}, {"start": 1813.23, "end": 1813.57, "word": " example,", "probability": 0.9736328125}, {"start": 1813.71, "end": 1813.91, "word": " point", "probability": 0.80712890625}, {"start": 1813.91, "end": 1814.23, "word": " estimate", "probability": 0.9365234375}, {"start": 1814.23, "end": 1814.53, "word": " is", "probability": 0.94091796875}, {"start": 1814.53, "end": 1815.03, "word": " x4,", "probability": 0.59423828125}, {"start": 1816.05, "end": 1816.39, "word": " the", "probability": 0.363525390625}, {"start": 1816.39, "end": 1816.75, "word": " critical", "probability": 0.9423828125}, {"start": 1816.75, "end": 1817.09, "word": " value", "probability": 
0.91455078125}, {"start": 1817.09, "end": 1817.23, "word": " is", "probability": 0.65380859375}, {"start": 1817.23, "end": 1817.41, "word": " z", "probability": 0.76123046875}, {"start": 1817.41, "end": 1817.75, "word": "-score,", "probability": 0.7306315104166666}, {"start": 1818.59, "end": 1819.31, "word": " times", "probability": 0.92333984375}, {"start": 1819.31, "end": 1819.69, "word": " the", "probability": 0.67822265625}, {"start": 1819.69, "end": 1820.11, "word": " standard", "probability": 0.92333984375}, {"start": 1820.11, "end": 1820.43, "word": " error", "probability": 0.87744140625}, {"start": 1820.43, "end": 1820.95, "word": " of", "probability": 0.9609375}, {"start": 1820.95, "end": 1822.03, "word": " the", "probability": 0.8974609375}, {"start": 1822.03, "end": 1822.33, "word": " point", "probability": 0.97265625}, {"start": 1822.33, "end": 1822.85, "word": " estimate.", "probability": 0.92431640625}, {"start": 1823.57, "end": 1823.75, "word": " So", "probability": 0.86376953125}, {"start": 1823.75, "end": 1824.15, "word": " times", "probability": 0.74365234375}, {"start": 1824.15, "end": 1824.89, "word": " this", "probability": 0.84765625}, {"start": 1824.89, "end": 1825.41, "word": " SE", "probability": 0.303955078125}, {"start": 1825.41, "end": 1825.89, "word": " is", "probability": 0.7919921875}, {"start": 1825.89, "end": 1826.13, "word": " the", "probability": 0.73974609375}, {"start": 1826.13, "end": 1826.39, "word": " standard", "probability": 0.92822265625}, {"start": 1826.39, "end": 1826.69, "word": " error", "probability": 0.8818359375}, {"start": 1826.69, "end": 1827.01, "word": " of", "probability": 0.96533203125}, {"start": 1827.01, "end": 1827.29, "word": " the", "probability": 0.91455078125}, {"start": 1827.29, "end": 1828.33, "word": " estimate.", "probability": 0.85791015625}, {"start": 1828.71, "end": 1829.01, "word": " So", "probability": 0.94189453125}, {"start": 1829.01, "end": 1829.23, "word": " these", "probability": 
0.810546875}, {"start": 1829.23, "end": 1829.47, "word": " are", "probability": 0.93896484375}, {"start": 1829.47, "end": 1829.69, "word": " the", "probability": 0.92041015625}, {"start": 1829.69, "end": 1830.31, "word": " three", "probability": 0.9267578125}, {"start": 1830.31, "end": 1830.79, "word": " components", "probability": 0.92333984375}, {"start": 1830.79, "end": 1831.29, "word": " in", "probability": 0.921875}, {"start": 1831.29, "end": 1831.47, "word": " order", "probability": 0.919921875}, {"start": 1831.47, "end": 1831.71, "word": " to", "probability": 0.9697265625}, {"start": 1831.71, "end": 1832.07, "word": " build", "probability": 0.91357421875}, {"start": 1832.07, "end": 1833.25, "word": " any", "probability": 0.89111328125}, {"start": 1833.25, "end": 1833.99, "word": " confidence", "probability": 0.9814453125}, {"start": 1833.99, "end": 1834.65, "word": " interval.", "probability": 0.96875}, {"start": 1835.79, "end": 1836.25, "word": " Another", "probability": 0.87158203125}, {"start": 1836.25, "end": 1836.71, "word": " definition", "probability": 0.95361328125}, {"start": 1836.71, "end": 1837.11, "word": " here,", "probability": 0.85302734375}], "temperature": 1.0}, {"id": 68, "seek": 186756, "start": 1838.12, "end": 1867.56, "text": " The confidence level. The confidence level that the interval will contain the unknown population parameter. For example, we can say 90%, 95%, 99%. You never say 100%. 
So usually, the confidence level is a percentage that is less than 100%.", "tokens": [440, 6687, 1496, 13, 440, 6687, 1496, 300, 264, 15035, 486, 5304, 264, 9841, 4415, 13075, 13, 1171, 1365, 11, 321, 393, 584, 4289, 8923, 13420, 8923, 11803, 6856, 509, 1128, 584, 2319, 6856, 407, 2673, 11, 264, 6687, 1496, 307, 257, 9668, 300, 307, 1570, 813, 2319, 6856], "avg_logprob": -0.13203124910593034, "compression_ratio": 1.5286624203821657, "no_speech_prob": 0.0, "words": [{"start": 1838.12, "end": 1838.36, "word": " The", "probability": 0.65380859375}, {"start": 1838.36, "end": 1838.84, "word": " confidence", "probability": 0.90380859375}, {"start": 1838.84, "end": 1839.18, "word": " level.", "probability": 0.9443359375}, {"start": 1841.36, "end": 1842.28, "word": " The", "probability": 0.8740234375}, {"start": 1842.28, "end": 1842.76, "word": " confidence", "probability": 0.974609375}, {"start": 1842.76, "end": 1843.26, "word": " level", "probability": 0.9453125}, {"start": 1843.26, "end": 1843.62, "word": " that", "probability": 0.56640625}, {"start": 1843.62, "end": 1843.84, "word": " the", "probability": 0.86328125}, {"start": 1843.84, "end": 1844.24, "word": " interval", "probability": 0.9658203125}, {"start": 1844.24, "end": 1844.48, "word": " will", "probability": 0.8662109375}, {"start": 1844.48, "end": 1845.06, "word": " contain", "probability": 0.94970703125}, {"start": 1845.06, "end": 1845.34, "word": " the", "probability": 0.88525390625}, {"start": 1845.34, "end": 1845.92, "word": " unknown", "probability": 0.88720703125}, {"start": 1845.92, "end": 1846.68, "word": " population", "probability": 0.9677734375}, {"start": 1846.68, "end": 1847.12, "word": " parameter.", "probability": 0.8291015625}, {"start": 1848.0, "end": 1848.22, "word": " For", "probability": 0.95654296875}, {"start": 1848.22, "end": 1848.58, "word": " example,", "probability": 0.97509765625}, {"start": 1849.2, "end": 1849.44, "word": " we", "probability": 0.94677734375}, {"start": 1849.44, 
"end": 1849.68, "word": " can", "probability": 0.9482421875}, {"start": 1849.68, "end": 1850.06, "word": " say", "probability": 0.92626953125}, {"start": 1850.06, "end": 1851.22, "word": " 90%,", "probability": 0.679931640625}, {"start": 1851.22, "end": 1854.12, "word": " 95%,", "probability": 0.82421875}, {"start": 1854.12, "end": 1855.36, "word": " 99%.", "probability": 0.90087890625}, {"start": 1855.36, "end": 1857.46, "word": " You", "probability": 0.9453125}, {"start": 1857.46, "end": 1857.78, "word": " never", "probability": 0.90478515625}, {"start": 1857.78, "end": 1858.56, "word": " say", "probability": 0.89794921875}, {"start": 1858.56, "end": 1860.4, "word": " 100%.", "probability": 0.851318359375}, {"start": 1860.4, "end": 1860.82, "word": " So", "probability": 0.9541015625}, {"start": 1860.82, "end": 1861.38, "word": " usually,", "probability": 0.7998046875}, {"start": 1861.94, "end": 1862.28, "word": " the", "probability": 0.9189453125}, {"start": 1862.28, "end": 1862.62, "word": " confidence", "probability": 0.978515625}, {"start": 1862.62, "end": 1863.16, "word": " level", "probability": 0.93701171875}, {"start": 1863.16, "end": 1863.78, "word": " is", "probability": 0.9189453125}, {"start": 1863.78, "end": 1863.9, "word": " a", "probability": 0.99267578125}, {"start": 1863.9, "end": 1864.34, "word": " percentage", "probability": 0.896484375}, {"start": 1864.34, "end": 1865.34, "word": " that", "probability": 0.93212890625}, {"start": 1865.34, "end": 1865.58, "word": " is", "probability": 0.94384765625}, {"start": 1865.58, "end": 1865.9, "word": " less", "probability": 0.947265625}, {"start": 1865.9, "end": 1866.3, "word": " than", "probability": 0.9462890625}, {"start": 1866.3, "end": 1867.56, "word": " 100%.", "probability": 0.9462890625}], "temperature": 1.0}, {"id": 69, "seek": 189830, "start": 1873.24, "end": 1898.3, "text": " Now, for example, suppose the confidence level is 95. This 95 can be written as 1 minus alpha equals 95 percent. 
That means the confidence here is 95 percent, so the error", "tokens": [823, 11, 337, 1365, 11, 7297, 264, 6687, 1496, 307, 13420, 13, 639, 13420, 393, 312, 3720, 382, 502, 3175, 8961, 6915, 13420, 3043, 13, 663, 1355, 264, 6687, 510, 307, 13420, 3043, 11, 370, 264, 6713], "avg_logprob": -0.20867598488142616, "compression_ratio": 1.3983739837398375, "no_speech_prob": 0.0, "words": [{"start": 1873.24, "end": 1873.48, "word": " Now,", "probability": 0.75341796875}, {"start": 1873.56, "end": 1873.68, "word": " for", "probability": 0.9453125}, {"start": 1873.68, "end": 1874.0, "word": " example,", "probability": 0.97412109375}, {"start": 1874.14, "end": 1874.58, "word": " suppose", "probability": 0.8564453125}, {"start": 1874.58, "end": 1875.08, "word": " the", "probability": 0.8515625}, {"start": 1875.08, "end": 1875.46, "word": " confidence", "probability": 0.96875}, {"start": 1875.46, "end": 1877.12, "word": " level", "probability": 0.94140625}, {"start": 1877.12, "end": 1877.36, "word": " is", "probability": 0.951171875}, {"start": 1877.36, "end": 1877.72, "word": " 95.", "probability": 0.91357421875}, {"start": 1878.46, "end": 1879.02, "word": " This", "probability": 0.86669921875}, {"start": 1879.02, "end": 1879.42, "word": " 95", "probability": 0.9736328125}, {"start": 1879.42, "end": 1879.82, "word": " can", "probability": 0.953125}, {"start": 1879.82, "end": 1879.96, "word": " be", "probability": 0.95703125}, {"start": 1879.96, "end": 1880.26, "word": " written", "probability": 0.93798828125}, {"start": 1880.26, "end": 1880.74, "word": " as", "probability": 0.9716796875}, {"start": 1880.74, "end": 1882.96, "word": " 1", "probability": 0.638671875}, {"start": 1882.96, "end": 1883.28, "word": " minus", "probability": 0.8466796875}, {"start": 1883.28, "end": 1883.7, "word": " alpha", "probability": 0.7724609375}, {"start": 1883.7, "end": 1891.1, "word": " equals", "probability": 0.456298828125}, {"start": 1891.1, "end": 1891.72, "word": " 95", "probability": 
0.982421875}, {"start": 1891.72, "end": 1892.28, "word": " percent.", "probability": 0.6181640625}, {"start": 1894.36, "end": 1894.72, "word": " That", "probability": 0.90087890625}, {"start": 1894.72, "end": 1895.02, "word": " means", "probability": 0.93115234375}, {"start": 1895.02, "end": 1895.22, "word": " the", "probability": 0.84228515625}, {"start": 1895.22, "end": 1895.62, "word": " confidence", "probability": 0.97802734375}, {"start": 1895.62, "end": 1896.1, "word": " here", "probability": 0.8203125}, {"start": 1896.1, "end": 1896.26, "word": " is", "probability": 0.9482421875}, {"start": 1896.26, "end": 1896.6, "word": " 95", "probability": 0.97509765625}, {"start": 1896.6, "end": 1897.14, "word": " percent,", "probability": 0.9462890625}, {"start": 1897.68, "end": 1897.9, "word": " so", "probability": 0.9365234375}, {"start": 1897.9, "end": 1898.04, "word": " the", "probability": 0.90625}, {"start": 1898.04, "end": 1898.3, "word": " error", "probability": 0.904296875}], "temperature": 1.0}, {"id": 70, "seek": 192510, "start": 1899.62, "end": 1925.1, "text": " Or alpha is 5%. So if the confidence level is 90%, it means the error is 10%. It is 90. So alpha is 10% and so on. 
So a relative frequency interpretation in this case, you can see that 95%", "tokens": [1610, 8961, 307, 1025, 6856, 407, 498, 264, 6687, 1496, 307, 4289, 8923, 309, 1355, 264, 6713, 307, 1266, 6856, 467, 307, 4289, 13, 407, 8961, 307, 1266, 4, 293, 370, 322, 13, 407, 257, 4972, 7893, 14174, 294, 341, 1389, 11, 291, 393, 536, 300, 13420, 4], "avg_logprob": -0.18526786200854242, "compression_ratio": 1.3597122302158273, "no_speech_prob": 0.0, "words": [{"start": 1899.62, "end": 1899.94, "word": " Or", "probability": 0.330810546875}, {"start": 1899.94, "end": 1900.28, "word": " alpha", "probability": 0.57958984375}, {"start": 1900.28, "end": 1900.66, "word": " is", "probability": 0.9443359375}, {"start": 1900.66, "end": 1902.04, "word": " 5%.", "probability": 0.708740234375}, {"start": 1902.04, "end": 1902.88, "word": " So", "probability": 0.92626953125}, {"start": 1902.88, "end": 1903.48, "word": " if", "probability": 0.71826171875}, {"start": 1903.48, "end": 1903.72, "word": " the", "probability": 0.9228515625}, {"start": 1903.72, "end": 1904.08, "word": " confidence", "probability": 0.9501953125}, {"start": 1904.08, "end": 1904.54, "word": " level", "probability": 0.94140625}, {"start": 1904.54, "end": 1904.98, "word": " is", "probability": 0.9501953125}, {"start": 1904.98, "end": 1905.9, "word": " 90%,", "probability": 0.912841796875}, {"start": 1905.9, "end": 1906.78, "word": " it", "probability": 0.93017578125}, {"start": 1906.78, "end": 1907.04, "word": " means", "probability": 0.9287109375}, {"start": 1907.04, "end": 1907.22, "word": " the", "probability": 0.88671875}, {"start": 1907.22, "end": 1907.46, "word": " error", "probability": 0.865234375}, {"start": 1907.46, "end": 1907.82, "word": " is", "probability": 0.9384765625}, {"start": 1907.82, "end": 1909.78, "word": " 10%.", "probability": 0.843505859375}, {"start": 1909.78, "end": 1910.62, "word": " It", "probability": 0.52099609375}, {"start": 1910.62, "end": 1910.78, "word": " is", "probability": 
0.666015625}, {"start": 1910.78, "end": 1911.2, "word": " 90.", "probability": 0.9619140625}, {"start": 1914.42, "end": 1915.14, "word": " So", "probability": 0.9521484375}, {"start": 1915.14, "end": 1915.48, "word": " alpha", "probability": 0.90625}, {"start": 1915.48, "end": 1915.9, "word": " is", "probability": 0.95166015625}, {"start": 1915.9, "end": 1917.2, "word": " 10", "probability": 0.94970703125}, {"start": 1917.2, "end": 1917.58, "word": "%", "probability": 0.74072265625}, {"start": 1917.58, "end": 1917.78, "word": " and", "probability": 0.85986328125}, {"start": 1917.78, "end": 1917.94, "word": " so", "probability": 0.96044921875}, {"start": 1917.94, "end": 1918.08, "word": " on.", "probability": 0.83251953125}, {"start": 1919.12, "end": 1919.84, "word": " So", "probability": 0.95654296875}, {"start": 1919.84, "end": 1919.98, "word": " a", "probability": 0.66064453125}, {"start": 1919.98, "end": 1920.24, "word": " relative", "probability": 0.91259765625}, {"start": 1920.24, "end": 1920.78, "word": " frequency", "probability": 0.95947265625}, {"start": 1920.78, "end": 1921.26, "word": " interpretation", "probability": 0.84228515625}, {"start": 1921.26, "end": 1921.56, "word": " in", "probability": 0.80908203125}, {"start": 1921.56, "end": 1921.74, "word": " this", "probability": 0.9443359375}, {"start": 1921.74, "end": 1922.1, "word": " case,", "probability": 0.9111328125}, {"start": 1922.3, "end": 1922.42, "word": " you", "probability": 0.95556640625}, {"start": 1922.42, "end": 1922.64, "word": " can", "probability": 0.9462890625}, {"start": 1922.64, "end": 1922.84, "word": " see", "probability": 0.70947265625}, {"start": 1922.84, "end": 1923.16, "word": " that", "probability": 0.869140625}, {"start": 1923.16, "end": 1924.4, "word": " 95", "probability": 0.9853515625}, {"start": 1924.4, "end": 1925.1, "word": "%", "probability": 0.4833984375}], "temperature": 1.0}, {"id": 71, "seek": 195714, "start": 1927.56, "end": 1957.14, "text": " Of all the 
confidence interval that can be constructed will contain the unknown true parameter. A specific interval either will contain or will not contain the true parameter as we mentioned. So if you have confidence interval with confidence level 95%, this interval might be or might not contain the true parameter. So you are not sure 100% that the true parameter will contain", "tokens": [2720, 439, 264, 6687, 15035, 300, 393, 312, 17083, 486, 5304, 264, 9841, 2074, 13075, 13, 316, 2685, 15035, 2139, 486, 5304, 420, 486, 406, 5304, 264, 2074, 13075, 382, 321, 2835, 13, 407, 498, 291, 362, 6687, 15035, 365, 6687, 1496, 13420, 8923, 341, 15035, 1062, 312, 420, 1062, 406, 5304, 264, 2074, 13075, 13, 407, 291, 366, 406, 988, 2319, 4, 300, 264, 2074, 13075, 486, 5304], "avg_logprob": -0.13805803401129588, "compression_ratio": 2.054054054054054, "no_speech_prob": 0.0, "words": [{"start": 1927.56, "end": 1927.92, "word": " Of", "probability": 0.416748046875}, {"start": 1927.92, "end": 1928.32, "word": " all", "probability": 0.9453125}, {"start": 1928.32, "end": 1928.52, "word": " the", "probability": 0.89404296875}, {"start": 1928.52, "end": 1928.88, "word": " confidence", "probability": 0.81396484375}, {"start": 1928.88, "end": 1929.44, "word": " interval", "probability": 0.62548828125}, {"start": 1929.44, "end": 1929.8, "word": " that", "probability": 0.8291015625}, {"start": 1929.8, "end": 1930.0, "word": " can", "probability": 0.9248046875}, {"start": 1930.0, "end": 1930.2, "word": " be", "probability": 0.9462890625}, {"start": 1930.2, "end": 1930.84, "word": " constructed", "probability": 0.9267578125}, {"start": 1930.84, "end": 1931.1, "word": " will", "probability": 0.41552734375}, {"start": 1931.1, "end": 1931.58, "word": " contain", "probability": 0.94921875}, {"start": 1931.58, "end": 1931.76, "word": " the", "probability": 0.76806640625}, {"start": 1931.76, "end": 1932.04, "word": " unknown", "probability": 0.8759765625}, {"start": 1932.04, "end": 1932.34, "word": " 
true", "probability": 0.732421875}, {"start": 1932.34, "end": 1933.1, "word": " parameter.", "probability": 0.89306640625}, {"start": 1934.34, "end": 1935.12, "word": " A", "probability": 0.822265625}, {"start": 1935.12, "end": 1935.58, "word": " specific", "probability": 0.91943359375}, {"start": 1935.58, "end": 1936.16, "word": " interval", "probability": 0.9609375}, {"start": 1936.16, "end": 1936.8, "word": " either", "probability": 0.9013671875}, {"start": 1936.8, "end": 1937.14, "word": " will", "probability": 0.8876953125}, {"start": 1937.14, "end": 1937.74, "word": " contain", "probability": 0.958984375}, {"start": 1937.74, "end": 1938.14, "word": " or", "probability": 0.919921875}, {"start": 1938.14, "end": 1938.34, "word": " will", "probability": 0.88134765625}, {"start": 1938.34, "end": 1938.56, "word": " not", "probability": 0.9453125}, {"start": 1938.56, "end": 1939.04, "word": " contain", "probability": 0.955078125}, {"start": 1939.04, "end": 1939.28, "word": " the", "probability": 0.89208984375}, {"start": 1939.28, "end": 1939.46, "word": " true", "probability": 0.923828125}, {"start": 1939.46, "end": 1939.82, "word": " parameter", "probability": 0.8603515625}, {"start": 1939.82, "end": 1940.02, "word": " as", "probability": 0.60986328125}, {"start": 1940.02, "end": 1940.14, "word": " we", "probability": 0.93310546875}, {"start": 1940.14, "end": 1940.4, "word": " mentioned.", "probability": 0.78955078125}, {"start": 1941.1, "end": 1941.26, "word": " So", "probability": 0.94873046875}, {"start": 1941.26, "end": 1941.42, "word": " if", "probability": 0.81298828125}, {"start": 1941.42, "end": 1941.54, "word": " you", "probability": 0.95849609375}, {"start": 1941.54, "end": 1941.8, "word": " have", "probability": 0.9443359375}, {"start": 1941.8, "end": 1942.3, "word": " confidence", "probability": 0.93798828125}, {"start": 1942.3, "end": 1942.74, "word": " interval", "probability": 0.96826171875}, {"start": 1942.74, "end": 1943.12, "word": " with", 
"probability": 0.748046875}, {"start": 1943.12, "end": 1943.68, "word": " confidence", "probability": 0.9443359375}, {"start": 1943.68, "end": 1943.96, "word": " level", "probability": 0.84228515625}, {"start": 1943.96, "end": 1944.82, "word": " 95%,", "probability": 0.81494140625}, {"start": 1944.82, "end": 1945.8, "word": " this", "probability": 0.947265625}, {"start": 1945.8, "end": 1946.16, "word": " interval", "probability": 0.96484375}, {"start": 1946.16, "end": 1946.56, "word": " might", "probability": 0.8984375}, {"start": 1946.56, "end": 1946.76, "word": " be", "probability": 0.9345703125}, {"start": 1946.76, "end": 1946.98, "word": " or", "probability": 0.89892578125}, {"start": 1946.98, "end": 1947.18, "word": " might", "probability": 0.89892578125}, {"start": 1947.18, "end": 1947.58, "word": " not", "probability": 0.9453125}, {"start": 1947.58, "end": 1948.58, "word": " contain", "probability": 0.9345703125}, {"start": 1948.58, "end": 1949.68, "word": " the", "probability": 0.89111328125}, {"start": 1949.68, "end": 1950.12, "word": " true", "probability": 0.8818359375}, {"start": 1950.12, "end": 1950.66, "word": " parameter.", "probability": 0.94677734375}, {"start": 1951.08, "end": 1951.34, "word": " So", "probability": 0.95751953125}, {"start": 1951.34, "end": 1951.56, "word": " you", "probability": 0.93310546875}, {"start": 1951.56, "end": 1951.74, "word": " are", "probability": 0.9140625}, {"start": 1951.74, "end": 1952.0, "word": " not", "probability": 0.9443359375}, {"start": 1952.0, "end": 1952.3, "word": " sure", "probability": 0.9111328125}, {"start": 1952.3, "end": 1952.7, "word": " 100", "probability": 0.84912109375}, {"start": 1952.7, "end": 1953.12, "word": "%", "probability": 0.990234375}, {"start": 1953.12, "end": 1953.58, "word": " that", "probability": 0.9296875}, {"start": 1953.58, "end": 1954.5, "word": " the", "probability": 0.9013671875}, {"start": 1954.5, "end": 1954.9, "word": " true", "probability": 0.97265625}, {"start": 1954.9, 
"end": 1955.78, "word": " parameter", "probability": 0.970703125}, {"start": 1955.78, "end": 1956.22, "word": " will", "probability": 0.88330078125}, {"start": 1956.22, "end": 1957.14, "word": " contain", "probability": 0.9521484375}], "temperature": 1.0}, {"id": 72, "seek": 198606, "start": 1957.5, "end": 1986.06, "text": " in the confidence interval. So for this chapter, we are going to talk about confidence intervals for population mean and population proportion. For population mean, we will have two cases, when sigma is known and sigma is unknown. Now let's start with the confidence interval for immune", "tokens": [294, 264, 6687, 15035, 13, 407, 337, 341, 7187, 11, 321, 366, 516, 281, 751, 466, 6687, 26651, 337, 4415, 914, 293, 4415, 16068, 13, 1171, 4415, 914, 11, 321, 486, 362, 732, 3331, 11, 562, 12771, 307, 2570, 293, 12771, 307, 9841, 13, 823, 718, 311, 722, 365, 264, 6687, 15035, 337, 11992], "avg_logprob": -0.14488636688752607, "compression_ratio": 1.7826086956521738, "no_speech_prob": 0.0, "words": [{"start": 1957.5, "end": 1957.84, "word": " in", "probability": 0.37890625}, {"start": 1957.84, "end": 1958.14, "word": " the", "probability": 0.912109375}, {"start": 1958.14, "end": 1958.66, "word": " confidence", "probability": 0.955078125}, {"start": 1958.66, "end": 1959.86, "word": " interval.", "probability": 0.84130859375}, {"start": 1961.72, "end": 1962.48, "word": " So", "probability": 0.908203125}, {"start": 1962.48, "end": 1962.74, "word": " for", "probability": 0.703125}, {"start": 1962.74, "end": 1962.94, "word": " this", "probability": 0.9423828125}, {"start": 1962.94, "end": 1963.24, "word": " chapter,", "probability": 0.875}, {"start": 1963.3, "end": 1963.38, "word": " we", "probability": 0.96044921875}, {"start": 1963.38, "end": 1963.5, "word": " are", "probability": 0.90625}, {"start": 1963.5, "end": 1963.72, "word": " going", "probability": 0.947265625}, {"start": 1963.72, "end": 1963.86, "word": " to", "probability": 0.96826171875}, 
{"start": 1963.86, "end": 1964.08, "word": " talk", "probability": 0.89306640625}, {"start": 1964.08, "end": 1964.56, "word": " about", "probability": 0.900390625}, {"start": 1964.56, "end": 1966.96, "word": " confidence", "probability": 0.9453125}, {"start": 1966.96, "end": 1967.52, "word": " intervals", "probability": 0.857421875}, {"start": 1967.52, "end": 1968.74, "word": " for", "probability": 0.87939453125}, {"start": 1968.74, "end": 1970.06, "word": " population", "probability": 0.9599609375}, {"start": 1970.06, "end": 1970.38, "word": " mean", "probability": 0.96435546875}, {"start": 1970.38, "end": 1972.02, "word": " and", "probability": 0.888671875}, {"start": 1972.02, "end": 1972.44, "word": " population", "probability": 0.95751953125}, {"start": 1972.44, "end": 1973.02, "word": " proportion.", "probability": 0.8291015625}, {"start": 1973.62, "end": 1973.82, "word": " For", "probability": 0.9638671875}, {"start": 1973.82, "end": 1974.2, "word": " population", "probability": 0.9580078125}, {"start": 1974.2, "end": 1974.6, "word": " mean,", "probability": 0.970703125}, {"start": 1974.76, "end": 1974.94, "word": " we", "probability": 0.951171875}, {"start": 1974.94, "end": 1975.1, "word": " will", "probability": 0.8349609375}, {"start": 1975.1, "end": 1975.3, "word": " have", "probability": 0.9453125}, {"start": 1975.3, "end": 1975.48, "word": " two", "probability": 0.91943359375}, {"start": 1975.48, "end": 1976.0, "word": " cases,", "probability": 0.93701171875}, {"start": 1976.38, "end": 1976.6, "word": " when", "probability": 0.833984375}, {"start": 1976.6, "end": 1976.9, "word": " sigma", "probability": 0.8505859375}, {"start": 1976.9, "end": 1977.16, "word": " is", "probability": 0.9521484375}, {"start": 1977.16, "end": 1977.46, "word": " known", "probability": 0.72265625}, {"start": 1977.46, "end": 1978.26, "word": " and", "probability": 0.85791015625}, {"start": 1978.26, "end": 1978.6, "word": " sigma", "probability": 0.90283203125}, {"start": 
1978.6, "end": 1979.08, "word": " is", "probability": 0.9453125}, {"start": 1979.08, "end": 1980.04, "word": " unknown.", "probability": 0.900390625}, {"start": 1981.48, "end": 1981.96, "word": " Now", "probability": 0.9462890625}, {"start": 1981.96, "end": 1982.24, "word": " let's", "probability": 0.798583984375}, {"start": 1982.24, "end": 1982.64, "word": " start", "probability": 0.8662109375}, {"start": 1982.64, "end": 1983.06, "word": " with", "probability": 0.896484375}, {"start": 1983.06, "end": 1984.18, "word": " the", "probability": 0.8984375}, {"start": 1984.18, "end": 1984.72, "word": " confidence", "probability": 0.9833984375}, {"start": 1984.72, "end": 1985.36, "word": " interval", "probability": 0.95556640625}, {"start": 1985.36, "end": 1985.74, "word": " for", "probability": 0.89501953125}, {"start": 1985.74, "end": 1986.06, "word": " immune", "probability": 0.73779296875}], "temperature": 1.0}, {"id": 73, "seek": 201011, "start": 1986.93, "end": 2010.11, "text": " when sigma is given. But again, if a mu is unknown, here we are talking about confidence interval for mu. 
Since we are talking about estimating a mu, that means a mu is unknown.", "tokens": [562, 12771, 307, 2212, 13, 583, 797, 11, 498, 257, 2992, 307, 9841, 11, 510, 321, 366, 1417, 466, 6687, 15035, 337, 2992, 13, 4162, 321, 366, 1417, 466, 8017, 990, 257, 2992, 11, 300, 1355, 257, 2992, 307, 9841, 13], "avg_logprob": -0.16927083191417513, "compression_ratio": 1.5213675213675213, "no_speech_prob": 0.0, "words": [{"start": 1986.93, "end": 1987.25, "word": " when", "probability": 0.353759765625}, {"start": 1987.25, "end": 1987.71, "word": " sigma", "probability": 0.5556640625}, {"start": 1987.71, "end": 1988.87, "word": " is", "probability": 0.9365234375}, {"start": 1988.87, "end": 1989.15, "word": " given.", "probability": 0.916015625}, {"start": 1990.13, "end": 1990.35, "word": " But", "probability": 0.89599609375}, {"start": 1990.35, "end": 1990.75, "word": " again,", "probability": 0.9130859375}, {"start": 1992.39, "end": 1992.59, "word": " if", "probability": 0.9462890625}, {"start": 1992.59, "end": 1992.75, "word": " a", "probability": 0.5}, {"start": 1992.75, "end": 1992.85, "word": " mu", "probability": 0.53515625}, {"start": 1992.85, "end": 1993.15, "word": " is", "probability": 0.94482421875}, {"start": 1993.15, "end": 1993.55, "word": " unknown,", "probability": 0.900390625}, {"start": 1994.83, "end": 1995.21, "word": " here", "probability": 0.81982421875}, {"start": 1995.21, "end": 1995.35, "word": " we", "probability": 0.92041015625}, {"start": 1995.35, "end": 1995.49, "word": " are", "probability": 0.919921875}, {"start": 1995.49, "end": 1995.85, "word": " talking", "probability": 0.8486328125}, {"start": 1995.85, "end": 1996.45, "word": " about", "probability": 0.9072265625}, {"start": 1996.45, "end": 1997.99, "word": " confidence", "probability": 0.95849609375}, {"start": 1997.99, "end": 1998.81, "word": " interval", "probability": 0.95849609375}, {"start": 1998.81, "end": 1999.15, "word": " for", "probability": 0.943359375}, {"start": 1999.15, "end": 
1999.39, "word": " mu.", "probability": 0.76708984375}, {"start": 2002.65, "end": 2003.41, "word": " Since", "probability": 0.83447265625}, {"start": 2003.41, "end": 2003.55, "word": " we", "probability": 0.9365234375}, {"start": 2003.55, "end": 2003.69, "word": " are", "probability": 0.93408203125}, {"start": 2003.69, "end": 2004.07, "word": " talking", "probability": 0.8349609375}, {"start": 2004.07, "end": 2004.61, "word": " about", "probability": 0.89501953125}, {"start": 2004.61, "end": 2005.29, "word": " estimating", "probability": 0.96875}, {"start": 2005.29, "end": 2005.51, "word": " a", "probability": 0.87109375}, {"start": 2005.51, "end": 2005.71, "word": " mu,", "probability": 0.95751953125}, {"start": 2006.37, "end": 2006.77, "word": " that", "probability": 0.94189453125}, {"start": 2006.77, "end": 2007.15, "word": " means", "probability": 0.93603515625}, {"start": 2007.15, "end": 2007.39, "word": " a", "probability": 0.93408203125}, {"start": 2007.39, "end": 2007.71, "word": " mu", "probability": 0.9599609375}, {"start": 2007.71, "end": 2008.55, "word": " is", "probability": 0.9541015625}, {"start": 2008.55, "end": 2010.11, "word": " unknown.", "probability": 0.91162109375}], "temperature": 1.0}, {"id": 74, "seek": 203841, "start": 2013.19, "end": 2038.41, "text": " Now the first case here we are talking about sigma is unknown, is known. But we know that from chapter three that sigma square root sum x minus mu squared divided by capital N. 
So in order to compute sigma, we have to know", "tokens": [823, 264, 700, 1389, 510, 321, 366, 1417, 466, 12771, 307, 9841, 11, 307, 2570, 13, 583, 321, 458, 300, 490, 7187, 1045, 300, 12771, 3732, 5593, 2408, 2031, 3175, 2992, 8889, 6666, 538, 4238, 426, 13, 407, 294, 1668, 281, 14722, 12771, 11, 321, 362, 281, 458], "avg_logprob": -0.25717475462932976, "compression_ratio": 1.4671052631578947, "no_speech_prob": 0.0, "words": [{"start": 2013.19, "end": 2013.45, "word": " Now", "probability": 0.488037109375}, {"start": 2013.45, "end": 2013.65, "word": " the", "probability": 0.4775390625}, {"start": 2013.65, "end": 2013.99, "word": " first", "probability": 0.88427734375}, {"start": 2013.99, "end": 2014.35, "word": " case", "probability": 0.9111328125}, {"start": 2014.35, "end": 2014.59, "word": " here", "probability": 0.63525390625}, {"start": 2014.59, "end": 2014.75, "word": " we", "probability": 0.708984375}, {"start": 2014.75, "end": 2014.89, "word": " are", "probability": 0.828125}, {"start": 2014.89, "end": 2015.13, "word": " talking", "probability": 0.802734375}, {"start": 2015.13, "end": 2015.69, "word": " about", "probability": 0.908203125}, {"start": 2015.69, "end": 2016.27, "word": " sigma", "probability": 0.363037109375}, {"start": 2016.27, "end": 2016.49, "word": " is", "probability": 0.86865234375}, {"start": 2016.49, "end": 2016.77, "word": " unknown,", "probability": 0.88037109375}, {"start": 2016.87, "end": 2017.09, "word": " is", "probability": 0.90380859375}, {"start": 2017.09, "end": 2017.47, "word": " known.", "probability": 0.63818359375}, {"start": 2021.99, "end": 2022.67, "word": " But", "probability": 0.75732421875}, {"start": 2022.67, "end": 2022.89, "word": " we", "probability": 0.90869140625}, {"start": 2022.89, "end": 2023.01, "word": " know", "probability": 0.8857421875}, {"start": 2023.01, "end": 2023.23, "word": " that", "probability": 0.85693359375}, {"start": 2023.23, "end": 2023.43, "word": " from", "probability": 0.87060546875}, 
{"start": 2023.43, "end": 2023.77, "word": " chapter", "probability": 0.56689453125}, {"start": 2023.77, "end": 2024.19, "word": " three", "probability": 0.603515625}, {"start": 2024.19, "end": 2024.63, "word": " that", "probability": 0.76318359375}, {"start": 2024.63, "end": 2025.55, "word": " sigma", "probability": 0.80908203125}, {"start": 2025.55, "end": 2026.31, "word": " square", "probability": 0.39794921875}, {"start": 2026.31, "end": 2026.71, "word": " root", "probability": 0.95263671875}, {"start": 2026.71, "end": 2027.93, "word": " sum", "probability": 0.60595703125}, {"start": 2027.93, "end": 2028.47, "word": " x", "probability": 0.8115234375}, {"start": 2028.47, "end": 2028.79, "word": " minus", "probability": 0.95849609375}, {"start": 2028.79, "end": 2029.13, "word": " mu", "probability": 0.8740234375}, {"start": 2029.13, "end": 2029.57, "word": " squared", "probability": 0.6201171875}, {"start": 2029.57, "end": 2030.01, "word": " divided", "probability": 0.732421875}, {"start": 2030.01, "end": 2030.23, "word": " by", "probability": 0.9697265625}, {"start": 2030.23, "end": 2030.55, "word": " capital", "probability": 0.92041015625}, {"start": 2030.55, "end": 2030.79, "word": " N.", "probability": 0.890625}, {"start": 2032.89, "end": 2033.57, "word": " So", "probability": 0.9482421875}, {"start": 2033.57, "end": 2033.79, "word": " in", "probability": 0.7958984375}, {"start": 2033.79, "end": 2033.97, "word": " order", "probability": 0.92626953125}, {"start": 2033.97, "end": 2034.25, "word": " to", "probability": 0.97216796875}, {"start": 2034.25, "end": 2034.75, "word": " compute", "probability": 0.85888671875}, {"start": 2034.75, "end": 2035.21, "word": " sigma,", "probability": 0.919921875}, {"start": 2036.81, "end": 2037.59, "word": " we", "probability": 0.9326171875}, {"start": 2037.59, "end": 2037.97, "word": " have", "probability": 0.947265625}, {"start": 2037.97, "end": 2038.15, "word": " to", "probability": 0.9697265625}, {"start": 2038.15, "end": 
2038.41, "word": " know", "probability": 0.89306640625}], "temperature": 1.0}, {"id": 75, "seek": 206741, "start": 2039.83, "end": 2067.41, "text": " Mu first, otherwise you cannot compute Sigma. So how can you say that Mu is unknown and Sigma is given or is known? So Mu should be unknown. Or Sigma is unknown. So Sigma is known has a big question mark. Because if Sigma is known, that means Mu should be known.", "tokens": [15601, 700, 11, 5911, 291, 2644, 14722, 36595, 13, 407, 577, 393, 291, 584, 300, 15601, 307, 9841, 293, 36595, 307, 2212, 420, 307, 2570, 30, 407, 15601, 820, 312, 9841, 13, 1610, 36595, 307, 9841, 13, 407, 36595, 307, 2570, 575, 257, 955, 1168, 1491, 13, 1436, 498, 36595, 307, 2570, 11, 300, 1355, 15601, 820, 312, 2570, 13], "avg_logprob": -0.21465163250438501, "compression_ratio": 1.6645569620253164, "no_speech_prob": 0.0, "words": [{"start": 2039.83, "end": 2040.15, "word": " Mu", "probability": 0.60302734375}, {"start": 2040.15, "end": 2040.55, "word": " first,", "probability": 0.83837890625}, {"start": 2041.11, "end": 2041.47, "word": " otherwise", "probability": 0.82275390625}, {"start": 2041.47, "end": 2041.73, "word": " you", "probability": 0.90234375}, {"start": 2041.73, "end": 2041.97, "word": " cannot", "probability": 0.7919921875}, {"start": 2041.97, "end": 2042.31, "word": " compute", "probability": 0.94091796875}, {"start": 2042.31, "end": 2042.71, "word": " Sigma.", "probability": 0.438720703125}, {"start": 2043.67, "end": 2044.09, "word": " So", "probability": 0.845703125}, {"start": 2044.09, "end": 2044.41, "word": " how", "probability": 0.71728515625}, {"start": 2044.41, "end": 2044.67, "word": " can", "probability": 0.9345703125}, {"start": 2044.67, "end": 2044.83, "word": " you", "probability": 0.9560546875}, {"start": 2044.83, "end": 2045.05, "word": " say", "probability": 0.94775390625}, {"start": 2045.05, "end": 2045.37, "word": " that", "probability": 0.89794921875}, {"start": 2045.37, "end": 2046.15, "word": " Mu", 
"probability": 0.56103515625}, {"start": 2046.15, "end": 2046.47, "word": " is", "probability": 0.94970703125}, {"start": 2046.47, "end": 2046.87, "word": " unknown", "probability": 0.88720703125}, {"start": 2046.87, "end": 2049.07, "word": " and", "probability": 0.7314453125}, {"start": 2049.07, "end": 2049.33, "word": " Sigma", "probability": 0.80322265625}, {"start": 2049.33, "end": 2049.55, "word": " is", "probability": 0.94921875}, {"start": 2049.55, "end": 2049.77, "word": " given", "probability": 0.8564453125}, {"start": 2049.77, "end": 2050.11, "word": " or", "probability": 0.56591796875}, {"start": 2050.11, "end": 2050.29, "word": " is", "probability": 0.84521484375}, {"start": 2050.29, "end": 2050.53, "word": " known?", "probability": 0.771484375}, {"start": 2052.87, "end": 2053.51, "word": " So", "probability": 0.62646484375}, {"start": 2053.51, "end": 2054.19, "word": " Mu", "probability": 0.85986328125}, {"start": 2054.19, "end": 2054.39, "word": " should", "probability": 0.95556640625}, {"start": 2054.39, "end": 2054.57, "word": " be", "probability": 0.96142578125}, {"start": 2054.57, "end": 2054.85, "word": " unknown.", "probability": 0.9384765625}, {"start": 2056.25, "end": 2056.89, "word": " Or", "probability": 0.93359375}, {"start": 2056.89, "end": 2057.21, "word": " Sigma", "probability": 0.8564453125}, {"start": 2057.21, "end": 2057.45, "word": " is", "probability": 0.95068359375}, {"start": 2057.45, "end": 2057.77, "word": " unknown.", "probability": 0.93310546875}, {"start": 2058.81, "end": 2058.97, "word": " So", "probability": 0.89306640625}, {"start": 2058.97, "end": 2060.15, "word": " Sigma", "probability": 0.521484375}, {"start": 2060.15, "end": 2060.47, "word": " is", "probability": 0.943359375}, {"start": 2060.47, "end": 2060.79, "word": " known", "probability": 0.75341796875}, {"start": 2060.79, "end": 2062.47, "word": " has", "probability": 0.708984375}, {"start": 2062.47, "end": 2062.61, "word": " a", "probability": 0.6025390625}, 
{"start": 2062.61, "end": 2062.77, "word": " big", "probability": 0.89013671875}, {"start": 2062.77, "end": 2063.25, "word": " question", "probability": 0.9072265625}, {"start": 2063.25, "end": 2063.55, "word": " mark.", "probability": 0.8896484375}, {"start": 2064.67, "end": 2065.03, "word": " Because", "probability": 0.880859375}, {"start": 2065.03, "end": 2065.17, "word": " if", "probability": 0.88232421875}, {"start": 2065.17, "end": 2065.37, "word": " Sigma", "probability": 0.89697265625}, {"start": 2065.37, "end": 2065.65, "word": " is", "probability": 0.95068359375}, {"start": 2065.65, "end": 2065.87, "word": " known,", "probability": 0.740234375}, {"start": 2066.11, "end": 2066.33, "word": " that", "probability": 0.8955078125}, {"start": 2066.33, "end": 2066.57, "word": " means", "probability": 0.93896484375}, {"start": 2066.57, "end": 2066.81, "word": " Mu", "probability": 0.68701171875}, {"start": 2066.81, "end": 2067.03, "word": " should", "probability": 0.96728515625}, {"start": 2067.03, "end": 2067.19, "word": " be", "probability": 0.95361328125}, {"start": 2067.19, "end": 2067.41, "word": " known.", "probability": 0.7646484375}], "temperature": 1.0}, {"id": 76, "seek": 209107, "start": 2069.45, "end": 2091.07, "text": " If µ is known, you don't need to select a random sample to estimate µ. Make sense? I know the value of µ. So what's the goal of selecting a random sample in order to estimate µ? You are saying sigma is given. 
That means µ is given or µ is known.", "tokens": [759, 1815, 113, 307, 2570, 11, 291, 500, 380, 643, 281, 3048, 257, 4974, 6889, 281, 12539, 1815, 113, 13, 4387, 2020, 30, 286, 458, 264, 2158, 295, 1815, 113, 13, 407, 437, 311, 264, 3387, 295, 18182, 257, 4974, 6889, 294, 1668, 281, 12539, 1815, 113, 30, 509, 366, 1566, 12771, 307, 2212, 13, 663, 1355, 1815, 113, 307, 2212, 420, 1815, 113, 307, 2570, 13], "avg_logprob": -0.17107077337363186, "compression_ratio": 1.5460122699386503, "no_speech_prob": 0.0, "words": [{"start": 2069.45, "end": 2069.79, "word": " If", "probability": 0.734375}, {"start": 2069.79, "end": 2070.05, "word": " µ", "probability": 0.548736572265625}, {"start": 2070.05, "end": 2070.31, "word": " is", "probability": 0.93603515625}, {"start": 2070.31, "end": 2070.63, "word": " known,", "probability": 0.7431640625}, {"start": 2070.89, "end": 2071.09, "word": " you", "probability": 0.93701171875}, {"start": 2071.09, "end": 2071.39, "word": " don't", "probability": 0.958740234375}, {"start": 2071.39, "end": 2071.65, "word": " need", "probability": 0.92236328125}, {"start": 2071.65, "end": 2071.87, "word": " to", "probability": 0.96875}, {"start": 2071.87, "end": 2072.21, "word": " select", "probability": 0.82275390625}, {"start": 2072.21, "end": 2072.43, "word": " a", "probability": 0.98876953125}, {"start": 2072.43, "end": 2072.65, "word": " random", "probability": 0.861328125}, {"start": 2072.65, "end": 2073.03, "word": " sample", "probability": 0.88623046875}, {"start": 2073.03, "end": 2073.31, "word": " to", "probability": 0.95458984375}, {"start": 2073.31, "end": 2073.63, "word": " estimate", "probability": 0.94921875}, {"start": 2073.63, "end": 2073.99, "word": " µ.", "probability": 0.950439453125}, {"start": 2074.75, "end": 2074.97, "word": " Make", "probability": 0.55517578125}, {"start": 2074.97, "end": 2075.29, "word": " sense?", "probability": 0.80712890625}, {"start": 2075.79, "end": 2076.39, "word": " I", "probability": 0.9501953125}, 
{"start": 2076.39, "end": 2076.53, "word": " know", "probability": 0.900390625}, {"start": 2076.53, "end": 2076.69, "word": " the", "probability": 0.9248046875}, {"start": 2076.69, "end": 2076.89, "word": " value", "probability": 0.982421875}, {"start": 2076.89, "end": 2077.01, "word": " of", "probability": 0.95703125}, {"start": 2077.01, "end": 2077.25, "word": " µ.", "probability": 0.980224609375}, {"start": 2078.23, "end": 2078.49, "word": " So", "probability": 0.931640625}, {"start": 2078.49, "end": 2079.05, "word": " what's", "probability": 0.7939453125}, {"start": 2079.05, "end": 2079.35, "word": " the", "probability": 0.927734375}, {"start": 2079.35, "end": 2079.73, "word": " goal", "probability": 0.96923828125}, {"start": 2079.73, "end": 2080.01, "word": " of", "probability": 0.9541015625}, {"start": 2080.01, "end": 2080.87, "word": " selecting", "probability": 0.89599609375}, {"start": 2080.87, "end": 2081.07, "word": " a", "probability": 0.990234375}, {"start": 2081.07, "end": 2081.29, "word": " random", "probability": 0.86767578125}, {"start": 2081.29, "end": 2081.75, "word": " sample", "probability": 0.88232421875}, {"start": 2081.75, "end": 2082.01, "word": " in", "probability": 0.9189453125}, {"start": 2082.01, "end": 2082.19, "word": " order", "probability": 0.923828125}, {"start": 2082.19, "end": 2082.59, "word": " to", "probability": 0.970703125}, {"start": 2082.59, "end": 2083.13, "word": " estimate", "probability": 0.94140625}, {"start": 2083.13, "end": 2083.47, "word": " µ?", "probability": 0.984375}, {"start": 2084.55, "end": 2085.15, "word": " You", "probability": 0.94775390625}, {"start": 2085.15, "end": 2085.29, "word": " are", "probability": 0.673828125}, {"start": 2085.29, "end": 2085.69, "word": " saying", "probability": 0.88623046875}, {"start": 2085.69, "end": 2086.55, "word": " sigma", "probability": 0.309326171875}, {"start": 2086.55, "end": 2086.77, "word": " is", "probability": 0.9521484375}, {"start": 2086.77, "end": 2087.07, 
"word": " given.", "probability": 0.89794921875}, {"start": 2088.33, "end": 2088.69, "word": " That", "probability": 0.9091796875}, {"start": 2088.69, "end": 2089.15, "word": " means", "probability": 0.9375}, {"start": 2089.15, "end": 2089.47, "word": " µ", "probability": 0.949951171875}, {"start": 2089.47, "end": 2089.67, "word": " is", "probability": 0.94970703125}, {"start": 2089.67, "end": 2089.93, "word": " given", "probability": 0.8740234375}, {"start": 2089.93, "end": 2090.43, "word": " or", "probability": 0.6376953125}, {"start": 2090.43, "end": 2090.63, "word": " µ", "probability": 0.9951171875}, {"start": 2090.63, "end": 2090.83, "word": " is", "probability": 0.94384765625}, {"start": 2090.83, "end": 2091.07, "word": " known.", "probability": 0.77001953125}], "temperature": 1.0}, {"id": 77, "seek": 212021, "start": 2094.05, "end": 2120.21, "text": " What's the benefit of selecting a random sample in order to estimate something is known? Always we estimate unknown parameter. But here, the reason why we are talking about sigma is given sometimes from previous studies. Sigma could be given, but we don't know the exact value of a mu. 
So we are interested", "tokens": [708, 311, 264, 5121, 295, 18182, 257, 4974, 6889, 294, 1668, 281, 12539, 746, 307, 2570, 30, 11270, 321, 12539, 9841, 13075, 13, 583, 510, 11, 264, 1778, 983, 321, 366, 1417, 466, 12771, 307, 2212, 2171, 490, 3894, 5313, 13, 36595, 727, 312, 2212, 11, 457, 321, 500, 380, 458, 264, 1900, 2158, 295, 257, 2992, 13, 407, 321, 366, 3102], "avg_logprob": -0.19952876652990068, "compression_ratio": 1.527363184079602, "no_speech_prob": 0.0, "words": [{"start": 2094.05, "end": 2094.43, "word": " What's", "probability": 0.72900390625}, {"start": 2094.43, "end": 2094.59, "word": " the", "probability": 0.9228515625}, {"start": 2094.59, "end": 2094.91, "word": " benefit", "probability": 0.802734375}, {"start": 2094.91, "end": 2095.85, "word": " of", "probability": 0.9423828125}, {"start": 2095.85, "end": 2096.29, "word": " selecting", "probability": 0.8818359375}, {"start": 2096.29, "end": 2096.49, "word": " a", "probability": 0.97802734375}, {"start": 2096.49, "end": 2096.71, "word": " random", "probability": 0.87060546875}, {"start": 2096.71, "end": 2097.15, "word": " sample", "probability": 0.8681640625}, {"start": 2097.15, "end": 2097.81, "word": " in", "probability": 0.8486328125}, {"start": 2097.81, "end": 2098.05, "word": " order", "probability": 0.92919921875}, {"start": 2098.05, "end": 2098.31, "word": " to", "probability": 0.96728515625}, {"start": 2098.31, "end": 2098.87, "word": " estimate", "probability": 0.921875}, {"start": 2098.87, "end": 2099.73, "word": " something", "probability": 0.76708984375}, {"start": 2099.73, "end": 2099.99, "word": " is", "probability": 0.8583984375}, {"start": 2099.99, "end": 2100.27, "word": " known?", "probability": 0.73974609375}, {"start": 2100.73, "end": 2101.37, "word": " Always", "probability": 0.783203125}, {"start": 2101.37, "end": 2101.61, "word": " we", "probability": 0.775390625}, {"start": 2101.61, "end": 2102.17, "word": " estimate", "probability": 0.91357421875}, {"start": 2102.17, 
"end": 2102.79, "word": " unknown", "probability": 0.87744140625}, {"start": 2102.79, "end": 2103.53, "word": " parameter.", "probability": 0.52783203125}, {"start": 2104.95, "end": 2105.21, "word": " But", "probability": 0.9248046875}, {"start": 2105.21, "end": 2105.57, "word": " here,", "probability": 0.837890625}, {"start": 2106.21, "end": 2106.53, "word": " the", "probability": 0.9169921875}, {"start": 2106.53, "end": 2106.81, "word": " reason", "probability": 0.970703125}, {"start": 2106.81, "end": 2107.09, "word": " why", "probability": 0.9111328125}, {"start": 2107.09, "end": 2107.25, "word": " we", "probability": 0.9560546875}, {"start": 2107.25, "end": 2107.43, "word": " are", "probability": 0.8896484375}, {"start": 2107.43, "end": 2107.85, "word": " talking", "probability": 0.84130859375}, {"start": 2107.85, "end": 2108.17, "word": " about", "probability": 0.9052734375}, {"start": 2108.17, "end": 2108.43, "word": " sigma", "probability": 0.5419921875}, {"start": 2108.43, "end": 2108.63, "word": " is", "probability": 0.912109375}, {"start": 2108.63, "end": 2108.91, "word": " given", "probability": 0.88916015625}, {"start": 2108.91, "end": 2109.99, "word": " sometimes", "probability": 0.58154296875}, {"start": 2109.99, "end": 2111.21, "word": " from", "probability": 0.724609375}, {"start": 2111.21, "end": 2111.75, "word": " previous", "probability": 0.884765625}, {"start": 2111.75, "end": 2112.25, "word": " studies.", "probability": 0.958984375}, {"start": 2113.27, "end": 2113.63, "word": " Sigma", "probability": 0.9150390625}, {"start": 2113.63, "end": 2114.09, "word": " could", "probability": 0.88671875}, {"start": 2114.09, "end": 2114.25, "word": " be", "probability": 0.9541015625}, {"start": 2114.25, "end": 2114.51, "word": " given,", "probability": 0.8857421875}, {"start": 2115.37, "end": 2115.65, "word": " but", "probability": 0.92138671875}, {"start": 2115.65, "end": 2115.85, "word": " we", "probability": 0.94921875}, {"start": 2115.85, "end": 
2116.13, "word": " don't", "probability": 0.976318359375}, {"start": 2116.13, "end": 2116.35, "word": " know", "probability": 0.89306640625}, {"start": 2116.35, "end": 2116.57, "word": " the", "probability": 0.91845703125}, {"start": 2116.57, "end": 2117.05, "word": " exact", "probability": 0.9375}, {"start": 2117.05, "end": 2117.55, "word": " value", "probability": 0.9716796875}, {"start": 2117.55, "end": 2118.39, "word": " of", "probability": 0.9697265625}, {"start": 2118.39, "end": 2118.53, "word": " a", "probability": 0.4892578125}, {"start": 2118.53, "end": 2118.69, "word": " mu.", "probability": 0.67626953125}, {"start": 2118.97, "end": 2119.33, "word": " So", "probability": 0.9580078125}, {"start": 2119.33, "end": 2119.55, "word": " we", "probability": 0.69140625}, {"start": 2119.55, "end": 2119.71, "word": " are", "probability": 0.92578125}, {"start": 2119.71, "end": 2120.21, "word": " interested", "probability": 0.84423828125}], "temperature": 1.0}, {"id": 78, "seek": 214832, "start": 2120.6, "end": 2148.32, "text": " to estimate the population mean mu. So it might be mu is unknown, but from previous studies or from the history we have, maybe we know the value of sigma. But in real life, most likely sigma is unknown. That will be discussed next time, inshallah. 
So here we'll discuss just the confidence interval for mu when sigma is not known.", "tokens": [281, 12539, 264, 4415, 914, 2992, 13, 407, 309, 1062, 312, 2992, 307, 9841, 11, 457, 490, 3894, 5313, 420, 490, 264, 2503, 321, 362, 11, 1310, 321, 458, 264, 2158, 295, 12771, 13, 583, 294, 957, 993, 11, 881, 3700, 12771, 307, 9841, 13, 663, 486, 312, 7152, 958, 565, 11, 1028, 71, 13492, 13, 407, 510, 321, 603, 2248, 445, 264, 6687, 15035, 337, 2992, 562, 12771, 307, 406, 2570, 13], "avg_logprob": -0.19288428973507238, "compression_ratio": 1.5837320574162679, "no_speech_prob": 0.0, "words": [{"start": 2120.6, "end": 2120.86, "word": " to", "probability": 0.5712890625}, {"start": 2120.86, "end": 2121.4, "word": " estimate", "probability": 0.92919921875}, {"start": 2121.4, "end": 2122.2, "word": " the", "probability": 0.845703125}, {"start": 2122.2, "end": 2123.02, "word": " population", "probability": 0.947265625}, {"start": 2123.02, "end": 2123.44, "word": " mean", "probability": 0.91015625}, {"start": 2123.44, "end": 2124.1, "word": " mu.", "probability": 0.1710205078125}, {"start": 2124.44, "end": 2124.66, "word": " So", "probability": 0.88330078125}, {"start": 2124.66, "end": 2124.8, "word": " it", "probability": 0.810546875}, {"start": 2124.8, "end": 2125.02, "word": " might", "probability": 0.89892578125}, {"start": 2125.02, "end": 2125.18, "word": " be", "probability": 0.90185546875}, {"start": 2125.18, "end": 2125.42, "word": " mu", "probability": 0.75341796875}, {"start": 2125.42, "end": 2125.58, "word": " is", "probability": 0.900390625}, {"start": 2125.58, "end": 2125.94, "word": " unknown,", "probability": 0.89404296875}, {"start": 2126.42, "end": 2126.62, "word": " but", "probability": 0.9248046875}, {"start": 2126.62, "end": 2127.12, "word": " from", "probability": 0.87646484375}, {"start": 2127.12, "end": 2127.58, "word": " previous", "probability": 0.84619140625}, {"start": 2127.58, "end": 2127.9, "word": " studies", "probability": 0.95703125}, {"start": 
2127.9, "end": 2128.2, "word": " or", "probability": 0.71142578125}, {"start": 2128.2, "end": 2128.44, "word": " from", "probability": 0.873046875}, {"start": 2128.44, "end": 2128.68, "word": " the", "probability": 0.91455078125}, {"start": 2128.68, "end": 2129.02, "word": " history", "probability": 0.86474609375}, {"start": 2129.02, "end": 2129.22, "word": " we", "probability": 0.93994140625}, {"start": 2129.22, "end": 2129.52, "word": " have,", "probability": 0.94482421875}, {"start": 2130.7, "end": 2131.04, "word": " maybe", "probability": 0.9443359375}, {"start": 2131.04, "end": 2131.28, "word": " we", "probability": 0.9541015625}, {"start": 2131.28, "end": 2131.5, "word": " know", "probability": 0.890625}, {"start": 2131.5, "end": 2131.68, "word": " the", "probability": 0.91748046875}, {"start": 2131.68, "end": 2132.0, "word": " value", "probability": 0.97412109375}, {"start": 2132.0, "end": 2132.28, "word": " of", "probability": 0.9677734375}, {"start": 2132.28, "end": 2132.56, "word": " sigma.", "probability": 0.802734375}, {"start": 2132.86, "end": 2133.16, "word": " But", "probability": 0.95068359375}, {"start": 2133.16, "end": 2133.48, "word": " in", "probability": 0.9384765625}, {"start": 2133.48, "end": 2133.88, "word": " real", "probability": 0.9599609375}, {"start": 2133.88, "end": 2134.32, "word": " life,", "probability": 0.92138671875}, {"start": 2135.2, "end": 2135.56, "word": " most", "probability": 0.9140625}, {"start": 2135.56, "end": 2135.98, "word": " likely", "probability": 0.91943359375}, {"start": 2135.98, "end": 2136.64, "word": " sigma", "probability": 0.71240234375}, {"start": 2136.64, "end": 2137.48, "word": " is", "probability": 0.9521484375}, {"start": 2137.48, "end": 2138.1, "word": " unknown.", "probability": 0.8994140625}, {"start": 2139.8, "end": 2140.44, "word": " That", "probability": 0.88525390625}, {"start": 2140.44, "end": 2140.58, "word": " will", "probability": 0.88720703125}, {"start": 2140.58, "end": 2140.76, "word": " 
be", "probability": 0.9560546875}, {"start": 2140.76, "end": 2141.28, "word": " discussed", "probability": 0.86328125}, {"start": 2141.28, "end": 2141.72, "word": " next", "probability": 0.9375}, {"start": 2141.72, "end": 2141.96, "word": " time,", "probability": 0.8916015625}, {"start": 2142.04, "end": 2142.28, "word": " inshallah.", "probability": 0.6846516927083334}, {"start": 2143.02, "end": 2143.4, "word": " So", "probability": 0.94677734375}, {"start": 2143.4, "end": 2143.76, "word": " here", "probability": 0.79541015625}, {"start": 2143.76, "end": 2144.04, "word": " we'll", "probability": 0.6611328125}, {"start": 2144.04, "end": 2144.38, "word": " discuss", "probability": 0.8564453125}, {"start": 2144.38, "end": 2144.74, "word": " just", "probability": 0.8955078125}, {"start": 2144.74, "end": 2144.94, "word": " the", "probability": 0.66259765625}, {"start": 2144.94, "end": 2145.22, "word": " confidence", "probability": 0.95703125}, {"start": 2145.22, "end": 2145.66, "word": " interval", "probability": 0.96240234375}, {"start": 2145.66, "end": 2146.02, "word": " for", "probability": 0.93994140625}, {"start": 2146.02, "end": 2146.26, "word": " mu", "probability": 0.93896484375}, {"start": 2146.26, "end": 2146.64, "word": " when", "probability": 0.7626953125}, {"start": 2146.64, "end": 2147.0, "word": " sigma", "probability": 0.94140625}, {"start": 2147.0, "end": 2147.48, "word": " is", "probability": 0.9501953125}, {"start": 2147.48, "end": 2147.9, "word": " not", "probability": 0.373291015625}, {"start": 2147.9, "end": 2148.32, "word": " known.", "probability": 0.72265625}], "temperature": 1.0}, {"id": 79, "seek": 217506, "start": 2149.0, "end": 2175.06, "text": " But again, if Sigma is known, a Mu should be known. Otherwise, we cannot compute Sigma. But here, when we are talking about Sigma is known, it means we know from previous studies that Sigma is given. Any question? 
Okay, so the assumptions for the first case are mainly there are two assumptions.", "tokens": [583, 797, 11, 498, 36595, 307, 2570, 11, 257, 15601, 820, 312, 2570, 13, 10328, 11, 321, 2644, 14722, 36595, 13, 583, 510, 11, 562, 321, 366, 1417, 466, 36595, 307, 2570, 11, 309, 1355, 321, 458, 490, 3894, 5313, 300, 36595, 307, 2212, 13, 2639, 1168, 30, 1033, 11, 370, 264, 17695, 337, 264, 700, 1389, 366, 8704, 456, 366, 732, 17695, 13], "avg_logprob": -0.2074519230769231, "compression_ratio": 1.5913978494623655, "no_speech_prob": 0.0, "words": [{"start": 2149.0, "end": 2149.28, "word": " But", "probability": 0.74658203125}, {"start": 2149.28, "end": 2149.58, "word": " again,", "probability": 0.89306640625}, {"start": 2150.04, "end": 2150.16, "word": " if", "probability": 0.9560546875}, {"start": 2150.16, "end": 2150.48, "word": " Sigma", "probability": 0.513671875}, {"start": 2150.48, "end": 2150.72, "word": " is", "probability": 0.9443359375}, {"start": 2150.72, "end": 2151.0, "word": " known,", "probability": 0.73681640625}, {"start": 2151.82, "end": 2152.0, "word": " a", "probability": 0.619140625}, {"start": 2152.0, "end": 2152.12, "word": " Mu", "probability": 0.7685546875}, {"start": 2152.12, "end": 2152.38, "word": " should", "probability": 0.96044921875}, {"start": 2152.38, "end": 2152.56, "word": " be", "probability": 0.95068359375}, {"start": 2152.56, "end": 2152.8, "word": " known.", "probability": 0.7236328125}, {"start": 2153.0, "end": 2153.38, "word": " Otherwise,", "probability": 0.91845703125}, {"start": 2153.64, "end": 2153.74, "word": " we", "probability": 0.95068359375}, {"start": 2153.74, "end": 2154.06, "word": " cannot", "probability": 0.84228515625}, {"start": 2154.06, "end": 2154.38, "word": " compute", "probability": 0.97265625}, {"start": 2154.38, "end": 2154.8, "word": " Sigma.", "probability": 0.8876953125}, {"start": 2155.64, "end": 2156.14, "word": " But", "probability": 0.9287109375}, {"start": 2156.14, "end": 2156.38, "word": " here,", 
"probability": 0.80810546875}, {"start": 2156.46, "end": 2156.56, "word": " when", "probability": 0.93359375}, {"start": 2156.56, "end": 2156.7, "word": " we", "probability": 0.958984375}, {"start": 2156.7, "end": 2156.84, "word": " are", "probability": 0.85888671875}, {"start": 2156.84, "end": 2157.14, "word": " talking", "probability": 0.84130859375}, {"start": 2157.14, "end": 2157.46, "word": " about", "probability": 0.8935546875}, {"start": 2157.46, "end": 2157.76, "word": " Sigma", "probability": 0.74267578125}, {"start": 2157.76, "end": 2158.02, "word": " is", "probability": 0.8310546875}, {"start": 2158.02, "end": 2158.24, "word": " known,", "probability": 0.7509765625}, {"start": 2158.34, "end": 2158.46, "word": " it", "probability": 0.92529296875}, {"start": 2158.46, "end": 2158.78, "word": " means", "probability": 0.9287109375}, {"start": 2158.78, "end": 2159.12, "word": " we", "probability": 0.80126953125}, {"start": 2159.12, "end": 2159.38, "word": " know", "probability": 0.88818359375}, {"start": 2159.38, "end": 2159.66, "word": " from", "probability": 0.86865234375}, {"start": 2159.66, "end": 2160.14, "word": " previous", "probability": 0.78662109375}, {"start": 2160.14, "end": 2160.5, "word": " studies", "probability": 0.9755859375}, {"start": 2160.5, "end": 2160.82, "word": " that", "probability": 0.8662109375}, {"start": 2160.82, "end": 2161.28, "word": " Sigma", "probability": 0.91357421875}, {"start": 2161.28, "end": 2161.52, "word": " is", "probability": 0.95263671875}, {"start": 2161.52, "end": 2161.76, "word": " given.", "probability": 0.91015625}, {"start": 2163.08, "end": 2163.3, "word": " Any", "probability": 0.9228515625}, {"start": 2163.3, "end": 2163.62, "word": " question?", "probability": 0.53466796875}, {"start": 2167.2, "end": 2167.44, "word": " Okay,", "probability": 0.52978515625}, {"start": 2167.78, "end": 2168.24, "word": " so", "probability": 0.93115234375}, {"start": 2168.24, "end": 2168.44, "word": " the", "probability": 
0.86669921875}, {"start": 2168.44, "end": 2168.94, "word": " assumptions", "probability": 0.95068359375}, {"start": 2168.94, "end": 2171.04, "word": " for", "probability": 0.80712890625}, {"start": 2171.04, "end": 2171.22, "word": " the", "probability": 0.91455078125}, {"start": 2171.22, "end": 2171.54, "word": " first", "probability": 0.86181640625}, {"start": 2171.54, "end": 2171.98, "word": " case", "probability": 0.9150390625}, {"start": 2171.98, "end": 2172.42, "word": " are", "probability": 0.71630859375}, {"start": 2172.42, "end": 2173.94, "word": " mainly", "probability": 0.416259765625}, {"start": 2173.94, "end": 2174.32, "word": " there", "probability": 0.54150390625}, {"start": 2174.32, "end": 2174.46, "word": " are", "probability": 0.94384765625}, {"start": 2174.46, "end": 2174.62, "word": " two", "probability": 0.9130859375}, {"start": 2174.62, "end": 2175.06, "word": " assumptions.", "probability": 0.96533203125}], "temperature": 1.0}, {"id": 80, "seek": 219780, "start": 2176.86, "end": 2197.8, "text": " Number one, population standard deviation sigma is known, so we know the value of sigma. The second assumption, the population is normally distributed, but if the population is not normal, you can use large sample. 
In this case, you can apply the central limit theorem.", "tokens": [5118, 472, 11, 4415, 3832, 25163, 12771, 307, 2570, 11, 370, 321, 458, 264, 2158, 295, 12771, 13, 440, 1150, 15302, 11, 264, 4415, 307, 5646, 12631, 11, 457, 498, 264, 4415, 307, 406, 2710, 11, 291, 393, 764, 2416, 6889, 13, 682, 341, 1389, 11, 291, 393, 3079, 264, 5777, 4948, 20904, 13], "avg_logprob": -0.18039772673086688, "compression_ratio": 1.6167664670658684, "no_speech_prob": 0.0, "words": [{"start": 2176.86, "end": 2177.16, "word": " Number", "probability": 0.5009765625}, {"start": 2177.16, "end": 2177.46, "word": " one,", "probability": 0.6396484375}, {"start": 2177.84, "end": 2178.28, "word": " population", "probability": 0.744140625}, {"start": 2178.28, "end": 2178.74, "word": " standard", "probability": 0.77392578125}, {"start": 2178.74, "end": 2179.08, "word": " deviation", "probability": 0.9296875}, {"start": 2179.08, "end": 2179.42, "word": " sigma", "probability": 0.58642578125}, {"start": 2179.42, "end": 2179.64, "word": " is", "probability": 0.9287109375}, {"start": 2179.64, "end": 2179.9, "word": " known,", "probability": 0.76904296875}, {"start": 2181.88, "end": 2182.14, "word": " so", "probability": 0.9248046875}, {"start": 2182.14, "end": 2182.26, "word": " we", "probability": 0.9013671875}, {"start": 2182.26, "end": 2182.36, "word": " know", "probability": 0.89013671875}, {"start": 2182.36, "end": 2182.54, "word": " the", "probability": 0.908203125}, {"start": 2182.54, "end": 2182.78, "word": " value", "probability": 0.97216796875}, {"start": 2182.78, "end": 2182.98, "word": " of", "probability": 0.96240234375}, {"start": 2182.98, "end": 2183.3, "word": " sigma.", "probability": 0.912109375}, {"start": 2184.62, "end": 2184.96, "word": " The", "probability": 0.8369140625}, {"start": 2184.96, "end": 2185.22, "word": " second", "probability": 0.90771484375}, {"start": 2185.22, "end": 2185.72, "word": " assumption,", "probability": 0.9775390625}, {"start": 2186.26, "end": 
2186.44, "word": " the", "probability": 0.8701171875}, {"start": 2186.44, "end": 2186.94, "word": " population", "probability": 0.9443359375}, {"start": 2186.94, "end": 2188.16, "word": " is", "probability": 0.94287109375}, {"start": 2188.16, "end": 2188.64, "word": " normally", "probability": 0.79541015625}, {"start": 2188.64, "end": 2189.36, "word": " distributed,", "probability": 0.92724609375}, {"start": 2190.98, "end": 2191.36, "word": " but", "probability": 0.91943359375}, {"start": 2191.36, "end": 2191.7, "word": " if", "probability": 0.9443359375}, {"start": 2191.7, "end": 2191.88, "word": " the", "probability": 0.9140625}, {"start": 2191.88, "end": 2192.26, "word": " population", "probability": 0.951171875}, {"start": 2192.26, "end": 2192.52, "word": " is", "probability": 0.94677734375}, {"start": 2192.52, "end": 2192.72, "word": " not", "probability": 0.94287109375}, {"start": 2192.72, "end": 2193.1, "word": " normal,", "probability": 0.77734375}, {"start": 2193.22, "end": 2193.34, "word": " you", "probability": 0.9443359375}, {"start": 2193.34, "end": 2193.54, "word": " can", "probability": 0.94140625}, {"start": 2193.54, "end": 2193.82, "word": " use", "probability": 0.880859375}, {"start": 2193.82, "end": 2194.2, "word": " large", "probability": 0.86669921875}, {"start": 2194.2, "end": 2194.64, "word": " sample.", "probability": 0.67822265625}, {"start": 2195.22, "end": 2195.38, "word": " In", "probability": 0.95458984375}, {"start": 2195.38, "end": 2195.62, "word": " this", "probability": 0.94580078125}, {"start": 2195.62, "end": 2195.88, "word": " case,", "probability": 0.9150390625}, {"start": 2195.98, "end": 2196.08, "word": " you", "probability": 0.9541015625}, {"start": 2196.08, "end": 2196.26, "word": " can", "probability": 0.728515625}, {"start": 2196.26, "end": 2196.62, "word": " apply", "probability": 0.7919921875}, {"start": 2196.62, "end": 2196.86, "word": " the", "probability": 0.8759765625}, {"start": 2196.86, "end": 2197.12, "word": " 
central", "probability": 0.82177734375}, {"start": 2197.12, "end": 2197.46, "word": " limit", "probability": 0.81103515625}, {"start": 2197.46, "end": 2197.8, "word": " theorem.", "probability": 0.56201171875}], "temperature": 1.0}, {"id": 81, "seek": 222542, "start": 2198.41, "end": 2225.43, "text": " Then we can say that the sampling distribution of X bar is approximately normally distributed with mean mu and standard deviation sigma over root N. So there are two assumptions here. One, sigma is known because here we are talking about sigma is known. The other one, the population is normally distributed or N is large if the population is not normal. In this case,", "tokens": [1396, 321, 393, 584, 300, 264, 21179, 7316, 295, 1783, 2159, 307, 10447, 5646, 12631, 365, 914, 2992, 293, 3832, 25163, 12771, 670, 5593, 426, 13, 407, 456, 366, 732, 17695, 510, 13, 1485, 11, 12771, 307, 2570, 570, 510, 321, 366, 1417, 466, 12771, 307, 2570, 13, 440, 661, 472, 11, 264, 4415, 307, 5646, 12631, 420, 426, 307, 2416, 498, 264, 4415, 307, 406, 2710, 13, 682, 341, 1389, 11], "avg_logprob": -0.20751284083274946, "compression_ratio": 1.740566037735849, "no_speech_prob": 0.0, "words": [{"start": 2198.41, "end": 2198.69, "word": " Then", "probability": 0.6025390625}, {"start": 2198.69, "end": 2198.85, "word": " we", "probability": 0.740234375}, {"start": 2198.85, "end": 2199.05, "word": " can", "probability": 0.9326171875}, {"start": 2199.05, "end": 2199.23, "word": " say", "probability": 0.6796875}, {"start": 2199.23, "end": 2199.51, "word": " that", "probability": 0.89501953125}, {"start": 2199.51, "end": 2199.67, "word": " the", "probability": 0.400390625}, {"start": 2199.67, "end": 2199.95, "word": " sampling", "probability": 0.86474609375}, {"start": 2199.95, "end": 2200.61, "word": " distribution", "probability": 0.85009765625}, {"start": 2200.61, "end": 2200.89, "word": " of", "probability": 0.951171875}, {"start": 2200.89, "end": 2201.09, "word": " X", "probability": 
0.72509765625}, {"start": 2201.09, "end": 2201.45, "word": " bar", "probability": 0.7001953125}, {"start": 2201.45, "end": 2201.99, "word": " is", "probability": 0.923828125}, {"start": 2201.99, "end": 2203.15, "word": " approximately", "probability": 0.794921875}, {"start": 2203.15, "end": 2203.65, "word": " normally", "probability": 0.8583984375}, {"start": 2203.65, "end": 2204.31, "word": " distributed", "probability": 0.90283203125}, {"start": 2204.31, "end": 2205.11, "word": " with", "probability": 0.72900390625}, {"start": 2205.11, "end": 2205.35, "word": " mean", "probability": 0.49462890625}, {"start": 2205.35, "end": 2205.69, "word": " mu", "probability": 0.529296875}, {"start": 2205.69, "end": 2206.13, "word": " and", "probability": 0.8935546875}, {"start": 2206.13, "end": 2206.47, "word": " standard", "probability": 0.91650390625}, {"start": 2206.47, "end": 2206.73, "word": " deviation", "probability": 0.900390625}, {"start": 2206.73, "end": 2207.07, "word": " sigma", "probability": 0.84228515625}, {"start": 2207.07, "end": 2207.29, "word": " over", "probability": 0.880859375}, {"start": 2207.29, "end": 2207.55, "word": " root", "probability": 0.93701171875}, {"start": 2207.55, "end": 2207.79, "word": " N.", "probability": 0.54296875}, {"start": 2208.51, "end": 2208.91, "word": " So", "probability": 0.9345703125}, {"start": 2208.91, "end": 2209.07, "word": " there", "probability": 0.74072265625}, {"start": 2209.07, "end": 2209.19, "word": " are", "probability": 0.9326171875}, {"start": 2209.19, "end": 2209.35, "word": " two", "probability": 0.9169921875}, {"start": 2209.35, "end": 2209.89, "word": " assumptions", "probability": 0.95849609375}, {"start": 2209.89, "end": 2210.21, "word": " here.", "probability": 0.802734375}, {"start": 2211.15, "end": 2211.47, "word": " One,", "probability": 0.90478515625}, {"start": 2211.93, "end": 2212.25, "word": " sigma", "probability": 0.9091796875}, {"start": 2212.25, "end": 2212.47, "word": " is", "probability": 
0.9501953125}, {"start": 2212.47, "end": 2212.71, "word": " known", "probability": 0.6767578125}, {"start": 2212.71, "end": 2214.09, "word": " because", "probability": 0.428466796875}, {"start": 2214.09, "end": 2214.29, "word": " here", "probability": 0.8251953125}, {"start": 2214.29, "end": 2214.41, "word": " we", "probability": 0.93310546875}, {"start": 2214.41, "end": 2214.53, "word": " are", "probability": 0.9189453125}, {"start": 2214.53, "end": 2214.87, "word": " talking", "probability": 0.85400390625}, {"start": 2214.87, "end": 2215.29, "word": " about", "probability": 0.90869140625}, {"start": 2215.29, "end": 2215.99, "word": " sigma", "probability": 0.8935546875}, {"start": 2215.99, "end": 2216.39, "word": " is", "probability": 0.87744140625}, {"start": 2216.39, "end": 2216.61, "word": " known.", "probability": 0.76708984375}, {"start": 2217.39, "end": 2217.61, "word": " The", "probability": 0.8896484375}, {"start": 2217.61, "end": 2217.83, "word": " other", "probability": 0.89208984375}, {"start": 2217.83, "end": 2218.11, "word": " one,", "probability": 0.92236328125}, {"start": 2218.29, "end": 2218.37, "word": " the", "probability": 0.9033203125}, {"start": 2218.37, "end": 2218.73, "word": " population", "probability": 0.96923828125}, {"start": 2218.73, "end": 2218.99, "word": " is", "probability": 0.94140625}, {"start": 2218.99, "end": 2219.35, "word": " normally", "probability": 0.9033203125}, {"start": 2219.35, "end": 2219.99, "word": " distributed", "probability": 0.9072265625}, {"start": 2219.99, "end": 2220.63, "word": " or", "probability": 0.8251953125}, {"start": 2220.63, "end": 2221.21, "word": " N", "probability": 0.47119140625}, {"start": 2221.21, "end": 2221.35, "word": " is", "probability": 0.95068359375}, {"start": 2221.35, "end": 2221.67, "word": " large", "probability": 0.56640625}, {"start": 2221.67, "end": 2222.13, "word": " if", "probability": 0.861328125}, {"start": 2222.13, "end": 2222.65, "word": " the", "probability": 0.87109375}, 
{"start": 2222.65, "end": 2222.99, "word": " population", "probability": 0.95263671875}, {"start": 2222.99, "end": 2223.39, "word": " is", "probability": 0.94677734375}, {"start": 2223.39, "end": 2223.63, "word": " not", "probability": 0.92724609375}, {"start": 2223.63, "end": 2223.97, "word": " normal.", "probability": 0.466552734375}, {"start": 2224.49, "end": 2224.79, "word": " In", "probability": 0.95751953125}, {"start": 2224.79, "end": 2225.03, "word": " this", "probability": 0.9462890625}, {"start": 2225.03, "end": 2225.43, "word": " case,", "probability": 0.91259765625}], "temperature": 1.0}, {"id": 82, "seek": 225382, "start": 2226.54, "end": 2253.82, "text": " The confidence interval estimate is given by this equation, x bar. As we mentioned before, any confidence interval can be built by using three components. Point estimate x bar, critical value z, forget about alpha over two for a minute, multiplied by the standard error of the estimate. And we know that, we know that the standard error of x bar is sigma over root n.", "tokens": [440, 6687, 15035, 12539, 307, 2212, 538, 341, 5367, 11, 2031, 2159, 13, 1018, 321, 2835, 949, 11, 604, 6687, 15035, 393, 312, 3094, 538, 1228, 1045, 6677, 13, 12387, 12539, 2031, 2159, 11, 4924, 2158, 710, 11, 2870, 466, 8961, 670, 732, 337, 257, 3456, 11, 17207, 538, 264, 3832, 6713, 295, 264, 12539, 13, 400, 321, 458, 300, 11, 321, 458, 300, 264, 3832, 6713, 295, 2031, 2159, 307, 12771, 670, 5593, 297, 13], "avg_logprob": -0.188514611937783, "compression_ratio": 1.727699530516432, "no_speech_prob": 0.0, "words": [{"start": 2226.54, "end": 2226.8, "word": " The", "probability": 0.55712890625}, {"start": 2226.8, "end": 2227.32, "word": " confidence", "probability": 0.93896484375}, {"start": 2227.32, "end": 2227.8, "word": " interval", "probability": 0.98193359375}, {"start": 2227.8, "end": 2228.48, "word": " estimate", "probability": 0.87158203125}, {"start": 2228.48, "end": 2229.12, "word": " is", "probability": 
0.89990234375}, {"start": 2229.12, "end": 2229.38, "word": " given", "probability": 0.876953125}, {"start": 2229.38, "end": 2229.64, "word": " by", "probability": 0.9599609375}, {"start": 2229.64, "end": 2229.86, "word": " this", "probability": 0.9033203125}, {"start": 2229.86, "end": 2230.38, "word": " equation,", "probability": 0.97607421875}, {"start": 2231.02, "end": 2231.18, "word": " x", "probability": 0.6591796875}, {"start": 2231.18, "end": 2231.5, "word": " bar.", "probability": 0.77734375}, {"start": 2232.98, "end": 2233.38, "word": " As", "probability": 0.9375}, {"start": 2233.38, "end": 2233.5, "word": " we", "probability": 0.88818359375}, {"start": 2233.5, "end": 2233.74, "word": " mentioned", "probability": 0.8017578125}, {"start": 2233.74, "end": 2234.1, "word": " before,", "probability": 0.85498046875}, {"start": 2234.28, "end": 2234.48, "word": " any", "probability": 0.90478515625}, {"start": 2234.48, "end": 2234.96, "word": " confidence", "probability": 0.98388671875}, {"start": 2234.96, "end": 2235.32, "word": " interval", "probability": 0.9638671875}, {"start": 2235.32, "end": 2235.66, "word": " can", "probability": 0.95068359375}, {"start": 2235.66, "end": 2235.94, "word": " be", "probability": 0.95703125}, {"start": 2235.94, "end": 2236.3, "word": " built", "probability": 0.8466796875}, {"start": 2236.3, "end": 2236.82, "word": " by", "probability": 0.9169921875}, {"start": 2236.82, "end": 2237.16, "word": " using", "probability": 0.94091796875}, {"start": 2237.16, "end": 2237.48, "word": " three", "probability": 0.76416015625}, {"start": 2237.48, "end": 2238.14, "word": " components.", "probability": 0.91748046875}, {"start": 2238.92, "end": 2239.36, "word": " Point", "probability": 0.81982421875}, {"start": 2239.36, "end": 2239.88, "word": " estimate", "probability": 0.88818359375}, {"start": 2239.88, "end": 2240.26, "word": " x", "probability": 0.72412109375}, {"start": 2240.26, "end": 2240.6, "word": " bar,", "probability": 0.92333984375}, 
{"start": 2241.54, "end": 2242.14, "word": " critical", "probability": 0.89599609375}, {"start": 2242.14, "end": 2242.64, "word": " value", "probability": 0.94091796875}, {"start": 2242.64, "end": 2242.9, "word": " z,", "probability": 0.78076171875}, {"start": 2243.1, "end": 2243.4, "word": " forget", "probability": 0.8427734375}, {"start": 2243.4, "end": 2243.72, "word": " about", "probability": 0.896484375}, {"start": 2243.72, "end": 2243.96, "word": " alpha", "probability": 0.84716796875}, {"start": 2243.96, "end": 2244.24, "word": " over", "probability": 0.91552734375}, {"start": 2244.24, "end": 2244.54, "word": " two", "probability": 0.53173828125}, {"start": 2244.54, "end": 2244.78, "word": " for", "probability": 0.91259765625}, {"start": 2244.78, "end": 2244.9, "word": " a", "probability": 0.9912109375}, {"start": 2244.9, "end": 2245.14, "word": " minute,", "probability": 0.93212890625}, {"start": 2246.18, "end": 2246.56, "word": " multiplied", "probability": 0.68896484375}, {"start": 2246.56, "end": 2246.96, "word": " by", "probability": 0.96728515625}, {"start": 2246.96, "end": 2247.2, "word": " the", "probability": 0.90380859375}, {"start": 2247.2, "end": 2247.56, "word": " standard", "probability": 0.93408203125}, {"start": 2247.56, "end": 2247.88, "word": " error", "probability": 0.87646484375}, {"start": 2247.88, "end": 2248.48, "word": " of", "probability": 0.96533203125}, {"start": 2248.48, "end": 2248.68, "word": " the", "probability": 0.91796875}, {"start": 2248.68, "end": 2249.2, "word": " estimate.", "probability": 0.90625}, {"start": 2249.42, "end": 2249.56, "word": " And", "probability": 0.9228515625}, {"start": 2249.56, "end": 2249.72, "word": " we", "probability": 0.94677734375}, {"start": 2249.72, "end": 2249.96, "word": " know", "probability": 0.8876953125}, {"start": 2249.96, "end": 2250.18, "word": " that,", "probability": 0.88232421875}, {"start": 2250.28, "end": 2250.48, "word": " we", "probability": 0.859375}, {"start": 2250.48, "end": 
2250.64, "word": " know", "probability": 0.88623046875}, {"start": 2250.64, "end": 2250.94, "word": " that", "probability": 0.93310546875}, {"start": 2250.94, "end": 2251.52, "word": " the", "probability": 0.73828125}, {"start": 2251.52, "end": 2251.92, "word": " standard", "probability": 0.943359375}, {"start": 2251.92, "end": 2252.14, "word": " error", "probability": 0.8330078125}, {"start": 2252.14, "end": 2252.28, "word": " of", "probability": 0.93359375}, {"start": 2252.28, "end": 2252.46, "word": " x", "probability": 0.9755859375}, {"start": 2252.46, "end": 2252.66, "word": " bar", "probability": 0.93310546875}, {"start": 2252.66, "end": 2252.84, "word": " is", "probability": 0.93798828125}, {"start": 2252.84, "end": 2253.06, "word": " sigma", "probability": 0.93310546875}, {"start": 2253.06, "end": 2253.34, "word": " over", "probability": 0.919921875}, {"start": 2253.34, "end": 2253.58, "word": " root", "probability": 0.94287109375}, {"start": 2253.58, "end": 2253.82, "word": " n.", "probability": 0.826171875}], "temperature": 1.0}, {"id": 83, "seek": 228216, "start": 2256.08, "end": 2282.16, "text": " Z alpha over 2 comes from the fact that we have for example here 95% this is 1 minus alpha. 1 minus alpha is 95%. 
So 5% remaining to both sides upper tail will have for example 2.5% and lower tail the same percent 2.5%.", "tokens": [1176, 8961, 670, 568, 1487, 490, 264, 1186, 300, 321, 362, 337, 1365, 510, 13420, 4, 341, 307, 502, 3175, 8961, 13, 502, 3175, 8961, 307, 13420, 6856, 407, 1025, 4, 8877, 281, 1293, 4881, 6597, 6838, 486, 362, 337, 1365, 568, 13, 20, 4, 293, 3126, 6838, 264, 912, 3043, 568, 13, 20, 6856], "avg_logprob": -0.23688615432807378, "compression_ratio": 1.4965986394557824, "no_speech_prob": 0.0, "words": [{"start": 2256.08, "end": 2256.46, "word": " Z", "probability": 0.2496337890625}, {"start": 2256.46, "end": 2256.72, "word": " alpha", "probability": 0.5810546875}, {"start": 2256.72, "end": 2257.0, "word": " over", "probability": 0.830078125}, {"start": 2257.0, "end": 2257.18, "word": " 2", "probability": 0.53564453125}, {"start": 2257.18, "end": 2257.58, "word": " comes", "probability": 0.77587890625}, {"start": 2257.58, "end": 2258.08, "word": " from", "probability": 0.8984375}, {"start": 2258.08, "end": 2259.0, "word": " the", "probability": 0.8818359375}, {"start": 2259.0, "end": 2259.3, "word": " fact", "probability": 0.90869140625}, {"start": 2259.3, "end": 2259.78, "word": " that", "probability": 0.9296875}, {"start": 2259.78, "end": 2260.88, "word": " we", "probability": 0.75537109375}, {"start": 2260.88, "end": 2261.32, "word": " have", "probability": 0.95263671875}, {"start": 2261.32, "end": 2261.86, "word": " for", "probability": 0.404541015625}, {"start": 2261.86, "end": 2262.24, "word": " example", "probability": 0.96630859375}, {"start": 2262.24, "end": 2262.52, "word": " here", "probability": 0.7578125}, {"start": 2262.52, "end": 2262.92, "word": " 95", "probability": 0.92041015625}, {"start": 2262.92, "end": 2263.46, "word": "%", "probability": 0.416015625}, {"start": 2263.46, "end": 2264.34, "word": " this", "probability": 0.5693359375}, {"start": 2264.34, "end": 2264.44, "word": " is", "probability": 0.89404296875}, {"start": 2264.44, 
"end": 2264.56, "word": " 1", "probability": 0.79248046875}, {"start": 2264.56, "end": 2264.82, "word": " minus", "probability": 0.77294921875}, {"start": 2264.82, "end": 2265.14, "word": " alpha.", "probability": 0.869140625}, {"start": 2267.34, "end": 2267.64, "word": " 1", "probability": 0.75048828125}, {"start": 2267.64, "end": 2267.92, "word": " minus", "probability": 0.9609375}, {"start": 2267.92, "end": 2268.12, "word": " alpha", "probability": 0.904296875}, {"start": 2268.12, "end": 2268.3, "word": " is", "probability": 0.94287109375}, {"start": 2268.3, "end": 2269.38, "word": " 95%.", "probability": 0.811767578125}, {"start": 2269.38, "end": 2270.54, "word": " So", "probability": 0.88720703125}, {"start": 2270.54, "end": 2270.84, "word": " 5", "probability": 0.76806640625}, {"start": 2270.84, "end": 2271.1, "word": "%", "probability": 0.96875}, {"start": 2271.1, "end": 2271.96, "word": " remaining", "probability": 0.84814453125}, {"start": 2271.96, "end": 2273.18, "word": " to", "probability": 0.91259765625}, {"start": 2273.18, "end": 2273.5, "word": " both", "probability": 0.896484375}, {"start": 2273.5, "end": 2273.82, "word": " sides", "probability": 0.87744140625}, {"start": 2273.82, "end": 2274.12, "word": " upper", "probability": 0.626953125}, {"start": 2274.12, "end": 2274.52, "word": " tail", "probability": 0.892578125}, {"start": 2274.52, "end": 2275.74, "word": " will", "probability": 0.8388671875}, {"start": 2275.74, "end": 2276.0, "word": " have", "probability": 0.94970703125}, {"start": 2276.0, "end": 2276.18, "word": " for", "probability": 0.8994140625}, {"start": 2276.18, "end": 2276.44, "word": " example", "probability": 0.97705078125}, {"start": 2276.44, "end": 2276.7, "word": " 2", "probability": 0.9521484375}, {"start": 2276.7, "end": 2277.34, "word": ".5", "probability": 0.99560546875}, {"start": 2277.34, "end": 2277.36, "word": "%", "probability": 0.325439453125}, {"start": 2277.36, "end": 2278.5, "word": " and", "probability": 
0.91650390625}, {"start": 2278.5, "end": 2278.82, "word": " lower", "probability": 0.71728515625}, {"start": 2278.82, "end": 2279.22, "word": " tail", "probability": 0.89111328125}, {"start": 2279.22, "end": 2279.74, "word": " the", "probability": 0.724609375}, {"start": 2279.74, "end": 2279.98, "word": " same", "probability": 0.91357421875}, {"start": 2279.98, "end": 2280.26, "word": " percent", "probability": 0.72509765625}, {"start": 2280.26, "end": 2280.46, "word": " 2", "probability": 0.8193359375}, {"start": 2280.46, "end": 2282.16, "word": ".5%.", "probability": 0.9078776041666666}], "temperature": 1.0}, {"id": 84, "seek": 230856, "start": 2284.64, "end": 2308.56, "text": " Now since 1 minus alpha equals 95%, that means alpha is 5% for both sides. So this one alpha over 2, the other one is alpha over 2. So this is your z. So z alpha over 2. The other side plus z alpha over 2. So x bar is the point estimate.", "tokens": [823, 1670, 502, 3175, 8961, 6915, 13420, 8923, 300, 1355, 8961, 307, 1025, 4, 337, 1293, 4881, 13, 407, 341, 472, 8961, 670, 568, 11, 264, 661, 472, 307, 8961, 670, 568, 13, 407, 341, 307, 428, 710, 13, 407, 710, 8961, 670, 568, 13, 440, 661, 1252, 1804, 710, 8961, 670, 568, 13, 407, 2031, 2159, 307, 264, 935, 12539, 13], "avg_logprob": -0.19370039540623862, "compression_ratio": 1.63013698630137, "no_speech_prob": 0.0, "words": [{"start": 2284.64, "end": 2284.96, "word": " Now", "probability": 0.84423828125}, {"start": 2284.96, "end": 2285.36, "word": " since", "probability": 0.5791015625}, {"start": 2285.36, "end": 2285.64, "word": " 1", "probability": 0.52587890625}, {"start": 2285.64, "end": 2285.92, "word": " minus", "probability": 0.64111328125}, {"start": 2285.92, "end": 2286.18, "word": " alpha", "probability": 0.7294921875}, {"start": 2286.18, "end": 2286.44, "word": " equals", "probability": 0.68408203125}, {"start": 2286.44, "end": 2287.34, "word": " 95%,", "probability": 0.72900390625}, {"start": 2287.34, "end": 2287.66, "word": " 
that", "probability": 0.8828125}, {"start": 2287.66, "end": 2287.92, "word": " means", "probability": 0.916015625}, {"start": 2287.92, "end": 2288.24, "word": " alpha", "probability": 0.8701171875}, {"start": 2288.24, "end": 2288.48, "word": " is", "probability": 0.90869140625}, {"start": 2288.48, "end": 2288.8, "word": " 5", "probability": 0.89501953125}, {"start": 2288.8, "end": 2289.2, "word": "%", "probability": 0.392578125}, {"start": 2289.2, "end": 2290.68, "word": " for", "probability": 0.861328125}, {"start": 2290.68, "end": 2290.98, "word": " both", "probability": 0.88916015625}, {"start": 2290.98, "end": 2291.46, "word": " sides.", "probability": 0.8916015625}, {"start": 2291.92, "end": 2292.1, "word": " So", "probability": 0.92333984375}, {"start": 2292.1, "end": 2292.3, "word": " this", "probability": 0.8740234375}, {"start": 2292.3, "end": 2292.52, "word": " one", "probability": 0.88330078125}, {"start": 2292.52, "end": 2292.88, "word": " alpha", "probability": 0.52392578125}, {"start": 2292.88, "end": 2293.16, "word": " over", "probability": 0.91162109375}, {"start": 2293.16, "end": 2293.42, "word": " 2,", "probability": 0.83740234375}, {"start": 2294.06, "end": 2294.42, "word": " the", "probability": 0.810546875}, {"start": 2294.42, "end": 2294.64, "word": " other", "probability": 0.8935546875}, {"start": 2294.64, "end": 2294.84, "word": " one", "probability": 0.91357421875}, {"start": 2294.84, "end": 2294.98, "word": " is", "probability": 0.9306640625}, {"start": 2294.98, "end": 2295.18, "word": " alpha", "probability": 0.89306640625}, {"start": 2295.18, "end": 2295.4, "word": " over", "probability": 0.9267578125}, {"start": 2295.4, "end": 2295.52, "word": " 2.", "probability": 0.98046875}, {"start": 2295.56, "end": 2295.72, "word": " So", "probability": 0.94091796875}, {"start": 2295.72, "end": 2295.92, "word": " this", "probability": 0.91748046875}, {"start": 2295.92, "end": 2296.04, "word": " is", "probability": 0.9423828125}, {"start": 2296.04, 
"end": 2296.22, "word": " your", "probability": 0.88720703125}, {"start": 2296.22, "end": 2296.42, "word": " z.", "probability": 0.66162109375}, {"start": 2297.14, "end": 2297.52, "word": " So", "probability": 0.91552734375}, {"start": 2297.52, "end": 2297.68, "word": " z", "probability": 0.93115234375}, {"start": 2297.68, "end": 2298.06, "word": " alpha", "probability": 0.84716796875}, {"start": 2298.06, "end": 2298.34, "word": " over", "probability": 0.9228515625}, {"start": 2298.34, "end": 2298.64, "word": " 2.", "probability": 0.8837890625}, {"start": 2301.94, "end": 2302.18, "word": " The", "probability": 0.78955078125}, {"start": 2302.18, "end": 2302.38, "word": " other", "probability": 0.89599609375}, {"start": 2302.38, "end": 2302.86, "word": " side", "probability": 0.8818359375}, {"start": 2302.86, "end": 2304.38, "word": " plus", "probability": 0.7880859375}, {"start": 2304.38, "end": 2305.56, "word": " z", "probability": 0.6533203125}, {"start": 2305.56, "end": 2305.72, "word": " alpha", "probability": 0.84814453125}, {"start": 2305.72, "end": 2305.88, "word": " over", "probability": 0.890625}, {"start": 2305.88, "end": 2305.9, "word": " 2.", "probability": 0.98095703125}, {"start": 2306.4, "end": 2306.68, "word": " So", "probability": 0.9541015625}, {"start": 2306.68, "end": 2306.96, "word": " x", "probability": 0.84619140625}, {"start": 2306.96, "end": 2307.24, "word": " bar", "probability": 0.72802734375}, {"start": 2307.24, "end": 2307.62, "word": " is", "probability": 0.95263671875}, {"start": 2307.62, "end": 2307.82, "word": " the", "probability": 0.91357421875}, {"start": 2307.82, "end": 2308.08, "word": " point", "probability": 0.97119140625}, {"start": 2308.08, "end": 2308.56, "word": " estimate.", "probability": 0.9267578125}], "temperature": 1.0}, {"id": 85, "seek": 233840, "start": 2310.42, "end": 2338.4, "text": " Z alpha over 2 is the normal distribution in critical value for probability of alpha over 2 in each tail. 
And sigma over root n is a standard error. So again, this is the first formula we have in this chapter to construct the confidence interval with 1 minus alpha confidence level. So 1 minus alpha person confidence interval.", "tokens": [1176, 8961, 670, 568, 307, 264, 2710, 7316, 294, 4924, 2158, 337, 8482, 295, 8961, 670, 568, 294, 1184, 6838, 13, 400, 12771, 670, 5593, 297, 307, 257, 3832, 6713, 13, 407, 797, 11, 341, 307, 264, 700, 8513, 321, 362, 294, 341, 7187, 281, 7690, 264, 6687, 15035, 365, 502, 3175, 8961, 6687, 1496, 13, 407, 502, 3175, 8961, 954, 6687, 15035, 13], "avg_logprob": -0.19831730769230768, "compression_ratio": 1.6994818652849741, "no_speech_prob": 0.0, "words": [{"start": 2310.42, "end": 2310.78, "word": " Z", "probability": 0.53564453125}, {"start": 2310.78, "end": 2311.1, "word": " alpha", "probability": 0.85009765625}, {"start": 2311.1, "end": 2311.32, "word": " over", "probability": 0.87890625}, {"start": 2311.32, "end": 2311.66, "word": " 2", "probability": 0.71826171875}, {"start": 2311.66, "end": 2312.42, "word": " is", "probability": 0.85205078125}, {"start": 2312.42, "end": 2312.56, "word": " the", "probability": 0.90966796875}, {"start": 2312.56, "end": 2312.86, "word": " normal", "probability": 0.8603515625}, {"start": 2312.86, "end": 2313.48, "word": " distribution", "probability": 0.85107421875}, {"start": 2313.48, "end": 2313.68, "word": " in", "probability": 0.187255859375}, {"start": 2313.68, "end": 2313.96, "word": " critical", "probability": 0.89794921875}, {"start": 2313.96, "end": 2314.36, "word": " value", "probability": 0.9716796875}, {"start": 2314.36, "end": 2314.62, "word": " for", "probability": 0.85205078125}, {"start": 2314.62, "end": 2315.22, "word": " probability", "probability": 0.5322265625}, {"start": 2315.22, "end": 2315.46, "word": " of", "probability": 0.92333984375}, {"start": 2315.46, "end": 2315.76, "word": " alpha", "probability": 0.9306640625}, {"start": 2315.76, "end": 2316.02, "word": " over", 
"probability": 0.876953125}, {"start": 2316.02, "end": 2316.2, "word": " 2", "probability": 0.857421875}, {"start": 2316.2, "end": 2316.32, "word": " in", "probability": 0.82958984375}, {"start": 2316.32, "end": 2316.54, "word": " each", "probability": 0.94580078125}, {"start": 2316.54, "end": 2316.84, "word": " tail.", "probability": 0.39306640625}, {"start": 2318.98, "end": 2319.36, "word": " And", "probability": 0.94384765625}, {"start": 2319.36, "end": 2319.68, "word": " sigma", "probability": 0.8681640625}, {"start": 2319.68, "end": 2320.0, "word": " over", "probability": 0.900390625}, {"start": 2320.0, "end": 2320.26, "word": " root", "probability": 0.59716796875}, {"start": 2320.26, "end": 2320.52, "word": " n", "probability": 0.68359375}, {"start": 2320.52, "end": 2321.06, "word": " is", "probability": 0.9375}, {"start": 2321.06, "end": 2321.24, "word": " a", "probability": 0.712890625}, {"start": 2321.24, "end": 2321.58, "word": " standard", "probability": 0.77587890625}, {"start": 2321.58, "end": 2322.02, "word": " error.", "probability": 0.87841796875}, {"start": 2322.42, "end": 2322.66, "word": " So", "probability": 0.95068359375}, {"start": 2322.66, "end": 2322.96, "word": " again,", "probability": 0.87060546875}, {"start": 2323.12, "end": 2323.3, "word": " this", "probability": 0.943359375}, {"start": 2323.3, "end": 2323.42, "word": " is", "probability": 0.94677734375}, {"start": 2323.42, "end": 2323.54, "word": " the", "probability": 0.916015625}, {"start": 2323.54, "end": 2323.86, "word": " first", "probability": 0.85400390625}, {"start": 2323.86, "end": 2324.24, "word": " formula", "probability": 0.8916015625}, {"start": 2324.24, "end": 2324.44, "word": " we", "probability": 0.93017578125}, {"start": 2324.44, "end": 2324.7, "word": " have", "probability": 0.9443359375}, {"start": 2324.7, "end": 2324.9, "word": " in", "probability": 0.8935546875}, {"start": 2324.9, "end": 2325.08, "word": " this", "probability": 0.89111328125}, {"start": 2325.08, 
"end": 2325.48, "word": " chapter", "probability": 0.8671875}, {"start": 2325.48, "end": 2326.6, "word": " to", "probability": 0.8017578125}, {"start": 2326.6, "end": 2328.02, "word": " construct", "probability": 0.970703125}, {"start": 2328.02, "end": 2328.74, "word": " the", "probability": 0.91650390625}, {"start": 2328.74, "end": 2329.46, "word": " confidence", "probability": 0.98291015625}, {"start": 2329.46, "end": 2331.02, "word": " interval", "probability": 0.921875}, {"start": 2331.02, "end": 2331.36, "word": " with", "probability": 0.869140625}, {"start": 2331.36, "end": 2331.6, "word": " 1", "probability": 0.76708984375}, {"start": 2331.6, "end": 2331.86, "word": " minus", "probability": 0.91015625}, {"start": 2331.86, "end": 2332.2, "word": " alpha", "probability": 0.93701171875}, {"start": 2332.2, "end": 2332.62, "word": " confidence", "probability": 0.98486328125}, {"start": 2332.62, "end": 2332.98, "word": " level.", "probability": 0.93701171875}, {"start": 2333.64, "end": 2333.84, "word": " So", "probability": 0.9619140625}, {"start": 2333.84, "end": 2334.1, "word": " 1", "probability": 0.79296875}, {"start": 2334.1, "end": 2334.4, "word": " minus", "probability": 0.96533203125}, {"start": 2334.4, "end": 2334.82, "word": " alpha", "probability": 0.93701171875}, {"start": 2334.82, "end": 2336.46, "word": " person", "probability": 0.3427734375}, {"start": 2336.46, "end": 2337.22, "word": " confidence", "probability": 0.90283203125}, {"start": 2337.22, "end": 2338.4, "word": " interval.", "probability": 0.95751953125}], "temperature": 1.0}, {"id": 86, "seek": 236817, "start": 2339.41, "end": 2368.17, "text": " For mu is x bar plus or minus z alpha over 2 times plus or minus sigma x bar plus or minus z alpha over 2 times sigma over square root of n. So this is a formula can be used in order to construct confidence interval for the population mean mu. 
The lower limit is given by x bar minus this amount.", "tokens": [1171, 2992, 307, 2031, 2159, 1804, 420, 3175, 710, 8961, 670, 568, 1413, 1804, 420, 3175, 12771, 2031, 2159, 1804, 420, 3175, 710, 8961, 670, 568, 1413, 12771, 670, 3732, 5593, 295, 297, 13, 407, 341, 307, 257, 8513, 393, 312, 1143, 294, 1668, 281, 7690, 6687, 15035, 337, 264, 4415, 914, 2992, 13, 440, 3126, 4948, 307, 2212, 538, 2031, 2159, 3175, 341, 2372, 13], "avg_logprob": -0.20009328358208955, "compression_ratio": 1.716763005780347, "no_speech_prob": 0.0, "words": [{"start": 2339.41, "end": 2339.69, "word": " For", "probability": 0.55517578125}, {"start": 2339.69, "end": 2339.99, "word": " mu", "probability": 0.2008056640625}, {"start": 2339.99, "end": 2340.47, "word": " is", "probability": 0.497802734375}, {"start": 2340.47, "end": 2342.51, "word": " x", "probability": 0.74462890625}, {"start": 2342.51, "end": 2342.71, "word": " bar", "probability": 0.76318359375}, {"start": 2342.71, "end": 2342.95, "word": " plus", "probability": 0.96142578125}, {"start": 2342.95, "end": 2343.19, "word": " or", "probability": 0.94287109375}, {"start": 2343.19, "end": 2343.41, "word": " minus", "probability": 0.9912109375}, {"start": 2343.41, "end": 2343.75, "word": " z", "probability": 0.93505859375}, {"start": 2343.75, "end": 2345.23, "word": " alpha", "probability": 0.7109375}, {"start": 2345.23, "end": 2345.59, "word": " over", "probability": 0.92333984375}, {"start": 2345.59, "end": 2345.97, "word": " 2", "probability": 0.484619140625}, {"start": 2345.97, "end": 2346.83, "word": " times", "probability": 0.7490234375}, {"start": 2346.83, "end": 2347.73, "word": " plus", "probability": 0.91064453125}, {"start": 2347.73, "end": 2348.01, "word": " or", "probability": 0.95703125}, {"start": 2348.01, "end": 2348.25, "word": " minus", "probability": 0.9921875}, {"start": 2348.25, "end": 2348.69, "word": " sigma", "probability": 0.9306640625}, {"start": 2348.69, "end": 2350.03, "word": " x", "probability": 
0.5263671875}, {"start": 2350.03, "end": 2350.21, "word": " bar", "probability": 0.95263671875}, {"start": 2350.21, "end": 2350.45, "word": " plus", "probability": 0.9560546875}, {"start": 2350.45, "end": 2350.65, "word": " or", "probability": 0.9140625}, {"start": 2350.65, "end": 2350.81, "word": " minus", "probability": 0.9912109375}, {"start": 2350.81, "end": 2351.05, "word": " z", "probability": 0.9697265625}, {"start": 2351.05, "end": 2351.25, "word": " alpha", "probability": 0.8896484375}, {"start": 2351.25, "end": 2351.51, "word": " over", "probability": 0.9169921875}, {"start": 2351.51, "end": 2351.69, "word": " 2", "probability": 0.9453125}, {"start": 2351.69, "end": 2351.95, "word": " times", "probability": 0.916015625}, {"start": 2351.95, "end": 2352.41, "word": " sigma", "probability": 0.939453125}, {"start": 2352.41, "end": 2352.95, "word": " over", "probability": 0.89208984375}, {"start": 2352.95, "end": 2353.43, "word": " square", "probability": 0.76708984375}, {"start": 2353.43, "end": 2353.65, "word": " root", "probability": 0.912109375}, {"start": 2353.65, "end": 2353.75, "word": " of", "probability": 0.49951171875}, {"start": 2353.75, "end": 2353.87, "word": " n.", "probability": 0.59033203125}, {"start": 2354.23, "end": 2354.45, "word": " So", "probability": 0.95068359375}, {"start": 2354.45, "end": 2354.65, "word": " this", "probability": 0.775390625}, {"start": 2354.65, "end": 2354.79, "word": " is", "probability": 0.873046875}, {"start": 2354.79, "end": 2354.89, "word": " a", "probability": 0.9091796875}, {"start": 2354.89, "end": 2355.17, "word": " formula", "probability": 0.92431640625}, {"start": 2355.17, "end": 2355.43, "word": " can", "probability": 0.4619140625}, {"start": 2355.43, "end": 2355.59, "word": " be", "probability": 0.94775390625}, {"start": 2355.59, "end": 2355.87, "word": " used", "probability": 0.9140625}, {"start": 2355.87, "end": 2356.07, "word": " in", "probability": 0.94140625}, {"start": 2356.07, "end": 2356.35, 
"word": " order", "probability": 0.9169921875}, {"start": 2356.35, "end": 2357.99, "word": " to", "probability": 0.9736328125}, {"start": 2357.99, "end": 2359.25, "word": " construct", "probability": 0.94873046875}, {"start": 2359.25, "end": 2359.75, "word": " confidence", "probability": 0.96875}, {"start": 2359.75, "end": 2360.21, "word": " interval", "probability": 0.94921875}, {"start": 2360.21, "end": 2360.65, "word": " for", "probability": 0.9501953125}, {"start": 2360.65, "end": 2360.87, "word": " the", "probability": 0.83935546875}, {"start": 2360.87, "end": 2361.25, "word": " population", "probability": 0.389404296875}, {"start": 2361.25, "end": 2361.69, "word": " mean", "probability": 0.88037109375}, {"start": 2361.69, "end": 2362.23, "word": " mu.", "probability": 0.384033203125}, {"start": 2364.45, "end": 2365.01, "word": " The", "probability": 0.88427734375}, {"start": 2365.01, "end": 2365.39, "word": " lower", "probability": 0.87109375}, {"start": 2365.39, "end": 2365.87, "word": " limit", "probability": 0.96875}, {"start": 2365.87, "end": 2366.17, "word": " is", "probability": 0.951171875}, {"start": 2366.17, "end": 2366.39, "word": " given", "probability": 0.896484375}, {"start": 2366.39, "end": 2366.63, "word": " by", "probability": 0.97216796875}, {"start": 2366.63, "end": 2366.83, "word": " x", "probability": 0.994140625}, {"start": 2366.83, "end": 2367.03, "word": " bar", "probability": 0.9462890625}, {"start": 2367.03, "end": 2367.39, "word": " minus", "probability": 0.9853515625}, {"start": 2367.39, "end": 2367.71, "word": " this", "probability": 0.951171875}, {"start": 2367.71, "end": 2368.17, "word": " amount.", "probability": 0.90625}], "temperature": 1.0}, {"id": 87, "seek": 239732, "start": 2369.7, "end": 2397.32, "text": " So the lower limit, X bar minus, and the upper limit is X bar plus. Upper limit. Now the point estimate is X bar. 
For the lower limit, we subtract this amount.", "tokens": [407, 264, 3126, 4948, 11, 1783, 2159, 3175, 11, 293, 264, 6597, 4948, 307, 1783, 2159, 1804, 13, 36926, 4948, 13, 823, 264, 935, 12539, 307, 1783, 2159, 13, 1171, 264, 3126, 4948, 11, 321, 16390, 341, 2372, 13], "avg_logprob": -0.21992188096046447, "compression_ratio": 1.4678899082568808, "no_speech_prob": 0.0, "words": [{"start": 2369.7, "end": 2369.96, "word": " So", "probability": 0.6044921875}, {"start": 2369.96, "end": 2370.14, "word": " the", "probability": 0.69580078125}, {"start": 2370.14, "end": 2370.3, "word": " lower", "probability": 0.83642578125}, {"start": 2370.3, "end": 2370.76, "word": " limit,", "probability": 0.96337890625}, {"start": 2374.74, "end": 2375.96, "word": " X", "probability": 0.475830078125}, {"start": 2375.96, "end": 2376.14, "word": " bar", "probability": 0.55419921875}, {"start": 2376.14, "end": 2376.48, "word": " minus,", "probability": 0.9658203125}, {"start": 2379.56, "end": 2380.1, "word": " and", "probability": 0.8359375}, {"start": 2380.1, "end": 2380.24, "word": " the", "probability": 0.904296875}, {"start": 2380.24, "end": 2380.46, "word": " upper", "probability": 0.837890625}, {"start": 2380.46, "end": 2380.72, "word": " limit", "probability": 0.96044921875}, {"start": 2380.72, "end": 2380.96, "word": " is", "probability": 0.87255859375}, {"start": 2380.96, "end": 2381.16, "word": " X", "probability": 0.93896484375}, {"start": 2381.16, "end": 2381.38, "word": " bar", "probability": 0.9404296875}, {"start": 2381.38, "end": 2381.84, "word": " plus.", "probability": 0.9541015625}, {"start": 2384.72, "end": 2385.14, "word": " Upper", "probability": 0.5849609375}, {"start": 2385.14, "end": 2387.88, "word": " limit.", "probability": 0.80322265625}, {"start": 2390.88, "end": 2391.08, "word": " Now", "probability": 0.93408203125}, {"start": 2391.08, "end": 2391.28, "word": " the", "probability": 0.75390625}, {"start": 2391.28, "end": 2391.5, "word": " point", "probability": 
0.94677734375}, {"start": 2391.5, "end": 2391.82, "word": " estimate", "probability": 0.93994140625}, {"start": 2391.82, "end": 2392.08, "word": " is", "probability": 0.93896484375}, {"start": 2392.08, "end": 2392.28, "word": " X", "probability": 0.9638671875}, {"start": 2392.28, "end": 2392.54, "word": " bar.", "probability": 0.91357421875}, {"start": 2393.96, "end": 2394.46, "word": " For", "probability": 0.94970703125}, {"start": 2394.46, "end": 2394.7, "word": " the", "probability": 0.9130859375}, {"start": 2394.7, "end": 2394.92, "word": " lower", "probability": 0.86181640625}, {"start": 2394.92, "end": 2395.28, "word": " limit,", "probability": 0.96484375}, {"start": 2395.64, "end": 2395.94, "word": " we", "probability": 0.953125}, {"start": 2395.94, "end": 2396.42, "word": " subtract", "probability": 0.8564453125}, {"start": 2396.42, "end": 2396.88, "word": " this", "probability": 0.9443359375}, {"start": 2396.88, "end": 2397.32, "word": " amount.", "probability": 0.9150390625}], "temperature": 1.0}, {"id": 88, "seek": 242521, "start": 2398.63, "end": 2425.21, "text": " from x bar for the upper limit we add the same amount so subtracting specific amount and adding the same amount this amount later will call it margin of error this will be", "tokens": [490, 2031, 2159, 337, 264, 6597, 4948, 321, 909, 264, 912, 2372, 370, 16390, 278, 2685, 2372, 293, 5127, 264, 912, 2372, 341, 2372, 1780, 486, 818, 309, 10270, 295, 6713, 341, 486, 312], "avg_logprob": -0.2658482108797346, "compression_ratio": 1.5779816513761469, "no_speech_prob": 0.0, "words": [{"start": 2398.63, "end": 2398.91, "word": " from", "probability": 0.30126953125}, {"start": 2398.91, "end": 2399.15, "word": " x", "probability": 0.51220703125}, {"start": 2399.15, "end": 2399.43, "word": " bar", "probability": 0.75}, {"start": 2399.43, "end": 2401.33, "word": " for", "probability": 0.4296875}, {"start": 2401.33, "end": 2401.53, "word": " the", "probability": 0.92578125}, {"start": 2401.53, "end": 
2401.79, "word": " upper", "probability": 0.845703125}, {"start": 2401.79, "end": 2402.21, "word": " limit", "probability": 0.9462890625}, {"start": 2402.21, "end": 2404.37, "word": " we", "probability": 0.626953125}, {"start": 2404.37, "end": 2404.69, "word": " add", "probability": 0.86962890625}, {"start": 2404.69, "end": 2404.87, "word": " the", "probability": 0.91748046875}, {"start": 2404.87, "end": 2405.15, "word": " same", "probability": 0.92041015625}, {"start": 2405.15, "end": 2405.69, "word": " amount", "probability": 0.92333984375}, {"start": 2405.69, "end": 2409.25, "word": " so", "probability": 0.474609375}, {"start": 2409.25, "end": 2410.47, "word": " subtracting", "probability": 0.849609375}, {"start": 2410.47, "end": 2412.07, "word": " specific", "probability": 0.724609375}, {"start": 2412.07, "end": 2413.11, "word": " amount", "probability": 0.904296875}, {"start": 2413.11, "end": 2414.99, "word": " and", "probability": 0.9306640625}, {"start": 2414.99, "end": 2415.49, "word": " adding", "probability": 0.83544921875}, {"start": 2415.49, "end": 2415.75, "word": " the", "probability": 0.912109375}, {"start": 2415.75, "end": 2415.97, "word": " same", "probability": 0.90869140625}, {"start": 2415.97, "end": 2416.45, "word": " amount", "probability": 0.9169921875}, {"start": 2416.45, "end": 2417.41, "word": " this", "probability": 0.8310546875}, {"start": 2417.41, "end": 2417.81, "word": " amount", "probability": 0.90869140625}, {"start": 2417.81, "end": 2418.23, "word": " later", "probability": 0.81005859375}, {"start": 2418.23, "end": 2418.49, "word": " will", "probability": 0.381591796875}, {"start": 2418.49, "end": 2418.77, "word": " call", "probability": 0.8623046875}, {"start": 2418.77, "end": 2418.99, "word": " it", "probability": 0.947265625}, {"start": 2418.99, "end": 2419.55, "word": " margin", "probability": 0.93017578125}, {"start": 2419.55, "end": 2422.65, "word": " of", "probability": 0.94580078125}, {"start": 2422.65, "end": 2422.97, 
"word": " error", "probability": 0.90380859375}, {"start": 2422.97, "end": 2424.73, "word": " this", "probability": 0.3974609375}, {"start": 2424.73, "end": 2424.93, "word": " will", "probability": 0.87353515625}, {"start": 2424.93, "end": 2425.21, "word": " be", "probability": 0.955078125}], "temperature": 1.0}, {"id": 89, "seek": 245221, "start": 2425.78, "end": 2452.22, "text": " maybe week after next week. So z alpha bar to sigma bar root n is the margin of error or the error because we add or subtract the same value of x bar. Now this slide just explains how can we", "tokens": [1310, 1243, 934, 958, 1243, 13, 407, 710, 8961, 2159, 281, 12771, 2159, 5593, 297, 307, 264, 10270, 295, 6713, 420, 264, 6713, 570, 321, 909, 420, 16390, 264, 912, 2158, 295, 2031, 2159, 13, 823, 341, 4137, 445, 13948, 577, 393, 321], "avg_logprob": -0.23348722166635774, "compression_ratio": 1.4148148148148147, "no_speech_prob": 0.0, "words": [{"start": 2425.78, "end": 2426.24, "word": " maybe", "probability": 0.382568359375}, {"start": 2426.24, "end": 2428.22, "word": " week", "probability": 0.76513671875}, {"start": 2428.22, "end": 2428.52, "word": " after", "probability": 0.857421875}, {"start": 2428.52, "end": 2428.8, "word": " next", "probability": 0.9345703125}, {"start": 2428.8, "end": 2429.22, "word": " week.", "probability": 0.93701171875}, {"start": 2430.88, "end": 2431.52, "word": " So", "probability": 0.62158203125}, {"start": 2431.52, "end": 2433.98, "word": " z", "probability": 0.351318359375}, {"start": 2433.98, "end": 2434.26, "word": " alpha", "probability": 0.77880859375}, {"start": 2434.26, "end": 2434.48, "word": " bar", "probability": 0.619140625}, {"start": 2434.48, "end": 2434.62, "word": " to", "probability": 0.60009765625}, {"start": 2434.62, "end": 2434.9, "word": " sigma", "probability": 0.9140625}, {"start": 2434.9, "end": 2435.14, "word": " bar", "probability": 0.6953125}, {"start": 2435.14, "end": 2435.36, "word": " root", "probability": 0.65283203125}, 
{"start": 2435.36, "end": 2435.58, "word": " n", "probability": 0.80126953125}, {"start": 2435.58, "end": 2435.8, "word": " is", "probability": 0.91650390625}, {"start": 2435.8, "end": 2435.94, "word": " the", "probability": 0.90869140625}, {"start": 2435.94, "end": 2436.2, "word": " margin", "probability": 0.93603515625}, {"start": 2436.2, "end": 2436.36, "word": " of", "probability": 0.92626953125}, {"start": 2436.36, "end": 2436.62, "word": " error", "probability": 0.83154296875}, {"start": 2436.62, "end": 2437.14, "word": " or", "probability": 0.297607421875}, {"start": 2437.14, "end": 2437.4, "word": " the", "probability": 0.81298828125}, {"start": 2437.4, "end": 2437.62, "word": " error", "probability": 0.90673828125}, {"start": 2437.62, "end": 2437.96, "word": " because", "probability": 0.7939453125}, {"start": 2437.96, "end": 2438.16, "word": " we", "probability": 0.9453125}, {"start": 2438.16, "end": 2438.44, "word": " add", "probability": 0.90283203125}, {"start": 2438.44, "end": 2438.76, "word": " or", "probability": 0.96142578125}, {"start": 2438.76, "end": 2439.4, "word": " subtract", "probability": 0.853515625}, {"start": 2439.4, "end": 2439.98, "word": " the", "probability": 0.90576171875}, {"start": 2439.98, "end": 2440.34, "word": " same", "probability": 0.90234375}, {"start": 2440.34, "end": 2441.68, "word": " value", "probability": 0.96533203125}, {"start": 2441.68, "end": 2442.2, "word": " of", "probability": 0.95751953125}, {"start": 2442.2, "end": 2443.38, "word": " x", "probability": 0.935546875}, {"start": 2443.38, "end": 2443.66, "word": " bar.", "probability": 0.56689453125}, {"start": 2446.48, "end": 2447.12, "word": " Now", "probability": 0.9482421875}, {"start": 2447.12, "end": 2447.34, "word": " this", "probability": 0.7099609375}, {"start": 2447.34, "end": 2447.8, "word": " slide", "probability": 0.9541015625}, {"start": 2447.8, "end": 2448.5, "word": " just", "probability": 0.892578125}, {"start": 2448.5, "end": 2450.16, "word": " 
explains", "probability": 0.8837890625}, {"start": 2450.16, "end": 2451.62, "word": " how", "probability": 0.896484375}, {"start": 2451.62, "end": 2451.96, "word": " can", "probability": 0.9267578125}, {"start": 2451.96, "end": 2452.22, "word": " we", "probability": 0.95751953125}], "temperature": 1.0}, {"id": 90, "seek": 248254, "start": 2453.66, "end": 2482.54, "text": " compute Z alpha over 2. And we did that for 90%. Now here for 95% confidence level, so alpha is 5%. So Z alpha over 2 in the lower tail is 1.96 minus. The other one plus 1.96. So the lower confidence limit minus or negative. Upper confidence limit is plus. So the same values, but", "tokens": [14722, 1176, 8961, 670, 568, 13, 400, 321, 630, 300, 337, 4289, 6856, 823, 510, 337, 13420, 4, 6687, 1496, 11, 370, 8961, 307, 1025, 6856, 407, 1176, 8961, 670, 568, 294, 264, 3126, 6838, 307, 502, 13, 22962, 3175, 13, 440, 661, 472, 1804, 502, 13, 22962, 13, 407, 264, 3126, 6687, 4948, 3175, 420, 3671, 13, 36926, 6687, 4948, 307, 1804, 13, 407, 264, 912, 4190, 11, 457], "avg_logprob": -0.24141725520013083, "compression_ratio": 1.6242774566473988, "no_speech_prob": 0.0, "words": [{"start": 2453.6600000000003, "end": 2454.32, "word": " compute", "probability": 0.343017578125}, {"start": 2454.32, "end": 2454.98, "word": " Z", "probability": 0.58544921875}, {"start": 2454.98, "end": 2455.18, "word": " alpha", "probability": 0.67236328125}, {"start": 2455.18, "end": 2455.46, "word": " over", "probability": 0.90576171875}, {"start": 2455.46, "end": 2455.72, "word": " 2.", "probability": 0.578125}, {"start": 2456.32, "end": 2456.98, "word": " And", "probability": 0.93603515625}, {"start": 2456.98, "end": 2457.14, "word": " we", "probability": 0.9296875}, {"start": 2457.14, "end": 2457.28, "word": " did", "probability": 0.81982421875}, {"start": 2457.28, "end": 2457.54, "word": " that", "probability": 0.9306640625}, {"start": 2457.54, "end": 2457.8, "word": " for", "probability": 0.9365234375}, {"start": 2457.8, 
"end": 2458.64, "word": " 90%.", "probability": 0.8310546875}, {"start": 2458.64, "end": 2460.38, "word": " Now", "probability": 0.9599609375}, {"start": 2460.38, "end": 2460.82, "word": " here", "probability": 0.458740234375}, {"start": 2460.82, "end": 2461.12, "word": " for", "probability": 0.7314453125}, {"start": 2461.12, "end": 2461.58, "word": " 95", "probability": 0.9794921875}, {"start": 2461.58, "end": 2462.28, "word": "%", "probability": 0.98583984375}, {"start": 2462.28, "end": 2462.8, "word": " confidence", "probability": 0.97998046875}, {"start": 2462.8, "end": 2463.52, "word": " level,", "probability": 0.865234375}, {"start": 2464.3, "end": 2464.56, "word": " so", "probability": 0.89453125}, {"start": 2464.56, "end": 2464.82, "word": " alpha", "probability": 0.90576171875}, {"start": 2464.82, "end": 2465.04, "word": " is", "probability": 0.9482421875}, {"start": 2465.04, "end": 2465.74, "word": " 5%.", "probability": 0.878173828125}, {"start": 2465.74, "end": 2466.8, "word": " So", "probability": 0.9560546875}, {"start": 2466.8, "end": 2467.02, "word": " Z", "probability": 0.87841796875}, {"start": 2467.02, "end": 2467.24, "word": " alpha", "probability": 0.90283203125}, {"start": 2467.24, "end": 2467.5, "word": " over", "probability": 0.91650390625}, {"start": 2467.5, "end": 2467.8, "word": " 2", "probability": 0.96044921875}, {"start": 2467.8, "end": 2468.3, "word": " in", "probability": 0.75}, {"start": 2468.3, "end": 2468.44, "word": " the", "probability": 0.9140625}, {"start": 2468.44, "end": 2468.7, "word": " lower", "probability": 0.85546875}, {"start": 2468.7, "end": 2469.08, "word": " tail", "probability": 0.454833984375}, {"start": 2469.08, "end": 2469.44, "word": " is", "probability": 0.912109375}, {"start": 2469.44, "end": 2469.68, "word": " 1", "probability": 0.97705078125}, {"start": 2469.68, "end": 2470.6, "word": ".96", "probability": 0.986572265625}, {"start": 2470.6, "end": 2471.06, "word": " minus.", "probability": 0.97021484375}, 
{"start": 2472.24, "end": 2472.64, "word": " The", "probability": 0.84619140625}, {"start": 2472.64, "end": 2472.82, "word": " other", "probability": 0.880859375}, {"start": 2472.82, "end": 2473.04, "word": " one", "probability": 0.86279296875}, {"start": 2473.04, "end": 2473.44, "word": " plus", "probability": 0.8212890625}, {"start": 2473.44, "end": 2474.02, "word": " 1", "probability": 0.8408203125}, {"start": 2474.02, "end": 2474.5, "word": ".96.", "probability": 0.98681640625}, {"start": 2474.94, "end": 2475.24, "word": " So", "probability": 0.953125}, {"start": 2475.24, "end": 2475.42, "word": " the", "probability": 0.85107421875}, {"start": 2475.42, "end": 2475.64, "word": " lower", "probability": 0.849609375}, {"start": 2475.64, "end": 2476.12, "word": " confidence", "probability": 0.9697265625}, {"start": 2476.12, "end": 2476.46, "word": " limit", "probability": 0.95263671875}, {"start": 2476.46, "end": 2477.14, "word": " minus", "probability": 0.89013671875}, {"start": 2477.14, "end": 2478.44, "word": " or", "probability": 0.623046875}, {"start": 2478.44, "end": 2478.8, "word": " negative.", "probability": 0.95947265625}, {"start": 2479.34, "end": 2479.76, "word": " Upper", "probability": 0.75}, {"start": 2479.76, "end": 2480.3, "word": " confidence", "probability": 0.958984375}, {"start": 2480.3, "end": 2480.58, "word": " limit", "probability": 0.93115234375}, {"start": 2480.58, "end": 2480.76, "word": " is", "probability": 0.873046875}, {"start": 2480.76, "end": 2481.12, "word": " plus.", "probability": 0.54052734375}, {"start": 2481.36, "end": 2481.52, "word": " So", "probability": 0.84375}, {"start": 2481.52, "end": 2481.66, "word": " the", "probability": 0.7763671875}, {"start": 2481.66, "end": 2481.9, "word": " same", "probability": 0.88916015625}, {"start": 2481.9, "end": 2482.22, "word": " values,", "probability": 0.357177734375}, {"start": 2482.34, "end": 2482.54, "word": " but", "probability": 0.92431640625}], "temperature": 1.0}, {"id": 91, 
"seek": 251191, "start": 2482.77, "end": 2511.91, "text": " different science. So I think we talk about how can we find the critical values of our talk. This table summarizes the commonly used confidence levels. And most likely, we are using 90%, 95%, 99%. So for 90%, it's 11645, as we did.", "tokens": [819, 3497, 13, 407, 286, 519, 321, 751, 466, 577, 393, 321, 915, 264, 4924, 4190, 295, 527, 751, 13, 639, 3199, 14611, 5660, 264, 12719, 1143, 6687, 4358, 13, 400, 881, 3700, 11, 321, 366, 1228, 4289, 8923, 13420, 8923, 11803, 6856, 407, 337, 4289, 8923, 309, 311, 2975, 21, 8465, 11, 382, 321, 630, 13], "avg_logprob": -0.21592133261006455, "compression_ratio": 1.3181818181818181, "no_speech_prob": 0.0, "words": [{"start": 2482.77, "end": 2483.25, "word": " different", "probability": 0.445068359375}, {"start": 2483.25, "end": 2484.37, "word": " science.", "probability": 0.3408203125}, {"start": 2485.13, "end": 2485.35, "word": " So", "probability": 0.91162109375}, {"start": 2485.35, "end": 2485.49, "word": " I", "probability": 0.8212890625}, {"start": 2485.49, "end": 2485.67, "word": " think", "probability": 0.919921875}, {"start": 2485.67, "end": 2485.83, "word": " we", "probability": 0.9423828125}, {"start": 2485.83, "end": 2486.09, "word": " talk", "probability": 0.69873046875}, {"start": 2486.09, "end": 2486.49, "word": " about", "probability": 0.90869140625}, {"start": 2486.49, "end": 2487.89, "word": " how", "probability": 0.7392578125}, {"start": 2487.89, "end": 2488.13, "word": " can", "probability": 0.9150390625}, {"start": 2488.13, "end": 2488.29, "word": " we", "probability": 0.951171875}, {"start": 2488.29, "end": 2488.57, "word": " find", "probability": 0.888671875}, {"start": 2488.57, "end": 2488.73, "word": " the", "probability": 0.83349609375}, {"start": 2488.73, "end": 2489.09, "word": " critical", "probability": 0.93994140625}, {"start": 2489.09, "end": 2489.59, "word": " values", "probability": 0.91796875}, {"start": 2489.59, "end": 2489.77, 
"word": " of", "probability": 0.73095703125}, {"start": 2489.77, "end": 2490.11, "word": " our", "probability": 0.77001953125}, {"start": 2490.11, "end": 2490.35, "word": " talk.", "probability": 0.255859375}, {"start": 2492.23, "end": 2493.11, "word": " This", "probability": 0.82666015625}, {"start": 2493.11, "end": 2493.57, "word": " table", "probability": 0.90966796875}, {"start": 2493.57, "end": 2494.99, "word": " summarizes", "probability": 0.931884765625}, {"start": 2494.99, "end": 2495.53, "word": " the", "probability": 0.89892578125}, {"start": 2495.53, "end": 2496.89, "word": " commonly", "probability": 0.79296875}, {"start": 2496.89, "end": 2497.33, "word": " used", "probability": 0.8974609375}, {"start": 2497.33, "end": 2497.87, "word": " confidence", "probability": 0.97998046875}, {"start": 2497.87, "end": 2498.29, "word": " levels.", "probability": 0.91650390625}, {"start": 2500.23, "end": 2500.49, "word": " And", "probability": 0.91552734375}, {"start": 2500.49, "end": 2500.71, "word": " most", "probability": 0.87890625}, {"start": 2500.71, "end": 2500.99, "word": " likely,", "probability": 0.9189453125}, {"start": 2501.07, "end": 2501.13, "word": " we", "probability": 0.89990234375}, {"start": 2501.13, "end": 2501.25, "word": " are", "probability": 0.92919921875}, {"start": 2501.25, "end": 2501.61, "word": " using", "probability": 0.9287109375}, {"start": 2501.61, "end": 2502.35, "word": " 90%,", "probability": 0.909423828125}, {"start": 2502.35, "end": 2503.47, "word": " 95%,", "probability": 0.78759765625}, {"start": 2503.47, "end": 2504.13, "word": " 99%.", "probability": 0.9453125}, {"start": 2504.13, "end": 2506.59, "word": " So", "probability": 0.9580078125}, {"start": 2506.59, "end": 2506.91, "word": " for", "probability": 0.89111328125}, {"start": 2506.91, "end": 2507.63, "word": " 90%,", "probability": 0.86767578125}, {"start": 2507.63, "end": 2508.09, "word": " it's", "probability": 0.938232421875}, {"start": 2508.09, "end": 2509.03, 
"word": " 11645,", "probability": 0.7752278645833334}, {"start": 2510.37, "end": 2510.97, "word": " as", "probability": 0.94873046875}, {"start": 2510.97, "end": 2511.17, "word": " we", "probability": 0.9365234375}, {"start": 2511.17, "end": 2511.91, "word": " did.", "probability": 0.923828125}], "temperature": 1.0}, {"id": 92, "seek": 254094, "start": 2512.48, "end": 2540.94, "text": " 95% 1.96, better to remember these values. For 99, it's 2.58. Now let's see. As the confidence level increases. Now here the confidence level. As the confidence level increases. Look at the corresponding z value.", "tokens": [13420, 4, 502, 13, 22962, 11, 1101, 281, 1604, 613, 4190, 13, 1171, 11803, 11, 309, 311, 568, 13, 20419, 13, 823, 718, 311, 536, 13, 1018, 264, 6687, 1496, 8637, 13, 823, 510, 264, 6687, 1496, 13, 1018, 264, 6687, 1496, 8637, 13, 2053, 412, 264, 11760, 710, 2158, 13], "avg_logprob": -0.21259014480389082, "compression_ratio": 1.5777777777777777, "no_speech_prob": 0.0, "words": [{"start": 2512.48, "end": 2512.98, "word": " 95", "probability": 0.849609375}, {"start": 2512.98, "end": 2513.6, "word": "%", "probability": 0.609375}, {"start": 2513.6, "end": 2513.84, "word": " 1", "probability": 0.43896484375}, {"start": 2513.84, "end": 2514.7, "word": ".96,", "probability": 0.980712890625}, {"start": 2514.94, "end": 2515.26, "word": " better", "probability": 0.7939453125}, {"start": 2515.26, "end": 2515.68, "word": " to", "probability": 0.95947265625}, {"start": 2515.68, "end": 2517.08, "word": " remember", "probability": 0.8603515625}, {"start": 2517.08, "end": 2517.38, "word": " these", "probability": 0.83935546875}, {"start": 2517.38, "end": 2517.82, "word": " values.", "probability": 0.9638671875}, {"start": 2519.0, "end": 2519.28, "word": " For", "probability": 0.94140625}, {"start": 2519.28, "end": 2519.66, "word": " 99,", "probability": 0.92919921875}, {"start": 2520.06, "end": 2520.38, "word": " it's", "probability": 0.93408203125}, {"start": 2520.38, "end": 
2520.54, "word": " 2", "probability": 0.99609375}, {"start": 2520.54, "end": 2521.1, "word": ".58.", "probability": 0.99560546875}, {"start": 2524.66, "end": 2525.24, "word": " Now", "probability": 0.923828125}, {"start": 2525.24, "end": 2525.58, "word": " let's", "probability": 0.820068359375}, {"start": 2525.58, "end": 2525.9, "word": " see.", "probability": 0.91259765625}, {"start": 2528.68, "end": 2529.18, "word": " As", "probability": 0.96240234375}, {"start": 2529.18, "end": 2529.44, "word": " the", "probability": 0.91845703125}, {"start": 2529.44, "end": 2529.96, "word": " confidence", "probability": 0.97216796875}, {"start": 2529.96, "end": 2530.3, "word": " level", "probability": 0.94091796875}, {"start": 2530.3, "end": 2530.9, "word": " increases.", "probability": 0.93310546875}, {"start": 2532.78, "end": 2533.1, "word": " Now", "probability": 0.88037109375}, {"start": 2533.1, "end": 2533.3, "word": " here", "probability": 0.60888671875}, {"start": 2533.3, "end": 2533.5, "word": " the", "probability": 0.36962890625}, {"start": 2533.5, "end": 2533.84, "word": " confidence", "probability": 0.9794921875}, {"start": 2533.84, "end": 2534.32, "word": " level.", "probability": 0.951171875}, {"start": 2534.96, "end": 2535.42, "word": " As", "probability": 0.96337890625}, {"start": 2535.42, "end": 2535.62, "word": " the", "probability": 0.92041015625}, {"start": 2535.62, "end": 2536.08, "word": " confidence", "probability": 0.97705078125}, {"start": 2536.08, "end": 2536.42, "word": " level", "probability": 0.94189453125}, {"start": 2536.42, "end": 2536.96, "word": " increases.", "probability": 0.94140625}, {"start": 2538.52, "end": 2538.98, "word": " Look", "probability": 0.86328125}, {"start": 2538.98, "end": 2539.22, "word": " at", "probability": 0.9638671875}, {"start": 2539.22, "end": 2539.58, "word": " the", "probability": 0.91943359375}, {"start": 2539.58, "end": 2540.24, "word": " corresponding", "probability": 0.7919921875}, {"start": 2540.24, "end": 
2540.52, "word": " z", "probability": 0.5126953125}, {"start": 2540.52, "end": 2540.94, "word": " value.", "probability": 0.8681640625}], "temperature": 1.0}, {"id": 93, "seek": 257142, "start": 2541.8, "end": 2571.42, "text": " is also increased. So in this case, the interval becomes wider or narrower? Wider. Because here we subtract this value, this amount, z alpha over 2 will increase. This is z alpha over 2. As confidence level increases, z value increases also. So that means this amount will go up.", "tokens": [307, 611, 6505, 13, 407, 294, 341, 1389, 11, 264, 15035, 3643, 11842, 420, 46751, 30, 343, 1438, 13, 1436, 510, 321, 16390, 341, 2158, 11, 341, 2372, 11, 710, 8961, 670, 568, 486, 3488, 13, 639, 307, 710, 8961, 670, 568, 13, 1018, 6687, 1496, 8637, 11, 710, 2158, 8637, 611, 13, 407, 300, 1355, 341, 2372, 486, 352, 493, 13], "avg_logprob": -0.20833333995607164, "compression_ratio": 1.627906976744186, "no_speech_prob": 0.0, "words": [{"start": 2541.8, "end": 2542.1, "word": " is", "probability": 0.19677734375}, {"start": 2542.1, "end": 2542.5, "word": " also", "probability": 0.88427734375}, {"start": 2542.5, "end": 2542.9, "word": " increased.", "probability": 0.297607421875}, {"start": 2544.64, "end": 2545.26, "word": " So", "probability": 0.91259765625}, {"start": 2545.26, "end": 2545.46, "word": " in", "probability": 0.70849609375}, {"start": 2545.46, "end": 2545.72, "word": " this", "probability": 0.94921875}, {"start": 2545.72, "end": 2546.16, "word": " case,", "probability": 0.9150390625}, {"start": 2546.34, "end": 2546.48, "word": " the", "probability": 0.90380859375}, {"start": 2546.48, "end": 2547.06, "word": " interval", "probability": 0.95166015625}, {"start": 2547.06, "end": 2547.68, "word": " becomes", "probability": 0.7626953125}, {"start": 2547.68, "end": 2548.08, "word": " wider", "probability": 0.8955078125}, {"start": 2548.08, "end": 2548.36, "word": " or", "probability": 0.88037109375}, {"start": 2548.36, "end": 2548.64, "word": " 
narrower?", "probability": 0.71435546875}, {"start": 2549.4, "end": 2550.2, "word": " Wider.", "probability": 0.748046875}, {"start": 2550.82, "end": 2551.22, "word": " Because", "probability": 0.7900390625}, {"start": 2551.22, "end": 2551.74, "word": " here", "probability": 0.85888671875}, {"start": 2551.74, "end": 2552.26, "word": " we", "probability": 0.5625}, {"start": 2552.26, "end": 2553.02, "word": " subtract", "probability": 0.84912109375}, {"start": 2553.02, "end": 2553.5, "word": " this", "probability": 0.93701171875}, {"start": 2553.5, "end": 2553.84, "word": " value,", "probability": 0.89501953125}, {"start": 2554.52, "end": 2554.7, "word": " this", "probability": 0.92626953125}, {"start": 2554.7, "end": 2555.02, "word": " amount,", "probability": 0.8857421875}, {"start": 2555.8, "end": 2555.94, "word": " z", "probability": 0.62744140625}, {"start": 2555.94, "end": 2556.16, "word": " alpha", "probability": 0.6513671875}, {"start": 2556.16, "end": 2556.44, "word": " over", "probability": 0.91357421875}, {"start": 2556.44, "end": 2556.68, "word": " 2", "probability": 0.64501953125}, {"start": 2556.68, "end": 2557.0, "word": " will", "probability": 0.87255859375}, {"start": 2557.0, "end": 2557.58, "word": " increase.", "probability": 0.849609375}, {"start": 2559.1, "end": 2559.32, "word": " This", "probability": 0.7880859375}, {"start": 2559.32, "end": 2559.42, "word": " is", "probability": 0.9462890625}, {"start": 2559.42, "end": 2559.54, "word": " z", "probability": 0.876953125}, {"start": 2559.54, "end": 2559.72, "word": " alpha", "probability": 0.92919921875}, {"start": 2559.72, "end": 2559.98, "word": " over", "probability": 0.91650390625}, {"start": 2559.98, "end": 2560.18, "word": " 2.", "probability": 0.96337890625}, {"start": 2561.04, "end": 2561.52, "word": " As", "probability": 0.9580078125}, {"start": 2561.52, "end": 2563.5, "word": " confidence", "probability": 0.9306640625}, {"start": 2563.5, "end": 2563.88, "word": " level", "probability": 
0.9267578125}, {"start": 2563.88, "end": 2564.4, "word": " increases,", "probability": 0.9375}, {"start": 2565.04, "end": 2565.94, "word": " z", "probability": 0.90087890625}, {"start": 2565.94, "end": 2566.4, "word": " value", "probability": 0.95458984375}, {"start": 2566.4, "end": 2567.14, "word": " increases", "probability": 0.5888671875}, {"start": 2567.14, "end": 2567.56, "word": " also.", "probability": 0.869140625}, {"start": 2568.1, "end": 2568.38, "word": " So", "probability": 0.9521484375}, {"start": 2568.38, "end": 2568.64, "word": " that", "probability": 0.9228515625}, {"start": 2568.64, "end": 2568.9, "word": " means", "probability": 0.943359375}, {"start": 2568.9, "end": 2569.2, "word": " this", "probability": 0.93408203125}, {"start": 2569.2, "end": 2569.72, "word": " amount", "probability": 0.90869140625}, {"start": 2569.72, "end": 2570.88, "word": " will", "probability": 0.88427734375}, {"start": 2570.88, "end": 2571.08, "word": " go", "probability": 0.96826171875}, {"start": 2571.08, "end": 2571.42, "word": " up.", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 94, "seek": 260073, "start": 2572.44, "end": 2600.74, "text": " will increase also. That means the confidence interval becomes wider. 
So as C level or confidence level goes up, increases, the corresponding interval, the interval becomes wider.", "tokens": [486, 3488, 611, 13, 663, 1355, 264, 6687, 15035, 3643, 11842, 13, 407, 382, 383, 1496, 420, 6687, 1496, 1709, 493, 11, 8637, 11, 264, 11760, 15035, 11, 264, 15035, 3643, 11842, 13], "avg_logprob": -0.22058824186815934, "compression_ratio": 1.592920353982301, "no_speech_prob": 0.0, "words": [{"start": 2572.44, "end": 2572.7, "word": " will", "probability": 0.482666015625}, {"start": 2572.7, "end": 2573.1, "word": " increase", "probability": 0.85986328125}, {"start": 2573.1, "end": 2573.5, "word": " also.", "probability": 0.86181640625}, {"start": 2573.96, "end": 2574.32, "word": " That", "probability": 0.861328125}, {"start": 2574.32, "end": 2574.7, "word": " means", "probability": 0.92919921875}, {"start": 2574.7, "end": 2575.76, "word": " the", "probability": 0.70751953125}, {"start": 2575.76, "end": 2576.24, "word": " confidence", "probability": 0.970703125}, {"start": 2576.24, "end": 2576.72, "word": " interval", "probability": 0.92138671875}, {"start": 2576.72, "end": 2577.4, "word": " becomes", "probability": 0.84033203125}, {"start": 2577.4, "end": 2578.34, "word": " wider.", "probability": 0.916015625}, {"start": 2579.1, "end": 2579.36, "word": " So", "probability": 0.91259765625}, {"start": 2579.36, "end": 2579.8, "word": " as", "probability": 0.8017578125}, {"start": 2579.8, "end": 2582.94, "word": " C", "probability": 0.580078125}, {"start": 2582.94, "end": 2583.22, "word": " level", "probability": 0.66943359375}, {"start": 2583.22, "end": 2584.6, "word": " or", "probability": 0.66552734375}, {"start": 2584.6, "end": 2585.14, "word": " confidence", "probability": 0.96337890625}, {"start": 2585.14, "end": 2585.56, "word": " level", "probability": 0.94140625}, {"start": 2585.56, "end": 2586.9, "word": " goes", "probability": 0.90380859375}, {"start": 2586.9, "end": 2587.28, "word": " up,", "probability": 0.970703125}, {"start": 2588.0, 
"end": 2588.44, "word": " increases,", "probability": 0.8076171875}, {"start": 2590.72, "end": 2590.82, "word": " the", "probability": 0.7177734375}, {"start": 2590.82, "end": 2592.6, "word": " corresponding", "probability": 0.79443359375}, {"start": 2592.6, "end": 2593.6, "word": " interval,", "probability": 0.96240234375}, {"start": 2595.24, "end": 2595.48, "word": " the", "probability": 0.8408203125}, {"start": 2595.48, "end": 2595.9, "word": " interval", "probability": 0.96826171875}, {"start": 2595.9, "end": 2598.3, "word": " becomes", "probability": 0.849609375}, {"start": 2598.3, "end": 2600.74, "word": " wider.", "probability": 0.8994140625}], "temperature": 1.0}, {"id": 95, "seek": 263107, "start": 2603.83, "end": 2631.07, "text": " So as C level increases, the confidence becomes wider. Vice versa, if the C level decreases, narrower. The confidence interval becomes narrower. It's better to have narrower confidence interval. So again, it's better if you remember these values for the confidence level and the corresponding Z value.", "tokens": [407, 382, 383, 1496, 8637, 11, 264, 6687, 3643, 11842, 13, 13276, 25650, 11, 498, 264, 383, 1496, 24108, 11, 46751, 13, 440, 6687, 15035, 3643, 46751, 13, 467, 311, 1101, 281, 362, 46751, 6687, 15035, 13, 407, 797, 11, 309, 311, 1101, 498, 291, 1604, 613, 4190, 337, 264, 6687, 1496, 293, 264, 11760, 1176, 2158, 13], "avg_logprob": -0.20921609765392238, "compression_ratio": 1.7976190476190477, "no_speech_prob": 0.0, "words": [{"start": 2603.83, "end": 2604.05, "word": " So", "probability": 0.78369140625}, {"start": 2604.05, "end": 2604.31, "word": " as", "probability": 0.7275390625}, {"start": 2604.31, "end": 2604.49, "word": " C", "probability": 0.56494140625}, {"start": 2604.49, "end": 2604.65, "word": " level", "probability": 0.64404296875}, {"start": 2604.65, "end": 2605.11, "word": " increases,", "probability": 0.8896484375}, {"start": 2605.43, "end": 2605.57, "word": " the", "probability": 0.7900390625}, {"start": 
2605.57, "end": 2605.93, "word": " confidence", "probability": 0.96533203125}, {"start": 2605.93, "end": 2606.47, "word": " becomes", "probability": 0.65185546875}, {"start": 2606.47, "end": 2607.19, "word": " wider.", "probability": 0.9033203125}, {"start": 2608.29, "end": 2608.87, "word": " Vice", "probability": 0.79541015625}, {"start": 2608.87, "end": 2609.25, "word": " versa,", "probability": 0.77734375}, {"start": 2609.55, "end": 2609.73, "word": " if", "probability": 0.93701171875}, {"start": 2609.73, "end": 2609.93, "word": " the", "probability": 0.8466796875}, {"start": 2609.93, "end": 2610.09, "word": " C", "probability": 0.9765625}, {"start": 2610.09, "end": 2610.29, "word": " level", "probability": 0.94775390625}, {"start": 2610.29, "end": 2610.91, "word": " decreases,", "probability": 0.9541015625}, {"start": 2612.11, "end": 2612.41, "word": " narrower.", "probability": 0.5283203125}, {"start": 2612.63, "end": 2612.79, "word": " The", "probability": 0.7939453125}, {"start": 2612.79, "end": 2613.43, "word": " confidence", "probability": 0.9716796875}, {"start": 2613.43, "end": 2614.31, "word": " interval", "probability": 0.9501953125}, {"start": 2614.31, "end": 2614.79, "word": " becomes", "probability": 0.88232421875}, {"start": 2614.79, "end": 2615.23, "word": " narrower.", "probability": 0.736328125}, {"start": 2618.07, "end": 2618.81, "word": " It's", "probability": 0.59521484375}, {"start": 2618.81, "end": 2619.09, "word": " better", "probability": 0.90185546875}, {"start": 2619.09, "end": 2619.29, "word": " to", "probability": 0.958984375}, {"start": 2619.29, "end": 2619.61, "word": " have", "probability": 0.94140625}, {"start": 2619.61, "end": 2620.31, "word": " narrower", "probability": 0.40234375}, {"start": 2620.31, "end": 2620.87, "word": " confidence", "probability": 0.9296875}, {"start": 2620.87, "end": 2621.21, "word": " interval.", "probability": 0.82275390625}, {"start": 2622.97, "end": 2623.71, "word": " So", "probability": 
0.92041015625}, {"start": 2623.71, "end": 2623.97, "word": " again,", "probability": 0.87451171875}, {"start": 2624.15, "end": 2624.63, "word": " it's", "probability": 0.74267578125}, {"start": 2624.63, "end": 2624.91, "word": " better", "probability": 0.90283203125}, {"start": 2624.91, "end": 2625.09, "word": " if", "probability": 0.91943359375}, {"start": 2625.09, "end": 2625.21, "word": " you", "probability": 0.95751953125}, {"start": 2625.21, "end": 2626.09, "word": " remember", "probability": 0.83740234375}, {"start": 2626.09, "end": 2626.41, "word": " these", "probability": 0.86181640625}, {"start": 2626.41, "end": 2626.89, "word": " values", "probability": 0.96728515625}, {"start": 2626.89, "end": 2627.91, "word": " for", "probability": 0.86474609375}, {"start": 2627.91, "end": 2628.33, "word": " the", "probability": 0.912109375}, {"start": 2628.33, "end": 2628.93, "word": " confidence", "probability": 0.97265625}, {"start": 2628.93, "end": 2629.33, "word": " level", "probability": 0.93212890625}, {"start": 2629.33, "end": 2629.85, "word": " and", "probability": 0.892578125}, {"start": 2629.85, "end": 2630.01, "word": " the", "probability": 0.87451171875}, {"start": 2630.01, "end": 2630.43, "word": " corresponding", "probability": 0.728515625}, {"start": 2630.43, "end": 2630.69, "word": " Z", "probability": 0.56982421875}, {"start": 2630.69, "end": 2631.07, "word": " value.", "probability": 0.865234375}], "temperature": 1.0}, {"id": 96, "seek": 266138, "start": 2634.16, "end": 2661.38, "text": " So that's the sea level for different sizes. This slide shows that some continents intervals may be containing mu. So the blue ones is the value of mu. 
The blue ones contains mu.", "tokens": [407, 300, 311, 264, 4158, 1496, 337, 819, 11602, 13, 639, 4137, 3110, 300, 512, 38598, 26651, 815, 312, 19273, 2992, 13, 407, 264, 3344, 2306, 307, 264, 2158, 295, 2992, 13, 440, 3344, 2306, 8306, 2992, 13], "avg_logprob": -0.2179487141279074, "compression_ratio": 1.4672131147540983, "no_speech_prob": 0.0, "words": [{"start": 2634.16, "end": 2635.0, "word": " So", "probability": 0.90869140625}, {"start": 2635.0, "end": 2635.84, "word": " that's", "probability": 0.90966796875}, {"start": 2635.84, "end": 2638.98, "word": " the", "probability": 0.876953125}, {"start": 2638.98, "end": 2639.4, "word": " sea", "probability": 0.83935546875}, {"start": 2639.4, "end": 2639.74, "word": " level", "probability": 0.9296875}, {"start": 2639.74, "end": 2640.48, "word": " for", "probability": 0.9345703125}, {"start": 2640.48, "end": 2640.92, "word": " different", "probability": 0.88232421875}, {"start": 2640.92, "end": 2641.52, "word": " sizes.", "probability": 0.91943359375}, {"start": 2644.06, "end": 2644.44, "word": " This", "probability": 0.7353515625}, {"start": 2644.44, "end": 2644.96, "word": " slide", "probability": 0.73046875}, {"start": 2644.96, "end": 2648.58, "word": " shows", "probability": 0.88330078125}, {"start": 2648.58, "end": 2649.02, "word": " that", "probability": 0.441162109375}, {"start": 2649.02, "end": 2650.58, "word": " some", "probability": 0.8916015625}, {"start": 2650.58, "end": 2651.06, "word": " continents", "probability": 0.95654296875}, {"start": 2651.06, "end": 2651.8, "word": " intervals", "probability": 0.322021484375}, {"start": 2651.8, "end": 2654.26, "word": " may", "probability": 0.387939453125}, {"start": 2654.26, "end": 2654.42, "word": " be", "probability": 0.8916015625}, {"start": 2654.42, "end": 2654.9, "word": " containing", "probability": 0.87451171875}, {"start": 2654.9, "end": 2655.16, "word": " mu.", "probability": 0.5712890625}, {"start": 2655.48, "end": 2655.68, "word": " So", 
"probability": 0.90087890625}, {"start": 2655.68, "end": 2656.1, "word": " the", "probability": 0.85205078125}, {"start": 2656.1, "end": 2656.36, "word": " blue", "probability": 0.9677734375}, {"start": 2656.36, "end": 2656.78, "word": " ones", "probability": 0.8515625}, {"start": 2656.78, "end": 2658.14, "word": " is", "probability": 0.494140625}, {"start": 2658.14, "end": 2658.28, "word": " the", "probability": 0.6728515625}, {"start": 2658.28, "end": 2658.48, "word": " value", "probability": 0.958984375}, {"start": 2658.48, "end": 2658.64, "word": " of", "probability": 0.9560546875}, {"start": 2658.64, "end": 2658.86, "word": " mu.", "probability": 0.9423828125}, {"start": 2659.64, "end": 2659.92, "word": " The", "probability": 0.88525390625}, {"start": 2659.92, "end": 2660.16, "word": " blue", "probability": 0.97314453125}, {"start": 2660.16, "end": 2660.46, "word": " ones", "probability": 0.84033203125}, {"start": 2660.46, "end": 2661.06, "word": " contains", "probability": 0.82421875}, {"start": 2661.06, "end": 2661.38, "word": " mu.", "probability": 0.81640625}], "temperature": 1.0}, {"id": 97, "seek": 269122, "start": 2663.4, "end": 2691.22, "text": " The red one does not, because mu in this case lies outside the confidence interval. So maybe the confidence interval you have to construct, it might cover the mu or not. I have to mention just one point here. It's better to say that my confidence interval covers mu. 
So you can say that I am 95% confident.", "tokens": [440, 2182, 472, 775, 406, 11, 570, 2992, 294, 341, 1389, 9134, 2380, 264, 6687, 15035, 13, 407, 1310, 264, 6687, 15035, 291, 362, 281, 7690, 11, 309, 1062, 2060, 264, 2992, 420, 406, 13, 286, 362, 281, 2152, 445, 472, 935, 510, 13, 467, 311, 1101, 281, 584, 300, 452, 6687, 15035, 10538, 2992, 13, 407, 291, 393, 584, 300, 286, 669, 13420, 4, 6679, 13], "avg_logprob": -0.13775276261217453, "compression_ratio": 1.734463276836158, "no_speech_prob": 0.0, "words": [{"start": 2663.4, "end": 2663.66, "word": " The", "probability": 0.72705078125}, {"start": 2663.66, "end": 2663.88, "word": " red", "probability": 0.8818359375}, {"start": 2663.88, "end": 2664.04, "word": " one", "probability": 0.912109375}, {"start": 2664.04, "end": 2664.24, "word": " does", "probability": 0.96240234375}, {"start": 2664.24, "end": 2664.52, "word": " not,", "probability": 0.94775390625}, {"start": 2664.96, "end": 2665.42, "word": " because", "probability": 0.8974609375}, {"start": 2665.42, "end": 2665.74, "word": " mu", "probability": 0.3486328125}, {"start": 2665.74, "end": 2666.44, "word": " in", "probability": 0.73486328125}, {"start": 2666.44, "end": 2666.72, "word": " this", "probability": 0.94873046875}, {"start": 2666.72, "end": 2667.02, "word": " case", "probability": 0.90966796875}, {"start": 2667.02, "end": 2667.4, "word": " lies", "probability": 0.8876953125}, {"start": 2667.4, "end": 2668.24, "word": " outside", "probability": 0.8359375}, {"start": 2668.24, "end": 2668.48, "word": " the", "probability": 0.85009765625}, {"start": 2668.48, "end": 2668.84, "word": " confidence", "probability": 0.97119140625}, {"start": 2668.84, "end": 2670.36, "word": " interval.", "probability": 0.892578125}, {"start": 2670.74, "end": 2671.16, "word": " So", "probability": 0.93505859375}, {"start": 2671.16, "end": 2671.46, "word": " maybe", "probability": 0.87890625}, {"start": 2671.46, "end": 2671.7, "word": " the", "probability": 0.84521484375}, 
{"start": 2671.7, "end": 2672.14, "word": " confidence", "probability": 0.97509765625}, {"start": 2672.14, "end": 2672.58, "word": " interval", "probability": 0.96875}, {"start": 2672.58, "end": 2672.8, "word": " you", "probability": 0.92919921875}, {"start": 2672.8, "end": 2673.14, "word": " have", "probability": 0.94580078125}, {"start": 2673.14, "end": 2674.22, "word": " to", "probability": 0.94921875}, {"start": 2674.22, "end": 2674.8, "word": " construct,", "probability": 0.9765625}, {"start": 2675.28, "end": 2675.68, "word": " it", "probability": 0.92041015625}, {"start": 2675.68, "end": 2675.96, "word": " might", "probability": 0.91064453125}, {"start": 2675.96, "end": 2676.78, "word": " cover", "probability": 0.50732421875}, {"start": 2676.78, "end": 2677.02, "word": " the", "probability": 0.7412109375}, {"start": 2677.02, "end": 2677.26, "word": " mu", "probability": 0.91162109375}, {"start": 2677.26, "end": 2678.88, "word": " or", "probability": 0.828125}, {"start": 2678.88, "end": 2679.14, "word": " not.", "probability": 0.93115234375}, {"start": 2679.9, "end": 2680.36, "word": " I", "probability": 0.99609375}, {"start": 2680.36, "end": 2680.52, "word": " have", "probability": 0.94091796875}, {"start": 2680.52, "end": 2680.62, "word": " to", "probability": 0.96337890625}, {"start": 2680.62, "end": 2680.86, "word": " mention", "probability": 0.9140625}, {"start": 2680.86, "end": 2681.22, "word": " just", "probability": 0.89990234375}, {"start": 2681.22, "end": 2681.48, "word": " one", "probability": 0.9130859375}, {"start": 2681.48, "end": 2681.9, "word": " point", "probability": 0.96875}, {"start": 2681.9, "end": 2682.18, "word": " here.", "probability": 0.84619140625}, {"start": 2682.32, "end": 2682.52, "word": " It's", "probability": 0.9658203125}, {"start": 2682.52, "end": 2682.76, "word": " better", "probability": 0.9072265625}, {"start": 2682.76, "end": 2683.04, "word": " to", "probability": 0.96240234375}, {"start": 2683.04, "end": 2683.26, "word": 
" say", "probability": 0.94091796875}, {"start": 2683.26, "end": 2683.56, "word": " that", "probability": 0.91455078125}, {"start": 2683.56, "end": 2684.6, "word": " my", "probability": 0.90625}, {"start": 2684.6, "end": 2685.1, "word": " confidence", "probability": 0.97509765625}, {"start": 2685.1, "end": 2685.62, "word": " interval", "probability": 0.94091796875}, {"start": 2685.62, "end": 2686.36, "word": " covers", "probability": 0.88232421875}, {"start": 2686.36, "end": 2686.72, "word": " mu.", "probability": 0.703125}, {"start": 2687.38, "end": 2687.78, "word": " So", "probability": 0.95654296875}, {"start": 2687.78, "end": 2688.1, "word": " you", "probability": 0.890625}, {"start": 2688.1, "end": 2688.32, "word": " can", "probability": 0.94287109375}, {"start": 2688.32, "end": 2688.52, "word": " say", "probability": 0.92822265625}, {"start": 2688.52, "end": 2688.8, "word": " that", "probability": 0.93115234375}, {"start": 2688.8, "end": 2689.04, "word": " I", "probability": 0.9931640625}, {"start": 2689.04, "end": 2689.24, "word": " am", "probability": 0.9267578125}, {"start": 2689.24, "end": 2690.16, "word": " 95", "probability": 0.978515625}, {"start": 2690.16, "end": 2690.7, "word": "%", "probability": 0.9619140625}, {"start": 2690.7, "end": 2691.22, "word": " confident.", "probability": 0.9248046875}], "temperature": 1.0}, {"id": 98, "seek": 271108, "start": 2694.14, "end": 2711.08, "text": " that the confidence interval contains mu, contains the true parameter mu, rather than saying mu lies.", "tokens": [300, 264, 6687, 15035, 8306, 2992, 11, 8306, 264, 2074, 13075, 2992, 11, 2831, 813, 1566, 2992, 9134, 13], "avg_logprob": -0.259765625, "compression_ratio": 1.275, "no_speech_prob": 0.0, "words": [{"start": 2694.14, "end": 2694.54, "word": " that", "probability": 0.436279296875}, {"start": 2694.54, "end": 2695.96, "word": " the", "probability": 0.72509765625}, {"start": 2695.96, "end": 2696.68, "word": " confidence", "probability": 0.95166015625}, 
{"start": 2696.68, "end": 2697.24, "word": " interval", "probability": 0.9892578125}, {"start": 2697.24, "end": 2699.96, "word": " contains", "probability": 0.90087890625}, {"start": 2699.96, "end": 2701.82, "word": " mu,", "probability": 0.23828125}, {"start": 2702.58, "end": 2703.08, "word": " contains", "probability": 0.8779296875}, {"start": 2703.08, "end": 2703.32, "word": " the", "probability": 0.91015625}, {"start": 2703.32, "end": 2703.7, "word": " true", "probability": 0.90771484375}, {"start": 2703.7, "end": 2705.74, "word": " parameter", "probability": 0.9658203125}, {"start": 2705.74, "end": 2706.12, "word": " mu,", "probability": 0.8603515625}, {"start": 2706.8, "end": 2707.36, "word": " rather", "probability": 0.958984375}, {"start": 2707.36, "end": 2707.94, "word": " than", "probability": 0.9501953125}, {"start": 2707.94, "end": 2708.54, "word": " saying", "probability": 0.9111328125}, {"start": 2708.54, "end": 2710.24, "word": " mu", "probability": 0.5810546875}, {"start": 2710.24, "end": 2711.08, "word": " lies.", "probability": 0.86962890625}], "temperature": 1.0}, {"id": 99, "seek": 274384, "start": 2716.32, "end": 2743.84, "text": " Because Mu is unknown. You cannot say Mu lies in the confidence. But we can say that 95% we are sure that my interval contains Mu. So don't say Mu lies in this interval. So it's better to say that we are 95% sure that my interval covers, contains Mu. 
Let's do one example.", "tokens": [1436, 15601, 307, 9841, 13, 509, 2644, 584, 15601, 9134, 294, 264, 6687, 13, 583, 321, 393, 584, 300, 13420, 4, 321, 366, 988, 300, 452, 15035, 8306, 15601, 13, 407, 500, 380, 584, 15601, 9134, 294, 341, 15035, 13, 407, 309, 311, 1101, 281, 584, 300, 321, 366, 13420, 4, 988, 300, 452, 15035, 10538, 11, 8306, 15601, 13, 961, 311, 360, 472, 1365, 13], "avg_logprob": -0.15706622467112186, "compression_ratio": 1.7169811320754718, "no_speech_prob": 0.0, "words": [{"start": 2716.32, "end": 2716.7, "word": " Because", "probability": 0.5478515625}, {"start": 2716.7, "end": 2716.88, "word": " Mu", "probability": 0.176513671875}, {"start": 2716.88, "end": 2717.02, "word": " is", "probability": 0.90673828125}, {"start": 2717.02, "end": 2717.4, "word": " unknown.", "probability": 0.8701171875}, {"start": 2717.64, "end": 2717.74, "word": " You", "probability": 0.92626953125}, {"start": 2717.74, "end": 2718.0, "word": " cannot", "probability": 0.7734375}, {"start": 2718.0, "end": 2718.34, "word": " say", "probability": 0.94384765625}, {"start": 2718.34, "end": 2718.6, "word": " Mu", "probability": 0.83544921875}, {"start": 2718.6, "end": 2718.98, "word": " lies", "probability": 0.9345703125}, {"start": 2718.98, "end": 2719.14, "word": " in", "probability": 0.92333984375}, {"start": 2719.14, "end": 2719.28, "word": " the", "probability": 0.62255859375}, {"start": 2719.28, "end": 2719.64, "word": " confidence.", "probability": 0.896484375}, {"start": 2720.1, "end": 2720.4, "word": " But", "probability": 0.9287109375}, {"start": 2720.4, "end": 2720.56, "word": " we", "probability": 0.8466796875}, {"start": 2720.56, "end": 2720.74, "word": " can", "probability": 0.9306640625}, {"start": 2720.74, "end": 2720.94, "word": " say", "probability": 0.93115234375}, {"start": 2720.94, "end": 2721.2, "word": " that", "probability": 0.8583984375}, {"start": 2721.2, "end": 2721.58, "word": " 95", "probability": 0.88720703125}, {"start": 2721.58, "end": 
2722.22, "word": "%", "probability": 0.771484375}, {"start": 2722.22, "end": 2722.46, "word": " we", "probability": 0.5244140625}, {"start": 2722.46, "end": 2722.62, "word": " are", "probability": 0.92529296875}, {"start": 2722.62, "end": 2722.9, "word": " sure", "probability": 0.91552734375}, {"start": 2722.9, "end": 2723.3, "word": " that", "probability": 0.931640625}, {"start": 2723.3, "end": 2724.14, "word": " my", "probability": 0.88916015625}, {"start": 2724.14, "end": 2724.54, "word": " interval", "probability": 0.98291015625}, {"start": 2724.54, "end": 2726.3, "word": " contains", "probability": 0.90673828125}, {"start": 2726.3, "end": 2726.56, "word": " Mu.", "probability": 0.93701171875}, {"start": 2727.66, "end": 2727.96, "word": " So", "probability": 0.9345703125}, {"start": 2727.96, "end": 2728.22, "word": " don't", "probability": 0.88720703125}, {"start": 2728.22, "end": 2728.54, "word": " say", "probability": 0.95263671875}, {"start": 2728.54, "end": 2729.0, "word": " Mu", "probability": 0.802734375}, {"start": 2729.0, "end": 2729.38, "word": " lies", "probability": 0.927734375}, {"start": 2729.38, "end": 2729.54, "word": " in", "probability": 0.927734375}, {"start": 2729.54, "end": 2729.82, "word": " this", "probability": 0.931640625}, {"start": 2729.82, "end": 2730.18, "word": " interval.", "probability": 0.970703125}, {"start": 2731.4, "end": 2731.62, "word": " So", "probability": 0.78662109375}, {"start": 2731.62, "end": 2731.76, "word": " it's", "probability": 0.858154296875}, {"start": 2731.76, "end": 2731.94, "word": " better", "probability": 0.91357421875}, {"start": 2731.94, "end": 2732.16, "word": " to", "probability": 0.96240234375}, {"start": 2732.16, "end": 2732.34, "word": " say", "probability": 0.951171875}, {"start": 2732.34, "end": 2732.64, "word": " that", "probability": 0.92578125}, {"start": 2732.64, "end": 2733.4, "word": " we", "probability": 0.81787109375}, {"start": 2733.4, "end": 2733.74, "word": " are", "probability": 
0.9404296875}, {"start": 2733.74, "end": 2734.2, "word": " 95", "probability": 0.9814453125}, {"start": 2734.2, "end": 2734.68, "word": "%", "probability": 0.990234375}, {"start": 2734.68, "end": 2735.02, "word": " sure", "probability": 0.912109375}, {"start": 2735.02, "end": 2735.34, "word": " that", "probability": 0.93017578125}, {"start": 2735.34, "end": 2735.7, "word": " my", "probability": 0.962890625}, {"start": 2735.7, "end": 2736.16, "word": " interval", "probability": 0.9658203125}, {"start": 2736.16, "end": 2737.2, "word": " covers,", "probability": 0.75927734375}, {"start": 2737.58, "end": 2738.22, "word": " contains", "probability": 0.8779296875}, {"start": 2738.22, "end": 2739.48, "word": " Mu.", "probability": 0.7001953125}, {"start": 2741.24, "end": 2741.92, "word": " Let's", "probability": 0.96923828125}, {"start": 2741.92, "end": 2742.22, "word": " do", "probability": 0.9560546875}, {"start": 2742.22, "end": 2742.84, "word": " one", "probability": 0.9248046875}, {"start": 2742.84, "end": 2743.84, "word": " example.", "probability": 0.9697265625}], "temperature": 1.0}, {"id": 100, "seek": 277847, "start": 2750.59, "end": 2778.47, "text": " Here we have a sample of 11 circuits from a large normal population. So now we have a random sample of 11. This sample is selected from normal populations. So the first assumption is okay. So normality is assumed to be satisfied. 
Now this sample", "tokens": [1692, 321, 362, 257, 6889, 295, 2975, 26354, 490, 257, 2416, 2710, 4415, 13, 407, 586, 321, 362, 257, 4974, 6889, 295, 2975, 13, 639, 6889, 307, 8209, 490, 2710, 12822, 13, 407, 264, 700, 15302, 307, 1392, 13, 407, 2026, 1860, 307, 15895, 281, 312, 11239, 13, 823, 341, 6889], "avg_logprob": -0.18449519775234735, "compression_ratio": 1.64, "no_speech_prob": 0.0, "words": [{"start": 2750.59, "end": 2750.91, "word": " Here", "probability": 0.61328125}, {"start": 2750.91, "end": 2751.09, "word": " we", "probability": 0.71728515625}, {"start": 2751.09, "end": 2751.39, "word": " have", "probability": 0.93896484375}, {"start": 2751.39, "end": 2752.17, "word": " a", "probability": 0.94091796875}, {"start": 2752.17, "end": 2752.49, "word": " sample", "probability": 0.84619140625}, {"start": 2752.49, "end": 2753.39, "word": " of", "probability": 0.94775390625}, {"start": 2753.39, "end": 2753.79, "word": " 11", "probability": 0.6044921875}, {"start": 2753.79, "end": 2754.57, "word": " circuits", "probability": 0.9267578125}, {"start": 2754.57, "end": 2756.25, "word": " from", "probability": 0.75537109375}, {"start": 2756.25, "end": 2756.57, "word": " a", "probability": 0.96630859375}, {"start": 2756.57, "end": 2756.93, "word": " large", "probability": 0.9443359375}, {"start": 2756.93, "end": 2757.35, "word": " normal", "probability": 0.8310546875}, {"start": 2757.35, "end": 2757.99, "word": " population.", "probability": 0.94287109375}, {"start": 2760.43, "end": 2760.87, "word": " So", "probability": 0.76025390625}, {"start": 2760.87, "end": 2761.11, "word": " now", "probability": 0.7939453125}, {"start": 2761.11, "end": 2761.69, "word": " we", "probability": 0.5478515625}, {"start": 2761.69, "end": 2761.89, "word": " have", "probability": 0.943359375}, {"start": 2761.89, "end": 2762.01, "word": " a", "probability": 0.7607421875}, {"start": 2762.01, "end": 2762.25, "word": " random", "probability": 0.84423828125}, {"start": 2762.25, "end": 
2762.57, "word": " sample", "probability": 0.89599609375}, {"start": 2762.57, "end": 2762.77, "word": " of", "probability": 0.93798828125}, {"start": 2762.77, "end": 2763.03, "word": " 11.", "probability": 0.9150390625}, {"start": 2765.05, "end": 2765.81, "word": " This", "probability": 0.84912109375}, {"start": 2765.81, "end": 2766.11, "word": " sample", "probability": 0.8994140625}, {"start": 2766.11, "end": 2766.29, "word": " is", "probability": 0.92822265625}, {"start": 2766.29, "end": 2766.65, "word": " selected", "probability": 0.88818359375}, {"start": 2766.65, "end": 2767.11, "word": " from", "probability": 0.8896484375}, {"start": 2767.11, "end": 2768.25, "word": " normal", "probability": 0.775390625}, {"start": 2768.25, "end": 2768.79, "word": " populations.", "probability": 0.5966796875}, {"start": 2768.99, "end": 2769.13, "word": " So", "probability": 0.8974609375}, {"start": 2769.13, "end": 2769.31, "word": " the", "probability": 0.82373046875}, {"start": 2769.31, "end": 2769.55, "word": " first", "probability": 0.88134765625}, {"start": 2769.55, "end": 2769.95, "word": " assumption", "probability": 0.97119140625}, {"start": 2769.95, "end": 2770.23, "word": " is", "probability": 0.94482421875}, {"start": 2770.23, "end": 2770.55, "word": " okay.", "probability": 0.61669921875}, {"start": 2771.27, "end": 2771.53, "word": " So", "probability": 0.845703125}, {"start": 2771.53, "end": 2772.05, "word": " normality", "probability": 0.888671875}, {"start": 2772.05, "end": 2773.27, "word": " is", "probability": 0.9462890625}, {"start": 2773.27, "end": 2773.79, "word": " assumed", "probability": 0.8388671875}, {"start": 2773.79, "end": 2774.63, "word": " to", "probability": 0.94775390625}, {"start": 2774.63, "end": 2774.81, "word": " be", "probability": 0.9482421875}, {"start": 2774.81, "end": 2775.23, "word": " satisfied.", "probability": 0.84716796875}, {"start": 2776.97, "end": 2777.69, "word": " Now", "probability": 0.94287109375}, {"start": 2777.69, "end": 
2778.01, "word": " this", "probability": 0.728515625}, {"start": 2778.01, "end": 2778.47, "word": " sample", "probability": 0.89599609375}], "temperature": 1.0}, {"id": 101, "seek": 280832, "start": 2779.72, "end": 2808.32, "text": " has a mean resistance of 2.2 ohms. So again, a sample of 11 circuits from a large normal population has a mean resistance of 2.2 ohms. That means X bar is 2.2. Ohms is the resistance unit. We know that", "tokens": [575, 257, 914, 7335, 295, 568, 13, 17, 1954, 2592, 13, 407, 797, 11, 257, 6889, 295, 2975, 26354, 490, 257, 2416, 2710, 4415, 575, 257, 914, 7335, 295, 568, 13, 17, 1954, 2592, 13, 663, 1355, 1783, 2159, 307, 568, 13, 17, 13, 876, 2592, 307, 264, 7335, 4985, 13, 492, 458, 300], "avg_logprob": -0.15497158494862642, "compression_ratio": 1.565891472868217, "no_speech_prob": 0.0, "words": [{"start": 2779.72, "end": 2780.2, "word": " has", "probability": 0.457763671875}, {"start": 2780.2, "end": 2780.5, "word": " a", "probability": 0.92578125}, {"start": 2780.5, "end": 2780.78, "word": " mean", "probability": 0.91357421875}, {"start": 2780.78, "end": 2781.9, "word": " resistance", "probability": 0.90478515625}, {"start": 2781.9, "end": 2783.18, "word": " of", "probability": 0.93408203125}, {"start": 2783.18, "end": 2783.42, "word": " 2", "probability": 0.931640625}, {"start": 2783.42, "end": 2783.76, "word": ".2", "probability": 0.988525390625}, {"start": 2783.76, "end": 2784.14, "word": " ohms.", "probability": 0.76806640625}, {"start": 2785.18, "end": 2785.42, "word": " So", "probability": 0.875}, {"start": 2785.42, "end": 2785.7, "word": " again,", "probability": 0.828125}, {"start": 2785.94, "end": 2786.02, "word": " a", "probability": 0.9189453125}, {"start": 2786.02, "end": 2786.26, "word": " sample", "probability": 0.87060546875}, {"start": 2786.26, "end": 2786.42, "word": " of", "probability": 0.96240234375}, {"start": 2786.42, "end": 2786.68, "word": " 11", "probability": 0.6328125}, {"start": 2786.68, "end": 2787.3, 
"word": " circuits", "probability": 0.9365234375}, {"start": 2787.3, "end": 2787.86, "word": " from", "probability": 0.736328125}, {"start": 2787.86, "end": 2788.18, "word": " a", "probability": 0.98828125}, {"start": 2788.18, "end": 2788.4, "word": " large", "probability": 0.9482421875}, {"start": 2788.4, "end": 2788.72, "word": " normal", "probability": 0.8271484375}, {"start": 2788.72, "end": 2789.34, "word": " population", "probability": 0.95947265625}, {"start": 2789.34, "end": 2790.4, "word": " has", "probability": 0.79052734375}, {"start": 2790.4, "end": 2790.66, "word": " a", "probability": 0.99072265625}, {"start": 2790.66, "end": 2790.8, "word": " mean", "probability": 0.95703125}, {"start": 2790.8, "end": 2791.22, "word": " resistance", "probability": 0.95263671875}, {"start": 2791.22, "end": 2791.6, "word": " of", "probability": 0.9638671875}, {"start": 2791.6, "end": 2792.28, "word": " 2", "probability": 0.98828125}, {"start": 2792.28, "end": 2793.1, "word": ".2", "probability": 0.997314453125}, {"start": 2793.1, "end": 2794.28, "word": " ohms.", "probability": 0.95361328125}, {"start": 2794.52, "end": 2794.82, "word": " That", "probability": 0.87109375}, {"start": 2794.82, "end": 2795.08, "word": " means", "probability": 0.93212890625}, {"start": 2795.08, "end": 2795.38, "word": " X", "probability": 0.343505859375}, {"start": 2795.38, "end": 2795.7, "word": " bar", "probability": 0.8798828125}, {"start": 2795.7, "end": 2797.46, "word": " is", "probability": 0.8671875}, {"start": 2797.46, "end": 2797.64, "word": " 2", "probability": 0.98779296875}, {"start": 2797.64, "end": 2798.04, "word": ".2.", "probability": 0.99658203125}, {"start": 2801.9, "end": 2802.56, "word": " Ohms", "probability": 0.92724609375}, {"start": 2802.56, "end": 2802.84, "word": " is", "probability": 0.814453125}, {"start": 2802.84, "end": 2803.32, "word": " the", "probability": 0.7578125}, {"start": 2803.32, "end": 2804.52, "word": " resistance", "probability": 0.78369140625}, 
{"start": 2804.52, "end": 2805.86, "word": " unit.", "probability": 0.919921875}, {"start": 2807.4, "end": 2807.88, "word": " We", "probability": 0.93115234375}, {"start": 2807.88, "end": 2808.06, "word": " know", "probability": 0.8916015625}, {"start": 2808.06, "end": 2808.32, "word": " that", "probability": 0.91748046875}], "temperature": 1.0}, {"id": 102, "seek": 283245, "start": 2809.21, "end": 2832.45, "text": " From past testing, it means from previous studies, we know that from past testing that the population standard deviation is 0.35. So sigma is 0.35. So sigma is known. So the second assumption is okay. But again, as we mentioned,", "tokens": [3358, 1791, 4997, 11, 309, 1355, 490, 3894, 5313, 11, 321, 458, 300, 490, 1791, 4997, 300, 264, 4415, 3832, 25163, 307, 1958, 13, 8794, 13, 407, 12771, 307, 1958, 13, 8794, 13, 407, 12771, 307, 2570, 13, 407, 264, 1150, 15302, 307, 1392, 13, 583, 797, 11, 382, 321, 2835, 11], "avg_logprob": -0.19221698338130735, "compression_ratio": 1.5578231292517006, "no_speech_prob": 0.0, "words": [{"start": 2809.21, "end": 2809.73, "word": " From", "probability": 0.416259765625}, {"start": 2809.73, "end": 2810.23, "word": " past", "probability": 0.89111328125}, {"start": 2810.23, "end": 2810.81, "word": " testing,", "probability": 0.865234375}, {"start": 2811.73, "end": 2812.21, "word": " it", "probability": 0.7958984375}, {"start": 2812.21, "end": 2812.49, "word": " means", "probability": 0.91015625}, {"start": 2812.49, "end": 2812.75, "word": " from", "probability": 0.8505859375}, {"start": 2812.75, "end": 2813.23, "word": " previous", "probability": 0.80810546875}, {"start": 2813.23, "end": 2813.61, "word": " studies,", "probability": 0.9736328125}, {"start": 2815.17, "end": 2815.31, "word": " we", "probability": 0.869140625}, {"start": 2815.31, "end": 2815.57, "word": " know", "probability": 0.87548828125}, {"start": 2815.57, "end": 2815.91, "word": " that", "probability": 0.82421875}, {"start": 2815.91, "end": 2816.59, 
"word": " from", "probability": 0.72314453125}, {"start": 2816.59, "end": 2817.09, "word": " past", "probability": 0.88330078125}, {"start": 2817.09, "end": 2817.45, "word": " testing", "probability": 0.8203125}, {"start": 2817.45, "end": 2817.79, "word": " that", "probability": 0.744140625}, {"start": 2817.79, "end": 2818.01, "word": " the", "probability": 0.80322265625}, {"start": 2818.01, "end": 2818.45, "word": " population", "probability": 0.95458984375}, {"start": 2818.45, "end": 2819.03, "word": " standard", "probability": 0.919921875}, {"start": 2819.03, "end": 2819.51, "word": " deviation", "probability": 0.93212890625}, {"start": 2819.51, "end": 2819.87, "word": " is", "probability": 0.943359375}, {"start": 2819.87, "end": 2820.07, "word": " 0", "probability": 0.8369140625}, {"start": 2820.07, "end": 2820.49, "word": ".35.", "probability": 0.9931640625}, {"start": 2821.39, "end": 2821.69, "word": " So", "probability": 0.90673828125}, {"start": 2821.69, "end": 2822.01, "word": " sigma", "probability": 0.285888671875}, {"start": 2822.01, "end": 2822.19, "word": " is", "probability": 0.9326171875}, {"start": 2822.19, "end": 2822.35, "word": " 0", "probability": 0.94775390625}, {"start": 2822.35, "end": 2822.71, "word": ".35.", "probability": 0.996826171875}, {"start": 2824.27, "end": 2824.89, "word": " So", "probability": 0.8994140625}, {"start": 2824.89, "end": 2825.19, "word": " sigma", "probability": 0.818359375}, {"start": 2825.19, "end": 2825.45, "word": " is", "probability": 0.9541015625}, {"start": 2825.45, "end": 2825.75, "word": " known.", "probability": 0.70068359375}, {"start": 2826.97, "end": 2827.33, "word": " So", "probability": 0.92236328125}, {"start": 2827.33, "end": 2827.49, "word": " the", "probability": 0.85546875}, {"start": 2827.49, "end": 2827.83, "word": " second", "probability": 0.90380859375}, {"start": 2827.83, "end": 2828.21, "word": " assumption", "probability": 0.978515625}, {"start": 2828.21, "end": 2828.53, "word": " is", 
"probability": 0.94921875}, {"start": 2828.53, "end": 2828.81, "word": " okay.", "probability": 0.74853515625}, {"start": 2831.11, "end": 2831.33, "word": " But", "probability": 0.92041015625}, {"start": 2831.33, "end": 2831.61, "word": " again,", "probability": 0.931640625}, {"start": 2831.69, "end": 2831.89, "word": " as", "probability": 0.9619140625}, {"start": 2831.89, "end": 2832.01, "word": " we", "probability": 0.95654296875}, {"start": 2832.01, "end": 2832.45, "word": " mentioned,", "probability": 0.8251953125}], "temperature": 1.0}, {"id": 103, "seek": 286251, "start": 2833.43, "end": 2862.51, "text": " Sigma's reality is not known. But here from past testing, I mean from previous knowledge, we know that sigma is 1 to the 35. Now the question is determined 95%. So C level or confidence level is 95%. Determined 95% confidence interval.", "tokens": [36595, 311, 4103, 307, 406, 2570, 13, 583, 510, 490, 1791, 4997, 11, 286, 914, 490, 3894, 3601, 11, 321, 458, 300, 12771, 307, 502, 281, 264, 6976, 13, 823, 264, 1168, 307, 9540, 13420, 6856, 407, 383, 1496, 420, 6687, 1496, 307, 13420, 6856, 4237, 966, 2001, 13420, 4, 6687, 15035, 13], "avg_logprob": -0.26967592537403107, "compression_ratio": 1.475, "no_speech_prob": 0.0, "words": [{"start": 2833.43, "end": 2834.21, "word": " Sigma's", "probability": 0.42724609375}, {"start": 2834.21, "end": 2834.59, "word": " reality", "probability": 0.92041015625}, {"start": 2834.59, "end": 2834.85, "word": " is", "probability": 0.94140625}, {"start": 2834.85, "end": 2835.11, "word": " not", "probability": 0.9150390625}, {"start": 2835.11, "end": 2835.45, "word": " known.", "probability": 0.7080078125}, {"start": 2835.79, "end": 2836.45, "word": " But", "probability": 0.7197265625}, {"start": 2836.45, "end": 2836.73, "word": " here", "probability": 0.728515625}, {"start": 2836.73, "end": 2837.23, "word": " from", "probability": 0.6015625}, {"start": 2837.23, "end": 2837.95, "word": " past", "probability": 0.78076171875}, 
{"start": 2837.95, "end": 2838.57, "word": " testing,", "probability": 0.8720703125}, {"start": 2838.89, "end": 2838.97, "word": " I", "probability": 0.7255859375}, {"start": 2838.97, "end": 2839.07, "word": " mean", "probability": 0.96533203125}, {"start": 2839.07, "end": 2839.27, "word": " from", "probability": 0.76318359375}, {"start": 2839.27, "end": 2839.79, "word": " previous", "probability": 0.86962890625}, {"start": 2839.79, "end": 2840.73, "word": " knowledge,", "probability": 0.92138671875}, {"start": 2841.35, "end": 2841.61, "word": " we", "probability": 0.9453125}, {"start": 2841.61, "end": 2841.81, "word": " know", "probability": 0.88671875}, {"start": 2841.81, "end": 2842.13, "word": " that", "probability": 0.935546875}, {"start": 2842.13, "end": 2842.57, "word": " sigma", "probability": 0.49560546875}, {"start": 2842.57, "end": 2842.81, "word": " is", "probability": 0.927734375}, {"start": 2842.81, "end": 2843.07, "word": " 1", "probability": 0.51123046875}, {"start": 2843.07, "end": 2843.19, "word": " to", "probability": 0.41357421875}, {"start": 2843.19, "end": 2843.35, "word": " the", "probability": 0.51806640625}, {"start": 2843.35, "end": 2843.59, "word": " 35.", "probability": 0.65771484375}, {"start": 2844.87, "end": 2845.33, "word": " Now", "probability": 0.73583984375}, {"start": 2845.33, "end": 2845.49, "word": " the", "probability": 0.68603515625}, {"start": 2845.49, "end": 2845.79, "word": " question", "probability": 0.9091796875}, {"start": 2845.79, "end": 2846.15, "word": " is", "probability": 0.9453125}, {"start": 2846.15, "end": 2847.47, "word": " determined", "probability": 0.371337890625}, {"start": 2847.47, "end": 2850.19, "word": " 95%.", "probability": 0.6175537109375}, {"start": 2850.19, "end": 2851.31, "word": " So", "probability": 0.94970703125}, {"start": 2851.31, "end": 2851.51, "word": " C", "probability": 0.65869140625}, {"start": 2851.51, "end": 2851.89, "word": " level", "probability": 0.7431640625}, {"start": 2851.89, 
"end": 2852.33, "word": " or", "probability": 0.54248046875}, {"start": 2852.33, "end": 2852.89, "word": " confidence", "probability": 0.97607421875}, {"start": 2852.89, "end": 2853.31, "word": " level", "probability": 0.94140625}, {"start": 2853.31, "end": 2856.15, "word": " is", "probability": 0.90234375}, {"start": 2856.15, "end": 2857.21, "word": " 95%.", "probability": 0.91796875}, {"start": 2857.21, "end": 2859.79, "word": " Determined", "probability": 0.8450520833333334}, {"start": 2859.79, "end": 2860.99, "word": " 95", "probability": 0.94970703125}, {"start": 2860.99, "end": 2861.47, "word": "%", "probability": 0.98291015625}, {"start": 2861.47, "end": 2862.07, "word": " confidence", "probability": 0.974609375}, {"start": 2862.07, "end": 2862.51, "word": " interval.", "probability": 0.921875}], "temperature": 1.0}, {"id": 104, "seek": 289225, "start": 2863.25, "end": 2892.25, "text": " For the true mean, true mean it means population mean, resistance of the population. So now the information that we have in this example, we select a random sample of size 11 from a normal population, so normality is assumed to be satisfied. 
This sample gives mean resistance of 2.2 and we know that.", "tokens": [1171, 264, 2074, 914, 11, 2074, 914, 309, 1355, 4415, 914, 11, 7335, 295, 264, 4415, 13, 407, 586, 264, 1589, 300, 321, 362, 294, 341, 1365, 11, 321, 3048, 257, 4974, 6889, 295, 2744, 2975, 490, 257, 2710, 4415, 11, 370, 2026, 1860, 307, 15895, 281, 312, 11239, 13, 639, 6889, 2709, 914, 7335, 295, 568, 13, 17, 293, 321, 458, 300, 13], "avg_logprob": -0.21838942307692308, "compression_ratio": 1.6910112359550562, "no_speech_prob": 0.0, "words": [{"start": 2863.25, "end": 2863.53, "word": " For", "probability": 0.64990234375}, {"start": 2863.53, "end": 2863.73, "word": " the", "probability": 0.8798828125}, {"start": 2863.73, "end": 2864.05, "word": " true", "probability": 0.89306640625}, {"start": 2864.05, "end": 2864.27, "word": " mean,", "probability": 0.9677734375}, {"start": 2864.57, "end": 2864.87, "word": " true", "probability": 0.51025390625}, {"start": 2864.87, "end": 2865.19, "word": " mean", "probability": 0.90576171875}, {"start": 2865.19, "end": 2865.39, "word": " it", "probability": 0.32763671875}, {"start": 2865.39, "end": 2865.79, "word": " means", "probability": 0.92529296875}, {"start": 2865.79, "end": 2867.41, "word": " population", "probability": 0.7373046875}, {"start": 2867.41, "end": 2867.75, "word": " mean,", "probability": 0.8828125}, {"start": 2868.45, "end": 2870.25, "word": " resistance", "probability": 0.418701171875}, {"start": 2870.25, "end": 2871.47, "word": " of", "probability": 0.9599609375}, {"start": 2871.47, "end": 2871.79, "word": " the", "probability": 0.90869140625}, {"start": 2871.79, "end": 2872.33, "word": " population.", "probability": 0.951171875}, {"start": 2872.97, "end": 2873.47, "word": " So", "probability": 0.8974609375}, {"start": 2873.47, "end": 2873.67, "word": " now", "probability": 0.6162109375}, {"start": 2873.67, "end": 2873.85, "word": " the", "probability": 0.73974609375}, {"start": 2873.85, "end": 2874.31, "word": " information", 
"probability": 0.85888671875}, {"start": 2874.31, "end": 2874.63, "word": " that", "probability": 0.91015625}, {"start": 2874.63, "end": 2874.77, "word": " we", "probability": 0.95849609375}, {"start": 2874.77, "end": 2875.05, "word": " have", "probability": 0.93603515625}, {"start": 2875.05, "end": 2875.33, "word": " in", "probability": 0.734375}, {"start": 2875.33, "end": 2875.57, "word": " this", "probability": 0.94482421875}, {"start": 2875.57, "end": 2876.07, "word": " example,", "probability": 0.978515625}, {"start": 2876.97, "end": 2877.11, "word": " we", "probability": 0.72119140625}, {"start": 2877.11, "end": 2877.43, "word": " select", "probability": 0.470703125}, {"start": 2877.43, "end": 2877.61, "word": " a", "probability": 0.89306640625}, {"start": 2877.61, "end": 2877.79, "word": " random", "probability": 0.8681640625}, {"start": 2877.79, "end": 2878.17, "word": " sample", "probability": 0.81884765625}, {"start": 2878.17, "end": 2878.37, "word": " of", "probability": 0.9052734375}, {"start": 2878.37, "end": 2878.63, "word": " size", "probability": 0.8642578125}, {"start": 2878.63, "end": 2879.01, "word": " 11", "probability": 0.8916015625}, {"start": 2879.01, "end": 2880.81, "word": " from", "probability": 0.68359375}, {"start": 2880.81, "end": 2881.17, "word": " a", "probability": 0.5439453125}, {"start": 2881.17, "end": 2881.39, "word": " normal", "probability": 0.90673828125}, {"start": 2881.39, "end": 2881.95, "word": " population,", "probability": 0.9462890625}, {"start": 2882.19, "end": 2882.31, "word": " so", "probability": 0.93017578125}, {"start": 2882.31, "end": 2882.89, "word": " normality", "probability": 0.7998046875}, {"start": 2882.89, "end": 2883.09, "word": " is", "probability": 0.9033203125}, {"start": 2883.09, "end": 2883.43, "word": " assumed", "probability": 0.87939453125}, {"start": 2883.43, "end": 2883.57, "word": " to", "probability": 0.96435546875}, {"start": 2883.57, "end": 2883.71, "word": " be", "probability": 0.93359375}, 
{"start": 2883.71, "end": 2884.23, "word": " satisfied.", "probability": 0.89501953125}, {"start": 2885.57, "end": 2886.01, "word": " This", "probability": 0.88720703125}, {"start": 2886.01, "end": 2886.43, "word": " sample", "probability": 0.87353515625}, {"start": 2886.43, "end": 2887.27, "word": " gives", "probability": 0.912109375}, {"start": 2887.27, "end": 2888.77, "word": " mean", "probability": 0.94091796875}, {"start": 2888.77, "end": 2889.33, "word": " resistance", "probability": 0.9501953125}, {"start": 2889.33, "end": 2889.77, "word": " of", "probability": 0.96337890625}, {"start": 2889.77, "end": 2889.91, "word": " 2", "probability": 0.9677734375}, {"start": 2889.91, "end": 2890.41, "word": ".2", "probability": 0.994384765625}, {"start": 2890.41, "end": 2891.57, "word": " and", "probability": 0.541015625}, {"start": 2891.57, "end": 2891.77, "word": " we", "probability": 0.85986328125}, {"start": 2891.77, "end": 2891.93, "word": " know", "probability": 0.89208984375}, {"start": 2891.93, "end": 2892.25, "word": " that.", "probability": 0.89794921875}], "temperature": 1.0}, {"id": 105, "seek": 292023, "start": 2893.47, "end": 2920.23, "text": " The standard deviation of the population is given by 0.35, and the question is to determine 95% confidence interval for the true mean resistance of the approximation. So this is the information we have. Now straightforward calculations will give this result. 
X bar plus or minus Z alpha over 2 sigma over 4N.", "tokens": [440, 3832, 25163, 295, 264, 4415, 307, 2212, 538, 1958, 13, 8794, 11, 293, 264, 1168, 307, 281, 6997, 13420, 4, 6687, 15035, 337, 264, 2074, 914, 7335, 295, 264, 28023, 13, 407, 341, 307, 264, 1589, 321, 362, 13, 823, 15325, 20448, 486, 976, 341, 1874, 13, 1783, 2159, 1804, 420, 3175, 1176, 8961, 670, 568, 12771, 670, 1017, 45, 13], "avg_logprob": -0.2834821438032483, "compression_ratio": 1.5147058823529411, "no_speech_prob": 0.0, "words": [{"start": 2893.47, "end": 2893.81, "word": " The", "probability": 0.61328125}, {"start": 2893.81, "end": 2894.41, "word": " standard", "probability": 0.88720703125}, {"start": 2894.41, "end": 2894.85, "word": " deviation", "probability": 0.935546875}, {"start": 2894.85, "end": 2895.11, "word": " of", "probability": 0.9375}, {"start": 2895.11, "end": 2895.23, "word": " the", "probability": 0.88037109375}, {"start": 2895.23, "end": 2895.63, "word": " population", "probability": 0.93603515625}, {"start": 2895.63, "end": 2895.89, "word": " is", "probability": 0.93701171875}, {"start": 2895.89, "end": 2896.11, "word": " given", "probability": 0.87939453125}, {"start": 2896.11, "end": 2896.51, "word": " by", "probability": 0.97119140625}, {"start": 2896.51, "end": 2897.29, "word": " 0", "probability": 0.484130859375}, {"start": 2897.29, "end": 2897.77, "word": ".35,", "probability": 0.989990234375}, {"start": 2898.49, "end": 2898.69, "word": " and", "probability": 0.92626953125}, {"start": 2898.69, "end": 2898.83, "word": " the", "probability": 0.9267578125}, {"start": 2898.83, "end": 2899.17, "word": " question", "probability": 0.9267578125}, {"start": 2899.17, "end": 2899.53, "word": " is", "probability": 0.94775390625}, {"start": 2899.53, "end": 2899.87, "word": " to", "probability": 0.272216796875}, {"start": 2899.87, "end": 2900.27, "word": " determine", "probability": 0.923828125}, {"start": 2900.27, "end": 2901.33, "word": " 95", "probability": 0.943359375}, 
{"start": 2901.33, "end": 2901.91, "word": "%", "probability": 0.8369140625}, {"start": 2901.91, "end": 2902.39, "word": " confidence", "probability": 0.66748046875}, {"start": 2902.39, "end": 2902.85, "word": " interval", "probability": 0.92529296875}, {"start": 2902.85, "end": 2903.41, "word": " for", "probability": 0.91943359375}, {"start": 2903.41, "end": 2904.25, "word": " the", "probability": 0.8857421875}, {"start": 2904.25, "end": 2904.51, "word": " true", "probability": 0.9638671875}, {"start": 2904.51, "end": 2904.81, "word": " mean", "probability": 0.9462890625}, {"start": 2904.81, "end": 2905.99, "word": " resistance", "probability": 0.92236328125}, {"start": 2905.99, "end": 2906.61, "word": " of", "probability": 0.95458984375}, {"start": 2906.61, "end": 2906.99, "word": " the", "probability": 0.91650390625}, {"start": 2906.99, "end": 2907.61, "word": " approximation.", "probability": 0.483154296875}, {"start": 2908.11, "end": 2908.35, "word": " So", "probability": 0.93017578125}, {"start": 2908.35, "end": 2908.59, "word": " this", "probability": 0.755859375}, {"start": 2908.59, "end": 2908.71, "word": " is", "probability": 0.9072265625}, {"start": 2908.71, "end": 2908.79, "word": " the", "probability": 0.748046875}, {"start": 2908.79, "end": 2909.21, "word": " information", "probability": 0.861328125}, {"start": 2909.21, "end": 2909.45, "word": " we", "probability": 0.90576171875}, {"start": 2909.45, "end": 2909.69, "word": " have.", "probability": 0.94482421875}, {"start": 2912.57, "end": 2912.85, "word": " Now", "probability": 0.947265625}, {"start": 2912.85, "end": 2913.47, "word": " straightforward", "probability": 0.58642578125}, {"start": 2913.47, "end": 2914.97, "word": " calculations", "probability": 0.9208984375}, {"start": 2914.97, "end": 2915.37, "word": " will", "probability": 0.84765625}, {"start": 2915.37, "end": 2915.65, "word": " give", "probability": 0.5498046875}, {"start": 2915.65, "end": 2916.39, "word": " this", "probability": 
0.92333984375}, {"start": 2916.39, "end": 2916.83, "word": " result.", "probability": 0.9326171875}, {"start": 2917.55, "end": 2917.77, "word": " X", "probability": 0.666015625}, {"start": 2917.77, "end": 2917.91, "word": " bar", "probability": 0.78125}, {"start": 2917.91, "end": 2918.15, "word": " plus", "probability": 0.9423828125}, {"start": 2918.15, "end": 2918.39, "word": " or", "probability": 0.49853515625}, {"start": 2918.39, "end": 2918.59, "word": " minus", "probability": 0.9931640625}, {"start": 2918.59, "end": 2918.77, "word": " Z", "probability": 0.24658203125}, {"start": 2918.77, "end": 2918.97, "word": " alpha", "probability": 0.87939453125}, {"start": 2918.97, "end": 2919.17, "word": " over", "probability": 0.84130859375}, {"start": 2919.17, "end": 2919.37, "word": " 2", "probability": 0.58984375}, {"start": 2919.37, "end": 2919.59, "word": " sigma", "probability": 0.89501953125}, {"start": 2919.59, "end": 2919.81, "word": " over", "probability": 0.794921875}, {"start": 2919.81, "end": 2920.23, "word": " 4N.", "probability": 0.2274169921875}], "temperature": 1.0}, {"id": 106, "seek": 295084, "start": 2923.22, "end": 2950.84, "text": " 2.2 plus or minus 1.96 times sigma, which is 0.35 divided by root 11. This will give 2.2 plus or minus this amount. And as mentioned before, this amount is the margin term. So just subtract this value from 2.2, you will get 1.9932. 
And add this value to 2.2, you will get this result.", "tokens": [568, 13, 17, 1804, 420, 3175, 502, 13, 22962, 1413, 12771, 11, 597, 307, 1958, 13, 8794, 6666, 538, 5593, 2975, 13, 639, 486, 976, 568, 13, 17, 1804, 420, 3175, 341, 2372, 13, 400, 382, 2835, 949, 11, 341, 2372, 307, 264, 10270, 1433, 13, 407, 445, 16390, 341, 2158, 490, 568, 13, 17, 11, 291, 486, 483, 502, 13, 8494, 11440, 13, 400, 909, 341, 2158, 281, 568, 13, 17, 11, 291, 486, 483, 341, 1874, 13], "avg_logprob": -0.13876952473074197, "compression_ratio": 1.6193181818181819, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2923.22, "end": 2923.48, "word": " 2", "probability": 0.462646484375}, {"start": 2923.48, "end": 2923.86, "word": ".2", "probability": 0.96044921875}, {"start": 2923.86, "end": 2924.12, "word": " plus", "probability": 0.76513671875}, {"start": 2924.12, "end": 2924.3, "word": " or", "probability": 0.8818359375}, {"start": 2924.3, "end": 2924.64, "word": " minus", "probability": 0.98681640625}, {"start": 2924.64, "end": 2924.88, "word": " 1", "probability": 0.98388671875}, {"start": 2924.88, "end": 2925.46, "word": ".96", "probability": 0.860595703125}, {"start": 2925.46, "end": 2926.02, "word": " times", "probability": 0.87255859375}, {"start": 2926.02, "end": 2926.5, "word": " sigma,", "probability": 0.84228515625}, {"start": 2928.0, "end": 2928.4, "word": " which", "probability": 0.95166015625}, {"start": 2928.4, "end": 2928.58, "word": " is", "probability": 0.9345703125}, {"start": 2928.58, "end": 2928.8, "word": " 0", "probability": 0.64892578125}, {"start": 2928.8, "end": 2929.2, "word": ".35", "probability": 0.991455078125}, {"start": 2929.2, "end": 2929.88, "word": " divided", "probability": 0.662109375}, {"start": 2929.88, "end": 2930.1, "word": " by", "probability": 0.97509765625}, {"start": 2930.1, "end": 2930.32, "word": " root", "probability": 0.7978515625}, {"start": 2930.32, "end": 2930.6, "word": " 11.", "probability": 0.921875}, {"start": 2931.44, 
"end": 2931.8, "word": " This", "probability": 0.8857421875}, {"start": 2931.8, "end": 2932.02, "word": " will", "probability": 0.86767578125}, {"start": 2932.02, "end": 2932.3, "word": " give", "probability": 0.8291015625}, {"start": 2932.3, "end": 2932.66, "word": " 2", "probability": 0.96142578125}, {"start": 2932.66, "end": 2932.96, "word": ".2", "probability": 0.993896484375}, {"start": 2932.96, "end": 2933.22, "word": " plus", "probability": 0.95751953125}, {"start": 2933.22, "end": 2933.42, "word": " or", "probability": 0.94970703125}, {"start": 2933.42, "end": 2933.72, "word": " minus", "probability": 0.9873046875}, {"start": 2933.72, "end": 2934.02, "word": " this", "probability": 0.94775390625}, {"start": 2934.02, "end": 2934.34, "word": " amount.", "probability": 0.90771484375}, {"start": 2935.82, "end": 2936.42, "word": " And", "probability": 0.93212890625}, {"start": 2936.42, "end": 2936.6, "word": " as", "probability": 0.86474609375}, {"start": 2936.6, "end": 2937.02, "word": " mentioned", "probability": 0.783203125}, {"start": 2937.02, "end": 2938.1, "word": " before,", "probability": 0.81298828125}, {"start": 2938.34, "end": 2938.66, "word": " this", "probability": 0.94677734375}, {"start": 2938.66, "end": 2938.92, "word": " amount", "probability": 0.89599609375}, {"start": 2938.92, "end": 2939.16, "word": " is", "probability": 0.9453125}, {"start": 2939.16, "end": 2939.32, "word": " the", "probability": 0.9091796875}, {"start": 2939.32, "end": 2939.66, "word": " margin", "probability": 0.7490234375}, {"start": 2939.66, "end": 2939.94, "word": " term.", "probability": 0.84765625}, {"start": 2940.92, "end": 2941.46, "word": " So", "probability": 0.9521484375}, {"start": 2941.46, "end": 2941.84, "word": " just", "probability": 0.8232421875}, {"start": 2941.84, "end": 2942.36, "word": " subtract", "probability": 0.84619140625}, {"start": 2942.36, "end": 2942.76, "word": " this", "probability": 0.94482421875}, {"start": 2942.76, "end": 2943.14, "word": 
" value", "probability": 0.9716796875}, {"start": 2943.14, "end": 2943.5, "word": " from", "probability": 0.88818359375}, {"start": 2943.5, "end": 2943.76, "word": " 2", "probability": 0.99609375}, {"start": 2943.76, "end": 2944.26, "word": ".2,", "probability": 0.9990234375}, {"start": 2944.82, "end": 2945.36, "word": " you", "probability": 0.943359375}, {"start": 2945.36, "end": 2945.5, "word": " will", "probability": 0.84619140625}, {"start": 2945.5, "end": 2945.74, "word": " get", "probability": 0.93994140625}, {"start": 2945.74, "end": 2946.02, "word": " 1", "probability": 0.9970703125}, {"start": 2946.02, "end": 2947.1, "word": ".9932.", "probability": 0.9856770833333334}, {"start": 2947.84, "end": 2948.32, "word": " And", "probability": 0.94873046875}, {"start": 2948.32, "end": 2948.64, "word": " add", "probability": 0.88232421875}, {"start": 2948.64, "end": 2948.9, "word": " this", "probability": 0.94482421875}, {"start": 2948.9, "end": 2949.3, "word": " value", "probability": 0.97314453125}, {"start": 2949.3, "end": 2949.52, "word": " to", "probability": 0.9287109375}, {"start": 2949.52, "end": 2949.74, "word": " 2", "probability": 0.9814453125}, {"start": 2949.74, "end": 2950.06, "word": ".2,", "probability": 0.998291015625}, {"start": 2950.12, "end": 2950.18, "word": " you", "probability": 0.8740234375}, {"start": 2950.18, "end": 2950.24, "word": " will", "probability": 0.3095703125}, {"start": 2950.24, "end": 2950.4, "word": " get", "probability": 0.93212890625}, {"start": 2950.4, "end": 2950.64, "word": " this", "probability": 0.9326171875}, {"start": 2950.64, "end": 2950.84, "word": " result.", "probability": 0.30712890625}], "temperature": 1.0}, {"id": 107, "seek": 297945, "start": 2951.93, "end": 2979.45, "text": " So Mu greater than or equal to 1.99 all the way up to 2.4068. So that's the calculations we have. So Mu is between 95 percent. 
Now let's see our interpretation for this result.", "tokens": [407, 15601, 5044, 813, 420, 2681, 281, 502, 13, 8494, 439, 264, 636, 493, 281, 568, 13, 5254, 27102, 13, 407, 300, 311, 264, 20448, 321, 362, 13, 407, 15601, 307, 1296, 13420, 3043, 13, 823, 718, 311, 536, 527, 14174, 337, 341, 1874, 13], "avg_logprob": -0.20193614065647125, "compression_ratio": 1.2919708029197081, "no_speech_prob": 0.0, "words": [{"start": 2951.93, "end": 2952.27, "word": " So", "probability": 0.73095703125}, {"start": 2952.27, "end": 2952.63, "word": " Mu", "probability": 0.3076171875}, {"start": 2952.63, "end": 2953.37, "word": " greater", "probability": 0.654296875}, {"start": 2953.37, "end": 2953.67, "word": " than", "probability": 0.92529296875}, {"start": 2953.67, "end": 2953.79, "word": " or", "probability": 0.94580078125}, {"start": 2953.79, "end": 2953.99, "word": " equal", "probability": 0.90673828125}, {"start": 2953.99, "end": 2954.13, "word": " to", "probability": 0.5322265625}, {"start": 2954.13, "end": 2954.31, "word": " 1", "probability": 0.9462890625}, {"start": 2954.31, "end": 2954.85, "word": ".99", "probability": 0.9736328125}, {"start": 2954.85, "end": 2955.69, "word": " all", "probability": 0.63037109375}, {"start": 2955.69, "end": 2955.89, "word": " the", "probability": 0.916015625}, {"start": 2955.89, "end": 2956.09, "word": " way", "probability": 0.9501953125}, {"start": 2956.09, "end": 2956.31, "word": " up", "probability": 0.92822265625}, {"start": 2956.31, "end": 2956.55, "word": " to", "probability": 0.9560546875}, {"start": 2956.55, "end": 2956.77, "word": " 2", "probability": 0.9765625}, {"start": 2956.77, "end": 2959.65, "word": ".4068.", "probability": 0.91552734375}, {"start": 2963.95, "end": 2964.63, "word": " So", "probability": 0.9130859375}, {"start": 2964.63, "end": 2964.95, "word": " that's", "probability": 0.896240234375}, {"start": 2964.95, "end": 2965.07, "word": " the", "probability": 0.90185546875}, {"start": 2965.07, "end": 2965.63, "word": 
" calculations", "probability": 0.81591796875}, {"start": 2965.63, "end": 2965.93, "word": " we", "probability": 0.93505859375}, {"start": 2965.93, "end": 2966.15, "word": " have.", "probability": 0.923828125}, {"start": 2966.53, "end": 2966.79, "word": " So", "probability": 0.93798828125}, {"start": 2966.79, "end": 2967.11, "word": " Mu", "probability": 0.9130859375}, {"start": 2967.11, "end": 2969.01, "word": " is", "probability": 0.935546875}, {"start": 2969.01, "end": 2969.55, "word": " between", "probability": 0.8984375}, {"start": 2969.55, "end": 2970.95, "word": " 95", "probability": 0.6611328125}, {"start": 2970.95, "end": 2971.39, "word": " percent.", "probability": 0.2132568359375}, {"start": 2974.83, "end": 2975.51, "word": " Now", "probability": 0.91064453125}, {"start": 2975.51, "end": 2975.93, "word": " let's", "probability": 0.851318359375}, {"start": 2975.93, "end": 2976.25, "word": " see", "probability": 0.91943359375}, {"start": 2976.25, "end": 2977.81, "word": " our", "probability": 0.888671875}, {"start": 2977.81, "end": 2978.33, "word": " interpretation", "probability": 0.91552734375}, {"start": 2978.33, "end": 2978.87, "word": " for", "probability": 0.93310546875}, {"start": 2978.87, "end": 2979.11, "word": " this", "probability": 0.94140625}, {"start": 2979.11, "end": 2979.45, "word": " result.", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 108, "seek": 300656, "start": 2980.68, "end": 3006.56, "text": " So again, straightforward calculations will give this result. Now, the interpretation, you should write the following. We are 95% confident that the true mean resistance is between these two values. 
Just saying, we are 95% sure that", "tokens": [407, 797, 11, 15325, 20448, 486, 976, 341, 1874, 13, 823, 11, 264, 14174, 11, 291, 820, 2464, 264, 3480, 13, 492, 366, 13420, 4, 6679, 300, 264, 2074, 914, 7335, 307, 1296, 613, 732, 4190, 13, 1449, 1566, 11, 321, 366, 13420, 4, 988, 300], "avg_logprob": -0.19281915400890595, "compression_ratio": 1.4036144578313252, "no_speech_prob": 0.0, "words": [{"start": 2980.68, "end": 2980.96, "word": " So", "probability": 0.744140625}, {"start": 2980.96, "end": 2981.26, "word": " again,", "probability": 0.828125}, {"start": 2981.42, "end": 2981.92, "word": " straightforward", "probability": 0.50634765625}, {"start": 2981.92, "end": 2982.68, "word": " calculations", "probability": 0.8896484375}, {"start": 2982.68, "end": 2982.96, "word": " will", "probability": 0.8564453125}, {"start": 2982.96, "end": 2983.26, "word": " give", "probability": 0.88427734375}, {"start": 2983.26, "end": 2987.38, "word": " this", "probability": 0.84912109375}, {"start": 2987.38, "end": 2987.86, "word": " result.", "probability": 0.9091796875}, {"start": 2989.64, "end": 2990.22, "word": " Now,", "probability": 0.89697265625}, {"start": 2990.34, "end": 2990.52, "word": " the", "probability": 0.60107421875}, {"start": 2990.52, "end": 2990.98, "word": " interpretation,", "probability": 0.9072265625}, {"start": 2991.38, "end": 2991.5, "word": " you", "probability": 0.8876953125}, {"start": 2991.5, "end": 2991.76, "word": " should", "probability": 0.96630859375}, {"start": 2991.76, "end": 2993.2, "word": " write", "probability": 0.91650390625}, {"start": 2993.2, "end": 2993.38, "word": " the", "probability": 0.9140625}, {"start": 2993.38, "end": 2993.64, "word": " following.", "probability": 0.8916015625}, {"start": 2994.14, "end": 2994.34, "word": " We", "probability": 0.79150390625}, {"start": 2994.34, "end": 2994.62, "word": " are", "probability": 0.939453125}, {"start": 2994.62, "end": 2996.5, "word": " 95", "probability": 0.92724609375}, {"start": 
2996.5, "end": 2997.0, "word": "%", "probability": 0.916015625}, {"start": 2997.0, "end": 2997.46, "word": " confident", "probability": 0.95361328125}, {"start": 2997.46, "end": 2997.8, "word": " that", "probability": 0.92822265625}, {"start": 2997.8, "end": 2998.0, "word": " the", "probability": 0.89794921875}, {"start": 2998.0, "end": 2998.26, "word": " true", "probability": 0.95556640625}, {"start": 2998.26, "end": 2998.52, "word": " mean", "probability": 0.94189453125}, {"start": 2998.52, "end": 2999.2, "word": " resistance", "probability": 0.93701171875}, {"start": 2999.2, "end": 2999.66, "word": " is", "probability": 0.93505859375}, {"start": 2999.66, "end": 3000.1, "word": " between", "probability": 0.88427734375}, {"start": 3000.1, "end": 3000.56, "word": " these", "probability": 0.8642578125}, {"start": 3000.56, "end": 3000.76, "word": " two", "probability": 0.87060546875}, {"start": 3000.76, "end": 3001.12, "word": " values.", "probability": 0.7978515625}, {"start": 3003.42, "end": 3004.12, "word": " Just", "probability": 0.80029296875}, {"start": 3004.12, "end": 3004.52, "word": " saying,", "probability": 0.86572265625}, {"start": 3004.92, "end": 3005.1, "word": " we", "probability": 0.927734375}, {"start": 3005.1, "end": 3005.36, "word": " are", "probability": 0.94140625}, {"start": 3005.36, "end": 3005.76, "word": " 95", "probability": 0.97802734375}, {"start": 3005.76, "end": 3006.0, "word": "%", "probability": 0.890625}, {"start": 3006.0, "end": 3006.24, "word": " sure", "probability": 0.92919921875}, {"start": 3006.24, "end": 3006.56, "word": " that", "probability": 0.8271484375}], "temperature": 1.0}, {"id": 109, "seek": 303280, "start": 3007.14, "end": 3032.8, "text": " the true mean resistance is between these two values. Although the true mean may or may not be in this interval, but we are 95% of the intervals from form in this manner will contain the true mean. 
So again, you don't know exactly if the true mean", "tokens": [264, 2074, 914, 7335, 307, 1296, 613, 732, 4190, 13, 5780, 264, 2074, 914, 815, 420, 815, 406, 312, 294, 341, 15035, 11, 457, 321, 366, 13420, 4, 295, 264, 26651, 490, 1254, 294, 341, 9060, 486, 5304, 264, 2074, 914, 13, 407, 797, 11, 291, 500, 380, 458, 2293, 498, 264, 2074, 914], "avg_logprob": -0.1961647705598311, "compression_ratio": 1.5897435897435896, "no_speech_prob": 0.0, "words": [{"start": 3007.14, "end": 3007.38, "word": " the", "probability": 0.1983642578125}, {"start": 3007.38, "end": 3007.68, "word": " true", "probability": 0.8876953125}, {"start": 3007.68, "end": 3007.94, "word": " mean", "probability": 0.84619140625}, {"start": 3007.94, "end": 3008.56, "word": " resistance", "probability": 0.86376953125}, {"start": 3008.56, "end": 3009.32, "word": " is", "probability": 0.88037109375}, {"start": 3009.32, "end": 3009.88, "word": " between", "probability": 0.87353515625}, {"start": 3009.88, "end": 3010.26, "word": " these", "probability": 0.86181640625}, {"start": 3010.26, "end": 3010.44, "word": " two", "probability": 0.89892578125}, {"start": 3010.44, "end": 3010.8, "word": " values.", "probability": 0.828125}, {"start": 3011.28, "end": 3011.92, "word": " Although", "probability": 0.89794921875}, {"start": 3011.92, "end": 3013.3, "word": " the", "probability": 0.681640625}, {"start": 3013.3, "end": 3013.58, "word": " true", "probability": 0.97705078125}, {"start": 3013.58, "end": 3013.9, "word": " mean", "probability": 0.95751953125}, {"start": 3013.9, "end": 3014.34, "word": " may", "probability": 0.91943359375}, {"start": 3014.34, "end": 3014.6, "word": " or", "probability": 0.8544921875}, {"start": 3014.6, "end": 3015.04, "word": " may", "probability": 0.943359375}, {"start": 3015.04, "end": 3015.48, "word": " not", "probability": 0.92333984375}, {"start": 3015.48, "end": 3015.82, "word": " be", "probability": 0.951171875}, {"start": 3015.82, "end": 3016.04, "word": " in", 
"probability": 0.73388671875}, {"start": 3016.04, "end": 3016.26, "word": " this", "probability": 0.93798828125}, {"start": 3016.26, "end": 3016.72, "word": " interval,", "probability": 0.95166015625}, {"start": 3017.64, "end": 3018.02, "word": " but", "probability": 0.90673828125}, {"start": 3018.02, "end": 3018.82, "word": " we", "probability": 0.6591796875}, {"start": 3018.82, "end": 3019.12, "word": " are", "probability": 0.59423828125}, {"start": 3019.12, "end": 3019.62, "word": " 95", "probability": 0.87744140625}, {"start": 3019.62, "end": 3020.52, "word": "%", "probability": 0.7802734375}, {"start": 3020.52, "end": 3020.84, "word": " of", "probability": 0.92578125}, {"start": 3020.84, "end": 3021.0, "word": " the", "probability": 0.92529296875}, {"start": 3021.0, "end": 3021.62, "word": " intervals", "probability": 0.85888671875}, {"start": 3021.62, "end": 3023.66, "word": " from", "probability": 0.65869140625}, {"start": 3023.66, "end": 3024.26, "word": " form", "probability": 0.54052734375}, {"start": 3024.26, "end": 3024.66, "word": " in", "probability": 0.814453125}, {"start": 3024.66, "end": 3024.9, "word": " this", "probability": 0.94873046875}, {"start": 3024.9, "end": 3025.2, "word": " manner", "probability": 0.73974609375}, {"start": 3025.2, "end": 3025.46, "word": " will", "probability": 0.80078125}, {"start": 3025.46, "end": 3026.0, "word": " contain", "probability": 0.91015625}, {"start": 3026.0, "end": 3026.98, "word": " the", "probability": 0.8583984375}, {"start": 3026.98, "end": 3027.2, "word": " true", "probability": 0.85302734375}, {"start": 3027.2, "end": 3027.44, "word": " mean.", "probability": 0.69287109375}, {"start": 3028.3, "end": 3029.06, "word": " So", "probability": 0.92724609375}, {"start": 3029.06, "end": 3029.38, "word": " again,", "probability": 0.8134765625}, {"start": 3029.52, "end": 3029.66, "word": " you", "probability": 0.95263671875}, {"start": 3029.66, "end": 3029.96, "word": " don't", "probability": 0.97412109375}, 
{"start": 3029.96, "end": 3030.38, "word": " know", "probability": 0.89501953125}, {"start": 3030.38, "end": 3030.98, "word": " exactly", "probability": 0.8818359375}, {"start": 3030.98, "end": 3032.0, "word": " if", "probability": 0.9228515625}, {"start": 3032.0, "end": 3032.22, "word": " the", "probability": 0.92138671875}, {"start": 3032.22, "end": 3032.46, "word": " true", "probability": 0.9775390625}, {"start": 3032.46, "end": 3032.8, "word": " mean", "probability": 0.9638671875}], "temperature": 1.0}, {"id": 110, "seek": 306238, "start": 3033.36, "end": 3062.38, "text": " lies in the interval, but you could say that 95% of the intervals formed in this way will contain the true mean. So that's all for confidence estimation for the population mean immune. Any questions? Later, next time, inshallah, we'll talk about", "tokens": [9134, 294, 264, 15035, 11, 457, 291, 727, 584, 300, 13420, 4, 295, 264, 26651, 8693, 294, 341, 636, 486, 5304, 264, 2074, 914, 13, 407, 300, 311, 439, 337, 6687, 35701, 337, 264, 4415, 914, 11992, 13, 2639, 1651, 30, 11965, 11, 958, 565, 11, 1028, 71, 13492, 11, 321, 603, 751, 466], "avg_logprob": -0.19715908494862644, "compression_ratio": 1.4470588235294117, "no_speech_prob": 0.0, "words": [{"start": 3033.36, "end": 3033.96, "word": " lies", "probability": 0.52685546875}, {"start": 3033.96, "end": 3034.18, "word": " in", "probability": 0.935546875}, {"start": 3034.18, "end": 3034.32, "word": " the", "probability": 0.89306640625}, {"start": 3034.32, "end": 3034.68, "word": " interval,", "probability": 0.8955078125}, {"start": 3035.08, "end": 3035.32, "word": " but", "probability": 0.9169921875}, {"start": 3035.32, "end": 3035.5, "word": " you", "probability": 0.7890625}, {"start": 3035.5, "end": 3035.72, "word": " could", "probability": 0.85888671875}, {"start": 3035.72, "end": 3035.98, "word": " say", "probability": 0.89697265625}, {"start": 3035.98, "end": 3036.28, "word": " that", "probability": 0.8701171875}, {"start": 3036.28, 
"end": 3037.0, "word": " 95", "probability": 0.921875}, {"start": 3037.0, "end": 3037.52, "word": "%", "probability": 0.78515625}, {"start": 3037.52, "end": 3037.82, "word": " of", "probability": 0.9658203125}, {"start": 3037.82, "end": 3037.98, "word": " the", "probability": 0.904296875}, {"start": 3037.98, "end": 3038.38, "word": " intervals", "probability": 0.84912109375}, {"start": 3038.38, "end": 3039.06, "word": " formed", "probability": 0.85546875}, {"start": 3039.06, "end": 3039.96, "word": " in", "probability": 0.92919921875}, {"start": 3039.96, "end": 3040.24, "word": " this", "probability": 0.94970703125}, {"start": 3040.24, "end": 3040.56, "word": " way", "probability": 0.91943359375}, {"start": 3040.56, "end": 3041.48, "word": " will", "probability": 0.8271484375}, {"start": 3041.48, "end": 3042.1, "word": " contain", "probability": 0.9189453125}, {"start": 3042.1, "end": 3043.12, "word": " the", "probability": 0.58154296875}, {"start": 3043.12, "end": 3043.36, "word": " true", "probability": 0.923828125}, {"start": 3043.36, "end": 3043.58, "word": " mean.", "probability": 0.45654296875}, {"start": 3045.18, "end": 3045.94, "word": " So", "probability": 0.888671875}, {"start": 3045.94, "end": 3046.42, "word": " that's", "probability": 0.890380859375}, {"start": 3046.42, "end": 3046.84, "word": " all", "probability": 0.95166015625}, {"start": 3046.84, "end": 3047.56, "word": " for", "probability": 0.94775390625}, {"start": 3047.56, "end": 3048.8, "word": " confidence", "probability": 0.96533203125}, {"start": 3048.8, "end": 3049.82, "word": " estimation", "probability": 0.95849609375}, {"start": 3049.82, "end": 3050.78, "word": " for", "probability": 0.87890625}, {"start": 3050.78, "end": 3050.94, "word": " the", "probability": 0.92529296875}, {"start": 3050.94, "end": 3051.38, "word": " population", "probability": 0.96875}, {"start": 3051.38, "end": 3051.86, "word": " mean", "probability": 0.8271484375}, {"start": 3051.86, "end": 3052.88, "word": " 
immune.", "probability": 0.8173828125}, {"start": 3054.36, "end": 3054.56, "word": " Any", "probability": 0.8818359375}, {"start": 3054.56, "end": 3054.84, "word": " questions?", "probability": 0.51611328125}, {"start": 3059.64, "end": 3060.3, "word": " Later,", "probability": 0.73828125}, {"start": 3060.54, "end": 3060.7, "word": " next", "probability": 0.93798828125}, {"start": 3060.7, "end": 3060.96, "word": " time,", "probability": 0.8818359375}, {"start": 3061.1, "end": 3061.24, "word": " inshallah,", "probability": 0.6663411458333334}, {"start": 3061.28, "end": 3061.62, "word": " we'll", "probability": 0.8642578125}, {"start": 3061.62, "end": 3061.98, "word": " talk", "probability": 0.8955078125}, {"start": 3061.98, "end": 3062.38, "word": " about", "probability": 0.90478515625}], "temperature": 1.0}, {"id": 111, "seek": 308984, "start": 3064.16, "end": 3089.84, "text": " The confidence interval when sigma is unknown. I mean, do you ever truly know sigma? May not. In virtually all real world business situations, sigma is not known. 
If there is a situation,", "tokens": [440, 6687, 15035, 562, 12771, 307, 9841, 13, 286, 914, 11, 360, 291, 1562, 4908, 458, 12771, 30, 1891, 406, 13, 682, 14103, 439, 957, 1002, 1606, 6851, 11, 12771, 307, 406, 2570, 13, 759, 456, 307, 257, 2590, 11], "avg_logprob": -0.19855182345320538, "compression_ratio": 1.413533834586466, "no_speech_prob": 0.0, "words": [{"start": 3064.16, "end": 3064.52, "word": " The", "probability": 0.595703125}, {"start": 3064.52, "end": 3065.02, "word": " confidence", "probability": 0.9384765625}, {"start": 3065.02, "end": 3065.62, "word": " interval", "probability": 0.96142578125}, {"start": 3065.62, "end": 3067.48, "word": " when", "probability": 0.4736328125}, {"start": 3067.48, "end": 3067.86, "word": " sigma", "probability": 0.345703125}, {"start": 3067.86, "end": 3068.28, "word": " is", "probability": 0.94384765625}, {"start": 3068.28, "end": 3068.64, "word": " unknown.", "probability": 0.89990234375}, {"start": 3069.98, "end": 3070.18, "word": " I", "probability": 0.90771484375}, {"start": 3070.18, "end": 3070.5, "word": " mean,", "probability": 0.966796875}, {"start": 3073.74, "end": 3074.04, "word": " do", "probability": 0.8671875}, {"start": 3074.04, "end": 3074.2, "word": " you", "probability": 0.9716796875}, {"start": 3074.2, "end": 3074.58, "word": " ever", "probability": 0.88818359375}, {"start": 3074.58, "end": 3075.26, "word": " truly", "probability": 0.89404296875}, {"start": 3075.26, "end": 3075.94, "word": " know", "probability": 0.87841796875}, {"start": 3075.94, "end": 3076.4, "word": " sigma?", "probability": 0.86572265625}, {"start": 3077.42, "end": 3077.6, "word": " May", "probability": 0.55908203125}, {"start": 3077.6, "end": 3077.84, "word": " not.", "probability": 0.94091796875}, {"start": 3079.2, "end": 3079.4, "word": " In", "probability": 0.91796875}, {"start": 3079.4, "end": 3079.84, "word": " virtually", "probability": 0.8564453125}, {"start": 3079.84, "end": 3080.5, "word": " all", "probability": 
0.91845703125}, {"start": 3080.5, "end": 3080.84, "word": " real", "probability": 0.94775390625}, {"start": 3080.84, "end": 3081.22, "word": " world", "probability": 0.69189453125}, {"start": 3081.22, "end": 3081.8, "word": " business", "probability": 0.88525390625}, {"start": 3081.8, "end": 3083.84, "word": " situations,", "probability": 0.7734375}, {"start": 3084.44, "end": 3084.9, "word": " sigma", "probability": 0.91064453125}, {"start": 3084.9, "end": 3085.96, "word": " is", "probability": 0.94873046875}, {"start": 3085.96, "end": 3086.2, "word": " not", "probability": 0.95068359375}, {"start": 3086.2, "end": 3086.56, "word": " known.", "probability": 0.7353515625}, {"start": 3088.04, "end": 3088.76, "word": " If", "probability": 0.96484375}, {"start": 3088.76, "end": 3088.98, "word": " there", "probability": 0.91259765625}, {"start": 3088.98, "end": 3089.12, "word": " is", "probability": 0.9326171875}, {"start": 3089.12, "end": 3089.24, "word": " a", "probability": 0.9921875}, {"start": 3089.24, "end": 3089.84, "word": " situation,", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 112, "seek": 312056, "start": 3091.46, "end": 3120.56, "text": " Where Sigma is known, then Mu is also known. Because since to calculate Mu, you need to know Mu. In order to calculate Sigma, you should know the value of Mu. 
Finally, if you truly know Mu, if you know the value of Mu, there would be no need to gather a sample to estimate.", "tokens": [2305, 36595, 307, 2570, 11, 550, 15601, 307, 611, 2570, 13, 1436, 1670, 281, 8873, 15601, 11, 291, 643, 281, 458, 15601, 13, 682, 1668, 281, 8873, 36595, 11, 291, 820, 458, 264, 2158, 295, 15601, 13, 6288, 11, 498, 291, 4908, 458, 15601, 11, 498, 291, 458, 264, 2158, 295, 15601, 11, 456, 576, 312, 572, 643, 281, 5448, 257, 6889, 281, 12539, 13], "avg_logprob": -0.18217330064737436, "compression_ratio": 1.6809815950920246, "no_speech_prob": 0.0, "words": [{"start": 3091.46, "end": 3091.8, "word": " Where", "probability": 0.329345703125}, {"start": 3091.8, "end": 3092.12, "word": " Sigma", "probability": 0.339599609375}, {"start": 3092.12, "end": 3092.38, "word": " is", "probability": 0.9404296875}, {"start": 3092.38, "end": 3092.64, "word": " known,", "probability": 0.71533203125}, {"start": 3093.32, "end": 3093.5, "word": " then", "probability": 0.8642578125}, {"start": 3093.5, "end": 3093.76, "word": " Mu", "probability": 0.89501953125}, {"start": 3093.76, "end": 3093.98, "word": " is", "probability": 0.9443359375}, {"start": 3093.98, "end": 3094.36, "word": " also", "probability": 0.8671875}, {"start": 3094.36, "end": 3094.64, "word": " known.", "probability": 0.7119140625}, {"start": 3095.2, "end": 3095.62, "word": " Because", "probability": 0.779296875}, {"start": 3095.62, "end": 3097.36, "word": " since", "probability": 0.455322265625}, {"start": 3097.36, "end": 3097.54, "word": " to", "probability": 0.8115234375}, {"start": 3097.54, "end": 3097.94, "word": " calculate", "probability": 0.9033203125}, {"start": 3097.94, "end": 3098.32, "word": " Mu,", "probability": 0.818359375}, {"start": 3098.62, "end": 3098.8, "word": " you", "probability": 0.94482421875}, {"start": 3098.8, "end": 3099.06, "word": " need", "probability": 0.89208984375}, {"start": 3099.06, "end": 3099.26, "word": " to", "probability": 0.9638671875}, {"start": 
3099.26, "end": 3099.5, "word": " know", "probability": 0.88818359375}, {"start": 3099.5, "end": 3099.78, "word": " Mu.", "probability": 0.97021484375}, {"start": 3100.4, "end": 3100.84, "word": " In", "probability": 0.91015625}, {"start": 3100.84, "end": 3101.02, "word": " order", "probability": 0.92041015625}, {"start": 3101.02, "end": 3101.28, "word": " to", "probability": 0.97119140625}, {"start": 3101.28, "end": 3101.84, "word": " calculate", "probability": 0.90283203125}, {"start": 3101.84, "end": 3102.3, "word": " Sigma,", "probability": 0.85693359375}, {"start": 3103.18, "end": 3103.54, "word": " you", "probability": 0.955078125}, {"start": 3103.54, "end": 3103.84, "word": " should", "probability": 0.9501953125}, {"start": 3103.84, "end": 3104.62, "word": " know", "probability": 0.88330078125}, {"start": 3104.62, "end": 3104.86, "word": " the", "probability": 0.90869140625}, {"start": 3104.86, "end": 3105.14, "word": " value", "probability": 0.97509765625}, {"start": 3105.14, "end": 3105.3, "word": " of", "probability": 0.97216796875}, {"start": 3105.3, "end": 3105.56, "word": " Mu.", "probability": 0.7578125}, {"start": 3108.6, "end": 3109.2, "word": " Finally,", "probability": 0.6845703125}, {"start": 3109.5, "end": 3109.86, "word": " if", "probability": 0.9443359375}, {"start": 3109.86, "end": 3110.02, "word": " you", "probability": 0.96533203125}, {"start": 3110.02, "end": 3110.66, "word": " truly", "probability": 0.85009765625}, {"start": 3110.66, "end": 3110.9, "word": " know", "probability": 0.8798828125}, {"start": 3110.9, "end": 3111.2, "word": " Mu,", "probability": 0.98193359375}, {"start": 3112.0, "end": 3112.22, "word": " if", "probability": 0.8720703125}, {"start": 3112.22, "end": 3112.34, "word": " you", "probability": 0.96240234375}, {"start": 3112.34, "end": 3112.46, "word": " know", "probability": 0.884765625}, {"start": 3112.46, "end": 3112.64, "word": " the", "probability": 0.91259765625}, {"start": 3112.64, "end": 3112.86, "word": " 
value", "probability": 0.97802734375}, {"start": 3112.86, "end": 3113.04, "word": " of", "probability": 0.96337890625}, {"start": 3113.04, "end": 3113.22, "word": " Mu,", "probability": 0.966796875}, {"start": 3113.8, "end": 3114.52, "word": " there", "probability": 0.7705078125}, {"start": 3114.52, "end": 3114.84, "word": " would", "probability": 0.8330078125}, {"start": 3114.84, "end": 3115.06, "word": " be", "probability": 0.95068359375}, {"start": 3115.06, "end": 3115.3, "word": " no", "probability": 0.95166015625}, {"start": 3115.3, "end": 3115.7, "word": " need", "probability": 0.93115234375}, {"start": 3115.7, "end": 3118.7, "word": " to", "probability": 0.9423828125}, {"start": 3118.7, "end": 3118.98, "word": " gather", "probability": 0.81005859375}, {"start": 3118.98, "end": 3119.68, "word": " a", "probability": 0.9541015625}, {"start": 3119.68, "end": 3119.98, "word": " sample", "probability": 0.8671875}, {"start": 3119.98, "end": 3120.26, "word": " to", "probability": 0.896484375}, {"start": 3120.26, "end": 3120.56, "word": " estimate.", "probability": 0.70947265625}], "temperature": 1.0}, {"id": 113, "seek": 315287, "start": 3124.05, "end": 3152.87, "text": " The value of the mean is given. You have to stop because you don't need to select a random sample in order to estimate the population mean. And again, in real life, sigma is unknown. So next time, we'll talk about confidence interval for mu when sigma is unknown. 
So that's all.", "tokens": [440, 2158, 295, 264, 914, 307, 2212, 13, 509, 362, 281, 1590, 570, 291, 500, 380, 643, 281, 3048, 257, 4974, 6889, 294, 1668, 281, 12539, 264, 4415, 914, 13, 400, 797, 11, 294, 957, 993, 11, 12771, 307, 9841, 13, 407, 958, 565, 11, 321, 603, 751, 466, 6687, 15035, 337, 2992, 562, 12771, 307, 9841, 13, 407, 300, 311, 439, 13], "avg_logprob": -0.15686035458929837, "compression_ratio": 1.516304347826087, "no_speech_prob": 0.0, "words": [{"start": 3124.05, "end": 3124.25, "word": " The", "probability": 0.341552734375}, {"start": 3124.25, "end": 3124.61, "word": " value", "probability": 0.94189453125}, {"start": 3124.61, "end": 3124.85, "word": " of", "probability": 0.96044921875}, {"start": 3124.85, "end": 3124.99, "word": " the", "probability": 0.8525390625}, {"start": 3124.99, "end": 3125.13, "word": " mean", "probability": 0.93798828125}, {"start": 3125.13, "end": 3125.29, "word": " is", "probability": 0.93115234375}, {"start": 3125.29, "end": 3125.55, "word": " given.", "probability": 0.8271484375}, {"start": 3126.91, "end": 3127.03, "word": " You", "probability": 0.85888671875}, {"start": 3127.03, "end": 3127.19, "word": " have", "probability": 0.91357421875}, {"start": 3127.19, "end": 3127.33, "word": " to", "probability": 0.97021484375}, {"start": 3127.33, "end": 3127.61, "word": " stop", "probability": 0.9560546875}, {"start": 3127.61, "end": 3127.93, "word": " because", "probability": 0.55908203125}, {"start": 3127.93, "end": 3128.07, "word": " you", "probability": 0.955078125}, {"start": 3128.07, "end": 3128.29, "word": " don't", "probability": 0.951171875}, {"start": 3128.29, "end": 3128.53, "word": " need", "probability": 0.90234375}, {"start": 3128.53, "end": 3128.89, "word": " to", "probability": 0.96533203125}, {"start": 3128.89, "end": 3130.81, "word": " select", "probability": 0.73828125}, {"start": 3130.81, "end": 3130.99, "word": " a", "probability": 0.94287109375}, {"start": 3130.99, "end": 3131.17, "word": " random", 
"probability": 0.82568359375}, {"start": 3131.17, "end": 3131.53, "word": " sample", "probability": 0.75439453125}, {"start": 3131.53, "end": 3131.85, "word": " in", "probability": 0.8935546875}, {"start": 3131.85, "end": 3132.05, "word": " order", "probability": 0.92626953125}, {"start": 3132.05, "end": 3132.71, "word": " to", "probability": 0.9658203125}, {"start": 3132.71, "end": 3134.17, "word": " estimate", "probability": 0.90966796875}, {"start": 3134.17, "end": 3134.95, "word": " the", "probability": 0.90771484375}, {"start": 3134.95, "end": 3135.37, "word": " population", "probability": 0.7607421875}, {"start": 3135.37, "end": 3135.71, "word": " mean.", "probability": 0.939453125}, {"start": 3137.37, "end": 3137.69, "word": " And", "probability": 0.89892578125}, {"start": 3137.69, "end": 3138.03, "word": " again,", "probability": 0.8896484375}, {"start": 3138.31, "end": 3138.41, "word": " in", "probability": 0.94384765625}, {"start": 3138.41, "end": 3138.67, "word": " real", "probability": 0.95947265625}, {"start": 3138.67, "end": 3139.09, "word": " life,", "probability": 0.90771484375}, {"start": 3139.95, "end": 3140.23, "word": " sigma", "probability": 0.587890625}, {"start": 3140.23, "end": 3140.77, "word": " is", "probability": 0.93896484375}, {"start": 3140.77, "end": 3141.77, "word": " unknown.", "probability": 0.83984375}, {"start": 3142.69, "end": 3142.99, "word": " So", "probability": 0.89208984375}, {"start": 3142.99, "end": 3143.55, "word": " next", "probability": 0.7548828125}, {"start": 3143.55, "end": 3144.03, "word": " time,", "probability": 0.89111328125}, {"start": 3144.69, "end": 3145.37, "word": " we'll", "probability": 0.923095703125}, {"start": 3145.37, "end": 3145.57, "word": " talk", "probability": 0.89892578125}, {"start": 3145.57, "end": 3145.89, "word": " about", "probability": 0.90087890625}, {"start": 3145.89, "end": 3146.37, "word": " confidence", "probability": 0.9453125}, {"start": 3146.37, "end": 3146.85, "word": " interval", 
"probability": 0.76123046875}, {"start": 3146.85, "end": 3147.89, "word": " for", "probability": 0.83935546875}, {"start": 3147.89, "end": 3148.19, "word": " mu", "probability": 0.51513671875}, {"start": 3148.19, "end": 3149.15, "word": " when", "probability": 0.6982421875}, {"start": 3149.15, "end": 3149.59, "word": " sigma", "probability": 0.94091796875}, {"start": 3149.59, "end": 3150.51, "word": " is", "probability": 0.95068359375}, {"start": 3150.51, "end": 3150.83, "word": " unknown.", "probability": 0.89453125}, {"start": 3151.95, "end": 3152.17, "word": " So", "probability": 0.9462890625}, {"start": 3152.17, "end": 3152.53, "word": " that's", "probability": 0.958251953125}, {"start": 3152.53, "end": 3152.87, "word": " all.", "probability": 0.94677734375}], "temperature": 1.0}, {"id": 114, "seek": 315450, "start": 3153.9, "end": 3154.5, "text": " Fourth day.", "tokens": [23773, 786, 13], "avg_logprob": -0.4345702975988388, "compression_ratio": 0.6, "no_speech_prob": 0.0, "words": [{"start": 3153.9, "end": 3154.3, "word": " Fourth", "probability": 0.282958984375}, {"start": 3154.3, "end": 3154.5, "word": " day.", "probability": 0.89013671875}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3156.00975, "duration_after_vad": 2979.421874999985} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..08baa28311b3c0b1376cecbc17528b8871ec1b0c --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_postprocess.srt @@ -0,0 +1,1380 @@ +1 +00:00:14,040 --> 00:00:18,360 +Let's start chapter 11. Here we'll talk about some + +2 +00:00:18,360 --> 00:00:19,960 +of Cai's characters. + +3 +00:00:22,920 --> 00:00:27,180 +If you remember the first time we met, we + +4 +00:00:27,180 --> 00:00:30,470 +discussed that there are two types of data. 
One + +5 +00:00:30,470 --> 00:00:36,270 +was numerical data and other categorical data. In + +6 +00:00:36,270 --> 00:00:42,130 +chapter eight, we discussed testing and estimation + +7 +00:00:42,130 --> 00:00:46,650 +about one, either population mean, if the data is + +8 +00:00:46,650 --> 00:00:52,810 +numeric, or population proportion if the data is + +9 +00:00:52,810 --> 00:00:56,910 +not numeric, I mean categorical data. In chapter + +10 +00:00:56,910 --> 00:01:00,590 +nine and ten, I'm sorry, in chapter 10 we extend + +11 +00:01:00,590 --> 00:01:05,830 +for two population means and two population + +12 +00:01:05,830 --> 00:01:09,990 +proportions. Now this chapter talks about testing + +13 +00:01:09,990 --> 00:01:13,810 +about the difference between two population means + +14 +00:01:13,810 --> 00:01:17,790 +and two population proportions or comparing more + +15 +00:01:17,790 --> 00:01:20,650 +than two population proportions. So we are going + +16 +00:01:20,650 --> 00:01:23,690 +to test to see if there exists a difference + +17 +00:01:23,690 --> 00:01:29,240 +between two population proportions or comparison + +18 +00:01:29,240 --> 00:01:33,160 +among more than two population proportions. And + +19 +00:01:33,160 --> 00:01:37,240 +the second objective also will be test the + +20 +00:01:37,240 --> 00:01:41,180 +independent of two categorical variables. For + +21 +00:01:41,180 --> 00:01:43,820 +example, suppose we are interested in the + +22 +00:01:43,820 --> 00:01:47,970 +relationship between gender and education. And our + +23 +00:01:47,970 --> 00:01:51,050 +goal is to see if there exists a significant + +24 +00:01:51,050 --> 00:01:55,210 +relationship between education and gender, for + +25 +00:01:55,210 --> 00:01:59,190 +example, or if we are interested in health status + +26 +00:01:59,190 --> 00:02:03,450 +and smoking. Smoking, either the person is heavy + +27 +00:02:03,450 --> 00:02:08,270 +smoker or not heavy smoker. 
And health status + +28 +00:02:08,270 --> 00:02:10,370 +could be classified as bad, + +29 +00:02:13,250 --> 00:02:20,540 +medium, or good. So we are going to test to see if + +30 +00:02:20,540 --> 00:02:22,960 +there exists a difference or relationship between + +31 +00:02:22,960 --> 00:02:26,300 +two variables. So here we are interested in + +32 +00:02:26,300 --> 00:02:30,900 +qualitative data, either we have two or more than + +33 +00:02:30,900 --> 00:02:35,560 +two variables. So the objectives in this chapter + +34 +00:02:35,560 --> 00:02:39,860 +are when to use the chi-square test for + +35 +00:02:39,860 --> 00:02:44,130 +contingency tables. And the other, how to use the + +36 +00:02:44,130 --> 00:02:47,490 +chi-square test for contingency plots. So here we + +37 +00:02:47,490 --> 00:02:52,910 +are going to see what are the conditions. And what + +38 +00:02:52,910 --> 00:02:56,550 +are the conditions we can see, we can use the chi + +39 +00:02:56,550 --> 00:03:00,850 +-square test. So the condition is, we should have + +40 +00:03:00,850 --> 00:03:05,170 +qualitative data. So in this case, we'll talk + +41 +00:03:05,170 --> 00:03:05,890 +about just + +42 +00:03:09,330 --> 00:03:14,950 +gender, health status, education, income levels, + +43 +00:03:15,190 --> 00:03:18,450 +and so on. So we are not talking about numerical + +44 +00:03:18,450 --> 00:03:22,950 +data. So we are focusing on non-numerical data. I + +45 +00:03:22,950 --> 00:03:28,250 +mean on categorical data. Let's see the definition + +46 +00:03:28,250 --> 00:03:31,050 +of contingency table. For example, as I mentioned, + +47 +00:03:31,250 --> 00:03:37,090 +suppose we are interested in gender and education. + +48 +00:03:38,500 --> 00:03:42,660 +And for example, suppose gender is classified, + +49 +00:03:43,440 --> 00:03:50,180 +normal is classified as female and male. 
Education + +50 +00:03:50,180 --> 00:03:57,920 +could be classified as either secondary or + +51 +00:03:57,920 --> 00:04:07,100 +less, bachelor, master, PhD. So here we are + +52 +00:04:07,100 --> 00:04:11,920 +talking about a relationship between gender and + +53 +00:04:11,920 --> 00:04:18,200 +education. Now in this case, we have this table. + +54 +00:04:23,360 --> 00:04:28,360 +For example, one is gender. And gender is either + +55 +00:04:28,360 --> 00:04:31,360 +female or male. And the other is education. + +56 +00:04:34,080 --> 00:04:37,140 +And education in this case is classified into four + +57 +00:04:37,140 --> 00:04:40,140 +categories, secondary or less, + +58 +00:04:43,360 --> 00:04:49,820 +BA, master and PhD. This table is called + +59 +00:04:49,820 --> 00:04:57,700 +contingency table. It's two by four table. Two + +60 +00:04:57,700 --> 00:04:58,840 +because there are two rows. + +61 +00:05:07,220 --> 00:05:09,800 +In this case, there are two rules and two codes. + +62 +00:05:10,540 --> 00:05:13,640 +For example, gender, + +63 +00:05:16,260 --> 00:05:20,720 +male or female, and the other one, suppose, + +64 +00:05:21,100 --> 00:05:21,700 +smoking. + +65 +00:05:24,120 --> 00:05:29,490 +Either yes or no. Smoking or not smoking. So in + +66 +00:05:29,490 --> 00:05:32,330 +this case there are two rows and two columns. And + +67 +00:05:32,330 --> 00:05:35,910 +the goal here is we want to test to see if there + +68 +00:05:35,910 --> 00:05:38,750 +exists significant relationship between gender and + +69 +00:05:38,750 --> 00:05:42,090 +smoking. So the two variables of interest in this + +70 +00:05:42,090 --> 00:05:46,550 +case are categorical variables. So how can we test + +71 +00:05:46,550 --> 00:05:50,790 +to see if there exists a significant difference + +72 +00:05:50,790 --> 00:05:55,550 +between the two proportions or to see if smoking + +73 +00:05:55,550 --> 00:06:04,410 +and Gender are independent. 
So our goal is to see + +74 +00:06:04,410 --> 00:06:07,190 +if they are independent or not. It means if we + +75 +00:06:07,190 --> 00:06:10,990 +reject the null hypothesis of independence, it + +76 +00:06:10,990 --> 00:06:17,160 +means they are related. So again, a contingency + +77 +00:06:17,160 --> 00:06:19,920 +table, tables in this case useful in situations + +78 +00:06:19,920 --> 00:06:23,920 +involving multiple population proportions, so we + +79 +00:06:23,920 --> 00:06:26,820 +have more than two, or even two population + +80 +00:06:26,820 --> 00:06:30,640 +proportions, used to classify sample observations + +81 +00:06:30,640 --> 00:06:34,580 +according to two or more characteristics. In this + +82 +00:06:34,580 --> 00:06:37,760 +case, there are only two characteristics of + +83 +00:06:37,760 --> 00:06:40,140 +interest. One is education, the other is gender. + +84 +00:06:40,740 --> 00:06:44,360 +It could be we have another characteristic. The + +85 +00:06:44,360 --> 00:06:49,520 +other thing is these tables are called cross + +86 +00:06:49,520 --> 00:06:52,800 +-classification tables. Cross because we have + +87 +00:06:52,800 --> 00:06:58,180 +variable A versus variable B. For this reason it's + +88 +00:06:58,180 --> 00:07:04,440 +called cross-classification tables. There is + +89 +00:07:04,440 --> 00:07:07,360 +another example here. Here we are interested in + +90 +00:07:07,360 --> 00:07:14,700 +left-handed versus gender. So, dominant hand left + +91 +00:07:14,700 --> 00:07:22,320 +versus right, so the person either use left or + +92 +00:07:22,320 --> 00:07:27,180 +right hand, gender male or female. So in this case + +93 +00:07:27,180 --> 00:07:30,440 +there are two categories for each variable, so + +94 +00:07:30,440 --> 00:07:35,070 +this type of example is called Two by two table, + +95 +00:07:35,530 --> 00:07:39,310 +because there are two rows, two classifications + +96 +00:07:39,310 --> 00:07:44,410 +here. 
So hand either left or right, the person + +97 +00:07:44,410 --> 00:07:47,130 +either male or female. So we have two + +98 +00:07:47,130 --> 00:07:49,430 +characteristics for each one, so it's two by two + +99 +00:07:49,430 --> 00:07:53,270 +table. Suppose in this case we are examining a + +100 +00:07:53,270 --> 00:07:58,050 +sample of 300 children. So the sample size is 300, + +101 +00:07:58,510 --> 00:08:04,000 +and we have this result. So gender is classified + +102 +00:08:04,000 --> 00:08:09,200 +as males and females. Hand preference, either left + +103 +00:08:09,200 --> 00:08:15,780 +or right. So in this case, there are 120 females. + +104 +00:08:16,580 --> 00:08:22,840 +Twelve of them are using left hand. So it means + +105 +00:08:22,840 --> 00:08:27,100 +that there are twelve left handers for females. + +106 +00:08:29,310 --> 00:08:34,330 +while for males there are 180 females and 20 of + +107 +00:08:34,330 --> 00:08:40,310 +them left-handers and so again 120 females 12 were + +108 +00:08:40,310 --> 00:08:47,210 +left-handed 180 males and 20 were also left-handed + +109 +00:08:47,210 --> 00:08:51,750 +and now the question is we are going to test see + +110 +00:08:51,750 --> 00:08:56,470 +if the difference between the two proportions are + +111 +00:08:56,470 --> 00:09:01,110 +equal I mean Under zero, we are going to test to + +112 +00:09:01,110 --> 00:09:03,850 +see if pi 1 equals to pi 2. It means the + +113 +00:09:03,850 --> 00:09:09,970 +proportion of females who are left-handed is equal + +114 +00:09:09,970 --> 00:09:14,210 +to the proportion of males who are left-handed. So + +115 +00:09:14,210 --> 00:09:17,710 +it looks similar to the one we did in chapter 10 + +116 +00:09:17,710 --> 00:09:21,870 +when we are talking about testing for the + +117 +00:09:21,870 --> 00:09:23,830 +difference between two population proportions. 
+ +118 +00:09:24,170 --> 00:09:26,890 +It's similar, but here we will use a different + +119 +00:09:26,890 --> 00:09:29,850 +statistic. It's called chi-square test. So we are + +120 +00:09:29,850 --> 00:09:32,850 +going to test if there is no significant + +121 +00:09:32,850 --> 00:09:35,230 +difference between the population proportions for + +122 +00:09:35,230 --> 00:09:39,770 +males and females left-handed against there exists + +123 +00:09:39,770 --> 00:09:43,580 +a difference. In this case, always we have two + +124 +00:09:43,580 --> 00:09:47,560 +-sided test for chi-square. Chi-square never be + +125 +00:09:47,560 --> 00:09:51,080 +negative, chi-square is always positive. So here + +126 +00:09:51,080 --> 00:09:54,820 +we are talking about two-sided test. It means the + +127 +00:09:54,820 --> 00:09:59,740 +two proportions are not the same. Hand preference + +128 +00:09:59,740 --> 00:10:03,120 +is not independent of gender. In other words, we + +129 +00:10:03,120 --> 00:10:06,840 +can say that hand preference is not independent of + +130 +00:10:06,840 --> 00:10:10,200 +gender. So here we can say that hand preference is + +131 +00:10:10,740 --> 00:10:16,280 +It is independent of gender. So it means under H0, + +132 +00:10:16,780 --> 00:10:25,520 +we assume hand preference and gender are + +133 +00:10:25,520 --> 00:10:30,520 +independent or + +134 +00:10:33,540 --> 00:10:37,620 +A proportion of females who are left-handed is + +135 +00:10:37,620 --> 00:10:40,860 +equal to the proportion of males who are left + +136 +00:10:40,860 --> 00:10:43,740 +-handed. It means they are independent. I mean, + +137 +00:10:43,840 --> 00:10:47,520 +hand preference and gender are independent against + +138 +00:10:47,520 --> 00:10:52,420 +either. You may write that the two proportions are + +139 +00:10:52,420 --> 00:10:58,000 +not the same or the two variables are dependent. 
+ +140 +00:10:59,120 --> 00:11:01,780 +So you can say that hand preference + +141 +00:11:05,620 --> 00:11:12,360 +and gender are either you may say that are not + +142 +00:11:12,360 --> 00:11:25,180 +independent or related or dependent so + +143 +00:11:25,180 --> 00:11:30,440 +again not independent means either they are + +144 +00:11:30,440 --> 00:11:39,110 +related or dependent Now, if H0 is true, if we + +145 +00:11:39,110 --> 00:11:42,810 +assume H0 is true, it means the proportion of left + +146 +00:11:42,810 --> 00:11:47,850 +-handed females should be the same as the + +147 +00:11:47,850 --> 00:11:53,430 +proportion of left-handed males. It says that the + +148 +00:11:53,430 --> 00:11:59,110 +proportion is the same as, not equal to. Because + +149 +00:11:59,110 --> 00:12:04,020 +if we reject the null hypothesis, Then we have + +150 +00:12:04,020 --> 00:12:05,980 +sufficient evidence to support the alternative. + +151 +00:12:06,920 --> 00:12:10,060 +But if we don't reject the null, it doesn't imply + +152 +00:12:10,060 --> 00:12:13,740 +that H0 is true. It means there is insufficient + +153 +00:12:13,740 --> 00:12:17,420 +evidence to support the alternative hypothesis. So + +154 +00:12:17,420 --> 00:12:20,500 +it's better to say that the two proportions are + +155 +00:12:20,500 --> 00:12:24,540 +the same. Same does not mean equal. Same means + +156 +00:12:25,400 --> 00:12:29,640 +there exists a small difference, I mean not + +157 +00:12:29,640 --> 00:12:31,300 +significant difference between the two + +158 +00:12:31,300 --> 00:12:34,640 +proportions. So you have to be careful between, + +159 +00:12:35,080 --> 00:12:39,240 +distinguish actually between same and equal. So + +160 +00:12:39,240 --> 00:12:42,700 +same, it doesn't mean exactly they are equal, but + +161 +00:12:42,700 --> 00:12:45,580 +they are roughly equal, or approximately, or they + +162 +00:12:45,580 --> 00:12:49,320 +actually, they are close to each other. 
Against, + +163 +00:12:49,940 --> 00:12:52,640 +here again, against the two population proportions + +164 +00:12:52,640 --> 00:12:57,800 +are not the same. So let's see how can we examine + +165 +00:12:57,800 --> 00:13:01,420 +this null hypothesis by using a new statistic. + +166 +00:13:01,980 --> 00:13:06,700 +This statistic is called Chi-square. Chi-square is + +167 +00:13:06,700 --> 00:13:14,260 +denoted by this Greek letter, Chi-square. It's a + +168 +00:13:14,260 --> 00:13:19,840 +Greek letter. It's pronounced as Chi, C-H-I, Chi + +169 +00:13:19,840 --> 00:13:23,280 +-square. It looks like X. + +170 +00:13:30,500 --> 00:13:33,160 +And chi-square is given by, chi-square statistic + +171 +00:13:33,160 --> 00:13:39,180 +is given by this equation. So chi-square is the + +172 +00:13:39,180 --> 00:13:49,500 +sum of F for 0 minus F expected wanted square + +173 +00:13:49,500 --> 00:13:55,000 +divided by Fe. + +174 +00:13:56,150 --> 00:13:59,490 +Now, let's see the definition for each term here. + +175 +00:13:59,870 --> 00:14:04,450 +Fo, it means the observed frequency in a + +176 +00:14:04,450 --> 00:14:09,430 +particular cell in the table you have. Fe is the + +177 +00:14:09,430 --> 00:14:13,190 +expected frequency in a particular cell if it's 0 + +178 +00:14:13,190 --> 00:14:15,690 +is true. So if you go back a little bit to the + +179 +00:14:15,690 --> 00:14:23,790 +previous table, these values 12, 24, 108, 156 are + +180 +00:14:23,790 --> 00:14:28,880 +the observed frequency. So these values represent + +181 +00:14:28,880 --> 00:14:37,420 +Fo. So Fo is the observed frequency. + +182 +00:14:38,200 --> 00:14:40,040 +The frequency is from the sample. + +183 +00:14:45,080 --> 00:14:48,340 +Again, we are testing proportion 1 equals + +184 +00:14:48,340 --> 00:14:52,940 +proportion 2. Now for Fe. + +185 +00:14:54,780 --> 00:14:58,500 +Fe is the expected frequency in a particular cell + +186 +00:14:58,500 --> 00:15:02,800 +if each cell is true. 
If we are assuming the two + +187 +00:15:02,800 --> 00:15:05,980 +population proportions are the same, what do you + +188 +00:15:05,980 --> 00:15:10,540 +expect the frequency for each cell? So we are + +189 +00:15:10,540 --> 00:15:18,100 +going to compute the observed, I'm sorry, the + +190 +00:15:18,100 --> 00:15:23,840 +expected frequency for each cell. in this table so + +191 +00:15:23,840 --> 00:15:27,280 +let's see how can we do that by using the same + +192 +00:15:27,280 --> 00:15:35,300 +rule we had before now chi-square statistic for + +193 +00:15:35,300 --> 00:15:38,360 +the two by two case I mean if there are two rows + +194 +00:15:38,360 --> 00:15:41,300 +and two columns has only one degree of freedom + +195 +00:15:41,300 --> 00:15:46,440 +later on we'll see if we have more than two rows + +196 +00:15:46,440 --> 00:15:50,530 +and more than two columns we look for different + +197 +00:15:50,530 --> 00:15:54,230 +value for degrees of freedom. So for two by two + +198 +00:15:54,230 --> 00:15:59,670 +tables, there is only one degree of freedom. Now + +199 +00:15:59,670 --> 00:16:03,010 +the assumption here for using chi-square, each + +200 +00:16:03,010 --> 00:16:05,550 +cell in the contingency table has expected + +201 +00:16:05,550 --> 00:16:08,870 +frequency of at least five. So these expected + +202 +00:16:08,870 --> 00:16:13,350 +frequencies should be at least five for each cell. + +203 +00:16:13,830 --> 00:16:17,740 +So that's the condition for using So we have to + +204 +00:16:17,740 --> 00:16:21,080 +test + +205 +00:16:21,080 --> 00:16:24,600 +if the expected request for each cell is at least + +206 +00:16:24,600 --> 00:16:29,360 +5. So the condition is straightforward. Now, my + +207 +00:16:29,360 --> 00:16:35,540 +decision rule is, the chi-square is always one + +208 +00:16:35,540 --> 00:16:40,240 +-tailed. I mean, it's positive always. So the, + +209 +00:16:40,420 --> 00:16:42,020 +always chi-square. 
+ +210 +00:16:44,720 --> 00:16:48,820 +is greater than or equal to zero. So we reject the + +211 +00:16:48,820 --> 00:16:52,000 +null hypothesis if the value of the chi-square + +212 +00:16:52,000 --> 00:16:57,040 +statistic lies in the rejection region and only + +213 +00:16:57,040 --> 00:17:00,840 +there is only one side. So there is only one + +214 +00:17:00,840 --> 00:17:03,780 +rejection region. So we reject the null hypothesis + +215 +00:17:04,750 --> 00:17:08,270 +If the value of chi-square falls in this rejection + +216 +00:17:08,270 --> 00:17:12,170 +region. I mean, if chi-square statistic is greater + +217 +00:17:12,170 --> 00:17:14,950 +than chi-square alpha, then we reject the null + +218 +00:17:14,950 --> 00:17:22,850 +hypothesis. Again, here we are testing H0 + +219 +00:17:22,850 --> 00:17:28,690 +by 1 equals by 2 against two-sided test. Even + +220 +00:17:28,690 --> 00:17:33,820 +there is only one side. But chi-square is designed + +221 +00:17:33,820 --> 00:17:37,780 +for testing pi 1 equals pi 2 against pi 1 does not + +222 +00:17:37,780 --> 00:17:40,520 +equal pi 2. In this case, you cannot know the + +223 +00:17:40,520 --> 00:17:45,280 +direction of this difference. I mean, you cannot + +224 +00:17:45,280 --> 00:17:48,940 +say pi 1 is greater than or pi 1 is smaller than. + +225 +00:17:49,440 --> 00:17:54,100 +Because chi-square is always positive. If you + +226 +00:17:54,100 --> 00:17:57,220 +remember from this statistic, when we are testing + +227 +00:17:57,220 --> 00:18:01,180 +pi 1 equals pi 2, Z could be positive or negative. + +228 +00:18:02,100 --> 00:18:05,640 +So, based on that, we can decide if pi 1 is + +229 +00:18:05,640 --> 00:18:09,160 +greater than or smaller than pi 2. But here, since + +230 +00:18:09,160 --> 00:18:12,240 +chi-square is always positive, then you cannot + +231 +00:18:12,240 --> 00:18:15,320 +determine the direction of the relationship. 
You + +232 +00:18:15,320 --> 00:18:17,620 +just say that there exists a significant + +233 +00:18:17,620 --> 00:18:21,940 +relationship between such and such. So by using + +234 +00:18:21,940 --> 00:18:25,320 +chi-square, you are doing just a test to see if + +235 +00:18:25,320 --> 00:18:29,840 +there is a relationship between x and y, or if + +236 +00:18:29,840 --> 00:18:32,460 +this relationship is not significant. But you + +237 +00:18:32,460 --> 00:18:36,160 +cannot determine either the strength, I mean you + +238 +00:18:36,160 --> 00:18:39,560 +cannot say there exists strong relationship, or + +239 +00:18:39,560 --> 00:18:42,980 +the direction, you cannot say there exists inverse + +240 +00:18:42,980 --> 00:18:46,220 +or direct positive or negative relationship, you + +241 +00:18:46,220 --> 00:18:50,660 +just say there exists a relationship between x and + +242 +00:18:50,660 --> 00:18:56,540 +y. So one more time, my decision rule is, if the + +243 +00:18:56,540 --> 00:19:01,000 +value of the chi-square greater than chi-square + +244 +00:19:01,000 --> 00:19:04,580 +alpha, then we reject the null hypothesis. So + +245 +00:19:04,580 --> 00:19:08,980 +there is also another way to reject by using b + +246 +00:19:08,980 --> 00:19:09,600 +-value approach. + +247 +00:19:12,650 --> 00:19:16,150 +B value in this case, complete deprivation of chi + +248 +00:19:16,150 --> 00:19:19,630 +-square greater than chi-square statistic, and + +249 +00:19:19,630 --> 00:19:24,870 +always we reject H0 if this B value is smaller + +250 +00:19:24,870 --> 00:19:27,610 +than alpha. So as we mentioned again before, + +251 +00:19:28,130 --> 00:19:35,770 +always we reject H0 if B value is smaller than + +252 +00:19:35,770 --> 00:19:36,010 +alpha. + +253 +00:19:39,290 --> 00:19:43,530 +So again, my decision rule is, we reject the null + +254 +00:19:43,530 --> 00:19:47,370 +hypothesis if the value of the statistic lies in + +255 +00:19:47,370 --> 00:19:50,610 +the rejection region. 
And again, there is only one + +256 +00:19:50,610 --> 00:19:54,350 +rejection region in this case, because chi-square + +257 +00:19:54,350 --> 00:19:58,770 +is always positive. If you look at this formula, F + +258 +00:19:58,770 --> 00:20:02,250 +observed minus F expected squared, so it's + +259 +00:20:02,250 --> 00:20:06,730 +positive. F is also positive, so chi-square is + +260 +00:20:06,730 --> 00:20:11,850 +always positive. Now let's see how can we compute + +261 +00:20:11,850 --> 00:20:18,450 +the value of the chi-square statistic. If we go + +262 +00:20:18,450 --> 00:20:24,090 +back a little bit to the data we have, in this + +263 +00:20:24,090 --> 00:20:31,510 +case there are one in twenty females and twelve + +264 +00:20:31,510 --> 00:20:34,870 +out of them are left-handed. + +265 +00:20:39,650 --> 00:20:50,630 +Left, right, 12, 108, 24, 156. The totals are 120, + +266 +00:20:51,290 --> 00:20:52,210 +180, + +267 +00:20:54,510 --> 00:21:00,290 +36, 264, and 300. So that's the table we have now. + +268 +00:21:00,920 --> 00:21:04,600 +Let's see how can we compute the value of the chi + +269 +00:21:04,600 --> 00:21:08,860 +-square statistic. The first step, compute the + +270 +00:21:08,860 --> 00:21:13,100 +average proportion, the same as the one we did in + +271 +00:21:13,100 --> 00:21:16,620 +chapter 10. It's called overall proportion, or + +272 +00:21:16,620 --> 00:21:23,100 +pooled proportion. And B dash, in this case, is + +273 +00:21:23,100 --> 00:21:30,270 +given by x1 plus x2 divided by n1 plus n2. in left + +274 +00:21:30,270 --> 00:21:34,690 +-handed, either males or females. In this sample, + +275 +00:21:34,990 --> 00:21:41,490 +there are 12 females, 12 left-handed females and + +276 +00:21:41,490 --> 00:21:51,850 +24 males. So 12 plus 24 divided by 1 plus 8 is 2. + +277 +00:21:52,010 --> 00:21:58,780 +There are 120 females and 180 females. 
So overall + +278 +00:21:58,780 --> 00:22:04,480 +proportion 12 plus 24, which is actually this + +279 +00:22:04,480 --> 00:22:10,320 +total 36, divided by overall total or the grand + +280 +00:22:10,320 --> 00:22:14,440 +total, which is 300. So the formula is + +281 +00:22:14,440 --> 00:22:17,400 +straightforward. Just suppose I am interested in + +282 +00:22:17,400 --> 00:22:22,240 +left-handed. So overall proportion for left-handed + +283 +00:22:22,240 --> 00:22:27,720 +equals 36 divided by 300. + +284 +00:22:31,720 --> 00:22:37,100 +That means, of all children, the proportion of + +285 +00:22:37,100 --> 00:22:42,960 +left-handers is 12%. Of all the children, the + +286 +00:22:42,960 --> 00:22:48,640 +proportion of left-handers is 12%. So that's the + +287 +00:22:48,640 --> 00:22:52,600 +proportion for left-handers. + +288 +00:22:57,500 --> 00:23:01,980 +So now to find the expected frequencies for males + +289 +00:23:01,980 --> 00:23:05,620 +and females, we have to multiply the average + +290 +00:23:05,620 --> 00:23:09,200 +proportion, left-handed B dash, by the total + +291 +00:23:09,200 --> 00:23:12,160 +number of females. In this case, there are 120 + +292 +00:23:12,160 --> 00:23:16,560 +females. So the expected frequency in this case is + +293 +00:23:16,560 --> 00:23:22,960 +just 120 multiplied by 12%. So 120 multiplied by + +294 +00:23:22,960 --> 00:23:30,580 +12% gives 14.4. So if it's zero is true, I mean if + +295 +00:23:30,580 --> 00:23:32,480 +the difference between the two population + +296 +00:23:32,480 --> 00:23:39,080 +proportions are the same, then we expect 14.4 left + +297 +00:23:39,080 --> 00:23:45,790 +handed females. Because overall proportion is 12% + +298 +00:23:45,790 --> 00:23:49,610 +for left-handed. And in this case, there are 120 + +299 +00:23:49,610 --> 00:23:54,610 +females. So we are expecting N times this + +300 +00:23:54,610 --> 00:23:58,990 +proportion. 
So N times V dash will give the + +301 +00:23:58,990 --> 00:24:04,130 +expected frequency for left-handed females. Now + +302 +00:24:04,130 --> 00:24:10,030 +what do you think the expected frequency for left + +303 +00:24:10,030 --> 00:24:10,750 +-handed males? + +304 +00:24:13,440 --> 00:24:20,620 +Again, there are 180 males multiplied by 12, and + +305 +00:24:20,620 --> 00:24:22,620 +that will give 21.6. + +306 +00:24:24,860 --> 00:24:31,820 +Or if you look at the total for 14.6, 21.6 is 36. + +307 +00:24:32,480 --> 00:24:38,140 +So the expected frequency for males left-handed is + +308 +00:24:38,140 --> 00:24:42,380 +just 36 minus + +309 +00:24:43,480 --> 00:24:49,120 +14.4 which is 21.6. So to get the expected + +310 +00:24:49,120 --> 00:24:54,640 +frequency for left-handed females, multiply the + +311 +00:24:54,640 --> 00:24:58,560 +overall proportion by the total number of females. + +312 +00:24:59,420 --> 00:25:02,820 +Again in this case there are 120 females, so 120 + +313 +00:25:02,820 --> 00:25:09,340 +multiplied by 0.12 will give 14.4. For females, + +314 +00:25:09,560 --> 00:25:13,860 +there are 180 females, so multiply 180 by 12% will + +315 +00:25:13,860 --> 00:25:19,120 +give 21.6, or just find a complement. Because + +316 +00:25:19,120 --> 00:25:23,760 +since there are 36 left-handed, or left-handers, + +317 +00:25:24,620 --> 00:25:31,240 +and 14.4 are females, so the complement, which is + +318 +00:25:31,240 --> 00:25:34,240 +21.6, should be for males. + +319 +00:25:36,890 --> 00:25:41,150 +So here, we have to compute the expected frequency + +320 +00:25:41,150 --> 00:25:47,850 +for each cell. I just computed the expected + +321 +00:25:47,850 --> 00:25:52,530 +frequency for left-handers. 14.4 for females and + +322 +00:25:52,530 --> 00:25:57,870 +21.6 for males. Now what's about right-handers? 
+ +323 +00:25:58,870 --> 00:26:04,690 +Now for right-handers, since 12% overall are left + +324 +00:26:04,690 --> 00:26:12,410 +-handers, So 88% are right-handers, so multiply 88 + +325 +00:26:12,410 --> 00:26:18,850 +% by 120, that will give this expected frequency. + +326 +00:26:19,370 --> 00:26:25,010 +So if you multiply 88 by 120, that will give + +327 +00:26:25,010 --> 00:26:26,650 +10516. + +328 +00:26:31,340 --> 00:26:35,260 +Now there are 14.4 expected frequency for left + +329 +00:26:35,260 --> 00:26:40,420 +handers, females. Now total number of females are + +330 +00:26:40,420 --> 00:26:47,300 +120. Now 14.4 out of 120 females are left handers, + +331 +00:26:47,800 --> 00:26:52,820 +so remaining is right handers. So 120 minus this + +332 +00:26:52,820 --> 00:26:58,620 +value, so this equals 120 minus 14.4 which gives 1 + +333 +00:26:58,620 --> 00:27:02,510 +.5. So you don't need actually to compute the + +334 +00:27:02,510 --> 00:27:06,490 +expected frequency for the other cells. Since this + +335 +00:27:06,490 --> 00:27:12,350 +one is known or is computed by using sample size + +336 +00:27:12,350 --> 00:27:16,470 +for females times the overall proportion. The + +337 +00:27:16,470 --> 00:27:20,590 +other expected frequency is just the total number + +338 +00:27:20,590 --> 00:27:23,910 +of females minus this expected frequency. Now + +339 +00:27:23,910 --> 00:27:26,550 +what's about the other one? 158.4. + +340 +00:27:31,670 --> 00:27:40,090 +Multiplied by 120 will give 158 or the complement, + +341 +00:27:40,490 --> 00:27:44,810 +which is 180 minus 21.6 will give the same answer. + +342 +00:27:45,490 --> 00:27:51,930 +Because since there are 180 meals and 21.6 from + +343 +00:27:51,930 --> 00:27:56,550 +them are left-handers, so the remaining should be + +344 +00:27:56,550 --> 00:28:03,060 +right-handers, which is 158.5. So we just use this + +345 +00:28:03,060 --> 00:28:07,240 +rule for only one cell for this reason. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..52d943c7f61fc8dc8877cec58881fe972ffcd665 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 2916, "start": 14.04, "end": 29.16, "text": " Let's start chapter 11. Here we'll talk about some of Cai's characters. If you remember the first time we met, we discussed that there are two types of data.", "tokens": [961, 311, 722, 7187, 2975, 13, 1692, 321, 603, 751, 466, 512, 295, 30983, 311, 4342, 13, 759, 291, 1604, 264, 700, 565, 321, 1131, 11, 321, 7152, 300, 456, 366, 732, 3467, 295, 1412, 13], "avg_logprob": -0.27238174709113866, "compression_ratio": 1.264, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 14.040000000000001, "end": 14.72, "word": " Let's", "probability": 0.612548828125}, {"start": 14.72, "end": 14.96, "word": " start", "probability": 0.90625}, {"start": 14.96, "end": 15.32, "word": " chapter", "probability": 0.59716796875}, {"start": 15.32, "end": 15.74, "word": " 11.", "probability": 0.7314453125}, {"start": 16.5, "end": 16.84, "word": " Here", "probability": 0.796875}, {"start": 16.84, "end": 17.06, "word": " we'll", "probability": 0.646484375}, {"start": 17.06, "end": 17.26, "word": " talk", "probability": 0.8974609375}, {"start": 17.26, "end": 17.78, "word": " about", "probability": 0.90673828125}, {"start": 17.78, "end": 18.36, "word": " some", "probability": 0.6953125}, {"start": 18.36, "end": 18.84, "word": " of", "probability": 0.9189453125}, {"start": 18.84, "end": 19.54, "word": " Cai's", "probability": 0.37164306640625}, {"start": 19.54, "end": 19.96, "word": " characters.", "probability": 0.791015625}, {"start": 22.92, "end": 23.28, "word": " If", "probability": 0.60400390625}, {"start": 23.28, "end": 24.08, "word": " you", "probability": 0.96630859375}, 
{"start": 24.08, "end": 24.36, "word": " remember", "probability": 0.87939453125}, {"start": 24.36, "end": 24.68, "word": " the", "probability": 0.53955078125}, {"start": 24.68, "end": 24.92, "word": " first", "probability": 0.87939453125}, {"start": 24.92, "end": 25.18, "word": " time", "probability": 0.830078125}, {"start": 25.18, "end": 25.4, "word": " we", "probability": 0.95068359375}, {"start": 25.4, "end": 25.92, "word": " met,", "probability": 0.8916015625}, {"start": 26.26, "end": 27.18, "word": " we", "probability": 0.93505859375}, {"start": 27.18, "end": 27.68, "word": " discussed", "probability": 0.87158203125}, {"start": 27.68, "end": 27.94, "word": " that", "probability": 0.9013671875}, {"start": 27.94, "end": 28.12, "word": " there", "probability": 0.89990234375}, {"start": 28.12, "end": 28.24, "word": " are", "probability": 0.8916015625}, {"start": 28.24, "end": 28.42, "word": " two", "probability": 0.91162109375}, {"start": 28.42, "end": 28.76, "word": " types", "probability": 0.837890625}, {"start": 28.76, "end": 28.94, "word": " of", "probability": 0.97314453125}, {"start": 28.94, "end": 29.16, "word": " data.", "probability": 0.76416015625}], "temperature": 1.0}, {"id": 2, "seek": 5861, "start": 30.09, "end": 58.61, "text": " One was numerical data and other categorical data. In chapter eight, we discussed testing and estimation about one, either population mean, if the data is numeric, or population proportion if the data is not numeric, I mean categorical data. 
In chapter nine and ten,", "tokens": [1485, 390, 29054, 1412, 293, 661, 19250, 804, 1412, 13, 682, 7187, 3180, 11, 321, 7152, 4997, 293, 35701, 466, 472, 11, 2139, 4415, 914, 11, 498, 264, 1412, 307, 7866, 299, 11, 420, 4415, 16068, 498, 264, 1412, 307, 406, 7866, 299, 11, 286, 914, 19250, 804, 1412, 13, 682, 7187, 4949, 293, 2064, 11], "avg_logprob": -0.18503289891962418, "compression_ratio": 1.867132867132867, "no_speech_prob": 0.0, "words": [{"start": 30.09, "end": 30.47, "word": " One", "probability": 0.556640625}, {"start": 30.47, "end": 31.01, "word": " was", "probability": 0.951171875}, {"start": 31.01, "end": 32.53, "word": " numerical", "probability": 0.71826171875}, {"start": 32.53, "end": 32.97, "word": " data", "probability": 0.9443359375}, {"start": 32.97, "end": 33.19, "word": " and", "probability": 0.7041015625}, {"start": 33.19, "end": 33.49, "word": " other", "probability": 0.431640625}, {"start": 33.49, "end": 34.91, "word": " categorical", "probability": 0.915283203125}, {"start": 34.91, "end": 35.27, "word": " data.", "probability": 0.921875}, {"start": 35.99, "end": 36.27, "word": " In", "probability": 0.95751953125}, {"start": 36.27, "end": 36.55, "word": " chapter", "probability": 0.65478515625}, {"start": 36.55, "end": 37.07, "word": " eight,", "probability": 0.7900390625}, {"start": 37.53, "end": 37.75, "word": " we", "probability": 0.9599609375}, {"start": 37.75, "end": 38.39, "word": " discussed", "probability": 0.84130859375}, {"start": 38.39, "end": 40.67, "word": " testing", "probability": 0.82177734375}, {"start": 40.67, "end": 41.59, "word": " and", "probability": 0.85302734375}, {"start": 41.59, "end": 42.13, "word": " estimation", "probability": 0.9873046875}, {"start": 42.13, "end": 42.87, "word": " about", "probability": 0.90234375}, {"start": 42.87, "end": 43.27, "word": " one,", "probability": 0.91162109375}, {"start": 43.77, "end": 44.07, "word": " either", "probability": 0.93896484375}, {"start": 44.07, "end": 45.35, "word": 
" population", "probability": 0.94873046875}, {"start": 45.35, "end": 45.73, "word": " mean,", "probability": 0.96728515625}, {"start": 46.07, "end": 46.15, "word": " if", "probability": 0.9501953125}, {"start": 46.15, "end": 46.27, "word": " the", "probability": 0.919921875}, {"start": 46.27, "end": 46.49, "word": " data", "probability": 0.94384765625}, {"start": 46.49, "end": 46.65, "word": " is", "probability": 0.85009765625}, {"start": 46.65, "end": 47.05, "word": " numeric,", "probability": 0.84326171875}, {"start": 47.15, "end": 48.73, "word": " or", "probability": 0.93798828125}, {"start": 48.73, "end": 49.39, "word": " population", "probability": 0.95263671875}, {"start": 49.39, "end": 49.99, "word": " proportion", "probability": 0.7734375}, {"start": 49.99, "end": 51.07, "word": " if", "probability": 0.662109375}, {"start": 51.07, "end": 52.01, "word": " the", "probability": 0.91455078125}, {"start": 52.01, "end": 52.41, "word": " data", "probability": 0.93505859375}, {"start": 52.41, "end": 52.81, "word": " is", "probability": 0.95068359375}, {"start": 52.81, "end": 53.17, "word": " not", "probability": 0.90283203125}, {"start": 53.17, "end": 53.67, "word": " numeric,", "probability": 0.93798828125}, {"start": 53.75, "end": 53.83, "word": " I", "probability": 0.98193359375}, {"start": 53.83, "end": 53.95, "word": " mean", "probability": 0.96875}, {"start": 53.95, "end": 54.49, "word": " categorical", "probability": 0.85009765625}, {"start": 54.49, "end": 54.85, "word": " data.", "probability": 0.9228515625}, {"start": 55.89, "end": 56.53, "word": " In", "probability": 0.55810546875}, {"start": 56.53, "end": 56.91, "word": " chapter", "probability": 0.83642578125}, {"start": 56.91, "end": 58.15, "word": " nine", "probability": 0.92724609375}, {"start": 58.15, "end": 58.41, "word": " and", "probability": 0.94482421875}, {"start": 58.41, "end": 58.61, "word": " ten,", "probability": 0.54296875}], "temperature": 1.0}, {"id": 3, "seek": 8665, "start": 58.91, 
"end": 86.65, "text": " I'm sorry, in chapter 10 we extend for two population means and two population proportions. Now this chapter talks about testing about the difference between two population means and two population proportions or comparing more than two population proportions. So we are going to test to see if there exists a difference between two population proportions", "tokens": [286, 478, 2597, 11, 294, 7187, 1266, 321, 10101, 337, 732, 4415, 1355, 293, 732, 4415, 32482, 13, 823, 341, 7187, 6686, 466, 4997, 466, 264, 2649, 1296, 732, 4415, 1355, 293, 732, 4415, 32482, 420, 15763, 544, 813, 732, 4415, 32482, 13, 407, 321, 366, 516, 281, 1500, 281, 536, 498, 456, 8198, 257, 2649, 1296, 732, 4415, 32482], "avg_logprob": -0.18545081869500582, "compression_ratio": 2.0941176470588236, "no_speech_prob": 0.0, "words": [{"start": 58.91, "end": 59.25, "word": " I'm", "probability": 0.801025390625}, {"start": 59.25, "end": 59.41, "word": " sorry,", "probability": 0.86572265625}, {"start": 59.49, "end": 59.57, "word": " in", "probability": 0.8388671875}, {"start": 59.57, "end": 59.73, "word": " chapter", "probability": 0.60888671875}, {"start": 59.73, "end": 59.97, "word": " 10", "probability": 0.74365234375}, {"start": 59.97, "end": 60.11, "word": " we", "probability": 0.494384765625}, {"start": 60.11, "end": 60.59, "word": " extend", "probability": 0.708984375}, {"start": 60.59, "end": 61.45, "word": " for", "probability": 0.5146484375}, {"start": 61.45, "end": 61.91, "word": " two", "probability": 0.84130859375}, {"start": 61.91, "end": 63.37, "word": " population", "probability": 0.90478515625}, {"start": 63.37, "end": 64.53, "word": " means", "probability": 0.91796875}, {"start": 64.53, "end": 65.21, "word": " and", "probability": 0.83203125}, {"start": 65.21, "end": 65.39, "word": " two", "probability": 0.9345703125}, {"start": 65.39, "end": 65.83, "word": " population", "probability": 0.91650390625}, {"start": 65.83, "end": 66.35, "word": " proportions.", 
"probability": 0.6181640625}, {"start": 67.29, "end": 67.85, "word": " Now", "probability": 0.95458984375}, {"start": 67.85, "end": 68.13, "word": " this", "probability": 0.6015625}, {"start": 68.13, "end": 68.43, "word": " chapter", "probability": 0.8662109375}, {"start": 68.43, "end": 68.75, "word": " talks", "probability": 0.86572265625}, {"start": 68.75, "end": 69.35, "word": " about", "probability": 0.90869140625}, {"start": 69.35, "end": 69.99, "word": " testing", "probability": 0.6162109375}, {"start": 69.99, "end": 70.49, "word": " about", "probability": 0.72900390625}, {"start": 70.49, "end": 70.77, "word": " the", "probability": 0.888671875}, {"start": 70.77, "end": 71.21, "word": " difference", "probability": 0.87548828125}, {"start": 71.21, "end": 71.79, "word": " between", "probability": 0.88525390625}, {"start": 71.79, "end": 73.11, "word": " two", "probability": 0.927734375}, {"start": 73.11, "end": 73.55, "word": " population", "probability": 0.95703125}, {"start": 73.55, "end": 73.81, "word": " means", "probability": 0.6357421875}, {"start": 73.81, "end": 74.07, "word": " and", "probability": 0.7880859375}, {"start": 74.07, "end": 74.25, "word": " two", "probability": 0.93896484375}, {"start": 74.25, "end": 74.75, "word": " population", "probability": 0.951171875}, {"start": 74.75, "end": 75.33, "word": " proportions", "probability": 0.78515625}, {"start": 75.33, "end": 76.73, "word": " or", "probability": 0.568359375}, {"start": 76.73, "end": 77.37, "word": " comparing", "probability": 0.95361328125}, {"start": 77.37, "end": 77.79, "word": " more", "probability": 0.93994140625}, {"start": 77.79, "end": 77.99, "word": " than", "probability": 0.9521484375}, {"start": 77.99, "end": 78.33, "word": " two", "probability": 0.93310546875}, {"start": 78.33, "end": 78.99, "word": " population", "probability": 0.94287109375}, {"start": 78.99, "end": 79.49, "word": " proportions.", "probability": 0.7880859375}, {"start": 79.75, "end": 79.89, "word": " So", 
"probability": 0.94677734375}, {"start": 79.89, "end": 80.01, "word": " we", "probability": 0.875}, {"start": 80.01, "end": 80.13, "word": " are", "probability": 0.88720703125}, {"start": 80.13, "end": 80.65, "word": " going", "probability": 0.94580078125}, {"start": 80.65, "end": 80.83, "word": " to", "probability": 0.966796875}, {"start": 80.83, "end": 81.09, "word": " test", "probability": 0.896484375}, {"start": 81.09, "end": 81.31, "word": " to", "probability": 0.947265625}, {"start": 81.31, "end": 81.55, "word": " see", "probability": 0.92333984375}, {"start": 81.55, "end": 81.81, "word": " if", "probability": 0.947265625}, {"start": 81.81, "end": 82.01, "word": " there", "probability": 0.919921875}, {"start": 82.01, "end": 82.45, "word": " exists", "probability": 0.8017578125}, {"start": 82.45, "end": 83.29, "word": " a", "probability": 0.994140625}, {"start": 83.29, "end": 83.69, "word": " difference", "probability": 0.8681640625}, {"start": 83.69, "end": 84.25, "word": " between", "probability": 0.8828125}, {"start": 84.25, "end": 85.63, "word": " two", "probability": 0.93359375}, {"start": 85.63, "end": 86.09, "word": " population", "probability": 0.9501953125}, {"start": 86.09, "end": 86.65, "word": " proportions", "probability": 0.81689453125}], "temperature": 1.0}, {"id": 4, "seek": 10590, "start": 88.34, "end": 105.9, "text": " or comparison among more than two population proportions. And the second objective also will be test the independent of two categorical variables. 
For example, suppose we are interested in the relationship between gender and education.", "tokens": [420, 9660, 3654, 544, 813, 732, 4415, 32482, 13, 400, 264, 1150, 10024, 611, 486, 312, 1500, 264, 6695, 295, 732, 19250, 804, 9102, 13, 1171, 1365, 11, 7297, 321, 366, 3102, 294, 264, 2480, 1296, 7898, 293, 3309, 13], "avg_logprob": -0.13862423780487804, "compression_ratio": 1.475, "no_speech_prob": 0.0, "words": [{"start": 88.34, "end": 88.68, "word": " or", "probability": 0.5419921875}, {"start": 88.68, "end": 89.24, "word": " comparison", "probability": 0.83642578125}, {"start": 89.24, "end": 89.76, "word": " among", "probability": 0.9384765625}, {"start": 89.76, "end": 90.42, "word": " more", "probability": 0.935546875}, {"start": 90.42, "end": 90.64, "word": " than", "probability": 0.953125}, {"start": 90.64, "end": 90.96, "word": " two", "probability": 0.92919921875}, {"start": 90.96, "end": 91.5, "word": " population", "probability": 0.890625}, {"start": 91.5, "end": 92.1, "word": " proportions.", "probability": 0.73876953125}, {"start": 93.04, "end": 93.16, "word": " And", "probability": 0.8984375}, {"start": 93.16, "end": 93.28, "word": " the", "probability": 0.853515625}, {"start": 93.28, "end": 93.54, "word": " second", "probability": 0.91552734375}, {"start": 93.54, "end": 94.0, "word": " objective", "probability": 0.953125}, {"start": 94.0, "end": 94.6, "word": " also", "probability": 0.79345703125}, {"start": 94.6, "end": 94.82, "word": " will", "probability": 0.84033203125}, {"start": 94.82, "end": 95.34, "word": " be", "probability": 0.91064453125}, {"start": 95.34, "end": 96.96, "word": " test", "probability": 0.5087890625}, {"start": 96.96, "end": 97.24, "word": " the", "probability": 0.666015625}, {"start": 97.24, "end": 97.72, "word": " independent", "probability": 0.66259765625}, {"start": 97.72, "end": 99.16, "word": " of", "probability": 0.97021484375}, {"start": 99.16, "end": 99.58, "word": " two", "probability": 0.93359375}, {"start": 
99.58, "end": 100.54, "word": " categorical", "probability": 0.92822265625}, {"start": 100.54, "end": 100.94, "word": " variables.", "probability": 0.94775390625}, {"start": 101.08, "end": 101.18, "word": " For", "probability": 0.96240234375}, {"start": 101.18, "end": 101.58, "word": " example,", "probability": 0.9736328125}, {"start": 102.36, "end": 102.68, "word": " suppose", "probability": 0.9052734375}, {"start": 102.68, "end": 102.84, "word": " we", "probability": 0.9287109375}, {"start": 102.84, "end": 102.96, "word": " are", "probability": 0.92919921875}, {"start": 102.96, "end": 103.36, "word": " interested", "probability": 0.86376953125}, {"start": 103.36, "end": 103.72, "word": " in", "probability": 0.94677734375}, {"start": 103.72, "end": 103.82, "word": " the", "probability": 0.92041015625}, {"start": 103.82, "end": 104.32, "word": " relationship", "probability": 0.92041015625}, {"start": 104.32, "end": 104.72, "word": " between", "probability": 0.87841796875}, {"start": 104.72, "end": 105.14, "word": " gender", "probability": 0.8671875}, {"start": 105.14, "end": 105.46, "word": " and", "probability": 0.94091796875}, {"start": 105.46, "end": 105.9, "word": " education.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 5, "seek": 13619, "start": 107.29, "end": 136.19, "text": " And our goal is to see if there exists a significant relationship between education and gender, for example, or if we are interested in health status and smoking. Smoking, either the person is heavy smoker or not heavy smoker. 
And health status could be classified as bad, medium, or good.", "tokens": [400, 527, 3387, 307, 281, 536, 498, 456, 8198, 257, 4776, 2480, 1296, 3309, 293, 7898, 11, 337, 1365, 11, 420, 498, 321, 366, 3102, 294, 1585, 6558, 293, 14055, 13, 3915, 5953, 11, 2139, 264, 954, 307, 4676, 899, 16722, 420, 406, 4676, 899, 16722, 13, 400, 1585, 6558, 727, 312, 20627, 382, 1578, 11, 6399, 11, 420, 665, 13], "avg_logprob": -0.16242439395958377, "compression_ratio": 1.5846994535519126, "no_speech_prob": 0.0, "words": [{"start": 107.29, "end": 107.61, "word": " And", "probability": 0.51318359375}, {"start": 107.61, "end": 107.97, "word": " our", "probability": 0.8759765625}, {"start": 107.97, "end": 108.25, "word": " goal", "probability": 0.97216796875}, {"start": 108.25, "end": 108.43, "word": " is", "probability": 0.9453125}, {"start": 108.43, "end": 108.55, "word": " to", "probability": 0.96630859375}, {"start": 108.55, "end": 108.79, "word": " see", "probability": 0.92724609375}, {"start": 108.79, "end": 109.21, "word": " if", "probability": 0.9521484375}, {"start": 109.21, "end": 109.47, "word": " there", "probability": 0.9248046875}, {"start": 109.47, "end": 109.91, "word": " exists", "probability": 0.78466796875}, {"start": 109.91, "end": 110.57, "word": " a", "probability": 0.97119140625}, {"start": 110.57, "end": 111.05, "word": " significant", "probability": 0.87841796875}, {"start": 111.05, "end": 111.75, "word": " relationship", "probability": 0.90576171875}, {"start": 111.75, "end": 112.21, "word": " between", "probability": 0.85302734375}, {"start": 112.21, "end": 113.21, "word": " education", "probability": 0.5283203125}, {"start": 113.21, "end": 114.27, "word": " and", "probability": 0.9169921875}, {"start": 114.27, "end": 115.01, "word": " gender,", "probability": 0.87255859375}, {"start": 115.19, "end": 115.21, "word": " for", "probability": 0.94091796875}, {"start": 115.21, "end": 115.59, "word": " example,", "probability": 0.96826171875}, {"start": 115.73, 
"end": 116.05, "word": " or", "probability": 0.8232421875}, {"start": 116.05, "end": 116.71, "word": " if", "probability": 0.9189453125}, {"start": 116.71, "end": 116.85, "word": " we", "probability": 0.94189453125}, {"start": 116.85, "end": 116.97, "word": " are", "probability": 0.90185546875}, {"start": 116.97, "end": 117.41, "word": " interested", "probability": 0.8681640625}, {"start": 117.41, "end": 117.95, "word": " in", "probability": 0.94873046875}, {"start": 117.95, "end": 118.73, "word": " health", "probability": 0.658203125}, {"start": 118.73, "end": 119.19, "word": " status", "probability": 0.9423828125}, {"start": 119.19, "end": 119.95, "word": " and", "probability": 0.92041015625}, {"start": 119.95, "end": 120.41, "word": " smoking.", "probability": 0.95263671875}, {"start": 121.15, "end": 121.83, "word": " Smoking,", "probability": 0.96337890625}, {"start": 121.99, "end": 122.23, "word": " either", "probability": 0.90380859375}, {"start": 122.23, "end": 122.45, "word": " the", "probability": 0.876953125}, {"start": 122.45, "end": 122.79, "word": " person", "probability": 0.908203125}, {"start": 122.79, "end": 123.17, "word": " is", "probability": 0.93896484375}, {"start": 123.17, "end": 123.45, "word": " heavy", "probability": 0.440185546875}, {"start": 123.45, "end": 123.95, "word": " smoker", "probability": 0.865234375}, {"start": 123.95, "end": 125.23, "word": " or", "probability": 0.8681640625}, {"start": 125.23, "end": 125.51, "word": " not", "probability": 0.9375}, {"start": 125.51, "end": 125.77, "word": " heavy", "probability": 0.7607421875}, {"start": 125.77, "end": 126.17, "word": " smoker.", "probability": 0.87060546875}, {"start": 127.09, "end": 127.51, "word": " And", "probability": 0.92724609375}, {"start": 127.51, "end": 127.75, "word": " health", "probability": 0.8662109375}, {"start": 127.75, "end": 128.27, "word": " status", "probability": 0.97314453125}, {"start": 128.27, "end": 128.53, "word": " could", "probability": 
0.81884765625}, {"start": 128.53, "end": 128.71, "word": " be", "probability": 0.9609375}, {"start": 128.71, "end": 129.11, "word": " classified", "probability": 0.9111328125}, {"start": 129.11, "end": 129.75, "word": " as", "probability": 0.9638671875}, {"start": 129.75, "end": 130.37, "word": " bad,", "probability": 0.82177734375}, {"start": 133.25, "end": 134.43, "word": " medium,", "probability": 0.8662109375}, {"start": 135.39, "end": 135.79, "word": " or", "probability": 0.96533203125}, {"start": 135.79, "end": 136.19, "word": " good.", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 6, "seek": 16100, "start": 138.68, "end": 161.0, "text": " So we are going to test to see if there exists a difference or relationship between two variables. So here we are interested in qualitative data, either we have two or more than two variables. So the objectives in this chapter are when to use the chi-square test for contingency tables.", "tokens": [407, 321, 366, 516, 281, 1500, 281, 536, 498, 456, 8198, 257, 2649, 420, 2480, 1296, 732, 9102, 13, 407, 510, 321, 366, 3102, 294, 31312, 1412, 11, 2139, 321, 362, 732, 420, 544, 813, 732, 9102, 13, 407, 264, 15961, 294, 341, 7187, 366, 562, 281, 764, 264, 13228, 12, 33292, 543, 1500, 337, 27820, 3020, 8020, 13], "avg_logprob": -0.18020832737286885, "compression_ratio": 1.6306818181818181, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 138.68, "end": 139.28, "word": " So", "probability": 0.6826171875}, {"start": 139.28, "end": 139.46, "word": " we", "probability": 0.63427734375}, {"start": 139.46, "end": 139.6, "word": " are", "probability": 0.91162109375}, {"start": 139.6, "end": 139.8, "word": " going", "probability": 0.935546875}, {"start": 139.8, "end": 139.94, "word": " to", "probability": 0.96435546875}, {"start": 139.94, "end": 140.12, "word": " test", "probability": 0.67041015625}, {"start": 140.12, "end": 140.26, "word": " to", "probability": 0.8369140625}, {"start": 140.26, "end": 140.4, 
"word": " see", "probability": 0.9228515625}, {"start": 140.4, "end": 140.54, "word": " if", "probability": 0.94287109375}, {"start": 140.54, "end": 140.68, "word": " there", "probability": 0.9169921875}, {"start": 140.68, "end": 140.98, "word": " exists", "probability": 0.72119140625}, {"start": 140.98, "end": 141.12, "word": " a", "probability": 0.98486328125}, {"start": 141.12, "end": 141.54, "word": " difference", "probability": 0.896484375}, {"start": 141.54, "end": 141.86, "word": " or", "probability": 0.85107421875}, {"start": 141.86, "end": 142.54, "word": " relationship", "probability": 0.63623046875}, {"start": 142.54, "end": 142.96, "word": " between", "probability": 0.8759765625}, {"start": 142.96, "end": 143.16, "word": " two", "probability": 0.91015625}, {"start": 143.16, "end": 143.5, "word": " variables.", "probability": 0.9169921875}, {"start": 144.16, "end": 144.38, "word": " So", "probability": 0.95068359375}, {"start": 144.38, "end": 144.62, "word": " here", "probability": 0.82470703125}, {"start": 144.62, "end": 144.82, "word": " we", "probability": 0.76318359375}, {"start": 144.82, "end": 145.06, "word": " are", "probability": 0.93896484375}, {"start": 145.06, "end": 145.62, "word": " interested", "probability": 0.86474609375}, {"start": 145.62, "end": 146.3, "word": " in", "probability": 0.947265625}, {"start": 146.3, "end": 147.24, "word": " qualitative", "probability": 0.849609375}, {"start": 147.24, "end": 147.74, "word": " data,", "probability": 0.89892578125}, {"start": 147.84, "end": 148.1, "word": " either", "probability": 0.92236328125}, {"start": 148.1, "end": 148.46, "word": " we", "probability": 0.953125}, {"start": 148.46, "end": 148.72, "word": " have", "probability": 0.94775390625}, {"start": 148.72, "end": 149.0, "word": " two", "probability": 0.91455078125}, {"start": 149.0, "end": 150.42, "word": " or", "probability": 0.84326171875}, {"start": 150.42, "end": 150.66, "word": " more", "probability": 0.93896484375}, {"start": 
150.66, "end": 150.9, "word": " than", "probability": 0.94970703125}, {"start": 150.9, "end": 151.26, "word": " two", "probability": 0.9365234375}, {"start": 151.26, "end": 152.2, "word": " variables.", "probability": 0.94677734375}, {"start": 152.76, "end": 153.06, "word": " So", "probability": 0.9619140625}, {"start": 153.06, "end": 153.32, "word": " the", "probability": 0.86962890625}, {"start": 153.32, "end": 153.94, "word": " objectives", "probability": 0.8115234375}, {"start": 153.94, "end": 154.38, "word": " in", "probability": 0.93212890625}, {"start": 154.38, "end": 154.8, "word": " this", "probability": 0.9072265625}, {"start": 154.8, "end": 155.56, "word": " chapter", "probability": 0.64794921875}, {"start": 155.56, "end": 157.0, "word": " are", "probability": 0.8251953125}, {"start": 157.0, "end": 158.2, "word": " when", "probability": 0.654296875}, {"start": 158.2, "end": 158.36, "word": " to", "probability": 0.95947265625}, {"start": 158.36, "end": 158.64, "word": " use", "probability": 0.87548828125}, {"start": 158.64, "end": 158.82, "word": " the", "probability": 0.91357421875}, {"start": 158.82, "end": 159.04, "word": " chi", "probability": 0.393310546875}, {"start": 159.04, "end": 159.34, "word": "-square", "probability": 0.7771809895833334}, {"start": 159.34, "end": 159.64, "word": " test", "probability": 0.7646484375}, {"start": 159.64, "end": 159.86, "word": " for", "probability": 0.9248046875}, {"start": 159.86, "end": 160.5, "word": " contingency", "probability": 0.841064453125}, {"start": 160.5, "end": 161.0, "word": " tables.", "probability": 0.85546875}], "temperature": 1.0}, {"id": 7, "seek": 18589, "start": 162.13, "end": 185.89, "text": " And the other, how to use the chi-square test for contingency plots. So here we are going to see what are the conditions. And what are the conditions we can see, we can use the chi-square test. So the condition is, we should have qualitative data. 
So in this case, we'll talk about just", "tokens": [400, 264, 661, 11, 577, 281, 764, 264, 13228, 12, 33292, 543, 1500, 337, 27820, 3020, 28609, 13, 407, 510, 321, 366, 516, 281, 536, 437, 366, 264, 4487, 13, 400, 437, 366, 264, 4487, 321, 393, 536, 11, 321, 393, 764, 264, 13228, 12, 33292, 543, 1500, 13, 407, 264, 4188, 307, 11, 321, 820, 362, 31312, 1412, 13, 407, 294, 341, 1389, 11, 321, 603, 751, 466, 445], "avg_logprob": -0.2074163753381917, "compression_ratio": 1.79375, "no_speech_prob": 0.0, "words": [{"start": 162.13, "end": 162.47, "word": " And", "probability": 0.51025390625}, {"start": 162.47, "end": 162.59, "word": " the", "probability": 0.77880859375}, {"start": 162.59, "end": 162.87, "word": " other,", "probability": 0.84326171875}, {"start": 163.11, "end": 163.35, "word": " how", "probability": 0.87890625}, {"start": 163.35, "end": 163.63, "word": " to", "probability": 0.94921875}, {"start": 163.63, "end": 163.95, "word": " use", "probability": 0.8671875}, {"start": 163.95, "end": 164.13, "word": " the", "probability": 0.896484375}, {"start": 164.13, "end": 164.31, "word": " chi", "probability": 0.396728515625}, {"start": 164.31, "end": 164.63, "word": "-square", "probability": 0.88232421875}, {"start": 164.63, "end": 164.97, "word": " test", "probability": 0.72705078125}, {"start": 164.97, "end": 165.23, "word": " for", "probability": 0.88720703125}, {"start": 165.23, "end": 165.81, "word": " contingency", "probability": 0.8779296875}, {"start": 165.81, "end": 166.17, "word": " plots.", "probability": 0.19384765625}, {"start": 166.93, "end": 167.21, "word": " So", "probability": 0.9375}, {"start": 167.21, "end": 167.39, "word": " here", "probability": 0.7509765625}, {"start": 167.39, "end": 167.49, "word": " we", "probability": 0.7451171875}, {"start": 167.49, "end": 167.63, "word": " are", "probability": 0.91796875}, {"start": 167.63, "end": 167.87, "word": " going", "probability": 0.943359375}, {"start": 167.87, "end": 168.07, "word": " to", 
"probability": 0.96728515625}, {"start": 168.07, "end": 168.59, "word": " see", "probability": 0.92333984375}, {"start": 168.59, "end": 169.89, "word": " what", "probability": 0.8125}, {"start": 169.89, "end": 170.07, "word": " are", "probability": 0.9287109375}, {"start": 170.07, "end": 170.17, "word": " the", "probability": 0.92236328125}, {"start": 170.17, "end": 170.71, "word": " conditions.", "probability": 0.8837890625}, {"start": 172.39, "end": 172.67, "word": " And", "probability": 0.931640625}, {"start": 172.67, "end": 172.91, "word": " what", "probability": 0.923828125}, {"start": 172.91, "end": 173.45, "word": " are", "probability": 0.91650390625}, {"start": 173.45, "end": 173.59, "word": " the", "probability": 0.92333984375}, {"start": 173.59, "end": 173.85, "word": " conditions", "probability": 0.8017578125}, {"start": 173.85, "end": 174.17, "word": " we", "probability": 0.56298828125}, {"start": 174.17, "end": 174.45, "word": " can", "probability": 0.94091796875}, {"start": 174.45, "end": 174.75, "word": " see,", "probability": 0.66796875}, {"start": 174.93, "end": 175.09, "word": " we", "probability": 0.94189453125}, {"start": 175.09, "end": 175.31, "word": " can", "probability": 0.94287109375}, {"start": 175.31, "end": 175.67, "word": " use", "probability": 0.87890625}, {"start": 175.67, "end": 176.35, "word": " the", "probability": 0.86083984375}, {"start": 176.35, "end": 176.55, "word": " chi", "probability": 0.80224609375}, {"start": 176.55, "end": 176.83, "word": "-square", "probability": 0.9581705729166666}, {"start": 176.83, "end": 177.27, "word": " test.", "probability": 0.89013671875}, {"start": 178.39, "end": 178.95, "word": " So", "probability": 0.95361328125}, {"start": 178.95, "end": 179.11, "word": " the", "probability": 0.86767578125}, {"start": 179.11, "end": 179.51, "word": " condition", "probability": 0.94580078125}, {"start": 179.51, "end": 179.89, "word": " is,", "probability": 0.94775390625}, {"start": 179.99, "end": 180.15, 
"word": " we", "probability": 0.9541015625}, {"start": 180.15, "end": 180.41, "word": " should", "probability": 0.9658203125}, {"start": 180.41, "end": 180.85, "word": " have", "probability": 0.94287109375}, {"start": 180.85, "end": 182.89, "word": " qualitative", "probability": 0.7685546875}, {"start": 182.89, "end": 183.41, "word": " data.", "probability": 0.921875}, {"start": 183.95, "end": 184.19, "word": " So", "probability": 0.95458984375}, {"start": 184.19, "end": 184.35, "word": " in", "probability": 0.9013671875}, {"start": 184.35, "end": 184.53, "word": " this", "probability": 0.947265625}, {"start": 184.53, "end": 184.73, "word": " case,", "probability": 0.90380859375}, {"start": 184.77, "end": 184.97, "word": " we'll", "probability": 0.811767578125}, {"start": 184.97, "end": 185.17, "word": " talk", "probability": 0.89794921875}, {"start": 185.17, "end": 185.49, "word": " about", "probability": 0.90283203125}, {"start": 185.49, "end": 185.89, "word": " just", "probability": 0.921875}], "temperature": 1.0}, {"id": 8, "seek": 21709, "start": 189.33, "end": 217.09, "text": " gender, health status, education, income levels, and so on. So we are not talking about numerical data. So we are focusing on non-numerical data. I mean on categorical data. Let's see the definition of contingency table. 
For example, as I mentioned, suppose we are interested in gender and education.", "tokens": [7898, 11, 1585, 6558, 11, 3309, 11, 5742, 4358, 11, 293, 370, 322, 13, 407, 321, 366, 406, 1417, 466, 29054, 1412, 13, 407, 321, 366, 8416, 322, 2107, 12, 77, 15583, 804, 1412, 13, 286, 914, 322, 19250, 804, 1412, 13, 961, 311, 536, 264, 7123, 295, 27820, 3020, 3199, 13, 1171, 1365, 11, 382, 286, 2835, 11, 7297, 321, 366, 3102, 294, 7898, 293, 3309, 13], "avg_logprob": -0.11299818970155025, "compression_ratio": 1.6096256684491979, "no_speech_prob": 0.0, "words": [{"start": 189.33, "end": 190.03, "word": " gender,", "probability": 0.50390625}, {"start": 190.03, "end": 190.73, "word": " health", "probability": 0.92138671875}, {"start": 190.73, "end": 191.41, "word": " status,", "probability": 0.94775390625}, {"start": 192.25, "end": 192.87, "word": " education,", "probability": 0.93896484375}, {"start": 194.07, "end": 194.43, "word": " income", "probability": 0.94580078125}, {"start": 194.43, "end": 194.95, "word": " levels,", "probability": 0.90576171875}, {"start": 195.19, "end": 195.29, "word": " and", "probability": 0.9404296875}, {"start": 195.29, "end": 195.51, "word": " so", "probability": 0.9541015625}, {"start": 195.51, "end": 195.73, "word": " on.", "probability": 0.9462890625}, {"start": 195.85, "end": 196.03, "word": " So", "probability": 0.92529296875}, {"start": 196.03, "end": 196.17, "word": " we", "probability": 0.7763671875}, {"start": 196.17, "end": 196.31, "word": " are", "probability": 0.9345703125}, {"start": 196.31, "end": 196.69, "word": " not", "probability": 0.947265625}, {"start": 196.69, "end": 197.07, "word": " talking", "probability": 0.85546875}, {"start": 197.07, "end": 197.61, "word": " about", "probability": 0.908203125}, {"start": 197.61, "end": 198.45, "word": " numerical", "probability": 0.6171875}, {"start": 198.45, "end": 198.89, "word": " data.", "probability": 0.93310546875}, {"start": 198.99, "end": 199.13, "word": " So", 
"probability": 0.86181640625}, {"start": 199.13, "end": 199.37, "word": " we", "probability": 0.8935546875}, {"start": 199.37, "end": 199.51, "word": " are", "probability": 0.939453125}, {"start": 199.51, "end": 200.03, "word": " focusing", "probability": 0.912109375}, {"start": 200.03, "end": 200.79, "word": " on", "probability": 0.9482421875}, {"start": 200.79, "end": 201.27, "word": " non", "probability": 0.97119140625}, {"start": 201.27, "end": 202.15, "word": "-numerical", "probability": 0.949462890625}, {"start": 202.15, "end": 202.57, "word": " data.", "probability": 0.90869140625}, {"start": 202.71, "end": 202.95, "word": " I", "probability": 0.99609375}, {"start": 202.95, "end": 203.23, "word": " mean", "probability": 0.96142578125}, {"start": 203.23, "end": 203.69, "word": " on", "probability": 0.66259765625}, {"start": 203.69, "end": 204.59, "word": " categorical", "probability": 0.926513671875}, {"start": 204.59, "end": 204.97, "word": " data.", "probability": 0.92529296875}, {"start": 207.29, "end": 207.63, "word": " Let's", "probability": 0.901611328125}, {"start": 207.63, "end": 207.73, "word": " see", "probability": 0.91357421875}, {"start": 207.73, "end": 207.87, "word": " the", "probability": 0.92041015625}, {"start": 207.87, "end": 208.25, "word": " definition", "probability": 0.94140625}, {"start": 208.25, "end": 208.61, "word": " of", "probability": 0.970703125}, {"start": 208.61, "end": 209.19, "word": " contingency", "probability": 0.954833984375}, {"start": 209.19, "end": 209.47, "word": " table.", "probability": 0.82373046875}, {"start": 209.57, "end": 209.71, "word": " For", "probability": 0.95654296875}, {"start": 209.71, "end": 210.09, "word": " example,", "probability": 0.97802734375}, {"start": 210.37, "end": 210.61, "word": " as", "probability": 0.96044921875}, {"start": 210.61, "end": 210.73, "word": " I", "probability": 0.99560546875}, {"start": 210.73, "end": 211.05, "word": " mentioned,", "probability": 0.81787109375}, {"start": 
211.25, "end": 211.57, "word": " suppose", "probability": 0.9033203125}, {"start": 211.57, "end": 213.17, "word": " we", "probability": 0.8740234375}, {"start": 213.17, "end": 213.37, "word": " are", "probability": 0.939453125}, {"start": 213.37, "end": 213.83, "word": " interested", "probability": 0.8603515625}, {"start": 213.83, "end": 214.09, "word": " in", "probability": 0.95263671875}, {"start": 214.09, "end": 214.43, "word": " gender", "probability": 0.88330078125}, {"start": 214.43, "end": 215.97, "word": " and", "probability": 0.89453125}, {"start": 215.97, "end": 217.09, "word": " education.", "probability": 0.9423828125}], "temperature": 1.0}, {"id": 9, "seek": 24786, "start": 218.5, "end": 247.86, "text": " And for example, suppose gender is classified, normal is classified as female and male. Education could be classified as either secondary or less, bachelor, master, PhD. So here we are talking about", "tokens": [400, 337, 1365, 11, 7297, 7898, 307, 20627, 11, 2710, 307, 20627, 382, 6556, 293, 7133, 13, 10680, 727, 312, 20627, 382, 2139, 11396, 420, 1570, 11, 25947, 11, 4505, 11, 14476, 13, 407, 510, 321, 366, 1417, 466], "avg_logprob": -0.2000000026077032, "compression_ratio": 1.4214285714285715, "no_speech_prob": 0.0, "words": [{"start": 218.5, "end": 218.8, "word": " And", "probability": 0.74658203125}, {"start": 218.8, "end": 219.02, "word": " for", "probability": 0.78466796875}, {"start": 219.02, "end": 219.46, "word": " example,", "probability": 0.974609375}, {"start": 220.0, "end": 220.5, "word": " suppose", "probability": 0.85498046875}, {"start": 220.5, "end": 220.96, "word": " gender", "probability": 0.71923828125}, {"start": 220.96, "end": 222.1, "word": " is", "probability": 0.93017578125}, {"start": 222.1, "end": 222.66, "word": " classified,", "probability": 0.93994140625}, {"start": 223.44, "end": 223.76, "word": " normal", "probability": 0.3701171875}, {"start": 223.76, "end": 224.0, "word": " is", "probability": 0.7919921875}, 
{"start": 224.0, "end": 224.56, "word": " classified", "probability": 0.9423828125}, {"start": 224.56, "end": 224.96, "word": " as", "probability": 0.9287109375}, {"start": 224.96, "end": 225.84, "word": " female", "probability": 0.7587890625}, {"start": 225.84, "end": 227.46, "word": " and", "probability": 0.92529296875}, {"start": 227.46, "end": 227.9, "word": " male.", "probability": 0.57568359375}, {"start": 229.18, "end": 230.18, "word": " Education", "probability": 0.75146484375}, {"start": 230.18, "end": 230.54, "word": " could", "probability": 0.7529296875}, {"start": 230.54, "end": 230.7, "word": " be", "probability": 0.96533203125}, {"start": 230.7, "end": 231.22, "word": " classified", "probability": 0.95068359375}, {"start": 231.22, "end": 231.78, "word": " as", "probability": 0.95263671875}, {"start": 231.78, "end": 232.98, "word": " either", "probability": 0.9375}, {"start": 232.98, "end": 234.6, "word": " secondary", "probability": 0.80810546875}, {"start": 234.6, "end": 237.92, "word": " or", "probability": 0.80224609375}, {"start": 237.92, "end": 238.32, "word": " less,", "probability": 0.9208984375}, {"start": 240.68, "end": 241.52, "word": " bachelor,", "probability": 0.86767578125}, {"start": 243.4, "end": 243.8, "word": " master,", "probability": 0.91162109375}, {"start": 243.98, "end": 244.32, "word": " PhD.", "probability": 0.8642578125}, {"start": 245.6, "end": 246.6, "word": " So", "probability": 0.9560546875}, {"start": 246.6, "end": 246.8, "word": " here", "probability": 0.783203125}, {"start": 246.8, "end": 246.94, "word": " we", "probability": 0.77685546875}, {"start": 246.94, "end": 247.1, "word": " are", "probability": 0.9384765625}, {"start": 247.1, "end": 247.42, "word": " talking", "probability": 0.859375}, {"start": 247.42, "end": 247.86, "word": " about", "probability": 0.90673828125}], "temperature": 1.0}, {"id": 10, "seek": 27136, "start": 249.12, "end": 271.36, "text": " a relationship between gender and education. 
Now in this case, we have this table. For example, one is gender. And gender is either female or male. And the other is education.", "tokens": [257, 2480, 1296, 7898, 293, 3309, 13, 823, 294, 341, 1389, 11, 321, 362, 341, 3199, 13, 1171, 1365, 11, 472, 307, 7898, 13, 400, 7898, 307, 2139, 6556, 420, 7133, 13, 400, 264, 661, 307, 3309, 13], "avg_logprob": -0.21193910714907524, "compression_ratio": 1.4426229508196722, "no_speech_prob": 0.0, "words": [{"start": 249.12, "end": 249.32, "word": " a", "probability": 0.155029296875}, {"start": 249.32, "end": 249.82, "word": " relationship", "probability": 0.87646484375}, {"start": 249.82, "end": 250.22, "word": " between", "probability": 0.8935546875}, {"start": 250.22, "end": 250.64, "word": " gender", "probability": 0.80859375}, {"start": 250.64, "end": 251.92, "word": " and", "probability": 0.9189453125}, {"start": 251.92, "end": 253.26, "word": " education.", "probability": 0.9423828125}, {"start": 253.94, "end": 254.32, "word": " Now", "probability": 0.76806640625}, {"start": 254.32, "end": 254.44, "word": " in", "probability": 0.6826171875}, {"start": 254.44, "end": 254.64, "word": " this", "probability": 0.94677734375}, {"start": 254.64, "end": 254.9, "word": " case,", "probability": 0.9072265625}, {"start": 254.98, "end": 255.1, "word": " we", "probability": 0.9541015625}, {"start": 255.1, "end": 255.44, "word": " have", "probability": 0.94677734375}, {"start": 255.44, "end": 257.74, "word": " this", "probability": 0.9287109375}, {"start": 257.74, "end": 258.2, "word": " table.", "probability": 0.8955078125}, {"start": 263.36, "end": 264.02, "word": " For", "probability": 0.94189453125}, {"start": 264.02, "end": 264.28, "word": " example,", "probability": 0.9765625}, {"start": 264.38, "end": 264.54, "word": " one", "probability": 0.92236328125}, {"start": 264.54, "end": 264.72, "word": " is", "probability": 0.9462890625}, {"start": 264.72, "end": 265.12, "word": " gender.", "probability": 0.85986328125}, {"start": 
267.26, "end": 267.52, "word": " And", "probability": 0.626953125}, {"start": 267.52, "end": 267.76, "word": " gender", "probability": 0.890625}, {"start": 267.76, "end": 268.04, "word": " is", "probability": 0.94775390625}, {"start": 268.04, "end": 268.36, "word": " either", "probability": 0.94140625}, {"start": 268.36, "end": 268.76, "word": " female", "probability": 0.814453125}, {"start": 268.76, "end": 269.1, "word": " or", "probability": 0.96630859375}, {"start": 269.1, "end": 269.36, "word": " male.", "probability": 0.9111328125}, {"start": 270.02, "end": 270.32, "word": " And", "probability": 0.9443359375}, {"start": 270.32, "end": 270.44, "word": " the", "probability": 0.81396484375}, {"start": 270.44, "end": 270.6, "word": " other", "probability": 0.8974609375}, {"start": 270.6, "end": 270.88, "word": " is", "probability": 0.9453125}, {"start": 270.88, "end": 271.36, "word": " education.", "probability": 0.955078125}], "temperature": 1.0}, {"id": 11, "seek": 29883, "start": 274.08, "end": 298.84, "text": " And education in this case is classified into four categories, secondary or less, BA, master and PhD. This table is called contingency table. It's two by four table. 
Two because there are two rows.", "tokens": [400, 3309, 294, 341, 1389, 307, 20627, 666, 1451, 10479, 11, 11396, 420, 1570, 11, 21050, 11, 4505, 293, 14476, 13, 639, 3199, 307, 1219, 27820, 3020, 3199, 13, 467, 311, 732, 538, 1451, 3199, 13, 4453, 570, 456, 366, 732, 13241, 13], "avg_logprob": -0.23757102679122577, "compression_ratio": 1.3655172413793104, "no_speech_prob": 0.0, "words": [{"start": 274.08, "end": 274.44, "word": " And", "probability": 0.398193359375}, {"start": 274.44, "end": 274.88, "word": " education", "probability": 0.82958984375}, {"start": 274.88, "end": 275.14, "word": " in", "probability": 0.70751953125}, {"start": 275.14, "end": 275.36, "word": " this", "probability": 0.91845703125}, {"start": 275.36, "end": 275.6, "word": " case", "probability": 0.92431640625}, {"start": 275.6, "end": 275.78, "word": " is", "probability": 0.84716796875}, {"start": 275.78, "end": 276.36, "word": " classified", "probability": 0.90478515625}, {"start": 276.36, "end": 276.78, "word": " into", "probability": 0.8310546875}, {"start": 276.78, "end": 277.14, "word": " four", "probability": 0.8486328125}, {"start": 277.14, "end": 277.76, "word": " categories,", "probability": 0.93701171875}, {"start": 278.66, "end": 279.46, "word": " secondary", "probability": 0.64013671875}, {"start": 279.46, "end": 279.74, "word": " or", "probability": 0.82763671875}, {"start": 279.74, "end": 280.14, "word": " less,", "probability": 0.92578125}, {"start": 283.36, "end": 283.84, "word": " BA,", "probability": 0.483642578125}, {"start": 285.66, "end": 286.46, "word": " master", "probability": 0.5419921875}, {"start": 286.46, "end": 286.72, "word": " and", "probability": 0.66650390625}, {"start": 286.72, "end": 287.02, "word": " PhD.", "probability": 0.93212890625}, {"start": 288.04, "end": 288.86, "word": " This", "probability": 0.87109375}, {"start": 288.86, "end": 289.12, "word": " table", "probability": 0.9072265625}, {"start": 289.12, "end": 289.36, "word": " is", 
"probability": 0.94287109375}, {"start": 289.36, "end": 289.82, "word": " called", "probability": 0.9072265625}, {"start": 289.82, "end": 291.02, "word": " contingency", "probability": 0.8544921875}, {"start": 291.02, "end": 291.42, "word": " table.", "probability": 0.87646484375}, {"start": 292.14, "end": 292.78, "word": " It's", "probability": 0.95703125}, {"start": 292.78, "end": 293.56, "word": " two", "probability": 0.5869140625}, {"start": 293.56, "end": 295.16, "word": " by", "probability": 0.93408203125}, {"start": 295.16, "end": 295.62, "word": " four", "probability": 0.947265625}, {"start": 295.62, "end": 296.94, "word": " table.", "probability": 0.73779296875}, {"start": 297.52, "end": 297.7, "word": " Two", "probability": 0.9091796875}, {"start": 297.7, "end": 298.0, "word": " because", "probability": 0.83349609375}, {"start": 298.0, "end": 298.18, "word": " there", "probability": 0.90380859375}, {"start": 298.18, "end": 298.32, "word": " are", "probability": 0.94140625}, {"start": 298.32, "end": 298.48, "word": " two", "probability": 0.9228515625}, {"start": 298.48, "end": 298.84, "word": " rows.", "probability": 0.86376953125}], "temperature": 1.0}, {"id": 12, "seek": 32872, "start": 307.22, "end": 328.72, "text": " In this case, there are two rules and two codes. For example, gender, male or female, and the other one, suppose, smoking. Either yes or no. 
Smoking or not smoking.", "tokens": [682, 341, 1389, 11, 456, 366, 732, 4474, 293, 732, 14211, 13, 1171, 1365, 11, 7898, 11, 7133, 420, 6556, 11, 293, 264, 661, 472, 11, 7297, 11, 14055, 13, 13746, 2086, 420, 572, 13, 3915, 5953, 420, 406, 14055, 13], "avg_logprob": -0.29706099771317984, "compression_ratio": 1.3983050847457628, "no_speech_prob": 0.0, "words": [{"start": 307.22, "end": 307.78, "word": " In", "probability": 0.41259765625}, {"start": 307.78, "end": 308.08, "word": " this", "probability": 0.92529296875}, {"start": 308.08, "end": 308.3, "word": " case,", "probability": 0.916015625}, {"start": 308.4, "end": 308.48, "word": " there", "probability": 0.880859375}, {"start": 308.48, "end": 308.62, "word": " are", "probability": 0.9365234375}, {"start": 308.62, "end": 308.82, "word": " two", "probability": 0.873046875}, {"start": 308.82, "end": 309.1, "word": " rules", "probability": 0.341064453125}, {"start": 309.1, "end": 309.26, "word": " and", "probability": 0.85546875}, {"start": 309.26, "end": 309.44, "word": " two", "probability": 0.93603515625}, {"start": 309.44, "end": 309.8, "word": " codes.", "probability": 0.266845703125}, {"start": 310.54, "end": 310.7, "word": " For", "probability": 0.90478515625}, {"start": 310.7, "end": 311.16, "word": " example,", "probability": 0.96875}, {"start": 313.12, "end": 313.64, "word": " gender,", "probability": 0.62353515625}, {"start": 316.26, "end": 317.06, "word": " male", "probability": 0.90234375}, {"start": 317.06, "end": 317.42, "word": " or", "probability": 0.8935546875}, {"start": 317.42, "end": 317.8, "word": " female,", "probability": 0.849609375}, {"start": 319.48, "end": 319.76, "word": " and", "probability": 0.91015625}, {"start": 319.76, "end": 319.92, "word": " the", "probability": 0.5361328125}, {"start": 319.92, "end": 320.06, "word": " other", "probability": 0.89501953125}, {"start": 320.06, "end": 320.26, "word": " one,", "probability": 0.8701171875}, {"start": 320.72, "end": 320.72, "word": " 
suppose,", "probability": 0.85205078125}, {"start": 321.1, "end": 321.7, "word": " smoking.", "probability": 0.93505859375}, {"start": 324.12, "end": 324.5, "word": " Either", "probability": 0.73486328125}, {"start": 324.5, "end": 325.88, "word": " yes", "probability": 0.8271484375}, {"start": 325.88, "end": 326.22, "word": " or", "probability": 0.94775390625}, {"start": 326.22, "end": 326.82, "word": " no.", "probability": 0.9609375}, {"start": 327.2, "end": 327.82, "word": " Smoking", "probability": 0.956787109375}, {"start": 327.82, "end": 328.14, "word": " or", "probability": 0.9404296875}, {"start": 328.14, "end": 328.36, "word": " not", "probability": 0.9208984375}, {"start": 328.36, "end": 328.72, "word": " smoking.", "probability": 0.91796875}], "temperature": 1.0}, {"id": 13, "seek": 35811, "start": 329.09, "end": 358.11, "text": " So in this case there are two rows and two columns. And the goal here is we want to test to see if there exists significant relationship between gender and smoking. So the two variables of interest in this case are categorical variables. 
So how can we test to see if there exists a significant difference between the two proportions or to see if smoking and", "tokens": [407, 294, 341, 1389, 456, 366, 732, 13241, 293, 732, 13766, 13, 400, 264, 3387, 510, 307, 321, 528, 281, 1500, 281, 536, 498, 456, 8198, 4776, 2480, 1296, 7898, 293, 14055, 13, 407, 264, 732, 9102, 295, 1179, 294, 341, 1389, 366, 19250, 804, 9102, 13, 407, 577, 393, 321, 1500, 281, 536, 498, 456, 8198, 257, 4776, 2649, 1296, 264, 732, 32482, 420, 281, 536, 498, 14055, 293], "avg_logprob": -0.14326584800867967, "compression_ratio": 1.87434554973822, "no_speech_prob": 0.0, "words": [{"start": 329.09, "end": 329.37, "word": " So", "probability": 0.63037109375}, {"start": 329.37, "end": 329.49, "word": " in", "probability": 0.6533203125}, {"start": 329.49, "end": 329.65, "word": " this", "probability": 0.9453125}, {"start": 329.65, "end": 329.85, "word": " case", "probability": 0.91455078125}, {"start": 329.85, "end": 330.03, "word": " there", "probability": 0.62841796875}, {"start": 330.03, "end": 330.15, "word": " are", "probability": 0.93505859375}, {"start": 330.15, "end": 330.35, "word": " two", "probability": 0.81591796875}, {"start": 330.35, "end": 330.65, "word": " rows", "probability": 0.896484375}, {"start": 330.65, "end": 330.83, "word": " and", "probability": 0.93310546875}, {"start": 330.83, "end": 331.01, "word": " two", "probability": 0.93359375}, {"start": 331.01, "end": 331.41, "word": " columns.", "probability": 0.94775390625}, {"start": 332.17, "end": 332.33, "word": " And", "probability": 0.81298828125}, {"start": 332.33, "end": 332.47, "word": " the", "probability": 0.90234375}, {"start": 332.47, "end": 332.65, "word": " goal", "probability": 0.97216796875}, {"start": 332.65, "end": 332.89, "word": " here", "probability": 0.83984375}, {"start": 332.89, "end": 333.11, "word": " is", "probability": 0.94140625}, {"start": 333.11, "end": 333.65, "word": " we", "probability": 0.5869140625}, {"start": 333.65, "end": 333.85, 
"word": " want", "probability": 0.78857421875}, {"start": 333.85, "end": 333.99, "word": " to", "probability": 0.94189453125}, {"start": 333.99, "end": 334.13, "word": " test", "probability": 0.71044921875}, {"start": 334.13, "end": 334.29, "word": " to", "probability": 0.900390625}, {"start": 334.29, "end": 334.47, "word": " see", "probability": 0.92626953125}, {"start": 334.47, "end": 335.65, "word": " if", "probability": 0.91748046875}, {"start": 335.65, "end": 335.91, "word": " there", "probability": 0.9169921875}, {"start": 335.91, "end": 336.33, "word": " exists", "probability": 0.74267578125}, {"start": 336.33, "end": 337.15, "word": " significant", "probability": 0.72509765625}, {"start": 337.15, "end": 337.83, "word": " relationship", "probability": 0.89990234375}, {"start": 337.83, "end": 338.19, "word": " between", "probability": 0.86669921875}, {"start": 338.19, "end": 338.53, "word": " gender", "probability": 0.9287109375}, {"start": 338.53, "end": 338.75, "word": " and", "probability": 0.94287109375}, {"start": 338.75, "end": 339.15, "word": " smoking.", "probability": 0.9365234375}, {"start": 339.99, "end": 340.29, "word": " So", "probability": 0.93701171875}, {"start": 340.29, "end": 340.41, "word": " the", "probability": 0.84716796875}, {"start": 340.41, "end": 340.59, "word": " two", "probability": 0.93896484375}, {"start": 340.59, "end": 341.07, "word": " variables", "probability": 0.94091796875}, {"start": 341.07, "end": 341.27, "word": " of", "probability": 0.95166015625}, {"start": 341.27, "end": 341.65, "word": " interest", "probability": 0.888671875}, {"start": 341.65, "end": 341.87, "word": " in", "probability": 0.92333984375}, {"start": 341.87, "end": 342.09, "word": " this", "probability": 0.947265625}, {"start": 342.09, "end": 342.45, "word": " case", "probability": 0.91259765625}, {"start": 342.45, "end": 342.95, "word": " are", "probability": 0.9345703125}, {"start": 342.95, "end": 343.65, "word": " categorical", "probability": 
0.8994140625}, {"start": 343.65, "end": 344.15, "word": " variables.", "probability": 0.93212890625}, {"start": 344.47, "end": 344.65, "word": " So", "probability": 0.9453125}, {"start": 344.65, "end": 344.81, "word": " how", "probability": 0.88232421875}, {"start": 344.81, "end": 345.03, "word": " can", "probability": 0.93896484375}, {"start": 345.03, "end": 345.31, "word": " we", "probability": 0.9560546875}, {"start": 345.31, "end": 346.55, "word": " test", "probability": 0.88427734375}, {"start": 346.55, "end": 347.43, "word": " to", "probability": 0.9287109375}, {"start": 347.43, "end": 347.81, "word": " see", "probability": 0.92236328125}, {"start": 347.81, "end": 348.49, "word": " if", "probability": 0.93408203125}, {"start": 348.49, "end": 348.73, "word": " there", "probability": 0.9189453125}, {"start": 348.73, "end": 349.23, "word": " exists", "probability": 0.82958984375}, {"start": 349.23, "end": 349.63, "word": " a", "probability": 0.9833984375}, {"start": 349.63, "end": 350.09, "word": " significant", "probability": 0.86962890625}, {"start": 350.09, "end": 350.79, "word": " difference", "probability": 0.8642578125}, {"start": 350.79, "end": 351.09, "word": " between", "probability": 0.87255859375}, {"start": 351.09, "end": 351.27, "word": " the", "probability": 0.91357421875}, {"start": 351.27, "end": 351.43, "word": " two", "probability": 0.94873046875}, {"start": 351.43, "end": 351.89, "word": " proportions", "probability": 0.84814453125}, {"start": 351.89, "end": 353.69, "word": " or", "probability": 0.44189453125}, {"start": 353.69, "end": 354.35, "word": " to", "probability": 0.9052734375}, {"start": 354.35, "end": 354.61, "word": " see", "probability": 0.91064453125}, {"start": 354.61, "end": 355.07, "word": " if", "probability": 0.9521484375}, {"start": 355.07, "end": 355.55, "word": " smoking", "probability": 0.93505859375}, {"start": 355.55, "end": 358.11, "word": " and", "probability": 0.89111328125}], "temperature": 1.0}, {"id": 14, "seek": 
37307, "start": 358.39, "end": 373.07, "text": " Gender are independent. So our goal is to see if they are independent or not. It means if we reject the null hypothesis of independence, it means they are related.", "tokens": [48039, 366, 6695, 13, 407, 527, 3387, 307, 281, 536, 498, 436, 366, 6695, 420, 406, 13, 467, 1355, 498, 321, 8248, 264, 18184, 17291, 295, 14640, 11, 309, 1355, 436, 366, 4077, 13], "avg_logprob": -0.1328125, "compression_ratio": 1.4774774774774775, "no_speech_prob": 0.0, "words": [{"start": 358.39, "end": 358.91, "word": " Gender", "probability": 0.73046875}, {"start": 358.91, "end": 361.09, "word": " are", "probability": 0.7685546875}, {"start": 361.09, "end": 361.85, "word": " independent.", "probability": 0.873046875}, {"start": 363.05, "end": 363.51, "word": " So", "probability": 0.880859375}, {"start": 363.51, "end": 363.83, "word": " our", "probability": 0.7275390625}, {"start": 363.83, "end": 364.05, "word": " goal", "probability": 0.95849609375}, {"start": 364.05, "end": 364.21, "word": " is", "probability": 0.9423828125}, {"start": 364.21, "end": 364.29, "word": " to", "probability": 0.9580078125}, {"start": 364.29, "end": 364.41, "word": " see", "probability": 0.9228515625}, {"start": 364.41, "end": 364.57, "word": " if", "probability": 0.92578125}, {"start": 364.57, "end": 364.67, "word": " they", "probability": 0.90283203125}, {"start": 364.67, "end": 364.87, "word": " are", "probability": 0.93310546875}, {"start": 364.87, "end": 365.33, "word": " independent", "probability": 0.9130859375}, {"start": 365.33, "end": 365.61, "word": " or", "probability": 0.95849609375}, {"start": 365.61, "end": 365.85, "word": " not.", "probability": 0.94873046875}, {"start": 366.01, "end": 366.17, "word": " It", "probability": 0.8515625}, {"start": 366.17, "end": 366.47, "word": " means", "probability": 0.9296875}, {"start": 366.47, "end": 367.01, "word": " if", "probability": 0.7734375}, {"start": 367.01, "end": 367.19, "word": " we", 
"probability": 0.85791015625}, {"start": 367.19, "end": 367.75, "word": " reject", "probability": 0.90771484375}, {"start": 367.75, "end": 368.53, "word": " the", "probability": 0.89892578125}, {"start": 368.53, "end": 368.79, "word": " null", "probability": 0.84716796875}, {"start": 368.79, "end": 369.47, "word": " hypothesis", "probability": 0.791015625}, {"start": 369.47, "end": 369.67, "word": " of", "probability": 0.96728515625}, {"start": 369.67, "end": 370.15, "word": " independence,", "probability": 0.90478515625}, {"start": 370.81, "end": 370.99, "word": " it", "probability": 0.94384765625}, {"start": 370.99, "end": 371.41, "word": " means", "probability": 0.93798828125}, {"start": 371.41, "end": 371.89, "word": " they", "probability": 0.873046875}, {"start": 371.89, "end": 372.29, "word": " are", "probability": 0.9482421875}, {"start": 372.29, "end": 373.07, "word": " related.", "probability": 0.96435546875}], "temperature": 1.0}, {"id": 15, "seek": 40332, "start": 374.78, "end": 403.32, "text": " So again, a contingency table, tables in this case useful in situations involving multiple population proportions, so we have more than two, or even two population proportions, used to classify sample observations according to two or more characteristics. In this case, there are only two characteristics of interest. One is education, the other is gender. 
It could be we have another characteristic.", "tokens": [407, 797, 11, 257, 27820, 3020, 3199, 11, 8020, 294, 341, 1389, 4420, 294, 6851, 17030, 3866, 4415, 32482, 11, 370, 321, 362, 544, 813, 732, 11, 420, 754, 732, 4415, 32482, 11, 1143, 281, 33872, 6889, 18163, 4650, 281, 732, 420, 544, 10891, 13, 682, 341, 1389, 11, 456, 366, 787, 732, 10891, 295, 1179, 13, 1485, 307, 3309, 11, 264, 661, 307, 7898, 13, 467, 727, 312, 321, 362, 1071, 16282, 13], "avg_logprob": -0.17572916070620218, "compression_ratio": 1.8063063063063063, "no_speech_prob": 0.0, "words": [{"start": 374.78, "end": 375.02, "word": " So", "probability": 0.91650390625}, {"start": 375.02, "end": 375.32, "word": " again,", "probability": 0.81884765625}, {"start": 375.44, "end": 375.76, "word": " a", "probability": 0.67431640625}, {"start": 375.76, "end": 377.16, "word": " contingency", "probability": 0.84375}, {"start": 377.16, "end": 377.5, "word": " table,", "probability": 0.7734375}, {"start": 377.62, "end": 378.1, "word": " tables", "probability": 0.4990234375}, {"start": 378.1, "end": 378.24, "word": " in", "probability": 0.87060546875}, {"start": 378.24, "end": 378.44, "word": " this", "probability": 0.94482421875}, {"start": 378.44, "end": 378.68, "word": " case", "probability": 0.9013671875}, {"start": 378.68, "end": 379.12, "word": " useful", "probability": 0.73291015625}, {"start": 379.12, "end": 379.36, "word": " in", "probability": 0.912109375}, {"start": 379.36, "end": 379.92, "word": " situations", "probability": 0.7890625}, {"start": 379.92, "end": 380.46, "word": " involving", "probability": 0.9248046875}, {"start": 380.46, "end": 381.14, "word": " multiple", "probability": 0.88232421875}, {"start": 381.14, "end": 382.26, "word": " population", "probability": 0.951171875}, {"start": 382.26, "end": 382.82, "word": " proportions,", "probability": 0.775390625}, {"start": 383.42, "end": 383.8, "word": " so", "probability": 0.9384765625}, {"start": 383.8, "end": 383.92, "word": " we", 
"probability": 0.85400390625}, {"start": 383.92, "end": 384.02, "word": " have", "probability": 0.94677734375}, {"start": 384.02, "end": 384.22, "word": " more", "probability": 0.939453125}, {"start": 384.22, "end": 384.36, "word": " than", "probability": 0.9501953125}, {"start": 384.36, "end": 384.62, "word": " two,", "probability": 0.87060546875}, {"start": 385.38, "end": 385.72, "word": " or", "probability": 0.9560546875}, {"start": 385.72, "end": 386.04, "word": " even", "probability": 0.875}, {"start": 386.04, "end": 386.32, "word": " two", "probability": 0.93017578125}, {"start": 386.32, "end": 386.82, "word": " population", "probability": 0.9375}, {"start": 386.82, "end": 387.38, "word": " proportions,", "probability": 0.50439453125}, {"start": 388.06, "end": 388.6, "word": " used", "probability": 0.8759765625}, {"start": 388.6, "end": 388.88, "word": " to", "probability": 0.97119140625}, {"start": 388.88, "end": 389.42, "word": " classify", "probability": 0.97802734375}, {"start": 389.42, "end": 390.0, "word": " sample", "probability": 0.7763671875}, {"start": 390.0, "end": 390.64, "word": " observations", "probability": 0.8046875}, {"start": 390.64, "end": 391.22, "word": " according", "probability": 0.91064453125}, {"start": 391.22, "end": 391.52, "word": " to", "probability": 0.9658203125}, {"start": 391.52, "end": 391.86, "word": " two", "probability": 0.92822265625}, {"start": 391.86, "end": 392.68, "word": " or", "probability": 0.9384765625}, {"start": 392.68, "end": 393.04, "word": " more", "probability": 0.94091796875}, {"start": 393.04, "end": 393.8, "word": " characteristics.", "probability": 0.87841796875}, {"start": 394.2, "end": 394.38, "word": " In", "probability": 0.9404296875}, {"start": 394.38, "end": 394.58, "word": " this", "probability": 0.9453125}, {"start": 394.58, "end": 394.88, "word": " case,", "probability": 0.91064453125}, {"start": 395.44, "end": 395.68, "word": " there", "probability": 0.88916015625}, {"start": 395.68, "end": 
396.06, "word": " are", "probability": 0.9384765625}, {"start": 396.06, "end": 396.4, "word": " only", "probability": 0.923828125}, {"start": 396.4, "end": 396.64, "word": " two", "probability": 0.9345703125}, {"start": 396.64, "end": 397.34, "word": " characteristics", "probability": 0.89892578125}, {"start": 397.34, "end": 397.76, "word": " of", "probability": 0.96923828125}, {"start": 397.76, "end": 398.12, "word": " interest.", "probability": 0.88623046875}, {"start": 398.3, "end": 398.5, "word": " One", "probability": 0.92578125}, {"start": 398.5, "end": 398.72, "word": " is", "probability": 0.93701171875}, {"start": 398.72, "end": 399.14, "word": " education,", "probability": 0.94775390625}, {"start": 399.32, "end": 399.4, "word": " the", "probability": 0.771484375}, {"start": 399.4, "end": 399.62, "word": " other", "probability": 0.89599609375}, {"start": 399.62, "end": 399.82, "word": " is", "probability": 0.93017578125}, {"start": 399.82, "end": 400.14, "word": " gender.", "probability": 0.89208984375}, {"start": 400.74, "end": 400.86, "word": " It", "probability": 0.927734375}, {"start": 400.86, "end": 401.02, "word": " could", "probability": 0.87158203125}, {"start": 401.02, "end": 401.18, "word": " be", "probability": 0.95654296875}, {"start": 401.18, "end": 401.36, "word": " we", "probability": 0.919921875}, {"start": 401.36, "end": 401.6, "word": " have", "probability": 0.9423828125}, {"start": 401.6, "end": 402.04, "word": " another", "probability": 0.9345703125}, {"start": 402.04, "end": 403.32, "word": " characteristic.", "probability": 0.91748046875}], "temperature": 1.0}, {"id": 16, "seek": 43014, "start": 404.04, "end": 430.14, "text": " The other thing is these tables are called cross-classification tables. Cross because we have variable A versus variable B. For this reason it's called cross-classification tables. There is another example here. 
Here we are interested in left-handed versus gender.", "tokens": [440, 661, 551, 307, 613, 8020, 366, 1219, 3278, 12, 11665, 3774, 8020, 13, 11623, 570, 321, 362, 7006, 316, 5717, 7006, 363, 13, 1171, 341, 1778, 309, 311, 1219, 3278, 12, 11665, 3774, 8020, 13, 821, 307, 1071, 1365, 510, 13, 1692, 321, 366, 3102, 294, 1411, 12, 25407, 5717, 7898, 13], "avg_logprob": -0.20732059522911353, "compression_ratio": 1.65625, "no_speech_prob": 0.0, "words": [{"start": 404.04, "end": 404.36, "word": " The", "probability": 0.73828125}, {"start": 404.36, "end": 404.6, "word": " other", "probability": 0.88818359375}, {"start": 404.6, "end": 404.9, "word": " thing", "probability": 0.888671875}, {"start": 404.9, "end": 405.24, "word": " is", "probability": 0.93603515625}, {"start": 405.24, "end": 406.94, "word": " these", "probability": 0.533203125}, {"start": 406.94, "end": 407.44, "word": " tables", "probability": 0.8232421875}, {"start": 407.44, "end": 408.48, "word": " are", "probability": 0.9208984375}, {"start": 408.48, "end": 408.92, "word": " called", "probability": 0.8681640625}, {"start": 408.92, "end": 409.52, "word": " cross", "probability": 0.7744140625}, {"start": 409.52, "end": 411.02, "word": "-classification", "probability": 0.7451985677083334}, {"start": 411.02, "end": 411.68, "word": " tables.", "probability": 0.61279296875}, {"start": 411.78, "end": 412.02, "word": " Cross", "probability": 0.68115234375}, {"start": 412.02, "end": 412.26, "word": " because", "probability": 0.76220703125}, {"start": 412.26, "end": 412.54, "word": " we", "probability": 0.92041015625}, {"start": 412.54, "end": 412.8, "word": " have", "probability": 0.94677734375}, {"start": 412.8, "end": 414.44, "word": " variable", "probability": 0.70654296875}, {"start": 414.44, "end": 414.9, "word": " A", "probability": 0.662109375}, {"start": 414.9, "end": 415.52, "word": " versus", "probability": 0.83349609375}, {"start": 415.52, "end": 416.06, "word": " variable", "probability": 
0.9248046875}, {"start": 416.06, "end": 416.36, "word": " B.", "probability": 0.98779296875}, {"start": 416.46, "end": 416.66, "word": " For", "probability": 0.93017578125}, {"start": 416.66, "end": 416.9, "word": " this", "probability": 0.88330078125}, {"start": 416.9, "end": 417.96, "word": " reason", "probability": 0.5654296875}, {"start": 417.96, "end": 418.18, "word": " it's", "probability": 0.5673828125}, {"start": 418.18, "end": 418.6, "word": " called", "probability": 0.8876953125}, {"start": 418.6, "end": 419.92, "word": " cross", "probability": 0.68798828125}, {"start": 419.92, "end": 421.08, "word": "-classification", "probability": 0.92724609375}, {"start": 421.08, "end": 421.54, "word": " tables.", "probability": 0.806640625}, {"start": 423.46, "end": 424.26, "word": " There", "probability": 0.7734375}, {"start": 424.26, "end": 424.44, "word": " is", "probability": 0.90673828125}, {"start": 424.44, "end": 424.76, "word": " another", "probability": 0.91796875}, {"start": 424.76, "end": 425.16, "word": " example", "probability": 0.9736328125}, {"start": 425.16, "end": 425.54, "word": " here.", "probability": 0.8359375}, {"start": 426.04, "end": 426.38, "word": " Here", "probability": 0.837890625}, {"start": 426.38, "end": 426.48, "word": " we", "probability": 0.8408203125}, {"start": 426.48, "end": 426.62, "word": " are", "probability": 0.93115234375}, {"start": 426.62, "end": 427.06, "word": " interested", "probability": 0.88330078125}, {"start": 427.06, "end": 427.36, "word": " in", "probability": 0.94384765625}, {"start": 427.36, "end": 427.62, "word": " left", "probability": 0.91650390625}, {"start": 427.62, "end": 428.08, "word": "-handed", "probability": 0.830322265625}, {"start": 428.08, "end": 429.68, "word": " versus", "probability": 0.900390625}, {"start": 429.68, "end": 430.14, "word": " gender.", "probability": 0.95654296875}], "temperature": 1.0}, {"id": 17, "seek": 45292, "start": 432.0, "end": 452.92, "text": " So, dominant hand left 
versus right, so the person either use left or right hand, gender male or female. So in this case there are two categories for each variable, so this type of example is called", "tokens": [407, 11, 15657, 1011, 1411, 5717, 558, 11, 370, 264, 954, 2139, 764, 1411, 420, 558, 1011, 11, 7898, 7133, 420, 6556, 13, 407, 294, 341, 1389, 456, 366, 732, 10479, 337, 1184, 7006, 11, 370, 341, 2010, 295, 1365, 307, 1219], "avg_logprob": -0.30069040974905326, "compression_ratio": 1.4525547445255473, "no_speech_prob": 0.0, "words": [{"start": 432.0, "end": 432.88, "word": " So,", "probability": 0.307373046875}, {"start": 433.84, "end": 433.84, "word": " dominant", "probability": 0.44580078125}, {"start": 433.84, "end": 434.36, "word": " hand", "probability": 0.9052734375}, {"start": 434.36, "end": 434.7, "word": " left", "probability": 0.3125}, {"start": 434.7, "end": 435.16, "word": " versus", "probability": 0.5595703125}, {"start": 435.16, "end": 435.74, "word": " right,", "probability": 0.90869140625}, {"start": 436.24, "end": 436.34, "word": " so", "probability": 0.77490234375}, {"start": 436.34, "end": 436.5, "word": " the", "probability": 0.83642578125}, {"start": 436.5, "end": 436.92, "word": " person", "probability": 0.87890625}, {"start": 436.92, "end": 437.94, "word": " either", "probability": 0.83203125}, {"start": 437.94, "end": 439.68, "word": " use", "probability": 0.478515625}, {"start": 439.68, "end": 441.42, "word": " left", "probability": 0.8896484375}, {"start": 441.42, "end": 442.32, "word": " or", "probability": 0.9248046875}, {"start": 442.32, "end": 442.62, "word": " right", "probability": 0.919921875}, {"start": 442.62, "end": 443.0, "word": " hand,", "probability": 0.8994140625}, {"start": 443.52, "end": 444.38, "word": " gender", "probability": 0.873046875}, {"start": 444.38, "end": 444.96, "word": " male", "probability": 0.81787109375}, {"start": 444.96, "end": 445.48, "word": " or", "probability": 0.9619140625}, {"start": 445.48, "end": 445.92, 
"word": " female.", "probability": 0.83447265625}, {"start": 446.48, "end": 446.7, "word": " So", "probability": 0.8017578125}, {"start": 446.7, "end": 446.8, "word": " in", "probability": 0.4921875}, {"start": 446.8, "end": 446.96, "word": " this", "probability": 0.94873046875}, {"start": 446.96, "end": 447.18, "word": " case", "probability": 0.912109375}, {"start": 447.18, "end": 447.36, "word": " there", "probability": 0.5751953125}, {"start": 447.36, "end": 447.56, "word": " are", "probability": 0.93994140625}, {"start": 447.56, "end": 447.92, "word": " two", "probability": 0.84130859375}, {"start": 447.92, "end": 448.34, "word": " categories", "probability": 0.9443359375}, {"start": 448.34, "end": 448.66, "word": " for", "probability": 0.93896484375}, {"start": 448.66, "end": 448.92, "word": " each", "probability": 0.93994140625}, {"start": 448.92, "end": 449.4, "word": " variable,", "probability": 0.890625}, {"start": 450.18, "end": 450.44, "word": " so", "probability": 0.90234375}, {"start": 450.44, "end": 451.1, "word": " this", "probability": 0.93310546875}, {"start": 451.1, "end": 451.58, "word": " type", "probability": 0.9697265625}, {"start": 451.58, "end": 451.76, "word": " of", "probability": 0.955078125}, {"start": 451.76, "end": 452.18, "word": " example", "probability": 0.95654296875}, {"start": 452.18, "end": 452.48, "word": " is", "probability": 0.9375}, {"start": 452.48, "end": 452.92, "word": " called", "probability": 0.89501953125}], "temperature": 1.0}, {"id": 18, "seek": 48031, "start": 454.07, "end": 480.31, "text": " Two by two table, because there are two rows, two classifications here. So hand either left or right, the person either male or female. So we have two characteristics for each one, so it's two by two table. Suppose in this case we are examining a sample of 300 children. 
So the sample size is 300, and we have this result.", "tokens": [4453, 538, 732, 3199, 11, 570, 456, 366, 732, 13241, 11, 732, 1508, 7833, 510, 13, 407, 1011, 2139, 1411, 420, 558, 11, 264, 954, 2139, 7133, 420, 6556, 13, 407, 321, 362, 732, 10891, 337, 1184, 472, 11, 370, 309, 311, 732, 538, 732, 3199, 13, 21360, 294, 341, 1389, 321, 366, 34662, 257, 6889, 295, 6641, 2227, 13, 407, 264, 6889, 2744, 307, 6641, 11, 293, 321, 362, 341, 1874, 13], "avg_logprob": -0.17345861466349782, "compression_ratio": 1.6069651741293531, "no_speech_prob": 0.0, "words": [{"start": 454.07, "end": 454.37, "word": " Two", "probability": 0.1671142578125}, {"start": 454.37, "end": 454.55, "word": " by", "probability": 0.8251953125}, {"start": 454.55, "end": 454.75, "word": " two", "probability": 0.9404296875}, {"start": 454.75, "end": 455.07, "word": " table,", "probability": 0.8212890625}, {"start": 455.53, "end": 455.85, "word": " because", "probability": 0.900390625}, {"start": 455.85, "end": 456.03, "word": " there", "probability": 0.90185546875}, {"start": 456.03, "end": 456.21, "word": " are", "probability": 0.9423828125}, {"start": 456.21, "end": 456.63, "word": " two", "probability": 0.9345703125}, {"start": 456.63, "end": 458.09, "word": " rows,", "probability": 0.51611328125}, {"start": 458.31, "end": 458.51, "word": " two", "probability": 0.9306640625}, {"start": 458.51, "end": 459.31, "word": " classifications", "probability": 0.93994140625}, {"start": 459.31, "end": 459.69, "word": " here.", "probability": 0.775390625}, {"start": 460.47, "end": 460.85, "word": " So", "probability": 0.92138671875}, {"start": 460.85, "end": 461.69, "word": " hand", "probability": 0.72509765625}, {"start": 461.69, "end": 462.11, "word": " either", "probability": 0.77197265625}, {"start": 462.11, "end": 462.41, "word": " left", "probability": 0.947265625}, {"start": 462.41, "end": 462.61, "word": " or", "probability": 0.96337890625}, {"start": 462.61, "end": 463.05, "word": " right,", 
"probability": 0.9169921875}, {"start": 463.85, "end": 464.03, "word": " the", "probability": 0.74462890625}, {"start": 464.03, "end": 464.41, "word": " person", "probability": 0.88671875}, {"start": 464.41, "end": 464.75, "word": " either", "probability": 0.8994140625}, {"start": 464.75, "end": 465.07, "word": " male", "probability": 0.892578125}, {"start": 465.07, "end": 465.29, "word": " or", "probability": 0.9599609375}, {"start": 465.29, "end": 465.69, "word": " female.", "probability": 0.86328125}, {"start": 465.83, "end": 465.97, "word": " So", "probability": 0.95263671875}, {"start": 465.97, "end": 466.19, "word": " we", "probability": 0.91455078125}, {"start": 466.19, "end": 466.55, "word": " have", "probability": 0.94091796875}, {"start": 466.55, "end": 467.13, "word": " two", "probability": 0.92724609375}, {"start": 467.13, "end": 467.69, "word": " characteristics", "probability": 0.72119140625}, {"start": 467.69, "end": 468.01, "word": " for", "probability": 0.94091796875}, {"start": 468.01, "end": 468.17, "word": " each", "probability": 0.94091796875}, {"start": 468.17, "end": 468.39, "word": " one,", "probability": 0.93212890625}, {"start": 468.47, "end": 468.59, "word": " so", "probability": 0.93701171875}, {"start": 468.59, "end": 468.87, "word": " it's", "probability": 0.953369140625}, {"start": 468.87, "end": 469.07, "word": " two", "probability": 0.84326171875}, {"start": 469.07, "end": 469.23, "word": " by", "probability": 0.94189453125}, {"start": 469.23, "end": 469.43, "word": " two", "probability": 0.9365234375}, {"start": 469.43, "end": 469.73, "word": " table.", "probability": 0.8759765625}, {"start": 470.69, "end": 471.13, "word": " Suppose", "probability": 0.69384765625}, {"start": 471.13, "end": 471.31, "word": " in", "probability": 0.87939453125}, {"start": 471.31, "end": 471.51, "word": " this", "probability": 0.94580078125}, {"start": 471.51, "end": 471.73, "word": " case", "probability": 0.90673828125}, {"start": 471.73, "end": 
471.99, "word": " we", "probability": 0.60498046875}, {"start": 471.99, "end": 472.33, "word": " are", "probability": 0.9404296875}, {"start": 472.33, "end": 472.91, "word": " examining", "probability": 0.96044921875}, {"start": 472.91, "end": 473.27, "word": " a", "probability": 0.97998046875}, {"start": 473.27, "end": 473.55, "word": " sample", "probability": 0.8974609375}, {"start": 473.55, "end": 474.07, "word": " of", "probability": 0.97412109375}, {"start": 474.07, "end": 474.53, "word": " 300", "probability": 0.95068359375}, {"start": 474.53, "end": 475.03, "word": " children.", "probability": 0.8271484375}, {"start": 475.21, "end": 475.33, "word": " So", "probability": 0.9619140625}, {"start": 475.33, "end": 475.55, "word": " the", "probability": 0.822265625}, {"start": 475.55, "end": 475.91, "word": " sample", "probability": 0.8837890625}, {"start": 475.91, "end": 476.95, "word": " size", "probability": 0.83154296875}, {"start": 476.95, "end": 477.61, "word": " is", "probability": 0.9267578125}, {"start": 477.61, "end": 478.05, "word": " 300,", "probability": 0.97412109375}, {"start": 478.51, "end": 478.85, "word": " and", "probability": 0.93701171875}, {"start": 478.85, "end": 479.01, "word": " we", "probability": 0.95849609375}, {"start": 479.01, "end": 479.19, "word": " have", "probability": 0.94189453125}, {"start": 479.19, "end": 479.63, "word": " this", "probability": 0.9404296875}, {"start": 479.63, "end": 480.31, "word": " result.", "probability": 0.95263671875}], "temperature": 1.0}, {"id": 19, "seek": 50710, "start": 481.48, "end": 507.1, "text": " So gender is classified as males and females. Hand preference, either left or right. So in this case, there are 120 females. Twelve of them are using left hand. 
So it means that there are twelve left handers for females.", "tokens": [407, 7898, 307, 20627, 382, 20776, 293, 21529, 13, 8854, 17502, 11, 2139, 1411, 420, 558, 13, 407, 294, 341, 1389, 11, 456, 366, 10411, 21529, 13, 48063, 295, 552, 366, 1228, 1411, 1011, 13, 407, 309, 1355, 300, 456, 366, 14390, 1411, 1011, 433, 337, 21529, 13], "avg_logprob": -0.18702168367346939, "compression_ratio": 1.5785714285714285, "no_speech_prob": 0.0, "words": [{"start": 481.48, "end": 481.86, "word": " So", "probability": 0.828125}, {"start": 481.86, "end": 482.28, "word": " gender", "probability": 0.6025390625}, {"start": 482.28, "end": 482.86, "word": " is", "probability": 0.94677734375}, {"start": 482.86, "end": 484.0, "word": " classified", "probability": 0.93115234375}, {"start": 484.0, "end": 484.54, "word": " as", "probability": 0.9677734375}, {"start": 484.54, "end": 485.04, "word": " males", "probability": 0.89892578125}, {"start": 485.04, "end": 485.44, "word": " and", "probability": 0.943359375}, {"start": 485.44, "end": 486.02, "word": " females.", "probability": 0.931640625}, {"start": 487.1, "end": 487.56, "word": " Hand", "probability": 0.62939453125}, {"start": 487.56, "end": 488.14, "word": " preference,", "probability": 0.87255859375}, {"start": 488.58, "end": 488.82, "word": " either", "probability": 0.92138671875}, {"start": 488.82, "end": 489.2, "word": " left", "probability": 0.93896484375}, {"start": 489.2, "end": 489.4, "word": " or", "probability": 0.96923828125}, {"start": 489.4, "end": 489.74, "word": " right.", "probability": 0.91845703125}, {"start": 490.42, "end": 490.58, "word": " So", "probability": 0.9169921875}, {"start": 490.58, "end": 490.7, "word": " in", "probability": 0.8798828125}, {"start": 490.7, "end": 490.9, "word": " this", "probability": 0.94677734375}, {"start": 490.9, "end": 491.26, "word": " case,", "probability": 0.91259765625}, {"start": 492.5, "end": 492.8, "word": " there", "probability": 0.90869140625}, {"start": 492.8, "end": 
493.06, "word": " are", "probability": 0.94580078125}, {"start": 493.06, "end": 493.68, "word": " 120", "probability": 0.86376953125}, {"start": 493.68, "end": 495.78, "word": " females.", "probability": 0.75830078125}, {"start": 496.58, "end": 496.96, "word": " Twelve", "probability": 0.5986328125}, {"start": 496.96, "end": 497.22, "word": " of", "probability": 0.97119140625}, {"start": 497.22, "end": 497.96, "word": " them", "probability": 0.8916015625}, {"start": 497.96, "end": 499.04, "word": " are", "probability": 0.93212890625}, {"start": 499.04, "end": 499.5, "word": " using", "probability": 0.93408203125}, {"start": 499.5, "end": 500.82, "word": " left", "probability": 0.90869140625}, {"start": 500.82, "end": 501.22, "word": " hand.", "probability": 0.5751953125}, {"start": 501.46, "end": 501.56, "word": " So", "probability": 0.92529296875}, {"start": 501.56, "end": 502.58, "word": " it", "probability": 0.78857421875}, {"start": 502.58, "end": 502.84, "word": " means", "probability": 0.892578125}, {"start": 502.84, "end": 503.16, "word": " that", "probability": 0.86279296875}, {"start": 503.16, "end": 503.88, "word": " there", "probability": 0.89697265625}, {"start": 503.88, "end": 504.16, "word": " are", "probability": 0.94287109375}, {"start": 504.16, "end": 504.6, "word": " twelve", "probability": 0.89990234375}, {"start": 504.6, "end": 505.58, "word": " left", "probability": 0.9306640625}, {"start": 505.58, "end": 506.2, "word": " handers", "probability": 0.7568359375}, {"start": 506.2, "end": 506.64, "word": " for", "probability": 0.9150390625}, {"start": 506.64, "end": 507.1, "word": " females.", "probability": 0.916015625}], "temperature": 1.0}, {"id": 20, "seek": 53739, "start": 509.31, "end": 537.39, "text": " while for males there are 180 females and 20 of them left-handers and so again 120 females 12 were left-handed 180 males and 20 were also left-handed and now the question is we are going to test see if the difference between the two 
proportions are equal I mean", "tokens": [1339, 337, 20776, 456, 366, 11971, 21529, 293, 945, 295, 552, 1411, 12, 5543, 433, 293, 370, 797, 10411, 21529, 2272, 645, 1411, 12, 25407, 11971, 20776, 293, 945, 645, 611, 1411, 12, 25407, 293, 586, 264, 1168, 307, 321, 366, 516, 281, 1500, 536, 498, 264, 2649, 1296, 264, 732, 32482, 366, 2681, 286, 914], "avg_logprob": -0.20504385337494968, "compression_ratio": 1.6903225806451614, "no_speech_prob": 0.0, "words": [{"start": 509.31, "end": 510.11, "word": " while", "probability": 0.385498046875}, {"start": 510.11, "end": 510.47, "word": " for", "probability": 0.8955078125}, {"start": 510.47, "end": 510.87, "word": " males", "probability": 0.92236328125}, {"start": 510.87, "end": 511.27, "word": " there", "probability": 0.65625}, {"start": 511.27, "end": 511.51, "word": " are", "probability": 0.91162109375}, {"start": 511.51, "end": 512.61, "word": " 180", "probability": 0.6884765625}, {"start": 512.61, "end": 513.27, "word": " females", "probability": 0.95458984375}, {"start": 513.27, "end": 513.79, "word": " and", "probability": 0.84375}, {"start": 513.79, "end": 514.17, "word": " 20", "probability": 0.83056640625}, {"start": 514.17, "end": 514.33, "word": " of", "probability": 0.951171875}, {"start": 514.33, "end": 514.69, "word": " them", "probability": 0.9072265625}, {"start": 514.69, "end": 515.81, "word": " left", "probability": 0.80810546875}, {"start": 515.81, "end": 516.51, "word": "-handers", "probability": 0.7726236979166666}, {"start": 516.51, "end": 517.73, "word": " and", "probability": 0.298583984375}, {"start": 517.73, "end": 517.99, "word": " so", "probability": 0.908203125}, {"start": 517.99, "end": 518.33, "word": " again", "probability": 0.93798828125}, {"start": 518.33, "end": 519.03, "word": " 120", "probability": 0.7421875}, {"start": 519.03, "end": 519.67, "word": " females", "probability": 0.9384765625}, {"start": 519.67, "end": 520.03, "word": " 12", "probability": 0.54833984375}, {"start": 
520.03, "end": 520.31, "word": " were", "probability": 0.84716796875}, {"start": 520.31, "end": 520.69, "word": " left", "probability": 0.9296875}, {"start": 520.69, "end": 521.13, "word": "-handed", "probability": 0.84423828125}, {"start": 521.13, "end": 522.31, "word": " 180", "probability": 0.78173828125}, {"start": 522.31, "end": 522.93, "word": " males", "probability": 0.9443359375}, {"start": 522.93, "end": 523.91, "word": " and", "probability": 0.9072265625}, {"start": 523.91, "end": 524.19, "word": " 20", "probability": 0.85693359375}, {"start": 524.19, "end": 524.51, "word": " were", "probability": 0.84765625}, {"start": 524.51, "end": 525.59, "word": " also", "probability": 0.822265625}, {"start": 525.59, "end": 526.21, "word": " left", "probability": 0.92333984375}, {"start": 526.21, "end": 527.21, "word": "-handed", "probability": 0.862548828125}, {"start": 527.21, "end": 528.03, "word": " and", "probability": 0.673828125}, {"start": 528.03, "end": 528.15, "word": " now", "probability": 0.9296875}, {"start": 528.15, "end": 528.33, "word": " the", "probability": 0.8994140625}, {"start": 528.33, "end": 528.63, "word": " question", "probability": 0.916015625}, {"start": 528.63, "end": 529.03, "word": " is", "probability": 0.9521484375}, {"start": 529.03, "end": 530.51, "word": " we", "probability": 0.779296875}, {"start": 530.51, "end": 530.65, "word": " are", "probability": 0.9384765625}, {"start": 530.65, "end": 530.89, "word": " going", "probability": 0.943359375}, {"start": 530.89, "end": 531.07, "word": " to", "probability": 0.96923828125}, {"start": 531.07, "end": 531.33, "word": " test", "probability": 0.87744140625}, {"start": 531.33, "end": 531.75, "word": " see", "probability": 0.472412109375}, {"start": 531.75, "end": 532.57, "word": " if", "probability": 0.9521484375}, {"start": 532.57, "end": 534.15, "word": " the", "probability": 0.89892578125}, {"start": 534.15, "end": 534.71, "word": " difference", "probability": 0.845703125}, {"start": 
534.71, "end": 535.17, "word": " between", "probability": 0.87353515625}, {"start": 535.17, "end": 535.43, "word": " the", "probability": 0.91845703125}, {"start": 535.43, "end": 535.63, "word": " two", "probability": 0.9169921875}, {"start": 535.63, "end": 536.17, "word": " proportions", "probability": 0.81591796875}, {"start": 536.17, "end": 536.47, "word": " are", "probability": 0.9326171875}, {"start": 536.47, "end": 536.77, "word": " equal", "probability": 0.8798828125}, {"start": 536.77, "end": 537.13, "word": " I", "probability": 0.53662109375}, {"start": 537.13, "end": 537.39, "word": " mean", "probability": 0.962890625}], "temperature": 1.0}, {"id": 21, "seek": 55947, "start": 538.41, "end": 559.47, "text": " Under zero, we are going to test to see if pi 1 equals to pi 2. It means the proportion of females who are left-handed is equal to the proportion of males who are left-handed. So it looks similar to the one we did in chapter 10 when we are talking about", "tokens": [6974, 4018, 11, 321, 366, 516, 281, 1500, 281, 536, 498, 3895, 502, 6915, 281, 3895, 568, 13, 467, 1355, 264, 16068, 295, 21529, 567, 366, 1411, 12, 25407, 307, 2681, 281, 264, 16068, 295, 20776, 567, 366, 1411, 12, 25407, 13, 407, 309, 1542, 2531, 281, 264, 472, 321, 630, 294, 7187, 1266, 562, 321, 366, 1417, 466], "avg_logprob": -0.26119791666666664, "compression_ratio": 1.6178343949044587, "no_speech_prob": 0.0, "words": [{"start": 538.41, "end": 538.83, "word": " Under", "probability": 0.43798828125}, {"start": 538.83, "end": 539.29, "word": " zero,", "probability": 0.216064453125}, {"start": 539.95, "end": 540.15, "word": " we", "probability": 0.9013671875}, {"start": 540.15, "end": 540.29, "word": " are", "probability": 0.8662109375}, {"start": 540.29, "end": 540.49, "word": " going", "probability": 0.94091796875}, {"start": 540.49, "end": 540.67, "word": " to", "probability": 0.9658203125}, {"start": 540.67, "end": 540.93, "word": " test", "probability": 0.8427734375}, {"start": 
540.93, "end": 541.11, "word": " to", "probability": 0.82275390625}, {"start": 541.11, "end": 541.29, "word": " see", "probability": 0.91650390625}, {"start": 541.29, "end": 541.53, "word": " if", "probability": 0.9345703125}, {"start": 541.53, "end": 541.79, "word": " pi", "probability": 0.473876953125}, {"start": 541.79, "end": 542.07, "word": " 1", "probability": 0.4501953125}, {"start": 542.07, "end": 542.25, "word": " equals", "probability": 0.541015625}, {"start": 542.25, "end": 542.45, "word": " to", "probability": 0.50146484375}, {"start": 542.45, "end": 542.49, "word": " pi", "probability": 0.6396484375}, {"start": 542.49, "end": 542.69, "word": " 2.", "probability": 0.95751953125}, {"start": 542.75, "end": 542.81, "word": " It", "probability": 0.7373046875}, {"start": 542.81, "end": 543.15, "word": " means", "probability": 0.9267578125}, {"start": 543.15, "end": 543.85, "word": " the", "probability": 0.56005859375}, {"start": 543.85, "end": 544.33, "word": " proportion", "probability": 0.76611328125}, {"start": 544.33, "end": 546.17, "word": " of", "probability": 0.91259765625}, {"start": 546.17, "end": 546.75, "word": " females", "probability": 0.9208984375}, {"start": 546.75, "end": 547.27, "word": " who", "probability": 0.88134765625}, {"start": 547.27, "end": 547.53, "word": " are", "probability": 0.94189453125}, {"start": 547.53, "end": 547.87, "word": " left", "probability": 0.95068359375}, {"start": 547.87, "end": 548.29, "word": "-handed", "probability": 0.772705078125}, {"start": 548.29, "end": 549.57, "word": " is", "probability": 0.82666015625}, {"start": 549.57, "end": 549.97, "word": " equal", "probability": 0.8720703125}, {"start": 549.97, "end": 550.21, "word": " to", "probability": 0.9697265625}, {"start": 550.21, "end": 550.35, "word": " the", "probability": 0.92236328125}, {"start": 550.35, "end": 550.81, "word": " proportion", "probability": 0.83935546875}, {"start": 550.81, "end": 551.07, "word": " of", "probability": 0.966796875}, 
{"start": 551.07, "end": 551.57, "word": " males", "probability": 0.91455078125}, {"start": 551.57, "end": 552.13, "word": " who", "probability": 0.88671875}, {"start": 552.13, "end": 552.51, "word": " are", "probability": 0.9423828125}, {"start": 552.51, "end": 553.11, "word": " left", "probability": 0.94384765625}, {"start": 553.11, "end": 553.53, "word": "-handed.", "probability": 0.88623046875}, {"start": 553.93, "end": 554.21, "word": " So", "probability": 0.8193359375}, {"start": 554.21, "end": 554.41, "word": " it", "probability": 0.6552734375}, {"start": 554.41, "end": 554.69, "word": " looks", "probability": 0.5625}, {"start": 554.69, "end": 555.09, "word": " similar", "probability": 0.95263671875}, {"start": 555.09, "end": 555.49, "word": " to", "probability": 0.9658203125}, {"start": 555.49, "end": 556.41, "word": " the", "probability": 0.7138671875}, {"start": 556.41, "end": 556.61, "word": " one", "probability": 0.8662109375}, {"start": 556.61, "end": 556.77, "word": " we", "probability": 0.951171875}, {"start": 556.77, "end": 556.97, "word": " did", "probability": 0.9453125}, {"start": 556.97, "end": 557.15, "word": " in", "probability": 0.93408203125}, {"start": 557.15, "end": 557.37, "word": " chapter", "probability": 0.62109375}, {"start": 557.37, "end": 557.71, "word": " 10", "probability": 0.8095703125}, {"start": 557.71, "end": 558.33, "word": " when", "probability": 0.54638671875}, {"start": 558.33, "end": 558.45, "word": " we", "probability": 0.95751953125}, {"start": 558.45, "end": 558.57, "word": " are", "probability": 0.62353515625}, {"start": 558.57, "end": 558.93, "word": " talking", "probability": 0.84765625}, {"start": 558.93, "end": 559.47, "word": " about", "probability": 0.90625}], "temperature": 1.0}, {"id": 22, "seek": 58061, "start": 560.87, "end": 580.61, "text": " testing for the difference between two population proportions. It's similar, but here we will use a different statistic. It's called chi-square test. 
So we are going to test if there is no significant difference between the population proportions for males and females left-handed against there exists a difference.", "tokens": [4997, 337, 264, 2649, 1296, 732, 4415, 32482, 13, 467, 311, 2531, 11, 457, 510, 321, 486, 764, 257, 819, 29588, 13, 467, 311, 1219, 13228, 12, 33292, 543, 1500, 13, 407, 321, 366, 516, 281, 1500, 498, 456, 307, 572, 4776, 2649, 1296, 264, 4415, 32482, 337, 20776, 293, 21529, 1411, 12, 25407, 1970, 456, 8198, 257, 2649, 13], "avg_logprob": -0.21836577380289796, "compression_ratio": 1.726775956284153, "no_speech_prob": 0.0, "words": [{"start": 560.87, "end": 561.43, "word": " testing", "probability": 0.318115234375}, {"start": 561.43, "end": 561.73, "word": " for", "probability": 0.904296875}, {"start": 561.73, "end": 561.87, "word": " the", "probability": 0.89453125}, {"start": 561.87, "end": 562.31, "word": " difference", "probability": 0.8447265625}, {"start": 562.31, "end": 562.65, "word": " between", "probability": 0.87255859375}, {"start": 562.65, "end": 562.87, "word": " two", "probability": 0.857421875}, {"start": 562.87, "end": 563.31, "word": " population", "probability": 0.80810546875}, {"start": 563.31, "end": 563.83, "word": " proportions.", "probability": 0.67626953125}, {"start": 564.17, "end": 564.37, "word": " It's", "probability": 0.82470703125}, {"start": 564.37, "end": 564.71, "word": " similar,", "probability": 0.96337890625}, {"start": 565.15, "end": 565.35, "word": " but", "probability": 0.9228515625}, {"start": 565.35, "end": 565.57, "word": " here", "probability": 0.84033203125}, {"start": 565.57, "end": 565.73, "word": " we", "probability": 0.85595703125}, {"start": 565.73, "end": 565.93, "word": " will", "probability": 0.8369140625}, {"start": 565.93, "end": 566.19, "word": " use", "probability": 0.8740234375}, {"start": 566.19, "end": 566.47, "word": " a", "probability": 0.83544921875}, {"start": 566.47, "end": 566.89, "word": " different", "probability": 
0.87646484375}, {"start": 566.89, "end": 567.51, "word": " statistic.", "probability": 0.76708984375}, {"start": 567.75, "end": 567.75, "word": " It's", "probability": 0.914794921875}, {"start": 567.75, "end": 568.01, "word": " called", "probability": 0.8974609375}, {"start": 568.01, "end": 568.29, "word": " chi", "probability": 0.2236328125}, {"start": 568.29, "end": 568.59, "word": "-square", "probability": 0.81103515625}, {"start": 568.59, "end": 568.87, "word": " test.", "probability": 0.849609375}, {"start": 569.41, "end": 569.63, "word": " So", "probability": 0.88818359375}, {"start": 569.63, "end": 569.73, "word": " we", "probability": 0.65673828125}, {"start": 569.73, "end": 569.85, "word": " are", "probability": 0.9150390625}, {"start": 569.85, "end": 570.09, "word": " going", "probability": 0.94287109375}, {"start": 570.09, "end": 570.27, "word": " to", "probability": 0.96875}, {"start": 570.27, "end": 570.59, "word": " test", "probability": 0.8681640625}, {"start": 570.59, "end": 571.81, "word": " if", "probability": 0.89404296875}, {"start": 571.81, "end": 572.09, "word": " there", "probability": 0.91455078125}, {"start": 572.09, "end": 572.23, "word": " is", "probability": 0.9228515625}, {"start": 572.23, "end": 572.37, "word": " no", "probability": 0.95556640625}, {"start": 572.37, "end": 572.85, "word": " significant", "probability": 0.84326171875}, {"start": 572.85, "end": 573.29, "word": " difference", "probability": 0.86181640625}, {"start": 573.29, "end": 573.59, "word": " between", "probability": 0.85107421875}, {"start": 573.59, "end": 573.81, "word": " the", "probability": 0.9091796875}, {"start": 573.81, "end": 574.27, "word": " population", "probability": 0.93115234375}, {"start": 574.27, "end": 574.87, "word": " proportions", "probability": 0.701171875}, {"start": 574.87, "end": 575.23, "word": " for", "probability": 0.931640625}, {"start": 575.23, "end": 575.55, "word": " males", "probability": 0.9365234375}, {"start": 575.55, "end": 
576.27, "word": " and", "probability": 0.9453125}, {"start": 576.27, "end": 576.75, "word": " females", "probability": 0.95263671875}, {"start": 576.75, "end": 577.03, "word": " left", "probability": 0.7060546875}, {"start": 577.03, "end": 577.49, "word": "-handed", "probability": 0.78076171875}, {"start": 577.49, "end": 578.55, "word": " against", "probability": 0.7568359375}, {"start": 578.55, "end": 579.23, "word": " there", "probability": 0.430908203125}, {"start": 579.23, "end": 579.77, "word": " exists", "probability": 0.76806640625}, {"start": 579.77, "end": 580.05, "word": " a", "probability": 0.9951171875}, {"start": 580.05, "end": 580.61, "word": " difference.", "probability": 0.85009765625}], "temperature": 1.0}, {"id": 23, "seek": 61020, "start": 581.26, "end": 610.2, "text": " In this case, always we have two-sided test for chi-square. Chi-square never be negative, chi-square is always positive. So here we are talking about two-sided test. It means the two proportions are not the same. Hand preference is not independent of gender. In other words, we can say that hand preference is not independent of gender. 
So here we can say that hand preference is", "tokens": [682, 341, 1389, 11, 1009, 321, 362, 732, 12, 30941, 1500, 337, 13228, 12, 33292, 543, 13, 17730, 12, 33292, 543, 1128, 312, 3671, 11, 13228, 12, 33292, 543, 307, 1009, 3353, 13, 407, 510, 321, 366, 1417, 466, 732, 12, 30941, 1500, 13, 467, 1355, 264, 732, 32482, 366, 406, 264, 912, 13, 8854, 17502, 307, 406, 6695, 295, 7898, 13, 682, 661, 2283, 11, 321, 393, 584, 300, 1011, 17502, 307, 406, 6695, 295, 7898, 13, 407, 510, 321, 393, 584, 300, 1011, 17502, 307], "avg_logprob": -0.15997870012440465, "compression_ratio": 1.958762886597938, "no_speech_prob": 0.0, "words": [{"start": 581.26, "end": 581.54, "word": " In", "probability": 0.77197265625}, {"start": 581.54, "end": 581.76, "word": " this", "probability": 0.94189453125}, {"start": 581.76, "end": 582.1, "word": " case,", "probability": 0.908203125}, {"start": 582.56, "end": 582.94, "word": " always", "probability": 0.75537109375}, {"start": 582.94, "end": 583.2, "word": " we", "probability": 0.77978515625}, {"start": 583.2, "end": 583.38, "word": " have", "probability": 0.9345703125}, {"start": 583.38, "end": 583.58, "word": " two", "probability": 0.80712890625}, {"start": 583.58, "end": 583.82, "word": "-sided", "probability": 0.719482421875}, {"start": 583.82, "end": 584.18, "word": " test", "probability": 0.64892578125}, {"start": 584.18, "end": 584.48, "word": " for", "probability": 0.8583984375}, {"start": 584.48, "end": 584.68, "word": " chi", "probability": 0.279541015625}, {"start": 584.68, "end": 584.98, "word": "-square.", "probability": 0.9007161458333334}, {"start": 585.6, "end": 585.96, "word": " Chi", "probability": 0.70703125}, {"start": 585.96, "end": 586.28, "word": "-square", "probability": 0.95361328125}, {"start": 586.28, "end": 586.62, "word": " never", "probability": 0.77490234375}, {"start": 586.62, "end": 587.56, "word": " be", "probability": 0.8447265625}, {"start": 587.56, "end": 588.0, "word": " negative,", "probability": 
0.9345703125}, {"start": 588.44, "end": 588.62, "word": " chi", "probability": 0.814453125}, {"start": 588.62, "end": 588.88, "word": "-square", "probability": 0.96826171875}, {"start": 588.88, "end": 589.06, "word": " is", "probability": 0.9326171875}, {"start": 589.06, "end": 589.52, "word": " always", "probability": 0.8984375}, {"start": 589.52, "end": 590.02, "word": " positive.", "probability": 0.935546875}, {"start": 590.64, "end": 590.86, "word": " So", "probability": 0.9296875}, {"start": 590.86, "end": 591.08, "word": " here", "probability": 0.6435546875}, {"start": 591.08, "end": 591.22, "word": " we", "probability": 0.80224609375}, {"start": 591.22, "end": 591.38, "word": " are", "probability": 0.92138671875}, {"start": 591.38, "end": 591.68, "word": " talking", "probability": 0.85107421875}, {"start": 591.68, "end": 592.04, "word": " about", "probability": 0.90478515625}, {"start": 592.04, "end": 592.3, "word": " two", "probability": 0.9072265625}, {"start": 592.3, "end": 592.6, "word": "-sided", "probability": 0.92626953125}, {"start": 592.6, "end": 592.96, "word": " test.", "probability": 0.86767578125}, {"start": 593.44, "end": 593.62, "word": " It", "probability": 0.94921875}, {"start": 593.62, "end": 594.06, "word": " means", "probability": 0.923828125}, {"start": 594.06, "end": 594.82, "word": " the", "probability": 0.7373046875}, {"start": 594.82, "end": 595.06, "word": " two", "probability": 0.94970703125}, {"start": 595.06, "end": 595.62, "word": " proportions", "probability": 0.79443359375}, {"start": 595.62, "end": 596.3, "word": " are", "probability": 0.943359375}, {"start": 596.3, "end": 596.6, "word": " not", "probability": 0.947265625}, {"start": 596.6, "end": 596.84, "word": " the", "probability": 0.81396484375}, {"start": 596.84, "end": 597.14, "word": " same.", "probability": 0.90625}, {"start": 599.14, "end": 599.36, "word": " Hand", "probability": 0.7529296875}, {"start": 599.36, "end": 599.74, "word": " preference", "probability": 
0.85400390625}, {"start": 599.74, "end": 600.1, "word": " is", "probability": 0.947265625}, {"start": 600.1, "end": 600.34, "word": " not", "probability": 0.94140625}, {"start": 600.34, "end": 600.72, "word": " independent", "probability": 0.90283203125}, {"start": 600.72, "end": 600.98, "word": " of", "probability": 0.9619140625}, {"start": 600.98, "end": 601.18, "word": " gender.", "probability": 0.97119140625}, {"start": 601.4, "end": 601.56, "word": " In", "probability": 0.83203125}, {"start": 601.56, "end": 601.72, "word": " other", "probability": 0.8310546875}, {"start": 601.72, "end": 602.94, "word": " words,", "probability": 0.7734375}, {"start": 603.12, "end": 603.12, "word": " we", "probability": 0.82666015625}, {"start": 603.12, "end": 603.3, "word": " can", "probability": 0.9443359375}, {"start": 603.3, "end": 603.52, "word": " say", "probability": 0.892578125}, {"start": 603.52, "end": 603.86, "word": " that", "probability": 0.9345703125}, {"start": 603.86, "end": 604.8, "word": " hand", "probability": 0.81201171875}, {"start": 604.8, "end": 605.3, "word": " preference", "probability": 0.9169921875}, {"start": 605.3, "end": 605.74, "word": " is", "probability": 0.94921875}, {"start": 605.74, "end": 605.94, "word": " not", "probability": 0.94287109375}, {"start": 605.94, "end": 606.4, "word": " independent", "probability": 0.88818359375}, {"start": 606.4, "end": 606.84, "word": " of", "probability": 0.95849609375}, {"start": 606.84, "end": 607.08, "word": " gender.", "probability": 0.90283203125}, {"start": 607.74, "end": 607.94, "word": " So", "probability": 0.951171875}, {"start": 607.94, "end": 608.16, "word": " here", "probability": 0.81591796875}, {"start": 608.16, "end": 608.32, "word": " we", "probability": 0.7666015625}, {"start": 608.32, "end": 608.5, "word": " can", "probability": 0.9404296875}, {"start": 608.5, "end": 608.68, "word": " say", "probability": 0.8466796875}, {"start": 608.68, "end": 608.98, "word": " that", "probability": 
0.9345703125}, {"start": 608.98, "end": 609.28, "word": " hand", "probability": 0.876953125}, {"start": 609.28, "end": 609.76, "word": " preference", "probability": 0.92138671875}, {"start": 609.76, "end": 610.2, "word": " is", "probability": 0.94287109375}], "temperature": 1.0}, {"id": 24, "seek": 63052, "start": 610.74, "end": 630.52, "text": " It is independent of gender. So it means under H0, we assume hand preference and gender are independent or", "tokens": [467, 307, 6695, 295, 7898, 13, 407, 309, 1355, 833, 389, 15, 11, 321, 6552, 1011, 17502, 293, 7898, 366, 6695, 420], "avg_logprob": -0.4735054321911024, "compression_ratio": 1.2298850574712643, "no_speech_prob": 0.0, "words": [{"start": 610.74, "end": 611.06, "word": " It", "probability": 0.078369140625}, {"start": 611.06, "end": 611.4, "word": " is", "probability": 0.483642578125}, {"start": 611.4, "end": 612.64, "word": " independent", "probability": 0.8583984375}, {"start": 612.64, "end": 614.08, "word": " of", "probability": 0.80908203125}, {"start": 614.08, "end": 614.38, "word": " gender.", "probability": 0.822265625}, {"start": 614.56, "end": 614.68, "word": " So", "probability": 0.763671875}, {"start": 614.68, "end": 615.02, "word": " it", "probability": 0.71435546875}, {"start": 615.02, "end": 615.36, "word": " means", "probability": 0.9248046875}, {"start": 615.36, "end": 615.78, "word": " under", "probability": 0.7138671875}, {"start": 615.78, "end": 616.28, "word": " H0,", "probability": 0.6434326171875}, {"start": 616.78, "end": 616.92, "word": " we", "probability": 0.9072265625}, {"start": 616.92, "end": 617.44, "word": " assume", "probability": 0.89892578125}, {"start": 617.44, "end": 618.36, "word": " hand", "probability": 0.269775390625}, {"start": 618.36, "end": 621.36, "word": " preference", "probability": 0.693359375}, {"start": 621.36, "end": 623.28, "word": " and", "probability": 0.9072265625}, {"start": 623.28, "end": 623.74, "word": " gender", "probability": 0.87890625}, {"start": 
623.74, "end": 625.52, "word": " are", "probability": 0.92041015625}, {"start": 625.52, "end": 627.72, "word": " independent", "probability": 0.892578125}, {"start": 627.72, "end": 630.52, "word": " or", "probability": 0.3857421875}], "temperature": 1.0}, {"id": 25, "seek": 66178, "start": 633.54, "end": 661.78, "text": " A proportion of females who are left-handed is equal to the proportion of males who are left-handed. It means they are independent. I mean, hand preference and gender are independent against either. You may write that the two proportions are not the same or the two variables are dependent. So you can say that hand preference", "tokens": [316, 16068, 295, 21529, 567, 366, 1411, 12, 25407, 307, 2681, 281, 264, 16068, 295, 20776, 567, 366, 1411, 12, 25407, 13, 467, 1355, 436, 366, 6695, 13, 286, 914, 11, 1011, 17502, 293, 7898, 366, 6695, 1970, 2139, 13, 509, 815, 2464, 300, 264, 732, 32482, 366, 406, 264, 912, 420, 264, 732, 9102, 366, 12334, 13, 407, 291, 393, 584, 300, 1011, 17502], "avg_logprob": -0.1513967854958592, "compression_ratio": 1.8901734104046244, "no_speech_prob": 0.0, "words": [{"start": 633.54, "end": 633.72, "word": " A", "probability": 0.4609375}, {"start": 633.72, "end": 634.16, "word": " proportion", "probability": 0.81689453125}, {"start": 634.16, "end": 634.7, "word": " of", "probability": 0.97265625}, {"start": 634.7, "end": 635.14, "word": " females", "probability": 0.94677734375}, {"start": 635.14, "end": 635.38, "word": " who", "probability": 0.9072265625}, {"start": 635.38, "end": 635.64, "word": " are", "probability": 0.94580078125}, {"start": 635.64, "end": 635.92, "word": " left", "probability": 0.94189453125}, {"start": 635.92, "end": 636.22, "word": "-handed", "probability": 0.833251953125}, {"start": 636.22, "end": 637.62, "word": " is", "probability": 0.88818359375}, {"start": 637.62, "end": 638.02, "word": " equal", "probability": 0.8916015625}, {"start": 638.02, "end": 638.34, "word": " to", "probability": 
0.9697265625}, {"start": 638.34, "end": 638.48, "word": " the", "probability": 0.90087890625}, {"start": 638.48, "end": 638.9, "word": " proportion", "probability": 0.8759765625}, {"start": 638.9, "end": 639.14, "word": " of", "probability": 0.94580078125}, {"start": 639.14, "end": 639.38, "word": " males", "probability": 0.94921875}, {"start": 639.38, "end": 639.6, "word": " who", "probability": 0.90869140625}, {"start": 639.6, "end": 640.0, "word": " are", "probability": 0.9443359375}, {"start": 640.0, "end": 640.86, "word": " left", "probability": 0.9423828125}, {"start": 640.86, "end": 641.12, "word": "-handed.", "probability": 0.919189453125}, {"start": 641.14, "end": 641.34, "word": " It", "probability": 0.8203125}, {"start": 641.34, "end": 641.64, "word": " means", "probability": 0.9287109375}, {"start": 641.64, "end": 642.42, "word": " they", "probability": 0.77099609375}, {"start": 642.42, "end": 642.72, "word": " are", "probability": 0.9423828125}, {"start": 642.72, "end": 643.12, "word": " independent.", "probability": 0.896484375}, {"start": 643.4, "end": 643.54, "word": " I", "probability": 0.9453125}, {"start": 643.54, "end": 643.74, "word": " mean,", "probability": 0.96240234375}, {"start": 643.84, "end": 644.06, "word": " hand", "probability": 0.83740234375}, {"start": 644.06, "end": 644.44, "word": " preference", "probability": 0.88330078125}, {"start": 644.44, "end": 645.36, "word": " and", "probability": 0.94140625}, {"start": 645.36, "end": 645.78, "word": " gender", "probability": 0.884765625}, {"start": 645.78, "end": 646.32, "word": " are", "probability": 0.94140625}, {"start": 646.32, "end": 646.8, "word": " independent", "probability": 0.9111328125}, {"start": 646.8, "end": 647.52, "word": " against", "probability": 0.50830078125}, {"start": 647.52, "end": 648.78, "word": " either.", "probability": 0.88916015625}, {"start": 649.5, "end": 649.78, "word": " You", "probability": 0.95703125}, {"start": 649.78, "end": 649.92, "word": " may", 
"probability": 0.93212890625}, {"start": 649.92, "end": 650.24, "word": " write", "probability": 0.89453125}, {"start": 650.24, "end": 650.64, "word": " that", "probability": 0.916015625}, {"start": 650.64, "end": 651.42, "word": " the", "probability": 0.89892578125}, {"start": 651.42, "end": 651.58, "word": " two", "probability": 0.94921875}, {"start": 651.58, "end": 652.1, "word": " proportions", "probability": 0.82080078125}, {"start": 652.1, "end": 652.42, "word": " are", "probability": 0.92822265625}, {"start": 652.42, "end": 652.62, "word": " not", "probability": 0.8955078125}, {"start": 652.62, "end": 652.76, "word": " the", "probability": 0.92529296875}, {"start": 652.76, "end": 653.04, "word": " same", "probability": 0.90478515625}, {"start": 653.04, "end": 654.5, "word": " or", "probability": 0.47216796875}, {"start": 654.5, "end": 655.14, "word": " the", "probability": 0.802734375}, {"start": 655.14, "end": 655.4, "word": " two", "probability": 0.939453125}, {"start": 655.4, "end": 656.1, "word": " variables", "probability": 0.9599609375}, {"start": 656.1, "end": 657.5, "word": " are", "probability": 0.94189453125}, {"start": 657.5, "end": 658.0, "word": " dependent.", "probability": 0.95458984375}, {"start": 659.12, "end": 659.76, "word": " So", "probability": 0.94580078125}, {"start": 659.76, "end": 660.14, "word": " you", "probability": 0.8212890625}, {"start": 660.14, "end": 660.34, "word": " can", "probability": 0.9423828125}, {"start": 660.34, "end": 660.54, "word": " say", "probability": 0.88134765625}, {"start": 660.54, "end": 660.84, "word": " that", "probability": 0.93310546875}, {"start": 660.84, "end": 661.18, "word": " hand", "probability": 0.90087890625}, {"start": 661.18, "end": 661.78, "word": " preference", "probability": 0.91357421875}], "temperature": 1.0}, {"id": 26, "seek": 69488, "start": 665.62, "end": 694.88, "text": " and gender are either you may say that are not independent or related or dependent so again not independent means 
either they are related or dependent", "tokens": [293, 7898, 366, 2139, 291, 815, 584, 300, 366, 406, 6695, 420, 4077, 420, 12334, 370, 797, 406, 6695, 1355, 2139, 436, 366, 4077, 420, 12334], "avg_logprob": -0.28877315918604535, "compression_ratio": 1.696629213483146, "no_speech_prob": 0.0, "words": [{"start": 665.62, "end": 665.98, "word": " and", "probability": 0.255859375}, {"start": 665.98, "end": 666.4, "word": " gender", "probability": 0.71337890625}, {"start": 666.4, "end": 669.16, "word": " are", "probability": 0.76953125}, {"start": 669.16, "end": 669.82, "word": " either", "probability": 0.88720703125}, {"start": 669.82, "end": 670.58, "word": " you", "probability": 0.361572265625}, {"start": 670.58, "end": 670.82, "word": " may", "probability": 0.775390625}, {"start": 670.82, "end": 671.04, "word": " say", "probability": 0.93505859375}, {"start": 671.04, "end": 671.4, "word": " that", "probability": 0.57275390625}, {"start": 671.4, "end": 671.92, "word": " are", "probability": 0.74072265625}, {"start": 671.92, "end": 672.36, "word": " not", "probability": 0.9267578125}, {"start": 672.36, "end": 673.98, "word": " independent", "probability": 0.91162109375}, {"start": 673.98, "end": 676.6, "word": " or", "probability": 0.8701171875}, {"start": 676.6, "end": 678.22, "word": " related", "probability": 0.958984375}, {"start": 678.22, "end": 679.7, "word": " or", "probability": 0.9296875}, {"start": 679.7, "end": 682.0, "word": " dependent", "probability": 0.748046875}, {"start": 682.0, "end": 685.18, "word": " so", "probability": 0.28466796875}, {"start": 685.18, "end": 685.5, "word": " again", "probability": 0.76318359375}, {"start": 685.5, "end": 686.62, "word": " not", "probability": 0.53955078125}, {"start": 686.62, "end": 687.1, "word": " independent", "probability": 0.91796875}, {"start": 687.1, "end": 687.84, "word": " means", "probability": 0.9189453125}, {"start": 687.84, "end": 689.34, "word": " either", "probability": 0.9384765625}, {"start": 
689.34, "end": 690.06, "word": " they", "probability": 0.896484375}, {"start": 690.06, "end": 690.44, "word": " are", "probability": 0.94580078125}, {"start": 690.44, "end": 692.14, "word": " related", "probability": 0.953125}, {"start": 692.14, "end": 693.28, "word": " or", "probability": 0.9658203125}, {"start": 693.28, "end": 694.88, "word": " dependent", "probability": 0.78125}], "temperature": 1.0}, {"id": 27, "seek": 72175, "start": 696.11, "end": 721.75, "text": " Now, if H0 is true, if we assume H0 is true, it means the proportion of left-handed females should be the same as the proportion of left-handed males. It says that the proportion is the same as, not equal to. Because if we reject the null hypothesis,", "tokens": [823, 11, 498, 389, 15, 307, 2074, 11, 498, 321, 6552, 389, 15, 307, 2074, 11, 309, 1355, 264, 16068, 295, 1411, 12, 25407, 21529, 820, 312, 264, 912, 382, 264, 16068, 295, 1411, 12, 25407, 20776, 13, 467, 1619, 300, 264, 16068, 307, 264, 912, 382, 11, 406, 2681, 281, 13, 1436, 498, 321, 8248, 264, 18184, 17291, 11], "avg_logprob": -0.14805327673427393, "compression_ratio": 1.7430555555555556, "no_speech_prob": 0.0, "words": [{"start": 696.11, "end": 696.49, "word": " Now,", "probability": 0.7783203125}, {"start": 696.81, "end": 697.27, "word": " if", "probability": 0.9501953125}, {"start": 697.27, "end": 697.71, "word": " H0", "probability": 0.726806640625}, {"start": 697.71, "end": 697.91, "word": " is", "probability": 0.90478515625}, {"start": 697.91, "end": 698.19, "word": " true,", "probability": 0.94677734375}, {"start": 698.77, "end": 698.93, "word": " if", "probability": 0.77978515625}, {"start": 698.93, "end": 699.11, "word": " we", "probability": 0.9453125}, {"start": 699.11, "end": 699.59, "word": " assume", "probability": 0.90673828125}, {"start": 699.59, "end": 700.03, "word": " H0", "probability": 0.871337890625}, {"start": 700.03, "end": 700.19, "word": " is", "probability": 0.93017578125}, {"start": 700.19, "end": 700.43, 
"word": " true,", "probability": 0.9658203125}, {"start": 700.57, "end": 700.69, "word": " it", "probability": 0.92529296875}, {"start": 700.69, "end": 701.07, "word": " means", "probability": 0.9345703125}, {"start": 701.07, "end": 701.89, "word": " the", "probability": 0.7744140625}, {"start": 701.89, "end": 702.33, "word": " proportion", "probability": 0.80517578125}, {"start": 702.33, "end": 702.59, "word": " of", "probability": 0.97216796875}, {"start": 702.59, "end": 702.81, "word": " left", "probability": 0.94189453125}, {"start": 702.81, "end": 703.25, "word": "-handed", "probability": 0.84423828125}, {"start": 703.25, "end": 703.69, "word": " females", "probability": 0.9384765625}, {"start": 703.69, "end": 704.01, "word": " should", "probability": 0.955078125}, {"start": 704.01, "end": 704.35, "word": " be", "probability": 0.95166015625}, {"start": 704.35, "end": 706.39, "word": " the", "probability": 0.8564453125}, {"start": 706.39, "end": 706.69, "word": " same", "probability": 0.8955078125}, {"start": 706.69, "end": 707.15, "word": " as", "probability": 0.9599609375}, {"start": 707.15, "end": 707.85, "word": " the", "probability": 0.90576171875}, {"start": 707.85, "end": 708.27, "word": " proportion", "probability": 0.86767578125}, {"start": 708.27, "end": 708.55, "word": " of", "probability": 0.96630859375}, {"start": 708.55, "end": 708.79, "word": " left", "probability": 0.9208984375}, {"start": 708.79, "end": 709.37, "word": "-handed", "probability": 0.915283203125}, {"start": 709.37, "end": 710.11, "word": " males.", "probability": 0.947265625}, {"start": 711.07, "end": 711.77, "word": " It", "probability": 0.931640625}, {"start": 711.77, "end": 712.07, "word": " says", "probability": 0.876953125}, {"start": 712.07, "end": 712.39, "word": " that", "probability": 0.87158203125}, {"start": 712.39, "end": 713.43, "word": " the", "probability": 0.77392578125}, {"start": 713.43, "end": 713.99, "word": " proportion", "probability": 0.83544921875}, 
{"start": 713.99, "end": 716.05, "word": " is", "probability": 0.379150390625}, {"start": 716.05, "end": 716.13, "word": " the", "probability": 0.82275390625}, {"start": 716.13, "end": 716.45, "word": " same", "probability": 0.90869140625}, {"start": 716.45, "end": 716.87, "word": " as,", "probability": 0.90478515625}, {"start": 716.89, "end": 717.11, "word": " not", "probability": 0.9326171875}, {"start": 717.11, "end": 717.47, "word": " equal", "probability": 0.892578125}, {"start": 717.47, "end": 717.79, "word": " to.", "probability": 0.96435546875}, {"start": 718.49, "end": 719.11, "word": " Because", "probability": 0.91064453125}, {"start": 719.11, "end": 719.97, "word": " if", "probability": 0.79296875}, {"start": 719.97, "end": 720.51, "word": " we", "probability": 0.95458984375}, {"start": 720.51, "end": 720.93, "word": " reject", "probability": 0.923828125}, {"start": 720.93, "end": 721.13, "word": " the", "probability": 0.884765625}, {"start": 721.13, "end": 721.25, "word": " null", "probability": 0.96875}, {"start": 721.25, "end": 721.75, "word": " hypothesis,", "probability": 0.7822265625}], "temperature": 1.0}, {"id": 28, "seek": 74454, "start": 723.38, "end": 744.54, "text": " Then we have sufficient evidence to support the alternative. But if we don't reject the null, it doesn't imply that H0 is true. It means there is insufficient evidence to support the alternative hypothesis. So it's better to say that the two proportions are the same. Same does not mean equal. 
Same means", "tokens": [1396, 321, 362, 11563, 4467, 281, 1406, 264, 8535, 13, 583, 498, 321, 500, 380, 8248, 264, 18184, 11, 309, 1177, 380, 33616, 300, 389, 15, 307, 2074, 13, 467, 1355, 456, 307, 41709, 4467, 281, 1406, 264, 8535, 17291, 13, 407, 309, 311, 1101, 281, 584, 300, 264, 732, 32482, 366, 264, 912, 13, 10635, 775, 406, 914, 2681, 13, 10635, 1355], "avg_logprob": -0.1495361344423145, "compression_ratio": 1.7329545454545454, "no_speech_prob": 0.0, "words": [{"start": 723.38, "end": 723.74, "word": " Then", "probability": 0.35498046875}, {"start": 723.74, "end": 723.86, "word": " we", "probability": 0.80419921875}, {"start": 723.86, "end": 724.02, "word": " have", "probability": 0.9248046875}, {"start": 724.02, "end": 724.42, "word": " sufficient", "probability": 0.90380859375}, {"start": 724.42, "end": 724.84, "word": " evidence", "probability": 0.94580078125}, {"start": 724.84, "end": 725.02, "word": " to", "probability": 0.9130859375}, {"start": 725.02, "end": 725.46, "word": " support", "probability": 0.9814453125}, {"start": 725.46, "end": 725.72, "word": " the", "probability": 0.654296875}, {"start": 725.72, "end": 725.98, "word": " alternative.", "probability": 0.5478515625}, {"start": 726.92, "end": 727.26, "word": " But", "probability": 0.87060546875}, {"start": 727.26, "end": 727.44, "word": " if", "probability": 0.91015625}, {"start": 727.44, "end": 727.58, "word": " we", "probability": 0.90966796875}, {"start": 727.58, "end": 727.82, "word": " don't", "probability": 0.942138671875}, {"start": 727.82, "end": 728.22, "word": " reject", "probability": 0.927734375}, {"start": 728.22, "end": 728.42, "word": " the", "probability": 0.9169921875}, {"start": 728.42, "end": 728.64, "word": " null,", "probability": 0.9375}, {"start": 728.82, "end": 729.0, "word": " it", "probability": 0.93310546875}, {"start": 729.0, "end": 729.44, "word": " doesn't", "probability": 0.944580078125}, {"start": 729.44, "end": 730.06, "word": " imply", "probability": 
0.89892578125}, {"start": 730.06, "end": 730.64, "word": " that", "probability": 0.94189453125}, {"start": 730.64, "end": 731.12, "word": " H0", "probability": 0.75634765625}, {"start": 731.12, "end": 731.28, "word": " is", "probability": 0.9453125}, {"start": 731.28, "end": 731.62, "word": " true.", "probability": 0.96826171875}, {"start": 732.16, "end": 732.52, "word": " It", "probability": 0.9462890625}, {"start": 732.52, "end": 732.8, "word": " means", "probability": 0.92626953125}, {"start": 732.8, "end": 733.0, "word": " there", "probability": 0.80224609375}, {"start": 733.0, "end": 733.18, "word": " is", "probability": 0.912109375}, {"start": 733.18, "end": 733.74, "word": " insufficient", "probability": 0.876953125}, {"start": 733.74, "end": 734.42, "word": " evidence", "probability": 0.95556640625}, {"start": 734.42, "end": 734.6, "word": " to", "probability": 0.9501953125}, {"start": 734.6, "end": 735.0, "word": " support", "probability": 0.986328125}, {"start": 735.0, "end": 735.22, "word": " the", "probability": 0.861328125}, {"start": 735.22, "end": 735.58, "word": " alternative", "probability": 0.69384765625}, {"start": 735.58, "end": 736.14, "word": " hypothesis.", "probability": 0.79052734375}, {"start": 737.04, "end": 737.42, "word": " So", "probability": 0.90478515625}, {"start": 737.42, "end": 738.22, "word": " it's", "probability": 0.78955078125}, {"start": 738.22, "end": 738.42, "word": " better", "probability": 0.92919921875}, {"start": 738.42, "end": 738.64, "word": " to", "probability": 0.96630859375}, {"start": 738.64, "end": 738.82, "word": " say", "probability": 0.94384765625}, {"start": 738.82, "end": 739.1, "word": " that", "probability": 0.93017578125}, {"start": 739.1, "end": 739.3, "word": " the", "probability": 0.88818359375}, {"start": 739.3, "end": 739.52, "word": " two", "probability": 0.93896484375}, {"start": 739.52, "end": 740.06, "word": " proportions", "probability": 0.826171875}, {"start": 740.06, "end": 740.5, "word": " 
are", "probability": 0.94189453125}, {"start": 740.5, "end": 740.72, "word": " the", "probability": 0.92333984375}, {"start": 740.72, "end": 741.0, "word": " same.", "probability": 0.9150390625}, {"start": 741.76, "end": 742.04, "word": " Same", "probability": 0.68994140625}, {"start": 742.04, "end": 742.24, "word": " does", "probability": 0.9677734375}, {"start": 742.24, "end": 742.44, "word": " not", "probability": 0.9482421875}, {"start": 742.44, "end": 742.68, "word": " mean", "probability": 0.95751953125}, {"start": 742.68, "end": 743.08, "word": " equal.", "probability": 0.87744140625}, {"start": 743.9, "end": 744.14, "word": " Same", "probability": 0.77392578125}, {"start": 744.14, "end": 744.54, "word": " means", "probability": 0.85693359375}], "temperature": 1.0}, {"id": 29, "seek": 77388, "start": 745.4, "end": 773.88, "text": " there exists a small difference, I mean not significant difference between the two proportions. So you have to be careful between, distinguish actually between same and equal. So same, it doesn't mean exactly they are equal, but they are roughly equal, or approximately, or they actually, they are close to each other. 
Against, here again, against the two population proportions are not the same.", "tokens": [456, 8198, 257, 1359, 2649, 11, 286, 914, 406, 4776, 2649, 1296, 264, 732, 32482, 13, 407, 291, 362, 281, 312, 5026, 1296, 11, 20206, 767, 1296, 912, 293, 2681, 13, 407, 912, 11, 309, 1177, 380, 914, 2293, 436, 366, 2681, 11, 457, 436, 366, 9810, 2681, 11, 420, 10447, 11, 420, 436, 767, 11, 436, 366, 1998, 281, 1184, 661, 13, 29995, 11, 510, 797, 11, 1970, 264, 732, 4415, 32482, 366, 406, 264, 912, 13], "avg_logprob": -0.19946598176714742, "compression_ratio": 1.7723214285714286, "no_speech_prob": 0.0, "words": [{"start": 745.4, "end": 745.92, "word": " there", "probability": 0.44287109375}, {"start": 745.92, "end": 746.38, "word": " exists", "probability": 0.736328125}, {"start": 746.38, "end": 748.1, "word": " a", "probability": 0.458740234375}, {"start": 748.1, "end": 748.46, "word": " small", "probability": 0.9150390625}, {"start": 748.46, "end": 749.04, "word": " difference,", "probability": 0.869140625}, {"start": 749.24, "end": 749.3, "word": " I", "probability": 0.865234375}, {"start": 749.3, "end": 749.42, "word": " mean", "probability": 0.9677734375}, {"start": 749.42, "end": 749.64, "word": " not", "probability": 0.6611328125}, {"start": 749.64, "end": 750.12, "word": " significant", "probability": 0.81298828125}, {"start": 750.12, "end": 750.58, "word": " difference", "probability": 0.86083984375}, {"start": 750.58, "end": 750.92, "word": " between", "probability": 0.72900390625}, {"start": 750.92, "end": 751.12, "word": " the", "probability": 0.88818359375}, {"start": 751.12, "end": 751.3, "word": " two", "probability": 0.93212890625}, {"start": 751.3, "end": 751.76, "word": " proportions.", "probability": 0.60791015625}, {"start": 752.64, "end": 752.92, "word": " So", "probability": 0.92822265625}, {"start": 752.92, "end": 753.04, "word": " you", "probability": 0.751953125}, {"start": 753.04, "end": 753.2, "word": " have", "probability": 0.94873046875}, 
{"start": 753.2, "end": 753.4, "word": " to", "probability": 0.97314453125}, {"start": 753.4, "end": 753.7, "word": " be", "probability": 0.931640625}, {"start": 753.7, "end": 754.1, "word": " careful", "probability": 0.96630859375}, {"start": 754.1, "end": 754.64, "word": " between,", "probability": 0.65234375}, {"start": 755.08, "end": 755.72, "word": " distinguish", "probability": 0.85595703125}, {"start": 755.72, "end": 756.4, "word": " actually", "probability": 0.84130859375}, {"start": 756.4, "end": 756.94, "word": " between", "probability": 0.80322265625}, {"start": 756.94, "end": 757.88, "word": " same", "probability": 0.857421875}, {"start": 757.88, "end": 758.14, "word": " and", "probability": 0.94482421875}, {"start": 758.14, "end": 758.48, "word": " equal.", "probability": 0.89306640625}, {"start": 759.02, "end": 759.24, "word": " So", "probability": 0.94287109375}, {"start": 759.24, "end": 759.7, "word": " same,", "probability": 0.8125}, {"start": 759.86, "end": 760.0, "word": " it", "probability": 0.9521484375}, {"start": 760.0, "end": 760.4, "word": " doesn't", "probability": 0.9541015625}, {"start": 760.4, "end": 760.72, "word": " mean", "probability": 0.951171875}, {"start": 760.72, "end": 761.26, "word": " exactly", "probability": 0.89794921875}, {"start": 761.26, "end": 761.52, "word": " they", "probability": 0.88037109375}, {"start": 761.52, "end": 761.68, "word": " are", "probability": 0.93505859375}, {"start": 761.68, "end": 762.02, "word": " equal,", "probability": 0.88623046875}, {"start": 762.44, "end": 762.7, "word": " but", "probability": 0.92724609375}, {"start": 762.7, "end": 762.98, "word": " they", "probability": 0.89404296875}, {"start": 762.98, "end": 763.16, "word": " are", "probability": 0.9423828125}, {"start": 763.16, "end": 763.56, "word": " roughly", "probability": 0.86669921875}, {"start": 763.56, "end": 764.0, "word": " equal,", "probability": 0.85595703125}, {"start": 764.34, "end": 764.58, "word": " or", "probability": 
0.9033203125}, {"start": 764.58, "end": 765.14, "word": " approximately,", "probability": 0.8662109375}, {"start": 765.26, "end": 765.38, "word": " or", "probability": 0.93212890625}, {"start": 765.38, "end": 765.58, "word": " they", "probability": 0.81201171875}, {"start": 765.58, "end": 766.22, "word": " actually,", "probability": 0.7421875}, {"start": 766.64, "end": 766.82, "word": " they", "probability": 0.89013671875}, {"start": 766.82, "end": 766.94, "word": " are", "probability": 0.9306640625}, {"start": 766.94, "end": 767.34, "word": " close", "probability": 0.806640625}, {"start": 767.34, "end": 767.54, "word": " to", "probability": 0.970703125}, {"start": 767.54, "end": 767.72, "word": " each", "probability": 0.9501953125}, {"start": 767.72, "end": 767.96, "word": " other.", "probability": 0.89306640625}, {"start": 768.88, "end": 769.32, "word": " Against,", "probability": 0.650390625}, {"start": 769.94, "end": 770.2, "word": " here", "probability": 0.81201171875}, {"start": 770.2, "end": 770.54, "word": " again,", "probability": 0.9375}, {"start": 770.62, "end": 771.0, "word": " against", "probability": 0.916015625}, {"start": 771.0, "end": 771.28, "word": " the", "probability": 0.7509765625}, {"start": 771.28, "end": 771.5, "word": " two", "probability": 0.93994140625}, {"start": 771.5, "end": 772.08, "word": " population", "probability": 0.84423828125}, {"start": 772.08, "end": 772.64, "word": " proportions", "probability": 0.81640625}, {"start": 772.64, "end": 773.08, "word": " are", "probability": 0.92138671875}, {"start": 773.08, "end": 773.36, "word": " not", "probability": 0.94482421875}, {"start": 773.36, "end": 773.62, "word": " the", "probability": 0.92236328125}, {"start": 773.62, "end": 773.88, "word": " same.", "probability": 0.9091796875}], "temperature": 1.0}, {"id": 30, "seek": 80328, "start": 775.32, "end": 803.28, "text": " So let's see how can we examine this null hypothesis by using a new statistic. 
This statistic is called Chi-square. Chi-square is denoted by this Greek letter, Chi-square. It's a Greek letter. It's pronounced as Chi, C-H-I, Chi-square. It looks like X.", "tokens": [407, 718, 311, 536, 577, 393, 321, 17496, 341, 18184, 17291, 538, 1228, 257, 777, 29588, 13, 639, 29588, 307, 1219, 17730, 12, 33292, 543, 13, 17730, 12, 33292, 543, 307, 1441, 23325, 538, 341, 10281, 5063, 11, 17730, 12, 33292, 543, 13, 467, 311, 257, 10281, 5063, 13, 467, 311, 23155, 382, 17730, 11, 383, 12, 39, 12, 40, 11, 17730, 12, 33292, 543, 13, 467, 1542, 411, 1783, 13], "avg_logprob": -0.16330295532113975, "compression_ratio": 1.6114649681528663, "no_speech_prob": 0.0, "words": [{"start": 775.32, "end": 775.58, "word": " So", "probability": 0.90380859375}, {"start": 775.58, "end": 775.8, "word": " let's", "probability": 0.838134765625}, {"start": 775.8, "end": 775.92, "word": " see", "probability": 0.919921875}, {"start": 775.92, "end": 776.04, "word": " how", "probability": 0.92919921875}, {"start": 776.04, "end": 776.26, "word": " can", "probability": 0.67626953125}, {"start": 776.26, "end": 776.86, "word": " we", "probability": 0.9462890625}, {"start": 776.86, "end": 777.8, "word": " examine", "probability": 0.9384765625}, {"start": 777.8, "end": 779.14, "word": " this", "probability": 0.92626953125}, {"start": 779.14, "end": 779.4, "word": " null", "probability": 0.873046875}, {"start": 779.4, "end": 779.9, "word": " hypothesis", "probability": 0.86328125}, {"start": 779.9, "end": 780.24, "word": " by", "probability": 0.94580078125}, {"start": 780.24, "end": 780.66, "word": " using", "probability": 0.93505859375}, {"start": 780.66, "end": 780.82, "word": " a", "probability": 0.58935546875}, {"start": 780.82, "end": 780.98, "word": " new", "probability": 0.91552734375}, {"start": 780.98, "end": 781.42, "word": " statistic.", "probability": 0.89892578125}, {"start": 781.98, "end": 782.5, "word": " This", "probability": 0.86767578125}, {"start": 782.5, "end": 783.0, 
"word": " statistic", "probability": 0.92578125}, {"start": 783.0, "end": 783.38, "word": " is", "probability": 0.95068359375}, {"start": 783.38, "end": 783.84, "word": " called", "probability": 0.90087890625}, {"start": 783.84, "end": 784.76, "word": " Chi", "probability": 0.40185546875}, {"start": 784.76, "end": 785.16, "word": "-square.", "probability": 0.77294921875}, {"start": 785.92, "end": 786.18, "word": " Chi", "probability": 0.73095703125}, {"start": 786.18, "end": 786.5, "word": "-square", "probability": 0.9596354166666666}, {"start": 786.5, "end": 786.7, "word": " is", "probability": 0.9453125}, {"start": 786.7, "end": 788.46, "word": " denoted", "probability": 0.884033203125}, {"start": 788.46, "end": 788.94, "word": " by", "probability": 0.96630859375}, {"start": 788.94, "end": 790.6, "word": " this", "probability": 0.9169921875}, {"start": 790.6, "end": 791.0, "word": " Greek", "probability": 0.89111328125}, {"start": 791.0, "end": 791.42, "word": " letter,", "probability": 0.95458984375}, {"start": 792.04, "end": 792.36, "word": " Chi", "probability": 0.640625}, {"start": 792.36, "end": 793.08, "word": "-square.", "probability": 0.9404296875}, {"start": 793.76, "end": 794.16, "word": " It's", "probability": 0.94580078125}, {"start": 794.16, "end": 794.26, "word": " a", "probability": 0.52587890625}, {"start": 794.26, "end": 794.44, "word": " Greek", "probability": 0.884765625}, {"start": 794.44, "end": 794.8, "word": " letter.", "probability": 0.947265625}, {"start": 795.6, "end": 795.94, "word": " It's", "probability": 0.973876953125}, {"start": 795.94, "end": 796.44, "word": " pronounced", "probability": 0.77490234375}, {"start": 796.44, "end": 796.88, "word": " as", "probability": 0.96337890625}, {"start": 796.88, "end": 797.3, "word": " Chi,", "probability": 0.6162109375}, {"start": 798.26, "end": 798.56, "word": " C", "probability": 0.6279296875}, {"start": 798.56, "end": 798.78, "word": "-H", "probability": 0.906005859375}, {"start": 798.78, 
"end": 799.16, "word": "-I,", "probability": 0.998046875}, {"start": 799.52, "end": 799.84, "word": " Chi", "probability": 0.84765625}, {"start": 799.84, "end": 800.34, "word": "-square.", "probability": 0.9444986979166666}, {"start": 801.74, "end": 802.36, "word": " It", "probability": 0.8203125}, {"start": 802.36, "end": 802.64, "word": " looks", "probability": 0.830078125}, {"start": 802.64, "end": 802.94, "word": " like", "probability": 0.93896484375}, {"start": 802.94, "end": 803.28, "word": " X.", "probability": 0.78271484375}], "temperature": 1.0}, {"id": 31, "seek": 83500, "start": 810.5, "end": 835.0, "text": " And chi-square is given by, chi-square statistic is given by this equation. So chi-square is the sum of F for 0 minus F expected wanted square divided by Fe.", "tokens": [400, 13228, 12, 33292, 543, 307, 2212, 538, 11, 13228, 12, 33292, 543, 29588, 307, 2212, 538, 341, 5367, 13, 407, 13228, 12, 33292, 543, 307, 264, 2408, 295, 479, 337, 1958, 3175, 479, 5176, 1415, 3732, 6666, 538, 3697, 13], "avg_logprob": -0.29724701671373277, "compression_ratio": 1.4107142857142858, "no_speech_prob": 0.0, "words": [{"start": 810.5, "end": 810.78, "word": " And", "probability": 0.849609375}, {"start": 810.78, "end": 810.98, "word": " chi", "probability": 0.268798828125}, {"start": 810.98, "end": 811.22, "word": "-square", "probability": 0.8209635416666666}, {"start": 811.22, "end": 811.4, "word": " is", "probability": 0.5654296875}, {"start": 811.4, "end": 811.62, "word": " given", "probability": 0.8251953125}, {"start": 811.62, "end": 811.94, "word": " by,", "probability": 0.96728515625}, {"start": 812.12, "end": 812.46, "word": " chi", "probability": 0.6494140625}, {"start": 812.46, "end": 812.64, "word": "-square", "probability": 0.9588216145833334}, {"start": 812.64, "end": 813.16, "word": " statistic", "probability": 0.49267578125}, {"start": 813.16, "end": 813.48, "word": " is", "probability": 0.927734375}, {"start": 813.48, "end": 813.68, "word": " given", 
"probability": 0.89794921875}, {"start": 813.68, "end": 813.96, "word": " by", "probability": 0.9677734375}, {"start": 813.96, "end": 814.54, "word": " this", "probability": 0.93505859375}, {"start": 814.54, "end": 815.02, "word": " equation.", "probability": 0.93603515625}, {"start": 816.42, "end": 817.02, "word": " So", "probability": 0.95068359375}, {"start": 817.02, "end": 817.3, "word": " chi", "probability": 0.53955078125}, {"start": 817.3, "end": 817.66, "word": "-square", "probability": 0.9401041666666666}, {"start": 817.66, "end": 818.98, "word": " is", "probability": 0.88427734375}, {"start": 818.98, "end": 819.18, "word": " the", "probability": 0.90625}, {"start": 819.18, "end": 819.52, "word": " sum", "probability": 0.93701171875}, {"start": 819.52, "end": 822.1, "word": " of", "probability": 0.92138671875}, {"start": 822.1, "end": 824.7, "word": " F", "probability": 0.53271484375}, {"start": 824.7, "end": 825.5, "word": " for", "probability": 0.66357421875}, {"start": 825.5, "end": 825.96, "word": " 0", "probability": 0.255615234375}, {"start": 825.96, "end": 826.74, "word": " minus", "probability": 0.9560546875}, {"start": 826.74, "end": 827.2, "word": " F", "probability": 0.9765625}, {"start": 827.2, "end": 828.14, "word": " expected", "probability": 0.83154296875}, {"start": 828.14, "end": 828.96, "word": " wanted", "probability": 0.11956787109375}, {"start": 828.96, "end": 829.5, "word": " square", "probability": 0.82666015625}, {"start": 829.5, "end": 831.0, "word": " divided", "probability": 0.673828125}, {"start": 831.0, "end": 831.46, "word": " by", "probability": 0.98291015625}, {"start": 831.46, "end": 835.0, "word": " Fe.", "probability": 0.474853515625}], "temperature": 1.0}, {"id": 32, "seek": 86533, "start": 836.15, "end": 865.33, "text": " Now, let's see the definition for each term here. Fo, it means the observed frequency in a particular cell in the table you have. Fe is the expected frequency in a particular cell if it's 0 is true. 
So if you go back a little bit to the previous table, these values 12, 24, 108, 156 are the observed frequency.", "tokens": [823, 11, 718, 311, 536, 264, 7123, 337, 1184, 1433, 510, 13, 8564, 11, 309, 1355, 264, 13095, 7893, 294, 257, 1729, 2815, 294, 264, 3199, 291, 362, 13, 3697, 307, 264, 5176, 7893, 294, 257, 1729, 2815, 498, 309, 311, 1958, 307, 2074, 13, 407, 498, 291, 352, 646, 257, 707, 857, 281, 264, 3894, 3199, 11, 613, 4190, 2272, 11, 4022, 11, 41342, 11, 2119, 21, 366, 264, 13095, 7893, 13], "avg_logprob": -0.18823902248530774, "compression_ratio": 1.6368421052631579, "no_speech_prob": 0.0, "words": [{"start": 836.15, "end": 836.41, "word": " Now,", "probability": 0.80859375}, {"start": 836.51, "end": 836.65, "word": " let's", "probability": 0.906005859375}, {"start": 836.65, "end": 836.85, "word": " see", "probability": 0.90869140625}, {"start": 836.85, "end": 837.35, "word": " the", "probability": 0.90673828125}, {"start": 837.35, "end": 838.01, "word": " definition", "probability": 0.93505859375}, {"start": 838.01, "end": 838.31, "word": " for", "probability": 0.90283203125}, {"start": 838.31, "end": 838.57, "word": " each", "probability": 0.94970703125}, {"start": 838.57, "end": 838.97, "word": " term", "probability": 0.94921875}, {"start": 838.97, "end": 839.49, "word": " here.", "probability": 0.74755859375}, {"start": 839.87, "end": 840.41, "word": " Fo,", "probability": 0.265869140625}, {"start": 840.71, "end": 840.93, "word": " it", "probability": 0.90185546875}, {"start": 840.93, "end": 841.21, "word": " means", "probability": 0.91943359375}, {"start": 841.21, "end": 841.39, "word": " the", "probability": 0.810546875}, {"start": 841.39, "end": 841.89, "word": " observed", "probability": 0.89306640625}, {"start": 841.89, "end": 842.87, "word": " frequency", "probability": 0.93701171875}, {"start": 842.87, "end": 844.17, "word": " in", "probability": 0.90869140625}, {"start": 844.17, "end": 844.45, "word": " a", "probability": 0.98193359375}, 
{"start": 844.45, "end": 844.87, "word": " particular", "probability": 0.9052734375}, {"start": 844.87, "end": 845.25, "word": " cell", "probability": 0.91064453125}, {"start": 845.25, "end": 845.43, "word": " in", "probability": 0.884765625}, {"start": 845.43, "end": 845.55, "word": " the", "probability": 0.90966796875}, {"start": 845.55, "end": 845.81, "word": " table", "probability": 0.87646484375}, {"start": 845.81, "end": 846.01, "word": " you", "probability": 0.9296875}, {"start": 846.01, "end": 846.29, "word": " have.", "probability": 0.95166015625}, {"start": 848.31, "end": 848.87, "word": " Fe", "probability": 0.90771484375}, {"start": 848.87, "end": 849.27, "word": " is", "probability": 0.8896484375}, {"start": 849.27, "end": 849.43, "word": " the", "probability": 0.9052734375}, {"start": 849.43, "end": 849.89, "word": " expected", "probability": 0.94384765625}, {"start": 849.89, "end": 850.63, "word": " frequency", "probability": 0.96044921875}, {"start": 850.63, "end": 851.27, "word": " in", "probability": 0.92626953125}, {"start": 851.27, "end": 851.61, "word": " a", "probability": 0.9384765625}, {"start": 851.61, "end": 852.03, "word": " particular", "probability": 0.88623046875}, {"start": 852.03, "end": 852.41, "word": " cell", "probability": 0.90234375}, {"start": 852.41, "end": 852.77, "word": " if", "probability": 0.810546875}, {"start": 852.77, "end": 853.01, "word": " it's", "probability": 0.692626953125}, {"start": 853.01, "end": 853.19, "word": " 0", "probability": 0.283447265625}, {"start": 853.19, "end": 853.35, "word": " is", "probability": 0.35888671875}, {"start": 853.35, "end": 853.57, "word": " true.", "probability": 0.9462890625}, {"start": 854.13, "end": 854.37, "word": " So", "probability": 0.93798828125}, {"start": 854.37, "end": 854.51, "word": " if", "probability": 0.6689453125}, {"start": 854.51, "end": 854.61, "word": " you", "probability": 0.92724609375}, {"start": 854.61, "end": 854.75, "word": " go", "probability": 
0.95556640625}, {"start": 854.75, "end": 854.97, "word": " back", "probability": 0.88134765625}, {"start": 854.97, "end": 855.11, "word": " a", "probability": 0.8876953125}, {"start": 855.11, "end": 855.25, "word": " little", "probability": 0.8623046875}, {"start": 855.25, "end": 855.47, "word": " bit", "probability": 0.943359375}, {"start": 855.47, "end": 855.57, "word": " to", "probability": 0.93994140625}, {"start": 855.57, "end": 855.69, "word": " the", "probability": 0.91845703125}, {"start": 855.69, "end": 856.01, "word": " previous", "probability": 0.8671875}, {"start": 856.01, "end": 856.33, "word": " table,", "probability": 0.90283203125}, {"start": 857.67, "end": 858.01, "word": " these", "probability": 0.84619140625}, {"start": 858.01, "end": 858.65, "word": " values", "probability": 0.96337890625}, {"start": 858.65, "end": 860.69, "word": " 12,", "probability": 0.4140625}, {"start": 860.85, "end": 861.29, "word": " 24,", "probability": 0.94677734375}, {"start": 861.43, "end": 861.75, "word": " 108,", "probability": 0.96142578125}, {"start": 861.91, "end": 862.81, "word": " 156", "probability": 0.908447265625}, {"start": 862.81, "end": 863.79, "word": " are", "probability": 0.76318359375}, {"start": 863.79, "end": 864.17, "word": " the", "probability": 0.8955078125}, {"start": 864.17, "end": 864.73, "word": " observed", "probability": 0.86669921875}, {"start": 864.73, "end": 865.33, "word": " frequency.", "probability": 0.58203125}], "temperature": 1.0}, {"id": 33, "seek": 89294, "start": 866.14, "end": 892.94, "text": " So these values represent Fo. So Fo is the observed frequency. The frequency is from the sample. Again, we are testing proportion 1 equals proportion 2. 
Now for Fe.", "tokens": [407, 613, 4190, 2906, 8564, 13, 407, 8564, 307, 264, 13095, 7893, 13, 440, 7893, 307, 490, 264, 6889, 13, 3764, 11, 321, 366, 4997, 16068, 502, 6915, 16068, 568, 13, 823, 337, 3697, 13], "avg_logprob": -0.2845052008827527, "compression_ratio": 1.3306451612903225, "no_speech_prob": 0.0, "words": [{"start": 866.14, "end": 866.42, "word": " So", "probability": 0.82861328125}, {"start": 866.42, "end": 866.7, "word": " these", "probability": 0.58544921875}, {"start": 866.7, "end": 867.26, "word": " values", "probability": 0.96337890625}, {"start": 867.26, "end": 868.88, "word": " represent", "probability": 0.796875}, {"start": 868.88, "end": 870.14, "word": " Fo.", "probability": 0.35888671875}, {"start": 870.64, "end": 870.88, "word": " So", "probability": 0.908203125}, {"start": 870.88, "end": 871.3, "word": " Fo", "probability": 0.640625}, {"start": 871.3, "end": 873.6, "word": " is", "probability": 0.92236328125}, {"start": 873.6, "end": 873.76, "word": " the", "probability": 0.84619140625}, {"start": 873.76, "end": 874.34, "word": " observed", "probability": 0.8935546875}, {"start": 874.34, "end": 877.42, "word": " frequency.", "probability": 0.95458984375}, {"start": 878.2, "end": 878.3, "word": " The", "probability": 0.435791015625}, {"start": 878.3, "end": 878.66, "word": " frequency", "probability": 0.43701171875}, {"start": 878.66, "end": 879.32, "word": " is", "probability": 0.430419921875}, {"start": 879.32, "end": 879.56, "word": " from", "probability": 0.7236328125}, {"start": 879.56, "end": 879.78, "word": " the", "probability": 0.912109375}, {"start": 879.78, "end": 880.04, "word": " sample.", "probability": 0.8173828125}, {"start": 885.08, "end": 885.56, "word": " Again,", "probability": 0.90478515625}, {"start": 885.62, "end": 885.72, "word": " we", "probability": 0.943359375}, {"start": 885.72, "end": 885.86, "word": " are", "probability": 0.9189453125}, {"start": 885.86, "end": 886.32, "word": " testing", "probability": 
0.8935546875}, {"start": 886.32, "end": 887.7, "word": " proportion", "probability": 0.7119140625}, {"start": 887.7, "end": 887.98, "word": " 1", "probability": 0.54443359375}, {"start": 887.98, "end": 888.34, "word": " equals", "probability": 0.8330078125}, {"start": 888.34, "end": 888.84, "word": " proportion", "probability": 0.82666015625}, {"start": 888.84, "end": 889.24, "word": " 2.", "probability": 0.9765625}, {"start": 890.44, "end": 890.88, "word": " Now", "probability": 0.9287109375}, {"start": 890.88, "end": 891.36, "word": " for", "probability": 0.693359375}, {"start": 891.36, "end": 892.94, "word": " Fe.", "probability": 0.98046875}], "temperature": 1.0}, {"id": 34, "seek": 92072, "start": 894.78, "end": 920.72, "text": " Fe is the expected frequency in a particular cell if each cell is true. If we are assuming the two population proportions are the same, what do you expect the frequency for each cell? So we are going to compute the observed, I'm sorry, the expected frequency for each cell.", "tokens": [3697, 307, 264, 5176, 7893, 294, 257, 1729, 2815, 498, 1184, 2815, 307, 2074, 13, 759, 321, 366, 11926, 264, 732, 4415, 32482, 366, 264, 912, 11, 437, 360, 291, 2066, 264, 7893, 337, 1184, 2815, 30, 407, 321, 366, 516, 281, 14722, 264, 13095, 11, 286, 478, 2597, 11, 264, 5176, 7893, 337, 1184, 2815, 13], "avg_logprob": -0.22871767857979083, "compression_ratio": 1.6809815950920246, "no_speech_prob": 0.0, "words": [{"start": 894.78, "end": 895.22, "word": " Fe", "probability": 0.07977294921875}, {"start": 895.22, "end": 895.42, "word": " is", "probability": 0.90771484375}, {"start": 895.42, "end": 895.58, "word": " the", "probability": 0.833984375}, {"start": 895.58, "end": 896.06, "word": " expected", "probability": 0.9130859375}, {"start": 896.06, "end": 896.86, "word": " frequency", "probability": 0.939453125}, {"start": 896.86, "end": 897.54, "word": " in", "probability": 0.650390625}, {"start": 897.54, "end": 897.66, "word": " a", "probability": 
0.76171875}, {"start": 897.66, "end": 898.06, "word": " particular", "probability": 0.841796875}, {"start": 898.06, "end": 898.5, "word": " cell", "probability": 0.88818359375}, {"start": 898.5, "end": 898.84, "word": " if", "probability": 0.6865234375}, {"start": 898.84, "end": 899.08, "word": " each", "probability": 0.32568359375}, {"start": 899.08, "end": 899.2, "word": " cell", "probability": 0.83447265625}, {"start": 899.2, "end": 899.36, "word": " is", "probability": 0.94287109375}, {"start": 899.36, "end": 899.58, "word": " true.", "probability": 0.9423828125}, {"start": 899.88, "end": 900.14, "word": " If", "probability": 0.865234375}, {"start": 900.14, "end": 900.3, "word": " we", "probability": 0.95068359375}, {"start": 900.3, "end": 900.42, "word": " are", "probability": 0.8984375}, {"start": 900.42, "end": 900.86, "word": " assuming", "probability": 0.8876953125}, {"start": 900.86, "end": 902.58, "word": " the", "probability": 0.68994140625}, {"start": 902.58, "end": 902.8, "word": " two", "probability": 0.9052734375}, {"start": 902.8, "end": 903.52, "word": " population", "probability": 0.84912109375}, {"start": 903.52, "end": 904.14, "word": " proportions", "probability": 0.74560546875}, {"start": 904.14, "end": 904.44, "word": " are", "probability": 0.9423828125}, {"start": 904.44, "end": 904.62, "word": " the", "probability": 0.91748046875}, {"start": 904.62, "end": 904.96, "word": " same,", "probability": 0.91162109375}, {"start": 905.56, "end": 905.8, "word": " what", "probability": 0.9130859375}, {"start": 905.8, "end": 905.94, "word": " do", "probability": 0.74560546875}, {"start": 905.94, "end": 905.98, "word": " you", "probability": 0.82080078125}, {"start": 905.98, "end": 906.54, "word": " expect", "probability": 0.93115234375}, {"start": 906.54, "end": 907.36, "word": " the", "probability": 0.46923828125}, {"start": 907.36, "end": 907.88, "word": " frequency", "probability": 0.96484375}, {"start": 907.88, "end": 908.16, "word": " for", 
"probability": 0.93310546875}, {"start": 908.16, "end": 908.44, "word": " each", "probability": 0.93896484375}, {"start": 908.44, "end": 908.68, "word": " cell?", "probability": 0.892578125}, {"start": 909.7, "end": 910.16, "word": " So", "probability": 0.921875}, {"start": 910.16, "end": 910.38, "word": " we", "probability": 0.56298828125}, {"start": 910.38, "end": 910.54, "word": " are", "probability": 0.9228515625}, {"start": 910.54, "end": 910.94, "word": " going", "probability": 0.93896484375}, {"start": 910.94, "end": 912.98, "word": " to", "probability": 0.96240234375}, {"start": 912.98, "end": 913.54, "word": " compute", "probability": 0.94384765625}, {"start": 913.54, "end": 915.2, "word": " the", "probability": 0.84375}, {"start": 915.2, "end": 915.74, "word": " observed,", "probability": 0.65283203125}, {"start": 916.62, "end": 916.74, "word": " I'm", "probability": 0.9130859375}, {"start": 916.74, "end": 916.92, "word": " sorry,", "probability": 0.86279296875}, {"start": 917.2, "end": 918.1, "word": " the", "probability": 0.8623046875}, {"start": 918.1, "end": 919.26, "word": " expected", "probability": 0.91064453125}, {"start": 919.26, "end": 919.86, "word": " frequency", "probability": 0.958984375}, {"start": 919.86, "end": 920.14, "word": " for", "probability": 0.93798828125}, {"start": 920.14, "end": 920.4, "word": " each", "probability": 0.93603515625}, {"start": 920.4, "end": 920.72, "word": " cell.", "probability": 0.90283203125}], "temperature": 1.0}, {"id": 35, "seek": 94774, "start": 922.4, "end": 947.74, "text": " in this table so let's see how can we do that by using the same rule we had before now chi-square statistic for the two by two case I mean if there are two rows and two columns has only one degree of freedom later on we'll see if we have more than two rows and more than two columns", "tokens": [294, 341, 3199, 370, 718, 311, 536, 577, 393, 321, 360, 300, 538, 1228, 264, 912, 4978, 321, 632, 949, 586, 13228, 12, 33292, 543, 29588, 
337, 264, 732, 538, 732, 1389, 286, 914, 498, 456, 366, 732, 13241, 293, 732, 13766, 575, 787, 472, 4314, 295, 5645, 1780, 322, 321, 603, 536, 498, 321, 362, 544, 813, 732, 13241, 293, 544, 813, 732, 13766], "avg_logprob": -0.1506865555138299, "compression_ratio": 1.6745562130177514, "no_speech_prob": 0.0, "words": [{"start": 922.4, "end": 922.68, "word": " in", "probability": 0.259521484375}, {"start": 922.68, "end": 922.88, "word": " this", "probability": 0.9228515625}, {"start": 922.88, "end": 923.18, "word": " table", "probability": 0.89892578125}, {"start": 923.18, "end": 923.84, "word": " so", "probability": 0.207275390625}, {"start": 923.84, "end": 924.08, "word": " let's", "probability": 0.959228515625}, {"start": 924.08, "end": 924.2, "word": " see", "probability": 0.92529296875}, {"start": 924.2, "end": 924.34, "word": " how", "probability": 0.939453125}, {"start": 924.34, "end": 924.54, "word": " can", "probability": 0.8505859375}, {"start": 924.54, "end": 924.66, "word": " we", "probability": 0.9638671875}, {"start": 924.66, "end": 924.82, "word": " do", "probability": 0.96240234375}, {"start": 924.82, "end": 925.08, "word": " that", "probability": 0.94091796875}, {"start": 925.08, "end": 925.3, "word": " by", "probability": 0.96142578125}, {"start": 925.3, "end": 925.74, "word": " using", "probability": 0.923828125}, {"start": 925.74, "end": 926.88, "word": " the", "probability": 0.89453125}, {"start": 926.88, "end": 927.28, "word": " same", "probability": 0.904296875}, {"start": 927.28, "end": 927.74, "word": " rule", "probability": 0.9072265625}, {"start": 927.74, "end": 928.28, "word": " we", "probability": 0.95361328125}, {"start": 928.28, "end": 928.7, "word": " had", "probability": 0.90966796875}, {"start": 928.7, "end": 929.36, "word": " before", "probability": 0.83935546875}, {"start": 929.36, "end": 930.58, "word": " now", "probability": 0.68408203125}, {"start": 930.58, "end": 931.48, "word": " chi", "probability": 0.5703125}, {"start": 
931.48, "end": 931.98, "word": "-square", "probability": 0.83056640625}, {"start": 931.98, "end": 932.88, "word": " statistic", "probability": 0.81396484375}, {"start": 932.88, "end": 935.3, "word": " for", "probability": 0.94287109375}, {"start": 935.3, "end": 935.5, "word": " the", "probability": 0.92041015625}, {"start": 935.5, "end": 935.7, "word": " two", "probability": 0.66748046875}, {"start": 935.7, "end": 935.86, "word": " by", "probability": 0.8310546875}, {"start": 935.86, "end": 936.08, "word": " two", "probability": 0.9443359375}, {"start": 936.08, "end": 936.5, "word": " case", "probability": 0.9091796875}, {"start": 936.5, "end": 936.8, "word": " I", "probability": 0.486328125}, {"start": 936.8, "end": 936.96, "word": " mean", "probability": 0.966796875}, {"start": 936.96, "end": 937.2, "word": " if", "probability": 0.95263671875}, {"start": 937.2, "end": 937.42, "word": " there", "probability": 0.91162109375}, {"start": 937.42, "end": 937.62, "word": " are", "probability": 0.9453125}, {"start": 937.62, "end": 938.0, "word": " two", "probability": 0.93408203125}, {"start": 938.0, "end": 938.36, "word": " rows", "probability": 0.89794921875}, {"start": 938.36, "end": 938.52, "word": " and", "probability": 0.943359375}, {"start": 938.52, "end": 938.72, "word": " two", "probability": 0.9384765625}, {"start": 938.72, "end": 939.22, "word": " columns", "probability": 0.95703125}, {"start": 939.22, "end": 939.98, "word": " has", "probability": 0.880859375}, {"start": 939.98, "end": 940.38, "word": " only", "probability": 0.91650390625}, {"start": 940.38, "end": 940.58, "word": " one", "probability": 0.9296875}, {"start": 940.58, "end": 940.82, "word": " degree", "probability": 0.978515625}, {"start": 940.82, "end": 941.02, "word": " of", "probability": 0.9267578125}, {"start": 941.02, "end": 941.3, "word": " freedom", "probability": 0.94287109375}, {"start": 941.3, "end": 943.12, "word": " later", "probability": 0.88818359375}, {"start": 943.12, "end": 
943.38, "word": " on", "probability": 0.94921875}, {"start": 943.38, "end": 943.6, "word": " we'll", "probability": 0.8955078125}, {"start": 943.6, "end": 943.88, "word": " see", "probability": 0.91455078125}, {"start": 943.88, "end": 944.18, "word": " if", "probability": 0.9541015625}, {"start": 944.18, "end": 944.36, "word": " we", "probability": 0.96240234375}, {"start": 944.36, "end": 944.68, "word": " have", "probability": 0.94580078125}, {"start": 944.68, "end": 945.78, "word": " more", "probability": 0.94384765625}, {"start": 945.78, "end": 945.94, "word": " than", "probability": 0.94189453125}, {"start": 945.94, "end": 946.18, "word": " two", "probability": 0.93359375}, {"start": 946.18, "end": 946.44, "word": " rows", "probability": 0.89501953125}, {"start": 946.44, "end": 946.64, "word": " and", "probability": 0.9375}, {"start": 946.64, "end": 946.84, "word": " more", "probability": 0.9404296875}, {"start": 946.84, "end": 947.02, "word": " than", "probability": 0.9423828125}, {"start": 947.02, "end": 947.2, "word": " two", "probability": 0.935546875}, {"start": 947.2, "end": 947.74, "word": " columns", "probability": 0.9609375}], "temperature": 1.0}, {"id": 36, "seek": 97565, "start": 948.93, "end": 975.65, "text": " we look for different value for degrees of freedom. So for two by two tables, there is only one degree of freedom. Now the assumption here for using chi-square, each cell in the contingency table has expected frequency of at least five. So these expected frequencies should be at least five for each cell. 
So that's the condition for using", "tokens": [321, 574, 337, 819, 2158, 337, 5310, 295, 5645, 13, 407, 337, 732, 538, 732, 8020, 11, 456, 307, 787, 472, 4314, 295, 5645, 13, 823, 264, 15302, 510, 337, 1228, 13228, 12, 33292, 543, 11, 1184, 2815, 294, 264, 27820, 3020, 3199, 575, 5176, 7893, 295, 412, 1935, 1732, 13, 407, 613, 5176, 20250, 820, 312, 412, 1935, 1732, 337, 1184, 2815, 13, 407, 300, 311, 264, 4188, 337, 1228], "avg_logprob": -0.1788194434096416, "compression_ratio": 1.7894736842105263, "no_speech_prob": 0.0, "words": [{"start": 948.93, "end": 949.19, "word": " we", "probability": 0.35205078125}, {"start": 949.19, "end": 949.53, "word": " look", "probability": 0.8564453125}, {"start": 949.53, "end": 949.85, "word": " for", "probability": 0.953125}, {"start": 949.85, "end": 950.53, "word": " different", "probability": 0.8564453125}, {"start": 950.53, "end": 951.81, "word": " value", "probability": 0.5439453125}, {"start": 951.81, "end": 952.11, "word": " for", "probability": 0.80908203125}, {"start": 952.11, "end": 952.37, "word": " degrees", "probability": 0.85986328125}, {"start": 952.37, "end": 952.51, "word": " of", "probability": 0.9443359375}, {"start": 952.51, "end": 952.73, "word": " freedom.", "probability": 0.955078125}, {"start": 953.27, "end": 953.35, "word": " So", "probability": 0.76025390625}, {"start": 953.35, "end": 953.63, "word": " for", "probability": 0.6875}, {"start": 953.63, "end": 953.89, "word": " two", "probability": 0.51025390625}, {"start": 953.89, "end": 954.05, "word": " by", "probability": 0.77587890625}, {"start": 954.05, "end": 954.23, "word": " two", "probability": 0.9443359375}, {"start": 954.23, "end": 954.75, "word": " tables,", "probability": 0.830078125}, {"start": 954.95, "end": 955.63, "word": " there", "probability": 0.9052734375}, {"start": 955.63, "end": 955.81, "word": " is", "probability": 0.9326171875}, {"start": 955.81, "end": 956.29, "word": " only", "probability": 0.9267578125}, {"start": 956.29, "end": 
956.95, "word": " one", "probability": 0.91845703125}, {"start": 956.95, "end": 957.49, "word": " degree", "probability": 0.9765625}, {"start": 957.49, "end": 958.07, "word": " of", "probability": 0.95263671875}, {"start": 958.07, "end": 958.71, "word": " freedom.", "probability": 0.9384765625}, {"start": 959.37, "end": 959.67, "word": " Now", "probability": 0.9365234375}, {"start": 959.67, "end": 959.83, "word": " the", "probability": 0.67041015625}, {"start": 959.83, "end": 960.29, "word": " assumption", "probability": 0.9697265625}, {"start": 960.29, "end": 960.73, "word": " here", "probability": 0.84033203125}, {"start": 960.73, "end": 961.05, "word": " for", "probability": 0.86376953125}, {"start": 961.05, "end": 961.41, "word": " using", "probability": 0.93359375}, {"start": 961.41, "end": 961.67, "word": " chi", "probability": 0.68505859375}, {"start": 961.67, "end": 962.07, "word": "-square,", "probability": 0.8527018229166666}, {"start": 962.71, "end": 963.01, "word": " each", "probability": 0.853515625}, {"start": 963.01, "end": 963.31, "word": " cell", "probability": 0.90869140625}, {"start": 963.31, "end": 963.53, "word": " in", "probability": 0.90283203125}, {"start": 963.53, "end": 963.67, "word": " the", "probability": 0.92724609375}, {"start": 963.67, "end": 964.21, "word": " contingency", "probability": 0.836669921875}, {"start": 964.21, "end": 964.63, "word": " table", "probability": 0.8642578125}, {"start": 964.63, "end": 965.01, "word": " has", "probability": 0.869140625}, {"start": 965.01, "end": 965.55, "word": " expected", "probability": 0.91650390625}, {"start": 965.55, "end": 966.17, "word": " frequency", "probability": 0.90673828125}, {"start": 966.17, "end": 966.43, "word": " of", "probability": 0.91064453125}, {"start": 966.43, "end": 966.57, "word": " at", "probability": 0.96337890625}, {"start": 966.57, "end": 966.77, "word": " least", "probability": 0.9560546875}, {"start": 966.77, "end": 967.09, "word": " five.", "probability": 
0.4599609375}, {"start": 967.69, "end": 967.95, "word": " So", "probability": 0.9365234375}, {"start": 967.95, "end": 968.29, "word": " these", "probability": 0.50927734375}, {"start": 968.29, "end": 968.87, "word": " expected", "probability": 0.8994140625}, {"start": 968.87, "end": 969.39, "word": " frequencies", "probability": 0.7978515625}, {"start": 969.39, "end": 969.87, "word": " should", "probability": 0.96728515625}, {"start": 969.87, "end": 970.25, "word": " be", "probability": 0.953125}, {"start": 970.25, "end": 971.41, "word": " at", "probability": 0.9541015625}, {"start": 971.41, "end": 971.69, "word": " least", "probability": 0.95751953125}, {"start": 971.69, "end": 972.09, "word": " five", "probability": 0.86376953125}, {"start": 972.09, "end": 972.57, "word": " for", "probability": 0.95068359375}, {"start": 972.57, "end": 973.03, "word": " each", "probability": 0.93994140625}, {"start": 973.03, "end": 973.35, "word": " cell.", "probability": 0.89453125}, {"start": 973.83, "end": 974.07, "word": " So", "probability": 0.94189453125}, {"start": 974.07, "end": 974.27, "word": " that's", "probability": 0.9326171875}, {"start": 974.27, "end": 974.41, "word": " the", "probability": 0.91552734375}, {"start": 974.41, "end": 974.81, "word": " condition", "probability": 0.947265625}, {"start": 974.81, "end": 975.17, "word": " for", "probability": 0.9443359375}, {"start": 975.17, "end": 975.65, "word": " using", "probability": 0.93603515625}], "temperature": 1.0}, {"id": 37, "seek": 100202, "start": 976.52, "end": 1002.02, "text": " So we have to test if the expected request for each cell is at least 5. So the condition is straightforward. Now, my decision rule is, the chi-square is always one-tailed. I mean, it's positive always. 
So the, always chi-square.", "tokens": [407, 321, 362, 281, 1500, 498, 264, 5176, 5308, 337, 1184, 2815, 307, 412, 1935, 1025, 13, 407, 264, 4188, 307, 15325, 13, 823, 11, 452, 3537, 4978, 307, 11, 264, 13228, 12, 33292, 543, 307, 1009, 472, 12, 14430, 292, 13, 286, 914, 11, 309, 311, 3353, 1009, 13, 407, 264, 11, 1009, 13228, 12, 33292, 543, 13], "avg_logprob": -0.3197916552424431, "compression_ratio": 1.4679487179487178, "no_speech_prob": 0.0, "words": [{"start": 976.52, "end": 977.18, "word": " So", "probability": 0.255126953125}, {"start": 977.18, "end": 977.32, "word": " we", "probability": 0.61279296875}, {"start": 977.32, "end": 977.5, "word": " have", "probability": 0.9375}, {"start": 977.5, "end": 977.74, "word": " to", "probability": 0.96923828125}, {"start": 977.74, "end": 981.08, "word": " test", "probability": 0.67138671875}, {"start": 981.08, "end": 982.3, "word": " if", "probability": 0.90380859375}, {"start": 982.3, "end": 982.62, "word": " the", "probability": 0.888671875}, {"start": 982.62, "end": 983.06, "word": " expected", "probability": 0.88525390625}, {"start": 983.06, "end": 983.56, "word": " request", "probability": 0.22265625}, {"start": 983.56, "end": 983.78, "word": " for", "probability": 0.861328125}, {"start": 983.78, "end": 983.98, "word": " each", "probability": 0.91015625}, {"start": 983.98, "end": 984.16, "word": " cell", "probability": 0.7939453125}, {"start": 984.16, "end": 984.26, "word": " is", "probability": 0.94091796875}, {"start": 984.26, "end": 984.4, "word": " at", "probability": 0.9423828125}, {"start": 984.4, "end": 984.6, "word": " least", "probability": 0.95263671875}, {"start": 984.6, "end": 984.88, "word": " 5.", "probability": 0.53564453125}, {"start": 985.14, "end": 985.4, "word": " So", "probability": 0.9326171875}, {"start": 985.4, "end": 985.58, "word": " the", "probability": 0.833984375}, {"start": 985.58, "end": 985.92, "word": " condition", "probability": 0.93994140625}, {"start": 985.92, "end": 986.26, 
"word": " is", "probability": 0.943359375}, {"start": 986.26, "end": 986.9, "word": " straightforward.", "probability": 0.833984375}, {"start": 988.58, "end": 989.08, "word": " Now,", "probability": 0.94970703125}, {"start": 989.24, "end": 989.36, "word": " my", "probability": 0.96240234375}, {"start": 989.36, "end": 989.66, "word": " decision", "probability": 0.89111328125}, {"start": 989.66, "end": 990.0, "word": " rule", "probability": 0.91796875}, {"start": 990.0, "end": 990.36, "word": " is,", "probability": 0.94775390625}, {"start": 991.7, "end": 992.8, "word": " the", "probability": 0.72705078125}, {"start": 992.8, "end": 993.0, "word": " chi", "probability": 0.4443359375}, {"start": 993.0, "end": 993.28, "word": "-square", "probability": 0.8411458333333334}, {"start": 993.28, "end": 993.5, "word": " is", "probability": 0.91162109375}, {"start": 993.5, "end": 994.72, "word": " always", "probability": 0.876953125}, {"start": 994.72, "end": 995.54, "word": " one", "probability": 0.53369140625}, {"start": 995.54, "end": 996.2, "word": "-tailed.", "probability": 0.6101481119791666}, {"start": 996.58, "end": 996.7, "word": " I", "probability": 0.9443359375}, {"start": 996.7, "end": 996.9, "word": " mean,", "probability": 0.95556640625}, {"start": 997.06, "end": 997.24, "word": " it's", "probability": 0.921875}, {"start": 997.24, "end": 997.82, "word": " positive", "probability": 0.916015625}, {"start": 997.82, "end": 998.36, "word": " always.", "probability": 0.76220703125}, {"start": 999.56, "end": 999.9, "word": " So", "probability": 0.9599609375}, {"start": 999.9, "end": 1000.24, "word": " the,", "probability": 0.349853515625}, {"start": 1000.42, "end": 1001.02, "word": " always", "probability": 0.86865234375}, {"start": 1001.02, "end": 1001.46, "word": " chi", "probability": 0.432373046875}, {"start": 1001.46, "end": 1002.02, "word": "-square.", "probability": 0.9122721354166666}], "temperature": 1.0}, {"id": 38, "seek": 102378, "start": 1004.72, "end": 
1023.78, "text": " is greater than or equal to zero. So we reject the null hypothesis if the value of the chi-square statistic lies in the rejection region and only there is only one side. So there is only one rejection region. So we reject the null hypothesis", "tokens": [307, 5044, 813, 420, 2681, 281, 4018, 13, 407, 321, 8248, 264, 18184, 17291, 498, 264, 2158, 295, 264, 13228, 12, 33292, 543, 29588, 9134, 294, 264, 26044, 4458, 293, 787, 456, 307, 787, 472, 1252, 13, 407, 456, 307, 787, 472, 26044, 4458, 13, 407, 321, 8248, 264, 18184, 17291], "avg_logprob": -0.24534254578443673, "compression_ratio": 1.8473282442748091, "no_speech_prob": 0.0, "words": [{"start": 1004.72, "end": 1004.98, "word": " is", "probability": 0.2398681640625}, {"start": 1004.98, "end": 1005.28, "word": " greater", "probability": 0.426025390625}, {"start": 1005.28, "end": 1005.6, "word": " than", "probability": 0.90966796875}, {"start": 1005.6, "end": 1005.76, "word": " or", "probability": 0.92041015625}, {"start": 1005.76, "end": 1005.96, "word": " equal", "probability": 0.67041015625}, {"start": 1005.96, "end": 1006.12, "word": " to", "probability": 0.89599609375}, {"start": 1006.12, "end": 1006.4, "word": " zero.", "probability": 0.69677734375}, {"start": 1007.86, "end": 1008.18, "word": " So", "probability": 0.89404296875}, {"start": 1008.18, "end": 1008.34, "word": " we", "probability": 0.6865234375}, {"start": 1008.34, "end": 1008.66, "word": " reject", "probability": 0.89697265625}, {"start": 1008.66, "end": 1008.82, "word": " the", "probability": 0.845703125}, {"start": 1008.82, "end": 1008.94, "word": " null", "probability": 0.80126953125}, {"start": 1008.94, "end": 1009.44, "word": " hypothesis", "probability": 0.8271484375}, {"start": 1009.44, "end": 1010.66, "word": " if", "probability": 0.7890625}, {"start": 1010.66, "end": 1010.82, "word": " the", "probability": 0.91455078125}, {"start": 1010.82, "end": 1011.12, "word": " value", "probability": 0.9814453125}, {"start": 
1011.12, "end": 1011.3, "word": " of", "probability": 0.9345703125}, {"start": 1011.3, "end": 1011.4, "word": " the", "probability": 0.787109375}, {"start": 1011.4, "end": 1011.7, "word": " chi", "probability": 0.583984375}, {"start": 1011.7, "end": 1012.0, "word": "-square", "probability": 0.7822265625}, {"start": 1012.0, "end": 1012.58, "word": " statistic", "probability": 0.7001953125}, {"start": 1012.58, "end": 1013.96, "word": " lies", "probability": 0.91259765625}, {"start": 1013.96, "end": 1014.38, "word": " in", "probability": 0.943359375}, {"start": 1014.38, "end": 1014.64, "word": " the", "probability": 0.91015625}, {"start": 1014.64, "end": 1015.06, "word": " rejection", "probability": 0.96630859375}, {"start": 1015.06, "end": 1015.52, "word": " region", "probability": 0.95068359375}, {"start": 1015.52, "end": 1016.54, "word": " and", "probability": 0.40283203125}, {"start": 1016.54, "end": 1017.04, "word": " only", "probability": 0.44189453125}, {"start": 1017.04, "end": 1017.28, "word": " there", "probability": 0.71826171875}, {"start": 1017.28, "end": 1017.44, "word": " is", "probability": 0.8935546875}, {"start": 1017.44, "end": 1017.72, "word": " only", "probability": 0.8662109375}, {"start": 1017.72, "end": 1018.12, "word": " one", "probability": 0.92431640625}, {"start": 1018.12, "end": 1018.9, "word": " side.", "probability": 0.70166015625}, {"start": 1019.54, "end": 1019.82, "word": " So", "probability": 0.93212890625}, {"start": 1019.82, "end": 1020.1, "word": " there", "probability": 0.71435546875}, {"start": 1020.1, "end": 1020.24, "word": " is", "probability": 0.884765625}, {"start": 1020.24, "end": 1020.46, "word": " only", "probability": 0.92626953125}, {"start": 1020.46, "end": 1020.84, "word": " one", "probability": 0.9169921875}, {"start": 1020.84, "end": 1021.52, "word": " rejection", "probability": 0.95947265625}, {"start": 1021.52, "end": 1021.9, "word": " region.", "probability": 0.94970703125}, {"start": 1022.2, "end": 1022.46, 
"word": " So", "probability": 0.943359375}, {"start": 1022.46, "end": 1022.6, "word": " we", "probability": 0.91845703125}, {"start": 1022.6, "end": 1022.96, "word": " reject", "probability": 0.93017578125}, {"start": 1022.96, "end": 1023.14, "word": " the", "probability": 0.87890625}, {"start": 1023.14, "end": 1023.28, "word": " null", "probability": 0.94775390625}, {"start": 1023.28, "end": 1023.78, "word": " hypothesis", "probability": 0.84228515625}], "temperature": 1.0}, {"id": 39, "seek": 105075, "start": 1024.75, "end": 1050.75, "text": " If the value of chi-square falls in this rejection region. I mean, if chi-square statistic is greater than chi-square alpha, then we reject the null hypothesis. Again, here we are testing H0 by 1 equals by 2 against two-sided test. Even there is only one side.", "tokens": [759, 264, 2158, 295, 13228, 12, 33292, 543, 8804, 294, 341, 26044, 4458, 13, 286, 914, 11, 498, 13228, 12, 33292, 543, 29588, 307, 5044, 813, 13228, 12, 33292, 543, 8961, 11, 550, 321, 8248, 264, 18184, 17291, 13, 3764, 11, 510, 321, 366, 4997, 389, 15, 538, 502, 6915, 538, 568, 1970, 732, 12, 30941, 1500, 13, 2754, 456, 307, 787, 472, 1252, 13], "avg_logprob": -0.23697916079651227, "compression_ratio": 1.5086705202312138, "no_speech_prob": 0.0, "words": [{"start": 1024.75, "end": 1025.05, "word": " If", "probability": 0.75146484375}, {"start": 1025.05, "end": 1025.21, "word": " the", "probability": 0.8857421875}, {"start": 1025.21, "end": 1025.47, "word": " value", "probability": 0.98095703125}, {"start": 1025.47, "end": 1025.65, "word": " of", "probability": 0.904296875}, {"start": 1025.65, "end": 1025.75, "word": " chi", "probability": 0.248046875}, {"start": 1025.75, "end": 1026.17, "word": "-square", "probability": 0.8434244791666666}, {"start": 1026.17, "end": 1027.37, "word": " falls", "probability": 0.74462890625}, {"start": 1027.37, "end": 1027.67, "word": " in", "probability": 0.9111328125}, {"start": 1027.67, "end": 1027.95, "word": " this", 
"probability": 0.92724609375}, {"start": 1027.95, "end": 1028.27, "word": " rejection", "probability": 0.90771484375}, {"start": 1028.27, "end": 1028.69, "word": " region.", "probability": 0.951171875}, {"start": 1028.83, "end": 1028.87, "word": " I", "probability": 0.92626953125}, {"start": 1028.87, "end": 1029.15, "word": " mean,", "probability": 0.9619140625}, {"start": 1029.77, "end": 1029.89, "word": " if", "probability": 0.951171875}, {"start": 1029.89, "end": 1030.15, "word": " chi", "probability": 0.76806640625}, {"start": 1030.15, "end": 1030.51, "word": "-square", "probability": 0.9552408854166666}, {"start": 1030.51, "end": 1031.09, "word": " statistic", "probability": 0.338623046875}, {"start": 1031.09, "end": 1031.67, "word": " is", "probability": 0.91259765625}, {"start": 1031.67, "end": 1032.17, "word": " greater", "probability": 0.88525390625}, {"start": 1032.17, "end": 1032.45, "word": " than", "probability": 0.94873046875}, {"start": 1032.45, "end": 1032.71, "word": " chi", "probability": 0.80810546875}, {"start": 1032.71, "end": 1033.03, "word": "-square", "probability": 0.9674479166666666}, {"start": 1033.03, "end": 1033.37, "word": " alpha,", "probability": 0.82421875}, {"start": 1033.75, "end": 1034.11, "word": " then", "probability": 0.82568359375}, {"start": 1034.11, "end": 1034.29, "word": " we", "probability": 0.93359375}, {"start": 1034.29, "end": 1034.67, "word": " reject", "probability": 0.9013671875}, {"start": 1034.67, "end": 1034.85, "word": " the", "probability": 0.8408203125}, {"start": 1034.85, "end": 1034.95, "word": " null", "probability": 0.98779296875}, {"start": 1034.95, "end": 1035.45, "word": " hypothesis.", "probability": 0.84423828125}, {"start": 1036.97, "end": 1037.39, "word": " Again,", "probability": 0.9150390625}, {"start": 1037.79, "end": 1038.13, "word": " here", "probability": 0.849609375}, {"start": 1038.13, "end": 1038.81, "word": " we", "probability": 0.62451171875}, {"start": 1038.81, "end": 1039.03, "word": " 
are", "probability": 0.93212890625}, {"start": 1039.03, "end": 1039.51, "word": " testing", "probability": 0.87158203125}, {"start": 1039.51, "end": 1042.85, "word": " H0", "probability": 0.5621337890625}, {"start": 1042.85, "end": 1043.61, "word": " by", "probability": 0.46484375}, {"start": 1043.61, "end": 1043.87, "word": " 1", "probability": 0.5888671875}, {"start": 1043.87, "end": 1044.33, "word": " equals", "probability": 0.8837890625}, {"start": 1044.33, "end": 1044.55, "word": " by", "probability": 0.88037109375}, {"start": 1044.55, "end": 1044.89, "word": " 2", "probability": 0.96435546875}, {"start": 1044.89, "end": 1046.03, "word": " against", "probability": 0.591796875}, {"start": 1046.03, "end": 1046.29, "word": " two", "probability": 0.81396484375}, {"start": 1046.29, "end": 1046.61, "word": "-sided", "probability": 0.820556640625}, {"start": 1046.61, "end": 1047.07, "word": " test.", "probability": 0.80810546875}, {"start": 1048.17, "end": 1048.69, "word": " Even", "probability": 0.84619140625}, {"start": 1048.69, "end": 1049.21, "word": " there", "probability": 0.490966796875}, {"start": 1049.21, "end": 1049.39, "word": " is", "probability": 0.91064453125}, {"start": 1049.39, "end": 1049.83, "word": " only", "probability": 0.9248046875}, {"start": 1049.83, "end": 1050.25, "word": " one", "probability": 0.9267578125}, {"start": 1050.25, "end": 1050.75, "word": " side.", "probability": 0.80322265625}], "temperature": 1.0}, {"id": 40, "seek": 107842, "start": 1051.46, "end": 1078.42, "text": " But chi-square is designed for testing pi 1 equals pi 2 against pi 1 does not equal pi 2. In this case, you cannot know the direction of this difference. I mean, you cannot say pi 1 is greater than or pi 1 is smaller than. Because chi-square is always positive. 
If you remember from this statistic, when we are testing pi 1 equals pi 2,", "tokens": [583, 13228, 12, 33292, 543, 307, 4761, 337, 4997, 3895, 502, 6915, 3895, 568, 1970, 3895, 502, 775, 406, 2681, 3895, 568, 13, 682, 341, 1389, 11, 291, 2644, 458, 264, 3513, 295, 341, 2649, 13, 286, 914, 11, 291, 2644, 584, 3895, 502, 307, 5044, 813, 420, 3895, 502, 307, 4356, 813, 13, 1436, 13228, 12, 33292, 543, 307, 1009, 3353, 13, 759, 291, 1604, 490, 341, 29588, 11, 562, 321, 366, 4997, 3895, 502, 6915, 3895, 568, 11], "avg_logprob": -0.19984567606890644, "compression_ratio": 1.719387755102041, "no_speech_prob": 0.0, "words": [{"start": 1051.46, "end": 1051.84, "word": " But", "probability": 0.69970703125}, {"start": 1051.84, "end": 1052.16, "word": " chi", "probability": 0.288818359375}, {"start": 1052.16, "end": 1052.38, "word": "-square", "probability": 0.8019205729166666}, {"start": 1052.38, "end": 1053.3, "word": " is", "probability": 0.9169921875}, {"start": 1053.3, "end": 1053.82, "word": " designed", "probability": 0.84521484375}, {"start": 1053.82, "end": 1054.22, "word": " for", "probability": 0.921875}, {"start": 1054.22, "end": 1054.78, "word": " testing", "probability": 0.86767578125}, {"start": 1054.78, "end": 1055.38, "word": " pi", "probability": 0.47021484375}, {"start": 1055.38, "end": 1055.58, "word": " 1", "probability": 0.460693359375}, {"start": 1055.58, "end": 1055.9, "word": " equals", "probability": 0.73193359375}, {"start": 1055.9, "end": 1056.08, "word": " pi", "probability": 0.88720703125}, {"start": 1056.08, "end": 1056.38, "word": " 2", "probability": 0.984375}, {"start": 1056.38, "end": 1056.96, "word": " against", "probability": 0.76220703125}, {"start": 1056.96, "end": 1057.28, "word": " pi", "probability": 0.25048828125}, {"start": 1057.28, "end": 1057.4, "word": " 1", "probability": 0.80126953125}, {"start": 1057.4, "end": 1057.6, "word": " does", "probability": 0.58203125}, {"start": 1057.6, "end": 1057.78, "word": " not", "probability": 
0.9541015625}, {"start": 1057.78, "end": 1058.0, "word": " equal", "probability": 0.89208984375}, {"start": 1058.0, "end": 1058.26, "word": " pi", "probability": 0.7080078125}, {"start": 1058.26, "end": 1058.36, "word": " 2.", "probability": 0.982421875}, {"start": 1058.56, "end": 1058.82, "word": " In", "probability": 0.9130859375}, {"start": 1058.82, "end": 1059.0, "word": " this", "probability": 0.947265625}, {"start": 1059.0, "end": 1059.26, "word": " case,", "probability": 0.91796875}, {"start": 1059.34, "end": 1059.46, "word": " you", "probability": 0.951171875}, {"start": 1059.46, "end": 1059.72, "word": " cannot", "probability": 0.84423828125}, {"start": 1059.72, "end": 1060.16, "word": " know", "probability": 0.88525390625}, {"start": 1060.16, "end": 1060.52, "word": " the", "probability": 0.9091796875}, {"start": 1060.52, "end": 1061.16, "word": " direction", "probability": 0.9716796875}, {"start": 1061.16, "end": 1063.92, "word": " of", "probability": 0.921875}, {"start": 1063.92, "end": 1064.14, "word": " this", "probability": 0.93505859375}, {"start": 1064.14, "end": 1064.62, "word": " difference.", "probability": 0.87646484375}, {"start": 1064.74, "end": 1064.82, "word": " I", "probability": 0.93994140625}, {"start": 1064.82, "end": 1064.94, "word": " mean,", "probability": 0.96142578125}, {"start": 1064.98, "end": 1065.08, "word": " you", "probability": 0.94677734375}, {"start": 1065.08, "end": 1065.28, "word": " cannot", "probability": 0.84765625}, {"start": 1065.28, "end": 1065.74, "word": " say", "probability": 0.94580078125}, {"start": 1065.74, "end": 1066.52, "word": " pi", "probability": 0.708984375}, {"start": 1066.52, "end": 1066.72, "word": " 1", "probability": 0.982421875}, {"start": 1066.72, "end": 1066.82, "word": " is", "probability": 0.50390625}, {"start": 1066.82, "end": 1067.12, "word": " greater", "probability": 0.9130859375}, {"start": 1067.12, "end": 1067.46, "word": " than", "probability": 0.94287109375}, {"start": 1067.46, "end": 
1067.74, "word": " or", "probability": 0.78857421875}, {"start": 1067.74, "end": 1067.94, "word": " pi", "probability": 0.89453125}, {"start": 1067.94, "end": 1068.12, "word": " 1", "probability": 0.9873046875}, {"start": 1068.12, "end": 1068.3, "word": " is", "probability": 0.93115234375}, {"start": 1068.3, "end": 1068.64, "word": " smaller", "probability": 0.86181640625}, {"start": 1068.64, "end": 1068.94, "word": " than.", "probability": 0.8662109375}, {"start": 1069.44, "end": 1069.84, "word": " Because", "probability": 0.92822265625}, {"start": 1069.84, "end": 1070.22, "word": " chi", "probability": 0.8662109375}, {"start": 1070.22, "end": 1070.72, "word": "-square", "probability": 0.9580078125}, {"start": 1070.72, "end": 1072.22, "word": " is", "probability": 0.943359375}, {"start": 1072.22, "end": 1072.62, "word": " always", "probability": 0.888671875}, {"start": 1072.62, "end": 1073.06, "word": " positive.", "probability": 0.93408203125}, {"start": 1073.8, "end": 1074.02, "word": " If", "probability": 0.94677734375}, {"start": 1074.02, "end": 1074.1, "word": " you", "probability": 0.958984375}, {"start": 1074.1, "end": 1074.38, "word": " remember", "probability": 0.88037109375}, {"start": 1074.38, "end": 1074.6, "word": " from", "probability": 0.48681640625}, {"start": 1074.6, "end": 1074.82, "word": " this", "probability": 0.51318359375}, {"start": 1074.82, "end": 1075.38, "word": " statistic,", "probability": 0.86962890625}, {"start": 1076.08, "end": 1076.42, "word": " when", "probability": 0.94287109375}, {"start": 1076.42, "end": 1076.54, "word": " we", "probability": 0.9443359375}, {"start": 1076.54, "end": 1076.68, "word": " are", "probability": 0.87548828125}, {"start": 1076.68, "end": 1077.22, "word": " testing", "probability": 0.85791015625}, {"start": 1077.22, "end": 1077.46, "word": " pi", "probability": 0.9091796875}, {"start": 1077.46, "end": 1077.68, "word": " 1", "probability": 0.986328125}, {"start": 1077.68, "end": 1077.9, "word": " 
equals", "probability": 0.73095703125}, {"start": 1077.9, "end": 1078.14, "word": " pi", "probability": 0.935546875}, {"start": 1078.14, "end": 1078.42, "word": " 2,", "probability": 0.99658203125}], "temperature": 1.0}, {"id": 41, "seek": 109994, "start": 1079.36, "end": 1099.94, "text": " Z could be positive or negative. So, based on that, we can decide if pi 1 is greater than or smaller than pi 2. But here, since chi-square is always positive, then you cannot determine the direction of the relationship. You just say that there exists a significant relationship between such and such.", "tokens": [1176, 727, 312, 3353, 420, 3671, 13, 407, 11, 2361, 322, 300, 11, 321, 393, 4536, 498, 3895, 502, 307, 5044, 813, 420, 4356, 813, 3895, 568, 13, 583, 510, 11, 1670, 13228, 12, 33292, 543, 307, 1009, 3353, 11, 550, 291, 2644, 6997, 264, 3513, 295, 264, 2480, 13, 509, 445, 584, 300, 456, 8198, 257, 4776, 2480, 1296, 1270, 293, 1270, 13], "avg_logprob": -0.22175480769230768, "compression_ratio": 1.5279187817258884, "no_speech_prob": 0.0, "words": [{"start": 1079.36, "end": 1079.78, "word": " Z", "probability": 0.135498046875}, {"start": 1079.78, "end": 1080.08, "word": " could", "probability": 0.79833984375}, {"start": 1080.08, "end": 1080.22, "word": " be", "probability": 0.9326171875}, {"start": 1080.22, "end": 1080.52, "word": " positive", "probability": 0.8974609375}, {"start": 1080.52, "end": 1080.8, "word": " or", "probability": 0.9580078125}, {"start": 1080.8, "end": 1081.18, "word": " negative.", "probability": 0.93701171875}, {"start": 1082.1, "end": 1082.26, "word": " So,", "probability": 0.52490234375}, {"start": 1082.76, "end": 1083.12, "word": " based", "probability": 0.9091796875}, {"start": 1083.12, "end": 1083.32, "word": " on", "probability": 0.94140625}, {"start": 1083.32, "end": 1083.54, "word": " that,", "probability": 0.931640625}, {"start": 1083.66, "end": 1083.76, "word": " we", "probability": 0.92919921875}, {"start": 1083.76, "end": 1084.1, "word": 
" can", "probability": 0.93212890625}, {"start": 1084.1, "end": 1084.86, "word": " decide", "probability": 0.93896484375}, {"start": 1084.86, "end": 1085.1, "word": " if", "probability": 0.93603515625}, {"start": 1085.1, "end": 1085.28, "word": " pi", "probability": 0.27587890625}, {"start": 1085.28, "end": 1085.46, "word": " 1", "probability": 0.49072265625}, {"start": 1085.46, "end": 1085.64, "word": " is", "probability": 0.8955078125}, {"start": 1085.64, "end": 1085.96, "word": " greater", "probability": 0.85986328125}, {"start": 1085.96, "end": 1086.38, "word": " than", "probability": 0.9208984375}, {"start": 1086.38, "end": 1086.52, "word": " or", "probability": 0.83203125}, {"start": 1086.52, "end": 1086.9, "word": " smaller", "probability": 0.85107421875}, {"start": 1086.9, "end": 1087.26, "word": " than", "probability": 0.9375}, {"start": 1087.26, "end": 1087.54, "word": " pi", "probability": 0.430419921875}, {"start": 1087.54, "end": 1087.72, "word": " 2.", "probability": 0.9697265625}, {"start": 1088.32, "end": 1088.56, "word": " But", "probability": 0.90771484375}, {"start": 1088.56, "end": 1088.8, "word": " here,", "probability": 0.72119140625}, {"start": 1088.94, "end": 1089.16, "word": " since", "probability": 0.8798828125}, {"start": 1089.16, "end": 1089.44, "word": " chi", "probability": 0.62158203125}, {"start": 1089.44, "end": 1089.76, "word": "-square", "probability": 0.8427734375}, {"start": 1089.76, "end": 1089.92, "word": " is", "probability": 0.94482421875}, {"start": 1089.92, "end": 1090.3, "word": " always", "probability": 0.89453125}, {"start": 1090.3, "end": 1090.86, "word": " positive,", "probability": 0.9326171875}, {"start": 1091.36, "end": 1091.68, "word": " then", "probability": 0.85498046875}, {"start": 1091.68, "end": 1091.88, "word": " you", "probability": 0.744140625}, {"start": 1091.88, "end": 1092.24, "word": " cannot", "probability": 0.86279296875}, {"start": 1092.24, "end": 1092.84, "word": " determine", "probability": 
0.93359375}, {"start": 1092.84, "end": 1093.26, "word": " the", "probability": 0.9228515625}, {"start": 1093.26, "end": 1093.7, "word": " direction", "probability": 0.97412109375}, {"start": 1093.7, "end": 1094.14, "word": " of", "probability": 0.96728515625}, {"start": 1094.14, "end": 1094.26, "word": " the", "probability": 0.8984375}, {"start": 1094.26, "end": 1094.68, "word": " relationship.", "probability": 0.90283203125}, {"start": 1095.1, "end": 1095.32, "word": " You", "probability": 0.94677734375}, {"start": 1095.32, "end": 1095.64, "word": " just", "probability": 0.90478515625}, {"start": 1095.64, "end": 1095.94, "word": " say", "probability": 0.92041015625}, {"start": 1095.94, "end": 1096.22, "word": " that", "probability": 0.9267578125}, {"start": 1096.22, "end": 1096.5, "word": " there", "probability": 0.8671875}, {"start": 1096.5, "end": 1096.96, "word": " exists", "probability": 0.86474609375}, {"start": 1096.96, "end": 1097.22, "word": " a", "probability": 0.96923828125}, {"start": 1097.22, "end": 1097.62, "word": " significant", "probability": 0.85546875}, {"start": 1097.62, "end": 1098.38, "word": " relationship", "probability": 0.90478515625}, {"start": 1098.38, "end": 1099.16, "word": " between", "probability": 0.88232421875}, {"start": 1099.16, "end": 1099.5, "word": " such", "probability": 0.9716796875}, {"start": 1099.5, "end": 1099.66, "word": " and", "probability": 0.84619140625}, {"start": 1099.66, "end": 1099.94, "word": " such.", "probability": 0.94921875}], "temperature": 1.0}, {"id": 42, "seek": 112685, "start": 1101.14, "end": 1126.86, "text": " So by using chi-square, you are doing just a test to see if there is a relationship between x and y, or if this relationship is not significant. 
But you cannot determine either the strength, I mean you cannot say there exists strong relationship, or the direction, you cannot say there exists inverse or direct positive or negative relationship, you just say", "tokens": [407, 538, 1228, 13228, 12, 33292, 543, 11, 291, 366, 884, 445, 257, 1500, 281, 536, 498, 456, 307, 257, 2480, 1296, 2031, 293, 288, 11, 420, 498, 341, 2480, 307, 406, 4776, 13, 583, 291, 2644, 6997, 2139, 264, 3800, 11, 286, 914, 291, 2644, 584, 456, 8198, 2068, 2480, 11, 420, 264, 3513, 11, 291, 2644, 584, 456, 8198, 17340, 420, 2047, 3353, 420, 3671, 2480, 11, 291, 445, 584], "avg_logprob": -0.23394691454221125, "compression_ratio": 1.8697916666666667, "no_speech_prob": 0.0, "words": [{"start": 1101.14, "end": 1101.54, "word": " So", "probability": 0.492431640625}, {"start": 1101.54, "end": 1101.72, "word": " by", "probability": 0.68212890625}, {"start": 1101.72, "end": 1101.94, "word": " using", "probability": 0.931640625}, {"start": 1101.94, "end": 1102.14, "word": " chi", "probability": 0.40185546875}, {"start": 1102.14, "end": 1102.56, "word": "-square,", "probability": 0.8688151041666666}, {"start": 1102.68, "end": 1102.84, "word": " you", "probability": 0.93017578125}, {"start": 1102.84, "end": 1103.0, "word": " are", "probability": 0.8310546875}, {"start": 1103.0, "end": 1103.3, "word": " doing", "probability": 0.384521484375}, {"start": 1103.3, "end": 1103.7, "word": " just", "probability": 0.87060546875}, {"start": 1103.7, "end": 1103.9, "word": " a", "probability": 0.77783203125}, {"start": 1103.9, "end": 1104.08, "word": " test", "probability": 0.89501953125}, {"start": 1104.08, "end": 1104.26, "word": " to", "probability": 0.93896484375}, {"start": 1104.26, "end": 1104.5, "word": " see", "probability": 0.75830078125}, {"start": 1104.5, "end": 1105.32, "word": " if", "probability": 0.8935546875}, {"start": 1105.32, "end": 1105.9, "word": " there", "probability": 0.90625}, {"start": 1105.9, "end": 1106.06, "word": " is", 
"probability": 0.90478515625}, {"start": 1106.06, "end": 1106.28, "word": " a", "probability": 0.98046875}, {"start": 1106.28, "end": 1106.96, "word": " relationship", "probability": 0.9052734375}, {"start": 1106.96, "end": 1107.54, "word": " between", "probability": 0.8916015625}, {"start": 1107.54, "end": 1108.2, "word": " x", "probability": 0.666015625}, {"start": 1108.2, "end": 1108.38, "word": " and", "probability": 0.939453125}, {"start": 1108.38, "end": 1108.7, "word": " y,", "probability": 0.99462890625}, {"start": 1109.36, "end": 1109.56, "word": " or", "probability": 0.93115234375}, {"start": 1109.56, "end": 1109.84, "word": " if", "probability": 0.9501953125}, {"start": 1109.84, "end": 1110.08, "word": " this", "probability": 0.93408203125}, {"start": 1110.08, "end": 1110.5, "word": " relationship", "probability": 0.646484375}, {"start": 1110.5, "end": 1110.66, "word": " is", "probability": 0.93994140625}, {"start": 1110.66, "end": 1110.8, "word": " not", "probability": 0.8515625}, {"start": 1110.8, "end": 1111.28, "word": " significant.", "probability": 0.8349609375}, {"start": 1112.08, "end": 1112.34, "word": " But", "probability": 0.91357421875}, {"start": 1112.34, "end": 1112.46, "word": " you", "probability": 0.94091796875}, {"start": 1112.46, "end": 1112.76, "word": " cannot", "probability": 0.82958984375}, {"start": 1112.76, "end": 1113.34, "word": " determine", "probability": 0.90771484375}, {"start": 1113.34, "end": 1113.94, "word": " either", "probability": 0.927734375}, {"start": 1113.94, "end": 1114.62, "word": " the", "probability": 0.8916015625}, {"start": 1114.62, "end": 1115.22, "word": " strength,", "probability": 0.86669921875}, {"start": 1115.82, "end": 1115.94, "word": " I", "probability": 0.88232421875}, {"start": 1115.94, "end": 1116.06, "word": " mean", "probability": 0.966796875}, {"start": 1116.06, "end": 1116.16, "word": " you", "probability": 0.7978515625}, {"start": 1116.16, "end": 1116.36, "word": " cannot", "probability": 
0.861328125}, {"start": 1116.36, "end": 1116.64, "word": " say", "probability": 0.93505859375}, {"start": 1116.64, "end": 1116.86, "word": " there", "probability": 0.806640625}, {"start": 1116.86, "end": 1117.24, "word": " exists", "probability": 0.83154296875}, {"start": 1117.24, "end": 1117.74, "word": " strong", "probability": 0.57275390625}, {"start": 1117.74, "end": 1118.3, "word": " relationship,", "probability": 0.90478515625}, {"start": 1119.04, "end": 1119.56, "word": " or", "probability": 0.93994140625}, {"start": 1119.56, "end": 1119.94, "word": " the", "probability": 0.90576171875}, {"start": 1119.94, "end": 1120.54, "word": " direction,", "probability": 0.97216796875}, {"start": 1120.8, "end": 1120.92, "word": " you", "probability": 0.9521484375}, {"start": 1120.92, "end": 1121.1, "word": " cannot", "probability": 0.875}, {"start": 1121.1, "end": 1121.32, "word": " say", "probability": 0.93896484375}, {"start": 1121.32, "end": 1121.48, "word": " there", "probability": 0.8740234375}, {"start": 1121.48, "end": 1121.88, "word": " exists", "probability": 0.8486328125}, {"start": 1121.88, "end": 1122.98, "word": " inverse", "probability": 0.74267578125}, {"start": 1122.98, "end": 1123.7, "word": " or", "probability": 0.68701171875}, {"start": 1123.7, "end": 1124.18, "word": " direct", "probability": 0.90234375}, {"start": 1124.18, "end": 1124.7, "word": " positive", "probability": 0.65576171875}, {"start": 1124.7, "end": 1125.12, "word": " or", "probability": 0.94775390625}, {"start": 1125.12, "end": 1125.48, "word": " negative", "probability": 0.9482421875}, {"start": 1125.48, "end": 1125.92, "word": " relationship,", "probability": 0.6298828125}, {"start": 1126.08, "end": 1126.22, "word": " you", "probability": 0.9482421875}, {"start": 1126.22, "end": 1126.52, "word": " just", "probability": 0.86572265625}, {"start": 1126.52, "end": 1126.86, "word": " say", "probability": 0.5458984375}], "temperature": 1.0}, {"id": 43, "seek": 114959, "start": 1127.42, 
"end": 1149.6, "text": " there exists a relationship between x and y. So one more time, my decision rule is, if the value of the chi-square greater than chi-square alpha, then we reject the null hypothesis. So there is also another way to reject by using b-value approach.", "tokens": [456, 8198, 257, 2480, 1296, 2031, 293, 288, 13, 407, 472, 544, 565, 11, 452, 3537, 4978, 307, 11, 498, 264, 2158, 295, 264, 13228, 12, 33292, 543, 5044, 813, 13228, 12, 33292, 543, 8961, 11, 550, 321, 8248, 264, 18184, 17291, 13, 407, 456, 307, 611, 1071, 636, 281, 8248, 538, 1228, 272, 12, 29155, 3109, 13], "avg_logprob": -0.20722987944796933, "compression_ratio": 1.5308641975308641, "no_speech_prob": 0.0, "words": [{"start": 1127.42, "end": 1127.88, "word": " there", "probability": 0.489013671875}, {"start": 1127.88, "end": 1128.36, "word": " exists", "probability": 0.69287109375}, {"start": 1128.36, "end": 1128.9, "word": " a", "probability": 0.9599609375}, {"start": 1128.9, "end": 1129.44, "word": " relationship", "probability": 0.869140625}, {"start": 1129.44, "end": 1130.02, "word": " between", "probability": 0.8984375}, {"start": 1130.02, "end": 1130.48, "word": " x", "probability": 0.5810546875}, {"start": 1130.48, "end": 1130.66, "word": " and", "probability": 0.9443359375}, {"start": 1130.66, "end": 1130.98, "word": " y.", "probability": 0.986328125}, {"start": 1132.62, "end": 1132.82, "word": " So", "probability": 0.64306640625}, {"start": 1132.82, "end": 1132.98, "word": " one", "probability": 0.59521484375}, {"start": 1132.98, "end": 1133.14, "word": " more", "probability": 0.93798828125}, {"start": 1133.14, "end": 1133.5, "word": " time,", "probability": 0.8857421875}, {"start": 1134.3, "end": 1134.52, "word": " my", "probability": 0.92578125}, {"start": 1134.52, "end": 1134.88, "word": " decision", "probability": 0.89501953125}, {"start": 1134.88, "end": 1135.18, "word": " rule", "probability": 0.89794921875}, {"start": 1135.18, "end": 1135.52, "word": " is,", 
"probability": 0.94677734375}, {"start": 1136.1, "end": 1136.34, "word": " if", "probability": 0.94189453125}, {"start": 1136.34, "end": 1136.54, "word": " the", "probability": 0.912109375}, {"start": 1136.54, "end": 1136.82, "word": " value", "probability": 0.98193359375}, {"start": 1136.82, "end": 1136.98, "word": " of", "probability": 0.96826171875}, {"start": 1136.98, "end": 1137.1, "word": " the", "probability": 0.472900390625}, {"start": 1137.1, "end": 1137.3, "word": " chi", "probability": 0.662109375}, {"start": 1137.3, "end": 1137.78, "word": "-square", "probability": 0.8986002604166666}, {"start": 1137.78, "end": 1140.08, "word": " greater", "probability": 0.525390625}, {"start": 1140.08, "end": 1140.42, "word": " than", "probability": 0.9462890625}, {"start": 1140.42, "end": 1140.66, "word": " chi", "probability": 0.7646484375}, {"start": 1140.66, "end": 1141.0, "word": "-square", "probability": 0.9580078125}, {"start": 1141.0, "end": 1141.3, "word": " alpha,", "probability": 0.8408203125}, {"start": 1141.56, "end": 1141.84, "word": " then", "probability": 0.85986328125}, {"start": 1141.84, "end": 1142.02, "word": " we", "probability": 0.94580078125}, {"start": 1142.02, "end": 1142.38, "word": " reject", "probability": 0.92138671875}, {"start": 1142.38, "end": 1142.56, "word": " the", "probability": 0.8369140625}, {"start": 1142.56, "end": 1142.66, "word": " null", "probability": 0.98828125}, {"start": 1142.66, "end": 1143.14, "word": " hypothesis.", "probability": 0.8857421875}, {"start": 1144.26, "end": 1144.58, "word": " So", "probability": 0.83837890625}, {"start": 1144.58, "end": 1145.06, "word": " there", "probability": 0.6708984375}, {"start": 1145.06, "end": 1145.2, "word": " is", "probability": 0.92041015625}, {"start": 1145.2, "end": 1145.48, "word": " also", "probability": 0.8544921875}, {"start": 1145.48, "end": 1145.84, "word": " another", "probability": 0.92041015625}, {"start": 1145.84, "end": 1146.14, "word": " way", "probability": 
0.955078125}, {"start": 1146.14, "end": 1146.3, "word": " to", "probability": 0.9677734375}, {"start": 1146.3, "end": 1146.68, "word": " reject", "probability": 0.935546875}, {"start": 1146.68, "end": 1148.28, "word": " by", "probability": 0.50732421875}, {"start": 1148.28, "end": 1148.74, "word": " using", "probability": 0.92919921875}, {"start": 1148.74, "end": 1148.98, "word": " b", "probability": 0.432373046875}, {"start": 1148.98, "end": 1149.3, "word": "-value", "probability": 0.80712890625}, {"start": 1149.3, "end": 1149.6, "word": " approach.", "probability": 0.794921875}], "temperature": 1.0}, {"id": 44, "seek": 117601, "start": 1152.65, "end": 1176.01, "text": " B value in this case, complete deprivation of chi-square greater than chi-square statistic, and always we reject H0 if this B value is smaller than alpha. So as we mentioned again before, always we reject H0 if B value is smaller than alpha.", "tokens": [363, 2158, 294, 341, 1389, 11, 3566, 27095, 11116, 295, 13228, 12, 33292, 543, 5044, 813, 13228, 12, 33292, 543, 29588, 11, 293, 1009, 321, 8248, 389, 15, 498, 341, 363, 2158, 307, 4356, 813, 8961, 13, 407, 382, 321, 2835, 797, 949, 11, 1009, 321, 8248, 389, 15, 498, 363, 2158, 307, 4356, 813, 8961, 13], "avg_logprob": -0.2602370720485161, "compression_ratio": 1.7163120567375887, "no_speech_prob": 0.0, "words": [{"start": 1152.65, "end": 1152.89, "word": " B", "probability": 0.359130859375}, {"start": 1152.89, "end": 1153.23, "word": " value", "probability": 0.7705078125}, {"start": 1153.23, "end": 1153.39, "word": " in", "probability": 0.76806640625}, {"start": 1153.39, "end": 1153.57, "word": " this", "probability": 0.94970703125}, {"start": 1153.57, "end": 1153.89, "word": " case,", "probability": 0.92041015625}, {"start": 1154.99, "end": 1155.31, "word": " complete", "probability": 0.30517578125}, {"start": 1155.31, "end": 1155.81, "word": " deprivation", "probability": 0.59912109375}, {"start": 1155.81, "end": 1156.01, "word": " of", 
"probability": 0.78466796875}, {"start": 1156.01, "end": 1156.15, "word": " chi", "probability": 0.1395263671875}, {"start": 1156.15, "end": 1156.43, "word": "-square", "probability": 0.8326822916666666}, {"start": 1156.43, "end": 1156.83, "word": " greater", "probability": 0.6474609375}, {"start": 1156.83, "end": 1157.19, "word": " than", "probability": 0.9453125}, {"start": 1157.19, "end": 1157.41, "word": " chi", "probability": 0.759765625}, {"start": 1157.41, "end": 1157.67, "word": "-square", "probability": 0.9529622395833334}, {"start": 1157.67, "end": 1158.19, "word": " statistic,", "probability": 0.54248046875}, {"start": 1159.35, "end": 1159.63, "word": " and", "probability": 0.90234375}, {"start": 1159.63, "end": 1160.27, "word": " always", "probability": 0.892578125}, {"start": 1160.27, "end": 1162.79, "word": " we", "probability": 0.70654296875}, {"start": 1162.79, "end": 1163.21, "word": " reject", "probability": 0.88916015625}, {"start": 1163.21, "end": 1163.63, "word": " H0", "probability": 0.701171875}, {"start": 1163.63, "end": 1163.93, "word": " if", "probability": 0.87109375}, {"start": 1163.93, "end": 1164.17, "word": " this", "probability": 0.921875}, {"start": 1164.17, "end": 1164.35, "word": " B", "probability": 0.86865234375}, {"start": 1164.35, "end": 1164.55, "word": " value", "probability": 0.9228515625}, {"start": 1164.55, "end": 1164.65, "word": " is", "probability": 0.81689453125}, {"start": 1164.65, "end": 1164.87, "word": " smaller", "probability": 0.873046875}, {"start": 1164.87, "end": 1165.09, "word": " than", "probability": 0.9521484375}, {"start": 1165.09, "end": 1165.35, "word": " alpha.", "probability": 0.8115234375}, {"start": 1165.63, "end": 1166.19, "word": " So", "probability": 0.8486328125}, {"start": 1166.19, "end": 1166.41, "word": " as", "probability": 0.703125}, {"start": 1166.41, "end": 1166.53, "word": " we", "probability": 0.9560546875}, {"start": 1166.53, "end": 1166.85, "word": " mentioned", "probability": 
0.83203125}, {"start": 1166.85, "end": 1167.17, "word": " again", "probability": 0.8642578125}, {"start": 1167.17, "end": 1167.61, "word": " before,", "probability": 0.85498046875}, {"start": 1168.13, "end": 1168.63, "word": " always", "probability": 0.88671875}, {"start": 1168.63, "end": 1169.99, "word": " we", "probability": 0.92041015625}, {"start": 1169.99, "end": 1170.65, "word": " reject", "probability": 0.912109375}, {"start": 1170.65, "end": 1172.61, "word": " H0", "probability": 0.994140625}, {"start": 1172.61, "end": 1174.13, "word": " if", "probability": 0.89794921875}, {"start": 1174.13, "end": 1174.37, "word": " B", "probability": 0.97021484375}, {"start": 1174.37, "end": 1174.75, "word": " value", "probability": 0.96630859375}, {"start": 1174.75, "end": 1175.13, "word": " is", "probability": 0.9521484375}, {"start": 1175.13, "end": 1175.49, "word": " smaller", "probability": 0.86376953125}, {"start": 1175.49, "end": 1175.77, "word": " than", "probability": 0.9501953125}, {"start": 1175.77, "end": 1176.01, "word": " alpha.", "probability": 0.912109375}], "temperature": 1.0}, {"id": 45, "seek": 120771, "start": 1179.29, "end": 1207.71, "text": " So again, my decision rule is, we reject the null hypothesis if the value of the statistic lies in the rejection region. And again, there is only one rejection region in this case, because chi-square is always positive. If you look at this formula, F observed minus F expected squared, so it's positive. 
F is also positive, so chi-square is always positive.", "tokens": [407, 797, 11, 452, 3537, 4978, 307, 11, 321, 8248, 264, 18184, 17291, 498, 264, 2158, 295, 264, 29588, 9134, 294, 264, 26044, 4458, 13, 400, 797, 11, 456, 307, 787, 472, 26044, 4458, 294, 341, 1389, 11, 570, 13228, 12, 33292, 543, 307, 1009, 3353, 13, 759, 291, 574, 412, 341, 8513, 11, 479, 13095, 3175, 479, 5176, 8889, 11, 370, 309, 311, 3353, 13, 479, 307, 611, 3353, 11, 370, 13228, 12, 33292, 543, 307, 1009, 3353, 13], "avg_logprob": -0.20331790417800716, "compression_ratio": 1.79, "no_speech_prob": 0.0, "words": [{"start": 1179.29, "end": 1179.97, "word": " So", "probability": 0.344482421875}, {"start": 1179.97, "end": 1180.65, "word": " again,", "probability": 0.79638671875}, {"start": 1181.23, "end": 1181.49, "word": " my", "probability": 0.9453125}, {"start": 1181.49, "end": 1181.83, "word": " decision", "probability": 0.87841796875}, {"start": 1181.83, "end": 1182.09, "word": " rule", "probability": 0.90869140625}, {"start": 1182.09, "end": 1182.45, "word": " is,", "probability": 0.94384765625}, {"start": 1182.79, "end": 1182.89, "word": " we", "probability": 0.89111328125}, {"start": 1182.89, "end": 1183.27, "word": " reject", "probability": 0.900390625}, {"start": 1183.27, "end": 1183.41, "word": " the", "probability": 0.7822265625}, {"start": 1183.41, "end": 1183.53, "word": " null", "probability": 0.9462890625}, {"start": 1183.53, "end": 1183.97, "word": " hypothesis", "probability": 0.8427734375}, {"start": 1183.97, "end": 1184.51, "word": " if", "probability": 0.849609375}, {"start": 1184.51, "end": 1184.65, "word": " the", "probability": 0.90673828125}, {"start": 1184.65, "end": 1184.89, "word": " value", "probability": 0.921875}, {"start": 1184.89, "end": 1185.01, "word": " of", "probability": 0.9423828125}, {"start": 1185.01, "end": 1185.11, "word": " the", "probability": 0.755859375}, {"start": 1185.11, "end": 1185.55, "word": " statistic", "probability": 0.81982421875}, {"start": 
1185.55, "end": 1187.11, "word": " lies", "probability": 0.8818359375}, {"start": 1187.11, "end": 1187.37, "word": " in", "probability": 0.94580078125}, {"start": 1187.37, "end": 1187.51, "word": " the", "probability": 0.9033203125}, {"start": 1187.51, "end": 1187.81, "word": " rejection", "probability": 0.96484375}, {"start": 1187.81, "end": 1188.21, "word": " region.", "probability": 0.95263671875}, {"start": 1188.41, "end": 1188.57, "word": " And", "probability": 0.9306640625}, {"start": 1188.57, "end": 1189.01, "word": " again,", "probability": 0.91650390625}, {"start": 1189.19, "end": 1189.39, "word": " there", "probability": 0.90234375}, {"start": 1189.39, "end": 1189.57, "word": " is", "probability": 0.9189453125}, {"start": 1189.57, "end": 1190.07, "word": " only", "probability": 0.9287109375}, {"start": 1190.07, "end": 1190.61, "word": " one", "probability": 0.8916015625}, {"start": 1190.61, "end": 1191.19, "word": " rejection", "probability": 0.96875}, {"start": 1191.19, "end": 1191.71, "word": " region", "probability": 0.95068359375}, {"start": 1191.71, "end": 1192.35, "word": " in", "probability": 0.876953125}, {"start": 1192.35, "end": 1192.57, "word": " this", "probability": 0.9453125}, {"start": 1192.57, "end": 1192.95, "word": " case,", "probability": 0.91845703125}, {"start": 1193.23, "end": 1193.67, "word": " because", "probability": 0.90087890625}, {"start": 1193.67, "end": 1193.97, "word": " chi", "probability": 0.373779296875}, {"start": 1193.97, "end": 1194.35, "word": "-square", "probability": 0.7568359375}, {"start": 1194.35, "end": 1194.61, "word": " is", "probability": 0.9482421875}, {"start": 1194.61, "end": 1195.17, "word": " always", "probability": 0.89404296875}, {"start": 1195.17, "end": 1195.59, "word": " positive.", "probability": 0.92626953125}, {"start": 1196.35, "end": 1196.53, "word": " If", "probability": 0.96533203125}, {"start": 1196.53, "end": 1196.65, "word": " you", "probability": 0.9609375}, {"start": 1196.65, "end": 
1196.85, "word": " look", "probability": 0.9677734375}, {"start": 1196.85, "end": 1196.99, "word": " at", "probability": 0.96337890625}, {"start": 1196.99, "end": 1197.21, "word": " this", "probability": 0.9443359375}, {"start": 1197.21, "end": 1197.65, "word": " formula,", "probability": 0.91650390625}, {"start": 1198.49, "end": 1198.77, "word": " F", "probability": 0.74365234375}, {"start": 1198.77, "end": 1199.29, "word": " observed", "probability": 0.81689453125}, {"start": 1199.29, "end": 1199.63, "word": " minus", "probability": 0.955078125}, {"start": 1199.63, "end": 1199.97, "word": " F", "probability": 0.96435546875}, {"start": 1199.97, "end": 1200.39, "word": " expected", "probability": 0.60546875}, {"start": 1200.39, "end": 1201.11, "word": " squared,", "probability": 0.83154296875}, {"start": 1201.91, "end": 1202.11, "word": " so", "probability": 0.61181640625}, {"start": 1202.11, "end": 1202.25, "word": " it's", "probability": 0.9365234375}, {"start": 1202.25, "end": 1202.75, "word": " positive.", "probability": 0.93359375}, {"start": 1204.13, "end": 1204.39, "word": " F", "probability": 0.6318359375}, {"start": 1204.39, "end": 1204.57, "word": " is", "probability": 0.2259521484375}, {"start": 1204.57, "end": 1205.07, "word": " also", "probability": 0.87451171875}, {"start": 1205.07, "end": 1205.51, "word": " positive,", "probability": 0.93359375}, {"start": 1205.83, "end": 1206.01, "word": " so", "probability": 0.95068359375}, {"start": 1206.01, "end": 1206.27, "word": " chi", "probability": 0.8193359375}, {"start": 1206.27, "end": 1206.53, "word": "-square", "probability": 0.88134765625}, {"start": 1206.53, "end": 1206.73, "word": " is", "probability": 0.939453125}, {"start": 1206.73, "end": 1207.25, "word": " always", "probability": 0.89404296875}, {"start": 1207.25, "end": 1207.71, "word": " positive.", "probability": 0.93359375}], "temperature": 1.0}, {"id": 46, "seek": 123486, "start": 1210.19, "end": 1234.87, "text": " Now let's see how can we 
compute the value of the chi-square statistic. If we go back a little bit to the data we have, in this case there are one in twenty females and twelve out of them are left-handed.", "tokens": [823, 718, 311, 536, 577, 393, 321, 14722, 264, 2158, 295, 264, 13228, 12, 33292, 543, 29588, 13, 759, 321, 352, 646, 257, 707, 857, 281, 264, 1412, 321, 362, 11, 294, 341, 1389, 456, 366, 472, 294, 7699, 21529, 293, 14390, 484, 295, 552, 366, 1411, 12, 25407, 13], "avg_logprob": -0.17356004785088933, "compression_ratio": 1.4335664335664335, "no_speech_prob": 0.0, "words": [{"start": 1210.19, "end": 1210.45, "word": " Now", "probability": 0.91162109375}, {"start": 1210.45, "end": 1210.71, "word": " let's", "probability": 0.797607421875}, {"start": 1210.71, "end": 1210.83, "word": " see", "probability": 0.912109375}, {"start": 1210.83, "end": 1210.95, "word": " how", "probability": 0.92578125}, {"start": 1210.95, "end": 1211.15, "word": " can", "probability": 0.5341796875}, {"start": 1211.15, "end": 1211.29, "word": " we", "probability": 0.95263671875}, {"start": 1211.29, "end": 1211.85, "word": " compute", "probability": 0.8935546875}, {"start": 1211.85, "end": 1213.99, "word": " the", "probability": 0.8740234375}, {"start": 1213.99, "end": 1214.45, "word": " value", "probability": 0.982421875}, {"start": 1214.45, "end": 1214.87, "word": " of", "probability": 0.96875}, {"start": 1214.87, "end": 1215.01, "word": " the", "probability": 0.89306640625}, {"start": 1215.01, "end": 1215.21, "word": " chi", "probability": 0.6318359375}, {"start": 1215.21, "end": 1215.49, "word": "-square", "probability": 0.8307291666666666}, {"start": 1215.49, "end": 1216.03, "word": " statistic.", "probability": 0.69482421875}, {"start": 1217.81, "end": 1218.09, "word": " If", "probability": 0.94580078125}, {"start": 1218.09, "end": 1218.27, "word": " we", "probability": 0.94189453125}, {"start": 1218.27, "end": 1218.45, "word": " go", "probability": 0.95947265625}, {"start": 1218.45, "end": 1218.83, 
"word": " back", "probability": 0.87744140625}, {"start": 1218.83, "end": 1219.33, "word": " a", "probability": 0.7138671875}, {"start": 1219.33, "end": 1219.53, "word": " little", "probability": 0.8623046875}, {"start": 1219.53, "end": 1219.79, "word": " bit", "probability": 0.943359375}, {"start": 1219.79, "end": 1219.93, "word": " to", "probability": 0.9560546875}, {"start": 1219.93, "end": 1220.07, "word": " the", "probability": 0.92138671875}, {"start": 1220.07, "end": 1220.35, "word": " data", "probability": 0.93603515625}, {"start": 1220.35, "end": 1220.53, "word": " we", "probability": 0.9453125}, {"start": 1220.53, "end": 1220.87, "word": " have,", "probability": 0.94140625}, {"start": 1223.07, "end": 1223.75, "word": " in", "probability": 0.92724609375}, {"start": 1223.75, "end": 1224.09, "word": " this", "probability": 0.94677734375}, {"start": 1224.09, "end": 1224.55, "word": " case", "probability": 0.91650390625}, {"start": 1224.55, "end": 1225.71, "word": " there", "probability": 0.62158203125}, {"start": 1225.71, "end": 1226.23, "word": " are", "probability": 0.9443359375}, {"start": 1226.23, "end": 1228.31, "word": " one", "probability": 0.44921875}, {"start": 1228.31, "end": 1228.43, "word": " in", "probability": 0.76171875}, {"start": 1228.43, "end": 1228.63, "word": " twenty", "probability": 0.78076171875}, {"start": 1228.63, "end": 1229.19, "word": " females", "probability": 0.94384765625}, {"start": 1229.19, "end": 1230.99, "word": " and", "probability": 0.66455078125}, {"start": 1230.99, "end": 1231.51, "word": " twelve", "probability": 0.8203125}, {"start": 1231.51, "end": 1232.03, "word": " out", "probability": 0.8466796875}, {"start": 1232.03, "end": 1232.19, "word": " of", "probability": 0.97021484375}, {"start": 1232.19, "end": 1232.49, "word": " them", "probability": 0.89990234375}, {"start": 1232.49, "end": 1233.39, "word": " are", "probability": 0.927734375}, {"start": 1233.39, "end": 1233.89, "word": " left", "probability": 
0.95068359375}, {"start": 1233.89, "end": 1234.87, "word": "-handed.", "probability": 0.664794921875}], "temperature": 1.0}, {"id": 47, "seek": 126029, "start": 1239.65, "end": 1260.29, "text": " Left, right, 12, 108, 24, 156. The totals are 120, 180, 36, 264, and 300. So that's the table we have now.", "tokens": [16405, 11, 558, 11, 2272, 11, 41342, 11, 4022, 11, 2119, 21, 13, 440, 1993, 1124, 366, 10411, 11, 11971, 11, 8652, 11, 7551, 19, 11, 293, 6641, 13, 407, 300, 311, 264, 3199, 321, 362, 586, 13], "avg_logprob": -0.19471153616905212, "compression_ratio": 1.1145833333333333, "no_speech_prob": 0.0, "words": [{"start": 1239.65, "end": 1240.07, "word": " Left,", "probability": 0.256591796875}, {"start": 1240.23, "end": 1240.59, "word": " right,", "probability": 0.8056640625}, {"start": 1241.27, "end": 1242.41, "word": " 12,", "probability": 0.82080078125}, {"start": 1242.53, "end": 1242.93, "word": " 108,", "probability": 0.9482421875}, {"start": 1244.45, "end": 1245.11, "word": " 24,", "probability": 0.91259765625}, {"start": 1245.29, "end": 1246.09, "word": " 156.", "probability": 0.859130859375}, {"start": 1248.21, "end": 1249.07, "word": " The", "probability": 0.87353515625}, {"start": 1249.07, "end": 1249.59, "word": " totals", "probability": 0.929931640625}, {"start": 1249.59, "end": 1249.99, "word": " are", "probability": 0.9423828125}, {"start": 1249.99, "end": 1250.63, "word": " 120,", "probability": 0.88671875}, {"start": 1251.29, "end": 1252.21, "word": " 180,", "probability": 0.59814453125}, {"start": 1254.51, "end": 1255.41, "word": " 36,", "probability": 0.96630859375}, {"start": 1255.67, "end": 1256.57, "word": " 264,", "probability": 0.906005859375}, {"start": 1256.67, "end": 1256.83, "word": " and", "probability": 0.87060546875}, {"start": 1256.83, "end": 1257.27, "word": " 300.", "probability": 0.9765625}, {"start": 1258.35, "end": 1258.55, "word": " So", "probability": 0.947265625}, {"start": 1258.55, "end": 1258.79, "word": " that's", 
"probability": 0.929931640625}, {"start": 1258.79, "end": 1258.91, "word": " the", "probability": 0.92626953125}, {"start": 1258.91, "end": 1259.15, "word": " table", "probability": 0.86328125}, {"start": 1259.15, "end": 1259.35, "word": " we", "probability": 0.935546875}, {"start": 1259.35, "end": 1259.61, "word": " have", "probability": 0.9423828125}, {"start": 1259.61, "end": 1260.29, "word": " now.", "probability": 0.66650390625}], "temperature": 1.0}, {"id": 48, "seek": 128848, "start": 1260.92, "end": 1288.48, "text": " Let's see how can we compute the value of the chi-square statistic. The first step, compute the average proportion, the same as the one we did in chapter 10. It's called overall proportion, or pooled proportion. And B dash, in this case, is given by x1 plus x2 divided by n1 plus n2.", "tokens": [961, 311, 536, 577, 393, 321, 14722, 264, 2158, 295, 264, 13228, 12, 33292, 543, 29588, 13, 440, 700, 1823, 11, 14722, 264, 4274, 16068, 11, 264, 912, 382, 264, 472, 321, 630, 294, 7187, 1266, 13, 467, 311, 1219, 4787, 16068, 11, 420, 7005, 292, 16068, 13, 400, 363, 8240, 11, 294, 341, 1389, 11, 307, 2212, 538, 2031, 16, 1804, 2031, 17, 6666, 538, 297, 16, 1804, 297, 17, 13], "avg_logprob": -0.19627567839949098, "compression_ratio": 1.5268817204301075, "no_speech_prob": 0.0, "words": [{"start": 1260.92, "end": 1261.34, "word": " Let's", "probability": 0.85693359375}, {"start": 1261.34, "end": 1261.46, "word": " see", "probability": 0.89697265625}, {"start": 1261.46, "end": 1261.6, "word": " how", "probability": 0.931640625}, {"start": 1261.6, "end": 1261.82, "word": " can", "probability": 0.80078125}, {"start": 1261.82, "end": 1262.1, "word": " we", "probability": 0.9462890625}, {"start": 1262.1, "end": 1263.02, "word": " compute", "probability": 0.900390625}, {"start": 1263.02, "end": 1263.26, "word": " the", "probability": 0.91748046875}, {"start": 1263.26, "end": 1263.64, "word": " value", "probability": 0.982421875}, {"start": 1263.64, "end": 
1264.02, "word": " of", "probability": 0.96728515625}, {"start": 1264.02, "end": 1264.3, "word": " the", "probability": 0.91357421875}, {"start": 1264.3, "end": 1264.6, "word": " chi", "probability": 0.5224609375}, {"start": 1264.6, "end": 1264.84, "word": "-square", "probability": 0.8151041666666666}, {"start": 1264.84, "end": 1265.34, "word": " statistic.", "probability": 0.693359375}, {"start": 1266.04, "end": 1266.34, "word": " The", "probability": 0.89990234375}, {"start": 1266.34, "end": 1266.62, "word": " first", "probability": 0.876953125}, {"start": 1266.62, "end": 1266.96, "word": " step,", "probability": 0.91015625}, {"start": 1268.08, "end": 1268.52, "word": " compute", "probability": 0.896484375}, {"start": 1268.52, "end": 1268.86, "word": " the", "probability": 0.91015625}, {"start": 1268.86, "end": 1269.3, "word": " average", "probability": 0.74169921875}, {"start": 1269.3, "end": 1269.92, "word": " proportion,", "probability": 0.7763671875}, {"start": 1270.82, "end": 1271.86, "word": " the", "probability": 0.88427734375}, {"start": 1271.86, "end": 1272.04, "word": " same", "probability": 0.91015625}, {"start": 1272.04, "end": 1272.18, "word": " as", "probability": 0.95654296875}, {"start": 1272.18, "end": 1272.34, "word": " the", "probability": 0.896484375}, {"start": 1272.34, "end": 1272.5, "word": " one", "probability": 0.93603515625}, {"start": 1272.5, "end": 1272.7, "word": " we", "probability": 0.94482421875}, {"start": 1272.7, "end": 1272.9, "word": " did", "probability": 0.958984375}, {"start": 1272.9, "end": 1273.1, "word": " in", "probability": 0.93115234375}, {"start": 1273.1, "end": 1273.32, "word": " chapter", "probability": 0.6630859375}, {"start": 1273.32, "end": 1273.76, "word": " 10.", "probability": 0.58837890625}, {"start": 1274.36, "end": 1274.78, "word": " It's", "probability": 0.936279296875}, {"start": 1274.78, "end": 1275.06, "word": " called", "probability": 0.86083984375}, {"start": 1275.06, "end": 1275.66, "word": " 
overall", "probability": 0.70263671875}, {"start": 1275.66, "end": 1276.24, "word": " proportion,", "probability": 0.822265625}, {"start": 1276.5, "end": 1276.62, "word": " or", "probability": 0.89453125}, {"start": 1276.62, "end": 1277.16, "word": " pooled", "probability": 0.6357421875}, {"start": 1277.16, "end": 1277.7, "word": " proportion.", "probability": 0.7216796875}, {"start": 1278.32, "end": 1278.54, "word": " And", "probability": 0.92724609375}, {"start": 1278.54, "end": 1278.76, "word": " B", "probability": 0.72509765625}, {"start": 1278.76, "end": 1279.12, "word": " dash,", "probability": 0.68310546875}, {"start": 1280.2, "end": 1280.7, "word": " in", "probability": 0.93408203125}, {"start": 1280.7, "end": 1280.92, "word": " this", "probability": 0.947265625}, {"start": 1280.92, "end": 1281.3, "word": " case,", "probability": 0.9150390625}, {"start": 1282.82, "end": 1283.1, "word": " is", "probability": 0.92578125}, {"start": 1283.1, "end": 1283.38, "word": " given", "probability": 0.900390625}, {"start": 1283.38, "end": 1283.9, "word": " by", "probability": 0.970703125}, {"start": 1283.9, "end": 1286.52, "word": " x1", "probability": 0.77880859375}, {"start": 1286.52, "end": 1286.74, "word": " plus", "probability": 0.8662109375}, {"start": 1286.74, "end": 1287.12, "word": " x2", "probability": 0.997314453125}, {"start": 1287.12, "end": 1287.38, "word": " divided", "probability": 0.75927734375}, {"start": 1287.38, "end": 1287.54, "word": " by", "probability": 0.9677734375}, {"start": 1287.54, "end": 1287.9, "word": " n1", "probability": 0.9443359375}, {"start": 1287.9, "end": 1288.2, "word": " plus", "probability": 0.95556640625}, {"start": 1288.2, "end": 1288.48, "word": " n2.", "probability": 0.997802734375}], "temperature": 1.0}, {"id": 49, "seek": 131627, "start": 1289.67, "end": 1316.27, "text": " in left-handed, either males or females. In this sample, there are 12 females, 12 left-handed females and 24 males. 
So 12 plus 24 divided by 1 plus 8 is 2. There are 120 females and 180 females.", "tokens": [294, 1411, 12, 25407, 11, 2139, 20776, 420, 21529, 13, 682, 341, 6889, 11, 456, 366, 2272, 21529, 11, 2272, 1411, 12, 25407, 21529, 293, 4022, 20776, 13, 407, 2272, 1804, 4022, 6666, 538, 502, 1804, 1649, 307, 568, 13, 821, 366, 10411, 21529, 293, 11971, 21529, 13], "avg_logprob": -0.24457908649833834, "compression_ratio": 1.5853658536585367, "no_speech_prob": 0.0, "words": [{"start": 1289.67, "end": 1289.99, "word": " in", "probability": 0.326171875}, {"start": 1289.99, "end": 1290.27, "word": " left", "probability": 0.88330078125}, {"start": 1290.27, "end": 1290.91, "word": "-handed,", "probability": 0.749755859375}, {"start": 1291.27, "end": 1291.81, "word": " either", "probability": 0.9228515625}, {"start": 1291.81, "end": 1292.21, "word": " males", "probability": 0.92626953125}, {"start": 1292.21, "end": 1292.43, "word": " or", "probability": 0.9521484375}, {"start": 1292.43, "end": 1292.89, "word": " females.", "probability": 0.93798828125}, {"start": 1293.59, "end": 1293.79, "word": " In", "probability": 0.92724609375}, {"start": 1293.79, "end": 1294.11, "word": " this", "probability": 0.9443359375}, {"start": 1294.11, "end": 1294.69, "word": " sample,", "probability": 0.81787109375}, {"start": 1294.99, "end": 1295.15, "word": " there", "probability": 0.91162109375}, {"start": 1295.15, "end": 1295.63, "word": " are", "probability": 0.94189453125}, {"start": 1295.63, "end": 1296.79, "word": " 12", "probability": 0.67626953125}, {"start": 1296.79, "end": 1298.35, "word": " females,", "probability": 0.9521484375}, {"start": 1299.33, "end": 1299.67, "word": " 12", "probability": 0.927734375}, {"start": 1299.67, "end": 1300.05, "word": " left", "probability": 0.93310546875}, {"start": 1300.05, "end": 1300.37, "word": "-handed", "probability": 0.90087890625}, {"start": 1300.37, "end": 1300.91, "word": " females", "probability": 0.9375}, {"start": 1300.91, "end": 1301.49, 
"word": " and", "probability": 0.57177734375}, {"start": 1301.49, "end": 1302.05, "word": " 24", "probability": 0.97412109375}, {"start": 1302.05, "end": 1302.47, "word": " males.", "probability": 0.93408203125}, {"start": 1302.67, "end": 1302.87, "word": " So", "probability": 0.84423828125}, {"start": 1302.87, "end": 1304.55, "word": " 12", "probability": 0.51123046875}, {"start": 1304.55, "end": 1305.05, "word": " plus", "probability": 0.865234375}, {"start": 1305.05, "end": 1306.99, "word": " 24", "probability": 0.9736328125}, {"start": 1306.99, "end": 1309.43, "word": " divided", "probability": 0.4775390625}, {"start": 1309.43, "end": 1309.87, "word": " by", "probability": 0.9697265625}, {"start": 1309.87, "end": 1311.01, "word": " 1", "probability": 0.21484375}, {"start": 1311.01, "end": 1311.37, "word": " plus", "probability": 0.93994140625}, {"start": 1311.37, "end": 1311.65, "word": " 8", "probability": 0.77734375}, {"start": 1311.65, "end": 1311.73, "word": " is", "probability": 0.282958984375}, {"start": 1311.73, "end": 1311.85, "word": " 2.", "probability": 0.82763671875}, {"start": 1312.01, "end": 1312.25, "word": " There", "probability": 0.7265625}, {"start": 1312.25, "end": 1312.49, "word": " are", "probability": 0.94384765625}, {"start": 1312.49, "end": 1312.97, "word": " 120", "probability": 0.87646484375}, {"start": 1312.97, "end": 1313.63, "word": " females", "probability": 0.9443359375}, {"start": 1313.63, "end": 1315.33, "word": " and", "probability": 0.88671875}, {"start": 1315.33, "end": 1315.77, "word": " 180", "probability": 0.921875}, {"start": 1315.77, "end": 1316.27, "word": " females.", "probability": 0.9111328125}], "temperature": 1.0}, {"id": 50, "seek": 134772, "start": 1318.1, "end": 1347.72, "text": " So overall proportion 12 plus 24, which is actually this total 36, divided by overall total or the grand total, which is 300. So the formula is straightforward. Just suppose I am interested in left-handed. 
So overall proportion for left-handed equals 36 divided by 300.", "tokens": [407, 4787, 16068, 2272, 1804, 4022, 11, 597, 307, 767, 341, 3217, 8652, 11, 6666, 538, 4787, 3217, 420, 264, 2697, 3217, 11, 597, 307, 6641, 13, 407, 264, 8513, 307, 15325, 13, 1449, 7297, 286, 669, 3102, 294, 1411, 12, 25407, 13, 407, 4787, 16068, 337, 1411, 12, 25407, 6915, 8652, 6666, 538, 6641, 13], "avg_logprob": -0.2225877192982456, "compression_ratio": 1.5976331360946745, "no_speech_prob": 0.0, "words": [{"start": 1318.1, "end": 1318.34, "word": " So", "probability": 0.775390625}, {"start": 1318.34, "end": 1318.78, "word": " overall", "probability": 0.56201171875}, {"start": 1318.78, "end": 1319.42, "word": " proportion", "probability": 0.77197265625}, {"start": 1319.42, "end": 1320.66, "word": " 12", "probability": 0.490478515625}, {"start": 1320.66, "end": 1321.18, "word": " plus", "probability": 0.476806640625}, {"start": 1321.18, "end": 1321.68, "word": " 24,", "probability": 0.96630859375}, {"start": 1321.8, "end": 1321.96, "word": " which", "probability": 0.93603515625}, {"start": 1321.96, "end": 1322.14, "word": " is", "probability": 0.93798828125}, {"start": 1322.14, "end": 1322.68, "word": " actually", "probability": 0.8740234375}, {"start": 1322.68, "end": 1324.48, "word": " this", "probability": 0.86328125}, {"start": 1324.48, "end": 1324.86, "word": " total", "probability": 0.744140625}, {"start": 1324.86, "end": 1325.56, "word": " 36,", "probability": 0.72607421875}, {"start": 1326.02, "end": 1328.0, "word": " divided", "probability": 0.6669921875}, {"start": 1328.0, "end": 1328.22, "word": " by", "probability": 0.96630859375}, {"start": 1328.22, "end": 1328.74, "word": " overall", "probability": 0.6767578125}, {"start": 1328.74, "end": 1329.14, "word": " total", "probability": 0.7861328125}, {"start": 1329.14, "end": 1329.78, "word": " or", "probability": 0.5654296875}, {"start": 1329.78, "end": 1330.02, "word": " the", "probability": 0.451416015625}, {"start": 
1330.02, "end": 1330.32, "word": " grand", "probability": 0.77734375}, {"start": 1330.32, "end": 1330.6, "word": " total,", "probability": 0.888671875}, {"start": 1330.8, "end": 1330.94, "word": " which", "probability": 0.9375}, {"start": 1330.94, "end": 1331.08, "word": " is", "probability": 0.94775390625}, {"start": 1331.08, "end": 1331.58, "word": " 300.", "probability": 0.97216796875}, {"start": 1332.5, "end": 1333.34, "word": " So", "probability": 0.95166015625}, {"start": 1333.34, "end": 1333.56, "word": " the", "probability": 0.87890625}, {"start": 1333.56, "end": 1333.94, "word": " formula", "probability": 0.93310546875}, {"start": 1333.94, "end": 1334.44, "word": " is", "probability": 0.93310546875}, {"start": 1334.44, "end": 1334.92, "word": " straightforward.", "probability": 0.63818359375}, {"start": 1335.14, "end": 1335.44, "word": " Just", "probability": 0.62109375}, {"start": 1335.44, "end": 1336.56, "word": " suppose", "probability": 0.806640625}, {"start": 1336.56, "end": 1336.72, "word": " I", "probability": 0.9775390625}, {"start": 1336.72, "end": 1336.84, "word": " am", "probability": 0.64453125}, {"start": 1336.84, "end": 1337.16, "word": " interested", "probability": 0.8623046875}, {"start": 1337.16, "end": 1337.4, "word": " in", "probability": 0.93115234375}, {"start": 1337.4, "end": 1337.6, "word": " left", "probability": 0.89697265625}, {"start": 1337.6, "end": 1337.96, "word": "-handed.", "probability": 0.731689453125}, {"start": 1339.06, "end": 1339.44, "word": " So", "probability": 0.96435546875}, {"start": 1339.44, "end": 1340.04, "word": " overall", "probability": 0.8369140625}, {"start": 1340.04, "end": 1340.74, "word": " proportion", "probability": 0.83935546875}, {"start": 1340.74, "end": 1341.4, "word": " for", "probability": 0.95166015625}, {"start": 1341.4, "end": 1341.76, "word": " left", "probability": 0.8955078125}, {"start": 1341.76, "end": 1342.24, "word": "-handed", "probability": 0.90673828125}, {"start": 1342.24, "end": 
1343.94, "word": " equals", "probability": 0.908203125}, {"start": 1343.94, "end": 1345.16, "word": " 36", "probability": 0.97265625}, {"start": 1345.16, "end": 1346.88, "word": " divided", "probability": 0.76416015625}, {"start": 1346.88, "end": 1347.14, "word": " by", "probability": 0.9677734375}, {"start": 1347.14, "end": 1347.72, "word": " 300.", "probability": 0.9775390625}], "temperature": 1.0}, {"id": 51, "seek": 137260, "start": 1351.72, "end": 1372.6, "text": " That means, of all children, the proportion of left-handers is 12%. Of all the children, the proportion of left-handers is 12%. So that's the proportion for left-handers.", "tokens": [663, 1355, 11, 295, 439, 2227, 11, 264, 16068, 295, 1411, 12, 5543, 433, 307, 2272, 6856, 2720, 439, 264, 2227, 11, 264, 16068, 295, 1411, 12, 5543, 433, 307, 2272, 6856, 407, 300, 311, 264, 16068, 337, 1411, 12, 5543, 433, 13], "avg_logprob": -0.2114701657132669, "compression_ratio": 1.8, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1351.72, "end": 1351.98, "word": " That", "probability": 0.370361328125}, {"start": 1351.98, "end": 1352.38, "word": " means,", "probability": 0.90869140625}, {"start": 1353.3, "end": 1354.32, "word": " of", "probability": 0.765625}, {"start": 1354.32, "end": 1354.72, "word": " all", "probability": 0.927734375}, {"start": 1354.72, "end": 1355.26, "word": " children,", "probability": 0.65380859375}, {"start": 1355.96, "end": 1356.38, "word": " the", "probability": 0.880859375}, {"start": 1356.38, "end": 1356.84, "word": " proportion", "probability": 0.734375}, {"start": 1356.84, "end": 1357.1, "word": " of", "probability": 0.93896484375}, {"start": 1357.1, "end": 1357.28, "word": " left", "probability": 0.90576171875}, {"start": 1357.28, "end": 1357.82, "word": "-handers", "probability": 0.8151041666666666}, {"start": 1357.82, "end": 1358.12, "word": " is", "probability": 0.92919921875}, {"start": 1358.12, "end": 1359.12, "word": " 12%.", "probability": 0.795654296875}, 
{"start": 1359.12, "end": 1360.68, "word": " Of", "probability": 0.70654296875}, {"start": 1360.68, "end": 1361.16, "word": " all", "probability": 0.9482421875}, {"start": 1361.16, "end": 1362.28, "word": " the", "probability": 0.71240234375}, {"start": 1362.28, "end": 1362.7, "word": " children,", "probability": 0.8408203125}, {"start": 1362.84, "end": 1362.96, "word": " the", "probability": 0.91455078125}, {"start": 1362.96, "end": 1363.42, "word": " proportion", "probability": 0.84814453125}, {"start": 1363.42, "end": 1364.0, "word": " of", "probability": 0.95703125}, {"start": 1364.0, "end": 1364.36, "word": " left", "probability": 0.91748046875}, {"start": 1364.36, "end": 1364.96, "word": "-handers", "probability": 0.93115234375}, {"start": 1364.96, "end": 1366.58, "word": " is", "probability": 0.92724609375}, {"start": 1366.58, "end": 1367.94, "word": " 12%.", "probability": 0.868896484375}, {"start": 1367.94, "end": 1368.18, "word": " So", "probability": 0.8740234375}, {"start": 1368.18, "end": 1368.46, "word": " that's", "probability": 0.7998046875}, {"start": 1368.46, "end": 1368.64, "word": " the", "probability": 0.91162109375}, {"start": 1368.64, "end": 1369.1, "word": " proportion", "probability": 0.8330078125}, {"start": 1369.1, "end": 1370.62, "word": " for", "probability": 0.84765625}, {"start": 1370.62, "end": 1371.62, "word": " left", "probability": 0.91796875}, {"start": 1371.62, "end": 1372.6, "word": "-handers.", "probability": 0.9241536458333334}], "temperature": 1.0}, {"id": 52, "seek": 140488, "start": 1377.5, "end": 1404.88, "text": " So now to find the expected frequencies for males and females, we have to multiply the average proportion, left-handed B dash, by the total number of females. In this case, there are 120 females. So the expected frequency in this case is just 120 multiplied by 12%. 
So 120 multiplied by 12% gives", "tokens": [407, 586, 281, 915, 264, 5176, 20250, 337, 20776, 293, 21529, 11, 321, 362, 281, 12972, 264, 4274, 16068, 11, 1411, 12, 25407, 363, 8240, 11, 538, 264, 3217, 1230, 295, 21529, 13, 682, 341, 1389, 11, 456, 366, 10411, 21529, 13, 407, 264, 5176, 7893, 294, 341, 1389, 307, 445, 10411, 17207, 538, 2272, 6856, 407, 10411, 17207, 538, 2272, 4, 2709], "avg_logprob": -0.18945312104187906, "compression_ratio": 1.6875, "no_speech_prob": 0.0, "words": [{"start": 1377.5, "end": 1377.76, "word": " So", "probability": 0.8505859375}, {"start": 1377.76, "end": 1377.94, "word": " now", "probability": 0.76123046875}, {"start": 1377.94, "end": 1378.22, "word": " to", "probability": 0.81005859375}, {"start": 1378.22, "end": 1378.72, "word": " find", "probability": 0.8818359375}, {"start": 1378.72, "end": 1378.9, "word": " the", "probability": 0.85888671875}, {"start": 1378.9, "end": 1379.26, "word": " expected", "probability": 0.927734375}, {"start": 1379.26, "end": 1379.9, "word": " frequencies", "probability": 0.73779296875}, {"start": 1379.9, "end": 1381.46, "word": " for", "probability": 0.7919921875}, {"start": 1381.46, "end": 1381.98, "word": " males", "probability": 0.8984375}, {"start": 1381.98, "end": 1382.2, "word": " and", "probability": 0.9443359375}, {"start": 1382.2, "end": 1382.66, "word": " females,", "probability": 0.89697265625}, {"start": 1382.9, "end": 1383.5, "word": " we", "probability": 0.94677734375}, {"start": 1383.5, "end": 1383.72, "word": " have", "probability": 0.9453125}, {"start": 1383.72, "end": 1383.88, "word": " to", "probability": 0.9638671875}, {"start": 1383.88, "end": 1384.46, "word": " multiply", "probability": 0.9189453125}, {"start": 1384.46, "end": 1385.16, "word": " the", "probability": 0.89404296875}, {"start": 1385.16, "end": 1385.62, "word": " average", "probability": 0.6962890625}, {"start": 1385.62, "end": 1386.28, "word": " proportion,", "probability": 0.7666015625}, {"start": 1386.7, "end": 
1386.96, "word": " left", "probability": 0.87255859375}, {"start": 1386.96, "end": 1387.38, "word": "-handed", "probability": 0.705810546875}, {"start": 1387.38, "end": 1387.62, "word": " B", "probability": 0.61376953125}, {"start": 1387.62, "end": 1387.98, "word": " dash,", "probability": 0.489990234375}, {"start": 1388.56, "end": 1388.78, "word": " by", "probability": 0.97021484375}, {"start": 1388.78, "end": 1388.94, "word": " the", "probability": 0.92236328125}, {"start": 1388.94, "end": 1389.2, "word": " total", "probability": 0.83837890625}, {"start": 1389.2, "end": 1389.5, "word": " number", "probability": 0.94091796875}, {"start": 1389.5, "end": 1389.64, "word": " of", "probability": 0.97216796875}, {"start": 1389.64, "end": 1389.96, "word": " females.", "probability": 0.94091796875}, {"start": 1390.52, "end": 1390.74, "word": " In", "probability": 0.94189453125}, {"start": 1390.74, "end": 1390.94, "word": " this", "probability": 0.94580078125}, {"start": 1390.94, "end": 1391.2, "word": " case,", "probability": 0.90869140625}, {"start": 1391.28, "end": 1391.42, "word": " there", "probability": 0.9111328125}, {"start": 1391.42, "end": 1391.7, "word": " are", "probability": 0.9453125}, {"start": 1391.7, "end": 1392.16, "word": " 120", "probability": 0.8798828125}, {"start": 1392.16, "end": 1392.7, "word": " females.", "probability": 0.9501953125}, {"start": 1393.38, "end": 1393.78, "word": " So", "probability": 0.95849609375}, {"start": 1393.78, "end": 1394.64, "word": " the", "probability": 0.779296875}, {"start": 1394.64, "end": 1395.24, "word": " expected", "probability": 0.86328125}, {"start": 1395.24, "end": 1395.82, "word": " frequency", "probability": 0.935546875}, {"start": 1395.82, "end": 1396.0, "word": " in", "probability": 0.75634765625}, {"start": 1396.0, "end": 1396.16, "word": " this", "probability": 0.9443359375}, {"start": 1396.16, "end": 1396.4, "word": " case", "probability": 0.9130859375}, {"start": 1396.4, "end": 1396.56, "word": " is", 
"probability": 0.91015625}, {"start": 1396.56, "end": 1396.84, "word": " just", "probability": 0.8984375}, {"start": 1396.84, "end": 1397.46, "word": " 120", "probability": 0.9130859375}, {"start": 1397.46, "end": 1399.22, "word": " multiplied", "probability": 0.66162109375}, {"start": 1399.22, "end": 1399.7, "word": " by", "probability": 0.97265625}, {"start": 1399.7, "end": 1401.04, "word": " 12%.", "probability": 0.893310546875}, {"start": 1401.04, "end": 1401.72, "word": " So", "probability": 0.96240234375}, {"start": 1401.72, "end": 1402.16, "word": " 120", "probability": 0.888671875}, {"start": 1402.16, "end": 1402.68, "word": " multiplied", "probability": 0.70166015625}, {"start": 1402.68, "end": 1402.96, "word": " by", "probability": 0.9736328125}, {"start": 1402.96, "end": 1404.12, "word": " 12", "probability": 0.7958984375}, {"start": 1404.12, "end": 1404.56, "word": "%", "probability": 0.9697265625}, {"start": 1404.56, "end": 1404.88, "word": " gives", "probability": 0.896484375}], "temperature": 1.0}, {"id": 53, "seek": 142008, "start": 1405.82, "end": 1420.08, "text": " 14.4. 
So if it's zero is true, I mean if the difference between the two population proportions are the same, then we expect 14.4 left handed females.", "tokens": [3499, 13, 19, 13, 407, 498, 309, 311, 4018, 307, 2074, 11, 286, 914, 498, 264, 2649, 1296, 264, 732, 4415, 32482, 366, 264, 912, 11, 550, 321, 2066, 3499, 13, 19, 1411, 16013, 21529, 13], "avg_logprob": -0.23902027188120661, "compression_ratio": 1.2605042016806722, "no_speech_prob": 0.0, "words": [{"start": 1405.82, "end": 1406.32, "word": " 14", "probability": 0.273193359375}, {"start": 1406.32, "end": 1406.94, "word": ".4.", "probability": 0.983642578125}, {"start": 1407.64, "end": 1407.86, "word": " So", "probability": 0.86572265625}, {"start": 1407.86, "end": 1408.3, "word": " if", "probability": 0.7412109375}, {"start": 1408.3, "end": 1408.64, "word": " it's", "probability": 0.712158203125}, {"start": 1408.64, "end": 1408.82, "word": " zero", "probability": 0.4814453125}, {"start": 1408.82, "end": 1408.96, "word": " is", "probability": 0.382080078125}, {"start": 1408.96, "end": 1409.26, "word": " true,", "probability": 0.9248046875}, {"start": 1409.52, "end": 1409.66, "word": " I", "probability": 0.8798828125}, {"start": 1409.66, "end": 1409.92, "word": " mean", "probability": 0.9677734375}, {"start": 1409.92, "end": 1410.58, "word": " if", "probability": 0.5849609375}, {"start": 1410.58, "end": 1411.0, "word": " the", "probability": 0.91845703125}, {"start": 1411.0, "end": 1411.38, "word": " difference", "probability": 0.873046875}, {"start": 1411.38, "end": 1411.68, "word": " between", "probability": 0.86865234375}, {"start": 1411.68, "end": 1411.86, "word": " the", "probability": 0.91357421875}, {"start": 1411.86, "end": 1412.02, "word": " two", "probability": 0.92724609375}, {"start": 1412.02, "end": 1412.48, "word": " population", "probability": 0.892578125}, {"start": 1412.48, "end": 1413.16, "word": " proportions", "probability": 0.78466796875}, {"start": 1413.16, "end": 1413.4, "word": " are", 
"probability": 0.92236328125}, {"start": 1413.4, "end": 1413.6, "word": " the", "probability": 0.919921875}, {"start": 1413.6, "end": 1413.88, "word": " same,", "probability": 0.91064453125}, {"start": 1414.52, "end": 1414.84, "word": " then", "probability": 0.8486328125}, {"start": 1414.84, "end": 1415.08, "word": " we", "probability": 0.9619140625}, {"start": 1415.08, "end": 1415.86, "word": " expect", "probability": 0.921875}, {"start": 1415.86, "end": 1417.46, "word": " 14", "probability": 0.8505859375}, {"start": 1417.46, "end": 1418.18, "word": ".4", "probability": 0.999267578125}, {"start": 1418.18, "end": 1419.08, "word": " left", "probability": 0.93115234375}, {"start": 1419.08, "end": 1419.6, "word": " handed", "probability": 0.312744140625}, {"start": 1419.6, "end": 1420.08, "word": " females.", "probability": 0.91650390625}], "temperature": 1.0}, {"id": 54, "seek": 145075, "start": 1422.21, "end": 1450.75, "text": " Because overall proportion is 12% for left-handed. And in this case, there are 120 females. So we are expecting N times this proportion. So N times V dash will give the expected frequency for left-handed females. 
Now what do you think the expected frequency for left-handed males?", "tokens": [1436, 4787, 16068, 307, 2272, 4, 337, 1411, 12, 25407, 13, 400, 294, 341, 1389, 11, 456, 366, 10411, 21529, 13, 407, 321, 366, 9650, 426, 1413, 341, 16068, 13, 407, 426, 1413, 691, 8240, 486, 976, 264, 5176, 7893, 337, 1411, 12, 25407, 21529, 13, 823, 437, 360, 291, 519, 264, 5176, 7893, 337, 1411, 12, 25407, 20776, 30], "avg_logprob": -0.20530225800686194, "compression_ratio": 1.6927710843373494, "no_speech_prob": 0.0, "words": [{"start": 1422.21, "end": 1422.69, "word": " Because", "probability": 0.59912109375}, {"start": 1422.69, "end": 1423.47, "word": " overall", "probability": 0.6669921875}, {"start": 1423.47, "end": 1424.11, "word": " proportion", "probability": 0.81494140625}, {"start": 1424.11, "end": 1425.13, "word": " is", "probability": 0.91748046875}, {"start": 1425.13, "end": 1425.43, "word": " 12", "probability": 0.85546875}, {"start": 1425.43, "end": 1425.79, "word": "%", "probability": 0.8623046875}, {"start": 1425.79, "end": 1426.05, "word": " for", "probability": 0.94140625}, {"start": 1426.05, "end": 1426.35, "word": " left", "probability": 0.9013671875}, {"start": 1426.35, "end": 1426.71, "word": "-handed.", "probability": 0.699951171875}, {"start": 1427.45, "end": 1427.95, "word": " And", "probability": 0.85888671875}, {"start": 1427.95, "end": 1428.21, "word": " in", "probability": 0.89306640625}, {"start": 1428.21, "end": 1428.45, "word": " this", "probability": 0.94580078125}, {"start": 1428.45, "end": 1428.73, "word": " case,", "probability": 0.9140625}, {"start": 1428.83, "end": 1428.93, "word": " there", "probability": 0.7890625}, {"start": 1428.93, "end": 1429.17, "word": " are", "probability": 0.93505859375}, {"start": 1429.17, "end": 1429.61, "word": " 120", "probability": 0.80029296875}, {"start": 1429.61, "end": 1430.15, "word": " females.", "probability": 0.96240234375}, {"start": 1430.73, "end": 1430.91, "word": " So", "probability": 0.9453125}, 
{"start": 1430.91, "end": 1431.07, "word": " we", "probability": 0.638671875}, {"start": 1431.07, "end": 1431.23, "word": " are", "probability": 0.91748046875}, {"start": 1431.23, "end": 1431.85, "word": " expecting", "probability": 0.87744140625}, {"start": 1431.85, "end": 1433.15, "word": " N", "probability": 0.402587890625}, {"start": 1433.15, "end": 1434.29, "word": " times", "probability": 0.80517578125}, {"start": 1434.29, "end": 1434.61, "word": " this", "probability": 0.93115234375}, {"start": 1434.61, "end": 1435.19, "word": " proportion.", "probability": 0.86181640625}, {"start": 1435.79, "end": 1435.89, "word": " So", "probability": 0.9404296875}, {"start": 1435.89, "end": 1436.13, "word": " N", "probability": 0.791015625}, {"start": 1436.13, "end": 1436.45, "word": " times", "probability": 0.8515625}, {"start": 1436.45, "end": 1436.73, "word": " V", "probability": 0.73583984375}, {"start": 1436.73, "end": 1437.77, "word": " dash", "probability": 0.47412109375}, {"start": 1437.77, "end": 1438.33, "word": " will", "probability": 0.77783203125}, {"start": 1438.33, "end": 1438.63, "word": " give", "probability": 0.87353515625}, {"start": 1438.63, "end": 1438.99, "word": " the", "probability": 0.9033203125}, {"start": 1438.99, "end": 1439.61, "word": " expected", "probability": 0.90625}, {"start": 1439.61, "end": 1440.35, "word": " frequency", "probability": 0.95703125}, {"start": 1440.35, "end": 1440.81, "word": " for", "probability": 0.95166015625}, {"start": 1440.81, "end": 1441.65, "word": " left", "probability": 0.9375}, {"start": 1441.65, "end": 1442.05, "word": "-handed", "probability": 0.905517578125}, {"start": 1442.05, "end": 1442.71, "word": " females.", "probability": 0.943359375}, {"start": 1443.87, "end": 1444.13, "word": " Now", "probability": 0.947265625}, {"start": 1444.13, "end": 1444.37, "word": " what", "probability": 0.51416015625}, {"start": 1444.37, "end": 1444.49, "word": " do", "probability": 0.91259765625}, {"start": 1444.49, "end": 
1444.57, "word": " you", "probability": 0.77880859375}, {"start": 1444.57, "end": 1444.91, "word": " think", "probability": 0.91162109375}, {"start": 1444.91, "end": 1447.09, "word": " the", "probability": 0.56103515625}, {"start": 1447.09, "end": 1448.37, "word": " expected", "probability": 0.91357421875}, {"start": 1448.37, "end": 1449.11, "word": " frequency", "probability": 0.95361328125}, {"start": 1449.11, "end": 1449.71, "word": " for", "probability": 0.9404296875}, {"start": 1449.71, "end": 1450.03, "word": " left", "probability": 0.93798828125}, {"start": 1450.03, "end": 1450.37, "word": "-handed", "probability": 0.909912109375}, {"start": 1450.37, "end": 1450.75, "word": " males?", "probability": 0.94970703125}], "temperature": 1.0}, {"id": 55, "seek": 148238, "start": 1453.44, "end": 1482.38, "text": " Again, there are 180 males multiplied by 12, and that will give 21.6. Or if you look at the total for 14.6, 21.6 is 36. So the expected frequency for males left-handed is just 36 minus", "tokens": [3764, 11, 456, 366, 11971, 20776, 17207, 538, 2272, 11, 293, 300, 486, 976, 5080, 13, 21, 13, 1610, 498, 291, 574, 412, 264, 3217, 337, 3499, 13, 21, 11, 5080, 13, 21, 307, 8652, 13, 407, 264, 5176, 7893, 337, 20776, 1411, 12, 25407, 307, 445, 8652, 3175], "avg_logprob": -0.22109374314546584, "compression_ratio": 1.2758620689655173, "no_speech_prob": 0.0, "words": [{"start": 1453.44, "end": 1453.9, "word": " Again,", "probability": 0.64990234375}, {"start": 1454.4, "end": 1454.56, "word": " there", "probability": 0.83837890625}, {"start": 1454.56, "end": 1454.88, "word": " are", "probability": 0.9296875}, {"start": 1454.88, "end": 1455.5, "word": " 180", "probability": 0.80322265625}, {"start": 1455.5, "end": 1457.58, "word": " males", "probability": 0.78076171875}, {"start": 1457.58, "end": 1458.96, "word": " multiplied", "probability": 0.59423828125}, {"start": 1458.96, "end": 1459.3, "word": " by", "probability": 0.97509765625}, {"start": 1459.3, "end": 
1459.74, "word": " 12,", "probability": 0.91650390625}, {"start": 1460.18, "end": 1460.62, "word": " and", "probability": 0.85791015625}, {"start": 1460.62, "end": 1460.82, "word": " that", "probability": 0.923828125}, {"start": 1460.82, "end": 1461.02, "word": " will", "probability": 0.83203125}, {"start": 1461.02, "end": 1461.3, "word": " give", "probability": 0.8662109375}, {"start": 1461.3, "end": 1461.74, "word": " 21", "probability": 0.8740234375}, {"start": 1461.74, "end": 1462.62, "word": ".6.", "probability": 0.993408203125}, {"start": 1464.86, "end": 1465.74, "word": " Or", "probability": 0.92578125}, {"start": 1465.74, "end": 1466.52, "word": " if", "probability": 0.50146484375}, {"start": 1466.52, "end": 1466.6, "word": " you", "probability": 0.94921875}, {"start": 1466.6, "end": 1466.76, "word": " look", "probability": 0.96484375}, {"start": 1466.76, "end": 1466.88, "word": " at", "probability": 0.96630859375}, {"start": 1466.88, "end": 1467.04, "word": " the", "probability": 0.728515625}, {"start": 1467.04, "end": 1467.72, "word": " total", "probability": 0.76708984375}, {"start": 1467.72, "end": 1468.6, "word": " for", "probability": 0.7431640625}, {"start": 1468.6, "end": 1468.94, "word": " 14", "probability": 0.306884765625}, {"start": 1468.94, "end": 1469.38, "word": ".6,", "probability": 0.996826171875}, {"start": 1469.46, "end": 1469.7, "word": " 21", "probability": 0.91357421875}, {"start": 1469.7, "end": 1470.28, "word": ".6", "probability": 0.99853515625}, {"start": 1470.28, "end": 1471.3, "word": " is", "probability": 0.515625}, {"start": 1471.3, "end": 1471.82, "word": " 36.", "probability": 0.9384765625}, {"start": 1472.48, "end": 1473.36, "word": " So", "probability": 0.955078125}, {"start": 1473.36, "end": 1473.64, "word": " the", "probability": 0.67578125}, {"start": 1473.64, "end": 1474.04, "word": " expected", "probability": 0.93359375}, {"start": 1474.04, "end": 1474.7, "word": " frequency", "probability": 0.95849609375}, {"start": 
1474.7, "end": 1475.68, "word": " for", "probability": 0.947265625}, {"start": 1475.68, "end": 1476.5, "word": " males", "probability": 0.91650390625}, {"start": 1476.5, "end": 1476.9, "word": " left", "probability": 0.91015625}, {"start": 1476.9, "end": 1477.38, "word": "-handed", "probability": 0.749267578125}, {"start": 1477.38, "end": 1478.14, "word": " is", "probability": 0.91748046875}, {"start": 1478.14, "end": 1478.6, "word": " just", "probability": 0.90966796875}, {"start": 1478.6, "end": 1479.56, "word": " 36", "probability": 0.97607421875}, {"start": 1479.56, "end": 1482.38, "word": " minus", "probability": 0.96142578125}], "temperature": 1.0}, {"id": 56, "seek": 150586, "start": 1483.48, "end": 1505.86, "text": " 14.4 which is 21.6. So to get the expected frequency for left-handed females, multiply the overall proportion by the total number of females. Again in this case there are 120 females, so 120 multiplied by 0.12 will give 14.4.", "tokens": [3499, 13, 19, 597, 307, 5080, 13, 21, 13, 407, 281, 483, 264, 5176, 7893, 337, 1411, 12, 25407, 21529, 11, 12972, 264, 4787, 16068, 538, 264, 3217, 1230, 295, 21529, 13, 3764, 294, 341, 1389, 456, 366, 10411, 21529, 11, 370, 10411, 17207, 538, 1958, 13, 4762, 486, 976, 3499, 13, 19, 13], "avg_logprob": -0.1741477213122628, "compression_ratio": 1.3865030674846626, "no_speech_prob": 0.0, "words": [{"start": 1483.48, "end": 1483.94, "word": " 14", "probability": 0.43212890625}, {"start": 1483.94, "end": 1484.38, "word": ".4", "probability": 0.970703125}, {"start": 1484.38, "end": 1484.6, "word": " which", "probability": 0.671875}, {"start": 1484.6, "end": 1484.76, "word": " is", "probability": 0.86962890625}, {"start": 1484.76, "end": 1485.56, "word": " 21", "probability": 0.359130859375}, {"start": 1485.56, "end": 1486.52, "word": ".6.", "probability": 0.993408203125}, {"start": 1487.4, "end": 1487.84, "word": " So", "probability": 0.900390625}, {"start": 1487.84, "end": 1488.0, "word": " to", "probability": 
0.7568359375}, {"start": 1488.0, "end": 1488.26, "word": " get", "probability": 0.94677734375}, {"start": 1488.26, "end": 1488.66, "word": " the", "probability": 0.88427734375}, {"start": 1488.66, "end": 1489.12, "word": " expected", "probability": 0.88916015625}, {"start": 1489.12, "end": 1490.02, "word": " frequency", "probability": 0.9365234375}, {"start": 1490.02, "end": 1490.64, "word": " for", "probability": 0.94140625}, {"start": 1490.64, "end": 1491.72, "word": " left", "probability": 0.921875}, {"start": 1491.72, "end": 1492.08, "word": "-handed", "probability": 0.7783203125}, {"start": 1492.08, "end": 1492.72, "word": " females,", "probability": 0.94873046875}, {"start": 1493.22, "end": 1493.9, "word": " multiply", "probability": 0.916015625}, {"start": 1493.9, "end": 1494.64, "word": " the", "probability": 0.91796875}, {"start": 1494.64, "end": 1495.74, "word": " overall", "probability": 0.83056640625}, {"start": 1495.74, "end": 1496.48, "word": " proportion", "probability": 0.8251953125}, {"start": 1496.48, "end": 1497.36, "word": " by", "probability": 0.96142578125}, {"start": 1497.36, "end": 1497.54, "word": " the", "probability": 0.92138671875}, {"start": 1497.54, "end": 1497.78, "word": " total", "probability": 0.85400390625}, {"start": 1497.78, "end": 1498.08, "word": " number", "probability": 0.94091796875}, {"start": 1498.08, "end": 1498.24, "word": " of", "probability": 0.9716796875}, {"start": 1498.24, "end": 1498.56, "word": " females.", "probability": 0.93701171875}, {"start": 1499.42, "end": 1499.72, "word": " Again", "probability": 0.93701171875}, {"start": 1499.72, "end": 1499.84, "word": " in", "probability": 0.5068359375}, {"start": 1499.84, "end": 1500.02, "word": " this", "probability": 0.94580078125}, {"start": 1500.02, "end": 1500.22, "word": " case", "probability": 0.9052734375}, {"start": 1500.22, "end": 1500.38, "word": " there", "probability": 0.6396484375}, {"start": 1500.38, "end": 1500.52, "word": " are", "probability": 
0.93701171875}, {"start": 1500.52, "end": 1500.88, "word": " 120", "probability": 0.9111328125}, {"start": 1500.88, "end": 1501.5, "word": " females,", "probability": 0.951171875}, {"start": 1502.34, "end": 1502.44, "word": " so", "probability": 0.91455078125}, {"start": 1502.44, "end": 1502.82, "word": " 120", "probability": 0.896484375}, {"start": 1502.82, "end": 1503.34, "word": " multiplied", "probability": 0.65625}, {"start": 1503.34, "end": 1503.68, "word": " by", "probability": 0.97509765625}, {"start": 1503.68, "end": 1504.0, "word": " 0", "probability": 0.80712890625}, {"start": 1504.0, "end": 1504.32, "word": ".12", "probability": 0.990478515625}, {"start": 1504.32, "end": 1504.68, "word": " will", "probability": 0.8505859375}, {"start": 1504.68, "end": 1504.92, "word": " give", "probability": 0.8828125}, {"start": 1504.92, "end": 1505.36, "word": " 14", "probability": 0.9423828125}, {"start": 1505.36, "end": 1505.86, "word": ".4.", "probability": 0.997802734375}], "temperature": 1.0}, {"id": 57, "seek": 153424, "start": 1507.74, "end": 1534.24, "text": " For females, there are 180 females, so multiply 180 by 12% will give 21.6, or just find a complement. 
Because since there are 36 left-handed, or left-handers, and 14.4 are females, so the complement, which is 21.6, should be for males.", "tokens": [1171, 21529, 11, 456, 366, 11971, 21529, 11, 370, 12972, 11971, 538, 2272, 4, 486, 976, 5080, 13, 21, 11, 420, 445, 915, 257, 17103, 13, 1436, 1670, 456, 366, 8652, 1411, 12, 25407, 11, 420, 1411, 12, 5543, 433, 11, 293, 3499, 13, 19, 366, 21529, 11, 370, 264, 17103, 11, 597, 307, 5080, 13, 21, 11, 820, 312, 337, 20776, 13], "avg_logprob": -0.16406250488944352, "compression_ratio": 1.5324675324675325, "no_speech_prob": 0.0, "words": [{"start": 1507.74, "end": 1508.54, "word": " For", "probability": 0.7880859375}, {"start": 1508.54, "end": 1509.34, "word": " females,", "probability": 0.91650390625}, {"start": 1509.56, "end": 1509.7, "word": " there", "probability": 0.89697265625}, {"start": 1509.7, "end": 1509.9, "word": " are", "probability": 0.92822265625}, {"start": 1509.9, "end": 1510.36, "word": " 180", "probability": 0.8359375}, {"start": 1510.36, "end": 1510.84, "word": " females,", "probability": 0.904296875}, {"start": 1511.02, "end": 1511.14, "word": " so", "probability": 0.89453125}, {"start": 1511.14, "end": 1511.54, "word": " multiply", "probability": 0.78759765625}, {"start": 1511.54, "end": 1512.02, "word": " 180", "probability": 0.92041015625}, {"start": 1512.02, "end": 1512.3, "word": " by", "probability": 0.953125}, {"start": 1512.3, "end": 1512.7, "word": " 12", "probability": 0.9521484375}, {"start": 1512.7, "end": 1513.2, "word": "%", "probability": 0.42138671875}, {"start": 1513.2, "end": 1513.86, "word": " will", "probability": 0.76953125}, {"start": 1513.86, "end": 1514.14, "word": " give", "probability": 0.88671875}, {"start": 1514.14, "end": 1514.66, "word": " 21", "probability": 0.96826171875}, {"start": 1514.66, "end": 1515.38, "word": ".6,", "probability": 0.99462890625}, {"start": 1515.82, "end": 1516.34, "word": " or", "probability": 0.95166015625}, {"start": 1516.34, "end": 1516.72, "word": 
" just", "probability": 0.91650390625}, {"start": 1516.72, "end": 1517.02, "word": " find", "probability": 0.89599609375}, {"start": 1517.02, "end": 1517.2, "word": " a", "probability": 0.87158203125}, {"start": 1517.2, "end": 1517.56, "word": " complement.", "probability": 0.8349609375}, {"start": 1518.66, "end": 1519.12, "word": " Because", "probability": 0.89892578125}, {"start": 1519.12, "end": 1519.44, "word": " since", "probability": 0.7626953125}, {"start": 1519.44, "end": 1519.68, "word": " there", "probability": 0.916015625}, {"start": 1519.68, "end": 1519.98, "word": " are", "probability": 0.9482421875}, {"start": 1519.98, "end": 1520.9, "word": " 36", "probability": 0.97509765625}, {"start": 1520.9, "end": 1521.88, "word": " left", "probability": 0.9453125}, {"start": 1521.88, "end": 1522.3, "word": "-handed,", "probability": 0.78662109375}, {"start": 1522.84, "end": 1523.04, "word": " or", "probability": 0.9501953125}, {"start": 1523.04, "end": 1523.28, "word": " left", "probability": 0.947265625}, {"start": 1523.28, "end": 1523.76, "word": "-handers,", "probability": 0.89794921875}, {"start": 1524.62, "end": 1525.02, "word": " and", "probability": 0.94091796875}, {"start": 1525.02, "end": 1526.0, "word": " 14", "probability": 0.96923828125}, {"start": 1526.0, "end": 1526.74, "word": ".4", "probability": 0.99853515625}, {"start": 1526.74, "end": 1527.98, "word": " are", "probability": 0.87158203125}, {"start": 1527.98, "end": 1528.52, "word": " females,", "probability": 0.93603515625}, {"start": 1529.26, "end": 1529.8, "word": " so", "probability": 0.9345703125}, {"start": 1529.8, "end": 1530.24, "word": " the", "probability": 0.8603515625}, {"start": 1530.24, "end": 1530.56, "word": " complement,", "probability": 0.91796875}, {"start": 1530.82, "end": 1531.0, "word": " which", "probability": 0.94287109375}, {"start": 1531.0, "end": 1531.24, "word": " is", "probability": 0.94287109375}, {"start": 1531.24, "end": 1531.58, "word": " 21", "probability": 
0.97412109375}, {"start": 1531.58, "end": 1532.22, "word": ".6,", "probability": 0.99951171875}, {"start": 1532.54, "end": 1532.84, "word": " should", "probability": 0.96630859375}, {"start": 1532.84, "end": 1533.04, "word": " be", "probability": 0.94873046875}, {"start": 1533.04, "end": 1533.34, "word": " for", "probability": 0.908203125}, {"start": 1533.34, "end": 1534.24, "word": " males.", "probability": 0.8310546875}], "temperature": 1.0}, {"id": 58, "seek": 156519, "start": 1536.89, "end": 1565.19, "text": " So here, we have to compute the expected frequency for each cell. I just computed the expected frequency for left-handers. 14.4 for females and 21.6 for males. Now what's about right-handers? Now for right-handers, since 12% overall are left-handers,", "tokens": [407, 510, 11, 321, 362, 281, 14722, 264, 5176, 7893, 337, 1184, 2815, 13, 286, 445, 40610, 264, 5176, 7893, 337, 1411, 12, 5543, 433, 13, 3499, 13, 19, 337, 21529, 293, 5080, 13, 21, 337, 20776, 13, 823, 437, 311, 466, 558, 12, 5543, 433, 30, 823, 337, 558, 12, 5543, 433, 11, 1670, 2272, 4, 4787, 366, 1411, 12, 5543, 433, 11], "avg_logprob": -0.159375, "compression_ratio": 1.608974358974359, "no_speech_prob": 0.0, "words": [{"start": 1536.89, "end": 1537.19, "word": " So", "probability": 0.7109375}, {"start": 1537.19, "end": 1537.51, "word": " here,", "probability": 0.7158203125}, {"start": 1538.23, "end": 1538.75, "word": " we", "probability": 0.92333984375}, {"start": 1538.75, "end": 1539.01, "word": " have", "probability": 0.93896484375}, {"start": 1539.01, "end": 1539.23, "word": " to", "probability": 0.96923828125}, {"start": 1539.23, "end": 1539.69, "word": " compute", "probability": 0.91845703125}, {"start": 1539.69, "end": 1540.05, "word": " the", "probability": 0.86279296875}, {"start": 1540.05, "end": 1540.57, "word": " expected", "probability": 0.90234375}, {"start": 1540.57, "end": 1541.15, "word": " frequency", "probability": 0.95166015625}, {"start": 1541.15, "end": 1541.41, "word": 
" for", "probability": 0.92333984375}, {"start": 1541.41, "end": 1541.67, "word": " each", "probability": 0.94677734375}, {"start": 1541.67, "end": 1541.93, "word": " cell.", "probability": 0.93359375}, {"start": 1542.61, "end": 1542.81, "word": " I", "probability": 0.97607421875}, {"start": 1542.81, "end": 1543.25, "word": " just", "probability": 0.88232421875}, {"start": 1543.25, "end": 1544.01, "word": " computed", "probability": 0.94140625}, {"start": 1544.01, "end": 1544.95, "word": " the", "probability": 0.904296875}, {"start": 1544.95, "end": 1547.85, "word": " expected", "probability": 0.8740234375}, {"start": 1547.85, "end": 1548.43, "word": " frequency", "probability": 0.95703125}, {"start": 1548.43, "end": 1548.75, "word": " for", "probability": 0.9326171875}, {"start": 1548.75, "end": 1548.95, "word": " left", "probability": 0.921875}, {"start": 1548.95, "end": 1549.45, "word": "-handers.", "probability": 0.8009440104166666}, {"start": 1549.77, "end": 1550.47, "word": " 14", "probability": 0.54931640625}, {"start": 1550.47, "end": 1551.05, "word": ".4", "probability": 0.87939453125}, {"start": 1551.05, "end": 1551.25, "word": " for", "probability": 0.8837890625}, {"start": 1551.25, "end": 1551.95, "word": " females", "probability": 0.931640625}, {"start": 1551.95, "end": 1552.53, "word": " and", "probability": 0.7412109375}, {"start": 1552.53, "end": 1552.89, "word": " 21", "probability": 0.96875}, {"start": 1552.89, "end": 1553.51, "word": ".6", "probability": 0.973388671875}, {"start": 1553.51, "end": 1553.79, "word": " for", "probability": 0.94873046875}, {"start": 1553.79, "end": 1554.15, "word": " males.", "probability": 0.94775390625}, {"start": 1554.93, "end": 1555.11, "word": " Now", "probability": 0.9287109375}, {"start": 1555.11, "end": 1555.43, "word": " what's", "probability": 0.687744140625}, {"start": 1555.43, "end": 1555.87, "word": " about", "probability": 0.91259765625}, {"start": 1555.87, "end": 1557.29, "word": " right", 
"probability": 0.89697265625}, {"start": 1557.29, "end": 1557.87, "word": "-handers?", "probability": 0.9152018229166666}, {"start": 1558.87, "end": 1559.13, "word": " Now", "probability": 0.8994140625}, {"start": 1559.13, "end": 1559.35, "word": " for", "probability": 0.63623046875}, {"start": 1559.35, "end": 1559.63, "word": " right", "probability": 0.9287109375}, {"start": 1559.63, "end": 1560.15, "word": "-handers,", "probability": 0.92578125}, {"start": 1560.91, "end": 1561.33, "word": " since", "probability": 0.86669921875}, {"start": 1561.33, "end": 1562.17, "word": " 12", "probability": 0.83837890625}, {"start": 1562.17, "end": 1562.79, "word": "%", "probability": 0.77978515625}, {"start": 1562.79, "end": 1563.71, "word": " overall", "probability": 0.87109375}, {"start": 1563.71, "end": 1564.27, "word": " are", "probability": 0.92919921875}, {"start": 1564.27, "end": 1564.69, "word": " left", "probability": 0.94287109375}, {"start": 1564.69, "end": 1565.19, "word": "-handers,", "probability": 0.9261067708333334}], "temperature": 1.0}, {"id": 59, "seek": 158665, "start": 1565.51, "end": 1586.65, "text": " So 88% are right-handers, so multiply 88% by 120, that will give this expected frequency. 
So if you multiply 88 by 120, that will give 10516.", "tokens": [407, 24587, 4, 366, 558, 12, 5543, 433, 11, 370, 12972, 24587, 4, 538, 10411, 11, 300, 486, 976, 341, 5176, 7893, 13, 407, 498, 291, 12972, 24587, 538, 10411, 11, 300, 486, 976, 33705, 6866, 13], "avg_logprob": -0.20189144854482852, "compression_ratio": 1.3027522935779816, "no_speech_prob": 0.0, "words": [{"start": 1565.51, "end": 1565.79, "word": " So", "probability": 0.70751953125}, {"start": 1565.79, "end": 1566.33, "word": " 88", "probability": 0.74365234375}, {"start": 1566.33, "end": 1566.75, "word": "%", "probability": 0.8310546875}, {"start": 1566.75, "end": 1567.43, "word": " are", "probability": 0.90673828125}, {"start": 1567.43, "end": 1569.51, "word": " right", "probability": 0.8955078125}, {"start": 1569.51, "end": 1570.01, "word": "-handers,", "probability": 0.774169921875}, {"start": 1570.51, "end": 1570.69, "word": " so", "probability": 0.88232421875}, {"start": 1570.69, "end": 1571.35, "word": " multiply", "probability": 0.8427734375}, {"start": 1571.35, "end": 1572.41, "word": " 88", "probability": 0.9697265625}, {"start": 1572.41, "end": 1573.63, "word": "%", "probability": 0.47314453125}, {"start": 1573.63, "end": 1573.87, "word": " by", "probability": 0.97265625}, {"start": 1573.87, "end": 1574.25, "word": " 120,", "probability": 0.86767578125}, {"start": 1574.63, "end": 1575.69, "word": " that", "probability": 0.88671875}, {"start": 1575.69, "end": 1575.89, "word": " will", "probability": 0.8828125}, {"start": 1575.89, "end": 1576.13, "word": " give", "probability": 0.85107421875}, {"start": 1576.13, "end": 1576.45, "word": " this", "probability": 0.86474609375}, {"start": 1576.45, "end": 1578.23, "word": " expected", "probability": 0.5859375}, {"start": 1578.23, "end": 1578.85, "word": " frequency.", "probability": 0.95458984375}, {"start": 1579.37, "end": 1579.61, "word": " So", "probability": 0.95166015625}, {"start": 1579.61, "end": 1579.81, "word": " if", "probability": 
0.8798828125}, {"start": 1579.81, "end": 1579.95, "word": " you", "probability": 0.890625}, {"start": 1579.95, "end": 1580.43, "word": " multiply", "probability": 0.91455078125}, {"start": 1580.43, "end": 1581.03, "word": " 88", "probability": 0.98193359375}, {"start": 1581.03, "end": 1582.85, "word": " by", "probability": 0.95068359375}, {"start": 1582.85, "end": 1583.43, "word": " 120,", "probability": 0.8642578125}, {"start": 1584.09, "end": 1584.49, "word": " that", "probability": 0.9404296875}, {"start": 1584.49, "end": 1584.69, "word": " will", "probability": 0.888671875}, {"start": 1584.69, "end": 1585.01, "word": " give", "probability": 0.8720703125}, {"start": 1585.01, "end": 1586.65, "word": " 10516.", "probability": 0.9169921875}], "temperature": 1.0}, {"id": 60, "seek": 161906, "start": 1591.34, "end": 1619.06, "text": " Now there are 14.4 expected frequency for left handers, females. Now total number of females are 120. Now 14.4 out of 120 females are left handers, so remaining is right handers. 
So 120 minus this value, so this equals 120 minus 14.4 which gives 1.5.", "tokens": [823, 456, 366, 3499, 13, 19, 5176, 7893, 337, 1411, 1011, 433, 11, 21529, 13, 823, 3217, 1230, 295, 21529, 366, 10411, 13, 823, 3499, 13, 19, 484, 295, 10411, 21529, 366, 1411, 1011, 433, 11, 370, 8877, 307, 558, 1011, 433, 13, 407, 10411, 3175, 341, 2158, 11, 370, 341, 6915, 10411, 3175, 3499, 13, 19, 597, 2709, 502, 13, 20, 13], "avg_logprob": -0.1590576118323952, "compression_ratio": 1.608974358974359, "no_speech_prob": 0.0, "words": [{"start": 1591.34, "end": 1591.7, "word": " Now", "probability": 0.74169921875}, {"start": 1591.7, "end": 1592.16, "word": " there", "probability": 0.5732421875}, {"start": 1592.16, "end": 1592.52, "word": " are", "probability": 0.90087890625}, {"start": 1592.52, "end": 1593.08, "word": " 14", "probability": 0.86767578125}, {"start": 1593.08, "end": 1593.66, "word": ".4", "probability": 0.981201171875}, {"start": 1593.66, "end": 1594.18, "word": " expected", "probability": 0.8359375}, {"start": 1594.18, "end": 1594.8, "word": " frequency", "probability": 0.73095703125}, {"start": 1594.8, "end": 1595.06, "word": " for", "probability": 0.92138671875}, {"start": 1595.06, "end": 1595.26, "word": " left", "probability": 0.89892578125}, {"start": 1595.26, "end": 1595.82, "word": " handers,", "probability": 0.744384765625}, {"start": 1596.76, "end": 1597.2, "word": " females.", "probability": 0.9619140625}, {"start": 1597.78, "end": 1598.58, "word": " Now", "probability": 0.89501953125}, {"start": 1598.58, "end": 1598.94, "word": " total", "probability": 0.66455078125}, {"start": 1598.94, "end": 1599.32, "word": " number", "probability": 0.93115234375}, {"start": 1599.32, "end": 1599.52, "word": " of", "probability": 0.96875}, {"start": 1599.52, "end": 1599.9, "word": " females", "probability": 0.9208984375}, {"start": 1599.9, "end": 1600.42, "word": " are", "probability": 0.85498046875}, {"start": 1600.42, "end": 1600.9, "word": " 120.", "probability": 
0.9111328125}, {"start": 1602.92, "end": 1603.3, "word": " Now", "probability": 0.92822265625}, {"start": 1603.3, "end": 1604.12, "word": " 14", "probability": 0.71923828125}, {"start": 1604.12, "end": 1604.68, "word": ".4", "probability": 0.996337890625}, {"start": 1604.68, "end": 1605.0, "word": " out", "probability": 0.884765625}, {"start": 1605.0, "end": 1605.18, "word": " of", "probability": 0.97265625}, {"start": 1605.18, "end": 1605.56, "word": " 120", "probability": 0.89794921875}, {"start": 1605.56, "end": 1606.22, "word": " females", "probability": 0.912109375}, {"start": 1606.22, "end": 1606.54, "word": " are", "probability": 0.94140625}, {"start": 1606.54, "end": 1606.78, "word": " left", "probability": 0.94140625}, {"start": 1606.78, "end": 1607.3, "word": " handers,", "probability": 0.930908203125}, {"start": 1607.8, "end": 1608.04, "word": " so", "probability": 0.9296875}, {"start": 1608.04, "end": 1608.44, "word": " remaining", "probability": 0.90283203125}, {"start": 1608.44, "end": 1610.16, "word": " is", "probability": 0.89111328125}, {"start": 1610.16, "end": 1610.42, "word": " right", "probability": 0.9150390625}, {"start": 1610.42, "end": 1610.88, "word": " handers.", "probability": 0.9736328125}, {"start": 1611.16, "end": 1611.36, "word": " So", "probability": 0.93408203125}, {"start": 1611.36, "end": 1611.76, "word": " 120", "probability": 0.84521484375}, {"start": 1611.76, "end": 1612.42, "word": " minus", "probability": 0.96435546875}, {"start": 1612.42, "end": 1612.82, "word": " this", "probability": 0.9462890625}, {"start": 1612.82, "end": 1613.22, "word": " value,", "probability": 0.974609375}, {"start": 1613.8, "end": 1613.92, "word": " so", "probability": 0.833984375}, {"start": 1613.92, "end": 1614.12, "word": " this", "probability": 0.93505859375}, {"start": 1614.12, "end": 1614.58, "word": " equals", "probability": 0.91650390625}, {"start": 1614.58, "end": 1615.18, "word": " 120", "probability": 0.79296875}, {"start": 1615.18, 
"end": 1615.9, "word": " minus", "probability": 0.96923828125}, {"start": 1615.9, "end": 1617.0, "word": " 14", "probability": 0.97705078125}, {"start": 1617.0, "end": 1617.68, "word": ".4", "probability": 0.9970703125}, {"start": 1617.68, "end": 1617.98, "word": " which", "probability": 0.68017578125}, {"start": 1617.98, "end": 1618.34, "word": " gives", "probability": 0.88818359375}, {"start": 1618.34, "end": 1618.62, "word": " 1", "probability": 0.7783203125}, {"start": 1618.62, "end": 1619.06, "word": ".5.", "probability": 0.73828125}], "temperature": 1.0}, {"id": 61, "seek": 164655, "start": 1620.15, "end": 1646.55, "text": " So you don't need actually to compute the expected frequency for the other cells. Since this one is known or is computed by using sample size for females times the overall proportion. The other expected frequency is just the total number of females minus this expected frequency. Now what's about the other one? 158.4.", "tokens": [407, 291, 500, 380, 643, 767, 281, 14722, 264, 5176, 7893, 337, 264, 661, 5438, 13, 4162, 341, 472, 307, 2570, 420, 307, 40610, 538, 1228, 6889, 2744, 337, 21529, 1413, 264, 4787, 16068, 13, 440, 661, 5176, 7893, 307, 445, 264, 3217, 1230, 295, 21529, 3175, 341, 5176, 7893, 13, 823, 437, 311, 466, 264, 661, 472, 30, 2119, 23, 13, 19, 13], "avg_logprob": -0.18713942307692308, "compression_ratio": 1.6111111111111112, "no_speech_prob": 0.0, "words": [{"start": 1620.15, "end": 1620.45, "word": " So", "probability": 0.7216796875}, {"start": 1620.45, "end": 1620.59, "word": " you", "probability": 0.70654296875}, {"start": 1620.59, "end": 1620.79, "word": " don't", "probability": 0.940185546875}, {"start": 1620.79, "end": 1621.01, "word": " need", "probability": 0.873046875}, {"start": 1621.01, "end": 1621.51, "word": " actually", "probability": 0.72265625}, {"start": 1621.51, "end": 1621.79, "word": " to", "probability": 0.935546875}, {"start": 1621.79, "end": 1622.19, "word": " compute", "probability": 0.8974609375}, 
{"start": 1622.19, "end": 1622.51, "word": " the", "probability": 0.87158203125}, {"start": 1622.51, "end": 1622.93, "word": " expected", "probability": 0.91162109375}, {"start": 1622.93, "end": 1623.39, "word": " frequency", "probability": 0.9384765625}, {"start": 1623.39, "end": 1623.65, "word": " for", "probability": 0.87646484375}, {"start": 1623.65, "end": 1623.81, "word": " the", "probability": 0.86669921875}, {"start": 1623.81, "end": 1624.07, "word": " other", "probability": 0.88330078125}, {"start": 1624.07, "end": 1624.47, "word": " cells.", "probability": 0.412841796875}, {"start": 1625.07, "end": 1625.53, "word": " Since", "probability": 0.7998046875}, {"start": 1625.53, "end": 1626.49, "word": " this", "probability": 0.865234375}, {"start": 1626.49, "end": 1626.69, "word": " one", "probability": 0.9248046875}, {"start": 1626.69, "end": 1626.85, "word": " is", "probability": 0.94970703125}, {"start": 1626.85, "end": 1627.17, "word": " known", "probability": 0.79052734375}, {"start": 1627.17, "end": 1628.11, "word": " or", "probability": 0.5205078125}, {"start": 1628.11, "end": 1628.27, "word": " is", "probability": 0.7392578125}, {"start": 1628.27, "end": 1628.67, "word": " computed", "probability": 0.9345703125}, {"start": 1628.67, "end": 1629.61, "word": " by", "probability": 0.935546875}, {"start": 1629.61, "end": 1630.09, "word": " using", "probability": 0.9404296875}, {"start": 1630.09, "end": 1631.89, "word": " sample", "probability": 0.69287109375}, {"start": 1631.89, "end": 1632.35, "word": " size", "probability": 0.83251953125}, {"start": 1632.35, "end": 1632.69, "word": " for", "probability": 0.93212890625}, {"start": 1632.69, "end": 1633.27, "word": " females", "probability": 0.71533203125}, {"start": 1633.27, "end": 1633.71, "word": " times", "probability": 0.708984375}, {"start": 1633.71, "end": 1634.49, "word": " the", "probability": 0.8173828125}, {"start": 1634.49, "end": 1634.95, "word": " overall", "probability": 0.87744140625}, 
{"start": 1634.95, "end": 1635.55, "word": " proportion.", "probability": 0.80322265625}, {"start": 1636.29, "end": 1636.47, "word": " The", "probability": 0.85595703125}, {"start": 1636.47, "end": 1636.83, "word": " other", "probability": 0.8798828125}, {"start": 1636.83, "end": 1637.83, "word": " expected", "probability": 0.91259765625}, {"start": 1637.83, "end": 1638.29, "word": " frequency", "probability": 0.9111328125}, {"start": 1638.29, "end": 1638.49, "word": " is", "probability": 0.89111328125}, {"start": 1638.49, "end": 1638.83, "word": " just", "probability": 0.92431640625}, {"start": 1638.83, "end": 1639.95, "word": " the", "probability": 0.8759765625}, {"start": 1639.95, "end": 1640.25, "word": " total", "probability": 0.85009765625}, {"start": 1640.25, "end": 1640.59, "word": " number", "probability": 0.9345703125}, {"start": 1640.59, "end": 1640.75, "word": " of", "probability": 0.9697265625}, {"start": 1640.75, "end": 1641.15, "word": " females", "probability": 0.93798828125}, {"start": 1641.15, "end": 1641.59, "word": " minus", "probability": 0.97314453125}, {"start": 1641.59, "end": 1641.97, "word": " this", "probability": 0.931640625}, {"start": 1641.97, "end": 1642.33, "word": " expected", "probability": 0.89599609375}, {"start": 1642.33, "end": 1642.83, "word": " frequency.", "probability": 0.9619140625}, {"start": 1643.71, "end": 1643.91, "word": " Now", "probability": 0.94140625}, {"start": 1643.91, "end": 1644.09, "word": " what's", "probability": 0.717041015625}, {"start": 1644.09, "end": 1644.29, "word": " about", "probability": 0.9150390625}, {"start": 1644.29, "end": 1644.43, "word": " the", "probability": 0.916015625}, {"start": 1644.43, "end": 1644.67, "word": " other", "probability": 0.8935546875}, {"start": 1644.67, "end": 1644.95, "word": " one?", "probability": 0.91943359375}, {"start": 1645.23, "end": 1645.97, "word": " 158", "probability": 0.92138671875}, {"start": 1645.97, "end": 1646.55, "word": ".4.", "probability": 
0.9892578125}], "temperature": 1.0}, {"id": 62, "seek": 167981, "start": 1651.67, "end": 1679.81, "text": " Multiplied by 120 will give 158 or the complement, which is 180 minus 21.6 will give the same answer. Because since there are 180 meals and 21.6 from them are left-handers, so the remaining should be right-handers, which is 158.5.", "tokens": [29238, 564, 1091, 538, 10411, 486, 976, 2119, 23, 420, 264, 17103, 11, 597, 307, 11971, 3175, 5080, 13, 21, 486, 976, 264, 912, 1867, 13, 1436, 1670, 456, 366, 11971, 12832, 293, 5080, 13, 21, 490, 552, 366, 1411, 12, 5543, 433, 11, 370, 264, 8877, 820, 312, 558, 12, 5543, 433, 11, 597, 307, 2119, 23, 13, 20, 13], "avg_logprob": -0.19531249807726953, "compression_ratio": 1.4620253164556962, "no_speech_prob": 0.0, "words": [{"start": 1651.67, "end": 1652.43, "word": " Multiplied", "probability": 0.7379557291666666}, {"start": 1652.43, "end": 1652.85, "word": " by", "probability": 0.9736328125}, {"start": 1652.85, "end": 1654.09, "word": " 120", "probability": 0.875}, {"start": 1654.09, "end": 1654.65, "word": " will", "probability": 0.7333984375}, {"start": 1654.65, "end": 1654.93, "word": " give", "probability": 0.81689453125}, {"start": 1654.93, "end": 1655.85, "word": " 158", "probability": 0.94970703125}, {"start": 1655.85, "end": 1657.03, "word": " or", "probability": 0.54541015625}, {"start": 1657.03, "end": 1659.69, "word": " the", "probability": 0.52490234375}, {"start": 1659.69, "end": 1660.09, "word": " complement,", "probability": 0.62451171875}, {"start": 1660.49, "end": 1660.61, "word": " which", "probability": 0.94580078125}, {"start": 1660.61, "end": 1660.95, "word": " is", "probability": 0.94482421875}, {"start": 1660.95, "end": 1661.69, "word": " 180", "probability": 0.91064453125}, {"start": 1661.69, "end": 1662.25, "word": " minus", "probability": 0.93798828125}, {"start": 1662.25, "end": 1662.73, "word": " 21", "probability": 0.744140625}, {"start": 1662.73, "end": 1663.49, "word": ".6", 
"probability": 0.98681640625}, {"start": 1663.49, "end": 1663.95, "word": " will", "probability": 0.7109375}, {"start": 1663.95, "end": 1664.11, "word": " give", "probability": 0.873046875}, {"start": 1664.11, "end": 1664.27, "word": " the", "probability": 0.90576171875}, {"start": 1664.27, "end": 1664.45, "word": " same", "probability": 0.89697265625}, {"start": 1664.45, "end": 1664.81, "word": " answer.", "probability": 0.94482421875}, {"start": 1665.49, "end": 1666.11, "word": " Because", "probability": 0.89990234375}, {"start": 1666.11, "end": 1667.01, "word": " since", "probability": 0.7724609375}, {"start": 1667.01, "end": 1667.17, "word": " there", "probability": 0.845703125}, {"start": 1667.17, "end": 1667.33, "word": " are", "probability": 0.94775390625}, {"start": 1667.33, "end": 1667.83, "word": " 180", "probability": 0.89111328125}, {"start": 1667.83, "end": 1668.27, "word": " meals", "probability": 0.67529296875}, {"start": 1668.27, "end": 1668.91, "word": " and", "probability": 0.82861328125}, {"start": 1668.91, "end": 1670.33, "word": " 21", "probability": 0.88427734375}, {"start": 1670.33, "end": 1671.21, "word": ".6", "probability": 0.9970703125}, {"start": 1671.21, "end": 1671.93, "word": " from", "probability": 0.8369140625}, {"start": 1671.93, "end": 1673.11, "word": " them", "probability": 0.8994140625}, {"start": 1673.11, "end": 1673.39, "word": " are", "probability": 0.9140625}, {"start": 1673.39, "end": 1673.67, "word": " left", "probability": 0.935546875}, {"start": 1673.67, "end": 1674.27, "word": "-handers,", "probability": 0.8072916666666666}, {"start": 1674.73, "end": 1674.85, "word": " so", "probability": 0.8701171875}, {"start": 1674.85, "end": 1675.01, "word": " the", "probability": 0.908203125}, {"start": 1675.01, "end": 1675.41, "word": " remaining", "probability": 0.88232421875}, {"start": 1675.41, "end": 1676.29, "word": " should", "probability": 0.96875}, {"start": 1676.29, "end": 1676.55, "word": " be", "probability": 
0.94921875}, {"start": 1676.55, "end": 1677.49, "word": " right", "probability": 0.91455078125}, {"start": 1677.49, "end": 1677.97, "word": "-handers,", "probability": 0.9122721354166666}, {"start": 1678.03, "end": 1678.23, "word": " which", "probability": 0.9482421875}, {"start": 1678.23, "end": 1678.59, "word": " is", "probability": 0.939453125}, {"start": 1678.59, "end": 1679.35, "word": " 158", "probability": 0.958984375}, {"start": 1679.35, "end": 1679.81, "word": ".5.", "probability": 0.6549072265625}], "temperature": 1.0}, {"id": 63, "seek": 168724, "start": 1680.56, "end": 1687.24, "text": " So we just use this rule for only one cell for this reason.", "tokens": [407, 321, 445, 764, 341, 4978, 337, 787, 472, 2815, 337, 341, 1778, 13], "avg_logprob": -0.20104166467984516, "compression_ratio": 1.0169491525423728, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1680.56, "end": 1681.0, "word": " So", "probability": 0.7080078125}, {"start": 1681.0, "end": 1681.3, "word": " we", "probability": 0.7509765625}, {"start": 1681.3, "end": 1681.68, "word": " just", "probability": 0.87353515625}, {"start": 1681.68, "end": 1682.76, "word": " use", "probability": 0.80029296875}, {"start": 1682.76, "end": 1683.06, "word": " this", "probability": 0.9482421875}, {"start": 1683.06, "end": 1683.46, "word": " rule", "probability": 0.931640625}, {"start": 1683.46, "end": 1684.7, "word": " for", "probability": 0.9140625}, {"start": 1684.7, "end": 1685.1, "word": " only", "probability": 0.9111328125}, {"start": 1685.1, "end": 1685.38, "word": " one", "probability": 0.90771484375}, {"start": 1685.38, "end": 1685.68, "word": " cell", "probability": 0.890625}, {"start": 1685.68, "end": 1686.72, "word": " for", "probability": 0.392822265625}, {"start": 1686.72, "end": 1686.98, "word": " this", "probability": 0.94580078125}, {"start": 1686.98, "end": 1687.24, "word": " reason.", "probability": 0.97314453125}], "temperature": 1.0}], "language": "en", "language_probability": 
1.0, "duration": 1696.58925, "duration_after_vad": 1601.8247187499965} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..08baa28311b3c0b1376cecbc17528b8871ec1b0c --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/_tPr3lwdN_Q_raw.srt @@ -0,0 +1,1380 @@ +1 +00:00:14,040 --> 00:00:18,360 +Let's start chapter 11. Here we'll talk about some + +2 +00:00:18,360 --> 00:00:19,960 +of Cai's characters. + +3 +00:00:22,920 --> 00:00:27,180 +If you remember the first time we met, we + +4 +00:00:27,180 --> 00:00:30,470 +discussed that there are two types of data. One + +5 +00:00:30,470 --> 00:00:36,270 +was numerical data and other categorical data. In + +6 +00:00:36,270 --> 00:00:42,130 +chapter eight, we discussed testing and estimation + +7 +00:00:42,130 --> 00:00:46,650 +about one, either population mean, if the data is + +8 +00:00:46,650 --> 00:00:52,810 +numeric, or population proportion if the data is + +9 +00:00:52,810 --> 00:00:56,910 +not numeric, I mean categorical data. In chapter + +10 +00:00:56,910 --> 00:01:00,590 +nine and ten, I'm sorry, in chapter 10 we extend + +11 +00:01:00,590 --> 00:01:05,830 +for two population means and two population + +12 +00:01:05,830 --> 00:01:09,990 +proportions. Now this chapter talks about testing + +13 +00:01:09,990 --> 00:01:13,810 +about the difference between two population means + +14 +00:01:13,810 --> 00:01:17,790 +and two population proportions or comparing more + +15 +00:01:17,790 --> 00:01:20,650 +than two population proportions. So we are going + +16 +00:01:20,650 --> 00:01:23,690 +to test to see if there exists a difference + +17 +00:01:23,690 --> 00:01:29,240 +between two population proportions or comparison + +18 +00:01:29,240 --> 00:01:33,160 +among more than two population proportions. 
And + +19 +00:01:33,160 --> 00:01:37,240 +the second objective also will be test the + +20 +00:01:37,240 --> 00:01:41,180 +independent of two categorical variables. For + +21 +00:01:41,180 --> 00:01:43,820 +example, suppose we are interested in the + +22 +00:01:43,820 --> 00:01:47,970 +relationship between gender and education. And our + +23 +00:01:47,970 --> 00:01:51,050 +goal is to see if there exists a significant + +24 +00:01:51,050 --> 00:01:55,210 +relationship between education and gender, for + +25 +00:01:55,210 --> 00:01:59,190 +example, or if we are interested in health status + +26 +00:01:59,190 --> 00:02:03,450 +and smoking. Smoking, either the person is heavy + +27 +00:02:03,450 --> 00:02:08,270 +smoker or not heavy smoker. And health status + +28 +00:02:08,270 --> 00:02:10,370 +could be classified as bad, + +29 +00:02:13,250 --> 00:02:20,540 +medium, or good. So we are going to test to see if + +30 +00:02:20,540 --> 00:02:22,960 +there exists a difference or relationship between + +31 +00:02:22,960 --> 00:02:26,300 +two variables. So here we are interested in + +32 +00:02:26,300 --> 00:02:30,900 +qualitative data, either we have two or more than + +33 +00:02:30,900 --> 00:02:35,560 +two variables. So the objectives in this chapter + +34 +00:02:35,560 --> 00:02:39,860 +are when to use the chi-square test for + +35 +00:02:39,860 --> 00:02:44,130 +contingency tables. And the other, how to use the + +36 +00:02:44,130 --> 00:02:47,490 +chi-square test for contingency plots. So here we + +37 +00:02:47,490 --> 00:02:52,910 +are going to see what are the conditions. And what + +38 +00:02:52,910 --> 00:02:56,550 +are the conditions we can see, we can use the chi + +39 +00:02:56,550 --> 00:03:00,850 +-square test. So the condition is, we should have + +40 +00:03:00,850 --> 00:03:05,170 +qualitative data. 
So in this case, we'll talk + +41 +00:03:05,170 --> 00:03:05,890 +about just + +42 +00:03:09,330 --> 00:03:14,950 +gender, health status, education, income levels, + +43 +00:03:15,190 --> 00:03:18,450 +and so on. So we are not talking about numerical + +44 +00:03:18,450 --> 00:03:22,950 +data. So we are focusing on non-numerical data. I + +45 +00:03:22,950 --> 00:03:28,250 +mean on categorical data. Let's see the definition + +46 +00:03:28,250 --> 00:03:31,050 +of contingency table. For example, as I mentioned, + +47 +00:03:31,250 --> 00:03:37,090 +suppose we are interested in gender and education. + +48 +00:03:38,500 --> 00:03:42,660 +And for example, suppose gender is classified, + +49 +00:03:43,440 --> 00:03:50,180 +normal is classified as female and male. Education + +50 +00:03:50,180 --> 00:03:57,920 +could be classified as either secondary or + +51 +00:03:57,920 --> 00:04:07,100 +less, bachelor, master, PhD. So here we are + +52 +00:04:07,100 --> 00:04:11,920 +talking about a relationship between gender and + +53 +00:04:11,920 --> 00:04:18,200 +education. Now in this case, we have this table. + +54 +00:04:23,360 --> 00:04:28,360 +For example, one is gender. And gender is either + +55 +00:04:28,360 --> 00:04:31,360 +female or male. And the other is education. + +56 +00:04:34,080 --> 00:04:37,140 +And education in this case is classified into four + +57 +00:04:37,140 --> 00:04:40,140 +categories, secondary or less, + +58 +00:04:43,360 --> 00:04:49,820 +BA, master and PhD. This table is called + +59 +00:04:49,820 --> 00:04:57,700 +contingency table. It's two by four table. Two + +60 +00:04:57,700 --> 00:04:58,840 +because there are two rows. + +61 +00:05:07,220 --> 00:05:09,800 +In this case, there are two rules and two codes. + +62 +00:05:10,540 --> 00:05:13,640 +For example, gender, + +63 +00:05:16,260 --> 00:05:20,720 +male or female, and the other one, suppose, + +64 +00:05:21,100 --> 00:05:21,700 +smoking. 
+ +65 +00:05:24,120 --> 00:05:29,490 +Either yes or no. Smoking or not smoking. So in + +66 +00:05:29,490 --> 00:05:32,330 +this case there are two rows and two columns. And + +67 +00:05:32,330 --> 00:05:35,910 +the goal here is we want to test to see if there + +68 +00:05:35,910 --> 00:05:38,750 +exists significant relationship between gender and + +69 +00:05:38,750 --> 00:05:42,090 +smoking. So the two variables of interest in this + +70 +00:05:42,090 --> 00:05:46,550 +case are categorical variables. So how can we test + +71 +00:05:46,550 --> 00:05:50,790 +to see if there exists a significant difference + +72 +00:05:50,790 --> 00:05:55,550 +between the two proportions or to see if smoking + +73 +00:05:55,550 --> 00:06:04,410 +and Gender are independent. So our goal is to see + +74 +00:06:04,410 --> 00:06:07,190 +if they are independent or not. It means if we + +75 +00:06:07,190 --> 00:06:10,990 +reject the null hypothesis of independence, it + +76 +00:06:10,990 --> 00:06:17,160 +means they are related. So again, a contingency + +77 +00:06:17,160 --> 00:06:19,920 +table, tables in this case useful in situations + +78 +00:06:19,920 --> 00:06:23,920 +involving multiple population proportions, so we + +79 +00:06:23,920 --> 00:06:26,820 +have more than two, or even two population + +80 +00:06:26,820 --> 00:06:30,640 +proportions, used to classify sample observations + +81 +00:06:30,640 --> 00:06:34,580 +according to two or more characteristics. In this + +82 +00:06:34,580 --> 00:06:37,760 +case, there are only two characteristics of + +83 +00:06:37,760 --> 00:06:40,140 +interest. One is education, the other is gender. + +84 +00:06:40,740 --> 00:06:44,360 +It could be we have another characteristic. The + +85 +00:06:44,360 --> 00:06:49,520 +other thing is these tables are called cross + +86 +00:06:49,520 --> 00:06:52,800 +-classification tables. Cross because we have + +87 +00:06:52,800 --> 00:06:58,180 +variable A versus variable B. 
For this reason it's + +88 +00:06:58,180 --> 00:07:04,440 +called cross-classification tables. There is + +89 +00:07:04,440 --> 00:07:07,360 +another example here. Here we are interested in + +90 +00:07:07,360 --> 00:07:14,700 +left-handed versus gender. So, dominant hand left + +91 +00:07:14,700 --> 00:07:22,320 +versus right, so the person either use left or + +92 +00:07:22,320 --> 00:07:27,180 +right hand, gender male or female. So in this case + +93 +00:07:27,180 --> 00:07:30,440 +there are two categories for each variable, so + +94 +00:07:30,440 --> 00:07:35,070 +this type of example is called Two by two table, + +95 +00:07:35,530 --> 00:07:39,310 +because there are two rows, two classifications + +96 +00:07:39,310 --> 00:07:44,410 +here. So hand either left or right, the person + +97 +00:07:44,410 --> 00:07:47,130 +either male or female. So we have two + +98 +00:07:47,130 --> 00:07:49,430 +characteristics for each one, so it's two by two + +99 +00:07:49,430 --> 00:07:53,270 +table. Suppose in this case we are examining a + +100 +00:07:53,270 --> 00:07:58,050 +sample of 300 children. So the sample size is 300, + +101 +00:07:58,510 --> 00:08:04,000 +and we have this result. So gender is classified + +102 +00:08:04,000 --> 00:08:09,200 +as males and females. Hand preference, either left + +103 +00:08:09,200 --> 00:08:15,780 +or right. So in this case, there are 120 females. + +104 +00:08:16,580 --> 00:08:22,840 +Twelve of them are using left hand. So it means + +105 +00:08:22,840 --> 00:08:27,100 +that there are twelve left handers for females. 
+ +106 +00:08:29,310 --> 00:08:34,330 +while for males there are 180 females and 20 of + +107 +00:08:34,330 --> 00:08:40,310 +them left-handers and so again 120 females 12 were + +108 +00:08:40,310 --> 00:08:47,210 +left-handed 180 males and 20 were also left-handed + +109 +00:08:47,210 --> 00:08:51,750 +and now the question is we are going to test see + +110 +00:08:51,750 --> 00:08:56,470 +if the difference between the two proportions are + +111 +00:08:56,470 --> 00:09:01,110 +equal I mean Under zero, we are going to test to + +112 +00:09:01,110 --> 00:09:03,850 +see if pi 1 equals to pi 2. It means the + +113 +00:09:03,850 --> 00:09:09,970 +proportion of females who are left-handed is equal + +114 +00:09:09,970 --> 00:09:14,210 +to the proportion of males who are left-handed. So + +115 +00:09:14,210 --> 00:09:17,710 +it looks similar to the one we did in chapter 10 + +116 +00:09:17,710 --> 00:09:21,870 +when we are talking about testing for the + +117 +00:09:21,870 --> 00:09:23,830 +difference between two population proportions. + +118 +00:09:24,170 --> 00:09:26,890 +It's similar, but here we will use a different + +119 +00:09:26,890 --> 00:09:29,850 +statistic. It's called chi-square test. So we are + +120 +00:09:29,850 --> 00:09:32,850 +going to test if there is no significant + +121 +00:09:32,850 --> 00:09:35,230 +difference between the population proportions for + +122 +00:09:35,230 --> 00:09:39,770 +males and females left-handed against there exists + +123 +00:09:39,770 --> 00:09:43,580 +a difference. In this case, always we have two + +124 +00:09:43,580 --> 00:09:47,560 +-sided test for chi-square. Chi-square never be + +125 +00:09:47,560 --> 00:09:51,080 +negative, chi-square is always positive. So here + +126 +00:09:51,080 --> 00:09:54,820 +we are talking about two-sided test. It means the + +127 +00:09:54,820 --> 00:09:59,740 +two proportions are not the same. Hand preference + +128 +00:09:59,740 --> 00:10:03,120 +is not independent of gender. 
In other words, we + +129 +00:10:03,120 --> 00:10:06,840 +can say that hand preference is not independent of + +130 +00:10:06,840 --> 00:10:10,200 +gender. So here we can say that hand preference is + +131 +00:10:10,740 --> 00:10:16,280 +It is independent of gender. So it means under H0, + +132 +00:10:16,780 --> 00:10:25,520 +we assume hand preference and gender are + +133 +00:10:25,520 --> 00:10:30,520 +independent or + +134 +00:10:33,540 --> 00:10:37,620 +A proportion of females who are left-handed is + +135 +00:10:37,620 --> 00:10:40,860 +equal to the proportion of males who are left + +136 +00:10:40,860 --> 00:10:43,740 +-handed. It means they are independent. I mean, + +137 +00:10:43,840 --> 00:10:47,520 +hand preference and gender are independent against + +138 +00:10:47,520 --> 00:10:52,420 +either. You may write that the two proportions are + +139 +00:10:52,420 --> 00:10:58,000 +not the same or the two variables are dependent. + +140 +00:10:59,120 --> 00:11:01,780 +So you can say that hand preference + +141 +00:11:05,620 --> 00:11:12,360 +and gender are either you may say that are not + +142 +00:11:12,360 --> 00:11:25,180 +independent or related or dependent so + +143 +00:11:25,180 --> 00:11:30,440 +again not independent means either they are + +144 +00:11:30,440 --> 00:11:39,110 +related or dependent Now, if H0 is true, if we + +145 +00:11:39,110 --> 00:11:42,810 +assume H0 is true, it means the proportion of left + +146 +00:11:42,810 --> 00:11:47,850 +-handed females should be the same as the + +147 +00:11:47,850 --> 00:11:53,430 +proportion of left-handed males. It says that the + +148 +00:11:53,430 --> 00:11:59,110 +proportion is the same as, not equal to. Because + +149 +00:11:59,110 --> 00:12:04,020 +if we reject the null hypothesis, Then we have + +150 +00:12:04,020 --> 00:12:05,980 +sufficient evidence to support the alternative. 
+ +151 +00:12:06,920 --> 00:12:10,060 +But if we don't reject the null, it doesn't imply + +152 +00:12:10,060 --> 00:12:13,740 +that H0 is true. It means there is insufficient + +153 +00:12:13,740 --> 00:12:17,420 +evidence to support the alternative hypothesis. So + +154 +00:12:17,420 --> 00:12:20,500 +it's better to say that the two proportions are + +155 +00:12:20,500 --> 00:12:24,540 +the same. Same does not mean equal. Same means + +156 +00:12:25,400 --> 00:12:29,640 +there exists a small difference, I mean not + +157 +00:12:29,640 --> 00:12:31,300 +significant difference between the two + +158 +00:12:31,300 --> 00:12:34,640 +proportions. So you have to be careful between, + +159 +00:12:35,080 --> 00:12:39,240 +distinguish actually between same and equal. So + +160 +00:12:39,240 --> 00:12:42,700 +same, it doesn't mean exactly they are equal, but + +161 +00:12:42,700 --> 00:12:45,580 +they are roughly equal, or approximately, or they + +162 +00:12:45,580 --> 00:12:49,320 +actually, they are close to each other. Against, + +163 +00:12:49,940 --> 00:12:52,640 +here again, against the two population proportions + +164 +00:12:52,640 --> 00:12:57,800 +are not the same. So let's see how can we examine + +165 +00:12:57,800 --> 00:13:01,420 +this null hypothesis by using a new statistic. + +166 +00:13:01,980 --> 00:13:06,700 +This statistic is called Chi-square. Chi-square is + +167 +00:13:06,700 --> 00:13:14,260 +denoted by this Greek letter, Chi-square. It's a + +168 +00:13:14,260 --> 00:13:19,840 +Greek letter. It's pronounced as Chi, C-H-I, Chi + +169 +00:13:19,840 --> 00:13:23,280 +-square. It looks like X. + +170 +00:13:30,500 --> 00:13:33,160 +And chi-square is given by, chi-square statistic + +171 +00:13:33,160 --> 00:13:39,180 +is given by this equation. So chi-square is the + +172 +00:13:39,180 --> 00:13:49,500 +sum of F for 0 minus F expected wanted square + +173 +00:13:49,500 --> 00:13:55,000 +divided by Fe. 
+ +174 +00:13:56,150 --> 00:13:59,490 +Now, let's see the definition for each term here. + +175 +00:13:59,870 --> 00:14:04,450 +Fo, it means the observed frequency in a + +176 +00:14:04,450 --> 00:14:09,430 +particular cell in the table you have. Fe is the + +177 +00:14:09,430 --> 00:14:13,190 +expected frequency in a particular cell if it's 0 + +178 +00:14:13,190 --> 00:14:15,690 +is true. So if you go back a little bit to the + +179 +00:14:15,690 --> 00:14:23,790 +previous table, these values 12, 24, 108, 156 are + +180 +00:14:23,790 --> 00:14:28,880 +the observed frequency. So these values represent + +181 +00:14:28,880 --> 00:14:37,420 +Fo. So Fo is the observed frequency. + +182 +00:14:38,200 --> 00:14:40,040 +The frequency is from the sample. + +183 +00:14:45,080 --> 00:14:48,340 +Again, we are testing proportion 1 equals + +184 +00:14:48,340 --> 00:14:52,940 +proportion 2. Now for Fe. + +185 +00:14:54,780 --> 00:14:58,500 +Fe is the expected frequency in a particular cell + +186 +00:14:58,500 --> 00:15:02,800 +if each cell is true. If we are assuming the two + +187 +00:15:02,800 --> 00:15:05,980 +population proportions are the same, what do you + +188 +00:15:05,980 --> 00:15:10,540 +expect the frequency for each cell? So we are + +189 +00:15:10,540 --> 00:15:18,100 +going to compute the observed, I'm sorry, the + +190 +00:15:18,100 --> 00:15:23,840 +expected frequency for each cell. 
in this table so + +191 +00:15:23,840 --> 00:15:27,280 +let's see how can we do that by using the same + +192 +00:15:27,280 --> 00:15:35,300 +rule we had before now chi-square statistic for + +193 +00:15:35,300 --> 00:15:38,360 +the two by two case I mean if there are two rows + +194 +00:15:38,360 --> 00:15:41,300 +and two columns has only one degree of freedom + +195 +00:15:41,300 --> 00:15:46,440 +later on we'll see if we have more than two rows + +196 +00:15:46,440 --> 00:15:50,530 +and more than two columns we look for different + +197 +00:15:50,530 --> 00:15:54,230 +value for degrees of freedom. So for two by two + +198 +00:15:54,230 --> 00:15:59,670 +tables, there is only one degree of freedom. Now + +199 +00:15:59,670 --> 00:16:03,010 +the assumption here for using chi-square, each + +200 +00:16:03,010 --> 00:16:05,550 +cell in the contingency table has expected + +201 +00:16:05,550 --> 00:16:08,870 +frequency of at least five. So these expected + +202 +00:16:08,870 --> 00:16:13,350 +frequencies should be at least five for each cell. + +203 +00:16:13,830 --> 00:16:17,740 +So that's the condition for using So we have to + +204 +00:16:17,740 --> 00:16:21,080 +test + +205 +00:16:21,080 --> 00:16:24,600 +if the expected request for each cell is at least + +206 +00:16:24,600 --> 00:16:29,360 +5. So the condition is straightforward. Now, my + +207 +00:16:29,360 --> 00:16:35,540 +decision rule is, the chi-square is always one + +208 +00:16:35,540 --> 00:16:40,240 +-tailed. I mean, it's positive always. So the, + +209 +00:16:40,420 --> 00:16:42,020 +always chi-square. + +210 +00:16:44,720 --> 00:16:48,820 +is greater than or equal to zero. So we reject the + +211 +00:16:48,820 --> 00:16:52,000 +null hypothesis if the value of the chi-square + +212 +00:16:52,000 --> 00:16:57,040 +statistic lies in the rejection region and only + +213 +00:16:57,040 --> 00:17:00,840 +there is only one side. So there is only one + +214 +00:17:00,840 --> 00:17:03,780 +rejection region. 
So we reject the null hypothesis + +215 +00:17:04,750 --> 00:17:08,270 +If the value of chi-square falls in this rejection + +216 +00:17:08,270 --> 00:17:12,170 +region. I mean, if chi-square statistic is greater + +217 +00:17:12,170 --> 00:17:14,950 +than chi-square alpha, then we reject the null + +218 +00:17:14,950 --> 00:17:22,850 +hypothesis. Again, here we are testing H0 + +219 +00:17:22,850 --> 00:17:28,690 +by 1 equals by 2 against two-sided test. Even + +220 +00:17:28,690 --> 00:17:33,820 +there is only one side. But chi-square is designed + +221 +00:17:33,820 --> 00:17:37,780 +for testing pi 1 equals pi 2 against pi 1 does not + +222 +00:17:37,780 --> 00:17:40,520 +equal pi 2. In this case, you cannot know the + +223 +00:17:40,520 --> 00:17:45,280 +direction of this difference. I mean, you cannot + +224 +00:17:45,280 --> 00:17:48,940 +say pi 1 is greater than or pi 1 is smaller than. + +225 +00:17:49,440 --> 00:17:54,100 +Because chi-square is always positive. If you + +226 +00:17:54,100 --> 00:17:57,220 +remember from this statistic, when we are testing + +227 +00:17:57,220 --> 00:18:01,180 +pi 1 equals pi 2, Z could be positive or negative. + +228 +00:18:02,100 --> 00:18:05,640 +So, based on that, we can decide if pi 1 is + +229 +00:18:05,640 --> 00:18:09,160 +greater than or smaller than pi 2. But here, since + +230 +00:18:09,160 --> 00:18:12,240 +chi-square is always positive, then you cannot + +231 +00:18:12,240 --> 00:18:15,320 +determine the direction of the relationship. You + +232 +00:18:15,320 --> 00:18:17,620 +just say that there exists a significant + +233 +00:18:17,620 --> 00:18:21,940 +relationship between such and such. So by using + +234 +00:18:21,940 --> 00:18:25,320 +chi-square, you are doing just a test to see if + +235 +00:18:25,320 --> 00:18:29,840 +there is a relationship between x and y, or if + +236 +00:18:29,840 --> 00:18:32,460 +this relationship is not significant. 
But you
+
+237
+00:18:32,460 --> 00:18:36,160
+cannot determine either the strength, I mean you
+
+238
+00:18:36,160 --> 00:18:39,560
+cannot say there exists strong relationship, or
+
+239
+00:18:39,560 --> 00:18:42,980
+the direction, you cannot say there exists inverse
+
+240
+00:18:42,980 --> 00:18:46,220
+or direct positive or negative relationship, you
+
+241
+00:18:46,220 --> 00:18:50,660
+just say there exists a relationship between x and
+
+242
+00:18:50,660 --> 00:18:56,540
+y. So one more time, my decision rule is, if the
+
+243
+00:18:56,540 --> 00:19:01,000
+value of the chi-square greater than chi-square
+
+244
+00:19:01,000 --> 00:19:04,580
+alpha, then we reject the null hypothesis. So
+
+245
+00:19:04,580 --> 00:19:08,980
+there is also another way to reject by using p
+
+246
+00:19:08,980 --> 00:19:09,600
+-value approach.
+
+247
+00:19:12,650 --> 00:19:16,150
+p-value in this case, computed as probability of chi
+
+248
+00:19:16,150 --> 00:19:19,630
+-square greater than chi-square statistic, and
+
+249
+00:19:19,630 --> 00:19:24,870
+always we reject H0 if this p-value is smaller
+
+250
+00:19:24,870 --> 00:19:27,610
+than alpha. So as we mentioned again before,
+
+251
+00:19:28,130 --> 00:19:35,770
+always we reject H0 if p-value is smaller than
+
+252
+00:19:35,770 --> 00:19:36,010
+alpha.
+
+253
+00:19:39,290 --> 00:19:43,530
+So again, my decision rule is, we reject the null
+
+254
+00:19:43,530 --> 00:19:47,370
+hypothesis if the value of the statistic lies in
+
+255
+00:19:47,370 --> 00:19:50,610
+the rejection region. And again, there is only one
+
+256
+00:19:50,610 --> 00:19:54,350
+rejection region in this case, because chi-square
+
+257
+00:19:54,350 --> 00:19:58,770
+is always positive. If you look at this formula, F
+
+258
+00:19:58,770 --> 00:20:02,250
+observed minus F expected squared, so it's
+
+259
+00:20:02,250 --> 00:20:06,730
+positive. F is also positive, so chi-square is
+
+260
+00:20:06,730 --> 00:20:11,850
+always positive. 
Now let's see how can we compute + +261 +00:20:11,850 --> 00:20:18,450 +the value of the chi-square statistic. If we go + +262 +00:20:18,450 --> 00:20:24,090 +back a little bit to the data we have, in this + +263 +00:20:24,090 --> 00:20:31,510 +case there are one in twenty females and twelve + +264 +00:20:31,510 --> 00:20:34,870 +out of them are left-handed. + +265 +00:20:39,650 --> 00:20:50,630 +Left, right, 12, 108, 24, 156. The totals are 120, + +266 +00:20:51,290 --> 00:20:52,210 +180, + +267 +00:20:54,510 --> 00:21:00,290 +36, 264, and 300. So that's the table we have now. + +268 +00:21:00,920 --> 00:21:04,600 +Let's see how can we compute the value of the chi + +269 +00:21:04,600 --> 00:21:08,860 +-square statistic. The first step, compute the + +270 +00:21:08,860 --> 00:21:13,100 +average proportion, the same as the one we did in + +271 +00:21:13,100 --> 00:21:16,620 +chapter 10. It's called overall proportion, or + +272 +00:21:16,620 --> 00:21:23,100 +pooled proportion. And B dash, in this case, is + +273 +00:21:23,100 --> 00:21:30,270 +given by x1 plus x2 divided by n1 plus n2. in left + +274 +00:21:30,270 --> 00:21:34,690 +-handed, either males or females. In this sample, + +275 +00:21:34,990 --> 00:21:41,490 +there are 12 females, 12 left-handed females and + +276 +00:21:41,490 --> 00:21:51,850 +24 males. So 12 plus 24 divided by 1 plus 8 is 2. + +277 +00:21:52,010 --> 00:21:58,780 +There are 120 females and 180 females. So overall + +278 +00:21:58,780 --> 00:22:04,480 +proportion 12 plus 24, which is actually this + +279 +00:22:04,480 --> 00:22:10,320 +total 36, divided by overall total or the grand + +280 +00:22:10,320 --> 00:22:14,440 +total, which is 300. So the formula is + +281 +00:22:14,440 --> 00:22:17,400 +straightforward. Just suppose I am interested in + +282 +00:22:17,400 --> 00:22:22,240 +left-handed. So overall proportion for left-handed + +283 +00:22:22,240 --> 00:22:27,720 +equals 36 divided by 300. 
+ +284 +00:22:31,720 --> 00:22:37,100 +That means, of all children, the proportion of + +285 +00:22:37,100 --> 00:22:42,960 +left-handers is 12%. Of all the children, the + +286 +00:22:42,960 --> 00:22:48,640 +proportion of left-handers is 12%. So that's the + +287 +00:22:48,640 --> 00:22:52,600 +proportion for left-handers. + +288 +00:22:57,500 --> 00:23:01,980 +So now to find the expected frequencies for males + +289 +00:23:01,980 --> 00:23:05,620 +and females, we have to multiply the average + +290 +00:23:05,620 --> 00:23:09,200 +proportion, left-handed B dash, by the total + +291 +00:23:09,200 --> 00:23:12,160 +number of females. In this case, there are 120 + +292 +00:23:12,160 --> 00:23:16,560 +females. So the expected frequency in this case is + +293 +00:23:16,560 --> 00:23:22,960 +just 120 multiplied by 12%. So 120 multiplied by + +294 +00:23:22,960 --> 00:23:30,580 +12% gives 14.4. So if it's zero is true, I mean if + +295 +00:23:30,580 --> 00:23:32,480 +the difference between the two population + +296 +00:23:32,480 --> 00:23:39,080 +proportions are the same, then we expect 14.4 left + +297 +00:23:39,080 --> 00:23:45,790 +handed females. Because overall proportion is 12% + +298 +00:23:45,790 --> 00:23:49,610 +for left-handed. And in this case, there are 120 + +299 +00:23:49,610 --> 00:23:54,610 +females. So we are expecting N times this + +300 +00:23:54,610 --> 00:23:58,990 +proportion. So N times V dash will give the + +301 +00:23:58,990 --> 00:24:04,130 +expected frequency for left-handed females. Now + +302 +00:24:04,130 --> 00:24:10,030 +what do you think the expected frequency for left + +303 +00:24:10,030 --> 00:24:10,750 +-handed males? + +304 +00:24:13,440 --> 00:24:20,620 +Again, there are 180 males multiplied by 12, and + +305 +00:24:20,620 --> 00:24:22,620 +that will give 21.6. + +306 +00:24:24,860 --> 00:24:31,820 +Or if you look at the total for 14.6, 21.6 is 36. 
+ +307 +00:24:32,480 --> 00:24:38,140 +So the expected frequency for males left-handed is + +308 +00:24:38,140 --> 00:24:42,380 +just 36 minus + +309 +00:24:43,480 --> 00:24:49,120 +14.4 which is 21.6. So to get the expected + +310 +00:24:49,120 --> 00:24:54,640 +frequency for left-handed females, multiply the + +311 +00:24:54,640 --> 00:24:58,560 +overall proportion by the total number of females. + +312 +00:24:59,420 --> 00:25:02,820 +Again in this case there are 120 females, so 120 + +313 +00:25:02,820 --> 00:25:09,340 +multiplied by 0.12 will give 14.4. For females, + +314 +00:25:09,560 --> 00:25:13,860 +there are 180 females, so multiply 180 by 12% will + +315 +00:25:13,860 --> 00:25:19,120 +give 21.6, or just find a complement. Because + +316 +00:25:19,120 --> 00:25:23,760 +since there are 36 left-handed, or left-handers, + +317 +00:25:24,620 --> 00:25:31,240 +and 14.4 are females, so the complement, which is + +318 +00:25:31,240 --> 00:25:34,240 +21.6, should be for males. + +319 +00:25:36,890 --> 00:25:41,150 +So here, we have to compute the expected frequency + +320 +00:25:41,150 --> 00:25:47,850 +for each cell. I just computed the expected + +321 +00:25:47,850 --> 00:25:52,530 +frequency for left-handers. 14.4 for females and + +322 +00:25:52,530 --> 00:25:57,870 +21.6 for males. Now what's about right-handers? + +323 +00:25:58,870 --> 00:26:04,690 +Now for right-handers, since 12% overall are left + +324 +00:26:04,690 --> 00:26:12,410 +-handers, So 88% are right-handers, so multiply 88 + +325 +00:26:12,410 --> 00:26:18,850 +% by 120, that will give this expected frequency. + +326 +00:26:19,370 --> 00:26:25,010 +So if you multiply 88 by 120, that will give + +327 +00:26:25,010 --> 00:26:26,650 +10516. + +328 +00:26:31,340 --> 00:26:35,260 +Now there are 14.4 expected frequency for left + +329 +00:26:35,260 --> 00:26:40,420 +handers, females. Now total number of females are + +330 +00:26:40,420 --> 00:26:47,300 +120. 
Now 14.4 out of 120 females are left handers, + +331 +00:26:47,800 --> 00:26:52,820 +so remaining is right handers. So 120 minus this + +332 +00:26:52,820 --> 00:26:58,620 +value, so this equals 120 minus 14.4 which gives 1 + +333 +00:26:58,620 --> 00:27:02,510 +.5. So you don't need actually to compute the + +334 +00:27:02,510 --> 00:27:06,490 +expected frequency for the other cells. Since this + +335 +00:27:06,490 --> 00:27:12,350 +one is known or is computed by using sample size + +336 +00:27:12,350 --> 00:27:16,470 +for females times the overall proportion. The + +337 +00:27:16,470 --> 00:27:20,590 +other expected frequency is just the total number + +338 +00:27:20,590 --> 00:27:23,910 +of females minus this expected frequency. Now + +339 +00:27:23,910 --> 00:27:26,550 +what's about the other one? 158.4. + +340 +00:27:31,670 --> 00:27:40,090 +Multiplied by 120 will give 158 or the complement, + +341 +00:27:40,490 --> 00:27:44,810 +which is 180 minus 21.6 will give the same answer. + +342 +00:27:45,490 --> 00:27:51,930 +Because since there are 180 meals and 21.6 from + +343 +00:27:51,930 --> 00:27:56,550 +them are left-handers, so the remaining should be + +344 +00:27:56,550 --> 00:28:03,060 +right-handers, which is 158.5. So we just use this + +345 +00:28:03,060 --> 00:28:07,240 +rule for only one cell for this reason. + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04.srt new file mode 100644 index 0000000000000000000000000000000000000000..5d891360364873389429a7018759480fcf2f7183 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04.srt @@ -0,0 +1,1515 @@ + +1 +00:00:05,430 --> 00:00:08,470 +We are going to do some practice problems for + +2 +00:00:08,470 --> 00:00:16,890 +chapter 6. First problem will be 6.36 on page 226. + +3 +00:00:19,930 --> 00:00:22,510 +So open page 226. 
+ +4 +00:00:26,040 --> 00:00:30,020 +Insha'Allah today we'll discuss some problems from + +5 +00:00:30,020 --> 00:00:35,200 +chapter 6 and one of these problems will be 6.36 + +6 +00:00:35,200 --> 00:00:46,420 +so let's read 36 it it's asked about suppose that + +7 +00:00:46,420 --> 00:00:51,600 +the download time is normally distributed with + +8 +00:00:51,600 --> 00:00:55,380 +mean and the mean is given and the standard + +9 +00:00:55,380 --> 00:00:58,940 +deviation is given in this case the mean equals 0 + +10 +00:00:58,940 --> 00:01:06,260 +.8 second so again the mean is 0.8 where sigma + +11 +00:01:06,260 --> 00:01:12,340 +equals 0.2 seconds so mu is 0.8 seconds that the + +12 +00:01:12,340 --> 00:01:16,560 +time required to download from the internet an + +13 +00:01:16,560 --> 00:01:20,020 +image file or something like that Now he asks + +14 +00:01:20,020 --> 00:01:22,560 +about what's the probability that a download time + +15 +00:01:22,560 --> 00:01:27,900 +is part A less + +16 +00:01:27,900 --> 00:01:31,300 +than one second. So what's the probability of + +17 +00:01:31,300 --> 00:01:39,360 +getting time less than one second? Part B, what's + +18 +00:01:39,360 --> 00:01:44,160 +the probability that a download time is between 0 + +19 +00:01:44,160 --> 00:01:46,520 +.5 and 1.5 seconds? + +20 +00:01:51,810 --> 00:02:02,510 +and 1.5 seconds but see above 0.5 seconds last + +21 +00:02:02,510 --> 00:02:07,230 +time i think we did some something + +22 +00:02:07,230 --> 00:02:10,550 +like that so for the first one b of x smaller than + +23 +00:02:10,550 --> 00:02:13,400 +one The first step, we have to compute the z + +24 +00:02:13,400 --> 00:02:19,080 +-score. And that's straightforward. Just z less + +25 +00:02:19,080 --> 00:02:28,720 +than 1 minus 0.8 divided by sigma. So 1 minus 0.8 + +26 +00:02:28,720 --> 00:02:33,480 +is 0.2. So B of z less than 1. 
+ +27 +00:02:36,620 --> 00:02:41,150 +Now by using the normal table, we have The normal + +28 +00:02:41,150 --> 00:02:44,990 +table, I think we did many times for this table. + +29 +00:02:46,930 --> 00:02:48,110 +Now for 1. + +30 +00:02:52,290 --> 00:03:01,010 +So 1 is 0.8413. So it's 0.8413. This is the + +31 +00:03:01,010 --> 00:03:05,850 +probability that a download time is smaller than + +32 +00:03:05,850 --> 00:03:06,010 +1. + +33 +00:03:14,230 --> 00:03:19,690 +Now the probability is between two values, 0.5 and + +34 +00:03:19,690 --> 00:03:25,570 +1.5. So in this case, we have to find the z values + +35 +00:03:25,570 --> 00:03:32,170 +for the corresponding x, 0.5 and 1.5. So in this + +36 +00:03:32,170 --> 00:03:39,080 +case, this one equals. 0.5 minus 0.8 divided by + +37 +00:03:39,080 --> 00:03:47,420 +sigma smaller than x is transformed to this form. + +38 +00:03:52,920 --> 00:03:57,480 +Smaller than 1.5 minus 0.8 divided by 0.2. + +39 +00:04:03,500 --> 00:04:10,320 +Exactly, minus 1.5. Smaller than z, smaller than 1 + +40 +00:04:10,320 --> 00:04:15,640 +.5 minus 0.8 is 0.7, divided by 0.2 is 3.4. + +41 +00:04:19,320 --> 00:04:23,640 +So, we are looking now for the probability of z + +42 +00:04:23,640 --> 00:04:28,740 +-score between minus 0.5 and smaller than 0.5. + +43 +00:04:29,460 --> 00:04:32,780 +Now, if we are looking for this kind of + +44 +00:04:32,780 --> 00:04:35,760 +probability, we have to find the probability of z + +45 +00:04:35,760 --> 00:04:43,640 +smaller than 3.5 minus z smaller than negative 1 + +46 +00:04:43,640 --> 00:04:51,720 +.5. Now, if we go back to the table we have. Now 3 + +47 +00:04:51,720 --> 00:04:56,640 +.5 all the way down up to the end of this round. + +48 +00:04:58,300 --> 00:05:05,260 +The table I have, the maximum value of Z is 3.4 + +49 +00:05:05,260 --> 00:05:15,900 +all the way up to 9. 
So that means I have only B + +50 +00:05:15,900 --> 00:05:16,360 +of Z + +51 +00:05:20,930 --> 00:05:26,590 +less than this value. And the corresponding area + +52 +00:05:26,590 --> 00:05:28,870 +is 9998. + +53 +00:05:31,910 --> 00:05:34,430 +But for this specific example, we are looking for + +54 +00:05:34,430 --> 00:05:38,090 +V of Z smaller than 3.5, which is roughly equal to + +55 +00:05:38,090 --> 00:05:42,270 +3.49. So the answer is around this value. + +56 +00:05:45,330 --> 00:05:55,950 +9998 approximately 9998 excuse me 9998 + +57 +00:05:55,950 --> 00:06:01,030 +this one minus + +58 +00:06:27,000 --> 00:06:32,670 +Again, we are looking for minus 1.5 up to 3.5 this + +59 +00:06:32,670 --> 00:06:40,030 +area now the dashed area which is between minus 1 + +60 +00:06:40,030 --> 00:06:44,010 +.5 all the way up to 3.5 equals the area to the + +61 +00:06:44,010 --> 00:06:49,930 +left of 3.5 which is B of Z less than 3.5 minus + +62 +00:06:49,930 --> 00:06:58,310 +the area to the left of negative 1 minus Z greater + +63 +00:06:58,310 --> 00:07:03,450 +than 1.5 this value if we are to compute the + +64 +00:07:03,450 --> 00:07:08,000 +probability of Z smaller than negative 1.5. Either + +65 +00:07:08,000 --> 00:07:12,240 +you can use the normal table directly, but the + +66 +00:07:12,240 --> 00:07:15,240 +other page were negative these scores. In this + +67 +00:07:15,240 --> 00:07:21,680 +case, minus 1.5. 0668. 0668. + +68 +00:07:25,420 --> 00:07:34,280 +Okay. Now imagine that you only have the positive + +69 +00:07:34,280 --> 00:07:35,520 +z-scope. + +70 +00:07:38,160 --> 00:07:42,060 +Again, suppose that the table you have in the exam + +71 +00:07:42,060 --> 00:07:47,700 +is just the positive values of z. How can we find + +72 +00:07:47,700 --> 00:07:51,620 +the probability of z greater than minus 1.5, + +73 +00:07:51,740 --> 00:07:58,060 +smaller than minus 1.5? In this case, b of z is + +74 +00:07:58,060 --> 00:08:02,560 +less than negative 1.5. 
The area to the left here + +75 +00:08:03,400 --> 00:08:08,840 +is the same as the area to the right of the same + +76 +00:08:08,840 --> 00:08:12,040 +value, but positive one. So this is equal to B of + +77 +00:08:12,040 --> 00:08:18,960 +Z greater than 1.5. Because the same area, minus 1 + +78 +00:08:18,960 --> 00:08:22,440 +.5 all the way up to minus infinity, that equals + +79 +00:08:22,440 --> 00:08:25,560 +from 1.5 to infinity because of symmetric + +80 +00:08:25,560 --> 00:08:30,950 +distribution. Now, B of Z greater than 1.5, The + +81 +00:08:30,950 --> 00:08:33,970 +table we have here gives the area to the left of + +82 +00:08:33,970 --> 00:08:43,410 +z. So this is 1 minus b of z less than 1.5. Now 1 + +83 +00:08:43,410 --> 00:08:46,650 +.5 from this table is 9332. + +84 +00:08:50,850 --> 00:08:56,030 +Okay, so that will get 0668, + +85 +00:08:56,470 --> 00:08:58,650 +which is the same result as we got directly from + +86 +00:08:58,650 --> 00:09:02,440 +the negative z table. but make sure that in the + +87 +00:09:02,440 --> 00:09:06,800 +exam I will give you just only the positive table + +88 +00:09:06,800 --> 00:09:12,060 +now subtract these two values you will get the + +89 +00:09:12,060 --> 00:09:22,600 +answer for this for part B 9 9 9 3 0 9 3 3 0 this + +90 +00:09:22,600 --> 00:09:29,020 +is the final result for this example now part C + +91 +00:09:31,570 --> 00:09:34,430 +What's the probability that the download time will + +92 +00:09:34,430 --> 00:09:36,370 +take above 0.5 seconds? + +93 +00:09:40,390 --> 00:09:49,050 +So we are looking again, B of X above 0.5. Similar + +94 +00:09:49,050 --> 00:09:51,230 +way as we did in part A. + +95 +00:09:54,090 --> 00:10:02,850 +So B of Z, 0.5 minus the mean. Divide by 6. + +96 +00:10:06,890 --> 00:10:13,850 +So B of Z greater + +97 +00:10:13,850 --> 00:10:20,330 +than negative 1.5. Now B of Z greater than minus 1 + +98 +00:10:20,330 --> 00:10:26,810 +.5. 
It means we are looking for the area above + +99 +00:10:26,810 --> 00:10:38,370 +minus 1.5 So + +100 +00:10:38,370 --> 00:10:42,850 +this area Now + +101 +00:10:42,850 --> 00:10:50,850 +the area above 1 minus 0.5 Equals 1 minus + +102 +00:10:55,050 --> 00:10:57,890 +B of Z less than negative 115. + +103 +00:11:00,210 --> 00:11:06,510 +As we did here, this probability is + +104 +00:11:06,510 --> 00:11:08,090 +0668. + +105 +00:11:09,770 --> 00:11:12,570 +So the answer again is 9334. + +106 +00:11:16,610 --> 00:11:24,590 +So that's for part C. Any question? Now, part D. + +107 +00:11:27,050 --> 00:11:35,910 +And they ask about 99%. 99 + +108 +00:11:35,910 --> 00:11:44,170 +% of the + +109 +00:11:44,170 --> 00:11:46,070 +download times. + +110 +00:11:55,380 --> 00:11:56,920 +How many seconds? + +111 +00:12:11,420 --> 00:12:15,420 +Exactly, in this case, the probability is given, + +112 +00:12:15,540 --> 00:12:16,820 +which is 99%. + +113 +00:12:19,190 --> 00:12:25,150 +Now, if 99% of the download times are above how + +114 +00:12:25,150 --> 00:12:29,090 +many seconds? So, in this case, we are looking for + +115 +00:12:29,090 --> 00:12:34,350 +the value, for example, for A, such that B of X + +116 +00:12:34,350 --> 00:12:41,130 +greater than A equals 99%. Now, in this type of + +117 +00:12:41,130 --> 00:12:44,270 +problems, we have to make a graph first in order + +118 +00:12:44,270 --> 00:12:48,080 +to determine the location of A. because it may be + +119 +00:12:48,080 --> 00:12:50,600 +to the right or to the left side. It depends + +120 +00:12:50,600 --> 00:12:54,840 +actually on two things. Number one, the size, the + +121 +00:12:54,840 --> 00:12:58,280 +greater than or smaller than, and the other is the + +122 +00:12:58,280 --> 00:13:01,800 +probability. Is it above 1.5 or smaller than 1.5? + +123 +00:13:02,220 --> 00:13:05,800 +So you have to keep careful for this type of + +124 +00:13:05,800 --> 00:13:11,260 +questions. So in this case. 
It should be to the + +125 +00:13:11,260 --> 00:13:15,180 +left. It should be to the left. because the area + +126 +00:13:15,180 --> 00:13:21,600 +to the left here makes sense it's 99% but if the + +127 +00:13:21,600 --> 00:13:25,460 +location is to the right side here it doesn't make + +128 +00:13:25,460 --> 00:13:28,580 +any sense that B makes greater than or equal to 99 + +129 +00:13:28,580 --> 00:13:33,460 +% because the area here is split into two halves + +130 +00:13:33,460 --> 00:13:38,460 +so 50% to the right 50% to the left of the + +131 +00:13:38,460 --> 00:13:42,900 +vertical line here so A should be to the left side + +132 +00:13:44,110 --> 00:13:48,870 +Make sense? Now, V of X greater than A equals 99%. + +133 +00:13:48,870 --> 00:13:52,130 +So + +134 +00:13:52,130 --> 00:13:58,230 +this area is 99%. Now, if we go back to the table + +135 +00:13:58,230 --> 00:14:01,670 +we have, the table again gives the area to the + +136 +00:14:01,670 --> 00:14:08,570 +left side. So this one exactly equals V of X + +137 +00:14:08,570 --> 00:14:16,140 +smaller than A, which is 1% because the area to + +138 +00:14:16,140 --> 00:14:20,860 +the right of A is 99 so the area to the left of A + +139 +00:14:20,860 --> 00:14:25,680 +is 1-99 which is 1% now here we have to look + +140 +00:14:25,680 --> 00:14:34,140 +inside the body of the table at the value of 01 so + +141 +00:14:34,140 --> 00:14:37,500 +in this case this score should be negative or + +142 +00:14:37,500 --> 00:14:42,310 +positive Since the probability is 100% smaller + +143 +00:14:42,310 --> 00:14:46,130 +than 1.5, so it should be negative. So if you go + +144 +00:14:46,130 --> 00:14:51,890 +back to the table, negative 1. Look at 0.1. + +145 +00:15:00,290 --> 00:15:02,550 +Minus 2.34. + +146 +00:15:12,640 --> 00:15:19,800 +So the approximate answer actually is 0099. + +147 +00:15:21,160 --> 00:15:26,660 +The closest value. You may take this value. You + +148 +00:15:26,660 --> 00:15:31,720 +will be okay. 
So this one is more closer to 01. + +149 +00:15:32,160 --> 00:15:36,800 +than 0102. So my corresponding z-score is negative + +150 +00:15:36,800 --> 00:15:47,820 +2.4, I'm sorry, 2.35. So z-score, negative 2.33, + +151 +00:15:50,900 --> 00:15:52,520 +0123. + +152 +00:15:54,500 --> 00:15:59,160 +So this is the approximate answer. sometimes maybe + +153 +00:15:59,160 --> 00:16:04,340 +if you have a calculator or excel you may + +154 +00:16:04,340 --> 00:16:08,720 +determine the exact value in this case which is + +155 +00:16:08,720 --> 00:16:15,500 +minus 2.3263 this is the exact answer but the + +156 +00:16:15,500 --> 00:16:20,460 +approximate one is 5 so my z score is negative 2 + +157 +00:16:20,460 --> 00:16:23,560 +.33 now the value of a + +158 +00:16:27,300 --> 00:16:32,000 +equals Mu plus Z Sigma. The one we just discussed + +159 +00:16:32,000 --> 00:16:39,580 +last time. Remember, when Z equals minus Mu + +160 +00:16:39,580 --> 00:16:44,320 +divided by Sigma, just cross multiplication, you + +161 +00:16:44,320 --> 00:16:48,980 +will get X + +162 +00:16:48,980 --> 00:16:52,340 +minus Mu equals Z Sigma. That means X equals Mu + +163 +00:16:52,340 --> 00:16:58,320 +plus Z Sigma. Fixed same as A, so A equals Mu plus + +164 +00:16:58,320 --> 00:17:05,640 +Z Sigma, Mu is given 0.8, Z negative 2.33 times + +165 +00:17:05,640 --> 00:17:13,100 +Sigma, that will give the final answer which is 0 + +166 +00:17:13,100 --> 00:17:14,060 +.3347. + +167 +00:17:17,480 --> 00:17:24,130 +So again, He said that 99% of the downward times + +168 +00:17:24,130 --> 00:17:27,810 +are above how many seconds. So we are looking for + +169 +00:17:27,810 --> 00:17:32,470 +the value of A, such that U makes greater than or + +170 +00:17:32,470 --> 00:17:38,930 +equal to 99%. So A is located to the left side of + +171 +00:17:38,930 --> 00:17:40,330 +the curve, normal curve. 
+ +172 +00:17:43,110 --> 00:17:45,350 +And again, the table gives the area to the left of + +173 +00:17:45,350 --> 00:17:52,620 +Z. So the area to the left is 1%. Now if you check + +174 +00:17:52,620 --> 00:17:57,900 +the z value corresponding to this one, 101, you + +175 +00:17:57,900 --> 00:18:00,700 +figure that z, the approximate answer is negative + +176 +00:18:00,700 --> 00:18:07,160 +2.33. Now just use this value, and plug it into + +177 +00:18:07,160 --> 00:18:11,700 +this equation, you will get this result. Yes. Is + +178 +00:18:11,700 --> 00:18:12,760 +it negative? + +179 +00:18:22,750 --> 00:18:32,910 +Last part, part E 95 + +180 +00:18:32,910 --> 00:18:46,170 +% 95 + +181 +00:18:46,170 --> 00:18:46,810 +% + +182 +00:18:50,400 --> 00:18:55,980 +after the load times are between what two values + +183 +00:18:55,980 --> 00:18:59,120 +approximately distributed around the mean. + +184 +00:19:04,960 --> 00:19:17,340 +So around 95 + +185 +00:19:17,340 --> 00:19:25,090 +% Of the download times, what two values + +186 +00:19:25,090 --> 00:19:31,170 +symmetrically distributed around the mean? the + +187 +00:19:31,170 --> 00:19:36,250 +area here for example between + +188 +00:19:36,250 --> 00:19:44,850 +E and B is 95% and he mentioned the proximity so + +189 +00:19:44,850 --> 00:19:48,790 + +223 +00:23:30,490 --> 00:23:38,570 +minus 1.96, the score times 6. The other part, to + +224 +00:23:38,570 --> 00:23:43,250 +get the value of B, the probability of X smaller + +225 +00:23:43,250 --> 00:23:49,150 +than B equals 95 plus 2.5 is 97.5. 
+ +226 +00:23:51,990 --> 00:23:55,090 +by using the same way we'll get that z score is 1 + +227 +00:23:55,090 --> 00:23:58,550 +.96 as we mentioned before because these two + +228 +00:23:58,550 --> 00:24:02,830 +values here should be the z score the same so now + +229 +00:24:02,830 --> 00:24:09,690 +b equals mu plus 1 + +230 +00:24:09,690 --> 00:24:17,390 +.96 times sigma and that will give you a 1.408 + +231 +00:24:23,280 --> 00:24:28,640 +And B equals 1.1920. + +232 +00:24:29,260 --> 00:24:34,520 +So these are the two values which has 95% between + +233 +00:24:34,520 --> 00:24:41,620 +them. So 95% of the data, I mean 95% of the + +234 +00:24:41,620 --> 00:24:50,180 +download times are between 0.4 seconds and 1.19 + +235 +00:24:50,180 --> 00:24:56,490 +seconds. make sense that is again 95 percent of + +236 +00:24:56,490 --> 00:25:00,870 +the download times are between approximately 0.4 + +237 +00:25:00,870 --> 00:25:09,410 +seconds and around 1.2 so this value is 0.4 the + +238 +00:25:09,410 --> 00:25:13,590 +other one is approximately 1.2 so again 95 percent + +239 +00:25:13,590 --> 00:25:18,550 +of the download times are between 0.4 seconds + +240 +00:25:18,550 --> 00:25:22,470 +approximately and one minute. This problem maybe + +241 +00:25:22,470 --> 00:25:25,590 +is the most important one for this chapter. + +242 +00:25:26,790 --> 00:25:30,550 +Exactly in the exam you will see something like + +243 +00:25:30,550 --> 00:25:35,170 +that. Either for part A, B and C which are the + +244 +00:25:35,170 --> 00:25:40,490 +same and the backward normal calculations as part + +245 +00:25:40,490 --> 00:25:44,430 +D and E. Any question? + +246 +00:25:51,660 --> 00:25:59,600 +let's go solve true and false problems for the + +247 +00:25:59,600 --> 00:26:03,240 +practice in chat asses + +248 +00:26:20,840 --> 00:26:24,380 +The Z-score should be one positive and the other + +249 +00:26:24,380 --> 00:26:28,580 +is negative, not A and B. 
The corresponding Z + +250 +00:26:28,580 --> 00:26:34,940 +-score here should have the same values but + +251 +00:26:34,940 --> 00:26:37,020 +negative sign, not A and B. + +252 +00:26:40,120 --> 00:26:48,380 +now let's do some rex problems for chapter 6 now + +253 +00:26:48,380 --> 00:26:53,320 +just look at the minus sign the probability that + +254 +00:26:53,320 --> 00:26:57,220 +standard normal random variable C falls between + +255 +00:26:57,220 --> 00:27:04,620 +minus 1.5 and 0.81 so it's similar to this one but + +256 +00:27:04,620 --> 00:27:08,540 +this is straight forward this score between minus + +257 +00:27:08,540 --> 00:27:18,750 +1.5 up to 0.81 okay + +258 +00:27:18,750 --> 00:27:27,970 +so + +259 +00:27:27,970 --> 00:27:32,050 +number 23 again the probability that standard + +260 +00:27:32,050 --> 00:27:36,790 +normal random variable z fall between minus 1.5 + +261 +00:27:36,790 --> 00:27:41,910 +and 0.81 So it's going to be, we are looking for + +262 +00:27:41,910 --> 00:27:42,530 +this probability. + +263 +00:27:48,150 --> 00:27:53,250 +So it's z less than one point one minus. + +264 +00:27:57,670 --> 00:28:00,330 +Now just do it by yourself, you will figure that + +265 +00:28:00,330 --> 00:28:04,910 +the final answer is point seven four. + +266 +00:28:07,970 --> 00:28:10,730 +That's for 23. I think straightforward one. + +267 +00:28:14,490 --> 00:28:17,330 +Let's do one more, 25 for example. + +268 +00:28:20,690 --> 00:28:23,230 +The probability that standard normal random + +269 +00:28:23,230 --> 00:28:26,450 +variable is below 196. + +270 +00:28:28,390 --> 00:28:38,970 +See? Below 1.96. 
Now from the table, if we look at + +271 +00:28:38,970 --> 00:28:46,510 +the normal table 1.96 + +272 +00:28:46,510 --> 00:28:49,570 +now + +273 +00:28:49,570 --> 00:28:58,050 +the area below 1.6 96 975 so + +274 +00:28:58,050 --> 00:29:02,030 +it's here it mentioned that it's 0.4 so so this + +275 +00:29:02,030 --> 00:29:08,860 +one is false or Because the area to the left of 1 + +276 +00:29:08,860 --> 00:29:13,400 +.96 is not 0.475, it's equal to 975. + +277 +00:29:16,200 --> 00:29:21,000 +That's for 25. Let's do the odd numbers. + +278 +00:29:24,440 --> 00:29:27,360 +The probability that standard normal, the random + +279 +00:29:27,360 --> 00:29:30,780 +variable, falls between minus 2 and minus 0.44. + +280 +00:29:38,540 --> 00:29:42,760 +I'm sorry minus between minus two and negative + +281 +00:29:42,760 --> 00:29:46,980 +point four four so it's the same as z is smaller + +282 +00:29:46,980 --> 00:29:51,340 +than negative point four four minus z less than + +283 +00:29:51,340 --> 00:29:55,580 +minus two it + +284 +00:29:55,580 --> 00:30:00,520 +says the answer is point six four seven two the + +285 +00:30:00,520 --> 00:30:07,180 +exact answer is point + +286 +00:30:07,180 --> 00:30:07,500 +three + +287 +00:30:12,810 --> 00:30:17,890 +So that one is incorrect, + +288 +00:30:18,010 --> 00:30:22,750 +27 is incorrect. You may figure this one by using + +289 +00:30:22,750 --> 00:30:28,290 +the table or sometimes by Excel you can do this + +290 +00:30:28,290 --> 00:30:31,030 +problem. Let's do different one. + +291 +00:30:40,300 --> 00:30:43,360 +Look at 29, the odd number, 29. + +292 +00:30:46,200 --> 00:30:56,640 +29 says that a worker earns 15 dollars per hour at + +293 +00:30:56,640 --> 00:31:01,140 +planet earth and is told that only 2.5 percent of + +294 +00:31:01,140 --> 00:31:06,300 +all workers make a higher wage if the wage is + +295 +00:31:06,300 --> 00:31:09,880 +assumed to be normally distributed. 
And the + +296 +00:31:09,880 --> 00:31:15,360 +standard deviation of wage rates is five per hour. + +297 +00:31:16,460 --> 00:31:22,100 +So the standard deviation is five per hour, five + +298 +00:31:22,100 --> 00:31:25,780 +dollars per hour. The average wage for the plant + +299 +00:31:25,780 --> 00:31:27,000 +is 75. + +300 +00:31:30,620 --> 00:31:33,560 +Now again, go back to the problem. It says that a + +301 +00:31:33,560 --> 00:31:37,390 +worker earns 15 dollars per hour. And it's told + +302 +00:31:37,390 --> 00:31:41,990 +that only 2.5% of all workers make a higher wage. + +303 +00:31:43,150 --> 00:31:55,330 +So it's X more than $15 equal 2.5%. That means + +304 +00:31:55,330 --> 00:31:59,490 +zero to five. So let's see if this one is true or + +305 +00:31:59,490 --> 00:32:05,820 +false. So again, this man, earns $15 per hour at a + +306 +00:32:05,820 --> 00:32:12,060 +plant. And he's told that only 2.5% of all workers + +307 +00:32:12,060 --> 00:32:18,120 +make higher wage, means greater than the one he + +308 +00:32:18,120 --> 00:32:21,460 +just got, which is $15. So people who make greater + +309 +00:32:21,460 --> 00:32:27,140 +than 15, they claim it's 2.5%. So let's see if + +310 +00:32:27,140 --> 00:32:32,230 +that percentage is true or false. So in this case, + +311 +00:32:32,290 --> 00:32:38,070 +we have to convert to this score. So it becomes B + +312 +00:32:38,070 --> 00:32:43,850 +of z greater than 15 minus the mean divided by + +313 +00:32:43,850 --> 00:32:51,110 +sigma. So B of z greater than 7.5 divided by 5 is + +314 +00:32:51,110 --> 00:32:53,850 +1.5. + +315 +00:33:00,750 --> 00:33:05,850 +Make sense? 1.5. So now B of Z greater than 1.5. 1 + +316 +00:33:05,850 --> 00:33:11,170 +minus B of Z is less than 1.5. Go back to the + +317 +00:33:11,170 --> 00:33:23,870 +table. 1.5 is? Look at the table. 9332. 9332. So + +318 +00:33:23,870 --> 00:33:27,430 +the answer should be 668. 
That means + +319 +00:33:31,130 --> 00:33:41,750 +6.68% of the workers have higher wage. Not 2.5%. + +320 +00:33:41,750 --> 00:33:44,770 +So that means this is incorrect. So that's false. + +321 +00:33:49,470 --> 00:33:53,830 +So the answer is false for this problem. + +322 +00:33:57,310 --> 00:33:58,530 +Look at 31. + +323 +00:34:01,550 --> 00:34:04,970 +Do you have any question for this one, for 29? In + +324 +00:34:04,970 --> 00:34:12,150 +29, the claim is 2.5% of all workers have higher + +325 +00:34:12,150 --> 00:34:17,930 +wage than $15. Let's see if this claim is true or + +326 +00:34:17,930 --> 00:34:22,190 +false. So the problem again is B probability of X + +327 +00:34:22,190 --> 00:34:26,630 +greater than 15 equals 2.5%. We figure out that + +328 +00:34:26,630 --> 00:34:33,250 +the answer is 6.68%. So the claim is false. Now, + +329 +00:34:33,530 --> 00:34:41,430 +31. Any set, any set of normality, oh I'm sorry, + +330 +00:34:41,490 --> 00:34:44,630 +any set of normally distributed data can be + +331 +00:34:44,630 --> 00:34:48,430 +transformed to its standardized form. True or + +332 +00:34:48,430 --> 00:34:51,190 +false? 31. + +333 +00:34:54,170 --> 00:34:59,870 +It says that any set of normally distributed can + +334 +00:34:59,870 --> 00:35:00,730 +be transformed. + +335 +00:35:06,630 --> 00:35:10,950 +Let's see 32. The middle spread. + +336 +00:35:16,660 --> 00:35:21,020 +That is the middle 50% of the normal distribution + +337 +00:35:21,020 --> 00:35:27,000 +is equal to one standard deviation. That's true. + +338 +00:35:27,240 --> 00:35:33,860 +It's incorrect. Because we mentioned that 68% of + +339 +00:35:33,860 --> 00:35:38,040 +the data are within one standard deviation above + +340 +00:35:38,040 --> 00:35:42,300 +the mean. Within the mean. So it's false. + +341 +00:35:46,160 --> 00:35:51,740 +Instead of 50, we have to say that 68%. The + +342 +00:35:51,740 --> 00:35:57,500 +empirical rule. 33. 
The normal probability plot, + +343 +00:35:58,600 --> 00:36:02,060 +the one we just discussed last time, may be used + +344 +00:36:02,060 --> 00:36:06,640 +to assess the assumption of normality for a + +345 +00:36:06,640 --> 00:36:09,560 +particular batch of data. As we mentioned before, + +346 +00:36:10,240 --> 00:36:15,220 +One of the rules that we can use to determine if + +347 +00:36:15,220 --> 00:36:17,940 +the data is normally distributed or not is called + +348 +00:36:17,940 --> 00:36:20,940 +the normal probability plot. So it's true. So + +349 +00:36:20,940 --> 00:36:26,000 +again, normal probability plot is used to assess + +350 +00:36:26,000 --> 00:36:31,120 +the assumption of normality for a data. + +351 +00:36:34,300 --> 00:36:41,410 +Let's see, for example, 35. The probability that a + +352 +00:36:41,410 --> 00:36:46,050 +standard normal variable z is positive is, + +353 +00:36:47,290 --> 00:36:52,910 +the probability that a standard normal variable z + +354 +00:36:52,910 --> 00:36:58,770 +is positive is, now if you, this is a table, z + +355 +00:36:58,770 --> 00:37:02,070 +cubed. It's one. + +356 +00:37:06,800 --> 00:37:12,660 +Again, the probability that + +357 +00:37:12,660 --> 00:37:16,960 +a standardized normal variable Z is positive is + +358 +00:37:16,960 --> 00:37:17,580 +the probability. + +359 +00:37:26,340 --> 00:37:28,820 +Let's do one more. + +360 +00:37:51,430 --> 00:37:54,970 +These problems are the same, some of these. + +361 +00:37:57,890 --> 00:38:07,490 +Now look at 6 and 7. Suppose Z has a standard + +362 +00:38:07,490 --> 00:38:11,750 +normal distribution with a mean of zero and + +363 +00:38:11,750 --> 00:38:15,450 +standard relation of one. And the probability Z is + +364 +00:38:15,450 --> 00:38:20,170 +less than 1.25. Straight forward, just go back to + +365 +00:38:20,170 --> 00:38:25,930 +the table. Z less than 1.15, 1.15, + +366 +00:38:30,610 --> 00:38:43,210 +08, 8749. So that's correct. 8749 is correct. 
Any + +367 +00:38:43,210 --> 00:38:47,570 +question? So let's move. + +368 +00:38:50,870 --> 00:38:55,930 +Chapter seven. So that's for the practice for + +369 +00:38:55,930 --> 00:38:56,590 +chapter seven. + +370 +00:38:59,430 --> 00:39:05,770 +Chapter seven talks about actually two things. One + +371 +00:39:05,770 --> 00:39:11,930 +is called sampling and the other topic is sampling + +372 +00:39:11,930 --> 00:39:16,490 +distributions. Mainly there are four learning + +373 +00:39:16,490 --> 00:39:23,090 +objectives for this chapter. Number one, we have + +374 +00:39:23,090 --> 00:39:25,210 +to distinguish between different sampling + +375 +00:39:25,210 --> 00:39:29,850 +techniques or methods. We'll talk about + +376 +00:39:29,850 --> 00:39:33,050 +probability and non-probability sampling, and we + +377 +00:39:33,050 --> 00:39:35,900 +have to distinguish between these two. The other + +378 +00:39:35,900 --> 00:39:40,200 +objective for this chapter will be the concept of + +379 +00:39:40,200 --> 00:39:44,640 +semantic distribution. Now, instead of using just + +380 +00:39:44,640 --> 00:39:49,120 +X, as we did in chapter three, what's the + +381 +00:39:49,120 --> 00:39:51,380 +probability of X, for example, less than 15? + +382 +00:39:52,320 --> 00:39:57,240 +Instead of that, we'll talk about not X, the + +383 +00:39:57,240 --> 00:40:00,180 +statistic itself, maybe X bar or the sample mean. + +384 +00:40:00,960 --> 00:40:04,120 +Number three is to compute probabilities related + +385 +00:40:04,120 --> 00:40:08,600 +to the sample mean, not the exact value of X. So + +386 +00:40:08,600 --> 00:40:12,340 +here we are looking for V of X bar smaller than 1 + +387 +00:40:12,340 --> 00:40:16,360 +.5. So in this case, to compute these + +388 +00:40:16,360 --> 00:40:21,320 +probabilities, we have to know that all the + +389 +00:40:23,350 --> 00:40:26,110 +concepts in chapter three should be understood. 
+ +390 +00:40:26,530 --> 00:40:31,050 +Otherwise, you cannot do any problems here because + +391 +00:40:31,050 --> 00:40:34,050 +here will depend actually how can we find the + +392 +00:40:34,050 --> 00:40:36,870 +probabilities underneath the normal table. But + +393 +00:40:36,870 --> 00:40:40,430 +instead of X, in this case, we have the sample + +394 +00:40:40,430 --> 00:40:43,630 +mean X bar. So that's the difference between + +395 +00:40:43,630 --> 00:40:48,860 +chapter six and the next chapter. So here, In + +396 +00:40:48,860 --> 00:40:51,900 +chapter six, we are talking about the sampling + +397 +00:40:51,900 --> 00:40:56,220 +distribution of the sample mean. Also, there is + +398 +00:40:56,220 --> 00:41:00,120 +something new, which is called sample proportion. + +399 +00:41:00,680 --> 00:41:02,460 +Now, let's see the difference between these two. + +400 +00:41:03,980 --> 00:41:08,960 +If you remember, at the first class, we said there + +401 +00:41:08,960 --> 00:41:13,160 +are two types of data. One is called numerical, + +402 +00:41:13,380 --> 00:41:14,020 +which is quantitative. + +403 +00:41:16,790 --> 00:41:18,090 +And the other one is qualitative. + +404 +00:41:21,870 --> 00:41:24,530 +So we have numerical data, which means + +405 +00:41:24,530 --> 00:41:31,890 +quantitative data, and qualitative data. Now, for + +406 +00:41:31,890 --> 00:41:35,230 +numerical data, we can use the sample mean as a + +407 +00:41:35,230 --> 00:41:41,830 +measure of central tendency, x bar. But for + +408 +00:41:41,830 --> 00:41:45,390 +qualitative data, for example, if we have gender, + +409 +00:41:47,300 --> 00:41:51,980 +Gender either males or females. In this case, we + +410 +00:41:51,980 --> 00:41:56,860 +cannot say that the mean of females in this school + +411 +00:41:56,860 --> 00:42:02,200 +is 1.2, for example. It doesn't make any sense. 
+ +412 +00:42:02,940 --> 00:42:08,280 +But it's better to say that IUG has, for example, \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..8f33d171f75f8490bd857c905940543d2524bc76 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_postprocess.srt @@ -0,0 +1,1748 @@ +1 +00:00:05,430 --> 00:00:08,470 +We are going to do some practice problems for + +2 +00:00:08,470 --> 00:00:16,890 +chapter 6. First problem will be 6.36 on page 226. + +3 +00:00:19,930 --> 00:00:22,510 +So open page 226. + +4 +00:00:26,040 --> 00:00:30,020 +Insha'Allah today we'll discuss some problems from + +5 +00:00:30,020 --> 00:00:35,200 +chapter 6 and one of these problems will be 6.36 + +6 +00:00:35,200 --> 00:00:46,420 +so let's read 36 it it's asked about suppose that + +7 +00:00:46,420 --> 00:00:51,600 +the download time is normally distributed with + +8 +00:00:51,600 --> 00:00:55,380 +mean and the mean is given and the standard + +9 +00:00:55,380 --> 00:00:58,940 +deviation is given in this case the mean equals 0 + +10 +00:00:58,940 --> 00:01:06,260 +.8 second so again the mean is 0.8 where sigma + +11 +00:01:06,260 --> 00:01:12,340 +equals 0.2 seconds so mu is 0.8 seconds that the + +12 +00:01:12,340 --> 00:01:16,560 +time required to download from the internet an + +13 +00:01:16,560 --> 00:01:20,020 +image file or something like that Now he asks + +14 +00:01:20,020 --> 00:01:22,560 +about what's the probability that a download time + +15 +00:01:22,560 --> 00:01:27,900 +is part A less + +16 +00:01:27,900 --> 00:01:31,300 +than one second. So what's the probability of + +17 +00:01:31,300 --> 00:01:39,360 +getting time less than one second? 
Part B, what's + +18 +00:01:39,360 --> 00:01:44,160 +the probability that a download time is between 0 + +19 +00:01:44,160 --> 00:01:46,520 +.5 and 1.5 seconds? + +20 +00:01:51,810 --> 00:02:02,510 +and 1.5 seconds but see above 0.5 seconds last + +21 +00:02:02,510 --> 00:02:07,230 +time i think we did some something + +22 +00:02:07,230 --> 00:02:10,550 +like that so for the first one b of x smaller than + +23 +00:02:10,550 --> 00:02:13,400 +one The first step, we have to compute the z + +24 +00:02:13,400 --> 00:02:19,080 +-score. And that's straightforward. Just z less + +25 +00:02:19,080 --> 00:02:28,720 +than 1 minus 0.8 divided by sigma. So 1 minus 0.8 + +26 +00:02:28,720 --> 00:02:33,480 +is 0.2. So B of z less than 1. + +27 +00:02:36,620 --> 00:02:41,150 +Now by using the normal table, we have The normal + +28 +00:02:41,150 --> 00:02:44,990 +table, I think we did many times for this table. + +29 +00:02:46,930 --> 00:02:48,110 +Now for 1. + +30 +00:02:52,290 --> 00:03:01,010 +So 1 is 0.8413. So it's 0.8413. This is the + +31 +00:03:01,010 --> 00:03:05,850 +probability that a download time is smaller than + +32 +00:03:05,850 --> 00:03:06,010 +1. + +33 +00:03:14,230 --> 00:03:19,690 +Now the probability is between two values, 0.5 and + +34 +00:03:19,690 --> 00:03:25,570 +1.5. So in this case, we have to find the z values + +35 +00:03:25,570 --> 00:03:32,170 +for the corresponding x, 0.5 and 1.5. So in this + +36 +00:03:32,170 --> 00:03:39,080 +case, this one equals. 0.5 minus 0.8 divided by + +37 +00:03:39,080 --> 00:03:47,420 +sigma smaller than x is transformed to this form. + +38 +00:03:52,920 --> 00:03:57,480 +Smaller than 1.5 minus 0.8 divided by 0.2. + +39 +00:04:03,500 --> 00:04:10,320 +Exactly, minus 1.5. Smaller than z, smaller than 1 + +40 +00:04:10,320 --> 00:04:15,640 +.5 minus 0.8 is 0.7, divided by 0.2 is 3.4. 
+ +41 +00:04:19,320 --> 00:04:23,640 +So, we are looking now for the probability of z + +42 +00:04:23,640 --> 00:04:28,740 +-score between minus 0.5 and smaller than 0.5. + +43 +00:04:29,460 --> 00:04:32,780 +Now, if we are looking for this kind of + +44 +00:04:32,780 --> 00:04:35,760 +probability, we have to find the probability of z + +45 +00:04:35,760 --> 00:04:43,640 +smaller than 3.5 minus z smaller than negative 1 + +46 +00:04:43,640 --> 00:04:51,720 +.5. Now, if we go back to the table we have. Now 3 + +47 +00:04:51,720 --> 00:04:56,640 +.5 all the way down up to the end of this round. + +48 +00:04:58,300 --> 00:05:05,260 +The table I have, the maximum value of Z is 3.4 + +49 +00:05:05,260 --> 00:05:15,900 +all the way up to 9. So that means I have only B + +50 +00:05:15,900 --> 00:05:16,360 +of Z + +51 +00:05:20,930 --> 00:05:26,590 +less than this value. And the corresponding area + +52 +00:05:26,590 --> 00:05:28,870 +is 9998. + +53 +00:05:31,910 --> 00:05:34,430 +But for this specific example, we are looking for + +54 +00:05:34,430 --> 00:05:38,090 +V of Z smaller than 3.5, which is roughly equal to + +55 +00:05:38,090 --> 00:05:42,270 +3.49. So the answer is around this value. + +56 +00:05:45,330 --> 00:05:55,950 +9998 approximately 9998 excuse me 9998 + +57 +00:05:55,950 --> 00:06:01,030 +this one minus + +58 +00:06:27,000 --> 00:06:32,670 +Again, we are looking for minus 1.5 up to 3.5 this + +59 +00:06:32,670 --> 00:06:40,030 +area now the dashed area which is between minus 1 + +60 +00:06:40,030 --> 00:06:44,010 +.5 all the way up to 3.5 equals the area to the + +61 +00:06:44,010 --> 00:06:49,930 +left of 3.5 which is B of Z less than 3.5 minus + +62 +00:06:49,930 --> 00:06:58,310 +the area to the left of negative 1 minus Z greater + +63 +00:06:58,310 --> 00:07:03,450 +than 1.5 this value if we are to compute the + +64 +00:07:03,450 --> 00:07:08,000 +probability of Z smaller than negative 1.5. 
Either + +65 +00:07:08,000 --> 00:07:12,240 +you can use the normal table directly, but the + +66 +00:07:12,240 --> 00:07:15,240 +other page were negative these scores. In this + +67 +00:07:15,240 --> 00:07:21,680 +case, minus 1.5. 0668. 0668. + +68 +00:07:25,420 --> 00:07:34,280 +Okay. Now imagine that you only have the positive + +69 +00:07:34,280 --> 00:07:35,520 +z-scope. + +70 +00:07:38,160 --> 00:07:42,060 +Again, suppose that the table you have in the exam + +71 +00:07:42,060 --> 00:07:47,700 +is just the positive values of z. How can we find + +72 +00:07:47,700 --> 00:07:51,620 +the probability of z greater than minus 1.5, + +73 +00:07:51,740 --> 00:07:58,060 +smaller than minus 1.5? In this case, b of z is + +74 +00:07:58,060 --> 00:08:02,560 +less than negative 1.5. The area to the left here + +75 +00:08:03,400 --> 00:08:08,840 +is the same as the area to the right of the same + +76 +00:08:08,840 --> 00:08:12,040 +value, but positive one. So this is equal to B of + +77 +00:08:12,040 --> 00:08:18,960 +Z greater than 1.5. Because the same area, minus 1 + +78 +00:08:18,960 --> 00:08:22,440 +.5 all the way up to minus infinity, that equals + +79 +00:08:22,440 --> 00:08:25,560 +from 1.5 to infinity because of symmetric + +80 +00:08:25,560 --> 00:08:30,950 +distribution. Now, B of Z greater than 1.5, The + +81 +00:08:30,950 --> 00:08:33,970 +table we have here gives the area to the left of + +82 +00:08:33,970 --> 00:08:43,410 +z. So this is 1 minus b of z less than 1.5. Now 1 + +83 +00:08:43,410 --> 00:08:46,650 +.5 from this table is 9332. + +84 +00:08:50,850 --> 00:08:56,030 +Okay, so that will get 0668, + +85 +00:08:56,470 --> 00:08:58,650 +which is the same result as we got directly from + +86 +00:08:58,650 --> 00:09:02,440 +the negative z table. 
but make sure that in the + +87 +00:09:02,440 --> 00:09:06,800 +exam I will give you just only the positive table + +88 +00:09:06,800 --> 00:09:12,060 +now subtract these two values you will get the + +89 +00:09:12,060 --> 00:09:22,600 +answer for this for part B 9 9 9 3 0 9 3 3 0 this + +90 +00:09:22,600 --> 00:09:29,020 +is the final result for this example now part C + +91 +00:09:31,570 --> 00:09:34,430 +What's the probability that the download time will + +92 +00:09:34,430 --> 00:09:36,370 +take above 0.5 seconds? + +93 +00:09:40,390 --> 00:09:49,050 +So we are looking again, B of X above 0.5. Similar + +94 +00:09:49,050 --> 00:09:51,230 +way as we did in part A. + +95 +00:09:54,090 --> 00:10:02,850 +So B of Z, 0.5 minus the mean. Divide by 6. + +96 +00:10:06,890 --> 00:10:13,850 +So B of Z greater + +97 +00:10:13,850 --> 00:10:20,330 +than negative 1.5. Now B of Z greater than minus 1 + +98 +00:10:20,330 --> 00:10:26,810 +.5. It means we are looking for the area above + +99 +00:10:26,810 --> 00:10:38,370 +minus 1.5 So + +100 +00:10:38,370 --> 00:10:42,850 +this area Now + +101 +00:10:42,850 --> 00:10:50,850 +the area above 1 minus 0.5 Equals 1 minus + +102 +00:10:55,050 --> 00:10:57,890 +B of Z less than negative 115. + +103 +00:11:00,210 --> 00:11:06,510 +As we did here, this probability is + +104 +00:11:06,510 --> 00:11:08,090 +0668. + +105 +00:11:09,770 --> 00:11:12,570 +So the answer again is 9334. + +106 +00:11:16,610 --> 00:11:24,590 +So that's for part C. Any question? Now, part D. + +107 +00:11:27,050 --> 00:11:35,910 +And they ask about 99%. 99 + +108 +00:11:35,910 --> 00:11:44,170 +% of the + +109 +00:11:44,170 --> 00:11:46,070 +download times. + +110 +00:11:55,380 --> 00:11:56,920 +How many seconds? + +111 +00:12:11,420 --> 00:12:15,420 +Exactly, in this case, the probability is given, + +112 +00:12:15,540 --> 00:12:16,820 +which is 99%. 
+ +113 +00:12:19,190 --> 00:12:25,150 +Now, if 99% of the download times are above how + +114 +00:12:25,150 --> 00:12:29,090 +many seconds? So, in this case, we are looking for + +115 +00:12:29,090 --> 00:12:34,350 +the value, for example, for A, such that B of X + +116 +00:12:34,350 --> 00:12:41,130 +greater than A equals 99%. Now, in this type of + +117 +00:12:41,130 --> 00:12:44,270 +problems, we have to make a graph first in order + +118 +00:12:44,270 --> 00:12:48,080 +to determine the location of A. because it may be + +119 +00:12:48,080 --> 00:12:50,600 +to the right or to the left side. It depends + +120 +00:12:50,600 --> 00:12:54,840 +actually on two things. Number one, the size, the + +121 +00:12:54,840 --> 00:12:58,280 +greater than or smaller than, and the other is the + +122 +00:12:58,280 --> 00:13:01,800 +probability. Is it above 1.5 or smaller than 1.5? + +123 +00:13:02,220 --> 00:13:05,800 +So you have to keep careful for this type of + +124 +00:13:05,800 --> 00:13:11,260 +questions. So in this case. It should be to the + +125 +00:13:11,260 --> 00:13:15,180 +left. It should be to the left. because the area + +126 +00:13:15,180 --> 00:13:21,600 +to the left here makes sense it's 99% but if the + +127 +00:13:21,600 --> 00:13:25,460 +location is to the right side here it doesn't make + +128 +00:13:25,460 --> 00:13:28,580 +any sense that B makes greater than or equal to 99 + +129 +00:13:28,580 --> 00:13:33,460 +% because the area here is split into two halves + +130 +00:13:33,460 --> 00:13:38,460 +so 50% to the right 50% to the left of the + +131 +00:13:38,460 --> 00:13:42,900 +vertical line here so A should be to the left side + +132 +00:13:44,110 --> 00:13:48,870 +Make sense? Now, V of X greater than A equals 99%. + +133 +00:13:48,870 --> 00:13:52,130 +So + +134 +00:13:52,130 --> 00:13:58,230 +this area is 99%. 
Now, if we go back to the table + +135 +00:13:58,230 --> 00:14:01,670 +we have, the table again gives the area to the + +136 +00:14:01,670 --> 00:14:08,570 +left side. So this one exactly equals V of X + +137 +00:14:08,570 --> 00:14:16,140 +smaller than A, which is 1% because the area to + +138 +00:14:16,140 --> 00:14:20,860 +the right of A is 99 so the area to the left of A + +139 +00:14:20,860 --> 00:14:25,680 +is 1-99 which is 1% now here we have to look + +140 +00:14:25,680 --> 00:14:34,140 +inside the body of the table at the value of 01 so + +141 +00:14:34,140 --> 00:14:37,500 +in this case this score should be negative or + +142 +00:14:37,500 --> 00:14:42,310 +positive Since the probability is 100% smaller + +143 +00:14:42,310 --> 00:14:46,130 +than 1.5, so it should be negative. So if you go + +144 +00:14:46,130 --> 00:14:51,890 +back to the table, negative 1. Look at 0.1. + +145 +00:15:00,290 --> 00:15:02,550 +Minus 2.34. + +146 +00:15:12,640 --> 00:15:19,800 +So the approximate answer actually is 0099. + +147 +00:15:21,160 --> 00:15:26,660 +The closest value. You may take this value. You + +148 +00:15:26,660 --> 00:15:31,720 +will be okay. So this one is more closer to 01. + +149 +00:15:32,160 --> 00:15:36,800 +than 0102. So my corresponding z-score is negative + +150 +00:15:36,800 --> 00:15:47,820 +2.4, I'm sorry, 2.35. So z-score, negative 2.33, + +151 +00:15:50,900 --> 00:15:52,520 +0123. + +152 +00:15:54,500 --> 00:15:59,160 +So this is the approximate answer. sometimes maybe + +153 +00:15:59,160 --> 00:16:04,340 +if you have a calculator or excel you may + +154 +00:16:04,340 --> 00:16:08,720 +determine the exact value in this case which is + +155 +00:16:08,720 --> 00:16:15,500 +minus 2.3263 this is the exact answer but the + +156 +00:16:15,500 --> 00:16:20,460 +approximate one is 5 so my z score is negative 2 + +157 +00:16:20,460 --> 00:16:23,560 +.33 now the value of a + +158 +00:16:27,300 --> 00:16:32,000 +equals Mu plus Z Sigma. 
The one we just discussed + +159 +00:16:32,000 --> 00:16:39,580 +last time. Remember, when Z equals minus Mu + +160 +00:16:39,580 --> 00:16:44,320 +divided by Sigma, just cross multiplication, you + +161 +00:16:44,320 --> 00:16:48,980 +will get X + +162 +00:16:48,980 --> 00:16:52,340 +minus Mu equals Z Sigma. That means X equals Mu + +163 +00:16:52,340 --> 00:16:58,320 +plus Z Sigma. Fixed same as A, so A equals Mu plus + +164 +00:16:58,320 --> 00:17:05,640 +Z Sigma, Mu is given 0.8, Z negative 2.33 times + +165 +00:17:05,640 --> 00:17:13,100 +Sigma, that will give the final answer which is 0 + +166 +00:17:13,100 --> 00:17:14,060 +.3347. + +167 +00:17:17,480 --> 00:17:24,130 +So again, He said that 99% of the downward times + +168 +00:17:24,130 --> 00:17:27,810 +are above how many seconds. So we are looking for + +169 +00:17:27,810 --> 00:17:32,470 +the value of A, such that U makes greater than or + +170 +00:17:32,470 --> 00:17:38,930 +equal to 99%. So A is located to the left side of + +171 +00:17:38,930 --> 00:17:40,330 +the curve, normal curve. + +172 +00:17:43,110 --> 00:17:45,350 +And again, the table gives the area to the left of + +173 +00:17:45,350 --> 00:17:52,620 +Z. So the area to the left is 1%. Now if you check + +174 +00:17:52,620 --> 00:17:57,900 +the z value corresponding to this one, 101, you + +175 +00:17:57,900 --> 00:18:00,700 +figure that z, the approximate answer is negative + +176 +00:18:00,700 --> 00:18:07,160 +2.33. Now just use this value, and plug it into + +177 +00:18:07,160 --> 00:18:11,700 +this equation, you will get this result. Yes. Is + +178 +00:18:11,700 --> 00:18:12,760 +it negative? + +179 +00:18:22,750 --> 00:18:32,910 +Last part, part E 95 + +180 +00:18:32,910 --> 00:18:46,170 +% 95 + +181 +00:18:46,170 --> 00:18:46,810 +% + +182 +00:18:50,400 --> 00:18:55,980 +after the load times are between what two values + +183 +00:18:55,980 --> 00:18:59,120 +approximately distributed around the mean. 
+ +184 +00:19:04,960 --> 00:19:17,340 +So around 95 + +185 +00:19:17,340 --> 00:19:25,090 +% Of the download times, what two values + +186 +00:19:25,090 --> 00:19:31,170 +symmetrically distributed around the mean? the + +187 +00:19:31,170 --> 00:19:36,250 +area here for example between + +188 +00:19:36,250 --> 00:19:44,850 +E and B is 95% and he mentioned the proximity so + +189 +00:19:44,850 --> 00:19:48,790 +this E is the same as B but still we have negative + +190 +00:19:48,790 --> 00:19:56,730 +sign so we are looking we + +191 +00:19:56,730 --> 00:19:59,170 +are looking for the probability of P of X + +192 +00:20:05,550 --> 00:20:10,290 +between A and B equal 95% now by symmetric + +193 +00:20:10,290 --> 00:20:14,350 +distribution exactly + +194 +00:20:14,350 --> 00:20:19,430 +this value A is the same as B but with negative + +195 +00:20:19,430 --> 00:20:25,270 +sign now since the area between A and B is 95% and + +196 +00:20:25,270 --> 00:20:29,890 +we have symmetric distribution 5% left divided by + +197 +00:20:29,890 --> 00:20:38,150 +2 that means 2.5 to the left of A and 2.5% to the + +198 +00:20:38,150 --> 00:20:38,690 +right of B. + +199 +00:20:43,130 --> 00:20:50,050 +Now, what are the values of A and B? Now, if you + +200 +00:20:50,050 --> 00:20:55,970 +look at this value, B of X less than A equals 2 + +201 +00:20:55,970 --> 00:20:56,910 +.5%. + +202 +00:21:00,780 --> 00:21:05,560 +Be careful, it's 0, 2, 2, 5, 0, 2, 5. Now, what's + +203 +00:21:05,560 --> 00:21:08,440 +the value of A softer, B makes smaller than A by + +204 +00:21:08,440 --> 00:21:13,760 +this one. The same I just we did in bar D. So + +205 +00:21:13,760 --> 00:21:16,360 +that's A. Now, what's the Z score in this case? + +206 +00:21:19,160 --> 00:21:26,180 +If we go back to the normal table, now we are + +207 +00:21:26,180 --> 00:21:26,840 +looking for + +208 +00:21:32,840 --> 00:21:34,220 +Zero to five. + +209 +00:21:37,360 --> 00:21:42,540 +So minus one point nine six. 
+ +210 +00:21:47,840 --> 00:21:54,830 +So Z equals minus one point nine six. Okay. So now + +211 +00:21:54,830 --> 00:22:01,930 +my A equal Mu plus D Sigma Mu + +212 +00:22:01,930 --> 00:22:10,610 +is given is 0.8 Sigma is minus 1.9 Times + +213 +00:22:10,610 --> 00:22:17,190 +the value of Sigma which is 0.2 Similarly + +214 +00:22:17,190 --> 00:22:26,370 +to get the value of A B of X is less than B equals + +215 +00:22:26,370 --> 00:22:33,410 +now the area to the left of B 95% plus 2.5 so + +216 +00:22:33,410 --> 00:22:50,790 +that's minus 7.5 again + +217 +00:22:55,010 --> 00:23:02,330 +b of x smaller than a is 2.5 percent now to get + +218 +00:23:02,330 --> 00:23:06,250 +the corresponding z value for 0 to 5 we have to + +219 +00:23:06,250 --> 00:23:11,730 +look at the normal table inside the normal table + +220 +00:23:11,730 --> 00:23:16,910 +we get from 0 to 5 corresponding to z score of + +221 +00:23:16,910 --> 00:23:23,030 +minus 1.96 so my z score is negative 1.56 now use + +222 +00:23:23,030 --> 00:23:29,910 +this value here, so mu equals 0.8 is the mean, + +223 +00:23:30,490 --> 00:23:38,570 +minus 1.96, the score times 6. The other part, to + +224 +00:23:38,570 --> 00:23:43,250 +get the value of B, the probability of X smaller + +225 +00:23:43,250 --> 00:23:49,150 +than B equals 95 plus 2.5 is 975. + +226 +00:23:51,990 --> 00:23:55,090 +by using the same way we'll get that z score is 1 + +227 +00:23:55,090 --> 00:23:58,550 +.96 as we mentioned before because these two + +228 +00:23:58,550 --> 00:24:02,830 +values here should be the z score the same so now + +229 +00:24:02,830 --> 00:24:09,690 +b equals mu plus 1 + +230 +00:24:09,690 --> 00:24:17,390 +.96 times sigma and that will give you a 1.408 + +231 +00:24:23,280 --> 00:24:28,640 +And B equals 1.1920. + +232 +00:24:29,260 --> 00:24:34,520 +So these are the two values which has 95% between + +233 +00:24:34,520 --> 00:24:41,620 +them. 
So 95% of the data, I mean 95% of the + +234 +00:24:41,620 --> 00:24:50,180 +download times are between 0.4 seconds and 1.19 + +235 +00:24:50,180 --> 00:24:56,490 +seconds. make sense that is again 95 percent of + +236 +00:24:56,490 --> 00:25:00,870 +the download times are between approximately 0.4 + +237 +00:25:00,870 --> 00:25:09,410 +seconds and around 1.2 so this value is 0.4 the + +238 +00:25:09,410 --> 00:25:13,590 +other one is approximately 1.2 so again 95 percent + +239 +00:25:13,590 --> 00:25:18,550 +of the download times are between 0.4 seconds + +240 +00:25:18,550 --> 00:25:22,470 +approximately and one minute. This problem maybe + +241 +00:25:22,470 --> 00:25:25,590 +is the most important one for this chapter. + +242 +00:25:26,790 --> 00:25:30,550 +Exactly in the exam you will see something like + +243 +00:25:30,550 --> 00:25:35,170 +that. Either for part A, B and C which are the + +244 +00:25:35,170 --> 00:25:40,490 +same and the backward normal calculations as part + +245 +00:25:40,490 --> 00:25:44,430 +D and E. Any question? + +246 +00:25:51,660 --> 00:25:59,600 +let's go solve true and false problems for the + +247 +00:25:59,600 --> 00:26:03,240 +practice in chat asses + +248 +00:26:20,840 --> 00:26:24,380 +The Z-score should be one positive and the other + +249 +00:26:24,380 --> 00:26:28,580 +is negative, not A and B. The corresponding Z + +250 +00:26:28,580 --> 00:26:34,940 +-score here should have the same values but + +251 +00:26:34,940 --> 00:26:37,020 +negative sign, not A and B. 
+ +252 +00:26:40,120 --> 00:26:48,380 +now let's do some rex problems for chapter 6 now + +253 +00:26:48,380 --> 00:26:53,320 +just look at the minus sign the probability that + +254 +00:26:53,320 --> 00:26:57,220 +standard normal random variable C falls between + +255 +00:26:57,220 --> 00:27:04,620 +minus 1.5 and 0.81 so it's similar to this one but + +256 +00:27:04,620 --> 00:27:08,540 +this is straight forward this score between minus + +257 +00:27:08,540 --> 00:27:18,750 +1.5 up to 0.81 okay + +258 +00:27:18,750 --> 00:27:27,970 +so + +259 +00:27:27,970 --> 00:27:32,050 +number 23 again the probability that standard + +260 +00:27:32,050 --> 00:27:36,790 +normal random variable z fall between minus 1.5 + +261 +00:27:36,790 --> 00:27:41,910 +and 0.81 So it's going to be, we are looking for + +262 +00:27:41,910 --> 00:27:42,530 +this probability. + +263 +00:27:48,150 --> 00:27:53,250 +So it's z less than one point one minus. + +264 +00:27:57,670 --> 00:28:00,330 +Now just do it by yourself, you will figure that + +265 +00:28:00,330 --> 00:28:04,910 +the final answer is point seven four. + +266 +00:28:07,970 --> 00:28:10,730 +That's for 23. I think straightforward one. + +267 +00:28:14,490 --> 00:28:17,330 +Let's do one more, 25 for example. + +268 +00:28:20,690 --> 00:28:23,230 +The probability that standard normal random + +269 +00:28:23,230 --> 00:28:26,450 +variable is below 196. + +270 +00:28:28,390 --> 00:28:38,970 +See? Below 1.96. Now from the table, if we look at + +271 +00:28:38,970 --> 00:28:46,510 +the normal table 1.96 + +272 +00:28:46,510 --> 00:28:49,570 +now + +273 +00:28:49,570 --> 00:28:58,050 +the area below 1.6 96 975 so + +274 +00:28:58,050 --> 00:29:02,030 +it's here it mentioned that it's 0.4 so so this + +275 +00:29:02,030 --> 00:29:08,860 +one is false or Because the area to the left of 1 + +276 +00:29:08,860 --> 00:29:13,400 +.96 is not 0.475, it's equal to 975. + +277 +00:29:16,200 --> 00:29:21,000 +That's for 25. 
Let's do the odd numbers. + +278 +00:29:24,440 --> 00:29:27,360 +The probability that standard normal, the random + +279 +00:29:27,360 --> 00:29:30,780 +variable, falls between minus 2 and minus 0.44. + +280 +00:29:38,540 --> 00:29:42,760 +I'm sorry minus between minus two and negative + +281 +00:29:42,760 --> 00:29:46,980 +point four four so it's the same as z is smaller + +282 +00:29:46,980 --> 00:29:51,340 +than negative point four four minus z less than + +283 +00:29:51,340 --> 00:29:55,580 +minus two it + +284 +00:29:55,580 --> 00:30:00,520 +says the answer is point six four seven two the + +285 +00:30:00,520 --> 00:30:07,180 +exact answer is point + +286 +00:30:07,180 --> 00:30:07,500 +three + +287 +00:30:12,810 --> 00:30:17,890 +So that one is incorrect, + +288 +00:30:18,010 --> 00:30:22,750 +27 is incorrect. You may figure this one by using + +289 +00:30:22,750 --> 00:30:28,290 +the table or sometimes by Excel you can do this + +290 +00:30:28,290 --> 00:30:31,030 +problem. Let's do different one. + +291 +00:30:40,300 --> 00:30:43,360 +Look at 29, the odd number, 29. + +292 +00:30:46,200 --> 00:30:56,640 +29 says that a worker earns 15 dollars per hour at + +293 +00:30:56,640 --> 00:31:01,140 +planet earth and is told that only 2.5 percent of + +294 +00:31:01,140 --> 00:31:06,300 +all workers make a higher wage if the wage is + +295 +00:31:06,300 --> 00:31:09,880 +assumed to be normally distributed. And the + +296 +00:31:09,880 --> 00:31:15,360 +standard deviation of wage rates is five per hour. + +297 +00:31:16,460 --> 00:31:22,100 +So the standard deviation is five per hour, five + +298 +00:31:22,100 --> 00:31:25,780 +dollars per hour. The average wage for the plant + +299 +00:31:25,780 --> 00:31:27,000 +is 75. + +300 +00:31:30,620 --> 00:31:33,560 +Now again, go back to the problem. It says that a + +301 +00:31:33,560 --> 00:31:37,390 +worker earns 15 dollars per hour. 
And it's told + +302 +00:31:37,390 --> 00:31:41,990 +that only 2.5% of all workers make a higher wage. + +303 +00:31:43,150 --> 00:31:55,330 +So it's X more than $15 equal 2.5%. That means + +304 +00:31:55,330 --> 00:31:59,490 +zero to five. So let's see if this one is true or + +305 +00:31:59,490 --> 00:32:05,820 +false. So again, this man, earns $15 per hour at a + +306 +00:32:05,820 --> 00:32:12,060 +plant. And he's told that only 2.5% of all workers + +307 +00:32:12,060 --> 00:32:18,120 +make higher wage, means greater than the one he + +308 +00:32:18,120 --> 00:32:21,460 +just got, which is $15. So people who make greater + +309 +00:32:21,460 --> 00:32:27,140 +than 15, they claim it's 2.5%. So let's see if + +310 +00:32:27,140 --> 00:32:32,230 +that percentage is true or false. So in this case, + +311 +00:32:32,290 --> 00:32:38,070 +we have to convert to this score. So it becomes B + +312 +00:32:38,070 --> 00:32:43,850 +of z greater than 15 minus the mean divided by + +313 +00:32:43,850 --> 00:32:51,110 +sigma. So B of z greater than 7.5 divided by 5 is + +314 +00:32:51,110 --> 00:32:53,850 +1.5. + +315 +00:33:00,750 --> 00:33:05,850 +Make sense? 1.5. So now B of Z greater than 1.5. 1 + +316 +00:33:05,850 --> 00:33:11,170 +minus B of Z is less than 1.5. Go back to the + +317 +00:33:11,170 --> 00:33:23,870 +table. 1.5 is? Look at the table. 9332. 9332. So + +318 +00:33:23,870 --> 00:33:27,430 +the answer should be 668. That means + +319 +00:33:31,130 --> 00:33:41,750 +6.68% of the workers have higher wage. Not 2.5%. + +320 +00:33:41,750 --> 00:33:44,770 +So that means this is incorrect. So that's false. + +321 +00:33:49,470 --> 00:33:53,830 +So the answer is false for this problem. + +322 +00:33:57,310 --> 00:33:58,530 +Look at 31. + +323 +00:34:01,550 --> 00:34:04,970 +Do you have any question for this one, for 29? In + +324 +00:34:04,970 --> 00:34:12,150 +29, the claim is 2.5% of all workers have higher + +325 +00:34:12,150 --> 00:34:17,930 +wage than $15. 
Let's see if this claim is true or + +326 +00:34:17,930 --> 00:34:22,190 +false. So the problem again is B probability of X + +327 +00:34:22,190 --> 00:34:26,630 +greater than 15 equals 2.5%. We figure out that + +328 +00:34:26,630 --> 00:34:33,250 +the answer is 6.68%. So the claim is false. Now, + +329 +00:34:33,530 --> 00:34:41,430 +31. Any set, any set of normality, oh I'm sorry, + +330 +00:34:41,490 --> 00:34:44,630 +any set of normally distributed data can be + +331 +00:34:44,630 --> 00:34:48,430 +transformed to its standardized form. True or + +332 +00:34:48,430 --> 00:34:51,190 +false? 31. + +333 +00:34:54,170 --> 00:34:59,870 +It says that any set of normally distributed can + +334 +00:34:59,870 --> 00:35:00,730 +be transformed. + +335 +00:35:06,630 --> 00:35:10,950 +Let's see 32. The middle spread. + +336 +00:35:16,660 --> 00:35:21,020 +That is the middle 50% of the normal distribution + +337 +00:35:21,020 --> 00:35:27,000 +is equal to one standard deviation. That's true. + +338 +00:35:27,240 --> 00:35:33,860 +It's incorrect. Because we mentioned that 68% of + +339 +00:35:33,860 --> 00:35:38,040 +the data are within one standard deviation above + +340 +00:35:38,040 --> 00:35:42,300 +the mean. Within the mean. So it's false. + +341 +00:35:46,160 --> 00:35:51,740 +Instead of 50, we have to say that 68%. The + +342 +00:35:51,740 --> 00:35:57,500 +empirical rule. 33. The normal probability plot, + +343 +00:35:58,600 --> 00:36:02,060 +the one we just discussed last time, may be used + +344 +00:36:02,060 --> 00:36:06,640 +to assess the assumption of normality for a + +345 +00:36:06,640 --> 00:36:09,560 +particular batch of data. As we mentioned before, + +346 +00:36:10,240 --> 00:36:15,220 +One of the rules that we can use to determine if + +347 +00:36:15,220 --> 00:36:17,940 +the data is normally distributed or not is called + +348 +00:36:17,940 --> 00:36:20,940 +the normal probability plot. So it's true. 
So + +349 +00:36:20,940 --> 00:36:26,000 +again, normal probability plot is used to assess + +350 +00:36:26,000 --> 00:36:31,120 +the assumption of normality for a data. + +351 +00:36:34,300 --> 00:36:41,410 +Let's see, for example, 35. The probability that a + +352 +00:36:41,410 --> 00:36:46,050 +standard normal variable z is positive is, + +353 +00:36:47,290 --> 00:36:52,910 +the probability that a standard normal variable z + +354 +00:36:52,910 --> 00:36:58,770 +is positive is, now if you, this is a table, z + +355 +00:36:58,770 --> 00:37:02,070 +cubed. It's one. + +356 +00:37:06,800 --> 00:37:12,660 +Again, the probability that + +357 +00:37:12,660 --> 00:37:16,960 +a standardized normal variable Z is positive is + +358 +00:37:16,960 --> 00:37:17,580 +the probability. + +359 +00:37:26,340 --> 00:37:28,820 +Let's do one more. + +360 +00:37:51,430 --> 00:37:54,970 +These problems are the same, some of these. + +361 +00:37:57,890 --> 00:38:07,490 +Now look at 6 and 7. Suppose Z has a standard + +362 +00:38:07,490 --> 00:38:11,750 +normal distribution with a mean of zero and + +363 +00:38:11,750 --> 00:38:15,450 +standard relation of one. And the probability Z is + +364 +00:38:15,450 --> 00:38:20,170 +less than 1.25. Straight forward, just go back to + +365 +00:38:20,170 --> 00:38:25,930 +the table. Z less than 1.15, 1.15, + +366 +00:38:30,610 --> 00:38:43,210 +08, 8749. So that's correct. 8749 is correct. Any + +367 +00:38:43,210 --> 00:38:47,570 +question? So let's move. + +368 +00:38:50,870 --> 00:38:55,930 +Chapter seven. So that's for the practice for + +369 +00:38:55,930 --> 00:38:56,590 +chapter seven. + +370 +00:38:59,430 --> 00:39:05,770 +Chapter seven talks about actually two things. One + +371 +00:39:05,770 --> 00:39:11,930 +is called sampling and the other topic is sampling + +372 +00:39:11,930 --> 00:39:16,490 +distributions. Mainly there are four learning + +373 +00:39:16,490 --> 00:39:23,090 +objectives for this chapter. 
Number one, we have + +374 +00:39:23,090 --> 00:39:25,210 +to distinguish between different sampling + +375 +00:39:25,210 --> 00:39:29,850 +techniques or methods. We'll talk about + +376 +00:39:29,850 --> 00:39:33,050 +probability and non-probability sampling, and we + +377 +00:39:33,050 --> 00:39:35,900 +have to distinguish between these two. The other + +378 +00:39:35,900 --> 00:39:40,200 +objective for this chapter will be the concept of + +379 +00:39:40,200 --> 00:39:44,640 +semantic distribution. Now, instead of using just + +380 +00:39:44,640 --> 00:39:49,120 +X, as we did in chapter three, what's the + +381 +00:39:49,120 --> 00:39:51,380 +probability of X, for example, less than 15? + +382 +00:39:52,320 --> 00:39:57,240 +Instead of that, we'll talk about not X, the + +383 +00:39:57,240 --> 00:40:00,180 +statistic itself, maybe X bar or the sample mean. + +384 +00:40:00,960 --> 00:40:04,120 +Number three is to compute probabilities related + +385 +00:40:04,120 --> 00:40:08,600 +to the sample mean, not the exact value of X. So + +386 +00:40:08,600 --> 00:40:12,340 +here we are looking for V of X bar smaller than 1 + +387 +00:40:12,340 --> 00:40:16,360 +.5. So in this case, to compute these + +388 +00:40:16,360 --> 00:40:21,320 +probabilities, we have to know that all the + +389 +00:40:23,350 --> 00:40:26,110 +concepts in chapter three should be understood. + +390 +00:40:26,530 --> 00:40:31,050 +Otherwise, you cannot do any problems here because + +391 +00:40:31,050 --> 00:40:34,050 +here will depend actually how can we find the + +392 +00:40:34,050 --> 00:40:36,870 +probabilities underneath the normal table. But + +393 +00:40:36,870 --> 00:40:40,430 +instead of X, in this case, we have the sample + +394 +00:40:40,430 --> 00:40:43,630 +mean X bar. So that's the difference between + +395 +00:40:43,630 --> 00:40:48,860 +chapter six and the next chapter. 
So here, In + +396 +00:40:48,860 --> 00:40:51,900 +chapter six, we are talking about the sampling + +397 +00:40:51,900 --> 00:40:56,220 +distribution of the sample mean. Also, there is + +398 +00:40:56,220 --> 00:41:00,120 +something new, which is called sample proportion. + +399 +00:41:00,680 --> 00:41:02,460 +Now, let's see the difference between these two. + +400 +00:41:03,980 --> 00:41:08,960 +If you remember, at the first class, we said there + +401 +00:41:08,960 --> 00:41:13,160 +are two types of data. One is called numerical, + +402 +00:41:13,380 --> 00:41:14,020 +which is quantitative. + +403 +00:41:16,790 --> 00:41:18,090 +And the other one is qualitative. + +404 +00:41:21,870 --> 00:41:24,530 +So we have numerical data, which means + +405 +00:41:24,530 --> 00:41:31,890 +quantitative data, and qualitative data. Now, for + +406 +00:41:31,890 --> 00:41:35,230 +numerical data, we can use the sample mean as a + +407 +00:41:35,230 --> 00:41:41,830 +measure of central tendency, x bar. But for + +408 +00:41:41,830 --> 00:41:45,390 +qualitative data, for example, if we have gender, + +409 +00:41:47,300 --> 00:41:51,980 +Gender either males or females. In this case, we + +410 +00:41:51,980 --> 00:41:56,860 +cannot say that the mean of females in this school + +411 +00:41:56,860 --> 00:42:02,200 +is 1.2, for example. It doesn't make any sense. + +412 +00:42:02,940 --> 00:42:08,280 +But it's better to say that IUG has, for example, + +413 +00:42:08,680 --> 00:42:13,220 +70% females. Makes sense. So this one is called + +414 +00:42:13,220 --> 00:42:16,500 +percentage or proportion. So here we will talk + +415 +00:42:16,500 --> 00:42:23,120 +about the sample proportion. So sample mean is + +416 +00:42:23,120 --> 00:42:27,700 +used for numerical data. But on the other hand, + +417 +00:42:28,480 --> 00:42:31,020 +the sample proportion is used for qualitative + +418 +00:42:31,020 --> 00:42:34,440 +data. 
In this chapter, we are going to know how + +419 +00:42:34,440 --> 00:42:39,200 +can we compute the probabilities related either to + +420 +00:42:39,200 --> 00:42:42,880 +the sample mean or to the sample proportion. The + +421 +00:42:42,880 --> 00:42:46,500 +last one, the last objective for this chapter will + +422 +00:42:46,500 --> 00:42:52,060 +be the importance of using something called + +423 +00:42:52,060 --> 00:42:56,120 +central limit theorem. Under specific conditions, + +424 +00:42:56,280 --> 00:42:59,980 +we'll know how to use this theorem. So these are + +425 +00:42:59,980 --> 00:43:04,400 +the mainly four objectives for this chapter. So + +426 +00:43:04,400 --> 00:43:10,300 +you have to make sure that you understand 100% how + +427 +00:43:10,300 --> 00:43:14,700 +to compute all types of probabilities under the + +428 +00:43:14,700 --> 00:43:18,610 +normal curve. either straightforward calculations + +429 +00:43:18,610 --> 00:43:21,030 +or backward calculations, I mean inverse + +430 +00:43:21,030 --> 00:43:23,570 +calculation. If the probability is given, how can + +431 +00:43:23,570 --> 00:43:27,590 +we find the corresponding X value? Or how can we + +432 +00:43:27,590 --> 00:43:30,930 +find the value by itself, the value of the + +433 +00:43:30,930 --> 00:43:33,930 +probability by itself? So next time, Insha'Allah, + +434 +00:43:34,350 --> 00:43:41,590 +Sunday, we are going to start Chapter seven, why + +435 +00:43:41,590 --> 00:43:45,490 +sample? Why most of the time in our researches we + +436 +00:43:45,490 --> 00:43:49,630 +are using the sample instead of using the + +437 +00:43:49,630 --> 00:43:53,790 +population? Any questions? So that's all. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..af7236e07c720e6d0dfa390697f3fea749136a03 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 2251, "start": 5.43, "end": 22.51, "text": " We are going to do some practice problems for chapter 6. First problem will be 6.36 on page 226. So open page 226.", "tokens": [492, 366, 516, 281, 360, 512, 3124, 2740, 337, 7187, 1386, 13, 2386, 1154, 486, 312, 1386, 13, 11309, 322, 3028, 5853, 21, 13, 407, 1269, 3028, 5853, 21, 13], "avg_logprob": -0.19002016032895735, "compression_ratio": 1.1979166666666667, "no_speech_prob": 0.0, "words": [{"start": 5.43, "end": 5.43, "word": " We", "probability": 0.380126953125}, {"start": 5.43, "end": 5.55, "word": " are", "probability": 0.74169921875}, {"start": 5.55, "end": 5.85, "word": " going", "probability": 0.9462890625}, {"start": 5.85, "end": 6.05, "word": " to", "probability": 0.9697265625}, {"start": 6.05, "end": 6.31, "word": " do", "probability": 0.890625}, {"start": 6.31, "end": 6.63, "word": " some", "probability": 0.90380859375}, {"start": 6.63, "end": 7.29, "word": " practice", "probability": 0.9208984375}, {"start": 7.29, "end": 8.15, "word": " problems", "probability": 0.84619140625}, {"start": 8.15, "end": 8.47, "word": " for", "probability": 0.90380859375}, {"start": 8.47, "end": 8.79, "word": " chapter", "probability": 0.58056640625}, {"start": 8.79, "end": 9.29, "word": " 6.", "probability": 0.74267578125}, {"start": 10.23, "end": 11.05, "word": " First", "probability": 0.7353515625}, {"start": 11.05, "end": 11.77, "word": " problem", "probability": 0.85693359375}, {"start": 11.77, "end": 11.97, "word": " will", "probability": 0.875}, {"start": 11.97, "end": 12.67, "word": " be", "probability": 0.953125}, {"start": 12.67, "end": 13.69, "word": " 6", 
"probability": 0.82568359375}, {"start": 13.69, "end": 14.77, "word": ".36", "probability": 0.962646484375}, {"start": 14.77, "end": 15.23, "word": " on", "probability": 0.841796875}, {"start": 15.23, "end": 15.55, "word": " page", "probability": 0.9375}, {"start": 15.55, "end": 16.89, "word": " 226.", "probability": 0.93896484375}, {"start": 19.93, "end": 20.89, "word": " So", "probability": 0.8486328125}, {"start": 20.89, "end": 21.33, "word": " open", "probability": 0.59765625}, {"start": 21.33, "end": 21.63, "word": " page", "probability": 0.91748046875}, {"start": 21.63, "end": 22.51, "word": " 226.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 2, "seek": 5204, "start": 26.04, "end": 52.04, "text": " Insha'Allah today we'll discuss some problems from chapter 6 and one of these problems will be 6.36 so let's read 36 it it's asked about suppose that the download time is normally distributed with mean", "tokens": [9442, 1641, 6, 26022, 965, 321, 603, 2248, 512, 2740, 490, 7187, 1386, 293, 472, 295, 613, 2740, 486, 312, 1386, 13, 11309, 370, 718, 311, 1401, 8652, 309, 309, 311, 2351, 466, 7297, 300, 264, 5484, 565, 307, 5646, 12631, 365, 914], "avg_logprob": -0.2785866531458768, "compression_ratio": 1.364864864864865, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 26.04, "end": 27.04, "word": " Insha", "probability": 0.63916015625}, {"start": 27.04, "end": 27.1, "word": "'Allah", "probability": 0.594970703125}, {"start": 27.1, "end": 27.38, "word": " today", "probability": 0.52734375}, {"start": 27.38, "end": 27.7, "word": " we'll", "probability": 0.5985107421875}, {"start": 27.7, "end": 28.26, "word": " discuss", "probability": 0.89892578125}, {"start": 28.26, "end": 29.02, "word": " some", "probability": 0.90869140625}, {"start": 29.02, "end": 29.76, "word": " problems", "probability": 0.81884765625}, {"start": 29.76, "end": 30.02, "word": " from", "probability": 0.87939453125}, {"start": 30.02, "end": 30.3, "word": " chapter", 
"probability": 0.646484375}, {"start": 30.3, "end": 30.78, "word": " 6", "probability": 0.61279296875}, {"start": 30.78, "end": 32.08, "word": " and", "probability": 0.29833984375}, {"start": 32.08, "end": 32.32, "word": " one", "probability": 0.90869140625}, {"start": 32.32, "end": 32.48, "word": " of", "probability": 0.966796875}, {"start": 32.48, "end": 32.7, "word": " these", "probability": 0.84814453125}, {"start": 32.7, "end": 33.2, "word": " problems", "probability": 0.841796875}, {"start": 33.2, "end": 33.42, "word": " will", "probability": 0.87255859375}, {"start": 33.42, "end": 33.76, "word": " be", "probability": 0.94970703125}, {"start": 33.76, "end": 34.4, "word": " 6", "probability": 0.91748046875}, {"start": 34.4, "end": 35.2, "word": ".36", "probability": 0.9296875}, {"start": 35.2, "end": 37.16, "word": " so", "probability": 0.33935546875}, {"start": 37.16, "end": 37.82, "word": " let's", "probability": 0.952392578125}, {"start": 37.82, "end": 38.48, "word": " read", "probability": 0.97705078125}, {"start": 38.48, "end": 39.68, "word": " 36", "probability": 0.92431640625}, {"start": 39.68, "end": 41.42, "word": " it", "probability": 0.71728515625}, {"start": 41.42, "end": 42.3, "word": " it's", "probability": 0.6695556640625}, {"start": 42.3, "end": 42.66, "word": " asked", "probability": 0.490234375}, {"start": 42.66, "end": 43.26, "word": " about", "probability": 0.9169921875}, {"start": 43.26, "end": 45.96, "word": " suppose", "probability": 0.7890625}, {"start": 45.96, "end": 46.42, "word": " that", "probability": 0.94287109375}, {"start": 46.42, "end": 46.76, "word": " the", "probability": 0.92041015625}, {"start": 46.76, "end": 47.14, "word": " download", "probability": 0.82568359375}, {"start": 47.14, "end": 47.66, "word": " time", "probability": 0.8896484375}, {"start": 47.66, "end": 48.78, "word": " is", "probability": 0.9501953125}, {"start": 48.78, "end": 49.16, "word": " normally", "probability": 0.90869140625}, {"start": 49.16, "end": 
49.82, "word": " distributed", "probability": 0.91552734375}, {"start": 49.82, "end": 51.6, "word": " with", "probability": 0.91064453125}, {"start": 51.6, "end": 52.04, "word": " mean", "probability": 0.92724609375}], "temperature": 1.0}, {"id": 3, "seek": 7806, "start": 53.46, "end": 78.06, "text": " and the mean is given and the standard deviation is given in this case the mean equals 0.8 second so again the mean is 0.8 where sigma equals 0.2 seconds so mu is 0.8 seconds that the time required to download from the internet an image file or something like that", "tokens": [293, 264, 914, 307, 2212, 293, 264, 3832, 25163, 307, 2212, 294, 341, 1389, 264, 914, 6915, 1958, 13, 23, 1150, 370, 797, 264, 914, 307, 1958, 13, 23, 689, 12771, 6915, 1958, 13, 17, 3949, 370, 2992, 307, 1958, 13, 23, 3949, 300, 264, 565, 4739, 281, 5484, 490, 264, 4705, 364, 3256, 3991, 420, 746, 411, 300], "avg_logprob": -0.18203125397364298, "compression_ratio": 1.743421052631579, "no_speech_prob": 0.0, "words": [{"start": 53.46, "end": 53.7, "word": " and", "probability": 0.1966552734375}, {"start": 53.7, "end": 53.82, "word": " the", "probability": 0.86181640625}, {"start": 53.82, "end": 53.92, "word": " mean", "probability": 0.92333984375}, {"start": 53.92, "end": 54.06, "word": " is", "probability": 0.92919921875}, {"start": 54.06, "end": 54.34, "word": " given", "probability": 0.884765625}, {"start": 54.34, "end": 55.0, "word": " and", "probability": 0.69140625}, {"start": 55.0, "end": 55.2, "word": " the", "probability": 0.650390625}, {"start": 55.2, "end": 55.38, "word": " standard", "probability": 0.86767578125}, {"start": 55.38, "end": 55.72, "word": " deviation", "probability": 0.89404296875}, {"start": 55.72, "end": 55.92, "word": " is", "probability": 0.94677734375}, {"start": 55.92, "end": 56.14, "word": " given", "probability": 0.8876953125}, {"start": 56.14, "end": 57.14, "word": " in", "probability": 0.4541015625}, {"start": 57.14, "end": 57.38, "word": " this", 
"probability": 0.9482421875}, {"start": 57.38, "end": 57.66, "word": " case", "probability": 0.91455078125}, {"start": 57.66, "end": 57.88, "word": " the", "probability": 0.802734375}, {"start": 57.88, "end": 58.12, "word": " mean", "probability": 0.96435546875}, {"start": 58.12, "end": 58.72, "word": " equals", "probability": 0.84765625}, {"start": 58.72, "end": 58.94, "word": " 0", "probability": 0.64111328125}, {"start": 58.94, "end": 59.26, "word": ".8", "probability": 0.98095703125}, {"start": 59.26, "end": 59.7, "word": " second", "probability": 0.465087890625}, {"start": 59.7, "end": 61.5, "word": " so", "probability": 0.67529296875}, {"start": 61.5, "end": 61.72, "word": " again", "probability": 0.93115234375}, {"start": 61.72, "end": 61.92, "word": " the", "probability": 0.88671875}, {"start": 61.92, "end": 62.08, "word": " mean", "probability": 0.95849609375}, {"start": 62.08, "end": 62.24, "word": " is", "probability": 0.93798828125}, {"start": 62.24, "end": 62.62, "word": " 0", "probability": 0.8935546875}, {"start": 62.62, "end": 63.08, "word": ".8", "probability": 0.9970703125}, {"start": 63.08, "end": 65.88, "word": " where", "probability": 0.58154296875}, {"start": 65.88, "end": 66.26, "word": " sigma", "probability": 0.75146484375}, {"start": 66.26, "end": 66.92, "word": " equals", "probability": 0.90576171875}, {"start": 66.92, "end": 67.34, "word": " 0", "probability": 0.9287109375}, {"start": 67.34, "end": 67.56, "word": ".2", "probability": 0.99609375}, {"start": 67.56, "end": 68.06, "word": " seconds", "probability": 0.77587890625}, {"start": 68.06, "end": 70.62, "word": " so", "probability": 0.79345703125}, {"start": 70.62, "end": 70.88, "word": " mu", "probability": 0.6953125}, {"start": 70.88, "end": 71.14, "word": " is", "probability": 0.93505859375}, {"start": 71.14, "end": 71.34, "word": " 0", "probability": 0.92919921875}, {"start": 71.34, "end": 71.58, "word": ".8", "probability": 0.99609375}, {"start": 71.58, "end": 71.96, "word": " 
seconds", "probability": 0.78515625}, {"start": 71.96, "end": 72.18, "word": " that", "probability": 0.84326171875}, {"start": 72.18, "end": 72.34, "word": " the", "probability": 0.6298828125}, {"start": 72.34, "end": 72.6, "word": " time", "probability": 0.88818359375}, {"start": 72.6, "end": 73.0, "word": " required", "probability": 0.806640625}, {"start": 73.0, "end": 73.24, "word": " to", "probability": 0.9677734375}, {"start": 73.24, "end": 73.78, "word": " download", "probability": 0.96044921875}, {"start": 73.78, "end": 75.46, "word": " from", "probability": 0.8330078125}, {"start": 75.46, "end": 75.64, "word": " the", "probability": 0.91748046875}, {"start": 75.64, "end": 76.0, "word": " internet", "probability": 0.90087890625}, {"start": 76.0, "end": 76.56, "word": " an", "probability": 0.82373046875}, {"start": 76.56, "end": 76.84, "word": " image", "probability": 0.9404296875}, {"start": 76.84, "end": 77.14, "word": " file", "probability": 0.916015625}, {"start": 77.14, "end": 77.3, "word": " or", "probability": 0.958984375}, {"start": 77.3, "end": 77.56, "word": " something", "probability": 0.86572265625}, {"start": 77.56, "end": 77.78, "word": " like", "probability": 0.93798828125}, {"start": 77.78, "end": 78.06, "word": " that", "probability": 0.93798828125}], "temperature": 1.0}, {"id": 4, "seek": 10652, "start": 79.16, "end": 106.52, "text": " Now he asks about what's the probability that a download time is part A less than one second. So what's the probability of getting time less than one second? 
Part B, what's the probability that a download time is between 0.5 and 1.5 seconds?", "tokens": [823, 415, 8962, 466, 437, 311, 264, 8482, 300, 257, 5484, 565, 307, 644, 316, 1570, 813, 472, 1150, 13, 407, 437, 311, 264, 8482, 295, 1242, 565, 1570, 813, 472, 1150, 30, 4100, 363, 11, 437, 311, 264, 8482, 300, 257, 5484, 565, 307, 1296, 1958, 13, 20, 293, 502, 13, 20, 3949, 30], "avg_logprob": -0.19573102306042398, "compression_ratio": 1.8195488721804511, "no_speech_prob": 0.0, "words": [{"start": 79.16, "end": 79.56, "word": " Now", "probability": 0.810546875}, {"start": 79.56, "end": 79.82, "word": " he", "probability": 0.517578125}, {"start": 79.82, "end": 80.02, "word": " asks", "probability": 0.325927734375}, {"start": 80.02, "end": 80.4, "word": " about", "probability": 0.87744140625}, {"start": 80.4, "end": 80.94, "word": " what's", "probability": 0.82177734375}, {"start": 80.94, "end": 81.08, "word": " the", "probability": 0.90771484375}, {"start": 81.08, "end": 81.42, "word": " probability", "probability": 0.93603515625}, {"start": 81.42, "end": 81.8, "word": " that", "probability": 0.91748046875}, {"start": 81.8, "end": 81.96, "word": " a", "probability": 0.77099609375}, {"start": 81.96, "end": 82.2, "word": " download", "probability": 0.8974609375}, {"start": 82.2, "end": 82.56, "word": " time", "probability": 0.88671875}, {"start": 82.56, "end": 82.94, "word": " is", "probability": 0.94189453125}, {"start": 82.94, "end": 83.34, "word": " part", "probability": 0.71923828125}, {"start": 83.34, "end": 83.68, "word": " A", "probability": 0.66357421875}, {"start": 83.68, "end": 87.9, "word": " less", "probability": 0.50927734375}, {"start": 87.9, "end": 88.14, "word": " than", "probability": 0.93994140625}, {"start": 88.14, "end": 88.34, "word": " one", "probability": 0.62451171875}, {"start": 88.34, "end": 88.7, "word": " second.", "probability": 0.90478515625}, {"start": 89.68, "end": 90.44, "word": " So", "probability": 0.90673828125}, {"start": 90.44, 
"end": 90.68, "word": " what's", "probability": 0.89990234375}, {"start": 90.68, "end": 90.76, "word": " the", "probability": 0.8740234375}, {"start": 90.76, "end": 91.1, "word": " probability", "probability": 0.95166015625}, {"start": 91.1, "end": 91.3, "word": " of", "probability": 0.962890625}, {"start": 91.3, "end": 91.72, "word": " getting", "probability": 0.96240234375}, {"start": 91.72, "end": 93.0, "word": " time", "probability": 0.84130859375}, {"start": 93.0, "end": 94.4, "word": " less", "probability": 0.92138671875}, {"start": 94.4, "end": 94.8, "word": " than", "probability": 0.94384765625}, {"start": 94.8, "end": 95.46, "word": " one", "probability": 0.912109375}, {"start": 95.46, "end": 95.84, "word": " second?", "probability": 0.91259765625}, {"start": 96.74, "end": 97.08, "word": " Part", "probability": 0.78662109375}, {"start": 97.08, "end": 97.38, "word": " B,", "probability": 0.9794921875}, {"start": 99.0, "end": 99.36, "word": " what's", "probability": 0.96044921875}, {"start": 99.36, "end": 99.5, "word": " the", "probability": 0.921875}, {"start": 99.5, "end": 99.94, "word": " probability", "probability": 0.94140625}, {"start": 99.94, "end": 101.52, "word": " that", "probability": 0.93603515625}, {"start": 101.52, "end": 101.76, "word": " a", "probability": 0.95263671875}, {"start": 101.76, "end": 101.98, "word": " download", "probability": 0.95654296875}, {"start": 101.98, "end": 102.34, "word": " time", "probability": 0.8876953125}, {"start": 102.34, "end": 102.54, "word": " is", "probability": 0.9462890625}, {"start": 102.54, "end": 103.06, "word": " between", "probability": 0.8642578125}, {"start": 103.06, "end": 104.16, "word": " 0", "probability": 0.53662109375}, {"start": 104.16, "end": 104.66, "word": ".5", "probability": 0.997314453125}, {"start": 104.66, "end": 105.24, "word": " and", "probability": 0.92431640625}, {"start": 105.24, "end": 105.52, "word": " 1", "probability": 0.98828125}, {"start": 105.52, "end": 106.04, "word": 
".5", "probability": 0.996826171875}, {"start": 106.04, "end": 106.52, "word": " seconds?", "probability": 0.78662109375}], "temperature": 1.0}, {"id": 5, "seek": 13079, "start": 111.81, "end": 130.79, "text": " and 1.5 seconds but see above 0.5 seconds last time i think we did some something like that so for the first one b of x smaller than one", "tokens": [293, 502, 13, 20, 3949, 457, 536, 3673, 1958, 13, 20, 3949, 1036, 565, 741, 519, 321, 630, 512, 746, 411, 300, 370, 337, 264, 700, 472, 272, 295, 2031, 4356, 813, 472], "avg_logprob": -0.2874540520064971, "compression_ratio": 1.3300970873786409, "no_speech_prob": 0.0, "words": [{"start": 111.81, "end": 112.11, "word": " and", "probability": 0.258056640625}, {"start": 112.11, "end": 112.29, "word": " 1", "probability": 0.8671875}, {"start": 112.29, "end": 112.73, "word": ".5", "probability": 0.9892578125}, {"start": 112.73, "end": 113.15, "word": " seconds", "probability": 0.76708984375}, {"start": 113.15, "end": 113.75, "word": " but", "probability": 0.2120361328125}, {"start": 113.75, "end": 114.05, "word": " see", "probability": 0.6337890625}, {"start": 114.05, "end": 115.29, "word": " above", "probability": 0.9599609375}, {"start": 115.29, "end": 115.59, "word": " 0", "probability": 0.66357421875}, {"start": 115.59, "end": 115.87, "word": ".5", "probability": 0.994873046875}, {"start": 115.87, "end": 116.39, "word": " seconds", "probability": 0.66357421875}, {"start": 116.39, "end": 122.51, "word": " last", "probability": 0.5205078125}, {"start": 122.51, "end": 122.73, "word": " time", "probability": 0.88916015625}, {"start": 122.73, "end": 122.85, "word": " i", "probability": 0.478271484375}, {"start": 122.85, "end": 123.05, "word": " think", "probability": 0.9267578125}, {"start": 123.05, "end": 123.27, "word": " we", "probability": 0.962890625}, {"start": 123.27, "end": 123.69, "word": " did", "probability": 0.95361328125}, {"start": 123.69, "end": 124.09, "word": " some", "probability": 0.8017578125}, 
{"start": 124.09, "end": 127.23, "word": " something", "probability": 0.81787109375}, {"start": 127.23, "end": 127.53, "word": " like", "probability": 0.94482421875}, {"start": 127.53, "end": 127.79, "word": " that", "probability": 0.9345703125}, {"start": 127.79, "end": 128.53, "word": " so", "probability": 0.88525390625}, {"start": 128.53, "end": 128.79, "word": " for", "probability": 0.95849609375}, {"start": 128.79, "end": 128.95, "word": " the", "probability": 0.931640625}, {"start": 128.95, "end": 129.19, "word": " first", "probability": 0.75}, {"start": 129.19, "end": 129.41, "word": " one", "probability": 0.91650390625}, {"start": 129.41, "end": 129.55, "word": " b", "probability": 0.241943359375}, {"start": 129.55, "end": 129.69, "word": " of", "probability": 0.83984375}, {"start": 129.69, "end": 129.93, "word": " x", "probability": 0.9814453125}, {"start": 129.93, "end": 130.31, "word": " smaller", "probability": 0.6943359375}, {"start": 130.31, "end": 130.55, "word": " than", "probability": 0.94873046875}, {"start": 130.55, "end": 130.79, "word": " one", "probability": 0.73583984375}], "temperature": 1.0}, {"id": 6, "seek": 15924, "start": 131.62, "end": 159.24, "text": " The first step, we have to compute the z-score. And that's straightforward. Just z less than 1 minus 0.8 divided by sigma. So 1 minus 0.8 is 0.2. So B of z less than 1. 
Now by using the normal table, we have", "tokens": [440, 700, 1823, 11, 321, 362, 281, 14722, 264, 710, 12, 4417, 418, 13, 400, 300, 311, 15325, 13, 1449, 710, 1570, 813, 502, 3175, 1958, 13, 23, 6666, 538, 12771, 13, 407, 502, 3175, 1958, 13, 23, 307, 1958, 13, 17, 13, 407, 363, 295, 710, 1570, 813, 502, 13, 823, 538, 1228, 264, 2710, 3199, 11, 321, 362], "avg_logprob": -0.2238729498425468, "compression_ratio": 1.3866666666666667, "no_speech_prob": 0.0, "words": [{"start": 131.62, "end": 131.86, "word": " The", "probability": 0.52734375}, {"start": 131.86, "end": 132.14, "word": " first", "probability": 0.8681640625}, {"start": 132.14, "end": 132.36, "word": " step,", "probability": 0.91748046875}, {"start": 132.5, "end": 132.5, "word": " we", "probability": 0.939453125}, {"start": 132.5, "end": 132.7, "word": " have", "probability": 0.9208984375}, {"start": 132.7, "end": 132.82, "word": " to", "probability": 0.96923828125}, {"start": 132.82, "end": 133.08, "word": " compute", "probability": 0.48046875}, {"start": 133.08, "end": 133.26, "word": " the", "probability": 0.849609375}, {"start": 133.26, "end": 133.4, "word": " z", "probability": 0.69677734375}, {"start": 133.4, "end": 133.84, "word": "-score.", "probability": 0.8123372395833334}, {"start": 135.82, "end": 136.1, "word": " And", "probability": 0.62255859375}, {"start": 136.1, "end": 136.38, "word": " that's", "probability": 0.888916015625}, {"start": 136.38, "end": 136.94, "word": " straightforward.", "probability": 0.53955078125}, {"start": 137.26, "end": 137.56, "word": " Just", "probability": 0.841796875}, {"start": 137.56, "end": 138.24, "word": " z", "probability": 0.88916015625}, {"start": 138.24, "end": 139.08, "word": " less", "probability": 0.77734375}, {"start": 139.08, "end": 139.32, "word": " than", "probability": 0.9482421875}, {"start": 139.32, "end": 139.68, "word": " 1", "probability": 0.49560546875}, {"start": 139.68, "end": 141.38, "word": " minus", "probability": 0.89599609375}, 
{"start": 141.38, "end": 141.68, "word": " 0", "probability": 0.78173828125}, {"start": 141.68, "end": 142.1, "word": ".8", "probability": 0.995849609375}, {"start": 142.1, "end": 144.16, "word": " divided", "probability": 0.611328125}, {"start": 144.16, "end": 144.38, "word": " by", "probability": 0.9755859375}, {"start": 144.38, "end": 144.66, "word": " sigma.", "probability": 0.8056640625}, {"start": 147.16, "end": 147.72, "word": " So", "probability": 0.9287109375}, {"start": 147.72, "end": 147.92, "word": " 1", "probability": 0.78125}, {"start": 147.92, "end": 148.22, "word": " minus", "probability": 0.98193359375}, {"start": 148.22, "end": 148.52, "word": " 0", "probability": 0.9833984375}, {"start": 148.52, "end": 148.72, "word": ".8", "probability": 0.998291015625}, {"start": 148.72, "end": 148.94, "word": " is", "probability": 0.93017578125}, {"start": 148.94, "end": 149.26, "word": " 0", "probability": 0.9736328125}, {"start": 149.26, "end": 149.7, "word": ".2.", "probability": 0.995361328125}, {"start": 150.36, "end": 150.88, "word": " So", "probability": 0.94287109375}, {"start": 150.88, "end": 151.02, "word": " B", "probability": 0.34765625}, {"start": 151.02, "end": 151.18, "word": " of", "probability": 0.888671875}, {"start": 151.18, "end": 151.44, "word": " z", "probability": 0.8681640625}, {"start": 151.44, "end": 152.88, "word": " less", "probability": 0.77490234375}, {"start": 152.88, "end": 153.08, "word": " than", "probability": 0.93994140625}, {"start": 153.08, "end": 153.48, "word": " 1.", "probability": 0.541015625}, {"start": 156.62, "end": 157.18, "word": " Now", "probability": 0.96044921875}, {"start": 157.18, "end": 157.36, "word": " by", "probability": 0.53857421875}, {"start": 157.36, "end": 157.64, "word": " using", "probability": 0.931640625}, {"start": 157.64, "end": 157.82, "word": " the", "probability": 0.916015625}, {"start": 157.82, "end": 158.08, "word": " normal", "probability": 0.87841796875}, {"start": 158.08, "end": 158.44, 
"word": " table,", "probability": 0.8837890625}, {"start": 158.72, "end": 158.92, "word": " we", "probability": 0.9580078125}, {"start": 158.92, "end": 159.24, "word": " have", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 7, "seek": 18601, "start": 160.63, "end": 186.01, "text": " The normal table, I think we did many times for this table. Now for 1. So 1 is 0.8413. So it's 0.8413. This is the probability that a download time is smaller than 1.", "tokens": [440, 2710, 3199, 11, 286, 519, 321, 630, 867, 1413, 337, 341, 3199, 13, 823, 337, 502, 13, 407, 502, 307, 1958, 13, 25494, 7668, 13, 407, 309, 311, 1958, 13, 25494, 7668, 13, 639, 307, 264, 8482, 300, 257, 5484, 565, 307, 4356, 813, 502, 13], "avg_logprob": -0.19938150762269893, "compression_ratio": 1.31496062992126, "no_speech_prob": 0.0, "words": [{"start": 160.63, "end": 160.87, "word": " The", "probability": 0.55615234375}, {"start": 160.87, "end": 161.15, "word": " normal", "probability": 0.865234375}, {"start": 161.15, "end": 161.37, "word": " table,", "probability": 0.85009765625}, {"start": 161.49, "end": 161.51, "word": " I", "probability": 0.94873046875}, {"start": 161.51, "end": 161.69, "word": " think", "probability": 0.9150390625}, {"start": 161.69, "end": 161.89, "word": " we", "probability": 0.818359375}, {"start": 161.89, "end": 162.17, "word": " did", "probability": 0.9306640625}, {"start": 162.17, "end": 162.47, "word": " many", "probability": 0.8056640625}, {"start": 162.47, "end": 162.79, "word": " times", "probability": 0.9287109375}, {"start": 162.79, "end": 163.01, "word": " for", "probability": 0.93798828125}, {"start": 163.01, "end": 163.35, "word": " this", "probability": 0.94873046875}, {"start": 163.35, "end": 164.99, "word": " table.", "probability": 0.75732421875}, {"start": 166.93, "end": 167.55, "word": " Now", "probability": 0.9140625}, {"start": 167.55, "end": 167.81, "word": " for", "probability": 0.77197265625}, {"start": 167.81, "end": 168.11, "word": " 1.", 
"probability": 0.6005859375}, {"start": 172.29, "end": 172.91, "word": " So", "probability": 0.9228515625}, {"start": 172.91, "end": 173.19, "word": " 1", "probability": 0.69384765625}, {"start": 173.19, "end": 173.37, "word": " is", "probability": 0.783203125}, {"start": 173.37, "end": 173.59, "word": " 0", "probability": 0.6708984375}, {"start": 173.59, "end": 174.67, "word": ".8413.", "probability": 0.8216959635416666}, {"start": 175.83, "end": 176.15, "word": " So", "probability": 0.55615234375}, {"start": 176.15, "end": 176.47, "word": " it's", "probability": 0.880859375}, {"start": 176.47, "end": 177.99, "word": " 0", "probability": 0.91748046875}, {"start": 177.99, "end": 179.99, "word": ".8413.", "probability": 0.9222005208333334}, {"start": 180.47, "end": 180.75, "word": " This", "probability": 0.78564453125}, {"start": 180.75, "end": 180.89, "word": " is", "probability": 0.9462890625}, {"start": 180.89, "end": 181.01, "word": " the", "probability": 0.70556640625}, {"start": 181.01, "end": 181.43, "word": " probability", "probability": 0.9541015625}, {"start": 181.43, "end": 182.21, "word": " that", "probability": 0.9404296875}, {"start": 182.21, "end": 183.27, "word": " a", "probability": 0.90283203125}, {"start": 183.27, "end": 183.59, "word": " download", "probability": 0.93994140625}, {"start": 183.59, "end": 184.11, "word": " time", "probability": 0.88037109375}, {"start": 184.11, "end": 185.17, "word": " is", "probability": 0.9501953125}, {"start": 185.17, "end": 185.55, "word": " smaller", "probability": 0.87255859375}, {"start": 185.55, "end": 185.85, "word": " than", "probability": 0.94384765625}, {"start": 185.85, "end": 186.01, "word": " 1.", "probability": 0.81689453125}], "temperature": 1.0}, {"id": 8, "seek": 21395, "start": 194.23, "end": 213.95, "text": " Now the probability is between two values, 0.5 and 1.5. So in this case, we have to find the z values for the corresponding x, 0.5 and 1.5. 
So in this case, this one equals.", "tokens": [823, 264, 8482, 307, 1296, 732, 4190, 11, 1958, 13, 20, 293, 502, 13, 20, 13, 407, 294, 341, 1389, 11, 321, 362, 281, 915, 264, 710, 4190, 337, 264, 11760, 2031, 11, 1958, 13, 20, 293, 502, 13, 20, 13, 407, 294, 341, 1389, 11, 341, 472, 6915, 13], "avg_logprob": -0.16268382177633398, "compression_ratio": 1.45, "no_speech_prob": 0.0, "words": [{"start": 194.23, "end": 194.97, "word": " Now", "probability": 0.242431640625}, {"start": 194.97, "end": 195.71, "word": " the", "probability": 0.7099609375}, {"start": 195.71, "end": 196.75, "word": " probability", "probability": 0.43505859375}, {"start": 196.75, "end": 197.03, "word": " is", "probability": 0.8369140625}, {"start": 197.03, "end": 197.29, "word": " between", "probability": 0.89404296875}, {"start": 197.29, "end": 197.49, "word": " two", "probability": 0.85302734375}, {"start": 197.49, "end": 197.97, "word": " values,", "probability": 0.96337890625}, {"start": 198.55, "end": 198.97, "word": " 0", "probability": 0.67333984375}, {"start": 198.97, "end": 199.43, "word": ".5", "probability": 0.994140625}, {"start": 199.43, "end": 199.69, "word": " and", "probability": 0.939453125}, {"start": 199.69, "end": 199.93, "word": " 1", "probability": 0.99267578125}, {"start": 199.93, "end": 200.43, "word": ".5.", "probability": 0.99755859375}, {"start": 201.05, "end": 201.41, "word": " So", "probability": 0.8984375}, {"start": 201.41, "end": 201.55, "word": " in", "probability": 0.75146484375}, {"start": 201.55, "end": 201.77, "word": " this", "probability": 0.94921875}, {"start": 201.77, "end": 202.07, "word": " case,", "probability": 0.91845703125}, {"start": 202.15, "end": 202.25, "word": " we", "probability": 0.9560546875}, {"start": 202.25, "end": 202.51, "word": " have", "probability": 0.94921875}, {"start": 202.51, "end": 202.81, "word": " to", "probability": 0.970703125}, {"start": 202.81, "end": 203.59, "word": " find", "probability": 0.89404296875}, {"start": 
203.59, "end": 204.93, "word": " the", "probability": 0.904296875}, {"start": 204.93, "end": 205.13, "word": " z", "probability": 0.64892578125}, {"start": 205.13, "end": 205.57, "word": " values", "probability": 0.787109375}, {"start": 205.57, "end": 205.81, "word": " for", "probability": 0.9482421875}, {"start": 205.81, "end": 205.99, "word": " the", "probability": 0.91259765625}, {"start": 205.99, "end": 206.45, "word": " corresponding", "probability": 0.85107421875}, {"start": 206.45, "end": 207.73, "word": " x,", "probability": 0.94873046875}, {"start": 208.19, "end": 208.69, "word": " 0", "probability": 0.97412109375}, {"start": 208.69, "end": 209.17, "word": ".5", "probability": 0.9990234375}, {"start": 209.17, "end": 209.63, "word": " and", "probability": 0.8583984375}, {"start": 209.63, "end": 210.61, "word": " 1", "probability": 0.99609375}, {"start": 210.61, "end": 211.15, "word": ".5.", "probability": 0.9990234375}, {"start": 211.53, "end": 211.85, "word": " So", "probability": 0.953125}, {"start": 211.85, "end": 212.01, "word": " in", "probability": 0.82470703125}, {"start": 212.01, "end": 212.17, "word": " this", "probability": 0.9462890625}, {"start": 212.17, "end": 212.51, "word": " case,", "probability": 0.91748046875}, {"start": 212.97, "end": 213.27, "word": " this", "probability": 0.9296875}, {"start": 213.27, "end": 213.49, "word": " one", "probability": 0.84912109375}, {"start": 213.49, "end": 213.95, "word": " equals.", "probability": 0.9404296875}], "temperature": 1.0}, {"id": 9, "seek": 22742, "start": 215.04, "end": 227.42, "text": " 0.5 minus 0.8 divided by sigma smaller than x is transformed to this form.", "tokens": [1958, 13, 20, 3175, 1958, 13, 23, 6666, 538, 12771, 4356, 813, 2031, 307, 16894, 281, 341, 1254, 13], "avg_logprob": -0.37949219793081285, "compression_ratio": 1.0135135135135136, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 215.04, "end": 215.42, "word": " 0", "probability": 0.1046142578125}, {"start": 
215.42, "end": 215.94, "word": ".5", "probability": 0.959228515625}, {"start": 215.94, "end": 216.86, "word": " minus", "probability": 0.634765625}, {"start": 216.86, "end": 217.98, "word": " 0", "probability": 0.927734375}, {"start": 217.98, "end": 218.34, "word": ".8", "probability": 0.994384765625}, {"start": 218.34, "end": 218.72, "word": " divided", "probability": 0.708984375}, {"start": 218.72, "end": 219.08, "word": " by", "probability": 0.9794921875}, {"start": 219.08, "end": 219.86, "word": " sigma", "probability": 0.62353515625}, {"start": 219.86, "end": 222.58, "word": " smaller", "probability": 0.31640625}, {"start": 222.58, "end": 222.94, "word": " than", "probability": 0.9130859375}, {"start": 222.94, "end": 224.98, "word": " x", "probability": 0.4130859375}, {"start": 224.98, "end": 225.82, "word": " is", "probability": 0.6513671875}, {"start": 225.82, "end": 226.56, "word": " transformed", "probability": 0.8154296875}, {"start": 226.56, "end": 226.82, "word": " to", "probability": 0.849609375}, {"start": 226.82, "end": 227.02, "word": " this", "probability": 0.90771484375}, {"start": 227.02, "end": 227.42, "word": " form.", "probability": 0.66943359375}], "temperature": 1.0}, {"id": 10, "seek": 25564, "start": 232.92, "end": 255.64, "text": " Smaller than 1.5 minus 0.8 divided by 0.2. Exactly, minus 1.5. 
Smaller than z, smaller than 1.5 minus 0.8 is 0.7, divided by 0.2 is 3.4.", "tokens": [15287, 260, 813, 502, 13, 20, 3175, 1958, 13, 23, 6666, 538, 1958, 13, 17, 13, 7587, 11, 3175, 502, 13, 20, 13, 15287, 260, 813, 710, 11, 4356, 813, 502, 13, 20, 3175, 1958, 13, 23, 307, 1958, 13, 22, 11, 6666, 538, 1958, 13, 17, 307, 805, 13, 19, 13], "avg_logprob": -0.2609080053725333, "compression_ratio": 1.5930232558139534, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 232.92, "end": 233.6, "word": " Smaller", "probability": 0.55902099609375}, {"start": 233.6, "end": 233.78, "word": " than", "probability": 0.91357421875}, {"start": 233.78, "end": 234.0, "word": " 1", "probability": 0.826171875}, {"start": 234.0, "end": 234.52, "word": ".5", "probability": 0.978759765625}, {"start": 234.52, "end": 234.96, "word": " minus", "probability": 0.92626953125}, {"start": 234.96, "end": 235.24, "word": " 0", "probability": 0.6865234375}, {"start": 235.24, "end": 235.5, "word": ".8", "probability": 0.986328125}, {"start": 235.5, "end": 235.86, "word": " divided", "probability": 0.71875}, {"start": 235.86, "end": 236.08, "word": " by", "probability": 0.966796875}, {"start": 236.08, "end": 236.38, "word": " 0", "probability": 0.43798828125}, {"start": 236.38, "end": 237.48, "word": ".2.", "probability": 0.6859130859375}, {"start": 243.5, "end": 244.18, "word": " Exactly,", "probability": 0.2288818359375}, {"start": 244.46, "end": 244.68, "word": " minus", "probability": 0.92138671875}, {"start": 244.68, "end": 244.92, "word": " 1", "probability": 0.97314453125}, {"start": 244.92, "end": 245.48, "word": ".5.", "probability": 0.996337890625}, {"start": 246.48, "end": 247.16, "word": " Smaller", "probability": 0.901123046875}, {"start": 247.16, "end": 247.4, "word": " than", "probability": 0.93798828125}, {"start": 247.4, "end": 247.7, "word": " z,", "probability": 0.494140625}, {"start": 248.04, "end": 248.5, "word": " smaller", "probability": 0.82666015625}, {"start": 248.5, 
"end": 248.98, "word": " than", "probability": 0.93994140625}, {"start": 248.98, "end": 250.32, "word": " 1", "probability": 0.7255859375}, {"start": 250.32, "end": 250.88, "word": ".5", "probability": 0.997802734375}, {"start": 250.88, "end": 251.16, "word": " minus", "probability": 0.98388671875}, {"start": 251.16, "end": 251.44, "word": " 0", "probability": 0.978515625}, {"start": 251.44, "end": 251.82, "word": ".8", "probability": 0.998291015625}, {"start": 251.82, "end": 252.32, "word": " is", "probability": 0.67529296875}, {"start": 252.32, "end": 252.48, "word": " 0", "probability": 0.9638671875}, {"start": 252.48, "end": 252.9, "word": ".7,", "probability": 0.998046875}, {"start": 253.64, "end": 254.18, "word": " divided", "probability": 0.80712890625}, {"start": 254.18, "end": 254.36, "word": " by", "probability": 0.97509765625}, {"start": 254.36, "end": 254.6, "word": " 0", "probability": 0.98486328125}, {"start": 254.6, "end": 254.84, "word": ".2", "probability": 0.998046875}, {"start": 254.84, "end": 255.04, "word": " is", "probability": 0.9052734375}, {"start": 255.04, "end": 255.3, "word": " 3", "probability": 0.93212890625}, {"start": 255.3, "end": 255.64, "word": ".4.", "probability": 0.708984375}], "temperature": 1.0}, {"id": 11, "seek": 28820, "start": 259.32, "end": 288.2, "text": " So, we are looking now for the probability of z-score between minus 0.5 and smaller than 0.5. Now, if we are looking for this kind of probability, we have to find the probability of z smaller than 3.5 minus z smaller than negative 1.5. 
Now, if we go back", "tokens": [407, 11, 321, 366, 1237, 586, 337, 264, 8482, 295, 710, 12, 4417, 418, 1296, 3175, 1958, 13, 20, 293, 4356, 813, 1958, 13, 20, 13, 823, 11, 498, 321, 366, 1237, 337, 341, 733, 295, 8482, 11, 321, 362, 281, 915, 264, 8482, 295, 710, 4356, 813, 805, 13, 20, 3175, 710, 4356, 813, 3671, 502, 13, 20, 13, 823, 11, 498, 321, 352, 646], "avg_logprob": -0.1831856321043043, "compression_ratio": 1.7832167832167831, "no_speech_prob": 0.0, "words": [{"start": 259.32, "end": 259.78, "word": " So,", "probability": 0.59375}, {"start": 260.06, "end": 260.12, "word": " we", "probability": 0.90869140625}, {"start": 260.12, "end": 260.26, "word": " are", "probability": 0.8720703125}, {"start": 260.26, "end": 260.56, "word": " looking", "probability": 0.88134765625}, {"start": 260.56, "end": 260.94, "word": " now", "probability": 0.84326171875}, {"start": 260.94, "end": 262.5, "word": " for", "probability": 0.72802734375}, {"start": 262.5, "end": 262.76, "word": " the", "probability": 0.8720703125}, {"start": 262.76, "end": 263.2, "word": " probability", "probability": 0.8935546875}, {"start": 263.2, "end": 263.5, "word": " of", "probability": 0.9501953125}, {"start": 263.5, "end": 263.64, "word": " z", "probability": 0.420166015625}, {"start": 263.64, "end": 264.12, "word": "-score", "probability": 0.773681640625}, {"start": 264.12, "end": 265.06, "word": " between", "probability": 0.861328125}, {"start": 265.06, "end": 266.06, "word": " minus", "probability": 0.41796875}, {"start": 266.06, "end": 266.28, "word": " 0", "probability": 0.58740234375}, {"start": 266.28, "end": 266.76, "word": ".5", "probability": 0.99169921875}, {"start": 266.76, "end": 267.7, "word": " and", "probability": 0.9013671875}, {"start": 267.7, "end": 268.02, "word": " smaller", "probability": 0.8076171875}, {"start": 268.02, "end": 268.24, "word": " than", "probability": 0.9248046875}, {"start": 268.24, "end": 268.44, "word": " 0", "probability": 0.9228515625}, {"start": 
268.44, "end": 268.74, "word": ".5.", "probability": 0.998046875}, {"start": 269.46, "end": 269.74, "word": " Now,", "probability": 0.880859375}, {"start": 270.04, "end": 270.86, "word": " if", "probability": 0.93798828125}, {"start": 270.86, "end": 271.0, "word": " we", "probability": 0.8916015625}, {"start": 271.0, "end": 271.1, "word": " are", "probability": 0.89599609375}, {"start": 271.1, "end": 271.36, "word": " looking", "probability": 0.908203125}, {"start": 271.36, "end": 271.6, "word": " for", "probability": 0.9462890625}, {"start": 271.6, "end": 271.86, "word": " this", "probability": 0.94091796875}, {"start": 271.86, "end": 272.6, "word": " kind", "probability": 0.8701171875}, {"start": 272.6, "end": 272.78, "word": " of", "probability": 0.9697265625}, {"start": 272.78, "end": 273.2, "word": " probability,", "probability": 0.9296875}, {"start": 273.92, "end": 274.28, "word": " we", "probability": 0.935546875}, {"start": 274.28, "end": 274.44, "word": " have", "probability": 0.93701171875}, {"start": 274.44, "end": 274.56, "word": " to", "probability": 0.96875}, {"start": 274.56, "end": 274.76, "word": " find", "probability": 0.89013671875}, {"start": 274.76, "end": 274.9, "word": " the", "probability": 0.892578125}, {"start": 274.9, "end": 275.22, "word": " probability", "probability": 0.93310546875}, {"start": 275.22, "end": 275.5, "word": " of", "probability": 0.95458984375}, {"start": 275.5, "end": 275.76, "word": " z", "probability": 0.89599609375}, {"start": 275.76, "end": 278.06, "word": " smaller", "probability": 0.4462890625}, {"start": 278.06, "end": 278.38, "word": " than", "probability": 0.94189453125}, {"start": 278.38, "end": 278.62, "word": " 3", "probability": 0.880859375}, {"start": 278.62, "end": 279.14, "word": ".5", "probability": 0.99609375}, {"start": 279.14, "end": 280.32, "word": " minus", "probability": 0.93017578125}, {"start": 280.32, "end": 280.86, "word": " z", "probability": 0.7841796875}, {"start": 280.86, "end": 281.92, 
"word": " smaller", "probability": 0.2939453125}, {"start": 281.92, "end": 282.34, "word": " than", "probability": 0.94140625}, {"start": 282.34, "end": 283.1, "word": " negative", "probability": 0.78759765625}, {"start": 283.1, "end": 283.64, "word": " 1", "probability": 0.97412109375}, {"start": 283.64, "end": 284.14, "word": ".5.", "probability": 0.998779296875}, {"start": 286.06, "end": 286.7, "word": " Now,", "probability": 0.94287109375}, {"start": 286.8, "end": 286.96, "word": " if", "probability": 0.91845703125}, {"start": 286.96, "end": 287.34, "word": " we", "probability": 0.9541015625}, {"start": 287.34, "end": 287.86, "word": " go", "probability": 0.95947265625}, {"start": 287.86, "end": 288.2, "word": " back", "probability": 0.87255859375}], "temperature": 1.0}, {"id": 12, "seek": 31636, "start": 289.02, "end": 316.36, "text": " to the table we have. Now 3.5 all the way down up to the end of this round. The table I have, the maximum value of Z is 3.4 all the way up to 9. So that means I have only B of Z", "tokens": [281, 264, 3199, 321, 362, 13, 823, 805, 13, 20, 439, 264, 636, 760, 493, 281, 264, 917, 295, 341, 3098, 13, 440, 3199, 286, 362, 11, 264, 6674, 2158, 295, 1176, 307, 805, 13, 19, 439, 264, 636, 493, 281, 1722, 13, 407, 300, 1355, 286, 362, 787, 363, 295, 1176], "avg_logprob": -0.14512086701842974, "compression_ratio": 1.4015748031496063, "no_speech_prob": 0.0, "words": [{"start": 289.02, "end": 289.24, "word": " to", "probability": 0.65576171875}, {"start": 289.24, "end": 289.38, "word": " the", "probability": 0.919921875}, {"start": 289.38, "end": 289.68, "word": " table", "probability": 0.8662109375}, {"start": 289.68, "end": 289.9, "word": " we", "probability": 0.87890625}, {"start": 289.9, "end": 290.22, "word": " have.", "probability": 0.93798828125}, {"start": 291.2, "end": 291.48, "word": " Now", "probability": 0.83935546875}, {"start": 291.48, "end": 291.72, "word": " 3", "probability": 0.67529296875}, {"start": 291.72, "end": 
292.38, "word": ".5", "probability": 0.99169921875}, {"start": 292.38, "end": 294.48, "word": " all", "probability": 0.75439453125}, {"start": 294.48, "end": 294.64, "word": " the", "probability": 0.9189453125}, {"start": 294.64, "end": 294.78, "word": " way", "probability": 0.9560546875}, {"start": 294.78, "end": 295.06, "word": " down", "probability": 0.8408203125}, {"start": 295.06, "end": 295.28, "word": " up", "probability": 0.748046875}, {"start": 295.28, "end": 295.36, "word": " to", "probability": 0.958984375}, {"start": 295.36, "end": 295.48, "word": " the", "probability": 0.91162109375}, {"start": 295.48, "end": 295.64, "word": " end", "probability": 0.8955078125}, {"start": 295.64, "end": 295.76, "word": " of", "probability": 0.96533203125}, {"start": 295.76, "end": 296.06, "word": " this", "probability": 0.9404296875}, {"start": 296.06, "end": 296.64, "word": " round.", "probability": 0.77197265625}, {"start": 298.3, "end": 298.84, "word": " The", "probability": 0.80615234375}, {"start": 298.84, "end": 299.12, "word": " table", "probability": 0.90380859375}, {"start": 299.12, "end": 299.32, "word": " I", "probability": 0.98095703125}, {"start": 299.32, "end": 299.64, "word": " have,", "probability": 0.9521484375}, {"start": 301.36, "end": 301.78, "word": " the", "probability": 0.908203125}, {"start": 301.78, "end": 302.2, "word": " maximum", "probability": 0.9208984375}, {"start": 302.2, "end": 302.64, "word": " value", "probability": 0.978515625}, {"start": 302.64, "end": 302.84, "word": " of", "probability": 0.96142578125}, {"start": 302.84, "end": 303.02, "word": " Z", "probability": 0.54052734375}, {"start": 303.02, "end": 304.4, "word": " is", "probability": 0.88916015625}, {"start": 304.4, "end": 304.68, "word": " 3", "probability": 0.93408203125}, {"start": 304.68, "end": 305.26, "word": ".4", "probability": 0.997802734375}, {"start": 305.26, "end": 306.9, "word": " all", "probability": 0.8310546875}, {"start": 306.9, "end": 307.08, "word": " 
the", "probability": 0.91943359375}, {"start": 307.08, "end": 307.22, "word": " way", "probability": 0.95458984375}, {"start": 307.22, "end": 307.4, "word": " up", "probability": 0.96044921875}, {"start": 307.4, "end": 307.54, "word": " to", "probability": 0.96630859375}, {"start": 307.54, "end": 307.82, "word": " 9.", "probability": 0.81591796875}, {"start": 309.22, "end": 309.52, "word": " So", "probability": 0.939453125}, {"start": 309.52, "end": 309.78, "word": " that", "probability": 0.83349609375}, {"start": 309.78, "end": 310.18, "word": " means", "probability": 0.9326171875}, {"start": 310.18, "end": 311.18, "word": " I", "probability": 0.91796875}, {"start": 311.18, "end": 311.64, "word": " have", "probability": 0.92724609375}, {"start": 311.64, "end": 313.28, "word": " only", "probability": 0.93310546875}, {"start": 313.28, "end": 315.9, "word": " B", "probability": 0.81787109375}, {"start": 315.9, "end": 316.08, "word": " of", "probability": 0.73193359375}, {"start": 316.08, "end": 316.36, "word": " Z", "probability": 0.9912109375}], "temperature": 1.0}, {"id": 13, "seek": 34227, "start": 320.93, "end": 342.27, "text": " less than this value. And the corresponding area is 9998. But for this specific example, we are looking for V of Z smaller than 3.5, which is roughly equal to 3.49. 
So the answer is around this value.", "tokens": [1570, 813, 341, 2158, 13, 400, 264, 11760, 1859, 307, 1722, 8494, 23, 13, 583, 337, 341, 2685, 1365, 11, 321, 366, 1237, 337, 691, 295, 1176, 4356, 813, 805, 13, 20, 11, 597, 307, 9810, 2681, 281, 805, 13, 14938, 13, 407, 264, 1867, 307, 926, 341, 2158, 13], "avg_logprob": -0.21599265290241615, "compression_ratio": 1.3311258278145695, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 320.93, "end": 321.33, "word": " less", "probability": 0.188720703125}, {"start": 321.33, "end": 321.61, "word": " than", "probability": 0.90185546875}, {"start": 321.61, "end": 321.85, "word": " this", "probability": 0.921875}, {"start": 321.85, "end": 322.19, "word": " value.", "probability": 0.96044921875}, {"start": 323.85, "end": 324.19, "word": " And", "probability": 0.62060546875}, {"start": 324.19, "end": 324.67, "word": " the", "probability": 0.78662109375}, {"start": 324.67, "end": 326.17, "word": " corresponding", "probability": 0.66748046875}, {"start": 326.17, "end": 326.59, "word": " area", "probability": 0.90966796875}, {"start": 326.59, "end": 327.25, "word": " is", "probability": 0.94873046875}, {"start": 327.25, "end": 328.87, "word": " 9998.", "probability": 0.76953125}, {"start": 331.91, "end": 332.31, "word": " But", "probability": 0.8857421875}, {"start": 332.31, "end": 332.49, "word": " for", "probability": 0.90380859375}, {"start": 332.49, "end": 332.69, "word": " this", "probability": 0.947265625}, {"start": 332.69, "end": 333.11, "word": " specific", "probability": 0.873046875}, {"start": 333.11, "end": 333.53, "word": " example,", "probability": 0.97119140625}, {"start": 333.63, "end": 333.71, "word": " we", "probability": 0.94775390625}, {"start": 333.71, "end": 333.83, "word": " are", "probability": 0.92333984375}, {"start": 333.83, "end": 334.07, "word": " looking", "probability": 0.91064453125}, {"start": 334.07, "end": 334.43, "word": " for", "probability": 0.9541015625}, {"start": 334.43, 
"end": 334.67, "word": " V", "probability": 0.369873046875}, {"start": 334.67, "end": 334.81, "word": " of", "probability": 0.88134765625}, {"start": 334.81, "end": 335.01, "word": " Z", "probability": 0.7958984375}, {"start": 335.01, "end": 335.55, "word": " smaller", "probability": 0.65625}, {"start": 335.55, "end": 335.85, "word": " than", "probability": 0.9462890625}, {"start": 335.85, "end": 336.09, "word": " 3", "probability": 0.97119140625}, {"start": 336.09, "end": 336.59, "word": ".5,", "probability": 0.989501953125}, {"start": 337.07, "end": 337.29, "word": " which", "probability": 0.916015625}, {"start": 337.29, "end": 337.41, "word": " is", "probability": 0.939453125}, {"start": 337.41, "end": 337.69, "word": " roughly", "probability": 0.859375}, {"start": 337.69, "end": 337.99, "word": " equal", "probability": 0.841796875}, {"start": 337.99, "end": 338.09, "word": " to", "probability": 0.92919921875}, {"start": 338.09, "end": 338.27, "word": " 3", "probability": 0.9921875}, {"start": 338.27, "end": 338.65, "word": ".49.", "probability": 0.9912109375}, {"start": 339.21, "end": 339.75, "word": " So", "probability": 0.93212890625}, {"start": 339.75, "end": 339.91, "word": " the", "probability": 0.77197265625}, {"start": 339.91, "end": 340.17, "word": " answer", "probability": 0.94921875}, {"start": 340.17, "end": 340.39, "word": " is", "probability": 0.94677734375}, {"start": 340.39, "end": 340.85, "word": " around", "probability": 0.95068359375}, {"start": 340.85, "end": 341.91, "word": " this", "probability": 0.86181640625}, {"start": 341.91, "end": 342.27, "word": " value.", "probability": 0.974609375}], "temperature": 1.0}, {"id": 14, "seek": 36103, "start": 345.33, "end": 361.03, "text": " 9998 approximately 9998 excuse me 9998 this one minus", "tokens": [1722, 8494, 23, 10447, 1722, 8494, 23, 8960, 385, 1722, 8494, 23, 341, 472, 3175], "avg_logprob": -0.4509277269244194, "compression_ratio": 1.0188679245283019, "no_speech_prob": 
1.1920928955078125e-07, "words": [{"start": 345.33, "end": 346.57, "word": " 9998", "probability": 0.627197265625}, {"start": 346.57, "end": 347.11, "word": " approximately", "probability": 0.404541015625}, {"start": 347.11, "end": 348.25, "word": " 9998", "probability": 0.85986328125}, {"start": 348.25, "end": 350.13, "word": " excuse", "probability": 0.2354736328125}, {"start": 350.13, "end": 351.85, "word": " me", "probability": 0.95166015625}, {"start": 351.85, "end": 355.95, "word": " 9998", "probability": 0.8468424479166666}, {"start": 355.95, "end": 356.45, "word": " this", "probability": 0.35791015625}, {"start": 356.45, "end": 356.71, "word": " one", "probability": 0.41162109375}, {"start": 356.71, "end": 361.03, "word": " minus", "probability": 0.85791015625}], "temperature": 1.0}, {"id": 15, "seek": 39034, "start": 387.0, "end": 390.34, "text": " Again, we are looking for minus 1.5", "tokens": [3764, 11, 321, 366, 1237, 337, 3175, 502, 13, 20], "avg_logprob": -0.30379972674629907, "compression_ratio": 0.8181818181818182, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 387.0, "end": 387.72, "word": " Again,", "probability": 0.1578369140625}, {"start": 387.72, "end": 388.44, "word": " we", "probability": 0.931640625}, {"start": 388.44, "end": 388.56, "word": " are", "probability": 0.8798828125}, {"start": 388.56, "end": 388.78, "word": " looking", "probability": 0.91943359375}, {"start": 388.78, "end": 389.1, "word": " for", "probability": 0.94873046875}, {"start": 389.1, "end": 389.46, "word": " minus", "probability": 0.442138671875}, {"start": 389.46, "end": 389.72, "word": " 1", "probability": 0.7783203125}, {"start": 389.72, "end": 390.34, "word": ".5", "probability": 0.984375}], "temperature": 1.0}, {"id": 16, "seek": 40423, "start": 391.25, "end": 404.23, "text": " up to 3.5 this area now the dashed area which is between minus 1.5 all the way up to 3.5 equals the area to the left", "tokens": [493, 281, 805, 13, 20, 341, 1859, 586, 264, 
8240, 292, 1859, 597, 307, 1296, 3175, 502, 13, 20, 439, 264, 636, 493, 281, 805, 13, 20, 6915, 264, 1859, 281, 264, 1411], "avg_logprob": -0.14901195466518402, "compression_ratio": 1.2857142857142858, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 391.25, "end": 391.57, "word": " up", "probability": 0.388916015625}, {"start": 391.57, "end": 391.71, "word": " to", "probability": 0.888671875}, {"start": 391.71, "end": 391.89, "word": " 3", "probability": 0.85498046875}, {"start": 391.89, "end": 392.37, "word": ".5", "probability": 0.98388671875}, {"start": 392.37, "end": 392.67, "word": " this", "probability": 0.44140625}, {"start": 392.67, "end": 393.03, "word": " area", "probability": 0.8994140625}, {"start": 393.03, "end": 395.49, "word": " now", "probability": 0.463623046875}, {"start": 395.49, "end": 396.33, "word": " the", "probability": 0.87939453125}, {"start": 396.33, "end": 396.77, "word": " dashed", "probability": 0.704345703125}, {"start": 396.77, "end": 397.15, "word": " area", "probability": 0.89306640625}, {"start": 397.15, "end": 399.09, "word": " which", "probability": 0.9375}, {"start": 399.09, "end": 399.23, "word": " is", "probability": 0.95556640625}, {"start": 399.23, "end": 399.51, "word": " between", "probability": 0.91357421875}, {"start": 399.51, "end": 399.81, "word": " minus", "probability": 0.83203125}, {"start": 399.81, "end": 400.03, "word": " 1", "probability": 0.7021484375}, {"start": 400.03, "end": 400.51, "word": ".5", "probability": 0.991455078125}, {"start": 400.51, "end": 400.89, "word": " all", "probability": 0.94970703125}, {"start": 400.89, "end": 401.01, "word": " the", "probability": 0.92041015625}, {"start": 401.01, "end": 401.13, "word": " way", "probability": 0.94580078125}, {"start": 401.13, "end": 401.23, "word": " up", "probability": 0.9404296875}, {"start": 401.23, "end": 401.33, "word": " to", "probability": 0.9560546875}, {"start": 401.33, "end": 401.53, "word": " 3", "probability": 0.970703125}, 
{"start": 401.53, "end": 402.09, "word": ".5", "probability": 0.997802734375}, {"start": 402.09, "end": 403.17, "word": " equals", "probability": 0.9560546875}, {"start": 403.17, "end": 403.39, "word": " the", "probability": 0.9150390625}, {"start": 403.39, "end": 403.67, "word": " area", "probability": 0.896484375}, {"start": 403.67, "end": 403.87, "word": " to", "probability": 0.96728515625}, {"start": 403.87, "end": 404.01, "word": " the", "probability": 0.919921875}, {"start": 404.01, "end": 404.23, "word": " left", "probability": 0.95263671875}], "temperature": 1.0}, {"id": 17, "seek": 42463, "start": 405.81, "end": 424.63, "text": " of 3.5 which is B of Z less than 3.5 minus the area to the left of negative 1 minus Z greater than 1.5 this value if we are to compute the probability of Z", "tokens": [295, 805, 13, 20, 597, 307, 363, 295, 1176, 1570, 813, 805, 13, 20, 3175, 264, 1859, 281, 264, 1411, 295, 3671, 502, 3175, 1176, 5044, 813, 502, 13, 20, 341, 2158, 498, 321, 366, 281, 14722, 264, 8482, 295, 1176], "avg_logprob": -0.22451636585451307, "compression_ratio": 1.3448275862068966, "no_speech_prob": 3.5762786865234375e-07, "words": [{"start": 405.81, "end": 406.07, "word": " of", "probability": 0.259521484375}, {"start": 406.07, "end": 406.29, "word": " 3", "probability": 0.64013671875}, {"start": 406.29, "end": 406.77, "word": ".5", "probability": 0.987548828125}, {"start": 406.77, "end": 406.95, "word": " which", "probability": 0.72705078125}, {"start": 406.95, "end": 407.11, "word": " is", "probability": 0.92333984375}, {"start": 407.11, "end": 407.29, "word": " B", "probability": 0.26025390625}, {"start": 407.29, "end": 407.43, "word": " of", "probability": 0.83837890625}, {"start": 407.43, "end": 407.55, "word": " Z", "probability": 0.7421875}, {"start": 407.55, "end": 407.79, "word": " less", "probability": 0.7177734375}, {"start": 407.79, "end": 407.93, "word": " than", "probability": 0.91748046875}, {"start": 407.93, "end": 408.15, "word": " 3", 
"probability": 0.95654296875}, {"start": 408.15, "end": 408.81, "word": ".5", "probability": 0.99658203125}, {"start": 408.81, "end": 409.93, "word": " minus", "probability": 0.9169921875}, {"start": 409.93, "end": 410.33, "word": " the", "probability": 0.88623046875}, {"start": 410.33, "end": 410.73, "word": " area", "probability": 0.87109375}, {"start": 410.73, "end": 411.17, "word": " to", "probability": 0.95556640625}, {"start": 411.17, "end": 411.37, "word": " the", "probability": 0.9228515625}, {"start": 411.37, "end": 411.61, "word": " left", "probability": 0.93701171875}, {"start": 411.61, "end": 411.81, "word": " of", "probability": 0.94189453125}, {"start": 411.81, "end": 412.19, "word": " negative", "probability": 0.7890625}, {"start": 412.19, "end": 412.41, "word": " 1", "probability": 0.8671875}, {"start": 412.41, "end": 412.55, "word": " minus", "probability": 0.498291015625}, {"start": 412.55, "end": 413.31, "word": " Z", "probability": 0.41650390625}, {"start": 413.31, "end": 418.31, "word": " greater", "probability": 0.7265625}, {"start": 418.31, "end": 418.67, "word": " than", "probability": 0.94140625}, {"start": 418.67, "end": 418.83, "word": " 1", "probability": 0.94921875}, {"start": 418.83, "end": 419.27, "word": ".5", "probability": 0.985595703125}, {"start": 419.27, "end": 421.07, "word": " this", "probability": 0.453125}, {"start": 421.07, "end": 421.43, "word": " value", "probability": 0.9677734375}, {"start": 421.43, "end": 421.67, "word": " if", "probability": 0.81884765625}, {"start": 421.67, "end": 421.83, "word": " we", "probability": 0.94287109375}, {"start": 421.83, "end": 422.01, "word": " are", "probability": 0.623046875}, {"start": 422.01, "end": 422.59, "word": " to", "probability": 0.958984375}, {"start": 422.59, "end": 423.01, "word": " compute", "probability": 0.90185546875}, {"start": 423.01, "end": 423.45, "word": " the", "probability": 0.80029296875}, {"start": 423.45, "end": 424.13, "word": " probability", "probability": 
0.9501953125}, {"start": 424.13, "end": 424.41, "word": " of", "probability": 0.95947265625}, {"start": 424.41, "end": 424.63, "word": " Z", "probability": 0.921875}], "temperature": 1.0}, {"id": 18, "seek": 45186, "start": 425.1, "end": 451.86, "text": " smaller than negative 1.5. Either you can use the normal table directly, but the other page were negative these scores. In this case, minus 1.5. 0668. 0668. Okay. Now imagine that you only have", "tokens": [4356, 813, 3671, 502, 13, 20, 13, 13746, 291, 393, 764, 264, 2710, 3199, 3838, 11, 457, 264, 661, 3028, 645, 3671, 613, 13444, 13, 682, 341, 1389, 11, 3175, 502, 13, 20, 13, 1958, 15237, 23, 13, 1958, 15237, 23, 13, 1033, 13, 823, 3811, 300, 291, 787, 362], "avg_logprob": -0.20879289390994052, "compression_ratio": 1.3379310344827586, "no_speech_prob": 0.0, "words": [{"start": 425.1, "end": 425.88, "word": " smaller", "probability": 0.40380859375}, {"start": 425.88, "end": 426.14, "word": " than", "probability": 0.93359375}, {"start": 426.14, "end": 426.48, "word": " negative", "probability": 0.7197265625}, {"start": 426.48, "end": 426.76, "word": " 1", "probability": 0.90478515625}, {"start": 426.76, "end": 427.32, "word": ".5.", "probability": 0.989990234375}, {"start": 427.7, "end": 428.0, "word": " Either", "probability": 0.810546875}, {"start": 428.0, "end": 428.22, "word": " you", "probability": 0.9453125}, {"start": 428.22, "end": 428.44, "word": " can", "probability": 0.943359375}, {"start": 428.44, "end": 428.82, "word": " use", "probability": 0.87939453125}, {"start": 428.82, "end": 429.66, "word": " the", "probability": 0.89794921875}, {"start": 429.66, "end": 430.28, "word": " normal", "probability": 0.8876953125}, {"start": 430.28, "end": 430.68, "word": " table", "probability": 0.83642578125}, {"start": 430.68, "end": 431.3, "word": " directly,", "probability": 0.90185546875}, {"start": 431.92, "end": 432.06, "word": " but", "probability": 0.89404296875}, {"start": 432.06, "end": 432.24, "word": " 
the", "probability": 0.88330078125}, {"start": 432.24, "end": 432.5, "word": " other", "probability": 0.89501953125}, {"start": 432.5, "end": 432.8, "word": " page", "probability": 0.63720703125}, {"start": 432.8, "end": 432.96, "word": " were", "probability": 0.394287109375}, {"start": 432.96, "end": 433.34, "word": " negative", "probability": 0.8203125}, {"start": 433.34, "end": 433.62, "word": " these", "probability": 0.556640625}, {"start": 433.62, "end": 434.0, "word": " scores.", "probability": 0.7890625}, {"start": 434.84, "end": 435.06, "word": " In", "probability": 0.90087890625}, {"start": 435.06, "end": 435.24, "word": " this", "probability": 0.9453125}, {"start": 435.24, "end": 435.46, "word": " case,", "probability": 0.90771484375}, {"start": 435.52, "end": 435.78, "word": " minus", "probability": 0.93212890625}, {"start": 435.78, "end": 436.08, "word": " 1", "probability": 0.98486328125}, {"start": 436.08, "end": 436.76, "word": ".5.", "probability": 0.99609375}, {"start": 438.6, "end": 439.36, "word": " 0668.", "probability": 0.7127278645833334}, {"start": 440.92, "end": 441.68, "word": " 0668.", "probability": 0.8382161458333334}, {"start": 445.42, "end": 446.18, "word": " Okay.", "probability": 0.705078125}, {"start": 447.96, "end": 448.72, "word": " Now", "probability": 0.7666015625}, {"start": 448.72, "end": 449.14, "word": " imagine", "probability": 0.70703125}, {"start": 449.14, "end": 449.52, "word": " that", "probability": 0.93408203125}, {"start": 449.52, "end": 451.02, "word": " you", "probability": 0.9267578125}, {"start": 451.02, "end": 451.4, "word": " only", "probability": 0.935546875}, {"start": 451.4, "end": 451.86, "word": " have", "probability": 0.94775390625}], "temperature": 1.0}, {"id": 19, "seek": 48256, "start": 453.02, "end": 482.56, "text": " the positive z-scope. Again, suppose that the table you have in the exam is just the positive values of z. 
How can we find the probability of z greater than minus 1.5, smaller than minus 1.5? In this case, b of z is less than negative 1.5. The area to the left here", "tokens": [264, 3353, 710, 12, 4417, 1114, 13, 3764, 11, 7297, 300, 264, 3199, 291, 362, 294, 264, 1139, 307, 445, 264, 3353, 4190, 295, 710, 13, 1012, 393, 321, 915, 264, 8482, 295, 710, 5044, 813, 3175, 502, 13, 20, 11, 4356, 813, 3175, 502, 13, 20, 30, 682, 341, 1389, 11, 272, 295, 710, 307, 1570, 813, 3671, 502, 13, 20, 13, 440, 1859, 281, 264, 1411, 510], "avg_logprob": -0.1933035767504147, "compression_ratio": 1.5375722543352601, "no_speech_prob": 0.0, "words": [{"start": 453.02, "end": 453.44, "word": " the", "probability": 0.435546875}, {"start": 453.44, "end": 454.28, "word": " positive", "probability": 0.89453125}, {"start": 454.28, "end": 455.02, "word": " z", "probability": 0.72900390625}, {"start": 455.02, "end": 455.52, "word": "-scope.", "probability": 0.7854817708333334}, {"start": 458.16, "end": 458.82, "word": " Again,", "probability": 0.420166015625}, {"start": 459.46, "end": 459.8, "word": " suppose", "probability": 0.82958984375}, {"start": 459.8, "end": 460.06, "word": " that", "probability": 0.85546875}, {"start": 460.06, "end": 460.76, "word": " the", "probability": 0.85546875}, {"start": 460.76, "end": 461.06, "word": " table", "probability": 0.857421875}, {"start": 461.06, "end": 461.22, "word": " you", "probability": 0.94287109375}, {"start": 461.22, "end": 461.4, "word": " have", "probability": 0.9521484375}, {"start": 461.4, "end": 461.54, "word": " in", "probability": 0.94189453125}, {"start": 461.54, "end": 461.66, "word": " the", "probability": 0.9228515625}, {"start": 461.66, "end": 462.06, "word": " exam", "probability": 0.970703125}, {"start": 462.06, "end": 463.14, "word": " is", "probability": 0.8798828125}, {"start": 463.14, "end": 463.62, "word": " just", "probability": 0.921875}, {"start": 463.62, "end": 464.36, "word": " the", "probability": 0.88818359375}, {"start": 
464.36, "end": 464.7, "word": " positive", "probability": 0.9423828125}, {"start": 464.7, "end": 465.18, "word": " values", "probability": 0.96630859375}, {"start": 465.18, "end": 465.36, "word": " of", "probability": 0.9736328125}, {"start": 465.36, "end": 465.54, "word": " z.", "probability": 0.8916015625}, {"start": 466.18, "end": 466.38, "word": " How", "probability": 0.91796875}, {"start": 466.38, "end": 466.62, "word": " can", "probability": 0.9423828125}, {"start": 466.62, "end": 466.86, "word": " we", "probability": 0.95849609375}, {"start": 466.86, "end": 467.7, "word": " find", "probability": 0.88818359375}, {"start": 467.7, "end": 468.54, "word": " the", "probability": 0.8818359375}, {"start": 468.54, "end": 468.92, "word": " probability", "probability": 0.89404296875}, {"start": 468.92, "end": 469.16, "word": " of", "probability": 0.85009765625}, {"start": 469.16, "end": 469.26, "word": " z", "probability": 0.94677734375}, {"start": 469.26, "end": 469.6, "word": " greater", "probability": 0.7578125}, {"start": 469.6, "end": 469.88, "word": " than", "probability": 0.9248046875}, {"start": 469.88, "end": 470.52, "word": " minus", "probability": 0.77587890625}, {"start": 470.52, "end": 470.78, "word": " 1", "probability": 0.6552734375}, {"start": 470.78, "end": 471.62, "word": ".5,", "probability": 0.978271484375}, {"start": 471.74, "end": 472.12, "word": " smaller", "probability": 0.73876953125}, {"start": 472.12, "end": 472.5, "word": " than", "probability": 0.93896484375}, {"start": 472.5, "end": 473.38, "word": " minus", "probability": 0.95263671875}, {"start": 473.38, "end": 473.62, "word": " 1", "probability": 0.97998046875}, {"start": 473.62, "end": 474.08, "word": ".5?", "probability": 0.998291015625}, {"start": 474.4, "end": 474.68, "word": " In", "probability": 0.9248046875}, {"start": 474.68, "end": 474.92, "word": " this", "probability": 0.94873046875}, {"start": 474.92, "end": 475.38, "word": " case,", "probability": 0.91259765625}, {"start": 
477.04, "end": 477.64, "word": " b", "probability": 0.34716796875}, {"start": 477.64, "end": 477.78, "word": " of", "probability": 0.84765625}, {"start": 477.78, "end": 477.9, "word": " z", "probability": 0.9853515625}, {"start": 477.9, "end": 478.06, "word": " is", "probability": 0.91015625}, {"start": 478.06, "end": 478.28, "word": " less", "probability": 0.94580078125}, {"start": 478.28, "end": 478.5, "word": " than", "probability": 0.93994140625}, {"start": 478.5, "end": 478.78, "word": " negative", "probability": 0.88525390625}, {"start": 478.78, "end": 479.0, "word": " 1", "probability": 0.9599609375}, {"start": 479.0, "end": 479.54, "word": ".5.", "probability": 0.998779296875}, {"start": 480.44, "end": 480.7, "word": " The", "probability": 0.87451171875}, {"start": 480.7, "end": 480.98, "word": " area", "probability": 0.884765625}, {"start": 480.98, "end": 481.18, "word": " to", "probability": 0.96240234375}, {"start": 481.18, "end": 481.36, "word": " the", "probability": 0.9189453125}, {"start": 481.36, "end": 481.62, "word": " left", "probability": 0.953125}, {"start": 481.62, "end": 482.56, "word": " here", "probability": 0.73876953125}], "temperature": 1.0}, {"id": 20, "seek": 51008, "start": 483.4, "end": 510.08, "text": " is the same as the area to the right of the same value, but positive one. So this is equal to B of Z greater than 1.5. Because the same area, minus 1.5 all the way up to minus infinity, that equals from 1.5 to infinity because of symmetric distribution. 
Now, B of Z greater than 1.5,", "tokens": [307, 264, 912, 382, 264, 1859, 281, 264, 558, 295, 264, 912, 2158, 11, 457, 3353, 472, 13, 407, 341, 307, 2681, 281, 363, 295, 1176, 5044, 813, 502, 13, 20, 13, 1436, 264, 912, 1859, 11, 3175, 502, 13, 20, 439, 264, 636, 493, 281, 3175, 13202, 11, 300, 6915, 490, 502, 13, 20, 281, 13202, 570, 295, 32330, 7316, 13, 823, 11, 363, 295, 1176, 5044, 813, 502, 13, 20, 11], "avg_logprob": -0.21209881696346644, "compression_ratio": 1.6416184971098267, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 483.4, "end": 483.84, "word": " is", "probability": 0.2332763671875}, {"start": 483.84, "end": 484.08, "word": " the", "probability": 0.91943359375}, {"start": 484.08, "end": 484.5, "word": " same", "probability": 0.91259765625}, {"start": 484.5, "end": 486.22, "word": " as", "probability": 0.51611328125}, {"start": 486.22, "end": 486.42, "word": " the", "probability": 0.76611328125}, {"start": 486.42, "end": 486.7, "word": " area", "probability": 0.8818359375}, {"start": 486.7, "end": 486.92, "word": " to", "probability": 0.9541015625}, {"start": 486.92, "end": 487.1, "word": " the", "probability": 0.9208984375}, {"start": 487.1, "end": 487.36, "word": " right", "probability": 0.9169921875}, {"start": 487.36, "end": 488.44, "word": " of", "probability": 0.89453125}, {"start": 488.44, "end": 488.58, "word": " the", "probability": 0.91015625}, {"start": 488.58, "end": 488.84, "word": " same", "probability": 0.91259765625}, {"start": 488.84, "end": 489.14, "word": " value,", "probability": 0.8173828125}, {"start": 489.24, "end": 489.3, "word": " but", "probability": 0.428955078125}, {"start": 489.3, "end": 489.72, "word": " positive", "probability": 0.81884765625}, {"start": 489.72, "end": 490.12, "word": " one.", "probability": 0.8193359375}, {"start": 490.52, "end": 491.02, "word": " So", "probability": 0.8525390625}, {"start": 491.02, "end": 491.32, "word": " this", "probability": 0.6767578125}, {"start": 491.32, "end": 
491.42, "word": " is", "probability": 0.771484375}, {"start": 491.42, "end": 491.72, "word": " equal", "probability": 0.89404296875}, {"start": 491.72, "end": 491.8, "word": " to", "probability": 0.5625}, {"start": 491.8, "end": 491.9, "word": " B", "probability": 0.53076171875}, {"start": 491.9, "end": 492.04, "word": " of", "probability": 0.8955078125}, {"start": 492.04, "end": 492.18, "word": " Z", "probability": 0.63134765625}, {"start": 492.18, "end": 492.58, "word": " greater", "probability": 0.89013671875}, {"start": 492.58, "end": 493.0, "word": " than", "probability": 0.94873046875}, {"start": 493.0, "end": 494.72, "word": " 1", "probability": 0.78271484375}, {"start": 494.72, "end": 495.32, "word": ".5.", "probability": 0.99072265625}, {"start": 496.24, "end": 496.84, "word": " Because", "probability": 0.72705078125}, {"start": 496.84, "end": 496.96, "word": " the", "probability": 0.68310546875}, {"start": 496.96, "end": 497.2, "word": " same", "probability": 0.9091796875}, {"start": 497.2, "end": 497.6, "word": " area,", "probability": 0.87841796875}, {"start": 498.26, "end": 498.68, "word": " minus", "probability": 0.81005859375}, {"start": 498.68, "end": 498.96, "word": " 1", "probability": 0.9833984375}, {"start": 498.96, "end": 499.48, "word": ".5", "probability": 0.999267578125}, {"start": 499.48, "end": 499.72, "word": " all", "probability": 0.755859375}, {"start": 499.72, "end": 499.84, "word": " the", "probability": 0.91796875}, {"start": 499.84, "end": 499.96, "word": " way", "probability": 0.95263671875}, {"start": 499.96, "end": 500.1, "word": " up", "probability": 0.93603515625}, {"start": 500.1, "end": 500.22, "word": " to", "probability": 0.96875}, {"start": 500.22, "end": 500.42, "word": " minus", "probability": 0.984375}, {"start": 500.42, "end": 500.96, "word": " infinity,", "probability": 0.89208984375}, {"start": 501.84, "end": 502.08, "word": " that", "probability": 0.921875}, {"start": 502.08, "end": 502.44, "word": " equals", 
"probability": 0.904296875}, {"start": 502.44, "end": 502.7, "word": " from", "probability": 0.8505859375}, {"start": 502.7, "end": 502.98, "word": " 1", "probability": 0.99072265625}, {"start": 502.98, "end": 503.64, "word": ".5", "probability": 0.999267578125}, {"start": 503.64, "end": 504.22, "word": " to", "probability": 0.96923828125}, {"start": 504.22, "end": 504.66, "word": " infinity", "probability": 0.90673828125}, {"start": 504.66, "end": 505.06, "word": " because", "probability": 0.66162109375}, {"start": 505.06, "end": 505.26, "word": " of", "probability": 0.96044921875}, {"start": 505.26, "end": 505.56, "word": " symmetric", "probability": 0.76318359375}, {"start": 505.56, "end": 506.26, "word": " distribution.", "probability": 0.841796875}, {"start": 507.72, "end": 508.04, "word": " Now,", "probability": 0.94970703125}, {"start": 508.48, "end": 508.64, "word": " B", "probability": 0.98583984375}, {"start": 508.64, "end": 508.78, "word": " of", "probability": 0.97265625}, {"start": 508.78, "end": 508.9, "word": " Z", "probability": 0.98828125}, {"start": 508.9, "end": 509.22, "word": " greater", "probability": 0.8349609375}, {"start": 509.22, "end": 509.46, "word": " than", "probability": 0.94677734375}, {"start": 509.46, "end": 509.64, "word": " 1", "probability": 0.99267578125}, {"start": 509.64, "end": 510.08, "word": ".5,", "probability": 0.99951171875}], "temperature": 1.0}, {"id": 21, "seek": 53991, "start": 510.67, "end": 539.91, "text": " The table we have here gives the area to the left of z. So this is 1 minus b of z less than 1.5. Now 1.5 from this table is 9332. 
Okay, so that will get 0668, which is the same result as we got directly from the negative z table.", "tokens": [440, 3199, 321, 362, 510, 2709, 264, 1859, 281, 264, 1411, 295, 710, 13, 407, 341, 307, 502, 3175, 272, 295, 710, 1570, 813, 502, 13, 20, 13, 823, 502, 13, 20, 490, 341, 3199, 307, 1722, 10191, 17, 13, 1033, 11, 370, 300, 486, 483, 1958, 15237, 23, 11, 597, 307, 264, 912, 1874, 382, 321, 658, 3838, 490, 264, 3671, 710, 3199, 13], "avg_logprob": -0.19318182134267056, "compression_ratio": 1.4110429447852761, "no_speech_prob": 0.0, "words": [{"start": 510.67, "end": 510.95, "word": " The", "probability": 0.54150390625}, {"start": 510.95, "end": 511.19, "word": " table", "probability": 0.78515625}, {"start": 511.19, "end": 511.35, "word": " we", "probability": 0.85791015625}, {"start": 511.35, "end": 511.51, "word": " have", "probability": 0.9482421875}, {"start": 511.51, "end": 511.81, "word": " here", "probability": 0.85302734375}, {"start": 511.81, "end": 512.89, "word": " gives", "probability": 0.66943359375}, {"start": 512.89, "end": 513.05, "word": " the", "probability": 0.859375}, {"start": 513.05, "end": 513.23, "word": " area", "probability": 0.8818359375}, {"start": 513.23, "end": 513.41, "word": " to", "probability": 0.96142578125}, {"start": 513.41, "end": 513.57, "word": " the", "probability": 0.9189453125}, {"start": 513.57, "end": 513.77, "word": " left", "probability": 0.9296875}, {"start": 513.77, "end": 513.97, "word": " of", "probability": 0.96875}, {"start": 513.97, "end": 514.09, "word": " z.", "probability": 0.62451171875}, {"start": 515.03, "end": 515.61, "word": " So", "probability": 0.82373046875}, {"start": 515.61, "end": 515.83, "word": " this", "probability": 0.615234375}, {"start": 515.83, "end": 515.95, "word": " is", "probability": 0.94287109375}, {"start": 515.95, "end": 516.13, "word": " 1", "probability": 0.560546875}, {"start": 516.13, "end": 516.57, "word": " minus", "probability": 0.8759765625}, {"start": 516.57, "end": 
518.73, "word": " b", "probability": 0.57275390625}, {"start": 518.73, "end": 518.91, "word": " of", "probability": 0.919921875}, {"start": 518.91, "end": 519.15, "word": " z", "probability": 0.98583984375}, {"start": 519.15, "end": 519.65, "word": " less", "probability": 0.91943359375}, {"start": 519.65, "end": 519.89, "word": " than", "probability": 0.9375}, {"start": 519.89, "end": 520.23, "word": " 1", "probability": 0.919921875}, {"start": 520.23, "end": 520.59, "word": ".5.", "probability": 0.916015625}, {"start": 522.67, "end": 523.07, "word": " Now", "probability": 0.91943359375}, {"start": 523.07, "end": 523.41, "word": " 1", "probability": 0.66845703125}, {"start": 523.41, "end": 523.87, "word": ".5", "probability": 0.99755859375}, {"start": 523.87, "end": 524.11, "word": " from", "probability": 0.90185546875}, {"start": 524.11, "end": 524.39, "word": " this", "probability": 0.9482421875}, {"start": 524.39, "end": 524.79, "word": " table", "probability": 0.884765625}, {"start": 524.79, "end": 525.09, "word": " is", "probability": 0.88232421875}, {"start": 525.09, "end": 526.65, "word": " 9332.", "probability": 0.8025716145833334}, {"start": 530.85, "end": 531.43, "word": " Okay,", "probability": 0.52392578125}, {"start": 531.99, "end": 532.29, "word": " so", "probability": 0.93505859375}, {"start": 532.29, "end": 532.53, "word": " that", "probability": 0.93505859375}, {"start": 532.53, "end": 532.71, "word": " will", "probability": 0.8740234375}, {"start": 532.71, "end": 533.01, "word": " get", "probability": 0.4765625}, {"start": 533.01, "end": 536.03, "word": " 0668,", "probability": 0.95166015625}, {"start": 536.47, "end": 536.65, "word": " which", "probability": 0.77490234375}, {"start": 536.65, "end": 536.65, "word": " is", "probability": 0.94970703125}, {"start": 536.65, "end": 536.79, "word": " the", "probability": 0.91748046875}, {"start": 536.79, "end": 536.97, "word": " same", "probability": 0.91259765625}, {"start": 536.97, "end": 537.27, 
"word": " result", "probability": 0.93017578125}, {"start": 537.27, "end": 537.49, "word": " as", "probability": 0.9140625}, {"start": 537.49, "end": 537.61, "word": " we", "probability": 0.94287109375}, {"start": 537.61, "end": 537.77, "word": " got", "probability": 0.8701171875}, {"start": 537.77, "end": 538.23, "word": " directly", "probability": 0.88818359375}, {"start": 538.23, "end": 538.65, "word": " from", "probability": 0.8857421875}, {"start": 538.65, "end": 538.95, "word": " the", "probability": 0.9248046875}, {"start": 538.95, "end": 539.21, "word": " negative", "probability": 0.8583984375}, {"start": 539.21, "end": 539.55, "word": " z", "probability": 0.88720703125}, {"start": 539.55, "end": 539.91, "word": " table.", "probability": 0.81689453125}], "temperature": 1.0}, {"id": 22, "seek": 56902, "start": 540.94, "end": 569.02, "text": " but make sure that in the exam I will give you just only the positive table now subtract these two values you will get the answer for this for part B 9 9 9 3 0 9 3 3 0 this is the final result for this example now part C", "tokens": [457, 652, 988, 300, 294, 264, 1139, 286, 486, 976, 291, 445, 787, 264, 3353, 3199, 586, 16390, 613, 732, 4190, 291, 486, 483, 264, 1867, 337, 341, 337, 644, 363, 1722, 1722, 1722, 805, 1958, 1722, 805, 805, 1958, 341, 307, 264, 2572, 1874, 337, 341, 1365, 586, 644, 383], "avg_logprob": -0.19050480912511164, "compression_ratio": 1.556338028169014, "no_speech_prob": 0.0, "words": [{"start": 540.94, "end": 541.22, "word": " but", "probability": 0.30078125}, {"start": 541.22, "end": 541.8, "word": " make", "probability": 0.86279296875}, {"start": 541.8, "end": 541.98, "word": " sure", "probability": 0.927734375}, {"start": 541.98, "end": 542.18, "word": " that", "probability": 0.90869140625}, {"start": 542.18, "end": 542.32, "word": " in", "probability": 0.89697265625}, {"start": 542.32, "end": 542.44, "word": " the", "probability": 0.91552734375}, {"start": 542.44, "end": 542.8, "word": " 
exam", "probability": 0.97021484375}, {"start": 542.8, "end": 543.12, "word": " I", "probability": 0.6689453125}, {"start": 543.12, "end": 543.24, "word": " will", "probability": 0.87939453125}, {"start": 543.24, "end": 543.42, "word": " give", "probability": 0.87451171875}, {"start": 543.42, "end": 543.54, "word": " you", "probability": 0.9580078125}, {"start": 543.54, "end": 543.76, "word": " just", "probability": 0.84912109375}, {"start": 543.76, "end": 544.14, "word": " only", "probability": 0.908203125}, {"start": 544.14, "end": 545.46, "word": " the", "probability": 0.8525390625}, {"start": 545.46, "end": 545.78, "word": " positive", "probability": 0.9091796875}, {"start": 545.78, "end": 546.8, "word": " table", "probability": 0.83935546875}, {"start": 546.8, "end": 549.62, "word": " now", "probability": 0.19921875}, {"start": 549.62, "end": 550.08, "word": " subtract", "probability": 0.77587890625}, {"start": 550.08, "end": 550.42, "word": " these", "probability": 0.833984375}, {"start": 550.42, "end": 550.62, "word": " two", "probability": 0.884765625}, {"start": 550.62, "end": 550.94, "word": " values", "probability": 0.96728515625}, {"start": 550.94, "end": 551.08, "word": " you", "probability": 0.8681640625}, {"start": 551.08, "end": 551.2, "word": " will", "probability": 0.890625}, {"start": 551.2, "end": 551.56, "word": " get", "probability": 0.93798828125}, {"start": 551.56, "end": 552.06, "word": " the", "probability": 0.9150390625}, {"start": 552.06, "end": 553.24, "word": " answer", "probability": 0.95849609375}, {"start": 553.24, "end": 553.62, "word": " for", "probability": 0.94384765625}, {"start": 553.62, "end": 553.98, "word": " this", "probability": 0.93505859375}, {"start": 553.98, "end": 555.44, "word": " for", "probability": 0.83203125}, {"start": 555.44, "end": 555.72, "word": " part", "probability": 0.87060546875}, {"start": 555.72, "end": 556.02, "word": " B", "probability": 0.50537109375}, {"start": 556.02, "end": 558.06, "word": " 9", 
"probability": 0.65185546875}, {"start": 558.06, "end": 558.28, "word": " 9", "probability": 0.77099609375}, {"start": 558.28, "end": 558.54, "word": " 9", "probability": 0.65576171875}, {"start": 558.54, "end": 558.78, "word": " 3", "probability": 0.75}, {"start": 558.78, "end": 559.14, "word": " 0", "probability": 0.92333984375}, {"start": 559.14, "end": 559.86, "word": " 9", "probability": 0.9541015625}, {"start": 559.86, "end": 560.74, "word": " 3", "probability": 0.9365234375}, {"start": 560.74, "end": 561.26, "word": " 3", "probability": 0.974609375}, {"start": 561.26, "end": 561.68, "word": " 0", "probability": 0.99169921875}, {"start": 561.68, "end": 562.6, "word": " this", "probability": 0.86767578125}, {"start": 562.6, "end": 562.72, "word": " is", "probability": 0.9501953125}, {"start": 562.72, "end": 562.86, "word": " the", "probability": 0.919921875}, {"start": 562.86, "end": 563.08, "word": " final", "probability": 0.94482421875}, {"start": 563.08, "end": 563.4, "word": " result", "probability": 0.943359375}, {"start": 563.4, "end": 563.64, "word": " for", "probability": 0.94921875}, {"start": 563.64, "end": 563.98, "word": " this", "probability": 0.9482421875}, {"start": 563.98, "end": 564.82, "word": " example", "probability": 0.97021484375}, {"start": 564.82, "end": 566.58, "word": " now", "probability": 0.869140625}, {"start": 566.58, "end": 568.16, "word": " part", "probability": 0.59375}, {"start": 568.16, "end": 569.02, "word": " C", "probability": 0.8310546875}], "temperature": 1.0}, {"id": 23, "seek": 59969, "start": 571.57, "end": 599.69, "text": " What's the probability that the download time will take above 0.5 seconds? So we are looking again, B of X above 0.5. Similar way as we did in part A. 
So B of Z, 0.5 minus the mean.", "tokens": [708, 311, 264, 8482, 300, 264, 5484, 565, 486, 747, 3673, 1958, 13, 20, 3949, 30, 407, 321, 366, 1237, 797, 11, 363, 295, 1783, 3673, 1958, 13, 20, 13, 10905, 636, 382, 321, 630, 294, 644, 316, 13, 407, 363, 295, 1176, 11, 1958, 13, 20, 3175, 264, 914, 13], "avg_logprob": -0.1947115341631266, "compression_ratio": 1.3, "no_speech_prob": 0.0, "words": [{"start": 571.57, "end": 572.03, "word": " What's", "probability": 0.745361328125}, {"start": 572.03, "end": 572.15, "word": " the", "probability": 0.8984375}, {"start": 572.15, "end": 572.45, "word": " probability", "probability": 0.96484375}, {"start": 572.45, "end": 572.77, "word": " that", "probability": 0.57666015625}, {"start": 572.77, "end": 572.99, "word": " the", "probability": 0.875}, {"start": 572.99, "end": 573.33, "word": " download", "probability": 0.91552734375}, {"start": 573.33, "end": 573.77, "word": " time", "probability": 0.89013671875}, {"start": 573.77, "end": 574.43, "word": " will", "probability": 0.83056640625}, {"start": 574.43, "end": 574.83, "word": " take", "probability": 0.8984375}, {"start": 574.83, "end": 575.27, "word": " above", "probability": 0.95458984375}, {"start": 575.27, "end": 575.61, "word": " 0", "probability": 0.49462890625}, {"start": 575.61, "end": 575.87, "word": ".5", "probability": 0.993896484375}, {"start": 575.87, "end": 576.37, "word": " seconds?", "probability": 0.7607421875}, {"start": 580.39, "end": 581.11, "word": " So", "probability": 0.6103515625}, {"start": 581.11, "end": 581.25, "word": " we", "probability": 0.76220703125}, {"start": 581.25, "end": 581.35, "word": " are", "probability": 0.8828125}, {"start": 581.35, "end": 581.67, "word": " looking", "probability": 0.90576171875}, {"start": 581.67, "end": 582.17, "word": " again,", "probability": 0.89990234375}, {"start": 583.55, "end": 583.67, "word": " B", "probability": 0.340576171875}, {"start": 583.67, "end": 583.81, "word": " of", "probability": 0.892578125}, 
{"start": 583.81, "end": 584.17, "word": " X", "probability": 0.89501953125}, {"start": 584.17, "end": 585.07, "word": " above", "probability": 0.88818359375}, {"start": 585.07, "end": 585.69, "word": " 0", "probability": 0.8955078125}, {"start": 585.69, "end": 586.09, "word": ".5.", "probability": 0.997314453125}, {"start": 588.33, "end": 589.05, "word": " Similar", "probability": 0.51513671875}, {"start": 589.05, "end": 589.33, "word": " way", "probability": 0.884765625}, {"start": 589.33, "end": 589.49, "word": " as", "probability": 0.92529296875}, {"start": 589.49, "end": 589.63, "word": " we", "probability": 0.9580078125}, {"start": 589.63, "end": 590.03, "word": " did", "probability": 0.9599609375}, {"start": 590.03, "end": 590.63, "word": " in", "probability": 0.93505859375}, {"start": 590.63, "end": 590.91, "word": " part", "probability": 0.666015625}, {"start": 590.91, "end": 591.23, "word": " A.", "probability": 0.88525390625}, {"start": 594.09, "end": 594.81, "word": " So", "probability": 0.923828125}, {"start": 594.81, "end": 595.01, "word": " B", "probability": 0.8251953125}, {"start": 595.01, "end": 595.17, "word": " of", "probability": 0.9560546875}, {"start": 595.17, "end": 595.61, "word": " Z,", "probability": 0.9765625}, {"start": 597.31, "end": 598.59, "word": " 0", "probability": 0.9541015625}, {"start": 598.59, "end": 598.97, "word": ".5", "probability": 0.99853515625}, {"start": 598.97, "end": 599.33, "word": " minus", "probability": 0.9814453125}, {"start": 599.33, "end": 599.55, "word": " the", "probability": 0.91455078125}, {"start": 599.55, "end": 599.69, "word": " mean.", "probability": 0.97216796875}], "temperature": 1.0}, {"id": 24, "seek": 62099, "start": 601.93, "end": 620.99, "text": " Divide by 6. So B of Z greater than negative 1.5. 
Now B of Z greater than minus 1.5.", "tokens": [9886, 482, 538, 1386, 13, 407, 363, 295, 1176, 5044, 813, 3671, 502, 13, 20, 13, 823, 363, 295, 1176, 5044, 813, 3175, 502, 13, 20, 13], "avg_logprob": -0.28417968111378805, "compression_ratio": 1.1971830985915493, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 601.93, "end": 602.35, "word": " Divide", "probability": 0.556396484375}, {"start": 602.35, "end": 602.55, "word": " by", "probability": 0.94091796875}, {"start": 602.55, "end": 602.85, "word": " 6.", "probability": 0.5732421875}, {"start": 606.89, "end": 607.49, "word": " So", "probability": 0.859375}, {"start": 607.49, "end": 607.65, "word": " B", "probability": 0.328125}, {"start": 607.65, "end": 607.83, "word": " of", "probability": 0.90576171875}, {"start": 607.83, "end": 608.05, "word": " Z", "probability": 0.81982421875}, {"start": 608.05, "end": 613.85, "word": " greater", "probability": 0.5576171875}, {"start": 613.85, "end": 614.17, "word": " than", "probability": 0.93994140625}, {"start": 614.17, "end": 614.51, "word": " negative", "probability": 0.80517578125}, {"start": 614.51, "end": 614.77, "word": " 1", "probability": 0.86669921875}, {"start": 614.77, "end": 615.35, "word": ".5.", "probability": 0.974853515625}, {"start": 617.31, "end": 617.61, "word": " Now", "probability": 0.92333984375}, {"start": 617.61, "end": 617.91, "word": " B", "probability": 0.669921875}, {"start": 617.91, "end": 618.09, "word": " of", "probability": 0.97412109375}, {"start": 618.09, "end": 618.37, "word": " Z", "probability": 0.97705078125}, {"start": 618.37, "end": 619.47, "word": " greater", "probability": 0.88623046875}, {"start": 619.47, "end": 619.77, "word": " than", "probability": 0.947265625}, {"start": 619.77, "end": 620.07, "word": " minus", "probability": 0.96337890625}, {"start": 620.07, "end": 620.33, "word": " 1", "probability": 0.96826171875}, {"start": 620.33, "end": 620.99, "word": ".5.", "probability": 0.995849609375}], "temperature": 
1.0}, {"id": 25, "seek": 65085, "start": 623.29, "end": 650.85, "text": " It means we are looking for the area above minus 1.5 So this area Now the area above 1 minus 0.5 Equals 1 minus", "tokens": [467, 1355, 321, 366, 1237, 337, 264, 1859, 3673, 3175, 502, 13, 20, 407, 341, 1859, 823, 264, 1859, 3673, 502, 3175, 1958, 13, 20, 15624, 1124, 502, 3175], "avg_logprob": -0.20924478620290757, "compression_ratio": 1.3176470588235294, "no_speech_prob": 0.0, "words": [{"start": 623.29, "end": 623.65, "word": " It", "probability": 0.37109375}, {"start": 623.65, "end": 624.07, "word": " means", "probability": 0.91064453125}, {"start": 624.07, "end": 624.47, "word": " we", "probability": 0.861328125}, {"start": 624.47, "end": 624.63, "word": " are", "probability": 0.93310546875}, {"start": 624.63, "end": 624.91, "word": " looking", "probability": 0.9150390625}, {"start": 624.91, "end": 625.17, "word": " for", "probability": 0.94873046875}, {"start": 625.17, "end": 625.37, "word": " the", "probability": 0.91943359375}, {"start": 625.37, "end": 625.75, "word": " area", "probability": 0.88720703125}, {"start": 625.75, "end": 626.81, "word": " above", "probability": 0.88232421875}, {"start": 626.81, "end": 627.33, "word": " minus", "probability": 0.7099609375}, {"start": 627.33, "end": 627.57, "word": " 1", "probability": 0.60302734375}, {"start": 627.57, "end": 628.11, "word": ".5", "probability": 0.958984375}, {"start": 628.11, "end": 638.37, "word": " So", "probability": 0.489013671875}, {"start": 638.37, "end": 638.63, "word": " this", "probability": 0.896484375}, {"start": 638.63, "end": 638.99, "word": " area", "probability": 0.873046875}, {"start": 638.99, "end": 642.85, "word": " Now", "probability": 0.6083984375}, {"start": 642.85, "end": 643.01, "word": " the", "probability": 0.87939453125}, {"start": 643.01, "end": 643.29, "word": " area", "probability": 0.88916015625}, {"start": 643.29, "end": 644.23, "word": " above", "probability": 0.9375}, {"start": 644.23, "end": 
644.53, "word": " 1", "probability": 0.77587890625}, {"start": 644.53, "end": 644.85, "word": " minus", "probability": 0.88525390625}, {"start": 644.85, "end": 645.13, "word": " 0", "probability": 0.460693359375}, {"start": 645.13, "end": 645.65, "word": ".5", "probability": 0.991943359375}, {"start": 645.65, "end": 648.41, "word": " Equals", "probability": 0.7705078125}, {"start": 648.41, "end": 650.35, "word": " 1", "probability": 0.74267578125}, {"start": 650.35, "end": 650.85, "word": " minus", "probability": 0.9580078125}], "temperature": 1.0}, {"id": 26, "seek": 68459, "start": 655.05, "end": 684.59, "text": " B of Z less than negative 115. As we did here, this probability is 0668. So the answer again is 9334. So that's for part C. Any question? Now, part D.", "tokens": [363, 295, 1176, 1570, 813, 3671, 2975, 20, 13, 1018, 321, 630, 510, 11, 341, 8482, 307, 1958, 15237, 23, 13, 407, 264, 1867, 797, 307, 1722, 10191, 19, 13, 407, 300, 311, 337, 644, 383, 13, 2639, 1168, 30, 823, 11, 644, 413, 13], "avg_logprob": -0.2863451125829116, "compression_ratio": 1.1615384615384616, "no_speech_prob": 0.0, "words": [{"start": 655.05, "end": 655.29, "word": " B", "probability": 0.1915283203125}, {"start": 655.29, "end": 655.49, "word": " of", "probability": 0.8701171875}, {"start": 655.49, "end": 655.71, "word": " Z", "probability": 0.77392578125}, {"start": 655.71, "end": 656.55, "word": " less", "probability": 0.607421875}, {"start": 656.55, "end": 656.77, "word": " than", "probability": 0.92578125}, {"start": 656.77, "end": 657.11, "word": " negative", "probability": 0.69775390625}, {"start": 657.11, "end": 657.89, "word": " 115.", "probability": 0.5523681640625}, {"start": 660.21, "end": 660.73, "word": " As", "probability": 0.86181640625}, {"start": 660.73, "end": 660.87, "word": " we", "probability": 0.77001953125}, {"start": 660.87, "end": 661.03, "word": " did", "probability": 0.9619140625}, {"start": 661.03, "end": 661.37, "word": " here,", "probability": 
0.83740234375}, {"start": 662.11, "end": 662.35, "word": " this", "probability": 0.9189453125}, {"start": 662.35, "end": 662.99, "word": " probability", "probability": 0.9306640625}, {"start": 662.99, "end": 666.51, "word": " is", "probability": 0.6044921875}, {"start": 666.51, "end": 668.09, "word": " 0668.", "probability": 0.912109375}, {"start": 669.77, "end": 670.65, "word": " So", "probability": 0.88134765625}, {"start": 670.65, "end": 670.79, "word": " the", "probability": 0.7109375}, {"start": 670.79, "end": 671.03, "word": " answer", "probability": 0.96435546875}, {"start": 671.03, "end": 671.35, "word": " again", "probability": 0.88818359375}, {"start": 671.35, "end": 671.59, "word": " is", "probability": 0.92919921875}, {"start": 671.59, "end": 672.57, "word": " 9334.", "probability": 0.7111002604166666}, {"start": 676.61, "end": 677.11, "word": " So", "probability": 0.89501953125}, {"start": 677.11, "end": 677.35, "word": " that's", "probability": 0.853515625}, {"start": 677.35, "end": 677.49, "word": " for", "probability": 0.8486328125}, {"start": 677.49, "end": 677.71, "word": " part", "probability": 0.6484375}, {"start": 677.71, "end": 677.97, "word": " C.", "probability": 0.77734375}, {"start": 680.03, "end": 680.69, "word": " Any", "probability": 0.7822265625}, {"start": 680.69, "end": 681.07, "word": " question?", "probability": 0.65185546875}, {"start": 682.61, "end": 683.15, "word": " Now,", "probability": 0.884765625}, {"start": 683.59, "end": 684.09, "word": " part", "probability": 0.79345703125}, {"start": 684.09, "end": 684.59, "word": " D.", "probability": 0.98828125}], "temperature": 1.0}, {"id": 27, "seek": 70607, "start": 687.05, "end": 706.07, "text": " And they ask about 99%. 
99% of the download times.", "tokens": [400, 436, 1029, 466, 11803, 6856, 11803, 4, 295, 264, 5484, 1413, 13], "avg_logprob": -0.48214286991528105, "compression_ratio": 0.9622641509433962, "no_speech_prob": 0.0, "words": [{"start": 687.05, "end": 687.41, "word": " And", "probability": 0.60888671875}, {"start": 687.41, "end": 687.57, "word": " they", "probability": 0.64990234375}, {"start": 687.57, "end": 687.85, "word": " ask", "probability": 0.736328125}, {"start": 687.85, "end": 688.23, "word": " about", "probability": 0.88818359375}, {"start": 688.23, "end": 691.03, "word": " 99%.", "probability": 0.42047119140625}, {"start": 691.03, "end": 695.91, "word": " 99", "probability": 0.75341796875}, {"start": 695.91, "end": 696.59, "word": "%", "probability": 0.919921875}, {"start": 696.59, "end": 698.23, "word": " of", "probability": 0.8291015625}, {"start": 698.23, "end": 704.17, "word": " the", "probability": 0.689453125}, {"start": 704.17, "end": 705.45, "word": " download", "probability": 0.254638671875}, {"start": 705.45, "end": 706.07, "word": " times.", "probability": 0.75341796875}], "temperature": 1.0}, {"id": 28, "seek": 73682, "start": 715.38, "end": 736.82, "text": " How many seconds? 
Exactly, in this case, the probability is given, which is 99%.", "tokens": [1012, 867, 3949, 30, 7587, 11, 294, 341, 1389, 11, 264, 8482, 307, 2212, 11, 597, 307, 11803, 6856], "avg_logprob": -0.2791015595197678, "compression_ratio": 0.9759036144578314, "no_speech_prob": 0.0, "words": [{"start": 715.38, "end": 715.8, "word": " How", "probability": 0.390625}, {"start": 715.8, "end": 716.22, "word": " many", "probability": 0.89404296875}, {"start": 716.22, "end": 716.92, "word": " seconds?", "probability": 0.78857421875}, {"start": 731.42, "end": 732.1, "word": " Exactly,", "probability": 0.55078125}, {"start": 732.28, "end": 732.38, "word": " in", "probability": 0.9189453125}, {"start": 732.38, "end": 732.64, "word": " this", "probability": 0.94873046875}, {"start": 732.64, "end": 733.26, "word": " case,", "probability": 0.93212890625}, {"start": 734.24, "end": 734.46, "word": " the", "probability": 0.89013671875}, {"start": 734.46, "end": 734.86, "word": " probability", "probability": 0.943359375}, {"start": 734.86, "end": 735.14, "word": " is", "probability": 0.89453125}, {"start": 735.14, "end": 735.42, "word": " given,", "probability": 0.84716796875}, {"start": 735.54, "end": 735.68, "word": " which", "probability": 0.93505859375}, {"start": 735.68, "end": 735.86, "word": " is", "probability": 0.955078125}, {"start": 735.86, "end": 736.82, "word": " 99%.", "probability": 0.76318359375}], "temperature": 1.0}, {"id": 29, "seek": 76645, "start": 739.19, "end": 766.45, "text": " Now, if 99% of the download times are above how many seconds? So, in this case, we are looking for the value, for example, for A, such that B of X greater than A equals 99%. 
Now, in this type of problems, we have to make a graph first in order to determine the location of A.", "tokens": [823, 11, 498, 11803, 4, 295, 264, 5484, 1413, 366, 3673, 577, 867, 3949, 30, 407, 11, 294, 341, 1389, 11, 321, 366, 1237, 337, 264, 2158, 11, 337, 1365, 11, 337, 316, 11, 1270, 300, 363, 295, 1783, 5044, 813, 316, 6915, 11803, 6856, 823, 11, 294, 341, 2010, 295, 2740, 11, 321, 362, 281, 652, 257, 4295, 700, 294, 1668, 281, 6997, 264, 4914, 295, 316, 13], "avg_logprob": -0.26875000936644416, "compression_ratio": 1.4759358288770053, "no_speech_prob": 0.0, "words": [{"start": 739.1899999999999, "end": 739.77, "word": " Now,", "probability": 0.318603515625}, {"start": 740.03, "end": 740.39, "word": " if", "probability": 0.595703125}, {"start": 740.39, "end": 742.45, "word": " 99", "probability": 0.51171875}, {"start": 742.45, "end": 742.89, "word": "%", "probability": 0.79150390625}, {"start": 742.89, "end": 743.13, "word": " of", "probability": 0.82861328125}, {"start": 743.13, "end": 743.27, "word": " the", "probability": 0.8125}, {"start": 743.27, "end": 743.59, "word": " download", "probability": 0.432861328125}, {"start": 743.59, "end": 744.09, "word": " times", "probability": 0.75390625}, {"start": 744.09, "end": 744.45, "word": " are", "probability": 0.88720703125}, {"start": 744.45, "end": 744.83, "word": " above", "probability": 0.947265625}, {"start": 744.83, "end": 745.15, "word": " how", "probability": 0.61669921875}, {"start": 745.15, "end": 745.87, "word": " many", "probability": 0.90478515625}, {"start": 745.87, "end": 746.19, "word": " seconds?", "probability": 0.662109375}, {"start": 747.11, "end": 747.39, "word": " So,", "probability": 0.79052734375}, {"start": 747.67, "end": 747.77, "word": " in", "probability": 0.935546875}, {"start": 747.77, "end": 748.01, "word": " this", "probability": 0.9482421875}, {"start": 748.01, "end": 748.27, "word": " case,", "probability": 0.90625}, {"start": 748.33, "end": 748.41, "word": " we", 
"probability": 0.94873046875}, {"start": 748.41, "end": 748.53, "word": " are", "probability": 0.88525390625}, {"start": 748.53, "end": 748.79, "word": " looking", "probability": 0.90576171875}, {"start": 748.79, "end": 749.09, "word": " for", "probability": 0.9345703125}, {"start": 749.09, "end": 749.25, "word": " the", "probability": 0.78173828125}, {"start": 749.25, "end": 749.53, "word": " value,", "probability": 0.96630859375}, {"start": 749.63, "end": 749.71, "word": " for", "probability": 0.947265625}, {"start": 749.71, "end": 750.07, "word": " example,", "probability": 0.96484375}, {"start": 750.23, "end": 750.39, "word": " for", "probability": 0.3203125}, {"start": 750.39, "end": 750.71, "word": " A,", "probability": 0.4765625}, {"start": 751.61, "end": 752.09, "word": " such", "probability": 0.9375}, {"start": 752.09, "end": 752.55, "word": " that", "probability": 0.947265625}, {"start": 752.55, "end": 754.03, "word": " B", "probability": 0.434326171875}, {"start": 754.03, "end": 754.17, "word": " of", "probability": 0.7978515625}, {"start": 754.17, "end": 754.35, "word": " X", "probability": 0.869140625}, {"start": 754.35, "end": 754.65, "word": " greater", "probability": 0.7509765625}, {"start": 754.65, "end": 754.93, "word": " than", "probability": 0.9619140625}, {"start": 754.93, "end": 755.07, "word": " A", "probability": 0.66015625}, {"start": 755.07, "end": 755.51, "word": " equals", "probability": 0.89990234375}, {"start": 755.51, "end": 757.49, "word": " 99%.", "probability": 0.86865234375}, {"start": 757.49, "end": 759.07, "word": " Now,", "probability": 0.92333984375}, {"start": 759.17, "end": 759.37, "word": " in", "probability": 0.896484375}, {"start": 759.37, "end": 759.73, "word": " this", "probability": 0.88720703125}, {"start": 759.73, "end": 760.97, "word": " type", "probability": 0.96484375}, {"start": 760.97, "end": 761.13, "word": " of", "probability": 0.97021484375}, {"start": 761.13, "end": 761.49, "word": " problems,", 
"probability": 0.67919921875}, {"start": 761.61, "end": 761.73, "word": " we", "probability": 0.94970703125}, {"start": 761.73, "end": 761.93, "word": " have", "probability": 0.9423828125}, {"start": 761.93, "end": 762.19, "word": " to", "probability": 0.97119140625}, {"start": 762.19, "end": 762.49, "word": " make", "probability": 0.9287109375}, {"start": 762.49, "end": 762.63, "word": " a", "probability": 0.9873046875}, {"start": 762.63, "end": 762.93, "word": " graph", "probability": 0.9443359375}, {"start": 762.93, "end": 763.33, "word": " first", "probability": 0.8544921875}, {"start": 763.33, "end": 764.07, "word": " in", "probability": 0.6162109375}, {"start": 764.07, "end": 764.27, "word": " order", "probability": 0.9189453125}, {"start": 764.27, "end": 764.51, "word": " to", "probability": 0.96826171875}, {"start": 764.51, "end": 764.97, "word": " determine", "probability": 0.9375}, {"start": 764.97, "end": 765.49, "word": " the", "probability": 0.91650390625}, {"start": 765.49, "end": 766.07, "word": " location", "probability": 0.92822265625}, {"start": 766.07, "end": 766.31, "word": " of", "probability": 0.80712890625}, {"start": 766.31, "end": 766.45, "word": " A.", "probability": 0.2626953125}], "temperature": 1.0}, {"id": 30, "seek": 79334, "start": 767.3, "end": 793.34, "text": " because it may be to the right or to the left side. It depends actually on two things. Number one, the size, the greater than or smaller than, and the other is the probability. Is it above 1.5 or smaller than 1.5? So you have to keep careful for this type of questions. So in this case. It should be to the left. 
It should be to the left.", "tokens": [570, 309, 815, 312, 281, 264, 558, 420, 281, 264, 1411, 1252, 13, 467, 5946, 767, 322, 732, 721, 13, 5118, 472, 11, 264, 2744, 11, 264, 5044, 813, 420, 4356, 813, 11, 293, 264, 661, 307, 264, 8482, 13, 1119, 309, 3673, 502, 13, 20, 420, 4356, 813, 502, 13, 20, 30, 407, 291, 362, 281, 1066, 5026, 337, 341, 2010, 295, 1651, 13, 407, 294, 341, 1389, 13, 467, 820, 312, 281, 264, 1411, 13, 467, 820, 312, 281, 264, 1411, 13], "avg_logprob": -0.21930147339315975, "compression_ratio": 1.6865671641791045, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 767.3, "end": 767.62, "word": " because", "probability": 0.2568359375}, {"start": 767.62, "end": 767.78, "word": " it", "probability": 0.73486328125}, {"start": 767.78, "end": 767.92, "word": " may", "probability": 0.70654296875}, {"start": 767.92, "end": 768.08, "word": " be", "probability": 0.62451171875}, {"start": 768.08, "end": 768.3, "word": " to", "probability": 0.83203125}, {"start": 768.3, "end": 768.44, "word": " the", "probability": 0.9111328125}, {"start": 768.44, "end": 768.64, "word": " right", "probability": 0.890625}, {"start": 768.64, "end": 768.8, "word": " or", "probability": 0.91796875}, {"start": 768.8, "end": 768.9, "word": " to", "probability": 0.8310546875}, {"start": 768.9, "end": 769.04, "word": " the", "probability": 0.912109375}, {"start": 769.04, "end": 769.2, "word": " left", "probability": 0.94775390625}, {"start": 769.2, "end": 769.58, "word": " side.", "probability": 0.74755859375}, {"start": 770.1, "end": 770.32, "word": " It", "probability": 0.87255859375}, {"start": 770.32, "end": 770.6, "word": " depends", "probability": 0.8564453125}, {"start": 770.6, "end": 771.14, "word": " actually", "probability": 0.85302734375}, {"start": 771.14, "end": 771.48, "word": " on", "probability": 0.94287109375}, {"start": 771.48, "end": 771.74, "word": " two", "probability": 0.87744140625}, {"start": 771.74, "end": 772.12, "word": " things.", 
"probability": 0.85693359375}, {"start": 772.2, "end": 772.4, "word": " Number", "probability": 0.837890625}, {"start": 772.4, "end": 773.14, "word": " one,", "probability": 0.888671875}, {"start": 773.66, "end": 773.78, "word": " the", "probability": 0.88916015625}, {"start": 773.78, "end": 774.2, "word": " size,", "probability": 0.6103515625}, {"start": 774.74, "end": 774.84, "word": " the", "probability": 0.63623046875}, {"start": 774.84, "end": 775.14, "word": " greater", "probability": 0.95458984375}, {"start": 775.14, "end": 775.4, "word": " than", "probability": 0.8974609375}, {"start": 775.4, "end": 775.58, "word": " or", "probability": 0.931640625}, {"start": 775.58, "end": 775.98, "word": " smaller", "probability": 0.681640625}, {"start": 775.98, "end": 776.36, "word": " than,", "probability": 0.9423828125}, {"start": 776.76, "end": 777.3, "word": " and", "probability": 0.92431640625}, {"start": 777.3, "end": 777.42, "word": " the", "probability": 0.6884765625}, {"start": 777.42, "end": 777.66, "word": " other", "probability": 0.91357421875}, {"start": 777.66, "end": 777.98, "word": " is", "probability": 0.7724609375}, {"start": 777.98, "end": 778.28, "word": " the", "probability": 0.79296875}, {"start": 778.28, "end": 778.76, "word": " probability.", "probability": 0.9521484375}, {"start": 779.52, "end": 779.66, "word": " Is", "probability": 0.94140625}, {"start": 779.66, "end": 779.78, "word": " it", "probability": 0.9462890625}, {"start": 779.78, "end": 780.02, "word": " above", "probability": 0.9072265625}, {"start": 780.02, "end": 780.24, "word": " 1", "probability": 0.454345703125}, {"start": 780.24, "end": 780.54, "word": ".5", "probability": 0.975341796875}, {"start": 780.54, "end": 780.68, "word": " or", "probability": 0.8955078125}, {"start": 780.68, "end": 781.08, "word": " smaller", "probability": 0.86328125}, {"start": 781.08, "end": 781.32, "word": " than", "probability": 0.93017578125}, {"start": 781.32, "end": 781.48, "word": " 1", 
"probability": 0.861328125}, {"start": 781.48, "end": 781.8, "word": ".5?", "probability": 0.9990234375}, {"start": 782.22, "end": 782.42, "word": " So", "probability": 0.9423828125}, {"start": 782.42, "end": 782.54, "word": " you", "probability": 0.455078125}, {"start": 782.54, "end": 782.68, "word": " have", "probability": 0.947265625}, {"start": 782.68, "end": 782.8, "word": " to", "probability": 0.970703125}, {"start": 782.8, "end": 783.06, "word": " keep", "probability": 0.74560546875}, {"start": 783.06, "end": 783.82, "word": " careful", "probability": 0.94921875}, {"start": 783.82, "end": 784.08, "word": " for", "probability": 0.9072265625}, {"start": 784.08, "end": 784.3, "word": " this", "probability": 0.93408203125}, {"start": 784.3, "end": 784.68, "word": " type", "probability": 0.9326171875}, {"start": 784.68, "end": 785.8, "word": " of", "probability": 0.96533203125}, {"start": 785.8, "end": 786.24, "word": " questions.", "probability": 0.79833984375}, {"start": 787.0, "end": 787.0, "word": " So", "probability": 0.7080078125}, {"start": 787.0, "end": 787.12, "word": " in", "probability": 0.67578125}, {"start": 787.12, "end": 787.26, "word": " this", "probability": 0.9453125}, {"start": 787.26, "end": 787.62, "word": " case.", "probability": 0.916015625}, {"start": 790.12, "end": 790.6, "word": " It", "probability": 0.4140625}, {"start": 790.6, "end": 790.8, "word": " should", "probability": 0.96240234375}, {"start": 790.8, "end": 790.98, "word": " be", "probability": 0.9541015625}, {"start": 790.98, "end": 791.12, "word": " to", "probability": 0.9658203125}, {"start": 791.12, "end": 791.26, "word": " the", "probability": 0.9189453125}, {"start": 791.26, "end": 791.52, "word": " left.", "probability": 0.947265625}, {"start": 792.2, "end": 792.54, "word": " It", "probability": 0.783203125}, {"start": 792.54, "end": 792.7, "word": " should", "probability": 0.96875}, {"start": 792.7, "end": 792.86, "word": " be", "probability": 0.94873046875}, {"start": 
792.86, "end": 793.0, "word": " to", "probability": 0.96533203125}, {"start": 793.0, "end": 793.14, "word": " the", "probability": 0.91259765625}, {"start": 793.14, "end": 793.34, "word": " left.", "probability": 0.951171875}], "temperature": 1.0}, {"id": 31, "seek": 82290, "start": 794.0, "end": 822.9, "text": " because the area to the left here makes sense it's 99% but if the location is to the right side here it doesn't make any sense that B makes greater than or equal to 99% because the area here is split into two halves so 50% to the right 50% to the left of the vertical line here so A should be to the left side", "tokens": [570, 264, 1859, 281, 264, 1411, 510, 1669, 2020, 309, 311, 11803, 4, 457, 498, 264, 4914, 307, 281, 264, 558, 1252, 510, 309, 1177, 380, 652, 604, 2020, 300, 363, 1669, 5044, 813, 420, 2681, 281, 11803, 4, 570, 264, 1859, 510, 307, 7472, 666, 732, 38490, 370, 2625, 4, 281, 264, 558, 2625, 4, 281, 264, 1411, 295, 264, 9429, 1622, 510, 370, 316, 820, 312, 281, 264, 1411, 1252], "avg_logprob": -0.22517122634469647, "compression_ratio": 1.7816091954022988, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 794.0, "end": 794.5, "word": " because", "probability": 0.381591796875}, {"start": 794.5, "end": 794.88, "word": " the", "probability": 0.806640625}, {"start": 794.88, "end": 795.18, "word": " area", "probability": 0.876953125}, {"start": 795.18, "end": 795.4, "word": " to", "probability": 0.9365234375}, {"start": 795.4, "end": 795.58, "word": " the", "probability": 0.91552734375}, {"start": 795.58, "end": 795.8, "word": " left", "probability": 0.94091796875}, {"start": 795.8, "end": 796.08, "word": " here", "probability": 0.7587890625}, {"start": 796.08, "end": 796.42, "word": " makes", "probability": 0.73779296875}, {"start": 796.42, "end": 796.78, "word": " sense", "probability": 0.82421875}, {"start": 796.78, "end": 797.1, "word": " it's", "probability": 0.6732177734375}, {"start": 797.1, "end": 797.46, "word": " 99", 
"probability": 0.76806640625}, {"start": 797.46, "end": 797.86, "word": "%", "probability": 0.66259765625}, {"start": 797.86, "end": 800.4, "word": " but", "probability": 0.734375}, {"start": 800.4, "end": 800.9, "word": " if", "probability": 0.93212890625}, {"start": 800.9, "end": 801.6, "word": " the", "probability": 0.88232421875}, {"start": 801.6, "end": 802.06, "word": " location", "probability": 0.82666015625}, {"start": 802.06, "end": 802.28, "word": " is", "probability": 0.85546875}, {"start": 802.28, "end": 802.42, "word": " to", "probability": 0.93603515625}, {"start": 802.42, "end": 802.58, "word": " the", "probability": 0.9150390625}, {"start": 802.58, "end": 802.8, "word": " right", "probability": 0.92041015625}, {"start": 802.8, "end": 803.2, "word": " side", "probability": 0.826171875}, {"start": 803.2, "end": 803.52, "word": " here", "probability": 0.703125}, {"start": 803.52, "end": 805.0, "word": " it", "probability": 0.7880859375}, {"start": 805.0, "end": 805.28, "word": " doesn't", "probability": 0.91259765625}, {"start": 805.28, "end": 805.46, "word": " make", "probability": 0.93505859375}, {"start": 805.46, "end": 805.76, "word": " any", "probability": 0.89794921875}, {"start": 805.76, "end": 806.0, "word": " sense", "probability": 0.82666015625}, {"start": 806.0, "end": 806.38, "word": " that", "probability": 0.927734375}, {"start": 806.38, "end": 807.08, "word": " B", "probability": 0.373779296875}, {"start": 807.08, "end": 807.36, "word": " makes", "probability": 0.71826171875}, {"start": 807.36, "end": 807.72, "word": " greater", "probability": 0.88427734375}, {"start": 807.72, "end": 808.0, "word": " than", "probability": 0.93017578125}, {"start": 808.0, "end": 808.1, "word": " or", "probability": 0.69580078125}, {"start": 808.1, "end": 808.2, "word": " equal", "probability": 0.90185546875}, {"start": 808.2, "end": 808.3, "word": " to", "probability": 0.6396484375}, {"start": 808.3, "end": 808.58, "word": " 99", "probability": 
0.97265625}, {"start": 808.58, "end": 809.04, "word": "%", "probability": 0.92822265625}, {"start": 809.04, "end": 810.36, "word": " because", "probability": 0.86962890625}, {"start": 810.36, "end": 810.6, "word": " the", "probability": 0.90966796875}, {"start": 810.6, "end": 810.9, "word": " area", "probability": 0.88525390625}, {"start": 810.9, "end": 811.22, "word": " here", "probability": 0.806640625}, {"start": 811.22, "end": 811.62, "word": " is", "probability": 0.92431640625}, {"start": 811.62, "end": 811.94, "word": " split", "probability": 0.60400390625}, {"start": 811.94, "end": 812.24, "word": " into", "probability": 0.6025390625}, {"start": 812.24, "end": 812.92, "word": " two", "probability": 0.44921875}, {"start": 812.92, "end": 813.46, "word": " halves", "probability": 0.8720703125}, {"start": 813.46, "end": 814.96, "word": " so", "probability": 0.65087890625}, {"start": 814.96, "end": 815.7, "word": " 50", "probability": 0.80517578125}, {"start": 815.7, "end": 816.0, "word": "%", "probability": 0.9462890625}, {"start": 816.0, "end": 816.16, "word": " to", "probability": 0.96044921875}, {"start": 816.16, "end": 816.28, "word": " the", "probability": 0.908203125}, {"start": 816.28, "end": 816.5, "word": " right", "probability": 0.92041015625}, {"start": 816.5, "end": 816.74, "word": " 50", "probability": 0.427734375}, {"start": 816.74, "end": 817.08, "word": "%", "probability": 0.99609375}, {"start": 817.08, "end": 817.26, "word": " to", "probability": 0.9541015625}, {"start": 817.26, "end": 817.38, "word": " the", "probability": 0.91015625}, {"start": 817.38, "end": 817.62, "word": " left", "probability": 0.96337890625}, {"start": 817.62, "end": 817.8, "word": " of", "probability": 0.365966796875}, {"start": 817.8, "end": 818.46, "word": " the", "probability": 0.7373046875}, {"start": 818.46, "end": 818.8, "word": " vertical", "probability": 0.908203125}, {"start": 818.8, "end": 819.14, "word": " line", "probability": 0.93408203125}, {"start": 
819.14, "end": 819.5, "word": " here", "probability": 0.77880859375}, {"start": 819.5, "end": 820.68, "word": " so", "probability": 0.77490234375}, {"start": 820.68, "end": 821.04, "word": " A", "probability": 0.7216796875}, {"start": 821.04, "end": 821.3, "word": " should", "probability": 0.96240234375}, {"start": 821.3, "end": 821.64, "word": " be", "probability": 0.9453125}, {"start": 821.64, "end": 822.3, "word": " to", "probability": 0.67431640625}, {"start": 822.3, "end": 822.42, "word": " the", "probability": 0.91357421875}, {"start": 822.42, "end": 822.56, "word": " left", "probability": 0.9443359375}, {"start": 822.56, "end": 822.9, "word": " side", "probability": 0.87060546875}], "temperature": 1.0}, {"id": 32, "seek": 85063, "start": 824.11, "end": 850.63, "text": " Make sense? Now, V of X greater than A equals 99%. So this area is 99%. Now, if we go back to the table we have, the table again gives the area to the left side. So this one exactly equals V of X smaller than A, which is", "tokens": [4387, 2020, 30, 823, 11, 691, 295, 1783, 5044, 813, 316, 6915, 11803, 6856, 407, 341, 1859, 307, 11803, 6856, 823, 11, 498, 321, 352, 646, 281, 264, 3199, 321, 362, 11, 264, 3199, 797, 2709, 264, 1859, 281, 264, 1411, 1252, 13, 407, 341, 472, 2293, 6915, 691, 295, 1783, 4356, 813, 316, 11, 597, 307], "avg_logprob": -0.294181025233762, "compression_ratio": 1.5241379310344827, "no_speech_prob": 0.0, "words": [{"start": 824.11, "end": 824.39, "word": " Make", "probability": 0.0802001953125}, {"start": 824.39, "end": 824.71, "word": " sense?", "probability": 0.72998046875}, {"start": 825.85, "end": 826.21, "word": " Now,", "probability": 0.791015625}, {"start": 826.63, "end": 826.81, "word": " V", "probability": 0.1978759765625}, {"start": 826.81, "end": 826.91, "word": " of", "probability": 0.81005859375}, {"start": 826.91, "end": 827.09, "word": " X", "probability": 0.87841796875}, {"start": 827.09, "end": 827.37, "word": " greater", "probability": 0.634765625}, 
{"start": 827.37, "end": 827.71, "word": " than", "probability": 0.95849609375}, {"start": 827.71, "end": 827.79, "word": " A", "probability": 0.34130859375}, {"start": 827.79, "end": 828.01, "word": " equals", "probability": 0.451416015625}, {"start": 828.01, "end": 828.87, "word": " 99%.", "probability": 0.628662109375}, {"start": 828.87, "end": 832.13, "word": " So", "probability": 0.8857421875}, {"start": 832.13, "end": 832.41, "word": " this", "probability": 0.6806640625}, {"start": 832.41, "end": 832.85, "word": " area", "probability": 0.88818359375}, {"start": 832.85, "end": 835.29, "word": " is", "probability": 0.8349609375}, {"start": 835.29, "end": 836.03, "word": " 99%.", "probability": 0.912841796875}, {"start": 836.03, "end": 836.85, "word": " Now,", "probability": 0.92138671875}, {"start": 837.03, "end": 837.21, "word": " if", "probability": 0.94140625}, {"start": 837.21, "end": 837.31, "word": " we", "probability": 0.9013671875}, {"start": 837.31, "end": 837.49, "word": " go", "probability": 0.95947265625}, {"start": 837.49, "end": 837.71, "word": " back", "probability": 0.8798828125}, {"start": 837.71, "end": 837.85, "word": " to", "probability": 0.96728515625}, {"start": 837.85, "end": 837.99, "word": " the", "probability": 0.9169921875}, {"start": 837.99, "end": 838.23, "word": " table", "probability": 0.8505859375}, {"start": 838.23, "end": 838.41, "word": " we", "probability": 0.8701171875}, {"start": 838.41, "end": 838.67, "word": " have,", "probability": 0.8505859375}, {"start": 839.31, "end": 839.51, "word": " the", "probability": 0.83203125}, {"start": 839.51, "end": 839.87, "word": " table", "probability": 0.85791015625}, {"start": 839.87, "end": 840.47, "word": " again", "probability": 0.86328125}, {"start": 840.47, "end": 840.87, "word": " gives", "probability": 0.85498046875}, {"start": 840.87, "end": 841.05, "word": " the", "probability": 0.88525390625}, {"start": 841.05, "end": 841.25, "word": " area", "probability": 0.84130859375}, 
{"start": 841.25, "end": 841.51, "word": " to", "probability": 0.54736328125}, {"start": 841.51, "end": 841.67, "word": " the", "probability": 0.9150390625}, {"start": 841.67, "end": 841.91, "word": " left", "probability": 0.9453125}, {"start": 841.91, "end": 842.41, "word": " side.", "probability": 0.8349609375}, {"start": 843.27, "end": 843.51, "word": " So", "probability": 0.94580078125}, {"start": 843.51, "end": 843.81, "word": " this", "probability": 0.8447265625}, {"start": 843.81, "end": 844.13, "word": " one", "probability": 0.91845703125}, {"start": 844.13, "end": 846.61, "word": " exactly", "probability": 0.65185546875}, {"start": 846.61, "end": 847.11, "word": " equals", "probability": 0.82861328125}, {"start": 847.11, "end": 848.05, "word": " V", "probability": 0.54052734375}, {"start": 848.05, "end": 848.21, "word": " of", "probability": 0.97314453125}, {"start": 848.21, "end": 848.57, "word": " X", "probability": 0.9873046875}, {"start": 848.57, "end": 849.31, "word": " smaller", "probability": 0.81298828125}, {"start": 849.31, "end": 849.69, "word": " than", "probability": 0.951171875}, {"start": 849.69, "end": 849.91, "word": " A,", "probability": 0.99072265625}, {"start": 850.05, "end": 850.27, "word": " which", "probability": 0.95361328125}, {"start": 850.27, "end": 850.63, "word": " is", "probability": 0.94140625}], "temperature": 1.0}, {"id": 33, "seek": 87780, "start": 852.32, "end": 877.8, "text": " 1% because the area to the right of A is 99 so the area to the left of A is 1-99 which is 1% now here we have to look inside the body of the table at the value of 01 so in this case this score should be negative or positive", "tokens": [502, 4, 570, 264, 1859, 281, 264, 558, 295, 316, 307, 11803, 370, 264, 1859, 281, 264, 1411, 295, 316, 307, 502, 12, 8494, 597, 307, 502, 4, 586, 510, 321, 362, 281, 574, 1854, 264, 1772, 295, 264, 3199, 412, 264, 2158, 295, 23185, 370, 294, 341, 1389, 341, 6175, 820, 312, 3671, 420, 3353], "avg_logprob": 
-0.21244517752998754, "compression_ratio": 1.5342465753424657, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 852.32, "end": 852.68, "word": " 1", "probability": 0.255615234375}, {"start": 852.68, "end": 852.94, "word": "%", "probability": 0.445068359375}, {"start": 852.94, "end": 855.14, "word": " because", "probability": 0.59375}, {"start": 855.14, "end": 855.6, "word": " the", "probability": 0.86328125}, {"start": 855.6, "end": 855.98, "word": " area", "probability": 0.833984375}, {"start": 855.98, "end": 856.14, "word": " to", "probability": 0.9482421875}, {"start": 856.14, "end": 856.28, "word": " the", "probability": 0.9140625}, {"start": 856.28, "end": 856.52, "word": " right", "probability": 0.9111328125}, {"start": 856.52, "end": 856.72, "word": " of", "probability": 0.95556640625}, {"start": 856.72, "end": 857.0, "word": " A", "probability": 0.69775390625}, {"start": 857.0, "end": 857.46, "word": " is", "probability": 0.9267578125}, {"start": 857.46, "end": 857.82, "word": " 99", "probability": 0.93994140625}, {"start": 857.82, "end": 859.4, "word": " so", "probability": 0.321533203125}, {"start": 859.4, "end": 859.66, "word": " the", "probability": 0.8515625}, {"start": 859.66, "end": 859.92, "word": " area", "probability": 0.8662109375}, {"start": 859.92, "end": 860.1, "word": " to", "probability": 0.94287109375}, {"start": 860.1, "end": 860.26, "word": " the", "probability": 0.91015625}, {"start": 860.26, "end": 860.46, "word": " left", "probability": 0.9521484375}, {"start": 860.46, "end": 860.66, "word": " of", "probability": 0.9599609375}, {"start": 860.66, "end": 860.86, "word": " A", "probability": 0.96630859375}, {"start": 860.86, "end": 861.08, "word": " is", "probability": 0.9423828125}, {"start": 861.08, "end": 861.32, "word": " 1", "probability": 0.88134765625}, {"start": 861.32, "end": 862.06, "word": "-99", "probability": 0.651611328125}, {"start": 862.06, "end": 862.42, "word": " which", "probability": 0.6083984375}, 
{"start": 862.42, "end": 862.52, "word": " is", "probability": 0.9453125}, {"start": 862.52, "end": 862.76, "word": " 1", "probability": 0.9873046875}, {"start": 862.76, "end": 863.04, "word": "%", "probability": 0.79833984375}, {"start": 863.04, "end": 864.16, "word": " now", "probability": 0.7509765625}, {"start": 864.16, "end": 865.06, "word": " here", "probability": 0.6455078125}, {"start": 865.06, "end": 865.2, "word": " we", "probability": 0.9189453125}, {"start": 865.2, "end": 865.38, "word": " have", "probability": 0.95068359375}, {"start": 865.38, "end": 865.5, "word": " to", "probability": 0.966796875}, {"start": 865.5, "end": 865.68, "word": " look", "probability": 0.96142578125}, {"start": 865.68, "end": 866.06, "word": " inside", "probability": 0.931640625}, {"start": 866.06, "end": 866.24, "word": " the", "probability": 0.9013671875}, {"start": 866.24, "end": 866.4, "word": " body", "probability": 0.84228515625}, {"start": 866.4, "end": 866.52, "word": " of", "probability": 0.96875}, {"start": 866.52, "end": 866.64, "word": " the", "probability": 0.91845703125}, {"start": 866.64, "end": 866.94, "word": " table", "probability": 0.87353515625}, {"start": 866.94, "end": 868.42, "word": " at", "probability": 0.9423828125}, {"start": 868.42, "end": 869.46, "word": " the", "probability": 0.90625}, {"start": 869.46, "end": 869.86, "word": " value", "probability": 0.98193359375}, {"start": 869.86, "end": 870.36, "word": " of", "probability": 0.966796875}, {"start": 870.36, "end": 870.96, "word": " 01", "probability": 0.328369140625}, {"start": 870.96, "end": 874.14, "word": " so", "probability": 0.6552734375}, {"start": 874.14, "end": 874.36, "word": " in", "probability": 0.8837890625}, {"start": 874.36, "end": 874.58, "word": " this", "probability": 0.94140625}, {"start": 874.58, "end": 874.88, "word": " case", "probability": 0.92333984375}, {"start": 874.88, "end": 875.14, "word": " this", "probability": 0.3759765625}, {"start": 875.14, "end": 875.48, 
"word": " score", "probability": 0.8671875}, {"start": 875.48, "end": 875.78, "word": " should", "probability": 0.9609375}, {"start": 875.78, "end": 876.1, "word": " be", "probability": 0.94677734375}, {"start": 876.1, "end": 877.26, "word": " negative", "probability": 0.78271484375}, {"start": 877.26, "end": 877.5, "word": " or", "probability": 0.9609375}, {"start": 877.5, "end": 877.8, "word": " positive", "probability": 0.93212890625}], "temperature": 1.0}, {"id": 34, "seek": 90255, "start": 880.05, "end": 902.55, "text": " Since the probability is 100% smaller than 1.5, so it should be negative. So if you go back to the table, negative 1. Look at 0.1. Minus 2.34.", "tokens": [4162, 264, 8482, 307, 2319, 4, 4356, 813, 502, 13, 20, 11, 370, 309, 820, 312, 3671, 13, 407, 498, 291, 352, 646, 281, 264, 3199, 11, 3671, 502, 13, 2053, 412, 1958, 13, 16, 13, 2829, 301, 568, 13, 12249, 13], "avg_logprob": -0.2872456353764201, "compression_ratio": 1.1916666666666667, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 880.05, "end": 880.45, "word": " Since", "probability": 0.36962890625}, {"start": 880.45, "end": 880.65, "word": " the", "probability": 0.81787109375}, {"start": 880.65, "end": 880.99, "word": " probability", "probability": 0.92919921875}, {"start": 880.99, "end": 881.27, "word": " is", "probability": 0.85205078125}, {"start": 881.27, "end": 881.55, "word": " 100", "probability": 0.457275390625}, {"start": 881.55, "end": 881.91, "word": "%", "probability": 0.77392578125}, {"start": 881.91, "end": 882.31, "word": " smaller", "probability": 0.70166015625}, {"start": 882.31, "end": 882.57, "word": " than", "probability": 0.91455078125}, {"start": 882.57, "end": 882.79, "word": " 1", "probability": 0.305419921875}, {"start": 882.79, "end": 883.11, "word": ".5,", "probability": 0.80419921875}, {"start": 883.63, "end": 883.81, "word": " so", "probability": 0.716796875}, {"start": 883.81, "end": 883.91, "word": " it", "probability": 0.89990234375}, 
{"start": 883.91, "end": 884.05, "word": " should", "probability": 0.95849609375}, {"start": 884.05, "end": 884.21, "word": " be", "probability": 0.95263671875}, {"start": 884.21, "end": 884.49, "word": " negative.", "probability": 0.86279296875}, {"start": 885.39, "end": 885.67, "word": " So", "probability": 0.8251953125}, {"start": 885.67, "end": 885.81, "word": " if", "probability": 0.7265625}, {"start": 885.81, "end": 885.91, "word": " you", "probability": 0.9501953125}, {"start": 885.91, "end": 886.13, "word": " go", "probability": 0.95703125}, {"start": 886.13, "end": 886.45, "word": " back", "probability": 0.88330078125}, {"start": 886.45, "end": 886.95, "word": " to", "probability": 0.951171875}, {"start": 886.95, "end": 887.11, "word": " the", "probability": 0.8603515625}, {"start": 887.11, "end": 887.57, "word": " table,", "probability": 0.89208984375}, {"start": 888.17, "end": 888.61, "word": " negative", "probability": 0.7099609375}, {"start": 888.61, "end": 889.31, "word": " 1.", "probability": 0.62841796875}, {"start": 890.61, "end": 891.25, "word": " Look", "probability": 0.8203125}, {"start": 891.25, "end": 891.45, "word": " at", "probability": 0.966796875}, {"start": 891.45, "end": 891.67, "word": " 0", "probability": 0.482177734375}, {"start": 891.67, "end": 891.89, "word": ".1.", "probability": 0.6646728515625}, {"start": 900.29, "end": 900.93, "word": " Minus", "probability": 0.861328125}, {"start": 900.93, "end": 901.93, "word": " 2", "probability": 0.84130859375}, {"start": 901.93, "end": 902.55, "word": ".34.", "probability": 0.69189453125}], "temperature": 1.0}, {"id": 35, "seek": 93172, "start": 912.64, "end": 931.72, "text": " So the approximate answer actually is 0099. The closest value. You may take this value. You will be okay. 
So this one is more closer to 01.", "tokens": [407, 264, 30874, 1867, 767, 307, 7143, 8494, 13, 440, 13699, 2158, 13, 509, 815, 747, 341, 2158, 13, 509, 486, 312, 1392, 13, 407, 341, 472, 307, 544, 4966, 281, 23185, 13], "avg_logprob": -0.2571231696535559, "compression_ratio": 1.25, "no_speech_prob": 0.0, "words": [{"start": 912.64, "end": 913.52, "word": " So", "probability": 0.294189453125}, {"start": 913.52, "end": 914.4, "word": " the", "probability": 0.72900390625}, {"start": 914.4, "end": 914.88, "word": " approximate", "probability": 0.8759765625}, {"start": 914.88, "end": 915.36, "word": " answer", "probability": 0.96435546875}, {"start": 915.36, "end": 915.84, "word": " actually", "probability": 0.81201171875}, {"start": 915.84, "end": 916.34, "word": " is", "probability": 0.94775390625}, {"start": 916.34, "end": 919.8, "word": " 0099.", "probability": 0.8681640625}, {"start": 921.16, "end": 922.04, "word": " The", "probability": 0.58935546875}, {"start": 922.04, "end": 922.54, "word": " closest", "probability": 0.931640625}, {"start": 922.54, "end": 923.5, "word": " value.", "probability": 0.9453125}, {"start": 923.74, "end": 923.88, "word": " You", "probability": 0.9365234375}, {"start": 923.88, "end": 924.08, "word": " may", "probability": 0.9228515625}, {"start": 924.08, "end": 924.36, "word": " take", "probability": 0.8759765625}, {"start": 924.36, "end": 924.62, "word": " this", "probability": 0.94921875}, {"start": 924.62, "end": 924.94, "word": " value.", "probability": 0.98583984375}, {"start": 926.52, "end": 926.66, "word": " You", "probability": 0.64599609375}, {"start": 926.66, "end": 926.76, "word": " will", "probability": 0.6884765625}, {"start": 926.76, "end": 926.88, "word": " be", "probability": 0.94921875}, {"start": 926.88, "end": 927.18, "word": " okay.", "probability": 0.8193359375}, {"start": 927.8, "end": 928.02, "word": " So", "probability": 0.61962890625}, {"start": 928.02, "end": 928.28, "word": " this", "probability": 
0.9150390625}, {"start": 928.28, "end": 928.46, "word": " one", "probability": 0.92431640625}, {"start": 928.46, "end": 928.6, "word": " is", "probability": 0.8935546875}, {"start": 928.6, "end": 929.44, "word": " more", "probability": 0.9111328125}, {"start": 929.44, "end": 929.94, "word": " closer", "probability": 0.888671875}, {"start": 929.94, "end": 931.2, "word": " to", "probability": 0.9560546875}, {"start": 931.2, "end": 931.72, "word": " 01.", "probability": 0.8466796875}], "temperature": 1.0}, {"id": 36, "seek": 95656, "start": 932.16, "end": 956.56, "text": " than 0102. So my corresponding z-score is negative 2.4, I'm sorry, 2.35. So z-score, negative 2.33, 0123. So this is the approximate answer.", "tokens": [813, 1958, 3279, 17, 13, 407, 452, 11760, 710, 12, 4417, 418, 307, 3671, 568, 13, 19, 11, 286, 478, 2597, 11, 568, 13, 8794, 13, 407, 710, 12, 4417, 418, 11, 3671, 568, 13, 10191, 11, 1958, 4762, 18, 13, 407, 341, 307, 264, 30874, 1867, 13], "avg_logprob": -0.3174426117721869, "compression_ratio": 1.2260869565217392, "no_speech_prob": 0.0, "words": [{"start": 932.16, "end": 932.68, "word": " than", "probability": 0.239990234375}, {"start": 932.68, "end": 934.5, "word": " 0102.", "probability": 0.7527669270833334}, {"start": 935.0, "end": 935.16, "word": " So", "probability": 0.82861328125}, {"start": 935.16, "end": 935.38, "word": " my", "probability": 0.802734375}, {"start": 935.38, "end": 935.86, "word": " corresponding", "probability": 0.78271484375}, {"start": 935.86, "end": 936.16, "word": " z", "probability": 0.5908203125}, {"start": 936.16, "end": 936.34, "word": "-score", "probability": 0.7822265625}, {"start": 936.34, "end": 936.52, "word": " is", "probability": 0.93505859375}, {"start": 936.52, "end": 936.8, "word": " negative", "probability": 0.51318359375}, {"start": 936.8, "end": 937.12, "word": " 2", "probability": 0.90478515625}, {"start": 937.12, "end": 937.68, "word": ".4,", "probability": 0.7059326171875}, {"start": 937.94, "end": 
940.68, "word": " I'm", "probability": 0.5916748046875}, {"start": 940.68, "end": 940.88, "word": " sorry,", "probability": 0.87353515625}, {"start": 940.98, "end": 941.14, "word": " 2", "probability": 0.8955078125}, {"start": 941.14, "end": 941.9, "word": ".35.", "probability": 0.986572265625}, {"start": 942.82, "end": 943.18, "word": " So", "probability": 0.93212890625}, {"start": 943.18, "end": 943.42, "word": " z", "probability": 0.814453125}, {"start": 943.42, "end": 943.8, "word": "-score,", "probability": 0.91455078125}, {"start": 945.6, "end": 946.0, "word": " negative", "probability": 0.93017578125}, {"start": 946.0, "end": 946.34, "word": " 2", "probability": 0.97998046875}, {"start": 946.34, "end": 947.82, "word": ".33,", "probability": 0.7242431640625}, {"start": 950.9, "end": 952.52, "word": " 0123.", "probability": 0.7902018229166666}, {"start": 954.5, "end": 955.24, "word": " So", "probability": 0.943359375}, {"start": 955.24, "end": 955.42, "word": " this", "probability": 0.92138671875}, {"start": 955.42, "end": 955.54, "word": " is", "probability": 0.94873046875}, {"start": 955.54, "end": 955.64, "word": " the", "probability": 0.79248046875}, {"start": 955.64, "end": 956.04, "word": " approximate", "probability": 0.8984375}, {"start": 956.04, "end": 956.56, "word": " answer.", "probability": 0.95751953125}], "temperature": 1.0}, {"id": 37, "seek": 98356, "start": 958.06, "end": 983.56, "text": " sometimes maybe if you have a calculator or excel you may determine the exact value in this case which is minus 2.3263 this is the exact answer but the approximate one is 5 so my z score is negative 2.33 now the value of a", "tokens": [2171, 1310, 498, 291, 362, 257, 24993, 420, 24015, 291, 815, 6997, 264, 1900, 2158, 294, 341, 1389, 597, 307, 3175, 568, 13, 18, 10880, 18, 341, 307, 264, 1900, 1867, 457, 264, 30874, 472, 307, 1025, 370, 452, 710, 6175, 307, 3671, 568, 13, 10191, 586, 264, 2158, 295, 257], "avg_logprob": -0.20117187442687842, 
"compression_ratio": 1.4966442953020134, "no_speech_prob": 0.0, "words": [{"start": 958.06, "end": 958.76, "word": " sometimes", "probability": 0.1920166015625}, {"start": 958.76, "end": 959.16, "word": " maybe", "probability": 0.494140625}, {"start": 959.16, "end": 959.62, "word": " if", "probability": 0.85400390625}, {"start": 959.62, "end": 959.74, "word": " you", "probability": 0.96044921875}, {"start": 959.74, "end": 959.9, "word": " have", "probability": 0.9462890625}, {"start": 959.9, "end": 960.04, "word": " a", "probability": 0.8427734375}, {"start": 960.04, "end": 960.5, "word": " calculator", "probability": 0.9169921875}, {"start": 960.5, "end": 961.48, "word": " or", "probability": 0.73388671875}, {"start": 961.48, "end": 962.84, "word": " excel", "probability": 0.52099609375}, {"start": 962.84, "end": 964.16, "word": " you", "probability": 0.74658203125}, {"start": 964.16, "end": 964.34, "word": " may", "probability": 0.91162109375}, {"start": 964.34, "end": 964.8, "word": " determine", "probability": 0.939453125}, {"start": 964.8, "end": 965.6, "word": " the", "probability": 0.89453125}, {"start": 965.6, "end": 966.16, "word": " exact", "probability": 0.93701171875}, {"start": 966.16, "end": 966.66, "word": " value", "probability": 0.97705078125}, {"start": 966.66, "end": 966.94, "word": " in", "probability": 0.63330078125}, {"start": 966.94, "end": 967.16, "word": " this", "probability": 0.94921875}, {"start": 967.16, "end": 967.56, "word": " case", "probability": 0.9130859375}, {"start": 967.56, "end": 968.5, "word": " which", "probability": 0.87890625}, {"start": 968.5, "end": 968.72, "word": " is", "probability": 0.94970703125}, {"start": 968.72, "end": 969.2, "word": " minus", "probability": 0.61474609375}, {"start": 969.2, "end": 969.96, "word": " 2", "probability": 0.65380859375}, {"start": 969.96, "end": 971.4, "word": ".3263", "probability": 0.862548828125}, {"start": 971.4, "end": 973.16, "word": " this", "probability": 0.70361328125}, 
{"start": 973.16, "end": 973.26, "word": " is", "probability": 0.951171875}, {"start": 973.26, "end": 973.38, "word": " the", "probability": 0.8837890625}, {"start": 973.38, "end": 973.66, "word": " exact", "probability": 0.95361328125}, {"start": 973.66, "end": 974.08, "word": " answer", "probability": 0.9501953125}, {"start": 974.08, "end": 975.34, "word": " but", "probability": 0.77099609375}, {"start": 975.34, "end": 975.5, "word": " the", "probability": 0.8623046875}, {"start": 975.5, "end": 975.88, "word": " approximate", "probability": 0.86181640625}, {"start": 975.88, "end": 976.18, "word": " one", "probability": 0.91796875}, {"start": 976.18, "end": 976.32, "word": " is", "probability": 0.951171875}, {"start": 976.32, "end": 976.62, "word": " 5", "probability": 0.6337890625}, {"start": 976.62, "end": 978.6, "word": " so", "probability": 0.7646484375}, {"start": 978.6, "end": 978.84, "word": " my", "probability": 0.9541015625}, {"start": 978.84, "end": 979.06, "word": " z", "probability": 0.94091796875}, {"start": 979.06, "end": 979.42, "word": " score", "probability": 0.6142578125}, {"start": 979.42, "end": 979.82, "word": " is", "probability": 0.9482421875}, {"start": 979.82, "end": 980.16, "word": " negative", "probability": 0.83642578125}, {"start": 980.16, "end": 980.46, "word": " 2", "probability": 0.9453125}, {"start": 980.46, "end": 981.04, "word": ".33", "probability": 0.9765625}, {"start": 981.04, "end": 982.38, "word": " now", "probability": 0.8544921875}, {"start": 982.38, "end": 982.96, "word": " the", "probability": 0.7255859375}, {"start": 982.96, "end": 983.2, "word": " value", "probability": 0.97998046875}, {"start": 983.2, "end": 983.4, "word": " of", "probability": 0.923828125}, {"start": 983.4, "end": 983.56, "word": " a", "probability": 0.51025390625}], "temperature": 1.0}, {"id": 38, "seek": 101462, "start": 987.3, "end": 1014.62, "text": " equals Mu plus Z Sigma. The one we just discussed last time. 
Remember, when Z equals minus Mu divided by Sigma, just cross multiplication, you will get X minus Mu equals Z Sigma. That means X equals Mu plus Z Sigma.", "tokens": [6915, 15601, 1804, 1176, 36595, 13, 440, 472, 321, 445, 7152, 1036, 565, 13, 5459, 11, 562, 1176, 6915, 3175, 15601, 6666, 538, 36595, 11, 445, 3278, 27290, 11, 291, 486, 483, 1783, 3175, 15601, 6915, 1176, 36595, 13, 663, 1355, 1783, 6915, 15601, 1804, 1176, 36595, 13], "avg_logprob": -0.25302933673469385, "compression_ratio": 1.5539568345323742, "no_speech_prob": 0.0, "words": [{"start": 987.3, "end": 987.92, "word": " equals", "probability": 0.208740234375}, {"start": 987.92, "end": 988.54, "word": " Mu", "probability": 0.431884765625}, {"start": 988.54, "end": 988.8, "word": " plus", "probability": 0.8134765625}, {"start": 988.8, "end": 988.98, "word": " Z", "probability": 0.84326171875}, {"start": 988.98, "end": 989.3, "word": " Sigma.", "probability": 0.626953125}, {"start": 990.48, "end": 991.1, "word": " The", "probability": 0.591796875}, {"start": 991.1, "end": 991.24, "word": " one", "probability": 0.93408203125}, {"start": 991.24, "end": 991.38, "word": " we", "probability": 0.919921875}, {"start": 991.38, "end": 991.58, "word": " just", "probability": 0.79248046875}, {"start": 991.58, "end": 992.0, "word": " discussed", "probability": 0.8916015625}, {"start": 992.0, "end": 992.24, "word": " last", "probability": 0.8310546875}, {"start": 992.24, "end": 992.68, "word": " time.", "probability": 0.90087890625}, {"start": 993.24, "end": 993.6, "word": " Remember,", "probability": 0.46044921875}, {"start": 995.2, "end": 995.48, "word": " when", "probability": 0.87890625}, {"start": 995.48, "end": 995.68, "word": " Z", "probability": 0.89208984375}, {"start": 995.68, "end": 996.52, "word": " equals", "probability": 0.87841796875}, {"start": 996.52, "end": 999.32, "word": " minus", "probability": 0.64404296875}, {"start": 999.32, "end": 999.58, "word": " Mu", "probability": 0.82763671875}, 
{"start": 999.58, "end": 999.78, "word": " divided", "probability": 0.62939453125}, {"start": 999.78, "end": 1000.02, "word": " by", "probability": 0.97509765625}, {"start": 1000.02, "end": 1000.38, "word": " Sigma,", "probability": 0.81640625}, {"start": 1001.44, "end": 1001.96, "word": " just", "probability": 0.88720703125}, {"start": 1001.96, "end": 1002.36, "word": " cross", "probability": 0.8017578125}, {"start": 1002.36, "end": 1003.1, "word": " multiplication,", "probability": 0.8359375}, {"start": 1004.2, "end": 1004.32, "word": " you", "probability": 0.943359375}, {"start": 1004.32, "end": 1004.5, "word": " will", "probability": 0.8837890625}, {"start": 1004.5, "end": 1004.84, "word": " get", "probability": 0.94091796875}, {"start": 1004.84, "end": 1008.98, "word": " X", "probability": 0.78857421875}, {"start": 1008.98, "end": 1009.26, "word": " minus", "probability": 0.9853515625}, {"start": 1009.26, "end": 1009.48, "word": " Mu", "probability": 0.96728515625}, {"start": 1009.48, "end": 1009.78, "word": " equals", "probability": 0.9228515625}, {"start": 1009.78, "end": 1009.92, "word": " Z", "probability": 0.9892578125}, {"start": 1009.92, "end": 1010.18, "word": " Sigma.", "probability": 0.86181640625}, {"start": 1010.34, "end": 1010.48, "word": " That", "probability": 0.86865234375}, {"start": 1010.48, "end": 1010.9, "word": " means", "probability": 0.92919921875}, {"start": 1010.9, "end": 1011.88, "word": " X", "probability": 0.7958984375}, {"start": 1011.88, "end": 1012.12, "word": " equals", "probability": 0.79638671875}, {"start": 1012.12, "end": 1012.34, "word": " Mu", "probability": 0.9794921875}, {"start": 1012.34, "end": 1012.72, "word": " plus", "probability": 0.94873046875}, {"start": 1012.72, "end": 1014.32, "word": " Z", "probability": 0.9921875}, {"start": 1014.32, "end": 1014.62, "word": " Sigma.", "probability": 0.91455078125}], "temperature": 1.0}, {"id": 39, "seek": 103848, "start": 1015.62, "end": 1038.48, "text": " Fixed same as A, so 
A equals Mu plus Z Sigma, Mu is given 0.8, Z negative 2.33 times Sigma, that will give the final answer which is 0.3347. So again,", "tokens": [25538, 292, 912, 382, 316, 11, 370, 316, 6915, 15601, 1804, 1176, 36595, 11, 15601, 307, 2212, 1958, 13, 23, 11, 1176, 3671, 568, 13, 10191, 1413, 36595, 11, 300, 486, 976, 264, 2572, 1867, 597, 307, 1958, 13, 10191, 14060, 13, 407, 797, 11], "avg_logprob": -0.3041779832995456, "compression_ratio": 1.208, "no_speech_prob": 0.0, "words": [{"start": 1015.62, "end": 1016.28, "word": " Fixed", "probability": 0.53302001953125}, {"start": 1016.28, "end": 1016.7, "word": " same", "probability": 0.3994140625}, {"start": 1016.7, "end": 1016.88, "word": " as", "probability": 0.958984375}, {"start": 1016.88, "end": 1017.12, "word": " A,", "probability": 0.61669921875}, {"start": 1017.28, "end": 1017.46, "word": " so", "probability": 0.8447265625}, {"start": 1017.46, "end": 1017.6, "word": " A", "probability": 0.865234375}, {"start": 1017.6, "end": 1017.9, "word": " equals", "probability": 0.29296875}, {"start": 1017.9, "end": 1018.08, "word": " Mu", "probability": 0.5341796875}, {"start": 1018.08, "end": 1018.32, "word": " plus", "probability": 0.828125}, {"start": 1018.32, "end": 1018.46, "word": " Z", "probability": 0.3935546875}, {"start": 1018.46, "end": 1018.76, "word": " Sigma,", "probability": 0.69287109375}, {"start": 1019.84, "end": 1020.18, "word": " Mu", "probability": 0.517578125}, {"start": 1020.18, "end": 1020.4, "word": " is", "probability": 0.89990234375}, {"start": 1020.4, "end": 1020.62, "word": " given", "probability": 0.857421875}, {"start": 1020.62, "end": 1021.08, "word": " 0", "probability": 0.61376953125}, {"start": 1021.08, "end": 1021.62, "word": ".8,", "probability": 0.990966796875}, {"start": 1022.52, "end": 1023.86, "word": " Z", "probability": 0.93505859375}, {"start": 1023.86, "end": 1024.32, "word": " negative", "probability": 0.75}, {"start": 1024.32, "end": 1024.58, "word": " 2", "probability": 
0.875}, {"start": 1024.58, "end": 1025.16, "word": ".33", "probability": 0.982666015625}, {"start": 1025.16, "end": 1025.64, "word": " times", "probability": 0.83544921875}, {"start": 1025.64, "end": 1026.08, "word": " Sigma,", "probability": 0.69384765625}, {"start": 1026.94, "end": 1027.28, "word": " that", "probability": 0.9072265625}, {"start": 1027.28, "end": 1027.5, "word": " will", "probability": 0.896484375}, {"start": 1027.5, "end": 1027.82, "word": " give", "probability": 0.8916015625}, {"start": 1027.82, "end": 1030.34, "word": " the", "probability": 0.7744140625}, {"start": 1030.34, "end": 1030.72, "word": " final", "probability": 0.943359375}, {"start": 1030.72, "end": 1031.16, "word": " answer", "probability": 0.947265625}, {"start": 1031.16, "end": 1031.42, "word": " which", "probability": 0.73486328125}, {"start": 1031.42, "end": 1031.8, "word": " is", "probability": 0.9462890625}, {"start": 1031.8, "end": 1033.1, "word": " 0", "probability": 0.9189453125}, {"start": 1033.1, "end": 1034.06, "word": ".3347.", "probability": 0.9729817708333334}, {"start": 1037.48, "end": 1038.2, "word": " So", "probability": 0.94189453125}, {"start": 1038.2, "end": 1038.48, "word": " again,", "probability": 0.7978515625}], "temperature": 1.0}, {"id": 40, "seek": 106917, "start": 1040.39, "end": 1069.17, "text": " He said that 99% of the downward times are above how many seconds. So we are looking for the value of A, such that U makes greater than or equal to 99%. So A is located to the left side of the curve, normal curve. And again, the table gives the area to the left of Z. 
So the area to the left is 1%.", "tokens": [634, 848, 300, 11803, 4, 295, 264, 24805, 1413, 366, 3673, 577, 867, 3949, 13, 407, 321, 366, 1237, 337, 264, 2158, 295, 316, 11, 1270, 300, 624, 1669, 5044, 813, 420, 2681, 281, 11803, 6856, 407, 316, 307, 6870, 281, 264, 1411, 1252, 295, 264, 7605, 11, 2710, 7605, 13, 400, 797, 11, 264, 3199, 2709, 264, 1859, 281, 264, 1411, 295, 1176, 13, 407, 264, 1859, 281, 264, 1411, 307, 502, 6856], "avg_logprob": -0.29895832697550456, "compression_ratio": 1.5989304812834224, "no_speech_prob": 0.0, "words": [{"start": 1040.39, "end": 1040.63, "word": " He", "probability": 0.40771484375}, {"start": 1040.63, "end": 1040.91, "word": " said", "probability": 0.92138671875}, {"start": 1040.91, "end": 1041.31, "word": " that", "probability": 0.7236328125}, {"start": 1041.31, "end": 1041.83, "word": " 99", "probability": 0.89697265625}, {"start": 1041.83, "end": 1042.55, "word": "%", "probability": 0.82763671875}, {"start": 1042.55, "end": 1043.09, "word": " of", "probability": 0.9443359375}, {"start": 1043.09, "end": 1043.25, "word": " the", "probability": 0.71044921875}, {"start": 1043.25, "end": 1043.57, "word": " downward", "probability": 0.568359375}, {"start": 1043.57, "end": 1044.13, "word": " times", "probability": 0.41064453125}, {"start": 1044.13, "end": 1044.39, "word": " are", "probability": 0.8837890625}, {"start": 1044.39, "end": 1044.71, "word": " above", "probability": 0.52880859375}, {"start": 1044.71, "end": 1045.03, "word": " how", "probability": 0.85107421875}, {"start": 1045.03, "end": 1045.25, "word": " many", "probability": 0.89501953125}, {"start": 1045.25, "end": 1045.71, "word": " seconds.", "probability": 0.7822265625}, {"start": 1046.73, "end": 1047.03, "word": " So", "probability": 0.8896484375}, {"start": 1047.03, "end": 1047.19, "word": " we", "probability": 0.78076171875}, {"start": 1047.19, "end": 1047.31, "word": " are", "probability": 0.88818359375}, {"start": 1047.31, "end": 1047.55, "word": " looking", 
"probability": 0.91796875}, {"start": 1047.55, "end": 1047.81, "word": " for", "probability": 0.947265625}, {"start": 1047.81, "end": 1047.95, "word": " the", "probability": 0.8916015625}, {"start": 1047.95, "end": 1048.19, "word": " value", "probability": 0.97802734375}, {"start": 1048.19, "end": 1048.39, "word": " of", "probability": 0.912109375}, {"start": 1048.39, "end": 1048.57, "word": " A,", "probability": 0.61572265625}, {"start": 1049.93, "end": 1050.69, "word": " such", "probability": 0.90087890625}, {"start": 1050.69, "end": 1051.07, "word": " that", "probability": 0.94287109375}, {"start": 1051.07, "end": 1051.39, "word": " U", "probability": 0.2039794921875}, {"start": 1051.39, "end": 1051.67, "word": " makes", "probability": 0.424072265625}, {"start": 1051.67, "end": 1052.05, "word": " greater", "probability": 0.57275390625}, {"start": 1052.05, "end": 1052.35, "word": " than", "probability": 0.95556640625}, {"start": 1052.35, "end": 1052.47, "word": " or", "probability": 0.64892578125}, {"start": 1052.47, "end": 1052.63, "word": " equal", "probability": 0.8828125}, {"start": 1052.63, "end": 1052.77, "word": " to", "probability": 0.7587890625}, {"start": 1052.77, "end": 1053.51, "word": " 99%.", "probability": 0.89404296875}, {"start": 1053.51, "end": 1055.75, "word": " So", "probability": 0.90625}, {"start": 1055.75, "end": 1056.25, "word": " A", "probability": 0.744140625}, {"start": 1056.25, "end": 1056.87, "word": " is", "probability": 0.9443359375}, {"start": 1056.87, "end": 1057.33, "word": " located", "probability": 0.9140625}, {"start": 1057.33, "end": 1057.55, "word": " to", "probability": 0.869140625}, {"start": 1057.55, "end": 1057.71, "word": " the", "probability": 0.91162109375}, {"start": 1057.71, "end": 1057.89, "word": " left", "probability": 0.9482421875}, {"start": 1057.89, "end": 1058.23, "word": " side", "probability": 0.828125}, {"start": 1058.23, "end": 1058.93, "word": " of", "probability": 0.95947265625}, {"start": 1058.93, 
"end": 1059.11, "word": " the", "probability": 0.841796875}, {"start": 1059.11, "end": 1059.49, "word": " curve,", "probability": 0.1895751953125}, {"start": 1059.61, "end": 1059.99, "word": " normal", "probability": 0.5673828125}, {"start": 1059.99, "end": 1060.33, "word": " curve.", "probability": 0.94189453125}, {"start": 1063.11, "end": 1063.63, "word": " And", "probability": 0.349365234375}, {"start": 1063.63, "end": 1063.89, "word": " again,", "probability": 0.91748046875}, {"start": 1064.01, "end": 1064.09, "word": " the", "probability": 0.89404296875}, {"start": 1064.09, "end": 1064.27, "word": " table", "probability": 0.828125}, {"start": 1064.27, "end": 1064.49, "word": " gives", "probability": 0.5908203125}, {"start": 1064.49, "end": 1064.61, "word": " the", "probability": 0.423095703125}, {"start": 1064.61, "end": 1064.69, "word": " area", "probability": 0.87353515625}, {"start": 1064.69, "end": 1064.85, "word": " to", "probability": 0.96240234375}, {"start": 1064.85, "end": 1064.97, "word": " the", "probability": 0.916015625}, {"start": 1064.97, "end": 1065.19, "word": " left", "probability": 0.953125}, {"start": 1065.19, "end": 1065.35, "word": " of", "probability": 0.96044921875}, {"start": 1065.35, "end": 1065.55, "word": " Z.", "probability": 0.6357421875}, {"start": 1066.97, "end": 1067.49, "word": " So", "probability": 0.9248046875}, {"start": 1067.49, "end": 1067.67, "word": " the", "probability": 0.76025390625}, {"start": 1067.67, "end": 1067.85, "word": " area", "probability": 0.89794921875}, {"start": 1067.85, "end": 1068.05, "word": " to", "probability": 0.95654296875}, {"start": 1068.05, "end": 1068.21, "word": " the", "probability": 0.91455078125}, {"start": 1068.21, "end": 1068.41, "word": " left", "probability": 0.9443359375}, {"start": 1068.41, "end": 1068.61, "word": " is", "probability": 0.92041015625}, {"start": 1068.61, "end": 1069.17, "word": " 1%.", "probability": 0.824462890625}], "temperature": 1.0}, {"id": 41, "seek": 109276, 
"start": 1070.26, "end": 1092.76, "text": " Now if you check the z value corresponding to this one, 101, you figure that z, the approximate answer is negative 2.33. Now just use this value, and plug it into this equation, you will get this result. Yes. Is it negative?", "tokens": [823, 498, 291, 1520, 264, 710, 2158, 11760, 281, 341, 472, 11, 21055, 11, 291, 2573, 300, 710, 11, 264, 30874, 1867, 307, 3671, 568, 13, 10191, 13, 823, 445, 764, 341, 2158, 11, 293, 5452, 309, 666, 341, 5367, 11, 291, 486, 483, 341, 1874, 13, 1079, 13, 1119, 309, 3671, 30], "avg_logprob": -0.3451967559478901, "compression_ratio": 1.4240506329113924, "no_speech_prob": 0.0, "words": [{"start": 1070.26, "end": 1070.58, "word": " Now", "probability": 0.56005859375}, {"start": 1070.58, "end": 1070.78, "word": " if", "probability": 0.78271484375}, {"start": 1070.78, "end": 1071.08, "word": " you", "probability": 0.962890625}, {"start": 1071.08, "end": 1072.62, "word": " check", "probability": 0.8349609375}, {"start": 1072.62, "end": 1073.26, "word": " the", "probability": 0.8994140625}, {"start": 1073.26, "end": 1073.42, "word": " z", "probability": 0.55126953125}, {"start": 1073.42, "end": 1073.88, "word": " value", "probability": 0.88818359375}, {"start": 1073.88, "end": 1074.92, "word": " corresponding", "probability": 0.7314453125}, {"start": 1074.92, "end": 1075.28, "word": " to", "probability": 0.96533203125}, {"start": 1075.28, "end": 1075.58, "word": " this", "probability": 0.90625}, {"start": 1075.58, "end": 1075.9, "word": " one,", "probability": 0.8623046875}, {"start": 1076.28, "end": 1076.42, "word": " 101,", "probability": 0.49462890625}, {"start": 1077.62, "end": 1077.9, "word": " you", "probability": 0.6796875}, {"start": 1077.9, "end": 1078.14, "word": " figure", "probability": 0.478515625}, {"start": 1078.14, "end": 1078.56, "word": " that", "probability": 0.8408203125}, {"start": 1078.56, "end": 1079.14, "word": " z,", "probability": 0.828125}, {"start": 1079.32, "end": 
1079.42, "word": " the", "probability": 0.89794921875}, {"start": 1079.42, "end": 1079.8, "word": " approximate", "probability": 0.8720703125}, {"start": 1079.8, "end": 1080.2, "word": " answer", "probability": 0.9658203125}, {"start": 1080.2, "end": 1080.4, "word": " is", "probability": 0.69091796875}, {"start": 1080.4, "end": 1080.7, "word": " negative", "probability": 0.53076171875}, {"start": 1080.7, "end": 1080.98, "word": " 2", "probability": 0.83251953125}, {"start": 1080.98, "end": 1081.46, "word": ".33.", "probability": 0.95556640625}, {"start": 1082.34, "end": 1082.74, "word": " Now", "probability": 0.402587890625}, {"start": 1082.74, "end": 1083.04, "word": " just", "probability": 0.61474609375}, {"start": 1083.04, "end": 1083.28, "word": " use", "probability": 0.8701171875}, {"start": 1083.28, "end": 1083.54, "word": " this", "probability": 0.95263671875}, {"start": 1083.54, "end": 1083.94, "word": " value,", "probability": 0.97265625}, {"start": 1084.12, "end": 1086.54, "word": " and", "probability": 0.92626953125}, {"start": 1086.54, "end": 1086.8, "word": " plug", "probability": 0.68408203125}, {"start": 1086.8, "end": 1086.94, "word": " it", "probability": 0.95458984375}, {"start": 1086.94, "end": 1087.16, "word": " into", "probability": 0.83544921875}, {"start": 1087.16, "end": 1087.4, "word": " this", "probability": 0.94873046875}, {"start": 1087.4, "end": 1087.82, "word": " equation,", "probability": 0.986328125}, {"start": 1088.98, "end": 1089.22, "word": " you", "probability": 0.91162109375}, {"start": 1089.22, "end": 1089.4, "word": " will", "probability": 0.880859375}, {"start": 1089.4, "end": 1089.68, "word": " get", "probability": 0.921875}, {"start": 1089.68, "end": 1089.94, "word": " this", "probability": 0.9462890625}, {"start": 1089.94, "end": 1090.24, "word": " result.", "probability": 0.9189453125}, {"start": 1090.82, "end": 1091.04, "word": " Yes.", "probability": 0.38720703125}, {"start": 1091.58, "end": 1091.7, "word": " Is", 
"probability": 0.08514404296875}, {"start": 1091.7, "end": 1091.94, "word": " it", "probability": 0.48779296875}, {"start": 1091.94, "end": 1092.76, "word": " negative?", "probability": 0.292236328125}], "temperature": 1.0}, {"id": 42, "seek": 112681, "start": 1102.75, "end": 1126.81, "text": " Last part, part E 95% 95%", "tokens": [5264, 644, 11, 644, 462, 13420, 4, 13420, 4], "avg_logprob": -0.42617187201976775, "compression_ratio": 0.9285714285714286, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1102.75, "end": 1104.03, "word": " Last", "probability": 0.357177734375}, {"start": 1104.03, "end": 1104.59, "word": " part,", "probability": 0.8564453125}, {"start": 1104.75, "end": 1105.09, "word": " part", "probability": 0.7626953125}, {"start": 1105.09, "end": 1105.51, "word": " E", "probability": 0.48095703125}, {"start": 1105.51, "end": 1112.91, "word": " 95", "probability": 0.404541015625}, {"start": 1112.91, "end": 1114.57, "word": "%", "probability": 0.638671875}, {"start": 1114.57, "end": 1126.17, "word": " 95", "probability": 0.90087890625}, {"start": 1126.17, "end": 1126.81, "word": "%", "probability": 0.95068359375}], "temperature": 1.0}, {"id": 43, "seek": 115788, "start": 1130.4, "end": 1157.88, "text": " after the load times are between what two values approximately distributed around the mean. 
So around 95%", "tokens": [934, 264, 3677, 1413, 366, 1296, 437, 732, 4190, 10447, 12631, 926, 264, 914, 13, 407, 926, 13420, 4], "avg_logprob": -0.3453125134110451, "compression_ratio": 1.1397849462365592, "no_speech_prob": 0.0, "words": [{"start": 1130.4, "end": 1130.88, "word": " after", "probability": 0.16943359375}, {"start": 1130.88, "end": 1131.08, "word": " the", "probability": 0.91064453125}, {"start": 1131.08, "end": 1131.4, "word": " load", "probability": 0.90185546875}, {"start": 1131.4, "end": 1132.48, "word": " times", "probability": 0.88525390625}, {"start": 1132.48, "end": 1134.04, "word": " are", "probability": 0.8544921875}, {"start": 1134.04, "end": 1134.58, "word": " between", "probability": 0.91015625}, {"start": 1134.58, "end": 1135.18, "word": " what", "probability": 0.853515625}, {"start": 1135.18, "end": 1135.38, "word": " two", "probability": 0.7919921875}, {"start": 1135.38, "end": 1135.98, "word": " values", "probability": 0.96533203125}, {"start": 1135.98, "end": 1137.8, "word": " approximately", "probability": 0.76806640625}, {"start": 1137.8, "end": 1138.42, "word": " distributed", "probability": 0.7275390625}, {"start": 1138.42, "end": 1138.72, "word": " around", "probability": 0.92626953125}, {"start": 1138.72, "end": 1138.94, "word": " the", "probability": 0.876953125}, {"start": 1138.94, "end": 1139.12, "word": " mean.", "probability": 0.69091796875}, {"start": 1144.96, "end": 1145.18, "word": " So", "probability": 0.67236328125}, {"start": 1145.18, "end": 1145.56, "word": " around", "probability": 0.83349609375}, {"start": 1145.56, "end": 1157.34, "word": " 95", "probability": 0.72265625}, {"start": 1157.34, "end": 1157.88, "word": "%", "probability": 0.399169921875}], "temperature": 1.0}, {"id": 44, "seek": 116891, "start": 1159.51, "end": 1168.91, "text": " Of the download times, what two values symmetrically distributed around the mean?", "tokens": [2720, 264, 5484, 1413, 11, 437, 732, 4190, 14232, 27965, 984, 12631, 926, 264, 
914, 30], "avg_logprob": -0.24632352941176472, "compression_ratio": 1.0379746835443038, "no_speech_prob": 0.0, "words": [{"start": 1159.51, "end": 1159.83, "word": " Of", "probability": 0.385009765625}, {"start": 1159.83, "end": 1159.99, "word": " the", "probability": 0.90087890625}, {"start": 1159.99, "end": 1160.39, "word": " download", "probability": 0.6982421875}, {"start": 1160.39, "end": 1161.11, "word": " times,", "probability": 0.734375}, {"start": 1162.25, "end": 1164.29, "word": " what", "probability": 0.90185546875}, {"start": 1164.29, "end": 1164.49, "word": " two", "probability": 0.76171875}, {"start": 1164.49, "end": 1165.09, "word": " values", "probability": 0.95458984375}, {"start": 1165.09, "end": 1167.07, "word": " symmetrically", "probability": 0.91650390625}, {"start": 1167.07, "end": 1168.35, "word": " distributed", "probability": 0.55224609375}, {"start": 1168.35, "end": 1168.65, "word": " around", "probability": 0.931640625}, {"start": 1168.65, "end": 1168.83, "word": " the", "probability": 0.703125}, {"start": 1168.83, "end": 1168.91, "word": " mean?", "probability": 0.7958984375}], "temperature": 1.0}, {"id": 45, "seek": 119917, "start": 1170.99, "end": 1199.17, "text": " the area here for example between E and B is 95% and he mentioned the proximity so this E is the same as B but still we have negative sign so we are looking we are looking for the probability of P of X", "tokens": [264, 1859, 510, 337, 1365, 1296, 462, 293, 363, 307, 13420, 4, 293, 415, 2835, 264, 27632, 370, 341, 462, 307, 264, 912, 382, 363, 457, 920, 321, 362, 3671, 1465, 370, 321, 366, 1237, 321, 366, 1237, 337, 264, 8482, 295, 430, 295, 1783], "avg_logprob": -0.30366848020449927, "compression_ratio": 1.4532374100719425, "no_speech_prob": 0.0, "words": [{"start": 1170.99, "end": 1171.17, "word": " the", "probability": 0.188232421875}, {"start": 1171.17, "end": 1171.49, "word": " area", "probability": 0.87890625}, {"start": 1171.49, "end": 1171.89, "word": " here", 
"probability": 0.8125}, {"start": 1171.89, "end": 1172.63, "word": " for", "probability": 0.6318359375}, {"start": 1172.63, "end": 1173.17, "word": " example", "probability": 0.96484375}, {"start": 1173.17, "end": 1176.25, "word": " between", "probability": 0.477783203125}, {"start": 1176.25, "end": 1176.45, "word": " E", "probability": 0.43798828125}, {"start": 1176.45, "end": 1176.61, "word": " and", "probability": 0.8818359375}, {"start": 1176.61, "end": 1176.87, "word": " B", "probability": 0.76904296875}, {"start": 1176.87, "end": 1177.89, "word": " is", "probability": 0.849609375}, {"start": 1177.89, "end": 1178.27, "word": " 95", "probability": 0.96728515625}, {"start": 1178.27, "end": 1178.85, "word": "%", "probability": 0.7900390625}, {"start": 1178.85, "end": 1180.51, "word": " and", "probability": 0.51806640625}, {"start": 1180.51, "end": 1180.71, "word": " he", "probability": 0.80615234375}, {"start": 1180.71, "end": 1181.05, "word": " mentioned", "probability": 0.794921875}, {"start": 1181.05, "end": 1181.49, "word": " the", "probability": 0.425537109375}, {"start": 1181.49, "end": 1182.57, "word": " proximity", "probability": 0.76708984375}, {"start": 1182.57, "end": 1184.85, "word": " so", "probability": 0.38916015625}, {"start": 1184.85, "end": 1185.09, "word": " this", "probability": 0.84912109375}, {"start": 1185.09, "end": 1185.47, "word": " E", "probability": 0.3984375}, {"start": 1185.47, "end": 1185.81, "word": " is", "probability": 0.7421875}, {"start": 1185.81, "end": 1185.95, "word": " the", "probability": 0.73779296875}, {"start": 1185.95, "end": 1186.13, "word": " same", "probability": 0.92041015625}, {"start": 1186.13, "end": 1186.37, "word": " as", "probability": 0.96240234375}, {"start": 1186.37, "end": 1186.55, "word": " B", "probability": 0.9365234375}, {"start": 1186.55, "end": 1186.81, "word": " but", "probability": 0.775390625}, {"start": 1186.81, "end": 1187.23, "word": " still", "probability": 0.84228515625}, {"start": 1187.23, 
"end": 1187.57, "word": " we", "probability": 0.90087890625}, {"start": 1187.57, "end": 1187.85, "word": " have", "probability": 0.94921875}, {"start": 1187.85, "end": 1188.79, "word": " negative", "probability": 0.7568359375}, {"start": 1188.79, "end": 1189.15, "word": " sign", "probability": 0.8837890625}, {"start": 1189.15, "end": 1191.49, "word": " so", "probability": 0.7548828125}, {"start": 1191.49, "end": 1191.65, "word": " we", "probability": 0.94970703125}, {"start": 1191.65, "end": 1191.75, "word": " are", "probability": 0.93359375}, {"start": 1191.75, "end": 1192.07, "word": " looking", "probability": 0.92724609375}, {"start": 1192.07, "end": 1196.73, "word": " we", "probability": 0.43408203125}, {"start": 1196.73, "end": 1196.89, "word": " are", "probability": 0.935546875}, {"start": 1196.89, "end": 1197.11, "word": " looking", "probability": 0.9150390625}, {"start": 1197.11, "end": 1197.33, "word": " for", "probability": 0.951171875}, {"start": 1197.33, "end": 1197.51, "word": " the", "probability": 0.88232421875}, {"start": 1197.51, "end": 1197.85, "word": " probability", "probability": 0.9677734375}, {"start": 1197.85, "end": 1198.21, "word": " of", "probability": 0.96875}, {"start": 1198.21, "end": 1198.49, "word": " P", "probability": 0.311767578125}, {"start": 1198.49, "end": 1198.83, "word": " of", "probability": 0.7734375}, {"start": 1198.83, "end": 1199.17, "word": " X", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 46, "seek": 123151, "start": 1205.55, "end": 1231.51, "text": " between A and B equal 95% now by symmetric distribution exactly this value A is the same as B but with negative sign now since the area between A and B is 95% and we have symmetric distribution 5% left divided by 2 that means", "tokens": [1296, 316, 293, 363, 2681, 13420, 4, 586, 538, 32330, 7316, 2293, 341, 2158, 316, 307, 264, 912, 382, 363, 457, 365, 3671, 1465, 586, 1670, 264, 1859, 1296, 316, 293, 363, 307, 13420, 4, 293, 321, 362, 32330, 7316, 1025, 
4, 1411, 6666, 538, 568, 300, 1355], "avg_logprob": -0.21332908163265307, "compression_ratio": 1.5694444444444444, "no_speech_prob": 0.0, "words": [{"start": 1205.55, "end": 1206.11, "word": " between", "probability": 0.52001953125}, {"start": 1206.11, "end": 1206.33, "word": " A", "probability": 0.77197265625}, {"start": 1206.33, "end": 1206.51, "word": " and", "probability": 0.939453125}, {"start": 1206.51, "end": 1206.63, "word": " B", "probability": 0.98974609375}, {"start": 1206.63, "end": 1206.83, "word": " equal", "probability": 0.445556640625}, {"start": 1206.83, "end": 1207.13, "word": " 95", "probability": 0.86962890625}, {"start": 1207.13, "end": 1207.65, "word": "%", "probability": 0.368896484375}, {"start": 1207.65, "end": 1209.63, "word": " now", "probability": 0.487548828125}, {"start": 1209.63, "end": 1209.87, "word": " by", "probability": 0.8837890625}, {"start": 1209.87, "end": 1210.29, "word": " symmetric", "probability": 0.77197265625}, {"start": 1210.29, "end": 1211.11, "word": " distribution", "probability": 0.81396484375}, {"start": 1211.11, "end": 1214.35, "word": " exactly", "probability": 0.75390625}, {"start": 1214.35, "end": 1214.67, "word": " this", "probability": 0.93408203125}, {"start": 1214.67, "end": 1215.09, "word": " value", "probability": 0.9619140625}, {"start": 1215.09, "end": 1215.85, "word": " A", "probability": 0.5859375}, {"start": 1215.85, "end": 1216.49, "word": " is", "probability": 0.8955078125}, {"start": 1216.49, "end": 1216.67, "word": " the", "probability": 0.8623046875}, {"start": 1216.67, "end": 1216.89, "word": " same", "probability": 0.91943359375}, {"start": 1216.89, "end": 1217.15, "word": " as", "probability": 0.95947265625}, {"start": 1217.15, "end": 1217.45, "word": " B", "probability": 0.97705078125}, {"start": 1217.45, "end": 1217.89, "word": " but", "probability": 0.73388671875}, {"start": 1217.89, "end": 1219.07, "word": " with", "probability": 0.5693359375}, {"start": 1219.07, "end": 1219.43, "word": 
" negative", "probability": 0.8798828125}, {"start": 1219.43, "end": 1219.89, "word": " sign", "probability": 0.9150390625}, {"start": 1219.89, "end": 1222.05, "word": " now", "probability": 0.5859375}, {"start": 1222.05, "end": 1222.69, "word": " since", "probability": 0.818359375}, {"start": 1222.69, "end": 1222.91, "word": " the", "probability": 0.9091796875}, {"start": 1222.91, "end": 1223.17, "word": " area", "probability": 0.88916015625}, {"start": 1223.17, "end": 1223.43, "word": " between", "probability": 0.880859375}, {"start": 1223.43, "end": 1223.61, "word": " A", "probability": 0.95654296875}, {"start": 1223.61, "end": 1223.77, "word": " and", "probability": 0.93896484375}, {"start": 1223.77, "end": 1223.89, "word": " B", "probability": 0.9951171875}, {"start": 1223.89, "end": 1224.01, "word": " is", "probability": 0.94580078125}, {"start": 1224.01, "end": 1224.33, "word": " 95", "probability": 0.9697265625}, {"start": 1224.33, "end": 1224.87, "word": "%", "probability": 0.96142578125}, {"start": 1224.87, "end": 1225.27, "word": " and", "probability": 0.93017578125}, {"start": 1225.27, "end": 1225.39, "word": " we", "probability": 0.83837890625}, {"start": 1225.39, "end": 1225.53, "word": " have", "probability": 0.953125}, {"start": 1225.53, "end": 1225.91, "word": " symmetric", "probability": 0.77783203125}, {"start": 1225.91, "end": 1226.65, "word": " distribution", "probability": 0.833984375}, {"start": 1226.65, "end": 1227.61, "word": " 5", "probability": 0.7734375}, {"start": 1227.61, "end": 1228.05, "word": "%", "probability": 0.94189453125}, {"start": 1228.05, "end": 1228.45, "word": " left", "probability": 0.78271484375}, {"start": 1228.45, "end": 1229.69, "word": " divided", "probability": 0.496826171875}, {"start": 1229.69, "end": 1229.89, "word": " by", "probability": 0.96923828125}, {"start": 1229.89, "end": 1230.17, "word": " 2", "probability": 0.77099609375}, {"start": 1230.17, "end": 1231.11, "word": " that", "probability": 
0.80517578125}, {"start": 1231.11, "end": 1231.51, "word": " means", "probability": 0.92236328125}], "temperature": 1.0}, {"id": 47, "seek": 125691, "start": 1233.51, "end": 1256.91, "text": " 2.5 to the left of A and 2.5% to the right of B. Now, what are the values of A and B? Now, if you look at this value, B of X less than A equals 2.5%.", "tokens": [568, 13, 20, 281, 264, 1411, 295, 316, 293, 568, 13, 20, 4, 281, 264, 558, 295, 363, 13, 823, 11, 437, 366, 264, 4190, 295, 316, 293, 363, 30, 823, 11, 498, 291, 574, 412, 341, 2158, 11, 363, 295, 1783, 1570, 813, 316, 6915, 568, 13, 20, 6856], "avg_logprob": -0.1472885994350209, "compression_ratio": 1.3157894736842106, "no_speech_prob": 0.0, "words": [{"start": 1233.51, "end": 1233.81, "word": " 2", "probability": 0.50244140625}, {"start": 1233.81, "end": 1234.35, "word": ".5", "probability": 0.9853515625}, {"start": 1234.35, "end": 1234.65, "word": " to", "probability": 0.724609375}, {"start": 1234.65, "end": 1234.83, "word": " the", "probability": 0.916015625}, {"start": 1234.83, "end": 1235.09, "word": " left", "probability": 0.93701171875}, {"start": 1235.09, "end": 1235.33, "word": " of", "probability": 0.96484375}, {"start": 1235.33, "end": 1235.53, "word": " A", "probability": 0.7119140625}, {"start": 1235.53, "end": 1236.55, "word": " and", "probability": 0.6376953125}, {"start": 1236.55, "end": 1236.77, "word": " 2", "probability": 0.9814453125}, {"start": 1236.77, "end": 1237.33, "word": ".5", "probability": 0.99658203125}, {"start": 1237.33, "end": 1237.67, "word": "%", "probability": 0.73583984375}, {"start": 1237.67, "end": 1238.01, "word": " to", "probability": 0.9619140625}, {"start": 1238.01, "end": 1238.15, "word": " the", "probability": 0.91064453125}, {"start": 1238.15, "end": 1238.37, "word": " right", "probability": 0.9140625}, {"start": 1238.37, "end": 1238.55, "word": " of", "probability": 0.95751953125}, {"start": 1238.55, "end": 1238.69, "word": " B.", "probability": 0.9814453125}, 
{"start": 1243.13, "end": 1243.63, "word": " Now,", "probability": 0.89404296875}, {"start": 1243.77, "end": 1243.99, "word": " what", "probability": 0.9267578125}, {"start": 1243.99, "end": 1244.19, "word": " are", "probability": 0.943359375}, {"start": 1244.19, "end": 1244.33, "word": " the", "probability": 0.90185546875}, {"start": 1244.33, "end": 1244.59, "word": " values", "probability": 0.98193359375}, {"start": 1244.59, "end": 1244.75, "word": " of", "probability": 0.72802734375}, {"start": 1244.75, "end": 1244.81, "word": " A", "probability": 0.935546875}, {"start": 1244.81, "end": 1244.97, "word": " and", "probability": 0.951171875}, {"start": 1244.97, "end": 1245.15, "word": " B?", "probability": 0.99365234375}, {"start": 1246.85, "end": 1247.35, "word": " Now,", "probability": 0.62744140625}, {"start": 1248.79, "end": 1249.93, "word": " if", "probability": 0.9384765625}, {"start": 1249.93, "end": 1250.05, "word": " you", "probability": 0.88232421875}, {"start": 1250.05, "end": 1250.19, "word": " look", "probability": 0.96533203125}, {"start": 1250.19, "end": 1250.31, "word": " at", "probability": 0.966796875}, {"start": 1250.31, "end": 1250.53, "word": " this", "probability": 0.94384765625}, {"start": 1250.53, "end": 1250.95, "word": " value,", "probability": 0.9716796875}, {"start": 1252.33, "end": 1252.69, "word": " B", "probability": 0.89208984375}, {"start": 1252.69, "end": 1252.89, "word": " of", "probability": 0.89599609375}, {"start": 1252.89, "end": 1253.13, "word": " X", "probability": 0.8447265625}, {"start": 1253.13, "end": 1253.63, "word": " less", "probability": 0.8251953125}, {"start": 1253.63, "end": 1253.89, "word": " than", "probability": 0.9580078125}, {"start": 1253.89, "end": 1254.09, "word": " A", "probability": 0.95166015625}, {"start": 1254.09, "end": 1254.45, "word": " equals", "probability": 0.681640625}, {"start": 1254.45, "end": 1255.97, "word": " 2", "probability": 0.8544921875}, {"start": 1255.97, "end": 1256.91, "word": 
".5%.", "probability": 0.9091796875}], "temperature": 1.0}, {"id": 48, "seek": 128683, "start": 1260.78, "end": 1286.84, "text": " Be careful, it's 0, 2, 2, 5, 0, 2, 5. Now, what's the value of A softer, B makes smaller than A by this one. The same I just we did in bar D. So that's A. Now, what's the Z score in this case? If we go back to the normal table, now we are looking for", "tokens": [879, 5026, 11, 309, 311, 1958, 11, 568, 11, 568, 11, 1025, 11, 1958, 11, 568, 11, 1025, 13, 823, 11, 437, 311, 264, 2158, 295, 316, 23119, 11, 363, 1669, 4356, 813, 316, 538, 341, 472, 13, 440, 912, 286, 445, 321, 630, 294, 2159, 413, 13, 407, 300, 311, 316, 13, 823, 11, 437, 311, 264, 1176, 6175, 294, 341, 1389, 30, 759, 321, 352, 646, 281, 264, 2710, 3199, 11, 586, 321, 366, 1237, 337], "avg_logprob": -0.2891613871236391, "compression_ratio": 1.4261363636363635, "no_speech_prob": 0.0, "words": [{"start": 1260.78, "end": 1260.98, "word": " Be", "probability": 0.6455078125}, {"start": 1260.98, "end": 1261.3, "word": " careful,", "probability": 0.939453125}, {"start": 1261.48, "end": 1261.72, "word": " it's", "probability": 0.899658203125}, {"start": 1261.72, "end": 1262.12, "word": " 0,", "probability": 0.5}, {"start": 1262.2, "end": 1262.36, "word": " 2,", "probability": 0.55712890625}, {"start": 1262.42, "end": 1262.54, "word": " 2,", "probability": 0.495361328125}, {"start": 1262.62, "end": 1262.74, "word": " 5,", "probability": 0.80419921875}, {"start": 1262.82, "end": 1263.1, "word": " 0,", "probability": 0.92333984375}, {"start": 1263.22, "end": 1263.36, "word": " 2,", "probability": 0.98095703125}, {"start": 1263.42, "end": 1263.76, "word": " 5.", "probability": 0.97265625}, {"start": 1264.54, "end": 1264.98, "word": " Now,", "probability": 0.93115234375}, {"start": 1265.12, "end": 1265.56, "word": " what's", "probability": 0.957275390625}, {"start": 1265.56, "end": 1265.66, "word": " the", "probability": 0.92138671875}, {"start": 1265.66, "end": 1265.86, "word": " 
value", "probability": 0.9794921875}, {"start": 1265.86, "end": 1266.0, "word": " of", "probability": 0.96240234375}, {"start": 1266.0, "end": 1266.2, "word": " A", "probability": 0.677734375}, {"start": 1266.2, "end": 1266.76, "word": " softer,", "probability": 0.1199951171875}, {"start": 1267.12, "end": 1267.26, "word": " B", "probability": 0.6220703125}, {"start": 1267.26, "end": 1267.5, "word": " makes", "probability": 0.1478271484375}, {"start": 1267.5, "end": 1267.92, "word": " smaller", "probability": 0.7509765625}, {"start": 1267.92, "end": 1268.16, "word": " than", "probability": 0.919921875}, {"start": 1268.16, "end": 1268.28, "word": " A", "probability": 0.64697265625}, {"start": 1268.28, "end": 1268.44, "word": " by", "probability": 0.6630859375}, {"start": 1268.44, "end": 1268.62, "word": " this", "probability": 0.94970703125}, {"start": 1268.62, "end": 1268.82, "word": " one.", "probability": 0.91162109375}, {"start": 1269.16, "end": 1269.32, "word": " The", "probability": 0.87548828125}, {"start": 1269.32, "end": 1269.5, "word": " same", "probability": 0.9091796875}, {"start": 1269.5, "end": 1269.66, "word": " I", "probability": 0.5126953125}, {"start": 1269.66, "end": 1269.88, "word": " just", "probability": 0.90966796875}, {"start": 1269.88, "end": 1270.18, "word": " we", "probability": 0.63330078125}, {"start": 1270.18, "end": 1271.42, "word": " did", "probability": 0.92626953125}, {"start": 1271.42, "end": 1271.58, "word": " in", "probability": 0.9013671875}, {"start": 1271.58, "end": 1271.8, "word": " bar", "probability": 0.4345703125}, {"start": 1271.8, "end": 1272.14, "word": " D.", "probability": 0.853515625}, {"start": 1273.32, "end": 1273.76, "word": " So", "probability": 0.94970703125}, {"start": 1273.76, "end": 1274.06, "word": " that's", "probability": 0.87255859375}, {"start": 1274.06, "end": 1274.24, "word": " A.", "probability": 0.96142578125}, {"start": 1274.6, "end": 1274.88, "word": " Now,", "probability": 0.9453125}, {"start": 
1275.0, "end": 1275.28, "word": " what's", "probability": 0.939208984375}, {"start": 1275.28, "end": 1275.36, "word": " the", "probability": 0.91455078125}, {"start": 1275.36, "end": 1275.52, "word": " Z", "probability": 0.77734375}, {"start": 1275.52, "end": 1275.76, "word": " score", "probability": 0.490234375}, {"start": 1275.76, "end": 1275.92, "word": " in", "probability": 0.93115234375}, {"start": 1275.92, "end": 1276.08, "word": " this", "probability": 0.9462890625}, {"start": 1276.08, "end": 1276.36, "word": " case?", "probability": 0.9130859375}, {"start": 1279.16, "end": 1279.6, "word": " If", "probability": 0.939453125}, {"start": 1279.6, "end": 1279.82, "word": " we", "probability": 0.9599609375}, {"start": 1279.82, "end": 1280.06, "word": " go", "probability": 0.9638671875}, {"start": 1280.06, "end": 1280.38, "word": " back", "probability": 0.87548828125}, {"start": 1280.38, "end": 1280.56, "word": " to", "probability": 0.962890625}, {"start": 1280.56, "end": 1280.68, "word": " the", "probability": 0.912109375}, {"start": 1280.68, "end": 1280.96, "word": " normal", "probability": 0.8955078125}, {"start": 1280.96, "end": 1281.34, "word": " table,", "probability": 0.8828125}, {"start": 1283.14, "end": 1285.94, "word": " now", "probability": 0.76123046875}, {"start": 1285.94, "end": 1286.08, "word": " we", "probability": 0.8779296875}, {"start": 1286.08, "end": 1286.18, "word": " are", "probability": 0.9140625}, {"start": 1286.18, "end": 1286.42, "word": " looking", "probability": 0.92138671875}, {"start": 1286.42, "end": 1286.84, "word": " for", "probability": 0.9560546875}], "temperature": 1.0}, {"id": 49, "seek": 131336, "start": 1292.84, "end": 1313.36, "text": " Zero to five. So minus one point nine six. So Z equals minus one point nine six. 
Okay.", "tokens": [17182, 281, 1732, 13, 407, 3175, 472, 935, 4949, 2309, 13, 407, 1176, 6915, 3175, 472, 935, 4949, 2309, 13, 1033, 13], "avg_logprob": -0.40591031312942505, "compression_ratio": 1.3181818181818181, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1292.8400000000001, "end": 1293.66, "word": " Zero", "probability": 0.1328125}, {"start": 1293.66, "end": 1293.86, "word": " to", "probability": 0.9580078125}, {"start": 1293.86, "end": 1294.22, "word": " five.", "probability": 0.7578125}, {"start": 1297.36, "end": 1298.18, "word": " So", "probability": 0.217041015625}, {"start": 1298.18, "end": 1300.66, "word": " minus", "probability": 0.7490234375}, {"start": 1300.66, "end": 1301.04, "word": " one", "probability": 0.51416015625}, {"start": 1301.04, "end": 1301.28, "word": " point", "probability": 0.87841796875}, {"start": 1301.28, "end": 1301.72, "word": " nine", "probability": 0.77978515625}, {"start": 1301.72, "end": 1302.54, "word": " six.", "probability": 0.810546875}, {"start": 1307.84, "end": 1308.66, "word": " So", "probability": 0.908203125}, {"start": 1308.66, "end": 1308.84, "word": " Z", "probability": 0.422119140625}, {"start": 1308.84, "end": 1309.12, "word": " equals", "probability": 0.70703125}, {"start": 1309.12, "end": 1309.48, "word": " minus", "probability": 0.9658203125}, {"start": 1309.48, "end": 1309.76, "word": " one", "probability": 0.73095703125}, {"start": 1309.76, "end": 1309.96, "word": " point", "probability": 0.9677734375}, {"start": 1309.96, "end": 1310.26, "word": " nine", "probability": 0.9423828125}, {"start": 1310.26, "end": 1310.72, "word": " six.", "probability": 0.9296875}, {"start": 1313.04, "end": 1313.36, "word": " Okay.", "probability": 0.7431640625}], "temperature": 1.0}, {"id": 50, "seek": 134285, "start": 1314.27, "end": 1342.85, "text": " So now my A equal Mu plus D Sigma Mu is given is 0.8 Sigma is minus 1.9 Times the value of Sigma which is 0.2 Similarly to get the value of A B of X", 
"tokens": [407, 586, 452, 316, 2681, 15601, 1804, 413, 36595, 15601, 307, 2212, 307, 1958, 13, 23, 36595, 307, 3175, 502, 13, 24, 11366, 264, 2158, 295, 36595, 597, 307, 1958, 13, 17, 13157, 281, 483, 264, 2158, 295, 316, 363, 295, 1783], "avg_logprob": -0.3054142580475918, "compression_ratio": 1.2956521739130435, "no_speech_prob": 0.0, "words": [{"start": 1314.27, "end": 1314.57, "word": " So", "probability": 0.478271484375}, {"start": 1314.57, "end": 1314.83, "word": " now", "probability": 0.81591796875}, {"start": 1314.83, "end": 1315.25, "word": " my", "probability": 0.6044921875}, {"start": 1315.25, "end": 1315.65, "word": " A", "probability": 0.435546875}, {"start": 1315.65, "end": 1316.65, "word": " equal", "probability": 0.2197265625}, {"start": 1316.65, "end": 1316.91, "word": " Mu", "probability": 0.30322265625}, {"start": 1316.91, "end": 1317.19, "word": " plus", "probability": 0.775390625}, {"start": 1317.19, "end": 1317.37, "word": " D", "probability": 0.478515625}, {"start": 1317.37, "end": 1317.69, "word": " Sigma", "probability": 0.76171875}, {"start": 1317.69, "end": 1321.93, "word": " Mu", "probability": 0.5419921875}, {"start": 1321.93, "end": 1322.15, "word": " is", "probability": 0.72021484375}, {"start": 1322.15, "end": 1322.39, "word": " given", "probability": 0.85009765625}, {"start": 1322.39, "end": 1322.61, "word": " is", "probability": 0.419921875}, {"start": 1322.61, "end": 1322.77, "word": " 0", "probability": 0.7431640625}, {"start": 1322.77, "end": 1323.21, "word": ".8", "probability": 0.9912109375}, {"start": 1323.21, "end": 1325.95, "word": " Sigma", "probability": 0.74755859375}, {"start": 1325.95, "end": 1326.23, "word": " is", "probability": 0.9384765625}, {"start": 1326.23, "end": 1326.61, "word": " minus", "probability": 0.5947265625}, {"start": 1326.61, "end": 1326.87, "word": " 1", "probability": 0.912109375}, {"start": 1326.87, "end": 1327.51, "word": ".9", "probability": 0.9912109375}, {"start": 1327.51, "end": 1330.61, 
"word": " Times", "probability": 0.362060546875}, {"start": 1330.61, "end": 1331.25, "word": " the", "probability": 0.83349609375}, {"start": 1331.25, "end": 1331.55, "word": " value", "probability": 0.97265625}, {"start": 1331.55, "end": 1331.71, "word": " of", "probability": 0.9609375}, {"start": 1331.71, "end": 1332.01, "word": " Sigma", "probability": 0.70947265625}, {"start": 1332.01, "end": 1332.29, "word": " which", "probability": 0.53271484375}, {"start": 1332.29, "end": 1332.39, "word": " is", "probability": 0.943359375}, {"start": 1332.39, "end": 1332.55, "word": " 0", "probability": 0.86865234375}, {"start": 1332.55, "end": 1332.91, "word": ".2", "probability": 0.995849609375}, {"start": 1332.91, "end": 1337.19, "word": " Similarly", "probability": 0.681640625}, {"start": 1337.19, "end": 1338.21, "word": " to", "probability": 0.71533203125}, {"start": 1338.21, "end": 1338.41, "word": " get", "probability": 0.94287109375}, {"start": 1338.41, "end": 1338.57, "word": " the", "probability": 0.91552734375}, {"start": 1338.57, "end": 1338.79, "word": " value", "probability": 0.9755859375}, {"start": 1338.79, "end": 1338.97, "word": " of", "probability": 0.96630859375}, {"start": 1338.97, "end": 1339.29, "word": " A", "probability": 0.97509765625}, {"start": 1339.29, "end": 1340.61, "word": " B", "probability": 0.267333984375}, {"start": 1340.61, "end": 1342.51, "word": " of", "probability": 0.931640625}, {"start": 1342.51, "end": 1342.85, "word": " X", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 51, "seek": 137079, "start": 1344.19, "end": 1370.79, "text": " is less than B equals now the area to the left of B 95% plus 2.5 so that's minus 7.5 again", "tokens": [307, 1570, 813, 363, 6915, 586, 264, 1859, 281, 264, 1411, 295, 363, 13420, 4, 1804, 568, 13, 20, 370, 300, 311, 3175, 1614, 13, 20, 797], "avg_logprob": -0.24414061329194478, "compression_ratio": 1.0705882352941176, "no_speech_prob": 2.384185791015625e-07, "words": [{"start": 1344.19, 
"end": 1344.55, "word": " is", "probability": 0.2822265625}, {"start": 1344.55, "end": 1344.95, "word": " less", "probability": 0.89208984375}, {"start": 1344.95, "end": 1345.17, "word": " than", "probability": 0.90625}, {"start": 1345.17, "end": 1345.45, "word": " B", "probability": 0.55029296875}, {"start": 1345.45, "end": 1346.37, "word": " equals", "probability": 0.7880859375}, {"start": 1346.37, "end": 1346.89, "word": " now", "probability": 0.356689453125}, {"start": 1346.89, "end": 1347.09, "word": " the", "probability": 0.826171875}, {"start": 1347.09, "end": 1347.33, "word": " area", "probability": 0.86474609375}, {"start": 1347.33, "end": 1347.47, "word": " to", "probability": 0.96044921875}, {"start": 1347.47, "end": 1347.61, "word": " the", "probability": 0.92041015625}, {"start": 1347.61, "end": 1347.79, "word": " left", "probability": 0.93994140625}, {"start": 1347.79, "end": 1347.99, "word": " of", "probability": 0.9560546875}, {"start": 1347.99, "end": 1348.23, "word": " B", "probability": 0.97900390625}, {"start": 1348.23, "end": 1349.95, "word": " 95", "probability": 0.611328125}, {"start": 1349.95, "end": 1350.57, "word": "%", "probability": 0.44140625}, {"start": 1350.57, "end": 1351.79, "word": " plus", "probability": 0.7763671875}, {"start": 1351.79, "end": 1352.07, "word": " 2", "probability": 0.93798828125}, {"start": 1352.07, "end": 1352.65, "word": ".5", "probability": 0.973876953125}, {"start": 1352.65, "end": 1353.41, "word": " so", "probability": 0.6259765625}, {"start": 1353.41, "end": 1353.77, "word": " that's", "probability": 0.908447265625}, {"start": 1353.77, "end": 1354.17, "word": " minus", "probability": 0.58203125}, {"start": 1354.17, "end": 1354.41, "word": " 7", "probability": 0.9384765625}, {"start": 1354.41, "end": 1356.83, "word": ".5", "probability": 0.99609375}, {"start": 1356.83, "end": 1370.79, "word": " again", "probability": 0.6064453125}], "temperature": 1.0}, {"id": 52, "seek": 140409, "start": 1375.01, "end": 
1404.09, "text": " b of x smaller than a is 2.5 percent now to get the corresponding z value for 0 to 5 we have to look at the normal table inside the normal table we get from 0 to 5 corresponding to z score of minus 1.96 so my z score is negative 1.56 now use this value", "tokens": [272, 295, 2031, 4356, 813, 257, 307, 568, 13, 20, 3043, 586, 281, 483, 264, 11760, 710, 2158, 337, 1958, 281, 1025, 321, 362, 281, 574, 412, 264, 2710, 3199, 1854, 264, 2710, 3199, 321, 483, 490, 1958, 281, 1025, 11760, 281, 710, 6175, 295, 3175, 502, 13, 22962, 370, 452, 710, 6175, 307, 3671, 502, 13, 18317, 586, 764, 341, 2158], "avg_logprob": -0.25793649658324225, "compression_ratio": 1.6012658227848102, "no_speech_prob": 0.0, "words": [{"start": 1375.01, "end": 1375.35, "word": " b", "probability": 0.116943359375}, {"start": 1375.35, "end": 1375.53, "word": " of", "probability": 0.7763671875}, {"start": 1375.53, "end": 1375.75, "word": " x", "probability": 0.83154296875}, {"start": 1375.75, "end": 1376.25, "word": " smaller", "probability": 0.6279296875}, {"start": 1376.25, "end": 1376.59, "word": " than", "probability": 0.93408203125}, {"start": 1376.59, "end": 1376.85, "word": " a", "probability": 0.8935546875}, {"start": 1376.85, "end": 1377.21, "word": " is", "probability": 0.87890625}, {"start": 1377.21, "end": 1377.37, "word": " 2", "probability": 0.9130859375}, {"start": 1377.37, "end": 1377.77, "word": ".5", "probability": 0.977783203125}, {"start": 1377.77, "end": 1378.11, "word": " percent", "probability": 0.44970703125}, {"start": 1378.11, "end": 1380.85, "word": " now", "probability": 0.465087890625}, {"start": 1380.85, "end": 1382.11, "word": " to", "probability": 0.8837890625}, {"start": 1382.11, "end": 1382.33, "word": " get", "probability": 0.94580078125}, {"start": 1382.33, "end": 1382.53, "word": " the", "probability": 0.9052734375}, {"start": 1382.53, "end": 1383.05, "word": " corresponding", "probability": 0.845703125}, {"start": 1383.05, "end": 1383.35, "word": 
" z", "probability": 0.8671875}, {"start": 1383.35, "end": 1383.73, "word": " value", "probability": 0.9267578125}, {"start": 1383.73, "end": 1383.99, "word": " for", "probability": 0.798828125}, {"start": 1383.99, "end": 1384.25, "word": " 0", "probability": 0.783203125}, {"start": 1384.25, "end": 1384.43, "word": " to", "probability": 0.5634765625}, {"start": 1384.43, "end": 1384.83, "word": " 5", "probability": 0.9716796875}, {"start": 1384.83, "end": 1385.85, "word": " we", "probability": 0.814453125}, {"start": 1385.85, "end": 1386.11, "word": " have", "probability": 0.9501953125}, {"start": 1386.11, "end": 1386.25, "word": " to", "probability": 0.96728515625}, {"start": 1386.25, "end": 1386.47, "word": " look", "probability": 0.96240234375}, {"start": 1386.47, "end": 1386.81, "word": " at", "probability": 0.96142578125}, {"start": 1386.81, "end": 1388.03, "word": " the", "probability": 0.80615234375}, {"start": 1388.03, "end": 1388.37, "word": " normal", "probability": 0.74658203125}, {"start": 1388.37, "end": 1388.77, "word": " table", "probability": 0.43359375}, {"start": 1388.77, "end": 1389.31, "word": " inside", "probability": 0.84912109375}, {"start": 1389.31, "end": 1391.13, "word": " the", "probability": 0.56396484375}, {"start": 1391.13, "end": 1391.41, "word": " normal", "probability": 0.8525390625}, {"start": 1391.41, "end": 1391.73, "word": " table", "probability": 0.85693359375}, {"start": 1391.73, "end": 1391.91, "word": " we", "probability": 0.5380859375}, {"start": 1391.91, "end": 1392.29, "word": " get", "probability": 0.37158203125}, {"start": 1392.29, "end": 1393.31, "word": " from", "probability": 0.332275390625}, {"start": 1393.31, "end": 1393.61, "word": " 0", "probability": 0.90966796875}, {"start": 1393.61, "end": 1393.79, "word": " to", "probability": 0.970703125}, {"start": 1393.79, "end": 1394.21, "word": " 5", "probability": 0.99267578125}, {"start": 1394.21, "end": 1395.89, "word": " corresponding", "probability": 0.72216796875}, 
{"start": 1395.89, "end": 1396.17, "word": " to", "probability": 0.95654296875}, {"start": 1396.17, "end": 1396.37, "word": " z", "probability": 0.87109375}, {"start": 1396.37, "end": 1396.67, "word": " score", "probability": 0.54296875}, {"start": 1396.67, "end": 1396.91, "word": " of", "probability": 0.9140625}, {"start": 1396.91, "end": 1397.31, "word": " minus", "probability": 0.63720703125}, {"start": 1397.31, "end": 1397.55, "word": " 1", "probability": 0.70361328125}, {"start": 1397.55, "end": 1398.25, "word": ".96", "probability": 0.967529296875}, {"start": 1398.25, "end": 1399.31, "word": " so", "probability": 0.810546875}, {"start": 1399.31, "end": 1399.57, "word": " my", "probability": 0.96337890625}, {"start": 1399.57, "end": 1399.75, "word": " z", "probability": 0.98828125}, {"start": 1399.75, "end": 1400.01, "word": " score", "probability": 0.8759765625}, {"start": 1400.01, "end": 1400.17, "word": " is", "probability": 0.9521484375}, {"start": 1400.17, "end": 1400.43, "word": " negative", "probability": 0.8193359375}, {"start": 1400.43, "end": 1400.67, "word": " 1", "probability": 0.93994140625}, {"start": 1400.67, "end": 1401.15, "word": ".56", "probability": 0.83642578125}, {"start": 1401.15, "end": 1402.59, "word": " now", "probability": 0.87109375}, {"start": 1402.59, "end": 1403.03, "word": " use", "probability": 0.47900390625}, {"start": 1403.03, "end": 1403.73, "word": " this", "probability": 0.89697265625}, {"start": 1403.73, "end": 1404.09, "word": " value", "probability": 0.64208984375}], "temperature": 1.0}, {"id": 53, "seek": 142915, "start": 1405.49, "end": 1429.15, "text": " here, so mu equals 0.8 is the mean, minus 1.96, the score times 6. 
The other part, to get the value of B, the probability of X smaller than B equals 95 plus 2.5 is 975.", "tokens": [510, 11, 370, 2992, 6915, 1958, 13, 23, 307, 264, 914, 11, 3175, 502, 13, 22962, 11, 264, 6175, 1413, 1386, 13, 440, 661, 644, 11, 281, 483, 264, 2158, 295, 363, 11, 264, 8482, 295, 1783, 4356, 813, 363, 6915, 13420, 1804, 568, 13, 20, 307, 1722, 11901, 13], "avg_logprob": -0.31004901610168756, "compression_ratio": 1.2706766917293233, "no_speech_prob": 0.0, "words": [{"start": 1405.49, "end": 1406.25, "word": " here,", "probability": 0.32275390625}, {"start": 1407.41, "end": 1407.83, "word": " so", "probability": 0.79833984375}, {"start": 1407.83, "end": 1407.97, "word": " mu", "probability": 0.478271484375}, {"start": 1407.97, "end": 1408.29, "word": " equals", "probability": 0.196533203125}, {"start": 1408.29, "end": 1408.95, "word": " 0", "probability": 0.65966796875}, {"start": 1408.95, "end": 1409.37, "word": ".8", "probability": 0.969970703125}, {"start": 1409.37, "end": 1409.55, "word": " is", "probability": 0.44091796875}, {"start": 1409.55, "end": 1409.73, "word": " the", "probability": 0.88623046875}, {"start": 1409.73, "end": 1409.91, "word": " mean,", "probability": 0.94775390625}, {"start": 1410.49, "end": 1410.83, "word": " minus", "probability": 0.923828125}, {"start": 1410.83, "end": 1411.07, "word": " 1", "probability": 0.9765625}, {"start": 1411.07, "end": 1411.77, "word": ".96,", "probability": 0.984130859375}, {"start": 1412.03, "end": 1412.15, "word": " the", "probability": 0.277099609375}, {"start": 1412.15, "end": 1412.51, "word": " score", "probability": 0.79443359375}, {"start": 1412.51, "end": 1413.11, "word": " times", "probability": 0.59521484375}, {"start": 1413.11, "end": 1413.37, "word": " 6.", "probability": 0.2335205078125}, {"start": 1415.61, "end": 1416.37, "word": " The", "probability": 0.8447265625}, {"start": 1416.37, "end": 1416.65, "word": " other", "probability": 0.89501953125}, {"start": 1416.65, "end": 
1417.07, "word": " part,", "probability": 0.89501953125}, {"start": 1418.13, "end": 1418.57, "word": " to", "probability": 0.9609375}, {"start": 1418.57, "end": 1418.71, "word": " get", "probability": 0.9443359375}, {"start": 1418.71, "end": 1418.85, "word": " the", "probability": 0.91455078125}, {"start": 1418.85, "end": 1419.13, "word": " value", "probability": 0.974609375}, {"start": 1419.13, "end": 1419.33, "word": " of", "probability": 0.96044921875}, {"start": 1419.33, "end": 1419.49, "word": " B,", "probability": 0.66015625}, {"start": 1421.27, "end": 1421.67, "word": " the", "probability": 0.89013671875}, {"start": 1421.67, "end": 1422.11, "word": " probability", "probability": 0.935546875}, {"start": 1422.11, "end": 1422.45, "word": " of", "probability": 0.96435546875}, {"start": 1422.45, "end": 1422.71, "word": " X", "probability": 0.853515625}, {"start": 1422.71, "end": 1423.25, "word": " smaller", "probability": 0.70947265625}, {"start": 1423.25, "end": 1423.59, "word": " than", "probability": 0.94482421875}, {"start": 1423.59, "end": 1424.05, "word": " B", "probability": 0.98388671875}, {"start": 1424.05, "end": 1425.63, "word": " equals", "probability": 0.697265625}, {"start": 1425.63, "end": 1427.11, "word": " 95", "probability": 0.96142578125}, {"start": 1427.11, "end": 1427.63, "word": " plus", "probability": 0.90966796875}, {"start": 1427.63, "end": 1427.91, "word": " 2", "probability": 0.99560546875}, {"start": 1427.91, "end": 1428.39, "word": ".5", "probability": 0.969482421875}, {"start": 1428.39, "end": 1428.55, "word": " is", "probability": 0.317138671875}, {"start": 1428.55, "end": 1429.15, "word": " 975.", "probability": 0.8828125}], "temperature": 1.0}, {"id": 54, "seek": 145739, "start": 1431.99, "end": 1457.39, "text": " by using the same way we'll get that z score is 1.96 as we mentioned before because these two values here should be the z score the same so now b equals mu plus 1.96 times sigma and that will give you a 1.408", "tokens": 
[538, 1228, 264, 912, 636, 321, 603, 483, 300, 710, 6175, 307, 502, 13, 22962, 382, 321, 2835, 949, 570, 613, 732, 4190, 510, 820, 312, 264, 710, 6175, 264, 912, 370, 586, 272, 6915, 2992, 1804, 502, 13, 22962, 1413, 12771, 293, 300, 486, 976, 291, 257, 502, 13, 5254, 23], "avg_logprob": -0.2538325359236519, "compression_ratio": 1.4413793103448276, "no_speech_prob": 0.0, "words": [{"start": 1431.99, "end": 1432.23, "word": " by", "probability": 0.3154296875}, {"start": 1432.23, "end": 1432.49, "word": " using", "probability": 0.931640625}, {"start": 1432.49, "end": 1432.71, "word": " the", "probability": 0.908203125}, {"start": 1432.71, "end": 1432.93, "word": " same", "probability": 0.90087890625}, {"start": 1432.93, "end": 1433.17, "word": " way", "probability": 0.8271484375}, {"start": 1433.17, "end": 1433.83, "word": " we'll", "probability": 0.476806640625}, {"start": 1433.83, "end": 1434.05, "word": " get", "probability": 0.94091796875}, {"start": 1434.05, "end": 1434.25, "word": " that", "probability": 0.8076171875}, {"start": 1434.25, "end": 1434.43, "word": " z", "probability": 0.572265625}, {"start": 1434.43, "end": 1434.71, "word": " score", "probability": 0.52880859375}, {"start": 1434.71, "end": 1434.87, "word": " is", "probability": 0.91650390625}, {"start": 1434.87, "end": 1435.09, "word": " 1", "probability": 0.861328125}, {"start": 1435.09, "end": 1435.69, "word": ".96", "probability": 0.97412109375}, {"start": 1435.69, "end": 1436.09, "word": " as", "probability": 0.85498046875}, {"start": 1436.09, "end": 1436.21, "word": " we", "probability": 0.93310546875}, {"start": 1436.21, "end": 1436.43, "word": " mentioned", "probability": 0.853515625}, {"start": 1436.43, "end": 1436.83, "word": " before", "probability": 0.86572265625}, {"start": 1436.83, "end": 1438.01, "word": " because", "probability": 0.712890625}, {"start": 1438.01, "end": 1438.31, "word": " these", "probability": 0.875}, {"start": 1438.31, "end": 1438.55, "word": " two", "probability": 
0.916015625}, {"start": 1438.55, "end": 1438.93, "word": " values", "probability": 0.95361328125}, {"start": 1438.93, "end": 1439.23, "word": " here", "probability": 0.8359375}, {"start": 1439.23, "end": 1439.59, "word": " should", "probability": 0.9658203125}, {"start": 1439.59, "end": 1439.85, "word": " be", "probability": 0.94189453125}, {"start": 1439.85, "end": 1440.09, "word": " the", "probability": 0.791015625}, {"start": 1440.09, "end": 1440.27, "word": " z", "probability": 0.97119140625}, {"start": 1440.27, "end": 1440.51, "word": " score", "probability": 0.5947265625}, {"start": 1440.51, "end": 1440.73, "word": " the", "probability": 0.7470703125}, {"start": 1440.73, "end": 1441.05, "word": " same", "probability": 0.9111328125}, {"start": 1441.05, "end": 1442.61, "word": " so", "probability": 0.7568359375}, {"start": 1442.61, "end": 1442.83, "word": " now", "probability": 0.9453125}, {"start": 1442.83, "end": 1443.07, "word": " b", "probability": 0.5224609375}, {"start": 1443.07, "end": 1443.57, "word": " equals", "probability": 0.9248046875}, {"start": 1443.57, "end": 1444.73, "word": " mu", "probability": 0.576171875}, {"start": 1444.73, "end": 1446.41, "word": " plus", "probability": 0.9482421875}, {"start": 1446.41, "end": 1449.69, "word": " 1", "probability": 0.9404296875}, {"start": 1449.69, "end": 1450.35, "word": ".96", "probability": 0.99072265625}, {"start": 1450.35, "end": 1450.69, "word": " times", "probability": 0.8984375}, {"start": 1450.69, "end": 1451.17, "word": " sigma", "probability": 0.90673828125}, {"start": 1451.17, "end": 1452.57, "word": " and", "probability": 0.80712890625}, {"start": 1452.57, "end": 1452.75, "word": " that", "probability": 0.939453125}, {"start": 1452.75, "end": 1452.97, "word": " will", "probability": 0.58740234375}, {"start": 1452.97, "end": 1453.23, "word": " give", "probability": 0.7236328125}, {"start": 1453.23, "end": 1453.99, "word": " you", "probability": 0.385986328125}, {"start": 1453.99, "end": 
1456.15, "word": " a", "probability": 0.57373046875}, {"start": 1456.15, "end": 1456.43, "word": " 1", "probability": 0.291259765625}, {"start": 1456.43, "end": 1457.39, "word": ".408", "probability": 0.9558919270833334}], "temperature": 1.0}, {"id": 55, "seek": 149060, "start": 1463.28, "end": 1490.6, "text": " And B equals 1.1920. So these are the two values which has 95% between them. So 95% of the data, I mean 95% of the download times are between 0.4 seconds and 1.19 seconds.", "tokens": [400, 363, 6915, 502, 13, 3405, 2009, 13, 407, 613, 366, 264, 732, 4190, 597, 575, 13420, 4, 1296, 552, 13, 407, 13420, 4, 295, 264, 1412, 11, 286, 914, 13420, 4, 295, 264, 5484, 1413, 366, 1296, 1958, 13, 19, 3949, 293, 502, 13, 3405, 3949, 13], "avg_logprob": -0.17952806609017508, "compression_ratio": 1.3650793650793651, "no_speech_prob": 0.0, "words": [{"start": 1463.28, "end": 1463.62, "word": " And", "probability": 0.383056640625}, {"start": 1463.62, "end": 1463.9, "word": " B", "probability": 0.73828125}, {"start": 1463.9, "end": 1464.2, "word": " equals", "probability": 0.359130859375}, {"start": 1464.2, "end": 1464.44, "word": " 1", "probability": 0.90625}, {"start": 1464.44, "end": 1468.64, "word": ".1920.", "probability": 0.9749348958333334}, {"start": 1469.26, "end": 1469.48, "word": " So", "probability": 0.9052734375}, {"start": 1469.48, "end": 1469.7, "word": " these", "probability": 0.7412109375}, {"start": 1469.7, "end": 1469.9, "word": " are", "probability": 0.9404296875}, {"start": 1469.9, "end": 1470.02, "word": " the", "probability": 0.90185546875}, {"start": 1470.02, "end": 1470.18, "word": " two", "probability": 0.896484375}, {"start": 1470.18, "end": 1470.68, "word": " values", "probability": 0.96630859375}, {"start": 1470.68, "end": 1472.44, "word": " which", "probability": 0.6787109375}, {"start": 1472.44, "end": 1472.9, "word": " has", "probability": 0.7568359375}, {"start": 1472.9, "end": 1473.58, "word": " 95", "probability": 0.9404296875}, {"start": 
1473.58, "end": 1474.04, "word": "%", "probability": 0.7490234375}, {"start": 1474.04, "end": 1474.52, "word": " between", "probability": 0.87548828125}, {"start": 1474.52, "end": 1474.84, "word": " them.", "probability": 0.89404296875}, {"start": 1476.04, "end": 1476.44, "word": " So", "probability": 0.919921875}, {"start": 1476.44, "end": 1476.98, "word": " 95", "probability": 0.935546875}, {"start": 1476.98, "end": 1477.5, "word": "%", "probability": 0.98779296875}, {"start": 1477.5, "end": 1477.82, "word": " of", "probability": 0.95947265625}, {"start": 1477.82, "end": 1477.96, "word": " the", "probability": 0.919921875}, {"start": 1477.96, "end": 1478.32, "word": " data,", "probability": 0.92138671875}, {"start": 1479.82, "end": 1480.08, "word": " I", "probability": 0.97802734375}, {"start": 1480.08, "end": 1480.26, "word": " mean", "probability": 0.96875}, {"start": 1480.26, "end": 1480.72, "word": " 95", "probability": 0.86572265625}, {"start": 1480.72, "end": 1481.14, "word": "%", "probability": 0.9951171875}, {"start": 1481.14, "end": 1481.42, "word": " of", "probability": 0.95703125}, {"start": 1481.42, "end": 1481.62, "word": " the", "probability": 0.8994140625}, {"start": 1481.62, "end": 1482.08, "word": " download", "probability": 0.1787109375}, {"start": 1482.08, "end": 1482.78, "word": " times", "probability": 0.94482421875}, {"start": 1482.78, "end": 1485.06, "word": " are", "probability": 0.59765625}, {"start": 1485.06, "end": 1485.58, "word": " between", "probability": 0.87255859375}, {"start": 1485.58, "end": 1487.3, "word": " 0", "probability": 0.6484375}, {"start": 1487.3, "end": 1487.76, "word": ".4", "probability": 0.977294921875}, {"start": 1487.76, "end": 1488.38, "word": " seconds", "probability": 0.79345703125}, {"start": 1488.38, "end": 1488.96, "word": " and", "probability": 0.93310546875}, {"start": 1488.96, "end": 1489.44, "word": " 1", "probability": 0.99609375}, {"start": 1489.44, "end": 1490.18, "word": ".19", "probability": 
0.99658203125}, {"start": 1490.18, "end": 1490.6, "word": " seconds.", "probability": 0.79736328125}], "temperature": 1.0}, {"id": 56, "seek": 151621, "start": 1491.95, "end": 1516.21, "text": " make sense that is again 95 percent of the download times are between approximately 0.4 seconds and around 1.2 so this value is 0.4 the other one is approximately 1.2 so again 95 percent of the download times are between", "tokens": [652, 2020, 300, 307, 797, 13420, 3043, 295, 264, 5484, 1413, 366, 1296, 10447, 1958, 13, 19, 3949, 293, 926, 502, 13, 17, 370, 341, 2158, 307, 1958, 13, 19, 264, 661, 472, 307, 10447, 502, 13, 17, 370, 797, 13420, 3043, 295, 264, 5484, 1413, 366, 1296], "avg_logprob": -0.16820790329758, "compression_ratio": 1.7265625, "no_speech_prob": 0.0, "words": [{"start": 1491.95, "end": 1492.27, "word": " make", "probability": 0.2169189453125}, {"start": 1492.27, "end": 1492.61, "word": " sense", "probability": 0.7958984375}, {"start": 1492.61, "end": 1493.67, "word": " that", "probability": 0.54248046875}, {"start": 1493.67, "end": 1493.97, "word": " is", "probability": 0.92919921875}, {"start": 1493.97, "end": 1494.49, "word": " again", "probability": 0.927734375}, {"start": 1494.49, "end": 1495.65, "word": " 95", "probability": 0.873046875}, {"start": 1495.65, "end": 1496.23, "word": " percent", "probability": 0.4384765625}, {"start": 1496.23, "end": 1496.49, "word": " of", "probability": 0.97021484375}, {"start": 1496.49, "end": 1496.63, "word": " the", "probability": 0.91650390625}, {"start": 1496.63, "end": 1496.95, "word": " download", "probability": 0.86572265625}, {"start": 1496.95, "end": 1497.57, "word": " times", "probability": 0.91162109375}, {"start": 1497.57, "end": 1498.37, "word": " are", "probability": 0.94921875}, {"start": 1498.37, "end": 1498.79, "word": " between", "probability": 0.89111328125}, {"start": 1498.79, "end": 1500.23, "word": " approximately", "probability": 0.87744140625}, {"start": 1500.23, "end": 1500.57, "word": " 0", 
"probability": 0.79248046875}, {"start": 1500.57, "end": 1500.87, "word": ".4", "probability": 0.98974609375}, {"start": 1500.87, "end": 1501.33, "word": " seconds", "probability": 0.806640625}, {"start": 1501.33, "end": 1501.81, "word": " and", "probability": 0.94091796875}, {"start": 1501.81, "end": 1502.97, "word": " around", "probability": 0.9326171875}, {"start": 1502.97, "end": 1503.35, "word": " 1", "probability": 0.70458984375}, {"start": 1503.35, "end": 1504.11, "word": ".2", "probability": 0.949462890625}, {"start": 1504.11, "end": 1505.19, "word": " so", "probability": 0.34033203125}, {"start": 1505.19, "end": 1505.47, "word": " this", "probability": 0.93798828125}, {"start": 1505.47, "end": 1505.83, "word": " value", "probability": 0.9716796875}, {"start": 1505.83, "end": 1507.07, "word": " is", "probability": 0.93994140625}, {"start": 1507.07, "end": 1507.27, "word": " 0", "probability": 0.8955078125}, {"start": 1507.27, "end": 1507.63, "word": ".4", "probability": 0.99658203125}, {"start": 1507.63, "end": 1509.41, "word": " the", "probability": 0.763671875}, {"start": 1509.41, "end": 1509.65, "word": " other", "probability": 0.88671875}, {"start": 1509.65, "end": 1509.81, "word": " one", "probability": 0.9306640625}, {"start": 1509.81, "end": 1509.93, "word": " is", "probability": 0.953125}, {"start": 1509.93, "end": 1510.37, "word": " approximately", "probability": 0.90283203125}, {"start": 1510.37, "end": 1510.59, "word": " 1", "probability": 0.96728515625}, {"start": 1510.59, "end": 1510.97, "word": ".2", "probability": 0.99560546875}, {"start": 1510.97, "end": 1511.83, "word": " so", "probability": 0.7978515625}, {"start": 1511.83, "end": 1512.15, "word": " again", "probability": 0.9521484375}, {"start": 1512.15, "end": 1513.15, "word": " 95", "probability": 0.87890625}, {"start": 1513.15, "end": 1513.59, "word": " percent", "probability": 0.798828125}, {"start": 1513.59, "end": 1513.79, "word": " of", "probability": 0.96923828125}, {"start": 
1513.79, "end": 1513.91, "word": " the", "probability": 0.92236328125}, {"start": 1513.91, "end": 1514.21, "word": " download", "probability": 0.9462890625}, {"start": 1514.21, "end": 1514.75, "word": " times", "probability": 0.9521484375}, {"start": 1514.75, "end": 1515.83, "word": " are", "probability": 0.94873046875}, {"start": 1515.83, "end": 1516.21, "word": " between", "probability": 0.8837890625}], "temperature": 1.0}, {"id": 57, "seek": 154443, "start": 1517.65, "end": 1544.43, "text": " 0.4 seconds approximately and one minute. This problem maybe is the most important one for this chapter. Exactly in the exam you will see something like that. Either for part A, B and C which are the same and the backward normal calculations as part D and E. Any question?", "tokens": [1958, 13, 19, 3949, 10447, 293, 472, 3456, 13, 639, 1154, 1310, 307, 264, 881, 1021, 472, 337, 341, 7187, 13, 7587, 294, 264, 1139, 291, 486, 536, 746, 411, 300, 13, 13746, 337, 644, 316, 11, 363, 293, 383, 597, 366, 264, 912, 293, 264, 23897, 2710, 20448, 382, 644, 413, 293, 462, 13, 2639, 1168, 30], "avg_logprob": -0.21159957121994535, "compression_ratio": 1.436842105263158, "no_speech_prob": 0.0, "words": [{"start": 1517.65, "end": 1517.89, "word": " 0", "probability": 0.367431640625}, {"start": 1517.89, "end": 1518.19, "word": ".4", "probability": 0.989013671875}, {"start": 1518.19, "end": 1518.55, "word": " seconds", "probability": 0.75439453125}, {"start": 1518.55, "end": 1519.15, "word": " approximately", "probability": 0.59814453125}, {"start": 1519.15, "end": 1519.59, "word": " and", "probability": 0.8369140625}, {"start": 1519.59, "end": 1520.17, "word": " one", "probability": 0.20263671875}, {"start": 1520.17, "end": 1520.45, "word": " minute.", "probability": 0.5859375}, {"start": 1521.25, "end": 1521.61, "word": " This", "probability": 0.83154296875}, {"start": 1521.61, "end": 1522.23, "word": " problem", "probability": 0.84326171875}, {"start": 1522.23, "end": 1522.47, "word": " 
maybe", "probability": 0.54736328125}, {"start": 1522.47, "end": 1522.69, "word": " is", "probability": 0.9013671875}, {"start": 1522.69, "end": 1523.05, "word": " the", "probability": 0.91552734375}, {"start": 1523.05, "end": 1523.33, "word": " most", "probability": 0.90185546875}, {"start": 1523.33, "end": 1523.95, "word": " important", "probability": 0.88916015625}, {"start": 1523.95, "end": 1524.33, "word": " one", "probability": 0.927734375}, {"start": 1524.33, "end": 1525.09, "word": " for", "probability": 0.91552734375}, {"start": 1525.09, "end": 1525.27, "word": " this", "probability": 0.9384765625}, {"start": 1525.27, "end": 1525.59, "word": " chapter.", "probability": 0.90966796875}, {"start": 1526.79, "end": 1527.51, "word": " Exactly", "probability": 0.7451171875}, {"start": 1527.51, "end": 1527.69, "word": " in", "probability": 0.88525390625}, {"start": 1527.69, "end": 1527.77, "word": " the", "probability": 0.92138671875}, {"start": 1527.77, "end": 1528.15, "word": " exam", "probability": 0.9765625}, {"start": 1528.15, "end": 1529.73, "word": " you", "probability": 0.50634765625}, {"start": 1529.73, "end": 1529.89, "word": " will", "probability": 0.8740234375}, {"start": 1529.89, "end": 1530.03, "word": " see", "probability": 0.92919921875}, {"start": 1530.03, "end": 1530.29, "word": " something", "probability": 0.86572265625}, {"start": 1530.29, "end": 1530.55, "word": " like", "probability": 0.94140625}, {"start": 1530.55, "end": 1531.41, "word": " that.", "probability": 0.93505859375}, {"start": 1532.61, "end": 1533.23, "word": " Either", "probability": 0.73095703125}, {"start": 1533.23, "end": 1533.59, "word": " for", "probability": 0.9140625}, {"start": 1533.59, "end": 1533.97, "word": " part", "probability": 0.8681640625}, {"start": 1533.97, "end": 1534.17, "word": " A,", "probability": 0.7099609375}, {"start": 1534.21, "end": 1534.31, "word": " B", "probability": 0.955078125}, {"start": 1534.31, "end": 1534.49, "word": " and", "probability": 
0.74267578125}, {"start": 1534.49, "end": 1534.67, "word": " C", "probability": 0.99462890625}, {"start": 1534.67, "end": 1534.83, "word": " which", "probability": 0.6240234375}, {"start": 1534.83, "end": 1535.01, "word": " are", "probability": 0.9248046875}, {"start": 1535.01, "end": 1535.17, "word": " the", "probability": 0.9150390625}, {"start": 1535.17, "end": 1535.49, "word": " same", "probability": 0.90673828125}, {"start": 1535.49, "end": 1537.37, "word": " and", "probability": 0.48388671875}, {"start": 1537.37, "end": 1538.11, "word": " the", "probability": 0.88623046875}, {"start": 1538.11, "end": 1538.51, "word": " backward", "probability": 0.92529296875}, {"start": 1538.51, "end": 1538.99, "word": " normal", "probability": 0.859375}, {"start": 1538.99, "end": 1539.67, "word": " calculations", "probability": 0.8896484375}, {"start": 1539.67, "end": 1540.13, "word": " as", "probability": 0.9375}, {"start": 1540.13, "end": 1540.49, "word": " part", "probability": 0.8876953125}, {"start": 1540.49, "end": 1540.77, "word": " D", "probability": 0.8935546875}, {"start": 1540.77, "end": 1542.07, "word": " and", "probability": 0.94384765625}, {"start": 1542.07, "end": 1542.39, "word": " E.", "probability": 0.9931640625}, {"start": 1543.67, "end": 1544.09, "word": " Any", "probability": 0.90625}, {"start": 1544.09, "end": 1544.43, "word": " question?", "probability": 0.80322265625}], "temperature": 1.0}, {"id": 58, "seek": 156324, "start": 1551.66, "end": 1563.24, "text": " let's go solve true and false problems for the practice in chat asses", "tokens": [718, 311, 352, 5039, 2074, 293, 7908, 2740, 337, 264, 3124, 294, 5081, 1256, 279], "avg_logprob": -0.7001953013241291, "compression_ratio": 1.0606060606060606, "no_speech_prob": 2.6106834411621094e-05, "words": [{"start": 1551.6599999999999, "end": 1552.62, "word": " let's", "probability": 0.56414794921875}, {"start": 1552.62, "end": 1552.92, "word": " go", "probability": 0.64013671875}, {"start": 1552.92, "end": 
1553.64, "word": " solve", "probability": 0.377197265625}, {"start": 1553.64, "end": 1556.42, "word": " true", "probability": 0.658203125}, {"start": 1556.42, "end": 1556.64, "word": " and", "probability": 0.9140625}, {"start": 1556.64, "end": 1556.94, "word": " false", "probability": 0.7861328125}, {"start": 1556.94, "end": 1557.42, "word": " problems", "probability": 0.86572265625}, {"start": 1557.42, "end": 1559.44, "word": " for", "probability": 0.65625}, {"start": 1559.44, "end": 1559.6, "word": " the", "probability": 0.85302734375}, {"start": 1559.6, "end": 1560.26, "word": " practice", "probability": 0.90869140625}, {"start": 1560.26, "end": 1562.58, "word": " in", "probability": 0.50830078125}, {"start": 1562.58, "end": 1562.84, "word": " chat", "probability": 0.086669921875}, {"start": 1562.84, "end": 1563.24, "word": " asses", "probability": 0.2396240234375}], "temperature": 1.0}, {"id": 59, "seek": 159702, "start": 1580.84, "end": 1597.02, "text": " The Z-score should be one positive and the other is negative, not A and B. 
The corresponding Z-score here should have the same values but negative sign, not A and B.", "tokens": [440, 1176, 12, 4417, 418, 820, 312, 472, 3353, 293, 264, 661, 307, 3671, 11, 406, 316, 293, 363, 13, 440, 11760, 1176, 12, 4417, 418, 510, 820, 362, 264, 912, 4190, 457, 3671, 1465, 11, 406, 316, 293, 363, 13], "avg_logprob": -0.23493303713344393, "compression_ratio": 1.4690265486725664, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1580.8400000000001, "end": 1581.4, "word": " The", "probability": 0.20654296875}, {"start": 1581.4, "end": 1581.66, "word": " Z", "probability": 0.55859375}, {"start": 1581.66, "end": 1582.1, "word": "-score", "probability": 0.6898600260416666}, {"start": 1582.1, "end": 1582.32, "word": " should", "probability": 0.9609375}, {"start": 1582.32, "end": 1582.6, "word": " be", "probability": 0.9375}, {"start": 1582.6, "end": 1583.5, "word": " one", "probability": 0.88037109375}, {"start": 1583.5, "end": 1583.86, "word": " positive", "probability": 0.92578125}, {"start": 1583.86, "end": 1584.12, "word": " and", "probability": 0.90673828125}, {"start": 1584.12, "end": 1584.2, "word": " the", "probability": 0.3603515625}, {"start": 1584.2, "end": 1584.38, "word": " other", "probability": 0.87158203125}, {"start": 1584.38, "end": 1584.54, "word": " is", "probability": 0.64453125}, {"start": 1584.54, "end": 1584.86, "word": " negative,", "probability": 0.94140625}, {"start": 1586.12, "end": 1586.44, "word": " not", "probability": 0.9296875}, {"start": 1586.44, "end": 1586.66, "word": " A", "probability": 0.5322265625}, {"start": 1586.66, "end": 1586.78, "word": " and", "probability": 0.92041015625}, {"start": 1586.78, "end": 1586.8, "word": " B.", "probability": 0.986328125}, {"start": 1587.54, "end": 1587.78, "word": " The", "probability": 0.87353515625}, {"start": 1587.78, "end": 1588.26, "word": " corresponding", "probability": 0.82470703125}, {"start": 1588.26, "end": 1588.58, "word": " Z", "probability": 0.94873046875}, 
{"start": 1588.58, "end": 1589.0, "word": "-score", "probability": 0.88232421875}, {"start": 1589.0, "end": 1590.6, "word": " here", "probability": 0.7021484375}, {"start": 1590.6, "end": 1590.88, "word": " should", "probability": 0.9375}, {"start": 1590.88, "end": 1592.88, "word": " have", "probability": 0.62548828125}, {"start": 1592.88, "end": 1593.08, "word": " the", "probability": 0.916015625}, {"start": 1593.08, "end": 1593.4, "word": " same", "probability": 0.90625}, {"start": 1593.4, "end": 1594.6, "word": " values", "probability": 0.884765625}, {"start": 1594.6, "end": 1594.94, "word": " but", "probability": 0.60400390625}, {"start": 1594.94, "end": 1595.6, "word": " negative", "probability": 0.80859375}, {"start": 1595.6, "end": 1596.02, "word": " sign,", "probability": 0.83837890625}, {"start": 1596.2, "end": 1596.4, "word": " not", "probability": 0.9326171875}, {"start": 1596.4, "end": 1596.64, "word": " A", "probability": 0.9873046875}, {"start": 1596.64, "end": 1596.82, "word": " and", "probability": 0.939453125}, {"start": 1596.82, "end": 1597.02, "word": " B.", "probability": 0.99853515625}], "temperature": 1.0}, {"id": 60, "seek": 162940, "start": 1600.12, "end": 1629.4, "text": " now let's do some rex problems for chapter 6 now just look at the minus sign the probability that standard normal random variable C falls between minus 1.5 and 0.81 so it's similar to this one but this is straight forward this score between minus 1.5", "tokens": [586, 718, 311, 360, 512, 319, 87, 2740, 337, 7187, 1386, 586, 445, 574, 412, 264, 3175, 1465, 264, 8482, 300, 3832, 2710, 4974, 7006, 383, 8804, 1296, 3175, 502, 13, 20, 293, 1958, 13, 32875, 370, 309, 311, 2531, 281, 341, 472, 457, 341, 307, 2997, 2128, 341, 6175, 1296, 3175, 502, 13, 20], "avg_logprob": -0.39704239794186186, "compression_ratio": 1.539877300613497, "no_speech_prob": 0.0, "words": [{"start": 1600.12, "end": 1600.52, "word": " now", "probability": 0.42041015625}, {"start": 1600.52, "end": 1600.88, 
"word": " let's", "probability": 0.8896484375}, {"start": 1600.88, "end": 1601.06, "word": " do", "probability": 0.93798828125}, {"start": 1601.06, "end": 1601.4, "word": " some", "probability": 0.90478515625}, {"start": 1601.4, "end": 1601.84, "word": " rex", "probability": 0.46356201171875}, {"start": 1601.84, "end": 1602.32, "word": " problems", "probability": 0.5966796875}, {"start": 1602.32, "end": 1602.56, "word": " for", "probability": 0.443115234375}, {"start": 1602.56, "end": 1602.86, "word": " chapter", "probability": 0.8076171875}, {"start": 1602.86, "end": 1603.82, "word": " 6", "probability": 0.5595703125}, {"start": 1603.82, "end": 1608.38, "word": " now", "probability": 0.68701171875}, {"start": 1608.38, "end": 1608.86, "word": " just", "probability": 0.84130859375}, {"start": 1608.86, "end": 1609.7, "word": " look", "probability": 0.958984375}, {"start": 1609.7, "end": 1609.94, "word": " at", "probability": 0.966796875}, {"start": 1609.94, "end": 1610.16, "word": " the", "probability": 0.51904296875}, {"start": 1610.16, "end": 1610.66, "word": " minus", "probability": 0.1129150390625}, {"start": 1610.66, "end": 1610.92, "word": " sign", "probability": 0.158203125}, {"start": 1610.92, "end": 1612.56, "word": " the", "probability": 0.2890625}, {"start": 1612.56, "end": 1613.02, "word": " probability", "probability": 0.9794921875}, {"start": 1613.02, "end": 1613.32, "word": " that", "probability": 0.87060546875}, {"start": 1613.32, "end": 1613.74, "word": " standard", "probability": 0.43359375}, {"start": 1613.74, "end": 1614.12, "word": " normal", "probability": 0.787109375}, {"start": 1614.12, "end": 1614.68, "word": " random", "probability": 0.2222900390625}, {"start": 1614.68, "end": 1615.04, "word": " variable", "probability": 0.86572265625}, {"start": 1615.04, "end": 1615.52, "word": " C", "probability": 0.126953125}, {"start": 1615.52, "end": 1616.62, "word": " falls", "probability": 0.7158203125}, {"start": 1616.62, "end": 1617.22, "word": " 
between", "probability": 0.890625}, {"start": 1617.22, "end": 1618.64, "word": " minus", "probability": 0.73388671875}, {"start": 1618.64, "end": 1618.9, "word": " 1", "probability": 0.759765625}, {"start": 1618.9, "end": 1619.5, "word": ".5", "probability": 0.982666015625}, {"start": 1619.5, "end": 1619.84, "word": " and", "probability": 0.92822265625}, {"start": 1619.84, "end": 1620.08, "word": " 0", "probability": 0.7529296875}, {"start": 1620.08, "end": 1620.46, "word": ".81", "probability": 0.98046875}, {"start": 1620.46, "end": 1621.76, "word": " so", "probability": 0.78076171875}, {"start": 1621.76, "end": 1622.0, "word": " it's", "probability": 0.9296875}, {"start": 1622.0, "end": 1622.3, "word": " similar", "probability": 0.96728515625}, {"start": 1622.3, "end": 1622.64, "word": " to", "probability": 0.9677734375}, {"start": 1622.64, "end": 1622.86, "word": " this", "probability": 0.951171875}, {"start": 1622.86, "end": 1623.1, "word": " one", "probability": 0.9375}, {"start": 1623.1, "end": 1624.62, "word": " but", "probability": 0.86572265625}, {"start": 1624.62, "end": 1625.06, "word": " this", "probability": 0.9306640625}, {"start": 1625.06, "end": 1625.22, "word": " is", "probability": 0.94091796875}, {"start": 1625.22, "end": 1625.6, "word": " straight", "probability": 0.494873046875}, {"start": 1625.6, "end": 1626.0, "word": " forward", "probability": 0.86328125}, {"start": 1626.0, "end": 1626.46, "word": " this", "probability": 0.56298828125}, {"start": 1626.46, "end": 1626.86, "word": " score", "probability": 0.26220703125}, {"start": 1626.86, "end": 1628.18, "word": " between", "probability": 0.62255859375}, {"start": 1628.18, "end": 1628.54, "word": " minus", "probability": 0.93310546875}, {"start": 1628.54, "end": 1628.78, "word": " 1", "probability": 0.93115234375}, {"start": 1628.78, "end": 1629.4, "word": ".5", "probability": 0.992431640625}], "temperature": 1.0}, {"id": 61, "seek": 165853, "start": 1630.17, "end": 1658.53, "text": " up to 
0.81 okay so number 23 again the probability that standard normal random variable z fall between minus 1.5 and 0.81", "tokens": [493, 281, 1958, 13, 32875, 1392, 370, 1230, 6673, 797, 264, 8482, 300, 3832, 2710, 4974, 7006, 710, 2100, 1296, 3175, 502, 13, 20, 293, 1958, 13, 32875], "avg_logprob": -0.2474407204266252, "compression_ratio": 1.1730769230769231, "no_speech_prob": 0.0, "words": [{"start": 1630.17, "end": 1630.53, "word": " up", "probability": 0.53515625}, {"start": 1630.53, "end": 1630.81, "word": " to", "probability": 0.91259765625}, {"start": 1630.81, "end": 1631.29, "word": " 0", "probability": 0.362548828125}, {"start": 1631.29, "end": 1631.77, "word": ".81", "probability": 0.979736328125}, {"start": 1631.77, "end": 1638.75, "word": " okay", "probability": 0.16552734375}, {"start": 1638.75, "end": 1647.97, "word": " so", "probability": 0.41064453125}, {"start": 1647.97, "end": 1648.25, "word": " number", "probability": 0.9091796875}, {"start": 1648.25, "end": 1648.67, "word": " 23", "probability": 0.83984375}, {"start": 1648.67, "end": 1649.15, "word": " again", "probability": 0.9326171875}, {"start": 1649.15, "end": 1650.79, "word": " the", "probability": 0.75927734375}, {"start": 1650.79, "end": 1651.33, "word": " probability", "probability": 0.92529296875}, {"start": 1651.33, "end": 1651.59, "word": " that", "probability": 0.91455078125}, {"start": 1651.59, "end": 1652.05, "word": " standard", "probability": 0.8017578125}, {"start": 1652.05, "end": 1652.47, "word": " normal", "probability": 0.8486328125}, {"start": 1652.47, "end": 1652.85, "word": " random", "probability": 0.84228515625}, {"start": 1652.85, "end": 1653.43, "word": " variable", "probability": 0.9208984375}, {"start": 1653.43, "end": 1654.49, "word": " z", "probability": 0.673828125}, {"start": 1654.49, "end": 1654.83, "word": " fall", "probability": 0.67529296875}, {"start": 1654.83, "end": 1655.49, "word": " between", "probability": 0.8935546875}, {"start": 1655.49, "end": 
1655.87, "word": " minus", "probability": 0.6357421875}, {"start": 1655.87, "end": 1656.15, "word": " 1", "probability": 0.84130859375}, {"start": 1656.15, "end": 1656.79, "word": ".5", "probability": 0.986328125}, {"start": 1656.79, "end": 1657.85, "word": " and", "probability": 0.9384765625}, {"start": 1657.85, "end": 1658.09, "word": " 0", "probability": 0.89501953125}, {"start": 1658.09, "end": 1658.53, "word": ".81", "probability": 0.993408203125}], "temperature": 1.0}, {"id": 62, "seek": 168491, "start": 1659.81, "end": 1684.91, "text": " So it's going to be, we are looking for this probability. So it's z less than one point one minus. Now just do it by yourself, you will figure that the final answer is point seven four.", "tokens": [407, 309, 311, 516, 281, 312, 11, 321, 366, 1237, 337, 341, 8482, 13, 407, 309, 311, 710, 1570, 813, 472, 935, 472, 3175, 13, 823, 445, 360, 309, 538, 1803, 11, 291, 486, 2573, 300, 264, 2572, 1867, 307, 935, 3407, 1451, 13], "avg_logprob": -0.3388888862397936, "compression_ratio": 1.3576642335766422, "no_speech_prob": 0.0, "words": [{"start": 1659.81, "end": 1660.03, "word": " So", "probability": 0.266845703125}, {"start": 1660.03, "end": 1660.25, "word": " it's", "probability": 0.434967041015625}, {"start": 1660.25, "end": 1660.35, "word": " going", "probability": 0.6650390625}, {"start": 1660.35, "end": 1660.55, "word": " to", "probability": 0.97265625}, {"start": 1660.55, "end": 1660.73, "word": " be,", "probability": 0.95654296875}, {"start": 1661.05, "end": 1661.25, "word": " we", "probability": 0.9150390625}, {"start": 1661.25, "end": 1661.39, "word": " are", "probability": 0.9033203125}, {"start": 1661.39, "end": 1661.67, "word": " looking", "probability": 0.9091796875}, {"start": 1661.67, "end": 1661.91, "word": " for", "probability": 0.92333984375}, {"start": 1661.91, "end": 1662.13, "word": " this", "probability": 0.88525390625}, {"start": 1662.13, "end": 1662.53, "word": " probability.", "probability": 0.64990234375}, 
{"start": 1668.15, "end": 1668.71, "word": " So", "probability": 0.66064453125}, {"start": 1668.71, "end": 1668.97, "word": " it's", "probability": 0.93310546875}, {"start": 1668.97, "end": 1669.19, "word": " z", "probability": 0.3427734375}, {"start": 1669.19, "end": 1669.47, "word": " less", "probability": 0.9169921875}, {"start": 1669.47, "end": 1669.71, "word": " than", "probability": 0.93310546875}, {"start": 1669.71, "end": 1669.97, "word": " one", "probability": 0.363525390625}, {"start": 1669.97, "end": 1670.33, "word": " point", "probability": 0.658203125}, {"start": 1670.33, "end": 1670.61, "word": " one", "probability": 0.345703125}, {"start": 1670.61, "end": 1673.25, "word": " minus.", "probability": 0.8935546875}, {"start": 1677.67, "end": 1678.01, "word": " Now", "probability": 0.73291015625}, {"start": 1678.01, "end": 1678.29, "word": " just", "probability": 0.7783203125}, {"start": 1678.29, "end": 1678.59, "word": " do", "probability": 0.966796875}, {"start": 1678.59, "end": 1678.79, "word": " it", "probability": 0.95361328125}, {"start": 1678.79, "end": 1679.07, "word": " by", "probability": 0.9404296875}, {"start": 1679.07, "end": 1679.49, "word": " yourself,", "probability": 0.84228515625}, {"start": 1679.59, "end": 1679.67, "word": " you", "probability": 0.95361328125}, {"start": 1679.67, "end": 1679.79, "word": " will", "probability": 0.59619140625}, {"start": 1679.79, "end": 1679.99, "word": " figure", "probability": 0.97021484375}, {"start": 1679.99, "end": 1680.33, "word": " that", "probability": 0.72900390625}, {"start": 1680.33, "end": 1680.87, "word": " the", "probability": 0.8427734375}, {"start": 1680.87, "end": 1681.27, "word": " final", "probability": 0.947265625}, {"start": 1681.27, "end": 1681.71, "word": " answer", "probability": 0.958984375}, {"start": 1681.71, "end": 1682.13, "word": " is", "probability": 0.95068359375}, {"start": 1682.13, "end": 1683.59, "word": " point", "probability": 0.7724609375}, {"start": 1683.59, "end": 
1684.13, "word": " seven", "probability": 0.91650390625}, {"start": 1684.13, "end": 1684.91, "word": " four.", "probability": 0.408203125}], "temperature": 1.0}, {"id": 63, "seek": 171677, "start": 1687.97, "end": 1716.77, "text": " That's for 23. I think straightforward one. Let's do one more, 25 for example. The probability that standard normal random variable is below 196. See? Below 1.96. Now from the table, if we", "tokens": [663, 311, 337, 6673, 13, 286, 519, 15325, 472, 13, 961, 311, 360, 472, 544, 11, 3552, 337, 1365, 13, 440, 8482, 300, 3832, 2710, 4974, 7006, 307, 2507, 7998, 13, 3008, 30, 36261, 502, 13, 22962, 13, 823, 490, 264, 3199, 11, 498, 321], "avg_logprob": -0.2459239020295765, "compression_ratio": 1.277027027027027, "no_speech_prob": 0.0, "words": [{"start": 1687.97, "end": 1688.33, "word": " That's", "probability": 0.630615234375}, {"start": 1688.33, "end": 1688.47, "word": " for", "probability": 0.79345703125}, {"start": 1688.47, "end": 1688.85, "word": " 23.", "probability": 0.8701171875}, {"start": 1689.21, "end": 1689.47, "word": " I", "probability": 0.9794921875}, {"start": 1689.47, "end": 1689.69, "word": " think", "probability": 0.91748046875}, {"start": 1689.69, "end": 1690.17, "word": " straightforward", "probability": 0.73486328125}, {"start": 1690.17, "end": 1690.73, "word": " one.", "probability": 0.900390625}, {"start": 1694.49, "end": 1695.25, "word": " Let's", "probability": 0.84619140625}, {"start": 1695.25, "end": 1695.37, "word": " do", "probability": 0.96337890625}, {"start": 1695.37, "end": 1695.53, "word": " one", "probability": 0.9169921875}, {"start": 1695.53, "end": 1695.81, "word": " more,", "probability": 0.9443359375}, {"start": 1696.09, "end": 1696.63, "word": " 25", "probability": 0.88818359375}, {"start": 1696.63, "end": 1696.97, "word": " for", "probability": 0.80712890625}, {"start": 1696.97, "end": 1697.33, "word": " example.", "probability": 0.974609375}, {"start": 1700.69, "end": 1701.01, "word": " The", 
"probability": 0.884765625}, {"start": 1701.01, "end": 1701.51, "word": " probability", "probability": 0.9462890625}, {"start": 1701.51, "end": 1701.91, "word": " that", "probability": 0.9189453125}, {"start": 1701.91, "end": 1702.49, "word": " standard", "probability": 0.748046875}, {"start": 1702.49, "end": 1702.85, "word": " normal", "probability": 0.89501953125}, {"start": 1702.85, "end": 1703.23, "word": " random", "probability": 0.61376953125}, {"start": 1703.23, "end": 1703.67, "word": " variable", "probability": 0.9111328125}, {"start": 1703.67, "end": 1703.93, "word": " is", "probability": 0.92724609375}, {"start": 1703.93, "end": 1704.71, "word": " below", "probability": 0.8251953125}, {"start": 1704.71, "end": 1706.45, "word": " 196.", "probability": 0.8701171875}, {"start": 1708.39, "end": 1708.71, "word": " See?", "probability": 0.578125}, {"start": 1711.23, "end": 1711.99, "word": " Below", "probability": 0.8740234375}, {"start": 1711.99, "end": 1712.25, "word": " 1", "probability": 0.66552734375}, {"start": 1712.25, "end": 1713.69, "word": ".96.", "probability": 0.666748046875}, {"start": 1714.65, "end": 1714.85, "word": " Now", "probability": 0.943359375}, {"start": 1714.85, "end": 1715.11, "word": " from", "probability": 0.724609375}, {"start": 1715.11, "end": 1715.27, "word": " the", "probability": 0.92626953125}, {"start": 1715.27, "end": 1715.67, "word": " table,", "probability": 0.89013671875}, {"start": 1716.07, "end": 1716.55, "word": " if", "probability": 0.93017578125}, {"start": 1716.55, "end": 1716.77, "word": " we", "probability": 0.958984375}], "temperature": 1.0}, {"id": 64, "seek": 174487, "start": 1718.35, "end": 1744.87, "text": " look at the normal table 1.96 now the area below 1.6 96 975 so it's here it mentioned that it's 0.4 so so this one is false or", "tokens": [574, 412, 264, 2710, 3199, 502, 13, 22962, 586, 264, 1859, 2507, 502, 13, 21, 24124, 1722, 11901, 370, 309, 311, 510, 309, 2835, 300, 309, 311, 1958, 13, 19, 370, 370, 
341, 472, 307, 7908, 420], "avg_logprob": -0.32175165336383016, "compression_ratio": 1.233009708737864, "no_speech_prob": 0.0, "words": [{"start": 1718.35, "end": 1718.71, "word": " look", "probability": 0.6376953125}, {"start": 1718.71, "end": 1718.97, "word": " at", "probability": 0.96923828125}, {"start": 1718.97, "end": 1719.25, "word": " the", "probability": 0.91015625}, {"start": 1719.25, "end": 1719.63, "word": " normal", "probability": 0.76318359375}, {"start": 1719.63, "end": 1720.11, "word": " table", "probability": 0.11834716796875}, {"start": 1720.11, "end": 1721.75, "word": " 1", "probability": 0.448974609375}, {"start": 1721.75, "end": 1726.51, "word": ".96", "probability": 0.945068359375}, {"start": 1726.51, "end": 1729.57, "word": " now", "probability": 0.724609375}, {"start": 1729.57, "end": 1730.27, "word": " the", "probability": 0.87060546875}, {"start": 1730.27, "end": 1730.57, "word": " area", "probability": 0.875}, {"start": 1730.57, "end": 1730.97, "word": " below", "probability": 0.89599609375}, {"start": 1730.97, "end": 1731.43, "word": " 1", "probability": 0.95263671875}, {"start": 1731.43, "end": 1732.13, "word": ".6", "probability": 0.8583984375}, {"start": 1732.13, "end": 1733.27, "word": " 96", "probability": 0.210205078125}, {"start": 1733.27, "end": 1734.57, "word": " 975", "probability": 0.79052734375}, {"start": 1734.57, "end": 1738.05, "word": " so", "probability": 0.64111328125}, {"start": 1738.05, "end": 1738.29, "word": " it's", "probability": 0.914794921875}, {"start": 1738.29, "end": 1738.51, "word": " here", "probability": 0.6171875}, {"start": 1738.51, "end": 1739.01, "word": " it", "probability": 0.6953125}, {"start": 1739.01, "end": 1739.45, "word": " mentioned", "probability": 0.734375}, {"start": 1739.45, "end": 1739.81, "word": " that", "probability": 0.943359375}, {"start": 1739.81, "end": 1740.03, "word": " it's", "probability": 0.87060546875}, {"start": 1740.03, "end": 1740.33, "word": " 0", "probability": 
0.492919921875}, {"start": 1740.33, "end": 1740.79, "word": ".4", "probability": 0.994140625}, {"start": 1740.79, "end": 1741.53, "word": " so", "probability": 0.873046875}, {"start": 1741.53, "end": 1741.81, "word": " so", "probability": 0.386962890625}, {"start": 1741.81, "end": 1742.03, "word": " this", "probability": 0.95068359375}, {"start": 1742.03, "end": 1742.23, "word": " one", "probability": 0.93017578125}, {"start": 1742.23, "end": 1742.79, "word": " is", "probability": 0.9521484375}, {"start": 1742.79, "end": 1744.51, "word": " false", "probability": 0.892578125}, {"start": 1744.51, "end": 1744.87, "word": " or", "probability": 0.81787109375}], "temperature": 1.0}, {"id": 65, "seek": 177078, "start": 1747.42, "end": 1770.78, "text": " Because the area to the left of 1.96 is not 0.475, it's equal to 975. That's for 25. Let's do the odd numbers. The probability that standard normal, the random variable, falls between minus 2 and minus 0.44.", "tokens": [1436, 264, 1859, 281, 264, 1411, 295, 502, 13, 22962, 307, 406, 1958, 13, 19, 11901, 11, 309, 311, 2681, 281, 1722, 11901, 13, 663, 311, 337, 3552, 13, 961, 311, 360, 264, 7401, 3547, 13, 440, 8482, 300, 3832, 2710, 11, 264, 4974, 7006, 11, 8804, 1296, 3175, 568, 293, 3175, 1958, 13, 13912, 13], "avg_logprob": -0.1859923287441856, "compression_ratio": 1.3419354838709678, "no_speech_prob": 0.0, "words": [{"start": 1747.42, "end": 1747.72, "word": " Because", "probability": 0.306396484375}, {"start": 1747.72, "end": 1747.84, "word": " the", "probability": 0.86083984375}, {"start": 1747.84, "end": 1748.06, "word": " area", "probability": 0.8916015625}, {"start": 1748.06, "end": 1748.24, "word": " to", "probability": 0.94921875}, {"start": 1748.24, "end": 1748.36, "word": " the", "probability": 0.91845703125}, {"start": 1748.36, "end": 1748.54, "word": " left", "probability": 0.95361328125}, {"start": 1748.54, "end": 1748.68, "word": " of", "probability": 0.94921875}, {"start": 1748.68, "end": 1748.86, "word": 
" 1", "probability": 0.9345703125}, {"start": 1748.86, "end": 1749.44, "word": ".96", "probability": 0.97412109375}, {"start": 1749.44, "end": 1750.06, "word": " is", "probability": 0.89208984375}, {"start": 1750.06, "end": 1750.36, "word": " not", "probability": 0.9306640625}, {"start": 1750.36, "end": 1750.72, "word": " 0", "probability": 0.77783203125}, {"start": 1750.72, "end": 1751.32, "word": ".475,", "probability": 0.9215494791666666}, {"start": 1751.6, "end": 1751.88, "word": " it's", "probability": 0.886962890625}, {"start": 1751.88, "end": 1752.34, "word": " equal", "probability": 0.87939453125}, {"start": 1752.34, "end": 1753.0, "word": " to", "probability": 0.54052734375}, {"start": 1753.0, "end": 1753.4, "word": " 975.", "probability": 0.630126953125}, {"start": 1756.2, "end": 1756.88, "word": " That's", "probability": 0.91357421875}, {"start": 1756.88, "end": 1757.02, "word": " for", "probability": 0.8759765625}, {"start": 1757.02, "end": 1757.36, "word": " 25.", "probability": 0.50244140625}, {"start": 1759.88, "end": 1760.14, "word": " Let's", "probability": 0.859375}, {"start": 1760.14, "end": 1760.28, "word": " do", "probability": 0.68701171875}, {"start": 1760.28, "end": 1760.4, "word": " the", "probability": 0.88232421875}, {"start": 1760.4, "end": 1760.56, "word": " odd", "probability": 0.8349609375}, {"start": 1760.56, "end": 1761.0, "word": " numbers.", "probability": 0.89208984375}, {"start": 1764.44, "end": 1765.0, "word": " The", "probability": 0.875}, {"start": 1765.0, "end": 1765.46, "word": " probability", "probability": 0.94482421875}, {"start": 1765.46, "end": 1765.82, "word": " that", "probability": 0.91845703125}, {"start": 1765.82, "end": 1766.3, "word": " standard", "probability": 0.81884765625}, {"start": 1766.3, "end": 1766.68, "word": " normal,", "probability": 0.84619140625}, {"start": 1766.92, "end": 1767.02, "word": " the", "probability": 0.76416015625}, {"start": 1767.02, "end": 1767.36, "word": " random", "probability": 
0.84375}, {"start": 1767.36, "end": 1767.78, "word": " variable,", "probability": 0.919921875}, {"start": 1768.32, "end": 1768.58, "word": " falls", "probability": 0.818359375}, {"start": 1768.58, "end": 1768.86, "word": " between", "probability": 0.87353515625}, {"start": 1768.86, "end": 1769.18, "word": " minus", "probability": 0.3671875}, {"start": 1769.18, "end": 1769.54, "word": " 2", "probability": 0.64794921875}, {"start": 1769.54, "end": 1769.82, "word": " and", "probability": 0.92919921875}, {"start": 1769.82, "end": 1770.2, "word": " minus", "probability": 0.97705078125}, {"start": 1770.2, "end": 1770.4, "word": " 0", "probability": 0.97607421875}, {"start": 1770.4, "end": 1770.78, "word": ".44.", "probability": 0.986328125}], "temperature": 1.0}, {"id": 66, "seek": 180750, "start": 1778.54, "end": 1807.5, "text": " I'm sorry minus between minus two and negative point four four so it's the same as z is smaller than negative point four four minus z less than minus two it says the answer is point six four seven two the exact answer is point three", "tokens": [286, 478, 2597, 3175, 1296, 3175, 732, 293, 3671, 935, 1451, 1451, 370, 309, 311, 264, 912, 382, 710, 307, 4356, 813, 3671, 935, 1451, 1451, 3175, 710, 1570, 813, 3175, 732, 309, 1619, 264, 1867, 307, 935, 2309, 1451, 3407, 732, 264, 1900, 1867, 307, 935, 1045], "avg_logprob": -0.1881377599677261, "compression_ratio": 1.7786259541984732, "no_speech_prob": 0.0, "words": [{"start": 1778.54, "end": 1778.82, "word": " I'm", "probability": 0.742431640625}, {"start": 1778.82, "end": 1779.02, "word": " sorry", "probability": 0.87451171875}, {"start": 1779.02, "end": 1779.48, "word": " minus", "probability": 0.50830078125}, {"start": 1779.48, "end": 1781.0, "word": " between", "probability": 0.5908203125}, {"start": 1781.0, "end": 1781.38, "word": " minus", "probability": 0.86767578125}, {"start": 1781.38, "end": 1781.72, "word": " two", "probability": 0.61279296875}, {"start": 1781.72, "end": 1782.46, "word": 
" and", "probability": 0.8740234375}, {"start": 1782.46, "end": 1782.76, "word": " negative", "probability": 0.94384765625}, {"start": 1782.76, "end": 1783.06, "word": " point", "probability": 0.5361328125}, {"start": 1783.06, "end": 1783.3, "word": " four", "probability": 0.8359375}, {"start": 1783.3, "end": 1783.64, "word": " four", "probability": 0.82666015625}, {"start": 1783.64, "end": 1784.6, "word": " so", "probability": 0.291748046875}, {"start": 1784.6, "end": 1784.8, "word": " it's", "probability": 0.85498046875}, {"start": 1784.8, "end": 1784.92, "word": " the", "probability": 0.9189453125}, {"start": 1784.92, "end": 1785.12, "word": " same", "probability": 0.9013671875}, {"start": 1785.12, "end": 1785.54, "word": " as", "probability": 0.95263671875}, {"start": 1785.54, "end": 1786.52, "word": " z", "probability": 0.8076171875}, {"start": 1786.52, "end": 1786.7, "word": " is", "probability": 0.468017578125}, {"start": 1786.7, "end": 1786.98, "word": " smaller", "probability": 0.890625}, {"start": 1786.98, "end": 1787.26, "word": " than", "probability": 0.9326171875}, {"start": 1787.26, "end": 1787.6, "word": " negative", "probability": 0.9365234375}, {"start": 1787.6, "end": 1787.88, "word": " point", "probability": 0.71630859375}, {"start": 1787.88, "end": 1788.12, "word": " four", "probability": 0.89306640625}, {"start": 1788.12, "end": 1788.5, "word": " four", "probability": 0.89794921875}, {"start": 1788.5, "end": 1789.92, "word": " minus", "probability": 0.96142578125}, {"start": 1789.92, "end": 1790.3, "word": " z", "probability": 0.876953125}, {"start": 1790.3, "end": 1791.16, "word": " less", "probability": 0.83251953125}, {"start": 1791.16, "end": 1791.34, "word": " than", "probability": 0.93017578125}, {"start": 1791.34, "end": 1791.62, "word": " minus", "probability": 0.98046875}, {"start": 1791.62, "end": 1791.88, "word": " two", "probability": 0.9130859375}, {"start": 1791.88, "end": 1795.58, "word": " it", "probability": 0.78662109375}, 
{"start": 1795.58, "end": 1796.04, "word": " says", "probability": 0.8798828125}, {"start": 1796.04, "end": 1796.32, "word": " the", "probability": 0.91943359375}, {"start": 1796.32, "end": 1796.7, "word": " answer", "probability": 0.95654296875}, {"start": 1796.7, "end": 1797.0, "word": " is", "probability": 0.94775390625}, {"start": 1797.0, "end": 1797.28, "word": " point", "probability": 0.89892578125}, {"start": 1797.28, "end": 1797.66, "word": " six", "probability": 0.94580078125}, {"start": 1797.66, "end": 1797.9, "word": " four", "probability": 0.95166015625}, {"start": 1797.9, "end": 1798.14, "word": " seven", "probability": 0.90185546875}, {"start": 1798.14, "end": 1798.48, "word": " two", "probability": 0.94482421875}, {"start": 1798.48, "end": 1800.52, "word": " the", "probability": 0.8017578125}, {"start": 1800.52, "end": 1800.96, "word": " exact", "probability": 0.9482421875}, {"start": 1800.96, "end": 1801.56, "word": " answer", "probability": 0.9609375}, {"start": 1801.56, "end": 1804.0, "word": " is", "probability": 0.87939453125}, {"start": 1804.0, "end": 1807.18, "word": " point", "probability": 0.94482421875}, {"start": 1807.18, "end": 1807.5, "word": " three", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 67, "seek": 183103, "start": 1812.81, "end": 1831.03, "text": " So that one is incorrect, 27 is incorrect. You may figure this one by using the table or sometimes by Excel you can do this problem. 
Let's do different one.", "tokens": [407, 300, 472, 307, 18424, 11, 7634, 307, 18424, 13, 509, 815, 2573, 341, 472, 538, 1228, 264, 3199, 420, 2171, 538, 19060, 291, 393, 360, 341, 1154, 13, 961, 311, 360, 819, 472, 13], "avg_logprob": -0.24175347139437994, "compression_ratio": 1.2868852459016393, "no_speech_prob": 0.0, "words": [{"start": 1812.81, "end": 1813.53, "word": " So", "probability": 0.22216796875}, {"start": 1813.53, "end": 1814.25, "word": " that", "probability": 0.32666015625}, {"start": 1814.25, "end": 1814.41, "word": " one", "probability": 0.8876953125}, {"start": 1814.41, "end": 1814.99, "word": " is", "probability": 0.9072265625}, {"start": 1814.99, "end": 1817.89, "word": " incorrect,", "probability": 0.92919921875}, {"start": 1818.01, "end": 1818.41, "word": " 27", "probability": 0.71533203125}, {"start": 1818.41, "end": 1819.57, "word": " is", "probability": 0.908203125}, {"start": 1819.57, "end": 1820.11, "word": " incorrect.", "probability": 0.92431640625}, {"start": 1821.05, "end": 1821.29, "word": " You", "probability": 0.9453125}, {"start": 1821.29, "end": 1821.45, "word": " may", "probability": 0.9453125}, {"start": 1821.45, "end": 1821.75, "word": " figure", "probability": 0.97509765625}, {"start": 1821.75, "end": 1822.05, "word": " this", "probability": 0.9169921875}, {"start": 1822.05, "end": 1822.25, "word": " one", "probability": 0.92333984375}, {"start": 1822.25, "end": 1822.43, "word": " by", "probability": 0.92578125}, {"start": 1822.43, "end": 1822.75, "word": " using", "probability": 0.94189453125}, {"start": 1822.75, "end": 1823.03, "word": " the", "probability": 0.9033203125}, {"start": 1823.03, "end": 1823.61, "word": " table", "probability": 0.88525390625}, {"start": 1823.61, "end": 1824.69, "word": " or", "probability": 0.59423828125}, {"start": 1824.69, "end": 1825.23, "word": " sometimes", "probability": 0.93603515625}, {"start": 1825.23, "end": 1825.77, "word": " by", "probability": 0.92626953125}, {"start": 1825.77, 
"end": 1826.09, "word": " Excel", "probability": 0.744140625}, {"start": 1826.09, "end": 1826.27, "word": " you", "probability": 0.84814453125}, {"start": 1826.27, "end": 1826.49, "word": " can", "probability": 0.9462890625}, {"start": 1826.49, "end": 1826.79, "word": " do", "probability": 0.95947265625}, {"start": 1826.79, "end": 1828.29, "word": " this", "probability": 0.9423828125}, {"start": 1828.29, "end": 1828.71, "word": " problem.", "probability": 0.884765625}, {"start": 1829.29, "end": 1829.59, "word": " Let's", "probability": 0.908203125}, {"start": 1829.59, "end": 1829.87, "word": " do", "probability": 0.95361328125}, {"start": 1829.87, "end": 1830.71, "word": " different", "probability": 0.79736328125}, {"start": 1830.71, "end": 1831.03, "word": " one.", "probability": 0.36279296875}], "temperature": 1.0}, {"id": 68, "seek": 186830, "start": 1840.3, "end": 1868.3, "text": " Look at 29, the odd number, 29. 29 says that a worker earns 15 dollars per hour at planet earth and is told that only 2.5 percent of all workers make a higher wage if the wage is assumed to be normally distributed.", "tokens": [2053, 412, 9413, 11, 264, 7401, 1230, 11, 9413, 13, 9413, 1619, 300, 257, 11346, 46936, 2119, 3808, 680, 1773, 412, 5054, 4120, 293, 307, 1907, 300, 787, 568, 13, 20, 3043, 295, 439, 5600, 652, 257, 2946, 15444, 498, 264, 15444, 307, 15895, 281, 312, 5646, 12631, 13], "avg_logprob": -0.25062501192092895, "compression_ratio": 1.3870967741935485, "no_speech_prob": 0.0, "words": [{"start": 1840.3, "end": 1840.72, "word": " Look", "probability": 0.611328125}, {"start": 1840.72, "end": 1840.9, "word": " at", "probability": 0.96630859375}, {"start": 1840.9, "end": 1841.32, "word": " 29,", "probability": 0.65625}, {"start": 1842.16, "end": 1842.36, "word": " the", "probability": 0.88671875}, {"start": 1842.36, "end": 1842.54, "word": " odd", "probability": 0.943359375}, {"start": 1842.54, "end": 1842.86, "word": " number,", "probability": 0.9384765625}, {"start": 
1842.96, "end": 1843.36, "word": " 29.", "probability": 0.91845703125}, {"start": 1846.2, "end": 1846.64, "word": " 29", "probability": 0.83740234375}, {"start": 1846.64, "end": 1847.26, "word": " says", "probability": 0.88525390625}, {"start": 1847.26, "end": 1847.58, "word": " that", "probability": 0.91357421875}, {"start": 1847.58, "end": 1848.68, "word": " a", "probability": 0.86328125}, {"start": 1848.68, "end": 1849.14, "word": " worker", "probability": 0.908203125}, {"start": 1849.14, "end": 1851.52, "word": " earns", "probability": 0.86279296875}, {"start": 1851.52, "end": 1852.52, "word": " 15", "probability": 0.46484375}, {"start": 1852.52, "end": 1852.88, "word": " dollars", "probability": 0.7666015625}, {"start": 1852.88, "end": 1853.16, "word": " per", "probability": 0.87939453125}, {"start": 1853.16, "end": 1853.66, "word": " hour", "probability": 0.9404296875}, {"start": 1853.66, "end": 1856.64, "word": " at", "probability": 0.456298828125}, {"start": 1856.64, "end": 1857.34, "word": " planet", "probability": 0.1552734375}, {"start": 1857.34, "end": 1857.56, "word": " earth", "probability": 0.62158203125}, {"start": 1857.56, "end": 1857.82, "word": " and", "probability": 0.59765625}, {"start": 1857.82, "end": 1858.08, "word": " is", "probability": 0.529296875}, {"start": 1858.08, "end": 1858.2, "word": " told", "probability": 0.9814453125}, {"start": 1858.2, "end": 1858.56, "word": " that", "probability": 0.93603515625}, {"start": 1858.56, "end": 1858.9, "word": " only", "probability": 0.9091796875}, {"start": 1858.9, "end": 1859.14, "word": " 2", "probability": 0.97998046875}, {"start": 1859.14, "end": 1859.62, "word": ".5", "probability": 0.983642578125}, {"start": 1859.62, "end": 1860.12, "word": " percent", "probability": 0.52294921875}, {"start": 1860.12, "end": 1861.14, "word": " of", "probability": 0.97021484375}, {"start": 1861.14, "end": 1861.42, "word": " all", "probability": 0.9462890625}, {"start": 1861.42, "end": 1862.02, "word": " 
workers", "probability": 0.9169921875}, {"start": 1862.02, "end": 1862.6, "word": " make", "probability": 0.9189453125}, {"start": 1862.6, "end": 1862.88, "word": " a", "probability": 0.98583984375}, {"start": 1862.88, "end": 1863.22, "word": " higher", "probability": 0.9287109375}, {"start": 1863.22, "end": 1863.66, "word": " wage", "probability": 0.92578125}, {"start": 1863.66, "end": 1865.72, "word": " if", "probability": 0.403076171875}, {"start": 1865.72, "end": 1865.9, "word": " the", "probability": 0.91259765625}, {"start": 1865.9, "end": 1866.1, "word": " wage", "probability": 0.7216796875}, {"start": 1866.1, "end": 1866.3, "word": " is", "probability": 0.927734375}, {"start": 1866.3, "end": 1866.64, "word": " assumed", "probability": 0.89453125}, {"start": 1866.64, "end": 1866.8, "word": " to", "probability": 0.96142578125}, {"start": 1866.8, "end": 1866.96, "word": " be", "probability": 0.92138671875}, {"start": 1866.96, "end": 1867.74, "word": " normally", "probability": 0.55615234375}, {"start": 1867.74, "end": 1868.3, "word": " distributed.", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 69, "seek": 189542, "start": 1869.38, "end": 1895.42, "text": " And the standard deviation of wage rates is five per hour. So the standard deviation is five per hour, five dollars per hour. The average wage for the plant is 75. Now again, go back to the problem. 
It says that a worker earns 15 dollars per hour.", "tokens": [400, 264, 3832, 25163, 295, 15444, 6846, 307, 1732, 680, 1773, 13, 407, 264, 3832, 25163, 307, 1732, 680, 1773, 11, 1732, 3808, 680, 1773, 13, 440, 4274, 15444, 337, 264, 3709, 307, 9562, 13, 823, 797, 11, 352, 646, 281, 264, 1154, 13, 467, 1619, 300, 257, 11346, 46936, 2119, 3808, 680, 1773, 13], "avg_logprob": -0.1844308033053364, "compression_ratio": 1.6644295302013423, "no_speech_prob": 0.0, "words": [{"start": 1869.38, "end": 1869.72, "word": " And", "probability": 0.638671875}, {"start": 1869.72, "end": 1869.88, "word": " the", "probability": 0.86474609375}, {"start": 1869.88, "end": 1870.12, "word": " standard", "probability": 0.70751953125}, {"start": 1870.12, "end": 1871.74, "word": " deviation", "probability": 0.8759765625}, {"start": 1871.74, "end": 1872.04, "word": " of", "probability": 0.8779296875}, {"start": 1872.04, "end": 1872.36, "word": " wage", "probability": 0.880859375}, {"start": 1872.36, "end": 1872.92, "word": " rates", "probability": 0.94482421875}, {"start": 1872.92, "end": 1874.1, "word": " is", "probability": 0.93115234375}, {"start": 1874.1, "end": 1874.58, "word": " five", "probability": 0.5322265625}, {"start": 1874.58, "end": 1875.1, "word": " per", "probability": 0.634765625}, {"start": 1875.1, "end": 1875.36, "word": " hour.", "probability": 0.9599609375}, {"start": 1876.46, "end": 1877.06, "word": " So", "probability": 0.85400390625}, {"start": 1877.06, "end": 1877.5, "word": " the", "probability": 0.63720703125}, {"start": 1877.5, "end": 1877.78, "word": " standard", "probability": 0.958984375}, {"start": 1877.78, "end": 1878.2, "word": " deviation", "probability": 0.9326171875}, {"start": 1878.2, "end": 1880.9, "word": " is", "probability": 0.82666015625}, {"start": 1880.9, "end": 1881.18, "word": " five", "probability": 0.81982421875}, {"start": 1881.18, "end": 1881.38, "word": " per", "probability": 0.75}, {"start": 1881.38, "end": 1881.6, "word": " hour,", 
"probability": 0.9453125}, {"start": 1881.84, "end": 1882.1, "word": " five", "probability": 0.5849609375}, {"start": 1882.1, "end": 1882.36, "word": " dollars", "probability": 0.96142578125}, {"start": 1882.36, "end": 1882.54, "word": " per", "probability": 0.939453125}, {"start": 1882.54, "end": 1882.74, "word": " hour.", "probability": 0.93896484375}, {"start": 1883.24, "end": 1883.74, "word": " The", "probability": 0.84765625}, {"start": 1883.74, "end": 1884.24, "word": " average", "probability": 0.787109375}, {"start": 1884.24, "end": 1884.9, "word": " wage", "probability": 0.9228515625}, {"start": 1884.9, "end": 1885.3, "word": " for", "probability": 0.94482421875}, {"start": 1885.3, "end": 1885.48, "word": " the", "probability": 0.92626953125}, {"start": 1885.48, "end": 1885.78, "word": " plant", "probability": 0.9296875}, {"start": 1885.78, "end": 1886.12, "word": " is", "probability": 0.951171875}, {"start": 1886.12, "end": 1887.0, "word": " 75.", "probability": 0.5849609375}, {"start": 1890.62, "end": 1891.22, "word": " Now", "probability": 0.9541015625}, {"start": 1891.22, "end": 1891.46, "word": " again,", "probability": 0.71630859375}, {"start": 1891.52, "end": 1891.6, "word": " go", "probability": 0.884765625}, {"start": 1891.6, "end": 1891.78, "word": " back", "probability": 0.8740234375}, {"start": 1891.78, "end": 1891.9, "word": " to", "probability": 0.966796875}, {"start": 1891.9, "end": 1892.02, "word": " the", "probability": 0.91796875}, {"start": 1892.02, "end": 1892.24, "word": " problem.", "probability": 0.919921875}, {"start": 1892.4, "end": 1892.46, "word": " It", "probability": 0.96044921875}, {"start": 1892.46, "end": 1892.66, "word": " says", "probability": 0.8798828125}, {"start": 1892.66, "end": 1893.08, "word": " that", "probability": 0.92236328125}, {"start": 1893.08, "end": 1893.56, "word": " a", "probability": 0.9267578125}, {"start": 1893.56, "end": 1893.88, "word": " worker", "probability": 0.91650390625}, {"start": 1893.88, 
"end": 1894.3, "word": " earns", "probability": 0.9013671875}, {"start": 1894.3, "end": 1894.7, "word": " 15", "probability": 0.6201171875}, {"start": 1894.7, "end": 1895.0, "word": " dollars", "probability": 0.95556640625}, {"start": 1895.0, "end": 1895.18, "word": " per", "probability": 0.9462890625}, {"start": 1895.18, "end": 1895.42, "word": " hour.", "probability": 0.92822265625}], "temperature": 1.0}, {"id": 70, "seek": 192271, "start": 1896.51, "end": 1922.71, "text": " And it's told that only 2.5% of all workers make a higher wage. So it's X more than $15 equal 2.5%. That means zero to five. So let's see if this one is true or false. So again, this man,", "tokens": [400, 309, 311, 1907, 300, 787, 568, 13, 20, 4, 295, 439, 5600, 652, 257, 2946, 15444, 13, 407, 309, 311, 1783, 544, 813, 1848, 5211, 2681, 568, 13, 20, 6856, 663, 1355, 4018, 281, 1732, 13, 407, 718, 311, 536, 498, 341, 472, 307, 2074, 420, 7908, 13, 407, 797, 11, 341, 587, 11], "avg_logprob": -0.2041015654270138, "compression_ratio": 1.323943661971831, "no_speech_prob": 0.0, "words": [{"start": 1896.51, "end": 1896.91, "word": " And", "probability": 0.29248046875}, {"start": 1896.91, "end": 1897.21, "word": " it's", "probability": 0.810546875}, {"start": 1897.21, "end": 1897.39, "word": " told", "probability": 0.93603515625}, {"start": 1897.39, "end": 1897.81, "word": " that", "probability": 0.9345703125}, {"start": 1897.81, "end": 1898.89, "word": " only", "probability": 0.8779296875}, {"start": 1898.89, "end": 1899.15, "word": " 2", "probability": 0.9677734375}, {"start": 1899.15, "end": 1899.53, "word": ".5", "probability": 0.995361328125}, {"start": 1899.53, "end": 1899.85, "word": "%", "probability": 0.7734375}, {"start": 1899.85, "end": 1900.05, "word": " of", "probability": 0.9609375}, {"start": 1900.05, "end": 1900.29, "word": " all", "probability": 0.94482421875}, {"start": 1900.29, "end": 1900.73, "word": " workers", "probability": 0.93212890625}, {"start": 1900.73, "end": 1901.07, 
"word": " make", "probability": 0.90771484375}, {"start": 1901.07, "end": 1901.25, "word": " a", "probability": 0.96875}, {"start": 1901.25, "end": 1901.55, "word": " higher", "probability": 0.9248046875}, {"start": 1901.55, "end": 1901.99, "word": " wage.", "probability": 0.93408203125}, {"start": 1903.15, "end": 1903.27, "word": " So", "probability": 0.85498046875}, {"start": 1903.27, "end": 1904.33, "word": " it's", "probability": 0.802978515625}, {"start": 1904.33, "end": 1906.35, "word": " X", "probability": 0.50537109375}, {"start": 1906.35, "end": 1907.55, "word": " more", "probability": 0.83984375}, {"start": 1907.55, "end": 1908.07, "word": " than", "probability": 0.9443359375}, {"start": 1908.07, "end": 1910.27, "word": " $15", "probability": 0.73974609375}, {"start": 1910.27, "end": 1912.23, "word": " equal", "probability": 0.2476806640625}, {"start": 1912.23, "end": 1913.75, "word": " 2", "probability": 0.6005859375}, {"start": 1913.75, "end": 1914.63, "word": ".5%.", "probability": 0.8702799479166666}, {"start": 1914.63, "end": 1914.93, "word": " That", "probability": 0.90478515625}, {"start": 1914.93, "end": 1915.33, "word": " means", "probability": 0.9404296875}, {"start": 1915.33, "end": 1916.51, "word": " zero", "probability": 0.53271484375}, {"start": 1916.51, "end": 1916.67, "word": " to", "probability": 0.5810546875}, {"start": 1916.67, "end": 1916.85, "word": " five.", "probability": 0.498046875}, {"start": 1917.27, "end": 1917.57, "word": " So", "probability": 0.9443359375}, {"start": 1917.57, "end": 1917.83, "word": " let's", "probability": 0.9453125}, {"start": 1917.83, "end": 1918.03, "word": " see", "probability": 0.92138671875}, {"start": 1918.03, "end": 1918.27, "word": " if", "probability": 0.9189453125}, {"start": 1918.27, "end": 1918.61, "word": " this", "probability": 0.9443359375}, {"start": 1918.61, "end": 1918.83, "word": " one", "probability": 0.92578125}, {"start": 1918.83, "end": 1919.05, "word": " is", "probability": 
0.94384765625}, {"start": 1919.05, "end": 1919.31, "word": " true", "probability": 0.96240234375}, {"start": 1919.31, "end": 1919.49, "word": " or", "probability": 0.958984375}, {"start": 1919.49, "end": 1919.85, "word": " false.", "probability": 0.91259765625}, {"start": 1920.67, "end": 1920.95, "word": " So", "probability": 0.95068359375}, {"start": 1920.95, "end": 1921.23, "word": " again,", "probability": 0.8876953125}, {"start": 1922.05, "end": 1922.41, "word": " this", "probability": 0.9462890625}, {"start": 1922.41, "end": 1922.71, "word": " man,", "probability": 0.931640625}], "temperature": 1.0}, {"id": 71, "seek": 195114, "start": 1923.28, "end": 1951.14, "text": " earns $15 per hour at a plant. And he's told that only 2.5% of all workers make higher wage, means greater than the one he just got, which is $15. So people who make greater than 15, they claim it's 2.5%. So let's see if that percentage is true or false.", "tokens": [46936, 1848, 5211, 680, 1773, 412, 257, 3709, 13, 400, 415, 311, 1907, 300, 787, 568, 13, 20, 4, 295, 439, 5600, 652, 2946, 15444, 11, 1355, 5044, 813, 264, 472, 415, 445, 658, 11, 597, 307, 1848, 5211, 13, 407, 561, 567, 652, 5044, 813, 2119, 11, 436, 3932, 309, 311, 568, 13, 20, 6856, 407, 718, 311, 536, 498, 300, 9668, 307, 2074, 420, 7908, 13], "avg_logprob": -0.22056159161139224, "compression_ratio": 1.4488636363636365, "no_speech_prob": 0.0, "words": [{"start": 1923.28, "end": 1923.78, "word": " earns", "probability": 0.447998046875}, {"start": 1923.78, "end": 1924.18, "word": " $15", "probability": 0.734130859375}, {"start": 1924.18, "end": 1924.64, "word": " per", "probability": 0.81640625}, {"start": 1924.64, "end": 1925.02, "word": " hour", "probability": 0.94189453125}, {"start": 1925.02, "end": 1925.7, "word": " at", "probability": 0.73876953125}, {"start": 1925.7, "end": 1925.82, "word": " a", "probability": 0.56494140625}, {"start": 1925.82, "end": 1926.1, "word": " plant.", "probability": 0.89599609375}, {"start": 
1926.84, "end": 1927.2, "word": " And", "probability": 0.9111328125}, {"start": 1927.2, "end": 1927.68, "word": " he's", "probability": 0.842529296875}, {"start": 1927.68, "end": 1927.96, "word": " told", "probability": 0.970703125}, {"start": 1927.96, "end": 1928.32, "word": " that", "probability": 0.89453125}, {"start": 1928.32, "end": 1929.86, "word": " only", "probability": 0.87890625}, {"start": 1929.86, "end": 1930.26, "word": " 2", "probability": 0.9755859375}, {"start": 1930.26, "end": 1930.72, "word": ".5", "probability": 0.99658203125}, {"start": 1930.72, "end": 1931.0, "word": "%", "probability": 0.87939453125}, {"start": 1931.0, "end": 1931.22, "word": " of", "probability": 0.96484375}, {"start": 1931.22, "end": 1931.5, "word": " all", "probability": 0.951171875}, {"start": 1931.5, "end": 1932.06, "word": " workers", "probability": 0.92578125}, {"start": 1932.06, "end": 1933.98, "word": " make", "probability": 0.92919921875}, {"start": 1933.98, "end": 1934.48, "word": " higher", "probability": 0.63916015625}, {"start": 1934.48, "end": 1934.9, "word": " wage,", "probability": 0.9033203125}, {"start": 1935.98, "end": 1936.36, "word": " means", "probability": 0.72265625}, {"start": 1936.36, "end": 1936.74, "word": " greater", "probability": 0.9052734375}, {"start": 1936.74, "end": 1937.14, "word": " than", "probability": 0.9482421875}, {"start": 1937.14, "end": 1937.84, "word": " the", "probability": 0.9208984375}, {"start": 1937.84, "end": 1938.0, "word": " one", "probability": 0.93603515625}, {"start": 1938.0, "end": 1938.12, "word": " he", "probability": 0.96533203125}, {"start": 1938.12, "end": 1938.4, "word": " just", "probability": 0.9169921875}, {"start": 1938.4, "end": 1938.74, "word": " got,", "probability": 0.90673828125}, {"start": 1939.04, "end": 1939.32, "word": " which", "probability": 0.7744140625}, {"start": 1939.32, "end": 1939.36, "word": " is", "probability": 0.9228515625}, {"start": 1939.36, "end": 1939.68, "word": " $15.", 
"probability": 0.99169921875}, {"start": 1940.18, "end": 1940.66, "word": " So", "probability": 0.94580078125}, {"start": 1940.66, "end": 1940.86, "word": " people", "probability": 0.1903076171875}, {"start": 1940.86, "end": 1941.06, "word": " who", "probability": 0.260498046875}, {"start": 1941.06, "end": 1941.1, "word": " make", "probability": 0.4541015625}, {"start": 1941.1, "end": 1941.46, "word": " greater", "probability": 0.90234375}, {"start": 1941.46, "end": 1941.7, "word": " than", "probability": 0.943359375}, {"start": 1941.7, "end": 1942.1, "word": " 15,", "probability": 0.794921875}, {"start": 1943.4, "end": 1943.72, "word": " they", "probability": 0.76953125}, {"start": 1943.72, "end": 1944.3, "word": " claim", "probability": 0.93359375}, {"start": 1944.3, "end": 1944.7, "word": " it's", "probability": 0.488525390625}, {"start": 1944.7, "end": 1944.88, "word": " 2", "probability": 0.99072265625}, {"start": 1944.88, "end": 1945.62, "word": ".5%.", "probability": 0.9921875}, {"start": 1945.62, "end": 1946.36, "word": " So", "probability": 0.9619140625}, {"start": 1946.36, "end": 1946.6, "word": " let's", "probability": 0.955810546875}, {"start": 1946.6, "end": 1946.82, "word": " see", "probability": 0.9228515625}, {"start": 1946.82, "end": 1947.14, "word": " if", "probability": 0.94921875}, {"start": 1947.14, "end": 1947.8, "word": " that", "probability": 0.9375}, {"start": 1947.8, "end": 1949.7, "word": " percentage", "probability": 0.89306640625}, {"start": 1949.7, "end": 1950.44, "word": " is", "probability": 0.92626953125}, {"start": 1950.44, "end": 1950.68, "word": " true", "probability": 0.94091796875}, {"start": 1950.68, "end": 1950.88, "word": " or", "probability": 0.96533203125}, {"start": 1950.88, "end": 1951.14, "word": " false.", "probability": 0.91845703125}], "temperature": 1.0}, {"id": 72, "seek": 197385, "start": 1951.49, "end": 1973.85, "text": " So in this case, we have to convert to this score. 
So it becomes B of z greater than 15 minus the mean divided by sigma. So B of z greater than 7.5 divided by 5 is 1.5.", "tokens": [407, 294, 341, 1389, 11, 321, 362, 281, 7620, 281, 341, 6175, 13, 407, 309, 3643, 363, 295, 710, 5044, 813, 2119, 3175, 264, 914, 6666, 538, 12771, 13, 407, 363, 295, 710, 5044, 813, 1614, 13, 20, 6666, 538, 1025, 307, 502, 13, 20, 13], "avg_logprob": -0.1490192787444338, "compression_ratio": 1.4083333333333334, "no_speech_prob": 0.0, "words": [{"start": 1951.49, "end": 1951.75, "word": " So", "probability": 0.8349609375}, {"start": 1951.75, "end": 1951.89, "word": " in", "probability": 0.64013671875}, {"start": 1951.89, "end": 1952.01, "word": " this", "probability": 0.9453125}, {"start": 1952.01, "end": 1952.23, "word": " case,", "probability": 0.92041015625}, {"start": 1952.29, "end": 1952.39, "word": " we", "probability": 0.95947265625}, {"start": 1952.39, "end": 1952.63, "word": " have", "probability": 0.94677734375}, {"start": 1952.63, "end": 1952.85, "word": " to", "probability": 0.9677734375}, {"start": 1952.85, "end": 1953.41, "word": " convert", "probability": 0.90771484375}, {"start": 1953.41, "end": 1955.43, "word": " to", "probability": 0.6669921875}, {"start": 1955.43, "end": 1955.65, "word": " this", "probability": 0.71875}, {"start": 1955.65, "end": 1956.09, "word": " score.", "probability": 0.68408203125}, {"start": 1956.39, "end": 1956.59, "word": " So", "probability": 0.95556640625}, {"start": 1956.59, "end": 1956.71, "word": " it", "probability": 0.90185546875}, {"start": 1956.71, "end": 1957.21, "word": " becomes", "probability": 0.88525390625}, {"start": 1957.21, "end": 1958.07, "word": " B", "probability": 0.60986328125}, {"start": 1958.07, "end": 1959.01, "word": " of", "probability": 0.9599609375}, {"start": 1959.01, "end": 1959.27, "word": " z", "probability": 0.494873046875}, {"start": 1959.27, "end": 1959.85, "word": " greater", "probability": 0.8896484375}, {"start": 1959.85, "end": 1960.35, "word": " than", 
"probability": 0.95068359375}, {"start": 1960.35, "end": 1961.33, "word": " 15", "probability": 0.9345703125}, {"start": 1961.33, "end": 1961.73, "word": " minus", "probability": 0.98828125}, {"start": 1961.73, "end": 1961.95, "word": " the", "probability": 0.91748046875}, {"start": 1961.95, "end": 1962.17, "word": " mean", "probability": 0.96337890625}, {"start": 1962.17, "end": 1963.61, "word": " divided", "probability": 0.473388671875}, {"start": 1963.61, "end": 1963.85, "word": " by", "probability": 0.9775390625}, {"start": 1963.85, "end": 1964.17, "word": " sigma.", "probability": 0.87841796875}, {"start": 1965.69, "end": 1965.95, "word": " So", "probability": 0.9580078125}, {"start": 1965.95, "end": 1966.15, "word": " B", "probability": 0.908203125}, {"start": 1966.15, "end": 1966.31, "word": " of", "probability": 0.97119140625}, {"start": 1966.31, "end": 1966.59, "word": " z", "probability": 0.9638671875}, {"start": 1966.59, "end": 1967.79, "word": " greater", "probability": 0.77490234375}, {"start": 1967.79, "end": 1968.31, "word": " than", "probability": 0.94921875}, {"start": 1968.31, "end": 1969.17, "word": " 7", "probability": 0.9794921875}, {"start": 1969.17, "end": 1969.75, "word": ".5", "probability": 0.996826171875}, {"start": 1969.75, "end": 1970.05, "word": " divided", "probability": 0.8330078125}, {"start": 1970.05, "end": 1970.27, "word": " by", "probability": 0.96630859375}, {"start": 1970.27, "end": 1970.69, "word": " 5", "probability": 0.9560546875}, {"start": 1970.69, "end": 1971.11, "word": " is", "probability": 0.9423828125}, {"start": 1971.11, "end": 1973.31, "word": " 1", "probability": 0.94921875}, {"start": 1973.31, "end": 1973.85, "word": ".5.", "probability": 0.998779296875}], "temperature": 1.0}, {"id": 73, "seek": 200743, "start": 1980.75, "end": 2007.43, "text": " Make sense? 1.5. So now B of Z greater than 1.5. 1 minus B of Z is less than 1.5. Go back to the table. 1.5 is? Look at the table. 9332. 9332. 
So the answer should be 668. That means", "tokens": [4387, 2020, 30, 502, 13, 20, 13, 407, 586, 363, 295, 1176, 5044, 813, 502, 13, 20, 13, 502, 3175, 363, 295, 1176, 307, 1570, 813, 502, 13, 20, 13, 1037, 646, 281, 264, 3199, 13, 502, 13, 20, 307, 30, 2053, 412, 264, 3199, 13, 1722, 10191, 17, 13, 1722, 10191, 17, 13, 407, 264, 1867, 820, 312, 21126, 23, 13, 663, 1355], "avg_logprob": -0.19759615384615384, "compression_ratio": 1.3759398496240602, "no_speech_prob": 0.0, "words": [{"start": 1980.75, "end": 1981.13, "word": " Make", "probability": 0.25}, {"start": 1981.13, "end": 1981.43, "word": " sense?", "probability": 0.7783203125}, {"start": 1981.75, "end": 1982.07, "word": " 1", "probability": 0.90380859375}, {"start": 1982.07, "end": 1982.57, "word": ".5.", "probability": 0.984619140625}, {"start": 1983.05, "end": 1983.39, "word": " So", "probability": 0.90966796875}, {"start": 1983.39, "end": 1983.69, "word": " now", "probability": 0.822265625}, {"start": 1983.69, "end": 1984.07, "word": " B", "probability": 0.36083984375}, {"start": 1984.07, "end": 1984.21, "word": " of", "probability": 0.87939453125}, {"start": 1984.21, "end": 1984.31, "word": " Z", "probability": 0.8662109375}, {"start": 1984.31, "end": 1984.65, "word": " greater", "probability": 0.79345703125}, {"start": 1984.65, "end": 1984.93, "word": " than", "probability": 0.93896484375}, {"start": 1984.93, "end": 1985.11, "word": " 1", "probability": 0.97216796875}, {"start": 1985.11, "end": 1985.57, "word": ".5.", "probability": 0.990234375}, {"start": 1985.69, "end": 1985.85, "word": " 1", "probability": 0.798828125}, {"start": 1985.85, "end": 1986.31, "word": " minus", "probability": 0.97021484375}, {"start": 1986.31, "end": 1987.43, "word": " B", "probability": 0.8994140625}, {"start": 1987.43, "end": 1987.57, "word": " of", "probability": 0.96142578125}, {"start": 1987.57, "end": 1987.69, "word": " Z", "probability": 0.974609375}, {"start": 1987.69, "end": 1987.85, "word": " is", "probability": 
0.91455078125}, {"start": 1987.85, "end": 1988.09, "word": " less", "probability": 0.94482421875}, {"start": 1988.09, "end": 1988.29, "word": " than", "probability": 0.93310546875}, {"start": 1988.29, "end": 1988.49, "word": " 1", "probability": 0.98046875}, {"start": 1988.49, "end": 1988.95, "word": ".5.", "probability": 0.99462890625}, {"start": 1990.11, "end": 1990.75, "word": " Go", "probability": 0.94482421875}, {"start": 1990.75, "end": 1990.93, "word": " back", "probability": 0.87939453125}, {"start": 1990.93, "end": 1991.05, "word": " to", "probability": 0.966796875}, {"start": 1991.05, "end": 1991.17, "word": " the", "probability": 0.92578125}, {"start": 1991.17, "end": 1991.43, "word": " table.", "probability": 0.89111328125}, {"start": 1993.27, "end": 1993.81, "word": " 1", "probability": 0.86669921875}, {"start": 1993.81, "end": 1994.39, "word": ".5", "probability": 0.994384765625}, {"start": 1994.39, "end": 1994.93, "word": " is?", "probability": 0.94482421875}, {"start": 1996.95, "end": 1997.59, "word": " Look", "probability": 0.468994140625}, {"start": 1997.59, "end": 1997.93, "word": " at", "probability": 0.8828125}, {"start": 1997.93, "end": 1998.13, "word": " the", "probability": 0.8701171875}, {"start": 1998.13, "end": 1998.35, "word": " table.", "probability": 0.88623046875}, {"start": 1999.65, "end": 2000.29, "word": " 9332.", "probability": 0.8191731770833334}, {"start": 2002.61, "end": 2003.25, "word": " 9332.", "probability": 0.9500325520833334}, {"start": 2003.61, "end": 2003.87, "word": " So", "probability": 0.89990234375}, {"start": 2003.87, "end": 2004.09, "word": " the", "probability": 0.80859375}, {"start": 2004.09, "end": 2004.41, "word": " answer", "probability": 0.9541015625}, {"start": 2004.41, "end": 2004.77, "word": " should", "probability": 0.9658203125}, {"start": 2004.77, "end": 2005.09, "word": " be", "probability": 0.95361328125}, {"start": 2005.09, "end": 2006.31, "word": " 668.", "probability": 0.913818359375}, {"start": 
2006.61, "end": 2007.03, "word": " That", "probability": 0.89990234375}, {"start": 2007.03, "end": 2007.43, "word": " means", "probability": 0.9326171875}], "temperature": 1.0}, {"id": 74, "seek": 203853, "start": 2011.13, "end": 2038.53, "text": " 6.68% of the workers have higher wage. Not 2.5%. So that means this is incorrect. So that's false. So the answer is false for this problem. Look at 31.", "tokens": [1386, 13, 27102, 4, 295, 264, 5600, 362, 2946, 15444, 13, 1726, 568, 13, 20, 6856, 407, 300, 1355, 341, 307, 18424, 13, 407, 300, 311, 7908, 13, 407, 264, 1867, 307, 7908, 337, 341, 1154, 13, 2053, 412, 10353, 13], "avg_logprob": -0.16899181795971735, "compression_ratio": 1.2666666666666666, "no_speech_prob": 0.0, "words": [{"start": 2011.13, "end": 2011.25, "word": " 6", "probability": 0.5908203125}, {"start": 2011.25, "end": 2012.05, "word": ".68", "probability": 0.973388671875}, {"start": 2012.05, "end": 2012.75, "word": "%", "probability": 0.763671875}, {"start": 2012.75, "end": 2014.05, "word": " of", "probability": 0.95361328125}, {"start": 2014.05, "end": 2014.31, "word": " the", "probability": 0.90625}, {"start": 2014.31, "end": 2014.91, "word": " workers", "probability": 0.92333984375}, {"start": 2014.91, "end": 2016.57, "word": " have", "probability": 0.8857421875}, {"start": 2016.57, "end": 2017.09, "word": " higher", "probability": 0.8681640625}, {"start": 2017.09, "end": 2017.53, "word": " wage.", "probability": 0.8515625}, {"start": 2018.93, "end": 2019.25, "word": " Not", "probability": 0.85546875}, {"start": 2019.25, "end": 2019.41, "word": " 2", "probability": 0.94384765625}, {"start": 2019.41, "end": 2021.75, "word": ".5%.", "probability": 0.82861328125}, {"start": 2021.75, "end": 2022.19, "word": " So", "probability": 0.91064453125}, {"start": 2022.19, "end": 2022.41, "word": " that", "probability": 0.83349609375}, {"start": 2022.41, "end": 2022.69, "word": " means", "probability": 0.9140625}, {"start": 2022.69, "end": 2023.35, "word": " 
this", "probability": 0.443359375}, {"start": 2023.35, "end": 2023.45, "word": " is", "probability": 0.8974609375}, {"start": 2023.45, "end": 2023.79, "word": " incorrect.", "probability": 0.93896484375}, {"start": 2024.09, "end": 2024.19, "word": " So", "probability": 0.91455078125}, {"start": 2024.19, "end": 2024.49, "word": " that's", "probability": 0.88037109375}, {"start": 2024.49, "end": 2024.77, "word": " false.", "probability": 0.9189453125}, {"start": 2029.47, "end": 2029.81, "word": " So", "probability": 0.81689453125}, {"start": 2029.81, "end": 2029.93, "word": " the", "probability": 0.86572265625}, {"start": 2029.93, "end": 2030.29, "word": " answer", "probability": 0.9560546875}, {"start": 2030.29, "end": 2031.01, "word": " is", "probability": 0.92529296875}, {"start": 2031.01, "end": 2031.85, "word": " false", "probability": 0.86572265625}, {"start": 2031.85, "end": 2032.73, "word": " for", "probability": 0.9326171875}, {"start": 2032.73, "end": 2033.13, "word": " this", "probability": 0.94775390625}, {"start": 2033.13, "end": 2033.83, "word": " problem.", "probability": 0.884765625}, {"start": 2037.31, "end": 2037.79, "word": " Look", "probability": 0.79638671875}, {"start": 2037.79, "end": 2037.97, "word": " at", "probability": 0.9599609375}, {"start": 2037.97, "end": 2038.53, "word": " 31.", "probability": 0.900390625}], "temperature": 1.0}, {"id": 75, "seek": 205823, "start": 2041.55, "end": 2058.23, "text": " Do you have any question for this one, for 29? In 29, the claim is 2.5% of all workers have higher wage than $15. 
Let's see if this claim is true or false.", "tokens": [1144, 291, 362, 604, 1168, 337, 341, 472, 11, 337, 9413, 30, 682, 9413, 11, 264, 3932, 307, 568, 13, 20, 4, 295, 439, 5600, 362, 2946, 15444, 813, 1848, 5211, 13, 961, 311, 536, 498, 341, 3932, 307, 2074, 420, 7908, 13], "avg_logprob": -0.14932528612288562, "compression_ratio": 1.2283464566929134, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2041.55, "end": 2041.77, "word": " Do", "probability": 0.8056640625}, {"start": 2041.77, "end": 2041.81, "word": " you", "probability": 0.95361328125}, {"start": 2041.81, "end": 2041.91, "word": " have", "probability": 0.9501953125}, {"start": 2041.91, "end": 2042.05, "word": " any", "probability": 0.83837890625}, {"start": 2042.05, "end": 2042.31, "word": " question", "probability": 0.70947265625}, {"start": 2042.31, "end": 2042.53, "word": " for", "probability": 0.9296875}, {"start": 2042.53, "end": 2042.71, "word": " this", "probability": 0.9345703125}, {"start": 2042.71, "end": 2042.97, "word": " one,", "probability": 0.92919921875}, {"start": 2043.63, "end": 2043.89, "word": " for", "probability": 0.9150390625}, {"start": 2043.89, "end": 2044.17, "word": " 29?", "probability": 0.78466796875}, {"start": 2044.73, "end": 2044.97, "word": " In", "probability": 0.50537109375}, {"start": 2044.97, "end": 2045.37, "word": " 29,", "probability": 0.96484375}, {"start": 2046.33, "end": 2046.57, "word": " the", "probability": 0.8818359375}, {"start": 2046.57, "end": 2046.85, "word": " claim", "probability": 0.9189453125}, {"start": 2046.85, "end": 2047.15, "word": " is", "probability": 0.9423828125}, {"start": 2047.15, "end": 2048.47, "word": " 2", "probability": 0.81884765625}, {"start": 2048.47, "end": 2048.95, "word": ".5", "probability": 0.994140625}, {"start": 2048.95, "end": 2049.37, "word": "%", "probability": 0.7802734375}, {"start": 2049.37, "end": 2050.19, "word": " of", "probability": 0.966796875}, {"start": 2050.19, "end": 2050.49, "word": " all", 
"probability": 0.9462890625}, {"start": 2050.49, "end": 2051.03, "word": " workers", "probability": 0.9287109375}, {"start": 2051.03, "end": 2051.73, "word": " have", "probability": 0.9248046875}, {"start": 2051.73, "end": 2052.15, "word": " higher", "probability": 0.89404296875}, {"start": 2052.15, "end": 2052.55, "word": " wage", "probability": 0.8544921875}, {"start": 2052.55, "end": 2053.35, "word": " than", "probability": 0.9150390625}, {"start": 2053.35, "end": 2054.81, "word": " $15.", "probability": 0.703125}, {"start": 2055.83, "end": 2056.41, "word": " Let's", "probability": 0.964599609375}, {"start": 2056.41, "end": 2056.55, "word": " see", "probability": 0.91845703125}, {"start": 2056.55, "end": 2056.69, "word": " if", "probability": 0.9501953125}, {"start": 2056.69, "end": 2056.89, "word": " this", "probability": 0.93310546875}, {"start": 2056.89, "end": 2057.23, "word": " claim", "probability": 0.90185546875}, {"start": 2057.23, "end": 2057.49, "word": " is", "probability": 0.943359375}, {"start": 2057.49, "end": 2057.73, "word": " true", "probability": 0.96240234375}, {"start": 2057.73, "end": 2057.93, "word": " or", "probability": 0.94775390625}, {"start": 2057.93, "end": 2058.23, "word": " false.", "probability": 0.93408203125}], "temperature": 1.0}, {"id": 76, "seek": 208863, "start": 2058.93, "end": 2088.63, "text": " So the problem again is B probability of X greater than 15 equals 2.5%. We figure out that the answer is 6.68%. So the claim is false. Now, 31. Any set, any set of normality, oh I'm sorry, any set of normally distributed data can be transformed to its standardized form. 
True or false?", "tokens": [407, 264, 1154, 797, 307, 363, 8482, 295, 1783, 5044, 813, 2119, 6915, 568, 13, 20, 6856, 492, 2573, 484, 300, 264, 1867, 307, 1386, 13, 27102, 6856, 407, 264, 3932, 307, 7908, 13, 823, 11, 10353, 13, 2639, 992, 11, 604, 992, 295, 2026, 1860, 11, 1954, 286, 478, 2597, 11, 604, 992, 295, 5646, 12631, 1412, 393, 312, 16894, 281, 1080, 31677, 1254, 13, 13587, 420, 7908, 30], "avg_logprob": -0.2429577393431059, "compression_ratio": 1.43, "no_speech_prob": 0.0, "words": [{"start": 2058.93, "end": 2059.29, "word": " So", "probability": 0.8740234375}, {"start": 2059.29, "end": 2059.77, "word": " the", "probability": 0.5068359375}, {"start": 2059.77, "end": 2060.07, "word": " problem", "probability": 0.86376953125}, {"start": 2060.07, "end": 2060.39, "word": " again", "probability": 0.8544921875}, {"start": 2060.39, "end": 2060.61, "word": " is", "probability": 0.8828125}, {"start": 2060.61, "end": 2060.91, "word": " B", "probability": 0.2509765625}, {"start": 2060.91, "end": 2061.79, "word": " probability", "probability": 0.53125}, {"start": 2061.79, "end": 2062.01, "word": " of", "probability": 0.8408203125}, {"start": 2062.01, "end": 2062.19, "word": " X", "probability": 0.90185546875}, {"start": 2062.19, "end": 2062.51, "word": " greater", "probability": 0.888671875}, {"start": 2062.51, "end": 2062.75, "word": " than", "probability": 0.94189453125}, {"start": 2062.75, "end": 2063.13, "word": " 15", "probability": 0.87109375}, {"start": 2063.13, "end": 2063.69, "word": " equals", "probability": 0.92626953125}, {"start": 2063.69, "end": 2064.43, "word": " 2", "probability": 0.951171875}, {"start": 2064.43, "end": 2065.15, "word": ".5%.", "probability": 0.8860677083333334}, {"start": 2065.15, "end": 2065.87, "word": " We", "probability": 0.89306640625}, {"start": 2065.87, "end": 2066.17, "word": " figure", "probability": 0.7666015625}, {"start": 2066.17, "end": 2066.45, "word": " out", "probability": 0.86962890625}, {"start": 2066.45, "end": 
2066.63, "word": " that", "probability": 0.43017578125}, {"start": 2066.63, "end": 2066.87, "word": " the", "probability": 0.91259765625}, {"start": 2066.87, "end": 2067.15, "word": " answer", "probability": 0.951171875}, {"start": 2067.15, "end": 2067.39, "word": " is", "probability": 0.94091796875}, {"start": 2067.39, "end": 2067.87, "word": " 6", "probability": 0.96826171875}, {"start": 2067.87, "end": 2069.79, "word": ".68%.", "probability": 0.8121744791666666}, {"start": 2069.79, "end": 2070.35, "word": " So", "probability": 0.96337890625}, {"start": 2070.35, "end": 2070.51, "word": " the", "probability": 0.81396484375}, {"start": 2070.51, "end": 2070.73, "word": " claim", "probability": 0.272216796875}, {"start": 2070.73, "end": 2070.95, "word": " is", "probability": 0.94140625}, {"start": 2070.95, "end": 2071.35, "word": " false.", "probability": 0.86279296875}, {"start": 2072.79, "end": 2073.25, "word": " Now,", "probability": 0.84033203125}, {"start": 2073.53, "end": 2073.97, "word": " 31.", "probability": 0.8232421875}, {"start": 2075.13, "end": 2075.43, "word": " Any", "probability": 0.87744140625}, {"start": 2075.43, "end": 2075.83, "word": " set,", "probability": 0.953125}, {"start": 2076.33, "end": 2076.99, "word": " any", "probability": 0.90283203125}, {"start": 2076.99, "end": 2077.47, "word": " set", "probability": 0.947265625}, {"start": 2077.47, "end": 2079.55, "word": " of", "probability": 0.83056640625}, {"start": 2079.55, "end": 2080.59, "word": " normality,", "probability": 0.866455078125}, {"start": 2080.95, "end": 2081.07, "word": " oh", "probability": 0.60205078125}, {"start": 2081.07, "end": 2081.25, "word": " I'm", "probability": 0.75830078125}, {"start": 2081.25, "end": 2081.43, "word": " sorry,", "probability": 0.84765625}, {"start": 2081.49, "end": 2081.69, "word": " any", "probability": 0.89404296875}, {"start": 2081.69, "end": 2081.93, "word": " set", "probability": 0.951171875}, {"start": 2081.93, "end": 2082.09, "word": " of", 
"probability": 0.9677734375}, {"start": 2082.09, "end": 2082.47, "word": " normally", "probability": 0.716796875}, {"start": 2082.47, "end": 2082.99, "word": " distributed", "probability": 0.90185546875}, {"start": 2082.99, "end": 2083.57, "word": " data", "probability": 0.94140625}, {"start": 2083.57, "end": 2084.39, "word": " can", "probability": 0.84765625}, {"start": 2084.39, "end": 2084.63, "word": " be", "probability": 0.95654296875}, {"start": 2084.63, "end": 2085.23, "word": " transformed", "probability": 0.841796875}, {"start": 2085.23, "end": 2085.45, "word": " to", "probability": 0.88037109375}, {"start": 2085.45, "end": 2085.75, "word": " its", "probability": 0.7822265625}, {"start": 2085.75, "end": 2086.57, "word": " standardized", "probability": 0.84130859375}, {"start": 2086.57, "end": 2087.13, "word": " form.", "probability": 0.884765625}, {"start": 2087.63, "end": 2088.25, "word": " True", "probability": 0.7724609375}, {"start": 2088.25, "end": 2088.43, "word": " or", "probability": 0.96484375}, {"start": 2088.43, "end": 2088.63, "word": " false?", "probability": 0.82568359375}], "temperature": 1.0}, {"id": 77, "seek": 211094, "start": 2090.693, "end": 2110.95, "text": " 31. It says that any set of normally distributed can be transformed. Let's see 32. 
The middle spread.", "tokens": [10353, 13, 467, 1619, 300, 604, 992, 295, 5646, 12631, 393, 312, 16894, 13, 961, 311, 536, 8858, 13, 440, 2808, 3974, 13], "avg_logprob": -0.3264973983168602, "compression_ratio": 1.0851063829787233, "no_speech_prob": 0.0, "words": [{"start": 2090.6, "end": 2091.19, "word": " 31.", "probability": 0.41845703125}, {"start": 2094.17, "end": 2095.35, "word": " It", "probability": 0.88818359375}, {"start": 2095.35, "end": 2095.71, "word": " says", "probability": 0.88720703125}, {"start": 2095.71, "end": 2096.11, "word": " that", "probability": 0.91455078125}, {"start": 2096.11, "end": 2097.05, "word": " any", "probability": 0.85302734375}, {"start": 2097.05, "end": 2097.75, "word": " set", "probability": 0.787109375}, {"start": 2097.75, "end": 2098.35, "word": " of", "probability": 0.96337890625}, {"start": 2098.35, "end": 2098.91, "word": " normally", "probability": 0.5283203125}, {"start": 2098.91, "end": 2099.45, "word": " distributed", "probability": 0.8525390625}, {"start": 2099.45, "end": 2099.87, "word": " can", "probability": 0.56103515625}, {"start": 2099.87, "end": 2100.05, "word": " be", "probability": 0.95068359375}, {"start": 2100.05, "end": 2100.73, "word": " transformed.", "probability": 0.86279296875}, {"start": 2106.63, "end": 2107.81, "word": " Let's", "probability": 0.60504150390625}, {"start": 2107.81, "end": 2107.93, "word": " see", "probability": 0.86767578125}, {"start": 2107.93, "end": 2108.55, "word": " 32.", "probability": 0.63623046875}, {"start": 2109.91, "end": 2110.13, "word": " The", "probability": 0.86962890625}, {"start": 2110.13, "end": 2110.37, "word": " middle", "probability": 0.78271484375}, {"start": 2110.37, "end": 2110.95, "word": " spread.", "probability": 0.56005859375}], "temperature": 1.0}, {"id": 78, "seek": 214230, "start": 2116.66, "end": 2142.3, "text": " That is the middle 50% of the normal distribution is equal to one standard deviation. That's true. It's incorrect. 
Because we mentioned that 68% of the data are within one standard deviation above the mean. Within the mean. So it's false.", "tokens": [663, 307, 264, 2808, 2625, 4, 295, 264, 2710, 7316, 307, 2681, 281, 472, 3832, 25163, 13, 663, 311, 2074, 13, 467, 311, 18424, 13, 1436, 321, 2835, 300, 23317, 4, 295, 264, 1412, 366, 1951, 472, 3832, 25163, 3673, 264, 914, 13, 15996, 264, 914, 13, 407, 309, 311, 7908, 13], "avg_logprob": -0.17216981019613878, "compression_ratio": 1.551948051948052, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2116.66, "end": 2117.1, "word": " That", "probability": 0.619140625}, {"start": 2117.1, "end": 2117.5, "word": " is", "probability": 0.92724609375}, {"start": 2117.5, "end": 2117.84, "word": " the", "probability": 0.66064453125}, {"start": 2117.84, "end": 2118.16, "word": " middle", "probability": 0.88427734375}, {"start": 2118.16, "end": 2119.1, "word": " 50", "probability": 0.744140625}, {"start": 2119.1, "end": 2119.62, "word": "%", "probability": 0.865234375}, {"start": 2119.62, "end": 2119.86, "word": " of", "probability": 0.96923828125}, {"start": 2119.86, "end": 2120.0, "word": " the", "probability": 0.89111328125}, {"start": 2120.0, "end": 2120.28, "word": " normal", "probability": 0.8896484375}, {"start": 2120.28, "end": 2121.02, "word": " distribution", "probability": 0.84912109375}, {"start": 2121.02, "end": 2121.92, "word": " is", "probability": 0.80810546875}, {"start": 2121.92, "end": 2122.28, "word": " equal", "probability": 0.90673828125}, {"start": 2122.28, "end": 2122.56, "word": " to", "probability": 0.96630859375}, {"start": 2122.56, "end": 2122.74, "word": " one", "probability": 0.8779296875}, {"start": 2122.74, "end": 2123.06, "word": " standard", "probability": 0.95654296875}, {"start": 2123.06, "end": 2123.42, "word": " deviation.", "probability": 0.90185546875}, {"start": 2126.04, "end": 2126.76, "word": " That's", "probability": 0.76318359375}, {"start": 2126.76, "end": 2127.0, "word": " true.", 
"probability": 0.6767578125}, {"start": 2127.24, "end": 2127.56, "word": " It's", "probability": 0.940185546875}, {"start": 2127.56, "end": 2127.92, "word": " incorrect.", "probability": 0.896484375}, {"start": 2128.84, "end": 2129.48, "word": " Because", "probability": 0.92626953125}, {"start": 2129.48, "end": 2129.8, "word": " we", "probability": 0.9140625}, {"start": 2129.8, "end": 2130.16, "word": " mentioned", "probability": 0.775390625}, {"start": 2130.16, "end": 2130.56, "word": " that", "probability": 0.9267578125}, {"start": 2130.56, "end": 2132.22, "word": " 68", "probability": 0.96484375}, {"start": 2132.22, "end": 2132.96, "word": "%", "probability": 0.9833984375}, {"start": 2132.96, "end": 2133.86, "word": " of", "probability": 0.96728515625}, {"start": 2133.86, "end": 2134.02, "word": " the", "probability": 0.9208984375}, {"start": 2134.02, "end": 2134.44, "word": " data", "probability": 0.94775390625}, {"start": 2134.44, "end": 2135.48, "word": " are", "probability": 0.91162109375}, {"start": 2135.48, "end": 2136.0, "word": " within", "probability": 0.91748046875}, {"start": 2136.0, "end": 2136.38, "word": " one", "probability": 0.92626953125}, {"start": 2136.38, "end": 2136.74, "word": " standard", "probability": 0.94775390625}, {"start": 2136.74, "end": 2137.24, "word": " deviation", "probability": 0.89697265625}, {"start": 2137.24, "end": 2138.04, "word": " above", "probability": 0.8388671875}, {"start": 2138.04, "end": 2138.28, "word": " the", "probability": 0.9296875}, {"start": 2138.28, "end": 2138.48, "word": " mean.", "probability": 0.978515625}, {"start": 2139.96, "end": 2140.28, "word": " Within", "probability": 0.77392578125}, {"start": 2140.28, "end": 2140.48, "word": " the", "probability": 0.92724609375}, {"start": 2140.48, "end": 2140.62, "word": " mean.", "probability": 0.9248046875}, {"start": 2140.74, "end": 2140.9, "word": " So", "probability": 0.92578125}, {"start": 2140.9, "end": 2141.9, "word": " it's", "probability": 
0.56829833984375}, {"start": 2141.9, "end": 2142.3, "word": " false.", "probability": 0.84375}], "temperature": 1.0}, {"id": 79, "seek": 216956, "start": 2146.16, "end": 2169.56, "text": " Instead of 50, we have to say that 68%. The empirical rule. 33. The normal probability plot, the one we just discussed last time, may be used to assess the assumption of normality for a particular batch of data. As we mentioned before,", "tokens": [7156, 295, 2625, 11, 321, 362, 281, 584, 300, 23317, 6856, 440, 31886, 4978, 13, 11816, 13, 440, 2710, 8482, 7542, 11, 264, 472, 321, 445, 7152, 1036, 565, 11, 815, 312, 1143, 281, 5877, 264, 15302, 295, 2026, 1860, 337, 257, 1729, 15245, 295, 1412, 13, 1018, 321, 2835, 949, 11], "avg_logprob": -0.1594929189052222, "compression_ratio": 1.3964497041420119, "no_speech_prob": 0.0, "words": [{"start": 2146.16, "end": 2146.64, "word": " Instead", "probability": 0.495361328125}, {"start": 2146.64, "end": 2146.88, "word": " of", "probability": 0.96923828125}, {"start": 2146.88, "end": 2147.24, "word": " 50,", "probability": 0.919921875}, {"start": 2147.68, "end": 2148.26, "word": " we", "probability": 0.833984375}, {"start": 2148.26, "end": 2148.36, "word": " have", "probability": 0.916015625}, {"start": 2148.36, "end": 2148.5, "word": " to", "probability": 0.96875}, {"start": 2148.5, "end": 2148.7, "word": " say", "probability": 0.888671875}, {"start": 2148.7, "end": 2149.0, "word": " that", "probability": 0.9208984375}, {"start": 2149.0, "end": 2150.3, "word": " 68%.", "probability": 0.651611328125}, {"start": 2150.3, "end": 2151.74, "word": " The", "probability": 0.72216796875}, {"start": 2151.74, "end": 2152.02, "word": " empirical", "probability": 0.93115234375}, {"start": 2152.02, "end": 2152.32, "word": " rule.", "probability": 0.89697265625}, {"start": 2153.96, "end": 2154.64, "word": " 33.", "probability": 0.65673828125}, {"start": 2155.76, "end": 2155.98, "word": " The", "probability": 0.41650390625}, {"start": 2155.98, "end": 
2156.34, "word": " normal", "probability": 0.8466796875}, {"start": 2156.34, "end": 2157.0, "word": " probability", "probability": 0.9404296875}, {"start": 2157.0, "end": 2157.5, "word": " plot,", "probability": 0.97900390625}, {"start": 2158.6, "end": 2158.7, "word": " the", "probability": 0.8447265625}, {"start": 2158.7, "end": 2158.84, "word": " one", "probability": 0.9248046875}, {"start": 2158.84, "end": 2158.98, "word": " we", "probability": 0.9541015625}, {"start": 2158.98, "end": 2159.26, "word": " just", "probability": 0.8935546875}, {"start": 2159.26, "end": 2159.66, "word": " discussed", "probability": 0.88427734375}, {"start": 2159.66, "end": 2159.98, "word": " last", "probability": 0.85888671875}, {"start": 2159.98, "end": 2160.4, "word": " time,", "probability": 0.89111328125}, {"start": 2161.54, "end": 2161.72, "word": " may", "probability": 0.8701171875}, {"start": 2161.72, "end": 2161.84, "word": " be", "probability": 0.95947265625}, {"start": 2161.84, "end": 2162.06, "word": " used", "probability": 0.9130859375}, {"start": 2162.06, "end": 2162.24, "word": " to", "probability": 0.970703125}, {"start": 2162.24, "end": 2162.86, "word": " assess", "probability": 0.9462890625}, {"start": 2162.86, "end": 2163.86, "word": " the", "probability": 0.919921875}, {"start": 2163.86, "end": 2164.82, "word": " assumption", "probability": 0.9755859375}, {"start": 2164.82, "end": 2165.1, "word": " of", "probability": 0.96875}, {"start": 2165.1, "end": 2165.6, "word": " normality", "probability": 0.9638671875}, {"start": 2165.6, "end": 2166.44, "word": " for", "probability": 0.943359375}, {"start": 2166.44, "end": 2166.64, "word": " a", "probability": 0.95703125}, {"start": 2166.64, "end": 2167.04, "word": " particular", "probability": 0.90966796875}, {"start": 2167.04, "end": 2167.46, "word": " batch", "probability": 0.8359375}, {"start": 2167.46, "end": 2167.62, "word": " of", "probability": 0.970703125}, {"start": 2167.62, "end": 2167.9, "word": " data.", 
"probability": 0.93310546875}, {"start": 2168.38, "end": 2168.64, "word": " As", "probability": 0.96435546875}, {"start": 2168.64, "end": 2168.74, "word": " we", "probability": 0.8994140625}, {"start": 2168.74, "end": 2169.02, "word": " mentioned", "probability": 0.82666015625}, {"start": 2169.02, "end": 2169.56, "word": " before,", "probability": 0.8642578125}], "temperature": 1.0}, {"id": 80, "seek": 219876, "start": 2170.24, "end": 2198.76, "text": " One of the rules that we can use to determine if the data is normally distributed or not is called the normal probability plot. So it's true. So again, normal probability plot is used to assess the assumption of normality for a data. Let's see, for example, 35.", "tokens": [1485, 295, 264, 4474, 300, 321, 393, 764, 281, 6997, 498, 264, 1412, 307, 5646, 12631, 420, 406, 307, 1219, 264, 2710, 8482, 7542, 13, 407, 309, 311, 2074, 13, 407, 797, 11, 2710, 8482, 7542, 307, 1143, 281, 5877, 264, 15302, 295, 2026, 1860, 337, 257, 1412, 13, 961, 311, 536, 11, 337, 1365, 11, 6976, 13], "avg_logprob": -0.1300317766302723, "compression_ratio": 1.5975609756097562, "no_speech_prob": 0.0, "words": [{"start": 2170.24, "end": 2170.66, "word": " One", "probability": 0.787109375}, {"start": 2170.66, "end": 2171.06, "word": " of", "probability": 0.9716796875}, {"start": 2171.06, "end": 2171.46, "word": " the", "probability": 0.92236328125}, {"start": 2171.46, "end": 2172.1, "word": " rules", "probability": 0.57958984375}, {"start": 2172.1, "end": 2172.36, "word": " that", "probability": 0.93505859375}, {"start": 2172.36, "end": 2172.52, "word": " we", "probability": 0.95849609375}, {"start": 2172.52, "end": 2172.74, "word": " can", "probability": 0.9453125}, {"start": 2172.74, "end": 2173.18, "word": " use", "probability": 0.87548828125}, {"start": 2173.18, "end": 2174.56, "word": " to", "probability": 0.92919921875}, {"start": 2174.56, "end": 2175.04, "word": " determine", "probability": 0.92724609375}, {"start": 2175.04, "end": 
2175.22, "word": " if", "probability": 0.78955078125}, {"start": 2175.22, "end": 2175.3, "word": " the", "probability": 0.78076171875}, {"start": 2175.3, "end": 2175.48, "word": " data", "probability": 0.94921875}, {"start": 2175.48, "end": 2175.68, "word": " is", "probability": 0.90380859375}, {"start": 2175.68, "end": 2176.04, "word": " normally", "probability": 0.56298828125}, {"start": 2176.04, "end": 2176.5, "word": " distributed", "probability": 0.9560546875}, {"start": 2176.5, "end": 2176.78, "word": " or", "probability": 0.94140625}, {"start": 2176.78, "end": 2177.06, "word": " not", "probability": 0.9453125}, {"start": 2177.06, "end": 2177.6, "word": " is", "probability": 0.7421875}, {"start": 2177.6, "end": 2177.94, "word": " called", "probability": 0.88232421875}, {"start": 2177.94, "end": 2178.14, "word": " the", "probability": 0.84716796875}, {"start": 2178.14, "end": 2178.44, "word": " normal", "probability": 0.6689453125}, {"start": 2178.44, "end": 2178.9, "word": " probability", "probability": 0.94580078125}, {"start": 2178.9, "end": 2179.22, "word": " plot.", "probability": 0.87646484375}, {"start": 2179.68, "end": 2179.92, "word": " So", "probability": 0.87744140625}, {"start": 2179.92, "end": 2180.12, "word": " it's", "probability": 0.887451171875}, {"start": 2180.12, "end": 2180.42, "word": " true.", "probability": 0.94384765625}, {"start": 2180.82, "end": 2180.94, "word": " So", "probability": 0.73876953125}, {"start": 2180.94, "end": 2181.26, "word": " again,", "probability": 0.8486328125}, {"start": 2181.86, "end": 2182.22, "word": " normal", "probability": 0.81982421875}, {"start": 2182.22, "end": 2182.84, "word": " probability", "probability": 0.9697265625}, {"start": 2182.84, "end": 2183.42, "word": " plot", "probability": 0.96435546875}, {"start": 2183.42, "end": 2184.1, "word": " is", "probability": 0.94970703125}, {"start": 2184.1, "end": 2184.6, "word": " used", "probability": 0.91455078125}, {"start": 2184.6, "end": 2185.46, "word": " 
to", "probability": 0.96337890625}, {"start": 2185.46, "end": 2186.0, "word": " assess", "probability": 0.91796875}, {"start": 2186.0, "end": 2187.02, "word": " the", "probability": 0.9228515625}, {"start": 2187.02, "end": 2187.62, "word": " assumption", "probability": 0.97509765625}, {"start": 2187.62, "end": 2189.18, "word": " of", "probability": 0.97021484375}, {"start": 2189.18, "end": 2189.66, "word": " normality", "probability": 0.94921875}, {"start": 2189.66, "end": 2190.1, "word": " for", "probability": 0.94970703125}, {"start": 2190.1, "end": 2190.88, "word": " a", "probability": 0.908203125}, {"start": 2190.88, "end": 2191.12, "word": " data.", "probability": 0.92822265625}, {"start": 2194.3, "end": 2195.12, "word": " Let's", "probability": 0.893310546875}, {"start": 2195.12, "end": 2195.42, "word": " see,", "probability": 0.5888671875}, {"start": 2196.42, "end": 2196.6, "word": " for", "probability": 0.95166015625}, {"start": 2196.6, "end": 2197.08, "word": " example,", "probability": 0.97265625}, {"start": 2198.28, "end": 2198.76, "word": " 35.", "probability": 0.8876953125}], "temperature": 1.0}, {"id": 81, "seek": 222207, "start": 2200.23, "end": 2222.07, "text": " The probability that a standard normal variable z is positive is, the probability that a standard normal variable z is positive is, now if you, this is a table, z cubed. 
It's one.", "tokens": [440, 8482, 300, 257, 3832, 2710, 7006, 710, 307, 3353, 307, 11, 264, 8482, 300, 257, 3832, 2710, 7006, 710, 307, 3353, 307, 11, 586, 498, 291, 11, 341, 307, 257, 3199, 11, 710, 36510, 13, 467, 311, 472, 13], "avg_logprob": -0.31459604821554044, "compression_ratio": 1.6981132075471699, "no_speech_prob": 0.0, "words": [{"start": 2200.23, "end": 2200.51, "word": " The", "probability": 0.73779296875}, {"start": 2200.51, "end": 2200.93, "word": " probability", "probability": 0.93798828125}, {"start": 2200.93, "end": 2201.21, "word": " that", "probability": 0.9326171875}, {"start": 2201.21, "end": 2201.41, "word": " a", "probability": 0.91650390625}, {"start": 2201.41, "end": 2201.79, "word": " standard", "probability": 0.943359375}, {"start": 2201.79, "end": 2202.23, "word": " normal", "probability": 0.8603515625}, {"start": 2202.23, "end": 2202.81, "word": " variable", "probability": 0.92626953125}, {"start": 2202.81, "end": 2203.05, "word": " z", "probability": 0.61083984375}, {"start": 2203.05, "end": 2203.21, "word": " is", "probability": 0.94189453125}, {"start": 2203.21, "end": 2203.63, "word": " positive", "probability": 0.94189453125}, {"start": 2203.63, "end": 2206.05, "word": " is,", "probability": 0.88134765625}, {"start": 2207.29, "end": 2209.31, "word": " the", "probability": 0.77294921875}, {"start": 2209.31, "end": 2209.75, "word": " probability", "probability": 0.9560546875}, {"start": 2209.75, "end": 2210.23, "word": " that", "probability": 0.92626953125}, {"start": 2210.23, "end": 2211.09, "word": " a", "probability": 0.94580078125}, {"start": 2211.09, "end": 2211.51, "word": " standard", "probability": 0.94091796875}, {"start": 2211.51, "end": 2211.93, "word": " normal", "probability": 0.87158203125}, {"start": 2211.93, "end": 2212.59, "word": " variable", "probability": 0.888671875}, {"start": 2212.59, "end": 2212.91, "word": " z", "probability": 0.97021484375}, {"start": 2212.91, "end": 2213.09, "word": " is", "probability": 
0.94580078125}, {"start": 2213.09, "end": 2213.53, "word": " positive", "probability": 0.95068359375}, {"start": 2213.53, "end": 2214.09, "word": " is,", "probability": 0.90478515625}, {"start": 2215.23, "end": 2215.51, "word": " now", "probability": 0.7109375}, {"start": 2215.51, "end": 2215.71, "word": " if", "probability": 0.5029296875}, {"start": 2215.71, "end": 2215.89, "word": " you,", "probability": 0.70751953125}, {"start": 2216.75, "end": 2217.03, "word": " this", "probability": 0.5087890625}, {"start": 2217.03, "end": 2217.23, "word": " is", "probability": 0.83251953125}, {"start": 2217.23, "end": 2217.33, "word": " a", "probability": 0.62060546875}, {"start": 2217.33, "end": 2217.67, "word": " table,", "probability": 0.6923828125}, {"start": 2218.49, "end": 2218.77, "word": " z", "probability": 0.880859375}, {"start": 2218.77, "end": 2219.09, "word": " cubed.", "probability": 0.220947265625}, {"start": 2221.33, "end": 2221.99, "word": " It's", "probability": 0.531494140625}, {"start": 2221.99, "end": 2222.07, "word": " one.", "probability": 0.6064453125}], "temperature": 1.0}, {"id": 82, "seek": 224882, "start": 2226.8, "end": 2248.82, "text": " Again, the probability that a standardized normal variable Z is positive is the probability. 
Let's do one more.", "tokens": [3764, 11, 264, 8482, 300, 257, 31677, 2710, 7006, 1176, 307, 3353, 307, 264, 8482, 13, 961, 311, 360, 472, 544, 13], "avg_logprob": -0.262058410955512, "compression_ratio": 1.2043010752688172, "no_speech_prob": 0.0, "words": [{"start": 2226.8, "end": 2227.78, "word": " Again,", "probability": 0.25830078125}, {"start": 2228.38, "end": 2228.38, "word": " the", "probability": 0.81689453125}, {"start": 2228.38, "end": 2228.86, "word": " probability", "probability": 0.900390625}, {"start": 2228.86, "end": 2232.66, "word": " that", "probability": 0.54736328125}, {"start": 2232.66, "end": 2233.02, "word": " a", "probability": 0.63720703125}, {"start": 2233.02, "end": 2233.6, "word": " standardized", "probability": 0.87939453125}, {"start": 2233.6, "end": 2234.08, "word": " normal", "probability": 0.798828125}, {"start": 2234.08, "end": 2234.64, "word": " variable", "probability": 0.9228515625}, {"start": 2234.64, "end": 2235.02, "word": " Z", "probability": 0.595703125}, {"start": 2235.02, "end": 2236.24, "word": " is", "probability": 0.90087890625}, {"start": 2236.24, "end": 2236.74, "word": " positive", "probability": 0.89990234375}, {"start": 2236.74, "end": 2236.96, "word": " is", "probability": 0.76611328125}, {"start": 2236.96, "end": 2237.1, "word": " the", "probability": 0.6328125}, {"start": 2237.1, "end": 2237.58, "word": " probability.", "probability": 0.93505859375}, {"start": 2246.34, "end": 2247.32, "word": " Let's", "probability": 0.83544921875}, {"start": 2247.32, "end": 2247.64, "word": " do", "probability": 0.95166015625}, {"start": 2247.64, "end": 2248.48, "word": " one", "probability": 0.923828125}, {"start": 2248.48, "end": 2248.82, "word": " more.", "probability": 0.94140625}], "temperature": 1.0}, {"id": 83, "seek": 229659, "start": 2271.43, "end": 2296.59, "text": " These problems are the same, some of these. Now look at 6 and 7. 
Suppose Z has a standard normal distribution with a mean of zero and standard relation of one. And the probability Z is less than 1.25.", "tokens": [1981, 2740, 366, 264, 912, 11, 512, 295, 613, 13, 823, 574, 412, 1386, 293, 1614, 13, 21360, 1176, 575, 257, 3832, 2710, 7316, 365, 257, 914, 295, 4018, 293, 3832, 9721, 295, 472, 13, 400, 264, 8482, 1176, 307, 1570, 813, 502, 13, 6074, 13], "avg_logprob": -0.3390957586308743, "compression_ratio": 1.348993288590604, "no_speech_prob": 0.0, "words": [{"start": 2271.43, "end": 2271.99, "word": " These", "probability": 0.403076171875}, {"start": 2271.99, "end": 2272.39, "word": " problems", "probability": 0.81005859375}, {"start": 2272.39, "end": 2272.65, "word": " are", "probability": 0.94287109375}, {"start": 2272.65, "end": 2272.83, "word": " the", "probability": 0.90283203125}, {"start": 2272.83, "end": 2273.09, "word": " same,", "probability": 0.90576171875}, {"start": 2274.17, "end": 2274.49, "word": " some", "probability": 0.83203125}, {"start": 2274.49, "end": 2274.67, "word": " of", "probability": 0.94287109375}, {"start": 2274.67, "end": 2274.97, "word": " these.", "probability": 0.7861328125}, {"start": 2277.89, "end": 2278.45, "word": " Now", "probability": 0.398681640625}, {"start": 2278.45, "end": 2279.55, "word": " look", "probability": 0.5859375}, {"start": 2279.55, "end": 2279.73, "word": " at", "probability": 0.96533203125}, {"start": 2279.73, "end": 2279.99, "word": " 6", "probability": 0.546875}, {"start": 2279.99, "end": 2280.11, "word": " and", "probability": 0.2841796875}, {"start": 2280.11, "end": 2280.37, "word": " 7.", "probability": 0.99365234375}, {"start": 2281.43, "end": 2281.99, "word": " Suppose", "probability": 0.78515625}, {"start": 2281.99, "end": 2284.39, "word": " Z", "probability": 0.50927734375}, {"start": 2284.39, "end": 2287.03, "word": " has", "probability": 0.91259765625}, {"start": 2287.03, "end": 2287.23, "word": " a", "probability": 0.404052734375}, {"start": 2287.23, "end": 
2287.49, "word": " standard", "probability": 0.919921875}, {"start": 2287.49, "end": 2287.87, "word": " normal", "probability": 0.8583984375}, {"start": 2287.87, "end": 2288.47, "word": " distribution", "probability": 0.90185546875}, {"start": 2288.47, "end": 2289.79, "word": " with", "probability": 0.86669921875}, {"start": 2289.79, "end": 2289.93, "word": " a", "probability": 0.8232421875}, {"start": 2289.93, "end": 2290.05, "word": " mean", "probability": 0.9765625}, {"start": 2290.05, "end": 2290.19, "word": " of", "probability": 0.96240234375}, {"start": 2290.19, "end": 2290.47, "word": " zero", "probability": 0.54345703125}, {"start": 2290.47, "end": 2291.75, "word": " and", "probability": 0.72412109375}, {"start": 2291.75, "end": 2292.09, "word": " standard", "probability": 0.71826171875}, {"start": 2292.09, "end": 2292.35, "word": " relation", "probability": 0.302978515625}, {"start": 2292.35, "end": 2292.51, "word": " of", "probability": 0.9482421875}, {"start": 2292.51, "end": 2292.75, "word": " one.", "probability": 0.70947265625}, {"start": 2294.13, "end": 2294.39, "word": " And", "probability": 0.54541015625}, {"start": 2294.39, "end": 2294.55, "word": " the", "probability": 0.86279296875}, {"start": 2294.55, "end": 2294.91, "word": " probability", "probability": 0.91064453125}, {"start": 2294.91, "end": 2295.25, "word": " Z", "probability": 0.57568359375}, {"start": 2295.25, "end": 2295.45, "word": " is", "probability": 0.8701171875}, {"start": 2295.45, "end": 2295.69, "word": " less", "probability": 0.9375}, {"start": 2295.69, "end": 2295.89, "word": " than", "probability": 0.9345703125}, {"start": 2295.89, "end": 2296.07, "word": " 1", "probability": 0.7041015625}, {"start": 2296.07, "end": 2296.59, "word": ".25.", "probability": 0.81884765625}], "temperature": 1.0}, {"id": 84, "seek": 232757, "start": 2298.51, "end": 2327.57, "text": " Straight forward, just go back to the table. Z less than 1.15, 1.15, 08, 8749. So that's correct. 8749 is correct. 
Any question? So let's move.", "tokens": [26908, 2128, 11, 445, 352, 646, 281, 264, 3199, 13, 1176, 1570, 813, 502, 13, 5211, 11, 502, 13, 5211, 11, 1958, 23, 11, 27990, 14938, 13, 407, 300, 311, 3006, 13, 27990, 14938, 307, 3006, 13, 2639, 1168, 30, 407, 718, 311, 1286, 13], "avg_logprob": -0.30078125712664233, "compression_ratio": 1.2, "no_speech_prob": 0.0, "words": [{"start": 2298.51, "end": 2299.01, "word": " Straight", "probability": 0.276611328125}, {"start": 2299.01, "end": 2299.33, "word": " forward,", "probability": 0.623046875}, {"start": 2299.45, "end": 2299.61, "word": " just", "probability": 0.87841796875}, {"start": 2299.61, "end": 2299.81, "word": " go", "probability": 0.95751953125}, {"start": 2299.81, "end": 2300.03, "word": " back", "probability": 0.880859375}, {"start": 2300.03, "end": 2300.17, "word": " to", "probability": 0.95556640625}, {"start": 2300.17, "end": 2300.27, "word": " the", "probability": 0.92236328125}, {"start": 2300.27, "end": 2300.55, "word": " table.", "probability": 0.74169921875}, {"start": 2301.07, "end": 2302.09, "word": " Z", "probability": 0.60693359375}, {"start": 2302.09, "end": 2302.33, "word": " less", "probability": 0.61376953125}, {"start": 2302.33, "end": 2302.49, "word": " than", "probability": 0.9150390625}, {"start": 2302.49, "end": 2302.73, "word": " 1", "probability": 0.8173828125}, {"start": 2302.73, "end": 2303.57, "word": ".15,", "probability": 0.947021484375}, {"start": 2304.33, "end": 2304.93, "word": " 1", "probability": 0.82666015625}, {"start": 2304.93, "end": 2305.93, "word": ".15,", "probability": 0.996337890625}, {"start": 2310.61, "end": 2311.35, "word": " 08,", "probability": 0.627685546875}, {"start": 2312.11, "end": 2314.19, "word": " 8749.", "probability": 0.931884765625}, {"start": 2315.11, "end": 2316.13, "word": " So", "probability": 0.81884765625}, {"start": 2316.13, "end": 2316.37, "word": " that's", "probability": 0.8876953125}, {"start": 2316.37, "end": 2316.71, "word": " correct.", 
"probability": 0.89794921875}, {"start": 2317.27, "end": 2318.27, "word": " 8749", "probability": 0.971923828125}, {"start": 2318.27, "end": 2319.55, "word": " is", "probability": 0.9208984375}, {"start": 2319.55, "end": 2319.93, "word": " correct.", "probability": 0.9052734375}, {"start": 2322.53, "end": 2323.21, "word": " Any", "probability": 0.89892578125}, {"start": 2323.21, "end": 2323.59, "word": " question?", "probability": 0.572265625}, {"start": 2325.69, "end": 2326.71, "word": " So", "probability": 0.58154296875}, {"start": 2326.71, "end": 2327.11, "word": " let's", "probability": 0.93310546875}, {"start": 2327.11, "end": 2327.57, "word": " move.", "probability": 0.943359375}], "temperature": 1.0}, {"id": 85, "seek": 235903, "start": 2330.87, "end": 2359.03, "text": " Chapter seven. So that's for the practice for chapter seven. Chapter seven talks about actually two things. One is called sampling and the other topic is sampling distributions. Mainly there are four learning objectives for this chapter.", "tokens": [18874, 3407, 13, 407, 300, 311, 337, 264, 3124, 337, 7187, 3407, 13, 18874, 3407, 6686, 466, 767, 732, 721, 13, 1485, 307, 1219, 21179, 293, 264, 661, 4829, 307, 21179, 37870, 13, 47468, 456, 366, 1451, 2539, 15961, 337, 341, 7187, 13], "avg_logprob": -0.22887073491107335, "compression_ratio": 1.576158940397351, "no_speech_prob": 0.0, "words": [{"start": 2330.87, "end": 2331.29, "word": " Chapter", "probability": 0.56787109375}, {"start": 2331.29, "end": 2331.61, "word": " seven.", "probability": 0.55322265625}, {"start": 2332.33, "end": 2332.51, "word": " So", "probability": 0.85205078125}, {"start": 2332.51, "end": 2333.09, "word": " that's", "probability": 0.87646484375}, {"start": 2333.09, "end": 2333.43, "word": " for", "probability": 0.4658203125}, {"start": 2333.43, "end": 2333.59, "word": " the", "probability": 0.478759765625}, {"start": 2333.59, "end": 2334.11, "word": " practice", "probability": 0.95263671875}, {"start": 2334.11, 
"end": 2335.93, "word": " for", "probability": 0.6943359375}, {"start": 2335.93, "end": 2336.19, "word": " chapter", "probability": 0.74560546875}, {"start": 2336.19, "end": 2336.59, "word": " seven.", "probability": 0.90576171875}, {"start": 2339.43, "end": 2340.27, "word": " Chapter", "probability": 0.931640625}, {"start": 2340.27, "end": 2340.75, "word": " seven", "probability": 0.90771484375}, {"start": 2340.75, "end": 2341.53, "word": " talks", "probability": 0.87158203125}, {"start": 2341.53, "end": 2342.03, "word": " about", "probability": 0.90185546875}, {"start": 2342.03, "end": 2342.99, "word": " actually", "probability": 0.837890625}, {"start": 2342.99, "end": 2345.07, "word": " two", "probability": 0.87353515625}, {"start": 2345.07, "end": 2345.47, "word": " things.", "probability": 0.865234375}, {"start": 2345.61, "end": 2345.77, "word": " One", "probability": 0.92822265625}, {"start": 2345.77, "end": 2345.99, "word": " is", "probability": 0.89697265625}, {"start": 2345.99, "end": 2346.31, "word": " called", "probability": 0.89599609375}, {"start": 2346.31, "end": 2346.75, "word": " sampling", "probability": 0.880859375}, {"start": 2346.75, "end": 2348.93, "word": " and", "probability": 0.53515625}, {"start": 2348.93, "end": 2349.09, "word": " the", "probability": 0.477294921875}, {"start": 2349.09, "end": 2349.37, "word": " other", "probability": 0.892578125}, {"start": 2349.37, "end": 2350.43, "word": " topic", "probability": 0.9423828125}, {"start": 2350.43, "end": 2351.53, "word": " is", "probability": 0.9150390625}, {"start": 2351.53, "end": 2351.93, "word": " sampling", "probability": 0.97314453125}, {"start": 2351.93, "end": 2353.19, "word": " distributions.", "probability": 0.447265625}, {"start": 2354.59, "end": 2355.01, "word": " Mainly", "probability": 0.662109375}, {"start": 2355.01, "end": 2355.43, "word": " there", "probability": 0.60205078125}, {"start": 2355.43, "end": 2355.61, "word": " are", "probability": 0.9423828125}, {"start": 
2355.61, "end": 2356.01, "word": " four", "probability": 0.943359375}, {"start": 2356.01, "end": 2356.49, "word": " learning", "probability": 0.9609375}, {"start": 2356.49, "end": 2357.11, "word": " objectives", "probability": 0.8662109375}, {"start": 2357.11, "end": 2358.43, "word": " for", "probability": 0.94580078125}, {"start": 2358.43, "end": 2358.65, "word": " this", "probability": 0.9423828125}, {"start": 2358.65, "end": 2359.03, "word": " chapter.", "probability": 0.8701171875}], "temperature": 1.0}, {"id": 86, "seek": 237453, "start": 2360.79, "end": 2374.53, "text": " Number one, we have to distinguish between different sampling techniques or methods. We'll talk about probability and non-probability sampling, and we have to distinguish between these two.", "tokens": [5118, 472, 11, 321, 362, 281, 20206, 1296, 819, 21179, 7512, 420, 7150, 13, 492, 603, 751, 466, 8482, 293, 2107, 12, 41990, 2310, 21179, 11, 293, 321, 362, 281, 20206, 1296, 613, 732, 13], "avg_logprob": -0.13183594163921145, "compression_ratio": 1.532258064516129, "no_speech_prob": 0.0, "words": [{"start": 2360.79, "end": 2361.11, "word": " Number", "probability": 0.82373046875}, {"start": 2361.11, "end": 2361.37, "word": " one,", "probability": 0.84033203125}, {"start": 2362.27, "end": 2362.87, "word": " we", "probability": 0.958984375}, {"start": 2362.87, "end": 2363.09, "word": " have", "probability": 0.9501953125}, {"start": 2363.09, "end": 2363.19, "word": " to", "probability": 0.9599609375}, {"start": 2363.19, "end": 2363.61, "word": " distinguish", "probability": 0.90185546875}, {"start": 2363.61, "end": 2364.19, "word": " between", "probability": 0.876953125}, {"start": 2364.19, "end": 2364.85, "word": " different", "probability": 0.88232421875}, {"start": 2364.85, "end": 2365.21, "word": " sampling", "probability": 0.94677734375}, {"start": 2365.21, "end": 2365.89, "word": " techniques", "probability": 0.91259765625}, {"start": 2365.89, "end": 2366.49, "word": " or", "probability": 
0.8759765625}, {"start": 2366.49, "end": 2366.89, "word": " methods.", "probability": 0.87841796875}, {"start": 2368.75, "end": 2369.31, "word": " We'll", "probability": 0.7392578125}, {"start": 2369.31, "end": 2369.51, "word": " talk", "probability": 0.90185546875}, {"start": 2369.51, "end": 2369.85, "word": " about", "probability": 0.90380859375}, {"start": 2369.85, "end": 2370.49, "word": " probability", "probability": 0.92431640625}, {"start": 2370.49, "end": 2371.15, "word": " and", "probability": 0.92724609375}, {"start": 2371.15, "end": 2371.37, "word": " non", "probability": 0.955078125}, {"start": 2371.37, "end": 2371.79, "word": "-probability", "probability": 0.8811848958333334}, {"start": 2371.79, "end": 2372.35, "word": " sampling,", "probability": 0.93701171875}, {"start": 2372.81, "end": 2372.95, "word": " and", "probability": 0.927734375}, {"start": 2372.95, "end": 2373.05, "word": " we", "probability": 0.9091796875}, {"start": 2373.05, "end": 2373.19, "word": " have", "probability": 0.9169921875}, {"start": 2373.19, "end": 2373.31, "word": " to", "probability": 0.80810546875}, {"start": 2373.31, "end": 2373.67, "word": " distinguish", "probability": 0.923828125}, {"start": 2373.67, "end": 2374.07, "word": " between", "probability": 0.85205078125}, {"start": 2374.07, "end": 2374.29, "word": " these", "probability": 0.86962890625}, {"start": 2374.29, "end": 2374.53, "word": " two.", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 87, "seek": 239492, "start": 2375.36, "end": 2394.92, "text": " The other objective for this chapter will be the concept of semantic distribution. Now, instead of using just X, as we did in chapter three, what's the probability of X, for example, less than 15? 
Instead of that, we'll talk about not X,", "tokens": [440, 661, 10024, 337, 341, 7187, 486, 312, 264, 3410, 295, 47982, 7316, 13, 823, 11, 2602, 295, 1228, 445, 1783, 11, 382, 321, 630, 294, 7187, 1045, 11, 437, 311, 264, 8482, 295, 1783, 11, 337, 1365, 11, 1570, 813, 2119, 30, 7156, 295, 300, 11, 321, 603, 751, 466, 406, 1783, 11], "avg_logprob": -0.2286931872367859, "compression_ratio": 1.391812865497076, "no_speech_prob": 0.0, "words": [{"start": 2375.36, "end": 2375.66, "word": " The", "probability": 0.802734375}, {"start": 2375.66, "end": 2375.9, "word": " other", "probability": 0.89990234375}, {"start": 2375.9, "end": 2376.38, "word": " objective", "probability": 0.9404296875}, {"start": 2376.38, "end": 2376.8, "word": " for", "probability": 0.92041015625}, {"start": 2376.8, "end": 2377.04, "word": " this", "probability": 0.9423828125}, {"start": 2377.04, "end": 2377.38, "word": " chapter", "probability": 0.84716796875}, {"start": 2377.38, "end": 2378.5, "word": " will", "probability": 0.8349609375}, {"start": 2378.5, "end": 2378.8, "word": " be", "probability": 0.9560546875}, {"start": 2378.8, "end": 2379.16, "word": " the", "probability": 0.88330078125}, {"start": 2379.16, "end": 2379.76, "word": " concept", "probability": 0.87841796875}, {"start": 2379.76, "end": 2380.2, "word": " of", "probability": 0.97314453125}, {"start": 2380.2, "end": 2380.54, "word": " semantic", "probability": 0.09124755859375}, {"start": 2380.54, "end": 2381.18, "word": " distribution.", "probability": 0.82373046875}, {"start": 2383.06, "end": 2383.26, "word": " Now,", "probability": 0.92822265625}, {"start": 2383.32, "end": 2383.72, "word": " instead", "probability": 0.90625}, {"start": 2383.72, "end": 2383.9, "word": " of", "probability": 0.96142578125}, {"start": 2383.9, "end": 2384.2, "word": " using", "probability": 0.8798828125}, {"start": 2384.2, "end": 2384.64, "word": " just", "probability": 0.91259765625}, {"start": 2384.64, "end": 2385.04, "word": " X,", "probability": 
0.580078125}, {"start": 2385.48, "end": 2385.8, "word": " as", "probability": 0.9609375}, {"start": 2385.8, "end": 2386.0, "word": " we", "probability": 0.962890625}, {"start": 2386.0, "end": 2386.54, "word": " did", "probability": 0.9560546875}, {"start": 2386.54, "end": 2387.6, "word": " in", "probability": 0.93115234375}, {"start": 2387.6, "end": 2387.86, "word": " chapter", "probability": 0.61962890625}, {"start": 2387.86, "end": 2388.7, "word": " three,", "probability": 0.568359375}, {"start": 2388.88, "end": 2389.02, "word": " what's", "probability": 0.765625}, {"start": 2389.02, "end": 2389.12, "word": " the", "probability": 0.9140625}, {"start": 2389.12, "end": 2389.28, "word": " probability", "probability": 0.379150390625}, {"start": 2389.28, "end": 2389.44, "word": " of", "probability": 0.91162109375}, {"start": 2389.44, "end": 2389.64, "word": " X,", "probability": 0.98046875}, {"start": 2389.86, "end": 2389.98, "word": " for", "probability": 0.94287109375}, {"start": 2389.98, "end": 2390.32, "word": " example,", "probability": 0.978515625}, {"start": 2390.6, "end": 2390.78, "word": " less", "probability": 0.93408203125}, {"start": 2390.78, "end": 2390.96, "word": " than", "probability": 0.94482421875}, {"start": 2390.96, "end": 2391.38, "word": " 15?", "probability": 0.8779296875}, {"start": 2392.32, "end": 2392.74, "word": " Instead", "probability": 0.88037109375}, {"start": 2392.74, "end": 2392.94, "word": " of", "probability": 0.96630859375}, {"start": 2392.94, "end": 2393.2, "word": " that,", "probability": 0.93798828125}, {"start": 2393.28, "end": 2393.44, "word": " we'll", "probability": 0.916015625}, {"start": 2393.44, "end": 2393.62, "word": " talk", "probability": 0.90087890625}, {"start": 2393.62, "end": 2393.98, "word": " about", "probability": 0.908203125}, {"start": 2393.98, "end": 2394.52, "word": " not", "probability": 0.791015625}, {"start": 2394.52, "end": 2394.92, "word": " X,", "probability": 0.88134765625}], "temperature": 1.0}, 
{"id": 88, "seek": 242132, "start": 2396.6, "end": 2421.32, "text": " the statistic itself, maybe X bar or the sample mean. Number three is to compute probabilities related to the sample mean, not the exact value of X. So here we are looking for V of X bar smaller than 1.5. So in this case, to compute these probabilities, we have to know that all the", "tokens": [264, 29588, 2564, 11, 1310, 1783, 2159, 420, 264, 6889, 914, 13, 5118, 1045, 307, 281, 14722, 33783, 4077, 281, 264, 6889, 914, 11, 406, 264, 1900, 2158, 295, 1783, 13, 407, 510, 321, 366, 1237, 337, 691, 295, 1783, 2159, 4356, 813, 502, 13, 20, 13, 407, 294, 341, 1389, 11, 281, 14722, 613, 33783, 11, 321, 362, 281, 458, 300, 439, 264], "avg_logprob": -0.20600961538461537, "compression_ratio": 1.5988700564971752, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 2396.6000000000004, "end": 2397.2400000000002, "word": " the", "probability": 0.3837890625}, {"start": 2397.2400000000002, "end": 2397.88, "word": " statistic", "probability": 0.58837890625}, {"start": 2397.88, "end": 2398.34, "word": " itself,", "probability": 0.69775390625}, {"start": 2398.74, "end": 2398.82, "word": " maybe", "probability": 0.86279296875}, {"start": 2398.82, "end": 2399.08, "word": " X", "probability": 0.5400390625}, {"start": 2399.08, "end": 2399.34, "word": " bar", "probability": 0.74658203125}, {"start": 2399.34, "end": 2399.54, "word": " or", "probability": 0.7763671875}, {"start": 2399.54, "end": 2399.7, "word": " the", "probability": 0.8447265625}, {"start": 2399.7, "end": 2399.9, "word": " sample", "probability": 0.8974609375}, {"start": 2399.9, "end": 2400.18, "word": " mean.", "probability": 0.96484375}, {"start": 2400.96, "end": 2401.34, "word": " Number", "probability": 0.8466796875}, {"start": 2401.34, "end": 2401.78, "word": " three", "probability": 0.81103515625}, {"start": 2401.78, "end": 2402.46, "word": " is", "probability": 0.38037109375}, {"start": 2402.46, "end": 2402.56, "word": " to", 
"probability": 0.93896484375}, {"start": 2402.56, "end": 2402.9, "word": " compute", "probability": 0.93408203125}, {"start": 2402.9, "end": 2403.5, "word": " probabilities", "probability": 0.8193359375}, {"start": 2403.5, "end": 2404.12, "word": " related", "probability": 0.939453125}, {"start": 2404.12, "end": 2404.46, "word": " to", "probability": 0.97021484375}, {"start": 2404.46, "end": 2404.64, "word": " the", "probability": 0.92333984375}, {"start": 2404.64, "end": 2405.0, "word": " sample", "probability": 0.87939453125}, {"start": 2405.0, "end": 2405.34, "word": " mean,", "probability": 0.93115234375}, {"start": 2405.86, "end": 2406.28, "word": " not", "probability": 0.94677734375}, {"start": 2406.28, "end": 2406.52, "word": " the", "probability": 0.68994140625}, {"start": 2406.52, "end": 2406.94, "word": " exact", "probability": 0.9228515625}, {"start": 2406.94, "end": 2407.34, "word": " value", "probability": 0.96142578125}, {"start": 2407.34, "end": 2407.52, "word": " of", "probability": 0.8603515625}, {"start": 2407.52, "end": 2407.72, "word": " X.", "probability": 0.953125}, {"start": 2408.34, "end": 2408.6, "word": " So", "probability": 0.9443359375}, {"start": 2408.6, "end": 2408.84, "word": " here", "probability": 0.7734375}, {"start": 2408.84, "end": 2409.5, "word": " we", "probability": 0.6396484375}, {"start": 2409.5, "end": 2409.64, "word": " are", "probability": 0.92578125}, {"start": 2409.64, "end": 2409.88, "word": " looking", "probability": 0.91748046875}, {"start": 2409.88, "end": 2410.2, "word": " for", "probability": 0.9541015625}, {"start": 2410.2, "end": 2410.52, "word": " V", "probability": 0.440673828125}, {"start": 2410.52, "end": 2410.64, "word": " of", "probability": 0.91259765625}, {"start": 2410.64, "end": 2410.9, "word": " X", "probability": 0.97998046875}, {"start": 2410.9, "end": 2411.5, "word": " bar", "probability": 0.92919921875}, {"start": 2411.5, "end": 2411.9, "word": " smaller", "probability": 0.66064453125}, {"start": 
2411.9, "end": 2412.14, "word": " than", "probability": 0.9443359375}, {"start": 2412.14, "end": 2412.34, "word": " 1", "probability": 0.81494140625}, {"start": 2412.34, "end": 2412.76, "word": ".5.", "probability": 0.99365234375}, {"start": 2413.46, "end": 2413.78, "word": " So", "probability": 0.943359375}, {"start": 2413.78, "end": 2414.02, "word": " in", "probability": 0.87158203125}, {"start": 2414.02, "end": 2414.28, "word": " this", "probability": 0.947265625}, {"start": 2414.28, "end": 2414.74, "word": " case,", "probability": 0.90576171875}, {"start": 2415.64, "end": 2415.78, "word": " to", "probability": 0.94091796875}, {"start": 2415.78, "end": 2416.12, "word": " compute", "probability": 0.9228515625}, {"start": 2416.12, "end": 2416.36, "word": " these", "probability": 0.61865234375}, {"start": 2416.36, "end": 2416.74, "word": " probabilities,", "probability": 0.9091796875}, {"start": 2417.82, "end": 2418.14, "word": " we", "probability": 0.94921875}, {"start": 2418.14, "end": 2418.36, "word": " have", "probability": 0.94775390625}, {"start": 2418.36, "end": 2418.46, "word": " to", "probability": 0.96923828125}, {"start": 2418.46, "end": 2418.6, "word": " know", "probability": 0.88818359375}, {"start": 2418.6, "end": 2418.92, "word": " that", "probability": 0.74755859375}, {"start": 2418.92, "end": 2420.12, "word": " all", "probability": 0.9345703125}, {"start": 2420.12, "end": 2421.32, "word": " the", "probability": 0.76220703125}], "temperature": 1.0}, {"id": 89, "seek": 244753, "start": 2423.35, "end": 2447.53, "text": " concepts in chapter three should be understood. Otherwise, you cannot do any problems here because here will depend actually how can we find the probabilities underneath the normal table. But instead of X, in this case, we have the sample mean X bar. So that's the difference between chapter six and the next chapter. 
So here,", "tokens": [10392, 294, 7187, 1045, 820, 312, 7320, 13, 10328, 11, 291, 2644, 360, 604, 2740, 510, 570, 510, 486, 5672, 767, 577, 393, 321, 915, 264, 33783, 7223, 264, 2710, 3199, 13, 583, 2602, 295, 1783, 11, 294, 341, 1389, 11, 321, 362, 264, 6889, 914, 1783, 2159, 13, 407, 300, 311, 264, 2649, 1296, 7187, 2309, 293, 264, 958, 7187, 13, 407, 510, 11], "avg_logprob": -0.2131865591262326, "compression_ratio": 1.5497630331753554, "no_speech_prob": 0.0, "words": [{"start": 2423.35, "end": 2423.95, "word": " concepts", "probability": 0.466796875}, {"start": 2423.95, "end": 2424.17, "word": " in", "probability": 0.87841796875}, {"start": 2424.17, "end": 2424.41, "word": " chapter", "probability": 0.5712890625}, {"start": 2424.41, "end": 2424.63, "word": " three", "probability": 0.7490234375}, {"start": 2424.63, "end": 2424.89, "word": " should", "probability": 0.9521484375}, {"start": 2424.89, "end": 2425.19, "word": " be", "probability": 0.95947265625}, {"start": 2425.19, "end": 2426.11, "word": " understood.", "probability": 0.92529296875}, {"start": 2426.53, "end": 2426.97, "word": " Otherwise,", "probability": 0.9443359375}, {"start": 2427.45, "end": 2427.75, "word": " you", "probability": 0.96240234375}, {"start": 2427.75, "end": 2428.15, "word": " cannot", "probability": 0.88623046875}, {"start": 2428.15, "end": 2429.35, "word": " do", "probability": 0.90283203125}, {"start": 2429.35, "end": 2429.55, "word": " any", "probability": 0.8408203125}, {"start": 2429.55, "end": 2429.97, "word": " problems", "probability": 0.8232421875}, {"start": 2429.97, "end": 2430.33, "word": " here", "probability": 0.8427734375}, {"start": 2430.33, "end": 2431.05, "word": " because", "probability": 0.332275390625}, {"start": 2431.05, "end": 2431.23, "word": " here", "probability": 0.7138671875}, {"start": 2431.23, "end": 2431.41, "word": " will", "probability": 0.288818359375}, {"start": 2431.41, "end": 2431.71, "word": " depend", "probability": 0.94775390625}, {"start": 
2431.71, "end": 2432.21, "word": " actually", "probability": 0.861328125}, {"start": 2432.21, "end": 2432.65, "word": " how", "probability": 0.62353515625}, {"start": 2432.65, "end": 2432.85, "word": " can", "probability": 0.93701171875}, {"start": 2432.85, "end": 2432.99, "word": " we", "probability": 0.94140625}, {"start": 2432.99, "end": 2433.39, "word": " find", "probability": 0.89306640625}, {"start": 2433.39, "end": 2434.05, "word": " the", "probability": 0.8818359375}, {"start": 2434.05, "end": 2434.55, "word": " probabilities", "probability": 0.88427734375}, {"start": 2434.55, "end": 2435.17, "word": " underneath", "probability": 0.94091796875}, {"start": 2435.17, "end": 2435.43, "word": " the", "probability": 0.78173828125}, {"start": 2435.43, "end": 2435.69, "word": " normal", "probability": 0.90283203125}, {"start": 2435.69, "end": 2436.05, "word": " table.", "probability": 0.83740234375}, {"start": 2436.53, "end": 2436.87, "word": " But", "probability": 0.95654296875}, {"start": 2436.87, "end": 2437.19, "word": " instead", "probability": 0.822265625}, {"start": 2437.19, "end": 2437.41, "word": " of", "probability": 0.96826171875}, {"start": 2437.41, "end": 2437.77, "word": " X,", "probability": 0.60888671875}, {"start": 2438.25, "end": 2438.55, "word": " in", "probability": 0.9287109375}, {"start": 2438.55, "end": 2438.73, "word": " this", "probability": 0.943359375}, {"start": 2438.73, "end": 2438.97, "word": " case,", "probability": 0.90673828125}, {"start": 2439.05, "end": 2439.15, "word": " we", "probability": 0.96435546875}, {"start": 2439.15, "end": 2439.51, "word": " have", "probability": 0.9453125}, {"start": 2439.51, "end": 2440.17, "word": " the", "probability": 0.90673828125}, {"start": 2440.17, "end": 2440.43, "word": " sample", "probability": 0.66650390625}, {"start": 2440.43, "end": 2440.67, "word": " mean", "probability": 0.63818359375}, {"start": 2440.67, "end": 2441.01, "word": " X", "probability": 0.72802734375}, {"start": 2441.01, 
"end": 2441.33, "word": " bar.", "probability": 0.89013671875}, {"start": 2441.81, "end": 2442.07, "word": " So", "probability": 0.9287109375}, {"start": 2442.07, "end": 2442.33, "word": " that's", "probability": 0.91748046875}, {"start": 2442.33, "end": 2442.45, "word": " the", "probability": 0.923828125}, {"start": 2442.45, "end": 2442.99, "word": " difference", "probability": 0.85888671875}, {"start": 2442.99, "end": 2443.63, "word": " between", "probability": 0.8759765625}, {"start": 2443.63, "end": 2444.89, "word": " chapter", "probability": 0.7216796875}, {"start": 2444.89, "end": 2445.37, "word": " six", "probability": 0.8857421875}, {"start": 2445.37, "end": 2445.95, "word": " and", "probability": 0.92626953125}, {"start": 2445.95, "end": 2446.07, "word": " the", "probability": 0.9140625}, {"start": 2446.07, "end": 2446.29, "word": " next", "probability": 0.94384765625}, {"start": 2446.29, "end": 2446.65, "word": " chapter.", "probability": 0.8681640625}, {"start": 2447.01, "end": 2447.25, "word": " So", "probability": 0.9580078125}, {"start": 2447.25, "end": 2447.53, "word": " here,", "probability": 0.837890625}], "temperature": 1.0}, {"id": 90, "seek": 247402, "start": 2448.58, "end": 2474.02, "text": " In chapter six, we are talking about the sampling distribution of the sample mean. Also, there is something new, which is called sample proportion. Now, let's see the difference between these two. If you remember, at the first class, we said there are two types of data. 
One is called numerical, which is quantitative.", "tokens": [682, 7187, 2309, 11, 321, 366, 1417, 466, 264, 21179, 7316, 295, 264, 6889, 914, 13, 2743, 11, 456, 307, 746, 777, 11, 597, 307, 1219, 6889, 16068, 13, 823, 11, 718, 311, 536, 264, 2649, 1296, 613, 732, 13, 759, 291, 1604, 11, 412, 264, 700, 1508, 11, 321, 848, 456, 366, 732, 3467, 295, 1412, 13, 1485, 307, 1219, 29054, 11, 597, 307, 27778, 13], "avg_logprob": -0.16624540791792028, "compression_ratio": 1.6030150753768844, "no_speech_prob": 0.0, "words": [{"start": 2448.58, "end": 2448.86, "word": " In", "probability": 0.80517578125}, {"start": 2448.86, "end": 2449.08, "word": " chapter", "probability": 0.63232421875}, {"start": 2449.08, "end": 2449.44, "word": " six,", "probability": 0.68359375}, {"start": 2449.52, "end": 2449.58, "word": " we", "probability": 0.9208984375}, {"start": 2449.58, "end": 2449.7, "word": " are", "probability": 0.86279296875}, {"start": 2449.7, "end": 2450.04, "word": " talking", "probability": 0.84912109375}, {"start": 2450.04, "end": 2450.46, "word": " about", "probability": 0.90185546875}, {"start": 2450.46, "end": 2451.38, "word": " the", "probability": 0.88720703125}, {"start": 2451.38, "end": 2451.9, "word": " sampling", "probability": 0.953125}, {"start": 2451.9, "end": 2452.54, "word": " distribution", "probability": 0.86279296875}, {"start": 2452.54, "end": 2453.06, "word": " of", "probability": 0.96923828125}, {"start": 2453.06, "end": 2453.24, "word": " the", "probability": 0.92041015625}, {"start": 2453.24, "end": 2453.52, "word": " sample", "probability": 0.89208984375}, {"start": 2453.52, "end": 2453.78, "word": " mean.", "probability": 0.755859375}, {"start": 2455.2, "end": 2455.78, "word": " Also,", "probability": 0.95361328125}, {"start": 2455.94, "end": 2456.02, "word": " there", "probability": 0.90869140625}, {"start": 2456.02, "end": 2456.22, "word": " is", "probability": 0.9462890625}, {"start": 2456.22, "end": 2456.64, "word": " something", "probability": 
0.865234375}, {"start": 2456.64, "end": 2457.04, "word": " new,", "probability": 0.87158203125}, {"start": 2457.42, "end": 2457.64, "word": " which", "probability": 0.947265625}, {"start": 2457.64, "end": 2457.78, "word": " is", "probability": 0.9482421875}, {"start": 2457.78, "end": 2458.18, "word": " called", "probability": 0.90087890625}, {"start": 2458.18, "end": 2459.54, "word": " sample", "probability": 0.67333984375}, {"start": 2459.54, "end": 2460.12, "word": " proportion.", "probability": 0.7998046875}, {"start": 2460.68, "end": 2460.82, "word": " Now,", "probability": 0.939453125}, {"start": 2460.86, "end": 2461.04, "word": " let's", "probability": 0.963134765625}, {"start": 2461.04, "end": 2461.18, "word": " see", "probability": 0.92041015625}, {"start": 2461.18, "end": 2461.32, "word": " the", "probability": 0.91845703125}, {"start": 2461.32, "end": 2461.68, "word": " difference", "probability": 0.8662109375}, {"start": 2461.68, "end": 2461.98, "word": " between", "probability": 0.87255859375}, {"start": 2461.98, "end": 2462.28, "word": " these", "probability": 0.85009765625}, {"start": 2462.28, "end": 2462.46, "word": " two.", "probability": 0.93310546875}, {"start": 2463.98, "end": 2464.56, "word": " If", "probability": 0.85498046875}, {"start": 2464.56, "end": 2464.68, "word": " you", "probability": 0.9609375}, {"start": 2464.68, "end": 2465.0, "word": " remember,", "probability": 0.87744140625}, {"start": 2465.62, "end": 2465.82, "word": " at", "probability": 0.78564453125}, {"start": 2465.82, "end": 2465.96, "word": " the", "probability": 0.91357421875}, {"start": 2465.96, "end": 2466.3, "word": " first", "probability": 0.88720703125}, {"start": 2466.3, "end": 2466.84, "word": " class,", "probability": 0.96044921875}, {"start": 2466.86, "end": 2467.0, "word": " we", "probability": 0.9580078125}, {"start": 2467.0, "end": 2467.34, "word": " said", "probability": 0.9501953125}, {"start": 2467.34, "end": 2468.96, "word": " there", "probability": 
0.74853515625}, {"start": 2468.96, "end": 2469.12, "word": " are", "probability": 0.9423828125}, {"start": 2469.12, "end": 2469.3, "word": " two", "probability": 0.93603515625}, {"start": 2469.3, "end": 2469.7, "word": " types", "probability": 0.82080078125}, {"start": 2469.7, "end": 2469.86, "word": " of", "probability": 0.96875}, {"start": 2469.86, "end": 2470.14, "word": " data.", "probability": 0.9345703125}, {"start": 2471.8, "end": 2472.38, "word": " One", "probability": 0.93310546875}, {"start": 2472.38, "end": 2472.54, "word": " is", "probability": 0.93896484375}, {"start": 2472.54, "end": 2472.76, "word": " called", "probability": 0.8857421875}, {"start": 2472.76, "end": 2473.16, "word": " numerical,", "probability": 0.427001953125}, {"start": 2473.38, "end": 2473.48, "word": " which", "probability": 0.927734375}, {"start": 2473.48, "end": 2473.58, "word": " is", "probability": 0.94189453125}, {"start": 2473.58, "end": 2474.02, "word": " quantitative.", "probability": 0.9375}], "temperature": 1.0}, {"id": 91, "seek": 250539, "start": 2476.79, "end": 2505.39, "text": " And the other one is qualitative. So we have numerical data, which means quantitative data, and qualitative data. Now, for numerical data, we can use the sample mean as a measure of central tendency, x bar. 
But for qualitative data, for example, if we have gender,", "tokens": [400, 264, 661, 472, 307, 31312, 13, 407, 321, 362, 29054, 1412, 11, 597, 1355, 27778, 1412, 11, 293, 31312, 1412, 13, 823, 11, 337, 29054, 1412, 11, 321, 393, 764, 264, 6889, 914, 382, 257, 3481, 295, 5777, 18187, 11, 2031, 2159, 13, 583, 337, 31312, 1412, 11, 337, 1365, 11, 498, 321, 362, 7898, 11], "avg_logprob": -0.17295258543614683, "compression_ratio": 1.6772151898734178, "no_speech_prob": 0.0, "words": [{"start": 2476.79, "end": 2477.11, "word": " And", "probability": 0.7861328125}, {"start": 2477.11, "end": 2477.21, "word": " the", "probability": 0.63720703125}, {"start": 2477.21, "end": 2477.37, "word": " other", "probability": 0.89306640625}, {"start": 2477.37, "end": 2477.57, "word": " one", "probability": 0.9111328125}, {"start": 2477.57, "end": 2477.71, "word": " is", "probability": 0.9384765625}, {"start": 2477.71, "end": 2478.09, "word": " qualitative.", "probability": 0.85400390625}, {"start": 2481.87, "end": 2482.55, "word": " So", "probability": 0.92236328125}, {"start": 2482.55, "end": 2482.73, "word": " we", "probability": 0.7646484375}, {"start": 2482.73, "end": 2482.85, "word": " have", "probability": 0.94580078125}, {"start": 2482.85, "end": 2483.25, "word": " numerical", "probability": 0.82666015625}, {"start": 2483.25, "end": 2483.75, "word": " data,", "probability": 0.95263671875}, {"start": 2484.07, "end": 2484.25, "word": " which", "probability": 0.95703125}, {"start": 2484.25, "end": 2484.53, "word": " means", "probability": 0.7841796875}, {"start": 2484.53, "end": 2485.21, "word": " quantitative", "probability": 0.91943359375}, {"start": 2485.21, "end": 2485.73, "word": " data,", "probability": 0.93115234375}, {"start": 2486.63, "end": 2489.13, "word": " and", "probability": 0.57958984375}, {"start": 2489.13, "end": 2489.61, "word": " qualitative", "probability": 0.93603515625}, {"start": 2489.61, "end": 2490.05, "word": " data.", "probability": 0.92138671875}, 
{"start": 2491.11, "end": 2491.51, "word": " Now,", "probability": 0.9482421875}, {"start": 2491.65, "end": 2491.89, "word": " for", "probability": 0.95263671875}, {"start": 2491.89, "end": 2492.29, "word": " numerical", "probability": 0.630859375}, {"start": 2492.29, "end": 2492.79, "word": " data,", "probability": 0.94140625}, {"start": 2492.87, "end": 2492.97, "word": " we", "probability": 0.9619140625}, {"start": 2492.97, "end": 2493.21, "word": " can", "probability": 0.94677734375}, {"start": 2493.21, "end": 2493.55, "word": " use", "probability": 0.89013671875}, {"start": 2493.55, "end": 2493.77, "word": " the", "probability": 0.80615234375}, {"start": 2493.77, "end": 2493.99, "word": " sample", "probability": 0.66650390625}, {"start": 2493.99, "end": 2494.77, "word": " mean", "probability": 0.96533203125}, {"start": 2494.77, "end": 2495.11, "word": " as", "probability": 0.919921875}, {"start": 2495.11, "end": 2495.23, "word": " a", "probability": 0.98583984375}, {"start": 2495.23, "end": 2495.49, "word": " measure", "probability": 0.8955078125}, {"start": 2495.49, "end": 2497.55, "word": " of", "probability": 0.9619140625}, {"start": 2497.55, "end": 2498.47, "word": " central", "probability": 0.712890625}, {"start": 2498.47, "end": 2499.05, "word": " tendency,", "probability": 0.931640625}, {"start": 2499.99, "end": 2500.13, "word": " x", "probability": 0.5009765625}, {"start": 2500.13, "end": 2500.39, "word": " bar.", "probability": 0.8271484375}, {"start": 2501.23, "end": 2501.65, "word": " But", "probability": 0.94482421875}, {"start": 2501.65, "end": 2501.83, "word": " for", "probability": 0.9345703125}, {"start": 2501.83, "end": 2502.25, "word": " qualitative", "probability": 0.84033203125}, {"start": 2502.25, "end": 2502.73, "word": " data,", "probability": 0.93310546875}, {"start": 2502.77, "end": 2502.89, "word": " for", "probability": 0.947265625}, {"start": 2502.89, "end": 2503.21, "word": " example,", "probability": 0.97509765625}, {"start": 
2503.29, "end": 2503.43, "word": " if", "probability": 0.94921875}, {"start": 2503.43, "end": 2503.57, "word": " we", "probability": 0.9541015625}, {"start": 2503.57, "end": 2503.93, "word": " have", "probability": 0.943359375}, {"start": 2503.93, "end": 2505.39, "word": " gender,", "probability": 0.83837890625}], "temperature": 1.0}, {"id": 92, "seek": 252426, "start": 2507.3, "end": 2524.26, "text": " Gender either males or females. In this case, we cannot say that the mean of females in this school is 1.2, for example. It doesn't make any sense. But it's better to say that", "tokens": [48039, 2139, 20776, 420, 21529, 13, 682, 341, 1389, 11, 321, 2644, 584, 300, 264, 914, 295, 21529, 294, 341, 1395, 307, 502, 13, 17, 11, 337, 1365, 13, 467, 1177, 380, 652, 604, 2020, 13, 583, 309, 311, 1101, 281, 584, 300], "avg_logprob": -0.12757457826625218, "compression_ratio": 1.375, "no_speech_prob": 0.0, "words": [{"start": 2507.3, "end": 2507.68, "word": " Gender", "probability": 0.79833984375}, {"start": 2507.68, "end": 2507.92, "word": " either", "probability": 0.54443359375}, {"start": 2507.92, "end": 2508.8, "word": " males", "probability": 0.87646484375}, {"start": 2508.8, "end": 2509.08, "word": " or", "probability": 0.96240234375}, {"start": 2509.08, "end": 2509.54, "word": " females.", "probability": 0.947265625}, {"start": 2510.58, "end": 2510.78, "word": " In", "probability": 0.9521484375}, {"start": 2510.78, "end": 2510.98, "word": " this", "probability": 0.94775390625}, {"start": 2510.98, "end": 2511.2, "word": " case,", "probability": 0.9130859375}, {"start": 2511.34, "end": 2511.98, "word": " we", "probability": 0.96484375}, {"start": 2511.98, "end": 2512.36, "word": " cannot", "probability": 0.84228515625}, {"start": 2512.36, "end": 2512.84, "word": " say", "probability": 0.9248046875}, {"start": 2512.84, "end": 2513.3, "word": " that", "probability": 0.92822265625}, {"start": 2513.3, "end": 2514.24, "word": " the", "probability": 0.8701171875}, {"start": 
2514.24, "end": 2514.56, "word": " mean", "probability": 0.9169921875}, {"start": 2514.56, "end": 2515.12, "word": " of", "probability": 0.970703125}, {"start": 2515.12, "end": 2516.0, "word": " females", "probability": 0.9404296875}, {"start": 2516.0, "end": 2516.26, "word": " in", "probability": 0.9345703125}, {"start": 2516.26, "end": 2516.46, "word": " this", "probability": 0.94677734375}, {"start": 2516.46, "end": 2516.86, "word": " school", "probability": 0.96337890625}, {"start": 2516.86, "end": 2517.16, "word": " is", "probability": 0.9501953125}, {"start": 2517.16, "end": 2519.04, "word": " 1", "probability": 0.92431640625}, {"start": 2519.04, "end": 2519.6, "word": ".2,", "probability": 0.994873046875}, {"start": 2519.68, "end": 2519.84, "word": " for", "probability": 0.94921875}, {"start": 2519.84, "end": 2520.18, "word": " example.", "probability": 0.97412109375}, {"start": 2520.72, "end": 2521.34, "word": " It", "probability": 0.966796875}, {"start": 2521.34, "end": 2521.54, "word": " doesn't", "probability": 0.95458984375}, {"start": 2521.54, "end": 2521.7, "word": " make", "probability": 0.94287109375}, {"start": 2521.7, "end": 2521.88, "word": " any", "probability": 0.91259765625}, {"start": 2521.88, "end": 2522.2, "word": " sense.", "probability": 0.822265625}, {"start": 2522.94, "end": 2523.2, "word": " But", "probability": 0.9501953125}, {"start": 2523.2, "end": 2523.38, "word": " it's", "probability": 0.937255859375}, {"start": 2523.38, "end": 2523.6, "word": " better", "probability": 0.92626953125}, {"start": 2523.6, "end": 2523.8, "word": " to", "probability": 0.96875}, {"start": 2523.8, "end": 2523.98, "word": " say", "probability": 0.94580078125}, {"start": 2523.98, "end": 2524.26, "word": " that", "probability": 0.92724609375}], "temperature": 1.0}, {"id": 93, "seek": 254482, "start": 2525.86, "end": 2544.82, "text": " IUG has, for example, 70% females. Makes sense. So this one is called percentage or proportion. 
So here we will talk about the sample proportion. So sample mean is used for numerical data.", "tokens": [44218, 38, 575, 11, 337, 1365, 11, 5285, 4, 21529, 13, 25245, 2020, 13, 407, 341, 472, 307, 1219, 9668, 420, 16068, 13, 407, 510, 321, 486, 751, 466, 264, 6889, 16068, 13, 407, 6889, 914, 307, 1143, 337, 29054, 1412, 13], "avg_logprob": -0.21438953211141187, "compression_ratio": 1.35, "no_speech_prob": 0.0, "words": [{"start": 2525.8599999999997, "end": 2526.66, "word": " IUG", "probability": 0.6337890625}, {"start": 2526.66, "end": 2527.46, "word": " has,", "probability": 0.91064453125}, {"start": 2527.7, "end": 2527.8, "word": " for", "probability": 0.953125}, {"start": 2527.8, "end": 2528.28, "word": " example,", "probability": 0.9736328125}, {"start": 2528.68, "end": 2529.3, "word": " 70", "probability": 0.91650390625}, {"start": 2529.3, "end": 2529.7, "word": "%", "probability": 0.82666015625}, {"start": 2529.7, "end": 2530.5, "word": " females.", "probability": 0.9677734375}, {"start": 2531.28, "end": 2531.52, "word": " Makes", "probability": 0.53173828125}, {"start": 2531.52, "end": 2531.74, "word": " sense.", "probability": 0.82177734375}, {"start": 2531.96, "end": 2531.96, "word": " So", "probability": 0.74267578125}, {"start": 2531.96, "end": 2532.48, "word": " this", "probability": 0.8349609375}, {"start": 2532.48, "end": 2532.8, "word": " one", "probability": 0.9326171875}, {"start": 2532.8, "end": 2532.98, "word": " is", "probability": 0.92578125}, {"start": 2532.98, "end": 2533.22, "word": " called", "probability": 0.8740234375}, {"start": 2533.22, "end": 2534.0, "word": " percentage", "probability": 0.8994140625}, {"start": 2534.0, "end": 2534.58, "word": " or", "probability": 0.60009765625}, {"start": 2534.58, "end": 2535.0, "word": " proportion.", "probability": 0.8046875}, {"start": 2535.64, "end": 2535.78, "word": " So", "probability": 0.9482421875}, {"start": 2535.78, "end": 2536.02, "word": " here", "probability": 0.83837890625}, {"start": 
2536.02, "end": 2536.16, "word": " we", "probability": 0.74609375}, {"start": 2536.16, "end": 2536.28, "word": " will", "probability": 0.57080078125}, {"start": 2536.28, "end": 2536.5, "word": " talk", "probability": 0.8935546875}, {"start": 2536.5, "end": 2536.98, "word": " about", "probability": 0.90380859375}, {"start": 2536.98, "end": 2537.88, "word": " the", "probability": 0.9208984375}, {"start": 2537.88, "end": 2538.48, "word": " sample", "probability": 0.84423828125}, {"start": 2538.48, "end": 2538.94, "word": " proportion.", "probability": 0.80810546875}, {"start": 2540.1, "end": 2540.42, "word": " So", "probability": 0.943359375}, {"start": 2540.42, "end": 2541.34, "word": " sample", "probability": 0.69580078125}, {"start": 2541.34, "end": 2541.72, "word": " mean", "probability": 0.90380859375}, {"start": 2541.72, "end": 2543.12, "word": " is", "probability": 0.9326171875}, {"start": 2543.12, "end": 2543.48, "word": " used", "probability": 0.916015625}, {"start": 2543.48, "end": 2543.88, "word": " for", "probability": 0.9541015625}, {"start": 2543.88, "end": 2544.28, "word": " numerical", "probability": 0.94580078125}, {"start": 2544.28, "end": 2544.82, "word": " data.", "probability": 0.93310546875}], "temperature": 1.0}, {"id": 94, "seek": 257321, "start": 2546.22, "end": 2573.22, "text": " But on the other hand, the sample proportion is used for qualitative data. In this chapter, we are going to know how can we compute the probabilities related either to the sample mean or to the sample proportion. 
The last one, the last objective for this chapter will be the importance of using something called central limit theorem.", "tokens": [583, 322, 264, 661, 1011, 11, 264, 6889, 16068, 307, 1143, 337, 31312, 1412, 13, 682, 341, 7187, 11, 321, 366, 516, 281, 458, 577, 393, 321, 14722, 264, 33783, 4077, 2139, 281, 264, 6889, 914, 420, 281, 264, 6889, 16068, 13, 440, 1036, 472, 11, 264, 1036, 10024, 337, 341, 7187, 486, 312, 264, 7379, 295, 1228, 746, 1219, 5777, 4948, 20904, 13], "avg_logprob": -0.1293269230769231, "compression_ratio": 1.6834170854271358, "no_speech_prob": 0.0, "words": [{"start": 2546.22, "end": 2546.58, "word": " But", "probability": 0.7939453125}, {"start": 2546.58, "end": 2546.78, "word": " on", "probability": 0.84375}, {"start": 2546.78, "end": 2546.9, "word": " the", "probability": 0.92333984375}, {"start": 2546.9, "end": 2547.12, "word": " other", "probability": 0.89013671875}, {"start": 2547.12, "end": 2547.7, "word": " hand,", "probability": 0.9228515625}, {"start": 2548.48, "end": 2548.64, "word": " the", "probability": 0.89501953125}, {"start": 2548.64, "end": 2549.16, "word": " sample", "probability": 0.83984375}, {"start": 2549.16, "end": 2549.68, "word": " proportion", "probability": 0.8291015625}, {"start": 2549.68, "end": 2550.02, "word": " is", "probability": 0.94189453125}, {"start": 2550.02, "end": 2550.3, "word": " used", "probability": 0.904296875}, {"start": 2550.3, "end": 2550.56, "word": " for", "probability": 0.95166015625}, {"start": 2550.56, "end": 2551.02, "word": " qualitative", "probability": 0.9267578125}, {"start": 2551.02, "end": 2551.52, "word": " data.", "probability": 0.9267578125}, {"start": 2552.04, "end": 2552.12, "word": " In", "probability": 0.88330078125}, {"start": 2552.12, "end": 2552.72, "word": " this", "probability": 0.94580078125}, {"start": 2552.72, "end": 2553.06, "word": " chapter,", "probability": 0.755859375}, {"start": 2553.28, "end": 2553.48, "word": " we", "probability": 0.95751953125}, {"start": 
2553.48, "end": 2553.6, "word": " are", "probability": 0.90234375}, {"start": 2553.6, "end": 2553.84, "word": " going", "probability": 0.94677734375}, {"start": 2553.84, "end": 2554.08, "word": " to", "probability": 0.970703125}, {"start": 2554.08, "end": 2554.3, "word": " know", "probability": 0.873046875}, {"start": 2554.3, "end": 2554.44, "word": " how", "probability": 0.8720703125}, {"start": 2554.44, "end": 2554.62, "word": " can", "probability": 0.74853515625}, {"start": 2554.62, "end": 2554.76, "word": " we", "probability": 0.953125}, {"start": 2554.76, "end": 2555.24, "word": " compute", "probability": 0.88232421875}, {"start": 2555.24, "end": 2556.36, "word": " the", "probability": 0.8984375}, {"start": 2556.36, "end": 2557.04, "word": " probabilities", "probability": 0.87890625}, {"start": 2557.04, "end": 2557.98, "word": " related", "probability": 0.93994140625}, {"start": 2557.98, "end": 2558.44, "word": " either", "probability": 0.90966796875}, {"start": 2558.44, "end": 2559.2, "word": " to", "probability": 0.9638671875}, {"start": 2559.2, "end": 2559.36, "word": " the", "probability": 0.92138671875}, {"start": 2559.36, "end": 2559.6, "word": " sample", "probability": 0.876953125}, {"start": 2559.6, "end": 2559.88, "word": " mean", "probability": 0.9482421875}, {"start": 2559.88, "end": 2560.12, "word": " or", "probability": 0.9130859375}, {"start": 2560.12, "end": 2560.26, "word": " to", "probability": 0.9150390625}, {"start": 2560.26, "end": 2560.42, "word": " the", "probability": 0.9111328125}, {"start": 2560.42, "end": 2560.66, "word": " sample", "probability": 0.84326171875}, {"start": 2560.66, "end": 2561.22, "word": " proportion.", "probability": 0.79345703125}, {"start": 2562.38, "end": 2562.88, "word": " The", "probability": 0.8818359375}, {"start": 2562.88, "end": 2563.68, "word": " last", "probability": 0.88037109375}, {"start": 2563.68, "end": 2564.06, "word": " one,", "probability": 0.58740234375}, {"start": 2564.28, "end": 2564.38, 
"word": " the", "probability": 0.9052734375}, {"start": 2564.38, "end": 2564.56, "word": " last", "probability": 0.85302734375}, {"start": 2564.56, "end": 2564.94, "word": " objective", "probability": 0.94482421875}, {"start": 2564.94, "end": 2565.36, "word": " for", "probability": 0.87744140625}, {"start": 2565.36, "end": 2565.76, "word": " this", "probability": 0.94091796875}, {"start": 2565.76, "end": 2566.26, "word": " chapter", "probability": 0.87451171875}, {"start": 2566.26, "end": 2566.5, "word": " will", "probability": 0.74560546875}, {"start": 2566.5, "end": 2567.38, "word": " be", "probability": 0.95458984375}, {"start": 2567.38, "end": 2568.92, "word": " the", "probability": 0.81201171875}, {"start": 2568.92, "end": 2569.7, "word": " importance", "probability": 0.96728515625}, {"start": 2569.7, "end": 2570.24, "word": " of", "probability": 0.96728515625}, {"start": 2570.24, "end": 2570.72, "word": " using", "probability": 0.93505859375}, {"start": 2570.72, "end": 2571.54, "word": " something", "probability": 0.87158203125}, {"start": 2571.54, "end": 2572.06, "word": " called", "probability": 0.89013671875}, {"start": 2572.06, "end": 2572.58, "word": " central", "probability": 0.5078125}, {"start": 2572.58, "end": 2572.84, "word": " limit", "probability": 0.9248046875}, {"start": 2572.84, "end": 2573.22, "word": " theorem.", "probability": 0.802734375}], "temperature": 1.0}, {"id": 95, "seek": 259530, "start": 2574.38, "end": 2595.3, "text": " Under specific conditions, we'll know how to use this theorem. So these are the mainly four objectives for this chapter. 
So you have to make sure that you understand 100% how to compute all types of probabilities under the normal curve.", "tokens": [6974, 2685, 4487, 11, 321, 603, 458, 577, 281, 764, 341, 20904, 13, 407, 613, 366, 264, 8704, 1451, 15961, 337, 341, 7187, 13, 407, 291, 362, 281, 652, 988, 300, 291, 1223, 2319, 4, 577, 281, 14722, 439, 3467, 295, 33783, 833, 264, 2710, 7605, 13], "avg_logprob": -0.14786783140152693, "compression_ratio": 1.427710843373494, "no_speech_prob": 0.0, "words": [{"start": 2574.38, "end": 2574.82, "word": " Under", "probability": 0.6220703125}, {"start": 2574.82, "end": 2575.48, "word": " specific", "probability": 0.888671875}, {"start": 2575.48, "end": 2576.12, "word": " conditions,", "probability": 0.88427734375}, {"start": 2576.28, "end": 2576.44, "word": " we'll", "probability": 0.6036376953125}, {"start": 2576.44, "end": 2576.56, "word": " know", "probability": 0.88037109375}, {"start": 2576.56, "end": 2576.74, "word": " how", "probability": 0.92822265625}, {"start": 2576.74, "end": 2576.88, "word": " to", "probability": 0.9697265625}, {"start": 2576.88, "end": 2577.22, "word": " use", "probability": 0.87451171875}, {"start": 2577.22, "end": 2578.36, "word": " this", "probability": 0.9326171875}, {"start": 2578.36, "end": 2579.0, "word": " theorem.", "probability": 0.65625}, {"start": 2579.34, "end": 2579.52, "word": " So", "probability": 0.890625}, {"start": 2579.52, "end": 2579.74, "word": " these", "probability": 0.7802734375}, {"start": 2579.74, "end": 2579.98, "word": " are", "probability": 0.94482421875}, {"start": 2579.98, "end": 2580.22, "word": " the", "probability": 0.654296875}, {"start": 2580.22, "end": 2581.4, "word": " mainly", "probability": 0.783203125}, {"start": 2581.4, "end": 2581.78, "word": " four", "probability": 0.880859375}, {"start": 2581.78, "end": 2582.48, "word": " objectives", "probability": 0.884765625}, {"start": 2582.48, "end": 2583.24, "word": " for", "probability": 0.943359375}, {"start": 2583.24, "end": 
2583.5, "word": " this", "probability": 0.92529296875}, {"start": 2583.5, "end": 2583.94, "word": " chapter.", "probability": 0.87158203125}, {"start": 2584.28, "end": 2584.4, "word": " So", "probability": 0.93603515625}, {"start": 2584.4, "end": 2584.48, "word": " you", "probability": 0.80029296875}, {"start": 2584.48, "end": 2584.62, "word": " have", "probability": 0.9462890625}, {"start": 2584.62, "end": 2585.16, "word": " to", "probability": 0.97021484375}, {"start": 2585.16, "end": 2585.34, "word": " make", "probability": 0.939453125}, {"start": 2585.34, "end": 2585.5, "word": " sure", "probability": 0.9111328125}, {"start": 2585.5, "end": 2585.8, "word": " that", "probability": 0.93505859375}, {"start": 2585.8, "end": 2585.98, "word": " you", "probability": 0.9638671875}, {"start": 2585.98, "end": 2586.86, "word": " understand", "probability": 0.82470703125}, {"start": 2586.86, "end": 2588.3, "word": " 100", "probability": 0.8349609375}, {"start": 2588.3, "end": 2588.96, "word": "%", "probability": 0.76318359375}, {"start": 2588.96, "end": 2590.3, "word": " how", "probability": 0.935546875}, {"start": 2590.3, "end": 2590.5, "word": " to", "probability": 0.97265625}, {"start": 2590.5, "end": 2590.9, "word": " compute", "probability": 0.9140625}, {"start": 2590.9, "end": 2592.68, "word": " all", "probability": 0.912109375}, {"start": 2592.68, "end": 2593.24, "word": " types", "probability": 0.82568359375}, {"start": 2593.24, "end": 2593.46, "word": " of", "probability": 0.96826171875}, {"start": 2593.46, "end": 2593.96, "word": " probabilities", "probability": 0.9560546875}, {"start": 2593.96, "end": 2594.5, "word": " under", "probability": 0.92236328125}, {"start": 2594.5, "end": 2594.7, "word": " the", "probability": 0.90185546875}, {"start": 2594.7, "end": 2594.92, "word": " normal", "probability": 0.85888671875}, {"start": 2594.92, "end": 2595.3, "word": " curve.", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 96, "seek": 261798, "start": 
2595.95, "end": 2617.99, "text": " either straightforward calculations or backward calculations, I mean inverse calculation. If the probability is given, how can we find the corresponding X value? Or how can we find the value by itself, the value of the probability by itself? So next time, Insha'Allah, Sunday, we are going to start", "tokens": [2139, 15325, 20448, 420, 23897, 20448, 11, 286, 914, 17340, 17108, 13, 759, 264, 8482, 307, 2212, 11, 577, 393, 321, 915, 264, 11760, 1783, 2158, 30, 1610, 577, 393, 321, 915, 264, 2158, 538, 2564, 11, 264, 2158, 295, 264, 8482, 538, 2564, 30, 407, 958, 565, 11, 9442, 1641, 6, 26022, 11, 7776, 11, 321, 366, 516, 281, 722], "avg_logprob": -0.21509576444664308, "compression_ratio": 1.6988636363636365, "no_speech_prob": 0.0, "words": [{"start": 2595.95, "end": 2596.43, "word": " either", "probability": 0.4375}, {"start": 2596.43, "end": 2597.55, "word": " straightforward", "probability": 0.81640625}, {"start": 2597.55, "end": 2598.61, "word": " calculations", "probability": 0.91259765625}, {"start": 2598.61, "end": 2599.09, "word": " or", "probability": 0.88525390625}, {"start": 2599.09, "end": 2599.51, "word": " backward", "probability": 0.5966796875}, {"start": 2599.51, "end": 2600.41, "word": " calculations,", "probability": 0.826171875}, {"start": 2600.55, "end": 2600.59, "word": " I", "probability": 0.970703125}, {"start": 2600.59, "end": 2600.69, "word": " mean", "probability": 0.96826171875}, {"start": 2600.69, "end": 2601.03, "word": " inverse", "probability": 0.66455078125}, {"start": 2601.03, "end": 2601.51, "word": " calculation.", "probability": 0.8759765625}, {"start": 2601.95, "end": 2602.41, "word": " If", "probability": 0.97216796875}, {"start": 2602.41, "end": 2602.53, "word": " the", "probability": 0.92431640625}, {"start": 2602.53, "end": 2602.83, "word": " probability", "probability": 0.9189453125}, {"start": 2602.83, "end": 2603.03, "word": " is", "probability": 0.927734375}, {"start": 2603.03, "end": 
2603.25, "word": " given,", "probability": 0.91357421875}, {"start": 2603.33, "end": 2603.43, "word": " how", "probability": 0.9248046875}, {"start": 2603.43, "end": 2603.57, "word": " can", "probability": 0.94580078125}, {"start": 2603.57, "end": 2603.71, "word": " we", "probability": 0.93505859375}, {"start": 2603.71, "end": 2603.95, "word": " find", "probability": 0.90771484375}, {"start": 2603.95, "end": 2604.13, "word": " the", "probability": 0.783203125}, {"start": 2604.13, "end": 2604.61, "word": " corresponding", "probability": 0.5546875}, {"start": 2604.61, "end": 2604.93, "word": " X", "probability": 0.47021484375}, {"start": 2604.93, "end": 2605.27, "word": " value?", "probability": 0.81494140625}, {"start": 2605.79, "end": 2605.91, "word": " Or", "probability": 0.94140625}, {"start": 2605.91, "end": 2606.05, "word": " how", "probability": 0.9169921875}, {"start": 2606.05, "end": 2607.43, "word": " can", "probability": 0.9326171875}, {"start": 2607.43, "end": 2607.59, "word": " we", "probability": 0.95166015625}, {"start": 2607.59, "end": 2607.99, "word": " find", "probability": 0.87158203125}, {"start": 2607.99, "end": 2608.63, "word": " the", "probability": 0.92041015625}, {"start": 2608.63, "end": 2608.89, "word": " value", "probability": 0.97998046875}, {"start": 2608.89, "end": 2609.07, "word": " by", "probability": 0.767578125}, {"start": 2609.07, "end": 2609.45, "word": " itself,", "probability": 0.81298828125}, {"start": 2610.21, "end": 2610.41, "word": " the", "probability": 0.9208984375}, {"start": 2610.41, "end": 2610.67, "word": " value", "probability": 0.9755859375}, {"start": 2610.67, "end": 2610.81, "word": " of", "probability": 0.96923828125}, {"start": 2610.81, "end": 2610.93, "word": " the", "probability": 0.9169921875}, {"start": 2610.93, "end": 2611.23, "word": " probability", "probability": 0.9453125}, {"start": 2611.23, "end": 2611.47, "word": " by", "probability": 0.9580078125}, {"start": 2611.47, "end": 2611.75, "word": " 
itself?", "probability": 0.83837890625}, {"start": 2612.61, "end": 2612.99, "word": " So", "probability": 0.91845703125}, {"start": 2612.99, "end": 2613.33, "word": " next", "probability": 0.869140625}, {"start": 2613.33, "end": 2613.59, "word": " time,", "probability": 0.751953125}, {"start": 2613.73, "end": 2613.81, "word": " Insha", "probability": 0.700439453125}, {"start": 2613.81, "end": 2613.93, "word": "'Allah,", "probability": 0.6016845703125}, {"start": 2614.35, "end": 2614.99, "word": " Sunday,", "probability": 0.64111328125}, {"start": 2615.61, "end": 2615.79, "word": " we", "probability": 0.9609375}, {"start": 2615.79, "end": 2615.93, "word": " are", "probability": 0.89892578125}, {"start": 2615.93, "end": 2616.35, "word": " going", "probability": 0.94482421875}, {"start": 2616.35, "end": 2617.41, "word": " to", "probability": 0.96875}, {"start": 2617.41, "end": 2617.99, "word": " start", "probability": 0.8916015625}], "temperature": 1.0}, {"id": 97, "seek": 263379, "start": 2619.83, "end": 2633.79, "text": " Chapter seven, why sample? Why most of the time in our researches we are using the sample instead of using the population? Any questions? 
So that's all.", "tokens": [18874, 3407, 11, 983, 6889, 30, 1545, 881, 295, 264, 565, 294, 527, 2132, 279, 321, 366, 1228, 264, 6889, 2602, 295, 1228, 264, 4415, 30, 2639, 1651, 30, 407, 300, 311, 439, 13], "avg_logprob": -0.16517856972558156, "compression_ratio": 1.2857142857142858, "no_speech_prob": 0.0, "words": [{"start": 2619.83, "end": 2620.17, "word": " Chapter", "probability": 0.482421875}, {"start": 2620.17, "end": 2620.57, "word": " seven,", "probability": 0.69677734375}, {"start": 2621.43, "end": 2621.59, "word": " why", "probability": 0.7392578125}, {"start": 2621.59, "end": 2622.03, "word": " sample?", "probability": 0.8173828125}, {"start": 2622.55, "end": 2622.81, "word": " Why", "probability": 0.86767578125}, {"start": 2622.81, "end": 2623.11, "word": " most", "probability": 0.86376953125}, {"start": 2623.11, "end": 2623.25, "word": " of", "probability": 0.96875}, {"start": 2623.25, "end": 2623.35, "word": " the", "probability": 0.9228515625}, {"start": 2623.35, "end": 2623.71, "word": " time", "probability": 0.8916015625}, {"start": 2623.71, "end": 2624.49, "word": " in", "probability": 0.76025390625}, {"start": 2624.49, "end": 2624.73, "word": " our", "probability": 0.892578125}, {"start": 2624.73, "end": 2625.31, "word": " researches", "probability": 0.7373046875}, {"start": 2625.31, "end": 2625.49, "word": " we", "probability": 0.6162109375}, {"start": 2625.49, "end": 2625.73, "word": " are", "probability": 0.94580078125}, {"start": 2625.73, "end": 2626.15, "word": " using", "probability": 0.93798828125}, {"start": 2626.15, "end": 2626.97, "word": " the", "probability": 0.91015625}, {"start": 2626.97, "end": 2627.35, "word": " sample", "probability": 0.88916015625}, {"start": 2627.35, "end": 2627.87, "word": " instead", "probability": 0.873046875}, {"start": 2627.87, "end": 2628.31, "word": " of", "probability": 0.966796875}, {"start": 2628.31, "end": 2628.77, "word": " using", "probability": 0.93017578125}, {"start": 2628.77, "end": 2629.63, 
"word": " the", "probability": 0.91796875}, {"start": 2629.63, "end": 2630.13, "word": " population?", "probability": 0.96240234375}, {"start": 2630.91, "end": 2631.15, "word": " Any", "probability": 0.91259765625}, {"start": 2631.15, "end": 2631.59, "word": " questions?", "probability": 0.95556640625}, {"start": 2632.89, "end": 2633.25, "word": " So", "probability": 0.95361328125}, {"start": 2633.25, "end": 2633.61, "word": " that's", "probability": 0.931640625}, {"start": 2633.61, "end": 2633.79, "word": " all.", "probability": 0.728515625}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 2634.4895, "duration_after_vad": 2464.7768749999905} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..8f33d171f75f8490bd857c905940543d2524bc76 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/ge8DRGM5-04_raw.srt @@ -0,0 +1,1748 @@ +1 +00:00:05,430 --> 00:00:08,470 +We are going to do some practice problems for + +2 +00:00:08,470 --> 00:00:16,890 +chapter 6. First problem will be 6.36 on page 226. + +3 +00:00:19,930 --> 00:00:22,510 +So open page 226. 
+ +4 +00:00:26,040 --> 00:00:30,020 +Insha'Allah today we'll discuss some problems from + +5 +00:00:30,020 --> 00:00:35,200 +chapter 6 and one of these problems will be 6.36 + +6 +00:00:35,200 --> 00:00:46,420 +so let's read 36 it it's asked about suppose that + +7 +00:00:46,420 --> 00:00:51,600 +the download time is normally distributed with + +8 +00:00:51,600 --> 00:00:55,380 +mean and the mean is given and the standard + +9 +00:00:55,380 --> 00:00:58,940 +deviation is given in this case the mean equals 0 + +10 +00:00:58,940 --> 00:01:06,260 +.8 second so again the mean is 0.8 where sigma + +11 +00:01:06,260 --> 00:01:12,340 +equals 0.2 seconds so mu is 0.8 seconds that the + +12 +00:01:12,340 --> 00:01:16,560 +time required to download from the internet an + +13 +00:01:16,560 --> 00:01:20,020 +image file or something like that Now he asks + +14 +00:01:20,020 --> 00:01:22,560 +about what's the probability that a download time + +15 +00:01:22,560 --> 00:01:27,900 +is part A less + +16 +00:01:27,900 --> 00:01:31,300 +than one second. So what's the probability of + +17 +00:01:31,300 --> 00:01:39,360 +getting time less than one second? Part B, what's + +18 +00:01:39,360 --> 00:01:44,160 +the probability that a download time is between 0 + +19 +00:01:44,160 --> 00:01:46,520 +.5 and 1.5 seconds? + +20 +00:01:51,810 --> 00:02:02,510 +and 1.5 seconds but see above 0.5 seconds last + +21 +00:02:02,510 --> 00:02:07,230 +time i think we did some something + +22 +00:02:07,230 --> 00:02:10,550 +like that so for the first one b of x smaller than + +23 +00:02:10,550 --> 00:02:13,400 +one The first step, we have to compute the z + +24 +00:02:13,400 --> 00:02:19,080 +-score. And that's straightforward. Just z less + +25 +00:02:19,080 --> 00:02:28,720 +than 1 minus 0.8 divided by sigma. So 1 minus 0.8 + +26 +00:02:28,720 --> 00:02:33,480 +is 0.2. So B of z less than 1. 
+ +27 +00:02:36,620 --> 00:02:41,150 +Now by using the normal table, we have The normal + +28 +00:02:41,150 --> 00:02:44,990 +table, I think we did many times for this table. + +29 +00:02:46,930 --> 00:02:48,110 +Now for 1. + +30 +00:02:52,290 --> 00:03:01,010 +So 1 is 0.8413. So it's 0.8413. This is the + +31 +00:03:01,010 --> 00:03:05,850 +probability that a download time is smaller than + +32 +00:03:05,850 --> 00:03:06,010 +1. + +33 +00:03:14,230 --> 00:03:19,690 +Now the probability is between two values, 0.5 and + +34 +00:03:19,690 --> 00:03:25,570 +1.5. So in this case, we have to find the z values + +35 +00:03:25,570 --> 00:03:32,170 +for the corresponding x, 0.5 and 1.5. So in this + +36 +00:03:32,170 --> 00:03:39,080 +case, this one equals. 0.5 minus 0.8 divided by + +37 +00:03:39,080 --> 00:03:47,420 +sigma smaller than x is transformed to this form. + +38 +00:03:52,920 --> 00:03:57,480 +Smaller than 1.5 minus 0.8 divided by 0.2. + +39 +00:04:03,500 --> 00:04:10,320 +Exactly, minus 1.5. Smaller than z, smaller than 1 + +40 +00:04:10,320 --> 00:04:15,640 +.5 minus 0.8 is 0.7, divided by 0.2 is 3.4. + +41 +00:04:19,320 --> 00:04:23,640 +So, we are looking now for the probability of z + +42 +00:04:23,640 --> 00:04:28,740 +-score between minus 0.5 and smaller than 0.5. + +43 +00:04:29,460 --> 00:04:32,780 +Now, if we are looking for this kind of + +44 +00:04:32,780 --> 00:04:35,760 +probability, we have to find the probability of z + +45 +00:04:35,760 --> 00:04:43,640 +smaller than 3.5 minus z smaller than negative 1 + +46 +00:04:43,640 --> 00:04:51,720 +.5. Now, if we go back to the table we have. Now 3 + +47 +00:04:51,720 --> 00:04:56,640 +.5 all the way down up to the end of this round. + +48 +00:04:58,300 --> 00:05:05,260 +The table I have, the maximum value of Z is 3.4 + +49 +00:05:05,260 --> 00:05:15,900 +all the way up to 9. 
So that means I have only B + +50 +00:05:15,900 --> 00:05:16,360 +of Z + +51 +00:05:20,930 --> 00:05:26,590 +less than this value. And the corresponding area + +52 +00:05:26,590 --> 00:05:28,870 +is 9998. + +53 +00:05:31,910 --> 00:05:34,430 +But for this specific example, we are looking for + +54 +00:05:34,430 --> 00:05:38,090 +V of Z smaller than 3.5, which is roughly equal to + +55 +00:05:38,090 --> 00:05:42,270 +3.49. So the answer is around this value. + +56 +00:05:45,330 --> 00:05:55,950 +9998 approximately 9998 excuse me 9998 + +57 +00:05:55,950 --> 00:06:01,030 +this one minus + +58 +00:06:27,000 --> 00:06:32,670 +Again, we are looking for minus 1.5 up to 3.5 this + +59 +00:06:32,670 --> 00:06:40,030 +area now the dashed area which is between minus 1 + +60 +00:06:40,030 --> 00:06:44,010 +.5 all the way up to 3.5 equals the area to the + +61 +00:06:44,010 --> 00:06:49,930 +left of 3.5 which is B of Z less than 3.5 minus + +62 +00:06:49,930 --> 00:06:58,310 +the area to the left of negative 1 minus Z greater + +63 +00:06:58,310 --> 00:07:03,450 +than 1.5 this value if we are to compute the + +64 +00:07:03,450 --> 00:07:08,000 +probability of Z smaller than negative 1.5. Either + +65 +00:07:08,000 --> 00:07:12,240 +you can use the normal table directly, but the + +66 +00:07:12,240 --> 00:07:15,240 +other page were negative these scores. In this + +67 +00:07:15,240 --> 00:07:21,680 +case, minus 1.5. 0668. 0668. + +68 +00:07:25,420 --> 00:07:34,280 +Okay. Now imagine that you only have the positive + +69 +00:07:34,280 --> 00:07:35,520 +z-scope. + +70 +00:07:38,160 --> 00:07:42,060 +Again, suppose that the table you have in the exam + +71 +00:07:42,060 --> 00:07:47,700 +is just the positive values of z. How can we find + +72 +00:07:47,700 --> 00:07:51,620 +the probability of z greater than minus 1.5, + +73 +00:07:51,740 --> 00:07:58,060 +smaller than minus 1.5? In this case, b of z is + +74 +00:07:58,060 --> 00:08:02,560 +less than negative 1.5. 
The area to the left here + +75 +00:08:03,400 --> 00:08:08,840 +is the same as the area to the right of the same + +76 +00:08:08,840 --> 00:08:12,040 +value, but positive one. So this is equal to B of + +77 +00:08:12,040 --> 00:08:18,960 +Z greater than 1.5. Because the same area, minus 1 + +78 +00:08:18,960 --> 00:08:22,440 +.5 all the way up to minus infinity, that equals + +79 +00:08:22,440 --> 00:08:25,560 +from 1.5 to infinity because of symmetric + +80 +00:08:25,560 --> 00:08:30,950 +distribution. Now, B of Z greater than 1.5, The + +81 +00:08:30,950 --> 00:08:33,970 +table we have here gives the area to the left of + +82 +00:08:33,970 --> 00:08:43,410 +z. So this is 1 minus b of z less than 1.5. Now 1 + +83 +00:08:43,410 --> 00:08:46,650 +.5 from this table is 9332. + +84 +00:08:50,850 --> 00:08:56,030 +Okay, so that will get 0668, + +85 +00:08:56,470 --> 00:08:58,650 +which is the same result as we got directly from + +86 +00:08:58,650 --> 00:09:02,440 +the negative z table. but make sure that in the + +87 +00:09:02,440 --> 00:09:06,800 +exam I will give you just only the positive table + +88 +00:09:06,800 --> 00:09:12,060 +now subtract these two values you will get the + +89 +00:09:12,060 --> 00:09:22,600 +answer for this for part B 9 9 9 3 0 9 3 3 0 this + +90 +00:09:22,600 --> 00:09:29,020 +is the final result for this example now part C + +91 +00:09:31,570 --> 00:09:34,430 +What's the probability that the download time will + +92 +00:09:34,430 --> 00:09:36,370 +take above 0.5 seconds? + +93 +00:09:40,390 --> 00:09:49,050 +So we are looking again, B of X above 0.5. Similar + +94 +00:09:49,050 --> 00:09:51,230 +way as we did in part A. + +95 +00:09:54,090 --> 00:10:02,850 +So B of Z, 0.5 minus the mean. Divide by 6. + +96 +00:10:06,890 --> 00:10:13,850 +So B of Z greater + +97 +00:10:13,850 --> 00:10:20,330 +than negative 1.5. Now B of Z greater than minus 1 + +98 +00:10:20,330 --> 00:10:26,810 +.5. 
It means we are looking for the area above + +99 +00:10:26,810 --> 00:10:38,370 +minus 1.5 So + +100 +00:10:38,370 --> 00:10:42,850 +this area Now + +101 +00:10:42,850 --> 00:10:50,850 +the area above 1 minus 0.5 Equals 1 minus + +102 +00:10:55,050 --> 00:10:57,890 +B of Z less than negative 115. + +103 +00:11:00,210 --> 00:11:06,510 +As we did here, this probability is + +104 +00:11:06,510 --> 00:11:08,090 +0668. + +105 +00:11:09,770 --> 00:11:12,570 +So the answer again is 9334. + +106 +00:11:16,610 --> 00:11:24,590 +So that's for part C. Any question? Now, part D. + +107 +00:11:27,050 --> 00:11:35,910 +And they ask about 99%. 99 + +108 +00:11:35,910 --> 00:11:44,170 +% of the + +109 +00:11:44,170 --> 00:11:46,070 +download times. + +110 +00:11:55,380 --> 00:11:56,920 +How many seconds? + +111 +00:12:11,420 --> 00:12:15,420 +Exactly, in this case, the probability is given, + +112 +00:12:15,540 --> 00:12:16,820 +which is 99%. + +113 +00:12:19,190 --> 00:12:25,150 +Now, if 99% of the download times are above how + +114 +00:12:25,150 --> 00:12:29,090 +many seconds? So, in this case, we are looking for + +115 +00:12:29,090 --> 00:12:34,350 +the value, for example, for A, such that B of X + +116 +00:12:34,350 --> 00:12:41,130 +greater than A equals 99%. Now, in this type of + +117 +00:12:41,130 --> 00:12:44,270 +problems, we have to make a graph first in order + +118 +00:12:44,270 --> 00:12:48,080 +to determine the location of A. because it may be + +119 +00:12:48,080 --> 00:12:50,600 +to the right or to the left side. It depends + +120 +00:12:50,600 --> 00:12:54,840 +actually on two things. Number one, the size, the + +121 +00:12:54,840 --> 00:12:58,280 +greater than or smaller than, and the other is the + +122 +00:12:58,280 --> 00:13:01,800 +probability. Is it above 1.5 or smaller than 1.5? + +123 +00:13:02,220 --> 00:13:05,800 +So you have to keep careful for this type of + +124 +00:13:05,800 --> 00:13:11,260 +questions. So in this case. 
It should be to the + +125 +00:13:11,260 --> 00:13:15,180 +left. It should be to the left. because the area + +126 +00:13:15,180 --> 00:13:21,600 +to the left here makes sense it's 99% but if the + +127 +00:13:21,600 --> 00:13:25,460 +location is to the right side here it doesn't make + +128 +00:13:25,460 --> 00:13:28,580 +any sense that B makes greater than or equal to 99 + +129 +00:13:28,580 --> 00:13:33,460 +% because the area here is split into two halves + +130 +00:13:33,460 --> 00:13:38,460 +so 50% to the right 50% to the left of the + +131 +00:13:38,460 --> 00:13:42,900 +vertical line here so A should be to the left side + +132 +00:13:44,110 --> 00:13:48,870 +Make sense? Now, V of X greater than A equals 99%. + +133 +00:13:48,870 --> 00:13:52,130 +So + +134 +00:13:52,130 --> 00:13:58,230 +this area is 99%. Now, if we go back to the table + +135 +00:13:58,230 --> 00:14:01,670 +we have, the table again gives the area to the + +136 +00:14:01,670 --> 00:14:08,570 +left side. So this one exactly equals V of X + +137 +00:14:08,570 --> 00:14:16,140 +smaller than A, which is 1% because the area to + +138 +00:14:16,140 --> 00:14:20,860 +the right of A is 99 so the area to the left of A + +139 +00:14:20,860 --> 00:14:25,680 +is 1-99 which is 1% now here we have to look + +140 +00:14:25,680 --> 00:14:34,140 +inside the body of the table at the value of 01 so + +141 +00:14:34,140 --> 00:14:37,500 +in this case this score should be negative or + +142 +00:14:37,500 --> 00:14:42,310 +positive Since the probability is 100% smaller + +143 +00:14:42,310 --> 00:14:46,130 +than 1.5, so it should be negative. So if you go + +144 +00:14:46,130 --> 00:14:51,890 +back to the table, negative 1. Look at 0.1. + +145 +00:15:00,290 --> 00:15:02,550 +Minus 2.34. + +146 +00:15:12,640 --> 00:15:19,800 +So the approximate answer actually is 0099. + +147 +00:15:21,160 --> 00:15:26,660 +The closest value. You may take this value. You + +148 +00:15:26,660 --> 00:15:31,720 +will be okay. 
So this one is more closer to 01.
+
+149
+00:15:32,160 --> 00:15:36,800
+than 0102. So my corresponding z-score is negative
+
+150
+00:15:36,800 --> 00:15:47,820
+2.4, I'm sorry, 2.35. So z-score, negative 2.33,
+
+151
+00:15:50,900 --> 00:15:52,520
+0123.
+
+152
+00:15:54,500 --> 00:15:59,160
+So this is the approximate answer. sometimes maybe
+
+153
+00:15:59,160 --> 00:16:04,340
+if you have a calculator or excel you may
+
+154
+00:16:04,340 --> 00:16:08,720
+determine the exact value in this case which is
+
+155
+00:16:08,720 --> 00:16:15,500
+minus 2.3263 this is the exact answer but the
+
+156
+00:16:15,500 --> 00:16:20,460
+approximate one is 5 so my z score is negative 2
+
+157
+00:16:20,460 --> 00:16:23,560
+.33 now the value of a
+
+158
+00:16:27,300 --> 00:16:32,000
+equals Mu plus Z Sigma. The one we just discussed
+
+159
+00:16:32,000 --> 00:16:39,580
+last time. Remember, when Z equals X minus Mu
+
+160
+00:16:39,580 --> 00:16:44,320
+divided by Sigma, just cross multiplication, you
+
+161
+00:16:44,320 --> 00:16:48,980
+will get X
+
+162
+00:16:48,980 --> 00:16:52,340
+minus Mu equals Z Sigma. That means X equals Mu
+
+163
+00:16:52,340 --> 00:16:58,320
+plus Z Sigma. X is the same as A, so A equals Mu plus
+
+164
+00:16:58,320 --> 00:17:05,640
+Z Sigma, Mu is given 0.8, Z negative 2.33 times
+
+165
+00:17:05,640 --> 00:17:13,100
+Sigma, that will give the final answer which is 0
+
+166
+00:17:13,100 --> 00:17:14,060
+.3347.
+
+167
+00:17:17,480 --> 00:17:24,130
+So again, He said that 99% of the download times
+
+168
+00:17:24,130 --> 00:17:27,810
+are above how many seconds. So we are looking for
+
+169
+00:17:27,810 --> 00:17:32,470
+the value of A, such that U makes greater than or
+
+170
+00:17:32,470 --> 00:17:38,930
+equal to 99%. So A is located to the left side of
+
+171
+00:17:38,930 --> 00:17:40,330
+the curve, normal curve. 
+ +172 +00:17:43,110 --> 00:17:45,350 +And again, the table gives the area to the left of + +173 +00:17:45,350 --> 00:17:52,620 +Z. So the area to the left is 1%. Now if you check + +174 +00:17:52,620 --> 00:17:57,900 +the z value corresponding to this one, 101, you + +175 +00:17:57,900 --> 00:18:00,700 +figure that z, the approximate answer is negative + +176 +00:18:00,700 --> 00:18:07,160 +2.33. Now just use this value, and plug it into + +177 +00:18:07,160 --> 00:18:11,700 +this equation, you will get this result. Yes. Is + +178 +00:18:11,700 --> 00:18:12,760 +it negative? + +179 +00:18:22,750 --> 00:18:32,910 +Last part, part E 95 + +180 +00:18:32,910 --> 00:18:46,170 +% 95 + +181 +00:18:46,170 --> 00:18:46,810 +% + +182 +00:18:50,400 --> 00:18:55,980 +after the load times are between what two values + +183 +00:18:55,980 --> 00:18:59,120 +approximately distributed around the mean. + +184 +00:19:04,960 --> 00:19:17,340 +So around 95 + +185 +00:19:17,340 --> 00:19:25,090 +% Of the download times, what two values + +186 +00:19:25,090 --> 00:19:31,170 +symmetrically distributed around the mean? 
the + +187 +00:19:31,170 --> 00:19:36,250 +area here for example between + +188 +00:19:36,250 --> 00:19:44,850 +E and B is 95% and he mentioned the proximity so + +189 +00:19:44,850 --> 00:19:48,790 +this E is the same as B but still we have negative + +190 +00:19:48,790 --> 00:19:56,730 +sign so we are looking we + +191 +00:19:56,730 --> 00:19:59,170 +are looking for the probability of P of X + +192 +00:20:05,550 --> 00:20:10,290 +between A and B equal 95% now by symmetric + +193 +00:20:10,290 --> 00:20:14,350 +distribution exactly + +194 +00:20:14,350 --> 00:20:19,430 +this value A is the same as B but with negative + +195 +00:20:19,430 --> 00:20:25,270 +sign now since the area between A and B is 95% and + +196 +00:20:25,270 --> 00:20:29,890 +we have symmetric distribution 5% left divided by + +197 +00:20:29,890 --> 00:20:38,150 +2 that means 2.5 to the left of A and 2.5% to the + +198 +00:20:38,150 --> 00:20:38,690 +right of B. + +199 +00:20:43,130 --> 00:20:50,050 +Now, what are the values of A and B? Now, if you + +200 +00:20:50,050 --> 00:20:55,970 +look at this value, B of X less than A equals 2 + +201 +00:20:55,970 --> 00:20:56,910 +.5%. + +202 +00:21:00,780 --> 00:21:05,560 +Be careful, it's 0, 2, 2, 5, 0, 2, 5. Now, what's + +203 +00:21:05,560 --> 00:21:08,440 +the value of A softer, B makes smaller than A by + +204 +00:21:08,440 --> 00:21:13,760 +this one. The same I just we did in bar D. So + +205 +00:21:13,760 --> 00:21:16,360 +that's A. Now, what's the Z score in this case? + +206 +00:21:19,160 --> 00:21:26,180 +If we go back to the normal table, now we are + +207 +00:21:26,180 --> 00:21:26,840 +looking for + +208 +00:21:32,840 --> 00:21:34,220 +Zero to five. + +209 +00:21:37,360 --> 00:21:42,540 +So minus one point nine six. + +210 +00:21:47,840 --> 00:21:54,830 +So Z equals minus one point nine six. Okay. 
So now
+
+211
+00:21:54,830 --> 00:22:01,930
+my A equal Mu plus Z Sigma Mu
+
+212
+00:22:01,930 --> 00:22:10,610
+is given is 0.8 Z is minus 1.96 Times
+
+213
+00:22:10,610 --> 00:22:17,190
+the value of Sigma which is 0.2 Similarly
+
+214
+00:22:17,190 --> 00:22:26,370
+to get the value of A B of X is less than B equals
+
+215
+00:22:26,370 --> 00:22:33,410
+now the area to the left of B 95% plus 2.5 so
+
+216
+00:22:33,410 --> 00:22:50,790
+that's 97.5 again
+
+217
+00:22:55,010 --> 00:23:02,330
+b of x smaller than a is 2.5 percent now to get
+
+218
+00:23:02,330 --> 00:23:06,250
+the corresponding z value for 0.025 we have to
+
+219
+00:23:06,250 --> 00:23:11,730
+look at the normal table inside the normal table
+
+220
+00:23:11,730 --> 00:23:16,910
+we get from 0.025 corresponding to z score of
+
+221
+00:23:16,910 --> 00:23:23,030
+minus 1.96 so my z score is negative 1.96 now use
+
+222
+00:23:23,030 --> 00:23:29,910
+this value here, so mu equals 0.8 is the mean,
+
+223
+00:23:30,490 --> 00:23:38,570
+minus 1.96, the score times 6. The other part, to
+
+224
+00:23:38,570 --> 00:23:43,250
+get the value of B, the probability of X smaller
+
+225
+00:23:43,250 --> 00:23:49,150
+than B equals 95 plus 2.5 is 97.5.
+
+226
+00:23:51,990 --> 00:23:55,090
+by using the same way we'll get that z score is 1
+
+227
+00:23:55,090 --> 00:23:58,550
+.96 as we mentioned before because these two
+
+228
+00:23:58,550 --> 00:24:02,830
+values here should be the z score the same so now
+
+229
+00:24:02,830 --> 00:24:09,690
+b equals mu plus 1
+
+230
+00:24:09,690 --> 00:24:17,390
+.96 times sigma and that will give you A, 0.408
+
+231
+00:24:23,280 --> 00:24:28,640
+And B equals 1.1920.
+
+232
+00:24:29,260 --> 00:24:34,520
+So these are the two values which has 95% between
+
+233
+00:24:34,520 --> 00:24:41,620
+them. 
So 95% of the data, I mean 95% of the + +234 +00:24:41,620 --> 00:24:50,180 +download times are between 0.4 seconds and 1.19 + +235 +00:24:50,180 --> 00:24:56,490 +seconds. make sense that is again 95 percent of + +236 +00:24:56,490 --> 00:25:00,870 +the download times are between approximately 0.4 + +237 +00:25:00,870 --> 00:25:09,410 +seconds and around 1.2 so this value is 0.4 the + +238 +00:25:09,410 --> 00:25:13,590 +other one is approximately 1.2 so again 95 percent + +239 +00:25:13,590 --> 00:25:18,550 +of the download times are between 0.4 seconds + +240 +00:25:18,550 --> 00:25:22,470 +approximately and one minute. This problem maybe + +241 +00:25:22,470 --> 00:25:25,590 +is the most important one for this chapter. + +242 +00:25:26,790 --> 00:25:30,550 +Exactly in the exam you will see something like + +243 +00:25:30,550 --> 00:25:35,170 +that. Either for part A, B and C which are the + +244 +00:25:35,170 --> 00:25:40,490 +same and the backward normal calculations as part + +245 +00:25:40,490 --> 00:25:44,430 +D and E. Any question? + +246 +00:25:51,660 --> 00:25:59,600 +let's go solve true and false problems for the + +247 +00:25:59,600 --> 00:26:03,240 +practice in chat asses + +248 +00:26:20,840 --> 00:26:24,380 +The Z-score should be one positive and the other + +249 +00:26:24,380 --> 00:26:28,580 +is negative, not A and B. The corresponding Z + +250 +00:26:28,580 --> 00:26:34,940 +-score here should have the same values but + +251 +00:26:34,940 --> 00:26:37,020 +negative sign, not A and B. 
+ +252 +00:26:40,120 --> 00:26:48,380 +now let's do some rex problems for chapter 6 now + +253 +00:26:48,380 --> 00:26:53,320 +just look at the minus sign the probability that + +254 +00:26:53,320 --> 00:26:57,220 +standard normal random variable C falls between + +255 +00:26:57,220 --> 00:27:04,620 +minus 1.5 and 0.81 so it's similar to this one but + +256 +00:27:04,620 --> 00:27:08,540 +this is straight forward this score between minus + +257 +00:27:08,540 --> 00:27:18,750 +1.5 up to 0.81 okay + +258 +00:27:18,750 --> 00:27:27,970 +so + +259 +00:27:27,970 --> 00:27:32,050 +number 23 again the probability that standard + +260 +00:27:32,050 --> 00:27:36,790 +normal random variable z fall between minus 1.5 + +261 +00:27:36,790 --> 00:27:41,910 +and 0.81 So it's going to be, we are looking for + +262 +00:27:41,910 --> 00:27:42,530 +this probability. + +263 +00:27:48,150 --> 00:27:53,250 +So it's z less than one point one minus. + +264 +00:27:57,670 --> 00:28:00,330 +Now just do it by yourself, you will figure that + +265 +00:28:00,330 --> 00:28:04,910 +the final answer is point seven four. + +266 +00:28:07,970 --> 00:28:10,730 +That's for 23. I think straightforward one. + +267 +00:28:14,490 --> 00:28:17,330 +Let's do one more, 25 for example. + +268 +00:28:20,690 --> 00:28:23,230 +The probability that standard normal random + +269 +00:28:23,230 --> 00:28:26,450 +variable is below 196. + +270 +00:28:28,390 --> 00:28:38,970 +See? Below 1.96. Now from the table, if we look at + +271 +00:28:38,970 --> 00:28:46,510 +the normal table 1.96 + +272 +00:28:46,510 --> 00:28:49,570 +now + +273 +00:28:49,570 --> 00:28:58,050 +the area below 1.6 96 975 so + +274 +00:28:58,050 --> 00:29:02,030 +it's here it mentioned that it's 0.4 so so this + +275 +00:29:02,030 --> 00:29:08,860 +one is false or Because the area to the left of 1 + +276 +00:29:08,860 --> 00:29:13,400 +.96 is not 0.475, it's equal to 975. + +277 +00:29:16,200 --> 00:29:21,000 +That's for 25. 
Let's do the odd numbers. + +278 +00:29:24,440 --> 00:29:27,360 +The probability that standard normal, the random + +279 +00:29:27,360 --> 00:29:30,780 +variable, falls between minus 2 and minus 0.44. + +280 +00:29:38,540 --> 00:29:42,760 +I'm sorry minus between minus two and negative + +281 +00:29:42,760 --> 00:29:46,980 +point four four so it's the same as z is smaller + +282 +00:29:46,980 --> 00:29:51,340 +than negative point four four minus z less than + +283 +00:29:51,340 --> 00:29:55,580 +minus two it + +284 +00:29:55,580 --> 00:30:00,520 +says the answer is point six four seven two the + +285 +00:30:00,520 --> 00:30:07,180 +exact answer is point + +286 +00:30:07,180 --> 00:30:07,500 +three + +287 +00:30:12,810 --> 00:30:17,890 +So that one is incorrect, + +288 +00:30:18,010 --> 00:30:22,750 +27 is incorrect. You may figure this one by using + +289 +00:30:22,750 --> 00:30:28,290 +the table or sometimes by Excel you can do this + +290 +00:30:28,290 --> 00:30:31,030 +problem. Let's do different one. + +291 +00:30:40,300 --> 00:30:43,360 +Look at 29, the odd number, 29. + +292 +00:30:46,200 --> 00:30:56,640 +29 says that a worker earns 15 dollars per hour at + +293 +00:30:56,640 --> 00:31:01,140 +planet earth and is told that only 2.5 percent of + +294 +00:31:01,140 --> 00:31:06,300 +all workers make a higher wage if the wage is + +295 +00:31:06,300 --> 00:31:09,880 +assumed to be normally distributed. And the + +296 +00:31:09,880 --> 00:31:15,360 +standard deviation of wage rates is five per hour. + +297 +00:31:16,460 --> 00:31:22,100 +So the standard deviation is five per hour, five + +298 +00:31:22,100 --> 00:31:25,780 +dollars per hour. The average wage for the plant + +299 +00:31:25,780 --> 00:31:27,000 +is 75. + +300 +00:31:30,620 --> 00:31:33,560 +Now again, go back to the problem. It says that a + +301 +00:31:33,560 --> 00:31:37,390 +worker earns 15 dollars per hour. 
And it's told + +302 +00:31:37,390 --> 00:31:41,990 +that only 2.5% of all workers make a higher wage. + +303 +00:31:43,150 --> 00:31:55,330 +So it's X more than $15 equal 2.5%. That means + +304 +00:31:55,330 --> 00:31:59,490 +zero to five. So let's see if this one is true or + +305 +00:31:59,490 --> 00:32:05,820 +false. So again, this man, earns $15 per hour at a + +306 +00:32:05,820 --> 00:32:12,060 +plant. And he's told that only 2.5% of all workers + +307 +00:32:12,060 --> 00:32:18,120 +make higher wage, means greater than the one he + +308 +00:32:18,120 --> 00:32:21,460 +just got, which is $15. So people who make greater + +309 +00:32:21,460 --> 00:32:27,140 +than 15, they claim it's 2.5%. So let's see if + +310 +00:32:27,140 --> 00:32:32,230 +that percentage is true or false. So in this case, + +311 +00:32:32,290 --> 00:32:38,070 +we have to convert to this score. So it becomes B + +312 +00:32:38,070 --> 00:32:43,850 +of z greater than 15 minus the mean divided by + +313 +00:32:43,850 --> 00:32:51,110 +sigma. So B of z greater than 7.5 divided by 5 is + +314 +00:32:51,110 --> 00:32:53,850 +1.5. + +315 +00:33:00,750 --> 00:33:05,850 +Make sense? 1.5. So now B of Z greater than 1.5. 1 + +316 +00:33:05,850 --> 00:33:11,170 +minus B of Z is less than 1.5. Go back to the + +317 +00:33:11,170 --> 00:33:23,870 +table. 1.5 is? Look at the table. 9332. 9332. So + +318 +00:33:23,870 --> 00:33:27,430 +the answer should be 668. That means + +319 +00:33:31,130 --> 00:33:41,750 +6.68% of the workers have higher wage. Not 2.5%. + +320 +00:33:41,750 --> 00:33:44,770 +So that means this is incorrect. So that's false. + +321 +00:33:49,470 --> 00:33:53,830 +So the answer is false for this problem. + +322 +00:33:57,310 --> 00:33:58,530 +Look at 31. + +323 +00:34:01,550 --> 00:34:04,970 +Do you have any question for this one, for 29? In + +324 +00:34:04,970 --> 00:34:12,150 +29, the claim is 2.5% of all workers have higher + +325 +00:34:12,150 --> 00:34:17,930 +wage than $15. 
Let's see if this claim is true or + +326 +00:34:17,930 --> 00:34:22,190 +false. So the problem again is B probability of X + +327 +00:34:22,190 --> 00:34:26,630 +greater than 15 equals 2.5%. We figure out that + +328 +00:34:26,630 --> 00:34:33,250 +the answer is 6.68%. So the claim is false. Now, + +329 +00:34:33,530 --> 00:34:41,430 +31. Any set, any set of normality, oh I'm sorry, + +330 +00:34:41,490 --> 00:34:44,630 +any set of normally distributed data can be + +331 +00:34:44,630 --> 00:34:48,430 +transformed to its standardized form. True or + +332 +00:34:48,430 --> 00:34:51,190 +false? 31. + +333 +00:34:54,170 --> 00:34:59,870 +It says that any set of normally distributed can + +334 +00:34:59,870 --> 00:35:00,730 +be transformed. + +335 +00:35:06,630 --> 00:35:10,950 +Let's see 32. The middle spread. + +336 +00:35:16,660 --> 00:35:21,020 +That is the middle 50% of the normal distribution + +337 +00:35:21,020 --> 00:35:27,000 +is equal to one standard deviation. That's true. + +338 +00:35:27,240 --> 00:35:33,860 +It's incorrect. Because we mentioned that 68% of + +339 +00:35:33,860 --> 00:35:38,040 +the data are within one standard deviation above + +340 +00:35:38,040 --> 00:35:42,300 +the mean. Within the mean. So it's false. + +341 +00:35:46,160 --> 00:35:51,740 +Instead of 50, we have to say that 68%. The + +342 +00:35:51,740 --> 00:35:57,500 +empirical rule. 33. The normal probability plot, + +343 +00:35:58,600 --> 00:36:02,060 +the one we just discussed last time, may be used + +344 +00:36:02,060 --> 00:36:06,640 +to assess the assumption of normality for a + +345 +00:36:06,640 --> 00:36:09,560 +particular batch of data. As we mentioned before, + +346 +00:36:10,240 --> 00:36:15,220 +One of the rules that we can use to determine if + +347 +00:36:15,220 --> 00:36:17,940 +the data is normally distributed or not is called + +348 +00:36:17,940 --> 00:36:20,940 +the normal probability plot. So it's true. 
So + +349 +00:36:20,940 --> 00:36:26,000 +again, normal probability plot is used to assess + +350 +00:36:26,000 --> 00:36:31,120 +the assumption of normality for a data. + +351 +00:36:34,300 --> 00:36:41,410 +Let's see, for example, 35. The probability that a + +352 +00:36:41,410 --> 00:36:46,050 +standard normal variable z is positive is, + +353 +00:36:47,290 --> 00:36:52,910 +the probability that a standard normal variable z + +354 +00:36:52,910 --> 00:36:58,770 +is positive is, now if you, this is a table, z + +355 +00:36:58,770 --> 00:37:02,070 +cubed. It's one. + +356 +00:37:06,800 --> 00:37:12,660 +Again, the probability that + +357 +00:37:12,660 --> 00:37:16,960 +a standardized normal variable Z is positive is + +358 +00:37:16,960 --> 00:37:17,580 +the probability. + +359 +00:37:26,340 --> 00:37:28,820 +Let's do one more. + +360 +00:37:51,430 --> 00:37:54,970 +These problems are the same, some of these. + +361 +00:37:57,890 --> 00:38:07,490 +Now look at 6 and 7. Suppose Z has a standard + +362 +00:38:07,490 --> 00:38:11,750 +normal distribution with a mean of zero and + +363 +00:38:11,750 --> 00:38:15,450 +standard relation of one. And the probability Z is + +364 +00:38:15,450 --> 00:38:20,170 +less than 1.25. Straight forward, just go back to + +365 +00:38:20,170 --> 00:38:25,930 +the table. Z less than 1.15, 1.15, + +366 +00:38:30,610 --> 00:38:43,210 +08, 8749. So that's correct. 8749 is correct. Any + +367 +00:38:43,210 --> 00:38:47,570 +question? So let's move. + +368 +00:38:50,870 --> 00:38:55,930 +Chapter seven. So that's for the practice for + +369 +00:38:55,930 --> 00:38:56,590 +chapter seven. + +370 +00:38:59,430 --> 00:39:05,770 +Chapter seven talks about actually two things. One + +371 +00:39:05,770 --> 00:39:11,930 +is called sampling and the other topic is sampling + +372 +00:39:11,930 --> 00:39:16,490 +distributions. Mainly there are four learning + +373 +00:39:16,490 --> 00:39:23,090 +objectives for this chapter. 
Number one, we have + +374 +00:39:23,090 --> 00:39:25,210 +to distinguish between different sampling + +375 +00:39:25,210 --> 00:39:29,850 +techniques or methods. We'll talk about + +376 +00:39:29,850 --> 00:39:33,050 +probability and non-probability sampling, and we + +377 +00:39:33,050 --> 00:39:35,900 +have to distinguish between these two. The other + +378 +00:39:35,900 --> 00:39:40,200 +objective for this chapter will be the concept of + +379 +00:39:40,200 --> 00:39:44,640 +semantic distribution. Now, instead of using just + +380 +00:39:44,640 --> 00:39:49,120 +X, as we did in chapter three, what's the + +381 +00:39:49,120 --> 00:39:51,380 +probability of X, for example, less than 15? + +382 +00:39:52,320 --> 00:39:57,240 +Instead of that, we'll talk about not X, the + +383 +00:39:57,240 --> 00:40:00,180 +statistic itself, maybe X bar or the sample mean. + +384 +00:40:00,960 --> 00:40:04,120 +Number three is to compute probabilities related + +385 +00:40:04,120 --> 00:40:08,600 +to the sample mean, not the exact value of X. So + +386 +00:40:08,600 --> 00:40:12,340 +here we are looking for V of X bar smaller than 1 + +387 +00:40:12,340 --> 00:40:16,360 +.5. So in this case, to compute these + +388 +00:40:16,360 --> 00:40:21,320 +probabilities, we have to know that all the + +389 +00:40:23,350 --> 00:40:26,110 +concepts in chapter three should be understood. + +390 +00:40:26,530 --> 00:40:31,050 +Otherwise, you cannot do any problems here because + +391 +00:40:31,050 --> 00:40:34,050 +here will depend actually how can we find the + +392 +00:40:34,050 --> 00:40:36,870 +probabilities underneath the normal table. But + +393 +00:40:36,870 --> 00:40:40,430 +instead of X, in this case, we have the sample + +394 +00:40:40,430 --> 00:40:43,630 +mean X bar. So that's the difference between + +395 +00:40:43,630 --> 00:40:48,860 +chapter six and the next chapter. 
So here, In + +396 +00:40:48,860 --> 00:40:51,900 +chapter six, we are talking about the sampling + +397 +00:40:51,900 --> 00:40:56,220 +distribution of the sample mean. Also, there is + +398 +00:40:56,220 --> 00:41:00,120 +something new, which is called sample proportion. + +399 +00:41:00,680 --> 00:41:02,460 +Now, let's see the difference between these two. + +400 +00:41:03,980 --> 00:41:08,960 +If you remember, at the first class, we said there + +401 +00:41:08,960 --> 00:41:13,160 +are two types of data. One is called numerical, + +402 +00:41:13,380 --> 00:41:14,020 +which is quantitative. + +403 +00:41:16,790 --> 00:41:18,090 +And the other one is qualitative. + +404 +00:41:21,870 --> 00:41:24,530 +So we have numerical data, which means + +405 +00:41:24,530 --> 00:41:31,890 +quantitative data, and qualitative data. Now, for + +406 +00:41:31,890 --> 00:41:35,230 +numerical data, we can use the sample mean as a + +407 +00:41:35,230 --> 00:41:41,830 +measure of central tendency, x bar. But for + +408 +00:41:41,830 --> 00:41:45,390 +qualitative data, for example, if we have gender, + +409 +00:41:47,300 --> 00:41:51,980 +Gender either males or females. In this case, we + +410 +00:41:51,980 --> 00:41:56,860 +cannot say that the mean of females in this school + +411 +00:41:56,860 --> 00:42:02,200 +is 1.2, for example. It doesn't make any sense. + +412 +00:42:02,940 --> 00:42:08,280 +But it's better to say that IUG has, for example, + +413 +00:42:08,680 --> 00:42:13,220 +70% females. Makes sense. So this one is called + +414 +00:42:13,220 --> 00:42:16,500 +percentage or proportion. So here we will talk + +415 +00:42:16,500 --> 00:42:23,120 +about the sample proportion. So sample mean is + +416 +00:42:23,120 --> 00:42:27,700 +used for numerical data. But on the other hand, + +417 +00:42:28,480 --> 00:42:31,020 +the sample proportion is used for qualitative + +418 +00:42:31,020 --> 00:42:34,440 +data. 
In this chapter, we are going to know how + +419 +00:42:34,440 --> 00:42:39,200 +can we compute the probabilities related either to + +420 +00:42:39,200 --> 00:42:42,880 +the sample mean or to the sample proportion. The + +421 +00:42:42,880 --> 00:42:46,500 +last one, the last objective for this chapter will + +422 +00:42:46,500 --> 00:42:52,060 +be the importance of using something called + +423 +00:42:52,060 --> 00:42:56,120 +central limit theorem. Under specific conditions, + +424 +00:42:56,280 --> 00:42:59,980 +we'll know how to use this theorem. So these are + +425 +00:42:59,980 --> 00:43:04,400 +the mainly four objectives for this chapter. So + +426 +00:43:04,400 --> 00:43:10,300 +you have to make sure that you understand 100% how + +427 +00:43:10,300 --> 00:43:14,700 +to compute all types of probabilities under the + +428 +00:43:14,700 --> 00:43:18,610 +normal curve. either straightforward calculations + +429 +00:43:18,610 --> 00:43:21,030 +or backward calculations, I mean inverse + +430 +00:43:21,030 --> 00:43:23,570 +calculation. If the probability is given, how can + +431 +00:43:23,570 --> 00:43:27,590 +we find the corresponding X value? Or how can we + +432 +00:43:27,590 --> 00:43:30,930 +find the value by itself, the value of the + +433 +00:43:30,930 --> 00:43:33,930 +probability by itself? So next time, Insha'Allah, + +434 +00:43:34,350 --> 00:43:41,590 +Sunday, we are going to start Chapter seven, why + +435 +00:43:41,590 --> 00:43:45,490 +sample? Why most of the time in our researches we + +436 +00:43:45,490 --> 00:43:49,630 +are using the sample instead of using the + +437 +00:43:49,630 --> 00:43:53,790 +population? Any questions? So that's all. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A.srt new file mode 100644 index 0000000000000000000000000000000000000000..696c36bfe64b3a380a6707b5871c2f02dfd5ba39 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A.srt @@ -0,0 +1,1890 @@ + +1 +00:00:06,210 --> 00:00:08,910 +Today, inshallah, we'll continue chapter nine. + +2 +00:00:09,390 --> 00:00:14,230 +We'll talk in details about one-tailed test. Last + +3 +00:00:14,230 --> 00:00:17,950 +time, we talked about two-tailed test. And we + +4 +00:00:17,950 --> 00:00:21,690 +discussed that there are three tools in order to + +5 +00:00:21,690 --> 00:00:25,370 +test the null hypothesis. The first one is + +6 +00:00:25,370 --> 00:00:28,510 +critical value approach, the second B value + +7 +00:00:28,510 --> 00:00:32,060 +approach, and confidence interval. Keep in mind + +8 +00:00:32,060 --> 00:00:35,480 +the last approach, confidence interval, is used + +9 +00:00:35,480 --> 00:00:39,880 +only for two-tailed tests. So now for one-tailed + +10 +00:00:39,880 --> 00:00:44,040 +test, we are going to use two approaches, critical + +11 +00:00:44,040 --> 00:00:49,500 +value and B value approach. So there are many + +12 +00:00:49,500 --> 00:00:54,340 +cases the alternative hypothesis focuses on + +13 +00:00:54,340 --> 00:00:58,420 +particular direction. In this case, we have a + +14 +00:00:58,420 --> 00:01:03,170 +certain direction. And this direction could be to + +15 +00:01:03,170 --> 00:01:07,510 +the left side. And it's called lower tail test. + +16 +00:01:08,290 --> 00:01:14,830 +And the other one greater than or upper tail test. + +17 +00:01:15,050 --> 00:01:18,050 +So there are two types of one tail test. One is + +18 +00:01:18,050 --> 00:01:21,070 +called lower test. In this case, they alternate by + +19 +00:01:21,070 --> 00:01:26,670 +hypothesis. Each one might be, for example, mu + +20 +00:01:26,670 --> 00:01:30,410 +smaller than 3. 
So the null hypothesis can be + +21 +00:01:30,410 --> 00:01:34,870 +written as mu is greater than or equal to 3. Just + +22 +00:01:34,870 --> 00:01:39,970 +for simple notation, we can use mu equal to 3 + +23 +00:01:39,970 --> 00:01:45,290 +instead of mu greater than or equal to 3. So we + +24 +00:01:45,290 --> 00:01:52,230 +can use this notation instead of using mu + +25 +00:01:52,230 --> 00:01:55,550 +is greater than or equal to 3. It's better to use + +26 +00:01:55,550 --> 00:02:00,210 +this notation. Because as we know, H1 is the + +27 +00:02:00,210 --> 00:02:04,870 +opposite of H0. So if under H0, we have a mu + +28 +00:02:04,870 --> 00:02:07,830 +greater than or equal to 3, then for the + +29 +00:02:07,830 --> 00:02:10,430 +alternative hypothesis, it could be a mu smaller + +30 +00:02:10,430 --> 00:02:14,810 +than 3. But just to use simple notation, we may + +31 +00:02:14,810 --> 00:02:18,790 +write mu equals 3 against the alternative. + +32 +00:02:19,270 --> 00:02:22,670 +Actually, we are focusing on the alternative + +33 +00:02:22,670 --> 00:02:27,990 +hypothesis, because the direction of this one + +34 +00:02:27,990 --> 00:02:34,610 +gives the rejection region of the test because my + +35 +00:02:34,610 --> 00:02:38,390 +rejection region actually based on the sign or the + +36 +00:02:38,390 --> 00:02:42,150 +direction under the alternative hypothesis so + +37 +00:02:42,150 --> 00:02:46,530 +again this is called lower tail test since the + +38 +00:02:46,530 --> 00:02:49,190 +alternative hypothesis is focused on the lower + +39 +00:02:49,190 --> 00:02:53,290 +tail below the mean of three three is just an + +40 +00:02:53,290 --> 00:02:58,120 +example the other one When we are testing Mu + +41 +00:02:58,120 --> 00:03:01,940 +smaller than or equal to 3 against Mu above 3, in + +42 +00:03:01,940 --> 00:03:05,300 +this case, this one is called an upper tail test + +43 +00:03:05,300 --> 00:03:09,000 +since the alternative hypothesis is focused on the + +44 
+00:03:09,000 --> 00:03:12,840 +upper tail above the minimum 3. Now, how can we + +45 +00:03:12,840 --> 00:03:16,340 +state the appropriate null and alternative + +46 +00:03:16,340 --> 00:03:21,620 +hypothesis? The answer depends on the problem + +47 +00:03:21,620 --> 00:03:24,320 +itself. So by using the problem, we can determine + +48 +00:03:25,110 --> 00:03:29,990 +If the test is lower tail or upper tail. So you + +49 +00:03:29,990 --> 00:03:33,710 +have to state carefully both null and alternative + +50 +00:03:33,710 --> 00:03:37,230 +hypothesis. So for example, if the problem says + +51 +00:03:37,230 --> 00:03:44,250 +increase, it means a mu is above. If the problem + +52 +00:03:44,250 --> 00:03:50,130 +says smaller than or decrease from whatever the + +53 +00:03:50,130 --> 00:03:53,270 +value is, then we have to use a mu smaller than + +54 +00:03:53,270 --> 00:03:57,230 +that value. Any question? So that's how can we + +55 +00:03:57,230 --> 00:04:01,230 +state the null and alternative hypothesis for one + +56 +00:04:01,230 --> 00:04:04,830 +TAIL test. Let's see in detail the rejection + +57 +00:04:04,830 --> 00:04:09,390 +regions for lower and upper TAIL tests. For lower + +58 +00:04:09,390 --> 00:04:13,330 +TAIL test, so in this case we are focusing on the + +59 +00:04:13,330 --> 00:04:17,770 +left side, so my rejection region should be this + +60 +00:04:17,770 --> 00:04:24,670 +one. Now, we are testing our hypothesis at level + +61 +00:04:24,670 --> 00:04:28,950 +of significance alpha. For two-tiered test, we + +62 +00:04:28,950 --> 00:04:32,250 +split this alpha by two, because we have two + +63 +00:04:32,250 --> 00:04:35,090 +rejection regions, one to the right and other to + +64 +00:04:35,090 --> 00:04:38,150 +the left side. But here there is only one side, so + +65 +00:04:38,150 --> 00:04:41,010 +one direction, so we have to keep alpha as it is. 
+ +66 +00:04:41,330 --> 00:04:43,810 +So we have alpha here, so they accept the + +67 +00:04:43,810 --> 00:04:50,250 +rejection region below minus z alpha or minus z + +68 +00:04:50,250 --> 00:04:55,080 +alpha, depends on Sigma, known or unknown. So it's + +69 +00:04:55,080 --> 00:04:58,220 +the same concepts as we discussed before. So my + +70 +00:04:58,220 --> 00:05:01,100 +rejection region is the area to the left side. So + +71 +00:05:01,100 --> 00:05:05,260 +for this particular hypothesis, if we are talking + +72 +00:05:05,260 --> 00:05:11,540 +about testing Mu smaller than 3, then we may + +73 +00:05:11,540 --> 00:05:12,820 +reject F0. + +74 +00:05:17,100 --> 00:05:25,980 +If U is a statistic, Or T is smaller than minus + +75 +00:05:25,980 --> 00:05:31,160 +the alpha. So there is only one rejection region + +76 +00:05:31,160 --> 00:05:34,600 +in this case because we are talking about one tail + +77 +00:05:34,600 --> 00:05:38,980 +test. So again, my critical value is only one + +78 +00:05:38,980 --> 00:05:42,040 +critical value because here it's just one tier + +79 +00:05:42,040 --> 00:05:46,160 +test. It's negative z alpha or negative z alpha. + +80 +00:05:46,620 --> 00:05:50,500 +So we reject the null hypothesis if the value of + +81 +00:05:50,500 --> 00:05:54,040 +the z statistic falls in the rejection region. It + +82 +00:05:54,040 --> 00:05:58,860 +means that Z statistic is smaller than minus Z + +83 +00:05:58,860 --> 00:06:03,760 +alpha. That's for lower tail test. For the upper + +84 +00:06:03,760 --> 00:06:06,860 +tail, vice versa actually. It's the same, but here + +85 +00:06:06,860 --> 00:06:12,540 +the rejection region is on the right side, the + +86 +00:06:12,540 --> 00:06:16,020 +upper tail. So here we reject the null hypothesis + +87 +00:06:16,020 --> 00:06:22,750 +for the other case. If we are testing Mu smaller + +88 +00:06:22,750 --> 00:06:27,330 +than or equal to 3 against Mu is above 3. 
The + +89 +00:06:27,330 --> 00:06:33,750 +rejection is really on this area. So Z alpha or + +90 +00:06:33,750 --> 00:06:40,330 +above it. So we reject H0 + +91 +00:06:40,330 --> 00:06:50,050 +if my Z statistic is above Z alpha. So that's for + +92 +00:06:50,050 --> 00:06:55,950 +lower and upper tail test. And actually, if these + +93 +00:06:55,950 --> 00:06:59,770 +statistics falls below in this case, Z alpha, then + +94 +00:06:59,770 --> 00:07:03,250 +it falls in the non-rejection region. So we don't + +95 +00:07:03,250 --> 00:07:06,050 +reject the null hypothesis. That means there is + +96 +00:07:06,050 --> 00:07:09,410 +insufficient evidence to support the alternative + +97 +00:07:09,410 --> 00:07:14,070 +hypothesis. So that's for one tail test, either + +98 +00:07:14,070 --> 00:07:21,120 +lower or the upper tail. Let's see an example for + +99 +00:07:21,120 --> 00:07:25,500 +opportunity test when sigma is unknown. Again, the + +100 +00:07:25,500 --> 00:07:28,400 +idea is the same. Sigma is known or unknown. We + +101 +00:07:28,400 --> 00:07:33,980 +just replace z by t, sigma by s. That's all. Here, + +102 +00:07:34,420 --> 00:07:38,740 +for example, a phone industry manager thinks that + +103 +00:07:38,740 --> 00:07:42,600 +customer monthly cell phone bills have increased. + +104 +00:07:43,990 --> 00:07:49,030 +So he thought that the bills have increased. Now, + +105 +00:07:49,110 --> 00:07:52,490 +by using this statement, we can figure out the + +106 +00:07:52,490 --> 00:07:57,230 +alternative hypothesis. It's aborted. And now, + +107 +00:07:57,350 --> 00:08:00,670 +average over $52 per month. + +108 +00:08:04,330 --> 00:08:11,090 +It says that he thinks that the customer monthly + +109 +00:08:11,090 --> 00:08:14,850 +cellphone bills have increased. Increased means mu + +110 +00:08:14,850 --> 00:08:17,710 +is above $52. + +111 +00:08:18,550 --> 00:08:23,190 +The company wishes to test this claim. 
Here we are + +112 +00:08:23,190 --> 00:08:28,330 +assuming the population is normally distributed. + +113 +00:08:29,270 --> 00:08:32,710 +So first step, we have to state the appropriate + +114 +00:08:32,710 --> 00:08:35,850 +null and alternative hypothesis. So based on this + +115 +00:08:35,850 --> 00:08:39,490 +problem, we can easily figure out that we are + +116 +00:08:39,490 --> 00:08:43,890 +testing mu smaller than or equal to 52 against the + +117 +00:08:43,890 --> 00:08:50,270 +alternative, mu is above 52. So again, mu is + +118 +00:08:50,270 --> 00:08:54,070 +smaller than versus + +119 +00:08:54,070 --> 00:09:00,580 +mu is above. So this is step number one. So we + +120 +00:09:00,580 --> 00:09:03,740 +have to form or to state a null and alternative + +121 +00:09:03,740 --> 00:09:09,400 +hypothesis. So the mu above 52 means the average + +122 +00:09:09,400 --> 00:09:15,460 +is greater than $52 per month. Now suppose, for + +123 +00:09:15,460 --> 00:09:20,820 +example, my alpha is 10%. And + +124 +00:09:20,820 --> 00:09:25,160 +we choose a random sample of size 25. Now by using + +125 +00:09:25,160 --> 00:09:27,300 +this information, we can determine the rejection + +126 +00:09:27,300 --> 00:09:32,170 +region. Now, the problem mentioned that sigma is + +127 +00:09:32,170 --> 00:09:34,250 +unknown, and the population is normally + +128 +00:09:34,250 --> 00:09:39,490 +distributed. In this case, we have to use T. Now, + +129 +00:09:39,570 --> 00:09:41,750 +my rejection region, since we are talking about + +130 +00:09:41,750 --> 00:09:46,010 +testing upper tilted, so my rejection region + +131 +00:09:46,010 --> 00:09:51,550 +should be to the right side, T alpha. It's above. + +132 +00:09:51,790 --> 00:09:56,600 +Now, T alpha, it means T 10%. And degrees of + +133 +00:09:56,600 --> 00:10:00,920 +freedom, 24. I think many times we used the T + +134 +00:10:00,920 --> 00:10:06,660 +table. 
So by using T table, you can find that your
+
+135
+00:10:06,660 --> 00:10:13,760
+critical value is 1.318. So that's your critical
+
+136
+00:10:13,760 --> 00:10:25,460
+value. Now, my decision is, if T statistic If the
+
+137
+00:10:25,460 --> 00:10:28,960
+value of T statistic lies in this rejection
+
+138
+00:10:28,960 --> 00:10:32,760
+region, we reject the null hypothesis. So if T
+
+139
+00:10:32,760 --> 00:10:44,660
+statistic is above 1.318, then we reject the null
+
+140
+00:10:44,660 --> 00:10:47,770
+hypothesis. So let's see if we reject it. or don't
+
+141
+00:10:47,770 --> 00:10:50,570
+reject. So we reject the null hypothesis if your t
+
+142
+00:10:50,570 --> 00:10:54,390
+-statistic value is above or is greater than 1
+
+143
+00:10:54,390 --> 00:11:00,190
+.318. Now suppose for example a sample is taken
+
+144
+00:11:00,190 --> 00:11:01,690
+with the following results.
+
+145
+00:11:04,690 --> 00:11:09,130
+N is 25. So we select a random sample of size 25.
+
+146
+00:11:09,910 --> 00:11:16,950
+This sample gives an average of 53.1 And sample
+
+147
+00:11:16,950 --> 00:11:19,950
+standard deviation of 10. Now by using this
+
+148
+00:11:19,950 --> 00:11:22,910
+information, we can determine easily the value of
+
+149
+00:11:22,910 --> 00:11:27,130
+T statistic. So T stat, the formula as we
+
+150
+00:11:27,130 --> 00:11:32,370
+discussed in chapter 8, x bar minus mu divided by s
+
+151
+00:11:32,370 --> 00:11:35,670
+over root, and here we replace sigma by s because
+
+152
+00:11:35,670 --> 00:11:38,790
+sigma is unknown. Straightforward calculation
+
+153
+00:11:38,790 --> 00:11:39,330
+gives
+
+154
+00:11:45,800 --> 00:11:51,320
+There are two approaches to reach your conclusion
+
+155
+00:11:51,320 --> 00:11:55,380
+about this example. Either use critical value
+
+156
+00:11:55,380 --> 00:11:59,120
+approach or B value. In this case, we cannot use
+
+157
+00:11:59,120 --> 00:12:03,520
+the two-sided confidence interval approach. 
So
+
+158
+00:12:03,520 --> 00:12:08,480
+let's see if this value, by using now approach
+
+159
+00:12:08,480 --> 00:12:08,980
+number one,
+
+160
+00:12:15,750 --> 00:12:20,190
+In the exam, you don't have to use both, just use
+
+161
+00:12:20,190 --> 00:12:24,670
+one of these. Unless the problem determines that
+
+162
+00:12:24,670 --> 00:12:28,890
+you have to use critical value or B value or
+
+163
+00:12:28,890 --> 00:12:32,750
+confidence interval, for example, if it's two
+
+164
+00:12:32,750 --> 00:12:35,770
+-sided. Sometimes maybe I will ask you to solve
+
+165
+00:12:35,770 --> 00:12:40,950
+the problems by using three different ways or two
+
+166
+00:12:40,950 --> 00:12:44,980
+different methods, whatever. But if it's just
+
+167
+00:12:44,980 --> 00:12:47,380
+solve the problem with P, you may use the critical
+
+168
+00:12:47,380 --> 00:12:53,340
+value approach or P-value. So let's see now. For
+
+169
+00:12:53,340 --> 00:12:57,840
+the critical value approach, one more time, your
+
+170
+00:12:57,840 --> 00:13:05,540
+critical value is 1.318. Now, is this value falls
+
+171
+00:13:05,540 --> 00:13:09,960
+in the rejection or non-rejection region? Now,
+
+172
+00:13:10,000 --> 00:13:13,810
+this value is smaller than 1.318. So this value
+
+173
+00:13:13,810 --> 00:13:18,590
+actually falls in the non-rejection region. So we
+
+174
+00:13:18,590 --> 00:13:26,350
+since this T stat is smaller than, which is 0.55,
+
+175
+00:13:28,910 --> 00:13:32,670
+smaller
+
+176
+00:13:32,670 --> 00:13:39,750
+than 1.318, then we don't reject the null
+
+177
+00:13:39,750 --> 00:13:40,170
+hypothesis.
+
+178
+00:13:43,350 --> 00:13:49,770
+Don't reject. It's zero. That means there is
+
+179
+00:13:49,770 --> 00:13:53,310
+insufficient evidence to support the claim. And
+
+180
+00:13:53,310 --> 00:13:57,310
+the claim is the mean is above 52. So your
+
+181
+00:13:57,310 --> 00:14:02,750
+conclusion should be written by using this way. 
We + +182 +00:14:02,750 --> 00:14:05,950 +don't reject the null hypothesis because or since + +183 +00:14:05,950 --> 00:14:10,670 +this statistic is smaller than 1.318. That means + +184 +00:14:10,670 --> 00:14:13,750 +there's not sufficient evidence that the mean bill + +185 +00:14:13,750 --> 00:14:19,550 +is over $52. So that's by using a critical value + +186 +00:14:19,550 --> 00:14:19,910 +approach. + +187 +00:14:22,950 --> 00:14:27,650 +The other approach by using B-value. unfortunately + +188 +00:14:27,650 --> 00:14:34,130 +T-tables don't give the exact b-value so here we + +189 +00:14:34,130 --> 00:14:38,590 +can use either excel spreadsheet to get the exact + +190 +00:14:38,590 --> 00:14:44,310 +b-value or any statistical software packages might + +191 +00:14:44,310 --> 00:14:48,610 +give the accurate result but the tables we have + +192 +00:14:48,610 --> 00:14:51,650 +gives the approximate b-value + +1 + +223 +00:17:43,400 --> 00:17:49,660 +than R. Always. Now this rule does not depend on + +224 +00:17:49,660 --> 00:17:53,460 +the alternative hypothesis. Always we reject the + +225 +00:17:53,460 --> 00:17:56,680 +null hypothesis if my B value is smaller than R. + +226 +00:18:00,000 --> 00:18:03,920 +Does this B value smaller than alpha, alpha of 10 + +227 +00:18:03,920 --> 00:18:10,580 +%? My B value is above 25, greater than 0.25%. So + +228 +00:18:10,580 --> 00:18:16,180 +this B value is above 10%. So we have to reject if + +229 +00:18:16,180 --> 00:18:18,520 +B value is smaller than alpha. In this case, B + +230 +00:18:18,520 --> 00:18:21,300 +value is greater than alpha, so we don't reject + +231 +00:18:21,300 --> 00:18:23,920 +alpha. Make sense? + +232 +00:18:26,750 --> 00:18:31,050 +Now, the exact p-value is 0.2937. + +233 +00:18:33,370 --> 00:18:38,970 +T-table gives this result, greater than 25%. The + +234 +00:18:38,970 --> 00:18:44,430 +exact is 0.2937. 
Again, since my p-value is above + +235 +00:18:44,430 --> 00:18:49,930 +10% or greater than 10%, so we don't reject the + +236 +00:18:49,930 --> 00:18:53,430 +null hypothesis. Now, let's see how can we use + +237 +00:18:53,430 --> 00:18:56,350 +Excel to find the exact. + +238 +00:18:58,830 --> 00:19:04,970 +I will use Excel to find the exact p-value. + +239 +00:19:09,730 --> 00:19:12,190 +Here you press on function. + +240 +00:19:16,050 --> 00:19:19,270 +Then we have here T distribution. + +241 +00:19:29,570 --> 00:19:33,910 +X, it means the value of the test statistic. In + +242 +00:19:33,910 --> 00:19:39,550 +this case, it's 0.55. Degrees of freedom at one + +243 +00:19:39,550 --> 00:19:44,770 +floor. Tails, we are talking about one-tailed + +244 +00:19:44,770 --> 00:19:49,720 +test, so it's one. So here, just write the value + +245 +00:19:49,720 --> 00:19:53,260 +of the statistic, degrees of freedom, then 1. Now, + +246 +00:19:53,680 --> 00:19:58,900 +the exact p-value is 0.2937. So that's the exact p + +247 +00:19:58,900 --> 00:20:06,020 +-value. Exact answer equal 0.2937. + +248 +00:20:07,480 --> 00:20:12,050 +So either one will give the same conclusion. It's + +249 +00:20:12,050 --> 00:20:16,970 +2.937 or greater than 25%. We reject a p-value + +250 +00:20:16,970 --> 00:20:19,890 +smaller than alpha, but this p-value is greater + +251 +00:20:19,890 --> 00:20:22,390 +than alpha, so we don't reject another hypothesis. + +252 +00:20:24,710 --> 00:20:29,350 +So we end with the same conclusion. Actually, this + +253 +00:20:29,350 --> 00:20:38,960 +slide gives how can we use t-test, Excel to + +254 +00:20:38,960 --> 00:20:44,740 +find the value of, or find of B value. That's all + +255 +00:20:44,740 --> 00:20:48,660 +for testing about the population mean if sigma is + +256 +00:20:48,660 --> 00:20:52,460 +known or sigma is unknown for two sided test or + +257 +00:20:52,460 --> 00:20:58,510 +one sided, upper or lower sided test. 
We mentioned + +258 +00:20:58,510 --> 00:21:01,830 +before that there are two types of testing. One is + +259 +00:21:01,830 --> 00:21:06,370 +called hypothesis testing for the mean and the + +260 +00:21:06,370 --> 00:21:11,570 +other for the proportion. Because as we mentioned + +261 +00:21:11,570 --> 00:21:14,790 +before, there are two types of data. One is the + +262 +00:21:14,790 --> 00:21:15,550 +numerical data. + +263 +00:21:22,130 --> 00:21:26,450 +Now, for numerical data, we have to use the means. + +264 +00:21:27,050 --> 00:21:30,390 +Otherwise, if the data is not numeric, I mean if + +265 +00:21:30,390 --> 00:21:34,690 +it is qualitative, we have to use proportion. For + +266 +00:21:34,690 --> 00:21:39,010 +example, if we are talking about gender, so it's + +267 +00:21:39,010 --> 00:21:42,490 +proportion we have to use instead of the means. + +268 +00:21:44,810 --> 00:21:49,290 +Let's see. Now, if we have data on gender. Gender + +269 +00:21:49,290 --> 00:21:54,390 +is classified males and females. Suppose, for + +270 +00:21:54,390 --> 00:21:59,790 +example, you select a random sample of size 100, + +271 +00:22:01,910 --> 00:22:07,270 +and you are interested in the number of female + +272 +00:22:07,270 --> 00:22:07,810 +students. + +273 +00:22:12,410 --> 00:22:18,010 +And the sample has, for example, 46 females. + +274 +00:22:21,310 --> 00:22:26,230 +So now your proportion is x divided by n, 46 + +275 +00:22:26,230 --> 00:22:28,470 +divided by 100, so 0.46. + +276 +00:22:31,350 --> 00:22:35,230 +You cannot say for example the average of gender + +277 +00:22:35,230 --> 00:22:40,480 +for example is 1.3. It has no meaning that the + +278 +00:22:40,480 --> 00:22:44,860 +average is 1.3. You have to say that, for example, + +279 +00:22:45,100 --> 00:22:52,540 +males or females represents 46%. Rather than + +280 +00:22:52,540 --> 00:22:55,840 +saying the average, for example, is 1.3. This one + +281 +00:22:55,840 --> 00:22:57,200 +has no meaning. 
+ +282 +00:23:00,120 --> 00:23:04,520 +So if we are talking about two possible outcomes, + +283 +00:23:05,160 --> 00:23:09,300 +success or failure, switch on or switch off, a + +284 +00:23:09,300 --> 00:23:12,680 +good item, defective item, and so on, we have to + +285 +00:23:12,680 --> 00:23:16,160 +use a proportion instead of the mean. Here we'll + +286 +00:23:16,160 --> 00:23:20,100 +talk about in details about hypothesis testing for + +287 +00:23:20,100 --> 00:23:26,900 +a proportions. So in this case, your problem + +288 +00:23:26,900 --> 00:23:29,520 +involves categorical variables. It means we have + +289 +00:23:31,390 --> 00:23:35,710 +classification either yes or no, males females and + +290 +00:23:35,710 --> 00:23:38,970 +so on, so two possible outcomes possess + +291 +00:23:38,970 --> 00:23:41,910 +the characteristic of interest, so suppose I am + +292 +00:23:41,910 --> 00:23:47,580 +interested in the number of defective items that is + +293 +00:23:47,580 --> 00:23:51,940 +produced by a firm. So my interest here in this + +294 +00:23:51,940 --> 00:23:56,020 +case is the number of defective items. So the number of + +295 +00:23:56,020 --> 00:24:01,620 +defective items is your x. So for example, suppose + +296 +00:24:01,620 --> 00:24:08,660 +the firm introduces 1,000 items, and we found that + +297 +00:24:08,660 --> 00:24:13,800 +10 of them are defective. So the proportion of + +298 +00:24:13,800 --> 00:24:17,130 +defective in this case is x over n. And we are + +299 +00:24:17,130 --> 00:24:20,730 +interested in the population proportion. As we + +300 +00:24:20,730 --> 00:24:24,950 +mentioned before, it's pi. So pi is the population + +301 +00:24:24,950 --> 00:24:30,890 +proportion. And actually, this p or pi ranges + +302 +00:24:30,890 --> 00:24:36,470 +between 0 and 1. So it never exceeds 1 or below 0. + +303 +00:24:37,830 --> 00:24:42,030 +So again, this slide actually is the same as we + +304 +00:24:42,030 --> 00:24:47,550 +have discussed in Chapter 8. 
The sample proportion + +305 +00:24:47,550 --> 00:24:54,210 +is P. P is X over N. And X is the number in + +306 +00:24:54,210 --> 00:24:57,510 +category of interest in sample. N is the sample + +307 +00:24:57,510 --> 00:25:03,790 +size. And we know that if the two conditions, N pi + +308 +00:25:03,790 --> 00:25:10,150 +and N times 1 minus pi are at least 5, then P can + +309 +00:25:10,150 --> 00:25:13,570 +be approximated by normal distribution with mean + +310 +00:25:13,570 --> 00:25:18,310 +equal pi. And standard deviation equals square + +311 +00:25:18,310 --> 00:25:22,070 +root of pi 1 minus pi divided by n. So actually, + +312 +00:25:22,230 --> 00:25:26,670 +this slide is repeated. So again, your p-value, + +313 +00:25:26,770 --> 00:25:31,150 +I'm sorry, your proportion is approximately + +314 +00:25:31,150 --> 00:25:38,250 +normally distributed of mean equals pi and sigma + +315 +00:25:38,250 --> 00:25:43,370 +pi 1 minus pi divided by n, all under the square + +316 +00:25:43,370 --> 00:25:48,880 +root. Now, based on this sampling distribution of + +317 +00:25:48,880 --> 00:25:54,980 +P, what's your z-statistic? As we mentioned in + +318 +00:25:54,980 --> 00:26:01,340 +chapter 6, the standard formula for z-statistic is + +319 +00:26:01,340 --> 00:26:07,260 +x minus mean of x divided by sigma of x. This is + +320 +00:26:07,260 --> 00:26:11,890 +the standard formula for the z-score. In chapter + +321 +00:26:11,890 --> 00:26:14,890 +8, there are two cases. One is the mean as we + +322 +00:26:14,890 --> 00:26:18,330 +mentioned. So in this case, if we are talking + +323 +00:26:18,330 --> 00:26:23,090 +about the mean, we should have x bar minus the + +324 +00:26:23,090 --> 00:26:25,290 +mean of x bar divided by sigma of x bar. So it's + +325 +00:26:25,290 --> 00:26:29,970 +similar to this one. But here, we replace x by x + +326 +00:26:29,970 --> 00:26:33,830 +bar. From chapter 8, we know that the mean of x + +327 +00:26:33,830 --> 00:26:38,690 +bar is mu. 
And sigma of x bar is sigma of the root + +328 +00:26:38,690 --> 00:26:46,210 +n. Now, for this proportion, use this statistic is + +329 +00:26:46,210 --> 00:26:52,390 +again replace x by p minus mu minus p divided by + +330 +00:26:52,390 --> 00:26:59,810 +sigma of p. So p minus. The mean of p is pi. So + +331 +00:26:59,810 --> 00:27:08,270 +this is pi. Divided by sigma root pi 1 minus pi + +332 +00:27:08,270 --> 00:27:14,340 +divided by. So this is your Z statistic. So + +333 +00:27:14,340 --> 00:27:19,040 +actually, there is nothing new. We just repeated + +334 +00:27:19,040 --> 00:27:23,720 +the concepts from Chapter 8. + +335 +00:27:26,910 --> 00:27:30,370 +So this is your Z statistic, P minus Pi divided by + +336 +00:27:30,370 --> 00:27:34,110 +root Pi, 1 minus Pi divided by the sample size. + +337 +00:27:34,550 --> 00:27:38,650 +That is valid only if the two conditions are + +338 +00:27:38,650 --> 00:27:43,810 +satisfied. Which are N times Pi at least 5, and N + +339 +00:27:43,810 --> 00:27:49,130 +times 1 minus Pi is at least 5. If at least one of + +340 +00:27:49,130 --> 00:27:53,070 +these conditions is not satisfied, then we cannot + +341 +00:27:53,070 --> 00:27:58,060 +use the Z statistic. So there are two cases. If + +342 +00:27:58,060 --> 00:28:01,860 +the two conditions together satisfied, then we + +343 +00:28:01,860 --> 00:28:05,340 +have to use this statistic. Otherwise, if one of + +344 +00:28:05,340 --> 00:28:07,740 +the conditions is not satisfied, then we cannot + +345 +00:28:07,740 --> 00:28:13,420 +use the Z-statistic, it says this case not discussed + +346 +00:28:13,420 --> 00:28:19,060 +in this chapter. So that's all for testing about + +347 +00:28:19,060 --> 00:28:24,760 +proportion. There is an equivalent form of this + +348 +00:28:24,760 --> 00:28:32,100 +statistic, we may replace p by X. For example, we + +349 +00:28:32,100 --> 00:28:38,920 +know that p equals X over N. 
Now let's see if we + +350 +00:28:38,920 --> 00:28:46,720 +multiply N for the numerator and the denominator. So if we + +351 +00:28:46,720 --> 00:28:47,560 +multiply N here, + +352 +00:28:59,680 --> 00:29:02,440 +I will reach another formula for this statistic. + +353 +00:29:03,680 --> 00:29:06,260 +The first one depends on the sample proportion. + +354 +00:29:07,020 --> 00:29:11,820 +The other one will depend on x. Now, n, this one + +355 +00:29:11,820 --> 00:29:19,120 +equals n times p, which is x minus n times n pi. + +356 +00:29:20,900 --> 00:29:32,320 +So again, n times p is x minus n pi. Now N, N dot + +357 +00:29:32,320 --> 00:29:36,720 +squared becomes N squared, so N squared, we have + +358 +00:29:36,720 --> 00:29:42,120 +N, so one cancelled, so N is left, so we have N pi + +359 +00:29:42,120 --> 00:29:43,500 +1. + +360 +00:29:45,140 --> 00:29:50,670 +These two statistics are equivalent. So you may + +361 +00:29:50,670 --> 00:29:56,290 +use p minus pi frequently. The common one actually + +362 +00:29:56,290 --> 00:30:00,270 +is this one, p minus pi divided by root, square + +363 +00:30:00,270 --> 00:30:03,390 +root of pi, 1 minus pi divided by n. So actually, + +364 +00:30:03,610 --> 00:30:07,390 +these two forms are equivalent. + +365 +00:30:09,190 --> 00:30:12,470 +In this case, the two conditions, x is greater + +366 +00:30:12,470 --> 00:30:17,790 +than or equal to 5, and n minus x is at least 5. + +367 +00:30:19,190 --> 00:30:24,790 +Let's look at this specific example. + +368 +00:30:26,650 --> 00:30:34,830 +A marketing company claims that it receives 8% + +369 +00:30:34,830 --> 00:30:40,170 +responses from its mailing. So now the claim is, + +370 +00:30:41,270 --> 00:30:46,770 +the company receives only 8% responses from its + +371 +00:30:46,770 --> 00:30:52,960 +mailing. So from their records, we know that the + +372 +00:30:52,960 --> 00:30:59,640 +proportion of response is 8%. 
To test this claim,
+
+373
+00:31:00,400 --> 00:31:06,800
+a random sample of 500 were surveyed with 25
+
+374
+00:31:06,800 --> 00:31:10,980
+responses. So we are actually interested in the
+
+375
+00:31:10,980 --> 00:31:19,910
+number of responses. So x is 25. So this survey of
+
+376
+00:31:19,910 --> 00:31:26,630
+500 gives 25 responses. Now test at 5%
+
+377
+00:31:26,630 --> 00:31:31,370
+significance limit. Now the direction is not
+
+378
+00:31:31,370 --> 00:31:37,730
+given, just pi equals 8%. So we have to test H0
+
+379
+00:31:37,730 --> 00:31:44,570
+against H1 of pi does not equal, since the
+
+380
+00:31:44,570 --> 00:31:48,210
+direction is not given. This problem does not say
+
+381
+00:31:48,210 --> 00:31:52,310
+it's above or greater or decreased or increased.
+
+382
+00:31:52,670 --> 00:31:57,630
+So it's two-tailed test. So we are testing if 0 pi
+
+383
+00:31:57,630 --> 00:32:02,350
+equals 0.08 against the alternate hypothesis pi is
+
+384
+00:32:02,350 --> 00:32:07,250
+not 0.08. Now, the first step, we have to check the
+
+385
+00:32:07,250 --> 00:32:15,100
+two conditions, n times pi. N is 500, Pi is 8%. So
+
+386
+00:32:15,100 --> 00:32:20,120
+5 times 8 gives 40. Now the other condition is the
+
+387
+00:32:20,120 --> 00:32:23,980
+complement actually. So since N equals 500, so
+
+388
+00:32:23,980 --> 00:32:28,940
+this one should be 460. The reason behind that is
+
+389
+00:32:28,940 --> 00:32:35,640
+if we add N Pi and the other condition, N times 1
+
+390
+00:32:35,640 --> 00:32:41,590
+minus Pi. So this gives N Pi plus N, factor here,
+
+391
+00:32:41,690 --> 00:32:46,010
+n minus n, y. So this cancels. So we end with n.
+
+392
+00:32:46,570 --> 00:32:52,710
+So the total should be 500. So if the n times y is
+
+393
+00:32:52,710 --> 00:32:58,470
+40, then n times 1 minus y is 460. So two
+
+394
+00:32:58,470 --> 00:33:02,510
+conditions are satisfied. 
Then we can use, we can + +395 +00:33:02,510 --> 00:33:05,890 +say that the sample proportion is approximately + +396 +00:33:05,890 --> 00:33:12,040 +normally distributed with mean equals y, with + +397 +00:33:12,040 --> 00:33:15,900 +standard deviation of square root pi 1 minus pi + +398 +00:33:15,900 --> 00:33:20,560 +divided by n. So that's your mean. So the mean is + +399 +00:33:20,560 --> 00:33:33,460 +pi. And sigma root pi, pi is 8%, times + +400 +00:33:33,460 --> 00:33:38,440 +1 minus pi is 98, times 2, divided by n, 500. + +401 +00:33:43,210 --> 00:33:47,350 +So your Z statistic, P, + +402 +00:33:49,110 --> 00:33:55,550 +now what's the value of P? P equals X over N. X is + +403 +00:33:55,550 --> 00:34:06,990 +given, X is 25, divided by 500, so P is 5%. So + +404 +00:34:06,990 --> 00:34:07,950 +now, Z statistic. + +405 +00:34:12,080 --> 00:34:16,560 +p minus y divided + +406 +00:34:16,560 --> 00:34:26,080 +by this sigma which is 0.892 divided by 500. + +407 +00:34:27,120 --> 00:34:32,860 +This will give minus 2.47. So straightforward + +408 +00:34:32,860 --> 00:34:37,620 +calculation. It gives the value of negative point, + +409 +00:34:38,340 --> 00:34:42, + +445 +00:38:08,340 --> 00:38:13,660 +at the table for + +446 +00:38:13,660 --> 00:38:19,330 +the normal Negative side. Look at negative 2.47. + +447 +00:38:21,250 --> 00:38:26,750 +Negative 2.4 all the way under 7 will get this + +448 +00:38:26,750 --> 00:38:29,750 +result. So it's 0.068. + +449 +00:38:31,910 --> 00:38:37,350 +So this is one of the two areas. So the area below + +450 +00:38:37,350 --> 00:38:43,550 +negative 2.47 is 0.068. The area above the same + +451 +00:38:43,550 --> 00:38:46,870 +value is the same area because it's symmetric. So + +452 +00:38:46,870 --> 00:38:49,570 +it means that just multiplying this by 2, you will + +453 +00:38:49,570 --> 00:38:52,930 +get 0.0136. 
+ +454 +00:38:56,690 --> 00:38:59,730 +Now, always, as we mentioned before, always we + +455 +00:38:59,730 --> 00:39:02,970 +reject the null hypothesis if your p value is + +456 +00:39:02,970 --> 00:39:06,610 +smaller than alpha. Now, alpha is given by 5%. + +457 +00:39:06,610 --> 00:39:09,950 +Now, is this value smaller than alpha? The answer + +458 +00:39:09,950 --> 00:39:16,450 +is yes. So since my p value equals 0.136, + +459 +00:39:16,570 --> 00:39:22,770 +smaller than 5%, then again, we reject the null + +460 +00:39:22,770 --> 00:39:26,170 +hypothesis. So we end with the same conclusion by + +461 +00:39:26,170 --> 00:39:33,570 +using the critical value. That's for using p value + +462 +00:39:33,570 --> 00:39:37,420 +approach. So here, we reject the null hypothesis + +463 +00:39:37,420 --> 00:39:43,000 +since your p value is smaller than alpha. The last + +464 +00:39:43,000 --> 00:39:48,180 +approach, which is not given in + +465 +00:39:48,180 --> 00:39:52,960 +the slides you have critical value. I'm sorry, + +466 +00:39:53,320 --> 00:39:56,060 +confidence interval. And we know that confidence + +467 +00:39:56,060 --> 00:39:56,620 +interval + +468 +00:40:02,370 --> 00:40:09,210 +p plus or minus Z square root p 1 minus p divided + +469 +00:40:09,210 --> 00:40:22,490 +by N. p is 0.5, 0.5 plus or minus 1.96 times 0.5 * 0.95 + +470 +00:40:22,490 --> 00:40:27,330 +divided by N. Let's compute the critical, the two + +471 +00:40:27,330 --> 00:40:27,710 +limits. + +472 +00:40:35,030 --> 00:40:43,250 +So we have this value here represents the margin + +473 +00:40:43,250 --> 00:40:46,070 +of error. So let's compute the margin of error + +474 +00:40:46,070 --> 00:40:54,350 +first. So it's 1.96 times square root of 0.05 + +475 +00:40:54,350 --> 00:40:57,850 +times 0.95 divided by 500. + +476 +00:41:00,390 --> 00:41:08,040 +So the margin of error is 0.019. So 0.5 plus + +477 +00:41:08,040 --> 00:41:11,660 +around 0.019, around 0.02. 
For example, let's
+
+478
+00:41:11,660 --> 00:41:16,240
+say it's 0.02. It's approximately 0.02. So now the
+
+479
+00:41:16,240 --> 00:41:22,360
+confidence interval for pi, so 95% confidence
+
+480
+00:41:22,360 --> 00:41:30,640
+interval of pi is pi greater than or equal 0.05
+
+481
+00:41:30,640 --> 00:41:39,270
+minus 0.02 is 3%. And plus 0.02 gives 7%. So now, pi,
+
+482
+00:41:39,550 --> 00:41:45,330
+the population proportion, falls between 3% and
+
+483
+00:41:45,330 --> 00:41:52,230
+7%. So you range here from 3% all the way up to
+
+484
+00:41:52,230 --> 00:42:00,590
+7%. That's pi. Now, we are testing about it's 0.08 if
+
+485
+00:42:00,590 --> 00:42:07,950
+y equals 8%. Now, is this value lies in the
+
+486
+00:42:07,950 --> 00:42:11,970
+interval or outside? This value actually lies
+
+487
+00:42:11,970 --> 00:42:16,010
+outside this interval, so it's never equal 0.08,
+
+488
+00:42:17,090 --> 00:42:21,350
+because pi in this confidence interval ranges
+
+489
+00:42:21,350 --> 00:42:28,040
+between 3% all the way up to 7%. Now, does 8% lie
+
+490
+00:42:28,040 --> 00:42:31,620
+in this interval? The answer is no. That means it
+
+491
+00:42:31,620 --> 00:42:34,620
+never equals 8%. So we have to reject the null
+
+492
+00:42:34,620 --> 00:42:39,520
+hypothesis. So since the
+
+493
+00:42:39,520 --> 00:42:45,140
+confidence interval we have does not cover or
+
+494
+00:42:45,140 --> 00:42:51,640
+capture or contain pi of 8%, then we reject it.
+
+495
+00:42:54,600 --> 00:43:00,720
+So actually, there are three methods to solve this
+
+496
+00:43:00,720 --> 00:43:05,480
+problem. One is critical value approach. And in
+
+497
+00:43:05,480 --> 00:43:09,840
+this case, we just found the critical values here,
+
+498
+00:43:10,280 --> 00:43:14,300
+minus and plus 1.96. And the value of the
+
+499
+00:43:14,300 --> 00:43:19,230
+statistic falls in the rejection region, so we
+
+500
+00:43:19,230 --> 00:43:22,230
+reject the null hypothesis. 
That's for the first
+
+501
+00:43:22,230 --> 00:43:26,370
+approach. The other one, the p value, and we found
+
+502
+00:43:26,370 --> 00:43:29,770
+that the p value is 0.0136, which is smaller
+
+503
+00:43:29,770 --> 00:43:32,590
+than alpha. Then again, we reject the null
+
+504
+00:43:32,590 --> 00:43:36,290
+hypothesis. The last approach is the confidence
+
+505
+00:43:36,290 --> 00:43:38,910
+interval approach, because here we are talking
+
+506
+00:43:38,910 --> 00:43:42,330
+about two-sided test. And the confidence is
+
+507
+00:43:42,330 --> 00:43:43,690
+approximately
+
+508
+00:43:45,410 --> 00:43:50,070
+ranges between three and seven percent. And my pi,
+
+509
+00:43:50,130 --> 00:43:52,990
+which I am talking about in the null hypothesis,
+
+510
+00:43:53,830 --> 00:43:56,490
+is not in the center, but I mean the confidence
+
+511
+00:43:56,490 --> 00:43:59,610
+interval does not capture the value of pi of eight
+
+512
+00:43:59,610 --> 00:44:04,550
+percent. Therefore, we reject the null hypothesis.
+
+513
+00:44:06,270 --> 00:44:11,790
+That's for testing about population proportion.
+
+514
+00:44:14,100 --> 00:44:14,720
+Any questions?
+
+515
+00:44:19,100 --> 00:44:23,060
+Next time, shall I start chapter 10?
+
+516
+00:44:27,060 --> 00:44:33,380
+Up to this point, we just explained hypothesis
+
+517
+00:44:33,380 --> 00:44:37,720
+testing about one sample. So there is only one
+
+518
+00:44:37,720 --> 00:44:41,000
+sample in chapter 9. Here, suppose there are two
+
+519
+00:44:41,000 --> 00:44:45,270
+samples. How can we conduct hypothesis testing for
+
+520
+00:44:45,270 --> 00:44:48,250
+comparing two population means. For example,
+
+521
+00:44:48,350 --> 00:44:52,010
+suppose we are teaching males and females, and we
+
+522
+00:44:52,010 --> 00:44:55,190
+are interested to see if there is a significant
+
+523
+00:44:55,190 --> 00:44:59,510
+difference between the performance of males and
+
+524
+00:44:59,510 --> 00:45:05,530
+females. 
For example, suppose the instructor + +525 +00:45:05,530 --> 00:45:11,670 +claimed that males do better than females in the + +526 +00:45:11,670 --> 00:45:16,430 +exam. So how can we test if, for example, the mean + +527 +00:45:16,430 --> 00:45:23,210 +score for group A, for example, is greater than + +528 +00:45:23,210 --> 00:45:28,190 +the mean of group B? So in this case, there are + +529 +00:45:28,190 --> 00:45:31,910 +two samples. So how can we conduct hypothesis + +530 +00:45:31,910 --> 00:45:36,530 +testing for two sample tests? That will be in + +531 +00:45:36,530 --> 00:45:42,990 +chapter 10. So chapter 10 is left for this course. In + +532 +00:45:42,990 --> 00:45:49,570 +addition to that, we'll discuss correlation and + +533 +00:45:49,570 --> 00:45:53,290 +simple linear regression after that. So maybe + +534 +00:45:53,290 --> 00:46:01,270 +we'll discuss two more chapters, 10 and 12. If we + +535 +00:46:01,270 --> 00:46:06,390 +have enough time, maybe we'll cover chapter 11. + +536 +00:46:07,550 --> 00:46:11,610 +Guys, it depends actually on the time we have. So + +537 +00:46:11,610 --> 00:46:17,130 +that's all for today. Any questions? Comments? + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..126bebb2b0b3a64521d653c6f93b8790c8aea4c6 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A_postprocess.srt @@ -0,0 +1,2148 @@ +1 +00:00:06,210 --> 00:00:08,910 +Today, inshallah, we'll continue chapter nine. + +2 +00:00:09,390 --> 00:00:14,230 +We'll talk in details about one-tailed test. Last + +3 +00:00:14,230 --> 00:00:17,950 +time, we talked about two-tailed test. And we + +4 +00:00:17,950 --> 00:00:21,690 +discussed that there are three tools in order to + +5 +00:00:21,690 --> 00:00:25,370 +test the null hypothesis. 
The first one is + +6 +00:00:25,370 --> 00:00:28,510 +critical value approach, the second B value + +7 +00:00:28,510 --> 00:00:32,060 +approach, and confidence interval. Keep in mind + +8 +00:00:32,060 --> 00:00:35,480 +the last approach, confidence interval, is used + +9 +00:00:35,480 --> 00:00:39,880 +only for two-tailed tests. So now for one-tailed + +10 +00:00:39,880 --> 00:00:44,040 +test, we are going to use two approaches, critical + +11 +00:00:44,040 --> 00:00:49,500 +value and B value approach. So there are many + +12 +00:00:49,500 --> 00:00:54,340 +cases the alternative hypothesis focuses on + +13 +00:00:54,340 --> 00:00:58,420 +particular direction. In this case, we have a + +14 +00:00:58,420 --> 00:01:03,170 +certain direction. And this direction could be to + +15 +00:01:03,170 --> 00:01:07,510 +the left side. And it's called lower tail test. + +16 +00:01:08,290 --> 00:01:14,830 +And the other one greater than or upper tail test. + +17 +00:01:15,050 --> 00:01:18,050 +So there are two types of one tail test. One is + +18 +00:01:18,050 --> 00:01:21,070 +called lower test. In this case, they alternate by + +19 +00:01:21,070 --> 00:01:26,670 +hypothesis. Each one might be, for example, mu + +20 +00:01:26,670 --> 00:01:30,410 +smaller than 3. So the null hypothesis can be + +21 +00:01:30,410 --> 00:01:34,870 +written as mu is greater than or equal to 3. Just + +22 +00:01:34,870 --> 00:01:39,970 +for simple notation, we can use mu equal to 3 + +23 +00:01:39,970 --> 00:01:45,290 +instead of mu greater than or equal to 3. So we + +24 +00:01:45,290 --> 00:01:52,230 +can use this notation instead of using mu + +25 +00:01:52,230 --> 00:01:55,550 +is greater than or equal to 3. It's better to use + +26 +00:01:55,550 --> 00:02:00,210 +this notation. Because as we know, H1 is the + +27 +00:02:00,210 --> 00:02:04,870 +opposite of H0. 
So if under H0, we have a mu + +28 +00:02:04,870 --> 00:02:07,830 +greater than or equal to 3, then for the + +29 +00:02:07,830 --> 00:02:10,430 +alternative hypothesis, it could be a mu smaller + +30 +00:02:10,430 --> 00:02:14,810 +than 3. But just to use simple notation, we may + +31 +00:02:14,810 --> 00:02:18,790 +write mu equals 3 against the alternative. + +32 +00:02:19,270 --> 00:02:22,670 +Actually, we are focusing on the alternative + +33 +00:02:22,670 --> 00:02:27,990 +hypothesis, because the direction of this one + +34 +00:02:27,990 --> 00:02:34,610 +gives the rejection region of the test because my + +35 +00:02:34,610 --> 00:02:38,390 +rejection region actually based on the sign or the + +36 +00:02:38,390 --> 00:02:42,150 +direction under the alternative hypothesis so + +37 +00:02:42,150 --> 00:02:46,530 +again this is called lower tail test since the + +38 +00:02:46,530 --> 00:02:49,190 +alternative hypothesis is focused on the lower + +39 +00:02:49,190 --> 00:02:53,290 +tail below the mean of three three is just an + +40 +00:02:53,290 --> 00:02:58,120 +example the other one When we are testing Mu + +41 +00:02:58,120 --> 00:03:01,940 +smaller than or equal to 3 against Mu above 3, in + +42 +00:03:01,940 --> 00:03:05,300 +this case, this one is called an upper tail test + +43 +00:03:05,300 --> 00:03:09,000 +since the alternative hypothesis is focused on the + +44 +00:03:09,000 --> 00:03:12,840 +upper tail above the minimum 3. Now, how can we + +45 +00:03:12,840 --> 00:03:16,340 +state the appropriate null and alternative + +46 +00:03:16,340 --> 00:03:21,620 +hypothesis? The answer depends on the problem + +47 +00:03:21,620 --> 00:03:24,320 +itself. So by using the problem, we can determine + +48 +00:03:25,110 --> 00:03:29,990 +If the test is lower tail or upper tail. So you + +49 +00:03:29,990 --> 00:03:33,710 +have to state carefully both null and alternative + +50 +00:03:33,710 --> 00:03:37,230 +hypothesis. 
So for example, if the problem says + +51 +00:03:37,230 --> 00:03:44,250 +increase, it means a mu is above. If the problem + +52 +00:03:44,250 --> 00:03:50,130 +says smaller than or decrease from whatever the + +53 +00:03:50,130 --> 00:03:53,270 +value is, then we have to use a mu smaller than + +54 +00:03:53,270 --> 00:03:57,230 +that value. Any question? So that's how can we + +55 +00:03:57,230 --> 00:04:01,230 +state the null and alternative hypothesis for one + +56 +00:04:01,230 --> 00:04:04,830 +TAIL test. Let's see in detail the rejection + +57 +00:04:04,830 --> 00:04:09,390 +regions for lower and upper TAIL tests. For lower + +58 +00:04:09,390 --> 00:04:13,330 +TAIL test, so in this case we are focusing on the + +59 +00:04:13,330 --> 00:04:17,770 +left side, so my rejection region should be this + +60 +00:04:17,770 --> 00:04:24,670 +one. Now, we are testing our hypothesis at level + +61 +00:04:24,670 --> 00:04:28,950 +of significance alpha. For two-tiered test, we + +62 +00:04:28,950 --> 00:04:32,250 +split this alpha by two, because we have two + +63 +00:04:32,250 --> 00:04:35,090 +rejection regions, one to the right and other to + +64 +00:04:35,090 --> 00:04:38,150 +the left side. But here there is only one side, so + +65 +00:04:38,150 --> 00:04:41,010 +one direction, so we have to keep alpha as it is. + +66 +00:04:41,330 --> 00:04:43,810 +So we have alpha here, so they accept the + +67 +00:04:43,810 --> 00:04:50,250 +rejection region below minus z alpha or minus z + +68 +00:04:50,250 --> 00:04:55,080 +alpha, depends on Sigma, known or unknown. So it's + +69 +00:04:55,080 --> 00:04:58,220 +the same concepts as we discussed before. So my + +70 +00:04:58,220 --> 00:05:01,100 +rejection region is the area to the left side. So + +71 +00:05:01,100 --> 00:05:05,260 +for this particular hypothesis, if we are talking + +72 +00:05:05,260 --> 00:05:11,540 +about testing Mu smaller than 3, then we may + +73 +00:05:11,540 --> 00:05:12,820 +reject F0. 
+ +74 +00:05:17,100 --> 00:05:25,980 +If U is a statistic, Or T is smaller than minus + +75 +00:05:25,980 --> 00:05:31,160 +the alpha. So there is only one rejection region + +76 +00:05:31,160 --> 00:05:34,600 +in this case because we are talking about one tail + +77 +00:05:34,600 --> 00:05:38,980 +test. So again, my critical value is only one + +78 +00:05:38,980 --> 00:05:42,040 +critical value because here it's just one tier + +79 +00:05:42,040 --> 00:05:46,160 +test. It's negative z alpha or negative z alpha. + +80 +00:05:46,620 --> 00:05:50,500 +So we reject the null hypothesis if the value of + +81 +00:05:50,500 --> 00:05:54,040 +the z statistic falls in the rejection region. It + +82 +00:05:54,040 --> 00:05:58,860 +means that Z statistic is smaller than minus Z + +83 +00:05:58,860 --> 00:06:03,760 +alpha. That's for lower tail test. For the upper + +84 +00:06:03,760 --> 00:06:06,860 +tail, vice versa actually. It's the same, but here + +85 +00:06:06,860 --> 00:06:12,540 +the rejection region is on the right side, the + +86 +00:06:12,540 --> 00:06:16,020 +upper tail. So here we reject the null hypothesis + +87 +00:06:16,020 --> 00:06:22,750 +for the other case. If we are testing Mu smaller + +88 +00:06:22,750 --> 00:06:27,330 +than or equal to 3 against Mu is above 3. The + +89 +00:06:27,330 --> 00:06:33,750 +rejection is really on this area. So Z alpha or + +90 +00:06:33,750 --> 00:06:40,330 +above it. So we reject H0 + +91 +00:06:40,330 --> 00:06:50,050 +if my Z statistic is above Z alpha. So that's for + +92 +00:06:50,050 --> 00:06:55,950 +lower and upper tail test. And actually, if these + +93 +00:06:55,950 --> 00:06:59,770 +statistics falls below in this case, Z alpha, then + +94 +00:06:59,770 --> 00:07:03,250 +it falls in the non-rejection region. So we don't + +95 +00:07:03,250 --> 00:07:06,050 +reject the null hypothesis. 
That means there is + +96 +00:07:06,050 --> 00:07:09,410 +insufficient evidence to support the alternative + +97 +00:07:09,410 --> 00:07:14,070 +hypothesis. So that's for one tail test, either + +98 +00:07:14,070 --> 00:07:21,120 +lower or the upper tail. Let's see an example for + +99 +00:07:21,120 --> 00:07:25,500 +opportunity test when sigma is unknown. Again, the + +100 +00:07:25,500 --> 00:07:28,400 +idea is the same. Sigma is known or unknown. We + +101 +00:07:28,400 --> 00:07:33,980 +just replace z by t, sigma by s. That's all. Here, + +102 +00:07:34,420 --> 00:07:38,740 +for example, a phone industry manager thinks that + +103 +00:07:38,740 --> 00:07:42,600 +customer monthly cell phone bills have increased. + +104 +00:07:43,990 --> 00:07:49,030 +So he thought that the bills have increased. Now, + +105 +00:07:49,110 --> 00:07:52,490 +by using this statement, we can figure out the + +106 +00:07:52,490 --> 00:07:57,230 +alternative hypothesis. It's aborted. And now, + +107 +00:07:57,350 --> 00:08:00,670 +average over $52 per month. + +108 +00:08:04,330 --> 00:08:11,090 +It says that he thinks that the customer monthly + +109 +00:08:11,090 --> 00:08:14,850 +cellphone bills have increased. Increased means mu + +110 +00:08:14,850 --> 00:08:17,710 +is above $52. + +111 +00:08:18,550 --> 00:08:23,190 +The company wishes to test this claim. Here we are + +112 +00:08:23,190 --> 00:08:28,330 +assuming the population is normally distributed. + +113 +00:08:29,270 --> 00:08:32,710 +So first step, we have to state the appropriate + +114 +00:08:32,710 --> 00:08:35,850 +null and alternative hypothesis. So based on this + +115 +00:08:35,850 --> 00:08:39,490 +problem, we can easily figure out that we are + +116 +00:08:39,490 --> 00:08:43,890 +testing mu smaller than or equal to 52 against the + +117 +00:08:43,890 --> 00:08:50,270 +alternative, mu is above 52. 
So again, mu is + +118 +00:08:50,270 --> 00:08:54,070 +smaller than versus + +119 +00:08:54,070 --> 00:09:00,580 +mu is above. So this is step number one. So we + +120 +00:09:00,580 --> 00:09:03,740 +have to form or to state a null and alternative + +121 +00:09:03,740 --> 00:09:09,400 +hypothesis. So the mu above 52 means the average + +122 +00:09:09,400 --> 00:09:15,460 +is greater than $52 per month. Now suppose, for + +123 +00:09:15,460 --> 00:09:20,820 +example, my alpha is 10%. And + +124 +00:09:20,820 --> 00:09:25,160 +we choose a random sample of size 25. Now by using + +125 +00:09:25,160 --> 00:09:27,300 +this information, we can determine the rejection + +126 +00:09:27,300 --> 00:09:32,170 +region. Now, the problem mentioned that sigma is + +127 +00:09:32,170 --> 00:09:34,250 +unknown, and the population is normally + +128 +00:09:34,250 --> 00:09:39,490 +distributed. In this case, we have to use T. Now, + +129 +00:09:39,570 --> 00:09:41,750 +my rejection region, since we are talking about + +130 +00:09:41,750 --> 00:09:46,010 +testing upper tilted, so my rejection region + +131 +00:09:46,010 --> 00:09:51,550 +should be to the right side, T alpha. It's above. + +132 +00:09:51,790 --> 00:09:56,600 +Now, T alpha, it means T 10%. And degrees of + +133 +00:09:56,600 --> 00:10:00,920 +freedom, 24. I think many times we used the T + +134 +00:10:00,920 --> 00:10:06,660 +table. So by using T table, you can find that your + +135 +00:10:06,660 --> 00:10:13,760 +critical value is 1.318. So that's your critical + +136 +00:10:13,760 --> 00:10:25,460 +value. Now, my decision is, if T statistic If the + +137 +00:10:25,460 --> 00:10:28,960 +value of T statistic lies in this rejection + +138 +00:10:28,960 --> 00:10:32,760 +region, we reject the null hypothesis. So if T + +139 +00:10:32,760 --> 00:10:44,660 +statistic is above 1.38, then we reject the null + +140 +00:10:44,660 --> 00:10:47,770 +hypothesis. So let's see if we reject it. 
or don't + +141 +00:10:47,770 --> 00:10:50,570 +reject. So we reject the null hypothesis if your t + +142 +00:10:50,570 --> 00:10:54,390 +-statistic value is above or is greater than 1 + +143 +00:10:54,390 --> 00:11:00,190 +.318. Now suppose for example a sample is taken + +144 +00:11:00,190 --> 00:11:01,690 +with the following results. + +145 +00:11:04,690 --> 00:11:09,130 +N is 25. So we select a random sample of size 25. + +146 +00:11:09,910 --> 00:11:16,950 +This sample gives an average of 53.1 And sample + +147 +00:11:16,950 --> 00:11:19,950 +standard deviation of 10. Now by using this + +148 +00:11:19,950 --> 00:11:22,910 +information, we can determine easily the value of + +149 +00:11:22,910 --> 00:11:27,130 +T statistic. So T stat, the formula as we + +150 +00:11:27,130 --> 00:11:32,370 +discussed in chapter 8, x bar minus b divided by s + +151 +00:11:32,370 --> 00:11:35,670 +over root, and here we replace sigma by s because + +152 +00:11:35,670 --> 00:11:38,790 +sigma is unknown. Straightforward calculation + +153 +00:11:38,790 --> 00:11:39,330 +gives + +154 +00:11:45,800 --> 00:11:51,320 +There are two approaches to reach your conclusion + +155 +00:11:51,320 --> 00:11:55,380 +about this example. Either use critical value + +156 +00:11:55,380 --> 00:11:59,120 +approach or B value. In this case, we cannot use + +157 +00:11:59,120 --> 00:12:03,520 +the two-sided confidence interval approach. So + +158 +00:12:03,520 --> 00:12:08,480 +let's see if this value, by using now approach + +159 +00:12:08,480 --> 00:12:08,980 +number one, + +160 +00:12:15,750 --> 00:12:20,190 +In the exam, you don't have to use both, just use + +161 +00:12:20,190 --> 00:12:24,670 +one of these. Unless the problem determines that + +162 +00:12:24,670 --> 00:12:28,890 +you have to use critical value or B value or + +163 +00:12:28,890 --> 00:12:32,750 +confidence interval, for example, if it's two + +164 +00:12:32,750 --> 00:12:35,770 +-sided. 
Sometimes maybe I will ask you to solve
+
+165
+00:12:35,770 --> 00:12:40,950
+the problems by using three different ways or two
+
+166
+00:12:40,950 --> 00:12:44,980
+different methods, whatever. But if it's just
+
+167
+00:12:44,980 --> 00:12:47,380
+solve the problem, you may use the critical
+
+168
+00:12:47,380 --> 00:12:53,340
+value approach or P-value. So let's see now. For
+
+169
+00:12:53,340 --> 00:12:57,840
+the critical value approach, one more time, your
+
+170
+00:12:57,840 --> 00:13:05,540
+critical value is 1.318. Now, does this value fall
+
+171
+00:13:05,540 --> 00:13:09,960
+in the rejection or non-rejection region? Now,
+
+172
+00:13:10,000 --> 00:13:13,810
+this value is smaller than 1.318. So this value
+
+173
+00:13:13,810 --> 00:13:18,590
+actually falls in the non-rejection region. So we
+
+174
+00:13:18,590 --> 00:13:26,350
+see, since this statistic, which is 0.55, is
+
+175
+00:13:28,910 --> 00:13:32,670
+smaller
+
+176
+00:13:32,670 --> 00:13:39,750
+than 1.318, then we don't reject the null
+
+177
+00:13:39,750 --> 00:13:40,170
+hypothesis.
+
+178
+00:13:43,350 --> 00:13:49,770
+Don't reject H0. That means there is
+
+179
+00:13:49,770 --> 00:13:53,310
+insufficient evidence to support the claim. And
+
+180
+00:13:53,310 --> 00:13:57,310
+the claim is the mean is above 52. So your
+
+181
+00:13:57,310 --> 00:14:02,750
+conclusion should be written by using this way. We
+
+182
+00:14:02,750 --> 00:14:05,950
+don't reject the null hypothesis because or since
+
+183
+00:14:05,950 --> 00:14:10,670
+this statistic is smaller than 1.318. That means
+
+184
+00:14:10,670 --> 00:14:13,750
+there's not sufficient evidence that the mean bill
+
+185
+00:14:13,750 --> 00:14:19,550
+is over $52. So that's by using a critical value
+
+186
+00:14:19,550 --> 00:14:19,910
+approach.
+
+187
+00:14:22,950 --> 00:14:27,650
+The other approach by using P-value. 
unfortunately + +188 +00:14:27,650 --> 00:14:34,130 +T-tables don't give the exact b-value so here we + +189 +00:14:34,130 --> 00:14:38,590 +can use either excel spreadsheet to get the exact + +190 +00:14:38,590 --> 00:14:44,310 +b-value or any statistical software packages might + +191 +00:14:44,310 --> 00:14:48,610 +give the accurate result but the tables we have + +192 +00:14:48,610 --> 00:14:51,650 +gives the approximate b-value + +193 +00:14:54,540 --> 00:14:59,300 +If you, if we have the table for the, for this + +194 +00:14:59,300 --> 00:15:02,640 +particular example, let's see how can we figure + +195 +00:15:02,640 --> 00:15:05,980 +out the B value by using the T table we have. + +196 +00:15:14,320 --> 00:15:22,860 +So let's open the T table. It takes a second. So + +197 +00:15:22,860 --> 00:15:24,180 +this is statistical table. + +198 +00:15:27,060 --> 00:15:31,960 +So this + +199 +00:15:31,960 --> 00:15:39,540 +is your T table. Now we are looking for B value at + +200 +00:15:39,540 --> 00:15:44,060 +degrees of freedom of 24. Because the sample size + +201 +00:15:44,060 --> 00:15:47,260 +is 25. Now look at 24. + +202 +00:15:49,880 --> 00:15:51,760 +So my V value again. + +203 +00:15:57,400 --> 00:16:01,920 +Your V value is + +204 +00:16:01,920 --> 00:16:08,120 +probability of T greater than 0.55. Because the T + +205 +00:16:08,120 --> 00:16:11,780 +statistic is 0.55. Now let's compute this area. + +206 +00:16:12,710 --> 00:16:19,830 +above 0.55 now if we have T table 24 degrees of + +207 +00:16:19,830 --> 00:16:26,530 +freedom we are looking for 0.55 the first value + +208 +00:16:26,530 --> 00:16:34,910 +here is 0.685 next one is 0.8 so it's increased so + +209 +00:16:34,910 --> 00:16:42,060 +my answer could be this P value over 25 or You + +210 +00:16:42,060 --> 00:16:45,540 +think my point 55 is to the left of this value? + +211 +00:16:45,660 --> 00:16:48,580 +Yes. So my B value is to the left of this point. 
+ +212 +00:16:49,660 --> 00:16:54,520 +Now here, as this statistic decreases, B value + +213 +00:16:54,520 --> 00:16:59,520 +decreases. So vice versa. There is inverse + +214 +00:16:59,520 --> 00:17:04,780 +relationship between T value and B value. Now, 25 + +215 +00:17:04,780 --> 00:17:09,420 +to the right, 20, 15, so the values are decreasing + +216 +00:17:09,420 --> 00:17:12,460 +in this case. So what do you think? My B value is + +217 +00:17:12,460 --> 00:17:18,160 +above or smaller than 25? Above. So your B value + +218 +00:17:18,160 --> 00:17:24,320 +in this case is greater than 25. Again, T table + +219 +00:17:24,320 --> 00:17:28,120 +does not give the exact B value. So just you can + +220 +00:17:28,120 --> 00:17:32,820 +say my B value is above 25%. Always, as we + +221 +00:17:32,820 --> 00:17:35,300 +mentioned before, we reject the null hypothesis. + +222 +00:17:36,700 --> 00:17:43,400 +Always we reject H0 if your B value is smaller + +223 +00:17:43,400 --> 00:17:49,660 +than R. Always. Now this rule does not depend on + +224 +00:17:49,660 --> 00:17:53,460 +the alternative hypothesis. Always we reject the + +225 +00:17:53,460 --> 00:17:56,680 +null hypothesis if my B value is smaller than R. + +226 +00:18:00,000 --> 00:18:03,920 +Does this B value smaller than alpha, alpha of 10 + +227 +00:18:03,920 --> 00:18:10,580 +%? My B value is above 25, greater than 0.25%. So + +228 +00:18:10,580 --> 00:18:16,180 +this B value is above 10%. So we have to reject if + +229 +00:18:16,180 --> 00:18:18,520 +B value is smaller than alpha. In this case, B + +230 +00:18:18,520 --> 00:18:21,300 +value is greater than alpha, so we don't reject + +231 +00:18:21,300 --> 00:18:23,920 +alpha. Make sense? + +232 +00:18:26,750 --> 00:18:31,050 +Now, the exact v-value is 0.2937. + +233 +00:18:33,370 --> 00:18:38,970 +T-table gives this result, greater than 25%. The + +234 +00:18:38,970 --> 00:18:44,430 +exact is 0.2937. 
Again, since my p-value is above
+
+235
+00:18:44,430 --> 00:18:49,930
+10% or greater than 10%, so we don't reject the
+
+236
+00:18:49,930 --> 00:18:53,430
+null hypothesis. Now, let's see how can we use
+
+237
+00:18:53,430 --> 00:18:56,350
+Excel to find the exact.
+
+238
+00:18:58,830 --> 00:19:04,970
+I will use Excel to find the exact p-value.
+
+239
+00:19:09,730 --> 00:19:12,190
+Here you press on function.
+
+240
+00:19:16,050 --> 00:19:19,270
+Then we have here T distribution.
+
+241
+00:19:29,570 --> 00:19:33,910
+X, it means the value of the test statistic. In
+
+242
+00:19:33,910 --> 00:19:39,550
+this case, it's 0.55. Degrees of freedom is
+
+243
+00:19:39,550 --> 00:19:44,770
+24. Tails, we are talking about one-tailed
+
+244
+00:19:44,770 --> 00:19:49,720
+test, so it's one. So here, just write the value
+
+245
+00:19:49,720 --> 00:19:53,260
+of the statistic, degrees of freedom, then 1. Now,
+
+246
+00:19:53,680 --> 00:19:58,900
+the exact p-value is 0.2937. So that's the exact p
+
+247
+00:19:58,900 --> 00:20:06,020
+-value. Exact answer equal 0.2937.
+
+248
+00:20:07,480 --> 00:20:12,050
+So either one will give the same conclusion. It's
+
+249
+00:20:12,050 --> 00:20:16,970
+0.2937 or greater than 25%. We reject if the p-value
+
+250
+00:20:16,970 --> 00:20:19,890
+is smaller than alpha, but this p-value is greater
+
+251
+00:20:19,890 --> 00:20:22,390
+than alpha, so we don't reject the null hypothesis.
+
+252
+00:20:24,710 --> 00:20:29,350
+So we end with the same conclusion. Actually, this
+
+253
+00:20:29,350 --> 00:20:38,960
+slide gives how we can use the t-test in Excel to
+
+254
+00:20:38,960 --> 00:20:44,740
+find the value of, or find, the p-value. That's all
+
+255
+00:20:44,740 --> 00:20:48,660
+for testing about the population mean if sigma is
+
+256
+00:20:48,660 --> 00:20:52,460
+known or sigma is unknown for two sided test or
+
+257
+00:20:52,460 --> 00:20:58,510
+one sided, upper or lower sided test. 
We mentioned + +258 +00:20:58,510 --> 00:21:01,830 +before that there are two types of testing. One is + +259 +00:21:01,830 --> 00:21:06,370 +called hypothesis testing for the mean and the + +260 +00:21:06,370 --> 00:21:11,570 +other for the proportion. Because as we mentioned + +261 +00:21:11,570 --> 00:21:14,790 +before, there are two types of data. One is the + +262 +00:21:14,790 --> 00:21:15,550 +numerical data. + +263 +00:21:22,130 --> 00:21:26,450 +Now, for numerical data, we have to use the means. + +264 +00:21:27,050 --> 00:21:30,390 +Otherwise, if the data is not numeric, I mean if + +265 +00:21:30,390 --> 00:21:34,690 +it is qualitative, we have to use proportion. For + +266 +00:21:34,690 --> 00:21:39,010 +example, if we are talking about gender, so it's + +267 +00:21:39,010 --> 00:21:42,490 +proportion we have to use instead of the means. + +268 +00:21:44,810 --> 00:21:49,290 +Let's see. Now, if we have data on gender. Gender + +269 +00:21:49,290 --> 00:21:54,390 +is classified males and females. Suppose, for + +270 +00:21:54,390 --> 00:21:59,790 +example, you select a random sample of size 100, + +271 +00:22:01,910 --> 00:22:07,270 +and you are interested in number of female + +272 +00:22:07,270 --> 00:22:07,810 +students. + +273 +00:22:12,410 --> 00:22:18,010 +And the sample has, for example, 46 females. + +274 +00:22:21,310 --> 00:22:26,230 +So now your proportion is x divided by n, 46 + +275 +00:22:26,230 --> 00:22:28,470 +divided by 100, so 0.46. + +276 +00:22:31,350 --> 00:22:35,230 +You cannot say for example the average of gender + +277 +00:22:35,230 --> 00:22:40,480 +for example is 1.3. It has no meaning that the + +278 +00:22:40,480 --> 00:22:44,860 +average is 1.3. You have to say that, for example, + +279 +00:22:45,100 --> 00:22:52,540 +males or females represents 46%. Rather than + +280 +00:22:52,540 --> 00:22:55,840 +saying the average, for example, is 1.3. This one + +281 +00:22:55,840 --> 00:22:57,200 +has no meaning. 
+ +282 +00:23:00,120 --> 00:23:04,520 +So if we are talking about two possible outcomes, + +283 +00:23:05,160 --> 00:23:09,300 +success or failure, switch on or switch off, a + +284 +00:23:09,300 --> 00:23:12,680 +good item, defective item, and so on, we have to + +285 +00:23:12,680 --> 00:23:16,160 +use a proportion instead of the mean. Here we'll + +286 +00:23:16,160 --> 00:23:20,100 +talk about in details about hypothesis testing for + +287 +00:23:20,100 --> 00:23:26,900 +a proportions. So in this case, your problem + +288 +00:23:26,900 --> 00:23:29,520 +involves categorical variables. It means we have + +289 +00:23:31,390 --> 00:23:35,710 +classification either yes or no males females and + +290 +00:23:35,710 --> 00:23:38,970 +so on so two possible outcomes possesses + +291 +00:23:38,970 --> 00:23:41,910 +characteristic of interest so suppose i am + +292 +00:23:41,910 --> 00:23:47,580 +interested in number of defective items that is + +293 +00:23:47,580 --> 00:23:51,940 +produced by a firm. So my interest here in this + +294 +00:23:51,940 --> 00:23:56,020 +case is number of defective items. So number of + +295 +00:23:56,020 --> 00:24:01,620 +defective items is your x. So for example, suppose + +296 +00:24:01,620 --> 00:24:08,660 +the firm introduces 1,000 items, and we found that + +297 +00:24:08,660 --> 00:24:13,800 +10 of them are defective. So the proportion of + +298 +00:24:13,800 --> 00:24:17,130 +defective in this case is x over n. And we are + +299 +00:24:17,130 --> 00:24:20,730 +interested in the population proportion. As we + +300 +00:24:20,730 --> 00:24:24,950 +mentioned before, it's pi. So pi is the population + +301 +00:24:24,950 --> 00:24:30,890 +proportion. And actually, this p or pi ranges + +302 +00:24:30,890 --> 00:24:36,470 +between 0 and 1. So it never exceeds 1 or below 0. + +303 +00:24:37,830 --> 00:24:42,030 +So again, this slide actually is the same as we + +304 +00:24:42,030 --> 00:24:47,550 +have discussed in Chapter 8. 
The sample proportion + +305 +00:24:47,550 --> 00:24:54,210 +is P. P is X over N. And X is the number in + +306 +00:24:54,210 --> 00:24:57,510 +category of interest in sample. N is the sample + +307 +00:24:57,510 --> 00:25:03,790 +size. And we know that if the two conditions, N pi + +308 +00:25:03,790 --> 00:25:10,150 +and N times 1 minus pi are at least 5, then P can + +309 +00:25:10,150 --> 00:25:13,570 +be approximated by normal distribution with mean + +310 +00:25:13,570 --> 00:25:18,310 +equal pi. And standard deviation equals square + +311 +00:25:18,310 --> 00:25:22,070 +root of pi 1 minus pi divided by n. So actually, + +312 +00:25:22,230 --> 00:25:26,670 +this slide is repeated. So again, your B value, + +313 +00:25:26,770 --> 00:25:31,150 +I'm sorry, your proportion is approximately + +314 +00:25:31,150 --> 00:25:38,250 +normally distributed of mean equals pi and sigma + +315 +00:25:38,250 --> 00:25:43,370 +pi 1 minus pi divided by n, all under the square + +316 +00:25:43,370 --> 00:25:48,880 +root. Now, based on this sampling distribution of + +317 +00:25:48,880 --> 00:25:54,980 +P, what's your z-statistic? As we mentioned in + +318 +00:25:54,980 --> 00:26:01,340 +chapter 6, the standard formula for z-statistic is + +319 +00:26:01,340 --> 00:26:07,260 +x times mean of x divided by sigma of x. This is + +320 +00:26:07,260 --> 00:26:11,890 +the standard formula for the z-score. In chapter + +321 +00:26:11,890 --> 00:26:14,890 +8, there are two cases. One is the mean as we + +322 +00:26:14,890 --> 00:26:18,330 +mentioned. So in this case, if we are talking + +323 +00:26:18,330 --> 00:26:23,090 +about the mean, we should have x bar minus the + +324 +00:26:23,090 --> 00:26:25,290 +mean of x bar divided by sigma of x bar. So it's + +325 +00:26:25,290 --> 00:26:29,970 +similar to this one. But here, we replace x by x + +326 +00:26:29,970 --> 00:26:33,830 +bar. From chapter 8, we know that the mean of x + +327 +00:26:33,830 --> 00:26:38,690 +bar is mu. 
And sigma of x bar is sigma of the root + +328 +00:26:38,690 --> 00:26:46,210 +n. Now, for this proportion, use this statistic is + +329 +00:26:46,210 --> 00:26:52,390 +again replace x by p minus mu minus p divided by + +330 +00:26:52,390 --> 00:26:59,810 +sigma of p. So p minus. The mean of p is pi. So + +331 +00:26:59,810 --> 00:27:08,270 +this is pi. Divided by sigma root pi 1 minus pi + +332 +00:27:08,270 --> 00:27:14,340 +divided by. So this is your Z statistic. So + +333 +00:27:14,340 --> 00:27:19,040 +actually, there is nothing new. We just repeated + +334 +00:27:19,040 --> 00:27:23,720 +the concepts from Chapter 8. + +335 +00:27:26,910 --> 00:27:30,370 +So this is your Z statistic, P minus Pi divided by + +336 +00:27:30,370 --> 00:27:34,110 +root Pi, 1 minus Pi divided by the sample size. + +337 +00:27:34,550 --> 00:27:38,650 +That is valid only if the two conditions are + +338 +00:27:38,650 --> 00:27:43,810 +satisfied. Which are N times Pi at least 5, and N + +339 +00:27:43,810 --> 00:27:49,130 +times 1 minus Pi is at least 5. If at least one of + +340 +00:27:49,130 --> 00:27:53,070 +these conditions is not satisfied, then we cannot + +341 +00:27:53,070 --> 00:27:58,060 +use the Z statistic. So there are two cases. If + +342 +00:27:58,060 --> 00:28:01,860 +the two conditions together satisfied, then we + +343 +00:28:01,860 --> 00:28:05,340 +have to use this statistic. Otherwise, if one of + +344 +00:28:05,340 --> 00:28:07,740 +the conditions is not satisfied, then we cannot + +345 +00:28:07,740 --> 00:28:13,420 +use the Z-state that says this case not discussed + +346 +00:28:13,420 --> 00:28:19,060 +in this chapter. So that's all for testing about + +347 +00:28:19,060 --> 00:28:24,760 +proportion. There is an equivalent form. of this + +348 +00:28:24,760 --> 00:28:32,100 +statistic, we may replace B by X. For example, we + +349 +00:28:32,100 --> 00:28:38,920 +know that B equals X over N. 
Now let's see if we + +350 +00:28:38,920 --> 00:28:46,720 +multiply N for numerator and denominator. So if we + +351 +00:28:46,720 --> 00:28:47,560 +multiply N here, + +352 +00:28:59,680 --> 00:29:02,440 +I will reach another formula for this statistic. + +353 +00:29:03,680 --> 00:29:06,260 +The first one depends on the sample proportion. + +354 +00:29:07,020 --> 00:29:11,820 +The other one will depend on x. Now, n, this one + +355 +00:29:11,820 --> 00:29:19,120 +equals n times b, which is x minus n times y n pi. + +356 +00:29:20,900 --> 00:29:32,320 +So again, n times b is x minus n pi. Now N, N dot + +357 +00:29:32,320 --> 00:29:36,720 +squared becomes N squared, so N squared, we have + +358 +00:29:36,720 --> 00:29:42,120 +N, so one cancelled, so N is left, so we have N pi + +359 +00:29:42,120 --> 00:29:43,500 +1. + +360 +00:29:45,140 --> 00:29:50,670 +These two statistics are equivalent. So you may + +361 +00:29:50,670 --> 00:29:56,290 +use b minus pi frequently. The common one actually + +362 +00:29:56,290 --> 00:30:00,270 +is this one, b minus pi divided by root, square + +363 +00:30:00,270 --> 00:30:03,390 +root of pi, 1 minus pi divided by n. So actually, + +364 +00:30:03,610 --> 00:30:07,390 +these two forms are equivalent. + +365 +00:30:09,190 --> 00:30:12,470 +In this case, the two conditions, x is greater + +366 +00:30:12,470 --> 00:30:17,790 +than or equal to 5, and n minus x is at least 5. + +367 +00:30:19,190 --> 00:30:24,790 +Let's look at this specific example. + +368 +00:30:26,650 --> 00:30:34,830 +A marketing company claims that it receives 8% + +369 +00:30:34,830 --> 00:30:40,170 +responses from its mailing. So now the claim is, + +370 +00:30:41,270 --> 00:30:46,770 +the company receives only 8% responses from its + +371 +00:30:46,770 --> 00:30:52,960 +mailing. So from their records, we know that the + +372 +00:30:52,960 --> 00:30:59,640 +proportion of response is 8%. 
To test this claim, + +373 +00:31:00,400 --> 00:31:06,800 +a random sample of 500 were surveyed with 25 + +374 +00:31:06,800 --> 00:31:10,980 +responses. So we are actually interested in the + +375 +00:31:10,980 --> 00:31:19,910 +number of response. So x is 25. So this survey of + +376 +00:31:19,910 --> 00:31:26,630 +500 gives 25 responses. Now test at 5% + +377 +00:31:26,630 --> 00:31:31,370 +significance limit. Now the direction is not + +378 +00:31:31,370 --> 00:31:37,730 +given, just pi equals 8%. So we have to test H0 + +379 +00:31:37,730 --> 00:31:44,570 +against H1 of pi does not equal, since the + +380 +00:31:44,570 --> 00:31:48,210 +direction is not given. This problem does not say + +381 +00:31:48,210 --> 00:31:52,310 +it's above or greater or decreased or increased. + +382 +00:31:52,670 --> 00:31:57,630 +So it's two-tailed test. So we are testing if 0 pi + +383 +00:31:57,630 --> 00:32:02,350 +equals 0.08 against the alternate hypothesis mu is + +384 +00:32:02,350 --> 00:32:07,250 +not 0.8. Now, the first step, we have to check the + +385 +00:32:07,250 --> 00:32:15,100 +two conditions, n times pi. N is 500, Pi is 8%. So + +386 +00:32:15,100 --> 00:32:20,120 +5 times 8 gives 40. Now the other condition is the + +387 +00:32:20,120 --> 00:32:23,980 +complement actually. So since N equals 500, so + +388 +00:32:23,980 --> 00:32:28,940 +this one should be 460. The reason behind that is + +389 +00:32:28,940 --> 00:32:35,640 +if we add N Pi and the other condition, N times 1 + +390 +00:32:35,640 --> 00:32:41,590 +minus Pi. So this gives N Pi plus N. factor here, + +391 +00:32:41,690 --> 00:32:46,010 +n minus n, y. So this cancels. So we end with n. + +392 +00:32:46,570 --> 00:32:52,710 +So the total should be 500. So if the n times y is + +393 +00:32:52,710 --> 00:32:58,470 +40, then n times 1 minus y is 460. So two + +394 +00:32:58,470 --> 00:33:02,510 +conditions are satisfied. 
Then we can use, we can
+
+395
+00:33:02,510 --> 00:33:05,890
+say that the sample proportion is approximately
+
+396
+00:33:05,890 --> 00:33:12,040
+normally distributed with mean equals pi, with
+
+397
+00:33:12,040 --> 00:33:15,900
+standard deviation of square root pi 1 minus pi
+
+398
+00:33:15,900 --> 00:33:20,560
+divided by n. So that's your mean. So the mean is
+
+399
+00:33:20,560 --> 00:33:33,460
+pi. And sigma is root pi, pi is 0.08, times
+
+400
+00:33:33,460 --> 00:33:38,440
+1 minus pi, which is 0.92, divided by n, 500.
+
+401
+00:33:43,210 --> 00:33:47,350
+So your Z statistic, P,
+
+402
+00:33:49,110 --> 00:33:55,550
+now what's the value of P? P equals X over N. X is
+
+403
+00:33:55,550 --> 00:34:06,990
+given, X is 25, divided by 500, so P is 5%. So
+
+404
+00:34:06,990 --> 00:34:07,950
+now, Z statistic.
+
+405
+00:34:12,080 --> 00:34:16,560
+P minus pi divided
+
+406
+00:34:16,560 --> 00:34:26,080
+by this sigma, which is root 0.08 times 0.92 divided by 500.
+
+407
+00:34:27,120 --> 00:34:32,860
+This will give minus 2.47. So straightforward
+
+408
+00:34:32,860 --> 00:34:37,620
+calculation. It gives value of negative point,
+
+409
+00:34:38,340 --> 00:34:42,840
+negative 2.47 for the value of the test statistic.
+
+410
+00:34:44,160 --> 00:34:50,140
+Now let's look at the different approaches.
+
+411
+00:35:05,860 --> 00:35:12,700
+Now we are talking about alpha of 5%. So my
+
+412
+00:35:12,700 --> 00:35:17,000
+rejection region is
+
+413
+00:35:17,000 --> 00:35:22,940
+minus 1.96 to the left side and 1.96 to the right
+
+414
+00:35:22,940 --> 00:35:29,940
+side. If the value of the statistic falls in one
+
+415
+00:35:29,940 --> 00:35:32,020
+of these two rejection regions, then we have to
+
+416
+00:35:32,020 --> 00:35:37,160
+reject the null. Now this value actually falls in
+
+417
+00:35:37,160 --> 00:35:42,240
+the rejection region to the left side. 
So since z + +418 +00:35:42,240 --> 00:35:48,440 +star equals negative 2.47, smaller than negative 1 + +419 +00:35:48,440 --> 00:35:53,700 +.26, So it means the value of the test statistic + +420 +00:35:53,700 --> 00:35:58,700 +falls in the rejection region. Then my decision is + +421 +00:35:58,700 --> 00:36:03,240 +reject F0. + +422 +00:36:05,740 --> 00:36:13,060 +So again, your Z statistic negative 2.47 falls in + +423 +00:36:13,060 --> 00:36:17,620 +the rejection region. So my decision is we reject + +424 +00:36:17,620 --> 00:36:21,460 +the null hypothesis. There is sufficient evidence + +425 +00:36:21,460 --> 00:36:27,340 +to reject the company claim of 8% responses rate. + +426 +00:36:28,140 --> 00:36:32,260 +So this claim actually is incorrect. So you have + +427 +00:36:32,260 --> 00:36:38,020 +to reject this claim. It means we have to support + +428 +00:36:38,020 --> 00:36:41,340 +alternative hypothesis. So we end with this + +429 +00:36:41,340 --> 00:36:49,160 +result. That is pi is not 8%. So that's by using a + +430 +00:36:49,160 --> 00:36:53,620 +critical value approach. Any question? + +431 +00:36:58,420 --> 00:37:04,420 +Let's compute B value for the other approach. Now, + +432 +00:37:04,480 --> 00:37:09,780 +in this case, we are testing two-tailed test. So + +433 +00:37:09,780 --> 00:37:17,750 +your B value may be greater than Or smaller than. + +434 +00:37:18,230 --> 00:37:20,450 +So we have to compute one of these two, then + +435 +00:37:20,450 --> 00:37:23,430 +multiply the answer by two. Because the area to + +436 +00:37:23,430 --> 00:37:27,390 +the right of 2.47 equals the area to the left of + +437 +00:37:27,390 --> 00:37:32,010 +negative 2.47. So now, by using the ideas from + +438 +00:37:32,010 --> 00:37:37,030 +chapter six, let's see how can we compute U of V + +439 +00:37:37,030 --> 00:37:40,410 +value. Again, we are talking about total test. So + +440 +00:37:40,410 --> 00:37:47,840 +the U of V value is the right of Z. 
2.47 or + +441 +00:37:47,840 --> 00:37:55,100 +smaller than negative 2.47. Now the table we have + +442 +00:37:55,100 --> 00:37:57,660 +gives the area to the left side. So it's better to + +443 +00:37:57,660 --> 00:38:03,580 +use the smaller than. So now by using T table, B + +444 +00:38:03,580 --> 00:38:08,340 +of Z negative less than negative 2.47. Let's look + +445 +00:38:08,340 --> 00:38:13,660 +at the table for + +446 +00:38:13,660 --> 00:38:19,330 +the normal Negative side. Look at negative 2.47. + +447 +00:38:21,250 --> 00:38:26,750 +Negative 2.4 all the way under 7 will get this + +448 +00:38:26,750 --> 00:38:29,750 +result. So it's 0068. + +449 +00:38:31,910 --> 00:38:37,350 +So this is one of the two areas. So the area below + +450 +00:38:37,350 --> 00:38:43,550 +negative 2.47 is 0068. The area above the same + +451 +00:38:43,550 --> 00:38:46,870 +value is the same area because it's symmetric. So + +452 +00:38:46,870 --> 00:38:49,570 +it means that just multiplying this by 2, you will + +453 +00:38:49,570 --> 00:38:52,930 +get 0.0136. + +454 +00:38:56,690 --> 00:38:59,730 +Now, always, as we mentioned before, always we + +455 +00:38:59,730 --> 00:39:02,970 +reject the null hypothesis if your b value is + +456 +00:39:02,970 --> 00:39:06,610 +smaller than alpha. Now, alpha is given by 5%. + +457 +00:39:06,610 --> 00:39:09,950 +Now, is this value smaller than alpha? The answer + +458 +00:39:09,950 --> 00:39:16,450 +is yes. So since my B value equals 0, 1, 3, 6, + +459 +00:39:16,570 --> 00:39:22,770 +smaller than 5%, then again, we reject the null + +460 +00:39:22,770 --> 00:39:26,170 +hypothesis. So we end with the same conclusion by + +461 +00:39:26,170 --> 00:39:33,570 +using the critical value. That's for using B value + +462 +00:39:33,570 --> 00:39:37,420 +approach. So here, we reject another hypothesis + +463 +00:39:37,420 --> 00:39:43,000 +since your B value is smaller than alpha. 
The last + +464 +00:39:43,000 --> 00:39:48,180 +approach, which is not given in + +465 +00:39:48,180 --> 00:39:52,960 +the slides you have critical value. I'm sorry, + +466 +00:39:53,320 --> 00:39:56,060 +confidence interval. And we know that confidence + +467 +00:39:56,060 --> 00:39:56,620 +interval + +468 +00:40:02,370 --> 00:40:09,210 +P plus or minus Z square root P 1 minus P divided + +469 +00:40:09,210 --> 00:40:22,490 +by N. P is 0.5, 0.5 plus or minus 196 times 0.595 + +470 +00:40:22,490 --> 00:40:27,330 +divided by N. Let's compute the critical, the two + +471 +00:40:27,330 --> 00:40:27,710 +limits. + +472 +00:40:35,030 --> 00:40:43,250 +So we have this value here represents the margin + +473 +00:40:43,250 --> 00:40:46,070 +of error. So let's compute the margin of error + +474 +00:40:46,070 --> 00:40:54,350 +first. So it's 1.96 times square root of 0.05 + +475 +00:40:54,350 --> 00:40:57,850 +times 0.95 divided by 500. + +476 +00:41:00,390 --> 00:41:08,040 +So the margin of error is 0.19. So 0, 5, plus + +477 +00:41:08,040 --> 00:41:11,660 +around 0, 1, 0, 2, around 0. For example, let's + +478 +00:41:11,660 --> 00:41:16,240 +say it's 0, 2. It's approximately 0, 2. So now the + +479 +00:41:16,240 --> 00:41:22,360 +confidence interval for pi, so 95 confidence + +480 +00:41:22,360 --> 00:41:30,640 +interval of pi is pi greater than or equal 0, 5 + +481 +00:41:30,640 --> 00:41:39,270 +minus 0, 2 is 3%. And plus 2 gives 5%. So now, pi, + +482 +00:41:39,550 --> 00:41:45,330 +the population proportion, falls between 3% and + +483 +00:41:45,330 --> 00:41:52,230 +7%. So you range here from 3% all the way up to + +484 +00:41:52,230 --> 00:42:00,590 +7%. That's pi. Now, we are testing about it's 0 if + +485 +00:42:00,590 --> 00:42:07,950 +y equals 8%. Now, is this value lies in the + +486 +00:42:07,950 --> 00:42:11,970 +interval or outside? 
This value actually lies + +487 +00:42:11,970 --> 00:42:16,010 +outside this interval, so it's never equal 0.08, + +488 +00:42:17,090 --> 00:42:21,350 +because pi in this confidence interval ranges + +489 +00:42:21,350 --> 00:42:28,040 +between 3% all the way up to 7%. Now, does 8% lie + +490 +00:42:28,040 --> 00:42:31,620 +in this interval? The answer is no. That means it + +491 +00:42:31,620 --> 00:42:34,620 +never equals 8%. So we have to reject another + +492 +00:42:34,620 --> 00:42:39,520 +hypothesis. So since the + +493 +00:42:39,520 --> 00:42:45,140 +confidence interval we have does not cover or + +494 +00:42:45,140 --> 00:42:51,640 +capture or contain pi of 8%, then we reject it. + +495 +00:42:54,600 --> 00:43:00,720 +So actually, there are three methods to solve this + +496 +00:43:00,720 --> 00:43:05,480 +problem. One is critical value approach. And in + +497 +00:43:05,480 --> 00:43:09,840 +this case, we just found the critical values here, + +498 +00:43:10,280 --> 00:43:14,300 +minus and plus 1.96. And the value of the + +499 +00:43:14,300 --> 00:43:19,230 +statistic fourth in the rejection region, so we + +500 +00:43:19,230 --> 00:43:22,230 +reject the null hypothesis. That's for the first + +501 +00:43:22,230 --> 00:43:26,370 +approach. The other one, the B value, and we found + +502 +00:43:26,370 --> 00:43:29,770 +that the B value is 0, 1, 3, 6, which is smaller + +503 +00:43:29,770 --> 00:43:32,590 +than alpha. Then again, we reject the null + +504 +00:43:32,590 --> 00:43:36,290 +hypothesis. The last approach is the confidence + +505 +00:43:36,290 --> 00:43:38,910 +interval approach, because here we are talking + +506 +00:43:38,910 --> 00:43:42,330 +about two-sided test. And the confidence is + +507 +00:43:42,330 --> 00:43:43,690 +approximately + +508 +00:43:45,410 --> 00:43:50,070 +ranges between three and seven percent. 
And my pi, + +509 +00:43:50,130 --> 00:43:52,990 +which I am talking about in the null hypothesis, + +510 +00:43:53,830 --> 00:43:56,490 +is not in the center, but I mean the confidence + +511 +00:43:56,490 --> 00:43:59,610 +interval does not capture the value of pi of eight + +512 +00:43:59,610 --> 00:44:04,550 +percent. Therefore, we reject the null hypothesis. + +513 +00:44:06,270 --> 00:44:11,790 +That's for testing about population promotion. + +514 +00:44:14,100 --> 00:44:14,720 +Any questions? + +515 +00:44:19,100 --> 00:44:23,060 +Next time, shall I start chapter 10? + +516 +00:44:27,060 --> 00:44:33,380 +Up to this point, we just explained hypothesis + +517 +00:44:33,380 --> 00:44:37,720 +testing about one sample. So there is only one + +518 +00:44:37,720 --> 00:44:41,000 +sample in chapter 9. Here, suppose there are two + +519 +00:44:41,000 --> 00:44:45,270 +samples. How can we conduct Hypothesizing for + +520 +00:44:45,270 --> 00:44:48,250 +comparing two population means. For example, + +521 +00:44:48,350 --> 00:44:52,010 +suppose we are teaching males and females, and we + +522 +00:44:52,010 --> 00:44:55,190 +are interested to see if there is a significant + +523 +00:44:55,190 --> 00:44:59,510 +difference between the performance of males and + +524 +00:44:59,510 --> 00:45:05,530 +females. For example, suppose the instructor + +525 +00:45:05,530 --> 00:45:11,670 +claimed that Males do better than females in the + +526 +00:45:11,670 --> 00:45:16,430 +exam. So how can we test if, for example, the mean + +527 +00:45:16,430 --> 00:45:23,210 +score for group A, for example, is greater than + +528 +00:45:23,210 --> 00:45:28,190 +the mean of group B? So in this case, there are + +529 +00:45:28,190 --> 00:45:31,910 +two samples. So how can we conduct hypothesis + +530 +00:45:31,910 --> 00:45:36,530 +testing for two sample tests? That will be in + +531 +00:45:36,530 --> 00:45:42,990 +chapter 10. So chapter 10 left for this course. 
In + +532 +00:45:42,990 --> 00:45:49,570 +addition to that, we'll discuss correlation and + +533 +00:45:49,570 --> 00:45:53,290 +simple linear regression after that. So maybe + +534 +00:45:53,290 --> 00:46:01,270 +we'll discuss two more chapters, 10 and 12. If we + +535 +00:46:01,270 --> 00:46:06,390 +have enough time, maybe we'll cover chapter 11. + +536 +00:46:07,550 --> 00:46:11,610 +Guys, it depends actually on the time we have. So + +537 +00:46:11,610 --> 00:46:17,130 +that's all for today. Any questions? Comments? + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..40f8a436dd22f01b5a085ac4a0aceb5ae0086d60 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/rTMlA8frV0A_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 3081, "start": 6.21, "end": 30.81, "text": " Today, inshallah, we'll continue chapter nine. We'll talk in details about one-tailed test. Last time, we talked about two-tailed test. And we discussed that there are three tools in order to test the null hypothesis. 
The first one is critical value approach, the second B value approach, and confidence interval.", "tokens": [2692, 11, 1028, 71, 13492, 11, 321, 603, 2354, 7187, 4949, 13, 492, 603, 751, 294, 4365, 466, 472, 12, 14430, 292, 1500, 13, 5264, 565, 11, 321, 2825, 466, 732, 12, 14430, 292, 1500, 13, 400, 321, 7152, 300, 456, 366, 1045, 3873, 294, 1668, 281, 1500, 264, 18184, 17291, 13, 440, 700, 472, 307, 4924, 2158, 3109, 11, 264, 1150, 363, 2158, 3109, 11, 293, 6687, 15035, 13], "avg_logprob": -0.21830986083393367, "compression_ratio": 1.5939086294416243, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 6.21, "end": 6.75, "word": " Today,", "probability": 0.6708984375}, {"start": 6.89, "end": 7.03, "word": " inshallah,", "probability": 0.727294921875}, {"start": 7.13, "end": 7.47, "word": " we'll", "probability": 0.8662109375}, {"start": 7.47, "end": 8.03, "word": " continue", "probability": 0.9052734375}, {"start": 8.03, "end": 8.43, "word": " chapter", "probability": 0.5537109375}, {"start": 8.43, "end": 8.91, "word": " nine.", "probability": 0.6328125}, {"start": 9.39, "end": 10.07, "word": " We'll", "probability": 0.881103515625}, {"start": 10.07, "end": 10.35, "word": " talk", "probability": 0.8798828125}, {"start": 10.35, "end": 11.11, "word": " in", "probability": 0.91552734375}, {"start": 11.11, "end": 11.47, "word": " details", "probability": 0.775390625}, {"start": 11.47, "end": 11.97, "word": " about", "probability": 0.90283203125}, {"start": 11.97, "end": 13.37, "word": " one", "probability": 0.89794921875}, {"start": 13.37, "end": 13.79, "word": "-tailed", "probability": 0.7501627604166666}, {"start": 13.79, "end": 13.97, "word": " test.", "probability": 0.54052734375}, {"start": 14.09, "end": 14.23, "word": " Last", "probability": 0.87890625}, {"start": 14.23, "end": 14.45, "word": " time,", "probability": 0.88427734375}, {"start": 14.49, "end": 14.59, "word": " we", "probability": 0.96142578125}, {"start": 14.59, "end": 14.79, "word": " 
talked", "probability": 0.87744140625}, {"start": 14.79, "end": 15.27, "word": " about", "probability": 0.896484375}, {"start": 15.27, "end": 15.97, "word": " two", "probability": 0.76025390625}, {"start": 15.97, "end": 16.31, "word": "-tailed", "probability": 0.9163411458333334}, {"start": 16.31, "end": 16.57, "word": " test.", "probability": 0.7958984375}, {"start": 17.29, "end": 17.65, "word": " And", "probability": 0.943359375}, {"start": 17.65, "end": 17.95, "word": " we", "probability": 0.9580078125}, {"start": 17.95, "end": 18.87, "word": " discussed", "probability": 0.8759765625}, {"start": 18.87, "end": 19.17, "word": " that", "probability": 0.9072265625}, {"start": 19.17, "end": 19.33, "word": " there", "probability": 0.90966796875}, {"start": 19.33, "end": 19.51, "word": " are", "probability": 0.94482421875}, {"start": 19.51, "end": 19.79, "word": " three", "probability": 0.92431640625}, {"start": 19.79, "end": 20.31, "word": " tools", "probability": 0.8759765625}, {"start": 20.31, "end": 21.31, "word": " in", "probability": 0.7509765625}, {"start": 21.31, "end": 21.49, "word": " order", "probability": 0.9091796875}, {"start": 21.49, "end": 21.69, "word": " to", "probability": 0.9638671875}, {"start": 21.69, "end": 21.93, "word": " test", "probability": 0.876953125}, {"start": 21.93, "end": 22.07, "word": " the", "probability": 0.324951171875}, {"start": 22.07, "end": 22.19, "word": " null", "probability": 0.97998046875}, {"start": 22.19, "end": 22.63, "word": " hypothesis.", "probability": 0.57568359375}, {"start": 23.47, "end": 23.81, "word": " The", "probability": 0.82275390625}, {"start": 23.81, "end": 24.09, "word": " first", "probability": 0.8818359375}, {"start": 24.09, "end": 24.43, "word": " one", "probability": 0.92529296875}, {"start": 24.43, "end": 25.37, "word": " is", "probability": 0.93798828125}, {"start": 25.37, "end": 25.75, "word": " critical", "probability": 0.7177734375}, {"start": 25.75, "end": 26.07, "word": " value", 
"probability": 0.923828125}, {"start": 26.07, "end": 26.53, "word": " approach,", "probability": 0.935546875}, {"start": 27.35, "end": 27.51, "word": " the", "probability": 0.84765625}, {"start": 27.51, "end": 27.87, "word": " second", "probability": 0.90576171875}, {"start": 27.87, "end": 28.25, "word": " B", "probability": 0.378662109375}, {"start": 28.25, "end": 28.51, "word": " value", "probability": 0.5537109375}, {"start": 28.51, "end": 29.03, "word": " approach,", "probability": 0.92578125}, {"start": 29.25, "end": 29.93, "word": " and", "probability": 0.9296875}, {"start": 29.93, "end": 30.39, "word": " confidence", "probability": 0.90673828125}, {"start": 30.39, "end": 30.81, "word": " interval.", "probability": 0.98046875}], "temperature": 1.0}, {"id": 2, "seek": 5920, "start": 31.42, "end": 59.2, "text": " Keep in mind the last approach, confidence interval, is used only for two-tailed tests. So now for one-tailed test, we are going to use two approaches, critical value and B value approach. So there are many cases the alternative hypothesis focuses on particular direction. 
In this case, we have a certain direction.", "tokens": [5527, 294, 1575, 264, 1036, 3109, 11, 6687, 15035, 11, 307, 1143, 787, 337, 732, 12, 14430, 292, 6921, 13, 407, 586, 337, 472, 12, 14430, 292, 1500, 11, 321, 366, 516, 281, 764, 732, 11587, 11, 4924, 2158, 293, 363, 2158, 3109, 13, 407, 456, 366, 867, 3331, 264, 8535, 17291, 16109, 322, 1729, 3513, 13, 682, 341, 1389, 11, 321, 362, 257, 1629, 3513, 13], "avg_logprob": -0.19083179730702848, "compression_ratio": 1.6205128205128205, "no_speech_prob": 0.0, "words": [{"start": 31.42, "end": 31.66, "word": " Keep", "probability": 0.79248046875}, {"start": 31.66, "end": 31.82, "word": " in", "probability": 0.9501953125}, {"start": 31.82, "end": 32.06, "word": " mind", "probability": 0.89306640625}, {"start": 32.06, "end": 32.46, "word": " the", "probability": 0.317138671875}, {"start": 32.46, "end": 32.94, "word": " last", "probability": 0.84765625}, {"start": 32.94, "end": 33.38, "word": " approach,", "probability": 0.93212890625}, {"start": 33.52, "end": 33.84, "word": " confidence", "probability": 0.89501953125}, {"start": 33.84, "end": 34.3, "word": " interval,", "probability": 0.94970703125}, {"start": 34.78, "end": 35.08, "word": " is", "probability": 0.9404296875}, {"start": 35.08, "end": 35.48, "word": " used", "probability": 0.91845703125}, {"start": 35.48, "end": 35.84, "word": " only", "probability": 0.9267578125}, {"start": 35.84, "end": 36.12, "word": " for", "probability": 0.95166015625}, {"start": 36.12, "end": 36.3, "word": " two", "probability": 0.8984375}, {"start": 36.3, "end": 36.6, "word": "-tailed", "probability": 0.6432291666666666}, {"start": 36.6, "end": 36.82, "word": " tests.", "probability": 0.64794921875}, {"start": 38.14, "end": 38.46, "word": " So", "probability": 0.90576171875}, {"start": 38.46, "end": 39.04, "word": " now", "probability": 0.857421875}, {"start": 39.04, "end": 39.34, "word": " for", "probability": 0.72314453125}, {"start": 39.34, "end": 39.6, "word": " one", 
"probability": 0.92822265625}, {"start": 39.6, "end": 39.88, "word": "-tailed", "probability": 0.89599609375}, {"start": 39.88, "end": 40.06, "word": " test,", "probability": 0.80517578125}, {"start": 40.14, "end": 40.22, "word": " we", "probability": 0.95947265625}, {"start": 40.22, "end": 40.34, "word": " are", "probability": 0.931640625}, {"start": 40.34, "end": 40.62, "word": " going", "probability": 0.94384765625}, {"start": 40.62, "end": 41.12, "word": " to", "probability": 0.96923828125}, {"start": 41.12, "end": 41.66, "word": " use", "probability": 0.8837890625}, {"start": 41.66, "end": 42.12, "word": " two", "probability": 0.93310546875}, {"start": 42.12, "end": 42.56, "word": " approaches,", "probability": 0.75634765625}, {"start": 43.44, "end": 44.04, "word": " critical", "probability": 0.908203125}, {"start": 44.04, "end": 44.46, "word": " value", "probability": 0.9404296875}, {"start": 44.46, "end": 45.18, "word": " and", "probability": 0.916015625}, {"start": 45.18, "end": 46.0, "word": " B", "probability": 0.60546875}, {"start": 46.0, "end": 46.24, "word": " value", "probability": 0.53759765625}, {"start": 46.24, "end": 46.48, "word": " approach.", "probability": 0.82470703125}, {"start": 48.06, "end": 48.72, "word": " So", "probability": 0.9521484375}, {"start": 48.72, "end": 49.0, "word": " there", "probability": 0.87353515625}, {"start": 49.0, "end": 49.16, "word": " are", "probability": 0.9384765625}, {"start": 49.16, "end": 49.5, "word": " many", "probability": 0.7177734375}, {"start": 49.5, "end": 50.08, "word": " cases", "probability": 0.927734375}, {"start": 50.08, "end": 51.04, "word": " the", "probability": 0.383056640625}, {"start": 51.04, "end": 51.56, "word": " alternative", "probability": 0.61572265625}, {"start": 51.56, "end": 52.1, "word": " hypothesis", "probability": 0.75}, {"start": 52.1, "end": 52.96, "word": " focuses", "probability": 0.91162109375}, {"start": 52.96, "end": 54.34, "word": " on", "probability": 0.94580078125}, 
{"start": 54.34, "end": 55.46, "word": " particular", "probability": 0.8642578125}, {"start": 55.46, "end": 55.92, "word": " direction.", "probability": 0.97998046875}, {"start": 56.14, "end": 56.3, "word": " In", "probability": 0.9326171875}, {"start": 56.3, "end": 56.48, "word": " this", "probability": 0.9423828125}, {"start": 56.48, "end": 56.82, "word": " case,", "probability": 0.89990234375}, {"start": 57.5, "end": 57.78, "word": " we", "probability": 0.95849609375}, {"start": 57.78, "end": 58.12, "word": " have", "probability": 0.9443359375}, {"start": 58.12, "end": 58.42, "word": " a", "probability": 0.99609375}, {"start": 58.42, "end": 58.7, "word": " certain", "probability": 0.89599609375}, {"start": 58.7, "end": 59.2, "word": " direction.", "probability": 0.97412109375}], "temperature": 1.0}, {"id": 3, "seek": 8803, "start": 59.87, "end": 88.03, "text": " And this direction could be to the left side. And it's called lower tail test. And the other one greater than or upper tail test. So there are two types of one tail test. One is called lower test. In this case, they alternate by hypothesis. 
Each one might be, for example, mu smaller than 3.", "tokens": [400, 341, 3513, 727, 312, 281, 264, 1411, 1252, 13, 400, 309, 311, 1219, 3126, 6838, 1500, 13, 400, 264, 661, 472, 5044, 813, 420, 6597, 6838, 1500, 13, 407, 456, 366, 732, 3467, 295, 472, 6838, 1500, 13, 1485, 307, 1219, 3126, 1500, 13, 682, 341, 1389, 11, 436, 18873, 538, 17291, 13, 6947, 472, 1062, 312, 11, 337, 1365, 11, 2992, 4356, 813, 805, 13], "avg_logprob": -0.220703128944425, "compression_ratio": 1.5869565217391304, "no_speech_prob": 0.0, "words": [{"start": 59.87, "end": 60.15, "word": " And", "probability": 0.383056640625}, {"start": 60.15, "end": 60.35, "word": " this", "probability": 0.9375}, {"start": 60.35, "end": 60.71, "word": " direction", "probability": 0.7626953125}, {"start": 60.71, "end": 61.05, "word": " could", "probability": 0.8837890625}, {"start": 61.05, "end": 61.41, "word": " be", "probability": 0.9541015625}, {"start": 61.41, "end": 63.17, "word": " to", "probability": 0.85791015625}, {"start": 63.17, "end": 63.33, "word": " the", "probability": 0.9150390625}, {"start": 63.33, "end": 63.57, "word": " left", "probability": 0.94482421875}, {"start": 63.57, "end": 64.05, "word": " side.", "probability": 0.86181640625}, {"start": 64.83, "end": 65.17, "word": " And", "probability": 0.55615234375}, {"start": 65.17, "end": 65.33, "word": " it's", "probability": 0.885498046875}, {"start": 65.33, "end": 65.69, "word": " called", "probability": 0.8740234375}, {"start": 65.69, "end": 66.95, "word": " lower", "probability": 0.66259765625}, {"start": 66.95, "end": 67.23, "word": " tail", "probability": 0.51953125}, {"start": 67.23, "end": 67.51, "word": " test.", "probability": 0.7080078125}, {"start": 68.29, "end": 68.85, "word": " And", "probability": 0.953125}, {"start": 68.85, "end": 68.95, "word": " the", "probability": 0.87060546875}, {"start": 68.95, "end": 69.19, "word": " other", "probability": 0.8896484375}, {"start": 69.19, "end": 69.55, "word": " one", "probability": 
0.92529296875}, {"start": 69.55, "end": 71.39, "word": " greater", "probability": 0.5263671875}, {"start": 71.39, "end": 71.81, "word": " than", "probability": 0.9482421875}, {"start": 71.81, "end": 72.03, "word": " or", "probability": 0.69873046875}, {"start": 72.03, "end": 72.43, "word": " upper", "probability": 0.60693359375}, {"start": 72.43, "end": 74.53, "word": " tail", "probability": 0.857421875}, {"start": 74.53, "end": 74.83, "word": " test.", "probability": 0.87353515625}, {"start": 75.05, "end": 75.21, "word": " So", "probability": 0.9541015625}, {"start": 75.21, "end": 75.39, "word": " there", "probability": 0.88818359375}, {"start": 75.39, "end": 75.57, "word": " are", "probability": 0.9384765625}, {"start": 75.57, "end": 75.81, "word": " two", "probability": 0.9326171875}, {"start": 75.81, "end": 76.33, "word": " types", "probability": 0.80712890625}, {"start": 76.33, "end": 76.51, "word": " of", "probability": 0.958984375}, {"start": 76.51, "end": 76.71, "word": " one", "probability": 0.9052734375}, {"start": 76.71, "end": 76.91, "word": " tail", "probability": 0.81005859375}, {"start": 76.91, "end": 77.23, "word": " test.", "probability": 0.853515625}, {"start": 77.65, "end": 77.89, "word": " One", "probability": 0.919921875}, {"start": 77.89, "end": 78.05, "word": " is", "probability": 0.9375}, {"start": 78.05, "end": 78.29, "word": " called", "probability": 0.876953125}, {"start": 78.29, "end": 78.59, "word": " lower", "probability": 0.83984375}, {"start": 78.59, "end": 79.01, "word": " test.", "probability": 0.85791015625}, {"start": 79.71, "end": 79.89, "word": " In", "probability": 0.8916015625}, {"start": 79.89, "end": 80.07, "word": " this", "probability": 0.9453125}, {"start": 80.07, "end": 80.31, "word": " case,", "probability": 0.90576171875}, {"start": 80.35, "end": 80.45, "word": " they", "probability": 0.486572265625}, {"start": 80.45, "end": 80.85, "word": " alternate", "probability": 0.367919921875}, {"start": 80.85, "end": 81.07, 
"word": " by", "probability": 0.7470703125}, {"start": 81.07, "end": 81.43, "word": " hypothesis.", "probability": 0.403076171875}, {"start": 83.15, "end": 83.37, "word": " Each", "probability": 0.66064453125}, {"start": 83.37, "end": 83.69, "word": " one", "probability": 0.931640625}, {"start": 83.69, "end": 84.93, "word": " might", "probability": 0.8896484375}, {"start": 84.93, "end": 85.17, "word": " be,", "probability": 0.95068359375}, {"start": 85.29, "end": 85.47, "word": " for", "probability": 0.9521484375}, {"start": 85.47, "end": 85.87, "word": " example,", "probability": 0.9736328125}, {"start": 86.07, "end": 86.67, "word": " mu", "probability": 0.70849609375}, {"start": 86.67, "end": 87.47, "word": " smaller", "probability": 0.744140625}, {"start": 87.47, "end": 87.75, "word": " than", "probability": 0.943359375}, {"start": 87.75, "end": 88.03, "word": " 3.", "probability": 0.49755859375}], "temperature": 1.0}, {"id": 4, "seek": 11665, "start": 88.91, "end": 116.65, "text": " So the null hypothesis can be written as mu is greater than or equal to 3. Just for simple notation, we can use mu equal to 3 instead of mu greater than or equal to 3. So we can use this notation instead of using mu is greater than or equal to 3. 
It's better to use this notation.", "tokens": [407, 264, 18184, 17291, 393, 312, 3720, 382, 2992, 307, 5044, 813, 420, 2681, 281, 805, 13, 1449, 337, 2199, 24657, 11, 321, 393, 764, 2992, 2681, 281, 805, 2602, 295, 2992, 5044, 813, 420, 2681, 281, 805, 13, 407, 321, 393, 764, 341, 24657, 2602, 295, 1228, 2992, 307, 5044, 813, 420, 2681, 281, 805, 13, 467, 311, 1101, 281, 764, 341, 24657, 13], "avg_logprob": -0.1708096649610635, "compression_ratio": 1.965034965034965, "no_speech_prob": 0.0, "words": [{"start": 88.91, "end": 89.25, "word": " So", "probability": 0.85498046875}, {"start": 89.25, "end": 89.41, "word": " the", "probability": 0.70556640625}, {"start": 89.41, "end": 89.57, "word": " null", "probability": 0.87548828125}, {"start": 89.57, "end": 89.97, "word": " hypothesis", "probability": 0.837890625}, {"start": 89.97, "end": 90.25, "word": " can", "probability": 0.9443359375}, {"start": 90.25, "end": 90.41, "word": " be", "probability": 0.95361328125}, {"start": 90.41, "end": 90.73, "word": " written", "probability": 0.92431640625}, {"start": 90.73, "end": 91.25, "word": " as", "probability": 0.95947265625}, {"start": 91.25, "end": 91.97, "word": " mu", "probability": 0.320556640625}, {"start": 91.97, "end": 92.31, "word": " is", "probability": 0.5888671875}, {"start": 92.31, "end": 92.59, "word": " greater", "probability": 0.88330078125}, {"start": 92.59, "end": 92.89, "word": " than", "probability": 0.927734375}, {"start": 92.89, "end": 92.99, "word": " or", "probability": 0.9423828125}, {"start": 92.99, "end": 93.21, "word": " equal", "probability": 0.87890625}, {"start": 93.21, "end": 93.41, "word": " to", "probability": 0.9345703125}, {"start": 93.41, "end": 93.63, "word": " 3.", "probability": 0.7236328125}, {"start": 94.31, "end": 94.87, "word": " Just", "probability": 0.81884765625}, {"start": 94.87, "end": 95.71, "word": " for", "probability": 0.8740234375}, {"start": 95.71, "end": 96.99, "word": " simple", "probability": 0.5546875}, {"start": 
96.99, "end": 97.43, "word": " notation,", "probability": 0.95751953125}, {"start": 97.65, "end": 97.77, "word": " we", "probability": 0.7626953125}, {"start": 97.77, "end": 97.97, "word": " can", "probability": 0.93994140625}, {"start": 97.97, "end": 98.35, "word": " use", "probability": 0.89599609375}, {"start": 98.35, "end": 99.27, "word": " mu", "probability": 0.8525390625}, {"start": 99.27, "end": 99.57, "word": " equal", "probability": 0.59130859375}, {"start": 99.57, "end": 99.73, "word": " to", "probability": 0.36279296875}, {"start": 99.73, "end": 99.97, "word": " 3", "probability": 0.9365234375}, {"start": 99.97, "end": 100.47, "word": " instead", "probability": 0.82568359375}, {"start": 100.47, "end": 100.93, "word": " of", "probability": 0.97021484375}, {"start": 100.93, "end": 103.21, "word": " mu", "probability": 0.8828125}, {"start": 103.21, "end": 104.17, "word": " greater", "probability": 0.779296875}, {"start": 104.17, "end": 104.45, "word": " than", "probability": 0.93505859375}, {"start": 104.45, "end": 104.59, "word": " or", "probability": 0.84326171875}, {"start": 104.59, "end": 104.77, "word": " equal", "probability": 0.9365234375}, {"start": 104.77, "end": 104.79, "word": " to", "probability": 0.91845703125}, {"start": 104.79, "end": 104.87, "word": " 3.", "probability": 0.95458984375}, {"start": 104.95, "end": 105.11, "word": " So", "probability": 0.890625}, {"start": 105.11, "end": 105.29, "word": " we", "probability": 0.9208984375}, {"start": 105.29, "end": 105.47, "word": " can", "probability": 0.9326171875}, {"start": 105.47, "end": 105.65, "word": " use", "probability": 0.86669921875}, {"start": 105.65, "end": 105.83, "word": " this", "probability": 0.94140625}, {"start": 105.83, "end": 106.31, "word": " notation", "probability": 0.970703125}, {"start": 106.31, "end": 108.09, "word": " instead", "probability": 0.603515625}, {"start": 108.09, "end": 108.33, "word": " of", "probability": 0.96240234375}, {"start": 108.33, "end": 108.85, 
"word": " using", "probability": 0.9052734375}, {"start": 108.85, "end": 112.23, "word": " mu", "probability": 0.84423828125}, {"start": 112.23, "end": 112.43, "word": " is", "probability": 0.81689453125}, {"start": 112.43, "end": 112.73, "word": " greater", "probability": 0.9150390625}, {"start": 112.73, "end": 113.07, "word": " than", "probability": 0.92529296875}, {"start": 113.07, "end": 113.35, "word": " or", "probability": 0.93408203125}, {"start": 113.35, "end": 113.55, "word": " equal", "probability": 0.9287109375}, {"start": 113.55, "end": 113.73, "word": " to", "probability": 0.96337890625}, {"start": 113.73, "end": 113.97, "word": " 3.", "probability": 0.96337890625}, {"start": 114.37, "end": 114.67, "word": " It's", "probability": 0.890869140625}, {"start": 114.67, "end": 114.93, "word": " better", "probability": 0.92333984375}, {"start": 114.93, "end": 115.17, "word": " to", "probability": 0.962890625}, {"start": 115.17, "end": 115.55, "word": " use", "probability": 0.8681640625}, {"start": 115.55, "end": 116.15, "word": " this", "probability": 0.94091796875}, {"start": 116.15, "end": 116.65, "word": " notation.", "probability": 0.97412109375}], "temperature": 1.0}, {"id": 5, "seek": 14619, "start": 117.57, "end": 146.19, "text": " Because as we know, H1 is the opposite of H0. So if under H0, we have a mu greater than or equal to 3, then for the alternative hypothesis, it could be a mu smaller than 3. But just to use simple notation, we may write mu equals 3 against the alternative. 
Actually, we are focusing on the alternative hypothesis, because the direction", "tokens": [1436, 382, 321, 458, 11, 389, 16, 307, 264, 6182, 295, 389, 15, 13, 407, 498, 833, 389, 15, 11, 321, 362, 257, 2992, 5044, 813, 420, 2681, 281, 805, 11, 550, 337, 264, 8535, 17291, 11, 309, 727, 312, 257, 2992, 4356, 813, 805, 13, 583, 445, 281, 764, 2199, 24657, 11, 321, 815, 2464, 2992, 6915, 805, 1970, 264, 8535, 13, 5135, 11, 321, 366, 8416, 322, 264, 8535, 17291, 11, 570, 264, 3513], "avg_logprob": -0.18283278601510183, "compression_ratio": 1.6502463054187193, "no_speech_prob": 0.0, "words": [{"start": 117.57, "end": 117.95, "word": " Because", "probability": 0.67822265625}, {"start": 117.95, "end": 118.15, "word": " as", "probability": 0.732421875}, {"start": 118.15, "end": 118.31, "word": " we", "probability": 0.935546875}, {"start": 118.31, "end": 118.63, "word": " know,", "probability": 0.8798828125}, {"start": 119.27, "end": 119.65, "word": " H1", "probability": 0.697998046875}, {"start": 119.65, "end": 120.05, "word": " is", "probability": 0.951171875}, {"start": 120.05, "end": 120.21, "word": " the", "probability": 0.90380859375}, {"start": 120.21, "end": 120.57, "word": " opposite", "probability": 0.953125}, {"start": 120.57, "end": 121.25, "word": " of", "probability": 0.9599609375}, {"start": 121.25, "end": 121.75, "word": " H0.", "probability": 0.958984375}, {"start": 121.95, "end": 122.47, "word": " So", "probability": 0.9033203125}, {"start": 122.47, "end": 122.91, "word": " if", "probability": 0.89892578125}, {"start": 122.91, "end": 123.83, "word": " under", "probability": 0.8701171875}, {"start": 123.83, "end": 124.19, "word": " H0,", "probability": 0.954833984375}, {"start": 124.27, "end": 124.37, "word": " we", "probability": 0.9375}, {"start": 124.37, "end": 124.57, "word": " have", "probability": 0.94091796875}, {"start": 124.57, "end": 124.71, "word": " a", "probability": 0.6123046875}, {"start": 124.71, "end": 124.87, "word": " mu", "probability": 
0.69677734375}, {"start": 124.87, "end": 125.37, "word": " greater", "probability": 0.9248046875}, {"start": 125.37, "end": 125.65, "word": " than", "probability": 0.9189453125}, {"start": 125.65, "end": 126.03, "word": " or", "probability": 0.9384765625}, {"start": 126.03, "end": 126.27, "word": " equal", "probability": 0.9111328125}, {"start": 126.27, "end": 126.45, "word": " to", "probability": 0.9453125}, {"start": 126.45, "end": 126.65, "word": " 3,", "probability": 0.828125}, {"start": 127.13, "end": 127.43, "word": " then", "probability": 0.81787109375}, {"start": 127.43, "end": 127.67, "word": " for", "probability": 0.8798828125}, {"start": 127.67, "end": 127.83, "word": " the", "probability": 0.75439453125}, {"start": 127.83, "end": 128.23, "word": " alternative", "probability": 0.80810546875}, {"start": 128.23, "end": 128.73, "word": " hypothesis,", "probability": 0.80615234375}, {"start": 128.95, "end": 128.95, "word": " it", "probability": 0.72900390625}, {"start": 128.95, "end": 129.07, "word": " could", "probability": 0.857421875}, {"start": 129.07, "end": 129.39, "word": " be", "probability": 0.94873046875}, {"start": 129.39, "end": 129.99, "word": " a", "probability": 0.52001953125}, {"start": 129.99, "end": 130.07, "word": " mu", "probability": 0.96630859375}, {"start": 130.07, "end": 130.43, "word": " smaller", "probability": 0.85595703125}, {"start": 130.43, "end": 130.65, "word": " than", "probability": 0.93017578125}, {"start": 130.65, "end": 130.89, "word": " 3.", "probability": 0.53759765625}, {"start": 131.61, "end": 132.33, "word": " But", "probability": 0.9384765625}, {"start": 132.33, "end": 132.73, "word": " just", "probability": 0.9111328125}, {"start": 132.73, "end": 132.97, "word": " to", "probability": 0.338134765625}, {"start": 132.97, "end": 133.15, "word": " use", "probability": 0.76025390625}, {"start": 133.15, "end": 133.39, "word": " simple", "probability": 0.93408203125}, {"start": 133.39, "end": 133.83, "word": " notation,", 
"probability": 0.95458984375}, {"start": 134.49, "end": 134.65, "word": " we", "probability": 0.95263671875}, {"start": 134.65, "end": 134.81, "word": " may", "probability": 0.94189453125}, {"start": 134.81, "end": 135.13, "word": " write", "probability": 0.9111328125}, {"start": 135.13, "end": 135.41, "word": " mu", "probability": 0.8193359375}, {"start": 135.41, "end": 136.37, "word": " equals", "probability": 0.82421875}, {"start": 136.37, "end": 136.77, "word": " 3", "probability": 0.962890625}, {"start": 136.77, "end": 137.65, "word": " against", "probability": 0.89501953125}, {"start": 137.65, "end": 138.35, "word": " the", "probability": 0.81494140625}, {"start": 138.35, "end": 138.79, "word": " alternative.", "probability": 0.91455078125}, {"start": 139.27, "end": 139.89, "word": " Actually,", "probability": 0.8544921875}, {"start": 140.45, "end": 140.89, "word": " we", "probability": 0.96142578125}, {"start": 140.89, "end": 141.09, "word": " are", "probability": 0.9384765625}, {"start": 141.09, "end": 141.55, "word": " focusing", "probability": 0.90283203125}, {"start": 141.55, "end": 142.05, "word": " on", "probability": 0.94384765625}, {"start": 142.05, "end": 142.31, "word": " the", "probability": 0.912109375}, {"start": 142.31, "end": 142.67, "word": " alternative", "probability": 0.9423828125}, {"start": 142.67, "end": 143.17, "word": " hypothesis,", "probability": 0.8583984375}, {"start": 143.85, "end": 145.35, "word": " because", "probability": 0.8916015625}, {"start": 145.35, "end": 145.63, "word": " the", "probability": 0.90478515625}, {"start": 145.63, "end": 146.19, "word": " direction", "probability": 0.97412109375}], "temperature": 1.0}, {"id": 6, "seek": 17571, "start": 147.11, "end": 175.71, "text": " of this one gives the rejection region of the test because my rejection region actually based on the sign or the direction under the alternative hypothesis so again this is called lower tail test since the alternative hypothesis is focused on 
the lower tail below the mean of three three is just an example the other one", "tokens": [295, 341, 472, 2709, 264, 26044, 4458, 295, 264, 1500, 570, 452, 26044, 4458, 767, 2361, 322, 264, 1465, 420, 264, 3513, 833, 264, 8535, 17291, 370, 797, 341, 307, 1219, 3126, 6838, 1500, 1670, 264, 8535, 17291, 307, 5178, 322, 264, 3126, 6838, 2507, 264, 914, 295, 1045, 1045, 307, 445, 364, 1365, 264, 661, 472], "avg_logprob": -0.22723598855322805, "compression_ratio": 1.9454545454545455, "no_speech_prob": 0.0, "words": [{"start": 147.11, "end": 147.49, "word": " of", "probability": 0.262939453125}, {"start": 147.49, "end": 147.73, "word": " this", "probability": 0.9306640625}, {"start": 147.73, "end": 147.99, "word": " one", "probability": 0.8642578125}, {"start": 147.99, "end": 148.31, "word": " gives", "probability": 0.80615234375}, {"start": 148.31, "end": 149.05, "word": " the", "probability": 0.86865234375}, {"start": 149.05, "end": 150.87, "word": " rejection", "probability": 0.7177734375}, {"start": 150.87, "end": 151.45, "word": " region", "probability": 0.9169921875}, {"start": 151.45, "end": 153.17, "word": " of", "probability": 0.92529296875}, {"start": 153.17, "end": 153.35, "word": " the", "probability": 0.900390625}, {"start": 153.35, "end": 153.65, "word": " test", "probability": 0.86376953125}, {"start": 153.65, "end": 154.21, "word": " because", "probability": 0.5185546875}, {"start": 154.21, "end": 154.61, "word": " my", "probability": 0.92822265625}, {"start": 154.61, "end": 154.93, "word": " rejection", "probability": 0.9560546875}, {"start": 154.93, "end": 155.25, "word": " region", "probability": 0.9248046875}, {"start": 155.25, "end": 155.77, "word": " actually", "probability": 0.69384765625}, {"start": 155.77, "end": 156.57, "word": " based", "probability": 0.76220703125}, {"start": 156.57, "end": 156.93, "word": " on", "probability": 0.9521484375}, {"start": 156.93, "end": 157.37, "word": " the", "probability": 0.9091796875}, {"start": 157.37, 
"end": 157.95, "word": " sign", "probability": 0.8466796875}, {"start": 157.95, "end": 158.27, "word": " or", "probability": 0.7861328125}, {"start": 158.27, "end": 158.39, "word": " the", "probability": 0.751953125}, {"start": 158.39, "end": 158.81, "word": " direction", "probability": 0.97216796875}, {"start": 158.81, "end": 159.43, "word": " under", "probability": 0.8623046875}, {"start": 159.43, "end": 160.23, "word": " the", "probability": 0.88134765625}, {"start": 160.23, "end": 160.65, "word": " alternative", "probability": 0.84814453125}, {"start": 160.65, "end": 161.17, "word": " hypothesis", "probability": 0.82861328125}, {"start": 161.17, "end": 162.15, "word": " so", "probability": 0.40966796875}, {"start": 162.15, "end": 162.47, "word": " again", "probability": 0.9013671875}, {"start": 162.47, "end": 163.77, "word": " this", "probability": 0.71337890625}, {"start": 163.77, "end": 163.91, "word": " is", "probability": 0.93505859375}, {"start": 163.91, "end": 164.19, "word": " called", "probability": 0.83203125}, {"start": 164.19, "end": 164.47, "word": " lower", "probability": 0.771484375}, {"start": 164.47, "end": 164.67, "word": " tail", "probability": 0.748046875}, {"start": 164.67, "end": 165.15, "word": " test", "probability": 0.77001953125}, {"start": 165.15, "end": 166.37, "word": " since", "probability": 0.82470703125}, {"start": 166.37, "end": 166.53, "word": " the", "probability": 0.85400390625}, {"start": 166.53, "end": 166.99, "word": " alternative", "probability": 0.892578125}, {"start": 166.99, "end": 167.53, "word": " hypothesis", "probability": 0.92333984375}, {"start": 167.53, "end": 167.95, "word": " is", "probability": 0.73046875}, {"start": 167.95, "end": 168.29, "word": " focused", "probability": 0.88134765625}, {"start": 168.29, "end": 168.75, "word": " on", "probability": 0.94384765625}, {"start": 168.75, "end": 168.95, "word": " the", "probability": 0.90478515625}, {"start": 168.95, "end": 169.19, "word": " lower", "probability": 
0.86669921875}, {"start": 169.19, "end": 169.65, "word": " tail", "probability": 0.85595703125}, {"start": 169.65, "end": 170.57, "word": " below", "probability": 0.8017578125}, {"start": 170.57, "end": 170.81, "word": " the", "probability": 0.919921875}, {"start": 170.81, "end": 170.95, "word": " mean", "probability": 0.98388671875}, {"start": 170.95, "end": 171.09, "word": " of", "probability": 0.9677734375}, {"start": 171.09, "end": 171.39, "word": " three", "probability": 0.31591796875}, {"start": 171.39, "end": 172.01, "word": " three", "probability": 0.410400390625}, {"start": 172.01, "end": 172.11, "word": " is", "probability": 0.6572265625}, {"start": 172.11, "end": 172.91, "word": " just", "probability": 0.72998046875}, {"start": 172.91, "end": 173.29, "word": " an", "probability": 0.92822265625}, {"start": 173.29, "end": 173.67, "word": " example", "probability": 0.97705078125}, {"start": 173.67, "end": 175.15, "word": " the", "probability": 0.76904296875}, {"start": 175.15, "end": 175.43, "word": " other", "probability": 0.892578125}, {"start": 175.43, "end": 175.71, "word": " one", "probability": 0.927734375}], "temperature": 1.0}, {"id": 7, "seek": 20432, "start": 176.58, "end": 204.32, "text": " When we are testing Mu smaller than or equal to 3 against Mu above 3, in this case, this one is called an upper tail test since the alternative hypothesis is focused on the upper tail above the minimum 3. Now, how can we state the appropriate null and alternative hypothesis? The answer depends on the problem itself. 
So by using the problem, we can determine", "tokens": [1133, 321, 366, 4997, 15601, 4356, 813, 420, 2681, 281, 805, 1970, 15601, 3673, 805, 11, 294, 341, 1389, 11, 341, 472, 307, 1219, 364, 6597, 6838, 1500, 1670, 264, 8535, 17291, 307, 5178, 322, 264, 6597, 6838, 3673, 264, 7285, 805, 13, 823, 11, 577, 393, 321, 1785, 264, 6854, 18184, 293, 8535, 17291, 30, 440, 1867, 5946, 322, 264, 1154, 2564, 13, 407, 538, 1228, 264, 1154, 11, 321, 393, 6997], "avg_logprob": -0.20777027147847252, "compression_ratio": 1.6589861751152073, "no_speech_prob": 0.0, "words": [{"start": 176.58, "end": 176.88, "word": " When", "probability": 0.771484375}, {"start": 176.88, "end": 177.0, "word": " we", "probability": 0.94873046875}, {"start": 177.0, "end": 177.14, "word": " are", "probability": 0.91748046875}, {"start": 177.14, "end": 177.62, "word": " testing", "probability": 0.85986328125}, {"start": 177.62, "end": 178.12, "word": " Mu", "probability": 0.168701171875}, {"start": 178.12, "end": 178.6, "word": " smaller", "probability": 0.52587890625}, {"start": 178.6, "end": 178.88, "word": " than", "probability": 0.94189453125}, {"start": 178.88, "end": 179.04, "word": " or", "probability": 0.94287109375}, {"start": 179.04, "end": 179.26, "word": " equal", "probability": 0.9111328125}, {"start": 179.26, "end": 179.46, "word": " to", "probability": 0.94482421875}, {"start": 179.46, "end": 179.62, "word": " 3", "probability": 0.669921875}, {"start": 179.62, "end": 180.0, "word": " against", "probability": 0.86474609375}, {"start": 180.0, "end": 180.32, "word": " Mu", "probability": 0.7412109375}, {"start": 180.32, "end": 180.64, "word": " above", "probability": 0.6396484375}, {"start": 180.64, "end": 181.08, "word": " 3,", "probability": 0.955078125}, {"start": 181.66, "end": 181.94, "word": " in", "probability": 0.87060546875}, {"start": 181.94, "end": 182.16, "word": " this", "probability": 0.94580078125}, {"start": 182.16, "end": 182.54, "word": " case,", "probability": 0.908203125}, 
{"start": 183.14, "end": 183.54, "word": " this", "probability": 0.919921875}, {"start": 183.54, "end": 183.72, "word": " one", "probability": 0.8134765625}, {"start": 183.72, "end": 183.86, "word": " is", "probability": 0.84521484375}, {"start": 183.86, "end": 184.18, "word": " called", "probability": 0.90283203125}, {"start": 184.18, "end": 184.4, "word": " an", "probability": 0.822265625}, {"start": 184.4, "end": 184.64, "word": " upper", "probability": 0.372802734375}, {"start": 184.64, "end": 184.84, "word": " tail", "probability": 0.67578125}, {"start": 184.84, "end": 185.3, "word": " test", "probability": 0.76171875}, {"start": 185.3, "end": 186.16, "word": " since", "probability": 0.5263671875}, {"start": 186.16, "end": 186.34, "word": " the", "probability": 0.84912109375}, {"start": 186.34, "end": 186.76, "word": " alternative", "probability": 0.9091796875}, {"start": 186.76, "end": 187.32, "word": " hypothesis", "probability": 0.873046875}, {"start": 187.32, "end": 188.14, "word": " is", "probability": 0.93505859375}, {"start": 188.14, "end": 188.54, "word": " focused", "probability": 0.90673828125}, {"start": 188.54, "end": 188.86, "word": " on", "probability": 0.94482421875}, {"start": 188.86, "end": 189.0, "word": " the", "probability": 0.8974609375}, {"start": 189.0, "end": 189.24, "word": " upper", "probability": 0.8505859375}, {"start": 189.24, "end": 189.58, "word": " tail", "probability": 0.8857421875}, {"start": 189.58, "end": 190.06, "word": " above", "probability": 0.95849609375}, {"start": 190.06, "end": 190.42, "word": " the", "probability": 0.712890625}, {"start": 190.42, "end": 190.58, "word": " minimum", "probability": 0.771484375}, {"start": 190.58, "end": 190.9, "word": " 3.", "probability": 0.5703125}, {"start": 191.76, "end": 192.08, "word": " Now,", "probability": 0.89013671875}, {"start": 192.2, "end": 192.4, "word": " how", "probability": 0.9306640625}, {"start": 192.4, "end": 192.6, "word": " can", "probability": 0.931640625}, 
{"start": 192.6, "end": 192.84, "word": " we", "probability": 0.95703125}, {"start": 192.84, "end": 193.62, "word": " state", "probability": 0.9443359375}, {"start": 193.62, "end": 194.04, "word": " the", "probability": 0.77685546875}, {"start": 194.04, "end": 194.7, "word": " appropriate", "probability": 0.810546875}, {"start": 194.7, "end": 195.58, "word": " null", "probability": 0.95068359375}, {"start": 195.58, "end": 195.92, "word": " and", "probability": 0.90576171875}, {"start": 195.92, "end": 196.34, "word": " alternative", "probability": 0.91748046875}, {"start": 196.34, "end": 196.82, "word": " hypothesis?", "probability": 0.77978515625}, {"start": 197.54, "end": 197.76, "word": " The", "probability": 0.884765625}, {"start": 197.76, "end": 198.08, "word": " answer", "probability": 0.95654296875}, {"start": 198.08, "end": 199.48, "word": " depends", "probability": 0.89599609375}, {"start": 199.48, "end": 199.9, "word": " on", "probability": 0.94775390625}, {"start": 199.9, "end": 201.28, "word": " the", "probability": 0.92138671875}, {"start": 201.28, "end": 201.62, "word": " problem", "probability": 0.86474609375}, {"start": 201.62, "end": 202.1, "word": " itself.", "probability": 0.81689453125}, {"start": 202.56, "end": 202.74, "word": " So", "probability": 0.8701171875}, {"start": 202.74, "end": 202.9, "word": " by", "probability": 0.56689453125}, {"start": 202.9, "end": 203.1, "word": " using", "probability": 0.9384765625}, {"start": 203.1, "end": 203.28, "word": " the", "probability": 0.87939453125}, {"start": 203.28, "end": 203.52, "word": " problem,", "probability": 0.865234375}, {"start": 203.56, "end": 203.7, "word": " we", "probability": 0.95556640625}, {"start": 203.7, "end": 203.9, "word": " can", "probability": 0.939453125}, {"start": 203.9, "end": 204.32, "word": " determine", "probability": 0.9208984375}], "temperature": 1.0}, {"id": 8, "seek": 23391, "start": 205.11, "end": 233.91, "text": " If the test is lower tail or upper tail. 
So you have to state carefully both null and alternative hypothesis. So for example, if the problem says increase, it means a mu is above. If the problem says smaller than or decrease from whatever the value is, then we have to use a mu smaller than that value.", "tokens": [759, 264, 1500, 307, 3126, 6838, 420, 6597, 6838, 13, 407, 291, 362, 281, 1785, 7500, 1293, 18184, 293, 8535, 17291, 13, 407, 337, 1365, 11, 498, 264, 1154, 1619, 3488, 11, 309, 1355, 257, 2992, 307, 3673, 13, 759, 264, 1154, 1619, 4356, 813, 420, 11514, 490, 2035, 264, 2158, 307, 11, 550, 321, 362, 281, 764, 257, 2992, 4356, 813, 300, 2158, 13], "avg_logprob": -0.20371685538328055, "compression_ratio": 1.6557377049180328, "no_speech_prob": 0.0, "words": [{"start": 205.11, "end": 205.47, "word": " If", "probability": 0.6328125}, {"start": 205.47, "end": 205.67, "word": " the", "probability": 0.91259765625}, {"start": 205.67, "end": 206.01, "word": " test", "probability": 0.8896484375}, {"start": 206.01, "end": 206.49, "word": " is", "probability": 0.94189453125}, {"start": 206.49, "end": 207.03, "word": " lower", "probability": 0.84375}, {"start": 207.03, "end": 207.39, "word": " tail", "probability": 0.796875}, {"start": 207.39, "end": 208.49, "word": " or", "probability": 0.8359375}, {"start": 208.49, "end": 208.87, "word": " upper", "probability": 0.34375}, {"start": 208.87, "end": 209.17, "word": " tail.", "probability": 0.5615234375}, {"start": 209.69, "end": 209.87, "word": " So", "probability": 0.9267578125}, {"start": 209.87, "end": 209.99, "word": " you", "probability": 0.49951171875}, {"start": 209.99, "end": 210.15, "word": " have", "probability": 0.943359375}, {"start": 210.15, "end": 210.29, "word": " to", "probability": 0.96826171875}, {"start": 210.29, "end": 210.57, "word": " state", "probability": 0.78759765625}, {"start": 210.57, "end": 211.25, "word": " carefully", "probability": 0.798828125}, {"start": 211.25, "end": 211.95, "word": " both", "probability": 0.79296875}, 
{"start": 211.95, "end": 212.41, "word": " null", "probability": 0.79833984375}, {"start": 212.41, "end": 213.17, "word": " and", "probability": 0.91748046875}, {"start": 213.17, "end": 213.71, "word": " alternative", "probability": 0.90087890625}, {"start": 213.71, "end": 214.83, "word": " hypothesis.", "probability": 0.49560546875}, {"start": 215.33, "end": 215.51, "word": " So", "probability": 0.93115234375}, {"start": 215.51, "end": 215.71, "word": " for", "probability": 0.77734375}, {"start": 215.71, "end": 216.05, "word": " example,", "probability": 0.974609375}, {"start": 216.17, "end": 216.27, "word": " if", "probability": 0.95361328125}, {"start": 216.27, "end": 216.45, "word": " the", "probability": 0.916015625}, {"start": 216.45, "end": 216.79, "word": " problem", "probability": 0.86474609375}, {"start": 216.79, "end": 217.23, "word": " says", "probability": 0.8896484375}, {"start": 217.23, "end": 218.93, "word": " increase,", "probability": 0.64892578125}, {"start": 219.87, "end": 220.39, "word": " it", "probability": 0.91015625}, {"start": 220.39, "end": 220.77, "word": " means", "probability": 0.935546875}, {"start": 220.77, "end": 221.15, "word": " a", "probability": 0.2015380859375}, {"start": 221.15, "end": 221.29, "word": " mu", "probability": 0.64599609375}, {"start": 221.29, "end": 221.43, "word": " is", "probability": 0.88916015625}, {"start": 221.43, "end": 221.71, "word": " above.", "probability": 0.955078125}, {"start": 222.53, "end": 223.25, "word": " If", "probability": 0.96533203125}, {"start": 223.25, "end": 223.91, "word": " the", "probability": 0.7841796875}, {"start": 223.91, "end": 224.25, "word": " problem", "probability": 0.86083984375}, {"start": 224.25, "end": 224.75, "word": " says", "probability": 0.86474609375}, {"start": 224.75, "end": 225.61, "word": " smaller", "probability": 0.80517578125}, {"start": 225.61, "end": 225.97, "word": " than", "probability": 0.8994140625}, {"start": 225.97, "end": 227.35, "word": " or", 
"probability": 0.78955078125}, {"start": 227.35, "end": 227.93, "word": " decrease", "probability": 0.77587890625}, {"start": 227.93, "end": 228.67, "word": " from", "probability": 0.88134765625}, {"start": 228.67, "end": 229.89, "word": " whatever", "probability": 0.93017578125}, {"start": 229.89, "end": 230.13, "word": " the", "probability": 0.91943359375}, {"start": 230.13, "end": 230.35, "word": " value", "probability": 0.97265625}, {"start": 230.35, "end": 230.63, "word": " is,", "probability": 0.93994140625}, {"start": 230.97, "end": 231.17, "word": " then", "probability": 0.83837890625}, {"start": 231.17, "end": 231.29, "word": " we", "probability": 0.8896484375}, {"start": 231.29, "end": 231.47, "word": " have", "probability": 0.9443359375}, {"start": 231.47, "end": 231.61, "word": " to", "probability": 0.96826171875}, {"start": 231.61, "end": 231.93, "word": " use", "probability": 0.89501953125}, {"start": 231.93, "end": 232.39, "word": " a", "probability": 0.8798828125}, {"start": 232.39, "end": 232.55, "word": " mu", "probability": 0.94921875}, {"start": 232.55, "end": 232.95, "word": " smaller", "probability": 0.8125}, {"start": 232.95, "end": 233.27, "word": " than", "probability": 0.94287109375}, {"start": 233.27, "end": 233.53, "word": " that", "probability": 0.931640625}, {"start": 233.53, "end": 233.91, "word": " value.", "probability": 0.970703125}], "temperature": 1.0}, {"id": 9, "seek": 26225, "start": 234.65, "end": 262.25, "text": " Any question? So that's how can we state the null and alternative hypothesis for one TAIL test. Let's see in detail the rejection regions for lower and upper TAIL tests. For lower TAIL test, so in this case we are focusing on the left side, so my rejection region should be this one. 
Now, we are testing our hypothesis", "tokens": [2639, 1168, 30, 407, 300, 311, 577, 393, 321, 1785, 264, 18184, 293, 8535, 17291, 337, 472, 20094, 4620, 1500, 13, 961, 311, 536, 294, 2607, 264, 26044, 10682, 337, 3126, 293, 6597, 20094, 4620, 6921, 13, 1171, 3126, 20094, 4620, 1500, 11, 370, 294, 341, 1389, 321, 366, 8416, 322, 264, 1411, 1252, 11, 370, 452, 26044, 4458, 820, 312, 341, 472, 13, 823, 11, 321, 366, 4997, 527, 17291], "avg_logprob": -0.1720920122332043, "compression_ratio": 1.6358974358974359, "no_speech_prob": 0.0, "words": [{"start": 234.65, "end": 234.91, "word": " Any", "probability": 0.66943359375}, {"start": 234.91, "end": 235.25, "word": " question?", "probability": 0.7060546875}, {"start": 235.85, "end": 236.31, "word": " So", "probability": 0.93115234375}, {"start": 236.31, "end": 236.67, "word": " that's", "probability": 0.903076171875}, {"start": 236.67, "end": 236.81, "word": " how", "probability": 0.8974609375}, {"start": 236.81, "end": 237.03, "word": " can", "probability": 0.77294921875}, {"start": 237.03, "end": 237.23, "word": " we", "probability": 0.9501953125}, {"start": 237.23, "end": 237.71, "word": " state", "probability": 0.94384765625}, {"start": 237.71, "end": 238.15, "word": " the", "probability": 0.8818359375}, {"start": 238.15, "end": 238.41, "word": " null", "probability": 0.9375}, {"start": 238.41, "end": 238.69, "word": " and", "probability": 0.896484375}, {"start": 238.69, "end": 239.13, "word": " alternative", "probability": 0.89453125}, {"start": 239.13, "end": 239.65, "word": " hypothesis", "probability": 0.5380859375}, {"start": 239.65, "end": 240.65, "word": " for", "probability": 0.93408203125}, {"start": 240.65, "end": 241.23, "word": " one", "probability": 0.919921875}, {"start": 241.23, "end": 241.75, "word": " TAIL", "probability": 0.58856201171875}, {"start": 241.75, "end": 241.97, "word": " test.", "probability": 0.81201171875}, {"start": 242.59, "end": 243.25, "word": " Let's", "probability": 0.966796875}, 
{"start": 243.25, "end": 243.47, "word": " see", "probability": 0.75244140625}, {"start": 243.47, "end": 243.87, "word": " in", "probability": 0.86669921875}, {"start": 243.87, "end": 244.23, "word": " detail", "probability": 0.73046875}, {"start": 244.23, "end": 244.53, "word": " the", "probability": 0.85302734375}, {"start": 244.53, "end": 244.83, "word": " rejection", "probability": 0.9541015625}, {"start": 244.83, "end": 245.23, "word": " regions", "probability": 0.9599609375}, {"start": 245.23, "end": 245.95, "word": " for", "probability": 0.94482421875}, {"start": 245.95, "end": 246.65, "word": " lower", "probability": 0.857421875}, {"start": 246.65, "end": 246.89, "word": " and", "probability": 0.92919921875}, {"start": 246.89, "end": 247.19, "word": " upper", "probability": 0.837890625}, {"start": 247.19, "end": 247.47, "word": " TAIL", "probability": 0.93017578125}, {"start": 247.47, "end": 247.85, "word": " tests.", "probability": 0.611328125}, {"start": 248.55, "end": 249.07, "word": " For", "probability": 0.919921875}, {"start": 249.07, "end": 249.39, "word": " lower", "probability": 0.82421875}, {"start": 249.39, "end": 249.71, "word": " TAIL", "probability": 0.961669921875}, {"start": 249.71, "end": 250.11, "word": " test,", "probability": 0.71728515625}, {"start": 251.13, "end": 251.31, "word": " so", "probability": 0.740234375}, {"start": 251.31, "end": 251.43, "word": " in", "probability": 0.9150390625}, {"start": 251.43, "end": 251.57, "word": " this", "probability": 0.947265625}, {"start": 251.57, "end": 251.77, "word": " case", "probability": 0.90625}, {"start": 251.77, "end": 251.91, "word": " we", "probability": 0.638671875}, {"start": 251.91, "end": 252.11, "word": " are", "probability": 0.9384765625}, {"start": 252.11, "end": 252.91, "word": " focusing", "probability": 0.90185546875}, {"start": 252.91, "end": 253.19, "word": " on", "probability": 0.939453125}, {"start": 253.19, "end": 253.33, "word": " the", "probability": 0.912109375}, 
{"start": 253.33, "end": 253.55, "word": " left", "probability": 0.953125}, {"start": 253.55, "end": 254.75, "word": " side,", "probability": 0.85693359375}, {"start": 255.85, "end": 256.05, "word": " so", "probability": 0.9072265625}, {"start": 256.05, "end": 256.51, "word": " my", "probability": 0.9619140625}, {"start": 256.51, "end": 256.85, "word": " rejection", "probability": 0.955078125}, {"start": 256.85, "end": 257.19, "word": " region", "probability": 0.9052734375}, {"start": 257.19, "end": 257.43, "word": " should", "probability": 0.9619140625}, {"start": 257.43, "end": 257.57, "word": " be", "probability": 0.9482421875}, {"start": 257.57, "end": 257.77, "word": " this", "probability": 0.9501953125}, {"start": 257.77, "end": 258.03, "word": " one.", "probability": 0.8955078125}, {"start": 259.13, "end": 259.49, "word": " Now,", "probability": 0.947265625}, {"start": 259.59, "end": 259.85, "word": " we", "probability": 0.96142578125}, {"start": 259.85, "end": 260.03, "word": " are", "probability": 0.9384765625}, {"start": 260.03, "end": 260.51, "word": " testing", "probability": 0.8896484375}, {"start": 260.51, "end": 261.73, "word": " our", "probability": 0.90185546875}, {"start": 261.73, "end": 262.25, "word": " hypothesis", "probability": 0.8623046875}], "temperature": 1.0}, {"id": 10, "seek": 29135, "start": 263.69, "end": 291.35, "text": " at level of significance alpha. For two-tiered test, we split this alpha by two, because we have two rejection regions, one to the right and other to the left side. But here there is only one side, so one direction, so we have to keep alpha as it is. 
So we have alpha here, so they accept the rejection region below minus z alpha or minus z alpha, depends on", "tokens": [412, 1496, 295, 17687, 8961, 13, 1171, 732, 12, 25402, 292, 1500, 11, 321, 7472, 341, 8961, 538, 732, 11, 570, 321, 362, 732, 26044, 10682, 11, 472, 281, 264, 558, 293, 661, 281, 264, 1411, 1252, 13, 583, 510, 456, 307, 787, 472, 1252, 11, 370, 472, 3513, 11, 370, 321, 362, 281, 1066, 8961, 382, 309, 307, 13, 407, 321, 362, 8961, 510, 11, 370, 436, 3241, 264, 26044, 4458, 2507, 3175, 710, 8961, 420, 3175, 710, 8961, 11, 5946, 322], "avg_logprob": -0.21465773170902616, "compression_ratio": 1.7342995169082125, "no_speech_prob": 2.980232238769531e-07, "words": [{"start": 263.69, "end": 264.11, "word": " at", "probability": 0.2880859375}, {"start": 264.11, "end": 264.67, "word": " level", "probability": 0.78466796875}, {"start": 264.67, "end": 264.85, "word": " of", "probability": 0.95751953125}, {"start": 264.85, "end": 265.37, "word": " significance", "probability": 0.9130859375}, {"start": 265.37, "end": 265.71, "word": " alpha.", "probability": 0.64208984375}, {"start": 266.97, "end": 267.49, "word": " For", "probability": 0.8740234375}, {"start": 267.49, "end": 267.71, "word": " two", "probability": 0.65234375}, {"start": 267.71, "end": 268.01, "word": "-tiered", "probability": 0.580810546875}, {"start": 268.01, "end": 268.31, "word": " test,", "probability": 0.59619140625}, {"start": 268.45, "end": 268.95, "word": " we", "probability": 0.88671875}, {"start": 268.95, "end": 270.13, "word": " split", "probability": 0.93115234375}, {"start": 270.13, "end": 270.37, "word": " this", "probability": 0.9326171875}, {"start": 270.37, "end": 270.61, "word": " alpha", "probability": 0.90966796875}, {"start": 270.61, "end": 270.85, "word": " by", "probability": 0.97216796875}, {"start": 270.85, "end": 271.11, "word": " two,", "probability": 0.74609375}, {"start": 271.47, "end": 271.77, "word": " because", "probability": 0.8984375}, {"start": 271.77, "end": 
271.91, "word": " we", "probability": 0.95556640625}, {"start": 271.91, "end": 272.05, "word": " have", "probability": 0.947265625}, {"start": 272.05, "end": 272.25, "word": " two", "probability": 0.92236328125}, {"start": 272.25, "end": 272.57, "word": " rejection", "probability": 0.970703125}, {"start": 272.57, "end": 273.03, "word": " regions,", "probability": 0.9697265625}, {"start": 273.45, "end": 273.79, "word": " one", "probability": 0.927734375}, {"start": 273.79, "end": 273.95, "word": " to", "probability": 0.9619140625}, {"start": 273.95, "end": 274.11, "word": " the", "probability": 0.9130859375}, {"start": 274.11, "end": 274.35, "word": " right", "probability": 0.9140625}, {"start": 274.35, "end": 274.61, "word": " and", "probability": 0.76904296875}, {"start": 274.61, "end": 274.87, "word": " other", "probability": 0.57763671875}, {"start": 274.87, "end": 275.09, "word": " to", "probability": 0.9599609375}, {"start": 275.09, "end": 275.25, "word": " the", "probability": 0.91259765625}, {"start": 275.25, "end": 275.41, "word": " left", "probability": 0.95068359375}, {"start": 275.41, "end": 275.81, "word": " side.", "probability": 0.84521484375}, {"start": 276.27, "end": 276.51, "word": " But", "probability": 0.9375}, {"start": 276.51, "end": 276.75, "word": " here", "probability": 0.83740234375}, {"start": 276.75, "end": 276.91, "word": " there", "probability": 0.662109375}, {"start": 276.91, "end": 277.05, "word": " is", "probability": 0.91064453125}, {"start": 277.05, "end": 277.25, "word": " only", "probability": 0.9248046875}, {"start": 277.25, "end": 277.49, "word": " one", "probability": 0.92431640625}, {"start": 277.49, "end": 277.87, "word": " side,", "probability": 0.84375}, {"start": 278.05, "end": 278.15, "word": " so", "probability": 0.8671875}, {"start": 278.15, "end": 278.33, "word": " one", "probability": 0.9033203125}, {"start": 278.33, "end": 278.73, "word": " direction,", "probability": 0.97314453125}, {"start": 279.29, "end": 279.39, 
"word": " so", "probability": 0.94287109375}, {"start": 279.39, "end": 279.53, "word": " we", "probability": 0.9482421875}, {"start": 279.53, "end": 279.65, "word": " have", "probability": 0.947265625}, {"start": 279.65, "end": 279.77, "word": " to", "probability": 0.96826171875}, {"start": 279.77, "end": 279.97, "word": " keep", "probability": 0.90185546875}, {"start": 279.97, "end": 280.35, "word": " alpha", "probability": 0.90771484375}, {"start": 280.35, "end": 280.63, "word": " as", "probability": 0.95703125}, {"start": 280.63, "end": 280.79, "word": " it", "probability": 0.9521484375}, {"start": 280.79, "end": 281.01, "word": " is.", "probability": 0.94580078125}, {"start": 281.33, "end": 281.51, "word": " So", "probability": 0.92578125}, {"start": 281.51, "end": 281.61, "word": " we", "probability": 0.798828125}, {"start": 281.61, "end": 281.73, "word": " have", "probability": 0.94873046875}, {"start": 281.73, "end": 282.01, "word": " alpha", "probability": 0.90576171875}, {"start": 282.01, "end": 282.33, "word": " here,", "probability": 0.8515625}, {"start": 282.83, "end": 282.99, "word": " so", "probability": 0.92333984375}, {"start": 282.99, "end": 283.17, "word": " they", "probability": 0.67822265625}, {"start": 283.17, "end": 283.55, "word": " accept", "probability": 0.9248046875}, {"start": 283.55, "end": 283.81, "word": " the", "probability": 0.82421875}, {"start": 283.81, "end": 284.19, "word": " rejection", "probability": 0.96435546875}, {"start": 284.19, "end": 284.65, "word": " region", "probability": 0.93310546875}, {"start": 284.65, "end": 285.79, "word": " below", "probability": 0.8544921875}, {"start": 285.79, "end": 286.75, "word": " minus", "probability": 0.9306640625}, {"start": 286.75, "end": 287.71, "word": " z", "probability": 0.66015625}, {"start": 287.71, "end": 288.75, "word": " alpha", "probability": 0.85205078125}, {"start": 288.75, "end": 289.55, "word": " or", "probability": 0.5908203125}, {"start": 289.55, "end": 290.01, "word": 
" minus", "probability": 0.98193359375}, {"start": 290.01, "end": 290.25, "word": " z", "probability": 0.548828125}, {"start": 290.25, "end": 290.47, "word": " alpha,", "probability": 0.87109375}, {"start": 290.47, "end": 290.93, "word": " depends", "probability": 0.5517578125}, {"start": 290.93, "end": 291.35, "word": " on", "probability": 0.951171875}], "temperature": 1.0}, {"id": 11, "seek": 32016, "start": 292.04, "end": 320.16, "text": " Sigma, known or unknown. So it's the same concepts as we discussed before. So my rejection region is the area to the left side. So for this particular hypothesis, if we are talking about testing Mu smaller than 3, then we may reject F0. If U is a statistic,", "tokens": [36595, 11, 2570, 420, 9841, 13, 407, 309, 311, 264, 912, 10392, 382, 321, 7152, 949, 13, 407, 452, 26044, 4458, 307, 264, 1859, 281, 264, 1411, 1252, 13, 407, 337, 341, 1729, 17291, 11, 498, 321, 366, 1417, 466, 4997, 15601, 4356, 813, 805, 11, 550, 321, 815, 8248, 479, 15, 13, 759, 624, 307, 257, 29588, 11], "avg_logprob": -0.29322916120290754, "compression_ratio": 1.4576271186440677, "no_speech_prob": 0.0, "words": [{"start": 292.04, "end": 292.52, "word": " Sigma,", "probability": 0.08697509765625}, {"start": 292.7, "end": 292.82, "word": " known", "probability": 0.6953125}, {"start": 292.82, "end": 293.04, "word": " or", "probability": 0.92529296875}, {"start": 293.04, "end": 293.38, "word": " unknown.", "probability": 0.85009765625}, {"start": 294.1, "end": 294.78, "word": " So", "probability": 0.84228515625}, {"start": 294.78, "end": 295.08, "word": " it's", "probability": 0.850341796875}, {"start": 295.08, "end": 295.3, "word": " the", "probability": 0.9169921875}, {"start": 295.3, "end": 295.48, "word": " same", "probability": 0.9072265625}, {"start": 295.48, "end": 295.98, "word": " concepts", "probability": 0.748046875}, {"start": 295.98, "end": 296.26, "word": " as", "probability": 0.91845703125}, {"start": 296.26, "end": 296.44, "word": " we", 
"probability": 0.9326171875}, {"start": 296.44, "end": 296.98, "word": " discussed", "probability": 0.837890625}, {"start": 296.98, "end": 297.36, "word": " before.", "probability": 0.8583984375}, {"start": 297.74, "end": 297.96, "word": " So", "probability": 0.93603515625}, {"start": 297.96, "end": 298.22, "word": " my", "probability": 0.84814453125}, {"start": 298.22, "end": 298.58, "word": " rejection", "probability": 0.9365234375}, {"start": 298.58, "end": 298.88, "word": " region", "probability": 0.57763671875}, {"start": 298.88, "end": 299.12, "word": " is", "probability": 0.83740234375}, {"start": 299.12, "end": 299.28, "word": " the", "probability": 0.59033203125}, {"start": 299.28, "end": 299.44, "word": " area", "probability": 0.85595703125}, {"start": 299.44, "end": 299.64, "word": " to", "probability": 0.93798828125}, {"start": 299.64, "end": 299.76, "word": " the", "probability": 0.91162109375}, {"start": 299.76, "end": 299.96, "word": " left", "probability": 0.943359375}, {"start": 299.96, "end": 300.34, "word": " side.", "probability": 0.84619140625}, {"start": 300.86, "end": 301.1, "word": " So", "probability": 0.9443359375}, {"start": 301.1, "end": 301.72, "word": " for", "probability": 0.6318359375}, {"start": 301.72, "end": 302.2, "word": " this", "probability": 0.9482421875}, {"start": 302.2, "end": 302.7, "word": " particular", "probability": 0.9013671875}, {"start": 302.7, "end": 303.52, "word": " hypothesis,", "probability": 0.8310546875}, {"start": 304.32, "end": 304.6, "word": " if", "probability": 0.927734375}, {"start": 304.6, "end": 304.76, "word": " we", "probability": 0.93115234375}, {"start": 304.76, "end": 304.9, "word": " are", "probability": 0.9287109375}, {"start": 304.9, "end": 305.26, "word": " talking", "probability": 0.85205078125}, {"start": 305.26, "end": 305.98, "word": " about", "probability": 0.892578125}, {"start": 305.98, "end": 308.62, "word": " testing", "probability": 0.69921875}, {"start": 308.62, "end": 309.16, 
"word": " Mu", "probability": 0.33935546875}, {"start": 309.16, "end": 309.62, "word": " smaller", "probability": 0.78515625}, {"start": 309.62, "end": 309.94, "word": " than", "probability": 0.94580078125}, {"start": 309.94, "end": 310.26, "word": " 3,", "probability": 0.5634765625}, {"start": 310.48, "end": 310.88, "word": " then", "probability": 0.8505859375}, {"start": 310.88, "end": 311.36, "word": " we", "probability": 0.95458984375}, {"start": 311.36, "end": 311.54, "word": " may", "probability": 0.94189453125}, {"start": 311.54, "end": 312.0, "word": " reject", "probability": 0.93310546875}, {"start": 312.0, "end": 312.82, "word": " F0.", "probability": 0.506591796875}, {"start": 317.1, "end": 317.78, "word": " If", "probability": 0.896484375}, {"start": 317.78, "end": 319.46, "word": " U", "probability": 0.55810546875}, {"start": 319.46, "end": 319.52, "word": " is", "probability": 0.469970703125}, {"start": 319.52, "end": 319.64, "word": " a", "probability": 0.37841796875}, {"start": 319.64, "end": 320.16, "word": " statistic,", "probability": 0.822265625}], "temperature": 1.0}, {"id": 12, "seek": 33492, "start": 322.16, "end": 334.92, "text": " Or T is smaller than minus the alpha. 
So there is only one rejection region in this case because we are talking about one tail test.", "tokens": [1610, 314, 307, 4356, 813, 3175, 264, 8961, 13, 407, 456, 307, 787, 472, 26044, 4458, 294, 341, 1389, 570, 321, 366, 1417, 466, 472, 6838, 1500, 13], "avg_logprob": -0.25053880132477857, "compression_ratio": 1.2666666666666666, "no_speech_prob": 0.0, "words": [{"start": 322.16, "end": 322.5, "word": " Or", "probability": 0.48583984375}, {"start": 322.5, "end": 322.8, "word": " T", "probability": 0.7138671875}, {"start": 322.8, "end": 323.62, "word": " is", "probability": 0.306640625}, {"start": 323.62, "end": 324.88, "word": " smaller", "probability": 0.87158203125}, {"start": 324.88, "end": 325.36, "word": " than", "probability": 0.93701171875}, {"start": 325.36, "end": 325.98, "word": " minus", "probability": 0.87548828125}, {"start": 325.98, "end": 326.78, "word": " the", "probability": 0.509765625}, {"start": 326.78, "end": 327.06, "word": " alpha.", "probability": 0.56005859375}, {"start": 328.86, "end": 329.5, "word": " So", "probability": 0.9326171875}, {"start": 329.5, "end": 329.68, "word": " there", "probability": 0.80322265625}, {"start": 329.68, "end": 329.82, "word": " is", "probability": 0.8984375}, {"start": 329.82, "end": 330.04, "word": " only", "probability": 0.92724609375}, {"start": 330.04, "end": 330.34, "word": " one", "probability": 0.896484375}, {"start": 330.34, "end": 330.72, "word": " rejection", "probability": 0.96826171875}, {"start": 330.72, "end": 331.16, "word": " region", "probability": 0.9423828125}, {"start": 331.16, "end": 331.44, "word": " in", "probability": 0.91064453125}, {"start": 331.44, "end": 331.64, "word": " this", "probability": 0.94677734375}, {"start": 331.64, "end": 331.88, "word": " case", "probability": 0.9208984375}, {"start": 331.88, "end": 332.2, "word": " because", "probability": 0.548828125}, {"start": 332.2, "end": 332.34, "word": " we", "probability": 0.951171875}, {"start": 332.34, "end": 332.46, 
"word": " are", "probability": 0.9130859375}, {"start": 332.46, "end": 332.78, "word": " talking", "probability": 0.841796875}, {"start": 332.78, "end": 333.36, "word": " about", "probability": 0.9091796875}, {"start": 333.36, "end": 334.34, "word": " one", "probability": 0.77001953125}, {"start": 334.34, "end": 334.6, "word": " tail", "probability": 0.51708984375}, {"start": 334.6, "end": 334.92, "word": " test.", "probability": 0.5361328125}], "temperature": 1.0}, {"id": 13, "seek": 35478, "start": 336.18, "end": 354.78, "text": " So again, my critical value is only one critical value because here it's just one tier test. It's negative z alpha or negative z alpha. So we reject the null hypothesis if the value of the z statistic falls in the rejection region. It means that", "tokens": [407, 797, 11, 452, 4924, 2158, 307, 787, 472, 4924, 2158, 570, 510, 309, 311, 445, 472, 12362, 1500, 13, 467, 311, 3671, 710, 8961, 420, 3671, 710, 8961, 13, 407, 321, 8248, 264, 18184, 17291, 498, 264, 2158, 295, 264, 710, 29588, 8804, 294, 264, 26044, 4458, 13, 467, 1355, 300], "avg_logprob": -0.21241155547915763, "compression_ratio": 1.618421052631579, "no_speech_prob": 0.0, "words": [{"start": 336.18, "end": 336.44, "word": " So", "probability": 0.88671875}, {"start": 336.44, "end": 336.66, "word": " again,", "probability": 0.77880859375}, {"start": 336.78, "end": 337.42, "word": " my", "probability": 0.5595703125}, {"start": 337.42, "end": 337.94, "word": " critical", "probability": 0.93896484375}, {"start": 337.94, "end": 338.36, "word": " value", "probability": 0.8671875}, {"start": 338.36, "end": 338.5, "word": " is", "probability": 0.884765625}, {"start": 338.5, "end": 338.74, "word": " only", "probability": 0.92626953125}, {"start": 338.74, "end": 338.98, "word": " one", "probability": 0.89111328125}, {"start": 338.98, "end": 339.36, "word": " critical", "probability": 0.935546875}, {"start": 339.36, "end": 339.78, "word": " value", "probability": 0.9775390625}, {"start": 
339.78, "end": 340.12, "word": " because", "probability": 0.499755859375}, {"start": 340.12, "end": 340.46, "word": " here", "probability": 0.8349609375}, {"start": 340.46, "end": 341.2, "word": " it's", "probability": 0.8876953125}, {"start": 341.2, "end": 341.5, "word": " just", "probability": 0.92431640625}, {"start": 341.5, "end": 341.84, "word": " one", "probability": 0.7119140625}, {"start": 341.84, "end": 342.04, "word": " tier", "probability": 0.3125}, {"start": 342.04, "end": 342.42, "word": " test.", "probability": 0.5712890625}, {"start": 343.68, "end": 344.02, "word": " It's", "probability": 0.87646484375}, {"start": 344.02, "end": 344.5, "word": " negative", "probability": 0.92724609375}, {"start": 344.5, "end": 344.9, "word": " z", "probability": 0.57568359375}, {"start": 344.9, "end": 345.14, "word": " alpha", "probability": 0.88671875}, {"start": 345.14, "end": 345.36, "word": " or", "probability": 0.89599609375}, {"start": 345.36, "end": 345.64, "word": " negative", "probability": 0.92529296875}, {"start": 345.64, "end": 345.86, "word": " z", "probability": 0.75634765625}, {"start": 345.86, "end": 346.16, "word": " alpha.", "probability": 0.92138671875}, {"start": 346.62, "end": 346.92, "word": " So", "probability": 0.94482421875}, {"start": 346.92, "end": 347.08, "word": " we", "probability": 0.6953125}, {"start": 347.08, "end": 347.42, "word": " reject", "probability": 0.90087890625}, {"start": 347.42, "end": 347.6, "word": " the", "probability": 0.86328125}, {"start": 347.6, "end": 347.76, "word": " null", "probability": 0.98046875}, {"start": 347.76, "end": 348.24, "word": " hypothesis", "probability": 0.7998046875}, {"start": 348.24, "end": 349.18, "word": " if", "probability": 0.8486328125}, {"start": 349.18, "end": 350.0, "word": " the", "probability": 0.91748046875}, {"start": 350.0, "end": 350.36, "word": " value", "probability": 0.97216796875}, {"start": 350.36, "end": 350.5, "word": " of", "probability": 0.9482421875}, {"start": 350.5, 
"end": 350.64, "word": " the", "probability": 0.77978515625}, {"start": 350.64, "end": 350.78, "word": " z", "probability": 0.82177734375}, {"start": 350.78, "end": 351.28, "word": " statistic", "probability": 0.7236328125}, {"start": 351.28, "end": 352.5, "word": " falls", "probability": 0.8408203125}, {"start": 352.5, "end": 352.86, "word": " in", "probability": 0.93701171875}, {"start": 352.86, "end": 353.02, "word": " the", "probability": 0.9169921875}, {"start": 353.02, "end": 353.36, "word": " rejection", "probability": 0.97802734375}, {"start": 353.36, "end": 353.8, "word": " region.", "probability": 0.92822265625}, {"start": 353.92, "end": 354.04, "word": " It", "probability": 0.88037109375}, {"start": 354.04, "end": 354.4, "word": " means", "probability": 0.92431640625}, {"start": 354.4, "end": 354.78, "word": " that", "probability": 0.93994140625}], "temperature": 1.0}, {"id": 14, "seek": 38014, "start": 355.08, "end": 380.14, "text": " Z statistic is smaller than minus Z alpha. That's for lower tail test. For the upper tail, vice versa actually. It's the same, but here the rejection region is on the right side, the upper tail. So here we reject the null hypothesis for the other case. 
If we are testing", "tokens": [1176, 29588, 307, 4356, 813, 3175, 1176, 8961, 13, 663, 311, 337, 3126, 6838, 1500, 13, 1171, 264, 6597, 6838, 11, 11964, 25650, 767, 13, 467, 311, 264, 912, 11, 457, 510, 264, 26044, 4458, 307, 322, 264, 558, 1252, 11, 264, 6597, 6838, 13, 407, 510, 321, 8248, 264, 18184, 17291, 337, 264, 661, 1389, 13, 759, 321, 366, 4997], "avg_logprob": -0.2200100859326701, "compression_ratio": 1.5664739884393064, "no_speech_prob": 0.0, "words": [{"start": 355.08, "end": 355.52, "word": " Z", "probability": 0.4228515625}, {"start": 355.52, "end": 356.58, "word": " statistic", "probability": 0.447998046875}, {"start": 356.58, "end": 356.94, "word": " is", "probability": 0.74560546875}, {"start": 356.94, "end": 357.3, "word": " smaller", "probability": 0.87890625}, {"start": 357.3, "end": 357.7, "word": " than", "probability": 0.93896484375}, {"start": 357.7, "end": 358.28, "word": " minus", "probability": 0.9130859375}, {"start": 358.28, "end": 358.86, "word": " Z", "probability": 0.8310546875}, {"start": 358.86, "end": 359.08, "word": " alpha.", "probability": 0.65185546875}, {"start": 359.48, "end": 359.86, "word": " That's", "probability": 0.935791015625}, {"start": 359.86, "end": 360.2, "word": " for", "probability": 0.94140625}, {"start": 360.2, "end": 361.44, "word": " lower", "probability": 0.84716796875}, {"start": 361.44, "end": 362.3, "word": " tail", "probability": 0.57080078125}, {"start": 362.3, "end": 362.62, "word": " test.", "probability": 0.6484375}, {"start": 363.08, "end": 363.36, "word": " For", "probability": 0.958984375}, {"start": 363.36, "end": 363.5, "word": " the", "probability": 0.92041015625}, {"start": 363.5, "end": 363.76, "word": " upper", "probability": 0.7490234375}, {"start": 363.76, "end": 364.1, "word": " tail,", "probability": 0.85986328125}, {"start": 364.3, "end": 364.54, "word": " vice", "probability": 0.7880859375}, {"start": 364.54, "end": 364.84, "word": " versa", "probability": 0.78857421875}, {"start": 
364.84, "end": 365.34, "word": " actually.", "probability": 0.56591796875}, {"start": 365.68, "end": 365.96, "word": " It's", "probability": 0.921142578125}, {"start": 365.96, "end": 366.14, "word": " the", "probability": 0.91796875}, {"start": 366.14, "end": 366.36, "word": " same,", "probability": 0.90234375}, {"start": 366.48, "end": 366.58, "word": " but", "probability": 0.91943359375}, {"start": 366.58, "end": 366.86, "word": " here", "probability": 0.84375}, {"start": 366.86, "end": 368.74, "word": " the", "probability": 0.6298828125}, {"start": 368.74, "end": 369.24, "word": " rejection", "probability": 0.9521484375}, {"start": 369.24, "end": 369.68, "word": " region", "probability": 0.94287109375}, {"start": 369.68, "end": 370.86, "word": " is", "probability": 0.935546875}, {"start": 370.86, "end": 371.08, "word": " on", "probability": 0.94189453125}, {"start": 371.08, "end": 371.32, "word": " the", "probability": 0.91455078125}, {"start": 371.32, "end": 371.72, "word": " right", "probability": 0.92138671875}, {"start": 371.72, "end": 372.14, "word": " side,", "probability": 0.86767578125}, {"start": 372.44, "end": 372.54, "word": " the", "probability": 0.468994140625}, {"start": 372.54, "end": 372.8, "word": " upper", "probability": 0.84521484375}, {"start": 372.8, "end": 373.08, "word": " tail.", "probability": 0.86962890625}, {"start": 373.58, "end": 373.74, "word": " So", "probability": 0.94482421875}, {"start": 373.74, "end": 373.96, "word": " here", "probability": 0.787109375}, {"start": 373.96, "end": 374.18, "word": " we", "probability": 0.63525390625}, {"start": 374.18, "end": 374.66, "word": " reject", "probability": 0.90234375}, {"start": 374.66, "end": 375.34, "word": " the", "probability": 0.8095703125}, {"start": 375.34, "end": 375.48, "word": " null", "probability": 0.9677734375}, {"start": 375.48, "end": 376.02, "word": " hypothesis", "probability": 0.81884765625}, {"start": 376.02, "end": 376.78, "word": " for", "probability": 
0.8740234375}, {"start": 376.78, "end": 376.94, "word": " the", "probability": 0.89013671875}, {"start": 376.94, "end": 377.16, "word": " other", "probability": 0.89013671875}, {"start": 377.16, "end": 377.62, "word": " case.", "probability": 0.9248046875}, {"start": 378.44, "end": 378.98, "word": " If", "probability": 0.94970703125}, {"start": 378.98, "end": 379.14, "word": " we", "probability": 0.640625}, {"start": 379.14, "end": 379.36, "word": " are", "probability": 0.9228515625}, {"start": 379.36, "end": 380.14, "word": " testing", "probability": 0.859375}], "temperature": 1.0}, {"id": 15, "seek": 40623, "start": 380.99, "end": 406.23, "text": " Mu smaller than or equal to 3 against Mu is above 3. The rejection is really on this area. So Z alpha or above it. So we reject H0 if my Z statistic is above Z alpha.", "tokens": [15601, 4356, 813, 420, 2681, 281, 805, 1970, 15601, 307, 3673, 805, 13, 440, 26044, 307, 534, 322, 341, 1859, 13, 407, 1176, 8961, 420, 3673, 309, 13, 407, 321, 8248, 389, 15, 498, 452, 1176, 29588, 307, 3673, 1176, 8961, 13], "avg_logprob": -0.2803415628366692, "compression_ratio": 1.3688524590163935, "no_speech_prob": 0.0, "words": [{"start": 380.99, "end": 381.51, "word": " Mu", "probability": 0.50732421875}, {"start": 381.51, "end": 382.75, "word": " smaller", "probability": 0.59521484375}, {"start": 382.75, "end": 383.05, "word": " than", "probability": 0.93359375}, {"start": 383.05, "end": 383.21, "word": " or", "probability": 0.92236328125}, {"start": 383.21, "end": 383.41, "word": " equal", "probability": 0.91015625}, {"start": 383.41, "end": 383.57, "word": " to", "probability": 0.95556640625}, {"start": 383.57, "end": 383.73, "word": " 3", "probability": 0.7763671875}, {"start": 383.73, "end": 384.17, "word": " against", "probability": 0.85986328125}, {"start": 384.17, "end": 385.15, "word": " Mu", "probability": 0.300048828125}, {"start": 385.15, "end": 385.35, "word": " is", "probability": 0.74462890625}, {"start": 385.35, "end": 
385.67, "word": " above", "probability": 0.9306640625}, {"start": 385.67, "end": 386.11, "word": " 3.", "probability": 0.87939453125}, {"start": 387.05, "end": 387.33, "word": " The", "probability": 0.80029296875}, {"start": 387.33, "end": 388.33, "word": " rejection", "probability": 0.92431640625}, {"start": 388.33, "end": 388.57, "word": " is", "probability": 0.365966796875}, {"start": 388.57, "end": 388.77, "word": " really", "probability": 0.68017578125}, {"start": 388.77, "end": 389.07, "word": " on", "probability": 0.77685546875}, {"start": 389.07, "end": 389.45, "word": " this", "probability": 0.85986328125}, {"start": 389.45, "end": 390.49, "word": " area.", "probability": 0.89208984375}, {"start": 391.33, "end": 391.81, "word": " So", "probability": 0.95068359375}, {"start": 391.81, "end": 392.33, "word": " Z", "probability": 0.480712890625}, {"start": 392.33, "end": 392.91, "word": " alpha", "probability": 0.845703125}, {"start": 392.91, "end": 393.75, "word": " or", "probability": 0.65576171875}, {"start": 393.75, "end": 394.03, "word": " above", "probability": 0.962890625}, {"start": 394.03, "end": 394.27, "word": " it.", "probability": 0.93798828125}, {"start": 394.93, "end": 395.19, "word": " So", "probability": 0.95947265625}, {"start": 395.19, "end": 395.39, "word": " we", "probability": 0.93505859375}, {"start": 395.39, "end": 395.81, "word": " reject", "probability": 0.91162109375}, {"start": 395.81, "end": 400.33, "word": " H0", "probability": 0.6568603515625}, {"start": 400.33, "end": 402.17, "word": " if", "probability": 0.6416015625}, {"start": 402.17, "end": 402.41, "word": " my", "probability": 0.9521484375}, {"start": 402.41, "end": 402.65, "word": " Z", "probability": 0.92138671875}, {"start": 402.65, "end": 403.15, "word": " statistic", "probability": 0.291015625}, {"start": 403.15, "end": 404.69, "word": " is", "probability": 0.912109375}, {"start": 404.69, "end": 405.19, "word": " above", "probability": 0.95703125}, {"start": 405.19, 
"end": 406.03, "word": " Z", "probability": 0.8349609375}, {"start": 406.03, "end": 406.23, "word": " alpha.", "probability": 0.841796875}], "temperature": 1.0}, {"id": 16, "seek": 43647, "start": 408.91, "end": 436.47, "text": " So that's for lower and upper tail test. And actually, if these statistics falls below in this case, Z alpha, then it falls in the non-rejection region. So we don't reject the null hypothesis. That means there is insufficient evidence to support the alternative hypothesis. So that's for one tail test, either lower or the upper tail.", "tokens": [407, 300, 311, 337, 3126, 293, 6597, 6838, 1500, 13, 400, 767, 11, 498, 613, 12523, 8804, 2507, 294, 341, 1389, 11, 1176, 8961, 11, 550, 309, 8804, 294, 264, 2107, 12, 265, 1020, 313, 4458, 13, 407, 321, 500, 380, 8248, 264, 18184, 17291, 13, 663, 1355, 456, 307, 41709, 4467, 281, 1406, 264, 8535, 17291, 13, 407, 300, 311, 337, 472, 6838, 1500, 11, 2139, 3126, 420, 264, 6597, 6838, 13], "avg_logprob": -0.16543496722305143, "compression_ratio": 1.6666666666666667, "no_speech_prob": 0.0, "words": [{"start": 408.91, "end": 409.23, "word": " So", "probability": 0.85693359375}, {"start": 409.23, "end": 409.65, "word": " that's", "probability": 0.914794921875}, {"start": 409.65, "end": 410.05, "word": " for", "probability": 0.94482421875}, {"start": 410.05, "end": 411.05, "word": " lower", "probability": 0.869140625}, {"start": 411.05, "end": 411.31, "word": " and", "probability": 0.92138671875}, {"start": 411.31, "end": 411.65, "word": " upper", "probability": 0.84765625}, {"start": 411.65, "end": 412.03, "word": " tail", "probability": 0.72900390625}, {"start": 412.03, "end": 413.21, "word": " test.", "probability": 0.66162109375}, {"start": 414.61, "end": 414.93, "word": " And", "probability": 0.80126953125}, {"start": 414.93, "end": 415.25, "word": " actually,", "probability": 0.76416015625}, {"start": 415.49, "end": 415.55, "word": " if", "probability": 0.8330078125}, {"start": 415.55, "end": 
415.95, "word": " these", "probability": 0.428466796875}, {"start": 415.95, "end": 416.57, "word": " statistics", "probability": 0.90185546875}, {"start": 416.57, "end": 416.91, "word": " falls", "probability": 0.759765625}, {"start": 416.91, "end": 417.17, "word": " below", "probability": 0.94580078125}, {"start": 417.17, "end": 417.35, "word": " in", "probability": 0.849609375}, {"start": 417.35, "end": 417.57, "word": " this", "probability": 0.94091796875}, {"start": 417.57, "end": 417.97, "word": " case,", "probability": 0.91552734375}, {"start": 418.49, "end": 418.61, "word": " Z", "probability": 0.4755859375}, {"start": 418.61, "end": 418.91, "word": " alpha,", "probability": 0.849609375}, {"start": 419.45, "end": 419.77, "word": " then", "probability": 0.8505859375}, {"start": 419.77, "end": 420.15, "word": " it", "probability": 0.9296875}, {"start": 420.15, "end": 420.51, "word": " falls", "probability": 0.85400390625}, {"start": 420.51, "end": 420.71, "word": " in", "probability": 0.9189453125}, {"start": 420.71, "end": 420.89, "word": " the", "probability": 0.9091796875}, {"start": 420.89, "end": 421.11, "word": " non", "probability": 0.66845703125}, {"start": 421.11, "end": 421.63, "word": "-rejection", "probability": 0.8839111328125}, {"start": 421.63, "end": 421.95, "word": " region.", "probability": 0.7158203125}, {"start": 422.25, "end": 422.45, "word": " So", "probability": 0.96044921875}, {"start": 422.45, "end": 422.61, "word": " we", "probability": 0.845703125}, {"start": 422.61, "end": 423.25, "word": " don't", "probability": 0.978515625}, {"start": 423.25, "end": 423.77, "word": " reject", "probability": 0.92431640625}, {"start": 423.77, "end": 423.95, "word": " the", "probability": 0.8671875}, {"start": 423.95, "end": 424.09, "word": " null", "probability": 0.94287109375}, {"start": 424.09, "end": 424.57, "word": " hypothesis.", "probability": 0.83837890625}, {"start": 424.77, "end": 424.95, "word": " That", "probability": 0.9033203125}, 
{"start": 424.95, "end": 425.27, "word": " means", "probability": 0.9326171875}, {"start": 425.27, "end": 425.89, "word": " there", "probability": 0.83642578125}, {"start": 425.89, "end": 426.05, "word": " is", "probability": 0.93505859375}, {"start": 426.05, "end": 426.57, "word": " insufficient", "probability": 0.9013671875}, {"start": 426.57, "end": 427.09, "word": " evidence", "probability": 0.9560546875}, {"start": 427.09, "end": 427.33, "word": " to", "probability": 0.921875}, {"start": 427.33, "end": 427.79, "word": " support", "probability": 0.9814453125}, {"start": 427.79, "end": 428.87, "word": " the", "probability": 0.900390625}, {"start": 428.87, "end": 429.41, "word": " alternative", "probability": 0.92138671875}, {"start": 429.41, "end": 430.41, "word": " hypothesis.", "probability": 0.80615234375}, {"start": 431.01, "end": 431.47, "word": " So", "probability": 0.95458984375}, {"start": 431.47, "end": 431.79, "word": " that's", "probability": 0.9638671875}, {"start": 431.79, "end": 432.05, "word": " for", "probability": 0.9443359375}, {"start": 432.05, "end": 432.51, "word": " one", "probability": 0.91455078125}, {"start": 432.51, "end": 433.25, "word": " tail", "probability": 0.611328125}, {"start": 433.25, "end": 433.67, "word": " test,", "probability": 0.78857421875}, {"start": 433.79, "end": 434.07, "word": " either", "probability": 0.93359375}, {"start": 434.07, "end": 434.83, "word": " lower", "probability": 0.87451171875}, {"start": 434.83, "end": 435.71, "word": " or", "probability": 0.9326171875}, {"start": 435.71, "end": 435.87, "word": " the", "probability": 0.91748046875}, {"start": 435.87, "end": 436.13, "word": " upper", "probability": 0.84521484375}, {"start": 436.13, "end": 436.47, "word": " tail.", "probability": 0.85009765625}], "temperature": 1.0}, {"id": 17, "seek": 46260, "start": 438.28, "end": 462.6, "text": " Let's see an example for opportunity test when sigma is unknown. Again, the idea is the same. Sigma is known or unknown. 
We just replace z by t, sigma by s. That's all. Here, for example, a phone industry manager thinks that customer monthly cell phone bills have increased.", "tokens": [961, 311, 536, 364, 1365, 337, 2650, 1500, 562, 12771, 307, 9841, 13, 3764, 11, 264, 1558, 307, 264, 912, 13, 36595, 307, 2570, 420, 9841, 13, 492, 445, 7406, 710, 538, 256, 11, 12771, 538, 262, 13, 663, 311, 439, 13, 1692, 11, 337, 1365, 11, 257, 2593, 3518, 6598, 7309, 300, 5474, 12878, 2815, 2593, 12433, 362, 6505, 13], "avg_logprob": -0.21358366214459942, "compression_ratio": 1.4864864864864864, "no_speech_prob": 0.0, "words": [{"start": 438.28, "end": 439.0, "word": " Let's", "probability": 0.752197265625}, {"start": 439.0, "end": 439.72, "word": " see", "probability": 0.84326171875}, {"start": 439.72, "end": 439.98, "word": " an", "probability": 0.900390625}, {"start": 439.98, "end": 440.42, "word": " example", "probability": 0.9775390625}, {"start": 440.42, "end": 441.12, "word": " for", "probability": 0.8251953125}, {"start": 441.12, "end": 441.62, "word": " opportunity", "probability": 0.24365234375}, {"start": 441.62, "end": 442.24, "word": " test", "probability": 0.83837890625}, {"start": 442.24, "end": 442.9, "word": " when", "probability": 0.78466796875}, {"start": 442.9, "end": 443.18, "word": " sigma", "probability": 0.443115234375}, {"start": 443.18, "end": 443.34, "word": " is", "probability": 0.9287109375}, {"start": 443.34, "end": 443.62, "word": " unknown.", "probability": 0.89306640625}, {"start": 444.14, "end": 444.5, "word": " Again,", "probability": 0.83447265625}, {"start": 445.16, "end": 445.5, "word": " the", "probability": 0.8623046875}, {"start": 445.5, "end": 445.78, "word": " idea", "probability": 0.91357421875}, {"start": 445.78, "end": 445.96, "word": " is", "probability": 0.9541015625}, {"start": 445.96, "end": 446.1, "word": " the", "probability": 0.90576171875}, {"start": 446.1, "end": 446.36, "word": " same.", "probability": 0.9150390625}, {"start": 446.5, "end": 
446.74, "word": " Sigma", "probability": 0.88134765625}, {"start": 446.74, "end": 446.94, "word": " is", "probability": 0.935546875}, {"start": 446.94, "end": 447.18, "word": " known", "probability": 0.7255859375}, {"start": 447.18, "end": 447.4, "word": " or", "probability": 0.93408203125}, {"start": 447.4, "end": 447.84, "word": " unknown.", "probability": 0.8818359375}, {"start": 448.22, "end": 448.4, "word": " We", "probability": 0.93505859375}, {"start": 448.4, "end": 448.7, "word": " just", "probability": 0.89501953125}, {"start": 448.7, "end": 449.06, "word": " replace", "probability": 0.9541015625}, {"start": 449.06, "end": 450.18, "word": " z", "probability": 0.6337890625}, {"start": 450.18, "end": 450.48, "word": " by", "probability": 0.9658203125}, {"start": 450.48, "end": 450.86, "word": " t,", "probability": 0.79736328125}, {"start": 451.18, "end": 451.46, "word": " sigma", "probability": 0.8662109375}, {"start": 451.46, "end": 451.72, "word": " by", "probability": 0.9736328125}, {"start": 451.72, "end": 452.06, "word": " s.", "probability": 0.8671875}, {"start": 452.6, "end": 452.96, "word": " That's", "probability": 0.9365234375}, {"start": 452.96, "end": 453.18, "word": " all.", "probability": 0.94775390625}, {"start": 453.7, "end": 453.98, "word": " Here,", "probability": 0.83154296875}, {"start": 454.42, "end": 454.58, "word": " for", "probability": 0.95556640625}, {"start": 454.58, "end": 454.94, "word": " example,", "probability": 0.974609375}, {"start": 455.7, "end": 455.9, "word": " a", "probability": 0.98046875}, {"start": 455.9, "end": 456.12, "word": " phone", "probability": 0.947265625}, {"start": 456.12, "end": 457.18, "word": " industry", "probability": 0.8134765625}, {"start": 457.18, "end": 457.66, "word": " manager", "probability": 0.9462890625}, {"start": 457.66, "end": 458.14, "word": " thinks", "probability": 0.88720703125}, {"start": 458.14, "end": 458.74, "word": " that", "probability": 0.9404296875}, {"start": 458.74, "end": 
459.78, "word": " customer", "probability": 0.76708984375}, {"start": 459.78, "end": 460.22, "word": " monthly", "probability": 0.2366943359375}, {"start": 460.22, "end": 460.64, "word": " cell", "probability": 0.53564453125}, {"start": 460.64, "end": 460.98, "word": " phone", "probability": 0.8974609375}, {"start": 460.98, "end": 461.68, "word": " bills", "probability": 0.77001953125}, {"start": 461.68, "end": 462.02, "word": " have", "probability": 0.94580078125}, {"start": 462.02, "end": 462.6, "word": " increased.", "probability": 0.9560546875}], "temperature": 1.0}, {"id": 18, "seek": 48067, "start": 463.99, "end": 480.67, "text": " So he thought that the bills have increased. Now, by using this statement, we can figure out the alternative hypothesis. It's aborted. And now, average over $52 per month.", "tokens": [407, 415, 1194, 300, 264, 12433, 362, 6505, 13, 823, 11, 538, 1228, 341, 5629, 11, 321, 393, 2573, 484, 264, 8535, 17291, 13, 467, 311, 410, 14813, 13, 400, 586, 11, 4274, 670, 1848, 17602, 680, 1618, 13], "avg_logprob": -0.26171875074505807, "compression_ratio": 1.2463768115942029, "no_speech_prob": 0.0, "words": [{"start": 463.99, "end": 464.21, "word": " So", "probability": 0.89111328125}, {"start": 464.21, "end": 464.43, "word": " he", "probability": 0.8671875}, {"start": 464.43, "end": 464.79, "word": " thought", "probability": 0.87158203125}, {"start": 464.79, "end": 465.07, "word": " that", "probability": 0.8720703125}, {"start": 465.07, "end": 466.13, "word": " the", "probability": 0.900390625}, {"start": 466.13, "end": 466.49, "word": " bills", "probability": 0.7998046875}, {"start": 466.49, "end": 467.23, "word": " have", "probability": 0.6845703125}, {"start": 467.23, "end": 467.73, "word": " increased.", "probability": 0.95068359375}, {"start": 468.65, "end": 469.03, "word": " Now,", "probability": 0.9091796875}, {"start": 469.11, "end": 469.25, "word": " by", "probability": 0.96630859375}, {"start": 469.25, "end": 469.49, "word": " 
using", "probability": 0.92431640625}, {"start": 469.49, "end": 469.93, "word": " this", "probability": 0.93310546875}, {"start": 469.93, "end": 470.67, "word": " statement,", "probability": 0.91064453125}, {"start": 470.95, "end": 471.23, "word": " we", "probability": 0.95947265625}, {"start": 471.23, "end": 471.65, "word": " can", "probability": 0.9443359375}, {"start": 471.65, "end": 472.01, "word": " figure", "probability": 0.96826171875}, {"start": 472.01, "end": 472.33, "word": " out", "probability": 0.89013671875}, {"start": 472.33, "end": 472.49, "word": " the", "probability": 0.517578125}, {"start": 472.49, "end": 472.87, "word": " alternative", "probability": 0.9619140625}, {"start": 472.87, "end": 473.41, "word": " hypothesis.", "probability": 0.85595703125}, {"start": 473.99, "end": 474.79, "word": " It's", "probability": 0.840087890625}, {"start": 474.79, "end": 475.19, "word": " aborted.", "probability": 0.34124755859375}, {"start": 476.05, "end": 476.85, "word": " And", "probability": 0.76220703125}, {"start": 476.85, "end": 477.23, "word": " now,", "probability": 0.8466796875}, {"start": 477.35, "end": 477.77, "word": " average", "probability": 0.73388671875}, {"start": 477.77, "end": 478.35, "word": " over", "probability": 0.8017578125}, {"start": 478.35, "end": 479.07, "word": " $52", "probability": 0.890869140625}, {"start": 479.07, "end": 480.15, "word": " per", "probability": 0.5888671875}, {"start": 480.15, "end": 480.67, "word": " month.", "probability": 0.9423828125}], "temperature": 1.0}, {"id": 19, "seek": 50833, "start": 484.33, "end": 508.33, "text": " It says that he thinks that the customer monthly cellphone bills have increased. Increased means mu is above $52. The company wishes to test this claim. 
Here we are assuming the population is normally distributed.", "tokens": [467, 1619, 300, 415, 7309, 300, 264, 5474, 12878, 42524, 12433, 362, 6505, 13, 30367, 1937, 1355, 2992, 307, 3673, 1848, 17602, 13, 440, 2237, 15065, 281, 1500, 341, 3932, 13, 1692, 321, 366, 11926, 264, 4415, 307, 5646, 12631, 13], "avg_logprob": -0.16489955995764052, "compression_ratio": 1.3896103896103895, "no_speech_prob": 0.0, "words": [{"start": 484.33, "end": 484.73, "word": " It", "probability": 0.77392578125}, {"start": 484.73, "end": 485.21, "word": " says", "probability": 0.89306640625}, {"start": 485.21, "end": 485.69, "word": " that", "probability": 0.919921875}, {"start": 485.69, "end": 486.79, "word": " he", "probability": 0.55224609375}, {"start": 486.79, "end": 487.93, "word": " thinks", "probability": 0.890625}, {"start": 487.93, "end": 488.39, "word": " that", "probability": 0.90673828125}, {"start": 488.39, "end": 489.23, "word": " the", "probability": 0.90380859375}, {"start": 489.23, "end": 489.79, "word": " customer", "probability": 0.78125}, {"start": 489.79, "end": 491.09, "word": " monthly", "probability": 0.7861328125}, {"start": 491.09, "end": 491.65, "word": " cellphone", "probability": 0.494140625}, {"start": 491.65, "end": 492.17, "word": " bills", "probability": 0.79345703125}, {"start": 492.17, "end": 492.49, "word": " have", "probability": 0.9462890625}, {"start": 492.49, "end": 493.05, "word": " increased.", "probability": 0.9609375}, {"start": 493.81, "end": 494.23, "word": " Increased", "probability": 0.861328125}, {"start": 494.23, "end": 494.53, "word": " means", "probability": 0.89892578125}, {"start": 494.53, "end": 494.85, "word": " mu", "probability": 0.3388671875}, {"start": 494.85, "end": 495.07, "word": " is", "probability": 0.94384765625}, {"start": 495.07, "end": 495.53, "word": " above", "probability": 0.9560546875}, {"start": 495.53, "end": 497.71, "word": " $52.", "probability": 0.827880859375}, {"start": 498.55, "end": 499.51, "word": " 
The", "probability": 0.900390625}, {"start": 499.51, "end": 499.95, "word": " company", "probability": 0.90869140625}, {"start": 499.95, "end": 500.27, "word": " wishes", "probability": 0.86181640625}, {"start": 500.27, "end": 500.93, "word": " to", "probability": 0.96875}, {"start": 500.93, "end": 501.33, "word": " test", "probability": 0.88525390625}, {"start": 501.33, "end": 501.63, "word": " this", "probability": 0.88720703125}, {"start": 501.63, "end": 501.95, "word": " claim.", "probability": 0.9560546875}, {"start": 502.67, "end": 502.89, "word": " Here", "probability": 0.86376953125}, {"start": 502.89, "end": 503.05, "word": " we", "probability": 0.6201171875}, {"start": 503.05, "end": 503.19, "word": " are", "probability": 0.93505859375}, {"start": 503.19, "end": 503.65, "word": " assuming", "probability": 0.908203125}, {"start": 503.65, "end": 505.29, "word": " the", "probability": 0.89306640625}, {"start": 505.29, "end": 506.35, "word": " population", "probability": 0.93701171875}, {"start": 506.35, "end": 506.85, "word": " is", "probability": 0.9365234375}, {"start": 506.85, "end": 507.27, "word": " normally", "probability": 0.908203125}, {"start": 507.27, "end": 508.33, "word": " distributed.", "probability": 0.93115234375}], "temperature": 1.0}, {"id": 20, "seek": 53877, "start": 509.27, "end": 538.77, "text": " So first step, we have to state the appropriate null and alternative hypothesis. So based on this problem, we can easily figure out that we are testing mu smaller than or equal to 52 against the alternative, mu is above 52. So again, mu is smaller than versus mu is above. 
So this is step number one.", "tokens": [407, 700, 1823, 11, 321, 362, 281, 1785, 264, 6854, 18184, 293, 8535, 17291, 13, 407, 2361, 322, 341, 1154, 11, 321, 393, 3612, 2573, 484, 300, 321, 366, 4997, 2992, 4356, 813, 420, 2681, 281, 18079, 1970, 264, 8535, 11, 2992, 307, 3673, 18079, 13, 407, 797, 11, 2992, 307, 4356, 813, 5717, 2992, 307, 3673, 13, 407, 341, 307, 1823, 1230, 472, 13], "avg_logprob": -0.17069129329739194, "compression_ratio": 1.644808743169399, "no_speech_prob": 0.0, "words": [{"start": 509.27, "end": 509.57, "word": " So", "probability": 0.8525390625}, {"start": 509.57, "end": 509.95, "word": " first", "probability": 0.611328125}, {"start": 509.95, "end": 510.13, "word": " step,", "probability": 0.59326171875}, {"start": 510.21, "end": 510.27, "word": " we", "probability": 0.95556640625}, {"start": 510.27, "end": 510.45, "word": " have", "probability": 0.94189453125}, {"start": 510.45, "end": 510.59, "word": " to", "probability": 0.96337890625}, {"start": 510.59, "end": 510.91, "word": " state", "probability": 0.9150390625}, {"start": 510.91, "end": 512.05, "word": " the", "probability": 0.8505859375}, {"start": 512.05, "end": 512.71, "word": " appropriate", "probability": 0.82763671875}, {"start": 512.71, "end": 513.15, "word": " null", "probability": 0.88037109375}, {"start": 513.15, "end": 513.37, "word": " and", "probability": 0.89990234375}, {"start": 513.37, "end": 513.91, "word": " alternative", "probability": 0.87744140625}, {"start": 513.91, "end": 514.29, "word": " hypothesis.", "probability": 0.92138671875}, {"start": 514.89, "end": 515.07, "word": " So", "probability": 0.93798828125}, {"start": 515.07, "end": 515.37, "word": " based", "probability": 0.86474609375}, {"start": 515.37, "end": 515.59, "word": " on", "probability": 0.94677734375}, {"start": 515.59, "end": 515.85, "word": " this", "probability": 0.9404296875}, {"start": 515.85, "end": 516.27, "word": " problem,", "probability": 0.89013671875}, {"start": 516.43, "end": 
516.57, "word": " we", "probability": 0.90087890625}, {"start": 516.57, "end": 516.83, "word": " can", "probability": 0.94091796875}, {"start": 516.83, "end": 517.49, "word": " easily", "probability": 0.91845703125}, {"start": 517.49, "end": 517.87, "word": " figure", "probability": 0.97021484375}, {"start": 517.87, "end": 518.19, "word": " out", "probability": 0.880859375}, {"start": 518.19, "end": 518.51, "word": " that", "probability": 0.92626953125}, {"start": 518.51, "end": 519.33, "word": " we", "probability": 0.919921875}, {"start": 519.33, "end": 519.49, "word": " are", "probability": 0.9248046875}, {"start": 519.49, "end": 519.89, "word": " testing", "probability": 0.833984375}, {"start": 519.89, "end": 520.27, "word": " mu", "probability": 0.308349609375}, {"start": 520.27, "end": 521.01, "word": " smaller", "probability": 0.7412109375}, {"start": 521.01, "end": 521.31, "word": " than", "probability": 0.93798828125}, {"start": 521.31, "end": 521.45, "word": " or", "probability": 0.771484375}, {"start": 521.45, "end": 521.79, "word": " equal", "probability": 0.9150390625}, {"start": 521.79, "end": 521.99, "word": " to", "probability": 0.52978515625}, {"start": 521.99, "end": 522.47, "word": " 52", "probability": 0.96484375}, {"start": 522.47, "end": 523.61, "word": " against", "probability": 0.7265625}, {"start": 523.61, "end": 523.89, "word": " the", "probability": 0.83251953125}, {"start": 523.89, "end": 524.33, "word": " alternative,", "probability": 0.9208984375}, {"start": 524.97, "end": 525.27, "word": " mu", "probability": 0.8974609375}, {"start": 525.27, "end": 525.67, "word": " is", "probability": 0.931640625}, {"start": 525.67, "end": 526.07, "word": " above", "probability": 0.95068359375}, {"start": 526.07, "end": 526.87, "word": " 52.", "probability": 0.8876953125}, {"start": 527.57, "end": 527.81, "word": " So", "probability": 0.951171875}, {"start": 527.81, "end": 528.15, "word": " again,", "probability": 0.90380859375}, {"start": 529.15, 
"end": 529.61, "word": " mu", "probability": 0.91357421875}, {"start": 529.61, "end": 530.27, "word": " is", "probability": 0.71484375}, {"start": 530.27, "end": 530.53, "word": " smaller", "probability": 0.87744140625}, {"start": 530.53, "end": 530.95, "word": " than", "probability": 0.9306640625}, {"start": 530.95, "end": 534.07, "word": " versus", "probability": 0.90771484375}, {"start": 534.07, "end": 535.81, "word": " mu", "probability": 0.9228515625}, {"start": 535.81, "end": 536.55, "word": " is", "probability": 0.9365234375}, {"start": 536.55, "end": 537.11, "word": " above.", "probability": 0.962890625}, {"start": 537.43, "end": 537.83, "word": " So", "probability": 0.94580078125}, {"start": 537.83, "end": 538.03, "word": " this", "probability": 0.8408203125}, {"start": 538.03, "end": 538.15, "word": " is", "probability": 0.8984375}, {"start": 538.15, "end": 538.33, "word": " step", "probability": 0.91455078125}, {"start": 538.33, "end": 538.55, "word": " number", "probability": 0.9453125}, {"start": 538.55, "end": 538.77, "word": " one.", "probability": 0.75341796875}], "temperature": 1.0}, {"id": 21, "seek": 56767, "start": 540.18, "end": 567.68, "text": " So we have to form or to state a null and alternative hypothesis. So the mu above 52 means the average is greater than $52 per month. Now suppose, for example, my alpha is 10%. And we choose a random sample of size 25. 
Now by using this information, we can determine the rejection region.", "tokens": [407, 321, 362, 281, 1254, 420, 281, 1785, 257, 18184, 293, 8535, 17291, 13, 407, 264, 2992, 3673, 18079, 1355, 264, 4274, 307, 5044, 813, 1848, 17602, 680, 1618, 13, 823, 7297, 11, 337, 1365, 11, 452, 8961, 307, 1266, 6856, 400, 321, 2826, 257, 4974, 6889, 295, 2744, 3552, 13, 823, 538, 1228, 341, 1589, 11, 321, 393, 6997, 264, 26044, 4458, 13], "avg_logprob": -0.1753605769230769, "compression_ratio": 1.4522613065326633, "no_speech_prob": 0.0, "words": [{"start": 540.18, "end": 540.42, "word": " So", "probability": 0.89697265625}, {"start": 540.42, "end": 540.58, "word": " we", "probability": 0.81591796875}, {"start": 540.58, "end": 540.76, "word": " have", "probability": 0.947265625}, {"start": 540.76, "end": 540.9, "word": " to", "probability": 0.908203125}, {"start": 540.9, "end": 541.34, "word": " form", "probability": 0.9267578125}, {"start": 541.34, "end": 541.94, "word": " or", "probability": 0.47802734375}, {"start": 541.94, "end": 542.14, "word": " to", "probability": 0.6865234375}, {"start": 542.14, "end": 542.56, "word": " state", "probability": 0.93359375}, {"start": 542.56, "end": 542.76, "word": " a", "probability": 0.19580078125}, {"start": 542.76, "end": 542.98, "word": " null", "probability": 0.95068359375}, {"start": 542.98, "end": 543.22, "word": " and", "probability": 0.8447265625}, {"start": 543.22, "end": 543.74, "word": " alternative", "probability": 0.9072265625}, {"start": 543.74, "end": 544.52, "word": " hypothesis.", "probability": 0.609375}, {"start": 545.98, "end": 546.34, "word": " So", "probability": 0.92822265625}, {"start": 546.34, "end": 546.84, "word": " the", "probability": 0.568359375}, {"start": 546.84, "end": 547.2, "word": " mu", "probability": 0.58642578125}, {"start": 547.2, "end": 547.82, "word": " above", "probability": 0.9130859375}, {"start": 547.82, "end": 548.3, "word": " 52", "probability": 0.974609375}, {"start": 548.3, "end": 548.76, 
"word": " means", "probability": 0.70458984375}, {"start": 548.76, "end": 549.0, "word": " the", "probability": 0.8701171875}, {"start": 549.0, "end": 549.4, "word": " average", "probability": 0.7900390625}, {"start": 549.4, "end": 549.64, "word": " is", "probability": 0.94091796875}, {"start": 549.64, "end": 549.98, "word": " greater", "probability": 0.904296875}, {"start": 549.98, "end": 550.5, "word": " than", "probability": 0.9443359375}, {"start": 550.5, "end": 551.86, "word": " $52", "probability": 0.773193359375}, {"start": 551.86, "end": 552.5, "word": " per", "probability": 0.84033203125}, {"start": 552.5, "end": 552.78, "word": " month.", "probability": 0.93359375}, {"start": 554.68, "end": 554.98, "word": " Now", "probability": 0.93994140625}, {"start": 554.98, "end": 555.26, "word": " suppose,", "probability": 0.7666015625}, {"start": 555.4, "end": 555.46, "word": " for", "probability": 0.9521484375}, {"start": 555.46, "end": 555.84, "word": " example,", "probability": 0.974609375}, {"start": 556.0, "end": 556.32, "word": " my", "probability": 0.96044921875}, {"start": 556.32, "end": 556.64, "word": " alpha", "probability": 0.75830078125}, {"start": 556.64, "end": 556.92, "word": " is", "probability": 0.9501953125}, {"start": 556.92, "end": 557.4, "word": " 10%.", "probability": 0.805419921875}, {"start": 557.4, "end": 560.82, "word": " And", "probability": 0.95166015625}, {"start": 560.82, "end": 561.06, "word": " we", "probability": 0.923828125}, {"start": 561.06, "end": 561.38, "word": " choose", "probability": 0.78564453125}, {"start": 561.38, "end": 561.58, "word": " a", "probability": 0.72900390625}, {"start": 561.58, "end": 561.82, "word": " random", "probability": 0.92138671875}, {"start": 561.82, "end": 562.24, "word": " sample", "probability": 0.88818359375}, {"start": 562.24, "end": 562.48, "word": " of", "probability": 0.94140625}, {"start": 562.48, "end": 562.88, "word": " size", "probability": 0.87255859375}, {"start": 562.88, "end": 
563.38, "word": " 25.", "probability": 0.86328125}, {"start": 564.16, "end": 564.8, "word": " Now", "probability": 0.9638671875}, {"start": 564.8, "end": 564.96, "word": " by", "probability": 0.65283203125}, {"start": 564.96, "end": 565.16, "word": " using", "probability": 0.93310546875}, {"start": 565.16, "end": 565.36, "word": " this", "probability": 0.94873046875}, {"start": 565.36, "end": 565.74, "word": " information,", "probability": 0.865234375}, {"start": 566.08, "end": 566.22, "word": " we", "probability": 0.96044921875}, {"start": 566.22, "end": 566.44, "word": " can", "probability": 0.9267578125}, {"start": 566.44, "end": 566.82, "word": " determine", "probability": 0.9482421875}, {"start": 566.82, "end": 567.04, "word": " the", "probability": 0.90771484375}, {"start": 567.04, "end": 567.3, "word": " rejection", "probability": 0.97412109375}, {"start": 567.3, "end": 567.68, "word": " region.", "probability": 0.91455078125}], "temperature": 1.0}, {"id": 22, "seek": 59449, "start": 569.09, "end": 594.49, "text": " Now, the problem mentioned that sigma is unknown, and the population is normally distributed. In this case, we have to use T. Now, my rejection region, since we are talking about testing upper tilted, so my rejection region should be to the right side, T alpha. It's above. 
Now, T alpha, it means T 10%.", "tokens": [823, 11, 264, 1154, 2835, 300, 12771, 307, 9841, 11, 293, 264, 4415, 307, 5646, 12631, 13, 682, 341, 1389, 11, 321, 362, 281, 764, 314, 13, 823, 11, 452, 26044, 4458, 11, 1670, 321, 366, 1417, 466, 4997, 6597, 43229, 11, 370, 452, 26044, 4458, 820, 312, 281, 264, 558, 1252, 11, 314, 8961, 13, 467, 311, 3673, 13, 823, 11, 314, 8961, 11, 309, 1355, 314, 1266, 6856], "avg_logprob": -0.22304138037520396, "compression_ratio": 1.5431472081218274, "no_speech_prob": 0.0, "words": [{"start": 569.09, "end": 569.65, "word": " Now,", "probability": 0.61181640625}, {"start": 569.95, "end": 570.33, "word": " the", "probability": 0.865234375}, {"start": 570.33, "end": 570.67, "word": " problem", "probability": 0.8447265625}, {"start": 570.67, "end": 571.15, "word": " mentioned", "probability": 0.6455078125}, {"start": 571.15, "end": 571.51, "word": " that", "probability": 0.82275390625}, {"start": 571.51, "end": 571.95, "word": " sigma", "probability": 0.58740234375}, {"start": 571.95, "end": 572.17, "word": " is", "probability": 0.9482421875}, {"start": 572.17, "end": 572.61, "word": " unknown,", "probability": 0.60791015625}, {"start": 573.01, "end": 573.13, "word": " and", "probability": 0.93017578125}, {"start": 573.13, "end": 573.23, "word": " the", "probability": 0.89453125}, {"start": 573.23, "end": 573.63, "word": " population", "probability": 0.9560546875}, {"start": 573.63, "end": 573.87, "word": " is", "probability": 0.94970703125}, {"start": 573.87, "end": 574.25, "word": " normally", "probability": 0.89013671875}, {"start": 574.25, "end": 574.93, "word": " distributed.", "probability": 0.923828125}, {"start": 575.77, "end": 576.45, "word": " In", "probability": 0.95361328125}, {"start": 576.45, "end": 576.69, "word": " this", "probability": 0.94580078125}, {"start": 576.69, "end": 577.03, "word": " case,", "probability": 0.91455078125}, {"start": 577.11, "end": 577.23, "word": " we", "probability": 0.94482421875}, {"start": 
577.23, "end": 577.43, "word": " have", "probability": 0.94287109375}, {"start": 577.43, "end": 577.53, "word": " to", "probability": 0.966796875}, {"start": 577.53, "end": 577.77, "word": " use", "probability": 0.890625}, {"start": 577.77, "end": 578.11, "word": " T.", "probability": 0.74169921875}, {"start": 578.85, "end": 579.49, "word": " Now,", "probability": 0.947265625}, {"start": 579.57, "end": 579.71, "word": " my", "probability": 0.93994140625}, {"start": 579.71, "end": 580.07, "word": " rejection", "probability": 0.966796875}, {"start": 580.07, "end": 580.37, "word": " region,", "probability": 0.72412109375}, {"start": 580.49, "end": 580.69, "word": " since", "probability": 0.84423828125}, {"start": 580.69, "end": 580.81, "word": " we", "probability": 0.95703125}, {"start": 580.81, "end": 580.93, "word": " are", "probability": 0.896484375}, {"start": 580.93, "end": 581.29, "word": " talking", "probability": 0.85009765625}, {"start": 581.29, "end": 581.75, "word": " about", "probability": 0.90625}, {"start": 581.75, "end": 582.41, "word": " testing", "probability": 0.828125}, {"start": 582.41, "end": 583.43, "word": " upper", "probability": 0.5400390625}, {"start": 583.43, "end": 583.83, "word": " tilted,", "probability": 0.5830078125}, {"start": 584.43, "end": 584.99, "word": " so", "probability": 0.892578125}, {"start": 584.99, "end": 585.35, "word": " my", "probability": 0.9228515625}, {"start": 585.35, "end": 585.71, "word": " rejection", "probability": 0.958984375}, {"start": 585.71, "end": 586.01, "word": " region", "probability": 0.8876953125}, {"start": 586.01, "end": 586.29, "word": " should", "probability": 0.9697265625}, {"start": 586.29, "end": 587.23, "word": " be", "probability": 0.9482421875}, {"start": 587.23, "end": 587.47, "word": " to", "probability": 0.638671875}, {"start": 587.47, "end": 587.63, "word": " the", "probability": 0.91748046875}, {"start": 587.63, "end": 587.89, "word": " right", "probability": 0.927734375}, {"start": 
587.89, "end": 588.27, "word": " side,", "probability": 0.841796875}, {"start": 588.61, "end": 588.87, "word": " T", "probability": 0.80078125}, {"start": 588.87, "end": 589.13, "word": " alpha.", "probability": 0.67333984375}, {"start": 590.63, "end": 591.21, "word": " It's", "probability": 0.726318359375}, {"start": 591.21, "end": 591.55, "word": " above.", "probability": 0.9111328125}, {"start": 591.79, "end": 592.39, "word": " Now,", "probability": 0.92724609375}, {"start": 592.63, "end": 592.95, "word": " T", "probability": 0.96533203125}, {"start": 592.95, "end": 593.27, "word": " alpha,", "probability": 0.90576171875}, {"start": 593.41, "end": 593.49, "word": " it", "probability": 0.84814453125}, {"start": 593.49, "end": 593.73, "word": " means", "probability": 0.919921875}, {"start": 593.73, "end": 593.95, "word": " T", "probability": 0.85009765625}, {"start": 593.95, "end": 594.49, "word": " 10%.", "probability": 0.4945068359375}], "temperature": 1.0}, {"id": 23, "seek": 62334, "start": 595.7, "end": 623.34, "text": " And degrees of freedom, 24. I think many times we used the T table. So by using T table, you can find that your critical value is 1.318. So that's your critical value. 
Now, my decision is, if T statistic", "tokens": [400, 5310, 295, 5645, 11, 4022, 13, 286, 519, 867, 1413, 321, 1143, 264, 314, 3199, 13, 407, 538, 1228, 314, 3199, 11, 291, 393, 915, 300, 428, 4924, 2158, 307, 502, 13, 18, 6494, 13, 407, 300, 311, 428, 4924, 2158, 13, 823, 11, 452, 3537, 307, 11, 498, 314, 29588], "avg_logprob": -0.22243513476173832, "compression_ratio": 1.3783783783783783, "no_speech_prob": 0.0, "words": [{"start": 595.7, "end": 596.1, "word": " And", "probability": 0.29052734375}, {"start": 596.1, "end": 596.4, "word": " degrees", "probability": 0.9296875}, {"start": 596.4, "end": 596.6, "word": " of", "probability": 0.9599609375}, {"start": 596.6, "end": 596.88, "word": " freedom,", "probability": 0.95068359375}, {"start": 597.16, "end": 598.08, "word": " 24.", "probability": 0.849609375}, {"start": 598.98, "end": 599.16, "word": " I", "probability": 0.96240234375}, {"start": 599.16, "end": 599.3, "word": " think", "probability": 0.9150390625}, {"start": 599.3, "end": 599.5, "word": " many", "probability": 0.890625}, {"start": 599.5, "end": 599.82, "word": " times", "probability": 0.9208984375}, {"start": 599.82, "end": 600.0, "word": " we", "probability": 0.90283203125}, {"start": 600.0, "end": 600.4, "word": " used", "probability": 0.7939453125}, {"start": 600.4, "end": 600.78, "word": " the", "probability": 0.482666015625}, {"start": 600.78, "end": 600.92, "word": " T", "probability": 0.71337890625}, {"start": 600.92, "end": 601.24, "word": " table.", "probability": 0.471435546875}, {"start": 602.18, "end": 602.32, "word": " So", "probability": 0.92626953125}, {"start": 602.32, "end": 602.52, "word": " by", "probability": 0.8603515625}, {"start": 602.52, "end": 602.94, "word": " using", "probability": 0.923828125}, {"start": 602.94, "end": 603.52, "word": " T", "probability": 0.51025390625}, {"start": 603.52, "end": 603.84, "word": " table,", "probability": 0.87890625}, {"start": 603.96, "end": 604.06, "word": " you", "probability": 
0.95849609375}, {"start": 604.06, "end": 604.44, "word": " can", "probability": 0.94482421875}, {"start": 604.44, "end": 605.58, "word": " find", "probability": 0.86572265625}, {"start": 605.58, "end": 605.92, "word": " that", "probability": 0.78759765625}, {"start": 605.92, "end": 606.66, "word": " your", "probability": 0.86376953125}, {"start": 606.66, "end": 607.5, "word": " critical", "probability": 0.92333984375}, {"start": 607.5, "end": 608.02, "word": " value", "probability": 0.982421875}, {"start": 608.02, "end": 609.64, "word": " is", "probability": 0.91552734375}, {"start": 609.64, "end": 609.86, "word": " 1", "probability": 0.9677734375}, {"start": 609.86, "end": 610.58, "word": ".318.", "probability": 0.9777018229166666}, {"start": 612.26, "end": 612.6, "word": " So", "probability": 0.74658203125}, {"start": 612.6, "end": 612.98, "word": " that's", "probability": 0.95361328125}, {"start": 612.98, "end": 613.26, "word": " your", "probability": 0.88916015625}, {"start": 613.26, "end": 613.76, "word": " critical", "probability": 0.9208984375}, {"start": 613.76, "end": 614.24, "word": " value.", "probability": 0.98388671875}, {"start": 616.54, "end": 616.86, "word": " Now,", "probability": 0.8974609375}, {"start": 618.22, "end": 620.24, "word": " my", "probability": 0.95703125}, {"start": 620.24, "end": 620.64, "word": " decision", "probability": 0.91650390625}, {"start": 620.64, "end": 621.04, "word": " is,", "probability": 0.93017578125}, {"start": 622.18, "end": 622.5, "word": " if", "probability": 0.93701171875}, {"start": 622.5, "end": 622.8, "word": " T", "probability": 0.66015625}, {"start": 622.8, "end": 623.34, "word": " statistic", "probability": 0.7626953125}], "temperature": 1.0}, {"id": 24, "seek": 64688, "start": 625.08, "end": 646.88, "text": " If the value of T statistic lies in this rejection region, we reject the null hypothesis. So if T statistic is above 1.38, then we reject the null hypothesis. 
So let's see if we reject it.", "tokens": [759, 264, 2158, 295, 314, 29588, 9134, 294, 341, 26044, 4458, 11, 321, 8248, 264, 18184, 17291, 13, 407, 498, 314, 29588, 307, 3673, 502, 13, 12625, 11, 550, 321, 8248, 264, 18184, 17291, 13, 407, 718, 311, 536, 498, 321, 8248, 309, 13], "avg_logprob": -0.25607639418707956, "compression_ratio": 1.6016949152542372, "no_speech_prob": 0.0, "words": [{"start": 625.08, "end": 625.36, "word": " If", "probability": 0.38623046875}, {"start": 625.36, "end": 625.46, "word": " the", "probability": 0.658203125}, {"start": 625.46, "end": 625.74, "word": " value", "probability": 0.96484375}, {"start": 625.74, "end": 625.88, "word": " of", "probability": 0.87451171875}, {"start": 625.88, "end": 625.98, "word": " T", "probability": 0.5283203125}, {"start": 625.98, "end": 626.44, "word": " statistic", "probability": 0.70751953125}, {"start": 626.44, "end": 627.72, "word": " lies", "probability": 0.83251953125}, {"start": 627.72, "end": 628.22, "word": " in", "probability": 0.9111328125}, {"start": 628.22, "end": 628.5, "word": " this", "probability": 0.845703125}, {"start": 628.5, "end": 628.96, "word": " rejection", "probability": 0.91064453125}, {"start": 628.96, "end": 629.48, "word": " region,", "probability": 0.94287109375}, {"start": 630.42, "end": 630.6, "word": " we", "probability": 0.69775390625}, {"start": 630.6, "end": 631.02, "word": " reject", "probability": 0.89990234375}, {"start": 631.02, "end": 631.16, "word": " the", "probability": 0.5390625}, {"start": 631.16, "end": 631.3, "word": " null", "probability": 0.994140625}, {"start": 631.3, "end": 631.74, "word": " hypothesis.", "probability": 0.8916015625}, {"start": 632.22, "end": 632.44, "word": " So", "probability": 0.82275390625}, {"start": 632.44, "end": 632.6, "word": " if", "probability": 0.80419921875}, {"start": 632.6, "end": 632.76, "word": " T", "probability": 0.91650390625}, {"start": 632.76, "end": 633.3, "word": " statistic", "probability": 0.85791015625}, 
{"start": 633.3, "end": 633.68, "word": " is", "probability": 0.27197265625}, {"start": 633.68, "end": 635.84, "word": " above", "probability": 0.85205078125}, {"start": 635.84, "end": 637.16, "word": " 1", "probability": 0.95068359375}, {"start": 637.16, "end": 637.82, "word": ".38,", "probability": 0.991943359375}, {"start": 638.26, "end": 638.82, "word": " then", "probability": 0.393310546875}, {"start": 638.82, "end": 640.88, "word": " we", "probability": 0.86865234375}, {"start": 640.88, "end": 642.64, "word": " reject", "probability": 0.94970703125}, {"start": 642.64, "end": 644.48, "word": " the", "probability": 0.84033203125}, {"start": 644.48, "end": 644.66, "word": " null", "probability": 0.96728515625}, {"start": 644.66, "end": 645.14, "word": " hypothesis.", "probability": 0.861328125}, {"start": 645.54, "end": 645.76, "word": " So", "probability": 0.8623046875}, {"start": 645.76, "end": 645.98, "word": " let's", "probability": 0.914306640625}, {"start": 645.98, "end": 646.16, "word": " see", "probability": 0.70166015625}, {"start": 646.16, "end": 646.3, "word": " if", "probability": 0.9306640625}, {"start": 646.3, "end": 646.46, "word": " we", "probability": 0.9306640625}, {"start": 646.46, "end": 646.78, "word": " reject", "probability": 0.923828125}, {"start": 646.78, "end": 646.88, "word": " it.", "probability": 0.2496337890625}], "temperature": 1.0}, {"id": 25, "seek": 67513, "start": 647.29, "end": 675.13, "text": " or don't reject. So we reject the null hypothesis if your t-statistic value is above or is greater than 1.318. Now suppose for example a sample is taken with the following results. N is 25. So we select a random sample of size 25. 
This sample gives an average of 53.1", "tokens": [420, 500, 380, 8248, 13, 407, 321, 8248, 264, 18184, 17291, 498, 428, 256, 12, 19435, 3142, 2158, 307, 3673, 420, 307, 5044, 813, 502, 13, 18, 6494, 13, 823, 7297, 337, 1365, 257, 6889, 307, 2726, 365, 264, 3480, 3542, 13, 426, 307, 3552, 13, 407, 321, 3048, 257, 4974, 6889, 295, 2744, 3552, 13, 639, 6889, 2709, 364, 4274, 295, 21860, 13, 16], "avg_logprob": -0.2459753778847781, "compression_ratio": 1.4725274725274726, "no_speech_prob": 0.0, "words": [{"start": 647.29, "end": 647.55, "word": " or", "probability": 0.2275390625}, {"start": 647.55, "end": 647.77, "word": " don't", "probability": 0.854736328125}, {"start": 647.77, "end": 647.99, "word": " reject.", "probability": 0.77685546875}, {"start": 648.09, "end": 648.19, "word": " So", "probability": 0.7451171875}, {"start": 648.19, "end": 648.33, "word": " we", "probability": 0.7431640625}, {"start": 648.33, "end": 648.63, "word": " reject", "probability": 0.87890625}, {"start": 648.63, "end": 648.79, "word": " the", "probability": 0.46044921875}, {"start": 648.79, "end": 648.87, "word": " null", "probability": 0.97802734375}, {"start": 648.87, "end": 649.35, "word": " hypothesis", "probability": 0.9619140625}, {"start": 649.35, "end": 650.15, "word": " if", "probability": 0.78857421875}, {"start": 650.15, "end": 650.39, "word": " your", "probability": 0.63330078125}, {"start": 650.39, "end": 650.57, "word": " t", "probability": 0.398193359375}, {"start": 650.57, "end": 651.07, "word": "-statistic", "probability": 0.8308919270833334}, {"start": 651.07, "end": 651.85, "word": " value", "probability": 0.923828125}, {"start": 651.85, "end": 652.19, "word": " is", "probability": 0.93310546875}, {"start": 652.19, "end": 652.55, "word": " above", "probability": 0.94287109375}, {"start": 652.55, "end": 652.85, "word": " or", "probability": 0.8427734375}, {"start": 652.85, "end": 653.03, "word": " is", "probability": 0.517578125}, {"start": 653.03, "end": 653.29, 
"word": " greater", "probability": 0.91796875}, {"start": 653.29, "end": 653.71, "word": " than", "probability": 0.9345703125}, {"start": 653.71, "end": 654.39, "word": " 1", "probability": 0.92626953125}, {"start": 654.39, "end": 655.09, "word": ".318.", "probability": 0.9803059895833334}, {"start": 656.27, "end": 656.59, "word": " Now", "probability": 0.927734375}, {"start": 656.59, "end": 657.07, "word": " suppose", "probability": 0.72607421875}, {"start": 657.07, "end": 657.25, "word": " for", "probability": 0.6533203125}, {"start": 657.25, "end": 657.77, "word": " example", "probability": 0.9755859375}, {"start": 657.77, "end": 659.11, "word": " a", "probability": 0.7236328125}, {"start": 659.11, "end": 659.45, "word": " sample", "probability": 0.64453125}, {"start": 659.45, "end": 659.73, "word": " is", "probability": 0.9248046875}, {"start": 659.73, "end": 660.19, "word": " taken", "probability": 0.82421875}, {"start": 660.19, "end": 660.57, "word": " with", "probability": 0.8984375}, {"start": 660.57, "end": 660.75, "word": " the", "probability": 0.89892578125}, {"start": 660.75, "end": 661.03, "word": " following", "probability": 0.8994140625}, {"start": 661.03, "end": 661.69, "word": " results.", "probability": 0.8427734375}, {"start": 664.69, "end": 665.01, "word": " N", "probability": 0.43115234375}, {"start": 665.01, "end": 665.13, "word": " is", "probability": 0.90966796875}, {"start": 665.13, "end": 665.61, "word": " 25.", "probability": 0.85205078125}, {"start": 666.25, "end": 666.81, "word": " So", "probability": 0.53466796875}, {"start": 666.81, "end": 666.95, "word": " we", "probability": 0.90185546875}, {"start": 666.95, "end": 667.27, "word": " select", "probability": 0.8583984375}, {"start": 667.27, "end": 667.41, "word": " a", "probability": 0.970703125}, {"start": 667.41, "end": 667.61, "word": " random", "probability": 0.82568359375}, {"start": 667.61, "end": 668.03, "word": " sample", "probability": 0.8984375}, {"start": 668.03, "end": 
668.25, "word": " of", "probability": 0.876953125}, {"start": 668.25, "end": 668.57, "word": " size", "probability": 0.85888671875}, {"start": 668.57, "end": 669.13, "word": " 25.", "probability": 0.95849609375}, {"start": 669.91, "end": 670.25, "word": " This", "probability": 0.81884765625}, {"start": 670.25, "end": 670.65, "word": " sample", "probability": 0.908203125}, {"start": 670.65, "end": 671.17, "word": " gives", "probability": 0.91015625}, {"start": 671.17, "end": 671.67, "word": " an", "probability": 0.94384765625}, {"start": 671.67, "end": 672.05, "word": " average", "probability": 0.80224609375}, {"start": 672.05, "end": 672.51, "word": " of", "probability": 0.9638671875}, {"start": 672.51, "end": 674.39, "word": " 53", "probability": 0.94091796875}, {"start": 674.39, "end": 675.13, "word": ".1", "probability": 0.98779296875}], "temperature": 1.0}, {"id": 26, "seek": 69933, "start": 675.99, "end": 699.33, "text": " And sample standard deviation of 10. Now by using this information, we can determine easily the value of T statistic. So T stat, the formula as we discussed in chapter 8, x bar minus b divided by s over root, and here we replace sigma by s because sigma is unknown. 
Straightforward calculation gives", "tokens": [400, 6889, 3832, 25163, 295, 1266, 13, 823, 538, 1228, 341, 1589, 11, 321, 393, 6997, 3612, 264, 2158, 295, 314, 29588, 13, 407, 314, 2219, 11, 264, 8513, 382, 321, 7152, 294, 7187, 1649, 11, 2031, 2159, 3175, 272, 6666, 538, 262, 670, 5593, 11, 293, 510, 321, 7406, 12771, 538, 262, 570, 12771, 307, 9841, 13, 26908, 13305, 17108, 2709], "avg_logprob": -0.28546626889516435, "compression_ratio": 1.4705882352941178, "no_speech_prob": 0.0, "words": [{"start": 675.99, "end": 676.47, "word": " And", "probability": 0.325927734375}, {"start": 676.47, "end": 676.95, "word": " sample", "probability": 0.238525390625}, {"start": 676.95, "end": 677.33, "word": " standard", "probability": 0.68994140625}, {"start": 677.33, "end": 677.77, "word": " deviation", "probability": 0.93701171875}, {"start": 677.77, "end": 677.99, "word": " of", "probability": 0.91357421875}, {"start": 677.99, "end": 678.29, "word": " 10.", "probability": 0.7958984375}, {"start": 679.23, "end": 679.41, "word": " Now", "probability": 0.5322265625}, {"start": 679.41, "end": 679.57, "word": " by", "probability": 0.67041015625}, {"start": 679.57, "end": 679.77, "word": " using", "probability": 0.9248046875}, {"start": 679.77, "end": 679.95, "word": " this", "probability": 0.94677734375}, {"start": 679.95, "end": 680.35, "word": " information,", "probability": 0.82373046875}, {"start": 680.61, "end": 680.69, "word": " we", "probability": 0.95068359375}, {"start": 680.69, "end": 680.95, "word": " can", "probability": 0.93701171875}, {"start": 680.95, "end": 681.39, "word": " determine", "probability": 0.95751953125}, {"start": 681.39, "end": 681.95, "word": " easily", "probability": 0.90771484375}, {"start": 681.95, "end": 682.37, "word": " the", "probability": 0.88671875}, {"start": 682.37, "end": 682.71, "word": " value", "probability": 0.9736328125}, {"start": 682.71, "end": 682.91, "word": " of", "probability": 0.93603515625}, {"start": 682.91, "end": 683.07, 
"word": " T", "probability": 0.62744140625}, {"start": 683.07, "end": 683.55, "word": " statistic.", "probability": 0.763671875}, {"start": 684.63, "end": 684.83, "word": " So", "probability": 0.80029296875}, {"start": 684.83, "end": 685.07, "word": " T", "probability": 0.84765625}, {"start": 685.07, "end": 685.45, "word": " stat,", "probability": 0.2254638671875}, {"start": 686.09, "end": 686.27, "word": " the", "probability": 0.8779296875}, {"start": 686.27, "end": 686.57, "word": " formula", "probability": 0.9033203125}, {"start": 686.57, "end": 686.87, "word": " as", "probability": 0.83349609375}, {"start": 686.87, "end": 687.13, "word": " we", "probability": 0.955078125}, {"start": 687.13, "end": 688.25, "word": " discussed", "probability": 0.890625}, {"start": 688.25, "end": 688.61, "word": " in", "probability": 0.9375}, {"start": 688.61, "end": 688.93, "word": " chapter", "probability": 0.53173828125}, {"start": 688.93, "end": 689.73, "word": " 8,", "probability": 0.69775390625}, {"start": 690.75, "end": 691.05, "word": " x", "probability": 0.60791015625}, {"start": 691.05, "end": 691.25, "word": " bar", "probability": 0.75341796875}, {"start": 691.25, "end": 691.47, "word": " minus", "probability": 0.98779296875}, {"start": 691.47, "end": 691.65, "word": " b", "probability": 0.420654296875}, {"start": 691.65, "end": 691.93, "word": " divided", "probability": 0.6513671875}, {"start": 691.93, "end": 692.11, "word": " by", "probability": 0.966796875}, {"start": 692.11, "end": 692.37, "word": " s", "probability": 0.6337890625}, {"start": 692.37, "end": 692.53, "word": " over", "probability": 0.85107421875}, {"start": 692.53, "end": 692.83, "word": " root,", "probability": 0.935546875}, {"start": 692.85, "end": 693.01, "word": " and", "probability": 0.904296875}, {"start": 693.01, "end": 693.29, "word": " here", "probability": 0.85498046875}, {"start": 693.29, "end": 693.47, "word": " we", "probability": 0.83154296875}, {"start": 693.47, "end": 693.83, "word": " 
replace", "probability": 0.849609375}, {"start": 693.83, "end": 694.25, "word": " sigma", "probability": 0.92431640625}, {"start": 694.25, "end": 695.17, "word": " by", "probability": 0.9296875}, {"start": 695.17, "end": 695.39, "word": " s", "probability": 0.9150390625}, {"start": 695.39, "end": 695.67, "word": " because", "probability": 0.755859375}, {"start": 695.67, "end": 695.95, "word": " sigma", "probability": 0.9345703125}, {"start": 695.95, "end": 696.13, "word": " is", "probability": 0.93359375}, {"start": 696.13, "end": 696.49, "word": " unknown.", "probability": 0.8916015625}, {"start": 697.59, "end": 698.19, "word": " Straightforward", "probability": 0.784912109375}, {"start": 698.19, "end": 698.79, "word": " calculation", "probability": 0.91552734375}, {"start": 698.79, "end": 699.33, "word": " gives", "probability": 0.87841796875}], "temperature": 1.0}, {"id": 27, "seek": 72898, "start": 705.8, "end": 728.98, "text": " There are two approaches to reach your conclusion about this example. Either use critical value approach or B value. In this case, we cannot use the two-sided confidence interval approach. 
So let's see if this value, by using now approach number one,", "tokens": [821, 366, 732, 11587, 281, 2524, 428, 10063, 466, 341, 1365, 13, 13746, 764, 4924, 2158, 3109, 420, 363, 2158, 13, 682, 341, 1389, 11, 321, 2644, 764, 264, 732, 12, 30941, 6687, 15035, 3109, 13, 407, 718, 311, 536, 498, 341, 2158, 11, 538, 1228, 586, 3109, 1230, 472, 11], "avg_logprob": -0.16631611092732504, "compression_ratio": 1.5029940119760479, "no_speech_prob": 0.0, "words": [{"start": 705.8, "end": 706.1, "word": " There", "probability": 0.64453125}, {"start": 706.1, "end": 706.64, "word": " are", "probability": 0.9375}, {"start": 706.64, "end": 707.1, "word": " two", "probability": 0.923828125}, {"start": 707.1, "end": 707.62, "word": " approaches", "probability": 0.73974609375}, {"start": 707.62, "end": 708.34, "word": " to", "probability": 0.96826171875}, {"start": 708.34, "end": 710.44, "word": " reach", "probability": 0.9228515625}, {"start": 710.44, "end": 710.82, "word": " your", "probability": 0.8876953125}, {"start": 710.82, "end": 711.32, "word": " conclusion", "probability": 0.90380859375}, {"start": 711.32, "end": 711.74, "word": " about", "probability": 0.900390625}, {"start": 711.74, "end": 712.74, "word": " this", "probability": 0.94677734375}, {"start": 712.74, "end": 713.64, "word": " example.", "probability": 0.87109375}, {"start": 713.94, "end": 714.24, "word": " Either", "probability": 0.79150390625}, {"start": 714.24, "end": 714.6, "word": " use", "probability": 0.7880859375}, {"start": 714.6, "end": 715.04, "word": " critical", "probability": 0.62841796875}, {"start": 715.04, "end": 715.38, "word": " value", "probability": 0.947265625}, {"start": 715.38, "end": 715.9, "word": " approach", "probability": 0.9033203125}, {"start": 715.9, "end": 717.0, "word": " or", "probability": 0.71875}, {"start": 717.0, "end": 717.2, "word": " B", "probability": 0.5986328125}, {"start": 717.2, "end": 717.48, "word": " value.", "probability": 0.80029296875}, {"start": 717.54, 
"end": 717.66, "word": " In", "probability": 0.95361328125}, {"start": 717.66, "end": 717.84, "word": " this", "probability": 0.94580078125}, {"start": 717.84, "end": 718.06, "word": " case,", "probability": 0.912109375}, {"start": 718.12, "end": 718.26, "word": " we", "probability": 0.95947265625}, {"start": 718.26, "end": 718.56, "word": " cannot", "probability": 0.88671875}, {"start": 718.56, "end": 719.12, "word": " use", "probability": 0.86767578125}, {"start": 719.12, "end": 720.08, "word": " the", "probability": 0.90087890625}, {"start": 720.08, "end": 720.26, "word": " two", "probability": 0.9267578125}, {"start": 720.26, "end": 720.52, "word": "-sided", "probability": 0.740966796875}, {"start": 720.52, "end": 721.08, "word": " confidence", "probability": 0.97607421875}, {"start": 721.08, "end": 721.48, "word": " interval", "probability": 0.97705078125}, {"start": 721.48, "end": 722.0, "word": " approach.", "probability": 0.93505859375}, {"start": 723.24, "end": 723.52, "word": " So", "probability": 0.95654296875}, {"start": 723.52, "end": 723.84, "word": " let's", "probability": 0.935546875}, {"start": 723.84, "end": 724.12, "word": " see", "probability": 0.49658203125}, {"start": 724.12, "end": 725.96, "word": " if", "probability": 0.9072265625}, {"start": 725.96, "end": 726.3, "word": " this", "probability": 0.94970703125}, {"start": 726.3, "end": 726.7, "word": " value,", "probability": 0.97802734375}, {"start": 727.14, "end": 727.32, "word": " by", "probability": 0.9736328125}, {"start": 727.32, "end": 727.62, "word": " using", "probability": 0.93701171875}, {"start": 727.62, "end": 727.92, "word": " now", "probability": 0.74951171875}, {"start": 727.92, "end": 728.48, "word": " approach", "probability": 0.85302734375}, {"start": 728.48, "end": 728.7, "word": " number", "probability": 0.9384765625}, {"start": 728.7, "end": 728.98, "word": " one,", "probability": 0.71142578125}], "temperature": 1.0}, {"id": 28, "seek": 76241, "start": 735.75, "end": 
762.41, "text": " In the exam, you don't have to use both, just use one of these. Unless the problem determines that you have to use critical value or B value or confidence interval, for example, if it's two-sided. Sometimes maybe I will ask you to solve the problems by using three different ways or two different methods, whatever.", "tokens": [682, 264, 1139, 11, 291, 500, 380, 362, 281, 764, 1293, 11, 445, 764, 472, 295, 613, 13, 16581, 264, 1154, 24799, 300, 291, 362, 281, 764, 4924, 2158, 420, 363, 2158, 420, 6687, 15035, 11, 337, 1365, 11, 498, 309, 311, 732, 12, 30941, 13, 4803, 1310, 286, 486, 1029, 291, 281, 5039, 264, 2740, 538, 1228, 1045, 819, 2098, 420, 732, 819, 7150, 11, 2035, 13], "avg_logprob": -0.20187953244084897, "compression_ratio": 1.572139303482587, "no_speech_prob": 0.0, "words": [{"start": 735.75, "end": 735.97, "word": " In", "probability": 0.64453125}, {"start": 735.97, "end": 736.11, "word": " the", "probability": 0.8505859375}, {"start": 736.11, "end": 736.37, "word": " exam,", "probability": 0.978515625}, {"start": 736.43, "end": 736.51, "word": " you", "probability": 0.95751953125}, {"start": 736.51, "end": 736.71, "word": " don't", "probability": 0.962646484375}, {"start": 736.71, "end": 736.99, "word": " have", "probability": 0.95263671875}, {"start": 736.99, "end": 737.35, "word": " to", "probability": 0.974609375}, {"start": 737.35, "end": 738.63, "word": " use", "probability": 0.8291015625}, {"start": 738.63, "end": 739.53, "word": " both,", "probability": 0.88134765625}, {"start": 739.69, "end": 739.95, "word": " just", "probability": 0.775390625}, {"start": 739.95, "end": 740.19, "word": " use", "probability": 0.818359375}, {"start": 740.19, "end": 740.37, "word": " one", "probability": 0.9267578125}, {"start": 740.37, "end": 740.49, "word": " of", "probability": 0.96435546875}, {"start": 740.49, "end": 740.75, "word": " these.", "probability": 0.857421875}, {"start": 742.25, "end": 742.85, "word": " Unless", "probability": 
0.4306640625}, {"start": 742.85, "end": 743.51, "word": " the", "probability": 0.8173828125}, {"start": 743.51, "end": 743.87, "word": " problem", "probability": 0.845703125}, {"start": 743.87, "end": 744.35, "word": " determines", "probability": 0.96630859375}, {"start": 744.35, "end": 744.67, "word": " that", "probability": 0.91455078125}, {"start": 744.67, "end": 744.81, "word": " you", "probability": 0.95654296875}, {"start": 744.81, "end": 744.97, "word": " have", "probability": 0.9404296875}, {"start": 744.97, "end": 745.09, "word": " to", "probability": 0.9677734375}, {"start": 745.09, "end": 745.43, "word": " use", "probability": 0.86767578125}, {"start": 745.43, "end": 746.97, "word": " critical", "probability": 0.406005859375}, {"start": 746.97, "end": 747.53, "word": " value", "probability": 0.9697265625}, {"start": 747.53, "end": 747.79, "word": " or", "probability": 0.72021484375}, {"start": 747.79, "end": 747.95, "word": " B", "probability": 0.66650390625}, {"start": 747.95, "end": 748.37, "word": " value", "probability": 0.76220703125}, {"start": 748.37, "end": 748.89, "word": " or", "probability": 0.849609375}, {"start": 748.89, "end": 750.45, "word": " confidence", "probability": 0.93603515625}, {"start": 750.45, "end": 750.81, "word": " interval,", "probability": 0.95166015625}, {"start": 750.95, "end": 751.03, "word": " for", "probability": 0.9423828125}, {"start": 751.03, "end": 751.33, "word": " example,", "probability": 0.97216796875}, {"start": 751.45, "end": 751.55, "word": " if", "probability": 0.94287109375}, {"start": 751.55, "end": 752.59, "word": " it's", "probability": 0.7421875}, {"start": 752.59, "end": 752.75, "word": " two", "probability": 0.9072265625}, {"start": 752.75, "end": 753.05, "word": "-sided.", "probability": 0.789306640625}, {"start": 753.85, "end": 754.25, "word": " Sometimes", "probability": 0.88232421875}, {"start": 754.25, "end": 754.57, "word": " maybe", "probability": 0.6005859375}, {"start": 754.57, "end": 
754.77, "word": " I", "probability": 0.97900390625}, {"start": 754.77, "end": 754.93, "word": " will", "probability": 0.8505859375}, {"start": 754.93, "end": 755.21, "word": " ask", "probability": 0.9208984375}, {"start": 755.21, "end": 755.35, "word": " you", "probability": 0.96240234375}, {"start": 755.35, "end": 755.51, "word": " to", "probability": 0.9658203125}, {"start": 755.51, "end": 755.77, "word": " solve", "probability": 0.8876953125}, {"start": 755.77, "end": 756.35, "word": " the", "probability": 0.8896484375}, {"start": 756.35, "end": 757.11, "word": " problems", "probability": 0.7890625}, {"start": 757.11, "end": 757.37, "word": " by", "probability": 0.95703125}, {"start": 757.37, "end": 757.93, "word": " using", "probability": 0.94140625}, {"start": 757.93, "end": 758.77, "word": " three", "probability": 0.90869140625}, {"start": 758.77, "end": 759.23, "word": " different", "probability": 0.87060546875}, {"start": 759.23, "end": 759.65, "word": " ways", "probability": 0.9111328125}, {"start": 759.65, "end": 760.77, "word": " or", "probability": 0.51953125}, {"start": 760.77, "end": 760.95, "word": " two", "probability": 0.93798828125}, {"start": 760.95, "end": 761.43, "word": " different", "probability": 0.87158203125}, {"start": 761.43, "end": 761.85, "word": " methods,", "probability": 0.90869140625}, {"start": 762.09, "end": 762.41, "word": " whatever.", "probability": 0.923828125}], "temperature": 1.0}, {"id": 29, "seek": 79220, "start": 763.34, "end": 792.2, "text": " But if it's just solve the problem with P, you may use the critical value approach or P-value. So let's see now. For the critical value approach, one more time, your critical value is 1.38. Now, is this value falls in the rejection or non-rejection region? 
Now, this value is smaller than 1.318.", "tokens": [583, 498, 309, 311, 445, 5039, 264, 1154, 365, 430, 11, 291, 815, 764, 264, 4924, 2158, 3109, 420, 430, 12, 29155, 13, 407, 718, 311, 536, 586, 13, 1171, 264, 4924, 2158, 3109, 11, 472, 544, 565, 11, 428, 4924, 2158, 307, 502, 13, 12625, 13, 823, 11, 307, 341, 2158, 8804, 294, 264, 26044, 420, 2107, 12, 265, 1020, 313, 4458, 30, 823, 11, 341, 2158, 307, 4356, 813, 502, 13, 18, 6494, 13], "avg_logprob": -0.2049512963790398, "compression_ratio": 1.6444444444444444, "no_speech_prob": 0.0, "words": [{"start": 763.34, "end": 763.82, "word": " But", "probability": 0.413330078125}, {"start": 763.82, "end": 764.38, "word": " if", "probability": 0.83544921875}, {"start": 764.38, "end": 764.68, "word": " it's", "probability": 0.783935546875}, {"start": 764.68, "end": 764.98, "word": " just", "probability": 0.8955078125}, {"start": 764.98, "end": 765.42, "word": " solve", "probability": 0.46484375}, {"start": 765.42, "end": 765.56, "word": " the", "probability": 0.78759765625}, {"start": 765.56, "end": 765.78, "word": " problem", "probability": 0.68896484375}, {"start": 765.78, "end": 766.02, "word": " with", "probability": 0.255859375}, {"start": 766.02, "end": 766.12, "word": " P,", "probability": 0.421875}, {"start": 766.14, "end": 766.24, "word": " you", "probability": 0.841796875}, {"start": 766.24, "end": 766.36, "word": " may", "probability": 0.86572265625}, {"start": 766.36, "end": 766.72, "word": " use", "probability": 0.86474609375}, {"start": 766.72, "end": 767.1, "word": " the", "probability": 0.77392578125}, {"start": 767.1, "end": 767.38, "word": " critical", "probability": 0.7451171875}, {"start": 767.38, "end": 767.66, "word": " value", "probability": 0.900390625}, {"start": 767.66, "end": 768.14, "word": " approach", "probability": 0.9228515625}, {"start": 768.14, "end": 769.18, "word": " or", "probability": 0.748046875}, {"start": 769.18, "end": 769.36, "word": " P", "probability": 0.49658203125}, {"start": 
769.36, "end": 769.6, "word": "-value.", "probability": 0.743896484375}, {"start": 769.98, "end": 770.18, "word": " So", "probability": 0.91259765625}, {"start": 770.18, "end": 770.42, "word": " let's", "probability": 0.91552734375}, {"start": 770.42, "end": 770.54, "word": " see", "probability": 0.92041015625}, {"start": 770.54, "end": 770.92, "word": " now.", "probability": 0.93408203125}, {"start": 772.62, "end": 773.34, "word": " For", "probability": 0.791015625}, {"start": 773.34, "end": 773.5, "word": " the", "probability": 0.912109375}, {"start": 773.5, "end": 773.84, "word": " critical", "probability": 0.92724609375}, {"start": 773.84, "end": 774.22, "word": " value", "probability": 0.96435546875}, {"start": 774.22, "end": 774.68, "word": " approach,", "probability": 0.91357421875}, {"start": 775.56, "end": 776.02, "word": " one", "probability": 0.83544921875}, {"start": 776.02, "end": 776.22, "word": " more", "probability": 0.93212890625}, {"start": 776.22, "end": 776.56, "word": " time,", "probability": 0.8896484375}, {"start": 777.6, "end": 777.84, "word": " your", "probability": 0.865234375}, {"start": 777.84, "end": 778.34, "word": " critical", "probability": 0.939453125}, {"start": 778.34, "end": 779.92, "word": " value", "probability": 0.9736328125}, {"start": 779.92, "end": 780.4, "word": " is", "probability": 0.9443359375}, {"start": 780.4, "end": 780.62, "word": " 1", "probability": 0.97998046875}, {"start": 780.62, "end": 781.18, "word": ".38.", "probability": 0.9892578125}, {"start": 782.28, "end": 782.7, "word": " Now,", "probability": 0.9619140625}, {"start": 783.6, "end": 783.98, "word": " is", "probability": 0.603515625}, {"start": 783.98, "end": 784.28, "word": " this", "probability": 0.947265625}, {"start": 784.28, "end": 784.78, "word": " value", "probability": 0.9765625}, {"start": 784.78, "end": 785.54, "word": " falls", "probability": 0.5537109375}, {"start": 785.54, "end": 786.02, "word": " in", "probability": 0.93603515625}, 
{"start": 786.02, "end": 786.96, "word": " the", "probability": 0.90869140625}, {"start": 786.96, "end": 787.5, "word": " rejection", "probability": 0.9677734375}, {"start": 787.5, "end": 788.16, "word": " or", "probability": 0.92431640625}, {"start": 788.16, "end": 788.4, "word": " non", "probability": 0.96533203125}, {"start": 788.4, "end": 788.86, "word": "-rejection", "probability": 0.91357421875}, {"start": 788.86, "end": 789.08, "word": " region?", "probability": 0.7041015625}, {"start": 789.64, "end": 789.96, "word": " Now,", "probability": 0.87841796875}, {"start": 790.0, "end": 790.2, "word": " this", "probability": 0.94384765625}, {"start": 790.2, "end": 790.48, "word": " value", "probability": 0.96630859375}, {"start": 790.48, "end": 790.64, "word": " is", "probability": 0.80126953125}, {"start": 790.64, "end": 791.0, "word": " smaller", "probability": 0.8740234375}, {"start": 791.0, "end": 791.28, "word": " than", "probability": 0.93994140625}, {"start": 791.28, "end": 791.46, "word": " 1", "probability": 0.990234375}, {"start": 791.46, "end": 792.2, "word": ".318.", "probability": 0.9324544270833334}], "temperature": 1.0}, {"id": 30, "seek": 82017, "start": 792.89, "end": 820.17, "text": " So this value actually falls in the non-rejection region. 
So we sense this step is smaller than, which is 0.55, smaller than 1.318, then we don't reject the null hypothesis.", "tokens": [407, 341, 2158, 767, 8804, 294, 264, 2107, 12, 265, 1020, 313, 4458, 13, 407, 321, 2020, 341, 1823, 307, 4356, 813, 11, 597, 307, 1958, 13, 13622, 11, 4356, 813, 502, 13, 18, 6494, 11, 550, 321, 500, 380, 8248, 264, 18184, 17291, 13], "avg_logprob": -0.2406589735461318, "compression_ratio": 1.359375, "no_speech_prob": 0.0, "words": [{"start": 792.89, "end": 793.23, "word": " So", "probability": 0.50927734375}, {"start": 793.23, "end": 793.45, "word": " this", "probability": 0.703125}, {"start": 793.45, "end": 793.81, "word": " value", "probability": 0.95166015625}, {"start": 793.81, "end": 794.57, "word": " actually", "probability": 0.76123046875}, {"start": 794.57, "end": 795.67, "word": " falls", "probability": 0.73583984375}, {"start": 795.67, "end": 796.09, "word": " in", "probability": 0.88818359375}, {"start": 796.09, "end": 796.37, "word": " the", "probability": 0.8759765625}, {"start": 796.37, "end": 796.65, "word": " non", "probability": 0.8759765625}, {"start": 796.65, "end": 797.23, "word": "-rejection", "probability": 0.8082275390625}, {"start": 797.23, "end": 797.63, "word": " region.", "probability": 0.904296875}, {"start": 798.15, "end": 798.29, "word": " So", "probability": 0.78369140625}, {"start": 798.29, "end": 798.59, "word": " we", "probability": 0.68212890625}, {"start": 798.59, "end": 799.31, "word": " sense", "probability": 0.58984375}, {"start": 799.31, "end": 801.63, "word": " this", "probability": 0.56689453125}, {"start": 801.63, "end": 801.97, "word": " step", "probability": 0.8642578125}, {"start": 801.97, "end": 803.45, "word": " is", "probability": 0.87158203125}, {"start": 803.45, "end": 803.91, "word": " smaller", "probability": 0.8291015625}, {"start": 803.91, "end": 804.35, "word": " than,", "probability": 0.9150390625}, {"start": 804.55, "end": 805.03, "word": " which", "probability": 0.79248046875}, 
{"start": 805.03, "end": 805.15, "word": " is", "probability": 0.95068359375}, {"start": 805.15, "end": 805.47, "word": " 0", "probability": 0.61865234375}, {"start": 805.47, "end": 806.35, "word": ".55,", "probability": 0.8720703125}, {"start": 808.91, "end": 812.67, "word": " smaller", "probability": 0.77685546875}, {"start": 812.67, "end": 813.15, "word": " than", "probability": 0.931640625}, {"start": 813.15, "end": 815.59, "word": " 1", "probability": 0.88818359375}, {"start": 815.59, "end": 816.27, "word": ".318,", "probability": 0.9677734375}, {"start": 816.99, "end": 817.35, "word": " then", "probability": 0.84130859375}, {"start": 817.35, "end": 817.55, "word": " we", "probability": 0.958984375}, {"start": 817.55, "end": 817.99, "word": " don't", "probability": 0.972412109375}, {"start": 817.99, "end": 819.35, "word": " reject", "probability": 0.91943359375}, {"start": 819.35, "end": 819.61, "word": " the", "probability": 0.77197265625}, {"start": 819.61, "end": 819.75, "word": " null", "probability": 0.95556640625}, {"start": 819.75, "end": 820.17, "word": " hypothesis.", "probability": 0.87744140625}], "temperature": 1.0}, {"id": 31, "seek": 83555, "start": 823.35, "end": 835.55, "text": " Don't reject. It's zero. That means there is insufficient evidence to support the claim. 
And the claim is the mean is above 50.", "tokens": [1468, 380, 8248, 13, 467, 311, 4018, 13, 663, 1355, 456, 307, 41709, 4467, 281, 1406, 264, 3932, 13, 400, 264, 3932, 307, 264, 914, 307, 3673, 2625, 13], "avg_logprob": -0.3466145932674408, "compression_ratio": 1.1962616822429906, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 823.35, "end": 823.79, "word": " Don't", "probability": 0.5927734375}, {"start": 823.79, "end": 824.23, "word": " reject.", "probability": 0.904296875}, {"start": 826.07, "end": 826.79, "word": " It's", "probability": 0.83251953125}, {"start": 826.79, "end": 827.03, "word": " zero.", "probability": 0.76171875}, {"start": 827.21, "end": 827.41, "word": " That", "probability": 0.75048828125}, {"start": 827.41, "end": 827.87, "word": " means", "probability": 0.90185546875}, {"start": 827.87, "end": 829.23, "word": " there", "probability": 0.392822265625}, {"start": 829.23, "end": 829.77, "word": " is", "probability": 0.81591796875}, {"start": 829.77, "end": 830.31, "word": " insufficient", "probability": 0.90673828125}, {"start": 830.31, "end": 830.93, "word": " evidence", "probability": 0.94482421875}, {"start": 830.93, "end": 831.15, "word": " to", "probability": 0.8857421875}, {"start": 831.15, "end": 831.57, "word": " support", "probability": 0.99169921875}, {"start": 831.57, "end": 832.11, "word": " the", "probability": 0.70947265625}, {"start": 832.11, "end": 832.47, "word": " claim.", "probability": 0.8798828125}, {"start": 832.97, "end": 833.31, "word": " And", "probability": 0.72705078125}, {"start": 833.31, "end": 833.45, "word": " the", "probability": 0.89013671875}, {"start": 833.45, "end": 833.77, "word": " claim", "probability": 0.89013671875}, {"start": 833.77, "end": 834.17, "word": " is", "probability": 0.654296875}, {"start": 834.17, "end": 834.53, "word": " the", "probability": 0.212158203125}, {"start": 834.53, "end": 834.69, "word": " mean", "probability": 0.93359375}, {"start": 834.69, "end": 834.85, "word": 
" is", "probability": 0.8662109375}, {"start": 834.85, "end": 835.19, "word": " above", "probability": 0.94677734375}, {"start": 835.19, "end": 835.55, "word": " 50.", "probability": 0.70751953125}], "temperature": 1.0}, {"id": 32, "seek": 86533, "start": 836.21, "end": 865.33, "text": " So your conclusion should be written by using this way. We don't reject the null hypothesis because or since this statistic is smaller than 1.318. That means there's not sufficient evidence that the mean bill is over $52. So that's by using a critical value approach. The other approach by using B-value.", "tokens": [407, 428, 10063, 820, 312, 3720, 538, 1228, 341, 636, 13, 492, 500, 380, 8248, 264, 18184, 17291, 570, 420, 1670, 341, 29588, 307, 4356, 813, 502, 13, 18, 6494, 13, 663, 1355, 456, 311, 406, 11563, 4467, 300, 264, 914, 2961, 307, 670, 1848, 17602, 13, 407, 300, 311, 538, 1228, 257, 4924, 2158, 3109, 13, 440, 661, 3109, 538, 1228, 363, 12, 29155, 13], "avg_logprob": -0.21816697138458935, "compression_ratio": 1.50990099009901, "no_speech_prob": 0.0, "words": [{"start": 836.21, "end": 836.65, "word": " So", "probability": 0.86865234375}, {"start": 836.65, "end": 837.31, "word": " your", "probability": 0.7041015625}, {"start": 837.31, "end": 837.87, "word": " conclusion", "probability": 0.92041015625}, {"start": 837.87, "end": 838.73, "word": " should", "probability": 0.962890625}, {"start": 838.73, "end": 838.95, "word": " be", "probability": 0.95166015625}, {"start": 838.95, "end": 839.71, "word": " written", "probability": 0.89990234375}, {"start": 839.71, "end": 840.31, "word": " by", "probability": 0.7392578125}, {"start": 840.31, "end": 841.11, "word": " using", "probability": 0.91796875}, {"start": 841.11, "end": 841.39, "word": " this", "probability": 0.951171875}, {"start": 841.39, "end": 841.67, "word": " way.", "probability": 0.92578125}, {"start": 842.01, "end": 842.75, "word": " We", "probability": 0.919921875}, {"start": 842.75, "end": 843.01, "word": " 
don't", "probability": 0.955078125}, {"start": 843.01, "end": 843.29, "word": " reject", "probability": 0.97021484375}, {"start": 843.29, "end": 843.45, "word": " the", "probability": 0.619140625}, {"start": 843.45, "end": 843.55, "word": " null", "probability": 0.99169921875}, {"start": 843.55, "end": 844.01, "word": " hypothesis", "probability": 0.82763671875}, {"start": 844.01, "end": 845.25, "word": " because", "probability": 0.50537109375}, {"start": 845.25, "end": 845.65, "word": " or", "probability": 0.355224609375}, {"start": 845.65, "end": 845.95, "word": " since", "probability": 0.798828125}, {"start": 845.95, "end": 846.17, "word": " this", "probability": 0.75390625}, {"start": 846.17, "end": 846.73, "word": " statistic", "probability": 0.87890625}, {"start": 846.73, "end": 847.83, "word": " is", "probability": 0.91943359375}, {"start": 847.83, "end": 848.19, "word": " smaller", "probability": 0.87255859375}, {"start": 848.19, "end": 848.45, "word": " than", "probability": 0.92236328125}, {"start": 848.45, "end": 848.63, "word": " 1", "probability": 0.141357421875}, {"start": 848.63, "end": 849.13, "word": ".318.", "probability": 0.9783528645833334}, {"start": 849.85, "end": 850.31, "word": " That", "probability": 0.90478515625}, {"start": 850.31, "end": 850.67, "word": " means", "probability": 0.93017578125}, {"start": 850.67, "end": 851.09, "word": " there's", "probability": 0.697998046875}, {"start": 851.09, "end": 851.45, "word": " not", "probability": 0.93310546875}, {"start": 851.45, "end": 851.89, "word": " sufficient", "probability": 0.890625}, {"start": 851.89, "end": 852.27, "word": " evidence", "probability": 0.94970703125}, {"start": 852.27, "end": 852.75, "word": " that", "probability": 0.91015625}, {"start": 852.75, "end": 853.41, "word": " the", "probability": 0.8994140625}, {"start": 853.41, "end": 853.55, "word": " mean", "probability": 0.9560546875}, {"start": 853.55, "end": 853.75, "word": " bill", "probability": 0.6533203125}, 
{"start": 853.75, "end": 853.93, "word": " is", "probability": 0.9482421875}, {"start": 853.93, "end": 854.47, "word": " over", "probability": 0.93310546875}, {"start": 854.47, "end": 854.93, "word": " $52.", "probability": 0.751708984375}, {"start": 855.65, "end": 856.41, "word": " So", "probability": 0.9345703125}, {"start": 856.41, "end": 856.69, "word": " that's", "probability": 0.939208984375}, {"start": 856.69, "end": 856.85, "word": " by", "probability": 0.95751953125}, {"start": 856.85, "end": 857.27, "word": " using", "probability": 0.939453125}, {"start": 857.27, "end": 858.87, "word": " a", "probability": 0.74560546875}, {"start": 858.87, "end": 859.19, "word": " critical", "probability": 0.935546875}, {"start": 859.19, "end": 859.55, "word": " value", "probability": 0.8916015625}, {"start": 859.55, "end": 859.91, "word": " approach.", "probability": 0.75830078125}, {"start": 862.95, "end": 863.71, "word": " The", "probability": 0.8818359375}, {"start": 863.71, "end": 864.03, "word": " other", "probability": 0.89697265625}, {"start": 864.03, "end": 864.47, "word": " approach", "probability": 0.91162109375}, {"start": 864.47, "end": 864.67, "word": " by", "probability": 0.626953125}, {"start": 864.67, "end": 864.89, "word": " using", "probability": 0.9482421875}, {"start": 864.89, "end": 865.09, "word": " B", "probability": 0.440185546875}, {"start": 865.09, "end": 865.33, "word": "-value.", "probability": 0.666259765625}], "temperature": 1.0}, {"id": 33, "seek": 89165, "start": 866.93, "end": 891.65, "text": " unfortunately T-tables don't give the exact b-value so here we can use either excel spreadsheet to get the exact b-value or any statistical software packages might give the accurate result but the tables we have gives the approximate b-value", "tokens": [7015, 314, 12, 83, 2965, 500, 380, 976, 264, 1900, 272, 12, 29155, 370, 510, 321, 393, 764, 2139, 24015, 27733, 281, 483, 264, 1900, 272, 12, 29155, 420, 604, 22820, 4722, 17401, 1062, 976, 264, 
8559, 1874, 457, 264, 8020, 321, 362, 2709, 264, 30874, 272, 12, 29155], "avg_logprob": -0.22828124701976776, "compression_ratio": 1.5714285714285714, "no_speech_prob": 0.0, "words": [{"start": 866.93, "end": 867.65, "word": " unfortunately", "probability": 0.454345703125}, {"start": 867.65, "end": 868.05, "word": " T", "probability": 0.35986328125}, {"start": 868.05, "end": 868.39, "word": "-tables", "probability": 0.6067708333333334}, {"start": 868.39, "end": 870.25, "word": " don't", "probability": 0.8740234375}, {"start": 870.25, "end": 870.51, "word": " give", "probability": 0.84765625}, {"start": 870.51, "end": 870.67, "word": " the", "probability": 0.75927734375}, {"start": 870.67, "end": 871.29, "word": " exact", "probability": 0.9296875}, {"start": 871.29, "end": 872.73, "word": " b", "probability": 0.322265625}, {"start": 872.73, "end": 873.03, "word": "-value", "probability": 0.828857421875}, {"start": 873.03, "end": 873.75, "word": " so", "probability": 0.4765625}, {"start": 873.75, "end": 873.99, "word": " here", "probability": 0.82275390625}, {"start": 873.99, "end": 874.13, "word": " we", "probability": 0.9375}, {"start": 874.13, "end": 874.51, "word": " can", "probability": 0.94091796875}, {"start": 874.51, "end": 875.03, "word": " use", "probability": 0.85009765625}, {"start": 875.03, "end": 875.41, "word": " either", "probability": 0.9267578125}, {"start": 875.41, "end": 875.89, "word": " excel", "probability": 0.60009765625}, {"start": 875.89, "end": 876.75, "word": " spreadsheet", "probability": 0.78369140625}, {"start": 876.75, "end": 877.73, "word": " to", "probability": 0.91455078125}, {"start": 877.73, "end": 877.95, "word": " get", "probability": 0.9423828125}, {"start": 877.95, "end": 878.15, "word": " the", "probability": 0.88671875}, {"start": 878.15, "end": 878.59, "word": " exact", "probability": 0.943359375}, {"start": 878.59, "end": 879.49, "word": " b", "probability": 0.7744140625}, {"start": 879.49, "end": 879.81, "word": "-value", 
"probability": 0.938232421875}, {"start": 879.81, "end": 880.71, "word": " or", "probability": 0.89990234375}, {"start": 880.71, "end": 881.27, "word": " any", "probability": 0.91357421875}, {"start": 881.27, "end": 882.07, "word": " statistical", "probability": 0.8876953125}, {"start": 882.07, "end": 882.77, "word": " software", "probability": 0.92578125}, {"start": 882.77, "end": 883.37, "word": " packages", "probability": 0.94775390625}, {"start": 883.37, "end": 884.31, "word": " might", "probability": 0.89208984375}, {"start": 884.31, "end": 884.63, "word": " give", "probability": 0.87353515625}, {"start": 884.63, "end": 884.91, "word": " the", "probability": 0.90087890625}, {"start": 884.91, "end": 885.97, "word": " accurate", "probability": 0.85205078125}, {"start": 885.97, "end": 886.45, "word": " result", "probability": 0.8427734375}, {"start": 886.45, "end": 887.55, "word": " but", "probability": 0.7021484375}, {"start": 887.55, "end": 887.77, "word": " the", "probability": 0.9072265625}, {"start": 887.77, "end": 888.11, "word": " tables", "probability": 0.796875}, {"start": 888.11, "end": 888.33, "word": " we", "probability": 0.94873046875}, {"start": 888.33, "end": 888.61, "word": " have", "probability": 0.9482421875}, {"start": 888.61, "end": 888.95, "word": " gives", "probability": 0.6982421875}, {"start": 888.95, "end": 889.25, "word": " the", "probability": 0.916015625}, {"start": 889.25, "end": 889.81, "word": " approximate", "probability": 0.85546875}, {"start": 889.81, "end": 891.37, "word": " b", "probability": 0.90576171875}, {"start": 891.37, "end": 891.65, "word": "-value", "probability": 0.946533203125}], "temperature": 1.0}, {"id": 34, "seek": 90598, "start": 894.54, "end": 905.98, "text": " If you, if we have the table for the, for this particular example, let's see how can we figure out the B value by using the T table we have.", "tokens": [759, 291, 11, 498, 321, 362, 264, 3199, 337, 264, 11, 337, 341, 1729, 1365, 11, 718, 311, 536, 577, 
393, 321, 2573, 484, 264, 363, 2158, 538, 1228, 264, 314, 3199, 321, 362, 13], "avg_logprob": -0.1762152835726738, "compression_ratio": 1.3055555555555556, "no_speech_prob": 0.0, "words": [{"start": 894.54, "end": 894.86, "word": " If", "probability": 0.7451171875}, {"start": 894.86, "end": 895.04, "word": " you,", "probability": 0.5029296875}, {"start": 895.18, "end": 895.3, "word": " if", "probability": 0.9560546875}, {"start": 895.3, "end": 895.42, "word": " we", "probability": 0.849609375}, {"start": 895.42, "end": 895.58, "word": " have", "probability": 0.93798828125}, {"start": 895.58, "end": 895.7, "word": " the", "probability": 0.89306640625}, {"start": 895.7, "end": 896.02, "word": " table", "probability": 0.90234375}, {"start": 896.02, "end": 896.32, "word": " for", "probability": 0.955078125}, {"start": 896.32, "end": 896.58, "word": " the,", "probability": 0.67236328125}, {"start": 898.12, "end": 899.06, "word": " for", "probability": 0.93994140625}, {"start": 899.06, "end": 899.3, "word": " this", "probability": 0.95458984375}, {"start": 899.3, "end": 899.7, "word": " particular", "probability": 0.89892578125}, {"start": 899.7, "end": 900.02, "word": " example,", "probability": 0.9404296875}, {"start": 900.08, "end": 900.28, "word": " let's", "probability": 0.92041015625}, {"start": 900.28, "end": 900.5, "word": " see", "probability": 0.921875}, {"start": 900.5, "end": 901.5, "word": " how", "probability": 0.87451171875}, {"start": 901.5, "end": 901.76, "word": " can", "probability": 0.9228515625}, {"start": 901.76, "end": 902.04, "word": " we", "probability": 0.9580078125}, {"start": 902.04, "end": 902.64, "word": " figure", "probability": 0.9755859375}, {"start": 902.64, "end": 903.12, "word": " out", "probability": 0.88720703125}, {"start": 903.12, "end": 904.06, "word": " the", "probability": 0.9140625}, {"start": 904.06, "end": 904.2, "word": " B", "probability": 0.56689453125}, {"start": 904.2, "end": 904.46, "word": " value", "probability": 
0.861328125}, {"start": 904.46, "end": 904.64, "word": " by", "probability": 0.943359375}, {"start": 904.64, "end": 904.92, "word": " using", "probability": 0.927734375}, {"start": 904.92, "end": 905.26, "word": " the", "probability": 0.83251953125}, {"start": 905.26, "end": 905.42, "word": " T", "probability": 0.81787109375}, {"start": 905.42, "end": 905.64, "word": " table", "probability": 0.689453125}, {"start": 905.64, "end": 905.78, "word": " we", "probability": 0.623046875}, {"start": 905.78, "end": 905.98, "word": " have.", "probability": 0.9384765625}], "temperature": 1.0}, {"id": 35, "seek": 94162, "start": 914.32, "end": 941.62, "text": " So let's open the T table. It takes a second. So this is statistical table. So this is your T table. Now we are looking for B value at degrees of freedom of 24.", "tokens": [407, 718, 311, 1269, 264, 314, 3199, 13, 467, 2516, 257, 1150, 13, 407, 341, 307, 22820, 3199, 13, 407, 341, 307, 428, 314, 3199, 13, 823, 321, 366, 1237, 337, 363, 2158, 412, 5310, 295, 5645, 295, 4022, 13], "avg_logprob": -0.1642530429654005, "compression_ratio": 1.3089430894308942, "no_speech_prob": 0.0, "words": [{"start": 914.32, "end": 914.64, "word": " So", "probability": 0.8486328125}, {"start": 914.64, "end": 914.9, "word": " let's", "probability": 0.859130859375}, {"start": 914.9, "end": 915.34, "word": " open", "probability": 0.9150390625}, {"start": 915.34, "end": 916.56, "word": " the", "probability": 0.86865234375}, {"start": 916.56, "end": 919.22, "word": " T", "probability": 0.445068359375}, {"start": 919.22, "end": 919.66, "word": " table.", "probability": 0.5576171875}, {"start": 919.96, "end": 920.56, "word": " It", "probability": 0.85498046875}, {"start": 920.56, "end": 921.08, "word": " takes", "probability": 0.779296875}, {"start": 921.08, "end": 921.6, "word": " a", "probability": 0.97265625}, {"start": 921.6, "end": 922.02, "word": " second.", "probability": 0.91650390625}, {"start": 922.7, "end": 922.86, "word": " So", 
"probability": 0.9033203125}, {"start": 922.86, "end": 923.04, "word": " this", "probability": 0.92919921875}, {"start": 923.04, "end": 923.2, "word": " is", "probability": 0.93408203125}, {"start": 923.2, "end": 923.7, "word": " statistical", "probability": 0.59765625}, {"start": 923.7, "end": 924.18, "word": " table.", "probability": 0.89697265625}, {"start": 927.06, "end": 927.94, "word": " So", "probability": 0.787109375}, {"start": 927.94, "end": 931.96, "word": " this", "probability": 0.921875}, {"start": 931.96, "end": 932.06, "word": " is", "probability": 0.9365234375}, {"start": 932.06, "end": 932.2, "word": " your", "probability": 0.8974609375}, {"start": 932.2, "end": 932.36, "word": " T", "probability": 0.97900390625}, {"start": 932.36, "end": 932.68, "word": " table.", "probability": 0.84423828125}, {"start": 935.24, "end": 935.44, "word": " Now", "probability": 0.95556640625}, {"start": 935.44, "end": 935.58, "word": " we", "probability": 0.6669921875}, {"start": 935.58, "end": 935.7, "word": " are", "probability": 0.931640625}, {"start": 935.7, "end": 935.96, "word": " looking", "probability": 0.9140625}, {"start": 935.96, "end": 936.9, "word": " for", "probability": 0.9580078125}, {"start": 936.9, "end": 938.24, "word": " B", "probability": 0.464599609375}, {"start": 938.24, "end": 938.66, "word": " value", "probability": 0.93017578125}, {"start": 938.66, "end": 939.54, "word": " at", "probability": 0.88818359375}, {"start": 939.54, "end": 940.02, "word": " degrees", "probability": 0.9404296875}, {"start": 940.02, "end": 940.26, "word": " of", "probability": 0.9677734375}, {"start": 940.26, "end": 940.64, "word": " freedom", "probability": 0.947265625}, {"start": 940.64, "end": 941.14, "word": " of", "probability": 0.96337890625}, {"start": 941.14, "end": 941.62, "word": " 24.", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 36, "seek": 97178, "start": 942.42, "end": 971.78, "text": " Because the sample size is 25. Now look at 24. 
So my V value again. Your V value is probability of T greater than 0.55. Because the T statistic is 0.55. Now let's compute this area.", "tokens": [1436, 264, 6889, 2744, 307, 3552, 13, 823, 574, 412, 4022, 13, 407, 452, 691, 2158, 797, 13, 2260, 691, 2158, 307, 8482, 295, 314, 5044, 813, 1958, 13, 13622, 13, 1436, 264, 314, 29588, 307, 1958, 13, 13622, 13, 823, 718, 311, 14722, 341, 1859, 13], "avg_logprob": -0.1722005276630322, "compression_ratio": 1.3284671532846715, "no_speech_prob": 0.0, "words": [{"start": 942.42, "end": 942.9, "word": " Because", "probability": 0.59716796875}, {"start": 942.9, "end": 943.3, "word": " the", "probability": 0.818359375}, {"start": 943.3, "end": 943.62, "word": " sample", "probability": 0.78076171875}, {"start": 943.62, "end": 944.06, "word": " size", "probability": 0.84521484375}, {"start": 944.06, "end": 944.58, "word": " is", "probability": 0.93212890625}, {"start": 944.58, "end": 945.08, "word": " 25.", "probability": 0.87158203125}, {"start": 945.74, "end": 946.14, "word": " Now", "probability": 0.9150390625}, {"start": 946.14, "end": 946.48, "word": " look", "probability": 0.52001953125}, {"start": 946.48, "end": 946.64, "word": " at", "probability": 0.96337890625}, {"start": 946.64, "end": 947.26, "word": " 24.", "probability": 0.95947265625}, {"start": 949.88, "end": 950.68, "word": " So", "probability": 0.6357421875}, {"start": 950.68, "end": 950.94, "word": " my", "probability": 0.9140625}, {"start": 950.94, "end": 951.08, "word": " V", "probability": 0.3359375}, {"start": 951.08, "end": 951.34, "word": " value", "probability": 0.787109375}, {"start": 951.34, "end": 951.76, "word": " again.", "probability": 0.8837890625}, {"start": 957.4, "end": 957.74, "word": " Your", "probability": 0.701171875}, {"start": 957.74, "end": 957.98, "word": " V", "probability": 0.95556640625}, {"start": 957.98, "end": 958.46, "word": " value", "probability": 0.9560546875}, {"start": 958.46, "end": 961.92, "word": " is", "probability": 
0.775390625}, {"start": 961.92, "end": 962.34, "word": " probability", "probability": 0.9130859375}, {"start": 962.34, "end": 963.3, "word": " of", "probability": 0.95556640625}, {"start": 963.3, "end": 963.52, "word": " T", "probability": 0.943359375}, {"start": 963.52, "end": 963.98, "word": " greater", "probability": 0.91162109375}, {"start": 963.98, "end": 964.46, "word": " than", "probability": 0.9365234375}, {"start": 964.46, "end": 966.12, "word": " 0", "probability": 0.77978515625}, {"start": 966.12, "end": 966.62, "word": ".55.", "probability": 0.988525390625}, {"start": 966.92, "end": 967.22, "word": " Because", "probability": 0.91796875}, {"start": 967.22, "end": 967.56, "word": " the", "probability": 0.89990234375}, {"start": 967.56, "end": 968.12, "word": " T", "probability": 0.806640625}, {"start": 968.12, "end": 968.58, "word": " statistic", "probability": 0.8583984375}, {"start": 968.58, "end": 968.86, "word": " is", "probability": 0.9443359375}, {"start": 968.86, "end": 969.04, "word": " 0", "probability": 0.93115234375}, {"start": 969.04, "end": 969.42, "word": ".55.", "probability": 0.98974609375}, {"start": 970.1, "end": 970.48, "word": " Now", "probability": 0.95458984375}, {"start": 970.48, "end": 970.8, "word": " let's", "probability": 0.906005859375}, {"start": 970.8, "end": 971.18, "word": " compute", "probability": 0.9033203125}, {"start": 971.18, "end": 971.5, "word": " this", "probability": 0.935546875}, {"start": 971.5, "end": 971.78, "word": " area.", "probability": 0.89453125}], "temperature": 1.0}, {"id": 37, "seek": 100109, "start": 972.71, "end": 1001.09, "text": " above 0.55 now if we have T table 24 degrees of freedom we are looking for 0.55 the first value here is 0.685 next one is 0.8 so it's increased so my answer could be this P value over 25 or", "tokens": [3673, 1958, 13, 13622, 586, 498, 321, 362, 314, 3199, 4022, 5310, 295, 5645, 321, 366, 1237, 337, 1958, 13, 13622, 264, 700, 2158, 510, 307, 1958, 13, 21, 19287, 958, 
472, 307, 1958, 13, 23, 370, 309, 311, 6505, 370, 452, 1867, 727, 312, 341, 430, 2158, 670, 3552, 420], "avg_logprob": -0.2312199459053003, "compression_ratio": 1.3380281690140845, "no_speech_prob": 0.0, "words": [{"start": 972.71, "end": 973.13, "word": " above", "probability": 0.239013671875}, {"start": 973.13, "end": 973.77, "word": " 0", "probability": 0.355712890625}, {"start": 973.77, "end": 974.27, "word": ".55", "probability": 0.973876953125}, {"start": 974.27, "end": 975.99, "word": " now", "probability": 0.23779296875}, {"start": 975.99, "end": 976.59, "word": " if", "probability": 0.51123046875}, {"start": 976.59, "end": 976.83, "word": " we", "probability": 0.953125}, {"start": 976.83, "end": 977.07, "word": " have", "probability": 0.9501953125}, {"start": 977.07, "end": 977.39, "word": " T", "probability": 0.408935546875}, {"start": 977.39, "end": 977.79, "word": " table", "probability": 0.53564453125}, {"start": 977.79, "end": 979.31, "word": " 24", "probability": 0.81982421875}, {"start": 979.31, "end": 979.65, "word": " degrees", "probability": 0.9404296875}, {"start": 979.65, "end": 979.83, "word": " of", "probability": 0.95263671875}, {"start": 979.83, "end": 980.11, "word": " freedom", "probability": 0.95166015625}, {"start": 980.11, "end": 982.21, "word": " we", "probability": 0.77294921875}, {"start": 982.21, "end": 982.35, "word": " are", "probability": 0.93017578125}, {"start": 982.35, "end": 982.57, "word": " looking", "probability": 0.91943359375}, {"start": 982.57, "end": 982.81, "word": " for", "probability": 0.94677734375}, {"start": 982.81, "end": 983.07, "word": " 0", "probability": 0.93701171875}, {"start": 983.07, "end": 983.49, "word": ".55", "probability": 0.9921875}, {"start": 983.49, "end": 985.79, "word": " the", "probability": 0.76171875}, {"start": 985.79, "end": 986.15, "word": " first", "probability": 0.86328125}, {"start": 986.15, "end": 986.53, "word": " value", "probability": 0.9716796875}, {"start": 986.53, "end": 
986.93, "word": " here", "probability": 0.82763671875}, {"start": 986.93, "end": 988.11, "word": " is", "probability": 0.9150390625}, {"start": 988.11, "end": 988.31, "word": " 0", "probability": 0.9462890625}, {"start": 988.31, "end": 989.07, "word": ".685", "probability": 0.96826171875}, {"start": 989.07, "end": 990.91, "word": " next", "probability": 0.7890625}, {"start": 990.91, "end": 991.13, "word": " one", "probability": 0.919921875}, {"start": 991.13, "end": 991.27, "word": " is", "probability": 0.9384765625}, {"start": 991.27, "end": 991.45, "word": " 0", "probability": 0.94384765625}, {"start": 991.45, "end": 991.83, "word": ".8", "probability": 0.982177734375}, {"start": 991.83, "end": 992.25, "word": " so", "probability": 0.767578125}, {"start": 992.25, "end": 992.55, "word": " it's", "probability": 0.897216796875}, {"start": 992.55, "end": 993.11, "word": " increased", "probability": 0.6728515625}, {"start": 993.11, "end": 994.91, "word": " so", "probability": 0.869140625}, {"start": 994.91, "end": 995.47, "word": " my", "probability": 0.95849609375}, {"start": 995.47, "end": 995.89, "word": " answer", "probability": 0.95849609375}, {"start": 995.89, "end": 996.23, "word": " could", "probability": 0.88525390625}, {"start": 996.23, "end": 996.57, "word": " be", "probability": 0.9541015625}, {"start": 996.57, "end": 998.37, "word": " this", "probability": 0.931640625}, {"start": 998.37, "end": 998.75, "word": " P", "probability": 0.458251953125}, {"start": 998.75, "end": 999.15, "word": " value", "probability": 0.8994140625}, {"start": 999.15, "end": 999.89, "word": " over", "probability": 0.54052734375}, {"start": 999.89, "end": 1000.41, "word": " 25", "probability": 0.6552734375}, {"start": 1000.41, "end": 1001.09, "word": " or", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 38, "seek": 102830, "start": 1001.82, "end": 1028.3, "text": " You think my point 55 is to the left of this value? Yes. So my B value is to the left of this point. 
Now here, as this statistic decreases, B value decreases. So vice versa. There is inverse relationship between T value and B value. Now, 25 to the right, 20, 15, so the values are", "tokens": [509, 519, 452, 935, 12330, 307, 281, 264, 1411, 295, 341, 2158, 30, 1079, 13, 407, 452, 363, 2158, 307, 281, 264, 1411, 295, 341, 935, 13, 823, 510, 11, 382, 341, 29588, 24108, 11, 363, 2158, 24108, 13, 407, 11964, 25650, 13, 821, 307, 17340, 2480, 1296, 314, 2158, 293, 363, 2158, 13, 823, 11, 3552, 281, 264, 558, 11, 945, 11, 2119, 11, 370, 264, 4190, 366], "avg_logprob": -0.23750000212873731, "compression_ratio": 1.6826347305389222, "no_speech_prob": 0.0, "words": [{"start": 1001.82, "end": 1002.06, "word": " You", "probability": 0.422119140625}, {"start": 1002.06, "end": 1002.36, "word": " think", "probability": 0.88671875}, {"start": 1002.36, "end": 1002.96, "word": " my", "probability": 0.88525390625}, {"start": 1002.96, "end": 1003.48, "word": " point", "probability": 0.44677734375}, {"start": 1003.48, "end": 1003.86, "word": " 55", "probability": 0.541015625}, {"start": 1003.86, "end": 1004.42, "word": " is", "probability": 0.919921875}, {"start": 1004.42, "end": 1004.54, "word": " to", "probability": 0.9404296875}, {"start": 1004.54, "end": 1004.68, "word": " the", "probability": 0.9169921875}, {"start": 1004.68, "end": 1004.9, "word": " left", "probability": 0.94580078125}, {"start": 1004.9, "end": 1005.04, "word": " of", "probability": 0.96337890625}, {"start": 1005.04, "end": 1005.24, "word": " this", "probability": 0.9404296875}, {"start": 1005.24, "end": 1005.54, "word": " value?", "probability": 0.97021484375}, {"start": 1005.66, "end": 1005.88, "word": " Yes.", "probability": 0.52685546875}, {"start": 1006.2, "end": 1006.42, "word": " So", "probability": 0.92919921875}, {"start": 1006.42, "end": 1006.64, "word": " my", "probability": 0.830078125}, {"start": 1006.64, "end": 1006.78, "word": " B", "probability": 0.466796875}, {"start": 1006.78, "end": 1007.02, 
"word": " value", "probability": 0.8603515625}, {"start": 1007.02, "end": 1007.22, "word": " is", "probability": 0.94189453125}, {"start": 1007.22, "end": 1007.34, "word": " to", "probability": 0.9501953125}, {"start": 1007.34, "end": 1007.5, "word": " the", "probability": 0.912109375}, {"start": 1007.5, "end": 1007.76, "word": " left", "probability": 0.9453125}, {"start": 1007.76, "end": 1007.98, "word": " of", "probability": 0.943359375}, {"start": 1007.98, "end": 1008.22, "word": " this", "probability": 0.92041015625}, {"start": 1008.22, "end": 1008.58, "word": " point.", "probability": 0.59375}, {"start": 1009.66, "end": 1010.26, "word": " Now", "probability": 0.94482421875}, {"start": 1010.26, "end": 1010.62, "word": " here,", "probability": 0.64208984375}, {"start": 1010.76, "end": 1011.06, "word": " as", "probability": 0.95751953125}, {"start": 1011.06, "end": 1011.32, "word": " this", "probability": 0.403564453125}, {"start": 1011.32, "end": 1011.82, "word": " statistic", "probability": 0.82958984375}, {"start": 1011.82, "end": 1012.46, "word": " decreases,", "probability": 0.51318359375}, {"start": 1013.84, "end": 1014.22, "word": " B", "probability": 0.9541015625}, {"start": 1014.22, "end": 1014.52, "word": " value", "probability": 0.9697265625}, {"start": 1014.52, "end": 1015.04, "word": " decreases.", "probability": 0.96630859375}, {"start": 1016.72, "end": 1016.72, "word": " So", "probability": 0.27001953125}, {"start": 1016.72, "end": 1018.02, "word": " vice", "probability": 0.8525390625}, {"start": 1018.02, "end": 1018.38, "word": " versa.", "probability": 0.83203125}, {"start": 1018.38, "end": 1018.62, "word": " There", "probability": 0.783203125}, {"start": 1018.62, "end": 1018.82, "word": " is", "probability": 0.89794921875}, {"start": 1018.82, "end": 1019.52, "word": " inverse", "probability": 0.7548828125}, {"start": 1019.52, "end": 1020.08, "word": " relationship", "probability": 0.91162109375}, {"start": 1020.08, "end": 1020.5, "word": " 
between", "probability": 0.8623046875}, {"start": 1020.5, "end": 1020.84, "word": " T", "probability": 0.481201171875}, {"start": 1020.84, "end": 1021.18, "word": " value", "probability": 0.94921875}, {"start": 1021.18, "end": 1021.58, "word": " and", "probability": 0.94189453125}, {"start": 1021.58, "end": 1022.32, "word": " B", "probability": 0.88818359375}, {"start": 1022.32, "end": 1022.7, "word": " value.", "probability": 0.97412109375}, {"start": 1023.34, "end": 1023.62, "word": " Now,", "probability": 0.95849609375}, {"start": 1024.26, "end": 1024.78, "word": " 25", "probability": 0.88427734375}, {"start": 1024.78, "end": 1025.6, "word": " to", "probability": 0.89599609375}, {"start": 1025.6, "end": 1025.72, "word": " the", "probability": 0.85302734375}, {"start": 1025.72, "end": 1025.9, "word": " right,", "probability": 0.92041015625}, {"start": 1026.0, "end": 1026.28, "word": " 20,", "probability": 0.77880859375}, {"start": 1026.4, "end": 1026.86, "word": " 15,", "probability": 0.9658203125}, {"start": 1027.06, "end": 1027.3, "word": " so", "probability": 0.94677734375}, {"start": 1027.3, "end": 1027.5, "word": " the", "probability": 0.91552734375}, {"start": 1027.5, "end": 1027.92, "word": " values", "probability": 0.96435546875}, {"start": 1027.92, "end": 1028.3, "word": " are", "probability": 0.93896484375}], "temperature": 1.0}, {"id": 39, "seek": 105530, "start": 1028.92, "end": 1055.3, "text": " decreasing in this case. So what do you think? My B value is above or smaller than 25? Above. So your B value in this case is greater than 25. Again, T table does not give the exact B value. So just you can say my B value is above 25%. 
Always, as we mentioned before, we reject the null hypothesis.", "tokens": [23223, 294, 341, 1389, 13, 407, 437, 360, 291, 519, 30, 1222, 363, 2158, 307, 3673, 420, 4356, 813, 3552, 30, 32691, 13, 407, 428, 363, 2158, 294, 341, 1389, 307, 5044, 813, 3552, 13, 3764, 11, 314, 3199, 775, 406, 976, 264, 1900, 363, 2158, 13, 407, 445, 291, 393, 584, 452, 363, 2158, 307, 3673, 3552, 6856, 11270, 11, 382, 321, 2835, 949, 11, 321, 8248, 264, 18184, 17291, 13], "avg_logprob": -0.1584974331398533, "compression_ratio": 1.5654450261780104, "no_speech_prob": 0.0, "words": [{"start": 1028.92, "end": 1029.42, "word": " decreasing", "probability": 0.494873046875}, {"start": 1029.42, "end": 1029.62, "word": " in", "probability": 0.82861328125}, {"start": 1029.62, "end": 1029.8, "word": " this", "probability": 0.943359375}, {"start": 1029.8, "end": 1030.14, "word": " case.", "probability": 0.9140625}, {"start": 1030.58, "end": 1030.82, "word": " So", "probability": 0.94384765625}, {"start": 1030.82, "end": 1031.02, "word": " what", "probability": 0.8193359375}, {"start": 1031.02, "end": 1031.14, "word": " do", "probability": 0.8544921875}, {"start": 1031.14, "end": 1031.22, "word": " you", "probability": 0.92919921875}, {"start": 1031.22, "end": 1031.44, "word": " think?", "probability": 0.9130859375}, {"start": 1031.66, "end": 1031.84, "word": " My", "probability": 0.86572265625}, {"start": 1031.84, "end": 1031.96, "word": " B", "probability": 0.59326171875}, {"start": 1031.96, "end": 1032.2, "word": " value", "probability": 0.8154296875}, {"start": 1032.2, "end": 1032.46, "word": " is", "probability": 0.9453125}, {"start": 1032.46, "end": 1033.2, "word": " above", "probability": 0.91845703125}, {"start": 1033.2, "end": 1034.1, "word": " or", "probability": 0.88671875}, {"start": 1034.1, "end": 1034.62, "word": " smaller", "probability": 0.88232421875}, {"start": 1034.62, "end": 1034.98, "word": " than", "probability": 0.9462890625}, {"start": 1034.98, "end": 1036.04, "word": " 
25?", "probability": 0.708984375}, {"start": 1036.36, "end": 1036.88, "word": " Above.", "probability": 0.74462890625}, {"start": 1037.14, "end": 1037.42, "word": " So", "probability": 0.89453125}, {"start": 1037.42, "end": 1037.7, "word": " your", "probability": 0.82421875}, {"start": 1037.7, "end": 1037.88, "word": " B", "probability": 0.95166015625}, {"start": 1037.88, "end": 1038.16, "word": " value", "probability": 0.96875}, {"start": 1038.16, "end": 1038.42, "word": " in", "probability": 0.86865234375}, {"start": 1038.42, "end": 1038.64, "word": " this", "probability": 0.9453125}, {"start": 1038.64, "end": 1039.06, "word": " case", "probability": 0.912109375}, {"start": 1039.06, "end": 1040.08, "word": " is", "probability": 0.88671875}, {"start": 1040.08, "end": 1040.44, "word": " greater", "probability": 0.85205078125}, {"start": 1040.44, "end": 1041.0, "word": " than", "probability": 0.9384765625}, {"start": 1041.0, "end": 1042.46, "word": " 25.", "probability": 0.9521484375}, {"start": 1043.16, "end": 1043.64, "word": " Again,", "probability": 0.94384765625}, {"start": 1043.92, "end": 1044.04, "word": " T", "probability": 0.53271484375}, {"start": 1044.04, "end": 1044.32, "word": " table", "probability": 0.5498046875}, {"start": 1044.32, "end": 1045.02, "word": " does", "probability": 0.97412109375}, {"start": 1045.02, "end": 1045.22, "word": " not", "probability": 0.94873046875}, {"start": 1045.22, "end": 1045.42, "word": " give", "probability": 0.88232421875}, {"start": 1045.42, "end": 1045.56, "word": " the", "probability": 0.90576171875}, {"start": 1045.56, "end": 1046.0, "word": " exact", "probability": 0.9287109375}, {"start": 1046.0, "end": 1046.26, "word": " B", "probability": 0.865234375}, {"start": 1046.26, "end": 1046.6, "word": " value.", "probability": 0.9638671875}, {"start": 1046.86, "end": 1047.32, "word": " So", "probability": 0.94873046875}, {"start": 1047.32, "end": 1047.72, "word": " just", "probability": 0.74169921875}, {"start": 
1047.72, "end": 1047.94, "word": " you", "probability": 0.81396484375}, {"start": 1047.94, "end": 1048.12, "word": " can", "probability": 0.94140625}, {"start": 1048.12, "end": 1048.4, "word": " say", "probability": 0.82275390625}, {"start": 1048.4, "end": 1048.62, "word": " my", "probability": 0.8486328125}, {"start": 1048.62, "end": 1048.82, "word": " B", "probability": 0.98095703125}, {"start": 1048.82, "end": 1049.08, "word": " value", "probability": 0.96875}, {"start": 1049.08, "end": 1049.28, "word": " is", "probability": 0.939453125}, {"start": 1049.28, "end": 1049.58, "word": " above", "probability": 0.96337890625}, {"start": 1049.58, "end": 1050.56, "word": " 25%.", "probability": 0.927490234375}, {"start": 1050.56, "end": 1052.28, "word": " Always,", "probability": 0.8701171875}, {"start": 1052.48, "end": 1052.68, "word": " as", "probability": 0.96435546875}, {"start": 1052.68, "end": 1052.82, "word": " we", "probability": 0.9580078125}, {"start": 1052.82, "end": 1053.06, "word": " mentioned", "probability": 0.83642578125}, {"start": 1053.06, "end": 1053.52, "word": " before,", "probability": 0.86669921875}, {"start": 1053.76, "end": 1054.02, "word": " we", "probability": 0.96044921875}, {"start": 1054.02, "end": 1054.46, "word": " reject", "probability": 0.90576171875}, {"start": 1054.46, "end": 1054.66, "word": " the", "probability": 0.390869140625}, {"start": 1054.66, "end": 1054.8, "word": " null", "probability": 0.96240234375}, {"start": 1054.8, "end": 1055.3, "word": " hypothesis.", "probability": 0.8251953125}], "temperature": 1.0}, {"id": 40, "seek": 107668, "start": 1056.7, "end": 1076.68, "text": " Always we reject H0 if your B value is smaller than R. Always. Now this rule does not depend on the alternative hypothesis. 
Always we reject the null hypothesis if my B value is smaller than R.", "tokens": [11270, 321, 8248, 389, 15, 498, 428, 363, 2158, 307, 4356, 813, 497, 13, 11270, 13, 823, 341, 4978, 775, 406, 5672, 322, 264, 8535, 17291, 13, 11270, 321, 8248, 264, 18184, 17291, 498, 452, 363, 2158, 307, 4356, 813, 497, 13], "avg_logprob": -0.27870640921038253, "compression_ratio": 1.552, "no_speech_prob": 0.0, "words": [{"start": 1056.7, "end": 1057.5, "word": " Always", "probability": 0.1778564453125}, {"start": 1057.5, "end": 1058.3, "word": " we", "probability": 0.343994140625}, {"start": 1058.3, "end": 1058.72, "word": " reject", "probability": 0.88330078125}, {"start": 1058.72, "end": 1059.64, "word": " H0", "probability": 0.5721435546875}, {"start": 1059.64, "end": 1060.56, "word": " if", "probability": 0.8486328125}, {"start": 1060.56, "end": 1060.84, "word": " your", "probability": 0.779296875}, {"start": 1060.84, "end": 1061.0, "word": " B", "probability": 0.466064453125}, {"start": 1061.0, "end": 1061.46, "word": " value", "probability": 0.6455078125}, {"start": 1061.46, "end": 1062.88, "word": " is", "probability": 0.884765625}, {"start": 1062.88, "end": 1063.4, "word": " smaller", "probability": 0.80029296875}, {"start": 1063.4, "end": 1063.7, "word": " than", "probability": 0.93701171875}, {"start": 1063.7, "end": 1063.84, "word": " R.", "probability": 0.7158203125}, {"start": 1065.24, "end": 1065.84, "word": " Always.", "probability": 0.6845703125}, {"start": 1066.9, "end": 1067.28, "word": " Now", "probability": 0.83349609375}, {"start": 1067.28, "end": 1067.72, "word": " this", "probability": 0.66455078125}, {"start": 1067.72, "end": 1068.12, "word": " rule", "probability": 0.90478515625}, {"start": 1068.12, "end": 1068.48, "word": " does", "probability": 0.9521484375}, {"start": 1068.48, "end": 1068.66, "word": " not", "probability": 0.94482421875}, {"start": 1068.66, "end": 1069.02, "word": " depend", "probability": 0.93701171875}, {"start": 1069.02, "end": 
1069.66, "word": " on", "probability": 0.943359375}, {"start": 1069.66, "end": 1069.96, "word": " the", "probability": 0.82177734375}, {"start": 1069.96, "end": 1070.34, "word": " alternative", "probability": 0.3701171875}, {"start": 1070.34, "end": 1070.9, "word": " hypothesis.", "probability": 0.80810546875}, {"start": 1071.84, "end": 1072.42, "word": " Always", "probability": 0.8037109375}, {"start": 1072.42, "end": 1072.88, "word": " we", "probability": 0.80517578125}, {"start": 1072.88, "end": 1073.26, "word": " reject", "probability": 0.93115234375}, {"start": 1073.26, "end": 1073.46, "word": " the", "probability": 0.8857421875}, {"start": 1073.46, "end": 1073.62, "word": " null", "probability": 0.9443359375}, {"start": 1073.62, "end": 1074.18, "word": " hypothesis", "probability": 0.84423828125}, {"start": 1074.18, "end": 1075.04, "word": " if", "probability": 0.86328125}, {"start": 1075.04, "end": 1075.32, "word": " my", "probability": 0.96728515625}, {"start": 1075.32, "end": 1075.48, "word": " B", "probability": 0.95458984375}, {"start": 1075.48, "end": 1075.8, "word": " value", "probability": 0.935546875}, {"start": 1075.8, "end": 1075.94, "word": " is", "probability": 0.94580078125}, {"start": 1075.94, "end": 1076.16, "word": " smaller", "probability": 0.84375}, {"start": 1076.16, "end": 1076.44, "word": " than", "probability": 0.9482421875}, {"start": 1076.44, "end": 1076.68, "word": " R.", "probability": 0.9921875}], "temperature": 1.0}, {"id": 41, "seek": 110392, "start": 1080.0, "end": 1103.92, "text": " Does this B value smaller than alpha, alpha of 10%? My B value is above 25, greater than 0.25%. So this B value is above 10%. So we have to reject if B value is smaller than alpha. In this case, B value is greater than alpha, so we don't reject alpha. 
Make sense?", "tokens": [4402, 341, 363, 2158, 4356, 813, 8961, 11, 8961, 295, 1266, 4, 30, 1222, 363, 2158, 307, 3673, 3552, 11, 5044, 813, 1958, 13, 6074, 6856, 407, 341, 363, 2158, 307, 3673, 1266, 6856, 407, 321, 362, 281, 8248, 498, 363, 2158, 307, 4356, 813, 8961, 13, 682, 341, 1389, 11, 363, 2158, 307, 5044, 813, 8961, 11, 370, 321, 500, 380, 8248, 8961, 13, 4387, 2020, 30], "avg_logprob": -0.18885869219683218, "compression_ratio": 1.736842105263158, "no_speech_prob": 0.0, "words": [{"start": 1080.0, "end": 1080.52, "word": " Does", "probability": 0.5771484375}, {"start": 1080.52, "end": 1080.8, "word": " this", "probability": 0.93115234375}, {"start": 1080.8, "end": 1080.94, "word": " B", "probability": 0.2626953125}, {"start": 1080.94, "end": 1081.34, "word": " value", "probability": 0.8701171875}, {"start": 1081.34, "end": 1081.92, "word": " smaller", "probability": 0.5693359375}, {"start": 1081.92, "end": 1082.16, "word": " than", "probability": 0.95166015625}, {"start": 1082.16, "end": 1082.5, "word": " alpha,", "probability": 0.8125}, {"start": 1082.92, "end": 1083.3, "word": " alpha", "probability": 0.9248046875}, {"start": 1083.3, "end": 1083.66, "word": " of", "probability": 0.96923828125}, {"start": 1083.66, "end": 1083.92, "word": " 10", "probability": 0.86767578125}, {"start": 1083.92, "end": 1084.32, "word": "%?", "probability": 0.62255859375}, {"start": 1084.94, "end": 1085.22, "word": " My", "probability": 0.434326171875}, {"start": 1085.22, "end": 1085.72, "word": " B", "probability": 0.93505859375}, {"start": 1085.72, "end": 1085.94, "word": " value", "probability": 0.9501953125}, {"start": 1085.94, "end": 1086.16, "word": " is", "probability": 0.93603515625}, {"start": 1086.16, "end": 1086.52, "word": " above", "probability": 0.93310546875}, {"start": 1086.52, "end": 1087.0, "word": " 25,", "probability": 0.97021484375}, {"start": 1087.22, "end": 1087.58, "word": " greater", "probability": 0.861328125}, {"start": 1087.58, "end": 1087.92, 
"word": " than", "probability": 0.94189453125}, {"start": 1087.92, "end": 1088.12, "word": " 0", "probability": 0.484130859375}, {"start": 1088.12, "end": 1090.22, "word": ".25%.", "probability": 0.9275716145833334}, {"start": 1090.22, "end": 1090.58, "word": " So", "probability": 0.9482421875}, {"start": 1090.58, "end": 1090.82, "word": " this", "probability": 0.8330078125}, {"start": 1090.82, "end": 1091.1, "word": " B", "probability": 0.96630859375}, {"start": 1091.1, "end": 1091.42, "word": " value", "probability": 0.97314453125}, {"start": 1091.42, "end": 1091.72, "word": " is", "probability": 0.94482421875}, {"start": 1091.72, "end": 1092.08, "word": " above", "probability": 0.9541015625}, {"start": 1092.08, "end": 1092.66, "word": " 10%.", "probability": 0.951171875}, {"start": 1092.66, "end": 1094.24, "word": " So", "probability": 0.9609375}, {"start": 1094.24, "end": 1094.52, "word": " we", "probability": 0.892578125}, {"start": 1094.52, "end": 1094.9, "word": " have", "probability": 0.9453125}, {"start": 1094.9, "end": 1095.44, "word": " to", "probability": 0.96484375}, {"start": 1095.44, "end": 1095.8, "word": " reject", "probability": 0.916015625}, {"start": 1095.8, "end": 1096.18, "word": " if", "probability": 0.7880859375}, {"start": 1096.18, "end": 1096.4, "word": " B", "probability": 0.7880859375}, {"start": 1096.4, "end": 1096.6, "word": " value", "probability": 0.91455078125}, {"start": 1096.6, "end": 1096.72, "word": " is", "probability": 0.69140625}, {"start": 1096.72, "end": 1096.94, "word": " smaller", "probability": 0.85986328125}, {"start": 1096.94, "end": 1097.14, "word": " than", "probability": 0.94921875}, {"start": 1097.14, "end": 1097.4, "word": " alpha.", "probability": 0.92138671875}, {"start": 1097.82, "end": 1097.98, "word": " In", "probability": 0.91943359375}, {"start": 1097.98, "end": 1098.16, "word": " this", "probability": 0.9482421875}, {"start": 1098.16, "end": 1098.34, "word": " case,", "probability": 0.9072265625}, 
{"start": 1098.38, "end": 1098.52, "word": " B", "probability": 0.94580078125}, {"start": 1098.52, "end": 1098.72, "word": " value", "probability": 0.95263671875}, {"start": 1098.72, "end": 1098.9, "word": " is", "probability": 0.9365234375}, {"start": 1098.9, "end": 1099.54, "word": " greater", "probability": 0.70849609375}, {"start": 1099.54, "end": 1099.82, "word": " than", "probability": 0.9482421875}, {"start": 1099.82, "end": 1100.08, "word": " alpha,", "probability": 0.9140625}, {"start": 1100.2, "end": 1100.38, "word": " so", "probability": 0.94580078125}, {"start": 1100.38, "end": 1100.62, "word": " we", "probability": 0.9521484375}, {"start": 1100.62, "end": 1100.9, "word": " don't", "probability": 0.96875}, {"start": 1100.9, "end": 1101.3, "word": " reject", "probability": 0.9052734375}, {"start": 1101.3, "end": 1101.58, "word": " alpha.", "probability": 0.376953125}, {"start": 1102.96, "end": 1103.54, "word": " Make", "probability": 0.8583984375}, {"start": 1103.54, "end": 1103.92, "word": " sense?", "probability": 0.84619140625}], "temperature": 1.0}, {"id": 42, "seek": 113634, "start": 1106.75, "end": 1136.35, "text": " Now, the exact v-value is 0.2937. T-table gives this result, greater than 25%. The exact is 0.2937. Again, since my v-value is above 10% or greater than 10%, so we don't reject the null hypothesis. 
Now, let's see how can we use Excel to find the exact.", "tokens": [823, 11, 264, 1900, 371, 12, 29155, 307, 1958, 13, 11871, 12851, 13, 314, 12, 23811, 2709, 341, 1874, 11, 5044, 813, 3552, 6856, 440, 1900, 307, 1958, 13, 11871, 12851, 13, 3764, 11, 1670, 452, 371, 12, 29155, 307, 3673, 1266, 4, 420, 5044, 813, 1266, 8923, 370, 321, 500, 380, 8248, 264, 18184, 17291, 13, 823, 11, 718, 311, 536, 577, 393, 321, 764, 19060, 281, 915, 264, 1900, 13], "avg_logprob": -0.1651327038464481, "compression_ratio": 1.4709302325581395, "no_speech_prob": 0.0, "words": [{"start": 1106.75, "end": 1107.13, "word": " Now,", "probability": 0.7587890625}, {"start": 1107.35, "end": 1107.61, "word": " the", "probability": 0.88037109375}, {"start": 1107.61, "end": 1108.03, "word": " exact", "probability": 0.91748046875}, {"start": 1108.03, "end": 1108.29, "word": " v", "probability": 0.293701171875}, {"start": 1108.29, "end": 1108.61, "word": "-value", "probability": 0.7252197265625}, {"start": 1108.61, "end": 1109.55, "word": " is", "probability": 0.93603515625}, {"start": 1109.55, "end": 1109.81, "word": " 0", "probability": 0.7783203125}, {"start": 1109.81, "end": 1111.05, "word": ".2937.", "probability": 0.97900390625}, {"start": 1113.37, "end": 1114.13, "word": " T", "probability": 0.57275390625}, {"start": 1114.13, "end": 1114.41, "word": "-table", "probability": 0.67919921875}, {"start": 1114.41, "end": 1114.77, "word": " gives", "probability": 0.9013671875}, {"start": 1114.77, "end": 1115.01, "word": " this", "probability": 0.9306640625}, {"start": 1115.01, "end": 1115.39, "word": " result,", "probability": 0.9169921875}, {"start": 1116.03, "end": 1116.53, "word": " greater", "probability": 0.86083984375}, {"start": 1116.53, "end": 1116.87, "word": " than", "probability": 0.9453125}, {"start": 1116.87, "end": 1117.69, "word": " 25%.", "probability": 0.889404296875}, {"start": 1117.69, "end": 1118.97, "word": " The", "probability": 0.8896484375}, {"start": 1118.97, "end": 1119.39, 
"word": " exact", "probability": 0.95361328125}, {"start": 1119.39, "end": 1119.63, "word": " is", "probability": 0.9365234375}, {"start": 1119.63, "end": 1119.83, "word": " 0", "probability": 0.98291015625}, {"start": 1119.83, "end": 1121.19, "word": ".2937.", "probability": 0.9856770833333334}, {"start": 1121.75, "end": 1122.07, "word": " Again,", "probability": 0.935546875}, {"start": 1122.73, "end": 1123.13, "word": " since", "probability": 0.8369140625}, {"start": 1123.13, "end": 1123.47, "word": " my", "probability": 0.9658203125}, {"start": 1123.47, "end": 1123.61, "word": " v", "probability": 0.8447265625}, {"start": 1123.61, "end": 1123.87, "word": "-value", "probability": 0.964599609375}, {"start": 1123.87, "end": 1124.07, "word": " is", "probability": 0.939453125}, {"start": 1124.07, "end": 1124.43, "word": " above", "probability": 0.9541015625}, {"start": 1124.43, "end": 1124.75, "word": " 10", "probability": 0.95947265625}, {"start": 1124.75, "end": 1125.15, "word": "%", "probability": 0.68115234375}, {"start": 1125.15, "end": 1125.37, "word": " or", "probability": 0.9619140625}, {"start": 1125.37, "end": 1125.71, "word": " greater", "probability": 0.9052734375}, {"start": 1125.71, "end": 1125.97, "word": " than", "probability": 0.93359375}, {"start": 1125.97, "end": 1126.73, "word": " 10%,", "probability": 0.880126953125}, {"start": 1126.73, "end": 1127.25, "word": " so", "probability": 0.85302734375}, {"start": 1127.25, "end": 1128.15, "word": " we", "probability": 0.93212890625}, {"start": 1128.15, "end": 1128.53, "word": " don't", "probability": 0.902099609375}, {"start": 1128.53, "end": 1129.13, "word": " reject", "probability": 0.91162109375}, {"start": 1129.13, "end": 1129.93, "word": " the", "probability": 0.869140625}, {"start": 1129.93, "end": 1130.31, "word": " null", "probability": 0.95751953125}, {"start": 1130.31, "end": 1130.73, "word": " hypothesis.", "probability": 0.87255859375}, {"start": 1131.15, "end": 1131.37, "word": " Now,", 
"probability": 0.94775390625}, {"start": 1131.41, "end": 1131.63, "word": " let's", "probability": 0.971435546875}, {"start": 1131.63, "end": 1131.73, "word": " see", "probability": 0.91357421875}, {"start": 1131.73, "end": 1131.85, "word": " how", "probability": 0.82080078125}, {"start": 1131.85, "end": 1132.89, "word": " can", "probability": 0.82958984375}, {"start": 1132.89, "end": 1133.01, "word": " we", "probability": 0.51220703125}, {"start": 1133.01, "end": 1133.43, "word": " use", "probability": 0.8701171875}, {"start": 1133.43, "end": 1133.77, "word": " Excel", "probability": 0.8857421875}, {"start": 1133.77, "end": 1134.11, "word": " to", "probability": 0.962890625}, {"start": 1134.11, "end": 1134.59, "word": " find", "probability": 0.88623046875}, {"start": 1134.59, "end": 1135.81, "word": " the", "probability": 0.92041015625}, {"start": 1135.81, "end": 1136.35, "word": " exact.", "probability": 0.962890625}], "temperature": 1.0}, {"id": 43, "seek": 115927, "start": 1138.83, "end": 1159.27, "text": " I will use Excel to find the exact b-value. Here you press on function. 
Then we have here T distribution.", "tokens": [286, 486, 764, 19060, 281, 915, 264, 1900, 272, 12, 29155, 13, 1692, 291, 1886, 322, 2445, 13, 1396, 321, 362, 510, 314, 7316, 13], "avg_logprob": -0.4002403754454393, "compression_ratio": 1.127659574468085, "no_speech_prob": 0.0, "words": [{"start": 1138.8300000000002, "end": 1139.8700000000001, "word": " I", "probability": 0.30322265625}, {"start": 1139.8700000000001, "end": 1140.91, "word": " will", "probability": 0.828125}, {"start": 1140.91, "end": 1141.25, "word": " use", "probability": 0.869140625}, {"start": 1141.25, "end": 1141.97, "word": " Excel", "probability": 0.5009765625}, {"start": 1141.97, "end": 1142.81, "word": " to", "probability": 0.87255859375}, {"start": 1142.81, "end": 1143.19, "word": " find", "probability": 0.8720703125}, {"start": 1143.19, "end": 1143.41, "word": " the", "probability": 0.7490234375}, {"start": 1143.41, "end": 1143.93, "word": " exact", "probability": 0.9091796875}, {"start": 1143.93, "end": 1144.65, "word": " b", "probability": 0.380126953125}, {"start": 1144.65, "end": 1144.97, "word": "-value.", "probability": 0.828369140625}, {"start": 1149.73, "end": 1150.77, "word": " Here", "probability": 0.477294921875}, {"start": 1150.77, "end": 1150.93, "word": " you", "probability": 0.68212890625}, {"start": 1150.93, "end": 1151.37, "word": " press", "probability": 0.76123046875}, {"start": 1151.37, "end": 1151.67, "word": " on", "probability": 0.89892578125}, {"start": 1151.67, "end": 1152.19, "word": " function.", "probability": 0.7529296875}, {"start": 1156.05, "end": 1157.09, "word": " Then", "probability": 0.771484375}, {"start": 1157.09, "end": 1157.55, "word": " we", "probability": 0.77880859375}, {"start": 1157.55, "end": 1157.85, "word": " have", "probability": 0.94970703125}, {"start": 1157.85, "end": 1158.19, "word": " here", "probability": 0.8349609375}, {"start": 1158.19, "end": 1158.55, "word": " T", "probability": 0.34912109375}, {"start": 1158.55, "end": 1159.27, 
"word": " distribution.", "probability": 0.61181640625}], "temperature": 1.0}, {"id": 44, "seek": 118603, "start": 1169.57, "end": 1186.03, "text": " X, it means the value of the test statistic. In this case, it's 0.55. Degrees of freedom at one floor. Tails, we are talking about one-tailed test, so it's one.", "tokens": [1783, 11, 309, 1355, 264, 2158, 295, 264, 1500, 29588, 13, 682, 341, 1389, 11, 309, 311, 1958, 13, 13622, 13, 413, 1146, 4856, 295, 5645, 412, 472, 4123, 13, 49888, 11, 321, 366, 1417, 466, 472, 12, 14430, 292, 1500, 11, 370, 309, 311, 472, 13], "avg_logprob": -0.20947265407691398, "compression_ratio": 1.3089430894308942, "no_speech_prob": 0.0, "words": [{"start": 1169.5700000000002, "end": 1170.17, "word": " X,", "probability": 0.7265625}, {"start": 1170.29, "end": 1170.49, "word": " it", "probability": 0.9384765625}, {"start": 1170.49, "end": 1170.77, "word": " means", "probability": 0.92578125}, {"start": 1170.77, "end": 1170.99, "word": " the", "probability": 0.90234375}, {"start": 1170.99, "end": 1171.29, "word": " value", "probability": 0.97509765625}, {"start": 1171.29, "end": 1171.55, "word": " of", "probability": 0.966796875}, {"start": 1171.55, "end": 1171.87, "word": " the", "probability": 0.912109375}, {"start": 1171.87, "end": 1172.39, "word": " test", "probability": 0.8359375}, {"start": 1172.39, "end": 1173.01, "word": " statistic.", "probability": 0.78955078125}, {"start": 1173.71, "end": 1173.91, "word": " In", "probability": 0.92724609375}, {"start": 1173.91, "end": 1174.13, "word": " this", "probability": 0.9453125}, {"start": 1174.13, "end": 1174.43, "word": " case,", "probability": 0.9130859375}, {"start": 1174.51, "end": 1174.57, "word": " it's", "probability": 0.906982421875}, {"start": 1174.57, "end": 1174.77, "word": " 0", "probability": 0.4765625}, {"start": 1174.77, "end": 1175.35, "word": ".55.", "probability": 0.988037109375}, {"start": 1177.35, "end": 1177.81, "word": " Degrees", "probability": 0.9417317708333334}, 
{"start": 1177.81, "end": 1178.03, "word": " of", "probability": 0.96923828125}, {"start": 1178.03, "end": 1178.41, "word": " freedom", "probability": 0.8798828125}, {"start": 1178.41, "end": 1179.33, "word": " at", "probability": 0.32373046875}, {"start": 1179.33, "end": 1179.55, "word": " one", "probability": 0.81103515625}, {"start": 1179.55, "end": 1179.93, "word": " floor.", "probability": 0.83642578125}, {"start": 1181.55, "end": 1181.91, "word": " Tails,", "probability": 0.5439453125}, {"start": 1182.89, "end": 1183.21, "word": " we", "probability": 0.9521484375}, {"start": 1183.21, "end": 1183.37, "word": " are", "probability": 0.92724609375}, {"start": 1183.37, "end": 1183.65, "word": " talking", "probability": 0.8486328125}, {"start": 1183.65, "end": 1183.99, "word": " about", "probability": 0.90966796875}, {"start": 1183.99, "end": 1184.35, "word": " one", "probability": 0.90869140625}, {"start": 1184.35, "end": 1184.77, "word": "-tailed", "probability": 0.7317708333333334}, {"start": 1184.77, "end": 1185.05, "word": " test,", "probability": 0.66845703125}, {"start": 1185.27, "end": 1185.51, "word": " so", "probability": 0.9404296875}, {"start": 1185.51, "end": 1185.79, "word": " it's", "probability": 0.967529296875}, {"start": 1185.79, "end": 1186.03, "word": " one.", "probability": 0.8603515625}], "temperature": 1.0}, {"id": 45, "seek": 121072, "start": 1187.42, "end": 1210.72, "text": " So here, just write the value of the statistic, degrees of freedom, then 1. Now, the exact p-value is 0.2937. So that's the exact p-value. Exact answer equal 0.2937. 
So either one will give the same conclusion.", "tokens": [407, 510, 11, 445, 2464, 264, 2158, 295, 264, 29588, 11, 5310, 295, 5645, 11, 550, 502, 13, 823, 11, 264, 1900, 280, 12, 29155, 307, 1958, 13, 11871, 12851, 13, 407, 300, 311, 264, 1900, 280, 12, 29155, 13, 7199, 1867, 2681, 1958, 13, 11871, 12851, 13, 407, 2139, 472, 486, 976, 264, 912, 10063, 13], "avg_logprob": -0.19733296822884988, "compression_ratio": 1.4452054794520548, "no_speech_prob": 0.0, "words": [{"start": 1187.42, "end": 1187.7, "word": " So", "probability": 0.8359375}, {"start": 1187.7, "end": 1187.94, "word": " here,", "probability": 0.68212890625}, {"start": 1188.04, "end": 1188.36, "word": " just", "probability": 0.69873046875}, {"start": 1188.36, "end": 1189.1, "word": " write", "probability": 0.87109375}, {"start": 1189.1, "end": 1189.48, "word": " the", "probability": 0.900390625}, {"start": 1189.48, "end": 1189.72, "word": " value", "probability": 0.9619140625}, {"start": 1189.72, "end": 1189.9, "word": " of", "probability": 0.734375}, {"start": 1189.9, "end": 1189.96, "word": " the", "probability": 0.72265625}, {"start": 1189.96, "end": 1190.64, "word": " statistic,", "probability": 0.75732421875}, {"start": 1191.2, "end": 1191.56, "word": " degrees", "probability": 0.94873046875}, {"start": 1191.56, "end": 1191.8, "word": " of", "probability": 0.9580078125}, {"start": 1191.8, "end": 1192.04, "word": " freedom,", "probability": 0.95361328125}, {"start": 1192.18, "end": 1192.3, "word": " then", "probability": 0.7939453125}, {"start": 1192.3, "end": 1192.52, "word": " 1.", "probability": 0.68798828125}, {"start": 1192.98, "end": 1193.26, "word": " Now,", "probability": 0.9423828125}, {"start": 1193.68, "end": 1193.86, "word": " the", "probability": 0.91455078125}, {"start": 1193.86, "end": 1194.32, "word": " exact", "probability": 0.92919921875}, {"start": 1194.32, "end": 1194.52, "word": " p", "probability": 0.291259765625}, {"start": 1194.52, "end": 1194.78, "word": "-value", 
"probability": 0.849853515625}, {"start": 1194.78, "end": 1195.48, "word": " is", "probability": 0.9384765625}, {"start": 1195.48, "end": 1195.8, "word": " 0", "probability": 0.86474609375}, {"start": 1195.8, "end": 1197.34, "word": ".2937.", "probability": 0.98291015625}, {"start": 1197.7, "end": 1197.86, "word": " So", "probability": 0.93505859375}, {"start": 1197.86, "end": 1198.14, "word": " that's", "probability": 0.92919921875}, {"start": 1198.14, "end": 1198.28, "word": " the", "probability": 0.91650390625}, {"start": 1198.28, "end": 1198.68, "word": " exact", "probability": 0.96533203125}, {"start": 1198.68, "end": 1198.9, "word": " p", "probability": 0.810546875}, {"start": 1198.9, "end": 1199.14, "word": "-value.", "probability": 0.974609375}, {"start": 1200.08, "end": 1200.68, "word": " Exact", "probability": 0.91162109375}, {"start": 1200.68, "end": 1201.36, "word": " answer", "probability": 0.951171875}, {"start": 1201.36, "end": 1203.78, "word": " equal", "probability": 0.462158203125}, {"start": 1203.78, "end": 1204.24, "word": " 0", "probability": 0.9609375}, {"start": 1204.24, "end": 1206.02, "word": ".2937.", "probability": 0.9832356770833334}, {"start": 1207.48, "end": 1207.84, "word": " So", "probability": 0.740234375}, {"start": 1207.84, "end": 1208.02, "word": " either", "probability": 0.90771484375}, {"start": 1208.02, "end": 1208.32, "word": " one", "probability": 0.92626953125}, {"start": 1208.32, "end": 1208.62, "word": " will", "probability": 0.837890625}, {"start": 1208.62, "end": 1208.9, "word": " give", "probability": 0.88037109375}, {"start": 1208.9, "end": 1209.2, "word": " the", "probability": 0.91650390625}, {"start": 1209.2, "end": 1209.6, "word": " same", "probability": 0.89892578125}, {"start": 1209.6, "end": 1210.72, "word": " conclusion.", "probability": 0.912109375}], "temperature": 1.0}, {"id": 46, "seek": 123531, "start": 1211.79, "end": 1235.31, "text": " It's 2.937 or greater than 25%. 
We reject a p-value smaller than alpha, but this p-value is greater than alpha, so we don't reject another hypothesis. So we end with the same conclusion. Actually, this slide gives how can we use t-test.", "tokens": [467, 311, 568, 13, 24, 12851, 420, 5044, 813, 3552, 6856, 492, 8248, 257, 280, 12, 29155, 4356, 813, 8961, 11, 457, 341, 280, 12, 29155, 307, 5044, 813, 8961, 11, 370, 321, 500, 380, 8248, 1071, 17291, 13, 407, 321, 917, 365, 264, 912, 10063, 13, 5135, 11, 341, 4137, 2709, 577, 393, 321, 764, 256, 12, 31636, 13], "avg_logprob": -0.19531249511437337, "compression_ratio": 1.4539877300613497, "no_speech_prob": 0.0, "words": [{"start": 1211.79, "end": 1212.05, "word": " It's", "probability": 0.791748046875}, {"start": 1212.05, "end": 1212.25, "word": " 2", "probability": 0.97021484375}, {"start": 1212.25, "end": 1213.11, "word": ".937", "probability": 0.96435546875}, {"start": 1213.11, "end": 1213.83, "word": " or", "probability": 0.6728515625}, {"start": 1213.83, "end": 1214.13, "word": " greater", "probability": 0.80322265625}, {"start": 1214.13, "end": 1214.39, "word": " than", "probability": 0.9482421875}, {"start": 1214.39, "end": 1215.41, "word": " 25%.", "probability": 0.91015625}, {"start": 1215.41, "end": 1216.19, "word": " We", "probability": 0.93798828125}, {"start": 1216.19, "end": 1216.53, "word": " reject", "probability": 0.47314453125}, {"start": 1216.53, "end": 1216.67, "word": " a", "probability": 0.44189453125}, {"start": 1216.67, "end": 1216.79, "word": " p", "probability": 0.419921875}, {"start": 1216.79, "end": 1216.97, "word": "-value", "probability": 0.6566162109375}, {"start": 1216.97, "end": 1217.31, "word": " smaller", "probability": 0.85791015625}, {"start": 1217.31, "end": 1217.53, "word": " than", "probability": 0.95361328125}, {"start": 1217.53, "end": 1217.85, "word": " alpha,", "probability": 0.8779296875}, {"start": 1218.21, "end": 1218.69, "word": " but", "probability": 0.92138671875}, {"start": 1218.69, "end": 1218.89, 
"word": " this", "probability": 0.92333984375}, {"start": 1218.89, "end": 1219.07, "word": " p", "probability": 0.9384765625}, {"start": 1219.07, "end": 1219.23, "word": "-value", "probability": 0.959716796875}, {"start": 1219.23, "end": 1219.41, "word": " is", "probability": 0.73046875}, {"start": 1219.41, "end": 1219.89, "word": " greater", "probability": 0.90087890625}, {"start": 1219.89, "end": 1220.15, "word": " than", "probability": 0.947265625}, {"start": 1220.15, "end": 1220.35, "word": " alpha,", "probability": 0.91357421875}, {"start": 1220.41, "end": 1220.55, "word": " so", "probability": 0.94775390625}, {"start": 1220.55, "end": 1220.73, "word": " we", "probability": 0.95556640625}, {"start": 1220.73, "end": 1221.07, "word": " don't", "probability": 0.976806640625}, {"start": 1221.07, "end": 1221.57, "word": " reject", "probability": 0.89990234375}, {"start": 1221.57, "end": 1221.93, "word": " another", "probability": 0.6298828125}, {"start": 1221.93, "end": 1222.39, "word": " hypothesis.", "probability": 0.9814453125}, {"start": 1224.71, "end": 1225.39, "word": " So", "probability": 0.95703125}, {"start": 1225.39, "end": 1225.61, "word": " we", "probability": 0.82861328125}, {"start": 1225.61, "end": 1225.99, "word": " end", "probability": 0.91455078125}, {"start": 1225.99, "end": 1226.47, "word": " with", "probability": 0.8994140625}, {"start": 1226.47, "end": 1226.71, "word": " the", "probability": 0.9140625}, {"start": 1226.71, "end": 1227.09, "word": " same", "probability": 0.900390625}, {"start": 1227.09, "end": 1228.09, "word": " conclusion.", "probability": 0.88623046875}, {"start": 1228.71, "end": 1229.11, "word": " Actually,", "probability": 0.8330078125}, {"start": 1229.19, "end": 1229.35, "word": " this", "probability": 0.8837890625}, {"start": 1229.35, "end": 1229.75, "word": " slide", "probability": 0.9697265625}, {"start": 1229.75, "end": 1232.59, "word": " gives", "probability": 0.85888671875}, {"start": 1232.59, "end": 1233.85, "word": 
" how", "probability": 0.810546875}, {"start": 1233.85, "end": 1234.07, "word": " can", "probability": 0.9169921875}, {"start": 1234.07, "end": 1234.23, "word": " we", "probability": 0.94580078125}, {"start": 1234.23, "end": 1234.57, "word": " use", "probability": 0.89013671875}, {"start": 1234.57, "end": 1234.93, "word": " t", "probability": 0.366455078125}, {"start": 1234.93, "end": 1235.31, "word": "-test.", "probability": 0.851318359375}], "temperature": 1.0}, {"id": 47, "seek": 125509, "start": 1237.88, "end": 1255.1, "text": " Excelsior to find the value of, or find of B value. That's all for testing about the population mean if sigma is known or sigma is unknown for two sided test or one sided, upper or lower sided test.", "tokens": [9368, 1625, 1973, 281, 915, 264, 2158, 295, 11, 420, 915, 295, 363, 2158, 13, 663, 311, 439, 337, 4997, 466, 264, 4415, 914, 498, 12771, 307, 2570, 420, 12771, 307, 9841, 337, 732, 41651, 1500, 420, 472, 41651, 11, 6597, 420, 3126, 41651, 1500, 13], "avg_logprob": -0.3144946897283514, "compression_ratio": 1.5075757575757576, "no_speech_prob": 0.0, "words": [{"start": 1237.8799999999999, "end": 1238.6, "word": " Excelsior", "probability": 0.5890706380208334}, {"start": 1238.6, "end": 1238.96, "word": " to", "probability": 0.7216796875}, {"start": 1238.96, "end": 1239.32, "word": " find", "probability": 0.89404296875}, {"start": 1239.32, "end": 1240.02, "word": " the", "probability": 0.90087890625}, {"start": 1240.02, "end": 1240.42, "word": " value", "probability": 0.95068359375}, {"start": 1240.42, "end": 1240.68, "word": " of,", "probability": 0.41845703125}, {"start": 1241.38, "end": 1241.96, "word": " or", "probability": 0.93115234375}, {"start": 1241.96, "end": 1242.34, "word": " find", "probability": 0.430908203125}, {"start": 1242.34, "end": 1242.62, "word": " of", "probability": 0.397216796875}, {"start": 1242.62, "end": 1242.86, "word": " B", "probability": 0.259033203125}, {"start": 1242.86, "end": 1243.14, "word": " 
value.", "probability": 0.9033203125}, {"start": 1243.88, "end": 1244.48, "word": " That's", "probability": 0.952880859375}, {"start": 1244.48, "end": 1244.74, "word": " all", "probability": 0.94677734375}, {"start": 1244.74, "end": 1245.06, "word": " for", "probability": 0.9541015625}, {"start": 1245.06, "end": 1245.5, "word": " testing", "probability": 0.88427734375}, {"start": 1245.5, "end": 1245.94, "word": " about", "probability": 0.91357421875}, {"start": 1245.94, "end": 1246.14, "word": " the", "probability": 0.77197265625}, {"start": 1246.14, "end": 1246.62, "word": " population", "probability": 0.95703125}, {"start": 1246.62, "end": 1247.48, "word": " mean", "probability": 0.87353515625}, {"start": 1247.48, "end": 1248.16, "word": " if", "probability": 0.295166015625}, {"start": 1248.16, "end": 1248.46, "word": " sigma", "probability": 0.73291015625}, {"start": 1248.46, "end": 1248.66, "word": " is", "probability": 0.95263671875}, {"start": 1248.66, "end": 1248.96, "word": " known", "probability": 0.67138671875}, {"start": 1248.96, "end": 1249.56, "word": " or", "probability": 0.7978515625}, {"start": 1249.56, "end": 1249.82, "word": " sigma", "probability": 0.908203125}, {"start": 1249.82, "end": 1249.98, "word": " is", "probability": 0.931640625}, {"start": 1249.98, "end": 1250.4, "word": " unknown", "probability": 0.9072265625}, {"start": 1250.4, "end": 1251.18, "word": " for", "probability": 0.68603515625}, {"start": 1251.18, "end": 1251.4, "word": " two", "probability": 0.92724609375}, {"start": 1251.4, "end": 1251.64, "word": " sided", "probability": 0.5712890625}, {"start": 1251.64, "end": 1252.08, "word": " test", "probability": 0.6572265625}, {"start": 1252.08, "end": 1252.46, "word": " or", "probability": 0.86181640625}, {"start": 1252.46, "end": 1252.92, "word": " one", "probability": 0.93310546875}, {"start": 1252.92, "end": 1253.26, "word": " sided,", "probability": 0.884765625}, {"start": 1253.38, "end": 1253.62, "word": " upper", 
"probability": 0.775390625}, {"start": 1253.62, "end": 1254.06, "word": " or", "probability": 0.943359375}, {"start": 1254.06, "end": 1254.5, "word": " lower", "probability": 0.857421875}, {"start": 1254.5, "end": 1254.84, "word": " sided", "probability": 0.82080078125}, {"start": 1254.84, "end": 1255.1, "word": " test.", "probability": 0.841796875}], "temperature": 1.0}, {"id": 48, "seek": 127555, "start": 1257.57, "end": 1275.55, "text": " We mentioned before that there are two types of testing. One is called hypothesis testing for the mean and the other for the proportion. Because as we mentioned before, there are two types of data. One is the numerical data.", "tokens": [492, 2835, 949, 300, 456, 366, 732, 3467, 295, 4997, 13, 1485, 307, 1219, 17291, 4997, 337, 264, 914, 293, 264, 661, 337, 264, 16068, 13, 1436, 382, 321, 2835, 949, 11, 456, 366, 732, 3467, 295, 1412, 13, 1485, 307, 264, 29054, 1412, 13], "avg_logprob": -0.18614131147446838, "compression_ratio": 1.7045454545454546, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 1257.5700000000002, "end": 1258.17, "word": " We", "probability": 0.4853515625}, {"start": 1258.17, "end": 1258.51, "word": " mentioned", "probability": 0.7998046875}, {"start": 1258.51, "end": 1258.81, "word": " before", "probability": 0.8505859375}, {"start": 1258.81, "end": 1259.15, "word": " that", "probability": 0.85595703125}, {"start": 1259.15, "end": 1259.79, "word": " there", "probability": 0.89208984375}, {"start": 1259.79, "end": 1259.97, "word": " are", "probability": 0.94482421875}, {"start": 1259.97, "end": 1260.17, "word": " two", "probability": 0.91796875}, {"start": 1260.17, "end": 1260.59, "word": " types", "probability": 0.8369140625}, {"start": 1260.59, "end": 1260.83, "word": " of", "probability": 0.970703125}, {"start": 1260.83, "end": 1261.27, "word": " testing.", "probability": 0.884765625}, {"start": 1261.49, "end": 1261.65, "word": " One", "probability": 0.9150390625}, {"start": 1261.65, "end": 
1261.83, "word": " is", "probability": 0.91064453125}, {"start": 1261.83, "end": 1262.41, "word": " called", "probability": 0.8662109375}, {"start": 1262.41, "end": 1264.17, "word": " hypothesis", "probability": 0.7607421875}, {"start": 1264.17, "end": 1264.53, "word": " testing", "probability": 0.82373046875}, {"start": 1264.53, "end": 1264.75, "word": " for", "probability": 0.9287109375}, {"start": 1264.75, "end": 1264.89, "word": " the", "probability": 0.91064453125}, {"start": 1264.89, "end": 1265.11, "word": " mean", "probability": 0.93212890625}, {"start": 1265.11, "end": 1266.19, "word": " and", "probability": 0.58544921875}, {"start": 1266.19, "end": 1266.37, "word": " the", "probability": 0.77392578125}, {"start": 1266.37, "end": 1266.59, "word": " other", "probability": 0.8935546875}, {"start": 1266.59, "end": 1267.39, "word": " for", "probability": 0.84130859375}, {"start": 1267.39, "end": 1267.79, "word": " the", "probability": 0.9150390625}, {"start": 1267.79, "end": 1268.39, "word": " proportion.", "probability": 0.7919921875}, {"start": 1270.13, "end": 1270.71, "word": " Because", "probability": 0.90380859375}, {"start": 1270.71, "end": 1271.17, "word": " as", "probability": 0.69287109375}, {"start": 1271.17, "end": 1271.31, "word": " we", "probability": 0.9560546875}, {"start": 1271.31, "end": 1271.57, "word": " mentioned", "probability": 0.83154296875}, {"start": 1271.57, "end": 1271.91, "word": " before,", "probability": 0.85791015625}, {"start": 1271.97, "end": 1272.11, "word": " there", "probability": 0.89794921875}, {"start": 1272.11, "end": 1272.25, "word": " are", "probability": 0.94189453125}, {"start": 1272.25, "end": 1272.41, "word": " two", "probability": 0.9404296875}, {"start": 1272.41, "end": 1272.79, "word": " types", "probability": 0.8310546875}, {"start": 1272.79, "end": 1272.99, "word": " of", "probability": 0.96923828125}, {"start": 1272.99, "end": 1273.39, "word": " data.", "probability": 0.935546875}, {"start": 1274.27, "end": 
1274.53, "word": " One", "probability": 0.927734375}, {"start": 1274.53, "end": 1274.69, "word": " is", "probability": 0.9462890625}, {"start": 1274.69, "end": 1274.79, "word": " the", "probability": 0.470458984375}, {"start": 1274.79, "end": 1275.07, "word": " numerical", "probability": 0.93408203125}, {"start": 1275.07, "end": 1275.55, "word": " data.", "probability": 0.9296875}], "temperature": 1.0}, {"id": 49, "seek": 130751, "start": 1282.13, "end": 1307.51, "text": " Now, for numerical data, we have to use the means. Otherwise, if the data is not numeric, I mean if it is qualitative, we have to use proportion. For example, if we are talking about gender, so it's proportion we have to use instead of the means. Let's see. Now, if we have data on gender.", "tokens": [823, 11, 337, 29054, 1412, 11, 321, 362, 281, 764, 264, 1355, 13, 10328, 11, 498, 264, 1412, 307, 406, 7866, 299, 11, 286, 914, 498, 309, 307, 31312, 11, 321, 362, 281, 764, 16068, 13, 1171, 1365, 11, 498, 321, 366, 1417, 466, 7898, 11, 370, 309, 311, 16068, 321, 362, 281, 764, 2602, 295, 264, 1355, 13, 961, 311, 536, 13, 823, 11, 498, 321, 362, 1412, 322, 7898, 13], "avg_logprob": -0.22838185258107643, "compression_ratio": 1.695906432748538, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 1282.13, "end": 1282.69, "word": " Now,", "probability": 0.04364013671875}, {"start": 1283.43, "end": 1283.55, "word": " for", "probability": 0.6923828125}, {"start": 1283.55, "end": 1283.99, "word": " numerical", "probability": 0.8330078125}, {"start": 1283.99, "end": 1284.37, "word": " data,", "probability": 0.94189453125}, {"start": 1284.47, "end": 1284.53, "word": " we", "probability": 0.9111328125}, {"start": 1284.53, "end": 1284.73, "word": " have", "probability": 0.890625}, {"start": 1284.73, "end": 1284.87, "word": " to", "probability": 0.97119140625}, {"start": 1284.87, "end": 1285.17, "word": " use", "probability": 0.89208984375}, {"start": 1285.17, "end": 1286.07, "word": " the", 
"probability": 0.63330078125}, {"start": 1286.07, "end": 1286.45, "word": " means.", "probability": 0.841796875}, {"start": 1287.05, "end": 1287.45, "word": " Otherwise,", "probability": 0.87890625}, {"start": 1287.73, "end": 1287.83, "word": " if", "probability": 0.93310546875}, {"start": 1287.83, "end": 1287.97, "word": " the", "probability": 0.88916015625}, {"start": 1287.97, "end": 1288.21, "word": " data", "probability": 0.93701171875}, {"start": 1288.21, "end": 1288.49, "word": " is", "probability": 0.9462890625}, {"start": 1288.49, "end": 1288.79, "word": " not", "probability": 0.94580078125}, {"start": 1288.79, "end": 1289.39, "word": " numeric,", "probability": 0.84033203125}, {"start": 1289.95, "end": 1290.09, "word": " I", "probability": 0.92529296875}, {"start": 1290.09, "end": 1290.25, "word": " mean", "probability": 0.966796875}, {"start": 1290.25, "end": 1290.39, "word": " if", "probability": 0.705078125}, {"start": 1290.39, "end": 1290.53, "word": " it", "probability": 0.9306640625}, {"start": 1290.53, "end": 1290.69, "word": " is", "probability": 0.81884765625}, {"start": 1290.69, "end": 1291.47, "word": " qualitative,", "probability": 0.9345703125}, {"start": 1291.71, "end": 1291.85, "word": " we", "probability": 0.912109375}, {"start": 1291.85, "end": 1292.01, "word": " have", "probability": 0.9296875}, {"start": 1292.01, "end": 1292.13, "word": " to", "probability": 0.970703125}, {"start": 1292.13, "end": 1292.37, "word": " use", "probability": 0.89453125}, {"start": 1292.37, "end": 1293.33, "word": " proportion.", "probability": 0.59326171875}, {"start": 1294.33, "end": 1294.69, "word": " For", "probability": 0.93505859375}, {"start": 1294.69, "end": 1295.07, "word": " example,", "probability": 0.97705078125}, {"start": 1295.19, "end": 1295.29, "word": " if", "probability": 0.94873046875}, {"start": 1295.29, "end": 1295.39, "word": " we", "probability": 0.9208984375}, {"start": 1295.39, "end": 1295.49, "word": " are", "probability": 
0.91015625}, {"start": 1295.49, "end": 1295.79, "word": " talking", "probability": 0.84619140625}, {"start": 1295.79, "end": 1296.27, "word": " about", "probability": 0.9033203125}, {"start": 1296.27, "end": 1296.75, "word": " gender,", "probability": 0.8662109375}, {"start": 1298.45, "end": 1298.79, "word": " so", "probability": 0.68212890625}, {"start": 1298.79, "end": 1299.01, "word": " it's", "probability": 0.767333984375}, {"start": 1299.01, "end": 1299.53, "word": " proportion", "probability": 0.82421875}, {"start": 1299.53, "end": 1300.03, "word": " we", "probability": 0.8271484375}, {"start": 1300.03, "end": 1300.21, "word": " have", "probability": 0.9443359375}, {"start": 1300.21, "end": 1300.35, "word": " to", "probability": 0.96728515625}, {"start": 1300.35, "end": 1300.69, "word": " use", "probability": 0.8818359375}, {"start": 1300.69, "end": 1301.93, "word": " instead", "probability": 0.66455078125}, {"start": 1301.93, "end": 1302.13, "word": " of", "probability": 0.96875}, {"start": 1302.13, "end": 1302.25, "word": " the", "probability": 0.75830078125}, {"start": 1302.25, "end": 1302.49, "word": " means.", "probability": 0.677734375}, {"start": 1304.81, "end": 1305.23, "word": " Let's", "probability": 0.893310546875}, {"start": 1305.23, "end": 1305.43, "word": " see.", "probability": 0.91357421875}, {"start": 1305.73, "end": 1306.07, "word": " Now,", "probability": 0.92236328125}, {"start": 1306.17, "end": 1306.33, "word": " if", "probability": 0.94482421875}, {"start": 1306.33, "end": 1306.45, "word": " we", "probability": 0.9443359375}, {"start": 1306.45, "end": 1306.61, "word": " have", "probability": 0.943359375}, {"start": 1306.61, "end": 1306.95, "word": " data", "probability": 0.420654296875}, {"start": 1306.95, "end": 1307.19, "word": " on", "probability": 0.9248046875}, {"start": 1307.19, "end": 1307.51, "word": " gender.", "probability": 0.91845703125}], "temperature": 1.0}, {"id": 50, "seek": 133801, "start": 1308.77, "end": 1338.01, 
"text": " Gender is classified males and females. Suppose, for example, you select a random sample of size 100, and you are interested in number of female students. And the sample has, for example, 46 females.", "tokens": [48039, 307, 20627, 20776, 293, 21529, 13, 21360, 11, 337, 1365, 11, 291, 3048, 257, 4974, 6889, 295, 2744, 2319, 11, 293, 291, 366, 3102, 294, 1230, 295, 6556, 1731, 13, 400, 264, 6889, 575, 11, 337, 1365, 11, 17835, 21529, 13], "avg_logprob": -0.1922238316646842, "compression_ratio": 1.4388489208633093, "no_speech_prob": 0.0, "words": [{"start": 1308.77, "end": 1309.29, "word": " Gender", "probability": 0.83203125}, {"start": 1309.29, "end": 1309.55, "word": " is", "probability": 0.79150390625}, {"start": 1309.55, "end": 1310.05, "word": " classified", "probability": 0.84130859375}, {"start": 1310.05, "end": 1311.11, "word": " males", "probability": 0.69384765625}, {"start": 1311.11, "end": 1312.49, "word": " and", "probability": 0.9267578125}, {"start": 1312.49, "end": 1312.91, "word": " females.", "probability": 0.93115234375}, {"start": 1313.79, "end": 1314.17, "word": " Suppose,", "probability": 0.7333984375}, {"start": 1314.29, "end": 1314.39, "word": " for", "probability": 0.9482421875}, {"start": 1314.39, "end": 1314.73, "word": " example,", "probability": 0.97509765625}, {"start": 1314.91, "end": 1317.11, "word": " you", "probability": 0.263671875}, {"start": 1317.11, "end": 1317.97, "word": " select", "probability": 0.822265625}, {"start": 1317.97, "end": 1318.13, "word": " a", "probability": 0.88232421875}, {"start": 1318.13, "end": 1318.31, "word": " random", "probability": 0.86376953125}, {"start": 1318.31, "end": 1318.69, "word": " sample", "probability": 0.86767578125}, {"start": 1318.69, "end": 1318.97, "word": " of", "probability": 0.9150390625}, {"start": 1318.97, "end": 1319.27, "word": " size", "probability": 0.82666015625}, {"start": 1319.27, "end": 1319.79, "word": " 100,", "probability": 0.87255859375}, {"start": 1321.91, 
"end": 1322.31, "word": " and", "probability": 0.9228515625}, {"start": 1322.31, "end": 1322.45, "word": " you", "probability": 0.951171875}, {"start": 1322.45, "end": 1322.57, "word": " are", "probability": 0.68896484375}, {"start": 1322.57, "end": 1323.03, "word": " interested", "probability": 0.86181640625}, {"start": 1323.03, "end": 1324.33, "word": " in", "probability": 0.9521484375}, {"start": 1324.33, "end": 1324.93, "word": " number", "probability": 0.59912109375}, {"start": 1324.93, "end": 1326.67, "word": " of", "probability": 0.97119140625}, {"start": 1326.67, "end": 1327.27, "word": " female", "probability": 0.8427734375}, {"start": 1327.27, "end": 1327.81, "word": " students.", "probability": 0.9619140625}, {"start": 1332.41, "end": 1333.35, "word": " And", "probability": 0.927734375}, {"start": 1333.35, "end": 1333.55, "word": " the", "probability": 0.8330078125}, {"start": 1333.55, "end": 1333.87, "word": " sample", "probability": 0.87451171875}, {"start": 1333.87, "end": 1334.81, "word": " has,", "probability": 0.638671875}, {"start": 1335.47, "end": 1335.95, "word": " for", "probability": 0.95166015625}, {"start": 1335.95, "end": 1336.33, "word": " example,", "probability": 0.97607421875}, {"start": 1336.51, "end": 1337.47, "word": " 46", "probability": 0.9560546875}, {"start": 1337.47, "end": 1338.01, "word": " females.", "probability": 0.90576171875}], "temperature": 1.0}, {"id": 51, "seek": 135677, "start": 1341.31, "end": 1356.77, "text": " So now your proportion is x divided by n, 46 divided by 100, so 0.46. 
You cannot say for example the average of gender for example is 1.3.", "tokens": [407, 586, 428, 16068, 307, 2031, 6666, 538, 297, 11, 17835, 6666, 538, 2319, 11, 370, 1958, 13, 16169, 13, 509, 2644, 584, 337, 1365, 264, 4274, 295, 7898, 337, 1365, 307, 502, 13, 18, 13], "avg_logprob": -0.29666385779509674, "compression_ratio": 1.2522522522522523, "no_speech_prob": 0.0, "words": [{"start": 1341.31, "end": 1341.61, "word": " So", "probability": 0.277099609375}, {"start": 1341.61, "end": 1341.89, "word": " now", "probability": 0.71337890625}, {"start": 1341.89, "end": 1342.29, "word": " your", "probability": 0.15869140625}, {"start": 1342.29, "end": 1342.87, "word": " proportion", "probability": 0.71435546875}, {"start": 1342.87, "end": 1343.91, "word": " is", "probability": 0.66455078125}, {"start": 1343.91, "end": 1344.17, "word": " x", "probability": 0.72314453125}, {"start": 1344.17, "end": 1344.47, "word": " divided", "probability": 0.55419921875}, {"start": 1344.47, "end": 1344.71, "word": " by", "probability": 0.9677734375}, {"start": 1344.71, "end": 1344.97, "word": " n,", "probability": 0.689453125}, {"start": 1345.73, "end": 1346.23, "word": " 46", "probability": 0.833984375}, {"start": 1346.23, "end": 1346.51, "word": " divided", "probability": 0.8046875}, {"start": 1346.51, "end": 1346.75, "word": " by", "probability": 0.962890625}, {"start": 1346.75, "end": 1347.27, "word": " 100,", "probability": 0.8740234375}, {"start": 1347.53, "end": 1347.73, "word": " so", "probability": 0.56884765625}, {"start": 1347.73, "end": 1347.95, "word": " 0", "probability": 0.85302734375}, {"start": 1347.95, "end": 1348.47, "word": ".46.", "probability": 0.984130859375}, {"start": 1351.35, "end": 1351.95, "word": " You", "probability": 0.9345703125}, {"start": 1351.95, "end": 1352.19, "word": " cannot", "probability": 0.83837890625}, {"start": 1352.19, "end": 1352.43, "word": " say", "probability": 0.93701171875}, {"start": 1352.43, "end": 1352.57, "word": " for", 
"probability": 0.7109375}, {"start": 1352.57, "end": 1352.91, "word": " example", "probability": 0.9775390625}, {"start": 1352.91, "end": 1353.13, "word": " the", "probability": 0.6171875}, {"start": 1353.13, "end": 1353.63, "word": " average", "probability": 0.796875}, {"start": 1353.63, "end": 1354.85, "word": " of", "probability": 0.94775390625}, {"start": 1354.85, "end": 1355.23, "word": " gender", "probability": 0.91650390625}, {"start": 1355.23, "end": 1355.59, "word": " for", "probability": 0.364990234375}, {"start": 1355.59, "end": 1355.93, "word": " example", "probability": 0.9794921875}, {"start": 1355.93, "end": 1356.09, "word": " is", "probability": 0.9306640625}, {"start": 1356.09, "end": 1356.27, "word": " 1", "probability": 0.9775390625}, {"start": 1356.27, "end": 1356.77, "word": ".3.", "probability": 0.989501953125}], "temperature": 1.0}, {"id": 52, "seek": 138798, "start": 1358.5, "end": 1387.98, "text": " It has no meaning that the average is 1.3. You have to say that, for example, males or females represents 46%. Rather than saying the average, for example, is 1.3. This one has no meaning. 
So if we are talking about two possible outcomes, success or failure, switch on or switch off,", "tokens": [467, 575, 572, 3620, 300, 264, 4274, 307, 502, 13, 18, 13, 509, 362, 281, 584, 300, 11, 337, 1365, 11, 20776, 420, 21529, 8855, 17835, 6856, 16571, 813, 1566, 264, 4274, 11, 337, 1365, 11, 307, 502, 13, 18, 13, 639, 472, 575, 572, 3620, 13, 407, 498, 321, 366, 1417, 466, 732, 1944, 10070, 11, 2245, 420, 7763, 11, 3679, 322, 420, 3679, 766, 11], "avg_logprob": -0.17934283263543072, "compression_ratio": 1.5865921787709498, "no_speech_prob": 0.0, "words": [{"start": 1358.5, "end": 1358.64, "word": " It", "probability": 0.11944580078125}, {"start": 1358.64, "end": 1358.88, "word": " has", "probability": 0.90185546875}, {"start": 1358.88, "end": 1359.16, "word": " no", "probability": 0.9423828125}, {"start": 1359.16, "end": 1359.84, "word": " meaning", "probability": 0.8544921875}, {"start": 1359.84, "end": 1360.3, "word": " that", "probability": 0.68896484375}, {"start": 1360.3, "end": 1360.48, "word": " the", "probability": 0.701171875}, {"start": 1360.48, "end": 1360.84, "word": " average", "probability": 0.7783203125}, {"start": 1360.84, "end": 1361.06, "word": " is", "probability": 0.93310546875}, {"start": 1361.06, "end": 1361.26, "word": " 1", "probability": 0.87255859375}, {"start": 1361.26, "end": 1361.78, "word": ".3.", "probability": 0.95361328125}, {"start": 1362.0, "end": 1362.18, "word": " You", "probability": 0.71337890625}, {"start": 1362.18, "end": 1362.34, "word": " have", "probability": 0.94091796875}, {"start": 1362.34, "end": 1362.44, "word": " to", "probability": 0.9638671875}, {"start": 1362.44, "end": 1362.6, "word": " say", "probability": 0.84765625}, {"start": 1362.6, "end": 1362.82, "word": " that,", "probability": 0.92236328125}, {"start": 1362.92, "end": 1363.0, "word": " for", "probability": 0.95068359375}, {"start": 1363.0, "end": 1364.86, "word": " example,", "probability": 0.974609375}, {"start": 1365.1, "end": 1366.42, "word": " 
males", "probability": 0.8818359375}, {"start": 1366.42, "end": 1367.3, "word": " or", "probability": 0.90673828125}, {"start": 1367.3, "end": 1367.88, "word": " females", "probability": 0.95263671875}, {"start": 1367.88, "end": 1369.26, "word": " represents", "probability": 0.51708984375}, {"start": 1369.26, "end": 1371.12, "word": " 46%.", "probability": 0.7568359375}, {"start": 1371.12, "end": 1372.2, "word": " Rather", "probability": 0.708984375}, {"start": 1372.2, "end": 1372.54, "word": " than", "probability": 0.8291015625}, {"start": 1372.54, "end": 1372.9, "word": " saying", "probability": 0.818359375}, {"start": 1372.9, "end": 1373.1, "word": " the", "probability": 0.71533203125}, {"start": 1373.1, "end": 1373.54, "word": " average,", "probability": 0.80712890625}, {"start": 1373.68, "end": 1373.82, "word": " for", "probability": 0.9443359375}, {"start": 1373.82, "end": 1374.18, "word": " example,", "probability": 0.9755859375}, {"start": 1374.3, "end": 1374.46, "word": " is", "probability": 0.939453125}, {"start": 1374.46, "end": 1374.64, "word": " 1", "probability": 0.98876953125}, {"start": 1374.64, "end": 1375.12, "word": ".3.", "probability": 0.997314453125}, {"start": 1375.3, "end": 1375.68, "word": " This", "probability": 0.8984375}, {"start": 1375.68, "end": 1375.84, "word": " one", "probability": 0.92431640625}, {"start": 1375.84, "end": 1376.06, "word": " has", "probability": 0.94873046875}, {"start": 1376.06, "end": 1376.36, "word": " no", "probability": 0.943359375}, {"start": 1376.36, "end": 1377.2, "word": " meaning.", "probability": 0.8330078125}, {"start": 1380.12, "end": 1380.7, "word": " So", "probability": 0.76123046875}, {"start": 1380.7, "end": 1380.92, "word": " if", "probability": 0.9248046875}, {"start": 1380.92, "end": 1381.04, "word": " we", "probability": 0.86181640625}, {"start": 1381.04, "end": 1381.16, "word": " are", "probability": 0.931640625}, {"start": 1381.16, "end": 1381.52, "word": " talking", "probability": 
0.85009765625}, {"start": 1381.52, "end": 1382.18, "word": " about", "probability": 0.89892578125}, {"start": 1382.18, "end": 1383.68, "word": " two", "probability": 0.91845703125}, {"start": 1383.68, "end": 1383.96, "word": " possible", "probability": 0.93359375}, {"start": 1383.96, "end": 1384.52, "word": " outcomes,", "probability": 0.89208984375}, {"start": 1385.16, "end": 1385.48, "word": " success", "probability": 0.8759765625}, {"start": 1385.48, "end": 1385.8, "word": " or", "probability": 0.94970703125}, {"start": 1385.8, "end": 1386.18, "word": " failure,", "probability": 0.93115234375}, {"start": 1386.8, "end": 1387.08, "word": " switch", "probability": 0.95849609375}, {"start": 1387.08, "end": 1387.28, "word": " on", "probability": 0.92041015625}, {"start": 1387.28, "end": 1387.38, "word": " or", "probability": 0.677734375}, {"start": 1387.38, "end": 1387.64, "word": " switch", "probability": 0.97119140625}, {"start": 1387.64, "end": 1387.98, "word": " off,", "probability": 0.89892578125}], "temperature": 1.0}, {"id": 53, "seek": 140952, "start": 1388.9, "end": 1409.52, "text": " a good item, defective item, and so on, we have to use a proportion instead of the mean. Here we'll talk about in details about hypothesis testing for a proportions. So in this case, your problem involves categorical variables. 
It means we have", "tokens": [257, 665, 3174, 11, 16445, 488, 3174, 11, 293, 370, 322, 11, 321, 362, 281, 764, 257, 16068, 2602, 295, 264, 914, 13, 1692, 321, 603, 751, 466, 294, 4365, 466, 17291, 4997, 337, 257, 32482, 13, 407, 294, 341, 1389, 11, 428, 1154, 11626, 19250, 804, 9102, 13, 467, 1355, 321, 362], "avg_logprob": -0.2877604116996129, "compression_ratio": 1.5123456790123457, "no_speech_prob": 4.172325134277344e-07, "words": [{"start": 1388.9, "end": 1389.3, "word": " a", "probability": 0.1827392578125}, {"start": 1389.3, "end": 1389.58, "word": " good", "probability": 0.60009765625}, {"start": 1389.58, "end": 1390.1, "word": " item,", "probability": 0.9658203125}, {"start": 1390.28, "end": 1390.74, "word": " defective", "probability": 0.89794921875}, {"start": 1390.74, "end": 1391.06, "word": " item,", "probability": 0.9404296875}, {"start": 1391.14, "end": 1391.28, "word": " and", "probability": 0.92822265625}, {"start": 1391.28, "end": 1391.44, "word": " so", "probability": 0.951171875}, {"start": 1391.44, "end": 1391.68, "word": " on,", "probability": 0.94677734375}, {"start": 1392.12, "end": 1392.36, "word": " we", "probability": 0.9208984375}, {"start": 1392.36, "end": 1392.58, "word": " have", "probability": 0.9443359375}, {"start": 1392.58, "end": 1392.68, "word": " to", "probability": 0.9736328125}, {"start": 1392.68, "end": 1393.02, "word": " use", "probability": 0.88671875}, {"start": 1393.02, "end": 1393.38, "word": " a", "probability": 0.779296875}, {"start": 1393.38, "end": 1393.82, "word": " proportion", "probability": 0.79150390625}, {"start": 1393.82, "end": 1394.68, "word": " instead", "probability": 0.8076171875}, {"start": 1394.68, "end": 1394.98, "word": " of", "probability": 0.9697265625}, {"start": 1394.98, "end": 1395.12, "word": " the", "probability": 0.79345703125}, {"start": 1395.12, "end": 1395.28, "word": " mean.", "probability": 0.95263671875}, {"start": 1395.64, "end": 1395.92, "word": " Here", "probability": 0.81005859375}, 
{"start": 1395.92, "end": 1396.16, "word": " we'll", "probability": 0.625244140625}, {"start": 1396.16, "end": 1396.36, "word": " talk", "probability": 0.8935546875}, {"start": 1396.36, "end": 1396.74, "word": " about", "probability": 0.75146484375}, {"start": 1396.74, "end": 1396.96, "word": " in", "probability": 0.362548828125}, {"start": 1396.96, "end": 1397.34, "word": " details", "probability": 0.68115234375}, {"start": 1397.34, "end": 1397.82, "word": " about", "probability": 0.76416015625}, {"start": 1397.82, "end": 1398.68, "word": " hypothesis", "probability": 0.44873046875}, {"start": 1398.68, "end": 1399.34, "word": " testing", "probability": 0.83251953125}, {"start": 1399.34, "end": 1400.1, "word": " for", "probability": 0.9462890625}, {"start": 1400.1, "end": 1400.8, "word": " a", "probability": 0.365234375}, {"start": 1400.8, "end": 1401.2, "word": " proportions.", "probability": 0.53955078125}, {"start": 1402.28, "end": 1402.56, "word": " So", "probability": 0.91357421875}, {"start": 1402.56, "end": 1402.8, "word": " in", "probability": 0.8291015625}, {"start": 1402.8, "end": 1403.02, "word": " this", "probability": 0.94775390625}, {"start": 1403.02, "end": 1403.46, "word": " case,", "probability": 0.904296875}, {"start": 1405.3, "end": 1406.46, "word": " your", "probability": 0.88818359375}, {"start": 1406.46, "end": 1406.9, "word": " problem", "probability": 0.861328125}, {"start": 1406.9, "end": 1407.4, "word": " involves", "probability": 0.72021484375}, {"start": 1407.4, "end": 1407.98, "word": " categorical", "probability": 0.941650390625}, {"start": 1407.98, "end": 1408.54, "word": " variables.", "probability": 0.92578125}, {"start": 1408.66, "end": 1408.82, "word": " It", "probability": 0.63232421875}, {"start": 1408.82, "end": 1409.04, "word": " means", "probability": 0.92578125}, {"start": 1409.04, "end": 1409.22, "word": " we", "probability": 0.92333984375}, {"start": 1409.22, "end": 1409.52, "word": " have", "probability": 0.94970703125}], 
"temperature": 1.0}, {"id": 54, "seek": 142613, "start": 1411.39, "end": 1426.13, "text": " classification either yes or no males females and so on so two possible outcomes possesses characteristic of interest so suppose i am interested in number of defective items", "tokens": [21538, 2139, 2086, 420, 572, 20776, 21529, 293, 370, 322, 370, 732, 1944, 10070, 17490, 279, 16282, 295, 1179, 370, 7297, 741, 669, 3102, 294, 1230, 295, 16445, 488, 4754], "avg_logprob": -0.21232359351650362, "compression_ratio": 1.4871794871794872, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 1411.39, "end": 1412.25, "word": " classification", "probability": 0.76220703125}, {"start": 1412.25, "end": 1413.11, "word": " either", "probability": 0.426513671875}, {"start": 1413.11, "end": 1413.39, "word": " yes", "probability": 0.89990234375}, {"start": 1413.39, "end": 1413.61, "word": " or", "probability": 0.95703125}, {"start": 1413.61, "end": 1413.97, "word": " no", "probability": 0.94970703125}, {"start": 1413.97, "end": 1415.07, "word": " males", "probability": 0.810546875}, {"start": 1415.07, "end": 1415.39, "word": " females", "probability": 0.70654296875}, {"start": 1415.39, "end": 1415.71, "word": " and", "probability": 0.92822265625}, {"start": 1415.71, "end": 1415.93, "word": " so", "probability": 0.95654296875}, {"start": 1415.93, "end": 1416.17, "word": " on", "probability": 0.951171875}, {"start": 1416.17, "end": 1416.91, "word": " so", "probability": 0.591796875}, {"start": 1416.91, "end": 1417.33, "word": " two", "probability": 0.88134765625}, {"start": 1417.33, "end": 1417.71, "word": " possible", "probability": 0.96240234375}, {"start": 1417.71, "end": 1418.27, "word": " outcomes", "probability": 0.83203125}, {"start": 1418.27, "end": 1418.97, "word": " possesses", "probability": 0.64111328125}, {"start": 1418.97, "end": 1419.41, "word": " characteristic", "probability": 0.76708984375}, {"start": 1419.41, "end": 1420.07, "word": " of", "probability": 
0.96826171875}, {"start": 1420.07, "end": 1420.97, "word": " interest", "probability": 0.79833984375}, {"start": 1420.97, "end": 1421.27, "word": " so", "probability": 0.7822265625}, {"start": 1421.27, "end": 1421.65, "word": " suppose", "probability": 0.94091796875}, {"start": 1421.65, "end": 1421.77, "word": " i", "probability": 0.6318359375}, {"start": 1421.77, "end": 1421.91, "word": " am", "probability": 0.67529296875}, {"start": 1421.91, "end": 1422.27, "word": " interested", "probability": 0.499755859375}, {"start": 1422.27, "end": 1422.85, "word": " in", "probability": 0.94970703125}, {"start": 1422.85, "end": 1423.75, "word": " number", "probability": 0.90576171875}, {"start": 1423.75, "end": 1424.67, "word": " of", "probability": 0.97412109375}, {"start": 1424.67, "end": 1425.43, "word": " defective", "probability": 0.984619140625}, {"start": 1425.43, "end": 1426.13, "word": " items", "probability": 0.82861328125}], "temperature": 1.0}, {"id": 55, "seek": 145572, "start": 1426.98, "end": 1455.72, "text": " that is produced by a firm. So my interest here in this case is number of defective items. So number of defective items is your x. So for example, suppose the firm introduces 1,000 items, and we found that 10 of them are defective. 
So the proportion of defective in this case is x over n.", "tokens": [300, 307, 7126, 538, 257, 6174, 13, 407, 452, 1179, 510, 294, 341, 1389, 307, 1230, 295, 16445, 488, 4754, 13, 407, 1230, 295, 16445, 488, 4754, 307, 428, 2031, 13, 407, 337, 1365, 11, 7297, 264, 6174, 31472, 502, 11, 1360, 4754, 11, 293, 321, 1352, 300, 1266, 295, 552, 366, 16445, 488, 13, 407, 264, 16068, 295, 16445, 488, 294, 341, 1389, 307, 2031, 670, 297, 13], "avg_logprob": -0.16964286076171056, "compression_ratio": 1.7515151515151515, "no_speech_prob": 0.0, "words": [{"start": 1426.98, "end": 1427.38, "word": " that", "probability": 0.4560546875}, {"start": 1427.38, "end": 1427.58, "word": " is", "probability": 0.931640625}, {"start": 1427.58, "end": 1427.98, "word": " produced", "probability": 0.87451171875}, {"start": 1427.98, "end": 1428.38, "word": " by", "probability": 0.97119140625}, {"start": 1428.38, "end": 1428.76, "word": " a", "probability": 0.939453125}, {"start": 1428.76, "end": 1428.8, "word": " firm.", "probability": 0.6396484375}, {"start": 1429.06, "end": 1429.18, "word": " So", "probability": 0.751953125}, {"start": 1429.18, "end": 1430.46, "word": " my", "probability": 0.2476806640625}, {"start": 1430.46, "end": 1431.04, "word": " interest", "probability": 0.912109375}, {"start": 1431.04, "end": 1431.4, "word": " here", "probability": 0.68212890625}, {"start": 1431.4, "end": 1431.74, "word": " in", "probability": 0.76611328125}, {"start": 1431.74, "end": 1431.94, "word": " this", "probability": 0.94580078125}, {"start": 1431.94, "end": 1432.18, "word": " case", "probability": 0.90625}, {"start": 1432.18, "end": 1432.32, "word": " is", "probability": 0.873046875}, {"start": 1432.32, "end": 1432.54, "word": " number", "probability": 0.78759765625}, {"start": 1432.54, "end": 1432.74, "word": " of", "probability": 0.96728515625}, {"start": 1432.74, "end": 1433.22, "word": " defective", "probability": 0.972412109375}, {"start": 1433.22, "end": 1433.62, "word": " items.", 
"probability": 0.8359375}, {"start": 1434.8, "end": 1435.48, "word": " So", "probability": 0.9453125}, {"start": 1435.48, "end": 1435.8, "word": " number", "probability": 0.7119140625}, {"start": 1435.8, "end": 1436.02, "word": " of", "probability": 0.96728515625}, {"start": 1436.02, "end": 1436.46, "word": " defective", "probability": 0.96240234375}, {"start": 1436.46, "end": 1437.04, "word": " items", "probability": 0.84814453125}, {"start": 1437.04, "end": 1437.68, "word": " is", "probability": 0.939453125}, {"start": 1437.68, "end": 1437.9, "word": " your", "probability": 0.89501953125}, {"start": 1437.9, "end": 1438.2, "word": " x.", "probability": 0.6240234375}, {"start": 1439.28, "end": 1439.56, "word": " So", "probability": 0.6787109375}, {"start": 1439.56, "end": 1440.76, "word": " for", "probability": 0.74658203125}, {"start": 1440.76, "end": 1441.08, "word": " example,", "probability": 0.97509765625}, {"start": 1441.16, "end": 1441.62, "word": " suppose", "probability": 0.91357421875}, {"start": 1441.62, "end": 1442.48, "word": " the", "probability": 0.8974609375}, {"start": 1442.48, "end": 1442.74, "word": " firm", "probability": 0.880859375}, {"start": 1442.74, "end": 1444.3, "word": " introduces", "probability": 0.87646484375}, {"start": 1444.3, "end": 1445.22, "word": " 1", "probability": 0.88427734375}, {"start": 1445.22, "end": 1445.68, "word": ",000", "probability": 0.9970703125}, {"start": 1445.68, "end": 1446.14, "word": " items,", "probability": 0.68505859375}, {"start": 1446.94, "end": 1447.74, "word": " and", "probability": 0.931640625}, {"start": 1447.74, "end": 1447.94, "word": " we", "probability": 0.95654296875}, {"start": 1447.94, "end": 1448.24, "word": " found", "probability": 0.89013671875}, {"start": 1448.24, "end": 1448.66, "word": " that", "probability": 0.912109375}, {"start": 1448.66, "end": 1449.8, "word": " 10", "probability": 0.87158203125}, {"start": 1449.8, "end": 1449.98, "word": " of", "probability": 0.97265625}, {"start": 
1449.98, "end": 1450.54, "word": " them", "probability": 0.89306640625}, {"start": 1450.54, "end": 1451.74, "word": " are", "probability": 0.91650390625}, {"start": 1451.74, "end": 1452.4, "word": " defective.", "probability": 0.967529296875}, {"start": 1452.66, "end": 1452.98, "word": " So", "probability": 0.95849609375}, {"start": 1452.98, "end": 1453.14, "word": " the", "probability": 0.91943359375}, {"start": 1453.14, "end": 1453.6, "word": " proportion", "probability": 0.8271484375}, {"start": 1453.6, "end": 1453.8, "word": " of", "probability": 0.939453125}, {"start": 1453.8, "end": 1454.26, "word": " defective", "probability": 0.83349609375}, {"start": 1454.26, "end": 1454.38, "word": " in", "probability": 0.8759765625}, {"start": 1454.38, "end": 1454.58, "word": " this", "probability": 0.94384765625}, {"start": 1454.58, "end": 1454.86, "word": " case", "probability": 0.904296875}, {"start": 1454.86, "end": 1455.12, "word": " is", "probability": 0.92138671875}, {"start": 1455.12, "end": 1455.32, "word": " x", "probability": 0.9697265625}, {"start": 1455.32, "end": 1455.48, "word": " over", "probability": 0.822265625}, {"start": 1455.48, "end": 1455.72, "word": " n.", "probability": 0.541015625}], "temperature": 1.0}, {"id": 56, "seek": 148343, "start": 1456.55, "end": 1483.43, "text": " And we are interested in the population proportion. As we mentioned before, it's pi. So pi is the population proportion. And actually, this p or pi ranges between 0 and 1. So it never exceeds 1 or below 0. 
So again, this slide actually is the same as we have discussed in Chapter 8.", "tokens": [400, 321, 366, 3102, 294, 264, 4415, 16068, 13, 1018, 321, 2835, 949, 11, 309, 311, 3895, 13, 407, 3895, 307, 264, 4415, 16068, 13, 400, 767, 11, 341, 280, 420, 3895, 22526, 1296, 1958, 293, 502, 13, 407, 309, 1128, 43305, 502, 420, 2507, 1958, 13, 407, 797, 11, 341, 4137, 767, 307, 264, 912, 382, 321, 362, 7152, 294, 18874, 1649, 13], "avg_logprob": -0.18629807692307693, "compression_ratio": 1.5898876404494382, "no_speech_prob": 0.0, "words": [{"start": 1456.55, "end": 1456.83, "word": " And", "probability": 0.77294921875}, {"start": 1456.83, "end": 1456.93, "word": " we", "probability": 0.9306640625}, {"start": 1456.93, "end": 1457.13, "word": " are", "probability": 0.92724609375}, {"start": 1457.13, "end": 1457.75, "word": " interested", "probability": 0.873046875}, {"start": 1457.75, "end": 1458.01, "word": " in", "probability": 0.94775390625}, {"start": 1458.01, "end": 1458.11, "word": " the", "probability": 0.9013671875}, {"start": 1458.11, "end": 1458.57, "word": " population", "probability": 0.9248046875}, {"start": 1458.57, "end": 1459.11, "word": " proportion.", "probability": 0.79833984375}, {"start": 1459.97, "end": 1460.57, "word": " As", "probability": 0.951171875}, {"start": 1460.57, "end": 1460.73, "word": " we", "probability": 0.955078125}, {"start": 1460.73, "end": 1461.05, "word": " mentioned", "probability": 0.82568359375}, {"start": 1461.05, "end": 1461.45, "word": " before,", "probability": 0.77880859375}, {"start": 1461.57, "end": 1461.61, "word": " it's", "probability": 0.8994140625}, {"start": 1461.61, "end": 1461.93, "word": " pi.", "probability": 0.478515625}, {"start": 1462.33, "end": 1462.57, "word": " So", "probability": 0.93017578125}, {"start": 1462.57, "end": 1462.87, "word": " pi", "probability": 0.73046875}, {"start": 1462.87, "end": 1463.95, "word": " is", "probability": 0.93408203125}, {"start": 1463.95, "end": 1464.21, "word": " the", 
"probability": 0.92041015625}, {"start": 1464.21, "end": 1464.95, "word": " population", "probability": 0.9365234375}, {"start": 1464.95, "end": 1466.21, "word": " proportion.", "probability": 0.7802734375}, {"start": 1466.95, "end": 1467.19, "word": " And", "probability": 0.95166015625}, {"start": 1467.19, "end": 1467.65, "word": " actually,", "probability": 0.87158203125}, {"start": 1468.05, "end": 1468.55, "word": " this", "probability": 0.93701171875}, {"start": 1468.55, "end": 1468.85, "word": " p", "probability": 0.4140625}, {"start": 1468.85, "end": 1469.01, "word": " or", "probability": 0.82421875}, {"start": 1469.01, "end": 1469.45, "word": " pi", "probability": 0.958984375}, {"start": 1469.45, "end": 1470.89, "word": " ranges", "probability": 0.908203125}, {"start": 1470.89, "end": 1471.53, "word": " between", "probability": 0.86572265625}, {"start": 1471.53, "end": 1472.85, "word": " 0", "probability": 0.7587890625}, {"start": 1472.85, "end": 1472.99, "word": " and", "probability": 0.923828125}, {"start": 1472.99, "end": 1473.15, "word": " 1.", "probability": 0.98486328125}, {"start": 1473.23, "end": 1473.31, "word": " So", "probability": 0.93798828125}, {"start": 1473.31, "end": 1473.47, "word": " it", "probability": 0.734375}, {"start": 1473.47, "end": 1473.63, "word": " never", "probability": 0.50048828125}, {"start": 1473.63, "end": 1474.09, "word": " exceeds", "probability": 0.8544921875}, {"start": 1474.09, "end": 1474.39, "word": " 1", "probability": 0.9140625}, {"start": 1474.39, "end": 1474.69, "word": " or", "probability": 0.91552734375}, {"start": 1474.69, "end": 1475.03, "word": " below", "probability": 0.9326171875}, {"start": 1475.03, "end": 1476.47, "word": " 0.", "probability": 0.9443359375}, {"start": 1477.83, "end": 1478.31, "word": " So", "probability": 0.95751953125}, {"start": 1478.31, "end": 1478.57, "word": " again,", "probability": 0.91943359375}, {"start": 1478.81, "end": 1480.19, "word": " this", "probability": 0.94873046875}, 
{"start": 1480.19, "end": 1480.65, "word": " slide", "probability": 0.95458984375}, {"start": 1480.65, "end": 1481.21, "word": " actually", "probability": 0.81787109375}, {"start": 1481.21, "end": 1481.41, "word": " is", "probability": 0.9248046875}, {"start": 1481.41, "end": 1481.57, "word": " the", "probability": 0.9189453125}, {"start": 1481.57, "end": 1481.73, "word": " same", "probability": 0.9111328125}, {"start": 1481.73, "end": 1481.89, "word": " as", "probability": 0.96435546875}, {"start": 1481.89, "end": 1482.03, "word": " we", "probability": 0.9638671875}, {"start": 1482.03, "end": 1482.23, "word": " have", "probability": 0.921875}, {"start": 1482.23, "end": 1482.73, "word": " discussed", "probability": 0.90576171875}, {"start": 1482.73, "end": 1482.95, "word": " in", "probability": 0.94189453125}, {"start": 1482.95, "end": 1483.17, "word": " Chapter", "probability": 0.4248046875}, {"start": 1483.17, "end": 1483.43, "word": " 8.", "probability": 0.90478515625}], "temperature": 1.0}, {"id": 57, "seek": 151453, "start": 1485.81, "end": 1514.53, "text": " The sample proportion is P. P is X over N. And X is the number in category of interest in sample. N is the sample size. 
And we know that if the two conditions, N pi and N times 1 minus pi are at least 5, then P can be approximated by normal distribution with mean equal pi.", "tokens": [440, 6889, 16068, 307, 430, 13, 430, 307, 1783, 670, 426, 13, 400, 1783, 307, 264, 1230, 294, 7719, 295, 1179, 294, 6889, 13, 426, 307, 264, 6889, 2744, 13, 400, 321, 458, 300, 498, 264, 732, 4487, 11, 426, 3895, 293, 426, 1413, 502, 3175, 3895, 366, 412, 1935, 1025, 11, 550, 430, 393, 312, 8542, 770, 538, 2710, 7316, 365, 914, 2681, 3895, 13], "avg_logprob": -0.19741138682436588, "compression_ratio": 1.4497354497354498, "no_speech_prob": 0.0, "words": [{"start": 1485.8100000000002, "end": 1486.41, "word": " The", "probability": 0.53955078125}, {"start": 1486.41, "end": 1486.87, "word": " sample", "probability": 0.6064453125}, {"start": 1486.87, "end": 1487.55, "word": " proportion", "probability": 0.79931640625}, {"start": 1487.55, "end": 1489.35, "word": " is", "probability": 0.8818359375}, {"start": 1489.35, "end": 1489.65, "word": " P.", "probability": 0.6103515625}, {"start": 1489.87, "end": 1490.09, "word": " P", "probability": 0.9892578125}, {"start": 1490.09, "end": 1490.29, "word": " is", "probability": 0.93701171875}, {"start": 1490.29, "end": 1490.55, "word": " X", "probability": 0.7119140625}, {"start": 1490.55, "end": 1490.79, "word": " over", "probability": 0.927734375}, {"start": 1490.79, "end": 1491.05, "word": " N.", "probability": 0.91162109375}, {"start": 1491.65, "end": 1491.99, "word": " And", "probability": 0.90625}, {"start": 1491.99, "end": 1492.21, "word": " X", "probability": 0.9619140625}, {"start": 1492.21, "end": 1492.47, "word": " is", "probability": 0.95068359375}, {"start": 1492.47, "end": 1492.65, "word": " the", "probability": 0.9111328125}, {"start": 1492.65, "end": 1493.05, "word": " number", "probability": 0.92626953125}, {"start": 1493.05, "end": 1494.21, "word": " in", "probability": 0.6220703125}, {"start": 1494.21, "end": 1494.55, "word": " category", 
"probability": 0.7177734375}, {"start": 1494.55, "end": 1494.77, "word": " of", "probability": 0.94384765625}, {"start": 1494.77, "end": 1495.19, "word": " interest", "probability": 0.873046875}, {"start": 1495.19, "end": 1495.43, "word": " in", "probability": 0.90966796875}, {"start": 1495.43, "end": 1495.77, "word": " sample.", "probability": 0.61328125}, {"start": 1496.73, "end": 1496.97, "word": " N", "probability": 0.96875}, {"start": 1496.97, "end": 1497.15, "word": " is", "probability": 0.947265625}, {"start": 1497.15, "end": 1497.31, "word": " the", "probability": 0.921875}, {"start": 1497.31, "end": 1497.51, "word": " sample", "probability": 0.90234375}, {"start": 1497.51, "end": 1497.97, "word": " size.", "probability": 0.85986328125}, {"start": 1499.91, "end": 1500.51, "word": " And", "probability": 0.927734375}, {"start": 1500.51, "end": 1500.77, "word": " we", "probability": 0.88818359375}, {"start": 1500.77, "end": 1500.89, "word": " know", "probability": 0.8837890625}, {"start": 1500.89, "end": 1501.19, "word": " that", "probability": 0.927734375}, {"start": 1501.19, "end": 1501.93, "word": " if", "probability": 0.880859375}, {"start": 1501.93, "end": 1502.11, "word": " the", "probability": 0.91064453125}, {"start": 1502.11, "end": 1502.31, "word": " two", "probability": 0.8974609375}, {"start": 1502.31, "end": 1502.95, "word": " conditions,", "probability": 0.8935546875}, {"start": 1503.27, "end": 1503.45, "word": " N", "probability": 0.9072265625}, {"start": 1503.45, "end": 1503.79, "word": " pi", "probability": 0.57861328125}, {"start": 1503.79, "end": 1505.31, "word": " and", "probability": 0.8095703125}, {"start": 1505.31, "end": 1505.57, "word": " N", "probability": 0.9853515625}, {"start": 1505.57, "end": 1505.87, "word": " times", "probability": 0.90380859375}, {"start": 1505.87, "end": 1506.11, "word": " 1", "probability": 0.58935546875}, {"start": 1506.11, "end": 1506.43, "word": " minus", "probability": 0.9775390625}, {"start": 1506.43, 
"end": 1506.87, "word": " pi", "probability": 0.85791015625}, {"start": 1506.87, "end": 1507.69, "word": " are", "probability": 0.68896484375}, {"start": 1507.69, "end": 1507.93, "word": " at", "probability": 0.96826171875}, {"start": 1507.93, "end": 1508.15, "word": " least", "probability": 0.95849609375}, {"start": 1508.15, "end": 1508.67, "word": " 5,", "probability": 0.685546875}, {"start": 1509.37, "end": 1509.71, "word": " then", "probability": 0.84423828125}, {"start": 1509.71, "end": 1509.91, "word": " P", "probability": 0.51416015625}, {"start": 1509.91, "end": 1510.15, "word": " can", "probability": 0.95166015625}, {"start": 1510.15, "end": 1510.31, "word": " be", "probability": 0.9609375}, {"start": 1510.31, "end": 1511.03, "word": " approximated", "probability": 0.96484375}, {"start": 1511.03, "end": 1511.31, "word": " by", "probability": 0.9638671875}, {"start": 1511.31, "end": 1511.67, "word": " normal", "probability": 0.80224609375}, {"start": 1511.67, "end": 1512.41, "word": " distribution", "probability": 0.84521484375}, {"start": 1512.41, "end": 1513.27, "word": " with", "probability": 0.77587890625}, {"start": 1513.27, "end": 1513.57, "word": " mean", "probability": 0.97412109375}, {"start": 1513.57, "end": 1514.03, "word": " equal", "probability": 0.86328125}, {"start": 1514.03, "end": 1514.53, "word": " pi.", "probability": 0.859375}], "temperature": 1.0}, {"id": 58, "seek": 154357, "start": 1515.29, "end": 1543.57, "text": " And standard deviation equals square root of pi 1 minus pi divided by n. So actually, this slide is repeated. 
So again, your B value, I'm sorry, your proportion is approximately normally distributed of mean equals pi and sigma pi 1 minus pi divided by n, all under the square root.", "tokens": [400, 3832, 25163, 6915, 3732, 5593, 295, 3895, 502, 3175, 3895, 6666, 538, 297, 13, 407, 767, 11, 341, 4137, 307, 10477, 13, 407, 797, 11, 428, 363, 2158, 11, 286, 478, 2597, 11, 428, 16068, 307, 10447, 5646, 12631, 295, 914, 6915, 3895, 293, 12771, 3895, 502, 3175, 3895, 6666, 538, 297, 11, 439, 833, 264, 3732, 5593, 13], "avg_logprob": -0.1926229576595494, "compression_ratio": 1.5842696629213484, "no_speech_prob": 0.0, "words": [{"start": 1515.29, "end": 1515.67, "word": " And", "probability": 0.7431640625}, {"start": 1515.67, "end": 1516.13, "word": " standard", "probability": 0.91650390625}, {"start": 1516.13, "end": 1516.59, "word": " deviation", "probability": 0.89599609375}, {"start": 1516.59, "end": 1517.89, "word": " equals", "probability": 0.86865234375}, {"start": 1517.89, "end": 1518.31, "word": " square", "probability": 0.79541015625}, {"start": 1518.31, "end": 1518.51, "word": " root", "probability": 0.92333984375}, {"start": 1518.51, "end": 1518.67, "word": " of", "probability": 0.96533203125}, {"start": 1518.67, "end": 1518.85, "word": " pi", "probability": 0.68017578125}, {"start": 1518.85, "end": 1519.05, "word": " 1", "probability": 0.55322265625}, {"start": 1519.05, "end": 1519.31, "word": " minus", "probability": 0.94287109375}, {"start": 1519.31, "end": 1519.51, "word": " pi", "probability": 0.87646484375}, {"start": 1519.51, "end": 1519.73, "word": " divided", "probability": 0.77392578125}, {"start": 1519.73, "end": 1519.89, "word": " by", "probability": 0.96337890625}, {"start": 1519.89, "end": 1520.09, "word": " n.", "probability": 0.73291015625}, {"start": 1520.79, "end": 1521.51, "word": " So", "probability": 0.93603515625}, {"start": 1521.51, "end": 1522.07, "word": " actually,", "probability": 0.7802734375}, {"start": 1522.23, "end": 1522.53, "word": " 
this", "probability": 0.9453125}, {"start": 1522.53, "end": 1522.81, "word": " slide", "probability": 0.89990234375}, {"start": 1522.81, "end": 1522.95, "word": " is", "probability": 0.94091796875}, {"start": 1522.95, "end": 1523.29, "word": " repeated.", "probability": 0.96630859375}, {"start": 1524.45, "end": 1524.95, "word": " So", "probability": 0.86572265625}, {"start": 1524.95, "end": 1525.31, "word": " again,", "probability": 0.921875}, {"start": 1525.89, "end": 1526.13, "word": " your", "probability": 0.8681640625}, {"start": 1526.13, "end": 1526.31, "word": " B", "probability": 0.284912109375}, {"start": 1526.31, "end": 1526.67, "word": " value,", "probability": 0.88720703125}, {"start": 1526.77, "end": 1526.91, "word": " I'm", "probability": 0.899169921875}, {"start": 1526.91, "end": 1527.11, "word": " sorry,", "probability": 0.85693359375}, {"start": 1527.23, "end": 1527.49, "word": " your", "probability": 0.87353515625}, {"start": 1527.49, "end": 1528.23, "word": " proportion", "probability": 0.7998046875}, {"start": 1528.23, "end": 1530.41, "word": " is", "probability": 0.81396484375}, {"start": 1530.41, "end": 1531.15, "word": " approximately", "probability": 0.87451171875}, {"start": 1531.15, "end": 1532.91, "word": " normally", "probability": 0.39697265625}, {"start": 1532.91, "end": 1533.57, "word": " distributed", "probability": 0.9189453125}, {"start": 1533.57, "end": 1534.95, "word": " of", "probability": 0.5830078125}, {"start": 1534.95, "end": 1535.25, "word": " mean", "probability": 0.90576171875}, {"start": 1535.25, "end": 1535.75, "word": " equals", "probability": 0.93896484375}, {"start": 1535.75, "end": 1536.19, "word": " pi", "probability": 0.91162109375}, {"start": 1536.19, "end": 1537.79, "word": " and", "probability": 0.79345703125}, {"start": 1537.79, "end": 1538.25, "word": " sigma", "probability": 0.9091796875}, {"start": 1538.25, "end": 1539.89, "word": " pi", "probability": 0.74609375}, {"start": 1539.89, "end": 1540.37, "word": 
" 1", "probability": 0.9443359375}, {"start": 1540.37, "end": 1540.73, "word": " minus", "probability": 0.98388671875}, {"start": 1540.73, "end": 1541.11, "word": " pi", "probability": 0.9189453125}, {"start": 1541.11, "end": 1541.49, "word": " divided", "probability": 0.84326171875}, {"start": 1541.49, "end": 1541.69, "word": " by", "probability": 0.96533203125}, {"start": 1541.69, "end": 1542.01, "word": " n,", "probability": 0.96826171875}, {"start": 1542.39, "end": 1542.81, "word": " all", "probability": 0.93505859375}, {"start": 1542.81, "end": 1543.07, "word": " under", "probability": 0.7802734375}, {"start": 1543.07, "end": 1543.19, "word": " the", "probability": 0.51904296875}, {"start": 1543.19, "end": 1543.37, "word": " square", "probability": 0.89501953125}, {"start": 1543.37, "end": 1543.57, "word": " root.", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 59, "seek": 156932, "start": 1544.94, "end": 1569.32, "text": " Now, based on this sampling distribution of P, what's your z-statistic? As we mentioned in chapter 6, the standard formula for z-statistic is x times mean of x divided by sigma of x. 
This is the standard formula for the z-score.", "tokens": [823, 11, 2361, 322, 341, 21179, 7316, 295, 430, 11, 437, 311, 428, 710, 12, 19435, 3142, 30, 1018, 321, 2835, 294, 7187, 1386, 11, 264, 3832, 8513, 337, 710, 12, 19435, 3142, 307, 2031, 1413, 914, 295, 2031, 6666, 538, 12771, 295, 2031, 13, 639, 307, 264, 3832, 8513, 337, 264, 710, 12, 4417, 418, 13], "avg_logprob": -0.20420258338081426, "compression_ratio": 1.4679487179487178, "no_speech_prob": 0.0, "words": [{"start": 1544.94, "end": 1545.34, "word": " Now,", "probability": 0.77197265625}, {"start": 1545.8, "end": 1546.26, "word": " based", "probability": 0.91064453125}, {"start": 1546.26, "end": 1546.5, "word": " on", "probability": 0.951171875}, {"start": 1546.5, "end": 1546.96, "word": " this", "probability": 0.912109375}, {"start": 1546.96, "end": 1547.94, "word": " sampling", "probability": 0.353759765625}, {"start": 1547.94, "end": 1548.6, "word": " distribution", "probability": 0.8310546875}, {"start": 1548.6, "end": 1548.88, "word": " of", "probability": 0.9423828125}, {"start": 1548.88, "end": 1549.14, "word": " P,", "probability": 0.60205078125}, {"start": 1549.98, "end": 1550.6, "word": " what's", "probability": 0.8564453125}, {"start": 1550.6, "end": 1550.76, "word": " your", "probability": 0.91064453125}, {"start": 1550.76, "end": 1550.9, "word": " z", "probability": 0.40771484375}, {"start": 1550.9, "end": 1551.44, "word": "-statistic?", "probability": 0.791259765625}, {"start": 1553.42, "end": 1554.22, "word": " As", "probability": 0.947265625}, {"start": 1554.22, "end": 1554.38, "word": " we", "probability": 0.9384765625}, {"start": 1554.38, "end": 1554.74, "word": " mentioned", "probability": 0.826171875}, {"start": 1554.74, "end": 1554.98, "word": " in", "probability": 0.93212890625}, {"start": 1554.98, "end": 1555.22, "word": " chapter", "probability": 0.5478515625}, {"start": 1555.22, "end": 1555.72, "word": " 6,", "probability": 0.73486328125}, {"start": 1556.66, "end": 1556.98, 
"word": " the", "probability": 0.89453125}, {"start": 1556.98, "end": 1557.62, "word": " standard", "probability": 0.9248046875}, {"start": 1557.62, "end": 1558.12, "word": " formula", "probability": 0.90771484375}, {"start": 1558.12, "end": 1558.42, "word": " for", "probability": 0.9443359375}, {"start": 1558.42, "end": 1558.68, "word": " z", "probability": 0.5830078125}, {"start": 1558.68, "end": 1559.3, "word": "-statistic", "probability": 0.939453125}, {"start": 1559.3, "end": 1561.34, "word": " is", "probability": 0.88916015625}, {"start": 1561.34, "end": 1561.76, "word": " x", "probability": 0.72314453125}, {"start": 1561.76, "end": 1562.66, "word": " times", "probability": 0.328125}, {"start": 1562.66, "end": 1563.12, "word": " mean", "probability": 0.62841796875}, {"start": 1563.12, "end": 1563.8, "word": " of", "probability": 0.95947265625}, {"start": 1563.8, "end": 1564.24, "word": " x", "probability": 0.95654296875}, {"start": 1564.24, "end": 1565.4, "word": " divided", "probability": 0.5830078125}, {"start": 1565.4, "end": 1565.8, "word": " by", "probability": 0.97998046875}, {"start": 1565.8, "end": 1566.14, "word": " sigma", "probability": 0.8994140625}, {"start": 1566.14, "end": 1566.3, "word": " of", "probability": 0.8759765625}, {"start": 1566.3, "end": 1566.62, "word": " x.", "probability": 0.9892578125}, {"start": 1566.94, "end": 1567.18, "word": " This", "probability": 0.84814453125}, {"start": 1567.18, "end": 1567.26, "word": " is", "probability": 0.94775390625}, {"start": 1567.26, "end": 1567.46, "word": " the", "probability": 0.9140625}, {"start": 1567.46, "end": 1567.86, "word": " standard", "probability": 0.9384765625}, {"start": 1567.86, "end": 1568.44, "word": " formula", "probability": 0.90283203125}, {"start": 1568.44, "end": 1568.7, "word": " for", "probability": 0.94091796875}, {"start": 1568.7, "end": 1568.86, "word": " the", "probability": 0.8779296875}, {"start": 1568.86, "end": 1569.0, "word": " z", "probability": 0.98291015625}, 
{"start": 1569.0, "end": 1569.32, "word": "-score.", "probability": 0.8556315104166666}], "temperature": 1.0}, {"id": 60, "seek": 159891, "start": 1571.43, "end": 1598.91, "text": " In chapter 8, there are two cases. One is the mean as we mentioned. So in this case, if we are talking about the mean, we should have x bar minus the mean of x bar divided by sigma of x bar. So it's similar to this one. But here, we replace x by x bar. From chapter 8, we know that the mean of x bar is mu. And sigma of x bar is sigma of the root n.", "tokens": [682, 7187, 1649, 11, 456, 366, 732, 3331, 13, 1485, 307, 264, 914, 382, 321, 2835, 13, 407, 294, 341, 1389, 11, 498, 321, 366, 1417, 466, 264, 914, 11, 321, 820, 362, 2031, 2159, 3175, 264, 914, 295, 2031, 2159, 6666, 538, 12771, 295, 2031, 2159, 13, 407, 309, 311, 2531, 281, 341, 472, 13, 583, 510, 11, 321, 7406, 2031, 538, 2031, 2159, 13, 3358, 7187, 1649, 11, 321, 458, 300, 264, 914, 295, 2031, 2159, 307, 2992, 13, 400, 12771, 295, 2031, 2159, 307, 12771, 295, 264, 5593, 297, 13], "avg_logprob": -0.18716755160625945, "compression_ratio": 1.7676767676767677, "no_speech_prob": 0.0, "words": [{"start": 1571.43, "end": 1571.63, "word": " In", "probability": 0.61865234375}, {"start": 1571.63, "end": 1571.89, "word": " chapter", "probability": 0.56494140625}, {"start": 1571.89, "end": 1572.21, "word": " 8,", "probability": 0.68359375}, {"start": 1572.39, "end": 1572.55, "word": " there", "probability": 0.8701171875}, {"start": 1572.55, "end": 1572.67, "word": " are", "probability": 0.93310546875}, {"start": 1572.67, "end": 1572.85, "word": " two", "probability": 0.73828125}, {"start": 1572.85, "end": 1573.35, "word": " cases.", "probability": 0.90087890625}, {"start": 1573.73, "end": 1574.03, "word": " One", "probability": 0.85205078125}, {"start": 1574.03, "end": 1574.29, "word": " is", "probability": 0.89990234375}, {"start": 1574.29, "end": 1574.47, "word": " the", "probability": 0.87841796875}, {"start": 1574.47, "end": 1574.59, 
"word": " mean", "probability": 0.90576171875}, {"start": 1574.59, "end": 1574.77, "word": " as", "probability": 0.62744140625}, {"start": 1574.77, "end": 1574.89, "word": " we", "probability": 0.90771484375}, {"start": 1574.89, "end": 1575.27, "word": " mentioned.", "probability": 0.8056640625}, {"start": 1576.53, "end": 1576.87, "word": " So", "probability": 0.822265625}, {"start": 1576.87, "end": 1577.11, "word": " in", "probability": 0.6552734375}, {"start": 1577.11, "end": 1577.31, "word": " this", "probability": 0.9404296875}, {"start": 1577.31, "end": 1577.61, "word": " case,", "probability": 0.91845703125}, {"start": 1577.67, "end": 1577.79, "word": " if", "probability": 0.8984375}, {"start": 1577.79, "end": 1577.89, "word": " we", "probability": 0.77099609375}, {"start": 1577.89, "end": 1577.99, "word": " are", "probability": 0.81787109375}, {"start": 1577.99, "end": 1578.33, "word": " talking", "probability": 0.84326171875}, {"start": 1578.33, "end": 1578.63, "word": " about", "probability": 0.90673828125}, {"start": 1578.63, "end": 1578.83, "word": " the", "probability": 0.89892578125}, {"start": 1578.83, "end": 1579.05, "word": " mean,", "probability": 0.94921875}, {"start": 1579.81, "end": 1580.77, "word": " we", "probability": 0.662109375}, {"start": 1580.77, "end": 1581.01, "word": " should", "probability": 0.94921875}, {"start": 1581.01, "end": 1581.37, "word": " have", "probability": 0.9443359375}, {"start": 1581.37, "end": 1582.29, "word": " x", "probability": 0.60107421875}, {"start": 1582.29, "end": 1582.59, "word": " bar", "probability": 0.6943359375}, {"start": 1582.59, "end": 1582.93, "word": " minus", "probability": 0.91650390625}, {"start": 1582.93, "end": 1583.09, "word": " the", "probability": 0.88134765625}, {"start": 1583.09, "end": 1583.21, "word": " mean", "probability": 0.96142578125}, {"start": 1583.21, "end": 1583.33, "word": " of", "probability": 0.93701171875}, {"start": 1583.33, "end": 1583.49, "word": " x", "probability": 
0.98828125}, {"start": 1583.49, "end": 1583.69, "word": " bar", "probability": 0.9453125}, {"start": 1583.69, "end": 1583.95, "word": " divided", "probability": 0.76953125}, {"start": 1583.95, "end": 1584.09, "word": " by", "probability": 0.974609375}, {"start": 1584.09, "end": 1584.37, "word": " sigma", "probability": 0.60498046875}, {"start": 1584.37, "end": 1584.53, "word": " of", "probability": 0.89111328125}, {"start": 1584.53, "end": 1584.67, "word": " x", "probability": 0.99365234375}, {"start": 1584.67, "end": 1584.93, "word": " bar.", "probability": 0.9482421875}, {"start": 1585.07, "end": 1585.17, "word": " So", "probability": 0.5517578125}, {"start": 1585.17, "end": 1585.29, "word": " it's", "probability": 0.85400390625}, {"start": 1585.29, "end": 1585.57, "word": " similar", "probability": 0.96484375}, {"start": 1585.57, "end": 1585.77, "word": " to", "probability": 0.9619140625}, {"start": 1585.77, "end": 1585.95, "word": " this", "probability": 0.94580078125}, {"start": 1585.95, "end": 1586.25, "word": " one.", "probability": 0.9228515625}, {"start": 1586.81, "end": 1587.07, "word": " But", "probability": 0.93408203125}, {"start": 1587.07, "end": 1587.25, "word": " here,", "probability": 0.85693359375}, {"start": 1587.31, "end": 1587.43, "word": " we", "probability": 0.95458984375}, {"start": 1587.43, "end": 1587.99, "word": " replace", "probability": 0.87890625}, {"start": 1587.99, "end": 1588.65, "word": " x", "probability": 0.994140625}, {"start": 1588.65, "end": 1589.71, "word": " by", "probability": 0.96630859375}, {"start": 1589.71, "end": 1589.97, "word": " x", "probability": 0.998046875}, {"start": 1589.97, "end": 1590.21, "word": " bar.", "probability": 0.9521484375}, {"start": 1591.13, "end": 1591.57, "word": " From", "probability": 0.7001953125}, {"start": 1591.57, "end": 1591.99, "word": " chapter", "probability": 0.85693359375}, {"start": 1591.99, "end": 1592.35, "word": " 8,", "probability": 0.97607421875}, {"start": 1592.49, "end": 
1592.67, "word": " we", "probability": 0.93896484375}, {"start": 1592.67, "end": 1592.83, "word": " know", "probability": 0.88232421875}, {"start": 1592.83, "end": 1593.11, "word": " that", "probability": 0.9287109375}, {"start": 1593.11, "end": 1593.39, "word": " the", "probability": 0.9287109375}, {"start": 1593.39, "end": 1593.53, "word": " mean", "probability": 0.9677734375}, {"start": 1593.53, "end": 1593.67, "word": " of", "probability": 0.96484375}, {"start": 1593.67, "end": 1593.83, "word": " x", "probability": 0.99609375}, {"start": 1593.83, "end": 1594.01, "word": " bar", "probability": 0.955078125}, {"start": 1594.01, "end": 1594.19, "word": " is", "probability": 0.94873046875}, {"start": 1594.19, "end": 1594.41, "word": " mu.", "probability": 0.82763671875}, {"start": 1595.37, "end": 1595.81, "word": " And", "probability": 0.95263671875}, {"start": 1595.81, "end": 1596.19, "word": " sigma", "probability": 0.9052734375}, {"start": 1596.19, "end": 1596.37, "word": " of", "probability": 0.86962890625}, {"start": 1596.37, "end": 1596.57, "word": " x", "probability": 0.9990234375}, {"start": 1596.57, "end": 1596.87, "word": " bar", "probability": 0.951171875}, {"start": 1596.87, "end": 1598.03, "word": " is", "probability": 0.59619140625}, {"start": 1598.03, "end": 1598.29, "word": " sigma", "probability": 0.90234375}, {"start": 1598.29, "end": 1598.45, "word": " of", "probability": 0.486328125}, {"start": 1598.45, "end": 1598.51, "word": " the", "probability": 0.6884765625}, {"start": 1598.51, "end": 1598.69, "word": " root", "probability": 0.95458984375}, {"start": 1598.69, "end": 1598.91, "word": " n.", "probability": 0.8095703125}], "temperature": 1.0}, {"id": 61, "seek": 162889, "start": 1600.47, "end": 1628.89, "text": " Now, for this proportion, use this statistic is again replace x by p minus mu minus p divided by sigma of p. So p minus. The mean of p is pi. So this is pi. 
Divided by sigma root pi 1 minus pi divided by.", "tokens": [823, 11, 337, 341, 16068, 11, 764, 341, 29588, 307, 797, 7406, 2031, 538, 280, 3175, 2992, 3175, 280, 6666, 538, 12771, 295, 280, 13, 407, 280, 3175, 13, 440, 914, 295, 280, 307, 3895, 13, 407, 341, 307, 3895, 13, 413, 1843, 292, 538, 12771, 5593, 3895, 502, 3175, 3895, 6666, 538, 13], "avg_logprob": -0.2934659015048634, "compression_ratio": 1.553030303030303, "no_speech_prob": 0.0, "words": [{"start": 1600.47, "end": 1600.89, "word": " Now,", "probability": 0.84765625}, {"start": 1601.15, "end": 1601.39, "word": " for", "probability": 0.94921875}, {"start": 1601.39, "end": 1601.73, "word": " this", "probability": 0.94580078125}, {"start": 1601.73, "end": 1603.17, "word": " proportion,", "probability": 0.7333984375}, {"start": 1604.45, "end": 1604.67, "word": " use", "probability": 0.173828125}, {"start": 1604.67, "end": 1604.89, "word": " this", "probability": 0.88232421875}, {"start": 1604.89, "end": 1605.51, "word": " statistic", "probability": 0.849609375}, {"start": 1605.51, "end": 1606.21, "word": " is", "probability": 0.258544921875}, {"start": 1606.21, "end": 1606.59, "word": " again", "probability": 0.8466796875}, {"start": 1606.59, "end": 1607.63, "word": " replace", "probability": 0.60888671875}, {"start": 1607.63, "end": 1608.43, "word": " x", "probability": 0.6328125}, {"start": 1608.43, "end": 1608.71, "word": " by", "probability": 0.81298828125}, {"start": 1608.71, "end": 1608.97, "word": " p", "probability": 0.734375}, {"start": 1608.97, "end": 1610.35, "word": " minus", "probability": 0.73681640625}, {"start": 1610.35, "end": 1611.23, "word": " mu", "probability": 0.77587890625}, {"start": 1611.23, "end": 1611.57, "word": " minus", "probability": 0.98583984375}, {"start": 1611.57, "end": 1611.83, "word": " p", "probability": 0.84033203125}, {"start": 1611.83, "end": 1612.15, "word": " divided", "probability": 0.76513671875}, {"start": 1612.15, "end": 1612.39, "word": " by", "probability": 
0.96826171875}, {"start": 1612.39, "end": 1612.69, "word": " sigma", "probability": 0.89990234375}, {"start": 1612.69, "end": 1612.93, "word": " of", "probability": 0.32177734375}, {"start": 1612.93, "end": 1613.17, "word": " p.", "probability": 0.7314453125}, {"start": 1613.97, "end": 1614.41, "word": " So", "probability": 0.7724609375}, {"start": 1614.41, "end": 1614.59, "word": " p", "probability": 0.578125}, {"start": 1614.59, "end": 1615.75, "word": " minus.", "probability": 0.84130859375}, {"start": 1617.49, "end": 1617.73, "word": " The", "probability": 0.78369140625}, {"start": 1617.73, "end": 1617.89, "word": " mean", "probability": 0.83447265625}, {"start": 1617.89, "end": 1618.01, "word": " of", "probability": 0.95947265625}, {"start": 1618.01, "end": 1618.13, "word": " p", "probability": 0.68359375}, {"start": 1618.13, "end": 1618.27, "word": " is", "probability": 0.95068359375}, {"start": 1618.27, "end": 1618.63, "word": " pi.", "probability": 0.87646484375}, {"start": 1619.49, "end": 1619.81, "word": " So", "probability": 0.90283203125}, {"start": 1619.81, "end": 1619.99, "word": " this", "probability": 0.89453125}, {"start": 1619.99, "end": 1620.11, "word": " is", "probability": 0.95361328125}, {"start": 1620.11, "end": 1620.45, "word": " pi.", "probability": 0.94482421875}, {"start": 1621.73, "end": 1622.41, "word": " Divided", "probability": 0.8313802083333334}, {"start": 1622.41, "end": 1622.61, "word": " by", "probability": 0.97021484375}, {"start": 1622.61, "end": 1623.07, "word": " sigma", "probability": 0.92626953125}, {"start": 1623.07, "end": 1624.37, "word": " root", "probability": 0.7802734375}, {"start": 1624.37, "end": 1626.01, "word": " pi", "probability": 0.74560546875}, {"start": 1626.01, "end": 1627.65, "word": " 1", "probability": 0.49462890625}, {"start": 1627.65, "end": 1627.99, "word": " minus", "probability": 0.984375}, {"start": 1627.99, "end": 1628.27, "word": " pi", "probability": 0.9345703125}, {"start": 1628.27, "end": 
1628.63, "word": " divided", "probability": 0.8408203125}, {"start": 1628.63, "end": 1628.89, "word": " by.", "probability": 0.92041015625}], "temperature": 1.0}, {"id": 62, "seek": 164372, "start": 1630.58, "end": 1643.72, "text": " So this is your Z statistic. So actually, there is nothing new. We just repeated the concepts from Chapter 8.", "tokens": [407, 341, 307, 428, 1176, 29588, 13, 407, 767, 11, 456, 307, 1825, 777, 13, 492, 445, 10477, 264, 10392, 490, 18874, 1649, 13], "avg_logprob": -0.21984375, "compression_ratio": 1.1, "no_speech_prob": 0.0, "words": [{"start": 1630.58, "end": 1630.86, "word": " So", "probability": 0.79150390625}, {"start": 1630.86, "end": 1631.22, "word": " this", "probability": 0.767578125}, {"start": 1631.22, "end": 1631.36, "word": " is", "probability": 0.92578125}, {"start": 1631.36, "end": 1631.52, "word": " your", "probability": 0.87548828125}, {"start": 1631.52, "end": 1631.7, "word": " Z", "probability": 0.56396484375}, {"start": 1631.7, "end": 1632.12, "word": " statistic.", "probability": 0.53173828125}, {"start": 1633.9, "end": 1634.34, "word": " So", "probability": 0.7353515625}, {"start": 1634.34, "end": 1634.8, "word": " actually,", "probability": 0.82470703125}, {"start": 1635.04, "end": 1635.3, "word": " there", "probability": 0.900390625}, {"start": 1635.3, "end": 1635.44, "word": " is", "probability": 0.91552734375}, {"start": 1635.44, "end": 1635.74, "word": " nothing", "probability": 0.90087890625}, {"start": 1635.74, "end": 1636.1, "word": " new.", "probability": 0.90185546875}, {"start": 1637.24, "end": 1637.5, "word": " We", "probability": 0.9482421875}, {"start": 1637.5, "end": 1637.92, "word": " just", "probability": 0.8984375}, {"start": 1637.92, "end": 1639.04, "word": " repeated", "probability": 0.958984375}, {"start": 1639.04, "end": 1640.38, "word": " the", "probability": 0.9169921875}, {"start": 1640.38, "end": 1641.64, "word": " concepts", "probability": 0.873046875}, {"start": 1641.64, "end": 1642.2, 
"word": " from", "probability": 0.88427734375}, {"start": 1642.2, "end": 1642.56, "word": " Chapter", "probability": 0.486572265625}, {"start": 1642.56, "end": 1643.72, "word": " 8.", "probability": 0.77587890625}], "temperature": 1.0}, {"id": 63, "seek": 167447, "start": 1646.91, "end": 1674.47, "text": " So this is your Z statistic, P minus Pi divided by root Pi, 1 minus Pi divided by the sample size. That is valid only if the two conditions are satisfied. Which are N times Pi at least 5, and N times 1 minus Pi is at least 5. If at least one of these conditions is not satisfied, then we cannot use the Z statistic.", "tokens": [407, 341, 307, 428, 1176, 29588, 11, 430, 3175, 17741, 6666, 538, 5593, 17741, 11, 502, 3175, 17741, 6666, 538, 264, 6889, 2744, 13, 663, 307, 7363, 787, 498, 264, 732, 4487, 366, 11239, 13, 3013, 366, 426, 1413, 17741, 412, 1935, 1025, 11, 293, 426, 1413, 502, 3175, 17741, 307, 412, 1935, 1025, 13, 759, 412, 1935, 472, 295, 613, 4487, 307, 406, 11239, 11, 550, 321, 2644, 764, 264, 1176, 29588, 13], "avg_logprob": -0.1867708400885264, "compression_ratio": 1.7173913043478262, "no_speech_prob": 0.0, "words": [{"start": 1646.91, "end": 1647.25, "word": " So", "probability": 0.85009765625}, {"start": 1647.25, "end": 1647.53, "word": " this", "probability": 0.85205078125}, {"start": 1647.53, "end": 1647.67, "word": " is", "probability": 0.912109375}, {"start": 1647.67, "end": 1647.83, "word": " your", "probability": 0.87060546875}, {"start": 1647.83, "end": 1648.03, "word": " Z", "probability": 0.391357421875}, {"start": 1648.03, "end": 1648.53, "word": " statistic,", "probability": 0.748046875}, {"start": 1648.85, "end": 1649.05, "word": " P", "probability": 0.642578125}, {"start": 1649.05, "end": 1649.39, "word": " minus", "probability": 0.86474609375}, {"start": 1649.39, "end": 1649.75, "word": " Pi", "probability": 0.49462890625}, {"start": 1649.75, "end": 1650.17, "word": " divided", "probability": 0.69287109375}, {"start": 1650.17, "end": 
1650.37, "word": " by", "probability": 0.97021484375}, {"start": 1650.37, "end": 1650.61, "word": " root", "probability": 0.73388671875}, {"start": 1650.61, "end": 1651.07, "word": " Pi,", "probability": 0.7529296875}, {"start": 1651.27, "end": 1651.57, "word": " 1", "probability": 0.72900390625}, {"start": 1651.57, "end": 1651.83, "word": " minus", "probability": 0.986328125}, {"start": 1651.83, "end": 1652.03, "word": " Pi", "probability": 0.96435546875}, {"start": 1652.03, "end": 1652.45, "word": " divided", "probability": 0.79248046875}, {"start": 1652.45, "end": 1652.81, "word": " by", "probability": 0.96435546875}, {"start": 1652.81, "end": 1653.49, "word": " the", "probability": 0.85400390625}, {"start": 1653.49, "end": 1653.73, "word": " sample", "probability": 0.6640625}, {"start": 1653.73, "end": 1654.11, "word": " size.", "probability": 0.86669921875}, {"start": 1654.55, "end": 1654.87, "word": " That", "probability": 0.73046875}, {"start": 1654.87, "end": 1655.87, "word": " is", "probability": 0.6201171875}, {"start": 1655.87, "end": 1656.27, "word": " valid", "probability": 0.95458984375}, {"start": 1656.27, "end": 1656.77, "word": " only", "probability": 0.9091796875}, {"start": 1656.77, "end": 1657.45, "word": " if", "probability": 0.94140625}, {"start": 1657.45, "end": 1657.63, "word": " the", "probability": 0.912109375}, {"start": 1657.63, "end": 1657.83, "word": " two", "probability": 0.90576171875}, {"start": 1657.83, "end": 1658.39, "word": " conditions", "probability": 0.87353515625}, {"start": 1658.39, "end": 1658.65, "word": " are", "probability": 0.93994140625}, {"start": 1658.65, "end": 1659.17, "word": " satisfied.", "probability": 0.87255859375}, {"start": 1659.87, "end": 1660.47, "word": " Which", "probability": 0.4775390625}, {"start": 1660.47, "end": 1660.95, "word": " are", "probability": 0.93310546875}, {"start": 1660.95, "end": 1661.51, "word": " N", "probability": 0.46826171875}, {"start": 1661.51, "end": 1661.79, "word": " times", 
"probability": 0.92919921875}, {"start": 1661.79, "end": 1662.07, "word": " Pi", "probability": 0.94091796875}, {"start": 1662.07, "end": 1662.25, "word": " at", "probability": 0.6982421875}, {"start": 1662.25, "end": 1662.43, "word": " least", "probability": 0.95703125}, {"start": 1662.43, "end": 1662.81, "word": " 5,", "probability": 0.66015625}, {"start": 1663.09, "end": 1663.51, "word": " and", "probability": 0.93212890625}, {"start": 1663.51, "end": 1663.81, "word": " N", "probability": 0.98583984375}, {"start": 1663.81, "end": 1664.05, "word": " times", "probability": 0.91845703125}, {"start": 1664.05, "end": 1664.25, "word": " 1", "probability": 0.97265625}, {"start": 1664.25, "end": 1664.55, "word": " minus", "probability": 0.98388671875}, {"start": 1664.55, "end": 1664.79, "word": " Pi", "probability": 0.97119140625}, {"start": 1664.79, "end": 1664.95, "word": " is", "probability": 0.8837890625}, {"start": 1664.95, "end": 1665.09, "word": " at", "probability": 0.9677734375}, {"start": 1665.09, "end": 1665.27, "word": " least", "probability": 0.95556640625}, {"start": 1665.27, "end": 1665.53, "word": " 5.", "probability": 0.974609375}, {"start": 1666.41, "end": 1666.79, "word": " If", "probability": 0.9677734375}, {"start": 1666.79, "end": 1668.45, "word": " at", "probability": 0.5751953125}, {"start": 1668.45, "end": 1668.75, "word": " least", "probability": 0.95947265625}, {"start": 1668.75, "end": 1669.01, "word": " one", "probability": 0.9169921875}, {"start": 1669.01, "end": 1669.13, "word": " of", "probability": 0.96533203125}, {"start": 1669.13, "end": 1669.31, "word": " these", "probability": 0.8349609375}, {"start": 1669.31, "end": 1669.87, "word": " conditions", "probability": 0.8818359375}, {"start": 1669.87, "end": 1670.69, "word": " is", "probability": 0.91845703125}, {"start": 1670.69, "end": 1670.91, "word": " not", "probability": 0.939453125}, {"start": 1670.91, "end": 1671.35, "word": " satisfied,", "probability": 0.89306640625}, {"start": 
1672.35, "end": 1672.63, "word": " then", "probability": 0.84423828125}, {"start": 1672.63, "end": 1672.81, "word": " we", "probability": 0.95703125}, {"start": 1672.81, "end": 1673.07, "word": " cannot", "probability": 0.89208984375}, {"start": 1673.07, "end": 1673.59, "word": " use", "probability": 0.86376953125}, {"start": 1673.59, "end": 1673.85, "word": " the", "probability": 0.86962890625}, {"start": 1673.85, "end": 1674.07, "word": " Z", "probability": 0.96240234375}, {"start": 1674.07, "end": 1674.47, "word": " statistic.", "probability": 0.57861328125}], "temperature": 1.0}, {"id": 64, "seek": 170324, "start": 1675.48, "end": 1703.24, "text": " So there are two cases. If the two conditions together satisfied, then we have to use this statistic. Otherwise, if one of the conditions is not satisfied, then we cannot use the Z-state that says this case not discussed in this chapter. So that's all for testing about proportion. There is an equivalent form.", "tokens": [407, 456, 366, 732, 3331, 13, 759, 264, 732, 4487, 1214, 11239, 11, 550, 321, 362, 281, 764, 341, 29588, 13, 10328, 11, 498, 472, 295, 264, 4487, 307, 406, 11239, 11, 550, 321, 2644, 764, 264, 1176, 12, 15406, 300, 1619, 341, 1389, 406, 7152, 294, 341, 7187, 13, 407, 300, 311, 439, 337, 4997, 466, 16068, 13, 821, 307, 364, 10344, 1254, 13], "avg_logprob": -0.2017045517762502, "compression_ratio": 1.6455026455026456, "no_speech_prob": 0.0, "words": [{"start": 1675.48, "end": 1675.78, "word": " So", "probability": 0.86083984375}, {"start": 1675.78, "end": 1675.92, "word": " there", "probability": 0.75927734375}, {"start": 1675.92, "end": 1676.06, "word": " are", "probability": 0.9384765625}, {"start": 1676.06, "end": 1676.26, "word": " two", "probability": 0.9033203125}, {"start": 1676.26, "end": 1676.74, "word": " cases.", "probability": 0.931640625}, {"start": 1677.48, "end": 1678.06, "word": " If", "probability": 0.962890625}, {"start": 1678.06, "end": 1678.4, "word": " the", "probability": 
0.91357421875}, {"start": 1678.4, "end": 1678.58, "word": " two", "probability": 0.92529296875}, {"start": 1678.58, "end": 1679.18, "word": " conditions", "probability": 0.90625}, {"start": 1679.18, "end": 1679.68, "word": " together", "probability": 0.83837890625}, {"start": 1679.68, "end": 1680.3, "word": " satisfied,", "probability": 0.364990234375}, {"start": 1681.38, "end": 1681.72, "word": " then", "probability": 0.83203125}, {"start": 1681.72, "end": 1681.86, "word": " we", "probability": 0.93896484375}, {"start": 1681.86, "end": 1682.06, "word": " have", "probability": 0.93896484375}, {"start": 1682.06, "end": 1682.18, "word": " to", "probability": 0.96484375}, {"start": 1682.18, "end": 1682.4, "word": " use", "probability": 0.87890625}, {"start": 1682.4, "end": 1682.54, "word": " this", "probability": 0.5810546875}, {"start": 1682.54, "end": 1683.02, "word": " statistic.", "probability": 0.841796875}, {"start": 1683.52, "end": 1684.02, "word": " Otherwise,", "probability": 0.951171875}, {"start": 1684.46, "end": 1684.92, "word": " if", "probability": 0.94873046875}, {"start": 1684.92, "end": 1685.22, "word": " one", "probability": 0.92431640625}, {"start": 1685.22, "end": 1685.34, "word": " of", "probability": 0.966796875}, {"start": 1685.34, "end": 1685.46, "word": " the", "probability": 0.92333984375}, {"start": 1685.46, "end": 1685.96, "word": " conditions", "probability": 0.8798828125}, {"start": 1685.96, "end": 1686.26, "word": " is", "probability": 0.9423828125}, {"start": 1686.26, "end": 1686.44, "word": " not", "probability": 0.9462890625}, {"start": 1686.44, "end": 1686.98, "word": " satisfied,", "probability": 0.90234375}, {"start": 1687.12, "end": 1687.3, "word": " then", "probability": 0.85009765625}, {"start": 1687.3, "end": 1687.46, "word": " we", "probability": 0.95068359375}, {"start": 1687.46, "end": 1687.74, "word": " cannot", "probability": 0.884765625}, {"start": 1687.74, "end": 1688.28, "word": " use", "probability": 0.86279296875}, 
{"start": 1688.28, "end": 1689.5, "word": " the", "probability": 0.52734375}, {"start": 1689.5, "end": 1689.8, "word": " Z", "probability": 0.3740234375}, {"start": 1689.8, "end": 1690.1, "word": "-state", "probability": 0.546630859375}, {"start": 1690.1, "end": 1690.36, "word": " that", "probability": 0.27734375}, {"start": 1690.36, "end": 1690.64, "word": " says", "probability": 0.8603515625}, {"start": 1690.64, "end": 1692.02, "word": " this", "probability": 0.6181640625}, {"start": 1692.02, "end": 1692.38, "word": " case", "probability": 0.912109375}, {"start": 1692.38, "end": 1692.82, "word": " not", "probability": 0.6796875}, {"start": 1692.82, "end": 1693.42, "word": " discussed", "probability": 0.896484375}, {"start": 1693.42, "end": 1694.26, "word": " in", "probability": 0.935546875}, {"start": 1694.26, "end": 1694.42, "word": " this", "probability": 0.90625}, {"start": 1694.42, "end": 1694.64, "word": " chapter.", "probability": 0.7763671875}, {"start": 1696.52, "end": 1697.04, "word": " So", "probability": 0.92578125}, {"start": 1697.04, "end": 1697.48, "word": " that's", "probability": 0.94970703125}, {"start": 1697.48, "end": 1697.82, "word": " all", "probability": 0.94921875}, {"start": 1697.82, "end": 1698.18, "word": " for", "probability": 0.953125}, {"start": 1698.18, "end": 1698.6, "word": " testing", "probability": 0.90673828125}, {"start": 1698.6, "end": 1699.06, "word": " about", "probability": 0.90966796875}, {"start": 1699.06, "end": 1699.58, "word": " proportion.", "probability": 0.81103515625}, {"start": 1701.2, "end": 1701.52, "word": " There", "probability": 0.79736328125}, {"start": 1701.52, "end": 1701.76, "word": " is", "probability": 0.9404296875}, {"start": 1701.76, "end": 1702.18, "word": " an", "probability": 0.9599609375}, {"start": 1702.18, "end": 1702.6, "word": " equivalent", "probability": 0.90625}, {"start": 1702.6, "end": 1703.24, "word": " form.", "probability": 0.908203125}], "temperature": 1.0}, {"id": 65, "seek": 172756, 
"start": 1704.1, "end": 1727.56, "text": " of this statistic, we may replace B by X. For example, we know that B equals X over N. Now let's see if we multiply N for numerator and denominator. So if we multiply N here,", "tokens": [295, 341, 29588, 11, 321, 815, 7406, 363, 538, 1783, 13, 1171, 1365, 11, 321, 458, 300, 363, 6915, 1783, 670, 426, 13, 823, 718, 311, 536, 498, 321, 12972, 426, 337, 30380, 293, 20687, 13, 407, 498, 321, 12972, 426, 510, 11], "avg_logprob": -0.25230824608694424, "compression_ratio": 1.3461538461538463, "no_speech_prob": 0.0, "words": [{"start": 1704.1, "end": 1704.42, "word": " of", "probability": 0.250244140625}, {"start": 1704.42, "end": 1704.76, "word": " this", "probability": 0.91015625}, {"start": 1704.76, "end": 1705.56, "word": " statistic,", "probability": 0.72265625}, {"start": 1706.42, "end": 1706.76, "word": " we", "probability": 0.91455078125}, {"start": 1706.76, "end": 1707.12, "word": " may", "probability": 0.87451171875}, {"start": 1707.12, "end": 1708.02, "word": " replace", "probability": 0.8974609375}, {"start": 1708.02, "end": 1709.6, "word": " B", "probability": 0.296875}, {"start": 1709.6, "end": 1709.84, "word": " by", "probability": 0.912109375}, {"start": 1709.84, "end": 1710.24, "word": " X.", "probability": 0.7197265625}, {"start": 1710.8, "end": 1711.0, "word": " For", "probability": 0.9111328125}, {"start": 1711.0, "end": 1711.42, "word": " example,", "probability": 0.96875}, {"start": 1711.92, "end": 1712.1, "word": " we", "probability": 0.921875}, {"start": 1712.1, "end": 1712.24, "word": " know", "probability": 0.87841796875}, {"start": 1712.24, "end": 1712.5, "word": " that", "probability": 0.9326171875}, {"start": 1712.5, "end": 1712.84, "word": " B", "probability": 0.94970703125}, {"start": 1712.84, "end": 1713.36, "word": " equals", "probability": 0.82373046875}, {"start": 1713.36, "end": 1714.56, "word": " X", "probability": 0.91357421875}, {"start": 1714.56, "end": 1714.8, "word": " over", 
"probability": 0.9130859375}, {"start": 1714.8, "end": 1715.1, "word": " N.", "probability": 0.93505859375}, {"start": 1717.48, "end": 1718.16, "word": " Now", "probability": 0.837890625}, {"start": 1718.16, "end": 1718.44, "word": " let's", "probability": 0.820068359375}, {"start": 1718.44, "end": 1718.6, "word": " see", "probability": 0.6875}, {"start": 1718.6, "end": 1718.78, "word": " if", "probability": 0.94873046875}, {"start": 1718.78, "end": 1718.92, "word": " we", "probability": 0.9345703125}, {"start": 1718.92, "end": 1719.38, "word": " multiply", "probability": 0.9091796875}, {"start": 1719.38, "end": 1722.26, "word": " N", "probability": 0.884765625}, {"start": 1722.26, "end": 1724.5, "word": " for", "probability": 0.71630859375}, {"start": 1724.5, "end": 1725.24, "word": " numerator", "probability": 0.5517578125}, {"start": 1725.24, "end": 1725.62, "word": " and", "probability": 0.94921875}, {"start": 1725.62, "end": 1725.92, "word": " denominator.", "probability": 0.57861328125}, {"start": 1726.28, "end": 1726.44, "word": " So", "probability": 0.86083984375}, {"start": 1726.44, "end": 1726.64, "word": " if", "probability": 0.6162109375}, {"start": 1726.64, "end": 1726.72, "word": " we", "probability": 0.65966796875}, {"start": 1726.72, "end": 1727.02, "word": " multiply", "probability": 0.916015625}, {"start": 1727.02, "end": 1727.3, "word": " N", "probability": 0.9736328125}, {"start": 1727.3, "end": 1727.56, "word": " here,", "probability": 0.83349609375}], "temperature": 1.0}, {"id": 66, "seek": 176712, "start": 1739.68, "end": 1767.12, "text": " I will reach another formula for this statistic. The first one depends on the sample proportion. The other one will depend on x. Now, n, this one equals n times b, which is x minus n times y n pi. 
So again, n times b is x minus n pi.", "tokens": [286, 486, 2524, 1071, 8513, 337, 341, 29588, 13, 440, 700, 472, 5946, 322, 264, 6889, 16068, 13, 440, 661, 472, 486, 5672, 322, 2031, 13, 823, 11, 297, 11, 341, 472, 6915, 297, 1413, 272, 11, 597, 307, 2031, 3175, 297, 1413, 288, 297, 3895, 13, 407, 797, 11, 297, 1413, 272, 307, 2031, 3175, 297, 3895, 13], "avg_logprob": -0.23854167088866235, "compression_ratio": 1.56, "no_speech_prob": 0.0, "words": [{"start": 1739.68, "end": 1739.9, "word": " I", "probability": 0.5830078125}, {"start": 1739.9, "end": 1740.1, "word": " will", "probability": 0.86328125}, {"start": 1740.1, "end": 1740.44, "word": " reach", "probability": 0.5205078125}, {"start": 1740.44, "end": 1740.84, "word": " another", "probability": 0.8935546875}, {"start": 1740.84, "end": 1741.32, "word": " formula", "probability": 0.9150390625}, {"start": 1741.32, "end": 1741.64, "word": " for", "probability": 0.9169921875}, {"start": 1741.64, "end": 1741.84, "word": " this", "probability": 0.67431640625}, {"start": 1741.84, "end": 1742.44, "word": " statistic.", "probability": 0.7607421875}, {"start": 1743.68, "end": 1744.1, "word": " The", "probability": 0.8603515625}, {"start": 1744.1, "end": 1744.36, "word": " first", "probability": 0.88916015625}, {"start": 1744.36, "end": 1744.56, "word": " one", "probability": 0.9189453125}, {"start": 1744.56, "end": 1744.88, "word": " depends", "probability": 0.87841796875}, {"start": 1744.88, "end": 1745.1, "word": " on", "probability": 0.94580078125}, {"start": 1745.1, "end": 1745.46, "word": " the", "probability": 0.60888671875}, {"start": 1745.46, "end": 1745.74, "word": " sample", "probability": 0.7265625}, {"start": 1745.74, "end": 1746.26, "word": " proportion.", "probability": 0.74853515625}, {"start": 1747.02, "end": 1747.16, "word": " The", "probability": 0.865234375}, {"start": 1747.16, "end": 1747.38, "word": " other", "probability": 0.873046875}, {"start": 1747.38, "end": 1747.58, "word": " one", 
"probability": 0.84765625}, {"start": 1747.58, "end": 1747.74, "word": " will", "probability": 0.464599609375}, {"start": 1747.74, "end": 1748.1, "word": " depend", "probability": 0.7685546875}, {"start": 1748.1, "end": 1748.82, "word": " on", "probability": 0.927734375}, {"start": 1748.82, "end": 1749.24, "word": " x.", "probability": 0.453857421875}, {"start": 1749.84, "end": 1750.12, "word": " Now,", "probability": 0.869140625}, {"start": 1750.62, "end": 1750.88, "word": " n,", "probability": 0.42431640625}, {"start": 1751.28, "end": 1751.62, "word": " this", "probability": 0.935546875}, {"start": 1751.62, "end": 1751.82, "word": " one", "probability": 0.90625}, {"start": 1751.82, "end": 1752.2, "word": " equals", "probability": 0.83203125}, {"start": 1752.2, "end": 1752.46, "word": " n", "probability": 0.9423828125}, {"start": 1752.46, "end": 1752.74, "word": " times", "probability": 0.8642578125}, {"start": 1752.74, "end": 1752.98, "word": " b,", "probability": 0.51611328125}, {"start": 1754.16, "end": 1755.1, "word": " which", "probability": 0.93798828125}, {"start": 1755.1, "end": 1755.24, "word": " is", "probability": 0.94921875}, {"start": 1755.24, "end": 1755.64, "word": " x", "probability": 0.99462890625}, {"start": 1755.64, "end": 1756.54, "word": " minus", "probability": 0.72802734375}, {"start": 1756.54, "end": 1757.36, "word": " n", "probability": 0.9375}, {"start": 1757.36, "end": 1757.78, "word": " times", "probability": 0.9228515625}, {"start": 1757.78, "end": 1758.24, "word": " y", "probability": 0.5068359375}, {"start": 1758.24, "end": 1758.8, "word": " n", "probability": 0.58984375}, {"start": 1758.8, "end": 1759.12, "word": " pi.", "probability": 0.86865234375}, {"start": 1760.9, "end": 1761.22, "word": " So", "probability": 0.9521484375}, {"start": 1761.22, "end": 1761.5, "word": " again,", "probability": 0.85302734375}, {"start": 1762.12, "end": 1763.06, "word": " n", "probability": 0.9833984375}, {"start": 1763.06, "end": 1763.32, "word": " 
times", "probability": 0.93359375}, {"start": 1763.32, "end": 1763.5, "word": " b", "probability": 0.94873046875}, {"start": 1763.5, "end": 1763.66, "word": " is", "probability": 0.94091796875}, {"start": 1763.66, "end": 1764.06, "word": " x", "probability": 0.99755859375}, {"start": 1764.06, "end": 1766.2, "word": " minus", "probability": 0.90673828125}, {"start": 1766.2, "end": 1766.74, "word": " n", "probability": 0.99755859375}, {"start": 1766.74, "end": 1767.12, "word": " pi.", "probability": 0.94580078125}], "temperature": 1.0}, {"id": 67, "seek": 178820, "start": 1768.42, "end": 1788.2, "text": " Now N, N dot squared becomes N squared, so N squared, we have N, so one cancelled, so N is left, so we have N pi 1. These two statistics are equivalent.", "tokens": [823, 426, 11, 426, 5893, 8889, 3643, 426, 8889, 11, 370, 426, 8889, 11, 321, 362, 426, 11, 370, 472, 25103, 11, 370, 426, 307, 1411, 11, 370, 321, 362, 426, 3895, 502, 13, 1981, 732, 12523, 366, 10344, 13], "avg_logprob": -0.4138719454044249, "compression_ratio": 1.4166666666666667, "no_speech_prob": 0.0, "words": [{"start": 1768.4199999999998, "end": 1769.1, "word": " Now", "probability": 0.52197265625}, {"start": 1769.1, "end": 1769.78, "word": " N,", "probability": 0.306396484375}, {"start": 1769.78, "end": 1772.18, "word": " N", "probability": 0.66162109375}, {"start": 1772.18, "end": 1772.32, "word": " dot", "probability": 0.44873046875}, {"start": 1772.32, "end": 1772.56, "word": " squared", "probability": 0.329345703125}, {"start": 1772.56, "end": 1772.9, "word": " becomes", "probability": 0.5869140625}, {"start": 1772.9, "end": 1773.8, "word": " N", "probability": 0.955078125}, {"start": 1773.8, "end": 1774.26, "word": " squared,", "probability": 0.77685546875}, {"start": 1775.46, "end": 1775.64, "word": " so", "probability": 0.84423828125}, {"start": 1775.64, "end": 1775.78, "word": " N", "probability": 0.9013671875}, {"start": 1775.78, "end": 1776.2, "word": " squared,", "probability": 
0.8662109375}, {"start": 1776.4, "end": 1776.54, "word": " we", "probability": 0.841796875}, {"start": 1776.54, "end": 1776.72, "word": " have", "probability": 0.9521484375}, {"start": 1776.72, "end": 1777.0, "word": " N,", "probability": 0.98046875}, {"start": 1777.1, "end": 1777.24, "word": " so", "probability": 0.88671875}, {"start": 1777.24, "end": 1777.44, "word": " one", "probability": 0.521484375}, {"start": 1777.44, "end": 1777.96, "word": " cancelled,", "probability": 0.330810546875}, {"start": 1779.04, "end": 1779.4, "word": " so", "probability": 0.9013671875}, {"start": 1779.4, "end": 1779.88, "word": " N", "probability": 0.9677734375}, {"start": 1779.88, "end": 1780.04, "word": " is", "probability": 0.9365234375}, {"start": 1780.04, "end": 1780.3, "word": " left,", "probability": 0.91943359375}, {"start": 1780.54, "end": 1780.66, "word": " so", "probability": 0.89013671875}, {"start": 1780.66, "end": 1780.78, "word": " we", "probability": 0.81591796875}, {"start": 1780.78, "end": 1780.92, "word": " have", "probability": 0.94287109375}, {"start": 1780.92, "end": 1781.28, "word": " N", "probability": 0.98681640625}, {"start": 1781.28, "end": 1782.12, "word": " pi", "probability": 0.2491455078125}, {"start": 1782.12, "end": 1783.5, "word": " 1.", "probability": 0.3544921875}, {"start": 1785.14, "end": 1785.82, "word": " These", "probability": 0.86669921875}, {"start": 1785.82, "end": 1786.1, "word": " two", "probability": 0.86181640625}, {"start": 1786.1, "end": 1786.68, "word": " statistics", "probability": 0.90283203125}, {"start": 1786.68, "end": 1787.86, "word": " are", "probability": 0.9365234375}, {"start": 1787.86, "end": 1788.2, "word": " equivalent.", "probability": 0.29248046875}], "temperature": 1.0}, {"id": 68, "seek": 181779, "start": 1790.03, "end": 1817.79, "text": " So you may use b minus pi frequently. The common one actually is this one, b minus pi divided by root, square root of pi, 1 minus pi divided by n. 
So actually, these two forms are equivalent. In this case, the two conditions, x is greater than or equal to 5, and n minus x is at least 5.", "tokens": [407, 291, 815, 764, 272, 3175, 3895, 10374, 13, 440, 2689, 472, 767, 307, 341, 472, 11, 272, 3175, 3895, 6666, 538, 5593, 11, 3732, 5593, 295, 3895, 11, 502, 3175, 3895, 6666, 538, 297, 13, 407, 767, 11, 613, 732, 6422, 366, 10344, 13, 682, 341, 1389, 11, 264, 732, 4487, 11, 2031, 307, 5044, 813, 420, 2681, 281, 1025, 11, 293, 297, 3175, 2031, 307, 412, 1935, 1025, 13], "avg_logprob": -0.2014973999725448, "compression_ratio": 1.5824175824175823, "no_speech_prob": 0.0, "words": [{"start": 1790.03, "end": 1790.31, "word": " So", "probability": 0.92529296875}, {"start": 1790.31, "end": 1790.53, "word": " you", "probability": 0.84228515625}, {"start": 1790.53, "end": 1790.67, "word": " may", "probability": 0.7822265625}, {"start": 1790.67, "end": 1791.09, "word": " use", "probability": 0.888671875}, {"start": 1791.09, "end": 1791.61, "word": " b", "probability": 0.382080078125}, {"start": 1791.61, "end": 1791.91, "word": " minus", "probability": 0.8095703125}, {"start": 1791.91, "end": 1792.27, "word": " pi", "probability": 0.80419921875}, {"start": 1792.27, "end": 1793.81, "word": " frequently.", "probability": 0.5380859375}, {"start": 1795.03, "end": 1795.33, "word": " The", "probability": 0.8486328125}, {"start": 1795.33, "end": 1795.59, "word": " common", "probability": 0.88427734375}, {"start": 1795.59, "end": 1795.83, "word": " one", "probability": 0.92724609375}, {"start": 1795.83, "end": 1796.29, "word": " actually", "probability": 0.7421875}, {"start": 1796.29, "end": 1796.83, "word": " is", "probability": 0.88720703125}, {"start": 1796.83, "end": 1797.01, "word": " this", "probability": 0.94970703125}, {"start": 1797.01, "end": 1797.23, "word": " one,", "probability": 0.92578125}, {"start": 1797.53, "end": 1797.71, "word": " b", "probability": 0.89990234375}, {"start": 1797.71, "end": 1798.05, "word": " 
minus", "probability": 0.96923828125}, {"start": 1798.05, "end": 1798.53, "word": " pi", "probability": 0.943359375}, {"start": 1798.53, "end": 1798.85, "word": " divided", "probability": 0.744140625}, {"start": 1798.85, "end": 1799.05, "word": " by", "probability": 0.96142578125}, {"start": 1799.05, "end": 1799.41, "word": " root,", "probability": 0.736328125}, {"start": 1799.93, "end": 1800.27, "word": " square", "probability": 0.89892578125}, {"start": 1800.27, "end": 1800.45, "word": " root", "probability": 0.92041015625}, {"start": 1800.45, "end": 1800.59, "word": " of", "probability": 0.958984375}, {"start": 1800.59, "end": 1800.77, "word": " pi,", "probability": 0.6435546875}, {"start": 1800.83, "end": 1800.97, "word": " 1", "probability": 0.71923828125}, {"start": 1800.97, "end": 1801.21, "word": " minus", "probability": 0.98291015625}, {"start": 1801.21, "end": 1801.35, "word": " pi", "probability": 0.8515625}, {"start": 1801.35, "end": 1801.57, "word": " divided", "probability": 0.76806640625}, {"start": 1801.57, "end": 1801.71, "word": " by", "probability": 0.95166015625}, {"start": 1801.71, "end": 1801.89, "word": " n.", "probability": 0.88134765625}, {"start": 1802.49, "end": 1803.05, "word": " So", "probability": 0.96435546875}, {"start": 1803.05, "end": 1803.39, "word": " actually,", "probability": 0.85693359375}, {"start": 1803.61, "end": 1803.83, "word": " these", "probability": 0.8525390625}, {"start": 1803.83, "end": 1804.09, "word": " two", "probability": 0.9267578125}, {"start": 1804.09, "end": 1804.63, "word": " forms", "probability": 0.87890625}, {"start": 1804.63, "end": 1805.99, "word": " are", "probability": 0.93798828125}, {"start": 1805.99, "end": 1807.39, "word": " equivalent.", "probability": 0.8857421875}, {"start": 1809.19, "end": 1809.41, "word": " In", "probability": 0.6767578125}, {"start": 1809.41, "end": 1809.59, "word": " this", "probability": 0.93701171875}, {"start": 1809.59, "end": 1809.81, "word": " case,", "probability": 
0.92626953125}, {"start": 1809.89, "end": 1810.01, "word": " the", "probability": 0.81591796875}, {"start": 1810.01, "end": 1810.47, "word": " two", "probability": 0.8232421875}, {"start": 1810.47, "end": 1811.05, "word": " conditions,", "probability": 0.87109375}, {"start": 1811.67, "end": 1811.95, "word": " x", "probability": 0.89599609375}, {"start": 1811.95, "end": 1812.19, "word": " is", "probability": 0.9267578125}, {"start": 1812.19, "end": 1812.47, "word": " greater", "probability": 0.92822265625}, {"start": 1812.47, "end": 1812.75, "word": " than", "probability": 0.9287109375}, {"start": 1812.75, "end": 1812.87, "word": " or", "probability": 0.955078125}, {"start": 1812.87, "end": 1813.03, "word": " equal", "probability": 0.90869140625}, {"start": 1813.03, "end": 1813.15, "word": " to", "probability": 0.92138671875}, {"start": 1813.15, "end": 1813.47, "word": " 5,", "probability": 0.82275390625}, {"start": 1813.81, "end": 1814.05, "word": " and", "probability": 0.94189453125}, {"start": 1814.05, "end": 1814.37, "word": " n", "probability": 0.68115234375}, {"start": 1814.37, "end": 1815.19, "word": " minus", "probability": 0.96826171875}, {"start": 1815.19, "end": 1815.67, "word": " x", "probability": 0.9912109375}, {"start": 1815.67, "end": 1816.97, "word": " is", "probability": 0.93310546875}, {"start": 1816.97, "end": 1817.17, "word": " at", "probability": 0.96630859375}, {"start": 1817.17, "end": 1817.41, "word": " least", "probability": 0.955078125}, {"start": 1817.41, "end": 1817.79, "word": " 5.", "probability": 0.947265625}], "temperature": 1.0}, {"id": 69, "seek": 184707, "start": 1819.19, "end": 1847.07, "text": " Let's look at this specific example. A marketing company claims that it receives 8% responses from its mailing. 
So now the claim is, the company receives only 8% responses from its mailing.", "tokens": [961, 311, 574, 412, 341, 2685, 1365, 13, 316, 6370, 2237, 9441, 300, 309, 20717, 1649, 4, 13019, 490, 1080, 41612, 13, 407, 586, 264, 3932, 307, 11, 264, 2237, 20717, 787, 1649, 4, 13019, 490, 1080, 41612, 13], "avg_logprob": -0.13466796912252904, "compression_ratio": 1.532258064516129, "no_speech_prob": 0.0, "words": [{"start": 1819.19, "end": 1819.93, "word": " Let's", "probability": 0.876220703125}, {"start": 1819.93, "end": 1821.77, "word": " look", "probability": 0.92822265625}, {"start": 1821.77, "end": 1822.23, "word": " at", "probability": 0.96728515625}, {"start": 1822.23, "end": 1822.75, "word": " this", "probability": 0.9326171875}, {"start": 1822.75, "end": 1823.55, "word": " specific", "probability": 0.8955078125}, {"start": 1823.55, "end": 1824.79, "word": " example.", "probability": 0.97216796875}, {"start": 1826.65, "end": 1827.77, "word": " A", "probability": 0.755859375}, {"start": 1827.77, "end": 1828.13, "word": " marketing", "probability": 0.78955078125}, {"start": 1828.13, "end": 1828.85, "word": " company", "probability": 0.916015625}, {"start": 1828.85, "end": 1829.63, "word": " claims", "probability": 0.80419921875}, {"start": 1829.63, "end": 1830.91, "word": " that", "probability": 0.9150390625}, {"start": 1830.91, "end": 1832.57, "word": " it", "probability": 0.9326171875}, {"start": 1832.57, "end": 1833.17, "word": " receives", "probability": 0.88916015625}, {"start": 1833.17, "end": 1834.47, "word": " 8", "probability": 0.7998046875}, {"start": 1834.47, "end": 1834.83, "word": "%", "probability": 0.9228515625}, {"start": 1834.83, "end": 1835.51, "word": " responses", "probability": 0.91943359375}, {"start": 1835.51, "end": 1836.03, "word": " from", "probability": 0.8916015625}, {"start": 1836.03, "end": 1836.41, "word": " its", "probability": 0.83740234375}, {"start": 1836.41, "end": 1836.77, "word": " mailing.", "probability": 0.90283203125}, 
{"start": 1838.51, "end": 1839.11, "word": " So", "probability": 0.86474609375}, {"start": 1839.11, "end": 1839.29, "word": " now", "probability": 0.8525390625}, {"start": 1839.29, "end": 1839.49, "word": " the", "probability": 0.74560546875}, {"start": 1839.49, "end": 1839.77, "word": " claim", "probability": 0.92724609375}, {"start": 1839.77, "end": 1840.17, "word": " is,", "probability": 0.9482421875}, {"start": 1841.27, "end": 1841.49, "word": " the", "probability": 0.77294921875}, {"start": 1841.49, "end": 1841.93, "word": " company", "probability": 0.916015625}, {"start": 1841.93, "end": 1843.33, "word": " receives", "probability": 0.89111328125}, {"start": 1843.33, "end": 1844.57, "word": " only", "probability": 0.9130859375}, {"start": 1844.57, "end": 1844.89, "word": " 8", "probability": 0.97802734375}, {"start": 1844.89, "end": 1845.25, "word": "%", "probability": 0.9931640625}, {"start": 1845.25, "end": 1846.11, "word": " responses", "probability": 0.9248046875}, {"start": 1846.11, "end": 1846.47, "word": " from", "probability": 0.88916015625}, {"start": 1846.47, "end": 1846.77, "word": " its", "probability": 0.87646484375}, {"start": 1846.77, "end": 1847.07, "word": " mailing.", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 70, "seek": 187580, "start": 1848.54, "end": 1875.8, "text": " So from their records, we know that the proportion of response is 8%. To test this claim, a random sample of 500 were surveyed with 25 responses. So we are actually interested in the number of response. 
So x is 25.", "tokens": [407, 490, 641, 7724, 11, 321, 458, 300, 264, 16068, 295, 4134, 307, 1649, 6856, 1407, 1500, 341, 3932, 11, 257, 4974, 6889, 295, 5923, 645, 8984, 292, 365, 3552, 13019, 13, 407, 321, 366, 767, 3102, 294, 264, 1230, 295, 4134, 13, 407, 2031, 307, 3552, 13], "avg_logprob": -0.14883609207309023, "compression_ratio": 1.4052287581699345, "no_speech_prob": 0.0, "words": [{"start": 1848.54, "end": 1848.84, "word": " So", "probability": 0.74267578125}, {"start": 1848.84, "end": 1849.3, "word": " from", "probability": 0.70556640625}, {"start": 1849.3, "end": 1850.14, "word": " their", "probability": 0.70166015625}, {"start": 1850.14, "end": 1850.74, "word": " records,", "probability": 0.82470703125}, {"start": 1851.04, "end": 1851.22, "word": " we", "probability": 0.9267578125}, {"start": 1851.22, "end": 1851.38, "word": " know", "probability": 0.86181640625}, {"start": 1851.38, "end": 1851.76, "word": " that", "probability": 0.87548828125}, {"start": 1851.76, "end": 1852.96, "word": " the", "probability": 0.85205078125}, {"start": 1852.96, "end": 1853.5, "word": " proportion", "probability": 0.82666015625}, {"start": 1853.5, "end": 1854.58, "word": " of", "probability": 0.96484375}, {"start": 1854.58, "end": 1855.3, "word": " response", "probability": 0.83154296875}, {"start": 1855.3, "end": 1855.78, "word": " is", "probability": 0.94384765625}, {"start": 1855.78, "end": 1857.48, "word": " 8%.", "probability": 0.8349609375}, {"start": 1857.48, "end": 1858.78, "word": " To", "probability": 0.90673828125}, {"start": 1858.78, "end": 1859.04, "word": " test", "probability": 0.875}, {"start": 1859.04, "end": 1859.24, "word": " this", "probability": 0.90087890625}, {"start": 1859.24, "end": 1859.64, "word": " claim,", "probability": 0.732421875}, {"start": 1860.4, "end": 1860.62, "word": " a", "probability": 0.97314453125}, {"start": 1860.62, "end": 1860.88, "word": " random", "probability": 0.859375}, {"start": 1860.88, "end": 1861.24, "word": " 
sample", "probability": 0.88818359375}, {"start": 1861.24, "end": 1861.52, "word": " of", "probability": 0.97314453125}, {"start": 1861.52, "end": 1862.34, "word": " 500", "probability": 0.9638671875}, {"start": 1862.34, "end": 1864.72, "word": " were", "probability": 0.82666015625}, {"start": 1864.72, "end": 1865.46, "word": " surveyed", "probability": 0.92529296875}, {"start": 1865.46, "end": 1866.02, "word": " with", "probability": 0.775390625}, {"start": 1866.02, "end": 1866.8, "word": " 25", "probability": 0.9638671875}, {"start": 1866.8, "end": 1867.42, "word": " responses.", "probability": 0.93798828125}, {"start": 1868.1, "end": 1868.48, "word": " So", "probability": 0.94921875}, {"start": 1868.48, "end": 1868.96, "word": " we", "probability": 0.853515625}, {"start": 1868.96, "end": 1869.18, "word": " are", "probability": 0.935546875}, {"start": 1869.18, "end": 1869.56, "word": " actually", "probability": 0.89111328125}, {"start": 1869.56, "end": 1870.12, "word": " interested", "probability": 0.849609375}, {"start": 1870.12, "end": 1870.56, "word": " in", "probability": 0.94677734375}, {"start": 1870.56, "end": 1870.98, "word": " the", "probability": 0.9228515625}, {"start": 1870.98, "end": 1872.24, "word": " number", "probability": 0.935546875}, {"start": 1872.24, "end": 1872.44, "word": " of", "probability": 0.94921875}, {"start": 1872.44, "end": 1872.98, "word": " response.", "probability": 0.68115234375}, {"start": 1874.12, "end": 1874.98, "word": " So", "probability": 0.95556640625}, {"start": 1874.98, "end": 1875.26, "word": " x", "probability": 0.62646484375}, {"start": 1875.26, "end": 1875.4, "word": " is", "probability": 0.87939453125}, {"start": 1875.4, "end": 1875.8, "word": " 25.", "probability": 0.61865234375}], "temperature": 1.0}, {"id": 71, "seek": 190555, "start": 1878.05, "end": 1905.55, "text": " So this survey of 500 gives 25 responses. Now test at 5% significance limit. Now the direction is not given, just pi equals 8%. 
So we have to test H0 against H1 of pi does not equal, since the direction is not given.", "tokens": [407, 341, 8984, 295, 5923, 2709, 3552, 13019, 13, 823, 1500, 412, 1025, 4, 17687, 4948, 13, 823, 264, 3513, 307, 406, 2212, 11, 445, 3895, 6915, 1649, 6856, 407, 321, 362, 281, 1500, 389, 15, 1970, 389, 16, 295, 3895, 775, 406, 2681, 11, 1670, 264, 3513, 307, 406, 2212, 13], "avg_logprob": -0.21683372753971028, "compression_ratio": 1.4761904761904763, "no_speech_prob": 0.0, "words": [{"start": 1878.05, "end": 1878.29, "word": " So", "probability": 0.82470703125}, {"start": 1878.29, "end": 1878.57, "word": " this", "probability": 0.794921875}, {"start": 1878.57, "end": 1879.03, "word": " survey", "probability": 0.818359375}, {"start": 1879.03, "end": 1879.91, "word": " of", "probability": 0.90283203125}, {"start": 1879.91, "end": 1880.47, "word": " 500", "probability": 0.92919921875}, {"start": 1880.47, "end": 1882.39, "word": " gives", "probability": 0.62353515625}, {"start": 1882.39, "end": 1882.95, "word": " 25", "probability": 0.91748046875}, {"start": 1882.95, "end": 1883.69, "word": " responses.", "probability": 0.908203125}, {"start": 1884.31, "end": 1884.67, "word": " Now", "probability": 0.67236328125}, {"start": 1884.67, "end": 1885.11, "word": " test", "probability": 0.67529296875}, {"start": 1885.11, "end": 1885.65, "word": " at", "probability": 0.9033203125}, {"start": 1885.65, "end": 1886.07, "word": " 5", "probability": 0.953125}, {"start": 1886.07, "end": 1886.63, "word": "%", "probability": 0.6875}, {"start": 1886.63, "end": 1888.85, "word": " significance", "probability": 0.775390625}, {"start": 1888.85, "end": 1889.17, "word": " limit.", "probability": 0.7275390625}, {"start": 1889.93, "end": 1890.31, "word": " Now", "probability": 0.94580078125}, {"start": 1890.31, "end": 1890.55, "word": " the", "probability": 0.58154296875}, {"start": 1890.55, "end": 1890.89, "word": " direction", "probability": 0.8798828125}, {"start": 1890.89, "end": 1891.15, 
"word": " is", "probability": 0.94384765625}, {"start": 1891.15, "end": 1891.37, "word": " not", "probability": 0.947265625}, {"start": 1891.37, "end": 1891.63, "word": " given,", "probability": 0.88427734375}, {"start": 1892.63, "end": 1893.33, "word": " just", "probability": 0.90283203125}, {"start": 1893.33, "end": 1893.95, "word": " pi", "probability": 0.44091796875}, {"start": 1893.95, "end": 1894.29, "word": " equals", "probability": 0.436767578125}, {"start": 1894.29, "end": 1895.07, "word": " 8%.", "probability": 0.81103515625}, {"start": 1895.07, "end": 1895.81, "word": " So", "probability": 0.9619140625}, {"start": 1895.81, "end": 1896.03, "word": " we", "probability": 0.9169921875}, {"start": 1896.03, "end": 1896.19, "word": " have", "probability": 0.94482421875}, {"start": 1896.19, "end": 1896.33, "word": " to", "probability": 0.96044921875}, {"start": 1896.33, "end": 1896.65, "word": " test", "probability": 0.876953125}, {"start": 1896.65, "end": 1897.73, "word": " H0", "probability": 0.73681640625}, {"start": 1897.73, "end": 1898.27, "word": " against", "probability": 0.91357421875}, {"start": 1898.27, "end": 1900.05, "word": " H1", "probability": 0.989990234375}, {"start": 1900.05, "end": 1901.91, "word": " of", "probability": 0.433349609375}, {"start": 1901.91, "end": 1902.27, "word": " pi", "probability": 0.88818359375}, {"start": 1902.27, "end": 1902.61, "word": " does", "probability": 0.8046875}, {"start": 1902.61, "end": 1902.87, "word": " not", "probability": 0.951171875}, {"start": 1902.87, "end": 1903.19, "word": " equal,", "probability": 0.91748046875}, {"start": 1903.65, "end": 1904.39, "word": " since", "probability": 0.85888671875}, {"start": 1904.39, "end": 1904.57, "word": " the", "probability": 0.91650390625}, {"start": 1904.57, "end": 1904.93, "word": " direction", "probability": 0.96875}, {"start": 1904.93, "end": 1905.15, "word": " is", "probability": 0.9208984375}, {"start": 1905.15, "end": 1905.31, "word": " not", "probability": 
0.93798828125}, {"start": 1905.31, "end": 1905.55, "word": " given.", "probability": 0.8798828125}], "temperature": 1.0}, {"id": 72, "seek": 193013, "start": 1906.81, "end": 1930.13, "text": " This problem does not say it's above or greater or decreased or increased. So it's two-tailed test. So we are testing if 0 pi equals 0.08 against the alternate hypothesis mu is not 0.8. Now, the first step, we have to check the two conditions, n times pi.", "tokens": [639, 1154, 775, 406, 584, 309, 311, 3673, 420, 5044, 420, 24436, 420, 6505, 13, 407, 309, 311, 732, 12, 14430, 292, 1500, 13, 407, 321, 366, 4997, 498, 1958, 3895, 6915, 1958, 13, 16133, 1970, 264, 18873, 17291, 2992, 307, 406, 1958, 13, 23, 13, 823, 11, 264, 700, 1823, 11, 321, 362, 281, 1520, 264, 732, 4487, 11, 297, 1413, 3895, 13], "avg_logprob": -0.22632211538461539, "compression_ratio": 1.4628571428571429, "no_speech_prob": 0.0, "words": [{"start": 1906.81, "end": 1907.13, "word": " This", "probability": 0.73681640625}, {"start": 1907.13, "end": 1907.43, "word": " problem", "probability": 0.6640625}, {"start": 1907.43, "end": 1907.65, "word": " does", "probability": 0.95556640625}, {"start": 1907.65, "end": 1907.85, "word": " not", "probability": 0.95166015625}, {"start": 1907.85, "end": 1908.21, "word": " say", "probability": 0.94580078125}, {"start": 1908.21, "end": 1908.81, "word": " it's", "probability": 0.879638671875}, {"start": 1908.81, "end": 1909.23, "word": " above", "probability": 0.9638671875}, {"start": 1909.23, "end": 1910.13, "word": " or", "probability": 0.81396484375}, {"start": 1910.13, "end": 1910.49, "word": " greater", "probability": 0.900390625}, {"start": 1910.49, "end": 1910.97, "word": " or", "probability": 0.8115234375}, {"start": 1910.97, "end": 1911.61, "word": " decreased", "probability": 0.8427734375}, {"start": 1911.61, "end": 1911.83, "word": " or", "probability": 0.94970703125}, {"start": 1911.83, "end": 1912.31, "word": " increased.", "probability": 0.95263671875}, 
{"start": 1912.67, "end": 1912.89, "word": " So", "probability": 0.931640625}, {"start": 1912.89, "end": 1914.33, "word": " it's", "probability": 0.9033203125}, {"start": 1914.33, "end": 1914.63, "word": " two", "probability": 0.73876953125}, {"start": 1914.63, "end": 1914.97, "word": "-tailed", "probability": 0.7097981770833334}, {"start": 1914.97, "end": 1915.21, "word": " test.", "probability": 0.72509765625}, {"start": 1915.75, "end": 1915.93, "word": " So", "probability": 0.94091796875}, {"start": 1915.93, "end": 1916.05, "word": " we", "probability": 0.89111328125}, {"start": 1916.05, "end": 1916.19, "word": " are", "probability": 0.9013671875}, {"start": 1916.19, "end": 1916.63, "word": " testing", "probability": 0.859375}, {"start": 1916.63, "end": 1916.85, "word": " if", "probability": 0.84033203125}, {"start": 1916.85, "end": 1917.17, "word": " 0", "probability": 0.779296875}, {"start": 1917.17, "end": 1917.63, "word": " pi", "probability": 0.413818359375}, {"start": 1917.63, "end": 1917.97, "word": " equals", "probability": 0.7744140625}, {"start": 1917.97, "end": 1918.35, "word": " 0", "probability": 0.82763671875}, {"start": 1918.35, "end": 1919.21, "word": ".08", "probability": 0.978759765625}, {"start": 1919.21, "end": 1920.13, "word": " against", "probability": 0.75927734375}, {"start": 1920.13, "end": 1920.29, "word": " the", "probability": 0.44921875}, {"start": 1920.29, "end": 1920.63, "word": " alternate", "probability": 0.701171875}, {"start": 1920.63, "end": 1921.21, "word": " hypothesis", "probability": 0.7412109375}, {"start": 1921.21, "end": 1922.03, "word": " mu", "probability": 0.3193359375}, {"start": 1922.03, "end": 1922.35, "word": " is", "probability": 0.9423828125}, {"start": 1922.35, "end": 1922.69, "word": " not", "probability": 0.9462890625}, {"start": 1922.69, "end": 1923.63, "word": " 0", "probability": 0.8701171875}, {"start": 1923.63, "end": 1923.81, "word": ".8.", "probability": 0.5880126953125}, {"start": 1924.27, "end": 
1924.61, "word": " Now,", "probability": 0.95263671875}, {"start": 1924.71, "end": 1924.81, "word": " the", "probability": 0.87451171875}, {"start": 1924.81, "end": 1925.07, "word": " first", "probability": 0.87548828125}, {"start": 1925.07, "end": 1925.25, "word": " step,", "probability": 0.73681640625}, {"start": 1925.33, "end": 1925.39, "word": " we", "probability": 0.923828125}, {"start": 1925.39, "end": 1925.55, "word": " have", "probability": 0.94580078125}, {"start": 1925.55, "end": 1926.01, "word": " to", "probability": 0.97119140625}, {"start": 1926.01, "end": 1926.95, "word": " check", "probability": 0.9150390625}, {"start": 1926.95, "end": 1927.25, "word": " the", "probability": 0.9189453125}, {"start": 1927.25, "end": 1927.41, "word": " two", "probability": 0.92822265625}, {"start": 1927.41, "end": 1928.01, "word": " conditions,", "probability": 0.8720703125}, {"start": 1929.13, "end": 1929.43, "word": " n", "probability": 0.61572265625}, {"start": 1929.43, "end": 1929.79, "word": " times", "probability": 0.91943359375}, {"start": 1929.79, "end": 1930.13, "word": " pi.", "probability": 0.90869140625}], "temperature": 1.0}, {"id": 73, "seek": 196064, "start": 1931.78, "end": 1960.64, "text": " N is 500, Pi is 8%. So 5 times 8 gives 40. Now the other condition is the complement actually. So since N equals 500, so this one should be 460. The reason behind that is if we add N Pi and the other condition, N times 1 minus Pi. 
So this gives N Pi plus N.", "tokens": [426, 307, 5923, 11, 17741, 307, 1649, 6856, 407, 1025, 1413, 1649, 2709, 3356, 13, 823, 264, 661, 4188, 307, 264, 17103, 767, 13, 407, 1670, 426, 6915, 5923, 11, 370, 341, 472, 820, 312, 1017, 4550, 13, 440, 1778, 2261, 300, 307, 498, 321, 909, 426, 17741, 293, 264, 661, 4188, 11, 426, 1413, 502, 3175, 17741, 13, 407, 341, 2709, 426, 17741, 1804, 426, 13], "avg_logprob": -0.21806065782028086, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 1931.78, "end": 1932.06, "word": " N", "probability": 0.443115234375}, {"start": 1932.06, "end": 1932.22, "word": " is", "probability": 0.83251953125}, {"start": 1932.22, "end": 1932.76, "word": " 500,", "probability": 0.75390625}, {"start": 1933.02, "end": 1933.12, "word": " Pi", "probability": 0.465576171875}, {"start": 1933.12, "end": 1933.34, "word": " is", "probability": 0.94970703125}, {"start": 1933.34, "end": 1934.2, "word": " 8%.", "probability": 0.6328125}, {"start": 1934.2, "end": 1935.1, "word": " So", "probability": 0.92529296875}, {"start": 1935.1, "end": 1935.62, "word": " 5", "probability": 0.498046875}, {"start": 1935.62, "end": 1935.94, "word": " times", "probability": 0.6240234375}, {"start": 1935.94, "end": 1936.34, "word": " 8", "probability": 0.9833984375}, {"start": 1936.34, "end": 1936.72, "word": " gives", "probability": 0.712890625}, {"start": 1936.72, "end": 1937.26, "word": " 40.", "probability": 0.96435546875}, {"start": 1938.64, "end": 1938.98, "word": " Now", "probability": 0.9111328125}, {"start": 1938.98, "end": 1939.12, "word": " the", "probability": 0.685546875}, {"start": 1939.12, "end": 1939.32, "word": " other", "probability": 0.8818359375}, {"start": 1939.32, "end": 1939.76, "word": " condition", "probability": 0.9443359375}, {"start": 1939.76, "end": 1939.98, "word": " is", "probability": 0.9375}, {"start": 1939.98, "end": 1940.12, "word": " the", "probability": 0.83642578125}, {"start": 1940.12, "end": 1940.48, "word": " 
complement", "probability": 0.7666015625}, {"start": 1940.48, "end": 1940.98, "word": " actually.", "probability": 0.76416015625}, {"start": 1941.34, "end": 1941.5, "word": " So", "probability": 0.87548828125}, {"start": 1941.5, "end": 1941.94, "word": " since", "probability": 0.76416015625}, {"start": 1941.94, "end": 1942.36, "word": " N", "probability": 0.8154296875}, {"start": 1942.36, "end": 1942.62, "word": " equals", "probability": 0.56396484375}, {"start": 1942.62, "end": 1943.18, "word": " 500,", "probability": 0.962890625}, {"start": 1943.7, "end": 1943.98, "word": " so", "probability": 0.7265625}, {"start": 1943.98, "end": 1944.3, "word": " this", "probability": 0.94482421875}, {"start": 1944.3, "end": 1944.58, "word": " one", "probability": 0.8955078125}, {"start": 1944.58, "end": 1944.8, "word": " should", "probability": 0.96875}, {"start": 1944.8, "end": 1945.06, "word": " be", "probability": 0.95751953125}, {"start": 1945.06, "end": 1946.14, "word": " 460.", "probability": 0.93115234375}, {"start": 1946.78, "end": 1947.42, "word": " The", "probability": 0.57080078125}, {"start": 1947.42, "end": 1947.72, "word": " reason", "probability": 0.9755859375}, {"start": 1947.72, "end": 1948.1, "word": " behind", "probability": 0.9384765625}, {"start": 1948.1, "end": 1948.5, "word": " that", "probability": 0.927734375}, {"start": 1948.5, "end": 1948.94, "word": " is", "probability": 0.93994140625}, {"start": 1948.94, "end": 1950.0, "word": " if", "probability": 0.69287109375}, {"start": 1950.0, "end": 1950.28, "word": " we", "probability": 0.9560546875}, {"start": 1950.28, "end": 1950.86, "word": " add", "probability": 0.90087890625}, {"start": 1950.86, "end": 1951.14, "word": " N", "probability": 0.76025390625}, {"start": 1951.14, "end": 1951.48, "word": " Pi", "probability": 0.62841796875}, {"start": 1951.48, "end": 1952.8, "word": " and", "probability": 0.6708984375}, {"start": 1952.8, "end": 1952.94, "word": " the", "probability": 0.75732421875}, {"start": 
1952.94, "end": 1953.18, "word": " other", "probability": 0.87451171875}, {"start": 1953.18, "end": 1953.72, "word": " condition,", "probability": 0.95166015625}, {"start": 1954.68, "end": 1955.04, "word": " N", "probability": 0.97509765625}, {"start": 1955.04, "end": 1955.36, "word": " times", "probability": 0.921875}, {"start": 1955.36, "end": 1955.64, "word": " 1", "probability": 0.89404296875}, {"start": 1955.64, "end": 1955.98, "word": " minus", "probability": 0.98046875}, {"start": 1955.98, "end": 1956.36, "word": " Pi.", "probability": 0.955078125}, {"start": 1957.82, "end": 1957.94, "word": " So", "probability": 0.8740234375}, {"start": 1957.94, "end": 1958.18, "word": " this", "probability": 0.919921875}, {"start": 1958.18, "end": 1958.44, "word": " gives", "probability": 0.8828125}, {"start": 1958.44, "end": 1958.68, "word": " N", "probability": 0.97314453125}, {"start": 1958.68, "end": 1959.02, "word": " Pi", "probability": 0.9609375}, {"start": 1959.02, "end": 1960.12, "word": " plus", "probability": 0.8173828125}, {"start": 1960.12, "end": 1960.64, "word": " N.", "probability": 0.98828125}], "temperature": 1.0}, {"id": 74, "seek": 198957, "start": 1960.91, "end": 1989.57, "text": " factor here, n minus n, y. So this cancels. So we end with n. So the total should be 500. So if the n times y is 40, then n times 1 minus y is 460. So two conditions are satisfied. 
Then we can use, we can say that the sample proportion is approximately normally distributed with mean equals y.", "tokens": [5952, 510, 11, 297, 3175, 297, 11, 288, 13, 407, 341, 393, 66, 1625, 13, 407, 321, 917, 365, 297, 13, 407, 264, 3217, 820, 312, 5923, 13, 407, 498, 264, 297, 1413, 288, 307, 3356, 11, 550, 297, 1413, 502, 3175, 288, 307, 1017, 4550, 13, 407, 732, 4487, 366, 11239, 13, 1396, 321, 393, 764, 11, 321, 393, 584, 300, 264, 6889, 16068, 307, 10447, 5646, 12631, 365, 914, 6915, 288, 13], "avg_logprob": -0.2040624992052714, "compression_ratio": 1.5638297872340425, "no_speech_prob": 0.0, "words": [{"start": 1960.91, "end": 1961.35, "word": " factor", "probability": 0.168701171875}, {"start": 1961.35, "end": 1961.59, "word": " here,", "probability": 0.80517578125}, {"start": 1961.69, "end": 1961.77, "word": " n", "probability": 0.6416015625}, {"start": 1961.77, "end": 1962.09, "word": " minus", "probability": 0.90185546875}, {"start": 1962.09, "end": 1962.29, "word": " n,", "probability": 0.97216796875}, {"start": 1962.37, "end": 1962.59, "word": " y.", "probability": 0.494384765625}, {"start": 1962.99, "end": 1963.33, "word": " So", "probability": 0.9501953125}, {"start": 1963.33, "end": 1963.55, "word": " this", "probability": 0.826171875}, {"start": 1963.55, "end": 1964.23, "word": " cancels.", "probability": 0.92529296875}, {"start": 1964.81, "end": 1965.27, "word": " So", "probability": 0.6884765625}, {"start": 1965.27, "end": 1965.43, "word": " we", "probability": 0.861328125}, {"start": 1965.43, "end": 1965.59, "word": " end", "probability": 0.9013671875}, {"start": 1965.59, "end": 1965.77, "word": " with", "probability": 0.892578125}, {"start": 1965.77, "end": 1966.01, "word": " n.", "probability": 0.892578125}, {"start": 1966.57, "end": 1967.05, "word": " So", "probability": 0.9462890625}, {"start": 1967.05, "end": 1967.35, "word": " the", "probability": 0.7958984375}, {"start": 1967.35, "end": 1967.65, "word": " total", "probability": 
0.85498046875}, {"start": 1967.65, "end": 1967.95, "word": " should", "probability": 0.9736328125}, {"start": 1967.95, "end": 1968.29, "word": " be", "probability": 0.9580078125}, {"start": 1968.29, "end": 1969.23, "word": " 500.", "probability": 0.96875}, {"start": 1969.75, "end": 1969.99, "word": " So", "probability": 0.951171875}, {"start": 1969.99, "end": 1970.31, "word": " if", "probability": 0.912109375}, {"start": 1970.31, "end": 1971.57, "word": " the", "probability": 0.88330078125}, {"start": 1971.57, "end": 1971.95, "word": " n", "probability": 0.93359375}, {"start": 1971.95, "end": 1972.29, "word": " times", "probability": 0.931640625}, {"start": 1972.29, "end": 1972.51, "word": " y", "probability": 0.6220703125}, {"start": 1972.51, "end": 1972.71, "word": " is", "probability": 0.9462890625}, {"start": 1972.71, "end": 1973.05, "word": " 40,", "probability": 0.95068359375}, {"start": 1973.25, "end": 1973.61, "word": " then", "probability": 0.83984375}, {"start": 1973.61, "end": 1974.53, "word": " n", "probability": 0.9345703125}, {"start": 1974.53, "end": 1974.79, "word": " times", "probability": 0.92236328125}, {"start": 1974.79, "end": 1974.97, "word": " 1", "probability": 0.452392578125}, {"start": 1974.97, "end": 1975.23, "word": " minus", "probability": 0.98681640625}, {"start": 1975.23, "end": 1975.43, "word": " y", "probability": 0.90478515625}, {"start": 1975.43, "end": 1975.57, "word": " is", "probability": 0.9423828125}, {"start": 1975.57, "end": 1976.37, "word": " 460.", "probability": 0.9482421875}, {"start": 1977.47, "end": 1978.11, "word": " So", "probability": 0.9619140625}, {"start": 1978.11, "end": 1978.47, "word": " two", "probability": 0.8359375}, {"start": 1978.47, "end": 1978.95, "word": " conditions", "probability": 0.86181640625}, {"start": 1978.95, "end": 1979.21, "word": " are", "probability": 0.93212890625}, {"start": 1979.21, "end": 1979.71, "word": " satisfied.", "probability": 0.8994140625}, {"start": 1979.87, "end": 1980.01, 
"word": " Then", "probability": 0.87548828125}, {"start": 1980.01, "end": 1980.15, "word": " we", "probability": 0.88134765625}, {"start": 1980.15, "end": 1980.37, "word": " can", "probability": 0.94677734375}, {"start": 1980.37, "end": 1980.75, "word": " use,", "probability": 0.7783203125}, {"start": 1982.03, "end": 1982.27, "word": " we", "probability": 0.953125}, {"start": 1982.27, "end": 1982.51, "word": " can", "probability": 0.94677734375}, {"start": 1982.51, "end": 1982.69, "word": " say", "probability": 0.92626953125}, {"start": 1982.69, "end": 1983.01, "word": " that", "probability": 0.91845703125}, {"start": 1983.01, "end": 1983.69, "word": " the", "probability": 0.8203125}, {"start": 1983.69, "end": 1983.99, "word": " sample", "probability": 0.70068359375}, {"start": 1983.99, "end": 1984.53, "word": " proportion", "probability": 0.8349609375}, {"start": 1984.53, "end": 1985.29, "word": " is", "probability": 0.94677734375}, {"start": 1985.29, "end": 1985.89, "word": " approximately", "probability": 0.8671875}, {"start": 1985.89, "end": 1986.39, "word": " normally", "probability": 0.7763671875}, {"start": 1986.39, "end": 1987.19, "word": " distributed", "probability": 0.9072265625}, {"start": 1987.19, "end": 1988.23, "word": " with", "probability": 0.65625}, {"start": 1988.23, "end": 1988.57, "word": " mean", "probability": 0.97216796875}, {"start": 1988.57, "end": 1989.11, "word": " equals", "probability": 0.93701171875}, {"start": 1989.11, "end": 1989.57, "word": " y.", "probability": 0.93994140625}], "temperature": 1.0}, {"id": 75, "seek": 201844, "start": 1991.36, "end": 2018.44, "text": " with standard deviation of square root pi 1 minus pi divided by n. So that's your mean. So the mean is pi. 
And sigma root pi, pi is 8%, times 1 minus pi is 98, times 2, divided by n, 500.", "tokens": [365, 3832, 25163, 295, 3732, 5593, 3895, 502, 3175, 3895, 6666, 538, 297, 13, 407, 300, 311, 428, 914, 13, 407, 264, 914, 307, 3895, 13, 400, 12771, 5593, 3895, 11, 3895, 307, 1649, 8923, 1413, 502, 3175, 3895, 307, 20860, 11, 1413, 568, 11, 6666, 538, 297, 11, 5923, 13], "avg_logprob": -0.21138822545225805, "compression_ratio": 1.4351145038167938, "no_speech_prob": 0.0, "words": [{"start": 1991.36, "end": 1992.04, "word": " with", "probability": 0.32421875}, {"start": 1992.04, "end": 1992.72, "word": " standard", "probability": 0.82958984375}, {"start": 1992.72, "end": 1993.18, "word": " deviation", "probability": 0.88525390625}, {"start": 1993.18, "end": 1993.58, "word": " of", "probability": 0.9580078125}, {"start": 1993.58, "end": 1994.02, "word": " square", "probability": 0.71533203125}, {"start": 1994.02, "end": 1994.34, "word": " root", "probability": 0.91162109375}, {"start": 1994.34, "end": 1994.9, "word": " pi", "probability": 0.4521484375}, {"start": 1994.9, "end": 1995.2, "word": " 1", "probability": 0.58984375}, {"start": 1995.2, "end": 1995.54, "word": " minus", "probability": 0.8857421875}, {"start": 1995.54, "end": 1995.9, "word": " pi", "probability": 0.859375}, {"start": 1995.9, "end": 1996.96, "word": " divided", "probability": 0.5712890625}, {"start": 1996.96, "end": 1997.18, "word": " by", "probability": 0.97021484375}, {"start": 1997.18, "end": 1997.4, "word": " n.", "probability": 0.6171875}, {"start": 1997.92, "end": 1998.58, "word": " So", "probability": 0.861328125}, {"start": 1998.58, "end": 1998.94, "word": " that's", "probability": 0.906005859375}, {"start": 1998.94, "end": 1999.18, "word": " your", "probability": 0.89306640625}, {"start": 1999.18, "end": 1999.5, "word": " mean.", "probability": 0.98046875}, {"start": 1999.9, "end": 2000.08, "word": " So", "probability": 0.876953125}, {"start": 2000.08, "end": 2000.24, "word": " the", 
"probability": 0.89404296875}, {"start": 2000.24, "end": 2000.38, "word": " mean", "probability": 0.9658203125}, {"start": 2000.38, "end": 2000.56, "word": " is", "probability": 0.95263671875}, {"start": 2000.56, "end": 2000.9, "word": " pi.", "probability": 0.80908203125}, {"start": 2002.78, "end": 2003.46, "word": " And", "probability": 0.9443359375}, {"start": 2003.46, "end": 2004.02, "word": " sigma", "probability": 0.89208984375}, {"start": 2004.02, "end": 2005.62, "word": " root", "probability": 0.88671875}, {"start": 2005.62, "end": 2006.36, "word": " pi,", "probability": 0.83984375}, {"start": 2007.32, "end": 2007.66, "word": " pi", "probability": 0.91162109375}, {"start": 2007.66, "end": 2007.88, "word": " is", "probability": 0.947265625}, {"start": 2007.88, "end": 2008.62, "word": " 8%,", "probability": 0.627685546875}, {"start": 2008.62, "end": 2013.46, "word": " times", "probability": 0.92919921875}, {"start": 2013.46, "end": 2013.76, "word": " 1", "probability": 0.9345703125}, {"start": 2013.76, "end": 2014.04, "word": " minus", "probability": 0.9873046875}, {"start": 2014.04, "end": 2014.26, "word": " pi", "probability": 0.947265625}, {"start": 2014.26, "end": 2014.44, "word": " is", "probability": 0.876953125}, {"start": 2014.44, "end": 2014.9, "word": " 98,", "probability": 0.88134765625}, {"start": 2015.08, "end": 2015.3, "word": " times", "probability": 0.90673828125}, {"start": 2015.3, "end": 2015.6, "word": " 2,", "probability": 0.88232421875}, {"start": 2016.12, "end": 2016.4, "word": " divided", "probability": 0.81591796875}, {"start": 2016.4, "end": 2016.64, "word": " by", "probability": 0.962890625}, {"start": 2016.64, "end": 2016.94, "word": " n,", "probability": 0.95703125}, {"start": 2017.68, "end": 2018.44, "word": " 500.", "probability": 0.97607421875}], "temperature": 1.0}, {"id": 76, "seek": 204795, "start": 2023.21, "end": 2047.95, "text": " So your Z statistic, P, now what's the value of P? P equals X over N. 
X is given, X is 25, divided by 500, so P is 5%. So now, Z statistic.", "tokens": [407, 428, 1176, 29588, 11, 430, 11, 586, 437, 311, 264, 2158, 295, 430, 30, 430, 6915, 1783, 670, 426, 13, 1783, 307, 2212, 11, 1783, 307, 3552, 11, 6666, 538, 5923, 11, 370, 430, 307, 1025, 6856, 407, 586, 11, 1176, 29588, 13], "avg_logprob": -0.2437499933772617, "compression_ratio": 1.1666666666666667, "no_speech_prob": 0.0, "words": [{"start": 2023.21, "end": 2023.55, "word": " So", "probability": 0.56787109375}, {"start": 2023.55, "end": 2023.75, "word": " your", "probability": 0.52685546875}, {"start": 2023.75, "end": 2023.91, "word": " Z", "probability": 0.58154296875}, {"start": 2023.91, "end": 2024.45, "word": " statistic,", "probability": 0.6630859375}, {"start": 2025.41, "end": 2027.35, "word": " P,", "probability": 0.59716796875}, {"start": 2029.11, "end": 2029.43, "word": " now", "probability": 0.64306640625}, {"start": 2029.43, "end": 2029.61, "word": " what's", "probability": 0.876953125}, {"start": 2029.61, "end": 2029.73, "word": " the", "probability": 0.9208984375}, {"start": 2029.73, "end": 2029.93, "word": " value", "probability": 0.97802734375}, {"start": 2029.93, "end": 2030.07, "word": " of", "probability": 0.9560546875}, {"start": 2030.07, "end": 2030.27, "word": " P?", "probability": 0.96923828125}, {"start": 2031.45, "end": 2032.13, "word": " P", "probability": 0.7294921875}, {"start": 2032.13, "end": 2032.79, "word": " equals", "probability": 0.48828125}, {"start": 2032.79, "end": 2033.13, "word": " X", "probability": 0.72607421875}, {"start": 2033.13, "end": 2033.35, "word": " over", "probability": 0.91796875}, {"start": 2033.35, "end": 2033.65, "word": " N.", "probability": 0.9560546875}, {"start": 2034.87, "end": 2035.43, "word": " X", "probability": 0.9716796875}, {"start": 2035.43, "end": 2035.55, "word": " is", "probability": 0.94189453125}, {"start": 2035.55, "end": 2035.81, "word": " given,", "probability": 0.8486328125}, {"start": 2036.31, "end": 
2036.65, "word": " X", "probability": 0.91357421875}, {"start": 2036.65, "end": 2036.99, "word": " is", "probability": 0.92919921875}, {"start": 2036.99, "end": 2037.49, "word": " 25,", "probability": 0.900390625}, {"start": 2037.89, "end": 2039.85, "word": " divided", "probability": 0.66259765625}, {"start": 2039.85, "end": 2040.13, "word": " by", "probability": 0.97021484375}, {"start": 2040.13, "end": 2040.73, "word": " 500,", "probability": 0.96240234375}, {"start": 2042.07, "end": 2042.39, "word": " so", "probability": 0.9365234375}, {"start": 2042.39, "end": 2042.63, "word": " P", "probability": 0.74853515625}, {"start": 2042.63, "end": 2043.03, "word": " is", "probability": 0.95263671875}, {"start": 2043.03, "end": 2044.27, "word": " 5%.", "probability": 0.872802734375}, {"start": 2044.27, "end": 2046.99, "word": " So", "probability": 0.955078125}, {"start": 2046.99, "end": 2047.21, "word": " now,", "probability": 0.9033203125}, {"start": 2047.37, "end": 2047.57, "word": " Z", "probability": 0.78173828125}, {"start": 2047.57, "end": 2047.95, "word": " statistic.", "probability": 0.826171875}], "temperature": 1.0}, {"id": 77, "seek": 207354, "start": 2052.08, "end": 2073.54, "text": " B minus Y divided by this sigma which is 0.892 divided by 500. This will give minus 2.47. 
So straightforward calculation.", "tokens": [363, 3175, 398, 6666, 538, 341, 12771, 597, 307, 1958, 13, 21115, 17, 6666, 538, 5923, 13, 639, 486, 976, 3175, 568, 13, 14060, 13, 407, 15325, 17108, 13], "avg_logprob": -0.32447917958100636, "compression_ratio": 1.1296296296296295, "no_speech_prob": 0.0, "words": [{"start": 2052.08, "end": 2052.4, "word": " B", "probability": 0.1832275390625}, {"start": 2052.4, "end": 2052.94, "word": " minus", "probability": 0.63232421875}, {"start": 2052.94, "end": 2053.44, "word": " Y", "probability": 0.2900390625}, {"start": 2053.44, "end": 2056.56, "word": " divided", "probability": 0.49951171875}, {"start": 2056.56, "end": 2057.14, "word": " by", "probability": 0.96875}, {"start": 2057.14, "end": 2058.22, "word": " this", "probability": 0.822265625}, {"start": 2058.22, "end": 2058.6, "word": " sigma", "probability": 0.60791015625}, {"start": 2058.6, "end": 2059.66, "word": " which", "probability": 0.50244140625}, {"start": 2059.66, "end": 2059.88, "word": " is", "probability": 0.94677734375}, {"start": 2059.88, "end": 2060.22, "word": " 0", "probability": 0.66259765625}, {"start": 2060.22, "end": 2062.58, "word": ".892", "probability": 0.72802734375}, {"start": 2062.58, "end": 2062.96, "word": " divided", "probability": 0.701171875}, {"start": 2062.96, "end": 2063.36, "word": " by", "probability": 0.96337890625}, {"start": 2063.36, "end": 2066.08, "word": " 500.", "probability": 0.90869140625}, {"start": 2067.12, "end": 2068.18, "word": " This", "probability": 0.87158203125}, {"start": 2068.18, "end": 2068.36, "word": " will", "probability": 0.87451171875}, {"start": 2068.36, "end": 2068.62, "word": " give", "probability": 0.833984375}, {"start": 2068.62, "end": 2069.54, "word": " minus", "probability": 0.81884765625}, {"start": 2069.54, "end": 2069.88, "word": " 2", "probability": 0.966796875}, {"start": 2069.88, "end": 2070.38, "word": ".47.", "probability": 0.9912109375}, {"start": 2071.88, "end": 2072.4, "word": " So", 
"probability": 0.9072265625}, {"start": 2072.4, "end": 2072.86, "word": " straightforward", "probability": 0.5771484375}, {"start": 2072.86, "end": 2073.54, "word": " calculation.", "probability": 0.9375}], "temperature": 1.0}, {"id": 78, "seek": 209014, "start": 2074.68, "end": 2090.14, "text": " It gives value of negative point, negative 2.47 for the value of the test statistic. Now let's look at three different approaches.", "tokens": [467, 2709, 2158, 295, 3671, 935, 11, 3671, 568, 13, 14060, 337, 264, 2158, 295, 264, 1500, 29588, 13, 823, 718, 311, 574, 412, 1045, 819, 11587, 13], "avg_logprob": -0.3488685344827586, "compression_ratio": 1.2596153846153846, "no_speech_prob": 0.0, "words": [{"start": 2074.68, "end": 2074.96, "word": " It", "probability": 0.1297607421875}, {"start": 2074.96, "end": 2075.32, "word": " gives", "probability": 0.80078125}, {"start": 2075.32, "end": 2076.18, "word": " value", "probability": 0.6279296875}, {"start": 2076.18, "end": 2076.6, "word": " of", "probability": 0.93359375}, {"start": 2076.6, "end": 2077.0, "word": " negative", "probability": 0.546875}, {"start": 2077.0, "end": 2077.62, "word": " point,", "probability": 0.424072265625}, {"start": 2078.34, "end": 2078.6, "word": " negative", "probability": 0.54296875}, {"start": 2078.6, "end": 2078.8, "word": " 2", "probability": 0.763671875}, {"start": 2078.8, "end": 2079.4, "word": ".47", "probability": 0.943115234375}, {"start": 2079.4, "end": 2079.74, "word": " for", "probability": 0.6669921875}, {"start": 2079.74, "end": 2079.92, "word": " the", "probability": 0.59716796875}, {"start": 2079.92, "end": 2080.44, "word": " value", "probability": 0.9296875}, {"start": 2080.44, "end": 2080.7, "word": " of", "probability": 0.9580078125}, {"start": 2080.7, "end": 2080.92, "word": " the", "probability": 0.465576171875}, {"start": 2080.92, "end": 2081.32, "word": " test", "probability": 0.78564453125}, {"start": 2081.32, "end": 2082.84, "word": " statistic.", "probability": 
0.85888671875}, {"start": 2084.16, "end": 2084.58, "word": " Now", "probability": 0.8740234375}, {"start": 2084.58, "end": 2085.3, "word": " let's", "probability": 0.681396484375}, {"start": 2085.3, "end": 2085.58, "word": " look", "probability": 0.95751953125}, {"start": 2085.58, "end": 2086.08, "word": " at", "probability": 0.9658203125}, {"start": 2086.08, "end": 2088.56, "word": " three", "probability": 0.74560546875}, {"start": 2088.56, "end": 2089.18, "word": " different", "probability": 0.87646484375}, {"start": 2089.18, "end": 2090.14, "word": " approaches.", "probability": 0.77734375}], "temperature": 1.0}, {"id": 79, "seek": 212664, "start": 2105.86, "end": 2126.64, "text": " Now we are talking about alpha of 0.5%. So my rejection region is minus 1.96 to the left side and 1.96 to the right side. If the value of the statistic", "tokens": [823, 321, 366, 1417, 466, 8961, 295, 1958, 13, 20, 6856, 407, 452, 26044, 4458, 307, 3175, 502, 13, 22962, 281, 264, 1411, 1252, 293, 502, 13, 22962, 281, 264, 558, 1252, 13, 759, 264, 2158, 295, 264, 29588], "avg_logprob": -0.15820311829447747, "compression_ratio": 1.256198347107438, "no_speech_prob": 0.0, "words": [{"start": 2105.8599999999997, "end": 2106.7, "word": " Now", "probability": 0.5849609375}, {"start": 2106.7, "end": 2106.86, "word": " we", "probability": 0.77001953125}, {"start": 2106.86, "end": 2106.98, "word": " are", "probability": 0.91015625}, {"start": 2106.98, "end": 2107.26, "word": " talking", "probability": 0.83544921875}, {"start": 2107.26, "end": 2107.78, "word": " about", "probability": 0.91015625}, {"start": 2107.78, "end": 2108.46, "word": " alpha", "probability": 0.595703125}, {"start": 2108.46, "end": 2109.32, "word": " of", "probability": 0.93505859375}, {"start": 2109.32, "end": 2109.92, "word": " 0", "probability": 0.8603515625}, {"start": 2109.92, "end": 2110.74, "word": ".5%.", "probability": 0.6495768229166666}, {"start": 2110.74, "end": 2112.52, "word": " So", "probability": 
0.9228515625}, {"start": 2112.52, "end": 2112.7, "word": " my", "probability": 0.84326171875}, {"start": 2112.7, "end": 2113.04, "word": " rejection", "probability": 0.97265625}, {"start": 2113.04, "end": 2113.5, "word": " region", "probability": 0.95556640625}, {"start": 2113.5, "end": 2117.0, "word": " is", "probability": 0.86328125}, {"start": 2117.0, "end": 2117.64, "word": " minus", "probability": 0.5791015625}, {"start": 2117.64, "end": 2118.16, "word": " 1", "probability": 0.9873046875}, {"start": 2118.16, "end": 2118.84, "word": ".96", "probability": 0.988037109375}, {"start": 2118.84, "end": 2119.14, "word": " to", "probability": 0.93505859375}, {"start": 2119.14, "end": 2119.26, "word": " the", "probability": 0.91064453125}, {"start": 2119.26, "end": 2119.44, "word": " left", "probability": 0.9453125}, {"start": 2119.44, "end": 2119.86, "word": " side", "probability": 0.86962890625}, {"start": 2119.86, "end": 2121.46, "word": " and", "probability": 0.6279296875}, {"start": 2121.46, "end": 2121.68, "word": " 1", "probability": 0.99560546875}, {"start": 2121.68, "end": 2122.28, "word": ".96", "probability": 0.992919921875}, {"start": 2122.28, "end": 2122.58, "word": " to", "probability": 0.96044921875}, {"start": 2122.58, "end": 2122.72, "word": " the", "probability": 0.91455078125}, {"start": 2122.72, "end": 2122.94, "word": " right", "probability": 0.91748046875}, {"start": 2122.94, "end": 2123.28, "word": " side.", "probability": 0.87255859375}, {"start": 2124.9, "end": 2125.54, "word": " If", "probability": 0.953125}, {"start": 2125.54, "end": 2125.7, "word": " the", "probability": 0.92236328125}, {"start": 2125.7, "end": 2125.92, "word": " value", "probability": 0.9775390625}, {"start": 2125.92, "end": 2126.06, "word": " of", "probability": 0.95849609375}, {"start": 2126.06, "end": 2126.16, "word": " the", "probability": 0.57958984375}, {"start": 2126.16, "end": 2126.64, "word": " statistic", "probability": 0.8720703125}], "temperature": 1.0}, {"id": 
80, "seek": 214908, "start": 2128.88, "end": 2149.08, "text": " falls in one of these two rejection regions, then we have to reject them all. Now this value actually falls in the rejection region to the left side. So since z star equals negative 2.47, smaller than negative 1.26,", "tokens": [8804, 294, 472, 295, 613, 732, 26044, 10682, 11, 550, 321, 362, 281, 8248, 552, 439, 13, 823, 341, 2158, 767, 8804, 294, 264, 26044, 4458, 281, 264, 1411, 1252, 13, 407, 1670, 710, 3543, 6915, 3671, 568, 13, 14060, 11, 4356, 813, 3671, 502, 13, 10880, 11], "avg_logprob": -0.21396684160037915, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 2128.88, "end": 2129.34, "word": " falls", "probability": 0.100341796875}, {"start": 2129.34, "end": 2129.7, "word": " in", "probability": 0.92333984375}, {"start": 2129.7, "end": 2129.94, "word": " one", "probability": 0.91064453125}, {"start": 2129.94, "end": 2130.1, "word": " of", "probability": 0.96240234375}, {"start": 2130.1, "end": 2130.32, "word": " these", "probability": 0.84765625}, {"start": 2130.32, "end": 2130.54, "word": " two", "probability": 0.89453125}, {"start": 2130.54, "end": 2130.88, "word": " rejection", "probability": 0.921875}, {"start": 2130.88, "end": 2131.34, "word": " regions,", "probability": 0.9736328125}, {"start": 2131.46, "end": 2131.6, "word": " then", "probability": 0.85302734375}, {"start": 2131.6, "end": 2131.74, "word": " we", "probability": 0.91845703125}, {"start": 2131.74, "end": 2131.9, "word": " have", "probability": 0.94384765625}, {"start": 2131.9, "end": 2132.02, "word": " to", "probability": 0.97021484375}, {"start": 2132.02, "end": 2132.34, "word": " reject", "probability": 0.91064453125}, {"start": 2132.34, "end": 2132.54, "word": " them", "probability": 0.492431640625}, {"start": 2132.54, "end": 2132.68, "word": " all.", "probability": 0.389892578125}, {"start": 2133.02, "end": 2133.74, "word": " Now", "probability": 0.86669921875}, {"start": 2133.74, "end": 2134.04, 
"word": " this", "probability": 0.642578125}, {"start": 2134.04, "end": 2134.4, "word": " value", "probability": 0.9755859375}, {"start": 2134.4, "end": 2134.9, "word": " actually", "probability": 0.8623046875}, {"start": 2134.9, "end": 2135.56, "word": " falls", "probability": 0.82373046875}, {"start": 2135.56, "end": 2137.16, "word": " in", "probability": 0.91845703125}, {"start": 2137.16, "end": 2137.56, "word": " the", "probability": 0.91748046875}, {"start": 2137.56, "end": 2138.18, "word": " rejection", "probability": 0.9443359375}, {"start": 2138.18, "end": 2138.58, "word": " region", "probability": 0.9248046875}, {"start": 2138.58, "end": 2138.78, "word": " to", "probability": 0.93603515625}, {"start": 2138.78, "end": 2138.9, "word": " the", "probability": 0.9169921875}, {"start": 2138.9, "end": 2139.1, "word": " left", "probability": 0.9443359375}, {"start": 2139.1, "end": 2139.58, "word": " side.", "probability": 0.83935546875}, {"start": 2140.34, "end": 2140.5, "word": " So", "probability": 0.9306640625}, {"start": 2140.5, "end": 2140.98, "word": " since", "probability": 0.7705078125}, {"start": 2140.98, "end": 2142.24, "word": " z", "probability": 0.6103515625}, {"start": 2142.24, "end": 2142.58, "word": " star", "probability": 0.61669921875}, {"start": 2142.58, "end": 2144.56, "word": " equals", "probability": 0.8525390625}, {"start": 2144.56, "end": 2144.92, "word": " negative", "probability": 0.79052734375}, {"start": 2144.92, "end": 2145.16, "word": " 2", "probability": 0.83642578125}, {"start": 2145.16, "end": 2145.78, "word": ".47,", "probability": 0.990966796875}, {"start": 2146.76, "end": 2147.32, "word": " smaller", "probability": 0.7822265625}, {"start": 2147.32, "end": 2147.68, "word": " than", "probability": 0.93701171875}, {"start": 2147.68, "end": 2148.12, "word": " negative", "probability": 0.94482421875}, {"start": 2148.12, "end": 2148.44, "word": " 1", "probability": 0.99072265625}, {"start": 2148.44, "end": 2149.08, "word": ".26,", 
"probability": 0.887939453125}], "temperature": 1.0}, {"id": 81, "seek": 217836, "start": 2150.78, "end": 2178.36, "text": " So it means the value of the test statistic falls in the rejection region. Then my decision is reject F0. So again, your Z statistic negative 2.47 falls in the rejection region. So my decision is we reject the null hypothesis.", "tokens": [407, 309, 1355, 264, 2158, 295, 264, 1500, 29588, 8804, 294, 264, 26044, 4458, 13, 1396, 452, 3537, 307, 8248, 479, 15, 13, 407, 797, 11, 428, 1176, 29588, 3671, 568, 13, 14060, 8804, 294, 264, 26044, 4458, 13, 407, 452, 3537, 307, 321, 8248, 264, 18184, 17291, 13], "avg_logprob": -0.24343749344348908, "compression_ratio": 1.644927536231884, "no_speech_prob": 0.0, "words": [{"start": 2150.78, "end": 2151.06, "word": " So", "probability": 0.6416015625}, {"start": 2151.06, "end": 2151.18, "word": " it", "probability": 0.76220703125}, {"start": 2151.18, "end": 2151.56, "word": " means", "probability": 0.91650390625}, {"start": 2151.56, "end": 2151.94, "word": " the", "probability": 0.68701171875}, {"start": 2151.94, "end": 2152.3, "word": " value", "probability": 0.96630859375}, {"start": 2152.3, "end": 2152.7, "word": " of", "probability": 0.96337890625}, {"start": 2152.7, "end": 2152.9, "word": " the", "probability": 0.83984375}, {"start": 2152.9, "end": 2153.18, "word": " test", "probability": 0.84814453125}, {"start": 2153.18, "end": 2153.7, "word": " statistic", "probability": 0.841796875}, {"start": 2153.7, "end": 2154.14, "word": " falls", "probability": 0.7470703125}, {"start": 2154.14, "end": 2154.46, "word": " in", "probability": 0.884765625}, {"start": 2154.46, "end": 2154.96, "word": " the", "probability": 0.87744140625}, {"start": 2154.96, "end": 2155.22, "word": " rejection", "probability": 0.93798828125}, {"start": 2155.22, "end": 2155.66, "word": " region.", "probability": 0.94384765625}, {"start": 2156.36, "end": 2156.7, "word": " Then", "probability": 0.64111328125}, {"start": 2156.7, "end": 
2157.68, "word": " my", "probability": 0.71337890625}, {"start": 2157.68, "end": 2158.1, "word": " decision", "probability": 0.93701171875}, {"start": 2158.1, "end": 2158.7, "word": " is", "probability": 0.94677734375}, {"start": 2158.7, "end": 2159.9, "word": " reject", "probability": 0.485595703125}, {"start": 2159.9, "end": 2163.24, "word": " F0.", "probability": 0.5303955078125}, {"start": 2165.74, "end": 2166.54, "word": " So", "probability": 0.92041015625}, {"start": 2166.54, "end": 2166.98, "word": " again,", "probability": 0.896484375}, {"start": 2168.82, "end": 2169.02, "word": " your", "probability": 0.71337890625}, {"start": 2169.02, "end": 2169.2, "word": " Z", "probability": 0.57666015625}, {"start": 2169.2, "end": 2169.88, "word": " statistic", "probability": 0.81884765625}, {"start": 2169.88, "end": 2170.24, "word": " negative", "probability": 0.463134765625}, {"start": 2170.24, "end": 2170.5, "word": " 2", "probability": 0.79052734375}, {"start": 2170.5, "end": 2171.14, "word": ".47", "probability": 0.983154296875}, {"start": 2171.14, "end": 2172.78, "word": " falls", "probability": 0.666015625}, {"start": 2172.78, "end": 2173.06, "word": " in", "probability": 0.9453125}, {"start": 2173.06, "end": 2173.66, "word": " the", "probability": 0.91943359375}, {"start": 2173.66, "end": 2174.22, "word": " rejection", "probability": 0.93994140625}, {"start": 2174.22, "end": 2174.66, "word": " region.", "probability": 0.9404296875}, {"start": 2175.62, "end": 2175.9, "word": " So", "probability": 0.95458984375}, {"start": 2175.9, "end": 2176.14, "word": " my", "probability": 0.9365234375}, {"start": 2176.14, "end": 2176.5, "word": " decision", "probability": 0.94091796875}, {"start": 2176.5, "end": 2176.9, "word": " is", "probability": 0.94970703125}, {"start": 2176.9, "end": 2177.2, "word": " we", "probability": 0.755859375}, {"start": 2177.2, "end": 2177.62, "word": " reject", "probability": 0.8984375}, {"start": 2177.62, "end": 2177.8, "word": " the", 
"probability": 0.290771484375}, {"start": 2177.8, "end": 2177.92, "word": " null", "probability": 0.9912109375}, {"start": 2177.92, "end": 2178.36, "word": " hypothesis.", "probability": 0.86669921875}], "temperature": 1.0}, {"id": 82, "seek": 220816, "start": 2179.96, "end": 2208.16, "text": " There is sufficient evidence to reject the company claim of 8% responses rate. So this claim actually is incorrect. So you have to reject this claim. It means we have to support alternative hypothesis. So we end with this result. That is pi is not 8%. So that's by using", "tokens": [821, 307, 11563, 4467, 281, 8248, 264, 2237, 3932, 295, 1649, 4, 13019, 3314, 13, 407, 341, 3932, 767, 307, 18424, 13, 407, 291, 362, 281, 8248, 341, 3932, 13, 467, 1355, 321, 362, 281, 1406, 8535, 17291, 13, 407, 321, 917, 365, 341, 1874, 13, 663, 307, 3895, 307, 406, 1649, 6856, 407, 300, 311, 538, 1228], "avg_logprob": -0.17756885391170696, "compression_ratio": 1.5397727272727273, "no_speech_prob": 0.0, "words": [{"start": 2179.96, "end": 2180.28, "word": " There", "probability": 0.32568359375}, {"start": 2180.28, "end": 2180.46, "word": " is", "probability": 0.91015625}, {"start": 2180.46, "end": 2180.98, "word": " sufficient", "probability": 0.89697265625}, {"start": 2180.98, "end": 2181.46, "word": " evidence", "probability": 0.951171875}, {"start": 2181.46, "end": 2181.72, "word": " to", "probability": 0.96923828125}, {"start": 2181.72, "end": 2182.14, "word": " reject", "probability": 0.90869140625}, {"start": 2182.14, "end": 2182.38, "word": " the", "probability": 0.90283203125}, {"start": 2182.38, "end": 2182.76, "word": " company", "probability": 0.91650390625}, {"start": 2182.76, "end": 2183.28, "word": " claim", "probability": 0.8330078125}, {"start": 2183.28, "end": 2185.28, "word": " of", "probability": 0.80078125}, {"start": 2185.28, "end": 2185.62, "word": " 8", "probability": 0.7490234375}, {"start": 2185.62, "end": 2186.12, "word": "%", "probability": 0.9384765625}, {"start": 
2186.12, "end": 2186.94, "word": " responses", "probability": 0.64892578125}, {"start": 2186.94, "end": 2187.34, "word": " rate.", "probability": 0.703125}, {"start": 2188.14, "end": 2188.6, "word": " So", "probability": 0.93798828125}, {"start": 2188.6, "end": 2188.86, "word": " this", "probability": 0.84521484375}, {"start": 2188.86, "end": 2189.22, "word": " claim", "probability": 0.8935546875}, {"start": 2189.22, "end": 2189.74, "word": " actually", "probability": 0.8408203125}, {"start": 2189.74, "end": 2190.08, "word": " is", "probability": 0.9423828125}, {"start": 2190.08, "end": 2190.64, "word": " incorrect.", "probability": 0.90771484375}, {"start": 2191.8, "end": 2192.04, "word": " So", "probability": 0.83056640625}, {"start": 2192.04, "end": 2192.12, "word": " you", "probability": 0.5654296875}, {"start": 2192.12, "end": 2192.26, "word": " have", "probability": 0.94580078125}, {"start": 2192.26, "end": 2192.38, "word": " to", "probability": 0.96435546875}, {"start": 2192.38, "end": 2192.76, "word": " reject", "probability": 0.92431640625}, {"start": 2192.76, "end": 2193.6, "word": " this", "probability": 0.9306640625}, {"start": 2193.6, "end": 2194.04, "word": " claim.", "probability": 0.88232421875}, {"start": 2194.4, "end": 2194.78, "word": " It", "probability": 0.9365234375}, {"start": 2194.78, "end": 2195.18, "word": " means", "probability": 0.916015625}, {"start": 2195.18, "end": 2197.12, "word": " we", "probability": 0.673828125}, {"start": 2197.12, "end": 2197.32, "word": " have", "probability": 0.94287109375}, {"start": 2197.32, "end": 2197.5, "word": " to", "probability": 0.96240234375}, {"start": 2197.5, "end": 2198.02, "word": " support", "probability": 0.9814453125}, {"start": 2198.02, "end": 2198.6, "word": " alternative", "probability": 0.6728515625}, {"start": 2198.6, "end": 2199.1, "word": " hypothesis.", "probability": 0.74072265625}, {"start": 2200.02, "end": 2200.46, "word": " So", "probability": 0.95361328125}, {"start": 2200.46, 
"end": 2200.64, "word": " we", "probability": 0.93310546875}, {"start": 2200.64, "end": 2200.84, "word": " end", "probability": 0.90869140625}, {"start": 2200.84, "end": 2201.04, "word": " with", "probability": 0.888671875}, {"start": 2201.04, "end": 2201.34, "word": " this", "probability": 0.94482421875}, {"start": 2201.34, "end": 2201.84, "word": " result.", "probability": 0.94287109375}, {"start": 2202.0, "end": 2202.26, "word": " That", "probability": 0.9033203125}, {"start": 2202.26, "end": 2202.74, "word": " is", "probability": 0.953125}, {"start": 2202.74, "end": 2203.54, "word": " pi", "probability": 0.493408203125}, {"start": 2203.54, "end": 2204.5, "word": " is", "probability": 0.7919921875}, {"start": 2204.5, "end": 2204.88, "word": " not", "probability": 0.95263671875}, {"start": 2204.88, "end": 2206.44, "word": " 8%.", "probability": 0.92578125}, {"start": 2206.44, "end": 2207.32, "word": " So", "probability": 0.9619140625}, {"start": 2207.32, "end": 2207.58, "word": " that's", "probability": 0.93505859375}, {"start": 2207.58, "end": 2207.74, "word": " by", "probability": 0.8857421875}, {"start": 2207.74, "end": 2208.16, "word": " using", "probability": 0.93310546875}], "temperature": 1.0}, {"id": 83, "seek": 223486, "start": 2208.82, "end": 2234.86, "text": " a critical value approach. Any question? Let's compute B value for the other approach. Now, in this case, we are testing two-tailed test. 
So your B value may be greater than", "tokens": [257, 4924, 2158, 3109, 13, 2639, 1168, 30, 961, 311, 14722, 363, 2158, 337, 264, 661, 3109, 13, 823, 11, 294, 341, 1389, 11, 321, 366, 4997, 732, 12, 14430, 292, 1500, 13, 407, 428, 363, 2158, 815, 312, 5044, 813], "avg_logprob": -0.22377231894504457, "compression_ratio": 1.3282442748091603, "no_speech_prob": 0.0, "words": [{"start": 2208.82, "end": 2209.16, "word": " a", "probability": 0.2191162109375}, {"start": 2209.16, "end": 2209.66, "word": " critical", "probability": 0.9091796875}, {"start": 2209.66, "end": 2210.94, "word": " value", "probability": 0.94189453125}, {"start": 2210.94, "end": 2212.14, "word": " approach.", "probability": 0.88232421875}, {"start": 2212.92, "end": 2213.22, "word": " Any", "probability": 0.87353515625}, {"start": 2213.22, "end": 2213.62, "word": " question?", "probability": 0.5732421875}, {"start": 2218.42, "end": 2219.2, "word": " Let's", "probability": 0.933349609375}, {"start": 2219.2, "end": 2219.72, "word": " compute", "probability": 0.904296875}, {"start": 2219.72, "end": 2221.76, "word": " B", "probability": 0.447021484375}, {"start": 2221.76, "end": 2222.04, "word": " value", "probability": 0.70556640625}, {"start": 2222.04, "end": 2222.26, "word": " for", "probability": 0.93115234375}, {"start": 2222.26, "end": 2222.44, "word": " the", "probability": 0.89599609375}, {"start": 2222.44, "end": 2222.72, "word": " other", "probability": 0.89013671875}, {"start": 2222.72, "end": 2223.24, "word": " approach.", "probability": 0.8818359375}, {"start": 2224.1, "end": 2224.42, "word": " Now,", "probability": 0.92578125}, {"start": 2224.48, "end": 2224.56, "word": " in", "probability": 0.77099609375}, {"start": 2224.56, "end": 2224.74, "word": " this", "probability": 0.94775390625}, {"start": 2224.74, "end": 2225.12, "word": " case,", "probability": 0.9052734375}, {"start": 2225.7, "end": 2225.9, "word": " we", "probability": 0.958984375}, {"start": 2225.9, "end": 2226.08, "word": " 
are", "probability": 0.93408203125}, {"start": 2226.08, "end": 2226.56, "word": " testing", "probability": 0.8828125}, {"start": 2226.56, "end": 2227.96, "word": " two", "probability": 0.7802734375}, {"start": 2227.96, "end": 2228.24, "word": "-tailed", "probability": 0.8134765625}, {"start": 2228.24, "end": 2228.56, "word": " test.", "probability": 0.8447265625}, {"start": 2229.4, "end": 2229.78, "word": " So", "probability": 0.95166015625}, {"start": 2229.78, "end": 2230.36, "word": " your", "probability": 0.73486328125}, {"start": 2230.36, "end": 2230.6, "word": " B", "probability": 0.974609375}, {"start": 2230.6, "end": 2231.02, "word": " value", "probability": 0.953125}, {"start": 2231.02, "end": 2233.76, "word": " may", "probability": 0.6240234375}, {"start": 2233.76, "end": 2234.0, "word": " be", "probability": 0.96337890625}, {"start": 2234.0, "end": 2234.48, "word": " greater", "probability": 0.91943359375}, {"start": 2234.48, "end": 2234.86, "word": " than", "probability": 0.951171875}], "temperature": 1.0}, {"id": 84, "seek": 226433, "start": 2236.39, "end": 2264.33, "text": " Or smaller than. So we have to compute one of these two, then multiply the answer by two. Because the area to the right of 2.47 equals the area to the left of negative 2.47. So now, by using the ideas from chapter six, let's see how can we compute U of V value. Again, we are talking about total test. 
So the U of V value is the right of Z.", "tokens": [1610, 4356, 813, 13, 407, 321, 362, 281, 14722, 472, 295, 613, 732, 11, 550, 12972, 264, 1867, 538, 732, 13, 1436, 264, 1859, 281, 264, 558, 295, 568, 13, 14060, 6915, 264, 1859, 281, 264, 1411, 295, 3671, 568, 13, 14060, 13, 407, 586, 11, 538, 1228, 264, 3487, 490, 7187, 2309, 11, 718, 311, 536, 577, 393, 321, 14722, 624, 295, 691, 2158, 13, 3764, 11, 321, 366, 1417, 466, 3217, 1500, 13, 407, 264, 624, 295, 691, 2158, 307, 264, 558, 295, 1176, 13], "avg_logprob": -0.2262073794210499, "compression_ratio": 1.5934579439252337, "no_speech_prob": 0.0, "words": [{"start": 2236.39, "end": 2236.91, "word": " Or", "probability": 0.1640625}, {"start": 2236.91, "end": 2237.43, "word": " smaller", "probability": 0.66552734375}, {"start": 2237.43, "end": 2237.75, "word": " than.", "probability": 0.8994140625}, {"start": 2238.23, "end": 2238.47, "word": " So", "probability": 0.9482421875}, {"start": 2238.47, "end": 2238.59, "word": " we", "probability": 0.73486328125}, {"start": 2238.59, "end": 2238.73, "word": " have", "probability": 0.939453125}, {"start": 2238.73, "end": 2238.85, "word": " to", "probability": 0.97265625}, {"start": 2238.85, "end": 2239.15, "word": " compute", "probability": 0.79150390625}, {"start": 2239.15, "end": 2239.45, "word": " one", "probability": 0.83056640625}, {"start": 2239.45, "end": 2239.61, "word": " of", "probability": 0.96337890625}, {"start": 2239.61, "end": 2239.81, "word": " these", "probability": 0.85693359375}, {"start": 2239.81, "end": 2240.11, "word": " two,", "probability": 0.90869140625}, {"start": 2240.23, "end": 2240.45, "word": " then", "probability": 0.82421875}, {"start": 2240.45, "end": 2240.87, "word": " multiply", "probability": 0.9091796875}, {"start": 2240.87, "end": 2241.05, "word": " the", "probability": 0.8876953125}, {"start": 2241.05, "end": 2241.33, "word": " answer", "probability": 0.9501953125}, {"start": 2241.33, "end": 2241.55, "word": " by", "probability": 
0.96728515625}, {"start": 2241.55, "end": 2241.81, "word": " two.", "probability": 0.75048828125}, {"start": 2242.25, "end": 2242.77, "word": " Because", "probability": 0.93359375}, {"start": 2242.77, "end": 2242.97, "word": " the", "probability": 0.90478515625}, {"start": 2242.97, "end": 2243.25, "word": " area", "probability": 0.900390625}, {"start": 2243.25, "end": 2243.43, "word": " to", "probability": 0.95947265625}, {"start": 2243.43, "end": 2243.59, "word": " the", "probability": 0.91796875}, {"start": 2243.59, "end": 2243.89, "word": " right", "probability": 0.92236328125}, {"start": 2243.89, "end": 2244.19, "word": " of", "probability": 0.9404296875}, {"start": 2244.19, "end": 2244.41, "word": " 2", "probability": 0.92529296875}, {"start": 2244.41, "end": 2245.01, "word": ".47", "probability": 0.99365234375}, {"start": 2245.01, "end": 2246.25, "word": " equals", "probability": 0.90087890625}, {"start": 2246.25, "end": 2246.53, "word": " the", "probability": 0.79345703125}, {"start": 2246.53, "end": 2246.69, "word": " area", "probability": 0.86669921875}, {"start": 2246.69, "end": 2246.87, "word": " to", "probability": 0.953125}, {"start": 2246.87, "end": 2247.01, "word": " the", "probability": 0.9140625}, {"start": 2247.01, "end": 2247.23, "word": " left", "probability": 0.9443359375}, {"start": 2247.23, "end": 2247.39, "word": " of", "probability": 0.96484375}, {"start": 2247.39, "end": 2247.69, "word": " negative", "probability": 0.80224609375}, {"start": 2247.69, "end": 2247.97, "word": " 2", "probability": 0.98974609375}, {"start": 2247.97, "end": 2248.41, "word": ".47.", "probability": 0.99609375}, {"start": 2248.91, "end": 2249.39, "word": " So", "probability": 0.95068359375}, {"start": 2249.39, "end": 2249.59, "word": " now,", "probability": 0.892578125}, {"start": 2249.73, "end": 2249.89, "word": " by", "probability": 0.97119140625}, {"start": 2249.89, "end": 2250.19, "word": " using", "probability": 0.93212890625}, {"start": 2250.19, "end": 
2250.37, "word": " the", "probability": 0.6748046875}, {"start": 2250.37, "end": 2250.87, "word": " ideas", "probability": 0.91552734375}, {"start": 2250.87, "end": 2252.01, "word": " from", "probability": 0.88037109375}, {"start": 2252.01, "end": 2252.35, "word": " chapter", "probability": 0.49169921875}, {"start": 2252.35, "end": 2252.89, "word": " six,", "probability": 0.681640625}, {"start": 2254.61, "end": 2255.01, "word": " let's", "probability": 0.961669921875}, {"start": 2255.01, "end": 2255.23, "word": " see", "probability": 0.91796875}, {"start": 2255.23, "end": 2255.43, "word": " how", "probability": 0.90673828125}, {"start": 2255.43, "end": 2255.65, "word": " can", "probability": 0.83642578125}, {"start": 2255.65, "end": 2255.81, "word": " we", "probability": 0.947265625}, {"start": 2255.81, "end": 2256.39, "word": " compute", "probability": 0.91357421875}, {"start": 2256.39, "end": 2256.81, "word": " U", "probability": 0.381591796875}, {"start": 2256.81, "end": 2256.95, "word": " of", "probability": 0.59521484375}, {"start": 2256.95, "end": 2257.03, "word": " V", "probability": 0.59130859375}, {"start": 2257.03, "end": 2257.35, "word": " value.", "probability": 0.94287109375}, {"start": 2257.41, "end": 2257.71, "word": " Again,", "probability": 0.9130859375}, {"start": 2257.77, "end": 2257.89, "word": " we", "probability": 0.96142578125}, {"start": 2257.89, "end": 2258.03, "word": " are", "probability": 0.92919921875}, {"start": 2258.03, "end": 2258.33, "word": " talking", "probability": 0.853515625}, {"start": 2258.33, "end": 2258.65, "word": " about", "probability": 0.89453125}, {"start": 2258.65, "end": 2258.97, "word": " total", "probability": 0.33056640625}, {"start": 2258.97, "end": 2259.41, "word": " test.", "probability": 0.6943359375}, {"start": 2260.15, "end": 2260.41, "word": " So", "probability": 0.95849609375}, {"start": 2260.41, "end": 2260.57, "word": " the", "probability": 0.338623046875}, {"start": 2260.57, "end": 2260.73, "word": " 
U", "probability": 0.9033203125}, {"start": 2260.73, "end": 2260.75, "word": " of", "probability": 0.9267578125}, {"start": 2260.75, "end": 2260.85, "word": " V", "probability": 0.9912109375}, {"start": 2260.85, "end": 2261.31, "word": " value", "probability": 0.962890625}, {"start": 2261.31, "end": 2263.41, "word": " is", "probability": 0.9306640625}, {"start": 2263.41, "end": 2263.57, "word": " the", "probability": 0.31005859375}, {"start": 2263.57, "end": 2263.85, "word": " right", "probability": 0.340087890625}, {"start": 2263.85, "end": 2264.07, "word": " of", "probability": 0.95751953125}, {"start": 2264.07, "end": 2264.33, "word": " Z.", "probability": 0.4609375}], "temperature": 1.0}, {"id": 85, "seek": 229452, "start": 2266.4, "end": 2294.52, "text": " 2.47 or smaller than negative 2.47. Now the table we have gives the area to the left side. So it's better to use the smaller than. So now by using T table, B of Z negative less than negative 2.47. Let's look at the table for the normal", "tokens": [568, 13, 14060, 420, 4356, 813, 3671, 568, 13, 14060, 13, 823, 264, 3199, 321, 362, 2709, 264, 1859, 281, 264, 1411, 1252, 13, 407, 309, 311, 1101, 281, 764, 264, 4356, 813, 13, 407, 586, 538, 1228, 314, 3199, 11, 363, 295, 1176, 3671, 1570, 813, 3671, 568, 13, 14060, 13, 961, 311, 574, 412, 264, 3199, 337, 264, 2710], "avg_logprob": -0.25214214622974396, "compression_ratio": 1.6164383561643836, "no_speech_prob": 0.0, "words": [{"start": 2266.4, "end": 2266.6, "word": " 2", "probability": 0.1448974609375}, {"start": 2266.6, "end": 2267.14, "word": ".47", "probability": 0.976806640625}, {"start": 2267.14, "end": 2267.84, "word": " or", "probability": 0.693359375}, {"start": 2267.84, "end": 2270.28, "word": " smaller", "probability": 0.59912109375}, {"start": 2270.28, "end": 2270.6, "word": " than", "probability": 0.9296875}, {"start": 2270.6, "end": 2271.08, "word": " negative", "probability": 0.8701171875}, {"start": 2271.08, "end": 2272.1, "word": " 2", 
"probability": 0.89453125}, {"start": 2272.1, "end": 2272.62, "word": ".47.", "probability": 0.985107421875}, {"start": 2273.52, "end": 2274.14, "word": " Now", "probability": 0.88330078125}, {"start": 2274.14, "end": 2274.36, "word": " the", "probability": 0.5166015625}, {"start": 2274.36, "end": 2274.6, "word": " table", "probability": 0.82568359375}, {"start": 2274.6, "end": 2274.8, "word": " we", "probability": 0.7822265625}, {"start": 2274.8, "end": 2275.1, "word": " have", "probability": 0.93408203125}, {"start": 2275.1, "end": 2275.38, "word": " gives", "probability": 0.748046875}, {"start": 2275.38, "end": 2275.54, "word": " the", "probability": 0.69140625}, {"start": 2275.54, "end": 2275.7, "word": " area", "probability": 0.2685546875}, {"start": 2275.7, "end": 2275.84, "word": " to", "probability": 0.947265625}, {"start": 2275.84, "end": 2275.96, "word": " the", "probability": 0.90771484375}, {"start": 2275.96, "end": 2276.14, "word": " left", "probability": 0.951171875}, {"start": 2276.14, "end": 2276.56, "word": " side.", "probability": 0.82373046875}, {"start": 2276.84, "end": 2277.1, "word": " So", "probability": 0.434326171875}, {"start": 2277.1, "end": 2277.2, "word": " it's", "probability": 0.75244140625}, {"start": 2277.2, "end": 2277.44, "word": " better", "probability": 0.91552734375}, {"start": 2277.44, "end": 2277.66, "word": " to", "probability": 0.955078125}, {"start": 2277.66, "end": 2278.08, "word": " use", "probability": 0.8662109375}, {"start": 2278.08, "end": 2279.26, "word": " the", "probability": 0.50927734375}, {"start": 2279.26, "end": 2279.72, "word": " smaller", "probability": 0.8291015625}, {"start": 2279.72, "end": 2280.08, "word": " than.", "probability": 0.70654296875}, {"start": 2280.64, "end": 2281.26, "word": " So", "probability": 0.95166015625}, {"start": 2281.26, "end": 2281.46, "word": " now", "probability": 0.8916015625}, {"start": 2281.46, "end": 2281.66, "word": " by", "probability": 0.8154296875}, {"start": 2281.66, 
"end": 2281.96, "word": " using", "probability": 0.9169921875}, {"start": 2281.96, "end": 2282.18, "word": " T", "probability": 0.744140625}, {"start": 2282.18, "end": 2282.6, "word": " table,", "probability": 0.54638671875}, {"start": 2283.42, "end": 2283.58, "word": " B", "probability": 0.78173828125}, {"start": 2283.58, "end": 2283.78, "word": " of", "probability": 0.8740234375}, {"start": 2283.78, "end": 2283.9, "word": " Z", "probability": 0.8125}, {"start": 2283.9, "end": 2284.48, "word": " negative", "probability": 0.418701171875}, {"start": 2284.48, "end": 2285.26, "word": " less", "probability": 0.75}, {"start": 2285.26, "end": 2285.46, "word": " than", "probability": 0.93798828125}, {"start": 2285.46, "end": 2285.92, "word": " negative", "probability": 0.9462890625}, {"start": 2285.92, "end": 2286.94, "word": " 2", "probability": 0.95654296875}, {"start": 2286.94, "end": 2287.5, "word": ".47.", "probability": 0.996826171875}, {"start": 2287.9, "end": 2288.12, "word": " Let's", "probability": 0.972412109375}, {"start": 2288.12, "end": 2288.34, "word": " look", "probability": 0.962890625}, {"start": 2288.34, "end": 2288.58, "word": " at", "probability": 0.96484375}, {"start": 2288.58, "end": 2289.04, "word": " the", "probability": 0.92041015625}, {"start": 2289.04, "end": 2290.02, "word": " table", "probability": 0.89111328125}, {"start": 2290.02, "end": 2293.66, "word": " for", "probability": 0.8818359375}, {"start": 2293.66, "end": 2293.9, "word": " the", "probability": 0.9150390625}, {"start": 2293.9, "end": 2294.52, "word": " normal", "probability": 0.8681640625}], "temperature": 1.0}, {"id": 86, "seek": 232103, "start": 2295.23, "end": 2321.03, "text": " Negative side. Look at negative 2.47. Negative 2.4 all the way under 7 will get this result. So it's 0068. So this is one of the two areas. 
So the area below negative 2.47 is 0068.", "tokens": [43230, 1252, 13, 2053, 412, 3671, 568, 13, 14060, 13, 43230, 568, 13, 19, 439, 264, 636, 833, 1614, 486, 483, 341, 1874, 13, 407, 309, 311, 7143, 27102, 13, 407, 341, 307, 472, 295, 264, 732, 3179, 13, 407, 264, 1859, 2507, 3671, 568, 13, 14060, 307, 7143, 27102, 13], "avg_logprob": -0.21844951808452606, "compression_ratio": 1.4596774193548387, "no_speech_prob": 0.0, "words": [{"start": 2295.23, "end": 2295.71, "word": " Negative", "probability": 0.2362060546875}, {"start": 2295.71, "end": 2296.17, "word": " side.", "probability": 0.63916015625}, {"start": 2297.49, "end": 2298.07, "word": " Look", "probability": 0.75830078125}, {"start": 2298.07, "end": 2298.27, "word": " at", "probability": 0.96875}, {"start": 2298.27, "end": 2298.57, "word": " negative", "probability": 0.59814453125}, {"start": 2298.57, "end": 2298.79, "word": " 2", "probability": 0.85888671875}, {"start": 2298.79, "end": 2299.33, "word": ".47.", "probability": 0.977294921875}, {"start": 2301.25, "end": 2302.13, "word": " Negative", "probability": 0.8974609375}, {"start": 2302.13, "end": 2302.57, "word": " 2", "probability": 0.98095703125}, {"start": 2302.57, "end": 2303.23, "word": ".4", "probability": 0.9814453125}, {"start": 2303.23, "end": 2303.57, "word": " all", "probability": 0.74072265625}, {"start": 2303.57, "end": 2303.73, "word": " the", "probability": 0.912109375}, {"start": 2303.73, "end": 2303.89, "word": " way", "probability": 0.955078125}, {"start": 2303.89, "end": 2304.19, "word": " under", "probability": 0.859375}, {"start": 2304.19, "end": 2304.59, "word": " 7", "probability": 0.63671875}, {"start": 2304.59, "end": 2306.01, "word": " will", "probability": 0.415771484375}, {"start": 2306.01, "end": 2306.37, "word": " get", "probability": 0.91357421875}, {"start": 2306.37, "end": 2306.75, "word": " this", "probability": 0.939453125}, {"start": 2306.75, "end": 2307.13, "word": " result.", "probability": 0.9296875}, {"start": 
2307.95, "end": 2308.67, "word": " So", "probability": 0.7880859375}, {"start": 2308.67, "end": 2308.83, "word": " it's", "probability": 0.5430908203125}, {"start": 2308.83, "end": 2309.75, "word": " 0068.", "probability": 0.861083984375}, {"start": 2311.91, "end": 2312.79, "word": " So", "probability": 0.8994140625}, {"start": 2312.79, "end": 2313.13, "word": " this", "probability": 0.71435546875}, {"start": 2313.13, "end": 2313.23, "word": " is", "probability": 0.90576171875}, {"start": 2313.23, "end": 2313.39, "word": " one", "probability": 0.8984375}, {"start": 2313.39, "end": 2313.57, "word": " of", "probability": 0.95849609375}, {"start": 2313.57, "end": 2314.01, "word": " the", "probability": 0.8515625}, {"start": 2314.01, "end": 2314.19, "word": " two", "probability": 0.90576171875}, {"start": 2314.19, "end": 2314.57, "word": " areas.", "probability": 0.94775390625}, {"start": 2315.25, "end": 2315.51, "word": " So", "probability": 0.81591796875}, {"start": 2315.51, "end": 2316.13, "word": " the", "probability": 0.78076171875}, {"start": 2316.13, "end": 2316.39, "word": " area", "probability": 0.904296875}, {"start": 2316.39, "end": 2317.35, "word": " below", "probability": 0.884765625}, {"start": 2317.35, "end": 2319.17, "word": " negative", "probability": 0.83642578125}, {"start": 2319.17, "end": 2319.43, "word": " 2", "probability": 0.98779296875}, {"start": 2319.43, "end": 2320.03, "word": ".47", "probability": 0.983642578125}, {"start": 2320.03, "end": 2320.25, "word": " is", "probability": 0.5400390625}, {"start": 2320.25, "end": 2321.03, "word": " 0068.", "probability": 0.953369140625}], "temperature": 1.0}, {"id": 87, "seek": 234871, "start": 2322.27, "end": 2348.71, "text": " The area above the same value is the same area because it's symmetric. So it means that just multiplying this by 2, you will get 0.0136. Now, always, as we mentioned before, always we reject the null hypothesis if your b value is smaller than alpha. Now, alpha is given by 5%. 
Now, is this value smaller than alpha?", "tokens": [440, 1859, 3673, 264, 912, 2158, 307, 264, 912, 1859, 570, 309, 311, 32330, 13, 407, 309, 1355, 300, 445, 30955, 341, 538, 568, 11, 291, 486, 483, 1958, 13, 15, 7668, 21, 13, 823, 11, 1009, 11, 382, 321, 2835, 949, 11, 1009, 321, 8248, 264, 18184, 17291, 498, 428, 272, 2158, 307, 4356, 813, 8961, 13, 823, 11, 8961, 307, 2212, 538, 1025, 6856, 823, 11, 307, 341, 2158, 4356, 813, 8961, 30], "avg_logprob": -0.1908922715014533, "compression_ratio": 1.58, "no_speech_prob": 0.0, "words": [{"start": 2322.27, "end": 2322.49, "word": " The", "probability": 0.63916015625}, {"start": 2322.49, "end": 2322.75, "word": " area", "probability": 0.88720703125}, {"start": 2322.75, "end": 2323.07, "word": " above", "probability": 0.92919921875}, {"start": 2323.07, "end": 2323.31, "word": " the", "probability": 0.91064453125}, {"start": 2323.31, "end": 2323.55, "word": " same", "probability": 0.9052734375}, {"start": 2323.55, "end": 2323.83, "word": " value", "probability": 0.8076171875}, {"start": 2323.83, "end": 2323.97, "word": " is", "probability": 0.849609375}, {"start": 2323.97, "end": 2324.11, "word": " the", "probability": 0.9140625}, {"start": 2324.11, "end": 2324.39, "word": " same", "probability": 0.90087890625}, {"start": 2324.39, "end": 2324.71, "word": " area", "probability": 0.91796875}, {"start": 2324.71, "end": 2325.05, "word": " because", "probability": 0.78173828125}, {"start": 2325.05, "end": 2325.25, "word": " it's", "probability": 0.703857421875}, {"start": 2325.25, "end": 2325.65, "word": " symmetric.", "probability": 0.85400390625}, {"start": 2326.35, "end": 2326.87, "word": " So", "probability": 0.6796875}, {"start": 2326.87, "end": 2327.07, "word": " it", "probability": 0.8203125}, {"start": 2327.07, "end": 2327.27, "word": " means", "probability": 0.9091796875}, {"start": 2327.27, "end": 2327.49, "word": " that", "probability": 0.90771484375}, {"start": 2327.49, "end": 2327.77, "word": " just", 
"probability": 0.7119140625}, {"start": 2327.77, "end": 2328.19, "word": " multiplying", "probability": 0.5166015625}, {"start": 2328.19, "end": 2328.55, "word": " this", "probability": 0.890625}, {"start": 2328.55, "end": 2328.73, "word": " by", "probability": 0.970703125}, {"start": 2328.73, "end": 2329.05, "word": " 2,", "probability": 0.62158203125}, {"start": 2329.27, "end": 2329.41, "word": " you", "probability": 0.919921875}, {"start": 2329.41, "end": 2329.57, "word": " will", "probability": 0.79052734375}, {"start": 2329.57, "end": 2329.87, "word": " get", "probability": 0.94189453125}, {"start": 2329.87, "end": 2331.09, "word": " 0", "probability": 0.60009765625}, {"start": 2331.09, "end": 2332.93, "word": ".0136.", "probability": 0.8519287109375}, {"start": 2336.69, "end": 2337.21, "word": " Now,", "probability": 0.91162109375}, {"start": 2337.51, "end": 2337.95, "word": " always,", "probability": 0.83203125}, {"start": 2337.99, "end": 2338.17, "word": " as", "probability": 0.95947265625}, {"start": 2338.17, "end": 2338.29, "word": " we", "probability": 0.75927734375}, {"start": 2338.29, "end": 2338.53, "word": " mentioned", "probability": 0.82958984375}, {"start": 2338.53, "end": 2338.99, "word": " before,", "probability": 0.8642578125}, {"start": 2339.17, "end": 2339.55, "word": " always", "probability": 0.79541015625}, {"start": 2339.55, "end": 2339.73, "word": " we", "probability": 0.6318359375}, {"start": 2339.73, "end": 2340.15, "word": " reject", "probability": 0.8876953125}, {"start": 2340.15, "end": 2340.31, "word": " the", "probability": 0.36865234375}, {"start": 2340.31, "end": 2340.45, "word": " null", "probability": 0.98974609375}, {"start": 2340.45, "end": 2340.91, "word": " hypothesis", "probability": 0.7275390625}, {"start": 2340.91, "end": 2341.91, "word": " if", "probability": 0.8349609375}, {"start": 2341.91, "end": 2342.19, "word": " your", "probability": 0.85302734375}, {"start": 2342.19, "end": 2342.33, "word": " b", "probability": 
0.483154296875}, {"start": 2342.33, "end": 2342.71, "word": " value", "probability": 0.9140625}, {"start": 2342.71, "end": 2342.97, "word": " is", "probability": 0.94970703125}, {"start": 2342.97, "end": 2343.21, "word": " smaller", "probability": 0.8701171875}, {"start": 2343.21, "end": 2343.45, "word": " than", "probability": 0.9501953125}, {"start": 2343.45, "end": 2343.73, "word": " alpha.", "probability": 0.865234375}, {"start": 2344.59, "end": 2344.87, "word": " Now,", "probability": 0.9580078125}, {"start": 2344.91, "end": 2345.15, "word": " alpha", "probability": 0.91943359375}, {"start": 2345.15, "end": 2345.39, "word": " is", "probability": 0.9521484375}, {"start": 2345.39, "end": 2345.61, "word": " given", "probability": 0.8876953125}, {"start": 2345.61, "end": 2345.85, "word": " by", "probability": 0.96923828125}, {"start": 2345.85, "end": 2346.61, "word": " 5%.", "probability": 0.974853515625}, {"start": 2346.61, "end": 2346.93, "word": " Now,", "probability": 0.95068359375}, {"start": 2347.07, "end": 2347.31, "word": " is", "probability": 0.94091796875}, {"start": 2347.31, "end": 2347.57, "word": " this", "probability": 0.94482421875}, {"start": 2347.57, "end": 2347.83, "word": " value", "probability": 0.896484375}, {"start": 2347.83, "end": 2348.15, "word": " smaller", "probability": 0.853515625}, {"start": 2348.15, "end": 2348.37, "word": " than", "probability": 0.955078125}, {"start": 2348.37, "end": 2348.71, "word": " alpha?", "probability": 0.90576171875}], "temperature": 1.0}, {"id": 88, "seek": 237396, "start": 2349.49, "end": 2373.97, "text": " The answer is yes. So since my B value equals 0, 1, 3, 6, smaller than 5%, then again, we reject the null hypothesis. So we end with the same conclusion by using the critical value. 
That's for using B value approach.", "tokens": [440, 1867, 307, 2086, 13, 407, 1670, 452, 363, 2158, 6915, 1958, 11, 502, 11, 805, 11, 1386, 11, 4356, 813, 1025, 8923, 550, 797, 11, 321, 8248, 264, 18184, 17291, 13, 407, 321, 917, 365, 264, 912, 10063, 538, 1228, 264, 4924, 2158, 13, 663, 311, 337, 1228, 363, 2158, 3109, 13], "avg_logprob": -0.18648727072609794, "compression_ratio": 1.3734177215189873, "no_speech_prob": 0.0, "words": [{"start": 2349.49, "end": 2349.73, "word": " The", "probability": 0.2841796875}, {"start": 2349.73, "end": 2349.95, "word": " answer", "probability": 0.9775390625}, {"start": 2349.95, "end": 2350.17, "word": " is", "probability": 0.9482421875}, {"start": 2350.17, "end": 2350.37, "word": " yes.", "probability": 0.9306640625}, {"start": 2350.73, "end": 2350.73, "word": " So", "probability": 0.9033203125}, {"start": 2350.73, "end": 2351.49, "word": " since", "probability": 0.673828125}, {"start": 2351.49, "end": 2351.83, "word": " my", "probability": 0.96484375}, {"start": 2351.83, "end": 2352.03, "word": " B", "probability": 0.5029296875}, {"start": 2352.03, "end": 2352.37, "word": " value", "probability": 0.83544921875}, {"start": 2352.37, "end": 2355.37, "word": " equals", "probability": 0.865234375}, {"start": 2355.37, "end": 2355.67, "word": " 0,", "probability": 0.814453125}, {"start": 2355.71, "end": 2355.81, "word": " 1,", "probability": 0.7431640625}, {"start": 2355.89, "end": 2356.01, "word": " 3,", "probability": 0.6396484375}, {"start": 2356.13, "end": 2356.45, "word": " 6,", "probability": 0.9921875}, {"start": 2356.57, "end": 2356.85, "word": " smaller", "probability": 0.68701171875}, {"start": 2356.85, "end": 2357.13, "word": " than", "probability": 0.939453125}, {"start": 2357.13, "end": 2357.95, "word": " 5%,", "probability": 0.809326171875}, {"start": 2357.95, "end": 2358.99, "word": " then", "probability": 0.85986328125}, {"start": 2358.99, "end": 2359.53, "word": " again,", "probability": 0.888671875}, {"start": 
2360.11, "end": 2360.45, "word": " we", "probability": 0.95751953125}, {"start": 2360.45, "end": 2361.31, "word": " reject", "probability": 0.8583984375}, {"start": 2361.31, "end": 2362.61, "word": " the", "probability": 0.80859375}, {"start": 2362.61, "end": 2362.77, "word": " null", "probability": 0.97314453125}, {"start": 2362.77, "end": 2363.29, "word": " hypothesis.", "probability": 0.84765625}, {"start": 2364.11, "end": 2364.43, "word": " So", "probability": 0.9619140625}, {"start": 2364.43, "end": 2364.61, "word": " we", "probability": 0.9521484375}, {"start": 2364.61, "end": 2364.83, "word": " end", "probability": 0.90234375}, {"start": 2364.83, "end": 2364.97, "word": " with", "probability": 0.896484375}, {"start": 2364.97, "end": 2365.11, "word": " the", "probability": 0.91455078125}, {"start": 2365.11, "end": 2365.33, "word": " same", "probability": 0.88330078125}, {"start": 2365.33, "end": 2365.89, "word": " conclusion", "probability": 0.8974609375}, {"start": 2365.89, "end": 2366.17, "word": " by", "probability": 0.892578125}, {"start": 2366.17, "end": 2366.49, "word": " using", "probability": 0.93017578125}, {"start": 2366.49, "end": 2366.67, "word": " the", "probability": 0.908203125}, {"start": 2366.67, "end": 2367.03, "word": " critical", "probability": 0.94287109375}, {"start": 2367.03, "end": 2367.33, "word": " value.", "probability": 0.8115234375}, {"start": 2369.57, "end": 2370.21, "word": " That's", "probability": 0.950439453125}, {"start": 2370.21, "end": 2370.57, "word": " for", "probability": 0.94140625}, {"start": 2370.57, "end": 2372.09, "word": " using", "probability": 0.93115234375}, {"start": 2372.09, "end": 2373.39, "word": " B", "probability": 0.68896484375}, {"start": 2373.39, "end": 2373.57, "word": " value", "probability": 0.814453125}, {"start": 2373.57, "end": 2373.97, "word": " approach.", "probability": 0.91015625}], "temperature": 1.0}, {"id": 89, "seek": 239662, "start": 2375.54, "end": 2396.62, "text": " So here, we reject 
another hypothesis since your B value is smaller than alpha. The last approach, which is not given in the slides you have critical value. I'm sorry, confidence interval. And we know that confidence interval", "tokens": [407, 510, 11, 321, 8248, 1071, 17291, 1670, 428, 363, 2158, 307, 4356, 813, 8961, 13, 440, 1036, 3109, 11, 597, 307, 406, 2212, 294, 264, 9788, 291, 362, 4924, 2158, 13, 286, 478, 2597, 11, 6687, 15035, 13, 400, 321, 458, 300, 6687, 15035], "avg_logprob": -0.25866168348685553, "compression_ratio": 1.4675324675324675, "no_speech_prob": 0.0, "words": [{"start": 2375.54, "end": 2375.96, "word": " So", "probability": 0.72705078125}, {"start": 2375.96, "end": 2376.18, "word": " here,", "probability": 0.7666015625}, {"start": 2376.26, "end": 2376.36, "word": " we", "probability": 0.93798828125}, {"start": 2376.36, "end": 2376.7, "word": " reject", "probability": 0.56103515625}, {"start": 2376.7, "end": 2376.94, "word": " another", "probability": 0.47705078125}, {"start": 2376.94, "end": 2377.42, "word": " hypothesis", "probability": 0.888671875}, {"start": 2377.42, "end": 2377.98, "word": " since", "probability": 0.481201171875}, {"start": 2377.98, "end": 2378.22, "word": " your", "probability": 0.86865234375}, {"start": 2378.22, "end": 2378.36, "word": " B", "probability": 0.369873046875}, {"start": 2378.36, "end": 2378.72, "word": " value", "probability": 0.68994140625}, {"start": 2378.72, "end": 2378.94, "word": " is", "probability": 0.94921875}, {"start": 2378.94, "end": 2379.22, "word": " smaller", "probability": 0.8798828125}, {"start": 2379.22, "end": 2379.6, "word": " than", "probability": 0.95751953125}, {"start": 2379.6, "end": 2379.94, "word": " alpha.", "probability": 0.79248046875}, {"start": 2381.5, "end": 2382.2, "word": " The", "probability": 0.87353515625}, {"start": 2382.2, "end": 2383.0, "word": " last", "probability": 0.87890625}, {"start": 2383.0, "end": 2383.66, "word": " approach,", "probability": 0.91015625}, {"start": 2384.06, 
"end": 2384.26, "word": " which", "probability": 0.9296875}, {"start": 2384.26, "end": 2384.4, "word": " is", "probability": 0.9287109375}, {"start": 2384.4, "end": 2384.58, "word": " not", "probability": 0.9453125}, {"start": 2384.58, "end": 2384.9, "word": " given", "probability": 0.86865234375}, {"start": 2384.9, "end": 2388.18, "word": " in", "probability": 0.876953125}, {"start": 2388.18, "end": 2388.4, "word": " the", "probability": 0.919921875}, {"start": 2388.4, "end": 2388.78, "word": " slides", "probability": 0.951171875}, {"start": 2388.78, "end": 2389.0, "word": " you", "probability": 0.89404296875}, {"start": 2389.0, "end": 2389.38, "word": " have", "probability": 0.93505859375}, {"start": 2389.38, "end": 2391.54, "word": " critical", "probability": 0.458251953125}, {"start": 2391.54, "end": 2392.02, "word": " value.", "probability": 0.94189453125}, {"start": 2392.54, "end": 2392.7, "word": " I'm", "probability": 0.97021484375}, {"start": 2392.7, "end": 2392.96, "word": " sorry,", "probability": 0.86474609375}, {"start": 2393.32, "end": 2393.86, "word": " confidence", "probability": 0.953125}, {"start": 2393.86, "end": 2394.32, "word": " interval.", "probability": 0.8056640625}, {"start": 2394.9, "end": 2395.24, "word": " And", "probability": 0.900390625}, {"start": 2395.24, "end": 2395.38, "word": " we", "probability": 0.7021484375}, {"start": 2395.38, "end": 2395.5, "word": " know", "probability": 0.87890625}, {"start": 2395.5, "end": 2395.66, "word": " that", "probability": 0.7470703125}, {"start": 2395.66, "end": 2396.06, "word": " confidence", "probability": 0.951171875}, {"start": 2396.06, "end": 2396.62, "word": " interval", "probability": 0.923828125}], "temperature": 1.0}, {"id": 90, "seek": 242771, "start": 2402.37, "end": 2427.71, "text": " P plus or minus Z square root P 1 minus P divided by N. P is 0.5, 0.5 plus or minus 196 times 0.595 divided by N. 
Let's compute the critical, the two limits.", "tokens": [430, 1804, 420, 3175, 1176, 3732, 5593, 430, 502, 3175, 430, 6666, 538, 426, 13, 430, 307, 1958, 13, 20, 11, 1958, 13, 20, 1804, 420, 3175, 7998, 1413, 1958, 13, 19600, 20, 6666, 538, 426, 13, 961, 311, 14722, 264, 4924, 11, 264, 732, 10406, 13], "avg_logprob": -0.27164714224636555, "compression_ratio": 1.3389830508474576, "no_speech_prob": 0.0, "words": [{"start": 2402.3700000000003, "end": 2403.11, "word": " P", "probability": 0.1727294921875}, {"start": 2403.11, "end": 2403.85, "word": " plus", "probability": 0.496337890625}, {"start": 2403.85, "end": 2404.05, "word": " or", "probability": 0.8369140625}, {"start": 2404.05, "end": 2404.33, "word": " minus", "probability": 0.98779296875}, {"start": 2404.33, "end": 2404.69, "word": " Z", "probability": 0.460205078125}, {"start": 2404.69, "end": 2405.47, "word": " square", "probability": 0.55615234375}, {"start": 2405.47, "end": 2405.75, "word": " root", "probability": 0.9208984375}, {"start": 2405.75, "end": 2406.11, "word": " P", "probability": 0.724609375}, {"start": 2406.11, "end": 2407.39, "word": " 1", "probability": 0.416015625}, {"start": 2407.39, "end": 2407.79, "word": " minus", "probability": 0.9794921875}, {"start": 2407.79, "end": 2408.31, "word": " P", "probability": 0.97998046875}, {"start": 2408.31, "end": 2409.21, "word": " divided", "probability": 0.57763671875}, {"start": 2409.21, "end": 2409.41, "word": " by", "probability": 0.9619140625}, {"start": 2409.41, "end": 2409.63, "word": " N.", "probability": 0.93798828125}, {"start": 2412.05, "end": 2412.79, "word": " P", "probability": 0.7568359375}, {"start": 2412.79, "end": 2413.13, "word": " is", "probability": 0.9365234375}, {"start": 2413.13, "end": 2414.01, "word": " 0", "probability": 0.74609375}, {"start": 2414.01, "end": 2414.35, "word": ".5,", "probability": 0.75}, {"start": 2414.49, "end": 2414.81, "word": " 0", "probability": 0.8203125}, {"start": 2414.81, "end": 2415.17, "word": 
".5", "probability": 0.9921875}, {"start": 2415.17, "end": 2415.55, "word": " plus", "probability": 0.66064453125}, {"start": 2415.55, "end": 2415.81, "word": " or", "probability": 0.9384765625}, {"start": 2415.81, "end": 2416.09, "word": " minus", "probability": 0.98681640625}, {"start": 2416.09, "end": 2416.65, "word": " 196", "probability": 0.88525390625}, {"start": 2416.65, "end": 2418.73, "word": " times", "probability": 0.8583984375}, {"start": 2418.73, "end": 2419.11, "word": " 0", "probability": 0.97021484375}, {"start": 2419.11, "end": 2422.49, "word": ".595", "probability": 0.80078125}, {"start": 2422.49, "end": 2422.85, "word": " divided", "probability": 0.7998046875}, {"start": 2422.85, "end": 2423.11, "word": " by", "probability": 0.96484375}, {"start": 2423.11, "end": 2423.37, "word": " N.", "probability": 0.97900390625}, {"start": 2424.43, "end": 2424.87, "word": " Let's", "probability": 0.936767578125}, {"start": 2424.87, "end": 2425.31, "word": " compute", "probability": 0.8857421875}, {"start": 2425.31, "end": 2425.57, "word": " the", "probability": 0.8642578125}, {"start": 2425.57, "end": 2425.93, "word": " critical,", "probability": 0.52734375}, {"start": 2426.29, "end": 2426.49, "word": " the", "probability": 0.323974609375}, {"start": 2426.49, "end": 2427.33, "word": " two", "probability": 0.8857421875}, {"start": 2427.33, "end": 2427.71, "word": " limits.", "probability": 0.96142578125}], "temperature": 1.0}, {"id": 91, "seek": 246398, "start": 2435.03, "end": 2463.99, "text": " So we have this value here represents the margin of error. So let's compute the margin of error first. So it's 1.96 times square root of 0.05 times 0.95 divided by 500. 
So the margin of error is 0.19.", "tokens": [407, 321, 362, 341, 2158, 510, 8855, 264, 10270, 295, 6713, 13, 407, 718, 311, 14722, 264, 10270, 295, 6713, 700, 13, 407, 309, 311, 502, 13, 22962, 1413, 3732, 5593, 295, 1958, 13, 13328, 1413, 1958, 13, 15718, 6666, 538, 5923, 13, 407, 264, 10270, 295, 6713, 307, 1958, 13, 3405, 13], "avg_logprob": -0.14612268987629148, "compression_ratio": 1.534351145038168, "no_speech_prob": 0.0, "words": [{"start": 2435.03, "end": 2435.29, "word": " So", "probability": 0.70263671875}, {"start": 2435.29, "end": 2435.43, "word": " we", "probability": 0.72314453125}, {"start": 2435.43, "end": 2435.71, "word": " have", "probability": 0.947265625}, {"start": 2435.71, "end": 2437.19, "word": " this", "probability": 0.8798828125}, {"start": 2437.19, "end": 2438.15, "word": " value", "probability": 0.97607421875}, {"start": 2438.15, "end": 2438.55, "word": " here", "probability": 0.8564453125}, {"start": 2438.55, "end": 2440.27, "word": " represents", "probability": 0.413818359375}, {"start": 2440.27, "end": 2442.93, "word": " the", "probability": 0.8017578125}, {"start": 2442.93, "end": 2443.25, "word": " margin", "probability": 0.73046875}, {"start": 2443.25, "end": 2443.47, "word": " of", "probability": 0.74609375}, {"start": 2443.47, "end": 2443.59, "word": " error.", "probability": 0.88330078125}, {"start": 2444.23, "end": 2444.91, "word": " So", "probability": 0.8798828125}, {"start": 2444.91, "end": 2445.15, "word": " let's", "probability": 0.90771484375}, {"start": 2445.15, "end": 2445.39, "word": " compute", "probability": 0.7998046875}, {"start": 2445.39, "end": 2445.61, "word": " the", "probability": 0.8974609375}, {"start": 2445.61, "end": 2445.83, "word": " margin", "probability": 0.958984375}, {"start": 2445.83, "end": 2445.97, "word": " of", "probability": 0.9111328125}, {"start": 2445.97, "end": 2446.07, "word": " error", "probability": 0.8916015625}, {"start": 2446.07, "end": 2446.47, "word": " first.", "probability": 
0.8056640625}, {"start": 2446.93, "end": 2447.23, "word": " So", "probability": 0.92822265625}, {"start": 2447.23, "end": 2447.47, "word": " it's", "probability": 0.939697265625}, {"start": 2447.47, "end": 2447.73, "word": " 1", "probability": 0.9140625}, {"start": 2447.73, "end": 2450.21, "word": ".96", "probability": 0.973876953125}, {"start": 2450.21, "end": 2450.99, "word": " times", "probability": 0.87060546875}, {"start": 2450.99, "end": 2451.45, "word": " square", "probability": 0.7041015625}, {"start": 2451.45, "end": 2451.79, "word": " root", "probability": 0.9404296875}, {"start": 2451.79, "end": 2453.45, "word": " of", "probability": 0.96240234375}, {"start": 2453.45, "end": 2453.85, "word": " 0", "probability": 0.908203125}, {"start": 2453.85, "end": 2454.35, "word": ".05", "probability": 0.708740234375}, {"start": 2454.35, "end": 2454.83, "word": " times", "probability": 0.90478515625}, {"start": 2454.83, "end": 2455.15, "word": " 0", "probability": 0.91796875}, {"start": 2455.15, "end": 2455.81, "word": ".95", "probability": 0.997314453125}, {"start": 2455.81, "end": 2456.85, "word": " divided", "probability": 0.623046875}, {"start": 2456.85, "end": 2457.19, "word": " by", "probability": 0.96728515625}, {"start": 2457.19, "end": 2457.85, "word": " 500.", "probability": 0.9619140625}, {"start": 2460.39, "end": 2461.07, "word": " So", "probability": 0.95263671875}, {"start": 2461.07, "end": 2461.39, "word": " the", "probability": 0.8779296875}, {"start": 2461.39, "end": 2461.81, "word": " margin", "probability": 0.96533203125}, {"start": 2461.81, "end": 2462.19, "word": " of", "probability": 0.96630859375}, {"start": 2462.19, "end": 2462.51, "word": " error", "probability": 0.90625}, {"start": 2462.51, "end": 2463.45, "word": " is", "probability": 0.94482421875}, {"start": 2463.45, "end": 2463.73, "word": " 0", "probability": 0.900390625}, {"start": 2463.73, "end": 2463.99, "word": ".19.", "probability": 0.80712890625}], "temperature": 1.0}, {"id": 92, 
"seek": 249264, "start": 2465.32, "end": 2492.64, "text": " So 0, 5, plus around 0, 1, 0, 2, around 0. For example, let's say it's 0, 2. It's approximately 0, 2. So now the confidence interval for pi, so 95 confidence interval of pi is pi greater than or equal 0, 5 minus 0, 2 is 3%.", "tokens": [407, 1958, 11, 1025, 11, 1804, 926, 1958, 11, 502, 11, 1958, 11, 568, 11, 926, 1958, 13, 1171, 1365, 11, 718, 311, 584, 309, 311, 1958, 11, 568, 13, 467, 311, 10447, 1958, 11, 568, 13, 407, 586, 264, 6687, 15035, 337, 3895, 11, 370, 13420, 6687, 15035, 295, 3895, 307, 3895, 5044, 813, 420, 2681, 1958, 11, 1025, 3175, 1958, 11, 568, 307, 805, 6856], "avg_logprob": -0.17612591254360535, "compression_ratio": 1.4545454545454546, "no_speech_prob": 0.0, "words": [{"start": 2465.32, "end": 2465.5, "word": " So", "probability": 0.61669921875}, {"start": 2465.5, "end": 2465.8, "word": " 0,", "probability": 0.5283203125}, {"start": 2466.0, "end": 2466.54, "word": " 5,", "probability": 0.7890625}, {"start": 2466.72, "end": 2468.04, "word": " plus", "probability": 0.689453125}, {"start": 2468.04, "end": 2468.5, "word": " around", "probability": 0.85400390625}, {"start": 2468.5, "end": 2468.82, "word": " 0,", "probability": 0.87890625}, {"start": 2468.9, "end": 2469.04, "word": " 1,", "probability": 0.52734375}, {"start": 2469.26, "end": 2469.44, "word": " 0,", "probability": 0.94970703125}, {"start": 2469.52, "end": 2469.76, "word": " 2,", "probability": 0.95947265625}, {"start": 2469.92, "end": 2470.14, "word": " around", "probability": 0.86962890625}, {"start": 2470.14, "end": 2470.48, "word": " 0.", "probability": 0.8466796875}, {"start": 2470.58, "end": 2470.74, "word": " For", "probability": 0.8984375}, {"start": 2470.74, "end": 2471.04, "word": " example,", "probability": 0.9619140625}, {"start": 2471.38, "end": 2471.66, "word": " let's", "probability": 0.939208984375}, {"start": 2471.66, "end": 2471.8, "word": " say", "probability": 0.94091796875}, {"start": 2471.8, "end": 
2472.08, "word": " it's", "probability": 0.951904296875}, {"start": 2472.08, "end": 2472.38, "word": " 0,", "probability": 0.9228515625}, {"start": 2472.44, "end": 2472.56, "word": " 2.", "probability": 0.849609375}, {"start": 2473.1, "end": 2473.7, "word": " It's", "probability": 0.921142578125}, {"start": 2473.7, "end": 2474.16, "word": " approximately", "probability": 0.81689453125}, {"start": 2474.16, "end": 2474.5, "word": " 0,", "probability": 0.931640625}, {"start": 2474.58, "end": 2474.64, "word": " 2.", "probability": 0.9833984375}, {"start": 2475.3, "end": 2475.9, "word": " So", "probability": 0.9580078125}, {"start": 2475.9, "end": 2476.06, "word": " now", "probability": 0.8828125}, {"start": 2476.06, "end": 2476.24, "word": " the", "probability": 0.62060546875}, {"start": 2476.24, "end": 2476.58, "word": " confidence", "probability": 0.98046875}, {"start": 2476.58, "end": 2477.12, "word": " interval", "probability": 0.90673828125}, {"start": 2477.12, "end": 2478.48, "word": " for", "probability": 0.87939453125}, {"start": 2478.48, "end": 2478.88, "word": " pi,", "probability": 0.70458984375}, {"start": 2479.52, "end": 2479.92, "word": " so", "probability": 0.9033203125}, {"start": 2479.92, "end": 2480.36, "word": " 95", "probability": 0.9814453125}, {"start": 2480.36, "end": 2482.36, "word": " confidence", "probability": 0.5341796875}, {"start": 2482.36, "end": 2483.76, "word": " interval", "probability": 0.91748046875}, {"start": 2483.76, "end": 2484.74, "word": " of", "probability": 0.9130859375}, {"start": 2484.74, "end": 2485.14, "word": " pi", "probability": 0.8876953125}, {"start": 2485.14, "end": 2485.98, "word": " is", "probability": 0.9130859375}, {"start": 2485.98, "end": 2487.38, "word": " pi", "probability": 0.93212890625}, {"start": 2487.38, "end": 2488.4, "word": " greater", "probability": 0.90478515625}, {"start": 2488.4, "end": 2488.64, "word": " than", "probability": 0.9423828125}, {"start": 2488.64, "end": 2488.78, "word": " or", 
"probability": 0.9560546875}, {"start": 2488.78, "end": 2489.04, "word": " equal", "probability": 0.90771484375}, {"start": 2489.04, "end": 2490.28, "word": " 0,", "probability": 0.58349609375}, {"start": 2490.4, "end": 2490.64, "word": " 5", "probability": 0.94287109375}, {"start": 2490.64, "end": 2491.06, "word": " minus", "probability": 0.93212890625}, {"start": 2491.06, "end": 2491.4, "word": " 0,", "probability": 0.98583984375}, {"start": 2491.5, "end": 2491.62, "word": " 2", "probability": 0.97607421875}, {"start": 2491.62, "end": 2491.86, "word": " is", "probability": 0.8984375}, {"start": 2491.86, "end": 2492.64, "word": " 3%.", "probability": 0.8896484375}], "temperature": 1.0}, {"id": 93, "seek": 252194, "start": 2494.41, "end": 2521.95, "text": " And plus 2 gives 5%. So now, pi, the population proportion, falls between 3% and 7%. So you range here from 3% all the way up to 7%. That's pi. Now, we are testing about it's 0 if y equals 8%.", "tokens": [400, 1804, 568, 2709, 1025, 6856, 407, 586, 11, 3895, 11, 264, 4415, 16068, 11, 8804, 1296, 805, 4, 293, 1614, 6856, 407, 291, 3613, 510, 490, 805, 4, 439, 264, 636, 493, 281, 1614, 6856, 663, 311, 3895, 13, 823, 11, 321, 366, 4997, 466, 309, 311, 1958, 498, 288, 6915, 1649, 6856], "avg_logprob": -0.1931818127632141, "compression_ratio": 1.2953020134228188, "no_speech_prob": 0.0, "words": [{"start": 2494.41, "end": 2494.63, "word": " And", "probability": 0.64697265625}, {"start": 2494.63, "end": 2495.01, "word": " plus", "probability": 0.468994140625}, {"start": 2495.01, "end": 2495.31, "word": " 2", "probability": 0.6533203125}, {"start": 2495.31, "end": 2495.65, "word": " gives", "probability": 0.87646484375}, {"start": 2495.65, "end": 2496.29, "word": " 5%.", "probability": 0.944091796875}, {"start": 2496.29, "end": 2498.49, "word": " So", "probability": 0.9638671875}, {"start": 2498.49, "end": 2498.79, "word": " now,", "probability": 0.91064453125}, {"start": 2498.87, "end": 2499.27, "word": " pi,", 
"probability": 0.68701171875}, {"start": 2499.55, "end": 2499.87, "word": " the", "probability": 0.8759765625}, {"start": 2499.87, "end": 2500.33, "word": " population", "probability": 0.875}, {"start": 2500.33, "end": 2500.93, "word": " proportion,", "probability": 0.64111328125}, {"start": 2501.95, "end": 2502.53, "word": " falls", "probability": 0.76171875}, {"start": 2502.53, "end": 2503.05, "word": " between", "probability": 0.87158203125}, {"start": 2503.05, "end": 2504.27, "word": " 3", "probability": 0.9365234375}, {"start": 2504.27, "end": 2504.75, "word": "%", "probability": 0.798828125}, {"start": 2504.75, "end": 2505.33, "word": " and", "probability": 0.95166015625}, {"start": 2505.33, "end": 2506.03, "word": " 7%.", "probability": 0.980712890625}, {"start": 2506.03, "end": 2507.79, "word": " So", "probability": 0.9619140625}, {"start": 2507.79, "end": 2507.99, "word": " you", "probability": 0.53955078125}, {"start": 2507.99, "end": 2508.59, "word": " range", "probability": 0.65673828125}, {"start": 2508.59, "end": 2509.03, "word": " here", "probability": 0.83349609375}, {"start": 2509.03, "end": 2509.67, "word": " from", "probability": 0.8564453125}, {"start": 2509.67, "end": 2510.01, "word": " 3", "probability": 0.9912109375}, {"start": 2510.01, "end": 2510.45, "word": "%", "probability": 0.92626953125}, {"start": 2510.45, "end": 2511.57, "word": " all", "probability": 0.9453125}, {"start": 2511.57, "end": 2511.73, "word": " the", "probability": 0.91796875}, {"start": 2511.73, "end": 2511.87, "word": " way", "probability": 0.95068359375}, {"start": 2511.87, "end": 2512.07, "word": " up", "probability": 0.94189453125}, {"start": 2512.07, "end": 2512.23, "word": " to", "probability": 0.9638671875}, {"start": 2512.23, "end": 2512.83, "word": " 7%.", "probability": 0.9296875}, {"start": 2512.83, "end": 2513.33, "word": " That's", "probability": 0.953857421875}, {"start": 2513.33, "end": 2513.75, "word": " pi.", "probability": 0.94873046875}, {"start": 
2514.35, "end": 2514.79, "word": " Now,", "probability": 0.94775390625}, {"start": 2516.15, "end": 2517.31, "word": " we", "probability": 0.962890625}, {"start": 2517.31, "end": 2517.51, "word": " are", "probability": 0.9326171875}, {"start": 2517.51, "end": 2517.99, "word": " testing", "probability": 0.8408203125}, {"start": 2517.99, "end": 2518.63, "word": " about", "probability": 0.91162109375}, {"start": 2518.63, "end": 2519.75, "word": " it's", "probability": 0.527191162109375}, {"start": 2519.75, "end": 2520.09, "word": " 0", "probability": 0.8505859375}, {"start": 2520.09, "end": 2520.59, "word": " if", "probability": 0.64013671875}, {"start": 2520.59, "end": 2520.79, "word": " y", "probability": 0.63720703125}, {"start": 2520.79, "end": 2521.19, "word": " equals", "probability": 0.8359375}, {"start": 2521.19, "end": 2521.95, "word": " 8%.", "probability": 0.984619140625}], "temperature": 1.0}, {"id": 94, "seek": 254201, "start": 2523.13, "end": 2542.01, "text": " Now, is this value lies in the interval or outside? 
This value actually lies outside this interval, so it's never equal 0.08, because pi in this confidence interval ranges between", "tokens": [823, 11, 307, 341, 2158, 9134, 294, 264, 15035, 420, 2380, 30, 639, 2158, 767, 9134, 2380, 341, 15035, 11, 370, 309, 311, 1128, 2681, 1958, 13, 16133, 11, 570, 3895, 294, 341, 6687, 15035, 22526, 1296], "avg_logprob": -0.2763157988849439, "compression_ratio": 1.4285714285714286, "no_speech_prob": 0.0, "words": [{"start": 2523.13, "end": 2523.45, "word": " Now,", "probability": 0.468994140625}, {"start": 2523.65, "end": 2523.77, "word": " is", "probability": 0.73974609375}, {"start": 2523.77, "end": 2524.03, "word": " this", "probability": 0.919921875}, {"start": 2524.03, "end": 2524.45, "word": " value", "probability": 0.95849609375}, {"start": 2524.45, "end": 2525.97, "word": " lies", "probability": 0.256591796875}, {"start": 2525.97, "end": 2527.79, "word": " in", "probability": 0.78173828125}, {"start": 2527.79, "end": 2527.95, "word": " the", "probability": 0.85546875}, {"start": 2527.95, "end": 2528.27, "word": " interval", "probability": 0.93798828125}, {"start": 2528.27, "end": 2528.55, "word": " or", "probability": 0.888671875}, {"start": 2528.55, "end": 2528.89, "word": " outside?", "probability": 0.87060546875}, {"start": 2529.89, "end": 2530.73, "word": " This", "probability": 0.80322265625}, {"start": 2530.73, "end": 2531.11, "word": " value", "probability": 0.96630859375}, {"start": 2531.11, "end": 2531.61, "word": " actually", "probability": 0.8115234375}, {"start": 2531.61, "end": 2531.97, "word": " lies", "probability": 0.94580078125}, {"start": 2531.97, "end": 2532.51, "word": " outside", "probability": 0.8310546875}, {"start": 2532.51, "end": 2532.83, "word": " this", "probability": 0.89990234375}, {"start": 2532.83, "end": 2533.27, "word": " interval,", "probability": 0.9599609375}, {"start": 2533.75, "end": 2534.11, "word": " so", "probability": 0.91357421875}, {"start": 2534.11, "end": 2534.47, "word": " 
it's", "probability": 0.85302734375}, {"start": 2534.47, "end": 2534.77, "word": " never", "probability": 0.92236328125}, {"start": 2534.77, "end": 2535.17, "word": " equal", "probability": 0.8466796875}, {"start": 2535.17, "end": 2535.49, "word": " 0", "probability": 0.53271484375}, {"start": 2535.49, "end": 2536.01, "word": ".08,", "probability": 0.971435546875}, {"start": 2537.09, "end": 2537.53, "word": " because", "probability": 0.884765625}, {"start": 2537.53, "end": 2538.33, "word": " pi", "probability": 0.3056640625}, {"start": 2538.33, "end": 2539.17, "word": " in", "probability": 0.8203125}, {"start": 2539.17, "end": 2539.41, "word": " this", "probability": 0.94091796875}, {"start": 2539.41, "end": 2539.91, "word": " confidence", "probability": 0.98828125}, {"start": 2539.91, "end": 2540.47, "word": " interval", "probability": 0.97119140625}, {"start": 2540.47, "end": 2541.35, "word": " ranges", "probability": 0.82421875}, {"start": 2541.35, "end": 2542.01, "word": " between", "probability": 0.88525390625}], "temperature": 1.0}, {"id": 95, "seek": 257164, "start": 2543.18, "end": 2571.64, "text": " 3% all the way up to 7%. Now, does 8% lie in this interval? The answer is no. That means it never equals 8%. So we have to reject another hypothesis. 
So since the confidence interval we have does not cover or capture or contain pi of 8%, then we reject it.", "tokens": [805, 4, 439, 264, 636, 493, 281, 1614, 6856, 823, 11, 775, 1649, 4, 4544, 294, 341, 15035, 30, 440, 1867, 307, 572, 13, 663, 1355, 309, 1128, 6915, 1649, 6856, 407, 321, 362, 281, 8248, 1071, 17291, 13, 407, 1670, 264, 6687, 15035, 321, 362, 775, 406, 2060, 420, 7983, 420, 5304, 3895, 295, 1649, 8923, 550, 321, 8248, 309, 13], "avg_logprob": -0.18253968443189347, "compression_ratio": 1.4855491329479769, "no_speech_prob": 0.0, "words": [{"start": 2543.18, "end": 2543.48, "word": " 3", "probability": 0.26025390625}, {"start": 2543.48, "end": 2543.9, "word": "%", "probability": 0.75390625}, {"start": 2543.9, "end": 2544.38, "word": " all", "probability": 0.775390625}, {"start": 2544.38, "end": 2544.56, "word": " the", "probability": 0.90869140625}, {"start": 2544.56, "end": 2544.72, "word": " way", "probability": 0.951171875}, {"start": 2544.72, "end": 2544.92, "word": " up", "probability": 0.92138671875}, {"start": 2544.92, "end": 2545.06, "word": " to", "probability": 0.95751953125}, {"start": 2545.06, "end": 2545.8, "word": " 7%.", "probability": 0.746826171875}, {"start": 2545.8, "end": 2546.44, "word": " Now,", "probability": 0.8349609375}, {"start": 2546.52, "end": 2546.84, "word": " does", "probability": 0.94970703125}, {"start": 2546.84, "end": 2547.14, "word": " 8", "probability": 0.94091796875}, {"start": 2547.14, "end": 2547.6, "word": "%", "probability": 0.9970703125}, {"start": 2547.6, "end": 2548.04, "word": " lie", "probability": 0.8056640625}, {"start": 2548.04, "end": 2548.26, "word": " in", "probability": 0.927734375}, {"start": 2548.26, "end": 2548.44, "word": " this", "probability": 0.93212890625}, {"start": 2548.44, "end": 2548.82, "word": " interval?", "probability": 0.953125}, {"start": 2549.38, "end": 2549.66, "word": " The", "probability": 0.81787109375}, {"start": 2549.66, "end": 2549.9, "word": " answer", "probability": 
0.96240234375}, {"start": 2549.9, "end": 2550.12, "word": " is", "probability": 0.94921875}, {"start": 2550.12, "end": 2550.34, "word": " no.", "probability": 0.79638671875}, {"start": 2550.46, "end": 2550.72, "word": " That", "probability": 0.89013671875}, {"start": 2550.72, "end": 2551.12, "word": " means", "probability": 0.93994140625}, {"start": 2551.12, "end": 2551.62, "word": " it", "probability": 0.89794921875}, {"start": 2551.62, "end": 2552.02, "word": " never", "probability": 0.9013671875}, {"start": 2552.02, "end": 2552.6, "word": " equals", "probability": 0.923828125}, {"start": 2552.6, "end": 2553.2, "word": " 8%.", "probability": 0.89306640625}, {"start": 2553.2, "end": 2553.68, "word": " So", "probability": 0.96044921875}, {"start": 2553.68, "end": 2553.78, "word": " we", "probability": 0.495361328125}, {"start": 2553.78, "end": 2553.92, "word": " have", "probability": 0.94677734375}, {"start": 2553.92, "end": 2554.04, "word": " to", "probability": 0.96826171875}, {"start": 2554.04, "end": 2554.36, "word": " reject", "probability": 0.89892578125}, {"start": 2554.36, "end": 2554.62, "word": " another", "probability": 0.266357421875}, {"start": 2554.62, "end": 2555.1, "word": " hypothesis.", "probability": 0.98583984375}, {"start": 2555.8, "end": 2555.94, "word": " So", "probability": 0.916015625}, {"start": 2555.94, "end": 2556.38, "word": " since", "probability": 0.79150390625}, {"start": 2556.38, "end": 2559.52, "word": " the", "probability": 0.8779296875}, {"start": 2559.52, "end": 2560.0, "word": " confidence", "probability": 0.962890625}, {"start": 2560.0, "end": 2560.46, "word": " interval", "probability": 0.97314453125}, {"start": 2560.46, "end": 2560.76, "word": " we", "probability": 0.70849609375}, {"start": 2560.76, "end": 2561.08, "word": " have", "probability": 0.94140625}, {"start": 2561.08, "end": 2561.96, "word": " does", "probability": 0.974609375}, {"start": 2561.96, "end": 2562.4, "word": " not", "probability": 0.9462890625}, 
{"start": 2562.4, "end": 2564.08, "word": " cover", "probability": 0.9052734375}, {"start": 2564.08, "end": 2565.14, "word": " or", "probability": 0.7841796875}, {"start": 2565.14, "end": 2565.58, "word": " capture", "probability": 0.93701171875}, {"start": 2565.58, "end": 2565.88, "word": " or", "probability": 0.8818359375}, {"start": 2565.88, "end": 2566.32, "word": " contain", "probability": 0.94287109375}, {"start": 2566.32, "end": 2567.3, "word": " pi", "probability": 0.65625}, {"start": 2567.3, "end": 2567.68, "word": " of", "probability": 0.95556640625}, {"start": 2567.68, "end": 2568.5, "word": " 8%,", "probability": 0.94287109375}, {"start": 2568.5, "end": 2569.78, "word": " then", "probability": 0.8525390625}, {"start": 2569.78, "end": 2570.44, "word": " we", "probability": 0.953125}, {"start": 2570.44, "end": 2571.52, "word": " reject", "probability": 0.93603515625}, {"start": 2571.52, "end": 2571.64, "word": " it.", "probability": 0.392333984375}], "temperature": 1.0}, {"id": 96, "seek": 259480, "start": 2574.6, "end": 2594.8, "text": " So actually, there are three methods to solve this problem. One is critical value approach. And in this case, we just found the critical values here, minus and plus 1.96. 
And the value of the statistic", "tokens": [407, 767, 11, 456, 366, 1045, 7150, 281, 5039, 341, 1154, 13, 1485, 307, 4924, 2158, 3109, 13, 400, 294, 341, 1389, 11, 321, 445, 1352, 264, 4924, 4190, 510, 11, 3175, 293, 1804, 502, 13, 22962, 13, 400, 264, 2158, 295, 264, 29588], "avg_logprob": -0.19600693782170614, "compression_ratio": 1.4027777777777777, "no_speech_prob": 0.0, "words": [{"start": 2574.5999999999995, "end": 2575.3999999999996, "word": " So", "probability": 0.290283203125}, {"start": 2575.3999999999996, "end": 2576.2, "word": " actually,", "probability": 0.7216796875}, {"start": 2576.82, "end": 2577.48, "word": " there", "probability": 0.89453125}, {"start": 2577.48, "end": 2577.66, "word": " are", "probability": 0.943359375}, {"start": 2577.66, "end": 2578.08, "word": " three", "probability": 0.86669921875}, {"start": 2578.08, "end": 2579.48, "word": " methods", "probability": 0.85205078125}, {"start": 2579.48, "end": 2580.36, "word": " to", "probability": 0.94482421875}, {"start": 2580.36, "end": 2580.56, "word": " solve", "probability": 0.92578125}, {"start": 2580.56, "end": 2580.72, "word": " this", "probability": 0.9482421875}, {"start": 2580.72, "end": 2581.08, "word": " problem.", "probability": 0.8759765625}, {"start": 2581.44, "end": 2581.72, "word": " One", "probability": 0.9169921875}, {"start": 2581.72, "end": 2581.92, "word": " is", "probability": 0.943359375}, {"start": 2581.92, "end": 2582.36, "word": " critical", "probability": 0.71435546875}, {"start": 2582.36, "end": 2582.66, "word": " value", "probability": 0.93212890625}, {"start": 2582.66, "end": 2583.18, "word": " approach.", "probability": 0.92041015625}, {"start": 2584.52, "end": 2585.32, "word": " And", "probability": 0.7578125}, {"start": 2585.32, "end": 2585.48, "word": " in", "probability": 0.87158203125}, {"start": 2585.48, "end": 2585.74, "word": " this", "probability": 0.94677734375}, {"start": 2585.74, "end": 2586.26, "word": " case,", "probability": 0.9091796875}, 
{"start": 2586.68, "end": 2586.82, "word": " we", "probability": 0.955078125}, {"start": 2586.82, "end": 2587.22, "word": " just", "probability": 0.91796875}, {"start": 2587.22, "end": 2588.36, "word": " found", "probability": 0.9140625}, {"start": 2588.36, "end": 2588.66, "word": " the", "probability": 0.90869140625}, {"start": 2588.66, "end": 2589.0, "word": " critical", "probability": 0.9443359375}, {"start": 2589.0, "end": 2589.56, "word": " values", "probability": 0.9111328125}, {"start": 2589.56, "end": 2589.84, "word": " here,", "probability": 0.85205078125}, {"start": 2590.28, "end": 2590.68, "word": " minus", "probability": 0.962890625}, {"start": 2590.68, "end": 2591.0, "word": " and", "probability": 0.9150390625}, {"start": 2591.0, "end": 2591.38, "word": " plus", "probability": 0.8701171875}, {"start": 2591.38, "end": 2591.64, "word": " 1", "probability": 0.88916015625}, {"start": 2591.64, "end": 2592.22, "word": ".96.", "probability": 0.986083984375}, {"start": 2593.08, "end": 2593.48, "word": " And", "probability": 0.94384765625}, {"start": 2593.48, "end": 2593.8, "word": " the", "probability": 0.9091796875}, {"start": 2593.8, "end": 2594.06, "word": " value", "probability": 0.9794921875}, {"start": 2594.06, "end": 2594.18, "word": " of", "probability": 0.95654296875}, {"start": 2594.18, "end": 2594.3, "word": " the", "probability": 0.83837890625}, {"start": 2594.3, "end": 2594.8, "word": " statistic", "probability": 0.88134765625}], "temperature": 1.0}, {"id": 97, "seek": 262369, "start": 2597.17, "end": 2623.69, "text": " fourth in the rejection region, so we reject the null hypothesis. That's for the first approach. The other one, the B value, and we found that the B value is 0, 1, 3, 6, which is smaller than alpha. Then again, we reject the null hypothesis. The last approach is the confidence interval approach, because here we are talking about two-sided test. 
And the confidence is approximately", "tokens": [6409, 294, 264, 26044, 4458, 11, 370, 321, 8248, 264, 18184, 17291, 13, 663, 311, 337, 264, 700, 3109, 13, 440, 661, 472, 11, 264, 363, 2158, 11, 293, 321, 1352, 300, 264, 363, 2158, 307, 1958, 11, 502, 11, 805, 11, 1386, 11, 597, 307, 4356, 813, 8961, 13, 1396, 797, 11, 321, 8248, 264, 18184, 17291, 13, 440, 1036, 3109, 307, 264, 6687, 15035, 3109, 11, 570, 510, 321, 366, 1417, 466, 732, 12, 30941, 1500, 13, 400, 264, 6687, 307, 10447], "avg_logprob": -0.23970588095047896, "compression_ratio": 1.7813953488372094, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 2597.17, "end": 2597.71, "word": " fourth", "probability": 0.10418701171875}, {"start": 2597.71, "end": 2598.03, "word": " in", "probability": 0.83642578125}, {"start": 2598.03, "end": 2598.15, "word": " the", "probability": 0.88330078125}, {"start": 2598.15, "end": 2598.43, "word": " rejection", "probability": 0.923828125}, {"start": 2598.43, "end": 2598.77, "word": " region,", "probability": 0.90673828125}, {"start": 2598.97, "end": 2599.07, "word": " so", "probability": 0.8671875}, {"start": 2599.07, "end": 2599.23, "word": " we", "probability": 0.92919921875}, {"start": 2599.23, "end": 2599.57, "word": " reject", "probability": 0.830078125}, {"start": 2599.57, "end": 2599.69, "word": " the", "probability": 0.377197265625}, {"start": 2599.69, "end": 2599.77, "word": " null", "probability": 0.978515625}, {"start": 2599.77, "end": 2600.21, "word": " hypothesis.", "probability": 0.853515625}, {"start": 2601.01, "end": 2601.55, "word": " That's", "probability": 0.922607421875}, {"start": 2601.55, "end": 2601.71, "word": " for", "probability": 0.91357421875}, {"start": 2601.71, "end": 2601.85, "word": " the", "probability": 0.91650390625}, {"start": 2601.85, "end": 2602.23, "word": " first", "probability": 0.876953125}, {"start": 2602.23, "end": 2602.81, "word": " approach.", "probability": 0.935546875}, {"start": 2603.27, "end": 2603.45, "word": " 
The", "probability": 0.88330078125}, {"start": 2603.45, "end": 2603.67, "word": " other", "probability": 0.90478515625}, {"start": 2603.67, "end": 2603.93, "word": " one,", "probability": 0.92138671875}, {"start": 2604.07, "end": 2604.13, "word": " the", "probability": 0.91259765625}, {"start": 2604.13, "end": 2604.27, "word": " B", "probability": 0.4755859375}, {"start": 2604.27, "end": 2604.63, "word": " value,", "probability": 0.89404296875}, {"start": 2605.75, "end": 2605.99, "word": " and", "probability": 0.8955078125}, {"start": 2605.99, "end": 2606.15, "word": " we", "probability": 0.93359375}, {"start": 2606.15, "end": 2606.37, "word": " found", "probability": 0.890625}, {"start": 2606.37, "end": 2606.57, "word": " that", "probability": 0.72607421875}, {"start": 2606.57, "end": 2606.71, "word": " the", "probability": 0.89013671875}, {"start": 2606.71, "end": 2606.87, "word": " B", "probability": 0.97314453125}, {"start": 2606.87, "end": 2607.15, "word": " value", "probability": 0.95654296875}, {"start": 2607.15, "end": 2607.37, "word": " is", "probability": 0.8740234375}, {"start": 2607.37, "end": 2607.69, "word": " 0,", "probability": 0.806640625}, {"start": 2607.71, "end": 2607.81, "word": " 1,", "probability": 0.55517578125}, {"start": 2607.85, "end": 2608.03, "word": " 3,", "probability": 0.96435546875}, {"start": 2608.15, "end": 2608.49, "word": " 6,", "probability": 0.9873046875}, {"start": 2608.65, "end": 2608.79, "word": " which", "probability": 0.87841796875}, {"start": 2608.79, "end": 2608.89, "word": " is", "probability": 0.90576171875}, {"start": 2608.89, "end": 2609.77, "word": " smaller", "probability": 0.4033203125}, {"start": 2609.77, "end": 2610.07, "word": " than", "probability": 0.94091796875}, {"start": 2610.07, "end": 2610.41, "word": " alpha.", "probability": 0.79541015625}, {"start": 2611.23, "end": 2611.55, "word": " Then", "probability": 0.5810546875}, {"start": 2611.55, "end": 2611.87, "word": " again,", "probability": 0.87890625}, 
{"start": 2611.95, "end": 2612.05, "word": " we", "probability": 0.90478515625}, {"start": 2612.05, "end": 2612.31, "word": " reject", "probability": 0.833984375}, {"start": 2612.31, "end": 2612.47, "word": " the", "probability": 0.81982421875}, {"start": 2612.47, "end": 2612.59, "word": " null", "probability": 0.939453125}, {"start": 2612.59, "end": 2613.07, "word": " hypothesis.", "probability": 0.87841796875}, {"start": 2614.29, "end": 2614.69, "word": " The", "probability": 0.880859375}, {"start": 2614.69, "end": 2615.07, "word": " last", "probability": 0.84130859375}, {"start": 2615.07, "end": 2615.43, "word": " approach", "probability": 0.9150390625}, {"start": 2615.43, "end": 2615.79, "word": " is", "probability": 0.9345703125}, {"start": 2615.79, "end": 2615.97, "word": " the", "probability": 0.91357421875}, {"start": 2615.97, "end": 2616.29, "word": " confidence", "probability": 0.92822265625}, {"start": 2616.29, "end": 2616.83, "word": " interval", "probability": 0.98193359375}, {"start": 2616.83, "end": 2617.51, "word": " approach,", "probability": 0.9326171875}, {"start": 2617.83, "end": 2618.29, "word": " because", "probability": 0.869140625}, {"start": 2618.29, "end": 2618.45, "word": " here", "probability": 0.72705078125}, {"start": 2618.45, "end": 2618.53, "word": " we", "probability": 0.71875}, {"start": 2618.53, "end": 2618.67, "word": " are", "probability": 0.7646484375}, {"start": 2618.67, "end": 2618.91, "word": " talking", "probability": 0.85888671875}, {"start": 2618.91, "end": 2619.19, "word": " about", "probability": 0.90869140625}, {"start": 2619.19, "end": 2619.41, "word": " two", "probability": 0.87353515625}, {"start": 2619.41, "end": 2619.69, "word": "-sided", "probability": 0.822265625}, {"start": 2619.69, "end": 2620.21, "word": " test.", "probability": 0.5625}, {"start": 2621.13, "end": 2621.49, "word": " And", "probability": 0.94384765625}, {"start": 2621.49, "end": 2621.65, "word": " the", "probability": 0.908203125}, {"start": 
2621.65, "end": 2621.95, "word": " confidence", "probability": 0.9697265625}, {"start": 2621.95, "end": 2622.33, "word": " is", "probability": 0.9423828125}, {"start": 2622.33, "end": 2623.69, "word": " approximately", "probability": 0.88134765625}], "temperature": 1.0}, {"id": 98, "seek": 265179, "start": 2625.41, "end": 2651.79, "text": " ranges between three and seven percent. And my pi, which I am talking about in the null hypothesis, is not in the center, but I mean the confidence interval does not capture the value of pi of eight percent. Therefore, we reject the null hypothesis. That's for testing about population promotion.", "tokens": [22526, 1296, 1045, 293, 3407, 3043, 13, 400, 452, 3895, 11, 597, 286, 669, 1417, 466, 294, 264, 18184, 17291, 11, 307, 406, 294, 264, 3056, 11, 457, 286, 914, 264, 6687, 15035, 775, 406, 7983, 264, 2158, 295, 3895, 295, 3180, 3043, 13, 7504, 11, 321, 8248, 264, 18184, 17291, 13, 663, 311, 337, 4997, 466, 4415, 15783, 13], "avg_logprob": -0.23245389344262296, "compression_ratio": 1.6054054054054054, "no_speech_prob": 0.0, "words": [{"start": 2625.41, "end": 2626.07, "word": " ranges", "probability": 0.54833984375}, {"start": 2626.07, "end": 2626.71, "word": " between", "probability": 0.875}, {"start": 2626.71, "end": 2627.83, "word": " three", "probability": 0.55859375}, {"start": 2627.83, "end": 2628.29, "word": " and", "probability": 0.9365234375}, {"start": 2628.29, "end": 2628.61, "word": " seven", "probability": 0.72314453125}, {"start": 2628.61, "end": 2629.13, "word": " percent.", "probability": 0.96044921875}, {"start": 2629.47, "end": 2629.61, "word": " And", "probability": 0.88720703125}, {"start": 2629.61, "end": 2629.83, "word": " my", "probability": 0.94287109375}, {"start": 2629.83, "end": 2630.07, "word": " pi,", "probability": 0.465087890625}, {"start": 2630.13, "end": 2630.31, "word": " which", "probability": 0.94970703125}, {"start": 2630.31, "end": 2630.43, "word": " I", "probability": 0.99609375}, 
{"start": 2630.43, "end": 2630.53, "word": " am", "probability": 0.458984375}, {"start": 2630.53, "end": 2630.87, "word": " talking", "probability": 0.8349609375}, {"start": 2630.87, "end": 2631.27, "word": " about", "probability": 0.9013671875}, {"start": 2631.27, "end": 2631.71, "word": " in", "probability": 0.340576171875}, {"start": 2631.71, "end": 2632.19, "word": " the", "probability": 0.90576171875}, {"start": 2632.19, "end": 2632.51, "word": " null", "probability": 0.93017578125}, {"start": 2632.51, "end": 2632.99, "word": " hypothesis,", "probability": 0.85205078125}, {"start": 2633.83, "end": 2634.29, "word": " is", "probability": 0.92236328125}, {"start": 2634.29, "end": 2634.51, "word": " not", "probability": 0.95361328125}, {"start": 2634.51, "end": 2634.65, "word": " in", "probability": 0.9375}, {"start": 2634.65, "end": 2634.81, "word": " the", "probability": 0.912109375}, {"start": 2634.81, "end": 2635.07, "word": " center,", "probability": 0.82666015625}, {"start": 2635.07, "end": 2635.35, "word": " but", "probability": 0.68212890625}, {"start": 2635.35, "end": 2635.61, "word": " I", "probability": 0.85302734375}, {"start": 2635.61, "end": 2635.85, "word": " mean", "probability": 0.96435546875}, {"start": 2635.85, "end": 2636.15, "word": " the", "probability": 0.61279296875}, {"start": 2636.15, "end": 2636.49, "word": " confidence", "probability": 0.86474609375}, {"start": 2636.49, "end": 2636.95, "word": " interval", "probability": 0.84814453125}, {"start": 2636.95, "end": 2637.27, "word": " does", "probability": 0.9287109375}, {"start": 2637.27, "end": 2637.63, "word": " not", "probability": 0.953125}, {"start": 2637.63, "end": 2638.23, "word": " capture", "probability": 0.86572265625}, {"start": 2638.23, "end": 2638.61, "word": " the", "probability": 0.90380859375}, {"start": 2638.61, "end": 2638.83, "word": " value", "probability": 0.9794921875}, {"start": 2638.83, "end": 2638.99, "word": " of", "probability": 0.884765625}, {"start": 2638.99, 
"end": 2639.17, "word": " pi", "probability": 0.92626953125}, {"start": 2639.17, "end": 2639.35, "word": " of", "probability": 0.7509765625}, {"start": 2639.35, "end": 2639.61, "word": " eight", "probability": 0.7529296875}, {"start": 2639.61, "end": 2640.17, "word": " percent.", "probability": 0.966796875}, {"start": 2641.01, "end": 2641.45, "word": " Therefore,", "probability": 0.79248046875}, {"start": 2641.79, "end": 2642.13, "word": " we", "probability": 0.9619140625}, {"start": 2642.13, "end": 2643.55, "word": " reject", "probability": 0.798828125}, {"start": 2643.55, "end": 2643.89, "word": " the", "probability": 0.90576171875}, {"start": 2643.89, "end": 2644.07, "word": " null", "probability": 0.94677734375}, {"start": 2644.07, "end": 2644.55, "word": " hypothesis.", "probability": 0.8369140625}, {"start": 2646.27, "end": 2646.95, "word": " That's", "probability": 0.92822265625}, {"start": 2646.95, "end": 2647.75, "word": " for", "probability": 0.95556640625}, {"start": 2647.75, "end": 2648.69, "word": " testing", "probability": 0.8857421875}, {"start": 2648.69, "end": 2649.47, "word": " about", "probability": 0.90966796875}, {"start": 2649.47, "end": 2650.59, "word": " population", "probability": 0.919921875}, {"start": 2650.59, "end": 2651.79, "word": " promotion.", "probability": 0.278076171875}], "temperature": 1.0}, {"id": 99, "seek": 268326, "start": 2654.1, "end": 2683.26, "text": " Any questions? Next time, shall I start chapter 10? Up to this point, we just explained hypothesis testing about one sample. So there is only one sample in chapter 9. Here, suppose there are two samples. 
How can we conduct", "tokens": [2639, 1651, 30, 3087, 565, 11, 4393, 286, 722, 7187, 1266, 30, 5858, 281, 341, 935, 11, 321, 445, 8825, 17291, 4997, 466, 472, 6889, 13, 407, 456, 307, 787, 472, 6889, 294, 7187, 1722, 13, 1692, 11, 7297, 456, 366, 732, 10938, 13, 1012, 393, 321, 6018], "avg_logprob": -0.17936862244897958, "compression_ratio": 1.4025157232704402, "no_speech_prob": 0.0, "words": [{"start": 2654.1, "end": 2654.4, "word": " Any", "probability": 0.60009765625}, {"start": 2654.4, "end": 2654.72, "word": " questions?", "probability": 0.467041015625}, {"start": 2659.1, "end": 2659.72, "word": " Next", "probability": 0.85986328125}, {"start": 2659.72, "end": 2660.0, "word": " time,", "probability": 0.8681640625}, {"start": 2660.1, "end": 2660.22, "word": " shall", "probability": 0.88818359375}, {"start": 2660.22, "end": 2660.38, "word": " I", "probability": 0.68798828125}, {"start": 2660.38, "end": 2661.12, "word": " start", "probability": 0.8828125}, {"start": 2661.12, "end": 2662.68, "word": " chapter", "probability": 0.5234375}, {"start": 2662.68, "end": 2663.06, "word": " 10?", "probability": 0.74755859375}, {"start": 2667.06, "end": 2667.68, "word": " Up", "probability": 0.95166015625}, {"start": 2667.68, "end": 2667.82, "word": " to", "probability": 0.96875}, {"start": 2667.82, "end": 2668.04, "word": " this", "probability": 0.947265625}, {"start": 2668.04, "end": 2668.34, "word": " point,", "probability": 0.96728515625}, {"start": 2668.58, "end": 2668.72, "word": " we", "probability": 0.92626953125}, {"start": 2668.72, "end": 2669.24, "word": " just", "probability": 0.85791015625}, {"start": 2669.24, "end": 2670.54, "word": " explained", "probability": 0.787109375}, {"start": 2670.54, "end": 2673.38, "word": " hypothesis", "probability": 0.85693359375}, {"start": 2673.38, "end": 2673.98, "word": " testing", "probability": 0.6591796875}, {"start": 2673.98, "end": 2674.64, "word": " about", "probability": 0.91015625}, {"start": 2674.64, "end": 2675.1, 
"word": " one", "probability": 0.923828125}, {"start": 2675.1, "end": 2675.46, "word": " sample.", "probability": 0.85888671875}, {"start": 2676.5, "end": 2676.76, "word": " So", "probability": 0.9052734375}, {"start": 2676.76, "end": 2676.98, "word": " there", "probability": 0.76123046875}, {"start": 2676.98, "end": 2677.16, "word": " is", "probability": 0.9208984375}, {"start": 2677.16, "end": 2677.44, "word": " only", "probability": 0.927734375}, {"start": 2677.44, "end": 2677.72, "word": " one", "probability": 0.93359375}, {"start": 2677.72, "end": 2678.02, "word": " sample", "probability": 0.88916015625}, {"start": 2678.02, "end": 2678.18, "word": " in", "probability": 0.931640625}, {"start": 2678.18, "end": 2678.44, "word": " chapter", "probability": 0.7607421875}, {"start": 2678.44, "end": 2678.8, "word": " 9.", "probability": 0.7197265625}, {"start": 2678.92, "end": 2679.18, "word": " Here,", "probability": 0.828125}, {"start": 2679.62, "end": 2680.06, "word": " suppose", "probability": 0.8974609375}, {"start": 2680.06, "end": 2680.64, "word": " there", "probability": 0.8388671875}, {"start": 2680.64, "end": 2680.78, "word": " are", "probability": 0.94189453125}, {"start": 2680.78, "end": 2681.0, "word": " two", "probability": 0.90283203125}, {"start": 2681.0, "end": 2681.44, "word": " samples.", "probability": 0.8876953125}, {"start": 2681.92, "end": 2682.24, "word": " How", "probability": 0.95703125}, {"start": 2682.24, "end": 2682.5, "word": " can", "probability": 0.94091796875}, {"start": 2682.5, "end": 2682.7, "word": " we", "probability": 0.9609375}, {"start": 2682.7, "end": 2683.26, "word": " conduct", "probability": 0.85107421875}], "temperature": 1.0}, {"id": 100, "seek": 270687, "start": 2683.75, "end": 2706.87, "text": " Hypothesizing for comparing two population means. For example, suppose we are teaching males and females, and we are interested to see if there is a significant difference between the performance of males and females. 
For example, suppose the instructor claimed that", "tokens": [45649, 4624, 3319, 337, 15763, 732, 4415, 1355, 13, 1171, 1365, 11, 7297, 321, 366, 4571, 20776, 293, 21529, 11, 293, 321, 366, 3102, 281, 536, 498, 456, 307, 257, 4776, 2649, 1296, 264, 3389, 295, 20776, 293, 21529, 13, 1171, 1365, 11, 7297, 264, 18499, 12941, 300], "avg_logprob": -0.2021683624812535, "compression_ratio": 1.6481481481481481, "no_speech_prob": 0.0, "words": [{"start": 2683.75, "end": 2684.69, "word": " Hypothesizing", "probability": 0.5047200520833334}, {"start": 2684.69, "end": 2685.27, "word": " for", "probability": 0.9169921875}, {"start": 2685.27, "end": 2686.67, "word": " comparing", "probability": 0.91845703125}, {"start": 2686.67, "end": 2687.05, "word": " two", "probability": 0.91845703125}, {"start": 2687.05, "end": 2687.45, "word": " population", "probability": 0.787109375}, {"start": 2687.45, "end": 2687.75, "word": " means.", "probability": 0.93896484375}, {"start": 2687.89, "end": 2687.95, "word": " For", "probability": 0.95751953125}, {"start": 2687.95, "end": 2688.25, "word": " example,", "probability": 0.97216796875}, {"start": 2688.35, "end": 2688.67, "word": " suppose", "probability": 0.91357421875}, {"start": 2688.67, "end": 2689.47, "word": " we", "probability": 0.8818359375}, {"start": 2689.47, "end": 2689.61, "word": " are", "probability": 0.935546875}, {"start": 2689.61, "end": 2689.97, "word": " teaching", "probability": 0.8984375}, {"start": 2689.97, "end": 2690.37, "word": " males", "probability": 0.94580078125}, {"start": 2690.37, "end": 2690.59, "word": " and", "probability": 0.94482421875}, {"start": 2690.59, "end": 2691.01, "word": " females,", "probability": 0.94775390625}, {"start": 2691.53, "end": 2691.87, "word": " and", "probability": 0.9287109375}, {"start": 2691.87, "end": 2692.01, "word": " we", "probability": 0.95751953125}, {"start": 2692.01, "end": 2692.13, "word": " are", "probability": 0.93408203125}, {"start": 2692.13, "end": 2692.61, "word": " 
interested", "probability": 0.85595703125}, {"start": 2692.61, "end": 2692.97, "word": " to", "probability": 0.96630859375}, {"start": 2692.97, "end": 2693.31, "word": " see", "probability": 0.92724609375}, {"start": 2693.31, "end": 2694.13, "word": " if", "probability": 0.94873046875}, {"start": 2694.13, "end": 2694.35, "word": " there", "probability": 0.91455078125}, {"start": 2694.35, "end": 2694.53, "word": " is", "probability": 0.9345703125}, {"start": 2694.53, "end": 2694.71, "word": " a", "probability": 0.99462890625}, {"start": 2694.71, "end": 2695.19, "word": " significant", "probability": 0.86083984375}, {"start": 2695.19, "end": 2695.71, "word": " difference", "probability": 0.8857421875}, {"start": 2695.71, "end": 2696.25, "word": " between", "probability": 0.86572265625}, {"start": 2696.25, "end": 2697.69, "word": " the", "probability": 0.9169921875}, {"start": 2697.69, "end": 2698.57, "word": " performance", "probability": 0.90966796875}, {"start": 2698.57, "end": 2699.03, "word": " of", "probability": 0.96875}, {"start": 2699.03, "end": 2699.29, "word": " males", "probability": 0.943359375}, {"start": 2699.29, "end": 2699.51, "word": " and", "probability": 0.94189453125}, {"start": 2699.51, "end": 2699.95, "word": " females.", "probability": 0.9482421875}, {"start": 2700.83, "end": 2701.07, "word": " For", "probability": 0.96435546875}, {"start": 2701.07, "end": 2701.43, "word": " example,", "probability": 0.97412109375}, {"start": 2701.79, "end": 2702.29, "word": " suppose", "probability": 0.9169921875}, {"start": 2702.29, "end": 2704.19, "word": " the", "probability": 0.58203125}, {"start": 2704.19, "end": 2705.53, "word": " instructor", "probability": 0.921875}, {"start": 2705.53, "end": 2706.41, "word": " claimed", "probability": 0.338134765625}, {"start": 2706.41, "end": 2706.87, "word": " that", "probability": 0.93701171875}], "temperature": 1.0}, {"id": 101, "seek": 273725, "start": 2708.25, "end": 2737.25, "text": " Males do better than 
females in the exam. So how can we test if, for example, the mean score for group A, for example, is greater than the mean of group B? So in this case, there are two samples. So how can we conduct hypothesis testing for two sample tests? That will be in chapter 10.", "tokens": [376, 4229, 360, 1101, 813, 21529, 294, 264, 1139, 13, 407, 577, 393, 321, 1500, 498, 11, 337, 1365, 11, 264, 914, 6175, 337, 1594, 316, 11, 337, 1365, 11, 307, 5044, 813, 264, 914, 295, 1594, 363, 30, 407, 294, 341, 1389, 11, 456, 366, 732, 10938, 13, 407, 577, 393, 321, 6018, 17291, 4997, 337, 732, 6889, 6921, 30, 663, 486, 312, 294, 7187, 1266, 13], "avg_logprob": -0.14628623188405798, "compression_ratio": 1.698224852071006, "no_speech_prob": 0.0, "words": [{"start": 2708.2500000000005, "end": 2708.8900000000003, "word": " Males", "probability": 0.6949462890625}, {"start": 2708.8900000000003, "end": 2709.53, "word": " do", "probability": 0.9541015625}, {"start": 2709.53, "end": 2709.85, "word": " better", "probability": 0.916015625}, {"start": 2709.85, "end": 2710.11, "word": " than", "probability": 0.94775390625}, {"start": 2710.11, "end": 2710.61, "word": " females", "probability": 0.94384765625}, {"start": 2710.61, "end": 2711.53, "word": " in", "probability": 0.859375}, {"start": 2711.53, "end": 2711.67, "word": " the", "probability": 0.8193359375}, {"start": 2711.67, "end": 2711.95, "word": " exam.", "probability": 0.8544921875}, {"start": 2712.07, "end": 2712.19, "word": " So", "probability": 0.94287109375}, {"start": 2712.19, "end": 2712.37, "word": " how", "probability": 0.82421875}, {"start": 2712.37, "end": 2712.55, "word": " can", "probability": 0.9345703125}, {"start": 2712.55, "end": 2712.71, "word": " we", "probability": 0.9501953125}, {"start": 2712.71, "end": 2713.07, "word": " test", "probability": 0.85302734375}, {"start": 2713.07, "end": 2713.87, "word": " if,", "probability": 0.9150390625}, {"start": 2714.03, "end": 2714.15, "word": " for", "probability": 0.951171875}, 
{"start": 2714.15, "end": 2714.55, "word": " example,", "probability": 0.9716796875}, {"start": 2715.89, "end": 2716.15, "word": " the", "probability": 0.9033203125}, {"start": 2716.15, "end": 2716.43, "word": " mean", "probability": 0.97509765625}, {"start": 2716.43, "end": 2717.57, "word": " score", "probability": 0.8681640625}, {"start": 2717.57, "end": 2718.81, "word": " for", "probability": 0.92236328125}, {"start": 2718.81, "end": 2719.27, "word": " group", "probability": 0.603515625}, {"start": 2719.27, "end": 2719.55, "word": " A,", "probability": 0.97265625}, {"start": 2719.67, "end": 2719.75, "word": " for", "probability": 0.94775390625}, {"start": 2719.75, "end": 2720.15, "word": " example,", "probability": 0.97265625}, {"start": 2722.03, "end": 2722.31, "word": " is", "probability": 0.943359375}, {"start": 2722.31, "end": 2722.71, "word": " greater", "probability": 0.90869140625}, {"start": 2722.71, "end": 2723.21, "word": " than", "probability": 0.9462890625}, {"start": 2723.21, "end": 2724.15, "word": " the", "probability": 0.923828125}, {"start": 2724.15, "end": 2724.45, "word": " mean", "probability": 0.9716796875}, {"start": 2724.45, "end": 2725.09, "word": " of", "probability": 0.94970703125}, {"start": 2725.09, "end": 2725.37, "word": " group", "probability": 0.88232421875}, {"start": 2725.37, "end": 2725.67, "word": " B?", "probability": 0.99658203125}, {"start": 2727.01, "end": 2727.39, "word": " So", "probability": 0.93896484375}, {"start": 2727.39, "end": 2727.51, "word": " in", "probability": 0.43115234375}, {"start": 2727.51, "end": 2727.63, "word": " this", "probability": 0.939453125}, {"start": 2727.63, "end": 2727.83, "word": " case,", "probability": 0.90185546875}, {"start": 2727.89, "end": 2728.01, "word": " there", "probability": 0.87646484375}, {"start": 2728.01, "end": 2728.19, "word": " are", "probability": 0.9375}, {"start": 2728.19, "end": 2728.41, "word": " two", "probability": 0.9169921875}, {"start": 2728.41, "end": 2728.79, 
"word": " samples.", "probability": 0.91748046875}, {"start": 2729.53, "end": 2729.77, "word": " So", "probability": 0.93896484375}, {"start": 2729.77, "end": 2729.95, "word": " how", "probability": 0.9287109375}, {"start": 2729.95, "end": 2730.13, "word": " can", "probability": 0.93798828125}, {"start": 2730.13, "end": 2730.45, "word": " we", "probability": 0.9619140625}, {"start": 2730.45, "end": 2731.33, "word": " conduct", "probability": 0.84130859375}, {"start": 2731.33, "end": 2731.91, "word": " hypothesis", "probability": 0.6552734375}, {"start": 2731.91, "end": 2732.35, "word": " testing", "probability": 0.460693359375}, {"start": 2732.35, "end": 2732.83, "word": " for", "probability": 0.94677734375}, {"start": 2732.83, "end": 2733.69, "word": " two", "probability": 0.9208984375}, {"start": 2733.69, "end": 2734.01, "word": " sample", "probability": 0.62890625}, {"start": 2734.01, "end": 2734.39, "word": " tests?", "probability": 0.78955078125}, {"start": 2734.87, "end": 2735.47, "word": " That", "probability": 0.89208984375}, {"start": 2735.47, "end": 2735.67, "word": " will", "probability": 0.8603515625}, {"start": 2735.67, "end": 2736.05, "word": " be", "probability": 0.95166015625}, {"start": 2736.05, "end": 2736.53, "word": " in", "probability": 0.93505859375}, {"start": 2736.53, "end": 2736.85, "word": " chapter", "probability": 0.5390625}, {"start": 2736.85, "end": 2737.25, "word": " 10.", "probability": 0.900390625}], "temperature": 1.0}, {"id": 102, "seek": 276639, "start": 2738.19, "end": 2766.39, "text": " So chapter 10 left for this course. In addition to that, we'll discuss correlation and simple linear regression after that. So maybe we'll discuss two more chapters, 10 and 12. 
If we have enough time, maybe we'll cover chapter 11.", "tokens": [407, 7187, 1266, 1411, 337, 341, 1164, 13, 682, 4500, 281, 300, 11, 321, 603, 2248, 20009, 293, 2199, 8213, 24590, 934, 300, 13, 407, 1310, 321, 603, 2248, 732, 544, 20013, 11, 1266, 293, 2272, 13, 759, 321, 362, 1547, 565, 11, 1310, 321, 603, 2060, 7187, 2975, 13], "avg_logprob": -0.1344209500387603, "compression_ratio": 1.5098039215686274, "no_speech_prob": 0.0, "words": [{"start": 2738.19, "end": 2738.67, "word": " So", "probability": 0.86328125}, {"start": 2738.67, "end": 2739.51, "word": " chapter", "probability": 0.5029296875}, {"start": 2739.51, "end": 2739.81, "word": " 10", "probability": 0.7548828125}, {"start": 2739.81, "end": 2740.13, "word": " left", "probability": 0.92919921875}, {"start": 2740.13, "end": 2740.65, "word": " for", "probability": 0.9443359375}, {"start": 2740.65, "end": 2741.15, "word": " this", "probability": 0.94580078125}, {"start": 2741.15, "end": 2741.85, "word": " course.", "probability": 0.962890625}, {"start": 2742.67, "end": 2742.99, "word": " In", "probability": 0.9609375}, {"start": 2742.99, "end": 2743.31, "word": " addition", "probability": 0.95703125}, {"start": 2743.31, "end": 2743.53, "word": " to", "probability": 0.96728515625}, {"start": 2743.53, "end": 2743.79, "word": " that,", "probability": 0.939453125}, {"start": 2743.89, "end": 2744.37, "word": " we'll", "probability": 0.7117919921875}, {"start": 2744.37, "end": 2746.95, "word": " discuss", "probability": 0.88818359375}, {"start": 2746.95, "end": 2748.35, "word": " correlation", "probability": 0.880859375}, {"start": 2748.35, "end": 2749.57, "word": " and", "probability": 0.853515625}, {"start": 2749.57, "end": 2750.11, "word": " simple", "probability": 0.708984375}, {"start": 2750.11, "end": 2750.95, "word": " linear", "probability": 0.91845703125}, {"start": 2750.95, "end": 2751.45, "word": " regression", "probability": 0.84130859375}, {"start": 2751.45, "end": 2751.99, "word": " after", 
"probability": 0.8271484375}, {"start": 2751.99, "end": 2752.31, "word": " that.", "probability": 0.93603515625}, {"start": 2752.77, "end": 2752.95, "word": " So", "probability": 0.95703125}, {"start": 2752.95, "end": 2753.29, "word": " maybe", "probability": 0.93310546875}, {"start": 2753.29, "end": 2755.11, "word": " we'll", "probability": 0.935791015625}, {"start": 2755.11, "end": 2757.07, "word": " discuss", "probability": 0.8935546875}, {"start": 2757.07, "end": 2757.37, "word": " two", "probability": 0.8662109375}, {"start": 2757.37, "end": 2758.09, "word": " more", "probability": 0.464111328125}, {"start": 2758.09, "end": 2758.57, "word": " chapters,", "probability": 0.90771484375}, {"start": 2759.31, "end": 2759.53, "word": " 10", "probability": 0.92138671875}, {"start": 2759.53, "end": 2759.73, "word": " and", "probability": 0.9404296875}, {"start": 2759.73, "end": 2760.17, "word": " 12.", "probability": 0.96875}, {"start": 2760.79, "end": 2761.09, "word": " If", "probability": 0.966796875}, {"start": 2761.09, "end": 2761.27, "word": " we", "probability": 0.9599609375}, {"start": 2761.27, "end": 2761.47, "word": " have", "probability": 0.9462890625}, {"start": 2761.47, "end": 2761.77, "word": " enough", "probability": 0.86572265625}, {"start": 2761.77, "end": 2762.21, "word": " time,", "probability": 0.89404296875}, {"start": 2762.55, "end": 2762.89, "word": " maybe", "probability": 0.9453125}, {"start": 2762.89, "end": 2765.43, "word": " we'll", "probability": 0.926513671875}, {"start": 2765.43, "end": 2765.69, "word": " cover", "probability": 0.95556640625}, {"start": 2765.69, "end": 2766.07, "word": " chapter", "probability": 0.82373046875}, {"start": 2766.07, "end": 2766.39, "word": " 11.", "probability": 0.96923828125}], "temperature": 1.0}, {"id": 103, "seek": 277713, "start": 2767.55, "end": 2777.13, "text": " Guys, it depends actually on the time we have. So that's all for today. Any questions? 
Comments?", "tokens": [7855, 11, 309, 5946, 767, 322, 264, 565, 321, 362, 13, 407, 300, 311, 439, 337, 965, 13, 2639, 1651, 30, 2432, 1117, 30], "avg_logprob": -0.2040625047683716, "compression_ratio": 1.0543478260869565, "no_speech_prob": 0.0, "words": [{"start": 2767.55, "end": 2767.91, "word": " Guys,", "probability": 0.253173828125}, {"start": 2768.21, "end": 2768.49, "word": " it", "probability": 0.62939453125}, {"start": 2768.49, "end": 2768.89, "word": " depends", "probability": 0.90283203125}, {"start": 2768.89, "end": 2769.37, "word": " actually", "probability": 0.7646484375}, {"start": 2769.37, "end": 2769.51, "word": " on", "probability": 0.890625}, {"start": 2769.51, "end": 2769.61, "word": " the", "probability": 0.8916015625}, {"start": 2769.61, "end": 2769.81, "word": " time", "probability": 0.904296875}, {"start": 2769.81, "end": 2769.97, "word": " we", "probability": 0.8974609375}, {"start": 2769.97, "end": 2770.17, "word": " have.", "probability": 0.95166015625}, {"start": 2771.29, "end": 2771.61, "word": " So", "probability": 0.87890625}, {"start": 2771.61, "end": 2771.93, "word": " that's", "probability": 0.88818359375}, {"start": 2771.93, "end": 2772.29, "word": " all", "probability": 0.95458984375}, {"start": 2772.29, "end": 2773.53, "word": " for", "probability": 0.9443359375}, {"start": 2773.53, "end": 2773.85, "word": " today.", "probability": 0.85400390625}, {"start": 2773.99, "end": 2774.17, "word": " Any", "probability": 0.8720703125}, {"start": 2774.17, "end": 2774.63, "word": " questions?", "probability": 0.94775390625}, {"start": 2776.49, "end": 2777.13, "word": " Comments?", "probability": 0.898193359375}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 2872.0065, "duration_after_vad": 2624.36312499999} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M.srt new file mode 100644 index 
0000000000000000000000000000000000000000..aebc16ac033c778711b780b5493b55b2b892f776 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M.srt @@ -0,0 +1,2292 @@ + +1 +00:00:08,940 --> 00:00:15,340 +Last time we talked about two sample tests and we + +2 +00:00:15,340 --> 00:00:23,240 +covered one case when the population means we are + +3 +00:00:23,240 --> 00:00:26,640 +selecting random samples and these samples are + +4 +00:00:26,640 --> 00:00:28,940 +independent. So that's case number one, + +5 +00:00:29,600 --> 00:00:33,740 +independent samples. And we assume sigma 1 and + +6 +00:00:33,740 --> 00:00:39,880 +sigma 2 are unknown and equal. And we have discussed + +7 +00:00:39,880 --> 00:00:45,000 +this test. The test statistic was the + +8 +00:00:45,000 --> 00:00:49,600 +difference between the two sample means minus mu1 + +9 +00:00:49,600 --> 00:00:54,460 +minus mu2 under H0. Most of the time, this equals 0 + +10 +00:00:54,460 --> 00:00:57,920 +divided by the standard error of the + +11 +00:00:57,920 --> 00:01:02,320 +estimate, which is S squared B multiplied by 1 + +12 +00:01:02,320 --> 00:01:06,160 +over N, 1 plus 1 over N. S squared B is the bold + +13 +00:01:06,160 --> 00:01:07,360 +variance. + +14 +00:01:12,750 --> 00:01:17,250 +The other case, when sigma 1 and sigma 2 are + +15 +00:01:17,250 --> 00:01:24,150 +unknown, but they are not equal. In this case, the + +16 +00:01:24,150 --> 00:01:29,190 +assumptions remain the same except the last one. + +17 +00:01:29,890 --> 00:01:32,710 +Now, the last one becomes population variances are + +18 +00:01:32,710 --> 00:01:37,310 +unknown, the same as before, but we assume they + +19 +00:01:37,310 --> 00:01:40,590 +are not equal. So it cannot be assumed to be + +20 +00:01:40,590 --> 00:01:46,690 +equal. So in this case, Sigma 1 and + +21 +00:01:46,690 --> 00:01:55,570 +Sigma 2 unknown, and they are not equal. 
So we + +22 +00:01:55,570 --> 00:02:00,010 +assume both Sigmas unknown, but they are not + +23 +00:02:00,010 --> 00:02:04,850 +equal. In this case, we can use one of the + +24 +00:02:04,850 --> 00:02:12,050 +software packages as Excel Minitab SPSS. So I will + +25 +00:02:12,050 --> 00:02:14,930 +leave this part for the SPSS course, how can we + +26 +00:02:14,930 --> 00:02:18,430 +determine the test statistic if sigma is unknown + +27 +00:02:18,430 --> 00:02:24,270 +and we cannot assume they are equal. So that's the + +28 +00:02:24,270 --> 00:02:28,270 +case number one, when the samples are independent. + +29 +00:02:29,630 --> 00:02:32,810 +The second, suppose the populations of interest + +30 +00:02:32,810 --> 00:02:37,190 +are related. The most common example is before and + +31 +00:02:37,190 --> 00:02:37,550 +after. + +32 +00:02:41,040 --> 00:02:49,600 +For example, suppose we have patients who + +33 +00:02:49,600 --> 00:02:53,820 +are suffering from blood pressure and for example + +34 +00:02:53,820 --> 00:03:01,060 +suppose they use a drug A and + +35 +00:03:01,060 --> 00:03:03,700 +we have their blood pressures for example in + +36 +00:03:03,700 --> 00:03:08,590 +January for example number one person number one, + +37 +00:03:08,750 --> 00:03:15,050 +his or her blood pressure was in January 145. The + +38 +00:03:15,050 --> 00:03:18,730 +second person, his blood pressure, for example, is + +39 +00:03:18,730 --> 00:03:23,190 +160. The third one, for example, is 155, and so + +40 +00:03:23,190 --> 00:03:26,730 +on. And suppose we have 10 patients. And the last + +41 +00:03:26,730 --> 00:03:31,270 +one, for example, his or her blood pressure is + +42 +00:03:31,270 --> 00:03:38,000 +135. Now, these measures in January. Suppose these + +43 +00:03:38,000 --> 00:03:44,260 +patients now have a new treatment. Suppose they are + +44 +00:03:44,260 --> 00:03:51,800 +using now a drug B, a new drug, suppose for three + +45 +00:03:51,800 --> 00:03:59,000 +months. 
Then, we have new results. I mean new + +46 +00:03:59,000 --> 00:04:01,840 +measurements for their blood pressure, for + +47 +00:04:01,840 --> 00:04:02,920 +example, in April. + +48 +00:04:05,850 --> 00:04:10,810 +Now, the first person with a blood pressure of 145 + +49 +00:04:10,810 --> 00:04:15,770 +using drug A, it becomes, for example, 130 by + +50 +00:04:15,770 --> 00:04:21,730 +using drug B. The other one with 160, for example, + +51 +00:04:21,830 --> 00:04:27,410 +it becomes 150. 155 becomes 135. 135 becomes 120, + +52 +00:04:27,910 --> 00:04:35,360 +and so on. Now, this example is called before and + +53 +00:04:35,360 --> 00:04:39,740 +after. It means before using drug B and after + +54 +00:04:39,740 --> 00:04:44,400 +using drug B. And the question of interest is the + +55 +00:04:44,400 --> 00:04:48,860 +new drug effective or not. Effective means in this + +56 +00:04:48,860 --> 00:04:54,940 +case it reduces the high blood pressure. + +57 +00:04:57,980 --> 00:05:02,400 +This type of experiment is called related. + +58 +00:05:03,880 --> 00:05:08,840 +Sometimes it's called related. + +59 +00:05:09,920 --> 00:05:14,960 +Sometimes it's called paired. This name comes from + +60 +00:05:14,960 --> 00:05:18,860 +here we have two measurements for the same person. + +61 +00:05:19,880 --> 00:05:25,380 +So it looks like here, 145, 130. So this is pair + +62 +00:05:25,380 --> 00:05:29,380 +number one. The other pair or the second pair is + +63 +00:05:29,380 --> 00:05:34,830 +160, 150, and so on. So it's called paired, + +64 +00:05:35,290 --> 00:05:42,650 +sometimes called dependent, or matched samples. + +65 +00:05:45,110 --> 00:05:49,210 +So related, paired, dependent, or matched pairs, + +66 +00:05:49,310 --> 00:05:56,130 +the same meaning. So here we are testing about two + +67 +00:05:56,130 --> 00:05:59,870 +related populations. Again, it's called paired or + +68 +00:05:59,870 --> 00:06:01,810 +matched samples. They're repeated measures. 
+ +69 +00:06:02,770 --> 00:06:05,830 +Repeated means before and after for the same + +70 +00:06:05,830 --> 00:06:09,550 +individual, for the same item, for the same + +71 +00:06:09,550 --> 00:06:13,670 +person. Now, for example, suppose measurements for + +72 +00:06:13,670 --> 00:06:18,990 +January is denoted by x1, and the other one is x2. + +73 +00:06:21,830 --> 00:06:24,990 +Now, the first thing we have to do is to find or + +74 +00:06:24,990 --> 00:06:27,250 +to compute the difference between paired values. + +75 +00:06:27,990 --> 00:06:33,490 +So Di is the difference between measurement in + +76 +00:06:33,490 --> 00:06:38,050 +January minus measurements in April. So we have + +77 +00:06:38,050 --> 00:06:43,050 +the difference between the two values. So here we + +78 +00:06:43,050 --> 00:06:50,810 +have Di. Di is just x1 minus x2 or x2 minus x1. + +79 +00:06:52,530 --> 00:06:57,930 +For example, it's x1 minus x2. Now, the assumptions + +80 +00:06:57,930 --> 00:07:01,870 +are the same. We assume both populations are + +81 +00:07:01,870 --> 00:07:05,210 +normally distributed. Otherwise, if the + +82 +00:07:05,210 --> 00:07:08,710 +populations are not normal, we should have large + +83 +00:07:08,710 --> 00:07:12,370 +samples. It means the sample size should be at + +84 +00:07:12,370 --> 00:07:16,270 +least 30 in order to apply the central limit + +85 +00:07:16,270 --> 00:07:19,530 +theorem if the underlying population is not + +86 +00:07:19,530 --> 00:07:23,530 +normally distributed. So again, we have + +87 +00:07:23,530 --> 00:07:29,980 +measurements x1 before, and measurements x2 after. + +88 +00:07:32,400 --> 00:07:38,200 +So we have here items, 1, 2, for example, up to + +89 +00:07:38,200 --> 00:07:44,460 +10. We have x1, x2, before, after. We compute the + +90 +00:07:44,460 --> 00:07:47,040 +difference, which is x1 minus x2, for example. + +91 +00:07:48,400 --> 00:07:52,280 +Now, for this data. 
I mean, for the difference + +92 +00:07:52,280 --> 00:07:54,760 +data, just apply + +93 +00:07:57,290 --> 00:08:04,330 +one sample t-test. So, simply apply one sample t + +94 +00:08:04,330 --> 00:08:06,350 +-test for the difference. + +95 +00:08:10,770 --> 00:08:15,710 +For example, suppose here we have the first item as we + +96 +00:08:15,710 --> 00:08:21,570 +mentioned, for example, is 160 it becomes 150. So, x1 + +97 +00:08:21,570 --> 00:08:30,960 +minus x2 becomes 10. The second And then 145, 130. + +98 +00:08:31,380 --> 00:08:37,940 +So, it's 15. Suppose the third one is 151. It + +99 +00:08:37,940 --> 00:08:43,860 +becomes 155. So, it's negative 4 and so on. So, for + +100 +00:08:43,860 --> 00:08:48,580 +example, the last one is 175. It becomes 160. So, + +101 +00:08:48,580 --> 00:08:54,520 +it's 15. Now, look at these differences. And, just + +102 +00:08:54,520 --> 00:08:59,000 +apply one semantic test. It means find the + +103 +00:08:59,000 --> 00:09:02,680 +difference, the semantic mean of the difference, + +104 +00:09:02,820 --> 00:09:07,720 +which is d bar. So, it's just sum of d divided by + +105 +00:09:07,720 --> 00:09:14,070 +n. So, d. So this is the I. It's one I minus two I. + +106 +00:09:14,250 --> 00:09:17,450 +I means one, two, three, four, up to ten, for + +107 +00:09:17,450 --> 00:09:21,150 +example. So, compute the mean of the differences. + +108 +00:09:21,930 --> 00:09:24,930 +Also, compute the standard deviation of the + +109 +00:09:24,930 --> 00:09:28,590 +difference of the same equation we had before. Sum + +110 +00:09:28,590 --> 00:09:33,030 +D minus D bar squared divided by N minus one. + +111 +00:09:35,790 --> 00:09:39,630 +Then, use the standard formula for the T test, + +112 +00:09:40,700 --> 00:09:44,900 +which is D bar in this case instead of X bar minus + +113 +00:09:44,900 --> 00:09:51,920 +Mu of D divided by standard deviation of D divided + +114 +00:09:51,920 --> 00:09:55,580 +by square root. 
And, this statistic has T + +115 +00:09:55,580 --> 00:09:59,400 +distribution with degrees of freedom equals N + +116 +00:09:59,400 --> 00:10:02,080 +minus 1, the same as we have discussed in chapter + +117 +00:10:02,080 --> 00:10:08,440 +9. So, the new concept here. We have two + +118 +00:10:08,440 --> 00:10:12,990 +observations. Before and after. The first step + +119 +00:10:12,990 --> 00:10:17,150 +here, we compute the difference between before + +120 +00:10:17,150 --> 00:10:20,970 +minus after or after minus before. In this case, + +121 +00:10:21,470 --> 00:10:26,650 +there is only one difference in the sign of the + +122 +00:10:26,650 --> 00:10:31,070 +average. Because if we have x1 minus x2 and the + +123 +00:10:31,070 --> 00:10:37,450 +answer is plus, then if we switch from x2 to x1, D + +124 +00:10:37,450 --> 00:10:41,070 +should be negative or opposite sign. But the + +125 +00:10:41,070 --> 00:10:44,330 +standard deviation remains the same sign because + +126 +00:10:44,330 --> 00:10:47,590 +always standard deviation is positive. Here, we + +127 +00:10:47,590 --> 00:10:50,850 +have squared for D minus D bar. So, it doesn't + +128 +00:10:50,850 --> 00:10:56,510 +matter if we have X1 minus X2 or X2 minus X1. So, + +129 +00:10:56,510 --> 00:11:01,110 +that's all for two related samples. So, again, we + +130 +00:11:01,110 --> 00:11:05,140 +have to compute the ith pair difference, Di, where + +131 +00:11:05,140 --> 00:11:09,820 +Di equals x1 minus x2. Now, the point estimate for + +132 +00:11:09,820 --> 00:11:12,180 +the paired difference, D bar is just sum of D + +133 +00:11:12,180 --> 00:11:15,400 +divided by N. So, that's the average of the + +134 +00:11:15,400 --> 00:11:19,260 +difference. The sample standard deviation is SD, + +135 +00:11:19,580 --> 00:11:23,280 +which is sum D minus D bar squared divided by N + +136 +00:11:23,280 --> 00:11:26,820 +minus 1. And N is the number of pairs in the + +137 +00:11:26,820 --> 00:11:31,680 +paired samples. 
For sure, here we have the same + +138 +00:11:31,680 --> 00:11:36,040 +sample sizes before and after. I mean, if we start + +139 +00:11:36,040 --> 00:11:38,740 +with 10 patients, we should end with the same + +140 +00:11:38,740 --> 00:11:41,920 +number because here we have repeated measures. + +141 +00:11:43,040 --> 00:11:46,880 +Suppose, for example, patient number 10, we have a + +142 +00:11:46,880 --> 00:11:53,540 +score of 175. Then, this one is missing. In this + +143 +00:11:53,540 --> 00:11:57,600 +case, the total observation should be ignored. I + +144 +00:11:57,600 --> 00:12:00,440 +mean if one is missing, you should ignore the + +145 +00:12:00,440 --> 00:12:04,960 +total observation. The whole information should be + +146 +00:12:04,960 --> 00:12:09,100 +ignored. So, in this case, if this is missing, so + +147 +00:12:09,100 --> 00:12:13,000 +the sample size becomes 9 instead of 10. Even if + +148 +00:12:13,000 --> 00:12:17,880 +you have a score for before, but you should have + +149 +00:12:17,880 --> 00:12:20,120 +before and after scores. + +150 +00:12:22,340 --> 00:12:26,930 +Again, the T statistic. D bar minus the mean of D + +151 +00:12:26,930 --> 00:12:30,970 +divided by S of the difference divided by square + +152 +00:12:30,970 --> 00:12:34,630 +root of N. And, this statistic actually has N minus + +153 +00:12:34,630 --> 00:12:38,650 +1 degrees of freedom. So, simply find the + +154 +00:12:38,650 --> 00:12:43,630 +differences, then apply one sample T test for this + +155 +00:12:43,630 --> 00:12:48,990 +difference in order to test about related samples. + +156 +00:12:51,270 --> 00:12:58,210 +Any question? So, it's quite similar to the 170 + +157 +00:12:58,210 --> 00:13:01,270 +test. In this case, the null and alternative + +158 +00:13:01,270 --> 00:13:05,670 +hypothesis could be one of these. Either two + +159 +00:13:05,670 --> 00:13:10,410 +-tailed test. In this case, we are testing. 
Mu D + +160 +00:13:10,410 --> 00:13:14,010 +equals 0 against mu D does not equal 0. This means + +161 +00:13:14,010 --> 00:13:18,270 +if we assume mu D equals 0 and mu D is not 0, that + +162 +00:13:18,270 --> 00:13:25,130 +means drug A and B. They are equal here. I mean, + +163 +00:13:25,270 --> 00:13:28,930 +there is no effect under the null hypothesis. And + +164 +00:13:28,930 --> 00:13:30,930 +for the alternative hypothesis, there is an + +165 +00:13:30,930 --> 00:13:35,890 +effect. But we don't know if this effect is + +166 +00:13:35,890 --> 00:13:39,550 +positive or negative. So, there is an effect of + +167 +00:13:39,550 --> 00:13:42,050 +using a drug B. And that effect. We don't know + +168 +00:13:42,050 --> 00:13:47,070 +actually, if it is improved or does not improve the + +169 +00:13:47,070 --> 00:13:52,070 +blood pressure. On the other cases, for the lower + +170 +00:13:52,070 --> 00:13:55,590 +and upper tail, you have the exact direction either + +171 +00:13:55,590 --> 00:14:00,270 +mu d is not I'm sorry mu d is smaller than zero + +172 +00:14:00,270 --> 00:14:06,710 +that means mu d is smaller than zero. That means mu + +173 +00:14:06,710 --> 00:14:11,350 +x1 is smaller than mu x2. In this case, that means + +174 +00:14:11,350 --> 00:14:14,730 +the mean of blood pressure before is smaller than + +175 +00:14:14,730 --> 00:14:22,950 +the other one, mu d is positive. This means mu x1 is + +176 +00:14:22,950 --> 00:14:27,850 +greater than mu x2. Now, suppose the question is + +177 +00:14:27,850 --> 00:14:31,210 +does + +178 +00:14:31,210 --> 00:14:38,170 +drug B improve blood pressure? Now improve in this + +179 +00:14:38,170 --> 00:14:43,990 +case means reduces because if the blood pressure + +180 +00:14:43,990 --> 00:14:48,610 +becomes smaller than before using a drug B, that + +181 +00:14:48,610 --> 00:14:52,790 +means the drug B has a positive effect. 
So, in this + +182 +00:14:52,790 --> 00:14:55,370 +case, we should have this one, mu D greater than + +183 +00:14:55,370 --> 00:15:00,010 +0. That if your difference is x1 minus x2. + +184 +00:15:02,850 --> 00:15:10,110 +Make sense? Because if D is x1 minus x2, and under + +185 +00:15:10,110 --> 00:15:12,250 +the alternative hypothesis, you have mu D greater + +186 +00:15:12,250 --> 00:15:17,490 +than 0. That means, on average, blood pressure + +187 +00:15:17,490 --> 00:15:23,610 +using drug A is significantly greater than blood + +188 +00:15:23,610 --> 00:15:27,250 +pressure using drug B. That means drug B is more + +189 +00:15:27,250 --> 00:15:33,950 +effective than using drug A. Otherwise, if you use + +190 +00:15:33,950 --> 00:15:40,050 +x2 minus x1, it's vice versa. It should be mu X2 + +191 +00:15:40,050 --> 00:15:44,330 +smaller than mu X1. And, finally + +223 +00:17:46,670 --> 00:17:50,270 +or minus T alpha over two times standard deviation + +224 +00:17:50,270 --> 00:17:53,330 +of the difference divided by square root of N. And + +225 +00:17:53,330 --> 00:17:55,550 +we know that the standard deviation is given by + +226 +00:17:55,550 --> 00:18:00,070 +this equation. So I think nothing is new in this + +227 +00:18:00,070 --> 00:18:03,230 +case except that we should compute the difference + +228 +00:18:03,230 --> 00:18:09,030 +between measurements before and after. That's all. + +229 +00:18:09,930 --> 00:18:13,210 +Let's look at this simple example. + +230 +00:18:16,100 --> 00:18:21,760 +Assume you send your salespeople to a customer + +231 +00:18:21,760 --> 00:18:25,740 +service training workshop. And the question is, + +232 +00:18:26,160 --> 00:18:29,060 +has the training made a difference in the number + +233 +00:18:29,060 --> 00:18:29,720 +of complaints? + +234 +00:18:32,300 --> 00:18:38,520 +And here we have a small sample size of five. 
And we + +235 +00:18:38,520 --> 00:18:42,980 +have the number of complaints before attending the + +236 +00:18:42,980 --> 00:18:46,950 +customer service training workshop, and after the + +237 +00:18:46,950 --> 00:18:50,390 +training course. And again, the question is, has + +238 +00:18:50,390 --> 00:18:53,950 +the training made a difference in the number of + +239 +00:18:53,950 --> 00:18:58,430 +complaints? For example, salesperson number one + +240 +00:18:58,430 --> 00:19:06,070 +has six complaints before attending the training + +241 +00:19:06,070 --> 00:19:11,650 +workshop. And after that, he has four. + +242 +00:19:15,760 --> 00:19:20,000 +20 complaints before it becomes 6, 3 becomes 2, 0, + +243 +00:19:20,100 --> 00:19:24,260 +0, no complaints, 4 becomes 0. Now the question + +244 +00:19:24,260 --> 00:19:27,800 +is, has the training made a difference? In this + +245 +00:19:27,800 --> 00:19:31,040 +case, you don't know the direction. If this + +246 +00:19:31,040 --> 00:19:35,440 +training workshop has a positive effect, it means it + +247 +00:19:35,440 --> 00:19:40,290 +reduces the number of complaints. So we should + +248 +00:19:40,290 --> 00:19:43,650 +have a two-tailed test. Since there is no direction, + +249 +00:19:44,150 --> 00:19:46,250 +positive or negative, improved or does not + +250 +00:19:46,250 --> 00:19:50,430 +improve, we should have a two-tailed test. So that's + +251 +00:19:50,430 --> 00:19:54,050 +step number one. You have to scale the appropriate + +252 +00:19:54,050 --> 00:19:57,990 +null and alternate hypothesis in order to + +253 +00:19:57,990 --> 00:20:03,720 +determine the critical regions for this test. So + +254 +00:20:03,720 --> 00:20:09,040 +this example is a two-tailed test. Because it asks + +255 +00:20:09,040 --> 00:20:11,420 +if there is a difference. And they don't know if + +256 +00:20:11,420 --> 00:20:15,040 +this difference is positive or negative. 
Now, + +257 +00:20:15,160 --> 00:20:19,100 +simple calculations will give the average of D and + +258 +00:20:19,100 --> 00:20:21,540 +the standard deviation of D. So the first step, you + +259 +00:20:21,540 --> 00:20:26,200 +have to compute the difference, Di. Again, it + +260 +00:20:26,200 --> 00:20:30,140 +doesn't matter actually if x2 minus x1 or x1 minus + +261 +00:20:30,140 --> 00:20:35,380 +x2. For this, in this case, we have x2 minus x1, 4 + +262 +00:20:35,380 --> 00:20:40,120 +minus 6, negative 2, 6 minus 20, negative 14, 2 + +263 +00:20:40,120 --> 00:20:43,860 +minus 3, negative 1, then 0, 0 minus 4 is negative + +264 +00:20:43,860 --> 00:20:49,380 +4. Now the average for this difference is negative + +265 +00:20:49,380 --> 00:20:55,320 +4. If the differences are 1 minus 2, I mean before + +266 +00:20:55,320 --> 00:20:59,380 +minus after, you will have the same value but + +267 +00:20:59,380 --> 00:21:03,760 +positive. The standard deviation will not change, + +268 +00:21:04,760 --> 00:21:07,480 +remain the same, because always the standard + +269 +00:21:07,480 --> 00:21:11,960 +deviation is positive. So SD equals 5.67. So + +270 +00:21:11,960 --> 00:21:19,880 +simple calculations give BR to be negative 4.2, + +271 +00:21:21,200 --> 00:21:29,960 +and SD to be 5.67. In real life, you should + +272 +00:21:29,960 --> 00:21:35,020 +have a large sample in order to test. Here, it's + +273 +00:21:35,020 --> 00:21:38,120 +just n equals five. This example is just for + +274 +00:21:38,120 --> 00:21:41,040 +illustration. How can we use the t-test? But in + +275 +00:21:41,040 --> 00:21:45,440 +reality, you should have a larger sample size than + +276 +00:21:45,440 --> 00:21:49,160 +this one. So, n equals five is not enough in order + +277 +00:21:49,160 --> 00:21:52,500 +to determine if the training workshop is effective + +278 +00:21:52,500 --> 00:21:55,360 +or not. 
Because if you look carefully at these

+279
+00:21:55,360 --> 00:21:59,050
+values, the first person has six complaints, it

+280
+00:21:59,050 --> 00:22:03,610
+becomes four. So the difference is two. Twenty to

+281
+00:22:03,610 --> 00:22:08,650
+six is a large difference, about fourteen. Now a small

+282
+00:22:08,650 --> 00:22:12,810
+difference is one, then zero, then four. So

+283
+00:22:12,810 --> 00:22:18,350
+sometimes you cannot determine if the training

+284
+00:22:18,350 --> 00:22:23,790
+workshop is effective based on the small sample

+285
+00:22:23,790 --> 00:22:24,250
+size.

+286
+00:22:26,990 --> 00:22:30,910
+Now, the question again is, has the training made

+287
+00:22:30,910 --> 00:22:33,990
+a difference in the number of complaints at 1%

+288
+00:22:33,990 --> 00:22:37,330
+level of significance? So in this case, we are

+289
+00:22:37,330 --> 00:22:43,510
+using alpha to be 1%. Most

+290
+00:22:43,510 --> 00:22:46,810
+of the time, we are using 5%. In this example,

+291
+00:22:47,110 --> 00:22:53,310
+alpha is 1%. The null hypothesis, MUD equals zero.

+292
+00:22:53,630 --> 00:22:57,180
+Again, MUD is not zero. So here we are talking

+293
+00:22:57,180 --> 00:23:04,220
+about a two-tailed test. So we should compute two

+294
+00:23:04,220 --> 00:23:08,000
+critical values, t alpha over 2, N minus 1, plus

+295
+00:23:08,000 --> 00:23:14,000
+or minus. So plus or minus. Alpha is 1%. So alpha

+296
+00:23:14,000 --> 00:23:20,940
+over 2 is 0.005. And

+297
+00:23:20,940 --> 00:23:26,930
+degrees of freedom is always N minus 1. And we

+298
+00:23:26,930 --> 00:23:32,950
+have 5 minus 1 is 4. Look at the data we have, you

+299
+00:23:32,950 --> 00:23:40,770
+will figure out that the critical value is 4.604. 
+ +300 +00:23:41,290 --> 00:23:44,710 +So we reject the null hypothesis if the value of + +301 +00:23:44,710 --> 00:23:47,510 +the test statistic falls in the rejection regions + +302 +00:23:47,510 --> 00:23:47,890 +here. + +303 +00:23:50,780 --> 00:23:54,340 +or smaller than negative 4.6. So we should + +304 +00:23:54,340 --> 00:24:01,120 +calculate the test statistic. T-stat is just the + +305 +00:24:01,120 --> 00:24:03,880 +same equation, the same formula we had before. + +306 +00:24:07,420 --> 00:24:10,240 +D bar minus the mean of D divided by S over root + +307 +00:24:10,240 --> 00:24:20,070 +N. D bar is negative four minus MUD under H0 is 0. So + +308 +00:24:20,070 --> 00:24:22,050 +most of the time it's zero, but sometimes maybe, + +309 +00:24:22,230 --> 00:24:25,970 +for example, four or six, you should switch this + +310 +00:24:25,970 --> 00:24:31,990 +value here to block six instead of zero. But here, + +311 +00:24:32,550 --> 00:24:35,550 +we are talking about a difference of zero, so it + +312 +00:24:35,550 --> 00:24:43,890 +should be zero. Divide by S, which is five. Divide + +313 +00:24:43,890 --> 00:24:50,730 +by root N, and N is five. This gave negative 1.66. + +314 +00:24:52,790 --> 00:24:57,530 +Now the question is, is this value of negative 1 + +315 +00:24:57,530 --> 00:25:01,890 +.66 fall in the rejection regions in one of these + +316 +00:25:01,890 --> 00:25:07,390 +two? It's fall in the non-rejection regions, so we + +317 +00:25:07,390 --> 00:25:10,530 +don't reject the null hypothesis. So since this + +318 +00:25:10,530 --> 00:25:15,060 +statistic is not in the rejection region, then we + +319 +00:25:15,060 --> 00:25:20,060 +don't reject them. So now my conclusion is there + +320 +00:25:20,060 --> 00:25:22,780 +is not a significant change in the number of + +321 +00:25:22,780 --> 00:25:27,900 +complaints. Even there are some changes, but these + +322 +00:25:27,900 --> 00:25:32,180 +changes are not significant. 
I mean there is no + +323 +00:25:32,180 --> 00:25:35,800 +big difference between the number of complaints before + +324 +00:25:35,800 --> 00:25:40,360 +and after attending the training course or the + +325 +00:25:40,360 --> 00:25:41,680 +training workshop. + +326 +00:25:44,090 --> 00:25:49,850 +So that's for performing paired samples t-test. + +327 +00:25:51,250 --> 00:25:52,990 +Let's do another problem. + +328 +00:26:01,690 --> 00:26:04,490 +Let's look at one of the practice problems we + +329 +00:26:04,490 --> 00:26:04,830 +have. + +330 +00:26:23,360 --> 00:26:28,760 +to test the effectiveness of a business school + +331 +00:26:28,760 --> 00:26:33,520 +preparation course. To test the effectiveness of a + +332 +00:26:33,520 --> 00:26:36,980 +business school preparation course. Eight students + +333 +00:26:36,980 --> 00:26:40,240 +took a general business test before and after the + +334 +00:26:40,240 --> 00:26:45,000 +course. The results are given below. So we have + +335 +00:26:45,000 --> 00:26:51,560 +here eight students who took the general business test + +336 +00:26:51,560 --> 00:26:55,720 +before and after the course. So here we have + +337 +00:26:55,720 --> 00:27:02,920 +the business school preparation course, and we have + +338 +00:27:02,920 --> 00:27:06,740 +scores before taking this course, this preparation + +339 +00:27:06,740 --> 00:27:12,380 +course, and after. And the question is if + +340 +00:27:13,310 --> 00:27:17,370 +this course is effective or not. I mean, if the + +341 +00:27:17,370 --> 00:27:20,430 +course score before is smaller than after. I mean if the + +342 +00:27:20,430 --> 00:27:25,330 +course score after is above or is more than, or + +343 +00:27:25,330 --> 00:27:30,490 +greater than before. So we have this data for eight + +344 +00:27:30,490 --> 00:27:36,210 +different students. Now, the questions are: Number + +345 +00:27:36,210 --> 00:27:43,170 +of degrees of freedom. 
So since n equals 8, n + +346 +00:27:43,170 --> 00:27:51,190 +minus 1 is 7. So the first step, the first question, n + +347 +00:27:51,190 --> 00:27:52,710 +minus 1 is 7. + +348 +00:27:57,490 --> 00:28:02,010 +Next, the value of the sample mean difference is + +349 +00:28:02,010 --> 00:28:08,330 +if the difference scores reflect the results of + +350 +00:28:08,330 --> 00:28:13,090 +the exam after the course, minus the results of + +351 +00:28:13,090 --> 00:28:18,090 +the exam before. Let's see. I will use Excel in + +352 +00:28:18,090 --> 00:28:21,730 +order to find the means for before and after. + +353 +00:28:39,240 --> 00:28:47,480 +So these values are before, so x1 and after, so + +354 +00:28:47,480 --> 00:28:52,140 +the difference now, for example, it's x2 minus x1. So + +355 +00:28:52,140 --> 00:28:55,400 +x2 minus x1. + +356 +00:28:57,600 --> 00:29:01,260 +So we have these results. + +357 +00:29:09,130 --> 00:29:15,790 +Now, the average of this, the average of these values + +358 +00:29:15,790 --> 00:29:24,450 +from D1 all the way up to D9, so the average is 50, + +359 +00:29:24,450 --> 00:29:30,450 +so + +360 +00:29:30,450 --> 00:29:34,830 +that's the average, so + +361 +00:29:34,830 --> 00:29:39,520 +the value of the sample mean difference is 50. So + +362 +00:29:39,520 --> 00:29:42,740 +the answer is 50, if the difference score reflects + +363 +00:29:42,740 --> 00:29:50,040 +the result of the exam after minus 5. Next, the + +364 +00:29:50,040 --> 00:29:52,360 +value of the standard error of the difference + +365 +00:29:52,360 --> 00:29:58,420 +scores. Now, we should compute first SD. So here, + +366 +00:29:58,500 --> 00:30:05,830 +D bar is 50. First, we have to compute SD. He + +367 +00:30:05,830 --> 00:30:09,710 +asked about the standard error of this difference. + +368 +00:30:10,450 --> 00:30:17,150 +So SD over the square root of n. Now SD, the standard + +369 +00:30:17,150 --> 00:30:25,370 +deviation. 
Standard deviation of + +370 +00:30:25,370 --> 00:30:30,090 +the differences. Even some of these, some of the + +371 +00:30:30,090 --> 00:30:33,670 +differences are negative. But the standard + +372 +00:30:33,670 --> 00:30:38,430 +deviation should be positive. So I got the standard + +373 +00:30:38,430 --> 00:30:44,930 +deviation to be 65.02747. + +374 +00:30:46,310 --> 00:30:53,430 +I should divide this by the square root. The square root + +375 +00:30:53,430 --> 00:30:59,990 +of N. N is eight. I will get twenty-two point 22 + +376 +00:30:59,990 --> 00:31:03,530 +.99. So that's the standard error of the + +377 +00:31:03,530 --> 00:31:07,450 +differences. So the value of the standard of the + +378 +00:31:07,450 --> 00:31:12,550 +difference scores is 22.99. Now we have this + +379 +00:31:12,550 --> 00:31:17,350 +answer, 65. 65 again is the standard deviation. + +380 +00:31:18,830 --> 00:31:21,450 +Here, he asks about the standard error of the + +381 +00:31:21,450 --> 00:31:25,110 +difference. So this result should be divided by + +382 +00:31:25,110 --> 00:31:29,750 +the square root of n, which is eight. So I got the C + +383 +00:31:29,750 --> 00:31:31,750 +as the correct answer. + +384 +00:31:33,750 --> 00:31:39,130 +Next, what's the critical value for testing at the + +385 +00:31:39,130 --> 00:31:43,730 +5% level of significance, whether. Be careful, + +386 +00:31:44,290 --> 00:31:47,150 +whether the business school preparation course is + +387 +00:31:47,150 --> 00:31:53,030 +effective in improving exam scores. So again, here + +388 +00:31:53,030 --> 00:32:00,310 +we have D, X2 minus X1, after minus before. So it's + +389 +00:32:00,310 --> 00:32:08,130 +0, mu D equals 0 against H1, mu D. Do you think + +390 +00:32:08,130 --> 00:32:09,610 +positive or negative? Positive. + +391 +00:32:14,670 --> 00:32:18,170 +It's positive based on this definition, x2 minus + +392 +00:32:18,170 --> 00:32:23,430 +x1. 
That means, this means mu of x2 is greater + +393 +00:32:23,430 --> 00:32:27,310 +than mu of x1. That means the business school + +394 +00:32:27,310 --> 00:32:31,690 +preparation course is effective in improving exam + +395 +00:32:31,690 --> 00:32:36,390 +scores. So it means we have an upper-tailed test, the + +396 +00:32:36,390 --> 00:32:40,510 +alpha and n minus 1. And alpha for this specific + +397 +00:32:40,510 --> 00:32:45,890 +example is 5%, so we are looking for 5% and + +398 +00:32:45,890 --> 00:32:49,970 +degrees of freedom in this case is 7. Now just look + +399 +00:32:49,970 --> 00:32:57,650 +at the t-table, you will get this result, so the + +400 +00:32:57,650 --> 00:33:02,170 +critical region starts from this point all the way + +401 +00:33:02,170 --> 00:33:06,130 +up to infinity. So that's your critical value. You + +402 +00:33:06,130 --> 00:33:10,090 +may check this result by using the t + +403 +00:33:10,090 --> 00:33:18,450 +table. Now let's look at number 12. At 5% level of + +404 +00:33:18,450 --> 00:33:23,550 +significance, the decision for this hypothesis + +405 +00:33:23,550 --> 00:33:26,910 +would be. In order to answer this question, we + +406 +00:33:26,910 --> 00:33:33,930 +should compute either the P value or the test + +407 +00:33:33,930 --> 00:33:36,590 +statistic. We don't know the test statistic, so we + +408 +00:33:36,590 --> 00:33:40,750 +should calculate the T stat first. Because in + +409 +00:33:40,750 --> 00:33:43,810 +order to compute the p value, we have to compute + +410 +00:33:43,810 --> 00:33:47,310 +the statistic first. So this statistic is x bar + +411 +00:33:47,310 --> 00:33:51,330 +minus, I'm sorry, d bar minus the mean of D divided by + +412 +00:33:51,330 --> 00:33:58,130 +SD over root N. So that equals to: D bar is 50. + +413 +00:3 + +445 +00:36:36,640 --> 00:36:41,060 +using a critical value approach or p value + +446 +00:36:41,060 --> 00:36:45,160 +approach. 
Here we cannot use the confidence

+447
+00:36:45,160 --> 00:36:49,160
+interval approach because it's a one-tailed test. We

+448
+00:36:49,160 --> 00:36:52,760
+can use the two-sided confidence interval if we

+449
+00:36:52,760 --> 00:36:55,160
+have a two-tailed test. But in this case, there is

+450
+00:36:55,160 --> 00:36:58,480
+only one tail. So you have only two approaches,

+451
+00:36:58,640 --> 00:37:02,780
+either a critical value or the p value approach.

+452
+00:37:03,120 --> 00:37:05,800
+Let's see how can we find the p value of this

+453
+00:37:05,800 --> 00:37:09,520
+test. Now, the p value is given by.

+454
+00:37:19,840 --> 00:37:25,680
+value. Now we are looking for the probability

+455
+00:37:25,680 --> 00:37:32,160
+after 2.175. So if you look at the table you have

+456
+00:37:32,160 --> 00:37:39,180
+degrees of freedom 7. Look at this value. Most of

+457
+00:37:39,180 --> 00:37:43,100
+the time the t table does not give the exact p value.

+458
+00:37:43,760 --> 00:37:46,620
+So here you will see that your p value is between

+459
+00:37:46,620 --> 00:37:56,790
+0.025 and 0.05. If you look at the table, let's see

+460
+00:37:56,790 --> 00:38:01,710
+the

+461
+00:38:01,710 --> 00:38:08,850
+statistical table here for the t-test now

+462
+00:38:08,850 --> 00:38:14,030
+look at seven degrees of freedom we are looking

+463
+00:38:14,030 --> 00:38:21,510
+for the value of 2.17, so 2.17 lies between these

+464
+00:38:21,510 --> 00:38:27,390
+two values, 1.895 and 2.365. So your p value lies

+465
+00:38:27,390 --> 00:38:34,850
+between 0.025 and 0.05. So here my p value is

+466
+00:38:34,850 --> 00:38:42,910
+greater than 0.025 and smaller than 0.05. As we

+467
+00:38:42,910 --> 00:38:47,510
+mentioned before, always we

+468
+00:38:47,510 --> 00:38:50,050
+reject H0

+469
+00:38:51,230 --> 00:38:55,430
+if your p value is smaller than alpha, that's in

+470
+00:38:55,430 --> 00:38:58,730
+general. 
We reject the null hypothesis if your p + +471 +00:38:58,730 --> 00:39:02,010 +value is smaller than alpha. In this case alpha is + +472 +00:39:02,010 --> 00:39:06,630 +given to be 5%. Your p value is smaller + +473 +00:39:06,630 --> 00:39:08,810 +than 5% so we have to reject the null + +474 +00:39:08,810 --> 00:39:12,430 +hypothesis. So again we have the same decision: + +475 +00:39:12,430 --> 00:39:17,010 +reject the null hypothesis. Also, you may compute + +476 +00:39:17,010 --> 00:39:23,210 +the exact p value by using excel. You may use + +477 +00:39:23,210 --> 00:39:28,130 +the t distribution function. So here we have + +478 +00:39:28,130 --> 00:39:34,370 +statistics functions here. T distribution. So + +479 +00:39:34,370 --> 00:39:40,750 +t distribution. The value of the statistic, which + +480 +00:39:40,750 --> 00:39:42,790 +is 2.175 + +481 +00:39:47,490 --> 00:39:56,930 +So this value here is + +482 +00:39:56,930 --> 00:40:03,670 +2.17. Degrees of freedom is seven. Tails. We are + +483 +00:40:03,670 --> 00:40:09,810 +talking about one tail test. So it's one. So you + +484 +00:40:09,810 --> 00:40:12,810 +have this result. So your p value, the exact + +485 +00:40:12,810 --> 00:40:16,790 +answer is 0.033, which is the exact one. The approximate p + +486 +00:40:19,760 --> 00:40:24,560 +value is between 0.025 and 0.05. And for sure, this + +487 +00:40:24,560 --> 00:40:27,920 +value lies between 0.025 and 0.05. So since this p + +488 +00:40:27,920 --> 00:40:35,720 +value is smaller than alpha, then we reject the + +489 +00:40:35,720 --> 00:40:38,980 +null hypothesis. So go back to the practice + +490 +00:40:38,980 --> 00:40:42,040 +problems here. The exact answer is 0.031 using + +491 +00:40:42,040 --> 00:40:49,050 +Excel or between 0.025 and 0.05 by using a table with + +492 +00:40:49,050 --> 00:40:56,570 +seven degrees of freedom. + +493 +00:40:56,570 --> 00:40:59,890 +The last question for this table. 
In examining + +494 +00:41:02,450 --> 00:41:07,060 +differences, in examining the differences between + +495 +00:41:07,060 --> 00:41:09,900 +related samples, we are essentially sampling from + +496 +00:41:09,900 --> 00:41:14,020 +an underlying population of different scores, it's + +497 +00:41:14,020 --> 00:41:18,160 +correct. So that's a practice problem for using + +498 +00:41:18,160 --> 00:41:23,040 +paired sample t-tests. Any question? Because in + +499 +00:41:23,040 --> 00:41:33,820 +this question he asked about + +500 +00:41:33,820 --> 00:41:35,260 +this question he asked about + +501 +00:41:44,860 --> 00:41:50,120 +Under H0, since we are talking about what's the + +502 +00:41:50,120 --> 00:41:52,760 +critical value for testing at the 5% level of + +503 +00:41:52,760 --> 00:41:55,520 +significance, whether the business school + +504 +00:41:55,520 --> 00:42:00,040 +preparation course is effective. Effective means + +505 +00:42:00,040 --> 00:42:09,650 +the mean after. Here, D is X2 minus X1, after minus + +506 +00:42:09,650 --> 00:42:13,010 +before. So since we are talking about effective, + +507 +00:42:13,230 --> 00:42:19,730 +it means the mean after is greater than before. So + +508 +00:42:19,730 --> 00:42:23,750 +mean after minus mean before is positive. That + +509 +00:42:23,750 --> 00:42:28,610 +means mu D is positive. Now under H0, just use + +510 +00:42:28,610 --> 00:42:34,010 +either Mu equals zero just for simple writing the null + +511 +00:42:34,010 --> 00:42:37,190 +and alternate hypothesis. But to be more precise, + +512 +00:42:37,410 --> 00:42:42,610 +you should write Mu smaller than or equal to Mu. But actually, + +513 +00:42:42,750 --> 00:42:47,290 +when we compute the critical value, here we look + +514 +00:42:47,290 --> 00:42:52,990 +only at the alternate hypothesis. So your critical + +515 +00:42:52,990 --> 00:42:55,550 +value depends on the sign under the alternative + +516 +00:42:55,550 --> 00:42:59,530 +hypothesis. 
Your decision also based on the sign + +517 +00:42:59,530 --> 00:43:03,250 +of the alternative hypothesis. So always just look + +518 +00:43:03,250 --> 00:43:07,810 +at the sign under the alternative hypothesis and + +519 +00:43:07,810 --> 00:43:12,170 +ignore totally the sign under H0. Because here + +520 +00:43:12,170 --> 00:43:16,050 +it's an upper tailed test, so your critical value + +521 +00:43:16,050 --> 00:43:21,240 +should be to the right based on this sign. We + +522 +00:43:21,240 --> 00:43:26,100 +reject if the t statistic is greater than it. It comes from + +523 +00:43:26,100 --> 00:43:31,220 +this statement that is mu D is above 0. Is it + +524 +00:43:31,220 --> 00:43:36,820 +clear? The conclusion, + +525 +00:43:37,060 --> 00:43:40,600 +since we are rejecting the null hypothesis, that + +526 +00:43:40,600 --> 00:43:43,700 +means there is sufficient evidence to support the + +527 +00:43:43,700 --> 00:43:47,300 +alternative hypothesis. That's number one. That + +528 +00:43:47,300 --> 00:43:51,620 +means the score exam after taking the preparation + +529 +00:43:51,620 --> 00:43:55,940 +course is greater than the score exam before + +530 +00:43:55,940 --> 00:44:00,140 +taking the preparation course. That means the + +531 +00:44:00,140 --> 00:44:03,000 +business school preparation course does improve + +532 +00:44:03,000 --> 00:44:10,820 +exam score. That's the main conclusion for this + +533 +00:44:10,820 --> 00:44:17,640 +specific example. So far, we have discussed two + +534 +00:44:17,640 --> 00:44:22,450 +population means for independent samples and for + +535 +00:44:22,450 --> 00:44:28,630 +related samples. One is missing in order to + +536 +00:44:28,630 --> 00:44:31,630 +complete the story about hypothesis testing, which + +537 +00:44:31,630 --> 00:44:37,010 +is tests for two population proportions. 
That + +538 +00:44:37,010 --> 00:44:41,470 +means suppose you have two populations where + +539 +00:44:41,470 --> 00:44:46,270 +population proportions are pi1 and pi2, and our + +540 +00:44:46,270 --> 00:44:52,880 +goal is to test a hypothesis or form a confidence + +541 +00:44:52,880 --> 00:44:56,080 +interval for the difference between two population + +542 +00:44:56,080 --> 00:45:00,000 +proportions. So here we are interested in the + +543 +00:45:00,000 --> 00:45:02,880 +difference between pi1 and pi2. So we are + +544 +00:45:02,880 --> 00:45:06,660 +interested in pi1 minus pi2. Still we have the + +545 +00:45:06,660 --> 00:45:11,570 +same assumptions which are n times pi is at least + +546 +00:45:11,570 --> 00:45:15,990 +5, and n times 1 minus pi is also at least 5 for + +547 +00:45:15,990 --> 00:45:19,210 +the first population, and the same for the other + +548 +00:45:19,210 --> 00:45:21,610 +population. So these two conditions or two + +549 +00:45:21,610 --> 00:45:24,650 +assumptions should be satisfied in order to use + +550 +00:45:24,650 --> 00:45:31,350 +the z-statistic in this case. The point estimate, + +551 +00:45:31,510 --> 00:45:34,570 +if you remember, if we have only one population, P + +552 +00:45:34,570 --> 00:45:38,110 +was the first. It's a point estimate. + +553 +00:45:42,110 --> 00:45:46,970 +Now we are talking about an estimate of Pi 1 minus + +554 +00:45:46,970 --> 00:45:52,650 +Pi2. So that means P1 minus P2 is a point + +555 +00:45:52,650 --> 00:45:58,890 +estimate for the difference Pi1 minus Pi2. Also + +556 +00:45:58,890 --> 00:46:05,690 +P2 minus P1 is a point estimate of Pi2 minus Pi + +557 +00:46:05,690 --> 00:46:09,050 +1. So this is step number one. We have to find the + +558 +00:46:09,050 --> 00:46:13,340 +point estimate of the difference. So P1 minus P2 + +559 +00:46:13,340 --> 00:46:16,640 +is the point estimate for the difference pi 1 + +560 +00:46:16,640 --> 00:46:23,380 +minus pi2. 
Now, under the null hypothesis, always + +561 +00:46:23,380 --> 00:46:28,500 +we are assuming this difference is zero. So in the + +562 +00:46:28,500 --> 00:46:32,400 +null hypothesis, we assume the null is true. So + +563 +00:46:32,400 --> 00:46:36,680 +suppose the null is true, we assume pi1 equal pi + +564 +00:46:36,680 --> 00:46:40,810 +2. In this case, the pooled two sample + +565 +00:46:40,810 --> 00:46:44,670 +estimate p-bar is the old estimate for overall + +566 +00:46:44,670 --> 00:46:50,710 +proportion is called the overall + +567 +00:46:50,710 --> 00:46:55,170 +proportion. + +568 +00:46:58,230 --> 00:47:03,350 +Now p-bar equals. Now, if you remember the proportion + +569 +00:47:04,400 --> 00:47:08,480 +or the probability of success is X over N, the number + +570 +00:47:08,480 --> 00:47:11,300 +of successes divided by the sample size. That's if we + +571 +00:47:11,300 --> 00:47:13,800 +have only one sample. But if we have two + +572 +00:47:13,800 --> 00:47:17,740 +samples, then the proportion is the number of + +573 +00:47:17,740 --> 00:47:22,660 +successes in the two samples. So, suppose X1 and X2 + +574 +00:47:22,660 --> 00:47:25,320 +are the number of successes in the two samples + +575 +00:47:25,320 --> 00:47:29,100 +respectively, then the total number is given by X1 + +576 +00:47:29,100 --> 00:47:34,860 +plus X2 divided by the sample sizes, N1 plus N2. + +577 +00:47:35,340 --> 00:47:40,880 +So p-bar, the overall proportion is given by X1 + +578 +00:47:40,880 --> 00:47:47,530 +plus X2 divided by N1 plus N2. This one is called + +579 +00:47:47,530 --> 00:47:52,130 +the pooled estimate for the overall proportion. + +580 +00:47:56,090 --> 00:47:59,610 +Now, what's about the z-statistic? In order to + +581 +00:47:59,610 --> 00:48:02,070 +compute the z-statistic, we have to know the + +582 +00:48:02,070 --> 00:48:04,670 +standard error of the estimate. 
+ +583 +00:48:07,990 --> 00:48:10,090 +Now, the standard error of the estimate is given + +584 +00:48:10,090 --> 00:48:10,990 +by this equation, + +585 +00:48:13,990 --> 00:48:19,570 +p-bar times 1-p-bar times 1 over n1 plus 1 over n2. So this is called + +586 +00:48:19,570 --> 00:48:24,050 +the standard error of the difference. So if the + +587 +00:48:24,050 --> 00:48:31,930 +question is, what's the value of the standard error + +588 +00:48:31,930 --> 00:48:37,010 +of the difference, you should compute + +589 +00:48:48,180 --> 00:48:55,700 +In this case, your z statistic is always, if you + +590 +00:48:55,700 --> 00:49:00,760 +remember in chapter 6, the z score was x minus the + +591 +00:49:00,760 --> 00:49:05,680 +mean of x divided by sigma. In chapter 7, when we + +592 +00:49:05,680 --> 00:49:09,200 +talked about the sampling distribution, we had x + +593 +00:49:09,200 --> 00:49:14,160 +bar. So we had x bar minus the mean of x bar + +594 +00:49:14,160 --> 00:49:18,380 +divided by sigma of x bar, and that was x bar + +595 +00:49:18,380 --> 00:49:20,520 +minus the mean divided by sigma over the square root. + +596 +00:49:23,500 --> 00:49:27,040 +At the beginning of chapter 10, we talked about + +597 +00:49:27,040 --> 00:49:31,100 +the difference between these two, x1 bar minus x2 + +598 +00:49:31,100 --> 00:49:33,440 +bar, so minus the mean. + +599 +00:49:37,020 --> 00:49:40,300 +This term actually is the standard error of the + +600 +00:49:40,300 --> 00:49:44,120 +estimate. And the standard error was S squared p + +601 +00:49:44,120 --> 00:49:51,960 +multiplied by 1 over N1 plus 1 over N2. Now, when + +602 +00:49:51,960 --> 00:49:54,400 +we are talking about testing for the difference + +603 +00:49:54,400 --> 00:49:59,210 +between two proportions, this statistic is. 
The + +604 +00:49:59,210 --> 00:50:04,970 +estimate of the difference, which is p1 minus p2, + +605 +00:50:05,090 --> 00:50:10,250 +as we mentioned, minus the hypothesized value, + +606 +00:50:10,370 --> 00:50:13,870 +which is pi1 minus pi2, most of the time equals + +607 +00:50:13,870 --> 00:50:17,590 +0, divided by the standard error of the estimate + +608 +00:50:17,590 --> 00:50:21,650 +of this equation. So the standard error is p-bar + +609 +00:50:21,650 --> 00:50:24,330 +1 minus p-bar. + +610 +00:50:28,470 --> 00:50:32,250 +So let's use this statistic. P1 minus P2 is the + +611 +00:50:32,250 --> 00:50:35,930 +point estimate of Pi 1 minus Pi 2 minus the + +612 +00:50:35,930 --> 00:50:40,130 +hypothesized value always, or most of the time + +613 +00:50:40,130 --> 00:50:43,830 +this equals zero, divided by the square root of + +614 +00:50:43,830 --> 00:50:46,870 +this estimate. And the square root of this + +615 +00:50:46,870 --> 00:50:49,090 +estimate is given by this equation, which is the + +616 +00:50:49,090 --> 00:50:54,040 +square root of p-bar. P-bar again is the overall + +617 +00:50:54,040 --> 00:50:56,560 +proportion, or the pooled proportion, which is X1 + +618 +00:50:56,560 --> 00:50:59,940 +plus X2 divided by N1 plus N2. So we have p-bar + +619 +00:50:59,940 --> 00:51:04,920 +times 1 minus p-bar times 1 over N1 plus 1 over + +620 +00:51:04,920 --> 00:51:07,380 +N2. That's your Z statistic. + +621 +00:51:10,300 --> 00:51:15,320 +So that's the new term for testing about the + +622 +00:51:15,320 --> 00:51:17,900 +difference between two population proportions. + +623 +00:51:18,800 --> 00:51:23,720 +Now, what's about the null and alternative hypothesis? + +624 +00:51:24,380 --> 00:51:27,180 +This slide actually is the same as the one we had + +625 +00:51:27,180 --> 00:51:28,960 +discussed for the difference between two + +626 +00:51:28,960 --> 00:51:35,040 +populations, except we replace mus by bis. 
So here

+627
+00:51:35,040 --> 00:51:38,940
+we have pi1 equals pi2 by 2, instead of mu1 equals

+628
+00:51:38,940 --> 00:51:44,140
+mu2. So if we go back a little bit to the

+629
+00:51:44,140 --> 00:51:45,080
+beginning of this chapter,

+630
+00:51:47,800 --> 00:51:52,180
+The same as this slide. Here we have means. We

+631
+00:51:52,180 --> 00:51:56,560
+just replace the means by pi's and we'll get

+632
+00:51:56,560 --> 00:52:03,380
+similar upper, lower, and two-tailed tests. So we

+633
+00:52:03,380 --> 00:52:08,540
+have pi1 equal pi2 against one of these. Either

+634
+00:52:08,540 --> 00:52:11,340
+pi1 does not equal pi2, that's the two-tailed test,

+635
+00:52:11,780 --> 

diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M_postprocess.srt
new file mode 100644
index 0000000000000000000000000000000000000000..810e2b0f6fdedfa857b32b38b870c610b735bd91
--- /dev/null
+++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M_postprocess.srt
@@ -0,0 +1,2664 @@
+1
+00:00:08,940 --> 00:00:15,340
+Last time we talked about two sample tests and we

+2
+00:00:15,340 --> 00:00:23,240
+covered one case when the population means we are

+3
+00:00:23,240 --> 00:00:26,640
+selecting random samples and these samples are

+4
+00:00:26,640 --> 00:00:28,940
+independent. So that's case number one,

+5
+00:00:29,600 --> 00:00:33,740
+independent samples. And we assume sigma 1 and

+6
+00:00:33,740 --> 00:00:39,880
+sigma 2 are unknown and equal. And we have I

+7
+00:00:39,880 --> 00:00:45,000
+discussed this test. The test statistic was the

+8
+00:00:45,000 --> 00:00:49,600
+difference between the two sample means minus mu1

+9
+00:00:49,600 --> 00:00:54,460
+minus mu2 under H0. 
Most of the time, this 1 + +10 +00:00:54,460 --> 00:00:57,920 +equals 0 divided by the standard error of the + +11 +00:00:57,920 --> 00:01:02,320 +estimate, which is S squared B multiplied by 1 + +12 +00:01:02,320 --> 00:01:06,160 +over N, 1 plus 1 over N. S squared B is the bold + +13 +00:01:06,160 --> 00:01:07,360 +variance. + +14 +00:01:12,750 --> 00:01:17,250 +The other case, when sigma 1 and sigma 2 are + +15 +00:01:17,250 --> 00:01:24,150 +unknown, but they are not equal. In this case, the + +16 +00:01:24,150 --> 00:01:29,190 +assumptions remain the same except the last one. + +17 +00:01:29,890 --> 00:01:32,710 +Now, the last one becomes population variances are + +18 +00:01:32,710 --> 00:01:37,310 +unknown, the same as before, but we assume they + +19 +00:01:37,310 --> 00:01:40,590 +are not equal. So it cannot be assumed to be + +20 +00:01:40,590 --> 00:01:46,690 +equal. So in this case, Sigma 1 and + +21 +00:01:46,690 --> 00:01:55,570 +Sigma 2 unknown, and they are not equal. So we + +22 +00:01:55,570 --> 00:02:00,010 +assume both Sigmas unknown, but they are not + +23 +00:02:00,010 --> 00:02:04,850 +equal. In this case, we can use one of the + +24 +00:02:04,850 --> 00:02:12,050 +software packages as Excel Minitab SPSS. So I will + +25 +00:02:12,050 --> 00:02:14,930 +leave this part for the SPSS course, how can we + +26 +00:02:14,930 --> 00:02:18,430 +determine the test statistic if sigma is unknown + +27 +00:02:18,430 --> 00:02:24,270 +and we cannot assume they are equal. So that's the + +28 +00:02:24,270 --> 00:02:28,270 +case number one, when the samples are independent. + +29 +00:02:29,630 --> 00:02:32,810 +The second, suppose the populations of interest + +30 +00:02:32,810 --> 00:02:37,190 +are related. The most common example is before and + +31 +00:02:37,190 --> 00:02:37,550 +after. 
+ +32 +00:02:41,040 --> 00:02:49,600 +for example suppose we have patients who + +33 +00:02:49,600 --> 00:02:53,820 +are suffering from blood pressure and for example + +34 +00:02:53,820 --> 00:03:01,060 +suppose they use a drug A and + +35 +00:03:01,060 --> 00:03:03,700 +we have their blood pressures for example in + +36 +00:03:03,700 --> 00:03:08,590 +January for example number one Person number one, + +37 +00:03:08,750 --> 00:03:15,050 +his or her blood pressure was in January 145. The + +38 +00:03:15,050 --> 00:03:18,730 +second person, his blood pressure, for example, is + +39 +00:03:18,730 --> 00:03:23,190 +160. The third one, for example, is 155, and so + +40 +00:03:23,190 --> 00:03:26,730 +on. And suppose we have 10 patients. And the last + +41 +00:03:26,730 --> 00:03:31,270 +one, for example, his or her blood pressure is + +42 +00:03:31,270 --> 00:03:38,000 +135. Now these measures. in January. Suppose these + +43 +00:03:38,000 --> 00:03:44,260 +patients now have new treatment, suppose they are + +44 +00:03:44,260 --> 00:03:51,800 +using now a drug B, new drug, suppose for three + +45 +00:03:51,800 --> 00:03:59,000 +months. Then we have new results, I mean new + +46 +00:03:59,000 --> 00:04:01,840 +measurements for their blood pressure, for + +47 +00:04:01,840 --> 00:04:02,920 +example, in April. + +48 +00:04:05,850 --> 00:04:10,810 +Now, the first person with a blood pressure 145 + +49 +00:04:10,810 --> 00:04:15,770 +using drug A, it becomes, for example, 130 by + +50 +00:04:15,770 --> 00:04:21,730 +using drug B. The other one with 160, for example, + +51 +00:04:21,830 --> 00:04:27,410 +it becomes 150. 155 becomes 135. 135 becomes 120, + +52 +00:04:27,910 --> 00:04:35,360 +and so on. Now, this example is called before and + +53 +00:04:35,360 --> 00:04:39,740 +after. It means before using drug B and after + +54 +00:04:39,740 --> 00:04:44,400 +using drug B. And the question of interest is the + +55 +00:04:44,400 --> 00:04:48,860 +new drug effective or not. 
Effective means in this + +56 +00:04:48,860 --> 00:04:54,940 +case it reduces the high blood pressure. + +57 +00:04:57,980 --> 00:05:02,400 +This type of experiment is called related. + +58 +00:05:03,880 --> 00:05:08,840 +Sometimes it's called related. + +59 +00:05:09,920 --> 00:05:14,960 +Sometimes it's called pair. This name comes from + +60 +00:05:14,960 --> 00:05:18,860 +here we have two measurements for the same person. + +61 +00:05:19,880 --> 00:05:25,380 +So it looks like here, 145, 130. So this is pair + +62 +00:05:25,380 --> 00:05:29,380 +number one. The other pair or the second pair is + +63 +00:05:29,380 --> 00:05:34,830 +160, 150, and so on. So it's called paired, + +64 +00:05:35,290 --> 00:05:42,650 +sometimes called dependent, or matched samples. + +65 +00:05:45,110 --> 00:05:49,210 +So related, paired, dependent, or matched pairs, + +66 +00:05:49,310 --> 00:05:56,130 +the same meaning. So here we are testing about two + +67 +00:05:56,130 --> 00:05:59,870 +related populations. Again, it's called paired or + +68 +00:05:59,870 --> 00:06:01,810 +matched samples. They're repeated measures. + +69 +00:06:02,770 --> 00:06:05,830 +Repeated means before and after for the same + +70 +00:06:05,830 --> 00:06:09,550 +individual, for the same item, for the same + +71 +00:06:09,550 --> 00:06:13,670 +person. Now, for example, suppose measurements for + +72 +00:06:13,670 --> 00:06:18,990 +January is denoted by x1, and the other one is x2. + +73 +00:06:21,830 --> 00:06:24,990 +Now, the first thing we have to do is to find or + +74 +00:06:24,990 --> 00:06:27,250 +to compute the difference between paired values. + +75 +00:06:27,990 --> 00:06:33,490 +So Di is the difference between measurement in + +76 +00:06:33,490 --> 00:06:38,050 +January minus measurements in April. So we have + +77 +00:06:38,050 --> 00:06:43,050 +the difference between the two values. So here we + +78 +00:06:43,050 --> 00:06:50,810 +have Di. Di is just x1 minus x2 or x2 minus x1. 
+ +79 +00:06:52,530 --> 00:06:57,930 +For example, it's x1 minus x2. Now the assumptions + +80 +00:06:57,930 --> 00:07:01,870 +are the same. We assume both populations are + +81 +00:07:01,870 --> 00:07:05,210 +normally distributed. Otherwise, if the + +82 +00:07:05,210 --> 00:07:08,710 +populations are not normal, we should have large + +83 +00:07:08,710 --> 00:07:12,370 +samples. It means the sample size should be at + +84 +00:07:12,370 --> 00:07:16,270 +least 30 in order to apply the central limit + +85 +00:07:16,270 --> 00:07:19,530 +theorem if the underlying population is not + +86 +00:07:19,530 --> 00:07:23,530 +normally distributed. So again, we have + +87 +00:07:23,530 --> 00:07:29,980 +measurements x. before, and measurements x2 after. + +88 +00:07:32,400 --> 00:07:38,200 +So we have here items, 1, 2, for example, up to + +89 +00:07:38,200 --> 00:07:44,460 +10. We have x1, x2, before, after. We compute the + +90 +00:07:44,460 --> 00:07:47,040 +difference, which is x1 minus x2, for example. + +91 +00:07:48,400 --> 00:07:52,280 +Now, for this data, I mean for the difference + +92 +00:07:52,280 --> 00:07:54,760 +data, just apply + +93 +00:07:57,290 --> 00:08:04,330 +one sample t-test. So simply apply one sample t + +94 +00:08:04,330 --> 00:08:06,350 +-test for the difference. + +95 +00:08:10,770 --> 00:08:15,710 +For example suppose here we have first item as we + +96 +00:08:15,710 --> 00:08:21,570 +mentioned for example is 160 it becomes 150. So x1 + +97 +00:08:21,570 --> 00:08:30,960 +minus x2 becomes 10. The second And then 145, 130. + +98 +00:08:31,380 --> 00:08:37,940 +So it's 15. Suppose the third one is 151. It + +99 +00:08:37,940 --> 00:08:43,860 +becomes 155. So it's negative 4 and so on. So for + +100 +00:08:43,860 --> 00:08:48,580 +example, the last one is 175. It becomes 160. So + +101 +00:08:48,580 --> 00:08:54,520 +it's 15. Now look at these differences. And just + +102 +00:08:54,520 --> 00:08:59,000 +apply one semantic test. 
It means find the + +103 +00:08:59,000 --> 00:09:02,680 +difference, the sample mean of the difference, + +104 +00:09:02,820 --> 00:09:07,720 +which is d bar. So it's just sum of d divided by + +105 +00:09:07,720 --> 00:09:14,070 +n. So d. So this is the I. It's x1i minus x2i. + +106 +00:09:14,250 --> 00:09:17,450 +I means one, two, three, four, up to ten, for + +107 +00:09:17,450 --> 00:09:21,150 +example. So compute the mean of the differences. + +108 +00:09:21,930 --> 00:09:24,930 +Also compute the standard deviation of the + +109 +00:09:24,930 --> 00:09:28,590 +difference of the same equation we had before. Sum + +110 +00:09:28,590 --> 00:09:33,030 +D minus D bar squared divided by N minus one. + +111 +00:09:35,790 --> 00:09:39,630 +Then use the standard formula for the T test. + +112 +00:09:40,700 --> 00:09:44,900 +which is D bar in this case instead of X bar minus + +113 +00:09:44,900 --> 00:09:51,920 +Mu of D divided by standard deviation of D divided + +114 +00:09:51,920 --> 00:09:55,580 +by square root. And this statistic has T + +115 +00:09:55,580 --> 00:09:59,400 +distribution with degrees of freedom equals N + +116 +00:09:59,400 --> 00:10:02,080 +minus 1, the same as we have discussed in chapter + +117 +00:10:02,080 --> 00:10:08,440 +9. So the new concept here, we have two + +118 +00:10:08,440 --> 00:10:12,990 +observations. before and after. The first step + +119 +00:10:12,990 --> 00:10:17,150 +here, we compute the difference between before + +120 +00:10:17,150 --> 00:10:20,970 +minus after or after minus before. In this case, + +121 +00:10:21,470 --> 00:10:26,650 +there is only one difference in the sign of the + +122 +00:10:26,650 --> 00:10:31,070 +average. Because if we have x1 minus x2 and the + +123 +00:10:31,070 --> 00:10:37,450 +answer is plus, then if we switch from x2 to x1, D + +124 +00:10:37,450 --> 00:10:41,070 +should be negative or opposite sign. 
But the + +125 +00:10:41,070 --> 00:10:44,330 +standard deviation remains the same sign because + +126 +00:10:44,330 --> 00:10:47,590 +always standard deviation is positive. Here we + +127 +00:10:47,590 --> 00:10:50,850 +have squared for D minus D bar. So it doesn't + +128 +00:10:50,850 --> 00:10:56,510 +matter if we have X1 minus X2 or X2 minus X1. So + +129 +00:10:56,510 --> 00:11:01,110 +that's all for two related samples. So again, we + +130 +00:11:01,110 --> 00:11:05,140 +have to compute the ith pair difference, Di. where + +131 +00:11:05,140 --> 00:11:09,820 +Di equals x1 minus x2. Now the point estimate for + +132 +00:11:09,820 --> 00:11:12,180 +the paired difference D bar is just sum of D + +133 +00:11:12,180 --> 00:11:15,400 +divided by N. So that's the average of the + +134 +00:11:15,400 --> 00:11:19,260 +difference. The sample standard deviation is SD, + +135 +00:11:19,580 --> 00:11:23,280 +which is sum D minus D bar squared divided by N + +136 +00:11:23,280 --> 00:11:26,820 +minus 1. And N is the number of pairs in the + +137 +00:11:26,820 --> 00:11:31,680 +paired samples. For sure, here we have the same + +138 +00:11:31,680 --> 00:11:36,040 +sample sizes before and after. I mean, if we start + +139 +00:11:36,040 --> 00:11:38,740 +with 10 patients, we should end with the same + +140 +00:11:38,740 --> 00:11:41,920 +number, because here we have repeated measures. + +141 +00:11:43,040 --> 00:11:46,880 +Suppose, for example, patient number 10, we have a + +142 +00:11:46,880 --> 00:11:53,540 +score of 175, then this one is missing. In this + +143 +00:11:53,540 --> 00:11:57,600 +case, the total observation should be ignored. I + +144 +00:11:57,600 --> 00:12:00,440 +mean, if one is missing, you should ignore the + +145 +00:12:00,440 --> 00:12:04,960 +total observation. The whole information should be + +146 +00:12:04,960 --> 00:12:09,100 +ignored. 
So in this case, if this is missing, so + +147 +00:12:09,100 --> 00:12:13,000 +the sample size becomes 9 instead of 10. Even if + +148 +00:12:13,000 --> 00:12:17,880 +you have a score for before, but you should have + +149 +00:12:17,880 --> 00:12:20,120 +before and after scores. + +150 +00:12:22,340 --> 00:12:26,930 +Again, the T statistic. D bar minus the mean of D + +151 +00:12:26,930 --> 00:12:30,970 +divided by S of the difference divided by square + +152 +00:12:30,970 --> 00:12:34,630 +root of N. And this statistic actually has N minus + +153 +00:12:34,630 --> 00:12:38,650 +1 degrees of freedom. So simply find the + +154 +00:12:38,650 --> 00:12:43,630 +differences, then apply one sample T test for this + +155 +00:12:43,630 --> 00:12:48,990 +difference in order to test about related samples. + +156 +00:12:51,270 --> 00:12:58,210 +Any question? So it's quite similar to the 170 + +157 +00:12:58,210 --> 00:13:01,270 +test. In this case, the null and alternative + +158 +00:13:01,270 --> 00:13:05,670 +hypothesis could be one of these. Either two + +159 +00:13:05,670 --> 00:13:10,410 +-tailed test. In this case, we are testing. Mu D + +160 +00:13:10,410 --> 00:13:14,010 +equals 0 against mu D does not equal 0. This means + +161 +00:13:14,010 --> 00:13:18,270 +if we assume mu D equals 0 and mu D is not 0, that + +162 +00:13:18,270 --> 00:13:25,130 +means drug A and B They are equal here. I mean, + +163 +00:13:25,270 --> 00:13:28,930 +there is no effect under the null hypothesis. And + +164 +00:13:28,930 --> 00:13:30,930 +for the alternative hypothesis, there is an + +165 +00:13:30,930 --> 00:13:35,890 +effect. But we don't know that if this effect is + +166 +00:13:35,890 --> 00:13:39,550 +positive or negative. So there is an effect of + +167 +00:13:39,550 --> 00:13:42,050 +using a drug B. And that effect, we don't know + +168 +00:13:42,050 --> 00:13:47,070 +actually if it is improved or does not improve the + +169 +00:13:47,070 --> 00:13:52,070 +blood pressure. 
On the other cases, for the lower + +170 +00:13:52,070 --> 00:13:55,590 +and upper tail you have the exact direction either + +171 +00:13:55,590 --> 00:14:00,270 +mu d is not i'm sorry mu d is smaller than zero + +172 +00:14:00,270 --> 00:14:06,710 +that means mu d is smaller than zero that means mu + +173 +00:14:06,710 --> 00:14:11,350 +x1 is smaller than mu x2 in this case that means + +174 +00:14:11,350 --> 00:14:14,730 +the mean of blood pressure before is smaller than + +175 +00:14:14,730 --> 00:14:22,950 +the other one mu d is positive this means Mu x1 is + +176 +00:14:22,950 --> 00:14:27,850 +greater than Mu x2. Now suppose the question is + +177 +00:14:27,850 --> 00:14:31,210 +does + +178 +00:14:31,210 --> 00:14:38,170 +drug B improve blood pressure? Now improve in this + +179 +00:14:38,170 --> 00:14:43,990 +case means reduces because if the blood pressure + +180 +00:14:43,990 --> 00:14:48,610 +becomes smaller than Before using a drug B, that + +181 +00:14:48,610 --> 00:14:52,790 +means the drug B has positive effect. So in this + +182 +00:14:52,790 --> 00:14:55,370 +case, we should have this one, mu D greater than + +183 +00:14:55,370 --> 00:15:00,010 +0. That if your difference is x1 minus x2. + +184 +00:15:02,850 --> 00:15:10,110 +Make sense? Because if D is x1 minus x2, and under + +185 +00:15:10,110 --> 00:15:12,250 +the alternative hypothesis, you have mu D greater + +186 +00:15:12,250 --> 00:15:17,490 +than 0. That means, on average, Blood pressure + +187 +00:15:17,490 --> 00:15:23,610 +using drug A is significantly greater than blood + +188 +00:15:23,610 --> 00:15:27,250 +pressure using drug B. That means drug B is more + +189 +00:15:27,250 --> 00:15:33,950 +effective than using drug A. Otherwise, if you use + +190 +00:15:33,950 --> 00:15:40,050 +X2 minus X1, it's vice versa. It should be mu X2 + +191 +00:15:40,050 --> 00:15:44,330 +smaller than mu X1. And finally, you will end with + +192 +00:15:44,330 --> 00:15:47,890 +the same conclusion. 
So actually, it doesn't + +193 +00:15:47,890 --> 00:15:53,930 +matter if you state D as x1 minus x2 or x2 minus + +194 +00:15:53,930 --> 00:15:57,370 +x1. You have to be careful when you state the null + +195 +00:15:57,370 --> 00:16:02,650 +and alternative hypothesis. That's for stating the + +196 +00:16:02,650 --> 00:16:05,690 +appropriate null and alternative hypothesis. And + +197 +00:16:05,690 --> 00:16:09,410 +actually, it depends on the problem itself. Now, + +198 +00:16:09,470 --> 00:16:11,450 +for the rejection regions, it's the same as we + +199 +00:16:11,450 --> 00:16:14,490 +discussed before. If we are talking about two + +200 +00:16:14,490 --> 00:16:19,050 +-tailed test, Always we reject if the test + +201 +00:16:19,050 --> 00:16:22,690 +statistic, this value, falls in the rejection + +202 +00:16:22,690 --> 00:16:25,550 +regions. Here there are two rejection regions, one + +203 +00:16:25,550 --> 00:16:31,510 +to the right of an upper critical value, which is T + +204 +00:16:31,510 --> 00:16:34,390 +alpha over 2. So we reject if T statistic is + +205 +00:16:34,390 --> 00:16:37,670 +greater than T alpha over 2, or if T statistic is + +206 +00:16:37,670 --> 00:16:41,270 +smaller than negative T alpha over 2. That's for + +207 +00:16:41,270 --> 00:16:44,510 +two-tailed test. But if we are talking about one + +208 +00:16:44,510 --> 00:16:49,380 +-tailed test, And for the lower tail, in this + +209 +00:16:49,380 --> 00:16:52,900 +case, the rejection region in the lower side, so + +210 +00:16:52,900 --> 00:16:55,280 +we reject if the T statistic is smaller than + +211 +00:16:55,280 --> 00:17:00,740 +negative T alpha. For upper tail T test, if the + +212 +00:17:00,740 --> 00:17:04,640 +alternative is mu D is greater than zero, then we + +213 +00:17:04,640 --> 00:17:07,620 +reject H0 if T statistic is greater than T + +214 +00:17:07,620 --> 00:17:09,920 +alpha. 
So it's quite similar to the one we + +215 +00:17:09,920 --> 00:17:14,240 +discussed for the means, for the one sample mean. + +216 +00:17:15,850 --> 00:17:19,390 +The confidence interval for the difference, for + +217 +00:17:19,390 --> 00:17:22,070 +the mean difference, is the same as the one we had + +218 +00:17:22,070 --> 00:17:26,430 +discussed before. The previous, the old one was X + +219 +00:17:26,430 --> 00:17:32,250 +bar plus the point estimate, which was X bar plus + +220 +00:17:32,250 --> 00:17:38,070 +or minus T over 2 S over root N. Now, for the + +221 +00:17:38,070 --> 00:17:41,130 +differences, we have the average of the + +222 +00:17:41,130 --> 00:17:46,670 +difference, which is D bar, instead of X bar plus + +223 +00:17:46,670 --> 00:17:50,270 +or minus T alpha over two times standard deviation + +224 +00:17:50,270 --> 00:17:53,330 +of the difference divided by square root of N. And + +225 +00:17:53,330 --> 00:17:55,550 +we know that the standard deviation is given by + +226 +00:17:55,550 --> 00:18:00,070 +this equation. So I think nothing is new in this + +227 +00:18:00,070 --> 00:18:03,230 +case except that we should compute the difference + +228 +00:18:03,230 --> 00:18:09,030 +between measurements before and after. That's all. + +229 +00:18:09,930 --> 00:18:13,210 +Let's look at this simple example. + +230 +00:18:16,100 --> 00:18:21,760 +Assume you send your salespeople to a customer + +231 +00:18:21,760 --> 00:18:25,740 +service training workshop. And the question is, + +232 +00:18:26,160 --> 00:18:29,060 +has the training made a difference in the number + +233 +00:18:29,060 --> 00:18:29,720 +of complaints? + +234 +00:18:32,300 --> 00:18:38,520 +And here we have small sample size of five. And we + +235 +00:18:38,520 --> 00:18:42,980 +have number of complaints before attending the + +236 +00:18:42,980 --> 00:18:46,950 +customer service training workshop. and after the + +237 +00:18:46,950 --> 00:18:50,390 +training course. 
And again, the question is, has + +238 +00:18:50,390 --> 00:18:53,950 +the training made a difference in the number of + +239 +00:18:53,950 --> 00:18:58,430 +complaints? For example, salesperson number one + +240 +00:18:58,430 --> 00:19:06,070 +has six complaints before attending the training + +241 +00:19:06,070 --> 00:19:11,650 +workshop. And after that, he has four. + +242 +00:19:15,760 --> 00:19:20,000 +20 complaints before it becomes 6, 3 becomes 2, 0, + +243 +00:19:20,100 --> 00:19:24,260 +0, no complaints, 4 becomes 0. Now the question + +244 +00:19:24,260 --> 00:19:27,800 +is, has the training made a difference? In this + +245 +00:19:27,800 --> 00:19:31,040 +case, you don't know the direction. If this + +246 +00:19:31,040 --> 00:19:35,440 +training workshop has positive effect, it means it + +247 +00:19:35,440 --> 00:19:40,290 +reduces the number of complaints. So we should + +248 +00:19:40,290 --> 00:19:43,650 +have two-tailed test. Since there is no direction, + +249 +00:19:44,150 --> 00:19:46,250 +positive or negative, improved or does not + +250 +00:19:46,250 --> 00:19:50,430 +improve, we should have two-tailed test. So that's + +251 +00:19:50,430 --> 00:19:54,050 +step number one. You have to scale the appropriate + +252 +00:19:54,050 --> 00:19:57,990 +null and alternate hypothesis in order to + +253 +00:19:57,990 --> 00:20:03,720 +determine the critical regions for this test. So + +254 +00:20:03,720 --> 00:20:09,040 +this example is a two-tailed test. Because it asks + +255 +00:20:09,040 --> 00:20:11,420 +if there is a difference. And they don't know if + +256 +00:20:11,420 --> 00:20:15,040 +this difference is positive or negative. Now, + +257 +00:20:15,160 --> 00:20:19,100 +simple calculations will give the average of D and + +258 +00:20:19,100 --> 00:20:21,540 +the standard deviation of D. So first step, you + +259 +00:20:21,540 --> 00:20:26,200 +have to compute the difference, Di. 
Again, it + +260 +00:20:26,200 --> 00:20:30,140 +doesn't matter actually if x2 minus x1 or x1 minus + +261 +00:20:30,140 --> 00:20:35,380 +x2. For this, in this case, we have x2 minus x1, 4 + +262 +00:20:35,380 --> 00:20:40,120 +minus 6 negative 2, 6 minus 20 negative 14, 2 + +263 +00:20:40,120 --> 00:20:43,860 +minus 3 negative 1, then 0, 0 minus 4 is negative + +264 +00:20:43,860 --> 00:20:49,380 +4. Now the average for this difference is negative + +265 +00:20:49,380 --> 00:20:55,320 +4. If the differences are 1 minus 2, I mean before + +266 +00:20:55,320 --> 00:20:59,380 +minus after, you will have the same value but + +267 +00:20:59,380 --> 00:21:03,760 +positive. The standard deviation will not change, + +268 +00:21:04,760 --> 00:21:07,480 +remain the same, because always the standard + +269 +00:21:07,480 --> 00:21:11,960 +deviation is positive. So SD equals 5.67. So + +270 +00:21:11,960 --> 00:21:19,880 +simple calculations give BR to be negative 4.2, + +271 +00:21:21,200 --> 00:21:29,960 +and SD to be 5.67. In the real life, you should + +272 +00:21:29,960 --> 00:21:35,020 +have large sample in order to test. Here, it's + +273 +00:21:35,020 --> 00:21:38,120 +just n equals five. This example is just for + +274 +00:21:38,120 --> 00:21:41,040 +illustration. How can we use the t-test? But in + +275 +00:21:41,040 --> 00:21:45,440 +reality, you should have larger sample size than + +276 +00:21:45,440 --> 00:21:49,160 +this one. So, n equals five is not enough in order + +277 +00:21:49,160 --> 00:21:52,500 +to determine if the training workshop is effective + +278 +00:21:52,500 --> 00:21:55,360 +or not. Because if you look carefully at these + +279 +00:21:55,360 --> 00:21:59,050 +values, The first person has six complaints, it + +280 +00:21:59,050 --> 00:22:03,610 +becomes four. So the difference is two. Twenty to + +281 +00:22:03,610 --> 00:22:08,650 +six is large difference, about fourteen. 
Now small + +282 +00:22:08,650 --> 00:22:12,810 +difference is one, then zero, then four. So + +283 +00:22:12,810 --> 00:22:18,350 +sometimes you cannot determine if the training + +284 +00:22:18,350 --> 00:22:23,790 +workshop is effective based on the small sample + +285 +00:22:23,790 --> 00:22:24,250 +size. + +286 +00:22:26,990 --> 00:22:30,910 +Now, the question again is, has the training made + +287 +00:22:30,910 --> 00:22:33,990 +a difference in the number of complaints at 1% + +288 +00:22:33,990 --> 00:22:37,330 +level of significance? So in this case, we are + +289 +00:22:37,330 --> 00:22:43,510 +using alpha to be 1%. Most + +290 +00:22:43,510 --> 00:22:46,810 +of the time, we are using 5%. In this example, + +291 +00:22:47,110 --> 00:22:53,310 +alpha is 1%. Null hypothesis, MUD equals zero. + +292 +00:22:53,630 --> 00:22:57,180 +Again, MUD is not zero. So here we are talking + +293 +00:22:57,180 --> 00:23:04,220 +about two-tiered test. So we should compute two + +294 +00:23:04,220 --> 00:23:08,000 +critical values, P alpha over 2, N minus 1, plus + +295 +00:23:08,000 --> 00:23:14,000 +or minus. So plus or minus. Alpha is 1%. So alpha + +296 +00:23:14,000 --> 00:23:20,940 +over 2 is 0, 0. So we are looking for 0, 0, 5. And + +297 +00:23:20,940 --> 00:23:26,930 +degrees of freedom is always N minus 1. And we + +298 +00:23:26,930 --> 00:23:32,950 +have 5 minus 1 is 4. Look at the data we have, you + +299 +00:23:32,950 --> 00:23:40,770 +will figure out that the critical value is 4.604. + +300 +00:23:41,290 --> 00:23:44,710 +So we reject the null hypothesis if the value of + +301 +00:23:44,710 --> 00:23:47,510 +the test statistic falls in the rejection regions + +302 +00:23:47,510 --> 00:23:47,890 +here. + +303 +00:23:50,780 --> 00:23:54,340 +or smaller than negative 4.6. So we should + +304 +00:23:54,340 --> 00:24:01,120 +calculate the test statistic. T-stat is just the + +305 +00:24:01,120 --> 00:24:03,880 +same equation, the same formula we had before. 
+ +306 +00:24:07,420 --> 00:24:10,240 +D bar minus the mean of D divided by S over root + +307 +00:24:10,240 --> 00:24:20,070 +N. D bar is negative minus MUD under H0 is 0. So + +308 +00:24:20,070 --> 00:24:22,050 +most of the time it's zero, but sometimes maybe, + +309 +00:24:22,230 --> 00:24:25,970 +for example, four or six, you should switch this + +310 +00:24:25,970 --> 00:24:31,990 +value here to block six instead of zero. But here, + +311 +00:24:32,550 --> 00:24:35,550 +we are talking about difference of zero, so it + +312 +00:24:35,550 --> 00:24:43,890 +should be zero. Divide by S, which is five. Divide + +313 +00:24:43,890 --> 00:24:50,730 +by root N, and N is five. This gave negative 1.66. + +314 +00:24:52,790 --> 00:24:57,530 +Now the question is, is this value of negative 1 + +315 +00:24:57,530 --> 00:25:01,890 +.66 full in the rejection regions in one of these + +316 +00:25:01,890 --> 00:25:07,390 +two? It's full in the non-rejection regions, so we + +317 +00:25:07,390 --> 00:25:10,530 +don't reject the null hypothesis. So since this + +318 +00:25:10,530 --> 00:25:15,060 +statistic is not in the rejection region, then we + +319 +00:25:15,060 --> 00:25:20,060 +don't reject them. So now my conclusion is there + +320 +00:25:20,060 --> 00:25:22,780 +is not a significant change in the number of + +321 +00:25:22,780 --> 00:25:27,900 +complaints. Even there are some changes, but these + +322 +00:25:27,900 --> 00:25:32,180 +changes are not significant. I mean there is no + +323 +00:25:32,180 --> 00:25:35,800 +big difference between number of complaints before + +324 +00:25:35,800 --> 00:25:40,360 +and after attending the training course or the + +325 +00:25:40,360 --> 00:25:41,680 +training workshop. + +326 +00:25:44,090 --> 00:25:49,850 +So that's for performing paired samples t-test. + +327 +00:25:51,250 --> 00:25:52,990 +Let's do another problem. 
+ +328 +00:26:01,690 --> 00:26:04,490 +Let's look at one of the practice problems we + +329 +00:26:04,490 --> 00:26:04,830 +have. + +330 +00:26:23,360 --> 00:26:28,760 +to test the effectiveness of a business school + +331 +00:26:28,760 --> 00:26:33,520 +preparation course. To test the effectiveness of a + +332 +00:26:33,520 --> 00:26:36,980 +business school preparation course. Eight students + +333 +00:26:36,980 --> 00:26:40,240 +took a general business test before and after the + +334 +00:26:40,240 --> 00:26:45,000 +course. The results are given below. So we have + +335 +00:26:45,000 --> 00:26:51,560 +here eight students took the general business test + +336 +00:26:51,560 --> 00:26:55,720 +before and after the course. So here we have + +337 +00:26:55,720 --> 00:27:02,920 +business school preparation course and we have + +338 +00:27:02,920 --> 00:27:06,740 +scores before taking this course, this preparation + +339 +00:27:06,740 --> 00:27:12,380 +course and after. And the question is if + +340 +00:27:13,310 --> 00:27:17,370 +this course is effective or not. I mean if the + +341 +00:27:17,370 --> 00:27:20,430 +course before is smaller than after. I mean if the + +342 +00:27:20,430 --> 00:27:25,330 +course score after is above or is more than or + +343 +00:27:25,330 --> 00:27:30,490 +greater than. So we have this data for eight + +344 +00:27:30,490 --> 00:27:36,210 +different students. Now the questions are, Number + +345 +00:27:36,210 --> 00:27:43,170 +of degrees of freedom. So since n equals 8, n + +346 +00:27:43,170 --> 00:27:51,190 +minus 1 is 7. So first step, first question, n + +347 +00:27:51,190 --> 00:27:52,710 +minus 1 is 7. + +348 +00:27:57,490 --> 00:28:02,010 +Next. The value of the sample mean difference is + +349 +00:28:02,010 --> 00:28:08,330 +if the difference scores reflect the results of + +350 +00:28:08,330 --> 00:28:13,090 +the exam after, the course after minus the results + +351 +00:28:13,090 --> 00:28:18,090 +of the exam before. Let's see. 
I will use Excel in + +352 +00:28:18,090 --> 00:28:21,730 +order to find the means for before and after. + +353 +00:28:39,240 --> 00:28:47,480 +So these values are before. So x1 and after. So + +354 +00:28:47,480 --> 00:28:52,140 +difference now, for example, it's x2 minus x1. So + +355 +00:28:52,140 --> 00:28:55,400 +x2 minus x1. + +356 +00:28:57,600 --> 00:29:01,260 +So we have these results. + +357 +00:29:09,130 --> 00:29:15,790 +now the average of this average of these values + +358 +00:29:15,790 --> 00:29:24,450 +from D1 all the way up to D9 so the average is 50 + +359 +00:29:24,450 --> 00:29:30,450 +so + +360 +00:29:30,450 --> 00:29:34,830 +that's the average so + +361 +00:29:34,830 --> 00:29:39,520 +the value of the sample mean difference is 50. So + +362 +00:29:39,520 --> 00:29:42,740 +the answer is 50 if the difference score reflects + +363 +00:29:42,740 --> 00:29:50,040 +the result of the exam after minus 5. Next, the + +364 +00:29:50,040 --> 00:29:52,360 +value of the standard error of the difference + +365 +00:29:52,360 --> 00:29:58,420 +scores. Now, we should compute first SD. So here, + +366 +00:29:58,500 --> 00:30:05,830 +D bar is 50. First, we have to compute SD. He + +367 +00:30:05,830 --> 00:30:09,710 +asked about the standard error of this difference. + +368 +00:30:10,450 --> 00:30:17,150 +So SD over square root of 1. Now SD, the standard + +369 +00:30:17,150 --> 00:30:25,370 +deviation. Standard deviation of + +370 +00:30:25,370 --> 00:30:30,090 +the differences. Even some of these, some of the + +371 +00:30:30,090 --> 00:30:33,670 +differences are negative. But the standard + +372 +00:30:33,670 --> 00:30:38,430 +deviation should be positive. So I got standard + +373 +00:30:38,430 --> 00:30:44,930 +deviation to be 65.02747. + +374 +00:30:46,310 --> 00:30:53,430 +I should divide this by square root. Square root + +375 +00:30:53,430 --> 00:30:59,990 +of N. N is eight. I will get twenty-two point 22 + +376 +00:30:59,990 --> 00:31:03,530 +.99. 
So that's the standard error of the + +377 +00:31:03,530 --> 00:31:07,450 +differences. So the value of the standard of the + +378 +00:31:07,450 --> 00:31:12,550 +difference scores is 22.99. Now we have this + +379 +00:31:12,550 --> 00:31:17,350 +answer 65. 65 again is the standard deviation. + +380 +00:31:18,830 --> 00:31:21,450 +Here he asks about the standard error of the + +381 +00:31:21,450 --> 00:31:25,110 +reference. So this result should be divided by + +382 +00:31:25,110 --> 00:31:29,750 +square root of n, which is eight. So I got the C + +383 +00:31:29,750 --> 00:31:31,750 +is the correct answer. + +384 +00:31:33,750 --> 00:31:39,130 +Next, what's the critical value for testing at the + +385 +00:31:39,130 --> 00:31:43,730 +5% level of significance whether Be careful, + +386 +00:31:44,290 --> 00:31:47,150 +whether the business school preparation course is + +387 +00:31:47,150 --> 00:31:53,030 +effective in improving exam scores. So again, here + +388 +00:31:53,030 --> 00:32:00,310 +we have D, X2 minus X1 after minus before. So it's + +389 +00:32:00,310 --> 00:32:08,130 +0, mu D equals 0 against H1 mu D. You think + +390 +00:32:08,130 --> 00:32:09,610 +positive or negative? Positive. + +391 +00:32:14,670 --> 00:32:18,170 +It's positive based on this definition, x2 minus + +392 +00:32:18,170 --> 00:32:23,430 +x1. That means, this means mu of x2 is greater + +393 +00:32:23,430 --> 00:32:27,310 +than mu of x1. That means the business school + +394 +00:32:27,310 --> 00:32:31,690 +preparation course is effective in improving exam + +395 +00:32:31,690 --> 00:32:36,390 +scores. So it means we have upper third test, the + +396 +00:32:36,390 --> 00:32:40,510 +alpha and n minus 1. 
And alpha for this specific + +397 +00:32:40,510 --> 00:32:45,890 +example is 5%, so we are looking for 5% and + +398 +00:32:45,890 --> 00:32:49,970 +degrees of freedom in this case is 7 now just look + +399 +00:32:49,970 --> 00:32:57,650 +at the t table you will get this result so the + +400 +00:32:57,650 --> 00:33:02,170 +critical region starts from this point all the way + +401 +00:33:02,170 --> 00:33:06,130 +up to infinity so that's your critical value one + +402 +00:33:06,130 --> 00:33:10,090 +point you may check this result by using the t + +403 +00:33:10,090 --> 00:33:18,450 +table Now let's look at number 12. At 5% level of + +404 +00:33:18,450 --> 00:33:23,550 +significance, the decision for this hypothesis + +405 +00:33:23,550 --> 00:33:26,910 +would be. In order to answer this question, we + +406 +00:33:26,910 --> 00:33:33,930 +should compute either the B value or the test + +407 +00:33:33,930 --> 00:33:36,590 +statistic. We don't know the test statistic, so we + +408 +00:33:36,590 --> 00:33:40,750 +should calculate the T stat first. Because in + +409 +00:33:40,750 --> 00:33:43,810 +order to compute the b value, we have to compute + +410 +00:33:43,810 --> 00:33:47,310 +this statistic first. So this statistic is x bar + +411 +00:33:47,310 --> 00:33:51,330 +minus, I'm sorry, d bar minus mean of D divided by + +412 +00:33:51,330 --> 00:33:58,130 +SD over root N. So that equals to. D bar is 50. + +413 +00:34:00,000 --> 00:34:04,920 +minus zero, because here mu D equals zero. Again, + +414 +00:34:05,140 --> 00:34:12,120 +divide by SD over square root of N. Either 65.02 + +415 +00:34:12,120 --> 00:34:15,880 +divided by eight, square root of eight, or just + +416 +00:34:15,880 --> 00:34:20,040 +take this result. So divide by 22, that's easier. + +417 +00:34:22,420 --> 00:34:29,320 +So now 55. By doing Excel again, it's 50. Divide + +418 +00:34:29,320 --> 00:34:32,720 +by this result, 2.175. + +419 +00:34:35,100 --> 00:34:38,580 +Approximately 2.175. 
So the value of the test + +420 +00:34:38,580 --> 00:34:45,660 +statistic is 2.175. Now the question is, is this + +421 +00:34:45,660 --> 00:34:49,960 +value all in the rejection region? Since the + +422 +00:34:49,960 --> 00:34:53,180 +critical value was 1.89, and we are talking about + +423 +00:34:53,180 --> 00:34:57,460 +other TLC tests, So this value falls in the + +424 +00:34:57,460 --> 00:35:01,140 +rejection region, so we reject. So since T + +425 +00:35:01,140 --> 00:35:09,760 +statistic, T step equals 2.175 greater than 1.895, + +426 +00:35:10,660 --> 00:35:17,540 +so the answer is reject the null hypothesis. So in + +427 +00:35:17,540 --> 00:35:22,200 +this case, we reject the null hypothesis, number + +428 +00:35:22,200 --> 00:35:27,950 +13. At 5% level of significance, the conclusion + +429 +00:35:27,950 --> 00:35:32,890 +for this hypothesis test would be, since we reject + +430 +00:35:32,890 --> 00:35:35,970 +the null hypothesis, that means we have sufficient + +431 +00:35:35,970 --> 00:35:39,330 +evidence to support the alternative hypothesis. It + +432 +00:35:39,330 --> 00:35:43,010 +means the business school preparation course does + +433 +00:35:43,010 --> 00:35:47,790 +improve exam score. Since we are supporting + +434 +00:35:47,790 --> 00:35:51,110 +immunity greater than zero, that means the average + +435 +00:35:52,190 --> 00:35:57,910 +of the scores after taking the preparation course + +436 +00:35:57,910 --> 00:36:01,350 +is greater or significantly greater than or higher + +437 +00:36:01,350 --> 00:36:04,310 +than the average before taking the preparation + +438 +00:36:04,310 --> 00:36:09,330 +course. So that means we have improvement. So part + +439 +00:36:09,330 --> 00:36:13,390 +A, the first one, is the correct answer. So the + +440 +00:36:13,390 --> 00:36:16,390 +business school preparation course does improve + +441 +00:36:16,390 --> 00:36:17,910 +exam scores. 
+ +442 +00:36:24,220 --> 00:36:27,540 +14, the calculated value of the statistic is 2 + +443 +00:36:27,540 --> 00:36:32,440 +.175, the same as we did. Now, as we mentioned + +444 +00:36:32,440 --> 00:36:36,640 +before, we can reach the same conclusion either by + +445 +00:36:36,640 --> 00:36:41,060 +using a critical value approach or b value + +446 +00:36:41,060 --> 00:36:45,160 +approach. Here we cannot use the confidence + +447 +00:36:45,160 --> 00:36:49,160 +interval approach because It's one-tailed test. We + +448 +00:36:49,160 --> 00:36:52,760 +can use the two-sided confidence interval if we + +449 +00:36:52,760 --> 00:36:55,160 +have two-tailed test. But in this case, there is + +450 +00:36:55,160 --> 00:36:58,480 +only one tail. So you have only two approaches, + +451 +00:36:58,640 --> 00:37:02,780 +either a critical value or the B value approach. + +452 +00:37:03,120 --> 00:37:05,800 +Let's see how can we find the B value of this + +453 +00:37:05,800 --> 00:37:09,520 +test. Now, the B value is given by. + +454 +00:37:19,840 --> 00:37:25,680 +value. Now we are looking for the probability + +455 +00:37:25,680 --> 00:37:32,160 +after 2.175. So if you look at the table you have + +456 +00:37:32,160 --> 00:37:39,180 +degrees of freedom 7. Look at this value. Most of + +457 +00:37:39,180 --> 00:37:43,100 +the time T table does not give the exact B value. + +458 +00:37:43,760 --> 00:37:46,620 +So here you will see that your B value is between + +459 +00:37:46,620 --> 00:37:56,790 +0 to 5. and 0.5 if you look at the table let's see + +460 +00:37:56,790 --> 00:38:01,710 +the + +461 +00:38:01,710 --> 00:38:08,850 +statistical table here for t-test now + +462 +00:38:08,850 --> 00:38:14,030 +look at seven degrees of freedom we are looking + +463 +00:38:14,030 --> 00:38:21,510 +for the value of 2.17 so 2.17 lies between these + +464 +00:38:21,510 --> 00:38:27,390 +two values, 1.895 to 1.365. 
So your B value lies
+
+465
+00:38:27,390 --> 00:38:34,850
+between 0.025 and 0.05. So here my B value is
+
+466
+00:38:34,850 --> 00:38:42,910
+greater than 0.025 and smaller than 5%. As we
+
+467
+00:38:42,910 --> 00:38:47,510
+mentioned before, always we
+
+468
+00:38:47,510 --> 00:38:50,050
+reject H0
+
+469
+00:38:51,230 --> 00:38:55,430
+if your b value is smaller than alpha that's in
+
+470
+00:38:55,430 --> 00:38:58,730
+general we reject the null hypothesis if your b
+
+471
+00:38:58,730 --> 00:39:02,010
+value is smaller than alpha in this case alpha is
+
+472
+00:39:02,010 --> 00:39:06,630
+given to be five percent your b value is smaller
+
+473
+00:39:06,630 --> 00:39:08,810
+than five percent so we have to reject the null
+
+474
+00:39:08,810 --> 00:39:12,430
+hypothesis so again we have the same decision
+
+475
+00:39:12,430 --> 00:39:17,010
+reject the null hypothesis also you may compute
+
+476
+00:39:17,010 --> 00:39:23,210
+the exact b value by using excel you may use
+
+477
+00:39:23,210 --> 00:39:28,130
+the t distribution function so here we have
+
+478
+00:39:28,130 --> 00:39:34,370
+statistics functions here t distribution so
+
+479
+00:39:34,370 --> 00:39:40,750
+t distribution oh the value of the statistic which
+
+480
+00:39:40,750 --> 00:39:42,790
+is 2.175
+
+481
+00:39:47,490 --> 00:39:56,930
+So this value here is
+
+482
+00:39:56,930 --> 00:40:03,670
+2.17 degrees of freedom is seven tails. We are
+
+483
+00:40:03,670 --> 00:40:09,810
+talking about one tail test. So it's one. So you
+
+484
+00:40:09,810 --> 00:40:12,810
+have this result. So your B value, the exact
+
+485
+00:40:12,810 --> 00:40:16,790
+answer is 0.
+
+486
+00:40:19,760 --> 00:40:24,560
+0331, which is the exact one. The approximate p
+
+487
+00:40:24,560 --> 00:40:27,920
+-value is between 0.025 and 0.05. And for sure, this
+
+488
+00:40:27,920 --> 00:40:35,720
+value lies between 0.025 and 0.05. 
So since this p
+
+489
+00:40:35,720 --> 00:40:38,980
+-value is smaller than alpha, then we reject the
+
+490
+00:40:38,980 --> 00:40:42,040
+null hypothesis. So go back to the practice
+
+491
+00:40:42,040 --> 00:40:49,050
+problems here. The exact answer is 0.0331, using
+
+492
+00:40:49,050 --> 00:40:56,570
+Excel, or between 0.025 and 0.05 by using a table with
+
+493
+00:40:56,570 --> 00:40:59,890
+seven degrees of freedom.
+
+494
+00:41:02,450 --> 00:41:07,060
+The last question for this table. In examining
+
+495
+00:41:07,060 --> 00:41:09,900
+differences, in examining the differences between
+
+496
+00:41:09,900 --> 00:41:14,020
+related samples, we are essentially sampling from
+
+497
+00:41:14,020 --> 00:41:18,160
+an underlying population of difference scores, it's
+
+498
+00:41:18,160 --> 00:41:23,040
+correct. So that's a practice problem for using
+
+499
+00:41:23,040 --> 00:41:33,820
+paired sample t-test. Any question? Because in
+
+500
+00:41:33,820 --> 00:41:35,260
+this question he asked about
+
+501
+00:41:44,860 --> 00:41:50,120
+Under H0, since we are talking about what's the
+
+502
+00:41:50,120 --> 00:41:52,760
+critical value for testing at 5% level of
+
+503
+00:41:52,760 --> 00:41:55,520
+significance, whether the business school
+
+504
+00:41:55,520 --> 00:42:00,040
+preparation course is effective. Effective means
+
+505
+00:42:00,040 --> 00:42:09,650
+the mean after. Here, D is X2 minus X1, after minus
+
+506
+00:42:09,650 --> 00:42:13,010
+before. So since we are talking about effective,
+
+507
+00:42:13,230 --> 00:42:19,730
+it means mean after is greater than before. So
+
+508
+00:42:19,730 --> 00:42:23,750
+mean after minus mean before is positive. That
+
+509
+00:42:23,750 --> 00:42:28,610
+means mu D is positive. Now under H0, just use
+
+510
+00:42:28,610 --> 00:42:34,010
+either Mu equals zero just for simple writing null
+
+511
+00:42:34,010 --> 00:42:37,190
+and alternate hypothesis. 
But to be more precise,
+
+512
+00:42:37,410 --> 00:42:42,610
+you should write Mu D smaller than or equal to zero. But actually,
+
+513
+00:42:42,750 --> 00:42:47,290
+when we compute the critical value, here we look
+
+514
+00:42:47,290 --> 00:42:52,990
+only at the alternate hypothesis. So your critical
+
+515
+00:42:52,990 --> 00:42:55,550
+value depends on the sign under the alternative
+
+516
+00:42:55,550 --> 00:42:59,530
+hypothesis, your decision also based on the sign
+
+517
+00:42:59,530 --> 00:43:03,250
+of the alternative hypothesis. So always just look
+
+518
+00:43:03,250 --> 00:43:07,810
+at the sign under the alternative hypothesis and
+
+519
+00:43:07,810 --> 00:43:12,170
+ignore totally the sign under H0. Because here
+
+520
+00:43:12,170 --> 00:43:16,050
+it's an upper-tailed test, so your critical value
+
+521
+00:43:16,050 --> 00:43:21,240
+should be to the right based on this sign. We
+
+522
+00:43:21,240 --> 00:43:26,100
+reject if T statistic greater than the critical value; it comes from
+
+523
+00:43:26,100 --> 00:43:31,220
+this statement, that is, Mu D is above zero. Is it
+
+524
+00:43:31,220 --> 00:43:36,820
+clear? The conclusion,
+
+525
+00:43:37,060 --> 00:43:40,600
+since we are rejecting the null hypothesis, that
+
+526
+00:43:40,600 --> 00:43:43,700
+means there is sufficient evidence to support the
+
+527
+00:43:43,700 --> 00:43:47,300
+alternative hypothesis. That's number one. That
+
+528
+00:43:47,300 --> 00:43:51,620
+means the score exam after taking the preparation
+
+529
+00:43:51,620 --> 00:43:55,940
+course is greater than the score exam before
+
+530
+00:43:55,940 --> 00:44:00,140
+taking the preparation course. That means the
+
+531
+00:44:00,140 --> 00:44:03,000
+business school preparation course does improve
+
+532
+00:44:03,000 --> 00:44:10,820
+exam score. That's the main conclusion for this
+
+533
+00:44:10,820 --> 00:44:17,640
+specific example. So far we have discussed two
+
+534
+00:44:17,640 --> 00:44:22,450
+population means. 
for independent samples and for + +535 +00:44:22,450 --> 00:44:28,630 +related samples. One is missing in order to + +536 +00:44:28,630 --> 00:44:31,630 +complete the story about hypothesis testing, which + +537 +00:44:31,630 --> 00:44:37,010 +is tests for two population proportions. That + +538 +00:44:37,010 --> 00:44:41,470 +means suppose you have two populations where + +539 +00:44:41,470 --> 00:44:46,270 +population proportions are pi 1 and pi 2, and our + +540 +00:44:46,270 --> 00:44:52,880 +goal is to test a hypothesis or form a confidence + +541 +00:44:52,880 --> 00:44:56,080 +interval for the difference between two population + +542 +00:44:56,080 --> 00:45:00,000 +proportion. So here we are interested in the + +543 +00:45:00,000 --> 00:45:02,880 +difference between pi 1 and pi 2. So we are + +544 +00:45:02,880 --> 00:45:06,660 +interested in pi 1 minus pi 2. Still we have the + +545 +00:45:06,660 --> 00:45:11,570 +same assumptions which are n times pi is at least + +546 +00:45:11,570 --> 00:45:15,990 +5, and n times 1 minus pi is also at least 5 for + +547 +00:45:15,990 --> 00:45:19,210 +the first population, and the same for the other + +548 +00:45:19,210 --> 00:45:21,610 +population. So these two conditions or two + +549 +00:45:21,610 --> 00:45:24,650 +assumptions should be satisfied in order to use + +550 +00:45:24,650 --> 00:45:31,350 +the Z statistic in this case. The point estimate, + +551 +00:45:31,510 --> 00:45:34,570 +if you remember, if we have only one population, P + +552 +00:45:34,570 --> 00:45:38,110 +was first. It's a point estimate. + +553 +00:45:42,110 --> 00:45:46,970 +Now we are talking about an estimate of Pi 1 minus + +554 +00:45:46,970 --> 00:45:52,650 +Pi 2. So that means P1 minus P2 is a point + +555 +00:45:52,650 --> 00:45:58,890 +estimate for the difference Pi 1 minus Pi 2. Also + +556 +00:45:58,890 --> 00:46:05,690 +P2 minus P1 is a point estimate of Pi 2 minus Pi + +557 +00:46:05,690 --> 00:46:09,050 +1. 
So this is step number one. We have to find the
+
+558
+00:46:09,050 --> 00:46:13,340
+point estimate of the difference. So B1 minus B2
+
+559
+00:46:13,340 --> 00:46:16,640
+is the point estimate for the difference pi 1
+
+560
+00:46:16,640 --> 00:46:23,380
+minus pi 2. Now, under the null hypothesis, always
+
+561
+00:46:23,380 --> 00:46:28,500
+we are assuming this difference is zero. So in the
+
+562
+00:46:28,500 --> 00:46:32,400
+null hypothesis, we assume the null is true. So
+
+563
+00:46:32,400 --> 00:46:36,680
+suppose the null is true, we assume pi 1 equal pi
+
+564
+00:46:36,680 --> 00:46:40,810
+2. In this case, we pool the two sample
+
+565
+00:46:40,810 --> 00:46:44,670
+estimates; that is, the pooled estimate for overall
+
+566
+00:46:44,670 --> 00:46:50,710
+proportion is B dash, which is called the overall
+
+567
+00:46:50,710 --> 00:46:55,170
+proportion.
+
+568
+00:46:58,230 --> 00:47:03,350
+Now B dash equals. Now if you remember proportion
+
+569
+00:47:04,400 --> 00:47:08,480
+Or the probability of success is X over N, number
+
+570
+00:47:08,480 --> 00:47:11,300
+of successes divided by the sample size. That's if
+
+571
+00:47:11,300 --> 00:47:13,800
+we have only one sample. But if we have two
+
+572
+00:47:13,800 --> 00:47:17,740
+samples, then the proportion is number of
+
+573
+00:47:17,740 --> 00:47:22,660
+successes in the two samples So suppose X1 and X2
+
+574
+00:47:22,660 --> 00:47:25,320
+are the number of successes in the two samples
+
+575
+00:47:25,320 --> 00:47:29,100
+respectively, then total number is given by X1
+
+576
+00:47:29,100 --> 00:47:34,860
+plus X2 divided by the sample sizes, N1 plus N2.
+
+577
+00:47:35,340 --> 00:47:40,880
+So B dash for the overall proportion is given by X1
+
+578
+00:47:40,880 --> 00:47:47,530
+plus X2 divided by N1 plus N2. This one is called
+
+579
+00:47:47,530 --> 00:47:52,130
+the pooled estimate for the overall proportion.
+
+580
+00:47:56,090 --> 00:47:59,610
+Now, what's about the z-statistic? 
In order to + +581 +00:47:59,610 --> 00:48:02,070 +compute the z-statistic, we have to know the + +582 +00:48:02,070 --> 00:48:04,670 +standard error of the estimate. + +583 +00:48:07,990 --> 00:48:10,090 +Now, the standard error of the estimate is given + +584 +00:48:10,090 --> 00:48:10,990 +by this equation, + +585 +00:48:13,990 --> 00:48:19,570 +1-b-1 over n1 plus 1 over n2. So this is called + +586 +00:48:19,570 --> 00:48:24,050 +the standard error of the difference. So if the + +587 +00:48:24,050 --> 00:48:31,930 +question is what's the value of the standard error + +588 +00:48:31,930 --> 00:48:37,010 +of the difference, you should compute + +589 +00:48:48,180 --> 00:48:55,700 +In this case, your z statistic is always, if you + +590 +00:48:55,700 --> 00:49:00,760 +remember in chapter 6, z score was x minus the + +591 +00:49:00,760 --> 00:49:05,680 +mean of x divided by sigma. In chapter 7, when we + +592 +00:49:05,680 --> 00:49:09,200 +talked about the sampling distribution, we had x + +593 +00:49:09,200 --> 00:49:14,160 +bar. So we had x bar minus the mean of x bar + +594 +00:49:14,160 --> 00:49:18,380 +divided by sigma of x bar, and that was x bar + +595 +00:49:18,380 --> 00:49:20,520 +minus the mean divided by sigma over root. + +596 +00:49:23,500 --> 00:49:27,040 +At the beginning of chapter 10, we talked about + +597 +00:49:27,040 --> 00:49:31,100 +the difference between these two, x1 bar minus x2 + +598 +00:49:31,100 --> 00:49:33,440 +bar, so minus the mean. + +599 +00:49:37,020 --> 00:49:40,300 +This term actually is the standard error of the + +600 +00:49:40,300 --> 00:49:44,120 +estimate. And the standard error was S squared B + +601 +00:49:44,120 --> 00:49:51,960 +multiplied by 1 over N1 plus 1 over N2. Now, when + +602 +00:49:51,960 --> 00:49:54,400 +we are talking about testing for the difference + +603 +00:49:54,400 --> 00:49:59,210 +between two proportions, this statistic is. 
The + +604 +00:49:59,210 --> 00:50:04,970 +estimate of the difference, which is b1 minus b2, + +605 +00:50:05,090 --> 00:50:10,250 +as we mentioned, minus the hypothesized value, + +606 +00:50:10,370 --> 00:50:13,870 +which is pi 1 minus pi 2, most of the time equals + +607 +00:50:13,870 --> 00:50:17,590 +0, divided by the standard error of the estimate + +608 +00:50:17,590 --> 00:50:21,650 +of this equation. So the standard error is b dash + +609 +00:50:21,650 --> 00:50:24,330 +1 minus b dash. + +610 +00:50:28,470 --> 00:50:32,250 +So let's use this statistic. P1 minus P2 is the + +611 +00:50:32,250 --> 00:50:35,930 +point estimate of Pi 1 minus Pi 2 minus the + +612 +00:50:35,930 --> 00:50:40,130 +hypothesized value always, or most of the time + +613 +00:50:40,130 --> 00:50:43,830 +this equals zero, divided by the square root of + +614 +00:50:43,830 --> 00:50:46,870 +this estimate. And the square root of this + +615 +00:50:46,870 --> 00:50:49,090 +estimate is given by this equation, which is the + +616 +00:50:49,090 --> 00:50:54,040 +square root of P dash. P dash again is The overall + +617 +00:50:54,040 --> 00:50:56,560 +proportion, or the bold proportion, which is X1 + +618 +00:50:56,560 --> 00:50:59,940 +plus X2 divided by N1 plus N2. So we have B dash + +619 +00:50:59,940 --> 00:51:04,920 +times 1 minus B dash times 1 over N1 plus 1 over + +620 +00:51:04,920 --> 00:51:07,380 +N2. That's your Z statistic. + +621 +00:51:10,300 --> 00:51:15,320 +So that's the new term for testing about the + +622 +00:51:15,320 --> 00:51:17,900 +difference between two population proportions. + +623 +00:51:18,800 --> 00:51:23,720 +Now, what's about the non-alternative hypothesis. + +624 +00:51:24,380 --> 00:51:27,180 +This slide actually is the same as the one we had + +625 +00:51:27,180 --> 00:51:28,960 +discussed for the difference between two + +626 +00:51:28,960 --> 00:51:35,040 +populations, except we replace mus by bis. 
So here + +627 +00:51:35,040 --> 00:51:38,940 +we have mu1 equals mu2 by 2, instead of mu1 equals + +628 +00:51:38,940 --> 00:51:44,140 +mu2. So if we go back a little bit to the + +629 +00:51:44,140 --> 00:51:45,080 +beginning of this chapter, + +630 +00:51:47,800 --> 00:51:52,180 +The same as this slide. Here we have means. We + +631 +00:51:52,180 --> 00:51:56,560 +just replace the means by pi's and we'll get + +632 +00:51:56,560 --> 00:52:03,380 +similar upper, lower, and two-tailed test. So we + +633 +00:52:03,380 --> 00:52:08,540 +have pi 1 equal pi 2 against one of these. Either + +634 +00:52:08,540 --> 00:52:11,340 +pi 1 does not equal pi 2, that's two-tailed test, + +635 +00:52:11,780 --> 00:52:15,340 +or pi 1 greater than for upper tail, or pi 1 + +636 +00:52:15,340 --> 00:52:19,920 +smaller than for lower tail. Or this one could be + +637 +00:52:19,920 --> 00:52:23,320 +written as the difference between the two + +638 +00:52:23,320 --> 00:52:26,240 +population proportion is zero against the + +639 +00:52:26,240 --> 00:52:30,640 +difference is not zero. Here, pi 1 greater than pi + +640 +00:52:30,640 --> 00:52:33,380 +2, it means pi 1 minus pi 2 is positive, greater + +641 +00:52:33,380 --> 00:52:37,240 +than zero, or pi 1 minus pi 2 is negative, smaller + +642 +00:52:37,240 --> 00:52:40,480 +than zero. That's how can we state the null and + +643 +00:52:40,480 --> 00:52:43,970 +alternative hypothesis. Now, for the rejection + +644 +00:52:43,970 --> 00:52:47,430 +regions, the same as we discussed before, if we + +645 +00:52:47,430 --> 00:52:50,470 +are talking about two-tailed test, we reject. 
If + +646 +00:52:50,470 --> 00:52:53,390 +your Z statistic falls in the rejection region in + +647 +00:52:53,390 --> 00:52:57,150 +the left side, that means this statistic is less + +648 +00:52:57,150 --> 00:53:00,230 +than negative Z alpha over two, or your Z + +649 +00:53:00,230 --> 00:53:03,790 +statistic is above this critical value, greater + +650 +00:53:03,790 --> 00:53:06,610 +than Z alpha over two. So the same as we discussed + +651 +00:53:06,610 --> 00:53:10,930 +before. For lower tail, The rejection region is to + +652 +00:53:10,930 --> 00:53:13,610 +the left side, so we reject if Z statistic is + +653 +00:53:13,610 --> 00:53:16,990 +smaller than negative Z alpha. Similarly here, we + +654 +00:53:16,990 --> 00:53:19,970 +reject if Z statistic greater than alpha for the + +655 +00:53:19,970 --> 00:53:23,770 +other two figures. So again, critical regions. + +656 +00:53:26,130 --> 00:53:29,330 +Null and alternative hypotheses are the same, but + +657 +00:53:29,330 --> 00:53:31,950 +here we have a new Z statistic, and this one + +658 +00:53:31,950 --> 00:53:35,530 +depends actually on the point estimate. The + +659 +00:53:35,530 --> 00:53:38,210 +hypothesized value and the standard error is + +660 +00:53:38,210 --> 00:53:44,070 +similar to the standard form of this form. But + +661 +00:53:44,070 --> 00:53:48,530 +here we replace b1 minus b2 by x. So x is replaced + +662 +00:53:48,530 --> 00:53:52,530 +by this value. And mu is replaced by pi 1 minus pi + +663 +00:53:52,530 --> 00:53:56,270 +2. And this is the standard deviation of the + +664 +00:53:56,270 --> 00:54:01,870 +estimate, which is given by this quantity. So + +665 +00:54:01,870 --> 00:54:06,690 +that's all for today. So I will stop at this + +666 +00:54:06,690 --> 00:54:09,410 +example for testing two populations. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..810e2b0f6fdedfa857b32b38b870c610b735bd91 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/sLp0uBxxi1M_raw.srt @@ -0,0 +1,2664 @@ +1 +00:00:08,940 --> 00:00:15,340 +Last time we talked about two sample tests and we + +2 +00:00:15,340 --> 00:00:23,240 +covered one case when the population means we are + +3 +00:00:23,240 --> 00:00:26,640 +selecting random samples and these samples are + +4 +00:00:26,640 --> 00:00:28,940 +independent. So that's case number one, + +5 +00:00:29,600 --> 00:00:33,740 +independent samples. And we assume sigma 1 and + +6 +00:00:33,740 --> 00:00:39,880 +sigma 2 are unknown and equal. And we have I + +7 +00:00:39,880 --> 00:00:45,000 +discussed this test. The test statistic was the + +8 +00:00:45,000 --> 00:00:49,600 +difference between the two sample means minus mu1 + +9 +00:00:49,600 --> 00:00:54,460 +minus mu2 under H0. Most of the time, this 1 + +10 +00:00:54,460 --> 00:00:57,920 +equals 0 divided by the standard error of the + +11 +00:00:57,920 --> 00:01:02,320 +estimate, which is S squared B multiplied by 1 + +12 +00:01:02,320 --> 00:01:06,160 +over N, 1 plus 1 over N. S squared B is the bold + +13 +00:01:06,160 --> 00:01:07,360 +variance. + +14 +00:01:12,750 --> 00:01:17,250 +The other case, when sigma 1 and sigma 2 are + +15 +00:01:17,250 --> 00:01:24,150 +unknown, but they are not equal. In this case, the + +16 +00:01:24,150 --> 00:01:29,190 +assumptions remain the same except the last one. + +17 +00:01:29,890 --> 00:01:32,710 +Now, the last one becomes population variances are + +18 +00:01:32,710 --> 00:01:37,310 +unknown, the same as before, but we assume they + +19 +00:01:37,310 --> 00:01:40,590 +are not equal. So it cannot be assumed to be + +20 +00:01:40,590 --> 00:01:46,690 +equal. 
So in this case, Sigma 1 and + +21 +00:01:46,690 --> 00:01:55,570 +Sigma 2 unknown, and they are not equal. So we + +22 +00:01:55,570 --> 00:02:00,010 +assume both Sigmas unknown, but they are not + +23 +00:02:00,010 --> 00:02:04,850 +equal. In this case, we can use one of the + +24 +00:02:04,850 --> 00:02:12,050 +software packages as Excel Minitab SPSS. So I will + +25 +00:02:12,050 --> 00:02:14,930 +leave this part for the SPSS course, how can we + +26 +00:02:14,930 --> 00:02:18,430 +determine the test statistic if sigma is unknown + +27 +00:02:18,430 --> 00:02:24,270 +and we cannot assume they are equal. So that's the + +28 +00:02:24,270 --> 00:02:28,270 +case number one, when the samples are independent. + +29 +00:02:29,630 --> 00:02:32,810 +The second, suppose the populations of interest + +30 +00:02:32,810 --> 00:02:37,190 +are related. The most common example is before and + +31 +00:02:37,190 --> 00:02:37,550 +after. + +32 +00:02:41,040 --> 00:02:49,600 +for example suppose we have patients who + +33 +00:02:49,600 --> 00:02:53,820 +are suffering from blood pressure and for example + +34 +00:02:53,820 --> 00:03:01,060 +suppose they use a drug A and + +35 +00:03:01,060 --> 00:03:03,700 +we have their blood pressures for example in + +36 +00:03:03,700 --> 00:03:08,590 +January for example number one Person number one, + +37 +00:03:08,750 --> 00:03:15,050 +his or her blood pressure was in January 145. The + +38 +00:03:15,050 --> 00:03:18,730 +second person, his blood pressure, for example, is + +39 +00:03:18,730 --> 00:03:23,190 +160. The third one, for example, is 155, and so + +40 +00:03:23,190 --> 00:03:26,730 +on. And suppose we have 10 patients. And the last + +41 +00:03:26,730 --> 00:03:31,270 +one, for example, his or her blood pressure is + +42 +00:03:31,270 --> 00:03:38,000 +135. Now these measures. in January. 
Suppose these + +43 +00:03:38,000 --> 00:03:44,260 +patients now have new treatment, suppose they are + +44 +00:03:44,260 --> 00:03:51,800 +using now a drug B, new drug, suppose for three + +45 +00:03:51,800 --> 00:03:59,000 +months. Then we have new results, I mean new + +46 +00:03:59,000 --> 00:04:01,840 +measurements for their blood pressure, for + +47 +00:04:01,840 --> 00:04:02,920 +example, in April. + +48 +00:04:05,850 --> 00:04:10,810 +Now, the first person with a blood pressure 145 + +49 +00:04:10,810 --> 00:04:15,770 +using drug A, it becomes, for example, 130 by + +50 +00:04:15,770 --> 00:04:21,730 +using drug B. The other one with 160, for example, + +51 +00:04:21,830 --> 00:04:27,410 +it becomes 150. 155 becomes 135. 135 becomes 120, + +52 +00:04:27,910 --> 00:04:35,360 +and so on. Now, this example is called before and + +53 +00:04:35,360 --> 00:04:39,740 +after. It means before using drug B and after + +54 +00:04:39,740 --> 00:04:44,400 +using drug B. And the question of interest is the + +55 +00:04:44,400 --> 00:04:48,860 +new drug effective or not. Effective means in this + +56 +00:04:48,860 --> 00:04:54,940 +case it reduces the high blood pressure. + +57 +00:04:57,980 --> 00:05:02,400 +This type of experiment is called related. + +58 +00:05:03,880 --> 00:05:08,840 +Sometimes it's called related. + +59 +00:05:09,920 --> 00:05:14,960 +Sometimes it's called pair. This name comes from + +60 +00:05:14,960 --> 00:05:18,860 +here we have two measurements for the same person. + +61 +00:05:19,880 --> 00:05:25,380 +So it looks like here, 145, 130. So this is pair + +62 +00:05:25,380 --> 00:05:29,380 +number one. The other pair or the second pair is + +63 +00:05:29,380 --> 00:05:34,830 +160, 150, and so on. So it's called paired, + +64 +00:05:35,290 --> 00:05:42,650 +sometimes called dependent, or matched samples. + +65 +00:05:45,110 --> 00:05:49,210 +So related, paired, dependent, or matched pairs, + +66 +00:05:49,310 --> 00:05:56,130 +the same meaning. 
So here we are testing about two + +67 +00:05:56,130 --> 00:05:59,870 +related populations. Again, it's called paired or + +68 +00:05:59,870 --> 00:06:01,810 +matched samples. They're repeated measures. + +69 +00:06:02,770 --> 00:06:05,830 +Repeated means before and after for the same + +70 +00:06:05,830 --> 00:06:09,550 +individual, for the same item, for the same + +71 +00:06:09,550 --> 00:06:13,670 +person. Now, for example, suppose measurements for + +72 +00:06:13,670 --> 00:06:18,990 +January is denoted by x1, and the other one is x2. + +73 +00:06:21,830 --> 00:06:24,990 +Now, the first thing we have to do is to find or + +74 +00:06:24,990 --> 00:06:27,250 +to compute the difference between paired values. + +75 +00:06:27,990 --> 00:06:33,490 +So Di is the difference between measurement in + +76 +00:06:33,490 --> 00:06:38,050 +January minus measurements in April. So we have + +77 +00:06:38,050 --> 00:06:43,050 +the difference between the two values. So here we + +78 +00:06:43,050 --> 00:06:50,810 +have Di. Di is just x1 minus x2 or x2 minus x1. + +79 +00:06:52,530 --> 00:06:57,930 +For example, it's x1 minus x2. Now the assumptions + +80 +00:06:57,930 --> 00:07:01,870 +are the same. We assume both populations are + +81 +00:07:01,870 --> 00:07:05,210 +normally distributed. Otherwise, if the + +82 +00:07:05,210 --> 00:07:08,710 +populations are not normal, we should have large + +83 +00:07:08,710 --> 00:07:12,370 +samples. It means the sample size should be at + +84 +00:07:12,370 --> 00:07:16,270 +least 30 in order to apply the central limit + +85 +00:07:16,270 --> 00:07:19,530 +theorem if the underlying population is not + +86 +00:07:19,530 --> 00:07:23,530 +normally distributed. So again, we have + +87 +00:07:23,530 --> 00:07:29,980 +measurements x. before, and measurements x2 after. + +88 +00:07:32,400 --> 00:07:38,200 +So we have here items, 1, 2, for example, up to + +89 +00:07:38,200 --> 00:07:44,460 +10. We have x1, x2, before, after. 
We compute the
+
+90
+00:07:44,460 --> 00:07:47,040
+difference, which is x1 minus x2, for example.
+
+91
+00:07:48,400 --> 00:07:52,280
+Now, for this data, I mean for the difference
+
+92
+00:07:52,280 --> 00:07:54,760
+data, just apply
+
+93
+00:07:57,290 --> 00:08:04,330
+one sample t-test. So simply apply one sample t
+
+94
+00:08:04,330 --> 00:08:06,350
+-test for the difference.
+
+95
+00:08:10,770 --> 00:08:15,710
+For example suppose here we have first item as we
+
+96
+00:08:15,710 --> 00:08:21,570
+mentioned for example is 160 it becomes 150. So x1
+
+97
+00:08:21,570 --> 00:08:30,960
+minus x2 becomes 10. The second one: 145, 130.
+
+98
+00:08:31,380 --> 00:08:37,940
+So it's 15. Suppose the third one is 151. It
+
+99
+00:08:37,940 --> 00:08:43,860
+becomes 155. So it's negative 4 and so on. So for
+
+100
+00:08:43,860 --> 00:08:48,580
+example, the last one is 175. It becomes 160. So
+
+101
+00:08:48,580 --> 00:08:54,520
+it's 15. Now look at these differences. And just
+
+102
+00:08:54,520 --> 00:08:59,000
+apply one sample t-test. It means find the
+
+103
+00:08:59,000 --> 00:09:02,680
+difference, the sample mean of the difference,
+
+104
+00:09:02,680 --> 00:09:07,720
+which is d bar. So it's just sum of d divided by
+
+105
+00:09:07,720 --> 00:09:14,070
+n. So Di. So this is the ith one: it's X1i minus X2i.
+
+106
+00:09:14,250 --> 00:09:17,450
+I means one, two, three, four, up to ten, for
+
+107
+00:09:17,450 --> 00:09:21,150
+example. So compute the mean of the differences.
+
+108
+00:09:21,930 --> 00:09:24,930
+Also compute the standard deviation of the
+
+109
+00:09:24,930 --> 00:09:28,590
+difference of the same equation we had before. Sum
+
+110
+00:09:28,590 --> 00:09:33,030
+D minus D bar squared divided by N minus one.
+
+111
+00:09:35,790 --> 00:09:39,630
+Then use the standard formula for the T test. 
+ +112 +00:09:40,700 --> 00:09:44,900 +which is D bar in this case instead of X bar minus + +113 +00:09:44,900 --> 00:09:51,920 +Mu of D divided by standard deviation of D divided + +114 +00:09:51,920 --> 00:09:55,580 +by square root. And this statistic has T + +115 +00:09:55,580 --> 00:09:59,400 +distribution with degrees of freedom equals N + +116 +00:09:59,400 --> 00:10:02,080 +minus 1, the same as we have discussed in chapter + +117 +00:10:02,080 --> 00:10:08,440 +9. So the new concept here, we have two + +118 +00:10:08,440 --> 00:10:12,990 +observations. before and after. The first step + +119 +00:10:12,990 --> 00:10:17,150 +here, we compute the difference between before + +120 +00:10:17,150 --> 00:10:20,970 +minus after or after minus before. In this case, + +121 +00:10:21,470 --> 00:10:26,650 +there is only one difference in the sign of the + +122 +00:10:26,650 --> 00:10:31,070 +average. Because if we have x1 minus x2 and the + +123 +00:10:31,070 --> 00:10:37,450 +answer is plus, then if we switch from x2 to x1, D + +124 +00:10:37,450 --> 00:10:41,070 +should be negative or opposite sign. But the + +125 +00:10:41,070 --> 00:10:44,330 +standard deviation remains the same sign because + +126 +00:10:44,330 --> 00:10:47,590 +always standard deviation is positive. Here we + +127 +00:10:47,590 --> 00:10:50,850 +have squared for D minus D bar. So it doesn't + +128 +00:10:50,850 --> 00:10:56,510 +matter if we have X1 minus X2 or X2 minus X1. So + +129 +00:10:56,510 --> 00:11:01,110 +that's all for two related samples. So again, we + +130 +00:11:01,110 --> 00:11:05,140 +have to compute the ith pair difference, Di. where + +131 +00:11:05,140 --> 00:11:09,820 +Di equals x1 minus x2. Now the point estimate for + +132 +00:11:09,820 --> 00:11:12,180 +the paired difference D bar is just sum of D + +133 +00:11:12,180 --> 00:11:15,400 +divided by N. So that's the average of the + +134 +00:11:15,400 --> 00:11:19,260 +difference. 
The sample standard deviation is SD, + +135 +00:11:19,580 --> 00:11:23,280 +which is sum D minus D bar squared divided by N + +136 +00:11:23,280 --> 00:11:26,820 +minus 1. And N is the number of pairs in the + +137 +00:11:26,820 --> 00:11:31,680 +paired samples. For sure, here we have the same + +138 +00:11:31,680 --> 00:11:36,040 +sample sizes before and after. I mean, if we start + +139 +00:11:36,040 --> 00:11:38,740 +with 10 patients, we should end with the same + +140 +00:11:38,740 --> 00:11:41,920 +number, because here we have repeated measures. + +141 +00:11:43,040 --> 00:11:46,880 +Suppose, for example, patient number 10, we have a + +142 +00:11:46,880 --> 00:11:53,540 +score of 175, then this one is missing. In this + +143 +00:11:53,540 --> 00:11:57,600 +case, the total observation should be ignored. I + +144 +00:11:57,600 --> 00:12:00,440 +mean, if one is missing, you should ignore the + +145 +00:12:00,440 --> 00:12:04,960 +total observation. The whole information should be + +146 +00:12:04,960 --> 00:12:09,100 +ignored. So in this case, if this is missing, so + +147 +00:12:09,100 --> 00:12:13,000 +the sample size becomes 9 instead of 10. Even if + +148 +00:12:13,000 --> 00:12:17,880 +you have a score for before, but you should have + +149 +00:12:17,880 --> 00:12:20,120 +before and after scores. + +150 +00:12:22,340 --> 00:12:26,930 +Again, the T statistic. D bar minus the mean of D + +151 +00:12:26,930 --> 00:12:30,970 +divided by S of the difference divided by square + +152 +00:12:30,970 --> 00:12:34,630 +root of N. And this statistic actually has N minus + +153 +00:12:34,630 --> 00:12:38,650 +1 degrees of freedom. So simply find the + +154 +00:12:38,650 --> 00:12:43,630 +differences, then apply one sample T test for this + +155 +00:12:43,630 --> 00:12:48,990 +difference in order to test about related samples. + +156 +00:12:51,270 --> 00:12:58,210 +Any question? So it's quite similar to the 170 + +157 +00:12:58,210 --> 00:13:01,270 +test. 
In this case, the null and alternative + +158 +00:13:01,270 --> 00:13:05,670 +hypothesis could be one of these. Either two + +159 +00:13:05,670 --> 00:13:10,410 +-tailed test. In this case, we are testing. Mu D + +160 +00:13:10,410 --> 00:13:14,010 +equals 0 against mu D does not equal 0. This means + +161 +00:13:14,010 --> 00:13:18,270 +if we assume mu D equals 0 and mu D is not 0, that + +162 +00:13:18,270 --> 00:13:25,130 +means drug A and B They are equal here. I mean, + +163 +00:13:25,270 --> 00:13:28,930 +there is no effect under the null hypothesis. And + +164 +00:13:28,930 --> 00:13:30,930 +for the alternative hypothesis, there is an + +165 +00:13:30,930 --> 00:13:35,890 +effect. But we don't know that if this effect is + +166 +00:13:35,890 --> 00:13:39,550 +positive or negative. So there is an effect of + +167 +00:13:39,550 --> 00:13:42,050 +using a drug B. And that effect, we don't know + +168 +00:13:42,050 --> 00:13:47,070 +actually if it is improved or does not improve the + +169 +00:13:47,070 --> 00:13:52,070 +blood pressure. On the other cases, for the lower + +170 +00:13:52,070 --> 00:13:55,590 +and upper tail you have the exact direction either + +171 +00:13:55,590 --> 00:14:00,270 +mu d is not i'm sorry mu d is smaller than zero + +172 +00:14:00,270 --> 00:14:06,710 +that means mu d is smaller than zero that means mu + +173 +00:14:06,710 --> 00:14:11,350 +x1 is smaller than mu x2 in this case that means + +174 +00:14:11,350 --> 00:14:14,730 +the mean of blood pressure before is smaller than + +175 +00:14:14,730 --> 00:14:22,950 +the other one mu d is positive this means Mu x1 is + +176 +00:14:22,950 --> 00:14:27,850 +greater than Mu x2. Now suppose the question is + +177 +00:14:27,850 --> 00:14:31,210 +does + +178 +00:14:31,210 --> 00:14:38,170 +drug B improve blood pressure? 
Now improve in this + +179 +00:14:38,170 --> 00:14:43,990 +case means reduces because if the blood pressure + +180 +00:14:43,990 --> 00:14:48,610 +becomes smaller than Before using a drug B, that + +181 +00:14:48,610 --> 00:14:52,790 +means the drug B has positive effect. So in this + +182 +00:14:52,790 --> 00:14:55,370 +case, we should have this one, mu D greater than + +183 +00:14:55,370 --> 00:15:00,010 +0. That if your difference is x1 minus x2. + +184 +00:15:02,850 --> 00:15:10,110 +Make sense? Because if D is x1 minus x2, and under + +185 +00:15:10,110 --> 00:15:12,250 +the alternative hypothesis, you have mu D greater + +186 +00:15:12,250 --> 00:15:17,490 +than 0. That means, on average, Blood pressure + +187 +00:15:17,490 --> 00:15:23,610 +using drug A is significantly greater than blood + +188 +00:15:23,610 --> 00:15:27,250 +pressure using drug B. That means drug B is more + +189 +00:15:27,250 --> 00:15:33,950 +effective than using drug A. Otherwise, if you use + +190 +00:15:33,950 --> 00:15:40,050 +X2 minus X1, it's vice versa. It should be mu X2 + +191 +00:15:40,050 --> 00:15:44,330 +smaller than mu X1. And finally, you will end with + +192 +00:15:44,330 --> 00:15:47,890 +the same conclusion. So actually, it doesn't + +193 +00:15:47,890 --> 00:15:53,930 +matter if you state D as x1 minus x2 or x2 minus + +194 +00:15:53,930 --> 00:15:57,370 +x1. You have to be careful when you state the null + +195 +00:15:57,370 --> 00:16:02,650 +and alternative hypothesis. That's for state the + +196 +00:16:02,650 --> 00:16:05,690 +appropriate null and alternative hypothesis. And + +197 +00:16:05,690 --> 00:16:09,410 +actually, it depends on the problem itself. Now, + +198 +00:16:09,470 --> 00:16:11,450 +for the rejection regions, it's the same as we + +199 +00:16:11,450 --> 00:16:14,490 +discussed before. 
If we are talking about two + +200 +00:16:14,490 --> 00:16:19,050 +-tailed test, Always we reject if the test + +201 +00:16:19,050 --> 00:16:22,690 +statistic, this value, falls in the rejection + +202 +00:16:22,690 --> 00:16:25,550 +regions. Here there are two rejection regions, one + +203 +00:16:25,550 --> 00:16:31,510 +to the right of a upper critical value, which is T + +204 +00:16:31,510 --> 00:16:34,390 +alpha over 2. So we reject if T statistic is + +205 +00:16:34,390 --> 00:16:37,670 +greater than T alpha over 2, or if T statistic is + +206 +00:16:37,670 --> 00:16:41,270 +smaller than negative T alpha over 2. That's for + +207 +00:16:41,270 --> 00:16:44,510 +two-tiered test. But if we are talking about one + +208 +00:16:44,510 --> 00:16:49,380 +-tiered test, And for the lower tail, in this + +209 +00:16:49,380 --> 00:16:52,900 +case, the rejection region in the lower side, so + +210 +00:16:52,900 --> 00:16:55,280 +we reject if the T statistic is smaller than + +211 +00:16:55,280 --> 00:17:00,740 +negative T alpha. For upper tail T test, if the + +212 +00:17:00,740 --> 00:17:04,640 +alternative is mu D is greater than zero, then we + +213 +00:17:04,640 --> 00:17:07,620 +reject zero if T statistic is greater than T + +214 +00:17:07,620 --> 00:17:09,920 +alpha. So it's quite similar to the one we + +215 +00:17:09,920 --> 00:17:14,240 +discussed for the means, for the one sample mean. + +216 +00:17:15,850 --> 00:17:19,390 +The confidence interval for the difference, for + +217 +00:17:19,390 --> 00:17:22,070 +the mean difference, is the same as the one we had + +218 +00:17:22,070 --> 00:17:26,430 +discussed before. The previous, the old one was X + +219 +00:17:26,430 --> 00:17:32,250 +bar plus the point estimate, which was X bar plus + +220 +00:17:32,250 --> 00:17:38,070 +or minus T over 2 S over root N. 
Now, for the + +221 +00:17:38,070 --> 00:17:41,130 +differences, we have the average of the + +222 +00:17:41,130 --> 00:17:46,670 +difference, which is D bar, instead of X bar plus + +223 +00:17:46,670 --> 00:17:50,270 +or minus T alpha over two times standard deviation + +224 +00:17:50,270 --> 00:17:53,330 +of the difference divided by square root of N. And + +225 +00:17:53,330 --> 00:17:55,550 +we know that the standard deviation is given by + +226 +00:17:55,550 --> 00:18:00,070 +this equation. So I think nothing is new in this + +227 +00:18:00,070 --> 00:18:03,230 +case except that we should compute the difference + +228 +00:18:03,230 --> 00:18:09,030 +between measurements before and after. That's all. + +229 +00:18:09,930 --> 00:18:13,210 +Let's look at this simple example. + +230 +00:18:16,100 --> 00:18:21,760 +Assume you send your salespeople to a customer + +231 +00:18:21,760 --> 00:18:25,740 +service training workshop. And the question is, + +232 +00:18:26,160 --> 00:18:29,060 +has the training made a difference in the number + +233 +00:18:29,060 --> 00:18:29,720 +of complaints? + +234 +00:18:32,300 --> 00:18:38,520 +And here we have small sample size of five. And we + +235 +00:18:38,520 --> 00:18:42,980 +have number of complaints before attending the + +236 +00:18:42,980 --> 00:18:46,950 +customer service training workshop. and after the + +237 +00:18:46,950 --> 00:18:50,390 +training course. And again, the question is, has + +238 +00:18:50,390 --> 00:18:53,950 +the training made a difference in the number of + +239 +00:18:53,950 --> 00:18:58,430 +complaints? For example, salesperson number one + +240 +00:18:58,430 --> 00:19:06,070 +has six complaints before attending the training + +241 +00:19:06,070 --> 00:19:11,650 +workshop. And after that, he has four. + +242 +00:19:15,760 --> 00:19:20,000 +20 complaints before it becomes 6, 3 becomes 2, 0, + +243 +00:19:20,100 --> 00:19:24,260 +0, no complaints, 4 becomes 0. 
Now the question + +244 +00:19:24,260 --> 00:19:27,800 +is, has the training made a difference? In this + +245 +00:19:27,800 --> 00:19:31,040 +case, you don't know the direction. If this + +246 +00:19:31,040 --> 00:19:35,440 +training workshop has positive effect, it means it + +247 +00:19:35,440 --> 00:19:40,290 +reduces the number of complaints. So we should + +248 +00:19:40,290 --> 00:19:43,650 +have two-tailed test. Since there is no direction, + +249 +00:19:44,150 --> 00:19:46,250 +positive or negative, improved or does not + +250 +00:19:46,250 --> 00:19:50,430 +improve, we should have two-tailed test. So that's + +251 +00:19:50,430 --> 00:19:54,050 +step number one. You have to scale the appropriate + +252 +00:19:54,050 --> 00:19:57,990 +null and alternate hypothesis in order to + +253 +00:19:57,990 --> 00:20:03,720 +determine the critical regions for this test. So + +254 +00:20:03,720 --> 00:20:09,040 +this example is a two-tailed test. Because it asks + +255 +00:20:09,040 --> 00:20:11,420 +if there is a difference. And they don't know if + +256 +00:20:11,420 --> 00:20:15,040 +this difference is positive or negative. Now, + +257 +00:20:15,160 --> 00:20:19,100 +simple calculations will give the average of D and + +258 +00:20:19,100 --> 00:20:21,540 +the standard deviation of D. So first step, you + +259 +00:20:21,540 --> 00:20:26,200 +have to compute the difference, Di. Again, it + +260 +00:20:26,200 --> 00:20:30,140 +doesn't matter actually if x2 minus x1 or x1 minus + +261 +00:20:30,140 --> 00:20:35,380 +x2. For this, in this case, we have x2 minus x1, 4 + +262 +00:20:35,380 --> 00:20:40,120 +minus 6 negative 2, 6 minus 20 negative 14, 2 + +263 +00:20:40,120 --> 00:20:43,860 +minus 3 negative 1, then 0, 0 minus 4 is negative + +264 +00:20:43,860 --> 00:20:49,380 +4. Now the average for this difference is negative + +265 +00:20:49,380 --> 00:20:55,320 +4. 
If the differences are 1 minus 2, I mean before + +266 +00:20:55,320 --> 00:20:59,380 +minus after, you will have the same value but + +267 +00:20:59,380 --> 00:21:03,760 +positive. The standard deviation will not change, + +268 +00:21:04,760 --> 00:21:07,480 +remain the same, because always the standard + +269 +00:21:07,480 --> 00:21:11,960 +deviation is positive. So SD equals 5.67. So + +270 +00:21:11,960 --> 00:21:19,880 +simple calculations give BR to be negative 4.2, + +271 +00:21:21,200 --> 00:21:29,960 +and SD to be 5.67. In the real life, you should + +272 +00:21:29,960 --> 00:21:35,020 +have large sample in order to test. Here, it's + +273 +00:21:35,020 --> 00:21:38,120 +just n equals five. This example is just for + +274 +00:21:38,120 --> 00:21:41,040 +illustration. How can we use the t-test? But in + +275 +00:21:41,040 --> 00:21:45,440 +reality, you should have larger sample size than + +276 +00:21:45,440 --> 00:21:49,160 +this one. So, n equals five is not enough in order + +277 +00:21:49,160 --> 00:21:52,500 +to determine if the training workshop is effective + +278 +00:21:52,500 --> 00:21:55,360 +or not. Because if you look carefully at these + +279 +00:21:55,360 --> 00:21:59,050 +values, The first person has six complaints, it + +280 +00:21:59,050 --> 00:22:03,610 +becomes four. So the difference is two. Twenty to + +281 +00:22:03,610 --> 00:22:08,650 +six is large difference, about fourteen. Now small + +282 +00:22:08,650 --> 00:22:12,810 +difference is one, then zero, then four. So + +283 +00:22:12,810 --> 00:22:18,350 +sometimes you cannot determine if the training + +284 +00:22:18,350 --> 00:22:23,790 +workshop is effective based on the small sample + +285 +00:22:23,790 --> 00:22:24,250 +size. + +286 +00:22:26,990 --> 00:22:30,910 +Now, the question again is, has the training made + +287 +00:22:30,910 --> 00:22:33,990 +a difference in the number of complaints at 1% + +288 +00:22:33,990 --> 00:22:37,330 +level of significance? 
So in this case, we are
+
+289
+00:22:37,330 --> 00:22:43,510
+using alpha to be 1%. Most
+
+290
+00:22:43,510 --> 00:22:46,810
+of the time, we are using 5%. In this example,
+
+291
+00:22:47,110 --> 00:22:53,310
+alpha is 1%. Null hypothesis, MUD equals zero.
+
+292
+00:22:53,630 --> 00:22:57,180
+Again, MUD is not zero. So here we are talking
+
+293
+00:22:57,180 --> 00:23:04,220
+about two-tailed test. So we should compute two
+
+294
+00:23:04,220 --> 00:23:08,000
+critical values, T alpha over 2, N minus 1, plus
+
+295
+00:23:08,000 --> 00:23:14,000
+or minus. So plus or minus. Alpha is 1%. So alpha
+
+296
+00:23:14,000 --> 00:23:20,940
+over 2 is 0.005. So we are looking for 0.005. And
+
+297
+00:23:20,940 --> 00:23:26,930
+degrees of freedom is always N minus 1. And we
+
+298
+00:23:26,930 --> 00:23:32,950
+have 5 minus 1 is 4. Look at the data we have, you
+
+299
+00:23:32,950 --> 00:23:40,770
+will figure out that the critical value is 4.604.
+
+300
+00:23:41,290 --> 00:23:44,710
+So we reject the null hypothesis if the value of
+
+301
+00:23:44,710 --> 00:23:47,510
+the test statistic falls in the rejection regions
+
+302
+00:23:47,510 --> 00:23:47,890
+here.
+
+303
+00:23:50,780 --> 00:23:54,340
+or smaller than negative 4.6. So we should
+
+304
+00:23:54,340 --> 00:24:01,120
+calculate the test statistic. T-stat is just the
+
+305
+00:24:01,120 --> 00:24:03,880
+same equation, the same formula we had before.
+
+306
+00:24:07,420 --> 00:24:10,240
+D bar minus the mean of D divided by S over root
+
+307
+00:24:10,240 --> 00:24:20,070
+N. D bar is negative minus MUD under H0 is 0. So
+
+308
+00:24:20,070 --> 00:24:22,050
+most of the time it's zero, but sometimes maybe,
+
+309
+00:24:22,230 --> 00:24:25,970
+for example, four or six, you should switch this
+
+310
+00:24:25,970 --> 00:24:31,990
+value here to block six instead of zero. 
But here, + +311 +00:24:32,550 --> 00:24:35,550 +we are talking about difference of zero, so it + +312 +00:24:35,550 --> 00:24:43,890 +should be zero. Divide by S, which is five. Divide + +313 +00:24:43,890 --> 00:24:50,730 +by root N, and N is five. This gave negative 1.66. + +314 +00:24:52,790 --> 00:24:57,530 +Now the question is, is this value of negative 1 + +315 +00:24:57,530 --> 00:25:01,890 +.66 full in the rejection regions in one of these + +316 +00:25:01,890 --> 00:25:07,390 +two? It's full in the non-rejection regions, so we + +317 +00:25:07,390 --> 00:25:10,530 +don't reject the null hypothesis. So since this + +318 +00:25:10,530 --> 00:25:15,060 +statistic is not in the rejection region, then we + +319 +00:25:15,060 --> 00:25:20,060 +don't reject them. So now my conclusion is there + +320 +00:25:20,060 --> 00:25:22,780 +is not a significant change in the number of + +321 +00:25:22,780 --> 00:25:27,900 +complaints. Even there are some changes, but these + +322 +00:25:27,900 --> 00:25:32,180 +changes are not significant. I mean there is no + +323 +00:25:32,180 --> 00:25:35,800 +big difference between number of complaints before + +324 +00:25:35,800 --> 00:25:40,360 +and after attending the training course or the + +325 +00:25:40,360 --> 00:25:41,680 +training workshop. + +326 +00:25:44,090 --> 00:25:49,850 +So that's for performing paired samples t-test. + +327 +00:25:51,250 --> 00:25:52,990 +Let's do another problem. + +328 +00:26:01,690 --> 00:26:04,490 +Let's look at one of the practice problems we + +329 +00:26:04,490 --> 00:26:04,830 +have. + +330 +00:26:23,360 --> 00:26:28,760 +to test the effectiveness of a business school + +331 +00:26:28,760 --> 00:26:33,520 +preparation course. To test the effectiveness of a + +332 +00:26:33,520 --> 00:26:36,980 +business school preparation course. Eight students + +333 +00:26:36,980 --> 00:26:40,240 +took a general business test before and after the + +334 +00:26:40,240 --> 00:26:45,000 +course. 
The results are given below. So we have + +335 +00:26:45,000 --> 00:26:51,560 +here eight students took the general business test + +336 +00:26:51,560 --> 00:26:55,720 +before and after the course. So here we have + +337 +00:26:55,720 --> 00:27:02,920 +business school preparation course and we have + +338 +00:27:02,920 --> 00:27:06,740 +scores before taking this course, this preparation + +339 +00:27:06,740 --> 00:27:12,380 +course and after. And the question is if + +340 +00:27:13,310 --> 00:27:17,370 +this course is effective or not. I mean if the + +341 +00:27:17,370 --> 00:27:20,430 +course before is smaller than after. I mean if the + +342 +00:27:20,430 --> 00:27:25,330 +course score after is above or is more than or + +343 +00:27:25,330 --> 00:27:30,490 +greater than. So we have this data for eight + +344 +00:27:30,490 --> 00:27:36,210 +different students. Now the questions are, Number + +345 +00:27:36,210 --> 00:27:43,170 +of degrees of freedom. So since n equals 8, n + +346 +00:27:43,170 --> 00:27:51,190 +minus 1 is 7. So first step, first question, n + +347 +00:27:51,190 --> 00:27:52,710 +minus 1 is 7. + +348 +00:27:57,490 --> 00:28:02,010 +Next. The value of the sample mean difference is + +349 +00:28:02,010 --> 00:28:08,330 +if the difference scores reflect the results of + +350 +00:28:08,330 --> 00:28:13,090 +the exam after, the course after minus the results + +351 +00:28:13,090 --> 00:28:18,090 +of the exam before. Let's see. I will use Excel in + +352 +00:28:18,090 --> 00:28:21,730 +order to find the means for before and after. + +353 +00:28:39,240 --> 00:28:47,480 +So these values are before. So x1 and after. So + +354 +00:28:47,480 --> 00:28:52,140 +difference now, for example, it's x2 minus x1. So + +355 +00:28:52,140 --> 00:28:55,400 +x2 minus x1. + +356 +00:28:57,600 --> 00:29:01,260 +So we have these results. 
+ +357 +00:29:09,130 --> 00:29:15,790 +now the average of this average of these values + +358 +00:29:15,790 --> 00:29:24,450 +from D1 all the way up to D9 so the average is 50 + +359 +00:29:24,450 --> 00:29:30,450 +so + +360 +00:29:30,450 --> 00:29:34,830 +that's the average so + +361 +00:29:34,830 --> 00:29:39,520 +the value of the sample mean difference is 50. So + +362 +00:29:39,520 --> 00:29:42,740 +the answer is 50 if the difference score reflects + +363 +00:29:42,740 --> 00:29:50,040 +the result of the exam after minus 5. Next, the + +364 +00:29:50,040 --> 00:29:52,360 +value of the standard error of the difference + +365 +00:29:52,360 --> 00:29:58,420 +scores. Now, we should compute first SD. So here, + +366 +00:29:58,500 --> 00:30:05,830 +D bar is 50. First, we have to compute SD. He + +367 +00:30:05,830 --> 00:30:09,710 +asked about the standard error of this difference. + +368 +00:30:10,450 --> 00:30:17,150 +So SD over square root of 1. Now SD, the standard + +369 +00:30:17,150 --> 00:30:25,370 +deviation. Standard deviation of + +370 +00:30:25,370 --> 00:30:30,090 +the differences. Even some of these, some of the + +371 +00:30:30,090 --> 00:30:33,670 +differences are negative. But the standard + +372 +00:30:33,670 --> 00:30:38,430 +deviation should be positive. So I got standard + +373 +00:30:38,430 --> 00:30:44,930 +deviation to be 65.02747. + +374 +00:30:46,310 --> 00:30:53,430 +I should divide this by square root. Square root + +375 +00:30:53,430 --> 00:30:59,990 +of N. N is eight. I will get twenty-two point 22 + +376 +00:30:59,990 --> 00:31:03,530 +.99. So that's the standard error of the + +377 +00:31:03,530 --> 00:31:07,450 +differences. So the value of the standard of the + +378 +00:31:07,450 --> 00:31:12,550 +difference scores is 22.99. Now we have this + +379 +00:31:12,550 --> 00:31:17,350 +answer 65. 65 again is the standard deviation. 
+ +380 +00:31:18,830 --> 00:31:21,450 +Here he asks about the standard error of the + +381 +00:31:21,450 --> 00:31:25,110 +reference. So this result should be divided by + +382 +00:31:25,110 --> 00:31:29,750 +square root of n, which is eight. So I got the C + +383 +00:31:29,750 --> 00:31:31,750 +is the correct answer. + +384 +00:31:33,750 --> 00:31:39,130 +Next, what's the critical value for testing at the + +385 +00:31:39,130 --> 00:31:43,730 +5% level of significance whether Be careful, + +386 +00:31:44,290 --> 00:31:47,150 +whether the business school preparation course is + +387 +00:31:47,150 --> 00:31:53,030 +effective in improving exam scores. So again, here + +388 +00:31:53,030 --> 00:32:00,310 +we have D, X2 minus X1 after minus before. So it's + +389 +00:32:00,310 --> 00:32:08,130 +0, mu D equals 0 against H1 mu D. You think + +390 +00:32:08,130 --> 00:32:09,610 +positive or negative? Positive. + +391 +00:32:14,670 --> 00:32:18,170 +It's positive based on this definition, x2 minus + +392 +00:32:18,170 --> 00:32:23,430 +x1. That means, this means mu of x2 is greater + +393 +00:32:23,430 --> 00:32:27,310 +than mu of x1. That means the business school + +394 +00:32:27,310 --> 00:32:31,690 +preparation course is effective in improving exam + +395 +00:32:31,690 --> 00:32:36,390 +scores. So it means we have upper third test, the + +396 +00:32:36,390 --> 00:32:40,510 +alpha and n minus 1. 
And alpha for this specific + +397 +00:32:40,510 --> 00:32:45,890 +example is 5%, so we are looking for 5% and + +398 +00:32:45,890 --> 00:32:49,970 +degrees of freedom in this case is 7 now just look + +399 +00:32:49,970 --> 00:32:57,650 +at the t table you will get this result so the + +400 +00:32:57,650 --> 00:33:02,170 +critical region starts from this point all the way + +401 +00:33:02,170 --> 00:33:06,130 +up to infinity so that's your critical value one + +402 +00:33:06,130 --> 00:33:10,090 +point you may check this result by using the t + +403 +00:33:10,090 --> 00:33:18,450 +table Now let's look at number 12. At 5% level of + +404 +00:33:18,450 --> 00:33:23,550 +significance, the decision for this hypothesis + +405 +00:33:23,550 --> 00:33:26,910 +would be. In order to answer this question, we + +406 +00:33:26,910 --> 00:33:33,930 +should compute either the B value or the test + +407 +00:33:33,930 --> 00:33:36,590 +statistic. We don't know the test statistic, so we + +408 +00:33:36,590 --> 00:33:40,750 +should calculate the T stat first. Because in + +409 +00:33:40,750 --> 00:33:43,810 +order to compute the b value, we have to compute + +410 +00:33:43,810 --> 00:33:47,310 +this statistic first. So this statistic is x bar + +411 +00:33:47,310 --> 00:33:51,330 +minus, I'm sorry, d bar minus mean of D divided by + +412 +00:33:51,330 --> 00:33:58,130 +SD over root N. So that equals to. D bar is 50. + +413 +00:34:00,000 --> 00:34:04,920 +minus zero, because here mu D equals zero. Again, + +414 +00:34:05,140 --> 00:34:12,120 +divide by SD over square root of N. Either 65.02 + +415 +00:34:12,120 --> 00:34:15,880 +divided by eight, square root of eight, or just + +416 +00:34:15,880 --> 00:34:20,040 +take this result. So divide by 22, that's easier. + +417 +00:34:22,420 --> 00:34:29,320 +So now 55. By doing Excel again, it's 50. Divide + +418 +00:34:29,320 --> 00:34:32,720 +by this result, 2.175. + +419 +00:34:35,100 --> 00:34:38,580 +Approximately 2.175. 
So the value of the test + +420 +00:34:38,580 --> 00:34:45,660 +statistic is 2.175. Now the question is, is this + +421 +00:34:45,660 --> 00:34:49,960 +value all in the rejection region? Since the + +422 +00:34:49,960 --> 00:34:53,180 +critical value was 1.89, and we are talking about + +423 +00:34:53,180 --> 00:34:57,460 +other TLC tests, So this value falls in the + +424 +00:34:57,460 --> 00:35:01,140 +rejection region, so we reject. So since T + +425 +00:35:01,140 --> 00:35:09,760 +statistic, T step equals 2.175 greater than 1.895, + +426 +00:35:10,660 --> 00:35:17,540 +so the answer is reject the null hypothesis. So in + +427 +00:35:17,540 --> 00:35:22,200 +this case, we reject the null hypothesis, number + +428 +00:35:22,200 --> 00:35:27,950 +13. At 5% level of significance, the conclusion + +429 +00:35:27,950 --> 00:35:32,890 +for this hypothesis test would be, since we reject + +430 +00:35:32,890 --> 00:35:35,970 +the null hypothesis, that means we have sufficient + +431 +00:35:35,970 --> 00:35:39,330 +evidence to support the alternative hypothesis. It + +432 +00:35:39,330 --> 00:35:43,010 +means the business school preparation course does + +433 +00:35:43,010 --> 00:35:47,790 +improve exam score. Since we are supporting + +434 +00:35:47,790 --> 00:35:51,110 +immunity greater than zero, that means the average + +435 +00:35:52,190 --> 00:35:57,910 +of the scores after taking the preparation course + +436 +00:35:57,910 --> 00:36:01,350 +is greater or significantly greater than or higher + +437 +00:36:01,350 --> 00:36:04,310 +than the average before taking the preparation + +438 +00:36:04,310 --> 00:36:09,330 +course. So that means we have improvement. So part + +439 +00:36:09,330 --> 00:36:13,390 +A, the first one, is the correct answer. So the + +440 +00:36:13,390 --> 00:36:16,390 +business school preparation course does improve + +441 +00:36:16,390 --> 00:36:17,910 +exam scores. 
+
+442
+00:36:24,220 --> 00:36:27,540
+14, the calculated value of the statistic is 2
+.175, the same as we did. Now, as we mentioned
+
+443
+00:36:32,440 --> 00:36:36,640
+before, we can reach the same conclusion either by
+
+444
+00:36:36,640 --> 00:36:41,060
+using a critical value approach or b value
+
+445
+00:36:41,060 --> 00:36:45,160
+approach. Here we cannot use the confidence
+
+446
+00:36:45,160 --> 00:36:49,160
+interval approach because It's one-tailed test. We
+
+447
+00:36:49,160 --> 00:36:52,760
+can use the two-sided confidence interval if we
+
+448
+00:36:52,760 --> 00:36:55,160
+have two-tailed test. But in this case, there is
+
+449
+00:36:55,160 --> 00:36:58,480
+only one tail. So you have only two approaches,
+
+450
+00:36:58,640 --> 00:37:02,780
+either a critical value or the B value approach.
+
+451
+00:37:03,120 --> 00:37:05,800
+Let's see how can we find the B value of this
+
+452
+00:37:05,800 --> 00:37:09,520
+test. Now, the B value is given by.
+
+453
+00:37:19,840 --> 00:37:25,680
+value. Now we are looking for the probability
+
+454
+00:37:25,680 --> 00:37:32,160
+after 2.175. So if you look at the table you have
+
+455
+00:37:32,160 --> 00:37:39,180
+degrees of freedom 7. Look at this value. Most of
+
+456
+00:37:39,180 --> 00:37:43,100
+the time T table does not give the exact B value.
+
+457
+00:37:43,760 --> 00:37:46,620
+So here you will see that your B value is between
+
+458
+00:37:46,620 --> 00:37:56,790
+0.025 and 0.05 if you look at the table let's see
+
+459
+00:37:56,790 --> 00:38:01,710
+the
+
+460
+00:38:01,710 --> 00:38:08,850
+statistical table here for t-test now
+
+461
+00:38:08,850 --> 00:38:14,030
+look at seven degrees of freedom we are looking
+
+462
+00:38:14,030 --> 00:38:21,510
+for the value of 2.17 so 2.17 lies between these
+
+463
+00:38:21,510 --> 00:38:27,390
+two values, 1.895 to 2.365. 
So your B value lies + +465 +00:38:27,390 --> 00:38:34,850 +between 0.25 and 0.5. So here my B value is + +466 +00:38:34,850 --> 00:38:42,910 +greater than 0.25 and smaller than 5%. As we + +467 +00:38:42,910 --> 00:38:47,510 +mentioned before, always we + +468 +00:38:47,510 --> 00:38:50,050 +reject H0 + +469 +00:38:51,230 --> 00:38:55,430 +if your b value is smaller than alpha that's in + +470 +00:38:55,430 --> 00:38:58,730 +general we reject the null hypothesis if your b + +471 +00:38:58,730 --> 00:39:02,010 +value is smaller than alpha in this case alpha is + +472 +00:39:02,010 --> 00:39:06,630 +given to be five percent your b value is smaller + +473 +00:39:06,630 --> 00:39:08,810 +than five percent so we have to reject the null + +474 +00:39:08,810 --> 00:39:12,430 +hypothesis so again we have the same decision + +475 +00:39:12,430 --> 00:39:17,010 +reject the null hypothesis also you may compute + +476 +00:39:17,010 --> 00:39:23,210 +the exact and b value by using excel you may use + +477 +00:39:23,210 --> 00:39:28,130 +the t distribution function so here we have + +478 +00:39:28,130 --> 00:39:34,370 +statistics functions here t distribution so + +479 +00:39:34,370 --> 00:39:40,750 +t distribution oh the value of the statistic which + +480 +00:39:40,750 --> 00:39:42,790 +is 2.175 + +481 +00:39:47,490 --> 00:39:56,930 +So this value here is + +482 +00:39:56,930 --> 00:40:03,670 +2.17 degrees of freedom is seven tails. We are + +483 +00:40:03,670 --> 00:40:09,810 +talking about one tail test. So it's one. So you + +484 +00:40:09,810 --> 00:40:12,810 +have this result. So your B value, the exact + +485 +00:40:12,810 --> 00:40:16,790 +answer is seven one. + +486 +00:40:19,760 --> 00:40:24,560 +033, which is the exact one. The approximate p + +487 +00:40:24,560 --> 00:40:27,920 +-value is between 025 and 05. And for sure, this + +488 +00:40:27,920 --> 00:40:35,720 +value lies between 025 and 05. 
So since this p + +489 +00:40:35,720 --> 00:40:38,980 +-value is smaller than alpha, then we reject the + +490 +00:40:38,980 --> 00:40:42,040 +null hypothesis. So go back to the practice + +491 +00:40:42,040 --> 00:40:49,050 +problems here. The exact answer is 0031. using + +492 +00:40:49,050 --> 00:40:56,570 +Excel or between 025 and 05 by using a table with + +493 +00:40:56,570 --> 00:40:59,890 +seven degrees of freedom. + +494 +00:41:02,450 --> 00:41:07,060 +The last question for this table. In examining + +495 +00:41:07,060 --> 00:41:09,900 +differences, in examining the differences between + +496 +00:41:09,900 --> 00:41:14,020 +related samples, we are essentially sampling from + +497 +00:41:14,020 --> 00:41:18,160 +an underlying population of different scores, it's + +498 +00:41:18,160 --> 00:41:23,040 +correct. So that's a practice problem for using + +499 +00:41:23,040 --> 00:41:33,820 +paired sample status. Any question? Because in + +500 +00:41:33,820 --> 00:41:35,260 +this question he asked about + +501 +00:41:44,860 --> 00:41:50,120 +Under H0, since we are talking about what's the + +502 +00:41:50,120 --> 00:41:52,760 +critical value for testing at 5% level of + +503 +00:41:52,760 --> 00:41:55,520 +significance, whether the business school + +504 +00:41:55,520 --> 00:42:00,040 +preparation course is effective. Effective means + +505 +00:42:00,040 --> 00:42:09,650 +the mean after Here, D is X2 minus X1 after minus + +506 +00:42:09,650 --> 00:42:13,010 +before. So since we are talking about effective, + +507 +00:42:13,230 --> 00:42:19,730 +it means mean after is greater than before. So + +508 +00:42:19,730 --> 00:42:23,750 +mean after minus mean before is positive. That + +509 +00:42:23,750 --> 00:42:28,610 +means mu D is positive. Now under H0, just use + +510 +00:42:28,610 --> 00:42:34,010 +either Mu equals zero just for simple writing null + +511 +00:42:34,010 --> 00:42:37,190 +and alternate hypothesis. 
But to be more precise, + +512 +00:42:37,410 --> 00:42:42,610 +you should write Mu smaller than Mu. But actually, + +513 +00:42:42,750 --> 00:42:47,290 +when we compute the critical value, here we look + +514 +00:42:47,290 --> 00:42:52,990 +only at the alternate hypothesis. So your critical + +515 +00:42:52,990 --> 00:42:55,550 +value depends on the sign under the alternative + +516 +00:42:55,550 --> 00:42:59,530 +hypothesis, your decision also based on the sign + +517 +00:42:59,530 --> 00:43:03,250 +of the alternative hypothesis. So always just look + +518 +00:43:03,250 --> 00:43:07,810 +at the sign under the alternative hypothesis and + +519 +00:43:07,810 --> 00:43:12,170 +ignore totally the sign under H0. Because here + +520 +00:43:12,170 --> 00:43:16,050 +it's upper tilted test, so your critical value + +521 +00:43:16,050 --> 00:43:21,240 +should be to the right based on this sign. we + +522 +00:43:21,240 --> 00:43:26,100 +reject FT statistic greater than it comes from + +523 +00:43:26,100 --> 00:43:31,220 +this statement that is MLB is above C. Is it + +524 +00:43:31,220 --> 00:43:36,820 +clear? The conclusion, + +525 +00:43:37,060 --> 00:43:40,600 +since we are rejecting the null hypothesis, that + +526 +00:43:40,600 --> 00:43:43,700 +means there is sufficient evidence to support the + +527 +00:43:43,700 --> 00:43:47,300 +alternative hypothesis. That's number one. That + +528 +00:43:47,300 --> 00:43:51,620 +means The score exam after taking the preparation + +529 +00:43:51,620 --> 00:43:55,940 +course is greater than the score exam before + +530 +00:43:55,940 --> 00:44:00,140 +taking the preparation course. That means the + +531 +00:44:00,140 --> 00:44:03,000 +business school preparation course does improve + +532 +00:44:03,000 --> 00:44:10,820 +exam score. That's the main conclusion for this + +533 +00:44:10,820 --> 00:44:17,640 +specific example. So far we have discussed two + +534 +00:44:17,640 --> 00:44:22,450 +population means. 
for independent samples and for + +535 +00:44:22,450 --> 00:44:28,630 +related samples. One is missing in order to + +536 +00:44:28,630 --> 00:44:31,630 +complete the story about hypothesis testing, which + +537 +00:44:31,630 --> 00:44:37,010 +is tests for two population proportions. That + +538 +00:44:37,010 --> 00:44:41,470 +means suppose you have two populations where + +539 +00:44:41,470 --> 00:44:46,270 +population proportions are pi 1 and pi 2, and our + +540 +00:44:46,270 --> 00:44:52,880 +goal is to test a hypothesis or form a confidence + +541 +00:44:52,880 --> 00:44:56,080 +interval for the difference between two population + +542 +00:44:56,080 --> 00:45:00,000 +proportion. So here we are interested in the + +543 +00:45:00,000 --> 00:45:02,880 +difference between pi 1 and pi 2. So we are + +544 +00:45:02,880 --> 00:45:06,660 +interested in pi 1 minus pi 2. Still we have the + +545 +00:45:06,660 --> 00:45:11,570 +same assumptions which are n times pi is at least + +546 +00:45:11,570 --> 00:45:15,990 +5, and n times 1 minus pi is also at least 5 for + +547 +00:45:15,990 --> 00:45:19,210 +the first population, and the same for the other + +548 +00:45:19,210 --> 00:45:21,610 +population. So these two conditions or two + +549 +00:45:21,610 --> 00:45:24,650 +assumptions should be satisfied in order to use + +550 +00:45:24,650 --> 00:45:31,350 +the Z statistic in this case. The point estimate, + +551 +00:45:31,510 --> 00:45:34,570 +if you remember, if we have only one population, P + +552 +00:45:34,570 --> 00:45:38,110 +was first. It's a point estimate. + +553 +00:45:42,110 --> 00:45:46,970 +Now we are talking about an estimate of Pi 1 minus + +554 +00:45:46,970 --> 00:45:52,650 +Pi 2. So that means P1 minus P2 is a point + +555 +00:45:52,650 --> 00:45:58,890 +estimate for the difference Pi 1 minus Pi 2. Also + +556 +00:45:58,890 --> 00:46:05,690 +P2 minus P1 is a point estimate of Pi 2 minus Pi + +557 +00:46:05,690 --> 00:46:09,050 +1. 
So this is step number one. We have to find the
+
+558
+00:46:09,050 --> 00:46:13,340
+point estimate of the difference. So B1 minus B2
+
+559
+00:46:13,340 --> 00:46:16,640
+is the point estimate for the difference pi 1
+
+560
+00:46:16,640 --> 00:46:23,380
+minus pi 2. Now, under the null hypothesis, always
+
+561
+00:46:23,380 --> 00:46:28,500
+we are assuming this difference is zero. So in the
+
+562
+00:46:28,500 --> 00:46:32,400
+null hypothesis, we assume the null is true. So
+
+563
+00:46:32,400 --> 00:46:36,680
+suppose the null is true, we assume pi 1 equal pi
+
+564
+00:46:36,680 --> 00:46:40,810
+2. In this case, we pool the two sample
+
+565
+00:46:40,810 --> 00:46:44,670
+estimates. The pooled estimate for the overall
+
+566
+00:46:44,670 --> 00:46:50,710
+proportion is B dash, which is called the overall
+
+567
+00:46:50,710 --> 00:46:55,170
+proportion.
+
+568
+00:46:58,230 --> 00:47:03,350
+Now B dash equals. Now if you remember proportion
+
+569
+00:47:04,400 --> 00:47:08,480
+Or the probability of success is X over N, number
+
+570
+00:47:08,480 --> 00:47:11,300
+of successes divided by the sample size. That's if
+
+571
+00:47:11,300 --> 00:47:13,800
+we have only one sample. But if we have two
+
+572
+00:47:13,800 --> 00:47:17,740
+samples, then the proportion is number of
+
+573
+00:47:17,740 --> 00:47:22,660
+successes in the two samples So suppose X1 and X2
+
+574
+00:47:22,660 --> 00:47:25,320
+are the number of successes in the two samples
+
+575
+00:47:25,320 --> 00:47:29,100
+respectively, then total number is given by X1
+
+576
+00:47:29,100 --> 00:47:34,860
+plus X2 divided by the sample sizes, N1 plus N2.
+
+577
+00:47:35,340 --> 00:47:40,880
+So B dash is for overall proportion is given by X1
+
+578
+00:47:40,880 --> 00:47:47,530
+plus X2 divided by N1 plus N2. This one is called
+
+579
+00:47:47,530 --> 00:47:52,130
+the pooled estimate for the overall proportion.
+
+580
+00:47:56,090 --> 00:47:59,610
+Now, what's about the z-statistic? 
In order to + +581 +00:47:59,610 --> 00:48:02,070 +compute the z-statistic, we have to know the + +582 +00:48:02,070 --> 00:48:04,670 +standard error of the estimate. + +583 +00:48:07,990 --> 00:48:10,090 +Now, the standard error of the estimate is given + +584 +00:48:10,090 --> 00:48:10,990 +by this equation, + +585 +00:48:13,990 --> 00:48:19,570 +1-b-1 over n1 plus 1 over n2. So this is called + +586 +00:48:19,570 --> 00:48:24,050 +the standard error of the difference. So if the + +587 +00:48:24,050 --> 00:48:31,930 +question is what's the value of the standard error + +588 +00:48:31,930 --> 00:48:37,010 +of the difference, you should compute + +589 +00:48:48,180 --> 00:48:55,700 +In this case, your z statistic is always, if you + +590 +00:48:55,700 --> 00:49:00,760 +remember in chapter 6, z score was x minus the + +591 +00:49:00,760 --> 00:49:05,680 +mean of x divided by sigma. In chapter 7, when we + +592 +00:49:05,680 --> 00:49:09,200 +talked about the sampling distribution, we had x + +593 +00:49:09,200 --> 00:49:14,160 +bar. So we had x bar minus the mean of x bar + +594 +00:49:14,160 --> 00:49:18,380 +divided by sigma of x bar, and that was x bar + +595 +00:49:18,380 --> 00:49:20,520 +minus the mean divided by sigma over root. + +596 +00:49:23,500 --> 00:49:27,040 +At the beginning of chapter 10, we talked about + +597 +00:49:27,040 --> 00:49:31,100 +the difference between these two, x1 bar minus x2 + +598 +00:49:31,100 --> 00:49:33,440 +bar, so minus the mean. + +599 +00:49:37,020 --> 00:49:40,300 +This term actually is the standard error of the + +600 +00:49:40,300 --> 00:49:44,120 +estimate. And the standard error was S squared B + +601 +00:49:44,120 --> 00:49:51,960 +multiplied by 1 over N1 plus 1 over N2. Now, when + +602 +00:49:51,960 --> 00:49:54,400 +we are talking about testing for the difference + +603 +00:49:54,400 --> 00:49:59,210 +between two proportions, this statistic is. 
The
+
+604
+00:49:59,210 --> 00:50:04,970
+estimate of the difference, which is b1 minus b2,
+
+605
+00:50:05,090 --> 00:50:10,250
+as we mentioned, minus the hypothesized value,
+
+606
+00:50:10,370 --> 00:50:13,870
+which is pi 1 minus pi 2, most of the time equals
+
+607
+00:50:13,870 --> 00:50:17,590
+0, divided by the standard error of the estimate
+
+608
+00:50:17,590 --> 00:50:21,650
+of this equation. So the standard error is b dash
+
+609
+00:50:21,650 --> 00:50:24,330
+1 minus b dash.
+
+610
+00:50:28,470 --> 00:50:32,250
+So let's use this statistic. P1 minus P2 is the
+
+611
+00:50:32,250 --> 00:50:35,930
+point estimate of Pi 1 minus Pi 2 minus the
+
+612
+00:50:35,930 --> 00:50:40,130
+hypothesized value always, or most of the time
+
+613
+00:50:40,130 --> 00:50:43,830
+this equals zero, divided by the square root of
+
+614
+00:50:43,830 --> 00:50:46,870
+this estimate. And the square root of this
+
+615
+00:50:46,870 --> 00:50:49,090
+estimate is given by this equation, which is the
+
+616
+00:50:49,090 --> 00:50:54,040
+square root of P dash. P dash again is The overall
+
+617
+00:50:54,040 --> 00:50:56,560
+proportion, or the pooled proportion, which is X1
+
+618
+00:50:56,560 --> 00:50:59,940
+plus X2 divided by N1 plus N2. So we have B dash
+
+619
+00:50:59,940 --> 00:51:04,920
+times 1 minus B dash times 1 over N1 plus 1 over
+
+620
+00:51:04,920 --> 00:51:07,380
+N2. That's your Z statistic.
+
+621
+00:51:10,300 --> 00:51:15,320
+So that's the new term for testing about the
+
+622
+00:51:15,320 --> 00:51:17,900
+difference between two population proportions.
+
+623
+00:51:18,800 --> 00:51:23,720
+Now, what's about the null and alternative hypothesis.
+
+624
+00:51:24,380 --> 00:51:27,180
+This slide actually is the same as the one we had
+
+625
+00:51:27,180 --> 00:51:28,960
+discussed for the difference between two
+
+626
+00:51:28,960 --> 00:51:35,040
+populations, except we replace mus by pis. 
So here + +627 +00:51:35,040 --> 00:51:38,940 +we have mu1 equals mu2 by 2, instead of mu1 equals + +628 +00:51:38,940 --> 00:51:44,140 +mu2. So if we go back a little bit to the + +629 +00:51:44,140 --> 00:51:45,080 +beginning of this chapter, + +630 +00:51:47,800 --> 00:51:52,180 +The same as this slide. Here we have means. We + +631 +00:51:52,180 --> 00:51:56,560 +just replace the means by pi's and we'll get + +632 +00:51:56,560 --> 00:52:03,380 +similar upper, lower, and two-tailed test. So we + +633 +00:52:03,380 --> 00:52:08,540 +have pi 1 equal pi 2 against one of these. Either + +634 +00:52:08,540 --> 00:52:11,340 +pi 1 does not equal pi 2, that's two-tailed test, + +635 +00:52:11,780 --> 00:52:15,340 +or pi 1 greater than for upper tail, or pi 1 + +636 +00:52:15,340 --> 00:52:19,920 +smaller than for lower tail. Or this one could be + +637 +00:52:19,920 --> 00:52:23,320 +written as the difference between the two + +638 +00:52:23,320 --> 00:52:26,240 +population proportion is zero against the + +639 +00:52:26,240 --> 00:52:30,640 +difference is not zero. Here, pi 1 greater than pi + +640 +00:52:30,640 --> 00:52:33,380 +2, it means pi 1 minus pi 2 is positive, greater + +641 +00:52:33,380 --> 00:52:37,240 +than zero, or pi 1 minus pi 2 is negative, smaller + +642 +00:52:37,240 --> 00:52:40,480 +than zero. That's how can we state the null and + +643 +00:52:40,480 --> 00:52:43,970 +alternative hypothesis. Now, for the rejection + +644 +00:52:43,970 --> 00:52:47,430 +regions, the same as we discussed before, if we + +645 +00:52:47,430 --> 00:52:50,470 +are talking about two-tailed test, we reject. 
If + +646 +00:52:50,470 --> 00:52:53,390 +your Z statistic falls in the rejection region in + +647 +00:52:53,390 --> 00:52:57,150 +the left side, that means this statistic is less + +648 +00:52:57,150 --> 00:53:00,230 +than negative Z alpha over two, or your Z + +649 +00:53:00,230 --> 00:53:03,790 +statistic is above this critical value, greater + +650 +00:53:03,790 --> 00:53:06,610 +than Z alpha over two. So the same as we discussed + +651 +00:53:06,610 --> 00:53:10,930 +before. For lower tail, The rejection region is to + +652 +00:53:10,930 --> 00:53:13,610 +the left side, so we reject if Z statistic is + +653 +00:53:13,610 --> 00:53:16,990 +smaller than negative Z alpha. Similarly here, we + +654 +00:53:16,990 --> 00:53:19,970 +reject if Z statistic greater than alpha for the + +655 +00:53:19,970 --> 00:53:23,770 +other two figures. So again, critical regions. + +656 +00:53:26,130 --> 00:53:29,330 +Null and alternative hypotheses are the same, but + +657 +00:53:29,330 --> 00:53:31,950 +here we have a new Z statistic, and this one + +658 +00:53:31,950 --> 00:53:35,530 +depends actually on the point estimate. The + +659 +00:53:35,530 --> 00:53:38,210 +hypothesized value and the standard error is + +660 +00:53:38,210 --> 00:53:44,070 +similar to the standard form of this form. But + +661 +00:53:44,070 --> 00:53:48,530 +here we replace b1 minus b2 by x. So x is replaced + +662 +00:53:48,530 --> 00:53:52,530 +by this value. And mu is replaced by pi 1 minus pi + +663 +00:53:52,530 --> 00:53:56,270 +2. And this is the standard deviation of the + +664 +00:53:56,270 --> 00:54:01,870 +estimate, which is given by this quantity. So + +665 +00:54:01,870 --> 00:54:06,690 +that's all for today. So I will stop at this + +666 +00:54:06,690 --> 00:54:09,410 +example for testing two populations. 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI.srt new file mode 100644 index 0000000000000000000000000000000000000000..da6f7090dbb8c062e2b4438652a358302cf8909a --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI.srt @@ -0,0 +1,1682 @@ + +1 +00:00:09,090 --> 00:00:10,450 +Oh, Mohamed, you're ready? You're here? + +2 +00:00:17,950 --> 00:00:21,670 +Inshallah, today we'll talk about measures of + +3 +00:00:21,670 --> 00:00:25,290 +variation. And as we mentioned last time, there + +4 +00:00:25,290 --> 00:00:28,450 +are four measures of variation. And we talked + +5 +00:00:28,450 --> 00:00:31,790 +about range, or the range. And we know that the + +6 +00:00:31,790 --> 00:00:35,850 +range is the distance between the largest and the + +7 +00:00:35,850 --> 00:00:39,950 +smallest value. The other measure of variation is + +8 +00:00:39,950 --> 00:00:44,250 +called the variance. By using the variance, we can + +9 +00:00:44,250 --> 00:00:48,710 +also compute another measure called standard + +10 +00:00:48,710 --> 00:00:52,410 +deviation. Finally, we'll talk a little bit about + +11 +00:00:52,410 --> 00:00:56,630 +coefficient of variation to compare two different + +12 +00:00:56,630 --> 00:01:00,430 +datasets when we have different units or different + +13 +00:01:00,430 --> 00:01:05,360 +measurement. Measures of variation give + +14 +00:01:05,360 --> 00:01:08,280 +information about the spread. Spread means + +15 +00:01:08,280 --> 00:01:13,640 +variability or dispersion of the dataset values + +16 +00:01:13,640 --> 00:01:19,620 +that you have. As we mentioned last time, the + +17 +00:01:19,620 --> 00:01:21,800 +range is the simplest measure of variation, and + +18 +00:01:21,800 --> 00:01:25,720 +it's computed by using this simple formula. The + +19 +00:01:25,720 --> 00:01:28,800 +range is the difference between the largest and + +20 +00:01:28,800 --> 00:01:31,860 +the smallest. 
For a specific example, if we have + +21 +00:01:31,860 --> 00:01:35,720 +this dataset, one, two, three, up to 13, then the + +22 +00:01:35,720 --> 00:01:39,000 +range is going to be 13 minus one, which is 12. So + +23 +00:01:39,000 --> 00:01:42,020 +that's a straightforward example to compute the + +24 +00:01:42,020 --> 00:01:45,370 +range. Now, sometimes the range is misleading. I + +25 +00:01:45,370 --> 00:01:49,630 +mean, the range can give misleading results if the + +26 +00:01:49,630 --> 00:01:54,090 +data set has outliers. For example, imagine if we + +27 +00:01:54,090 --> 00:02:00,450 +have this systematic data, 7, 8, 9, 10, 11, 12. + +28 +00:02:02,350 --> 00:02:06,930 +For this example, the range is the maximum minus + +29 +00:02:06,930 --> 00:02:10,350 +minimum is five, so the distance between the + +30 +00:02:10,350 --> 00:02:13,590 +largest and smallest is five. For the other data, if + +31 +00:02:13,590 --> 00:02:16,850 +you look carefully, we have a gap between seven and + +32 +00:02:16,850 --> 00:02:23,330 +ten. For this reason, this data is more spread than + +33 +00:02:23,330 --> 00:02:27,230 +data number one, but each of them have the same + +34 +00:02:27,230 --> 00:02:31,130 +spread, the same range, because the maximum is + +35 +00:02:31,130 --> 00:02:35,330 +still 12. The smallest is seven, so that will give + +36 +00:02:35,330 --> 00:02:39,190 +five. So both have the same range, but the + +37 +00:02:39,190 --> 00:02:43,330 +variability in data A and data B are completely + +38 +00:02:43,330 --> 00:02:47,070 +different. So we cannot rely on the range as a + +39 +00:02:47,070 --> 00:02:51,530 +measure of spread or deviation. If you look at the + +40 +00:02:51,530 --> 00:02:58,190 +other one now, we have one two three four up to + +41 +00:02:58,190 --> 00:03:02,310 +five. Now the range in this case is four, five minus + +42 +00:03:02,310 --> 00:03:06,030 +one. 
Now if you replace the largest value which was + +43 +00:03:06,030 --> 00:03:10,540 +five, replace it by 120. So 120 is a big number + +44 +00:03:10,540 --> 00:03:14,740 +comparing to the next largest, which is four. So the + +45 +00:03:14,740 --> 00:03:18,060 +range is going to be 119. Now, if you compare the + +46 +00:03:18,060 --> 00:03:22,840 +range for data number one and the other one, four is + +47 +00:03:22,840 --> 00:03:27,640 +nothing comparing to 119. 119 is a big number. So + +48 +00:03:27,640 --> 00:03:34,260 +both datasets have minimum values here, one out of + +49 +00:03:34,260 --> 00:03:38,200 +five. And here also, we have the same minimum, and + +50 +00:03:38,200 --> 00:03:41,480 +approximately the same data values, but one value + +51 +00:03:41,480 --> 00:03:44,520 +is completely different, and your result is + +52 +00:03:44,520 --> 00:03:48,420 +completely different also. So I mean, one point + +53 +00:03:48,420 --> 00:03:55,040 +can change the range significantly. I mean, in a big + +54 +00:03:55,040 --> 00:03:56,940 +difference between the previous one and the other + +55 +00:03:56,940 --> 00:03:59,340 +one. So that means the range is sensitive to + +56 +00:03:59,340 --> 00:04:04,260 +outliers. So, so far, the mean and the range. + +57 +00:04:04,840 --> 00:04:08,360 +These two measures are sensitive to outliers. I + +58 +00:04:08,360 --> 00:04:13,580 +mean, outliers affect the range as well as the + +59 +00:04:13,580 --> 00:04:17,460 +mean. Now, let's move to the new measure of + +60 +00:04:17,460 --> 00:04:21,420 +variation, which is the most commonly used measure + +61 +00:04:21,420 --> 00:04:24,700 +in statistics as a measure of variation, which is + +62 +00:04:24,700 --> 00:04:27,400 +called the variance. + +63 +00:04:30,030 --> 00:04:33,870 +By the way, all the formulas in this class will be + +64 +00:04:33,870 --> 00:04:37,750 +given in the exam. 
I mean, you don't have to + +65 +00:04:37,750 --> 00:04:43,590 +memorize any formula for this course. So this is + +66 +00:04:43,590 --> 00:04:47,730 +the first one. We have the variance. Because later + +67 +00:04:47,730 --> 00:04:50,470 +on we will have complicated formulas, so you don't + +68 +00:04:50,470 --> 00:04:54,670 +need to memorize any of these. So both of them, + +69 +00:04:54,750 --> 00:05:00,830 +all of them, was given in a formula sheet. Now, the + +70 +00:05:00,830 --> 00:05:06,910 +variance. If you look at this formula, and let's + +71 +00:05:06,910 --> 00:05:09,210 +see how can we define the variance based on this + +72 +00:05:09,210 --> 00:05:16,100 +notation. The variance is the average, or + +73 +00:05:16,100 --> 00:05:18,780 +approximately the average. Because here we divide + +74 +00:05:18,780 --> 00:05:23,100 +by n minus 1, of n. So it's not exactly the + +75 +00:05:23,100 --> 00:05:27,100 +average, but approximately the average of what? Of + +76 +00:05:27,100 --> 00:05:32,720 +squared deviations, squared deviations of the + +77 +00:05:32,720 --> 00:05:38,720 +values from its mean. So again, see the variance? + +78 +00:05:39,810 --> 00:05:42,930 +It is the average or approximately the average of + +79 +00:05:42,930 --> 00:05:47,010 +square deviations of values, I mean of the + +80 +00:05:47,010 --> 00:05:50,390 +observations you have from the mean. That will + +81 +00:05:50,390 --> 00:05:55,510 +give the sample variance, actually. So S squared, + +82 +00:05:55,930 --> 00:06:00,490 +the formula is summation of Xi minus X bar + +83 +00:06:00,490 --> 00:06:03,130 +squared, and I goes from one up to N, N is the + +84 +00:06:03,130 --> 00:06:06,310 +sample size you have, then divide by N minus one. + +85 +00:06:06,930 --> 00:06:09,850 +Now, what's the reason for dividing by N minus one? + +86 +00:06:09,950 --> 00:06:13,190 +Later, insha'Allah, in chapter around chapter 8, + +87 +00:06:13,550 --> 00:06:16,890 +we'll talk about degrees of freedom. 
And in that + +88 +00:06:16,890 --> 00:06:20,930 +case, we can explain in detail why we divide by N + +89 +00:06:20,930 --> 00:06:24,130 +minus 1 instead of N. But also next week, we'll + +90 +00:06:24,130 --> 00:06:27,530 +talk about population variance. And in that + +91 +00:06:27,530 --> 00:06:30,550 +formula, you'll see that we divide this sum by + +92 +00:06:30,550 --> 00:06:33,730 +capital N. So you'll see the difference between + +93 +00:06:34,590 --> 00:06:40,630 +sample variance and population variance. Now X bar + +94 +00:06:40,630 --> 00:06:44,610 +is the arithmetic mean or the mean, N is the + +95 +00:06:44,610 --> 00:06:48,130 +sample size, and Xi is the ith value of the data + +96 +00:06:48,130 --> 00:06:53,570 +you have. So this is the formula to compute the + +97 +00:06:53,570 --> 00:06:55,590 +sample variance. + +98 +00:06:58,310 --> 00:07:01,950 +Now, the other measure is called the standard + +99 +00:07:01,950 --> 00:07:05,600 +deviation. Standard deviation is just the square + +100 +00:07:05,600 --> 00:07:12,640 +root of the variance. And both of them measure the + +101 +00:07:12,640 --> 00:07:17,960 +spread of the data around the mean. Now, most of + +102 +00:07:17,960 --> 00:07:21,780 +the time, we use the standard deviation, not the + +103 +00:07:21,780 --> 00:07:25,920 +variance, because the standard deviation has the + +104 +00:07:25,920 --> 00:07:29,140 +same units as the original data. For example, + +105 +00:07:29,600 --> 00:07:31,280 +imagine that we have age. + +106 +00:07:34,630 --> 00:07:43,690 +And age, the unit of age is years. For + +107 +00:07:43,690 --> 00:07:49,130 +example, if we compute the sample variance and + +108 +00:07:49,130 --> 00:07:55,070 +suppose the value was six, so the unit of a square + +109 +00:07:55,070 --> 00:07:59,810 +should be year square because we squared the + +110 +00:07:59,810 --> 00:08:03,050 +deviation. So the unit should be square of the + +111 +00:08:03,050 --> 00:08:07,450 +original unit. 
But if we take the square root of 6 + +112 +00:08:07,450 --> 00:08:11,310 +for example, and also you have to take the square + +113 +00:08:11,310 --> 00:08:15,410 +root of this unit, I mean you have to take the square + +114 +00:08:15,410 --> 00:08:20,190 +root of your square. That will give you. So here, the + +115 +00:08:20,190 --> 00:08:24,350 +standard deviation has the same unit as the + +116 +00:08:24,350 --> 00:08:27,850 +original unit. For this reason, most of the time, + +117 +00:08:28,010 --> 00:08:31,350 +we are using the standard deviation rather than + +118 +00:08:31,350 --> 00:08:36,930 +the variance. So again, standard deviation is the + +119 +00:08:36,930 --> 00:08:39,910 +most commonly used measure of variation in + +120 +00:08:39,910 --> 00:08:44,930 +statistics. Standard deviation shows variation + +121 +00:08:44,930 --> 00:08:48,650 +about the mean. And also, the standard deviation + +122 +00:08:48,650 --> 00:08:51,230 +is the square root of the variance. And as I + +123 +00:08:51,230 --> 00:08:53,490 +mentioned, it has the same unit as the original + +124 +00:08:53,490 --> 00:08:57,780 +data you have. Now, the question is how can we + +125 +00:08:57,780 --> 00:09:03,140 +compute the sample standard deviation? If you look + +126 +00:09:03,140 --> 00:09:08,320 +at the formula carefully, here we have sum of xi + +127 +00:09:08,320 --> 00:09:12,120 +minus x bar squared, divided by n minus one, we have + +128 +00:09:12,120 --> 00:09:14,880 +x bar, so that means first step you have to compute + +129 +00:09:14,880 --> 00:09:20,540 +the mean. Next step, compute the difference between + +130 +00:09:20,540 --> 00:09:24,980 +each value and the mean. So imagine that we have + +131 +00:09:25,630 --> 00:09:30,450 +data points, X, or random variable X, and we + +132 +00:09:30,450 --> 00:09:35,190 +computed X bar. 
Next step, we have to compute the + +133 +00:09:35,190 --> 00:09:40,200 +difference, or the distance, between each value and + +134 +00:09:40,200 --> 00:09:44,320 +the mean, the mean, for example, is whatever is that + +135 +00:09:44,320 --> 00:09:47,460 +value. So, x minus x bar. So, first step, you have to + +136 +00:09:47,460 --> 00:09:49,960 +compute the difference between each value and the + +137 +00:09:49,960 --> 00:09:53,180 +mean. Next step, or step number two, square each + +138 +00:09:53,180 --> 00:09:57,800 +difference, I mean take this value and square it. + +139 +00:09:57,800 --> 00:10:02,200 +Now, it makes sense that the variance should be + +140 +00:10:02,200 --> 00:10:07,790 +positive. Because you squared this distance, even + +141 +00:10:07,790 --> 00:10:10,630 +if it's negative, negative squared is positive. + +142 +00:10:11,470 --> 00:10:15,750 +So S squared is always positive. Later, we'll + +143 +00:10:15,750 --> 00:10:21,590 +talk a little bit about this point. After we + +144 +00:10:21,590 --> 00:10:25,930 +square each value, I mean each deviation, just in + +145 +00:10:25,930 --> 00:10:28,330 +step number three, you have to add the square + +146 +00:10:28,330 --> 00:10:32,730 +differences, I mean for this column, add these + +147 +00:10:32,730 --> 00:10:36,950 +values together, so we have the sum of X minus X + +148 +00:10:36,950 --> 00:10:43,930 +bar squared. Finally, to get the sample variance, + +149 +00:10:44,910 --> 00:10:49,950 +you have to divide this total by N minus 1. So + +150 +00:10:49,950 --> 00:10:54,130 +divide this value by n minus one, that will give the + +151 +00:10:54,130 --> 00:10:58,670 +sample variance. To compute the standard + +152 +00:10:58,670 --> 00:11:02,110 +deviation, just take the square root of your + +153 +00:11:02,110 --> 00:11:06,090 +result in step four, I mean for example imagine + +154 +00:11:06,090 --> 00:11:11,470 +that S squared is 36. 
This is the sample variance, + +155 +00:11:11,590 --> 00:11:15,790 +so S equals six. So, just take the square root of + +156 +00:11:15,790 --> 00:11:16,970 +that value. + +157 +00:11:20,320 --> 00:11:23,180 +Now, let's compute the standard deviation for a + +158 +00:11:23,180 --> 00:11:24,180 +specific dataset. + +159 +00:11:30,620 --> 00:11:34,420 +Suppose we have sample data as 10, 12, 14, up to + +160 +00:11:34,420 --> 00:11:37,800 +24 sample data just for illustration. Sometimes we + +161 +00:11:37,800 --> 00:11:41,040 +have a huge, or large dataset. In that case, you have + +162 +00:11:41,040 --> 00:11:44,480 +to use software, such as software, maybe use Excel, + +163 +00:11:45,260 --> 00:11:48,950 +SPSS, Minitab, or whatever software you have. For + +164 +00:11:48,950 --> 00:11:53,830 +this example, there are eight observations, so n + +165 +00:11:53,830 --> 00:11:56,690 +equals eight. So the sample size is eight. Just + +166 +00:11:56,690 --> 00:11:59,250 +count these values, one, two, three, four, up to + +167 +00:11:59,250 --> 00:12:02,270 +eight, so that the sample size is eight. So n + +168 +00:12:02,270 --> 00:12:07,330 +equals eight. The mean or the average, add these + +169 +00:12:07,330 --> 00:12:09,930 +values, as we mentioned last time, and divide by + +170 +00:12:09,930 --> 00:12:12,470 +the total number of observations. That will give + +171 +00:12:12,470 --> 00:12:16,170 +16, because if we add these values, the sum is + +172 +00:12:16,170 --> 00:12:22,210 +128. 128 divided by 8, that will give 16. So we + +173 +00:12:22,210 --> 00:12:28,510 +computed the average first. Step two, we have + +174 +00:12:28,510 --> 00:12:37,530 +data. For example, 10. Take the deviation for each + +175 +00:12:37,530 --> 00:12:43,070 +data value you have, from its mean. So, I mean, 10 + +176 +00:12:43,070 --> 00:12:49,190 +minus 16, that will give minus six. And do the same + +177 +00:12:49,190 --> 00:12:54,270 +for the rest. 
I mean, for 12, so 12 minus 16 is + +178 +00:12:54,270 --> 00:12:58,590 +negative four. All the way up to the last value, 24. + +179 +00:12:59,690 --> 00:13:06,990 +So 24 minus 16, gives 8. That's step number two. So, + +180 +00:13:06,990 --> 00:13:11,400 +we've computed the mean, then we found, or we + +181 +00:13:11,400 --> 00:13:15,380 +compute the distance between each value and its + +182 +00:13:15,380 --> 00:13:21,220 +mean. And now, it's squared. So x minus x bar + +183 +00:13:21,220 --> 00:13:28,620 +squared, so minus six squared, you get 36, 16, up to + +184 +00:13:28,620 --> 00:13:32,920 +64, all the way down, up to 64. Now, sum of these + +185 +00:13:32,920 --> 00:13:37,360 +values. Sum all of these values, you will get 130. + +186 +00:13:39,700 --> 00:13:43,360 +So the sum of the square deviation of each value + +187 +00:13:43,360 --> 00:13:47,480 +and the mean equals 130. So, the standard deviation + +188 +00:13:47,480 --> 00:13:51,420 +is the formula we have. Divide this value, I mean + +189 +00:13:51,420 --> 00:13:56,100 +divide 130 by n minus 1, the sample size as we + +190 +00:13:56,100 --> 00:14:00,400 +mentioned was 8. So 8 minus 1 is 7, so 130 + +191 +00:14:00,400 --> 00:14:06,540 +divided by 7, will give 4.3. So that's the way to + +1 + +223 +00:16:48,780 --> 00:16:52,740 +values. So it makes sense that this dataset has + +224 +00:16:52,740 --> 00:16:58,460 +the smallest deviation around the mean. And if you + +225 +00:16:58,460 --> 00:17:01,660 +compute the actual standard deviation, if you look + +226 +00:17:01,660 --> 00:17:07,480 +at that value, it's 0.926, so it's less than 1. If + +227 +00:17:07,480 --> 00:17:12,040 +you look at data number 1, now there is a gap + +228 +00:17:12,040 --> 00:17:17,780 +between this value 13 and 16. Also, there is a gap + +229 +00:17:17,780 --> 00:17:22,440 +between 18 and 21. 
That's why the standard + +230 +00:17:22,440 --> 00:17:25,040 +deviation becomes around + +231 +00:17:36,470 --> 00:17:41,420 +Now for data C. Maybe this is the worst + +232 +00:17:41,420 --> 00:17:45,020 +distribution compared to the previous two. Because + +233 +00:17:45,020 --> 00:17:48,240 +there is a big gap between or big distance between + +234 +00:17:48,240 --> 00:17:52,820 +12 and 19. For this reason, the standard deviation + +235 +00:17:52,820 --> 00:17:57,300 +is around 4.5. So it has the largest standard + +236 +00:17:57,300 --> 00:18:00,620 +deviation. So by using the standard deviation, we + +237 +00:18:00,620 --> 00:18:05,460 +can tell which one has the smallest or the largest + +238 +00:18:05,460 --> 00:18:09,850 +deviation. So in general, in statistics, we report + +239 +00:18:09,850 --> 00:18:13,370 +both the mean and the variance, or the standard + +240 +00:18:13,370 --> 00:18:16,170 +deviation actually. You have to say that the mean + +241 +00:18:16,170 --> 00:18:21,110 +is 15.5 with standard deviation 3.3. Another + +242 +00:18:21,110 --> 00:18:25,790 +dataset has the same mean but different standard + +243 +00:18:25,790 --> 00:18:28,530 +deviation. So in this case, we can tell which one + +244 +00:18:28,530 --> 00:18:31,030 +has more spread. + +245 +00:18:33,050 --> 00:18:35,850 +Suppose we have curves for + +246 +00:18:42,570 --> 00:18:47,410 +or symmetric distributions. Now, both of these + +247 +00:18:47,410 --> 00:18:50,590 +distributions are symmetric. I mean by symmetric, + +248 +00:18:51,310 --> 00:18:59,430 +the mean splits the data into two halves. 
the data
+
+249
+00:18:59,430 --> 00:19:03,690
+into two equal areas. Data A: the area to the right is
+
+250
+00:19:03,690 --> 00:19:05,970
+the same as the area to the left now if you look
+
+251
+00:19:05,970 --> 00:19:10,730
+at the mean for the center this dashed line it's
+
+252
+00:19:10,730 --> 00:19:15,830
+the same for each one so that means the means are
+
+253
+00:19:15,830 --> 00:19:19,690
+equal for data A and data B so the two graphs have
+
+254
+00:19:19,690 --> 00:19:23,090
+the same mean but if you look carefully at the
+
+255
+00:19:23,090 --> 00:19:27,790
+spread for the variability for figure number one
+
+256
+00:19:29,870 --> 00:19:35,430
+The spread here seems to be narrower than the
+
+257
+00:19:35,430 --> 00:19:41,390
+other one. The other one is wider. So, data A is
+
+258
+00:19:41,390 --> 00:19:45,430
+less spread than data B, even if they have the
+
+259
+00:19:45,430 --> 00:19:50,780
+same center. See, you can tell if the
+
+260
+00:19:50,780 --> 00:19:55,980
+data has a smaller or larger standard deviation just
+
+261
+00:19:55,980 --> 00:19:59,020
+based on the curve you have just look at the
+
+262
+00:19:59,020 --> 00:20:02,280
+distance between the vertical line I mean the mean
+
+263
+00:20:02,280 --> 00:20:06,080
+which splits the curve into two areas two equal
+
+264
+00:20:06,080 --> 00:20:13,080
+areas now the distance here this one actually it
+
+265
+00:20:13,080 --> 00:20:17,320
+is larger than the other so we can say that Data A
+
+266
+00:20:17,320 --> 00:20:21,000
+is less spread than data B. So by using the
+
+267
+00:20:21,000 --> 00:20:23,540
+standard deviation, I mean the actual value, or
+
+268
+00:20:23,540 --> 00:20:27,020
+just look at the graph. You can tell which one has
+
+269
+00:20:27,020 --> 00:20:28,020
+more spread. 
+ +270 +00:20:33,740 --> 00:20:38,560 +To summarize some facts about standard deviation, + +271 +00:20:38,720 --> 00:20:42,780 +you can see that the more the data are spread out, + +272 +00:20:44,800 --> 00:20:47,800 +The greater the range, variance, and standard + +273 +00:20:47,800 --> 00:20:50,840 +deviation. So if the data is spread out, it means + +274 +00:20:50,840 --> 00:20:56,040 +you have large range, variance, and standard + +275 +00:20:56,040 --> 00:21:00,680 +deviation. The more the data are concentrated, I + +276 +00:21:00,680 --> 00:21:05,800 +mean gathered around each other, the smaller the + +277 +00:21:05,800 --> 00:21:08,360 +range, variance, and standard deviation. So that's + +278 +00:21:08,360 --> 00:21:11,140 +the difference when the data are concentrated or + +279 +00:21:11,140 --> 00:21:18,230 +spread out. large values and on the other hand we + +280 +00:21:18,230 --> 00:21:25,230 +have small values now there is one case the sample + +281 +00:21:25,230 --> 00:21:30,190 +mean or the variance or the range equals zero if + +282 +00:21:30,190 --> 00:21:34,290 +the data values are all the same and maybe never + +283 +00:21:34,290 --> 00:21:38,910 +happened in the real life in reality you never + +284 +00:21:38,910 --> 00:21:45,170 +maybe ninety nine percent the data set I mean the + +285 +00:21:45,170 --> 00:21:48,370 +values of innocence are not equal. But imagine + +286 +00:21:48,370 --> 00:21:51,870 +that you have a data of the same values. Suppose + +287 +00:21:51,870 --> 00:21:57,190 +we have five students of five children and their + +288 +00:21:57,190 --> 00:22:00,790 +age is five, five, five, five. So the average of + +289 +00:22:00,790 --> 00:22:03,490 +this one is five. Now what's the range? 
+ +290 +00:22:07,030 --> 00:22:10,670 +minimum is the same as maximum so the range is + +291 +00:22:10,670 --> 00:22:15,090 +zero if you compute the variance because x minus x + +292 +00:22:15,090 --> 00:22:18,910 +bar this value minus x bar is zero for the risk + +293 +00:22:18,910 --> 00:22:23,970 +it's also zero so zero so the solution is zero so + +294 +00:22:23,970 --> 00:22:27,050 +this is the only time you see that the these + +295 +00:22:27,050 --> 00:22:30,610 +measures equal zero if all the values have the + +296 +00:22:30,610 --> 00:22:35,410 +same and that's never happened the other one is + +297 +00:22:35,410 --> 00:22:39,690 +none of these measures are ever negative because + +298 +00:22:39,690 --> 00:22:47,630 +the range is max minus min negative squared + +299 +00:22:47,630 --> 00:22:50,810 +becomes positive for this reason the variance and + +300 +00:22:50,810 --> 00:22:54,650 +standard deviation is always greater than zero so + +301 +00:22:54,650 --> 00:23:00,140 +you can see a square is greater than zero Or + +302 +00:23:00,140 --> 00:23:03,720 +equal, and equal just if the values are the same, + +303 +00:23:04,220 --> 00:23:11,300 +or are equal. Any question? So we explained range, + +304 +00:23:12,080 --> 00:23:13,820 +variance, and standard deviation. + +305 +00:23:18,460 --> 00:23:23,580 +Outliers affect the variance and standard + +306 +00:23:23,580 --> 00:23:28,720 +deviation. We said that outliers affect the range + +307 +00:23:28,720 --> 00:23:30,850 +and the mean. Now what's about standard deviation? + +308 +00:23:32,330 --> 00:23:39,630 +Look at this formula again. Now in + +309 +00:23:39,630 --> 00:23:42,170 +order to compute the sample standard deviation, we + +310 +00:23:42,170 --> 00:23:46,130 +have to compute the mean first. And we know that + +311 +00:23:46,130 --> 00:23:49,590 +outliers affect the mean much more than the + +312 +00:23:49,590 --> 00:23:54,950 +median. 
Since outliers affect X bar, and S is a + +313 +00:23:54,950 --> 00:23:58,890 +function of X bar, that means also outliers affect + +314 +00:23:58,890 --> 00:24:03,050 +the standard deviation. So, so far, outliers + +315 +00:24:03,050 --> 00:24:10,110 +affect mean, range, standard deviation. So in case + +316 +00:24:10,110 --> 00:24:12,810 +of outliers, I mean in the presence of outliers, + +317 +00:24:13,670 --> 00:24:17,430 +you have to avoid using these measures. We have to + +318 +00:24:17,430 --> 00:24:19,450 +use something else, and that's what we'll talk + +319 +00:24:19,450 --> 00:24:23,410 +later about, inshallah. So the mean is affected by + +320 +00:24:23,410 --> 00:24:26,970 +outlier. Last time we said that the median should + +321 +00:24:26,970 --> 00:24:30,430 +be used in this case because the median is less + +322 +00:24:30,430 --> 00:24:34,730 +affected by outliers than the mean. So outliers + +323 +00:24:34,730 --> 00:24:39,210 +affect the mean much more than the median. Now + +324 +00:24:39,210 --> 00:24:41,770 +what's about the range? The range is affected by + +325 +00:24:41,770 --> 00:24:45,690 +outliers. So in case of outliers, you have to use + +326 +00:24:45,690 --> 00:24:49,350 +another measure. That will be next time, + +327 +00:24:49,470 --> 00:24:54,300 +inshallah. Let's move to another measure of + +328 +00:24:54,300 --> 00:24:57,300 +variation which is called coefficient of + +329 +00:24:57,300 --> 00:24:57,700 +variation. + +330 +00:25:00,920 --> 00:25:04,900 +Consider we have two datasets, one for age, + +331 +00:25:09,260 --> 00:25:13,960 +for example, one for age and + +332 +00:25:13,960 --> 00:25:15,200 +other one for weight. 
+ +333 +00:25:18,330 --> 00:25:22,730 +suppose someone computed the mean and the standard + +334 +00:25:22,730 --> 00:25:26,290 +deviation and he found that the standard deviation + +335 +00:25:26,290 --> 00:25:31,630 +is for example is 10 so 10 units because the unit + +336 +00:25:31,630 --> 00:25:36,190 +is units here and the standard deviation for + +337 +00:25:36,190 --> 00:25:42,610 +weight for example is 7 the weight the unit of the + +338 +00:25:42,610 --> 00:25:47,610 +weight is weight is kilogram now can you say that + +339 +00:25:49,240 --> 00:25:53,820 +Age, data for age is more spread than weight + +340 +00:25:53,820 --> 00:25:59,780 +because 10 is greater than 7. You cannot say that + +341 +00:25:59,780 --> 00:26:04,880 +because you have different units. So you cannot + +342 +00:26:04,880 --> 00:26:08,040 +compare the spread of the data just based on the + +343 +00:26:08,040 --> 00:26:10,780 +sample standard deviation. So we need a measure + +344 +00:26:10,780 --> 00:26:17,720 +that compute this variability regardless of the + +345 +00:26:17,720 --> 00:26:22,220 +original units. That measure is called coefficient + +346 +00:26:22,220 --> 00:26:25,640 +of variation. So coefficient of variation is a + +347 +00:26:25,640 --> 00:26:32,540 +measure for relative variation always represents + +348 +00:26:32,540 --> 00:26:37,360 +in percentage shows variation relative to the mean + +349 +00:26:37,360 --> 00:26:43,260 +and can be used to compare the variability of two + +350 +00:26:43,260 --> 00:26:47,760 +or more than two sets of data measured in + +351 +00:26:47,760 --> 00:26:53,680 +different units. The formula for CV or coefficient + +352 +00:26:53,680 --> 00:26:58,770 +of variation S divided by X bar. So we have to + +353 +00:26:58,770 --> 00:27:01,990 +compute both the mean and the standard deviation + +354 +00:27:01,990 --> 00:27:05,090 +in order to compute CV. 
S divided by X bar
+
+355
+00:27:05,090 --> 00:27:08,850
+multiplied by 100 gives the coefficient of
+
+356
+00:27:08,850 --> 00:27:13,730
+variation. Now, for example, suppose we have two
+
+357
+00:27:13,730 --> 00:27:19,490
+stock markets, stock A and stock B. And let's say
+
+358
+00:27:19,490 --> 00:27:22,830
+that the average price last year for stock A was
+
+359
+00:27:22,830 --> 00:27:29,380
+$50. And the standard deviation was five. For
+
+360
+00:27:29,380 --> 00:27:36,500
+stock B, the average price last year was $100 with
+
+361
+00:27:36,500 --> 00:27:40,560
+the same standard deviations. It's five. Now let's
+
+362
+00:27:40,560 --> 00:27:44,660
+compare which one, which price I mean, which stock
+
+363
+00:27:44,660 --> 00:27:49,060
+price is more spread than the other one. I mean,
+
+364
+00:27:49,260 --> 00:27:55,320
+which one has big gaps between the values and the
+
+365
+00:27:55,320 --> 00:27:59,300
+mean. I mean, which values are concentrated around
+
+366
+00:27:59,300 --> 00:28:02,580
+each other, stock A, price of stock A or stock B.
+
+367
+00:28:03,720 --> 00:28:07,520
+Just for simple calculation, CV for stock A is
+
+368
+00:28:07,520 --> 00:28:11,140
+just S over X bar multiplied by 100. That will
+
+369
+00:28:11,140 --> 00:28:17,360
+give 5 divided by 50 times 100 gives 10%. So the
+
+370
+00:28:17,360 --> 00:28:19,260
+coefficient of variation in this case is 10.
+
+371
+00:28:21,670 --> 00:28:27,630
+What's about the other one? S equals 5. The
+
+372
+00:28:27,630 --> 00:28:31,950
+average of stock B last year was 100
+
+373
+00:28:31,950 --> 00:28:40,520
+multiplied by 100 gives 5%. So CV for A. 
is
+
+374
+00:28:40,520 --> 00:28:45,820
+greater than CV for B that means stock A I mean
+
+375
+00:28:45,820 --> 00:28:52,460
+prices of stock A are more spread than stock B
+
+376
+00:28:52,460 --> 00:28:58,510
+that means stock B is more stable So the stability
+
+377
+00:28:58,510 --> 00:29:04,030
+in stock B is more than that in stock A, because
+
+378
+00:29:04,030 --> 00:29:08,210
+this one is less variable, less variability in the
+
+379
+00:29:08,210 --> 00:29:12,030
+data. So in this case, we can compute, I mean, we
+
+380
+00:29:12,030 --> 00:29:18,530
+can compare different data sets. That's all for
+
+381
+00:29:18,530 --> 00:29:21,470
+measures of variation. So we talked about range,
+
+382
+00:29:23,810 --> 00:29:28,230
+variance, standard deviation, And finally
+
+383
+00:29:28,230 --> 00:29:33,470
+coefficient of variation. Next topic talks about z
+
+384
+00:29:33,470 --> 00:29:36,950
+-score. Most of the time we are talking about
+
+385
+00:29:36,950 --> 00:29:41,550
+extreme values or extreme outliers. Now let's see
+
+386
+00:29:41,550 --> 00:29:45,370
+how can we tell if this point is considered to be
+
+387
+00:29:45,370 --> 00:29:51,110
+an outlier. We have some data values and you want
+
+388
+00:29:51,110 --> 00:29:55,150
+to use the mean as a measure of center or the
+
+389
+00:29:55,150 --> 00:29:58,670
+median, which one you have to use. If the data set
+
+390
+00:29:58,670 --> 00:30:01,670
+has outliers, as we mentioned, then use the
+
+391
+00:30:01,670 --> 00:30:04,370
+median. Now the question is how can you determine
+
+392
+00:30:04,370 --> 00:30:08,780
+if the data set has outliers? There are several
+
+393
+00:30:08,780 --> 00:30:12,680
+methods. Today we'll talk just about z-score,
+
+394
+00:30:12,800 --> 00:30:17,680
+later we'll talk about something else. And z-score
+
+395
+00:30:17,680 --> 00:30:20,580
+sometimes called standardized z-score. 
+ +396 +00:30:22,380 --> 00:30:27,400 +Standardized value + +397 +00:30:27,400 --> 00:30:31,440 +or standardized score. So standardized stands for + +398 +00:30:31,440 --> 00:30:35,060 +z. More details about z-score will be in chapter 6 + +399 +00:30:35,060 --> 00:30:38,360 +when we talk about normal distribution, but at + +400 +00:30:38,360 --> 00:30:43,540 +least here we'll just use z-score to determine or + +401 +00:30:43,540 --> 00:30:46,840 +to know if this point is considered to be or + +402 +00:30:46,840 --> 00:30:50,640 +suspected to be extreme value or extreme outlier + +403 +00:30:50,640 --> 00:30:54,480 +only. To compute the z-score by data value, + +404 +00:30:54,820 --> 00:30:57,560 +subtract the mean and divide by the standard + +405 +00:30:57,560 --> 00:31:04,220 +deviation. That means suppose we have data set x + +406 +00:31:04,220 --> 00:31:07,540 +or data value for each one we can complete this + +407 +00:31:07,540 --> 00:31:15,800 +code we have data value x subtract x mean then + +408 +00:31:15,800 --> 00:31:20,440 +divide by standardization Now again to compute + +409 +00:31:20,440 --> 00:31:22,760 +this score you have to compute the mean and + +410 +00:31:22,760 --> 00:31:26,640 +standard deviation the same as CV. So this score + +411 +00:31:26,640 --> 00:31:31,700 +is the distance x minus x bar + +445 +00:34:18,970 --> 00:34:21,490 +meaning of one. So if z score is one, that means + +446 +00:34:21,490 --> 00:34:24,330 +my score is above the mean by one standard + +447 +00:34:24,330 --> 00:34:33,370 +deviation. Now suppose your friend, she has 60 in + +448 +00:34:33,370 --> 00:34:37,990 +the same class, the same course, for sure the same + +449 +00:34:37,990 --> 00:34:41,770 +mean and the same standard deviation. We are + +450 +00:34:41,770 --> 00:34:46,370 +talking about the same class. So here the score is + +451 +00:34:46,370 --> 00:34:51,930 +going to be 60 minus 70 divided by 5, so that's + +452 +00:34:51,930 --> 00:34:57,290 +minus 10. 
Now the difference between her score + +453 +00:34:57,290 --> 00:35:03,690 +and the average is 10, and 10 is twice the + +454 +00:35:03,690 --> 00:35:09,530 +standard deviation, below or above. below the mean + +455 +00:35:09,530 --> 00:35:13,030 +is negative, here score is 60, the average was 70, so + +456 +00:35:13,030 --> 00:35:16,090 +her score is below the mean so you can say that + +457 +00:35:16,090 --> 00:35:22,410 +her score is two standard deviation below the + +458 +00:35:22,410 --> 00:35:26,830 +mean. So you can tell if the score is above or + +459 +00:35:26,830 --> 00:35:32,830 +below the mean. Next, now we are looking to locate + +460 +00:35:32,830 --> 00:35:36,970 +extreme values. I mean, if you want to examine if + +461 +00:35:36,970 --> 00:35:41,090 +the data is an outlier or not. Now the rule of thumb + +462 +00:35:41,090 --> 00:35:46,690 +is, rule in general, a data value is considered an + +463 +00:35:46,690 --> 00:35:51,810 +extreme outlier if its z-score is less than + +464 +00:35:51,810 --> 00:35:56,910 +negative three or greater than three. So suppose + +465 +00:35:56,910 --> 00:35:58,970 +you compute a z-score for a data point, + +466 +00:36:01,650 --> 00:36:05,830 +and this score, for example, for this one was one. + +467 +00:36:07,190 --> 00:36:08,710 +So the question is, + +468 +00:36:11,050 --> 00:36:19,550 +does 75 outlier or not? The rule is that if the + +469 +00:36:19,550 --> 00:36:22,390 +data value is greater than three, I mean this + +470 +00:36:22,390 --> 00:36:29,160 +area, or smaller than negative three. In this + +471 +00:36:29,160 --> 00:36:32,900 +case, the point is suspected to be extreme + +472 +00:36:32,900 --> 00:36:36,700 +outlier, otherwise it's okay. Now my z-score is + +473 +00:36:36,700 --> 00:36:39,640 +one, so one lies between minus three and plus + +474 +00:36:39,640 --> 00:36:44,080 +three, so the point is not an outlier. 
So it's so + +475 +00:36:44,080 --> 00:36:46,760 +easy to determine if the point is outlier or not, + +476 +00:36:46,820 --> 00:36:51,540 +just look at the z-score. If it is above three, it's + +477 +00:36:51,540 --> 00:36:55,020 +an outlier. Less than minus three is an outlier. + +478 +00:36:55,140 --> 00:36:58,580 +Otherwise, the data is okay. The larger the + +479 +00:36:58,580 --> 00:37:02,020 +absolute value of the z-score, the farther the + +480 +00:37:02,020 --> 00:37:08,320 +data value is from the mean. That means suppose + +481 +00:37:08,320 --> 00:37:15,040 +you have a z-score of 3.5 and another one is 2.5. + +482 +00:37:16,340 --> 00:37:21,500 +Which one is the data value that's farther from the mean? + +483 +00:37:24,070 --> 00:37:31,190 +3.5, so this one is a data value that is further from the + +484 +00:37:31,190 --> 00:37:34,830 +mean. So again, this score diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..b7a92acdde049fdb0a9427165bf88dee9b1bed13 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI_postprocess.srt @@ -0,0 +1,1936 @@ +1 +00:00:09,090 --> 00:00:10,450 +Oh, Mohamed, you're ready? You're here? + +2 +00:00:17,950 --> 00:00:21,670 +Inshallah, today we'll talk about measures of + +3 +00:00:21,670 --> 00:00:25,290 +variation. And as we mentioned last time, there + +4 +00:00:25,290 --> 00:00:28,450 +are four measures of variation. And we talked + +5 +00:00:28,450 --> 00:00:31,790 +about range, or the range. And we know that the + +6 +00:00:31,790 --> 00:00:35,850 +range is the distance between the largest and the + +7 +00:00:35,850 --> 00:00:39,950 +smallest value. The other measure of variation is + +8 +00:00:39,950 --> 00:00:44,250 +called the variance. 
By using the variance, we can + +9 +00:00:44,250 --> 00:00:48,710 +also compute another measure is called standard + +10 +00:00:48,710 --> 00:00:52,410 +deviation. Finally, we'll talk a little bit about + +11 +00:00:52,410 --> 00:00:56,630 +coefficient of variation to compare two different + +12 +00:00:56,630 --> 00:01:00,430 +datasets when we have different units or different + +13 +00:01:00,430 --> 00:01:05,360 +measurement. Measures of variation give + +14 +00:01:05,360 --> 00:01:08,280 +information of the spread. Spread means + +15 +00:01:08,280 --> 00:01:13,640 +variability or dispersion of the dataset values + +16 +00:01:13,640 --> 00:01:19,620 +that you have. As we mentioned last time, the + +17 +00:01:19,620 --> 00:01:21,800 +range is the simplest measure of variation, and + +18 +00:01:21,800 --> 00:01:25,720 +it's computed by using this simple formula. The + +19 +00:01:25,720 --> 00:01:28,800 +range is the difference between the largest and + +20 +00:01:28,800 --> 00:01:31,860 +the smallest. For a specific example, if we have + +21 +00:01:31,860 --> 00:01:35,720 +this dataset, one, two, three, up to 13, then the + +22 +00:01:35,720 --> 00:01:39,000 +range is going to be 13 minus one, which is 12. So + +23 +00:01:39,000 --> 00:01:42,020 +that's straightforward example to compute the + +24 +00:01:42,020 --> 00:01:45,370 +range. Now, sometimes the range is misleading. I + +25 +00:01:45,370 --> 00:01:49,630 +mean, the range can give misleading results if the + +26 +00:01:49,630 --> 00:01:54,090 +data set has outliers. For example, imagine if we + +27 +00:01:54,090 --> 00:02:00,450 +have this systematic data, 7, 8, 9, 10, 11, 12. 
+ +28 +00:02:02,350 --> 00:02:06,930 +for this example the range is the maximum minus + +29 +00:02:06,930 --> 00:02:10,350 +minimum is five so the distance between the + +30 +00:02:10,350 --> 00:02:13,590 +largest and smallest is five for the other data if + +31 +00:02:13,590 --> 00:02:16,850 +you look carefully we have a gap between seven and + +32 +00:02:16,850 --> 00:02:23,330 +ten for this reason This data is more spread than + +33 +00:02:23,330 --> 00:02:27,230 +data number one, but each of them have the same + +34 +00:02:27,230 --> 00:02:31,130 +spread, the same range, because the maximum is + +35 +00:02:31,130 --> 00:02:35,330 +still 12. The smallest is seven, so that will give + +36 +00:02:35,330 --> 00:02:39,190 +five. So both have the same range, but the + +37 +00:02:39,190 --> 00:02:43,330 +variability in data A and data B are completely + +38 +00:02:43,330 --> 00:02:47,070 +different. So we cannot rely on the range as a + +39 +00:02:47,070 --> 00:02:51,530 +measure of spread or deviation. if you look at the + +40 +00:02:51,530 --> 00:02:58,190 +other one now we have one two three four up to + +41 +00:02:58,190 --> 00:03:02,310 +five now the range in this case is four five minus + +42 +00:03:02,310 --> 00:03:06,030 +one now if you replace the largest value which was + +43 +00:03:06,030 --> 00:03:10,540 +five replace it by 120 So 120 is a big number + +44 +00:03:10,540 --> 00:03:14,740 +comparing to the next largest, which is 4. So the + +45 +00:03:14,740 --> 00:03:18,060 +range is going to be 119. Now, if you compare the + +46 +00:03:18,060 --> 00:03:22,840 +range for data number one and the other one, 4 is + +47 +00:03:22,840 --> 00:03:27,640 +nothing comparing to 119. 119 is a big number. So + +48 +00:03:27,640 --> 00:03:34,260 +both data sets have minimum values here, 1 out of + +49 +00:03:34,260 --> 00:03:38,200 +5. And here also, we have the same minimum. 
And + +50 +00:03:38,200 --> 00:03:41,480 +approximately the same data values, but one value + +51 +00:03:41,480 --> 00:03:44,520 +is completely different, and your result is + +52 +00:03:44,520 --> 00:03:48,420 +completely different also. So I mean, one point + +53 +00:03:48,420 --> 00:03:55,040 +can change the range significantly. I mean, in big + +54 +00:03:55,040 --> 00:03:56,940 +difference between the previous one and the other + +55 +00:03:56,940 --> 00:03:59,340 +one. So that means the range is sensitive to + +56 +00:03:59,340 --> 00:04:04,260 +outliers. So, so far, the mean And the range. + +57 +00:04:04,840 --> 00:04:08,360 +These two measures are sensitive to outliers. I + +58 +00:04:08,360 --> 00:04:13,580 +mean, outliers affect the range as well as the + +59 +00:04:13,580 --> 00:04:17,460 +mean. Now let's move to the new measure of + +60 +00:04:17,460 --> 00:04:21,420 +variation, which is the most commonly used measure + +61 +00:04:21,420 --> 00:04:24,700 +in statistics as a measure of variation, which is + +62 +00:04:24,700 --> 00:04:27,400 +called the variance. + +63 +00:04:30,030 --> 00:04:33,870 +By the way, all the formulas in this class will be + +64 +00:04:33,870 --> 00:04:37,750 +given in the exam. I mean, you don't have to + +65 +00:04:37,750 --> 00:04:43,590 +memorize any formula for this course. So this is + +66 +00:04:43,590 --> 00:04:47,730 +the first one. We have the variance. Because later + +67 +00:04:47,730 --> 00:04:50,470 +on we will have complicated formulas, so you don't + +68 +00:04:50,470 --> 00:04:54,670 +need to memorize any of these. So both of them, + +69 +00:04:54,750 --> 00:05:00,830 +all of them was given in a formula sheet. Now, the + +70 +00:05:00,830 --> 00:05:06,910 +variance, if you look at this formula, and let's + +71 +00:05:06,910 --> 00:05:09,210 +see how can we define the variance based on this + +72 +00:05:09,210 --> 00:05:16,100 +notation. 
The variance is the average, or + +73 +00:05:16,100 --> 00:05:18,780 +approximately the average. Because here we divide + +74 +00:05:18,780 --> 00:05:23,100 +by n minus 1 of n. So it's not exactly the + +75 +00:05:23,100 --> 00:05:27,100 +average, but approximately the average of what? Of + +76 +00:05:27,100 --> 00:05:32,720 +squared deviations, squared deviations of the + +77 +00:05:32,720 --> 00:05:38,720 +values from its mean. So again, see the variance? + +78 +00:05:39,810 --> 00:05:42,930 +is the average or approximately the average of + +79 +00:05:42,930 --> 00:05:47,010 +square deviations of values, I mean of the + +80 +00:05:47,010 --> 00:05:50,390 +observations you have from the mean. That will + +81 +00:05:50,390 --> 00:05:55,510 +give the sample variance actually. So S squared, + +82 +00:05:55,930 --> 00:06:00,490 +the formula is summation of Xi minus X bar + +83 +00:06:00,490 --> 00:06:03,130 +squared, and I goes from one up to N, N is the + +84 +00:06:03,130 --> 00:06:06,310 +sample size you have, then divide by N minus one. + +85 +00:06:06,930 --> 00:06:09,850 +Now, what's the reason by dividing N minus 1? + +86 +00:06:09,950 --> 00:06:13,190 +Later, inshallah, in chapter around chapter 8, + +87 +00:06:13,550 --> 00:06:16,890 +we'll talk about degrees of freedom. And in that + +88 +00:06:16,890 --> 00:06:20,930 +case, we can explain in details why we divide by N + +89 +00:06:20,930 --> 00:06:24,130 +minus 1 instead of N. But also next week, we'll + +90 +00:06:24,130 --> 00:06:27,530 +talk about population variance. And in that + +91 +00:06:27,530 --> 00:06:30,550 +formula, you'll see that we divide this sum by + +92 +00:06:30,550 --> 00:06:33,730 +capital N. So you'll see the difference between + +93 +00:06:34,590 --> 00:06:40,630 +sample variance and population variance. 
Now X bar + +94 +00:06:40,630 --> 00:06:44,610 +is the arithmetic mean or the mean, N is the + +95 +00:06:44,610 --> 00:06:48,130 +sample size, and Xi is the ith value of the data + +96 +00:06:48,130 --> 00:06:53,570 +you have. So this is the formula to compute the + +97 +00:06:53,570 --> 00:06:55,590 +sample variance. + +98 +00:06:58,310 --> 00:07:01,950 +Now, the other measure is called the standard + +99 +00:07:01,950 --> 00:07:05,600 +deviation. Standard deviation is just the square + +100 +00:07:05,600 --> 00:07:12,640 +root of the variance. And both of them measure the + +101 +00:07:12,640 --> 00:07:17,960 +spread of the data around the mean. Now, most of + +102 +00:07:17,960 --> 00:07:21,780 +the time, we use the standard deviation, not the + +103 +00:07:21,780 --> 00:07:25,920 +variance, because the standard deviation has the + +104 +00:07:25,920 --> 00:07:29,140 +same units as the original data. For example, + +105 +00:07:29,600 --> 00:07:31,280 +imagine that we have age. + +106 +00:07:34,630 --> 00:07:43,690 +and age the unit of age is years for + +107 +00:07:43,690 --> 00:07:49,130 +example if we compute the sample variance and + +108 +00:07:49,130 --> 00:07:55,070 +suppose the value was six so the unit of a square + +109 +00:07:55,070 --> 00:07:59,810 +should be year square because we squared the + +110 +00:07:59,810 --> 00:08:03,050 +deviation so the unit should be square of the + +111 +00:08:03,050 --> 00:08:07,450 +original unit but if we take the square root of 6 + +112 +00:08:07,450 --> 00:08:11,310 +for example and also you have to take the square + +113 +00:08:11,310 --> 00:08:15,410 +root of this unit I mean you have to take square + +114 +00:08:15,410 --> 00:08:20,190 +root of your square that will give you so here the + +115 +00:08:20,190 --> 00:08:24,350 +standard deviation has the same unit as the + +116 +00:08:24,350 --> 00:08:27,850 +original unit For this reason, most of the time, + +117 +00:08:28,010 --> 00:08:31,350 +we are using the 
standard deviation rather than + +118 +00:08:31,350 --> 00:08:36,930 +the variance. So again, standard deviation is the + +119 +00:08:36,930 --> 00:08:39,910 +most commonly used measure of variation in + +120 +00:08:39,910 --> 00:08:44,930 +statistics. Standard deviation shows variation + +121 +00:08:44,930 --> 00:08:48,650 +about the mean. And also, the standard deviation + +122 +00:08:48,650 --> 00:08:51,230 +is the square root of the variance. And as I + +123 +00:08:51,230 --> 00:08:53,490 +mentioned, it has the same unit as the original + +124 +00:08:53,490 --> 00:08:57,780 +data you have. now the question is how can we + +125 +00:08:57,780 --> 00:09:03,140 +compute the sample standard deviation if you look + +126 +00:09:03,140 --> 00:09:08,320 +at the formula carefully here we have sum of x i + +127 +00:09:08,320 --> 00:09:12,120 +minus x bar squared divided by n minus one we have + +128 +00:09:12,120 --> 00:09:14,880 +x bar so that means first step you have to compute + +129 +00:09:14,880 --> 00:09:20,540 +the mean next step compute the difference between + +130 +00:09:20,540 --> 00:09:24,980 +each value and the mean so imagine that we have + +131 +00:09:25,630 --> 00:09:30,450 +data points X or random variable X. And we + +132 +00:09:30,450 --> 00:09:35,190 +computed X bar. 
Next step, we have to compute the + +133 +00:09:35,190 --> 00:09:40,200 +difference or the distance between each value and + +134 +00:09:40,200 --> 00:09:44,320 +the mean the mean for example is whatever is that + +135 +00:09:44,320 --> 00:09:47,460 +value so x minus x bar so first step you have to + +136 +00:09:47,460 --> 00:09:49,960 +compute the difference between each value and the + +137 +00:09:49,960 --> 00:09:53,180 +mean next step or step number two square each + +138 +00:09:53,180 --> 00:09:57,800 +difference I mean take this value and square it + +139 +00:09:57,800 --> 00:10:02,200 +now it makes sense that the variance should be + +140 +00:10:02,200 --> 00:10:07,790 +positive Because you squared this distance. Even + +141 +00:10:07,790 --> 00:10:10,630 +if it's negative, negative squared is positive. + +142 +00:10:11,470 --> 00:10:15,750 +So, S squared is always positive. Later, we'll + +143 +00:10:15,750 --> 00:10:21,590 +talk a little bit about this point. After we + +144 +00:10:21,590 --> 00:10:25,930 +square each value, I mean each deviation, just in + +145 +00:10:25,930 --> 00:10:28,330 +step number three, you have to add the square + +146 +00:10:28,330 --> 00:10:32,730 +differences. I mean, for this column, add these + +147 +00:10:32,730 --> 00:10:36,950 +values together, so we have the sum of X minus X + +148 +00:10:36,950 --> 00:10:43,930 +bar squared. Finally, to get the sample variance, + +149 +00:10:44,910 --> 00:10:49,950 +you have to divide this total by N minus 1. So + +150 +00:10:49,950 --> 00:10:54,130 +divide this value by n minus 1, that will give the + +151 +00:10:54,130 --> 00:10:58,670 +sample variance. To compute the standard + +152 +00:10:58,670 --> 00:11:02,110 +deviation, just take the square root of your + +153 +00:11:02,110 --> 00:11:06,090 +result in step 4. I mean, for example, imagine + +154 +00:11:06,090 --> 00:11:11,470 +that S squared is 36. This is the sample variance, + +155 +00:11:11,590 --> 00:11:15,790 +so S equals 6. 
So just take the square root of + +156 +00:11:15,790 --> 00:11:16,970 +that value. + +157 +00:11:20,320 --> 00:11:23,180 +Now, let's compute the standard deviation for a + +158 +00:11:23,180 --> 00:11:24,180 +specific dataset. + +159 +00:11:30,620 --> 00:11:34,420 +Suppose we have sample data as 10, 12, 14, up to + +160 +00:11:34,420 --> 00:11:37,800 +24 sample data just for illustration. Sometimes we + +161 +00:11:37,800 --> 00:11:41,040 +have huge or large dataset. In that case, you have + +162 +00:11:41,040 --> 00:11:44,480 +to use software such as software, maybe use Excel, + +163 +00:11:45,260 --> 00:11:48,950 +SVSS, Minitab or whatever software you have. For + +164 +00:11:48,950 --> 00:11:53,830 +this example, there are eight observations. So n + +165 +00:11:53,830 --> 00:11:56,690 +equals eight. So the sample size is eight. Just + +166 +00:11:56,690 --> 00:11:59,250 +count these values, one, two, three, four, up to + +167 +00:11:59,250 --> 00:12:02,270 +eight, so that the sample size is eight. So n + +168 +00:12:02,270 --> 00:12:07,330 +equals eight. The mean or the average add these + +169 +00:12:07,330 --> 00:12:09,930 +values, as we mentioned last time, and divide by + +170 +00:12:09,930 --> 00:12:12,470 +the total number of observations. That will give + +171 +00:12:12,470 --> 00:12:16,170 +16. Because if we add these values, the sum is + +172 +00:12:16,170 --> 00:12:22,210 +128. 128 divided by 8, that will give 16. So we + +173 +00:12:22,210 --> 00:12:28,510 +computed the average first. Step two, we have + +174 +00:12:28,510 --> 00:12:37,530 +data. For example, 10. Take the deviation for each + +175 +00:12:37,530 --> 00:12:43,070 +data value you have from its mean. So I mean 10 + +176 +00:12:43,070 --> 00:12:49,190 +minus 16 that will give minus 6. And do the same + +177 +00:12:49,190 --> 00:12:54,270 +for the rest. I mean for 12. So 12 minus 16 is + +178 +00:12:54,270 --> 00:12:58,590 +negative for all the way up to the last value 24. 
+ +179 +00:12:59,690 --> 00:13:06,990 +So 24 minus 16 gives 8. That's step number 2. So + +180 +00:13:06,990 --> 00:13:11,400 +we've computed the mean, then we found Or we + +181 +00:13:11,400 --> 00:13:15,380 +compute the distance between each value and its + +182 +00:13:15,380 --> 00:13:21,220 +mean. And now it's squared. So x minus x bar + +183 +00:13:21,220 --> 00:13:28,620 +squared. So minus 6 squared, you get 6, 16, up to + +184 +00:13:28,620 --> 00:13:32,920 +64, all the way down, up to 64. Now sum of these + +185 +00:13:32,920 --> 00:13:37,360 +values, sum all of these values, you will get 130. + +186 +00:13:39,700 --> 00:13:43,360 +So the sum of the square deviation of each value + +187 +00:13:43,360 --> 00:13:47,480 +and the mean equals 130. So the standard deviation + +188 +00:13:47,480 --> 00:13:51,420 +is the formula we have. Divide this value. I mean, + +189 +00:13:51,420 --> 00:13:56,100 +divide 130 by n minus 1. The sample size, as we + +190 +00:13:56,100 --> 00:14:00,400 +mentioned, was 8. So 8 minus 1 is 7. So 130 + +191 +00:14:00,400 --> 00:14:06,540 +divided by 7 will give 4.3. So that's the way to + +192 +00:14:06,540 --> 00:14:10,930 +compute the sample a standard deviation. + +193 +00:14:15,190 --> 00:14:20,770 +So again, the first step here, we have to find the + +194 +00:14:20,770 --> 00:14:23,990 +deviation from each value and its mean, then + +195 +00:14:23,990 --> 00:14:28,310 +square each one, each value, add these values + +196 +00:14:28,310 --> 00:14:32,070 +together. We add these values, the answer is 130. + +197 +00:14:33,600 --> 00:14:36,240 +The standard deviation is just square root of one + +198 +00:14:36,240 --> 00:14:41,360 +third divided by N minus one. So that will get us + +199 +00:14:41,360 --> 00:14:41,500 +in. + +200 +00:14:46,760 --> 00:14:47,760 +Any question? + +201 +00:14:51,700 --> 00:14:54,780 +Let's move to the next one. 
+ +202 +00:15:05,160 --> 00:15:11,280 +Now this sheet compares the standard deviations + +203 +00:15:11,280 --> 00:15:16,300 +for three different data sets. Look at data number + +204 +00:15:16,300 --> 00:15:23,340 +one, data B, data C. All of them have the same + +205 +00:15:23,340 --> 00:15:29,480 +mean. The mean is 15.5. So each data has mean of + +206 +00:15:29,480 --> 00:15:30,440 +15.5. + +207 +00:15:33,710 --> 00:15:37,770 +The distribution is completely different from data + +208 +00:15:37,770 --> 00:15:43,650 +A to B to C. I mean, the mean by itself cannot be + +209 +00:15:43,650 --> 00:15:47,850 +used as a measure to describe the distribution of + +210 +00:15:47,850 --> 00:15:51,310 +the data, because each one has the same mean, but + +211 +00:15:51,310 --> 00:15:57,770 +actually have different spread or variability. So + +212 +00:15:57,770 --> 00:16:01,100 +for this reason, we have to compute. the variance + +213 +00:16:01,100 --> 00:16:05,600 +in order to see which one has the highest spread + +214 +00:16:05,600 --> 00:16:08,240 +or highest variability around the mean. If you + +215 +00:16:08,240 --> 00:16:14,940 +look at data B, all the points are gathered around + +216 +00:16:14,940 --> 00:16:19,040 +each other. I mean, there is no big difference + +217 +00:16:19,040 --> 00:16:23,620 +between or big distance between these points. All + +218 +00:16:23,620 --> 00:16:26,520 +of them ranges between 14 and 17. + +219 +00:16:30,780 --> 00:16:34,620 +All the datasets we have here, three datasets, all + +220 +00:16:34,620 --> 00:16:36,960 +of them have the same average or the same mean of + +221 +00:16:36,960 --> 00:16:42,440 +15.5. Data B, for example, the data points ranges + +222 +00:16:42,440 --> 00:16:48,780 +from 14 up to 17. So there is no gap between these + +223 +00:16:48,780 --> 00:16:52,740 +values. So it makes sense that this dataset has + +224 +00:16:52,740 --> 00:16:58,460 +the smallest deviation around the mean. 
And if you + +225 +00:16:58,460 --> 00:17:01,660 +compute the actual standard deviation, if you look + +226 +00:17:01,660 --> 00:17:07,480 +at that value, it's 0.926, so it's less than 1. If + +227 +00:17:07,480 --> 00:17:12,040 +you look at data number 1, now there is a gap + +228 +00:17:12,040 --> 00:17:17,780 +between this value 13 and 16. Also, there is a gap + +229 +00:17:17,780 --> 00:17:22,440 +between 18 and 21. That's why the standard + +230 +00:17:22,440 --> 00:17:25,040 +deviation becomes around + +231 +00:17:36,470 --> 00:17:41,420 +Now for data C. Maybe this is the worst + +232 +00:17:41,420 --> 00:17:45,020 +distribution compared to the previous two. Because + +233 +00:17:45,020 --> 00:17:48,240 +there is a big gap between or big distance between + +234 +00:17:48,240 --> 00:17:52,820 +12 and 19. For this reason, the standard deviation + +235 +00:17:52,820 --> 00:17:57,300 +is around 4.5. So it has the largest standard + +236 +00:17:57,300 --> 00:18:00,620 +deviation. So by using the standard deviation, we + +237 +00:18:00,620 --> 00:18:05,460 +can tell which one has the smallest or the largest + +238 +00:18:05,460 --> 00:18:09,850 +deviation. So in general, in statistics, we report + +239 +00:18:09,850 --> 00:18:13,370 +both the mean and the variance, or the standard + +240 +00:18:13,370 --> 00:18:16,170 +deviation actually. You have to say that the mean + +241 +00:18:16,170 --> 00:18:21,110 +is 15.5 with standard deviation 3.3. Another + +242 +00:18:21,110 --> 00:18:25,790 +dataset has the same mean but different standard + +243 +00:18:25,790 --> 00:18:28,530 +deviation. So in this case, we can tell which one + +244 +00:18:28,530 --> 00:18:31,030 +has more spread. + +245 +00:18:33,050 --> 00:18:35,850 +Suppose we have curves for + +246 +00:18:42,570 --> 00:18:47,410 +or symmetric distributions. Now, both of these + +247 +00:18:47,410 --> 00:18:50,590 +distributions are symmetric. 
I mean by symmetric,
+
+248
+00:18:51,310 --> 00:18:59,430
+the mean splits the data into two halves. the data
+
+249
+00:18:59,430 --> 00:19:03,690
+into two equally areas data A left to the right is
+
+250
+00:19:03,690 --> 00:19:05,970
+the same as the area to the left now if you look
+
+251
+00:19:05,970 --> 00:19:10,730
+at the mean for the center this dashed line it's
+
+252
+00:19:10,730 --> 00:19:15,830
+the same for each one so that means the means are
+
+253
+00:19:15,830 --> 00:19:19,690
+equal for data A and data B so the two graphs have
+
+254
+00:19:19,690 --> 00:19:23,090
+the same mean but if you look carefully at the
+
+255
+00:19:23,090 --> 00:19:27,790
+spread for the variability for figure number one
+
+256
+00:19:29,870 --> 00:19:35,430
+The spread here seems to be narrower than the
+
+257
+00:19:35,430 --> 00:19:41,390
+other one. The other one is wider. So, data A is
+
+258
+00:19:41,390 --> 00:19:45,430
+less spread than data B, even if they have the
+
+259
+00:19:45,430 --> 00:19:50,780
+same center or descent. See, you can tell if the
+
+260
+00:19:50,780 --> 00:19:55,980
+data is smaller or larger standard deviation just
+
+261
+00:19:55,980 --> 00:19:59,020
+based on the curve you have just look at the
+
+262
+00:19:59,020 --> 00:20:02,280
+distance between the vertical line I mean the mean
+
+263
+00:20:02,280 --> 00:20:06,080
+which splits the curve into two areas two equal
+
+264
+00:20:06,080 --> 00:20:13,080
+areas now the distance here this one actually it
+
+265
+00:20:13,080 --> 00:20:17,320
+is larger than the other so we can say that Data A
+
+266
+00:20:17,320 --> 00:20:21,000
+is less spread than data B. So by using the
+
+267
+00:20:21,000 --> 00:20:23,540
+standard deviation, I mean the actual value, or
+
+268
+00:20:23,540 --> 00:20:27,020
+just look at the graph. You can tell which one has
+
+269
+00:20:27,020 --> 00:20:28,020
+more spread. 
+ +270 +00:20:33,740 --> 00:20:38,560 +To summarize some facts about standard deviation, + +271 +00:20:38,720 --> 00:20:42,780 +you can see that the more the data are spread out, + +272 +00:20:44,800 --> 00:20:47,800 +The greater the range, variance, and standard + +273 +00:20:47,800 --> 00:20:50,840 +deviation. So if the data is spread out, it means + +274 +00:20:50,840 --> 00:20:56,040 +you have large range, variance, and standard + +275 +00:20:56,040 --> 00:21:00,680 +deviation. The more the data are concentrated, I + +276 +00:21:00,680 --> 00:21:05,800 +mean gathered around each other, the smaller the + +277 +00:21:05,800 --> 00:21:08,360 +range, variance, and standard deviation. So that's + +278 +00:21:08,360 --> 00:21:11,140 +the difference when the data are concentrated or + +279 +00:21:11,140 --> 00:21:18,230 +spread out. large values and on the other hand we + +280 +00:21:18,230 --> 00:21:25,230 +have small values now there is one case the sample + +281 +00:21:25,230 --> 00:21:30,190 +mean or the variance or the range equals zero if + +282 +00:21:30,190 --> 00:21:34,290 +the data values are all the same and maybe never + +283 +00:21:34,290 --> 00:21:38,910 +happened in the real life in reality you never + +284 +00:21:38,910 --> 00:21:45,170 +maybe ninety nine percent the data set I mean the + +285 +00:21:45,170 --> 00:21:48,370 +values of innocence are not equal. But imagine + +286 +00:21:48,370 --> 00:21:51,870 +that you have a data of the same values. Suppose + +287 +00:21:51,870 --> 00:21:57,190 +we have five students of five children and their + +288 +00:21:57,190 --> 00:22:00,790 +age is five, five, five, five. So the average of + +289 +00:22:00,790 --> 00:22:03,490 +this one is five. Now what's the range? 
+ +290 +00:22:07,030 --> 00:22:10,670 +minimum is the same as maximum so the range is + +291 +00:22:10,670 --> 00:22:15,090 +zero if you compute the variance because x minus x + +292 +00:22:15,090 --> 00:22:18,910 +bar this value minus x bar is zero for the risk + +293 +00:22:18,910 --> 00:22:23,970 +it's also zero so zero so the solution is zero so + +294 +00:22:23,970 --> 00:22:27,050 +this is the only time you see that the these + +295 +00:22:27,050 --> 00:22:30,610 +measures equal zero if all the values have the + +296 +00:22:30,610 --> 00:22:35,410 +same and that's never happened the other one is + +297 +00:22:35,410 --> 00:22:39,690 +none of these measures are ever negative because + +298 +00:22:39,690 --> 00:22:47,630 +the range is max minus min negative squared + +299 +00:22:47,630 --> 00:22:50,810 +becomes positive for this reason the variance and + +300 +00:22:50,810 --> 00:22:54,650 +standard deviation is always greater than zero so + +301 +00:22:54,650 --> 00:23:00,140 +you can see a square is greater than zero Or + +302 +00:23:00,140 --> 00:23:03,720 +equal, and equal just if the values are the same, + +303 +00:23:04,220 --> 00:23:11,300 +or are equal. Any question? So we explained range, + +304 +00:23:12,080 --> 00:23:13,820 +variance, and standard deviation. + +305 +00:23:18,460 --> 00:23:23,580 +Outliers affect the variance and standard + +306 +00:23:23,580 --> 00:23:28,720 +deviation. We said that outliers affect the range + +307 +00:23:28,720 --> 00:23:30,850 +and the mean. Now what's about standard deviation? + +308 +00:23:32,330 --> 00:23:39,630 +Look at this formula again. Now in + +309 +00:23:39,630 --> 00:23:42,170 +order to compute the sample standard deviation, we + +310 +00:23:42,170 --> 00:23:46,130 +have to compute the mean first. And we know that + +311 +00:23:46,130 --> 00:23:49,590 +outliers affect the mean much more than the + +312 +00:23:49,590 --> 00:23:54,950 +median. 
Since outliers affect X bar, and S is a + +313 +00:23:54,950 --> 00:23:58,890 +function of X bar, that means also outliers affect + +314 +00:23:58,890 --> 00:24:03,050 +the standard deviation. So, so far, outliers + +315 +00:24:03,050 --> 00:24:10,110 +affect mean, range, standard deviation. So in case + +316 +00:24:10,110 --> 00:24:12,810 +of outliers, I mean in the presence of outliers, + +317 +00:24:13,670 --> 00:24:17,430 +you have to avoid using these measures. We have to + +318 +00:24:17,430 --> 00:24:19,450 +use something else, and that's what we'll talk + +319 +00:24:19,450 --> 00:24:23,410 +later about, inshallah. So the mean is affected by + +320 +00:24:23,410 --> 00:24:26,970 +outlier. Last time we said that the median should + +321 +00:24:26,970 --> 00:24:30,430 +be used in this case because the median is less + +322 +00:24:30,430 --> 00:24:34,730 +affected by outliers than the mean. So outliers + +323 +00:24:34,730 --> 00:24:39,210 +affect the mean much more than the median. Now + +324 +00:24:39,210 --> 00:24:41,770 +what's about the range? The range is affected by + +325 +00:24:41,770 --> 00:24:45,690 +outliers. So in case of outliers, you have to use + +326 +00:24:45,690 --> 00:24:49,350 +another measure. That will be next time, + +327 +00:24:49,470 --> 00:24:54,300 +inshallah. Let's move to another measure of + +328 +00:24:54,300 --> 00:24:57,300 +variation which is called coefficient of + +329 +00:24:57,300 --> 00:24:57,700 +variation. + +330 +00:25:00,920 --> 00:25:04,900 +Consider we have two datasets, one for age, + +331 +00:25:09,260 --> 00:25:13,960 +for example, one for age and + +332 +00:25:13,960 --> 00:25:15,200 +other one for weight. 
+ +333 +00:25:18,330 --> 00:25:22,730 +suppose someone computed the mean and the standard + +334 +00:25:22,730 --> 00:25:26,290 +deviation and he found that the standard deviation + +335 +00:25:26,290 --> 00:25:31,630 +is for example is 10 so 10 units because the unit + +336 +00:25:31,630 --> 00:25:36,190 +is units here and the standard deviation for + +337 +00:25:36,190 --> 00:25:42,610 +weight for example is 7 the weight the unit of the + +338 +00:25:42,610 --> 00:25:47,610 +weight is weight is kilogram now can you say that + +339 +00:25:49,240 --> 00:25:53,820 +Age, data for age is more spread than weight + +340 +00:25:53,820 --> 00:25:59,780 +because 10 is greater than 7. You cannot say that + +341 +00:25:59,780 --> 00:26:04,880 +because you have different units. So you cannot + +342 +00:26:04,880 --> 00:26:08,040 +compare the spread of the data just based on the + +343 +00:26:08,040 --> 00:26:10,780 +sample standard deviation. So we need a measure + +344 +00:26:10,780 --> 00:26:17,720 +that compute this variability regardless of the + +345 +00:26:17,720 --> 00:26:22,220 +original units. That measure is called coefficient + +346 +00:26:22,220 --> 00:26:25,640 +of variation. So coefficient of variation is a + +347 +00:26:25,640 --> 00:26:32,540 +measure for relative variation always represents + +348 +00:26:32,540 --> 00:26:37,360 +in percentage shows variation relative to the mean + +349 +00:26:37,360 --> 00:26:43,260 +and can be used to compare the variability of two + +350 +00:26:43,260 --> 00:26:47,760 +or more than two sets of data measured in + +351 +00:26:47,760 --> 00:26:53,680 +different units. The formula for CV or coefficient + +352 +00:26:53,680 --> 00:26:58,770 +of variation S divided by X bar. So we have to + +353 +00:26:58,770 --> 00:27:01,990 +compute both the mean and the standard deviation + +354 +00:27:01,990 --> 00:27:05,090 +in order to compute CV. 
S divided by X bar + +355 +00:27:05,090 --> 00:27:08,850 +multiplied by 100 gives the coefficient of + +356 +00:27:08,850 --> 00:27:13,730 +variation. Now, for example, suppose we have two + +357 +00:27:13,730 --> 00:27:19,490 +stock markets, stock A and stock B. And let's say + +358 +00:27:19,490 --> 00:27:22,830 +that the average price last year for stock A was + +359 +00:27:22,830 --> 00:27:29,380 +$50. And the standard deviation was five. For + +360 +00:27:29,380 --> 00:27:36,500 +stock B, the average price last year was $100 with + +361 +00:27:36,500 --> 00:27:40,560 +the same standard deviations. It's five. Now let's + +362 +00:27:40,560 --> 00:27:44,660 +compare which one, which price I mean, which stock + +363 +00:27:44,660 --> 00:27:49,060 +price is more spread than the other one. I mean, + +364 +00:27:49,260 --> 00:27:55,320 +which one has big gaps between the values and the + +365 +00:27:55,320 --> 00:27:59,300 +mean. I mean, which values are concentrated around + +366 +00:27:59,300 --> 00:28:02,580 +each other, stack A, price of stack A or stack B. + +367 +00:28:03,720 --> 00:28:07,520 +Just for simple calculation, CV for stack A is + +368 +00:28:07,520 --> 00:28:11,140 +just S over X bar multiplied by 100. That will + +369 +00:28:11,140 --> 00:28:17,360 +gives 5 divided by 50 times 100 gives 10%. So the + +370 +00:28:17,360 --> 00:28:19,260 +coefficient of variation in this case is 10. + +371 +00:28:21,670 --> 00:28:27,630 +What's about the other one? S against 5. The + +372 +00:28:27,630 --> 00:28:31,950 +average of stock B was last year was 100 + +373 +00:28:31,950 --> 00:28:40,520 +multiplied by 100 gives 5%. So CV for A. 
is + +374 +00:28:40,520 --> 00:28:45,820 +greater than CV for B that means stack A I mean + +375 +00:28:45,820 --> 00:28:52,460 +prices of stack A are more spread than stack B + +376 +00:28:52,460 --> 00:28:58,510 +that means stack B is more stable So the stability + +377 +00:28:58,510 --> 00:29:04,030 +in stock B is more than that in stock A, because + +378 +00:29:04,030 --> 00:29:08,210 +this one is less variable, less variability in the + +379 +00:29:08,210 --> 00:29:12,030 +data. So in this case, we can compute, I mean, we + +380 +00:29:12,030 --> 00:29:18,530 +can compare different data sets. That's all for + +381 +00:29:18,530 --> 00:29:21,470 +measures of variation. So we talked about range, + +382 +00:29:23,810 --> 00:29:28,230 +variance, standard deviation, And finally + +383 +00:29:28,230 --> 00:29:33,470 +coefficient of variation. Next topic talks about z + +384 +00:29:33,470 --> 00:29:36,950 +-score. Most of the time we are talking about + +385 +00:29:36,950 --> 00:29:41,550 +extreme values or extreme outliers. Now let's see + +386 +00:29:41,550 --> 00:29:45,370 +how can we tell if this point is considered to be + +387 +00:29:45,370 --> 00:29:51,110 +an outlier. We have some data values and you want + +388 +00:29:51,110 --> 00:29:55,150 +to use the mean as a measure of center or the + +389 +00:29:55,150 --> 00:29:58,670 +median, which one you have to use. If the data set + +390 +00:29:58,670 --> 00:30:01,670 +has outliers, as we mentioned, then use the + +391 +00:30:01,670 --> 00:30:04,370 +median. Now the question is how can you determine + +392 +00:30:04,370 --> 00:30:08,780 +if the data set has outliers? There are several + +393 +00:30:08,780 --> 00:30:12,680 +methods. Today we'll talk just about z-score, + +394 +00:30:12,800 --> 00:30:17,680 +later we'll talk about something else. And z-score + +395 +00:30:17,680 --> 00:30:20,580 +sometimes called standardized z-score. 

+
+396
+00:30:22,380 --> 00:30:27,400
+Standardized value
+
+397
+00:30:27,400 --> 00:30:31,440
+or standardized score. So standardized stands for
+
+398
+00:30:31,440 --> 00:30:35,060
+z. More details about z-score will be in chapter 6
+
+399
+00:30:35,060 --> 00:30:38,360
+when we talk about normal distribution, but at
+
+400
+00:30:38,360 --> 00:30:43,540
+least here we'll just use z-score to determine or
+
+401
+00:30:43,540 --> 00:30:46,840
+to know if this point is considered to be or
+
+402
+00:30:46,840 --> 00:30:50,640
+suspected to be extreme value or extreme outlier
+
+403
+00:30:50,640 --> 00:30:54,480
+only. To compute the z-score by data value,
+
+404
+00:30:54,820 --> 00:30:57,560
+subtract the mean and divide by the standard
+
+405
+00:30:57,560 --> 00:31:04,220
+deviation. That means suppose we have data set x
+
+406
+00:31:04,220 --> 00:31:07,540
+or data value for each one we can compute this
+
+407
+00:31:07,540 --> 00:31:15,800
+score we have data value x subtract x mean then
+
+408
+00:31:15,800 --> 00:31:20,440
+divide by standard deviation. Now again to compute
+
+409
+00:31:20,440 --> 00:31:22,760
+this score you have to compute the mean and
+
+410
+00:31:22,760 --> 00:31:26,640
+standard deviation the same as CV. So this score
+
+411
+00:31:26,640 --> 00:31:31,700
+is the distance x minus x bar then divided by
+
+412
+00:31:31,700 --> 00:31:37,000
+standard deviation. Now do you think what are the
+
+413
+00:31:37,000 --> 00:31:41,720
+signs of this score? 
This score can be positive, + +414 +00:31:42,660 --> 00:31:47,930 +can be negative, or maybe equals zero because x + +415 +00:31:47,930 --> 00:31:54,230 +minus x bar if they are the same so if so z score + +416 +00:31:54,230 --> 00:31:58,350 +equals zero that's only if x equals x bar + +417 +00:31:58,350 --> 00:32:06,510 +otherwise z score may be positive or negative it's + +418 +00:32:06,510 --> 00:32:10,890 +positive when now the sign can be determined by + +419 +00:32:10,890 --> 00:32:16,340 +using this system If X minus X bar is negative, + +420 +00:32:17,080 --> 00:32:20,260 +then this score is negative. If X minus X bar is + +421 +00:32:20,260 --> 00:32:22,720 +positive, then this score is positive. So when + +422 +00:32:22,720 --> 00:32:26,580 +this score is positive, if the value of X is + +423 +00:32:26,580 --> 00:32:31,620 +greater than X bar. Otherwise, this score is + +424 +00:32:31,620 --> 00:32:35,620 +negative if the value of X is smaller than X bar. + +425 +00:32:36,620 --> 00:32:39,660 +So, z-score can be positive, can be negative, can + +426 +00:32:39,660 --> 00:32:43,540 +be equal to zero, it depends on the value of x, I + +427 +00:32:43,540 --> 00:32:46,640 +mean the value of the point and the average or the + +428 +00:32:46,640 --> 00:32:51,880 +mean. Now, what's the meaning of z-score? The z + +429 +00:32:51,880 --> 00:32:56,220 +-score is the number of standard deviations a data + +430 +00:32:56,220 --> 00:33:02,520 +value is from the mean. For example, imagine your + +431 +00:33:02,520 --> 00:33:09,140 +score for accounting A was + +432 +00:33:09,140 --> 00:33:19,380 +75 that's your score in course A the average of + +433 +00:33:19,380 --> 00:33:24,640 +the entire class was 70 with standard deviation 5 + +434 +00:33:26,490 --> 00:33:32,050 +So your score was 75, the average is 70, and + +435 +00:33:32,050 --> 00:33:36,070 +standard deviation is 5. Now look at this score. 

+
+436
+00:33:38,330 --> 00:33:44,930
+It's just 75 minus 70 divided by 5 gives 1. 5 over
+
+437
+00:33:44,930 --> 00:33:49,410
+5 is 1. Now what's the meaning of 1? This point says
+
+438
+00:33:49,410 --> 00:33:52,050
+that this score is the number of standard
+
+439
+00:33:52,050 --> 00:33:56,960
+deviations a data value is from the mean. Now, if
+
+440
+00:33:56,960 --> 00:34:00,900
+your score is 75 and the average was 70, that
+
+441
+00:34:00,900 --> 00:34:06,700
+means your score is above the mean by 5 units or 5
+
+442
+00:34:06,700 --> 00:34:10,600
+points. Now, 5 points is what is the standard
+
+443
+00:34:10,600 --> 00:34:15,300
+deviation. So that means your score is above the
+
+444
+00:34:15,300 --> 00:34:18,970
+mean by one standard deviation. So that's the
+
+445
+00:34:18,970 --> 00:34:21,490
+meaning of one. So if z score is one, that means
+
+446
+00:34:21,490 --> 00:34:24,330
+my score is above the mean by one standard
+
+447
+00:34:24,330 --> 00:34:33,370
+deviation. Now suppose your friend, she has 60 in
+
+448
+00:34:33,370 --> 00:34:37,990
+the same class, the same course, for sure the same
+
+449
+00:34:37,990 --> 00:34:41,770
+mean and the same standard deviation. We are
+
+450
+00:34:41,770 --> 00:34:46,370
+talking about the same class. So here the score is
+
+451
+00:34:46,370 --> 00:34:51,930
+going to be 60 minus 70 divided by 5, so that's
+
+452
+00:34:51,930 --> 00:34:57,290
+minus 10. Now the difference between her score
+
+453
+00:34:57,290 --> 00:35:03,690
+and the average is 10, and 10 is twice the
+
+454
+00:35:03,690 --> 00:35:09,530
+standard deviation, below or above. 
below the sign
+00:35:09,530 --> 00:35:13,030
+is negative her score is 60 the average was 70 so
+
+455
+00:35:13,030 --> 00:35:16,090
+her score is below the mean so you can say that
+
+456
+00:35:16,090 --> 00:35:22,410
+her score is two standard deviations below the
+
+457
+00:35:22,410 --> 00:35:26,830
+mean so you can tell if the score is above or
+
+458
+00:35:26,830 --> 00:35:32,830
+below the mean Next, now we are looking to locate
+
+459
+00:35:32,830 --> 00:35:36,970
+extreme values. I mean, if you want to examine if
+
+460
+00:35:36,970 --> 00:35:41,090
+the data is outlier or not. Now the rule of thumb
+
+461
+00:35:41,090 --> 00:35:46,690
+is, rule in general, a data value is considered an
+
+462
+00:35:46,690 --> 00:35:51,810
+extreme outlier if its z-score is less than
+
+463
+00:35:51,810 --> 00:35:56,910
+negative three or greater than three. So suppose
+
+464
+00:35:56,910 --> 00:35:58,970
+you compute a z-score for a data point,
+
+465
+00:36:01,650 --> 00:36:05,830
+And this score, for example, for this one was one.
+
+466
+00:36:07,190 --> 00:36:08,710
+So the question is,
+
+467
+00:36:11,050 --> 00:36:19,550
+is 75 an outlier or not? The rule is that if the
+
+468
+00:36:19,550 --> 00:36:22,390
+data value is greater than three, I mean this
+
+469
+00:36:22,390 --> 00:36:29,160
+area, Or smaller than negative three. In this
+
+470
+00:36:29,160 --> 00:36:32,900
+case, the point is suspected to be extreme
+
+471
+00:36:32,900 --> 00:36:36,700
+outlier, otherwise it's okay. Now my z-score is
+
+472
+00:36:36,700 --> 00:36:39,640
+one, so one lies between minus three and plus
+
+473
+00:36:39,640 --> 00:36:44,080
+three, so the point is not an outlier. So it's so
+
+474
+00:36:44,080 --> 00:36:46,760
+easy to determine if the point is outlier or not,
+
+475
+00:36:46,820 --> 00:36:51,540
+just look at z-score. If it is above three, it's
+
+476
+00:36:51,540 --> 00:36:55,020
+outlier. Less than minus three, it's outlier. 
+ +478 +00:36:55,140 --> 00:36:58,580 +Otherwise, the data is okay. The larger the + +479 +00:36:58,580 --> 00:37:02,020 +absolute value of the z-score, the farther the + +480 +00:37:02,020 --> 00:37:08,320 +data value is from the mean. That means suppose + +481 +00:37:08,320 --> 00:37:15,040 +you have z-score of 3.5 and other one is 2.5. + +482 +00:37:16,340 --> 00:37:21,500 +Which one the data value is farther from the mean? + +483 +00:37:24,070 --> 00:37:31,190 +3.5, so this one is data value is further from the + +484 +00:37:31,190 --> 00:37:34,830 +mean. So again, this score + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..b7a92acdde049fdb0a9427165bf88dee9b1bed13 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vMG3PxD5dOI_raw.srt @@ -0,0 +1,1936 @@ +1 +00:00:09,090 --> 00:00:10,450 +Oh, Mohamed, you're ready? You're here? + +2 +00:00:17,950 --> 00:00:21,670 +Inshallah, today we'll talk about measures of + +3 +00:00:21,670 --> 00:00:25,290 +variation. And as we mentioned last time, there + +4 +00:00:25,290 --> 00:00:28,450 +are four measures of variation. And we talked + +5 +00:00:28,450 --> 00:00:31,790 +about range, or the range. And we know that the + +6 +00:00:31,790 --> 00:00:35,850 +range is the distance between the largest and the + +7 +00:00:35,850 --> 00:00:39,950 +smallest value. The other measure of variation is + +8 +00:00:39,950 --> 00:00:44,250 +called the variance. By using the variance, we can + +9 +00:00:44,250 --> 00:00:48,710 +also compute another measure is called standard + +10 +00:00:48,710 --> 00:00:52,410 +deviation. Finally, we'll talk a little bit about + +11 +00:00:52,410 --> 00:00:56,630 +coefficient of variation to compare two different + +12 +00:00:56,630 --> 00:01:00,430 +datasets when we have different units or different + +13 +00:01:00,430 --> 00:01:05,360 +measurement. 
Measures of variation give + +14 +00:01:05,360 --> 00:01:08,280 +information of the spread. Spread means + +15 +00:01:08,280 --> 00:01:13,640 +variability or dispersion of the dataset values + +16 +00:01:13,640 --> 00:01:19,620 +that you have. As we mentioned last time, the + +17 +00:01:19,620 --> 00:01:21,800 +range is the simplest measure of variation, and + +18 +00:01:21,800 --> 00:01:25,720 +it's computed by using this simple formula. The + +19 +00:01:25,720 --> 00:01:28,800 +range is the difference between the largest and + +20 +00:01:28,800 --> 00:01:31,860 +the smallest. For a specific example, if we have + +21 +00:01:31,860 --> 00:01:35,720 +this dataset, one, two, three, up to 13, then the + +22 +00:01:35,720 --> 00:01:39,000 +range is going to be 13 minus one, which is 12. So + +23 +00:01:39,000 --> 00:01:42,020 +that's straightforward example to compute the + +24 +00:01:42,020 --> 00:01:45,370 +range. Now, sometimes the range is misleading. I + +25 +00:01:45,370 --> 00:01:49,630 +mean, the range can give misleading results if the + +26 +00:01:49,630 --> 00:01:54,090 +data set has outliers. For example, imagine if we + +27 +00:01:54,090 --> 00:02:00,450 +have this systematic data, 7, 8, 9, 10, 11, 12. + +28 +00:02:02,350 --> 00:02:06,930 +for this example the range is the maximum minus + +29 +00:02:06,930 --> 00:02:10,350 +minimum is five so the distance between the + +30 +00:02:10,350 --> 00:02:13,590 +largest and smallest is five for the other data if + +31 +00:02:13,590 --> 00:02:16,850 +you look carefully we have a gap between seven and + +32 +00:02:16,850 --> 00:02:23,330 +ten for this reason This data is more spread than + +33 +00:02:23,330 --> 00:02:27,230 +data number one, but each of them have the same + +34 +00:02:27,230 --> 00:02:31,130 +spread, the same range, because the maximum is + +35 +00:02:31,130 --> 00:02:35,330 +still 12. The smallest is seven, so that will give + +36 +00:02:35,330 --> 00:02:39,190 +five. 
So both have the same range, but the + +37 +00:02:39,190 --> 00:02:43,330 +variability in data A and data B are completely + +38 +00:02:43,330 --> 00:02:47,070 +different. So we cannot rely on the range as a + +39 +00:02:47,070 --> 00:02:51,530 +measure of spread or deviation. if you look at the + +40 +00:02:51,530 --> 00:02:58,190 +other one now we have one two three four up to + +41 +00:02:58,190 --> 00:03:02,310 +five now the range in this case is four five minus + +42 +00:03:02,310 --> 00:03:06,030 +one now if you replace the largest value which was + +43 +00:03:06,030 --> 00:03:10,540 +five replace it by 120 So 120 is a big number + +44 +00:03:10,540 --> 00:03:14,740 +comparing to the next largest, which is 4. So the + +45 +00:03:14,740 --> 00:03:18,060 +range is going to be 119. Now, if you compare the + +46 +00:03:18,060 --> 00:03:22,840 +range for data number one and the other one, 4 is + +47 +00:03:22,840 --> 00:03:27,640 +nothing comparing to 119. 119 is a big number. So + +48 +00:03:27,640 --> 00:03:34,260 +both data sets have minimum values here, 1 out of + +49 +00:03:34,260 --> 00:03:38,200 +5. And here also, we have the same minimum. And + +50 +00:03:38,200 --> 00:03:41,480 +approximately the same data values, but one value + +51 +00:03:41,480 --> 00:03:44,520 +is completely different, and your result is + +52 +00:03:44,520 --> 00:03:48,420 +completely different also. So I mean, one point + +53 +00:03:48,420 --> 00:03:55,040 +can change the range significantly. I mean, in big + +54 +00:03:55,040 --> 00:03:56,940 +difference between the previous one and the other + +55 +00:03:56,940 --> 00:03:59,340 +one. So that means the range is sensitive to + +56 +00:03:59,340 --> 00:04:04,260 +outliers. So, so far, the mean And the range. + +57 +00:04:04,840 --> 00:04:08,360 +These two measures are sensitive to outliers. I + +58 +00:04:08,360 --> 00:04:13,580 +mean, outliers affect the range as well as the + +59 +00:04:13,580 --> 00:04:17,460 +mean. 
Now let's move to the new measure of + +60 +00:04:17,460 --> 00:04:21,420 +variation, which is the most commonly used measure + +61 +00:04:21,420 --> 00:04:24,700 +in statistics as a measure of variation, which is + +62 +00:04:24,700 --> 00:04:27,400 +called the variance. + +63 +00:04:30,030 --> 00:04:33,870 +By the way, all the formulas in this class will be + +64 +00:04:33,870 --> 00:04:37,750 +given in the exam. I mean, you don't have to + +65 +00:04:37,750 --> 00:04:43,590 +memorize any formula for this course. So this is + +66 +00:04:43,590 --> 00:04:47,730 +the first one. We have the variance. Because later + +67 +00:04:47,730 --> 00:04:50,470 +on we will have complicated formulas, so you don't + +68 +00:04:50,470 --> 00:04:54,670 +need to memorize any of these. So both of them, + +69 +00:04:54,750 --> 00:05:00,830 +all of them was given in a formula sheet. Now, the + +70 +00:05:00,830 --> 00:05:06,910 +variance, if you look at this formula, and let's + +71 +00:05:06,910 --> 00:05:09,210 +see how can we define the variance based on this + +72 +00:05:09,210 --> 00:05:16,100 +notation. The variance is the average, or + +73 +00:05:16,100 --> 00:05:18,780 +approximately the average. Because here we divide + +74 +00:05:18,780 --> 00:05:23,100 +by n minus 1 of n. So it's not exactly the + +75 +00:05:23,100 --> 00:05:27,100 +average, but approximately the average of what? Of + +76 +00:05:27,100 --> 00:05:32,720 +squared deviations, squared deviations of the + +77 +00:05:32,720 --> 00:05:38,720 +values from its mean. So again, see the variance? + +78 +00:05:39,810 --> 00:05:42,930 +is the average or approximately the average of + +79 +00:05:42,930 --> 00:05:47,010 +square deviations of values, I mean of the + +80 +00:05:47,010 --> 00:05:50,390 +observations you have from the mean. That will + +81 +00:05:50,390 --> 00:05:55,510 +give the sample variance actually. 
So S squared, + +82 +00:05:55,930 --> 00:06:00,490 +the formula is summation of Xi minus X bar + +83 +00:06:00,490 --> 00:06:03,130 +squared, and I goes from one up to N, N is the + +84 +00:06:03,130 --> 00:06:06,310 +sample size you have, then divide by N minus one. + +85 +00:06:06,930 --> 00:06:09,850 +Now, what's the reason by dividing N minus 1? + +86 +00:06:09,950 --> 00:06:13,190 +Later, inshallah, in chapter around chapter 8, + +87 +00:06:13,550 --> 00:06:16,890 +we'll talk about degrees of freedom. And in that + +88 +00:06:16,890 --> 00:06:20,930 +case, we can explain in details why we divide by N + +89 +00:06:20,930 --> 00:06:24,130 +minus 1 instead of N. But also next week, we'll + +90 +00:06:24,130 --> 00:06:27,530 +talk about population variance. And in that + +91 +00:06:27,530 --> 00:06:30,550 +formula, you'll see that we divide this sum by + +92 +00:06:30,550 --> 00:06:33,730 +capital N. So you'll see the difference between + +93 +00:06:34,590 --> 00:06:40,630 +sample variance and population variance. Now X bar + +94 +00:06:40,630 --> 00:06:44,610 +is the arithmetic mean or the mean, N is the + +95 +00:06:44,610 --> 00:06:48,130 +sample size, and Xi is the ith value of the data + +96 +00:06:48,130 --> 00:06:53,570 +you have. So this is the formula to compute the + +97 +00:06:53,570 --> 00:06:55,590 +sample variance. + +98 +00:06:58,310 --> 00:07:01,950 +Now, the other measure is called the standard + +99 +00:07:01,950 --> 00:07:05,600 +deviation. Standard deviation is just the square + +100 +00:07:05,600 --> 00:07:12,640 +root of the variance. And both of them measure the + +101 +00:07:12,640 --> 00:07:17,960 +spread of the data around the mean. Now, most of + +102 +00:07:17,960 --> 00:07:21,780 +the time, we use the standard deviation, not the + +103 +00:07:21,780 --> 00:07:25,920 +variance, because the standard deviation has the + +104 +00:07:25,920 --> 00:07:29,140 +same units as the original data. 
For example, + +105 +00:07:29,600 --> 00:07:31,280 +imagine that we have age. + +106 +00:07:34,630 --> 00:07:43,690 +and age the unit of age is years for + +107 +00:07:43,690 --> 00:07:49,130 +example if we compute the sample variance and + +108 +00:07:49,130 --> 00:07:55,070 +suppose the value was six so the unit of a square + +109 +00:07:55,070 --> 00:07:59,810 +should be year square because we squared the + +110 +00:07:59,810 --> 00:08:03,050 +deviation so the unit should be square of the + +111 +00:08:03,050 --> 00:08:07,450 +original unit but if we take the square root of 6 + +112 +00:08:07,450 --> 00:08:11,310 +for example and also you have to take the square + +113 +00:08:11,310 --> 00:08:15,410 +root of this unit I mean you have to take square + +114 +00:08:15,410 --> 00:08:20,190 +root of your square that will give you so here the + +115 +00:08:20,190 --> 00:08:24,350 +standard deviation has the same unit as the + +116 +00:08:24,350 --> 00:08:27,850 +original unit For this reason, most of the time, + +117 +00:08:28,010 --> 00:08:31,350 +we are using the standard deviation rather than + +118 +00:08:31,350 --> 00:08:36,930 +the variance. So again, standard deviation is the + +119 +00:08:36,930 --> 00:08:39,910 +most commonly used measure of variation in + +120 +00:08:39,910 --> 00:08:44,930 +statistics. Standard deviation shows variation + +121 +00:08:44,930 --> 00:08:48,650 +about the mean. And also, the standard deviation + +122 +00:08:48,650 --> 00:08:51,230 +is the square root of the variance. And as I + +123 +00:08:51,230 --> 00:08:53,490 +mentioned, it has the same unit as the original + +124 +00:08:53,490 --> 00:08:57,780 +data you have. 
now the question is how can we + +125 +00:08:57,780 --> 00:09:03,140 +compute the sample standard deviation if you look + +126 +00:09:03,140 --> 00:09:08,320 +at the formula carefully here we have sum of x i + +127 +00:09:08,320 --> 00:09:12,120 +minus x bar squared divided by n minus one we have + +128 +00:09:12,120 --> 00:09:14,880 +x bar so that means first step you have to compute + +129 +00:09:14,880 --> 00:09:20,540 +the mean next step compute the difference between + +130 +00:09:20,540 --> 00:09:24,980 +each value and the mean so imagine that we have + +131 +00:09:25,630 --> 00:09:30,450 +data points X or random variable X. And we + +132 +00:09:30,450 --> 00:09:35,190 +computed X bar. Next step, we have to compute the + +133 +00:09:35,190 --> 00:09:40,200 +difference or the distance between each value and + +134 +00:09:40,200 --> 00:09:44,320 +the mean the mean for example is whatever is that + +135 +00:09:44,320 --> 00:09:47,460 +value so x minus x bar so first step you have to + +136 +00:09:47,460 --> 00:09:49,960 +compute the difference between each value and the + +137 +00:09:49,960 --> 00:09:53,180 +mean next step or step number two square each + +138 +00:09:53,180 --> 00:09:57,800 +difference I mean take this value and square it + +139 +00:09:57,800 --> 00:10:02,200 +now it makes sense that the variance should be + +140 +00:10:02,200 --> 00:10:07,790 +positive Because you squared this distance. Even + +141 +00:10:07,790 --> 00:10:10,630 +if it's negative, negative squared is positive. + +142 +00:10:11,470 --> 00:10:15,750 +So, S squared is always positive. Later, we'll + +143 +00:10:15,750 --> 00:10:21,590 +talk a little bit about this point. After we + +144 +00:10:21,590 --> 00:10:25,930 +square each value, I mean each deviation, just in + +145 +00:10:25,930 --> 00:10:28,330 +step number three, you have to add the square + +146 +00:10:28,330 --> 00:10:32,730 +differences. 
I mean, for this column, add these + +147 +00:10:32,730 --> 00:10:36,950 +values together, so we have the sum of X minus X + +148 +00:10:36,950 --> 00:10:43,930 +bar squared. Finally, to get the sample variance, + +149 +00:10:44,910 --> 00:10:49,950 +you have to divide this total by N minus 1. So + +150 +00:10:49,950 --> 00:10:54,130 +divide this value by n minus 1, that will give the + +151 +00:10:54,130 --> 00:10:58,670 +sample variance. To compute the standard + +152 +00:10:58,670 --> 00:11:02,110 +deviation, just take the square root of your + +153 +00:11:02,110 --> 00:11:06,090 +result in step 4. I mean, for example, imagine + +154 +00:11:06,090 --> 00:11:11,470 +that S squared is 36. This is the sample variance, + +155 +00:11:11,590 --> 00:11:15,790 +so S equals 6. So just take the square root of + +156 +00:11:15,790 --> 00:11:16,970 +that value. + +157 +00:11:20,320 --> 00:11:23,180 +Now, let's compute the standard deviation for a + +158 +00:11:23,180 --> 00:11:24,180 +specific dataset. + +159 +00:11:30,620 --> 00:11:34,420 +Suppose we have sample data as 10, 12, 14, up to + +160 +00:11:34,420 --> 00:11:37,800 +24 sample data just for illustration. Sometimes we + +161 +00:11:37,800 --> 00:11:41,040 +have huge or large dataset. In that case, you have + +162 +00:11:41,040 --> 00:11:44,480 +to use software such as software, maybe use Excel, + +163 +00:11:45,260 --> 00:11:48,950 +SVSS, Minitab or whatever software you have. For + +164 +00:11:48,950 --> 00:11:53,830 +this example, there are eight observations. So n + +165 +00:11:53,830 --> 00:11:56,690 +equals eight. So the sample size is eight. Just + +166 +00:11:56,690 --> 00:11:59,250 +count these values, one, two, three, four, up to + +167 +00:11:59,250 --> 00:12:02,270 +eight, so that the sample size is eight. So n + +168 +00:12:02,270 --> 00:12:07,330 +equals eight. 
The mean or the average add these

+169
+00:12:07,330 --> 00:12:09,930
+values, as we mentioned last time, and divide by

+170
+00:12:09,930 --> 00:12:12,470
+the total number of observations. That will give

+171
+00:12:12,470 --> 00:12:16,170
+16. Because if we add these values, the sum is

+172
+00:12:16,170 --> 00:12:22,210
+128. 128 divided by 8, that will give 16. So we

+173
+00:12:22,210 --> 00:12:28,510
+computed the average first. Step two, we have

+174
+00:12:28,510 --> 00:12:37,530
+data. For example, 10. Take the deviation for each

+175
+00:12:37,530 --> 00:12:43,070
+data value you have from its mean. So I mean 10

+176
+00:12:43,070 --> 00:12:49,190
+minus 16 that will give minus 6. And do the same

+177
+00:12:49,190 --> 00:12:54,270
+for the rest. I mean for 12. So 12 minus 16 is

+178
+00:12:54,270 --> 00:12:58,590
+negative four, all the way up to the last value 24.

+179
+00:12:59,690 --> 00:13:06,990
+So 24 minus 16 gives 8. That's step number 2. So

+180
+00:13:06,990 --> 00:13:11,400
+we've computed the mean, then we found Or we

+181
+00:13:11,400 --> 00:13:15,380
+compute the distance between each value and its

+182
+00:13:15,380 --> 00:13:21,220
+mean. And now it's squared. So x minus x bar

+183
+00:13:21,220 --> 00:13:28,620
+squared. So minus 6 squared, you get 36, 16, up to

+184
+00:13:28,620 --> 00:13:32,920
+64, all the way down, up to 64. Now sum of these

+185
+00:13:32,920 --> 00:13:37,360
+values, sum all of these values, you will get 130.

+186
+00:13:39,700 --> 00:13:43,360
+So the sum of the square deviation of each value

+187
+00:13:43,360 --> 00:13:47,480
+and the mean equals 130. So the standard deviation

+188
+00:13:47,480 --> 00:13:51,420
+is the formula we have. Divide this value. I mean,

+189
+00:13:51,420 --> 00:13:56,100
+divide 130 by n minus 1. The sample size, as we

+190
+00:13:56,100 --> 00:14:00,400
+mentioned, was 8. So 8 minus 1 is 7. 
So 130

+191
+00:14:00,400 --> 00:14:06,540
+divided by 7 will give 4.3. So that's the way to

+192
+00:14:06,540 --> 00:14:10,930
+compute the sample standard deviation.

+193
+00:14:15,190 --> 00:14:20,770
+So again, the first step here, we have to find the

+194
+00:14:20,770 --> 00:14:23,990
+deviation from each value and its mean, then

+195
+00:14:23,990 --> 00:14:28,310
+square each one, each value, add these values

+196
+00:14:28,310 --> 00:14:32,070
+together. We add these values, the answer is 130.

+197
+00:14:33,600 --> 00:14:36,240
+The standard deviation is just square root of 130

+198
+00:14:36,240 --> 00:14:41,360
+divided by N minus one. So that will get us

+199
+00:14:41,360 --> 00:14:41,500
+in.

+200
+00:14:46,760 --> 00:14:47,760
+Any question?

+201
+00:14:51,700 --> 00:14:54,780
+Let's move to the next one.

+202
+00:15:05,160 --> 00:15:11,280
+Now this sheet compares the standard deviations

+203
+00:15:11,280 --> 00:15:16,300
+for three different data sets. Look at data number

+204
+00:15:16,300 --> 00:15:23,340
+one, data B, data C. All of them have the same

+205
+00:15:23,340 --> 00:15:29,480
+mean. The mean is 15.5. So each data has mean of

+206
+00:15:29,480 --> 00:15:30,440
+15.5.

+207
+00:15:33,710 --> 00:15:37,770
+The distribution is completely different from data

+208
+00:15:37,770 --> 00:15:43,650
+A to B to C. I mean, the mean by itself cannot be

+209
+00:15:43,650 --> 00:15:47,850
+used as a measure to describe the distribution of

+210
+00:15:47,850 --> 00:15:51,310
+the data, because each one has the same mean, but

+211
+00:15:51,310 --> 00:15:57,770
+actually have different spread or variability. So

+212
+00:15:57,770 --> 00:16:01,100
+for this reason, we have to compute. the variance

+213
+00:16:01,100 --> 00:16:05,600
+in order to see which one has the highest spread

+214
+00:16:05,600 --> 00:16:08,240
+or highest variability around the mean. 
If you + +215 +00:16:08,240 --> 00:16:14,940 +look at data B, all the points are gathered around + +216 +00:16:14,940 --> 00:16:19,040 +each other. I mean, there is no big difference + +217 +00:16:19,040 --> 00:16:23,620 +between or big distance between these points. All + +218 +00:16:23,620 --> 00:16:26,520 +of them ranges between 14 and 17. + +219 +00:16:30,780 --> 00:16:34,620 +All the datasets we have here, three datasets, all + +220 +00:16:34,620 --> 00:16:36,960 +of them have the same average or the same mean of + +221 +00:16:36,960 --> 00:16:42,440 +15.5. Data B, for example, the data points ranges + +222 +00:16:42,440 --> 00:16:48,780 +from 14 up to 17. So there is no gap between these + +223 +00:16:48,780 --> 00:16:52,740 +values. So it makes sense that this dataset has + +224 +00:16:52,740 --> 00:16:58,460 +the smallest deviation around the mean. And if you + +225 +00:16:58,460 --> 00:17:01,660 +compute the actual standard deviation, if you look + +226 +00:17:01,660 --> 00:17:07,480 +at that value, it's 0.926, so it's less than 1. If + +227 +00:17:07,480 --> 00:17:12,040 +you look at data number 1, now there is a gap + +228 +00:17:12,040 --> 00:17:17,780 +between this value 13 and 16. Also, there is a gap + +229 +00:17:17,780 --> 00:17:22,440 +between 18 and 21. That's why the standard + +230 +00:17:22,440 --> 00:17:25,040 +deviation becomes around + +231 +00:17:36,470 --> 00:17:41,420 +Now for data C. Maybe this is the worst + +232 +00:17:41,420 --> 00:17:45,020 +distribution compared to the previous two. Because + +233 +00:17:45,020 --> 00:17:48,240 +there is a big gap between or big distance between + +234 +00:17:48,240 --> 00:17:52,820 +12 and 19. For this reason, the standard deviation + +235 +00:17:52,820 --> 00:17:57,300 +is around 4.5. So it has the largest standard + +236 +00:17:57,300 --> 00:18:00,620 +deviation. 
So by using the standard deviation, we + +237 +00:18:00,620 --> 00:18:05,460 +can tell which one has the smallest or the largest + +238 +00:18:05,460 --> 00:18:09,850 +deviation. So in general, in statistics, we report + +239 +00:18:09,850 --> 00:18:13,370 +both the mean and the variance, or the standard + +240 +00:18:13,370 --> 00:18:16,170 +deviation actually. You have to say that the mean + +241 +00:18:16,170 --> 00:18:21,110 +is 15.5 with standard deviation 3.3. Another + +242 +00:18:21,110 --> 00:18:25,790 +dataset has the same mean but different standard + +243 +00:18:25,790 --> 00:18:28,530 +deviation. So in this case, we can tell which one + +244 +00:18:28,530 --> 00:18:31,030 +has more spread. + +245 +00:18:33,050 --> 00:18:35,850 +Suppose we have curves for + +246 +00:18:42,570 --> 00:18:47,410 +or symmetric distributions. Now, both of these + +247 +00:18:47,410 --> 00:18:50,590 +distributions are symmetric. I mean by symmetric, + +248 +00:18:51,310 --> 00:18:59,430 +the mean splits the data into two halves. the data + +249 +00:18:59,430 --> 00:19:03,690 +into two equally areas data A left to the right is + +250 +00:19:03,690 --> 00:19:05,970 +the same as the area to the left now if you look + +251 +00:19:05,970 --> 00:19:10,730 +at the mean for the center this dashed line it's + +252 +00:19:10,730 --> 00:19:15,830 +the same for each one so that means the means are + +253 +00:19:15,830 --> 00:19:19,690 +equal for data A and data B so the two graphs have + +254 +00:19:19,690 --> 00:19:23,090 +the same mean but if you look carefully at the + +255 +00:19:23,090 --> 00:19:27,790 +spread for the variability for figure number one + +256 +00:19:29,870 --> 00:19:35,430 +The spread here seems to be narrower than the + +257 +00:19:35,430 --> 00:19:41,390 +other one. The other one is wider. So, data A is + +258 +00:19:41,390 --> 00:19:45,430 +less spread than data B, even if they have the + +259 +00:19:45,430 --> 00:19:50,780 +same center or descent. 
See, you can tell if the

+260
+00:19:50,780 --> 00:19:55,980
+data is smaller or larger standard deviation just

+261
+00:19:55,980 --> 00:19:59,020
+based on the curve you have just look at the

+262
+00:19:59,020 --> 00:20:02,280
+distance between the vertical line I mean the mean

+263
+00:20:02,280 --> 00:20:06,080
+which splits the curve into two areas two equal

+264
+00:20:06,080 --> 00:20:13,080
+areas now the distance here this one actually it

+265
+00:20:13,080 --> 00:20:17,320
+is larger than the other so we can say that Data A

+266
+00:20:17,320 --> 00:20:21,000
+is less spread than data B. So by using the

+267
+00:20:21,000 --> 00:20:23,540
+standard deviation, I mean the actual value, or

+268
+00:20:23,540 --> 00:20:27,020
+just look at the graph. You can tell which one has

+269
+00:20:27,020 --> 00:20:28,020
+more spread.

+270
+00:20:33,740 --> 00:20:38,560
+To summarize some facts about standard deviation,

+271
+00:20:38,720 --> 00:20:42,780
+you can see that the more the data are spread out,

+272
+00:20:44,800 --> 00:20:47,800
+The greater the range, variance, and standard

+273
+00:20:47,800 --> 00:20:50,840
+deviation. So if the data is spread out, it means

+274
+00:20:50,840 --> 00:20:56,040
+you have large range, variance, and standard

+275
+00:20:56,040 --> 00:21:00,680
+deviation. The more the data are concentrated, I

+276
+00:21:00,680 --> 00:21:05,800
+mean gathered around each other, the smaller the

+277
+00:21:05,800 --> 00:21:08,360
+range, variance, and standard deviation. So that's

+278
+00:21:08,360 --> 00:21:11,140
+the difference when the data are concentrated or

+279
+00:21:11,140 --> 00:21:18,230
+spread out. 
large values and on the other hand we + +280 +00:21:18,230 --> 00:21:25,230 +have small values now there is one case the sample + +281 +00:21:25,230 --> 00:21:30,190 +mean or the variance or the range equals zero if + +282 +00:21:30,190 --> 00:21:34,290 +the data values are all the same and maybe never + +283 +00:21:34,290 --> 00:21:38,910 +happened in the real life in reality you never + +284 +00:21:38,910 --> 00:21:45,170 +maybe ninety nine percent the data set I mean the + +285 +00:21:45,170 --> 00:21:48,370 +values of innocence are not equal. But imagine + +286 +00:21:48,370 --> 00:21:51,870 +that you have a data of the same values. Suppose + +287 +00:21:51,870 --> 00:21:57,190 +we have five students of five children and their + +288 +00:21:57,190 --> 00:22:00,790 +age is five, five, five, five. So the average of + +289 +00:22:00,790 --> 00:22:03,490 +this one is five. Now what's the range? + +290 +00:22:07,030 --> 00:22:10,670 +minimum is the same as maximum so the range is + +291 +00:22:10,670 --> 00:22:15,090 +zero if you compute the variance because x minus x + +292 +00:22:15,090 --> 00:22:18,910 +bar this value minus x bar is zero for the risk + +293 +00:22:18,910 --> 00:22:23,970 +it's also zero so zero so the solution is zero so + +294 +00:22:23,970 --> 00:22:27,050 +this is the only time you see that the these + +295 +00:22:27,050 --> 00:22:30,610 +measures equal zero if all the values have the + +296 +00:22:30,610 --> 00:22:35,410 +same and that's never happened the other one is + +297 +00:22:35,410 --> 00:22:39,690 +none of these measures are ever negative because + +298 +00:22:39,690 --> 00:22:47,630 +the range is max minus min negative squared + +299 +00:22:47,630 --> 00:22:50,810 +becomes positive for this reason the variance and + +300 +00:22:50,810 --> 00:22:54,650 +standard deviation is always greater than zero so + +301 +00:22:54,650 --> 00:23:00,140 +you can see a square is greater than zero Or + +302 +00:23:00,140 --> 00:23:03,720 +equal, and 
equal just if the values are the same, + +303 +00:23:04,220 --> 00:23:11,300 +or are equal. Any question? So we explained range, + +304 +00:23:12,080 --> 00:23:13,820 +variance, and standard deviation. + +305 +00:23:18,460 --> 00:23:23,580 +Outliers affect the variance and standard + +306 +00:23:23,580 --> 00:23:28,720 +deviation. We said that outliers affect the range + +307 +00:23:28,720 --> 00:23:30,850 +and the mean. Now what's about standard deviation? + +308 +00:23:32,330 --> 00:23:39,630 +Look at this formula again. Now in + +309 +00:23:39,630 --> 00:23:42,170 +order to compute the sample standard deviation, we + +310 +00:23:42,170 --> 00:23:46,130 +have to compute the mean first. And we know that + +311 +00:23:46,130 --> 00:23:49,590 +outliers affect the mean much more than the + +312 +00:23:49,590 --> 00:23:54,950 +median. Since outliers affect X bar, and S is a + +313 +00:23:54,950 --> 00:23:58,890 +function of X bar, that means also outliers affect + +314 +00:23:58,890 --> 00:24:03,050 +the standard deviation. So, so far, outliers + +315 +00:24:03,050 --> 00:24:10,110 +affect mean, range, standard deviation. So in case + +316 +00:24:10,110 --> 00:24:12,810 +of outliers, I mean in the presence of outliers, + +317 +00:24:13,670 --> 00:24:17,430 +you have to avoid using these measures. We have to + +318 +00:24:17,430 --> 00:24:19,450 +use something else, and that's what we'll talk + +319 +00:24:19,450 --> 00:24:23,410 +later about, inshallah. So the mean is affected by + +320 +00:24:23,410 --> 00:24:26,970 +outlier. Last time we said that the median should + +321 +00:24:26,970 --> 00:24:30,430 +be used in this case because the median is less + +322 +00:24:30,430 --> 00:24:34,730 +affected by outliers than the mean. So outliers + +323 +00:24:34,730 --> 00:24:39,210 +affect the mean much more than the median. Now + +324 +00:24:39,210 --> 00:24:41,770 +what's about the range? The range is affected by + +325 +00:24:41,770 --> 00:24:45,690 +outliers. 
So in case of outliers, you have to use + +326 +00:24:45,690 --> 00:24:49,350 +another measure. That will be next time, + +327 +00:24:49,470 --> 00:24:54,300 +inshallah. Let's move to another measure of + +328 +00:24:54,300 --> 00:24:57,300 +variation which is called coefficient of + +329 +00:24:57,300 --> 00:24:57,700 +variation. + +330 +00:25:00,920 --> 00:25:04,900 +Consider we have two datasets, one for age, + +331 +00:25:09,260 --> 00:25:13,960 +for example, one for age and + +332 +00:25:13,960 --> 00:25:15,200 +other one for weight. + +333 +00:25:18,330 --> 00:25:22,730 +suppose someone computed the mean and the standard + +334 +00:25:22,730 --> 00:25:26,290 +deviation and he found that the standard deviation + +335 +00:25:26,290 --> 00:25:31,630 +is for example is 10 so 10 units because the unit + +336 +00:25:31,630 --> 00:25:36,190 +is units here and the standard deviation for + +337 +00:25:36,190 --> 00:25:42,610 +weight for example is 7 the weight the unit of the + +338 +00:25:42,610 --> 00:25:47,610 +weight is weight is kilogram now can you say that + +339 +00:25:49,240 --> 00:25:53,820 +Age, data for age is more spread than weight + +340 +00:25:53,820 --> 00:25:59,780 +because 10 is greater than 7. You cannot say that + +341 +00:25:59,780 --> 00:26:04,880 +because you have different units. So you cannot + +342 +00:26:04,880 --> 00:26:08,040 +compare the spread of the data just based on the + +343 +00:26:08,040 --> 00:26:10,780 +sample standard deviation. So we need a measure + +344 +00:26:10,780 --> 00:26:17,720 +that compute this variability regardless of the + +345 +00:26:17,720 --> 00:26:22,220 +original units. That measure is called coefficient + +346 +00:26:22,220 --> 00:26:25,640 +of variation. 
So coefficient of variation is a + +347 +00:26:25,640 --> 00:26:32,540 +measure for relative variation always represents + +348 +00:26:32,540 --> 00:26:37,360 +in percentage shows variation relative to the mean + +349 +00:26:37,360 --> 00:26:43,260 +and can be used to compare the variability of two + +350 +00:26:43,260 --> 00:26:47,760 +or more than two sets of data measured in + +351 +00:26:47,760 --> 00:26:53,680 +different units. The formula for CV or coefficient + +352 +00:26:53,680 --> 00:26:58,770 +of variation S divided by X bar. So we have to + +353 +00:26:58,770 --> 00:27:01,990 +compute both the mean and the standard deviation + +354 +00:27:01,990 --> 00:27:05,090 +in order to compute CV. S divided by X bar + +355 +00:27:05,090 --> 00:27:08,850 +multiplied by 100 gives the coefficient of + +356 +00:27:08,850 --> 00:27:13,730 +variation. Now, for example, suppose we have two + +357 +00:27:13,730 --> 00:27:19,490 +stock markets, stock A and stock B. And let's say + +358 +00:27:19,490 --> 00:27:22,830 +that the average price last year for stock A was + +359 +00:27:22,830 --> 00:27:29,380 +$50. And the standard deviation was five. For + +360 +00:27:29,380 --> 00:27:36,500 +stock B, the average price last year was $100 with + +361 +00:27:36,500 --> 00:27:40,560 +the same standard deviations. It's five. Now let's + +362 +00:27:40,560 --> 00:27:44,660 +compare which one, which price I mean, which stock + +363 +00:27:44,660 --> 00:27:49,060 +price is more spread than the other one. I mean, + +364 +00:27:49,260 --> 00:27:55,320 +which one has big gaps between the values and the + +365 +00:27:55,320 --> 00:27:59,300 +mean. I mean, which values are concentrated around + +366 +00:27:59,300 --> 00:28:02,580 +each other, stack A, price of stack A or stack B. + +367 +00:28:03,720 --> 00:28:07,520 +Just for simple calculation, CV for stack A is + +368 +00:28:07,520 --> 00:28:11,140 +just S over X bar multiplied by 100. 
That will + +369 +00:28:11,140 --> 00:28:17,360 +gives 5 divided by 50 times 100 gives 10%. So the + +370 +00:28:17,360 --> 00:28:19,260 +coefficient of variation in this case is 10. + +371 +00:28:21,670 --> 00:28:27,630 +What's about the other one? S against 5. The + +372 +00:28:27,630 --> 00:28:31,950 +average of stock B was last year was 100 + +373 +00:28:31,950 --> 00:28:40,520 +multiplied by 100 gives 5%. So CV for A. is + +374 +00:28:40,520 --> 00:28:45,820 +greater than CV for B that means stack A I mean + +375 +00:28:45,820 --> 00:28:52,460 +prices of stack A are more spread than stack B + +376 +00:28:52,460 --> 00:28:58,510 +that means stack B is more stable So the stability + +377 +00:28:58,510 --> 00:29:04,030 +in stock B is more than that in stock A, because + +378 +00:29:04,030 --> 00:29:08,210 +this one is less variable, less variability in the + +379 +00:29:08,210 --> 00:29:12,030 +data. So in this case, we can compute, I mean, we + +380 +00:29:12,030 --> 00:29:18,530 +can compare different data sets. That's all for + +381 +00:29:18,530 --> 00:29:21,470 +measures of variation. So we talked about range, + +382 +00:29:23,810 --> 00:29:28,230 +variance, standard deviation, And finally + +383 +00:29:28,230 --> 00:29:33,470 +coefficient of variation. Next topic talks about z + +384 +00:29:33,470 --> 00:29:36,950 +-score. Most of the time we are talking about + +385 +00:29:36,950 --> 00:29:41,550 +extreme values or extreme outliers. Now let's see + +386 +00:29:41,550 --> 00:29:45,370 +how can we tell if this point is considered to be + +387 +00:29:45,370 --> 00:29:51,110 +an outlier. We have some data values and you want + +388 +00:29:51,110 --> 00:29:55,150 +to use the mean as a measure of center or the + +389 +00:29:55,150 --> 00:29:58,670 +median, which one you have to use. If the data set + +390 +00:29:58,670 --> 00:30:01,670 +has outliers, as we mentioned, then use the + +391 +00:30:01,670 --> 00:30:04,370 +median. 
Now the question is how can you determine + +392 +00:30:04,370 --> 00:30:08,780 +if the data set has outliers? There are several + +393 +00:30:08,780 --> 00:30:12,680 +methods. Today we'll talk just about z-score, + +394 +00:30:12,800 --> 00:30:17,680 +later we'll talk about something else. And z-score + +395 +00:30:17,680 --> 00:30:20,580 +sometimes called standardized z-score. + +396 +00:30:22,380 --> 00:30:27,400 +Standardized value + +397 +00:30:27,400 --> 00:30:31,440 +or standardized score. So standardized stands for + +398 +00:30:31,440 --> 00:30:35,060 +z. More details about z-score will be in chapter 6 + +399 +00:30:35,060 --> 00:30:38,360 +when we talk about normal distribution, but at + +400 +00:30:38,360 --> 00:30:43,540 +least here we'll just use z-score to determine or + +401 +00:30:43,540 --> 00:30:46,840 +to know if this point is considered to be or + +402 +00:30:46,840 --> 00:30:50,640 +suspected to be extreme value or extreme outlier + +403 +00:30:50,640 --> 00:30:54,480 +only. To compute the z-score by data value, + +404 +00:30:54,820 --> 00:30:57,560 +subtract the mean and divide by the standard + +405 +00:30:57,560 --> 00:31:04,220 +deviation. That means suppose we have data set x + +406 +00:31:04,220 --> 00:31:07,540 +or data value for each one we can complete this + +407 +00:31:07,540 --> 00:31:15,800 +code we have data value x subtract x mean then + +408 +00:31:15,800 --> 00:31:20,440 +divide by standardization Now again to compute + +409 +00:31:20,440 --> 00:31:22,760 +this score you have to compute the mean and + +410 +00:31:22,760 --> 00:31:26,640 +standard deviation the same as CV. So this score + +411 +00:31:26,640 --> 00:31:31,700 +is the distance x minus x bar then divided by + +412 +00:31:31,700 --> 00:31:37,000 +standard deviation. Now do you think what are the + +413 +00:31:37,000 --> 00:31:41,720 +signs of this score? 
This score can be positive, + +414 +00:31:42,660 --> 00:31:47,930 +can be negative, or maybe equals zero because x + +415 +00:31:47,930 --> 00:31:54,230 +minus x bar if they are the same so if so z score + +416 +00:31:54,230 --> 00:31:58,350 +equals zero that's only if x equals x bar + +417 +00:31:58,350 --> 00:32:06,510 +otherwise z score may be positive or negative it's + +418 +00:32:06,510 --> 00:32:10,890 +positive when now the sign can be determined by + +419 +00:32:10,890 --> 00:32:16,340 +using this system If X minus X bar is negative, + +420 +00:32:17,080 --> 00:32:20,260 +then this score is negative. If X minus X bar is + +421 +00:32:20,260 --> 00:32:22,720 +positive, then this score is positive. So when + +422 +00:32:22,720 --> 00:32:26,580 +this score is positive, if the value of X is + +423 +00:32:26,580 --> 00:32:31,620 +greater than X bar. Otherwise, this score is + +424 +00:32:31,620 --> 00:32:35,620 +negative if the value of X is smaller than X bar. + +425 +00:32:36,620 --> 00:32:39,660 +So, z-score can be positive, can be negative, can + +426 +00:32:39,660 --> 00:32:43,540 +be equal to zero, it depends on the value of x, I + +427 +00:32:43,540 --> 00:32:46,640 +mean the value of the point and the average or the + +428 +00:32:46,640 --> 00:32:51,880 +mean. Now, what's the meaning of z-score? The z + +429 +00:32:51,880 --> 00:32:56,220 +-score is the number of standard deviations a data + +430 +00:32:56,220 --> 00:33:02,520 +value is from the mean. For example, imagine your + +431 +00:33:02,520 --> 00:33:09,140 +score for accounting A was + +432 +00:33:09,140 --> 00:33:19,380 +75 that's your score in course A the average of + +433 +00:33:19,380 --> 00:33:24,640 +the entire class was 70 with standard deviation 5 + +434 +00:33:26,490 --> 00:33:32,050 +So your score was 75, the average is 70, and + +435 +00:33:32,050 --> 00:33:36,070 +standard deviation is 5. Now look at this score. 
+ +436 +00:33:38,330 --> 00:33:44,930 +It's just 75 minus 70 divided by 5 gives 1. 5 over + +437 +00:33:44,930 --> 00:33:49,410 +5 is 1. Now what's the mean of 1? This point says + +438 +00:33:49,410 --> 00:33:52,050 +that this score is the number of standard + +439 +00:33:52,050 --> 00:33:56,960 +deviations a data value is from the mean. Now, if + +440 +00:33:56,960 --> 00:34:00,900 +your score is 75 and the average was 70, that + +441 +00:34:00,900 --> 00:34:06,700 +means your score is above the mean by 5 units or 5 + +442 +00:34:06,700 --> 00:34:10,600 +points. Now, 5 points is what is the standard + +443 +00:34:10,600 --> 00:34:15,300 +deviation. So that means your score is above the + +444 +00:34:15,300 --> 00:34:18,970 +mean by one standard deviation. So that's the + +445 +00:34:18,970 --> 00:34:21,490 +meaning of one. So if z score is one, that means + +446 +00:34:21,490 --> 00:34:24,330 +my score is above the mean by one standard + +447 +00:34:24,330 --> 00:34:33,370 +deviation. Now suppose your friend, she has 60 in + +448 +00:34:33,370 --> 00:34:37,990 +the same class, the same course, for sure the same + +449 +00:34:37,990 --> 00:34:41,770 +mean and the same standard deviation. We are + +450 +00:34:41,770 --> 00:34:46,370 +talking about the same class. So here the score is + +451 +00:34:46,370 --> 00:34:51,930 +going to be 60 minus 70 divided by 5, so that's + +452 +00:34:51,930 --> 00:34:57,290 +minus 10. Now the difference between here's score + +453 +00:34:57,290 --> 00:35:03,690 +and the average is 10, and 10 is twice the + +454 +00:35:03,690 --> 00:35:09,530 +standard deviation, below or above. 
below the sign + +455 +00:35:09,530 --> 00:35:13,030 +is negative here score is 60 the average was 70 so + +456 +00:35:13,030 --> 00:35:16,090 +here score is below the mean so you can say that + +457 +00:35:16,090 --> 00:35:22,410 +here score is two standard deviation below the + +458 +00:35:22,410 --> 00:35:26,830 +mean so you can tell if the score is above or + +459 +00:35:26,830 --> 00:35:32,830 +below the mean Next, now we are looking to locate + +460 +00:35:32,830 --> 00:35:36,970 +extreme values. I mean, if you want to examine if + +461 +00:35:36,970 --> 00:35:41,090 +the data is outlier or not. Now the rule of thumb + +462 +00:35:41,090 --> 00:35:46,690 +is, rule in general, a data value is considered an + +463 +00:35:46,690 --> 00:35:51,810 +extreme outlier if its z-score is less than + +464 +00:35:51,810 --> 00:35:56,910 +negative three or greater than three. So suppose + +465 +00:35:56,910 --> 00:35:58,970 +you compute a z-score for a data point, + +466 +00:36:01,650 --> 00:36:05,830 +And this score, for example, for this one was one. + +467 +00:36:07,190 --> 00:36:08,710 +So the question is, + +468 +00:36:11,050 --> 00:36:19,550 +does 75 outlier or not? The rule is that if the + +469 +00:36:19,550 --> 00:36:22,390 +data value is greater than three, I mean this + +470 +00:36:22,390 --> 00:36:29,160 +area, Or smaller than negative three. In this + +471 +00:36:29,160 --> 00:36:32,900 +case, the point is suspected to be extreme + +472 +00:36:32,900 --> 00:36:36,700 +outlier, otherwise it's okay. Now my z-score is + +473 +00:36:36,700 --> 00:36:39,640 +one, so one lies between minus three and plus + +474 +00:36:39,640 --> 00:36:44,080 +three, so the point is not an outlier. So it's so + +475 +00:36:44,080 --> 00:36:46,760 +easy to determine if the point is outlier or not, + +476 +00:36:46,820 --> 00:36:51,540 +just look at z-score. If it is above three, it's + +477 +00:36:51,540 --> 00:36:55,020 +outlier. less than minus three is out there. 
+
+478
+00:36:55,140 --> 00:36:58,580
+Otherwise, the data is okay. The larger the
+
+479
+00:36:58,580 --> 00:37:02,020
+absolute value of the z-score, the farther the
+
+480
+00:37:02,020 --> 00:37:08,320
+data value is from the mean. That means suppose
+
+481
+00:37:08,320 --> 00:37:15,040
+you have z-score of 3.5 and other one is 2.5.
+
+482
+00:37:16,340 --> 00:37:21,500
+Which one the data value is farther from the mean?
+
+483
+00:37:24,070 --> 00:37:31,190
+3.5, so this one's data value is farther from the
+
+484
+00:37:31,190 --> 00:37:34,830
+mean. So again, this score
+

diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ.srt
new file mode 100644
index 0000000000000000000000000000000000000000..27183c1340114bdfd7785c9e93ee42ef0c826f0c
--- /dev/null
+++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ.srt
@@ -0,0 +1,2432 @@
+
+1
+00:00:06,600 --> 00:00:10,640
+the sampling distribution of the sample mean, and
+
+2
+00:00:10,640 --> 00:00:16,400
+we have produced two cases. The first one, if the
+
+3
+00:00:16,400 --> 00:00:20,320
+population is normal and conservative, in this
+
+4
+00:00:20,320 --> 00:00:24,580
+case, the sampling distribution of the sample mean
+
+5
+00:00:24,580 --> 00:00:29,540
+is exactly normal, with mean of x bar equal the
+
+6
+00:00:29,540 --> 00:00:30,780
+whole population mean.
+
+7
+00:00:33,650 --> 00:00:38,050
+And sigma of x bar is called the standard error of
+
+8
+00:00:38,050 --> 00:00:43,870
+the sample mean given by sigma. Also, we mentioned
+
+9
+00:00:43,870 --> 00:00:47,950
+that the standard error of the sample mean is
+
+10
+00:00:47,950 --> 00:00:53,270
+always smaller than sigma. And also, as n
+
+11
+00:00:53,270 --> 00:00:56,610
+increases, the standard error of x bar decreases.
+
+12
+00:00:56,790 --> 00:01:00,450
+So there is a negative relationship.
+
+13
+00:01:01,360 --> 00:01:04,840
+an inverse relationship between the
+
+14
+00:01:04,840 --> 00:01:08,740
+standard error of x bar and the sample size.
+
+15
+00:01:09,300 --> 00:01:13,680
+That's when the population is normally distributed.
+
+16
+00:01:15,540 --> 00:01:19,860
+So again, in this case, the new z score is given
+
+17
+00:01:19,860 --> 00:01:23,780
+by this equation. Here, we replace sigma by sigma
+
+18
+00:01:23,780 --> 00:01:27,700
+of x bar. So z score is given by x bar minus the
+
+19
+00:01:27,700 --> 00:01:32,880
+mean divided by sigma over root n. The second
+
+20
+00:01:32,880 --> 00:01:40,780
+case, if the population is not normal, in this
+
+21
+00:01:40,780 --> 00:01:45,140
+case we can apply the central limit theorem, and
+
+22
+00:01:45,140 --> 00:01:48,260
+this theorem can be applied if the sample size is
+
+23
+00:01:48,260 --> 00:01:52,500
+large enough. So if N is large, in this case, we
+
+24
+00:01:52,500 --> 00:01:55,580
+can say that the sampling distribution of the
+
+25
+00:01:55,580 --> 00:01:59,080
+sample mean is approximately normally distributed
+
+26
+00:01:59,080 --> 00:02:04,120
+as long as the sample size is large enough. Now,
+
+27
+00:02:04,480 --> 00:02:08,460
+how large is large enough? We mentioned that for
+
+28
+00:02:08,460 --> 00:02:13,440
+most distributions, N above 30 will give sampling
+
+29
+00:02:13,440 --> 00:02:16,280
+distribution that is nearly symmetric or normal.
+
+30
+00:02:17,730 --> 00:02:20,210
+But on the other hand, for fairly symmetrical
+
+31
+00:02:20,210 --> 00:02:25,690
+distribution, N above 15 will usually give a sampling
+
+32
+00:02:25,690 --> 00:02:29,790
+distribution that is almost normal. So we have three
+
+33
+00:02:29,790 --> 00:02:34,290
+cases. For most distributions, we need large
+
+34
+00:02:34,290 --> 00:02:38,370
+sample to be N above 30. For symmetric
+
+35
+00:02:38,370 --> 00:02:43,530
+distributions, N above 15.
But if the population + +36 +00:02:43,530 --> 00:02:48,030 +is normally distributed, then regardless of the + +37 +00:02:48,030 --> 00:02:50,830 +sample size, the sampling distribution of the + +38 +00:02:50,830 --> 00:02:54,170 +sample mean is always normally distributed. So + +39 +00:02:54,170 --> 00:02:58,440 +here, it's nearly symmetric. almost symmetric and + +40 +00:02:58,440 --> 00:03:00,480 +the other one is always symmetric. So we have to + +41 +00:03:00,480 --> 00:03:04,760 +distinguish between these three cases. And I think + +42 +00:03:04,760 --> 00:03:10,080 +we gave one example. Now let's move to another + +43 +00:03:10,080 --> 00:03:14,380 +part. As we mentioned in the beginning of this + +44 +00:03:14,380 --> 00:03:19,980 +class, data has two types mainly, quantitative and + +45 +00:03:19,980 --> 00:03:24,100 +qualitative. Quantitative, for example, your + +46 +00:03:24,100 --> 00:03:29,800 +score. Score is numerical value, 60, 65, and so + +47 +00:03:29,800 --> 00:03:33,180 +on. In this case, we can talk about assembly. + +48 +00:03:35,560 --> 00:03:46,400 +So first, for quantitative data, + +49 +00:03:47,020 --> 00:03:49,320 +we talk about assembly. + +50 +00:03:55,440 --> 00:04:01,780 +Number two, if we talk about, if we talk about + +51 +00:04:01,780 --> 00:04:09,900 +qualitative data, in this case we talk about + +52 +00:04:09,900 --> 00:04:15,760 +sample of + +53 +00:04:15,760 --> 00:04:16,100 +water. + +54 +00:04:19,220 --> 00:04:24,110 +In this case, proportion is what's the sampling + +55 +00:04:24,110 --> 00:04:28,770 +distribution of the sample proportion. So we are + +56 +00:04:28,770 --> 00:04:35,690 +looking for the sampling distribution + +57 +00:04:35,690 --> 00:04:46,410 +of the sample population + +58 +00:04:46,410 --> 00:04:51,190 +having some characteristic. For example, let's buy + +59 +00:04:52,160 --> 00:05:02,820 +denotes number of smokers among + +60 +00:05:02,820 --> 00:05:03,620 +females. 
+ +61 +00:05:06,500 --> 00:05:12,280 +So let's use Y as number of smokers among all + +62 +00:05:12,280 --> 00:05:19,560 +females. All females have IUDs. So here we are + +63 +00:05:19,560 --> 00:05:24,560 +talking about My population is IUG students, but + +64 +00:05:24,560 --> 00:05:30,240 +here we're focused on female students. And pi is + +65 +00:05:30,240 --> 00:05:33,840 +number of smokers among all female students. + +66 +00:05:36,820 --> 00:05:39,660 +Now sampling proportion, so my characteristic here + +67 +00:05:39,660 --> 00:05:46,000 +is smokers. Maybe your characteristic is person or + +68 +00:05:46,000 --> 00:05:48,800 +student's number. + +69 +00:06:01,190 --> 00:06:08,130 +proportion of students have + +70 +00:06:08,130 --> 00:06:12,790 +scores above + +71 +00:06:12,790 --> 00:06:19,210 +80. So by proportion of students have scores above + +72 +00:06:19,210 --> 00:06:23,430 +80, and so on. So here, in this case, your + +73 +00:06:23,430 --> 00:06:27,810 +characteristic is scores above 80, and so on. + +74 +00:06:29,300 --> 00:06:33,120 +Simple proportion provides an estimate of Pi, + +75 +00:06:33,340 --> 00:06:36,780 +because generally speaking, Pi is not given, or is + +76 +00:06:36,780 --> 00:06:40,900 +unknown, similar as Mu. And Mu is the population + +77 +00:06:40,900 --> 00:06:44,840 +mean, is always unknown, and we are interested to + +78 +00:06:44,840 --> 00:06:48,680 +estimate the population mean. In this case, the + +79 +00:06:48,680 --> 00:06:53,780 +point estimate of Pi is P. So P is the point + +80 +00:06:53,780 --> 00:06:57,980 +estimate, sample, a proportion, + +81 +00:07:00,590 --> 00:07:05,150 +which is the point estimate of Y. Now, for + +82 +00:07:05,150 --> 00:07:07,490 +example, let's go back to the proportion of + +83 +00:07:07,490 --> 00:07:11,350 +smokers among all female students at IUG. 
Let's + +84 +00:07:11,350 --> 00:07:16,510 +assume that number of females at IUG, for example, + +85 +00:07:16,650 --> 00:07:17,970 +is 1,000 students. + +86 +00:07:23,570 --> 00:07:29,020 +And we talk here around a sample of 1,000. We know + +87 +00:07:29,020 --> 00:07:32,420 +that there are too many female students at IUG. + +88 +00:07:33,040 --> 00:07:35,980 +Suppose we select a random sample of size 1,000. + +89 +00:07:38,040 --> 00:07:44,940 +So that's the sample size. So here, we selected a + +90 +00:07:44,940 --> 00:07:51,380 +random sample of size 1,000 from the population of + +91 +00:07:51,380 --> 00:07:55,840 +females. And suppose we found that number of + +92 +00:07:55,840 --> 00:08:03,640 +smokers among these equal one hundred. So out of + +93 +00:08:03,640 --> 00:08:08,040 +one thousand, one hundred students are smoking. + +94 +00:08:08,820 --> 00:08:11,840 +Now what's the percentage of smokers in this case? + +95 +00:08:13,220 --> 00:08:16,700 +So P equals this one hundred divided by a + +96 +00:08:16,700 --> 00:08:20,760 +thousand, so that's ten percent or point one. + +97 +00:08:25,220 --> 00:08:32,500 +Let's assume that X denotes the number of items in + +98 +00:08:32,500 --> 00:08:34,900 +the sample having the characteristic of interest. + +99 +00:08:35,300 --> 00:08:39,920 +In this case, X number of smokers in your sample, + +100 +00:08:40,040 --> 00:08:44,820 +so X equals 100. N is 1000, so proportion is + +101 +00:08:44,820 --> 00:08:53,900 +always equals X divided by N, X number of items in + +102 +00:08:53,900 --> 00:08:57,580 +the characteristic you have. N is the sample size, + +103 +00:08:57,700 --> 00:09:01,240 +so B equals X over N. Sometimes it might be X + +104 +00:09:01,240 --> 00:09:05,800 +equals zero. 
So for example suppose I select a + +105 +00:09:05,800 --> 00:09:12,160 +random sample of size 1,000 and none of them So in + +106 +00:09:12,160 --> 00:09:15,820 +this case, x equals zero, that means the + +107 +00:09:15,820 --> 00:09:20,500 +percentage equals zero. So we can have zero + +108 +00:09:20,500 --> 00:09:24,300 +percentage if x equals zero. Also in the upper + +109 +00:09:24,300 --> 00:09:27,980 +limit, suppose here we are talking about students + +110 +00:09:27,980 --> 00:09:32,020 +have scores above 80. And we select a random + +111 +00:09:32,020 --> 00:09:36,100 +sample, and that sample has All of the students + +112 +00:09:36,100 --> 00:09:41,520 +have scores above 80, so that means x equals 1000, + +113 +00:09:42,380 --> 00:09:45,720 +so percentage is 1. So 1000 divided by 1000 is 1. + +114 +00:09:46,220 --> 00:09:50,460 +So the minimum value of B is 0, the maximum is 1, + +115 +00:09:50,600 --> 00:09:53,580 +that means B ranges from 0 to 1. So it could be 0, + +116 +00:09:53,640 --> 00:10:01,240 +it could be 1, but almost B is between 0 and 1. B + +117 +00:10:01,240 --> 00:10:01,940 +is the proportion. + +118 +00:10:04,610 --> 00:10:07,030 +So, B is, in this case, is approximately + +119 +00:10:07,030 --> 00:10:10,190 +distributed as normal distribution when N is + +120 +00:10:10,190 --> 00:10:15,130 +large. But again, how large is large enough, we'll + +121 +00:10:15,130 --> 00:10:18,830 +talk about later. So, B is approximately normal + +122 +00:10:18,830 --> 00:10:24,630 +distributed when N is large. Here, we assume + +123 +00:10:24,630 --> 00:10:29,230 +sampling distribution with replacement if the + +124 +00:10:29,230 --> 00:10:32,660 +population is finite. And sampling without + +125 +00:10:32,660 --> 00:10:35,580 +replacement from infinite population. Finite + +126 +00:10:35,580 --> 00:10:41,520 +population means limited, has fixed size. Infinite + +127 +00:10:41,520 --> 00:10:46,940 +population means unlimited. 
Because if we select a + +128 +00:10:46,940 --> 00:10:49,560 +random sample without replacement from infinite + +129 +00:10:49,560 --> 00:10:55,540 +population, for example suppose N equals 10,000. + +130 +00:10:57,300 --> 00:11:00,480 +In this case, each person has a chance of being + +131 +00:11:00,480 --> 00:11:05,460 +selected one over 10,000. Let's assume that we + +132 +00:11:05,460 --> 00:11:10,760 +select someone without replacement. So in this + +133 +00:11:10,760 --> 00:11:14,000 +case, if we select one with a proportion of one + +134 +00:11:14,000 --> 00:11:17,820 +over 10,000, it means the second one has a chance + +135 +00:11:17,820 --> 00:11:22,340 +of one divided by 9999. + +136 +00:11:23,000 --> 00:11:25,840 +And the difference between these two is very + +137 +00:11:25,840 --> 00:11:30,250 +small. For this reason, we can select a random + +138 +00:11:30,250 --> 00:11:34,850 +sample without replacement if the population size + +139 +00:11:34,850 --> 00:11:41,310 +is large, because the probability to select an + +140 +00:11:41,310 --> 00:11:45,610 +item from that population remains the same, or + +141 +00:11:45,610 --> 00:11:48,490 +approximately the same, because 1 over 10,000 is + +142 +00:11:48,490 --> 00:11:51,970 +roughly equal to 1 over 9999. + +143 +00:11:54,060 --> 00:11:57,440 +Now the two conditions we have to check in order + +144 +00:11:57,440 --> 00:12:01,900 +to apply or in order to say that B is + +145 +00:12:01,900 --> 00:12:04,740 +approximately normally distributed. The two + +146 +00:12:04,740 --> 00:12:10,020 +conditions are n times pi is at least 5 and we + +147 +00:12:10,020 --> 00:12:15,660 +have n, pi is given. Also, n times 1 minus pi is + +148 +00:12:15,660 --> 00:12:19,380 +at least 5. So there are two conditions should be + +149 +00:12:19,380 --> 00:12:24,260 +satisfied in order to use the normal distribution. 
+ +150 +00:12:24,900 --> 00:12:31,200 +Again, the first one, n times pi is at least 5, n + +151 +00:12:31,200 --> 00:12:36,880 +times 1 minus pi is also at least 5. If these two + +152 +00:12:36,880 --> 00:12:41,600 +conditions are satisfied, then you can say that B, + +153 +00:12:41,780 --> 00:12:44,520 +or the sample proportion, is approximately + +154 +00:12:44,520 --> 00:12:47,780 +normally distributed, so that's the shape of the + +155 +00:12:47,780 --> 00:12:54,900 +distribution, with mean of? Mu of p equals pi, so + +156 +00:12:54,900 --> 00:12:59,180 +the mean of p equals pi, with sigma of p equals + +157 +00:12:59,180 --> 00:13:04,100 +square root of + +158 +00:13:04,100 --> 00:13:09,160 +pi times 1 minus pi, divided by n. So that's the + +159 +00:13:09,160 --> 00:13:14,120 +mean of p is always pi, and sigma of p equals + +160 +00:13:14,120 --> 00:13:16,940 +square root of pi times 1 minus pi divided by n. + +161 +00:13:18,480 --> 00:13:23,810 +Let's compare this result. with the sampling + +162 +00:13:23,810 --> 00:13:28,030 +distribution of the sample mean. If you remember, + +163 +00:13:28,610 --> 00:13:36,990 +the mean of x bar was mu. And here, the statistic + +164 +00:13:36,990 --> 00:13:41,230 +is the sample mean, and the mean of x bar is mu. + +165 +00:13:42,330 --> 00:13:45,430 +On the other hand, the mean of the statistic, my + +166 +00:13:45,430 --> 00:13:49,010 +statistic is the sample proportion is five. So in + +167 +00:13:49,010 --> 00:13:52,930 +the two cases, the mean equals the true value. I + +168 +00:13:52,930 --> 00:13:55,910 +mean, the true parameter. So in this case, the + +169 +00:13:55,910 --> 00:13:59,510 +mean of x bar equal mu, and mu of p equal pi. On + +170 +00:13:59,510 --> 00:14:03,030 +the other hand, the sigma of x bar was sigma over + +171 +00:14:03,030 --> 00:14:08,310 +root n. This looks similar, because this one's + +172 +00:14:08,310 --> 00:14:12,930 +just sigma squared over n. 
But here, sigma squared + +173 +00:14:12,930 --> 00:14:14,630 +is pi times 1 minus pi. + +174 +00:14:18,430 --> 00:14:21,430 +So again, the standard distribution of B is + +175 +00:14:21,430 --> 00:14:24,290 +roughly symmetric or approximately normally + +176 +00:14:24,290 --> 00:14:30,810 +distributed if these two conditions are satisfied + +177 +00:14:30,810 --> 00:14:34,430 +and mu of B equals pi and sigma of B equals square + +178 +00:14:34,430 --> 00:14:38,230 +root of pi times 1 minus pi over n. Now, this + +179 +00:14:38,230 --> 00:14:44,490 +score, as we mentioned before, the standard + +180 +00:14:44,490 --> 00:14:47,430 +equation is given by x minus the mean of x divided + +181 +00:14:47,430 --> 00:14:52,550 +by sigma. Last time, we talked about semi + +182 +00:14:52,550 --> 00:14:53,730 +-distribution of x bar. + +183 +00:14:56,570 --> 00:15:00,610 +So your z score + +184 +00:15:00,610 --> 00:15:05,050 +equals x bar minus the mean of x bar divided by + +185 +00:15:05,050 --> 00:15:11,250 +sigma of x bar. So that's x bar minus mu, because + +186 +00:15:11,250 --> 00:15:16,050 +the mean of x bar is mu, divided by sigma of x + +187 +00:15:16,050 --> 00:15:18,810 +bar, sigma over root n. + +188 +00:15:22,620 --> 00:15:27,720 +Now let's compute z-score for p. So z equals p + +189 +00:15:27,720 --> 00:15:30,400 +minus the mean of p divided by sigma. + +190 +00:15:33,680 --> 00:15:39,720 +So in this case, z equals the mean of p, which is + +191 +00:15:39,720 --> 00:15:48,060 +pi, divided by pi, 1 minus pi divided by p. So in + +192 +00:15:48,060 --> 00:15:52,090 +this case, the new formula for the z-score is + +193 +00:15:52,090 --> 00:15:59,530 +given by pi minus one. So the zero zero score is + +194 +00:15:59,530 --> 00:16:04,610 +equal to pi minus, p minus pi divided by root pi + +195 +00:16:04,610 --> 00:16:08,390 +times one minus pi divided by small size n + +223 +00:18:59,050 --> 00:19:04,110 +satisfied, the first condition. 
Can you figure out + +224 +00:19:04,110 --> 00:19:07,210 +the value of N times 1 minus Pi without + +225 +00:19:07,210 --> 00:19:16,110 +calculation? 200 minus 80. So this value is 200 + +226 +00:19:16,110 --> 00:19:24,730 +minus 80. Or just 200 times 1 minus 0.4. 200 times + +227 +00:19:24,730 --> 00:19:29,890 +0.6 is 120. So if you just find the first one and + +228 +00:19:29,890 --> 00:19:35,650 +times 5, the other one is n minus 80 will give the + +229 +00:19:35,650 --> 00:19:38,170 +other condition. So now the two conditions are + +230 +00:19:38,170 --> 00:19:43,400 +satisfied. Then we can use the z-score. I mean, we + +231 +00:19:43,400 --> 00:19:47,880 +can say that the sampling proportion is normally + +232 +00:19:47,880 --> 00:19:54,980 +distributed with mean equal pi, always pi, and pi + +233 +00:19:54,980 --> 00:20:02,700 +is given as 40%. And the sigma of p equals square + +234 +00:20:02,700 --> 00:20:08,360 +root of pi 1 minus pi divided by n. That's your + +235 +00:20:08,360 --> 00:20:15,030 +square root of 0.4 times. divided by 200, and that + +236 +00:20:15,030 --> 00:20:18,830 +will give 0 + +237 +00:20:18,830 --> 00:20:22,810 +.0346. + +238 +00:20:26,210 --> 00:20:29,250 +So, the first step, we have to check the two + +239 +00:20:29,250 --> 00:20:33,730 +conditions. Second step, compute the mean of P, + +240 +00:20:33,970 --> 00:20:42,270 +sigma of P. Now, finally, find the z-score of your + +241 +00:20:42,270 --> 00:20:45,970 +problem, here he asks about what's the probability + +242 +00:20:45,970 --> 00:20:54,210 +that B lies between 0 + +243 +00:20:54,210 --> 00:20:58,430 +.4 and 0.45 percent. So we have to find the score + +244 +00:20:58,430 --> 00:21:05,490 +for 0.4. So that's 0.4 minus again the mean of B + +245 +00:21:05,490 --> 00:21:11,550 +which is again 0.4, so that's valid. Divide by 0 + +246 +00:21:11,550 --> 00:21:26,210 +.0346. So again, b is 0.4. This is pi. 
And pi, in + +247 +00:21:26,210 --> 00:21:32,150 +this case, is always 0.4. And this is your b. So 0 + +248 +00:21:32,150 --> 00:21:37,350 +.4 minus 0.4 divided by 0.0346 plus 1 equals 0. + +249 +00:21:38,600 --> 00:21:44,000 +The other z-score, for the other value, here, 0 + +250 +00:21:44,000 --> 00:21:44,540 +.45. + +251 +00:21:50,340 --> 00:21:53,360 +And this gives 1.4. + +252 +00:21:56,900 --> 00:22:01,400 +After that, this problem is converted to + +253 +00:22:01,400 --> 00:22:06,840 +standardized normal value. So instead of P, we + +254 +00:22:06,840 --> 00:22:14,920 +have Z. between 0 and 1.44. That's all for using + +255 +00:22:14,920 --> 00:22:21,540 +Chapter 7. Now, to complete your answer, you have + +256 +00:22:21,540 --> 00:22:26,920 +to use Chapter 6. Now, V is between 0 and 1.44. As + +257 +00:22:26,920 --> 00:22:33,160 +we mentioned many times, the area here equals, + +258 +00:22:33,660 --> 00:22:39,960 +I mean, the dashed area equals V of Z. Less than 1 + +259 +00:22:39,960 --> 00:22:45,200 +.44 minus 0.5. Exactly minus 0.5, because the area + +260 +00:22:45,200 --> 00:22:50,580 +to the left of 0 is 0.5. Now by using the normal + +261 +00:22:50,580 --> 00:22:53,980 +table, or standard normal table, P of Z is smaller + +262 +00:22:53,980 --> 00:22:59,780 +than 1.44 is given by 9251. + +263 +00:23:01,540 --> 00:23:05,840 +Just check your table, minus 0.5, so the final + +264 +00:23:05,840 --> 00:23:15,340 +result is 0.4251. That means around 42 percent, 0 + +265 +00:23:15,340 --> 00:23:22,280 +.51, that the proportion lie between 40 and 45 + +266 +00:23:22,280 --> 00:23:28,000 +percent. So that's how can we compute the + +267 +00:23:28,000 --> 00:23:32,080 +probabilities underneath the normal curve if we + +268 +00:23:32,080 --> 00:23:34,680 +are interested in the sample proportion. 
+ +269 +00:23:39,630 --> 00:23:45,870 +Now, in case if one of these conditions is not + +270 +00:23:45,870 --> 00:23:51,510 +satisfied, we cannot use this code unless we + +271 +00:23:51,510 --> 00:23:54,590 +increase the sample size. So here, the two + +272 +00:23:54,590 --> 00:23:56,790 +conditions should be satisfied. For example, + +273 +00:23:56,950 --> 00:24:00,910 +suppose n + +274 +00:24:00,910 --> 00:24:13,390 +equals 200. But this probability of Pi is + +275 +00:24:13,390 --> 00:24:17,570 +1%. Suppose + +276 +00:24:17,570 --> 00:24:26,250 +again N is 200 and Pi is 1%. So this condition is + +277 +00:24:26,250 --> 00:24:33,090 +not satisfied because N times Pi is just 2. So you + +278 +00:24:33,090 --> 00:24:35,450 +cannot use the z-score, because it should be at + +279 +00:24:35,450 --> 00:24:38,810 +least 5. So even if the sample size is large, + +280 +00:24:39,390 --> 00:24:42,070 +maybe one of the conditions is not satisfied + +281 +00:24:42,070 --> 00:24:46,710 +because you have a small true proportion. So if + +282 +00:24:46,710 --> 00:24:49,470 +the true proportion is very, very small, in this + +283 +00:24:49,470 --> 00:24:53,750 +case you have to increase your sample size. Make + +284 +00:24:53,750 --> 00:24:56,270 +sense? Any question? + +285 +00:24:58,950 --> 00:25:05,730 +Now I will discuss some practice problems for + +286 +00:25:05,730 --> 00:25:07,310 +chapter 7. + +287 +00:25:09,450 --> 00:25:16,130 +Let's do some practice problems for chapter 7. I + +288 +00:25:16,130 --> 00:25:17,770 +will give some + +289 +00:25:28,710 --> 00:25:34,150 +We have four choices. Parameters. Now, same + +290 +00:25:34,150 --> 00:25:36,390 +distribution always describes the same + +291 +00:25:36,390 --> 00:25:42,350 +distribution of statistics. So, not parameters, we + +292 +00:25:42,350 --> 00:25:45,970 +have to choose the statistics. Same distribution + +293 +00:25:45,970 --> 00:25:51,250 +describes distribution of always statistics. Next. 
+ +294 +00:25:53,490 --> 00:25:56,890 +The central limit theorem is important in + +295 +00:25:56,890 --> 00:26:01,310 +statistics because we have four choices. Let's see + +296 +00:26:01,310 --> 00:26:07,810 +why C is correct. Part A says, for a large N, it + +297 +00:26:07,810 --> 00:26:11,550 +says that the population is approximately normal. + +298 +00:26:12,710 --> 00:26:16,210 +We cannot say the population is normal. The + +299 +00:26:16,210 --> 00:26:19,510 +standard distribution of a statistic is + +300 +00:26:19,510 --> 00:26:24,410 +approximately normal. So, for this reason, part A + +301 +00:26:24,410 --> 00:26:28,490 +is incorrect. For the other one, for any + +302 +00:26:28,490 --> 00:26:34,690 +population, so regardless of the population, it + +303 +00:26:34,690 --> 00:26:38,950 +says the sample distribution, the sample mean is + +304 +00:26:38,950 --> 00:26:42,730 +approximately normal regardless of the sample + +305 +00:26:42,730 --> 00:26:45,830 +size. This is incorrect because it says for any + +306 +00:26:45,830 --> 00:26:53,940 +population. So P is incorrect. But if it says for + +307 +00:26:53,940 --> 00:26:58,180 +normal population, then we can say the sampling + +308 +00:26:58,180 --> 00:27:00,920 +distribution of the sample mean is approximately + +309 +00:27:00,920 --> 00:27:04,820 +normal regardless of the sample size. But it says + +310 +00:27:04,820 --> 00:27:09,320 +for any, so that means incorrect. Now part D, for + +311 +00:27:09,320 --> 00:27:13,500 +example, for any size sample, it means regardless + +312 +00:27:13,500 --> 00:27:17,480 +of the sample size, the theorem says the sample + +313 +00:27:17,480 --> 00:27:19,840 +distribution of the sample mean is approximately + +314 +00:27:19,840 --> 00:27:24,520 +normal. That's incorrect. 
Part C, for large N, it + +315 +00:27:24,520 --> 00:27:26,820 +says the sample distribution of the sample mean is + +316 +00:27:26,820 --> 00:27:31,110 +approximately Normal, regardless of the shape of + +317 +00:27:31,110 --> 00:27:34,310 +the distribution, that's true, because here we + +318 +00:27:34,310 --> 00:27:37,170 +have large sample size. So regardless of the + +319 +00:27:37,170 --> 00:27:39,690 +population, the shape of the population, we can + +320 +00:27:39,690 --> 00:27:42,830 +say that the sample distribution of the sample + +321 +00:27:42,830 --> 00:27:48,090 +mean is approximately normally distributed. Number + +322 +00:27:48,090 --> 00:27:52,610 +three. Which of the following statements about the + +323 +00:27:52,610 --> 00:27:54,750 +sample distribution of the sample mean is + +324 +00:27:54,750 --> 00:27:58,860 +incorrect? Here we are looking for the incorrect + +325 +00:27:58,860 --> 00:27:59,480 +statement. + +326 +00:28:02,780 --> 00:28:06,940 +Look at A, the sample distribution of the sample + +327 +00:28:06,940 --> 00:28:10,440 +mean is approximately normal whenever the sample + +328 +00:28:10,440 --> 00:28:14,620 +size is sufficiently large. This is correct. B, + +329 +00:28:14,740 --> 00:28:16,920 +the sample distribution of the sample mean is + +330 +00:28:16,920 --> 00:28:20,360 +generated by repeatedly taking samples of size N + +331 +00:28:20,360 --> 00:28:24,500 +and computing the sample means. That's also + +332 +00:28:24,500 --> 00:28:28,440 +correct. The sample mean, I'm sorry, the mean of + +333 +00:28:28,440 --> 00:28:30,460 +the sample distribution of the sample mean is + +334 +00:28:30,460 --> 00:28:33,680 +always equal to mu, that's correct, because we + +335 +00:28:33,680 --> 00:28:37,340 +know that the mean of x bar is a mu. Now, the + +336 +00:28:37,340 --> 00:28:41,020 +standard deviation of the sampling distribution of + +337 +00:28:41,020 --> 00:28:45,100 +the sample mean is equal to sigma. 
And we, yes, + +338 +00:28:45,240 --> 00:28:48,780 +exactly, the standard error, which is sigma of x + +339 +00:28:48,780 --> 00:28:56,330 +bar, not sigma, equals sigma Divide by root n. For + +340 +00:28:56,330 --> 00:28:59,570 +this reason, this one is incorrect statement. + +341 +00:29:00,290 --> 00:29:07,210 +Because we have to divide this sigma by square + +342 +00:29:07,210 --> 00:29:12,790 +root of n. Number four. + +343 +00:29:16,390 --> 00:29:23,340 +Which of the following is true? about the sampling + +344 +00:29:23,340 --> 00:29:28,040 +distribution of the sample mean. Again, the mean + +345 +00:29:28,040 --> 00:29:32,200 +of the sampling distribution is always Mu. That's + +346 +00:29:32,200 --> 00:29:34,600 +correct statement. + +347 +00:29:35,860 --> 00:29:39,520 +Now look at V. The standard deviation of the + +348 +00:29:39,520 --> 00:29:42,700 +sampling distribution is always Sigma. Incorrect + +349 +00:29:42,700 --> 00:29:47,540 +because Sigma over root N. Part C, the shape of + +350 +00:29:47,540 --> 00:29:50,000 +the sampling distribution is always approximately + +351 +00:29:50,000 --> 00:29:54,650 +normal. If N is large, then we can say it's + +352 +00:29:54,650 --> 00:29:58,550 +approximately normal. All of the above are true is + +353 +00:29:58,550 --> 00:30:07,370 +incorrect. So that's number 6. + +354 +00:30:15,090 --> 00:30:22,610 +Look at number 10. Number 10. A telemarketer set + +355 +00:30:22,610 --> 00:30:27,130 +the company's computerized dialing system to + +356 +00:30:27,130 --> 00:30:33,910 +contact every 25th person listed in the local + +357 +00:30:33,910 --> 00:30:38,450 +telephone directory. So the company selects the + +358 +00:30:38,450 --> 00:30:45,510 +person that's in the 25th position. So the 25th + +359 +00:30:45,510 --> 00:30:49,600 +person is being selected. The other one is the + +360 +00:30:49,600 --> 00:30:55,940 +first item. The second item should be... 
+ +361 +00:30:55,940 --> 00:31:01,120 +And your key is twenty-fifth. So one, two, three, + +362 +00:31:01,260 --> 00:31:05,440 +four. So number twenty-fifth is the first item. + +363 +00:31:08,500 --> 00:31:11,780 +Maybe you have something more. Then the second + +364 +00:31:11,780 --> 00:31:15,140 +item is number fifty and so on. + +365 +00:31:19,250 --> 00:31:26,650 +What sampling method was used? Systematic sample + +366 +00:31:26,650 --> 00:31:31,450 +because we chose the 50th person, then the second + +367 +00:31:31,450 --> 00:31:38,270 +one is the 50th, and so on. So 25, 50, 75, and so + +368 +00:31:38,270 --> 00:31:43,690 +on. Number 11, which of the following methods were + +369 +00:31:43,690 --> 00:31:49,280 +more likely be susceptible to ethical violation + +370 +00:31:49,280 --> 00:31:54,600 +when used to form conclusions about the entire + +371 +00:31:54,600 --> 00:31:55,360 +population. + +372 +00:31:57,740 --> 00:31:59,800 +Now, the correct answer is convenience sample. + +373 +00:32:01,260 --> 00:32:04,380 +Because number one, convenience sample is used + +374 +00:32:04,380 --> 00:32:10,520 +because it is easy, inexpensive, costly, and it's + +375 +00:32:10,520 --> 00:32:14,880 +used I mean, we select the sample if it is + +376 +00:32:14,880 --> 00:32:19,400 +convenient to the researcher by himself. But maybe + +377 +00:32:19,400 --> 00:32:22,580 +in this case, we have biased collection. For this + +378 +00:32:22,580 --> 00:32:27,260 +reason, this is incorrect sampling. Most of the + +379 +00:32:27,260 --> 00:32:32,880 +time, we are going to avoid using this technique + +380 +00:32:32,880 --> 00:32:39,560 +unless your sample is unbiased. Because if, for + +381 +00:32:39,560 --> 00:32:45,330 +example, suppose I love T of type A. + +382 +00:32:48,470 --> 00:32:52,170 +And my sample, I select a sample of size 20. 
+ +383 +00:32:56,770 --> 00:33:02,430 +Since I love type A, I choose these 20 students or + +384 +00:33:02,430 --> 00:33:08,370 +20 persons that have, that like T of type A. That + +385 +00:33:08,370 --> 00:33:13,320 +means your sample It's convenient for you, but + +386 +00:33:13,320 --> 00:33:13,740 +it's biased. + +387 +00:33:16,860 --> 00:33:19,200 +Okay, so in this case it's called convenient + +388 +00:33:19,200 --> 00:33:24,740 +sample, so it will give incorrect results. So it's + +389 +00:33:24,740 --> 00:33:26,320 +convenient sample. + +390 +00:33:30,360 --> 00:33:34,900 +Let's do one, the other section, one of these + +391 +00:33:34,900 --> 00:33:39,160 +problems, true, false. Let's do some of these. + +392 +00:33:41,910 --> 00:33:45,110 +Now for a true and false problem, try to avoid + +393 +00:33:45,110 --> 00:33:49,370 +calculations as much as you can. Just solve the + +394 +00:33:49,370 --> 00:33:53,010 +problem without any computations. Maybe simple + +395 +00:33:53,010 --> 00:33:57,210 +calculations might be used, but don't use the + +396 +00:33:57,210 --> 00:34:00,890 +exact calculations because the problem asks just + +397 +00:34:00,890 --> 00:34:05,150 +true or false. So sometimes the problem makes + +398 +00:34:05,150 --> 00:34:07,630 +sense, the answer is true, so just say true + +399 +00:34:07,630 --> 00:34:12,990 +without doing the complete calculations. Because + +400 +00:34:12,990 --> 00:34:14,730 +you will waste time. Because as you know, we have + +401 +00:34:14,730 --> 00:34:18,990 +exam of just 60 minutes. And for true and false, I + +402 +00:34:18,990 --> 00:34:22,490 +expect your answer to be within, for example, 15 + +403 +00:34:22,490 --> 00:34:29,170 +seconds. Just read the problem, then figure your + +404 +00:34:29,170 --> 00:34:33,910 +answer. So in this case, sometimes you don't need + +405 +00:34:33,910 --> 00:34:38,000 +to do the exact calculations. 
But for the free + +406 +00:34:38,000 --> 00:34:40,840 +response problems you have to do the calculations. + +407 +00:34:41,000 --> 00:34:43,820 +But here just think about it a little bit and + +408 +00:34:43,820 --> 00:34:46,740 +within a few seconds you can figure out if it is + +409 +00:34:46,740 --> 00:34:50,680 +true or false. Now let's think about number one. + +410 +00:34:52,480 --> 00:34:55,700 +Now each of these problems I will ask all of you + +411 +00:34:55,700 --> 00:34:59,30 + +445 +00:37:49,360 --> 00:37:53,860 +That's correct, but to do the problem, the + +446 +00:37:53,860 --> 00:37:56,320 +complete answer, yes, you have to convert to this + +447 +00:37:56,320 --> 00:37:59,620 +score, because n is large, then you can figure out + +448 +00:37:59,620 --> 00:38:03,460 +the answer. So she said that. But I'm looking for + +449 +00:38:03,460 --> 00:38:08,520 +something different. First, convert to z. Our root + +450 +00:38:08,520 --> 00:38:13,860 +n, sigma is n. Divide by square root of 64, which + +451 +00:38:13,860 --> 00:38:21,620 +is also 8. So it is Z greater than 1. Sorry, 6 + +452 +00:38:21,620 --> 00:38:29,080 +divided by 1 is 6. Now, Z greater than 6 if you go + +453 +00:38:29,080 --> 00:38:31,000 +back to the normal table. + +454 +00:38:33,560 --> 00:38:42,290 +Now, the table we have given it to try. So this + +455 +00:38:42,290 --> 00:38:50,790 +one is 1 minus z less than 6. For 6, we have 0 + +456 +00:38:50,790 --> 00:38:51,970 +.9999. + +457 +00:38:54,890 --> 00:38:58,490 +So the answer is approximately zero. So it makes + +458 +00:38:58,490 --> 00:39:02,150 +sense, it's zero. This is the complete answer. It + +459 +00:39:02,150 --> 00:39:05,490 +takes time. Because you have to convert the score. + +460 +00:39:06,700 --> 00:39:09,760 +Do some calculations, then use the normal table, + +461 +00:39:09,940 --> 00:39:13,460 +start the normal table. 
I mentioned before, it + +462 +00:39:13,460 --> 00:39:17,960 +tries to avoid this + +463 +00:39:17,960 --> 00:39:22,080 +kind of calculations. Now, how can we figure out? + +464 +00:39:26,000 --> 00:39:31,560 +Now, since the problem says Q to the left and N is + +465 +00:39:31,560 --> 00:39:36,680 +large, We can assume that X bar is approximately + +466 +00:39:36,680 --> 00:39:39,840 +normal, so don't worry about the shape of the + +467 +00:39:39,840 --> 00:39:42,560 +distribution because we select a random size of + +468 +00:39:42,560 --> 00:39:45,720 +size 6 to 4, so we can apply the central limit + +469 +00:39:45,720 --> 00:39:49,580 +theorem. Any idea? + +470 +00:39:54,580 --> 00:40:01,290 +Again, this way is true to do the complete + +471 +00:40:01,290 --> 00:40:05,350 +calculations, but again you have to avoid using + +472 +00:40:05,350 --> 00:40:06,450 +this one. I will give you a hint. + +473 +00:40:09,510 --> 00:40:15,730 +Now, 71, what's the difference between the true + +474 +00:40:15,730 --> 00:40:18,290 +mean and 76? + +475 +00:40:24,950 --> 00:40:31,060 +Okay, so that's sigma of x bar is 8 divided by + +476 +00:40:31,060 --> 00:40:38,600 +square root of 64 is 1. Again. Continuous, just 1. + +477 +00:40:42,980 --> 00:40:43,200 +Why? + +478 +00:40:51,340 --> 00:40:58,110 +The difference between 71 and 656 and 6 It's six + +479 +00:40:58,110 --> 00:41:03,730 +times the standard error of the sample mean. And + +480 +00:41:03,730 --> 00:41:12,610 +we know that, if you remember this rule, 65, 68, + +481 +00:41:14,110 --> 00:41:21,050 +95, 99, we can't, don't, don't use chebyshev in + +482 +00:41:21,050 --> 00:41:23,370 +this case because the distribution is roughly + +483 +00:41:23,370 --> 00:41:28,170 +normal. We know this rule, the empirical rule. We + +484 +00:41:28,170 --> 00:41:33,890 +said that 68% of the observations lie within one + +485 +00:41:33,890 --> 00:41:39,590 +standard deviation of the mean. 
95% in standard + +486 +00:41:39,590 --> 00:41:46,210 +deviation and so on. Now if you go back, now let's + +487 +00:41:46,210 --> 00:41:51,510 +say mu minus three sigma and mu plus three sigma. + +488 +00:41:52,010 --> 00:41:56,410 +But here we are talking about x bar. So the mean + +489 +00:41:56,410 --> 00:42:04,590 +of x bar, sigma of x bar. So empirical rule says + +490 +00:42:04,590 --> 00:42:11,690 +that this area between + +491 +00:42:11,690 --> 00:42:20,490 +mu minus 3 sigma of x bar is around 99.7. Let's + +492 +00:42:20,490 --> 00:42:27,260 +compute the lower bound. Mu is 65. Three times + +493 +00:42:27,260 --> 00:42:31,640 +sigma of x bar is one, so that's 62. + +494 +00:42:33,700 --> 00:42:41,780 +The maximum here equal mu again 65 plus three + +495 +00:42:41,780 --> 00:42:49,020 +times one, 16. So now 99.7% of the sample means + +496 +00:42:49,020 --> 00:42:59,500 +lie between 62 and 68. Now, what's left? 0.3% for + +497 +00:42:59,500 --> 00:43:04,700 +both sides, so that's 0.15 to the right and the + +498 +00:43:04,700 --> 00:43:05,360 +same to the left. + +499 +00:43:08,500 --> 00:43:15,360 +Now, we are looking for x bar exceeds 71. 68 this + +500 +00:43:15,360 --> 00:43:20,360 +point, 71 should be to the right. Now, what's the + +501 +00:43:20,360 --> 00:43:24,290 +probability to the right of 71? is almost zero. + +502 +00:43:26,130 --> 00:43:30,630 +Because since we are saying that most of the data + +503 +00:43:30,630 --> 00:43:35,010 +lies within the three standard deviations of the + +504 +00:43:35,010 --> 00:43:39,230 +mean. Now here, the difference between this point + +505 +00:43:39,230 --> 00:43:47,270 +71 and 65 is 6, and sigma of x bar is 1. So it + +506 +00:43:47,270 --> 00:43:49,770 +means the difference between mu and x bar is + +507 +00:43:49,770 --> 00:43:54,600 +around 6 times sigma of x bar. 
So you are sure + +508 +00:43:54,600 --> 00:44:01,320 +that 99.7% of the data lies between 62 and 68, and + +509 +00:44:01,320 --> 00:44:05,160 +just 0.3% of the area left to the other side, two + +510 +00:44:05,160 --> 00:44:09,560 +sides, split in half, so we have 0.15 to the right + +511 +00:44:09,560 --> 00:44:12,700 +and the other to the left. But again, we are + +512 +00:44:12,700 --> 00:44:16,380 +looking for expiry exceeds 71, so we have to go + +513 +00:44:16,380 --> 00:44:20,340 +further to the right side, so the area becomes + +514 +00:44:20,340 --> 00:44:20,920 +very small. + +515 +00:44:27,080 --> 00:44:32,020 +This method is faster if you figure out this one + +516 +00:44:32,020 --> 00:44:38,060 +we can apply the empirical rule. Let's do similar + +517 +00:44:38,060 --> 00:44:41,780 +questions. Look at the other one. + +518 +00:44:46,940 --> 00:44:48,180 +Number five. + +519 +00:44:59,270 --> 00:45:02,050 +The amount of gasoline purchased per car at large + +520 +00:45:02,050 --> 00:45:08,110 +surface stations has population mean of 15. So the + +521 +00:45:08,110 --> 00:45:10,070 +mean is 15. + +522 +00:45:13,010 --> 00:45:18,150 +And the population standard deviation of 4. Sigma + +523 +00:45:18,150 --> 00:45:24,330 +is 4. It is assumed that the amount of gasoline + +524 +00:45:24,330 --> 00:45:26,050 +purchased per car is symmetric. + +525 +00:45:28,870 --> 00:45:38,990 +there is approximately 68.26% it shows that a + +526 +00:45:38,990 --> 00:45:44,970 +random sample of 16 cars so n equals 16 cars will + +527 +00:45:44,970 --> 00:45:49,430 +have sample mean between 14 and 16 so it says that + +528 +00:45:49,430 --> 00:45:55,830 +between 14 and 16 the answer is 68.24% + +529 +00:45:58,710 --> 00:46:01,890 +So again, we have a population, this population is + +530 +00:46:01,890 --> 00:46:08,550 +symmetric, with mean of 15 sigma of 4. We select a + +531 +00:46:08,550 --> 00:46:14,170 +random sample of 16. 
The problem says that the + +532 +00:46:14,170 --> 00:46:17,930 +probability of X bar between 14 and 16 equals 68%. + +533 +00:46:17,930 --> 00:46:24,450 +And the answer is true. Why? How can we apply or + +534 +00:46:24,450 --> 00:46:29,490 +use the empirical rule in this case? A mu minus 3 + +535 +00:46:29,490 --> 00:46:30,150 +standard deviation? + +536 +00:46:34,390 --> 00:46:42,690 +It says 68, 1. So it's a mu minus 1 standard + +537 +00:46:42,690 --> 00:46:46,770 +deviation, 1 sigma X bar, and let's look at 1 plus + +538 +00:46:46,770 --> 00:46:56,400 +sigma X bar. A mu is 15. Now Sigma of X1 is + +539 +00:46:56,400 --> 00:47:00,680 +also 1 because 4 divided by square root of 16 is + +540 +00:47:00,680 --> 00:47:01,580 +1. + +541 +00:47:04,040 --> 00:47:12,540 +So 1 times 1 is 14. On the other hand 15 plus 1 + +542 +00:47:12,540 --> 00:47:14,160 +times 1 is 16. + +543 +00:47:16,790 --> 00:47:21,450 +The problem says 68.4% of the answers is correct. + +544 +00:47:22,810 --> 00:47:28,010 +Because we know that 68% of the data will fall + +545 +00:47:28,010 --> 00:47:32,390 +within one standard deviation around the mean. So + +546 +00:47:32,390 --> 00:47:35,970 +it's, I think, it's very quickly to get your + +547 +00:47:35,970 --> 00:47:39,870 +answer rather than using the exact calculation. + +548 +00:47:40,070 --> 00:47:44,910 +Because for the exact one, you have to do 14 minus + +549 +00:47:44,910 --> 00:47:53,990 +the mean. divided by sigma z, then 16 minus the + +550 +00:47:53,990 --> 00:47:58,430 +mean divided by 1. Then use the normal theorem to + +551 +00:47:58,430 --> 00:48:05,470 +use the empirical rule in this case. So if we + +552 +00:48:05,470 --> 00:48:10,930 +select a sample of size 16, the probability that x + +553 +00:48:10,930 --> 00:48:14,530 +bar lies between 14 and 16 is around 60%. + +554 +00:48:20,320 --> 00:48:27,740 +Look at number six. Again, we assume we have the + +555 +00:48:27,740 --> 00:48:36,560 +same information for the mean. 
Mu is 15. It's the + +556 +00:48:36,560 --> 00:48:41,640 +same standard deviation is 4. But here, a random + +557 +00:48:41,640 --> 00:48:47,460 +sample of 64 cars selected. So instead of + +558 +00:48:47,460 --> 00:48:51,880 +selecting 16 cars, we select a random sample of + +559 +00:48:51,880 --> 00:48:53,900 +size 64. + +560 +00:48:55,760 --> 00:49:03,560 +Now it says there is approximately 95.44 + +561 +00:49:03,560 --> 00:49:07,960 +% which shows that the sample mean will be between + +562 +00:49:07,960 --> 00:49:14,460 +14 and 16. So again, this probability between 14 + +563 +00:49:14,460 --> 00:49:19,670 +and 16 is also Mu minus 2 standard deviation. And + +564 +00:49:19,670 --> 00:49:22,950 +equals 9.44. Let's see if it's correct or not. + +565 +00:49:23,890 --> 00:49:28,250 +Since it's mentioned that 95%, so it means that we + +566 +00:49:28,250 --> 00:49:31,070 +are talking about two standard deviations. So + +567 +00:49:31,070 --> 00:49:34,630 +let's just compute mu minus 2 standard deviation + +568 +00:49:34,630 --> 00:49:43,090 +and plus 2 sigma x. So the mean is 15 minus plus 2 + +569 +00:49:43,090 --> 00:49:49,510 +times again 1. One is not one because sigma of x + +570 +00:49:49,510 --> 00:49:56,110 +bar was one because n was 16. Now my new sigma + +571 +00:49:56,110 --> 00:50:01,730 +equal one-half because sigma over root n. Now + +572 +00:50:01,730 --> 00:50:10,070 +sigma again the same value four divided by 64 over + +573 +00:50:10,070 --> 00:50:13,970 +eight so that's one-half. So this one should be + +574 +00:50:13,970 --> 00:50:14,810 +two times + +575 +00:50:20,340 --> 00:50:26,820 +So that will give 15 minus 1, 14. 15 plus 1 is 16. + +576 +00:50:30,480 --> 00:50:35,180 +The probability between 14 and 16, I mean X bar + +577 +00:50:35,180 --> 00:50:40,840 +lies between 14 and 16, equals 95%. And we just + +578 +00:50:40,840 --> 00:50:45,210 +change some information a little bit. 
But the + +579 +00:50:45,210 --> 00:50:48,450 +answer was the probability between X bar, the + +580 +00:50:48,450 --> 00:50:51,790 +probability that X bar lies between 14 and 16 is + +581 +00:50:51,790 --> 00:50:56,690 +around 68%. Now what's the difference between + +582 +00:50:56,690 --> 00:51:03,030 +number 5 and 6? N is large. As N increases, + +583 +00:51:04,530 --> 00:51:06,790 +standard deviation decreases. Standard deviation + +584 +00:51:06,790 --> 00:51:09,910 +decreases. If you look here, sigma X bar was 1. + +585 +00:51:10,560 --> 00:51:13,440 +Then my new sigma of x bar is one half. As we + +586 +00:51:13,440 --> 00:51:16,260 +mentioned before, as n increases, sigma of x bar + +587 +00:51:16,260 --> 00:51:21,760 +decreases. In this case, we have larger + +588 +00:51:21,760 --> 00:51:25,460 +probability. So if we increase the sample size, + +589 +00:51:26,160 --> 00:51:30,760 +then the probability that x bar is between 14 and + +590 +00:51:30,760 --> 00:51:35,040 +16 will increase. So this proportion is increased + +591 +00:51:35,040 --> 00:51:40,630 +around 30 or around 27%. Because you just increase + +592 +00:51:40,630 --> 00:51:46,950 +the sample size from 16 all the way up to 64. So + +593 +00:51:46,950 --> 00:51:50,050 +since, as we know, as the sample size increases, + +594 +00:51:50,770 --> 00:51:55,210 +sigma of X bar decreases. So we are more sure that + +595 +00:51:55,210 --> 00:52:00,710 +X bar lies between 14 and 16. The previous one, + +596 +00:52:01,250 --> 00:52:05,590 +the chance that, or the probability that X bar + +597 +00:52:05,590 --> 00:52:09,450 +lies between 14 and 16 is around two-third. around + +598 +00:52:09,450 --> 00:52:14,530 +68%. But here, for the same kind of probability, + +599 +00:52:15,010 --> 00:52:19,190 +it's equal around 95%, because you increase the + +600 +00:52:19,190 --> 00:52:20,130 +sample size. + +601 +00:52:23,670 --> 00:52:30,210 +Any question? Let's do one more. 
+ +602 +00:52:34,550 --> 00:52:35,490 +Suppose + +603 +00:52:53,560 --> 00:52:56,960 +Sigma squared, it means the variance is 100. + +604 +00:53:00,100 --> 00:53:04,320 +So sigma don't forget to take the square root of + +605 +00:53:04,320 --> 00:53:11,660 +sigma squared in order to find sigma. In a sample + +606 +00:53:11,660 --> 00:53:12,380 +of 100, + +607 +00:53:15,480 --> 00:53:20,540 +95% of all possible sample means will fall between + +608 +00:53:29,270 --> 00:53:36,430 +This equals around 95%. Now without calculations, + +609 +00:53:37,010 --> 00:53:40,450 +since it says the problem is normally distributed + +610 +00:53:40,450 --> 00:53:46,170 +or N is large. In this case, N is large enough to + +611 +00:53:46,170 --> 00:53:49,570 +apply the central limit theorem. Then we can use + +612 +00:53:49,570 --> 00:53:54,190 +the empirical rule. It says 95% is too strong + +613 +00:53:54,190 --> 00:53:59,630 +deviation of the mean. So mu minus plus. 2 sigma + +614 +00:53:59,630 --> 00:54:05,850 +of x bar. Now, mu is 50 minus plus 2 sigma of x + +615 +00:54:05,850 --> 00:54:09,290 +bar. Yes, I have to compute sigma of x bar first. + +616 +00:54:09,910 --> 00:54:15,410 +So, sigma divided by is 1. So, simple calculation, + +617 +00:54:15,550 --> 00:54:21,570 +just sigma over root n is 1. So, minus 1. 50 minus + +618 +00:54:21,570 --> 00:54:29,290 +2 is 48. 50 plus 2 is 52. So it's between 40, 8, + +619 +00:54:29,370 --> 00:54:35,690 +and 52, and this probability is 95%. So it's true. + +620 +00:54:36,950 --> 00:54:40,690 +Now which is faster? To use this method, which is + +621 +00:54:40,690 --> 00:54:42,970 +maybe less than one minute, you can figure out the + +622 +00:54:42,970 --> 00:54:46,890 +answer, or use the complete calculations. 
In this + +623 +00:54:46,890 --> 00:54:48,990 +case, you have to find z-score for the first one, + +624 +00:54:49,090 --> 00:54:51,930 +z for the other one, then use the normal table, + +625 +00:54:51,990 --> 00:54:56,610 +and that will take at least five minutes. So to + +626 +00:54:56,610 --> 00:54:59,330 +use the empirical rule, most of the time gives + +627 +00:54:59,330 --> 00:55:03,410 +shorter time. + +628 +00:55:05,290 --> 00:55:06,030 +Any question? + +629 +00:55:08,770 --> 00:55:11,430 +Let's + +667 +00:58:33,610 --> 00:58:37,350 +He asked about sigma of x bar. We know that sigma + +668 +00:58:37,350 --> 00:58:40,650 +of x bar equals sigma over root n, but sigma is + +669 +00:58:40,650 --> 00:58:47,020 +not given. S is a square, S is a 20, but what's + +670 +00:58:47,020 --> 00:58:52,260 +Sigma? Because Sigma of X bar, Sigma over root N, + +671 +00:58:53,020 --> 00:58:58,640 +Sigma is not given. But as we know before, since N + +672 +00:58:58,640 --> 00:59:04,080 +is large enough, so if N is large, in this case, + +673 +00:59:04,420 --> 00:59:09,120 +if N is large, if N is large, we can replace Sigma + +674 +00:59:09,120 --> 00:59:16,550 +by S. And this is just S over root N. So if N is + +675 +00:59:16,550 --> 00:59:24,010 +large enough, more than 15, so we can use or we + +676 +00:59:24,010 --> 00:59:27,530 +can apply the central limit theorem. So we get the + +677 +00:59:27,530 --> 00:59:35,500 +sigma YS. So S equals 20 divided by 5 is 4. So the + +678 +00:59:35,500 --> 00:59:38,480 +standard error equals 4 in this case. So if S + +679 +00:59:38,480 --> 00:59:43,120 +squared is given instead of sigma squared, we can + +680 +00:59:43,120 --> 00:59:48,300 +replace sigma by S if N is larger. So in this + +681 +00:59:48,300 --> 00:59:52,120 +case, we replace sigma by S. So sigma of x bar + +682 +00:59:52,120 --> 00:59:55,060 +equals S over root N. That will give 4. + +683 +00:59:58,400 --> 01:00:02,040 +S is the sample variance. 
+ +684 +01:00:05,740 --> 01:00:09,340 +Sigma is + +685 +01:00:09,340 --> 01:00:12,820 +population S squared. + +686 +01:00:15,720 --> 01:00:21,700 +Sigma squared is the population variance. And we + +687 +01:00:21,700 --> 01:00:25,400 +know that S squared is the sample variance, where + +688 +01:00:25,400 --> 01:00:28,180 +sigma squared is the population variance. And + +689 +01:00:28,180 --> 01:00:30,940 +population in this case is not given. So we + +690 +01:00:30,940 --> 01:00:34,060 +replace the population variance by sample + +691 +01:00:34,060 --> 01:00:37,160 +variance, if the population is normal or N is + +692 +01:00:37,160 --> 01:00:43,200 +large. Because if N is large enough, we can apply + +693 +01:00:43,200 --> 01:00:46,760 +the central limit theorem. And if you go back The + +694 +01:00:46,760 --> 01:00:49,080 +formula for S, if you remember, we divide by N + +695 +01:00:49,080 --> 01:00:55,500 +minus one. And for sigma, we divide by capital. So + +696 +01:00:55,500 --> 01:00:59,760 +if N is large enough, then there is small + +697 +01:00:59,760 --> 01:01:03,120 +difference between sample variance and population. + +698 +01:01:04,260 --> 01:01:04,360 +Okay. + +699 +01:01:11,040 --> 01:01:15,000 +Any questions? Any question? + +700 +01:01:19,130 --> 01:01:25,830 +If it is above 15, it's okay. 15 or 30, depends on + +701 +01:01:25,830 --> 01:01:28,890 +the population you have. But here, if you divide + +702 +01:01:28,890 --> 01:01:35,150 +by 24 or 25, the difference is small. I mean, if + +703 +01:01:35,150 --> 01:01:41,120 +you divide this one by 24 or by 25, The difference + +704 +01:01:41,120 --> 01:01:43,740 +between S and Sigma is small, so we can replace + +705 +01:01:43,740 --> 01:01:48,100 +Sigma by S, if Sigma is unknown. Questions? 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..e10d29e16bfd2fc59e7e458ff81e17ebe916c467 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 3078, "start": 6.6, "end": 30.78, "text": " the sum and distribution of the sum and mean, and we have produced two cases. The first one, if the population is normal and conservative, in this case, the sum and distribution of the sum and mean is exactly normal, with mean of x bar equal the whole population mean.", "tokens": [264, 2408, 293, 7316, 295, 264, 2408, 293, 914, 11, 293, 321, 362, 7126, 732, 3331, 13, 440, 700, 472, 11, 498, 264, 4415, 307, 2710, 293, 13780, 11, 294, 341, 1389, 11, 264, 2408, 293, 7316, 295, 264, 2408, 293, 914, 307, 2293, 2710, 11, 365, 914, 295, 2031, 2159, 2681, 264, 1379, 4415, 914, 13], "avg_logprob": -0.3275862166593815, "compression_ratio": 1.8175675675675675, "no_speech_prob": 8.940696716308594e-07, "words": [{"start": 6.6, "end": 6.82, "word": " the", "probability": 0.23974609375}, {"start": 6.82, "end": 7.12, "word": " sum", "probability": 0.321044921875}, {"start": 7.12, "end": 7.26, "word": " and", "probability": 0.6298828125}, {"start": 7.26, "end": 7.82, "word": " distribution", "probability": 0.83447265625}, {"start": 7.82, "end": 8.7, "word": " of", "probability": 0.94921875}, {"start": 8.7, "end": 8.92, "word": " the", "probability": 0.6455078125}, {"start": 8.92, "end": 9.08, "word": " sum", "probability": 0.467041015625}, {"start": 9.08, "end": 9.22, "word": " and", "probability": 0.9228515625}, {"start": 9.22, "end": 9.48, "word": " mean,", "probability": 0.9599609375}, {"start": 10.2, "end": 10.64, "word": " and", "probability": 0.8623046875}, {"start": 10.64, "end": 11.08, "word": " we", "probability": 0.94189453125}, {"start": 11.08, "end": 11.8, "word": " 
have", "probability": 0.252197265625}, {"start": 11.8, "end": 12.34, "word": " produced", "probability": 0.77490234375}, {"start": 12.34, "end": 13.34, "word": " two", "probability": 0.8779296875}, {"start": 13.34, "end": 13.78, "word": " cases.", "probability": 0.9375}, {"start": 14.64, "end": 14.82, "word": " The", "probability": 0.853515625}, {"start": 14.82, "end": 15.14, "word": " first", "probability": 0.88623046875}, {"start": 15.14, "end": 15.46, "word": " one,", "probability": 0.9287109375}, {"start": 16.06, "end": 16.26, "word": " if", "probability": 0.93115234375}, {"start": 16.26, "end": 16.4, "word": " the", "probability": 0.89990234375}, {"start": 16.4, "end": 16.76, "word": " population", "probability": 0.888671875}, {"start": 16.76, "end": 17.1, "word": " is", "probability": 0.9443359375}, {"start": 17.1, "end": 17.42, "word": " normal", "probability": 0.6669921875}, {"start": 17.42, "end": 17.7, "word": " and", "probability": 0.5}, {"start": 17.7, "end": 18.02, "word": " conservative,", "probability": 0.311279296875}, {"start": 19.5, "end": 20.04, "word": " in", "probability": 0.84619140625}, {"start": 20.04, "end": 20.32, "word": " this", "probability": 0.9453125}, {"start": 20.32, "end": 20.78, "word": " case,", "probability": 0.9228515625}, {"start": 21.12, "end": 21.46, "word": " the", "probability": 0.9130859375}, {"start": 21.46, "end": 21.84, "word": " sum", "probability": 0.966796875}, {"start": 21.84, "end": 21.98, "word": " and", "probability": 0.92333984375}, {"start": 21.98, "end": 22.56, "word": " distribution", "probability": 0.8330078125}, {"start": 22.56, "end": 22.9, "word": " of", "probability": 0.9326171875}, {"start": 22.9, "end": 23.86, "word": " the", "probability": 0.89404296875}, {"start": 23.86, "end": 24.18, "word": " sum", "probability": 0.95703125}, {"start": 24.18, "end": 24.34, "word": " and", "probability": 0.947265625}, {"start": 24.34, "end": 24.58, "word": " mean", "probability": 0.9443359375}, {"start": 24.58, 
"end": 24.92, "word": " is", "probability": 0.91259765625}, {"start": 24.92, "end": 25.44, "word": " exactly", "probability": 0.89013671875}, {"start": 25.44, "end": 25.74, "word": " normal,", "probability": 0.75341796875}, {"start": 26.2, "end": 27.08, "word": " with", "probability": 0.826171875}, {"start": 27.08, "end": 27.44, "word": " mean", "probability": 0.93994140625}, {"start": 27.44, "end": 27.76, "word": " of", "probability": 0.95947265625}, {"start": 27.76, "end": 28.0, "word": " x", "probability": 0.62109375}, {"start": 28.0, "end": 28.24, "word": " bar", "probability": 0.7822265625}, {"start": 28.24, "end": 28.62, "word": " equal", "probability": 0.87548828125}, {"start": 28.62, "end": 29.54, "word": " the", "probability": 0.68017578125}, {"start": 29.54, "end": 29.82, "word": " whole", "probability": 0.1558837890625}, {"start": 29.82, "end": 30.32, "word": " population", "probability": 0.96533203125}, {"start": 30.32, "end": 30.78, "word": " mean.", "probability": 0.8720703125}], "temperature": 1.0}, {"id": 2, "seek": 6045, "start": 33.65, "end": 60.45, "text": " And sigma of x bar is called the standard error of the sample mean given by sigma. Also, we mentioned that the standard error of the sample mean is always smaller than sigma. And also, as m increases, the standard error of x bar increases. 
So there is a gorgeous negative relationship.", "tokens": [400, 12771, 295, 2031, 2159, 307, 1219, 264, 3832, 6713, 295, 264, 6889, 914, 2212, 538, 12771, 13, 2743, 11, 321, 2835, 300, 264, 3832, 6713, 295, 264, 6889, 914, 307, 1009, 4356, 813, 12771, 13, 400, 611, 11, 382, 275, 8637, 11, 264, 3832, 6713, 295, 2031, 2159, 8637, 13, 407, 456, 307, 257, 12291, 3671, 2480, 13], "avg_logprob": -0.31796875794728596, "compression_ratio": 1.8451612903225807, "no_speech_prob": 5.364418029785156e-07, "words": [{"start": 33.65, "end": 34.09, "word": " And", "probability": 0.5908203125}, {"start": 34.09, "end": 34.79, "word": " sigma", "probability": 0.7490234375}, {"start": 34.79, "end": 35.33, "word": " of", "probability": 0.9169921875}, {"start": 35.33, "end": 35.55, "word": " x", "probability": 0.71728515625}, {"start": 35.55, "end": 35.83, "word": " bar", "probability": 0.52294921875}, {"start": 35.83, "end": 36.05, "word": " is", "probability": 0.371337890625}, {"start": 36.05, "end": 36.37, "word": " called", "probability": 0.49072265625}, {"start": 36.37, "end": 36.61, "word": " the", "probability": 0.90380859375}, {"start": 36.61, "end": 36.97, "word": " standard", "probability": 0.86962890625}, {"start": 36.97, "end": 37.29, "word": " error", "probability": 0.88623046875}, {"start": 37.29, "end": 38.05, "word": " of", "probability": 0.9287109375}, {"start": 38.05, "end": 38.25, "word": " the", "probability": 0.90869140625}, {"start": 38.25, "end": 38.47, "word": " sample", "probability": 0.48583984375}, {"start": 38.47, "end": 38.81, "word": " mean", "probability": 0.94970703125}, {"start": 38.81, "end": 40.03, "word": " given", "probability": 0.8095703125}, {"start": 40.03, "end": 40.35, "word": " by", "probability": 0.97265625}, {"start": 40.35, "end": 40.71, "word": " sigma.", "probability": 0.89453125}, {"start": 42.61, "end": 43.29, "word": " Also,", "probability": 0.9453125}, {"start": 43.43, "end": 43.55, "word": " we", "probability": 0.95166015625}, 
{"start": 43.55, "end": 43.87, "word": " mentioned", "probability": 0.76416015625}, {"start": 43.87, "end": 44.23, "word": " that", "probability": 0.91259765625}, {"start": 44.23, "end": 45.43, "word": " the", "probability": 0.88134765625}, {"start": 45.43, "end": 45.95, "word": " standard", "probability": 0.9404296875}, {"start": 45.95, "end": 46.33, "word": " error", "probability": 0.8779296875}, {"start": 46.33, "end": 47.03, "word": " of", "probability": 0.94384765625}, {"start": 47.03, "end": 47.21, "word": " the", "probability": 0.83544921875}, {"start": 47.21, "end": 47.49, "word": " sample", "probability": 0.859375}, {"start": 47.49, "end": 47.73, "word": " mean", "probability": 0.77001953125}, {"start": 47.73, "end": 47.95, "word": " is", "probability": 0.94189453125}, {"start": 47.95, "end": 48.57, "word": " always", "probability": 0.904296875}, {"start": 48.57, "end": 49.43, "word": " smaller", "probability": 0.83349609375}, {"start": 49.43, "end": 49.79, "word": " than", "probability": 0.93115234375}, {"start": 49.79, "end": 50.15, "word": " sigma.", "probability": 0.88671875}, {"start": 50.89, "end": 51.57, "word": " And", "probability": 0.7763671875}, {"start": 51.57, "end": 52.01, "word": " also,", "probability": 0.83056640625}, {"start": 52.71, "end": 52.97, "word": " as", "probability": 0.958984375}, {"start": 52.97, "end": 53.27, "word": " m", "probability": 0.26708984375}, {"start": 53.27, "end": 53.85, "word": " increases,", "probability": 0.9326171875}, {"start": 54.55, "end": 54.81, "word": " the", "probability": 0.86083984375}, {"start": 54.81, "end": 55.13, "word": " standard", "probability": 0.92333984375}, {"start": 55.13, "end": 55.41, "word": " error", "probability": 0.80859375}, {"start": 55.41, "end": 55.65, "word": " of", "probability": 0.62939453125}, {"start": 55.65, "end": 55.79, "word": " x", "probability": 0.95947265625}, {"start": 55.79, "end": 55.97, "word": " bar", "probability": 0.95263671875}, {"start": 55.97, "end": 56.61, 
"word": " increases.", "probability": 0.72802734375}, {"start": 56.79, "end": 56.91, "word": " So", "probability": 0.91357421875}, {"start": 56.91, "end": 57.19, "word": " there", "probability": 0.7724609375}, {"start": 57.19, "end": 57.41, "word": " is", "probability": 0.86865234375}, {"start": 57.41, "end": 58.09, "word": " a", "probability": 0.3193359375}, {"start": 58.09, "end": 58.47, "word": " gorgeous", "probability": 0.047393798828125}, {"start": 58.47, "end": 59.69, "word": " negative", "probability": 0.72412109375}, {"start": 59.69, "end": 60.45, "word": " relationship.", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 3, "seek": 9088, "start": 61.36, "end": 90.88, "text": " between our fingers, relationship between the standard error of x bar and the sample size. That's when the operation is normally distributed. So again, in this case, the new z score is given by this equation. Here, we replace sigma by sigma of x bar. So z score is given by x bar minus the mean divided by sigma over root n.", "tokens": [1296, 527, 7350, 11, 2480, 1296, 264, 3832, 6713, 295, 2031, 2159, 293, 264, 6889, 2744, 13, 663, 311, 562, 264, 6916, 307, 5646, 12631, 13, 407, 797, 11, 294, 341, 1389, 11, 264, 777, 710, 6175, 307, 2212, 538, 341, 5367, 13, 1692, 11, 321, 7406, 12771, 538, 12771, 295, 2031, 2159, 13, 407, 710, 6175, 307, 2212, 538, 2031, 2159, 3175, 264, 914, 6666, 538, 12771, 670, 5593, 297, 13], "avg_logprob": -0.24743150358330712, "compression_ratio": 1.6581632653061225, "no_speech_prob": 0.0, "words": [{"start": 61.36, "end": 61.96, "word": " between", "probability": 0.38916015625}, {"start": 61.96, "end": 62.24, "word": " our", "probability": 0.483642578125}, {"start": 62.24, "end": 62.66, "word": " fingers,", "probability": 0.8173828125}, {"start": 62.86, "end": 63.36, "word": " relationship", "probability": 0.5966796875}, {"start": 63.36, "end": 63.96, "word": " between", "probability": 0.8837890625}, {"start": 63.96, "end": 64.84, "word": " the", 
"probability": 0.65380859375}, {"start": 64.84, "end": 65.28, "word": " standard", "probability": 0.81787109375}, {"start": 65.28, "end": 65.54, "word": " error", "probability": 0.197265625}, {"start": 65.54, "end": 65.7, "word": " of", "probability": 0.86572265625}, {"start": 65.7, "end": 65.86, "word": " x", "probability": 0.6845703125}, {"start": 65.86, "end": 66.16, "word": " bar", "probability": 0.66943359375}, {"start": 66.16, "end": 66.88, "word": " and", "probability": 0.8662109375}, {"start": 66.88, "end": 68.22, "word": " the", "probability": 0.84521484375}, {"start": 68.22, "end": 68.46, "word": " sample", "probability": 0.1046142578125}, {"start": 68.46, "end": 68.74, "word": " size.", "probability": 0.8720703125}, {"start": 69.3, "end": 69.86, "word": " That's", "probability": 0.942138671875}, {"start": 69.86, "end": 70.06, "word": " when", "probability": 0.9384765625}, {"start": 70.06, "end": 70.34, "word": " the", "probability": 0.86767578125}, {"start": 70.34, "end": 70.74, "word": " operation", "probability": 0.49560546875}, {"start": 70.74, "end": 71.38, "word": " is", "probability": 0.94970703125}, {"start": 71.38, "end": 71.86, "word": " normally", "probability": 0.92138671875}, {"start": 71.86, "end": 73.68, "word": " distributed.", "probability": 0.87158203125}, {"start": 75.54, "end": 76.14, "word": " So", "probability": 0.9375}, {"start": 76.14, "end": 76.4, "word": " again,", "probability": 0.86474609375}, {"start": 76.44, "end": 76.56, "word": " in", "probability": 0.91845703125}, {"start": 76.56, "end": 76.74, "word": " this", "probability": 0.9453125}, {"start": 76.74, "end": 77.16, "word": " case,", "probability": 0.9189453125}, {"start": 78.24, "end": 78.66, "word": " the", "probability": 0.90380859375}, {"start": 78.66, "end": 79.02, "word": " new", "probability": 0.6708984375}, {"start": 79.02, "end": 79.18, "word": " z", "probability": 0.583984375}, {"start": 79.18, "end": 79.42, "word": " score", "probability": 0.482666015625}, 
{"start": 79.42, "end": 79.66, "word": " is", "probability": 0.94091796875}, {"start": 79.66, "end": 79.86, "word": " given", "probability": 0.89990234375}, {"start": 79.86, "end": 80.08, "word": " by", "probability": 0.9677734375}, {"start": 80.08, "end": 80.3, "word": " this", "probability": 0.93994140625}, {"start": 80.3, "end": 80.7, "word": " equation.", "probability": 0.9677734375}, {"start": 81.56, "end": 81.86, "word": " Here,", "probability": 0.84912109375}, {"start": 81.98, "end": 82.12, "word": " we", "probability": 0.9541015625}, {"start": 82.12, "end": 82.56, "word": " replace", "probability": 0.88671875}, {"start": 82.56, "end": 83.06, "word": " sigma", "probability": 0.87939453125}, {"start": 83.06, "end": 83.52, "word": " by", "probability": 0.96484375}, {"start": 83.52, "end": 83.78, "word": " sigma", "probability": 0.94140625}, {"start": 83.78, "end": 83.94, "word": " of", "probability": 0.94091796875}, {"start": 83.94, "end": 84.14, "word": " x", "probability": 0.9892578125}, {"start": 84.14, "end": 84.42, "word": " bar.", "probability": 0.9482421875}, {"start": 85.14, "end": 85.52, "word": " So", "probability": 0.96435546875}, {"start": 85.52, "end": 85.8, "word": " z", "probability": 0.444091796875}, {"start": 85.8, "end": 86.12, "word": " score", "probability": 0.83447265625}, {"start": 86.12, "end": 86.38, "word": " is", "probability": 0.9453125}, {"start": 86.38, "end": 86.56, "word": " given", "probability": 0.90478515625}, {"start": 86.56, "end": 86.78, "word": " by", "probability": 0.9716796875}, {"start": 86.78, "end": 86.96, "word": " x", "probability": 0.994140625}, {"start": 86.96, "end": 87.16, "word": " bar", "probability": 0.9287109375}, {"start": 87.16, "end": 87.46, "word": " minus", "probability": 0.986328125}, {"start": 87.46, "end": 87.7, "word": " the", "probability": 0.93017578125}, {"start": 87.7, "end": 87.94, "word": " mean", "probability": 0.97705078125}, {"start": 87.94, "end": 89.02, "word": " divided", "probability": 
0.5283203125}, {"start": 89.02, "end": 89.26, "word": " by", "probability": 0.97314453125}, {"start": 89.26, "end": 89.66, "word": " sigma", "probability": 0.939453125}, {"start": 89.66, "end": 90.06, "word": " over", "probability": 0.7685546875}, {"start": 90.06, "end": 90.38, "word": " root", "probability": 0.94482421875}, {"start": 90.38, "end": 90.88, "word": " n.", "probability": 0.73388671875}], "temperature": 1.0}, {"id": 4, "seek": 10888, "start": 91.68, "end": 108.88, "text": " The second case, if the population is not normal, in this case we can apply the central limit theorem, and this theorem can be applied if the sample size is large enough.", "tokens": [440, 1150, 1389, 11, 498, 264, 4415, 307, 406, 2710, 11, 294, 341, 1389, 321, 393, 3079, 264, 5777, 4948, 20904, 11, 293, 341, 20904, 393, 312, 6456, 498, 264, 6889, 2744, 307, 2416, 1547, 13], "avg_logprob": -0.14178632401131294, "compression_ratio": 1.425, "no_speech_prob": 0.0, "words": [{"start": 91.68, "end": 92.08, "word": " The", "probability": 0.80517578125}, {"start": 92.08, "end": 92.88, "word": " second", "probability": 0.8984375}, {"start": 92.88, "end": 93.48, "word": " case,", "probability": 0.9072265625}, {"start": 95.56, "end": 95.94, "word": " if", "probability": 0.94287109375}, {"start": 95.94, "end": 96.12, "word": " the", "probability": 0.91259765625}, {"start": 96.12, "end": 96.54, "word": " population", "probability": 0.97265625}, {"start": 96.54, "end": 98.18, "word": " is", "probability": 0.9521484375}, {"start": 98.18, "end": 98.44, "word": " not", "probability": 0.94580078125}, {"start": 98.44, "end": 98.92, "word": " normal,", "probability": 0.876953125}, {"start": 100.32, "end": 100.58, "word": " in", "probability": 0.8740234375}, {"start": 100.58, "end": 100.78, "word": " this", "probability": 0.9462890625}, {"start": 100.78, "end": 101.0, "word": " case", "probability": 0.9140625}, {"start": 101.0, "end": 101.16, "word": " we", "probability": 0.70947265625}, {"start": 
101.16, "end": 101.36, "word": " can", "probability": 0.94873046875}, {"start": 101.36, "end": 101.72, "word": " apply", "probability": 0.93212890625}, {"start": 101.72, "end": 102.22, "word": " the", "probability": 0.90283203125}, {"start": 102.22, "end": 102.62, "word": " central", "probability": 0.734375}, {"start": 102.62, "end": 102.94, "word": " limit", "probability": 0.96240234375}, {"start": 102.94, "end": 103.38, "word": " theorem,", "probability": 0.888671875}, {"start": 104.8, "end": 105.14, "word": " and", "probability": 0.92333984375}, {"start": 105.14, "end": 105.38, "word": " this", "probability": 0.94482421875}, {"start": 105.38, "end": 105.64, "word": " theorem", "probability": 0.8671875}, {"start": 105.64, "end": 105.86, "word": " can", "probability": 0.94482421875}, {"start": 105.86, "end": 106.02, "word": " be", "probability": 0.955078125}, {"start": 106.02, "end": 106.44, "word": " applied", "probability": 0.78369140625}, {"start": 106.44, "end": 107.0, "word": " if", "probability": 0.93798828125}, {"start": 107.0, "end": 107.28, "word": " the", "probability": 0.9208984375}, {"start": 107.28, "end": 107.52, "word": " sample", "probability": 0.8994140625}, {"start": 107.52, "end": 108.02, "word": " size", "probability": 0.85302734375}, {"start": 108.02, "end": 108.26, "word": " is", "probability": 0.953125}, {"start": 108.26, "end": 108.52, "word": " large", "probability": 0.9697265625}, {"start": 108.52, "end": 108.88, "word": " enough.", "probability": 0.85546875}], "temperature": 1.0}, {"id": 5, "seek": 13628, "start": 109.72, "end": 136.28, "text": " So if N is large, in this case, we can say that the sampling distribution of the sample mean is approximately normally distributed as long as the sample size is large enough. Now, how large is large enough? 
We mentioned that for most distributions, N above 30 will give sampling distribution that is nearly symmetric or normal.", "tokens": [407, 498, 426, 307, 2416, 11, 294, 341, 1389, 11, 321, 393, 584, 300, 264, 21179, 7316, 295, 264, 6889, 914, 307, 10447, 5646, 12631, 382, 938, 382, 264, 6889, 2744, 307, 2416, 1547, 13, 823, 11, 577, 2416, 307, 2416, 1547, 30, 492, 2835, 300, 337, 881, 37870, 11, 426, 3673, 2217, 486, 976, 21179, 7316, 300, 307, 6217, 32330, 420, 2710, 13], "avg_logprob": -0.20600961538461537, "compression_ratio": 1.7634408602150538, "no_speech_prob": 0.0, "words": [{"start": 109.72, "end": 110.08, "word": " So", "probability": 0.865234375}, {"start": 110.08, "end": 110.3, "word": " if", "probability": 0.77392578125}, {"start": 110.3, "end": 110.5, "word": " N", "probability": 0.49658203125}, {"start": 110.5, "end": 110.68, "word": " is", "probability": 0.951171875}, {"start": 110.68, "end": 111.16, "word": " large,", "probability": 0.96240234375}, {"start": 111.58, "end": 111.78, "word": " in", "probability": 0.8955078125}, {"start": 111.78, "end": 111.96, "word": " this", "probability": 0.947265625}, {"start": 111.96, "end": 112.28, "word": " case,", "probability": 0.92041015625}, {"start": 112.4, "end": 112.5, "word": " we", "probability": 0.9541015625}, {"start": 112.5, "end": 112.74, "word": " can", "probability": 0.94482421875}, {"start": 112.74, "end": 112.98, "word": " say", "probability": 0.79248046875}, {"start": 112.98, "end": 113.3, "word": " that", "probability": 0.916015625}, {"start": 113.3, "end": 114.08, "word": " the", "probability": 0.81591796875}, {"start": 114.08, "end": 114.32, "word": " sampling", "probability": 0.153564453125}, {"start": 114.32, "end": 114.96, "word": " distribution", "probability": 0.85498046875}, {"start": 114.96, "end": 115.3, "word": " of", "probability": 0.9306640625}, {"start": 115.3, "end": 115.58, "word": " the", "probability": 0.75927734375}, {"start": 115.58, "end": 115.8, "word": " sample", 
"probability": 0.81201171875}, {"start": 115.8, "end": 116.12, "word": " mean", "probability": 0.919921875}, {"start": 116.12, "end": 117.02, "word": " is", "probability": 0.90673828125}, {"start": 117.02, "end": 117.88, "word": " approximately", "probability": 0.8525390625}, {"start": 117.88, "end": 118.4, "word": " normally", "probability": 0.87744140625}, {"start": 118.4, "end": 119.08, "word": " distributed", "probability": 0.9072265625}, {"start": 119.08, "end": 119.42, "word": " as", "probability": 0.599609375}, {"start": 119.42, "end": 119.66, "word": " long", "probability": 0.9248046875}, {"start": 119.66, "end": 120.14, "word": " as", "probability": 0.9638671875}, {"start": 120.14, "end": 120.8, "word": " the", "probability": 0.9111328125}, {"start": 120.8, "end": 121.08, "word": " sample", "probability": 0.892578125}, {"start": 121.08, "end": 121.58, "word": " size", "probability": 0.857421875}, {"start": 121.58, "end": 121.84, "word": " is", "probability": 0.9521484375}, {"start": 121.84, "end": 122.06, "word": " large", "probability": 0.970703125}, {"start": 122.06, "end": 122.36, "word": " enough.", "probability": 0.85888671875}, {"start": 123.68, "end": 124.12, "word": " Now,", "probability": 0.63818359375}, {"start": 124.48, "end": 124.86, "word": " how", "probability": 0.927734375}, {"start": 124.86, "end": 125.2, "word": " large", "probability": 0.96630859375}, {"start": 125.2, "end": 125.4, "word": " is", "probability": 0.93896484375}, {"start": 125.4, "end": 125.8, "word": " large", "probability": 0.9462890625}, {"start": 125.8, "end": 126.56, "word": " enough?", "probability": 0.81005859375}, {"start": 126.76, "end": 126.98, "word": " We", "probability": 0.9541015625}, {"start": 126.98, "end": 127.32, "word": " mentioned", "probability": 0.80224609375}, {"start": 127.32, "end": 127.74, "word": " that", "probability": 0.93017578125}, {"start": 127.74, "end": 128.46, "word": " for", "probability": 0.8310546875}, {"start": 128.46, "end": 128.8, 
"word": " most", "probability": 0.900390625}, {"start": 128.8, "end": 129.62, "word": " distributions,", "probability": 0.90771484375}, {"start": 130.54, "end": 130.84, "word": " N", "probability": 0.92578125}, {"start": 130.84, "end": 131.22, "word": " above", "probability": 0.92919921875}, {"start": 131.22, "end": 131.7, "word": " 30", "probability": 0.89013671875}, {"start": 131.7, "end": 132.56, "word": " will", "probability": 0.84716796875}, {"start": 132.56, "end": 132.94, "word": " give", "probability": 0.65283203125}, {"start": 132.94, "end": 133.44, "word": " sampling", "probability": 0.63525390625}, {"start": 133.44, "end": 134.0, "word": " distribution", "probability": 0.828125}, {"start": 134.0, "end": 134.26, "word": " that", "probability": 0.9130859375}, {"start": 134.26, "end": 134.5, "word": " is", "probability": 0.94580078125}, {"start": 134.5, "end": 134.86, "word": " nearly", "probability": 0.84375}, {"start": 134.86, "end": 135.36, "word": " symmetric", "probability": 0.64990234375}, {"start": 135.36, "end": 135.92, "word": " or", "probability": 0.60791015625}, {"start": 135.92, "end": 136.28, "word": " normal.", "probability": 0.8681640625}], "temperature": 1.0}, {"id": 6, "seek": 15041, "start": 137.73, "end": 150.41, "text": " But on the other hand, for fairly symmetrical distribution, and above 15 will usually give some distribution is almost normal. 
So we have three cases.", "tokens": [583, 322, 264, 661, 1011, 11, 337, 6457, 40360, 7316, 11, 293, 3673, 2119, 486, 2673, 976, 512, 7316, 307, 1920, 2710, 13, 407, 321, 362, 1045, 3331, 13], "avg_logprob": -0.3739583412806193, "compression_ratio": 1.2796610169491525, "no_speech_prob": 0.0, "words": [{"start": 137.73, "end": 138.19, "word": " But", "probability": 0.7294921875}, {"start": 138.19, "end": 138.53, "word": " on", "probability": 0.6357421875}, {"start": 138.53, "end": 138.65, "word": " the", "probability": 0.90478515625}, {"start": 138.65, "end": 138.85, "word": " other", "probability": 0.89453125}, {"start": 138.85, "end": 139.15, "word": " hand,", "probability": 0.904296875}, {"start": 139.25, "end": 139.37, "word": " for", "probability": 0.92138671875}, {"start": 139.37, "end": 139.71, "word": " fairly", "probability": 0.748046875}, {"start": 139.71, "end": 140.21, "word": " symmetrical", "probability": 0.80810546875}, {"start": 140.21, "end": 141.11, "word": " distribution,", "probability": 0.64794921875}, {"start": 142.29, "end": 142.59, "word": " and", "probability": 0.2314453125}, {"start": 142.59, "end": 142.99, "word": " above", "probability": 0.94775390625}, {"start": 142.99, "end": 143.71, "word": " 15", "probability": 0.8642578125}, {"start": 143.71, "end": 144.37, "word": " will", "probability": 0.59814453125}, {"start": 144.37, "end": 145.03, "word": " usually", "probability": 0.76416015625}, {"start": 145.03, "end": 145.35, "word": " give", "probability": 0.65869140625}, {"start": 145.35, "end": 145.69, "word": " some", "probability": 0.3134765625}, {"start": 145.69, "end": 146.33, "word": " distribution", "probability": 0.6611328125}, {"start": 146.33, "end": 146.71, "word": " is", "probability": 0.61767578125}, {"start": 146.71, "end": 147.27, "word": " almost", "probability": 0.8271484375}, {"start": 147.27, "end": 147.81, "word": " normal.", "probability": 0.1959228515625}, {"start": 148.79, "end": 149.03, "word": " So", "probability": 
0.943359375}, {"start": 149.03, "end": 149.15, "word": " we", "probability": 0.6845703125}, {"start": 149.15, "end": 149.35, "word": " have", "probability": 0.94921875}, {"start": 149.35, "end": 149.79, "word": " three", "probability": 0.798828125}, {"start": 149.79, "end": 150.41, "word": " cases.", "probability": 0.9140625}], "temperature": 1.0}, {"id": 7, "seek": 17557, "start": 152.13, "end": 175.57, "text": " For most distributions, we need large sample to be in above 30. For symmetric distributions, in above 15. But if the population is normally distributed, then regardless of the sample size, the sampling distribution of the sample mean is always normally distributed. So here, it's nearly symmetric.", "tokens": [1171, 881, 37870, 11, 321, 643, 2416, 6889, 281, 312, 294, 3673, 2217, 13, 1171, 32330, 37870, 11, 294, 3673, 2119, 13, 583, 498, 264, 4415, 307, 5646, 12631, 11, 550, 10060, 295, 264, 6889, 2744, 11, 264, 21179, 7316, 295, 264, 6889, 914, 307, 1009, 5646, 12631, 13, 407, 510, 11, 309, 311, 6217, 32330, 13], "avg_logprob": -0.20096982244787545, "compression_ratio": 1.784431137724551, "no_speech_prob": 0.0, "words": [{"start": 152.13, "end": 152.45, "word": " For", "probability": 0.595703125}, {"start": 152.45, "end": 152.77, "word": " most", "probability": 0.89501953125}, {"start": 152.77, "end": 153.41, "word": " distributions,", "probability": 0.85009765625}, {"start": 153.55, "end": 153.65, "word": " we", "probability": 0.93603515625}, {"start": 153.65, "end": 153.87, "word": " need", "probability": 0.9267578125}, {"start": 153.87, "end": 154.29, "word": " large", "probability": 0.89013671875}, {"start": 154.29, "end": 154.71, "word": " sample", "probability": 0.6435546875}, {"start": 154.71, "end": 154.97, "word": " to", "probability": 0.94970703125}, {"start": 154.97, "end": 155.27, "word": " be", "probability": 0.95751953125}, {"start": 155.27, "end": 156.05, "word": " in", "probability": 0.8701171875}, {"start": 156.05, "end": 156.41, "word": 
" above", "probability": 0.927734375}, {"start": 156.41, "end": 156.85, "word": " 30.", "probability": 0.91162109375}, {"start": 157.35, "end": 158.01, "word": " For", "probability": 0.85498046875}, {"start": 158.01, "end": 158.37, "word": " symmetric", "probability": 0.81396484375}, {"start": 158.37, "end": 159.21, "word": " distributions,", "probability": 0.86279296875}, {"start": 159.81, "end": 160.29, "word": " in", "probability": 0.7431640625}, {"start": 160.29, "end": 160.59, "word": " above", "probability": 0.9052734375}, {"start": 160.59, "end": 161.15, "word": " 15.", "probability": 0.9658203125}, {"start": 161.81, "end": 162.11, "word": " But", "probability": 0.94775390625}, {"start": 162.11, "end": 162.55, "word": " if", "probability": 0.837890625}, {"start": 162.55, "end": 162.75, "word": " the", "probability": 0.91455078125}, {"start": 162.75, "end": 163.53, "word": " population", "probability": 0.931640625}, {"start": 163.53, "end": 164.47, "word": " is", "probability": 0.923828125}, {"start": 164.47, "end": 164.87, "word": " normally", "probability": 0.89697265625}, {"start": 164.87, "end": 165.55, "word": " distributed,", "probability": 0.90673828125}, {"start": 166.17, "end": 166.55, "word": " then", "probability": 0.84033203125}, {"start": 166.55, "end": 167.53, "word": " regardless", "probability": 0.85791015625}, {"start": 167.53, "end": 167.87, "word": " of", "probability": 0.9658203125}, {"start": 167.87, "end": 168.03, "word": " the", "probability": 0.91845703125}, {"start": 168.03, "end": 168.27, "word": " sample", "probability": 0.83251953125}, {"start": 168.27, "end": 168.85, "word": " size,", "probability": 0.84912109375}, {"start": 169.39, "end": 169.55, "word": " the", "probability": 0.89990234375}, {"start": 169.55, "end": 169.81, "word": " sampling", "probability": 0.51513671875}, {"start": 169.81, "end": 170.39, "word": " distribution", "probability": 0.87255859375}, {"start": 170.39, "end": 170.65, "word": " of", "probability": 
0.673828125}, {"start": 170.65, "end": 170.83, "word": " the", "probability": 0.90234375}, {"start": 170.83, "end": 171.13, "word": " sample", "probability": 0.65673828125}, {"start": 171.13, "end": 171.39, "word": " mean", "probability": 0.439453125}, {"start": 171.39, "end": 171.63, "word": " is", "probability": 0.919921875}, {"start": 171.63, "end": 172.19, "word": " always", "probability": 0.8974609375}, {"start": 172.19, "end": 172.91, "word": " normally", "probability": 0.7353515625}, {"start": 172.91, "end": 173.41, "word": " distributed.", "probability": 0.93017578125}, {"start": 174.01, "end": 174.17, "word": " So", "probability": 0.955078125}, {"start": 174.17, "end": 174.41, "word": " here,", "probability": 0.8173828125}, {"start": 174.55, "end": 174.83, "word": " it's", "probability": 0.94677734375}, {"start": 174.83, "end": 175.13, "word": " nearly", "probability": 0.468505859375}, {"start": 175.13, "end": 175.57, "word": " symmetric.", "probability": 0.455322265625}], "temperature": 1.0}, {"id": 8, "seek": 20456, "start": 176.8, "end": 204.56, "text": " almost symmetric and the other one is always symmetric. So we have to distinguish between these three cases. And I think we gave one example. Now let's move to another part. As we mentioned in the beginning of this class, data has two types mainly, quantitative and qualitative. 
Quantitative, for example, your score.", "tokens": [1920, 32330, 293, 264, 661, 472, 307, 1009, 32330, 13, 407, 321, 362, 281, 20206, 1296, 613, 1045, 3331, 13, 400, 286, 519, 321, 2729, 472, 1365, 13, 823, 718, 311, 1286, 281, 1071, 644, 13, 1018, 321, 2835, 294, 264, 2863, 295, 341, 1508, 11, 1412, 575, 732, 3467, 8704, 11, 27778, 293, 31312, 13, 26968, 14275, 11, 337, 1365, 11, 428, 6175, 13], "avg_logprob": -0.18619792253682108, "compression_ratio": 1.551219512195122, "no_speech_prob": 0.0, "words": [{"start": 176.8, "end": 177.34, "word": " almost", "probability": 0.26611328125}, {"start": 177.34, "end": 177.8, "word": " symmetric", "probability": 0.791015625}, {"start": 177.8, "end": 178.44, "word": " and", "probability": 0.68017578125}, {"start": 178.44, "end": 178.56, "word": " the", "probability": 0.58447265625}, {"start": 178.56, "end": 178.74, "word": " other", "probability": 0.89453125}, {"start": 178.74, "end": 178.94, "word": " one", "probability": 0.890625}, {"start": 178.94, "end": 179.08, "word": " is", "probability": 0.9267578125}, {"start": 179.08, "end": 179.4, "word": " always", "probability": 0.84814453125}, {"start": 179.4, "end": 179.78, "word": " symmetric.", "probability": 0.8056640625}, {"start": 179.96, "end": 180.1, "word": " So", "probability": 0.8271484375}, {"start": 180.1, "end": 180.2, "word": " we", "probability": 0.60107421875}, {"start": 180.2, "end": 180.36, "word": " have", "probability": 0.93701171875}, {"start": 180.36, "end": 180.48, "word": " to", "probability": 0.953125}, {"start": 180.48, "end": 180.9, "word": " distinguish", "probability": 0.890625}, {"start": 180.9, "end": 181.34, "word": " between", "probability": 0.8447265625}, {"start": 181.34, "end": 182.62, "word": " these", "probability": 0.8466796875}, {"start": 182.62, "end": 183.0, "word": " three", "probability": 0.8603515625}, {"start": 183.0, "end": 183.52, "word": " cases.", "probability": 0.89306640625}, {"start": 184.16, "end": 184.44, "word": " And", 
"probability": 0.873046875}, {"start": 184.44, "end": 184.58, "word": " I", "probability": 0.9755859375}, {"start": 184.58, "end": 184.76, "word": " think", "probability": 0.91455078125}, {"start": 184.76, "end": 184.92, "word": " we", "probability": 0.9423828125}, {"start": 184.92, "end": 185.26, "word": " gave", "probability": 0.72998046875}, {"start": 185.26, "end": 186.14, "word": " one", "probability": 0.91748046875}, {"start": 186.14, "end": 186.58, "word": " example.", "probability": 0.97265625}, {"start": 187.44, "end": 187.66, "word": " Now", "probability": 0.92822265625}, {"start": 187.66, "end": 187.92, "word": " let's", "probability": 0.885009765625}, {"start": 187.92, "end": 188.3, "word": " move", "probability": 0.94189453125}, {"start": 188.3, "end": 189.0, "word": " to", "probability": 0.95263671875}, {"start": 189.0, "end": 190.08, "word": " another", "probability": 0.473388671875}, {"start": 190.08, "end": 190.64, "word": " part.", "probability": 0.90625}, {"start": 192.46, "end": 192.84, "word": " As", "probability": 0.958984375}, {"start": 192.84, "end": 192.98, "word": " we", "probability": 0.947265625}, {"start": 192.98, "end": 193.38, "word": " mentioned", "probability": 0.82861328125}, {"start": 193.38, "end": 193.74, "word": " in", "probability": 0.86962890625}, {"start": 193.74, "end": 193.86, "word": " the", "probability": 0.92138671875}, {"start": 193.86, "end": 194.06, "word": " beginning", "probability": 0.90771484375}, {"start": 194.06, "end": 194.24, "word": " of", "probability": 0.93701171875}, {"start": 194.24, "end": 194.38, "word": " this", "probability": 0.93994140625}, {"start": 194.38, "end": 194.88, "word": " class,", "probability": 0.96728515625}, {"start": 195.72, "end": 196.04, "word": " data", "probability": 0.81884765625}, {"start": 196.04, "end": 196.86, "word": " has", "probability": 0.93603515625}, {"start": 196.86, "end": 197.08, "word": " two", "probability": 0.93310546875}, {"start": 197.08, "end": 197.48, "word": 
" types", "probability": 0.8369140625}, {"start": 197.48, "end": 197.86, "word": " mainly,", "probability": 0.83642578125}, {"start": 198.76, "end": 199.12, "word": " quantitative", "probability": 0.93798828125}, {"start": 199.12, "end": 199.98, "word": " and", "probability": 0.93359375}, {"start": 199.98, "end": 200.64, "word": " qualitative.", "probability": 0.8564453125}, {"start": 202.16, "end": 202.92, "word": " Quantitative,", "probability": 0.822021484375}, {"start": 203.1, "end": 203.26, "word": " for", "probability": 0.94775390625}, {"start": 203.26, "end": 203.68, "word": " example,", "probability": 0.974609375}, {"start": 203.86, "end": 204.1, "word": " your", "probability": 0.82080078125}, {"start": 204.1, "end": 204.56, "word": " score.", "probability": 0.759765625}], "temperature": 1.0}, {"id": 9, "seek": 22932, "start": 205.62, "end": 229.32, "text": " Score is numerical value, 60, 65, and so on. In this case, we can talk about assembly. So first, for quantitative data, we talk about assembly.", "tokens": [47901, 307, 29054, 2158, 11, 4060, 11, 11624, 11, 293, 370, 322, 13, 682, 341, 1389, 11, 321, 393, 751, 466, 12103, 13, 407, 700, 11, 337, 27778, 1412, 11, 321, 751, 466, 12103, 13], "avg_logprob": -0.287109369205104, "compression_ratio": 1.2972972972972974, "no_speech_prob": 0.0, "words": [{"start": 205.62, "end": 206.3, "word": " Score", "probability": 0.180908203125}, {"start": 206.3, "end": 206.74, "word": " is", "probability": 0.81298828125}, {"start": 206.74, "end": 207.4, "word": " numerical", "probability": 0.8466796875}, {"start": 207.4, "end": 207.7, "word": " value,", "probability": 0.66259765625}, {"start": 207.92, "end": 208.58, "word": " 60,", "probability": 0.69873046875}, {"start": 208.8, "end": 209.36, "word": " 65,", "probability": 0.9541015625}, {"start": 209.5, "end": 209.62, "word": " and", "probability": 0.91064453125}, {"start": 209.62, "end": 209.8, "word": " so", "probability": 0.95458984375}, {"start": 209.8, "end": 
209.96, "word": " on.", "probability": 0.9443359375}, {"start": 210.3, "end": 210.56, "word": " In", "probability": 0.93212890625}, {"start": 210.56, "end": 210.78, "word": " this", "probability": 0.94873046875}, {"start": 210.78, "end": 210.98, "word": " case,", "probability": 0.91650390625}, {"start": 211.0, "end": 211.18, "word": " we", "probability": 0.95361328125}, {"start": 211.18, "end": 211.4, "word": " can", "probability": 0.9404296875}, {"start": 211.4, "end": 211.62, "word": " talk", "probability": 0.896484375}, {"start": 211.62, "end": 212.42, "word": " about", "probability": 0.9072265625}, {"start": 212.42, "end": 213.18, "word": " assembly.", "probability": 0.3857421875}, {"start": 215.56, "end": 216.24, "word": " So", "probability": 0.9140625}, {"start": 216.24, "end": 216.94, "word": " first,", "probability": 0.7568359375}, {"start": 217.2, "end": 217.5, "word": " for", "probability": 0.9052734375}, {"start": 217.5, "end": 220.42, "word": " quantitative", "probability": 0.52099609375}, {"start": 220.42, "end": 226.4, "word": " data,", "probability": 0.88818359375}, {"start": 227.02, "end": 228.04, "word": " we", "probability": 0.880859375}, {"start": 228.04, "end": 228.32, "word": " talk", "probability": 0.59765625}, {"start": 228.32, "end": 228.76, "word": " about", "probability": 0.9111328125}, {"start": 228.76, "end": 229.32, "word": " assembly.", "probability": 0.890625}], "temperature": 1.0}, {"id": 10, "seek": 26082, "start": 235.44, "end": 260.82, "text": " Number two, if we talk about, if we talk about qualitative data, in this case we talk about sample of water. 
In this case,", "tokens": [5118, 732, 11, 498, 321, 751, 466, 11, 498, 321, 751, 466, 31312, 1412, 11, 294, 341, 1389, 321, 751, 466, 6889, 295, 1281, 13, 682, 341, 1389, 11], "avg_logprob": -0.3864583263794581, "compression_ratio": 1.4470588235294117, "no_speech_prob": 0.0, "words": [{"start": 235.44, "end": 235.74, "word": " Number", "probability": 0.833984375}, {"start": 235.74, "end": 236.14, "word": " two,", "probability": 0.83349609375}, {"start": 237.86, "end": 238.1, "word": " if", "probability": 0.9482421875}, {"start": 238.1, "end": 238.3, "word": " we", "probability": 0.95068359375}, {"start": 238.3, "end": 238.58, "word": " talk", "probability": 0.44921875}, {"start": 238.58, "end": 239.16, "word": " about,", "probability": 0.916015625}, {"start": 239.18, "end": 241.04, "word": " if", "probability": 0.57177734375}, {"start": 241.04, "end": 241.18, "word": " we", "probability": 0.91748046875}, {"start": 241.18, "end": 241.36, "word": " talk", "probability": 0.880859375}, {"start": 241.36, "end": 241.78, "word": " about", "probability": 0.90771484375}, {"start": 241.78, "end": 244.24, "word": " qualitative", "probability": 0.86376953125}, {"start": 244.24, "end": 244.76, "word": " data,", "probability": 0.9365234375}, {"start": 247.12, "end": 247.12, "word": " in", "probability": 0.56640625}, {"start": 247.12, "end": 248.78, "word": " this", "probability": 0.947265625}, {"start": 248.78, "end": 249.12, "word": " case", "probability": 0.9111328125}, {"start": 249.12, "end": 249.3, "word": " we", "probability": 0.568359375}, {"start": 249.3, "end": 249.54, "word": " talk", "probability": 0.81103515625}, {"start": 249.54, "end": 249.9, "word": " about", "probability": 0.91064453125}, {"start": 249.9, "end": 250.3, "word": " sample", "probability": 0.1966552734375}, {"start": 250.3, "end": 255.76, "word": " of", "probability": 0.267578125}, {"start": 255.76, "end": 256.1, "word": " water.", "probability": 0.312744140625}, {"start": 259.22, "end": 260.02, 
"word": " In", "probability": 0.95458984375}, {"start": 260.02, "end": 260.38, "word": " this", "probability": 0.94873046875}, {"start": 260.38, "end": 260.82, "word": " case,", "probability": 0.92822265625}], "temperature": 1.0}, {"id": 11, "seek": 29119, "start": 262.19, "end": 291.19, "text": " proportion is what's the sampling distribution of the sample proportion. So we are looking for the sampling distribution of the sample population having some characteristic. For example, let's buy", "tokens": [16068, 307, 437, 311, 264, 21179, 7316, 295, 264, 6889, 16068, 13, 407, 321, 366, 1237, 337, 264, 21179, 7316, 295, 264, 6889, 4415, 1419, 512, 16282, 13, 1171, 1365, 11, 718, 311, 2256], "avg_logprob": -0.29308034351893836, "compression_ratio": 1.6837606837606838, "no_speech_prob": 0.0, "words": [{"start": 262.19, "end": 262.71, "word": " proportion", "probability": 0.204833984375}, {"start": 262.71, "end": 263.09, "word": " is", "probability": 0.80859375}, {"start": 263.09, "end": 263.69, "word": " what's", "probability": 0.75341796875}, {"start": 263.69, "end": 263.89, "word": " the", "probability": 0.84228515625}, {"start": 263.89, "end": 264.11, "word": " sampling", "probability": 0.7470703125}, {"start": 264.11, "end": 264.83, "word": " distribution", "probability": 0.82373046875}, {"start": 264.83, "end": 266.71, "word": " of", "probability": 0.9287109375}, {"start": 266.71, "end": 267.07, "word": " the", "probability": 0.9091796875}, {"start": 267.07, "end": 267.53, "word": " sample", "probability": 0.8486328125}, {"start": 267.53, "end": 268.07, "word": " proportion.", "probability": 0.73779296875}, {"start": 268.33, "end": 268.47, "word": " So", "probability": 0.916015625}, {"start": 268.47, "end": 268.65, "word": " we", "probability": 0.6640625}, {"start": 268.65, "end": 268.77, "word": " are", "probability": 0.91943359375}, {"start": 268.77, "end": 269.03, "word": " looking", "probability": 0.908203125}, {"start": 269.03, "end": 269.49, "word": " for", 
"probability": 0.95556640625}, {"start": 269.49, "end": 270.47, "word": " the", "probability": 0.71533203125}, {"start": 270.47, "end": 270.91, "word": " sampling", "probability": 0.787109375}, {"start": 270.91, "end": 275.69, "word": " distribution", "probability": 0.6806640625}, {"start": 275.69, "end": 278.29, "word": " of", "probability": 0.939453125}, {"start": 278.29, "end": 280.99, "word": " the", "probability": 0.900390625}, {"start": 280.99, "end": 281.75, "word": " sample", "probability": 0.82470703125}, {"start": 281.75, "end": 286.41, "word": " population", "probability": 0.160400390625}, {"start": 286.41, "end": 287.25, "word": " having", "probability": 0.89501953125}, {"start": 287.25, "end": 287.83, "word": " some", "probability": 0.90234375}, {"start": 287.83, "end": 288.49, "word": " characteristic.", "probability": 0.7529296875}, {"start": 288.77, "end": 288.93, "word": " For", "probability": 0.9404296875}, {"start": 288.93, "end": 289.29, "word": " example,", "probability": 0.97216796875}, {"start": 290.45, "end": 290.89, "word": " let's", "probability": 0.960693359375}, {"start": 290.89, "end": 291.19, "word": " buy", "probability": 0.77490234375}], "temperature": 1.0}, {"id": 12, "seek": 32030, "start": 292.16, "end": 320.3, "text": " denotes number of smokers among females. So let's use Y as number of smokers among all females. All females have IUDs. 
So here we are talking about", "tokens": [1441, 17251, 1230, 295, 32073, 433, 3654, 21529, 13, 407, 718, 311, 764, 398, 382, 1230, 295, 32073, 433, 3654, 439, 21529, 13, 1057, 21529, 362, 286, 9438, 82, 13, 407, 510, 321, 366, 1417, 466], "avg_logprob": -0.22719594916781863, "compression_ratio": 1.4368932038834952, "no_speech_prob": 0.0, "words": [{"start": 292.16, "end": 292.84, "word": " denotes", "probability": 0.8193359375}, {"start": 292.84, "end": 294.66, "word": " number", "probability": 0.6796875}, {"start": 294.66, "end": 297.1, "word": " of", "probability": 0.9609375}, {"start": 297.1, "end": 297.96, "word": " smokers", "probability": 0.9794921875}, {"start": 297.96, "end": 302.82, "word": " among", "probability": 0.916015625}, {"start": 302.82, "end": 303.62, "word": " females.", "probability": 0.90576171875}, {"start": 306.5, "end": 306.94, "word": " So", "probability": 0.572265625}, {"start": 306.94, "end": 307.44, "word": " let's", "probability": 0.74658203125}, {"start": 307.44, "end": 309.36, "word": " use", "probability": 0.8701171875}, {"start": 309.36, "end": 309.74, "word": " Y", "probability": 0.443603515625}, {"start": 309.74, "end": 310.34, "word": " as", "probability": 0.9541015625}, {"start": 310.34, "end": 310.62, "word": " number", "probability": 0.88037109375}, {"start": 310.62, "end": 310.86, "word": " of", "probability": 0.9541015625}, {"start": 310.86, "end": 311.34, "word": " smokers", "probability": 0.968994140625}, {"start": 311.34, "end": 311.84, "word": " among", "probability": 0.9326171875}, {"start": 311.84, "end": 312.28, "word": " all", "probability": 0.9375}, {"start": 312.28, "end": 312.8, "word": " females.", "probability": 0.927734375}, {"start": 313.26, "end": 313.9, "word": " All", "probability": 0.923828125}, {"start": 313.9, "end": 314.46, "word": " females", "probability": 0.90576171875}, {"start": 314.46, "end": 314.96, "word": " have", "probability": 0.31103515625}, {"start": 314.96, "end": 316.94, "word": " 
IUDs.", "probability": 0.8063151041666666}, {"start": 317.74, "end": 318.76, "word": " So", "probability": 0.9404296875}, {"start": 318.76, "end": 319.04, "word": " here", "probability": 0.79833984375}, {"start": 319.04, "end": 319.42, "word": " we", "probability": 0.5625}, {"start": 319.42, "end": 319.56, "word": " are", "probability": 0.923828125}, {"start": 319.56, "end": 319.92, "word": " talking", "probability": 0.85498046875}, {"start": 319.92, "end": 320.3, "word": " about", "probability": 0.91015625}], "temperature": 1.0}, {"id": 13, "seek": 34880, "start": 320.98, "end": 348.8, "text": " My population is IUG students, but here we're focused on female students. And pi is number of smokers among all female students. Now sampling proportion, so my characteristic here is smokers. Maybe your characteristic is person or student's number.", "tokens": [1222, 4415, 307, 44218, 38, 1731, 11, 457, 510, 321, 434, 5178, 322, 6556, 1731, 13, 400, 3895, 307, 1230, 295, 32073, 433, 3654, 439, 6556, 1731, 13, 823, 21179, 16068, 11, 370, 452, 16282, 510, 307, 32073, 433, 13, 2704, 428, 16282, 307, 954, 420, 3107, 311, 1230, 13], "avg_logprob": -0.221200973964205, "compression_ratio": 1.5660377358490567, "no_speech_prob": 0.0, "words": [{"start": 320.98, "end": 321.38, "word": " My", "probability": 0.76904296875}, {"start": 321.38, "end": 322.08, "word": " population", "probability": 0.9033203125}, {"start": 322.08, "end": 322.3, "word": " is", "probability": 0.91259765625}, {"start": 322.3, "end": 322.78, "word": " IUG", "probability": 0.76220703125}, {"start": 322.78, "end": 323.46, "word": " students,", "probability": 0.9658203125}, {"start": 324.38, "end": 324.56, "word": " but", "probability": 0.6484375}, {"start": 324.56, "end": 324.7, "word": " here", "probability": 0.71142578125}, {"start": 324.7, "end": 324.86, "word": " we're", "probability": 0.625732421875}, {"start": 324.86, "end": 325.34, "word": " focused", "probability": 0.9091796875}, {"start": 325.34, "end": 
325.9, "word": " on", "probability": 0.9482421875}, {"start": 325.9, "end": 326.24, "word": " female", "probability": 0.857421875}, {"start": 326.24, "end": 326.92, "word": " students.", "probability": 0.96923828125}, {"start": 328.02, "end": 328.98, "word": " And", "probability": 0.94482421875}, {"start": 328.98, "end": 329.74, "word": " pi", "probability": 0.6005859375}, {"start": 329.74, "end": 330.24, "word": " is", "probability": 0.9462890625}, {"start": 330.24, "end": 330.58, "word": " number", "probability": 0.88720703125}, {"start": 330.58, "end": 330.78, "word": " of", "probability": 0.9658203125}, {"start": 330.78, "end": 331.34, "word": " smokers", "probability": 0.9775390625}, {"start": 331.34, "end": 332.44, "word": " among", "probability": 0.81787109375}, {"start": 332.44, "end": 333.0, "word": " all", "probability": 0.94921875}, {"start": 333.0, "end": 333.34, "word": " female", "probability": 0.80908203125}, {"start": 333.34, "end": 333.84, "word": " students.", "probability": 0.97119140625}, {"start": 336.82, "end": 337.16, "word": " Now", "probability": 0.94140625}, {"start": 337.16, "end": 337.52, "word": " sampling", "probability": 0.256591796875}, {"start": 337.52, "end": 338.08, "word": " proportion,", "probability": 0.58837890625}, {"start": 338.34, "end": 338.48, "word": " so", "probability": 0.93896484375}, {"start": 338.48, "end": 338.78, "word": " my", "probability": 0.970703125}, {"start": 338.78, "end": 339.3, "word": " characteristic", "probability": 0.86328125}, {"start": 339.3, "end": 339.66, "word": " here", "probability": 0.84521484375}, {"start": 339.66, "end": 340.0, "word": " is", "probability": 0.9482421875}, {"start": 340.0, "end": 341.12, "word": " smokers.", "probability": 0.827880859375}, {"start": 342.38, "end": 342.8, "word": " Maybe", "probability": 0.93994140625}, {"start": 342.8, "end": 343.02, "word": " your", "probability": 0.7919921875}, {"start": 343.02, "end": 343.56, "word": " characteristic", "probability": 
0.912109375}, {"start": 343.56, "end": 344.04, "word": " is", "probability": 0.9443359375}, {"start": 344.04, "end": 344.52, "word": " person", "probability": 0.892578125}, {"start": 344.52, "end": 346.0, "word": " or", "probability": 0.83642578125}, {"start": 346.0, "end": 347.04, "word": " student's", "probability": 0.6572265625}, {"start": 347.04, "end": 348.8, "word": " number.", "probability": 0.9345703125}], "temperature": 1.0}, {"id": 14, "seek": 38781, "start": 361.19, "end": 387.81, "text": " proportion of students have scores above 80. So by proportion of students have scores above 80, and so on. So here, in this case, your characteristic is scores above 80, and so on.", "tokens": [16068, 295, 1731, 362, 13444, 3673, 4688, 13, 407, 538, 16068, 295, 1731, 362, 13444, 3673, 4688, 11, 293, 370, 322, 13, 407, 510, 11, 294, 341, 1389, 11, 428, 16282, 307, 13444, 3673, 4688, 11, 293, 370, 322, 13], "avg_logprob": -0.20236279906296148, "compression_ratio": 1.7920792079207921, "no_speech_prob": 0.0, "words": [{"start": 361.19, "end": 361.67, "word": " proportion", "probability": 0.1881103515625}, {"start": 361.67, "end": 362.15, "word": " of", "probability": 0.95751953125}, {"start": 362.15, "end": 364.21, "word": " students", "probability": 0.96240234375}, {"start": 364.21, "end": 368.13, "word": " have", "probability": 0.56396484375}, {"start": 368.13, "end": 369.67, "word": " scores", "probability": 0.64990234375}, {"start": 369.67, "end": 372.79, "word": " above", "probability": 0.9462890625}, {"start": 372.79, "end": 373.19, "word": " 80.", "probability": 0.90625}, {"start": 375.79, "end": 376.49, "word": " So", "probability": 0.9072265625}, {"start": 376.49, "end": 376.77, "word": " by", "probability": 0.662109375}, {"start": 376.77, "end": 377.37, "word": " proportion", "probability": 0.845703125}, {"start": 377.37, "end": 377.61, "word": " of", "probability": 0.8056640625}, {"start": 377.61, "end": 378.17, "word": " students", "probability": 
0.95458984375}, {"start": 378.17, "end": 378.45, "word": " have", "probability": 0.88232421875}, {"start": 378.45, "end": 378.91, "word": " scores", "probability": 0.80859375}, {"start": 378.91, "end": 379.21, "word": " above", "probability": 0.9560546875}, {"start": 379.21, "end": 379.59, "word": " 80,", "probability": 0.97216796875}, {"start": 379.95, "end": 380.17, "word": " and", "probability": 0.939453125}, {"start": 380.17, "end": 380.35, "word": " so", "probability": 0.95166015625}, {"start": 380.35, "end": 380.53, "word": " on.", "probability": 0.94677734375}, {"start": 380.79, "end": 380.97, "word": " So", "probability": 0.95654296875}, {"start": 380.97, "end": 381.27, "word": " here,", "probability": 0.81884765625}, {"start": 381.47, "end": 381.57, "word": " in", "probability": 0.8857421875}, {"start": 381.57, "end": 381.77, "word": " this", "probability": 0.9482421875}, {"start": 381.77, "end": 382.09, "word": " case,", "probability": 0.92041015625}, {"start": 383.21, "end": 383.43, "word": " your", "probability": 0.85498046875}, {"start": 383.43, "end": 384.17, "word": " characteristic", "probability": 0.8701171875}, {"start": 384.17, "end": 385.59, "word": " is", "probability": 0.62939453125}, {"start": 385.59, "end": 386.09, "word": " scores", "probability": 0.83349609375}, {"start": 386.09, "end": 386.43, "word": " above", "probability": 0.9189453125}, {"start": 386.43, "end": 386.81, "word": " 80,", "probability": 0.97119140625}, {"start": 387.27, "end": 387.47, "word": " and", "probability": 0.9326171875}, {"start": 387.47, "end": 387.61, "word": " so", "probability": 0.94921875}, {"start": 387.61, "end": 387.81, "word": " on.", "probability": 0.9443359375}], "temperature": 1.0}, {"id": 15, "seek": 41798, "start": 389.3, "end": 417.98, "text": " Simple proportion provides an estimate of Pi, because generally speaking, Pi is not given, or is unknown, similar as Mu. 
And Mu is the population mean, is always unknown, and we are interested to estimate the population mean. In this case, the point estimate of Pi is P. So P is the point estimate, sample, a proportion,", "tokens": [21532, 16068, 6417, 364, 12539, 295, 17741, 11, 570, 5101, 4124, 11, 17741, 307, 406, 2212, 11, 420, 307, 9841, 11, 2531, 382, 15601, 13, 400, 15601, 307, 264, 4415, 914, 11, 307, 1009, 9841, 11, 293, 321, 366, 3102, 281, 12539, 264, 4415, 914, 13, 682, 341, 1389, 11, 264, 935, 12539, 295, 17741, 307, 430, 13, 407, 430, 307, 264, 935, 12539, 11, 6889, 11, 257, 16068, 11], "avg_logprob": -0.2623239537359963, "compression_ratio": 1.7351351351351352, "no_speech_prob": 0.0, "words": [{"start": 389.3, "end": 389.76, "word": " Simple", "probability": 0.317626953125}, {"start": 389.76, "end": 390.22, "word": " proportion", "probability": 0.611328125}, {"start": 390.22, "end": 391.82, "word": " provides", "probability": 0.330810546875}, {"start": 391.82, "end": 392.08, "word": " an", "probability": 0.9013671875}, {"start": 392.08, "end": 392.54, "word": " estimate", "probability": 0.91552734375}, {"start": 392.54, "end": 392.84, "word": " of", "probability": 0.9619140625}, {"start": 392.84, "end": 393.12, "word": " Pi,", "probability": 0.388427734375}, {"start": 393.34, "end": 393.76, "word": " because", "probability": 0.8779296875}, {"start": 393.76, "end": 394.28, "word": " generally", "probability": 0.68798828125}, {"start": 394.28, "end": 394.9, "word": " speaking,", "probability": 0.91943359375}, {"start": 395.4, "end": 395.58, "word": " Pi", "probability": 0.94970703125}, {"start": 395.58, "end": 395.78, "word": " is", "probability": 0.94970703125}, {"start": 395.78, "end": 396.0, "word": " not", "probability": 0.92919921875}, {"start": 396.0, "end": 396.3, "word": " given,", "probability": 0.8505859375}, {"start": 396.5, "end": 396.66, "word": " or", "probability": 0.74853515625}, {"start": 396.66, "end": 396.78, "word": " is", "probability": 
0.80029296875}, {"start": 396.78, "end": 397.14, "word": " unknown,", "probability": 0.896484375}, {"start": 397.94, "end": 398.36, "word": " similar", "probability": 0.9453125}, {"start": 398.36, "end": 398.84, "word": " as", "probability": 0.87890625}, {"start": 398.84, "end": 399.14, "word": " Mu.", "probability": 0.80029296875}, {"start": 399.86, "end": 400.06, "word": " And", "probability": 0.424072265625}, {"start": 400.06, "end": 400.2, "word": " Mu", "probability": 0.8974609375}, {"start": 400.2, "end": 400.36, "word": " is", "probability": 0.828125}, {"start": 400.36, "end": 400.48, "word": " the", "probability": 0.818359375}, {"start": 400.48, "end": 400.9, "word": " population", "probability": 0.97314453125}, {"start": 400.9, "end": 401.22, "word": " mean,", "probability": 0.9638671875}, {"start": 401.66, "end": 401.84, "word": " is", "probability": 0.440185546875}, {"start": 401.84, "end": 402.32, "word": " always", "probability": 0.91015625}, {"start": 402.32, "end": 403.62, "word": " unknown,", "probability": 0.53759765625}, {"start": 403.82, "end": 403.94, "word": " and", "probability": 0.7900390625}, {"start": 403.94, "end": 404.04, "word": " we", "probability": 0.95263671875}, {"start": 404.04, "end": 404.16, "word": " are", "probability": 0.91552734375}, {"start": 404.16, "end": 404.62, "word": " interested", "probability": 0.87255859375}, {"start": 404.62, "end": 404.84, "word": " to", "probability": 0.88720703125}, {"start": 404.84, "end": 405.28, "word": " estimate", "probability": 0.92919921875}, {"start": 405.28, "end": 406.14, "word": " the", "probability": 0.8955078125}, {"start": 406.14, "end": 406.52, "word": " population", "probability": 0.95849609375}, {"start": 406.52, "end": 406.78, "word": " mean.", "probability": 0.951171875}, {"start": 407.08, "end": 407.3, "word": " In", "probability": 0.93603515625}, {"start": 407.3, "end": 407.58, "word": " this", "probability": 0.94677734375}, {"start": 407.58, "end": 407.9, "word": " case,", 
"probability": 0.9130859375}, {"start": 408.44, "end": 408.68, "word": " the", "probability": 0.9189453125}, {"start": 408.68, "end": 408.96, "word": " point", "probability": 0.97119140625}, {"start": 408.96, "end": 409.46, "word": " estimate", "probability": 0.91845703125}, {"start": 409.46, "end": 409.92, "word": " of", "probability": 0.96484375}, {"start": 409.92, "end": 410.3, "word": " Pi", "probability": 0.9814453125}, {"start": 410.3, "end": 411.74, "word": " is", "probability": 0.916015625}, {"start": 411.74, "end": 411.94, "word": " P.", "probability": 0.6201171875}, {"start": 412.32, "end": 412.82, "word": " So", "probability": 0.94921875}, {"start": 412.82, "end": 413.06, "word": " P", "probability": 0.619140625}, {"start": 413.06, "end": 413.36, "word": " is", "probability": 0.953125}, {"start": 413.36, "end": 413.48, "word": " the", "probability": 0.87353515625}, {"start": 413.48, "end": 413.78, "word": " point", "probability": 0.9609375}, {"start": 413.78, "end": 414.4, "word": " estimate,", "probability": 0.92578125}, {"start": 415.18, "end": 415.66, "word": " sample,", "probability": 0.401123046875}, {"start": 417.36, "end": 417.58, "word": " a", "probability": 0.5087890625}, {"start": 417.58, "end": 417.98, "word": " proportion,", "probability": 0.85107421875}], "temperature": 1.0}, {"id": 16, "seek": 44745, "start": 420.59, "end": 447.45, "text": " which is the point estimate of Y. Now, for example, let's go back to the proportion of smokers among all female students at IUG. Let's assume that number of females at IUG, for example, is 1,000 students. 
And we talk here around a sample of 1,000.", "tokens": [597, 307, 264, 935, 12539, 295, 398, 13, 823, 11, 337, 1365, 11, 718, 311, 352, 646, 281, 264, 16068, 295, 32073, 433, 3654, 439, 6556, 1731, 412, 44218, 38, 13, 961, 311, 6552, 300, 1230, 295, 21529, 412, 44218, 38, 11, 337, 1365, 11, 307, 502, 11, 1360, 1731, 13, 400, 321, 751, 510, 926, 257, 6889, 295, 502, 11, 1360, 13], "avg_logprob": -0.16284179175272584, "compression_ratio": 1.4850299401197604, "no_speech_prob": 0.0, "words": [{"start": 420.59, "end": 420.89, "word": " which", "probability": 0.4951171875}, {"start": 420.89, "end": 421.03, "word": " is", "probability": 0.943359375}, {"start": 421.03, "end": 421.15, "word": " the", "probability": 0.88427734375}, {"start": 421.15, "end": 421.39, "word": " point", "probability": 0.89892578125}, {"start": 421.39, "end": 421.87, "word": " estimate", "probability": 0.92724609375}, {"start": 421.87, "end": 422.53, "word": " of", "probability": 0.9638671875}, {"start": 422.53, "end": 422.85, "word": " Y.", "probability": 0.71142578125}, {"start": 424.71, "end": 424.97, "word": " Now,", "probability": 0.78564453125}, {"start": 425.03, "end": 425.15, "word": " for", "probability": 0.9501953125}, {"start": 425.15, "end": 425.45, "word": " example,", "probability": 0.978515625}, {"start": 425.55, "end": 425.69, "word": " let's", "probability": 0.958740234375}, {"start": 425.69, "end": 425.83, "word": " go", "probability": 0.9619140625}, {"start": 425.83, "end": 426.15, "word": " back", "probability": 0.87890625}, {"start": 426.15, "end": 426.37, "word": " to", "probability": 0.9658203125}, {"start": 426.37, "end": 426.65, "word": " the", "probability": 0.91357421875}, {"start": 426.65, "end": 427.17, "word": " proportion", "probability": 0.7392578125}, {"start": 427.17, "end": 427.49, "word": " of", "probability": 0.962890625}, {"start": 427.49, "end": 428.03, "word": " smokers", "probability": 0.958984375}, {"start": 428.03, "end": 428.39, "word": " among", 
"probability": 0.935546875}, {"start": 428.39, "end": 428.75, "word": " all", "probability": 0.94482421875}, {"start": 428.75, "end": 429.09, "word": " female", "probability": 0.79638671875}, {"start": 429.09, "end": 429.49, "word": " students", "probability": 0.96875}, {"start": 429.49, "end": 429.75, "word": " at", "probability": 0.935546875}, {"start": 429.75, "end": 430.11, "word": " IUG.", "probability": 0.788330078125}, {"start": 430.71, "end": 431.35, "word": " Let's", "probability": 0.970947265625}, {"start": 431.35, "end": 431.69, "word": " assume", "probability": 0.91796875}, {"start": 431.69, "end": 432.03, "word": " that", "probability": 0.88427734375}, {"start": 432.03, "end": 432.73, "word": " number", "probability": 0.64697265625}, {"start": 432.73, "end": 432.97, "word": " of", "probability": 0.97314453125}, {"start": 432.97, "end": 433.51, "word": " females", "probability": 0.9326171875}, {"start": 433.51, "end": 435.21, "word": " at", "probability": 0.9189453125}, {"start": 435.21, "end": 435.83, "word": " IUG,", "probability": 0.941162109375}, {"start": 435.97, "end": 436.19, "word": " for", "probability": 0.951171875}, {"start": 436.19, "end": 436.51, "word": " example,", "probability": 0.9775390625}, {"start": 436.65, "end": 436.75, "word": " is", "probability": 0.943359375}, {"start": 436.75, "end": 436.91, "word": " 1", "probability": 0.33251953125}, {"start": 436.91, "end": 437.23, "word": ",000", "probability": 0.992431640625}, {"start": 437.23, "end": 437.97, "word": " students.", "probability": 0.95263671875}, {"start": 443.57, "end": 444.21, "word": " And", "probability": 0.5078125}, {"start": 444.21, "end": 444.41, "word": " we", "probability": 0.9296875}, {"start": 444.41, "end": 444.77, "word": " talk", "probability": 0.321044921875}, {"start": 444.77, "end": 445.67, "word": " here", "probability": 0.7890625}, {"start": 445.67, "end": 446.05, "word": " around", "probability": 0.79052734375}, {"start": 446.05, "end": 446.27, "word": " 
a", "probability": 0.5625}, {"start": 446.27, "end": 446.53, "word": " sample", "probability": 0.85009765625}, {"start": 446.53, "end": 446.79, "word": " of", "probability": 0.96337890625}, {"start": 446.79, "end": 447.03, "word": " 1", "probability": 0.96337890625}, {"start": 447.03, "end": 447.45, "word": ",000.", "probability": 0.99853515625}], "temperature": 1.0}, {"id": 17, "seek": 47728, "start": 448.6, "end": 477.28, "text": " We know that there are too many female students at IUG. Suppose we select a random sample of size 1,000. So that's the sample size. So here, we selected a random sample of size 1,000 from the population of females. And suppose we found that number of smokers among these", "tokens": [492, 458, 300, 456, 366, 886, 867, 6556, 1731, 412, 44218, 38, 13, 21360, 321, 3048, 257, 4974, 6889, 295, 2744, 502, 11, 1360, 13, 407, 300, 311, 264, 6889, 2744, 13, 407, 510, 11, 321, 8209, 257, 4974, 6889, 295, 2744, 502, 11, 1360, 490, 264, 4415, 295, 21529, 13, 400, 7297, 321, 1352, 300, 1230, 295, 32073, 433, 3654, 613], "avg_logprob": -0.1447172595394982, "compression_ratio": 1.6325301204819278, "no_speech_prob": 0.0, "words": [{"start": 448.6, "end": 448.9, "word": " We", "probability": 0.73974609375}, {"start": 448.9, "end": 449.02, "word": " know", "probability": 0.85791015625}, {"start": 449.02, "end": 449.26, "word": " that", "probability": 0.91455078125}, {"start": 449.26, "end": 449.54, "word": " there", "probability": 0.90771484375}, {"start": 449.54, "end": 449.86, "word": " are", "probability": 0.94189453125}, {"start": 449.86, "end": 450.44, "word": " too", "probability": 0.935546875}, {"start": 450.44, "end": 450.66, "word": " many", "probability": 0.9052734375}, {"start": 450.66, "end": 451.38, "word": " female", "probability": 0.865234375}, {"start": 451.38, "end": 451.78, "word": " students", "probability": 0.96533203125}, {"start": 451.78, "end": 452.02, "word": " at", "probability": 0.92431640625}, {"start": 452.02, "end": 452.42, 
"word": " IUG.", "probability": 0.736572265625}, {"start": 453.04, "end": 453.42, "word": " Suppose", "probability": 0.80224609375}, {"start": 453.42, "end": 453.64, "word": " we", "probability": 0.89892578125}, {"start": 453.64, "end": 453.96, "word": " select", "probability": 0.83642578125}, {"start": 453.96, "end": 454.16, "word": " a", "probability": 0.9580078125}, {"start": 454.16, "end": 454.38, "word": " random", "probability": 0.87451171875}, {"start": 454.38, "end": 454.82, "word": " sample", "probability": 0.88818359375}, {"start": 454.82, "end": 455.02, "word": " of", "probability": 0.900390625}, {"start": 455.02, "end": 455.3, "word": " size", "probability": 0.8359375}, {"start": 455.3, "end": 455.6, "word": " 1", "probability": 0.5830078125}, {"start": 455.6, "end": 455.98, "word": ",000.", "probability": 0.992919921875}, {"start": 458.04, "end": 458.42, "word": " So", "probability": 0.94970703125}, {"start": 458.42, "end": 458.78, "word": " that's", "probability": 0.896484375}, {"start": 458.78, "end": 458.98, "word": " the", "probability": 0.744140625}, {"start": 458.98, "end": 459.24, "word": " sample", "probability": 0.73486328125}, {"start": 459.24, "end": 460.04, "word": " size.", "probability": 0.8232421875}, {"start": 461.78, "end": 462.1, "word": " So", "probability": 0.90283203125}, {"start": 462.1, "end": 462.36, "word": " here,", "probability": 0.82421875}, {"start": 463.24, "end": 464.34, "word": " we", "probability": 0.93798828125}, {"start": 464.34, "end": 464.74, "word": " selected", "probability": 0.86279296875}, {"start": 464.74, "end": 464.94, "word": " a", "probability": 0.9794921875}, {"start": 464.94, "end": 465.16, "word": " random", "probability": 0.8681640625}, {"start": 465.16, "end": 465.58, "word": " sample", "probability": 0.8740234375}, {"start": 465.58, "end": 465.8, "word": " of", "probability": 0.962890625}, {"start": 465.8, "end": 466.04, "word": " size", "probability": 0.82373046875}, {"start": 466.04, "end": 466.32, 
"word": " 1", "probability": 0.95703125}, {"start": 466.32, "end": 466.76, "word": ",000", "probability": 0.999267578125}, {"start": 466.76, "end": 468.6, "word": " from", "probability": 0.70166015625}, {"start": 468.6, "end": 469.14, "word": " the", "probability": 0.92529296875}, {"start": 469.14, "end": 471.12, "word": " population", "probability": 0.93310546875}, {"start": 471.12, "end": 471.38, "word": " of", "probability": 0.9697265625}, {"start": 471.38, "end": 471.72, "word": " females.", "probability": 0.95166015625}, {"start": 472.96, "end": 473.26, "word": " And", "probability": 0.95751953125}, {"start": 473.26, "end": 473.7, "word": " suppose", "probability": 0.9208984375}, {"start": 473.7, "end": 473.92, "word": " we", "probability": 0.93701171875}, {"start": 473.92, "end": 474.36, "word": " found", "probability": 0.90771484375}, {"start": 474.36, "end": 474.68, "word": " that", "probability": 0.71826171875}, {"start": 474.68, "end": 475.62, "word": " number", "probability": 0.75439453125}, {"start": 475.62, "end": 475.84, "word": " of", "probability": 0.96875}, {"start": 475.84, "end": 476.38, "word": " smokers", "probability": 0.936279296875}, {"start": 476.38, "end": 476.76, "word": " among", "probability": 0.94921875}, {"start": 476.76, "end": 477.28, "word": " these", "probability": 0.6650390625}], "temperature": 1.0}, {"id": 18, "seek": 50756, "start": 478.1, "end": 507.56, "text": " equal one hundred. So out of one thousand, one hundred students are smoking. Now what's the percentage of smokers in this case? So P equals this one hundred divided by a thousand, so that's ten percent or point one. 
Let's assume that X", "tokens": [2681, 472, 3262, 13, 407, 484, 295, 472, 4714, 11, 472, 3262, 1731, 366, 14055, 13, 823, 437, 311, 264, 9668, 295, 32073, 433, 294, 341, 1389, 30, 407, 430, 6915, 341, 472, 3262, 6666, 538, 257, 4714, 11, 370, 300, 311, 2064, 3043, 420, 935, 472, 13, 961, 311, 6552, 300, 1783], "avg_logprob": -0.2666377419674838, "compression_ratio": 1.5629139072847682, "no_speech_prob": 0.0, "words": [{"start": 478.1, "end": 478.56, "word": " equal", "probability": 0.11907958984375}, {"start": 478.56, "end": 481.02, "word": " one", "probability": 0.223388671875}, {"start": 481.02, "end": 481.38, "word": " hundred.", "probability": 0.6474609375}, {"start": 482.5, "end": 483.22, "word": " So", "probability": 0.880859375}, {"start": 483.22, "end": 483.5, "word": " out", "probability": 0.70751953125}, {"start": 483.5, "end": 483.64, "word": " of", "probability": 0.96923828125}, {"start": 483.64, "end": 483.82, "word": " one", "probability": 0.8271484375}, {"start": 483.82, "end": 484.22, "word": " thousand,", "probability": 0.85400390625}, {"start": 484.86, "end": 485.16, "word": " one", "probability": 0.89501953125}, {"start": 485.16, "end": 485.48, "word": " hundred", "probability": 0.92919921875}, {"start": 485.48, "end": 486.12, "word": " students", "probability": 0.966796875}, {"start": 486.12, "end": 487.7, "word": " are", "probability": 0.875}, {"start": 487.7, "end": 488.04, "word": " smoking.", "probability": 0.88818359375}, {"start": 488.82, "end": 489.22, "word": " Now", "probability": 0.92431640625}, {"start": 489.22, "end": 489.72, "word": " what's", "probability": 0.739013671875}, {"start": 489.72, "end": 489.84, "word": " the", "probability": 0.9228515625}, {"start": 489.84, "end": 490.34, "word": " percentage", "probability": 0.86083984375}, {"start": 490.34, "end": 490.62, "word": " of", "probability": 0.96630859375}, {"start": 490.62, "end": 491.16, "word": " smokers", "probability": 0.916259765625}, {"start": 491.16, "end": 491.34, 
"word": " in", "probability": 0.935546875}, {"start": 491.34, "end": 491.54, "word": " this", "probability": 0.94873046875}, {"start": 491.54, "end": 491.84, "word": " case?", "probability": 0.91552734375}, {"start": 493.22, "end": 493.94, "word": " So", "probability": 0.52587890625}, {"start": 493.94, "end": 494.2, "word": " P", "probability": 0.317626953125}, {"start": 494.2, "end": 494.64, "word": " equals", "probability": 0.7099609375}, {"start": 494.64, "end": 495.06, "word": " this", "probability": 0.81396484375}, {"start": 495.06, "end": 495.28, "word": " one", "probability": 0.8193359375}, {"start": 495.28, "end": 495.56, "word": " hundred", "probability": 0.92724609375}, {"start": 495.56, "end": 495.88, "word": " divided", "probability": 0.6328125}, {"start": 495.88, "end": 496.2, "word": " by", "probability": 0.9736328125}, {"start": 496.2, "end": 496.7, "word": " a", "probability": 0.658203125}, {"start": 496.7, "end": 497.04, "word": " thousand,", "probability": 0.87158203125}, {"start": 498.18, "end": 498.34, "word": " so", "probability": 0.92333984375}, {"start": 498.34, "end": 498.7, "word": " that's", "probability": 0.906005859375}, {"start": 498.7, "end": 499.02, "word": " ten", "probability": 0.6962890625}, {"start": 499.02, "end": 499.38, "word": " percent", "probability": 0.955078125}, {"start": 499.38, "end": 500.2, "word": " or", "probability": 0.499755859375}, {"start": 500.2, "end": 500.52, "word": " point", "probability": 0.78759765625}, {"start": 500.52, "end": 500.76, "word": " one.", "probability": 0.89990234375}, {"start": 505.22, "end": 505.94, "word": " Let's", "probability": 0.960205078125}, {"start": 505.94, "end": 506.34, "word": " assume", "probability": 0.9072265625}, {"start": 506.34, "end": 506.7, "word": " that", "probability": 0.93701171875}, {"start": 506.7, "end": 507.56, "word": " X", "probability": 0.71630859375}], "temperature": 1.0}, {"id": 19, "seek": 52532, "start": 509.52, "end": 525.32, "text": " denotes the number 
of items in the sample having the characteristic of interest. In this case, X number of smokers in your sample, so X equals 100. N is 1000, so proportion is always", "tokens": [1441, 17251, 264, 1230, 295, 4754, 294, 264, 6889, 1419, 264, 16282, 295, 1179, 13, 682, 341, 1389, 11, 1783, 1230, 295, 32073, 433, 294, 428, 6889, 11, 370, 1783, 6915, 2319, 13, 426, 307, 9714, 11, 370, 16068, 307, 1009], "avg_logprob": -0.3028273830811183, "compression_ratio": 1.4076923076923078, "no_speech_prob": 0.0, "words": [{"start": 509.52, "end": 510.08, "word": " denotes", "probability": 0.622314453125}, {"start": 510.08, "end": 510.3, "word": " the", "probability": 0.91015625}, {"start": 510.3, "end": 510.58, "word": " number", "probability": 0.93701171875}, {"start": 510.58, "end": 510.82, "word": " of", "probability": 0.9677734375}, {"start": 510.82, "end": 511.5, "word": " items", "probability": 0.8369140625}, {"start": 511.5, "end": 512.5, "word": " in", "probability": 0.92626953125}, {"start": 512.5, "end": 512.7, "word": " the", "probability": 0.90576171875}, {"start": 512.7, "end": 512.96, "word": " sample", "probability": 0.85498046875}, {"start": 512.96, "end": 513.38, "word": " having", "probability": 0.87255859375}, {"start": 513.38, "end": 513.6, "word": " the", "probability": 0.7138671875}, {"start": 513.6, "end": 514.22, "word": " characteristic", "probability": 0.77587890625}, {"start": 514.22, "end": 514.5, "word": " of", "probability": 0.94091796875}, {"start": 514.5, "end": 514.9, "word": " interest.", "probability": 0.92041015625}, {"start": 515.3, "end": 515.44, "word": " In", "probability": 0.9306640625}, {"start": 515.44, "end": 515.72, "word": " this", "probability": 0.95166015625}, {"start": 515.72, "end": 516.32, "word": " case,", "probability": 0.9169921875}, {"start": 516.9, "end": 517.4, "word": " X", "probability": 0.40087890625}, {"start": 517.4, "end": 517.72, "word": " number", "probability": 0.78759765625}, {"start": 517.72, "end": 517.92, "word": 
" of", "probability": 0.958984375}, {"start": 517.92, "end": 518.5, "word": " smokers", "probability": 0.973388671875}, {"start": 518.5, "end": 519.4, "word": " in", "probability": 0.880859375}, {"start": 519.4, "end": 519.58, "word": " your", "probability": 0.87548828125}, {"start": 519.58, "end": 519.92, "word": " sample,", "probability": 0.85400390625}, {"start": 520.04, "end": 520.14, "word": " so", "probability": 0.90966796875}, {"start": 520.14, "end": 520.34, "word": " X", "probability": 0.9091796875}, {"start": 520.34, "end": 520.64, "word": " equals", "probability": 0.375244140625}, {"start": 520.64, "end": 520.98, "word": " 100.", "probability": 0.441650390625}, {"start": 521.68, "end": 521.9, "word": " N", "probability": 0.849609375}, {"start": 521.9, "end": 522.04, "word": " is", "probability": 0.91259765625}, {"start": 522.04, "end": 522.54, "word": " 1000,", "probability": 0.66748046875}, {"start": 523.18, "end": 523.5, "word": " so", "probability": 0.93408203125}, {"start": 523.5, "end": 524.48, "word": " proportion", "probability": 0.54443359375}, {"start": 524.48, "end": 524.82, "word": " is", "probability": 0.93115234375}, {"start": 524.82, "end": 525.32, "word": " always", "probability": 0.912109375}], "temperature": 1.0}, {"id": 20, "seek": 54948, "start": 527.06, "end": 549.48, "text": " equals X divided by N, X number of items in the characteristic you have. N is the sample size, so B equals X over N. Sometimes it might be X equals zero. 
So for example suppose I select a random sample of size 1,000 and none of them", "tokens": [6915, 1783, 6666, 538, 426, 11, 1783, 1230, 295, 4754, 294, 264, 16282, 291, 362, 13, 426, 307, 264, 6889, 2744, 11, 370, 363, 6915, 1783, 670, 426, 13, 4803, 309, 1062, 312, 1783, 6915, 4018, 13, 407, 337, 1365, 7297, 286, 3048, 257, 4974, 6889, 295, 2744, 502, 11, 1360, 293, 6022, 295, 552], "avg_logprob": -0.26241630049688475, "compression_ratio": 1.420731707317073, "no_speech_prob": 0.0, "words": [{"start": 527.06, "end": 527.6199999999999, "word": " equals", "probability": 0.25341796875}, {"start": 527.6199999999999, "end": 528.18, "word": " X", "probability": 0.429443359375}, {"start": 528.18, "end": 528.46, "word": " divided", "probability": 0.74658203125}, {"start": 528.46, "end": 528.62, "word": " by", "probability": 0.97265625}, {"start": 528.62, "end": 528.78, "word": " N,", "probability": 0.84375}, {"start": 529.26, "end": 529.52, "word": " X", "probability": 0.96533203125}, {"start": 529.52, "end": 529.86, "word": " number", "probability": 0.9423828125}, {"start": 529.86, "end": 530.14, "word": " of", "probability": 0.9619140625}, {"start": 530.14, "end": 530.76, "word": " items", "probability": 0.8251953125}, {"start": 530.76, "end": 533.9, "word": " in", "probability": 0.347900390625}, {"start": 533.9, "end": 534.16, "word": " the", "probability": 0.91796875}, {"start": 534.16, "end": 534.86, "word": " characteristic", "probability": 0.8525390625}, {"start": 534.86, "end": 535.14, "word": " you", "probability": 0.9169921875}, {"start": 535.14, "end": 535.42, "word": " have.", "probability": 0.94873046875}, {"start": 536.16, "end": 536.44, "word": " N", "probability": 0.93896484375}, {"start": 536.44, "end": 536.68, "word": " is", "probability": 0.9453125}, {"start": 536.68, "end": 536.86, "word": " the", "probability": 0.9140625}, {"start": 536.86, "end": 537.08, "word": " sample", "probability": 0.7978515625}, {"start": 537.08, "end": 537.58, "word": " 
size,", "probability": 0.8623046875}, {"start": 537.7, "end": 537.84, "word": " so", "probability": 0.9375}, {"start": 537.84, "end": 538.0, "word": " B", "probability": 0.53662109375}, {"start": 538.0, "end": 538.28, "word": " equals", "probability": 0.662109375}, {"start": 538.28, "end": 538.48, "word": " X", "probability": 0.96044921875}, {"start": 538.48, "end": 538.64, "word": " over", "probability": 0.884765625}, {"start": 538.64, "end": 538.84, "word": " N.", "probability": 0.9951171875}, {"start": 539.7, "end": 540.26, "word": " Sometimes", "probability": 0.91015625}, {"start": 540.26, "end": 540.58, "word": " it", "probability": 0.4208984375}, {"start": 540.58, "end": 540.74, "word": " might", "probability": 0.87939453125}, {"start": 540.74, "end": 540.98, "word": " be", "probability": 0.939453125}, {"start": 540.98, "end": 541.24, "word": " X", "probability": 0.9697265625}, {"start": 541.24, "end": 541.64, "word": " equals", "probability": 0.857421875}, {"start": 541.64, "end": 542.48, "word": " zero.", "probability": 0.47900390625}, {"start": 543.9, "end": 544.12, "word": " So", "probability": 0.900390625}, {"start": 544.12, "end": 544.34, "word": " for", "probability": 0.68798828125}, {"start": 544.34, "end": 544.68, "word": " example", "probability": 0.97509765625}, {"start": 544.68, "end": 545.04, "word": " suppose", "probability": 0.51025390625}, {"start": 545.04, "end": 545.34, "word": " I", "probability": 0.95361328125}, {"start": 545.34, "end": 545.66, "word": " select", "probability": 0.86181640625}, {"start": 545.66, "end": 545.8, "word": " a", "probability": 0.9697265625}, {"start": 545.8, "end": 546.0, "word": " random", "probability": 0.8720703125}, {"start": 546.0, "end": 546.44, "word": " sample", "probability": 0.85791015625}, {"start": 546.44, "end": 546.72, "word": " of", "probability": 0.59130859375}, {"start": 546.72, "end": 547.34, "word": " size", "probability": 0.80712890625}, {"start": 547.34, "end": 547.62, "word": " 1", 
"probability": 0.299560546875}, {"start": 547.62, "end": 547.92, "word": ",000", "probability": 0.984130859375}, {"start": 547.92, "end": 548.72, "word": " and", "probability": 0.67578125}, {"start": 548.72, "end": 549.06, "word": " none", "probability": 0.93505859375}, {"start": 549.06, "end": 549.22, "word": " of", "probability": 0.97021484375}, {"start": 549.22, "end": 549.48, "word": " them", "probability": 0.89501953125}], "temperature": 1.0}, {"id": 21, "seek": 57362, "start": 550.94, "end": 573.62, "text": " So in this case, x equals zero, that means the percentage equals zero. So we can have zero percentage if x equals zero. Also in the upper limit, suppose here we are talking about students have scores above 80. And we select a random sample, and that sample has", "tokens": [407, 294, 341, 1389, 11, 2031, 6915, 4018, 11, 300, 1355, 264, 9668, 6915, 4018, 13, 407, 321, 393, 362, 4018, 9668, 498, 2031, 6915, 4018, 13, 2743, 294, 264, 6597, 4948, 11, 7297, 510, 321, 366, 1417, 466, 1731, 362, 13444, 3673, 4688, 13, 400, 321, 3048, 257, 4974, 6889, 11, 293, 300, 6889, 575], "avg_logprob": -0.23807565371195474, "compression_ratio": 1.6111111111111112, "no_speech_prob": 0.0, "words": [{"start": 550.94, "end": 551.62, "word": " So", "probability": 0.28271484375}, {"start": 551.62, "end": 552.16, "word": " in", "probability": 0.70458984375}, {"start": 552.16, "end": 552.34, "word": " this", "probability": 0.94384765625}, {"start": 552.34, "end": 552.58, "word": " case,", "probability": 0.92724609375}, {"start": 552.64, "end": 552.92, "word": " x", "probability": 0.5595703125}, {"start": 552.92, "end": 553.28, "word": " equals", "probability": 0.68359375}, {"start": 553.28, "end": 553.64, "word": " zero,", "probability": 0.54345703125}, {"start": 554.04, "end": 554.38, "word": " that", "probability": 0.88671875}, {"start": 554.38, "end": 554.86, "word": " means", "probability": 0.9375}, {"start": 554.86, "end": 555.82, "word": " the", "probability": 0.7451171875}, 
{"start": 555.82, "end": 556.3, "word": " percentage", "probability": 0.76806640625}, {"start": 556.3, "end": 557.12, "word": " equals", "probability": 0.81298828125}, {"start": 557.12, "end": 557.46, "word": " zero.", "probability": 0.87548828125}, {"start": 558.82, "end": 559.34, "word": " So", "probability": 0.94970703125}, {"start": 559.34, "end": 559.48, "word": " we", "probability": 0.8623046875}, {"start": 559.48, "end": 559.68, "word": " can", "probability": 0.9453125}, {"start": 559.68, "end": 560.02, "word": " have", "probability": 0.94189453125}, {"start": 560.02, "end": 560.5, "word": " zero", "probability": 0.791015625}, {"start": 560.5, "end": 561.0, "word": " percentage", "probability": 0.8583984375}, {"start": 561.0, "end": 561.36, "word": " if", "probability": 0.89306640625}, {"start": 561.36, "end": 561.68, "word": " x", "probability": 0.95654296875}, {"start": 561.68, "end": 562.0, "word": " equals", "probability": 0.91015625}, {"start": 562.0, "end": 562.3, "word": " zero.", "probability": 0.8779296875}, {"start": 563.24, "end": 563.68, "word": " Also", "probability": 0.94140625}, {"start": 563.68, "end": 563.86, "word": " in", "probability": 0.75}, {"start": 563.86, "end": 564.06, "word": " the", "probability": 0.91748046875}, {"start": 564.06, "end": 564.3, "word": " upper", "probability": 0.70947265625}, {"start": 564.3, "end": 564.76, "word": " limit,", "probability": 0.966796875}, {"start": 565.76, "end": 566.04, "word": " suppose", "probability": 0.59619140625}, {"start": 566.04, "end": 566.28, "word": " here", "probability": 0.80712890625}, {"start": 566.28, "end": 566.4, "word": " we", "probability": 0.90625}, {"start": 566.4, "end": 566.52, "word": " are", "probability": 0.919921875}, {"start": 566.52, "end": 566.86, "word": " talking", "probability": 0.8466796875}, {"start": 566.86, "end": 567.28, "word": " about", "probability": 0.90478515625}, {"start": 567.28, "end": 567.98, "word": " students", "probability": 0.96728515625}, 
{"start": 567.98, "end": 569.04, "word": " have", "probability": 0.78857421875}, {"start": 569.04, "end": 569.58, "word": " scores", "probability": 0.79541015625}, {"start": 569.58, "end": 570.02, "word": " above", "probability": 0.95947265625}, {"start": 570.02, "end": 570.38, "word": " 80.", "probability": 0.8212890625}, {"start": 571.08, "end": 571.3, "word": " And", "probability": 0.89697265625}, {"start": 571.3, "end": 571.42, "word": " we", "probability": 0.74609375}, {"start": 571.42, "end": 571.7, "word": " select", "probability": 0.841796875}, {"start": 571.7, "end": 571.84, "word": " a", "probability": 0.8544921875}, {"start": 571.84, "end": 572.02, "word": " random", "probability": 0.84375}, {"start": 572.02, "end": 572.4, "word": " sample,", "probability": 0.87060546875}, {"start": 572.52, "end": 572.62, "word": " and", "probability": 0.93310546875}, {"start": 572.62, "end": 572.86, "word": " that", "probability": 0.93896484375}, {"start": 572.86, "end": 573.18, "word": " sample", "probability": 0.85986328125}, {"start": 573.18, "end": 573.62, "word": " has", "probability": 0.93359375}], "temperature": 1.0}, {"id": 22, "seek": 60194, "start": 575.1, "end": 601.94, "text": " All of the students have scores above 80, so that means x equals 1000, so percentage is 1. So 1000 divided by 1000 is 1. So the minimum value of B is 0, the maximum is 1, that means B ranges from 0 to 1. So it could be 0, it could be 1, but almost B is between 0 and 1. 
B is the proportion.", "tokens": [1057, 295, 264, 1731, 362, 13444, 3673, 4688, 11, 370, 300, 1355, 2031, 6915, 9714, 11, 370, 9668, 307, 502, 13, 407, 9714, 6666, 538, 9714, 307, 502, 13, 407, 264, 7285, 2158, 295, 363, 307, 1958, 11, 264, 6674, 307, 502, 11, 300, 1355, 363, 22526, 490, 1958, 281, 502, 13, 407, 309, 727, 312, 1958, 11, 309, 727, 312, 502, 11, 457, 1920, 363, 307, 1296, 1958, 293, 502, 13, 363, 307, 264, 16068, 13], "avg_logprob": -0.18830128319752523, "compression_ratio": 1.6348314606741574, "no_speech_prob": 0.0, "words": [{"start": 575.1, "end": 575.44, "word": " All", "probability": 0.744140625}, {"start": 575.44, "end": 575.56, "word": " of", "probability": 0.481689453125}, {"start": 575.56, "end": 575.68, "word": " the", "probability": 0.91748046875}, {"start": 575.68, "end": 576.1, "word": " students", "probability": 0.97119140625}, {"start": 576.1, "end": 576.44, "word": " have", "probability": 0.9150390625}, {"start": 576.44, "end": 576.92, "word": " scores", "probability": 0.78076171875}, {"start": 576.92, "end": 577.32, "word": " above", "probability": 0.9609375}, {"start": 577.32, "end": 578.62, "word": " 80,", "probability": 0.6875}, {"start": 579.08, "end": 579.26, "word": " so", "probability": 0.9150390625}, {"start": 579.26, "end": 579.5, "word": " that", "probability": 0.93359375}, {"start": 579.5, "end": 579.84, "word": " means", "probability": 0.92529296875}, {"start": 579.84, "end": 580.6, "word": " x", "probability": 0.580078125}, {"start": 580.6, "end": 581.0, "word": " equals", "probability": 0.48095703125}, {"start": 581.0, "end": 581.52, "word": " 1000,", "probability": 0.5966796875}, {"start": 582.38, "end": 582.64, "word": " so", "probability": 0.9189453125}, {"start": 582.64, "end": 583.12, "word": " percentage", "probability": 0.427978515625}, {"start": 583.12, "end": 583.38, "word": " is", "probability": 0.93701171875}, {"start": 583.38, "end": 583.58, "word": " 1.", "probability": 0.64306640625}, {"start": 583.7, 
"end": 583.82, "word": " So", "probability": 0.8583984375}, {"start": 583.82, "end": 584.3, "word": " 1000", "probability": 0.716796875}, {"start": 584.3, "end": 584.64, "word": " divided", "probability": 0.72802734375}, {"start": 584.64, "end": 584.88, "word": " by", "probability": 0.97021484375}, {"start": 584.88, "end": 585.28, "word": " 1000", "probability": 0.79638671875}, {"start": 585.28, "end": 585.54, "word": " is", "probability": 0.82080078125}, {"start": 585.54, "end": 585.72, "word": " 1.", "probability": 0.841796875}, {"start": 586.22, "end": 586.44, "word": " So", "probability": 0.908203125}, {"start": 586.44, "end": 586.66, "word": " the", "probability": 0.830078125}, {"start": 586.66, "end": 586.94, "word": " minimum", "probability": 0.96240234375}, {"start": 586.94, "end": 587.36, "word": " value", "probability": 0.970703125}, {"start": 587.36, "end": 587.5, "word": " of", "probability": 0.87109375}, {"start": 587.5, "end": 587.58, "word": " B", "probability": 0.546875}, {"start": 587.58, "end": 587.76, "word": " is", "probability": 0.9501953125}, {"start": 587.76, "end": 588.02, "word": " 0,", "probability": 0.72216796875}, {"start": 589.34, "end": 589.64, "word": " the", "probability": 0.8857421875}, {"start": 589.64, "end": 590.0, "word": " maximum", "probability": 0.93994140625}, {"start": 590.0, "end": 590.22, "word": " is", "probability": 0.9375}, {"start": 590.22, "end": 590.46, "word": " 1,", "probability": 0.94970703125}, {"start": 590.6, "end": 590.78, "word": " that", "probability": 0.82666015625}, {"start": 590.78, "end": 591.14, "word": " means", "probability": 0.93115234375}, {"start": 591.14, "end": 591.4, "word": " B", "probability": 0.8671875}, {"start": 591.4, "end": 591.82, "word": " ranges", "probability": 0.884765625}, {"start": 591.82, "end": 592.16, "word": " from", "probability": 0.88427734375}, {"start": 592.16, "end": 592.42, "word": " 0", "probability": 0.97021484375}, {"start": 592.42, "end": 592.56, "word": " to", 
"probability": 0.9765625}, {"start": 592.56, "end": 592.78, "word": " 1.", "probability": 0.9970703125}, {"start": 592.88, "end": 593.0, "word": " So", "probability": 0.92626953125}, {"start": 593.0, "end": 593.1, "word": " it", "probability": 0.92529296875}, {"start": 593.1, "end": 593.24, "word": " could", "probability": 0.88623046875}, {"start": 593.24, "end": 593.38, "word": " be", "probability": 0.9462890625}, {"start": 593.38, "end": 593.58, "word": " 0,", "probability": 0.9443359375}, {"start": 593.64, "end": 593.68, "word": " it", "probability": 0.91943359375}, {"start": 593.68, "end": 593.78, "word": " could", "probability": 0.8896484375}, {"start": 593.78, "end": 593.92, "word": " be", "probability": 0.94580078125}, {"start": 593.92, "end": 594.16, "word": " 1,", "probability": 0.9931640625}, {"start": 594.84, "end": 595.12, "word": " but", "probability": 0.92822265625}, {"start": 595.12, "end": 596.04, "word": " almost", "probability": 0.82080078125}, {"start": 596.04, "end": 597.44, "word": " B", "probability": 0.931640625}, {"start": 597.44, "end": 598.36, "word": " is", "probability": 0.93798828125}, {"start": 598.36, "end": 598.64, "word": " between", "probability": 0.88427734375}, {"start": 598.64, "end": 598.86, "word": " 0", "probability": 0.9931640625}, {"start": 598.86, "end": 599.02, "word": " and", "probability": 0.939453125}, {"start": 599.02, "end": 599.22, "word": " 1.", "probability": 0.998046875}, {"start": 600.72, "end": 601.24, "word": " B", "probability": 0.986328125}, {"start": 601.24, "end": 601.4, "word": " is", "probability": 0.935546875}, {"start": 601.4, "end": 601.52, "word": " the", "probability": 0.92041015625}, {"start": 601.52, "end": 601.94, "word": " proportion.", "probability": 0.80908203125}], "temperature": 1.0}, {"id": 23, "seek": 63016, "start": 604.61, "end": 630.17, "text": " So, B is, in this case, is approximately distributed as normal distribution when N is large. 
But again, how large is large enough, we'll talk about later. So, B is approximately normal distributed when N is large. Here, we assume sampling distribution with replacement if the population is finite.", "tokens": [407, 11, 363, 307, 11, 294, 341, 1389, 11, 307, 10447, 12631, 382, 2710, 7316, 562, 426, 307, 2416, 13, 583, 797, 11, 577, 2416, 307, 2416, 1547, 11, 321, 603, 751, 466, 1780, 13, 407, 11, 363, 307, 10447, 2710, 12631, 562, 426, 307, 2416, 13, 1692, 11, 321, 6552, 21179, 7316, 365, 14419, 498, 264, 4415, 307, 19362, 13], "avg_logprob": -0.19480847375046823, "compression_ratio": 1.7126436781609196, "no_speech_prob": 0.0, "words": [{"start": 604.61, "end": 604.93, "word": " So,", "probability": 0.8623046875}, {"start": 605.09, "end": 605.13, "word": " B", "probability": 0.62353515625}, {"start": 605.13, "end": 605.49, "word": " is,", "probability": 0.6845703125}, {"start": 605.59, "end": 605.71, "word": " in", "probability": 0.94189453125}, {"start": 605.71, "end": 605.91, "word": " this", "probability": 0.94970703125}, {"start": 605.91, "end": 606.21, "word": " case,", "probability": 0.91845703125}, {"start": 606.35, "end": 606.47, "word": " is", "probability": 0.86181640625}, {"start": 606.47, "end": 607.03, "word": " approximately", "probability": 0.802734375}, {"start": 607.03, "end": 607.73, "word": " distributed", "probability": 0.93701171875}, {"start": 607.73, "end": 608.01, "word": " as", "probability": 0.94775390625}, {"start": 608.01, "end": 608.35, "word": " normal", "probability": 0.783203125}, {"start": 608.35, "end": 608.97, "word": " distribution", "probability": 0.85888671875}, {"start": 608.97, "end": 609.79, "word": " when", "probability": 0.783203125}, {"start": 609.79, "end": 610.01, "word": " N", "probability": 0.71044921875}, {"start": 610.01, "end": 610.19, "word": " is", "probability": 0.955078125}, {"start": 610.19, "end": 610.57, "word": " large.", "probability": 0.97021484375}, {"start": 611.39, "end": 611.75, "word": " But", 
"probability": 0.9345703125}, {"start": 611.75, "end": 612.07, "word": " again,", "probability": 0.79248046875}, {"start": 612.49, "end": 612.69, "word": " how", "probability": 0.91162109375}, {"start": 612.69, "end": 613.05, "word": " large", "probability": 0.97119140625}, {"start": 613.05, "end": 613.21, "word": " is", "probability": 0.93115234375}, {"start": 613.21, "end": 613.47, "word": " large", "probability": 0.94970703125}, {"start": 613.47, "end": 613.93, "word": " enough,", "probability": 0.87548828125}, {"start": 614.67, "end": 615.13, "word": " we'll", "probability": 0.741943359375}, {"start": 615.13, "end": 615.33, "word": " talk", "probability": 0.900390625}, {"start": 615.33, "end": 615.53, "word": " about", "probability": 0.9091796875}, {"start": 615.53, "end": 615.91, "word": " later.", "probability": 0.84912109375}, {"start": 616.57, "end": 616.87, "word": " So,", "probability": 0.9453125}, {"start": 617.41, "end": 617.77, "word": " B", "probability": 0.9853515625}, {"start": 617.77, "end": 617.95, "word": " is", "probability": 0.958984375}, {"start": 617.95, "end": 618.43, "word": " approximately", "probability": 0.87255859375}, {"start": 618.43, "end": 618.83, "word": " normal", "probability": 0.52001953125}, {"start": 618.83, "end": 619.43, "word": " distributed", "probability": 0.7197265625}, {"start": 619.43, "end": 620.07, "word": " when", "probability": 0.90966796875}, {"start": 620.07, "end": 620.25, "word": " N", "probability": 0.98046875}, {"start": 620.25, "end": 620.43, "word": " is", "probability": 0.95166015625}, {"start": 620.43, "end": 620.77, "word": " large.", "probability": 0.9697265625}, {"start": 622.67, "end": 623.07, "word": " Here,", "probability": 0.85302734375}, {"start": 623.53, "end": 624.07, "word": " we", "probability": 0.6552734375}, {"start": 624.07, "end": 624.63, "word": " assume", "probability": 0.8935546875}, {"start": 624.63, "end": 625.53, "word": " sampling", "probability": 0.5478515625}, {"start": 625.53, 
"end": 626.29, "word": " distribution", "probability": 0.87353515625}, {"start": 626.29, "end": 626.63, "word": " with", "probability": 0.90673828125}, {"start": 626.63, "end": 627.11, "word": " replacement", "probability": 0.70263671875}, {"start": 627.11, "end": 629.07, "word": " if", "probability": 0.68603515625}, {"start": 629.07, "end": 629.23, "word": " the", "probability": 0.91748046875}, {"start": 629.23, "end": 629.53, "word": " population", "probability": 0.9833984375}, {"start": 629.53, "end": 629.83, "word": " is", "probability": 0.94140625}, {"start": 629.83, "end": 630.17, "word": " finite.", "probability": 0.98583984375}], "temperature": 1.0}, {"id": 24, "seek": 65554, "start": 631.56, "end": 655.54, "text": " And sampling without replacement from infinite population. Finite population means limited, has fixed size. Infinite population means unlimited. Because if we select a random sample without replacement from infinite population, for example suppose N equals 10,000.", "tokens": [400, 21179, 1553, 14419, 490, 13785, 4415, 13, 3773, 642, 4415, 1355, 5567, 11, 575, 6806, 2744, 13, 43368, 4415, 1355, 21950, 13, 1436, 498, 321, 3048, 257, 4974, 6889, 1553, 14419, 490, 13785, 4415, 11, 337, 1365, 7297, 426, 6915, 1266, 11, 1360, 13], "avg_logprob": -0.2629076203574305, "compression_ratio": 1.7666666666666666, "no_speech_prob": 0.0, "words": [{"start": 631.56, "end": 631.96, "word": " And", "probability": 0.383056640625}, {"start": 631.96, "end": 632.26, "word": " sampling", "probability": 0.18603515625}, {"start": 632.26, "end": 632.66, "word": " without", "probability": 0.89111328125}, {"start": 632.66, "end": 633.18, "word": " replacement", "probability": 0.814453125}, {"start": 633.18, "end": 633.62, "word": " from", "probability": 0.833984375}, {"start": 633.62, "end": 634.1, "word": " infinite", "probability": 0.826171875}, {"start": 634.1, "end": 634.64, "word": " population.", "probability": 0.873046875}, {"start": 635.22, "end": 635.58, "word": 
" Finite", "probability": 0.80078125}, {"start": 635.58, "end": 636.04, "word": " population", "probability": 0.93505859375}, {"start": 636.04, "end": 636.5, "word": " means", "probability": 0.90625}, {"start": 636.5, "end": 637.46, "word": " limited,", "probability": 0.802734375}, {"start": 638.12, "end": 638.9, "word": " has", "probability": 0.83447265625}, {"start": 638.9, "end": 639.26, "word": " fixed", "probability": 0.85546875}, {"start": 639.26, "end": 639.78, "word": " size.", "probability": 0.84814453125}, {"start": 640.6, "end": 641.52, "word": " Infinite", "probability": 0.8466796875}, {"start": 641.52, "end": 642.22, "word": " population", "probability": 0.939453125}, {"start": 642.22, "end": 642.88, "word": " means", "probability": 0.9326171875}, {"start": 642.88, "end": 644.3, "word": " unlimited.", "probability": 0.91943359375}, {"start": 645.4, "end": 645.92, "word": " Because", "probability": 0.89111328125}, {"start": 645.92, "end": 646.28, "word": " if", "probability": 0.88134765625}, {"start": 646.28, "end": 646.46, "word": " we", "probability": 0.94287109375}, {"start": 646.46, "end": 646.78, "word": " select", "probability": 0.8349609375}, {"start": 646.78, "end": 646.94, "word": " a", "probability": 0.454345703125}, {"start": 646.94, "end": 647.14, "word": " random", "probability": 0.94189453125}, {"start": 647.14, "end": 647.6, "word": " sample", "probability": 0.91650390625}, {"start": 647.6, "end": 648.26, "word": " without", "probability": 0.8701171875}, {"start": 648.26, "end": 648.8, "word": " replacement", "probability": 0.7958984375}, {"start": 648.8, "end": 649.16, "word": " from", "probability": 0.87548828125}, {"start": 649.16, "end": 649.56, "word": " infinite", "probability": 0.8857421875}, {"start": 649.56, "end": 650.22, "word": " population,", "probability": 0.93408203125}, {"start": 652.5, "end": 652.82, "word": " for", "probability": 0.8916015625}, {"start": 652.82, "end": 653.12, "word": " example", "probability": 
0.97216796875}, {"start": 653.12, "end": 653.56, "word": " suppose", "probability": 0.67822265625}, {"start": 653.56, "end": 653.76, "word": " N", "probability": 0.5478515625}, {"start": 653.76, "end": 654.12, "word": " equals", "probability": 0.2099609375}, {"start": 654.12, "end": 655.1, "word": " 10", "probability": 0.51416015625}, {"start": 655.1, "end": 655.54, "word": ",000.", "probability": 0.817138671875}], "temperature": 1.0}, {"id": 25, "seek": 68620, "start": 657.3, "end": 686.2, "text": " In this case, each person has a chance of being selected one over 10,000. Let's assume that we select someone without replacement. So in this case, if we select one with a proportion of one over 10,000, it means the second one has a chance of one divided by 9999. And the difference between these two is very small.", "tokens": [682, 341, 1389, 11, 1184, 954, 575, 257, 2931, 295, 885, 8209, 472, 670, 1266, 11, 1360, 13, 961, 311, 6552, 300, 321, 3048, 1580, 1553, 14419, 13, 407, 294, 341, 1389, 11, 498, 321, 3048, 472, 365, 257, 16068, 295, 472, 670, 1266, 11, 1360, 11, 309, 1355, 264, 1150, 472, 575, 257, 2931, 295, 472, 6666, 538, 1722, 49017, 13, 400, 264, 2649, 1296, 613, 732, 307, 588, 1359, 13], "avg_logprob": -0.15004280168716222, "compression_ratio": 1.6373056994818653, "no_speech_prob": 0.0, "words": [{"start": 657.3, "end": 657.54, "word": " In", "probability": 0.84326171875}, {"start": 657.54, "end": 657.78, "word": " this", "probability": 0.9453125}, {"start": 657.78, "end": 658.18, "word": " case,", "probability": 0.9150390625}, {"start": 658.38, "end": 658.68, "word": " each", "probability": 0.94775390625}, {"start": 658.68, "end": 659.3, "word": " person", "probability": 0.91748046875}, {"start": 659.3, "end": 659.7, "word": " has", "probability": 0.9345703125}, {"start": 659.7, "end": 659.86, "word": " a", "probability": 0.9462890625}, {"start": 659.86, "end": 660.18, "word": " chance", "probability": 0.97119140625}, {"start": 660.18, "end": 660.32, 
"word": " of", "probability": 0.958984375}, {"start": 660.32, "end": 660.48, "word": " being", "probability": 0.94384765625}, {"start": 660.48, "end": 661.0, "word": " selected", "probability": 0.87841796875}, {"start": 661.0, "end": 661.38, "word": " one", "probability": 0.5537109375}, {"start": 661.38, "end": 662.42, "word": " over", "probability": 0.72509765625}, {"start": 662.42, "end": 662.78, "word": " 10", "probability": 0.58935546875}, {"start": 662.78, "end": 663.1, "word": ",000.", "probability": 0.94482421875}, {"start": 664.48, "end": 664.84, "word": " Let's", "probability": 0.94580078125}, {"start": 664.84, "end": 665.08, "word": " assume", "probability": 0.91064453125}, {"start": 665.08, "end": 665.28, "word": " that", "probability": 0.904296875}, {"start": 665.28, "end": 665.46, "word": " we", "probability": 0.9482421875}, {"start": 665.46, "end": 665.96, "word": " select", "probability": 0.8359375}, {"start": 665.96, "end": 667.26, "word": " someone", "probability": 0.461669921875}, {"start": 667.26, "end": 668.1, "word": " without", "probability": 0.91015625}, {"start": 668.1, "end": 669.12, "word": " replacement.", "probability": 0.64013671875}, {"start": 669.92, "end": 670.3, "word": " So", "probability": 0.93359375}, {"start": 670.3, "end": 670.54, "word": " in", "probability": 0.69189453125}, {"start": 670.54, "end": 670.76, "word": " this", "probability": 0.94677734375}, {"start": 670.76, "end": 671.06, "word": " case,", "probability": 0.91552734375}, {"start": 671.14, "end": 671.24, "word": " if", "probability": 0.91943359375}, {"start": 671.24, "end": 671.34, "word": " we", "probability": 0.57666015625}, {"start": 671.34, "end": 671.66, "word": " select", "probability": 0.859375}, {"start": 671.66, "end": 671.92, "word": " one", "probability": 0.908203125}, {"start": 671.92, "end": 672.18, "word": " with", "probability": 0.8701171875}, {"start": 672.18, "end": 673.2, "word": " a", "probability": 0.77734375}, {"start": 673.2, "end": 673.58, 
"word": " proportion", "probability": 0.806640625}, {"start": 673.58, "end": 673.82, "word": " of", "probability": 0.93701171875}, {"start": 673.82, "end": 674.0, "word": " one", "probability": 0.7236328125}, {"start": 674.0, "end": 674.26, "word": " over", "probability": 0.90966796875}, {"start": 674.26, "end": 674.58, "word": " 10", "probability": 0.91259765625}, {"start": 674.58, "end": 674.98, "word": ",000,", "probability": 0.994873046875}, {"start": 675.42, "end": 675.6, "word": " it", "probability": 0.921875}, {"start": 675.6, "end": 675.82, "word": " means", "probability": 0.931640625}, {"start": 675.82, "end": 676.0, "word": " the", "probability": 0.88720703125}, {"start": 676.0, "end": 676.32, "word": " second", "probability": 0.892578125}, {"start": 676.32, "end": 676.72, "word": " one", "probability": 0.93359375}, {"start": 676.72, "end": 677.34, "word": " has", "probability": 0.91259765625}, {"start": 677.34, "end": 677.5, "word": " a", "probability": 0.98486328125}, {"start": 677.5, "end": 677.82, "word": " chance", "probability": 0.97412109375}, {"start": 677.82, "end": 678.02, "word": " of", "probability": 0.9599609375}, {"start": 678.02, "end": 678.22, "word": " one", "probability": 0.8203125}, {"start": 678.22, "end": 678.6, "word": " divided", "probability": 0.69091796875}, {"start": 678.6, "end": 679.32, "word": " by", "probability": 0.9697265625}, {"start": 679.32, "end": 682.34, "word": " 9999.", "probability": 0.83740234375}, {"start": 683.0, "end": 683.42, "word": " And", "probability": 0.93798828125}, {"start": 683.42, "end": 683.58, "word": " the", "probability": 0.9140625}, {"start": 683.58, "end": 683.92, "word": " difference", "probability": 0.86474609375}, {"start": 683.92, "end": 684.3, "word": " between", "probability": 0.8681640625}, {"start": 684.3, "end": 684.58, "word": " these", "probability": 0.85498046875}, {"start": 684.58, "end": 684.9, "word": " two", "probability": 0.93408203125}, {"start": 684.9, "end": 685.56, "word": " 
is", "probability": 0.9453125}, {"start": 685.56, "end": 685.84, "word": " very", "probability": 0.84765625}, {"start": 685.84, "end": 686.2, "word": " small.", "probability": 0.92724609375}], "temperature": 1.0}, {"id": 26, "seek": 71197, "start": 688.03, "end": 711.97, "text": " For this reason, we can select a random sample without replacement if the population size is large, because the probability to select an item from that population remains the same, or approximately the same, because 1 over 10,000 is roughly equal to 1 over 9999.", "tokens": [1171, 341, 1778, 11, 321, 393, 3048, 257, 4974, 6889, 1553, 14419, 498, 264, 4415, 2744, 307, 2416, 11, 570, 264, 8482, 281, 3048, 364, 3174, 490, 300, 4415, 7023, 264, 912, 11, 420, 10447, 264, 912, 11, 570, 502, 670, 1266, 11, 1360, 307, 9810, 2681, 281, 502, 670, 1722, 49017, 13], "avg_logprob": -0.1775173638705854, "compression_ratio": 1.5380116959064327, "no_speech_prob": 0.0, "words": [{"start": 688.03, "end": 688.25, "word": " For", "probability": 0.76904296875}, {"start": 688.25, "end": 688.49, "word": " this", "probability": 0.9345703125}, {"start": 688.49, "end": 688.85, "word": " reason,", "probability": 0.96826171875}, {"start": 688.97, "end": 689.11, "word": " we", "probability": 0.95068359375}, {"start": 689.11, "end": 689.41, "word": " can", "probability": 0.94287109375}, {"start": 689.41, "end": 689.81, "word": " select", "probability": 0.814453125}, {"start": 689.81, "end": 690.01, "word": " a", "probability": 0.98583984375}, {"start": 690.01, "end": 690.25, "word": " random", "probability": 0.84619140625}, {"start": 690.25, "end": 690.59, "word": " sample", "probability": 0.9365234375}, {"start": 690.59, "end": 691.01, "word": " without", "probability": 0.880859375}, {"start": 691.01, "end": 691.97, "word": " replacement", "probability": 0.482177734375}, {"start": 691.97, "end": 693.39, "word": " if", "probability": 0.88037109375}, {"start": 693.39, "end": 693.57, "word": " the", "probability": 
0.9091796875}, {"start": 693.57, "end": 694.01, "word": " population", "probability": 0.9375}, {"start": 694.01, "end": 694.85, "word": " size", "probability": 0.7978515625}, {"start": 694.85, "end": 695.71, "word": " is", "probability": 0.9453125}, {"start": 695.71, "end": 696.19, "word": " large,", "probability": 0.96142578125}, {"start": 696.85, "end": 697.37, "word": " because", "probability": 0.88916015625}, {"start": 697.37, "end": 697.73, "word": " the", "probability": 0.9091796875}, {"start": 697.73, "end": 698.35, "word": " probability", "probability": 0.9462890625}, {"start": 698.35, "end": 698.71, "word": " to", "probability": 0.6689453125}, {"start": 698.71, "end": 700.55, "word": " select", "probability": 0.81982421875}, {"start": 700.55, "end": 701.31, "word": " an", "probability": 0.9501953125}, {"start": 701.31, "end": 701.67, "word": " item", "probability": 0.96533203125}, {"start": 701.67, "end": 701.93, "word": " from", "probability": 0.86328125}, {"start": 701.93, "end": 702.13, "word": " that", "probability": 0.9208984375}, {"start": 702.13, "end": 702.75, "word": " population", "probability": 0.9208984375}, {"start": 702.75, "end": 704.39, "word": " remains", "probability": 0.84033203125}, {"start": 704.39, "end": 704.73, "word": " the", "probability": 0.63232421875}, {"start": 704.73, "end": 705.05, "word": " same,", "probability": 0.9150390625}, {"start": 705.37, "end": 705.61, "word": " or", "probability": 0.5390625}, {"start": 705.61, "end": 706.11, "word": " approximately", "probability": 0.88232421875}, {"start": 706.11, "end": 706.37, "word": " the", "probability": 0.91064453125}, {"start": 706.37, "end": 706.49, "word": " same,", "probability": 0.9091796875}, {"start": 706.55, "end": 706.79, "word": " because", "probability": 0.89990234375}, {"start": 706.79, "end": 707.07, "word": " 1", "probability": 0.72216796875}, {"start": 707.07, "end": 707.25, "word": " over", "probability": 0.61962890625}, {"start": 707.25, "end": 707.53, 
"word": " 10", "probability": 0.81591796875}, {"start": 707.53, "end": 707.89, "word": ",000", "probability": 0.9384765625}, {"start": 707.89, "end": 708.49, "word": " is", "probability": 0.94287109375}, {"start": 708.49, "end": 708.83, "word": " roughly", "probability": 0.85498046875}, {"start": 708.83, "end": 709.29, "word": " equal", "probability": 0.88916015625}, {"start": 709.29, "end": 709.77, "word": " to", "probability": 0.73974609375}, {"start": 709.77, "end": 709.91, "word": " 1", "probability": 0.99072265625}, {"start": 709.91, "end": 710.15, "word": " over", "probability": 0.91015625}, {"start": 710.15, "end": 711.97, "word": " 9999.", "probability": 0.765869140625}], "temperature": 1.0}, {"id": 27, "seek": 74108, "start": 714.06, "end": 741.08, "text": " Now the two conditions we have to check in order to apply or in order to say that B is approximately normally distributed. The two conditions are n times pi is at least 5 and we have n, pi is given. Also, n times 1 minus pi is at least 5. 
So there are two conditions should be satisfied in order to use", "tokens": [823, 264, 732, 4487, 321, 362, 281, 1520, 294, 1668, 281, 3079, 420, 294, 1668, 281, 584, 300, 363, 307, 10447, 5646, 12631, 13, 440, 732, 4487, 366, 297, 1413, 3895, 307, 412, 1935, 1025, 293, 321, 362, 297, 11, 3895, 307, 2212, 13, 2743, 11, 297, 1413, 502, 3175, 3895, 307, 412, 1935, 1025, 13, 407, 456, 366, 732, 4487, 820, 312, 11239, 294, 1668, 281, 764], "avg_logprob": -0.17278080401213272, "compression_ratio": 1.7514450867052023, "no_speech_prob": 0.0, "words": [{"start": 714.06, "end": 714.36, "word": " Now", "probability": 0.87255859375}, {"start": 714.36, "end": 714.52, "word": " the", "probability": 0.49169921875}, {"start": 714.52, "end": 714.68, "word": " two", "probability": 0.86669921875}, {"start": 714.68, "end": 715.14, "word": " conditions", "probability": 0.88427734375}, {"start": 715.14, "end": 715.3, "word": " we", "probability": 0.89501953125}, {"start": 715.3, "end": 715.52, "word": " have", "probability": 0.9423828125}, {"start": 715.52, "end": 715.82, "word": " to", "probability": 0.97216796875}, {"start": 715.82, "end": 716.64, "word": " check", "probability": 0.91748046875}, {"start": 716.64, "end": 717.22, "word": " in", "probability": 0.89599609375}, {"start": 717.22, "end": 717.44, "word": " order", "probability": 0.921875}, {"start": 717.44, "end": 717.78, "word": " to", "probability": 0.97119140625}, {"start": 717.78, "end": 718.6, "word": " apply", "probability": 0.83740234375}, {"start": 718.6, "end": 719.58, "word": " or", "probability": 0.53076171875}, {"start": 719.58, "end": 720.08, "word": " in", "probability": 0.82275390625}, {"start": 720.08, "end": 720.28, "word": " order", "probability": 0.93505859375}, {"start": 720.28, "end": 720.5, "word": " to", "probability": 0.96484375}, {"start": 720.5, "end": 720.7, "word": " say", "probability": 0.931640625}, {"start": 720.7, "end": 721.08, "word": " that", "probability": 0.94189453125}, {"start": 721.08, 
"end": 721.7, "word": " B", "probability": 0.58740234375}, {"start": 721.7, "end": 721.9, "word": " is", "probability": 0.9541015625}, {"start": 721.9, "end": 722.44, "word": " approximately", "probability": 0.8310546875}, {"start": 722.44, "end": 722.9, "word": " normally", "probability": 0.86279296875}, {"start": 722.9, "end": 723.5, "word": " distributed.", "probability": 0.90576171875}, {"start": 724.34, "end": 724.56, "word": " The", "probability": 0.8857421875}, {"start": 724.56, "end": 724.74, "word": " two", "probability": 0.91796875}, {"start": 724.74, "end": 725.24, "word": " conditions", "probability": 0.87646484375}, {"start": 725.24, "end": 725.72, "word": " are", "probability": 0.939453125}, {"start": 725.72, "end": 726.72, "word": " n", "probability": 0.447265625}, {"start": 726.72, "end": 727.0, "word": " times", "probability": 0.81298828125}, {"start": 727.0, "end": 727.36, "word": " pi", "probability": 0.83642578125}, {"start": 727.36, "end": 728.26, "word": " is", "probability": 0.9130859375}, {"start": 728.26, "end": 728.42, "word": " at", "probability": 0.97412109375}, {"start": 728.42, "end": 728.62, "word": " least", "probability": 0.953125}, {"start": 728.62, "end": 729.0, "word": " 5", "probability": 0.4189453125}, {"start": 729.0, "end": 729.86, "word": " and", "probability": 0.56689453125}, {"start": 729.86, "end": 730.02, "word": " we", "probability": 0.92919921875}, {"start": 730.02, "end": 730.18, "word": " have", "probability": 0.947265625}, {"start": 730.18, "end": 730.44, "word": " n,", "probability": 0.9208984375}, {"start": 730.96, "end": 731.3, "word": " pi", "probability": 0.8896484375}, {"start": 731.3, "end": 731.48, "word": " is", "probability": 0.943359375}, {"start": 731.48, "end": 731.74, "word": " given.", "probability": 0.89306640625}, {"start": 733.08, "end": 733.64, "word": " Also,", "probability": 0.93798828125}, {"start": 733.88, "end": 734.34, "word": " n", "probability": 0.97265625}, {"start": 734.34, "end": 
734.64, "word": " times", "probability": 0.92578125}, {"start": 734.64, "end": 734.88, "word": " 1", "probability": 0.845703125}, {"start": 734.88, "end": 735.18, "word": " minus", "probability": 0.9619140625}, {"start": 735.18, "end": 735.5, "word": " pi", "probability": 0.9423828125}, {"start": 735.5, "end": 735.66, "word": " is", "probability": 0.939453125}, {"start": 735.66, "end": 735.82, "word": " at", "probability": 0.96630859375}, {"start": 735.82, "end": 736.0, "word": " least", "probability": 0.96142578125}, {"start": 736.0, "end": 736.3, "word": " 5.", "probability": 0.94873046875}, {"start": 736.62, "end": 736.84, "word": " So", "probability": 0.943359375}, {"start": 736.84, "end": 737.02, "word": " there", "probability": 0.68310546875}, {"start": 737.02, "end": 737.2, "word": " are", "probability": 0.93896484375}, {"start": 737.2, "end": 737.38, "word": " two", "probability": 0.92529296875}, {"start": 737.38, "end": 738.02, "word": " conditions", "probability": 0.87255859375}, {"start": 738.02, "end": 739.16, "word": " should", "probability": 0.771484375}, {"start": 739.16, "end": 739.38, "word": " be", "probability": 0.939453125}, {"start": 739.38, "end": 739.88, "word": " satisfied", "probability": 0.88037109375}, {"start": 739.88, "end": 740.24, "word": " in", "probability": 0.93408203125}, {"start": 740.24, "end": 740.46, "word": " order", "probability": 0.923828125}, {"start": 740.46, "end": 740.72, "word": " to", "probability": 0.966796875}, {"start": 740.72, "end": 741.08, "word": " use", "probability": 0.875}], "temperature": 1.0}, {"id": 28, "seek": 77028, "start": 742.38, "end": 770.28, "text": " the normal distribution. Again, the first one, n times pi is at least 5, n times 1 minus pi is also at least 5. 
If these two conditions are satisfied, then you can say that B, or the sample proportion, is approximately normally distributed, so that's the shape of the distribution, with mean of?", "tokens": [264, 2710, 7316, 13, 3764, 11, 264, 700, 472, 11, 297, 1413, 3895, 307, 412, 1935, 1025, 11, 297, 1413, 502, 3175, 3895, 307, 611, 412, 1935, 1025, 13, 759, 613, 732, 4487, 366, 11239, 11, 550, 291, 393, 584, 300, 363, 11, 420, 264, 6889, 16068, 11, 307, 10447, 5646, 12631, 11, 370, 300, 311, 264, 3909, 295, 264, 7316, 11, 365, 914, 295, 30], "avg_logprob": -0.21723413645331538, "compression_ratio": 1.6263736263736264, "no_speech_prob": 0.0, "words": [{"start": 742.38, "end": 742.76, "word": " the", "probability": 0.2044677734375}, {"start": 742.76, "end": 743.32, "word": " normal", "probability": 0.86962890625}, {"start": 743.32, "end": 744.26, "word": " distribution.", "probability": 0.81640625}, {"start": 744.9, "end": 745.22, "word": " Again,", "probability": 0.86572265625}, {"start": 745.32, "end": 745.42, "word": " the", "probability": 0.87255859375}, {"start": 745.42, "end": 745.68, "word": " first", "probability": 0.873046875}, {"start": 745.68, "end": 746.02, "word": " one,", "probability": 0.91552734375}, {"start": 746.62, "end": 746.78, "word": " n", "probability": 0.60595703125}, {"start": 746.78, "end": 747.06, "word": " times", "probability": 0.79150390625}, {"start": 747.06, "end": 747.44, "word": " pi", "probability": 0.75439453125}, {"start": 747.44, "end": 749.22, "word": " is", "probability": 0.478515625}, {"start": 749.22, "end": 749.34, "word": " at", "probability": 0.95068359375}, {"start": 749.34, "end": 749.56, "word": " least", "probability": 0.97021484375}, {"start": 749.56, "end": 749.92, "word": " 5,", "probability": 0.60302734375}, {"start": 750.84, "end": 751.2, "word": " n", "probability": 0.84912109375}, {"start": 751.2, "end": 751.58, "word": " times", "probability": 0.92822265625}, {"start": 751.58, "end": 752.7, "word": " 1", 
"probability": 0.8916015625}, {"start": 752.7, "end": 752.98, "word": " minus", "probability": 0.96484375}, {"start": 752.98, "end": 753.16, "word": " pi", "probability": 0.9169921875}, {"start": 753.16, "end": 753.32, "word": " is", "probability": 0.931640625}, {"start": 753.32, "end": 753.8, "word": " also", "probability": 0.87646484375}, {"start": 753.8, "end": 754.14, "word": " at", "probability": 0.95263671875}, {"start": 754.14, "end": 754.32, "word": " least", "probability": 0.95947265625}, {"start": 754.32, "end": 754.6, "word": " 5.", "probability": 0.9326171875}, {"start": 755.7, "end": 756.42, "word": " If", "probability": 0.95703125}, {"start": 756.42, "end": 756.7, "word": " these", "probability": 0.8564453125}, {"start": 756.7, "end": 756.88, "word": " two", "probability": 0.83642578125}, {"start": 756.88, "end": 757.38, "word": " conditions", "probability": 0.89013671875}, {"start": 757.38, "end": 757.76, "word": " are", "probability": 0.9453125}, {"start": 757.76, "end": 758.42, "word": " satisfied,", "probability": 0.89697265625}, {"start": 758.82, "end": 759.24, "word": " then", "probability": 0.85205078125}, {"start": 759.24, "end": 759.6, "word": " you", "probability": 0.9501953125}, {"start": 759.6, "end": 759.88, "word": " can", "probability": 0.94140625}, {"start": 759.88, "end": 760.12, "word": " say", "probability": 0.89599609375}, {"start": 760.12, "end": 760.56, "word": " that", "probability": 0.9443359375}, {"start": 760.56, "end": 761.6, "word": " B,", "probability": 0.71240234375}, {"start": 761.78, "end": 762.04, "word": " or", "probability": 0.95166015625}, {"start": 762.04, "end": 762.18, "word": " the", "probability": 0.91455078125}, {"start": 762.18, "end": 762.42, "word": " sample", "probability": 0.46240234375}, {"start": 762.42, "end": 762.92, "word": " proportion,", "probability": 0.81103515625}, {"start": 763.62, "end": 763.9, "word": " is", "probability": 0.953125}, {"start": 763.9, "end": 764.52, "word": " approximately", 
"probability": 0.8916015625}, {"start": 764.52, "end": 765.08, "word": " normally", "probability": 0.87109375}, {"start": 765.08, "end": 765.78, "word": " distributed,", "probability": 0.9111328125}, {"start": 766.02, "end": 766.16, "word": " so", "probability": 0.93408203125}, {"start": 766.16, "end": 766.42, "word": " that's", "probability": 0.930419921875}, {"start": 766.42, "end": 766.62, "word": " the", "probability": 0.91943359375}, {"start": 766.62, "end": 767.0, "word": " shape", "probability": 0.9140625}, {"start": 767.0, "end": 767.62, "word": " of", "probability": 0.96533203125}, {"start": 767.62, "end": 767.78, "word": " the", "probability": 0.8486328125}, {"start": 767.78, "end": 768.28, "word": " distribution,", "probability": 0.85791015625}, {"start": 769.32, "end": 769.6, "word": " with", "probability": 0.91748046875}, {"start": 769.6, "end": 769.86, "word": " mean", "probability": 0.9609375}, {"start": 769.86, "end": 770.28, "word": " of?", "probability": 0.97412109375}], "temperature": 1.0}, {"id": 29, "seek": 80086, "start": 771.98, "end": 800.86, "text": " Mu of p equals pi, so the mean of p equals pi, with sigma of p equals square root of pi times 1 minus pi, divided by n. So that's the mean of p is always pi, and sigma of p equals square root of pi times 1 minus pi divided by n. 
Let's compare this result.", "tokens": [15601, 295, 280, 6915, 3895, 11, 370, 264, 914, 295, 280, 6915, 3895, 11, 365, 12771, 295, 280, 6915, 3732, 5593, 295, 3895, 1413, 502, 3175, 3895, 11, 6666, 538, 297, 13, 407, 300, 311, 264, 914, 295, 280, 307, 1009, 3895, 11, 293, 12771, 295, 280, 6915, 3732, 5593, 295, 3895, 1413, 502, 3175, 3895, 6666, 538, 297, 13, 961, 311, 6794, 341, 1874, 13], "avg_logprob": -0.20009328358208955, "compression_ratio": 2.0, "no_speech_prob": 0.0, "words": [{"start": 771.98, "end": 772.36, "word": " Mu", "probability": 0.411376953125}, {"start": 772.36, "end": 772.64, "word": " of", "probability": 0.89990234375}, {"start": 772.64, "end": 772.76, "word": " p", "probability": 0.27392578125}, {"start": 772.76, "end": 773.1, "word": " equals", "probability": 0.64794921875}, {"start": 773.1, "end": 773.56, "word": " pi,", "probability": 0.82568359375}, {"start": 774.52, "end": 774.9, "word": " so", "probability": 0.92626953125}, {"start": 774.9, "end": 775.4, "word": " the", "probability": 0.8720703125}, {"start": 775.4, "end": 775.64, "word": " mean", "probability": 0.974609375}, {"start": 775.64, "end": 776.02, "word": " of", "probability": 0.9580078125}, {"start": 776.02, "end": 776.18, "word": " p", "probability": 0.84228515625}, {"start": 776.18, "end": 776.5, "word": " equals", "probability": 0.85595703125}, {"start": 776.5, "end": 776.96, "word": " pi,", "probability": 0.94873046875}, {"start": 777.52, "end": 777.86, "word": " with", "probability": 0.84228515625}, {"start": 777.86, "end": 778.24, "word": " sigma", "probability": 0.90673828125}, {"start": 778.24, "end": 778.54, "word": " of", "probability": 0.951171875}, {"start": 778.54, "end": 778.74, "word": " p", "probability": 0.83154296875}, {"start": 778.74, "end": 779.18, "word": " equals", "probability": 0.92919921875}, {"start": 779.18, "end": 779.52, "word": " square", "probability": 0.80078125}, {"start": 779.52, "end": 779.88, "word": " root", "probability": 0.93212890625}, 
{"start": 779.88, "end": 784.1, "word": " of", "probability": 0.68798828125}, {"start": 784.1, "end": 784.4, "word": " pi", "probability": 0.94677734375}, {"start": 784.4, "end": 784.78, "word": " times", "probability": 0.88818359375}, {"start": 784.78, "end": 784.98, "word": " 1", "probability": 0.77099609375}, {"start": 784.98, "end": 785.22, "word": " minus", "probability": 0.92138671875}, {"start": 785.22, "end": 785.56, "word": " pi,", "probability": 0.95751953125}, {"start": 785.74, "end": 785.98, "word": " divided", "probability": 0.798828125}, {"start": 785.98, "end": 786.18, "word": " by", "probability": 0.9697265625}, {"start": 786.18, "end": 786.42, "word": " n.", "probability": 0.76611328125}, {"start": 787.66, "end": 788.32, "word": " So", "probability": 0.95068359375}, {"start": 788.32, "end": 788.9, "word": " that's", "probability": 0.804443359375}, {"start": 788.9, "end": 789.16, "word": " the", "probability": 0.52490234375}, {"start": 789.16, "end": 789.32, "word": " mean", "probability": 0.947265625}, {"start": 789.32, "end": 789.46, "word": " of", "probability": 0.9658203125}, {"start": 789.46, "end": 789.72, "word": " p", "probability": 0.751953125}, {"start": 789.72, "end": 790.4, "word": " is", "probability": 0.7939453125}, {"start": 790.4, "end": 790.94, "word": " always", "probability": 0.8896484375}, {"start": 790.94, "end": 791.62, "word": " pi,", "probability": 0.88623046875}, {"start": 792.38, "end": 792.78, "word": " and", "probability": 0.89453125}, {"start": 792.78, "end": 793.08, "word": " sigma", "probability": 0.92138671875}, {"start": 793.08, "end": 793.32, "word": " of", "probability": 0.92822265625}, {"start": 793.32, "end": 793.6, "word": " p", "probability": 0.66943359375}, {"start": 793.6, "end": 794.12, "word": " equals", "probability": 0.93701171875}, {"start": 794.12, "end": 794.42, "word": " square", "probability": 0.81787109375}, {"start": 794.42, "end": 794.64, "word": " root", "probability": 0.91845703125}, {"start": 
794.64, "end": 794.86, "word": " of", "probability": 0.96484375}, {"start": 794.86, "end": 795.12, "word": " pi", "probability": 0.95361328125}, {"start": 795.12, "end": 795.48, "word": " times", "probability": 0.908203125}, {"start": 795.48, "end": 795.7, "word": " 1", "probability": 0.9462890625}, {"start": 795.7, "end": 796.02, "word": " minus", "probability": 0.9755859375}, {"start": 796.02, "end": 796.28, "word": " pi", "probability": 0.95263671875}, {"start": 796.28, "end": 796.5, "word": " divided", "probability": 0.7177734375}, {"start": 796.5, "end": 796.74, "word": " by", "probability": 0.97021484375}, {"start": 796.74, "end": 796.94, "word": " n.", "probability": 0.97705078125}, {"start": 798.48, "end": 799.08, "word": " Let's", "probability": 0.96142578125}, {"start": 799.08, "end": 799.48, "word": " compare", "probability": 0.96044921875}, {"start": 799.48, "end": 800.26, "word": " this", "probability": 0.935546875}, {"start": 800.26, "end": 800.86, "word": " result.", "probability": 0.95263671875}], "temperature": 1.0}, {"id": 30, "seek": 83205, "start": 802.83, "end": 832.05, "text": " with the sampling distribution of the sample mean. If you remember, the mean of x bar was mu. And here, the statistic is the sample mean, and the mean of x bar is mu. On the other hand, the mean of the statistic, my statistic is the sample proportion is five. 
So in the two cases, the mean equals the true value.", "tokens": [365, 264, 21179, 7316, 295, 264, 6889, 914, 13, 759, 291, 1604, 11, 264, 914, 295, 2031, 2159, 390, 2992, 13, 400, 510, 11, 264, 29588, 307, 264, 6889, 914, 11, 293, 264, 914, 295, 2031, 2159, 307, 2992, 13, 1282, 264, 661, 1011, 11, 264, 914, 295, 264, 29588, 11, 452, 29588, 307, 264, 6889, 16068, 307, 1732, 13, 407, 294, 264, 732, 3331, 11, 264, 914, 6915, 264, 2074, 2158, 13], "avg_logprob": -0.1993243283516652, "compression_ratio": 1.9202453987730062, "no_speech_prob": 0.0, "words": [{"start": 802.83, "end": 803.35, "word": " with", "probability": 0.414306640625}, {"start": 803.35, "end": 803.61, "word": " the", "probability": 0.422119140625}, {"start": 803.61, "end": 803.81, "word": " sampling", "probability": 0.5048828125}, {"start": 803.81, "end": 804.51, "word": " distribution", "probability": 0.8603515625}, {"start": 804.51, "end": 805.39, "word": " of", "probability": 0.94677734375}, {"start": 805.39, "end": 805.57, "word": " the", "probability": 0.9072265625}, {"start": 805.57, "end": 805.85, "word": " sample", "probability": 0.84912109375}, {"start": 805.85, "end": 806.09, "word": " mean.", "probability": 0.9169921875}, {"start": 807.23, "end": 807.61, "word": " If", "probability": 0.8828125}, {"start": 807.61, "end": 807.65, "word": " you", "probability": 0.96044921875}, {"start": 807.65, "end": 808.03, "word": " remember,", "probability": 0.87841796875}, {"start": 808.61, "end": 808.85, "word": " the", "probability": 0.91650390625}, {"start": 808.85, "end": 809.03, "word": " mean", "probability": 0.96728515625}, {"start": 809.03, "end": 810.85, "word": " of", "probability": 0.96044921875}, {"start": 810.85, "end": 811.07, "word": " x", "probability": 0.41064453125}, {"start": 811.07, "end": 811.29, "word": " bar", "probability": 0.7705078125}, {"start": 811.29, "end": 811.59, "word": " was", "probability": 0.8955078125}, {"start": 811.59, "end": 811.87, "word": " mu.", "probability": 
0.53955078125}, {"start": 812.87, "end": 813.43, "word": " And", "probability": 0.86376953125}, {"start": 813.43, "end": 813.75, "word": " here,", "probability": 0.85595703125}, {"start": 815.37, "end": 815.63, "word": " the", "probability": 0.818359375}, {"start": 815.63, "end": 816.99, "word": " statistic", "probability": 0.85546875}, {"start": 816.99, "end": 817.33, "word": " is", "probability": 0.94580078125}, {"start": 817.33, "end": 817.49, "word": " the", "probability": 0.9189453125}, {"start": 817.49, "end": 817.71, "word": " sample", "probability": 0.85009765625}, {"start": 817.71, "end": 817.99, "word": " mean,", "probability": 0.97119140625}, {"start": 818.83, "end": 819.01, "word": " and", "probability": 0.9365234375}, {"start": 819.01, "end": 819.15, "word": " the", "probability": 0.91845703125}, {"start": 819.15, "end": 819.41, "word": " mean", "probability": 0.96484375}, {"start": 819.41, "end": 820.03, "word": " of", "probability": 0.96533203125}, {"start": 820.03, "end": 820.61, "word": " x", "probability": 0.98486328125}, {"start": 820.61, "end": 820.81, "word": " bar", "probability": 0.9375}, {"start": 820.81, "end": 820.99, "word": " is", "probability": 0.9462890625}, {"start": 820.99, "end": 821.23, "word": " mu.", "probability": 0.9384765625}, {"start": 822.33, "end": 822.59, "word": " On", "probability": 0.9541015625}, {"start": 822.59, "end": 822.69, "word": " the", "probability": 0.921875}, {"start": 822.69, "end": 822.93, "word": " other", "probability": 0.884765625}, {"start": 822.93, "end": 823.33, "word": " hand,", "probability": 0.90771484375}, {"start": 823.81, "end": 824.03, "word": " the", "probability": 0.89208984375}, {"start": 824.03, "end": 824.27, "word": " mean", "probability": 0.97265625}, {"start": 824.27, "end": 824.51, "word": " of", "probability": 0.96923828125}, {"start": 824.51, "end": 824.69, "word": " the", "probability": 0.6396484375}, {"start": 824.69, "end": 825.19, "word": " statistic,", "probability": 
0.91455078125}, {"start": 825.29, "end": 825.43, "word": " my", "probability": 0.94482421875}, {"start": 825.43, "end": 825.79, "word": " statistic", "probability": 0.90234375}, {"start": 825.79, "end": 826.13, "word": " is", "probability": 0.62548828125}, {"start": 826.13, "end": 826.33, "word": " the", "probability": 0.83203125}, {"start": 826.33, "end": 826.57, "word": " sample", "probability": 0.70068359375}, {"start": 826.57, "end": 827.17, "word": " proportion", "probability": 0.7412109375}, {"start": 827.17, "end": 827.89, "word": " is", "probability": 0.67919921875}, {"start": 827.89, "end": 828.21, "word": " five.", "probability": 0.33447265625}, {"start": 828.55, "end": 828.77, "word": " So", "probability": 0.9169921875}, {"start": 828.77, "end": 829.01, "word": " in", "probability": 0.8134765625}, {"start": 829.01, "end": 829.31, "word": " the", "probability": 0.85009765625}, {"start": 829.31, "end": 829.47, "word": " two", "probability": 0.94580078125}, {"start": 829.47, "end": 830.01, "word": " cases,", "probability": 0.9267578125}, {"start": 830.55, "end": 830.77, "word": " the", "probability": 0.9140625}, {"start": 830.77, "end": 830.93, "word": " mean", "probability": 0.97412109375}, {"start": 830.93, "end": 831.33, "word": " equals", "probability": 0.7412109375}, {"start": 831.33, "end": 831.51, "word": " the", "probability": 0.8974609375}, {"start": 831.51, "end": 831.71, "word": " true", "probability": 0.962890625}, {"start": 831.71, "end": 832.05, "word": " value.", "probability": 0.97314453125}], "temperature": 1.0}, {"id": 31, "seek": 85463, "start": 832.73, "end": 854.63, "text": " I mean, the true parameter. So in this case, the mean of x bar equal mu, and mu of p equal pi. On the other hand, the sigma of x bar was sigma over root n. This looks similar, because this one's just sigma squared over n. 
But here, sigma squared is pi times 1 minus pi.", "tokens": [286, 914, 11, 264, 2074, 13075, 13, 407, 294, 341, 1389, 11, 264, 914, 295, 2031, 2159, 2681, 2992, 11, 293, 2992, 295, 280, 2681, 3895, 13, 1282, 264, 661, 1011, 11, 264, 12771, 295, 2031, 2159, 390, 12771, 670, 5593, 297, 13, 639, 1542, 2531, 11, 570, 341, 472, 311, 445, 12771, 8889, 670, 297, 13, 583, 510, 11, 12771, 8889, 307, 3895, 1413, 502, 3175, 3895, 13], "avg_logprob": -0.23370535650423596, "compression_ratio": 1.6167664670658684, "no_speech_prob": 0.0, "words": [{"start": 832.73, "end": 832.93, "word": " I", "probability": 0.7041015625}, {"start": 832.93, "end": 833.17, "word": " mean,", "probability": 0.9638671875}, {"start": 833.63, "end": 833.77, "word": " the", "probability": 0.76611328125}, {"start": 833.77, "end": 834.03, "word": " true", "probability": 0.8876953125}, {"start": 834.03, "end": 834.61, "word": " parameter.", "probability": 0.94482421875}, {"start": 835.07, "end": 835.25, "word": " So", "probability": 0.94580078125}, {"start": 835.25, "end": 835.37, "word": " in", "probability": 0.7109375}, {"start": 835.37, "end": 835.51, "word": " this", "probability": 0.9462890625}, {"start": 835.51, "end": 835.73, "word": " case,", "probability": 0.91845703125}, {"start": 835.83, "end": 835.91, "word": " the", "probability": 0.91357421875}, {"start": 835.91, "end": 836.03, "word": " mean", "probability": 0.95361328125}, {"start": 836.03, "end": 836.15, "word": " of", "probability": 0.966796875}, {"start": 836.15, "end": 836.35, "word": " x", "probability": 0.72509765625}, {"start": 836.35, "end": 836.49, "word": " bar", "probability": 0.8681640625}, {"start": 836.49, "end": 836.73, "word": " equal", "probability": 0.474609375}, {"start": 836.73, "end": 837.01, "word": " mu,", "probability": 0.6787109375}, {"start": 837.39, "end": 837.93, "word": " and", "probability": 0.94482421875}, {"start": 837.93, "end": 838.21, "word": " mu", "probability": 0.88427734375}, {"start": 838.21, "end": 
838.37, "word": " of", "probability": 0.81982421875}, {"start": 838.37, "end": 838.47, "word": " p", "probability": 0.470947265625}, {"start": 838.47, "end": 838.75, "word": " equal", "probability": 0.7646484375}, {"start": 838.75, "end": 838.97, "word": " pi.", "probability": 0.88037109375}, {"start": 839.19, "end": 839.51, "word": " On", "probability": 0.9013671875}, {"start": 839.51, "end": 839.61, "word": " the", "probability": 0.92529296875}, {"start": 839.61, "end": 839.77, "word": " other", "probability": 0.88671875}, {"start": 839.77, "end": 840.17, "word": " hand,", "probability": 0.91455078125}, {"start": 840.43, "end": 840.63, "word": " the", "probability": 0.75341796875}, {"start": 840.63, "end": 840.85, "word": " sigma", "probability": 0.91455078125}, {"start": 840.85, "end": 841.03, "word": " of", "probability": 0.9326171875}, {"start": 841.03, "end": 841.19, "word": " x", "probability": 0.98486328125}, {"start": 841.19, "end": 841.45, "word": " bar", "probability": 0.94970703125}, {"start": 841.45, "end": 841.85, "word": " was", "probability": 0.92333984375}, {"start": 841.85, "end": 842.75, "word": " sigma", "probability": 0.8955078125}, {"start": 842.75, "end": 843.03, "word": " over", "probability": 0.8251953125}, {"start": 843.03, "end": 843.29, "word": " root", "probability": 0.9638671875}, {"start": 843.29, "end": 843.55, "word": " n.", "probability": 0.814453125}, {"start": 844.81, "end": 845.11, "word": " This", "probability": 0.384033203125}, {"start": 845.11, "end": 845.41, "word": " looks", "probability": 0.78076171875}, {"start": 845.41, "end": 846.19, "word": " similar,", "probability": 0.9638671875}, {"start": 847.13, "end": 847.69, "word": " because", "probability": 0.57275390625}, {"start": 847.69, "end": 847.93, "word": " this", "probability": 0.94384765625}, {"start": 847.93, "end": 848.31, "word": " one's", "probability": 0.70654296875}, {"start": 848.31, "end": 848.73, "word": " just", "probability": 0.9072265625}, {"start": 
848.73, "end": 849.13, "word": " sigma", "probability": 0.9013671875}, {"start": 849.13, "end": 849.43, "word": " squared", "probability": 0.69482421875}, {"start": 849.43, "end": 849.61, "word": " over", "probability": 0.83642578125}, {"start": 849.61, "end": 849.81, "word": " n.", "probability": 0.8212890625}, {"start": 851.77, "end": 852.29, "word": " But", "probability": 0.41162109375}, {"start": 852.29, "end": 852.41, "word": " here,", "probability": 0.728515625}, {"start": 852.47, "end": 852.63, "word": " sigma", "probability": 0.93603515625}, {"start": 852.63, "end": 852.93, "word": " squared", "probability": 0.85302734375}, {"start": 852.93, "end": 853.11, "word": " is", "probability": 0.9482421875}, {"start": 853.11, "end": 853.39, "word": " pi", "probability": 0.9169921875}, {"start": 853.39, "end": 853.79, "word": " times", "probability": 0.923828125}, {"start": 853.79, "end": 854.03, "word": " 1", "probability": 0.58544921875}, {"start": 854.03, "end": 854.31, "word": " minus", "probability": 0.9775390625}, {"start": 854.31, "end": 854.63, "word": " pi.", "probability": 0.958984375}], "temperature": 1.0}, {"id": 32, "seek": 88791, "start": 858.43, "end": 887.91, "text": " So again, the standard distribution of B is roughly symmetric or approximately normally distributed if these two conditions are satisfied and mu of B equals pi and sigma of B equals square root of pi times 1 minus pi over n. 
Now, this score, as we mentioned before, the standard equation is given by x minus the mean of x divided by sigma.", "tokens": [407, 797, 11, 264, 3832, 7316, 295, 363, 307, 9810, 32330, 420, 10447, 5646, 12631, 498, 613, 732, 4487, 366, 11239, 293, 2992, 295, 363, 6915, 3895, 293, 12771, 295, 363, 6915, 3732, 5593, 295, 3895, 1413, 502, 3175, 3895, 670, 297, 13, 823, 11, 341, 6175, 11, 382, 321, 2835, 949, 11, 264, 3832, 5367, 307, 2212, 538, 2031, 3175, 264, 914, 295, 2031, 6666, 538, 12771, 13], "avg_logprob": -0.20870536246470042, "compression_ratio": 1.642512077294686, "no_speech_prob": 0.0, "words": [{"start": 858.43, "end": 858.77, "word": " So", "probability": 0.88037109375}, {"start": 858.77, "end": 859.13, "word": " again,", "probability": 0.84814453125}, {"start": 859.47, "end": 859.75, "word": " the", "probability": 0.90576171875}, {"start": 859.75, "end": 860.03, "word": " standard", "probability": 0.64697265625}, {"start": 860.03, "end": 860.55, "word": " distribution", "probability": 0.82470703125}, {"start": 860.55, "end": 860.79, "word": " of", "probability": 0.94775390625}, {"start": 860.79, "end": 860.97, "word": " B", "probability": 0.453125}, {"start": 860.97, "end": 861.43, "word": " is", "probability": 0.94775390625}, {"start": 861.43, "end": 861.81, "word": " roughly", "probability": 0.84521484375}, {"start": 861.81, "end": 862.47, "word": " symmetric", "probability": 0.84423828125}, {"start": 862.47, "end": 863.39, "word": " or", "probability": 0.357177734375}, {"start": 863.39, "end": 863.85, "word": " approximately", "probability": 0.810546875}, {"start": 863.85, "end": 864.29, "word": " normally", "probability": 0.75341796875}, {"start": 864.29, "end": 864.89, "word": " distributed", "probability": 0.9033203125}, {"start": 864.89, "end": 866.95, "word": " if", "probability": 0.5556640625}, {"start": 866.95, "end": 868.11, "word": " these", "probability": 0.83447265625}, {"start": 868.11, "end": 868.31, "word": " two", "probability": 
0.89013671875}, {"start": 868.31, "end": 868.93, "word": " conditions", "probability": 0.88330078125}, {"start": 868.93, "end": 870.27, "word": " are", "probability": 0.9345703125}, {"start": 870.27, "end": 870.81, "word": " satisfied", "probability": 0.88671875}, {"start": 870.81, "end": 872.09, "word": " and", "probability": 0.56396484375}, {"start": 872.09, "end": 872.31, "word": " mu", "probability": 0.595703125}, {"start": 872.31, "end": 872.43, "word": " of", "probability": 0.5048828125}, {"start": 872.43, "end": 872.51, "word": " B", "probability": 0.85791015625}, {"start": 872.51, "end": 872.77, "word": " equals", "probability": 0.55615234375}, {"start": 872.77, "end": 872.99, "word": " pi", "probability": 0.7685546875}, {"start": 872.99, "end": 873.19, "word": " and", "probability": 0.8056640625}, {"start": 873.19, "end": 873.39, "word": " sigma", "probability": 0.90869140625}, {"start": 873.39, "end": 873.57, "word": " of", "probability": 0.9169921875}, {"start": 873.57, "end": 873.69, "word": " B", "probability": 0.94921875}, {"start": 873.69, "end": 874.01, "word": " equals", "probability": 0.9326171875}, {"start": 874.01, "end": 874.43, "word": " square", "probability": 0.72412109375}, {"start": 874.43, "end": 874.59, "word": " root", "probability": 0.93896484375}, {"start": 874.59, "end": 874.75, "word": " of", "probability": 0.96826171875}, {"start": 874.75, "end": 874.97, "word": " pi", "probability": 0.9267578125}, {"start": 874.97, "end": 875.29, "word": " times", "probability": 0.912109375}, {"start": 875.29, "end": 875.53, "word": " 1", "probability": 0.47021484375}, {"start": 875.53, "end": 875.81, "word": " minus", "probability": 0.97265625}, {"start": 875.81, "end": 876.03, "word": " pi", "probability": 0.931640625}, {"start": 876.03, "end": 876.17, "word": " over", "probability": 0.8955078125}, {"start": 876.17, "end": 876.39, "word": " n.", "probability": 0.51416015625}, {"start": 877.09, "end": 877.43, "word": " Now,", "probability": 
0.94921875}, {"start": 877.95, "end": 878.23, "word": " this", "probability": 0.88232421875}, {"start": 878.23, "end": 878.71, "word": " score,", "probability": 0.80419921875}, {"start": 879.09, "end": 879.25, "word": " as", "probability": 0.95166015625}, {"start": 879.25, "end": 879.39, "word": " we", "probability": 0.9501953125}, {"start": 879.39, "end": 879.69, "word": " mentioned", "probability": 0.8271484375}, {"start": 879.69, "end": 880.19, "word": " before,", "probability": 0.8583984375}, {"start": 881.55, "end": 884.05, "word": " the", "probability": 0.8427734375}, {"start": 884.05, "end": 884.49, "word": " standard", "probability": 0.9287109375}, {"start": 884.49, "end": 884.97, "word": " equation", "probability": 0.966796875}, {"start": 884.97, "end": 885.21, "word": " is", "probability": 0.94384765625}, {"start": 885.21, "end": 885.39, "word": " given", "probability": 0.89013671875}, {"start": 885.39, "end": 885.79, "word": " by", "probability": 0.9716796875}, {"start": 885.79, "end": 886.19, "word": " x", "probability": 0.65478515625}, {"start": 886.19, "end": 886.53, "word": " minus", "probability": 0.986328125}, {"start": 886.53, "end": 886.73, "word": " the", "probability": 0.86865234375}, {"start": 886.73, "end": 886.85, "word": " mean", "probability": 0.9619140625}, {"start": 886.85, "end": 886.97, "word": " of", "probability": 0.96337890625}, {"start": 886.97, "end": 887.15, "word": " x", "probability": 0.94287109375}, {"start": 887.15, "end": 887.43, "word": " divided", "probability": 0.7978515625}, {"start": 887.43, "end": 887.63, "word": " by", "probability": 0.974609375}, {"start": 887.63, "end": 887.91, "word": " sigma.", "probability": 0.91943359375}], "temperature": 1.0}, {"id": 33, "seek": 91881, "start": 890.17, "end": 918.81, "text": " Last time, we talked about semi-distribution of x bar. So your z score equals x bar minus the mean of x bar divided by sigma of x bar. 
So that's x bar minus mu, because the mean of x bar is mu, divided by sigma of x bar, sigma over root n.", "tokens": [5264, 565, 11, 321, 2825, 466, 12909, 12, 42649, 30783, 295, 2031, 2159, 13, 407, 428, 710, 6175, 6915, 2031, 2159, 3175, 264, 914, 295, 2031, 2159, 6666, 538, 12771, 295, 2031, 2159, 13, 407, 300, 311, 2031, 2159, 3175, 2992, 11, 570, 264, 914, 295, 2031, 2159, 307, 2992, 11, 6666, 538, 12771, 295, 2031, 2159, 11, 12771, 670, 5593, 297, 13], "avg_logprob": -0.19702148088254035, "compression_ratio": 1.7777777777777777, "no_speech_prob": 0.0, "words": [{"start": 890.17, "end": 890.61, "word": " Last", "probability": 0.765625}, {"start": 890.61, "end": 891.05, "word": " time,", "probability": 0.8896484375}, {"start": 891.29, "end": 891.73, "word": " we", "probability": 0.951171875}, {"start": 891.73, "end": 891.99, "word": " talked", "probability": 0.87841796875}, {"start": 891.99, "end": 892.25, "word": " about", "probability": 0.9150390625}, {"start": 892.25, "end": 892.55, "word": " semi", "probability": 0.08856201171875}, {"start": 892.55, "end": 893.07, "word": "-distribution", "probability": 0.9191080729166666}, {"start": 893.07, "end": 893.31, "word": " of", "probability": 0.931640625}, {"start": 893.31, "end": 893.45, "word": " x", "probability": 0.490966796875}, {"start": 893.45, "end": 893.73, "word": " bar.", "probability": 0.73095703125}, {"start": 896.57, "end": 896.95, "word": " So", "probability": 0.89306640625}, {"start": 896.95, "end": 897.27, "word": " your", "probability": 0.6337890625}, {"start": 897.27, "end": 897.51, "word": " z", "probability": 0.7138671875}, {"start": 897.51, "end": 900.61, "word": " score", "probability": 0.51025390625}, {"start": 900.61, "end": 902.83, "word": " equals", "probability": 0.65283203125}, {"start": 902.83, "end": 903.39, "word": " x", "probability": 0.9677734375}, {"start": 903.39, "end": 903.59, "word": " bar", "probability": 0.8935546875}, {"start": 903.59, "end": 903.89, "word": " minus", 
"probability": 0.97705078125}, {"start": 903.89, "end": 904.07, "word": " the", "probability": 0.9013671875}, {"start": 904.07, "end": 904.17, "word": " mean", "probability": 0.97705078125}, {"start": 904.17, "end": 904.29, "word": " of", "probability": 0.96142578125}, {"start": 904.29, "end": 904.47, "word": " x", "probability": 0.9912109375}, {"start": 904.47, "end": 904.67, "word": " bar", "probability": 0.9404296875}, {"start": 904.67, "end": 904.83, "word": " divided", "probability": 0.708984375}, {"start": 904.83, "end": 905.05, "word": " by", "probability": 0.9794921875}, {"start": 905.05, "end": 905.35, "word": " sigma", "probability": 0.82666015625}, {"start": 905.35, "end": 905.51, "word": " of", "probability": 0.89453125}, {"start": 905.51, "end": 905.69, "word": " x", "probability": 0.99560546875}, {"start": 905.69, "end": 906.01, "word": " bar.", "probability": 0.94580078125}, {"start": 908.17, "end": 908.69, "word": " So", "probability": 0.912109375}, {"start": 908.69, "end": 908.95, "word": " that's", "probability": 0.904541015625}, {"start": 908.95, "end": 909.13, "word": " x", "probability": 0.9912109375}, {"start": 909.13, "end": 909.33, "word": " bar", "probability": 0.93017578125}, {"start": 909.33, "end": 909.63, "word": " minus", "probability": 0.98583984375}, {"start": 909.63, "end": 909.93, "word": " mu,", "probability": 0.8193359375}, {"start": 910.97, "end": 911.25, "word": " because", "probability": 0.90966796875}, {"start": 911.25, "end": 911.41, "word": " the", "probability": 0.9228515625}, {"start": 911.41, "end": 911.51, "word": " mean", "probability": 0.96240234375}, {"start": 911.51, "end": 911.61, "word": " of", "probability": 0.96533203125}, {"start": 911.61, "end": 911.77, "word": " x", "probability": 0.99365234375}, {"start": 911.77, "end": 911.95, "word": " bar", "probability": 0.9482421875}, {"start": 911.95, "end": 912.09, "word": " is", "probability": 0.94775390625}, {"start": 912.09, "end": 912.37, "word": " mu,", 
"probability": 0.78857421875}, {"start": 913.19, "end": 914.95, "word": " divided", "probability": 0.84375}, {"start": 914.95, "end": 915.37, "word": " by", "probability": 0.97705078125}, {"start": 915.37, "end": 915.71, "word": " sigma", "probability": 0.91650390625}, {"start": 915.71, "end": 915.89, "word": " of", "probability": 0.87158203125}, {"start": 915.89, "end": 916.05, "word": " x", "probability": 0.99853515625}, {"start": 916.05, "end": 916.39, "word": " bar,", "probability": 0.94921875}, {"start": 917.47, "end": 918.17, "word": " sigma", "probability": 0.8837890625}, {"start": 918.17, "end": 918.43, "word": " over", "probability": 0.82421875}, {"start": 918.43, "end": 918.59, "word": " root", "probability": 0.93603515625}, {"start": 918.59, "end": 918.81, "word": " n.", "probability": 0.7392578125}], "temperature": 1.0}, {"id": 34, "seek": 95050, "start": 922.62, "end": 950.5, "text": " Now let's compute z-score for p. So z equals p minus the mean of p divided by sigma. So in this case, z equals the mean of p, which is pi, divided by pi, 1 minus pi divided by p. 
So in this case, the new formula for the z-score", "tokens": [823, 718, 311, 14722, 710, 12, 4417, 418, 337, 280, 13, 407, 710, 6915, 280, 3175, 264, 914, 295, 280, 6666, 538, 12771, 13, 407, 294, 341, 1389, 11, 710, 6915, 264, 914, 295, 280, 11, 597, 307, 3895, 11, 6666, 538, 3895, 11, 502, 3175, 3895, 6666, 538, 280, 13, 407, 294, 341, 1389, 11, 264, 777, 8513, 337, 264, 710, 12, 4417, 418], "avg_logprob": -0.2298768903269912, "compression_ratio": 1.6521739130434783, "no_speech_prob": 0.0, "words": [{"start": 922.62, "end": 922.92, "word": " Now", "probability": 0.74609375}, {"start": 922.92, "end": 923.48, "word": " let's", "probability": 0.82958984375}, {"start": 923.48, "end": 924.08, "word": " compute", "probability": 0.9052734375}, {"start": 924.08, "end": 924.32, "word": " z", "probability": 0.541015625}, {"start": 924.32, "end": 924.62, "word": "-score", "probability": 0.730712890625}, {"start": 924.62, "end": 925.08, "word": " for", "probability": 0.94384765625}, {"start": 925.08, "end": 925.98, "word": " p.", "probability": 0.24951171875}, {"start": 926.6, "end": 926.92, "word": " So", "probability": 0.8984375}, {"start": 926.92, "end": 927.1, "word": " z", "probability": 0.76416015625}, {"start": 927.1, "end": 927.4, "word": " equals", "probability": 0.418212890625}, {"start": 927.4, "end": 927.72, "word": " p", "probability": 0.783203125}, {"start": 927.72, "end": 928.98, "word": " minus", "probability": 0.8916015625}, {"start": 928.98, "end": 929.18, "word": " the", "probability": 0.712890625}, {"start": 929.18, "end": 929.3, "word": " mean", "probability": 0.9453125}, {"start": 929.3, "end": 929.44, "word": " of", "probability": 0.95263671875}, {"start": 929.44, "end": 929.58, "word": " p", "probability": 0.80322265625}, {"start": 929.58, "end": 929.84, "word": " divided", "probability": 0.7392578125}, {"start": 929.84, "end": 930.08, "word": " by", "probability": 0.97607421875}, {"start": 930.08, "end": 930.4, "word": " sigma.", "probability": 
0.85693359375}, {"start": 933.68, "end": 934.28, "word": " So", "probability": 0.90673828125}, {"start": 934.28, "end": 934.42, "word": " in", "probability": 0.86083984375}, {"start": 934.42, "end": 934.62, "word": " this", "probability": 0.9521484375}, {"start": 934.62, "end": 934.98, "word": " case,", "probability": 0.91845703125}, {"start": 935.18, "end": 935.32, "word": " z", "probability": 0.96875}, {"start": 935.32, "end": 935.8, "word": " equals", "probability": 0.88134765625}, {"start": 935.8, "end": 938.28, "word": " the", "probability": 0.69677734375}, {"start": 938.28, "end": 938.54, "word": " mean", "probability": 0.94140625}, {"start": 938.54, "end": 938.76, "word": " of", "probability": 0.9501953125}, {"start": 938.76, "end": 939.08, "word": " p,", "probability": 0.744140625}, {"start": 939.32, "end": 939.54, "word": " which", "probability": 0.93505859375}, {"start": 939.54, "end": 939.72, "word": " is", "probability": 0.9521484375}, {"start": 939.72, "end": 940.06, "word": " pi,", "probability": 0.460693359375}, {"start": 942.56, "end": 943.34, "word": " divided", "probability": 0.87060546875}, {"start": 943.34, "end": 943.6, "word": " by", "probability": 0.97705078125}, {"start": 943.6, "end": 944.0, "word": " pi,", "probability": 0.7412109375}, {"start": 944.76, "end": 945.06, "word": " 1", "probability": 0.509765625}, {"start": 945.06, "end": 945.38, "word": " minus", "probability": 0.98388671875}, {"start": 945.38, "end": 945.62, "word": " pi", "probability": 0.9541015625}, {"start": 945.62, "end": 945.86, "word": " divided", "probability": 0.77587890625}, {"start": 945.86, "end": 946.12, "word": " by", "probability": 0.9736328125}, {"start": 946.12, "end": 946.28, "word": " p.", "probability": 0.457763671875}, {"start": 947.32, "end": 947.92, "word": " So", "probability": 0.94189453125}, {"start": 947.92, "end": 948.06, "word": " in", "probability": 0.90478515625}, {"start": 948.06, "end": 948.24, "word": " this", "probability": 0.9462890625}, 
{"start": 948.24, "end": 948.58, "word": " case,", "probability": 0.9169921875}, {"start": 948.68, "end": 948.84, "word": " the", "probability": 0.90673828125}, {"start": 948.84, "end": 949.1, "word": " new", "probability": 0.9267578125}, {"start": 949.1, "end": 949.44, "word": " formula", "probability": 0.90087890625}, {"start": 949.44, "end": 949.76, "word": " for", "probability": 0.94873046875}, {"start": 949.76, "end": 949.92, "word": " the", "probability": 0.9091796875}, {"start": 949.92, "end": 950.08, "word": " z", "probability": 0.9853515625}, {"start": 950.08, "end": 950.5, "word": "-score", "probability": 0.9129231770833334}], "temperature": 1.0}, {"id": 35, "seek": 98045, "start": 951.73, "end": 980.45, "text": " is given by pi minus one. So the zero zero score is equal to pi minus, p minus pi divided by root pi times one minus pi divided by small size n. Now let's do one example.", "tokens": [307, 2212, 538, 3895, 3175, 472, 13, 407, 264, 4018, 4018, 6175, 307, 2681, 281, 3895, 3175, 11, 280, 3175, 3895, 6666, 538, 5593, 3895, 1413, 472, 3175, 3895, 6666, 538, 1359, 2744, 297, 13, 823, 718, 311, 360, 472, 1365, 13], "avg_logprob": -0.30886626520822213, "compression_ratio": 1.4741379310344827, "no_speech_prob": 0.0, "words": [{"start": 951.73, "end": 952.09, "word": " is", "probability": 0.36572265625}, {"start": 952.09, "end": 952.39, "word": " given", "probability": 0.85595703125}, {"start": 952.39, "end": 952.83, "word": " by", "probability": 0.97216796875}, {"start": 952.83, "end": 954.33, "word": " pi", "probability": 0.5546875}, {"start": 954.33, "end": 954.71, "word": " minus", "probability": 0.953125}, {"start": 954.71, "end": 955.11, "word": " one.", "probability": 0.5810546875}, {"start": 957.69, "end": 958.33, "word": " So", "probability": 0.88134765625}, {"start": 958.33, "end": 958.51, "word": " the", "probability": 0.447021484375}, {"start": 958.51, "end": 958.69, "word": " zero", "probability": 0.250244140625}, {"start": 958.69, "end": 
958.81, "word": " zero", "probability": 0.1995849609375}, {"start": 958.81, "end": 959.25, "word": " score", "probability": 0.685546875}, {"start": 959.25, "end": 959.53, "word": " is", "probability": 0.81787109375}, {"start": 959.53, "end": 959.79, "word": " equal", "probability": 0.8564453125}, {"start": 959.79, "end": 959.93, "word": " to", "probability": 0.90380859375}, {"start": 959.93, "end": 960.11, "word": " pi", "probability": 0.681640625}, {"start": 960.11, "end": 960.53, "word": " minus,", "probability": 0.98388671875}, {"start": 960.73, "end": 960.85, "word": " p", "probability": 0.3876953125}, {"start": 960.85, "end": 961.13, "word": " minus", "probability": 0.98828125}, {"start": 961.13, "end": 961.51, "word": " pi", "probability": 0.94091796875}, {"start": 961.51, "end": 962.41, "word": " divided", "probability": 0.57763671875}, {"start": 962.41, "end": 962.63, "word": " by", "probability": 0.96630859375}, {"start": 962.63, "end": 962.99, "word": " root", "probability": 0.84912109375}, {"start": 962.99, "end": 964.61, "word": " pi", "probability": 0.65625}, {"start": 964.61, "end": 964.95, "word": " times", "probability": 0.9013671875}, {"start": 964.95, "end": 965.19, "word": " one", "probability": 0.88330078125}, {"start": 965.19, "end": 965.47, "word": " minus", "probability": 0.98193359375}, {"start": 965.47, "end": 965.73, "word": " pi", "probability": 0.89892578125}, {"start": 965.73, "end": 966.05, "word": " divided", "probability": 0.791015625}, {"start": 966.05, "end": 966.39, "word": " by", "probability": 0.9677734375}, {"start": 966.39, "end": 967.87, "word": " small", "probability": 0.8974609375}, {"start": 967.87, "end": 968.17, "word": " size", "probability": 0.81494140625}, {"start": 968.17, "end": 968.39, "word": " n.", "probability": 0.77294921875}, {"start": 978.27, "end": 978.91, "word": " Now", "probability": 0.8896484375}, {"start": 978.91, "end": 979.13, "word": " let's", "probability": 0.88671875}, {"start": 979.13, "end": 
979.39, "word": " do", "probability": 0.9609375}, {"start": 979.39, "end": 980.09, "word": " one", "probability": 0.9296875}, {"start": 980.09, "end": 980.45, "word": " example.", "probability": 0.97021484375}], "temperature": 1.0}, {"id": 36, "seek": 100534, "start": 983.24, "end": 1005.34, "text": " The example says that if the true proportion of waters who support proposition A is 40%. So in this case, pi is 40%. The question is, what's the probability that", "tokens": [440, 1365, 1619, 300, 498, 264, 2074, 16068, 295, 12975, 567, 1406, 24830, 316, 307, 3356, 6856, 407, 294, 341, 1389, 11, 3895, 307, 3356, 6856, 440, 1168, 307, 11, 437, 311, 264, 8482, 300], "avg_logprob": -0.2213541625274552, "compression_ratio": 1.3064516129032258, "no_speech_prob": 0.0, "words": [{"start": 983.24, "end": 984.24, "word": " The", "probability": 0.404052734375}, {"start": 984.24, "end": 984.7, "word": " example", "probability": 0.90771484375}, {"start": 984.7, "end": 985.26, "word": " says", "probability": 0.8427734375}, {"start": 985.26, "end": 985.64, "word": " that", "probability": 0.90380859375}, {"start": 985.64, "end": 987.18, "word": " if", "probability": 0.8203125}, {"start": 987.18, "end": 987.88, "word": " the", "probability": 0.9052734375}, {"start": 987.88, "end": 988.14, "word": " true", "probability": 0.9453125}, {"start": 988.14, "end": 988.7, "word": " proportion", "probability": 0.8369140625}, {"start": 988.7, "end": 989.38, "word": " of", "probability": 0.97314453125}, {"start": 989.38, "end": 989.94, "word": " waters", "probability": 0.7109375}, {"start": 989.94, "end": 991.0, "word": " who", "probability": 0.4384765625}, {"start": 991.0, "end": 991.58, "word": " support", "probability": 0.94873046875}, {"start": 991.58, "end": 992.18, "word": " proposition", "probability": 0.7490234375}, {"start": 992.18, "end": 992.68, "word": " A", "probability": 0.80712890625}, {"start": 992.68, "end": 994.26, "word": " is", "probability": 0.92529296875}, {"start": 994.26, 
"end": 995.6, "word": " 40%.", "probability": 0.761962890625}, {"start": 995.6, "end": 997.4, "word": " So", "probability": 0.8544921875}, {"start": 997.4, "end": 997.62, "word": " in", "probability": 0.68701171875}, {"start": 997.62, "end": 997.82, "word": " this", "probability": 0.95068359375}, {"start": 997.82, "end": 998.14, "word": " case,", "probability": 0.8984375}, {"start": 998.2, "end": 998.42, "word": " pi", "probability": 0.50732421875}, {"start": 998.42, "end": 998.7, "word": " is", "probability": 0.9345703125}, {"start": 998.7, "end": 999.54, "word": " 40%.", "probability": 0.95166015625}, {"start": 999.54, "end": 1002.1, "word": " The", "probability": 0.88232421875}, {"start": 1002.1, "end": 1002.46, "word": " question", "probability": 0.91357421875}, {"start": 1002.46, "end": 1002.78, "word": " is,", "probability": 0.94775390625}, {"start": 1003.7, "end": 1004.26, "word": " what's", "probability": 0.84521484375}, {"start": 1004.26, "end": 1004.42, "word": " the", "probability": 0.9248046875}, {"start": 1004.42, "end": 1004.86, "word": " probability", "probability": 0.9423828125}, {"start": 1004.86, "end": 1005.34, "word": " that", "probability": 0.93798828125}], "temperature": 1.0}, {"id": 37, "seek": 103528, "start": 1006.98, "end": 1035.28, "text": " A sample of size 200, so we select a random sample of 200, yields a sample proportion between 40% and 45%. So in this case, we are looking for the probability between 40% and 45%. 
Now, if the problem says,", "tokens": [316, 6889, 295, 2744, 2331, 11, 370, 321, 3048, 257, 4974, 6889, 295, 2331, 11, 32168, 257, 6889, 16068, 1296, 3356, 4, 293, 6905, 6856, 407, 294, 341, 1389, 11, 321, 366, 1237, 337, 264, 8482, 1296, 3356, 4, 293, 6905, 6856, 823, 11, 498, 264, 1154, 1619, 11], "avg_logprob": -0.174062497317791, "compression_ratio": 1.4609929078014185, "no_speech_prob": 0.0, "words": [{"start": 1006.98, "end": 1007.24, "word": " A", "probability": 0.486328125}, {"start": 1007.24, "end": 1007.5, "word": " sample", "probability": 0.892578125}, {"start": 1007.5, "end": 1008.48, "word": " of", "probability": 0.810546875}, {"start": 1008.48, "end": 1008.8, "word": " size", "probability": 0.83447265625}, {"start": 1008.8, "end": 1009.38, "word": " 200,", "probability": 0.8984375}, {"start": 1010.42, "end": 1010.64, "word": " so", "probability": 0.79296875}, {"start": 1010.64, "end": 1010.82, "word": " we", "probability": 0.80810546875}, {"start": 1010.82, "end": 1011.16, "word": " select", "probability": 0.8544921875}, {"start": 1011.16, "end": 1011.46, "word": " a", "probability": 0.98388671875}, {"start": 1011.46, "end": 1011.74, "word": " random", "probability": 0.85205078125}, {"start": 1011.74, "end": 1012.22, "word": " sample", "probability": 0.88623046875}, {"start": 1012.22, "end": 1013.32, "word": " of", "probability": 0.95654296875}, {"start": 1013.32, "end": 1013.84, "word": " 200,", "probability": 0.94189453125}, {"start": 1014.7, "end": 1015.18, "word": " yields", "probability": 0.79296875}, {"start": 1015.18, "end": 1015.52, "word": " a", "probability": 0.97607421875}, {"start": 1015.52, "end": 1016.22, "word": " sample", "probability": 0.806640625}, {"start": 1016.22, "end": 1016.76, "word": " proportion", "probability": 0.80908203125}, {"start": 1016.76, "end": 1017.22, "word": " between", "probability": 0.8857421875}, {"start": 1017.22, "end": 1017.8, "word": " 40", "probability": 0.96484375}, {"start": 1017.8, "end": 1018.12, 
"word": "%", "probability": 0.8046875}, {"start": 1018.12, "end": 1018.66, "word": " and", "probability": 0.94677734375}, {"start": 1018.66, "end": 1019.64, "word": " 45%.", "probability": 0.9501953125}, {"start": 1019.64, "end": 1021.92, "word": " So", "probability": 0.935546875}, {"start": 1021.92, "end": 1022.28, "word": " in", "probability": 0.6865234375}, {"start": 1022.28, "end": 1022.52, "word": " this", "probability": 0.947265625}, {"start": 1022.52, "end": 1022.88, "word": " case,", "probability": 0.90771484375}, {"start": 1022.94, "end": 1023.06, "word": " we", "probability": 0.96240234375}, {"start": 1023.06, "end": 1023.18, "word": " are", "probability": 0.92236328125}, {"start": 1023.18, "end": 1023.44, "word": " looking", "probability": 0.90869140625}, {"start": 1023.44, "end": 1023.88, "word": " for", "probability": 0.95068359375}, {"start": 1023.88, "end": 1024.68, "word": " the", "probability": 0.91357421875}, {"start": 1024.68, "end": 1025.28, "word": " probability", "probability": 0.9716796875}, {"start": 1025.28, "end": 1027.44, "word": " between", "probability": 0.8525390625}, {"start": 1027.44, "end": 1029.48, "word": " 40", "probability": 0.96923828125}, {"start": 1029.48, "end": 1029.64, "word": "%", "probability": 0.55517578125}, {"start": 1029.64, "end": 1030.32, "word": " and", "probability": 0.9462890625}, {"start": 1030.32, "end": 1031.66, "word": " 45%.", "probability": 0.968994140625}, {"start": 1031.66, "end": 1034.02, "word": " Now,", "probability": 0.9580078125}, {"start": 1034.14, "end": 1034.3, "word": " if", "probability": 0.9541015625}, {"start": 1034.3, "end": 1034.46, "word": " the", "probability": 0.83203125}, {"start": 1034.46, "end": 1034.84, "word": " problem", "probability": 0.87890625}, {"start": 1034.84, "end": 1035.28, "word": " says,", "probability": 0.876953125}], "temperature": 1.0}, {"id": 38, "seek": 106499, "start": 1036.33, "end": 1064.99, "text": " The true proportion, it means pi. 
The true mean, it means also mu. So true, it means we are talking about the entire population. So true proportion, it means the population proportion. So the word true in statistics in this case means population. So the population proportion equals", "tokens": [440, 2074, 16068, 11, 309, 1355, 3895, 13, 440, 2074, 914, 11, 309, 1355, 611, 2992, 13, 407, 2074, 11, 309, 1355, 321, 366, 1417, 466, 264, 2302, 4415, 13, 407, 2074, 16068, 11, 309, 1355, 264, 4415, 16068, 13, 407, 264, 1349, 2074, 294, 12523, 294, 341, 1389, 1355, 4415, 13, 407, 264, 4415, 16068, 6915], "avg_logprob": -0.22561961540888095, "compression_ratio": 2.0808823529411766, "no_speech_prob": 0.0, "words": [{"start": 1036.33, "end": 1036.67, "word": " The", "probability": 0.642578125}, {"start": 1036.67, "end": 1036.87, "word": " true", "probability": 0.76123046875}, {"start": 1036.87, "end": 1037.41, "word": " proportion,", "probability": 0.79150390625}, {"start": 1037.77, "end": 1039.17, "word": " it", "probability": 0.8798828125}, {"start": 1039.17, "end": 1039.47, "word": " means", "probability": 0.9208984375}, {"start": 1039.47, "end": 1039.83, "word": " pi.", "probability": 0.1854248046875}, {"start": 1042.33, "end": 1043.01, "word": " The", "probability": 0.87744140625}, {"start": 1043.01, "end": 1043.27, "word": " true", "probability": 0.951171875}, {"start": 1043.27, "end": 1043.55, "word": " mean,", "probability": 0.5205078125}, {"start": 1043.75, "end": 1043.95, "word": " it", "probability": 0.93896484375}, {"start": 1043.95, "end": 1044.21, "word": " means", "probability": 0.84619140625}, {"start": 1044.21, "end": 1044.75, "word": " also", "probability": 0.8525390625}, {"start": 1044.75, "end": 1045.33, "word": " mu.", "probability": 0.65771484375}, {"start": 1046.21, "end": 1046.49, "word": " So", "probability": 0.890625}, {"start": 1046.49, "end": 1046.77, "word": " true,", "probability": 0.62451171875}, {"start": 1046.87, "end": 1046.95, "word": " it", "probability": 0.92333984375}, 
{"start": 1046.95, "end": 1047.17, "word": " means", "probability": 0.92919921875}, {"start": 1047.17, "end": 1047.33, "word": " we", "probability": 0.8515625}, {"start": 1047.33, "end": 1047.43, "word": " are", "probability": 0.83642578125}, {"start": 1047.43, "end": 1047.77, "word": " talking", "probability": 0.84912109375}, {"start": 1047.77, "end": 1048.21, "word": " about", "probability": 0.90869140625}, {"start": 1048.21, "end": 1048.59, "word": " the", "probability": 0.916015625}, {"start": 1048.59, "end": 1048.99, "word": " entire", "probability": 0.88330078125}, {"start": 1048.99, "end": 1049.61, "word": " population.", "probability": 0.94140625}, {"start": 1050.89, "end": 1051.19, "word": " So", "probability": 0.9296875}, {"start": 1051.19, "end": 1051.53, "word": " true", "probability": 0.78564453125}, {"start": 1051.53, "end": 1052.27, "word": " proportion,", "probability": 0.82177734375}, {"start": 1052.53, "end": 1052.67, "word": " it", "probability": 0.9228515625}, {"start": 1052.67, "end": 1052.97, "word": " means", "probability": 0.92724609375}, {"start": 1052.97, "end": 1053.17, "word": " the", "probability": 0.89892578125}, {"start": 1053.17, "end": 1053.63, "word": " population", "probability": 0.9599609375}, {"start": 1053.63, "end": 1054.07, "word": " proportion.", "probability": 0.75634765625}, {"start": 1054.77, "end": 1054.99, "word": " So", "probability": 0.95166015625}, {"start": 1054.99, "end": 1055.19, "word": " the", "probability": 0.87109375}, {"start": 1055.19, "end": 1055.51, "word": " word", "probability": 0.93798828125}, {"start": 1055.51, "end": 1056.05, "word": " true", "probability": 0.78955078125}, {"start": 1056.05, "end": 1056.25, "word": " in", "probability": 0.80419921875}, {"start": 1056.25, "end": 1056.77, "word": " statistics", "probability": 0.70166015625}, {"start": 1056.77, "end": 1057.41, "word": " in", "probability": 0.61083984375}, {"start": 1057.41, "end": 1057.65, "word": " this", "probability": 0.947265625}, 
{"start": 1057.65, "end": 1057.93, "word": " case", "probability": 0.9287109375}, {"start": 1057.93, "end": 1058.45, "word": " means", "probability": 0.87939453125}, {"start": 1058.45, "end": 1061.59, "word": " population.", "probability": 0.8818359375}, {"start": 1062.21, "end": 1062.45, "word": " So", "probability": 0.9560546875}, {"start": 1062.45, "end": 1062.59, "word": " the", "probability": 0.87158203125}, {"start": 1062.59, "end": 1062.99, "word": " population", "probability": 0.94921875}, {"start": 1062.99, "end": 1063.67, "word": " proportion", "probability": 0.84423828125}, {"start": 1063.67, "end": 1064.99, "word": " equals", "probability": 0.8935546875}], "temperature": 1.0}, {"id": 39, "seek": 109242, "start": 1065.86, "end": 1092.42, "text": " 0.4, so pi is equal to 0.4. Now, if pi equals 0.4 and the sample size is 200, what's the probability that this sample proportion lies between 40 and 35 percent? Now, in order to use this score, you have to check the two assumptions.", "tokens": [1958, 13, 19, 11, 370, 3895, 307, 2681, 281, 1958, 13, 19, 13, 823, 11, 498, 3895, 6915, 1958, 13, 19, 293, 264, 6889, 2744, 307, 2331, 11, 437, 311, 264, 8482, 300, 341, 6889, 16068, 9134, 1296, 3356, 293, 6976, 3043, 30, 823, 11, 294, 1668, 281, 764, 341, 6175, 11, 291, 362, 281, 1520, 264, 732, 17695, 13], "avg_logprob": -0.1967213144067858, "compression_ratio": 1.4382716049382716, "no_speech_prob": 0.0, "words": [{"start": 1065.86, "end": 1066.14, "word": " 0", "probability": 0.163818359375}, {"start": 1066.14, "end": 1066.56, "word": ".4,", "probability": 0.98876953125}, {"start": 1066.78, "end": 1066.94, "word": " so", "probability": 0.92138671875}, {"start": 1066.94, "end": 1067.12, "word": " pi", "probability": 0.49462890625}, {"start": 1067.12, "end": 1067.3, "word": " is", "probability": 0.65185546875}, {"start": 1067.3, "end": 1067.54, "word": " equal", "probability": 0.89453125}, {"start": 1067.54, "end": 1067.64, "word": " to", "probability": 0.6083984375}, 
{"start": 1067.64, "end": 1067.8, "word": " 0", "probability": 0.93896484375}, {"start": 1067.8, "end": 1068.1, "word": ".4.", "probability": 0.998779296875}, {"start": 1069.46, "end": 1069.8, "word": " Now,", "probability": 0.927734375}, {"start": 1069.94, "end": 1070.12, "word": " if", "probability": 0.9599609375}, {"start": 1070.12, "end": 1070.4, "word": " pi", "probability": 0.92431640625}, {"start": 1070.4, "end": 1070.62, "word": " equals", "probability": 0.4443359375}, {"start": 1070.62, "end": 1070.84, "word": " 0", "probability": 0.95654296875}, {"start": 1070.84, "end": 1071.18, "word": ".4", "probability": 0.998779296875}, {"start": 1071.18, "end": 1071.48, "word": " and", "probability": 0.6650390625}, {"start": 1071.48, "end": 1071.7, "word": " the", "probability": 0.814453125}, {"start": 1071.7, "end": 1071.88, "word": " sample", "probability": 0.89404296875}, {"start": 1071.88, "end": 1072.22, "word": " size", "probability": 0.84375}, {"start": 1072.22, "end": 1072.32, "word": " is", "probability": 0.6064453125}, {"start": 1072.32, "end": 1072.72, "word": " 200,", "probability": 0.9326171875}, {"start": 1074.9, "end": 1075.48, "word": " what's", "probability": 0.88330078125}, {"start": 1075.48, "end": 1075.66, "word": " the", "probability": 0.9169921875}, {"start": 1075.66, "end": 1076.14, "word": " probability", "probability": 0.943359375}, {"start": 1076.14, "end": 1076.86, "word": " that", "probability": 0.93359375}, {"start": 1076.86, "end": 1078.16, "word": " this", "probability": 0.92236328125}, {"start": 1078.16, "end": 1078.5, "word": " sample", "probability": 0.83203125}, {"start": 1078.5, "end": 1079.1, "word": " proportion", "probability": 0.771484375}, {"start": 1079.1, "end": 1080.72, "word": " lies", "probability": 0.939453125}, {"start": 1080.72, "end": 1081.22, "word": " between", "probability": 0.86279296875}, {"start": 1081.22, "end": 1082.0, "word": " 40", "probability": 0.970703125}, {"start": 1082.0, "end": 1082.28, "word": " 
and", "probability": 0.685546875}, {"start": 1082.28, "end": 1082.96, "word": " 35", "probability": 0.630859375}, {"start": 1082.96, "end": 1083.5, "word": " percent?", "probability": 0.57080078125}, {"start": 1085.46, "end": 1086.1, "word": " Now,", "probability": 0.95068359375}, {"start": 1086.46, "end": 1086.68, "word": " in", "probability": 0.94580078125}, {"start": 1086.68, "end": 1086.88, "word": " order", "probability": 0.91796875}, {"start": 1086.88, "end": 1087.08, "word": " to", "probability": 0.96826171875}, {"start": 1087.08, "end": 1087.5, "word": " use", "probability": 0.87451171875}, {"start": 1087.5, "end": 1089.72, "word": " this", "probability": 0.8916015625}, {"start": 1089.72, "end": 1090.32, "word": " score,", "probability": 0.65185546875}, {"start": 1090.66, "end": 1090.84, "word": " you", "probability": 0.9521484375}, {"start": 1090.84, "end": 1091.04, "word": " have", "probability": 0.9404296875}, {"start": 1091.04, "end": 1091.14, "word": " to", "probability": 0.96875}, {"start": 1091.14, "end": 1091.5, "word": " check", "probability": 0.94873046875}, {"start": 1091.5, "end": 1091.8, "word": " the", "probability": 0.86767578125}, {"start": 1091.8, "end": 1091.96, "word": " two", "probability": 0.93603515625}, {"start": 1091.96, "end": 1092.42, "word": " assumptions.", "probability": 0.96875}], "temperature": 1.0}, {"id": 40, "seek": 112097, "start": 1094.17, "end": 1120.97, "text": " But since N is large enough, N is 200. For sure the two conditions most of the time are satisfied. But you have to check in order to apply this z-score. Let's see first how can we check the two conditions. 
Now, N equals 200.", "tokens": [583, 1670, 426, 307, 2416, 1547, 11, 426, 307, 2331, 13, 1171, 988, 264, 732, 4487, 881, 295, 264, 565, 366, 11239, 13, 583, 291, 362, 281, 1520, 294, 1668, 281, 3079, 341, 710, 12, 4417, 418, 13, 961, 311, 536, 700, 577, 393, 321, 1520, 264, 732, 4487, 13, 823, 11, 426, 6915, 2331, 13], "avg_logprob": -0.1888706182178698, "compression_ratio": 1.4423076923076923, "no_speech_prob": 0.0, "words": [{"start": 1094.17, "end": 1094.43, "word": " But", "probability": 0.69091796875}, {"start": 1094.43, "end": 1094.77, "word": " since", "probability": 0.78466796875}, {"start": 1094.77, "end": 1094.95, "word": " N", "probability": 0.60546875}, {"start": 1094.95, "end": 1095.11, "word": " is", "probability": 0.9404296875}, {"start": 1095.11, "end": 1095.45, "word": " large", "probability": 0.94140625}, {"start": 1095.45, "end": 1095.87, "word": " enough,", "probability": 0.88330078125}, {"start": 1097.17, "end": 1097.57, "word": " N", "probability": 0.80615234375}, {"start": 1097.57, "end": 1097.69, "word": " is", "probability": 0.68115234375}, {"start": 1097.69, "end": 1098.01, "word": " 200.", "probability": 0.845703125}, {"start": 1100.25, "end": 1100.67, "word": " For", "probability": 0.9375}, {"start": 1100.67, "end": 1100.89, "word": " sure", "probability": 0.92431640625}, {"start": 1100.89, "end": 1101.05, "word": " the", "probability": 0.56396484375}, {"start": 1101.05, "end": 1101.21, "word": " two", "probability": 0.86181640625}, {"start": 1101.21, "end": 1101.75, "word": " conditions", "probability": 0.90771484375}, {"start": 1101.75, "end": 1102.63, "word": " most", "probability": 0.70068359375}, {"start": 1102.63, "end": 1102.79, "word": " of", "probability": 0.9697265625}, {"start": 1102.79, "end": 1102.87, "word": " the", "probability": 0.9189453125}, {"start": 1102.87, "end": 1103.07, "word": " time", "probability": 0.8642578125}, {"start": 1103.07, "end": 1103.21, "word": " are", "probability": 0.88720703125}, {"start": 
1103.21, "end": 1103.71, "word": " satisfied.", "probability": 0.8720703125}, {"start": 1104.49, "end": 1104.79, "word": " But", "probability": 0.935546875}, {"start": 1104.79, "end": 1104.89, "word": " you", "probability": 0.6181640625}, {"start": 1104.89, "end": 1105.03, "word": " have", "probability": 0.9423828125}, {"start": 1105.03, "end": 1105.17, "word": " to", "probability": 0.9697265625}, {"start": 1105.17, "end": 1105.43, "word": " check", "probability": 0.93701171875}, {"start": 1105.43, "end": 1105.69, "word": " in", "probability": 0.876953125}, {"start": 1105.69, "end": 1105.89, "word": " order", "probability": 0.9208984375}, {"start": 1105.89, "end": 1106.15, "word": " to", "probability": 0.96826171875}, {"start": 1106.15, "end": 1106.65, "word": " apply", "probability": 0.92626953125}, {"start": 1106.65, "end": 1108.07, "word": " this", "probability": 0.884765625}, {"start": 1108.07, "end": 1108.35, "word": " z", "probability": 0.51904296875}, {"start": 1108.35, "end": 1108.63, "word": "-score.", "probability": 0.8408203125}, {"start": 1108.95, "end": 1109.31, "word": " Let's", "probability": 0.927978515625}, {"start": 1109.31, "end": 1109.59, "word": " see", "probability": 0.91796875}, {"start": 1109.59, "end": 1111.39, "word": " first", "probability": 0.77978515625}, {"start": 1111.39, "end": 1115.51, "word": " how", "probability": 0.65380859375}, {"start": 1115.51, "end": 1115.81, "word": " can", "probability": 0.845703125}, {"start": 1115.81, "end": 1115.99, "word": " we", "probability": 0.94580078125}, {"start": 1115.99, "end": 1116.37, "word": " check", "probability": 0.943359375}, {"start": 1116.37, "end": 1116.81, "word": " the", "probability": 0.87353515625}, {"start": 1116.81, "end": 1117.01, "word": " two", "probability": 0.9228515625}, {"start": 1117.01, "end": 1117.59, "word": " conditions.", "probability": 0.8837890625}, {"start": 1118.59, "end": 1118.99, "word": " Now,", "probability": 0.94580078125}, {"start": 1119.53, "end": 1119.83, 
"word": " N", "probability": 0.96484375}, {"start": 1119.83, "end": 1120.55, "word": " equals", "probability": 0.52880859375}, {"start": 1120.55, "end": 1120.97, "word": " 200.", "probability": 0.91015625}], "temperature": 1.0}, {"id": 41, "seek": 115129, "start": 1123.69, "end": 1151.29, "text": " Pi is 40%, so N times Pi is 80. 200 times 0.4 is 80%. So the first one is satisfied, the first condition. Can you figure out the value of N times 1 minus Pi without calculation? 200 minus 80.", "tokens": [17741, 307, 3356, 8923, 370, 426, 1413, 17741, 307, 4688, 13, 2331, 1413, 1958, 13, 19, 307, 4688, 6856, 407, 264, 700, 472, 307, 11239, 11, 264, 700, 4188, 13, 1664, 291, 2573, 484, 264, 2158, 295, 426, 1413, 502, 3175, 17741, 1553, 17108, 30, 2331, 3175, 4688, 13], "avg_logprob": -0.20109375268220903, "compression_ratio": 1.4014598540145986, "no_speech_prob": 0.0, "words": [{"start": 1123.69, "end": 1124.47, "word": " Pi", "probability": 0.4267578125}, {"start": 1124.47, "end": 1125.25, "word": " is", "probability": 0.90283203125}, {"start": 1125.25, "end": 1126.23, "word": " 40%,", "probability": 0.568115234375}, {"start": 1126.23, "end": 1127.69, "word": " so", "probability": 0.89111328125}, {"start": 1127.69, "end": 1127.93, "word": " N", "probability": 0.425048828125}, {"start": 1127.93, "end": 1128.23, "word": " times", "probability": 0.72216796875}, {"start": 1128.23, "end": 1128.63, "word": " Pi", "probability": 0.71337890625}, {"start": 1128.63, "end": 1132.07, "word": " is", "probability": 0.90625}, {"start": 1132.07, "end": 1132.39, "word": " 80.", "probability": 0.9697265625}, {"start": 1133.47, "end": 1134.25, "word": " 200", "probability": 0.61767578125}, {"start": 1134.25, "end": 1134.73, "word": " times", "probability": 0.87109375}, {"start": 1134.73, "end": 1134.93, "word": " 0", "probability": 0.74169921875}, {"start": 1134.93, "end": 1135.19, "word": ".4", "probability": 0.986328125}, {"start": 1135.19, "end": 1135.33, "word": " is", "probability": 
0.9111328125}, {"start": 1135.33, "end": 1135.99, "word": " 80%.", "probability": 0.8837890625}, {"start": 1135.99, "end": 1137.29, "word": " So", "probability": 0.91552734375}, {"start": 1137.29, "end": 1137.47, "word": " the", "probability": 0.74755859375}, {"start": 1137.47, "end": 1137.81, "word": " first", "probability": 0.8828125}, {"start": 1137.81, "end": 1138.81, "word": " one", "probability": 0.90673828125}, {"start": 1138.81, "end": 1139.05, "word": " is", "probability": 0.93017578125}, {"start": 1139.05, "end": 1139.63, "word": " satisfied,", "probability": 0.88037109375}, {"start": 1139.87, "end": 1140.03, "word": " the", "probability": 0.84130859375}, {"start": 1140.03, "end": 1140.29, "word": " first", "probability": 0.8720703125}, {"start": 1140.29, "end": 1140.73, "word": " condition.", "probability": 0.9453125}, {"start": 1143.03, "end": 1143.47, "word": " Can", "probability": 0.96875}, {"start": 1143.47, "end": 1143.61, "word": " you", "probability": 0.634765625}, {"start": 1143.61, "end": 1143.79, "word": " figure", "probability": 0.96533203125}, {"start": 1143.79, "end": 1144.11, "word": " out", "probability": 0.89013671875}, {"start": 1144.11, "end": 1144.51, "word": " the", "probability": 0.90869140625}, {"start": 1144.51, "end": 1144.91, "word": " value", "probability": 0.97705078125}, {"start": 1144.91, "end": 1145.33, "word": " of", "probability": 0.95947265625}, {"start": 1145.33, "end": 1145.73, "word": " N", "probability": 0.97802734375}, {"start": 1145.73, "end": 1146.15, "word": " times", "probability": 0.91357421875}, {"start": 1146.15, "end": 1146.39, "word": " 1", "probability": 0.8466796875}, {"start": 1146.39, "end": 1146.67, "word": " minus", "probability": 0.9462890625}, {"start": 1146.67, "end": 1146.95, "word": " Pi", "probability": 0.8779296875}, {"start": 1146.95, "end": 1147.21, "word": " without", "probability": 0.88134765625}, {"start": 1147.21, "end": 1147.81, "word": " calculation?", "probability": 0.89794921875}, 
{"start": 1148.29, "end": 1148.67, "word": " 200", "probability": 0.52587890625}, {"start": 1148.67, "end": 1150.81, "word": " minus", "probability": 0.97509765625}, {"start": 1150.81, "end": 1151.29, "word": " 80.", "probability": 0.96337890625}], "temperature": 1.0}, {"id": 42, "seek": 118234, "start": 1152.95, "end": 1182.35, "text": " So this value is 200 minus 80. Or just 200 times 1 minus 0.4. 200 times 0.6 is 120. So if you just find the first one and times 5, the other one is n minus 80 will give the other condition. So now the two conditions are satisfied. Then we can use the z-score.", "tokens": [407, 341, 2158, 307, 2331, 3175, 4688, 13, 1610, 445, 2331, 1413, 502, 3175, 1958, 13, 19, 13, 2331, 1413, 1958, 13, 21, 307, 10411, 13, 407, 498, 291, 445, 915, 264, 700, 472, 293, 1413, 1025, 11, 264, 661, 472, 307, 297, 3175, 4688, 486, 976, 264, 661, 4188, 13, 407, 586, 264, 732, 4487, 366, 11239, 13, 1396, 321, 393, 764, 264, 710, 12, 4417, 418, 13], "avg_logprob": -0.1675223273890359, "compression_ratio": 1.5853658536585367, "no_speech_prob": 0.0, "words": [{"start": 1152.95, "end": 1153.21, "word": " So", "probability": 0.9248046875}, {"start": 1153.21, "end": 1153.49, "word": " this", "probability": 0.83154296875}, {"start": 1153.49, "end": 1153.89, "word": " value", "probability": 0.97216796875}, {"start": 1153.89, "end": 1155.69, "word": " is", "probability": 0.91650390625}, {"start": 1155.69, "end": 1156.11, "word": " 200", "probability": 0.921875}, {"start": 1156.11, "end": 1157.47, "word": " minus", "probability": 0.876953125}, {"start": 1157.47, "end": 1157.87, "word": " 80.", "probability": 0.86083984375}, {"start": 1158.61, "end": 1159.19, "word": " Or", "probability": 0.955078125}, {"start": 1159.19, "end": 1159.49, "word": " just", "probability": 0.89990234375}, {"start": 1159.49, "end": 1160.33, "word": " 200", "probability": 0.91162109375}, {"start": 1160.33, "end": 1160.97, "word": " times", "probability": 0.9130859375}, {"start": 1160.97, 
"end": 1161.29, "word": " 1", "probability": 0.8779296875}, {"start": 1161.29, "end": 1161.71, "word": " minus", "probability": 0.982421875}, {"start": 1161.71, "end": 1162.31, "word": " 0", "probability": 0.8056640625}, {"start": 1162.31, "end": 1162.77, "word": ".4.", "probability": 0.99658203125}, {"start": 1163.89, "end": 1164.37, "word": " 200", "probability": 0.91796875}, {"start": 1164.37, "end": 1164.73, "word": " times", "probability": 0.92041015625}, {"start": 1164.73, "end": 1164.93, "word": " 0", "probability": 0.90771484375}, {"start": 1164.93, "end": 1165.31, "word": ".6", "probability": 0.997314453125}, {"start": 1165.31, "end": 1165.55, "word": " is", "probability": 0.93408203125}, {"start": 1165.55, "end": 1165.87, "word": " 120.", "probability": 0.90625}, {"start": 1167.07, "end": 1167.75, "word": " So", "probability": 0.95654296875}, {"start": 1167.75, "end": 1167.97, "word": " if", "probability": 0.8798828125}, {"start": 1167.97, "end": 1168.09, "word": " you", "probability": 0.95849609375}, {"start": 1168.09, "end": 1168.47, "word": " just", "probability": 0.9169921875}, {"start": 1168.47, "end": 1169.01, "word": " find", "probability": 0.8935546875}, {"start": 1169.01, "end": 1169.25, "word": " the", "probability": 0.916015625}, {"start": 1169.25, "end": 1169.55, "word": " first", "probability": 0.8603515625}, {"start": 1169.55, "end": 1169.79, "word": " one", "probability": 0.8818359375}, {"start": 1169.79, "end": 1169.89, "word": " and", "probability": 0.328857421875}, {"start": 1169.89, "end": 1170.21, "word": " times", "probability": 0.4521484375}, {"start": 1170.21, "end": 1170.57, "word": " 5,", "probability": 0.61572265625}, {"start": 1171.17, "end": 1171.37, "word": " the", "probability": 0.90771484375}, {"start": 1171.37, "end": 1171.61, "word": " other", "probability": 0.88330078125}, {"start": 1171.61, "end": 1171.83, "word": " one", "probability": 0.8984375}, {"start": 1171.83, "end": 1172.03, "word": " is", "probability": 
0.93896484375}, {"start": 1172.03, "end": 1172.23, "word": " n", "probability": 0.40283203125}, {"start": 1172.23, "end": 1172.71, "word": " minus", "probability": 0.97509765625}, {"start": 1172.71, "end": 1174.55, "word": " 80", "probability": 0.8994140625}, {"start": 1174.55, "end": 1174.77, "word": " will", "probability": 0.6787109375}, {"start": 1174.77, "end": 1175.11, "word": " give", "probability": 0.88818359375}, {"start": 1175.11, "end": 1175.65, "word": " the", "probability": 0.88525390625}, {"start": 1175.65, "end": 1175.87, "word": " other", "probability": 0.880859375}, {"start": 1175.87, "end": 1176.29, "word": " condition.", "probability": 0.88525390625}, {"start": 1176.81, "end": 1176.97, "word": " So", "probability": 0.95751953125}, {"start": 1176.97, "end": 1177.13, "word": " now", "probability": 0.9130859375}, {"start": 1177.13, "end": 1177.31, "word": " the", "probability": 0.7822265625}, {"start": 1177.31, "end": 1177.47, "word": " two", "probability": 0.892578125}, {"start": 1177.47, "end": 1177.93, "word": " conditions", "probability": 0.8671875}, {"start": 1177.93, "end": 1178.17, "word": " are", "probability": 0.9365234375}, {"start": 1178.17, "end": 1178.69, "word": " satisfied.", "probability": 0.9150390625}, {"start": 1180.03, "end": 1180.71, "word": " Then", "probability": 0.8662109375}, {"start": 1180.71, "end": 1180.89, "word": " we", "probability": 0.9033203125}, {"start": 1180.89, "end": 1181.21, "word": " can", "probability": 0.9443359375}, {"start": 1181.21, "end": 1181.63, "word": " use", "probability": 0.8759765625}, {"start": 1181.63, "end": 1181.83, "word": " the", "probability": 0.84423828125}, {"start": 1181.83, "end": 1182.01, "word": " z", "probability": 0.685546875}, {"start": 1182.01, "end": 1182.35, "word": "-score.", "probability": 0.8068033854166666}], "temperature": 1.0}, {"id": 43, "seek": 121048, "start": 1182.72, "end": 1210.48, "text": " I mean, we can say that the sampling proportion is normally distributed with 
mean equal pi, always pi, and pi is given as 40%. And the sigma of p equals square root of pi 1 minus pi divided by n. That's your square root of 0.4 times.", "tokens": [286, 914, 11, 321, 393, 584, 300, 264, 21179, 16068, 307, 5646, 12631, 365, 914, 2681, 3895, 11, 1009, 3895, 11, 293, 3895, 307, 2212, 382, 3356, 6856, 400, 264, 12771, 295, 280, 6915, 3732, 5593, 295, 3895, 502, 3175, 3895, 6666, 538, 297, 13, 663, 311, 428, 3732, 5593, 295, 1958, 13, 19, 1413, 13], "avg_logprob": -0.24369517543859648, "compression_ratio": 1.4268292682926829, "no_speech_prob": 0.0, "words": [{"start": 1182.72, "end": 1182.94, "word": " I", "probability": 0.73974609375}, {"start": 1182.94, "end": 1183.14, "word": " mean,", "probability": 0.9619140625}, {"start": 1183.28, "end": 1183.4, "word": " we", "probability": 0.95263671875}, {"start": 1183.4, "end": 1183.72, "word": " can", "probability": 0.9482421875}, {"start": 1183.72, "end": 1184.14, "word": " say", "probability": 0.888671875}, {"start": 1184.14, "end": 1184.4, "word": " that", "probability": 0.91259765625}, {"start": 1184.4, "end": 1185.3, "word": " the", "probability": 0.7978515625}, {"start": 1185.3, "end": 1185.64, "word": " sampling", "probability": 0.408935546875}, {"start": 1185.64, "end": 1186.12, "word": " proportion", "probability": 0.87548828125}, {"start": 1186.12, "end": 1186.78, "word": " is", "probability": 0.94970703125}, {"start": 1186.78, "end": 1187.88, "word": " normally", "probability": 0.88330078125}, {"start": 1187.88, "end": 1188.58, "word": " distributed", "probability": 0.91015625}, {"start": 1188.58, "end": 1189.7, "word": " with", "probability": 0.84326171875}, {"start": 1189.7, "end": 1190.08, "word": " mean", "probability": 0.96240234375}, {"start": 1190.08, "end": 1192.36, "word": " equal", "probability": 0.489990234375}, {"start": 1192.36, "end": 1192.74, "word": " pi,", "probability": 0.3955078125}, {"start": 1192.82, "end": 1193.22, "word": " always", "probability": 0.8740234375}, 
{"start": 1193.22, "end": 1193.62, "word": " pi,", "probability": 0.94384765625}, {"start": 1194.08, "end": 1194.72, "word": " and", "probability": 0.94580078125}, {"start": 1194.72, "end": 1194.98, "word": " pi", "probability": 0.9287109375}, {"start": 1194.98, "end": 1195.16, "word": " is", "probability": 0.890625}, {"start": 1195.16, "end": 1195.48, "word": " given", "probability": 0.87109375}, {"start": 1195.48, "end": 1196.14, "word": " as", "probability": 0.7314453125}, {"start": 1196.14, "end": 1197.04, "word": " 40%.", "probability": 0.82373046875}, {"start": 1197.04, "end": 1199.78, "word": " And", "probability": 0.9443359375}, {"start": 1199.78, "end": 1200.52, "word": " the", "probability": 0.88818359375}, {"start": 1200.52, "end": 1200.9, "word": " sigma", "probability": 0.83447265625}, {"start": 1200.9, "end": 1201.7, "word": " of", "probability": 0.6962890625}, {"start": 1201.7, "end": 1201.88, "word": " p", "probability": 0.3544921875}, {"start": 1201.88, "end": 1202.38, "word": " equals", "probability": 0.90673828125}, {"start": 1202.38, "end": 1202.7, "word": " square", "probability": 0.76513671875}, {"start": 1202.7, "end": 1203.02, "word": " root", "probability": 0.9306640625}, {"start": 1203.02, "end": 1204.24, "word": " of", "probability": 0.480712890625}, {"start": 1204.24, "end": 1204.52, "word": " pi", "probability": 0.94384765625}, {"start": 1204.52, "end": 1204.74, "word": " 1", "probability": 0.69384765625}, {"start": 1204.74, "end": 1205.02, "word": " minus", "probability": 0.908203125}, {"start": 1205.02, "end": 1205.28, "word": " pi", "probability": 0.85302734375}, {"start": 1205.28, "end": 1205.56, "word": " divided", "probability": 0.7275390625}, {"start": 1205.56, "end": 1205.7, "word": " by", "probability": 0.97314453125}, {"start": 1205.7, "end": 1206.0, "word": " n.", "probability": 0.7353515625}, {"start": 1207.46, "end": 1208.14, "word": " That's", "probability": 0.89404296875}, {"start": 1208.14, "end": 1208.36, "word": " 
your", "probability": 0.42529296875}, {"start": 1208.36, "end": 1208.66, "word": " square", "probability": 0.78466796875}, {"start": 1208.66, "end": 1208.86, "word": " root", "probability": 0.9287109375}, {"start": 1208.86, "end": 1209.08, "word": " of", "probability": 0.9677734375}, {"start": 1209.08, "end": 1209.42, "word": " 0", "probability": 0.77978515625}, {"start": 1209.42, "end": 1209.86, "word": ".4", "probability": 0.98388671875}, {"start": 1209.86, "end": 1210.48, "word": " times.", "probability": 0.908203125}], "temperature": 1.0}, {"id": 44, "seek": 124103, "start": 1212.33, "end": 1241.03, "text": " divided by 200, and that will give 0.0346. So, the first step, we have to check the two conditions. Second step, compute the mean of P, sigma of P. Now, finally, find the z-score", "tokens": [6666, 538, 2331, 11, 293, 300, 486, 976, 1958, 13, 15, 12249, 21, 13, 407, 11, 264, 700, 1823, 11, 321, 362, 281, 1520, 264, 732, 4487, 13, 5736, 1823, 11, 14722, 264, 914, 295, 430, 11, 12771, 295, 430, 13, 823, 11, 2721, 11, 915, 264, 710, 12, 4417, 418], "avg_logprob": -0.26667667638797027, "compression_ratio": 1.3065693430656935, "no_speech_prob": 0.0, "words": [{"start": 1212.33, "end": 1212.87, "word": " divided", "probability": 0.1522216796875}, {"start": 1212.87, "end": 1213.21, "word": " by", "probability": 0.9736328125}, {"start": 1213.21, "end": 1213.63, "word": " 200,", "probability": 0.8671875}, {"start": 1214.61, "end": 1214.83, "word": " and", "probability": 0.92919921875}, {"start": 1214.83, "end": 1215.03, "word": " that", "probability": 0.92724609375}, {"start": 1215.03, "end": 1215.21, "word": " will", "probability": 0.88037109375}, {"start": 1215.21, "end": 1215.55, "word": " give", "probability": 0.849609375}, {"start": 1215.55, "end": 1218.83, "word": " 0", "probability": 0.46826171875}, {"start": 1218.83, "end": 1222.81, "word": ".0346.", "probability": 0.802978515625}, {"start": 1226.21, "end": 1226.93, "word": " So,", "probability": 
0.94677734375}, {"start": 1227.09, "end": 1227.17, "word": " the", "probability": 0.88916015625}, {"start": 1227.17, "end": 1227.47, "word": " first", "probability": 0.8759765625}, {"start": 1227.47, "end": 1227.85, "word": " step,", "probability": 0.92236328125}, {"start": 1227.99, "end": 1228.07, "word": " we", "probability": 0.94189453125}, {"start": 1228.07, "end": 1228.23, "word": " have", "probability": 0.9306640625}, {"start": 1228.23, "end": 1228.33, "word": " to", "probability": 0.96728515625}, {"start": 1228.33, "end": 1228.67, "word": " check", "probability": 0.95263671875}, {"start": 1228.67, "end": 1229.09, "word": " the", "probability": 0.86376953125}, {"start": 1229.09, "end": 1229.25, "word": " two", "probability": 0.8759765625}, {"start": 1229.25, "end": 1229.79, "word": " conditions.", "probability": 0.880859375}, {"start": 1230.87, "end": 1231.27, "word": " Second", "probability": 0.7666015625}, {"start": 1231.27, "end": 1231.79, "word": " step,", "probability": 0.916015625}, {"start": 1232.23, "end": 1232.65, "word": " compute", "probability": 0.91650390625}, {"start": 1232.65, "end": 1233.11, "word": " the", "probability": 0.91650390625}, {"start": 1233.11, "end": 1233.33, "word": " mean", "probability": 0.837890625}, {"start": 1233.33, "end": 1233.53, "word": " of", "probability": 0.923828125}, {"start": 1233.53, "end": 1233.73, "word": " P,", "probability": 0.55224609375}, {"start": 1233.97, "end": 1234.25, "word": " sigma", "probability": 0.70849609375}, {"start": 1234.25, "end": 1234.57, "word": " of", "probability": 0.9267578125}, {"start": 1234.57, "end": 1234.77, "word": " P.", "probability": 0.9462890625}, {"start": 1235.55, "end": 1236.09, "word": " Now,", "probability": 0.951171875}, {"start": 1238.17, "end": 1238.87, "word": " finally,", "probability": 0.865234375}, {"start": 1239.73, "end": 1240.21, "word": " find", "probability": 0.8876953125}, {"start": 1240.21, "end": 1240.43, "word": " the", "probability": 0.623046875}, 
{"start": 1240.43, "end": 1240.65, "word": " z", "probability": 0.58056640625}, {"start": 1240.65, "end": 1241.03, "word": "-score", "probability": 0.8541666666666666}], "temperature": 1.0}, {"id": 45, "seek": 126789, "start": 1241.69, "end": 1267.89, "text": " of your problem, here he asks about what's the probability that B lies between 0.4 and 0.45 percent. So we have to find the score for 0.4. So that's 0.4 minus again the mean of B which is again 0.4, so that's valid.", "tokens": [295, 428, 1154, 11, 510, 415, 8962, 466, 437, 311, 264, 8482, 300, 363, 9134, 1296, 1958, 13, 19, 293, 1958, 13, 8465, 3043, 13, 407, 321, 362, 281, 915, 264, 6175, 337, 1958, 13, 19, 13, 407, 300, 311, 1958, 13, 19, 3175, 797, 264, 914, 295, 363, 597, 307, 797, 1958, 13, 19, 11, 370, 300, 311, 7363, 13], "avg_logprob": -0.22580644632539443, "compression_ratio": 1.411764705882353, "no_speech_prob": 0.0, "words": [{"start": 1241.69, "end": 1242.01, "word": " of", "probability": 0.42041015625}, {"start": 1242.01, "end": 1242.27, "word": " your", "probability": 0.8955078125}, {"start": 1242.27, "end": 1242.81, "word": " problem,", "probability": 0.876953125}, {"start": 1243.29, "end": 1243.65, "word": " here", "probability": 0.61083984375}, {"start": 1243.65, "end": 1243.97, "word": " he", "probability": 0.7978515625}, {"start": 1243.97, "end": 1244.23, "word": " asks", "probability": 0.489990234375}, {"start": 1244.23, "end": 1244.59, "word": " about", "probability": 0.89697265625}, {"start": 1244.59, "end": 1245.33, "word": " what's", "probability": 0.82861328125}, {"start": 1245.33, "end": 1245.47, "word": " the", "probability": 0.91748046875}, {"start": 1245.47, "end": 1245.97, "word": " probability", "probability": 0.95166015625}, {"start": 1245.97, "end": 1247.13, "word": " that", "probability": 0.9404296875}, {"start": 1247.13, "end": 1248.15, "word": " B", "probability": 0.65576171875}, {"start": 1248.15, "end": 1250.11, "word": " lies", "probability": 0.9287109375}, {"start": 
1250.11, "end": 1250.71, "word": " between", "probability": 0.86181640625}, {"start": 1250.71, "end": 1254.21, "word": " 0", "probability": 0.5185546875}, {"start": 1254.21, "end": 1254.61, "word": ".4", "probability": 0.994140625}, {"start": 1254.61, "end": 1254.83, "word": " and", "probability": 0.93701171875}, {"start": 1254.83, "end": 1254.99, "word": " 0", "probability": 0.97607421875}, {"start": 1254.99, "end": 1255.47, "word": ".45", "probability": 0.994140625}, {"start": 1255.47, "end": 1255.85, "word": " percent.", "probability": 0.5517578125}, {"start": 1257.21, "end": 1257.49, "word": " So", "probability": 0.89453125}, {"start": 1257.49, "end": 1257.59, "word": " we", "probability": 0.7255859375}, {"start": 1257.59, "end": 1257.73, "word": " have", "probability": 0.9453125}, {"start": 1257.73, "end": 1257.83, "word": " to", "probability": 0.96435546875}, {"start": 1257.83, "end": 1258.03, "word": " find", "probability": 0.8994140625}, {"start": 1258.03, "end": 1258.21, "word": " the", "probability": 0.6083984375}, {"start": 1258.21, "end": 1258.43, "word": " score", "probability": 0.6611328125}, {"start": 1258.43, "end": 1258.79, "word": " for", "probability": 0.923828125}, {"start": 1258.79, "end": 1259.49, "word": " 0", "probability": 0.92578125}, {"start": 1259.49, "end": 1259.87, "word": ".4.", "probability": 0.998291015625}, {"start": 1261.19, "end": 1261.63, "word": " So", "probability": 0.94873046875}, {"start": 1261.63, "end": 1261.91, "word": " that's", "probability": 0.916748046875}, {"start": 1261.91, "end": 1262.17, "word": " 0", "probability": 0.9482421875}, {"start": 1262.17, "end": 1262.49, "word": ".4", "probability": 0.99853515625}, {"start": 1262.49, "end": 1263.11, "word": " minus", "probability": 0.97900390625}, {"start": 1263.11, "end": 1263.85, "word": " again", "probability": 0.66748046875}, {"start": 1263.85, "end": 1264.05, "word": " the", "probability": 0.8466796875}, {"start": 1264.05, "end": 1264.39, "word": " mean", 
"probability": 0.96337890625}, {"start": 1264.39, "end": 1265.29, "word": " of", "probability": 0.90771484375}, {"start": 1265.29, "end": 1265.49, "word": " B", "probability": 0.18212890625}, {"start": 1265.49, "end": 1265.63, "word": " which", "probability": 0.454345703125}, {"start": 1265.63, "end": 1265.81, "word": " is", "probability": 0.92431640625}, {"start": 1265.81, "end": 1266.33, "word": " again", "probability": 0.91943359375}, {"start": 1266.33, "end": 1266.63, "word": " 0", "probability": 0.95166015625}, {"start": 1266.63, "end": 1266.89, "word": ".4,", "probability": 0.99853515625}, {"start": 1267.19, "end": 1267.37, "word": " so", "probability": 0.93603515625}, {"start": 1267.37, "end": 1267.61, "word": " that's", "probability": 0.7607421875}, {"start": 1267.61, "end": 1267.89, "word": " valid.", "probability": 0.7841796875}], "temperature": 1.0}, {"id": 46, "seek": 129734, "start": 1269.89, "end": 1297.35, "text": " Divide by 0.0346. So again, b is 0.4. This is pi. And pi, in this case, is always 0.4. And this is your b. 
So 0.4 minus 0.4 divided by 0.0346 plus 1 equals 0.", "tokens": [9886, 482, 538, 1958, 13, 15, 12249, 21, 13, 407, 797, 11, 272, 307, 1958, 13, 19, 13, 639, 307, 3895, 13, 400, 3895, 11, 294, 341, 1389, 11, 307, 1009, 1958, 13, 19, 13, 400, 341, 307, 428, 272, 13, 407, 1958, 13, 19, 3175, 1958, 13, 19, 6666, 538, 1958, 13, 15, 12249, 21, 1804, 502, 6915, 1958, 13], "avg_logprob": -0.21219758425028093, "compression_ratio": 1.325, "no_speech_prob": 0.0, "words": [{"start": 1269.89, "end": 1270.31, "word": " Divide", "probability": 0.669189453125}, {"start": 1270.31, "end": 1270.65, "word": " by", "probability": 0.93310546875}, {"start": 1270.65, "end": 1271.55, "word": " 0", "probability": 0.5908203125}, {"start": 1271.55, "end": 1273.65, "word": ".0346.", "probability": 0.82159423828125}, {"start": 1274.55, "end": 1275.23, "word": " So", "probability": 0.93798828125}, {"start": 1275.23, "end": 1275.53, "word": " again,", "probability": 0.81640625}, {"start": 1277.29, "end": 1277.61, "word": " b", "probability": 0.41845703125}, {"start": 1277.61, "end": 1277.79, "word": " is", "probability": 0.91552734375}, {"start": 1277.79, "end": 1278.01, "word": " 0", "probability": 0.81005859375}, {"start": 1278.01, "end": 1278.37, "word": ".4.", "probability": 0.9951171875}, {"start": 1279.91, "end": 1280.59, "word": " This", "probability": 0.83154296875}, {"start": 1280.59, "end": 1280.73, "word": " is", "probability": 0.95458984375}, {"start": 1280.73, "end": 1281.03, "word": " pi.", "probability": 0.7109375}, {"start": 1283.67, "end": 1284.35, "word": " And", "probability": 0.93603515625}, {"start": 1284.35, "end": 1284.79, "word": " pi,", "probability": 0.90966796875}, {"start": 1285.39, "end": 1286.21, "word": " in", "probability": 0.94384765625}, {"start": 1286.21, "end": 1286.45, "word": " this", "probability": 0.93505859375}, {"start": 1286.45, "end": 1286.73, "word": " case,", "probability": 0.9072265625}, {"start": 1286.81, "end": 1286.91, "word": " is", 
"probability": 0.9296875}, {"start": 1286.91, "end": 1287.39, "word": " always", "probability": 0.89453125}, {"start": 1287.39, "end": 1287.73, "word": " 0", "probability": 0.59375}, {"start": 1287.73, "end": 1288.09, "word": ".4.", "probability": 0.997802734375}, {"start": 1289.87, "end": 1290.55, "word": " And", "probability": 0.78076171875}, {"start": 1290.55, "end": 1290.67, "word": " this", "probability": 0.759765625}, {"start": 1290.67, "end": 1290.73, "word": " is", "probability": 0.9208984375}, {"start": 1290.73, "end": 1290.91, "word": " your", "probability": 0.8994140625}, {"start": 1290.91, "end": 1291.13, "word": " b.", "probability": 0.93505859375}, {"start": 1291.57, "end": 1291.89, "word": " So", "probability": 0.93896484375}, {"start": 1291.89, "end": 1292.15, "word": " 0", "probability": 0.83056640625}, {"start": 1292.15, "end": 1292.39, "word": ".4", "probability": 0.998291015625}, {"start": 1292.39, "end": 1292.69, "word": " minus", "probability": 0.97216796875}, {"start": 1292.69, "end": 1292.89, "word": " 0", "probability": 0.88232421875}, {"start": 1292.89, "end": 1293.09, "word": ".4", "probability": 0.9990234375}, {"start": 1293.09, "end": 1293.41, "word": " divided", "probability": 0.5302734375}, {"start": 1293.41, "end": 1293.73, "word": " by", "probability": 0.9580078125}, {"start": 1293.73, "end": 1294.79, "word": " 0", "probability": 0.798828125}, {"start": 1294.79, "end": 1296.07, "word": ".0346", "probability": 0.80865478515625}, {"start": 1296.07, "end": 1296.49, "word": " plus", "probability": 0.37158203125}, {"start": 1296.49, "end": 1296.73, "word": " 1", "probability": 0.85546875}, {"start": 1296.73, "end": 1297.01, "word": " equals", "probability": 0.91650390625}, {"start": 1297.01, "end": 1297.35, "word": " 0.", "probability": 0.919921875}], "temperature": 1.0}, {"id": 47, "seek": 132726, "start": 1298.6, "end": 1327.26, "text": " The other z-score, for the other value, here, 0.45. And this gives 1.4. 
After that, this problem is converted to standardized normal value. So instead of P, we have Z.", "tokens": [440, 661, 710, 12, 4417, 418, 11, 337, 264, 661, 2158, 11, 510, 11, 1958, 13, 8465, 13, 400, 341, 2709, 502, 13, 19, 13, 2381, 300, 11, 341, 1154, 307, 16424, 281, 31677, 2710, 2158, 13, 407, 2602, 295, 430, 11, 321, 362, 1176, 13], "avg_logprob": -0.30285904508955935, "compression_ratio": 1.263157894736842, "no_speech_prob": 0.0, "words": [{"start": 1298.6, "end": 1298.86, "word": " The", "probability": 0.68408203125}, {"start": 1298.86, "end": 1299.24, "word": " other", "probability": 0.77587890625}, {"start": 1299.24, "end": 1299.54, "word": " z", "probability": 0.5791015625}, {"start": 1299.54, "end": 1299.96, "word": "-score,", "probability": 0.7918294270833334}, {"start": 1300.9, "end": 1302.24, "word": " for", "probability": 0.6279296875}, {"start": 1302.24, "end": 1302.38, "word": " the", "probability": 0.83935546875}, {"start": 1302.38, "end": 1302.56, "word": " other", "probability": 0.8818359375}, {"start": 1302.56, "end": 1302.9, "word": " value,", "probability": 0.96337890625}, {"start": 1303.2, "end": 1303.6, "word": " here,", "probability": 0.67236328125}, {"start": 1303.76, "end": 1304.0, "word": " 0", "probability": 0.69775390625}, {"start": 1304.0, "end": 1304.54, "word": ".45.", "probability": 0.981201171875}, {"start": 1310.34, "end": 1311.04, "word": " And", "probability": 0.81396484375}, {"start": 1311.04, "end": 1311.28, "word": " this", "probability": 0.92578125}, {"start": 1311.28, "end": 1311.56, "word": " gives", "probability": 0.880859375}, {"start": 1311.56, "end": 1311.82, "word": " 1", "probability": 0.93896484375}, {"start": 1311.82, "end": 1313.36, "word": ".4.", "probability": 0.5928955078125}, {"start": 1316.9, "end": 1317.6, "word": " After", "probability": 0.86474609375}, {"start": 1317.6, "end": 1317.94, "word": " that,", "probability": 0.93603515625}, {"start": 1318.12, "end": 1318.46, "word": " this", "probability": 
0.94580078125}, {"start": 1318.46, "end": 1318.92, "word": " problem", "probability": 0.8818359375}, {"start": 1318.92, "end": 1319.18, "word": " is", "probability": 0.939453125}, {"start": 1319.18, "end": 1319.72, "word": " converted", "probability": 0.8623046875}, {"start": 1319.72, "end": 1321.4, "word": " to", "probability": 0.666015625}, {"start": 1321.4, "end": 1321.88, "word": " standardized", "probability": 0.36328125}, {"start": 1321.88, "end": 1322.26, "word": " normal", "probability": 0.57958984375}, {"start": 1322.26, "end": 1323.68, "word": " value.", "probability": 0.80859375}, {"start": 1324.04, "end": 1324.32, "word": " So", "probability": 0.92431640625}, {"start": 1324.32, "end": 1324.94, "word": " instead", "probability": 0.3310546875}, {"start": 1324.94, "end": 1326.04, "word": " of", "probability": 0.9580078125}, {"start": 1326.04, "end": 1326.4, "word": " P,", "probability": 0.63427734375}, {"start": 1326.72, "end": 1326.84, "word": " we", "probability": 0.9423828125}, {"start": 1326.84, "end": 1327.04, "word": " have", "probability": 0.94677734375}, {"start": 1327.04, "end": 1327.26, "word": " Z.", "probability": 0.6376953125}], "temperature": 1.0}, {"id": 48, "seek": 135768, "start": 1328.18, "end": 1357.68, "text": " between 0 and 1.44. That's all for using Chapter 7. Now, to complete your answer, you have to use Chapter 6. Now, V is between 0 and 1.44. 
As we mentioned many times, the area here equals, I mean, the dashed area equals V of Z.", "tokens": [1296, 1958, 293, 502, 13, 13912, 13, 663, 311, 439, 337, 1228, 18874, 1614, 13, 823, 11, 281, 3566, 428, 1867, 11, 291, 362, 281, 764, 18874, 1386, 13, 823, 11, 691, 307, 1296, 1958, 293, 502, 13, 13912, 13, 1018, 321, 2835, 867, 1413, 11, 264, 1859, 510, 6915, 11, 286, 914, 11, 264, 8240, 292, 1859, 6915, 691, 295, 1176, 13], "avg_logprob": -0.18591308081522584, "compression_ratio": 1.4522292993630572, "no_speech_prob": 0.0, "words": [{"start": 1328.18, "end": 1328.6, "word": " between", "probability": 0.42333984375}, {"start": 1328.6, "end": 1329.0, "word": " 0", "probability": 0.58544921875}, {"start": 1329.0, "end": 1329.98, "word": " and", "probability": 0.94091796875}, {"start": 1329.98, "end": 1330.24, "word": " 1", "probability": 0.96630859375}, {"start": 1330.24, "end": 1331.36, "word": ".44.", "probability": 0.6170654296875}, {"start": 1333.08, "end": 1333.76, "word": " That's", "probability": 0.93798828125}, {"start": 1333.76, "end": 1334.2, "word": " all", "probability": 0.9521484375}, {"start": 1334.2, "end": 1334.56, "word": " for", "probability": 0.9326171875}, {"start": 1334.56, "end": 1334.92, "word": " using", "probability": 0.92822265625}, {"start": 1334.92, "end": 1335.26, "word": " Chapter", "probability": 0.48193359375}, {"start": 1335.26, "end": 1335.58, "word": " 7.", "probability": 0.81982421875}, {"start": 1337.08, "end": 1337.54, "word": " Now,", "probability": 0.95068359375}, {"start": 1337.8, "end": 1338.48, "word": " to", "probability": 0.962890625}, {"start": 1338.48, "end": 1338.88, "word": " complete", "probability": 0.8076171875}, {"start": 1338.88, "end": 1339.22, "word": " your", "probability": 0.8818359375}, {"start": 1339.22, "end": 1341.24, "word": " answer,", "probability": 0.94873046875}, {"start": 1341.34, "end": 1341.42, "word": " you", "probability": 0.94921875}, {"start": 1341.42, "end": 1341.54, "word": " have", 
"probability": 0.876953125}, {"start": 1341.54, "end": 1341.62, "word": " to", "probability": 0.9697265625}, {"start": 1341.62, "end": 1341.76, "word": " use", "probability": 0.8828125}, {"start": 1341.76, "end": 1342.02, "word": " Chapter", "probability": 0.90185546875}, {"start": 1342.02, "end": 1342.44, "word": " 6.", "probability": 0.99853515625}, {"start": 1343.1, "end": 1343.36, "word": " Now,", "probability": 0.9326171875}, {"start": 1343.38, "end": 1343.5, "word": " V", "probability": 0.5830078125}, {"start": 1343.5, "end": 1343.66, "word": " is", "probability": 0.9462890625}, {"start": 1343.66, "end": 1343.92, "word": " between", "probability": 0.86865234375}, {"start": 1343.92, "end": 1344.28, "word": " 0", "probability": 0.94189453125}, {"start": 1344.28, "end": 1344.44, "word": " and", "probability": 0.94677734375}, {"start": 1344.44, "end": 1344.66, "word": " 1", "probability": 0.99609375}, {"start": 1344.66, "end": 1345.18, "word": ".44.", "probability": 0.986572265625}, {"start": 1346.38, "end": 1346.92, "word": " As", "probability": 0.96533203125}, {"start": 1346.92, "end": 1347.1, "word": " we", "probability": 0.95458984375}, {"start": 1347.1, "end": 1347.42, "word": " mentioned", "probability": 0.80810546875}, {"start": 1347.42, "end": 1347.66, "word": " many", "probability": 0.91748046875}, {"start": 1347.66, "end": 1348.14, "word": " times,", "probability": 0.92236328125}, {"start": 1349.24, "end": 1349.56, "word": " the", "probability": 0.904296875}, {"start": 1349.56, "end": 1349.9, "word": " area", "probability": 0.90869140625}, {"start": 1349.9, "end": 1350.32, "word": " here", "probability": 0.8564453125}, {"start": 1350.32, "end": 1353.16, "word": " equals,", "probability": 0.8603515625}, {"start": 1353.66, "end": 1353.92, "word": " I", "probability": 0.982421875}, {"start": 1353.92, "end": 1354.02, "word": " mean,", "probability": 0.962890625}, {"start": 1354.08, "end": 1354.22, "word": " the", "probability": 0.927734375}, {"start": 
1354.22, "end": 1354.6, "word": " dashed", "probability": 0.9013671875}, {"start": 1354.6, "end": 1354.98, "word": " area", "probability": 0.88720703125}, {"start": 1354.98, "end": 1357.06, "word": " equals", "probability": 0.7080078125}, {"start": 1357.06, "end": 1357.28, "word": " V", "probability": 0.892578125}, {"start": 1357.28, "end": 1357.42, "word": " of", "probability": 0.9599609375}, {"start": 1357.42, "end": 1357.68, "word": " Z.", "probability": 0.74951171875}], "temperature": 1.0}, {"id": 49, "seek": 138628, "start": 1359.06, "end": 1386.28, "text": " Less than 1.44 minus 0.5. Exactly minus 0.5, because the area to the left of 0 is 0.5. Now by using the normal table, or standard normal table, P of Z is smaller than 1.44 is given by 9251. Just check your table, minus 0.5, so the final result", "tokens": [18649, 813, 502, 13, 13912, 3175, 1958, 13, 20, 13, 7587, 3175, 1958, 13, 20, 11, 570, 264, 1859, 281, 264, 1411, 295, 1958, 307, 1958, 13, 20, 13, 823, 538, 1228, 264, 2710, 3199, 11, 420, 3832, 2710, 3199, 11, 430, 295, 1176, 307, 4356, 813, 502, 13, 13912, 307, 2212, 538, 1722, 6074, 16, 13, 1449, 1520, 428, 3199, 11, 3175, 1958, 13, 20, 11, 370, 264, 2572, 1874], "avg_logprob": -0.2573784751196702, "compression_ratio": 1.5061728395061729, "no_speech_prob": 0.0, "words": [{"start": 1359.06, "end": 1359.42, "word": " Less", "probability": 0.087890625}, {"start": 1359.42, "end": 1359.7, "word": " than", "probability": 0.93017578125}, {"start": 1359.7, "end": 1359.96, "word": " 1", "probability": 0.64990234375}, {"start": 1359.96, "end": 1360.66, "word": ".44", "probability": 0.80078125}, {"start": 1360.66, "end": 1361.0, "word": " minus", "probability": 0.227783203125}, {"start": 1361.0, "end": 1361.44, "word": " 0", "probability": 0.58056640625}, {"start": 1361.44, "end": 1361.88, "word": ".5.", "probability": 0.9609375}, {"start": 1362.84, "end": 1363.28, "word": " Exactly", "probability": 0.47265625}, {"start": 1363.28, "end": 1363.62, "word": " 
minus", "probability": 0.751953125}, {"start": 1363.62, "end": 1363.88, "word": " 0", "probability": 0.9091796875}, {"start": 1363.88, "end": 1364.26, "word": ".5,", "probability": 0.997314453125}, {"start": 1364.44, "end": 1364.8, "word": " because", "probability": 0.88818359375}, {"start": 1364.8, "end": 1365.0, "word": " the", "probability": 0.7958984375}, {"start": 1365.0, "end": 1365.2, "word": " area", "probability": 0.62646484375}, {"start": 1365.2, "end": 1365.4, "word": " to", "probability": 0.9560546875}, {"start": 1365.4, "end": 1365.56, "word": " the", "probability": 0.9189453125}, {"start": 1365.56, "end": 1365.78, "word": " left", "probability": 0.94873046875}, {"start": 1365.78, "end": 1365.96, "word": " of", "probability": 0.95849609375}, {"start": 1365.96, "end": 1366.3, "word": " 0", "probability": 0.5166015625}, {"start": 1366.3, "end": 1367.22, "word": " is", "probability": 0.88720703125}, {"start": 1367.22, "end": 1367.46, "word": " 0", "probability": 0.93115234375}, {"start": 1367.46, "end": 1367.86, "word": ".5.", "probability": 0.99755859375}, {"start": 1369.28, "end": 1369.58, "word": " Now", "probability": 0.947265625}, {"start": 1369.58, "end": 1369.78, "word": " by", "probability": 0.62890625}, {"start": 1369.78, "end": 1370.06, "word": " using", "probability": 0.93017578125}, {"start": 1370.06, "end": 1370.26, "word": " the", "probability": 0.9091796875}, {"start": 1370.26, "end": 1370.58, "word": " normal", "probability": 0.837890625}, {"start": 1370.58, "end": 1371.0, "word": " table,", "probability": 0.87841796875}, {"start": 1371.54, "end": 1371.62, "word": " or", "probability": 0.81787109375}, {"start": 1371.62, "end": 1371.96, "word": " standard", "probability": 0.486328125}, {"start": 1371.96, "end": 1372.26, "word": " normal", "probability": 0.865234375}, {"start": 1372.26, "end": 1372.64, "word": " table,", "probability": 0.8798828125}, {"start": 1373.2, "end": 1373.34, "word": " P", "probability": 0.51953125}, {"start": 
1373.34, "end": 1373.48, "word": " of", "probability": 0.79248046875}, {"start": 1373.48, "end": 1373.62, "word": " Z", "probability": 0.57763671875}, {"start": 1373.62, "end": 1373.74, "word": " is", "probability": 0.8525390625}, {"start": 1373.74, "end": 1373.98, "word": " smaller", "probability": 0.87158203125}, {"start": 1373.98, "end": 1374.2, "word": " than", "probability": 0.9423828125}, {"start": 1374.2, "end": 1374.4, "word": " 1", "probability": 0.9794921875}, {"start": 1374.4, "end": 1374.88, "word": ".44", "probability": 0.984375}, {"start": 1374.88, "end": 1375.52, "word": " is", "probability": 0.56591796875}, {"start": 1375.52, "end": 1375.72, "word": " given", "probability": 0.830078125}, {"start": 1375.72, "end": 1375.96, "word": " by", "probability": 0.96875}, {"start": 1375.96, "end": 1379.78, "word": " 9251.", "probability": 0.8712565104166666}, {"start": 1381.54, "end": 1381.86, "word": " Just", "probability": 0.69970703125}, {"start": 1381.86, "end": 1382.18, "word": " check", "probability": 0.93896484375}, {"start": 1382.18, "end": 1382.36, "word": " your", "probability": 0.87939453125}, {"start": 1382.36, "end": 1382.68, "word": " table,", "probability": 0.89501953125}, {"start": 1383.36, "end": 1384.04, "word": " minus", "probability": 0.9443359375}, {"start": 1384.04, "end": 1384.38, "word": " 0", "probability": 0.95068359375}, {"start": 1384.38, "end": 1384.94, "word": ".5,", "probability": 0.9970703125}, {"start": 1385.12, "end": 1385.34, "word": " so", "probability": 0.8935546875}, {"start": 1385.34, "end": 1385.54, "word": " the", "probability": 0.8916015625}, {"start": 1385.54, "end": 1385.84, "word": " final", "probability": 0.93994140625}, {"start": 1385.84, "end": 1386.28, "word": " result", "probability": 0.93359375}], "temperature": 1.0}, {"id": 50, "seek": 141468, "start": 1388.3, "end": 1414.68, "text": " is 0.4251. That means around 42 percent, 0.51, that the proportion lie between 40 and 45 percent. 
So that's how can we compute the probabilities underneath the normal curve if we are interested in the sample proportion.", "tokens": [307, 1958, 13, 19, 6074, 16, 13, 663, 1355, 926, 14034, 3043, 11, 1958, 13, 18682, 11, 300, 264, 16068, 4544, 1296, 3356, 293, 6905, 3043, 13, 407, 300, 311, 577, 393, 321, 14722, 264, 33783, 7223, 264, 2710, 7605, 498, 321, 366, 3102, 294, 264, 6889, 16068, 13], "avg_logprob": -0.20984374850988388, "compression_ratio": 1.4569536423841059, "no_speech_prob": 0.0, "words": [{"start": 1388.3, "end": 1388.6, "word": " is", "probability": 0.27783203125}, {"start": 1388.6, "end": 1388.82, "word": " 0", "probability": 0.76513671875}, {"start": 1388.82, "end": 1389.8, "word": ".4251.", "probability": 0.9429931640625}, {"start": 1390.2, "end": 1390.54, "word": " That", "probability": 0.89208984375}, {"start": 1390.54, "end": 1391.0, "word": " means", "probability": 0.92236328125}, {"start": 1391.0, "end": 1392.82, "word": " around", "probability": 0.82275390625}, {"start": 1392.82, "end": 1393.34, "word": " 42", "probability": 0.86669921875}, {"start": 1393.34, "end": 1393.88, "word": " percent,", "probability": 0.326171875}, {"start": 1394.92, "end": 1395.34, "word": " 0", "probability": 0.67138671875}, {"start": 1395.34, "end": 1395.96, "word": ".51,", "probability": 0.92138671875}, {"start": 1397.74, "end": 1398.34, "word": " that", "probability": 0.66015625}, {"start": 1398.34, "end": 1398.6, "word": " the", "probability": 0.89892578125}, {"start": 1398.6, "end": 1399.22, "word": " proportion", "probability": 0.72998046875}, {"start": 1399.22, "end": 1400.28, "word": " lie", "probability": 0.54931640625}, {"start": 1400.28, "end": 1400.86, "word": " between", "probability": 0.8681640625}, {"start": 1400.86, "end": 1401.62, "word": " 40", "probability": 0.9609375}, {"start": 1401.62, "end": 1401.8, "word": " and", "probability": 0.85986328125}, {"start": 1401.8, "end": 1402.28, "word": " 45", "probability": 0.96630859375}, {"start": 
1402.28, "end": 1404.0, "word": " percent.", "probability": 0.9033203125}, {"start": 1405.2, "end": 1405.82, "word": " So", "probability": 0.93896484375}, {"start": 1405.82, "end": 1406.4, "word": " that's", "probability": 0.9052734375}, {"start": 1406.4, "end": 1406.86, "word": " how", "probability": 0.7890625}, {"start": 1406.86, "end": 1407.08, "word": " can", "probability": 0.83837890625}, {"start": 1407.08, "end": 1407.24, "word": " we", "probability": 0.9404296875}, {"start": 1407.24, "end": 1407.68, "word": " compute", "probability": 0.9208984375}, {"start": 1407.68, "end": 1408.0, "word": " the", "probability": 0.890625}, {"start": 1408.0, "end": 1408.72, "word": " probabilities", "probability": 0.8701171875}, {"start": 1408.72, "end": 1409.44, "word": " underneath", "probability": 0.95751953125}, {"start": 1409.44, "end": 1410.18, "word": " the", "probability": 0.9130859375}, {"start": 1410.18, "end": 1410.5, "word": " normal", "probability": 0.8828125}, {"start": 1410.5, "end": 1410.88, "word": " curve", "probability": 0.947265625}, {"start": 1410.88, "end": 1411.9, "word": " if", "probability": 0.6962890625}, {"start": 1411.9, "end": 1412.08, "word": " we", "probability": 0.96044921875}, {"start": 1412.08, "end": 1412.24, "word": " are", "probability": 0.93359375}, {"start": 1412.24, "end": 1412.66, "word": " interested", "probability": 0.8994140625}, {"start": 1412.66, "end": 1413.0, "word": " in", "probability": 0.9482421875}, {"start": 1413.0, "end": 1413.16, "word": " the", "probability": 0.92138671875}, {"start": 1413.16, "end": 1413.54, "word": " sample", "probability": 0.78955078125}, {"start": 1413.54, "end": 1414.68, "word": " proportion.", "probability": 0.7568359375}], "temperature": 1.0}, {"id": 51, "seek": 144165, "start": 1419.63, "end": 1441.65, "text": " Now, in case if one of these conditions is not satisfied, we cannot use this code unless we increase the sample size. So here, the two conditions should be satisfied. 
For example, suppose n equals 200.", "tokens": [823, 11, 294, 1389, 498, 472, 295, 613, 4487, 307, 406, 11239, 11, 321, 2644, 764, 341, 3089, 5969, 321, 3488, 264, 6889, 2744, 13, 407, 510, 11, 264, 732, 4487, 820, 312, 11239, 13, 1171, 1365, 11, 7297, 297, 6915, 2331, 13], "avg_logprob": -0.2414772713726217, "compression_ratio": 1.4225352112676057, "no_speech_prob": 0.0, "words": [{"start": 1419.63, "end": 1420.47, "word": " Now,", "probability": 0.08642578125}, {"start": 1420.87, "end": 1421.05, "word": " in", "probability": 0.8193359375}, {"start": 1421.05, "end": 1421.37, "word": " case", "probability": 0.91015625}, {"start": 1421.37, "end": 1422.39, "word": " if", "probability": 0.71630859375}, {"start": 1422.39, "end": 1422.67, "word": " one", "probability": 0.92333984375}, {"start": 1422.67, "end": 1423.23, "word": " of", "probability": 0.88671875}, {"start": 1423.23, "end": 1423.81, "word": " these", "probability": 0.82177734375}, {"start": 1423.81, "end": 1424.41, "word": " conditions", "probability": 0.89404296875}, {"start": 1424.41, "end": 1425.59, "word": " is", "probability": 0.93701171875}, {"start": 1425.59, "end": 1425.87, "word": " not", "probability": 0.947265625}, {"start": 1425.87, "end": 1426.43, "word": " satisfied,", "probability": 0.84521484375}, {"start": 1426.99, "end": 1427.27, "word": " we", "probability": 0.95654296875}, {"start": 1427.27, "end": 1427.63, "word": " cannot", "probability": 0.88671875}, {"start": 1427.63, "end": 1428.25, "word": " use", "probability": 0.87744140625}, {"start": 1428.25, "end": 1429.19, "word": " this", "probability": 0.85595703125}, {"start": 1429.19, "end": 1429.51, "word": " code", "probability": 0.42822265625}, {"start": 1429.51, "end": 1430.65, "word": " unless", "probability": 0.458251953125}, {"start": 1430.65, "end": 1431.51, "word": " we", "probability": 0.94775390625}, {"start": 1431.51, "end": 1431.93, "word": " increase", "probability": 0.84912109375}, {"start": 1431.93, "end": 1432.11, "word": 
" the", "probability": 0.9189453125}, {"start": 1432.11, "end": 1432.35, "word": " sample", "probability": 0.87548828125}, {"start": 1432.35, "end": 1432.77, "word": " size.", "probability": 0.83642578125}, {"start": 1433.59, "end": 1433.83, "word": " So", "probability": 0.95166015625}, {"start": 1433.83, "end": 1434.09, "word": " here,", "probability": 0.6220703125}, {"start": 1434.21, "end": 1434.39, "word": " the", "probability": 0.89306640625}, {"start": 1434.39, "end": 1434.59, "word": " two", "probability": 0.92626953125}, {"start": 1434.59, "end": 1435.03, "word": " conditions", "probability": 0.8994140625}, {"start": 1435.03, "end": 1435.45, "word": " should", "probability": 0.966796875}, {"start": 1435.45, "end": 1435.69, "word": " be", "probability": 0.9521484375}, {"start": 1435.69, "end": 1436.19, "word": " satisfied.", "probability": 0.904296875}, {"start": 1436.31, "end": 1436.43, "word": " For", "probability": 0.96533203125}, {"start": 1436.43, "end": 1436.79, "word": " example,", "probability": 0.97265625}, {"start": 1436.95, "end": 1437.45, "word": " suppose", "probability": 0.91748046875}, {"start": 1437.45, "end": 1440.91, "word": " n", "probability": 0.4287109375}, {"start": 1440.91, "end": 1441.19, "word": " equals", "probability": 0.5048828125}, {"start": 1441.19, "end": 1441.65, "word": " 200.", "probability": 0.8271484375}], "temperature": 1.0}, {"id": 52, "seek": 147115, "start": 1443.79, "end": 1471.15, "text": " But this probability of Pi is 1%. Suppose again N is 200 and Pi is 1%. 
So this condition is not satisfied because N times Pi is just 2.", "tokens": [583, 341, 8482, 295, 17741, 307, 502, 6856, 21360, 797, 426, 307, 2331, 293, 17741, 307, 502, 6856, 407, 341, 4188, 307, 406, 11239, 570, 426, 1413, 17741, 307, 445, 568, 13], "avg_logprob": -0.18903882575757575, "compression_ratio": 1.2363636363636363, "no_speech_prob": 0.0, "words": [{"start": 1443.79, "end": 1444.35, "word": " But", "probability": 0.7470703125}, {"start": 1444.35, "end": 1445.65, "word": " this", "probability": 0.7451171875}, {"start": 1445.65, "end": 1446.21, "word": " probability", "probability": 0.93603515625}, {"start": 1446.21, "end": 1448.31, "word": " of", "probability": 0.91064453125}, {"start": 1448.31, "end": 1448.73, "word": " Pi", "probability": 0.480224609375}, {"start": 1448.73, "end": 1453.39, "word": " is", "probability": 0.87158203125}, {"start": 1453.39, "end": 1453.99, "word": " 1%.", "probability": 0.731201171875}, {"start": 1453.99, "end": 1457.57, "word": " Suppose", "probability": 0.724609375}, {"start": 1457.57, "end": 1458.17, "word": " again", "probability": 0.7490234375}, {"start": 1458.17, "end": 1458.41, "word": " N", "probability": 0.460205078125}, {"start": 1458.41, "end": 1458.53, "word": " is", "probability": 0.8994140625}, {"start": 1458.53, "end": 1458.85, "word": " 200", "probability": 0.9296875}, {"start": 1458.85, "end": 1460.55, "word": " and", "probability": 0.67138671875}, {"start": 1460.55, "end": 1461.01, "word": " Pi", "probability": 0.94873046875}, {"start": 1461.01, "end": 1462.47, "word": " is", "probability": 0.951171875}, {"start": 1462.47, "end": 1463.23, "word": " 1%.", "probability": 0.92041015625}, {"start": 1463.23, "end": 1465.21, "word": " So", "probability": 0.93896484375}, {"start": 1465.21, "end": 1465.63, "word": " this", "probability": 0.7822265625}, {"start": 1465.63, "end": 1466.07, "word": " condition", "probability": 0.94775390625}, {"start": 1466.07, "end": 1466.25, "word": " is", "probability": 
0.91455078125}, {"start": 1466.25, "end": 1466.43, "word": " not", "probability": 0.912109375}, {"start": 1466.43, "end": 1466.89, "word": " satisfied", "probability": 0.7548828125}, {"start": 1466.89, "end": 1467.41, "word": " because", "probability": 0.69921875}, {"start": 1467.41, "end": 1468.49, "word": " N", "probability": 0.95849609375}, {"start": 1468.49, "end": 1468.81, "word": " times", "probability": 0.75830078125}, {"start": 1468.81, "end": 1469.23, "word": " Pi", "probability": 0.9716796875}, {"start": 1469.23, "end": 1470.59, "word": " is", "probability": 0.94287109375}, {"start": 1470.59, "end": 1470.87, "word": " just", "probability": 0.88916015625}, {"start": 1470.87, "end": 1471.15, "word": " 2.", "probability": 0.880859375}], "temperature": 1.0}, {"id": 53, "seek": 149627, "start": 1472.71, "end": 1496.27, "text": " So you cannot use the z-score, because it should be at least 5. So even if the sample size is large, maybe one of the conditions is not satisfied because you have a small true proportion. So if the true proportion is very, very small, in this case you have to increase your sample size. Make sense? 
Any question?", "tokens": [407, 291, 2644, 764, 264, 710, 12, 4417, 418, 11, 570, 309, 820, 312, 412, 1935, 1025, 13, 407, 754, 498, 264, 6889, 2744, 307, 2416, 11, 1310, 472, 295, 264, 4487, 307, 406, 11239, 570, 291, 362, 257, 1359, 2074, 16068, 13, 407, 498, 264, 2074, 16068, 307, 588, 11, 588, 1359, 11, 294, 341, 1389, 291, 362, 281, 3488, 428, 6889, 2744, 13, 4387, 2020, 30, 2639, 1168, 30], "avg_logprob": -0.15483940434124735, "compression_ratio": 1.6473684210526316, "no_speech_prob": 0.0, "words": [{"start": 1472.71, "end": 1472.93, "word": " So", "probability": 0.73876953125}, {"start": 1472.93, "end": 1473.09, "word": " you", "probability": 0.7919921875}, {"start": 1473.09, "end": 1473.31, "word": " cannot", "probability": 0.796875}, {"start": 1473.31, "end": 1473.87, "word": " use", "probability": 0.84423828125}, {"start": 1473.87, "end": 1474.05, "word": " the", "probability": 0.52001953125}, {"start": 1474.05, "end": 1474.19, "word": " z", "probability": 0.7001953125}, {"start": 1474.19, "end": 1474.55, "word": "-score,", "probability": 0.7781575520833334}, {"start": 1474.79, "end": 1475.01, "word": " because", "probability": 0.72021484375}, {"start": 1475.01, "end": 1475.05, "word": " it", "probability": 0.82470703125}, {"start": 1475.05, "end": 1475.19, "word": " should", "probability": 0.97314453125}, {"start": 1475.19, "end": 1475.31, "word": " be", "probability": 0.935546875}, {"start": 1475.31, "end": 1475.45, "word": " at", "probability": 0.966796875}, {"start": 1475.45, "end": 1475.65, "word": " least", "probability": 0.95947265625}, {"start": 1475.65, "end": 1475.99, "word": " 5.", "probability": 0.544921875}, {"start": 1476.67, "end": 1476.97, "word": " So", "probability": 0.94677734375}, {"start": 1476.97, "end": 1477.27, "word": " even", "probability": 0.8251953125}, {"start": 1477.27, "end": 1477.43, "word": " if", "probability": 0.94921875}, {"start": 1477.43, "end": 1477.59, "word": " the", "probability": 0.91357421875}, {"start": 
1477.59, "end": 1477.85, "word": " sample", "probability": 0.8818359375}, {"start": 1477.85, "end": 1478.17, "word": " size", "probability": 0.86767578125}, {"start": 1478.17, "end": 1478.35, "word": " is", "probability": 0.9375}, {"start": 1478.35, "end": 1478.81, "word": " large,", "probability": 0.9697265625}, {"start": 1479.39, "end": 1479.79, "word": " maybe", "probability": 0.9443359375}, {"start": 1479.79, "end": 1480.05, "word": " one", "probability": 0.91943359375}, {"start": 1480.05, "end": 1480.19, "word": " of", "probability": 0.9677734375}, {"start": 1480.19, "end": 1480.31, "word": " the", "probability": 0.923828125}, {"start": 1480.31, "end": 1480.89, "word": " conditions", "probability": 0.89501953125}, {"start": 1480.89, "end": 1481.45, "word": " is", "probability": 0.93505859375}, {"start": 1481.45, "end": 1481.65, "word": " not", "probability": 0.91357421875}, {"start": 1481.65, "end": 1482.07, "word": " satisfied", "probability": 0.8603515625}, {"start": 1482.07, "end": 1482.47, "word": " because", "probability": 0.65771484375}, {"start": 1482.47, "end": 1482.69, "word": " you", "probability": 0.95458984375}, {"start": 1482.69, "end": 1482.89, "word": " have", "probability": 0.9423828125}, {"start": 1482.89, "end": 1483.03, "word": " a", "probability": 0.884765625}, {"start": 1483.03, "end": 1483.43, "word": " small", "probability": 0.92919921875}, {"start": 1483.43, "end": 1484.93, "word": " true", "probability": 0.77978515625}, {"start": 1484.93, "end": 1485.31, "word": " proportion.", "probability": 0.8583984375}, {"start": 1486.27, "end": 1486.49, "word": " So", "probability": 0.95751953125}, {"start": 1486.49, "end": 1486.71, "word": " if", "probability": 0.89208984375}, {"start": 1486.71, "end": 1486.91, "word": " the", "probability": 0.91162109375}, {"start": 1486.91, "end": 1487.13, "word": " true", "probability": 0.94580078125}, {"start": 1487.13, "end": 1487.45, "word": " proportion", "probability": 0.89013671875}, {"start": 1487.45, 
"end": 1487.69, "word": " is", "probability": 0.89404296875}, {"start": 1487.69, "end": 1487.95, "word": " very,", "probability": 0.857421875}, {"start": 1488.01, "end": 1488.19, "word": " very", "probability": 0.84716796875}, {"start": 1488.19, "end": 1488.67, "word": " small,", "probability": 0.91943359375}, {"start": 1489.09, "end": 1489.27, "word": " in", "probability": 0.87255859375}, {"start": 1489.27, "end": 1489.47, "word": " this", "probability": 0.94287109375}, {"start": 1489.47, "end": 1489.75, "word": " case", "probability": 0.91845703125}, {"start": 1489.75, "end": 1489.91, "word": " you", "probability": 0.5341796875}, {"start": 1489.91, "end": 1490.11, "word": " have", "probability": 0.9375}, {"start": 1490.11, "end": 1490.23, "word": " to", "probability": 0.9697265625}, {"start": 1490.23, "end": 1490.81, "word": " increase", "probability": 0.86767578125}, {"start": 1490.81, "end": 1491.25, "word": " your", "probability": 0.87841796875}, {"start": 1491.25, "end": 1492.25, "word": " sample", "probability": 0.87890625}, {"start": 1492.25, "end": 1492.69, "word": " size.", "probability": 0.8466796875}, {"start": 1493.29, "end": 1493.75, "word": " Make", "probability": 0.81884765625}, {"start": 1493.75, "end": 1494.11, "word": " sense?", "probability": 0.8466796875}, {"start": 1495.41, "end": 1495.93, "word": " Any", "probability": 0.92236328125}, {"start": 1495.93, "end": 1496.27, "word": " question?", "probability": 0.6357421875}], "temperature": 1.0}, {"id": 54, "seek": 151777, "start": 1498.95, "end": 1517.77, "text": " Now I will discuss some practice problems for chapter 7. Let's do some practice problems for chapter 7. 
I will give some", "tokens": [823, 286, 486, 2248, 512, 3124, 2740, 337, 7187, 1614, 13, 961, 311, 360, 512, 3124, 2740, 337, 7187, 1614, 13, 286, 486, 976, 512], "avg_logprob": -0.2070312459881489, "compression_ratio": 1.475609756097561, "no_speech_prob": 0.0, "words": [{"start": 1498.95, "end": 1500.01, "word": " Now", "probability": 0.77001953125}, {"start": 1500.01, "end": 1500.55, "word": " I", "probability": 0.69921875}, {"start": 1500.55, "end": 1500.77, "word": " will", "probability": 0.890625}, {"start": 1500.77, "end": 1501.23, "word": " discuss", "probability": 0.90673828125}, {"start": 1501.23, "end": 1501.69, "word": " some", "probability": 0.8984375}, {"start": 1501.69, "end": 1502.81, "word": " practice", "probability": 0.90966796875}, {"start": 1502.81, "end": 1503.47, "word": " problems", "probability": 0.82568359375}, {"start": 1503.47, "end": 1505.73, "word": " for", "probability": 0.7802734375}, {"start": 1505.73, "end": 1506.07, "word": " chapter", "probability": 0.58544921875}, {"start": 1506.07, "end": 1507.31, "word": " 7.", "probability": 0.71533203125}, {"start": 1509.45, "end": 1510.21, "word": " Let's", "probability": 0.87646484375}, {"start": 1510.21, "end": 1510.39, "word": " do", "probability": 0.93701171875}, {"start": 1510.39, "end": 1510.87, "word": " some", "probability": 0.91015625}, {"start": 1510.87, "end": 1511.83, "word": " practice", "probability": 0.93896484375}, {"start": 1511.83, "end": 1513.87, "word": " problems", "probability": 0.84130859375}, {"start": 1513.87, "end": 1514.25, "word": " for", "probability": 0.93212890625}, {"start": 1514.25, "end": 1514.49, "word": " chapter", "probability": 0.80419921875}, {"start": 1514.49, "end": 1515.01, "word": " 7.", "probability": 0.9794921875}, {"start": 1515.91, "end": 1516.13, "word": " I", "probability": 0.98193359375}, {"start": 1516.13, "end": 1516.27, "word": " will", "probability": 0.8916015625}, {"start": 1516.27, "end": 1516.71, "word": " give", "probability": 
0.783203125}, {"start": 1516.71, "end": 1517.77, "word": " some", "probability": 0.87744140625}], "temperature": 1.0}, {"id": 55, "seek": 155125, "start": 1528.71, "end": 1551.25, "text": " We have four choices. Parameters. Now, same distribution always describes the same distribution of statistics. So, not parameters, we have to choose the statistics. Same distribution describes distribution of always statistics. Next.", "tokens": [492, 362, 1451, 7994, 13, 34882, 6202, 13, 823, 11, 912, 7316, 1009, 15626, 264, 912, 7316, 295, 12523, 13, 407, 11, 406, 9834, 11, 321, 362, 281, 2826, 264, 12523, 13, 10635, 7316, 15626, 7316, 295, 1009, 12523, 13, 3087, 13], "avg_logprob": -0.3670058111811793, "compression_ratio": 1.828125, "no_speech_prob": 0.0, "words": [{"start": 1528.71, "end": 1529.75, "word": " We", "probability": 0.2308349609375}, {"start": 1529.75, "end": 1529.99, "word": " have", "probability": 0.931640625}, {"start": 1529.99, "end": 1530.21, "word": " four", "probability": 0.78466796875}, {"start": 1530.21, "end": 1530.83, "word": " choices.", "probability": 0.93603515625}, {"start": 1531.65, "end": 1532.29, "word": " Parameters.", "probability": 0.88232421875}, {"start": 1533.49, "end": 1533.67, "word": " Now,", "probability": 0.81201171875}, {"start": 1534.01, "end": 1534.15, "word": " same", "probability": 0.269775390625}, {"start": 1534.15, "end": 1534.69, "word": " distribution", "probability": 0.84423828125}, {"start": 1534.69, "end": 1535.29, "word": " always", "probability": 0.83349609375}, {"start": 1535.29, "end": 1535.91, "word": " describes", "probability": 0.371826171875}, {"start": 1535.91, "end": 1536.15, "word": " the", "probability": 0.78564453125}, {"start": 1536.15, "end": 1536.39, "word": " same", "probability": 0.8818359375}, {"start": 1536.39, "end": 1536.91, "word": " distribution", "probability": 0.84716796875}, {"start": 1536.91, "end": 1537.11, "word": " of", "probability": 0.94384765625}, {"start": 1537.11, "end": 1537.71, "word": 
" statistics.", "probability": 0.64599609375}, {"start": 1539.91, "end": 1540.11, "word": " So,", "probability": 0.68359375}, {"start": 1541.39, "end": 1541.57, "word": " not", "probability": 0.82568359375}, {"start": 1541.57, "end": 1542.07, "word": " parameters,", "probability": 0.94775390625}, {"start": 1542.33, "end": 1542.35, "word": " we", "probability": 0.81103515625}, {"start": 1542.35, "end": 1542.51, "word": " have", "probability": 0.875}, {"start": 1542.51, "end": 1542.67, "word": " to", "probability": 0.962890625}, {"start": 1542.67, "end": 1543.05, "word": " choose", "probability": 0.88916015625}, {"start": 1543.05, "end": 1543.71, "word": " the", "probability": 0.50244140625}, {"start": 1543.71, "end": 1544.35, "word": " statistics.", "probability": 0.916015625}, {"start": 1544.95, "end": 1545.31, "word": " Same", "probability": 0.35546875}, {"start": 1545.31, "end": 1545.97, "word": " distribution", "probability": 0.65869140625}, {"start": 1545.97, "end": 1546.69, "word": " describes", "probability": 0.60205078125}, {"start": 1546.69, "end": 1547.27, "word": " distribution", "probability": 0.58349609375}, {"start": 1547.27, "end": 1547.73, "word": " of", "probability": 0.92626953125}, {"start": 1547.73, "end": 1548.95, "word": " always", "probability": 0.6689453125}, {"start": 1548.95, "end": 1549.65, "word": " statistics.", "probability": 0.90771484375}, {"start": 1550.85, "end": 1551.25, "word": " Next.", "probability": 0.9248046875}], "temperature": 1.0}, {"id": 56, "seek": 158055, "start": 1553.49, "end": 1580.55, "text": " The central limit theorem is important in statistics because we have four choices. Let's see why C is correct. Part A says, for a large N, it says that the population is approximately normal. We cannot say the population is normal. 
The standard distribution of a statistic is approximately normal.", "tokens": [440, 5777, 4948, 20904, 307, 1021, 294, 12523, 570, 321, 362, 1451, 7994, 13, 961, 311, 536, 983, 383, 307, 3006, 13, 4100, 316, 1619, 11, 337, 257, 2416, 426, 11, 309, 1619, 300, 264, 4415, 307, 10447, 2710, 13, 492, 2644, 584, 264, 4415, 307, 2710, 13, 440, 3832, 7316, 295, 257, 29588, 307, 10447, 2710, 13], "avg_logprob": -0.21994174334962488, "compression_ratio": 1.6195652173913044, "no_speech_prob": 0.0, "words": [{"start": 1553.49, "end": 1554.29, "word": " The", "probability": 0.626953125}, {"start": 1554.29, "end": 1554.79, "word": " central", "probability": 0.60205078125}, {"start": 1554.79, "end": 1555.09, "word": " limit", "probability": 0.9287109375}, {"start": 1555.09, "end": 1555.53, "word": " theorem", "probability": 0.86181640625}, {"start": 1555.53, "end": 1556.13, "word": " is", "probability": 0.921875}, {"start": 1556.13, "end": 1556.59, "word": " important", "probability": 0.8681640625}, {"start": 1556.59, "end": 1556.89, "word": " in", "probability": 0.92919921875}, {"start": 1556.89, "end": 1557.47, "word": " statistics", "probability": 0.8408203125}, {"start": 1557.47, "end": 1558.27, "word": " because", "probability": 0.79248046875}, {"start": 1558.27, "end": 1559.07, "word": " we", "probability": 0.830078125}, {"start": 1559.07, "end": 1559.23, "word": " have", "probability": 0.9482421875}, {"start": 1559.23, "end": 1559.47, "word": " four", "probability": 0.72705078125}, {"start": 1559.47, "end": 1559.99, "word": " choices.", "probability": 0.9326171875}, {"start": 1560.79, "end": 1561.01, "word": " Let's", "probability": 0.890625}, {"start": 1561.01, "end": 1561.31, "word": " see", "probability": 0.91064453125}, {"start": 1561.31, "end": 1562.27, "word": " why", "probability": 0.8330078125}, {"start": 1562.27, "end": 1563.49, "word": " C", "probability": 0.40966796875}, {"start": 1563.49, "end": 1563.71, "word": " is", "probability": 0.94921875}, {"start": 
1563.71, "end": 1564.15, "word": " correct.", "probability": 0.896484375}, {"start": 1565.29, "end": 1565.57, "word": " Part", "probability": 0.8798828125}, {"start": 1565.57, "end": 1565.81, "word": " A", "probability": 0.6005859375}, {"start": 1565.81, "end": 1566.19, "word": " says,", "probability": 0.87744140625}, {"start": 1566.47, "end": 1566.69, "word": " for", "probability": 0.87548828125}, {"start": 1566.69, "end": 1566.83, "word": " a", "probability": 0.89013671875}, {"start": 1566.83, "end": 1567.09, "word": " large", "probability": 0.95947265625}, {"start": 1567.09, "end": 1567.37, "word": " N,", "probability": 0.79345703125}, {"start": 1567.67, "end": 1567.81, "word": " it", "probability": 0.7265625}, {"start": 1567.81, "end": 1568.11, "word": " says", "probability": 0.89013671875}, {"start": 1568.11, "end": 1568.51, "word": " that", "probability": 0.91943359375}, {"start": 1568.51, "end": 1569.59, "word": " the", "probability": 0.84423828125}, {"start": 1569.59, "end": 1570.03, "word": " population", "probability": 0.962890625}, {"start": 1570.03, "end": 1570.35, "word": " is", "probability": 0.9482421875}, {"start": 1570.35, "end": 1571.07, "word": " approximately", "probability": 0.87548828125}, {"start": 1571.07, "end": 1571.55, "word": " normal.", "probability": 0.85986328125}, {"start": 1572.71, "end": 1573.07, "word": " We", "probability": 0.9443359375}, {"start": 1573.07, "end": 1573.33, "word": " cannot", "probability": 0.8603515625}, {"start": 1573.33, "end": 1573.71, "word": " say", "probability": 0.95166015625}, {"start": 1573.71, "end": 1574.23, "word": " the", "probability": 0.765625}, {"start": 1574.23, "end": 1574.65, "word": " population", "probability": 0.9228515625}, {"start": 1574.65, "end": 1574.89, "word": " is", "probability": 0.95458984375}, {"start": 1574.89, "end": 1575.27, "word": " normal.", "probability": 0.86328125}, {"start": 1576.05, "end": 1576.21, "word": " The", "probability": 0.892578125}, {"start": 1576.21, "end": 
1576.49, "word": " standard", "probability": 0.07855224609375}, {"start": 1576.49, "end": 1577.27, "word": " distribution", "probability": 0.85546875}, {"start": 1577.27, "end": 1577.95, "word": " of", "probability": 0.9091796875}, {"start": 1577.95, "end": 1578.15, "word": " a", "probability": 0.9296875}, {"start": 1578.15, "end": 1578.61, "word": " statistic", "probability": 0.892578125}, {"start": 1578.61, "end": 1579.51, "word": " is", "probability": 0.9501953125}, {"start": 1579.51, "end": 1580.17, "word": " approximately", "probability": 0.87939453125}, {"start": 1580.17, "end": 1580.55, "word": " normal.", "probability": 0.8662109375}], "temperature": 1.0}, {"id": 57, "seek": 160651, "start": 1581.49, "end": 1606.51, "text": " So, for this reason, part A is incorrect. For the other one, for any population, so regardless of the population, it says the sample distribution, the sample mean is approximately normal regardless of the sample size. This is incorrect because it says for any population.", "tokens": [407, 11, 337, 341, 1778, 11, 644, 316, 307, 18424, 13, 1171, 264, 661, 472, 11, 337, 604, 4415, 11, 370, 10060, 295, 264, 4415, 11, 309, 1619, 264, 6889, 7316, 11, 264, 6889, 914, 307, 10447, 2710, 10060, 295, 264, 6889, 2744, 13, 639, 307, 18424, 570, 309, 1619, 337, 604, 4415, 13], "avg_logprob": -0.2555397781458768, "compression_ratio": 1.7662337662337662, "no_speech_prob": 0.0, "words": [{"start": 1581.49, "end": 1581.79, "word": " So,", "probability": 0.60302734375}, {"start": 1581.99, "end": 1582.35, "word": " for", "probability": 0.9013671875}, {"start": 1582.35, "end": 1582.61, "word": " this", "probability": 0.9208984375}, {"start": 1582.61, "end": 1582.99, "word": " reason,", "probability": 0.96435546875}, {"start": 1583.69, "end": 1584.15, "word": " part", "probability": 0.49267578125}, {"start": 1584.15, "end": 1584.41, "word": " A", "probability": 0.51025390625}, {"start": 1584.41, "end": 1584.57, "word": " is", "probability": 0.923828125}, 
{"start": 1584.57, "end": 1584.99, "word": " incorrect.", "probability": 0.8779296875}, {"start": 1586.25, "end": 1586.61, "word": " For", "probability": 0.85205078125}, {"start": 1586.61, "end": 1586.73, "word": " the", "probability": 0.74951171875}, {"start": 1586.73, "end": 1586.93, "word": " other", "probability": 0.88427734375}, {"start": 1586.93, "end": 1587.25, "word": " one,", "probability": 0.9091796875}, {"start": 1588.03, "end": 1588.23, "word": " for", "probability": 0.8056640625}, {"start": 1588.23, "end": 1588.49, "word": " any", "probability": 0.89501953125}, {"start": 1588.49, "end": 1589.11, "word": " population,", "probability": 0.9384765625}, {"start": 1591.35, "end": 1591.77, "word": " so", "probability": 0.2373046875}, {"start": 1591.77, "end": 1592.27, "word": " regardless", "probability": 0.84326171875}, {"start": 1592.27, "end": 1592.95, "word": " of", "probability": 0.96728515625}, {"start": 1592.95, "end": 1593.09, "word": " the", "probability": 0.8173828125}, {"start": 1593.09, "end": 1593.55, "word": " population,", "probability": 0.92919921875}, {"start": 1594.57, "end": 1594.69, "word": " it", "probability": 0.8994140625}, {"start": 1594.69, "end": 1595.13, "word": " says", "probability": 0.87158203125}, {"start": 1595.13, "end": 1595.95, "word": " the", "probability": 0.466552734375}, {"start": 1595.95, "end": 1596.27, "word": " sample", "probability": 0.405029296875}, {"start": 1596.27, "end": 1596.93, "word": " distribution,", "probability": 0.8486328125}, {"start": 1597.13, "end": 1597.27, "word": " the", "probability": 0.8173828125}, {"start": 1597.27, "end": 1597.59, "word": " sample", "probability": 0.8818359375}, {"start": 1597.59, "end": 1597.95, "word": " mean", "probability": 0.89990234375}, {"start": 1597.95, "end": 1598.95, "word": " is", "probability": 0.66259765625}, {"start": 1598.95, "end": 1599.89, "word": " approximately", "probability": 0.89892578125}, {"start": 1599.89, "end": 1600.43, "word": " normal", 
"probability": 0.83251953125}, {"start": 1600.43, "end": 1601.77, "word": " regardless", "probability": 0.818359375}, {"start": 1601.77, "end": 1602.41, "word": " of", "probability": 0.966796875}, {"start": 1602.41, "end": 1602.53, "word": " the", "probability": 0.51611328125}, {"start": 1602.53, "end": 1602.73, "word": " sample", "probability": 0.9091796875}, {"start": 1602.73, "end": 1603.01, "word": " size.", "probability": 0.806640625}, {"start": 1603.41, "end": 1603.65, "word": " This", "probability": 0.6044921875}, {"start": 1603.65, "end": 1603.73, "word": " is", "probability": 0.94091796875}, {"start": 1603.73, "end": 1604.11, "word": " incorrect", "probability": 0.94189453125}, {"start": 1604.11, "end": 1604.61, "word": " because", "probability": 0.77392578125}, {"start": 1604.61, "end": 1604.85, "word": " it", "probability": 0.9384765625}, {"start": 1604.85, "end": 1605.19, "word": " says", "probability": 0.85595703125}, {"start": 1605.19, "end": 1605.51, "word": " for", "probability": 0.61181640625}, {"start": 1605.51, "end": 1605.83, "word": " any", "probability": 0.875}, {"start": 1605.83, "end": 1606.51, "word": " population.", "probability": 0.9033203125}], "temperature": 1.0}, {"id": 58, "seek": 162274, "start": 1607.78, "end": 1622.74, "text": " So P is incorrect. 
But if it says for normal population, then we can say the sampling distribution of the sample mean is approximately normal regardless of the sample size.", "tokens": [407, 430, 307, 18424, 13, 583, 498, 309, 1619, 337, 2710, 4415, 11, 550, 321, 393, 584, 264, 21179, 7316, 295, 264, 6889, 914, 307, 10447, 2710, 10060, 295, 264, 6889, 2744, 13], "avg_logprob": -0.25804228642407584, "compression_ratio": 1.3951612903225807, "no_speech_prob": 0.0, "words": [{"start": 1607.78, "end": 1608.14, "word": " So", "probability": 0.7099609375}, {"start": 1608.14, "end": 1608.46, "word": " P", "probability": 0.2374267578125}, {"start": 1608.46, "end": 1608.68, "word": " is", "probability": 0.94140625}, {"start": 1608.68, "end": 1610.26, "word": " incorrect.", "probability": 0.9365234375}, {"start": 1611.22, "end": 1611.98, "word": " But", "probability": 0.73193359375}, {"start": 1611.98, "end": 1612.22, "word": " if", "probability": 0.9208984375}, {"start": 1612.22, "end": 1612.88, "word": " it", "probability": 0.9228515625}, {"start": 1612.88, "end": 1613.3, "word": " says", "probability": 0.85546875}, {"start": 1613.3, "end": 1613.94, "word": " for", "probability": 0.81982421875}, {"start": 1613.94, "end": 1614.62, "word": " normal", "probability": 0.88134765625}, {"start": 1614.62, "end": 1615.14, "word": " population,", "probability": 0.93017578125}, {"start": 1616.1, "end": 1616.4, "word": " then", "probability": 0.85693359375}, {"start": 1616.4, "end": 1616.58, "word": " we", "probability": 0.939453125}, {"start": 1616.58, "end": 1616.86, "word": " can", "probability": 0.9462890625}, {"start": 1616.86, "end": 1617.26, "word": " say", "probability": 0.939453125}, {"start": 1617.26, "end": 1617.7, "word": " the", "probability": 0.81640625}, {"start": 1617.7, "end": 1618.18, "word": " sampling", "probability": 0.5732421875}, {"start": 1618.18, "end": 1618.8, "word": " distribution", "probability": 0.87646484375}, {"start": 1618.8, "end": 1619.06, "word": " of", "probability": 
0.4375}, {"start": 1619.06, "end": 1619.24, "word": " the", "probability": 0.84912109375}, {"start": 1619.24, "end": 1619.52, "word": " sample", "probability": 0.79736328125}, {"start": 1619.52, "end": 1619.84, "word": " mean", "probability": 0.64208984375}, {"start": 1619.84, "end": 1620.36, "word": " is", "probability": 0.9326171875}, {"start": 1620.36, "end": 1620.92, "word": " approximately", "probability": 0.8642578125}, {"start": 1620.92, "end": 1621.34, "word": " normal", "probability": 0.8720703125}, {"start": 1621.34, "end": 1621.84, "word": " regardless", "probability": 0.347412109375}, {"start": 1621.84, "end": 1622.0, "word": " of", "probability": 0.92822265625}, {"start": 1622.0, "end": 1622.16, "word": " the", "probability": 0.76513671875}, {"start": 1622.16, "end": 1622.38, "word": " sample", "probability": 0.8388671875}, {"start": 1622.38, "end": 1622.74, "word": " size.", "probability": 0.81298828125}], "temperature": 1.0}, {"id": 59, "seek": 164734, "start": 1623.88, "end": 1647.34, "text": " But it says for any, so that means incorrect. Now part D, for example, for any size sample, it means regardless of the sample size, the theorem says the sample distribution of the sample mean is approximately normal. That's incorrect. 
Part C, for large N, it says the sample distribution of the sample mean is approximately", "tokens": [583, 309, 1619, 337, 604, 11, 370, 300, 1355, 18424, 13, 823, 644, 413, 11, 337, 1365, 11, 337, 604, 2744, 6889, 11, 309, 1355, 10060, 295, 264, 6889, 2744, 11, 264, 20904, 1619, 264, 6889, 7316, 295, 264, 6889, 914, 307, 10447, 2710, 13, 663, 311, 18424, 13, 4100, 383, 11, 337, 2416, 426, 11, 309, 1619, 264, 6889, 7316, 295, 264, 6889, 914, 307, 10447], "avg_logprob": -0.22001379311961286, "compression_ratio": 1.9877300613496933, "no_speech_prob": 0.0, "words": [{"start": 1623.88, "end": 1624.22, "word": " But", "probability": 0.447509765625}, {"start": 1624.22, "end": 1624.52, "word": " it", "probability": 0.8896484375}, {"start": 1624.52, "end": 1624.82, "word": " says", "probability": 0.861328125}, {"start": 1624.82, "end": 1625.1, "word": " for", "probability": 0.77392578125}, {"start": 1625.1, "end": 1625.4, "word": " any,", "probability": 0.8349609375}, {"start": 1625.78, "end": 1626.12, "word": " so", "probability": 0.92822265625}, {"start": 1626.12, "end": 1626.32, "word": " that", "probability": 0.92724609375}, {"start": 1626.32, "end": 1626.7, "word": " means", "probability": 0.923828125}, {"start": 1626.7, "end": 1627.3, "word": " incorrect.", "probability": 0.884765625}, {"start": 1627.96, "end": 1628.22, "word": " Now", "probability": 0.91162109375}, {"start": 1628.22, "end": 1628.84, "word": " part", "probability": 0.296630859375}, {"start": 1628.84, "end": 1629.1, "word": " D,", "probability": 0.5927734375}, {"start": 1629.2, "end": 1629.32, "word": " for", "probability": 0.94775390625}, {"start": 1629.32, "end": 1629.72, "word": " example,", "probability": 0.95654296875}, {"start": 1629.88, "end": 1630.08, "word": " for", "probability": 0.93994140625}, {"start": 1630.08, "end": 1630.42, "word": " any", "probability": 0.90625}, {"start": 1630.42, "end": 1630.9, "word": " size", "probability": 0.76953125}, {"start": 1630.9, "end": 1631.4, "word": " 
sample,", "probability": 0.58642578125}, {"start": 1632.02, "end": 1632.28, "word": " it", "probability": 0.461669921875}, {"start": 1632.28, "end": 1632.56, "word": " means", "probability": 0.93310546875}, {"start": 1632.56, "end": 1633.5, "word": " regardless", "probability": 0.8671875}, {"start": 1633.5, "end": 1633.76, "word": " of", "probability": 0.95654296875}, {"start": 1633.76, "end": 1633.92, "word": " the", "probability": 0.88134765625}, {"start": 1633.92, "end": 1634.16, "word": " sample", "probability": 0.8720703125}, {"start": 1634.16, "end": 1634.6, "word": " size,", "probability": 0.83447265625}, {"start": 1635.44, "end": 1635.74, "word": " the", "probability": 0.8828125}, {"start": 1635.74, "end": 1636.22, "word": " theorem", "probability": 0.76513671875}, {"start": 1636.22, "end": 1636.72, "word": " says", "probability": 0.888671875}, {"start": 1636.72, "end": 1637.32, "word": " the", "probability": 0.69482421875}, {"start": 1637.32, "end": 1637.48, "word": " sample", "probability": 0.47412109375}, {"start": 1637.48, "end": 1638.16, "word": " distribution", "probability": 0.85107421875}, {"start": 1638.16, "end": 1638.6, "word": " of", "probability": 0.9658203125}, {"start": 1638.6, "end": 1638.76, "word": " the", "probability": 0.91748046875}, {"start": 1638.76, "end": 1639.02, "word": " sample", "probability": 0.84130859375}, {"start": 1639.02, "end": 1639.22, "word": " mean", "probability": 0.8388671875}, {"start": 1639.22, "end": 1639.36, "word": " is", "probability": 0.9501953125}, {"start": 1639.36, "end": 1639.84, "word": " approximately", "probability": 0.86083984375}, {"start": 1639.84, "end": 1640.24, "word": " normal.", "probability": 0.849609375}, {"start": 1640.98, "end": 1641.28, "word": " That's", "probability": 0.94189453125}, {"start": 1641.28, "end": 1641.66, "word": " incorrect.", "probability": 0.93505859375}, {"start": 1642.54, "end": 1642.8, "word": " Part", "probability": 0.888671875}, {"start": 1642.8, "end": 1643.08, 
"word": " C,", "probability": 0.97119140625}, {"start": 1643.34, "end": 1643.58, "word": " for", "probability": 0.95166015625}, {"start": 1643.58, "end": 1643.86, "word": " large", "probability": 0.94091796875}, {"start": 1643.86, "end": 1644.1, "word": " N,", "probability": 0.84228515625}, {"start": 1644.3, "end": 1644.52, "word": " it", "probability": 0.92138671875}, {"start": 1644.52, "end": 1644.82, "word": " says", "probability": 0.8876953125}, {"start": 1644.82, "end": 1645.12, "word": " the", "probability": 0.748046875}, {"start": 1645.12, "end": 1645.32, "word": " sample", "probability": 0.73876953125}, {"start": 1645.32, "end": 1645.94, "word": " distribution", "probability": 0.861328125}, {"start": 1645.94, "end": 1646.16, "word": " of", "probability": 0.95947265625}, {"start": 1646.16, "end": 1646.28, "word": " the", "probability": 0.912109375}, {"start": 1646.28, "end": 1646.48, "word": " sample", "probability": 0.87744140625}, {"start": 1646.48, "end": 1646.66, "word": " mean", "probability": 0.9384765625}, {"start": 1646.66, "end": 1646.82, "word": " is", "probability": 0.95068359375}, {"start": 1646.82, "end": 1647.34, "word": " approximately", "probability": 0.86962890625}], "temperature": 1.0}, {"id": 60, "seek": 167513, "start": 1648.99, "end": 1675.13, "text": " Normal, regardless of the shape of the distribution, that's true, because here we have large sample size. So regardless of the population, the shape of the population, we can say that the sample distribution of the sample mean is approximately normally distributed. Number three. 
Which of the following statements about the sample distribution of the sample mean is incorrect?", "tokens": [21277, 11, 10060, 295, 264, 3909, 295, 264, 7316, 11, 300, 311, 2074, 11, 570, 510, 321, 362, 2416, 6889, 2744, 13, 407, 10060, 295, 264, 4415, 11, 264, 3909, 295, 264, 4415, 11, 321, 393, 584, 300, 264, 6889, 7316, 295, 264, 6889, 914, 307, 10447, 5646, 12631, 13, 5118, 1045, 13, 3013, 295, 264, 3480, 12363, 466, 264, 6889, 7316, 295, 264, 6889, 914, 307, 18424, 30], "avg_logprob": -0.19084820917674472, "compression_ratio": 1.9635416666666667, "no_speech_prob": 0.0, "words": [{"start": 1648.9899999999998, "end": 1649.6299999999999, "word": " Normal,", "probability": 0.42236328125}, {"start": 1649.6299999999999, "end": 1650.27, "word": " regardless", "probability": 0.8779296875}, {"start": 1650.27, "end": 1650.53, "word": " of", "probability": 0.9560546875}, {"start": 1650.53, "end": 1650.73, "word": " the", "probability": 0.89453125}, {"start": 1650.73, "end": 1650.97, "word": " shape", "probability": 0.90283203125}, {"start": 1650.97, "end": 1651.11, "word": " of", "probability": 0.9482421875}, {"start": 1651.11, "end": 1651.21, "word": " the", "probability": 0.5986328125}, {"start": 1651.21, "end": 1651.65, "word": " distribution,", "probability": 0.8369140625}, {"start": 1651.95, "end": 1652.17, "word": " that's", "probability": 0.830810546875}, {"start": 1652.17, "end": 1652.61, "word": " true,", "probability": 0.94873046875}, {"start": 1653.01, "end": 1653.45, "word": " because", "probability": 0.88671875}, {"start": 1653.45, "end": 1654.13, "word": " here", "probability": 0.81298828125}, {"start": 1654.13, "end": 1654.31, "word": " we", "probability": 0.89794921875}, {"start": 1654.31, "end": 1654.59, "word": " have", "probability": 0.94384765625}, {"start": 1654.59, "end": 1655.05, "word": " large", "probability": 0.81689453125}, {"start": 1655.05, "end": 1655.41, "word": " sample", "probability": 0.8837890625}, {"start": 1655.41, "end": 1655.89, 
"word": " size.", "probability": 0.7119140625}, {"start": 1656.05, "end": 1656.19, "word": " So", "probability": 0.89697265625}, {"start": 1656.19, "end": 1656.65, "word": " regardless", "probability": 0.703125}, {"start": 1656.65, "end": 1657.03, "word": " of", "probability": 0.96630859375}, {"start": 1657.03, "end": 1657.17, "word": " the", "probability": 0.8388671875}, {"start": 1657.17, "end": 1657.57, "word": " population,", "probability": 0.97998046875}, {"start": 1658.25, "end": 1658.43, "word": " the", "probability": 0.85986328125}, {"start": 1658.43, "end": 1658.69, "word": " shape", "probability": 0.8955078125}, {"start": 1658.69, "end": 1658.85, "word": " of", "probability": 0.96240234375}, {"start": 1658.85, "end": 1658.97, "word": " the", "probability": 0.84130859375}, {"start": 1658.97, "end": 1659.27, "word": " population,", "probability": 0.94970703125}, {"start": 1659.47, "end": 1659.51, "word": " we", "probability": 0.791015625}, {"start": 1659.51, "end": 1659.69, "word": " can", "probability": 0.9462890625}, {"start": 1659.69, "end": 1659.91, "word": " say", "probability": 0.9423828125}, {"start": 1659.91, "end": 1660.31, "word": " that", "probability": 0.93310546875}, {"start": 1660.31, "end": 1661.39, "word": " the", "probability": 0.79052734375}, {"start": 1661.39, "end": 1661.67, "word": " sample", "probability": 0.417724609375}, {"start": 1661.67, "end": 1662.27, "word": " distribution", "probability": 0.88671875}, {"start": 1662.27, "end": 1662.47, "word": " of", "probability": 0.720703125}, {"start": 1662.47, "end": 1662.63, "word": " the", "probability": 0.8876953125}, {"start": 1662.63, "end": 1662.83, "word": " sample", "probability": 0.873046875}, {"start": 1662.83, "end": 1663.07, "word": " mean", "probability": 0.89404296875}, {"start": 1663.07, "end": 1663.55, "word": " is", "probability": 0.94189453125}, {"start": 1663.55, "end": 1664.29, "word": " approximately", "probability": 0.88671875}, {"start": 1664.29, "end": 1665.19, 
"word": " normally", "probability": 0.8046875}, {"start": 1665.19, "end": 1666.05, "word": " distributed.", "probability": 0.91064453125}, {"start": 1667.45, "end": 1668.09, "word": " Number", "probability": 0.75341796875}, {"start": 1668.09, "end": 1668.47, "word": " three.", "probability": 0.69482421875}, {"start": 1670.23, "end": 1670.63, "word": " Which", "probability": 0.7890625}, {"start": 1670.63, "end": 1670.77, "word": " of", "probability": 0.966796875}, {"start": 1670.77, "end": 1670.91, "word": " the", "probability": 0.9169921875}, {"start": 1670.91, "end": 1671.25, "word": " following", "probability": 0.87060546875}, {"start": 1671.25, "end": 1671.97, "word": " statements", "probability": 0.82373046875}, {"start": 1671.97, "end": 1672.37, "word": " about", "probability": 0.9091796875}, {"start": 1672.37, "end": 1672.61, "word": " the", "probability": 0.9150390625}, {"start": 1672.61, "end": 1672.83, "word": " sample", "probability": 0.85546875}, {"start": 1672.83, "end": 1673.49, "word": " distribution", "probability": 0.8720703125}, {"start": 1673.49, "end": 1673.85, "word": " of", "probability": 0.95361328125}, {"start": 1673.85, "end": 1674.03, "word": " the", "probability": 0.90576171875}, {"start": 1674.03, "end": 1674.27, "word": " sample", "probability": 0.90283203125}, {"start": 1674.27, "end": 1674.55, "word": " mean", "probability": 0.9501953125}, {"start": 1674.55, "end": 1674.75, "word": " is", "probability": 0.93798828125}, {"start": 1674.75, "end": 1675.13, "word": " incorrect?", "probability": 0.9375}], "temperature": 1.0}, {"id": 61, "seek": 170494, "start": 1676.7, "end": 1704.94, "text": " Here we are looking for the incorrect statement. Look at A, the sample distribution of the sample mean is approximately normal whenever the sample size is sufficiently large. This is correct. B, the sample distribution of the sample mean is generated by repeatedly taking samples of size N and computing the sample means. 
That's also correct.", "tokens": [1692, 321, 366, 1237, 337, 264, 18424, 5629, 13, 2053, 412, 316, 11, 264, 6889, 7316, 295, 264, 6889, 914, 307, 10447, 2710, 5699, 264, 6889, 2744, 307, 31868, 2416, 13, 639, 307, 3006, 13, 363, 11, 264, 6889, 7316, 295, 264, 6889, 914, 307, 10833, 538, 18227, 1940, 10938, 295, 2744, 426, 293, 15866, 264, 6889, 1355, 13, 663, 311, 611, 3006, 13], "avg_logprob": -0.15673076923076923, "compression_ratio": 1.7864583333333333, "no_speech_prob": 0.0, "words": [{"start": 1676.7, "end": 1677.0, "word": " Here", "probability": 0.80908203125}, {"start": 1677.0, "end": 1677.14, "word": " we", "probability": 0.69482421875}, {"start": 1677.14, "end": 1677.34, "word": " are", "probability": 0.9189453125}, {"start": 1677.34, "end": 1677.84, "word": " looking", "probability": 0.90625}, {"start": 1677.84, "end": 1678.2, "word": " for", "probability": 0.9501953125}, {"start": 1678.2, "end": 1678.4, "word": " the", "probability": 0.8837890625}, {"start": 1678.4, "end": 1678.86, "word": " incorrect", "probability": 0.94580078125}, {"start": 1678.86, "end": 1679.48, "word": " statement.", "probability": 0.9111328125}, {"start": 1682.78, "end": 1683.48, "word": " Look", "probability": 0.7998046875}, {"start": 1683.48, "end": 1683.72, "word": " at", "probability": 0.82421875}, {"start": 1683.72, "end": 1684.94, "word": " A,", "probability": 0.425048828125}, {"start": 1685.3, "end": 1685.56, "word": " the", "probability": 0.8125}, {"start": 1685.56, "end": 1685.8, "word": " sample", "probability": 0.408447265625}, {"start": 1685.8, "end": 1686.4, "word": " distribution", "probability": 0.85546875}, {"start": 1686.4, "end": 1686.6, "word": " of", "probability": 0.91455078125}, {"start": 1686.6, "end": 1686.74, "word": " the", "probability": 0.88232421875}, {"start": 1686.74, "end": 1686.94, "word": " sample", "probability": 0.8046875}, {"start": 1686.94, "end": 1687.16, "word": " mean", "probability": 0.82568359375}, {"start": 1687.16, "end": 1687.34, 
"word": " is", "probability": 0.9501953125}, {"start": 1687.34, "end": 1687.96, "word": " approximately", "probability": 0.8759765625}, {"start": 1687.96, "end": 1688.46, "word": " normal", "probability": 0.8359375}, {"start": 1688.46, "end": 1688.94, "word": " whenever", "probability": 0.90283203125}, {"start": 1688.94, "end": 1690.16, "word": " the", "probability": 0.89892578125}, {"start": 1690.16, "end": 1690.44, "word": " sample", "probability": 0.91455078125}, {"start": 1690.44, "end": 1690.7, "word": " size", "probability": 0.8505859375}, {"start": 1690.7, "end": 1690.88, "word": " is", "probability": 0.8203125}, {"start": 1690.88, "end": 1691.28, "word": " sufficiently", "probability": 0.92041015625}, {"start": 1691.28, "end": 1691.76, "word": " large.", "probability": 0.9658203125}, {"start": 1692.04, "end": 1692.16, "word": " This", "probability": 0.74755859375}, {"start": 1692.16, "end": 1692.26, "word": " is", "probability": 0.95166015625}, {"start": 1692.26, "end": 1692.68, "word": " correct.", "probability": 0.89013671875}, {"start": 1694.06, "end": 1694.62, "word": " B,", "probability": 0.96240234375}, {"start": 1694.74, "end": 1694.92, "word": " the", "probability": 0.88623046875}, {"start": 1694.92, "end": 1695.18, "word": " sample", "probability": 0.890625}, {"start": 1695.18, "end": 1695.84, "word": " distribution", "probability": 0.86962890625}, {"start": 1695.84, "end": 1696.04, "word": " of", "probability": 0.94873046875}, {"start": 1696.04, "end": 1696.18, "word": " the", "probability": 0.90478515625}, {"start": 1696.18, "end": 1696.4, "word": " sample", "probability": 0.8896484375}, {"start": 1696.4, "end": 1696.72, "word": " mean", "probability": 0.93896484375}, {"start": 1696.72, "end": 1696.92, "word": " is", "probability": 0.94580078125}, {"start": 1696.92, "end": 1697.42, "word": " generated", "probability": 0.8583984375}, {"start": 1697.42, "end": 1697.72, "word": " by", "probability": 0.978515625}, {"start": 1697.72, "end": 1698.18, 
"word": " repeatedly", "probability": 0.97216796875}, {"start": 1698.18, "end": 1698.98, "word": " taking", "probability": 0.89453125}, {"start": 1698.98, "end": 1699.46, "word": " samples", "probability": 0.87548828125}, {"start": 1699.46, "end": 1699.72, "word": " of", "probability": 0.97021484375}, {"start": 1699.72, "end": 1700.0, "word": " size", "probability": 0.83935546875}, {"start": 1700.0, "end": 1700.36, "word": " N", "probability": 0.6025390625}, {"start": 1700.36, "end": 1701.38, "word": " and", "probability": 0.87060546875}, {"start": 1701.38, "end": 1701.86, "word": " computing", "probability": 0.8642578125}, {"start": 1701.86, "end": 1702.24, "word": " the", "probability": 0.90869140625}, {"start": 1702.24, "end": 1702.54, "word": " sample", "probability": 0.8974609375}, {"start": 1702.54, "end": 1702.98, "word": " means.", "probability": 0.84033203125}, {"start": 1703.68, "end": 1704.08, "word": " That's", "probability": 0.773193359375}, {"start": 1704.08, "end": 1704.5, "word": " also", "probability": 0.88427734375}, {"start": 1704.5, "end": 1704.94, "word": " correct.", "probability": 0.900390625}], "temperature": 1.0}, {"id": 62, "seek": 173302, "start": 1706.2, "end": 1733.02, "text": " The sample mean, I'm sorry, the mean of the sample distribution of the sample mean is always equal to mu, that's correct, because we know that the mean of x bar is a mu. Now, the standard deviation of the sampling distribution of the sample mean is equal to sigma. 
And we, yes, exactly, the standard error, which is sigma of x bar, not sigma, equals sigma", "tokens": [440, 6889, 914, 11, 286, 478, 2597, 11, 264, 914, 295, 264, 6889, 7316, 295, 264, 6889, 914, 307, 1009, 2681, 281, 2992, 11, 300, 311, 3006, 11, 570, 321, 458, 300, 264, 914, 295, 2031, 2159, 307, 257, 2992, 13, 823, 11, 264, 3832, 25163, 295, 264, 21179, 7316, 295, 264, 6889, 914, 307, 2681, 281, 12771, 13, 400, 321, 11, 2086, 11, 2293, 11, 264, 3832, 6713, 11, 597, 307, 12771, 295, 2031, 2159, 11, 406, 12771, 11, 6915, 12771], "avg_logprob": -0.22232680866517215, "compression_ratio": 1.956043956043956, "no_speech_prob": 0.0, "words": [{"start": 1706.2, "end": 1706.46, "word": " The", "probability": 0.53564453125}, {"start": 1706.46, "end": 1706.78, "word": " sample", "probability": 0.366943359375}, {"start": 1706.78, "end": 1707.14, "word": " mean,", "probability": 0.94873046875}, {"start": 1707.46, "end": 1707.7, "word": " I'm", "probability": 0.953857421875}, {"start": 1707.7, "end": 1707.88, "word": " sorry,", "probability": 0.85546875}, {"start": 1708.0, "end": 1708.14, "word": " the", "probability": 0.89453125}, {"start": 1708.14, "end": 1708.32, "word": " mean", "probability": 0.9716796875}, {"start": 1708.32, "end": 1708.44, "word": " of", "probability": 0.96484375}, {"start": 1708.44, "end": 1708.58, "word": " the", "probability": 0.88134765625}, {"start": 1708.58, "end": 1708.78, "word": " sample", "probability": 0.580078125}, {"start": 1708.78, "end": 1709.38, "word": " distribution", "probability": 0.8271484375}, {"start": 1709.38, "end": 1709.58, "word": " of", "probability": 0.728515625}, {"start": 1709.58, "end": 1709.74, "word": " the", "probability": 0.91064453125}, {"start": 1709.74, "end": 1710.0, "word": " sample", "probability": 0.875}, {"start": 1710.0, "end": 1710.3, "word": " mean", "probability": 0.865234375}, {"start": 1710.3, "end": 1710.46, "word": " is", "probability": 0.91259765625}, {"start": 1710.46, "end": 1711.04, "word": " 
always", "probability": 0.916015625}, {"start": 1711.04, "end": 1711.4, "word": " equal", "probability": 0.896484375}, {"start": 1711.4, "end": 1711.54, "word": " to", "probability": 0.93359375}, {"start": 1711.54, "end": 1711.66, "word": " mu,", "probability": 0.6201171875}, {"start": 1711.76, "end": 1712.14, "word": " that's", "probability": 0.93603515625}, {"start": 1712.14, "end": 1712.64, "word": " correct,", "probability": 0.88623046875}, {"start": 1713.32, "end": 1713.56, "word": " because", "probability": 0.837890625}, {"start": 1713.56, "end": 1713.68, "word": " we", "probability": 0.9365234375}, {"start": 1713.68, "end": 1713.8, "word": " know", "probability": 0.8857421875}, {"start": 1713.8, "end": 1714.0, "word": " that", "probability": 0.91796875}, {"start": 1714.0, "end": 1714.22, "word": " the", "probability": 0.9013671875}, {"start": 1714.22, "end": 1714.36, "word": " mean", "probability": 0.97021484375}, {"start": 1714.36, "end": 1714.48, "word": " of", "probability": 0.96923828125}, {"start": 1714.48, "end": 1714.6, "word": " x", "probability": 0.61767578125}, {"start": 1714.6, "end": 1714.78, "word": " bar", "probability": 0.9169921875}, {"start": 1714.78, "end": 1714.96, "word": " is", "probability": 0.951171875}, {"start": 1714.96, "end": 1715.12, "word": " a", "probability": 0.4443359375}, {"start": 1715.12, "end": 1715.34, "word": " mu.", "probability": 0.955078125}, {"start": 1716.42, "end": 1716.76, "word": " Now,", "probability": 0.955078125}, {"start": 1717.18, "end": 1717.34, "word": " the", "probability": 0.91845703125}, {"start": 1717.34, "end": 1717.76, "word": " standard", "probability": 0.919921875}, {"start": 1717.76, "end": 1718.3, "word": " deviation", "probability": 0.8896484375}, {"start": 1718.3, "end": 1719.4, "word": " of", "probability": 0.95458984375}, {"start": 1719.4, "end": 1719.68, "word": " the", "probability": 0.9228515625}, {"start": 1719.68, "end": 1720.0, "word": " sampling", "probability": 0.9677734375}, 
{"start": 1720.0, "end": 1720.78, "word": " distribution", "probability": 0.83349609375}, {"start": 1720.78, "end": 1721.02, "word": " of", "probability": 0.70263671875}, {"start": 1721.02, "end": 1721.16, "word": " the", "probability": 0.9130859375}, {"start": 1721.16, "end": 1721.44, "word": " sample", "probability": 0.8662109375}, {"start": 1721.44, "end": 1721.76, "word": " mean", "probability": 0.94384765625}, {"start": 1721.76, "end": 1721.94, "word": " is", "probability": 0.935546875}, {"start": 1721.94, "end": 1722.22, "word": " equal", "probability": 0.91552734375}, {"start": 1722.22, "end": 1722.46, "word": " to", "probability": 0.97216796875}, {"start": 1722.46, "end": 1722.8, "word": " sigma.", "probability": 0.8671875}, {"start": 1723.96, "end": 1724.4, "word": " And", "probability": 0.4326171875}, {"start": 1724.4, "end": 1724.6, "word": " we,", "probability": 0.60791015625}, {"start": 1724.8, "end": 1725.1, "word": " yes,", "probability": 0.5595703125}, {"start": 1725.24, "end": 1725.56, "word": " exactly,", "probability": 0.8974609375}, {"start": 1725.68, "end": 1725.8, "word": " the", "probability": 0.87158203125}, {"start": 1725.8, "end": 1726.12, "word": " standard", "probability": 0.91748046875}, {"start": 1726.12, "end": 1726.48, "word": " error,", "probability": 0.54443359375}, {"start": 1727.64, "end": 1727.88, "word": " which", "probability": 0.92919921875}, {"start": 1727.88, "end": 1728.06, "word": " is", "probability": 0.94921875}, {"start": 1728.06, "end": 1728.38, "word": " sigma", "probability": 0.92431640625}, {"start": 1728.38, "end": 1728.58, "word": " of", "probability": 0.61328125}, {"start": 1728.58, "end": 1728.78, "word": " x", "probability": 0.98583984375}, {"start": 1728.78, "end": 1729.16, "word": " bar,", "probability": 0.9541015625}, {"start": 1729.98, "end": 1730.38, "word": " not", "probability": 0.9384765625}, {"start": 1730.38, "end": 1730.74, "word": " sigma,", "probability": 0.9462890625}, {"start": 1731.24, "end": 
1731.78, "word": " equals", "probability": 0.95263671875}, {"start": 1731.78, "end": 1733.02, "word": " sigma", "probability": 0.92138671875}], "temperature": 1.0}, {"id": 63, "seek": 176061, "start": 1734.23, "end": 1760.61, "text": " Divide by root n. For this reason, this one is incorrect statement. Because we have to divide this sigma by square root of n. Number four. Which of the following is true?", "tokens": [9886, 482, 538, 5593, 297, 13, 1171, 341, 1778, 11, 341, 472, 307, 18424, 5629, 13, 1436, 321, 362, 281, 9845, 341, 12771, 538, 3732, 5593, 295, 297, 13, 5118, 1451, 13, 3013, 295, 264, 3480, 307, 2074, 30], "avg_logprob": -0.22656249739229678, "compression_ratio": 1.3153846153846154, "no_speech_prob": 0.0, "words": [{"start": 1734.23, "end": 1734.65, "word": " Divide", "probability": 0.68408203125}, {"start": 1734.65, "end": 1734.81, "word": " by", "probability": 0.9443359375}, {"start": 1734.81, "end": 1735.01, "word": " root", "probability": 0.69482421875}, {"start": 1735.01, "end": 1735.27, "word": " n.", "probability": 0.47216796875}, {"start": 1735.71, "end": 1736.33, "word": " For", "probability": 0.908203125}, {"start": 1736.33, "end": 1736.53, "word": " this", "probability": 0.921875}, {"start": 1736.53, "end": 1736.79, "word": " reason,", "probability": 0.97412109375}, {"start": 1736.93, "end": 1737.05, "word": " this", "probability": 0.93505859375}, {"start": 1737.05, "end": 1737.23, "word": " one", "probability": 0.86279296875}, {"start": 1737.23, "end": 1737.37, "word": " is", "probability": 0.93408203125}, {"start": 1737.37, "end": 1737.85, "word": " incorrect", "probability": 0.9091796875}, {"start": 1737.85, "end": 1739.57, "word": " statement.", "probability": 0.493408203125}, {"start": 1740.29, "end": 1740.65, "word": " Because", "probability": 0.810546875}, {"start": 1740.65, "end": 1740.79, "word": " we", "probability": 0.89208984375}, {"start": 1740.79, "end": 1740.97, "word": " have", "probability": 0.9443359375}, {"start": 
1740.97, "end": 1741.11, "word": " to", "probability": 0.9697265625}, {"start": 1741.11, "end": 1741.57, "word": " divide", "probability": 0.92431640625}, {"start": 1741.57, "end": 1742.77, "word": " this", "probability": 0.93994140625}, {"start": 1742.77, "end": 1743.21, "word": " sigma", "probability": 0.72119140625}, {"start": 1743.21, "end": 1744.79, "word": " by", "probability": 0.92626953125}, {"start": 1744.79, "end": 1747.21, "word": " square", "probability": 0.6064453125}, {"start": 1747.21, "end": 1747.67, "word": " root", "probability": 0.92822265625}, {"start": 1747.67, "end": 1749.71, "word": " of", "probability": 0.732421875}, {"start": 1749.71, "end": 1749.87, "word": " n.", "probability": 0.80908203125}, {"start": 1751.25, "end": 1751.83, "word": " Number", "probability": 0.8134765625}, {"start": 1751.83, "end": 1752.79, "word": " four.", "probability": 0.74951171875}, {"start": 1756.39, "end": 1757.27, "word": " Which", "probability": 0.9111328125}, {"start": 1757.27, "end": 1757.45, "word": " of", "probability": 0.9306640625}, {"start": 1757.45, "end": 1757.57, "word": " the", "probability": 0.919921875}, {"start": 1757.57, "end": 1758.01, "word": " following", "probability": 0.89697265625}, {"start": 1758.01, "end": 1760.15, "word": " is", "probability": 0.869140625}, {"start": 1760.15, "end": 1760.61, "word": " true?", "probability": 0.77783203125}], "temperature": 1.0}, {"id": 64, "seek": 179038, "start": 1761.98, "end": 1790.38, "text": " about the sampling distribution of the sample mean. Again, the mean of the sampling distribution is always Mu. That's correct statement. Now look at V. The standard deviation of the sampling distribution is always Sigma. Incorrect because Sigma over root N. 
Part C, the shape of the sampling distribution is always approximately normal.", "tokens": [466, 264, 21179, 7316, 295, 264, 6889, 914, 13, 3764, 11, 264, 914, 295, 264, 21179, 7316, 307, 1009, 15601, 13, 663, 311, 3006, 5629, 13, 823, 574, 412, 691, 13, 440, 3832, 25163, 295, 264, 21179, 7316, 307, 1009, 36595, 13, 39120, 2554, 570, 36595, 670, 5593, 426, 13, 4100, 383, 11, 264, 3909, 295, 264, 21179, 7316, 307, 1009, 10447, 2710, 13], "avg_logprob": -0.3021634615384615, "compression_ratio": 1.8932584269662922, "no_speech_prob": 0.0, "words": [{"start": 1761.98, "end": 1762.56, "word": " about", "probability": 0.47412109375}, {"start": 1762.56, "end": 1763.12, "word": " the", "probability": 0.89501953125}, {"start": 1763.12, "end": 1763.34, "word": " sampling", "probability": 0.66748046875}, {"start": 1763.34, "end": 1764.06, "word": " distribution", "probability": 0.85009765625}, {"start": 1764.06, "end": 1764.64, "word": " of", "probability": 0.94287109375}, {"start": 1764.64, "end": 1764.84, "word": " the", "probability": 0.89501953125}, {"start": 1764.84, "end": 1765.06, "word": " sample", "probability": 0.46240234375}, {"start": 1765.06, "end": 1765.38, "word": " mean.", "probability": 0.90283203125}, {"start": 1766.98, "end": 1767.36, "word": " Again,", "probability": 0.86572265625}, {"start": 1767.72, "end": 1767.84, "word": " the", "probability": 0.90966796875}, {"start": 1767.84, "end": 1768.04, "word": " mean", "probability": 0.94873046875}, {"start": 1768.04, "end": 1768.16, "word": " of", "probability": 0.95703125}, {"start": 1768.16, "end": 1768.28, "word": " the", "probability": 0.465087890625}, {"start": 1768.28, "end": 1768.82, "word": " sampling", "probability": 0.8896484375}, {"start": 1768.82, "end": 1770.08, "word": " distribution", "probability": 0.82763671875}, {"start": 1770.08, "end": 1770.32, "word": " is", "probability": 0.92919921875}, {"start": 1770.32, "end": 1770.6, "word": " always", "probability": 0.9052734375}, {"start": 1770.6, 
"end": 1770.92, "word": " Mu.", "probability": 0.277587890625}, {"start": 1771.68, "end": 1772.2, "word": " That's", "probability": 0.869873046875}, {"start": 1772.2, "end": 1772.76, "word": " correct", "probability": 0.8603515625}, {"start": 1772.76, "end": 1774.6, "word": " statement.", "probability": 0.64697265625}, {"start": 1775.86, "end": 1776.06, "word": " Now", "probability": 0.50048828125}, {"start": 1776.06, "end": 1776.32, "word": " look", "probability": 0.5703125}, {"start": 1776.32, "end": 1776.5, "word": " at", "probability": 0.9677734375}, {"start": 1776.5, "end": 1776.7, "word": " V.", "probability": 0.464111328125}, {"start": 1777.36, "end": 1777.68, "word": " The", "probability": 0.8583984375}, {"start": 1777.68, "end": 1778.0, "word": " standard", "probability": 0.93798828125}, {"start": 1778.0, "end": 1778.46, "word": " deviation", "probability": 0.91015625}, {"start": 1778.46, "end": 1779.32, "word": " of", "probability": 0.96435546875}, {"start": 1779.32, "end": 1779.52, "word": " the", "probability": 0.8115234375}, {"start": 1779.52, "end": 1779.78, "word": " sampling", "probability": 0.8935546875}, {"start": 1779.78, "end": 1780.32, "word": " distribution", "probability": 0.82421875}, {"start": 1780.32, "end": 1780.54, "word": " is", "probability": 0.9091796875}, {"start": 1780.54, "end": 1780.82, "word": " always", "probability": 0.89453125}, {"start": 1780.82, "end": 1781.16, "word": " Sigma.", "probability": 0.73681640625}, {"start": 1782.04, "end": 1782.7, "word": " Incorrect", "probability": 0.916259765625}, {"start": 1782.7, "end": 1783.08, "word": " because", "probability": 0.6650390625}, {"start": 1783.08, "end": 1783.94, "word": " Sigma", "probability": 0.82177734375}, {"start": 1783.94, "end": 1784.16, "word": " over", "probability": 0.27587890625}, {"start": 1784.16, "end": 1784.3, "word": " root", "probability": 0.8408203125}, {"start": 1784.3, "end": 1784.46, "word": " N.", "probability": 0.68017578125}, {"start": 1785.14, 
"end": 1785.36, "word": " Part", "probability": 0.484375}, {"start": 1785.36, "end": 1785.66, "word": " C,", "probability": 0.79736328125}, {"start": 1786.64, "end": 1786.94, "word": " the", "probability": 0.88818359375}, {"start": 1786.94, "end": 1787.28, "word": " shape", "probability": 0.243896484375}, {"start": 1787.28, "end": 1787.54, "word": " of", "probability": 0.9697265625}, {"start": 1787.54, "end": 1787.72, "word": " the", "probability": 0.908203125}, {"start": 1787.72, "end": 1788.0, "word": " sampling", "probability": 0.931640625}, {"start": 1788.0, "end": 1788.62, "word": " distribution", "probability": 0.845703125}, {"start": 1788.62, "end": 1788.92, "word": " is", "probability": 0.92236328125}, {"start": 1788.92, "end": 1789.28, "word": " always", "probability": 0.890625}, {"start": 1789.28, "end": 1790.0, "word": " approximately", "probability": 0.85546875}, {"start": 1790.0, "end": 1790.38, "word": " normal.", "probability": 0.81689453125}], "temperature": 1.0}, {"id": 65, "seek": 181961, "start": 1792.07, "end": 1819.61, "text": " If N is large, then we can say it's approximately normal. All of the above are true is incorrect. So that's number 6. Look at number 10. 
Number 10.", "tokens": [759, 426, 307, 2416, 11, 550, 321, 393, 584, 309, 311, 10447, 2710, 13, 1057, 295, 264, 3673, 366, 2074, 307, 18424, 13, 407, 300, 311, 1230, 1386, 13, 2053, 412, 1230, 1266, 13, 5118, 1266, 13], "avg_logprob": -0.21011513510816976, "compression_ratio": 1.2231404958677685, "no_speech_prob": 0.0, "words": [{"start": 1792.07, "end": 1792.51, "word": " If", "probability": 0.7412109375}, {"start": 1792.51, "end": 1792.77, "word": " N", "probability": 0.42431640625}, {"start": 1792.77, "end": 1793.25, "word": " is", "probability": 0.6630859375}, {"start": 1793.25, "end": 1793.57, "word": " large,", "probability": 0.94873046875}, {"start": 1793.69, "end": 1793.81, "word": " then", "probability": 0.83447265625}, {"start": 1793.81, "end": 1793.95, "word": " we", "probability": 0.912109375}, {"start": 1793.95, "end": 1794.15, "word": " can", "probability": 0.92041015625}, {"start": 1794.15, "end": 1794.39, "word": " say", "probability": 0.94482421875}, {"start": 1794.39, "end": 1794.65, "word": " it's", "probability": 0.84912109375}, {"start": 1794.65, "end": 1795.23, "word": " approximately", "probability": 0.85546875}, {"start": 1795.23, "end": 1795.63, "word": " normal.", "probability": 0.85302734375}, {"start": 1796.41, "end": 1796.65, "word": " All", "probability": 0.91796875}, {"start": 1796.65, "end": 1796.87, "word": " of", "probability": 0.92724609375}, {"start": 1796.87, "end": 1797.01, "word": " the", "probability": 0.8974609375}, {"start": 1797.01, "end": 1797.41, "word": " above", "probability": 0.94580078125}, {"start": 1797.41, "end": 1797.83, "word": " are", "probability": 0.76318359375}, {"start": 1797.83, "end": 1798.21, "word": " true", "probability": 0.96240234375}, {"start": 1798.21, "end": 1798.55, "word": " is", "probability": 0.3779296875}, {"start": 1798.55, "end": 1798.93, "word": " incorrect.", "probability": 0.9443359375}, {"start": 1800.01, "end": 1800.19, "word": " So", "probability": 0.818359375}, {"start": 1800.19, 
"end": 1801.93, "word": " that's", "probability": 0.893798828125}, {"start": 1801.93, "end": 1802.55, "word": " number", "probability": 0.60791015625}, {"start": 1802.55, "end": 1807.37, "word": " 6.", "probability": 0.464599609375}, {"start": 1815.09, "end": 1815.77, "word": " Look", "probability": 0.81396484375}, {"start": 1815.77, "end": 1815.95, "word": " at", "probability": 0.96875}, {"start": 1815.95, "end": 1816.21, "word": " number", "probability": 0.91748046875}, {"start": 1816.21, "end": 1816.53, "word": " 10.", "probability": 0.9560546875}, {"start": 1818.61, "end": 1819.29, "word": " Number", "probability": 0.87939453125}, {"start": 1819.29, "end": 1819.61, "word": " 10.", "probability": 0.95849609375}], "temperature": 1.0}, {"id": 66, "seek": 184705, "start": 1820.17, "end": 1847.05, "text": " A telemarketer set the company's computerized dialing system to contact every 25th person listed in the local telephone directory. So the company selects the person that's in the 25th position. 
So the 25th person is being selected.", "tokens": [316, 4304, 5638, 2398, 992, 264, 2237, 311, 3820, 1602, 5502, 278, 1185, 281, 3385, 633, 3552, 392, 954, 10052, 294, 264, 2654, 19800, 21120, 13, 407, 264, 2237, 3048, 82, 264, 954, 300, 311, 294, 264, 3552, 392, 2535, 13, 407, 264, 3552, 392, 954, 307, 885, 8209, 13], "avg_logprob": -0.15824142039990893, "compression_ratio": 1.5782312925170068, "no_speech_prob": 0.0, "words": [{"start": 1820.17, "end": 1820.49, "word": " A", "probability": 0.305419921875}, {"start": 1820.49, "end": 1822.13, "word": " telemarketer", "probability": 0.9768880208333334}, {"start": 1822.13, "end": 1822.61, "word": " set", "probability": 0.58984375}, {"start": 1822.61, "end": 1824.27, "word": " the", "probability": 0.8916015625}, {"start": 1824.27, "end": 1824.95, "word": " company's", "probability": 0.86083984375}, {"start": 1824.95, "end": 1825.69, "word": " computerized", "probability": 0.91455078125}, {"start": 1825.69, "end": 1826.07, "word": " dialing", "probability": 0.886474609375}, {"start": 1826.07, "end": 1826.63, "word": " system", "probability": 0.947265625}, {"start": 1826.63, "end": 1827.13, "word": " to", "probability": 0.95703125}, {"start": 1827.13, "end": 1827.69, "word": " contact", "probability": 0.90673828125}, {"start": 1827.69, "end": 1828.33, "word": " every", "probability": 0.8212890625}, {"start": 1828.33, "end": 1829.99, "word": " 25th", "probability": 0.895751953125}, {"start": 1829.99, "end": 1830.63, "word": " person", "probability": 0.92333984375}, {"start": 1830.63, "end": 1832.41, "word": " listed", "probability": 0.9072265625}, {"start": 1832.41, "end": 1833.37, "word": " in", "probability": 0.9404296875}, {"start": 1833.37, "end": 1833.53, "word": " the", "probability": 0.9111328125}, {"start": 1833.53, "end": 1833.91, "word": " local", "probability": 0.88720703125}, {"start": 1833.91, "end": 1835.01, "word": " telephone", "probability": 0.91259765625}, {"start": 1835.01, "end": 1835.51, "word": " 
directory.", "probability": 0.96044921875}, {"start": 1836.59, "end": 1836.91, "word": " So", "probability": 0.9443359375}, {"start": 1836.91, "end": 1837.11, "word": " the", "probability": 0.80419921875}, {"start": 1837.11, "end": 1837.59, "word": " company", "probability": 0.90380859375}, {"start": 1837.59, "end": 1838.37, "word": " selects", "probability": 0.5794677734375}, {"start": 1838.37, "end": 1838.45, "word": " the", "probability": 0.484619140625}, {"start": 1838.45, "end": 1838.99, "word": " person", "probability": 0.92041015625}, {"start": 1838.99, "end": 1840.31, "word": " that's", "probability": 0.700439453125}, {"start": 1840.31, "end": 1840.41, "word": " in", "probability": 0.939453125}, {"start": 1840.41, "end": 1840.65, "word": " the", "probability": 0.90869140625}, {"start": 1840.65, "end": 1841.55, "word": " 25th", "probability": 0.980712890625}, {"start": 1841.55, "end": 1842.63, "word": " position.", "probability": 0.94580078125}, {"start": 1844.09, "end": 1844.83, "word": " So", "probability": 0.955078125}, {"start": 1844.83, "end": 1844.99, "word": " the", "probability": 0.80712890625}, {"start": 1844.99, "end": 1845.51, "word": " 25th", "probability": 0.990966796875}, {"start": 1845.51, "end": 1845.99, "word": " person", "probability": 0.9267578125}, {"start": 1845.99, "end": 1846.37, "word": " is", "probability": 0.8974609375}, {"start": 1846.37, "end": 1846.57, "word": " being", "probability": 0.94580078125}, {"start": 1846.57, "end": 1847.05, "word": " selected.", "probability": 0.88037109375}], "temperature": 1.0}, {"id": 67, "seek": 187514, "start": 1848.06, "end": 1875.14, "text": " The other one is the first item. The second item should be... And your key is twenty-fifth. So one, two, three, four. So number twenty-fifth is the first item. Maybe you have something more. 
Then the second item is number fifty and so on.", "tokens": [440, 661, 472, 307, 264, 700, 3174, 13, 440, 1150, 3174, 820, 312, 485, 400, 428, 2141, 307, 7699, 12, 69, 351, 392, 13, 407, 472, 11, 732, 11, 1045, 11, 1451, 13, 407, 1230, 7699, 12, 69, 351, 392, 307, 264, 700, 3174, 13, 2704, 291, 362, 746, 544, 13, 1396, 264, 1150, 3174, 307, 1230, 13442, 293, 370, 322, 13], "avg_logprob": -0.2631448469464741, "compression_ratio": 1.6597222222222223, "no_speech_prob": 0.0, "words": [{"start": 1848.06, "end": 1848.26, "word": " The", "probability": 0.65869140625}, {"start": 1848.26, "end": 1848.54, "word": " other", "probability": 0.8193359375}, {"start": 1848.54, "end": 1848.86, "word": " one", "probability": 0.89794921875}, {"start": 1848.86, "end": 1849.42, "word": " is", "probability": 0.72265625}, {"start": 1849.42, "end": 1849.6, "word": " the", "probability": 0.87890625}, {"start": 1849.6, "end": 1849.86, "word": " first", "probability": 0.84228515625}, {"start": 1849.86, "end": 1850.34, "word": " item.", "probability": 0.96728515625}, {"start": 1851.68, "end": 1851.84, "word": " The", "probability": 0.77294921875}, {"start": 1851.84, "end": 1852.1, "word": " second", "probability": 0.90185546875}, {"start": 1852.1, "end": 1852.48, "word": " item", "probability": 0.93994140625}, {"start": 1852.48, "end": 1852.72, "word": " should", "probability": 0.95361328125}, {"start": 1852.72, "end": 1855.94, "word": " be...", "probability": 0.6246337890625}, {"start": 1855.94, "end": 1856.28, "word": " And", "probability": 0.241455078125}, {"start": 1856.28, "end": 1856.48, "word": " your", "probability": 0.86083984375}, {"start": 1856.48, "end": 1856.68, "word": " key", "probability": 0.280029296875}, {"start": 1856.68, "end": 1856.78, "word": " is", "probability": 0.943359375}, {"start": 1856.78, "end": 1857.02, "word": " twenty", "probability": 0.29638671875}, {"start": 1857.02, "end": 1857.3, "word": "-fifth.", "probability": 0.898193359375}, {"start": 1857.72, "end": 
1858.26, "word": " So", "probability": 0.86474609375}, {"start": 1858.26, "end": 1859.24, "word": " one,", "probability": 0.17431640625}, {"start": 1860.68, "end": 1860.8, "word": " two,", "probability": 0.94580078125}, {"start": 1860.92, "end": 1861.12, "word": " three,", "probability": 0.94189453125}, {"start": 1861.26, "end": 1861.56, "word": " four.", "probability": 0.93603515625}, {"start": 1862.0, "end": 1862.24, "word": " So", "probability": 0.7021484375}, {"start": 1862.24, "end": 1862.52, "word": " number", "probability": 0.79150390625}, {"start": 1862.52, "end": 1862.88, "word": " twenty", "probability": 0.73388671875}, {"start": 1862.88, "end": 1863.28, "word": "-fifth", "probability": 0.975341796875}, {"start": 1863.28, "end": 1864.64, "word": " is", "probability": 0.55322265625}, {"start": 1864.64, "end": 1864.74, "word": " the", "probability": 0.87646484375}, {"start": 1864.74, "end": 1865.02, "word": " first", "probability": 0.88818359375}, {"start": 1865.02, "end": 1865.44, "word": " item.", "probability": 0.96240234375}, {"start": 1868.5, "end": 1869.1, "word": " Maybe", "probability": 0.70703125}, {"start": 1869.1, "end": 1869.22, "word": " you", "probability": 0.94287109375}, {"start": 1869.22, "end": 1869.34, "word": " have", "probability": 0.845703125}, {"start": 1869.34, "end": 1869.74, "word": " something", "probability": 0.859375}, {"start": 1869.74, "end": 1870.18, "word": " more.", "probability": 0.9296875}, {"start": 1870.82, "end": 1871.12, "word": " Then", "probability": 0.7578125}, {"start": 1871.12, "end": 1871.44, "word": " the", "probability": 0.75146484375}, {"start": 1871.44, "end": 1871.78, "word": " second", "probability": 0.90576171875}, {"start": 1871.78, "end": 1872.32, "word": " item", "probability": 0.96337890625}, {"start": 1872.32, "end": 1873.94, "word": " is", "probability": 0.91015625}, {"start": 1873.94, "end": 1874.18, "word": " number", "probability": 0.923828125}, {"start": 1874.18, "end": 1874.5, "word": " fifty", 
"probability": 0.865234375}, {"start": 1874.5, "end": 1874.7, "word": " and", "probability": 0.783203125}, {"start": 1874.7, "end": 1874.96, "word": " so", "probability": 0.951171875}, {"start": 1874.96, "end": 1875.14, "word": " on.", "probability": 0.95068359375}], "temperature": 1.0}, {"id": 68, "seek": 190449, "start": 1879.25, "end": 1904.49, "text": " What sampling method was used? Systematic sample because we chose the 50th person, then the second one is the 50th, and so on. So 25, 50, 75, and so on. Number 11, which of the following methods were more likely", "tokens": [708, 21179, 3170, 390, 1143, 30, 8910, 2399, 6889, 570, 321, 5111, 264, 2625, 392, 954, 11, 550, 264, 1150, 472, 307, 264, 2625, 392, 11, 293, 370, 322, 13, 407, 3552, 11, 2625, 11, 9562, 11, 293, 370, 322, 13, 5118, 2975, 11, 597, 295, 264, 3480, 7150, 645, 544, 3700], "avg_logprob": -0.23540683849802557, "compression_ratio": 1.4133333333333333, "no_speech_prob": 0.0, "words": [{"start": 1879.25, "end": 1880.01, "word": " What", "probability": 0.457275390625}, {"start": 1880.01, "end": 1880.55, "word": " sampling", "probability": 0.97216796875}, {"start": 1880.55, "end": 1881.93, "word": " method", "probability": 0.943359375}, {"start": 1881.93, "end": 1882.23, "word": " was", "probability": 0.95556640625}, {"start": 1882.23, "end": 1882.63, "word": " used?", "probability": 0.9267578125}, {"start": 1884.59, "end": 1885.35, "word": " Systematic", "probability": 0.7978515625}, {"start": 1885.35, "end": 1886.65, "word": " sample", "probability": 0.344970703125}, {"start": 1886.65, "end": 1887.03, "word": " because", "probability": 0.60595703125}, {"start": 1887.03, "end": 1887.23, "word": " we", "probability": 0.63671875}, {"start": 1887.23, "end": 1887.43, "word": " chose", "probability": 0.56396484375}, {"start": 1887.43, "end": 1887.61, "word": " the", "probability": 0.91064453125}, {"start": 1887.61, "end": 1888.29, "word": " 50th", "probability": 0.80224609375}, {"start": 1888.29, "end": 
1889.73, "word": " person,", "probability": 0.8994140625}, {"start": 1890.69, "end": 1890.95, "word": " then", "probability": 0.5322265625}, {"start": 1890.95, "end": 1891.17, "word": " the", "probability": 0.8974609375}, {"start": 1891.17, "end": 1891.45, "word": " second", "probability": 0.82470703125}, {"start": 1891.45, "end": 1891.69, "word": " one", "probability": 0.92724609375}, {"start": 1891.69, "end": 1891.89, "word": " is", "probability": 0.91748046875}, {"start": 1891.89, "end": 1892.29, "word": " the", "probability": 0.90625}, {"start": 1892.29, "end": 1893.39, "word": " 50th,", "probability": 0.953369140625}, {"start": 1893.45, "end": 1893.55, "word": " and", "probability": 0.916015625}, {"start": 1893.55, "end": 1893.77, "word": " so", "probability": 0.951171875}, {"start": 1893.77, "end": 1893.97, "word": " on.", "probability": 0.94287109375}, {"start": 1894.01, "end": 1894.11, "word": " So", "probability": 0.84228515625}, {"start": 1894.11, "end": 1894.51, "word": " 25,", "probability": 0.78369140625}, {"start": 1895.19, "end": 1896.07, "word": " 50,", "probability": 0.77099609375}, {"start": 1896.53, "end": 1897.13, "word": " 75,", "probability": 0.96044921875}, {"start": 1897.49, "end": 1898.09, "word": " and", "probability": 0.9345703125}, {"start": 1898.09, "end": 1898.27, "word": " so", "probability": 0.95068359375}, {"start": 1898.27, "end": 1898.51, "word": " on.", "probability": 0.94775390625}, {"start": 1900.35, "end": 1900.89, "word": " Number", "probability": 0.83935546875}, {"start": 1900.89, "end": 1901.25, "word": " 11,", "probability": 0.9287109375}, {"start": 1901.95, "end": 1902.29, "word": " which", "probability": 0.9091796875}, {"start": 1902.29, "end": 1902.45, "word": " of", "probability": 0.92431640625}, {"start": 1902.45, "end": 1902.57, "word": " the", "probability": 0.91748046875}, {"start": 1902.57, "end": 1902.97, "word": " following", "probability": 0.8779296875}, {"start": 1902.97, "end": 1903.51, "word": " methods", 
"probability": 0.88134765625}, {"start": 1903.51, "end": 1903.69, "word": " were", "probability": 0.3984375}, {"start": 1903.69, "end": 1903.99, "word": " more", "probability": 0.9208984375}, {"start": 1903.99, "end": 1904.49, "word": " likely", "probability": 0.9365234375}], "temperature": 1.0}, {"id": 69, "seek": 193098, "start": 1905.74, "end": 1930.98, "text": " be susceptible to ethical violation when used to form conclusions about the entire population. Now, the correct answer is convenience sample. Because number one, convenience sample is used because it is easy, inexpensive, costly, and it's used", "tokens": [312, 31249, 281, 18890, 22840, 562, 1143, 281, 1254, 22865, 466, 264, 2302, 4415, 13, 823, 11, 264, 3006, 1867, 307, 19283, 6889, 13, 1436, 1230, 472, 11, 19283, 6889, 307, 1143, 570, 309, 307, 1858, 11, 28382, 11, 28328, 11, 293, 309, 311, 1143], "avg_logprob": -0.22452445133872653, "compression_ratio": 1.5443037974683544, "no_speech_prob": 0.0, "words": [{"start": 1905.74, "end": 1906.12, "word": " be", "probability": 0.341552734375}, {"start": 1906.12, "end": 1906.92, "word": " susceptible", "probability": 0.483154296875}, {"start": 1906.92, "end": 1907.86, "word": " to", "probability": 0.97216796875}, {"start": 1907.86, "end": 1908.5, "word": " ethical", "probability": 0.94287109375}, {"start": 1908.5, "end": 1909.28, "word": " violation", "probability": 0.85205078125}, {"start": 1909.28, "end": 1911.8, "word": " when", "probability": 0.67138671875}, {"start": 1911.8, "end": 1912.16, "word": " used", "probability": 0.9287109375}, {"start": 1912.16, "end": 1912.42, "word": " to", "probability": 0.96142578125}, {"start": 1912.42, "end": 1912.8, "word": " form", "probability": 0.92578125}, {"start": 1912.8, "end": 1913.6, "word": " conclusions", "probability": 0.9296875}, {"start": 1913.6, "end": 1914.04, "word": " about", "probability": 0.904296875}, {"start": 1914.04, "end": 1914.26, "word": " the", "probability": 0.8916015625}, {"start": 1914.26, 
"end": 1914.6, "word": " entire", "probability": 0.9140625}, {"start": 1914.6, "end": 1915.36, "word": " population.", "probability": 0.9296875}, {"start": 1917.74, "end": 1918.04, "word": " Now,", "probability": 0.7724609375}, {"start": 1918.1, "end": 1918.22, "word": " the", "probability": 0.91748046875}, {"start": 1918.22, "end": 1918.46, "word": " correct", "probability": 0.90771484375}, {"start": 1918.46, "end": 1918.68, "word": " answer", "probability": 0.95361328125}, {"start": 1918.68, "end": 1918.9, "word": " is", "probability": 0.91552734375}, {"start": 1918.9, "end": 1919.42, "word": " convenience", "probability": 0.72216796875}, {"start": 1919.42, "end": 1919.8, "word": " sample.", "probability": 0.6162109375}, {"start": 1921.26, "end": 1922.02, "word": " Because", "probability": 0.60595703125}, {"start": 1922.02, "end": 1922.32, "word": " number", "probability": 0.546875}, {"start": 1922.32, "end": 1922.6, "word": " one,", "probability": 0.92138671875}, {"start": 1922.74, "end": 1923.14, "word": " convenience", "probability": 0.88720703125}, {"start": 1923.14, "end": 1923.42, "word": " sample", "probability": 0.84326171875}, {"start": 1923.42, "end": 1923.7, "word": " is", "probability": 0.93701171875}, {"start": 1923.7, "end": 1924.38, "word": " used", "probability": 0.87890625}, {"start": 1924.38, "end": 1924.74, "word": " because", "probability": 0.8896484375}, {"start": 1924.74, "end": 1924.94, "word": " it", "probability": 0.88134765625}, {"start": 1924.94, "end": 1925.06, "word": " is", "probability": 0.70751953125}, {"start": 1925.06, "end": 1925.42, "word": " easy,", "probability": 0.9150390625}, {"start": 1925.72, "end": 1926.24, "word": " inexpensive,", "probability": 0.8466796875}, {"start": 1927.26, "end": 1928.16, "word": " costly,", "probability": 0.73583984375}, {"start": 1929.1, "end": 1929.82, "word": " and", "probability": 0.93994140625}, {"start": 1929.82, "end": 1930.52, "word": " it's", "probability": 0.9521484375}, {"start": 
1930.52, "end": 1930.98, "word": " used", "probability": 0.92431640625}], "temperature": 1.0}, {"id": 70, "seek": 196154, "start": 1932.74, "end": 1961.54, "text": " I mean, we select the sample if it is convenient to the researcher by himself. But maybe in this case, we have biased collection. For this reason, this is incorrect sampling. Most of the time, we are going to avoid using this technique unless your sample is unbiased. Because if, for example, suppose I love", "tokens": [286, 914, 11, 321, 3048, 264, 6889, 498, 309, 307, 10851, 281, 264, 21751, 538, 3647, 13, 583, 1310, 294, 341, 1389, 11, 321, 362, 28035, 5765, 13, 1171, 341, 1778, 11, 341, 307, 18424, 21179, 13, 4534, 295, 264, 565, 11, 321, 366, 516, 281, 5042, 1228, 341, 6532, 5969, 428, 6889, 307, 517, 5614, 1937, 13, 1436, 498, 11, 337, 1365, 11, 7297, 286, 959], "avg_logprob": -0.1312040502534193, "compression_ratio": 1.5634517766497462, "no_speech_prob": 0.0, "words": [{"start": 1932.74, "end": 1933.02, "word": " I", "probability": 0.80078125}, {"start": 1933.02, "end": 1933.16, "word": " mean,", "probability": 0.96240234375}, {"start": 1933.2, "end": 1933.34, "word": " we", "probability": 0.86572265625}, {"start": 1933.34, "end": 1933.62, "word": " select", "probability": 0.8349609375}, {"start": 1933.62, "end": 1933.82, "word": " the", "probability": 0.82568359375}, {"start": 1933.82, "end": 1934.04, "word": " sample", "probability": 0.75390625}, {"start": 1934.04, "end": 1934.56, "word": " if", "probability": 0.88525390625}, {"start": 1934.56, "end": 1934.74, "word": " it", "probability": 0.92138671875}, {"start": 1934.74, "end": 1934.88, "word": " is", "probability": 0.775390625}, {"start": 1934.88, "end": 1935.36, "word": " convenient", "probability": 0.921875}, {"start": 1935.36, "end": 1936.64, "word": " to", "probability": 0.90966796875}, {"start": 1936.64, "end": 1936.8, "word": " the", "probability": 0.916015625}, {"start": 1936.8, "end": 1937.24, "word": " researcher", "probability": 
0.9453125}, {"start": 1937.24, "end": 1937.74, "word": " by", "probability": 0.93212890625}, {"start": 1937.74, "end": 1938.24, "word": " himself.", "probability": 0.900390625}, {"start": 1938.74, "end": 1939.04, "word": " But", "probability": 0.9462890625}, {"start": 1939.04, "end": 1939.4, "word": " maybe", "probability": 0.8974609375}, {"start": 1939.4, "end": 1939.74, "word": " in", "probability": 0.74755859375}, {"start": 1939.74, "end": 1939.94, "word": " this", "probability": 0.94287109375}, {"start": 1939.94, "end": 1940.16, "word": " case,", "probability": 0.921875}, {"start": 1940.22, "end": 1940.32, "word": " we", "probability": 0.93017578125}, {"start": 1940.32, "end": 1940.46, "word": " have", "probability": 0.9462890625}, {"start": 1940.46, "end": 1940.9, "word": " biased", "probability": 0.9267578125}, {"start": 1940.9, "end": 1941.52, "word": " collection.", "probability": 0.70458984375}, {"start": 1941.98, "end": 1942.34, "word": " For", "probability": 0.9638671875}, {"start": 1942.34, "end": 1942.58, "word": " this", "probability": 0.94287109375}, {"start": 1942.58, "end": 1942.92, "word": " reason,", "probability": 0.97265625}, {"start": 1943.06, "end": 1943.22, "word": " this", "probability": 0.93408203125}, {"start": 1943.22, "end": 1943.34, "word": " is", "probability": 0.9404296875}, {"start": 1943.34, "end": 1943.94, "word": " incorrect", "probability": 0.90771484375}, {"start": 1943.94, "end": 1946.1, "word": " sampling.", "probability": 0.97021484375}, {"start": 1946.68, "end": 1947.0, "word": " Most", "probability": 0.8828125}, {"start": 1947.0, "end": 1947.14, "word": " of", "probability": 0.966796875}, {"start": 1947.14, "end": 1947.26, "word": " the", "probability": 0.9169921875}, {"start": 1947.26, "end": 1947.56, "word": " time,", "probability": 0.8916015625}, {"start": 1948.3, "end": 1948.6, "word": " we", "probability": 0.9619140625}, {"start": 1948.6, "end": 1948.76, "word": " are", "probability": 0.9375}, {"start": 1948.76, 
"end": 1949.04, "word": " going", "probability": 0.9462890625}, {"start": 1949.04, "end": 1949.98, "word": " to", "probability": 0.97265625}, {"start": 1949.98, "end": 1951.58, "word": " avoid", "probability": 0.91015625}, {"start": 1951.58, "end": 1952.06, "word": " using", "probability": 0.92724609375}, {"start": 1952.06, "end": 1952.42, "word": " this", "probability": 0.9453125}, {"start": 1952.42, "end": 1952.88, "word": " technique", "probability": 0.9462890625}, {"start": 1952.88, "end": 1953.68, "word": " unless", "probability": 0.60546875}, {"start": 1953.68, "end": 1954.46, "word": " your", "probability": 0.8876953125}, {"start": 1954.46, "end": 1954.82, "word": " sample", "probability": 0.88623046875}, {"start": 1954.82, "end": 1955.42, "word": " is", "probability": 0.947265625}, {"start": 1955.42, "end": 1957.1, "word": " unbiased.", "probability": 0.9620768229166666}, {"start": 1958.4, "end": 1958.9, "word": " Because", "probability": 0.8994140625}, {"start": 1958.9, "end": 1959.24, "word": " if,", "probability": 0.869140625}, {"start": 1959.42, "end": 1959.56, "word": " for", "probability": 0.95556640625}, {"start": 1959.56, "end": 1959.82, "word": " example,", "probability": 0.9755859375}, {"start": 1959.92, "end": 1960.38, "word": " suppose", "probability": 0.90771484375}, {"start": 1960.38, "end": 1961.2, "word": " I", "probability": 0.93115234375}, {"start": 1961.2, "end": 1961.54, "word": " love", "probability": 0.89990234375}], "temperature": 1.0}, {"id": 71, "seek": 198921, "start": 1963.25, "end": 1989.21, "text": " T of type A. And my sample, I select a sample of size 20. Since I love type A, I choose these 20 students or 20 persons that have, that like T of type A. 
That means your sample", "tokens": [314, 295, 2010, 316, 13, 400, 452, 6889, 11, 286, 3048, 257, 6889, 295, 2744, 945, 13, 4162, 286, 959, 2010, 316, 11, 286, 2826, 613, 945, 1731, 420, 945, 14453, 300, 362, 11, 300, 411, 314, 295, 2010, 316, 13, 663, 1355, 428, 6889], "avg_logprob": -0.21976902595032816, "compression_ratio": 1.4047619047619047, "no_speech_prob": 0.0, "words": [{"start": 1963.25, "end": 1963.97, "word": " T", "probability": 0.202392578125}, {"start": 1963.97, "end": 1964.69, "word": " of", "probability": 0.82373046875}, {"start": 1964.69, "end": 1965.05, "word": " type", "probability": 0.8193359375}, {"start": 1965.05, "end": 1965.33, "word": " A.", "probability": 0.927734375}, {"start": 1968.47, "end": 1969.19, "word": " And", "probability": 0.1905517578125}, {"start": 1969.19, "end": 1969.43, "word": " my", "probability": 0.8720703125}, {"start": 1969.43, "end": 1969.81, "word": " sample,", "probability": 0.828125}, {"start": 1970.13, "end": 1970.37, "word": " I", "probability": 0.9892578125}, {"start": 1970.37, "end": 1970.63, "word": " select", "probability": 0.6982421875}, {"start": 1970.63, "end": 1970.81, "word": " a", "probability": 0.88134765625}, {"start": 1970.81, "end": 1971.01, "word": " sample", "probability": 0.92333984375}, {"start": 1971.01, "end": 1971.27, "word": " of", "probability": 0.943359375}, {"start": 1971.27, "end": 1971.63, "word": " size", "probability": 0.8603515625}, {"start": 1971.63, "end": 1972.17, "word": " 20.", "probability": 0.82666015625}, {"start": 1976.77, "end": 1977.49, "word": " Since", "probability": 0.84375}, {"start": 1977.49, "end": 1977.73, "word": " I", "probability": 0.99462890625}, {"start": 1977.73, "end": 1977.97, "word": " love", "probability": 0.921875}, {"start": 1977.97, "end": 1978.35, "word": " type", "probability": 0.93701171875}, {"start": 1978.35, "end": 1978.77, "word": " A,", "probability": 0.9912109375}, {"start": 1980.27, "end": 1980.53, "word": " I", "probability": 0.998046875}, 
{"start": 1980.53, "end": 1980.95, "word": " choose", "probability": 0.77880859375}, {"start": 1980.95, "end": 1981.23, "word": " these", "probability": 0.78271484375}, {"start": 1981.23, "end": 1981.57, "word": " 20", "probability": 0.90673828125}, {"start": 1981.57, "end": 1982.17, "word": " students", "probability": 0.970703125}, {"start": 1982.17, "end": 1982.43, "word": " or", "probability": 0.70751953125}, {"start": 1982.43, "end": 1982.65, "word": " 20", "probability": 0.91748046875}, {"start": 1982.65, "end": 1983.21, "word": " persons", "probability": 0.83984375}, {"start": 1983.21, "end": 1983.51, "word": " that", "probability": 0.931640625}, {"start": 1983.51, "end": 1983.93, "word": " have,", "probability": 0.740234375}, {"start": 1984.09, "end": 1984.33, "word": " that", "probability": 0.9462890625}, {"start": 1984.33, "end": 1984.77, "word": " like", "probability": 0.94287109375}, {"start": 1984.77, "end": 1986.05, "word": " T", "probability": 0.7470703125}, {"start": 1986.05, "end": 1986.81, "word": " of", "probability": 0.96826171875}, {"start": 1986.81, "end": 1987.11, "word": " type", "probability": 0.97412109375}, {"start": 1987.11, "end": 1987.35, "word": " A.", "probability": 0.9970703125}, {"start": 1987.95, "end": 1988.37, "word": " That", "probability": 0.8935546875}, {"start": 1988.37, "end": 1988.61, "word": " means", "probability": 0.92333984375}, {"start": 1988.61, "end": 1988.81, "word": " your", "probability": 0.83837890625}, {"start": 1988.81, "end": 1989.21, "word": " sample", "probability": 0.90478515625}], "temperature": 1.0}, {"id": 72, "seek": 201916, "start": 1990.7, "end": 2019.16, "text": " It's convenient for you, but it's biased. Okay, so in this case it's called convenient sample, so it will give incorrect results. So it's convenient sample. Let's do one, the other section, one of these problems, true, false. 
Let's do some of these.", "tokens": [467, 311, 10851, 337, 291, 11, 457, 309, 311, 28035, 13, 1033, 11, 370, 294, 341, 1389, 309, 311, 1219, 10851, 6889, 11, 370, 309, 486, 976, 18424, 3542, 13, 407, 309, 311, 10851, 6889, 13, 961, 311, 360, 472, 11, 264, 661, 3541, 11, 472, 295, 613, 2740, 11, 2074, 11, 7908, 13, 961, 311, 360, 512, 295, 613, 13], "avg_logprob": -0.250882048760691, "compression_ratio": 1.6025641025641026, "no_speech_prob": 0.0, "words": [{"start": 1990.7, "end": 1991.3, "word": " It's", "probability": 0.5889892578125}, {"start": 1991.3, "end": 1991.6, "word": " convenient", "probability": 0.45166015625}, {"start": 1991.6, "end": 1992.06, "word": " for", "probability": 0.943359375}, {"start": 1992.06, "end": 1992.4, "word": " you,", "probability": 0.958984375}, {"start": 1992.52, "end": 1993.32, "word": " but", "probability": 0.84130859375}, {"start": 1993.32, "end": 1993.54, "word": " it's", "probability": 0.779296875}, {"start": 1993.54, "end": 1993.74, "word": " biased.", "probability": 0.71142578125}, {"start": 1996.86, "end": 1997.46, "word": " Okay,", "probability": 0.1470947265625}, {"start": 1997.52, "end": 1997.7, "word": " so", "probability": 0.884765625}, {"start": 1997.7, "end": 1997.86, "word": " in", "probability": 0.83154296875}, {"start": 1997.86, "end": 1998.0, "word": " this", "probability": 0.9501953125}, {"start": 1998.0, "end": 1998.3, "word": " case", "probability": 0.9140625}, {"start": 1998.3, "end": 1998.5, "word": " it's", "probability": 0.79248046875}, {"start": 1998.5, "end": 1998.76, "word": " called", "probability": 0.89111328125}, {"start": 1998.76, "end": 1999.2, "word": " convenient", "probability": 0.466064453125}, {"start": 1999.2, "end": 1999.58, "word": " sample,", "probability": 0.765625}, {"start": 1999.66, "end": 1999.84, "word": " so", "probability": 0.91259765625}, {"start": 1999.84, "end": 1999.96, "word": " it", "probability": 0.9384765625}, {"start": 1999.96, "end": 2000.12, "word": " will", 
"probability": 0.8447265625}, {"start": 2000.12, "end": 2000.34, "word": " give", "probability": 0.875}, {"start": 2000.34, "end": 2000.92, "word": " incorrect", "probability": 0.93603515625}, {"start": 2000.92, "end": 2002.76, "word": " results.", "probability": 0.86572265625}, {"start": 2003.9, "end": 2004.5, "word": " So", "probability": 0.94970703125}, {"start": 2004.5, "end": 2004.74, "word": " it's", "probability": 0.892822265625}, {"start": 2004.74, "end": 2005.28, "word": " convenient", "probability": 0.857421875}, {"start": 2005.28, "end": 2006.32, "word": " sample.", "probability": 0.80224609375}, {"start": 2010.36, "end": 2010.96, "word": " Let's", "probability": 0.963623046875}, {"start": 2010.96, "end": 2011.2, "word": " do", "probability": 0.9521484375}, {"start": 2011.2, "end": 2011.58, "word": " one,", "probability": 0.876953125}, {"start": 2012.98, "end": 2013.3, "word": " the", "probability": 0.72607421875}, {"start": 2013.3, "end": 2013.5, "word": " other", "probability": 0.8955078125}, {"start": 2013.5, "end": 2013.98, "word": " section,", "probability": 0.8896484375}, {"start": 2014.38, "end": 2014.58, "word": " one", "probability": 0.91796875}, {"start": 2014.58, "end": 2014.72, "word": " of", "probability": 0.96728515625}, {"start": 2014.72, "end": 2014.9, "word": " these", "probability": 0.83740234375}, {"start": 2014.9, "end": 2015.54, "word": " problems,", "probability": 0.8349609375}, {"start": 2016.26, "end": 2016.68, "word": " true,", "probability": 0.66650390625}, {"start": 2017.0, "end": 2017.38, "word": " false.", "probability": 0.9072265625}, {"start": 2018.16, "end": 2018.4, "word": " Let's", "probability": 0.97314453125}, {"start": 2018.4, "end": 2018.52, "word": " do", "probability": 0.95556640625}, {"start": 2018.52, "end": 2018.76, "word": " some", "probability": 0.908203125}, {"start": 2018.76, "end": 2018.9, "word": " of", "probability": 0.97216796875}, {"start": 2018.9, "end": 2019.16, "word": " these.", "probability": 
0.70751953125}], "temperature": 1.0}, {"id": 73, "seek": 205129, "start": 2021.91, "end": 2051.29, "text": " Now for a true and false problem, try to avoid calculations as much as you can. Just solve the problem without any computations. Maybe simple calculations might be used, but don't use the exact calculations because the problem asks just true or false. So sometimes the problem makes sense, the answer is true, so just say true without doing the complete calculations.", "tokens": [823, 337, 257, 2074, 293, 7908, 1154, 11, 853, 281, 5042, 20448, 382, 709, 382, 291, 393, 13, 1449, 5039, 264, 1154, 1553, 604, 2807, 763, 13, 2704, 2199, 20448, 1062, 312, 1143, 11, 457, 500, 380, 764, 264, 1900, 20448, 570, 264, 1154, 8962, 445, 2074, 420, 7908, 13, 407, 2171, 264, 1154, 1669, 2020, 11, 264, 1867, 307, 2074, 11, 370, 445, 584, 2074, 1553, 884, 264, 3566, 20448, 13], "avg_logprob": -0.1750856099063403, "compression_ratio": 1.7864077669902914, "no_speech_prob": 0.0, "words": [{"start": 2021.91, "end": 2022.21, "word": " Now", "probability": 0.7548828125}, {"start": 2022.21, "end": 2022.45, "word": " for", "probability": 0.5908203125}, {"start": 2022.45, "end": 2022.61, "word": " a", "probability": 0.364990234375}, {"start": 2022.61, "end": 2022.67, "word": " true", "probability": 0.9365234375}, {"start": 2022.67, "end": 2022.89, "word": " and", "probability": 0.8466796875}, {"start": 2022.89, "end": 2023.15, "word": " false", "probability": 0.8916015625}, {"start": 2023.15, "end": 2023.63, "word": " problem,", "probability": 0.84130859375}, {"start": 2023.89, "end": 2024.29, "word": " try", "probability": 0.92626953125}, {"start": 2024.29, "end": 2024.65, "word": " to", "probability": 0.97021484375}, {"start": 2024.65, "end": 2025.11, "word": " avoid", "probability": 0.9140625}, {"start": 2025.11, "end": 2025.81, "word": " calculations", "probability": 0.89208984375}, {"start": 2025.81, "end": 2026.13, "word": " as", "probability": 0.96337890625}, {"start": 2026.13, 
"end": 2026.47, "word": " much", "probability": 0.91650390625}, {"start": 2026.47, "end": 2026.65, "word": " as", "probability": 0.96826171875}, {"start": 2026.65, "end": 2026.79, "word": " you", "probability": 0.96826171875}, {"start": 2026.79, "end": 2027.05, "word": " can.", "probability": 0.94873046875}, {"start": 2028.17, "end": 2028.61, "word": " Just", "probability": 0.853515625}, {"start": 2028.61, "end": 2029.23, "word": " solve", "probability": 0.93310546875}, {"start": 2029.23, "end": 2029.37, "word": " the", "probability": 0.91455078125}, {"start": 2029.37, "end": 2029.71, "word": " problem", "probability": 0.87353515625}, {"start": 2029.71, "end": 2030.11, "word": " without", "probability": 0.89501953125}, {"start": 2030.11, "end": 2030.51, "word": " any", "probability": 0.8994140625}, {"start": 2030.51, "end": 2031.15, "word": " computations.", "probability": 0.965576171875}, {"start": 2031.39, "end": 2031.59, "word": " Maybe", "probability": 0.9052734375}, {"start": 2031.59, "end": 2033.01, "word": " simple", "probability": 0.68603515625}, {"start": 2033.01, "end": 2033.65, "word": " calculations", "probability": 0.7841796875}, {"start": 2033.65, "end": 2034.65, "word": " might", "probability": 0.84619140625}, {"start": 2034.65, "end": 2034.89, "word": " be", "probability": 0.9560546875}, {"start": 2034.89, "end": 2035.23, "word": " used,", "probability": 0.9140625}, {"start": 2035.65, "end": 2035.79, "word": " but", "probability": 0.92626953125}, {"start": 2035.79, "end": 2036.07, "word": " don't", "probability": 0.9755859375}, {"start": 2036.07, "end": 2036.51, "word": " use", "probability": 0.880859375}, {"start": 2036.51, "end": 2037.21, "word": " the", "probability": 0.90234375}, {"start": 2037.21, "end": 2037.65, "word": " exact", "probability": 0.939453125}, {"start": 2037.65, "end": 2038.27, "word": " calculations", "probability": 0.9091796875}, {"start": 2038.27, "end": 2038.93, "word": " because", "probability": 0.39208984375}, {"start": 
2038.93, "end": 2039.85, "word": " the", "probability": 0.91552734375}, {"start": 2039.85, "end": 2040.15, "word": " problem", "probability": 0.86572265625}, {"start": 2040.15, "end": 2040.43, "word": " asks", "probability": 0.59423828125}, {"start": 2040.43, "end": 2040.89, "word": " just", "probability": 0.81689453125}, {"start": 2040.89, "end": 2041.89, "word": " true", "probability": 0.5634765625}, {"start": 2041.89, "end": 2042.15, "word": " or", "probability": 0.95556640625}, {"start": 2042.15, "end": 2042.55, "word": " false.", "probability": 0.90966796875}, {"start": 2043.11, "end": 2043.35, "word": " So", "probability": 0.85498046875}, {"start": 2043.35, "end": 2043.87, "word": " sometimes", "probability": 0.89404296875}, {"start": 2043.87, "end": 2044.07, "word": " the", "probability": 0.6220703125}, {"start": 2044.07, "end": 2044.45, "word": " problem", "probability": 0.86962890625}, {"start": 2044.45, "end": 2045.15, "word": " makes", "probability": 0.8203125}, {"start": 2045.15, "end": 2045.51, "word": " sense,", "probability": 0.818359375}, {"start": 2045.65, "end": 2045.77, "word": " the", "probability": 0.900390625}, {"start": 2045.77, "end": 2046.09, "word": " answer", "probability": 0.94091796875}, {"start": 2046.09, "end": 2046.29, "word": " is", "probability": 0.89501953125}, {"start": 2046.29, "end": 2046.55, "word": " true,", "probability": 0.92236328125}, {"start": 2046.75, "end": 2046.87, "word": " so", "probability": 0.91943359375}, {"start": 2046.87, "end": 2047.13, "word": " just", "probability": 0.912109375}, {"start": 2047.13, "end": 2047.35, "word": " say", "probability": 0.90673828125}, {"start": 2047.35, "end": 2047.63, "word": " true", "probability": 0.9111328125}, {"start": 2047.63, "end": 2049.09, "word": " without", "probability": 0.7294921875}, {"start": 2049.09, "end": 2049.71, "word": " doing", "probability": 0.9638671875}, {"start": 2049.71, "end": 2049.95, "word": " the", "probability": 0.90576171875}, {"start": 2049.95, 
"end": 2050.45, "word": " complete", "probability": 0.79248046875}, {"start": 2050.45, "end": 2051.29, "word": " calculations.", "probability": 0.931640625}], "temperature": 1.0}, {"id": 74, "seek": 207587, "start": 2052.57, "end": 2075.87, "text": " Because you will waste time. Because as you know, we have exam of just 60 minutes. And for true and false, I expect your answer to be within, for example, 15 seconds. Just read the problem, then figure your answer. So in this case, sometimes you don't need to do the exact calculations.", "tokens": [1436, 291, 486, 5964, 565, 13, 1436, 382, 291, 458, 11, 321, 362, 1139, 295, 445, 4060, 2077, 13, 400, 337, 2074, 293, 7908, 11, 286, 2066, 428, 1867, 281, 312, 1951, 11, 337, 1365, 11, 2119, 3949, 13, 1449, 1401, 264, 1154, 11, 550, 2573, 428, 1867, 13, 407, 294, 341, 1389, 11, 2171, 291, 500, 380, 643, 281, 360, 264, 1900, 20448, 13], "avg_logprob": -0.21176609916217398, "compression_ratio": 1.4793814432989691, "no_speech_prob": 0.0, "words": [{"start": 2052.57, "end": 2052.99, "word": " Because", "probability": 0.55322265625}, {"start": 2052.99, "end": 2053.11, "word": " you", "probability": 0.66162109375}, {"start": 2053.11, "end": 2053.23, "word": " will", "probability": 0.79833984375}, {"start": 2053.23, "end": 2053.47, "word": " waste", "probability": 0.84912109375}, {"start": 2053.47, "end": 2053.67, "word": " time.", "probability": 0.869140625}, {"start": 2053.77, "end": 2053.91, "word": " Because", "probability": 0.58251953125}, {"start": 2053.91, "end": 2054.21, "word": " as", "probability": 0.322021484375}, {"start": 2054.21, "end": 2054.37, "word": " you", "probability": 0.95849609375}, {"start": 2054.37, "end": 2054.45, "word": " know,", "probability": 0.88330078125}, {"start": 2054.51, "end": 2054.59, "word": " we", "probability": 0.8916015625}, {"start": 2054.59, "end": 2054.73, "word": " have", "probability": 0.90283203125}, {"start": 2054.73, "end": 2055.01, "word": " exam", "probability": 0.76953125}, 
{"start": 2055.01, "end": 2055.21, "word": " of", "probability": 0.76806640625}, {"start": 2055.21, "end": 2055.77, "word": " just", "probability": 0.88818359375}, {"start": 2055.77, "end": 2056.15, "word": " 60", "probability": 0.85791015625}, {"start": 2056.15, "end": 2056.61, "word": " minutes.", "probability": 0.89501953125}, {"start": 2057.57, "end": 2057.71, "word": " And", "probability": 0.86767578125}, {"start": 2057.71, "end": 2057.97, "word": " for", "probability": 0.52490234375}, {"start": 2057.97, "end": 2058.21, "word": " true", "probability": 0.673828125}, {"start": 2058.21, "end": 2058.31, "word": " and", "probability": 0.82861328125}, {"start": 2058.31, "end": 2058.71, "word": " false,", "probability": 0.9052734375}, {"start": 2058.81, "end": 2058.99, "word": " I", "probability": 0.98828125}, {"start": 2058.99, "end": 2059.31, "word": " expect", "probability": 0.912109375}, {"start": 2059.31, "end": 2059.51, "word": " your", "probability": 0.87060546875}, {"start": 2059.51, "end": 2059.79, "word": " answer", "probability": 0.9453125}, {"start": 2059.79, "end": 2059.99, "word": " to", "probability": 0.95849609375}, {"start": 2059.99, "end": 2060.13, "word": " be", "probability": 0.94921875}, {"start": 2060.13, "end": 2060.51, "word": " within,", "probability": 0.890625}, {"start": 2060.95, "end": 2061.07, "word": " for", "probability": 0.9541015625}, {"start": 2061.07, "end": 2061.41, "word": " example,", "probability": 0.97021484375}, {"start": 2061.53, "end": 2062.49, "word": " 15", "probability": 0.65283203125}, {"start": 2062.49, "end": 2063.11, "word": " seconds.", "probability": 0.759765625}, {"start": 2064.73, "end": 2065.27, "word": " Just", "probability": 0.75732421875}, {"start": 2065.27, "end": 2065.69, "word": " read", "probability": 0.9560546875}, {"start": 2065.69, "end": 2065.85, "word": " the", "probability": 0.91796875}, {"start": 2065.85, "end": 2066.17, "word": " problem,", "probability": 0.9013671875}, {"start": 2067.41, "end": 
2068.69, "word": " then", "probability": 0.84521484375}, {"start": 2068.69, "end": 2068.97, "word": " figure", "probability": 0.9638671875}, {"start": 2068.97, "end": 2069.17, "word": " your", "probability": 0.8193359375}, {"start": 2069.17, "end": 2069.49, "word": " answer.", "probability": 0.9599609375}, {"start": 2071.29, "end": 2071.51, "word": " So", "probability": 0.90576171875}, {"start": 2071.51, "end": 2072.05, "word": " in", "probability": 0.6494140625}, {"start": 2072.05, "end": 2072.19, "word": " this", "probability": 0.94384765625}, {"start": 2072.19, "end": 2072.45, "word": " case,", "probability": 0.91455078125}, {"start": 2072.69, "end": 2073.19, "word": " sometimes", "probability": 0.9501953125}, {"start": 2073.19, "end": 2073.39, "word": " you", "probability": 0.88916015625}, {"start": 2073.39, "end": 2073.61, "word": " don't", "probability": 0.9677734375}, {"start": 2073.61, "end": 2073.91, "word": " need", "probability": 0.92529296875}, {"start": 2073.91, "end": 2074.27, "word": " to", "probability": 0.966796875}, {"start": 2074.27, "end": 2074.51, "word": " do", "probability": 0.95849609375}, {"start": 2074.51, "end": 2074.83, "word": " the", "probability": 0.87451171875}, {"start": 2074.83, "end": 2075.23, "word": " exact", "probability": 0.947265625}, {"start": 2075.23, "end": 2075.87, "word": " calculations.", "probability": 0.92626953125}], "temperature": 1.0}, {"id": 75, "seek": 210116, "start": 2077.16, "end": 2101.16, "text": " But for the free response problems you have to do the calculations. But here just think about it a little bit and within a few seconds you can figure out if it is true or false. Now let's think about number one. 
Now each of these problems I will ask all of you to figure out the answer and I will give bonus, one point for each one.", "tokens": [583, 337, 264, 1737, 4134, 2740, 291, 362, 281, 360, 264, 20448, 13, 583, 510, 445, 519, 466, 309, 257, 707, 857, 293, 1951, 257, 1326, 3949, 291, 393, 2573, 484, 498, 309, 307, 2074, 420, 7908, 13, 823, 718, 311, 519, 466, 1230, 472, 13, 823, 1184, 295, 613, 2740, 286, 486, 1029, 439, 295, 291, 281, 2573, 484, 264, 1867, 293, 286, 486, 976, 10882, 11, 472, 935, 337, 1184, 472, 13], "avg_logprob": -0.17010416865348815, "compression_ratio": 1.6567164179104477, "no_speech_prob": 0.0, "words": [{"start": 2077.16, "end": 2077.44, "word": " But", "probability": 0.58056640625}, {"start": 2077.44, "end": 2077.68, "word": " for", "probability": 0.8720703125}, {"start": 2077.68, "end": 2077.86, "word": " the", "probability": 0.86572265625}, {"start": 2077.86, "end": 2078.0, "word": " free", "probability": 0.7646484375}, {"start": 2078.0, "end": 2078.44, "word": " response", "probability": 0.7490234375}, {"start": 2078.44, "end": 2078.88, "word": " problems", "probability": 0.485595703125}, {"start": 2078.88, "end": 2079.08, "word": " you", "probability": 0.44921875}, {"start": 2079.08, "end": 2079.26, "word": " have", "probability": 0.9384765625}, {"start": 2079.26, "end": 2079.4, "word": " to", "probability": 0.97119140625}, {"start": 2079.4, "end": 2079.64, "word": " do", "probability": 0.94677734375}, {"start": 2079.64, "end": 2080.24, "word": " the", "probability": 0.79248046875}, {"start": 2080.24, "end": 2080.84, "word": " calculations.", "probability": 0.8740234375}, {"start": 2081.0, "end": 2081.14, "word": " But", "probability": 0.82958984375}, {"start": 2081.14, "end": 2081.36, "word": " here", "probability": 0.84130859375}, {"start": 2081.36, "end": 2081.66, "word": " just", "probability": 0.64892578125}, {"start": 2081.66, "end": 2082.02, "word": " think", "probability": 0.91064453125}, {"start": 2082.02, "end": 2082.3, "word": " 
about", "probability": 0.89404296875}, {"start": 2082.3, "end": 2082.52, "word": " it", "probability": 0.94091796875}, {"start": 2082.52, "end": 2082.68, "word": " a", "probability": 0.7177734375}, {"start": 2082.68, "end": 2082.82, "word": " little", "probability": 0.85595703125}, {"start": 2082.82, "end": 2083.12, "word": " bit", "probability": 0.94580078125}, {"start": 2083.12, "end": 2083.82, "word": " and", "probability": 0.69189453125}, {"start": 2083.82, "end": 2084.08, "word": " within", "probability": 0.90283203125}, {"start": 2084.08, "end": 2084.24, "word": " a", "probability": 0.96484375}, {"start": 2084.24, "end": 2084.44, "word": " few", "probability": 0.90673828125}, {"start": 2084.44, "end": 2085.06, "word": " seconds", "probability": 0.81640625}, {"start": 2085.06, "end": 2085.28, "word": " you", "probability": 0.92626953125}, {"start": 2085.28, "end": 2085.58, "word": " can", "probability": 0.93701171875}, {"start": 2085.58, "end": 2085.86, "word": " figure", "probability": 0.9658203125}, {"start": 2085.86, "end": 2086.24, "word": " out", "probability": 0.87890625}, {"start": 2086.24, "end": 2086.5, "word": " if", "probability": 0.9384765625}, {"start": 2086.5, "end": 2086.7, "word": " it", "probability": 0.740234375}, {"start": 2086.7, "end": 2086.74, "word": " is", "probability": 0.55126953125}, {"start": 2086.74, "end": 2086.94, "word": " true", "probability": 0.91845703125}, {"start": 2086.94, "end": 2087.14, "word": " or", "probability": 0.95361328125}, {"start": 2087.14, "end": 2087.46, "word": " false.", "probability": 0.916015625}, {"start": 2088.7, "end": 2089.12, "word": " Now", "probability": 0.91064453125}, {"start": 2089.12, "end": 2089.42, "word": " let's", "probability": 0.84033203125}, {"start": 2089.42, "end": 2089.72, "word": " think", "probability": 0.90087890625}, {"start": 2089.72, "end": 2090.04, "word": " about", "probability": 0.91259765625}, {"start": 2090.04, "end": 2090.32, "word": " number", "probability": 0.87890625}, 
{"start": 2090.32, "end": 2090.68, "word": " one.", "probability": 0.70751953125}, {"start": 2092.48, "end": 2092.74, "word": " Now", "probability": 0.90234375}, {"start": 2092.74, "end": 2093.6, "word": " each", "probability": 0.6904296875}, {"start": 2093.6, "end": 2093.76, "word": " of", "probability": 0.96337890625}, {"start": 2093.76, "end": 2093.94, "word": " these", "probability": 0.8203125}, {"start": 2093.94, "end": 2094.34, "word": " problems", "probability": 0.8349609375}, {"start": 2094.34, "end": 2094.62, "word": " I", "probability": 0.79296875}, {"start": 2094.62, "end": 2094.74, "word": " will", "probability": 0.8837890625}, {"start": 2094.74, "end": 2095.08, "word": " ask", "probability": 0.91943359375}, {"start": 2095.08, "end": 2095.42, "word": " all", "probability": 0.9423828125}, {"start": 2095.42, "end": 2095.54, "word": " of", "probability": 0.96728515625}, {"start": 2095.54, "end": 2095.7, "word": " you", "probability": 0.9619140625}, {"start": 2095.7, "end": 2095.96, "word": " to", "probability": 0.96337890625}, {"start": 2095.96, "end": 2096.24, "word": " figure", "probability": 0.96240234375}, {"start": 2096.24, "end": 2096.48, "word": " out", "probability": 0.88623046875}, {"start": 2096.48, "end": 2096.66, "word": " the", "probability": 0.91796875}, {"start": 2096.66, "end": 2097.0, "word": " answer", "probability": 0.94384765625}, {"start": 2097.0, "end": 2098.06, "word": " and", "probability": 0.73291015625}, {"start": 2098.06, "end": 2098.2, "word": " I", "probability": 0.99072265625}, {"start": 2098.2, "end": 2098.34, "word": " will", "probability": 0.8876953125}, {"start": 2098.34, "end": 2098.64, "word": " give", "probability": 0.87646484375}, {"start": 2098.64, "end": 2099.3, "word": " bonus,", "probability": 0.9365234375}, {"start": 2099.5, "end": 2099.76, "word": " one", "probability": 0.8466796875}, {"start": 2099.76, "end": 2100.2, "word": " point", "probability": 0.9677734375}, {"start": 2100.2, "end": 2100.72, "word": " 
for", "probability": 0.80419921875}, {"start": 2100.72, "end": 2100.9, "word": " each", "probability": 0.935546875}, {"start": 2100.9, "end": 2101.16, "word": " one.", "probability": 0.91064453125}], "temperature": 1.0}, {"id": 76, "seek": 212861, "start": 2103.43, "end": 2128.61, "text": " Forget about the answer is it true, I need explanation why is it true. Now let's see. The amount of time it takes to complete an examination has skewed left distribution with a mean of 65 minutes and standard deviation of 8 minutes.", "tokens": [18675, 466, 264, 1867, 307, 309, 2074, 11, 286, 643, 10835, 983, 307, 309, 2074, 13, 823, 718, 311, 536, 13, 440, 2372, 295, 565, 309, 2516, 281, 3566, 364, 23874, 575, 8756, 26896, 1411, 7316, 365, 257, 914, 295, 11624, 2077, 293, 3832, 25163, 295, 1649, 2077, 13], "avg_logprob": -0.19546875536441802, "compression_ratio": 1.4472049689440993, "no_speech_prob": 0.0, "words": [{"start": 2103.4300000000003, "end": 2104.11, "word": " Forget", "probability": 0.71240234375}, {"start": 2104.11, "end": 2104.45, "word": " about", "probability": 0.87548828125}, {"start": 2104.45, "end": 2104.67, "word": " the", "probability": 0.7958984375}, {"start": 2104.67, "end": 2104.97, "word": " answer", "probability": 0.8583984375}, {"start": 2104.97, "end": 2105.13, "word": " is", "probability": 0.471435546875}, {"start": 2105.13, "end": 2105.29, "word": " it", "probability": 0.63037109375}, {"start": 2105.29, "end": 2105.49, "word": " true,", "probability": 0.9033203125}, {"start": 2105.81, "end": 2105.99, "word": " I", "probability": 0.96240234375}, {"start": 2105.99, "end": 2106.29, "word": " need", "probability": 0.935546875}, {"start": 2106.29, "end": 2107.13, "word": " explanation", "probability": 0.76513671875}, {"start": 2107.13, "end": 2107.93, "word": " why", "probability": 0.4970703125}, {"start": 2107.93, "end": 2108.79, "word": " is", "probability": 0.57421875}, {"start": 2108.79, "end": 2108.93, "word": " it", "probability": 0.87890625}, 
{"start": 2108.93, "end": 2109.21, "word": " true.", "probability": 0.958984375}, {"start": 2110.27, "end": 2110.53, "word": " Now", "probability": 0.7001953125}, {"start": 2110.53, "end": 2110.83, "word": " let's", "probability": 0.8486328125}, {"start": 2110.83, "end": 2111.05, "word": " see.", "probability": 0.693359375}, {"start": 2112.71, "end": 2113.05, "word": " The", "probability": 0.88427734375}, {"start": 2113.05, "end": 2113.43, "word": " amount", "probability": 0.89697265625}, {"start": 2113.43, "end": 2113.63, "word": " of", "probability": 0.9677734375}, {"start": 2113.63, "end": 2114.05, "word": " time", "probability": 0.8916015625}, {"start": 2114.05, "end": 2116.29, "word": " it", "probability": 0.89013671875}, {"start": 2116.29, "end": 2116.79, "word": " takes", "probability": 0.7998046875}, {"start": 2116.79, "end": 2117.05, "word": " to", "probability": 0.96728515625}, {"start": 2117.05, "end": 2117.49, "word": " complete", "probability": 0.83544921875}, {"start": 2117.49, "end": 2117.69, "word": " an", "probability": 0.9599609375}, {"start": 2117.69, "end": 2118.31, "word": " examination", "probability": 0.94873046875}, {"start": 2118.31, "end": 2119.27, "word": " has", "probability": 0.85888671875}, {"start": 2119.27, "end": 2120.65, "word": " skewed", "probability": 0.95166015625}, {"start": 2120.65, "end": 2120.93, "word": " left", "probability": 0.91259765625}, {"start": 2120.93, "end": 2121.67, "word": " distribution", "probability": 0.84619140625}, {"start": 2121.67, "end": 2122.59, "word": " with", "probability": 0.80859375}, {"start": 2122.59, "end": 2122.75, "word": " a", "probability": 0.97412109375}, {"start": 2122.75, "end": 2122.89, "word": " mean", "probability": 0.955078125}, {"start": 2122.89, "end": 2123.05, "word": " of", "probability": 0.96875}, {"start": 2123.05, "end": 2123.57, "word": " 65", "probability": 0.853515625}, {"start": 2123.57, "end": 2124.09, "word": " minutes", "probability": 0.89599609375}, {"start": 2124.09, 
"end": 2126.49, "word": " and", "probability": 0.869140625}, {"start": 2126.49, "end": 2127.31, "word": " standard", "probability": 0.89013671875}, {"start": 2127.31, "end": 2127.71, "word": " deviation", "probability": 0.90478515625}, {"start": 2127.71, "end": 2127.95, "word": " of", "probability": 0.962890625}, {"start": 2127.95, "end": 2128.21, "word": " 8", "probability": 0.61328125}, {"start": 2128.21, "end": 2128.61, "word": " minutes.", "probability": 0.88427734375}], "temperature": 1.0}, {"id": 77, "seek": 216073, "start": 2131.46, "end": 2160.74, "text": " If 64 students were randomly sampled. So we select a random sample of 64. Now we ask about the probability that the sample mean of the sampled students exceeds 71 minutes is approximately zero. Probability that the sample mean of the 64 students", "tokens": [759, 12145, 1731, 645, 16979, 3247, 15551, 13, 407, 321, 3048, 257, 4974, 6889, 295, 12145, 13, 823, 321, 1029, 466, 264, 8482, 300, 264, 6889, 914, 295, 264, 3247, 15551, 1731, 43305, 30942, 2077, 307, 10447, 4018, 13, 8736, 2310, 300, 264, 6889, 914, 295, 264, 12145, 1731], "avg_logprob": -0.19046874314546586, "compression_ratio": 1.732394366197183, "no_speech_prob": 0.0, "words": [{"start": 2131.46, "end": 2131.74, "word": " If", "probability": 0.5576171875}, {"start": 2131.74, "end": 2132.2, "word": " 64", "probability": 0.888671875}, {"start": 2132.2, "end": 2132.82, "word": " students", "probability": 0.96240234375}, {"start": 2132.82, "end": 2133.04, "word": " were", "probability": 0.88623046875}, {"start": 2133.04, "end": 2133.4, "word": " randomly", "probability": 0.81494140625}, {"start": 2133.4, "end": 2134.26, "word": " sampled.", "probability": 0.7548828125}, {"start": 2135.02, "end": 2135.84, "word": " So", "probability": 0.86279296875}, {"start": 2135.84, "end": 2136.04, "word": " we", "probability": 0.68115234375}, {"start": 2136.04, "end": 2136.54, "word": " select", "probability": 0.791015625}, {"start": 2136.54, "end": 2136.68, 
"word": " a", "probability": 0.91943359375}, {"start": 2136.68, "end": 2136.94, "word": " random", "probability": 0.86279296875}, {"start": 2136.94, "end": 2137.34, "word": " sample", "probability": 0.912109375}, {"start": 2137.34, "end": 2137.56, "word": " of", "probability": 0.95361328125}, {"start": 2137.56, "end": 2138.06, "word": " 64.", "probability": 0.95703125}, {"start": 2140.16, "end": 2140.42, "word": " Now", "probability": 0.93603515625}, {"start": 2140.42, "end": 2140.52, "word": " we", "probability": 0.394287109375}, {"start": 2140.52, "end": 2140.74, "word": " ask", "probability": 0.9404296875}, {"start": 2140.74, "end": 2140.98, "word": " about", "probability": 0.91748046875}, {"start": 2140.98, "end": 2141.86, "word": " the", "probability": 0.75439453125}, {"start": 2141.86, "end": 2142.38, "word": " probability", "probability": 0.9423828125}, {"start": 2142.38, "end": 2142.8, "word": " that", "probability": 0.9296875}, {"start": 2142.8, "end": 2144.0, "word": " the", "probability": 0.7734375}, {"start": 2144.0, "end": 2144.44, "word": " sample", "probability": 0.9013671875}, {"start": 2144.44, "end": 2144.78, "word": " mean", "probability": 0.94189453125}, {"start": 2144.78, "end": 2146.54, "word": " of", "probability": 0.87939453125}, {"start": 2146.54, "end": 2146.82, "word": " the", "probability": 0.91845703125}, {"start": 2146.82, "end": 2147.26, "word": " sampled", "probability": 0.833251953125}, {"start": 2147.26, "end": 2148.08, "word": " students", "probability": 0.95751953125}, {"start": 2148.08, "end": 2148.96, "word": " exceeds", "probability": 0.7685546875}, {"start": 2148.96, "end": 2149.76, "word": " 71", "probability": 0.95263671875}, {"start": 2149.76, "end": 2150.28, "word": " minutes", "probability": 0.90625}, {"start": 2150.28, "end": 2150.58, "word": " is", "probability": 0.88671875}, {"start": 2150.58, "end": 2151.18, "word": " approximately", "probability": 0.8896484375}, {"start": 2151.18, "end": 2151.96, "word": " zero.", 
"probability": 0.6435546875}, {"start": 2152.98, "end": 2153.7, "word": " Probability", "probability": 0.951171875}, {"start": 2153.7, "end": 2154.08, "word": " that", "probability": 0.900390625}, {"start": 2154.08, "end": 2157.04, "word": " the", "probability": 0.76708984375}, {"start": 2157.04, "end": 2157.3, "word": " sample", "probability": 0.89697265625}, {"start": 2157.3, "end": 2157.68, "word": " mean", "probability": 0.96044921875}, {"start": 2157.68, "end": 2158.64, "word": " of", "probability": 0.9658203125}, {"start": 2158.64, "end": 2159.44, "word": " the", "probability": 0.822265625}, {"start": 2159.44, "end": 2159.9, "word": " 64", "probability": 0.96923828125}, {"start": 2159.9, "end": 2160.74, "word": " students", "probability": 0.9716796875}], "temperature": 1.0}, {"id": 78, "seek": 218730, "start": 2163.24, "end": 2187.3, "text": " score above 71 this probability is approximately zero so the problem says the amount of time it takes to complete the final examination is not normal it's left skewed skewed to the left here we select a random sample of 65", "tokens": [6175, 3673, 30942, 341, 8482, 307, 10447, 4018, 370, 264, 1154, 1619, 264, 2372, 295, 565, 309, 2516, 281, 3566, 264, 2572, 23874, 307, 406, 2710, 309, 311, 1411, 8756, 26896, 8756, 26896, 281, 264, 1411, 510, 321, 3048, 257, 4974, 6889, 295, 11624], "avg_logprob": -0.22517360581292045, "compression_ratio": 1.4768211920529801, "no_speech_prob": 0.0, "words": [{"start": 2163.24, "end": 2163.68, "word": " score", "probability": 0.312255859375}, {"start": 2163.68, "end": 2164.42, "word": " above", "probability": 0.90625}, {"start": 2164.42, "end": 2165.34, "word": " 71", "probability": 0.9208984375}, {"start": 2165.34, "end": 2166.5, "word": " this", "probability": 0.482666015625}, {"start": 2166.5, "end": 2167.0, "word": " probability", "probability": 0.9599609375}, {"start": 2167.0, "end": 2167.84, "word": " is", "probability": 0.94287109375}, {"start": 2167.84, "end": 2168.54, "word": " 
approximately", "probability": 0.85302734375}, {"start": 2168.54, "end": 2168.96, "word": " zero", "probability": 0.46484375}, {"start": 2168.96, "end": 2172.32, "word": " so", "probability": 0.294921875}, {"start": 2172.32, "end": 2172.5, "word": " the", "probability": 0.87060546875}, {"start": 2172.5, "end": 2172.78, "word": " problem", "probability": 0.8955078125}, {"start": 2172.78, "end": 2173.26, "word": " says", "probability": 0.88720703125}, {"start": 2173.26, "end": 2174.34, "word": " the", "probability": 0.7294921875}, {"start": 2174.34, "end": 2174.66, "word": " amount", "probability": 0.87548828125}, {"start": 2174.66, "end": 2174.88, "word": " of", "probability": 0.96826171875}, {"start": 2174.88, "end": 2175.34, "word": " time", "probability": 0.890625}, {"start": 2175.34, "end": 2176.42, "word": " it", "probability": 0.9453125}, {"start": 2176.42, "end": 2176.82, "word": " takes", "probability": 0.80322265625}, {"start": 2176.82, "end": 2176.98, "word": " to", "probability": 0.90673828125}, {"start": 2176.98, "end": 2177.36, "word": " complete", "probability": 0.79443359375}, {"start": 2177.36, "end": 2177.8, "word": " the", "probability": 0.6259765625}, {"start": 2177.8, "end": 2178.02, "word": " final", "probability": 0.95263671875}, {"start": 2178.02, "end": 2178.58, "word": " examination", "probability": 0.92333984375}, {"start": 2178.58, "end": 2179.72, "word": " is", "probability": 0.92578125}, {"start": 2179.72, "end": 2179.92, "word": " not", "probability": 0.94384765625}, {"start": 2179.92, "end": 2180.36, "word": " normal", "probability": 0.85107421875}, {"start": 2180.36, "end": 2181.32, "word": " it's", "probability": 0.7490234375}, {"start": 2181.32, "end": 2181.66, "word": " left", "probability": 0.93359375}, {"start": 2181.66, "end": 2182.24, "word": " skewed", "probability": 0.867919921875}, {"start": 2182.24, "end": 2183.48, "word": " skewed", "probability": 0.741455078125}, {"start": 2183.48, "end": 2183.62, "word": " to", 
"probability": 0.96728515625}, {"start": 2183.62, "end": 2183.76, "word": " the", "probability": 0.92138671875}, {"start": 2183.76, "end": 2184.04, "word": " left", "probability": 0.94384765625}, {"start": 2184.04, "end": 2184.94, "word": " here", "probability": 0.70263671875}, {"start": 2184.94, "end": 2185.1, "word": " we", "probability": 0.91015625}, {"start": 2185.1, "end": 2185.38, "word": " select", "probability": 0.849609375}, {"start": 2185.38, "end": 2185.5, "word": " a", "probability": 0.97705078125}, {"start": 2185.5, "end": 2185.72, "word": " random", "probability": 0.87841796875}, {"start": 2185.72, "end": 2186.04, "word": " sample", "probability": 0.91015625}, {"start": 2186.04, "end": 2186.86, "word": " of", "probability": 0.57470703125}, {"start": 2186.86, "end": 2187.3, "word": " 65", "probability": 0.88525390625}], "temperature": 1.0}, {"id": 79, "seek": 221808, "start": 2189.16, "end": 2218.08, "text": " I'm sorry, of 64 students. That gives sample mean of 65, and we have standard deviation of 8. Now, the population by itself is not normal, and the population has mean of 65, as we mentioned, and standard deviation of 8. 
Now we select a random sample of 64, it's large sample size.", "tokens": [286, 478, 2597, 11, 295, 12145, 1731, 13, 663, 2709, 6889, 914, 295, 11624, 11, 293, 321, 362, 3832, 25163, 295, 1649, 13, 823, 11, 264, 4415, 538, 2564, 307, 406, 2710, 11, 293, 264, 4415, 575, 914, 295, 11624, 11, 382, 321, 2835, 11, 293, 3832, 25163, 295, 1649, 13, 823, 321, 3048, 257, 4974, 6889, 295, 12145, 11, 309, 311, 2416, 6889, 2744, 13], "avg_logprob": -0.1646455268361675, "compression_ratio": 1.6726190476190477, "no_speech_prob": 0.0, "words": [{"start": 2189.16, "end": 2189.44, "word": " I'm", "probability": 0.7088623046875}, {"start": 2189.44, "end": 2189.64, "word": " sorry,", "probability": 0.85498046875}, {"start": 2189.78, "end": 2190.0, "word": " of", "probability": 0.8955078125}, {"start": 2190.0, "end": 2190.66, "word": " 64", "probability": 0.8505859375}, {"start": 2190.66, "end": 2191.28, "word": " students.", "probability": 0.96826171875}, {"start": 2191.52, "end": 2191.98, "word": " That", "probability": 0.86962890625}, {"start": 2191.98, "end": 2192.3, "word": " gives", "probability": 0.8408203125}, {"start": 2192.3, "end": 2192.68, "word": " sample", "probability": 0.771484375}, {"start": 2192.68, "end": 2192.92, "word": " mean", "probability": 0.92724609375}, {"start": 2192.92, "end": 2193.14, "word": " of", "probability": 0.9677734375}, {"start": 2193.14, "end": 2193.68, "word": " 65,", "probability": 0.96630859375}, {"start": 2195.26, "end": 2196.52, "word": " and", "probability": 0.86328125}, {"start": 2196.52, "end": 2196.7, "word": " we", "probability": 0.953125}, {"start": 2196.7, "end": 2196.86, "word": " have", "probability": 0.93408203125}, {"start": 2196.86, "end": 2197.14, "word": " standard", "probability": 0.9453125}, {"start": 2197.14, "end": 2197.5, "word": " deviation", "probability": 0.9375}, {"start": 2197.5, "end": 2197.72, "word": " of", "probability": 0.9697265625}, {"start": 2197.72, "end": 2197.9, "word": " 8.", "probability": 0.80712890625}, 
{"start": 2198.66, "end": 2199.08, "word": " Now,", "probability": 0.93359375}, {"start": 2200.44, "end": 2201.28, "word": " the", "probability": 0.84619140625}, {"start": 2201.28, "end": 2201.76, "word": " population", "probability": 0.94287109375}, {"start": 2201.76, "end": 2202.4, "word": " by", "probability": 0.89990234375}, {"start": 2202.4, "end": 2202.86, "word": " itself", "probability": 0.84033203125}, {"start": 2202.86, "end": 2203.48, "word": " is", "probability": 0.92626953125}, {"start": 2203.48, "end": 2203.74, "word": " not", "probability": 0.943359375}, {"start": 2203.74, "end": 2204.18, "word": " normal,", "probability": 0.873046875}, {"start": 2204.7, "end": 2204.9, "word": " and", "probability": 0.93359375}, {"start": 2204.9, "end": 2205.02, "word": " the", "probability": 0.8984375}, {"start": 2205.02, "end": 2205.42, "word": " population", "probability": 0.9345703125}, {"start": 2205.42, "end": 2205.74, "word": " has", "probability": 0.93798828125}, {"start": 2205.74, "end": 2206.06, "word": " mean", "probability": 0.931640625}, {"start": 2206.06, "end": 2206.48, "word": " of", "probability": 0.966796875}, {"start": 2206.48, "end": 2207.34, "word": " 65,", "probability": 0.97216796875}, {"start": 2208.34, "end": 2208.78, "word": " as", "probability": 0.9609375}, {"start": 2208.78, "end": 2208.9, "word": " we", "probability": 0.9111328125}, {"start": 2208.9, "end": 2209.2, "word": " mentioned,", "probability": 0.828125}, {"start": 2209.86, "end": 2210.14, "word": " and", "probability": 0.939453125}, {"start": 2210.14, "end": 2211.44, "word": " standard", "probability": 0.93896484375}, {"start": 2211.44, "end": 2211.88, "word": " deviation", "probability": 0.923828125}, {"start": 2211.88, "end": 2212.5, "word": " of", "probability": 0.96337890625}, {"start": 2212.5, "end": 2212.84, "word": " 8.", "probability": 0.9609375}, {"start": 2214.1, "end": 2214.62, "word": " Now", "probability": 0.94677734375}, {"start": 2214.62, "end": 2214.78, "word": " 
we", "probability": 0.51513671875}, {"start": 2214.78, "end": 2215.06, "word": " select", "probability": 0.83544921875}, {"start": 2215.06, "end": 2215.2, "word": " a", "probability": 0.9521484375}, {"start": 2215.2, "end": 2215.4, "word": " random", "probability": 0.86279296875}, {"start": 2215.4, "end": 2215.72, "word": " sample", "probability": 0.93505859375}, {"start": 2215.72, "end": 2215.9, "word": " of", "probability": 0.9609375}, {"start": 2215.9, "end": 2216.44, "word": " 64,", "probability": 0.9716796875}, {"start": 2216.76, "end": 2216.94, "word": " it's", "probability": 0.8876953125}, {"start": 2216.94, "end": 2217.26, "word": " large", "probability": 0.77490234375}, {"start": 2217.26, "end": 2217.64, "word": " sample", "probability": 0.88720703125}, {"start": 2217.64, "end": 2218.08, "word": " size.", "probability": 0.85546875}], "temperature": 1.0}, {"id": 80, "seek": 224157, "start": 2219.55, "end": 2241.57, "text": " What's the probability that the sample mean exceeds 71? It says the answer is approximately zero. Think about it. Why the answer is true in this case? 
It might be because 71 is more than the answer.", "tokens": [708, 311, 264, 8482, 300, 264, 6889, 914, 43305, 30942, 30, 467, 1619, 264, 1867, 307, 10447, 4018, 13, 6557, 466, 309, 13, 1545, 264, 1867, 307, 2074, 294, 341, 1389, 30, 467, 1062, 312, 570, 30942, 307, 544, 813, 264, 1867, 13], "avg_logprob": -0.1899857913905924, "compression_ratio": 1.4113475177304964, "no_speech_prob": 0.0, "words": [{"start": 2219.55, "end": 2220.05, "word": " What's", "probability": 0.783447265625}, {"start": 2220.05, "end": 2220.25, "word": " the", "probability": 0.91357421875}, {"start": 2220.25, "end": 2220.61, "word": " probability", "probability": 0.96533203125}, {"start": 2220.61, "end": 2221.11, "word": " that", "probability": 0.9228515625}, {"start": 2221.11, "end": 2222.87, "word": " the", "probability": 0.85009765625}, {"start": 2222.87, "end": 2223.19, "word": " sample", "probability": 0.6298828125}, {"start": 2223.19, "end": 2223.53, "word": " mean", "probability": 0.8955078125}, {"start": 2223.53, "end": 2224.65, "word": " exceeds", "probability": 0.9345703125}, {"start": 2224.65, "end": 2225.17, "word": " 71?", "probability": 0.95068359375}, {"start": 2225.29, "end": 2225.45, "word": " It", "probability": 0.9140625}, {"start": 2225.45, "end": 2225.81, "word": " says", "probability": 0.8896484375}, {"start": 2225.81, "end": 2226.47, "word": " the", "probability": 0.76318359375}, {"start": 2226.47, "end": 2226.79, "word": " answer", "probability": 0.958984375}, {"start": 2226.79, "end": 2226.99, "word": " is", "probability": 0.95068359375}, {"start": 2226.99, "end": 2227.47, "word": " approximately", "probability": 0.89501953125}, {"start": 2227.47, "end": 2227.93, "word": " zero.", "probability": 0.6865234375}, {"start": 2228.87, "end": 2229.15, "word": " Think", "probability": 0.94091796875}, {"start": 2229.15, "end": 2229.51, "word": " about", "probability": 0.90869140625}, {"start": 2229.51, "end": 2229.83, "word": " it.", "probability": 0.8310546875}, {"start": 
2230.73, "end": 2231.11, "word": " Why", "probability": 0.90576171875}, {"start": 2231.11, "end": 2233.15, "word": " the", "probability": 0.6142578125}, {"start": 2233.15, "end": 2233.63, "word": " answer", "probability": 0.962890625}, {"start": 2233.63, "end": 2235.45, "word": " is", "probability": 0.806640625}, {"start": 2235.45, "end": 2235.73, "word": " true", "probability": 0.80322265625}, {"start": 2235.73, "end": 2235.89, "word": " in", "probability": 0.91943359375}, {"start": 2235.89, "end": 2236.05, "word": " this", "probability": 0.95068359375}, {"start": 2236.05, "end": 2236.41, "word": " case?", "probability": 0.923828125}, {"start": 2237.89, "end": 2238.07, "word": " It", "probability": 0.60107421875}, {"start": 2238.07, "end": 2238.41, "word": " might", "probability": 0.88818359375}, {"start": 2238.41, "end": 2238.67, "word": " be", "probability": 0.9423828125}, {"start": 2238.67, "end": 2239.15, "word": " because", "probability": 0.89892578125}, {"start": 2239.15, "end": 2239.91, "word": " 71", "probability": 0.92333984375}, {"start": 2239.91, "end": 2240.35, "word": " is", "probability": 0.95166015625}, {"start": 2240.35, "end": 2240.65, "word": " more", "probability": 0.93212890625}, {"start": 2240.65, "end": 2241.05, "word": " than", "probability": 0.95361328125}, {"start": 2241.05, "end": 2241.33, "word": " the", "probability": 0.64404296875}, {"start": 2241.33, "end": 2241.57, "word": " answer.", "probability": 0.1683349609375}], "temperature": 1.0}, {"id": 81, "seek": 226450, "start": 2243.68, "end": 2264.5, "text": " So the first guessing here is just 71 is above 65. Yes? 
We can apply the CLT theorem because N is above 30, so we can consider that this distribution is normal, so we transform from", "tokens": [407, 264, 700, 17939, 510, 307, 445, 30942, 307, 3673, 11624, 13, 1079, 30, 492, 393, 3079, 264, 12855, 51, 20904, 570, 426, 307, 3673, 2217, 11, 370, 321, 393, 1949, 300, 341, 7316, 307, 2710, 11, 370, 321, 4088, 490], "avg_logprob": -0.24590775086766198, "compression_ratio": 1.2907801418439717, "no_speech_prob": 0.0, "words": [{"start": 2243.68, "end": 2243.96, "word": " So", "probability": 0.7333984375}, {"start": 2243.96, "end": 2244.18, "word": " the", "probability": 0.65966796875}, {"start": 2244.18, "end": 2244.54, "word": " first", "probability": 0.87060546875}, {"start": 2244.54, "end": 2245.7, "word": " guessing", "probability": 0.94873046875}, {"start": 2245.7, "end": 2246.06, "word": " here", "probability": 0.83447265625}, {"start": 2246.06, "end": 2246.24, "word": " is", "probability": 0.9267578125}, {"start": 2246.24, "end": 2246.62, "word": " just", "probability": 0.896484375}, {"start": 2246.62, "end": 2247.48, "word": " 71", "probability": 0.79541015625}, {"start": 2247.48, "end": 2247.82, "word": " is", "probability": 0.92041015625}, {"start": 2247.82, "end": 2248.16, "word": " above", "probability": 0.96826171875}, {"start": 2248.16, "end": 2249.46, "word": " 65.", "probability": 0.96728515625}, {"start": 2252.54, "end": 2253.26, "word": " Yes?", "probability": 0.70947265625}, {"start": 2253.86, "end": 2254.18, "word": " We", "probability": 0.201416015625}, {"start": 2254.18, "end": 2254.38, "word": " can", "probability": 0.487060546875}, {"start": 2254.38, "end": 2254.66, "word": " apply", "probability": 0.94287109375}, {"start": 2254.66, "end": 2255.14, "word": " the", "probability": 0.890625}, {"start": 2255.14, "end": 2256.96, "word": " CLT", "probability": 0.71337890625}, {"start": 2256.96, "end": 2257.48, "word": " theorem", "probability": 0.77734375}, {"start": 2257.48, "end": 2257.88, "word": " 
because", "probability": 0.74658203125}, {"start": 2257.88, "end": 2258.26, "word": " N", "probability": 0.490966796875}, {"start": 2258.26, "end": 2258.44, "word": " is", "probability": 0.939453125}, {"start": 2258.44, "end": 2258.74, "word": " above", "probability": 0.95556640625}, {"start": 2258.74, "end": 2259.34, "word": " 30,", "probability": 0.93994140625}, {"start": 2259.5, "end": 2259.64, "word": " so", "probability": 0.921875}, {"start": 2259.64, "end": 2259.86, "word": " we", "probability": 0.94580078125}, {"start": 2259.86, "end": 2260.18, "word": " can", "probability": 0.89697265625}, {"start": 2260.18, "end": 2260.98, "word": " consider", "probability": 0.91015625}, {"start": 2260.98, "end": 2261.28, "word": " that", "probability": 0.84423828125}, {"start": 2261.28, "end": 2261.56, "word": " this", "probability": 0.91455078125}, {"start": 2261.56, "end": 2262.1, "word": " distribution", "probability": 0.8125}, {"start": 2262.1, "end": 2262.56, "word": " is", "probability": 0.927734375}, {"start": 2262.56, "end": 2263.1, "word": " normal,", "probability": 0.84619140625}, {"start": 2263.3, "end": 2263.44, "word": " so", "probability": 0.93408203125}, {"start": 2263.44, "end": 2263.62, "word": " we", "probability": 0.95361328125}, {"start": 2263.62, "end": 2264.08, "word": " transform", "probability": 0.89111328125}, {"start": 2264.08, "end": 2264.5, "word": " from", "probability": 0.830078125}], "temperature": 1.0}, {"id": 82, "seek": 228982, "start": 2264.98, "end": 2289.82, "text": " It's part of standardized and do whatever you want. That's correct, but to do the problem, the complete answer, yes, you have to convert to this score, because n is large, then you can figure out the answer. So she said that. But I'm looking for something different. First, convert to z. 
Our root n, sigma is n.", "tokens": [467, 311, 644, 295, 31677, 293, 360, 2035, 291, 528, 13, 663, 311, 3006, 11, 457, 281, 360, 264, 1154, 11, 264, 3566, 1867, 11, 2086, 11, 291, 362, 281, 7620, 281, 341, 6175, 11, 570, 297, 307, 2416, 11, 550, 291, 393, 2573, 484, 264, 1867, 13, 407, 750, 848, 300, 13, 583, 286, 478, 1237, 337, 746, 819, 13, 2386, 11, 7620, 281, 710, 13, 2621, 5593, 297, 11, 12771, 307, 297, 13], "avg_logprob": -0.3587582197628523, "compression_ratio": 1.492822966507177, "no_speech_prob": 0.0, "words": [{"start": 2264.98, "end": 2265.36, "word": " It's", "probability": 0.5767822265625}, {"start": 2265.36, "end": 2265.66, "word": " part", "probability": 0.71875}, {"start": 2265.66, "end": 2265.86, "word": " of", "probability": 0.32421875}, {"start": 2265.86, "end": 2266.9, "word": " standardized", "probability": 0.6552734375}, {"start": 2266.9, "end": 2267.52, "word": " and", "probability": 0.6689453125}, {"start": 2267.52, "end": 2268.08, "word": " do", "probability": 0.417724609375}, {"start": 2268.08, "end": 2268.58, "word": " whatever", "probability": 0.422119140625}, {"start": 2268.58, "end": 2268.8, "word": " you", "probability": 0.292724609375}, {"start": 2268.8, "end": 2269.14, "word": " want.", "probability": 0.53369140625}, {"start": 2269.36, "end": 2269.88, "word": " That's", "probability": 0.834716796875}, {"start": 2269.88, "end": 2270.28, "word": " correct,", "probability": 0.89013671875}, {"start": 2270.48, "end": 2270.68, "word": " but", "probability": 0.8779296875}, {"start": 2270.68, "end": 2271.98, "word": " to", "probability": 0.39013671875}, {"start": 2271.98, "end": 2272.2, "word": " do", "probability": 0.8408203125}, {"start": 2272.2, "end": 2272.4, "word": " the", "probability": 0.84228515625}, {"start": 2272.4, "end": 2273.48, "word": " problem,", "probability": 0.7802734375}, {"start": 2273.76, "end": 2273.86, "word": " the", "probability": 0.52001953125}, {"start": 2273.86, "end": 2274.2, "word": " complete", 
"probability": 0.8076171875}, {"start": 2274.2, "end": 2274.6, "word": " answer,", "probability": 0.953125}, {"start": 2274.72, "end": 2274.88, "word": " yes,", "probability": 0.77197265625}, {"start": 2274.96, "end": 2275.06, "word": " you", "probability": 0.96044921875}, {"start": 2275.06, "end": 2275.26, "word": " have", "probability": 0.93896484375}, {"start": 2275.26, "end": 2275.48, "word": " to", "probability": 0.96240234375}, {"start": 2275.48, "end": 2275.98, "word": " convert", "probability": 0.91845703125}, {"start": 2275.98, "end": 2276.16, "word": " to", "probability": 0.85693359375}, {"start": 2276.16, "end": 2276.32, "word": " this", "probability": 0.35205078125}, {"start": 2276.32, "end": 2276.78, "word": " score,", "probability": 0.82470703125}, {"start": 2277.74, "end": 2278.08, "word": " because", "probability": 0.89599609375}, {"start": 2278.08, "end": 2278.28, "word": " n", "probability": 0.431640625}, {"start": 2278.28, "end": 2278.44, "word": " is", "probability": 0.9501953125}, {"start": 2278.44, "end": 2278.76, "word": " large,", "probability": 0.94873046875}, {"start": 2278.86, "end": 2279.0, "word": " then", "probability": 0.80810546875}, {"start": 2279.0, "end": 2279.1, "word": " you", "probability": 0.9033203125}, {"start": 2279.1, "end": 2279.26, "word": " can", "probability": 0.9248046875}, {"start": 2279.26, "end": 2279.48, "word": " figure", "probability": 0.9482421875}, {"start": 2279.48, "end": 2279.62, "word": " out", "probability": 0.88525390625}, {"start": 2279.62, "end": 2279.76, "word": " the", "probability": 0.90576171875}, {"start": 2279.76, "end": 2280.0, "word": " answer.", "probability": 0.61962890625}, {"start": 2280.26, "end": 2280.44, "word": " So", "probability": 0.77001953125}, {"start": 2280.44, "end": 2281.2, "word": " she", "probability": 0.64990234375}, {"start": 2281.2, "end": 2281.56, "word": " said", "probability": 0.94482421875}, {"start": 2281.56, "end": 2281.86, "word": " that.", "probability": 
0.916015625}, {"start": 2282.48, "end": 2282.76, "word": " But", "probability": 0.349365234375}, {"start": 2282.76, "end": 2283.08, "word": " I'm", "probability": 0.94677734375}, {"start": 2283.08, "end": 2283.26, "word": " looking", "probability": 0.9130859375}, {"start": 2283.26, "end": 2283.46, "word": " for", "probability": 0.94873046875}, {"start": 2283.46, "end": 2283.78, "word": " something", "probability": 0.86083984375}, {"start": 2283.78, "end": 2284.22, "word": " different.", "probability": 0.873046875}, {"start": 2285.76, "end": 2286.1, "word": " First,", "probability": 0.86328125}, {"start": 2286.68, "end": 2287.22, "word": " convert", "probability": 0.89892578125}, {"start": 2287.22, "end": 2287.48, "word": " to", "probability": 0.953125}, {"start": 2287.48, "end": 2287.7, "word": " z.", "probability": 0.81201171875}, {"start": 2287.98, "end": 2288.22, "word": " Our", "probability": 0.269287109375}, {"start": 2288.22, "end": 2288.52, "word": " root", "probability": 0.560546875}, {"start": 2288.52, "end": 2288.78, "word": " n,", "probability": 0.787109375}, {"start": 2289.02, "end": 2289.32, "word": " sigma", "probability": 0.89453125}, {"start": 2289.32, "end": 2289.56, "word": " is", "probability": 0.919921875}, {"start": 2289.56, "end": 2289.82, "word": " n.", "probability": 0.52880859375}], "temperature": 1.0}, {"id": 83, "seek": 231980, "start": 2291.42, "end": 2319.8, "text": " Divide by square root of 64, which is also 8. So it is Z greater than 1. Sorry, 6 divided by 1 is 6. Now, Z greater than 6 if you go back to the normal table. 
Now, the table we have given it to try.", "tokens": [9886, 482, 538, 3732, 5593, 295, 12145, 11, 597, 307, 611, 1649, 13, 407, 309, 307, 1176, 5044, 813, 502, 13, 4919, 11, 1386, 6666, 538, 502, 307, 1386, 13, 823, 11, 1176, 5044, 813, 1386, 498, 291, 352, 646, 281, 264, 2710, 3199, 13, 823, 11, 264, 3199, 321, 362, 2212, 309, 281, 853, 13], "avg_logprob": -0.355811407691554, "compression_ratio": 1.4014084507042253, "no_speech_prob": 0.0, "words": [{"start": 2291.42, "end": 2291.84, "word": " Divide", "probability": 0.6519775390625}, {"start": 2291.84, "end": 2292.02, "word": " by", "probability": 0.94677734375}, {"start": 2292.02, "end": 2292.42, "word": " square", "probability": 0.45751953125}, {"start": 2292.42, "end": 2292.66, "word": " root", "probability": 0.9228515625}, {"start": 2292.66, "end": 2292.86, "word": " of", "probability": 0.96533203125}, {"start": 2292.86, "end": 2293.38, "word": " 64,", "probability": 0.56201171875}, {"start": 2293.7, "end": 2293.86, "word": " which", "probability": 0.8642578125}, {"start": 2293.86, "end": 2294.02, "word": " is", "probability": 0.9501953125}, {"start": 2294.02, "end": 2294.44, "word": " also", "probability": 0.88037109375}, {"start": 2294.44, "end": 2294.92, "word": " 8.", "probability": 0.72509765625}, {"start": 2296.16, "end": 2296.44, "word": " So", "probability": 0.716796875}, {"start": 2296.44, "end": 2296.72, "word": " it", "probability": 0.169921875}, {"start": 2296.72, "end": 2297.78, "word": " is", "probability": 0.51318359375}, {"start": 2297.78, "end": 2298.16, "word": " Z", "probability": 0.44482421875}, {"start": 2298.16, "end": 2298.7, "word": " greater", "probability": 0.86328125}, {"start": 2298.7, "end": 2299.02, "word": " than", "probability": 0.93798828125}, {"start": 2299.02, "end": 2299.22, "word": " 1.", "probability": 0.5673828125}, {"start": 2300.26, "end": 2300.88, "word": " Sorry,", "probability": 0.376953125}, {"start": 2301.22, "end": 2301.62, "word": " 6", "probability": 
0.92529296875}, {"start": 2301.62, "end": 2301.9, "word": " divided", "probability": 0.693359375}, {"start": 2301.9, "end": 2302.1, "word": " by", "probability": 0.966796875}, {"start": 2302.1, "end": 2302.3, "word": " 1", "probability": 0.970703125}, {"start": 2302.3, "end": 2302.48, "word": " is", "probability": 0.89404296875}, {"start": 2302.48, "end": 2302.78, "word": " 6.", "probability": 0.99169921875}, {"start": 2305.46, "end": 2305.96, "word": " Now,", "probability": 0.8291015625}, {"start": 2306.08, "end": 2306.48, "word": " Z", "probability": 0.85107421875}, {"start": 2306.48, "end": 2306.96, "word": " greater", "probability": 0.7490234375}, {"start": 2306.96, "end": 2307.56, "word": " than", "probability": 0.93896484375}, {"start": 2307.56, "end": 2308.08, "word": " 6", "probability": 0.98876953125}, {"start": 2308.08, "end": 2308.54, "word": " if", "probability": 0.60107421875}, {"start": 2308.54, "end": 2308.78, "word": " you", "probability": 0.94189453125}, {"start": 2308.78, "end": 2309.08, "word": " go", "probability": 0.95654296875}, {"start": 2309.08, "end": 2309.48, "word": " back", "probability": 0.87646484375}, {"start": 2309.48, "end": 2309.74, "word": " to", "probability": 0.9658203125}, {"start": 2309.74, "end": 2309.98, "word": " the", "probability": 0.90576171875}, {"start": 2309.98, "end": 2310.44, "word": " normal", "probability": 0.8955078125}, {"start": 2310.44, "end": 2311.0, "word": " table.", "probability": 0.87109375}, {"start": 2313.56, "end": 2314.0, "word": " Now,", "probability": 0.9248046875}, {"start": 2314.1, "end": 2314.26, "word": " the", "probability": 0.724609375}, {"start": 2314.26, "end": 2314.76, "word": " table", "probability": 0.890625}, {"start": 2314.76, "end": 2315.58, "word": " we", "probability": 0.6591796875}, {"start": 2315.58, "end": 2316.12, "word": " have", "probability": 0.611328125}, {"start": 2316.12, "end": 2318.9, "word": " given", "probability": 0.5546875}, {"start": 2318.9, "end": 2319.3, "word": " 
it", "probability": 0.58740234375}, {"start": 2319.3, "end": 2319.48, "word": " to", "probability": 0.417236328125}, {"start": 2319.48, "end": 2319.8, "word": " try.", "probability": 0.220703125}], "temperature": 1.0}, {"id": 84, "seek": 234548, "start": 2321.09, "end": 2345.49, "text": " So this one is 1 minus z less than 6. For 6, we have 0.9999. So the answer is approximately zero. So it makes sense, it's zero. This is the complete answer. It takes time. Because you have to convert the score.", "tokens": [407, 341, 472, 307, 502, 3175, 710, 1570, 813, 1386, 13, 1171, 1386, 11, 321, 362, 1958, 13, 8494, 8494, 13, 407, 264, 1867, 307, 10447, 4018, 13, 407, 309, 1669, 2020, 11, 309, 311, 4018, 13, 639, 307, 264, 3566, 1867, 13, 467, 2516, 565, 13, 1436, 291, 362, 281, 7620, 264, 6175, 13], "avg_logprob": -0.29966518548982485, "compression_ratio": 1.388157894736842, "no_speech_prob": 0.0, "words": [{"start": 2321.09, "end": 2321.73, "word": " So", "probability": 0.16455078125}, {"start": 2321.73, "end": 2322.29, "word": " this", "probability": 0.708984375}, {"start": 2322.29, "end": 2322.51, "word": " one", "probability": 0.85009765625}, {"start": 2322.51, "end": 2322.65, "word": " is", "probability": 0.91455078125}, {"start": 2322.65, "end": 2322.89, "word": " 1", "probability": 0.4482421875}, {"start": 2322.89, "end": 2323.47, "word": " minus", "probability": 0.91845703125}, {"start": 2323.47, "end": 2324.85, "word": " z", "probability": 0.213134765625}, {"start": 2324.85, "end": 2325.09, "word": " less", "probability": 0.90087890625}, {"start": 2325.09, "end": 2325.27, "word": " than", "probability": 0.9296875}, {"start": 2325.27, "end": 2325.69, "word": " 6.", "probability": 0.85400390625}, {"start": 2327.91, "end": 2328.55, "word": " For", "probability": 0.60400390625}, {"start": 2328.55, "end": 2329.13, "word": " 6,", "probability": 0.8837890625}, {"start": 2329.47, "end": 2329.65, "word": " we", "probability": 0.93701171875}, {"start": 2329.65, "end": 
2330.09, "word": " have", "probability": 0.94921875}, {"start": 2330.09, "end": 2330.79, "word": " 0", "probability": 0.75927734375}, {"start": 2330.79, "end": 2331.97, "word": ".9999.", "probability": 0.90380859375}, {"start": 2334.89, "end": 2335.53, "word": " So", "probability": 0.87890625}, {"start": 2335.53, "end": 2335.71, "word": " the", "probability": 0.8115234375}, {"start": 2335.71, "end": 2336.01, "word": " answer", "probability": 0.955078125}, {"start": 2336.01, "end": 2336.27, "word": " is", "probability": 0.94384765625}, {"start": 2336.27, "end": 2336.75, "word": " approximately", "probability": 0.88720703125}, {"start": 2336.75, "end": 2337.21, "word": " zero.", "probability": 0.50146484375}, {"start": 2337.99, "end": 2338.21, "word": " So", "probability": 0.489501953125}, {"start": 2338.21, "end": 2338.33, "word": " it", "probability": 0.84130859375}, {"start": 2338.33, "end": 2338.49, "word": " makes", "probability": 0.81689453125}, {"start": 2338.49, "end": 2338.73, "word": " sense,", "probability": 0.80810546875}, {"start": 2338.79, "end": 2338.91, "word": " it's", "probability": 0.721923828125}, {"start": 2338.91, "end": 2339.13, "word": " zero.", "probability": 0.8955078125}, {"start": 2339.57, "end": 2339.83, "word": " This", "probability": 0.85791015625}, {"start": 2339.83, "end": 2339.93, "word": " is", "probability": 0.9482421875}, {"start": 2339.93, "end": 2340.05, "word": " the", "probability": 0.86572265625}, {"start": 2340.05, "end": 2340.39, "word": " complete", "probability": 0.7763671875}, {"start": 2340.39, "end": 2340.91, "word": " answer.", "probability": 0.9560546875}, {"start": 2341.93, "end": 2342.15, "word": " It", "probability": 0.9404296875}, {"start": 2342.15, "end": 2342.37, "word": " takes", "probability": 0.78662109375}, {"start": 2342.37, "end": 2342.71, "word": " time.", "probability": 0.873046875}, {"start": 2343.81, "end": 2344.07, "word": " Because", "probability": 0.83251953125}, {"start": 2344.07, "end": 2344.19, 
"word": " you", "probability": 0.92578125}, {"start": 2344.19, "end": 2344.35, "word": " have", "probability": 0.943359375}, {"start": 2344.35, "end": 2344.47, "word": " to", "probability": 0.970703125}, {"start": 2344.47, "end": 2344.83, "word": " convert", "probability": 0.89404296875}, {"start": 2344.83, "end": 2345.05, "word": " the", "probability": 0.420166015625}, {"start": 2345.05, "end": 2345.49, "word": " score.", "probability": 0.8154296875}], "temperature": 1.0}, {"id": 85, "seek": 237196, "start": 2346.7, "end": 2371.96, "text": " Do some calculations, then use the normal table, start the normal table. I mentioned before, it tries to avoid this kind of calculations. Now, how can we figure out? Now, since the problem says Q to the left and N is large,", "tokens": [1144, 512, 20448, 11, 550, 764, 264, 2710, 3199, 11, 722, 264, 2710, 3199, 13, 286, 2835, 949, 11, 309, 9898, 281, 5042, 341, 733, 295, 20448, 13, 823, 11, 577, 393, 321, 2573, 484, 30, 823, 11, 1670, 264, 1154, 1619, 1249, 281, 264, 1411, 293, 426, 307, 2416, 11], "avg_logprob": -0.2423377400980546, "compression_ratio": 1.4933333333333334, "no_speech_prob": 0.0, "words": [{"start": 2346.7, "end": 2346.92, "word": " Do", "probability": 0.386962890625}, {"start": 2346.92, "end": 2347.2, "word": " some", "probability": 0.896484375}, {"start": 2347.2, "end": 2347.74, "word": " calculations,", "probability": 0.8623046875}, {"start": 2348.44, "end": 2348.74, "word": " then", "probability": 0.8427734375}, {"start": 2348.74, "end": 2348.98, "word": " use", "probability": 0.7705078125}, {"start": 2348.98, "end": 2349.12, "word": " the", "probability": 0.87255859375}, {"start": 2349.12, "end": 2349.38, "word": " normal", "probability": 0.8173828125}, {"start": 2349.38, "end": 2349.76, "word": " table,", "probability": 0.86962890625}, {"start": 2349.94, "end": 2350.18, "word": " start", "probability": 0.654296875}, {"start": 2350.18, "end": 2350.32, "word": " the", "probability": 0.6591796875}, {"start": 
2350.32, "end": 2350.54, "word": " normal", "probability": 0.87548828125}, {"start": 2350.54, "end": 2350.92, "word": " table.", "probability": 0.85400390625}, {"start": 2351.8, "end": 2352.4, "word": " I", "probability": 0.95068359375}, {"start": 2352.4, "end": 2352.7, "word": " mentioned", "probability": 0.78662109375}, {"start": 2352.7, "end": 2353.18, "word": " before,", "probability": 0.86474609375}, {"start": 2353.34, "end": 2353.46, "word": " it", "probability": 0.4892578125}, {"start": 2353.46, "end": 2353.78, "word": " tries", "probability": 0.85986328125}, {"start": 2353.78, "end": 2354.02, "word": " to", "probability": 0.974609375}, {"start": 2354.02, "end": 2354.62, "word": " avoid", "probability": 0.90625}, {"start": 2354.62, "end": 2357.96, "word": " this", "probability": 0.58544921875}, {"start": 2357.96, "end": 2358.38, "word": " kind", "probability": 0.875}, {"start": 2358.38, "end": 2358.56, "word": " of", "probability": 0.97119140625}, {"start": 2358.56, "end": 2359.14, "word": " calculations.", "probability": 0.90087890625}, {"start": 2360.8, "end": 2361.06, "word": " Now,", "probability": 0.93408203125}, {"start": 2361.12, "end": 2361.22, "word": " how", "probability": 0.93603515625}, {"start": 2361.22, "end": 2361.42, "word": " can", "probability": 0.9365234375}, {"start": 2361.42, "end": 2361.54, "word": " we", "probability": 0.58642578125}, {"start": 2361.54, "end": 2361.76, "word": " figure", "probability": 0.96923828125}, {"start": 2361.76, "end": 2362.08, "word": " out?", "probability": 0.77197265625}, {"start": 2366.0, "end": 2366.56, "word": " Now,", "probability": 0.8740234375}, {"start": 2366.66, "end": 2366.98, "word": " since", "probability": 0.8388671875}, {"start": 2366.98, "end": 2367.32, "word": " the", "probability": 0.9091796875}, {"start": 2367.32, "end": 2368.44, "word": " problem", "probability": 0.787109375}, {"start": 2368.44, "end": 2368.9, "word": " says", "probability": 0.849609375}, {"start": 2368.9, "end": 2369.2, 
"word": " Q", "probability": 0.49365234375}, {"start": 2369.2, "end": 2369.34, "word": " to", "probability": 0.91552734375}, {"start": 2369.34, "end": 2369.46, "word": " the", "probability": 0.92578125}, {"start": 2369.46, "end": 2369.74, "word": " left", "probability": 0.89990234375}, {"start": 2369.74, "end": 2371.14, "word": " and", "probability": 0.64208984375}, {"start": 2371.14, "end": 2371.36, "word": " N", "probability": 0.94970703125}, {"start": 2371.36, "end": 2371.56, "word": " is", "probability": 0.95361328125}, {"start": 2371.56, "end": 2371.96, "word": " large,", "probability": 0.9619140625}], "temperature": 1.0}, {"id": 86, "seek": 239952, "start": 2372.44, "end": 2399.52, "text": " We can assume that X bar is approximately normal, so don't worry about the shape of the distribution because we select a random size of size 6 to 4, so we can apply the central limit theorem. Any idea? Again, this way is true to do", "tokens": [492, 393, 6552, 300, 1783, 2159, 307, 10447, 2710, 11, 370, 500, 380, 3292, 466, 264, 3909, 295, 264, 7316, 570, 321, 3048, 257, 4974, 2744, 295, 2744, 1386, 281, 1017, 11, 370, 321, 393, 3079, 264, 5777, 4948, 20904, 13, 2639, 1558, 30, 3764, 11, 341, 636, 307, 2074, 281, 360], "avg_logprob": -0.2620872551540159, "compression_ratio": 1.406060606060606, "no_speech_prob": 0.0, "words": [{"start": 2372.44, "end": 2372.72, "word": " We", "probability": 0.41650390625}, {"start": 2372.72, "end": 2373.04, "word": " can", "probability": 0.94580078125}, {"start": 2373.04, "end": 2373.46, "word": " assume", "probability": 0.9140625}, {"start": 2373.46, "end": 2373.88, "word": " that", "probability": 0.9130859375}, {"start": 2373.88, "end": 2374.6, "word": " X", "probability": 0.55224609375}, {"start": 2374.6, "end": 2375.0, "word": " bar", "probability": 0.828125}, {"start": 2375.0, "end": 2376.06, "word": " is", "probability": 0.94384765625}, {"start": 2376.06, "end": 2376.68, "word": " approximately", "probability": 0.86376953125}, 
{"start": 2376.68, "end": 2377.04, "word": " normal,", "probability": 0.8408203125}, {"start": 2377.2, "end": 2377.4, "word": " so", "probability": 0.9453125}, {"start": 2377.4, "end": 2377.74, "word": " don't", "probability": 0.893310546875}, {"start": 2377.74, "end": 2377.94, "word": " worry", "probability": 0.94287109375}, {"start": 2377.94, "end": 2378.26, "word": " about", "probability": 0.90966796875}, {"start": 2378.26, "end": 2379.28, "word": " the", "probability": 0.88134765625}, {"start": 2379.28, "end": 2379.58, "word": " shape", "probability": 0.91064453125}, {"start": 2379.58, "end": 2379.7, "word": " of", "probability": 0.962890625}, {"start": 2379.7, "end": 2379.84, "word": " the", "probability": 0.464599609375}, {"start": 2379.84, "end": 2380.2, "word": " distribution", "probability": 0.468017578125}, {"start": 2380.2, "end": 2380.6, "word": " because", "probability": 0.496337890625}, {"start": 2380.6, "end": 2380.82, "word": " we", "probability": 0.9404296875}, {"start": 2380.82, "end": 2381.12, "word": " select", "probability": 0.6669921875}, {"start": 2381.12, "end": 2381.28, "word": " a", "probability": 0.76611328125}, {"start": 2381.28, "end": 2381.46, "word": " random", "probability": 0.77392578125}, {"start": 2381.46, "end": 2382.2, "word": " size", "probability": 0.677734375}, {"start": 2382.2, "end": 2382.56, "word": " of", "probability": 0.91162109375}, {"start": 2382.56, "end": 2382.94, "word": " size", "probability": 0.6630859375}, {"start": 2382.94, "end": 2383.3, "word": " 6", "probability": 0.462646484375}, {"start": 2383.3, "end": 2383.42, "word": " to", "probability": 0.77880859375}, {"start": 2383.42, "end": 2383.74, "word": " 4,", "probability": 0.990234375}, {"start": 2384.26, "end": 2384.44, "word": " so", "probability": 0.94482421875}, {"start": 2384.44, "end": 2384.58, "word": " we", "probability": 0.9599609375}, {"start": 2384.58, "end": 2384.78, "word": " can", "probability": 0.9443359375}, {"start": 2384.78, "end": 2385.06, 
"word": " apply", "probability": 0.9296875}, {"start": 2385.06, "end": 2385.28, "word": " the", "probability": 0.90771484375}, {"start": 2385.28, "end": 2385.52, "word": " central", "probability": 0.54638671875}, {"start": 2385.52, "end": 2385.72, "word": " limit", "probability": 0.9140625}, {"start": 2385.72, "end": 2386.08, "word": " theorem.", "probability": 0.79345703125}, {"start": 2388.64, "end": 2389.1, "word": " Any", "probability": 0.89697265625}, {"start": 2389.1, "end": 2389.58, "word": " idea?", "probability": 0.79736328125}, {"start": 2394.58, "end": 2395.26, "word": " Again,", "probability": 0.90966796875}, {"start": 2395.46, "end": 2396.7, "word": " this", "probability": 0.94873046875}, {"start": 2396.7, "end": 2396.96, "word": " way", "probability": 0.8271484375}, {"start": 2396.96, "end": 2397.38, "word": " is", "probability": 0.93603515625}, {"start": 2397.38, "end": 2397.84, "word": " true", "probability": 0.93994140625}, {"start": 2397.84, "end": 2399.2, "word": " to", "probability": 0.498291015625}, {"start": 2399.2, "end": 2399.52, "word": " do", "probability": 0.96240234375}], "temperature": 1.0}, {"id": 87, "seek": 242861, "start": 2400.53, "end": 2428.61, "text": " the complete calculations, but again you have to avoid using this one. I will give you a hint. Now, 71, what's the difference between the true mean and 76? 
Okay, so that's sigma of x bar", "tokens": [264, 3566, 20448, 11, 457, 797, 291, 362, 281, 5042, 1228, 341, 472, 13, 286, 486, 976, 291, 257, 12075, 13, 823, 11, 30942, 11, 437, 311, 264, 2649, 1296, 264, 2074, 914, 293, 24733, 30, 1033, 11, 370, 300, 311, 12771, 295, 2031, 2159], "avg_logprob": -0.2557744604089986, "compression_ratio": 1.2808219178082192, "no_speech_prob": 0.0, "words": [{"start": 2400.53, "end": 2400.87, "word": " the", "probability": 0.1312255859375}, {"start": 2400.87, "end": 2401.29, "word": " complete", "probability": 0.748046875}, {"start": 2401.29, "end": 2402.09, "word": " calculations,", "probability": 0.8271484375}, {"start": 2402.37, "end": 2402.37, "word": " but", "probability": 0.87109375}, {"start": 2402.37, "end": 2402.97, "word": " again", "probability": 0.87841796875}, {"start": 2402.97, "end": 2403.17, "word": " you", "probability": 0.7294921875}, {"start": 2403.17, "end": 2403.33, "word": " have", "probability": 0.939453125}, {"start": 2403.33, "end": 2403.87, "word": " to", "probability": 0.97509765625}, {"start": 2403.87, "end": 2404.97, "word": " avoid", "probability": 0.88427734375}, {"start": 2404.97, "end": 2405.35, "word": " using", "probability": 0.92333984375}, {"start": 2405.35, "end": 2405.57, "word": " this", "probability": 0.8955078125}, {"start": 2405.57, "end": 2405.67, "word": " one.", "probability": 0.4814453125}, {"start": 2405.67, "end": 2405.75, "word": " I", "probability": 0.9541015625}, {"start": 2405.75, "end": 2405.87, "word": " will", "probability": 0.859375}, {"start": 2405.87, "end": 2406.01, "word": " give", "probability": 0.8759765625}, {"start": 2406.01, "end": 2406.15, "word": " you", "probability": 0.9658203125}, {"start": 2406.15, "end": 2406.19, "word": " a", "probability": 0.79931640625}, {"start": 2406.19, "end": 2406.45, "word": " hint.", "probability": 0.91064453125}, {"start": 2409.51, "end": 2409.87, "word": " Now,", "probability": 0.92626953125}, {"start": 2410.67, "end": 2411.31, "word": 
" 71,", "probability": 0.6318359375}, {"start": 2412.11, "end": 2413.23, "word": " what's", "probability": 0.935791015625}, {"start": 2413.23, "end": 2413.35, "word": " the", "probability": 0.9228515625}, {"start": 2413.35, "end": 2413.75, "word": " difference", "probability": 0.8447265625}, {"start": 2413.75, "end": 2414.27, "word": " between", "probability": 0.873046875}, {"start": 2414.27, "end": 2415.49, "word": " the", "probability": 0.91259765625}, {"start": 2415.49, "end": 2415.73, "word": " true", "probability": 0.958984375}, {"start": 2415.73, "end": 2416.09, "word": " mean", "probability": 0.97509765625}, {"start": 2416.09, "end": 2417.41, "word": " and", "probability": 0.88623046875}, {"start": 2417.41, "end": 2418.29, "word": " 76?", "probability": 0.8818359375}, {"start": 2424.95, "end": 2425.71, "word": " Okay,", "probability": 0.315185546875}, {"start": 2425.93, "end": 2426.07, "word": " so", "probability": 0.93017578125}, {"start": 2426.07, "end": 2426.57, "word": " that's", "probability": 0.94775390625}, {"start": 2426.57, "end": 2426.97, "word": " sigma", "probability": 0.70361328125}, {"start": 2426.97, "end": 2427.95, "word": " of", "probability": 0.927734375}, {"start": 2427.95, "end": 2428.23, "word": " x", "probability": 0.71826171875}, {"start": 2428.23, "end": 2428.61, "word": " bar", "probability": 0.78466796875}], "temperature": 1.0}, {"id": 88, "seek": 245576, "start": 2429.48, "end": 2455.76, "text": " is 8 divided by square root of 64 is 1. Again. Continuous, just 1. Why? 
The difference between 71 and 656 and 6", "tokens": [307, 1649, 6666, 538, 3732, 5593, 295, 12145, 307, 502, 13, 3764, 13, 14674, 12549, 11, 445, 502, 13, 1545, 30, 440, 2649, 1296, 30942, 293, 11624, 21, 293, 1386], "avg_logprob": -0.3692036194186057, "compression_ratio": 1.0566037735849056, "no_speech_prob": 0.0, "words": [{"start": 2429.48, "end": 2429.88, "word": " is", "probability": 0.2288818359375}, {"start": 2429.88, "end": 2430.22, "word": " 8", "probability": 0.327392578125}, {"start": 2430.22, "end": 2430.7, "word": " divided", "probability": 0.568359375}, {"start": 2430.7, "end": 2431.06, "word": " by", "probability": 0.966796875}, {"start": 2431.06, "end": 2431.4, "word": " square", "probability": 0.5009765625}, {"start": 2431.4, "end": 2431.64, "word": " root", "probability": 0.916015625}, {"start": 2431.64, "end": 2431.8, "word": " of", "probability": 0.958984375}, {"start": 2431.8, "end": 2432.1, "word": " 64", "probability": 0.472412109375}, {"start": 2432.1, "end": 2432.6, "word": " is", "probability": 0.7060546875}, {"start": 2432.6, "end": 2432.9, "word": " 1.", "probability": 0.81201171875}, {"start": 2434.14, "end": 2434.52, "word": " Again.", "probability": 0.607421875}, {"start": 2437.0, "end": 2437.74, "word": " Continuous,", "probability": 0.7734375}, {"start": 2437.92, "end": 2438.28, "word": " just", "probability": 0.755859375}, {"start": 2438.28, "end": 2438.6, "word": " 1.", "probability": 0.71484375}, {"start": 2442.98, "end": 2443.2, "word": " Why?", "probability": 0.61474609375}, {"start": 2451.34, "end": 2452.08, "word": " The", "probability": 0.8818359375}, {"start": 2452.08, "end": 2452.46, "word": " difference", "probability": 0.84521484375}, {"start": 2452.46, "end": 2452.8, "word": " between", "probability": 0.89208984375}, {"start": 2452.8, "end": 2453.14, "word": " 71", "probability": 0.9404296875}, {"start": 2453.14, "end": 2453.46, "word": " and", "probability": 0.92578125}, {"start": 2453.46, "end": 2454.46, "word": " 
656", "probability": 0.77587890625}, {"start": 2454.46, "end": 2455.26, "word": " and", "probability": 0.66259765625}, {"start": 2455.26, "end": 2455.76, "word": " 6", "probability": 0.9384765625}], "temperature": 1.0}, {"id": 89, "seek": 248639, "start": 2457.13, "end": 2486.39, "text": " It's six times the standard error of the sample mean. And we know that, if you remember this rule, 65, 68, 95, 99, we can't, don't, don't use chebyshev in this case because the distribution is roughly normal. We know this rule, the empirical rule.", "tokens": [467, 311, 2309, 1413, 264, 3832, 6713, 295, 264, 6889, 914, 13, 400, 321, 458, 300, 11, 498, 291, 1604, 341, 4978, 11, 11624, 11, 23317, 11, 13420, 11, 11803, 11, 321, 393, 380, 11, 500, 380, 11, 500, 380, 764, 947, 65, 749, 675, 85, 294, 341, 1389, 570, 264, 7316, 307, 9810, 2710, 13, 492, 458, 341, 4978, 11, 264, 31886, 4978, 13], "avg_logprob": -0.33806818001198047, "compression_ratio": 1.4588235294117646, "no_speech_prob": 0.0, "words": [{"start": 2457.13, "end": 2457.81, "word": " It's", "probability": 0.6153564453125}, {"start": 2457.81, "end": 2458.11, "word": " six", "probability": 0.4638671875}, {"start": 2458.11, "end": 2458.77, "word": " times", "probability": 0.90234375}, {"start": 2458.77, "end": 2459.53, "word": " the", "probability": 0.81005859375}, {"start": 2459.53, "end": 2460.05, "word": " standard", "probability": 0.91015625}, {"start": 2460.05, "end": 2460.63, "word": " error", "probability": 0.82763671875}, {"start": 2460.63, "end": 2461.05, "word": " of", "probability": 0.95166015625}, {"start": 2461.05, "end": 2461.23, "word": " the", "probability": 0.86767578125}, {"start": 2461.23, "end": 2461.51, "word": " sample", "probability": 0.88525390625}, {"start": 2461.51, "end": 2461.83, "word": " mean.", "probability": 0.9384765625}, {"start": 2463.39, "end": 2463.73, "word": " And", "probability": 0.57958984375}, {"start": 2463.73, "end": 2464.31, "word": " we", "probability": 0.85595703125}, {"start": 
2464.31, "end": 2464.57, "word": " know", "probability": 0.88525390625}, {"start": 2464.57, "end": 2464.91, "word": " that,", "probability": 0.90673828125}, {"start": 2465.97, "end": 2468.55, "word": " if", "probability": 0.8359375}, {"start": 2468.55, "end": 2468.67, "word": " you", "probability": 0.9501953125}, {"start": 2468.67, "end": 2468.89, "word": " remember", "probability": 0.88232421875}, {"start": 2468.89, "end": 2469.21, "word": " this", "probability": 0.9326171875}, {"start": 2469.21, "end": 2469.45, "word": " rule,", "probability": 0.9287109375}, {"start": 2471.09, "end": 2471.85, "word": " 65,", "probability": 0.607421875}, {"start": 2472.03, "end": 2472.61, "word": " 68,", "probability": 0.9521484375}, {"start": 2474.11, "end": 2474.85, "word": " 95,", "probability": 0.974609375}, {"start": 2476.73, "end": 2477.81, "word": " 99,", "probability": 0.71435546875}, {"start": 2478.09, "end": 2478.09, "word": " we", "probability": 0.280517578125}, {"start": 2478.09, "end": 2478.37, "word": " can't,", "probability": 0.621337890625}, {"start": 2478.43, "end": 2478.83, "word": " don't,", "probability": 0.8837890625}, {"start": 2478.89, "end": 2480.15, "word": " don't", "probability": 0.97607421875}, {"start": 2480.15, "end": 2480.41, "word": " use", "probability": 0.87744140625}, {"start": 2480.41, "end": 2480.91, "word": " chebyshev", "probability": 0.47213134765625}, {"start": 2480.91, "end": 2481.05, "word": " in", "probability": 0.93212890625}, {"start": 2481.05, "end": 2481.31, "word": " this", "probability": 0.94970703125}, {"start": 2481.31, "end": 2481.81, "word": " case", "probability": 0.91943359375}, {"start": 2481.81, "end": 2482.17, "word": " because", "probability": 0.6689453125}, {"start": 2482.17, "end": 2482.43, "word": " the", "probability": 0.7685546875}, {"start": 2482.43, "end": 2482.79, "word": " distribution", "probability": 0.60595703125}, {"start": 2482.79, "end": 2483.05, "word": " is", "probability": 0.951171875}, {"start": 
2483.05, "end": 2483.37, "word": " roughly", "probability": 0.80859375}, {"start": 2483.37, "end": 2483.73, "word": " normal.", "probability": 0.8544921875}, {"start": 2484.15, "end": 2484.63, "word": " We", "probability": 0.943359375}, {"start": 2484.63, "end": 2484.77, "word": " know", "probability": 0.8857421875}, {"start": 2484.77, "end": 2485.01, "word": " this", "probability": 0.947265625}, {"start": 2485.01, "end": 2485.31, "word": " rule,", "probability": 0.94287109375}, {"start": 2485.57, "end": 2485.75, "word": " the", "probability": 0.89794921875}, {"start": 2485.75, "end": 2486.07, "word": " empirical", "probability": 0.9326171875}, {"start": 2486.07, "end": 2486.39, "word": " rule.", "probability": 0.90185546875}], "temperature": 1.0}, {"id": 90, "seek": 251459, "start": 2487.49, "end": 2514.59, "text": " We said that 68% of the observations lie within one standard deviation of the mean. 95% in standard deviation and so on. Now if you go back, now let's say mu minus three sigma and mu plus three sigma. 
But here we are talking about x bar.", "tokens": [492, 848, 300, 23317, 4, 295, 264, 18163, 4544, 1951, 472, 3832, 25163, 295, 264, 914, 13, 13420, 4, 294, 3832, 25163, 293, 370, 322, 13, 823, 498, 291, 352, 646, 11, 586, 718, 311, 584, 2992, 3175, 1045, 12771, 293, 2992, 1804, 1045, 12771, 13, 583, 510, 321, 366, 1417, 466, 2031, 2159, 13], "avg_logprob": -0.23172433621117047, "compression_ratio": 1.5063291139240507, "no_speech_prob": 0.0, "words": [{"start": 2487.4900000000002, "end": 2488.17, "word": " We", "probability": 0.56689453125}, {"start": 2488.17, "end": 2488.47, "word": " said", "probability": 0.93212890625}, {"start": 2488.47, "end": 2488.81, "word": " that", "probability": 0.859375}, {"start": 2488.81, "end": 2490.77, "word": " 68", "probability": 0.91552734375}, {"start": 2490.77, "end": 2491.61, "word": "%", "probability": 0.8251953125}, {"start": 2491.61, "end": 2492.17, "word": " of", "probability": 0.95751953125}, {"start": 2492.17, "end": 2492.33, "word": " the", "probability": 0.8603515625}, {"start": 2492.33, "end": 2492.91, "word": " observations", "probability": 0.74462890625}, {"start": 2492.91, "end": 2493.35, "word": " lie", "probability": 0.86865234375}, {"start": 2493.35, "end": 2493.67, "word": " within", "probability": 0.90869140625}, {"start": 2493.67, "end": 2493.89, "word": " one", "probability": 0.8505859375}, {"start": 2493.89, "end": 2494.31, "word": " standard", "probability": 0.9091796875}, {"start": 2494.31, "end": 2494.73, "word": " deviation", "probability": 0.9033203125}, {"start": 2494.73, "end": 2495.65, "word": " of", "probability": 0.9580078125}, {"start": 2495.65, "end": 2495.81, "word": " the", "probability": 0.89453125}, {"start": 2495.81, "end": 2496.01, "word": " mean.", "probability": 0.97265625}, {"start": 2497.53, "end": 2498.09, "word": " 95", "probability": 0.67578125}, {"start": 2498.09, "end": 2498.83, "word": "%", "probability": 0.84423828125}, {"start": 2498.83, "end": 2499.17, "word": " in", 
"probability": 0.1566162109375}, {"start": 2499.17, "end": 2499.59, "word": " standard", "probability": 0.65087890625}, {"start": 2499.59, "end": 2500.51, "word": " deviation", "probability": 0.7734375}, {"start": 2500.51, "end": 2500.73, "word": " and", "probability": 0.7529296875}, {"start": 2500.73, "end": 2500.89, "word": " so", "probability": 0.9462890625}, {"start": 2500.89, "end": 2501.15, "word": " on.", "probability": 0.94677734375}, {"start": 2501.83, "end": 2502.17, "word": " Now", "probability": 0.91748046875}, {"start": 2502.17, "end": 2502.51, "word": " if", "probability": 0.5048828125}, {"start": 2502.51, "end": 2502.63, "word": " you", "probability": 0.9580078125}, {"start": 2502.63, "end": 2502.91, "word": " go", "probability": 0.9580078125}, {"start": 2502.91, "end": 2503.25, "word": " back,", "probability": 0.86083984375}, {"start": 2505.65, "end": 2505.91, "word": " now", "probability": 0.7685546875}, {"start": 2505.91, "end": 2506.21, "word": " let's", "probability": 0.9140625}, {"start": 2506.21, "end": 2506.53, "word": " say", "probability": 0.87646484375}, {"start": 2506.53, "end": 2507.05, "word": " mu", "probability": 0.384033203125}, {"start": 2507.05, "end": 2507.93, "word": " minus", "probability": 0.88037109375}, {"start": 2507.93, "end": 2508.29, "word": " three", "probability": 0.54345703125}, {"start": 2508.29, "end": 2508.75, "word": " sigma", "probability": 0.89111328125}, {"start": 2508.75, "end": 2510.35, "word": " and", "probability": 0.7763671875}, {"start": 2510.35, "end": 2510.59, "word": " mu", "probability": 0.8427734375}, {"start": 2510.59, "end": 2510.91, "word": " plus", "probability": 0.947265625}, {"start": 2510.91, "end": 2511.17, "word": " three", "probability": 0.93994140625}, {"start": 2511.17, "end": 2511.51, "word": " sigma.", "probability": 0.9140625}, {"start": 2512.01, "end": 2512.21, "word": " But", "probability": 0.92529296875}, {"start": 2512.21, "end": 2512.37, "word": " here", "probability": 
0.833984375}, {"start": 2512.37, "end": 2512.49, "word": " we", "probability": 0.8779296875}, {"start": 2512.49, "end": 2512.63, "word": " are", "probability": 0.923828125}, {"start": 2512.63, "end": 2512.97, "word": " talking", "probability": 0.85888671875}, {"start": 2512.97, "end": 2513.65, "word": " about", "probability": 0.921875}, {"start": 2513.65, "end": 2514.29, "word": " x", "probability": 0.689453125}, {"start": 2514.29, "end": 2514.59, "word": " bar.", "probability": 0.80908203125}], "temperature": 1.0}, {"id": 91, "seek": 254446, "start": 2515.89, "end": 2544.47, "text": " So the mean of x bar, sigma of x bar. So empirical rule says that this area between mu minus 3 sigma of x bar is around 99.7. Let's compute the lower bound. Mu is 65.", "tokens": [407, 264, 914, 295, 2031, 2159, 11, 12771, 295, 2031, 2159, 13, 407, 31886, 4978, 1619, 300, 341, 1859, 1296, 2992, 3175, 805, 12771, 295, 2031, 2159, 307, 926, 11803, 13, 22, 13, 961, 311, 14722, 264, 3126, 5472, 13, 15601, 307, 11624, 13], "avg_logprob": -0.19409721559948392, "compression_ratio": 1.31496062992126, "no_speech_prob": 0.0, "words": [{"start": 2515.89, "end": 2516.11, "word": " So", "probability": 0.434326171875}, {"start": 2516.11, "end": 2516.27, "word": " the", "probability": 0.630859375}, {"start": 2516.27, "end": 2516.41, "word": " mean", "probability": 0.83642578125}, {"start": 2516.41, "end": 2516.55, "word": " of", "probability": 0.95703125}, {"start": 2516.55, "end": 2516.75, "word": " x", "probability": 0.447998046875}, {"start": 2516.75, "end": 2517.07, "word": " bar,", "probability": 0.759765625}, {"start": 2517.97, "end": 2518.39, "word": " sigma", "probability": 0.9052734375}, {"start": 2518.39, "end": 2518.55, "word": " of", "probability": 0.884765625}, {"start": 2518.55, "end": 2518.73, "word": " x", "probability": 0.9912109375}, {"start": 2518.73, "end": 2518.99, "word": " bar.", "probability": 0.94091796875}, {"start": 2521.43, "end": 2521.97, "word": " So", "probability": 
0.9501953125}, {"start": 2521.97, "end": 2523.13, "word": " empirical", "probability": 0.544921875}, {"start": 2523.13, "end": 2523.59, "word": " rule", "probability": 0.88037109375}, {"start": 2523.59, "end": 2524.59, "word": " says", "probability": 0.8720703125}, {"start": 2524.59, "end": 2524.99, "word": " that", "probability": 0.90966796875}, {"start": 2524.99, "end": 2527.83, "word": " this", "probability": 0.75}, {"start": 2527.83, "end": 2528.33, "word": " area", "probability": 0.89111328125}, {"start": 2528.33, "end": 2531.69, "word": " between", "probability": 0.74951171875}, {"start": 2531.69, "end": 2531.99, "word": " mu", "probability": 0.67333984375}, {"start": 2531.99, "end": 2532.35, "word": " minus", "probability": 0.96484375}, {"start": 2532.35, "end": 2532.61, "word": " 3", "probability": 0.30859375}, {"start": 2532.61, "end": 2532.97, "word": " sigma", "probability": 0.9248046875}, {"start": 2532.97, "end": 2533.37, "word": " of", "probability": 0.94921875}, {"start": 2533.37, "end": 2533.59, "word": " x", "probability": 0.9892578125}, {"start": 2533.59, "end": 2533.93, "word": " bar", "probability": 0.947265625}, {"start": 2533.93, "end": 2536.81, "word": " is", "probability": 0.8076171875}, {"start": 2536.81, "end": 2537.21, "word": " around", "probability": 0.92431640625}, {"start": 2537.21, "end": 2537.81, "word": " 99", "probability": 0.943359375}, {"start": 2537.81, "end": 2538.61, "word": ".7.", "probability": 0.991455078125}, {"start": 2539.85, "end": 2540.49, "word": " Let's", "probability": 0.9658203125}, {"start": 2540.49, "end": 2540.81, "word": " compute", "probability": 0.9072265625}, {"start": 2540.81, "end": 2541.15, "word": " the", "probability": 0.9091796875}, {"start": 2541.15, "end": 2541.41, "word": " lower", "probability": 0.8740234375}, {"start": 2541.41, "end": 2541.79, "word": " bound.", "probability": 0.8896484375}, {"start": 2542.89, "end": 2543.31, "word": " Mu", "probability": 0.96533203125}, {"start": 2543.31, "end": 
2543.73, "word": " is", "probability": 0.9541015625}, {"start": 2543.73, "end": 2544.47, "word": " 65.", "probability": 0.853515625}], "temperature": 1.0}, {"id": 92, "seek": 257190, "start": 2545.86, "end": 2571.9, "text": " Three times sigma of x bar is one, so that's 62. The maximum here equal mu again 65 plus three times one, 16. So now 99.7% of the sample means lie between 62 and 68.", "tokens": [6244, 1413, 12771, 295, 2031, 2159, 307, 472, 11, 370, 300, 311, 24536, 13, 440, 6674, 510, 2681, 2992, 797, 11624, 1804, 1045, 1413, 472, 11, 3165, 13, 407, 586, 11803, 13, 22, 4, 295, 264, 6889, 1355, 4544, 1296, 24536, 293, 23317, 13], "avg_logprob": -0.2663194417953491, "compression_ratio": 1.2671755725190839, "no_speech_prob": 0.0, "words": [{"start": 2545.86, "end": 2546.76, "word": " Three", "probability": 0.170166015625}, {"start": 2546.76, "end": 2547.26, "word": " times", "probability": 0.92041015625}, {"start": 2547.26, "end": 2547.64, "word": " sigma", "probability": 0.70556640625}, {"start": 2547.64, "end": 2547.84, "word": " of", "probability": 0.447021484375}, {"start": 2547.84, "end": 2548.02, "word": " x", "probability": 0.728515625}, {"start": 2548.02, "end": 2548.36, "word": " bar", "probability": 0.79833984375}, {"start": 2548.36, "end": 2548.82, "word": " is", "probability": 0.90185546875}, {"start": 2548.82, "end": 2549.12, "word": " one,", "probability": 0.85595703125}, {"start": 2549.86, "end": 2550.08, "word": " so", "probability": 0.9052734375}, {"start": 2550.08, "end": 2550.56, "word": " that's", "probability": 0.92578125}, {"start": 2550.56, "end": 2551.64, "word": " 62.", "probability": 0.62890625}, {"start": 2553.7, "end": 2554.6, "word": " The", "probability": 0.87890625}, {"start": 2554.6, "end": 2555.02, "word": " maximum", "probability": 0.92626953125}, {"start": 2555.02, "end": 2555.42, "word": " here", "probability": 0.84912109375}, {"start": 2555.42, "end": 2557.5, "word": " equal", "probability": 0.396240234375}, {"start": 
2557.5, "end": 2557.92, "word": " mu", "probability": 0.61376953125}, {"start": 2557.92, "end": 2558.46, "word": " again", "probability": 0.50732421875}, {"start": 2558.46, "end": 2559.1, "word": " 65", "probability": 0.79345703125}, {"start": 2559.1, "end": 2561.48, "word": " plus", "probability": 0.658203125}, {"start": 2561.48, "end": 2561.78, "word": " three", "probability": 0.68408203125}, {"start": 2561.78, "end": 2562.06, "word": " times", "probability": 0.93359375}, {"start": 2562.06, "end": 2562.34, "word": " one,", "probability": 0.82373046875}, {"start": 2562.4, "end": 2562.7, "word": " 16.", "probability": 0.62255859375}, {"start": 2564.78, "end": 2565.68, "word": " So", "probability": 0.9326171875}, {"start": 2565.68, "end": 2565.92, "word": " now", "probability": 0.84912109375}, {"start": 2565.92, "end": 2566.62, "word": " 99", "probability": 0.734375}, {"start": 2566.62, "end": 2567.24, "word": ".7", "probability": 0.9912109375}, {"start": 2567.24, "end": 2567.66, "word": "%", "probability": 0.77734375}, {"start": 2567.66, "end": 2567.92, "word": " of", "probability": 0.96240234375}, {"start": 2567.92, "end": 2568.16, "word": " the", "probability": 0.87255859375}, {"start": 2568.16, "end": 2568.6, "word": " sample", "probability": 0.89501953125}, {"start": 2568.6, "end": 2569.02, "word": " means", "probability": 0.90869140625}, {"start": 2569.02, "end": 2569.66, "word": " lie", "probability": 0.82275390625}, {"start": 2569.66, "end": 2570.14, "word": " between", "probability": 0.87353515625}, {"start": 2570.14, "end": 2570.78, "word": " 62", "probability": 0.96875}, {"start": 2570.78, "end": 2571.26, "word": " and", "probability": 0.94384765625}, {"start": 2571.26, "end": 2571.9, "word": " 68.", "probability": 0.98486328125}], "temperature": 1.0}, {"id": 93, "seek": 260221, "start": 2573.66, "end": 2602.22, "text": " Now, what's left? 0.3% for both sides, so that's 0.15 to the right and the same to the left. Now, we are looking for x bar exceeds 71. 
68 this point, 71 should be to the right. Now, what's the probability to the right of 71?", "tokens": [823, 11, 437, 311, 1411, 30, 1958, 13, 18, 4, 337, 1293, 4881, 11, 370, 300, 311, 1958, 13, 5211, 281, 264, 558, 293, 264, 912, 281, 264, 1411, 13, 823, 11, 321, 366, 1237, 337, 2031, 2159, 43305, 30942, 13, 23317, 341, 935, 11, 30942, 820, 312, 281, 264, 558, 13, 823, 11, 437, 311, 264, 8482, 281, 264, 558, 295, 30942, 30], "avg_logprob": -0.18353365384615383, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 2573.66, "end": 2573.9, "word": " Now,", "probability": 0.75390625}, {"start": 2573.96, "end": 2574.18, "word": " what's", "probability": 0.9248046875}, {"start": 2574.18, "end": 2574.44, "word": " left?", "probability": 0.91650390625}, {"start": 2574.94, "end": 2575.48, "word": " 0", "probability": 0.72216796875}, {"start": 2575.48, "end": 2575.88, "word": ".3", "probability": 0.98828125}, {"start": 2575.88, "end": 2577.96, "word": "%", "probability": 0.50732421875}, {"start": 2577.96, "end": 2579.5, "word": " for", "probability": 0.740234375}, {"start": 2579.5, "end": 2580.02, "word": " both", "probability": 0.8662109375}, {"start": 2580.02, "end": 2580.58, "word": " sides,", "probability": 0.88330078125}, {"start": 2580.8, "end": 2580.94, "word": " so", "probability": 0.8818359375}, {"start": 2580.94, "end": 2581.36, "word": " that's", "probability": 0.9287109375}, {"start": 2581.36, "end": 2583.38, "word": " 0", "probability": 0.89794921875}, {"start": 2583.38, "end": 2583.8, "word": ".15", "probability": 0.981201171875}, {"start": 2583.8, "end": 2583.98, "word": " to", "probability": 0.67822265625}, {"start": 2583.98, "end": 2584.12, "word": " the", "probability": 0.91748046875}, {"start": 2584.12, "end": 2584.36, "word": " right", "probability": 0.91357421875}, {"start": 2584.36, "end": 2584.54, "word": " and", "probability": 0.74755859375}, {"start": 2584.54, "end": 2584.7, "word": " the", "probability": 0.7890625}, {"start": 
2584.7, "end": 2584.88, "word": " same", "probability": 0.90234375}, {"start": 2584.88, "end": 2585.0, "word": " to", "probability": 0.8955078125}, {"start": 2585.0, "end": 2585.14, "word": " the", "probability": 0.91796875}, {"start": 2585.14, "end": 2585.36, "word": " left.", "probability": 0.94970703125}, {"start": 2588.5, "end": 2589.14, "word": " Now,", "probability": 0.93994140625}, {"start": 2589.64, "end": 2589.9, "word": " we", "probability": 0.95556640625}, {"start": 2589.9, "end": 2590.04, "word": " are", "probability": 0.880859375}, {"start": 2590.04, "end": 2590.32, "word": " looking", "probability": 0.91650390625}, {"start": 2590.32, "end": 2590.58, "word": " for", "probability": 0.94580078125}, {"start": 2590.58, "end": 2590.8, "word": " x", "probability": 0.60693359375}, {"start": 2590.8, "end": 2591.08, "word": " bar", "probability": 0.86669921875}, {"start": 2591.08, "end": 2591.72, "word": " exceeds", "probability": 0.84814453125}, {"start": 2591.72, "end": 2592.54, "word": " 71.", "probability": 0.90087890625}, {"start": 2594.22, "end": 2594.82, "word": " 68", "probability": 0.262451171875}, {"start": 2594.82, "end": 2595.36, "word": " this", "probability": 0.474853515625}, {"start": 2595.36, "end": 2595.68, "word": " point,", "probability": 0.93896484375}, {"start": 2596.38, "end": 2596.76, "word": " 71", "probability": 0.81884765625}, {"start": 2596.76, "end": 2597.18, "word": " should", "probability": 0.96337890625}, {"start": 2597.18, "end": 2597.5, "word": " be", "probability": 0.95068359375}, {"start": 2597.5, "end": 2598.3, "word": " to", "probability": 0.95703125}, {"start": 2598.3, "end": 2598.46, "word": " the", "probability": 0.92236328125}, {"start": 2598.46, "end": 2598.64, "word": " right.", "probability": 0.916015625}, {"start": 2599.66, "end": 2599.98, "word": " Now,", "probability": 0.93603515625}, {"start": 2600.0, "end": 2600.24, "word": " what's", "probability": 0.941650390625}, {"start": 2600.24, "end": 2600.36, "word": " 
the", "probability": 0.8623046875}, {"start": 2600.36, "end": 2600.72, "word": " probability", "probability": 0.9638671875}, {"start": 2600.72, "end": 2600.98, "word": " to", "probability": 0.95654296875}, {"start": 2600.98, "end": 2601.14, "word": " the", "probability": 0.91064453125}, {"start": 2601.14, "end": 2601.42, "word": " right", "probability": 0.919921875}, {"start": 2601.42, "end": 2601.76, "word": " of", "probability": 0.96240234375}, {"start": 2601.76, "end": 2602.22, "word": " 71?", "probability": 0.97119140625}], "temperature": 1.0}, {"id": 94, "seek": 263185, "start": 2603.39, "end": 2631.85, "text": " is almost zero. Because since we are saying that most of the data lies within the three standard deviations of the mean. Now here, the difference between this point 71 and 65 is 6, and sigma of x bar is 1. So it means the difference between mu and x bar is around 6 times sigma of x bar.", "tokens": [307, 1920, 4018, 13, 1436, 1670, 321, 366, 1566, 300, 881, 295, 264, 1412, 9134, 1951, 264, 1045, 3832, 31219, 763, 295, 264, 914, 13, 823, 510, 11, 264, 2649, 1296, 341, 935, 30942, 293, 11624, 307, 1386, 11, 293, 12771, 295, 2031, 2159, 307, 502, 13, 407, 309, 1355, 264, 2649, 1296, 2992, 293, 2031, 2159, 307, 926, 1386, 1413, 12771, 295, 2031, 2159, 13], "avg_logprob": -0.20499067431065573, "compression_ratio": 1.6363636363636365, "no_speech_prob": 0.0, "words": [{"start": 2603.39, "end": 2603.65, "word": " is", "probability": 0.2158203125}, {"start": 2603.65, "end": 2603.91, "word": " almost", "probability": 0.806640625}, {"start": 2603.91, "end": 2604.29, "word": " zero.", "probability": 0.67578125}, {"start": 2606.13, "end": 2606.67, "word": " Because", "probability": 0.91357421875}, {"start": 2606.67, "end": 2607.07, "word": " since", "probability": 0.78564453125}, {"start": 2607.07, "end": 2607.37, "word": " we", "probability": 0.92041015625}, {"start": 2607.37, "end": 2607.51, "word": " are", "probability": 0.88916015625}, {"start": 2607.51, 
"end": 2607.77, "word": " saying", "probability": 0.86669921875}, {"start": 2607.77, "end": 2608.15, "word": " that", "probability": 0.93017578125}, {"start": 2608.15, "end": 2610.01, "word": " most", "probability": 0.859375}, {"start": 2610.01, "end": 2610.19, "word": " of", "probability": 0.97119140625}, {"start": 2610.19, "end": 2610.31, "word": " the", "probability": 0.919921875}, {"start": 2610.31, "end": 2610.63, "word": " data", "probability": 0.93896484375}, {"start": 2610.63, "end": 2612.39, "word": " lies", "probability": 0.8681640625}, {"start": 2612.39, "end": 2612.75, "word": " within", "probability": 0.8984375}, {"start": 2612.75, "end": 2612.91, "word": " the", "probability": 0.4794921875}, {"start": 2612.91, "end": 2613.15, "word": " three", "probability": 0.876953125}, {"start": 2613.15, "end": 2613.51, "word": " standard", "probability": 0.9189453125}, {"start": 2613.51, "end": 2614.15, "word": " deviations", "probability": 0.93212890625}, {"start": 2614.15, "end": 2614.87, "word": " of", "probability": 0.95947265625}, {"start": 2614.87, "end": 2615.01, "word": " the", "probability": 0.92724609375}, {"start": 2615.01, "end": 2615.25, "word": " mean.", "probability": 0.96826171875}, {"start": 2615.71, "end": 2616.01, "word": " Now", "probability": 0.958984375}, {"start": 2616.01, "end": 2616.37, "word": " here,", "probability": 0.68701171875}, {"start": 2617.47, "end": 2617.79, "word": " the", "probability": 0.87841796875}, {"start": 2617.79, "end": 2618.21, "word": " difference", "probability": 0.85205078125}, {"start": 2618.21, "end": 2618.57, "word": " between", "probability": 0.865234375}, {"start": 2618.57, "end": 2618.89, "word": " this", "probability": 0.94921875}, {"start": 2618.89, "end": 2619.23, "word": " point", "probability": 0.955078125}, {"start": 2619.23, "end": 2620.67, "word": " 71", "probability": 0.4658203125}, {"start": 2620.67, "end": 2621.69, "word": " and", "probability": 0.89306640625}, {"start": 2621.69, "end": 2622.43, 
"word": " 65", "probability": 0.97509765625}, {"start": 2622.43, "end": 2622.97, "word": " is", "probability": 0.8916015625}, {"start": 2622.97, "end": 2623.39, "word": " 6,", "probability": 0.57958984375}, {"start": 2624.47, "end": 2624.89, "word": " and", "probability": 0.93798828125}, {"start": 2624.89, "end": 2625.47, "word": " sigma", "probability": 0.43701171875}, {"start": 2625.47, "end": 2625.63, "word": " of", "probability": 0.787109375}, {"start": 2625.63, "end": 2625.77, "word": " x", "probability": 0.7958984375}, {"start": 2625.77, "end": 2625.95, "word": " bar", "probability": 0.84033203125}, {"start": 2625.95, "end": 2626.11, "word": " is", "probability": 0.9482421875}, {"start": 2626.11, "end": 2626.41, "word": " 1.", "probability": 0.8662109375}, {"start": 2626.89, "end": 2627.07, "word": " So", "probability": 0.95751953125}, {"start": 2627.07, "end": 2627.27, "word": " it", "probability": 0.85546875}, {"start": 2627.27, "end": 2627.51, "word": " means", "probability": 0.923828125}, {"start": 2627.51, "end": 2627.67, "word": " the", "probability": 0.875}, {"start": 2627.67, "end": 2628.01, "word": " difference", "probability": 0.85888671875}, {"start": 2628.01, "end": 2628.33, "word": " between", "probability": 0.87451171875}, {"start": 2628.33, "end": 2628.61, "word": " mu", "probability": 0.7568359375}, {"start": 2628.61, "end": 2628.83, "word": " and", "probability": 0.94921875}, {"start": 2628.83, "end": 2629.05, "word": " x", "probability": 0.986328125}, {"start": 2629.05, "end": 2629.35, "word": " bar", "probability": 0.9228515625}, {"start": 2629.35, "end": 2629.77, "word": " is", "probability": 0.94091796875}, {"start": 2629.77, "end": 2630.03, "word": " around", "probability": 0.9248046875}, {"start": 2630.03, "end": 2630.45, "word": " 6", "probability": 0.771484375}, {"start": 2630.45, "end": 2630.89, "word": " times", "probability": 0.9169921875}, {"start": 2630.89, "end": 2631.25, "word": " sigma", "probability": 0.9423828125}, {"start": 
2631.25, "end": 2631.39, "word": " of", "probability": 0.76318359375}, {"start": 2631.39, "end": 2631.57, "word": " x", "probability": 0.9970703125}, {"start": 2631.57, "end": 2631.85, "word": " bar.", "probability": 0.93798828125}], "temperature": 1.0}, {"id": 95, "seek": 266092, "start": 2633.0, "end": 2660.92, "text": " So you are sure that 99.7% of the data lies between 62 and 68, and just 0.3% of the area left to the other side, two sides, split in half, so we have 0.15 to the right and the other to the left. But again, we are looking for expiry exceeds 71, so we have to go further to the right side, so the area becomes very small.", "tokens": [407, 291, 366, 988, 300, 11803, 13, 22, 4, 295, 264, 1412, 9134, 1296, 24536, 293, 23317, 11, 293, 445, 1958, 13, 18, 4, 295, 264, 1859, 1411, 281, 264, 661, 1252, 11, 732, 4881, 11, 7472, 294, 1922, 11, 370, 321, 362, 1958, 13, 5211, 281, 264, 558, 293, 264, 661, 281, 264, 1411, 13, 583, 797, 11, 321, 366, 1237, 337, 1278, 12781, 43305, 30942, 11, 370, 321, 362, 281, 352, 3052, 281, 264, 558, 1252, 11, 370, 264, 1859, 3643, 588, 1359, 13], "avg_logprob": -0.1667564658597968, "compression_ratio": 1.6080402010050252, "no_speech_prob": 0.0, "words": [{"start": 2633.0, "end": 2633.56, "word": " So", "probability": 0.90771484375}, {"start": 2633.56, "end": 2634.12, "word": " you", "probability": 0.64111328125}, {"start": 2634.12, "end": 2634.34, "word": " are", "probability": 0.91943359375}, {"start": 2634.34, "end": 2634.6, "word": " sure", "probability": 0.900390625}, {"start": 2634.6, "end": 2634.92, "word": " that", "probability": 0.93017578125}, {"start": 2634.92, "end": 2635.34, "word": " 99", "probability": 0.95654296875}, {"start": 2635.34, "end": 2635.88, "word": ".7", "probability": 0.971435546875}, {"start": 2635.88, "end": 2636.14, "word": "%", "probability": 0.8544921875}, {"start": 2636.14, "end": 2636.46, "word": " of", "probability": 0.9560546875}, {"start": 2636.46, "end": 2636.6, "word": " the", 
"probability": 0.91845703125}, {"start": 2636.6, "end": 2636.96, "word": " data", "probability": 0.94482421875}, {"start": 2636.96, "end": 2638.12, "word": " lies", "probability": 0.923828125}, {"start": 2638.12, "end": 2638.46, "word": " between", "probability": 0.86962890625}, {"start": 2638.46, "end": 2638.92, "word": " 62", "probability": 0.9169921875}, {"start": 2638.92, "end": 2639.12, "word": " and", "probability": 0.8115234375}, {"start": 2639.12, "end": 2639.54, "word": " 68,", "probability": 0.98828125}, {"start": 2640.96, "end": 2641.32, "word": " and", "probability": 0.93701171875}, {"start": 2641.32, "end": 2641.74, "word": " just", "probability": 0.92578125}, {"start": 2641.74, "end": 2642.16, "word": " 0", "probability": 0.88916015625}, {"start": 2642.16, "end": 2642.44, "word": ".3", "probability": 0.99462890625}, {"start": 2642.44, "end": 2642.74, "word": "%", "probability": 0.98828125}, {"start": 2642.74, "end": 2643.04, "word": " of", "probability": 0.9609375}, {"start": 2643.04, "end": 2643.18, "word": " the", "probability": 0.91357421875}, {"start": 2643.18, "end": 2643.4, "word": " area", "probability": 0.8837890625}, {"start": 2643.4, "end": 2643.7, "word": " left", "probability": 0.92578125}, {"start": 2643.7, "end": 2643.96, "word": " to", "probability": 0.9443359375}, {"start": 2643.96, "end": 2644.08, "word": " the", "probability": 0.86572265625}, {"start": 2644.08, "end": 2644.34, "word": " other", "probability": 0.89013671875}, {"start": 2644.34, "end": 2644.94, "word": " side,", "probability": 0.81787109375}, {"start": 2645.04, "end": 2645.16, "word": " two", "probability": 0.77734375}, {"start": 2645.16, "end": 2645.64, "word": " sides,", "probability": 0.90234375}, {"start": 2646.42, "end": 2646.74, "word": " split", "probability": 0.9453125}, {"start": 2646.74, "end": 2647.38, "word": " in", "probability": 0.947265625}, {"start": 2647.38, "end": 2647.74, "word": " half,", "probability": 0.87158203125}, {"start": 2647.88, "end": 
2648.02, "word": " so", "probability": 0.9326171875}, {"start": 2648.02, "end": 2648.14, "word": " we", "probability": 0.861328125}, {"start": 2648.14, "end": 2648.28, "word": " have", "probability": 0.94873046875}, {"start": 2648.28, "end": 2648.62, "word": " 0", "probability": 0.95166015625}, {"start": 2648.62, "end": 2648.98, "word": ".15", "probability": 0.99462890625}, {"start": 2648.98, "end": 2649.2, "word": " to", "probability": 0.87646484375}, {"start": 2649.2, "end": 2649.32, "word": " the", "probability": 0.91552734375}, {"start": 2649.32, "end": 2649.56, "word": " right", "probability": 0.9130859375}, {"start": 2649.56, "end": 2649.76, "word": " and", "probability": 0.80810546875}, {"start": 2649.76, "end": 2649.86, "word": " the", "probability": 0.57080078125}, {"start": 2649.86, "end": 2650.02, "word": " other", "probability": 0.8896484375}, {"start": 2650.02, "end": 2650.22, "word": " to", "probability": 0.962890625}, {"start": 2650.22, "end": 2650.36, "word": " the", "probability": 0.91455078125}, {"start": 2650.36, "end": 2650.56, "word": " left.", "probability": 0.953125}, {"start": 2651.88, "end": 2652.12, "word": " But", "probability": 0.9423828125}, {"start": 2652.12, "end": 2652.46, "word": " again,", "probability": 0.88671875}, {"start": 2652.48, "end": 2652.6, "word": " we", "probability": 0.95068359375}, {"start": 2652.6, "end": 2652.7, "word": " are", "probability": 0.923828125}, {"start": 2652.7, "end": 2652.94, "word": " looking", "probability": 0.9111328125}, {"start": 2652.94, "end": 2653.22, "word": " for", "probability": 0.95703125}, {"start": 2653.22, "end": 2653.66, "word": " expiry", "probability": 0.26171875}, {"start": 2653.66, "end": 2654.3, "word": " exceeds", "probability": 0.47021484375}, {"start": 2654.3, "end": 2655.08, "word": " 71,", "probability": 0.955078125}, {"start": 2655.28, "end": 2655.4, "word": " so", "probability": 0.9462890625}, {"start": 2655.4, "end": 2655.52, "word": " we", "probability": 0.95068359375}, 
{"start": 2655.52, "end": 2655.66, "word": " have", "probability": 0.94482421875}, {"start": 2655.66, "end": 2655.86, "word": " to", "probability": 0.96484375}, {"start": 2655.86, "end": 2656.38, "word": " go", "probability": 0.96044921875}, {"start": 2656.38, "end": 2656.82, "word": " further", "probability": 0.921875}, {"start": 2656.82, "end": 2657.36, "word": " to", "probability": 0.81591796875}, {"start": 2657.36, "end": 2657.5, "word": " the", "probability": 0.9111328125}, {"start": 2657.5, "end": 2657.7, "word": " right", "probability": 0.9169921875}, {"start": 2657.7, "end": 2658.1, "word": " side,", "probability": 0.8720703125}, {"start": 2658.86, "end": 2659.04, "word": " so", "probability": 0.94873046875}, {"start": 2659.04, "end": 2659.72, "word": " the", "probability": 0.7919921875}, {"start": 2659.72, "end": 2659.94, "word": " area", "probability": 0.92919921875}, {"start": 2659.94, "end": 2660.34, "word": " becomes", "probability": 0.87939453125}, {"start": 2660.34, "end": 2660.58, "word": " very", "probability": 0.85498046875}, {"start": 2660.58, "end": 2660.92, "word": " small.", "probability": 0.9267578125}], "temperature": 1.0}, {"id": 96, "seek": 268818, "start": 2667.08, "end": 2688.18, "text": " This method is faster if you figure out this one we can apply the empirical rule. Let's do similar questions. Look at the other one. 
Number five.", "tokens": [639, 3170, 307, 4663, 498, 291, 2573, 484, 341, 472, 321, 393, 3079, 264, 31886, 4978, 13, 961, 311, 360, 2531, 1651, 13, 2053, 412, 264, 661, 472, 13, 5118, 1732, 13], "avg_logprob": -0.2020596590909091, "compression_ratio": 1.2586206896551724, "no_speech_prob": 0.0, "words": [{"start": 2667.08, "end": 2667.92, "word": " This", "probability": 0.29638671875}, {"start": 2667.92, "end": 2668.68, "word": " method", "probability": 0.9267578125}, {"start": 2668.68, "end": 2669.28, "word": " is", "probability": 0.9296875}, {"start": 2669.28, "end": 2669.74, "word": " faster", "probability": 0.9052734375}, {"start": 2669.74, "end": 2670.2, "word": " if", "probability": 0.79541015625}, {"start": 2670.2, "end": 2670.38, "word": " you", "probability": 0.9365234375}, {"start": 2670.38, "end": 2670.64, "word": " figure", "probability": 0.91259765625}, {"start": 2670.64, "end": 2670.98, "word": " out", "probability": 0.87255859375}, {"start": 2670.98, "end": 2671.72, "word": " this", "probability": 0.75439453125}, {"start": 2671.72, "end": 2672.02, "word": " one", "probability": 0.87890625}, {"start": 2672.02, "end": 2672.42, "word": " we", "probability": 0.4853515625}, {"start": 2672.42, "end": 2672.64, "word": " can", "probability": 0.94970703125}, {"start": 2672.64, "end": 2673.12, "word": " apply", "probability": 0.921875}, {"start": 2673.12, "end": 2673.48, "word": " the", "probability": 0.86083984375}, {"start": 2673.48, "end": 2673.88, "word": " empirical", "probability": 0.8916015625}, {"start": 2673.88, "end": 2674.3, "word": " rule.", "probability": 0.8876953125}, {"start": 2676.84, "end": 2677.22, "word": " Let's", "probability": 0.912841796875}, {"start": 2677.22, "end": 2677.46, "word": " do", "probability": 0.87841796875}, {"start": 2677.46, "end": 2678.06, "word": " similar", "probability": 0.92724609375}, {"start": 2678.06, "end": 2678.7, "word": " questions.", "probability": 0.9501953125}, {"start": 2680.32, "end": 2680.96, "word": " 
Look", "probability": 0.7412109375}, {"start": 2680.96, "end": 2681.1, "word": " at", "probability": 0.9658203125}, {"start": 2681.1, "end": 2681.2, "word": " the", "probability": 0.87744140625}, {"start": 2681.2, "end": 2681.36, "word": " other", "probability": 0.884765625}, {"start": 2681.36, "end": 2681.78, "word": " one.", "probability": 0.92578125}, {"start": 2686.94, "end": 2687.78, "word": " Number", "probability": 0.79345703125}, {"start": 2687.78, "end": 2688.18, "word": " five.", "probability": 0.5419921875}], "temperature": 1.0}, {"id": 97, "seek": 272605, "start": 2699.27, "end": 2726.05, "text": " The amount of gasoline purchased per car at large surface stations has population mean of 15. So the mean is 15. And the population standard deviation of 4. Sigma is 4. It is assumed that the amount of gasoline purchased per car is symmetric.", "tokens": [440, 2372, 295, 28914, 14734, 680, 1032, 412, 2416, 3753, 13390, 575, 4415, 914, 295, 2119, 13, 407, 264, 914, 307, 2119, 13, 400, 264, 4415, 3832, 25163, 295, 1017, 13, 36595, 307, 1017, 13, 467, 307, 15895, 300, 264, 2372, 295, 28914, 14734, 680, 1032, 307, 32330, 13], "avg_logprob": -0.19234375685453414, "compression_ratio": 1.653061224489796, "no_speech_prob": 0.0, "words": [{"start": 2699.27, "end": 2699.37, "word": " The", "probability": 0.2313232421875}, {"start": 2699.37, "end": 2699.37, "word": " amount", "probability": 0.79248046875}, {"start": 2699.37, "end": 2699.49, "word": " of", "probability": 0.9423828125}, {"start": 2699.49, "end": 2699.95, "word": " gasoline", "probability": 0.927734375}, {"start": 2699.95, "end": 2700.59, "word": " purchased", "probability": 0.970703125}, {"start": 2700.59, "end": 2700.95, "word": " per", "probability": 0.92919921875}, {"start": 2700.95, "end": 2701.35, "word": " car", "probability": 0.9013671875}, {"start": 2701.35, "end": 2701.67, "word": " at", "probability": 0.9189453125}, {"start": 2701.67, "end": 2702.05, "word": " large", "probability": 
0.853515625}, {"start": 2702.05, "end": 2702.47, "word": " surface", "probability": 0.482666015625}, {"start": 2702.47, "end": 2703.15, "word": " stations", "probability": 0.62890625}, {"start": 2703.15, "end": 2704.05, "word": " has", "probability": 0.90869140625}, {"start": 2704.05, "end": 2704.51, "word": " population", "probability": 0.77392578125}, {"start": 2704.51, "end": 2704.89, "word": " mean", "probability": 0.9130859375}, {"start": 2704.89, "end": 2705.19, "word": " of", "probability": 0.962890625}, {"start": 2705.19, "end": 2705.63, "word": " 15.", "probability": 0.84912109375}, {"start": 2707.69, "end": 2707.95, "word": " So", "probability": 0.78955078125}, {"start": 2707.95, "end": 2708.11, "word": " the", "probability": 0.740234375}, {"start": 2708.11, "end": 2708.37, "word": " mean", "probability": 0.9580078125}, {"start": 2708.37, "end": 2709.53, "word": " is", "probability": 0.9384765625}, {"start": 2709.53, "end": 2710.07, "word": " 15.", "probability": 0.90771484375}, {"start": 2713.01, "end": 2713.55, "word": " And", "probability": 0.90234375}, {"start": 2713.55, "end": 2713.67, "word": " the", "probability": 0.6328125}, {"start": 2713.67, "end": 2714.09, "word": " population", "probability": 0.96240234375}, {"start": 2714.09, "end": 2714.47, "word": " standard", "probability": 0.66259765625}, {"start": 2714.47, "end": 2714.93, "word": " deviation", "probability": 0.93359375}, {"start": 2714.93, "end": 2715.19, "word": " of", "probability": 0.76904296875}, {"start": 2715.19, "end": 2715.63, "word": " 4.", "probability": 0.79638671875}, {"start": 2717.53, "end": 2718.15, "word": " Sigma", "probability": 0.8681640625}, {"start": 2718.15, "end": 2719.17, "word": " is", "probability": 0.912109375}, {"start": 2719.17, "end": 2719.55, "word": " 4.", "probability": 0.91552734375}, {"start": 2721.33, "end": 2721.61, "word": " It", "probability": 0.935546875}, {"start": 2721.61, "end": 2721.75, "word": " is", "probability": 0.94482421875}, {"start": 
2721.75, "end": 2722.21, "word": " assumed", "probability": 0.869140625}, {"start": 2722.21, "end": 2722.67, "word": " that", "probability": 0.9375}, {"start": 2722.67, "end": 2723.43, "word": " the", "probability": 0.83544921875}, {"start": 2723.43, "end": 2723.77, "word": " amount", "probability": 0.89404296875}, {"start": 2723.77, "end": 2723.93, "word": " of", "probability": 0.962890625}, {"start": 2723.93, "end": 2724.33, "word": " gasoline", "probability": 0.96533203125}, {"start": 2724.33, "end": 2724.71, "word": " purchased", "probability": 0.9755859375}, {"start": 2724.71, "end": 2725.05, "word": " per", "probability": 0.943359375}, {"start": 2725.05, "end": 2725.33, "word": " car", "probability": 0.9228515625}, {"start": 2725.33, "end": 2725.59, "word": " is", "probability": 0.951171875}, {"start": 2725.59, "end": 2726.05, "word": " symmetric.", "probability": 0.85693359375}], "temperature": 1.0}, {"id": 98, "seek": 275583, "start": 2728.87, "end": 2755.83, "text": " there is approximately 68.26% it shows that a random sample of 16 cars so n equals 16 cars will have sample mean between 14 and 16 so it says that between 14 and 16 the answer is 68.24%", "tokens": [456, 307, 10447, 23317, 13, 10880, 4, 309, 3110, 300, 257, 4974, 6889, 295, 3165, 5163, 370, 297, 6915, 3165, 5163, 486, 362, 6889, 914, 1296, 3499, 293, 3165, 370, 309, 1619, 300, 1296, 3499, 293, 3165, 264, 1867, 307, 23317, 13, 7911, 4], "avg_logprob": -0.21718749867545234, "compression_ratio": 1.4645669291338583, "no_speech_prob": 0.0, "words": [{"start": 2728.87, "end": 2729.25, "word": " there", "probability": 0.480224609375}, {"start": 2729.25, "end": 2729.61, "word": " is", "probability": 0.92236328125}, {"start": 2729.61, "end": 2730.73, "word": " approximately", "probability": 0.84375}, {"start": 2730.73, "end": 2732.89, "word": " 68", "probability": 0.9541015625}, {"start": 2732.89, "end": 2735.53, "word": ".26", "probability": 0.861572265625}, {"start": 2735.53, "end": 2736.03, "word": 
"%", "probability": 0.5595703125}, {"start": 2736.03, "end": 2737.77, "word": " it", "probability": 0.267578125}, {"start": 2737.77, "end": 2738.11, "word": " shows", "probability": 0.5537109375}, {"start": 2738.11, "end": 2738.55, "word": " that", "probability": 0.92919921875}, {"start": 2738.55, "end": 2738.99, "word": " a", "probability": 0.90478515625}, {"start": 2738.99, "end": 2739.29, "word": " random", "probability": 0.86962890625}, {"start": 2739.29, "end": 2739.65, "word": " sample", "probability": 0.8642578125}, {"start": 2739.65, "end": 2739.85, "word": " of", "probability": 0.92919921875}, {"start": 2739.85, "end": 2740.31, "word": " 16", "probability": 0.81689453125}, {"start": 2740.31, "end": 2740.85, "word": " cars", "probability": 0.85498046875}, {"start": 2740.85, "end": 2743.63, "word": " so", "probability": 0.2027587890625}, {"start": 2743.63, "end": 2743.77, "word": " n", "probability": 0.57666015625}, {"start": 2743.77, "end": 2743.97, "word": " equals", "probability": 0.2164306640625}, {"start": 2743.97, "end": 2744.43, "word": " 16", "probability": 0.9111328125}, {"start": 2744.43, "end": 2744.79, "word": " cars", "probability": 0.89306640625}, {"start": 2744.79, "end": 2744.97, "word": " will", "probability": 0.84521484375}, {"start": 2744.97, "end": 2745.19, "word": " have", "probability": 0.9453125}, {"start": 2745.19, "end": 2745.59, "word": " sample", "probability": 0.80322265625}, {"start": 2745.59, "end": 2745.97, "word": " mean", "probability": 0.92529296875}, {"start": 2745.97, "end": 2746.83, "word": " between", "probability": 0.88671875}, {"start": 2746.83, "end": 2747.25, "word": " 14", "probability": 0.9580078125}, {"start": 2747.25, "end": 2747.43, "word": " and", "probability": 0.93896484375}, {"start": 2747.43, "end": 2747.89, "word": " 16", "probability": 0.9716796875}, {"start": 2747.89, "end": 2748.55, "word": " so", "probability": 0.580078125}, {"start": 2748.55, "end": 2748.73, "word": " it", "probability": 
0.90380859375}, {"start": 2748.73, "end": 2748.99, "word": " says", "probability": 0.85107421875}, {"start": 2748.99, "end": 2749.43, "word": " that", "probability": 0.935546875}, {"start": 2749.43, "end": 2751.89, "word": " between", "probability": 0.83203125}, {"start": 2751.89, "end": 2752.49, "word": " 14", "probability": 0.97119140625}, {"start": 2752.49, "end": 2752.67, "word": " and", "probability": 0.9375}, {"start": 2752.67, "end": 2753.05, "word": " 16", "probability": 0.97607421875}, {"start": 2753.05, "end": 2753.55, "word": " the", "probability": 0.744140625}, {"start": 2753.55, "end": 2753.87, "word": " answer", "probability": 0.95556640625}, {"start": 2753.87, "end": 2754.21, "word": " is", "probability": 0.94384765625}, {"start": 2754.21, "end": 2754.73, "word": " 68", "probability": 0.986328125}, {"start": 2754.73, "end": 2755.57, "word": ".24", "probability": 0.985107421875}, {"start": 2755.57, "end": 2755.83, "word": "%", "probability": 0.76416015625}], "temperature": 1.0}, {"id": 99, "seek": 278665, "start": 2758.71, "end": 2786.65, "text": " So again, we have a population, this population is symmetric, with mean of 15 sigma of 4. We select a random sample of 16. The problem says that the probability of X bar between 14 and 16 equals 68%. And the answer is true. Why? 
How can we apply or use the empirical rule in this case?", "tokens": [407, 797, 11, 321, 362, 257, 4415, 11, 341, 4415, 307, 32330, 11, 365, 914, 295, 2119, 12771, 295, 1017, 13, 492, 3048, 257, 4974, 6889, 295, 3165, 13, 440, 1154, 1619, 300, 264, 8482, 295, 1783, 2159, 1296, 3499, 293, 3165, 6915, 23317, 6856, 400, 264, 1867, 307, 2074, 13, 1545, 30, 1012, 393, 321, 3079, 420, 764, 264, 31886, 4978, 294, 341, 1389, 30], "avg_logprob": -0.1562499977759461, "compression_ratio": 1.43, "no_speech_prob": 0.0, "words": [{"start": 2758.71, "end": 2758.99, "word": " So", "probability": 0.8037109375}, {"start": 2758.99, "end": 2759.25, "word": " again,", "probability": 0.81494140625}, {"start": 2759.93, "end": 2760.11, "word": " we", "probability": 0.908203125}, {"start": 2760.11, "end": 2760.27, "word": " have", "probability": 0.94970703125}, {"start": 2760.27, "end": 2760.39, "word": " a", "probability": 0.966796875}, {"start": 2760.39, "end": 2760.81, "word": " population,", "probability": 0.9501953125}, {"start": 2761.15, "end": 2761.35, "word": " this", "probability": 0.7822265625}, {"start": 2761.35, "end": 2761.69, "word": " population", "probability": 0.93115234375}, {"start": 2761.69, "end": 2761.89, "word": " is", "probability": 0.8916015625}, {"start": 2761.89, "end": 2762.25, "word": " symmetric,", "probability": 0.8154296875}, {"start": 2763.41, "end": 2763.75, "word": " with", "probability": 0.90966796875}, {"start": 2763.75, "end": 2763.99, "word": " mean", "probability": 0.94677734375}, {"start": 2763.99, "end": 2764.35, "word": " of", "probability": 0.9609375}, {"start": 2764.35, "end": 2765.03, "word": " 15", "probability": 0.92236328125}, {"start": 2765.03, "end": 2765.51, "word": " sigma", "probability": 0.469482421875}, {"start": 2765.51, "end": 2765.81, "word": " of", "probability": 0.65185546875}, {"start": 2765.81, "end": 2766.19, "word": " 4.", "probability": 0.884765625}, {"start": 2767.57, "end": 2768.07, "word": " We", "probability": 
0.94970703125}, {"start": 2768.07, "end": 2768.41, "word": " select", "probability": 0.84521484375}, {"start": 2768.41, "end": 2768.55, "word": " a", "probability": 0.9833984375}, {"start": 2768.55, "end": 2768.73, "word": " random", "probability": 0.8525390625}, {"start": 2768.73, "end": 2769.13, "word": " sample", "probability": 0.8955078125}, {"start": 2769.13, "end": 2769.29, "word": " of", "probability": 0.9453125}, {"start": 2769.29, "end": 2769.75, "word": " 16.", "probability": 0.94775390625}, {"start": 2771.11, "end": 2771.45, "word": " The", "probability": 0.78759765625}, {"start": 2771.45, "end": 2771.83, "word": " problem", "probability": 0.76708984375}, {"start": 2771.83, "end": 2772.29, "word": " says", "probability": 0.90576171875}, {"start": 2772.29, "end": 2772.67, "word": " that", "probability": 0.92529296875}, {"start": 2772.67, "end": 2774.17, "word": " the", "probability": 0.7158203125}, {"start": 2774.17, "end": 2774.67, "word": " probability", "probability": 0.9365234375}, {"start": 2774.67, "end": 2774.95, "word": " of", "probability": 0.9658203125}, {"start": 2774.95, "end": 2775.19, "word": " X", "probability": 0.6572265625}, {"start": 2775.19, "end": 2775.43, "word": " bar", "probability": 0.85107421875}, {"start": 2775.43, "end": 2775.77, "word": " between", "probability": 0.89404296875}, {"start": 2775.77, "end": 2776.17, "word": " 14", "probability": 0.94189453125}, {"start": 2776.17, "end": 2776.35, "word": " and", "probability": 0.94384765625}, {"start": 2776.35, "end": 2776.73, "word": " 16", "probability": 0.9736328125}, {"start": 2776.73, "end": 2776.99, "word": " equals", "probability": 0.72900390625}, {"start": 2776.99, "end": 2777.93, "word": " 68%.", "probability": 0.7216796875}, {"start": 2777.93, "end": 2778.67, "word": " And", "probability": 0.93408203125}, {"start": 2778.67, "end": 2778.83, "word": " the", "probability": 0.8671875}, {"start": 2778.83, "end": 2779.11, "word": " answer", "probability": 0.95556640625}, 
{"start": 2779.11, "end": 2779.45, "word": " is", "probability": 0.947265625}, {"start": 2779.45, "end": 2779.75, "word": " true.", "probability": 0.7109375}, {"start": 2780.01, "end": 2780.33, "word": " Why?", "probability": 0.83984375}, {"start": 2781.83, "end": 2782.43, "word": " How", "probability": 0.958984375}, {"start": 2782.43, "end": 2782.63, "word": " can", "probability": 0.93798828125}, {"start": 2782.63, "end": 2782.77, "word": " we", "probability": 0.962890625}, {"start": 2782.77, "end": 2783.29, "word": " apply", "probability": 0.92919921875}, {"start": 2783.29, "end": 2784.45, "word": " or", "probability": 0.82763671875}, {"start": 2784.45, "end": 2784.81, "word": " use", "probability": 0.87451171875}, {"start": 2784.81, "end": 2785.13, "word": " the", "probability": 0.904296875}, {"start": 2785.13, "end": 2785.55, "word": " empirical", "probability": 0.91943359375}, {"start": 2785.55, "end": 2785.95, "word": " rule", "probability": 0.91064453125}, {"start": 2785.95, "end": 2786.11, "word": " in", "probability": 0.93994140625}, {"start": 2786.11, "end": 2786.31, "word": " this", "probability": 0.947265625}, {"start": 2786.31, "end": 2786.65, "word": " case?", "probability": 0.9169921875}], "temperature": 1.0}, {"id": 100, "seek": 280943, "start": 2788.59, "end": 2809.43, "text": " A mu minus 3 standard deviation? It says 68, 1. So it's a mu minus 1 standard deviation, 1 sigma X bar, and let's look at 1 plus sigma X bar. 
A mu is 15.", "tokens": [316, 2992, 3175, 805, 3832, 25163, 30, 467, 1619, 23317, 11, 502, 13, 407, 309, 311, 257, 2992, 3175, 502, 3832, 25163, 11, 502, 12771, 1783, 2159, 11, 293, 718, 311, 574, 412, 502, 1804, 12771, 1783, 2159, 13, 316, 2992, 307, 2119, 13], "avg_logprob": -0.2710069391462538, "compression_ratio": 1.3628318584070795, "no_speech_prob": 0.0, "words": [{"start": 2788.59, "end": 2788.85, "word": " A", "probability": 0.1322021484375}, {"start": 2788.85, "end": 2788.85, "word": " mu", "probability": 0.47998046875}, {"start": 2788.85, "end": 2789.19, "word": " minus", "probability": 0.95947265625}, {"start": 2789.19, "end": 2789.49, "word": " 3", "probability": 0.5830078125}, {"start": 2789.49, "end": 2789.71, "word": " standard", "probability": 0.90380859375}, {"start": 2789.71, "end": 2790.15, "word": " deviation?", "probability": 0.8330078125}, {"start": 2794.39, "end": 2795.03, "word": " It", "probability": 0.79345703125}, {"start": 2795.03, "end": 2795.43, "word": " says", "probability": 0.7265625}, {"start": 2795.43, "end": 2796.85, "word": " 68,", "probability": 0.83642578125}, {"start": 2797.21, "end": 2797.73, "word": " 1.", "probability": 0.82080078125}, {"start": 2798.55, "end": 2799.03, "word": " So", "probability": 0.91162109375}, {"start": 2799.03, "end": 2799.25, "word": " it's", "probability": 0.8818359375}, {"start": 2799.25, "end": 2799.39, "word": " a", "probability": 0.64501953125}, {"start": 2799.39, "end": 2799.61, "word": " mu", "probability": 0.93798828125}, {"start": 2799.61, "end": 2801.67, "word": " minus", "probability": 0.96484375}, {"start": 2801.67, "end": 2802.23, "word": " 1", "probability": 0.7353515625}, {"start": 2802.23, "end": 2802.69, "word": " standard", "probability": 0.916015625}, {"start": 2802.69, "end": 2803.09, "word": " deviation,", "probability": 0.94482421875}, {"start": 2803.39, "end": 2803.59, "word": " 1", "probability": 0.892578125}, {"start": 2803.59, "end": 2803.83, "word": " sigma", 
"probability": 0.78564453125}, {"start": 2803.83, "end": 2804.09, "word": " X", "probability": 0.1588134765625}, {"start": 2804.09, "end": 2804.35, "word": " bar,", "probability": 0.7841796875}, {"start": 2804.85, "end": 2805.09, "word": " and", "probability": 0.923828125}, {"start": 2805.09, "end": 2805.33, "word": " let's", "probability": 0.824462890625}, {"start": 2805.33, "end": 2805.53, "word": " look", "probability": 0.96337890625}, {"start": 2805.53, "end": 2805.71, "word": " at", "probability": 0.96484375}, {"start": 2805.71, "end": 2805.99, "word": " 1", "probability": 0.93798828125}, {"start": 2805.99, "end": 2806.77, "word": " plus", "probability": 0.9326171875}, {"start": 2806.77, "end": 2807.09, "word": " sigma", "probability": 0.95458984375}, {"start": 2807.09, "end": 2807.41, "word": " X", "probability": 0.91845703125}, {"start": 2807.41, "end": 2807.55, "word": " bar.", "probability": 0.9111328125}, {"start": 2808.55, "end": 2808.71, "word": " A", "probability": 0.875}, {"start": 2808.71, "end": 2808.83, "word": " mu", "probability": 0.9580078125}, {"start": 2808.83, "end": 2809.01, "word": " is", "probability": 0.94873046875}, {"start": 2809.01, "end": 2809.43, "word": " 15.", "probability": 0.96240234375}], "temperature": 1.0}, {"id": 101, "seek": 283416, "start": 2810.78, "end": 2834.16, "text": " Now Sigma of X1 is also 1 because 4 divided by square root of 16 is 1. So 1 times 1 is 14. 
On the other hand 15 plus 1 times 1 is 16.", "tokens": [823, 36595, 295, 1783, 16, 307, 611, 502, 570, 1017, 6666, 538, 3732, 5593, 295, 3165, 307, 502, 13, 407, 502, 1413, 502, 307, 3499, 13, 1282, 264, 661, 1011, 2119, 1804, 502, 1413, 502, 307, 3165, 13], "avg_logprob": -0.2734374877734062, "compression_ratio": 1.2293577981651376, "no_speech_prob": 0.0, "words": [{"start": 2810.78, "end": 2811.08, "word": " Now", "probability": 0.5888671875}, {"start": 2811.08, "end": 2811.38, "word": " Sigma", "probability": 0.31884765625}, {"start": 2811.38, "end": 2811.54, "word": " of", "probability": 0.88916015625}, {"start": 2811.54, "end": 2812.02, "word": " X1", "probability": 0.6240234375}, {"start": 2812.02, "end": 2816.4, "word": " is", "probability": 0.316650390625}, {"start": 2816.4, "end": 2816.76, "word": " also", "probability": 0.748046875}, {"start": 2816.76, "end": 2817.02, "word": " 1", "probability": 0.61279296875}, {"start": 2817.02, "end": 2817.42, "word": " because", "probability": 0.70361328125}, {"start": 2817.42, "end": 2818.18, "word": " 4", "probability": 0.904296875}, {"start": 2818.18, "end": 2818.56, "word": " divided", "probability": 0.6357421875}, {"start": 2818.56, "end": 2818.8, "word": " by", "probability": 0.96630859375}, {"start": 2818.8, "end": 2819.12, "word": " square", "probability": 0.447021484375}, {"start": 2819.12, "end": 2819.38, "word": " root", "probability": 0.9072265625}, {"start": 2819.38, "end": 2819.52, "word": " of", "probability": 0.8544921875}, {"start": 2819.52, "end": 2820.0, "word": " 16", "probability": 0.96142578125}, {"start": 2820.0, "end": 2820.68, "word": " is", "probability": 0.89794921875}, {"start": 2820.68, "end": 2821.58, "word": " 1.", "probability": 0.92333984375}, {"start": 2824.04, "end": 2824.5, "word": " So", "probability": 0.88916015625}, {"start": 2824.5, "end": 2825.56, "word": " 1", "probability": 0.6640625}, {"start": 2825.56, "end": 2826.06, "word": " times", "probability": 0.734375}, {"start": 
2826.06, "end": 2826.6, "word": " 1", "probability": 0.9658203125}, {"start": 2826.6, "end": 2827.1, "word": " is", "probability": 0.57080078125}, {"start": 2827.1, "end": 2828.88, "word": " 14.", "probability": 0.92431640625}, {"start": 2829.96, "end": 2830.14, "word": " On", "probability": 0.7880859375}, {"start": 2830.14, "end": 2830.28, "word": " the", "probability": 0.9013671875}, {"start": 2830.28, "end": 2830.46, "word": " other", "probability": 0.8984375}, {"start": 2830.46, "end": 2830.8, "word": " hand", "probability": 0.91162109375}, {"start": 2830.8, "end": 2831.18, "word": " 15", "probability": 0.56103515625}, {"start": 2831.18, "end": 2831.6, "word": " plus", "probability": 0.900390625}, {"start": 2831.6, "end": 2832.54, "word": " 1", "probability": 0.95849609375}, {"start": 2832.54, "end": 2832.92, "word": " times", "probability": 0.93505859375}, {"start": 2832.92, "end": 2833.2, "word": " 1", "probability": 0.97509765625}, {"start": 2833.2, "end": 2833.32, "word": " is", "probability": 0.880859375}, {"start": 2833.32, "end": 2834.16, "word": " 16.", "probability": 0.9296875}], "temperature": 1.0}, {"id": 102, "seek": 286535, "start": 2836.79, "end": 2865.35, "text": " The problem says 68.4% of the answers is correct. Because we know that 68% of the data will fall within one standard deviation around the mean. So it's, I think, it's very quickly to get your answer rather than using the exact calculation. 
Because for the exact one, you have to do 14 minus the mean.", "tokens": [440, 1154, 1619, 23317, 13, 19, 4, 295, 264, 6338, 307, 3006, 13, 1436, 321, 458, 300, 23317, 4, 295, 264, 1412, 486, 2100, 1951, 472, 3832, 25163, 926, 264, 914, 13, 407, 309, 311, 11, 286, 519, 11, 309, 311, 588, 2661, 281, 483, 428, 1867, 2831, 813, 1228, 264, 1900, 17108, 13, 1436, 337, 264, 1900, 472, 11, 291, 362, 281, 360, 3499, 3175, 264, 914, 13], "avg_logprob": -0.17198661203895296, "compression_ratio": 1.52020202020202, "no_speech_prob": 0.0, "words": [{"start": 2836.79, "end": 2837.01, "word": " The", "probability": 0.64794921875}, {"start": 2837.01, "end": 2837.43, "word": " problem", "probability": 0.84814453125}, {"start": 2837.43, "end": 2837.89, "word": " says", "probability": 0.8857421875}, {"start": 2837.89, "end": 2838.99, "word": " 68", "probability": 0.798828125}, {"start": 2838.99, "end": 2839.35, "word": ".4", "probability": 0.6510009765625}, {"start": 2839.35, "end": 2839.81, "word": "%", "probability": 0.796875}, {"start": 2839.81, "end": 2840.19, "word": " of", "probability": 0.65869140625}, {"start": 2840.19, "end": 2840.23, "word": " the", "probability": 0.87109375}, {"start": 2840.23, "end": 2840.53, "word": " answers", "probability": 0.69580078125}, {"start": 2840.53, "end": 2840.99, "word": " is", "probability": 0.728515625}, {"start": 2840.99, "end": 2841.45, "word": " correct.", "probability": 0.90966796875}, {"start": 2842.81, "end": 2843.55, "word": " Because", "probability": 0.9169921875}, {"start": 2843.55, "end": 2843.81, "word": " we", "probability": 0.935546875}, {"start": 2843.81, "end": 2843.97, "word": " know", "probability": 0.88427734375}, {"start": 2843.97, "end": 2844.35, "word": " that", "probability": 0.93603515625}, {"start": 2844.35, "end": 2846.05, "word": " 68", "probability": 0.96240234375}, {"start": 2846.05, "end": 2846.45, "word": "%", "probability": 0.92578125}, {"start": 2846.45, "end": 2846.77, "word": " of", "probability": 
0.96044921875}, {"start": 2846.77, "end": 2846.91, "word": " the", "probability": 0.9150390625}, {"start": 2846.91, "end": 2847.33, "word": " data", "probability": 0.9462890625}, {"start": 2847.33, "end": 2847.73, "word": " will", "probability": 0.86865234375}, {"start": 2847.73, "end": 2848.01, "word": " fall", "probability": 0.83935546875}, {"start": 2848.01, "end": 2848.35, "word": " within", "probability": 0.91650390625}, {"start": 2848.35, "end": 2848.67, "word": " one", "probability": 0.88818359375}, {"start": 2848.67, "end": 2849.03, "word": " standard", "probability": 0.8681640625}, {"start": 2849.03, "end": 2849.49, "word": " deviation", "probability": 0.921875}, {"start": 2849.49, "end": 2850.59, "word": " around", "probability": 0.8896484375}, {"start": 2850.59, "end": 2850.79, "word": " the", "probability": 0.8642578125}, {"start": 2850.79, "end": 2850.93, "word": " mean.", "probability": 0.9521484375}, {"start": 2851.95, "end": 2852.39, "word": " So", "probability": 0.94140625}, {"start": 2852.39, "end": 2852.71, "word": " it's,", "probability": 0.806396484375}, {"start": 2852.97, "end": 2853.13, "word": " I", "probability": 0.99609375}, {"start": 2853.13, "end": 2853.39, "word": " think,", "probability": 0.91650390625}, {"start": 2853.91, "end": 2854.03, "word": " it's", "probability": 0.7978515625}, {"start": 2854.03, "end": 2854.21, "word": " very", "probability": 0.84716796875}, {"start": 2854.21, "end": 2854.63, "word": " quickly", "probability": 0.89599609375}, {"start": 2854.63, "end": 2855.43, "word": " to", "probability": 0.935546875}, {"start": 2855.43, "end": 2855.67, "word": " get", "probability": 0.943359375}, {"start": 2855.67, "end": 2855.97, "word": " your", "probability": 0.88037109375}, {"start": 2855.97, "end": 2856.69, "word": " answer", "probability": 0.95458984375}, {"start": 2856.69, "end": 2857.07, "word": " rather", "probability": 0.84423828125}, {"start": 2857.07, "end": 2857.37, "word": " than", "probability": 0.9482421875}, 
{"start": 2857.37, "end": 2857.79, "word": " using", "probability": 0.92529296875}, {"start": 2857.79, "end": 2858.25, "word": " the", "probability": 0.91357421875}, {"start": 2858.25, "end": 2859.25, "word": " exact", "probability": 0.95849609375}, {"start": 2859.25, "end": 2859.87, "word": " calculation.", "probability": 0.465087890625}, {"start": 2860.07, "end": 2860.45, "word": " Because", "probability": 0.9287109375}, {"start": 2860.45, "end": 2861.25, "word": " for", "probability": 0.861328125}, {"start": 2861.25, "end": 2861.41, "word": " the", "probability": 0.91796875}, {"start": 2861.41, "end": 2861.77, "word": " exact", "probability": 0.94482421875}, {"start": 2861.77, "end": 2862.01, "word": " one,", "probability": 0.93896484375}, {"start": 2862.05, "end": 2862.15, "word": " you", "probability": 0.9599609375}, {"start": 2862.15, "end": 2862.43, "word": " have", "probability": 0.9453125}, {"start": 2862.43, "end": 2862.73, "word": " to", "probability": 0.96484375}, {"start": 2862.73, "end": 2863.51, "word": " do", "probability": 0.951171875}, {"start": 2863.51, "end": 2864.47, "word": " 14", "probability": 0.94970703125}, {"start": 2864.47, "end": 2864.91, "word": " minus", "probability": 0.98583984375}, {"start": 2864.91, "end": 2865.15, "word": " the", "probability": 0.9267578125}, {"start": 2865.15, "end": 2865.35, "word": " mean.", "probability": 0.96630859375}], "temperature": 1.0}, {"id": 103, "seek": 289453, "start": 2866.59, "end": 2894.53, "text": " divided by sigma z, then 16 minus the mean divided by 1. Then use the normal theorem to use the empirical rule in this case. 
So if we select a sample of size 16, the probability that x bar lies between 14 and 16 is around 60%.", "tokens": [6666, 538, 12771, 710, 11, 550, 3165, 3175, 264, 914, 6666, 538, 502, 13, 1396, 764, 264, 2710, 264, 37956, 281, 764, 264, 31886, 4978, 294, 341, 1389, 13, 407, 498, 321, 3048, 257, 6889, 295, 2744, 3165, 11, 264, 8482, 300, 2031, 2159, 9134, 1296, 3499, 293, 3165, 307, 926, 4060, 6856], "avg_logprob": -0.2479745359332473, "compression_ratio": 1.4367088607594938, "no_speech_prob": 0.0, "words": [{"start": 2866.59, "end": 2866.97, "word": " divided", "probability": 0.133544921875}, {"start": 2866.97, "end": 2867.23, "word": " by", "probability": 0.97705078125}, {"start": 2867.23, "end": 2867.65, "word": " sigma", "probability": 0.78466796875}, {"start": 2867.65, "end": 2868.21, "word": " z,", "probability": 0.462158203125}, {"start": 2869.29, "end": 2869.75, "word": " then", "probability": 0.82080078125}, {"start": 2869.75, "end": 2870.55, "word": " 16", "probability": 0.84228515625}, {"start": 2870.55, "end": 2872.59, "word": " minus", "probability": 0.974609375}, {"start": 2872.59, "end": 2873.99, "word": " the", "probability": 0.90673828125}, {"start": 2873.99, "end": 2874.15, "word": " mean", "probability": 0.9765625}, {"start": 2874.15, "end": 2874.45, "word": " divided", "probability": 0.78125}, {"start": 2874.45, "end": 2874.69, "word": " by", "probability": 0.97119140625}, {"start": 2874.69, "end": 2875.03, "word": " 1.", "probability": 0.71826171875}, {"start": 2876.39, "end": 2876.79, "word": " Then", "probability": 0.814453125}, {"start": 2876.79, "end": 2877.17, "word": " use", "probability": 0.7587890625}, {"start": 2877.17, "end": 2877.49, "word": " the", "probability": 0.904296875}, {"start": 2877.49, "end": 2877.97, "word": " normal", "probability": 0.73046875}, {"start": 2877.97, "end": 2878.17, "word": " theorem", "probability": 0.4522705078125}, {"start": 2878.17, "end": 2878.43, "word": " to", "probability": 0.822265625}, {"start": 
2878.43, "end": 2878.79, "word": " use", "probability": 0.86474609375}, {"start": 2878.79, "end": 2879.23, "word": " the", "probability": 0.88330078125}, {"start": 2879.23, "end": 2879.67, "word": " empirical", "probability": 0.90234375}, {"start": 2879.67, "end": 2880.19, "word": " rule", "probability": 0.9130859375}, {"start": 2880.19, "end": 2880.73, "word": " in", "probability": 0.87939453125}, {"start": 2880.73, "end": 2880.93, "word": " this", "probability": 0.947265625}, {"start": 2880.93, "end": 2881.11, "word": " case.", "probability": 0.8828125}, {"start": 2883.89, "end": 2884.65, "word": " So", "probability": 0.92578125}, {"start": 2884.65, "end": 2885.31, "word": " if", "probability": 0.603515625}, {"start": 2885.31, "end": 2885.47, "word": " we", "probability": 0.85791015625}, {"start": 2885.47, "end": 2885.83, "word": " select", "probability": 0.86962890625}, {"start": 2885.83, "end": 2886.05, "word": " a", "probability": 0.99072265625}, {"start": 2886.05, "end": 2886.37, "word": " sample", "probability": 0.89599609375}, {"start": 2886.37, "end": 2886.55, "word": " of", "probability": 0.77880859375}, {"start": 2886.55, "end": 2886.77, "word": " size", "probability": 0.8505859375}, {"start": 2886.77, "end": 2887.37, "word": " 16,", "probability": 0.94775390625}, {"start": 2888.87, "end": 2889.87, "word": " the", "probability": 0.9130859375}, {"start": 2889.87, "end": 2890.31, "word": " probability", "probability": 0.947265625}, {"start": 2890.31, "end": 2890.65, "word": " that", "probability": 0.935546875}, {"start": 2890.65, "end": 2890.93, "word": " x", "probability": 0.5791015625}, {"start": 2890.93, "end": 2891.27, "word": " bar", "probability": 0.8154296875}, {"start": 2891.27, "end": 2891.71, "word": " lies", "probability": 0.95703125}, {"start": 2891.71, "end": 2892.03, "word": " between", "probability": 0.88671875}, {"start": 2892.03, "end": 2892.43, "word": " 14", "probability": 0.96044921875}, {"start": 2892.43, "end": 2892.61, "word": " 
and", "probability": 0.94189453125}, {"start": 2892.61, "end": 2893.07, "word": " 16", "probability": 0.9697265625}, {"start": 2893.07, "end": 2893.37, "word": " is", "probability": 0.9443359375}, {"start": 2893.37, "end": 2893.67, "word": " around", "probability": 0.92138671875}, {"start": 2893.67, "end": 2894.53, "word": " 60%.", "probability": 0.622314453125}], "temperature": 1.0}, {"id": 104, "seek": 292462, "start": 2900.32, "end": 2924.62, "text": " Look at number six. Again, we assume we have the same information for the mean. Mu is 15. It's the same standard deviation is 4. But here, a random sample of 64 cars selected.", "tokens": [2053, 412, 1230, 2309, 13, 3764, 11, 321, 6552, 321, 362, 264, 912, 1589, 337, 264, 914, 13, 15601, 307, 2119, 13, 467, 311, 264, 912, 3832, 25163, 307, 1017, 13, 583, 510, 11, 257, 4974, 6889, 295, 12145, 5163, 8209, 13], "avg_logprob": -0.25981104512547337, "compression_ratio": 1.3134328358208955, "no_speech_prob": 0.0, "words": [{"start": 2900.32, "end": 2900.62, "word": " Look", "probability": 0.60791015625}, {"start": 2900.62, "end": 2900.82, "word": " at", "probability": 0.9462890625}, {"start": 2900.82, "end": 2901.08, "word": " number", "probability": 0.90087890625}, {"start": 2901.08, "end": 2901.52, "word": " six.", "probability": 0.64453125}, {"start": 2903.2, "end": 2903.6, "word": " Again,", "probability": 0.9306640625}, {"start": 2903.68, "end": 2903.76, "word": " we", "probability": 0.962890625}, {"start": 2903.76, "end": 2904.34, "word": " assume", "probability": 0.89599609375}, {"start": 2904.34, "end": 2906.86, "word": " we", "probability": 0.802734375}, {"start": 2906.86, "end": 2907.32, "word": " have", "probability": 0.9482421875}, {"start": 2907.32, "end": 2907.74, "word": " the", "probability": 0.8251953125}, {"start": 2907.74, "end": 2910.28, "word": " same", "probability": 0.90185546875}, {"start": 2910.28, "end": 2911.04, "word": " information", "probability": 0.8466796875}, {"start": 2911.04, "end": 
2912.9, "word": " for", "probability": 0.857421875}, {"start": 2912.9, "end": 2913.12, "word": " the", "probability": 0.833984375}, {"start": 2913.12, "end": 2913.34, "word": " mean.", "probability": 0.86376953125}, {"start": 2913.82, "end": 2914.06, "word": " Mu", "probability": 0.3857421875}, {"start": 2914.06, "end": 2914.38, "word": " is", "probability": 0.943359375}, {"start": 2914.38, "end": 2914.92, "word": " 15.", "probability": 0.85302734375}, {"start": 2916.14, "end": 2916.38, "word": " It's", "probability": 0.754638671875}, {"start": 2916.38, "end": 2916.56, "word": " the", "probability": 0.90625}, {"start": 2916.56, "end": 2916.84, "word": " same", "probability": 0.88525390625}, {"start": 2916.84, "end": 2917.34, "word": " standard", "probability": 0.91259765625}, {"start": 2917.34, "end": 2917.8, "word": " deviation", "probability": 0.60009765625}, {"start": 2917.8, "end": 2918.1, "word": " is", "probability": 0.358154296875}, {"start": 2918.1, "end": 2918.46, "word": " 4.", "probability": 0.61376953125}, {"start": 2919.5, "end": 2919.92, "word": " But", "probability": 0.86083984375}, {"start": 2919.92, "end": 2920.2, "word": " here,", "probability": 0.76513671875}, {"start": 2921.1, "end": 2921.32, "word": " a", "probability": 0.86328125}, {"start": 2921.32, "end": 2921.64, "word": " random", "probability": 0.880859375}, {"start": 2921.64, "end": 2922.22, "word": " sample", "probability": 0.8955078125}, {"start": 2922.22, "end": 2922.74, "word": " of", "probability": 0.9677734375}, {"start": 2922.74, "end": 2923.52, "word": " 64", "probability": 0.9677734375}, {"start": 2923.52, "end": 2924.0, "word": " cars", "probability": 0.5078125}, {"start": 2924.0, "end": 2924.62, "word": " selected.", "probability": 0.771484375}], "temperature": 1.0}, {"id": 105, "seek": 295584, "start": 2926.58, "end": 2955.84, "text": " So instead of selecting 16 cars, we select a random sample of size 64. 
Now it says there is approximately 95.44% which shows that the sample mean will be between 14 and 16. So again, this probability between 14 and 16 is also", "tokens": [407, 2602, 295, 18182, 3165, 5163, 11, 321, 3048, 257, 4974, 6889, 295, 2744, 12145, 13, 823, 309, 1619, 456, 307, 10447, 13420, 13, 13912, 4, 597, 3110, 300, 264, 6889, 914, 486, 312, 1296, 3499, 293, 3165, 13, 407, 797, 11, 341, 8482, 1296, 3499, 293, 3165, 307, 611], "avg_logprob": -0.18612131768581913, "compression_ratio": 1.4303797468354431, "no_speech_prob": 0.0, "words": [{"start": 2926.58, "end": 2926.84, "word": " So", "probability": 0.76171875}, {"start": 2926.84, "end": 2927.24, "word": " instead", "probability": 0.6923828125}, {"start": 2927.24, "end": 2927.46, "word": " of", "probability": 0.958984375}, {"start": 2927.46, "end": 2927.92, "word": " selecting", "probability": 0.6572265625}, {"start": 2927.92, "end": 2928.8, "word": " 16", "probability": 0.7734375}, {"start": 2928.8, "end": 2929.3, "word": " cars,", "probability": 0.72265625}, {"start": 2930.06, "end": 2930.58, "word": " we", "probability": 0.91357421875}, {"start": 2930.58, "end": 2930.98, "word": " select", "probability": 0.787109375}, {"start": 2930.98, "end": 2931.14, "word": " a", "probability": 0.9521484375}, {"start": 2931.14, "end": 2931.42, "word": " random", "probability": 0.79833984375}, {"start": 2931.42, "end": 2931.7, "word": " sample", "probability": 0.89892578125}, {"start": 2931.7, "end": 2931.88, "word": " of", "probability": 0.91552734375}, {"start": 2931.88, "end": 2932.1, "word": " size", "probability": 0.8037109375}, {"start": 2932.1, "end": 2933.9, "word": " 64.", "probability": 0.62353515625}, {"start": 2935.76, "end": 2936.52, "word": " Now", "probability": 0.89404296875}, {"start": 2936.52, "end": 2936.98, "word": " it", "probability": 0.572265625}, {"start": 2936.98, "end": 2937.28, "word": " says", "probability": 0.884765625}, {"start": 2937.28, "end": 2937.66, "word": " there", "probability": 
0.76513671875}, {"start": 2937.66, "end": 2937.9, "word": " is", "probability": 0.8798828125}, {"start": 2937.9, "end": 2938.66, "word": " approximately", "probability": 0.8857421875}, {"start": 2938.66, "end": 2940.04, "word": " 95", "probability": 0.95556640625}, {"start": 2940.04, "end": 2943.56, "word": ".44", "probability": 0.930419921875}, {"start": 2943.56, "end": 2944.08, "word": "%", "probability": 0.548828125}, {"start": 2944.08, "end": 2945.94, "word": " which", "probability": 0.41552734375}, {"start": 2945.94, "end": 2946.24, "word": " shows", "probability": 0.619140625}, {"start": 2946.24, "end": 2946.52, "word": " that", "probability": 0.91015625}, {"start": 2946.52, "end": 2946.7, "word": " the", "probability": 0.9052734375}, {"start": 2946.7, "end": 2946.98, "word": " sample", "probability": 0.890625}, {"start": 2946.98, "end": 2947.26, "word": " mean", "probability": 0.88916015625}, {"start": 2947.26, "end": 2947.42, "word": " will", "probability": 0.81201171875}, {"start": 2947.42, "end": 2947.58, "word": " be", "probability": 0.93994140625}, {"start": 2947.58, "end": 2947.96, "word": " between", "probability": 0.8984375}, {"start": 2947.96, "end": 2948.3, "word": " 14", "probability": 0.9638671875}, {"start": 2948.3, "end": 2948.46, "word": " and", "probability": 0.92236328125}, {"start": 2948.46, "end": 2948.86, "word": " 16.", "probability": 0.9736328125}, {"start": 2949.72, "end": 2950.06, "word": " So", "probability": 0.93212890625}, {"start": 2950.06, "end": 2950.42, "word": " again,", "probability": 0.89990234375}, {"start": 2951.52, "end": 2951.76, "word": " this", "probability": 0.9453125}, {"start": 2951.76, "end": 2952.36, "word": " probability", "probability": 0.9677734375}, {"start": 2952.36, "end": 2953.44, "word": " between", "probability": 0.80322265625}, {"start": 2953.44, "end": 2954.46, "word": " 14", "probability": 0.93505859375}, {"start": 2954.46, "end": 2954.64, "word": " and", "probability": 0.93408203125}, {"start": 
2954.64, "end": 2955.0, "word": " 16", "probability": 0.97314453125}, {"start": 2955.0, "end": 2955.28, "word": " is", "probability": 0.9228515625}, {"start": 2955.28, "end": 2955.84, "word": " also", "probability": 0.87060546875}], "temperature": 1.0}, {"id": 106, "seek": 298449, "start": 2957.95, "end": 2984.49, "text": " Mu minus 2 standard deviation. And equals 9.44. Let's see if it's correct or not. Since it's mentioned that 95%, so it means that we are talking about two standard deviations. So let's just compute mu minus 2 standard deviation and plus 2 sigma x. So the mean is 15 minus plus 2 times again 1.", "tokens": [15601, 3175, 568, 3832, 25163, 13, 400, 6915, 1722, 13, 13912, 13, 961, 311, 536, 498, 309, 311, 3006, 420, 406, 13, 4162, 309, 311, 2835, 300, 13420, 8923, 370, 309, 1355, 300, 321, 366, 1417, 466, 732, 3832, 31219, 763, 13, 407, 718, 311, 445, 14722, 2992, 3175, 568, 3832, 25163, 293, 1804, 568, 12771, 2031, 13, 407, 264, 914, 307, 2119, 3175, 1804, 568, 1413, 797, 502, 13], "avg_logprob": -0.26540492202194643, "compression_ratio": 1.6243093922651934, "no_speech_prob": 0.0, "words": [{"start": 2957.95, "end": 2958.19, "word": " Mu", "probability": 0.09234619140625}, {"start": 2958.19, "end": 2958.75, "word": " minus", "probability": 0.9560546875}, {"start": 2958.75, "end": 2959.23, "word": " 2", "probability": 0.51025390625}, {"start": 2959.23, "end": 2959.53, "word": " standard", "probability": 0.67138671875}, {"start": 2959.53, "end": 2959.53, "word": " deviation.", "probability": 0.75390625}, {"start": 2959.53, "end": 2959.67, "word": " And", "probability": 0.278076171875}, {"start": 2959.67, "end": 2960.05, "word": " equals", "probability": 0.68994140625}, {"start": 2960.05, "end": 2960.39, "word": " 9", "probability": 0.75830078125}, {"start": 2960.39, "end": 2961.03, "word": ".44.", "probability": 0.6617431640625}, {"start": 2961.39, "end": 2961.91, "word": " Let's", "probability": 0.96875}, {"start": 2961.91, "end": 2962.03, "word": " 
see", "probability": 0.54248046875}, {"start": 2962.03, "end": 2962.11, "word": " if", "probability": 0.88671875}, {"start": 2962.11, "end": 2962.29, "word": " it's", "probability": 0.698486328125}, {"start": 2962.29, "end": 2962.63, "word": " correct", "probability": 0.89794921875}, {"start": 2962.63, "end": 2962.77, "word": " or", "probability": 0.93896484375}, {"start": 2962.77, "end": 2962.95, "word": " not.", "probability": 0.92333984375}, {"start": 2963.89, "end": 2964.25, "word": " Since", "probability": 0.841796875}, {"start": 2964.25, "end": 2964.53, "word": " it's", "probability": 0.765380859375}, {"start": 2964.53, "end": 2964.95, "word": " mentioned", "probability": 0.82666015625}, {"start": 2964.95, "end": 2965.29, "word": " that", "probability": 0.78564453125}, {"start": 2965.29, "end": 2966.63, "word": " 95%,", "probability": 0.84765625}, {"start": 2966.63, "end": 2967.39, "word": " so", "probability": 0.82421875}, {"start": 2967.39, "end": 2967.55, "word": " it", "probability": 0.9091796875}, {"start": 2967.55, "end": 2967.87, "word": " means", "probability": 0.9296875}, {"start": 2967.87, "end": 2968.13, "word": " that", "probability": 0.93115234375}, {"start": 2968.13, "end": 2968.25, "word": " we", "probability": 0.9326171875}, {"start": 2968.25, "end": 2968.37, "word": " are", "probability": 0.912109375}, {"start": 2968.37, "end": 2968.69, "word": " talking", "probability": 0.8447265625}, {"start": 2968.69, "end": 2968.97, "word": " about", "probability": 0.90283203125}, {"start": 2968.97, "end": 2969.17, "word": " two", "probability": 0.6767578125}, {"start": 2969.17, "end": 2969.53, "word": " standard", "probability": 0.78173828125}, {"start": 2969.53, "end": 2970.35, "word": " deviations.", "probability": 0.9072265625}, {"start": 2970.87, "end": 2971.07, "word": " So", "probability": 0.91650390625}, {"start": 2971.07, "end": 2971.45, "word": " let's", "probability": 0.88427734375}, {"start": 2971.45, "end": 2971.77, "word": " just", 
"probability": 0.8974609375}, {"start": 2971.77, "end": 2972.15, "word": " compute", "probability": 0.8251953125}, {"start": 2972.15, "end": 2972.49, "word": " mu", "probability": 0.397705078125}, {"start": 2972.49, "end": 2973.25, "word": " minus", "probability": 0.9873046875}, {"start": 2973.25, "end": 2973.67, "word": " 2", "probability": 0.5341796875}, {"start": 2973.67, "end": 2974.01, "word": " standard", "probability": 0.8193359375}, {"start": 2974.01, "end": 2974.63, "word": " deviation", "probability": 0.84375}, {"start": 2974.63, "end": 2975.53, "word": " and", "probability": 0.496337890625}, {"start": 2975.53, "end": 2975.95, "word": " plus", "probability": 0.73193359375}, {"start": 2975.95, "end": 2976.91, "word": " 2", "probability": 0.7294921875}, {"start": 2976.91, "end": 2977.25, "word": " sigma", "probability": 0.90283203125}, {"start": 2977.25, "end": 2977.53, "word": " x.", "probability": 0.245849609375}, {"start": 2978.33, "end": 2979.01, "word": " So", "probability": 0.955078125}, {"start": 2979.01, "end": 2979.17, "word": " the", "probability": 0.86181640625}, {"start": 2979.17, "end": 2979.31, "word": " mean", "probability": 0.966796875}, {"start": 2979.31, "end": 2979.47, "word": " is", "probability": 0.94970703125}, {"start": 2979.47, "end": 2979.93, "word": " 15", "probability": 0.97119140625}, {"start": 2979.93, "end": 2982.15, "word": " minus", "probability": 0.80419921875}, {"start": 2982.15, "end": 2982.65, "word": " plus", "probability": 0.7490234375}, {"start": 2982.65, "end": 2983.09, "word": " 2", "probability": 0.94140625}, {"start": 2983.09, "end": 2983.51, "word": " times", "probability": 0.94287109375}, {"start": 2983.51, "end": 2984.17, "word": " again", "probability": 0.77392578125}, {"start": 2984.17, "end": 2984.49, "word": " 1.", "probability": 0.896484375}], "temperature": 1.0}, {"id": 107, "seek": 301481, "start": 2985.73, "end": 3014.81, "text": " One is not one because sigma of x bar was one because n was 16. 
Now my new sigma equal one-half because sigma over root n. Now sigma again the same value four divided by 64 over eight so that's one-half. So this one should be two times", "tokens": [1485, 307, 406, 472, 570, 12771, 295, 2031, 2159, 390, 472, 570, 297, 390, 3165, 13, 823, 452, 777, 12771, 2681, 472, 12, 25461, 570, 12771, 670, 5593, 297, 13, 823, 12771, 797, 264, 912, 2158, 1451, 6666, 538, 12145, 670, 3180, 370, 300, 311, 472, 12, 25461, 13, 407, 341, 472, 820, 312, 732, 1413], "avg_logprob": -0.2837171136287221, "compression_ratio": 1.5526315789473684, "no_speech_prob": 0.0, "words": [{"start": 2985.7300000000005, "end": 2986.4500000000003, "word": " One", "probability": 0.252197265625}, {"start": 2986.4500000000003, "end": 2987.17, "word": " is", "probability": 0.73779296875}, {"start": 2987.17, "end": 2987.41, "word": " not", "probability": 0.9443359375}, {"start": 2987.41, "end": 2987.65, "word": " one", "probability": 0.88134765625}, {"start": 2987.65, "end": 2988.19, "word": " because", "probability": 0.6142578125}, {"start": 2988.19, "end": 2989.05, "word": " sigma", "probability": 0.55029296875}, {"start": 2989.05, "end": 2989.27, "word": " of", "probability": 0.87451171875}, {"start": 2989.27, "end": 2989.51, "word": " x", "probability": 0.806640625}, {"start": 2989.51, "end": 2989.83, "word": " bar", "probability": 0.8330078125}, {"start": 2989.83, "end": 2991.45, "word": " was", "probability": 0.84912109375}, {"start": 2991.45, "end": 2991.77, "word": " one", "probability": 0.8779296875}, {"start": 2991.77, "end": 2993.35, "word": " because", "probability": 0.6416015625}, {"start": 2993.35, "end": 2993.69, "word": " n", "probability": 0.8466796875}, {"start": 2993.69, "end": 2993.91, "word": " was", "probability": 0.9384765625}, {"start": 2993.91, "end": 2994.49, "word": " 16.", "probability": 0.466552734375}, {"start": 2994.89, "end": 2995.09, "word": " Now", "probability": 0.95068359375}, {"start": 2995.09, "end": 2995.43, "word": " my", 
"probability": 0.72509765625}, {"start": 2995.43, "end": 2995.67, "word": " new", "probability": 0.8876953125}, {"start": 2995.67, "end": 2996.11, "word": " sigma", "probability": 0.93994140625}, {"start": 2996.11, "end": 2998.03, "word": " equal", "probability": 0.564453125}, {"start": 2998.03, "end": 2998.25, "word": " one", "probability": 0.759765625}, {"start": 2998.25, "end": 2998.51, "word": "-half", "probability": 0.6844482421875}, {"start": 2998.51, "end": 2999.31, "word": " because", "probability": 0.71240234375}, {"start": 2999.31, "end": 2999.59, "word": " sigma", "probability": 0.92822265625}, {"start": 2999.59, "end": 2999.81, "word": " over", "probability": 0.8349609375}, {"start": 2999.81, "end": 3000.05, "word": " root", "probability": 0.90234375}, {"start": 3000.05, "end": 3000.37, "word": " n.", "probability": 0.90185546875}, {"start": 3001.45, "end": 3001.73, "word": " Now", "probability": 0.90283203125}, {"start": 3001.73, "end": 3002.27, "word": " sigma", "probability": 0.73095703125}, {"start": 3002.27, "end": 3003.61, "word": " again", "probability": 0.60107421875}, {"start": 3003.61, "end": 3004.05, "word": " the", "probability": 0.49951171875}, {"start": 3004.05, "end": 3004.29, "word": " same", "probability": 0.90234375}, {"start": 3004.29, "end": 3004.59, "word": " value", "probability": 0.91162109375}, {"start": 3004.59, "end": 3004.95, "word": " four", "probability": 0.466796875}, {"start": 3004.95, "end": 3007.19, "word": " divided", "probability": 0.427978515625}, {"start": 3007.19, "end": 3007.55, "word": " by", "probability": 0.96484375}, {"start": 3007.55, "end": 3008.19, "word": " 64", "probability": 0.8505859375}, {"start": 3008.19, "end": 3010.07, "word": " over", "probability": 0.88232421875}, {"start": 3010.07, "end": 3010.47, "word": " eight", "probability": 0.71142578125}, {"start": 3010.47, "end": 3010.73, "word": " so", "probability": 0.364501953125}, {"start": 3010.73, "end": 3011.43, "word": " that's", "probability": 
0.87060546875}, {"start": 3011.43, "end": 3011.81, "word": " one", "probability": 0.935546875}, {"start": 3011.81, "end": 3012.13, "word": "-half.", "probability": 0.875244140625}, {"start": 3012.67, "end": 3013.05, "word": " So", "probability": 0.935546875}, {"start": 3013.05, "end": 3013.27, "word": " this", "probability": 0.91259765625}, {"start": 3013.27, "end": 3013.45, "word": " one", "probability": 0.91796875}, {"start": 3013.45, "end": 3013.65, "word": " should", "probability": 0.96875}, {"start": 3013.65, "end": 3013.97, "word": " be", "probability": 0.95166015625}, {"start": 3013.97, "end": 3014.39, "word": " two", "probability": 0.900390625}, {"start": 3014.39, "end": 3014.81, "word": " times", "probability": 0.927734375}], "temperature": 1.0}, {"id": 108, "seek": 304296, "start": 3020.34, "end": 3042.96, "text": " So that will give 15 minus 1, 14. 15 plus 1 is 16. The probability between 14 and 16, I mean X bar lies between 14 and 16, equals 95%. And we just change some information a little bit.", "tokens": [407, 300, 486, 976, 2119, 3175, 502, 11, 3499, 13, 2119, 1804, 502, 307, 3165, 13, 440, 8482, 1296, 3499, 293, 3165, 11, 286, 914, 1783, 2159, 9134, 1296, 3499, 293, 3165, 11, 6915, 13420, 6856, 400, 321, 445, 1319, 512, 1589, 257, 707, 857, 13], "avg_logprob": -0.20378989425111324, "compression_ratio": 1.330935251798561, "no_speech_prob": 0.0, "words": [{"start": 3020.34, "end": 3020.98, "word": " So", "probability": 0.269287109375}, {"start": 3020.98, "end": 3021.62, "word": " that", "probability": 0.75830078125}, {"start": 3021.62, "end": 3021.78, "word": " will", "probability": 0.8349609375}, {"start": 3021.78, "end": 3022.08, "word": " give", "probability": 0.79443359375}, {"start": 3022.08, "end": 3022.78, "word": " 15", "probability": 0.828125}, {"start": 3022.78, "end": 3023.04, "word": " minus", "probability": 0.80712890625}, {"start": 3023.04, "end": 3023.44, "word": " 1,", "probability": 0.80908203125}, {"start": 3023.66, "end": 3024.16, 
"word": " 14.", "probability": 0.91552734375}, {"start": 3025.0, "end": 3025.46, "word": " 15", "probability": 0.828125}, {"start": 3025.46, "end": 3025.74, "word": " plus", "probability": 0.9482421875}, {"start": 3025.74, "end": 3026.1, "word": " 1", "probability": 0.951171875}, {"start": 3026.1, "end": 3026.34, "word": " is", "probability": 0.888671875}, {"start": 3026.34, "end": 3026.82, "word": " 16.", "probability": 0.97216796875}, {"start": 3030.48, "end": 3031.12, "word": " The", "probability": 0.8134765625}, {"start": 3031.12, "end": 3031.66, "word": " probability", "probability": 0.95166015625}, {"start": 3031.66, "end": 3033.14, "word": " between", "probability": 0.87158203125}, {"start": 3033.14, "end": 3033.6, "word": " 14", "probability": 0.97119140625}, {"start": 3033.6, "end": 3033.8, "word": " and", "probability": 0.943359375}, {"start": 3033.8, "end": 3034.32, "word": " 16,", "probability": 0.97021484375}, {"start": 3034.46, "end": 3034.56, "word": " I", "probability": 0.9365234375}, {"start": 3034.56, "end": 3034.72, "word": " mean", "probability": 0.97021484375}, {"start": 3034.72, "end": 3034.92, "word": " X", "probability": 0.4619140625}, {"start": 3034.92, "end": 3035.18, "word": " bar", "probability": 0.78466796875}, {"start": 3035.18, "end": 3035.46, "word": " lies", "probability": 0.888671875}, {"start": 3035.46, "end": 3035.66, "word": " between", "probability": 0.87158203125}, {"start": 3035.66, "end": 3035.86, "word": " 14", "probability": 0.91796875}, {"start": 3035.86, "end": 3035.98, "word": " and", "probability": 0.94091796875}, {"start": 3035.98, "end": 3036.3, "word": " 16,", "probability": 0.9697265625}, {"start": 3036.34, "end": 3036.74, "word": " equals", "probability": 0.90673828125}, {"start": 3036.74, "end": 3038.06, "word": " 95%.", "probability": 0.85498046875}, {"start": 3038.06, "end": 3040.28, "word": " And", "probability": 0.9423828125}, {"start": 3040.28, "end": 3040.48, "word": " we", "probability": 0.94677734375}, 
{"start": 3040.48, "end": 3040.84, "word": " just", "probability": 0.888671875}, {"start": 3040.84, "end": 3041.48, "word": " change", "probability": 0.5234375}, {"start": 3041.48, "end": 3041.8, "word": " some", "probability": 0.89892578125}, {"start": 3041.8, "end": 3042.28, "word": " information", "probability": 0.8388671875}, {"start": 3042.28, "end": 3042.52, "word": " a", "probability": 0.7841796875}, {"start": 3042.52, "end": 3042.64, "word": " little", "probability": 0.8623046875}, {"start": 3042.64, "end": 3042.96, "word": " bit.", "probability": 0.9580078125}], "temperature": 1.0}, {"id": 109, "seek": 306991, "start": 3044.23, "end": 3069.91, "text": " But the answer was the probability between X bar, the probability that X bar lies between 14 and 16 is around 68%. Now what's the difference between number 5 and 6? N is large. As N increases, standard deviation decreases. Standard deviation decreases. If you look here, sigma X bar was 1.", "tokens": [583, 264, 1867, 390, 264, 8482, 1296, 1783, 2159, 11, 264, 8482, 300, 1783, 2159, 9134, 1296, 3499, 293, 3165, 307, 926, 23317, 6856, 823, 437, 311, 264, 2649, 1296, 1230, 1025, 293, 1386, 30, 426, 307, 2416, 13, 1018, 426, 8637, 11, 3832, 25163, 24108, 13, 21298, 25163, 24108, 13, 759, 291, 574, 510, 11, 12771, 1783, 2159, 390, 502, 13], "avg_logprob": -0.20002479780287968, "compression_ratio": 1.6111111111111112, "no_speech_prob": 0.0, "words": [{"start": 3044.23, "end": 3044.71, "word": " But", "probability": 0.734375}, {"start": 3044.71, "end": 3045.21, "word": " the", "probability": 0.74560546875}, {"start": 3045.21, "end": 3045.65, "word": " answer", "probability": 0.9521484375}, {"start": 3045.65, "end": 3046.13, "word": " was", "probability": 0.93798828125}, {"start": 3046.13, "end": 3046.69, "word": " the", "probability": 0.55419921875}, {"start": 3046.69, "end": 3047.03, "word": " probability", "probability": 0.7529296875}, {"start": 3047.03, "end": 3047.33, "word": " between", "probability": 
0.5078125}, {"start": 3047.33, "end": 3047.51, "word": " X", "probability": 0.591796875}, {"start": 3047.51, "end": 3047.79, "word": " bar,", "probability": 0.8095703125}, {"start": 3048.17, "end": 3048.45, "word": " the", "probability": 0.85595703125}, {"start": 3048.45, "end": 3048.71, "word": " probability", "probability": 0.9501953125}, {"start": 3048.71, "end": 3048.89, "word": " that", "probability": 0.7802734375}, {"start": 3048.89, "end": 3049.07, "word": " X", "probability": 0.97900390625}, {"start": 3049.07, "end": 3049.23, "word": " bar", "probability": 0.9521484375}, {"start": 3049.23, "end": 3049.45, "word": " lies", "probability": 0.97998046875}, {"start": 3049.45, "end": 3050.03, "word": " between", "probability": 0.89111328125}, {"start": 3050.03, "end": 3051.03, "word": " 14", "probability": 0.88037109375}, {"start": 3051.03, "end": 3051.19, "word": " and", "probability": 0.88134765625}, {"start": 3051.19, "end": 3051.51, "word": " 16", "probability": 0.966796875}, {"start": 3051.51, "end": 3051.79, "word": " is", "probability": 0.8193359375}, {"start": 3051.79, "end": 3052.15, "word": " around", "probability": 0.92138671875}, {"start": 3052.15, "end": 3053.29, "word": " 68%.", "probability": 0.837890625}, {"start": 3053.29, "end": 3055.29, "word": " Now", "probability": 0.9453125}, {"start": 3055.29, "end": 3055.59, "word": " what's", "probability": 0.77587890625}, {"start": 3055.59, "end": 3055.71, "word": " the", "probability": 0.9208984375}, {"start": 3055.71, "end": 3056.11, "word": " difference", "probability": 0.8603515625}, {"start": 3056.11, "end": 3056.69, "word": " between", "probability": 0.85693359375}, {"start": 3056.69, "end": 3058.19, "word": " number", "probability": 0.88037109375}, {"start": 3058.19, "end": 3058.63, "word": " 5", "probability": 0.64892578125}, {"start": 3058.63, "end": 3059.19, "word": " and", "probability": 0.93701171875}, {"start": 3059.19, "end": 3059.55, "word": " 6?", "probability": 0.99072265625}, {"start": 
3059.63, "end": 3059.75, "word": " N", "probability": 0.54638671875}, {"start": 3059.75, "end": 3060.03, "word": " is", "probability": 0.923828125}, {"start": 3060.03, "end": 3060.27, "word": " large.", "probability": 0.94287109375}, {"start": 3061.71, "end": 3062.37, "word": " As", "probability": 0.51806640625}, {"start": 3062.37, "end": 3062.59, "word": " N", "probability": 0.8994140625}, {"start": 3062.59, "end": 3063.03, "word": " increases,", "probability": 0.9404296875}, {"start": 3064.53, "end": 3064.81, "word": " standard", "probability": 0.73291015625}, {"start": 3064.81, "end": 3065.21, "word": " deviation", "probability": 0.92236328125}, {"start": 3065.21, "end": 3065.63, "word": " decreases.", "probability": 0.90966796875}, {"start": 3065.97, "end": 3066.37, "word": " Standard", "probability": 0.548828125}, {"start": 3066.37, "end": 3066.79, "word": " deviation", "probability": 0.939453125}, {"start": 3066.79, "end": 3067.21, "word": " decreases.", "probability": 0.966796875}, {"start": 3067.41, "end": 3067.51, "word": " If", "probability": 0.9189453125}, {"start": 3067.51, "end": 3067.57, "word": " you", "probability": 0.84619140625}, {"start": 3067.57, "end": 3067.77, "word": " look", "probability": 0.96728515625}, {"start": 3067.77, "end": 3068.05, "word": " here,", "probability": 0.85009765625}, {"start": 3068.21, "end": 3068.53, "word": " sigma", "probability": 0.666015625}, {"start": 3068.53, "end": 3069.11, "word": " X", "probability": 0.70849609375}, {"start": 3069.11, "end": 3069.39, "word": " bar", "probability": 0.96240234375}, {"start": 3069.39, "end": 3069.67, "word": " was", "probability": 0.9296875}, {"start": 3069.67, "end": 3069.91, "word": " 1.", "probability": 0.7685546875}], "temperature": 1.0}, {"id": 110, "seek": 309812, "start": 3070.56, "end": 3098.12, "text": " Then my new sigma of x bar is one half. As we mentioned before, as n increases, sigma of x bar decreases. In this case, we have larger probability. 
So if we increase the sample size, then the probability that x bar is between 14 and 16 will increase. So this proportion is increased around 30 or around 27%.", "tokens": [1396, 452, 777, 12771, 295, 2031, 2159, 307, 472, 1922, 13, 1018, 321, 2835, 949, 11, 382, 297, 8637, 11, 12771, 295, 2031, 2159, 24108, 13, 682, 341, 1389, 11, 321, 362, 4833, 8482, 13, 407, 498, 321, 3488, 264, 6889, 2744, 11, 550, 264, 8482, 300, 2031, 2159, 307, 1296, 3499, 293, 3165, 486, 3488, 13, 407, 341, 16068, 307, 6505, 926, 2217, 420, 926, 7634, 6856], "avg_logprob": -0.16394927017930624, "compression_ratio": 1.6296296296296295, "no_speech_prob": 0.0, "words": [{"start": 3070.56, "end": 3071.0, "word": " Then", "probability": 0.27978515625}, {"start": 3071.0, "end": 3071.42, "word": " my", "probability": 0.76025390625}, {"start": 3071.42, "end": 3071.72, "word": " new", "probability": 0.5986328125}, {"start": 3071.72, "end": 3071.98, "word": " sigma", "probability": 0.86865234375}, {"start": 3071.98, "end": 3072.12, "word": " of", "probability": 0.56884765625}, {"start": 3072.12, "end": 3072.24, "word": " x", "probability": 0.6630859375}, {"start": 3072.24, "end": 3072.4, "word": " bar", "probability": 0.81494140625}, {"start": 3072.4, "end": 3072.58, "word": " is", "probability": 0.9072265625}, {"start": 3072.58, "end": 3072.76, "word": " one", "probability": 0.7587890625}, {"start": 3072.76, "end": 3073.06, "word": " half.", "probability": 0.52978515625}, {"start": 3073.2, "end": 3073.34, "word": " As", "probability": 0.966796875}, {"start": 3073.34, "end": 3073.44, "word": " we", "probability": 0.95751953125}, {"start": 3073.44, "end": 3073.72, "word": " mentioned", "probability": 0.82421875}, {"start": 3073.72, "end": 3074.14, "word": " before,", "probability": 0.8515625}, {"start": 3074.52, "end": 3074.74, "word": " as", "probability": 0.94921875}, {"start": 3074.74, "end": 3074.9, "word": " n", "probability": 0.80810546875}, {"start": 3074.9, "end": 3075.36, "word": " 
increases,", "probability": 0.9453125}, {"start": 3075.54, "end": 3075.78, "word": " sigma", "probability": 0.923828125}, {"start": 3075.78, "end": 3075.94, "word": " of", "probability": 0.91845703125}, {"start": 3075.94, "end": 3076.06, "word": " x", "probability": 0.9970703125}, {"start": 3076.06, "end": 3076.26, "word": " bar", "probability": 0.95556640625}, {"start": 3076.26, "end": 3076.7, "word": " decreases.", "probability": 0.96435546875}, {"start": 3078.44, "end": 3079.08, "word": " In", "probability": 0.92626953125}, {"start": 3079.08, "end": 3079.42, "word": " this", "probability": 0.9443359375}, {"start": 3079.42, "end": 3079.92, "word": " case,", "probability": 0.91357421875}, {"start": 3080.12, "end": 3080.4, "word": " we", "probability": 0.95556640625}, {"start": 3080.4, "end": 3081.12, "word": " have", "probability": 0.94775390625}, {"start": 3081.12, "end": 3081.76, "word": " larger", "probability": 0.48583984375}, {"start": 3081.76, "end": 3082.14, "word": " probability.", "probability": 0.95703125}, {"start": 3083.0, "end": 3083.56, "word": " So", "probability": 0.95458984375}, {"start": 3083.56, "end": 3083.76, "word": " if", "probability": 0.85009765625}, {"start": 3083.76, "end": 3083.94, "word": " we", "probability": 0.95654296875}, {"start": 3083.94, "end": 3084.38, "word": " increase", "probability": 0.8681640625}, {"start": 3084.38, "end": 3084.6, "word": " the", "probability": 0.9189453125}, {"start": 3084.6, "end": 3084.88, "word": " sample", "probability": 0.87255859375}, {"start": 3084.88, "end": 3085.46, "word": " size,", "probability": 0.8583984375}, {"start": 3086.16, "end": 3086.44, "word": " then", "probability": 0.8525390625}, {"start": 3086.44, "end": 3086.66, "word": " the", "probability": 0.9189453125}, {"start": 3086.66, "end": 3087.1, "word": " probability", "probability": 0.95166015625}, {"start": 3087.1, "end": 3087.48, "word": " that", "probability": 0.9296875}, {"start": 3087.48, "end": 3087.8, "word": " x", 
"probability": 0.98828125}, {"start": 3087.8, "end": 3088.24, "word": " bar", "probability": 0.94580078125}, {"start": 3088.24, "end": 3088.66, "word": " is", "probability": 0.9443359375}, {"start": 3088.66, "end": 3089.24, "word": " between", "probability": 0.88525390625}, {"start": 3089.24, "end": 3090.58, "word": " 14", "probability": 0.9404296875}, {"start": 3090.58, "end": 3090.76, "word": " and", "probability": 0.919921875}, {"start": 3090.76, "end": 3091.4, "word": " 16", "probability": 0.974609375}, {"start": 3091.4, "end": 3091.9, "word": " will", "probability": 0.63037109375}, {"start": 3091.9, "end": 3093.2, "word": " increase.", "probability": 0.85107421875}, {"start": 3093.54, "end": 3093.74, "word": " So", "probability": 0.93505859375}, {"start": 3093.74, "end": 3093.98, "word": " this", "probability": 0.91796875}, {"start": 3093.98, "end": 3094.46, "word": " proportion", "probability": 0.82470703125}, {"start": 3094.46, "end": 3094.7, "word": " is", "probability": 0.6171875}, {"start": 3094.7, "end": 3095.04, "word": " increased", "probability": 0.91748046875}, {"start": 3095.04, "end": 3095.42, "word": " around", "probability": 0.912109375}, {"start": 3095.42, "end": 3096.6, "word": " 30", "probability": 0.94482421875}, {"start": 3096.6, "end": 3096.88, "word": " or", "probability": 0.62353515625}, {"start": 3096.88, "end": 3097.2, "word": " around", "probability": 0.92578125}, {"start": 3097.2, "end": 3098.12, "word": " 27%.", "probability": 0.841796875}], "temperature": 1.0}, {"id": 111, "seek": 312829, "start": 3099.45, "end": 3128.29, "text": " Because you just increase the sample size from 16 all the way up to 64. So since, as we know, as the sample size increases, sigma of X bar decreases. So we are more sure that X bar lies between 14 and 16. 
The previous one, the chance that, or the probability that X bar lies between 14 and 16 is around two-third.", "tokens": [1436, 291, 445, 3488, 264, 6889, 2744, 490, 3165, 439, 264, 636, 493, 281, 12145, 13, 407, 1670, 11, 382, 321, 458, 11, 382, 264, 6889, 2744, 8637, 11, 12771, 295, 1783, 2159, 24108, 13, 407, 321, 366, 544, 988, 300, 1783, 2159, 9134, 1296, 3499, 293, 3165, 13, 440, 3894, 472, 11, 264, 2931, 300, 11, 420, 264, 8482, 300, 1783, 2159, 9134, 1296, 3499, 293, 3165, 307, 926, 732, 12, 25095, 13], "avg_logprob": -0.16197916706403095, "compression_ratio": 1.6526315789473685, "no_speech_prob": 0.0, "words": [{"start": 3099.45, "end": 3099.79, "word": " Because", "probability": 0.5361328125}, {"start": 3099.79, "end": 3099.95, "word": " you", "probability": 0.75537109375}, {"start": 3099.95, "end": 3100.21, "word": " just", "probability": 0.90185546875}, {"start": 3100.21, "end": 3100.63, "word": " increase", "probability": 0.69921875}, {"start": 3100.63, "end": 3100.79, "word": " the", "probability": 0.916015625}, {"start": 3100.79, "end": 3101.01, "word": " sample", "probability": 0.869140625}, {"start": 3101.01, "end": 3101.47, "word": " size", "probability": 0.8349609375}, {"start": 3101.47, "end": 3102.55, "word": " from", "probability": 0.86767578125}, {"start": 3102.55, "end": 3103.21, "word": " 16", "probability": 0.916015625}, {"start": 3103.21, "end": 3103.95, "word": " all", "probability": 0.84326171875}, {"start": 3103.95, "end": 3104.13, "word": " the", "probability": 0.91845703125}, {"start": 3104.13, "end": 3104.27, "word": " way", "probability": 0.951171875}, {"start": 3104.27, "end": 3104.53, "word": " up", "probability": 0.95068359375}, {"start": 3104.53, "end": 3104.81, "word": " to", "probability": 0.96728515625}, {"start": 3104.81, "end": 3105.63, "word": " 64.", "probability": 0.97412109375}, {"start": 3106.51, "end": 3106.95, "word": " So", "probability": 0.93408203125}, {"start": 3106.95, "end": 3107.37, "word": " since,", 
"probability": 0.6796875}, {"start": 3107.93, "end": 3108.11, "word": " as", "probability": 0.9638671875}, {"start": 3108.11, "end": 3108.23, "word": " we", "probability": 0.92431640625}, {"start": 3108.23, "end": 3108.35, "word": " know,", "probability": 0.884765625}, {"start": 3108.45, "end": 3108.53, "word": " as", "probability": 0.91162109375}, {"start": 3108.53, "end": 3108.69, "word": " the", "probability": 0.9150390625}, {"start": 3108.69, "end": 3108.95, "word": " sample", "probability": 0.87255859375}, {"start": 3108.95, "end": 3109.37, "word": " size", "probability": 0.869140625}, {"start": 3109.37, "end": 3110.05, "word": " increases,", "probability": 0.9384765625}, {"start": 3110.77, "end": 3111.07, "word": " sigma", "probability": 0.8203125}, {"start": 3111.07, "end": 3111.27, "word": " of", "probability": 0.89697265625}, {"start": 3111.27, "end": 3111.45, "word": " X", "probability": 0.56884765625}, {"start": 3111.45, "end": 3111.63, "word": " bar", "probability": 0.86279296875}, {"start": 3111.63, "end": 3112.11, "word": " decreases.", "probability": 0.9560546875}, {"start": 3112.59, "end": 3112.93, "word": " So", "probability": 0.96630859375}, {"start": 3112.93, "end": 3113.21, "word": " we", "probability": 0.8720703125}, {"start": 3113.21, "end": 3113.59, "word": " are", "probability": 0.94140625}, {"start": 3113.59, "end": 3114.43, "word": " more", "probability": 0.93212890625}, {"start": 3114.43, "end": 3114.73, "word": " sure", "probability": 0.91748046875}, {"start": 3114.73, "end": 3115.21, "word": " that", "probability": 0.9296875}, {"start": 3115.21, "end": 3116.09, "word": " X", "probability": 0.97802734375}, {"start": 3116.09, "end": 3116.45, "word": " bar", "probability": 0.9462890625}, {"start": 3116.45, "end": 3116.85, "word": " lies", "probability": 0.93359375}, {"start": 3116.85, "end": 3117.17, "word": " between", "probability": 0.8759765625}, {"start": 3117.17, "end": 3117.49, "word": " 14", "probability": 0.9052734375}, {"start": 
3117.49, "end": 3117.65, "word": " and", "probability": 0.93994140625}, {"start": 3117.65, "end": 3118.09, "word": " 16.", "probability": 0.96826171875}, {"start": 3119.57, "end": 3120.05, "word": " The", "probability": 0.8095703125}, {"start": 3120.05, "end": 3120.33, "word": " previous", "probability": 0.82275390625}, {"start": 3120.33, "end": 3120.71, "word": " one,", "probability": 0.92724609375}, {"start": 3121.25, "end": 3121.43, "word": " the", "probability": 0.89892578125}, {"start": 3121.43, "end": 3122.03, "word": " chance", "probability": 0.95947265625}, {"start": 3122.03, "end": 3122.47, "word": " that,", "probability": 0.33935546875}, {"start": 3123.31, "end": 3123.53, "word": " or", "probability": 0.96337890625}, {"start": 3123.53, "end": 3123.67, "word": " the", "probability": 0.91455078125}, {"start": 3123.67, "end": 3124.09, "word": " probability", "probability": 0.958984375}, {"start": 3124.09, "end": 3124.47, "word": " that", "probability": 0.91162109375}, {"start": 3124.47, "end": 3125.25, "word": " X", "probability": 0.76123046875}, {"start": 3125.25, "end": 3125.59, "word": " bar", "probability": 0.931640625}, {"start": 3125.59, "end": 3125.93, "word": " lies", "probability": 0.93408203125}, {"start": 3125.93, "end": 3126.25, "word": " between", "probability": 0.88134765625}, {"start": 3126.25, "end": 3126.73, "word": " 14", "probability": 0.96533203125}, {"start": 3126.73, "end": 3126.87, "word": " and", "probability": 0.93701171875}, {"start": 3126.87, "end": 3127.31, "word": " 16", "probability": 0.96923828125}, {"start": 3127.31, "end": 3127.55, "word": " is", "probability": 0.8330078125}, {"start": 3127.55, "end": 3127.83, "word": " around", "probability": 0.92529296875}, {"start": 3127.83, "end": 3128.05, "word": " two", "probability": 0.662109375}, {"start": 3128.05, "end": 3128.29, "word": "-third.", "probability": 0.6513671875}], "temperature": 1.0}, {"id": 112, "seek": 315549, "start": 3129.15, "end": 3155.49, "text": " around 68%. 
But here, for the same kind of probability, it's equal around 95%, because you increase the sample size. Any question? Let's do one more. Suppose", "tokens": [926, 23317, 6856, 583, 510, 11, 337, 264, 912, 733, 295, 8482, 11, 309, 311, 2681, 926, 13420, 8923, 570, 291, 3488, 264, 6889, 2744, 13, 2639, 1168, 30, 961, 311, 360, 472, 544, 13, 21360], "avg_logprob": -0.22318411839974894, "compression_ratio": 1.2061068702290076, "no_speech_prob": 0.0, "words": [{"start": 3129.15, "end": 3129.45, "word": " around", "probability": 0.501953125}, {"start": 3129.45, "end": 3130.51, "word": " 68%.", "probability": 0.5177001953125}, {"start": 3130.51, "end": 3131.65, "word": " But", "probability": 0.91796875}, {"start": 3131.65, "end": 3132.01, "word": " here,", "probability": 0.841796875}, {"start": 3132.37, "end": 3132.51, "word": " for", "probability": 0.9375}, {"start": 3132.51, "end": 3132.73, "word": " the", "probability": 0.92041015625}, {"start": 3132.73, "end": 3133.09, "word": " same", "probability": 0.921875}, {"start": 3133.09, "end": 3133.93, "word": " kind", "probability": 0.849609375}, {"start": 3133.93, "end": 3134.07, "word": " of", "probability": 0.97119140625}, {"start": 3134.07, "end": 3134.53, "word": " probability,", "probability": 0.97021484375}, {"start": 3135.01, "end": 3135.39, "word": " it's", "probability": 0.9580078125}, {"start": 3135.39, "end": 3135.77, "word": " equal", "probability": 0.90576171875}, {"start": 3135.77, "end": 3136.23, "word": " around", "probability": 0.82373046875}, {"start": 3136.23, "end": 3136.99, "word": " 95%,", "probability": 0.68505859375}, {"start": 3136.99, "end": 3138.11, "word": " because", "probability": 0.89404296875}, {"start": 3138.11, "end": 3138.29, "word": " you", "probability": 0.95361328125}, {"start": 3138.29, "end": 3138.77, "word": " increase", "probability": 0.64208984375}, {"start": 3138.77, "end": 3139.19, "word": " the", "probability": 0.92333984375}, {"start": 3139.19, "end": 3139.71, "word": " 
sample", "probability": 0.93994140625}, {"start": 3139.71, "end": 3140.13, "word": " size.", "probability": 0.849609375}, {"start": 3143.67, "end": 3144.61, "word": " Any", "probability": 0.91162109375}, {"start": 3144.61, "end": 3144.97, "word": " question?", "probability": 0.5283203125}, {"start": 3147.19, "end": 3147.95, "word": " Let's", "probability": 0.963623046875}, {"start": 3147.95, "end": 3148.51, "word": " do", "probability": 0.9658203125}, {"start": 3148.51, "end": 3149.87, "word": " one", "probability": 0.9287109375}, {"start": 3149.87, "end": 3150.21, "word": " more.", "probability": 0.939453125}, {"start": 3154.55, "end": 3155.49, "word": " Suppose", "probability": 0.72705078125}], "temperature": 1.0}, {"id": 113, "seek": 320054, "start": 3173.56, "end": 3200.54, "text": " Sigma squared, it means the variance is 100. So sigma don't forget to take the square root of sigma squared in order to find sigma. In a sample of 100, 95% of all possible sample means will fall between", "tokens": [36595, 8889, 11, 309, 1355, 264, 21977, 307, 2319, 13, 407, 12771, 500, 380, 2870, 281, 747, 264, 3732, 5593, 295, 12771, 8889, 294, 1668, 281, 915, 12771, 13, 682, 257, 6889, 295, 2319, 11, 13420, 4, 295, 439, 1944, 6889, 1355, 486, 2100, 1296], "avg_logprob": -0.21976902595032816, "compression_ratio": 1.4295774647887325, "no_speech_prob": 0.0, "words": [{"start": 3173.56, "end": 3174.08, "word": " Sigma", "probability": 0.4345703125}, {"start": 3174.08, "end": 3174.58, "word": " squared,", "probability": 0.615234375}, {"start": 3174.8, "end": 3175.0, "word": " it", "probability": 0.8935546875}, {"start": 3175.0, "end": 3175.18, "word": " means", "probability": 0.92333984375}, {"start": 3175.18, "end": 3175.34, "word": " the", "probability": 0.8837890625}, {"start": 3175.34, "end": 3175.84, "word": " variance", "probability": 0.931640625}, {"start": 3175.84, "end": 3176.52, "word": " is", "probability": 0.73583984375}, {"start": 3176.52, "end": 3176.96, "word": " 
100.", "probability": 0.71044921875}, {"start": 3180.1, "end": 3180.4, "word": " So", "probability": 0.8505859375}, {"start": 3180.4, "end": 3180.78, "word": " sigma", "probability": 0.4755859375}, {"start": 3180.78, "end": 3181.36, "word": " don't", "probability": 0.745361328125}, {"start": 3181.36, "end": 3181.9, "word": " forget", "probability": 0.449462890625}, {"start": 3181.9, "end": 3183.12, "word": " to", "probability": 0.85986328125}, {"start": 3183.12, "end": 3183.38, "word": " take", "probability": 0.88720703125}, {"start": 3183.38, "end": 3183.6, "word": " the", "probability": 0.91748046875}, {"start": 3183.6, "end": 3183.9, "word": " square", "probability": 0.90087890625}, {"start": 3183.9, "end": 3184.14, "word": " root", "probability": 0.93994140625}, {"start": 3184.14, "end": 3184.32, "word": " of", "probability": 0.96240234375}, {"start": 3184.32, "end": 3184.54, "word": " sigma", "probability": 0.86328125}, {"start": 3184.54, "end": 3184.94, "word": " squared", "probability": 0.82080078125}, {"start": 3184.94, "end": 3185.16, "word": " in", "probability": 0.904296875}, {"start": 3185.16, "end": 3185.28, "word": " order", "probability": 0.92822265625}, {"start": 3185.28, "end": 3187.04, "word": " to", "probability": 0.97021484375}, {"start": 3187.04, "end": 3187.46, "word": " find", "probability": 0.9140625}, {"start": 3187.46, "end": 3187.84, "word": " sigma.", "probability": 0.8818359375}, {"start": 3190.22, "end": 3190.56, "word": " In", "probability": 0.39697265625}, {"start": 3190.56, "end": 3191.4, "word": " a", "probability": 0.9716796875}, {"start": 3191.4, "end": 3191.66, "word": " sample", "probability": 0.9248046875}, {"start": 3191.66, "end": 3191.9, "word": " of", "probability": 0.97314453125}, {"start": 3191.9, "end": 3192.38, "word": " 100,", "probability": 0.8671875}, {"start": 3195.48, "end": 3197.36, "word": " 95", "probability": 0.96240234375}, {"start": 3197.36, "end": 3197.88, "word": "%", "probability": 0.77490234375}, 
{"start": 3197.88, "end": 3198.16, "word": " of", "probability": 0.9599609375}, {"start": 3198.16, "end": 3198.38, "word": " all", "probability": 0.95166015625}, {"start": 3198.38, "end": 3198.68, "word": " possible", "probability": 0.947265625}, {"start": 3198.68, "end": 3199.1, "word": " sample", "probability": 0.84619140625}, {"start": 3199.1, "end": 3199.4, "word": " means", "probability": 0.853515625}, {"start": 3199.4, "end": 3199.62, "word": " will", "probability": 0.853515625}, {"start": 3199.62, "end": 3199.96, "word": " fall", "probability": 0.83984375}, {"start": 3199.96, "end": 3200.54, "word": " between", "probability": 0.86572265625}], "temperature": 1.0}, {"id": 114, "seek": 323735, "start": 3209.27, "end": 3237.35, "text": " This equals around 95%. Now without calculations, since it says the problem is normally distributed or N is large. In this case, N is large enough to apply the central limit theorem. Then we can use the empirical rule. It says 95% is too strong deviation of the mean. 
So mu minus plus.", "tokens": [639, 6915, 926, 13420, 6856, 823, 1553, 20448, 11, 1670, 309, 1619, 264, 1154, 307, 5646, 12631, 420, 426, 307, 2416, 13, 682, 341, 1389, 11, 426, 307, 2416, 1547, 281, 3079, 264, 5777, 4948, 20904, 13, 1396, 321, 393, 764, 264, 31886, 4978, 13, 467, 1619, 13420, 4, 307, 886, 2068, 25163, 295, 264, 914, 13, 407, 2992, 3175, 1804, 13], "avg_logprob": -0.22457836781229293, "compression_ratio": 1.4742268041237114, "no_speech_prob": 0.0, "words": [{"start": 3209.27, "end": 3209.87, "word": " This", "probability": 0.556640625}, {"start": 3209.87, "end": 3210.47, "word": " equals", "probability": 0.87255859375}, {"start": 3210.47, "end": 3211.05, "word": " around", "probability": 0.90869140625}, {"start": 3211.05, "end": 3212.65, "word": " 95%.", "probability": 0.75048828125}, {"start": 3212.65, "end": 3215.01, "word": " Now", "probability": 0.91064453125}, {"start": 3215.01, "end": 3215.75, "word": " without", "probability": 0.4091796875}, {"start": 3215.75, "end": 3216.43, "word": " calculations,", "probability": 0.88818359375}, {"start": 3217.01, "end": 3217.99, "word": " since", "probability": 0.8662109375}, {"start": 3217.99, "end": 3218.21, "word": " it", "probability": 0.93603515625}, {"start": 3218.21, "end": 3218.43, "word": " says", "probability": 0.890625}, {"start": 3218.43, "end": 3218.59, "word": " the", "probability": 0.85400390625}, {"start": 3218.59, "end": 3218.89, "word": " problem", "probability": 0.88134765625}, {"start": 3218.89, "end": 3219.25, "word": " is", "probability": 0.9560546875}, {"start": 3219.25, "end": 3219.79, "word": " normally", "probability": 0.865234375}, {"start": 3219.79, "end": 3220.45, "word": " distributed", "probability": 0.91796875}, {"start": 3220.45, "end": 3221.51, "word": " or", "probability": 0.55029296875}, {"start": 3221.51, "end": 3221.69, "word": " N", "probability": 0.6337890625}, {"start": 3221.69, "end": 3221.83, "word": " is", "probability": 0.9462890625}, {"start": 3221.83, "end": 
3222.23, "word": " large.", "probability": 0.9658203125}, {"start": 3223.09, "end": 3223.73, "word": " In", "probability": 0.955078125}, {"start": 3223.73, "end": 3223.95, "word": " this", "probability": 0.947265625}, {"start": 3223.95, "end": 3224.25, "word": " case,", "probability": 0.91259765625}, {"start": 3224.35, "end": 3224.45, "word": " N", "probability": 0.96240234375}, {"start": 3224.45, "end": 3224.57, "word": " is", "probability": 0.95458984375}, {"start": 3224.57, "end": 3224.85, "word": " large", "probability": 0.96728515625}, {"start": 3224.85, "end": 3225.25, "word": " enough", "probability": 0.865234375}, {"start": 3225.25, "end": 3226.17, "word": " to", "probability": 0.955078125}, {"start": 3226.17, "end": 3226.51, "word": " apply", "probability": 0.931640625}, {"start": 3226.51, "end": 3226.71, "word": " the", "probability": 0.90576171875}, {"start": 3226.71, "end": 3227.03, "word": " central", "probability": 0.73583984375}, {"start": 3227.03, "end": 3227.27, "word": " limit", "probability": 0.80029296875}, {"start": 3227.27, "end": 3227.67, "word": " theorem.", "probability": 0.81396484375}, {"start": 3228.13, "end": 3228.85, "word": " Then", "probability": 0.84033203125}, {"start": 3228.85, "end": 3229.03, "word": " we", "probability": 0.82177734375}, {"start": 3229.03, "end": 3229.27, "word": " can", "probability": 0.94287109375}, {"start": 3229.27, "end": 3229.57, "word": " use", "probability": 0.87548828125}, {"start": 3229.57, "end": 3229.85, "word": " the", "probability": 0.90771484375}, {"start": 3229.85, "end": 3230.33, "word": " empirical", "probability": 0.91796875}, {"start": 3230.33, "end": 3230.79, "word": " rule.", "probability": 0.93798828125}, {"start": 3231.57, "end": 3231.97, "word": " It", "probability": 0.95361328125}, {"start": 3231.97, "end": 3232.35, "word": " says", "probability": 0.875}, {"start": 3232.35, "end": 3233.01, "word": " 95", "probability": 0.94677734375}, {"start": 3233.01, "end": 3233.51, "word": "%", 
"probability": 0.7431640625}, {"start": 3233.51, "end": 3233.63, "word": " is", "probability": 0.435546875}, {"start": 3233.63, "end": 3233.85, "word": " too", "probability": 0.62109375}, {"start": 3233.85, "end": 3234.19, "word": " strong", "probability": 0.87744140625}, {"start": 3234.19, "end": 3234.61, "word": " deviation", "probability": 0.40087890625}, {"start": 3234.61, "end": 3234.93, "word": " of", "probability": 0.85546875}, {"start": 3234.93, "end": 3235.03, "word": " the", "probability": 0.8974609375}, {"start": 3235.03, "end": 3235.21, "word": " mean.", "probability": 0.95556640625}, {"start": 3235.71, "end": 3236.05, "word": " So", "probability": 0.9609375}, {"start": 3236.05, "end": 3236.39, "word": " mu", "probability": 0.411376953125}, {"start": 3236.39, "end": 3236.81, "word": " minus", "probability": 0.96044921875}, {"start": 3236.81, "end": 3237.35, "word": " plus.", "probability": 0.9248046875}], "temperature": 1.0}, {"id": 115, "seek": 326455, "start": 3239.07, "end": 3264.55, "text": " 2 sigma of x bar. Now, mu is 50 minus plus 2 sigma of x bar. Yes, I have to compute sigma of x bar first. So, sigma divided by is 1. So, simple calculation, just sigma over root n is 1. So, minus 1. 50 minus 2 is 48. 
50 plus 2 is 52.", "tokens": [568, 12771, 295, 2031, 2159, 13, 823, 11, 2992, 307, 2625, 3175, 1804, 568, 12771, 295, 2031, 2159, 13, 1079, 11, 286, 362, 281, 14722, 12771, 295, 2031, 2159, 700, 13, 407, 11, 12771, 6666, 538, 307, 502, 13, 407, 11, 2199, 17108, 11, 445, 12771, 670, 5593, 297, 307, 502, 13, 407, 11, 3175, 502, 13, 2625, 3175, 568, 307, 11174, 13, 2625, 1804, 568, 307, 18079, 13], "avg_logprob": -0.215625, "compression_ratio": 1.5810810810810811, "no_speech_prob": 0.0, "words": [{"start": 3239.07, "end": 3239.29, "word": " 2", "probability": 0.1529541015625}, {"start": 3239.29, "end": 3239.63, "word": " sigma", "probability": 0.79052734375}, {"start": 3239.63, "end": 3239.83, "word": " of", "probability": 0.751953125}, {"start": 3239.83, "end": 3239.97, "word": " x", "probability": 0.79638671875}, {"start": 3239.97, "end": 3240.23, "word": " bar.", "probability": 0.80712890625}, {"start": 3241.17, "end": 3241.69, "word": " Now,", "probability": 0.93310546875}, {"start": 3241.93, "end": 3242.17, "word": " mu", "probability": 0.396728515625}, {"start": 3242.17, "end": 3242.55, "word": " is", "probability": 0.93701171875}, {"start": 3242.55, "end": 3243.11, "word": " 50", "probability": 0.9248046875}, {"start": 3243.11, "end": 3244.57, "word": " minus", "probability": 0.90478515625}, {"start": 3244.57, "end": 3245.01, "word": " plus", "probability": 0.75830078125}, {"start": 3245.01, "end": 3245.25, "word": " 2", "probability": 0.95947265625}, {"start": 3245.25, "end": 3245.55, "word": " sigma", "probability": 0.93017578125}, {"start": 3245.55, "end": 3245.69, "word": " of", "probability": 0.6943359375}, {"start": 3245.69, "end": 3245.85, "word": " x", "probability": 0.9970703125}, {"start": 3245.85, "end": 3246.15, "word": " bar.", "probability": 0.94873046875}, {"start": 3246.33, "end": 3246.67, "word": " Yes,", "probability": 0.7607421875}, {"start": 3246.81, "end": 3246.93, "word": " I", "probability": 0.978515625}, {"start": 3246.93, "end": 
3247.15, "word": " have", "probability": 0.9130859375}, {"start": 3247.15, "end": 3247.67, "word": " to", "probability": 0.81884765625}, {"start": 3247.67, "end": 3248.23, "word": " compute", "probability": 0.66064453125}, {"start": 3248.23, "end": 3248.49, "word": " sigma", "probability": 0.93212890625}, {"start": 3248.49, "end": 3248.61, "word": " of", "probability": 0.802734375}, {"start": 3248.61, "end": 3248.75, "word": " x", "probability": 0.99462890625}, {"start": 3248.75, "end": 3248.97, "word": " bar", "probability": 0.92529296875}, {"start": 3248.97, "end": 3249.29, "word": " first.", "probability": 0.451171875}, {"start": 3249.91, "end": 3250.17, "word": " So,", "probability": 0.91552734375}, {"start": 3250.33, "end": 3250.49, "word": " sigma", "probability": 0.91748046875}, {"start": 3250.49, "end": 3250.89, "word": " divided", "probability": 0.7861328125}, {"start": 3250.89, "end": 3251.13, "word": " by", "probability": 0.970703125}, {"start": 3251.13, "end": 3253.43, "word": " is", "probability": 0.32958984375}, {"start": 3253.43, "end": 3253.71, "word": " 1.", "probability": 0.7607421875}, {"start": 3254.51, "end": 3254.71, "word": " So,", "probability": 0.44873046875}, {"start": 3254.77, "end": 3254.95, "word": " simple", "probability": 0.814453125}, {"start": 3254.95, "end": 3255.41, "word": " calculation,", "probability": 0.87353515625}, {"start": 3255.55, "end": 3255.71, "word": " just", "probability": 0.91015625}, {"start": 3255.71, "end": 3256.01, "word": " sigma", "probability": 0.935546875}, {"start": 3256.01, "end": 3256.23, "word": " over", "probability": 0.78857421875}, {"start": 3256.23, "end": 3256.45, "word": " root", "probability": 0.81640625}, {"start": 3256.45, "end": 3256.65, "word": " n", "probability": 0.7900390625}, {"start": 3256.65, "end": 3256.89, "word": " is", "probability": 0.92578125}, {"start": 3256.89, "end": 3257.15, "word": " 1.", "probability": 0.9345703125}, {"start": 3258.05, "end": 3258.51, "word": " So,", 
"probability": 0.95654296875}, {"start": 3258.99, "end": 3259.47, "word": " minus", "probability": 0.978515625}, {"start": 3259.47, "end": 3259.87, "word": " 1.", "probability": 0.96142578125}, {"start": 3260.99, "end": 3261.31, "word": " 50", "probability": 0.89990234375}, {"start": 3261.31, "end": 3261.57, "word": " minus", "probability": 0.98486328125}, {"start": 3261.57, "end": 3261.85, "word": " 2", "probability": 0.9892578125}, {"start": 3261.85, "end": 3262.01, "word": " is", "probability": 0.94189453125}, {"start": 3262.01, "end": 3262.33, "word": " 48.", "probability": 0.97216796875}, {"start": 3263.07, "end": 3263.57, "word": " 50", "probability": 0.94091796875}, {"start": 3263.57, "end": 3263.83, "word": " plus", "probability": 0.95703125}, {"start": 3263.83, "end": 3264.03, "word": " 2", "probability": 0.9892578125}, {"start": 3264.03, "end": 3264.17, "word": " is", "probability": 0.94677734375}, {"start": 3264.17, "end": 3264.55, "word": " 52.", "probability": 0.92919921875}], "temperature": 1.0}, {"id": 116, "seek": 329395, "start": 3266.75, "end": 3293.95, "text": " So it's between 40, 8, and 52, and this probability is 95%. So it's true. Now which is faster? To use this method, which is maybe less than one minute, you can figure out the answer, or use the complete calculations. 
In this case, you have to find z-score for the first one, z for the other one, then use the normal table, and that will take at least five minutes.", "tokens": [407, 309, 311, 1296, 3356, 11, 1649, 11, 293, 18079, 11, 293, 341, 8482, 307, 13420, 6856, 407, 309, 311, 2074, 13, 823, 597, 307, 4663, 30, 1407, 764, 341, 3170, 11, 597, 307, 1310, 1570, 813, 472, 3456, 11, 291, 393, 2573, 484, 264, 1867, 11, 420, 764, 264, 3566, 20448, 13, 682, 341, 1389, 11, 291, 362, 281, 915, 710, 12, 4417, 418, 337, 264, 700, 472, 11, 710, 337, 264, 661, 472, 11, 550, 764, 264, 2710, 3199, 11, 293, 300, 486, 747, 412, 1935, 1732, 2077, 13], "avg_logprob": -0.17382812062683312, "compression_ratio": 1.5732758620689655, "no_speech_prob": 0.0, "words": [{"start": 3266.75, "end": 3266.99, "word": " So", "probability": 0.83935546875}, {"start": 3266.99, "end": 3267.13, "word": " it's", "probability": 0.88232421875}, {"start": 3267.13, "end": 3267.57, "word": " between", "probability": 0.90185546875}, {"start": 3267.57, "end": 3268.51, "word": " 40,", "probability": 0.391845703125}, {"start": 3268.83, "end": 3269.29, "word": " 8,", "probability": 0.91796875}, {"start": 3269.37, "end": 3269.59, "word": " and", "probability": 0.9326171875}, {"start": 3269.59, "end": 3270.07, "word": " 52,", "probability": 0.98095703125}, {"start": 3270.65, "end": 3271.19, "word": " and", "probability": 0.93212890625}, {"start": 3271.19, "end": 3271.37, "word": " this", "probability": 0.8994140625}, {"start": 3271.37, "end": 3271.79, "word": " probability", "probability": 0.9541015625}, {"start": 3271.79, "end": 3272.27, "word": " is", "probability": 0.9501953125}, {"start": 3272.27, "end": 3273.75, "word": " 95%.", "probability": 0.857666015625}, {"start": 3273.75, "end": 3275.13, "word": " So", "probability": 0.77197265625}, {"start": 3275.13, "end": 3275.37, "word": " it's", "probability": 0.93994140625}, {"start": 3275.37, "end": 3275.69, "word": " true.", "probability": 0.8193359375}, {"start": 3276.95, 
"end": 3277.17, "word": " Now", "probability": 0.86474609375}, {"start": 3277.17, "end": 3277.33, "word": " which", "probability": 0.54833984375}, {"start": 3277.33, "end": 3277.47, "word": " is", "probability": 0.94970703125}, {"start": 3277.47, "end": 3277.87, "word": " faster?", "probability": 0.890625}, {"start": 3278.61, "end": 3278.95, "word": " To", "probability": 0.9189453125}, {"start": 3278.95, "end": 3279.17, "word": " use", "probability": 0.8740234375}, {"start": 3279.17, "end": 3279.41, "word": " this", "probability": 0.94921875}, {"start": 3279.41, "end": 3279.79, "word": " method,", "probability": 0.9384765625}, {"start": 3280.41, "end": 3280.59, "word": " which", "probability": 0.79248046875}, {"start": 3280.59, "end": 3280.69, "word": " is", "probability": 0.9130859375}, {"start": 3280.69, "end": 3281.21, "word": " maybe", "probability": 0.904296875}, {"start": 3281.21, "end": 3281.55, "word": " less", "probability": 0.93017578125}, {"start": 3281.55, "end": 3281.75, "word": " than", "probability": 0.94140625}, {"start": 3281.75, "end": 3281.93, "word": " one", "probability": 0.7822265625}, {"start": 3281.93, "end": 3282.15, "word": " minute,", "probability": 0.94970703125}, {"start": 3282.21, "end": 3282.27, "word": " you", "probability": 0.93212890625}, {"start": 3282.27, "end": 3282.43, "word": " can", "probability": 0.93896484375}, {"start": 3282.43, "end": 3282.65, "word": " figure", "probability": 0.9697265625}, {"start": 3282.65, "end": 3282.81, "word": " out", "probability": 0.884765625}, {"start": 3282.81, "end": 3282.97, "word": " the", "probability": 0.92041015625}, {"start": 3282.97, "end": 3283.23, "word": " answer,", "probability": 0.9521484375}, {"start": 3283.73, "end": 3283.97, "word": " or", "probability": 0.962890625}, {"start": 3283.97, "end": 3284.21, "word": " use", "probability": 0.85986328125}, {"start": 3284.21, "end": 3284.35, "word": " the", "probability": 0.89453125}, {"start": 3284.35, "end": 3284.73, "word": " 
complete", "probability": 0.79736328125}, {"start": 3284.73, "end": 3285.83, "word": " calculations.", "probability": 0.92041015625}, {"start": 3286.51, "end": 3286.71, "word": " In", "probability": 0.96044921875}, {"start": 3286.71, "end": 3286.89, "word": " this", "probability": 0.9443359375}, {"start": 3286.89, "end": 3287.09, "word": " case,", "probability": 0.92041015625}, {"start": 3287.13, "end": 3287.21, "word": " you", "probability": 0.95703125}, {"start": 3287.21, "end": 3287.35, "word": " have", "probability": 0.943359375}, {"start": 3287.35, "end": 3287.47, "word": " to", "probability": 0.96630859375}, {"start": 3287.47, "end": 3287.67, "word": " find", "probability": 0.90185546875}, {"start": 3287.67, "end": 3287.89, "word": " z", "probability": 0.399658203125}, {"start": 3287.89, "end": 3288.11, "word": "-score", "probability": 0.63916015625}, {"start": 3288.11, "end": 3288.33, "word": " for", "probability": 0.943359375}, {"start": 3288.33, "end": 3288.47, "word": " the", "probability": 0.9169921875}, {"start": 3288.47, "end": 3288.73, "word": " first", "probability": 0.87841796875}, {"start": 3288.73, "end": 3288.99, "word": " one,", "probability": 0.91748046875}, {"start": 3289.09, "end": 3289.21, "word": " z", "probability": 0.97607421875}, {"start": 3289.21, "end": 3289.39, "word": " for", "probability": 0.88671875}, {"start": 3289.39, "end": 3289.55, "word": " the", "probability": 0.91796875}, {"start": 3289.55, "end": 3289.79, "word": " other", "probability": 0.88427734375}, {"start": 3289.79, "end": 3290.17, "word": " one,", "probability": 0.9248046875}, {"start": 3290.69, "end": 3290.99, "word": " then", "probability": 0.8310546875}, {"start": 3290.99, "end": 3291.21, "word": " use", "probability": 0.875}, {"start": 3291.21, "end": 3291.33, "word": " the", "probability": 0.83984375}, {"start": 3291.33, "end": 3291.61, "word": " normal", "probability": 0.8837890625}, {"start": 3291.61, "end": 3291.93, "word": " table,", "probability": 
0.89501953125}, {"start": 3291.99, "end": 3292.09, "word": " and", "probability": 0.75341796875}, {"start": 3292.09, "end": 3292.23, "word": " that", "probability": 0.93310546875}, {"start": 3292.23, "end": 3292.41, "word": " will", "probability": 0.8876953125}, {"start": 3292.41, "end": 3292.77, "word": " take", "probability": 0.89794921875}, {"start": 3292.77, "end": 3292.97, "word": " at", "probability": 0.96142578125}, {"start": 3292.97, "end": 3293.19, "word": " least", "probability": 0.95654296875}, {"start": 3293.19, "end": 3293.59, "word": " five", "probability": 0.79296875}, {"start": 3293.59, "end": 3293.95, "word": " minutes.", "probability": 0.93310546875}], "temperature": 1.0}, {"id": 117, "seek": 332429, "start": 3295.85, "end": 3324.29, "text": " So to use the empirical rule, most of the time gives shorter time. Any question? Let's do one more. One more. Number 10. The amount of bleach a machine", "tokens": [407, 281, 764, 264, 31886, 4978, 11, 881, 295, 264, 565, 2709, 11639, 565, 13, 2639, 1168, 30, 961, 311, 360, 472, 544, 13, 1485, 544, 13, 5118, 1266, 13, 440, 2372, 295, 39631, 257, 3479], "avg_logprob": -0.23057432915713336, "compression_ratio": 1.256198347107438, "no_speech_prob": 0.0, "words": [{"start": 3295.85, "end": 3296.29, "word": " So", "probability": 0.73046875}, {"start": 3296.29, "end": 3296.61, "word": " to", "probability": 0.14404296875}, {"start": 3296.61, "end": 3296.89, "word": " use", "probability": 0.88427734375}, {"start": 3296.89, "end": 3297.01, "word": " the", "probability": 0.62451171875}, {"start": 3297.01, "end": 3297.35, "word": " empirical", "probability": 0.88623046875}, {"start": 3297.35, "end": 3297.79, "word": " rule,", "probability": 0.8828125}, {"start": 3298.33, "end": 3298.49, "word": " most", "probability": 0.876953125}, {"start": 3298.49, "end": 3298.65, "word": " of", "probability": 0.96728515625}, {"start": 3298.65, "end": 3298.73, "word": " the", "probability": 0.90625}, {"start": 3298.73, "end": 
3298.95, "word": " time", "probability": 0.8955078125}, {"start": 3298.95, "end": 3299.33, "word": " gives", "probability": 0.77685546875}, {"start": 3299.33, "end": 3300.99, "word": " shorter", "probability": 0.9052734375}, {"start": 3300.99, "end": 3303.41, "word": " time.", "probability": 0.853515625}, {"start": 3305.29, "end": 3305.67, "word": " Any", "probability": 0.884765625}, {"start": 3305.67, "end": 3306.03, "word": " question?", "probability": 0.52734375}, {"start": 3308.77, "end": 3309.53, "word": " Let's", "probability": 0.95947265625}, {"start": 3309.53, "end": 3309.83, "word": " do", "probability": 0.9638671875}, {"start": 3309.83, "end": 3311.09, "word": " one", "probability": 0.9228515625}, {"start": 3311.09, "end": 3311.43, "word": " more.", "probability": 0.9404296875}, {"start": 3314.35, "end": 3314.69, "word": " One", "probability": 0.779296875}, {"start": 3314.69, "end": 3314.97, "word": " more.", "probability": 0.9423828125}, {"start": 3317.17, "end": 3317.71, "word": " Number", "probability": 0.85693359375}, {"start": 3317.71, "end": 3318.15, "word": " 10.", "probability": 0.57275390625}, {"start": 3319.67, "end": 3319.97, "word": " The", "probability": 0.87841796875}, {"start": 3319.97, "end": 3320.61, "word": " amount", "probability": 0.89794921875}, {"start": 3320.61, "end": 3323.11, "word": " of", "probability": 0.7705078125}, {"start": 3323.11, "end": 3323.53, "word": " bleach", "probability": 0.94189453125}, {"start": 3323.53, "end": 3323.85, "word": " a", "probability": 0.97607421875}, {"start": 3323.85, "end": 3324.29, "word": " machine", "probability": 0.8857421875}], "temperature": 1.0}, {"id": 118, "seek": 335157, "start": 3325.57, "end": 3351.57, "text": " pours into bottles has a mean of 36. The mean is 36. Sigma is 0.15 ounces. Suppose we take around a sample of size 36 bottles filled by this machine. 
The sampling distribution or the sample mean has standard error of", "tokens": [2016, 82, 666, 15923, 575, 257, 914, 295, 8652, 13, 440, 914, 307, 8652, 13, 36595, 307, 1958, 13, 5211, 27343, 13, 21360, 321, 747, 926, 257, 6889, 295, 2744, 8652, 15923, 6412, 538, 341, 3479, 13, 440, 21179, 7316, 420, 264, 6889, 914, 575, 3832, 6713, 295], "avg_logprob": -0.25845024537067024, "compression_ratio": 1.4466666666666668, "no_speech_prob": 0.0, "words": [{"start": 3325.57, "end": 3326.07, "word": " pours", "probability": 0.514862060546875}, {"start": 3326.07, "end": 3326.39, "word": " into", "probability": 0.85498046875}, {"start": 3326.39, "end": 3326.79, "word": " bottles", "probability": 0.9033203125}, {"start": 3326.79, "end": 3327.11, "word": " has", "probability": 0.89013671875}, {"start": 3327.11, "end": 3327.27, "word": " a", "probability": 0.986328125}, {"start": 3327.27, "end": 3327.37, "word": " mean", "probability": 0.93994140625}, {"start": 3327.37, "end": 3327.59, "word": " of", "probability": 0.9697265625}, {"start": 3327.59, "end": 3328.33, "word": " 36.", "probability": 0.89208984375}, {"start": 3329.95, "end": 3330.61, "word": " The", "probability": 0.81396484375}, {"start": 3330.61, "end": 3331.53, "word": " mean", "probability": 0.74658203125}, {"start": 3331.53, "end": 3331.67, "word": " is", "probability": 0.92724609375}, {"start": 3331.67, "end": 3332.25, "word": " 36.", "probability": 0.96337890625}, {"start": 3333.29, "end": 3333.91, "word": " Sigma", "probability": 0.89697265625}, {"start": 3333.91, "end": 3335.51, "word": " is", "probability": 0.9208984375}, {"start": 3335.51, "end": 3335.77, "word": " 0", "probability": 0.57666015625}, {"start": 3335.77, "end": 3336.37, "word": ".15", "probability": 0.993408203125}, {"start": 3336.37, "end": 3336.97, "word": " ounces.", "probability": 0.78564453125}, {"start": 3339.07, "end": 3339.79, "word": " Suppose", "probability": 0.57958984375}, {"start": 3339.79, "end": 3340.11, "word": " we", 
"probability": 0.92041015625}, {"start": 3340.11, "end": 3340.31, "word": " take", "probability": 0.890625}, {"start": 3340.31, "end": 3340.55, "word": " around", "probability": 0.3193359375}, {"start": 3340.55, "end": 3340.77, "word": " a", "probability": 0.6376953125}, {"start": 3340.77, "end": 3340.99, "word": " sample", "probability": 0.86474609375}, {"start": 3340.99, "end": 3341.21, "word": " of", "probability": 0.953125}, {"start": 3341.21, "end": 3341.51, "word": " size", "probability": 0.76708984375}, {"start": 3341.51, "end": 3342.27, "word": " 36", "probability": 0.97119140625}, {"start": 3342.27, "end": 3344.63, "word": " bottles", "probability": 0.501953125}, {"start": 3344.63, "end": 3345.07, "word": " filled", "probability": 0.8193359375}, {"start": 3345.07, "end": 3345.35, "word": " by", "probability": 0.9638671875}, {"start": 3345.35, "end": 3345.59, "word": " this", "probability": 0.94775390625}, {"start": 3345.59, "end": 3346.03, "word": " machine.", "probability": 0.880859375}, {"start": 3347.63, "end": 3347.87, "word": " The", "probability": 0.888671875}, {"start": 3347.87, "end": 3348.11, "word": " sampling", "probability": 0.4736328125}, {"start": 3348.11, "end": 3348.77, "word": " distribution", "probability": 0.84765625}, {"start": 3348.77, "end": 3349.01, "word": " or", "probability": 0.61279296875}, {"start": 3349.01, "end": 3349.15, "word": " the", "probability": 0.89404296875}, {"start": 3349.15, "end": 3349.39, "word": " sample", "probability": 0.599609375}, {"start": 3349.39, "end": 3349.71, "word": " mean", "probability": 0.95068359375}, {"start": 3349.71, "end": 3350.03, "word": " has", "probability": 0.93505859375}, {"start": 3350.03, "end": 3350.53, "word": " standard", "probability": 0.83740234375}, {"start": 3350.53, "end": 3350.93, "word": " error", "probability": 0.9033203125}, {"start": 3350.93, "end": 3351.57, "word": " of", "probability": 0.96875}], "temperature": 1.0}, {"id": 119, "seek": 337821, "start": 3352.39, "end": 
3378.21, "text": " So standard error means sigma of x bar is 0.15. Again, it says the sample distribution of the sample mean has standard error of 0.15. I mean sigma of x bar is 0.15. It's false. Because sigma of x bar equals sigma over root n. So 0.15 divided by 6.", "tokens": [407, 3832, 6713, 1355, 12771, 295, 2031, 2159, 307, 1958, 13, 5211, 13, 3764, 11, 309, 1619, 264, 6889, 7316, 295, 264, 6889, 914, 575, 3832, 6713, 295, 1958, 13, 5211, 13, 286, 914, 12771, 295, 2031, 2159, 307, 1958, 13, 5211, 13, 467, 311, 7908, 13, 1436, 12771, 295, 2031, 2159, 6915, 12771, 670, 5593, 297, 13, 407, 1958, 13, 5211, 6666, 538, 1386, 13], "avg_logprob": -0.22119869225060762, "compression_ratio": 1.6533333333333333, "no_speech_prob": 0.0, "words": [{"start": 3352.39, "end": 3352.65, "word": " So", "probability": 0.73486328125}, {"start": 3352.65, "end": 3353.17, "word": " standard", "probability": 0.68115234375}, {"start": 3353.17, "end": 3353.47, "word": " error", "probability": 0.7900390625}, {"start": 3353.47, "end": 3353.73, "word": " means", "probability": 0.496826171875}, {"start": 3353.73, "end": 3354.03, "word": " sigma", "probability": 0.7275390625}, {"start": 3354.03, "end": 3354.23, "word": " of", "probability": 0.41162109375}, {"start": 3354.23, "end": 3354.39, "word": " x", "probability": 0.83154296875}, {"start": 3354.39, "end": 3354.73, "word": " bar", "probability": 0.85009765625}, {"start": 3354.73, "end": 3354.89, "word": " is", "probability": 0.47705078125}, {"start": 3354.89, "end": 3355.07, "word": " 0", "probability": 0.318359375}, {"start": 3355.07, "end": 3355.29, "word": ".15.", "probability": 0.98583984375}, {"start": 3358.15, "end": 3358.75, "word": " Again,", "probability": 0.81689453125}, {"start": 3359.87, "end": 3360.17, "word": " it", "probability": 0.8974609375}, {"start": 3360.17, "end": 3360.51, "word": " says", "probability": 0.8828125}, {"start": 3360.51, "end": 3360.79, "word": " the", "probability": 0.68212890625}, {"start": 3360.79, 
"end": 3361.03, "word": " sample", "probability": 0.49169921875}, {"start": 3361.03, "end": 3361.63, "word": " distribution", "probability": 0.80078125}, {"start": 3361.63, "end": 3361.85, "word": " of", "probability": 0.72998046875}, {"start": 3361.85, "end": 3361.99, "word": " the", "probability": 0.86083984375}, {"start": 3361.99, "end": 3362.37, "word": " sample", "probability": 0.8515625}, {"start": 3362.37, "end": 3362.67, "word": " mean", "probability": 0.91650390625}, {"start": 3362.67, "end": 3363.13, "word": " has", "probability": 0.9384765625}, {"start": 3363.13, "end": 3364.29, "word": " standard", "probability": 0.65869140625}, {"start": 3364.29, "end": 3364.77, "word": " error", "probability": 0.8818359375}, {"start": 3364.77, "end": 3365.69, "word": " of", "probability": 0.943359375}, {"start": 3365.69, "end": 3366.29, "word": " 0", "probability": 0.7900390625}, {"start": 3366.29, "end": 3366.63, "word": ".15.", "probability": 0.998291015625}, {"start": 3366.71, "end": 3366.79, "word": " I", "probability": 0.888671875}, {"start": 3366.79, "end": 3366.91, "word": " mean", "probability": 0.96484375}, {"start": 3366.91, "end": 3367.19, "word": " sigma", "probability": 0.51708984375}, {"start": 3367.19, "end": 3367.27, "word": " of", "probability": 0.9033203125}, {"start": 3367.27, "end": 3367.43, "word": " x", "probability": 0.9912109375}, {"start": 3367.43, "end": 3367.61, "word": " bar", "probability": 0.927734375}, {"start": 3367.61, "end": 3367.75, "word": " is", "probability": 0.92822265625}, {"start": 3367.75, "end": 3368.05, "word": " 0", "probability": 0.8740234375}, {"start": 3368.05, "end": 3368.45, "word": ".15.", "probability": 0.995849609375}, {"start": 3368.91, "end": 3369.25, "word": " It's", "probability": 0.92626953125}, {"start": 3369.25, "end": 3369.71, "word": " false.", "probability": 0.8916015625}, {"start": 3370.57, "end": 3371.05, "word": " Because", "probability": 0.9287109375}, {"start": 3371.05, "end": 3371.75, "word": " 
sigma", "probability": 0.73193359375}, {"start": 3371.75, "end": 3372.01, "word": " of", "probability": 0.9404296875}, {"start": 3372.01, "end": 3372.23, "word": " x", "probability": 0.99267578125}, {"start": 3372.23, "end": 3372.63, "word": " bar", "probability": 0.95068359375}, {"start": 3372.63, "end": 3373.89, "word": " equals", "probability": 0.86474609375}, {"start": 3373.89, "end": 3374.53, "word": " sigma", "probability": 0.9423828125}, {"start": 3374.53, "end": 3374.93, "word": " over", "probability": 0.85009765625}, {"start": 3374.93, "end": 3375.23, "word": " root", "probability": 0.93896484375}, {"start": 3375.23, "end": 3375.53, "word": " n.", "probability": 0.736328125}, {"start": 3376.17, "end": 3376.35, "word": " So", "probability": 0.91748046875}, {"start": 3376.35, "end": 3376.63, "word": " 0", "probability": 0.7568359375}, {"start": 3376.63, "end": 3377.15, "word": ".15", "probability": 0.998779296875}, {"start": 3377.15, "end": 3377.53, "word": " divided", "probability": 0.74560546875}, {"start": 3377.53, "end": 3377.79, "word": " by", "probability": 0.97216796875}, {"start": 3377.79, "end": 3378.21, "word": " 6.", "probability": 0.69970703125}], "temperature": 1.0}, {"id": 120, "seek": 340655, "start": 3380.99, "end": 3406.55, "text": " So it's incorrect. That's false. False because this value is just the standard deviation. But he asked about the sampling distribution of the sample mean has standard error. And standard error means the standard deviation of the sample mean. So we have to divide sigma by square root of that. 
Mixed, 11.", "tokens": [407, 309, 311, 18424, 13, 663, 311, 7908, 13, 50040, 570, 341, 2158, 307, 445, 264, 3832, 25163, 13, 583, 415, 2351, 466, 264, 21179, 7316, 295, 264, 6889, 914, 575, 3832, 6713, 13, 400, 3832, 6713, 1355, 264, 3832, 25163, 295, 264, 6889, 914, 13, 407, 321, 362, 281, 9845, 12771, 538, 3732, 5593, 295, 300, 13, 12769, 292, 11, 2975, 13], "avg_logprob": -0.21691893925890326, "compression_ratio": 1.6703296703296704, "no_speech_prob": 0.0, "words": [{"start": 3380.99, "end": 3381.27, "word": " So", "probability": 0.75634765625}, {"start": 3381.27, "end": 3381.57, "word": " it's", "probability": 0.804443359375}, {"start": 3381.57, "end": 3382.07, "word": " incorrect.", "probability": 0.90234375}, {"start": 3382.99, "end": 3383.41, "word": " That's", "probability": 0.907958984375}, {"start": 3383.41, "end": 3383.75, "word": " false.", "probability": 0.904296875}, {"start": 3384.97, "end": 3385.63, "word": " False", "probability": 0.9638671875}, {"start": 3385.63, "end": 3386.15, "word": " because", "probability": 0.83349609375}, {"start": 3386.15, "end": 3387.23, "word": " this", "probability": 0.921875}, {"start": 3387.23, "end": 3387.71, "word": " value", "probability": 0.96826171875}, {"start": 3387.71, "end": 3389.57, "word": " is", "probability": 0.92822265625}, {"start": 3389.57, "end": 3390.03, "word": " just", "probability": 0.919921875}, {"start": 3390.03, "end": 3391.07, "word": " the", "probability": 0.87646484375}, {"start": 3391.07, "end": 3391.39, "word": " standard", "probability": 0.93408203125}, {"start": 3391.39, "end": 3391.85, "word": " deviation.", "probability": 0.88623046875}, {"start": 3393.03, "end": 3393.35, "word": " But", "probability": 0.939453125}, {"start": 3393.35, "end": 3393.53, "word": " he", "probability": 0.935546875}, {"start": 3393.53, "end": 3393.85, "word": " asked", "probability": 0.60693359375}, {"start": 3393.85, "end": 3394.29, "word": " about", "probability": 0.90234375}, {"start": 3394.29, "end": 
3394.73, "word": " the", "probability": 0.8818359375}, {"start": 3394.73, "end": 3395.13, "word": " sampling", "probability": 0.861328125}, {"start": 3395.13, "end": 3395.81, "word": " distribution", "probability": 0.86669921875}, {"start": 3395.81, "end": 3396.09, "word": " of", "probability": 0.9306640625}, {"start": 3396.09, "end": 3396.27, "word": " the", "probability": 0.8935546875}, {"start": 3396.27, "end": 3396.61, "word": " sample", "probability": 0.86767578125}, {"start": 3396.61, "end": 3396.97, "word": " mean", "probability": 0.54736328125}, {"start": 3396.97, "end": 3397.83, "word": " has", "probability": 0.64794921875}, {"start": 3397.83, "end": 3398.43, "word": " standard", "probability": 0.82275390625}, {"start": 3398.43, "end": 3398.73, "word": " error.", "probability": 0.8740234375}, {"start": 3398.91, "end": 3399.05, "word": " And", "probability": 0.78466796875}, {"start": 3399.05, "end": 3399.47, "word": " standard", "probability": 0.90625}, {"start": 3399.47, "end": 3399.73, "word": " error", "probability": 0.86376953125}, {"start": 3399.73, "end": 3400.09, "word": " means", "probability": 0.5830078125}, {"start": 3400.09, "end": 3400.33, "word": " the", "probability": 0.8798828125}, {"start": 3400.33, "end": 3400.65, "word": " standard", "probability": 0.951171875}, {"start": 3400.65, "end": 3401.13, "word": " deviation", "probability": 0.89990234375}, {"start": 3401.13, "end": 3401.41, "word": " of", "probability": 0.96728515625}, {"start": 3401.41, "end": 3401.57, "word": " the", "probability": 0.91748046875}, {"start": 3401.57, "end": 3401.81, "word": " sample", "probability": 0.880859375}, {"start": 3401.81, "end": 3402.11, "word": " mean.", "probability": 0.947265625}, {"start": 3402.71, "end": 3402.97, "word": " So", "probability": 0.525390625}, {"start": 3402.97, "end": 3403.05, "word": " we", "probability": 0.52587890625}, {"start": 3403.05, "end": 3403.19, "word": " have", "probability": 0.89453125}, {"start": 3403.19, "end": 3403.27, 
"word": " to", "probability": 0.865234375}, {"start": 3403.27, "end": 3403.49, "word": " divide", "probability": 0.94970703125}, {"start": 3403.49, "end": 3403.79, "word": " sigma", "probability": 0.8154296875}, {"start": 3403.79, "end": 3403.99, "word": " by", "probability": 0.9599609375}, {"start": 3403.99, "end": 3404.33, "word": " square", "probability": 0.294189453125}, {"start": 3404.33, "end": 3404.51, "word": " root", "probability": 0.66259765625}, {"start": 3404.51, "end": 3404.61, "word": " of", "probability": 0.9208984375}, {"start": 3404.61, "end": 3404.75, "word": " that.", "probability": 0.69140625}, {"start": 3405.69, "end": 3406.21, "word": " Mixed,", "probability": 0.732666015625}, {"start": 3406.25, "end": 3406.55, "word": " 11.", "probability": 0.6318359375}], "temperature": 1.0}, {"id": 121, "seek": 342833, "start": 3407.63, "end": 3428.33, "text": " The mean of the standard distribution of a sample proportion is the population of proportion pi. That's true. Because if the two conditions we mentioned before are satisfied, then the mean of P is pi. So that's correct. 
Twelve, the last one.", "tokens": [440, 914, 295, 264, 3832, 7316, 295, 257, 6889, 16068, 307, 264, 4415, 295, 16068, 3895, 13, 663, 311, 2074, 13, 1436, 498, 264, 732, 4487, 321, 2835, 949, 366, 11239, 11, 550, 264, 914, 295, 430, 307, 3895, 13, 407, 300, 311, 3006, 13, 48063, 11, 264, 1036, 472, 13], "avg_logprob": -0.1989182738157419, "compression_ratio": 1.5612903225806452, "no_speech_prob": 0.0, "words": [{"start": 3407.63, "end": 3407.97, "word": " The", "probability": 0.50439453125}, {"start": 3407.97, "end": 3408.27, "word": " mean", "probability": 0.9453125}, {"start": 3408.27, "end": 3408.99, "word": " of", "probability": 0.96484375}, {"start": 3408.99, "end": 3409.19, "word": " the", "probability": 0.7705078125}, {"start": 3409.19, "end": 3409.45, "word": " standard", "probability": 0.68505859375}, {"start": 3409.45, "end": 3410.23, "word": " distribution", "probability": 0.861328125}, {"start": 3410.23, "end": 3410.51, "word": " of", "probability": 0.935546875}, {"start": 3410.51, "end": 3410.67, "word": " a", "probability": 0.86083984375}, {"start": 3410.67, "end": 3410.95, "word": " sample", "probability": 0.779296875}, {"start": 3410.95, "end": 3411.61, "word": " proportion", "probability": 0.8173828125}, {"start": 3411.61, "end": 3412.45, "word": " is", "probability": 0.89306640625}, {"start": 3412.45, "end": 3412.61, "word": " the", "probability": 0.91259765625}, {"start": 3412.61, "end": 3413.07, "word": " population", "probability": 0.84375}, {"start": 3413.07, "end": 3413.25, "word": " of", "probability": 0.54833984375}, {"start": 3413.25, "end": 3413.67, "word": " proportion", "probability": 0.8642578125}, {"start": 3413.67, "end": 3414.03, "word": " pi.", "probability": 0.72265625}, {"start": 3414.29, "end": 3414.63, "word": " That's", "probability": 0.922607421875}, {"start": 3414.63, "end": 3414.97, "word": " true.", "probability": 0.96484375}, {"start": 3415.73, "end": 3416.17, "word": " Because", "probability": 0.88720703125}, {"start": 
3416.17, "end": 3418.67, "word": " if", "probability": 0.76318359375}, {"start": 3418.67, "end": 3418.81, "word": " the", "probability": 0.9072265625}, {"start": 3418.81, "end": 3418.97, "word": " two", "probability": 0.9052734375}, {"start": 3418.97, "end": 3419.51, "word": " conditions", "probability": 0.8564453125}, {"start": 3419.51, "end": 3420.91, "word": " we", "probability": 0.7734375}, {"start": 3420.91, "end": 3421.21, "word": " mentioned", "probability": 0.818359375}, {"start": 3421.21, "end": 3421.59, "word": " before", "probability": 0.853515625}, {"start": 3421.59, "end": 3421.79, "word": " are", "probability": 0.93017578125}, {"start": 3421.79, "end": 3422.35, "word": " satisfied,", "probability": 0.89111328125}, {"start": 3422.89, "end": 3423.15, "word": " then", "probability": 0.85400390625}, {"start": 3423.15, "end": 3423.35, "word": " the", "probability": 0.921875}, {"start": 3423.35, "end": 3423.51, "word": " mean", "probability": 0.95751953125}, {"start": 3423.51, "end": 3423.65, "word": " of", "probability": 0.962890625}, {"start": 3423.65, "end": 3423.75, "word": " P", "probability": 0.546875}, {"start": 3423.75, "end": 3423.91, "word": " is", "probability": 0.94384765625}, {"start": 3423.91, "end": 3424.21, "word": " pi.", "probability": 0.8369140625}, {"start": 3424.95, "end": 3425.17, "word": " So", "probability": 0.91796875}, {"start": 3425.17, "end": 3425.41, "word": " that's", "probability": 0.89697265625}, {"start": 3425.41, "end": 3425.69, "word": " correct.", "probability": 0.8984375}, {"start": 3427.05, "end": 3427.65, "word": " Twelve,", "probability": 0.3330078125}, {"start": 3427.77, "end": 3427.85, "word": " the", "probability": 0.9150390625}, {"start": 3427.85, "end": 3428.05, "word": " last", "probability": 0.873046875}, {"start": 3428.05, "end": 3428.33, "word": " one.", "probability": 0.927734375}], "temperature": 1.0}, {"id": 122, "seek": 344749, "start": 3429.77, "end": 3447.49, "text": " The standard error of the sampling 
distribution of P times 1 minus P divided by N, where P is the sample proportion, it's false, because sigma over P square root of Pi, 1 minus Pi divided by N. So this statement is false.", "tokens": [440, 3832, 6713, 295, 264, 21179, 7316, 295, 430, 1413, 502, 3175, 430, 6666, 538, 426, 11, 689, 430, 307, 264, 6889, 16068, 11, 309, 311, 7908, 11, 570, 12771, 670, 430, 3732, 5593, 295, 17741, 11, 502, 3175, 17741, 6666, 538, 426, 13, 407, 341, 5629, 307, 7908, 13], "avg_logprob": -0.2968749871440962, "compression_ratio": 1.48, "no_speech_prob": 1.1920928955078125e-07, "words": [{"start": 3429.77, "end": 3429.99, "word": " The", "probability": 0.5673828125}, {"start": 3429.99, "end": 3430.41, "word": " standard", "probability": 0.9150390625}, {"start": 3430.41, "end": 3430.83, "word": " error", "probability": 0.86279296875}, {"start": 3430.83, "end": 3431.25, "word": " of", "probability": 0.94580078125}, {"start": 3431.25, "end": 3431.43, "word": " the", "probability": 0.888671875}, {"start": 3431.43, "end": 3431.67, "word": " sampling", "probability": 0.360107421875}, {"start": 3431.67, "end": 3432.09, "word": " distribution", "probability": 0.30078125}, {"start": 3432.09, "end": 3432.37, "word": " of", "probability": 0.66162109375}, {"start": 3432.37, "end": 3432.55, "word": " P", "probability": 0.59716796875}, {"start": 3432.55, "end": 3432.93, "word": " times", "probability": 0.646484375}, {"start": 3432.93, "end": 3433.15, "word": " 1", "probability": 0.59375}, {"start": 3433.15, "end": 3433.39, "word": " minus", "probability": 0.88232421875}, {"start": 3433.39, "end": 3433.55, "word": " P", "probability": 0.9599609375}, {"start": 3433.55, "end": 3433.75, "word": " divided", "probability": 0.74853515625}, {"start": 3433.75, "end": 3433.97, "word": " by", "probability": 0.96826171875}, {"start": 3433.97, "end": 3434.29, "word": " N,", "probability": 0.8544921875}, {"start": 3434.69, "end": 3434.97, "word": " where", "probability": 0.92333984375}, {"start": 
3434.97, "end": 3435.09, "word": " P", "probability": 0.55810546875}, {"start": 3435.09, "end": 3435.21, "word": " is", "probability": 0.94873046875}, {"start": 3435.21, "end": 3435.39, "word": " the", "probability": 0.89208984375}, {"start": 3435.39, "end": 3435.67, "word": " sample", "probability": 0.83447265625}, {"start": 3435.67, "end": 3436.45, "word": " proportion,", "probability": 0.79638671875}, {"start": 3437.03, "end": 3437.39, "word": " it's", "probability": 0.79345703125}, {"start": 3437.39, "end": 3437.89, "word": " false,", "probability": 0.88720703125}, {"start": 3438.47, "end": 3438.99, "word": " because", "probability": 0.89208984375}, {"start": 3438.99, "end": 3439.87, "word": " sigma", "probability": 0.6611328125}, {"start": 3439.87, "end": 3440.15, "word": " over", "probability": 0.55810546875}, {"start": 3440.15, "end": 3440.43, "word": " P", "probability": 0.7138671875}, {"start": 3440.43, "end": 3441.09, "word": " square", "probability": 0.294677734375}, {"start": 3441.09, "end": 3441.37, "word": " root", "probability": 0.95556640625}, {"start": 3441.37, "end": 3441.63, "word": " of", "probability": 0.9638671875}, {"start": 3441.63, "end": 3441.95, "word": " Pi,", "probability": 0.513671875}, {"start": 3442.77, "end": 3443.11, "word": " 1", "probability": 0.85205078125}, {"start": 3443.11, "end": 3443.43, "word": " minus", "probability": 0.98291015625}, {"start": 3443.43, "end": 3443.69, "word": " Pi", "probability": 0.9658203125}, {"start": 3443.69, "end": 3444.05, "word": " divided", "probability": 0.7294921875}, {"start": 3444.05, "end": 3444.33, "word": " by", "probability": 0.9658203125}, {"start": 3444.33, "end": 3444.67, "word": " N.", "probability": 0.99267578125}, {"start": 3445.15, "end": 3445.49, "word": " So", "probability": 0.951171875}, {"start": 3445.49, "end": 3445.83, "word": " this", "probability": 0.72998046875}, {"start": 3445.83, "end": 3446.43, "word": " statement", "probability": 0.900390625}, {"start": 3446.43, "end": 
3446.95, "word": " is", "probability": 0.94482421875}, {"start": 3446.95, "end": 3447.49, "word": " false.", "probability": 0.89794921875}], "temperature": 1.0}, {"id": 123, "seek": 349108, "start": 3463.56, "end": 3491.08, "text": " Let's do, look at number 13, the last one, number 13. A sample of size 25. So we took a random sample of 25. So N is 25. It provides a sample variance.", "tokens": [961, 311, 360, 11, 574, 412, 1230, 3705, 11, 264, 1036, 472, 11, 1230, 3705, 13, 316, 6889, 295, 2744, 3552, 13, 407, 321, 1890, 257, 4974, 6889, 295, 3552, 13, 407, 426, 307, 3552, 13, 467, 6417, 257, 6889, 21977, 13], "avg_logprob": -0.17859738649323928, "compression_ratio": 1.2991452991452992, "no_speech_prob": 0.0, "words": [{"start": 3463.56, "end": 3464.12, "word": " Let's", "probability": 0.81884765625}, {"start": 3464.12, "end": 3464.52, "word": " do,", "probability": 0.66552734375}, {"start": 3465.92, "end": 3467.8, "word": " look", "probability": 0.83203125}, {"start": 3467.8, "end": 3467.94, "word": " at", "probability": 0.966796875}, {"start": 3467.94, "end": 3468.22, "word": " number", "probability": 0.912109375}, {"start": 3468.22, "end": 3468.7, "word": " 13,", "probability": 0.92578125}, {"start": 3468.8, "end": 3468.9, "word": " the", "probability": 0.90966796875}, {"start": 3468.9, "end": 3469.14, "word": " last", "probability": 0.87158203125}, {"start": 3469.14, "end": 3469.42, "word": " one,", "probability": 0.92529296875}, {"start": 3469.6, "end": 3469.82, "word": " number", "probability": 0.93408203125}, {"start": 3469.82, "end": 3470.36, "word": " 13.", "probability": 0.970703125}, {"start": 3476.9, "end": 3477.24, "word": " A", "probability": 0.8603515625}, {"start": 3477.24, "end": 3477.5, "word": " sample", "probability": 0.7529296875}, {"start": 3477.5, "end": 3477.72, "word": " of", "probability": 0.95849609375}, {"start": 3477.72, "end": 3477.98, "word": " size", "probability": 0.87060546875}, {"start": 3477.98, "end": 3478.52, "word": " 25.", 
"probability": 0.95166015625}, {"start": 3478.8, "end": 3479.0, "word": " So", "probability": 0.96533203125}, {"start": 3479.0, "end": 3479.18, "word": " we", "probability": 0.54150390625}, {"start": 3479.18, "end": 3479.48, "word": " took", "probability": 0.91552734375}, {"start": 3479.48, "end": 3479.74, "word": " a", "probability": 0.89306640625}, {"start": 3479.74, "end": 3479.96, "word": " random", "probability": 0.85546875}, {"start": 3479.96, "end": 3480.44, "word": " sample", "probability": 0.88232421875}, {"start": 3480.44, "end": 3480.7, "word": " of", "probability": 0.9677734375}, {"start": 3480.7, "end": 3481.32, "word": " 25.", "probability": 0.97509765625}, {"start": 3483.62, "end": 3483.9, "word": " So", "probability": 0.939453125}, {"start": 3483.9, "end": 3484.12, "word": " N", "probability": 0.51171875}, {"start": 3484.12, "end": 3484.26, "word": " is", "probability": 0.94970703125}, {"start": 3484.26, "end": 3484.64, "word": " 25.", "probability": 0.95556640625}, {"start": 3488.88, "end": 3489.44, "word": " It", "probability": 0.95166015625}, {"start": 3489.44, "end": 3489.98, "word": " provides", "probability": 0.9306640625}, {"start": 3489.98, "end": 3490.22, "word": " a", "probability": 0.98681640625}, {"start": 3490.22, "end": 3490.54, "word": " sample", "probability": 0.8583984375}, {"start": 3490.54, "end": 3491.08, "word": " variance.", "probability": 0.8193359375}], "temperature": 1.0}, {"id": 124, "seek": 352111, "start": 3494.23, "end": 3521.11, "text": " The standard error, he asked about the standard error, sigma of x bar, in this case equal to 4, is best described as the estimate of the standard deviation of means calculated from sample size 25. He asked about sigma of x bar. 
We know that sigma of x bar equals sigma over root n, but sigma is not given.", "tokens": [440, 3832, 6713, 11, 415, 2351, 466, 264, 3832, 6713, 11, 12771, 295, 2031, 2159, 11, 294, 341, 1389, 2681, 281, 1017, 11, 307, 1151, 7619, 382, 264, 12539, 295, 264, 3832, 25163, 295, 1355, 15598, 490, 6889, 2744, 3552, 13, 634, 2351, 466, 12771, 295, 2031, 2159, 13, 492, 458, 300, 12771, 295, 2031, 2159, 6915, 12771, 670, 5593, 297, 11, 457, 12771, 307, 406, 2212, 13], "avg_logprob": -0.23867753623188406, "compression_ratio": 1.7586206896551724, "no_speech_prob": 0.0, "words": [{"start": 3494.23, "end": 3494.83, "word": " The", "probability": 0.1888427734375}, {"start": 3494.83, "end": 3495.29, "word": " standard", "probability": 0.865234375}, {"start": 3495.29, "end": 3495.65, "word": " error,", "probability": 0.791015625}, {"start": 3496.83, "end": 3497.01, "word": " he", "probability": 0.7119140625}, {"start": 3497.01, "end": 3497.19, "word": " asked", "probability": 0.350830078125}, {"start": 3497.19, "end": 3497.51, "word": " about", "probability": 0.90185546875}, {"start": 3497.51, "end": 3497.77, "word": " the", "probability": 0.87939453125}, {"start": 3497.77, "end": 3498.13, "word": " standard", "probability": 0.939453125}, {"start": 3498.13, "end": 3498.41, "word": " error,", "probability": 0.681640625}, {"start": 3498.71, "end": 3498.95, "word": " sigma", "probability": 0.63037109375}, {"start": 3498.95, "end": 3500.31, "word": " of", "probability": 0.6962890625}, {"start": 3500.31, "end": 3500.47, "word": " x", "probability": 0.69091796875}, {"start": 3500.47, "end": 3500.73, "word": " bar,", "probability": 0.89794921875}, {"start": 3501.51, "end": 3502.11, "word": " in", "probability": 0.8818359375}, {"start": 3502.11, "end": 3502.35, "word": " this", "probability": 0.94921875}, {"start": 3502.35, "end": 3502.59, "word": " case", "probability": 0.90576171875}, {"start": 3502.59, "end": 3502.89, "word": " equal", "probability": 0.61669921875}, {"start": 3502.89, 
"end": 3503.09, "word": " to", "probability": 0.96240234375}, {"start": 3503.09, "end": 3503.43, "word": " 4,", "probability": 0.53466796875}, {"start": 3505.89, "end": 3506.19, "word": " is", "probability": 0.91650390625}, {"start": 3506.19, "end": 3507.03, "word": " best", "probability": 0.91455078125}, {"start": 3507.03, "end": 3507.73, "word": " described", "probability": 0.87353515625}, {"start": 3507.73, "end": 3508.39, "word": " as", "probability": 0.95263671875}, {"start": 3508.39, "end": 3508.53, "word": " the", "probability": 0.89111328125}, {"start": 3508.53, "end": 3509.01, "word": " estimate", "probability": 0.8955078125}, {"start": 3509.01, "end": 3509.27, "word": " of", "probability": 0.96240234375}, {"start": 3509.27, "end": 3509.43, "word": " the", "probability": 0.8837890625}, {"start": 3509.43, "end": 3509.75, "word": " standard", "probability": 0.95654296875}, {"start": 3509.75, "end": 3510.27, "word": " deviation", "probability": 0.89306640625}, {"start": 3510.27, "end": 3510.59, "word": " of", "probability": 0.9306640625}, {"start": 3510.59, "end": 3510.97, "word": " means", "probability": 0.78662109375}, {"start": 3510.97, "end": 3511.89, "word": " calculated", "probability": 0.88916015625}, {"start": 3511.89, "end": 3512.23, "word": " from", "probability": 0.8759765625}, {"start": 3512.23, "end": 3512.53, "word": " sample", "probability": 0.1597900390625}, {"start": 3512.53, "end": 3512.85, "word": " size", "probability": 0.84423828125}, {"start": 3512.85, "end": 3513.25, "word": " 25.", "probability": 0.92333984375}, {"start": 3513.61, "end": 3513.93, "word": " He", "probability": 0.939453125}, {"start": 3513.93, "end": 3514.09, "word": " asked", "probability": 0.76220703125}, {"start": 3514.09, "end": 3514.33, "word": " about", "probability": 0.904296875}, {"start": 3514.33, "end": 3514.57, "word": " sigma", "probability": 0.91162109375}, {"start": 3514.57, "end": 3514.71, "word": " of", "probability": 0.9140625}, {"start": 3514.71, "end": 
3514.85, "word": " x", "probability": 0.986328125}, {"start": 3514.85, "end": 3515.13, "word": " bar.", "probability": 0.93896484375}, {"start": 3516.09, "end": 3516.69, "word": " We", "probability": 0.94189453125}, {"start": 3516.69, "end": 3516.87, "word": " know", "probability": 0.876953125}, {"start": 3516.87, "end": 3517.05, "word": " that", "probability": 0.92626953125}, {"start": 3517.05, "end": 3517.35, "word": " sigma", "probability": 0.935546875}, {"start": 3517.35, "end": 3517.51, "word": " of", "probability": 0.9501953125}, {"start": 3517.51, "end": 3517.69, "word": " x", "probability": 0.99462890625}, {"start": 3517.69, "end": 3517.93, "word": " bar", "probability": 0.9521484375}, {"start": 3517.93, "end": 3518.19, "word": " equals", "probability": 0.8935546875}, {"start": 3518.19, "end": 3518.51, "word": " sigma", "probability": 0.93359375}, {"start": 3518.51, "end": 3518.71, "word": " over", "probability": 0.6982421875}, {"start": 3518.71, "end": 3518.93, "word": " root", "probability": 0.92529296875}, {"start": 3518.93, "end": 3519.17, "word": " n,", "probability": 0.66845703125}, {"start": 3519.73, "end": 3520.17, "word": " but", "probability": 0.92822265625}, {"start": 3520.17, "end": 3520.49, "word": " sigma", "probability": 0.93212890625}, {"start": 3520.49, "end": 3520.65, "word": " is", "probability": 0.9345703125}, {"start": 3520.65, "end": 3520.81, "word": " not", "probability": 0.94677734375}, {"start": 3520.81, "end": 3521.11, "word": " given.", "probability": 0.8828125}], "temperature": 1.0}, {"id": 125, "seek": 355076, "start": 3522.62, "end": 3550.76, "text": " S is a square, S is a 20, but what's Sigma? Because Sigma of X bar, Sigma over root N, Sigma is not given. 
But as we know before, since N is large enough, so if N is large, in this case, if N is large, if N is large, we can replace Sigma by S.", "tokens": [318, 307, 257, 3732, 11, 318, 307, 257, 945, 11, 457, 437, 311, 36595, 30, 1436, 36595, 295, 1783, 2159, 11, 36595, 670, 5593, 426, 11, 36595, 307, 406, 2212, 13, 583, 382, 321, 458, 949, 11, 1670, 426, 307, 2416, 1547, 11, 370, 498, 426, 307, 2416, 11, 294, 341, 1389, 11, 498, 426, 307, 2416, 11, 498, 426, 307, 2416, 11, 321, 393, 7406, 36595, 538, 318, 13], "avg_logprob": -0.20653608819128763, "compression_ratio": 1.5947712418300655, "no_speech_prob": 0.0, "words": [{"start": 3522.62, "end": 3523.14, "word": " S", "probability": 0.5166015625}, {"start": 3523.14, "end": 3523.42, "word": " is", "probability": 0.724609375}, {"start": 3523.42, "end": 3523.64, "word": " a", "probability": 0.7939453125}, {"start": 3523.64, "end": 3524.02, "word": " square,", "probability": 0.85693359375}, {"start": 3524.72, "end": 3525.46, "word": " S", "probability": 0.8525390625}, {"start": 3525.46, "end": 3525.64, "word": " is", "probability": 0.9423828125}, {"start": 3525.64, "end": 3525.78, "word": " a", "probability": 0.7265625}, {"start": 3525.78, "end": 3526.02, "word": " 20,", "probability": 0.410888671875}, {"start": 3526.36, "end": 3526.62, "word": " but", "probability": 0.8876953125}, {"start": 3526.62, "end": 3527.02, "word": " what's", "probability": 0.7958984375}, {"start": 3527.02, "end": 3527.32, "word": " Sigma?", "probability": 0.4541015625}, {"start": 3528.14, "end": 3528.54, "word": " Because", "probability": 0.82177734375}, {"start": 3528.54, "end": 3529.26, "word": " Sigma", "probability": 0.77587890625}, {"start": 3529.26, "end": 3529.46, "word": " of", "probability": 0.87744140625}, {"start": 3529.46, "end": 3529.62, "word": " X", "probability": 0.787109375}, {"start": 3529.62, "end": 3530.02, "word": " bar,", "probability": 0.779296875}, {"start": 3531.32, "end": 3531.6, "word": " Sigma", "probability": 
0.8427734375}, {"start": 3531.6, "end": 3531.84, "word": " over", "probability": 0.57080078125}, {"start": 3531.84, "end": 3532.04, "word": " root", "probability": 0.92431640625}, {"start": 3532.04, "end": 3532.26, "word": " N,", "probability": 0.78955078125}, {"start": 3533.02, "end": 3533.6, "word": " Sigma", "probability": 0.8828125}, {"start": 3533.6, "end": 3533.78, "word": " is", "probability": 0.90185546875}, {"start": 3533.78, "end": 3533.96, "word": " not", "probability": 0.943359375}, {"start": 3533.96, "end": 3534.24, "word": " given.", "probability": 0.9033203125}, {"start": 3535.66, "end": 3536.04, "word": " But", "probability": 0.681640625}, {"start": 3536.04, "end": 3536.32, "word": " as", "probability": 0.84130859375}, {"start": 3536.32, "end": 3536.48, "word": " we", "probability": 0.95068359375}, {"start": 3536.48, "end": 3536.64, "word": " know", "probability": 0.85302734375}, {"start": 3536.64, "end": 3537.0, "word": " before,", "probability": 0.822265625}, {"start": 3537.84, "end": 3538.3, "word": " since", "probability": 0.83544921875}, {"start": 3538.3, "end": 3538.64, "word": " N", "probability": 0.96875}, {"start": 3538.64, "end": 3538.82, "word": " is", "probability": 0.95556640625}, {"start": 3538.82, "end": 3539.1, "word": " large", "probability": 0.96435546875}, {"start": 3539.1, "end": 3539.56, "word": " enough,", "probability": 0.87890625}, {"start": 3540.84, "end": 3541.04, "word": " so", "probability": 0.89453125}, {"start": 3541.04, "end": 3541.24, "word": " if", "probability": 0.9404296875}, {"start": 3541.24, "end": 3541.4, "word": " N", "probability": 0.97802734375}, {"start": 3541.4, "end": 3541.54, "word": " is", "probability": 0.95361328125}, {"start": 3541.54, "end": 3541.92, "word": " large,", "probability": 0.96630859375}, {"start": 3543.18, "end": 3543.38, "word": " in", "probability": 0.8984375}, {"start": 3543.38, "end": 3543.64, "word": " this", "probability": 0.9482421875}, {"start": 3543.64, "end": 3544.08, "word": " 
case,", "probability": 0.91455078125}, {"start": 3544.42, "end": 3544.62, "word": " if", "probability": 0.95263671875}, {"start": 3544.62, "end": 3544.82, "word": " N", "probability": 0.98974609375}, {"start": 3544.82, "end": 3544.98, "word": " is", "probability": 0.95361328125}, {"start": 3544.98, "end": 3545.32, "word": " large,", "probability": 0.97119140625}, {"start": 3546.36, "end": 3546.8, "word": " if", "probability": 0.86767578125}, {"start": 3546.8, "end": 3547.02, "word": " N", "probability": 0.99267578125}, {"start": 3547.02, "end": 3547.2, "word": " is", "probability": 0.95361328125}, {"start": 3547.2, "end": 3547.46, "word": " large,", "probability": 0.96875}, {"start": 3547.52, "end": 3547.7, "word": " we", "probability": 0.9501953125}, {"start": 3547.7, "end": 3548.1, "word": " can", "probability": 0.943359375}, {"start": 3548.1, "end": 3548.7, "word": " replace", "probability": 0.92529296875}, {"start": 3548.7, "end": 3549.12, "word": " Sigma", "probability": 0.830078125}, {"start": 3549.12, "end": 3549.52, "word": " by", "probability": 0.97412109375}, {"start": 3549.52, "end": 3550.76, "word": " S.", "probability": 0.9912109375}], "temperature": 1.0}, {"id": 126, "seek": 356839, "start": 3551.39, "end": 3568.39, "text": " And this is just S over root N. So if N is large enough, more than 15, so we can use or we can apply the central limit theorem. 
So we get the sigma YS.", "tokens": [400, 341, 307, 445, 318, 670, 5593, 426, 13, 407, 498, 426, 307, 2416, 1547, 11, 544, 813, 2119, 11, 370, 321, 393, 764, 420, 321, 393, 3079, 264, 5777, 4948, 20904, 13, 407, 321, 483, 264, 12771, 398, 50, 13], "avg_logprob": -0.3030134020816712, "compression_ratio": 1.256198347107438, "no_speech_prob": 0.0, "words": [{"start": 3551.39, "end": 3551.67, "word": " And", "probability": 0.42578125}, {"start": 3551.67, "end": 3551.85, "word": " this", "probability": 0.630859375}, {"start": 3551.85, "end": 3551.97, "word": " is", "probability": 0.6171875}, {"start": 3551.97, "end": 3552.25, "word": " just", "probability": 0.9130859375}, {"start": 3552.25, "end": 3552.61, "word": " S", "probability": 0.5771484375}, {"start": 3552.61, "end": 3552.95, "word": " over", "probability": 0.88818359375}, {"start": 3552.95, "end": 3554.33, "word": " root", "probability": 0.88037109375}, {"start": 3554.33, "end": 3554.51, "word": " N.", "probability": 0.78955078125}, {"start": 3555.13, "end": 3555.29, "word": " So", "probability": 0.87939453125}, {"start": 3555.29, "end": 3555.83, "word": " if", "probability": 0.75146484375}, {"start": 3555.83, "end": 3556.41, "word": " N", "probability": 0.880859375}, {"start": 3556.41, "end": 3556.55, "word": " is", "probability": 0.95703125}, {"start": 3556.55, "end": 3556.79, "word": " large", "probability": 0.96533203125}, {"start": 3556.79, "end": 3557.21, "word": " enough,", "probability": 0.890625}, {"start": 3559.35, "end": 3560.67, "word": " more", "probability": 0.3212890625}, {"start": 3560.67, "end": 3561.53, "word": " than", "probability": 0.94580078125}, {"start": 3561.53, "end": 3561.99, "word": " 15,", "probability": 0.71826171875}, {"start": 3562.29, "end": 3562.49, "word": " so", "probability": 0.64599609375}, {"start": 3562.49, "end": 3562.73, "word": " we", "probability": 0.953125}, {"start": 3562.73, "end": 3562.93, "word": " can", "probability": 0.94482421875}, {"start": 3562.93, "end": 
3563.27, "word": " use", "probability": 0.8193359375}, {"start": 3563.27, "end": 3563.79, "word": " or", "probability": 0.56982421875}, {"start": 3563.79, "end": 3564.01, "word": " we", "probability": 0.95458984375}, {"start": 3564.01, "end": 3564.23, "word": " can", "probability": 0.951171875}, {"start": 3564.23, "end": 3564.61, "word": " apply", "probability": 0.92529296875}, {"start": 3564.61, "end": 3564.85, "word": " the", "probability": 0.89990234375}, {"start": 3564.85, "end": 3565.15, "word": " central", "probability": 0.75439453125}, {"start": 3565.15, "end": 3565.35, "word": " limit", "probability": 0.8896484375}, {"start": 3565.35, "end": 3565.71, "word": " theorem.", "probability": 0.88623046875}, {"start": 3566.31, "end": 3566.53, "word": " So", "probability": 0.94970703125}, {"start": 3566.53, "end": 3567.15, "word": " we", "probability": 0.7236328125}, {"start": 3567.15, "end": 3567.35, "word": " get", "probability": 0.2470703125}, {"start": 3567.35, "end": 3567.53, "word": " the", "probability": 0.40283203125}, {"start": 3567.53, "end": 3567.89, "word": " sigma", "probability": 0.58447265625}, {"start": 3567.89, "end": 3568.39, "word": " YS.", "probability": 0.605712890625}], "temperature": 1.0}, {"id": 127, "seek": 359506, "start": 3569.18, "end": 3595.06, "text": " So S equals 20 divided by 5 is 4. So the standard error equals 4 in this case. So if S squared is given instead of sigma squared, we can replace sigma by S if N is larger. So in this case, we replace sigma by S. So sigma of x bar equals S over root N. 
That will give 4.", "tokens": [407, 318, 6915, 945, 6666, 538, 1025, 307, 1017, 13, 407, 264, 3832, 6713, 6915, 1017, 294, 341, 1389, 13, 407, 498, 318, 8889, 307, 2212, 2602, 295, 12771, 8889, 11, 321, 393, 7406, 12771, 538, 318, 498, 426, 307, 4833, 13, 407, 294, 341, 1389, 11, 321, 7406, 12771, 538, 318, 13, 407, 12771, 295, 2031, 2159, 6915, 318, 670, 5593, 426, 13, 663, 486, 976, 1017, 13], "avg_logprob": -0.20223214605024883, "compression_ratio": 1.656441717791411, "no_speech_prob": 0.0, "words": [{"start": 3569.18, "end": 3569.52, "word": " So", "probability": 0.72119140625}, {"start": 3569.52, "end": 3569.86, "word": " S", "probability": 0.427490234375}, {"start": 3569.86, "end": 3570.26, "word": " equals", "probability": 0.411376953125}, {"start": 3570.26, "end": 3570.68, "word": " 20", "probability": 0.85888671875}, {"start": 3570.68, "end": 3571.9, "word": " divided", "probability": 0.5478515625}, {"start": 3571.9, "end": 3572.32, "word": " by", "probability": 0.962890625}, {"start": 3572.32, "end": 3573.74, "word": " 5", "probability": 0.8974609375}, {"start": 3573.74, "end": 3574.3, "word": " is", "probability": 0.397216796875}, {"start": 3574.3, "end": 3574.64, "word": " 4.", "probability": 0.9658203125}, {"start": 3575.14, "end": 3575.34, "word": " So", "probability": 0.94580078125}, {"start": 3575.34, "end": 3575.5, "word": " the", "probability": 0.66845703125}, {"start": 3575.5, "end": 3575.88, "word": " standard", "probability": 0.9306640625}, {"start": 3575.88, "end": 3576.18, "word": " error", "probability": 0.771484375}, {"start": 3576.18, "end": 3576.52, "word": " equals", "probability": 0.359619140625}, {"start": 3576.52, "end": 3576.86, "word": " 4", "probability": 0.83935546875}, {"start": 3576.86, "end": 3577.0, "word": " in", "probability": 0.82861328125}, {"start": 3577.0, "end": 3577.14, "word": " this", "probability": 0.94384765625}, {"start": 3577.14, "end": 3577.38, "word": " case.", "probability": 0.8994140625}, {"start": 3577.72, 
"end": 3577.92, "word": " So", "probability": 0.9599609375}, {"start": 3577.92, "end": 3578.16, "word": " if", "probability": 0.9169921875}, {"start": 3578.16, "end": 3578.48, "word": " S", "probability": 0.85888671875}, {"start": 3578.48, "end": 3578.8, "word": " squared", "probability": 0.71728515625}, {"start": 3578.8, "end": 3579.06, "word": " is", "probability": 0.94482421875}, {"start": 3579.06, "end": 3579.78, "word": " given", "probability": 0.8984375}, {"start": 3579.78, "end": 3580.88, "word": " instead", "probability": 0.55908203125}, {"start": 3580.88, "end": 3581.06, "word": " of", "probability": 0.9697265625}, {"start": 3581.06, "end": 3581.38, "word": " sigma", "probability": 0.6865234375}, {"start": 3581.38, "end": 3581.76, "word": " squared,", "probability": 0.66796875}, {"start": 3582.64, "end": 3582.9, "word": " we", "probability": 0.95849609375}, {"start": 3582.9, "end": 3583.12, "word": " can", "probability": 0.93212890625}, {"start": 3583.12, "end": 3583.56, "word": " replace", "probability": 0.91796875}, {"start": 3583.56, "end": 3584.16, "word": " sigma", "probability": 0.9228515625}, {"start": 3584.16, "end": 3584.9, "word": " by", "probability": 0.97412109375}, {"start": 3584.9, "end": 3585.24, "word": " S", "probability": 0.95849609375}, {"start": 3585.24, "end": 3585.76, "word": " if", "probability": 0.86767578125}, {"start": 3585.76, "end": 3585.92, "word": " N", "probability": 0.66943359375}, {"start": 3585.92, "end": 3586.04, "word": " is", "probability": 0.94921875}, {"start": 3586.04, "end": 3586.34, "word": " larger.", "probability": 0.892578125}, {"start": 3587.38, "end": 3588.02, "word": " So", "probability": 0.9609375}, {"start": 3588.02, "end": 3588.16, "word": " in", "probability": 0.80517578125}, {"start": 3588.16, "end": 3588.3, "word": " this", "probability": 0.94677734375}, {"start": 3588.3, "end": 3588.62, "word": " case,", "probability": 0.90966796875}, {"start": 3588.78, "end": 3588.94, "word": " we", "probability": 
0.947265625}, {"start": 3588.94, "end": 3589.34, "word": " replace", "probability": 0.86279296875}, {"start": 3589.34, "end": 3589.78, "word": " sigma", "probability": 0.9267578125}, {"start": 3589.78, "end": 3590.24, "word": " by", "probability": 0.9736328125}, {"start": 3590.24, "end": 3590.62, "word": " S.", "probability": 0.98974609375}, {"start": 3591.1, "end": 3591.34, "word": " So", "probability": 0.9609375}, {"start": 3591.34, "end": 3591.62, "word": " sigma", "probability": 0.86767578125}, {"start": 3591.62, "end": 3591.76, "word": " of", "probability": 0.497314453125}, {"start": 3591.76, "end": 3591.92, "word": " x", "probability": 0.6298828125}, {"start": 3591.92, "end": 3592.12, "word": " bar", "probability": 0.80126953125}, {"start": 3592.12, "end": 3592.36, "word": " equals", "probability": 0.79638671875}, {"start": 3592.36, "end": 3592.7, "word": " S", "probability": 0.97314453125}, {"start": 3592.7, "end": 3592.96, "word": " over", "probability": 0.8759765625}, {"start": 3592.96, "end": 3593.2, "word": " root", "probability": 0.921875}, {"start": 3593.2, "end": 3593.38, "word": " N.", "probability": 0.93701171875}, {"start": 3593.94, "end": 3594.36, "word": " That", "probability": 0.90234375}, {"start": 3594.36, "end": 3594.54, "word": " will", "probability": 0.83837890625}, {"start": 3594.54, "end": 3594.76, "word": " give", "probability": 0.8544921875}, {"start": 3594.76, "end": 3595.06, "word": " 4.", "probability": 0.744140625}], "temperature": 1.0}, {"id": 128, "seek": 361756, "start": 3598.4, "end": 3617.56, "text": " S is the sample variance. Sigma is population S squared. 
Sigma squared.", "tokens": [318, 307, 264, 6889, 21977, 13, 36595, 307, 4415, 318, 8889, 13, 36595, 8889, 13], "avg_logprob": -0.36523436196148396, "compression_ratio": 1.1076923076923078, "no_speech_prob": 0.0, "words": [{"start": 3598.4000000000005, "end": 3599.6400000000003, "word": " S", "probability": 0.1905517578125}, {"start": 3599.6400000000003, "end": 3600.88, "word": " is", "probability": 0.84765625}, {"start": 3600.88, "end": 3601.12, "word": " the", "probability": 0.798828125}, {"start": 3601.12, "end": 3601.42, "word": " sample", "probability": 0.87060546875}, {"start": 3601.42, "end": 3602.04, "word": " variance.", "probability": 0.908203125}, {"start": 3605.74, "end": 3606.08, "word": " Sigma", "probability": 0.83203125}, {"start": 3606.08, "end": 3609.34, "word": " is", "probability": 0.8876953125}, {"start": 3609.34, "end": 3609.94, "word": " population", "probability": 0.76611328125}, {"start": 3609.94, "end": 3612.3, "word": " S", "probability": 0.51025390625}, {"start": 3612.3, "end": 3612.82, "word": " squared.", "probability": 0.66552734375}, {"start": 3615.72, "end": 3616.96, "word": " Sigma", "probability": 0.61669921875}, {"start": 3616.96, "end": 3617.56, "word": " squared.", "probability": 0.8056640625}], "temperature": 1.0}, {"id": 129, "seek": 364538, "start": 3619.08, "end": 3645.38, "text": " is the population variance. And we know that S squared is the sample variance, where sigma squared is the population variance. And population in this case is not given. So we replace the population variance by sample variance, if the population is normal or N is large. Because if N is large enough, we can apply the central limit theorem. 
And if you go back", "tokens": [307, 264, 4415, 21977, 13, 400, 321, 458, 300, 318, 8889, 307, 264, 6889, 21977, 11, 689, 12771, 8889, 307, 264, 4415, 21977, 13, 400, 4415, 294, 341, 1389, 307, 406, 2212, 13, 407, 321, 7406, 264, 4415, 21977, 538, 6889, 21977, 11, 498, 264, 4415, 307, 2710, 420, 426, 307, 2416, 13, 1436, 498, 426, 307, 2416, 1547, 11, 321, 393, 3079, 264, 5777, 4948, 20904, 13, 400, 498, 291, 352, 646], "avg_logprob": -0.22972973536800695, "compression_ratio": 1.9617486338797814, "no_speech_prob": 0.0, "words": [{"start": 3619.08, "end": 3619.36, "word": " is", "probability": 0.42626953125}, {"start": 3619.36, "end": 3619.5, "word": " the", "probability": 0.849609375}, {"start": 3619.5, "end": 3619.92, "word": " population", "probability": 0.94970703125}, {"start": 3619.92, "end": 3620.5, "word": " variance.", "probability": 0.85009765625}, {"start": 3621.48, "end": 3621.58, "word": " And", "probability": 0.27294921875}, {"start": 3621.58, "end": 3621.7, "word": " we", "probability": 0.759765625}, {"start": 3621.7, "end": 3621.84, "word": " know", "probability": 0.87548828125}, {"start": 3621.84, "end": 3622.34, "word": " that", "probability": 0.8720703125}, {"start": 3622.34, "end": 3623.16, "word": " S", "probability": 0.53076171875}, {"start": 3623.16, "end": 3623.58, "word": " squared", "probability": 0.36279296875}, {"start": 3623.58, "end": 3623.86, "word": " is", "probability": 0.94775390625}, {"start": 3623.86, "end": 3624.08, "word": " the", "probability": 0.9013671875}, {"start": 3624.08, "end": 3624.36, "word": " sample", "probability": 0.90234375}, {"start": 3624.36, "end": 3624.8, "word": " variance,", "probability": 0.86669921875}, {"start": 3625.18, "end": 3625.4, "word": " where", "probability": 0.9169921875}, {"start": 3625.4, "end": 3625.82, "word": " sigma", "probability": 0.60595703125}, {"start": 3625.82, "end": 3626.2, "word": " squared", "probability": 0.7998046875}, {"start": 3626.2, "end": 3626.42, "word": " is", 
"probability": 0.94287109375}, {"start": 3626.42, "end": 3626.5, "word": " the", "probability": 0.8447265625}, {"start": 3626.5, "end": 3626.86, "word": " population", "probability": 0.9521484375}, {"start": 3626.86, "end": 3627.28, "word": " variance.", "probability": 0.81640625}, {"start": 3628.06, "end": 3628.18, "word": " And", "probability": 0.49853515625}, {"start": 3628.18, "end": 3628.58, "word": " population", "probability": 0.88671875}, {"start": 3628.58, "end": 3628.78, "word": " in", "probability": 0.7646484375}, {"start": 3628.78, "end": 3628.92, "word": " this", "probability": 0.9501953125}, {"start": 3628.92, "end": 3629.08, "word": " case", "probability": 0.9248046875}, {"start": 3629.08, "end": 3629.22, "word": " is", "probability": 0.9130859375}, {"start": 3629.22, "end": 3629.38, "word": " not", "probability": 0.94970703125}, {"start": 3629.38, "end": 3629.64, "word": " given.", "probability": 0.90380859375}, {"start": 3630.4, "end": 3630.76, "word": " So", "probability": 0.9482421875}, {"start": 3630.76, "end": 3630.94, "word": " we", "probability": 0.73876953125}, {"start": 3630.94, "end": 3631.32, "word": " replace", "probability": 0.884765625}, {"start": 3631.32, "end": 3631.5, "word": " the", "probability": 0.91015625}, {"start": 3631.5, "end": 3631.94, "word": " population", "probability": 0.9482421875}, {"start": 3631.94, "end": 3632.5, "word": " variance", "probability": 0.86474609375}, {"start": 3632.5, "end": 3633.0, "word": " by", "probability": 0.95263671875}, {"start": 3633.0, "end": 3634.06, "word": " sample", "probability": 0.8291015625}, {"start": 3634.06, "end": 3634.58, "word": " variance,", "probability": 0.88134765625}, {"start": 3634.98, "end": 3635.32, "word": " if", "probability": 0.94970703125}, {"start": 3635.32, "end": 3635.64, "word": " the", "probability": 0.91748046875}, {"start": 3635.64, "end": 3635.96, "word": " population", "probability": 0.96337890625}, {"start": 3635.96, "end": 3636.26, "word": " is", 
"probability": 0.9443359375}, {"start": 3636.26, "end": 3636.58, "word": " normal", "probability": 0.84033203125}, {"start": 3636.58, "end": 3636.88, "word": " or", "probability": 0.87109375}, {"start": 3636.88, "end": 3637.04, "word": " N", "probability": 0.3671875}, {"start": 3637.04, "end": 3637.16, "word": " is", "probability": 0.935546875}, {"start": 3637.16, "end": 3637.5, "word": " large.", "probability": 0.96728515625}, {"start": 3638.06, "end": 3638.48, "word": " Because", "probability": 0.92138671875}, {"start": 3638.48, "end": 3638.66, "word": " if", "probability": 0.92822265625}, {"start": 3638.66, "end": 3638.82, "word": " N", "probability": 0.97900390625}, {"start": 3638.82, "end": 3638.98, "word": " is", "probability": 0.9501953125}, {"start": 3638.98, "end": 3639.28, "word": " large", "probability": 0.96826171875}, {"start": 3639.28, "end": 3639.7, "word": " enough,", "probability": 0.84033203125}, {"start": 3640.3, "end": 3640.92, "word": " we", "probability": 0.95166015625}, {"start": 3640.92, "end": 3641.3, "word": " can", "probability": 0.94580078125}, {"start": 3641.3, "end": 3643.2, "word": " apply", "probability": 0.90673828125}, {"start": 3643.2, "end": 3643.48, "word": " the", "probability": 0.73046875}, {"start": 3643.48, "end": 3643.76, "word": " central", "probability": 0.671875}, {"start": 3643.76, "end": 3643.94, "word": " limit", "probability": 0.8701171875}, {"start": 3643.94, "end": 3644.24, "word": " theorem.", "probability": 0.8623046875}, {"start": 3644.48, "end": 3644.66, "word": " And", "probability": 0.94091796875}, {"start": 3644.66, "end": 3644.82, "word": " if", "probability": 0.94482421875}, {"start": 3644.82, "end": 3644.92, "word": " you", "probability": 0.95458984375}, {"start": 3644.92, "end": 3645.1, "word": " go", "probability": 0.96142578125}, {"start": 3645.1, "end": 3645.38, "word": " back", "probability": 0.8603515625}], "temperature": 1.0}, {"id": 130, "seek": 367500, "start": 3646.52, "end": 3675.0, "text": " 
The formula for S, if you remember, we divide by N minus one. And for sigma, we divide by capital. So if N is large enough, then there is small difference between sample variance and population. Okay. Any questions? Any question?", "tokens": [440, 8513, 337, 318, 11, 498, 291, 1604, 11, 321, 9845, 538, 426, 3175, 472, 13, 400, 337, 12771, 11, 321, 9845, 538, 4238, 13, 407, 498, 426, 307, 2416, 1547, 11, 550, 456, 307, 1359, 2649, 1296, 6889, 21977, 293, 4415, 13, 1033, 13, 2639, 1651, 30, 2639, 1168, 30], "avg_logprob": -0.24278845437444174, "compression_ratio": 1.464968152866242, "no_speech_prob": 0.0, "words": [{"start": 3646.52, "end": 3646.76, "word": " The", "probability": 0.59619140625}, {"start": 3646.76, "end": 3647.1, "word": " formula", "probability": 0.94189453125}, {"start": 3647.1, "end": 3647.36, "word": " for", "probability": 0.9453125}, {"start": 3647.36, "end": 3647.68, "word": " S,", "probability": 0.546875}, {"start": 3647.9, "end": 3648.0, "word": " if", "probability": 0.94775390625}, {"start": 3648.0, "end": 3648.08, "word": " you", "probability": 0.96337890625}, {"start": 3648.08, "end": 3648.36, "word": " remember,", "probability": 0.86328125}, {"start": 3648.46, "end": 3648.52, "word": " we", "probability": 0.6943359375}, {"start": 3648.52, "end": 3648.74, "word": " divide", "probability": 0.68359375}, {"start": 3648.74, "end": 3648.9, "word": " by", "probability": 0.8583984375}, {"start": 3648.9, "end": 3649.08, "word": " N", "probability": 0.6806640625}, {"start": 3649.08, "end": 3649.34, "word": " minus", "probability": 0.84228515625}, {"start": 3649.34, "end": 3649.64, "word": " one.", "probability": 0.493896484375}, {"start": 3650.26, "end": 3650.6, "word": " And", "probability": 0.556640625}, {"start": 3650.6, "end": 3650.84, "word": " for", "probability": 0.93994140625}, {"start": 3650.84, "end": 3651.08, "word": " sigma,", "probability": 0.5703125}, {"start": 3651.2, "end": 3651.26, "word": " we", "probability": 0.80810546875}, 
{"start": 3651.26, "end": 3651.54, "word": " divide", "probability": 0.9033203125}, {"start": 3651.54, "end": 3651.98, "word": " by", "probability": 0.93701171875}, {"start": 3651.98, "end": 3653.64, "word": " capital.", "probability": 0.71875}, {"start": 3654.86, "end": 3655.5, "word": " So", "probability": 0.94677734375}, {"start": 3655.5, "end": 3655.74, "word": " if", "probability": 0.72314453125}, {"start": 3655.74, "end": 3655.94, "word": " N", "probability": 0.96142578125}, {"start": 3655.94, "end": 3656.08, "word": " is", "probability": 0.951171875}, {"start": 3656.08, "end": 3656.46, "word": " large", "probability": 0.97314453125}, {"start": 3656.46, "end": 3656.94, "word": " enough,", "probability": 0.87939453125}, {"start": 3658.0, "end": 3658.8, "word": " then", "probability": 0.85400390625}, {"start": 3658.8, "end": 3659.12, "word": " there", "probability": 0.90673828125}, {"start": 3659.12, "end": 3659.3, "word": " is", "probability": 0.673828125}, {"start": 3659.3, "end": 3659.76, "word": " small", "probability": 0.63134765625}, {"start": 3659.76, "end": 3660.2, "word": " difference", "probability": 0.8740234375}, {"start": 3660.2, "end": 3660.76, "word": " between", "probability": 0.8935546875}, {"start": 3660.76, "end": 3661.64, "word": " sample", "probability": 0.85888671875}, {"start": 3661.64, "end": 3662.14, "word": " variance", "probability": 0.67431640625}, {"start": 3662.14, "end": 3662.44, "word": " and", "probability": 0.8203125}, {"start": 3662.44, "end": 3663.12, "word": " population.", "probability": 0.8623046875}, {"start": 3664.26, "end": 3664.36, "word": " Okay.", "probability": 0.67822265625}, {"start": 3671.04, "end": 3671.68, "word": " Any", "probability": 0.79931640625}, {"start": 3671.68, "end": 3672.14, "word": " questions?", "probability": 0.95458984375}, {"start": 3673.98, "end": 3674.62, "word": " Any", "probability": 0.908203125}, {"start": 3674.62, "end": 3675.0, "word": " question?", "probability": 0.50439453125}], 
"temperature": 1.0}, {"id": 131, "seek": 369947, "start": 3679.13, "end": 3699.47, "text": " If it is above 15, it's okay. 15 or 30, depends on the population you have. But here, if you divide by 24 or 25, the difference is small. I mean, if you divide this one by 24 or by 25,", "tokens": [759, 309, 307, 3673, 2119, 11, 309, 311, 1392, 13, 2119, 420, 2217, 11, 5946, 322, 264, 4415, 291, 362, 13, 583, 510, 11, 498, 291, 9845, 538, 4022, 420, 3552, 11, 264, 2649, 307, 1359, 13, 286, 914, 11, 498, 291, 9845, 341, 472, 538, 4022, 420, 538, 3552, 11], "avg_logprob": -0.15640023608620351, "compression_ratio": 1.3909774436090225, "no_speech_prob": 0.0, "words": [{"start": 3679.13, "end": 3679.69, "word": " If", "probability": 0.63134765625}, {"start": 3679.69, "end": 3679.81, "word": " it", "probability": 0.93896484375}, {"start": 3679.81, "end": 3679.91, "word": " is", "probability": 0.8095703125}, {"start": 3679.91, "end": 3680.13, "word": " above", "probability": 0.9580078125}, {"start": 3680.13, "end": 3680.67, "word": " 15,", "probability": 0.8076171875}, {"start": 3680.83, "end": 3681.23, "word": " it's", "probability": 0.910400390625}, {"start": 3681.23, "end": 3681.53, "word": " okay.", "probability": 0.81201171875}, {"start": 3683.77, "end": 3684.33, "word": " 15", "probability": 0.7431640625}, {"start": 3684.33, "end": 3684.51, "word": " or", "probability": 0.96435546875}, {"start": 3684.51, "end": 3684.91, "word": " 30,", "probability": 0.93603515625}, {"start": 3685.07, "end": 3685.49, "word": " depends", "probability": 0.52001953125}, {"start": 3685.49, "end": 3685.83, "word": " on", "probability": 0.94775390625}, {"start": 3685.83, "end": 3686.07, "word": " the", "probability": 0.921875}, {"start": 3686.07, "end": 3686.79, "word": " population", "probability": 0.9619140625}, {"start": 3686.79, "end": 3687.05, "word": " you", "probability": 0.66796875}, {"start": 3687.05, "end": 3687.25, "word": " have.", "probability": 0.94775390625}, {"start": 3687.69, 
"end": 3687.93, "word": " But", "probability": 0.9365234375}, {"start": 3687.93, "end": 3688.19, "word": " here,", "probability": 0.818359375}, {"start": 3688.35, "end": 3688.39, "word": " if", "probability": 0.95458984375}, {"start": 3688.39, "end": 3688.51, "word": " you", "probability": 0.9560546875}, {"start": 3688.51, "end": 3688.89, "word": " divide", "probability": 0.9228515625}, {"start": 3688.89, "end": 3689.25, "word": " by", "probability": 0.9306640625}, {"start": 3689.25, "end": 3690.23, "word": " 24", "probability": 0.9716796875}, {"start": 3690.23, "end": 3691.19, "word": " or", "probability": 0.865234375}, {"start": 3691.19, "end": 3691.95, "word": " 25,", "probability": 0.89306640625}, {"start": 3692.55, "end": 3692.71, "word": " the", "probability": 0.92333984375}, {"start": 3692.71, "end": 3693.07, "word": " difference", "probability": 0.8740234375}, {"start": 3693.07, "end": 3693.31, "word": " is", "probability": 0.9453125}, {"start": 3693.31, "end": 3693.65, "word": " small.", "probability": 0.87109375}, {"start": 3694.69, "end": 3694.87, "word": " I", "probability": 0.96630859375}, {"start": 3694.87, "end": 3694.99, "word": " mean,", "probability": 0.9677734375}, {"start": 3695.09, "end": 3695.15, "word": " if", "probability": 0.9462890625}, {"start": 3695.15, "end": 3695.29, "word": " you", "probability": 0.95849609375}, {"start": 3695.29, "end": 3695.67, "word": " divide", "probability": 0.92138671875}, {"start": 3695.67, "end": 3695.93, "word": " this", "probability": 0.94091796875}, {"start": 3695.93, "end": 3696.17, "word": " one", "probability": 0.92724609375}, {"start": 3696.17, "end": 3696.49, "word": " by", "probability": 0.96875}, {"start": 3696.49, "end": 3698.09, "word": " 24", "probability": 0.96728515625}, {"start": 3698.09, "end": 3698.43, "word": " or", "probability": 0.8916015625}, {"start": 3698.43, "end": 3698.65, "word": " by", "probability": 0.83349609375}, {"start": 3698.65, "end": 3699.47, "word": " 25,", "probability": 
0.97509765625}], "temperature": 1.0}, {"id": 132, "seek": 370810, "start": 3700.52, "end": 3708.1, "text": " The difference between S and Sigma is small, so we can replace Sigma by S, if Sigma is unknown. Questions?", "tokens": [440, 2649, 1296, 318, 293, 36595, 307, 1359, 11, 370, 321, 393, 7406, 36595, 538, 318, 11, 498, 36595, 307, 9841, 13, 27738, 30], "avg_logprob": -0.2203125, "compression_ratio": 1.1758241758241759, "no_speech_prob": 0.0, "words": [{"start": 3700.52, "end": 3700.8, "word": " The", "probability": 0.53662109375}, {"start": 3700.8, "end": 3701.12, "word": " difference", "probability": 0.8232421875}, {"start": 3701.12, "end": 3701.44, "word": " between", "probability": 0.869140625}, {"start": 3701.44, "end": 3701.64, "word": " S", "probability": 0.402587890625}, {"start": 3701.64, "end": 3701.78, "word": " and", "probability": 0.9365234375}, {"start": 3701.78, "end": 3702.1, "word": " Sigma", "probability": 0.6416015625}, {"start": 3702.1, "end": 3702.54, "word": " is", "probability": 0.91796875}, {"start": 3702.54, "end": 3702.88, "word": " small,", "probability": 0.85205078125}, {"start": 3703.0, "end": 3703.06, "word": " so", "probability": 0.91552734375}, {"start": 3703.06, "end": 3703.2, "word": " we", "probability": 0.74853515625}, {"start": 3703.2, "end": 3703.38, "word": " can", "probability": 0.91748046875}, {"start": 3703.38, "end": 3703.74, "word": " replace", "probability": 0.92822265625}, {"start": 3703.74, "end": 3704.12, "word": " Sigma", "probability": 0.76220703125}, {"start": 3704.12, "end": 3704.34, "word": " by", "probability": 0.95751953125}, {"start": 3704.34, "end": 3704.66, "word": " S,", "probability": 0.982421875}, {"start": 3705.06, "end": 3705.28, "word": " if", "probability": 0.95947265625}, {"start": 3705.28, "end": 3705.58, "word": " Sigma", "probability": 0.87255859375}, {"start": 3705.58, "end": 3705.76, "word": " is", "probability": 0.94970703125}, {"start": 3705.76, "end": 3706.06, "word": " unknown.", 
"probability": 0.78369140625}, {"start": 3707.48, "end": 3708.1, "word": " Questions?", "probability": 0.7861328125}], "temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 3711.54725, "duration_after_vad": 3501.793124999986} \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ_raw.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ_raw.srt new file mode 100644 index 0000000000000000000000000000000000000000..fe5546405f6d861ca16ffd98945ad9d3aa35a2c0 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/vioR30IO0qQ_raw.srt @@ -0,0 +1,2820 @@ +1 +00:00:06,600 --> 00:00:10,640 +the sum and distribution of the sum and mean, and + +2 +00:00:10,640 --> 00:00:16,400 +we have produced two cases. The first one, if the + +3 +00:00:16,400 --> 00:00:20,320 +population is normal and conservative, in this + +4 +00:00:20,320 --> 00:00:24,580 +case, the sum and distribution of the sum and mean + +5 +00:00:24,580 --> 00:00:29,540 +is exactly normal, with mean of x bar equal the + +6 +00:00:29,540 --> 00:00:30,780 +whole population mean. + +7 +00:00:33,650 --> 00:00:38,050 +And sigma of x bar is called the standard error of + +8 +00:00:38,050 --> 00:00:43,870 +the sample mean given by sigma. Also, we mentioned + +9 +00:00:43,870 --> 00:00:47,950 +that the standard error of the sample mean is + +10 +00:00:47,950 --> 00:00:53,270 +always smaller than sigma. And also, as m + +11 +00:00:53,270 --> 00:00:56,610 +increases, the standard error of x bar increases. + +12 +00:00:56,790 --> 00:01:00,450 +So there is a gorgeous negative relationship. + +13 +00:01:01,360 --> 00:01:04,840 +between our fingers, relationship between the + +14 +00:01:04,840 --> 00:01:08,740 +standard error of x bar and the sample size. + +15 +00:01:09,300 --> 00:01:13,680 +That's when the operation is normally distributed. 
+ +16 +00:01:15,540 --> 00:01:19,860 +So again, in this case, the new z score is given + +17 +00:01:19,860 --> 00:01:23,780 +by this equation. Here, we replace sigma by sigma + +18 +00:01:23,780 --> 00:01:27,700 +of x bar. So z score is given by x bar minus the + +19 +00:01:27,700 --> 00:01:32,880 +mean divided by sigma over root n. The second + +20 +00:01:32,880 --> 00:01:40,780 +case, if the population is not normal, in this + +21 +00:01:40,780 --> 00:01:45,140 +case we can apply the central limit theorem, and + +22 +00:01:45,140 --> 00:01:48,260 +this theorem can be applied if the sample size is + +23 +00:01:48,260 --> 00:01:52,500 +large enough. So if N is large, in this case, we + +24 +00:01:52,500 --> 00:01:55,580 +can say that the sampling distribution of the + +25 +00:01:55,580 --> 00:01:59,080 +sample mean is approximately normally distributed + +26 +00:01:59,080 --> 00:02:04,120 +as long as the sample size is large enough. Now, + +27 +00:02:04,480 --> 00:02:08,460 +how large is large enough? We mentioned that for + +28 +00:02:08,460 --> 00:02:13,440 +most distributions, N above 30 will give sampling + +29 +00:02:13,440 --> 00:02:16,280 +distribution that is nearly symmetric or normal. + +30 +00:02:17,730 --> 00:02:20,210 +But on the other hand, for fairly symmetrical + +31 +00:02:20,210 --> 00:02:25,690 +distribution, and above 15 will usually give some + +32 +00:02:25,690 --> 00:02:29,790 +distribution is almost normal. So we have three + +33 +00:02:29,790 --> 00:02:34,290 +cases. For most distributions, we need large + +34 +00:02:34,290 --> 00:02:38,370 +sample to be in above 30. For symmetric + +35 +00:02:38,370 --> 00:02:43,530 +distributions, in above 15. But if the population + +36 +00:02:43,530 --> 00:02:48,030 +is normally distributed, then regardless of the + +37 +00:02:48,030 --> 00:02:50,830 +sample size, the sampling distribution of the + +38 +00:02:50,830 --> 00:02:54,170 +sample mean is always normally distributed. 
So + +39 +00:02:54,170 --> 00:02:58,440 +here, it's nearly symmetric. almost symmetric and + +40 +00:02:58,440 --> 00:03:00,480 +the other one is always symmetric. So we have to + +41 +00:03:00,480 --> 00:03:04,760 +distinguish between these three cases. And I think + +42 +00:03:04,760 --> 00:03:10,080 +we gave one example. Now let's move to another + +43 +00:03:10,080 --> 00:03:14,380 +part. As we mentioned in the beginning of this + +44 +00:03:14,380 --> 00:03:19,980 +class, data has two types mainly, quantitative and + +45 +00:03:19,980 --> 00:03:24,100 +qualitative. Quantitative, for example, your + +46 +00:03:24,100 --> 00:03:29,800 +score. Score is numerical value, 60, 65, and so + +47 +00:03:29,800 --> 00:03:33,180 +on. In this case, we can talk about assembly. + +48 +00:03:35,560 --> 00:03:46,400 +So first, for quantitative data, + +49 +00:03:47,020 --> 00:03:49,320 +we talk about assembly. + +50 +00:03:55,440 --> 00:04:01,780 +Number two, if we talk about, if we talk about + +51 +00:04:01,780 --> 00:04:09,900 +qualitative data, in this case we talk about + +52 +00:04:09,900 --> 00:04:15,760 +sample of + +53 +00:04:15,760 --> 00:04:16,100 +water. + +54 +00:04:19,220 --> 00:04:24,110 +In this case, proportion is what's the sampling + +55 +00:04:24,110 --> 00:04:28,770 +distribution of the sample proportion. So we are + +56 +00:04:28,770 --> 00:04:35,690 +looking for the sampling distribution + +57 +00:04:35,690 --> 00:04:46,410 +of the sample population + +58 +00:04:46,410 --> 00:04:51,190 +having some characteristic. For example, let's buy + +59 +00:04:52,160 --> 00:05:02,820 +denotes number of smokers among + +60 +00:05:02,820 --> 00:05:03,620 +females. + +61 +00:05:06,500 --> 00:05:12,280 +So let's use Y as number of smokers among all + +62 +00:05:12,280 --> 00:05:19,560 +females. All females have IUDs. 
So here we are + +63 +00:05:19,560 --> 00:05:24,560 +talking about My population is IUG students, but + +64 +00:05:24,560 --> 00:05:30,240 +here we're focused on female students. And pi is + +65 +00:05:30,240 --> 00:05:33,840 +number of smokers among all female students. + +66 +00:05:36,820 --> 00:05:39,660 +Now sampling proportion, so my characteristic here + +67 +00:05:39,660 --> 00:05:46,000 +is smokers. Maybe your characteristic is person or + +68 +00:05:46,000 --> 00:05:48,800 +student's number. + +69 +00:06:01,190 --> 00:06:08,130 +proportion of students have + +70 +00:06:08,130 --> 00:06:12,790 +scores above + +71 +00:06:12,790 --> 00:06:19,210 +80. So by proportion of students have scores above + +72 +00:06:19,210 --> 00:06:23,430 +80, and so on. So here, in this case, your + +73 +00:06:23,430 --> 00:06:27,810 +characteristic is scores above 80, and so on. + +74 +00:06:29,300 --> 00:06:33,120 +Simple proportion provides an estimate of Pi, + +75 +00:06:33,340 --> 00:06:36,780 +because generally speaking, Pi is not given, or is + +76 +00:06:36,780 --> 00:06:40,900 +unknown, similar as Mu. And Mu is the population + +77 +00:06:40,900 --> 00:06:44,840 +mean, is always unknown, and we are interested to + +78 +00:06:44,840 --> 00:06:48,680 +estimate the population mean. In this case, the + +79 +00:06:48,680 --> 00:06:53,780 +point estimate of Pi is P. So P is the point + +80 +00:06:53,780 --> 00:06:57,980 +estimate, sample, a proportion, + +81 +00:07:00,590 --> 00:07:05,150 +which is the point estimate of Y. Now, for + +82 +00:07:05,150 --> 00:07:07,490 +example, let's go back to the proportion of + +83 +00:07:07,490 --> 00:07:11,350 +smokers among all female students at IUG. Let's + +84 +00:07:11,350 --> 00:07:16,510 +assume that number of females at IUG, for example, + +85 +00:07:16,650 --> 00:07:17,970 +is 1,000 students. + +86 +00:07:23,570 --> 00:07:29,020 +And we talk here around a sample of 1,000. 
We know + +87 +00:07:29,020 --> 00:07:32,420 +that there are too many female students at IUG. + +88 +00:07:33,040 --> 00:07:35,980 +Suppose we select a random sample of size 1,000. + +89 +00:07:38,040 --> 00:07:44,940 +So that's the sample size. So here, we selected a + +90 +00:07:44,940 --> 00:07:51,380 +random sample of size 1,000 from the population of + +91 +00:07:51,380 --> 00:07:55,840 +females. And suppose we found that number of + +92 +00:07:55,840 --> 00:08:03,640 +smokers among these equal one hundred. So out of + +93 +00:08:03,640 --> 00:08:08,040 +one thousand, one hundred students are smoking. + +94 +00:08:08,820 --> 00:08:11,840 +Now what's the percentage of smokers in this case? + +95 +00:08:13,220 --> 00:08:16,700 +So P equals this one hundred divided by a + +96 +00:08:16,700 --> 00:08:20,760 +thousand, so that's ten percent or point one. + +97 +00:08:25,220 --> 00:08:32,500 +Let's assume that X denotes the number of items in + +98 +00:08:32,500 --> 00:08:34,900 +the sample having the characteristic of interest. + +99 +00:08:35,300 --> 00:08:39,920 +In this case, X number of smokers in your sample, + +100 +00:08:40,040 --> 00:08:44,820 +so X equals 100. N is 1000, so proportion is + +101 +00:08:44,820 --> 00:08:53,900 +always equals X divided by N, X number of items in + +102 +00:08:53,900 --> 00:08:57,580 +the characteristic you have. N is the sample size, + +103 +00:08:57,700 --> 00:09:01,240 +so B equals X over N. Sometimes it might be X + +104 +00:09:01,240 --> 00:09:05,800 +equals zero. So for example suppose I select a + +105 +00:09:05,800 --> 00:09:12,160 +random sample of size 1,000 and none of them So in + +106 +00:09:12,160 --> 00:09:15,820 +this case, x equals zero, that means the + +107 +00:09:15,820 --> 00:09:20,500 +percentage equals zero. So we can have zero + +108 +00:09:20,500 --> 00:09:24,300 +percentage if x equals zero. 
Also in the upper + +109 +00:09:24,300 --> 00:09:27,980 +limit, suppose here we are talking about students + +110 +00:09:27,980 --> 00:09:32,020 +have scores above 80. And we select a random + +111 +00:09:32,020 --> 00:09:36,100 +sample, and that sample has All of the students + +112 +00:09:36,100 --> 00:09:41,520 +have scores above 80, so that means x equals 1000, + +113 +00:09:42,380 --> 00:09:45,720 +so percentage is 1. So 1000 divided by 1000 is 1. + +114 +00:09:46,220 --> 00:09:50,460 +So the minimum value of B is 0, the maximum is 1, + +115 +00:09:50,600 --> 00:09:53,580 +that means B ranges from 0 to 1. So it could be 0, + +116 +00:09:53,640 --> 00:10:01,240 +it could be 1, but almost B is between 0 and 1. B + +117 +00:10:01,240 --> 00:10:01,940 +is the proportion. + +118 +00:10:04,610 --> 00:10:07,030 +So, B is, in this case, is approximately + +119 +00:10:07,030 --> 00:10:10,190 +distributed as normal distribution when N is + +120 +00:10:10,190 --> 00:10:15,130 +large. But again, how large is large enough, we'll + +121 +00:10:15,130 --> 00:10:18,830 +talk about later. So, B is approximately normal + +122 +00:10:18,830 --> 00:10:24,630 +distributed when N is large. Here, we assume + +123 +00:10:24,630 --> 00:10:29,230 +sampling distribution with replacement if the + +124 +00:10:29,230 --> 00:10:32,660 +population is finite. And sampling without + +125 +00:10:32,660 --> 00:10:35,580 +replacement from infinite population. Finite + +126 +00:10:35,580 --> 00:10:41,520 +population means limited, has fixed size. Infinite + +127 +00:10:41,520 --> 00:10:46,940 +population means unlimited. Because if we select a + +128 +00:10:46,940 --> 00:10:49,560 +random sample without replacement from infinite + +129 +00:10:49,560 --> 00:10:55,540 +population, for example suppose N equals 10,000. + +130 +00:10:57,300 --> 00:11:00,480 +In this case, each person has a chance of being + +131 +00:11:00,480 --> 00:11:05,460 +selected one over 10,000. 
Let's assume that we + +132 +00:11:05,460 --> 00:11:10,760 +select someone without replacement. So in this + +133 +00:11:10,760 --> 00:11:14,000 +case, if we select one with a proportion of one + +134 +00:11:14,000 --> 00:11:17,820 +over 10,000, it means the second one has a chance + +135 +00:11:17,820 --> 00:11:22,340 +of one divided by 9999. + +136 +00:11:23,000 --> 00:11:25,840 +And the difference between these two is very + +137 +00:11:25,840 --> 00:11:30,250 +small. For this reason, we can select a random + +138 +00:11:30,250 --> 00:11:34,850 +sample without replacement if the population size + +139 +00:11:34,850 --> 00:11:41,310 +is large, because the probability to select an + +140 +00:11:41,310 --> 00:11:45,610 +item from that population remains the same, or + +141 +00:11:45,610 --> 00:11:48,490 +approximately the same, because 1 over 10,000 is + +142 +00:11:48,490 --> 00:11:51,970 +roughly equal to 1 over 9999. + +143 +00:11:54,060 --> 00:11:57,440 +Now the two conditions we have to check in order + +144 +00:11:57,440 --> 00:12:01,900 +to apply or in order to say that B is + +145 +00:12:01,900 --> 00:12:04,740 +approximately normally distributed. The two + +146 +00:12:04,740 --> 00:12:10,020 +conditions are n times pi is at least 5 and we + +147 +00:12:10,020 --> 00:12:15,660 +have n, pi is given. Also, n times 1 minus pi is + +148 +00:12:15,660 --> 00:12:19,380 +at least 5. So there are two conditions should be + +149 +00:12:19,380 --> 00:12:24,260 +satisfied in order to use the normal distribution. + +150 +00:12:24,900 --> 00:12:31,200 +Again, the first one, n times pi is at least 5, n + +151 +00:12:31,200 --> 00:12:36,880 +times 1 minus pi is also at least 5. 
If these two + +152 +00:12:36,880 --> 00:12:41,600 +conditions are satisfied, then you can say that B, + +153 +00:12:41,780 --> 00:12:44,520 +or the sample proportion, is approximately + +154 +00:12:44,520 --> 00:12:47,780 +normally distributed, so that's the shape of the + +155 +00:12:47,780 --> 00:12:54,900 +distribution, with mean of? Mu of p equals pi, so + +156 +00:12:54,900 --> 00:12:59,180 +the mean of p equals pi, with sigma of p equals + +157 +00:12:59,180 --> 00:13:04,100 +square root of + +158 +00:13:04,100 --> 00:13:09,160 +pi times 1 minus pi, divided by n. So that's the + +159 +00:13:09,160 --> 00:13:14,120 +mean of p is always pi, and sigma of p equals + +160 +00:13:14,120 --> 00:13:16,940 +square root of pi times 1 minus pi divided by n. + +161 +00:13:18,480 --> 00:13:23,810 +Let's compare this result. with the sampling + +162 +00:13:23,810 --> 00:13:28,030 +distribution of the sample mean. If you remember, + +163 +00:13:28,610 --> 00:13:36,990 +the mean of x bar was mu. And here, the statistic + +164 +00:13:36,990 --> 00:13:41,230 +is the sample mean, and the mean of x bar is mu. + +165 +00:13:42,330 --> 00:13:45,430 +On the other hand, the mean of the statistic, my + +166 +00:13:45,430 --> 00:13:49,010 +statistic is the sample proportion is five. So in + +167 +00:13:49,010 --> 00:13:52,930 +the two cases, the mean equals the true value. I + +168 +00:13:52,930 --> 00:13:55,910 +mean, the true parameter. So in this case, the + +169 +00:13:55,910 --> 00:13:59,510 +mean of x bar equal mu, and mu of p equal pi. On + +170 +00:13:59,510 --> 00:14:03,030 +the other hand, the sigma of x bar was sigma over + +171 +00:14:03,030 --> 00:14:08,310 +root n. This looks similar, because this one's + +172 +00:14:08,310 --> 00:14:12,930 +just sigma squared over n. But here, sigma squared + +173 +00:14:12,930 --> 00:14:14,630 +is pi times 1 minus pi. 
+ +174 +00:14:18,430 --> 00:14:21,430 +So again, the standard distribution of B is + +175 +00:14:21,430 --> 00:14:24,290 +roughly symmetric or approximately normally + +176 +00:14:24,290 --> 00:14:30,810 +distributed if these two conditions are satisfied + +177 +00:14:30,810 --> 00:14:34,430 +and mu of B equals pi and sigma of B equals square + +178 +00:14:34,430 --> 00:14:38,230 +root of pi times 1 minus pi over n. Now, this + +179 +00:14:38,230 --> 00:14:44,490 +score, as we mentioned before, the standard + +180 +00:14:44,490 --> 00:14:47,430 +equation is given by x minus the mean of x divided + +181 +00:14:47,430 --> 00:14:52,550 +by sigma. Last time, we talked about semi + +182 +00:14:52,550 --> 00:14:53,730 +-distribution of x bar. + +183 +00:14:56,570 --> 00:15:00,610 +So your z score + +184 +00:15:00,610 --> 00:15:05,050 +equals x bar minus the mean of x bar divided by + +185 +00:15:05,050 --> 00:15:11,250 +sigma of x bar. So that's x bar minus mu, because + +186 +00:15:11,250 --> 00:15:16,050 +the mean of x bar is mu, divided by sigma of x + +187 +00:15:16,050 --> 00:15:18,810 +bar, sigma over root n. + +188 +00:15:22,620 --> 00:15:27,720 +Now let's compute z-score for p. So z equals p + +189 +00:15:27,720 --> 00:15:30,400 +minus the mean of p divided by sigma. + +190 +00:15:33,680 --> 00:15:39,720 +So in this case, z equals the mean of p, which is + +191 +00:15:39,720 --> 00:15:48,060 +pi, divided by pi, 1 minus pi divided by p. So in + +192 +00:15:48,060 --> 00:15:52,090 +this case, the new formula for the z-score is + +193 +00:15:52,090 --> 00:15:59,530 +given by pi minus one. So the zero zero score is + +194 +00:15:59,530 --> 00:16:04,610 +equal to pi minus, p minus pi divided by root pi + +195 +00:16:04,610 --> 00:16:08,390 +times one minus pi divided by small size n. + +196 +00:16:18,270 --> 00:16:20,450 +Now let's do one example. 
+
+197
+00:16:23,240 --> 00:16:29,380
+The example says that if the true proportion of
+
+198
+00:16:29,380 --> 00:16:37,620
+voters who support proposition A is 40%. So in
+
+199
+00:16:37,620 --> 00:16:44,420
+this case, pi is 40%. The question is, what's the
+
+200
+00:16:44,420 --> 00:16:50,820
+probability that a sample of size 200, so we
+
+201
+00:16:50,820 --> 00:16:56,220
+select a random sample of 200, yields a sample
+
+202
+00:16:56,220 --> 00:17:02,880
+proportion between 40% and 45%. So in this case,
+
+203
+00:17:02,940 --> 00:17:10,320
+we are looking for the probability between 40% and
+
+204
+00:17:10,320 --> 00:17:16,870
+45%. Now, if the problem says, The true
+
+205
+00:17:16,870 --> 00:17:24,210
+proportion, it means pi. The true mean, it means
+
+206
+00:17:24,210 --> 00:17:28,210
+also mu. So true, it means we are talking about
+
+207
+00:17:28,210 --> 00:17:32,670
+the entire population. So true proportion, it
+
+208
+00:17:32,670 --> 00:17:36,050
+means the population proportion. So the word true
+
+209
+00:17:36,050 --> 00:17:41,590
+in statistics in this case means population.
+
+210
+00:17:42,210 --> 00:17:47,300
+So the population proportion equals 0.4, so pi is
+
+211
+00:17:47,300 --> 00:17:51,880
+equal to 0.4. Now, if pi equals 0.4 and the sample
+
+212
+00:17:51,880 --> 00:17:58,160
+size is 200, what's the probability that this
+
+213
+00:17:58,160 --> 00:18:03,500
+sample proportion lies between 40 and 45 percent?
+
+214
+00:18:05,460 --> 00:18:11,500
+Now, in order to use this score, you have to check
+
+215
+00:18:11,500 --> 00:18:15,870
+the two assumptions. But since N is large enough,
+
+216
+00:18:17,170 --> 00:18:22,870
+N is 200. For sure the two conditions most of the
+
+217
+00:18:22,870 --> 00:18:25,890
+time are satisfied. But you have to check in order
+
+218
+00:18:25,890 --> 00:18:35,510
+to apply this z-score. Let's see first how
+
+219
+00:18:35,510 --> 00:18:40,550
+can we check the two conditions. 
Now, N equals + +220 +00:18:40,550 --> 00:18:40,970 +200. + +221 +00:18:43,690 --> 00:18:52,070 +Pi is 40%, so N times Pi is + +222 +00:18:52,070 --> 00:18:59,050 +80. 200 times 0.4 is 80%. So the first one is + +223 +00:18:59,050 --> 00:19:04,110 +satisfied, the first condition. Can you figure out + +224 +00:19:04,110 --> 00:19:07,210 +the value of N times 1 minus Pi without + +225 +00:19:07,210 --> 00:19:16,110 +calculation? 200 minus 80. So this value is 200 + +226 +00:19:16,110 --> 00:19:24,730 +minus 80. Or just 200 times 1 minus 0.4. 200 times + +227 +00:19:24,730 --> 00:19:29,890 +0.6 is 120. So if you just find the first one and + +228 +00:19:29,890 --> 00:19:35,650 +times 5, the other one is n minus 80 will give the + +229 +00:19:35,650 --> 00:19:38,170 +other condition. So now the two conditions are + +230 +00:19:38,170 --> 00:19:43,400 +satisfied. Then we can use the z-score. I mean, we + +231 +00:19:43,400 --> 00:19:47,880 +can say that the sampling proportion is normally + +232 +00:19:47,880 --> 00:19:54,980 +distributed with mean equal pi, always pi, and pi + +233 +00:19:54,980 --> 00:20:02,700 +is given as 40%. And the sigma of p equals square + +234 +00:20:02,700 --> 00:20:08,360 +root of pi 1 minus pi divided by n. That's your + +235 +00:20:08,360 --> 00:20:15,030 +square root of 0.4 times. divided by 200, and that + +236 +00:20:15,030 --> 00:20:18,830 +will give 0 + +237 +00:20:18,830 --> 00:20:22,810 +.0346. + +238 +00:20:26,210 --> 00:20:29,250 +So, the first step, we have to check the two + +239 +00:20:29,250 --> 00:20:33,730 +conditions. Second step, compute the mean of P, + +240 +00:20:33,970 --> 00:20:42,270 +sigma of P. Now, finally, find the z-score of your + +241 +00:20:42,270 --> 00:20:45,970 +problem, here he asks about what's the probability + +242 +00:20:45,970 --> 00:20:54,210 +that B lies between 0 + +243 +00:20:54,210 --> 00:20:58,430 +.4 and 0.45 percent. So we have to find the score + +244 +00:20:58,430 --> 00:21:05,490 +for 0.4. 
So that's 0.4 minus again the mean of B
+
+245
+00:21:05,490 --> 00:21:11,550
+which is again 0.4, so that's valid. Divide by 0
+
+246
+00:21:11,550 --> 00:21:26,210
+.0346. So again, b is 0.4. This is pi. And pi, in
+
+247
+00:21:26,210 --> 00:21:32,150
+this case, is always 0.4. And this is your b. So 0
+
+248
+00:21:32,150 --> 00:21:37,350
+.4 minus 0.4 divided by 0.0346 plus 1 equals 0.
+
+249
+00:21:38,600 --> 00:21:44,000
+The other z-score, for the other value, here, 0
+
+250
+00:21:44,000 --> 00:21:44,540
+.45.
+
+251
+00:21:50,340 --> 00:21:53,360
+And this gives 1.44.
+
+252
+00:21:56,900 --> 00:22:01,400
+After that, this problem is converted to
+
+253
+00:22:01,400 --> 00:22:06,840
+standardized normal value. So instead of P, we
+
+254
+00:22:06,840 --> 00:22:14,920
+have Z. between 0 and 1.44. That's all for using
+
+255
+00:22:14,920 --> 00:22:21,540
+Chapter 7. Now, to complete your answer, you have
+
+256
+00:22:21,540 --> 00:22:26,920
+to use Chapter 6. Now, Z is between 0 and 1.44. As
+
+257
+00:22:26,920 --> 00:22:33,160
+we mentioned many times, the area here equals,
+
+258
+00:22:33,660 --> 00:22:39,960
+I mean, the dashed area equals P of Z less than 1
+
+259
+00:22:39,960 --> 00:22:45,200
+.44 minus 0.5. Exactly minus 0.5, because the area
+
+260
+00:22:45,200 --> 00:22:50,580
+to the left of 0 is 0.5. Now by using the normal
+
+261
+00:22:50,580 --> 00:22:53,980
+table, or standard normal table, P of Z is smaller
+
+262
+00:22:53,980 --> 00:22:59,780
+than 1.44 is given by 0.9251.
+
+263
+00:23:01,540 --> 00:23:05,840
+Just check your table, minus 0.5, so the final
+
+264
+00:23:05,840 --> 00:23:15,340
+result is 0.4251. That means around 42.51
+
+265
+00:23:15,340 --> 00:23:22,280
+percent, that the proportion lie between 40 and 45
+
+266
+00:23:22,280 --> 00:23:28,000
+percent. 
So that's how can we compute the + +267 +00:23:28,000 --> 00:23:32,080 +probabilities underneath the normal curve if we + +268 +00:23:32,080 --> 00:23:34,680 +are interested in the sample proportion. + +269 +00:23:39,630 --> 00:23:45,870 +Now, in case if one of these conditions is not + +270 +00:23:45,870 --> 00:23:51,510 +satisfied, we cannot use this code unless we + +271 +00:23:51,510 --> 00:23:54,590 +increase the sample size. So here, the two + +272 +00:23:54,590 --> 00:23:56,790 +conditions should be satisfied. For example, + +273 +00:23:56,950 --> 00:24:00,910 +suppose n + +274 +00:24:00,910 --> 00:24:13,390 +equals 200. But this probability of Pi is + +275 +00:24:13,390 --> 00:24:17,570 +1%. Suppose + +276 +00:24:17,570 --> 00:24:26,250 +again N is 200 and Pi is 1%. So this condition is + +277 +00:24:26,250 --> 00:24:33,090 +not satisfied because N times Pi is just 2. So you + +278 +00:24:33,090 --> 00:24:35,450 +cannot use the z-score, because it should be at + +279 +00:24:35,450 --> 00:24:38,810 +least 5. So even if the sample size is large, + +280 +00:24:39,390 --> 00:24:42,070 +maybe one of the conditions is not satisfied + +281 +00:24:42,070 --> 00:24:46,710 +because you have a small true proportion. So if + +282 +00:24:46,710 --> 00:24:49,470 +the true proportion is very, very small, in this + +283 +00:24:49,470 --> 00:24:53,750 +case you have to increase your sample size. Make + +284 +00:24:53,750 --> 00:24:56,270 +sense? Any question? + +285 +00:24:58,950 --> 00:25:05,730 +Now I will discuss some practice problems for + +286 +00:25:05,730 --> 00:25:07,310 +chapter 7. + +287 +00:25:09,450 --> 00:25:16,130 +Let's do some practice problems for chapter 7. I + +288 +00:25:16,130 --> 00:25:17,770 +will give some + +289 +00:25:28,710 --> 00:25:34,150 +We have four choices. Parameters. Now, same + +290 +00:25:34,150 --> 00:25:36,390 +distribution always describes the same + +291 +00:25:36,390 --> 00:25:42,350 +distribution of statistics. 
So, not parameters, we + +292 +00:25:42,350 --> 00:25:45,970 +have to choose the statistics. Same distribution + +293 +00:25:45,970 --> 00:25:51,250 +describes distribution of always statistics. Next. + +294 +00:25:53,490 --> 00:25:56,890 +The central limit theorem is important in + +295 +00:25:56,890 --> 00:26:01,310 +statistics because we have four choices. Let's see + +296 +00:26:01,310 --> 00:26:07,810 +why C is correct. Part A says, for a large N, it + +297 +00:26:07,810 --> 00:26:11,550 +says that the population is approximately normal. + +298 +00:26:12,710 --> 00:26:16,210 +We cannot say the population is normal. The + +299 +00:26:16,210 --> 00:26:19,510 +standard distribution of a statistic is + +300 +00:26:19,510 --> 00:26:24,410 +approximately normal. So, for this reason, part A + +301 +00:26:24,410 --> 00:26:28,490 +is incorrect. For the other one, for any + +302 +00:26:28,490 --> 00:26:34,690 +population, so regardless of the population, it + +303 +00:26:34,690 --> 00:26:38,950 +says the sample distribution, the sample mean is + +304 +00:26:38,950 --> 00:26:42,730 +approximately normal regardless of the sample + +305 +00:26:42,730 --> 00:26:45,830 +size. This is incorrect because it says for any + +306 +00:26:45,830 --> 00:26:53,940 +population. So P is incorrect. But if it says for + +307 +00:26:53,940 --> 00:26:58,180 +normal population, then we can say the sampling + +308 +00:26:58,180 --> 00:27:00,920 +distribution of the sample mean is approximately + +309 +00:27:00,920 --> 00:27:04,820 +normal regardless of the sample size. But it says + +310 +00:27:04,820 --> 00:27:09,320 +for any, so that means incorrect. Now part D, for + +311 +00:27:09,320 --> 00:27:13,500 +example, for any size sample, it means regardless + +312 +00:27:13,500 --> 00:27:17,480 +of the sample size, the theorem says the sample + +313 +00:27:17,480 --> 00:27:19,840 +distribution of the sample mean is approximately + +314 +00:27:19,840 --> 00:27:24,520 +normal. That's incorrect. 
Part C, for large N, it + +315 +00:27:24,520 --> 00:27:26,820 +says the sample distribution of the sample mean is + +316 +00:27:26,820 --> 00:27:31,110 +approximately Normal, regardless of the shape of + +317 +00:27:31,110 --> 00:27:34,310 +the distribution, that's true, because here we + +318 +00:27:34,310 --> 00:27:37,170 +have large sample size. So regardless of the + +319 +00:27:37,170 --> 00:27:39,690 +population, the shape of the population, we can + +320 +00:27:39,690 --> 00:27:42,830 +say that the sample distribution of the sample + +321 +00:27:42,830 --> 00:27:48,090 +mean is approximately normally distributed. Number + +322 +00:27:48,090 --> 00:27:52,610 +three. Which of the following statements about the + +323 +00:27:52,610 --> 00:27:54,750 +sample distribution of the sample mean is + +324 +00:27:54,750 --> 00:27:58,860 +incorrect? Here we are looking for the incorrect + +325 +00:27:58,860 --> 00:27:59,480 +statement. + +326 +00:28:02,780 --> 00:28:06,940 +Look at A, the sample distribution of the sample + +327 +00:28:06,940 --> 00:28:10,440 +mean is approximately normal whenever the sample + +328 +00:28:10,440 --> 00:28:14,620 +size is sufficiently large. This is correct. B, + +329 +00:28:14,740 --> 00:28:16,920 +the sample distribution of the sample mean is + +330 +00:28:16,920 --> 00:28:20,360 +generated by repeatedly taking samples of size N + +331 +00:28:20,360 --> 00:28:24,500 +and computing the sample means. That's also + +332 +00:28:24,500 --> 00:28:28,440 +correct. The sample mean, I'm sorry, the mean of + +333 +00:28:28,440 --> 00:28:30,460 +the sample distribution of the sample mean is + +334 +00:28:30,460 --> 00:28:33,680 +always equal to mu, that's correct, because we + +335 +00:28:33,680 --> 00:28:37,340 +know that the mean of x bar is a mu. Now, the + +336 +00:28:37,340 --> 00:28:41,020 +standard deviation of the sampling distribution of + +337 +00:28:41,020 --> 00:28:45,100 +the sample mean is equal to sigma. 
And we, yes, + +338 +00:28:45,240 --> 00:28:48,780 +exactly, the standard error, which is sigma of x + +339 +00:28:48,780 --> 00:28:56,330 +bar, not sigma, equals sigma Divide by root n. For + +340 +00:28:56,330 --> 00:28:59,570 +this reason, this one is incorrect statement. + +341 +00:29:00,290 --> 00:29:07,210 +Because we have to divide this sigma by square + +342 +00:29:07,210 --> 00:29:12,790 +root of n. Number four. + +343 +00:29:16,390 --> 00:29:23,340 +Which of the following is true? about the sampling + +344 +00:29:23,340 --> 00:29:28,040 +distribution of the sample mean. Again, the mean + +345 +00:29:28,040 --> 00:29:32,200 +of the sampling distribution is always Mu. That's + +346 +00:29:32,200 --> 00:29:34,600 +correct statement. + +347 +00:29:35,860 --> 00:29:39,520 +Now look at V. The standard deviation of the + +348 +00:29:39,520 --> 00:29:42,700 +sampling distribution is always Sigma. Incorrect + +349 +00:29:42,700 --> 00:29:47,540 +because Sigma over root N. Part C, the shape of + +350 +00:29:47,540 --> 00:29:50,000 +the sampling distribution is always approximately + +351 +00:29:50,000 --> 00:29:54,650 +normal. If N is large, then we can say it's + +352 +00:29:54,650 --> 00:29:58,550 +approximately normal. All of the above are true is + +353 +00:29:58,550 --> 00:30:07,370 +incorrect. So that's number 6. + +354 +00:30:15,090 --> 00:30:22,610 +Look at number 10. Number 10. A telemarketer set + +355 +00:30:22,610 --> 00:30:27,130 +the company's computerized dialing system to + +356 +00:30:27,130 --> 00:30:33,910 +contact every 25th person listed in the local + +357 +00:30:33,910 --> 00:30:38,450 +telephone directory. So the company selects the + +358 +00:30:38,450 --> 00:30:45,510 +person that's in the 25th position. So the 25th + +359 +00:30:45,510 --> 00:30:49,600 +person is being selected. The other one is the + +360 +00:30:49,600 --> 00:30:55,940 +first item. The second item should be... 
+
+361
+00:30:55,940 --> 00:31:01,120
+And your key is twenty-fifth. So one, two, three,
+
+362
+00:31:01,260 --> 00:31:05,440
+four. So number twenty-fifth is the first item.
+
+363
+00:31:08,500 --> 00:31:11,780
+Maybe you have something more. Then the second
+
+364
+00:31:11,780 --> 00:31:15,140
+item is number fifty and so on.
+
+365
+00:31:19,250 --> 00:31:26,650
+What sampling method was used? Systematic sample
+
+366
+00:31:26,650 --> 00:31:31,450
+because we chose the 25th person, then the second
+
+367
+00:31:31,450 --> 00:31:38,270
+one is the 50th, and so on. So 25, 50, 75, and so
+
+368
+00:31:38,270 --> 00:31:43,690
+on. Number 11, which of the following methods were
+
+369
+00:31:43,690 --> 00:31:49,280
+more likely be susceptible to ethical violation
+
+370
+00:31:49,280 --> 00:31:54,600
+when used to form conclusions about the entire
+
+371
+00:31:54,600 --> 00:31:55,360
+population.
+
+372
+00:31:57,740 --> 00:31:59,800
+Now, the correct answer is convenience sample.
+
+373
+00:32:01,260 --> 00:32:04,380
+Because number one, convenience sample is used
+
+374
+00:32:04,380 --> 00:32:10,520
+because it is easy, inexpensive, less costly, and it's
+
+375
+00:32:10,520 --> 00:32:14,880
+used I mean, we select the sample if it is
+
+376
+00:32:14,880 --> 00:32:19,400
+convenient to the researcher by himself. But maybe
+
+377
+00:32:19,400 --> 00:32:22,580
+in this case, we have biased collection. For this
+
+378
+00:32:22,580 --> 00:32:27,260
+reason, this is incorrect sampling. Most of the
+
+379
+00:32:27,260 --> 00:32:32,880
+time, we are going to avoid using this technique
+
+380
+00:32:32,880 --> 00:32:39,560
+unless your sample is unbiased. Because if, for
+
+381
+00:32:39,560 --> 00:32:45,330
+example, suppose I love T of type A.
+
+382
+00:32:48,470 --> 00:32:52,170
+And my sample, I select a sample of size 20. 
+ +383 +00:32:56,770 --> 00:33:02,430 +Since I love type A, I choose these 20 students or + +384 +00:33:02,430 --> 00:33:08,370 +20 persons that have, that like T of type A. That + +385 +00:33:08,370 --> 00:33:13,320 +means your sample It's convenient for you, but + +386 +00:33:13,320 --> 00:33:13,740 +it's biased. + +387 +00:33:16,860 --> 00:33:19,200 +Okay, so in this case it's called convenient + +388 +00:33:19,200 --> 00:33:24,740 +sample, so it will give incorrect results. So it's + +389 +00:33:24,740 --> 00:33:26,320 +convenient sample. + +390 +00:33:30,360 --> 00:33:34,900 +Let's do one, the other section, one of these + +391 +00:33:34,900 --> 00:33:39,160 +problems, true, false. Let's do some of these. + +392 +00:33:41,910 --> 00:33:45,110 +Now for a true and false problem, try to avoid + +393 +00:33:45,110 --> 00:33:49,370 +calculations as much as you can. Just solve the + +394 +00:33:49,370 --> 00:33:53,010 +problem without any computations. Maybe simple + +395 +00:33:53,010 --> 00:33:57,210 +calculations might be used, but don't use the + +396 +00:33:57,210 --> 00:34:00,890 +exact calculations because the problem asks just + +397 +00:34:00,890 --> 00:34:05,150 +true or false. So sometimes the problem makes + +398 +00:34:05,150 --> 00:34:07,630 +sense, the answer is true, so just say true + +399 +00:34:07,630 --> 00:34:12,990 +without doing the complete calculations. Because + +400 +00:34:12,990 --> 00:34:14,730 +you will waste time. Because as you know, we have + +401 +00:34:14,730 --> 00:34:18,990 +exam of just 60 minutes. And for true and false, I + +402 +00:34:18,990 --> 00:34:22,490 +expect your answer to be within, for example, 15 + +403 +00:34:22,490 --> 00:34:29,170 +seconds. Just read the problem, then figure your + +404 +00:34:29,170 --> 00:34:33,910 +answer. So in this case, sometimes you don't need + +405 +00:34:33,910 --> 00:34:38,000 +to do the exact calculations. 
But for the free + +406 +00:34:38,000 --> 00:34:40,840 +response problems you have to do the calculations. + +407 +00:34:41,000 --> 00:34:43,820 +But here just think about it a little bit and + +408 +00:34:43,820 --> 00:34:46,740 +within a few seconds you can figure out if it is + +409 +00:34:46,740 --> 00:34:50,680 +true or false. Now let's think about number one. + +410 +00:34:52,480 --> 00:34:55,700 +Now each of these problems I will ask all of you + +411 +00:34:55,700 --> 00:34:59,300 +to figure out the answer and I will give bonus, + +412 +00:34:59,500 --> 00:35:05,130 +one point for each one. Forget about the answer is + +413 +00:35:05,130 --> 00:35:10,530 +it true, I need explanation why is it true. Now + +414 +00:35:10,530 --> 00:35:17,490 +let's see. The amount of time it takes to complete + +415 +00:35:17,490 --> 00:35:22,750 +an examination has skewed left distribution with a + +416 +00:35:22,750 --> 00:35:28,210 +mean of 65 minutes and standard deviation of 8 + +417 +00:35:28,210 --> 00:35:28,610 +minutes. + +418 +00:35:31,460 --> 00:35:36,540 +If 64 students were randomly sampled. So we select + +419 +00:35:36,540 --> 00:35:41,860 +a random sample of 64. Now we ask about the + +420 +00:35:41,860 --> 00:35:47,260 +probability that the sample mean of the sampled + +421 +00:35:47,260 --> 00:35:51,960 +students exceeds 71 minutes is approximately zero. + +422 +00:35:52,980 --> 00:35:59,900 +Probability that the sample mean of the 64 + +423 +00:35:59,900 --> 00:36:00,740 +students + +424 +00:36:03,240 --> 00:36:08,540 +score above 71 this probability is approximately + +425 +00:36:08,540 --> 00:36:12,320 +zero so + +426 +00:36:12,320 --> 00:36:16,980 +the problem says the amount of time it takes to + +427 +00:36:16,980 --> 00:36:21,320 +complete the final examination is not normal it's + +428 +00:36:21,320 --> 00:36:25,500 +left skewed skewed to the left here we select a + +429 +00:36:25,500 --> 00:36:31,280 +random sample of 65 I'm sorry, of 64 students. 
+ +430 +00:36:31,520 --> 00:36:37,140 +That gives sample mean of 65, and we have standard + +431 +00:36:37,140 --> 00:36:43,480 +deviation of 8. Now, the population by itself is + +432 +00:36:43,480 --> 00:36:48,780 +not normal, and the population has mean of 65, as + +433 +00:36:48,780 --> 00:36:54,780 +we mentioned, and standard deviation of 8. Now we + +434 +00:36:54,780 --> 00:36:57,640 +select a random sample of 64, it's large sample + +435 +00:36:57,640 --> 00:37:03,530 +size. What's the probability that the sample mean + +436 +00:37:03,530 --> 00:37:07,470 +exceeds 71? It says the answer is approximately + +437 +00:37:07,470 --> 00:37:15,890 +zero. Think about it. Why the answer is true in + +438 +00:37:15,890 --> 00:37:21,330 +this case? It might be because 71 is more than the + +439 +00:37:21,330 --> 00:37:27,820 +answer. So the first guessing here is just 71 is + +440 +00:37:27,820 --> 00:37:29,460 +above 65. + +441 +00:37:32,540 --> 00:37:38,440 +Yes? We can apply the CLT theorem because N is + +442 +00:37:38,440 --> 00:37:41,560 +above 30, so we can consider that this + +443 +00:37:41,560 --> 00:37:45,360 +distribution is normal, so we transform from It's + +444 +00:37:45,360 --> 00:37:49,140 +part of standardized and do whatever you want. + +445 +00:37:49,360 --> 00:37:53,860 +That's correct, but to do the problem, the + +446 +00:37:53,860 --> 00:37:56,320 +complete answer, yes, you have to convert to this + +447 +00:37:56,320 --> 00:37:59,620 +score, because n is large, then you can figure out + +448 +00:37:59,620 --> 00:38:03,460 +the answer. So she said that. But I'm looking for + +449 +00:38:03,460 --> 00:38:08,520 +something different. First, convert to z. Our root + +450 +00:38:08,520 --> 00:38:13,860 +n, sigma is n. Divide by square root of 64, which + +451 +00:38:13,860 --> 00:38:21,620 +is also 8. So it is Z greater than 1. Sorry, 6 + +452 +00:38:21,620 --> 00:38:29,080 +divided by 1 is 6. 
Now, Z greater than 6 if you go + +453 +00:38:29,080 --> 00:38:31,000 +back to the normal table. + +454 +00:38:33,560 --> 00:38:42,290 +Now, the table we have given it to try. So this + +455 +00:38:42,290 --> 00:38:50,790 +one is 1 minus z less than 6. For 6, we have 0 + +456 +00:38:50,790 --> 00:38:51,970 +.9999. + +457 +00:38:54,890 --> 00:38:58,490 +So the answer is approximately zero. So it makes + +458 +00:38:58,490 --> 00:39:02,150 +sense, it's zero. This is the complete answer. It + +459 +00:39:02,150 --> 00:39:05,490 +takes time. Because you have to convert the score. + +460 +00:39:06,700 --> 00:39:09,760 +Do some calculations, then use the normal table, + +461 +00:39:09,940 --> 00:39:13,460 +start the normal table. I mentioned before, it + +462 +00:39:13,460 --> 00:39:17,960 +tries to avoid this + +463 +00:39:17,960 --> 00:39:22,080 +kind of calculations. Now, how can we figure out? + +464 +00:39:26,000 --> 00:39:31,560 +Now, since the problem says Q to the left and N is + +465 +00:39:31,560 --> 00:39:36,680 +large, We can assume that X bar is approximately + +466 +00:39:36,680 --> 00:39:39,840 +normal, so don't worry about the shape of the + +467 +00:39:39,840 --> 00:39:42,560 +distribution because we select a random size of + +468 +00:39:42,560 --> 00:39:45,720 +size 6 to 4, so we can apply the central limit + +469 +00:39:45,720 --> 00:39:49,580 +theorem. Any idea? + +470 +00:39:54,580 --> 00:40:01,290 +Again, this way is true to do the complete + +471 +00:40:01,290 --> 00:40:05,350 +calculations, but again you have to avoid using + +472 +00:40:05,350 --> 00:40:06,450 +this one. I will give you a hint. + +473 +00:40:09,510 --> 00:40:15,730 +Now, 71, what's the difference between the true + +474 +00:40:15,730 --> 00:40:18,290 +mean and 76? + +475 +00:40:24,950 --> 00:40:31,060 +Okay, so that's sigma of x bar is 8 divided by + +476 +00:40:31,060 --> 00:40:38,600 +square root of 64 is 1. Again. Continuous, just 1. + +477 +00:40:42,980 --> 00:40:43,200 +Why? 
+
+478
+00:40:51,340 --> 00:40:58,110
+The difference between 71 and 65 is 6. It's six
+
+479
+00:40:58,110 --> 00:41:03,730
+times the standard error of the sample mean. And
+
+480
+00:41:03,730 --> 00:41:12,610
+we know that, if you remember this rule, 68,
+
+481
+00:41:14,110 --> 00:41:21,050
+95, 99.7, we can't, don't, don't use chebyshev in
+
+482
+00:41:21,050 --> 00:41:23,370
+this case because the distribution is roughly
+
+483
+00:41:23,370 --> 00:41:28,170
+normal. We know this rule, the empirical rule. We
+
+484
+00:41:28,170 --> 00:41:33,890
+said that 68% of the observations lie within one
+
+485
+00:41:33,890 --> 00:41:39,590
+standard deviation of the mean. 95% within two standard
+
+486
+00:41:39,590 --> 00:41:46,210
+deviations and so on. Now if you go back, now let's
+
+487
+00:41:46,210 --> 00:41:51,510
+say mu minus three sigma and mu plus three sigma.
+
+488
+00:41:52,010 --> 00:41:56,410
+But here we are talking about x bar. So the mean
+
+489
+00:41:56,410 --> 00:42:04,590
+of x bar, sigma of x bar. So empirical rule says
+
+490
+00:42:04,590 --> 00:42:11,690
+that this area between
+
+491
+00:42:11,690 --> 00:42:20,490
+mu minus 3 sigma of x bar is around 99.7. Let's
+
+492
+00:42:20,490 --> 00:42:27,260
+compute the lower bound. Mu is 65. Three times
+
+493
+00:42:27,260 --> 00:42:31,640
+sigma of x bar is one, so that's 62.
+
+494
+00:42:33,700 --> 00:42:41,780
+The maximum here equal mu again 65 plus three
+
+495
+00:42:41,780 --> 00:42:49,020
+times one, 68. So now 99.7% of the sample means
+
+496
+00:42:49,020 --> 00:42:59,500
+lie between 62 and 68. Now, what's left? 0.3% for
+
+497
+00:42:59,500 --> 00:43:04,700
+both sides, so that's 0.15 to the right and the
+
+498
+00:43:04,700 --> 00:43:05,360
+same to the left.
+
+499
+00:43:08,500 --> 00:43:15,360
+Now, we are looking for x bar exceeds 71. 68 this
+
+500
+00:43:15,360 --> 00:43:20,360
+point, 71 should be to the right. 
Now, what's the + +501 +00:43:20,360 --> 00:43:24,290 +probability to the right of 71? is almost zero. + +502 +00:43:26,130 --> 00:43:30,630 +Because since we are saying that most of the data + +503 +00:43:30,630 --> 00:43:35,010 +lies within the three standard deviations of the + +504 +00:43:35,010 --> 00:43:39,230 +mean. Now here, the difference between this point + +505 +00:43:39,230 --> 00:43:47,270 +71 and 65 is 6, and sigma of x bar is 1. So it + +506 +00:43:47,270 --> 00:43:49,770 +means the difference between mu and x bar is + +507 +00:43:49,770 --> 00:43:54,600 +around 6 times sigma of x bar. So you are sure + +508 +00:43:54,600 --> 00:44:01,320 +that 99.7% of the data lies between 62 and 68, and + +509 +00:44:01,320 --> 00:44:05,160 +just 0.3% of the area left to the other side, two + +510 +00:44:05,160 --> 00:44:09,560 +sides, split in half, so we have 0.15 to the right + +511 +00:44:09,560 --> 00:44:12,700 +and the other to the left. But again, we are + +512 +00:44:12,700 --> 00:44:16,380 +looking for expiry exceeds 71, so we have to go + +513 +00:44:16,380 --> 00:44:20,340 +further to the right side, so the area becomes + +514 +00:44:20,340 --> 00:44:20,920 +very small. + +515 +00:44:27,080 --> 00:44:32,020 +This method is faster if you figure out this one + +516 +00:44:32,020 --> 00:44:38,060 +we can apply the empirical rule. Let's do similar + +517 +00:44:38,060 --> 00:44:41,780 +questions. Look at the other one. + +518 +00:44:46,940 --> 00:44:48,180 +Number five. + +519 +00:44:59,270 --> 00:45:02,050 +The amount of gasoline purchased per car at large + +520 +00:45:02,050 --> 00:45:08,110 +surface stations has population mean of 15. So the + +521 +00:45:08,110 --> 00:45:10,070 +mean is 15. + +522 +00:45:13,010 --> 00:45:18,150 +And the population standard deviation of 4. Sigma + +523 +00:45:18,150 --> 00:45:24,330 +is 4. It is assumed that the amount of gasoline + +524 +00:45:24,330 --> 00:45:26,050 +purchased per car is symmetric. 
+ +525 +00:45:28,870 --> 00:45:38,990 +there is approximately 68.26% it shows that a + +526 +00:45:38,990 --> 00:45:44,970 +random sample of 16 cars so n equals 16 cars will + +527 +00:45:44,970 --> 00:45:49,430 +have sample mean between 14 and 16 so it says that + +528 +00:45:49,430 --> 00:45:55,830 +between 14 and 16 the answer is 68.24% + +529 +00:45:58,710 --> 00:46:01,890 +So again, we have a population, this population is + +530 +00:46:01,890 --> 00:46:08,550 +symmetric, with mean of 15 sigma of 4. We select a + +531 +00:46:08,550 --> 00:46:14,170 +random sample of 16. The problem says that the + +532 +00:46:14,170 --> 00:46:17,930 +probability of X bar between 14 and 16 equals 68%. + +533 +00:46:17,930 --> 00:46:24,450 +And the answer is true. Why? How can we apply or + +534 +00:46:24,450 --> 00:46:29,490 +use the empirical rule in this case? A mu minus 3 + +535 +00:46:29,490 --> 00:46:30,150 +standard deviation? + +536 +00:46:34,390 --> 00:46:42,690 +It says 68, 1. So it's a mu minus 1 standard + +537 +00:46:42,690 --> 00:46:46,770 +deviation, 1 sigma X bar, and let's look at 1 plus + +538 +00:46:46,770 --> 00:46:56,400 +sigma X bar. A mu is 15. Now Sigma of X1 is + +539 +00:46:56,400 --> 00:47:00,680 +also 1 because 4 divided by square root of 16 is + +540 +00:47:00,680 --> 00:47:01,580 +1. + +541 +00:47:04,040 --> 00:47:12,540 +So 1 times 1 is 14. On the other hand 15 plus 1 + +542 +00:47:12,540 --> 00:47:14,160 +times 1 is 16. + +543 +00:47:16,790 --> 00:47:21,450 +The problem says 68.4% of the answers is correct. + +544 +00:47:22,810 --> 00:47:28,010 +Because we know that 68% of the data will fall + +545 +00:47:28,010 --> 00:47:32,390 +within one standard deviation around the mean. So + +546 +00:47:32,390 --> 00:47:35,970 +it's, I think, it's very quickly to get your + +547 +00:47:35,970 --> 00:47:39,870 +answer rather than using the exact calculation. 
+
+548
+00:47:40,070 --> 00:47:44,910
+Because for the exact one, you have to do 14 minus
+
+549
+00:47:44,910 --> 00:47:53,990
+the mean. divided by sigma z, then 16 minus the
+
+550
+00:47:53,990 --> 00:47:58,430
+mean divided by 1. Then use the normal theorem to
+
+551
+00:47:58,430 --> 00:48:05,470
+use the empirical rule in this case. So if we
+
+552
+00:48:05,470 --> 00:48:10,930
+select a sample of size 16, the probability that x
+
+553
+00:48:10,930 --> 00:48:14,530
+bar lies between 14 and 16 is around 68%.
+
+554
+00:48:20,320 --> 00:48:27,740
+Look at number six. Again, we assume we have the
+
+555
+00:48:27,740 --> 00:48:36,560
+same information for the mean. Mu is 15. It's the
+
+556
+00:48:36,560 --> 00:48:41,640
+same standard deviation is 4. But here, a random
+
+557
+00:48:41,640 --> 00:48:47,460
+sample of 64 cars selected. So instead of
+
+558
+00:48:47,460 --> 00:48:51,880
+selecting 16 cars, we select a random sample of
+
+559
+00:48:51,880 --> 00:48:53,900
+size 64.
+
+560
+00:48:55,760 --> 00:49:03,560
+Now it says there is approximately 95.44
+
+561
+00:49:03,560 --> 00:49:07,960
+% which shows that the sample mean will be between
+
+562
+00:49:07,960 --> 00:49:14,460
+14 and 16. So again, this probability between 14
+
+563
+00:49:14,460 --> 00:49:19,670
+and 16 is also Mu minus 2 standard deviation. And
+
+564
+00:49:19,670 --> 00:49:22,950
+equals 95.44. Let's see if it's correct or not.
+
+565
+00:49:23,890 --> 00:49:28,250
+Since it's mentioned that 95%, so it means that we
+
+566
+00:49:28,250 --> 00:49:31,070
+are talking about two standard deviations. So
+
+567
+00:49:31,070 --> 00:49:34,630
+let's just compute mu minus 2 standard deviation
+
+568
+00:49:34,630 --> 00:49:43,090
+and plus 2 sigma x. So the mean is 15 minus plus 2
+
+569
+00:49:43,090 --> 00:49:49,510
+times again 1. One is not one because sigma of x
+
+570
+00:49:49,510 --> 00:49:56,110
+bar was one because n was 16. 
Now my new sigma + +571 +00:49:56,110 --> 00:50:01,730 +equal one-half because sigma over root n. Now + +572 +00:50:01,730 --> 00:50:10,070 +sigma again the same value four divided by 64 over + +573 +00:50:10,070 --> 00:50:13,970 +eight so that's one-half. So this one should be + +574 +00:50:13,970 --> 00:50:14,810 +two times + +575 +00:50:20,340 --> 00:50:26,820 +So that will give 15 minus 1, 14. 15 plus 1 is 16. + +576 +00:50:30,480 --> 00:50:35,180 +The probability between 14 and 16, I mean X bar + +577 +00:50:35,180 --> 00:50:40,840 +lies between 14 and 16, equals 95%. And we just + +578 +00:50:40,840 --> 00:50:45,210 +change some information a little bit. But the + +579 +00:50:45,210 --> 00:50:48,450 +answer was the probability between X bar, the + +580 +00:50:48,450 --> 00:50:51,790 +probability that X bar lies between 14 and 16 is + +581 +00:50:51,790 --> 00:50:56,690 +around 68%. Now what's the difference between + +582 +00:50:56,690 --> 00:51:03,030 +number 5 and 6? N is large. As N increases, + +583 +00:51:04,530 --> 00:51:06,790 +standard deviation decreases. Standard deviation + +584 +00:51:06,790 --> 00:51:09,910 +decreases. If you look here, sigma X bar was 1. + +585 +00:51:10,560 --> 00:51:13,440 +Then my new sigma of x bar is one half. As we + +586 +00:51:13,440 --> 00:51:16,260 +mentioned before, as n increases, sigma of x bar + +587 +00:51:16,260 --> 00:51:21,760 +decreases. In this case, we have larger + +588 +00:51:21,760 --> 00:51:25,460 +probability. So if we increase the sample size, + +589 +00:51:26,160 --> 00:51:30,760 +then the probability that x bar is between 14 and + +590 +00:51:30,760 --> 00:51:35,040 +16 will increase. So this proportion is increased + +591 +00:51:35,040 --> 00:51:40,630 +around 30 or around 27%. Because you just increase + +592 +00:51:40,630 --> 00:51:46,950 +the sample size from 16 all the way up to 64. 
So + +593 +00:51:46,950 --> 00:51:50,050 +since, as we know, as the sample size increases, + +594 +00:51:50,770 --> 00:51:55,210 +sigma of X bar decreases. So we are more sure that + +595 +00:51:55,210 --> 00:52:00,710 +X bar lies between 14 and 16. The previous one, + +596 +00:52:01,250 --> 00:52:05,590 +the chance that, or the probability that X bar + +597 +00:52:05,590 --> 00:52:09,450 +lies between 14 and 16 is around two-third. around + +598 +00:52:09,450 --> 00:52:14,530 +68%. But here, for the same kind of probability, + +599 +00:52:15,010 --> 00:52:19,190 +it's equal around 95%, because you increase the + +600 +00:52:19,190 --> 00:52:20,130 +sample size. + +601 +00:52:23,670 --> 00:52:30,210 +Any question? Let's do one more. + +602 +00:52:34,550 --> 00:52:35,490 +Suppose + +603 +00:52:53,560 --> 00:52:56,960 +Sigma squared, it means the variance is 100. + +604 +00:53:00,100 --> 00:53:04,320 +So sigma don't forget to take the square root of + +605 +00:53:04,320 --> 00:53:11,660 +sigma squared in order to find sigma. In a sample + +606 +00:53:11,660 --> 00:53:12,380 +of 100, + +607 +00:53:15,480 --> 00:53:20,540 +95% of all possible sample means will fall between + +608 +00:53:29,270 --> 00:53:36,430 +This equals around 95%. Now without calculations, + +609 +00:53:37,010 --> 00:53:40,450 +since it says the problem is normally distributed + +610 +00:53:40,450 --> 00:53:46,170 +or N is large. In this case, N is large enough to + +611 +00:53:46,170 --> 00:53:49,570 +apply the central limit theorem. Then we can use + +612 +00:53:49,570 --> 00:53:54,190 +the empirical rule. It says 95% is too strong + +613 +00:53:54,190 --> 00:53:59,630 +deviation of the mean. So mu minus plus. 2 sigma + +614 +00:53:59,630 --> 00:54:05,850 +of x bar. Now, mu is 50 minus plus 2 sigma of x + +615 +00:54:05,850 --> 00:54:09,290 +bar. Yes, I have to compute sigma of x bar first. + +616 +00:54:09,910 --> 00:54:15,410 +So, sigma divided by is 1. 
So, simple calculation, + +617 +00:54:15,550 --> 00:54:21,570 +just sigma over root n is 1. So, minus 1. 50 minus + +618 +00:54:21,570 --> 00:54:29,290 +2 is 48. 50 plus 2 is 52. So it's between 40, 8, + +619 +00:54:29,370 --> 00:54:35,690 +and 52, and this probability is 95%. So it's true. + +620 +00:54:36,950 --> 00:54:40,690 +Now which is faster? To use this method, which is + +621 +00:54:40,690 --> 00:54:42,970 +maybe less than one minute, you can figure out the + +622 +00:54:42,970 --> 00:54:46,890 +answer, or use the complete calculations. In this + +623 +00:54:46,890 --> 00:54:48,990 +case, you have to find z-score for the first one, + +624 +00:54:49,090 --> 00:54:51,930 +z for the other one, then use the normal table, + +625 +00:54:51,990 --> 00:54:56,610 +and that will take at least five minutes. So to + +626 +00:54:56,610 --> 00:54:59,330 +use the empirical rule, most of the time gives + +627 +00:54:59,330 --> 00:55:03,410 +shorter time. + +628 +00:55:05,290 --> 00:55:06,030 +Any question? + +629 +00:55:08,770 --> 00:55:11,430 +Let's do one more. + +630 +00:55:14,350 --> 00:55:23,850 +One more. Number 10. The amount of bleach a + +631 +00:55:23,850 --> 00:55:30,610 +machine pours into bottles has a mean of 36. The + +632 +00:55:30,610 --> 00:55:40,310 +mean is 36. Sigma is 0.15 ounces. Suppose we take + +633 +00:55:40,310 --> 00:55:45,590 +around a sample of size 36 bottles filled by this + +634 +00:55:45,590 --> 00:55:49,390 +machine. The sampling distribution or the sample + +635 +00:55:49,390 --> 00:55:53,730 +mean has standard error of So standard error means + +636 +00:55:53,730 --> 00:55:55,290 +sigma of x bar is 0.15. + +637 +00:55:58,150 --> 00:56:01,990 +Again, it says the sample distribution of the + +638 +00:56:01,990 --> 00:56:06,910 +sample mean has standard error of 0.15. I mean + +639 +00:56:06,910 --> 00:56:11,750 +sigma of x bar is 0.15. It's false. Because sigma + +640 +00:56:11,750 --> 00:56:17,530 +of x bar equals sigma over root n. 
So 0.15 divided
+
+641
+00:56:17,530 --> 00:56:18,210
+by 6.
+
+642
+00:56:20,990 --> 00:56:26,150
+So it's incorrect. That's false. False because
+
+643
+00:56:26,150 --> 00:56:33,530
+this value is just the standard deviation. But he
+
+644
+00:56:33,530 --> 00:56:36,270
+asked about the sampling distribution of the
+
+645
+00:56:36,270 --> 00:56:39,730
+sample mean has standard error. And standard error
+
+646
+00:56:39,730 --> 00:56:42,110
+means the standard deviation of the sample mean.
+
+647
+00:56:42,710 --> 00:56:44,750
+So we have to divide sigma by square root of that.
+
+648
+00:56:45,690 --> 00:56:50,230
+Next, 11. The mean of the sampling distribution
+
+649
+00:56:50,230 --> 00:56:53,250
+of a sample proportion is the population
+
+650
+00:56:53,250 --> 00:56:58,970
+proportion pi. That's true. Because if the two
+
+651
+00:56:58,970 --> 00:57:03,150
+conditions we mentioned before are satisfied, then
+
+652
+00:57:03,150 --> 00:57:07,650
+the mean of P is pi. So that's correct. Twelve,
+
+653
+00:57:07,770 --> 00:57:11,670
+the last one. The standard error of the sampling
+
+654
+00:57:11,670 --> 00:57:14,290
+distribution of P times 1 minus P divided by N,
+
+655
+00:57:14,690 --> 00:57:17,890
+where P is the sample proportion, it's false,
+
+656
+00:57:18,470 --> 00:57:23,690
+because sigma over P square root of Pi, 1 minus Pi
+
+657
+00:57:23,690 --> 00:57:27,490
+divided by N. So this statement is false.
+
+658
+00:57:43,560 --> 00:57:49,820
+Let's do, look at number 13, the last one, number
+
+659
+00:57:49,820 --> 00:57:50,360
+13.
+
+660
+00:57:56,900 --> 00:58:00,700
+A sample of size 25. So we took a random sample of
+
+661
+00:58:00,700 --> 00:58:04,640
+25. So N is 25.
+
+662
+00:58:08,880 --> 00:58:11,080
+It provides a sample variance. 
+ +663 +00:58:14,230 --> 00:58:18,130 +The standard error, he asked about the standard + +664 +00:58:18,130 --> 00:58:26,190 +error, sigma of x bar, in this case equal to 4, is + +665 +00:58:26,190 --> 00:58:29,750 +best described as the estimate of the standard + +666 +00:58:29,750 --> 00:58:33,250 +deviation of means calculated from sample size 25. + +667 +00:58:33,610 --> 00:58:37,350 +He asked about sigma of x bar. We know that sigma + +668 +00:58:37,350 --> 00:58:40,650 +of x bar equals sigma over root n, but sigma is + +669 +00:58:40,650 --> 00:58:47,020 +not given. S is a square, S is a 20, but what's + +670 +00:58:47,020 --> 00:58:52,260 +Sigma? Because Sigma of X bar, Sigma over root N, + +671 +00:58:53,020 --> 00:58:58,640 +Sigma is not given. But as we know before, since N + +672 +00:58:58,640 --> 00:59:04,080 +is large enough, so if N is large, in this case, + +673 +00:59:04,420 --> 00:59:09,120 +if N is large, if N is large, we can replace Sigma + +674 +00:59:09,120 --> 00:59:16,550 +by S. And this is just S over root N. So if N is + +675 +00:59:16,550 --> 00:59:24,010 +large enough, more than 15, so we can use or we + +676 +00:59:24,010 --> 00:59:27,530 +can apply the central limit theorem. So we get the + +677 +00:59:27,530 --> 00:59:35,500 +sigma YS. So S equals 20 divided by 5 is 4. So the + +678 +00:59:35,500 --> 00:59:38,480 +standard error equals 4 in this case. So if S + +679 +00:59:38,480 --> 00:59:43,120 +squared is given instead of sigma squared, we can + +680 +00:59:43,120 --> 00:59:48,300 +replace sigma by S if N is larger. So in this + +681 +00:59:48,300 --> 00:59:52,120 +case, we replace sigma by S. So sigma of x bar + +682 +00:59:52,120 --> 00:59:55,060 +equals S over root N. That will give 4. + +683 +00:59:58,400 --> 01:00:02,040 +S is the sample variance. + +684 +01:00:05,740 --> 01:00:09,340 +Sigma is + +685 +01:00:09,340 --> 01:00:12,820 +population S squared. + +686 +01:00:15,720 --> 01:00:21,700 +Sigma squared. 
is the population variance. And we + +687 +01:00:21,700 --> 01:00:25,400 +know that S squared is the sample variance, where + +688 +01:00:25,400 --> 01:00:28,180 +sigma squared is the population variance. And + +689 +01:00:28,180 --> 01:00:30,940 +population in this case is not given. So we + +690 +01:00:30,940 --> 01:00:34,060 +replace the population variance by sample + +691 +01:00:34,060 --> 01:00:37,160 +variance, if the population is normal or N is + +692 +01:00:37,160 --> 01:00:43,200 +large. Because if N is large enough, we can apply + +693 +01:00:43,200 --> 01:00:46,760 +the central limit theorem. And if you go back The + +694 +01:00:46,760 --> 01:00:49,080 +formula for S, if you remember, we divide by N + +695 +01:00:49,080 --> 01:00:55,500 +minus one. And for sigma, we divide by capital. So + +696 +01:00:55,500 --> 01:00:59,760 +if N is large enough, then there is small + +697 +01:00:59,760 --> 01:01:03,120 +difference between sample variance and population. + +698 +01:01:04,260 --> 01:01:04,360 +Okay. + +699 +01:01:11,040 --> 01:01:15,000 +Any questions? Any question? + +700 +01:01:19,130 --> 01:01:25,830 +If it is above 15, it's okay. 15 or 30, depends on + +701 +01:01:25,830 --> 01:01:28,890 +the population you have. But here, if you divide + +702 +01:01:28,890 --> 01:01:35,150 +by 24 or 25, the difference is small. I mean, if + +703 +01:01:35,150 --> 01:01:41,120 +you divide this one by 24 or by 25, The difference + +704 +01:01:41,120 --> 01:01:43,740 +between S and Sigma is small, so we can replace + +705 +01:01:43,740 --> 01:01:48,100 +Sigma by S, if Sigma is unknown. Questions? 
+ diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI.srt new file mode 100644 index 0000000000000000000000000000000000000000..789d89efef0459aa6119923d119be2b0295d06cf --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI.srt @@ -0,0 +1,756 @@ + +1 +00:00:07,100 --> 00:00:10,520 +So then shall we continue chapter eight confidence + +2 +00:00:10,520 --> 00:00:14,360 +interval estimation. Last time we talked about + +3 +00:00:14,360 --> 00:00:18,820 +confidence interval for the population mean mu + +4 +00:00:18,820 --> 00:00:25,220 +when sigma is known. And we end with this specific + +5 +00:00:25,220 --> 00:00:27,480 +formula in order to construct + +6 +00:00:30,020 --> 00:00:34,980 +for the true proportion, the true population mean + +7 +00:00:34,980 --> 00:00:39,700 +mu, which is x bar, the sample mean, plus or minus + +8 +00:00:39,700 --> 00:00:43,020 +the alpha over 2, the critical value, times sigma + +9 +00:00:43,020 --> 00:00:46,480 +over root n, which is the standard error of the + +10 +00:00:46,480 --> 00:00:53,200 +estimate. And I think we gave an example for + +11 +00:00:53,200 --> 00:00:57,300 +this one. So one more time, the confidence + +12 +00:00:57,300 --> 00:01:01,480 +intervals for this chapter will be split into two + +13 +00:01:01,480 --> 00:01:07,020 +parts. One for the true mean, and the other for + +14 +00:01:07,020 --> 00:01:11,180 +the true proportion. For the population mean, + +15 +00:01:11,500 --> 00:01:16,760 +there are two cases. One sigma is known, and we + +16 +00:01:16,760 --> 00:01:21,120 +just explained. And the other one, one sigma is + +17 +00:01:21,120 --> 00:01:24,560 +unknown. Let's see how can we construct a + +18 +00:01:24,560 --> 00:01:29,780 +confidence interval when sigma is unknown. In the + +19 +00:01:29,780 --> 00:01:37,600 +real world, I mean in all real world business + +20 +00:01:37,600 --> 00:01:41,160 +situations, sigma is not given. 
So our goal is to + +21 +00:01:41,160 --> 00:01:44,940 +estimate sigma by using the sample standard + +22 +00:01:44,940 --> 00:01:50,440 +deviation, then replace sigma by S. In this case, + +23 +00:01:50,480 --> 00:01:53,680 +we have to use a new distribution. This + +24 +00:01:53,680 --> 00:01:58,590 +distribution is called T distribution. So again, + +25 +00:01:59,110 --> 00:02:01,990 +if the population standard deviation sigma is + +26 +00:02:01,990 --> 00:02:06,350 +unknown, we can substitute the sample standard + +27 +00:02:06,350 --> 00:02:12,710 +deviation S instead of sigma. In this case, we + +28 +00:02:12,710 --> 00:02:17,790 +should use T distribution instead of normal + +29 +00:02:17,790 --> 00:02:20,810 +distribution. So if sigma is unknown. + +30 +00:02:33,090 --> 00:02:39,890 +replace sigma by S and use T + +31 +00:02:39,890 --> 00:02:44,530 +distribution instead of norm. + +32 +00:02:47,970 --> 00:02:50,530 +So this is the idea here. We have to replace sigma + +33 +00:02:50,530 --> 00:02:55,920 +by S and use a new distribution called T. In this + +34 +00:02:55,920 --> 00:02:59,720 +case, the three assumptions will remain nearly the + +35 +00:02:59,720 --> 00:03:06,620 +same. But instead of the first one will be here, + +36 +00:03:06,720 --> 00:03:08,780 +population standard deviation is unknown instead + +37 +00:03:08,780 --> 00:03:12,280 +of known. So this is the assumption that changed. + +38 +00:03:12,780 --> 00:03:17,140 +The other two assumptions remain the same. Either + +39 +00:03:17,140 --> 00:03:20,380 +the population is normally distributed, or if the + +40 +00:03:20,380 --> 00:03:24,320 +population is not normal, just use large sample. + +41 +00:03:25,430 --> 00:03:28,550 +In this case, the confidence interval estimate is + +42 +00:03:28,550 --> 00:03:35,150 +given by this formula X bar plus or minus T alpha + +43 +00:03:35,150 --> 00:03:40,570 +over 2S over square root of F. 
Now the question
+
+44
+00:03:40,570 --> 00:03:45,470
+is, how can we find the critical value T alpha
+
+45
+00:03:45,470 --> 00:03:49,330
+over 2? There is a table for this one at the end
+
+46
+00:03:49,330 --> 00:03:52,510
+of your book. The table gives the area in the
+
+47
+00:03:52,510 --> 00:03:53,090
+upper tail.
+
+48
+00:03:59,100 --> 00:04:05,080
+For certain probabilities, start from 2.2,
+
+49
+00:04:05,520 --> 00:04:12,940
+0.10, and so on. So this is the table. So columns
+
+50
+00:04:12,940 --> 00:04:19,300
+represent the percentages of the area in the upper
+
+51
+00:04:19,300 --> 00:04:24,180
+tail. Rows represent degrees of freedom, start
+
+52
+00:04:24,180 --> 00:04:29,110
+from 1 to all the way up to 120. Then infinity.
+
+53
+00:04:30,370 --> 00:04:35,690
+Now let's see how can we use the table. You have
+
+54
+00:04:35,690 --> 00:04:40,590
+to know that T is a family of distributions. The
+
+55
+00:04:40,590 --> 00:04:46,770
+degrees of freedom, DF stands for degrees of
+
+56
+00:04:46,770 --> 00:04:52,210
+freedom, always equals N minus 1. For example, if
+
+57
+00:04:52,210 --> 00:04:57,250
+the sample size is 15, So degrees of freedom is 14
+
+58
+00:04:57,250 --> 00:05:01,910
+and so on. Now degrees of freedom, number of
+
+59
+00:05:01,910 --> 00:05:05,590
+observations that are free to vary after sample
+
+60
+00:05:05,590 --> 00:05:09,290
+mean has been calculated. Let's see the meaning of
+
+61
+00:05:09,290 --> 00:05:15,410
+degrees of freedom. Now imagine that we
+
+62
+00:05:15,410 --> 00:05:21,150
+know the mean is
+
+63
+00:05:21,150 --> 00:05:21,430
+8.
+
+64
+00:05:25,690 --> 00:05:29,210
+previous information that the mean of three
+
+65
+00:05:29,210 --> 00:05:34,270
+observations equals eight. So we have three
+
+66
+00:05:34,270 --> 00:05:39,530
+values. Now the restriction is the mean is eight.
+
+67
+00:05:40,590 --> 00:05:47,150
+So we have three values x1, x2, x3. 
In this case, + +68 +00:05:47,210 --> 00:05:51,100 +you can take any two values. So suppose the mean + +69 +00:05:51,100 --> 00:05:53,600 +of three numbers is eight. So there are three + +70 +00:05:53,600 --> 00:05:56,120 +observations, n equal to three, the mean is eight. + +71 +00:05:56,460 --> 00:05:59,980 +In this case, you can take any two of these three + +72 +00:05:59,980 --> 00:06:05,800 +values. For example, maybe x1, I can choose x1 to + +73 +00:06:05,800 --> 00:06:09,380 +be, for example, 10. Someone else will choose x2 + +74 +00:06:09,380 --> 00:06:18,400 +to be 7. Now, the value of x3 should be equal to a + +75 +00:06:18,400 --> 00:06:23,410 +value And in that case, the sample mean should be + +76 +00:06:23,410 --> 00:06:26,950 +eight. And there are three observations, so the + +77 +00:06:26,950 --> 00:06:30,490 +total should be 24. So this value should be seven, + +78 +00:06:30,790 --> 00:06:37,120 +must be seven. So now you have two options. For x1 + +79 +00:06:37,120 --> 00:06:41,060 +and x2, just you can take any two values. But the + +80 +00:06:41,060 --> 00:06:44,560 +third one has a restriction on it that the average + +81 +00:06:44,560 --> 00:06:49,240 +will be three. Maybe someone else will choose x1 + +82 +00:06:49,240 --> 00:06:56,060 +to be seven, x3 to be, for example, 15. So now + +83 +00:06:56,060 --> 00:06:59,900 +what do you think about the remaining item or the + +84 +00:06:59,900 --> 00:07:04,460 +remaining observation? This one should be two. So + +85 +00:07:04,460 --> 00:07:08,030 +you have three. to take any two values out of + +86 +00:07:08,030 --> 00:07:12,530 +three. Here, for this specific example, he chose + +87 +00:07:12,530 --> 00:07:21,230 +x1 to be 7, x2 to be 8. Now, x3 must be 9, because + +88 +00:07:21,230 --> 00:07:24,450 +the total in this case will be 24. So the average, + +89 +00:07:24,710 --> 00:07:29,830 +again, is 8. So here, n is 3. So degrees of + +90 +00:07:29,830 --> 00:07:35,260 +freedom is 2, is n minus 1. 
So that means two + +91 +00:07:35,260 --> 00:07:39,640 +values can be any numbers. As I mentioned here, it + +92 +00:07:39,640 --> 00:07:44,260 +could be any two numbers. But the third one is not + +93 +00:07:44,260 --> 00:07:48,100 +free to vary for a given mean, because here we + +94 +00:07:48,100 --> 00:07:50,560 +have a restriction that the mean is eight. Another + +95 +00:07:50,560 --> 00:07:54,760 +example, suppose I gave you three cards. + +96 +00:07:57,700 --> 00:08:01,540 +And my restriction is the sum of the numbers on + +97 +00:08:01,540 --> 00:08:06,780 +the three cards will be 15. Maybe you will write + +98 +00:08:06,780 --> 00:08:10,960 +any two values in any two cards. For example, + +99 +00:08:11,100 --> 00:08:16,500 +maybe I will choose seven in the first card, six + +100 +00:08:16,500 --> 00:08:21,620 +in the third, So this one should be 2. So the + +101 +00:08:21,620 --> 00:08:26,340 +degrees of freedom is 2. Any questions? So this is + +102 +00:08:26,340 --> 00:08:30,160 +the meaning of degrees of freedom. So in general, + +103 +00:08:30,680 --> 00:08:33,800 +if the sample size is n, I mean if the sample size + +104 +00:08:33,800 --> 00:08:39,840 +equals n degrees of freedom, n minus 1. That's the + +105 +00:08:39,840 --> 00:08:43,940 +meaning of degrees of freedom. Now let's see the + +106 +00:08:43,940 --> 00:08:46,240 +comparison between T distribution and Z. + +107 +00:08:49,600 --> 00:08:52,280 +The blue one is normal distribution, standard + +108 +00:08:52,280 --> 00:08:59,060 +normal. The other two, the blue and red ones, are + +109 +00:08:59,060 --> 00:09:01,300 +T distributions with different degrees of freedom. + +110 +00:09:01,820 --> 00:09:05,560 +For example, the red one has degrees of freedom + +111 +00:09:05,560 --> 00:09:12,180 +five. So that means the sample size is six. And + +112 +00:09:12,180 --> 00:09:15,720 +actually, in this case, T distribution has long + +113 +00:09:15,720 --> 00:09:21,340 +tail. 
The other one, the blue one, has degrees of + +114 +00:09:21,340 --> 00:09:25,760 +freedom 13, that means n equals 14. Now we can see + +115 +00:09:25,760 --> 00:09:32,940 +that the blue one is closer to the normal than the + +116 +00:09:32,940 --> 00:09:36,400 +red one. That means as degrees of freedom + +117 +00:09:36,400 --> 00:09:41,180 +increases, the difference between z and t becomes + +118 +00:09:41,180 --> 00:09:48,000 +smaller and smaller. Always, T converges or goes + +119 +00:09:48,000 --> 00:09:55,160 +to Z as N increases. So for large sample sizes, we + +120 +00:09:55,160 --> 00:09:59,540 +can approximate Z value by T. That in general, if + +121 +00:09:59,540 --> 00:10:04,840 +N is very, very large, I mean above 30. But if N, + +122 +00:10:05,000 --> 00:10:07,700 +for example, 100, in this case, the two curves + +123 +00:10:07,700 --> 00:10:09,980 +will be identical. + +124 +00:10:11,660 --> 00:10:17,420 +Let's see how can we use the Table 40. + +125 +00:10:21,140 --> 00:10:25,720 +This table is quite similar to the one you have at + +126 +00:10:25,720 --> 00:10:30,380 +the end of your book. Now again, this table gives + +127 +00:10:30,380 --> 00:10:31,920 +the area in the upper tail. + +128 +00:10:35,660 --> 00:10:41,840 +So if we are interested in + +129 +00:10:41,840 --> 00:10:44,080 +95 confidence interval. + +130 +00:10:51,620 --> 00:11:01,760 +That means alpha is 5% because 95% between these + +131 +00:11:01,760 --> 00:11:08,160 +two values. So 5% left. The distribution is + +132 +00:11:08,160 --> 00:11:13,440 +normal. It is bell-shaped, so 5% for both + +133 +00:11:13,440 --> 00:11:17,720 +directions. So here we have 2.5, and the other one + +134 +00:11:17,720 --> 00:11:22,280 +also 2.5. So the area in the upper tail is 2.5, so + +135 +00:11:22,280 --> 00:11:28,820 +this is my 2.5. 
So again, columns represents upper + +136 +00:11:28,820 --> 00:11:33,660 +tail probabilities, while rows represent degrees + +137 +00:11:33,660 --> 00:11:40,730 +of freedom. Now, let's imagine that N equals 20, + +138 +00:11:41,610 --> 00:11:46,250 +for example. Now alpha is 5, so that means alpha + +139 +00:11:46,250 --> 00:11:50,470 +over 2, as we mentioned, 2.5%. Here we should look + +140 +00:11:50,470 --> 00:11:56,070 +at degrees of freedom. Equal N minus 1. That's 19. + +141 +00:11:57,470 --> 00:12:06,170 +So this is 19. Go all the way up to 0 to 5. So + +142 +00:12:06,170 --> 00:12:11,650 +this is your answer. 2.093. So T alpha over 2 + +143 +00:12:11,650 --> 00:12:16,970 +equal 2.093. + +144 +00:12:17,810 --> 00:12:20,910 +This is the way how can we use the T distribution. + +145 +00:12:23,010 --> 00:12:29,070 +Now let's see if we have different confidence + +146 +00:12:29,070 --> 00:12:29,390 +level. + +147 +00:12:37,520 --> 00:12:41,040 +We got 2.093. + +148 +00:12:42,420 --> 00:12:45,120 +So T, 0 to 5. + +149 +00:12:50,340 --> 00:12:55,000 +Now for the same size, for the same sample size, + +150 +00:12:55,440 --> 00:13:01,300 +let's talk about 90% confidence. 90% means 10% + +151 +00:13:01,300 --> 00:13:05,940 +left. Five to the right. So alpha. + +152 +00:13:13,290 --> 00:13:17,410 +This is your 0.05, 1.729. + +153 +00:13:24,670 --> 00:13:32,530 +Now as C level increases, T also increases. When + +154 +00:13:32,530 --> 00:13:39,870 +90% we got T to be 1.729. When C11 increases up to + +155 +00:13:39,870 --> 00:13:46,190 +5%, we have 95. My T becomes 2.093. So this is the + +156 +00:13:46,190 --> 00:13:52,570 +way how can we use the TMR. Here in the slide, we + +157 +00:13:52,570 --> 00:13:57,750 +have another example. That is, we have a sample + +158 +00:13:57,750 --> 00:14:02,810 +size of three. The use of freedom is two. 
In this + +159 +00:14:02,810 --> 00:14:06,550 +case, he's interested in 90% confidence interval, + +160 +00:14:07,530 --> 00:14:12,690 +so alpha is 10%. Consequently, alpha over 2 is 5%. + +161 +00:14:12,690 --> 00:14:16,410 +Here, we should look at degrees of freedom equals + +162 +00:14:16,410 --> 00:14:22,790 +2 at 5%, and the answer is 2.92. + +163 +00:14:27,550 --> 00:14:31,130 +Now, let's see how can we compare the results of + +164 +00:14:31,130 --> 00:14:38,190 +different degrees of freedom comparing to the + +165 +00:14:38,190 --> 00:14:41,910 +standard normal distribution. Z means we have + +166 +00:14:41,910 --> 00:14:44,830 +infinite degrees of freedom, I mean n equal + +167 +00:14:44,830 --> 00:14:50,130 +infinity in this case. Suppose the confidence + +168 +00:14:50,130 --> 00:14:59,790 +level is 80%. 80% means 20%, so 10. In another + +169 +00:14:59,790 --> 00:15:04,550 +tail and 10th left tail Now under degrees of + +170 +00:15:04,550 --> 00:15:14,530 +freedom 10 If we go back 10 degrees + +171 +00:15:14,530 --> 00:15:24,350 +of freedom Up to 10 we have this answer Now + +172 +00:15:24,350 --> 00:15:28,580 +for 20 for example We have 1.325. + +173 +00:15:36,060 --> 00:15:42,300 +Look at the fourth column again. This is 10. 1 + +174 +00:15:42,300 --> 00:15:51,000 +.372. For 20, we have 1.325. For 30, 1.310. + +175 +00:15:52,960 --> 00:15:59,130 +That's for the T values. Now if you go back to the + +176 +00:15:59,130 --> 00:16:08,330 +normal table and just look at 1%. This is the z + +177 +00:16:08,330 --> 00:16:18,790 +table. 1% is the closest value under 1.28. + +178 +00:16:19,590 --> 00:16:25,930 +So that's your z-score. Now the corresponding T + +179 +00:16:25,930 --> 00:16:36,190 +values are 1.372, 1.325, 1.310, the exact Z is 1 + +180 +00:16:36,190 --> 00:16:40,970 +.18. 
Now, here we can see that the difference + +181 +00:16:40,970 --> 00:16:45,290 +between T and Z becomes smaller and smaller as N, + +182 +00:16:45,530 --> 00:16:51,530 +or degrees of freedom, increases. Here 1.372, then + +183 +00:16:51,530 --> 00:16:57,550 +it becomes less than this value, so 1.325, 1.310, + +184 +00:16:58,070 --> 00:17:02,250 +very close, 1.28. Similarly, we can do for the + +185 +00:17:02,250 --> 00:17:07,450 +other confidence levels. For example, for 90%, we + +186 +00:17:07,450 --> 00:17:13,010 +get this result. And also we have similar scenario + +187 +00:17:13,010 --> 00:17:16,630 +here for 95 and 99. So as degrees of freedom + +188 +00:17:16,630 --> 00:17:22,660 +increases, T goes to Z. Now, what's the smallest + +189 +00:17:22,660 --> 00:17:27,320 +difference between Z and other T values? For \ No newline at end of file diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI_postprocess.srt b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI_postprocess.srt new file mode 100644 index 0000000000000000000000000000000000000000..acaa2e4aed46c89d6650ab212fff09b339eb2f51 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI_postprocess.srt @@ -0,0 +1,824 @@ +1 +00:00:07,100 --> 00:00:10,520 +So then shall we continue chapter eight confidence + +2 +00:00:10,520 --> 00:00:14,360 +interval estimation. Last time we talked about + +3 +00:00:14,360 --> 00:00:18,820 +confidence interval for the population mean mu + +4 +00:00:18,820 --> 00:00:25,220 +when sigma is known. 
And we end with this specific + +5 +00:00:25,220 --> 00:00:27,480 +formula in order to construct + +6 +00:00:30,020 --> 00:00:34,980 +for the true proportion, the true population mean + +7 +00:00:34,980 --> 00:00:39,700 +mu, which is x bar, the sample mean, plus or minus + +8 +00:00:39,700 --> 00:00:43,020 +the alpha over 2, the critical value, times sigma + +9 +00:00:43,020 --> 00:00:46,480 +over root n, which is the standard error of the + +10 +00:00:46,480 --> 00:00:53,200 +estimate. And I think we gave an example for + +11 +00:00:53,200 --> 00:00:57,300 +this one. So one more time, the confidence + +12 +00:00:57,300 --> 00:01:01,480 +intervals for this chapter will be split into two + +13 +00:01:01,480 --> 00:01:07,020 +parts. One for the true mean, and the other for + +14 +00:01:07,020 --> 00:01:11,180 +the true proportion. For the population mean, + +15 +00:01:11,500 --> 00:01:16,760 +there are two cases. One sigma is known, and we + +16 +00:01:16,760 --> 00:01:21,120 +just explained. And the other one, one sigma is + +17 +00:01:21,120 --> 00:01:24,560 +unknown. Let's see how can we construct a + +18 +00:01:24,560 --> 00:01:29,780 +confidence interval when sigma is unknown. In the + +19 +00:01:29,780 --> 00:01:37,600 +real world, I mean in all real world business + +20 +00:01:37,600 --> 00:01:41,160 +situations, sigma is not given. So our goal is to + +21 +00:01:41,160 --> 00:01:44,940 +estimate sigma by using the sample standard + +22 +00:01:44,940 --> 00:01:50,440 +deviation, then replace sigma by S. In this case, + +23 +00:01:50,480 --> 00:01:53,680 +we have to use a new distribution. This + +24 +00:01:53,680 --> 00:01:58,590 +distribution is called T distribution. So again, + +25 +00:01:59,110 --> 00:02:01,990 +if the population standard deviation sigma is + +26 +00:02:01,990 --> 00:02:06,350 +unknown, we can substitute the sample standard + +27 +00:02:06,350 --> 00:02:12,710 +deviation S instead of sigma. 
In this case, we + +28 +00:02:12,710 --> 00:02:17,790 +should use T distribution instead of normal + +29 +00:02:17,790 --> 00:02:20,810 +distribution. So if sigma is unknown. + +30 +00:02:33,090 --> 00:02:39,890 +replace sigma by S and use T + +31 +00:02:39,890 --> 00:02:44,530 +distribution instead of norm. + +32 +00:02:47,970 --> 00:02:50,530 +So this is the idea here. We have to replace sigma + +33 +00:02:50,530 --> 00:02:55,920 +by S and use a new distribution called T. In this + +34 +00:02:55,920 --> 00:02:59,720 +case, the three assumptions will remain nearly the + +35 +00:02:59,720 --> 00:03:06,620 +same. But instead of the first one will be here, + +36 +00:03:06,720 --> 00:03:08,780 +population standard deviation is unknown instead + +37 +00:03:08,780 --> 00:03:12,280 +of known. So this is the assumption that changed. + +38 +00:03:12,780 --> 00:03:17,140 +The other two assumptions remain the same. Either + +39 +00:03:17,140 --> 00:03:20,380 +the population is normally distributed, or if the + +40 +00:03:20,380 --> 00:03:24,320 +population is not normal, just use large sample. + +41 +00:03:25,430 --> 00:03:28,550 +In this case, the confidence interval estimate is + +42 +00:03:28,550 --> 00:03:35,150 +given by this formula X bar plus or minus T alpha + +43 +00:03:35,150 --> 00:03:40,570 +over 2S over square root of F. Now the question + +44 +00:03:40,570 --> 00:03:45,470 +is, how can we find the critical value T alpha + +45 +00:03:45,470 --> 00:03:49,330 +over 2? There is a table for this one at the end + +46 +00:03:49,330 --> 00:03:52,510 +of your book. The table gives the area in the + +47 +00:03:52,510 --> 00:03:53,090 +upper tier. + +48 +00:03:59,100 --> 00:04:05,080 +For certain probabilities, start from 2.2, + +49 +00:04:05,520 --> 00:04:12,940 +0.10, and so on. So this is the table. So columns + +50 +00:04:12,940 --> 00:04:19,300 +represent the percentages of the area in the upper + +51 +00:04:19,300 --> 00:04:24,180 +tier. 
Rows represent degrees of freedom, start + +52 +00:04:24,180 --> 00:04:29,110 +from 1 to all the way up to 120. Then infinity. + +53 +00:04:30,370 --> 00:04:35,690 +Now let's see how can we use the table. You have + +54 +00:04:35,690 --> 00:04:40,590 +to know that T is a family of distributions. The + +55 +00:04:40,590 --> 00:04:46,770 +degrees of freedom, DF stands for degrees of + +56 +00:04:46,770 --> 00:04:52,210 +freedom, always equals N minus 1. For example, if + +57 +00:04:52,210 --> 00:04:57,250 +the sample size is 15, So degrees of freedom is 14 + +58 +00:04:57,250 --> 00:05:01,910 +and so on. Now degrees of freedom, number of + +59 +00:05:01,910 --> 00:05:05,590 +observations that are free to vary after sample + +60 +00:05:05,590 --> 00:05:09,290 +mean has been calculated. Let's see the meaning of + +61 +00:05:09,290 --> 00:05:15,410 +degrees of freedom. Now imagine that we + +62 +00:05:15,410 --> 00:05:21,150 +know the mean is + +63 +00:05:21,150 --> 00:05:21,430 +8. + +64 +00:05:25,690 --> 00:05:29,210 +previous information that the mean of three + +65 +00:05:29,210 --> 00:05:34,270 +observations equals eight. So we have three + +66 +00:05:34,270 --> 00:05:39,530 +values. Now the restriction is the mean is eight. + +67 +00:05:40,590 --> 00:05:47,150 +So we have three values x1, x2, x3. In this case, + +68 +00:05:47,210 --> 00:05:51,100 +you can take any two values. So suppose the mean + +69 +00:05:51,100 --> 00:05:53,600 +of three numbers is eight. So there are three + +70 +00:05:53,600 --> 00:05:56,120 +observations, n equal to three, the mean is eight. + +71 +00:05:56,460 --> 00:05:59,980 +In this case, you can take any two of these three + +72 +00:05:59,980 --> 00:06:05,800 +values. For example, maybe x1, I can choose x1 to + +73 +00:06:05,800 --> 00:06:09,380 +be, for example, 10. Someone else will choose x2 + +74 +00:06:09,380 --> 00:06:18,400 +to be 7. 
Now, the value of x3 should be equal to a + +75 +00:06:18,400 --> 00:06:23,410 +value And in that case, the sample mean should be + +76 +00:06:23,410 --> 00:06:26,950 +eight. And there are three observations, so the + +77 +00:06:26,950 --> 00:06:30,490 +total should be 24. So this value should be seven, + +78 +00:06:30,790 --> 00:06:37,120 +must be seven. So now you have two options. For x1 + +79 +00:06:37,120 --> 00:06:41,060 +and x2, just you can take any two values. But the + +80 +00:06:41,060 --> 00:06:44,560 +third one has a restriction on it that the average + +81 +00:06:44,560 --> 00:06:49,240 +will be three. Maybe someone else will choose x1 + +82 +00:06:49,240 --> 00:06:56,060 +to be seven, x3 to be, for example, 15. So now + +83 +00:06:56,060 --> 00:06:59,900 +what do you think about the remaining item or the + +84 +00:06:59,900 --> 00:07:04,460 +remaining observation? This one should be two. So + +85 +00:07:04,460 --> 00:07:08,030 +you have three. to take any two values out of + +86 +00:07:08,030 --> 00:07:12,530 +three. Here, for this specific example, he chose + +87 +00:07:12,530 --> 00:07:21,230 +x1 to be 7, x2 to be 8. Now, x3 must be 9, because + +88 +00:07:21,230 --> 00:07:24,450 +the total in this case will be 24. So the average, + +89 +00:07:24,710 --> 00:07:29,830 +again, is 8. So here, n is 3. So degrees of + +90 +00:07:29,830 --> 00:07:35,260 +freedom is 2, is n minus 1. So that means two + +91 +00:07:35,260 --> 00:07:39,640 +values can be any numbers. As I mentioned here, it + +92 +00:07:39,640 --> 00:07:44,260 +could be any two numbers. But the third one is not + +93 +00:07:44,260 --> 00:07:48,100 +free to vary for a given mean, because here we + +94 +00:07:48,100 --> 00:07:50,560 +have a restriction that the mean is eight. Another + +95 +00:07:50,560 --> 00:07:54,760 +example, suppose I gave you three cards. 
+ +96 +00:07:57,700 --> 00:08:01,540 +And my restriction is the sum of the numbers on + +97 +00:08:01,540 --> 00:08:06,780 +the three cards will be 15. Maybe you will write + +98 +00:08:06,780 --> 00:08:10,960 +any two values in any two cards. For example, + +99 +00:08:11,100 --> 00:08:16,500 +maybe I will choose seven in the first card, six + +100 +00:08:16,500 --> 00:08:21,620 +in the third, So this one should be 2. So the + +101 +00:08:21,620 --> 00:08:26,340 +degrees of freedom is 2. Any questions? So this is + +102 +00:08:26,340 --> 00:08:30,160 +the meaning of degrees of freedom. So in general, + +103 +00:08:30,680 --> 00:08:33,800 +if the sample size is n, I mean if the sample size + +104 +00:08:33,800 --> 00:08:39,840 +equals n degrees of freedom, n minus 1. That's the + +105 +00:08:39,840 --> 00:08:43,940 +meaning of degrees of freedom. Now let's see the + +106 +00:08:43,940 --> 00:08:46,240 +comparison between T distribution and Z. + +107 +00:08:49,600 --> 00:08:52,280 +The blue one is normal distribution, standard + +108 +00:08:52,280 --> 00:08:59,060 +normal. The other two, the blue and red ones, are + +109 +00:08:59,060 --> 00:09:01,300 +T distributions with different degrees of freedom. + +110 +00:09:01,820 --> 00:09:05,560 +For example, the red one has degrees of freedom + +111 +00:09:05,560 --> 00:09:12,180 +five. So that means the sample size is six. And + +112 +00:09:12,180 --> 00:09:15,720 +actually, in this case, T distribution has long + +113 +00:09:15,720 --> 00:09:21,340 +tail. The other one, the blue one, has degrees of + +114 +00:09:21,340 --> 00:09:25,760 +freedom 13, that means n equals 14. Now we can see + +115 +00:09:25,760 --> 00:09:32,940 +that the blue one is closer to the normal than the + +116 +00:09:32,940 --> 00:09:36,400 +red one. That means as degrees of freedom + +117 +00:09:36,400 --> 00:09:41,180 +increases, the difference between z and t becomes + +118 +00:09:41,180 --> 00:09:48,000 +smaller and smaller. 
Always, T converges or goes + +119 +00:09:48,000 --> 00:09:55,160 +to Z as N increases. So for large sample sizes, we + +120 +00:09:55,160 --> 00:09:59,540 +can approximate Z value by T. That in general, if + +121 +00:09:59,540 --> 00:10:04,840 +N is very, very large, I mean above 30. But if N, + +122 +00:10:05,000 --> 00:10:07,700 +for example, 100, in this case, the two curves + +123 +00:10:07,700 --> 00:10:09,980 +will be identical. + +124 +00:10:11,660 --> 00:10:17,420 +Let's see how can we use the Table 40. + +125 +00:10:21,140 --> 00:10:25,720 +This table is quite similar to the one you have at + +126 +00:10:25,720 --> 00:10:30,380 +the end of your book. Now again, this table gives + +127 +00:10:30,380 --> 00:10:31,920 +the area in the upper tail. + +128 +00:10:35,660 --> 00:10:41,840 +So if we are interested in + +129 +00:10:41,840 --> 00:10:44,080 +95 confidence interval. + +130 +00:10:51,620 --> 00:11:01,760 +That means alpha is 5% because 95% between these + +131 +00:11:01,760 --> 00:11:08,160 +two values. So 5% left. The distribution is + +132 +00:11:08,160 --> 00:11:13,440 +normal. It is bell-shaped, so 5% for both + +133 +00:11:13,440 --> 00:11:17,720 +directions. So here we have 2.5, and the other one + +134 +00:11:17,720 --> 00:11:22,280 +also 2.5. So the area in the upper tail is 2.5, so + +135 +00:11:22,280 --> 00:11:28,820 +this is my 2.5. So again, columns represents upper + +136 +00:11:28,820 --> 00:11:33,660 +tail probabilities, while rows represent degrees + +137 +00:11:33,660 --> 00:11:40,730 +of freedom. Now, let's imagine that N equals 20, + +138 +00:11:41,610 --> 00:11:46,250 +for example. Now alpha is 5, so that means alpha + +139 +00:11:46,250 --> 00:11:50,470 +over 2, as we mentioned, 2.5%. Here we should look + +140 +00:11:50,470 --> 00:11:56,070 +at degrees of freedom. Equal N minus 1. That's 19. + +141 +00:11:57,470 --> 00:12:06,170 +So this is 19. Go all the way up to 0 to 5. 
So + +142 +00:12:06,170 --> 00:12:11,650 +this is your answer. 2.093. So T alpha over 2 + +143 +00:12:11,650 --> 00:12:16,970 +equal 2.093. + +144 +00:12:17,810 --> 00:12:20,910 +This is the way how can we use the T distribution. + +145 +00:12:23,010 --> 00:12:29,070 +Now let's see if we have different confidence + +146 +00:12:29,070 --> 00:12:29,390 +level. + +147 +00:12:37,520 --> 00:12:41,040 +We got 2.093. + +148 +00:12:42,420 --> 00:12:45,120 +So T, 0 to 5. + +149 +00:12:50,340 --> 00:12:55,000 +Now for the same size, for the same sample size, + +150 +00:12:55,440 --> 00:13:01,300 +let's talk about 90% confidence. 90% means 10% + +151 +00:13:01,300 --> 00:13:05,940 +left. Five to the right. So alpha. + +152 +00:13:13,290 --> 00:13:17,410 +This is your 0.05, 1.729. + +153 +00:13:24,670 --> 00:13:32,530 +Now as C level increases, T also increases. When + +154 +00:13:32,530 --> 00:13:39,870 +90% we got T to be 1.729. When C11 increases up to + +155 +00:13:39,870 --> 00:13:46,190 +5%, we have 95. My T becomes 2.093. So this is the + +156 +00:13:46,190 --> 00:13:52,570 +way how can we use the TMR. Here in the slide, we + +157 +00:13:52,570 --> 00:13:57,750 +have another example. That is, we have a sample + +158 +00:13:57,750 --> 00:14:02,810 +size of three. The use of freedom is two. In this + +159 +00:14:02,810 --> 00:14:06,550 +case, he's interested in 90% confidence interval, + +160 +00:14:07,530 --> 00:14:12,690 +so alpha is 10%. Consequently, alpha over 2 is 5%. + +161 +00:14:12,690 --> 00:14:16,410 +Here, we should look at degrees of freedom equals + +162 +00:14:16,410 --> 00:14:22,790 +2 at 5%, and the answer is 2.92. + +163 +00:14:27,550 --> 00:14:31,130 +Now, let's see how can we compare the results of + +164 +00:14:31,130 --> 00:14:38,190 +different degrees of freedom comparing to the + +165 +00:14:38,190 --> 00:14:41,910 +standard normal distribution. 
Z means we have + +166 +00:14:41,910 --> 00:14:44,830 +infinite degrees of freedom, I mean n equal + +167 +00:14:44,830 --> 00:14:50,130 +infinity in this case. Suppose the confidence + +168 +00:14:50,130 --> 00:14:59,790 +level is 80%. 80% means 20%, so 10. In another + +169 +00:14:59,790 --> 00:15:04,550 +tail and 10th left tail Now under degrees of + +170 +00:15:04,550 --> 00:15:14,530 +freedom 10 If we go back 10 degrees + +171 +00:15:14,530 --> 00:15:24,350 +of freedom Up to 10 we have this answer Now + +172 +00:15:24,350 --> 00:15:28,580 +for 20 for example We have 1.325. + +173 +00:15:36,060 --> 00:15:42,300 +Look at the fourth column again. This is 10. 1 + +174 +00:15:42,300 --> 00:15:51,000 +.372. For 20, we have 1.325. For 30, 1.310. + +175 +00:15:52,960 --> 00:15:59,130 +That's for the T values. Now if you go back to the + +176 +00:15:59,130 --> 00:16:08,330 +normal table and just look at 1%. This is the z + +177 +00:16:08,330 --> 00:16:18,790 +table. 1% is the closest value under 1.28. + +178 +00:16:19,590 --> 00:16:25,930 +So that's your z-score. Now the corresponding T + +179 +00:16:25,930 --> 00:16:36,190 +values are 1.372, 1.325, 1.310, the exact Z is 1 + +180 +00:16:36,190 --> 00:16:40,970 +.18. Now, here we can see that the difference + +181 +00:16:40,970 --> 00:16:45,290 +between T and Z becomes smaller and smaller as N, + +182 +00:16:45,530 --> 00:16:51,530 +or degrees of freedom, increases. Here 1.372, then + +183 +00:16:51,530 --> 00:16:57,550 +it becomes less than this value, so 1.325, 1.310, + +184 +00:16:58,070 --> 00:17:02,250 +very close, 1.28. Similarly, we can do for the + +185 +00:17:02,250 --> 00:17:07,450 +other confidence levels. For example, for 90%, we + +186 +00:17:07,450 --> 00:17:13,010 +get this result. And also we have similar scenario + +187 +00:17:13,010 --> 00:17:16,630 +here for 95 and 99. So as degrees of freedom + +188 +00:17:16,630 --> 00:17:22,660 +increases, T goes to Z. 
Now, what's the smallest + +189 +00:17:22,660 --> 00:17:27,320 +difference between Z and other T values? For which + +190 +00:17:27,320 --> 00:17:28,680 +level of significance? + +191 +00:17:31,700 --> 00:17:36,820 +80%. For 80%, the difference between Z and T here + +192 +00:17:36,820 --> 00:17:40,700 +is the smallest among the others. Because the + +193 +00:17:40,700 --> 00:17:44,480 +difference between 128 and 131 is the smallest + +194 +00:17:44,480 --> 00:17:47,600 +value corresponding to the other ones, or + +195 +00:17:47,600 --> 00:17:53,130 +comparing to the other ones. For small confidence + +196 +00:17:53,130 --> 00:17:56,370 +level, the difference between z and t becomes + +197 +00:17:56,370 --> 00:18:01,930 +smaller and smaller. So t goes to z as n + +198 +00:18:01,930 --> 00:18:02,530 +increases. + +199 +00:18:06,430 --> 00:18:13,950 +So again, here, the yellow one is the normal + +200 +00:18:13,950 --> 00:18:16,450 +table, standardized normal table. It's a normal + +201 +00:18:16,450 --> 00:18:20,530 +table with degrees of freedom infinity as for t. + +202 +00:18:21,920 --> 00:18:24,740 +The other two for T distribution with different + +203 +00:18:24,740 --> 00:18:28,660 +degrees of freedom. In this case, T distributions + +204 +00:18:28,660 --> 00:18:32,920 +are bell-shaped and symmetric, but have fatter + +205 +00:18:32,920 --> 00:18:37,200 +tails than the normal. That means normal + +206 +00:18:37,200 --> 00:18:41,780 +distribution goes to the zero + diff --git a/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI_raw.json b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI_raw.json new file mode 100644 index 0000000000000000000000000000000000000000..deb6f989d8dc73eb87ac31b1204a2fd197e6cab3 --- /dev/null +++ b/PL9fwy3NUQKwaIc7KWc8buW344941GStQL/w-gVlz_LYaI_raw.json @@ -0,0 +1 @@ +{"segments": [{"id": 1, "seek": 2748, "start": 7.1, "end": 27.48, "text": " So then shall we continue chapter eight confidence interval estimation. 
Last time we talked about confidence interval for the population mean mu when sigma is known. And we end with this specific formula in order to construct", "tokens": [407, 550, 4393, 321, 2354, 7187, 3180, 6687, 15035, 35701, 13, 5264, 565, 321, 2825, 466, 6687, 15035, 337, 264, 4415, 914, 2992, 562, 12771, 307, 2570, 13, 400, 321, 917, 365, 341, 2685, 8513, 294, 1668, 281, 7690], "avg_logprob": -0.3240234240889549, "compression_ratio": 1.5374149659863945, "no_speech_prob": 1.1920928955078125e-06, "words": [{"start": 7.1, "end": 7.28, "word": " So", "probability": 0.1055908203125}, {"start": 7.28, "end": 7.44, "word": " then", "probability": 0.16064453125}, {"start": 7.44, "end": 7.6, "word": " shall", "probability": 0.587890625}, {"start": 7.6, "end": 7.76, "word": " we", "probability": 0.87646484375}, {"start": 7.76, "end": 8.34, "word": " continue", "probability": 0.8603515625}, {"start": 8.34, "end": 9.62, "word": " chapter", "probability": 0.57666015625}, {"start": 9.62, "end": 9.98, "word": " eight", "probability": 0.57861328125}, {"start": 9.98, "end": 10.52, "word": " confidence", "probability": 0.64501953125}, {"start": 10.52, "end": 11.12, "word": " interval", "probability": 0.9150390625}, {"start": 11.12, "end": 11.68, "word": " estimation.", "probability": 0.97802734375}, {"start": 12.96, "end": 13.34, "word": " Last", "probability": 0.76416015625}, {"start": 13.34, "end": 13.6, "word": " time", "probability": 0.89306640625}, {"start": 13.6, "end": 13.74, "word": " we", "probability": 0.841796875}, {"start": 13.74, "end": 13.98, "word": " talked", "probability": 0.87158203125}, {"start": 13.98, "end": 14.36, "word": " about", "probability": 0.89697265625}, {"start": 14.36, "end": 15.06, "word": " confidence", "probability": 0.93310546875}, {"start": 15.06, "end": 15.66, "word": " interval", "probability": 0.96044921875}, {"start": 15.66, "end": 16.9, "word": " for", "probability": 0.912109375}, {"start": 16.9, "end": 17.14, "word": " the", 
"probability": 0.9091796875}, {"start": 17.14, "end": 17.64, "word": " population", "probability": 0.8984375}, {"start": 17.64, "end": 18.1, "word": " mean", "probability": 0.93505859375}, {"start": 18.1, "end": 18.82, "word": " mu", "probability": 0.271728515625}, {"start": 18.82, "end": 20.28, "word": " when", "probability": 0.56982421875}, {"start": 20.28, "end": 20.58, "word": " sigma", "probability": 0.89453125}, {"start": 20.58, "end": 20.98, "word": " is", "probability": 0.953125}, {"start": 20.98, "end": 22.12, "word": " known.", "probability": 0.4404296875}, {"start": 22.92, "end": 23.3, "word": " And", "probability": 0.9306640625}, {"start": 23.3, "end": 23.48, "word": " we", "probability": 0.951171875}, {"start": 23.48, "end": 23.7, "word": " end", "probability": 0.9140625}, {"start": 23.7, "end": 23.94, "word": " with", "probability": 0.89990234375}, {"start": 23.94, "end": 24.34, "word": " this", "probability": 0.9384765625}, {"start": 24.34, "end": 25.22, "word": " specific", "probability": 0.900390625}, {"start": 25.22, "end": 25.84, "word": " formula", "probability": 0.927734375}, {"start": 25.84, "end": 26.28, "word": " in", "probability": 0.89013671875}, {"start": 26.28, "end": 26.46, "word": " order", "probability": 0.92822265625}, {"start": 26.46, "end": 26.82, "word": " to", "probability": 0.96826171875}, {"start": 26.82, "end": 27.48, "word": " construct", "probability": 0.9765625}], "temperature": 1.0}, {"id": 2, "seek": 5880, "start": 30.02, "end": 58.8, "text": " for the true proportion, the true population mean mu, which is x bar, the sample mean, plus or minus the alpha over 2, the critical value, times sigma over root n, which is the standard error of the estimate. And I think we gave an example for this one. 
So one more time, the confidence intervals for this chapter", "tokens": [337, 264, 2074, 16068, 11, 264, 2074, 4415, 914, 2992, 11, 597, 307, 2031, 2159, 11, 264, 6889, 914, 11, 1804, 420, 3175, 264, 8961, 670, 568, 11, 264, 4924, 2158, 11, 1413, 12771, 670, 5593, 297, 11, 597, 307, 264, 3832, 6713, 295, 264, 12539, 13, 400, 286, 519, 321, 2729, 364, 1365, 337, 341, 472, 13, 407, 472, 544, 565, 11, 264, 6687, 26651, 337, 341, 7187], "avg_logprob": -0.21986606674534934, "compression_ratio": 1.643979057591623, "no_speech_prob": 1.7881393432617188e-07, "words": [{"start": 30.02, "end": 30.14, "word": " for", "probability": 0.454345703125}, {"start": 30.14, "end": 30.32, "word": " the", "probability": 0.89013671875}, {"start": 30.32, "end": 30.52, "word": " true", "probability": 0.60986328125}, {"start": 30.52, "end": 31.0, "word": " proportion,", "probability": 0.763671875}, {"start": 31.34, "end": 31.54, "word": " the", "probability": 0.70068359375}, {"start": 31.54, "end": 32.06, "word": " true", "probability": 0.76318359375}, {"start": 32.06, "end": 33.42, "word": " population", "probability": 0.95703125}, {"start": 33.42, "end": 34.98, "word": " mean", "probability": 0.759765625}, {"start": 34.98, "end": 35.94, "word": " mu,", "probability": 0.2103271484375}, {"start": 36.44, "end": 36.84, "word": " which", "probability": 0.92919921875}, {"start": 36.84, "end": 37.02, "word": " is", "probability": 0.921875}, {"start": 37.02, "end": 37.26, "word": " x", "probability": 0.87158203125}, {"start": 37.26, "end": 37.56, "word": " bar,", "probability": 0.75830078125}, {"start": 37.78, "end": 37.94, "word": " the", "probability": 0.8486328125}, {"start": 37.94, "end": 38.22, "word": " sample", "probability": 0.63720703125}, {"start": 38.22, "end": 38.56, "word": " mean,", "probability": 0.98095703125}, {"start": 38.84, "end": 39.2, "word": " plus", "probability": 0.8515625}, {"start": 39.2, "end": 39.44, "word": " or", "probability": 0.94970703125}, {"start": 39.44, "end": 
39.7, "word": " minus", "probability": 0.98876953125}, {"start": 39.7, "end": 39.9, "word": " the", "probability": 0.70703125}, {"start": 39.9, "end": 40.06, "word": " alpha", "probability": 0.6142578125}, {"start": 40.06, "end": 40.4, "word": " over", "probability": 0.76025390625}, {"start": 40.4, "end": 40.56, "word": " 2,", "probability": 0.3994140625}, {"start": 40.66, "end": 40.76, "word": " the", "probability": 0.89990234375}, {"start": 40.76, "end": 41.06, "word": " critical", "probability": 0.93505859375}, {"start": 41.06, "end": 41.56, "word": " value,", "probability": 0.97412109375}, {"start": 42.16, "end": 42.64, "word": " times", "probability": 0.9296875}, {"start": 42.64, "end": 43.02, "word": " sigma", "probability": 0.91552734375}, {"start": 43.02, "end": 43.28, "word": " over", "probability": 0.890625}, {"start": 43.28, "end": 43.5, "word": " root", "probability": 0.92724609375}, {"start": 43.5, "end": 43.72, "word": " n,", "probability": 0.732421875}, {"start": 43.78, "end": 44.02, "word": " which", "probability": 0.94873046875}, {"start": 44.02, "end": 44.2, "word": " is", "probability": 0.9423828125}, {"start": 44.2, "end": 44.38, "word": " the", "probability": 0.896484375}, {"start": 44.38, "end": 44.7, "word": " standard", "probability": 0.93505859375}, {"start": 44.7, "end": 45.06, "word": " error", "probability": 0.8857421875}, {"start": 45.06, "end": 46.2, "word": " of", "probability": 0.966796875}, {"start": 46.2, "end": 46.48, "word": " the", "probability": 0.91845703125}, {"start": 46.48, "end": 47.12, "word": " estimate.", "probability": 0.9072265625}, {"start": 47.3, "end": 47.46, "word": " And", "probability": 0.94921875}, {"start": 47.46, "end": 47.6, "word": " I", "probability": 0.9853515625}, {"start": 47.6, "end": 47.82, "word": " think", "probability": 0.91455078125}, {"start": 47.82, "end": 48.1, "word": " we", "probability": 0.95703125}, {"start": 48.1, "end": 49.12, "word": " gave", "probability": 0.69091796875}, {"start": 
49.12, "end": 49.36, "word": " an", "probability": 0.9599609375}, {"start": 49.36, "end": 49.78, "word": " example", "probability": 0.97607421875}, {"start": 49.78, "end": 53.2, "word": " for", "probability": 0.93359375}, {"start": 53.2, "end": 53.4, "word": " this", "probability": 0.95068359375}, {"start": 53.4, "end": 53.64, "word": " one.", "probability": 0.93017578125}, {"start": 55.36, "end": 55.92, "word": " So", "probability": 0.95654296875}, {"start": 55.92, "end": 56.28, "word": " one", "probability": 0.8681640625}, {"start": 56.28, "end": 56.46, "word": " more", "probability": 0.931640625}, {"start": 56.46, "end": 56.72, "word": " time,", "probability": 0.88720703125}, {"start": 56.78, "end": 56.92, "word": " the", "probability": 0.91552734375}, {"start": 56.92, "end": 57.3, "word": " confidence", "probability": 0.974609375}, {"start": 57.3, "end": 57.78, "word": " intervals", "probability": 0.671875}, {"start": 57.78, "end": 58.2, "word": " for", "probability": 0.92041015625}, {"start": 58.2, "end": 58.46, "word": " this", "probability": 0.94287109375}, {"start": 58.46, "end": 58.8, "word": " chapter", "probability": 0.75732421875}], "temperature": 1.0}, {"id": 3, "seek": 8780, "start": 60.14, "end": 87.8, "text": " will be split into two parts. One for the true mean, and the other for the true proportion. For the population mean, there are two cases. One sigma is known, and we just explained. And the other one, one sigma is unknown. 
Let's see how can we construct a confidence interval when sigma is unknown.", "tokens": [486, 312, 7472, 666, 732, 3166, 13, 1485, 337, 264, 2074, 914, 11, 293, 264, 661, 337, 264, 2074, 16068, 13, 1171, 264, 4415, 914, 11, 456, 366, 732, 3331, 13, 1485, 12771, 307, 2570, 11, 293, 321, 445, 8825, 13, 400, 264, 661, 472, 11, 472, 12771, 307, 9841, 13, 961, 311, 536, 577, 393, 321, 7690, 257, 6687, 15035, 562, 12771, 307, 9841, 13], "avg_logprob": -0.17047575249600766, "compression_ratio": 1.7126436781609196, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 60.14, "end": 60.48, "word": " will", "probability": 0.63916015625}, {"start": 60.48, "end": 60.68, "word": " be", "probability": 0.93310546875}, {"start": 60.68, "end": 60.98, "word": " split", "probability": 0.8779296875}, {"start": 60.98, "end": 61.28, "word": " into", "probability": 0.83642578125}, {"start": 61.28, "end": 61.48, "word": " two", "probability": 0.8583984375}, {"start": 61.48, "end": 61.94, "word": " parts.", "probability": 0.8466796875}, {"start": 62.9, "end": 63.36, "word": " One", "probability": 0.87939453125}, {"start": 63.36, "end": 63.64, "word": " for", "probability": 0.9208984375}, {"start": 63.64, "end": 63.94, "word": " the", "probability": 0.92138671875}, {"start": 63.94, "end": 64.76, "word": " true", "probability": 0.9150390625}, {"start": 64.76, "end": 65.06, "word": " mean,", "probability": 0.94091796875}, {"start": 65.7, "end": 66.38, "word": " and", "probability": 0.91455078125}, {"start": 66.38, "end": 66.5, "word": " the", "probability": 0.70263671875}, {"start": 66.5, "end": 66.7, "word": " other", "probability": 0.89013671875}, {"start": 66.7, "end": 67.02, "word": " for", "probability": 0.912109375}, {"start": 67.02, "end": 67.2, "word": " the", "probability": 0.91015625}, {"start": 67.2, "end": 67.46, "word": " true", "probability": 0.96875}, {"start": 67.46, "end": 68.04, "word": " proportion.", "probability": 0.8193359375}, {"start": 69.56, "end": 70.08, 
"word": " For", "probability": 0.93603515625}, {"start": 70.08, "end": 70.32, "word": " the", "probability": 0.92041015625}, {"start": 70.32, "end": 70.78, "word": " population", "probability": 0.90478515625}, {"start": 70.78, "end": 71.18, "word": " mean,", "probability": 0.96435546875}, {"start": 71.5, "end": 71.76, "word": " there", "probability": 0.8955078125}, {"start": 71.76, "end": 71.96, "word": " are", "probability": 0.94287109375}, {"start": 71.96, "end": 72.2, "word": " two", "probability": 0.923828125}, {"start": 72.2, "end": 72.9, "word": " cases.", "probability": 0.93408203125}, {"start": 74.4, "end": 74.9, "word": " One", "probability": 0.70361328125}, {"start": 74.9, "end": 75.18, "word": " sigma", "probability": 0.79150390625}, {"start": 75.18, "end": 75.4, "word": " is", "probability": 0.94970703125}, {"start": 75.4, "end": 75.64, "word": " known,", "probability": 0.7138671875}, {"start": 76.38, "end": 76.64, "word": " and", "probability": 0.9033203125}, {"start": 76.64, "end": 76.76, "word": " we", "probability": 0.9326171875}, {"start": 76.76, "end": 77.1, "word": " just", "probability": 0.9013671875}, {"start": 77.1, "end": 78.68, "word": " explained.", "probability": 0.70751953125}, {"start": 79.26, "end": 79.56, "word": " And", "probability": 0.94140625}, {"start": 79.56, "end": 79.68, "word": " the", "probability": 0.88134765625}, {"start": 79.68, "end": 79.9, "word": " other", "probability": 0.88525390625}, {"start": 79.9, "end": 80.22, "word": " one,", "probability": 0.92626953125}, {"start": 80.52, "end": 80.68, "word": " one", "probability": 0.66455078125}, {"start": 80.68, "end": 80.9, "word": " sigma", "probability": 0.92333984375}, {"start": 80.9, "end": 81.12, "word": " is", "probability": 0.94189453125}, {"start": 81.12, "end": 81.4, "word": " unknown.", "probability": 0.8994140625}, {"start": 82.26, "end": 82.64, "word": " Let's", "probability": 0.96826171875}, {"start": 82.64, "end": 82.88, "word": " see", "probability": 
0.921875}, {"start": 82.88, "end": 83.54, "word": " how", "probability": 0.83544921875}, {"start": 83.54, "end": 83.76, "word": " can", "probability": 0.8525390625}, {"start": 83.76, "end": 83.92, "word": " we", "probability": 0.9560546875}, {"start": 83.92, "end": 84.4, "word": " construct", "probability": 0.9638671875}, {"start": 84.4, "end": 84.56, "word": " a", "probability": 0.78076171875}, {"start": 84.56, "end": 84.9, "word": " confidence", "probability": 0.98095703125}, {"start": 84.9, "end": 85.5, "word": " interval", "probability": 0.97265625}, {"start": 85.5, "end": 86.98, "word": " when", "probability": 0.8349609375}, {"start": 86.98, "end": 87.32, "word": " sigma", "probability": 0.92138671875}, {"start": 87.32, "end": 87.56, "word": " is", "probability": 0.94970703125}, {"start": 87.56, "end": 87.8, "word": " unknown.", "probability": 0.916015625}], "temperature": 1.0}, {"id": 4, "seek": 11636, "start": 88.96, "end": 116.36, "text": " In the real world, I mean in all real world business situations, sigma is not given. So our goal is to estimate sigma by using the sample standard deviation, then replace sigma by S. In this case, we have to use a new distribution. 
This distribution is called T distribution.", "tokens": [682, 264, 957, 1002, 11, 286, 914, 294, 439, 957, 1002, 1606, 6851, 11, 12771, 307, 406, 2212, 13, 407, 527, 3387, 307, 281, 12539, 12771, 538, 1228, 264, 6889, 3832, 25163, 11, 550, 7406, 12771, 538, 318, 13, 682, 341, 1389, 11, 321, 362, 281, 764, 257, 777, 7316, 13, 639, 7316, 307, 1219, 314, 7316, 13], "avg_logprob": -0.16353284453941605, "compression_ratio": 1.6727272727272726, "no_speech_prob": 0.0, "words": [{"start": 88.96, "end": 89.38, "word": " In", "probability": 0.66748046875}, {"start": 89.38, "end": 89.78, "word": " the", "probability": 0.88037109375}, {"start": 89.78, "end": 90.28, "word": " real", "probability": 0.96728515625}, {"start": 90.28, "end": 90.78, "word": " world,", "probability": 0.94482421875}, {"start": 92.88, "end": 94.9, "word": " I", "probability": 0.91796875}, {"start": 94.9, "end": 95.12, "word": " mean", "probability": 0.970703125}, {"start": 95.12, "end": 95.54, "word": " in", "probability": 0.68408203125}, {"start": 95.54, "end": 96.12, "word": " all", "probability": 0.92724609375}, {"start": 96.12, "end": 96.64, "word": " real", "probability": 0.9375}, {"start": 96.64, "end": 97.18, "word": " world", "probability": 0.78955078125}, {"start": 97.18, "end": 97.6, "word": " business", "probability": 0.9169921875}, {"start": 97.6, "end": 98.34, "word": " situations,", "probability": 0.7607421875}, {"start": 98.58, "end": 98.74, "word": " sigma", "probability": 0.70703125}, {"start": 98.74, "end": 98.94, "word": " is", "probability": 0.94921875}, {"start": 98.94, "end": 99.12, "word": " not", "probability": 0.9501953125}, {"start": 99.12, "end": 99.38, "word": " given.", "probability": 0.90771484375}, {"start": 100.06, "end": 100.34, "word": " So", "probability": 0.96142578125}, {"start": 100.34, "end": 100.6, "word": " our", "probability": 0.81689453125}, {"start": 100.6, "end": 100.84, "word": " goal", "probability": 0.974609375}, {"start": 100.84, "end": 101.0, "word": " 
is", "probability": 0.9443359375}, {"start": 101.0, "end": 101.16, "word": " to", "probability": 0.95751953125}, {"start": 101.16, "end": 101.66, "word": " estimate", "probability": 0.94873046875}, {"start": 101.66, "end": 102.16, "word": " sigma", "probability": 0.93017578125}, {"start": 102.16, "end": 103.24, "word": " by", "probability": 0.90771484375}, {"start": 103.24, "end": 103.68, "word": " using", "probability": 0.92919921875}, {"start": 103.68, "end": 104.14, "word": " the", "probability": 0.8955078125}, {"start": 104.14, "end": 104.5, "word": " sample", "probability": 0.326904296875}, {"start": 104.5, "end": 104.94, "word": " standard", "probability": 0.8349609375}, {"start": 104.94, "end": 105.38, "word": " deviation,", "probability": 0.87744140625}, {"start": 106.04, "end": 106.44, "word": " then", "probability": 0.83837890625}, {"start": 106.44, "end": 106.88, "word": " replace", "probability": 0.8935546875}, {"start": 106.88, "end": 107.28, "word": " sigma", "probability": 0.93359375}, {"start": 107.28, "end": 107.64, "word": " by", "probability": 0.970703125}, {"start": 107.64, "end": 109.32, "word": " S.", "probability": 0.529296875}, {"start": 109.78, "end": 109.98, "word": " In", "probability": 0.955078125}, {"start": 109.98, "end": 110.18, "word": " this", "probability": 0.9423828125}, {"start": 110.18, "end": 110.44, "word": " case,", "probability": 0.9140625}, {"start": 110.48, "end": 110.58, "word": " we", "probability": 0.95703125}, {"start": 110.58, "end": 110.8, "word": " have", "probability": 0.94580078125}, {"start": 110.8, "end": 110.92, "word": " to", "probability": 0.96875}, {"start": 110.92, "end": 111.26, "word": " use", "probability": 0.87646484375}, {"start": 111.26, "end": 111.54, "word": " a", "probability": 0.98046875}, {"start": 111.54, "end": 111.74, "word": " new", "probability": 0.91162109375}, {"start": 111.74, "end": 112.34, "word": " distribution.", "probability": 0.8466796875}, {"start": 113.1, "end": 113.68, "word": " 
This", "probability": 0.83837890625}, {"start": 113.68, "end": 114.2, "word": " distribution", "probability": 0.85693359375}, {"start": 114.2, "end": 114.52, "word": " is", "probability": 0.923828125}, {"start": 114.52, "end": 114.96, "word": " called", "probability": 0.87548828125}, {"start": 114.96, "end": 115.22, "word": " T", "probability": 0.485595703125}, {"start": 115.22, "end": 116.36, "word": " distribution.", "probability": 0.65869140625}], "temperature": 1.0}, {"id": 5, "seek": 14081, "start": 117.87, "end": 140.81, "text": " So again, if the population standard deviation sigma is unknown, we can substitute the sample standard deviation S instead of sigma. In this case, we should use T distribution instead of normal distribution. So if sigma is unknown.", "tokens": [407, 797, 11, 498, 264, 4415, 3832, 25163, 12771, 307, 9841, 11, 321, 393, 15802, 264, 6889, 3832, 25163, 318, 2602, 295, 12771, 13, 682, 341, 1389, 11, 321, 820, 764, 314, 7316, 2602, 295, 2710, 7316, 13, 407, 498, 12771, 307, 9841, 13], "avg_logprob": -0.1602430502573649, "compression_ratio": 1.6934306569343065, "no_speech_prob": 0.0, "words": [{"start": 117.87, "end": 118.21, "word": " So", "probability": 0.9150390625}, {"start": 118.21, "end": 118.59, "word": " again,", "probability": 0.8212890625}, {"start": 119.11, "end": 119.95, "word": " if", "probability": 0.94873046875}, {"start": 119.95, "end": 120.09, "word": " the", "probability": 0.90087890625}, {"start": 120.09, "end": 120.51, "word": " population", "probability": 0.9287109375}, {"start": 120.51, "end": 120.89, "word": " standard", "probability": 0.84375}, {"start": 120.89, "end": 121.29, "word": " deviation", "probability": 0.9287109375}, {"start": 121.29, "end": 121.73, "word": " sigma", "probability": 0.623046875}, {"start": 121.73, "end": 121.99, "word": " is", "probability": 0.9482421875}, {"start": 121.99, "end": 122.33, "word": " unknown,", "probability": 0.876953125}, {"start": 123.45, "end": 123.71, "word": " we", 
"probability": 0.93310546875}, {"start": 123.71, "end": 123.99, "word": " can", "probability": 0.9375}, {"start": 123.99, "end": 124.57, "word": " substitute", "probability": 0.9140625}, {"start": 124.57, "end": 125.35, "word": " the", "probability": 0.89013671875}, {"start": 125.35, "end": 125.83, "word": " sample", "probability": 0.5576171875}, {"start": 125.83, "end": 126.35, "word": " standard", "probability": 0.91455078125}, {"start": 126.35, "end": 126.77, "word": " deviation", "probability": 0.93017578125}, {"start": 126.77, "end": 127.19, "word": " S", "probability": 0.6552734375}, {"start": 127.19, "end": 127.79, "word": " instead", "probability": 0.744140625}, {"start": 127.79, "end": 128.23, "word": " of", "probability": 0.96826171875}, {"start": 128.23, "end": 129.79, "word": " sigma.", "probability": 0.84033203125}, {"start": 130.71, "end": 131.57, "word": " In", "probability": 0.9482421875}, {"start": 131.57, "end": 131.79, "word": " this", "probability": 0.94873046875}, {"start": 131.79, "end": 132.09, "word": " case,", "probability": 0.91845703125}, {"start": 132.29, "end": 132.71, "word": " we", "probability": 0.947265625}, {"start": 132.71, "end": 132.93, "word": " should", "probability": 0.96875}, {"start": 132.93, "end": 133.41, "word": " use", "probability": 0.8935546875}, {"start": 133.41, "end": 134.25, "word": " T", "probability": 0.6591796875}, {"start": 134.25, "end": 134.93, "word": " distribution", "probability": 0.60693359375}, {"start": 134.93, "end": 136.27, "word": " instead", "probability": 0.84716796875}, {"start": 136.27, "end": 136.87, "word": " of", "probability": 0.966796875}, {"start": 136.87, "end": 137.79, "word": " normal", "probability": 0.83251953125}, {"start": 137.79, "end": 138.25, "word": " distribution.", "probability": 0.7646484375}, {"start": 139.03, "end": 139.35, "word": " So", "probability": 0.95458984375}, {"start": 139.35, "end": 139.69, "word": " if", "probability": 0.7734375}, {"start": 139.69, "end": 
140.19, "word": " sigma", "probability": 0.9306640625}, {"start": 140.19, "end": 140.49, "word": " is", "probability": 0.94970703125}, {"start": 140.49, "end": 140.81, "word": " unknown.", "probability": 0.876953125}], "temperature": 1.0}, {"id": 6, "seek": 17401, "start": 153.09, "end": 174.01, "text": " replace sigma by S and use T distribution instead of norm. So this is the idea here. We have to replace sigma by S and use a new distribution called T.", "tokens": [7406, 12771, 538, 318, 293, 764, 314, 7316, 2602, 295, 2026, 13, 407, 341, 307, 264, 1558, 510, 13, 492, 362, 281, 7406, 12771, 538, 318, 293, 764, 257, 777, 7316, 1219, 314, 13], "avg_logprob": -0.31383927379335674, "compression_ratio": 1.4901960784313726, "no_speech_prob": 0.0, "words": [{"start": 153.09, "end": 153.65, "word": " replace", "probability": 0.54638671875}, {"start": 153.65, "end": 154.21, "word": " sigma", "probability": 0.70703125}, {"start": 154.21, "end": 154.43, "word": " by", "probability": 0.459228515625}, {"start": 154.43, "end": 154.97, "word": " S", "probability": 0.2666015625}, {"start": 154.97, "end": 155.27, "word": " and", "probability": 0.759765625}, {"start": 155.27, "end": 155.65, "word": " use", "probability": 0.65673828125}, {"start": 155.65, "end": 159.89, "word": " T", "probability": 0.59423828125}, {"start": 159.89, "end": 160.35, "word": " distribution", "probability": 0.71240234375}, {"start": 160.35, "end": 161.47, "word": " instead", "probability": 0.83740234375}, {"start": 161.47, "end": 164.27, "word": " of", "probability": 0.93017578125}, {"start": 164.27, "end": 164.53, "word": " norm.", "probability": 0.380126953125}, {"start": 167.97, "end": 168.53, "word": " So", "probability": 0.94580078125}, {"start": 168.53, "end": 168.73, "word": " this", "probability": 0.87158203125}, {"start": 168.73, "end": 168.83, "word": " is", "probability": 0.8125}, {"start": 168.83, "end": 168.89, "word": " the", "probability": 0.314208984375}, {"start": 168.89, "end": 169.15, 
"word": " idea", "probability": 0.9287109375}, {"start": 169.15, "end": 169.41, "word": " here.", "probability": 0.845703125}, {"start": 169.45, "end": 169.59, "word": " We", "probability": 0.9423828125}, {"start": 169.59, "end": 169.77, "word": " have", "probability": 0.94287109375}, {"start": 169.77, "end": 169.87, "word": " to", "probability": 0.95654296875}, {"start": 169.87, "end": 170.19, "word": " replace", "probability": 0.9423828125}, {"start": 170.19, "end": 170.53, "word": " sigma", "probability": 0.9365234375}, {"start": 170.53, "end": 170.77, "word": " by", "probability": 0.95751953125}, {"start": 170.77, "end": 171.01, "word": " S", "probability": 0.984375}, {"start": 171.01, "end": 171.25, "word": " and", "probability": 0.85302734375}, {"start": 171.25, "end": 171.45, "word": " use", "probability": 0.806640625}, {"start": 171.45, "end": 171.59, "word": " a", "probability": 0.43505859375}, {"start": 171.59, "end": 171.73, "word": " new", "probability": 0.8623046875}, {"start": 171.73, "end": 172.23, "word": " distribution", "probability": 0.7998046875}, {"start": 172.23, "end": 172.75, "word": " called", "probability": 0.44140625}, {"start": 172.75, "end": 174.01, "word": " T.", "probability": 0.91357421875}], "temperature": 1.0}, {"id": 7, "seek": 20432, "start": 175.48, "end": 204.32, "text": " In this case, the three assumptions will remain nearly the same. But instead of the first one will be here, population standard deviation is unknown instead of known. So this is the assumption that changed. The other two assumptions remain the same. 
Either the population is normally distributed, or if the population is not normal, just use large sample.", "tokens": [682, 341, 1389, 11, 264, 1045, 17695, 486, 6222, 6217, 264, 912, 13, 583, 2602, 295, 264, 700, 472, 486, 312, 510, 11, 4415, 3832, 25163, 307, 9841, 2602, 295, 2570, 13, 407, 341, 307, 264, 15302, 300, 3105, 13, 440, 661, 732, 17695, 6222, 264, 912, 13, 13746, 264, 4415, 307, 5646, 12631, 11, 420, 498, 264, 4415, 307, 406, 2710, 11, 445, 764, 2416, 6889, 13], "avg_logprob": -0.18070652519447217, "compression_ratio": 1.78, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 175.48, "end": 175.72, "word": " In", "probability": 0.8720703125}, {"start": 175.72, "end": 175.92, "word": " this", "probability": 0.93994140625}, {"start": 175.92, "end": 176.22, "word": " case,", "probability": 0.927734375}, {"start": 176.32, "end": 176.48, "word": " the", "probability": 0.90966796875}, {"start": 176.48, "end": 176.72, "word": " three", "probability": 0.92626953125}, {"start": 176.72, "end": 177.28, "word": " assumptions", "probability": 0.9619140625}, {"start": 177.28, "end": 178.3, "word": " will", "probability": 0.83837890625}, {"start": 178.3, "end": 179.08, "word": " remain", "probability": 0.892578125}, {"start": 179.08, "end": 179.5, "word": " nearly", "probability": 0.83251953125}, {"start": 179.5, "end": 179.72, "word": " the", "probability": 0.91943359375}, {"start": 179.72, "end": 180.04, "word": " same.", "probability": 0.90771484375}, {"start": 181.26, "end": 181.52, "word": " But", "probability": 0.93505859375}, {"start": 181.52, "end": 182.06, "word": " instead", "probability": 0.86279296875}, {"start": 182.06, "end": 183.54, "word": " of", "probability": 0.8427734375}, {"start": 183.54, "end": 183.68, "word": " the", "probability": 0.91748046875}, {"start": 183.68, "end": 183.96, "word": " first", "probability": 0.8603515625}, {"start": 183.96, "end": 184.3, "word": " one", "probability": 0.927734375}, {"start": 184.3, "end": 185.78, 
"word": " will", "probability": 0.56103515625}, {"start": 185.78, "end": 186.12, "word": " be", "probability": 0.9501953125}, {"start": 186.12, "end": 186.62, "word": " here,", "probability": 0.5986328125}, {"start": 186.72, "end": 187.12, "word": " population", "probability": 0.92041015625}, {"start": 187.12, "end": 187.42, "word": " standard", "probability": 0.255615234375}, {"start": 187.42, "end": 187.76, "word": " deviation", "probability": 0.94873046875}, {"start": 187.76, "end": 188.0, "word": " is", "probability": 0.93310546875}, {"start": 188.0, "end": 188.38, "word": " unknown", "probability": 0.845703125}, {"start": 188.38, "end": 188.78, "word": " instead", "probability": 0.6875}, {"start": 188.78, "end": 188.96, "word": " of", "probability": 0.966796875}, {"start": 188.96, "end": 189.22, "word": " known.", "probability": 0.77783203125}, {"start": 190.04, "end": 190.44, "word": " So", "probability": 0.9453125}, {"start": 190.44, "end": 190.64, "word": " this", "probability": 0.8974609375}, {"start": 190.64, "end": 190.78, "word": " is", "probability": 0.93310546875}, {"start": 190.78, "end": 190.96, "word": " the", "probability": 0.91796875}, {"start": 190.96, "end": 191.4, "word": " assumption", "probability": 0.9814453125}, {"start": 191.4, "end": 191.78, "word": " that", "probability": 0.92578125}, {"start": 191.78, "end": 192.28, "word": " changed.", "probability": 0.662109375}, {"start": 192.78, "end": 192.96, "word": " The", "probability": 0.85498046875}, {"start": 192.96, "end": 193.24, "word": " other", "probability": 0.888671875}, {"start": 193.24, "end": 194.02, "word": " two", "probability": 0.9443359375}, {"start": 194.02, "end": 194.64, "word": " assumptions", "probability": 0.96875}, {"start": 194.64, "end": 195.84, "word": " remain", "probability": 0.78173828125}, {"start": 195.84, "end": 196.04, "word": " the", "probability": 0.91552734375}, {"start": 196.04, "end": 196.34, "word": " same.", "probability": 0.90673828125}, {"start": 
196.82, "end": 197.14, "word": " Either", "probability": 0.83740234375}, {"start": 197.14, "end": 197.52, "word": " the", "probability": 0.88525390625}, {"start": 197.52, "end": 197.88, "word": " population", "probability": 0.94970703125}, {"start": 197.88, "end": 198.12, "word": " is", "probability": 0.87841796875}, {"start": 198.12, "end": 198.44, "word": " normally", "probability": 0.88720703125}, {"start": 198.44, "end": 199.1, "word": " distributed,", "probability": 0.91259765625}, {"start": 199.54, "end": 199.88, "word": " or", "probability": 0.96337890625}, {"start": 199.88, "end": 200.24, "word": " if", "probability": 0.91650390625}, {"start": 200.24, "end": 200.38, "word": " the", "probability": 0.78857421875}, {"start": 200.38, "end": 200.68, "word": " population", "probability": 0.96044921875}, {"start": 200.68, "end": 200.94, "word": " is", "probability": 0.94189453125}, {"start": 200.94, "end": 201.18, "word": " not", "probability": 0.94091796875}, {"start": 201.18, "end": 201.6, "word": " normal,", "probability": 0.87353515625}, {"start": 202.06, "end": 202.4, "word": " just", "probability": 0.72509765625}, {"start": 202.4, "end": 202.74, "word": " use", "probability": 0.88330078125}, {"start": 202.74, "end": 203.92, "word": " large", "probability": 0.92431640625}, {"start": 203.92, "end": 204.32, "word": " sample.", "probability": 0.7275390625}], "temperature": 1.0}, {"id": 8, "seek": 23309, "start": 205.43, "end": 233.09, "text": " In this case, the confidence interval estimate is given by this formula X bar plus or minus T alpha over 2S over square root of F. Now the question is, how can we find the critical value T alpha over 2? There is a table for this one at the end of your book. 
The table gives the area in the upper tier.", "tokens": [682, 341, 1389, 11, 264, 6687, 15035, 12539, 307, 2212, 538, 341, 8513, 1783, 2159, 1804, 420, 3175, 314, 8961, 670, 568, 50, 670, 3732, 5593, 295, 479, 13, 823, 264, 1168, 307, 11, 577, 393, 321, 915, 264, 4924, 2158, 314, 8961, 670, 568, 30, 821, 307, 257, 3199, 337, 341, 472, 412, 264, 917, 295, 428, 1446, 13, 440, 3199, 2709, 264, 1859, 294, 264, 6597, 12362, 13], "avg_logprob": -0.17990757462004542, "compression_ratio": 1.5252525252525253, "no_speech_prob": 0.0, "words": [{"start": 205.43, "end": 205.69, "word": " In", "probability": 0.79541015625}, {"start": 205.69, "end": 205.89, "word": " this", "probability": 0.94287109375}, {"start": 205.89, "end": 206.13, "word": " case,", "probability": 0.9169921875}, {"start": 206.25, "end": 206.33, "word": " the", "probability": 0.8974609375}, {"start": 206.33, "end": 206.65, "word": " confidence", "probability": 0.9580078125}, {"start": 206.65, "end": 207.35, "word": " interval", "probability": 0.98046875}, {"start": 207.35, "end": 207.95, "word": " estimate", "probability": 0.5791015625}, {"start": 207.95, "end": 208.55, "word": " is", "probability": 0.91943359375}, {"start": 208.55, "end": 208.87, "word": " given", "probability": 0.88818359375}, {"start": 208.87, "end": 209.15, "word": " by", "probability": 0.9638671875}, {"start": 209.15, "end": 209.47, "word": " this", "probability": 0.8876953125}, {"start": 209.47, "end": 211.41, "word": " formula", "probability": 0.9072265625}, {"start": 211.41, "end": 211.71, "word": " X", "probability": 0.2685546875}, {"start": 211.71, "end": 212.01, "word": " bar", "probability": 0.73583984375}, {"start": 212.01, "end": 212.55, "word": " plus", "probability": 0.87646484375}, {"start": 212.55, "end": 212.85, "word": " or", "probability": 0.9501953125}, {"start": 212.85, "end": 213.31, "word": " minus", "probability": 0.98583984375}, {"start": 213.31, "end": 213.83, "word": " T", "probability": 0.88671875}, {"start": 
213.83, "end": 215.15, "word": " alpha", "probability": 0.623046875}, {"start": 215.15, "end": 215.45, "word": " over", "probability": 0.9150390625}, {"start": 215.45, "end": 216.07, "word": " 2S", "probability": 0.6728515625}, {"start": 216.07, "end": 216.61, "word": " over", "probability": 0.8662109375}, {"start": 216.61, "end": 217.97, "word": " square", "probability": 0.7939453125}, {"start": 217.97, "end": 218.15, "word": " root", "probability": 0.9306640625}, {"start": 218.15, "end": 218.27, "word": " of", "probability": 0.73876953125}, {"start": 218.27, "end": 218.45, "word": " F.", "probability": 0.45068359375}, {"start": 219.61, "end": 220.17, "word": " Now", "probability": 0.93896484375}, {"start": 220.17, "end": 220.31, "word": " the", "probability": 0.58447265625}, {"start": 220.31, "end": 220.57, "word": " question", "probability": 0.90966796875}, {"start": 220.57, "end": 220.79, "word": " is,", "probability": 0.94970703125}, {"start": 220.85, "end": 220.91, "word": " how", "probability": 0.8154296875}, {"start": 220.91, "end": 221.13, "word": " can", "probability": 0.9365234375}, {"start": 221.13, "end": 221.37, "word": " we", "probability": 0.955078125}, {"start": 221.37, "end": 222.55, "word": " find", "probability": 0.8916015625}, {"start": 222.55, "end": 224.13, "word": " the", "probability": 0.912109375}, {"start": 224.13, "end": 224.57, "word": " critical", "probability": 0.9462890625}, {"start": 224.57, "end": 225.03, "word": " value", "probability": 0.9716796875}, {"start": 225.03, "end": 225.27, "word": " T", "probability": 0.73193359375}, {"start": 225.27, "end": 225.47, "word": " alpha", "probability": 0.86376953125}, {"start": 225.47, "end": 225.77, "word": " over", "probability": 0.921875}, {"start": 225.77, "end": 225.95, "word": " 2?", "probability": 0.9482421875}, {"start": 227.13, "end": 227.69, "word": " There", "probability": 0.77294921875}, {"start": 227.69, "end": 227.81, "word": " is", "probability": 0.921875}, {"start": 227.81, 
"end": 227.93, "word": " a", "probability": 0.99365234375}, {"start": 227.93, "end": 228.11, "word": " table", "probability": 0.89501953125}, {"start": 228.11, "end": 228.37, "word": " for", "probability": 0.94384765625}, {"start": 228.37, "end": 228.59, "word": " this", "probability": 0.9462890625}, {"start": 228.59, "end": 228.81, "word": " one", "probability": 0.8828125}, {"start": 228.81, "end": 228.97, "word": " at", "probability": 0.83935546875}, {"start": 228.97, "end": 229.17, "word": " the", "probability": 0.92333984375}, {"start": 229.17, "end": 229.33, "word": " end", "probability": 0.8896484375}, {"start": 229.33, "end": 229.45, "word": " of", "probability": 0.966796875}, {"start": 229.45, "end": 229.59, "word": " your", "probability": 0.8779296875}, {"start": 229.59, "end": 229.91, "word": " book.", "probability": 0.958984375}, {"start": 230.67, "end": 230.95, "word": " The", "probability": 0.83984375}, {"start": 230.95, "end": 231.23, "word": " table", "probability": 0.89697265625}, {"start": 231.23, "end": 231.59, "word": " gives", "probability": 0.89794921875}, {"start": 231.59, "end": 231.81, "word": " the", "probability": 0.91552734375}, {"start": 231.81, "end": 232.15, "word": " area", "probability": 0.90185546875}, {"start": 232.15, "end": 232.41, "word": " in", "probability": 0.82177734375}, {"start": 232.41, "end": 232.51, "word": " the", "probability": 0.896484375}, {"start": 232.51, "end": 232.75, "word": " upper", "probability": 0.7626953125}, {"start": 232.75, "end": 233.09, "word": " tier.", "probability": 0.650390625}], "temperature": 1.0}, {"id": 9, "seek": 26730, "start": 239.1, "end": 267.3, "text": " For certain probabilities, start from 2.2, 0.10, and so on. So this is the table. So columns represent the percentages of the area in the upper tier. 
Rows represent degrees of freedom, start from 1 to all the way up to 120.", "tokens": [1171, 1629, 33783, 11, 722, 490, 568, 13, 17, 11, 1958, 13, 3279, 11, 293, 370, 322, 13, 407, 341, 307, 264, 3199, 13, 407, 13766, 2906, 264, 42270, 295, 264, 1859, 294, 264, 6597, 12362, 13, 497, 1509, 2906, 5310, 295, 5645, 11, 722, 490, 502, 281, 439, 264, 636, 493, 281, 10411, 13], "avg_logprob": -0.23395647693957602, "compression_ratio": 1.4736842105263157, "no_speech_prob": 0.0, "words": [{"start": 239.1, "end": 239.42, "word": " For", "probability": 0.342041015625}, {"start": 239.42, "end": 240.0, "word": " certain", "probability": 0.8837890625}, {"start": 240.0, "end": 240.8, "word": " probabilities,", "probability": 0.91064453125}, {"start": 241.52, "end": 241.92, "word": " start", "probability": 0.67626953125}, {"start": 241.92, "end": 242.2, "word": " from", "probability": 0.8837890625}, {"start": 242.2, "end": 242.44, "word": " 2", "probability": 0.52392578125}, {"start": 242.44, "end": 245.08, "word": ".2,", "probability": 0.772216796875}, {"start": 245.52, "end": 246.14, "word": " 0", "probability": 0.359375}, {"start": 246.14, "end": 247.28, "word": ".10,", "probability": 0.863525390625}, {"start": 247.5, "end": 247.56, "word": " and", "probability": 0.9111328125}, {"start": 247.56, "end": 247.72, "word": " so", "probability": 0.94873046875}, {"start": 247.72, "end": 247.96, "word": " on.", "probability": 0.93798828125}, {"start": 248.5, "end": 248.84, "word": " So", "probability": 0.93896484375}, {"start": 248.84, "end": 249.0, "word": " this", "probability": 0.83642578125}, {"start": 249.0, "end": 249.12, "word": " is", "probability": 0.94287109375}, {"start": 249.12, "end": 249.26, "word": " the", "probability": 0.89892578125}, {"start": 249.26, "end": 249.5, "word": " table.", "probability": 0.87939453125}, {"start": 251.68, "end": 252.2, "word": " So", "probability": 0.9423828125}, {"start": 252.2, "end": 252.94, "word": " columns", "probability": 
0.93798828125}, {"start": 252.94, "end": 253.64, "word": " represent", "probability": 0.5}, {"start": 253.64, "end": 255.7, "word": " the", "probability": 0.88232421875}, {"start": 255.7, "end": 256.74, "word": " percentages", "probability": 0.92822265625}, {"start": 256.74, "end": 257.34, "word": " of", "probability": 0.544921875}, {"start": 257.34, "end": 258.32, "word": " the", "probability": 0.91455078125}, {"start": 258.32, "end": 258.74, "word": " area", "probability": 0.89111328125}, {"start": 258.74, "end": 259.0, "word": " in", "probability": 0.83251953125}, {"start": 259.0, "end": 259.08, "word": " the", "probability": 0.89501953125}, {"start": 259.08, "end": 259.3, "word": " upper", "probability": 0.7255859375}, {"start": 259.3, "end": 259.68, "word": " tier.", "probability": 0.90087890625}, {"start": 260.54, "end": 261.0, "word": " Rows", "probability": 0.9716796875}, {"start": 261.0, "end": 262.28, "word": " represent", "probability": 0.82568359375}, {"start": 262.28, "end": 263.22, "word": " degrees", "probability": 0.9306640625}, {"start": 263.22, "end": 263.42, "word": " of", "probability": 0.9658203125}, {"start": 263.42, "end": 263.72, "word": " freedom,", "probability": 0.94921875}, {"start": 263.96, "end": 264.18, "word": " start", "probability": 0.75830078125}, {"start": 264.18, "end": 264.38, "word": " from", "probability": 0.51171875}, {"start": 264.38, "end": 264.52, "word": " 1", "probability": 0.6025390625}, {"start": 264.52, "end": 264.66, "word": " to", "probability": 0.5703125}, {"start": 264.66, "end": 266.16, "word": " all", "probability": 0.56884765625}, {"start": 266.16, "end": 266.34, "word": " the", "probability": 0.91650390625}, {"start": 266.34, "end": 266.52, "word": " way", "probability": 0.951171875}, {"start": 266.52, "end": 266.78, "word": " up", "probability": 0.95068359375}, {"start": 266.78, "end": 266.94, "word": " to", "probability": 0.96826171875}, {"start": 266.94, "end": 267.3, "word": " 120.", "probability": 
0.93603515625}], "temperature": 1.0}, {"id": 10, "seek": 29370, "start": 267.99, "end": 293.71, "text": " Then infinity. Now let's see how can we use the table. You have to know that T is a family of distributions. The degrees of freedom, DF stands for degrees of freedom, always equals N minus 1. For example, if the sample size is 15,", "tokens": [1396, 13202, 13, 823, 718, 311, 536, 577, 393, 321, 764, 264, 3199, 13, 509, 362, 281, 458, 300, 314, 307, 257, 1605, 295, 37870, 13, 440, 5310, 295, 5645, 11, 48336, 7382, 337, 5310, 295, 5645, 11, 1009, 6915, 426, 3175, 502, 13, 1171, 1365, 11, 498, 264, 6889, 2744, 307, 2119, 11], "avg_logprob": -0.17428976622494785, "compression_ratio": 1.4085365853658536, "no_speech_prob": 0.0, "words": [{"start": 267.99, "end": 268.37, "word": " Then", "probability": 0.256103515625}, {"start": 268.37, "end": 269.11, "word": " infinity.", "probability": 0.5791015625}, {"start": 270.37, "end": 270.61, "word": " Now", "probability": 0.7763671875}, {"start": 270.61, "end": 270.85, "word": " let's", "probability": 0.83251953125}, {"start": 270.85, "end": 270.97, "word": " see", "probability": 0.91845703125}, {"start": 270.97, "end": 271.09, "word": " how", "probability": 0.9189453125}, {"start": 271.09, "end": 271.31, "word": " can", "probability": 0.80078125}, {"start": 271.31, "end": 271.53, "word": " we", "probability": 0.88232421875}, {"start": 271.53, "end": 272.05, "word": " use", "probability": 0.86474609375}, {"start": 272.05, "end": 272.35, "word": " the", "probability": 0.84423828125}, {"start": 272.35, "end": 274.49, "word": " table.", "probability": 0.7958984375}, {"start": 275.31, "end": 275.51, "word": " You", "probability": 0.93359375}, {"start": 275.51, "end": 275.69, "word": " have", "probability": 0.947265625}, {"start": 275.69, "end": 275.81, "word": " to", "probability": 0.96826171875}, {"start": 275.81, "end": 275.95, "word": " know", "probability": 0.86279296875}, {"start": 275.95, "end": 276.31, "word": " that", 
"probability": 0.94140625}, {"start": 276.31, "end": 277.47, "word": " T", "probability": 0.798828125}, {"start": 277.47, "end": 277.91, "word": " is", "probability": 0.95068359375}, {"start": 277.91, "end": 278.09, "word": " a", "probability": 0.98828125}, {"start": 278.09, "end": 278.35, "word": " family", "probability": 0.91650390625}, {"start": 278.35, "end": 278.53, "word": " of", "probability": 0.9619140625}, {"start": 278.53, "end": 279.21, "word": " distributions.", "probability": 0.8837890625}, {"start": 280.43, "end": 280.59, "word": " The", "probability": 0.87060546875}, {"start": 280.59, "end": 280.95, "word": " degrees", "probability": 0.94189453125}, {"start": 280.95, "end": 281.17, "word": " of", "probability": 0.9697265625}, {"start": 281.17, "end": 281.65, "word": " freedom,", "probability": 0.94091796875}, {"start": 282.97, "end": 284.45, "word": " DF", "probability": 0.42333984375}, {"start": 284.45, "end": 285.89, "word": " stands", "probability": 0.57275390625}, {"start": 285.89, "end": 286.15, "word": " for", "probability": 0.95849609375}, {"start": 286.15, "end": 286.55, "word": " degrees", "probability": 0.9404296875}, {"start": 286.55, "end": 286.77, "word": " of", "probability": 0.9658203125}, {"start": 286.77, "end": 287.09, "word": " freedom,", "probability": 0.9267578125}, {"start": 288.01, "end": 288.59, "word": " always", "probability": 0.8955078125}, {"start": 288.59, "end": 289.37, "word": " equals", "probability": 0.87646484375}, {"start": 289.37, "end": 289.65, "word": " N", "probability": 0.77001953125}, {"start": 289.65, "end": 289.93, "word": " minus", "probability": 0.90478515625}, {"start": 289.93, "end": 290.19, "word": " 1.", "probability": 0.77734375}, {"start": 291.31, "end": 291.69, "word": " For", "probability": 0.96337890625}, {"start": 291.69, "end": 292.01, "word": " example,", "probability": 0.974609375}, {"start": 292.11, "end": 292.21, "word": " if", "probability": 0.93798828125}, {"start": 292.21, "end": 292.33, 
"word": " the", "probability": 0.9130859375}, {"start": 292.33, "end": 292.61, "word": " sample", "probability": 0.84130859375}, {"start": 292.61, "end": 292.97, "word": " size", "probability": 0.86474609375}, {"start": 292.97, "end": 293.21, "word": " is", "probability": 0.94677734375}, {"start": 293.21, "end": 293.71, "word": " 15,", "probability": 0.9462890625}], "temperature": 1.0}, {"id": 11, "seek": 32143, "start": 295.01, "end": 321.43, "text": " So degrees of freedom is 14 and so on. Now degrees of freedom, number of observations that are free to vary after sample mean has been calculated. Let's see the meaning of degrees of freedom. Now imagine that we know the mean is 8.", "tokens": [407, 5310, 295, 5645, 307, 3499, 293, 370, 322, 13, 823, 5310, 295, 5645, 11, 1230, 295, 18163, 300, 366, 1737, 281, 10559, 934, 6889, 914, 575, 668, 15598, 13, 961, 311, 536, 264, 3620, 295, 5310, 295, 5645, 13, 823, 3811, 300, 321, 458, 264, 914, 307, 1649, 13], "avg_logprob": -0.2038909378005009, "compression_ratio": 1.5570469798657718, "no_speech_prob": 0.0, "words": [{"start": 295.01, "end": 295.41, "word": " So", "probability": 0.73095703125}, {"start": 295.41, "end": 295.77, "word": " degrees", "probability": 0.591796875}, {"start": 295.77, "end": 295.89, "word": " of", "probability": 0.939453125}, {"start": 295.89, "end": 296.11, "word": " freedom", "probability": 0.94189453125}, {"start": 296.11, "end": 296.43, "word": " is", "probability": 0.6982421875}, {"start": 296.43, "end": 297.25, "word": " 14", "probability": 0.12371826171875}, {"start": 297.25, "end": 297.53, "word": " and", "probability": 0.69482421875}, {"start": 297.53, "end": 297.75, "word": " so", "probability": 0.94482421875}, {"start": 297.75, "end": 297.99, "word": " on.", "probability": 0.93701171875}, {"start": 299.65, "end": 299.99, "word": " Now", "probability": 0.93798828125}, {"start": 299.99, "end": 300.25, "word": " degrees", "probability": 0.8544921875}, {"start": 300.25, "end": 300.43, 
"word": " of", "probability": 0.966796875}, {"start": 300.43, "end": 300.79, "word": " freedom,", "probability": 0.94140625}, {"start": 301.27, "end": 301.71, "word": " number", "probability": 0.91845703125}, {"start": 301.71, "end": 301.91, "word": " of", "probability": 0.8876953125}, {"start": 301.91, "end": 302.41, "word": " observations", "probability": 0.8271484375}, {"start": 302.41, "end": 303.23, "word": " that", "probability": 0.8916015625}, {"start": 303.23, "end": 303.67, "word": " are", "probability": 0.943359375}, {"start": 303.67, "end": 303.97, "word": " free", "probability": 0.91748046875}, {"start": 303.97, "end": 304.17, "word": " to", "probability": 0.96875}, {"start": 304.17, "end": 304.49, "word": " vary", "probability": 0.96728515625}, {"start": 304.49, "end": 305.13, "word": " after", "probability": 0.82470703125}, {"start": 305.13, "end": 305.59, "word": " sample", "probability": 0.64990234375}, {"start": 305.59, "end": 305.85, "word": " mean", "probability": 0.95751953125}, {"start": 305.85, "end": 306.07, "word": " has", "probability": 0.94287109375}, {"start": 306.07, "end": 306.23, "word": " been", "probability": 0.9150390625}, {"start": 306.23, "end": 306.75, "word": " calculated.", "probability": 0.94970703125}, {"start": 307.49, "end": 307.85, "word": " Let's", "probability": 0.95556640625}, {"start": 307.85, "end": 308.03, "word": " see", "probability": 0.92236328125}, {"start": 308.03, "end": 308.23, "word": " the", "probability": 0.91845703125}, {"start": 308.23, "end": 308.71, "word": " meaning", "probability": 0.6015625}, {"start": 308.71, "end": 309.29, "word": " of", "probability": 0.95751953125}, {"start": 309.29, "end": 309.55, "word": " degrees", "probability": 0.94287109375}, {"start": 309.55, "end": 309.71, "word": " of", "probability": 0.96875}, {"start": 309.71, "end": 309.97, "word": " freedom.", "probability": 0.9423828125}, {"start": 310.91, "end": 311.15, "word": " Now", "probability": 0.94970703125}, {"start": 
311.15, "end": 311.55, "word": " imagine", "probability": 0.8251953125}, {"start": 311.55, "end": 311.89, "word": " that", "probability": 0.916015625}, {"start": 311.89, "end": 315.41, "word": " we", "probability": 0.77587890625}, {"start": 315.41, "end": 315.77, "word": " know", "probability": 0.8896484375}, {"start": 315.77, "end": 316.97, "word": " the", "probability": 0.83349609375}, {"start": 316.97, "end": 317.29, "word": " mean", "probability": 0.97509765625}, {"start": 317.29, "end": 321.15, "word": " is", "probability": 0.92138671875}, {"start": 321.15, "end": 321.43, "word": " 8.", "probability": 0.413330078125}], "temperature": 1.0}, {"id": 12, "seek": 34867, "start": 325.69, "end": 348.67, "text": " previous information that the mean of three observations equals eight. So we have three values. Now the restriction is the mean is eight. So we have three values x1, x2, x3. In this case, you can take any two values.", "tokens": [3894, 1589, 300, 264, 914, 295, 1045, 18163, 6915, 3180, 13, 407, 321, 362, 1045, 4190, 13, 823, 264, 29529, 307, 264, 914, 307, 3180, 13, 407, 321, 362, 1045, 4190, 2031, 16, 11, 2031, 17, 11, 2031, 18, 13, 682, 341, 1389, 11, 291, 393, 747, 604, 732, 4190, 13], "avg_logprob": -0.1986177830168834, "compression_ratio": 1.5611510791366907, "no_speech_prob": 0.0, "words": [{"start": 325.68999999999994, "end": 326.40999999999997, "word": " previous", "probability": 0.39013671875}, {"start": 326.40999999999997, "end": 327.13, "word": " information", "probability": 0.8076171875}, {"start": 327.13, "end": 327.57, "word": " that", "probability": 0.82373046875}, {"start": 327.57, "end": 328.27, "word": " the", "probability": 0.80078125}, {"start": 328.27, "end": 328.51, "word": " mean", "probability": 0.9482421875}, {"start": 328.51, "end": 328.85, "word": " of", "probability": 0.654296875}, {"start": 328.85, "end": 329.21, "word": " three", "probability": 0.64892578125}, {"start": 329.21, "end": 329.91, "word": " observations", 
"probability": 0.767578125}, {"start": 329.91, "end": 332.35, "word": " equals", "probability": 0.79443359375}, {"start": 332.35, "end": 332.71, "word": " eight.", "probability": 0.7333984375}, {"start": 333.45, "end": 333.81, "word": " So", "probability": 0.90576171875}, {"start": 333.81, "end": 333.91, "word": " we", "probability": 0.427490234375}, {"start": 333.91, "end": 334.03, "word": " have", "probability": 0.9482421875}, {"start": 334.03, "end": 334.27, "word": " three", "probability": 0.888671875}, {"start": 334.27, "end": 334.71, "word": " values.", "probability": 0.96923828125}, {"start": 336.09, "end": 336.81, "word": " Now", "probability": 0.9345703125}, {"start": 336.81, "end": 337.01, "word": " the", "probability": 0.64501953125}, {"start": 337.01, "end": 337.43, "word": " restriction", "probability": 0.85107421875}, {"start": 337.43, "end": 337.93, "word": " is", "probability": 0.9482421875}, {"start": 337.93, "end": 338.57, "word": " the", "probability": 0.634765625}, {"start": 338.57, "end": 338.75, "word": " mean", "probability": 0.94140625}, {"start": 338.75, "end": 338.97, "word": " is", "probability": 0.88916015625}, {"start": 338.97, "end": 339.53, "word": " eight.", "probability": 0.8701171875}, {"start": 340.59, "end": 340.91, "word": " So", "probability": 0.94580078125}, {"start": 340.91, "end": 341.07, "word": " we", "probability": 0.92041015625}, {"start": 341.07, "end": 341.33, "word": " have", "probability": 0.94677734375}, {"start": 341.33, "end": 341.71, "word": " three", "probability": 0.87109375}, {"start": 341.71, "end": 342.07, "word": " values", "probability": 0.96435546875}, {"start": 342.07, "end": 342.49, "word": " x1,", "probability": 0.519287109375}, {"start": 342.65, "end": 343.15, "word": " x2,", "probability": 0.9931640625}, {"start": 343.35, "end": 344.09, "word": " x3.", "probability": 0.983154296875}, {"start": 346.05, "end": 346.67, "word": " In", "probability": 0.9052734375}, {"start": 346.67, "end": 346.91, "word": 
" this", "probability": 0.94677734375}, {"start": 346.91, "end": 347.15, "word": " case,", "probability": 0.91357421875}, {"start": 347.21, "end": 347.33, "word": " you", "probability": 0.95263671875}, {"start": 347.33, "end": 347.49, "word": " can", "probability": 0.94189453125}, {"start": 347.49, "end": 347.79, "word": " take", "probability": 0.89453125}, {"start": 347.79, "end": 348.01, "word": " any", "probability": 0.908203125}, {"start": 348.01, "end": 348.19, "word": " two", "probability": 0.92626953125}, {"start": 348.19, "end": 348.67, "word": " values.", "probability": 0.9677734375}], "temperature": 1.0}, {"id": 13, "seek": 37876, "start": 349.68, "end": 378.76, "text": " So suppose the mean of three numbers is eight. So there are three observations, n equal to three, the mean is eight. In this case, you can take any two of these three values. For example, maybe x1, I can choose x1 to be, for example, 10. Someone else will choose x2 to be 7. Now, the value of x3 should be equal to a value", "tokens": [407, 7297, 264, 914, 295, 1045, 3547, 307, 3180, 13, 407, 456, 366, 1045, 18163, 11, 297, 2681, 281, 1045, 11, 264, 914, 307, 3180, 13, 682, 341, 1389, 11, 291, 393, 747, 604, 732, 295, 613, 1045, 4190, 13, 1171, 1365, 11, 1310, 2031, 16, 11, 286, 393, 2826, 2031, 16, 281, 312, 11, 337, 1365, 11, 1266, 13, 8734, 1646, 486, 2826, 2031, 17, 281, 312, 1614, 13, 823, 11, 264, 2158, 295, 2031, 18, 820, 312, 2681, 281, 257, 2158], "avg_logprob": -0.16424851740399996, "compression_ratio": 1.6479591836734695, "no_speech_prob": 0.0, "words": [{"start": 349.68, "end": 349.98, "word": " So", "probability": 0.83544921875}, {"start": 349.98, "end": 350.5, "word": " suppose", "probability": 0.74169921875}, {"start": 350.5, "end": 350.94, "word": " the", "probability": 0.82373046875}, {"start": 350.94, "end": 351.1, "word": " mean", "probability": 0.94287109375}, {"start": 351.1, "end": 351.24, "word": " of", "probability": 0.9619140625}, {"start": 351.24, "end": 351.42, 
"word": " three", "probability": 0.82373046875}, {"start": 351.42, "end": 351.82, "word": " numbers", "probability": 0.8935546875}, {"start": 351.82, "end": 352.08, "word": " is", "probability": 0.94482421875}, {"start": 352.08, "end": 352.32, "word": " eight.", "probability": 0.716796875}, {"start": 352.62, "end": 352.96, "word": " So", "probability": 0.94189453125}, {"start": 352.96, "end": 353.28, "word": " there", "probability": 0.814453125}, {"start": 353.28, "end": 353.4, "word": " are", "probability": 0.94091796875}, {"start": 353.4, "end": 353.6, "word": " three", "probability": 0.9326171875}, {"start": 353.6, "end": 354.22, "word": " observations,", "probability": 0.78466796875}, {"start": 354.36, "end": 354.46, "word": " n", "probability": 0.50732421875}, {"start": 354.46, "end": 354.68, "word": " equal", "probability": 0.2978515625}, {"start": 354.68, "end": 354.8, "word": " to", "probability": 0.56396484375}, {"start": 354.8, "end": 355.04, "word": " three,", "probability": 0.73095703125}, {"start": 355.52, "end": 355.68, "word": " the", "probability": 0.88232421875}, {"start": 355.68, "end": 355.82, "word": " mean", "probability": 0.95751953125}, {"start": 355.82, "end": 355.96, "word": " is", "probability": 0.9453125}, {"start": 355.96, "end": 356.12, "word": " eight.", "probability": 0.8935546875}, {"start": 356.46, "end": 356.64, "word": " In", "probability": 0.953125}, {"start": 356.64, "end": 356.86, "word": " this", "probability": 0.94482421875}, {"start": 356.86, "end": 357.12, "word": " case,", "probability": 0.91064453125}, {"start": 357.14, "end": 357.26, "word": " you", "probability": 0.9462890625}, {"start": 357.26, "end": 357.44, "word": " can", "probability": 0.9423828125}, {"start": 357.44, "end": 357.86, "word": " take", "probability": 0.8916015625}, {"start": 357.86, "end": 359.02, "word": " any", "probability": 0.88623046875}, {"start": 359.02, "end": 359.28, "word": " two", "probability": 0.91748046875}, {"start": 359.28, "end": 
359.48, "word": " of", "probability": 0.95654296875}, {"start": 359.48, "end": 359.7, "word": " these", "probability": 0.8544921875}, {"start": 359.7, "end": 359.98, "word": " three", "probability": 0.927734375}, {"start": 359.98, "end": 360.42, "word": " values.", "probability": 0.96630859375}, {"start": 361.66, "end": 362.02, "word": " For", "probability": 0.9638671875}, {"start": 362.02, "end": 362.3, "word": " example,", "probability": 0.97216796875}, {"start": 362.4, "end": 362.62, "word": " maybe", "probability": 0.90869140625}, {"start": 362.62, "end": 363.88, "word": " x1,", "probability": 0.781494140625}, {"start": 364.38, "end": 364.74, "word": " I", "probability": 0.99609375}, {"start": 364.74, "end": 364.96, "word": " can", "probability": 0.9453125}, {"start": 364.96, "end": 365.26, "word": " choose", "probability": 0.91162109375}, {"start": 365.26, "end": 365.64, "word": " x1", "probability": 0.986083984375}, {"start": 365.64, "end": 365.8, "word": " to", "probability": 0.9638671875}, {"start": 365.8, "end": 365.94, "word": " be,", "probability": 0.94775390625}, {"start": 366.06, "end": 366.14, "word": " for", "probability": 0.94873046875}, {"start": 366.14, "end": 366.46, "word": " example,", "probability": 0.97607421875}, {"start": 366.56, "end": 366.78, "word": " 10.", "probability": 0.59423828125}, {"start": 367.5, "end": 367.9, "word": " Someone", "probability": 0.953125}, {"start": 367.9, "end": 368.24, "word": " else", "probability": 0.91748046875}, {"start": 368.24, "end": 368.4, "word": " will", "probability": 0.86279296875}, {"start": 368.4, "end": 368.86, "word": " choose", "probability": 0.90673828125}, {"start": 368.86, "end": 369.38, "word": " x2", "probability": 0.993408203125}, {"start": 369.38, "end": 369.52, "word": " to", "probability": 0.962890625}, {"start": 369.52, "end": 369.66, "word": " be", "probability": 0.9462890625}, {"start": 369.66, "end": 369.96, "word": " 7.", "probability": 0.52880859375}, {"start": 370.98, "end": 
371.28, "word": " Now,", "probability": 0.9609375}, {"start": 371.38, "end": 371.5, "word": " the", "probability": 0.89990234375}, {"start": 371.5, "end": 371.74, "word": " value", "probability": 0.97509765625}, {"start": 371.74, "end": 371.9, "word": " of", "probability": 0.77587890625}, {"start": 371.9, "end": 372.42, "word": " x3", "probability": 0.994873046875}, {"start": 372.42, "end": 374.82, "word": " should", "probability": 0.94580078125}, {"start": 374.82, "end": 375.22, "word": " be", "probability": 0.94970703125}, {"start": 375.22, "end": 376.02, "word": " equal", "probability": 0.9033203125}, {"start": 376.02, "end": 377.32, "word": " to", "probability": 0.96875}, {"start": 377.32, "end": 378.4, "word": " a", "probability": 0.98193359375}, {"start": 378.4, "end": 378.76, "word": " value", "probability": 0.9736328125}], "temperature": 1.0}, {"id": 14, "seek": 39487, "start": 380.23, "end": 394.87, "text": " And in that case, the sample mean should be eight. And there are three observations, so the total should be 24. So this value should be seven, must be seven. 
So now you have two options.", "tokens": [400, 294, 300, 1389, 11, 264, 6889, 914, 820, 312, 3180, 13, 400, 456, 366, 1045, 18163, 11, 370, 264, 3217, 820, 312, 4022, 13, 407, 341, 2158, 820, 312, 3407, 11, 1633, 312, 3407, 13, 407, 586, 291, 362, 732, 3956, 13], "avg_logprob": -0.12056108242408796, "compression_ratio": 1.4609375, "no_speech_prob": 0.0, "words": [{"start": 380.23, "end": 380.53, "word": " And", "probability": 0.81005859375}, {"start": 380.53, "end": 380.69, "word": " in", "probability": 0.912109375}, {"start": 380.69, "end": 380.97, "word": " that", "probability": 0.9345703125}, {"start": 380.97, "end": 381.53, "word": " case,", "probability": 0.92138671875}, {"start": 382.25, "end": 382.41, "word": " the", "probability": 0.9130859375}, {"start": 382.41, "end": 382.73, "word": " sample", "probability": 0.888671875}, {"start": 382.73, "end": 383.01, "word": " mean", "probability": 0.90673828125}, {"start": 383.01, "end": 383.23, "word": " should", "probability": 0.962890625}, {"start": 383.23, "end": 383.41, "word": " be", "probability": 0.94970703125}, {"start": 383.41, "end": 383.81, "word": " eight.", "probability": 0.5986328125}, {"start": 385.01, "end": 385.37, "word": " And", "probability": 0.9423828125}, {"start": 385.37, "end": 385.55, "word": " there", "probability": 0.91552734375}, {"start": 385.55, "end": 385.69, "word": " are", "probability": 0.94482421875}, {"start": 385.69, "end": 385.99, "word": " three", "probability": 0.91162109375}, {"start": 385.99, "end": 386.51, "word": " observations,", "probability": 0.7841796875}, {"start": 386.71, "end": 386.81, "word": " so", "probability": 0.94384765625}, {"start": 386.81, "end": 386.95, "word": " the", "probability": 0.90234375}, {"start": 386.95, "end": 387.19, "word": " total", "probability": 0.880859375}, {"start": 387.19, "end": 387.43, "word": " should", "probability": 0.9716796875}, {"start": 387.43, "end": 387.55, "word": " be", "probability": 0.9453125}, {"start": 387.55, "end": 
388.01, "word": " 24.", "probability": 0.9287109375}, {"start": 388.97, "end": 389.27, "word": " So", "probability": 0.79638671875}, {"start": 389.27, "end": 389.55, "word": " this", "probability": 0.90185546875}, {"start": 389.55, "end": 389.81, "word": " value", "probability": 0.9462890625}, {"start": 389.81, "end": 390.05, "word": " should", "probability": 0.947265625}, {"start": 390.05, "end": 390.23, "word": " be", "probability": 0.95166015625}, {"start": 390.23, "end": 390.49, "word": " seven,", "probability": 0.8427734375}, {"start": 390.79, "end": 391.13, "word": " must", "probability": 0.8369140625}, {"start": 391.13, "end": 391.37, "word": " be", "probability": 0.951171875}, {"start": 391.37, "end": 391.67, "word": " seven.", "probability": 0.90673828125}, {"start": 392.75, "end": 393.07, "word": " So", "probability": 0.94873046875}, {"start": 393.07, "end": 393.29, "word": " now", "probability": 0.92041015625}, {"start": 393.29, "end": 393.73, "word": " you", "probability": 0.66748046875}, {"start": 393.73, "end": 393.99, "word": " have", "probability": 0.94677734375}, {"start": 393.99, "end": 394.41, "word": " two", "probability": 0.93408203125}, {"start": 394.41, "end": 394.87, "word": " options.", "probability": 0.9052734375}], "temperature": 1.0}, {"id": 15, "seek": 42596, "start": 396.46, "end": 425.96, "text": " For x1 and x2, just you can take any two values. But the third one has a restriction on it that the average will be three. Maybe someone else will choose x1 to be seven, x3 to be, for example, 15. So now what do you think about the remaining item or the remaining observation? This one should be two. 
So you have three.", "tokens": [1171, 2031, 16, 293, 2031, 17, 11, 445, 291, 393, 747, 604, 732, 4190, 13, 583, 264, 2636, 472, 575, 257, 29529, 322, 309, 300, 264, 4274, 486, 312, 1045, 13, 2704, 1580, 1646, 486, 2826, 2031, 16, 281, 312, 3407, 11, 2031, 18, 281, 312, 11, 337, 1365, 11, 2119, 13, 407, 586, 437, 360, 291, 519, 466, 264, 8877, 3174, 420, 264, 8877, 14816, 30, 639, 472, 820, 312, 732, 13, 407, 291, 362, 1045, 13], "avg_logprob": -0.14814081901236426, "compression_ratio": 1.5238095238095237, "no_speech_prob": 0.0, "words": [{"start": 396.46, "end": 396.76, "word": " For", "probability": 0.60693359375}, {"start": 396.76, "end": 397.12, "word": " x1", "probability": 0.7587890625}, {"start": 397.12, "end": 397.3, "word": " and", "probability": 0.923828125}, {"start": 397.3, "end": 397.64, "word": " x2,", "probability": 0.99267578125}, {"start": 397.76, "end": 398.0, "word": " just", "probability": 0.5556640625}, {"start": 398.0, "end": 398.54, "word": " you", "probability": 0.89453125}, {"start": 398.54, "end": 398.88, "word": " can", "probability": 0.94287109375}, {"start": 398.88, "end": 399.34, "word": " take", "probability": 0.884765625}, {"start": 399.34, "end": 399.7, "word": " any", "probability": 0.91552734375}, {"start": 399.7, "end": 399.88, "word": " two", "probability": 0.92578125}, {"start": 399.88, "end": 400.36, "word": " values.", "probability": 0.966796875}, {"start": 400.72, "end": 400.92, "word": " But", "probability": 0.90185546875}, {"start": 400.92, "end": 401.06, "word": " the", "probability": 0.8984375}, {"start": 401.06, "end": 401.3, "word": " third", "probability": 0.94140625}, {"start": 401.3, "end": 401.48, "word": " one", "probability": 0.92626953125}, {"start": 401.48, "end": 401.66, "word": " has", "probability": 0.9443359375}, {"start": 401.66, "end": 401.74, "word": " a", "probability": 0.61083984375}, {"start": 401.74, "end": 402.22, "word": " restriction", "probability": 0.86376953125}, {"start": 402.22, "end": 402.76, 
"word": " on", "probability": 0.94775390625}, {"start": 402.76, "end": 403.0, "word": " it", "probability": 0.9521484375}, {"start": 403.0, "end": 403.38, "word": " that", "probability": 0.7587890625}, {"start": 403.38, "end": 404.1, "word": " the", "probability": 0.875}, {"start": 404.1, "end": 404.56, "word": " average", "probability": 0.7998046875}, {"start": 404.56, "end": 404.9, "word": " will", "probability": 0.751953125}, {"start": 404.9, "end": 405.48, "word": " be", "probability": 0.95166015625}, {"start": 405.48, "end": 406.2, "word": " three.", "probability": 0.6845703125}, {"start": 406.66, "end": 406.96, "word": " Maybe", "probability": 0.9365234375}, {"start": 406.96, "end": 407.32, "word": " someone", "probability": 0.93896484375}, {"start": 407.32, "end": 407.68, "word": " else", "probability": 0.9267578125}, {"start": 407.68, "end": 407.88, "word": " will", "probability": 0.86376953125}, {"start": 407.88, "end": 408.36, "word": " choose", "probability": 0.9169921875}, {"start": 408.36, "end": 409.24, "word": " x1", "probability": 0.9853515625}, {"start": 409.24, "end": 409.4, "word": " to", "probability": 0.97021484375}, {"start": 409.4, "end": 409.52, "word": " be", "probability": 0.9501953125}, {"start": 409.52, "end": 409.84, "word": " seven,", "probability": 0.720703125}, {"start": 410.7, "end": 411.22, "word": " x3", "probability": 0.984375}, {"start": 411.22, "end": 411.42, "word": " to", "probability": 0.95947265625}, {"start": 411.42, "end": 411.66, "word": " be,", "probability": 0.95263671875}, {"start": 412.16, "end": 412.36, "word": " for", "probability": 0.95166015625}, {"start": 412.36, "end": 412.68, "word": " example,", "probability": 0.9736328125}, {"start": 412.78, "end": 413.24, "word": " 15.", "probability": 0.5732421875}, {"start": 415.48, "end": 415.88, "word": " So", "probability": 0.94384765625}, {"start": 415.88, "end": 416.06, "word": " now", "probability": 0.72607421875}, {"start": 416.06, "end": 416.48, "word": " what", 
"probability": 0.513671875}, {"start": 416.48, "end": 416.62, "word": " do", "probability": 0.94189453125}, {"start": 416.62, "end": 416.68, "word": " you", "probability": 0.9462890625}, {"start": 416.68, "end": 417.02, "word": " think", "probability": 0.91796875}, {"start": 417.02, "end": 418.08, "word": " about", "probability": 0.87939453125}, {"start": 418.08, "end": 418.34, "word": " the", "probability": 0.91064453125}, {"start": 418.34, "end": 418.66, "word": " remaining", "probability": 0.89013671875}, {"start": 418.66, "end": 419.18, "word": " item", "probability": 0.9619140625}, {"start": 419.18, "end": 419.76, "word": " or", "probability": 0.61669921875}, {"start": 419.76, "end": 419.9, "word": " the", "probability": 0.9150390625}, {"start": 419.9, "end": 420.18, "word": " remaining", "probability": 0.92529296875}, {"start": 420.18, "end": 420.68, "word": " observation?", "probability": 0.9052734375}, {"start": 422.38, "end": 422.74, "word": " This", "probability": 0.63916015625}, {"start": 422.74, "end": 422.92, "word": " one", "probability": 0.92333984375}, {"start": 422.92, "end": 423.12, "word": " should", "probability": 0.96923828125}, {"start": 423.12, "end": 423.28, "word": " be", "probability": 0.94091796875}, {"start": 423.28, "end": 423.46, "word": " two.", "probability": 0.74853515625}, {"start": 423.98, "end": 424.46, "word": " So", "probability": 0.9482421875}, {"start": 424.46, "end": 424.6, "word": " you", "probability": 0.93017578125}, {"start": 424.6, "end": 424.86, "word": " have", "probability": 0.9453125}, {"start": 424.86, "end": 425.96, "word": " three.", "probability": 0.92529296875}], "temperature": 1.0}, {"id": 16, "seek": 45277, "start": 426.51, "end": 452.77, "text": " to take any two values out of three. Here, for this specific example, he chose x1 to be 7, x2 to be 8. Now, x3 must be 9, because the total in this case will be 24. So the average, again, is 8. So here, n is 3. 
So degrees of freedom is 2, is n minus 1.", "tokens": [281, 747, 604, 732, 4190, 484, 295, 1045, 13, 1692, 11, 337, 341, 2685, 1365, 11, 415, 5111, 2031, 16, 281, 312, 1614, 11, 2031, 17, 281, 312, 1649, 13, 823, 11, 2031, 18, 1633, 312, 1722, 11, 570, 264, 3217, 294, 341, 1389, 486, 312, 4022, 13, 407, 264, 4274, 11, 797, 11, 307, 1649, 13, 407, 510, 11, 297, 307, 805, 13, 407, 5310, 295, 5645, 307, 568, 11, 307, 297, 3175, 502, 13], "avg_logprob": -0.14914773191724504, "compression_ratio": 1.4213483146067416, "no_speech_prob": 0.0, "words": [{"start": 426.51, "end": 426.65, "word": " to", "probability": 0.406982421875}, {"start": 426.65, "end": 426.89, "word": " take", "probability": 0.8818359375}, {"start": 426.89, "end": 427.13, "word": " any", "probability": 0.90478515625}, {"start": 427.13, "end": 427.33, "word": " two", "probability": 0.89697265625}, {"start": 427.33, "end": 427.59, "word": " values", "probability": 0.87890625}, {"start": 427.59, "end": 427.87, "word": " out", "probability": 0.8876953125}, {"start": 427.87, "end": 428.03, "word": " of", "probability": 0.9697265625}, {"start": 428.03, "end": 428.31, "word": " three.", "probability": 0.8955078125}, {"start": 429.03, "end": 429.55, "word": " Here,", "probability": 0.83544921875}, {"start": 429.83, "end": 429.97, "word": " for", "probability": 0.9453125}, {"start": 429.97, "end": 430.21, "word": " this", "probability": 0.9453125}, {"start": 430.21, "end": 430.85, "word": " specific", "probability": 0.90625}, {"start": 430.85, "end": 431.45, "word": " example,", "probability": 0.9755859375}, {"start": 432.01, "end": 432.15, "word": " he", "probability": 0.87060546875}, {"start": 432.15, "end": 432.53, "word": " chose", "probability": 0.47021484375}, {"start": 432.53, "end": 432.91, "word": " x1", "probability": 0.864990234375}, {"start": 432.91, "end": 433.07, "word": " to", "probability": 0.9658203125}, {"start": 433.07, "end": 433.17, "word": " be", "probability": 0.95068359375}, {"start": 
433.17, "end": 433.53, "word": " 7,", "probability": 0.4609375}, {"start": 433.75, "end": 434.11, "word": " x2", "probability": 0.994384765625}, {"start": 434.11, "end": 434.27, "word": " to", "probability": 0.966796875}, {"start": 434.27, "end": 434.41, "word": " be", "probability": 0.95166015625}, {"start": 434.41, "end": 434.73, "word": " 8.", "probability": 0.9033203125}, {"start": 435.39, "end": 435.71, "word": " Now,", "probability": 0.9638671875}, {"start": 436.09, "end": 436.71, "word": " x3", "probability": 0.99169921875}, {"start": 436.71, "end": 439.51, "word": " must", "probability": 0.8876953125}, {"start": 439.51, "end": 439.75, "word": " be", "probability": 0.962890625}, {"start": 439.75, "end": 440.09, "word": " 9,", "probability": 0.8603515625}, {"start": 440.87, "end": 441.23, "word": " because", "probability": 0.900390625}, {"start": 441.23, "end": 441.41, "word": " the", "probability": 0.9150390625}, {"start": 441.41, "end": 441.67, "word": " total", "probability": 0.84423828125}, {"start": 441.67, "end": 441.87, "word": " in", "probability": 0.87841796875}, {"start": 441.87, "end": 442.03, "word": " this", "probability": 0.94189453125}, {"start": 442.03, "end": 442.37, "word": " case", "probability": 0.921875}, {"start": 442.37, "end": 442.59, "word": " will", "probability": 0.8349609375}, {"start": 442.59, "end": 442.79, "word": " be", "probability": 0.94482421875}, {"start": 442.79, "end": 443.29, "word": " 24.", "probability": 0.96533203125}, {"start": 443.73, "end": 443.91, "word": " So", "probability": 0.96484375}, {"start": 443.91, "end": 444.03, "word": " the", "probability": 0.88427734375}, {"start": 444.03, "end": 444.45, "word": " average,", "probability": 0.81005859375}, {"start": 444.71, "end": 445.13, "word": " again,", "probability": 0.9638671875}, {"start": 445.25, "end": 445.61, "word": " is", "probability": 0.94677734375}, {"start": 445.61, "end": 446.13, "word": " 8.", "probability": 0.86962890625}, {"start": 446.85, "end": 
447.07, "word": " So", "probability": 0.962890625}, {"start": 447.07, "end": 447.33, "word": " here,", "probability": 0.8427734375}, {"start": 447.41, "end": 447.53, "word": " n", "probability": 0.8720703125}, {"start": 447.53, "end": 447.67, "word": " is", "probability": 0.94775390625}, {"start": 447.67, "end": 447.97, "word": " 3.", "probability": 0.79541015625}, {"start": 449.03, "end": 449.29, "word": " So", "probability": 0.96533203125}, {"start": 449.29, "end": 449.61, "word": " degrees", "probability": 0.94482421875}, {"start": 449.61, "end": 449.83, "word": " of", "probability": 0.96435546875}, {"start": 449.83, "end": 450.21, "word": " freedom", "probability": 0.95458984375}, {"start": 450.21, "end": 451.59, "word": " is", "probability": 0.80322265625}, {"start": 451.59, "end": 451.85, "word": " 2,", "probability": 0.88232421875}, {"start": 451.95, "end": 452.05, "word": " is", "probability": 0.7412109375}, {"start": 452.05, "end": 452.21, "word": " n", "probability": 0.92578125}, {"start": 452.21, "end": 452.47, "word": " minus", "probability": 0.98193359375}, {"start": 452.47, "end": 452.77, "word": " 1.", "probability": 0.9853515625}], "temperature": 1.0}, {"id": 17, "seek": 47476, "start": 453.94, "end": 474.76, "text": " So that means two values can be any numbers. As I mentioned here, it could be any two numbers. But the third one is not free to vary for a given mean, because here we have a restriction that the mean is eight. 
Another example, suppose I gave you three cards.", "tokens": [407, 300, 1355, 732, 4190, 393, 312, 604, 3547, 13, 1018, 286, 2835, 510, 11, 309, 727, 312, 604, 732, 3547, 13, 583, 264, 2636, 472, 307, 406, 1737, 281, 10559, 337, 257, 2212, 914, 11, 570, 510, 321, 362, 257, 29529, 300, 264, 914, 307, 3180, 13, 3996, 1365, 11, 7297, 286, 2729, 291, 1045, 5632, 13], "avg_logprob": -0.14499470541032694, "compression_ratio": 1.5146198830409356, "no_speech_prob": 0.0, "words": [{"start": 453.94, "end": 454.4, "word": " So", "probability": 0.77001953125}, {"start": 454.4, "end": 454.66, "word": " that", "probability": 0.83837890625}, {"start": 454.66, "end": 455.0, "word": " means", "probability": 0.92431640625}, {"start": 455.0, "end": 455.26, "word": " two", "probability": 0.81689453125}, {"start": 455.26, "end": 455.68, "word": " values", "probability": 0.962890625}, {"start": 455.68, "end": 456.06, "word": " can", "probability": 0.947265625}, {"start": 456.06, "end": 456.3, "word": " be", "probability": 0.95556640625}, {"start": 456.3, "end": 456.62, "word": " any", "probability": 0.91845703125}, {"start": 456.62, "end": 457.1, "word": " numbers.", "probability": 0.8740234375}, {"start": 457.62, "end": 457.84, "word": " As", "probability": 0.95751953125}, {"start": 457.84, "end": 458.3, "word": " I", "probability": 0.79736328125}, {"start": 458.3, "end": 458.72, "word": " mentioned", "probability": 0.80322265625}, {"start": 458.72, "end": 459.06, "word": " here,", "probability": 0.84423828125}, {"start": 459.44, "end": 459.64, "word": " it", "probability": 0.94189453125}, {"start": 459.64, "end": 459.8, "word": " could", "probability": 0.87255859375}, {"start": 459.8, "end": 459.94, "word": " be", "probability": 0.95166015625}, {"start": 459.94, "end": 460.16, "word": " any", "probability": 0.912109375}, {"start": 460.16, "end": 460.44, "word": " two", "probability": 0.91748046875}, {"start": 460.44, "end": 460.9, "word": " numbers.", "probability": 0.90771484375}, 
{"start": 462.22, "end": 462.78, "word": " But", "probability": 0.94482421875}, {"start": 462.78, "end": 462.98, "word": " the", "probability": 0.8857421875}, {"start": 462.98, "end": 463.26, "word": " third", "probability": 0.9423828125}, {"start": 463.26, "end": 463.58, "word": " one", "probability": 0.92333984375}, {"start": 463.58, "end": 464.02, "word": " is", "probability": 0.93505859375}, {"start": 464.02, "end": 464.26, "word": " not", "probability": 0.93896484375}, {"start": 464.26, "end": 464.52, "word": " free", "probability": 0.90576171875}, {"start": 464.52, "end": 464.72, "word": " to", "probability": 0.95556640625}, {"start": 464.72, "end": 464.98, "word": " vary", "probability": 0.97021484375}, {"start": 464.98, "end": 465.66, "word": " for", "probability": 0.9013671875}, {"start": 465.66, "end": 465.84, "word": " a", "probability": 0.990234375}, {"start": 465.84, "end": 466.24, "word": " given", "probability": 0.88818359375}, {"start": 466.24, "end": 467.32, "word": " mean,", "probability": 0.955078125}, {"start": 467.4, "end": 467.68, "word": " because", "probability": 0.896484375}, {"start": 467.68, "end": 467.9, "word": " here", "probability": 0.83349609375}, {"start": 467.9, "end": 468.1, "word": " we", "probability": 0.791015625}, {"start": 468.1, "end": 468.26, "word": " have", "probability": 0.9501953125}, {"start": 468.26, "end": 468.36, "word": " a", "probability": 0.892578125}, {"start": 468.36, "end": 468.74, "word": " restriction", "probability": 0.8779296875}, {"start": 468.74, "end": 469.12, "word": " that", "probability": 0.90185546875}, {"start": 469.12, "end": 469.7, "word": " the", "probability": 0.91455078125}, {"start": 469.7, "end": 469.86, "word": " mean", "probability": 0.96044921875}, {"start": 469.86, "end": 470.04, "word": " is", "probability": 0.95166015625}, {"start": 470.04, "end": 470.2, "word": " eight.", "probability": 0.366455078125}, {"start": 470.32, "end": 470.56, "word": " Another", "probability": 0.87890625}, 
{"start": 470.56, "end": 470.96, "word": " example,", "probability": 0.97265625}, {"start": 471.06, "end": 471.46, "word": " suppose", "probability": 0.91015625}, {"start": 471.46, "end": 472.7, "word": " I", "probability": 0.982421875}, {"start": 472.7, "end": 472.92, "word": " gave", "probability": 0.57470703125}, {"start": 472.92, "end": 473.2, "word": " you", "probability": 0.9619140625}, {"start": 473.2, "end": 474.34, "word": " three", "probability": 0.904296875}, {"start": 474.34, "end": 474.76, "word": " cards.", "probability": 0.90283203125}], "temperature": 1.0}, {"id": 18, "seek": 49734, "start": 477.7, "end": 497.34, "text": " And my restriction is the sum of the numbers on the three cards will be 15. Maybe you will write any two values in any two cards. For example, maybe I will choose seven in the first card, six in the third,", "tokens": [400, 452, 29529, 307, 264, 2408, 295, 264, 3547, 322, 264, 1045, 5632, 486, 312, 2119, 13, 2704, 291, 486, 2464, 604, 732, 4190, 294, 604, 732, 5632, 13, 1171, 1365, 11, 1310, 286, 486, 2826, 3407, 294, 264, 700, 2920, 11, 2309, 294, 264, 2636, 11], "avg_logprob": -0.20589193080862364, "compression_ratio": 1.4206896551724137, "no_speech_prob": 0.0, "words": [{"start": 477.7, "end": 478.06, "word": " And", "probability": 0.41650390625}, {"start": 478.06, "end": 478.28, "word": " my", "probability": 0.92431640625}, {"start": 478.28, "end": 478.68, "word": " restriction", "probability": 0.8046875}, {"start": 478.68, "end": 479.08, "word": " is", "probability": 0.9462890625}, {"start": 479.08, "end": 479.48, "word": " the", "probability": 0.52880859375}, {"start": 479.48, "end": 479.92, "word": " sum", "probability": 0.91455078125}, {"start": 479.92, "end": 480.88, "word": " of", "probability": 0.94921875}, {"start": 480.88, "end": 481.02, "word": " the", "probability": 0.90771484375}, {"start": 481.02, "end": 481.36, "word": " numbers", "probability": 0.826171875}, {"start": 481.36, "end": 481.54, "word": " on", 
"probability": 0.50634765625}, {"start": 481.54, "end": 481.66, "word": " the", "probability": 0.83984375}, {"start": 481.66, "end": 481.86, "word": " three", "probability": 0.724609375}, {"start": 481.86, "end": 482.24, "word": " cards", "probability": 0.876953125}, {"start": 482.24, "end": 482.56, "word": " will", "probability": 0.8623046875}, {"start": 482.56, "end": 482.88, "word": " be", "probability": 0.953125}, {"start": 482.88, "end": 483.72, "word": " 15.", "probability": 0.64111328125}, {"start": 485.72, "end": 486.1, "word": " Maybe", "probability": 0.9423828125}, {"start": 486.1, "end": 486.24, "word": " you", "probability": 0.9248046875}, {"start": 486.24, "end": 486.42, "word": " will", "probability": 0.7080078125}, {"start": 486.42, "end": 486.78, "word": " write", "probability": 0.91650390625}, {"start": 486.78, "end": 488.38, "word": " any", "probability": 0.9111328125}, {"start": 488.38, "end": 488.56, "word": " two", "probability": 0.90771484375}, {"start": 488.56, "end": 489.06, "word": " values", "probability": 0.95947265625}, {"start": 489.06, "end": 489.74, "word": " in", "probability": 0.8544921875}, {"start": 489.74, "end": 489.94, "word": " any", "probability": 0.9111328125}, {"start": 489.94, "end": 490.14, "word": " two", "probability": 0.92529296875}, {"start": 490.14, "end": 490.42, "word": " cards.", "probability": 0.85986328125}, {"start": 490.52, "end": 490.64, "word": " For", "probability": 0.94775390625}, {"start": 490.64, "end": 490.96, "word": " example,", "probability": 0.96923828125}, {"start": 491.1, "end": 491.26, "word": " maybe", "probability": 0.923828125}, {"start": 491.26, "end": 491.4, "word": " I", "probability": 0.98486328125}, {"start": 491.4, "end": 491.56, "word": " will", "probability": 0.7490234375}, {"start": 491.56, "end": 491.9, "word": " choose", "probability": 0.890625}, {"start": 491.9, "end": 493.24, "word": " seven", "probability": 0.430908203125}, {"start": 493.24, "end": 493.64, "word": " in", 
"probability": 0.9033203125}, {"start": 493.64, "end": 493.76, "word": " the", "probability": 0.91943359375}, {"start": 493.76, "end": 494.04, "word": " first", "probability": 0.89599609375}, {"start": 494.04, "end": 494.46, "word": " card,", "probability": 0.91259765625}, {"start": 495.78, "end": 496.5, "word": " six", "probability": 0.93994140625}, {"start": 496.5, "end": 496.9, "word": " in", "probability": 0.93115234375}, {"start": 496.9, "end": 497.04, "word": " the", "probability": 0.91552734375}, {"start": 497.04, "end": 497.34, "word": " third,", "probability": 0.93310546875}], "temperature": 1.0}, {"id": 19, "seek": 52624, "start": 498.82, "end": 526.24, "text": " So this one should be 2. So the degrees of freedom is 2. Any questions? So this is the meaning of degrees of freedom. So in general, if the sample size is n, I mean if the sample size equals n degrees of freedom, n minus 1. That's the meaning of degrees of freedom. Now let's see the comparison between T distribution and Z.", "tokens": [407, 341, 472, 820, 312, 568, 13, 407, 264, 5310, 295, 5645, 307, 568, 13, 2639, 1651, 30, 407, 341, 307, 264, 3620, 295, 5310, 295, 5645, 13, 407, 294, 2674, 11, 498, 264, 6889, 2744, 307, 297, 11, 286, 914, 498, 264, 6889, 2744, 6915, 297, 5310, 295, 5645, 11, 297, 3175, 502, 13, 663, 311, 264, 3620, 295, 5310, 295, 5645, 13, 823, 718, 311, 536, 264, 9660, 1296, 314, 7316, 293, 1176, 13], "avg_logprob": -0.15604707095530126, "compression_ratio": 1.7955801104972375, "no_speech_prob": 0.0, "words": [{"start": 498.82, "end": 499.1, "word": " So", "probability": 0.77783203125}, {"start": 499.1, "end": 499.32, "word": " this", "probability": 0.86767578125}, {"start": 499.32, "end": 499.52, "word": " one", "probability": 0.9013671875}, {"start": 499.52, "end": 499.78, "word": " should", "probability": 0.9677734375}, {"start": 499.78, "end": 499.94, "word": " be", "probability": 0.94970703125}, {"start": 499.94, "end": 500.18, "word": " 2.", "probability": 
0.43408203125}, {"start": 501.22, "end": 501.44, "word": " So", "probability": 0.84423828125}, {"start": 501.44, "end": 501.62, "word": " the", "probability": 0.51416015625}, {"start": 501.62, "end": 501.84, "word": " degrees", "probability": 0.890625}, {"start": 501.84, "end": 501.96, "word": " of", "probability": 0.9677734375}, {"start": 501.96, "end": 502.3, "word": " freedom", "probability": 0.9384765625}, {"start": 502.3, "end": 503.32, "word": " is", "probability": 0.9091796875}, {"start": 503.32, "end": 503.6, "word": " 2.", "probability": 0.8876953125}, {"start": 504.4, "end": 504.68, "word": " Any", "probability": 0.90869140625}, {"start": 504.68, "end": 504.96, "word": " questions?", "probability": 0.58203125}, {"start": 505.62, "end": 505.88, "word": " So", "probability": 0.88037109375}, {"start": 505.88, "end": 506.12, "word": " this", "probability": 0.87841796875}, {"start": 506.12, "end": 506.34, "word": " is", "probability": 0.93505859375}, {"start": 506.34, "end": 506.46, "word": " the", "probability": 0.919921875}, {"start": 506.46, "end": 506.78, "word": " meaning", "probability": 0.865234375}, {"start": 506.78, "end": 507.3, "word": " of", "probability": 0.9638671875}, {"start": 507.3, "end": 507.94, "word": " degrees", "probability": 0.92333984375}, {"start": 507.94, "end": 508.52, "word": " of", "probability": 0.9677734375}, {"start": 508.52, "end": 508.78, "word": " freedom.", "probability": 0.93310546875}, {"start": 509.28, "end": 509.5, "word": " So", "probability": 0.9501953125}, {"start": 509.5, "end": 509.82, "word": " in", "probability": 0.83837890625}, {"start": 509.82, "end": 510.16, "word": " general,", "probability": 0.90380859375}, {"start": 510.68, "end": 510.98, "word": " if", "probability": 0.95703125}, {"start": 510.98, "end": 511.16, "word": " the", "probability": 0.919921875}, {"start": 511.16, "end": 511.4, "word": " sample", "probability": 0.88623046875}, {"start": 511.4, "end": 511.72, "word": " size", "probability": 
0.8505859375}, {"start": 511.72, "end": 511.9, "word": " is", "probability": 0.67724609375}, {"start": 511.9, "end": 512.12, "word": " n,", "probability": 0.74609375}, {"start": 512.72, "end": 512.92, "word": " I", "probability": 0.87646484375}, {"start": 512.92, "end": 513.02, "word": " mean", "probability": 0.9697265625}, {"start": 513.02, "end": 513.14, "word": " if", "probability": 0.6396484375}, {"start": 513.14, "end": 513.28, "word": " the", "probability": 0.916015625}, {"start": 513.28, "end": 513.5, "word": " sample", "probability": 0.876953125}, {"start": 513.5, "end": 513.8, "word": " size", "probability": 0.86181640625}, {"start": 513.8, "end": 514.16, "word": " equals", "probability": 0.90673828125}, {"start": 514.16, "end": 514.48, "word": " n", "probability": 0.95751953125}, {"start": 514.48, "end": 515.18, "word": " degrees", "probability": 0.697265625}, {"start": 515.18, "end": 515.4, "word": " of", "probability": 0.962890625}, {"start": 515.4, "end": 515.82, "word": " freedom,", "probability": 0.93603515625}, {"start": 516.08, "end": 516.2, "word": " n", "probability": 0.8134765625}, {"start": 516.2, "end": 517.22, "word": " minus", "probability": 0.81005859375}, {"start": 517.22, "end": 518.06, "word": " 1.", "probability": 0.8828125}, {"start": 519.14, "end": 519.7, "word": " That's", "probability": 0.8994140625}, {"start": 519.7, "end": 519.84, "word": " the", "probability": 0.92041015625}, {"start": 519.84, "end": 520.16, "word": " meaning", "probability": 0.87158203125}, {"start": 520.16, "end": 520.64, "word": " of", "probability": 0.9609375}, {"start": 520.64, "end": 521.22, "word": " degrees", "probability": 0.919921875}, {"start": 521.22, "end": 521.66, "word": " of", "probability": 0.962890625}, {"start": 521.66, "end": 521.96, "word": " freedom.", "probability": 0.94384765625}, {"start": 522.42, "end": 522.62, "word": " Now", "probability": 0.9541015625}, {"start": 522.62, "end": 522.9, "word": " let's", "probability": 0.880859375}, 
{"start": 522.9, "end": 523.14, "word": " see", "probability": 0.904296875}, {"start": 523.14, "end": 523.94, "word": " the", "probability": 0.9072265625}, {"start": 523.94, "end": 524.48, "word": " comparison", "probability": 0.908203125}, {"start": 524.48, "end": 524.94, "word": " between", "probability": 0.88330078125}, {"start": 524.94, "end": 525.16, "word": " T", "probability": 0.51416015625}, {"start": 525.16, "end": 525.76, "word": " distribution", "probability": 0.716796875}, {"start": 525.76, "end": 526.06, "word": " and", "probability": 0.9345703125}, {"start": 526.06, "end": 526.24, "word": " Z.", "probability": 0.72607421875}], "temperature": 1.0}, {"id": 20, "seek": 55605, "start": 529.6, "end": 556.06, "text": " The blue one is normal distribution, standard normal. The other two, the blue and red ones, are T distributions with different degrees of freedom. For example, the red one has degrees of freedom five. So that means the sample size is six. And actually, in this case, T distribution has long tail.", "tokens": [440, 3344, 472, 307, 2710, 7316, 11, 3832, 2710, 13, 440, 661, 732, 11, 264, 3344, 293, 2182, 2306, 11, 366, 314, 37870, 365, 819, 5310, 295, 5645, 13, 1171, 1365, 11, 264, 2182, 472, 575, 5310, 295, 5645, 1732, 13, 407, 300, 1355, 264, 6889, 2744, 307, 2309, 13, 400, 767, 11, 294, 341, 1389, 11, 314, 7316, 575, 938, 6838, 13], "avg_logprob": -0.16442871442995965, "compression_ratio": 1.65, "no_speech_prob": 0.0, "words": [{"start": 529.6, "end": 529.88, "word": " The", "probability": 0.8193359375}, {"start": 529.88, "end": 530.12, "word": " blue", "probability": 0.9677734375}, {"start": 530.12, "end": 530.38, "word": " one", "probability": 0.92138671875}, {"start": 530.38, "end": 530.7, "word": " is", "probability": 0.9501953125}, {"start": 530.7, "end": 531.02, "word": " normal", "probability": 0.75634765625}, {"start": 531.02, "end": 531.66, "word": " distribution,", "probability": 0.85302734375}, {"start": 531.92, "end": 532.28, 
"word": " standard", "probability": 0.407470703125}, {"start": 532.28, "end": 532.68, "word": " normal.", "probability": 0.873046875}, {"start": 534.7, "end": 535.34, "word": " The", "probability": 0.88134765625}, {"start": 535.34, "end": 535.68, "word": " other", "probability": 0.8994140625}, {"start": 535.68, "end": 535.98, "word": " two,", "probability": 0.92431640625}, {"start": 536.12, "end": 536.24, "word": " the", "probability": 0.88671875}, {"start": 536.24, "end": 536.46, "word": " blue", "probability": 0.9638671875}, {"start": 536.46, "end": 536.68, "word": " and", "probability": 0.9365234375}, {"start": 536.68, "end": 537.0, "word": " red", "probability": 0.54638671875}, {"start": 537.0, "end": 537.42, "word": " ones,", "probability": 0.90576171875}, {"start": 538.18, "end": 539.06, "word": " are", "probability": 0.9365234375}, {"start": 539.06, "end": 539.22, "word": " T", "probability": 0.7802734375}, {"start": 539.22, "end": 539.82, "word": " distributions", "probability": 0.77001953125}, {"start": 539.82, "end": 540.24, "word": " with", "probability": 0.892578125}, {"start": 540.24, "end": 540.6, "word": " different", "probability": 0.89697265625}, {"start": 540.6, "end": 540.86, "word": " degrees", "probability": 0.94287109375}, {"start": 540.86, "end": 541.04, "word": " of", "probability": 0.970703125}, {"start": 541.04, "end": 541.3, "word": " freedom.", "probability": 0.9404296875}, {"start": 541.82, "end": 542.26, "word": " For", "probability": 0.96484375}, {"start": 542.26, "end": 542.6, "word": " example,", "probability": 0.974609375}, {"start": 542.68, "end": 542.8, "word": " the", "probability": 0.9111328125}, {"start": 542.8, "end": 542.98, "word": " red", "probability": 0.9306640625}, {"start": 542.98, "end": 543.3, "word": " one", "probability": 0.92822265625}, {"start": 543.3, "end": 544.48, "word": " has", "probability": 0.91455078125}, {"start": 544.48, "end": 545.1, "word": " degrees", "probability": 0.94287109375}, {"start": 545.1, 
"end": 545.34, "word": " of", "probability": 0.966796875}, {"start": 545.34, "end": 545.56, "word": " freedom", "probability": 0.9267578125}, {"start": 545.56, "end": 546.02, "word": " five.", "probability": 0.580078125}, {"start": 547.56, "end": 547.82, "word": " So", "probability": 0.93017578125}, {"start": 547.82, "end": 548.06, "word": " that", "probability": 0.86083984375}, {"start": 548.06, "end": 548.52, "word": " means", "probability": 0.935546875}, {"start": 548.52, "end": 549.44, "word": " the", "probability": 0.61279296875}, {"start": 549.44, "end": 549.9, "word": " sample", "probability": 0.82666015625}, {"start": 549.9, "end": 550.7, "word": " size", "probability": 0.8466796875}, {"start": 550.7, "end": 550.98, "word": " is", "probability": 0.947265625}, {"start": 550.98, "end": 551.4, "word": " six.", "probability": 0.91064453125}, {"start": 552.0, "end": 552.18, "word": " And", "probability": 0.947265625}, {"start": 552.18, "end": 552.64, "word": " actually,", "probability": 0.88818359375}, {"start": 553.46, "end": 553.58, "word": " in", "probability": 0.94140625}, {"start": 553.58, "end": 553.82, "word": " this", "probability": 0.94580078125}, {"start": 553.82, "end": 554.08, "word": " case,", "probability": 0.912109375}, {"start": 554.14, "end": 554.22, "word": " T", "probability": 0.83837890625}, {"start": 554.22, "end": 554.66, "word": " distribution", "probability": 0.81787109375}, {"start": 554.66, "end": 555.0, "word": " has", "probability": 0.884765625}, {"start": 555.0, "end": 555.72, "word": " long", "probability": 0.9091796875}, {"start": 555.72, "end": 556.06, "word": " tail.", "probability": 0.8701171875}], "temperature": 1.0}, {"id": 21, "seek": 58214, "start": 557.72, "end": 582.14, "text": " The other one, the blue one, has degrees of freedom 13, that means n equals 14. Now we can see that the blue one is closer to the normal than the red one. 
That means as degrees of freedom increases, the difference between z and t becomes smaller and smaller.", "tokens": [440, 661, 472, 11, 264, 3344, 472, 11, 575, 5310, 295, 5645, 3705, 11, 300, 1355, 297, 6915, 3499, 13, 823, 321, 393, 536, 300, 264, 3344, 472, 307, 4966, 281, 264, 2710, 813, 264, 2182, 472, 13, 663, 1355, 382, 5310, 295, 5645, 8637, 11, 264, 2649, 1296, 710, 293, 256, 3643, 4356, 293, 4356, 13], "avg_logprob": -0.1421066807775662, "compression_ratio": 1.6602564102564104, "no_speech_prob": 0.0, "words": [{"start": 557.72, "end": 557.92, "word": " The", "probability": 0.83740234375}, {"start": 557.92, "end": 558.2, "word": " other", "probability": 0.85595703125}, {"start": 558.2, "end": 558.4, "word": " one,", "probability": 0.9189453125}, {"start": 558.48, "end": 558.6, "word": " the", "probability": 0.91796875}, {"start": 558.6, "end": 558.78, "word": " blue", "probability": 0.96923828125}, {"start": 558.78, "end": 559.1, "word": " one,", "probability": 0.9345703125}, {"start": 560.38, "end": 560.72, "word": " has", "probability": 0.861328125}, {"start": 560.72, "end": 561.1, "word": " degrees", "probability": 0.93896484375}, {"start": 561.1, "end": 561.34, "word": " of", "probability": 0.97119140625}, {"start": 561.34, "end": 561.6, "word": " freedom", "probability": 0.9482421875}, {"start": 561.6, "end": 562.1, "word": " 13,", "probability": 0.8369140625}, {"start": 562.22, "end": 562.36, "word": " that", "probability": 0.8818359375}, {"start": 562.36, "end": 562.76, "word": " means", "probability": 0.9208984375}, {"start": 562.76, "end": 563.2, "word": " n", "probability": 0.51416015625}, {"start": 563.2, "end": 563.48, "word": " equals", "probability": 0.4111328125}, {"start": 563.48, "end": 563.98, "word": " 14.", "probability": 0.94482421875}, {"start": 564.78, "end": 565.14, "word": " Now", "probability": 0.92138671875}, {"start": 565.14, "end": 565.4, "word": " we", "probability": 0.56103515625}, {"start": 565.4, "end": 565.6, "word": " 
can", "probability": 0.94091796875}, {"start": 565.6, "end": 565.76, "word": " see", "probability": 0.921875}, {"start": 565.76, "end": 566.06, "word": " that", "probability": 0.927734375}, {"start": 566.06, "end": 566.8, "word": " the", "probability": 0.88623046875}, {"start": 566.8, "end": 567.04, "word": " blue", "probability": 0.97021484375}, {"start": 567.04, "end": 567.34, "word": " one", "probability": 0.9287109375}, {"start": 567.34, "end": 568.26, "word": " is", "probability": 0.947265625}, {"start": 568.26, "end": 568.86, "word": " closer", "probability": 0.9033203125}, {"start": 568.86, "end": 570.24, "word": " to", "probability": 0.95068359375}, {"start": 570.24, "end": 570.4, "word": " the", "probability": 0.87890625}, {"start": 570.4, "end": 570.92, "word": " normal", "probability": 0.87158203125}, {"start": 570.92, "end": 572.1, "word": " than", "probability": 0.904296875}, {"start": 572.1, "end": 572.94, "word": " the", "probability": 0.91015625}, {"start": 572.94, "end": 573.12, "word": " red", "probability": 0.919921875}, {"start": 573.12, "end": 573.38, "word": " one.", "probability": 0.923828125}, {"start": 573.9, "end": 574.18, "word": " That", "probability": 0.89697265625}, {"start": 574.18, "end": 574.6, "word": " means", "probability": 0.93115234375}, {"start": 574.6, "end": 575.56, "word": " as", "probability": 0.70068359375}, {"start": 575.56, "end": 575.9, "word": " degrees", "probability": 0.89892578125}, {"start": 575.9, "end": 576.12, "word": " of", "probability": 0.966796875}, {"start": 576.12, "end": 576.4, "word": " freedom", "probability": 0.94482421875}, {"start": 576.4, "end": 577.06, "word": " increases,", "probability": 0.76611328125}, {"start": 578.5, "end": 578.84, "word": " the", "probability": 0.91796875}, {"start": 578.84, "end": 579.42, "word": " difference", "probability": 0.84619140625}, {"start": 579.42, "end": 579.96, "word": " between", "probability": 0.88330078125}, {"start": 579.96, "end": 580.24, "word": " z", 
"probability": 0.8505859375}, {"start": 580.24, "end": 580.54, "word": " and", "probability": 0.94873046875}, {"start": 580.54, "end": 580.74, "word": " t", "probability": 0.90185546875}, {"start": 580.74, "end": 581.18, "word": " becomes", "probability": 0.85986328125}, {"start": 581.18, "end": 581.56, "word": " smaller", "probability": 0.884765625}, {"start": 581.56, "end": 581.84, "word": " and", "probability": 0.9375}, {"start": 581.84, "end": 582.14, "word": " smaller.", "probability": 0.86181640625}], "temperature": 1.0}, {"id": 22, "seek": 61394, "start": 584.62, "end": 613.94, "text": " Always, T converges or goes to Z as N increases. So for large sample sizes, we can approximate Z value by T. That in general, if N is very, very large, I mean above 30. But if N, for example, 100, in this case, the two curves will be identical. Let's see how can we use the", "tokens": [11270, 11, 314, 9652, 2880, 420, 1709, 281, 1176, 382, 426, 8637, 13, 407, 337, 2416, 6889, 11602, 11, 321, 393, 30874, 1176, 2158, 538, 314, 13, 663, 294, 2674, 11, 498, 426, 307, 588, 11, 588, 2416, 11, 286, 914, 3673, 2217, 13, 583, 498, 426, 11, 337, 1365, 11, 2319, 11, 294, 341, 1389, 11, 264, 732, 19490, 486, 312, 14800, 13, 961, 311, 536, 577, 393, 321, 764, 264], "avg_logprob": -0.20216181180248521, "compression_ratio": 1.3979591836734695, "no_speech_prob": 0.0, "words": [{"start": 584.62, "end": 585.24, "word": " Always,", "probability": 0.41748046875}, {"start": 585.74, "end": 586.12, "word": " T", "probability": 0.6982421875}, {"start": 586.12, "end": 587.16, "word": " converges", "probability": 0.95947265625}, {"start": 587.16, "end": 587.68, "word": " or", "probability": 0.7578125}, {"start": 587.68, "end": 588.0, "word": " goes", "probability": 0.93212890625}, {"start": 588.0, "end": 588.34, "word": " to", "probability": 0.92919921875}, {"start": 588.34, "end": 589.58, "word": " Z", "probability": 0.7744140625}, {"start": 589.58, "end": 590.58, "word": " as", "probability": 
0.84716796875}, {"start": 590.58, "end": 590.84, "word": " N", "probability": 0.71923828125}, {"start": 590.84, "end": 591.36, "word": " increases.", "probability": 0.92919921875}, {"start": 591.92, "end": 592.14, "word": " So", "probability": 0.92333984375}, {"start": 592.14, "end": 592.38, "word": " for", "probability": 0.71044921875}, {"start": 592.38, "end": 592.78, "word": " large", "probability": 0.95849609375}, {"start": 592.78, "end": 593.1, "word": " sample", "probability": 0.82763671875}, {"start": 593.1, "end": 593.54, "word": " sizes,", "probability": 0.89697265625}, {"start": 594.26, "end": 595.16, "word": " we", "probability": 0.94189453125}, {"start": 595.16, "end": 595.42, "word": " can", "probability": 0.94677734375}, {"start": 595.42, "end": 595.96, "word": " approximate", "probability": 0.958984375}, {"start": 595.96, "end": 596.34, "word": " Z", "probability": 0.93212890625}, {"start": 596.34, "end": 596.74, "word": " value", "probability": 0.8564453125}, {"start": 596.74, "end": 596.96, "word": " by", "probability": 0.97314453125}, {"start": 596.96, "end": 597.26, "word": " T.", "probability": 0.990234375}, {"start": 598.22, "end": 598.62, "word": " That", "probability": 0.65380859375}, {"start": 598.62, "end": 598.88, "word": " in", "probability": 0.70654296875}, {"start": 598.88, "end": 599.3, "word": " general,", "probability": 0.87353515625}, {"start": 599.44, "end": 599.54, "word": " if", "probability": 0.900390625}, {"start": 599.54, "end": 600.38, "word": " N", "probability": 0.430419921875}, {"start": 600.38, "end": 600.58, "word": " is", "probability": 0.94384765625}, {"start": 600.58, "end": 600.84, "word": " very,", "probability": 0.85791015625}, {"start": 600.92, "end": 601.02, "word": " very", "probability": 0.85498046875}, {"start": 601.02, "end": 601.34, "word": " large,", "probability": 0.96533203125}, {"start": 601.4, "end": 601.48, "word": " I", "probability": 0.91796875}, {"start": 601.48, "end": 601.7, "word": " mean", 
"probability": 0.9619140625}, {"start": 601.7, "end": 602.84, "word": " above", "probability": 0.6328125}, {"start": 602.84, "end": 603.34, "word": " 30.", "probability": 0.90771484375}, {"start": 604.0, "end": 604.42, "word": " But", "probability": 0.9521484375}, {"start": 604.42, "end": 604.6, "word": " if", "probability": 0.90771484375}, {"start": 604.6, "end": 604.84, "word": " N,", "probability": 0.759765625}, {"start": 605.0, "end": 605.14, "word": " for", "probability": 0.955078125}, {"start": 605.14, "end": 605.44, "word": " example,", "probability": 0.97509765625}, {"start": 605.48, "end": 605.88, "word": " 100,", "probability": 0.8544921875}, {"start": 606.38, "end": 606.56, "word": " in", "probability": 0.92724609375}, {"start": 606.56, "end": 606.78, "word": " this", "probability": 0.9443359375}, {"start": 606.78, "end": 607.04, "word": " case,", "probability": 0.91455078125}, {"start": 607.12, "end": 607.22, "word": " the", "probability": 0.9208984375}, {"start": 607.22, "end": 607.42, "word": " two", "probability": 0.9365234375}, {"start": 607.42, "end": 607.7, "word": " curves", "probability": 0.76318359375}, {"start": 607.7, "end": 607.9, "word": " will", "probability": 0.75634765625}, {"start": 607.9, "end": 608.38, "word": " be", "probability": 0.95849609375}, {"start": 608.38, "end": 609.98, "word": " identical.", "probability": 0.9091796875}, {"start": 611.66, "end": 612.28, "word": " Let's", "probability": 0.966796875}, {"start": 612.28, "end": 612.36, "word": " see", "probability": 0.9189453125}, {"start": 612.36, "end": 612.48, "word": " how", "probability": 0.904296875}, {"start": 612.48, "end": 612.66, "word": " can", "probability": 0.88037109375}, {"start": 612.66, "end": 612.82, "word": " we", "probability": 0.95263671875}, {"start": 612.82, "end": 613.1, "word": " use", "probability": 0.8779296875}, {"start": 613.1, "end": 613.94, "word": " the", "probability": 0.88427734375}], "temperature": 1.0}, {"id": 23, "seek": 64408, "start": 
615.9, "end": 644.08, "text": " Table 40. This table is quite similar to the one you have at the end of your book. Now again, this table gives the area in the upper tail. So if we are interested in 95 confidence interval.", "tokens": [25535, 3356, 13, 639, 3199, 307, 1596, 2531, 281, 264, 472, 291, 362, 412, 264, 917, 295, 428, 1446, 13, 823, 797, 11, 341, 3199, 2709, 264, 1859, 294, 264, 6597, 6838, 13, 407, 498, 321, 366, 3102, 294, 13420, 6687, 15035, 13], "avg_logprob": -0.14905895258892665, "compression_ratio": 1.3868613138686132, "no_speech_prob": 0.0, "words": [{"start": 615.9, "end": 616.66, "word": " Table", "probability": 0.69189453125}, {"start": 616.66, "end": 617.42, "word": " 40.", "probability": 0.8349609375}, {"start": 621.14, "end": 621.9, "word": " This", "probability": 0.8388671875}, {"start": 621.9, "end": 622.46, "word": " table", "probability": 0.87890625}, {"start": 622.46, "end": 622.76, "word": " is", "probability": 0.92138671875}, {"start": 622.76, "end": 623.18, "word": " quite", "probability": 0.9013671875}, {"start": 623.18, "end": 623.6, "word": " similar", "probability": 0.96484375}, {"start": 623.6, "end": 624.68, "word": " to", "probability": 0.95703125}, {"start": 624.68, "end": 624.82, "word": " the", "probability": 0.91650390625}, {"start": 624.82, "end": 624.96, "word": " one", "probability": 0.92578125}, {"start": 624.96, "end": 625.14, "word": " you", "probability": 0.9609375}, {"start": 625.14, "end": 625.44, "word": " have", "probability": 0.9365234375}, {"start": 625.44, "end": 625.72, "word": " at", "probability": 0.947265625}, {"start": 625.72, "end": 625.84, "word": " the", "probability": 0.921875}, {"start": 625.84, "end": 626.08, "word": " end", "probability": 0.89111328125}, {"start": 626.08, "end": 626.24, "word": " of", "probability": 0.9638671875}, {"start": 626.24, "end": 626.4, "word": " your", "probability": 0.8876953125}, {"start": 626.4, "end": 626.72, "word": " book.", "probability": 0.90283203125}, {"start": 
627.94, "end": 628.48, "word": " Now", "probability": 0.94921875}, {"start": 628.48, "end": 628.86, "word": " again,", "probability": 0.68359375}, {"start": 629.5, "end": 629.82, "word": " this", "probability": 0.9423828125}, {"start": 629.82, "end": 630.1, "word": " table", "probability": 0.9013671875}, {"start": 630.1, "end": 630.38, "word": " gives", "probability": 0.908203125}, {"start": 630.38, "end": 630.56, "word": " the", "probability": 0.91943359375}, {"start": 630.56, "end": 630.86, "word": " area", "probability": 0.89599609375}, {"start": 630.86, "end": 631.1, "word": " in", "probability": 0.89404296875}, {"start": 631.1, "end": 631.26, "word": " the", "probability": 0.91259765625}, {"start": 631.26, "end": 631.52, "word": " upper", "probability": 0.72900390625}, {"start": 631.52, "end": 631.92, "word": " tail.", "probability": 0.72802734375}, {"start": 635.66, "end": 636.16, "word": " So", "probability": 0.88720703125}, {"start": 636.16, "end": 636.58, "word": " if", "probability": 0.90869140625}, {"start": 636.58, "end": 637.82, "word": " we", "probability": 0.1951904296875}, {"start": 637.82, "end": 637.98, "word": " are", "probability": 0.9423828125}, {"start": 637.98, "end": 638.44, "word": " interested", "probability": 0.88037109375}, {"start": 638.44, "end": 641.84, "word": " in", "probability": 0.818359375}, {"start": 641.84, "end": 642.92, "word": " 95", "probability": 0.955078125}, {"start": 642.92, "end": 643.68, "word": " confidence", "probability": 0.93505859375}, {"start": 643.68, "end": 644.08, "word": " interval.", "probability": 0.66064453125}], "temperature": 1.0}, {"id": 24, "seek": 66848, "start": 651.62, "end": 668.48, "text": " That means alpha is 5% because 95% between these two values. So 5% left. 
The distribution is normal.", "tokens": [663, 1355, 8961, 307, 1025, 4, 570, 13420, 4, 1296, 613, 732, 4190, 13, 407, 1025, 4, 1411, 13, 440, 7316, 307, 2710, 13], "avg_logprob": -0.25359375, "compression_ratio": 1.10989010989011, "no_speech_prob": 0.0, "words": [{"start": 651.62, "end": 652.64, "word": " That", "probability": 0.141845703125}, {"start": 652.64, "end": 653.28, "word": " means", "probability": 0.83984375}, {"start": 653.28, "end": 654.26, "word": " alpha", "probability": 0.73974609375}, {"start": 654.26, "end": 656.8, "word": " is", "probability": 0.90478515625}, {"start": 656.8, "end": 657.06, "word": " 5", "probability": 0.763671875}, {"start": 657.06, "end": 657.4, "word": "%", "probability": 0.5419921875}, {"start": 657.4, "end": 657.88, "word": " because", "probability": 0.89111328125}, {"start": 657.88, "end": 659.16, "word": " 95", "probability": 0.95654296875}, {"start": 659.16, "end": 659.7, "word": "%", "probability": 0.9765625}, {"start": 659.7, "end": 660.26, "word": " between", "probability": 0.7451171875}, {"start": 660.26, "end": 661.76, "word": " these", "probability": 0.85986328125}, {"start": 661.76, "end": 661.96, "word": " two", "probability": 0.8798828125}, {"start": 661.96, "end": 662.34, "word": " values.", "probability": 0.96044921875}, {"start": 663.76, "end": 664.46, "word": " So", "probability": 0.94580078125}, {"start": 664.46, "end": 664.76, "word": " 5", "probability": 0.75390625}, {"start": 664.76, "end": 665.22, "word": "%", "probability": 0.99560546875}, {"start": 665.22, "end": 666.16, "word": " left.", "probability": 0.88916015625}, {"start": 667.3, "end": 667.48, "word": " The", "probability": 0.791015625}, {"start": 667.48, "end": 667.9, "word": " distribution", "probability": 0.89306640625}, {"start": 667.9, "end": 668.16, "word": " is", "probability": 0.9482421875}, {"start": 668.16, "end": 668.48, "word": " normal.", "probability": 0.85009765625}], "temperature": 1.0}, {"id": 25, "seek": 69706, "start": 669.18, 
"end": 697.06, "text": " It is bell-shaped, so 5% for both directions. So here we have 2.5, and the other one also 2.5. So the area in the upper tail is 2.5, so this is my 2.5. So again, columns represents upper tail probabilities, while rows represent degrees of freedom. Now, let's imagine that", "tokens": [467, 307, 4549, 12, 23103, 11, 370, 1025, 4, 337, 1293, 11095, 13, 407, 510, 321, 362, 568, 13, 20, 11, 293, 264, 661, 472, 611, 568, 13, 20, 13, 407, 264, 1859, 294, 264, 6597, 6838, 307, 568, 13, 20, 11, 370, 341, 307, 452, 568, 13, 20, 13, 407, 797, 11, 13766, 8855, 6597, 6838, 33783, 11, 1339, 13241, 2906, 5310, 295, 5645, 13, 823, 11, 718, 311, 3811, 300], "avg_logprob": -0.1723030789257729, "compression_ratio": 1.4863387978142077, "no_speech_prob": 0.0, "words": [{"start": 669.18, "end": 669.46, "word": " It", "probability": 0.351318359375}, {"start": 669.46, "end": 669.74, "word": " is", "probability": 0.912109375}, {"start": 669.74, "end": 670.1, "word": " bell", "probability": 0.9013671875}, {"start": 670.1, "end": 670.48, "word": "-shaped,", "probability": 0.6373291015625}, {"start": 671.54, "end": 671.64, "word": " so", "probability": 0.93310546875}, {"start": 671.64, "end": 671.92, "word": " 5", "probability": 0.82666015625}, {"start": 671.92, "end": 672.34, "word": "%", "probability": 0.96044921875}, {"start": 672.34, "end": 673.1, "word": " for", "probability": 0.9482421875}, {"start": 673.1, "end": 673.44, "word": " both", "probability": 0.8818359375}, {"start": 673.44, "end": 674.24, "word": " directions.", "probability": 0.84521484375}, {"start": 674.62, "end": 675.0, "word": " So", "probability": 0.6669921875}, {"start": 675.0, "end": 675.2, "word": " here", "probability": 0.78369140625}, {"start": 675.2, "end": 675.38, "word": " we", "probability": 0.81494140625}, {"start": 675.38, "end": 675.64, "word": " have", "probability": 0.9423828125}, {"start": 675.64, "end": 675.82, "word": " 2", "probability": 0.95751953125}, {"start": 675.82, 
"end": 676.32, "word": ".5,", "probability": 0.998046875}, {"start": 676.94, "end": 677.18, "word": " and", "probability": 0.93701171875}, {"start": 677.18, "end": 677.32, "word": " the", "probability": 0.796875}, {"start": 677.32, "end": 677.52, "word": " other", "probability": 0.88916015625}, {"start": 677.52, "end": 677.72, "word": " one", "probability": 0.91748046875}, {"start": 677.72, "end": 677.98, "word": " also", "probability": 0.77734375}, {"start": 677.98, "end": 678.18, "word": " 2", "probability": 0.96630859375}, {"start": 678.18, "end": 678.66, "word": ".5.", "probability": 0.999755859375}, {"start": 679.16, "end": 679.48, "word": " So", "probability": 0.9638671875}, {"start": 679.48, "end": 679.64, "word": " the", "probability": 0.830078125}, {"start": 679.64, "end": 679.84, "word": " area", "probability": 0.919921875}, {"start": 679.84, "end": 680.0, "word": " in", "probability": 0.46142578125}, {"start": 680.0, "end": 680.08, "word": " the", "probability": 0.89990234375}, {"start": 680.08, "end": 680.26, "word": " upper", "probability": 0.68212890625}, {"start": 680.26, "end": 680.52, "word": " tail", "probability": 0.880859375}, {"start": 680.52, "end": 680.66, "word": " is", "probability": 0.94482421875}, {"start": 680.66, "end": 680.82, "word": " 2", "probability": 0.99853515625}, {"start": 680.82, "end": 681.36, "word": ".5,", "probability": 0.999755859375}, {"start": 682.0, "end": 682.28, "word": " so", "probability": 0.9462890625}, {"start": 682.28, "end": 682.52, "word": " this", "probability": 0.9482421875}, {"start": 682.52, "end": 682.62, "word": " is", "probability": 0.94091796875}, {"start": 682.62, "end": 682.74, "word": " my", "probability": 0.9541015625}, {"start": 682.74, "end": 682.94, "word": " 2", "probability": 0.99853515625}, {"start": 682.94, "end": 683.4, "word": ".5.", "probability": 0.999267578125}, {"start": 684.2, "end": 684.4, "word": " So", "probability": 0.9638671875}, {"start": 684.4, "end": 684.68, "word": " again,", 
"probability": 0.89501953125}, {"start": 685.16, "end": 685.66, "word": " columns", "probability": 0.9453125}, {"start": 685.66, "end": 687.58, "word": " represents", "probability": 0.77197265625}, {"start": 687.58, "end": 688.82, "word": " upper", "probability": 0.76904296875}, {"start": 688.82, "end": 689.14, "word": " tail", "probability": 0.81103515625}, {"start": 689.14, "end": 689.72, "word": " probabilities,", "probability": 0.8935546875}, {"start": 690.98, "end": 691.52, "word": " while", "probability": 0.94921875}, {"start": 691.52, "end": 692.04, "word": " rows", "probability": 0.88037109375}, {"start": 692.04, "end": 692.96, "word": " represent", "probability": 0.83740234375}, {"start": 692.96, "end": 693.66, "word": " degrees", "probability": 0.95263671875}, {"start": 693.66, "end": 693.86, "word": " of", "probability": 0.96826171875}, {"start": 693.86, "end": 694.12, "word": " freedom.", "probability": 0.943359375}, {"start": 695.3, "end": 695.64, "word": " Now,", "probability": 0.96337890625}, {"start": 695.76, "end": 696.24, "word": " let's", "probability": 0.969970703125}, {"start": 696.24, "end": 696.62, "word": " imagine", "probability": 0.90625}, {"start": 696.62, "end": 697.06, "word": " that", "probability": 0.93603515625}], "temperature": 1.0}, {"id": 26, "seek": 72693, "start": 698.03, "end": 726.93, "text": " N equals 20, for example. Now alpha is 5, so that means alpha over 2, as we mentioned, 2.5%. Here we should look at degrees of freedom. Equal N minus 1. That's 19. So this is 19. Go all the way up to 0 to 5. 
So this is your answer.", "tokens": [426, 6915, 945, 11, 337, 1365, 13, 823, 8961, 307, 1025, 11, 370, 300, 1355, 8961, 670, 568, 11, 382, 321, 2835, 11, 568, 13, 20, 6856, 1692, 321, 820, 574, 412, 5310, 295, 5645, 13, 15624, 304, 426, 3175, 502, 13, 663, 311, 1294, 13, 407, 341, 307, 1294, 13, 1037, 439, 264, 636, 493, 281, 1958, 281, 1025, 13, 407, 341, 307, 428, 1867, 13], "avg_logprob": -0.20921414959080079, "compression_ratio": 1.3647058823529412, "no_speech_prob": 0.0, "words": [{"start": 698.03, "end": 698.35, "word": " N", "probability": 0.2288818359375}, {"start": 698.35, "end": 698.61, "word": " equals", "probability": 0.294677734375}, {"start": 698.61, "end": 700.73, "word": " 20,", "probability": 0.6279296875}, {"start": 701.61, "end": 701.91, "word": " for", "probability": 0.9404296875}, {"start": 701.91, "end": 702.25, "word": " example.", "probability": 0.97216796875}, {"start": 703.11, "end": 703.35, "word": " Now", "probability": 0.89208984375}, {"start": 703.35, "end": 703.63, "word": " alpha", "probability": 0.416748046875}, {"start": 703.63, "end": 703.85, "word": " is", "probability": 0.91357421875}, {"start": 703.85, "end": 704.21, "word": " 5,", "probability": 0.71533203125}, {"start": 704.77, "end": 704.91, "word": " so", "probability": 0.88232421875}, {"start": 704.91, "end": 705.13, "word": " that", "probability": 0.92041015625}, {"start": 705.13, "end": 705.49, "word": " means", "probability": 0.9013671875}, {"start": 705.49, "end": 706.25, "word": " alpha", "probability": 0.7626953125}, {"start": 706.25, "end": 706.51, "word": " over", "probability": 0.9072265625}, {"start": 706.51, "end": 706.69, "word": " 2,", "probability": 0.89599609375}, {"start": 706.71, "end": 706.83, "word": " as", "probability": 0.935546875}, {"start": 706.83, "end": 706.95, "word": " we", "probability": 0.87548828125}, {"start": 706.95, "end": 707.19, "word": " mentioned,", "probability": 0.82861328125}, {"start": 707.33, "end": 707.39, "word": " 2", 
"probability": 0.51953125}, {"start": 707.39, "end": 708.35, "word": ".5%.", "probability": 0.87353515625}, {"start": 708.35, "end": 709.77, "word": " Here", "probability": 0.845703125}, {"start": 709.77, "end": 709.93, "word": " we", "probability": 0.716796875}, {"start": 709.93, "end": 710.21, "word": " should", "probability": 0.95703125}, {"start": 710.21, "end": 710.47, "word": " look", "probability": 0.9658203125}, {"start": 710.47, "end": 710.69, "word": " at", "probability": 0.9541015625}, {"start": 710.69, "end": 710.97, "word": " degrees", "probability": 0.92578125}, {"start": 710.97, "end": 711.19, "word": " of", "probability": 0.96923828125}, {"start": 711.19, "end": 711.57, "word": " freedom.", "probability": 0.94384765625}, {"start": 712.53, "end": 713.13, "word": " Equal", "probability": 0.75927734375}, {"start": 713.13, "end": 713.27, "word": " N", "probability": 0.71142578125}, {"start": 713.27, "end": 713.55, "word": " minus", "probability": 0.94677734375}, {"start": 713.55, "end": 713.91, "word": " 1.", "probability": 0.923828125}, {"start": 714.95, "end": 715.55, "word": " That's", "probability": 0.90673828125}, {"start": 715.55, "end": 716.07, "word": " 19.", "probability": 0.94677734375}, {"start": 717.47, "end": 717.97, "word": " So", "probability": 0.9287109375}, {"start": 717.97, "end": 718.17, "word": " this", "probability": 0.85986328125}, {"start": 718.17, "end": 718.27, "word": " is", "probability": 0.9345703125}, {"start": 718.27, "end": 718.69, "word": " 19.", "probability": 0.9580078125}, {"start": 720.67, "end": 721.19, "word": " Go", "probability": 0.94140625}, {"start": 721.19, "end": 721.53, "word": " all", "probability": 0.94677734375}, {"start": 721.53, "end": 721.65, "word": " the", "probability": 0.9189453125}, {"start": 721.65, "end": 721.81, "word": " way", "probability": 0.9521484375}, {"start": 721.81, "end": 722.03, "word": " up", "probability": 0.9638671875}, {"start": 722.03, "end": 722.45, "word": " to", "probability": 
0.96826171875}, {"start": 722.45, "end": 723.81, "word": " 0", "probability": 0.84033203125}, {"start": 723.81, "end": 723.99, "word": " to", "probability": 0.9677734375}, {"start": 723.99, "end": 724.35, "word": " 5.", "probability": 0.99365234375}, {"start": 725.57, "end": 726.17, "word": " So", "probability": 0.9560546875}, {"start": 726.17, "end": 726.37, "word": " this", "probability": 0.9296875}, {"start": 726.37, "end": 726.47, "word": " is", "probability": 0.9404296875}, {"start": 726.47, "end": 726.63, "word": " your", "probability": 0.8857421875}, {"start": 726.63, "end": 726.93, "word": " answer.", "probability": 0.94482421875}], "temperature": 1.0}, {"id": 27, "seek": 74939, "start": 728.17, "end": 749.39, "text": " 2.093. So T alpha over 2 equal 2.093. This is the way how can we use the T distribution. Now let's see if we have different confidence level.", "tokens": [568, 13, 13811, 18, 13, 407, 314, 8961, 670, 568, 2681, 568, 13, 13811, 18, 13, 639, 307, 264, 636, 577, 393, 321, 764, 264, 314, 7316, 13, 823, 718, 311, 536, 498, 321, 362, 819, 6687, 1496, 13], "avg_logprob": -0.2476562552154064, "compression_ratio": 1.2241379310344827, "no_speech_prob": 0.0, "words": [{"start": 728.1700000000001, "end": 728.85, "word": " 2", "probability": 0.356201171875}, {"start": 728.85, "end": 729.53, "word": ".093.", "probability": 0.9576822916666666}, {"start": 730.07, "end": 730.49, "word": " So", "probability": 0.93212890625}, {"start": 730.49, "end": 730.69, "word": " T", "probability": 0.6845703125}, {"start": 730.69, "end": 730.97, "word": " alpha", "probability": 0.66650390625}, {"start": 730.97, "end": 731.25, "word": " over", "probability": 0.92529296875}, {"start": 731.25, "end": 731.65, "word": " 2", "probability": 0.662109375}, {"start": 731.65, "end": 733.89, "word": " equal", "probability": 0.20751953125}, {"start": 733.89, "end": 734.19, "word": " 2", "probability": 0.6171875}, {"start": 734.19, "end": 736.97, "word": ".093.", "probability": 
0.7237955729166666}, {"start": 737.81, "end": 738.49, "word": " This", "probability": 0.77685546875}, {"start": 738.49, "end": 738.61, "word": " is", "probability": 0.9453125}, {"start": 738.61, "end": 738.73, "word": " the", "probability": 0.865234375}, {"start": 738.73, "end": 738.83, "word": " way", "probability": 0.96337890625}, {"start": 738.83, "end": 738.97, "word": " how", "probability": 0.689453125}, {"start": 738.97, "end": 739.11, "word": " can", "probability": 0.74267578125}, {"start": 739.11, "end": 739.25, "word": " we", "probability": 0.92431640625}, {"start": 739.25, "end": 739.59, "word": " use", "probability": 0.87451171875}, {"start": 739.59, "end": 740.01, "word": " the", "probability": 0.908203125}, {"start": 740.01, "end": 740.41, "word": " T", "probability": 0.87890625}, {"start": 740.41, "end": 740.91, "word": " distribution.", "probability": 0.6904296875}, {"start": 743.01, "end": 743.57, "word": " Now", "probability": 0.95166015625}, {"start": 743.57, "end": 743.87, "word": " let's", "probability": 0.838623046875}, {"start": 743.87, "end": 744.09, "word": " see", "probability": 0.460205078125}, {"start": 744.09, "end": 744.41, "word": " if", "probability": 0.94482421875}, {"start": 744.41, "end": 744.75, "word": " we", "probability": 0.95849609375}, {"start": 744.75, "end": 747.47, "word": " have", "probability": 0.9248046875}, {"start": 747.47, "end": 748.15, "word": " different", "probability": 0.86474609375}, {"start": 748.15, "end": 749.07, "word": " confidence", "probability": 0.9833984375}, {"start": 749.07, "end": 749.39, "word": " level.", "probability": 0.83203125}], "temperature": 1.0}, {"id": 28, "seek": 78594, "start": 757.52, "end": 785.94, "text": " We got 2.093. So T, 0 to 5. Now for the same size, for the same sample size, let's talk about 90% confidence. 90% means 10% left. Five to the right. 
So alpha.", "tokens": [492, 658, 568, 13, 13811, 18, 13, 407, 314, 11, 1958, 281, 1025, 13, 823, 337, 264, 912, 2744, 11, 337, 264, 912, 6889, 2744, 11, 718, 311, 751, 466, 4289, 4, 6687, 13, 4289, 4, 1355, 1266, 4, 1411, 13, 9436, 281, 264, 558, 13, 407, 8961, 13], "avg_logprob": -0.23656249672174454, "compression_ratio": 1.2421875, "no_speech_prob": 0.0, "words": [{"start": 757.52, "end": 757.84, "word": " We", "probability": 0.44189453125}, {"start": 757.84, "end": 758.24, "word": " got", "probability": 0.83935546875}, {"start": 758.24, "end": 758.9, "word": " 2", "probability": 0.888671875}, {"start": 758.9, "end": 761.04, "word": ".093.", "probability": 0.935546875}, {"start": 762.42, "end": 762.88, "word": " So", "probability": 0.83154296875}, {"start": 762.88, "end": 763.18, "word": " T,", "probability": 0.380126953125}, {"start": 763.68, "end": 764.46, "word": " 0", "probability": 0.7705078125}, {"start": 764.46, "end": 764.62, "word": " to", "probability": 0.8994140625}, {"start": 764.62, "end": 765.12, "word": " 5.", "probability": 0.982421875}, {"start": 770.34, "end": 771.14, "word": " Now", "probability": 0.56201171875}, {"start": 771.14, "end": 771.34, "word": " for", "probability": 0.625}, {"start": 771.34, "end": 771.52, "word": " the", "probability": 0.91748046875}, {"start": 771.52, "end": 771.88, "word": " same", "probability": 0.90283203125}, {"start": 771.88, "end": 772.82, "word": " size,", "probability": 0.79248046875}, {"start": 773.5, "end": 773.94, "word": " for", "probability": 0.9248046875}, {"start": 773.94, "end": 774.1, "word": " the", "probability": 0.90869140625}, {"start": 774.1, "end": 774.28, "word": " same", "probability": 0.86376953125}, {"start": 774.28, "end": 774.56, "word": " sample", "probability": 0.87353515625}, {"start": 774.56, "end": 775.0, "word": " size,", "probability": 0.86328125}, {"start": 775.44, "end": 775.68, "word": " let's", "probability": 0.960205078125}, {"start": 775.68, "end": 775.88, "word": " talk", 
"probability": 0.8837890625}, {"start": 775.88, "end": 776.22, "word": " about", "probability": 0.9072265625}, {"start": 776.22, "end": 776.66, "word": " 90", "probability": 0.95947265625}, {"start": 776.66, "end": 776.94, "word": "%", "probability": 0.82373046875}, {"start": 776.94, "end": 777.52, "word": " confidence.", "probability": 0.95751953125}, {"start": 779.46, "end": 779.98, "word": " 90", "probability": 0.9443359375}, {"start": 779.98, "end": 780.2, "word": "%", "probability": 0.98779296875}, {"start": 780.2, "end": 780.58, "word": " means", "probability": 0.88720703125}, {"start": 780.58, "end": 780.9, "word": " 10", "probability": 0.94091796875}, {"start": 780.9, "end": 781.3, "word": "%", "probability": 0.99169921875}, {"start": 781.3, "end": 781.64, "word": " left.", "probability": 0.9384765625}, {"start": 782.14, "end": 782.94, "word": " Five", "probability": 0.313720703125}, {"start": 782.94, "end": 783.14, "word": " to", "probability": 0.96484375}, {"start": 783.14, "end": 783.28, "word": " the", "probability": 0.92041015625}, {"start": 783.28, "end": 783.58, "word": " right.", "probability": 0.92138671875}, {"start": 785.32, "end": 785.54, "word": " So", "probability": 0.61279296875}, {"start": 785.54, "end": 785.94, "word": " alpha.", "probability": 0.77783203125}], "temperature": 1.0}, {"id": 29, "seek": 81097, "start": 793.29, "end": 810.97, "text": " This is your 0.05, 1.729. 
Now as C level increases, T also increases.", "tokens": [639, 307, 428, 1958, 13, 13328, 11, 502, 13, 22, 11871, 13, 823, 382, 383, 1496, 8637, 11, 314, 611, 8637, 13], "avg_logprob": -0.3158967443134474, "compression_ratio": 1.0294117647058822, "no_speech_prob": 0.0, "words": [{"start": 793.29, "end": 793.63, "word": " This", "probability": 0.270751953125}, {"start": 793.63, "end": 793.73, "word": " is", "probability": 0.91796875}, {"start": 793.73, "end": 793.89, "word": " your", "probability": 0.81494140625}, {"start": 793.89, "end": 794.13, "word": " 0", "probability": 0.442626953125}, {"start": 794.13, "end": 794.45, "word": ".05,", "probability": 0.61669921875}, {"start": 795.91, "end": 796.19, "word": " 1", "probability": 0.58935546875}, {"start": 796.19, "end": 797.41, "word": ".729.", "probability": 0.9091796875}, {"start": 804.67, "end": 805.59, "word": " Now", "probability": 0.8486328125}, {"start": 805.59, "end": 805.99, "word": " as", "probability": 0.630859375}, {"start": 805.99, "end": 806.15, "word": " C", "probability": 0.6142578125}, {"start": 806.15, "end": 806.33, "word": " level", "probability": 0.6826171875}, {"start": 806.33, "end": 806.99, "word": " increases,", "probability": 0.919921875}, {"start": 808.37, "end": 809.77, "word": " T", "probability": 0.95947265625}, {"start": 809.77, "end": 810.23, "word": " also", "probability": 0.87158203125}, {"start": 810.23, "end": 810.97, "word": " increases.", "probability": 0.93798828125}], "temperature": 1.0}, {"id": 30, "seek": 84135, "start": 812.21, "end": 841.35, "text": " When 90% we got T to be 1.729. When C11 increases up to 5%, we have 95. My T becomes 2.093. So this is the way how can we use the TMR. Here in the slide, we have another example. That is, we have a sample size of three. 
The use of freedom is two.", "tokens": [1133, 4289, 4, 321, 658, 314, 281, 312, 502, 13, 22, 11871, 13, 1133, 383, 5348, 8637, 493, 281, 1025, 8923, 321, 362, 13420, 13, 1222, 314, 3643, 568, 13, 13811, 18, 13, 407, 341, 307, 264, 636, 577, 393, 321, 764, 264, 314, 21173, 13, 1692, 294, 264, 4137, 11, 321, 362, 1071, 1365, 13, 663, 307, 11, 321, 362, 257, 6889, 2744, 295, 1045, 13, 440, 764, 295, 5645, 307, 732, 13], "avg_logprob": -0.23770832856496174, "compression_ratio": 1.436046511627907, "no_speech_prob": 0.0, "words": [{"start": 812.21, "end": 812.53, "word": " When", "probability": 0.65283203125}, {"start": 812.53, "end": 813.17, "word": " 90", "probability": 0.497314453125}, {"start": 813.17, "end": 813.93, "word": "%", "probability": 0.6171875}, {"start": 813.93, "end": 814.37, "word": " we", "probability": 0.8603515625}, {"start": 814.37, "end": 814.69, "word": " got", "probability": 0.88427734375}, {"start": 814.69, "end": 815.39, "word": " T", "probability": 0.71142578125}, {"start": 815.39, "end": 815.57, "word": " to", "probability": 0.94384765625}, {"start": 815.57, "end": 815.71, "word": " be", "probability": 0.951171875}, {"start": 815.71, "end": 815.93, "word": " 1", "probability": 0.98681640625}, {"start": 815.93, "end": 816.67, "word": ".729.", "probability": 0.97900390625}, {"start": 817.45, "end": 817.81, "word": " When", "probability": 0.9033203125}, {"start": 817.81, "end": 818.85, "word": " C11", "probability": 0.677001953125}, {"start": 818.85, "end": 819.25, "word": " increases", "probability": 0.88671875}, {"start": 819.25, "end": 819.61, "word": " up", "probability": 0.95458984375}, {"start": 819.61, "end": 819.87, "word": " to", "probability": 0.96044921875}, {"start": 819.87, "end": 820.61, "word": " 5%,", "probability": 0.86669921875}, {"start": 820.61, "end": 820.85, "word": " we", "probability": 0.9404296875}, {"start": 820.85, "end": 821.11, "word": " have", "probability": 0.94970703125}, {"start": 821.11, "end": 821.97, "word": " 
95.", "probability": 0.98583984375}, {"start": 822.35, "end": 822.91, "word": " My", "probability": 0.80029296875}, {"start": 822.91, "end": 823.31, "word": " T", "probability": 0.8662109375}, {"start": 823.31, "end": 824.05, "word": " becomes", "probability": 0.85888671875}, {"start": 824.05, "end": 824.31, "word": " 2", "probability": 0.990234375}, {"start": 824.31, "end": 825.39, "word": ".093.", "probability": 0.8968098958333334}, {"start": 825.61, "end": 825.79, "word": " So", "probability": 0.95751953125}, {"start": 825.79, "end": 825.99, "word": " this", "probability": 0.8779296875}, {"start": 825.99, "end": 826.09, "word": " is", "probability": 0.93896484375}, {"start": 826.09, "end": 826.19, "word": " the", "probability": 0.87255859375}, {"start": 826.19, "end": 826.35, "word": " way", "probability": 0.9521484375}, {"start": 826.35, "end": 826.49, "word": " how", "probability": 0.76953125}, {"start": 826.49, "end": 826.67, "word": " can", "probability": 0.83740234375}, {"start": 826.67, "end": 826.83, "word": " we", "probability": 0.95703125}, {"start": 826.83, "end": 827.25, "word": " use", "probability": 0.87109375}, {"start": 827.25, "end": 829.73, "word": " the", "probability": 0.8486328125}, {"start": 829.73, "end": 830.67, "word": " TMR.", "probability": 0.5511474609375}, {"start": 831.39, "end": 831.95, "word": " Here", "probability": 0.7958984375}, {"start": 831.95, "end": 832.09, "word": " in", "probability": 0.62255859375}, {"start": 832.09, "end": 832.21, "word": " the", "probability": 0.744140625}, {"start": 832.21, "end": 832.43, "word": " slide,", "probability": 0.8720703125}, {"start": 832.51, "end": 832.57, "word": " we", "probability": 0.95068359375}, {"start": 832.57, "end": 832.69, "word": " have", "probability": 0.9423828125}, {"start": 832.69, "end": 832.95, "word": " another", "probability": 0.9140625}, {"start": 832.95, "end": 833.45, "word": " example.", "probability": 0.9736328125}, {"start": 833.63, "end": 833.83, "word": " That", 
"probability": 0.90478515625}, {"start": 833.83, "end": 834.15, "word": " is,", "probability": 0.9482421875}, {"start": 835.25, "end": 836.73, "word": " we", "probability": 0.93701171875}, {"start": 836.73, "end": 837.11, "word": " have", "probability": 0.9443359375}, {"start": 837.11, "end": 837.49, "word": " a", "probability": 0.8486328125}, {"start": 837.49, "end": 837.75, "word": " sample", "probability": 0.8779296875}, {"start": 837.75, "end": 838.19, "word": " size", "probability": 0.85205078125}, {"start": 838.19, "end": 838.41, "word": " of", "probability": 0.96044921875}, {"start": 838.41, "end": 838.85, "word": " three.", "probability": 0.572265625}, {"start": 839.79, "end": 840.25, "word": " The", "probability": 0.450439453125}, {"start": 840.25, "end": 840.49, "word": " use", "probability": 0.3857421875}, {"start": 840.49, "end": 840.61, "word": " of", "probability": 0.83203125}, {"start": 840.61, "end": 840.81, "word": " freedom", "probability": 0.1527099609375}, {"start": 840.81, "end": 841.09, "word": " is", "probability": 0.931640625}, {"start": 841.09, "end": 841.35, "word": " two.", "probability": 0.9033203125}], "temperature": 1.0}, {"id": 31, "seek": 86991, "start": 842.31, "end": 869.91, "text": " In this case, he's interested in 90% confidence interval, so alpha is 10%. Consequently, alpha over 2 is 5%. Here, we should look at degrees of freedom equals 2 at 5%, and the answer is 2.92. 
Now, let's see how can we compare the results", "tokens": [682, 341, 1389, 11, 415, 311, 3102, 294, 4289, 4, 6687, 15035, 11, 370, 8961, 307, 1266, 6856, 2656, 46027, 11, 8961, 670, 568, 307, 1025, 6856, 1692, 11, 321, 820, 574, 412, 5310, 295, 5645, 6915, 568, 412, 1025, 8923, 293, 264, 1867, 307, 568, 13, 21821, 13, 823, 11, 718, 311, 536, 577, 393, 321, 6794, 264, 3542], "avg_logprob": -0.17302766198017558, "compression_ratio": 1.3522727272727273, "no_speech_prob": 0.0, "words": [{"start": 842.31, "end": 842.57, "word": " In", "probability": 0.83837890625}, {"start": 842.57, "end": 842.81, "word": " this", "probability": 0.94482421875}, {"start": 842.81, "end": 843.15, "word": " case,", "probability": 0.92529296875}, {"start": 843.21, "end": 843.37, "word": " he's", "probability": 0.858642578125}, {"start": 843.37, "end": 843.87, "word": " interested", "probability": 0.859375}, {"start": 843.87, "end": 844.13, "word": " in", "probability": 0.94189453125}, {"start": 844.13, "end": 844.43, "word": " 90", "probability": 0.96728515625}, {"start": 844.43, "end": 844.77, "word": "%", "probability": 0.8876953125}, {"start": 844.77, "end": 845.53, "word": " confidence", "probability": 0.9677734375}, {"start": 845.53, "end": 846.55, "word": " interval,", "probability": 0.94970703125}, {"start": 847.53, "end": 847.85, "word": " so", "probability": 0.9541015625}, {"start": 847.85, "end": 848.27, "word": " alpha", "probability": 0.8037109375}, {"start": 848.27, "end": 848.49, "word": " is", "probability": 0.94189453125}, {"start": 848.49, "end": 849.23, "word": " 10%.", "probability": 0.85498046875}, {"start": 849.23, "end": 851.13, "word": " Consequently,", "probability": 0.81201171875}, {"start": 851.21, "end": 851.43, "word": " alpha", "probability": 0.89990234375}, {"start": 851.43, "end": 851.67, "word": " over", "probability": 0.89013671875}, {"start": 851.67, "end": 851.81, "word": " 2", "probability": 0.7255859375}, {"start": 851.81, "end": 851.97, "word": " is", 
"probability": 0.94677734375}, {"start": 851.97, "end": 852.69, "word": " 5%.", "probability": 0.989013671875}, {"start": 852.69, "end": 854.01, "word": " Here,", "probability": 0.86767578125}, {"start": 854.07, "end": 854.17, "word": " we", "probability": 0.95361328125}, {"start": 854.17, "end": 854.35, "word": " should", "probability": 0.96630859375}, {"start": 854.35, "end": 854.59, "word": " look", "probability": 0.970703125}, {"start": 854.59, "end": 854.93, "word": " at", "probability": 0.96044921875}, {"start": 854.93, "end": 855.57, "word": " degrees", "probability": 0.9423828125}, {"start": 855.57, "end": 855.77, "word": " of", "probability": 0.96337890625}, {"start": 855.77, "end": 856.01, "word": " freedom", "probability": 0.94482421875}, {"start": 856.01, "end": 856.41, "word": " equals", "probability": 0.89501953125}, {"start": 856.41, "end": 856.77, "word": " 2", "probability": 0.294677734375}, {"start": 856.77, "end": 858.47, "word": " at", "probability": 0.72509765625}, {"start": 858.47, "end": 859.61, "word": " 5%,", "probability": 0.811279296875}, {"start": 859.61, "end": 860.21, "word": " and", "probability": 0.939453125}, {"start": 860.21, "end": 860.33, "word": " the", "probability": 0.82861328125}, {"start": 860.33, "end": 860.61, "word": " answer", "probability": 0.95458984375}, {"start": 860.61, "end": 860.97, "word": " is", "probability": 0.9453125}, {"start": 860.97, "end": 861.87, "word": " 2", "probability": 0.9814453125}, {"start": 861.87, "end": 862.79, "word": ".92.", "probability": 0.779052734375}, {"start": 867.55, "end": 868.15, "word": " Now,", "probability": 0.95263671875}, {"start": 868.17, "end": 868.35, "word": " let's", "probability": 0.970947265625}, {"start": 868.35, "end": 868.45, "word": " see", "probability": 0.916015625}, {"start": 868.45, "end": 868.55, "word": " how", "probability": 0.88525390625}, {"start": 868.55, "end": 868.73, "word": " can", "probability": 0.8798828125}, {"start": 868.73, "end": 868.87, "word": " 
we", "probability": 0.958984375}, {"start": 868.87, "end": 869.25, "word": " compare", "probability": 0.951171875}, {"start": 869.25, "end": 869.45, "word": " the", "probability": 0.9130859375}, {"start": 869.45, "end": 869.91, "word": " results", "probability": 0.8818359375}], "temperature": 1.0}, {"id": 32, "seek": 89707, "start": 870.75, "end": 897.07, "text": " of different degrees of freedom comparing to the standard normal distribution. Z means we have infinite degrees of freedom, I mean n equal infinity in this case. Suppose the confidence level is 80%. 80% means 20%, so 10.", "tokens": [295, 819, 5310, 295, 5645, 15763, 281, 264, 3832, 2710, 7316, 13, 1176, 1355, 321, 362, 13785, 5310, 295, 5645, 11, 286, 914, 297, 2681, 13202, 294, 341, 1389, 13, 21360, 264, 6687, 1496, 307, 4688, 6856, 4688, 4, 1355, 945, 8923, 370, 1266, 13], "avg_logprob": -0.18919837377641513, "compression_ratio": 1.4444444444444444, "no_speech_prob": 0.0, "words": [{"start": 870.75, "end": 871.13, "word": " of", "probability": 0.3935546875}, {"start": 871.13, "end": 871.93, "word": " different", "probability": 0.88037109375}, {"start": 871.93, "end": 873.79, "word": " degrees", "probability": 0.9482421875}, {"start": 873.79, "end": 873.97, "word": " of", "probability": 0.97216796875}, {"start": 873.97, "end": 874.31, "word": " freedom", "probability": 0.95947265625}, {"start": 874.31, "end": 876.49, "word": " comparing", "probability": 0.47119140625}, {"start": 876.49, "end": 877.95, "word": " to", "probability": 0.96044921875}, {"start": 877.95, "end": 878.19, "word": " the", "probability": 0.90478515625}, {"start": 878.19, "end": 878.67, "word": " standard", "probability": 0.93115234375}, {"start": 878.67, "end": 879.01, "word": " normal", "probability": 0.8603515625}, {"start": 879.01, "end": 879.59, "word": " distribution.", "probability": 0.85107421875}, {"start": 880.41, "end": 880.59, "word": " Z", "probability": 0.87890625}, {"start": 880.59, "end": 881.15, "word": " means", 
"probability": 0.88623046875}, {"start": 881.15, "end": 881.57, "word": " we", "probability": 0.908203125}, {"start": 881.57, "end": 881.91, "word": " have", "probability": 0.94677734375}, {"start": 881.91, "end": 882.75, "word": " infinite", "probability": 0.90869140625}, {"start": 882.75, "end": 883.27, "word": " degrees", "probability": 0.93701171875}, {"start": 883.27, "end": 883.47, "word": " of", "probability": 0.96484375}, {"start": 883.47, "end": 883.75, "word": " freedom,", "probability": 0.95068359375}, {"start": 884.05, "end": 884.13, "word": " I", "probability": 0.9326171875}, {"start": 884.13, "end": 884.29, "word": " mean", "probability": 0.9677734375}, {"start": 884.29, "end": 884.53, "word": " n", "probability": 0.425048828125}, {"start": 884.53, "end": 884.83, "word": " equal", "probability": 0.414794921875}, {"start": 884.83, "end": 885.37, "word": " infinity", "probability": 0.8369140625}, {"start": 885.37, "end": 886.15, "word": " in", "probability": 0.86865234375}, {"start": 886.15, "end": 886.33, "word": " this", "probability": 0.94677734375}, {"start": 886.33, "end": 886.63, "word": " case.", "probability": 0.90625}, {"start": 888.83, "end": 889.47, "word": " Suppose", "probability": 0.8466796875}, {"start": 889.47, "end": 889.75, "word": " the", "probability": 0.88623046875}, {"start": 889.75, "end": 890.13, "word": " confidence", "probability": 0.9794921875}, {"start": 890.13, "end": 890.65, "word": " level", "probability": 0.94287109375}, {"start": 890.65, "end": 891.37, "word": " is", "probability": 0.94384765625}, {"start": 891.37, "end": 892.27, "word": " 80%.", "probability": 0.736328125}, {"start": 892.27, "end": 894.57, "word": " 80", "probability": 0.89990234375}, {"start": 894.57, "end": 894.83, "word": "%", "probability": 0.99560546875}, {"start": 894.83, "end": 895.41, "word": " means", "probability": 0.91943359375}, {"start": 895.41, "end": 896.41, "word": " 20%,", "probability": 0.8017578125}, {"start": 896.41, "end": 896.75, 
"word": " so", "probability": 0.9365234375}, {"start": 896.75, "end": 897.07, "word": " 10.", "probability": 0.89453125}], "temperature": 1.0}, {"id": 33, "seek": 92547, "start": 898.51, "end": 925.47, "text": " In another tail and 10th left tail Now under degrees of freedom 10 If we go back 10 degrees of freedom Up to 10 we have this answer Now for 20 for example", "tokens": [682, 1071, 6838, 293, 1266, 392, 1411, 6838, 823, 833, 5310, 295, 5645, 1266, 759, 321, 352, 646, 1266, 5310, 295, 5645, 5858, 281, 1266, 321, 362, 341, 1867, 823, 337, 945, 337, 1365], "avg_logprob": -0.32232143197740826, "compression_ratio": 1.3716814159292035, "no_speech_prob": 0.0, "words": [{"start": 898.51, "end": 898.91, "word": " In", "probability": 0.327880859375}, {"start": 898.91, "end": 899.79, "word": " another", "probability": 0.1646728515625}, {"start": 899.79, "end": 900.21, "word": " tail", "probability": 0.68505859375}, {"start": 900.21, "end": 900.47, "word": " and", "probability": 0.60986328125}, {"start": 900.47, "end": 901.23, "word": " 10th", "probability": 0.465087890625}, {"start": 901.23, "end": 902.03, "word": " left", "probability": 0.64599609375}, {"start": 902.03, "end": 902.33, "word": " tail", "probability": 0.83837890625}, {"start": 902.33, "end": 903.53, "word": " Now", "probability": 0.489990234375}, {"start": 903.53, "end": 903.93, "word": " under", "probability": 0.72021484375}, {"start": 903.93, "end": 904.37, "word": " degrees", "probability": 0.93212890625}, {"start": 904.37, "end": 904.55, "word": " of", "probability": 0.96533203125}, {"start": 904.55, "end": 904.83, "word": " freedom", "probability": 0.92919921875}, {"start": 904.83, "end": 905.63, "word": " 10", "probability": 0.72900390625}, {"start": 905.63, "end": 907.89, "word": " If", "probability": 0.42236328125}, {"start": 907.89, "end": 908.13, "word": " we", "probability": 0.95849609375}, {"start": 908.13, "end": 908.29, "word": " go", "probability": 0.96240234375}, {"start": 908.29, "end": 
908.77, "word": " back", "probability": 0.88134765625}, {"start": 908.77, "end": 910.45, "word": " 10", "probability": 0.6416015625}, {"start": 910.45, "end": 914.53, "word": " degrees", "probability": 0.91650390625}, {"start": 914.53, "end": 914.75, "word": " of", "probability": 0.96875}, {"start": 914.75, "end": 915.15, "word": " freedom", "probability": 0.93359375}, {"start": 915.15, "end": 917.25, "word": " Up", "probability": 0.74267578125}, {"start": 917.25, "end": 917.41, "word": " to", "probability": 0.96875}, {"start": 917.41, "end": 917.59, "word": " 10", "probability": 0.962890625}, {"start": 917.59, "end": 917.69, "word": " we", "probability": 0.69140625}, {"start": 917.69, "end": 917.89, "word": " have", "probability": 0.94482421875}, {"start": 917.89, "end": 918.13, "word": " this", "probability": 0.94921875}, {"start": 918.13, "end": 919.11, "word": " answer", "probability": 0.96630859375}, {"start": 919.11, "end": 924.35, "word": " Now", "probability": 0.5556640625}, {"start": 924.35, "end": 924.55, "word": " for", "probability": 0.8359375}, {"start": 924.55, "end": 924.87, "word": " 20", "probability": 0.94970703125}, {"start": 924.87, "end": 925.09, "word": " for", "probability": 0.86279296875}, {"start": 925.09, "end": 925.47, "word": " example", "probability": 0.97265625}], "temperature": 1.0}, {"id": 34, "seek": 95668, "start": 927.2, "end": 956.68, "text": " We have 1.325. Look at the fourth column again. This is 10. 1.372. For 20, we have 1.325. For 30, 1.310. 
That's for the T values.", "tokens": [492, 362, 502, 13, 18, 6074, 13, 2053, 412, 264, 6409, 7738, 797, 13, 639, 307, 1266, 13, 502, 13, 12851, 17, 13, 1171, 945, 11, 321, 362, 502, 13, 18, 6074, 13, 1171, 2217, 11, 502, 13, 18, 3279, 13, 663, 311, 337, 264, 314, 4190, 13], "avg_logprob": -0.10339604835121, "compression_ratio": 1.1926605504587156, "no_speech_prob": 0.0, "words": [{"start": 927.2, "end": 927.42, "word": " We", "probability": 0.77197265625}, {"start": 927.42, "end": 927.58, "word": " have", "probability": 0.91650390625}, {"start": 927.58, "end": 927.8, "word": " 1", "probability": 0.94921875}, {"start": 927.8, "end": 928.58, "word": ".325.", "probability": 0.9615885416666666}, {"start": 936.06, "end": 936.82, "word": " Look", "probability": 0.6611328125}, {"start": 936.82, "end": 936.94, "word": " at", "probability": 0.96337890625}, {"start": 936.94, "end": 937.08, "word": " the", "probability": 0.91552734375}, {"start": 937.08, "end": 937.32, "word": " fourth", "probability": 0.857421875}, {"start": 937.32, "end": 937.58, "word": " column", "probability": 0.82080078125}, {"start": 937.58, "end": 937.94, "word": " again.", "probability": 0.92822265625}, {"start": 938.98, "end": 939.18, "word": " This", "probability": 0.8544921875}, {"start": 939.18, "end": 939.34, "word": " is", "probability": 0.9423828125}, {"start": 939.34, "end": 939.58, "word": " 10.", "probability": 0.8408203125}, {"start": 941.54, "end": 942.3, "word": " 1", "probability": 0.97998046875}, {"start": 942.3, "end": 943.96, "word": ".372.", "probability": 0.9669596354166666}, {"start": 944.42, "end": 944.7, "word": " For", "probability": 0.92578125}, {"start": 944.7, "end": 945.02, "word": " 20,", "probability": 0.779296875}, {"start": 945.06, "end": 945.22, "word": " we", "probability": 0.95654296875}, {"start": 945.22, "end": 945.52, "word": " have", "probability": 0.94677734375}, {"start": 945.52, "end": 946.12, "word": " 1", "probability": 0.99755859375}, {"start": 946.12, 
"end": 946.98, "word": ".325.", "probability": 0.9739583333333334}, {"start": 948.16, "end": 948.62, "word": " For", "probability": 0.96435546875}, {"start": 948.62, "end": 949.12, "word": " 30,", "probability": 0.9638671875}, {"start": 949.38, "end": 949.74, "word": " 1", "probability": 0.9970703125}, {"start": 949.74, "end": 951.0, "word": ".310.", "probability": 0.9871419270833334}, {"start": 952.96, "end": 953.72, "word": " That's", "probability": 0.9169921875}, {"start": 953.72, "end": 954.1, "word": " for", "probability": 0.951171875}, {"start": 954.1, "end": 954.58, "word": " the", "probability": 0.92041015625}, {"start": 954.58, "end": 956.14, "word": " T", "probability": 0.68115234375}, {"start": 956.14, "end": 956.68, "word": " values.", "probability": 0.814453125}], "temperature": 1.0}, {"id": 35, "seek": 98441, "start": 957.17, "end": 984.41, "text": " Now if you go back to the normal table and just look at 1%. This is the z table. 1% is the closest value under 1.28. So that's your z-score. 
Now the corresponding", "tokens": [823, 498, 291, 352, 646, 281, 264, 2710, 3199, 293, 445, 574, 412, 502, 6856, 639, 307, 264, 710, 3199, 13, 502, 4, 307, 264, 13699, 2158, 833, 502, 13, 11205, 13, 407, 300, 311, 428, 710, 12, 4417, 418, 13, 823, 264, 11760], "avg_logprob": -0.2531250026490953, "compression_ratio": 1.2538461538461538, "no_speech_prob": 0.0, "words": [{"start": 957.17, "end": 957.51, "word": " Now", "probability": 0.744140625}, {"start": 957.51, "end": 957.77, "word": " if", "probability": 0.59765625}, {"start": 957.77, "end": 957.95, "word": " you", "probability": 0.75390625}, {"start": 957.95, "end": 958.37, "word": " go", "probability": 0.92822265625}, {"start": 958.37, "end": 958.71, "word": " back", "probability": 0.86962890625}, {"start": 958.71, "end": 958.99, "word": " to", "probability": 0.96044921875}, {"start": 958.99, "end": 959.13, "word": " the", "probability": 0.87255859375}, {"start": 959.13, "end": 959.47, "word": " normal", "probability": 0.89892578125}, {"start": 959.47, "end": 959.87, "word": " table", "probability": 0.861328125}, {"start": 959.87, "end": 962.37, "word": " and", "probability": 0.52197265625}, {"start": 962.37, "end": 962.65, "word": " just", "probability": 0.859375}, {"start": 962.65, "end": 962.95, "word": " look", "probability": 0.96240234375}, {"start": 962.95, "end": 963.43, "word": " at", "probability": 0.9619140625}, {"start": 963.43, "end": 964.93, "word": " 1%.", "probability": 0.5439453125}, {"start": 964.93, "end": 967.91, "word": " This", "probability": 0.75732421875}, {"start": 967.91, "end": 968.03, "word": " is", "probability": 0.94189453125}, {"start": 968.03, "end": 968.17, "word": " the", "probability": 0.896484375}, {"start": 968.17, "end": 968.33, "word": " z", "probability": 0.533203125}, {"start": 968.33, "end": 968.73, "word": " table.", "probability": 0.5048828125}, {"start": 969.75, "end": 970.07, "word": " 1", "probability": 0.81201171875}, {"start": 970.07, "end": 970.65, "word": "%", 
"probability": 0.485107421875}, {"start": 970.65, "end": 973.47, "word": " is", "probability": 0.488037109375}, {"start": 973.47, "end": 973.69, "word": " the", "probability": 0.90380859375}, {"start": 973.69, "end": 974.09, "word": " closest", "probability": 0.91357421875}, {"start": 974.09, "end": 974.89, "word": " value", "probability": 0.97802734375}, {"start": 974.89, "end": 976.17, "word": " under", "probability": 0.71044921875}, {"start": 976.17, "end": 976.49, "word": " 1", "probability": 0.97607421875}, {"start": 976.49, "end": 978.79, "word": ".28.", "probability": 0.94921875}, {"start": 979.59, "end": 980.35, "word": " So", "probability": 0.837890625}, {"start": 980.35, "end": 980.67, "word": " that's", "probability": 0.921630859375}, {"start": 980.67, "end": 980.93, "word": " your", "probability": 0.89208984375}, {"start": 980.93, "end": 981.15, "word": " z", "probability": 0.96728515625}, {"start": 981.15, "end": 981.53, "word": "-score.", "probability": 0.7443033854166666}, {"start": 982.99, "end": 983.75, "word": " Now", "probability": 0.95166015625}, {"start": 983.75, "end": 983.97, "word": " the", "probability": 0.7626953125}, {"start": 983.97, "end": 984.41, "word": " corresponding", "probability": 0.76953125}], "temperature": 1.0}, {"id": 36, "seek": 101229, "start": 985.63, "end": 1012.29, "text": " T values are 1.372, 1.325, 1.310, the exact Z is 1.18. Now, here we can see that the difference between T and Z becomes smaller and smaller as N, or degrees of freedom, increases. 
Here 1.372, then it becomes", "tokens": [314, 4190, 366, 502, 13, 12851, 17, 11, 502, 13, 18, 6074, 11, 502, 13, 18, 3279, 11, 264, 1900, 1176, 307, 502, 13, 6494, 13, 823, 11, 510, 321, 393, 536, 300, 264, 2649, 1296, 314, 293, 1176, 3643, 4356, 293, 4356, 382, 426, 11, 420, 5310, 295, 5645, 11, 8637, 13, 1692, 502, 13, 12851, 17, 11, 550, 309, 3643], "avg_logprob": -0.16592261479014442, "compression_ratio": 1.4054054054054055, "no_speech_prob": 5.960464477539063e-08, "words": [{"start": 985.63, "end": 985.93, "word": " T", "probability": 0.59619140625}, {"start": 985.93, "end": 986.67, "word": " values", "probability": 0.76123046875}, {"start": 986.67, "end": 987.17, "word": " are", "probability": 0.92724609375}, {"start": 987.17, "end": 988.15, "word": " 1", "probability": 0.9462890625}, {"start": 988.15, "end": 989.83, "word": ".372,", "probability": 0.9453125}, {"start": 990.09, "end": 991.01, "word": " 1", "probability": 0.97705078125}, {"start": 991.01, "end": 991.71, "word": ".325,", "probability": 0.9519856770833334}, {"start": 992.51, "end": 992.73, "word": " 1", "probability": 0.96533203125}, {"start": 992.73, "end": 993.51, "word": ".310,", "probability": 0.9851888020833334}, {"start": 994.15, "end": 994.39, "word": " the", "probability": 0.79296875}, {"start": 994.39, "end": 994.89, "word": " exact", "probability": 0.92333984375}, {"start": 994.89, "end": 995.21, "word": " Z", "probability": 0.4296875}, {"start": 995.21, "end": 995.99, "word": " is", "probability": 0.9345703125}, {"start": 995.99, "end": 996.19, "word": " 1", "probability": 0.99462890625}, {"start": 996.19, "end": 996.75, "word": ".18.", "probability": 0.9921875}, {"start": 997.45, "end": 997.97, "word": " Now,", "probability": 0.958984375}, {"start": 998.27, "end": 998.53, "word": " here", "probability": 0.85546875}, {"start": 998.53, "end": 998.69, "word": " we", "probability": 0.90185546875}, {"start": 998.69, "end": 998.99, "word": " can", "probability": 0.9482421875}, {"start": 
998.99, "end": 999.97, "word": " see", "probability": 0.916015625}, {"start": 999.97, "end": 1000.29, "word": " that", "probability": 0.89892578125}, {"start": 1000.29, "end": 1000.55, "word": " the", "probability": 0.8974609375}, {"start": 1000.55, "end": 1000.97, "word": " difference", "probability": 0.87109375}, {"start": 1000.97, "end": 1001.59, "word": " between", "probability": 0.87255859375}, {"start": 1001.59, "end": 1002.45, "word": " T", "probability": 0.75390625}, {"start": 1002.45, "end": 1002.65, "word": " and", "probability": 0.943359375}, {"start": 1002.65, "end": 1002.77, "word": " Z", "probability": 0.9892578125}, {"start": 1002.77, "end": 1003.21, "word": " becomes", "probability": 0.80078125}, {"start": 1003.21, "end": 1003.59, "word": " smaller", "probability": 0.8916015625}, {"start": 1003.59, "end": 1003.85, "word": " and", "probability": 0.9287109375}, {"start": 1003.85, "end": 1004.23, "word": " smaller", "probability": 0.8681640625}, {"start": 1004.23, "end": 1004.91, "word": " as", "probability": 0.869140625}, {"start": 1004.91, "end": 1005.29, "word": " N,", "probability": 0.46630859375}, {"start": 1005.53, "end": 1005.71, "word": " or", "probability": 0.7275390625}, {"start": 1005.71, "end": 1005.95, "word": " degrees", "probability": 0.91357421875}, {"start": 1005.95, "end": 1006.09, "word": " of", "probability": 0.96044921875}, {"start": 1006.09, "end": 1006.45, "word": " freedom,", "probability": 0.9423828125}, {"start": 1007.21, "end": 1007.69, "word": " increases.", "probability": 0.93701171875}, {"start": 1008.37, "end": 1009.01, "word": " Here", "probability": 0.86328125}, {"start": 1009.01, "end": 1009.27, "word": " 1", "probability": 0.54833984375}, {"start": 1009.27, "end": 1010.21, "word": ".372,", "probability": 0.9658203125}, {"start": 1011.25, "end": 1011.53, "word": " then", "probability": 0.85107421875}, {"start": 1011.53, "end": 1011.71, "word": " it", "probability": 0.94775390625}, {"start": 1011.71, "end": 1012.29, 
"word": " becomes", "probability": 0.86865234375}], "temperature": 1.0}, {"id": 37, "seek": 103986, "start": 1013.75, "end": 1039.87, "text": " less than this value, so 1.325, 1.310, very close, 1.28. Similarly, we can do for the other confidence levels. For example, for 90%, we get this result. And also we have similar scenario here for 95 and 99. So as degrees of freedom increases, T goes to Z.", "tokens": [1570, 813, 341, 2158, 11, 370, 502, 13, 18, 6074, 11, 502, 13, 18, 3279, 11, 588, 1998, 11, 502, 13, 11205, 13, 13157, 11, 321, 393, 360, 337, 264, 661, 6687, 4358, 13, 1171, 1365, 11, 337, 4289, 8923, 321, 483, 341, 1874, 13, 400, 611, 321, 362, 2531, 9005, 510, 337, 13420, 293, 11803, 13, 407, 382, 5310, 295, 5645, 8637, 11, 314, 1709, 281, 1176, 13], "avg_logprob": -0.14308035607848849, "compression_ratio": 1.3837837837837839, "no_speech_prob": 0.0, "words": [{"start": 1013.75, "end": 1014.27, "word": " less", "probability": 0.2301025390625}, {"start": 1014.27, "end": 1014.53, "word": " than", "probability": 0.935546875}, {"start": 1014.53, "end": 1014.77, "word": " this", "probability": 0.92578125}, {"start": 1014.77, "end": 1015.09, "word": " value,", "probability": 0.9404296875}, {"start": 1015.23, "end": 1015.31, "word": " so", "probability": 0.89599609375}, {"start": 1015.31, "end": 1015.51, "word": " 1", "probability": 0.962890625}, {"start": 1015.51, "end": 1016.19, "word": ".325,", "probability": 0.9454752604166666}, {"start": 1016.61, "end": 1016.91, "word": " 1", "probability": 0.98583984375}, {"start": 1016.91, "end": 1017.55, "word": ".310,", "probability": 0.97412109375}, {"start": 1018.07, "end": 1018.41, "word": " very", "probability": 0.7822265625}, {"start": 1018.41, "end": 1018.79, "word": " close,", "probability": 0.89697265625}, {"start": 1018.89, "end": 1019.03, "word": " 1", "probability": 0.99462890625}, {"start": 1019.03, "end": 1019.55, "word": ".28.", "probability": 0.97705078125}, {"start": 1020.27, "end": 1020.95, "word": " 
Similarly,", "probability": 0.83251953125}, {"start": 1021.11, "end": 1021.21, "word": " we", "probability": 0.505859375}, {"start": 1021.21, "end": 1021.41, "word": " can", "probability": 0.9462890625}, {"start": 1021.41, "end": 1021.63, "word": " do", "probability": 0.9638671875}, {"start": 1021.63, "end": 1022.03, "word": " for", "probability": 0.87744140625}, {"start": 1022.03, "end": 1022.25, "word": " the", "probability": 0.91162109375}, {"start": 1022.25, "end": 1022.55, "word": " other", "probability": 0.89794921875}, {"start": 1022.55, "end": 1023.17, "word": " confidence", "probability": 0.97412109375}, {"start": 1023.17, "end": 1024.05, "word": " levels.", "probability": 0.908203125}, {"start": 1024.55, "end": 1024.69, "word": " For", "probability": 0.9658203125}, {"start": 1024.69, "end": 1025.01, "word": " example,", "probability": 0.96533203125}, {"start": 1025.13, "end": 1025.29, "word": " for", "probability": 0.9443359375}, {"start": 1025.29, "end": 1026.15, "word": " 90%,", "probability": 0.79052734375}, {"start": 1026.15, "end": 1027.45, "word": " we", "probability": 0.95361328125}, {"start": 1027.45, "end": 1027.79, "word": " get", "probability": 0.94482421875}, {"start": 1027.79, "end": 1028.67, "word": " this", "probability": 0.94482421875}, {"start": 1028.67, "end": 1029.09, "word": " result.", "probability": 0.947265625}, {"start": 1030.41, "end": 1031.09, "word": " And", "probability": 0.95703125}, {"start": 1031.09, "end": 1031.31, "word": " also", "probability": 0.86279296875}, {"start": 1031.31, "end": 1031.47, "word": " we", "probability": 0.5322265625}, {"start": 1031.47, "end": 1032.09, "word": " have", "probability": 0.94873046875}, {"start": 1032.09, "end": 1032.55, "word": " similar", "probability": 0.9072265625}, {"start": 1032.55, "end": 1033.01, "word": " scenario", "probability": 0.8486328125}, {"start": 1033.01, "end": 1033.25, "word": " here", "probability": 0.83740234375}, {"start": 1033.25, "end": 1033.55, "word": " for", 
"probability": 0.90625}, {"start": 1033.55, "end": 1034.39, "word": " 95", "probability": 0.9873046875}, {"start": 1034.39, "end": 1034.65, "word": " and", "probability": 0.88623046875}, {"start": 1034.65, "end": 1035.05, "word": " 99.", "probability": 0.9697265625}, {"start": 1035.45, "end": 1035.71, "word": " So", "probability": 0.9619140625}, {"start": 1035.71, "end": 1036.03, "word": " as", "probability": 0.90869140625}, {"start": 1036.03, "end": 1036.31, "word": " degrees", "probability": 0.9248046875}, {"start": 1036.31, "end": 1036.45, "word": " of", "probability": 0.9677734375}, {"start": 1036.45, "end": 1036.63, "word": " freedom", "probability": 0.94189453125}, {"start": 1036.63, "end": 1037.19, "word": " increases,", "probability": 0.92333984375}, {"start": 1038.55, "end": 1038.93, "word": " T", "probability": 0.77734375}, {"start": 1038.93, "end": 1039.47, "word": " goes", "probability": 0.94677734375}, {"start": 1039.47, "end": 1039.65, "word": " to", "probability": 0.97021484375}, {"start": 1039.65, "end": 1039.87, "word": " Z.", "probability": 0.96142578125}], "temperature": 1.0}, {"id": 38, "seek": 106866, "start": 1041.2, "end": 1068.66, "text": " Now, what's the smallest difference between Z and other T values? For which level of significance? 80%. For 80%, the difference between Z and T here is the smallest among the others. 
Because the difference between 128 and 131 is the smallest value corresponding to the other ones, or comparing to the other ones.", "tokens": [823, 11, 437, 311, 264, 16998, 2649, 1296, 1176, 293, 661, 314, 4190, 30, 1171, 597, 1496, 295, 17687, 30, 4688, 6856, 1171, 4688, 8923, 264, 2649, 1296, 1176, 293, 314, 510, 307, 264, 16998, 3654, 264, 2357, 13, 1436, 264, 2649, 1296, 29810, 293, 3705, 16, 307, 264, 16998, 2158, 11760, 281, 264, 661, 2306, 11, 420, 15763, 281, 264, 661, 2306, 13], "avg_logprob": -0.18774038461538461, "compression_ratio": 1.8304093567251463, "no_speech_prob": 0.0, "words": [{"start": 1041.2, "end": 1041.56, "word": " Now,", "probability": 0.77099609375}, {"start": 1041.72, "end": 1042.06, "word": " what's", "probability": 0.88232421875}, {"start": 1042.06, "end": 1042.3, "word": " the", "probability": 0.92236328125}, {"start": 1042.3, "end": 1042.66, "word": " smallest", "probability": 0.90576171875}, {"start": 1042.66, "end": 1043.32, "word": " difference", "probability": 0.8486328125}, {"start": 1043.32, "end": 1044.08, "word": " between", "probability": 0.8173828125}, {"start": 1044.08, "end": 1044.36, "word": " Z", "probability": 0.420166015625}, {"start": 1044.36, "end": 1044.58, "word": " and", "probability": 0.92041015625}, {"start": 1044.58, "end": 1044.92, "word": " other", "probability": 0.53662109375}, {"start": 1044.92, "end": 1045.1, "word": " T", "probability": 0.82958984375}, {"start": 1045.1, "end": 1045.54, "word": " values?", "probability": 0.892578125}, {"start": 1046.56, "end": 1047.02, "word": " For", "probability": 0.8583984375}, {"start": 1047.02, "end": 1047.32, "word": " which", "probability": 0.931640625}, {"start": 1047.32, "end": 1047.66, "word": " level", "probability": 0.90625}, {"start": 1047.66, "end": 1047.88, "word": " of", "probability": 0.96044921875}, {"start": 1047.88, "end": 1048.68, "word": " significance?", "probability": 0.9697265625}, {"start": 1051.7, "end": 1052.38, "word": " 80%.", "probability": 
0.62890625}, {"start": 1052.38, "end": 1054.16, "word": " For", "probability": 0.8466796875}, {"start": 1054.16, "end": 1054.74, "word": " 80%,", "probability": 0.821533203125}, {"start": 1054.74, "end": 1055.0, "word": " the", "probability": 0.9140625}, {"start": 1055.0, "end": 1055.4, "word": " difference", "probability": 0.85498046875}, {"start": 1055.4, "end": 1055.76, "word": " between", "probability": 0.88232421875}, {"start": 1055.76, "end": 1056.02, "word": " Z", "probability": 0.96533203125}, {"start": 1056.02, "end": 1056.28, "word": " and", "probability": 0.94482421875}, {"start": 1056.28, "end": 1056.46, "word": " T", "probability": 0.9892578125}, {"start": 1056.46, "end": 1056.82, "word": " here", "probability": 0.82373046875}, {"start": 1056.82, "end": 1057.4, "word": " is", "probability": 0.8466796875}, {"start": 1057.4, "end": 1057.6, "word": " the", "probability": 0.904296875}, {"start": 1057.6, "end": 1058.0, "word": " smallest", "probability": 0.939453125}, {"start": 1058.0, "end": 1058.68, "word": " among", "probability": 0.8994140625}, {"start": 1058.68, "end": 1058.92, "word": " the", "probability": 0.84765625}, {"start": 1058.92, "end": 1059.3, "word": " others.", "probability": 0.90380859375}, {"start": 1060.28, "end": 1060.58, "word": " Because", "probability": 0.85107421875}, {"start": 1060.58, "end": 1060.7, "word": " the", "probability": 0.89013671875}, {"start": 1060.7, "end": 1060.94, "word": " difference", "probability": 0.86767578125}, {"start": 1060.94, "end": 1061.22, "word": " between", "probability": 0.89013671875}, {"start": 1061.22, "end": 1061.84, "word": " 128", "probability": 0.71728515625}, {"start": 1061.84, "end": 1062.18, "word": " and", "probability": 0.9306640625}, {"start": 1062.18, "end": 1062.92, "word": " 131", "probability": 0.9599609375}, {"start": 1062.92, "end": 1063.74, "word": " is", "probability": 0.91064453125}, {"start": 1063.74, "end": 1063.96, "word": " the", "probability": 0.91552734375}, {"start": 
1063.96, "end": 1064.48, "word": " smallest", "probability": 0.9375}, {"start": 1064.48, "end": 1065.04, "word": " value", "probability": 0.97021484375}, {"start": 1065.04, "end": 1066.02, "word": " corresponding", "probability": 0.77880859375}, {"start": 1066.02, "end": 1066.28, "word": " to", "probability": 0.9677734375}, {"start": 1066.28, "end": 1066.4, "word": " the", "probability": 0.8896484375}, {"start": 1066.4, "end": 1066.6, "word": " other", "probability": 0.84228515625}, {"start": 1066.6, "end": 1066.98, "word": " ones,", "probability": 0.91259765625}, {"start": 1067.46, "end": 1067.6, "word": " or", "probability": 0.90185546875}, {"start": 1067.6, "end": 1067.94, "word": " comparing", "probability": 0.90771484375}, {"start": 1067.94, "end": 1068.1, "word": " to", "probability": 0.958984375}, {"start": 1068.1, "end": 1068.24, "word": " the", "probability": 0.85400390625}, {"start": 1068.24, "end": 1068.36, "word": " other", "probability": 0.8623046875}, {"start": 1068.36, "end": 1068.66, "word": " ones.", "probability": 0.91943359375}], "temperature": 1.0}, {"id": 39, "seek": 110053, "start": 1071.27, "end": 1100.53, "text": " For small confidence level, the difference between z and t becomes smaller and smaller. So t goes to z as n increases. So again, here, the yellow one is the normal table, standardized normal table. 
It's a normal table with degrees of freedom infinity as for t.", "tokens": [1171, 1359, 6687, 1496, 11, 264, 2649, 1296, 710, 293, 256, 3643, 4356, 293, 4356, 13, 407, 256, 1709, 281, 710, 382, 297, 8637, 13, 407, 797, 11, 510, 11, 264, 5566, 472, 307, 264, 2710, 3199, 11, 31677, 2710, 3199, 13, 467, 311, 257, 2710, 3199, 365, 5310, 295, 5645, 13202, 382, 337, 256, 13], "avg_logprob": -0.19654605681436105, "compression_ratio": 1.5818181818181818, "no_speech_prob": 0.0, "words": [{"start": 1071.27, "end": 1071.59, "word": " For", "probability": 0.66748046875}, {"start": 1071.59, "end": 1072.13, "word": " small", "probability": 0.69677734375}, {"start": 1072.13, "end": 1073.13, "word": " confidence", "probability": 0.97412109375}, {"start": 1073.13, "end": 1073.43, "word": " level,", "probability": 0.88818359375}, {"start": 1074.31, "end": 1074.55, "word": " the", "probability": 0.9140625}, {"start": 1074.55, "end": 1074.97, "word": " difference", "probability": 0.86083984375}, {"start": 1074.97, "end": 1075.39, "word": " between", "probability": 0.88330078125}, {"start": 1075.39, "end": 1075.65, "word": " z", "probability": 0.7333984375}, {"start": 1075.65, "end": 1075.89, "word": " and", "probability": 0.93994140625}, {"start": 1075.89, "end": 1075.99, "word": " t", "probability": 0.65234375}, {"start": 1075.99, "end": 1076.37, "word": " becomes", "probability": 0.83984375}, {"start": 1076.37, "end": 1076.77, "word": " smaller", "probability": 0.89599609375}, {"start": 1076.77, "end": 1077.71, "word": " and", "probability": 0.89990234375}, {"start": 1077.71, "end": 1078.15, "word": " smaller.", "probability": 0.8828125}, {"start": 1079.31, "end": 1080.03, "word": " So", "probability": 0.958984375}, {"start": 1080.03, "end": 1080.35, "word": " t", "probability": 0.5673828125}, {"start": 1080.35, "end": 1080.89, "word": " goes", "probability": 0.94140625}, {"start": 1080.89, "end": 1081.09, "word": " to", "probability": 0.9560546875}, {"start": 1081.09, "end": 
1081.33, "word": " z", "probability": 0.98291015625}, {"start": 1081.33, "end": 1081.65, "word": " as", "probability": 0.90234375}, {"start": 1081.65, "end": 1081.93, "word": " n", "probability": 0.89306640625}, {"start": 1081.93, "end": 1082.53, "word": " increases.", "probability": 0.91552734375}, {"start": 1086.43, "end": 1087.15, "word": " So", "probability": 0.951171875}, {"start": 1087.15, "end": 1087.45, "word": " again,", "probability": 0.90625}, {"start": 1087.69, "end": 1088.11, "word": " here,", "probability": 0.85400390625}, {"start": 1089.63, "end": 1090.07, "word": " the", "probability": 0.90966796875}, {"start": 1090.07, "end": 1092.15, "word": " yellow", "probability": 0.8359375}, {"start": 1092.15, "end": 1092.97, "word": " one", "probability": 0.91650390625}, {"start": 1092.97, "end": 1093.51, "word": " is", "probability": 0.94921875}, {"start": 1093.51, "end": 1093.69, "word": " the", "probability": 0.9111328125}, {"start": 1093.69, "end": 1093.95, "word": " normal", "probability": 0.74072265625}, {"start": 1093.95, "end": 1094.35, "word": " table,", "probability": 0.88427734375}, {"start": 1094.67, "end": 1095.07, "word": " standardized", "probability": 0.70947265625}, {"start": 1095.07, "end": 1095.43, "word": " normal", "probability": 0.888671875}, {"start": 1095.43, "end": 1095.77, "word": " table.", "probability": 0.88623046875}, {"start": 1095.91, "end": 1096.01, "word": " It's", "probability": 0.6500244140625}, {"start": 1096.01, "end": 1096.09, "word": " a", "probability": 0.73193359375}, {"start": 1096.09, "end": 1096.45, "word": " normal", "probability": 0.85107421875}, {"start": 1096.45, "end": 1096.79, "word": " table", "probability": 0.88427734375}, {"start": 1096.79, "end": 1097.15, "word": " with", "probability": 0.8935546875}, {"start": 1097.15, "end": 1098.63, "word": " degrees", "probability": 0.93115234375}, {"start": 1098.63, "end": 1098.81, "word": " of", "probability": 0.96728515625}, {"start": 1098.81, "end": 1099.01, 
"word": " freedom", "probability": 0.95947265625}, {"start": 1099.01, "end": 1099.73, "word": " infinity", "probability": 0.578125}, {"start": 1099.73, "end": 1100.05, "word": " as", "probability": 0.79833984375}, {"start": 1100.05, "end": 1100.27, "word": " for", "probability": 0.6689453125}, {"start": 1100.27, "end": 1100.53, "word": " t.", "probability": 0.9638671875}], "temperature": 1.0}, {"id": 40, "seek": 112178, "start": 1101.92, "end": 1121.78, "text": " The other two for T distribution with different degrees of freedom. In this case, T distributions are bell-shaped and symmetric, but have fatter tails than the normal. That means normal distribution goes to the zero", "tokens": [440, 661, 732, 337, 314, 7316, 365, 819, 5310, 295, 5645, 13, 682, 341, 1389, 11, 314, 37870, 366, 4549, 12, 23103, 293, 32330, 11, 457, 362, 283, 1161, 28537, 813, 264, 2710, 13, 663, 1355, 2710, 7316, 1709, 281, 264, 4018], "avg_logprob": -0.20585029139075167, "compression_ratio": 1.5, "no_speech_prob": 0.0, "words": [{"start": 1101.92, "end": 1102.12, "word": " The", "probability": 0.7421875}, {"start": 1102.12, "end": 1102.42, "word": " other", "probability": 0.8828125}, {"start": 1102.42, "end": 1102.78, "word": " two", "probability": 0.88720703125}, {"start": 1102.78, "end": 1103.46, "word": " for", "probability": 0.6455078125}, {"start": 1103.46, "end": 1103.62, "word": " T", "probability": 0.669921875}, {"start": 1103.62, "end": 1104.16, "word": " distribution", "probability": 0.5498046875}, {"start": 1104.16, "end": 1104.42, "word": " with", "probability": 0.87939453125}, {"start": 1104.42, "end": 1104.74, "word": " different", "probability": 0.9052734375}, {"start": 1104.74, "end": 1105.04, "word": " degrees", "probability": 0.94775390625}, {"start": 1105.04, "end": 1105.26, "word": " of", "probability": 0.97119140625}, {"start": 1105.26, "end": 1105.52, "word": " freedom.", "probability": 0.94677734375}, {"start": 1106.88, "end": 1107.3, "word": " In", "probability": 
0.94580078125}, {"start": 1107.3, "end": 1107.5, "word": " this", "probability": 0.94580078125}, {"start": 1107.5, "end": 1107.78, "word": " case,", "probability": 0.91796875}, {"start": 1107.88, "end": 1108.0, "word": " T", "probability": 0.962890625}, {"start": 1108.0, "end": 1108.66, "word": " distributions", "probability": 0.77734375}, {"start": 1108.66, "end": 1109.02, "word": " are", "probability": 0.94384765625}, {"start": 1109.02, "end": 1109.24, "word": " bell", "probability": 0.958984375}, {"start": 1109.24, "end": 1109.56, "word": "-shaped", "probability": 0.747314453125}, {"start": 1109.56, "end": 1109.82, "word": " and", "probability": 0.939453125}, {"start": 1109.82, "end": 1110.24, "word": " symmetric,", "probability": 0.82080078125}, {"start": 1111.06, "end": 1111.94, "word": " but", "probability": 0.90185546875}, {"start": 1111.94, "end": 1112.3, "word": " have", "probability": 0.90771484375}, {"start": 1112.3, "end": 1112.92, "word": " fatter", "probability": 0.89208984375}, {"start": 1112.92, "end": 1113.5, "word": " tails", "probability": 0.90234375}, {"start": 1113.5, "end": 1113.9, "word": " than", "probability": 0.943359375}, {"start": 1113.9, "end": 1114.1, "word": " the", "probability": 0.8955078125}, {"start": 1114.1, "end": 1114.36, "word": " normal.", "probability": 0.88134765625}, {"start": 1115.54, "end": 1116.06, "word": " That", "probability": 0.9091796875}, {"start": 1116.06, "end": 1116.52, "word": " means", "probability": 0.93505859375}, {"start": 1116.52, "end": 1117.2, "word": " normal", "probability": 0.59130859375}, {"start": 1117.2, "end": 1117.96, "word": " distribution", "probability": 0.85205078125}, {"start": 1117.96, "end": 1120.38, "word": " goes", "probability": 0.81396484375}, {"start": 1120.38, "end": 1121.06, "word": " to", "probability": 0.96044921875}, {"start": 1121.06, "end": 1121.28, "word": " the", "probability": 0.73828125}, {"start": 1121.28, "end": 1121.78, "word": " zero", "probability": 0.71728515625}], 
"temperature": 1.0}], "language": "en", "language_probability": 1.0, "duration": 1130.4635, "duration_after_vad": 1048.7993750000003} \ No newline at end of file